1 /* SPDX-License-Identifier: GPL-2.0 */
2 /* Copyright (c) 2018 Facebook */
3 
4 #include <uapi/linux/btf.h>
5 #include <uapi/linux/bpf.h>
6 #include <uapi/linux/bpf_perf_event.h>
7 #include <uapi/linux/types.h>
8 #include <linux/seq_file.h>
9 #include <linux/compiler.h>
10 #include <linux/ctype.h>
11 #include <linux/errno.h>
12 #include <linux/slab.h>
13 #include <linux/anon_inodes.h>
14 #include <linux/file.h>
15 #include <linux/uaccess.h>
16 #include <linux/kernel.h>
17 #include <linux/idr.h>
18 #include <linux/sort.h>
19 #include <linux/bpf_verifier.h>
20 #include <linux/btf.h>
21 #include <linux/btf_ids.h>
22 #include <linux/skmsg.h>
23 #include <linux/perf_event.h>
24 #include <linux/bsearch.h>
25 #include <linux/btf_ids.h>
26 #include <net/sock.h>
27 
28 /* BTF (BPF Type Format) is the metadata format which describes
29  * the data types of BPF programs/maps.  Hence, it basically focuses
30  * on the C programming language, which modern BPF primarily
31  * uses.
32  *
33  * ELF Section:
34  * ~~~~~~~~~~~
35  * The BTF data is stored under the ".BTF" ELF section
36  *
37  * struct btf_type:
38  * ~~~~~~~~~~~~~~~
39  * Each 'struct btf_type' object describes a C data type.
40  * Depending on the type it is describing, a 'struct btf_type'
41  * object may be followed by more data.  F.e.
42  * To describe an array, 'struct btf_type' is followed by
43  * 'struct btf_array'.
44  *
45  * 'struct btf_type' and any extra data following it are
46  * 4 bytes aligned.
47  *
48  * Type section:
49  * ~~~~~~~~~~~~~
50  * The BTF type section contains a list of 'struct btf_type' objects.
51  * Each one describes a C type.  Recall from the above section
52  * that a 'struct btf_type' object could be immediately followed by extra
53  * data in order to describe some particular C types.
54  *
55  * type_id:
56  * ~~~~~~~
57  * Each btf_type object is identified by a type_id.  The type_id
58  * is implied by the location of the btf_type object in
59  * the BTF type section.  The first one has type_id 1.  The second
60  * one has type_id 2...etc.  Hence, an earlier btf_type has
61  * a smaller type_id.
62  *
63  * A btf_type object may refer to another btf_type object by using
64  * type_id (i.e. the "type" in the "struct btf_type").
65  *
66  * NOTE that we cannot assume any reference-order.
67  * A btf_type object can refer to an earlier btf_type object
68  * but it can also refer to a later btf_type object.
69  *
70  * For example, to describe "const void *".  A btf_type
71  * object describing "const" may refer to another btf_type
72  * object describing "void *".  This type-reference is done
73  * by specifying type_id:
74  *
75  * [1] CONST (anon) type_id=2
76  * [2] PTR (anon) type_id=0
77  *
78  * The above is the btf_verifier debug log:
79  *   - Each line starting with "[?]" is a btf_type object
80  *   - [?] is the type_id of the btf_type object.
81  *   - CONST/PTR is the BTF_KIND_XXX
82  *   - "(anon)" is the name of the type.  It just
83  *     happens that CONST and PTR have no name.
84  *   - type_id=XXX is the 'u32 type' in btf_type
85  *
86  * NOTE: "void" has type_id 0
87  *
88  * String section:
89  * ~~~~~~~~~~~~~~
90  * The BTF string section contains the names used by the type section.
91  * Each string is referred to by an "offset" from the beginning of the
92  * string section.
93  *
94  * Each string is '\0' terminated.
95  *
96  * The first character in the string section must be '\0'
97  * which is used to mean 'anonymous'. Some btf_type may not
98  * have a name.
99  */
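/* Editorial illustration (not part of the original comment): for a
 * hypothetical declaration "const int *p", the two sections could look
 * roughly like this:
 *
 *   String section:  "\0int\0p\0"      (offset 0 is the mandatory '\0',
 *                                       "int" starts at offset 1,
 *                                       "p" starts at offset 5)
 *   Type section:    [1] INT "int"     (name_off=1)
 *                    [2] CONST (anon)  type_id=1
 *                    [3] PTR (anon)    type_id=2
 *                    [4] VAR "p"       (name_off=5) type_id=3
 *
 * The concrete offsets and ordering are made up for illustration; only the
 * rules above (offset 0 == anonymous, type_id 0 == void, ids assigned by
 * position) are guaranteed.
 */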
100 
101 /* BTF verification:
102  *
103  * To verify BTF data, two passes are needed.
104  *
105  * Pass #1
106  * ~~~~~~~
107  * The first pass is to collect all btf_type objects to
108  * an array: "btf->types".
109  *
110  * Depending on the C type that a btf_type is describing,
111  * a btf_type may be followed by extra data.  We don't know
112  * how many btf_types there are, and more importantly we don't
113  * know where each btf_type is located in the type section.
114  *
115  * Without knowing the location of each type_id, most verifications
116  * cannot be done.  e.g. an earlier btf_type may refer to a later
117  * btf_type (recall the "const void *" above), so we cannot
118  * check this type-reference in the first pass.
119  *
120  * The first pass still does some verifications (e.g.
121  * checking that the name is a valid offset into the string section).
122  *
123  * Pass #2
124  * ~~~~~~~
125  * The main focus is to resolve a btf_type that is referring
126  * to another type.
127  *
128  * We have to ensure the referring type:
129  * 1) does exist in the BTF (i.e. in btf->types[])
130  * 2) does not cause a loop:
131  *	struct A {
132  *		struct B b;
133  *	};
134  *
135  *	struct B {
136  *		struct A a;
137  *	};
138  *
139  * btf_type_needs_resolve() decides if a btf_type needs
140  * to be resolved.
141  *
142  * The needs_resolve type implements the "resolve()" ops which
143  * essentially does a DFS and detects backedges.
144  *
145  * During resolve (or DFS), different C types have different
146  * "RESOLVED" conditions.
147  *
148  * When resolving a BTF_KIND_STRUCT, we need to resolve all its
149  * members because a member is always referring to another
150  * type.  A struct's member can be treated as "RESOLVED" if
151  * it is referring to a BTF_KIND_PTR.  Otherwise, the
152  * following valid C struct would be rejected:
153  *
154  *	struct A {
155  *		int m;
156  *		struct A *a;
157  *	};
158  *
159  * When resolving a BTF_KIND_PTR, it needs to keep resolving if
160  * it is referring to another BTF_KIND_PTR.  Otherwise, we cannot
161  * detect a pointer loop, e.g.:
162  * BTF_KIND_CONST -> BTF_KIND_PTR -> BTF_KIND_CONST -> BTF_KIND_PTR +
163  *                        ^                                         |
164  *                        +-----------------------------------------+
165  *
166  */
167 
168 #define BITS_PER_U128 (sizeof(u64) * BITS_PER_BYTE * 2)
169 #define BITS_PER_BYTE_MASK (BITS_PER_BYTE - 1)
170 #define BITS_PER_BYTE_MASKED(bits) ((bits) & BITS_PER_BYTE_MASK)
171 #define BITS_ROUNDDOWN_BYTES(bits) ((bits) >> 3)
172 #define BITS_ROUNDUP_BYTES(bits) \
173 	(BITS_ROUNDDOWN_BYTES(bits) + !!BITS_PER_BYTE_MASKED(bits))
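/* A few worked values of the macros above, as a sanity check:
 *   BITS_ROUNDDOWN_BYTES(9)  == 1
 *   BITS_PER_BYTE_MASKED(9)  == 1
 *   BITS_ROUNDUP_BYTES(8)    == 1	(exactly one byte)
 *   BITS_ROUNDUP_BYTES(9)    == 2	(one full byte plus one spare bit)
 */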
174 
175 #define BTF_INFO_MASK 0x8f00ffff
176 #define BTF_INT_MASK 0x0fffffff
177 #define BTF_TYPE_ID_VALID(type_id) ((type_id) <= BTF_MAX_TYPE)
178 #define BTF_STR_OFFSET_VALID(name_off) ((name_off) <= BTF_MAX_NAME_OFFSET)
179 
180 /* 16MB for 64k structs and each has 16 members and
181  * a few MB spaces for the string section.
182  * The hard limit is S32_MAX.
183  */
184 #define BTF_MAX_SIZE (16 * 1024 * 1024)
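/* Rough arithmetic behind the estimate above (an editorial sketch, using
 * sizeof(struct btf_type) == 12 and sizeof(struct btf_member) == 12 from
 * uapi/linux/btf.h): 64k structs * (12 + 16 * 12) bytes is roughly 12.75MB
 * of type data, leaving a few MB of the 16MB budget for the string section.
 */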
185 
186 #define for_each_member_from(i, from, struct_type, member)		\
187 	for (i = from, member = btf_type_member(struct_type) + from;	\
188 	     i < btf_type_vlen(struct_type);				\
189 	     i++, member++)
190 
191 #define for_each_vsi_from(i, from, struct_type, member)				\
192 	for (i = from, member = btf_type_var_secinfo(struct_type) + from;	\
193 	     i < btf_type_vlen(struct_type);					\
194 	     i++, member++)
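/* Hypothetical usage sketch of the iterators above ("i" and "member" are
 * caller-declared; a "from" of 0 starts at the first member):
 *
 *	const struct btf_member *member;
 *	u16 i;
 *
 *	for_each_member_from(i, 0, struct_type, member)
 *		pr_debug("member type_id=%u\n", member->type);
 */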
195 
196 DEFINE_IDR(btf_idr);
197 DEFINE_SPINLOCK(btf_idr_lock);
198 
199 struct btf {
200 	void *data;
201 	struct btf_type **types;
202 	u32 *resolved_ids;
203 	u32 *resolved_sizes;
204 	const char *strings;
205 	void *nohdr_data;
206 	struct btf_header hdr;
207 	u32 nr_types;
208 	u32 types_size;
209 	u32 data_size;
210 	refcount_t refcnt;
211 	u32 id;
212 	struct rcu_head rcu;
213 };
214 
215 enum verifier_phase {
216 	CHECK_META,
217 	CHECK_TYPE,
218 };
219 
220 struct resolve_vertex {
221 	const struct btf_type *t;
222 	u32 type_id;
223 	u16 next_member;
224 };
225 
226 enum visit_state {
227 	NOT_VISITED,
228 	VISITED,
229 	RESOLVED,
230 };
231 
232 enum resolve_mode {
233 	RESOLVE_TBD,	/* To Be Determined */
234 	RESOLVE_PTR,	/* Resolving for Pointer */
235 	RESOLVE_STRUCT_OR_ARRAY,	/* Resolving for struct/union
236 					 * or array
237 					 */
238 };
239 
240 #define MAX_RESOLVE_DEPTH 32
241 
242 struct btf_sec_info {
243 	u32 off;
244 	u32 len;
245 };
246 
247 struct btf_verifier_env {
248 	struct btf *btf;
249 	u8 *visit_states;
250 	struct resolve_vertex stack[MAX_RESOLVE_DEPTH];
251 	struct bpf_verifier_log log;
252 	u32 log_type_id;
253 	u32 top_stack;
254 	enum verifier_phase phase;
255 	enum resolve_mode resolve_mode;
256 };
257 
258 static const char * const btf_kind_str[NR_BTF_KINDS] = {
259 	[BTF_KIND_UNKN]		= "UNKNOWN",
260 	[BTF_KIND_INT]		= "INT",
261 	[BTF_KIND_PTR]		= "PTR",
262 	[BTF_KIND_ARRAY]	= "ARRAY",
263 	[BTF_KIND_STRUCT]	= "STRUCT",
264 	[BTF_KIND_UNION]	= "UNION",
265 	[BTF_KIND_ENUM]		= "ENUM",
266 	[BTF_KIND_FWD]		= "FWD",
267 	[BTF_KIND_TYPEDEF]	= "TYPEDEF",
268 	[BTF_KIND_VOLATILE]	= "VOLATILE",
269 	[BTF_KIND_CONST]	= "CONST",
270 	[BTF_KIND_RESTRICT]	= "RESTRICT",
271 	[BTF_KIND_FUNC]		= "FUNC",
272 	[BTF_KIND_FUNC_PROTO]	= "FUNC_PROTO",
273 	[BTF_KIND_VAR]		= "VAR",
274 	[BTF_KIND_DATASEC]	= "DATASEC",
275 };
276 
277 static const char *btf_type_str(const struct btf_type *t)
278 {
279 	return btf_kind_str[BTF_INFO_KIND(t->info)];
280 }
281 
282 /* Chunk size we use in safe copy of data to be shown. */
283 #define BTF_SHOW_OBJ_SAFE_SIZE		32
284 
285 /*
286  * This is the maximum size of a base type value (equivalent to a
287  * 128-bit int); if we are at the end of our safe buffer and have
288  * less than 16 bytes of space we can't be assured of being able
289  * to copy the next type safely, so in such cases we will initiate
290  * a new copy.
291  */
292 #define BTF_SHOW_OBJ_BASE_TYPE_SIZE	16
293 
294 /* Type name size */
295 #define BTF_SHOW_NAME_SIZE		80
296 
297 /*
298  * Common data to all BTF show operations. Private show functions can add
299  * their own data to a structure containing a struct btf_show and consult it
300  * in the show callback.  See btf_type_show() below.
301  *
302  * One challenge with showing nested data is we want to skip 0-valued
303  * data, but in order to figure out whether a nested object is all zeros
304  * we need to walk through it.  As a result, we need to make two passes
305  * when handling structs, unions and arrays; the first pass simply looks
306  * for nonzero data, while the second actually does the display.  The first
307  * pass is signalled by show->state.depth_check being set, and if we
308  * encounter a non-zero value we set show->state.depth_to_show to
309  * the depth at which we encountered it.  When we have completed the
310  * first pass, we will know if anything needs to be displayed if
311  * depth_to_show > depth.  See btf_[struct,array]_show() for the
312  * implementation of this.
313  *
314  * Another problem is we want to ensure the data for display is safe to
315  * access.  To support this, the anonymous "struct {} obj" tracks the data
316  * object and our safe copy of it.  We copy portions of the data needed
317  * to the object "copy" buffer, but because its size is limited to
318  * BTF_SHOW_OBJ_SAFE_SIZE bytes, multiple copies may be required as we
319  * traverse larger objects for display.
320  *
321  * The various data type show functions all start with a call to
322  * btf_show_start_type() which returns a pointer to the safe copy
323  * of the data needed (or if BTF_SHOW_UNSAFE is specified, to the
324  * raw data itself).  btf_show_obj_safe() is responsible for
325  * using copy_from_kernel_nofault() to update the safe data if necessary
326  * as we traverse the object's data.  skbuff-like semantics are
327  * used:
328  *
329  * - obj.head points to the start of the toplevel object for display
330  * - obj.size is the size of the toplevel object
331  * - obj.data points to the current point in the original data at
332  *   which our safe data starts.  obj.data will advance as we copy
333  *   portions of the data.
334  *
335  * In most cases a single copy will suffice, but larger data structures
336  * such as "struct task_struct" will require many copies.  The logic in
337  * btf_show_obj_safe() determines if a new
338  * copy_from_kernel_nofault() is needed.
339  */
340 struct btf_show {
341 	u64 flags;
342 	void *target;	/* target of show operation (seq file, buffer) */
343 	void (*showfn)(struct btf_show *show, const char *fmt, va_list args);
344 	const struct btf *btf;
345 	/* below are used during iteration */
346 	struct {
347 		u8 depth;
348 		u8 depth_to_show;
349 		u8 depth_check;
350 		u8 array_member:1,
351 		   array_terminated:1;
352 		u16 array_encoding;
353 		u32 type_id;
354 		int status;			/* non-zero for error */
355 		const struct btf_type *type;
356 		const struct btf_member *member;
357 		char name[BTF_SHOW_NAME_SIZE];	/* space for member name/type */
358 	} state;
359 	struct {
360 		u32 size;
361 		void *head;
362 		void *data;
363 		u8 safe[BTF_SHOW_OBJ_SAFE_SIZE];
364 	} obj;
365 };
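/* As a rough, editorial illustration (exact rendering not guaranteed),
 * showing a value of
 *
 *	struct foo { int a; char b; } f = { .a = 1, .b = 'x' };
 *
 * through this machinery produces output along the lines of:
 *
 *	(struct foo){
 *	 .a = (int)1,
 *	 .b = (char)120,
 *	}
 *
 * with BTF_SHOW_COMPACT collapsing the newlines/indentation and
 * BTF_SHOW_ZERO also emitting members that happen to be zero.
 */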
366 
367 struct btf_kind_operations {
368 	s32 (*check_meta)(struct btf_verifier_env *env,
369 			  const struct btf_type *t,
370 			  u32 meta_left);
371 	int (*resolve)(struct btf_verifier_env *env,
372 		       const struct resolve_vertex *v);
373 	int (*check_member)(struct btf_verifier_env *env,
374 			    const struct btf_type *struct_type,
375 			    const struct btf_member *member,
376 			    const struct btf_type *member_type);
377 	int (*check_kflag_member)(struct btf_verifier_env *env,
378 				  const struct btf_type *struct_type,
379 				  const struct btf_member *member,
380 				  const struct btf_type *member_type);
381 	void (*log_details)(struct btf_verifier_env *env,
382 			    const struct btf_type *t);
383 	void (*show)(const struct btf *btf, const struct btf_type *t,
384 			 u32 type_id, void *data, u8 bits_offsets,
385 			 struct btf_show *show);
386 };
387 
388 static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS];
389 static struct btf_type btf_void;
390 
391 static int btf_resolve(struct btf_verifier_env *env,
392 		       const struct btf_type *t, u32 type_id);
393 
394 static bool btf_type_is_modifier(const struct btf_type *t)
395 {
396 	/* Some of them are not strictly C modifiers
397 	 * but they are grouped into the same bucket
398 	 * as far as BTF is concerned:
399 	 *   A type (t) that refers to another
400 	 *   type through t->type AND its size cannot
401 	 *   be determined without following the t->type.
402 	 *
403 	 * ptr does not fall into this bucket
404 	 * because its size is always sizeof(void *).
405 	 */
406 	switch (BTF_INFO_KIND(t->info)) {
407 	case BTF_KIND_TYPEDEF:
408 	case BTF_KIND_VOLATILE:
409 	case BTF_KIND_CONST:
410 	case BTF_KIND_RESTRICT:
411 		return true;
412 	}
413 
414 	return false;
415 }
416 
417 bool btf_type_is_void(const struct btf_type *t)
418 {
419 	return t == &btf_void;
420 }
421 
422 static bool btf_type_is_fwd(const struct btf_type *t)
423 {
424 	return BTF_INFO_KIND(t->info) == BTF_KIND_FWD;
425 }
426 
427 static bool btf_type_nosize(const struct btf_type *t)
428 {
429 	return btf_type_is_void(t) || btf_type_is_fwd(t) ||
430 	       btf_type_is_func(t) || btf_type_is_func_proto(t);
431 }
432 
433 static bool btf_type_nosize_or_null(const struct btf_type *t)
434 {
435 	return !t || btf_type_nosize(t);
436 }
437 
438 static bool __btf_type_is_struct(const struct btf_type *t)
439 {
440 	return BTF_INFO_KIND(t->info) == BTF_KIND_STRUCT;
441 }
442 
443 static bool btf_type_is_array(const struct btf_type *t)
444 {
445 	return BTF_INFO_KIND(t->info) == BTF_KIND_ARRAY;
446 }
447 
448 static bool btf_type_is_datasec(const struct btf_type *t)
449 {
450 	return BTF_INFO_KIND(t->info) == BTF_KIND_DATASEC;
451 }
452 
453 s32 btf_find_by_name_kind(const struct btf *btf, const char *name, u8 kind)
454 {
455 	const struct btf_type *t;
456 	const char *tname;
457 	u32 i;
458 
459 	for (i = 1; i <= btf->nr_types; i++) {
460 		t = btf->types[i];
461 		if (BTF_INFO_KIND(t->info) != kind)
462 			continue;
463 
464 		tname = btf_name_by_offset(btf, t->name_off);
465 		if (!strcmp(tname, name))
466 			return i;
467 	}
468 
469 	return -ENOENT;
470 }
471 
472 const struct btf_type *btf_type_skip_modifiers(const struct btf *btf,
473 					       u32 id, u32 *res_id)
474 {
475 	const struct btf_type *t = btf_type_by_id(btf, id);
476 
477 	while (btf_type_is_modifier(t)) {
478 		id = t->type;
479 		t = btf_type_by_id(btf, t->type);
480 	}
481 
482 	if (res_id)
483 		*res_id = id;
484 
485 	return t;
486 }
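/* Usage note (editorial): for a chain TYPEDEF -> CONST -> INT,
 * btf_type_skip_modifiers() skips all the way to the INT type and, if
 * res_id is non-NULL, stores the INT's type_id in *res_id;
 * btf_type_skip_qualifiers() further below stops at the TYPEDEF instead.
 */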
487 
488 const struct btf_type *btf_type_resolve_ptr(const struct btf *btf,
489 					    u32 id, u32 *res_id)
490 {
491 	const struct btf_type *t;
492 
493 	t = btf_type_skip_modifiers(btf, id, NULL);
494 	if (!btf_type_is_ptr(t))
495 		return NULL;
496 
497 	return btf_type_skip_modifiers(btf, t->type, res_id);
498 }
499 
500 const struct btf_type *btf_type_resolve_func_ptr(const struct btf *btf,
501 						 u32 id, u32 *res_id)
502 {
503 	const struct btf_type *ptype;
504 
505 	ptype = btf_type_resolve_ptr(btf, id, res_id);
506 	if (ptype && btf_type_is_func_proto(ptype))
507 		return ptype;
508 
509 	return NULL;
510 }
511 
512 /* Types that act only as a source, not as a sink or intermediate
513  * type, when resolving.
514  */
515 static bool btf_type_is_resolve_source_only(const struct btf_type *t)
516 {
517 	return btf_type_is_var(t) ||
518 	       btf_type_is_datasec(t);
519 }
520 
521 /* What types need to be resolved?
522  *
523  * btf_type_is_modifier() is an obvious one.
524  *
525  * btf_type_is_struct() because its member refers to
526  * another type (through member->type).
527  *
528  * btf_type_is_var() because the variable refers to
529  * another type. btf_type_is_datasec() holds multiple
530  * btf_type_is_var() types that need resolving.
531  *
532  * btf_type_is_array() because its element (array->type)
533  * refers to another type.  An array can be thought of as a
534  * special case of a struct where the same
535  * member-type is repeated array->nelems times.
536  */
537 static bool btf_type_needs_resolve(const struct btf_type *t)
538 {
539 	return btf_type_is_modifier(t) ||
540 	       btf_type_is_ptr(t) ||
541 	       btf_type_is_struct(t) ||
542 	       btf_type_is_array(t) ||
543 	       btf_type_is_var(t) ||
544 	       btf_type_is_datasec(t);
545 }
546 
547 /* t->size can be used */
548 static bool btf_type_has_size(const struct btf_type *t)
549 {
550 	switch (BTF_INFO_KIND(t->info)) {
551 	case BTF_KIND_INT:
552 	case BTF_KIND_STRUCT:
553 	case BTF_KIND_UNION:
554 	case BTF_KIND_ENUM:
555 	case BTF_KIND_DATASEC:
556 		return true;
557 	}
558 
559 	return false;
560 }
561 
562 static const char *btf_int_encoding_str(u8 encoding)
563 {
564 	if (encoding == 0)
565 		return "(none)";
566 	else if (encoding == BTF_INT_SIGNED)
567 		return "SIGNED";
568 	else if (encoding == BTF_INT_CHAR)
569 		return "CHAR";
570 	else if (encoding == BTF_INT_BOOL)
571 		return "BOOL";
572 	else
573 		return "UNKN";
574 }
575 
576 static u32 btf_type_int(const struct btf_type *t)
577 {
578 	return *(u32 *)(t + 1);
579 }
580 
581 static const struct btf_array *btf_type_array(const struct btf_type *t)
582 {
583 	return (const struct btf_array *)(t + 1);
584 }
585 
586 static const struct btf_enum *btf_type_enum(const struct btf_type *t)
587 {
588 	return (const struct btf_enum *)(t + 1);
589 }
590 
591 static const struct btf_var *btf_type_var(const struct btf_type *t)
592 {
593 	return (const struct btf_var *)(t + 1);
594 }
595 
596 static const struct btf_kind_operations *btf_type_ops(const struct btf_type *t)
597 {
598 	return kind_ops[BTF_INFO_KIND(t->info)];
599 }
600 
601 static bool btf_name_offset_valid(const struct btf *btf, u32 offset)
602 {
603 	return BTF_STR_OFFSET_VALID(offset) &&
604 		offset < btf->hdr.str_len;
605 }
606 
607 static bool __btf_name_char_ok(char c, bool first)
608 {
609 	if ((first ? !isalpha(c) :
610 		     !isalnum(c)) &&
611 	    c != '_' &&
612 	    c != '.')
613 		return false;
614 	return true;
615 }
616 
617 static bool __btf_name_valid(const struct btf *btf, u32 offset)
618 {
619 	/* offset must be valid */
620 	const char *src = &btf->strings[offset];
621 	const char *src_limit;
622 
623 	if (!__btf_name_char_ok(*src, true))
624 		return false;
625 
626 	/* set a limit on identifier length */
627 	src_limit = src + KSYM_NAME_LEN;
628 	src++;
629 	while (*src && src < src_limit) {
630 		if (!__btf_name_char_ok(*src, false))
631 			return false;
632 		src++;
633 	}
634 
635 	return !*src;
636 }
637 
638 static bool btf_name_valid_identifier(const struct btf *btf, u32 offset)
639 {
640 	return __btf_name_valid(btf, offset);
641 }
642 
643 static bool btf_name_valid_section(const struct btf *btf, u32 offset)
644 {
645 	return __btf_name_valid(btf, offset);
646 }
647 
648 static const char *__btf_name_by_offset(const struct btf *btf, u32 offset)
649 {
650 	if (!offset)
651 		return "(anon)";
652 	else if (offset < btf->hdr.str_len)
653 		return &btf->strings[offset];
654 	else
655 		return "(invalid-name-offset)";
656 }
657 
658 const char *btf_name_by_offset(const struct btf *btf, u32 offset)
659 {
660 	if (offset < btf->hdr.str_len)
661 		return &btf->strings[offset];
662 
663 	return NULL;
664 }
665 
666 const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id)
667 {
668 	if (type_id > btf->nr_types)
669 		return NULL;
670 
671 	return btf->types[type_id];
672 }
673 
674 /*
675  * A regular int is not a bitfield and it must be either
676  * u8/u16/u32/u64 or __int128.
677  */
678 static bool btf_type_int_is_regular(const struct btf_type *t)
679 {
680 	u8 nr_bits, nr_bytes;
681 	u32 int_data;
682 
683 	int_data = btf_type_int(t);
684 	nr_bits = BTF_INT_BITS(int_data);
685 	nr_bytes = BITS_ROUNDUP_BYTES(nr_bits);
686 	if (BITS_PER_BYTE_MASKED(nr_bits) ||
687 	    BTF_INT_OFFSET(int_data) ||
688 	    (nr_bytes != sizeof(u8) && nr_bytes != sizeof(u16) &&
689 	     nr_bytes != sizeof(u32) && nr_bytes != sizeof(u64) &&
690 	     nr_bytes != (2 * sizeof(u64)))) {
691 		return false;
692 	}
693 
694 	return true;
695 }
696 
697 /*
698  * Check that given struct member is a regular int with expected
699  * offset and size.
700  */
701 bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s,
702 			   const struct btf_member *m,
703 			   u32 expected_offset, u32 expected_size)
704 {
705 	const struct btf_type *t;
706 	u32 id, int_data;
707 	u8 nr_bits;
708 
709 	id = m->type;
710 	t = btf_type_id_size(btf, &id, NULL);
711 	if (!t || !btf_type_is_int(t))
712 		return false;
713 
714 	int_data = btf_type_int(t);
715 	nr_bits = BTF_INT_BITS(int_data);
716 	if (btf_type_kflag(s)) {
717 		u32 bitfield_size = BTF_MEMBER_BITFIELD_SIZE(m->offset);
718 		u32 bit_offset = BTF_MEMBER_BIT_OFFSET(m->offset);
719 
720 		/* if kflag set, int should be a regular int and
721 		 * bit offset should be at a byte boundary.
722 		 */
723 		return !bitfield_size &&
724 		       BITS_ROUNDUP_BYTES(bit_offset) == expected_offset &&
725 		       BITS_ROUNDUP_BYTES(nr_bits) == expected_size;
726 	}
727 
728 	if (BTF_INT_OFFSET(int_data) ||
729 	    BITS_PER_BYTE_MASKED(m->offset) ||
730 	    BITS_ROUNDUP_BYTES(m->offset) != expected_offset ||
731 	    BITS_PER_BYTE_MASKED(nr_bits) ||
732 	    BITS_ROUNDUP_BYTES(nr_bits) != expected_size)
733 		return false;
734 
735 	return true;
736 }
737 
738 /* Similar to btf_type_skip_modifiers() but does not skip typedefs. */
739 static const struct btf_type *btf_type_skip_qualifiers(const struct btf *btf,
740 						       u32 id)
741 {
742 	const struct btf_type *t = btf_type_by_id(btf, id);
743 
744 	while (btf_type_is_modifier(t) &&
745 	       BTF_INFO_KIND(t->info) != BTF_KIND_TYPEDEF) {
746 		id = t->type;
747 		t = btf_type_by_id(btf, t->type);
748 	}
749 
750 	return t;
751 }
752 
753 #define BTF_SHOW_MAX_ITER	10
754 
755 #define BTF_KIND_BIT(kind)	(1ULL << kind)
756 
757 /*
758  * Populate show->state.name with type name information.
759  * Format of type name is
760  *
761  * [.member_name = ] (type_name)
762  */
763 static const char *btf_show_name(struct btf_show *show)
764 {
765 	/* BTF_SHOW_MAX_ITER array suffixes "[]" */
766 	const char *array_suffixes = "[][][][][][][][][][]";
767 	const char *array_suffix = &array_suffixes[strlen(array_suffixes)];
768 	/* BTF_SHOW_MAX_ITER pointer suffixes "*" */
769 	const char *ptr_suffixes = "**********";
770 	const char *ptr_suffix = &ptr_suffixes[strlen(ptr_suffixes)];
771 	const char *name = NULL, *prefix = "", *parens = "";
772 	const struct btf_member *m = show->state.member;
773 	const struct btf_type *t = show->state.type;
774 	const struct btf_array *array;
775 	u32 id = show->state.type_id;
776 	const char *member = NULL;
777 	bool show_member = false;
778 	u64 kinds = 0;
779 	int i;
780 
781 	show->state.name[0] = '\0';
782 
783 	/*
784 	 * Don't show type name if we're showing an array member;
785 	 * in that case we show the array type, so we don't need to repeat
786 	 * ourselves for each member.
787 	 */
788 	if (show->state.array_member)
789 		return "";
790 
791 	/* Retrieve member name, if any. */
792 	if (m) {
793 		member = btf_name_by_offset(show->btf, m->name_off);
794 		show_member = strlen(member) > 0;
795 		id = m->type;
796 	}
797 
798 	/*
799 	 * Start with type_id, as we have resolved the struct btf_type *
800 	 * via btf_modifier_show() past the parent typedef to the child
801 	 * struct, int etc it is defined as.  In such cases, the type_id
802 	 * still represents the starting type while the struct btf_type *
803 	 * in our show->state points at the resolved type of the typedef.
804 	 */
805 	t = btf_type_by_id(show->btf, id);
806 	if (!t)
807 		return "";
808 
809 	/*
810 	 * The goal here is to build up the right number of pointer and
811 	 * array suffixes while ensuring the type name for a typedef
812 	 * is represented.  Along the way we accumulate a list of
813 	 * BTF kinds we have encountered, since these will inform later
814 	 * display; for example, pointer types will not require an
815 	 * opening "{" for struct, we will just display the pointer value.
816 	 *
817 	 * We also want to accumulate the right number of pointer or array
818 	 * indices in the format string while iterating until we get to
819 	 * the typedef/pointee/array member target type.
820 	 *
821 	 * We start by pointing at the end of pointer and array suffix
822 	 * strings; as we accumulate pointers and arrays we move the pointer
823 	 * or array string backwards so it will show the expected number of
824 	 * '*' or '[]' for the type.  BTF_SHOW_MAX_ITER levels of nesting of pointers
825 	 * and/or arrays and typedefs are supported as a precaution.
826 	 *
827 	 * We also want to get the typedef name while proceeding to resolve
828 	 * the type it points to, so that we can add parentheses if it is a
829 	 * "typedef struct" etc.
830 	 */
831 	for (i = 0; i < BTF_SHOW_MAX_ITER; i++) {
832 
833 		switch (BTF_INFO_KIND(t->info)) {
834 		case BTF_KIND_TYPEDEF:
835 			if (!name)
836 				name = btf_name_by_offset(show->btf,
837 							       t->name_off);
838 			kinds |= BTF_KIND_BIT(BTF_KIND_TYPEDEF);
839 			id = t->type;
840 			break;
841 		case BTF_KIND_ARRAY:
842 			kinds |= BTF_KIND_BIT(BTF_KIND_ARRAY);
843 			parens = "[";
844 			if (!t)
845 				return "";
846 			array = btf_type_array(t);
847 			if (array_suffix > array_suffixes)
848 				array_suffix -= 2;
849 			id = array->type;
850 			break;
851 		case BTF_KIND_PTR:
852 			kinds |= BTF_KIND_BIT(BTF_KIND_PTR);
853 			if (ptr_suffix > ptr_suffixes)
854 				ptr_suffix -= 1;
855 			id = t->type;
856 			break;
857 		default:
858 			id = 0;
859 			break;
860 		}
861 		if (!id)
862 			break;
863 		t = btf_type_skip_qualifiers(show->btf, id);
864 	}
865 	/* We may not be able to represent this type; bail to be safe */
866 	if (i == BTF_SHOW_MAX_ITER)
867 		return "";
868 
869 	if (!name)
870 		name = btf_name_by_offset(show->btf, t->name_off);
871 
872 	switch (BTF_INFO_KIND(t->info)) {
873 	case BTF_KIND_STRUCT:
874 	case BTF_KIND_UNION:
875 		prefix = BTF_INFO_KIND(t->info) == BTF_KIND_STRUCT ?
876 			 "struct" : "union";
877 		/* if it's an array of struct/union, parens is already set */
878 		if (!(kinds & (BTF_KIND_BIT(BTF_KIND_ARRAY))))
879 			parens = "{";
880 		break;
881 	case BTF_KIND_ENUM:
882 		prefix = "enum";
883 		break;
884 	default:
885 		break;
886 	}
887 
888 	/* pointer does not require parens */
889 	if (kinds & BTF_KIND_BIT(BTF_KIND_PTR))
890 		parens = "";
891 	/* typedef does not require struct/union/enum prefix */
892 	if (kinds & BTF_KIND_BIT(BTF_KIND_TYPEDEF))
893 		prefix = "";
894 
895 	if (!name)
896 		name = "";
897 
898 	/* Even if we don't want type name info, we want parentheses etc */
899 	if (show->flags & BTF_SHOW_NONAME)
900 		snprintf(show->state.name, sizeof(show->state.name), "%s",
901 			 parens);
902 	else
903 		snprintf(show->state.name, sizeof(show->state.name),
904 			 "%s%s%s(%s%s%s%s%s%s)%s",
905 			 /* first 3 strings comprise ".member = " */
906 			 show_member ? "." : "",
907 			 show_member ? member : "",
908 			 show_member ? " = " : "",
909 			 /* ...next is our prefix (struct, enum, etc) */
910 			 prefix,
911 			 strlen(prefix) > 0 && strlen(name) > 0 ? " " : "",
912 			 /* ...this is the type name itself */
913 			 name,
914 			 /* ...suffixed by the appropriate '*', '[]' suffixes */
915 			 strlen(ptr_suffix) > 0 ? " " : "", ptr_suffix,
916 			 array_suffix, parens);
917 
918 	return show->state.name;
919 }
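/* Editorial example, derived from the format string above: for a member
 * "next" whose type resolves to "struct list_head *", show->state.name
 * ends up as ".next = (struct list_head *)".  With BTF_SHOW_NONAME only
 * the parens/braces portion is emitted.
 */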
920 
921 static const char *__btf_show_indent(struct btf_show *show)
922 {
923 	const char *indents = "                                ";
924 	const char *indent = &indents[strlen(indents)];
925 
926 	if ((indent - show->state.depth) >= indents)
927 		return indent - show->state.depth;
928 	return indents;
929 }
930 
931 static const char *btf_show_indent(struct btf_show *show)
932 {
933 	return show->flags & BTF_SHOW_COMPACT ? "" : __btf_show_indent(show);
934 }
935 
936 static const char *btf_show_newline(struct btf_show *show)
937 {
938 	return show->flags & BTF_SHOW_COMPACT ? "" : "\n";
939 }
940 
941 static const char *btf_show_delim(struct btf_show *show)
942 {
943 	if (show->state.depth == 0)
944 		return "";
945 
946 	if ((show->flags & BTF_SHOW_COMPACT) && show->state.type &&
947 		BTF_INFO_KIND(show->state.type->info) == BTF_KIND_UNION)
948 		return "|";
949 
950 	return ",";
951 }
952 
953 __printf(2, 3) static void btf_show(struct btf_show *show, const char *fmt, ...)
954 {
955 	va_list args;
956 
957 	if (!show->state.depth_check) {
958 		va_start(args, fmt);
959 		show->showfn(show, fmt, args);
960 		va_end(args);
961 	}
962 }
963 
964 /* Macros are used here as btf_show_type_value[s]() prepends and appends
965  * format specifiers to the format specifier passed in; these do the work of
966  * adding indentation, delimiters etc while the caller simply has to specify
967  * the type value(s) in the format specifier + value(s).
968  */
969 #define btf_show_type_value(show, fmt, value)				       \
970 	do {								       \
971 		if ((value) != 0 || (show->flags & BTF_SHOW_ZERO) ||	       \
972 		    show->state.depth == 0) {				       \
973 			btf_show(show, "%s%s" fmt "%s%s",		       \
974 				 btf_show_indent(show),			       \
975 				 btf_show_name(show),			       \
976 				 value, btf_show_delim(show),		       \
977 				 btf_show_newline(show));		       \
978 			if (show->state.depth > show->state.depth_to_show)     \
979 				show->state.depth_to_show = show->state.depth; \
980 		}							       \
981 	} while (0)
982 
983 #define btf_show_type_values(show, fmt, ...)				       \
984 	do {								       \
985 		btf_show(show, "%s%s" fmt "%s%s", btf_show_indent(show),       \
986 			 btf_show_name(show),				       \
987 			 __VA_ARGS__, btf_show_delim(show),		       \
988 			 btf_show_newline(show));			       \
989 		if (show->state.depth > show->state.depth_to_show)	       \
990 			show->state.depth_to_show = show->state.depth;	       \
991 	} while (0)
992 
993 /* How much is left to copy to safe buffer after @data? */
994 static int btf_show_obj_size_left(struct btf_show *show, void *data)
995 {
996 	return show->obj.head + show->obj.size - data;
997 }
998 
999 /* Is object pointed to by @data of @size already copied to our safe buffer? */
1000 static bool btf_show_obj_is_safe(struct btf_show *show, void *data, int size)
1001 {
1002 	return data >= show->obj.data &&
1003 	       (data + size) < (show->obj.data + BTF_SHOW_OBJ_SAFE_SIZE);
1004 }
1005 
1006 /*
1007  * If object pointed to by @data of @size falls within our safe buffer, return
1008  * the equivalent pointer to the same safe data.  Assumes
1009  * copy_from_kernel_nofault() has already happened and our safe buffer is
1010  * populated.
1011  */
1012 static void *__btf_show_obj_safe(struct btf_show *show, void *data, int size)
1013 {
1014 	if (btf_show_obj_is_safe(show, data, size))
1015 		return show->obj.safe + (data - show->obj.data);
1016 	return NULL;
1017 }
1018 
1019 /*
1020  * Return a safe-to-access version of data pointed to by @data.
1021  * We do this by copying the relevant amount of information
1022  * to the struct btf_show obj.safe buffer using copy_from_kernel_nofault().
1023  *
1024  * If BTF_SHOW_UNSAFE is specified, just return data as-is; no
1025  * safe copy is needed.
1026  *
1027  * Otherwise we need to determine if we have the required amount
1028  * of data (determined by the @data pointer and the size of the
1029  * largest base type we can encounter (represented by
1030  * BTF_SHOW_OBJ_BASE_TYPE_SIZE)). Having that much data ensures
1031  * that we will be able to print some of the current object,
1032  * and if more is needed a copy will be triggered.
1033  * Some objects such as structs will not fit into the buffer;
1034  * in such cases additional copies when we iterate over their
1035  * members may be needed.
1036  *
1037  * btf_show_obj_safe() is used to return a safe buffer for
1038  * btf_show_start_type(); this ensures that as we recurse into
1039  * nested types we always have safe data for the given type.
1040  * This approach is somewhat wasteful; it's possible for example
1041  * that when iterating over a large union we'll end up copying the
1042  * same data repeatedly, but the goal is safety not performance.
1043  * We use stack data as opposed to per-CPU buffers because the
1044  * iteration over a type can take some time, and preemption handling
1045  * would greatly complicate use of the safe buffer.
1046  */
1047 static void *btf_show_obj_safe(struct btf_show *show,
1048 			       const struct btf_type *t,
1049 			       void *data)
1050 {
1051 	const struct btf_type *rt;
1052 	int size_left, size;
1053 	void *safe = NULL;
1054 
1055 	if (show->flags & BTF_SHOW_UNSAFE)
1056 		return data;
1057 
1058 	rt = btf_resolve_size(show->btf, t, &size);
1059 	if (IS_ERR(rt)) {
1060 		show->state.status = PTR_ERR(rt);
1061 		return NULL;
1062 	}
1063 
1064 	/*
1065 	 * Is this toplevel object? If so, set total object size and
1066 	 * initialize pointers.  Otherwise check if we still fall within
1067 	 * our safe object data.
1068 	 */
1069 	if (show->state.depth == 0) {
1070 		show->obj.size = size;
1071 		show->obj.head = data;
1072 	} else {
1073 		/*
1074 		 * If the size of the current object is > our remaining
1075 		 * safe buffer we _may_ need to do a new copy.  However
1076 		 * consider the case of a nested struct; its size pushes
1077 		 * us over the safe buffer limit, but showing any individual
1078 		 * struct members does not.  In such cases, we don't need
1079 		 * to initiate a fresh copy yet; however we definitely need
1080 		 * at least BTF_SHOW_OBJ_BASE_TYPE_SIZE bytes left
1081 		 * in our buffer, regardless of the current object size.
1082 		 * The logic here is that as we resolve types we will
1083 		 * hit a base type at some point, and we need to be sure
1084 		 * the next chunk of data is safely available to display
1085 		 * that type info safely.  We cannot rely on the size of
1086 		 * the current object here because it may be much larger
1087 		 * than our current buffer (e.g. task_struct is 8k).
1088 		 * All we want to do here is ensure that we can print the
1089 		 * next basic type, which we can if either
1090 		 * - the current type size is within the safe buffer; or
1091 		 * - at least BTF_SHOW_OBJ_BASE_TYPE_SIZE bytes are left in
1092 		 *   the safe buffer.
1093 		 */
1094 		safe = __btf_show_obj_safe(show, data,
1095 					   min(size,
1096 					       BTF_SHOW_OBJ_BASE_TYPE_SIZE));
1097 	}
1098 
1099 	/*
1100 	 * We need a new copy to our safe object, either because we haven't
1101 	 * yet copied and are initializing safe data, or because the data
1102 	 * we want falls outside the boundaries of the safe object.
1103 	 */
1104 	if (!safe) {
1105 		size_left = btf_show_obj_size_left(show, data);
1106 		if (size_left > BTF_SHOW_OBJ_SAFE_SIZE)
1107 			size_left = BTF_SHOW_OBJ_SAFE_SIZE;
1108 		show->state.status = copy_from_kernel_nofault(show->obj.safe,
1109 							      data, size_left);
1110 		if (!show->state.status) {
1111 			show->obj.data = data;
1112 			safe = show->obj.safe;
1113 		}
1114 	}
1115 
1116 	return safe;
1117 }
1118 
1119 /*
1120  * Set the type we are starting to show and return a safe data pointer
1121  * to be used for showing the associated data.
1122  */
1123 static void *btf_show_start_type(struct btf_show *show,
1124 				 const struct btf_type *t,
1125 				 u32 type_id, void *data)
1126 {
1127 	show->state.type = t;
1128 	show->state.type_id = type_id;
1129 	show->state.name[0] = '\0';
1130 
1131 	return btf_show_obj_safe(show, t, data);
1132 }
1133 
1134 static void btf_show_end_type(struct btf_show *show)
1135 {
1136 	show->state.type = NULL;
1137 	show->state.type_id = 0;
1138 	show->state.name[0] = '\0';
1139 }
1140 
1141 static void *btf_show_start_aggr_type(struct btf_show *show,
1142 				      const struct btf_type *t,
1143 				      u32 type_id, void *data)
1144 {
1145 	void *safe_data = btf_show_start_type(show, t, type_id, data);
1146 
1147 	if (!safe_data)
1148 		return safe_data;
1149 
1150 	btf_show(show, "%s%s%s", btf_show_indent(show),
1151 		 btf_show_name(show),
1152 		 btf_show_newline(show));
1153 	show->state.depth++;
1154 	return safe_data;
1155 }
1156 
1157 static void btf_show_end_aggr_type(struct btf_show *show,
1158 				   const char *suffix)
1159 {
1160 	show->state.depth--;
1161 	btf_show(show, "%s%s%s%s", btf_show_indent(show), suffix,
1162 		 btf_show_delim(show), btf_show_newline(show));
1163 	btf_show_end_type(show);
1164 }
1165 
1166 static void btf_show_start_member(struct btf_show *show,
1167 				  const struct btf_member *m)
1168 {
1169 	show->state.member = m;
1170 }
1171 
1172 static void btf_show_start_array_member(struct btf_show *show)
1173 {
1174 	show->state.array_member = 1;
1175 	btf_show_start_member(show, NULL);
1176 }
1177 
1178 static void btf_show_end_member(struct btf_show *show)
1179 {
1180 	show->state.member = NULL;
1181 }
1182 
1183 static void btf_show_end_array_member(struct btf_show *show)
1184 {
1185 	show->state.array_member = 0;
1186 	btf_show_end_member(show);
1187 }
1188 
1189 static void *btf_show_start_array_type(struct btf_show *show,
1190 				       const struct btf_type *t,
1191 				       u32 type_id,
1192 				       u16 array_encoding,
1193 				       void *data)
1194 {
1195 	show->state.array_encoding = array_encoding;
1196 	show->state.array_terminated = 0;
1197 	return btf_show_start_aggr_type(show, t, type_id, data);
1198 }
1199 
1200 static void btf_show_end_array_type(struct btf_show *show)
1201 {
1202 	show->state.array_encoding = 0;
1203 	show->state.array_terminated = 0;
1204 	btf_show_end_aggr_type(show, "]");
1205 }
1206 
1207 static void *btf_show_start_struct_type(struct btf_show *show,
1208 					const struct btf_type *t,
1209 					u32 type_id,
1210 					void *data)
1211 {
1212 	return btf_show_start_aggr_type(show, t, type_id, data);
1213 }
1214 
1215 static void btf_show_end_struct_type(struct btf_show *show)
1216 {
1217 	btf_show_end_aggr_type(show, "}");
1218 }
1219 
1220 __printf(2, 3) static void __btf_verifier_log(struct bpf_verifier_log *log,
1221 					      const char *fmt, ...)
1222 {
1223 	va_list args;
1224 
1225 	va_start(args, fmt);
1226 	bpf_verifier_vlog(log, fmt, args);
1227 	va_end(args);
1228 }
1229 
1230 __printf(2, 3) static void btf_verifier_log(struct btf_verifier_env *env,
1231 					    const char *fmt, ...)
1232 {
1233 	struct bpf_verifier_log *log = &env->log;
1234 	va_list args;
1235 
1236 	if (!bpf_verifier_log_needed(log))
1237 		return;
1238 
1239 	va_start(args, fmt);
1240 	bpf_verifier_vlog(log, fmt, args);
1241 	va_end(args);
1242 }
1243 
1244 __printf(4, 5) static void __btf_verifier_log_type(struct btf_verifier_env *env,
1245 						   const struct btf_type *t,
1246 						   bool log_details,
1247 						   const char *fmt, ...)
1248 {
1249 	struct bpf_verifier_log *log = &env->log;
1250 	u8 kind = BTF_INFO_KIND(t->info);
1251 	struct btf *btf = env->btf;
1252 	va_list args;
1253 
1254 	if (!bpf_verifier_log_needed(log))
1255 		return;
1256 
1257 	/* btf verifier prints all types it is processing via
1258 	 * btf_verifier_log_type(..., fmt = NULL).
1259 	 * Skip those prints for in-kernel BTF verification.
1260 	 */
1261 	if (log->level == BPF_LOG_KERNEL && !fmt)
1262 		return;
1263 
1264 	__btf_verifier_log(log, "[%u] %s %s%s",
1265 			   env->log_type_id,
1266 			   btf_kind_str[kind],
1267 			   __btf_name_by_offset(btf, t->name_off),
1268 			   log_details ? " " : "");
1269 
1270 	if (log_details)
1271 		btf_type_ops(t)->log_details(env, t);
1272 
1273 	if (fmt && *fmt) {
1274 		__btf_verifier_log(log, " ");
1275 		va_start(args, fmt);
1276 		bpf_verifier_vlog(log, fmt, args);
1277 		va_end(args);
1278 	}
1279 
1280 	__btf_verifier_log(log, "\n");
1281 }
1282 
1283 #define btf_verifier_log_type(env, t, ...) \
1284 	__btf_verifier_log_type((env), (t), true, __VA_ARGS__)
1285 #define btf_verifier_log_basic(env, t, ...) \
1286 	__btf_verifier_log_type((env), (t), false, __VA_ARGS__)
1287 
1288 __printf(4, 5)
1289 static void btf_verifier_log_member(struct btf_verifier_env *env,
1290 				    const struct btf_type *struct_type,
1291 				    const struct btf_member *member,
1292 				    const char *fmt, ...)
1293 {
1294 	struct bpf_verifier_log *log = &env->log;
1295 	struct btf *btf = env->btf;
1296 	va_list args;
1297 
1298 	if (!bpf_verifier_log_needed(log))
1299 		return;
1300 
1301 	if (log->level == BPF_LOG_KERNEL && !fmt)
1302 		return;
1303 	/* The CHECK_META phase already did a btf dump.
1304 	 *
1305 	 * If a member is logged again, it must have hit an error while
1306 	 * parsing this member.  It is useful to print out which
1307 	 * struct this member belongs to.
1308 	 */
1309 	if (env->phase != CHECK_META)
1310 		btf_verifier_log_type(env, struct_type, NULL);
1311 
1312 	if (btf_type_kflag(struct_type))
1313 		__btf_verifier_log(log,
1314 				   "\t%s type_id=%u bitfield_size=%u bits_offset=%u",
1315 				   __btf_name_by_offset(btf, member->name_off),
1316 				   member->type,
1317 				   BTF_MEMBER_BITFIELD_SIZE(member->offset),
1318 				   BTF_MEMBER_BIT_OFFSET(member->offset));
1319 	else
1320 		__btf_verifier_log(log, "\t%s type_id=%u bits_offset=%u",
1321 				   __btf_name_by_offset(btf, member->name_off),
1322 				   member->type, member->offset);
1323 
1324 	if (fmt && *fmt) {
1325 		__btf_verifier_log(log, " ");
1326 		va_start(args, fmt);
1327 		bpf_verifier_vlog(log, fmt, args);
1328 		va_end(args);
1329 	}
1330 
1331 	__btf_verifier_log(log, "\n");
1332 }
1333 
1334 __printf(4, 5)
1335 static void btf_verifier_log_vsi(struct btf_verifier_env *env,
1336 				 const struct btf_type *datasec_type,
1337 				 const struct btf_var_secinfo *vsi,
1338 				 const char *fmt, ...)
1339 {
1340 	struct bpf_verifier_log *log = &env->log;
1341 	va_list args;
1342 
1343 	if (!bpf_verifier_log_needed(log))
1344 		return;
1345 	if (log->level == BPF_LOG_KERNEL && !fmt)
1346 		return;
1347 	if (env->phase != CHECK_META)
1348 		btf_verifier_log_type(env, datasec_type, NULL);
1349 
1350 	__btf_verifier_log(log, "\t type_id=%u offset=%u size=%u",
1351 			   vsi->type, vsi->offset, vsi->size);
1352 	if (fmt && *fmt) {
1353 		__btf_verifier_log(log, " ");
1354 		va_start(args, fmt);
1355 		bpf_verifier_vlog(log, fmt, args);
1356 		va_end(args);
1357 	}
1358 
1359 	__btf_verifier_log(log, "\n");
1360 }
1361 
1362 static void btf_verifier_log_hdr(struct btf_verifier_env *env,
1363 				 u32 btf_data_size)
1364 {
1365 	struct bpf_verifier_log *log = &env->log;
1366 	const struct btf *btf = env->btf;
1367 	const struct btf_header *hdr;
1368 
1369 	if (!bpf_verifier_log_needed(log))
1370 		return;
1371 
1372 	if (log->level == BPF_LOG_KERNEL)
1373 		return;
1374 	hdr = &btf->hdr;
1375 	__btf_verifier_log(log, "magic: 0x%x\n", hdr->magic);
1376 	__btf_verifier_log(log, "version: %u\n", hdr->version);
1377 	__btf_verifier_log(log, "flags: 0x%x\n", hdr->flags);
1378 	__btf_verifier_log(log, "hdr_len: %u\n", hdr->hdr_len);
1379 	__btf_verifier_log(log, "type_off: %u\n", hdr->type_off);
1380 	__btf_verifier_log(log, "type_len: %u\n", hdr->type_len);
1381 	__btf_verifier_log(log, "str_off: %u\n", hdr->str_off);
1382 	__btf_verifier_log(log, "str_len: %u\n", hdr->str_len);
1383 	__btf_verifier_log(log, "btf_total_size: %u\n", btf_data_size);
1384 }
1385 
1386 static int btf_add_type(struct btf_verifier_env *env, struct btf_type *t)
1387 {
1388 	struct btf *btf = env->btf;
1389 
1390 	/* < 2 because +1 for btf_void which is always in btf->types[0].
1391 	 * btf_void is not accounted in btf->nr_types because btf_void
1392 	 * does not come from the BTF file.
1393 	 */
1394 	if (btf->types_size - btf->nr_types < 2) {
1395 		/* Expand 'types' array */
1396 
1397 		struct btf_type **new_types;
1398 		u32 expand_by, new_size;
1399 
1400 		if (btf->types_size == BTF_MAX_TYPE) {
1401 			btf_verifier_log(env, "Exceeded max num of types");
1402 			return -E2BIG;
1403 		}
1404 
1405 		expand_by = max_t(u32, btf->types_size >> 2, 16);
1406 		new_size = min_t(u32, BTF_MAX_TYPE,
1407 				 btf->types_size + expand_by);
1408 
1409 		new_types = kvcalloc(new_size, sizeof(*new_types),
1410 				     GFP_KERNEL | __GFP_NOWARN);
1411 		if (!new_types)
1412 			return -ENOMEM;
1413 
1414 		if (btf->nr_types == 0)
1415 			new_types[0] = &btf_void;
1416 		else
1417 			memcpy(new_types, btf->types,
1418 			       sizeof(*btf->types) * (btf->nr_types + 1));
1419 
1420 		kvfree(btf->types);
1421 		btf->types = new_types;
1422 		btf->types_size = new_size;
1423 	}
1424 
1425 	btf->types[++(btf->nr_types)] = t;
1426 
1427 	return 0;
1428 }
1429 
1430 static int btf_alloc_id(struct btf *btf)
1431 {
1432 	int id;
1433 
1434 	idr_preload(GFP_KERNEL);
1435 	spin_lock_bh(&btf_idr_lock);
1436 	id = idr_alloc_cyclic(&btf_idr, btf, 1, INT_MAX, GFP_ATOMIC);
1437 	if (id > 0)
1438 		btf->id = id;
1439 	spin_unlock_bh(&btf_idr_lock);
1440 	idr_preload_end();
1441 
1442 	if (WARN_ON_ONCE(!id))
1443 		return -ENOSPC;
1444 
1445 	return id > 0 ? 0 : id;
1446 }
1447 
1448 static void btf_free_id(struct btf *btf)
1449 {
1450 	unsigned long flags;
1451 
1452 	/*
1453 	 * In map-in-map, calling map_delete_elem() on outer
1454 	 * map will call bpf_map_put on the inner map.
1455 	 * It will then eventually call btf_free_id()
1456 	 * on the inner map.  Some map_delete_elem()
1457 	 * implementations may run with irqs disabled, so
1458 	 * we need to use the _irqsave() version instead
1459 	 * of the _bh() version.
1460 	 */
1461 	spin_lock_irqsave(&btf_idr_lock, flags);
1462 	idr_remove(&btf_idr, btf->id);
1463 	spin_unlock_irqrestore(&btf_idr_lock, flags);
1464 }
1465 
1466 static void btf_free(struct btf *btf)
1467 {
1468 	kvfree(btf->types);
1469 	kvfree(btf->resolved_sizes);
1470 	kvfree(btf->resolved_ids);
1471 	kvfree(btf->data);
1472 	kfree(btf);
1473 }
1474 
1475 static void btf_free_rcu(struct rcu_head *rcu)
1476 {
1477 	struct btf *btf = container_of(rcu, struct btf, rcu);
1478 
1479 	btf_free(btf);
1480 }
1481 
1482 void btf_put(struct btf *btf)
1483 {
1484 	if (btf && refcount_dec_and_test(&btf->refcnt)) {
1485 		btf_free_id(btf);
1486 		call_rcu(&btf->rcu, btf_free_rcu);
1487 	}
1488 }
1489 
1490 static int env_resolve_init(struct btf_verifier_env *env)
1491 {
1492 	struct btf *btf = env->btf;
1493 	u32 nr_types = btf->nr_types;
1494 	u32 *resolved_sizes = NULL;
1495 	u32 *resolved_ids = NULL;
1496 	u8 *visit_states = NULL;
1497 
1498 	/* +1 for btf_void */
1499 	resolved_sizes = kvcalloc(nr_types + 1, sizeof(*resolved_sizes),
1500 				  GFP_KERNEL | __GFP_NOWARN);
1501 	if (!resolved_sizes)
1502 		goto nomem;
1503 
1504 	resolved_ids = kvcalloc(nr_types + 1, sizeof(*resolved_ids),
1505 				GFP_KERNEL | __GFP_NOWARN);
1506 	if (!resolved_ids)
1507 		goto nomem;
1508 
1509 	visit_states = kvcalloc(nr_types + 1, sizeof(*visit_states),
1510 				GFP_KERNEL | __GFP_NOWARN);
1511 	if (!visit_states)
1512 		goto nomem;
1513 
1514 	btf->resolved_sizes = resolved_sizes;
1515 	btf->resolved_ids = resolved_ids;
1516 	env->visit_states = visit_states;
1517 
1518 	return 0;
1519 
1520 nomem:
1521 	kvfree(resolved_sizes);
1522 	kvfree(resolved_ids);
1523 	kvfree(visit_states);
1524 	return -ENOMEM;
1525 }
1526 
1527 static void btf_verifier_env_free(struct btf_verifier_env *env)
1528 {
1529 	kvfree(env->visit_states);
1530 	kfree(env);
1531 }
1532 
1533 static bool env_type_is_resolve_sink(const struct btf_verifier_env *env,
1534 				     const struct btf_type *next_type)
1535 {
1536 	switch (env->resolve_mode) {
1537 	case RESOLVE_TBD:
1538 		/* int, enum or void is a sink */
1539 		return !btf_type_needs_resolve(next_type);
1540 	case RESOLVE_PTR:
1541 		/* int, enum, void, struct, array, func or func_proto is a sink
1542 		 * for ptr
1543 		 */
1544 		return !btf_type_is_modifier(next_type) &&
1545 			!btf_type_is_ptr(next_type);
1546 	case RESOLVE_STRUCT_OR_ARRAY:
1547 		/* int, enum, void, ptr, func or func_proto is a sink
1548 		 * for struct and array
1549 		 */
1550 		return !btf_type_is_modifier(next_type) &&
1551 			!btf_type_is_array(next_type) &&
1552 			!btf_type_is_struct(next_type);
1553 	default:
1554 		BUG();
1555 	}
1556 }
1557 
1558 static bool env_type_is_resolved(const struct btf_verifier_env *env,
1559 				 u32 type_id)
1560 {
1561 	return env->visit_states[type_id] == RESOLVED;
1562 }
1563 
1564 static int env_stack_push(struct btf_verifier_env *env,
1565 			  const struct btf_type *t, u32 type_id)
1566 {
1567 	struct resolve_vertex *v;
1568 
1569 	if (env->top_stack == MAX_RESOLVE_DEPTH)
1570 		return -E2BIG;
1571 
1572 	if (env->visit_states[type_id] != NOT_VISITED)
1573 		return -EEXIST;
1574 
1575 	env->visit_states[type_id] = VISITED;
1576 
1577 	v = &env->stack[env->top_stack++];
1578 	v->t = t;
1579 	v->type_id = type_id;
1580 	v->next_member = 0;
1581 
1582 	if (env->resolve_mode == RESOLVE_TBD) {
1583 		if (btf_type_is_ptr(t))
1584 			env->resolve_mode = RESOLVE_PTR;
1585 		else if (btf_type_is_struct(t) || btf_type_is_array(t))
1586 			env->resolve_mode = RESOLVE_STRUCT_OR_ARRAY;
1587 	}
1588 
1589 	return 0;
1590 }
1591 
1592 static void env_stack_set_next_member(struct btf_verifier_env *env,
1593 				      u16 next_member)
1594 {
1595 	env->stack[env->top_stack - 1].next_member = next_member;
1596 }
1597 
1598 static void env_stack_pop_resolved(struct btf_verifier_env *env,
1599 				   u32 resolved_type_id,
1600 				   u32 resolved_size)
1601 {
1602 	u32 type_id = env->stack[--(env->top_stack)].type_id;
1603 	struct btf *btf = env->btf;
1604 
1605 	btf->resolved_sizes[type_id] = resolved_size;
1606 	btf->resolved_ids[type_id] = resolved_type_id;
1607 	env->visit_states[type_id] = RESOLVED;
1608 }
1609 
1610 static const struct resolve_vertex *env_stack_peak(struct btf_verifier_env *env)
1611 {
1612 	return env->top_stack ? &env->stack[env->top_stack - 1] : NULL;
1613 }
1614 
1615 /* Resolve the size of a passed-in "type"
1616  *
1617  * type: is an array (e.g. u32 array[x][y])
1618  * return type: type "u32[x][y]", i.e. BTF_KIND_ARRAY,
1619  * *type_size: (x * y * sizeof(u32)).  Hence, *type_size always
1620  *             corresponds to the return type.
1621  * *elem_type: u32
1622  * *elem_id: id of u32
1623  * *total_nelems: (x * y).  Hence, individual elem size is
1624  *                (*type_size / *total_nelems)
1625  * *type_id: id of type if it's changed within the function, 0 if not
1626  *
1627  * type: is not an array (e.g. const struct X)
1628  * return type: type "struct X"
1629  * *type_size: sizeof(struct X)
1630  * *elem_type: same as return type ("struct X")
1631  * *elem_id: 0
1632  * *total_nelems: 1
1633  * *type_id: id of type if it's changed within the function, 0 if not
1634  */
1635 static const struct btf_type *
1636 __btf_resolve_size(const struct btf *btf, const struct btf_type *type,
1637 		   u32 *type_size, const struct btf_type **elem_type,
1638 		   u32 *elem_id, u32 *total_nelems, u32 *type_id)
1639 {
1640 	const struct btf_type *array_type = NULL;
1641 	const struct btf_array *array = NULL;
1642 	u32 i, size, nelems = 1, id = 0;
1643 
1644 	for (i = 0; i < MAX_RESOLVE_DEPTH; i++) {
1645 		switch (BTF_INFO_KIND(type->info)) {
1646 		/* type->size can be used */
1647 		case BTF_KIND_INT:
1648 		case BTF_KIND_STRUCT:
1649 		case BTF_KIND_UNION:
1650 		case BTF_KIND_ENUM:
1651 			size = type->size;
1652 			goto resolved;
1653 
1654 		case BTF_KIND_PTR:
1655 			size = sizeof(void *);
1656 			goto resolved;
1657 
1658 		/* Modifiers */
1659 		case BTF_KIND_TYPEDEF:
1660 		case BTF_KIND_VOLATILE:
1661 		case BTF_KIND_CONST:
1662 		case BTF_KIND_RESTRICT:
1663 			id = type->type;
1664 			type = btf_type_by_id(btf, type->type);
1665 			break;
1666 
1667 		case BTF_KIND_ARRAY:
1668 			if (!array_type)
1669 				array_type = type;
1670 			array = btf_type_array(type);
1671 			if (nelems && array->nelems > U32_MAX / nelems)
1672 				return ERR_PTR(-EINVAL);
1673 			nelems *= array->nelems;
1674 			type = btf_type_by_id(btf, array->type);
1675 			break;
1676 
1677 		/* type without size */
1678 		default:
1679 			return ERR_PTR(-EINVAL);
1680 		}
1681 	}
1682 
1683 	return ERR_PTR(-EINVAL);
1684 
1685 resolved:
1686 	if (nelems && size > U32_MAX / nelems)
1687 		return ERR_PTR(-EINVAL);
1688 
1689 	*type_size = nelems * size;
1690 	if (total_nelems)
1691 		*total_nelems = nelems;
1692 	if (elem_type)
1693 		*elem_type = type;
1694 	if (elem_id)
1695 		*elem_id = array ? array->type : 0;
1696 	if (type_id && id)
1697 		*type_id = id;
1698 
1699 	return array_type ? : type;
1700 }
1701 
1702 const struct btf_type *
1703 btf_resolve_size(const struct btf *btf, const struct btf_type *type,
1704 		 u32 *type_size)
1705 {
1706 	return __btf_resolve_size(btf, type, type_size, NULL, NULL, NULL, NULL);
1707 }
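/* Editorial worked example of the contract documented above
 * __btf_resolve_size(), for a hypothetical "u32 arr[2][3]":
 *
 *	u32 size, nelems, elem_id, id = 0;
 *	const struct btf_type *elem, *ret;
 *
 *	ret = __btf_resolve_size(btf, t, &size, &elem, &elem_id,
 *				 &nelems, &id);
 *	// ret    : the outer BTF_KIND_ARRAY type (u32[2][3])
 *	// size   : 2 * 3 * sizeof(u32) == 24
 *	// nelems : 6, so each element is size / nelems == 4 bytes
 *	// elem   : the u32 INT type, elem_id: its type_id
 *	// id     : left at 0 since no modifier/typedef was peeled off
 */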
1708 
1709 /* The input param "type_id" must point to a needs_resolve type */
1710 static const struct btf_type *btf_type_id_resolve(const struct btf *btf,
1711 						  u32 *type_id)
1712 {
1713 	*type_id = btf->resolved_ids[*type_id];
1714 	return btf_type_by_id(btf, *type_id);
1715 }
1716 
1717 const struct btf_type *btf_type_id_size(const struct btf *btf,
1718 					u32 *type_id, u32 *ret_size)
1719 {
1720 	const struct btf_type *size_type;
1721 	u32 size_type_id = *type_id;
1722 	u32 size = 0;
1723 
1724 	size_type = btf_type_by_id(btf, size_type_id);
1725 	if (btf_type_nosize_or_null(size_type))
1726 		return NULL;
1727 
1728 	if (btf_type_has_size(size_type)) {
1729 		size = size_type->size;
1730 	} else if (btf_type_is_array(size_type)) {
1731 		size = btf->resolved_sizes[size_type_id];
1732 	} else if (btf_type_is_ptr(size_type)) {
1733 		size = sizeof(void *);
1734 	} else {
1735 		if (WARN_ON_ONCE(!btf_type_is_modifier(size_type) &&
1736 				 !btf_type_is_var(size_type)))
1737 			return NULL;
1738 
1739 		size_type_id = btf->resolved_ids[size_type_id];
1740 		size_type = btf_type_by_id(btf, size_type_id);
1741 		if (btf_type_nosize_or_null(size_type))
1742 			return NULL;
1743 		else if (btf_type_has_size(size_type))
1744 			size = size_type->size;
1745 		else if (btf_type_is_array(size_type))
1746 			size = btf->resolved_sizes[size_type_id];
1747 		else if (btf_type_is_ptr(size_type))
1748 			size = sizeof(void *);
1749 		else
1750 			return NULL;
1751 	}
1752 
1753 	*type_id = size_type_id;
1754 	if (ret_size)
1755 		*ret_size = size;
1756 
1757 	return size_type;
1758 }
1759 
1760 static int btf_df_check_member(struct btf_verifier_env *env,
1761 			       const struct btf_type *struct_type,
1762 			       const struct btf_member *member,
1763 			       const struct btf_type *member_type)
1764 {
1765 	btf_verifier_log_basic(env, struct_type,
1766 			       "Unsupported check_member");
1767 	return -EINVAL;
1768 }
1769 
1770 static int btf_df_check_kflag_member(struct btf_verifier_env *env,
1771 				     const struct btf_type *struct_type,
1772 				     const struct btf_member *member,
1773 				     const struct btf_type *member_type)
1774 {
1775 	btf_verifier_log_basic(env, struct_type,
1776 			       "Unsupported check_kflag_member");
1777 	return -EINVAL;
1778 }
1779 
1780 /* Used for ptr, array and struct/union type members.
1781  * int, enum and modifier types have their specific callback functions.
1782  */
1783 static int btf_generic_check_kflag_member(struct btf_verifier_env *env,
1784 					  const struct btf_type *struct_type,
1785 					  const struct btf_member *member,
1786 					  const struct btf_type *member_type)
1787 {
1788 	if (BTF_MEMBER_BITFIELD_SIZE(member->offset)) {
1789 		btf_verifier_log_member(env, struct_type, member,
1790 					"Invalid member bitfield_size");
1791 		return -EINVAL;
1792 	}
1793 
1794 	/* bitfield size is 0, so member->offset represents bit offset only.
1795 	 * It is safe to call non kflag check_member variants.
1796 	 */
1797 	return btf_type_ops(member_type)->check_member(env, struct_type,
1798 						       member,
1799 						       member_type);
1800 }
1801 
1802 static int btf_df_resolve(struct btf_verifier_env *env,
1803 			  const struct resolve_vertex *v)
1804 {
1805 	btf_verifier_log_basic(env, v->t, "Unsupported resolve");
1806 	return -EINVAL;
1807 }
1808 
1809 static void btf_df_show(const struct btf *btf, const struct btf_type *t,
1810 			u32 type_id, void *data, u8 bits_offsets,
1811 			struct btf_show *show)
1812 {
1813 	btf_show(show, "<unsupported kind:%u>", BTF_INFO_KIND(t->info));
1814 }
1815 
1816 static int btf_int_check_member(struct btf_verifier_env *env,
1817 				const struct btf_type *struct_type,
1818 				const struct btf_member *member,
1819 				const struct btf_type *member_type)
1820 {
1821 	u32 int_data = btf_type_int(member_type);
1822 	u32 struct_bits_off = member->offset;
1823 	u32 struct_size = struct_type->size;
1824 	u32 nr_copy_bits;
1825 	u32 bytes_offset;
1826 
1827 	if (U32_MAX - struct_bits_off < BTF_INT_OFFSET(int_data)) {
1828 		btf_verifier_log_member(env, struct_type, member,
1829 					"bits_offset exceeds U32_MAX");
1830 		return -EINVAL;
1831 	}
1832 
1833 	struct_bits_off += BTF_INT_OFFSET(int_data);
1834 	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
1835 	nr_copy_bits = BTF_INT_BITS(int_data) +
1836 		BITS_PER_BYTE_MASKED(struct_bits_off);
1837 
1838 	if (nr_copy_bits > BITS_PER_U128) {
1839 		btf_verifier_log_member(env, struct_type, member,
1840 					"nr_copy_bits exceeds 128");
1841 		return -EINVAL;
1842 	}
1843 
1844 	if (struct_size < bytes_offset ||
1845 	    struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) {
1846 		btf_verifier_log_member(env, struct_type, member,
1847 					"Member exceeds struct_size");
1848 		return -EINVAL;
1849 	}
1850 
1851 	return 0;
1852 }
1853 
1854 static int btf_int_check_kflag_member(struct btf_verifier_env *env,
1855 				      const struct btf_type *struct_type,
1856 				      const struct btf_member *member,
1857 				      const struct btf_type *member_type)
1858 {
1859 	u32 struct_bits_off, nr_bits, nr_int_data_bits, bytes_offset;
1860 	u32 int_data = btf_type_int(member_type);
1861 	u32 struct_size = struct_type->size;
1862 	u32 nr_copy_bits;
1863 
1864 	/* a regular int type is required for the kflag int member */
1865 	if (!btf_type_int_is_regular(member_type)) {
1866 		btf_verifier_log_member(env, struct_type, member,
1867 					"Invalid member base type");
1868 		return -EINVAL;
1869 	}
1870 
1871 	/* check sanity of bitfield size */
1872 	nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset);
1873 	struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset);
1874 	nr_int_data_bits = BTF_INT_BITS(int_data);
1875 	if (!nr_bits) {
1876 		/* Not a bitfield member, member offset must be at byte
1877 		 * boundary.
1878 		 */
1879 		if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
1880 			btf_verifier_log_member(env, struct_type, member,
1881 						"Invalid member offset");
1882 			return -EINVAL;
1883 		}
1884 
1885 		nr_bits = nr_int_data_bits;
1886 	} else if (nr_bits > nr_int_data_bits) {
1887 		btf_verifier_log_member(env, struct_type, member,
1888 					"Invalid member bitfield_size");
1889 		return -EINVAL;
1890 	}
1891 
1892 	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
1893 	nr_copy_bits = nr_bits + BITS_PER_BYTE_MASKED(struct_bits_off);
1894 	if (nr_copy_bits > BITS_PER_U128) {
1895 		btf_verifier_log_member(env, struct_type, member,
1896 					"nr_copy_bits exceeds 128");
1897 		return -EINVAL;
1898 	}
1899 
1900 	if (struct_size < bytes_offset ||
1901 	    struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) {
1902 		btf_verifier_log_member(env, struct_type, member,
1903 					"Member exceeds struct_size");
1904 		return -EINVAL;
1905 	}
1906 
1907 	return 0;
1908 }
1909 
1910 static s32 btf_int_check_meta(struct btf_verifier_env *env,
1911 			      const struct btf_type *t,
1912 			      u32 meta_left)
1913 {
1914 	u32 int_data, nr_bits, meta_needed = sizeof(int_data);
1915 	u16 encoding;
1916 
1917 	if (meta_left < meta_needed) {
1918 		btf_verifier_log_basic(env, t,
1919 				       "meta_left:%u meta_needed:%u",
1920 				       meta_left, meta_needed);
1921 		return -EINVAL;
1922 	}
1923 
1924 	if (btf_type_vlen(t)) {
1925 		btf_verifier_log_type(env, t, "vlen != 0");
1926 		return -EINVAL;
1927 	}
1928 
1929 	if (btf_type_kflag(t)) {
1930 		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
1931 		return -EINVAL;
1932 	}
1933 
1934 	int_data = btf_type_int(t);
1935 	if (int_data & ~BTF_INT_MASK) {
1936 		btf_verifier_log_basic(env, t, "Invalid int_data:%x",
1937 				       int_data);
1938 		return -EINVAL;
1939 	}
1940 
1941 	nr_bits = BTF_INT_BITS(int_data) + BTF_INT_OFFSET(int_data);
1942 
1943 	if (nr_bits > BITS_PER_U128) {
1944 		btf_verifier_log_type(env, t, "nr_bits exceeds %zu",
1945 				      BITS_PER_U128);
1946 		return -EINVAL;
1947 	}
1948 
1949 	if (BITS_ROUNDUP_BYTES(nr_bits) > t->size) {
1950 		btf_verifier_log_type(env, t, "nr_bits exceeds type_size");
1951 		return -EINVAL;
1952 	}
1953 
1954 	/*
1955 	 * Only one of the encoding bits is allowed and it
1956 	 * should be sufficient for the pretty print purpose (i.e. decoding).
1957 	 * Multiple bits can be allowed later if it is found
1958 	 * to be insufficient.
1959 	 */
1960 	encoding = BTF_INT_ENCODING(int_data);
1961 	if (encoding &&
1962 	    encoding != BTF_INT_SIGNED &&
1963 	    encoding != BTF_INT_CHAR &&
1964 	    encoding != BTF_INT_BOOL) {
1965 		btf_verifier_log_type(env, t, "Unsupported encoding");
1966 		return -ENOTSUPP;
1967 	}
1968 
1969 	btf_verifier_log_type(env, t, NULL);
1970 
1971 	return meta_needed;
1972 }
1973 
1974 static void btf_int_log(struct btf_verifier_env *env,
1975 			const struct btf_type *t)
1976 {
1977 	int int_data = btf_type_int(t);
1978 
1979 	btf_verifier_log(env,
1980 			 "size=%u bits_offset=%u nr_bits=%u encoding=%s",
1981 			 t->size, BTF_INT_OFFSET(int_data),
1982 			 BTF_INT_BITS(int_data),
1983 			 btf_int_encoding_str(BTF_INT_ENCODING(int_data)));
1984 }
1985 
1986 static void btf_int128_print(struct btf_show *show, void *data)
1987 {
1988 	/* data points to a __int128 number.
1989 	 * Suppose
1990 	 *     int128_num = *(__int128 *)data;
1991 	 * The formulas below show what upper_num and lower_num represent:
1992 	 *     upper_num = int128_num >> 64;
1993 	 *     lower_num = int128_num & 0xffffffffFFFFFFFFULL;
1994 	 */
1995 	u64 upper_num, lower_num;
1996 
1997 #ifdef __BIG_ENDIAN_BITFIELD
1998 	upper_num = *(u64 *)data;
1999 	lower_num = *(u64 *)(data + 8);
2000 #else
2001 	upper_num = *(u64 *)(data + 8);
2002 	lower_num = *(u64 *)data;
2003 #endif
2004 	if (upper_num == 0)
2005 		btf_show_type_value(show, "0x%llx", lower_num);
2006 	else
2007 		btf_show_type_values(show, "0x%llx%016llx", upper_num,
2008 				     lower_num);
2009 }
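/* Editorial example: a __int128 value of ((__int128)1 << 64 | 0x2a) has
 * upper_num == 0x1 and lower_num == 0x2a and is printed as
 * "0x1000000000000002a"; a value that fits in 64 bits, e.g. 0x2a, is
 * printed without the zero-padded upper half as "0x2a".
 */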
2010 
2011 static void btf_int128_shift(u64 *print_num, u16 left_shift_bits,
2012 			     u16 right_shift_bits)
2013 {
2014 	u64 upper_num, lower_num;
2015 
2016 #ifdef __BIG_ENDIAN_BITFIELD
2017 	upper_num = print_num[0];
2018 	lower_num = print_num[1];
2019 #else
2020 	upper_num = print_num[1];
2021 	lower_num = print_num[0];
2022 #endif
2023 
2024 	/* shake out un-needed bits by shift/or operations */
2025 	if (left_shift_bits >= 64) {
2026 		upper_num = lower_num << (left_shift_bits - 64);
2027 		lower_num = 0;
2028 	} else {
2029 		upper_num = (upper_num << left_shift_bits) |
2030 			    (lower_num >> (64 - left_shift_bits));
2031 		lower_num = lower_num << left_shift_bits;
2032 	}
2033 
2034 	if (right_shift_bits >= 64) {
2035 		lower_num = upper_num >> (right_shift_bits - 64);
2036 		upper_num = 0;
2037 	} else {
2038 		lower_num = (lower_num >> right_shift_bits) |
2039 			    (upper_num << (64 - right_shift_bits));
2040 		upper_num = upper_num >> right_shift_bits;
2041 	}
2042 
2043 #ifdef __BIG_ENDIAN_BITFIELD
2044 	print_num[0] = upper_num;
2045 	print_num[1] = lower_num;
2046 #else
2047 	print_num[0] = lower_num;
2048 	print_num[1] = upper_num;
2049 #endif
2050 }
2051 
2052 static void btf_bitfield_show(void *data, u8 bits_offset,
2053 			      u8 nr_bits, struct btf_show *show)
2054 {
2055 	u16 left_shift_bits, right_shift_bits;
2056 	u8 nr_copy_bytes;
2057 	u8 nr_copy_bits;
2058 	u64 print_num[2] = {};
2059 
2060 	nr_copy_bits = nr_bits + bits_offset;
2061 	nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits);
2062 
2063 	memcpy(print_num, data, nr_copy_bytes);
2064 
2065 #ifdef __BIG_ENDIAN_BITFIELD
2066 	left_shift_bits = bits_offset;
2067 #else
2068 	left_shift_bits = BITS_PER_U128 - nr_copy_bits;
2069 #endif
2070 	right_shift_bits = BITS_PER_U128 - nr_bits;
2071 
2072 	btf_int128_shift(print_num, left_shift_bits, right_shift_bits);
2073 	btf_int128_print(show, print_num);
2074 }
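/* Editorial worked example (little endian, so the #else branches above
 * apply): extracting a 3-bit bitfield that starts at bit 2 of a byte
 * holding 0x76:
 *
 *	nr_copy_bits = 3 + 2 = 5, so one byte is copied into print_num
 *	left_shift   = 128 - 5 = 123  (drops the bits above the field)
 *	right_shift  = 128 - 3 = 125  (drops the bits below the field)
 *
 * 0x76 is 01110110b; bits 2..4 are 1, 0 and 1, so "0x5" is printed.
 */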
2075 
2076 
2077 static void btf_int_bits_show(const struct btf *btf,
2078 			      const struct btf_type *t,
2079 			      void *data, u8 bits_offset,
2080 			      struct btf_show *show)
2081 {
2082 	u32 int_data = btf_type_int(t);
2083 	u8 nr_bits = BTF_INT_BITS(int_data);
2084 	u8 total_bits_offset;
2085 
2086 	/*
2087 	 * bits_offset is at most 7.
2088 	 * BTF_INT_OFFSET() cannot exceed 128 bits.
2089 	 */
2090 	total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data);
2091 	data += BITS_ROUNDDOWN_BYTES(total_bits_offset);
2092 	bits_offset = BITS_PER_BYTE_MASKED(total_bits_offset);
2093 	btf_bitfield_show(data, bits_offset, nr_bits, show);
2094 }
2095 
2096 static void btf_int_show(const struct btf *btf, const struct btf_type *t,
2097 			 u32 type_id, void *data, u8 bits_offset,
2098 			 struct btf_show *show)
2099 {
2100 	u32 int_data = btf_type_int(t);
2101 	u8 encoding = BTF_INT_ENCODING(int_data);
2102 	bool sign = encoding & BTF_INT_SIGNED;
2103 	u8 nr_bits = BTF_INT_BITS(int_data);
2104 	void *safe_data;
2105 
2106 	safe_data = btf_show_start_type(show, t, type_id, data);
2107 	if (!safe_data)
2108 		return;
2109 
2110 	if (bits_offset || BTF_INT_OFFSET(int_data) ||
2111 	    BITS_PER_BYTE_MASKED(nr_bits)) {
2112 		btf_int_bits_show(btf, t, safe_data, bits_offset, show);
2113 		goto out;
2114 	}
2115 
2116 	switch (nr_bits) {
2117 	case 128:
2118 		btf_int128_print(show, safe_data);
2119 		break;
2120 	case 64:
2121 		if (sign)
2122 			btf_show_type_value(show, "%lld", *(s64 *)safe_data);
2123 		else
2124 			btf_show_type_value(show, "%llu", *(u64 *)safe_data);
2125 		break;
2126 	case 32:
2127 		if (sign)
2128 			btf_show_type_value(show, "%d", *(s32 *)safe_data);
2129 		else
2130 			btf_show_type_value(show, "%u", *(u32 *)safe_data);
2131 		break;
2132 	case 16:
2133 		if (sign)
2134 			btf_show_type_value(show, "%d", *(s16 *)safe_data);
2135 		else
2136 			btf_show_type_value(show, "%u", *(u16 *)safe_data);
2137 		break;
2138 	case 8:
2139 		if (show->state.array_encoding == BTF_INT_CHAR) {
2140 			/* check for null terminator */
2141 			if (show->state.array_terminated)
2142 				break;
2143 			if (*(char *)data == '\0') {
2144 				show->state.array_terminated = 1;
2145 				break;
2146 			}
2147 			if (isprint(*(char *)data)) {
2148 				btf_show_type_value(show, "'%c'",
2149 						    *(char *)safe_data);
2150 				break;
2151 			}
2152 		}
2153 		if (sign)
2154 			btf_show_type_value(show, "%d", *(s8 *)safe_data);
2155 		else
2156 			btf_show_type_value(show, "%u", *(u8 *)safe_data);
2157 		break;
2158 	default:
2159 		btf_int_bits_show(btf, t, safe_data, bits_offset, show);
2160 		break;
2161 	}
2162 out:
2163 	btf_show_end_type(show);
2164 }
2165 
2166 static const struct btf_kind_operations int_ops = {
2167 	.check_meta = btf_int_check_meta,
2168 	.resolve = btf_df_resolve,
2169 	.check_member = btf_int_check_member,
2170 	.check_kflag_member = btf_int_check_kflag_member,
2171 	.log_details = btf_int_log,
2172 	.show = btf_int_show,
2173 };
2174 
2175 static int btf_modifier_check_member(struct btf_verifier_env *env,
2176 				     const struct btf_type *struct_type,
2177 				     const struct btf_member *member,
2178 				     const struct btf_type *member_type)
2179 {
2180 	const struct btf_type *resolved_type;
2181 	u32 resolved_type_id = member->type;
2182 	struct btf_member resolved_member;
2183 	struct btf *btf = env->btf;
2184 
2185 	resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL);
2186 	if (!resolved_type) {
2187 		btf_verifier_log_member(env, struct_type, member,
2188 					"Invalid member");
2189 		return -EINVAL;
2190 	}
2191 
2192 	resolved_member = *member;
2193 	resolved_member.type = resolved_type_id;
2194 
2195 	return btf_type_ops(resolved_type)->check_member(env, struct_type,
2196 							 &resolved_member,
2197 							 resolved_type);
2198 }
2199 
2200 static int btf_modifier_check_kflag_member(struct btf_verifier_env *env,
2201 					   const struct btf_type *struct_type,
2202 					   const struct btf_member *member,
2203 					   const struct btf_type *member_type)
2204 {
2205 	const struct btf_type *resolved_type;
2206 	u32 resolved_type_id = member->type;
2207 	struct btf_member resolved_member;
2208 	struct btf *btf = env->btf;
2209 
2210 	resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL);
2211 	if (!resolved_type) {
2212 		btf_verifier_log_member(env, struct_type, member,
2213 					"Invalid member");
2214 		return -EINVAL;
2215 	}
2216 
2217 	resolved_member = *member;
2218 	resolved_member.type = resolved_type_id;
2219 
2220 	return btf_type_ops(resolved_type)->check_kflag_member(env, struct_type,
2221 							       &resolved_member,
2222 							       resolved_type);
2223 }
2224 
2225 static int btf_ptr_check_member(struct btf_verifier_env *env,
2226 				const struct btf_type *struct_type,
2227 				const struct btf_member *member,
2228 				const struct btf_type *member_type)
2229 {
2230 	u32 struct_size, struct_bits_off, bytes_offset;
2231 
2232 	struct_size = struct_type->size;
2233 	struct_bits_off = member->offset;
2234 	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
2235 
2236 	if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
2237 		btf_verifier_log_member(env, struct_type, member,
2238 					"Member is not byte aligned");
2239 		return -EINVAL;
2240 	}
2241 
2242 	if (struct_size - bytes_offset < sizeof(void *)) {
2243 		btf_verifier_log_member(env, struct_type, member,
2244 					"Member exceeds struct_size");
2245 		return -EINVAL;
2246 	}
2247 
2248 	return 0;
2249 }
2250 
2251 static int btf_ref_type_check_meta(struct btf_verifier_env *env,
2252 				   const struct btf_type *t,
2253 				   u32 meta_left)
2254 {
2255 	if (btf_type_vlen(t)) {
2256 		btf_verifier_log_type(env, t, "vlen != 0");
2257 		return -EINVAL;
2258 	}
2259 
2260 	if (btf_type_kflag(t)) {
2261 		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
2262 		return -EINVAL;
2263 	}
2264 
2265 	if (!BTF_TYPE_ID_VALID(t->type)) {
2266 		btf_verifier_log_type(env, t, "Invalid type_id");
2267 		return -EINVAL;
2268 	}
2269 
2270 	/* A typedef type must have a valid name, while the other ref types
2271 	 * (volatile, const, restrict) must have a null name.
2272 	 */
2273 	if (BTF_INFO_KIND(t->info) == BTF_KIND_TYPEDEF) {
2274 		if (!t->name_off ||
2275 		    !btf_name_valid_identifier(env->btf, t->name_off)) {
2276 			btf_verifier_log_type(env, t, "Invalid name");
2277 			return -EINVAL;
2278 		}
2279 	} else {
2280 		if (t->name_off) {
2281 			btf_verifier_log_type(env, t, "Invalid name");
2282 			return -EINVAL;
2283 		}
2284 	}
2285 
2286 	btf_verifier_log_type(env, t, NULL);
2287 
2288 	return 0;
2289 }
2290 
2291 static int btf_modifier_resolve(struct btf_verifier_env *env,
2292 				const struct resolve_vertex *v)
2293 {
2294 	const struct btf_type *t = v->t;
2295 	const struct btf_type *next_type;
2296 	u32 next_type_id = t->type;
2297 	struct btf *btf = env->btf;
2298 
2299 	next_type = btf_type_by_id(btf, next_type_id);
2300 	if (!next_type || btf_type_is_resolve_source_only(next_type)) {
2301 		btf_verifier_log_type(env, v->t, "Invalid type_id");
2302 		return -EINVAL;
2303 	}
2304 
2305 	if (!env_type_is_resolve_sink(env, next_type) &&
2306 	    !env_type_is_resolved(env, next_type_id))
2307 		return env_stack_push(env, next_type, next_type_id);
2308 
2309 	/* Figure out the resolved next_type_id with size.
2310 	 * They will be stored in the current modifier's
2311 	 * resolved_ids and resolved_sizes such that it can
2312 	 * save us a few type-following steps when we use it later (e.g. in
2313 	 * pretty print).
2314 	 */
2315 	if (!btf_type_id_size(btf, &next_type_id, NULL)) {
2316 		if (env_type_is_resolved(env, next_type_id))
2317 			next_type = btf_type_id_resolve(btf, &next_type_id);
2318 
2319 		/* "typedef void new_void", "const void"...etc */
2320 		if (!btf_type_is_void(next_type) &&
2321 		    !btf_type_is_fwd(next_type) &&
2322 		    !btf_type_is_func_proto(next_type)) {
2323 			btf_verifier_log_type(env, v->t, "Invalid type_id");
2324 			return -EINVAL;
2325 		}
2326 	}
2327 
2328 	env_stack_pop_resolved(env, next_type_id, 0);
2329 
2330 	return 0;
2331 }
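/* Editorial note on the caching above (type ids hypothetical): for a
 * chain TYPEDEF u32_t -> CONST -> INT u32, the CONST is resolved first
 * and its resolved_ids[] slot points at the INT; when the TYPEDEF is
 * resolved, btf_type_id_size() follows that slot, so the TYPEDEF's
 * resolved_ids[] slot also ends up pointing straight at the INT and
 * later lookups skip the whole modifier chain.
 */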
2332 
2333 static int btf_var_resolve(struct btf_verifier_env *env,
2334 			   const struct resolve_vertex *v)
2335 {
2336 	const struct btf_type *next_type;
2337 	const struct btf_type *t = v->t;
2338 	u32 next_type_id = t->type;
2339 	struct btf *btf = env->btf;
2340 
2341 	next_type = btf_type_by_id(btf, next_type_id);
2342 	if (!next_type || btf_type_is_resolve_source_only(next_type)) {
2343 		btf_verifier_log_type(env, v->t, "Invalid type_id");
2344 		return -EINVAL;
2345 	}
2346 
2347 	if (!env_type_is_resolve_sink(env, next_type) &&
2348 	    !env_type_is_resolved(env, next_type_id))
2349 		return env_stack_push(env, next_type, next_type_id);
2350 
2351 	if (btf_type_is_modifier(next_type)) {
2352 		const struct btf_type *resolved_type;
2353 		u32 resolved_type_id;
2354 
2355 		resolved_type_id = next_type_id;
2356 		resolved_type = btf_type_id_resolve(btf, &resolved_type_id);
2357 
2358 		if (btf_type_is_ptr(resolved_type) &&
2359 		    !env_type_is_resolve_sink(env, resolved_type) &&
2360 		    !env_type_is_resolved(env, resolved_type_id))
2361 			return env_stack_push(env, resolved_type,
2362 					      resolved_type_id);
2363 	}
2364 
2365 	/* We must resolve to something concrete at this point; no
2366 	 * forward types or similar that would resolve to a size of
2367 	 * zero are allowed.
2368 	 */
2369 	if (!btf_type_id_size(btf, &next_type_id, NULL)) {
2370 		btf_verifier_log_type(env, v->t, "Invalid type_id");
2371 		return -EINVAL;
2372 	}
2373 
2374 	env_stack_pop_resolved(env, next_type_id, 0);
2375 
2376 	return 0;
2377 }
2378 
2379 static int btf_ptr_resolve(struct btf_verifier_env *env,
2380 			   const struct resolve_vertex *v)
2381 {
2382 	const struct btf_type *next_type;
2383 	const struct btf_type *t = v->t;
2384 	u32 next_type_id = t->type;
2385 	struct btf *btf = env->btf;
2386 
2387 	next_type = btf_type_by_id(btf, next_type_id);
2388 	if (!next_type || btf_type_is_resolve_source_only(next_type)) {
2389 		btf_verifier_log_type(env, v->t, "Invalid type_id");
2390 		return -EINVAL;
2391 	}
2392 
2393 	if (!env_type_is_resolve_sink(env, next_type) &&
2394 	    !env_type_is_resolved(env, next_type_id))
2395 		return env_stack_push(env, next_type, next_type_id);
2396 
2397 	/* If the modifier was RESOLVED during RESOLVE_STRUCT_OR_ARRAY,
2398 	 * the modifier may have stopped resolving when it was resolved
2399 	 * to a ptr (last-resolved-ptr).
2400 	 *
2401 	 * We now need to continue from the last-resolved-ptr to
2402 	 * ensure the last-resolved-ptr does not refer back to
2403 	 * the current ptr (t).
2404 	 */
2405 	if (btf_type_is_modifier(next_type)) {
2406 		const struct btf_type *resolved_type;
2407 		u32 resolved_type_id;
2408 
2409 		resolved_type_id = next_type_id;
2410 		resolved_type = btf_type_id_resolve(btf, &resolved_type_id);
2411 
2412 		if (btf_type_is_ptr(resolved_type) &&
2413 		    !env_type_is_resolve_sink(env, resolved_type) &&
2414 		    !env_type_is_resolved(env, resolved_type_id))
2415 			return env_stack_push(env, resolved_type,
2416 					      resolved_type_id);
2417 	}
2418 
2419 	if (!btf_type_id_size(btf, &next_type_id, NULL)) {
2420 		if (env_type_is_resolved(env, next_type_id))
2421 			next_type = btf_type_id_resolve(btf, &next_type_id);
2422 
2423 		if (!btf_type_is_void(next_type) &&
2424 		    !btf_type_is_fwd(next_type) &&
2425 		    !btf_type_is_func_proto(next_type)) {
2426 			btf_verifier_log_type(env, v->t, "Invalid type_id");
2427 			return -EINVAL;
2428 		}
2429 	}
2430 
2431 	env_stack_pop_resolved(env, next_type_id, 0);
2432 
2433 	return 0;
2434 }
2435 
2436 static void btf_modifier_show(const struct btf *btf,
2437 			      const struct btf_type *t,
2438 			      u32 type_id, void *data,
2439 			      u8 bits_offset, struct btf_show *show)
2440 {
2441 	if (btf->resolved_ids)
2442 		t = btf_type_id_resolve(btf, &type_id);
2443 	else
2444 		t = btf_type_skip_modifiers(btf, type_id, NULL);
2445 
2446 	btf_type_ops(t)->show(btf, t, type_id, data, bits_offset, show);
2447 }
2448 
2449 static void btf_var_show(const struct btf *btf, const struct btf_type *t,
2450 			 u32 type_id, void *data, u8 bits_offset,
2451 			 struct btf_show *show)
2452 {
2453 	t = btf_type_id_resolve(btf, &type_id);
2454 
2455 	btf_type_ops(t)->show(btf, t, type_id, data, bits_offset, show);
2456 }
2457 
2458 static void btf_ptr_show(const struct btf *btf, const struct btf_type *t,
2459 			 u32 type_id, void *data, u8 bits_offset,
2460 			 struct btf_show *show)
2461 {
2462 	void *safe_data;
2463 
2464 	safe_data = btf_show_start_type(show, t, type_id, data);
2465 	if (!safe_data)
2466 		return;
2467 
2468 	/* It is a hashed value unless BTF_SHOW_PTR_RAW is specified */
2469 	if (show->flags & BTF_SHOW_PTR_RAW)
2470 		btf_show_type_value(show, "0x%px", *(void **)safe_data);
2471 	else
2472 		btf_show_type_value(show, "0x%p", *(void **)safe_data);
2473 	btf_show_end_type(show);
2474 }
2475 
2476 static void btf_ref_type_log(struct btf_verifier_env *env,
2477 			     const struct btf_type *t)
2478 {
2479 	btf_verifier_log(env, "type_id=%u", t->type);
2480 }
2481 
2482 static struct btf_kind_operations modifier_ops = {
2483 	.check_meta = btf_ref_type_check_meta,
2484 	.resolve = btf_modifier_resolve,
2485 	.check_member = btf_modifier_check_member,
2486 	.check_kflag_member = btf_modifier_check_kflag_member,
2487 	.log_details = btf_ref_type_log,
2488 	.show = btf_modifier_show,
2489 };
2490 
2491 static struct btf_kind_operations ptr_ops = {
2492 	.check_meta = btf_ref_type_check_meta,
2493 	.resolve = btf_ptr_resolve,
2494 	.check_member = btf_ptr_check_member,
2495 	.check_kflag_member = btf_generic_check_kflag_member,
2496 	.log_details = btf_ref_type_log,
2497 	.show = btf_ptr_show,
2498 };
2499 
2500 static s32 btf_fwd_check_meta(struct btf_verifier_env *env,
2501 			      const struct btf_type *t,
2502 			      u32 meta_left)
2503 {
2504 	if (btf_type_vlen(t)) {
2505 		btf_verifier_log_type(env, t, "vlen != 0");
2506 		return -EINVAL;
2507 	}
2508 
2509 	if (t->type) {
2510 		btf_verifier_log_type(env, t, "type != 0");
2511 		return -EINVAL;
2512 	}
2513 
2514 	/* fwd type must have a valid name */
2515 	if (!t->name_off ||
2516 	    !btf_name_valid_identifier(env->btf, t->name_off)) {
2517 		btf_verifier_log_type(env, t, "Invalid name");
2518 		return -EINVAL;
2519 	}
2520 
2521 	btf_verifier_log_type(env, t, NULL);
2522 
2523 	return 0;
2524 }
2525 
2526 static void btf_fwd_type_log(struct btf_verifier_env *env,
2527 			     const struct btf_type *t)
2528 {
2529 	btf_verifier_log(env, "%s", btf_type_kflag(t) ? "union" : "struct");
2530 }
2531 
2532 static struct btf_kind_operations fwd_ops = {
2533 	.check_meta = btf_fwd_check_meta,
2534 	.resolve = btf_df_resolve,
2535 	.check_member = btf_df_check_member,
2536 	.check_kflag_member = btf_df_check_kflag_member,
2537 	.log_details = btf_fwd_type_log,
2538 	.show = btf_df_show,
2539 };
2540 
2541 static int btf_array_check_member(struct btf_verifier_env *env,
2542 				  const struct btf_type *struct_type,
2543 				  const struct btf_member *member,
2544 				  const struct btf_type *member_type)
2545 {
2546 	u32 struct_bits_off = member->offset;
2547 	u32 struct_size, bytes_offset;
2548 	u32 array_type_id, array_size;
2549 	struct btf *btf = env->btf;
2550 
2551 	if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
2552 		btf_verifier_log_member(env, struct_type, member,
2553 					"Member is not byte aligned");
2554 		return -EINVAL;
2555 	}
2556 
2557 	array_type_id = member->type;
2558 	btf_type_id_size(btf, &array_type_id, &array_size);
2559 	struct_size = struct_type->size;
2560 	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
2561 	if (struct_size - bytes_offset < array_size) {
2562 		btf_verifier_log_member(env, struct_type, member,
2563 					"Member exceeds struct_size");
2564 		return -EINVAL;
2565 	}
2566 
2567 	return 0;
2568 }
2569 
2570 static s32 btf_array_check_meta(struct btf_verifier_env *env,
2571 				const struct btf_type *t,
2572 				u32 meta_left)
2573 {
2574 	const struct btf_array *array = btf_type_array(t);
2575 	u32 meta_needed = sizeof(*array);
2576 
2577 	if (meta_left < meta_needed) {
2578 		btf_verifier_log_basic(env, t,
2579 				       "meta_left:%u meta_needed:%u",
2580 				       meta_left, meta_needed);
2581 		return -EINVAL;
2582 	}
2583 
2584 	/* array type should not have a name */
2585 	if (t->name_off) {
2586 		btf_verifier_log_type(env, t, "Invalid name");
2587 		return -EINVAL;
2588 	}
2589 
2590 	if (btf_type_vlen(t)) {
2591 		btf_verifier_log_type(env, t, "vlen != 0");
2592 		return -EINVAL;
2593 	}
2594 
2595 	if (btf_type_kflag(t)) {
2596 		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
2597 		return -EINVAL;
2598 	}
2599 
2600 	if (t->size) {
2601 		btf_verifier_log_type(env, t, "size != 0");
2602 		return -EINVAL;
2603 	}
2604 
2605 	/* Array elem type and index type cannot be in type void,
2606 	 * so !array->type and !array->index_type are not allowed.
2607 	 */
2608 	if (!array->type || !BTF_TYPE_ID_VALID(array->type)) {
2609 		btf_verifier_log_type(env, t, "Invalid elem");
2610 		return -EINVAL;
2611 	}
2612 
2613 	if (!array->index_type || !BTF_TYPE_ID_VALID(array->index_type)) {
2614 		btf_verifier_log_type(env, t, "Invalid index");
2615 		return -EINVAL;
2616 	}
2617 
2618 	btf_verifier_log_type(env, t, NULL);
2619 
2620 	return meta_needed;
2621 }
2622 
2623 static int btf_array_resolve(struct btf_verifier_env *env,
2624 			     const struct resolve_vertex *v)
2625 {
2626 	const struct btf_array *array = btf_type_array(v->t);
2627 	const struct btf_type *elem_type, *index_type;
2628 	u32 elem_type_id, index_type_id;
2629 	struct btf *btf = env->btf;
2630 	u32 elem_size;
2631 
2632 	/* Check array->index_type */
2633 	index_type_id = array->index_type;
2634 	index_type = btf_type_by_id(btf, index_type_id);
2635 	if (btf_type_nosize_or_null(index_type) ||
2636 	    btf_type_is_resolve_source_only(index_type)) {
2637 		btf_verifier_log_type(env, v->t, "Invalid index");
2638 		return -EINVAL;
2639 	}
2640 
2641 	if (!env_type_is_resolve_sink(env, index_type) &&
2642 	    !env_type_is_resolved(env, index_type_id))
2643 		return env_stack_push(env, index_type, index_type_id);
2644 
2645 	index_type = btf_type_id_size(btf, &index_type_id, NULL);
2646 	if (!index_type || !btf_type_is_int(index_type) ||
2647 	    !btf_type_int_is_regular(index_type)) {
2648 		btf_verifier_log_type(env, v->t, "Invalid index");
2649 		return -EINVAL;
2650 	}
2651 
2652 	/* Check array->type */
2653 	elem_type_id = array->type;
2654 	elem_type = btf_type_by_id(btf, elem_type_id);
2655 	if (btf_type_nosize_or_null(elem_type) ||
2656 	    btf_type_is_resolve_source_only(elem_type)) {
2657 		btf_verifier_log_type(env, v->t,
2658 				      "Invalid elem");
2659 		return -EINVAL;
2660 	}
2661 
2662 	if (!env_type_is_resolve_sink(env, elem_type) &&
2663 	    !env_type_is_resolved(env, elem_type_id))
2664 		return env_stack_push(env, elem_type, elem_type_id);
2665 
2666 	elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
2667 	if (!elem_type) {
2668 		btf_verifier_log_type(env, v->t, "Invalid elem");
2669 		return -EINVAL;
2670 	}
2671 
2672 	if (btf_type_is_int(elem_type) && !btf_type_int_is_regular(elem_type)) {
2673 		btf_verifier_log_type(env, v->t, "Invalid array of int");
2674 		return -EINVAL;
2675 	}
2676 
2677 	if (array->nelems && elem_size > U32_MAX / array->nelems) {
2678 		btf_verifier_log_type(env, v->t,
2679 				      "Array size overflows U32_MAX");
2680 		return -EINVAL;
2681 	}
2682 
2683 	env_stack_pop_resolved(env, elem_type_id, elem_size * array->nelems);
2684 
2685 	return 0;
2686 }
2687 
2688 static void btf_array_log(struct btf_verifier_env *env,
2689 			  const struct btf_type *t)
2690 {
2691 	const struct btf_array *array = btf_type_array(t);
2692 
2693 	btf_verifier_log(env, "type_id=%u index_type_id=%u nr_elems=%u",
2694 			 array->type, array->index_type, array->nelems);
2695 }
2696 
2697 static void __btf_array_show(const struct btf *btf, const struct btf_type *t,
2698 			     u32 type_id, void *data, u8 bits_offset,
2699 			     struct btf_show *show)
2700 {
2701 	const struct btf_array *array = btf_type_array(t);
2702 	const struct btf_kind_operations *elem_ops;
2703 	const struct btf_type *elem_type;
2704 	u32 i, elem_size = 0, elem_type_id;
2705 	u16 encoding = 0;
2706 
2707 	elem_type_id = array->type;
2708 	elem_type = btf_type_skip_modifiers(btf, elem_type_id, NULL);
2709 	if (elem_type && btf_type_has_size(elem_type))
2710 		elem_size = elem_type->size;
2711 
2712 	if (elem_type && btf_type_is_int(elem_type)) {
2713 		u32 int_type = btf_type_int(elem_type);
2714 
2715 		encoding = BTF_INT_ENCODING(int_type);
2716 
2717 		/*
2718 		 * BTF_INT_CHAR encoding never seems to be set for
2719 		 * char arrays, so if size is 1 and element is
2720 		 * printable as a char, we'll do that.
2721 		 */
2722 		if (elem_size == 1)
2723 			encoding = BTF_INT_CHAR;
2724 	}
2725 
2726 	if (!btf_show_start_array_type(show, t, type_id, encoding, data))
2727 		return;
2728 
2729 	if (!elem_type)
2730 		goto out;
2731 	elem_ops = btf_type_ops(elem_type);
2732 
2733 	for (i = 0; i < array->nelems; i++) {
2734 
2735 		btf_show_start_array_member(show);
2736 
2737 		elem_ops->show(btf, elem_type, elem_type_id, data,
2738 			       bits_offset, show);
2739 		data += elem_size;
2740 
2741 		btf_show_end_array_member(show);
2742 
2743 		if (show->state.array_terminated)
2744 			break;
2745 	}
2746 out:
2747 	btf_show_end_array_type(show);
2748 }
2749 
2750 static void btf_array_show(const struct btf *btf, const struct btf_type *t,
2751 			   u32 type_id, void *data, u8 bits_offset,
2752 			   struct btf_show *show)
2753 {
2754 	const struct btf_member *m = show->state.member;
2755 
2756 	/*
2757 	 * First check if any members would be shown (are non-zero).
2758 	 * See comments above "struct btf_show" definition for more
2759 	 * details on how this works at a high-level.
2760 	 */
2761 	if (show->state.depth > 0 && !(show->flags & BTF_SHOW_ZERO)) {
2762 		if (!show->state.depth_check) {
2763 			show->state.depth_check = show->state.depth + 1;
2764 			show->state.depth_to_show = 0;
2765 		}
2766 		__btf_array_show(btf, t, type_id, data, bits_offset, show);
2767 		show->state.member = m;
2768 
2769 		if (show->state.depth_check != show->state.depth + 1)
2770 			return;
2771 		show->state.depth_check = 0;
2772 
2773 		if (show->state.depth_to_show <= show->state.depth)
2774 			return;
2775 		/*
2776 		 * Reaching here indicates we have recursed and found
2777 		 * non-zero array member(s).
2778 		 */
2779 	}
2780 	__btf_array_show(btf, t, type_id, data, bits_offset, show);
2781 }
2782 
2783 static struct btf_kind_operations array_ops = {
2784 	.check_meta = btf_array_check_meta,
2785 	.resolve = btf_array_resolve,
2786 	.check_member = btf_array_check_member,
2787 	.check_kflag_member = btf_generic_check_kflag_member,
2788 	.log_details = btf_array_log,
2789 	.show = btf_array_show,
2790 };
2791 
2792 static int btf_struct_check_member(struct btf_verifier_env *env,
2793 				   const struct btf_type *struct_type,
2794 				   const struct btf_member *member,
2795 				   const struct btf_type *member_type)
2796 {
2797 	u32 struct_bits_off = member->offset;
2798 	u32 struct_size, bytes_offset;
2799 
2800 	if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
2801 		btf_verifier_log_member(env, struct_type, member,
2802 					"Member is not byte aligned");
2803 		return -EINVAL;
2804 	}
2805 
2806 	struct_size = struct_type->size;
2807 	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
2808 	if (struct_size - bytes_offset < member_type->size) {
2809 		btf_verifier_log_member(env, struct_type, member,
2810 					"Member exceeds struct_size");
2811 		return -EINVAL;
2812 	}
2813 
2814 	return 0;
2815 }
2816 
2817 static s32 btf_struct_check_meta(struct btf_verifier_env *env,
2818 				 const struct btf_type *t,
2819 				 u32 meta_left)
2820 {
2821 	bool is_union = BTF_INFO_KIND(t->info) == BTF_KIND_UNION;
2822 	const struct btf_member *member;
2823 	u32 meta_needed, last_offset;
2824 	struct btf *btf = env->btf;
2825 	u32 struct_size = t->size;
2826 	u32 offset;
2827 	u16 i;
2828 
2829 	meta_needed = btf_type_vlen(t) * sizeof(*member);
2830 	if (meta_left < meta_needed) {
2831 		btf_verifier_log_basic(env, t,
2832 				       "meta_left:%u meta_needed:%u",
2833 				       meta_left, meta_needed);
2834 		return -EINVAL;
2835 	}
2836 
2837 	/* struct type either no name or a valid one */
2838 	if (t->name_off &&
2839 	    !btf_name_valid_identifier(env->btf, t->name_off)) {
2840 		btf_verifier_log_type(env, t, "Invalid name");
2841 		return -EINVAL;
2842 	}
2843 
2844 	btf_verifier_log_type(env, t, NULL);
2845 
2846 	last_offset = 0;
2847 	for_each_member(i, t, member) {
2848 		if (!btf_name_offset_valid(btf, member->name_off)) {
2849 			btf_verifier_log_member(env, t, member,
2850 						"Invalid member name_offset:%u",
2851 						member->name_off);
2852 			return -EINVAL;
2853 		}
2854 
2855 		/* struct member either no name or a valid one */
2856 		if (member->name_off &&
2857 		    !btf_name_valid_identifier(btf, member->name_off)) {
2858 			btf_verifier_log_member(env, t, member, "Invalid name");
2859 			return -EINVAL;
2860 		}
2861 		/* A member cannot be in type void */
2862 		if (!member->type || !BTF_TYPE_ID_VALID(member->type)) {
2863 			btf_verifier_log_member(env, t, member,
2864 						"Invalid type_id");
2865 			return -EINVAL;
2866 		}
2867 
2868 		offset = btf_member_bit_offset(t, member);
2869 		if (is_union && offset) {
2870 			btf_verifier_log_member(env, t, member,
2871 						"Invalid member bits_offset");
2872 			return -EINVAL;
2873 		}
2874 
2875 		/*
2876 		 * ">" instead of ">=" because the last member could be
2877 		 * "char a[0];"
2878 		 */
2879 		if (last_offset > offset) {
2880 			btf_verifier_log_member(env, t, member,
2881 						"Invalid member bits_offset");
2882 			return -EINVAL;
2883 		}
2884 
2885 		if (BITS_ROUNDUP_BYTES(offset) > struct_size) {
2886 			btf_verifier_log_member(env, t, member,
2887 						"Member bits_offset exceeds its struct size");
2888 			return -EINVAL;
2889 		}
2890 
2891 		btf_verifier_log_member(env, t, member, NULL);
2892 		last_offset = offset;
2893 	}
2894 
2895 	return meta_needed;
2896 }
2897 
2898 static int btf_struct_resolve(struct btf_verifier_env *env,
2899 			      const struct resolve_vertex *v)
2900 {
2901 	const struct btf_member *member;
2902 	int err;
2903 	u16 i;
2904 
2905 	/* Before continuing to resolve the next_member,
2906 	 * ensure the last member is indeed resolved to a
2907 	 * type with size info.
2908 	 */
2909 	if (v->next_member) {
2910 		const struct btf_type *last_member_type;
2911 		const struct btf_member *last_member;
2912 		u32 last_member_type_id;
2913 
2914 		last_member = btf_type_member(v->t) + v->next_member - 1;
2915 		last_member_type_id = last_member->type;
2916 		if (WARN_ON_ONCE(!env_type_is_resolved(env,
2917 						       last_member_type_id)))
2918 			return -EINVAL;
2919 
2920 		last_member_type = btf_type_by_id(env->btf,
2921 						  last_member_type_id);
2922 		if (btf_type_kflag(v->t))
2923 			err = btf_type_ops(last_member_type)->check_kflag_member(env, v->t,
2924 								last_member,
2925 								last_member_type);
2926 		else
2927 			err = btf_type_ops(last_member_type)->check_member(env, v->t,
2928 								last_member,
2929 								last_member_type);
2930 		if (err)
2931 			return err;
2932 	}
2933 
2934 	for_each_member_from(i, v->next_member, v->t, member) {
2935 		u32 member_type_id = member->type;
2936 		const struct btf_type *member_type = btf_type_by_id(env->btf,
2937 								member_type_id);
2938 
2939 		if (btf_type_nosize_or_null(member_type) ||
2940 		    btf_type_is_resolve_source_only(member_type)) {
2941 			btf_verifier_log_member(env, v->t, member,
2942 						"Invalid member");
2943 			return -EINVAL;
2944 		}
2945 
2946 		if (!env_type_is_resolve_sink(env, member_type) &&
2947 		    !env_type_is_resolved(env, member_type_id)) {
2948 			env_stack_set_next_member(env, i + 1);
2949 			return env_stack_push(env, member_type, member_type_id);
2950 		}
2951 
2952 		if (btf_type_kflag(v->t))
2953 			err = btf_type_ops(member_type)->check_kflag_member(env, v->t,
2954 									    member,
2955 									    member_type);
2956 		else
2957 			err = btf_type_ops(member_type)->check_member(env, v->t,
2958 								      member,
2959 								      member_type);
2960 		if (err)
2961 			return err;
2962 	}
2963 
2964 	env_stack_pop_resolved(env, 0, 0);
2965 
2966 	return 0;
2967 }
2968 
2969 static void btf_struct_log(struct btf_verifier_env *env,
2970 			   const struct btf_type *t)
2971 {
2972 	btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
2973 }
2974 
2975 /* find 'struct bpf_spin_lock' in map value.
2976  * return >= 0 offset if found
2977  * and < 0 in case of error
2978  */
2979 int btf_find_spin_lock(const struct btf *btf, const struct btf_type *t)
2980 {
2981 	const struct btf_member *member;
2982 	u32 i, off = -ENOENT;
2983 
2984 	if (!__btf_type_is_struct(t))
2985 		return -EINVAL;
2986 
2987 	for_each_member(i, t, member) {
2988 		const struct btf_type *member_type = btf_type_by_id(btf,
2989 								    member->type);
2990 		if (!__btf_type_is_struct(member_type))
2991 			continue;
2992 		if (member_type->size != sizeof(struct bpf_spin_lock))
2993 			continue;
2994 		if (strcmp(__btf_name_by_offset(btf, member_type->name_off),
2995 			   "bpf_spin_lock"))
2996 			continue;
2997 		if (off != -ENOENT)
2998 			/* only one 'struct bpf_spin_lock' is allowed */
2999 			return -E2BIG;
3000 		off = btf_member_bit_offset(t, member);
3001 		if (off % 8)
3002 			/* valid C code cannot generate such BTF */
3003 			return -EINVAL;
3004 		off /= 8;
3005 		if (off % __alignof__(struct bpf_spin_lock))
3006 			/* valid struct bpf_spin_lock will be 4 byte aligned */
3007 			return -EINVAL;
3008 	}
3009 	return off;
3010 }
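/* Editorial sketch (hypothetical map value layout):
 *
 *	struct val {
 *		int counter;
 *		struct bpf_spin_lock lock;
 *	};
 *
 * btf_find_spin_lock() returns the byte offset of "lock"
 * (offsetof(struct val, lock)), -ENOENT when no bpf_spin_lock member
 * exists, and -E2BIG when more than one is present.
 */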
3011 
3012 static void __btf_struct_show(const struct btf *btf, const struct btf_type *t,
3013 			      u32 type_id, void *data, u8 bits_offset,
3014 			      struct btf_show *show)
3015 {
3016 	const struct btf_member *member;
3017 	void *safe_data;
3018 	u32 i;
3019 
3020 	safe_data = btf_show_start_struct_type(show, t, type_id, data);
3021 	if (!safe_data)
3022 		return;
3023 
3024 	for_each_member(i, t, member) {
3025 		const struct btf_type *member_type = btf_type_by_id(btf,
3026 								member->type);
3027 		const struct btf_kind_operations *ops;
3028 		u32 member_offset, bitfield_size;
3029 		u32 bytes_offset;
3030 		u8 bits8_offset;
3031 
3032 		btf_show_start_member(show, member);
3033 
3034 		member_offset = btf_member_bit_offset(t, member);
3035 		bitfield_size = btf_member_bitfield_size(t, member);
3036 		bytes_offset = BITS_ROUNDDOWN_BYTES(member_offset);
3037 		bits8_offset = BITS_PER_BYTE_MASKED(member_offset);
3038 		if (bitfield_size) {
3039 			safe_data = btf_show_start_type(show, member_type,
3040 							member->type,
3041 							data + bytes_offset);
3042 			if (safe_data)
3043 				btf_bitfield_show(safe_data,
3044 						  bits8_offset,
3045 						  bitfield_size, show);
3046 			btf_show_end_type(show);
3047 		} else {
3048 			ops = btf_type_ops(member_type);
3049 			ops->show(btf, member_type, member->type,
3050 				  data + bytes_offset, bits8_offset, show);
3051 		}
3052 
3053 		btf_show_end_member(show);
3054 	}
3055 
3056 	btf_show_end_struct_type(show);
3057 }
3058 
3059 static void btf_struct_show(const struct btf *btf, const struct btf_type *t,
3060 			    u32 type_id, void *data, u8 bits_offset,
3061 			    struct btf_show *show)
3062 {
3063 	const struct btf_member *m = show->state.member;
3064 
3065 	/*
3066 	 * First check if any members would be shown (are non-zero).
3067 	 * See comments above "struct btf_show" definition for more
3068 	 * details on how this works at a high-level.
3069 	 */
3070 	if (show->state.depth > 0 && !(show->flags & BTF_SHOW_ZERO)) {
3071 		if (!show->state.depth_check) {
3072 			show->state.depth_check = show->state.depth + 1;
3073 			show->state.depth_to_show = 0;
3074 		}
3075 		__btf_struct_show(btf, t, type_id, data, bits_offset, show);
3076 		/* Restore saved member data here */
3077 		show->state.member = m;
3078 		if (show->state.depth_check != show->state.depth + 1)
3079 			return;
3080 		show->state.depth_check = 0;
3081 
3082 		if (show->state.depth_to_show <= show->state.depth)
3083 			return;
3084 		/*
3085 		 * Reaching here indicates we have recursed and found
3086 		 * non-zero child values.
3087 		 */
3088 	}
3089 
3090 	__btf_struct_show(btf, t, type_id, data, bits_offset, show);
3091 }
3092 
3093 static struct btf_kind_operations struct_ops = {
3094 	.check_meta = btf_struct_check_meta,
3095 	.resolve = btf_struct_resolve,
3096 	.check_member = btf_struct_check_member,
3097 	.check_kflag_member = btf_generic_check_kflag_member,
3098 	.log_details = btf_struct_log,
3099 	.show = btf_struct_show,
3100 };
3101 
3102 static int btf_enum_check_member(struct btf_verifier_env *env,
3103 				 const struct btf_type *struct_type,
3104 				 const struct btf_member *member,
3105 				 const struct btf_type *member_type)
3106 {
3107 	u32 struct_bits_off = member->offset;
3108 	u32 struct_size, bytes_offset;
3109 
3110 	if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
3111 		btf_verifier_log_member(env, struct_type, member,
3112 					"Member is not byte aligned");
3113 		return -EINVAL;
3114 	}
3115 
3116 	struct_size = struct_type->size;
3117 	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
3118 	if (struct_size - bytes_offset < member_type->size) {
3119 		btf_verifier_log_member(env, struct_type, member,
3120 					"Member exceeds struct_size");
3121 		return -EINVAL;
3122 	}
3123 
3124 	return 0;
3125 }
3126 
3127 static int btf_enum_check_kflag_member(struct btf_verifier_env *env,
3128 				       const struct btf_type *struct_type,
3129 				       const struct btf_member *member,
3130 				       const struct btf_type *member_type)
3131 {
3132 	u32 struct_bits_off, nr_bits, bytes_end, struct_size;
3133 	u32 int_bitsize = sizeof(int) * BITS_PER_BYTE;
3134 
3135 	struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset);
3136 	nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset);
3137 	if (!nr_bits) {
3138 		if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
3139 			btf_verifier_log_member(env, struct_type, member,
3140 						"Member is not byte aligned");
3141 			return -EINVAL;
3142 		}
3143 
3144 		nr_bits = int_bitsize;
3145 	} else if (nr_bits > int_bitsize) {
3146 		btf_verifier_log_member(env, struct_type, member,
3147 					"Invalid member bitfield_size");
3148 		return -EINVAL;
3149 	}
3150 
3151 	struct_size = struct_type->size;
3152 	bytes_end = BITS_ROUNDUP_BYTES(struct_bits_off + nr_bits);
3153 	if (struct_size < bytes_end) {
3154 		btf_verifier_log_member(env, struct_type, member,
3155 					"Member exceeds struct_size");
3156 		return -EINVAL;
3157 	}
3158 
3159 	return 0;
3160 }
3161 
3162 static s32 btf_enum_check_meta(struct btf_verifier_env *env,
3163 			       const struct btf_type *t,
3164 			       u32 meta_left)
3165 {
3166 	const struct btf_enum *enums = btf_type_enum(t);
3167 	struct btf *btf = env->btf;
3168 	u16 i, nr_enums;
3169 	u32 meta_needed;
3170 
3171 	nr_enums = btf_type_vlen(t);
3172 	meta_needed = nr_enums * sizeof(*enums);
3173 
3174 	if (meta_left < meta_needed) {
3175 		btf_verifier_log_basic(env, t,
3176 				       "meta_left:%u meta_needed:%u",
3177 				       meta_left, meta_needed);
3178 		return -EINVAL;
3179 	}
3180 
3181 	if (btf_type_kflag(t)) {
3182 		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
3183 		return -EINVAL;
3184 	}
3185 
3186 	if (t->size > 8 || !is_power_of_2(t->size)) {
3187 		btf_verifier_log_type(env, t, "Unexpected size");
3188 		return -EINVAL;
3189 	}
3190 
3191 	/* enum type either no name or a valid one */
3192 	if (t->name_off &&
3193 	    !btf_name_valid_identifier(env->btf, t->name_off)) {
3194 		btf_verifier_log_type(env, t, "Invalid name");
3195 		return -EINVAL;
3196 	}
3197 
3198 	btf_verifier_log_type(env, t, NULL);
3199 
3200 	for (i = 0; i < nr_enums; i++) {
3201 		if (!btf_name_offset_valid(btf, enums[i].name_off)) {
3202 			btf_verifier_log(env, "\tInvalid name_offset:%u",
3203 					 enums[i].name_off);
3204 			return -EINVAL;
3205 		}
3206 
3207 		/* enum member must have a valid name */
3208 		if (!enums[i].name_off ||
3209 		    !btf_name_valid_identifier(btf, enums[i].name_off)) {
3210 			btf_verifier_log_type(env, t, "Invalid name");
3211 			return -EINVAL;
3212 		}
3213 
3214 		if (env->log.level == BPF_LOG_KERNEL)
3215 			continue;
3216 		btf_verifier_log(env, "\t%s val=%d\n",
3217 				 __btf_name_by_offset(btf, enums[i].name_off),
3218 				 enums[i].val);
3219 	}
3220 
3221 	return meta_needed;
3222 }
3223 
3224 static void btf_enum_log(struct btf_verifier_env *env,
3225 			 const struct btf_type *t)
3226 {
3227 	btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
3228 }
3229 
3230 static void btf_enum_show(const struct btf *btf, const struct btf_type *t,
3231 			  u32 type_id, void *data, u8 bits_offset,
3232 			  struct btf_show *show)
3233 {
3234 	const struct btf_enum *enums = btf_type_enum(t);
3235 	u32 i, nr_enums = btf_type_vlen(t);
3236 	void *safe_data;
3237 	int v;
3238 
3239 	safe_data = btf_show_start_type(show, t, type_id, data);
3240 	if (!safe_data)
3241 		return;
3242 
3243 	v = *(int *)safe_data;
3244 
3245 	for (i = 0; i < nr_enums; i++) {
3246 		if (v != enums[i].val)
3247 			continue;
3248 
3249 		btf_show_type_value(show, "%s",
3250 				    __btf_name_by_offset(btf,
3251 							 enums[i].name_off));
3252 
3253 		btf_show_end_type(show);
3254 		return;
3255 	}
3256 
3257 	btf_show_type_value(show, "%d", v);
3258 	btf_show_end_type(show);
3259 }
3260 
3261 static struct btf_kind_operations enum_ops = {
3262 	.check_meta = btf_enum_check_meta,
3263 	.resolve = btf_df_resolve,
3264 	.check_member = btf_enum_check_member,
3265 	.check_kflag_member = btf_enum_check_kflag_member,
3266 	.log_details = btf_enum_log,
3267 	.show = btf_enum_show,
3268 };
3269 
3270 static s32 btf_func_proto_check_meta(struct btf_verifier_env *env,
3271 				     const struct btf_type *t,
3272 				     u32 meta_left)
3273 {
3274 	u32 meta_needed = btf_type_vlen(t) * sizeof(struct btf_param);
3275 
3276 	if (meta_left < meta_needed) {
3277 		btf_verifier_log_basic(env, t,
3278 				       "meta_left:%u meta_needed:%u",
3279 				       meta_left, meta_needed);
3280 		return -EINVAL;
3281 	}
3282 
3283 	if (t->name_off) {
3284 		btf_verifier_log_type(env, t, "Invalid name");
3285 		return -EINVAL;
3286 	}
3287 
3288 	if (btf_type_kflag(t)) {
3289 		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
3290 		return -EINVAL;
3291 	}
3292 
3293 	btf_verifier_log_type(env, t, NULL);
3294 
3295 	return meta_needed;
3296 }
3297 
3298 static void btf_func_proto_log(struct btf_verifier_env *env,
3299 			       const struct btf_type *t)
3300 {
3301 	const struct btf_param *args = (const struct btf_param *)(t + 1);
3302 	u16 nr_args = btf_type_vlen(t), i;
3303 
3304 	btf_verifier_log(env, "return=%u args=(", t->type);
3305 	if (!nr_args) {
3306 		btf_verifier_log(env, "void");
3307 		goto done;
3308 	}
3309 
3310 	if (nr_args == 1 && !args[0].type) {
3311 		/* Only one vararg */
3312 		btf_verifier_log(env, "vararg");
3313 		goto done;
3314 	}
3315 
3316 	btf_verifier_log(env, "%u %s", args[0].type,
3317 			 __btf_name_by_offset(env->btf,
3318 					      args[0].name_off));
3319 	for (i = 1; i < nr_args - 1; i++)
3320 		btf_verifier_log(env, ", %u %s", args[i].type,
3321 				 __btf_name_by_offset(env->btf,
3322 						      args[i].name_off));
3323 
3324 	if (nr_args > 1) {
3325 		const struct btf_param *last_arg = &args[nr_args - 1];
3326 
3327 		if (last_arg->type)
3328 			btf_verifier_log(env, ", %u %s", last_arg->type,
3329 					 __btf_name_by_offset(env->btf,
3330 							      last_arg->name_off));
3331 		else
3332 			btf_verifier_log(env, ", vararg");
3333 	}
3334 
3335 done:
3336 	btf_verifier_log(env, ")");
3337 }
3338 
3339 static struct btf_kind_operations func_proto_ops = {
3340 	.check_meta = btf_func_proto_check_meta,
3341 	.resolve = btf_df_resolve,
3342 	/*
3343 	 * BTF_KIND_FUNC_PROTO cannot be directly referred by
3344 	 * a struct's member.
3345 	 *
3346 	 * It should be a function pointer instead; see the example below.
3347 	 * (i.e. struct's member -> BTF_KIND_PTR -> BTF_KIND_FUNC_PROTO)
3348 	 *
3349 	 * Hence, there is no btf_func_check_member().
3350 	 */
3351 	.check_member = btf_df_check_member,
3352 	.check_kflag_member = btf_df_check_kflag_member,
3353 	.log_details = btf_func_proto_log,
3354 	.show = btf_df_show,
3355 };
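/* Example of the rule in the comment above, using a hypothetical type:
 *
 *	struct dev_ops {
 *		int (*poll)(int budget);
 *	};
 *
 * Here the member 'poll' has a BTF_KIND_PTR type, and only that PTR's
 * referenced type is the BTF_KIND_FUNC_PROTO describing 'int (int)'.
 * A struct member whose type is the FUNC_PROTO itself is rejected by
 * the btf_df_check_member()/btf_df_check_kflag_member() defaults used
 * in func_proto_ops.
 */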
3356 
3357 static s32 btf_func_check_meta(struct btf_verifier_env *env,
3358 			       const struct btf_type *t,
3359 			       u32 meta_left)
3360 {
3361 	if (!t->name_off ||
3362 	    !btf_name_valid_identifier(env->btf, t->name_off)) {
3363 		btf_verifier_log_type(env, t, "Invalid name");
3364 		return -EINVAL;
3365 	}
3366 
3367 	if (btf_type_vlen(t) > BTF_FUNC_GLOBAL) {
3368 		btf_verifier_log_type(env, t, "Invalid func linkage");
3369 		return -EINVAL;
3370 	}
3371 
3372 	if (btf_type_kflag(t)) {
3373 		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
3374 		return -EINVAL;
3375 	}
3376 
3377 	btf_verifier_log_type(env, t, NULL);
3378 
3379 	return 0;
3380 }
3381 
3382 static struct btf_kind_operations func_ops = {
3383 	.check_meta = btf_func_check_meta,
3384 	.resolve = btf_df_resolve,
3385 	.check_member = btf_df_check_member,
3386 	.check_kflag_member = btf_df_check_kflag_member,
3387 	.log_details = btf_ref_type_log,
3388 	.show = btf_df_show,
3389 };
3390 
3391 static s32 btf_var_check_meta(struct btf_verifier_env *env,
3392 			      const struct btf_type *t,
3393 			      u32 meta_left)
3394 {
3395 	const struct btf_var *var;
3396 	u32 meta_needed = sizeof(*var);
3397 
3398 	if (meta_left < meta_needed) {
3399 		btf_verifier_log_basic(env, t,
3400 				       "meta_left:%u meta_needed:%u",
3401 				       meta_left, meta_needed);
3402 		return -EINVAL;
3403 	}
3404 
3405 	if (btf_type_vlen(t)) {
3406 		btf_verifier_log_type(env, t, "vlen != 0");
3407 		return -EINVAL;
3408 	}
3409 
3410 	if (btf_type_kflag(t)) {
3411 		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
3412 		return -EINVAL;
3413 	}
3414 
3415 	if (!t->name_off ||
3416 	    !__btf_name_valid(env->btf, t->name_off)) {
3417 		btf_verifier_log_type(env, t, "Invalid name");
3418 		return -EINVAL;
3419 	}
3420 
3421 	/* A var cannot be of type void */
3422 	if (!t->type || !BTF_TYPE_ID_VALID(t->type)) {
3423 		btf_verifier_log_type(env, t, "Invalid type_id");
3424 		return -EINVAL;
3425 	}
3426 
3427 	var = btf_type_var(t);
3428 	if (var->linkage != BTF_VAR_STATIC &&
3429 	    var->linkage != BTF_VAR_GLOBAL_ALLOCATED) {
3430 		btf_verifier_log_type(env, t, "Linkage not supported");
3431 		return -EINVAL;
3432 	}
3433 
3434 	btf_verifier_log_type(env, t, NULL);
3435 
3436 	return meta_needed;
3437 }
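/* For illustration, a hypothetical global used by a BPF program:
 *
 *	int pkt_count;		(placed in the ".bss" datasec)
 *
 * is described by a BTF_KIND_VAR whose 'type' points at the int type
 * and whose btf_var linkage is BTF_VAR_GLOBAL_ALLOCATED (a 'static'
 * variable would use BTF_VAR_STATIC).  btf_var_check_meta() above
 * rejects anonymous vars, void-typed vars, and any other linkage.
 */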
3438 
3439 static void btf_var_log(struct btf_verifier_env *env, const struct btf_type *t)
3440 {
3441 	const struct btf_var *var = btf_type_var(t);
3442 
3443 	btf_verifier_log(env, "type_id=%u linkage=%u", t->type, var->linkage);
3444 }
3445 
3446 static const struct btf_kind_operations var_ops = {
3447 	.check_meta		= btf_var_check_meta,
3448 	.resolve		= btf_var_resolve,
3449 	.check_member		= btf_df_check_member,
3450 	.check_kflag_member	= btf_df_check_kflag_member,
3451 	.log_details		= btf_var_log,
3452 	.show			= btf_var_show,
3453 };
3454 
3455 static s32 btf_datasec_check_meta(struct btf_verifier_env *env,
3456 				  const struct btf_type *t,
3457 				  u32 meta_left)
3458 {
3459 	const struct btf_var_secinfo *vsi;
3460 	u64 last_vsi_end_off = 0, sum = 0;
3461 	u32 i, meta_needed;
3462 
3463 	meta_needed = btf_type_vlen(t) * sizeof(*vsi);
3464 	if (meta_left < meta_needed) {
3465 		btf_verifier_log_basic(env, t,
3466 				       "meta_left:%u meta_needed:%u",
3467 				       meta_left, meta_needed);
3468 		return -EINVAL;
3469 	}
3470 
3471 	if (!btf_type_vlen(t)) {
3472 		btf_verifier_log_type(env, t, "vlen == 0");
3473 		return -EINVAL;
3474 	}
3475 
3476 	if (!t->size) {
3477 		btf_verifier_log_type(env, t, "size == 0");
3478 		return -EINVAL;
3479 	}
3480 
3481 	if (btf_type_kflag(t)) {
3482 		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
3483 		return -EINVAL;
3484 	}
3485 
3486 	if (!t->name_off ||
3487 	    !btf_name_valid_section(env->btf, t->name_off)) {
3488 		btf_verifier_log_type(env, t, "Invalid name");
3489 		return -EINVAL;
3490 	}
3491 
3492 	btf_verifier_log_type(env, t, NULL);
3493 
3494 	for_each_vsi(i, t, vsi) {
3495 		/* A var cannot be of type void */
3496 		if (!vsi->type || !BTF_TYPE_ID_VALID(vsi->type)) {
3497 			btf_verifier_log_vsi(env, t, vsi,
3498 					     "Invalid type_id");
3499 			return -EINVAL;
3500 		}
3501 
3502 		if (vsi->offset < last_vsi_end_off || vsi->offset >= t->size) {
3503 			btf_verifier_log_vsi(env, t, vsi,
3504 					     "Invalid offset");
3505 			return -EINVAL;
3506 		}
3507 
3508 		if (!vsi->size || vsi->size > t->size) {
3509 			btf_verifier_log_vsi(env, t, vsi,
3510 					     "Invalid size");
3511 			return -EINVAL;
3512 		}
3513 
3514 		last_vsi_end_off = vsi->offset + vsi->size;
3515 		if (last_vsi_end_off > t->size) {
3516 			btf_verifier_log_vsi(env, t, vsi,
3517 					     "Invalid offset+size");
3518 			return -EINVAL;
3519 		}
3520 
3521 		btf_verifier_log_vsi(env, t, vsi, NULL);
3522 		sum += vsi->size;
3523 	}
3524 
3525 	if (t->size < sum) {
3526 		btf_verifier_log_type(env, t, "Invalid btf_info size");
3527 		return -EINVAL;
3528 	}
3529 
3530 	return meta_needed;
3531 }
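/* For illustration, a hypothetical ".data" DATASEC of size 12 holding
 * two variables could carry these btf_var_secinfo entries:
 *
 *	{ .type = <VAR "a">, .offset = 0, .size = 4 }
 *	{ .type = <VAR "b">, .offset = 4, .size = 8 }
 *
 * The loop above checks each entry in order: the referenced type must
 * be a valid non-void type id, each entry must start at or after the
 * end of the previous one and lie inside the section, and neither a
 * single offset+size nor the sum of all sizes may spill past t->size.
 */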
3532 
3533 static int btf_datasec_resolve(struct btf_verifier_env *env,
3534 			       const struct resolve_vertex *v)
3535 {
3536 	const struct btf_var_secinfo *vsi;
3537 	struct btf *btf = env->btf;
3538 	u16 i;
3539 
3540 	env->resolve_mode = RESOLVE_TBD;
3541 	for_each_vsi_from(i, v->next_member, v->t, vsi) {
3542 		u32 var_type_id = vsi->type, type_id, type_size = 0;
3543 		const struct btf_type *var_type = btf_type_by_id(env->btf,
3544 								 var_type_id);
3545 		if (!var_type || !btf_type_is_var(var_type)) {
3546 			btf_verifier_log_vsi(env, v->t, vsi,
3547 					     "Not a VAR kind member");
3548 			return -EINVAL;
3549 		}
3550 
3551 		if (!env_type_is_resolve_sink(env, var_type) &&
3552 		    !env_type_is_resolved(env, var_type_id)) {
3553 			env_stack_set_next_member(env, i + 1);
3554 			return env_stack_push(env, var_type, var_type_id);
3555 		}
3556 
3557 		type_id = var_type->type;
3558 		if (!btf_type_id_size(btf, &type_id, &type_size)) {
3559 			btf_verifier_log_vsi(env, v->t, vsi, "Invalid type");
3560 			return -EINVAL;
3561 		}
3562 
3563 		if (vsi->size < type_size) {
3564 			btf_verifier_log_vsi(env, v->t, vsi, "Invalid size");
3565 			return -EINVAL;
3566 		}
3567 	}
3568 
3569 	env_stack_pop_resolved(env, 0, 0);
3570 	return 0;
3571 }
3572 
3573 static void btf_datasec_log(struct btf_verifier_env *env,
3574 			    const struct btf_type *t)
3575 {
3576 	btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
3577 }
3578 
3579 static void btf_datasec_show(const struct btf *btf,
3580 			     const struct btf_type *t, u32 type_id,
3581 			     void *data, u8 bits_offset,
3582 			     struct btf_show *show)
3583 {
3584 	const struct btf_var_secinfo *vsi;
3585 	const struct btf_type *var;
3586 	u32 i;
3587 
3588 	if (!btf_show_start_type(show, t, type_id, data))
3589 		return;
3590 
3591 	btf_show_type_value(show, "section (\"%s\") = {",
3592 			    __btf_name_by_offset(btf, t->name_off));
3593 	for_each_vsi(i, t, vsi) {
3594 		var = btf_type_by_id(btf, vsi->type);
3595 		if (i)
3596 			btf_show(show, ",");
3597 		btf_type_ops(var)->show(btf, var, vsi->type,
3598 					data + vsi->offset, bits_offset, show);
3599 	}
3600 	btf_show_end_type(show);
3601 }
3602 
3603 static const struct btf_kind_operations datasec_ops = {
3604 	.check_meta		= btf_datasec_check_meta,
3605 	.resolve		= btf_datasec_resolve,
3606 	.check_member		= btf_df_check_member,
3607 	.check_kflag_member	= btf_df_check_kflag_member,
3608 	.log_details		= btf_datasec_log,
3609 	.show			= btf_datasec_show,
3610 };
3611 
3612 static int btf_func_proto_check(struct btf_verifier_env *env,
3613 				const struct btf_type *t)
3614 {
3615 	const struct btf_type *ret_type;
3616 	const struct btf_param *args;
3617 	const struct btf *btf;
3618 	u16 nr_args, i;
3619 	int err;
3620 
3621 	btf = env->btf;
3622 	args = (const struct btf_param *)(t + 1);
3623 	nr_args = btf_type_vlen(t);
3624 
3625 	/* Check func return type which could be "void" (t->type == 0) */
3626 	if (t->type) {
3627 		u32 ret_type_id = t->type;
3628 
3629 		ret_type = btf_type_by_id(btf, ret_type_id);
3630 		if (!ret_type) {
3631 			btf_verifier_log_type(env, t, "Invalid return type");
3632 			return -EINVAL;
3633 		}
3634 
3635 		if (btf_type_needs_resolve(ret_type) &&
3636 		    !env_type_is_resolved(env, ret_type_id)) {
3637 			err = btf_resolve(env, ret_type, ret_type_id);
3638 			if (err)
3639 				return err;
3640 		}
3641 
3642 		/* Ensure the return type is a type that has a size */
3643 		if (!btf_type_id_size(btf, &ret_type_id, NULL)) {
3644 			btf_verifier_log_type(env, t, "Invalid return type");
3645 			return -EINVAL;
3646 		}
3647 	}
3648 
3649 	if (!nr_args)
3650 		return 0;
3651 
3652 	/* Last func arg type_id could be 0 if it is a vararg */
3653 	if (!args[nr_args - 1].type) {
3654 		if (args[nr_args - 1].name_off) {
3655 			btf_verifier_log_type(env, t, "Invalid arg#%u",
3656 					      nr_args);
3657 			return -EINVAL;
3658 		}
3659 		nr_args--;
3660 	}
3661 
3662 	err = 0;
3663 	for (i = 0; i < nr_args; i++) {
3664 		const struct btf_type *arg_type;
3665 		u32 arg_type_id;
3666 
3667 		arg_type_id = args[i].type;
3668 		arg_type = btf_type_by_id(btf, arg_type_id);
3669 		if (!arg_type) {
3670 			btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
3671 			err = -EINVAL;
3672 			break;
3673 		}
3674 
3675 		if (btf_type_is_resolve_source_only(arg_type)) {
3676 			btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
3677 			return -EINVAL;
3678 		}
3679 
3680 		if (args[i].name_off &&
3681 		    (!btf_name_offset_valid(btf, args[i].name_off) ||
3682 		     !btf_name_valid_identifier(btf, args[i].name_off))) {
3683 			btf_verifier_log_type(env, t,
3684 					      "Invalid arg#%u", i + 1);
3685 			err = -EINVAL;
3686 			break;
3687 		}
3688 
3689 		if (btf_type_needs_resolve(arg_type) &&
3690 		    !env_type_is_resolved(env, arg_type_id)) {
3691 			err = btf_resolve(env, arg_type, arg_type_id);
3692 			if (err)
3693 				break;
3694 		}
3695 
3696 		if (!btf_type_id_size(btf, &arg_type_id, NULL)) {
3697 			btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
3698 			err = -EINVAL;
3699 			break;
3700 		}
3701 	}
3702 
3703 	return err;
3704 }
3705 
3706 static int btf_func_check(struct btf_verifier_env *env,
3707 			  const struct btf_type *t)
3708 {
3709 	const struct btf_type *proto_type;
3710 	const struct btf_param *args;
3711 	const struct btf *btf;
3712 	u16 nr_args, i;
3713 
3714 	btf = env->btf;
3715 	proto_type = btf_type_by_id(btf, t->type);
3716 
3717 	if (!proto_type || !btf_type_is_func_proto(proto_type)) {
3718 		btf_verifier_log_type(env, t, "Invalid type_id");
3719 		return -EINVAL;
3720 	}
3721 
3722 	args = (const struct btf_param *)(proto_type + 1);
3723 	nr_args = btf_type_vlen(proto_type);
3724 	for (i = 0; i < nr_args; i++) {
3725 		if (!args[i].name_off && args[i].type) {
3726 			btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
3727 			return -EINVAL;
3728 		}
3729 	}
3730 
3731 	return 0;
3732 }
3733 
3734 static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS] = {
3735 	[BTF_KIND_INT] = &int_ops,
3736 	[BTF_KIND_PTR] = &ptr_ops,
3737 	[BTF_KIND_ARRAY] = &array_ops,
3738 	[BTF_KIND_STRUCT] = &struct_ops,
3739 	[BTF_KIND_UNION] = &struct_ops,
3740 	[BTF_KIND_ENUM] = &enum_ops,
3741 	[BTF_KIND_FWD] = &fwd_ops,
3742 	[BTF_KIND_TYPEDEF] = &modifier_ops,
3743 	[BTF_KIND_VOLATILE] = &modifier_ops,
3744 	[BTF_KIND_CONST] = &modifier_ops,
3745 	[BTF_KIND_RESTRICT] = &modifier_ops,
3746 	[BTF_KIND_FUNC] = &func_ops,
3747 	[BTF_KIND_FUNC_PROTO] = &func_proto_ops,
3748 	[BTF_KIND_VAR] = &var_ops,
3749 	[BTF_KIND_DATASEC] = &datasec_ops,
3750 };
3751 
3752 static s32 btf_check_meta(struct btf_verifier_env *env,
3753 			  const struct btf_type *t,
3754 			  u32 meta_left)
3755 {
3756 	u32 saved_meta_left = meta_left;
3757 	s32 var_meta_size;
3758 
3759 	if (meta_left < sizeof(*t)) {
3760 		btf_verifier_log(env, "[%u] meta_left:%u meta_needed:%zu",
3761 				 env->log_type_id, meta_left, sizeof(*t));
3762 		return -EINVAL;
3763 	}
3764 	meta_left -= sizeof(*t);
3765 
3766 	if (t->info & ~BTF_INFO_MASK) {
3767 		btf_verifier_log(env, "[%u] Invalid btf_info:%x",
3768 				 env->log_type_id, t->info);
3769 		return -EINVAL;
3770 	}
3771 
3772 	if (BTF_INFO_KIND(t->info) > BTF_KIND_MAX ||
3773 	    BTF_INFO_KIND(t->info) == BTF_KIND_UNKN) {
3774 		btf_verifier_log(env, "[%u] Invalid kind:%u",
3775 				 env->log_type_id, BTF_INFO_KIND(t->info));
3776 		return -EINVAL;
3777 	}
3778 
3779 	if (!btf_name_offset_valid(env->btf, t->name_off)) {
3780 		btf_verifier_log(env, "[%u] Invalid name_offset:%u",
3781 				 env->log_type_id, t->name_off);
3782 		return -EINVAL;
3783 	}
3784 
3785 	var_meta_size = btf_type_ops(t)->check_meta(env, t, meta_left);
3786 	if (var_meta_size < 0)
3787 		return var_meta_size;
3788 
3789 	meta_left -= var_meta_size;
3790 
3791 	return saved_meta_left - meta_left;
3792 }
3793 
3794 static int btf_check_all_metas(struct btf_verifier_env *env)
3795 {
3796 	struct btf *btf = env->btf;
3797 	struct btf_header *hdr;
3798 	void *cur, *end;
3799 
3800 	hdr = &btf->hdr;
3801 	cur = btf->nohdr_data + hdr->type_off;
3802 	end = cur + hdr->type_len;
3803 
3804 	env->log_type_id = 1;
3805 	while (cur < end) {
3806 		struct btf_type *t = cur;
3807 		s32 meta_size;
3808 
3809 		meta_size = btf_check_meta(env, t, end - cur);
3810 		if (meta_size < 0)
3811 			return meta_size;
3812 
3813 		btf_add_type(env, t);
3814 		cur += meta_size;
3815 		env->log_type_id++;
3816 	}
3817 
3818 	return 0;
3819 }
3820 
3821 static bool btf_resolve_valid(struct btf_verifier_env *env,
3822 			      const struct btf_type *t,
3823 			      u32 type_id)
3824 {
3825 	struct btf *btf = env->btf;
3826 
3827 	if (!env_type_is_resolved(env, type_id))
3828 		return false;
3829 
3830 	if (btf_type_is_struct(t) || btf_type_is_datasec(t))
3831 		return !btf->resolved_ids[type_id] &&
3832 		       !btf->resolved_sizes[type_id];
3833 
3834 	if (btf_type_is_modifier(t) || btf_type_is_ptr(t) ||
3835 	    btf_type_is_var(t)) {
3836 		t = btf_type_id_resolve(btf, &type_id);
3837 		return t &&
3838 		       !btf_type_is_modifier(t) &&
3839 		       !btf_type_is_var(t) &&
3840 		       !btf_type_is_datasec(t);
3841 	}
3842 
3843 	if (btf_type_is_array(t)) {
3844 		const struct btf_array *array = btf_type_array(t);
3845 		const struct btf_type *elem_type;
3846 		u32 elem_type_id = array->type;
3847 		u32 elem_size;
3848 
3849 		elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
3850 		return elem_type && !btf_type_is_modifier(elem_type) &&
3851 			(array->nelems * elem_size ==
3852 			 btf->resolved_sizes[type_id]);
3853 	}
3854 
3855 	return false;
3856 }
3857 
3858 static int btf_resolve(struct btf_verifier_env *env,
3859 		       const struct btf_type *t, u32 type_id)
3860 {
3861 	u32 save_log_type_id = env->log_type_id;
3862 	const struct resolve_vertex *v;
3863 	int err = 0;
3864 
3865 	env->resolve_mode = RESOLVE_TBD;
3866 	env_stack_push(env, t, type_id);
3867 	while (!err && (v = env_stack_peak(env))) {
3868 		env->log_type_id = v->type_id;
3869 		err = btf_type_ops(v->t)->resolve(env, v);
3870 	}
3871 
3872 	env->log_type_id = type_id;
3873 	if (err == -E2BIG) {
3874 		btf_verifier_log_type(env, t,
3875 				      "Exceeded max resolving depth:%u",
3876 				      MAX_RESOLVE_DEPTH);
3877 	} else if (err == -EEXIST) {
3878 		btf_verifier_log_type(env, t, "Loop detected");
3879 	}
3880 
3881 	/* Final sanity check */
3882 	if (!err && !btf_resolve_valid(env, t, type_id)) {
3883 		btf_verifier_log_type(env, t, "Invalid resolve state");
3884 		err = -EINVAL;
3885 	}
3886 
3887 	env->log_type_id = save_log_type_id;
3888 	return err;
3889 }
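/* For illustration, resolving a hypothetical member type chain
 *
 *	PTR -> CONST -> TYPEDEF -> INT
 *
 * pushes each not-yet-resolved type onto env's stack and pops it once
 * the type it refers to is resolved, so that btf_type_id_size() can
 * later answer size queries without re-walking the modifiers.  A chain
 * that refers back to itself fails with -EEXIST ("Loop detected"), and
 * one deeper than MAX_RESOLVE_DEPTH fails with -E2BIG.
 */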
3890 
3891 static int btf_check_all_types(struct btf_verifier_env *env)
3892 {
3893 	struct btf *btf = env->btf;
3894 	u32 type_id;
3895 	int err;
3896 
3897 	err = env_resolve_init(env);
3898 	if (err)
3899 		return err;
3900 
3901 	env->phase++;
3902 	for (type_id = 1; type_id <= btf->nr_types; type_id++) {
3903 		const struct btf_type *t = btf_type_by_id(btf, type_id);
3904 
3905 		env->log_type_id = type_id;
3906 		if (btf_type_needs_resolve(t) &&
3907 		    !env_type_is_resolved(env, type_id)) {
3908 			err = btf_resolve(env, t, type_id);
3909 			if (err)
3910 				return err;
3911 		}
3912 
3913 		if (btf_type_is_func_proto(t)) {
3914 			err = btf_func_proto_check(env, t);
3915 			if (err)
3916 				return err;
3917 		}
3918 
3919 		if (btf_type_is_func(t)) {
3920 			err = btf_func_check(env, t);
3921 			if (err)
3922 				return err;
3923 		}
3924 	}
3925 
3926 	return 0;
3927 }
3928 
3929 static int btf_parse_type_sec(struct btf_verifier_env *env)
3930 {
3931 	const struct btf_header *hdr = &env->btf->hdr;
3932 	int err;
3933 
3934 	/* Type section must align to 4 bytes */
3935 	if (hdr->type_off & (sizeof(u32) - 1)) {
3936 		btf_verifier_log(env, "Unaligned type_off");
3937 		return -EINVAL;
3938 	}
3939 
3940 	if (!hdr->type_len) {
3941 		btf_verifier_log(env, "No type found");
3942 		return -EINVAL;
3943 	}
3944 
3945 	err = btf_check_all_metas(env);
3946 	if (err)
3947 		return err;
3948 
3949 	return btf_check_all_types(env);
3950 }
3951 
3952 static int btf_parse_str_sec(struct btf_verifier_env *env)
3953 {
3954 	const struct btf_header *hdr;
3955 	struct btf *btf = env->btf;
3956 	const char *start, *end;
3957 
3958 	hdr = &btf->hdr;
3959 	start = btf->nohdr_data + hdr->str_off;
3960 	end = start + hdr->str_len;
3961 
3962 	if (end != btf->data + btf->data_size) {
3963 		btf_verifier_log(env, "String section is not at the end");
3964 		return -EINVAL;
3965 	}
3966 
3967 	if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_NAME_OFFSET ||
3968 	    start[0] || end[-1]) {
3969 		btf_verifier_log(env, "Invalid string section");
3970 		return -EINVAL;
3971 	}
3972 
3973 	btf->strings = start;
3974 
3975 	return 0;
3976 }
3977 
3978 static const size_t btf_sec_info_offset[] = {
3979 	offsetof(struct btf_header, type_off),
3980 	offsetof(struct btf_header, str_off),
3981 };
3982 
3983 static int btf_sec_info_cmp(const void *a, const void *b)
3984 {
3985 	const struct btf_sec_info *x = a;
3986 	const struct btf_sec_info *y = b;
3987 
3988 	return (int)(x->off - y->off) ? : (int)(x->len - y->len);
3989 }
3990 
3991 static int btf_check_sec_info(struct btf_verifier_env *env,
3992 			      u32 btf_data_size)
3993 {
3994 	struct btf_sec_info secs[ARRAY_SIZE(btf_sec_info_offset)];
3995 	u32 total, expected_total, i;
3996 	const struct btf_header *hdr;
3997 	const struct btf *btf;
3998 
3999 	btf = env->btf;
4000 	hdr = &btf->hdr;
4001 
4002 	/* Populate the secs from hdr */
4003 	for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++)
4004 		secs[i] = *(struct btf_sec_info *)((void *)hdr +
4005 						   btf_sec_info_offset[i]);
4006 
4007 	sort(secs, ARRAY_SIZE(btf_sec_info_offset),
4008 	     sizeof(struct btf_sec_info), btf_sec_info_cmp, NULL);
4009 
4010 	/* Check for gaps and overlap among sections */
4011 	total = 0;
4012 	expected_total = btf_data_size - hdr->hdr_len;
4013 	for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++) {
4014 		if (expected_total < secs[i].off) {
4015 			btf_verifier_log(env, "Invalid section offset");
4016 			return -EINVAL;
4017 		}
4018 		if (total < secs[i].off) {
4019 			/* gap */
4020 			btf_verifier_log(env, "Unsupported section found");
4021 			return -EINVAL;
4022 		}
4023 		if (total > secs[i].off) {
4024 			btf_verifier_log(env, "Section overlap found");
4025 			return -EINVAL;
4026 		}
4027 		if (expected_total - total < secs[i].len) {
4028 			btf_verifier_log(env,
4029 					 "Total section length too long");
4030 			return -EINVAL;
4031 		}
4032 		total += secs[i].len;
4033 	}
4034 
4035 	/* There is data other than hdr and known sections */
4036 	if (expected_total != total) {
4037 		btf_verifier_log(env, "Unsupported section found");
4038 		return -EINVAL;
4039 	}
4040 
4041 	return 0;
4042 }
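/* For illustration, a well-formed header for a hypothetical 100-byte
 * blob with a 24-byte header could look like:
 *
 *	hdr_len  = 24
 *	type_off = 0,  type_len = 52
 *	str_off  = 52, str_len  = 24
 *
 * After sorting the sections by offset, the loop above verifies that
 * they tile the 76 bytes following the header exactly: no gaps, no
 * overlap, and no trailing bytes the kernel does not understand.
 */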
4043 
4044 static int btf_parse_hdr(struct btf_verifier_env *env)
4045 {
4046 	u32 hdr_len, hdr_copy, btf_data_size;
4047 	const struct btf_header *hdr;
4048 	struct btf *btf;
4049 	int err;
4050 
4051 	btf = env->btf;
4052 	btf_data_size = btf->data_size;
4053 
4054 	if (btf_data_size <
4055 	    offsetof(struct btf_header, hdr_len) + sizeof(hdr->hdr_len)) {
4056 		btf_verifier_log(env, "hdr_len not found");
4057 		return -EINVAL;
4058 	}
4059 
4060 	hdr = btf->data;
4061 	hdr_len = hdr->hdr_len;
4062 	if (btf_data_size < hdr_len) {
4063 		btf_verifier_log(env, "btf_header not found");
4064 		return -EINVAL;
4065 	}
4066 
4067 	/* Ensure the unsupported header fields are zero */
4068 	if (hdr_len > sizeof(btf->hdr)) {
4069 		u8 *expected_zero = btf->data + sizeof(btf->hdr);
4070 		u8 *end = btf->data + hdr_len;
4071 
4072 		for (; expected_zero < end; expected_zero++) {
4073 			if (*expected_zero) {
4074 				btf_verifier_log(env, "Unsupported btf_header");
4075 				return -E2BIG;
4076 			}
4077 		}
4078 	}
4079 
4080 	hdr_copy = min_t(u32, hdr_len, sizeof(btf->hdr));
4081 	memcpy(&btf->hdr, btf->data, hdr_copy);
4082 
4083 	hdr = &btf->hdr;
4084 
4085 	btf_verifier_log_hdr(env, btf_data_size);
4086 
4087 	if (hdr->magic != BTF_MAGIC) {
4088 		btf_verifier_log(env, "Invalid magic");
4089 		return -EINVAL;
4090 	}
4091 
4092 	if (hdr->version != BTF_VERSION) {
4093 		btf_verifier_log(env, "Unsupported version");
4094 		return -ENOTSUPP;
4095 	}
4096 
4097 	if (hdr->flags) {
4098 		btf_verifier_log(env, "Unsupported flags");
4099 		return -ENOTSUPP;
4100 	}
4101 
4102 	if (btf_data_size == hdr->hdr_len) {
4103 		btf_verifier_log(env, "No data");
4104 		return -EINVAL;
4105 	}
4106 
4107 	err = btf_check_sec_info(env, btf_data_size);
4108 	if (err)
4109 		return err;
4110 
4111 	return 0;
4112 }
4113 
4114 static struct btf *btf_parse(void __user *btf_data, u32 btf_data_size,
4115 			     u32 log_level, char __user *log_ubuf, u32 log_size)
4116 {
4117 	struct btf_verifier_env *env = NULL;
4118 	struct bpf_verifier_log *log;
4119 	struct btf *btf = NULL;
4120 	u8 *data;
4121 	int err;
4122 
4123 	if (btf_data_size > BTF_MAX_SIZE)
4124 		return ERR_PTR(-E2BIG);
4125 
4126 	env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
4127 	if (!env)
4128 		return ERR_PTR(-ENOMEM);
4129 
4130 	log = &env->log;
4131 	if (log_level || log_ubuf || log_size) {
4132 		/* user requested verbose verifier output
4133 		 * and supplied buffer to store the verification trace
4134 		 */
4135 		log->level = log_level;
4136 		log->ubuf = log_ubuf;
4137 		log->len_total = log_size;
4138 
4139 		/* log attributes have to be sane */
4140 		if (!bpf_verifier_log_attr_valid(log)) {
4141 			err = -EINVAL;
4142 			goto errout;
4143 		}
4144 	}
4145 
4146 	btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN);
4147 	if (!btf) {
4148 		err = -ENOMEM;
4149 		goto errout;
4150 	}
4151 	env->btf = btf;
4152 
4153 	data = kvmalloc(btf_data_size, GFP_KERNEL | __GFP_NOWARN);
4154 	if (!data) {
4155 		err = -ENOMEM;
4156 		goto errout;
4157 	}
4158 
4159 	btf->data = data;
4160 	btf->data_size = btf_data_size;
4161 
4162 	if (copy_from_user(data, btf_data, btf_data_size)) {
4163 		err = -EFAULT;
4164 		goto errout;
4165 	}
4166 
4167 	err = btf_parse_hdr(env);
4168 	if (err)
4169 		goto errout;
4170 
4171 	btf->nohdr_data = btf->data + btf->hdr.hdr_len;
4172 
4173 	err = btf_parse_str_sec(env);
4174 	if (err)
4175 		goto errout;
4176 
4177 	err = btf_parse_type_sec(env);
4178 	if (err)
4179 		goto errout;
4180 
4181 	if (log->level && bpf_verifier_log_full(log)) {
4182 		err = -ENOSPC;
4183 		goto errout;
4184 	}
4185 
4186 	btf_verifier_env_free(env);
4187 	refcount_set(&btf->refcnt, 1);
4188 	return btf;
4189 
4190 errout:
4191 	btf_verifier_env_free(env);
4192 	if (btf)
4193 		btf_free(btf);
4194 	return ERR_PTR(err);
4195 }
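/* A minimal user-space sketch of how this parser is reached, assuming
 * a raw bpf(2) call (ptr_to_u64() being the usual cast-to-__u64
 * helper) rather than a libbpf wrapper:
 *
 *	union bpf_attr attr = {};
 *
 *	attr.btf           = ptr_to_u64(btf_blob);
 *	attr.btf_size      = btf_blob_size;
 *	attr.btf_log_level = 1;			(log fields are optional)
 *	attr.btf_log_buf   = ptr_to_u64(log_buf);
 *	attr.btf_log_size  = sizeof(log_buf);
 *
 *	btf_fd = syscall(__NR_bpf, BPF_BTF_LOAD, &attr, sizeof(attr));
 *
 * The returned fd holds the reference taken by refcount_set() above;
 * btf_parse() runs once per BPF_BTF_LOAD command.
 */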
4196 
4197 extern char __weak __start_BTF[];
4198 extern char __weak __stop_BTF[];
4199 extern struct btf *btf_vmlinux;
4200 
4201 #define BPF_MAP_TYPE(_id, _ops)
4202 #define BPF_LINK_TYPE(_id, _name)
4203 static union {
4204 	struct bpf_ctx_convert {
4205 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
4206 	prog_ctx_type _id##_prog; \
4207 	kern_ctx_type _id##_kern;
4208 #include <linux/bpf_types.h>
4209 #undef BPF_PROG_TYPE
4210 	} *__t;
4211 	/* 't' is written once under lock. Read many times. */
4212 	const struct btf_type *t;
4213 } bpf_ctx_convert;
4214 enum {
4215 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
4216 	__ctx_convert##_id,
4217 #include <linux/bpf_types.h>
4218 #undef BPF_PROG_TYPE
4219 	__ctx_convert_unused, /* to avoid empty enum in extreme .config */
4220 };
4221 static u8 bpf_ctx_convert_map[] = {
4222 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
4223 	[_id] = __ctx_convert##_id,
4224 #include <linux/bpf_types.h>
4225 #undef BPF_PROG_TYPE
4226 	0, /* avoid empty array */
4227 };
4228 #undef BPF_MAP_TYPE
4229 #undef BPF_LINK_TYPE
4230 
4231 static const struct btf_member *
4232 btf_get_prog_ctx_type(struct bpf_verifier_log *log, struct btf *btf,
4233 		      const struct btf_type *t, enum bpf_prog_type prog_type,
4234 		      int arg)
4235 {
4236 	const struct btf_type *conv_struct;
4237 	const struct btf_type *ctx_struct;
4238 	const struct btf_member *ctx_type;
4239 	const char *tname, *ctx_tname;
4240 
4241 	conv_struct = bpf_ctx_convert.t;
4242 	if (!conv_struct) {
4243 		bpf_log(log, "btf_vmlinux is malformed\n");
4244 		return NULL;
4245 	}
4246 	t = btf_type_by_id(btf, t->type);
4247 	while (btf_type_is_modifier(t))
4248 		t = btf_type_by_id(btf, t->type);
4249 	if (!btf_type_is_struct(t)) {
4250 		/* Only pointer to struct is supported for now.
4251 		 * That means that BPF_PROG_TYPE_TRACEPOINT with BTF
4252 		 * is not supported yet.
4253 		 * BPF_PROG_TYPE_RAW_TRACEPOINT is fine.
4254 		 */
4255 		if (log->level & BPF_LOG_LEVEL)
4256 			bpf_log(log, "arg#%d type is not a struct\n", arg);
4257 		return NULL;
4258 	}
4259 	tname = btf_name_by_offset(btf, t->name_off);
4260 	if (!tname) {
4261 		bpf_log(log, "arg#%d struct doesn't have a name\n", arg);
4262 		return NULL;
4263 	}
4264 	/* prog_type is valid bpf program type. No need for bounds check. */
4265 	ctx_type = btf_type_member(conv_struct) + bpf_ctx_convert_map[prog_type] * 2;
4266 	/* ctx_struct is a pointer to prog_ctx_type in vmlinux.
4267 	 * Like 'struct __sk_buff'
4268 	 */
4269 	ctx_struct = btf_type_by_id(btf_vmlinux, ctx_type->type);
4270 	if (!ctx_struct)
4271 		/* should not happen */
4272 		return NULL;
4273 again:
4274 	ctx_tname = btf_name_by_offset(btf_vmlinux, ctx_struct->name_off);
4275 	if (!ctx_tname) {
4276 		/* should not happen */
4277 		bpf_log(log, "Please fix kernel include/linux/bpf_types.h\n");
4278 		return NULL;
4279 	}
4280 	/* only check that the prog's ctx type name matches what the
4281 	 * kernel expects. No need to compare field by field.
4282 	 * It's ok for bpf prog to do:
4283 	 * struct __sk_buff {};
4284 	 * int socket_filter_bpf_prog(struct __sk_buff *skb)
4285 	 * { // no fields of skb are ever used }
4286 	 */
4287 	if (strcmp(ctx_tname, tname)) {
4288 		/* bpf_user_pt_regs_t is a typedef, so resolve it to
4289 		 * underlying struct and check name again
4290 		 */
4291 		if (!btf_type_is_modifier(ctx_struct))
4292 			return NULL;
4293 		while (btf_type_is_modifier(ctx_struct))
4294 			ctx_struct = btf_type_by_id(btf_vmlinux, ctx_struct->type);
4295 		goto again;
4296 	}
4297 	return ctx_type;
4298 }
4299 
4300 static const struct bpf_map_ops * const btf_vmlinux_map_ops[] = {
4301 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
4302 #define BPF_LINK_TYPE(_id, _name)
4303 #define BPF_MAP_TYPE(_id, _ops) \
4304 	[_id] = &_ops,
4305 #include <linux/bpf_types.h>
4306 #undef BPF_PROG_TYPE
4307 #undef BPF_LINK_TYPE
4308 #undef BPF_MAP_TYPE
4309 };
4310 
4311 static int btf_vmlinux_map_ids_init(const struct btf *btf,
4312 				    struct bpf_verifier_log *log)
4313 {
4314 	const struct bpf_map_ops *ops;
4315 	int i, btf_id;
4316 
4317 	for (i = 0; i < ARRAY_SIZE(btf_vmlinux_map_ops); ++i) {
4318 		ops = btf_vmlinux_map_ops[i];
4319 		if (!ops || (!ops->map_btf_name && !ops->map_btf_id))
4320 			continue;
4321 		if (!ops->map_btf_name || !ops->map_btf_id) {
4322 			bpf_log(log, "map type %d is misconfigured\n", i);
4323 			return -EINVAL;
4324 		}
4325 		btf_id = btf_find_by_name_kind(btf, ops->map_btf_name,
4326 					       BTF_KIND_STRUCT);
4327 		if (btf_id < 0)
4328 			return btf_id;
4329 		*ops->map_btf_id = btf_id;
4330 	}
4331 
4332 	return 0;
4333 }
4334 
4335 static int btf_translate_to_vmlinux(struct bpf_verifier_log *log,
4336 				     struct btf *btf,
4337 				     const struct btf_type *t,
4338 				     enum bpf_prog_type prog_type,
4339 				     int arg)
4340 {
4341 	const struct btf_member *prog_ctx_type, *kern_ctx_type;
4342 
4343 	prog_ctx_type = btf_get_prog_ctx_type(log, btf, t, prog_type, arg);
4344 	if (!prog_ctx_type)
4345 		return -ENOENT;
4346 	kern_ctx_type = prog_ctx_type + 1;
4347 	return kern_ctx_type->type;
4348 }
4349 
4350 BTF_ID_LIST(bpf_ctx_convert_btf_id)
4351 BTF_ID(struct, bpf_ctx_convert)
4352 
4353 struct btf *btf_parse_vmlinux(void)
4354 {
4355 	struct btf_verifier_env *env = NULL;
4356 	struct bpf_verifier_log *log;
4357 	struct btf *btf = NULL;
4358 	int err;
4359 
4360 	env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
4361 	if (!env)
4362 		return ERR_PTR(-ENOMEM);
4363 
4364 	log = &env->log;
4365 	log->level = BPF_LOG_KERNEL;
4366 
4367 	btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN);
4368 	if (!btf) {
4369 		err = -ENOMEM;
4370 		goto errout;
4371 	}
4372 	env->btf = btf;
4373 
4374 	btf->data = __start_BTF;
4375 	btf->data_size = __stop_BTF - __start_BTF;
4376 
4377 	err = btf_parse_hdr(env);
4378 	if (err)
4379 		goto errout;
4380 
4381 	btf->nohdr_data = btf->data + btf->hdr.hdr_len;
4382 
4383 	err = btf_parse_str_sec(env);
4384 	if (err)
4385 		goto errout;
4386 
4387 	err = btf_check_all_metas(env);
4388 	if (err)
4389 		goto errout;
4390 
4391 	/* btf_parse_vmlinux() runs under bpf_verifier_lock */
4392 	bpf_ctx_convert.t = btf_type_by_id(btf, bpf_ctx_convert_btf_id[0]);
4393 
4394 	/* find bpf map structs for map_ptr access checking */
4395 	err = btf_vmlinux_map_ids_init(btf, log);
4396 	if (err < 0)
4397 		goto errout;
4398 
4399 	bpf_struct_ops_init(btf, log);
4400 
4401 	btf_verifier_env_free(env);
4402 	refcount_set(&btf->refcnt, 1);
4403 	return btf;
4404 
4405 errout:
4406 	btf_verifier_env_free(env);
4407 	if (btf) {
4408 		kvfree(btf->types);
4409 		kfree(btf);
4410 	}
4411 	return ERR_PTR(err);
4412 }
4413 
4414 struct btf *bpf_prog_get_target_btf(const struct bpf_prog *prog)
4415 {
4416 	struct bpf_prog *tgt_prog = prog->aux->dst_prog;
4417 
4418 	if (tgt_prog) {
4419 		return tgt_prog->aux->btf;
4420 	} else {
4421 		return btf_vmlinux;
4422 	}
4423 }
4424 
4425 static bool is_string_ptr(struct btf *btf, const struct btf_type *t)
4426 {
4427 	/* t comes in already as a pointer */
4428 	t = btf_type_by_id(btf, t->type);
4429 
4430 	/* allow const */
4431 	if (BTF_INFO_KIND(t->info) == BTF_KIND_CONST)
4432 		t = btf_type_by_id(btf, t->type);
4433 
4434 	/* char, signed char, unsigned char */
4435 	return btf_type_is_int(t) && t->size == 1;
4436 }
4437 
4438 bool btf_ctx_access(int off, int size, enum bpf_access_type type,
4439 		    const struct bpf_prog *prog,
4440 		    struct bpf_insn_access_aux *info)
4441 {
4442 	const struct btf_type *t = prog->aux->attach_func_proto;
4443 	struct bpf_prog *tgt_prog = prog->aux->dst_prog;
4444 	struct btf *btf = bpf_prog_get_target_btf(prog);
4445 	const char *tname = prog->aux->attach_func_name;
4446 	struct bpf_verifier_log *log = info->log;
4447 	const struct btf_param *args;
4448 	u32 nr_args, arg;
4449 	int i, ret;
4450 
4451 	if (off % 8) {
4452 		bpf_log(log, "func '%s' offset %d is not multiple of 8\n",
4453 			tname, off);
4454 		return false;
4455 	}
4456 	arg = off / 8;
4457 	args = (const struct btf_param *)(t + 1);
4458 	/* if (t == NULL) Fall back to default BPF prog with 5 u64 arguments */
4459 	nr_args = t ? btf_type_vlen(t) : 5;
4460 	if (prog->aux->attach_btf_trace) {
4461 		/* skip first 'void *__data' argument in btf_trace_##name typedef */
4462 		args++;
4463 		nr_args--;
4464 	}
4465 
4466 	if (arg > nr_args) {
4467 		bpf_log(log, "func '%s' doesn't have %d-th argument\n",
4468 			tname, arg + 1);
4469 		return false;
4470 	}
4471 
4472 	if (arg == nr_args) {
4473 		switch (prog->expected_attach_type) {
4474 		case BPF_LSM_MAC:
4475 		case BPF_TRACE_FEXIT:
4476 			/* When LSM programs are attached to void LSM hooks
4477 			 * they use FEXIT trampolines and when attached to
4478 			 * int LSM hooks, they use MODIFY_RETURN trampolines.
4479 			 *
4480 			 * While the LSM programs are BPF_MODIFY_RETURN-like
4481 			 * the check:
4482 			 *
4483 			 *	if (ret_type != 'int')
4484 			 *		return -EINVAL;
4485 			 *
4486 			 * is _not_ done here. This is still safe as LSM hooks
4487 			 * have only void and int return types.
4488 			 */
4489 			if (!t)
4490 				return true;
4491 			t = btf_type_by_id(btf, t->type);
4492 			break;
4493 		case BPF_MODIFY_RETURN:
4494 			/* For now the BPF_MODIFY_RETURN can only be attached to
4495 			 * functions that return an int.
4496 			 */
4497 			if (!t)
4498 				return false;
4499 
4500 			t = btf_type_skip_modifiers(btf, t->type, NULL);
4501 			if (!btf_type_is_small_int(t)) {
4502 				bpf_log(log,
4503 					"ret type %s not allowed for fmod_ret\n",
4504 					btf_kind_str[BTF_INFO_KIND(t->info)]);
4505 				return false;
4506 			}
4507 			break;
4508 		default:
4509 			bpf_log(log, "func '%s' doesn't have %d-th argument\n",
4510 				tname, arg + 1);
4511 			return false;
4512 		}
4513 	} else {
4514 		if (!t)
4515 			/* Default prog with 5 args */
4516 			return true;
4517 		t = btf_type_by_id(btf, args[arg].type);
4518 	}
4519 
4520 	/* skip modifiers */
4521 	while (btf_type_is_modifier(t))
4522 		t = btf_type_by_id(btf, t->type);
4523 	if (btf_type_is_small_int(t) || btf_type_is_enum(t))
4524 		/* accessing a scalar */
4525 		return true;
4526 	if (!btf_type_is_ptr(t)) {
4527 		bpf_log(log,
4528 			"func '%s' arg%d '%s' has type %s. Only pointer access is allowed\n",
4529 			tname, arg,
4530 			__btf_name_by_offset(btf, t->name_off),
4531 			btf_kind_str[BTF_INFO_KIND(t->info)]);
4532 		return false;
4533 	}
4534 
4535 	/* check for PTR_TO_RDONLY_BUF_OR_NULL or PTR_TO_RDWR_BUF_OR_NULL */
4536 	for (i = 0; i < prog->aux->ctx_arg_info_size; i++) {
4537 		const struct bpf_ctx_arg_aux *ctx_arg_info = &prog->aux->ctx_arg_info[i];
4538 
4539 		if (ctx_arg_info->offset == off &&
4540 		    (ctx_arg_info->reg_type == PTR_TO_RDONLY_BUF_OR_NULL ||
4541 		     ctx_arg_info->reg_type == PTR_TO_RDWR_BUF_OR_NULL)) {
4542 			info->reg_type = ctx_arg_info->reg_type;
4543 			return true;
4544 		}
4545 	}
4546 
4547 	if (t->type == 0)
4548 		/* This is a pointer to void.
4549 		 * It is the same as scalar from the verifier safety pov.
4550 		 * No further pointer walking is allowed.
4551 		 */
4552 		return true;
4553 
4554 	if (is_string_ptr(btf, t))
4555 		return true;
4556 
4557 	/* this is a pointer to another type */
4558 	for (i = 0; i < prog->aux->ctx_arg_info_size; i++) {
4559 		const struct bpf_ctx_arg_aux *ctx_arg_info = &prog->aux->ctx_arg_info[i];
4560 
4561 		if (ctx_arg_info->offset == off) {
4562 			info->reg_type = ctx_arg_info->reg_type;
4563 			info->btf_id = ctx_arg_info->btf_id;
4564 			return true;
4565 		}
4566 	}
4567 
4568 	info->reg_type = PTR_TO_BTF_ID;
4569 	if (tgt_prog) {
4570 		enum bpf_prog_type tgt_type;
4571 
4572 		if (tgt_prog->type == BPF_PROG_TYPE_EXT)
4573 			tgt_type = tgt_prog->aux->saved_dst_prog_type;
4574 		else
4575 			tgt_type = tgt_prog->type;
4576 
4577 		ret = btf_translate_to_vmlinux(log, btf, t, tgt_type, arg);
4578 		if (ret > 0) {
4579 			info->btf_id = ret;
4580 			return true;
4581 		} else {
4582 			return false;
4583 		}
4584 	}
4585 
4586 	info->btf_id = t->type;
4587 	t = btf_type_by_id(btf, t->type);
4588 	/* skip modifiers */
4589 	while (btf_type_is_modifier(t)) {
4590 		info->btf_id = t->type;
4591 		t = btf_type_by_id(btf, t->type);
4592 	}
4593 	if (!btf_type_is_struct(t)) {
4594 		bpf_log(log,
4595 			"func '%s' arg%d type %s is not a struct\n",
4596 			tname, arg, btf_kind_str[BTF_INFO_KIND(t->info)]);
4597 		return false;
4598 	}
4599 	bpf_log(log, "func '%s' arg%d has btf_id %d type %s '%s'\n",
4600 		tname, arg, info->btf_id, btf_kind_str[BTF_INFO_KIND(t->info)],
4601 		__btf_name_by_offset(btf, t->name_off));
4602 	return true;
4603 }
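/* For illustration, suppose an fentry program attaches to a kernel
 * function with the hypothetical prototype
 *
 *	int handle_frame(struct sk_buff *skb, unsigned int len);
 *
 * Every argument occupies one 8-byte ctx slot, so a load from ctx
 * offset 0 resolves to arg0 and gets reg_type PTR_TO_BTF_ID with the
 * btf_id of 'struct sk_buff', while a load from offset 8 resolves to
 * the scalar 'len'.  An access at offset nr_args * 8 is only accepted
 * for the return-value cases handled in the switch above.
 */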
4604 
4605 enum bpf_struct_walk_result {
4606 	/* < 0 error */
4607 	WALK_SCALAR = 0,
4608 	WALK_PTR,
4609 	WALK_STRUCT,
4610 };
4611 
4612 static int btf_struct_walk(struct bpf_verifier_log *log,
4613 			   const struct btf_type *t, int off, int size,
4614 			   u32 *next_btf_id)
4615 {
4616 	u32 i, moff, mtrue_end, msize = 0, total_nelems = 0;
4617 	const struct btf_type *mtype, *elem_type = NULL;
4618 	const struct btf_member *member;
4619 	const char *tname, *mname;
4620 	u32 vlen, elem_id, mid;
4621 
4622 again:
4623 	tname = __btf_name_by_offset(btf_vmlinux, t->name_off);
4624 	if (!btf_type_is_struct(t)) {
4625 		bpf_log(log, "Type '%s' is not a struct\n", tname);
4626 		return -EINVAL;
4627 	}
4628 
4629 	vlen = btf_type_vlen(t);
4630 	if (off + size > t->size) {
4631 		/* If the last element is a variable size array, we may
4632 		 * need to relax the rule.
4633 		 */
4634 		struct btf_array *array_elem;
4635 
4636 		if (vlen == 0)
4637 			goto error;
4638 
4639 		member = btf_type_member(t) + vlen - 1;
4640 		mtype = btf_type_skip_modifiers(btf_vmlinux, member->type,
4641 						NULL);
4642 		if (!btf_type_is_array(mtype))
4643 			goto error;
4644 
4645 		array_elem = (struct btf_array *)(mtype + 1);
4646 		if (array_elem->nelems != 0)
4647 			goto error;
4648 
4649 		moff = btf_member_bit_offset(t, member) / 8;
4650 		if (off < moff)
4651 			goto error;
4652 
4653 		/* Only allow structure for now, can be relaxed for
4654 		 * other types later.
4655 		 */
4656 		t = btf_type_skip_modifiers(btf_vmlinux, array_elem->type,
4657 					    NULL);
4658 		if (!btf_type_is_struct(t))
4659 			goto error;
4660 
4661 		off = (off - moff) % t->size;
4662 		goto again;
4663 
4664 error:
4665 		bpf_log(log, "access beyond struct %s at off %u size %u\n",
4666 			tname, off, size);
4667 		return -EACCES;
4668 	}
4669 
4670 	for_each_member(i, t, member) {
4671 		/* offset of the field in bytes */
4672 		moff = btf_member_bit_offset(t, member) / 8;
4673 		if (off + size <= moff)
4674 			/* won't find anything, field is already too far */
4675 			break;
4676 
4677 		if (btf_member_bitfield_size(t, member)) {
4678 			u32 end_bit = btf_member_bit_offset(t, member) +
4679 				btf_member_bitfield_size(t, member);
4680 
4681 			/* off <= moff instead of off == moff because clang
4682 			 * does not generate a BTF member for anonymous
4683 			 * bitfield like the ":16" here:
4684 			 * struct {
4685 			 *	int :16;
4686 			 *	int x:8;
4687 			 * };
4688 			 */
4689 			if (off <= moff &&
4690 			    BITS_ROUNDUP_BYTES(end_bit) <= off + size)
4691 				return WALK_SCALAR;
4692 
4693 			/* off may be accessing a following member
4694 			 *
4695 			 * or
4696 			 *
4697 			 * Doing partial access at either end of this
4698 			 * bitfield.  Continue on this case also to
4699 			 * treat it as not accessing this bitfield
4700 			 * and eventually error out as field not
4701 			 * found to keep it simple.
4702 			 * It could be relaxed if there was a legit
4703 			 * partial access case later.
4704 			 */
4705 			continue;
4706 		}
4707 
4708 		/* In case "off" is pointing into a hole of the struct */
4709 		if (off < moff)
4710 			break;
4711 
4712 		/* type of the field */
4713 		mid = member->type;
4714 		mtype = btf_type_by_id(btf_vmlinux, member->type);
4715 		mname = __btf_name_by_offset(btf_vmlinux, member->name_off);
4716 
4717 		mtype = __btf_resolve_size(btf_vmlinux, mtype, &msize,
4718 					   &elem_type, &elem_id, &total_nelems,
4719 					   &mid);
4720 		if (IS_ERR(mtype)) {
4721 			bpf_log(log, "field %s doesn't have size\n", mname);
4722 			return -EFAULT;
4723 		}
4724 
4725 		mtrue_end = moff + msize;
4726 		if (off >= mtrue_end)
4727 			/* no overlap with member, keep iterating */
4728 			continue;
4729 
4730 		if (btf_type_is_array(mtype)) {
4731 			u32 elem_idx;
4732 
4733 			/* __btf_resolve_size() above helps to
4734 			 * linearize a multi-dimensional array.
4735 			 *
4736 			 * The logic here is treating an array
4737 			 * in a struct as the following way:
4738 			 *
4739 			 * struct outer {
4740 			 *	struct inner array[2][2];
4741 			 * };
4742 			 *
4743 			 * looks like:
4744 			 *
4745 			 * struct outer {
4746 			 *	struct inner array_elem0;
4747 			 *	struct inner array_elem1;
4748 			 *	struct inner array_elem2;
4749 			 *	struct inner array_elem3;
4750 			 * };
4751 			 *
4752 			 * When accessing outer->array[1][0], it moves
4753 			 * moff to "array_elem2", set mtype to
4754 			 * "struct inner", and msize also becomes
4755 			 * sizeof(struct inner).  Then most of the
4756 			 * remaining logic will fall through without
4757 			 * caring whether the current member is an array
4758 			 * or not.
4759 			 *
4760 			 * Unlike mtype/msize/moff, mtrue_end does not
4761 			 * change.  The naming difference ("_true") tells
4762 			 * that it does not always correspond to
4763 			 * the current mtype/msize/moff.
4764 			 * It is the true end of the current
4765 			 * member (i.e. array in this case).  That
4766 			 * will allow an int array to be accessed like
4767 			 * a scratch space,
4768 			 * i.e. allow access beyond the size of
4769 			 *      the array's element as long as it is
4770 			 *      within the mtrue_end boundary.
4771 			 */
4772 
4773 			/* skip empty array */
4774 			if (moff == mtrue_end)
4775 				continue;
4776 
4777 			msize /= total_nelems;
4778 			elem_idx = (off - moff) / msize;
4779 			moff += elem_idx * msize;
4780 			mtype = elem_type;
4781 			mid = elem_id;
4782 		}
4783 
4784 		/* the 'off' we're looking for is either equal to start
4785 		 * of this field or inside of this struct
4786 		 */
4787 		if (btf_type_is_struct(mtype)) {
4788 			/* our field must be inside that union or struct */
4789 			t = mtype;
4790 
4791 			/* return if the offset matches the member offset */
4792 			if (off == moff) {
4793 				*next_btf_id = mid;
4794 				return WALK_STRUCT;
4795 			}
4796 
4797 			/* adjust offset we're looking for */
4798 			off -= moff;
4799 			goto again;
4800 		}
4801 
4802 		if (btf_type_is_ptr(mtype)) {
4803 			const struct btf_type *stype;
4804 			u32 id;
4805 
4806 			if (msize != size || off != moff) {
4807 				bpf_log(log,
4808 					"cannot access ptr member %s with moff %u in struct %s with off %u size %u\n",
4809 					mname, moff, tname, off, size);
4810 				return -EACCES;
4811 			}
4812 			stype = btf_type_skip_modifiers(btf_vmlinux, mtype->type, &id);
4813 			if (btf_type_is_struct(stype)) {
4814 				*next_btf_id = id;
4815 				return WALK_PTR;
4816 			}
4817 		}
4818 
4819 		/* Allow more flexible access within an int as long as
4820 		 * it is within mtrue_end.
4821 		 * Since mtrue_end could be the end of an array,
4822 		 * that also allows using an array of int as a scratch
4823 		 * space. e.g. skb->cb[].
4824 		 */
4825 		if (off + size > mtrue_end) {
4826 			bpf_log(log,
4827 				"access beyond the end of member %s (mend:%u) in struct %s with off %u size %u\n",
4828 				mname, mtrue_end, tname, off, size);
4829 			return -EACCES;
4830 		}
4831 
4832 		return WALK_SCALAR;
4833 	}
4834 	bpf_log(log, "struct %s doesn't have field at offset %d\n", tname, off);
4835 	return -EINVAL;
4836 }
4837 
4838 int btf_struct_access(struct bpf_verifier_log *log,
4839 		      const struct btf_type *t, int off, int size,
4840 		      enum bpf_access_type atype __maybe_unused,
4841 		      u32 *next_btf_id)
4842 {
4843 	int err;
4844 	u32 id;
4845 
4846 	do {
4847 		err = btf_struct_walk(log, t, off, size, &id);
4848 
4849 		switch (err) {
4850 		case WALK_PTR:
4851 			/* If we found the pointer or scalar on t+off,
4852 			 * we're done.
4853 			 */
4854 			*next_btf_id = id;
4855 			return PTR_TO_BTF_ID;
4856 		case WALK_SCALAR:
4857 			return SCALAR_VALUE;
4858 		case WALK_STRUCT:
4859 			/* We found nested struct, so continue the search
4860 			 * by diving in it. At this point the offset is
4861 			 * aligned with the new type, so set it to 0.
4862 			 */
4863 			t = btf_type_by_id(btf_vmlinux, id);
4864 			off = 0;
4865 			break;
4866 		default:
4867 			/* It's either an error or an unknown return value.
4868 			 * scream and leave.
4869 			 */
4870 			if (WARN_ONCE(err > 0, "unknown btf_struct_walk return value"))
4871 				return -EINVAL;
4872 			return err;
4873 		}
4874 	} while (t);
4875 
4876 	return -EINVAL;
4877 }
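/* For illustration, walking a hypothetical pair of structs
 *
 *	struct inner { int counter; };
 *	struct outer {
 *		struct inner  stats;	(offset 0)
 *		struct inner *next;	(offset 8)
 *	};
 *
 * a 4-byte read at off=0 descends into 'stats' (WALK_STRUCT, off reset
 * to 0) and then lands on 'counter' (WALK_SCALAR -> SCALAR_VALUE); an
 * 8-byte read at off=8 hits the pointer member and returns
 * PTR_TO_BTF_ID with *next_btf_id set to 'struct inner'.
 */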
4878 
4879 bool btf_struct_ids_match(struct bpf_verifier_log *log,
4880 			  int off, u32 id, u32 need_type_id)
4881 {
4882 	const struct btf_type *type;
4883 	int err;
4884 
4885 	/* Are we already done? */
4886 	if (need_type_id == id && off == 0)
4887 		return true;
4888 
4889 again:
4890 	type = btf_type_by_id(btf_vmlinux, id);
4891 	if (!type)
4892 		return false;
4893 	err = btf_struct_walk(log, type, off, 1, &id);
4894 	if (err != WALK_STRUCT)
4895 		return false;
4896 
4897 	/* We found nested struct object. If it matches
4898 	 * the requested ID, we're done. Otherwise let's
4899 	 * continue the search with offset 0 in the new
4900 	 * type.
4901 	 */
4902 	if (need_type_id != id) {
4903 		off = 0;
4904 		goto again;
4905 	}
4906 
4907 	return true;
4908 }
4909 
4910 static int __get_type_size(struct btf *btf, u32 btf_id,
4911 			   const struct btf_type **bad_type)
4912 {
4913 	const struct btf_type *t;
4914 
4915 	if (!btf_id)
4916 		/* void */
4917 		return 0;
4918 	t = btf_type_by_id(btf, btf_id);
4919 	while (t && btf_type_is_modifier(t))
4920 		t = btf_type_by_id(btf, t->type);
4921 	if (!t) {
4922 		*bad_type = btf->types[0];
4923 		return -EINVAL;
4924 	}
4925 	if (btf_type_is_ptr(t))
4926 		/* kernel size of pointer. Not BPF's size of pointer */
4927 		return sizeof(void *);
4928 	if (btf_type_is_int(t) || btf_type_is_enum(t))
4929 		return t->size;
4930 	*bad_type = t;
4931 	return -EINVAL;
4932 }
4933 
4934 int btf_distill_func_proto(struct bpf_verifier_log *log,
4935 			   struct btf *btf,
4936 			   const struct btf_type *func,
4937 			   const char *tname,
4938 			   struct btf_func_model *m)
4939 {
4940 	const struct btf_param *args;
4941 	const struct btf_type *t;
4942 	u32 i, nargs;
4943 	int ret;
4944 
4945 	if (!func) {
4946 		/* BTF function prototype doesn't match the verifier types.
4947 		 * Fall back to 5 u64 args.
4948 		 */
4949 		for (i = 0; i < 5; i++)
4950 			m->arg_size[i] = 8;
4951 		m->ret_size = 8;
4952 		m->nr_args = 5;
4953 		return 0;
4954 	}
4955 	args = (const struct btf_param *)(func + 1);
4956 	nargs = btf_type_vlen(func);
4957 	if (nargs >= MAX_BPF_FUNC_ARGS) {
4958 		bpf_log(log,
4959 			"The function %s has %d arguments. Too many.\n",
4960 			tname, nargs);
4961 		return -EINVAL;
4962 	}
4963 	ret = __get_type_size(btf, func->type, &t);
4964 	if (ret < 0) {
4965 		bpf_log(log,
4966 			"The function %s return type %s is unsupported.\n",
4967 			tname, btf_kind_str[BTF_INFO_KIND(t->info)]);
4968 		return -EINVAL;
4969 	}
4970 	m->ret_size = ret;
4971 
4972 	for (i = 0; i < nargs; i++) {
4973 		if (i == nargs - 1 && args[i].type == 0) {
4974 			bpf_log(log,
4975 				"The function %s with variable args is unsupported.\n",
4976 				tname);
4977 			return -EINVAL;
4978 		}
4979 		ret = __get_type_size(btf, args[i].type, &t);
4980 		if (ret < 0) {
4981 			bpf_log(log,
4982 				"The function %s arg%d type %s is unsupported.\n",
4983 				tname, i, btf_kind_str[BTF_INFO_KIND(t->info)]);
4984 			return -EINVAL;
4985 		}
4986 		if (ret == 0) {
4987 			bpf_log(log,
4988 				"The function %s has malformed void argument.\n",
4989 				tname);
4990 			return -EINVAL;
4991 		}
4992 		m->arg_size[i] = ret;
4993 	}
4994 	m->nr_args = nargs;
4995 	return 0;
4996 }
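/* For illustration, distilling the hypothetical prototype
 *
 *	u64 dev_hook(int idx, struct net_device *dev);
 *
 * fills the model with nr_args=2, ret_size=8 and arg_size={4, 8}: ints
 * and enums contribute their own size, any pointer contributes
 * sizeof(void *), and the 5 x u64 fallback above is used only when no
 * BTF prototype is available.
 */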
4997 
4998 /* Compare BTFs of two functions assuming only scalars and pointers to context.
4999  * t1 points to BTF_KIND_FUNC in btf1
5000  * t2 points to BTF_KIND_FUNC in btf2
5001  * Returns:
5002  * EINVAL - function prototype mismatch
5003  * EFAULT - verifier bug
5004  * 0 - 99% match. The last 1% is validated by the verifier.
5005  */
5006 static int btf_check_func_type_match(struct bpf_verifier_log *log,
5007 				     struct btf *btf1, const struct btf_type *t1,
5008 				     struct btf *btf2, const struct btf_type *t2)
5009 {
5010 	const struct btf_param *args1, *args2;
5011 	const char *fn1, *fn2, *s1, *s2;
5012 	u32 nargs1, nargs2, i;
5013 
5014 	fn1 = btf_name_by_offset(btf1, t1->name_off);
5015 	fn2 = btf_name_by_offset(btf2, t2->name_off);
5016 
5017 	if (btf_func_linkage(t1) != BTF_FUNC_GLOBAL) {
5018 		bpf_log(log, "%s() is not a global function\n", fn1);
5019 		return -EINVAL;
5020 	}
5021 	if (btf_func_linkage(t2) != BTF_FUNC_GLOBAL) {
5022 		bpf_log(log, "%s() is not a global function\n", fn2);
5023 		return -EINVAL;
5024 	}
5025 
5026 	t1 = btf_type_by_id(btf1, t1->type);
5027 	if (!t1 || !btf_type_is_func_proto(t1))
5028 		return -EFAULT;
5029 	t2 = btf_type_by_id(btf2, t2->type);
5030 	if (!t2 || !btf_type_is_func_proto(t2))
5031 		return -EFAULT;
5032 
5033 	args1 = (const struct btf_param *)(t1 + 1);
5034 	nargs1 = btf_type_vlen(t1);
5035 	args2 = (const struct btf_param *)(t2 + 1);
5036 	nargs2 = btf_type_vlen(t2);
5037 
5038 	if (nargs1 != nargs2) {
5039 		bpf_log(log, "%s() has %d args while %s() has %d args\n",
5040 			fn1, nargs1, fn2, nargs2);
5041 		return -EINVAL;
5042 	}
5043 
5044 	t1 = btf_type_skip_modifiers(btf1, t1->type, NULL);
5045 	t2 = btf_type_skip_modifiers(btf2, t2->type, NULL);
5046 	if (t1->info != t2->info) {
5047 		bpf_log(log,
5048 			"Return type %s of %s() doesn't match type %s of %s()\n",
5049 			btf_type_str(t1), fn1,
5050 			btf_type_str(t2), fn2);
5051 		return -EINVAL;
5052 	}
5053 
5054 	for (i = 0; i < nargs1; i++) {
5055 		t1 = btf_type_skip_modifiers(btf1, args1[i].type, NULL);
5056 		t2 = btf_type_skip_modifiers(btf2, args2[i].type, NULL);
5057 
5058 		if (t1->info != t2->info) {
5059 			bpf_log(log, "arg%d in %s() is %s while %s() has %s\n",
5060 				i, fn1, btf_type_str(t1),
5061 				fn2, btf_type_str(t2));
5062 			return -EINVAL;
5063 		}
5064 		if (btf_type_has_size(t1) && t1->size != t2->size) {
5065 			bpf_log(log,
5066 				"arg%d in %s() has size %d while %s() has %d\n",
5067 				i, fn1, t1->size,
5068 				fn2, t2->size);
5069 			return -EINVAL;
5070 		}
5071 
5072 		/* global functions are validated with scalars and pointers
5073 		 * to context only. And only global functions can be replaced.
5074 		 * Hence type check only those types.
5075 		 */
5076 		if (btf_type_is_int(t1) || btf_type_is_enum(t1))
5077 			continue;
5078 		if (!btf_type_is_ptr(t1)) {
5079 			bpf_log(log,
5080 				"arg%d in %s() has unrecognized type\n",
5081 				i, fn1);
5082 			return -EINVAL;
5083 		}
5084 		t1 = btf_type_skip_modifiers(btf1, t1->type, NULL);
5085 		t2 = btf_type_skip_modifiers(btf2, t2->type, NULL);
5086 		if (!btf_type_is_struct(t1)) {
5087 			bpf_log(log,
5088 				"arg%d in %s() is not a pointer to context\n",
5089 				i, fn1);
5090 			return -EINVAL;
5091 		}
5092 		if (!btf_type_is_struct(t2)) {
5093 			bpf_log(log,
5094 				"arg%d in %s() is not a pointer to context\n",
5095 				i, fn2);
5096 			return -EINVAL;
5097 		}
5098 		/* This is an optional check to make program writing easier.
5099 		 * Compare names of structs and report an error to the user.
5100 		 * btf_prepare_func_args() already checked that t2 struct
5101 		 * is a context type. btf_prepare_func_args() will check
5102 		 * later that t1 struct is a context type as well.
5103 		 */
5104 		s1 = btf_name_by_offset(btf1, t1->name_off);
5105 		s2 = btf_name_by_offset(btf2, t2->name_off);
5106 		if (strcmp(s1, s2)) {
5107 			bpf_log(log,
5108 				"arg%d %s(struct %s *) doesn't match %s(struct %s *)\n",
5109 				i, fn1, s1, fn2, s2);
5110 			return -EINVAL;
5111 		}
5112 	}
5113 	return 0;
5114 }
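/* Illustrative sketch (hypothetical names, not from the original source):
 * when an extension program provides
 *
 *	int handle_pkt(struct xdp_md *ctx, int flags);	(btf1)
 *
 * to replace a target's global function with the same prototype in btf2,
 * the checks above succeed and 0 is returned.  If one side instead
 * declared the second argument as 'long flags', both sides would still be
 * BTF_KIND_INT (same t->info), but the btf_type_has_size() comparison
 * above would catch the 8-vs-4 byte difference and return -EINVAL.
 */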
5115 
5116 /* Compare BTFs of given program with BTF of target program */
5117 int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog,
5118 			 struct btf *btf2, const struct btf_type *t2)
5119 {
5120 	struct btf *btf1 = prog->aux->btf;
5121 	const struct btf_type *t1;
5122 	u32 btf_id = 0;
5123 
5124 	if (!prog->aux->func_info) {
5125 		bpf_log(log, "Program extension requires BTF\n");
5126 		return -EINVAL;
5127 	}
5128 
5129 	btf_id = prog->aux->func_info[0].type_id;
5130 	if (!btf_id)
5131 		return -EFAULT;
5132 
5133 	t1 = btf_type_by_id(btf1, btf_id);
5134 	if (!t1 || !btf_type_is_func(t1))
5135 		return -EFAULT;
5136 
5137 	return btf_check_func_type_match(log, btf1, t1, btf2, t2);
5138 }
5139 
5140 /* Compare BTF of a function with given bpf_reg_state.
5141  * Returns:
5142  * EFAULT - there is a verifier bug. Abort verification.
5143  * EINVAL - there is a type mismatch or BTF is not available.
5144  * 0 - BTF matches with what bpf_reg_state expects.
5145  * Only PTR_TO_CTX and SCALAR_VALUE states are recognized.
5146  */
5147 int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog,
5148 			     struct bpf_reg_state *reg)
5149 {
5150 	struct bpf_verifier_log *log = &env->log;
5151 	struct bpf_prog *prog = env->prog;
5152 	struct btf *btf = prog->aux->btf;
5153 	const struct btf_param *args;
5154 	const struct btf_type *t;
5155 	u32 i, nargs, btf_id;
5156 	const char *tname;
5157 
5158 	if (!prog->aux->func_info)
5159 		return -EINVAL;
5160 
5161 	btf_id = prog->aux->func_info[subprog].type_id;
5162 	if (!btf_id)
5163 		return -EFAULT;
5164 
5165 	if (prog->aux->func_info_aux[subprog].unreliable)
5166 		return -EINVAL;
5167 
5168 	t = btf_type_by_id(btf, btf_id);
5169 	if (!t || !btf_type_is_func(t)) {
5170 		/* These checks were already done by the verifier while loading
5171 		 * struct bpf_func_info
5172 		 */
5173 		bpf_log(log, "BTF of func#%d doesn't point to KIND_FUNC\n",
5174 			subprog);
5175 		return -EFAULT;
5176 	}
5177 	tname = btf_name_by_offset(btf, t->name_off);
5178 
5179 	t = btf_type_by_id(btf, t->type);
5180 	if (!t || !btf_type_is_func_proto(t)) {
5181 		bpf_log(log, "Invalid BTF of func %s\n", tname);
5182 		return -EFAULT;
5183 	}
5184 	args = (const struct btf_param *)(t + 1);
5185 	nargs = btf_type_vlen(t);
5186 	if (nargs > 5) {
5187 		bpf_log(log, "Function %s has %d > 5 args\n", tname, nargs);
5188 		goto out;
5189 	}
5190 	/* check that BTF function arguments match actual types that the
5191 	 * verifier sees.
5192 	 */
5193 	for (i = 0; i < nargs; i++) {
5194 		t = btf_type_by_id(btf, args[i].type);
5195 		while (btf_type_is_modifier(t))
5196 			t = btf_type_by_id(btf, t->type);
5197 		if (btf_type_is_int(t) || btf_type_is_enum(t)) {
5198 			if (reg[i + 1].type == SCALAR_VALUE)
5199 				continue;
5200 			bpf_log(log, "R%d is not a scalar\n", i + 1);
5201 			goto out;
5202 		}
5203 		if (btf_type_is_ptr(t)) {
5204 			if (reg[i + 1].type == SCALAR_VALUE) {
5205 				bpf_log(log, "R%d is not a pointer\n", i + 1);
5206 				goto out;
5207 			}
5208 			/* If function expects ctx type in BTF check that caller
5209 			 * is passing PTR_TO_CTX.
5210 			 */
5211 			if (btf_get_prog_ctx_type(log, btf, t, prog->type, i)) {
5212 				if (reg[i + 1].type != PTR_TO_CTX) {
5213 					bpf_log(log,
5214 						"arg#%d expected pointer to ctx, but got %s\n",
5215 						i, btf_kind_str[BTF_INFO_KIND(t->info)]);
5216 					goto out;
5217 				}
5218 				if (check_ctx_reg(env, &reg[i + 1], i + 1))
5219 					goto out;
5220 				continue;
5221 			}
5222 		}
5223 		bpf_log(log, "Unrecognized arg#%d type %s\n",
5224 			i, btf_kind_str[BTF_INFO_KIND(t->info)]);
5225 		goto out;
5226 	}
5227 	return 0;
5228 out:
5229 	/* Compiler optimizations can remove arguments from static functions
5230 	 * or mismatched type can be passed into a global function.
5231 	 * In such cases mark the function as unreliable from BTF point of view.
5232 	 */
5233 	prog->aux->func_info_aux[subprog].unreliable = true;
5234 	return -EINVAL;
5235 }
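/* Illustrative note (hypothetical subprogram, not from the original
 * source): for a call to
 *
 *	static int sub(struct __sk_buff *skb, int len);
 *
 * in a sched_cls program, the loop above expects reg[1] (R1) to be
 * PTR_TO_CTX and reg[2] (R2) to be SCALAR_VALUE at the call site.
 * Anything else marks the subprog's BTF as unreliable and returns
 * -EINVAL.
 */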
5236 
5237 /* Convert BTF of a function into bpf_reg_state if possible
5238  * Returns:
5239  * EFAULT - there is a verifier bug. Abort verification.
5240  * EINVAL - cannot convert BTF.
5241  * 0 - Successfully converted BTF into bpf_reg_state
5242  * (either PTR_TO_CTX or SCALAR_VALUE).
5243  */
5244 int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
5245 			  struct bpf_reg_state *reg)
5246 {
5247 	struct bpf_verifier_log *log = &env->log;
5248 	struct bpf_prog *prog = env->prog;
5249 	enum bpf_prog_type prog_type = prog->type;
5250 	struct btf *btf = prog->aux->btf;
5251 	const struct btf_param *args;
5252 	const struct btf_type *t;
5253 	u32 i, nargs, btf_id;
5254 	const char *tname;
5255 
5256 	if (!prog->aux->func_info ||
5257 	    prog->aux->func_info_aux[subprog].linkage != BTF_FUNC_GLOBAL) {
5258 		bpf_log(log, "Verifier bug\n");
5259 		return -EFAULT;
5260 	}
5261 
5262 	btf_id = prog->aux->func_info[subprog].type_id;
5263 	if (!btf_id) {
5264 		bpf_log(log, "Global functions need valid BTF\n");
5265 		return -EFAULT;
5266 	}
5267 
5268 	t = btf_type_by_id(btf, btf_id);
5269 	if (!t || !btf_type_is_func(t)) {
5270 		/* These checks were already done by the verifier while loading
5271 		 * struct bpf_func_info
5272 		 */
5273 		bpf_log(log, "BTF of func#%d doesn't point to KIND_FUNC\n",
5274 			subprog);
5275 		return -EFAULT;
5276 	}
5277 	tname = btf_name_by_offset(btf, t->name_off);
5278 
5279 	if (log->level & BPF_LOG_LEVEL)
5280 		bpf_log(log, "Validating %s() func#%d...\n",
5281 			tname, subprog);
5282 
5283 	if (prog->aux->func_info_aux[subprog].unreliable) {
5284 		bpf_log(log, "Verifier bug in function %s()\n", tname);
5285 		return -EFAULT;
5286 	}
5287 	if (prog_type == BPF_PROG_TYPE_EXT)
5288 		prog_type = prog->aux->dst_prog->type;
5289 
5290 	t = btf_type_by_id(btf, t->type);
5291 	if (!t || !btf_type_is_func_proto(t)) {
5292 		bpf_log(log, "Invalid type of function %s()\n", tname);
5293 		return -EFAULT;
5294 	}
5295 	args = (const struct btf_param *)(t + 1);
5296 	nargs = btf_type_vlen(t);
5297 	if (nargs > 5) {
5298 		bpf_log(log, "Global function %s() with %d > 5 args. Buggy compiler.\n",
5299 			tname, nargs);
5300 		return -EINVAL;
5301 	}
5302 	/* check that function returns int */
5303 	t = btf_type_by_id(btf, t->type);
5304 	while (btf_type_is_modifier(t))
5305 		t = btf_type_by_id(btf, t->type);
5306 	if (!btf_type_is_int(t) && !btf_type_is_enum(t)) {
5307 		bpf_log(log,
5308 			"Global function %s() doesn't return scalar. Only those are supported.\n",
5309 			tname);
5310 		return -EINVAL;
5311 	}
5312 	/* Convert BTF function arguments into verifier types.
5313 	 * Only PTR_TO_CTX and SCALAR are supported atm.
5314 	 */
5315 	for (i = 0; i < nargs; i++) {
5316 		t = btf_type_by_id(btf, args[i].type);
5317 		while (btf_type_is_modifier(t))
5318 			t = btf_type_by_id(btf, t->type);
5319 		if (btf_type_is_int(t) || btf_type_is_enum(t)) {
5320 			reg[i + 1].type = SCALAR_VALUE;
5321 			continue;
5322 		}
5323 		if (btf_type_is_ptr(t) &&
5324 		    btf_get_prog_ctx_type(log, btf, t, prog_type, i)) {
5325 			reg[i + 1].type = PTR_TO_CTX;
5326 			continue;
5327 		}
5328 		bpf_log(log, "Arg#%d type %s in %s() is not supported yet.\n",
5329 			i, btf_kind_str[BTF_INFO_KIND(t->info)], tname);
5330 		return -EINVAL;
5331 	}
5332 	return 0;
5333 }
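/* Illustrative note (hypothetical function, not from the original source):
 * a global subprogram such as
 *
 *	int global_func(struct xdp_md *ctx, int flags);
 *
 * in an XDP program is converted above to reg[1].type = PTR_TO_CTX and
 * reg[2].type = SCALAR_VALUE, which is the register state the verifier
 * starts from when it validates the function body independently.
 */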
5334 
5335 static void btf_type_show(const struct btf *btf, u32 type_id, void *obj,
5336 			  struct btf_show *show)
5337 {
5338 	const struct btf_type *t = btf_type_by_id(btf, type_id);
5339 
5340 	show->btf = btf;
5341 	memset(&show->state, 0, sizeof(show->state));
5342 	memset(&show->obj, 0, sizeof(show->obj));
5343 
5344 	btf_type_ops(t)->show(btf, t, type_id, obj, 0, show);
5345 }
5346 
5347 static void btf_seq_show(struct btf_show *show, const char *fmt,
5348 			 va_list args)
5349 {
5350 	seq_vprintf((struct seq_file *)show->target, fmt, args);
5351 }
5352 
5353 int btf_type_seq_show_flags(const struct btf *btf, u32 type_id,
5354 			    void *obj, struct seq_file *m, u64 flags)
5355 {
5356 	struct btf_show sseq;
5357 
5358 	sseq.target = m;
5359 	sseq.showfn = btf_seq_show;
5360 	sseq.flags = flags;
5361 
5362 	btf_type_show(btf, type_id, obj, &sseq);
5363 
5364 	return sseq.state.status;
5365 }
5366 
5367 void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj,
5368 		       struct seq_file *m)
5369 {
5370 	(void) btf_type_seq_show_flags(btf, type_id, obj, m,
5371 				       BTF_SHOW_NONAME | BTF_SHOW_COMPACT |
5372 				       BTF_SHOW_ZERO | BTF_SHOW_UNSAFE);
5373 }
5374 
5375 struct btf_show_snprintf {
5376 	struct btf_show show;
5377 	int len_left;		/* space left in string */
5378 	int len;		/* length we would have written */
5379 };
5380 
5381 static void btf_snprintf_show(struct btf_show *show, const char *fmt,
5382 			      va_list args)
5383 {
5384 	struct btf_show_snprintf *ssnprintf = (struct btf_show_snprintf *)show;
5385 	int len;
5386 
5387 	len = vsnprintf(show->target, ssnprintf->len_left, fmt, args);
5388 
5389 	if (len < 0) {
5390 		ssnprintf->len_left = 0;
5391 		ssnprintf->len = len;
5392 	} else if (len > ssnprintf->len_left) {
5393 		/* no space, drive on to get length we would have written */
5394 		ssnprintf->len_left = 0;
5395 		ssnprintf->len += len;
5396 	} else {
5397 		ssnprintf->len_left -= len;
5398 		ssnprintf->len += len;
5399 		show->target += len;
5400 	}
5401 }
5402 
5403 int btf_type_snprintf_show(const struct btf *btf, u32 type_id, void *obj,
5404 			   char *buf, int len, u64 flags)
5405 {
5406 	struct btf_show_snprintf ssnprintf;
5407 
5408 	ssnprintf.show.target = buf;
5409 	ssnprintf.show.flags = flags;
5410 	ssnprintf.show.showfn = btf_snprintf_show;
5411 	ssnprintf.len_left = len;
5412 	ssnprintf.len = 0;
5413 
5414 	btf_type_show(btf, type_id, obj, (struct btf_show *)&ssnprintf);
5415 
5416 	/* If we encountered an error, return it. */
5417 	if (ssnprintf.show.state.status)
5418 		return ssnprintf.show.state.status;
5419 
5420 	/* Otherwise return length we would have written */
5421 	return ssnprintf.len;
5422 }
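/* Usage sketch (illustrative, not from the original source): like
 * snprintf(), a non-negative return value is the length that would have
 * been written, so a caller can detect truncation:
 *
 *	ret = btf_type_snprintf_show(btf, type_id, obj, buf, sizeof(buf),
 *				     BTF_SHOW_COMPACT);
 *	if (ret < 0)
 *		return ret;		(show machinery reported an error)
 *	if (ret >= sizeof(buf))
 *		...the dump in buf was truncated...
 */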
5423 
5424 #ifdef CONFIG_PROC_FS
5425 static void bpf_btf_show_fdinfo(struct seq_file *m, struct file *filp)
5426 {
5427 	const struct btf *btf = filp->private_data;
5428 
5429 	seq_printf(m, "btf_id:\t%u\n", btf->id);
5430 }
5431 #endif
5432 
5433 static int btf_release(struct inode *inode, struct file *filp)
5434 {
5435 	btf_put(filp->private_data);
5436 	return 0;
5437 }
5438 
5439 const struct file_operations btf_fops = {
5440 #ifdef CONFIG_PROC_FS
5441 	.show_fdinfo	= bpf_btf_show_fdinfo,
5442 #endif
5443 	.release	= btf_release,
5444 };
5445 
5446 static int __btf_new_fd(struct btf *btf)
5447 {
5448 	return anon_inode_getfd("btf", &btf_fops, btf, O_RDONLY | O_CLOEXEC);
5449 }
5450 
5451 int btf_new_fd(const union bpf_attr *attr)
5452 {
5453 	struct btf *btf;
5454 	int ret;
5455 
5456 	btf = btf_parse(u64_to_user_ptr(attr->btf),
5457 			attr->btf_size, attr->btf_log_level,
5458 			u64_to_user_ptr(attr->btf_log_buf),
5459 			attr->btf_log_size);
5460 	if (IS_ERR(btf))
5461 		return PTR_ERR(btf);
5462 
5463 	ret = btf_alloc_id(btf);
5464 	if (ret) {
5465 		btf_free(btf);
5466 		return ret;
5467 	}
5468 
5469 	/*
5470 	 * The BTF ID is published to the userspace.
5471 	 * All BTF free must go through call_rcu() from
5472 	 * now on (i.e. free by calling btf_put()).
5473 	 */
5474 
5475 	ret = __btf_new_fd(btf);
5476 	if (ret < 0)
5477 		btf_put(btf);
5478 
5479 	return ret;
5480 }
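/* Userspace sketch (illustrative, not from the original source): this is
 * the kernel side of the BPF_BTF_LOAD command, which a loader may invoke
 * roughly as follows with the raw bpf(2) syscall (libbpf wraps this):
 *
 *	union bpf_attr attr = {};
 *
 *	attr.btf	   = (__u64)(unsigned long)btf_data;
 *	attr.btf_size	   = btf_data_size;
 *	attr.btf_log_buf   = (__u64)(unsigned long)log_buf;
 *	attr.btf_log_size  = sizeof(log_buf);
 *	attr.btf_log_level = 1;
 *
 *	btf_fd = syscall(__NR_bpf, BPF_BTF_LOAD, &attr, sizeof(attr));
 */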
5481 
5482 struct btf *btf_get_by_fd(int fd)
5483 {
5484 	struct btf *btf;
5485 	struct fd f;
5486 
5487 	f = fdget(fd);
5488 
5489 	if (!f.file)
5490 		return ERR_PTR(-EBADF);
5491 
5492 	if (f.file->f_op != &btf_fops) {
5493 		fdput(f);
5494 		return ERR_PTR(-EINVAL);
5495 	}
5496 
5497 	btf = f.file->private_data;
5498 	refcount_inc(&btf->refcnt);
5499 	fdput(f);
5500 
5501 	return btf;
5502 }
5503 
5504 int btf_get_info_by_fd(const struct btf *btf,
5505 		       const union bpf_attr *attr,
5506 		       union bpf_attr __user *uattr)
5507 {
5508 	struct bpf_btf_info __user *uinfo;
5509 	struct bpf_btf_info info;
5510 	u32 info_copy, btf_copy;
5511 	void __user *ubtf;
5512 	u32 uinfo_len;
5513 
5514 	uinfo = u64_to_user_ptr(attr->info.info);
5515 	uinfo_len = attr->info.info_len;
5516 
5517 	info_copy = min_t(u32, uinfo_len, sizeof(info));
5518 	memset(&info, 0, sizeof(info));
5519 	if (copy_from_user(&info, uinfo, info_copy))
5520 		return -EFAULT;
5521 
5522 	info.id = btf->id;
5523 	ubtf = u64_to_user_ptr(info.btf);
5524 	btf_copy = min_t(u32, btf->data_size, info.btf_size);
5525 	if (copy_to_user(ubtf, btf->data, btf_copy))
5526 		return -EFAULT;
5527 	info.btf_size = btf->data_size;
5528 
5529 	if (copy_to_user(uinfo, &info, info_copy) ||
5530 	    put_user(info_copy, &uattr->info.info_len))
5531 		return -EFAULT;
5532 
5533 	return 0;
5534 }
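/* Userspace sketch (illustrative, not from the original source): the
 * matching BPF_OBJ_GET_INFO_BY_FD call retrieves the BTF id and raw data.
 * A first call with info.btf left NULL and info.btf_size zero learns the
 * required buffer size, which this function writes back:
 *
 *	struct bpf_btf_info info = {};
 *	union bpf_attr attr = {};
 *
 *	attr.info.bpf_fd   = btf_fd;
 *	attr.info.info     = (__u64)(unsigned long)&info;
 *	attr.info.info_len = sizeof(info);
 *	err = syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
 *	...on success info.id and info.btf_size are filled in...
 */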
5535 
5536 int btf_get_fd_by_id(u32 id)
5537 {
5538 	struct btf *btf;
5539 	int fd;
5540 
5541 	rcu_read_lock();
5542 	btf = idr_find(&btf_idr, id);
5543 	if (!btf || !refcount_inc_not_zero(&btf->refcnt))
5544 		btf = ERR_PTR(-ENOENT);
5545 	rcu_read_unlock();
5546 
5547 	if (IS_ERR(btf))
5548 		return PTR_ERR(btf);
5549 
5550 	fd = __btf_new_fd(btf);
5551 	if (fd < 0)
5552 		btf_put(btf);
5553 
5554 	return fd;
5555 }
5556 
5557 u32 btf_id(const struct btf *btf)
5558 {
5559 	return btf->id;
5560 }
5561 
5562 static int btf_id_cmp_func(const void *a, const void *b)
5563 {
5564 	const int *pa = a, *pb = b;
5565 
5566 	return *pa - *pb;
5567 }
5568 
5569 bool btf_id_set_contains(const struct btf_id_set *set, u32 id)
5570 {
5571 	return bsearch(&id, set->ids, set->cnt, sizeof(u32), btf_id_cmp_func) != NULL;
5572 }
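/* Illustrative note (hypothetical set name, not from the original source):
 * btf_id_set objects are normally declared with the BTF_SET_START()/
 * BTF_ID()/BTF_SET_END() macros from <linux/btf_ids.h>; the type ids are
 * resolved and sorted at build time by the resolve_btfids tool, which is
 * what makes the bsearch() above valid:
 *
 *	BTF_SET_START(my_allowed_structs)
 *	BTF_ID(struct, sk_buff)
 *	BTF_ID(struct, xdp_buff)
 *	BTF_SET_END(my_allowed_structs)
 *
 *	if (btf_id_set_contains(&my_allowed_structs, type_id))
 *		...
 */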
5573