/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#ifndef _LINUX_BPF_VERIFIER_H
#define _LINUX_BPF_VERIFIER_H 1

#include <linux/bpf.h> /* for enum bpf_reg_type */
#include <linux/filter.h> /* for MAX_BPF_STACK */
#include <linux/tnum.h>

/* Maximum variable offset umax_value permitted when resolving memory accesses.
 * In practice this is far bigger than any realistic pointer offset; this limit
 * ensures that umax_value + (int)off + (int)size cannot overflow a u64.
 */
#define BPF_MAX_VAR_OFF	(1 << 29)
/* Maximum variable size permitted for ARG_CONST_SIZE[_OR_ZERO].  This ensures
 * that converting umax_value to int cannot overflow.
 */
#define BPF_MAX_VAR_SIZ	(1 << 29)
/* size of type_str_buf in bpf_verifier. */
#define TYPE_STR_BUF_LEN 64

/* Liveness marks, used for registers and spilled-regs (in stack slots).
 * Read marks propagate upwards until they find a write mark; they record that
 * "one of this state's descendants read this reg" (and therefore the reg is
 * relevant for states_equal() checks).
 * Write marks collect downwards and do not propagate; they record that "the
 * straight-line code that reached this state (from its parent) wrote this reg"
 * (and therefore that reads propagated from this state or its descendants
 * should not propagate to its parent).
 * A state with a write mark can receive read marks; it just won't propagate
 * them to its parent, since the write mark is a property not of the state
 * but of the link between it and its parent.  See mark_reg_read() and
 * mark_stack_slot_read() in kernel/bpf/verifier.c.
 */
enum bpf_reg_liveness {
	REG_LIVE_NONE = 0, /* reg hasn't been read or written this branch */
	REG_LIVE_READ32 = 0x1, /* reg was read, so we're sensitive to initial value */
	REG_LIVE_READ64 = 0x2, /* likewise, but full 64-bit content matters */
	REG_LIVE_READ = REG_LIVE_READ32 | REG_LIVE_READ64,
	REG_LIVE_WRITTEN = 0x4, /* reg was written first, screening off later reads */
	REG_LIVE_DONE = 0x8, /* liveness won't be updating this register anymore */
};
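
/* Illustrative sketch (not part of the original source): how the marks above
 * interact.  Suppose a state S writes r1 and one of its descendants reads it:
 *
 *	S:     r1 = 5		r1 gets REG_LIVE_WRITTEN in S
 *	child: r2 = r1		read propagates upwards via mark_reg_read()
 *
 * The read mark walks up the parentage chain but is screened off at S by the
 * REG_LIVE_WRITTEN mark, so S's parent never sees the read and states_equal()
 * may safely disregard r1 when comparing against S's parent.
 */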

struct bpf_reg_state {
	/* Ordering of fields matters.  See states_equal() */
	enum bpf_reg_type type;
	union {
		/* valid when type == PTR_TO_PACKET */
		int range;

		/* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE |
		 *   PTR_TO_MAP_VALUE_OR_NULL
		 */
		struct bpf_map *map_ptr;

		u32 btf_id; /* for PTR_TO_BTF_ID */

		u32 mem_size; /* for PTR_TO_MEM | PTR_TO_MEM_OR_NULL */

		/* Max size from any of the above. */
		unsigned long raw;
	};
	/* Fixed part of pointer offset, pointer types only */
	s32 off;
	/* For PTR_TO_PACKET, used to find other pointers with the same variable
	 * offset, so they can share range knowledge.
	 * For PTR_TO_MAP_VALUE_OR_NULL this is used to share which map value we
	 * came from, when one is tested for != NULL.
	 * For PTR_TO_MEM_OR_NULL this is used to identify memory allocation
	 * for the purpose of tracking that it's freed.
	 * For PTR_TO_SOCKET this is used to share which pointers retain the
	 * same reference to the socket, to determine proper reference freeing.
	 */
	u32 id;
	/* PTR_TO_SOCKET and PTR_TO_TCP_SOCK could be a ptr returned
	 * from a pointer-cast helper such as bpf_sk_fullsock() or
	 * bpf_tcp_sock().
	 *
	 * Consider the following where "sk" is a reference counted
	 * pointer returned from "sk = bpf_sk_lookup_tcp();":
	 *
	 * 1: sk = bpf_sk_lookup_tcp();
	 * 2: if (!sk) { return 0; }
	 * 3: fullsock = bpf_sk_fullsock(sk);
	 * 4: if (!fullsock) { bpf_sk_release(sk); return 0; }
	 * 5: tp = bpf_tcp_sock(fullsock);
	 * 6: if (!tp) { bpf_sk_release(sk); return 0; }
	 * 7: bpf_sk_release(sk);
	 * 8: snd_cwnd = tp->snd_cwnd;  // verifier will complain
	 *
	 * After bpf_sk_release(sk) at line 7, both the "fullsock" ptr and
	 * the "tp" ptr should be invalidated as well.  In order to do that,
	 * the regs holding "fullsock" and "sk" need to remember
	 * the original refcounted ptr id (i.e. sk_reg->id) in ref_obj_id
	 * such that the verifier can reset all regs which have a
	 * ref_obj_id matching sk_reg->id.
	 *
	 * sk_reg->ref_obj_id is set to sk_reg->id at line 1.
	 * sk_reg->id is kept for NULL-marking purposes only.
	 * After NULL-marking is done, sk_reg->id can be reset to 0.
	 *
	 * After "fullsock = bpf_sk_fullsock(sk);" at line 3,
	 * fullsock_reg->ref_obj_id is set to sk_reg->ref_obj_id.
	 *
	 * After "tp = bpf_tcp_sock(fullsock);" at line 5,
	 * tp_reg->ref_obj_id is set to fullsock_reg->ref_obj_id
	 * which is the same as sk_reg->ref_obj_id.
	 *
	 * From the verifier perspective, if sk, fullsock and tp
	 * are not NULL, they are the same ptr with different
	 * reg->type.  In particular, bpf_sk_release(tp) is also
	 * allowed and has the same effect as bpf_sk_release(sk).
	 */
	u32 ref_obj_id;
	/* For scalar types (SCALAR_VALUE), this represents our knowledge of
	 * the actual value.
	 * For pointer types, this represents the variable part of the offset
	 * from the pointed-to object, and is shared with all bpf_reg_states
	 * with the same id as us.
	 */
	struct tnum var_off;
	/* Used to determine if any memory access using this register will
	 * result in a bad access.
	 * These refer to the same value as var_off, not necessarily the actual
	 * contents of the register.
	 */
	s64 smin_value; /* minimum possible (s64)value */
	s64 smax_value; /* maximum possible (s64)value */
	u64 umin_value; /* minimum possible (u64)value */
	u64 umax_value; /* maximum possible (u64)value */
	s32 s32_min_value; /* minimum possible (s32)value */
	s32 s32_max_value; /* maximum possible (s32)value */
	u32 u32_min_value; /* minimum possible (u32)value */
	u32 u32_max_value; /* maximum possible (u32)value */
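	/* Worked example (illustrative only): after a program executes
	 * "r1 &= 0xff", the verifier can track r1 as
	 * var_off = (value 0x0, mask 0xff), i.e. the low byte is unknown and
	 * all higher bits are known zero, which implies
	 * umin_value = 0, umax_value = 255, smin_value = 0, smax_value = 255,
	 * and likewise for the 32-bit bounds.
	 */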
	/* parentage chain for liveness checking */
	struct bpf_reg_state *parent;
	/* Inside the callee two registers can be both PTR_TO_STACK like
	 * R1=fp-8 and R2=fp-8, but one of them points to this function's stack
	 * while the other points to the caller's stack. To differentiate them,
	 * 'frameno' is used; it is an index into the bpf_verifier_state->frame[]
	 * array of bpf_func_state.
	 */
	u32 frameno;
	/* Tracks subreg definition. The stored value is the insn_idx of the
	 * writing insn. This is safe because subreg_def is used before any insn
	 * patching, which only happens after main verification has finished.
	 */
	s32 subreg_def;
	enum bpf_reg_liveness live;
	/* if (!precise && SCALAR_VALUE) min/max/tnum don't affect safety */
	bool precise;
};

enum bpf_stack_slot_type {
	STACK_INVALID,    /* nothing was stored in this stack slot */
	STACK_SPILL,      /* register spilled into stack */
	STACK_MISC,	  /* BPF program wrote some data into this slot */
	STACK_ZERO,	  /* BPF program wrote constant zero */
};

#define BPF_REG_SIZE 8	/* size of eBPF register in bytes */

struct bpf_stack_state {
	struct bpf_reg_state spilled_ptr;
	u8 slot_type[BPF_REG_SIZE];
};
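
/* Example (illustrative): an 8-byte register spill such as
 * "*(u64 *)(r10 - 8) = r1" marks all BPF_REG_SIZE bytes of slot_type[] as
 * STACK_SPILL and records r1's state in spilled_ptr, while a narrower store
 * of program data marks only the written bytes as STACK_MISC (or STACK_ZERO
 * when the stored value is known to be constant zero).
 */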

struct bpf_reference_state {
	/* Track each reference created with a unique id, even if the same
	 * instruction creates the reference multiple times (e.g., via CALL).
	 */
	int id;
	/* Instruction where the allocation of this reference occurred. This
	 * is used purely to inform the user of a reference leak.
	 */
	int insn_idx;
};

/* state of the program:
 * type of all registers and stack info
 */
struct bpf_func_state {
	struct bpf_reg_state regs[MAX_BPF_REG];
	/* index of call instruction that called into this func */
	int callsite;
	/* stack frame number of this function state from pov of
	 * enclosing bpf_verifier_state.
	 * 0 = main function, 1 = first callee.
	 */
	u32 frameno;
	/* subprog number == index within subprog_info
	 * zero == main subprog
	 */
	u32 subprogno;

	/* The following fields should be last. See copy_func_state() */
	int acquired_refs;
	struct bpf_reference_state *refs;
	int allocated_stack;
	struct bpf_stack_state *stack;
};

struct bpf_idx_pair {
	u32 prev_idx;
	u32 idx;
};

struct bpf_id_pair {
	u32 old;
	u32 cur;
};

/* Maximum number of register states that can exist at once */
#define BPF_ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE)
#define MAX_CALL_FRAMES 8
struct bpf_verifier_state {
	/* call stack tracking */
	struct bpf_func_state *frame[MAX_CALL_FRAMES];
	struct bpf_verifier_state *parent;
	/*
	 * The 'branches' field is the number of branches left to explore:
	 * 0 - all possible paths from this state reached bpf_exit or
	 * were safely pruned
	 * 1 - at least one path is being explored.
	 * This state hasn't reached bpf_exit.
	 * 2 - at least two paths are being explored.
	 * This state is an immediate parent of two children.
	 * One is the fallthrough branch with branches==1 and the other
	 * state is pushed onto the stack (to be explored later), also with
	 * branches==1. The parent of this state has branches==1.
	 * The verifier state tree connected via the 'parent' pointer looks like:
	 * 1
	 * 1
	 * 2 -> 1 (first 'if' pushed into stack)
	 * 1
	 * 2 -> 1 (second 'if' pushed into stack)
	 * 1
	 * 1
	 * 1 bpf_exit.
	 *
	 * Once do_check() reaches bpf_exit, it calls update_branch_counts()
	 * and the verifier state tree will look like:
	 * 1
	 * 1
	 * 2 -> 1 (first 'if' pushed into stack)
	 * 1
	 * 1 -> 1 (second 'if' pushed into stack)
	 * 0
	 * 0
	 * 0 bpf_exit.
	 * After pop_stack(), do_check() will resume at the second 'if'.
	 *
	 * If is_state_visited() sees a state with branches > 0 it means
	 * there is a loop. If such a state is exactly equal to the current
	 * state it's an infinite loop. Note that states_equal() checks for
	 * state equivalence, so two states being 'states_equal' does not
	 * mean an infinite loop. The exact comparison is provided by
	 * the states_maybe_looping() function. It's a stronger pre-check and
	 * much faster than states_equal().
	 *
	 * This algorithm may not find all possible infinite loops, or the
	 * loop iteration count may be too high. In such cases the
	 * BPF_COMPLEXITY_LIMIT_INSNS limit kicks in.
	 */
	u32 branches;
	u32 insn_idx;
	u32 curframe;
	u32 active_spin_lock;
	bool speculative;

	/* first and last insn idx of this verifier state */
	u32 first_insn_idx;
	u32 last_insn_idx;
	/* jmp history recorded from first to last.
	 * backtracking uses it to go from last to first.
	 * For most states jmp_history_cnt is [0-3].
	 * For loops it can go up to ~40.
	 */
	struct bpf_idx_pair *jmp_history;
	u32 jmp_history_cnt;
};
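
/* Rough sketch (simplified; the real logic is update_branch_counts() in
 * kernel/bpf/verifier.c) of the branch accounting described above: when a
 * path reaches bpf_exit, counts are decremented up the parent chain until a
 * state that still has another branch in flight is found:
 *
 *	while (st) {
 *		if (--st->branches)
 *			break;
 *		st = st->parent;
 *	}
 */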

#define bpf_get_spilled_reg(slot, frame)				\
	(((slot < frame->allocated_stack / BPF_REG_SIZE) &&		\
	  (frame->stack[slot].slot_type[0] == STACK_SPILL))		\
	 ? &frame->stack[slot].spilled_ptr : NULL)

/* Iterate over 'frame', setting 'reg' to either NULL or a spilled register. */
#define bpf_for_each_spilled_reg(iter, frame, reg)			\
	for (iter = 0, reg = bpf_get_spilled_reg(iter, frame);		\
	     iter < frame->allocated_stack / BPF_REG_SIZE;		\
	     iter++, reg = bpf_get_spilled_reg(iter, frame))
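
/* Usage sketch (illustrative): walk the spilled registers of one frame;
 * 'reg' is NULL for slots that do not hold a spilled register:
 *
 *	struct bpf_reg_state *reg;
 *	int i;
 *
 *	bpf_for_each_spilled_reg(i, frame, reg) {
 *		if (!reg)
 *			continue;
 *		...inspect or update the spilled register state...
 *	}
 */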

/* Invoke __expr over registers in __vst, setting __state and __reg */
#define bpf_for_each_reg_in_vstate(__vst, __state, __reg, __expr)   \
	({                                                               \
		struct bpf_verifier_state *___vstate = __vst;            \
		int ___i, ___j;                                          \
		for (___i = 0; ___i <= ___vstate->curframe; ___i++) {    \
			struct bpf_reg_state *___regs;                   \
			__state = ___vstate->frame[___i];                \
			___regs = __state->regs;                         \
			for (___j = 0; ___j < MAX_BPF_REG; ___j++) {     \
				__reg = &___regs[___j];                  \
				(void)(__expr);                          \
			}                                                \
			bpf_for_each_spilled_reg(___j, __state, __reg) { \
				if (!__reg)                              \
					continue;                        \
				(void)(__expr);                          \
			}                                                \
		}                                                        \
	})
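
/* Usage sketch (illustrative): this is the shape of the invalidation pass
 * described in the ref_obj_id comment above; '__mark_reg_unknown' and
 * 'released_id' stand in for the actual verifier internals:
 *
 *	struct bpf_func_state *state;
 *	struct bpf_reg_state *reg;
 *
 *	bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({
 *		if (reg->ref_obj_id == released_id)
 *			__mark_reg_unknown(env, reg);
 *	}));
 */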

/* linked list of verifier states used to prune search */
struct bpf_verifier_state_list {
	struct bpf_verifier_state state;
	struct bpf_verifier_state_list *next;
	int miss_cnt, hit_cnt;
};

/* Possible states for alu_state member. */
#define BPF_ALU_SANITIZE_SRC		(1U << 0)
#define BPF_ALU_SANITIZE_DST		(1U << 1)
#define BPF_ALU_NEG_VALUE		(1U << 2)
#define BPF_ALU_NON_POINTER		(1U << 3)
#define BPF_ALU_IMMEDIATE		(1U << 4)
#define BPF_ALU_SANITIZE		(BPF_ALU_SANITIZE_SRC | \
					 BPF_ALU_SANITIZE_DST)
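
/* Illustrative only: alu_state (a u8 in bpf_insn_aux_data below) packs these
 * bits together, e.g.:
 *
 *	u8 alu_state = BPF_ALU_SANITIZE_SRC | BPF_ALU_IMMEDIATE;
 *
 *	if (alu_state & BPF_ALU_SANITIZE)
 *		...the insn's pointer arithmetic must be masked by alu_limit...
 */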

struct bpf_insn_aux_data {
	union {
		enum bpf_reg_type ptr_type;	/* pointer type for load/store insns */
		unsigned long map_ptr_state;	/* pointer/poison value for maps */
		s32 call_imm;			/* saved imm field of call insn */
		u32 alu_limit;			/* limit for add/sub register with pointer */
		struct {
			u32 map_index;		/* index into used_maps[] */
			u32 map_off;		/* offset from value base address */
		};
		struct {
			enum bpf_reg_type reg_type;	/* type of pseudo_btf_id */
			union {
				u32 btf_id;	/* btf_id for struct typed var */
				u32 mem_size;	/* mem_size for non-struct typed var */
			};
		} btf_var;
	};
	u64 map_key_state; /* constant (32 bit) key tracking for maps */
	int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
	u32 seen; /* this insn was processed by the verifier at env->pass_cnt */
	bool sanitize_stack_spill; /* subject to Spectre v4 sanitization */
	bool zext_dst; /* this insn zero extends dst reg */
	u8 alu_state; /* used in combination with alu_limit */

	/* below fields are initialized once */
	unsigned int orig_idx; /* original instruction index */
	bool prune_point;
};

#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */

#define BPF_VERIFIER_TMP_LOG_SIZE	1024

struct bpf_verifier_log {
	u32 level;
	char kbuf[BPF_VERIFIER_TMP_LOG_SIZE];
	char __user *ubuf;
	u32 len_used;
	u32 len_total;
};

static inline bool bpf_verifier_log_full(const struct bpf_verifier_log *log)
{
	return log->len_used >= log->len_total - 1;
}

#define BPF_LOG_LEVEL1	1
#define BPF_LOG_LEVEL2	2
#define BPF_LOG_STATS	4
#define BPF_LOG_LEVEL	(BPF_LOG_LEVEL1 | BPF_LOG_LEVEL2)
#define BPF_LOG_MASK	(BPF_LOG_LEVEL | BPF_LOG_STATS)
#define BPF_LOG_KERNEL	(BPF_LOG_MASK + 1) /* kernel internal flag */

static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log)
{
	return log &&
		((log->level && log->ubuf && !bpf_verifier_log_full(log)) ||
		 log->level == BPF_LOG_KERNEL);
}

static inline bool
bpf_verifier_log_attr_valid(const struct bpf_verifier_log *log)
{
	return log->len_total >= 128 && log->len_total <= UINT_MAX >> 2 &&
	       log->level && log->ubuf && !(log->level & ~BPF_LOG_MASK);
}

#define BPF_MAX_SUBPROGS 256

struct bpf_subprog_info {
	/* 'start' has to be the first field otherwise find_subprog() won't work */
	u32 start; /* insn idx of function entry point */
	u32 linfo_idx; /* The idx to the main_prog->aux->linfo */
	u16 stack_depth; /* max. stack depth used by this function */
	bool has_tail_call;
	bool tail_call_reachable;
	bool has_ld_abs;
};

/* single container for all structs
 * one verifier_env per bpf_check() call
 */
struct bpf_verifier_env {
	u32 insn_idx;
	u32 prev_insn_idx;
	struct bpf_prog *prog;		/* eBPF program being verified */
	const struct bpf_verifier_ops *ops;
	struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */
	int stack_size;			/* number of states to be processed */
	bool strict_alignment;		/* perform strict pointer alignment checks */
	bool test_state_freq;		/* test verifier with different pruning frequency */
	struct bpf_verifier_state *cur_state; /* current verifier state */
	struct bpf_verifier_state_list **explored_states; /* search pruning optimization */
	struct bpf_verifier_state_list *free_list;
	struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of maps used by eBPF program */
	u32 used_map_cnt;		/* number of used maps */
	u32 id_gen;			/* used to generate unique reg IDs */
	bool explore_alu_limits;
	bool allow_ptr_leaks;
	bool allow_uninit_stack;
	bool allow_ptr_to_map_access;
	bool bpf_capable;
	bool bypass_spec_v1;
	bool bypass_spec_v4;
	bool seen_direct_write;
	struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
	const struct bpf_line_info *prev_linfo;
	struct bpf_verifier_log log;
	struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 1];
	struct bpf_id_pair idmap_scratch[BPF_ID_MAP_SIZE];
	struct {
		int *insn_state;
		int *insn_stack;
		int cur_stack;
	} cfg;
	u32 pass_cnt; /* number of times do_check() was called */
	u32 subprog_cnt;
	/* number of instructions analyzed by the verifier */
	u32 prev_insn_processed, insn_processed;
	/* number of jmps, calls, exits analyzed so far */
	u32 prev_jmps_processed, jmps_processed;
	/* total verification time */
	u64 verification_time;
	/* maximum number of verifier states kept in 'branching' instructions */
	u32 max_states_per_insn;
	/* total number of allocated verifier states */
	u32 total_states;
	/* some states are freed during program analysis;
	 * this is the peak number of states, which dominates kernel
	 * memory consumption during verification
	 */
	u32 peak_states;
	/* longest register parentage chain walked for liveness marking */
	u32 longest_mark_read_walk;
	/* buffer used in reg_type_str() to generate reg_type string */
	char type_str_buf[TYPE_STR_BUF_LEN];
};

__printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log,
				      const char *fmt, va_list args);
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
					   const char *fmt, ...);
__printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
			    const char *fmt, ...);

static inline struct bpf_func_state *cur_func(struct bpf_verifier_env *env)
{
	struct bpf_verifier_state *cur = env->cur_state;

	return cur->frame[cur->curframe];
}

static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env)
{
	return cur_func(env)->regs;
}
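
/* Usage sketch: the two helpers above always refer to the innermost call
 * frame, e.g. fetching the state of R0 (BPF_REG_0 comes from linux/bpf.h):
 *
 *	struct bpf_reg_state *r0 = &cur_regs(env)[BPF_REG_0];
 */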

int bpf_prog_offload_verifier_prep(struct bpf_prog *prog);
int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
				 int insn_idx, int prev_insn_idx);
int bpf_prog_offload_finalize(struct bpf_verifier_env *env);
void
bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
			      struct bpf_insn *insn);
void
bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt);

int check_ptr_off_reg(struct bpf_verifier_env *env,
		      const struct bpf_reg_state *reg, int regno);

/* this lives here instead of in bpf.h because it needs to dereference tgt_prog */
static inline u64 bpf_trampoline_compute_key(const struct bpf_prog *tgt_prog,
					     u32 btf_id)
{
	return tgt_prog ? (((u64)tgt_prog->aux->id) << 32 | btf_id) : btf_id;
}
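
/* Example: with a target prog whose aux->id is 42 and btf_id 7, the key is
 * (42ULL << 32) | 7 == 0x2a00000007; without a target prog the key is the
 * btf_id alone. Since prog ids start at 1, the two cases cannot produce the
 * same key.
 */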

int bpf_check_attach_target(struct bpf_verifier_log *log,
			    const struct bpf_prog *prog,
			    const struct bpf_prog *tgt_prog,
			    u32 btf_id,
			    struct bpf_attach_target_info *tgt_info);

#define BPF_BASE_TYPE_MASK	GENMASK(BPF_BASE_TYPE_BITS - 1, 0)

/* extract base type from bpf_{arg, return, reg}_type. */
static inline u32 base_type(u32 type)
{
	return type & BPF_BASE_TYPE_MASK;
}

/* extract flags from an extended type. See bpf_type_flag in bpf.h. */
static inline u32 type_flag(u32 type)
{
	return type & ~BPF_BASE_TYPE_MASK;
}
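
/* Example (assuming a flag such as PTR_MAYBE_NULL from bpf_type_flag in
 * bpf.h):
 *
 *	u32 t = PTR_TO_MAP_VALUE | PTR_MAYBE_NULL;
 *
 *	base_type(t)	== PTR_TO_MAP_VALUE
 *	type_flag(t)	== PTR_MAYBE_NULL
 */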

#endif /* _LINUX_BPF_VERIFIER_H */