1  /* SPDX-License-Identifier: GPL-2.0 */
2  /*
3   * Linux Socket Filter Data Structures
4   */
5  #ifndef __LINUX_FILTER_H__
6  #define __LINUX_FILTER_H__
7  
8  #include <stdarg.h>
9  
10  #include <linux/atomic.h>
11  #include <linux/refcount.h>
12  #include <linux/compat.h>
13  #include <linux/skbuff.h>
14  #include <linux/linkage.h>
15  #include <linux/printk.h>
16  #include <linux/workqueue.h>
17  #include <linux/sched.h>
18  #include <linux/capability.h>
19  #include <linux/cryptohash.h>
20  #include <linux/set_memory.h>
21  #include <linux/kallsyms.h>
22  #include <linux/if_vlan.h>
23  #include <linux/vmalloc.h>
24  
25  #include <net/sch_generic.h>
26  
27  #include <asm/byteorder.h>
28  #include <uapi/linux/filter.h>
29  #include <uapi/linux/bpf.h>
30  
31  struct sk_buff;
32  struct sock;
33  struct seccomp_data;
34  struct bpf_prog_aux;
35  struct xdp_rxq_info;
36  struct xdp_buff;
37  struct sock_reuseport;
38  struct ctl_table;
39  struct ctl_table_header;
40  
41  /* ArgX, context and stack frame pointer register positions. Note,
42   * Arg1, Arg2, Arg3, etc. are used as argument mappings for function
43   * calls in the BPF_CALL instruction.
44   */
45  #define BPF_REG_ARG1	BPF_REG_1
46  #define BPF_REG_ARG2	BPF_REG_2
47  #define BPF_REG_ARG3	BPF_REG_3
48  #define BPF_REG_ARG4	BPF_REG_4
49  #define BPF_REG_ARG5	BPF_REG_5
50  #define BPF_REG_CTX	BPF_REG_6
51  #define BPF_REG_FP	BPF_REG_10
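/* Editorial note (not part of the original header): in the calling
 * convention sketched above, a BPF_CALL consumes its inputs from
 * BPF_REG_ARG1..BPF_REG_ARG5 and returns its result in BPF_REG_0,
 * while BPF_REG_FP is a read-only pointer to the top of the stack frame.
 */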
52  
53  /* Additional register mappings for converted user programs. */
54  #define BPF_REG_A	BPF_REG_0
55  #define BPF_REG_X	BPF_REG_7
56  #define BPF_REG_TMP	BPF_REG_2	/* scratch reg */
57  #define BPF_REG_D	BPF_REG_8	/* data, callee-saved */
58  #define BPF_REG_H	BPF_REG_9	/* hlen, callee-saved */
59  
60  /* Kernel hidden auxiliary/helper register. */
61  #define BPF_REG_AX		MAX_BPF_REG
62  #define MAX_BPF_EXT_REG		(MAX_BPF_REG + 1)
63  #define MAX_BPF_JIT_REG		MAX_BPF_EXT_REG
64  
65  /* unused opcode to mark special call to bpf_tail_call() helper */
66  #define BPF_TAIL_CALL	0xf0
67  
68  /* unused opcode to mark call to interpreter with arguments */
69  #define BPF_CALL_ARGS	0xe0
70  
71  /* unused opcode to mark speculation barrier for mitigating
72   * Speculative Store Bypass
73   */
74  #define BPF_NOSPEC	0xc0
75  
76  /* As per nm, we expose JITed images as a text (code) section for
77   * kallsyms. That way, tools like perf can find it to match
78   * addresses.
79   */
80  #define BPF_SYM_ELF_TYPE	't'
81  
82  /* BPF program can access up to 512 bytes of stack space. */
83  #define MAX_BPF_STACK	512
84  
85  /* Helper macros for filter block array initializers. */
86  
87  /* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */
88  
89  #define BPF_ALU64_REG(OP, DST, SRC)				\
90  	((struct bpf_insn) {					\
91  		.code  = BPF_ALU64 | BPF_OP(OP) | BPF_X,	\
92  		.dst_reg = DST,					\
93  		.src_reg = SRC,					\
94  		.off   = 0,					\
95  		.imm   = 0 })
96  
97  #define BPF_ALU32_REG(OP, DST, SRC)				\
98  	((struct bpf_insn) {					\
99  		.code  = BPF_ALU | BPF_OP(OP) | BPF_X,		\
100  		.dst_reg = DST,					\
101  		.src_reg = SRC,					\
102  		.off   = 0,					\
103  		.imm   = 0 })
104  
105  /* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */
106  
107  #define BPF_ALU64_IMM(OP, DST, IMM)				\
108  	((struct bpf_insn) {					\
109  		.code  = BPF_ALU64 | BPF_OP(OP) | BPF_K,	\
110  		.dst_reg = DST,					\
111  		.src_reg = 0,					\
112  		.off   = 0,					\
113  		.imm   = IMM })
114  
115  #define BPF_ALU32_IMM(OP, DST, IMM)				\
116  	((struct bpf_insn) {					\
117  		.code  = BPF_ALU | BPF_OP(OP) | BPF_K,		\
118  		.dst_reg = DST,					\
119  		.src_reg = 0,					\
120  		.off   = 0,					\
121  		.imm   = IMM })
122  
123  /* Endianness conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */
124  
125  #define BPF_ENDIAN(TYPE, DST, LEN)				\
126  	((struct bpf_insn) {					\
127  		.code  = BPF_ALU | BPF_END | BPF_SRC(TYPE),	\
128  		.dst_reg = DST,					\
129  		.src_reg = 0,					\
130  		.off   = 0,					\
131  		.imm   = LEN })
132  
133  /* Short form of mov, dst_reg = src_reg */
134  
135  #define BPF_MOV64_REG(DST, SRC)					\
136  	((struct bpf_insn) {					\
137  		.code  = BPF_ALU64 | BPF_MOV | BPF_X,		\
138  		.dst_reg = DST,					\
139  		.src_reg = SRC,					\
140  		.off   = 0,					\
141  		.imm   = 0 })
142  
143  #define BPF_MOV32_REG(DST, SRC)					\
144  	((struct bpf_insn) {					\
145  		.code  = BPF_ALU | BPF_MOV | BPF_X,		\
146  		.dst_reg = DST,					\
147  		.src_reg = SRC,					\
148  		.off   = 0,					\
149  		.imm   = 0 })
150  
151  /* Short form of mov, dst_reg = imm32 */
152  
153  #define BPF_MOV64_IMM(DST, IMM)					\
154  	((struct bpf_insn) {					\
155  		.code  = BPF_ALU64 | BPF_MOV | BPF_K,		\
156  		.dst_reg = DST,					\
157  		.src_reg = 0,					\
158  		.off   = 0,					\
159  		.imm   = IMM })
160  
161  #define BPF_MOV32_IMM(DST, IMM)					\
162  	((struct bpf_insn) {					\
163  		.code  = BPF_ALU | BPF_MOV | BPF_K,		\
164  		.dst_reg = DST,					\
165  		.src_reg = 0,					\
166  		.off   = 0,					\
167  		.imm   = IMM })
168  
169  /* Special form of mov32, used for doing explicit zero extension on dst. */
170  #define BPF_ZEXT_REG(DST)					\
171  	((struct bpf_insn) {					\
172  		.code  = BPF_ALU | BPF_MOV | BPF_X,		\
173  		.dst_reg = DST,					\
174  		.src_reg = DST,					\
175  		.off   = 0,					\
176  		.imm   = 1 })
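/* Editorial sketch (not part of the original header): the initializer
 * macros above, together with BPF_EXIT_INSN() defined further below,
 * compose directly into insn arrays, e.g. a minimal program that just
 * returns 1:
 *
 *	struct bpf_insn prog[] = {
 *		BPF_MOV64_IMM(BPF_REG_0, 1),
 *		BPF_EXIT_INSN(),
 *	};
 */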
177  
178  static inline bool insn_is_zext(const struct bpf_insn *insn)
179  {
180  	return insn->code == (BPF_ALU | BPF_MOV | BPF_X) && insn->imm == 1;
181  }
182  
183  /* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */
184  #define BPF_LD_IMM64(DST, IMM)					\
185  	BPF_LD_IMM64_RAW(DST, 0, IMM)
186  
187  #define BPF_LD_IMM64_RAW(DST, SRC, IMM)				\
188  	((struct bpf_insn) {					\
189  		.code  = BPF_LD | BPF_DW | BPF_IMM,		\
190  		.dst_reg = DST,					\
191  		.src_reg = SRC,					\
192  		.off   = 0,					\
193  		.imm   = (__u32) (IMM) }),			\
194  	((struct bpf_insn) {					\
195  		.code  = 0, /* zero is reserved opcode */	\
196  		.dst_reg = 0,					\
197  		.src_reg = 0,					\
198  		.off   = 0,					\
199  		.imm   = ((__u64) (IMM)) >> 32 })
200  
201  /* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */
202  #define BPF_LD_MAP_FD(DST, MAP_FD)				\
203  	BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)
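/* Editorial sketch: BPF_LD_IMM64() and BPF_LD_MAP_FD() expand to two
 * struct bpf_insn entries (the second carries the upper 32 bits of the
 * immediate), so each use takes two slots of an insn array. Here map_fd
 * is a hypothetical process-local map file descriptor:
 *
 *	struct bpf_insn insns[] = {
 *		BPF_LD_MAP_FD(BPF_REG_1, map_fd),
 *		BPF_MOV64_IMM(BPF_REG_0, 0),
 *		BPF_EXIT_INSN(),
 *	};
 */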
204  
205  /* Short form of mov based on type, BPF_X: dst_reg = src_reg, BPF_K: dst_reg = imm32 */
206  
207  #define BPF_MOV64_RAW(TYPE, DST, SRC, IMM)			\
208  	((struct bpf_insn) {					\
209  		.code  = BPF_ALU64 | BPF_MOV | BPF_SRC(TYPE),	\
210  		.dst_reg = DST,					\
211  		.src_reg = SRC,					\
212  		.off   = 0,					\
213  		.imm   = IMM })
214  
215  #define BPF_MOV32_RAW(TYPE, DST, SRC, IMM)			\
216  	((struct bpf_insn) {					\
217  		.code  = BPF_ALU | BPF_MOV | BPF_SRC(TYPE),	\
218  		.dst_reg = DST,					\
219  		.src_reg = SRC,					\
220  		.off   = 0,					\
221  		.imm   = IMM })
222  
223  /* Direct packet access, R0 = *(uint *) (skb->data + imm32) */
224  
225  #define BPF_LD_ABS(SIZE, IMM)					\
226  	((struct bpf_insn) {					\
227  		.code  = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS,	\
228  		.dst_reg = 0,					\
229  		.src_reg = 0,					\
230  		.off   = 0,					\
231  		.imm   = IMM })
232  
233  /* Indirect packet access, R0 = *(uint *) (skb->data + src_reg + imm32) */
234  
235  #define BPF_LD_IND(SIZE, SRC, IMM)				\
236  	((struct bpf_insn) {					\
237  		.code  = BPF_LD | BPF_SIZE(SIZE) | BPF_IND,	\
238  		.dst_reg = 0,					\
239  		.src_reg = SRC,					\
240  		.off   = 0,					\
241  		.imm   = IMM })
242  
243  /* Memory load, dst_reg = *(uint *) (src_reg + off16) */
244  
245  #define BPF_LDX_MEM(SIZE, DST, SRC, OFF)			\
246  	((struct bpf_insn) {					\
247  		.code  = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM,	\
248  		.dst_reg = DST,					\
249  		.src_reg = SRC,					\
250  		.off   = OFF,					\
251  		.imm   = 0 })
252  
253  /* Memory store, *(uint *) (dst_reg + off16) = src_reg */
254  
255  #define BPF_STX_MEM(SIZE, DST, SRC, OFF)			\
256  	((struct bpf_insn) {					\
257  		.code  = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM,	\
258  		.dst_reg = DST,					\
259  		.src_reg = SRC,					\
260  		.off   = OFF,					\
261  		.imm   = 0 })
262  
263  /* Atomic memory add, *(uint *)(dst_reg + off16) += src_reg */
264  
265  #define BPF_STX_XADD(SIZE, DST, SRC, OFF)			\
266  	((struct bpf_insn) {					\
267  		.code  = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD,	\
268  		.dst_reg = DST,					\
269  		.src_reg = SRC,					\
270  		.off   = OFF,					\
271  		.imm   = 0 })
272  
273  /* Memory store, *(uint *) (dst_reg + off16) = imm32 */
274  
275  #define BPF_ST_MEM(SIZE, DST, OFF, IMM)				\
276  	((struct bpf_insn) {					\
277  		.code  = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM,	\
278  		.dst_reg = DST,					\
279  		.src_reg = 0,					\
280  		.off   = OFF,					\
281  		.imm   = IMM })
282  
283  /* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */
284  
285  #define BPF_JMP_REG(OP, DST, SRC, OFF)				\
286  	((struct bpf_insn) {					\
287  		.code  = BPF_JMP | BPF_OP(OP) | BPF_X,		\
288  		.dst_reg = DST,					\
289  		.src_reg = SRC,					\
290  		.off   = OFF,					\
291  		.imm   = 0 })
292  
293  /* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */
294  
295  #define BPF_JMP_IMM(OP, DST, IMM, OFF)				\
296  	((struct bpf_insn) {					\
297  		.code  = BPF_JMP | BPF_OP(OP) | BPF_K,		\
298  		.dst_reg = DST,					\
299  		.src_reg = 0,					\
300  		.off   = OFF,					\
301  		.imm   = IMM })
302  
303  /* Like BPF_JMP_REG, but with 32-bit wide operands for comparison. */
304  
305  #define BPF_JMP32_REG(OP, DST, SRC, OFF)			\
306  	((struct bpf_insn) {					\
307  		.code  = BPF_JMP32 | BPF_OP(OP) | BPF_X,	\
308  		.dst_reg = DST,					\
309  		.src_reg = SRC,					\
310  		.off   = OFF,					\
311  		.imm   = 0 })
312  
313  /* Like BPF_JMP_IMM, but with 32-bit wide operands for comparison. */
314  
315  #define BPF_JMP32_IMM(OP, DST, IMM, OFF)			\
316  	((struct bpf_insn) {					\
317  		.code  = BPF_JMP32 | BPF_OP(OP) | BPF_K,	\
318  		.dst_reg = DST,					\
319  		.src_reg = 0,					\
320  		.off   = OFF,					\
321  		.imm   = IMM })
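/* Editorial sketch of the jump forms above: off16 is relative to the
 * next instruction, so the fragment below sets R0 = (R1 != 0) by
 * jumping over a single insn when R1 == 0:
 *
 *	BPF_MOV64_IMM(BPF_REG_0, 0),
 *	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
 *	BPF_MOV64_IMM(BPF_REG_0, 1),
 *	BPF_EXIT_INSN(),
 */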
322  
323  /* Unconditional jumps, goto pc + off16 */
324  
325  #define BPF_JMP_A(OFF)						\
326  	((struct bpf_insn) {					\
327  		.code  = BPF_JMP | BPF_JA,			\
328  		.dst_reg = 0,					\
329  		.src_reg = 0,					\
330  		.off   = OFF,					\
331  		.imm   = 0 })
332  
333  /* Relative call */
334  
335  #define BPF_CALL_REL(TGT)					\
336  	((struct bpf_insn) {					\
337  		.code  = BPF_JMP | BPF_CALL,			\
338  		.dst_reg = 0,					\
339  		.src_reg = BPF_PSEUDO_CALL,			\
340  		.off   = 0,					\
341  		.imm   = TGT })
342  
343  /* Function call */
344  
345  #define BPF_CAST_CALL(x)					\
346  		((u64 (*)(u64, u64, u64, u64, u64))(x))
347  
348  #define BPF_EMIT_CALL(FUNC)					\
349  	((struct bpf_insn) {					\
350  		.code  = BPF_JMP | BPF_CALL,			\
351  		.dst_reg = 0,					\
352  		.src_reg = 0,					\
353  		.off   = 0,					\
354  		.imm   = ((FUNC) - __bpf_call_base) })
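/* Editorial note: BPF_EMIT_CALL() stores the call target as an offset
 * relative to __bpf_call_base (declared further below), which is how
 * the interpreter resolves helper addresses at runtime. A hypothetical
 * construction site would emit e.g. BPF_EMIT_CALL(some_helper), where
 * some_helper follows the u64 (*)(u64, u64, u64, u64, u64) convention
 * that BPF_CAST_CALL() casts to.
 */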
355  
356  /* Raw code statement block */
357  
358  #define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM)			\
359  	((struct bpf_insn) {					\
360  		.code  = CODE,					\
361  		.dst_reg = DST,					\
362  		.src_reg = SRC,					\
363  		.off   = OFF,					\
364  		.imm   = IMM })
365  
366  /* Program exit */
367  
368  #define BPF_EXIT_INSN()						\
369  	((struct bpf_insn) {					\
370  		.code  = BPF_JMP | BPF_EXIT,			\
371  		.dst_reg = 0,					\
372  		.src_reg = 0,					\
373  		.off   = 0,					\
374  		.imm   = 0 })
375  
376  /* Speculation barrier */
377  
378  #define BPF_ST_NOSPEC()						\
379  	((struct bpf_insn) {					\
380  		.code  = BPF_ST | BPF_NOSPEC,			\
381  		.dst_reg = 0,					\
382  		.src_reg = 0,					\
383  		.off   = 0,					\
384  		.imm   = 0 })
385  
386  /* Internal classic blocks for direct assignment */
387  
388  #define __BPF_STMT(CODE, K)					\
389  	((struct sock_filter) BPF_STMT(CODE, K))
390  
391  #define __BPF_JUMP(CODE, K, JT, JF)				\
392  	((struct sock_filter) BPF_JUMP(CODE, K, JT, JF))
393  
394  #define bytes_to_bpf_size(bytes)				\
395  ({								\
396  	int bpf_size = -EINVAL;					\
397  								\
398  	if (bytes == sizeof(u8))				\
399  		bpf_size = BPF_B;				\
400  	else if (bytes == sizeof(u16))				\
401  		bpf_size = BPF_H;				\
402  	else if (bytes == sizeof(u32))				\
403  		bpf_size = BPF_W;				\
404  	else if (bytes == sizeof(u64))				\
405  		bpf_size = BPF_DW;				\
406  								\
407  	bpf_size;						\
408  })
409  
410  #define bpf_size_to_bytes(bpf_size)				\
411  ({								\
412  	int bytes = -EINVAL;					\
413  								\
414  	if (bpf_size == BPF_B)					\
415  		bytes = sizeof(u8);				\
416  	else if (bpf_size == BPF_H)				\
417  		bytes = sizeof(u16);				\
418  	else if (bpf_size == BPF_W)				\
419  		bytes = sizeof(u32);				\
420  	else if (bpf_size == BPF_DW)				\
421  		bytes = sizeof(u64);				\
422  								\
423  	bytes;							\
424  })
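/* Editorial examples for the two size helpers above:
 * bytes_to_bpf_size(sizeof(u32)) evaluates to BPF_W and
 * bpf_size_to_bytes(BPF_DW) to sizeof(u64); any other input
 * evaluates to -EINVAL.
 */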
425  
426  #define BPF_SIZEOF(type)					\
427  	({							\
428  		const int __size = bytes_to_bpf_size(sizeof(type)); \
429  		BUILD_BUG_ON(__size < 0);			\
430  		__size;						\
431  	})
432  
433  #define BPF_FIELD_SIZEOF(type, field)				\
434  	({							\
435  		const int __size = bytes_to_bpf_size(FIELD_SIZEOF(type, field)); \
436  		BUILD_BUG_ON(__size < 0);			\
437  		__size;						\
438  	})
439  
440  #define BPF_LDST_BYTES(insn)					\
441  	({							\
442  		const int __size = bpf_size_to_bytes(BPF_SIZE((insn)->code)); \
443  		WARN_ON(__size < 0);				\
444  		__size;						\
445  	})
446  
447  #define __BPF_MAP_0(m, v, ...) v
448  #define __BPF_MAP_1(m, v, t, a, ...) m(t, a)
449  #define __BPF_MAP_2(m, v, t, a, ...) m(t, a), __BPF_MAP_1(m, v, __VA_ARGS__)
450  #define __BPF_MAP_3(m, v, t, a, ...) m(t, a), __BPF_MAP_2(m, v, __VA_ARGS__)
451  #define __BPF_MAP_4(m, v, t, a, ...) m(t, a), __BPF_MAP_3(m, v, __VA_ARGS__)
452  #define __BPF_MAP_5(m, v, t, a, ...) m(t, a), __BPF_MAP_4(m, v, __VA_ARGS__)
453  
454  #define __BPF_REG_0(...) __BPF_PAD(5)
455  #define __BPF_REG_1(...) __BPF_MAP(1, __VA_ARGS__), __BPF_PAD(4)
456  #define __BPF_REG_2(...) __BPF_MAP(2, __VA_ARGS__), __BPF_PAD(3)
457  #define __BPF_REG_3(...) __BPF_MAP(3, __VA_ARGS__), __BPF_PAD(2)
458  #define __BPF_REG_4(...) __BPF_MAP(4, __VA_ARGS__), __BPF_PAD(1)
459  #define __BPF_REG_5(...) __BPF_MAP(5, __VA_ARGS__)
460  
461  #define __BPF_MAP(n, ...) __BPF_MAP_##n(__VA_ARGS__)
462  #define __BPF_REG(n, ...) __BPF_REG_##n(__VA_ARGS__)
463  
464  #define __BPF_CAST(t, a)						       \
465  	(__force t)							       \
466  	(__force							       \
467  	 typeof(__builtin_choose_expr(sizeof(t) == sizeof(unsigned long),      \
468  				      (unsigned long)0, (t)0))) a
469  #define __BPF_V void
470  #define __BPF_N
471  
472  #define __BPF_DECL_ARGS(t, a) t   a
473  #define __BPF_DECL_REGS(t, a) u64 a
474  
475  #define __BPF_PAD(n)							       \
476  	__BPF_MAP(n, __BPF_DECL_ARGS, __BPF_N, u64, __ur_1, u64, __ur_2,       \
477  		  u64, __ur_3, u64, __ur_4, u64, __ur_5)
478  
479  #define BPF_CALL_x(x, attr, name, ...)					       \
480  	static __always_inline						       \
481  	u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__));   \
482  	typedef u64 (*btf_##name)(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \
483  	attr u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__));    \
484  	attr u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__))     \
485  	{								       \
486  		return ((btf_##name)____##name)(__BPF_MAP(x,__BPF_CAST,__BPF_N,__VA_ARGS__));\
487  	}								       \
488  	static __always_inline						       \
489  	u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__))
490  
491  #define __NOATTR
492  #define BPF_CALL_0(name, ...)	BPF_CALL_x(0, __NOATTR, name, __VA_ARGS__)
493  #define BPF_CALL_1(name, ...)	BPF_CALL_x(1, __NOATTR, name, __VA_ARGS__)
494  #define BPF_CALL_2(name, ...)	BPF_CALL_x(2, __NOATTR, name, __VA_ARGS__)
495  #define BPF_CALL_3(name, ...)	BPF_CALL_x(3, __NOATTR, name, __VA_ARGS__)
496  #define BPF_CALL_4(name, ...)	BPF_CALL_x(4, __NOATTR, name, __VA_ARGS__)
497  #define BPF_CALL_5(name, ...)	BPF_CALL_x(5, __NOATTR, name, __VA_ARGS__)
498  
499  #define NOTRACE_BPF_CALL_1(name, ...)	BPF_CALL_x(1, notrace, name, __VA_ARGS__)
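/* Editorial sketch of how the BPF_CALL_x() wrappers are used (the helper
 * name and body here are hypothetical): the macro emits the u64-register
 * entry point and forwards, with the casts above, to a strongly typed
 * body:
 *
 *	BPF_CALL_2(bpf_example_add, u32, a, u32, b)
 *	{
 *		return a + b;
 *	}
 */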
500  
501  #define bpf_ctx_range(TYPE, MEMBER)						\
502  	offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1
503  #define bpf_ctx_range_till(TYPE, MEMBER1, MEMBER2)				\
504  	offsetof(TYPE, MEMBER1) ... offsetofend(TYPE, MEMBER2) - 1
505  #if BITS_PER_LONG == 64
506  # define bpf_ctx_range_ptr(TYPE, MEMBER)					\
507  	offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1
508  #else
509  # define bpf_ctx_range_ptr(TYPE, MEMBER)					\
510  	offsetof(TYPE, MEMBER) ... offsetof(TYPE, MEMBER) + 8 - 1
511  #endif /* BITS_PER_LONG == 64 */
512  
513  #define bpf_target_off(TYPE, MEMBER, SIZE, PTR_SIZE)				\
514  	({									\
515  		BUILD_BUG_ON(FIELD_SIZEOF(TYPE, MEMBER) != (SIZE));		\
516  		*(PTR_SIZE) = (SIZE);						\
517  		offsetof(TYPE, MEMBER);						\
518  	})
519  
520  #ifdef CONFIG_COMPAT
521  /* A struct sock_filter is architecture independent. */
522  struct compat_sock_fprog {
523  	u16		len;
524  	compat_uptr_t	filter;	/* struct sock_filter * */
525  };
526  #endif
527  
528  struct sock_fprog_kern {
529  	u16			len;
530  	struct sock_filter	*filter;
531  };
532  
533  #define BPF_BINARY_HEADER_MAGIC	0x05de0e82
534  
535  struct bpf_binary_header {
536  #ifdef CONFIG_CFI_CLANG
537  	u32 magic;
538  #endif
539  	u32 pages;
540  	/* Some arches need word alignment for their instructions */
541  	u8 image[] __aligned(4);
542  };
543  
544  struct bpf_prog {
545  	u16			pages;		/* Number of allocated pages */
546  	u16			jited:1,	/* Is our filter JIT'ed? */
547  				jit_requested:1,/* archs need to JIT the prog */
548  				gpl_compatible:1, /* Is filter GPL compatible? */
549  				cb_access:1,	/* Is control block accessed? */
550  				dst_needed:1,	/* Do we need dst entry? */
551  				blinded:1,	/* Was blinded */
552  				is_func:1,	/* program is a bpf function */
553  				kprobe_override:1, /* Do we override a kprobe? */
554  				has_callchain_buf:1, /* callchain buffer allocated? */
555  				enforce_expected_attach_type:1; /* Enforce expected_attach_type checking at attach time */
556  	enum bpf_prog_type	type;		/* Type of BPF program */
557  	enum bpf_attach_type	expected_attach_type; /* For some prog types */
558  	u32			len;		/* Number of filter blocks */
559  	u32			jited_len;	/* Size of jited insns in bytes */
560  	u8			tag[BPF_TAG_SIZE];
561  	struct bpf_prog_aux	*aux;		/* Auxiliary fields */
562  	struct sock_fprog_kern	*orig_prog;	/* Original BPF program */
563  	unsigned int		(*bpf_func)(const void *ctx,
564  					    const struct bpf_insn *insn);
565  	/* Instructions for interpreter */
566  	union {
567  		struct sock_filter	insns[0];
568  		struct bpf_insn		insnsi[0];
569  	};
570  };
571  
572  struct sk_filter {
573  	refcount_t	refcnt;
574  	struct rcu_head	rcu;
575  	struct bpf_prog	*prog;
576  };
577  
578  DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
579  
580  #if IS_ENABLED(CONFIG_BPF_JIT) && IS_ENABLED(CONFIG_CFI_CLANG)
581  /*
582   * With JIT, the kernel makes an indirect call to dynamically generated
583   * code. Use bpf_call_func to perform additional validation of the call
584   * target to narrow down attack surface. Architectures implementing BPF
585   * JIT can override arch_bpf_jit_check_func for arch-specific checking.
586   */
587  extern bool arch_bpf_jit_check_func(const struct bpf_prog *prog);
588  
589  static inline unsigned int __bpf_call_func(const struct bpf_prog *prog,
590  					   const void *ctx)
591  {
592  	/* Call interpreter with CFI checking. */
593  	return prog->bpf_func(ctx, prog->insnsi);
594  }
595  
596  static inline struct bpf_binary_header *
597  bpf_jit_binary_hdr(const struct bpf_prog *fp);
598  
599  static inline unsigned int __nocfi bpf_call_func(const struct bpf_prog *prog,
600  						 const void *ctx)
601  {
602  	const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog);
603  
604  	if (!IS_ENABLED(CONFIG_BPF_JIT_ALWAYS_ON) && !prog->jited)
605  		return __bpf_call_func(prog, ctx);
606  
607  	/*
608  	 * We are about to call dynamically generated code. Check that the
609  	 * page has bpf_binary_header with a valid magic to limit possible
610  	 * call targets.
611  	 */
612  	BUG_ON(hdr->magic != BPF_BINARY_HEADER_MAGIC ||
613  		!arch_bpf_jit_check_func(prog));
614  
615  	/* Call jited function without CFI checking. */
616  	return prog->bpf_func(ctx, prog->insnsi);
617  }
618  
619  static inline void bpf_jit_set_header_magic(struct bpf_binary_header *hdr)
620  {
621  	hdr->magic = BPF_BINARY_HEADER_MAGIC;
622  }
623  #else
624  static inline unsigned int bpf_call_func(const struct bpf_prog *prog,
625  					 const void *ctx)
626  {
627  	return prog->bpf_func(ctx, prog->insnsi);
628  }
629  
630  static inline void bpf_jit_set_header_magic(struct bpf_binary_header *hdr)
631  {
632  }
633  #endif
634  
635  #define BPF_PROG_RUN(prog, ctx)	({				\
636  	u32 ret;						\
637  	cant_sleep();						\
638  	if (static_branch_unlikely(&bpf_stats_enabled_key)) {	\
639  		struct bpf_prog_stats *stats;			\
640  		u64 start = sched_clock();			\
641  		ret = bpf_call_func(prog, ctx);			\
642  		stats = this_cpu_ptr(prog->aux->stats);		\
643  		u64_stats_update_begin(&stats->syncp);		\
644  		stats->cnt++;					\
645  		stats->nsecs += sched_clock() - start;		\
646  		u64_stats_update_end(&stats->syncp);		\
647  	} else {						\
648  		ret = bpf_call_func(prog, ctx);			\
649  	}							\
650  	ret; })
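/* Editorial usage sketch: callers invoke BPF_PROG_RUN() from a
 * non-sleepable context (see cant_sleep() above), typically with the
 * program kept alive via RCU and preemption disabled as in the helpers
 * further below:
 *
 *	u32 act;
 *
 *	preempt_disable();
 *	act = BPF_PROG_RUN(prog, skb);
 *	preempt_enable();
 */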
651  
652  #define BPF_SKB_CB_LEN QDISC_CB_PRIV_LEN
653  
654  struct bpf_skb_data_end {
655  	struct qdisc_skb_cb qdisc_cb;
656  	void *data_meta;
657  	void *data_end;
658  };
659  
660  struct bpf_redirect_info {
661  	u32 flags;
662  	u32 tgt_index;
663  	void *tgt_value;
664  	struct bpf_map *map;
665  	struct bpf_map *map_to_flush;
666  	u32 kern_flags;
667  };
668  
669  DECLARE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info);
670  
671  /* flags for bpf_redirect_info kern_flags */
672  #define BPF_RI_F_RF_NO_DIRECT	BIT(0)	/* no napi_direct on return_frame */
673  
674  /* Compute the linear packet data range [data, data_end) which
675   * will be accessed by various program types (cls_bpf, act_bpf,
676   * lwt, ...). Subsystems allowing direct data access must (!)
677   * ensure that the cb[] area can be written to when a BPF program is
678   * invoked (otherwise cb[] save/restore is necessary).
679   */
680  static inline void bpf_compute_data_pointers(struct sk_buff *skb)
681  {
682  	struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;
683  
684  	BUILD_BUG_ON(sizeof(*cb) > FIELD_SIZEOF(struct sk_buff, cb));
685  	cb->data_meta = skb->data - skb_metadata_len(skb);
686  	cb->data_end  = skb->data + skb_headlen(skb);
687  }
688  
689  /* Similar to bpf_compute_data_pointers(), except that the original
690   * cb->data_end is saved to *saved_data_end so it can be restored later.
691   */
692  static inline void bpf_compute_and_save_data_end(
693  	struct sk_buff *skb, void **saved_data_end)
694  {
695  	struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;
696  
697  	*saved_data_end = cb->data_end;
698  	cb->data_end  = skb->data + skb_headlen(skb);
699  }
700  
701  /* Restore the data_end saved by bpf_compute_and_save_data_end(). */
702  static inline void bpf_restore_data_end(
703  	struct sk_buff *skb, void *saved_data_end)
704  {
705  	struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;
706  
707  	cb->data_end = saved_data_end;
708  }
709  
710  static inline u8 *bpf_skb_cb(struct sk_buff *skb)
711  {
712  	/* eBPF programs may read/write skb->cb[] area to transfer meta
713  	 * data between tail calls. Since this also needs to work with
714  	 * tc, that scratch memory is mapped to qdisc_skb_cb's data area.
715  	 *
716  	 * In some socket filter cases, the cb unfortunately needs to be
717  	 * saved/restored so that protocol specific skb->cb[] data won't
718   * be lost. In any case, due to unprivileged eBPF programs
719  	 * attached to sockets, we need to clear the bpf_skb_cb() area
720  	 * to not leak previous contents to user space.
721  	 */
722  	BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) != BPF_SKB_CB_LEN);
723  	BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) !=
724  		     FIELD_SIZEOF(struct qdisc_skb_cb, data));
725  
726  	return qdisc_skb_cb(skb)->data;
727  }
728  
729  static inline u32 __bpf_prog_run_save_cb(const struct bpf_prog *prog,
730  					 struct sk_buff *skb)
731  {
732  	u8 *cb_data = bpf_skb_cb(skb);
733  	u8 cb_saved[BPF_SKB_CB_LEN];
734  	u32 res;
735  
736  	if (unlikely(prog->cb_access)) {
737  		memcpy(cb_saved, cb_data, sizeof(cb_saved));
738  		memset(cb_data, 0, sizeof(cb_saved));
739  	}
740  
741  	res = BPF_PROG_RUN(prog, skb);
742  
743  	if (unlikely(prog->cb_access))
744  		memcpy(cb_data, cb_saved, sizeof(cb_saved));
745  
746  	return res;
747  }
748  
749  static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
750  				       struct sk_buff *skb)
751  {
752  	u32 res;
753  
754  	preempt_disable();
755  	res = __bpf_prog_run_save_cb(prog, skb);
756  	preempt_enable();
757  	return res;
758  }
759  
760  static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
761  					struct sk_buff *skb)
762  {
763  	u8 *cb_data = bpf_skb_cb(skb);
764  	u32 res;
765  
766  	if (unlikely(prog->cb_access))
767  		memset(cb_data, 0, BPF_SKB_CB_LEN);
768  
769  	preempt_disable();
770  	res = BPF_PROG_RUN(prog, skb);
771  	preempt_enable();
772  	return res;
773  }
774  
775  static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
776  					    struct xdp_buff *xdp)
777  {
778  	/* Caller needs to hold rcu_read_lock() (!), otherwise program
779  	 * can be released while still running, or map elements could be
780  	 * freed early while still having concurrent users. XDP fastpath
781  	 * already takes rcu_read_lock() when fetching the program, so
782  	 * it's not necessary here anymore.
783  	 */
784  	return BPF_PROG_RUN(prog, xdp);
785  }
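/* Editorial sketch of a driver-side XDP fast path (local variable names
 * are hypothetical); the rcu_read_lock() requirement is the one described
 * in the comment above, and the returned action (XDP_PASS, XDP_DROP,
 * XDP_TX, XDP_REDIRECT) is then dispatched by the driver:
 *
 *	rcu_read_lock();
 *	act = bpf_prog_run_xdp(xdp_prog, &xdp);
 *	rcu_read_unlock();
 */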
786  
787  static inline u32 bpf_prog_insn_size(const struct bpf_prog *prog)
788  {
789  	return prog->len * sizeof(struct bpf_insn);
790  }
791  
792  static inline u32 bpf_prog_tag_scratch_size(const struct bpf_prog *prog)
793  {
794  	return round_up(bpf_prog_insn_size(prog) +
795  			sizeof(__be64) + 1, SHA_MESSAGE_BYTES);
796  }
797  
798  static inline unsigned int bpf_prog_size(unsigned int proglen)
799  {
800  	return max(sizeof(struct bpf_prog),
801  		   offsetof(struct bpf_prog, insns[proglen]));
802  }
803  
804  static inline bool bpf_prog_was_classic(const struct bpf_prog *prog)
805  {
806  	/* When classic BPF programs have been loaded and the arch
807  	 * does not have a classic BPF JIT (anymore), they have been
808  	 * converted via bpf_migrate_filter() to eBPF and thus always
809  	 * have an unspec program type.
810  	 */
811  	return prog->type == BPF_PROG_TYPE_UNSPEC;
812  }
813  
814  static inline u32 bpf_ctx_off_adjust_machine(u32 size)
815  {
816  	const u32 size_machine = sizeof(unsigned long);
817  
818  	if (size > size_machine && size % size_machine == 0)
819  		size = size_machine;
820  
821  	return size;
822  }
823  
824  static inline bool
825  bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default)
826  {
827  	return size <= size_default && (size & (size - 1)) == 0;
828  }
829  
830  static inline u8
831  bpf_ctx_narrow_access_offset(u32 off, u32 size, u32 size_default)
832  {
833  	u8 access_off = off & (size_default - 1);
834  
835  #ifdef __LITTLE_ENDIAN
836  	return access_off;
837  #else
838  	return size_default - (access_off + size);
839  #endif
840  }
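/* Editorial worked example for the two narrow-access helpers above:
 * with size_default == 4, a 1-byte access at off == 2 passes
 * bpf_ctx_narrow_access_ok(), and bpf_ctx_narrow_access_offset()
 * shifts the load by 2 bytes on little-endian hosts and by
 * 4 - (2 + 1) == 1 byte on big-endian hosts.
 */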
841  
842  #define bpf_ctx_wide_access_ok(off, size, type, field)			\
843  	(size == sizeof(__u64) &&					\
844  	off >= offsetof(type, field) &&					\
845  	off + sizeof(__u64) <= offsetofend(type, field) &&		\
846  	off % sizeof(__u64) == 0)
847  
848  #define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
849  
850  static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
851  {
852  #ifndef CONFIG_BPF_JIT_ALWAYS_ON
853  	if (!fp->jited) {
854  		set_vm_flush_reset_perms(fp);
855  		set_memory_ro((unsigned long)fp, fp->pages);
856  	}
857  #endif
858  }
859  
860  static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
861  {
862  	set_vm_flush_reset_perms(hdr);
863  	set_memory_ro((unsigned long)hdr, hdr->pages);
864  	set_memory_x((unsigned long)hdr, hdr->pages);
865  }
866  
867  static inline struct bpf_binary_header *
868  bpf_jit_binary_hdr(const struct bpf_prog *fp)
869  {
870  	unsigned long real_start = (unsigned long)fp->bpf_func;
871  	unsigned long addr = real_start & PAGE_MASK;
872  
873  	return (void *)addr;
874  }
875  
876  int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap);
877  static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
878  {
879  	return sk_filter_trim_cap(sk, skb, 1);
880  }
881  
882  struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err);
883  void bpf_prog_free(struct bpf_prog *fp);
884  
885  bool bpf_opcode_in_insntable(u8 code);
886  
887  void bpf_prog_free_linfo(struct bpf_prog *prog);
888  void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
889  			       const u32 *insn_to_jit_off);
890  int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog);
891  void bpf_prog_free_jited_linfo(struct bpf_prog *prog);
892  void bpf_prog_free_unused_jited_linfo(struct bpf_prog *prog);
893  
894  struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags);
895  struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags);
896  struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
897  				  gfp_t gfp_extra_flags);
898  void __bpf_prog_free(struct bpf_prog *fp);
899  
900  static inline void bpf_prog_unlock_free(struct bpf_prog *fp)
901  {
902  	__bpf_prog_free(fp);
903  }
904  
905  typedef int (*bpf_aux_classic_check_t)(struct sock_filter *filter,
906  				       unsigned int flen);
907  
908  int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog);
909  int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
910  			      bpf_aux_classic_check_t trans, bool save_orig);
911  void bpf_prog_destroy(struct bpf_prog *fp);
912  
913  int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
914  int sk_attach_bpf(u32 ufd, struct sock *sk);
915  int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk);
916  int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk);
917  void sk_reuseport_prog_free(struct bpf_prog *prog);
918  int sk_detach_filter(struct sock *sk);
919  int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
920  		  unsigned int len);
921  
922  bool sk_filter_charge(struct sock *sk, struct sk_filter *fp);
923  void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
924  
925  u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
926  #define __bpf_call_base_args \
927  	((u64 (*)(u64, u64, u64, u64, u64, const struct bpf_insn *)) \
928  	 (void *)__bpf_call_base)
929  
930  struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
931  void bpf_jit_compile(struct bpf_prog *prog);
932  bool bpf_jit_needs_zext(void);
933  bool bpf_helper_changes_pkt_data(void *func);
934  
935  static inline bool bpf_dump_raw_ok(const struct cred *cred)
936  {
937  	/* Reconstruction of call-sites is dependent on kallsyms,
938  	 * so the dump is subject to the same restriction.
939  	 */
940  	return kallsyms_show_value(cred);
941  }
942  
943  struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
944  				       const struct bpf_insn *patch, u32 len);
945  int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt);
946  
947  void bpf_clear_redirect_map(struct bpf_map *map);
948  
949  static inline bool xdp_return_frame_no_direct(void)
950  {
951  	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
952  
953  	return ri->kern_flags & BPF_RI_F_RF_NO_DIRECT;
954  }
955  
956  static inline void xdp_set_return_frame_no_direct(void)
957  {
958  	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
959  
960  	ri->kern_flags |= BPF_RI_F_RF_NO_DIRECT;
961  }
962  
963  static inline void xdp_clear_return_frame_no_direct(void)
964  {
965  	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
966  
967  	ri->kern_flags &= ~BPF_RI_F_RF_NO_DIRECT;
968  }
969  
970  static inline int xdp_ok_fwd_dev(const struct net_device *fwd,
971  				 unsigned int pktlen)
972  {
973  	unsigned int len;
974  
975  	if (unlikely(!(fwd->flags & IFF_UP)))
976  		return -ENETDOWN;
977  
978  	len = fwd->mtu + fwd->hard_header_len + VLAN_HLEN;
979  	if (pktlen > len)
980  		return -EMSGSIZE;
981  
982  	return 0;
983  }
984  
985  /* The pair of xdp_do_redirect and xdp_do_flush_map MUST be called in the
986   * same CPU context. Further, for best results, no more than a single map
987   * should be used for the do_redirect/do_flush pair. This limitation is
988   * because we only track one map and force a flush when the map changes.
989   * This does not appear to be a real limitation for existing software.
990   */
991  int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
992  			    struct xdp_buff *xdp, struct bpf_prog *prog);
993  int xdp_do_redirect(struct net_device *dev,
994  		    struct xdp_buff *xdp,
995  		    struct bpf_prog *prog);
996  void xdp_do_flush_map(void);
997  
998  void bpf_warn_invalid_xdp_action(u32 act);
999  
1000  #ifdef CONFIG_INET
1001  struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
1002  				  struct bpf_prog *prog, struct sk_buff *skb,
1003  				  u32 hash);
1004  #else
1005  static inline struct sock *
1006  bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
1007  		     struct bpf_prog *prog, struct sk_buff *skb,
1008  		     u32 hash)
1009  {
1010  	return NULL;
1011  }
1012  #endif
1013  
1014  #ifdef CONFIG_BPF_JIT
1015  extern int bpf_jit_enable;
1016  extern int bpf_jit_harden;
1017  extern int bpf_jit_kallsyms;
1018  extern long bpf_jit_limit;
1019  extern long bpf_jit_limit_max;
1020  
1021  typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
1022  
1023  struct bpf_binary_header *
1024  bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
1025  		     unsigned int alignment,
1026  		     bpf_jit_fill_hole_t bpf_fill_ill_insns);
1027  void bpf_jit_binary_free(struct bpf_binary_header *hdr);
1028  u64 bpf_jit_alloc_exec_limit(void);
1029  void *bpf_jit_alloc_exec(unsigned long size);
1030  void bpf_jit_free_exec(void *addr);
1031  void bpf_jit_free(struct bpf_prog *fp);
1032  
1033  int bpf_jit_get_func_addr(const struct bpf_prog *prog,
1034  			  const struct bpf_insn *insn, bool extra_pass,
1035  			  u64 *func_addr, bool *func_addr_fixed);
1036  
1037  struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp);
1038  void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other);
1039  
1040  static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
1041  				u32 pass, void *image)
1042  {
1043  	pr_err("flen=%u proglen=%u pass=%u image=%pK from=%s pid=%d\n", flen,
1044  	       proglen, pass, image, current->comm, task_pid_nr(current));
1045  
1046  	if (image)
1047  		print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET,
1048  			       16, 1, image, proglen, false);
1049  }
1050  
1051  static inline bool bpf_jit_is_ebpf(void)
1052  {
1053  # ifdef CONFIG_HAVE_EBPF_JIT
1054  	return true;
1055  # else
1056  	return false;
1057  # endif
1058  }
1059  
1060  static inline bool ebpf_jit_enabled(void)
1061  {
1062  	return bpf_jit_enable && bpf_jit_is_ebpf();
1063  }
1064  
1065  static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
1066  {
1067  	return fp->jited && bpf_jit_is_ebpf();
1068  }
1069  
1070  static inline bool bpf_jit_blinding_enabled(struct bpf_prog *prog)
1071  {
1072  	/* These are the prerequisites; should someone ever have the
1073  	 * idea to call blinding outside of them, we make sure to
1074  	 * bail out.
1075  	 */
1076  	if (!bpf_jit_is_ebpf())
1077  		return false;
1078  	if (!prog->jit_requested)
1079  		return false;
1080  	if (!bpf_jit_harden)
1081  		return false;
1082  	if (bpf_jit_harden == 1 && capable(CAP_SYS_ADMIN))
1083  		return false;
1084  
1085  	return true;
1086  }
1087  
1088  static inline bool bpf_jit_kallsyms_enabled(void)
1089  {
1090  	/* There are a couple of corner cases where kallsyms should
1091  	 * not be enabled, e.g. when hardening is on.
1092  	 */
1093  	if (bpf_jit_harden)
1094  		return false;
1095  	if (!bpf_jit_kallsyms)
1096  		return false;
1097  	if (bpf_jit_kallsyms == 1)
1098  		return true;
1099  
1100  	return false;
1101  }
1102  
1103  const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
1104  				 unsigned long *off, char *sym);
1105  bool is_bpf_text_address(unsigned long addr);
1106  int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
1107  		    char *sym);
1108  
1109  static inline const char *
1110  bpf_address_lookup(unsigned long addr, unsigned long *size,
1111  		   unsigned long *off, char **modname, char *sym)
1112  {
1113  	const char *ret = __bpf_address_lookup(addr, size, off, sym);
1114  
1115  	if (ret && modname)
1116  		*modname = NULL;
1117  	return ret;
1118  }
1119  
1120  void bpf_prog_kallsyms_add(struct bpf_prog *fp);
1121  void bpf_prog_kallsyms_del(struct bpf_prog *fp);
1122  void bpf_get_prog_name(const struct bpf_prog *prog, char *sym);
1123  
1124  #else /* CONFIG_BPF_JIT */
1125  
1126  static inline bool ebpf_jit_enabled(void)
1127  {
1128  	return false;
1129  }
1130  
1131  static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
1132  {
1133  	return false;
1134  }
1135  
1136  static inline void bpf_jit_free(struct bpf_prog *fp)
1137  {
1138  	bpf_prog_unlock_free(fp);
1139  }
1140  
1141  static inline bool bpf_jit_kallsyms_enabled(void)
1142  {
1143  	return false;
1144  }
1145  
1146  static inline const char *
1147  __bpf_address_lookup(unsigned long addr, unsigned long *size,
1148  		     unsigned long *off, char *sym)
1149  {
1150  	return NULL;
1151  }
1152  
1153  static inline bool is_bpf_text_address(unsigned long addr)
1154  {
1155  	return false;
1156  }
1157  
1158  static inline int bpf_get_kallsym(unsigned int symnum, unsigned long *value,
1159  				  char *type, char *sym)
1160  {
1161  	return -ERANGE;
1162  }
1163  
1164  static inline const char *
1165  bpf_address_lookup(unsigned long addr, unsigned long *size,
1166  		   unsigned long *off, char **modname, char *sym)
1167  {
1168  	return NULL;
1169  }
1170  
1171  static inline void bpf_prog_kallsyms_add(struct bpf_prog *fp)
1172  {
1173  }
1174  
1175  static inline void bpf_prog_kallsyms_del(struct bpf_prog *fp)
1176  {
1177  }
1178  
1179  static inline void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
1180  {
1181  	sym[0] = '\0';
1182  }
1183  
1184  #endif /* CONFIG_BPF_JIT */
1185  
1186  void bpf_prog_kallsyms_del_all(struct bpf_prog *fp);
1187  
1188  #define BPF_ANC		BIT(15)
1189  
1190  static inline bool bpf_needs_clear_a(const struct sock_filter *first)
1191  {
1192  	switch (first->code) {
1193  	case BPF_RET | BPF_K:
1194  	case BPF_LD | BPF_W | BPF_LEN:
1195  		return false;
1196  
1197  	case BPF_LD | BPF_W | BPF_ABS:
1198  	case BPF_LD | BPF_H | BPF_ABS:
1199  	case BPF_LD | BPF_B | BPF_ABS:
1200  		if (first->k == SKF_AD_OFF + SKF_AD_ALU_XOR_X)
1201  			return true;
1202  		return false;
1203  
1204  	default:
1205  		return true;
1206  	}
1207  }
1208  
1209  static inline u16 bpf_anc_helper(const struct sock_filter *ftest)
1210  {
1211  	BUG_ON(ftest->code & BPF_ANC);
1212  
1213  	switch (ftest->code) {
1214  	case BPF_LD | BPF_W | BPF_ABS:
1215  	case BPF_LD | BPF_H | BPF_ABS:
1216  	case BPF_LD | BPF_B | BPF_ABS:
1217  #define BPF_ANCILLARY(CODE)	case SKF_AD_OFF + SKF_AD_##CODE:	\
1218  				return BPF_ANC | SKF_AD_##CODE
1219  		switch (ftest->k) {
1220  		BPF_ANCILLARY(PROTOCOL);
1221  		BPF_ANCILLARY(PKTTYPE);
1222  		BPF_ANCILLARY(IFINDEX);
1223  		BPF_ANCILLARY(NLATTR);
1224  		BPF_ANCILLARY(NLATTR_NEST);
1225  		BPF_ANCILLARY(MARK);
1226  		BPF_ANCILLARY(QUEUE);
1227  		BPF_ANCILLARY(HATYPE);
1228  		BPF_ANCILLARY(RXHASH);
1229  		BPF_ANCILLARY(CPU);
1230  		BPF_ANCILLARY(ALU_XOR_X);
1231  		BPF_ANCILLARY(VLAN_TAG);
1232  		BPF_ANCILLARY(VLAN_TAG_PRESENT);
1233  		BPF_ANCILLARY(PAY_OFFSET);
1234  		BPF_ANCILLARY(RANDOM);
1235  		BPF_ANCILLARY(VLAN_TPID);
1236  		}
1237  		/* Fallthrough. */
1238  	default:
1239  		return ftest->code;
1240  	}
1241  }
1242  
1243  void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb,
1244  					   int k, unsigned int size);
1245  
1246  static inline void *bpf_load_pointer(const struct sk_buff *skb, int k,
1247  				     unsigned int size, void *buffer)
1248  {
1249  	if (k >= 0)
1250  		return skb_header_pointer(skb, k, size, buffer);
1251  
1252  	return bpf_internal_load_pointer_neg_helper(skb, k, size);
1253  }
1254  
1255  static inline int bpf_tell_extensions(void)
1256  {
1257  	return SKF_AD_MAX;
1258  }
1259  
1260  struct bpf_sock_addr_kern {
1261  	struct sock *sk;
1262  	struct sockaddr *uaddr;
1263  	/* Temporary "register" to make indirect stores to nested structures
1264  	 * defined above. We need three registers to make such a store, but
1265  	 * only two (src and dst) are available at convert_ctx_access time
1266  	 */
1267  	u64 tmp_reg;
1268  	void *t_ctx;	/* Attach type specific context. */
1269  };
1270  
1271  struct bpf_sock_ops_kern {
1272  	struct	sock *sk;
1273  	u32	op;
1274  	union {
1275  		u32 args[4];
1276  		u32 reply;
1277  		u32 replylong[4];
1278  	};
1279  	u32	is_fullsock;
1280  	u64	temp;			/* temp and everything after is not
1281  					 * initialized to 0 before calling
1282  					 * the BPF program. New fields that
1283  					 * should be initialized to 0 should
1284  					 * be inserted before temp.
1285  					 * temp is scratch storage used by
1286  					 * sock_ops_convert_ctx_access
1287  					 * as temporary storage of a register.
1288  					 */
1289  };
1290  
1291  struct bpf_sysctl_kern {
1292  	struct ctl_table_header *head;
1293  	struct ctl_table *table;
1294  	void *cur_val;
1295  	size_t cur_len;
1296  	void *new_val;
1297  	size_t new_len;
1298  	int new_updated;
1299  	int write;
1300  	loff_t *ppos;
1301  	/* Temporary "register" for indirect stores to ppos. */
1302  	u64 tmp_reg;
1303  };
1304  
1305  struct bpf_sockopt_kern {
1306  	struct sock	*sk;
1307  	u8		*optval;
1308  	u8		*optval_end;
1309  	s32		level;
1310  	s32		optname;
1311  	s32		optlen;
1312  	s32		retval;
1313  };
1314  
1315  #endif /* __LINUX_FILTER_H__ */
1316