/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *	Jay Schulist <jschlst@samba.org>
 *	Alexei Starovoitov <ast@plumgrid.com>
 *	Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */

#include <linux/filter.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/moduleloader.h>
#include <linux/bpf.h>

#include <asm/unaligned.h>

/* Registers */
#define BPF_R0	regs[BPF_REG_0]
#define BPF_R1	regs[BPF_REG_1]
#define BPF_R2	regs[BPF_REG_2]
#define BPF_R3	regs[BPF_REG_3]
#define BPF_R4	regs[BPF_REG_4]
#define BPF_R5	regs[BPF_REG_5]
#define BPF_R6	regs[BPF_REG_6]
#define BPF_R7	regs[BPF_REG_7]
#define BPF_R8	regs[BPF_REG_8]
#define BPF_R9	regs[BPF_REG_9]
#define BPF_R10	regs[BPF_REG_10]

/* Named registers */
#define DST	regs[insn->dst_reg]
#define SRC	regs[insn->src_reg]
#define FP	regs[BPF_REG_FP]
#define ARG1	regs[BPF_REG_ARG1]
#define CTX	regs[BPF_REG_CTX]
#define IMM	insn->imm

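/* Illustrative note (not part of the original file): inside the
 * interpreter below, these macros index the interpreter's register
 * file through the current instruction. For an instruction like
 *
 *	{ .code = BPF_ALU64 | BPF_ADD | BPF_X,
 *	  .dst_reg = BPF_REG_0, .src_reg = BPF_REG_1 }
 *
 * the handler body "DST = DST + SRC;" expands to
 * "regs[insn->dst_reg] = regs[insn->dst_reg] + regs[insn->src_reg]",
 * i.e. regs[0] += regs[1].
 */
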
/* No hurry in this branch
 *
 * Exported for the bpf jit load helper.
 */
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
{
	u8 *ptr = NULL;

	if (k >= SKF_NET_OFF)
		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
	else if (k >= SKF_LL_OFF)
		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;

	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
		return ptr;

	return NULL;
}

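/* A minimal usage sketch (not part of the original file; the example_*
 * name is illustrative only): SKF_NET_OFF and SKF_LL_OFF are negative
 * offset bases, so passing k == SKF_NET_OFF asks for byte 0 of the
 * network header. The helper returns NULL when the requested bytes are
 * not in the linear skb data, so the result must always be checked.
 */
static inline u8 example_first_net_header_byte(const struct sk_buff *skb)
{
	u8 *p = bpf_internal_load_pointer_neg_helper(skb, SKF_NET_OFF, 1);

	return p ? *p : 0;
}
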
struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO |
			  gfp_extra_flags;
	struct bpf_prog_aux *aux;
	struct bpf_prog *fp;

	size = round_up(size, PAGE_SIZE);
	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
	if (fp == NULL)
		return NULL;

	kmemcheck_annotate_bitfield(fp, meta);

	aux = kzalloc(sizeof(*aux), GFP_KERNEL | gfp_extra_flags);
	if (aux == NULL) {
		vfree(fp);
		return NULL;
	}

	fp->pages = size / PAGE_SIZE;
	fp->aux = aux;
	fp->aux->prog = fp;

	return fp;
}
EXPORT_SYMBOL_GPL(bpf_prog_alloc);

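/* A minimal usage sketch (not part of the original file; the example_*
 * name is illustrative only): callers size the allocation with
 * bpf_prog_size() for the desired instruction count, copy their
 * instructions into fp->insnsi, and release everything with
 * bpf_prog_free() (or __bpf_prog_free() before the program is live).
 */
static struct bpf_prog *example_prog_from_insns(const struct bpf_insn *insns,
						unsigned int len)
{
	struct bpf_prog *fp = bpf_prog_alloc(bpf_prog_size(len), GFP_USER);

	if (!fp)
		return NULL;

	fp->len = len;
	memcpy(fp->insnsi, insns, len * sizeof(*insns));

	return fp;
}
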
struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
				  gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO |
			  gfp_extra_flags;
	struct bpf_prog *fp;

	BUG_ON(fp_old == NULL);

	size = round_up(size, PAGE_SIZE);
	if (size <= fp_old->pages * PAGE_SIZE)
		return fp_old;

	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
	if (fp != NULL) {
		kmemcheck_annotate_bitfield(fp, meta);

		memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
		fp->pages = size / PAGE_SIZE;
		fp->aux->prog = fp;

		/* We keep fp->aux from fp_old around in the new
		 * reallocated structure.
		 */
		fp_old->aux = NULL;
		__bpf_prog_free(fp_old);
	}

	return fp;
}
EXPORT_SYMBOL_GPL(bpf_prog_realloc);

void __bpf_prog_free(struct bpf_prog *fp)
{
	kfree(fp->aux);
	vfree(fp);
}
EXPORT_SYMBOL_GPL(__bpf_prog_free);

static bool bpf_is_jmp_and_has_target(const struct bpf_insn *insn)
{
	return BPF_CLASS(insn->code) == BPF_JMP  &&
	       /* Call and Exit are both special jumps with no
		* target inside the BPF instruction image.
		*/
	       BPF_OP(insn->code) != BPF_CALL &&
	       BPF_OP(insn->code) != BPF_EXIT;
}

static void bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta)
{
	struct bpf_insn *insn = prog->insnsi;
	u32 i, insn_cnt = prog->len;

	for (i = 0; i < insn_cnt; i++, insn++) {
		if (!bpf_is_jmp_and_has_target(insn))
			continue;

		/* Adjust offset of jmps if we cross boundaries. */
		if (i < pos && i + insn->off + 1 > pos)
			insn->off += delta;
		else if (i > pos + delta && i + insn->off + 1 <= pos + delta)
			insn->off -= delta;
	}
}

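/* Worked example (not part of the original file): a jump's target is
 * i + off + 1. Consider a forward jump at index 1 with off == 2, i.e.
 * it lands on index 4. If one instruction grows into two at pos == 3
 * (delta == 1), the jump source stays at index 1 but its target moves
 * to index 5, so the first branch of the loop above rewrites off from
 * 2 to 3. Jumps that lie entirely before or entirely after the patched
 * region are left untouched.
 */
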
struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
				       const struct bpf_insn *patch, u32 len)
{
	u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
	struct bpf_prog *prog_adj;

	/* Since our patchlet doesn't expand the image, we're done. */
	if (insn_delta == 0) {
		memcpy(prog->insnsi + off, patch, sizeof(*patch));
		return prog;
	}

	insn_adj_cnt = prog->len + insn_delta;

	/* Several new instructions need to be inserted. Make room
	 * for them. Likely, there's no need for a new allocation as
	 * the last page could have large enough tailroom.
	 */
	prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
				    GFP_USER);
	if (!prog_adj)
		return NULL;

	prog_adj->len = insn_adj_cnt;

	/* Patching happens in 3 steps:
	 *
	 * 1) Move over tail of insnsi from next instruction onwards,
	 *    so we can patch the single target insn with one or more
	 *    new ones (patching is always from 1 to n insns, n > 0).
	 * 2) Inject new instructions at the target location.
	 * 3) Adjust branch offsets if necessary.
	 */
	insn_rest = insn_adj_cnt - off - len;

	memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
		sizeof(*patch) * insn_rest);
	memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);

	bpf_adj_branches(prog_adj, off, insn_delta);

	return prog_adj;
}

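/* A minimal sketch (not part of the original file; the example_* name
 * is illustrative only) of expanding one insn into two, the way fixups
 * typically use this API: replace the instruction at index off with a
 * two-insn patchlet. On success the old prog handle must not be used
 * any more, since the image may have been reallocated.
 */
static struct bpf_prog *example_expand_insn(struct bpf_prog *prog, u32 off)
{
	const struct bpf_insn patchlet[] = {
		BPF_MOV64_IMM(BPF_REG_2, 0),	/* replacement insn #1 */
		BPF_MOV64_IMM(BPF_REG_3, 0),	/* replacement insn #2 */
	};

	return bpf_patch_insn_single(prog, off, patchlet,
				     ARRAY_SIZE(patchlet));
}
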
#ifdef CONFIG_BPF_JIT
struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
		     unsigned int alignment,
		     bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
	struct bpf_binary_header *hdr;
	unsigned int size, hole, start;

	/* Most BPF filters are really small, but if some of them
	 * fill a page, allow at least 128 extra bytes to insert a
	 * random section of illegal instructions.
	 */
	size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
	hdr = module_alloc(size);
	if (hdr == NULL)
		return NULL;

	/* Fill space with illegal/arch-dep instructions. */
	bpf_fill_ill_insns(hdr, size);

	hdr->pages = size / PAGE_SIZE;
	hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
		     PAGE_SIZE - sizeof(*hdr));
	start = (prandom_u32() % hole) & ~(alignment - 1);

	/* Leave a random number of instructions before BPF code. */
	*image_ptr = &hdr->image[start];

	return hdr;
}

void bpf_jit_binary_free(struct bpf_binary_header *hdr)
{
	module_memfree(hdr);
}
#endif /* CONFIG_BPF_JIT */

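/* A minimal sketch (not part of the original file; the example_* names,
 * the 16-byte alignment, and the 0xcc fill byte are illustrative
 * assumptions) of how an arch JIT might drive this pair. The random
 * start offset inside the image means *image_ptr, not hdr->image, is
 * where code must be emitted.
 */
#ifdef CONFIG_BPF_JIT
static void example_fill_ill_insns(void *area, unsigned int size)
{
	/* Arch-specific in reality: fill with trapping instructions. */
	memset(area, 0xcc, size);
}

static struct bpf_binary_header *example_jit_alloc(unsigned int proglen,
						   u8 **image_ptr)
{
	return bpf_jit_binary_alloc(proglen, image_ptr, 16,
				    example_fill_ill_insns);
}
#endif
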
/* Base function for offset calculation. Needs to go into .text section,
 * therefore keeping it non-static as well; will also be used by JITs
 * anyway later on, so do not let the compiler omit it.
 */
noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__bpf_call_base);

#ifndef CONFIG_BPF_JIT_ALWAYS_ON
/**
 *	__bpf_prog_run - run eBPF program on a given context
 *	@ctx: is the data we are operating on
 *	@insn: is the array of eBPF instructions
 *
 * Decode and execute eBPF instructions.
 */
static unsigned int __bpf_prog_run(void *ctx, const struct bpf_insn *insn)
{
	u64 stack[MAX_BPF_STACK / sizeof(u64)];
	u64 regs[MAX_BPF_REG], tmp;
	static const void *jumptable[256] = {
		[0 ... 255] = &&default_label,
		/* Now overwrite non-defaults ... */
		/* 32 bit ALU operations */
		[BPF_ALU | BPF_ADD | BPF_X] = &&ALU_ADD_X,
		[BPF_ALU | BPF_ADD | BPF_K] = &&ALU_ADD_K,
		[BPF_ALU | BPF_SUB | BPF_X] = &&ALU_SUB_X,
		[BPF_ALU | BPF_SUB | BPF_K] = &&ALU_SUB_K,
		[BPF_ALU | BPF_AND | BPF_X] = &&ALU_AND_X,
		[BPF_ALU | BPF_AND | BPF_K] = &&ALU_AND_K,
		[BPF_ALU | BPF_OR | BPF_X]  = &&ALU_OR_X,
		[BPF_ALU | BPF_OR | BPF_K]  = &&ALU_OR_K,
		[BPF_ALU | BPF_LSH | BPF_X] = &&ALU_LSH_X,
		[BPF_ALU | BPF_LSH | BPF_K] = &&ALU_LSH_K,
		[BPF_ALU | BPF_RSH | BPF_X] = &&ALU_RSH_X,
		[BPF_ALU | BPF_RSH | BPF_K] = &&ALU_RSH_K,
		[BPF_ALU | BPF_XOR | BPF_X] = &&ALU_XOR_X,
		[BPF_ALU | BPF_XOR | BPF_K] = &&ALU_XOR_K,
		[BPF_ALU | BPF_MUL | BPF_X] = &&ALU_MUL_X,
		[BPF_ALU | BPF_MUL | BPF_K] = &&ALU_MUL_K,
		[BPF_ALU | BPF_MOV | BPF_X] = &&ALU_MOV_X,
		[BPF_ALU | BPF_MOV | BPF_K] = &&ALU_MOV_K,
		[BPF_ALU | BPF_DIV | BPF_X] = &&ALU_DIV_X,
		[BPF_ALU | BPF_DIV | BPF_K] = &&ALU_DIV_K,
		[BPF_ALU | BPF_MOD | BPF_X] = &&ALU_MOD_X,
		[BPF_ALU | BPF_MOD | BPF_K] = &&ALU_MOD_K,
		[BPF_ALU | BPF_NEG] = &&ALU_NEG,
		[BPF_ALU | BPF_END | BPF_TO_BE] = &&ALU_END_TO_BE,
		[BPF_ALU | BPF_END | BPF_TO_LE] = &&ALU_END_TO_LE,
		/* 64 bit ALU operations */
		[BPF_ALU64 | BPF_ADD | BPF_X] = &&ALU64_ADD_X,
		[BPF_ALU64 | BPF_ADD | BPF_K] = &&ALU64_ADD_K,
		[BPF_ALU64 | BPF_SUB | BPF_X] = &&ALU64_SUB_X,
		[BPF_ALU64 | BPF_SUB | BPF_K] = &&ALU64_SUB_K,
		[BPF_ALU64 | BPF_AND | BPF_X] = &&ALU64_AND_X,
		[BPF_ALU64 | BPF_AND | BPF_K] = &&ALU64_AND_K,
		[BPF_ALU64 | BPF_OR | BPF_X] = &&ALU64_OR_X,
		[BPF_ALU64 | BPF_OR | BPF_K] = &&ALU64_OR_K,
		[BPF_ALU64 | BPF_LSH | BPF_X] = &&ALU64_LSH_X,
		[BPF_ALU64 | BPF_LSH | BPF_K] = &&ALU64_LSH_K,
		[BPF_ALU64 | BPF_RSH | BPF_X] = &&ALU64_RSH_X,
		[BPF_ALU64 | BPF_RSH | BPF_K] = &&ALU64_RSH_K,
		[BPF_ALU64 | BPF_XOR | BPF_X] = &&ALU64_XOR_X,
		[BPF_ALU64 | BPF_XOR | BPF_K] = &&ALU64_XOR_K,
		[BPF_ALU64 | BPF_MUL | BPF_X] = &&ALU64_MUL_X,
		[BPF_ALU64 | BPF_MUL | BPF_K] = &&ALU64_MUL_K,
		[BPF_ALU64 | BPF_MOV | BPF_X] = &&ALU64_MOV_X,
		[BPF_ALU64 | BPF_MOV | BPF_K] = &&ALU64_MOV_K,
		[BPF_ALU64 | BPF_ARSH | BPF_X] = &&ALU64_ARSH_X,
		[BPF_ALU64 | BPF_ARSH | BPF_K] = &&ALU64_ARSH_K,
		[BPF_ALU64 | BPF_DIV | BPF_X] = &&ALU64_DIV_X,
		[BPF_ALU64 | BPF_DIV | BPF_K] = &&ALU64_DIV_K,
		[BPF_ALU64 | BPF_MOD | BPF_X] = &&ALU64_MOD_X,
		[BPF_ALU64 | BPF_MOD | BPF_K] = &&ALU64_MOD_K,
		[BPF_ALU64 | BPF_NEG] = &&ALU64_NEG,
		/* Call instruction */
		[BPF_JMP | BPF_CALL] = &&JMP_CALL,
		[BPF_JMP | BPF_CALL | BPF_X] = &&JMP_TAIL_CALL,
		/* Jumps */
		[BPF_JMP | BPF_JA] = &&JMP_JA,
		[BPF_JMP | BPF_JEQ | BPF_X] = &&JMP_JEQ_X,
		[BPF_JMP | BPF_JEQ | BPF_K] = &&JMP_JEQ_K,
		[BPF_JMP | BPF_JNE | BPF_X] = &&JMP_JNE_X,
		[BPF_JMP | BPF_JNE | BPF_K] = &&JMP_JNE_K,
		[BPF_JMP | BPF_JGT | BPF_X] = &&JMP_JGT_X,
		[BPF_JMP | BPF_JGT | BPF_K] = &&JMP_JGT_K,
		[BPF_JMP | BPF_JGE | BPF_X] = &&JMP_JGE_X,
		[BPF_JMP | BPF_JGE | BPF_K] = &&JMP_JGE_K,
		[BPF_JMP | BPF_JSGT | BPF_X] = &&JMP_JSGT_X,
		[BPF_JMP | BPF_JSGT | BPF_K] = &&JMP_JSGT_K,
		[BPF_JMP | BPF_JSGE | BPF_X] = &&JMP_JSGE_X,
		[BPF_JMP | BPF_JSGE | BPF_K] = &&JMP_JSGE_K,
		[BPF_JMP | BPF_JSET | BPF_X] = &&JMP_JSET_X,
		[BPF_JMP | BPF_JSET | BPF_K] = &&JMP_JSET_K,
		/* Program return */
		[BPF_JMP | BPF_EXIT] = &&JMP_EXIT,
		/* Store instructions */
		[BPF_STX | BPF_MEM | BPF_B] = &&STX_MEM_B,
		[BPF_STX | BPF_MEM | BPF_H] = &&STX_MEM_H,
		[BPF_STX | BPF_MEM | BPF_W] = &&STX_MEM_W,
		[BPF_STX | BPF_MEM | BPF_DW] = &&STX_MEM_DW,
		[BPF_STX | BPF_XADD | BPF_W] = &&STX_XADD_W,
		[BPF_STX | BPF_XADD | BPF_DW] = &&STX_XADD_DW,
		[BPF_ST | BPF_MEM | BPF_B] = &&ST_MEM_B,
		[BPF_ST | BPF_MEM | BPF_H] = &&ST_MEM_H,
		[BPF_ST | BPF_MEM | BPF_W] = &&ST_MEM_W,
		[BPF_ST | BPF_MEM | BPF_DW] = &&ST_MEM_DW,
		/* Load instructions */
		[BPF_LDX | BPF_MEM | BPF_B] = &&LDX_MEM_B,
		[BPF_LDX | BPF_MEM | BPF_H] = &&LDX_MEM_H,
		[BPF_LDX | BPF_MEM | BPF_W] = &&LDX_MEM_W,
		[BPF_LDX | BPF_MEM | BPF_DW] = &&LDX_MEM_DW,
		[BPF_LD | BPF_ABS | BPF_W] = &&LD_ABS_W,
		[BPF_LD | BPF_ABS | BPF_H] = &&LD_ABS_H,
		[BPF_LD | BPF_ABS | BPF_B] = &&LD_ABS_B,
		[BPF_LD | BPF_IND | BPF_W] = &&LD_IND_W,
		[BPF_LD | BPF_IND | BPF_H] = &&LD_IND_H,
		[BPF_LD | BPF_IND | BPF_B] = &&LD_IND_B,
		[BPF_LD | BPF_IMM | BPF_DW] = &&LD_IMM_DW,
	};
	u32 tail_call_cnt = 0;
	void *ptr;
	int off;

#define CONT	 ({ insn++; goto select_insn; })
#define CONT_JMP ({ insn++; goto select_insn; })

	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)];
	ARG1 = (u64) (unsigned long) ctx;

	/* Registers used in classic BPF programs need to be reset first. */
	regs[BPF_REG_A] = 0;
	regs[BPF_REG_X] = 0;

select_insn:
	goto *jumptable[insn->code];

	/* ALU */
#define ALU(OPCODE, OP)			\
	ALU64_##OPCODE##_X:		\
		DST = DST OP SRC;	\
		CONT;			\
	ALU_##OPCODE##_X:		\
		DST = (u32) DST OP (u32) SRC;	\
		CONT;			\
	ALU64_##OPCODE##_K:		\
		DST = DST OP IMM;		\
		CONT;			\
	ALU_##OPCODE##_K:		\
		DST = (u32) DST OP (u32) IMM;	\
		CONT;

	ALU(ADD,  +)
	ALU(SUB,  -)
	ALU(AND,  &)
	ALU(OR,   |)
	ALU(LSH, <<)
	ALU(RSH, >>)
	ALU(XOR,  ^)
	ALU(MUL,  *)
#undef ALU
	ALU_NEG:
		DST = (u32) -DST;
		CONT;
	ALU64_NEG:
		DST = -DST;
		CONT;
	ALU_MOV_X:
		DST = (u32) SRC;
		CONT;
	ALU_MOV_K:
		DST = (u32) IMM;
		CONT;
	ALU64_MOV_X:
		DST = SRC;
		CONT;
	ALU64_MOV_K:
		DST = IMM;
		CONT;
	LD_IMM_DW:
		DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
		insn++;
		CONT;
	ALU64_ARSH_X:
		(*(s64 *) &DST) >>= SRC;
		CONT;
	ALU64_ARSH_K:
		(*(s64 *) &DST) >>= IMM;
		CONT;
	ALU64_MOD_X:
		if (unlikely(SRC == 0))
			return 0;
		div64_u64_rem(DST, SRC, &tmp);
		DST = tmp;
		CONT;
	ALU_MOD_X:
		if (unlikely((u32)SRC == 0))
			return 0;
		tmp = (u32) DST;
		DST = do_div(tmp, (u32) SRC);
		CONT;
	ALU64_MOD_K:
		div64_u64_rem(DST, IMM, &tmp);
		DST = tmp;
		CONT;
	ALU_MOD_K:
		tmp = (u32) DST;
		DST = do_div(tmp, (u32) IMM);
		CONT;
	ALU64_DIV_X:
		if (unlikely(SRC == 0))
			return 0;
		DST = div64_u64(DST, SRC);
		CONT;
	ALU_DIV_X:
		if (unlikely((u32)SRC == 0))
			return 0;
		tmp = (u32) DST;
		do_div(tmp, (u32) SRC);
		DST = (u32) tmp;
		CONT;
	ALU64_DIV_K:
		DST = div64_u64(DST, IMM);
		CONT;
	ALU_DIV_K:
		tmp = (u32) DST;
		do_div(tmp, (u32) IMM);
		DST = (u32) tmp;
		CONT;
	ALU_END_TO_BE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_be16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_be32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_be64(DST);
			break;
		}
		CONT;
	ALU_END_TO_LE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_le16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_le32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_le64(DST);
			break;
		}
		CONT;

	/* CALL */
	JMP_CALL:
		/* Function call scratches BPF_R1-BPF_R5 registers,
		 * preserves BPF_R6-BPF_R9, and stores return value
		 * into BPF_R0.
		 */
		BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
						       BPF_R4, BPF_R5);
		CONT;

	JMP_TAIL_CALL: {
		struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
		struct bpf_array *array = container_of(map, struct bpf_array, map);
		struct bpf_prog *prog;
		u32 index = BPF_R3;

		if (unlikely(index >= array->map.max_entries))
			goto out;

		if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT))
			goto out;

		tail_call_cnt++;

		prog = READ_ONCE(array->ptrs[index]);
		if (unlikely(!prog))
			goto out;

		/* ARG1 at this point is guaranteed to point to CTX from
		 * the verifier side due to the fact that the tail call is
		 * handled like a helper, that is, bpf_tail_call_proto,
		 * where arg1_type is ARG_PTR_TO_CTX.
		 */
		insn = prog->insnsi;
		goto select_insn;
out:
		CONT;
	}
	/* JMP */
	JMP_JA:
		insn += insn->off;
		CONT;
	JMP_JEQ_X:
		if (DST == SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JEQ_K:
		if (DST == IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JNE_X:
		if (DST != SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JNE_K:
		if (DST != IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGT_X:
		if (DST > SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGT_K:
		if (DST > IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGE_X:
		if (DST >= SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGE_K:
		if (DST >= IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGT_X:
		if (((s64) DST) > ((s64) SRC)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGT_K:
		if (((s64) DST) > ((s64) IMM)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGE_X:
		if (((s64) DST) >= ((s64) SRC)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGE_K:
		if (((s64) DST) >= ((s64) IMM)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSET_X:
		if (DST & SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSET_K:
		if (DST & IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_EXIT:
		return BPF_R0;

	/* STX and ST and LDX */
#define LDST(SIZEOP, SIZE)						\
	STX_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = SRC;	\
		CONT;							\
	ST_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = IMM;	\
		CONT;							\
	LDX_MEM_##SIZEOP:						\
		DST = *(SIZE *)(unsigned long) (SRC + insn->off);	\
		CONT;

	LDST(B,   u8)
	LDST(H,  u16)
	LDST(W,  u32)
	LDST(DW, u64)
#undef LDST
	STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
		atomic_add((u32) SRC, (atomic_t *)(unsigned long)
			   (DST + insn->off));
		CONT;
	STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
		atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
			     (DST + insn->off));
		CONT;
	LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + imm32)) */
		off = IMM;
load_word:
		/* BPF_LD + BPF_ABS and BPF_LD + BPF_IND insns only
		 * appear in programs where ctx == skb. All programs
		 * keep 'ctx' in regs[BPF_REG_CTX] == BPF_R6;
		 * bpf_convert_filter() saves it in BPF_R6, and the
		 * internal BPF verifier checks that BPF_R6 == ctx.
		 *
		 * BPF_ABS and BPF_IND are wrappers of function calls,
		 * so they scratch BPF_R1-BPF_R5 registers, preserve
		 * BPF_R6-BPF_R9, and store return value into BPF_R0.
		 *
		 * Implicit input:
		 *   ctx == skb == BPF_R6 == CTX
		 *
		 * Explicit input:
		 *   SRC == any register
		 *   IMM == 32-bit immediate
		 *
		 * Output:
		 *   BPF_R0 - 8/16/32-bit skb data converted to cpu endianness
		 */

		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 4, &tmp);
		if (likely(ptr != NULL)) {
			BPF_R0 = get_unaligned_be32(ptr);
			CONT;
		}

		return 0;
	LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + imm32)) */
		off = IMM;
load_half:
		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 2, &tmp);
		if (likely(ptr != NULL)) {
			BPF_R0 = get_unaligned_be16(ptr);
			CONT;
		}

		return 0;
	LD_ABS_B: /* BPF_R0 = *(u8 *) (skb->data + imm32) */
		off = IMM;
load_byte:
		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 1, &tmp);
		if (likely(ptr != NULL)) {
			BPF_R0 = *(u8 *)ptr;
			CONT;
		}

		return 0;
	LD_IND_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + src_reg + imm32)) */
		off = IMM + SRC;
		goto load_word;
	LD_IND_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + src_reg + imm32)) */
		off = IMM + SRC;
		goto load_half;
	LD_IND_B: /* BPF_R0 = *(u8 *) (skb->data + src_reg + imm32) */
		off = IMM + SRC;
		goto load_byte;

	default_label:
		/* If we ever reach this, we have a bug somewhere. */
		WARN_RATELIMIT(1, "unknown opcode %02x\n", insn->code);
		return 0;
}

#else
static unsigned int __bpf_prog_ret0(void *ctx, const struct bpf_insn *insn)
{
	return 0;
}
#endif

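/* An illustrative sketch (not part of the original file; the example_*
 * name is illustrative only): a minimal eBPF program in the instruction
 * encoding the interpreter above consumes. It computes R0 = 2 + 3 and
 * exits; BPF_PROG_RUN() on a prog holding these insns would return 5.
 */
static const struct bpf_insn example_insns[] = {
	BPF_MOV64_IMM(BPF_REG_0, 2),		/* r0 = 2 */
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),	/* r0 += 3 */
	BPF_EXIT_INSN(),			/* return r0 */
};
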
bool bpf_prog_array_compatible(struct bpf_array *array,
			       const struct bpf_prog *fp)
{
	if (!array->owner_prog_type) {
		/* There's no owner yet where we could check for
		 * compatibility.
		 */
		array->owner_prog_type = fp->type;
		array->owner_jited = fp->jited;

		return true;
	}

	return array->owner_prog_type == fp->type &&
	       array->owner_jited == fp->jited;
}

static int bpf_check_tail_call(const struct bpf_prog *fp)
{
	struct bpf_prog_aux *aux = fp->aux;
	int i;

	for (i = 0; i < aux->used_map_cnt; i++) {
		struct bpf_map *map = aux->used_maps[i];
		struct bpf_array *array;

		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
			continue;

		array = container_of(map, struct bpf_array, map);
		if (!bpf_prog_array_compatible(array, fp))
			return -EINVAL;
	}

	return 0;
}

/**
 *	bpf_prog_select_runtime - select exec runtime for BPF program
 *	@fp: bpf_prog populated with internal BPF program
 *
 * Try to JIT the eBPF program; if no JIT is available, use the interpreter.
 * The BPF program will be executed via the BPF_PROG_RUN() macro.
 */
int bpf_prog_select_runtime(struct bpf_prog *fp)
{
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
	fp->bpf_func = (void *) __bpf_prog_run;
#else
	fp->bpf_func = (void *) __bpf_prog_ret0;
#endif

	/* eBPF JITs can rewrite the program in case constant
	 * blinding is active. However, in case of error during
	 * blinding, bpf_int_jit_compile() must always return a
	 * valid program, which in this case would simply not
	 * be JITed, but falls back to the interpreter.
	 */
	bpf_int_jit_compile(fp);
#ifdef CONFIG_BPF_JIT_ALWAYS_ON
	if (!fp->jited)
		return -ENOTSUPP;
#endif
	bpf_prog_lock_ro(fp);

	/* The tail call compatibility check can only be done at
	 * this late stage, as we need to determine whether we deal
	 * with JITed or non-JITed program concatenations, and not
	 * all eBPF JITs might immediately support all features.
	 */
	return bpf_check_tail_call(fp);
}
EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);

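/* A minimal usage sketch (not part of the original file; the example_*
 * names are illustrative only) of the full path: allocate, fill in
 * instructions (example_insns from the sketch above), select the
 * runtime, then execute via BPF_PROG_RUN(). Real users go through the
 * verifier first; this only illustrates the API ordering in this file.
 */
static u64 example_run_prog(void *ctx)
{
	struct bpf_prog *fp;
	u64 ret = 0;

	fp = bpf_prog_alloc(bpf_prog_size(ARRAY_SIZE(example_insns)),
			    GFP_USER);
	if (!fp)
		return 0;

	fp->len = ARRAY_SIZE(example_insns);
	memcpy(fp->insnsi, example_insns, sizeof(example_insns));

	if (bpf_prog_select_runtime(fp) == 0)
		ret = BPF_PROG_RUN(fp, ctx);

	bpf_prog_free(fp);
	return ret;
}
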
static void bpf_prog_free_deferred(struct work_struct *work)
{
	struct bpf_prog_aux *aux;

	aux = container_of(work, struct bpf_prog_aux, work);
	bpf_jit_free(aux->prog);
}

/* Free internal BPF program */
void bpf_prog_free(struct bpf_prog *fp)
{
	struct bpf_prog_aux *aux = fp->aux;

	INIT_WORK(&aux->work, bpf_prog_free_deferred);
	schedule_work(&aux->work);
}
EXPORT_SYMBOL_GPL(bpf_prog_free);

/* RNG for unprivileged user space, with state separate from prandom_u32(). */
static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);

void bpf_user_rnd_init_once(void)
{
	prandom_init_once(&bpf_user_rnd_state);
}

u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	/* Should someone ever have the rather unwise idea to use some
	 * of the registers passed into this function, then note that
	 * this function is called from native eBPF and classic-to-eBPF
	 * transformations. Register assignments from both sides are
	 * different, e.g. classic always sets fn(ctx, A, X) here.
	 */
	struct rnd_state *state;
	u32 res;

	state = &get_cpu_var(bpf_user_rnd_state);
	res = prandom_u32_state(state);
	put_cpu_var(state);

	return res;
}

/* Weak definitions of helper functions in case we don't have bpf syscall. */
const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
const struct bpf_func_proto bpf_map_update_elem_proto __weak;
const struct bpf_func_proto bpf_map_delete_elem_proto __weak;

const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
const struct bpf_func_proto bpf_get_current_comm_proto __weak;

const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
{
	return NULL;
}

/* Always built-in helper functions. */
const struct bpf_func_proto bpf_tail_call_proto = {
	.func		= NULL,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

/* For classic BPF JITs that don't implement bpf_int_jit_compile(). */
void __weak bpf_int_jit_compile(struct bpf_prog *prog)
{
}

/* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
 */
int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
			 int len)
{
	return -EFAULT;
}