// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *	Jay Schulist <jschlst@samba.org>
 *	Alexei Starovoitov <ast@plumgrid.com>
 *	Daniel Borkmann <dborkman@redhat.com>
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */

#include <uapi/linux/btf.h>
#include <linux/filter.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/moduleloader.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/objtool.h>
#include <linux/rbtree_latch.h>
#include <linux/kallsyms.h>
#include <linux/rcupdate.h>
#include <linux/perf_event.h>
#include <linux/extable.h>
#include <linux/log2.h>
#include <linux/nospec.h>

#include <asm/barrier.h>
#include <asm/unaligned.h>

/* Registers */
#define BPF_R0	regs[BPF_REG_0]
#define BPF_R1	regs[BPF_REG_1]
#define BPF_R2	regs[BPF_REG_2]
#define BPF_R3	regs[BPF_REG_3]
#define BPF_R4	regs[BPF_REG_4]
#define BPF_R5	regs[BPF_REG_5]
#define BPF_R6	regs[BPF_REG_6]
#define BPF_R7	regs[BPF_REG_7]
#define BPF_R8	regs[BPF_REG_8]
#define BPF_R9	regs[BPF_REG_9]
#define BPF_R10	regs[BPF_REG_10]

/* Named registers */
#define DST	regs[insn->dst_reg]
#define SRC	regs[insn->src_reg]
#define FP	regs[BPF_REG_FP]
#define AX	regs[BPF_REG_AX]
#define ARG1	regs[BPF_REG_ARG1]
#define CTX	regs[BPF_REG_CTX]
#define IMM	insn->imm

/* No hurry in this branch
 *
 * Exported for the bpf jit load helper.
 */
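/* Illustrative note (not in the original source): classic BPF encodes
 * "negative" loads via the SKF_* extension offsets. E.g. k = SKF_NET_OFF + 12
 * with size 4 asks for 4 bytes starting 12 bytes into the network header,
 * which for an IPv4 packet would be the source address field. The helper
 * below resolves such an offset to a pointer, or NULL if the access would
 * fall outside the skb's linear data.
 */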
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
{
	u8 *ptr = NULL;

	if (k >= SKF_NET_OFF) {
		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
	} else if (k >= SKF_LL_OFF) {
		if (unlikely(!skb_mac_header_was_set(skb)))
			return NULL;
		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
	}
	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
		return ptr;

	return NULL;
}

struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog_aux *aux;
	struct bpf_prog *fp;

	size = round_up(size, PAGE_SIZE);
	fp = __vmalloc(size, gfp_flags);
	if (fp == NULL)
		return NULL;

	aux = kzalloc(sizeof(*aux), GFP_KERNEL | gfp_extra_flags);
	if (aux == NULL) {
		vfree(fp);
		return NULL;
	}

	fp->pages = size / PAGE_SIZE;
	fp->aux = aux;
	fp->aux->prog = fp;
	fp->jit_requested = ebpf_jit_enabled();

	INIT_LIST_HEAD_RCU(&fp->aux->ksym.lnode);
	mutex_init(&fp->aux->used_maps_mutex);
	mutex_init(&fp->aux->dst_mutex);

	return fp;
}

struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog *prog;
	int cpu;

	prog = bpf_prog_alloc_no_stats(size, gfp_extra_flags);
	if (!prog)
		return NULL;

	prog->aux->stats = alloc_percpu_gfp(struct bpf_prog_stats, gfp_flags);
	if (!prog->aux->stats) {
		kfree(prog->aux);
		vfree(prog);
		return NULL;
	}

	for_each_possible_cpu(cpu) {
		struct bpf_prog_stats *pstats;

		pstats = per_cpu_ptr(prog->aux->stats, cpu);
		u64_stats_init(&pstats->syncp);
	}
	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_alloc);
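
/* Usage sketch (illustrative, not from this file): callers typically size
 * the allocation from the instruction count, e.g.
 *
 *	fp = bpf_prog_alloc(bpf_prog_size(insn_cnt), GFP_USER);
 *	if (!fp)
 *		return -ENOMEM;
 *	...
 *	__bpf_prog_free(fp);	// error path; live programs are torn down
 *				// through the usual reference-counted path
 */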

int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog)
{
	if (!prog->aux->nr_linfo || !prog->jit_requested)
		return 0;

	prog->aux->jited_linfo = kcalloc(prog->aux->nr_linfo,
					 sizeof(*prog->aux->jited_linfo),
					 GFP_KERNEL | __GFP_NOWARN);
	if (!prog->aux->jited_linfo)
		return -ENOMEM;

	return 0;
}

void bpf_prog_free_jited_linfo(struct bpf_prog *prog)
{
	kfree(prog->aux->jited_linfo);
	prog->aux->jited_linfo = NULL;
}

void bpf_prog_free_unused_jited_linfo(struct bpf_prog *prog)
{
	if (prog->aux->jited_linfo && !prog->aux->jited_linfo[0])
		bpf_prog_free_jited_linfo(prog);
}

/* The jit engine is responsible for providing an array
 * for the insn_off to jited_off mapping (insn_to_jit_off).
 *
 * The idx to this array is the insn_off.  Hence, the insn_off
 * here is relative to the prog itself instead of the main prog.
 * This array has one entry for each xlated bpf insn.
 *
 * jited_off is the byte off to the last byte of the jited insn.
 *
 * Hence, with
 * insn_start:
 *      The first bpf insn off of the prog.  The insn off
 *      here is relative to the main prog.
 *      e.g. if prog is a subprog, insn_start > 0
 * linfo_idx:
 *      The prog's idx to prog->aux->linfo and jited_linfo
 *
 * jited_linfo[linfo_idx] = prog->bpf_func
 *
 * For i > linfo_idx,
 *
 * jited_linfo[i] = prog->bpf_func +
 *	insn_to_jit_off[linfo[i].insn_off - insn_start - 1]
 */
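/* Worked example (illustrative, derived from the formula above): assume a
 * subprog with insn_start = 10 and a line info entry with insn_off = 12.
 * Then jited_linfo[i] = bpf_func + insn_to_jit_off[12 - 10 - 1], i.e. the
 * offset of the last byte of xlated insn 11 -- which is exactly where the
 * jited code for insn 12 begins.
 */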
void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
			       const u32 *insn_to_jit_off)
{
	u32 linfo_idx, insn_start, insn_end, nr_linfo, i;
	const struct bpf_line_info *linfo;
	void **jited_linfo;

	if (!prog->aux->jited_linfo)
		/* Userspace did not provide linfo */
		return;

	linfo_idx = prog->aux->linfo_idx;
	linfo = &prog->aux->linfo[linfo_idx];
	insn_start = linfo[0].insn_off;
	insn_end = insn_start + prog->len;

	jited_linfo = &prog->aux->jited_linfo[linfo_idx];
	jited_linfo[0] = prog->bpf_func;

	nr_linfo = prog->aux->nr_linfo - linfo_idx;

	for (i = 1; i < nr_linfo && linfo[i].insn_off < insn_end; i++)
		/* The verifier ensures that linfo[i].insn_off is
		 * strictly increasing
		 */
		jited_linfo[i] = prog->bpf_func +
			insn_to_jit_off[linfo[i].insn_off - insn_start - 1];
}

void bpf_prog_free_linfo(struct bpf_prog *prog)
{
	bpf_prog_free_jited_linfo(prog);
	kvfree(prog->aux->linfo);
}

struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
				  gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog *fp;
	u32 pages, delta;
	int ret;

	size = round_up(size, PAGE_SIZE);
	pages = size / PAGE_SIZE;
	if (pages <= fp_old->pages)
		return fp_old;

	delta = pages - fp_old->pages;
	ret = __bpf_prog_charge(fp_old->aux->user, delta);
	if (ret)
		return NULL;

	fp = __vmalloc(size, gfp_flags);
	if (fp == NULL) {
		__bpf_prog_uncharge(fp_old->aux->user, delta);
	} else {
		memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
		fp->pages = pages;
		fp->aux->prog = fp;

		/* We keep fp->aux from fp_old around in the new
		 * reallocated structure.
		 */
		fp_old->aux = NULL;
		__bpf_prog_free(fp_old);
	}

	return fp;
}

void __bpf_prog_free(struct bpf_prog *fp)
{
	if (fp->aux) {
		mutex_destroy(&fp->aux->used_maps_mutex);
		mutex_destroy(&fp->aux->dst_mutex);
		free_percpu(fp->aux->stats);
		kfree(fp->aux->poke_tab);
		kfree(fp->aux);
	}
	vfree(fp);
}

int bpf_prog_calc_tag(struct bpf_prog *fp)
{
	const u32 bits_offset = SHA1_BLOCK_SIZE - sizeof(__be64);
	u32 raw_size = bpf_prog_tag_scratch_size(fp);
	u32 digest[SHA1_DIGEST_WORDS];
	u32 ws[SHA1_WORKSPACE_WORDS];
	u32 i, bsize, psize, blocks;
	struct bpf_insn *dst;
	bool was_ld_map;
	u8 *raw, *todo;
	__be32 *result;
	__be64 *bits;

	raw = vmalloc(raw_size);
	if (!raw)
		return -ENOMEM;

	sha1_init(digest);
	memset(ws, 0, sizeof(ws));

	/* We need to take out the map fds for the digest calculation
	 * since they are unstable from the user space side.
	 */
	dst = (void *)raw;
	for (i = 0, was_ld_map = false; i < fp->len; i++) {
		dst[i] = fp->insnsi[i];
		if (!was_ld_map &&
		    dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
		    (dst[i].src_reg == BPF_PSEUDO_MAP_FD ||
		     dst[i].src_reg == BPF_PSEUDO_MAP_VALUE)) {
			was_ld_map = true;
			dst[i].imm = 0;
		} else if (was_ld_map &&
			   dst[i].code == 0 &&
			   dst[i].dst_reg == 0 &&
			   dst[i].src_reg == 0 &&
			   dst[i].off == 0) {
			was_ld_map = false;
			dst[i].imm = 0;
		} else {
			was_ld_map = false;
		}
	}

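	/* What follows is standard SHA-1 message padding, done by hand
	 * (illustrative note, not in the original source): append 0x80,
	 * zero-fill up to the block boundary and store the message length
	 * in bits big-endian in the last 8 bytes of the final 64-byte
	 * block. E.g. a two-insn program has psize = 16 bytes, so a single
	 * block suffices and the trailing length word holds 16 * 8 = 128.
	 */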
	psize = bpf_prog_insn_size(fp);
	memset(&raw[psize], 0, raw_size - psize);
	raw[psize++] = 0x80;

	bsize  = round_up(psize, SHA1_BLOCK_SIZE);
	blocks = bsize / SHA1_BLOCK_SIZE;
	todo   = raw;
	if (bsize - psize >= sizeof(__be64)) {
		bits = (__be64 *)(todo + bsize - sizeof(__be64));
	} else {
		bits = (__be64 *)(todo + bsize + bits_offset);
		blocks++;
	}
	*bits = cpu_to_be64((psize - 1) << 3);

	while (blocks--) {
		sha1_transform(digest, todo, ws);
		todo += SHA1_BLOCK_SIZE;
	}

	result = (__force __be32 *)digest;
	for (i = 0; i < SHA1_DIGEST_WORDS; i++)
		result[i] = cpu_to_be32(digest[i]);
	memcpy(fp->tag, result, sizeof(fp->tag));

	vfree(raw);
	return 0;
}

static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old,
				s32 end_new, s32 curr, const bool probe_pass)
{
	const s64 imm_min = S32_MIN, imm_max = S32_MAX;
	s32 delta = end_new - end_old;
	s64 imm = insn->imm;

	if (curr < pos && curr + imm + 1 >= end_old)
		imm += delta;
	else if (curr >= end_new && curr + imm + 1 < end_new)
		imm -= delta;
	if (imm < imm_min || imm > imm_max)
		return -ERANGE;
	if (!probe_pass)
		insn->imm = imm;
	return 0;
}

static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
				s32 end_new, s32 curr, const bool probe_pass)
{
	const s32 off_min = S16_MIN, off_max = S16_MAX;
	s32 delta = end_new - end_old;
	s32 off = insn->off;

	if (curr < pos && curr + off + 1 >= end_old)
		off += delta;
	else if (curr >= end_new && curr + off + 1 < end_new)
		off -= delta;
	if (off < off_min || off > off_max)
		return -ERANGE;
	if (!probe_pass)
		insn->off = off;
	return 0;
}
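
/* Worked example (illustrative): patching one insn at pos = 6 with a
 * 3-insn patchlet gives end_old = 7, end_new = 9, so delta = 2. A jump
 * at curr = 3 with off = +5 targets insn 3 + 5 + 1 = 9, which lies at or
 * beyond end_old, so its offset grows to +7 and it keeps targeting the
 * same (now shifted) instruction at index 11.
 */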

static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, s32 end_old,
			    s32 end_new, const bool probe_pass)
{
	u32 i, insn_cnt = prog->len + (probe_pass ? end_new - end_old : 0);
	struct bpf_insn *insn = prog->insnsi;
	int ret = 0;

	for (i = 0; i < insn_cnt; i++, insn++) {
		u8 code;

		/* In the probing pass we still operate on the original,
		 * unpatched image in order to check overflows before we
		 * do any other adjustments. Therefore skip the patchlet.
		 */
		if (probe_pass && i == pos) {
			i = end_new;
			insn = prog->insnsi + end_old;
		}
		code = insn->code;
		if ((BPF_CLASS(code) != BPF_JMP &&
		     BPF_CLASS(code) != BPF_JMP32) ||
		    BPF_OP(code) == BPF_EXIT)
			continue;
		/* Adjust offset of jmps if we cross patch boundaries. */
		if (BPF_OP(code) == BPF_CALL) {
			if (insn->src_reg != BPF_PSEUDO_CALL)
				continue;
			ret = bpf_adj_delta_to_imm(insn, pos, end_old,
						   end_new, i, probe_pass);
		} else {
			ret = bpf_adj_delta_to_off(insn, pos, end_old,
						   end_new, i, probe_pass);
		}
		if (ret)
			break;
	}

	return ret;
}

static void bpf_adj_linfo(struct bpf_prog *prog, u32 off, u32 delta)
{
	struct bpf_line_info *linfo;
	u32 i, nr_linfo;

	nr_linfo = prog->aux->nr_linfo;
	if (!nr_linfo || !delta)
		return;

	linfo = prog->aux->linfo;

	for (i = 0; i < nr_linfo; i++)
		if (off < linfo[i].insn_off)
			break;

	/* Push all off < linfo[i].insn_off by delta */
	for (; i < nr_linfo; i++)
		linfo[i].insn_off += delta;
}

struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
				       const struct bpf_insn *patch, u32 len)
{
	u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
	const u32 cnt_max = S16_MAX;
	struct bpf_prog *prog_adj;
	int err;

	/* Since our patchlet doesn't expand the image, we're done. */
	if (insn_delta == 0) {
		memcpy(prog->insnsi + off, patch, sizeof(*patch));
		return prog;
	}

	insn_adj_cnt = prog->len + insn_delta;

	/* Reject anything that would potentially let the insn->off
	 * target overflow when we have excessive program expansions.
	 * We need to probe here before we do any reallocation where
	 * we afterwards may not fail anymore.
	 */
	if (insn_adj_cnt > cnt_max &&
	    (err = bpf_adj_branches(prog, off, off + 1, off + len, true)))
		return ERR_PTR(err);

	/* Several new instructions need to be inserted. Make room
	 * for them. Likely, there's no need for a new allocation as
	 * the last page could have large enough tailroom.
	 */
	prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
				    GFP_USER);
	if (!prog_adj)
		return ERR_PTR(-ENOMEM);

	prog_adj->len = insn_adj_cnt;

	/* Patching happens in 3 steps:
	 *
	 * 1) Move over tail of insnsi from next instruction onwards,
	 *    so we can patch the single target insn with one or more
	 *    new ones (patching is always from 1 to n insns, n > 0).
	 * 2) Inject new instructions at the target location.
	 * 3) Adjust branch offsets if necessary.
	 */
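	/* Example (illustrative): with prog->len = 10, off = 4 and len = 3,
	 * insn_adj_cnt = 12 and insn_rest = 12 - 4 - 3 = 5: the old tail
	 * insns 5..9 move to 7..11 and the patchlet fills slots 4..6.
	 */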
	insn_rest = insn_adj_cnt - off - len;

	memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
		sizeof(*patch) * insn_rest);
	memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);

	/* We are guaranteed not to fail at this point; the ship has
	 * sailed and there is no way back to the original state. An
	 * overflow cannot happen at this point.
	 */
	BUG_ON(bpf_adj_branches(prog_adj, off, off + 1, off + len, false));

	bpf_adj_linfo(prog_adj, off, insn_delta);

	return prog_adj;
}

int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt)
{
	/* Branch offsets can't overflow when program is shrinking, no need
	 * to call bpf_adj_branches(..., true) here
	 */
	memmove(prog->insnsi + off, prog->insnsi + off + cnt,
		sizeof(struct bpf_insn) * (prog->len - off - cnt));
	prog->len -= cnt;

	return WARN_ON_ONCE(bpf_adj_branches(prog, off, off + cnt, off, false));
}

static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
{
	int i;

	for (i = 0; i < fp->aux->func_cnt; i++)
		bpf_prog_kallsyms_del(fp->aux->func[i]);
}

void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
{
	bpf_prog_kallsyms_del_subprogs(fp);
	bpf_prog_kallsyms_del(fp);
}

#ifdef CONFIG_BPF_JIT
/* All BPF JIT sysctl knobs here. */
int bpf_jit_enable   __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
int bpf_jit_kallsyms __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
int bpf_jit_harden   __read_mostly;
long bpf_jit_limit   __read_mostly;
long bpf_jit_limit_max __read_mostly;

static void
bpf_prog_ksym_set_addr(struct bpf_prog *prog)
{
	const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog);
	unsigned long addr = (unsigned long)hdr;

	WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));

	prog->aux->ksym.start = (unsigned long) prog->bpf_func;
	prog->aux->ksym.end   = addr + hdr->pages * PAGE_SIZE;
}

static void
bpf_prog_ksym_set_name(struct bpf_prog *prog)
{
	char *sym = prog->aux->ksym.name;
	const char *end = sym + KSYM_NAME_LEN;
	const struct btf_type *type;
	const char *func_name;

	BUILD_BUG_ON(sizeof("bpf_prog_") +
		     sizeof(prog->tag) * 2 +
		     /* name has been null terminated.
		      * We would need +1 for the '_' preceding
		      * the name.  However, the null character
		      * is double counted between the name and the
		      * sizeof("bpf_prog_") above, so we omit
		      * the +1 here.
		      */
		     sizeof(prog->aux->name) > KSYM_NAME_LEN);

	sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
	sym  = bin2hex(sym, prog->tag, sizeof(prog->tag));

	/* prog->aux->name will be ignored if full btf name is available */
	if (prog->aux->func_info_cnt) {
		type = btf_type_by_id(prog->aux->btf,
				      prog->aux->func_info[prog->aux->func_idx].type_id);
		func_name = btf_name_by_offset(prog->aux->btf, type->name_off);
		snprintf(sym, (size_t)(end - sym), "_%s", func_name);
		return;
	}

	if (prog->aux->name[0])
		snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
	else
		*sym = 0;
}
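
/* Resulting symbol layout (illustrative): "bpf_prog_" followed by the
 * 8-byte program tag as 16 hex characters and an optional name suffix,
 * e.g. something like bpf_prog_6deef7357e7b4530_my_func as it would show
 * up in /proc/kallsyms or perf output.
 */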

static unsigned long bpf_get_ksym_start(struct latch_tree_node *n)
{
	return container_of(n, struct bpf_ksym, tnode)->start;
}

static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
					  struct latch_tree_node *b)
{
	return bpf_get_ksym_start(a) < bpf_get_ksym_start(b);
}

static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
{
	unsigned long val = (unsigned long)key;
	const struct bpf_ksym *ksym;

	ksym = container_of(n, struct bpf_ksym, tnode);

	if (val < ksym->start)
		return -1;
	/* Ensure that we detect return addresses as part of the program, when
	 * the final instruction is a call for a program part of the stack
	 * trace. Therefore, do val > ksym->end instead of val >= ksym->end.
	 */
	if (val > ksym->end)
		return  1;

	return 0;
}

static const struct latch_tree_ops bpf_tree_ops = {
	.less	= bpf_tree_less,
	.comp	= bpf_tree_comp,
};

static DEFINE_SPINLOCK(bpf_lock);
static LIST_HEAD(bpf_kallsyms);
static struct latch_tree_root bpf_tree __cacheline_aligned;

void bpf_ksym_add(struct bpf_ksym *ksym)
{
	spin_lock_bh(&bpf_lock);
	WARN_ON_ONCE(!list_empty(&ksym->lnode));
	list_add_tail_rcu(&ksym->lnode, &bpf_kallsyms);
	latch_tree_insert(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
	spin_unlock_bh(&bpf_lock);
}

static void __bpf_ksym_del(struct bpf_ksym *ksym)
{
	if (list_empty(&ksym->lnode))
		return;

	latch_tree_erase(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
	list_del_rcu(&ksym->lnode);
}

void bpf_ksym_del(struct bpf_ksym *ksym)
{
	spin_lock_bh(&bpf_lock);
	__bpf_ksym_del(ksym);
	spin_unlock_bh(&bpf_lock);
}

static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
{
	return fp->jited && !bpf_prog_was_classic(fp);
}

static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
{
	return list_empty(&fp->aux->ksym.lnode) ||
	       fp->aux->ksym.lnode.prev == LIST_POISON2;
}

void bpf_prog_kallsyms_add(struct bpf_prog *fp)
{
	if (!bpf_prog_kallsyms_candidate(fp) ||
	    !bpf_capable())
		return;

	bpf_prog_ksym_set_addr(fp);
	bpf_prog_ksym_set_name(fp);
	fp->aux->ksym.prog = true;

	bpf_ksym_add(&fp->aux->ksym);
}

void bpf_prog_kallsyms_del(struct bpf_prog *fp)
{
	if (!bpf_prog_kallsyms_candidate(fp))
		return;

	bpf_ksym_del(&fp->aux->ksym);
}

static struct bpf_ksym *bpf_ksym_find(unsigned long addr)
{
	struct latch_tree_node *n;

	n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
	return n ? container_of(n, struct bpf_ksym, tnode) : NULL;
}

const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
				 unsigned long *off, char *sym)
{
	struct bpf_ksym *ksym;
	char *ret = NULL;

	rcu_read_lock();
	ksym = bpf_ksym_find(addr);
	if (ksym) {
		unsigned long symbol_start = ksym->start;
		unsigned long symbol_end = ksym->end;

		strncpy(sym, ksym->name, KSYM_NAME_LEN);

		ret = sym;
		if (size)
			*size = symbol_end - symbol_start;
		if (off)
			*off  = addr - symbol_start;
	}
	rcu_read_unlock();

	return ret;
}

bool is_bpf_text_address(unsigned long addr)
{
	bool ret;

	rcu_read_lock();
	ret = bpf_ksym_find(addr) != NULL;
	rcu_read_unlock();

	return ret;
}

static struct bpf_prog *bpf_prog_ksym_find(unsigned long addr)
{
	struct bpf_ksym *ksym = bpf_ksym_find(addr);

	return ksym && ksym->prog ?
	       container_of(ksym, struct bpf_prog_aux, ksym)->prog :
	       NULL;
}

const struct exception_table_entry *search_bpf_extables(unsigned long addr)
{
	const struct exception_table_entry *e = NULL;
	struct bpf_prog *prog;

	rcu_read_lock();
	prog = bpf_prog_ksym_find(addr);
	if (!prog)
		goto out;
	if (!prog->aux->num_exentries)
		goto out;

	e = search_extable(prog->aux->extable, prog->aux->num_exentries, addr);
out:
	rcu_read_unlock();
	return e;
}

int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
		    char *sym)
{
	struct bpf_ksym *ksym;
	unsigned int it = 0;
	int ret = -ERANGE;

	if (!bpf_jit_kallsyms_enabled())
		return ret;

	rcu_read_lock();
	list_for_each_entry_rcu(ksym, &bpf_kallsyms, lnode) {
		if (it++ != symnum)
			continue;

		strncpy(sym, ksym->name, KSYM_NAME_LEN);

		*value = ksym->start;
		*type  = BPF_SYM_ELF_TYPE;

		ret = 0;
		break;
	}
	rcu_read_unlock();

	return ret;
}

int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
				struct bpf_jit_poke_descriptor *poke)
{
	struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
	static const u32 poke_tab_max = 1024;
	u32 slot = prog->aux->size_poke_tab;
	u32 size = slot + 1;

	if (size > poke_tab_max)
		return -ENOSPC;
	if (poke->tailcall_target || poke->tailcall_target_stable ||
	    poke->tailcall_bypass || poke->adj_off || poke->bypass_addr)
		return -EINVAL;

	switch (poke->reason) {
	case BPF_POKE_REASON_TAIL_CALL:
		if (!poke->tail_call.map)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	tab = krealloc(tab, size * sizeof(*poke), GFP_KERNEL);
	if (!tab)
		return -ENOMEM;

	memcpy(&tab[slot], poke, sizeof(*poke));
	prog->aux->size_poke_tab = size;
	prog->aux->poke_tab = tab;

	return slot;
}

static atomic_long_t bpf_jit_current;

/* Can be overridden by an arch's JIT compiler if it has a custom,
 * dedicated BPF backend memory area, or if neither of the two
 * below apply.
 */
u64 __weak bpf_jit_alloc_exec_limit(void)
{
#if defined(MODULES_VADDR)
	return MODULES_END - MODULES_VADDR;
#else
	return VMALLOC_END - VMALLOC_START;
#endif
}

static int __init bpf_jit_charge_init(void)
{
	/* Only used as heuristic here to derive limit. */
	bpf_jit_limit_max = bpf_jit_alloc_exec_limit();
	bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 1,
					    PAGE_SIZE), LONG_MAX);
	return 0;
}
pure_initcall(bpf_jit_charge_init);
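
/* Illustrative arithmetic (not in the original source): the default charge
 * limit is half of the exec allocation arena, rounded up to a page. With a
 * hypothetical 1 GiB arena, bpf_jit_limit would come out at 512 MiB; it is
 * expected to stay tunable up to bpf_jit_limit_max via sysctl.
 */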

int bpf_jit_charge_modmem(u32 size)
{
	if (atomic_long_add_return(size, &bpf_jit_current) > READ_ONCE(bpf_jit_limit)) {
		if (!bpf_capable()) {
			atomic_long_sub(size, &bpf_jit_current);
			return -EPERM;
		}
	}

	return 0;
}

void bpf_jit_uncharge_modmem(u32 size)
{
	atomic_long_sub(size, &bpf_jit_current);
}

void *__weak bpf_jit_alloc_exec(unsigned long size)
{
	return module_alloc(size);
}

void __weak bpf_jit_free_exec(void *addr)
{
	module_memfree(addr);
}

struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
		     unsigned int alignment,
		     bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
	struct bpf_binary_header *hdr;
	u32 size, hole, start, pages;

	WARN_ON_ONCE(!is_power_of_2(alignment) ||
		     alignment > BPF_IMAGE_ALIGNMENT);

	/* Most BPF filters are really small, but if some of them
	 * fill a page, allow at least 128 extra bytes to insert a
	 * random section of illegal instructions.
	 */
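	/* Resulting page layout (illustrative sketch):
	 *
	 *   [ struct bpf_binary_header | random gap | JITed image | fill ]
	 *
	 * where the gap and the trailing fill consist of illegal
	 * instructions, so a stray jump into the region traps instead of
	 * executing attacker-chosen bytes.
	 */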
	size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
	pages = size / PAGE_SIZE;

	if (bpf_jit_charge_modmem(size))
		return NULL;
	hdr = bpf_jit_alloc_exec(size);
	if (!hdr) {
		bpf_jit_uncharge_modmem(size);
		return NULL;
	}

	/* Fill space with illegal/arch-dep instructions. */
	bpf_fill_ill_insns(hdr, size);

	hdr->pages = pages;
	hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
		     PAGE_SIZE - sizeof(*hdr));
	start = (get_random_int() % hole) & ~(alignment - 1);

	/* Leave a random number of instructions before BPF code. */
	*image_ptr = &hdr->image[start];

	return hdr;
}

void bpf_jit_binary_free(struct bpf_binary_header *hdr)
{
	u32 pages = hdr->pages;

	bpf_jit_free_exec(hdr);
	bpf_jit_uncharge_modmem(pages << PAGE_SHIFT);
}

/* This symbol is only overridden by archs that have different
 * requirements than the usual eBPF JITs, f.e. when they only
 * implement cBPF JIT, do not set images read-only, etc.
 */
void __weak bpf_jit_free(struct bpf_prog *fp)
{
	if (fp->jited) {
		struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);

		bpf_jit_binary_free(hdr);

		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
	}

	bpf_prog_unlock_free(fp);
}

int bpf_jit_get_func_addr(const struct bpf_prog *prog,
			  const struct bpf_insn *insn, bool extra_pass,
			  u64 *func_addr, bool *func_addr_fixed)
{
	s16 off = insn->off;
	s32 imm = insn->imm;
	u8 *addr;

	*func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL;
	if (!*func_addr_fixed) {
		/* Place-holder address till the last pass has collected
		 * all addresses for JITed subprograms in which case we
		 * can pick them up from prog->aux.
		 */
		if (!extra_pass)
			addr = NULL;
		else if (prog->aux->func &&
			 off >= 0 && off < prog->aux->func_cnt)
			addr = (u8 *)prog->aux->func[off]->bpf_func;
		else
			return -EINVAL;
	} else {
		/* Address of a BPF helper call. Since part of the core
		 * kernel, it's always at a fixed location. __bpf_call_base
		 * and the helper with imm relative to it are both in core
		 * kernel.
		 */
		addr = (u8 *)__bpf_call_base + imm;
	}

	*func_addr = (unsigned long)addr;
	return 0;
}

static int bpf_jit_blind_insn(const struct bpf_insn *from,
			      const struct bpf_insn *aux,
			      struct bpf_insn *to_buff,
			      bool emit_zext)
{
	struct bpf_insn *to = to_buff;
	u32 imm_rnd = get_random_int();
	s16 off;

	BUILD_BUG_ON(BPF_REG_AX  + 1 != MAX_BPF_JIT_REG);
	BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);

	/* Constraints on AX register:
	 *
	 * AX register is inaccessible from user space. It is mapped in
	 * all JITs, and used here for constant blinding rewrites. It is
	 * typically "stateless" meaning its contents are only valid within
	 * the executed instruction, but not across several instructions.
	 * There are a few exceptions however which are further detailed
	 * below.
	 *
	 * Constant blinding is only used by JITs, not in the interpreter.
	 * The interpreter uses AX on some occasions as a local temporary
	 * register e.g. in DIV or MOD instructions.
	 *
	 * In restricted circumstances, the verifier can also use the AX
	 * register for rewrites as long as they do not interfere with
	 * the above cases!
	 */
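	/* Worked example of the rewrite below (illustrative): for
	 * BPF_ALU64_IMM(BPF_ADD, R1, 0x1234) and a random imm_rnd, we emit
	 *
	 *	MOV64 AX, imm_rnd ^ 0x1234
	 *	XOR64 AX, imm_rnd
	 *	ADD64 R1, AX
	 *
	 * so the constant 0x1234 never appears literally in the image,
	 * yet AX holds exactly 0x1234 when the ADD executes.
	 */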
	if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX)
		goto out;

	if (from->imm == 0 &&
	    (from->code == (BPF_ALU   | BPF_MOV | BPF_K) ||
	     from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
		*to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
		goto out;
	}

	switch (from->code) {
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU | BPF_OR  | BPF_K:
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU | BPF_MUL | BPF_K:
	case BPF_ALU | BPF_MOV | BPF_K:
	case BPF_ALU | BPF_DIV | BPF_K:
	case BPF_ALU | BPF_MOD | BPF_K:
		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX);
		break;

	case BPF_ALU64 | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_OR  | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_MUL | BPF_K:
	case BPF_ALU64 | BPF_MOV | BPF_K:
	case BPF_ALU64 | BPF_DIV | BPF_K:
	case BPF_ALU64 | BPF_MOD | BPF_K:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX);
		break;

	case BPF_JMP | BPF_JEQ  | BPF_K:
	case BPF_JMP | BPF_JNE  | BPF_K:
	case BPF_JMP | BPF_JGT  | BPF_K:
	case BPF_JMP | BPF_JLT  | BPF_K:
	case BPF_JMP | BPF_JGE  | BPF_K:
	case BPF_JMP | BPF_JLE  | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSLT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
	case BPF_JMP | BPF_JSLE | BPF_K:
	case BPF_JMP | BPF_JSET | BPF_K:
		/* Accommodate for extra offset in case of a backjump. */
		off = from->off;
		if (off < 0)
			off -= 2;
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
		break;

	case BPF_JMP32 | BPF_JEQ  | BPF_K:
	case BPF_JMP32 | BPF_JNE  | BPF_K:
	case BPF_JMP32 | BPF_JGT  | BPF_K:
	case BPF_JMP32 | BPF_JLT  | BPF_K:
	case BPF_JMP32 | BPF_JGE  | BPF_K:
	case BPF_JMP32 | BPF_JLE  | BPF_K:
	case BPF_JMP32 | BPF_JSGT | BPF_K:
	case BPF_JMP32 | BPF_JSLT | BPF_K:
	case BPF_JMP32 | BPF_JSGE | BPF_K:
	case BPF_JMP32 | BPF_JSLE | BPF_K:
	case BPF_JMP32 | BPF_JSET | BPF_K:
		/* Accommodate for extra offset in case of a backjump. */
		off = from->off;
		if (off < 0)
			off -= 2;
		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_JMP32_REG(from->code, from->dst_reg, BPF_REG_AX,
				      off);
		break;

	case BPF_LD | BPF_IMM | BPF_DW:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
		*to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
		break;
	case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		if (emit_zext)
			*to++ = BPF_ZEXT_REG(BPF_REG_AX);
		*to++ = BPF_ALU64_REG(BPF_OR,  aux[0].dst_reg, BPF_REG_AX);
		break;

	case BPF_ST | BPF_MEM | BPF_DW:
	case BPF_ST | BPF_MEM | BPF_W:
	case BPF_ST | BPF_MEM | BPF_H:
	case BPF_ST | BPF_MEM | BPF_B:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
		break;
	}
out:
	return to - to_buff;
}

static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
					      gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog *fp;

	fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags);
	if (fp != NULL) {
		/* aux->prog still points to the fp_other one, so
		 * when promoting the clone to the real program,
		 * this still needs to be adapted.
		 */
		memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
	}

	return fp;
}

static void bpf_prog_clone_free(struct bpf_prog *fp)
{
	/* aux was stolen by the other clone, so we cannot free
	 * it from this path! It will be freed eventually by the
	 * other program on release.
	 *
	 * At this point, we don't need a deferred release since
	 * clone is guaranteed to not be locked.
	 */
	fp->aux = NULL;
	__bpf_prog_free(fp);
}

void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
{
	/* We have to repoint aux->prog to self, as we don't
	 * know whether fp here is the clone or the original.
	 */
	fp->aux->prog = fp;
	bpf_prog_clone_free(fp_other);
}

struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
{
	struct bpf_insn insn_buff[16], aux[2];
	struct bpf_prog *clone, *tmp;
	int insn_delta, insn_cnt;
	struct bpf_insn *insn;
	int i, rewritten;

	if (!bpf_jit_blinding_enabled(prog) || prog->blinded)
		return prog;

	clone = bpf_prog_clone_create(prog, GFP_USER);
	if (!clone)
		return ERR_PTR(-ENOMEM);

	insn_cnt = clone->len;
	insn = clone->insnsi;

	for (i = 0; i < insn_cnt; i++, insn++) {
		/* We temporarily need to hold the original ld64 insn
		 * so that we can still access the first part in the
		 * second blinding run.
		 */
		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
		    insn[1].code == 0)
			memcpy(aux, insn, sizeof(aux));

		rewritten = bpf_jit_blind_insn(insn, aux, insn_buff,
						clone->aux->verifier_zext);
		if (!rewritten)
			continue;

		tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
		if (IS_ERR(tmp)) {
			/* Patching may have repointed aux->prog during
			 * realloc from the original one, so we need to
			 * fix it up here on error.
			 */
			bpf_jit_prog_release_other(prog, clone);
			return tmp;
		}

		clone = tmp;
		insn_delta = rewritten - 1;

		/* Walk new program and skip insns we just inserted. */
		insn = clone->insnsi + i + insn_delta;
		insn_cnt += insn_delta;
		i        += insn_delta;
	}

	clone->blinded = 1;
	return clone;
}
#endif /* CONFIG_BPF_JIT */

/* Base function for offset calculation. Needs to go into .text section,
 * therefore keeping it non-static as well; will also be used by JITs
 * anyway later on, so do not let the compiler omit it. This also needs
 * to go into kallsyms for correlation from e.g. bpftool, so naming
 * must not change.
 */
noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__bpf_call_base);

/* All UAPI available opcodes. */
#define BPF_INSN_MAP(INSN_2, INSN_3)		\
	/* 32 bit ALU operations. */		\
	/*   Register based. */			\
	INSN_3(ALU, ADD,  X),			\
	INSN_3(ALU, SUB,  X),			\
	INSN_3(ALU, AND,  X),			\
	INSN_3(ALU, OR,   X),			\
	INSN_3(ALU, LSH,  X),			\
	INSN_3(ALU, RSH,  X),			\
	INSN_3(ALU, XOR,  X),			\
	INSN_3(ALU, MUL,  X),			\
	INSN_3(ALU, MOV,  X),			\
	INSN_3(ALU, ARSH, X),			\
	INSN_3(ALU, DIV,  X),			\
	INSN_3(ALU, MOD,  X),			\
	INSN_2(ALU, NEG),			\
	INSN_3(ALU, END, TO_BE),		\
	INSN_3(ALU, END, TO_LE),		\
	/*   Immediate based. */		\
	INSN_3(ALU, ADD,  K),			\
	INSN_3(ALU, SUB,  K),			\
	INSN_3(ALU, AND,  K),			\
	INSN_3(ALU, OR,   K),			\
	INSN_3(ALU, LSH,  K),			\
	INSN_3(ALU, RSH,  K),			\
	INSN_3(ALU, XOR,  K),			\
	INSN_3(ALU, MUL,  K),			\
	INSN_3(ALU, MOV,  K),			\
	INSN_3(ALU, ARSH, K),			\
	INSN_3(ALU, DIV,  K),			\
	INSN_3(ALU, MOD,  K),			\
	/* 64 bit ALU operations. */		\
	/*   Register based. */			\
	INSN_3(ALU64, ADD,  X),			\
	INSN_3(ALU64, SUB,  X),			\
	INSN_3(ALU64, AND,  X),			\
	INSN_3(ALU64, OR,   X),			\
	INSN_3(ALU64, LSH,  X),			\
	INSN_3(ALU64, RSH,  X),			\
	INSN_3(ALU64, XOR,  X),			\
	INSN_3(ALU64, MUL,  X),			\
	INSN_3(ALU64, MOV,  X),			\
	INSN_3(ALU64, ARSH, X),			\
	INSN_3(ALU64, DIV,  X),			\
	INSN_3(ALU64, MOD,  X),			\
	INSN_2(ALU64, NEG),			\
	/*   Immediate based. */		\
	INSN_3(ALU64, ADD,  K),			\
	INSN_3(ALU64, SUB,  K),			\
	INSN_3(ALU64, AND,  K),			\
	INSN_3(ALU64, OR,   K),			\
	INSN_3(ALU64, LSH,  K),			\
	INSN_3(ALU64, RSH,  K),			\
	INSN_3(ALU64, XOR,  K),			\
	INSN_3(ALU64, MUL,  K),			\
	INSN_3(ALU64, MOV,  K),			\
	INSN_3(ALU64, ARSH, K),			\
	INSN_3(ALU64, DIV,  K),			\
	INSN_3(ALU64, MOD,  K),			\
	/* Call instruction. */			\
	INSN_2(JMP, CALL),			\
	/* Exit instruction. */			\
	INSN_2(JMP, EXIT),			\
	/* 32-bit Jump instructions. */		\
	/*   Register based. */			\
	INSN_3(JMP32, JEQ,  X),			\
	INSN_3(JMP32, JNE,  X),			\
	INSN_3(JMP32, JGT,  X),			\
	INSN_3(JMP32, JLT,  X),			\
	INSN_3(JMP32, JGE,  X),			\
	INSN_3(JMP32, JLE,  X),			\
	INSN_3(JMP32, JSGT, X),			\
	INSN_3(JMP32, JSLT, X),			\
	INSN_3(JMP32, JSGE, X),			\
	INSN_3(JMP32, JSLE, X),			\
	INSN_3(JMP32, JSET, X),			\
	/*   Immediate based. */		\
	INSN_3(JMP32, JEQ,  K),			\
	INSN_3(JMP32, JNE,  K),			\
	INSN_3(JMP32, JGT,  K),			\
	INSN_3(JMP32, JLT,  K),			\
	INSN_3(JMP32, JGE,  K),			\
	INSN_3(JMP32, JLE,  K),			\
	INSN_3(JMP32, JSGT, K),			\
	INSN_3(JMP32, JSLT, K),			\
	INSN_3(JMP32, JSGE, K),			\
	INSN_3(JMP32, JSLE, K),			\
	INSN_3(JMP32, JSET, K),			\
	/* Jump instructions. */		\
	/*   Register based. */			\
	INSN_3(JMP, JEQ,  X),			\
	INSN_3(JMP, JNE,  X),			\
	INSN_3(JMP, JGT,  X),			\
	INSN_3(JMP, JLT,  X),			\
	INSN_3(JMP, JGE,  X),			\
	INSN_3(JMP, JLE,  X),			\
	INSN_3(JMP, JSGT, X),			\
	INSN_3(JMP, JSLT, X),			\
	INSN_3(JMP, JSGE, X),			\
	INSN_3(JMP, JSLE, X),			\
	INSN_3(JMP, JSET, X),			\
	/*   Immediate based. */		\
	INSN_3(JMP, JEQ,  K),			\
	INSN_3(JMP, JNE,  K),			\
	INSN_3(JMP, JGT,  K),			\
	INSN_3(JMP, JLT,  K),			\
	INSN_3(JMP, JGE,  K),			\
	INSN_3(JMP, JLE,  K),			\
	INSN_3(JMP, JSGT, K),			\
	INSN_3(JMP, JSLT, K),			\
	INSN_3(JMP, JSGE, K),			\
	INSN_3(JMP, JSLE, K),			\
	INSN_3(JMP, JSET, K),			\
	INSN_2(JMP, JA),			\
	/* Store instructions. */		\
	/*   Register based. */			\
	INSN_3(STX, MEM,  B),			\
	INSN_3(STX, MEM,  H),			\
	INSN_3(STX, MEM,  W),			\
	INSN_3(STX, MEM,  DW),			\
	INSN_3(STX, XADD, W),			\
	INSN_3(STX, XADD, DW),			\
	/*   Immediate based. */		\
	INSN_3(ST, MEM, B),			\
	INSN_3(ST, MEM, H),			\
	INSN_3(ST, MEM, W),			\
	INSN_3(ST, MEM, DW),			\
	/* Load instructions. */		\
	/*   Register based. */			\
	INSN_3(LDX, MEM, B),			\
	INSN_3(LDX, MEM, H),			\
	INSN_3(LDX, MEM, W),			\
	INSN_3(LDX, MEM, DW),			\
	/*   Immediate based. */		\
	INSN_3(LD, IMM, DW)

bool bpf_opcode_in_insntable(u8 code)
{
#define BPF_INSN_2_TBL(x, y)    [BPF_##x | BPF_##y] = true
#define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true
	static const bool public_insntable[256] = {
		[0 ... 255] = false,
		/* Now overwrite non-defaults ... */
		BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL),
		/* UAPI exposed, but rewritten opcodes. cBPF carry-over. */
		[BPF_LD | BPF_ABS | BPF_B] = true,
		[BPF_LD | BPF_ABS | BPF_H] = true,
		[BPF_LD | BPF_ABS | BPF_W] = true,
		[BPF_LD | BPF_IND | BPF_B] = true,
		[BPF_LD | BPF_IND | BPF_H] = true,
		[BPF_LD | BPF_IND | BPF_W] = true,
	};
#undef BPF_INSN_3_TBL
#undef BPF_INSN_2_TBL
	return public_insntable[code];
}

#ifndef CONFIG_BPF_JIT_ALWAYS_ON
u64 __weak bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)
{
	memset(dst, 0, size);
	return -EFAULT;
}

/**
 *	__bpf_prog_run - run eBPF program on a given context
 *	@regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers
 *	@insn: is the array of eBPF instructions
 *	@stack: is the eBPF storage stack
 *
 * Decode and execute eBPF instructions.
 */
static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
{
#define BPF_INSN_2_LBL(x, y)    [BPF_##x | BPF_##y] = &&x##_##y
#define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
	static const void * const jumptable[256] __annotate_jump_table = {
		[0 ... 255] = &&default_label,
		/* Now overwrite non-defaults ... */
		BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
		/* Non-UAPI available opcodes. */
		[BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
		[BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
		[BPF_ST  | BPF_NOSPEC] = &&ST_NOSPEC,
		[BPF_LDX | BPF_PROBE_MEM | BPF_B] = &&LDX_PROBE_MEM_B,
		[BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H,
		[BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W,
		[BPF_LDX | BPF_PROBE_MEM | BPF_DW] = &&LDX_PROBE_MEM_DW,
	};
#undef BPF_INSN_3_LBL
#undef BPF_INSN_2_LBL
	u32 tail_call_cnt = 0;

#define CONT	 ({ insn++; goto select_insn; })
#define CONT_JMP ({ insn++; goto select_insn; })

select_insn:
	goto *jumptable[insn->code];

	/* Explicitly mask the register-based shift amounts with 63 or 31
	 * to avoid undefined behavior. Normally this won't affect the
	 * generated code, for example, in case of native 64 bit archs such
	 * as x86-64 or arm64, the compiler is optimizing the AND away for
	 * the interpreter. In case of JITs, each of the JIT backends compiles
	 * the BPF shift operations to machine instructions which produce
	 * implementation-defined results in such a case; the resulting
	 * contents of the register may be arbitrary, but program behaviour
	 * as a whole remains defined. In other words, in case of JIT backends,
	 * the AND must /not/ be added to the emitted LSH/RSH/ARSH translation.
	 */
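	/* Example (illustrative): with DST = 1 and SRC = 70, ALU64_LSH_X
	 * below computes 1 << (70 & 63) = 1 << 6 = 64 instead of invoking
	 * undefined behavior in the C shift expression.
	 */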
1420 	/* ALU (shifts) */
1421 #define SHT(OPCODE, OP)					\
1422 	ALU64_##OPCODE##_X:				\
1423 		DST = DST OP (SRC & 63);		\
1424 		CONT;					\
1425 	ALU_##OPCODE##_X:				\
1426 		DST = (u32) DST OP ((u32) SRC & 31);	\
1427 		CONT;					\
1428 	ALU64_##OPCODE##_K:				\
1429 		DST = DST OP IMM;			\
1430 		CONT;					\
1431 	ALU_##OPCODE##_K:				\
1432 		DST = (u32) DST OP (u32) IMM;		\
1433 		CONT;
1434 	/* ALU (rest) */
1435 #define ALU(OPCODE, OP)					\
1436 	ALU64_##OPCODE##_X:				\
1437 		DST = DST OP SRC;			\
1438 		CONT;					\
1439 	ALU_##OPCODE##_X:				\
1440 		DST = (u32) DST OP (u32) SRC;		\
1441 		CONT;					\
1442 	ALU64_##OPCODE##_K:				\
1443 		DST = DST OP IMM;			\
1444 		CONT;					\
1445 	ALU_##OPCODE##_K:				\
1446 		DST = (u32) DST OP (u32) IMM;		\
1447 		CONT;
1448 	ALU(ADD,  +)
1449 	ALU(SUB,  -)
1450 	ALU(AND,  &)
1451 	ALU(OR,   |)
1452 	ALU(XOR,  ^)
1453 	ALU(MUL,  *)
1454 	SHT(LSH, <<)
1455 	SHT(RSH, >>)
1456 #undef SHT
1457 #undef ALU
1458 	ALU_NEG:
1459 		DST = (u32) -DST;
1460 		CONT;
1461 	ALU64_NEG:
1462 		DST = -DST;
1463 		CONT;
1464 	ALU_MOV_X:
1465 		DST = (u32) SRC;
1466 		CONT;
1467 	ALU_MOV_K:
1468 		DST = (u32) IMM;
1469 		CONT;
1470 	ALU64_MOV_X:
1471 		DST = SRC;
1472 		CONT;
1473 	ALU64_MOV_K:
1474 		DST = IMM;
1475 		CONT;
1476 	LD_IMM_DW:
1477 		DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
1478 		insn++;
1479 		CONT;
1480 	ALU_ARSH_X:
1481 		DST = (u64) (u32) (((s32) DST) >> (SRC & 31));
1482 		CONT;
1483 	ALU_ARSH_K:
1484 		DST = (u64) (u32) (((s32) DST) >> IMM);
1485 		CONT;
1486 	ALU64_ARSH_X:
1487 		(*(s64 *) &DST) >>= (SRC & 63);
1488 		CONT;
1489 	ALU64_ARSH_K:
1490 		(*(s64 *) &DST) >>= IMM;
1491 		CONT;
1492 	ALU64_MOD_X:
1493 		div64_u64_rem(DST, SRC, &AX);
1494 		DST = AX;
1495 		CONT;
1496 	ALU_MOD_X:
1497 		AX = (u32) DST;
1498 		DST = do_div(AX, (u32) SRC);
1499 		CONT;
1500 	ALU64_MOD_K:
1501 		div64_u64_rem(DST, IMM, &AX);
1502 		DST = AX;
1503 		CONT;
1504 	ALU_MOD_K:
1505 		AX = (u32) DST;
1506 		DST = do_div(AX, (u32) IMM);
1507 		CONT;
1508 	ALU64_DIV_X:
1509 		DST = div64_u64(DST, SRC);
1510 		CONT;
1511 	ALU_DIV_X:
1512 		AX = (u32) DST;
1513 		do_div(AX, (u32) SRC);
1514 		DST = (u32) AX;
1515 		CONT;
1516 	ALU64_DIV_K:
1517 		DST = div64_u64(DST, IMM);
1518 		CONT;
1519 	ALU_DIV_K:
1520 		AX = (u32) DST;
1521 		do_div(AX, (u32) IMM);
1522 		DST = (u32) AX;
1523 		CONT;
1524 	ALU_END_TO_BE:
1525 		switch (IMM) {
1526 		case 16:
1527 			DST = (__force u16) cpu_to_be16(DST);
1528 			break;
1529 		case 32:
1530 			DST = (__force u32) cpu_to_be32(DST);
1531 			break;
1532 		case 64:
1533 			DST = (__force u64) cpu_to_be64(DST);
1534 			break;
1535 		}
1536 		CONT;
1537 	ALU_END_TO_LE:
1538 		switch (IMM) {
1539 		case 16:
1540 			DST = (__force u16) cpu_to_le16(DST);
1541 			break;
1542 		case 32:
1543 			DST = (__force u32) cpu_to_le32(DST);
1544 			break;
1545 		case 64:
1546 			DST = (__force u64) cpu_to_le64(DST);
1547 			break;
1548 		}
1549 		CONT;
1550 
1551 	/* CALL */
1552 	JMP_CALL:
1553 		/* Function call scratches BPF_R1-BPF_R5 registers,
1554 		 * preserves BPF_R6-BPF_R9, and stores return value
1555 		 * into BPF_R0.
1556 		 */
1557 		BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
1558 						       BPF_R4, BPF_R5);
1559 		CONT;
1560 
1561 	JMP_CALL_ARGS:
1562 		BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
1563 							    BPF_R3, BPF_R4,
1564 							    BPF_R5,
1565 							    insn + insn->off + 1);
1566 		CONT;
1567 
1568 	JMP_TAIL_CALL: {
1569 		struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
1570 		struct bpf_array *array = container_of(map, struct bpf_array, map);
1571 		struct bpf_prog *prog;
1572 		u32 index = BPF_R3;
1573 
1574 		if (unlikely(index >= array->map.max_entries))
1575 			goto out;
1576 		if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT))
1577 			goto out;
1578 
1579 		tail_call_cnt++;
1580 
1581 		prog = READ_ONCE(array->ptrs[index]);
1582 		if (!prog)
1583 			goto out;
1584 
1585 		/* ARG1 at this point is guaranteed to point to CTX from
1586 		 * the verifier side due to the fact that the tail call is
1587 		 * handled like a helper, that is, bpf_tail_call_proto,
1588 		 * where arg1_type is ARG_PTR_TO_CTX.
1589 		 */
1590 		insn = prog->insnsi;
1591 		goto select_insn;
1592 out:
1593 		CONT;
1594 	}
1595 	JMP_JA:
1596 		insn += insn->off;
1597 		CONT;
1598 	JMP_EXIT:
1599 		return BPF_R0;
1600 	/* JMP */
1601 #define COND_JMP(SIGN, OPCODE, CMP_OP)				\
1602 	JMP_##OPCODE##_X:					\
1603 		if ((SIGN##64) DST CMP_OP (SIGN##64) SRC) {	\
1604 			insn += insn->off;			\
1605 			CONT_JMP;				\
1606 		}						\
1607 		CONT;						\
1608 	JMP32_##OPCODE##_X:					\
1609 		if ((SIGN##32) DST CMP_OP (SIGN##32) SRC) {	\
1610 			insn += insn->off;			\
1611 			CONT_JMP;				\
1612 		}						\
1613 		CONT;						\
1614 	JMP_##OPCODE##_K:					\
1615 		if ((SIGN##64) DST CMP_OP (SIGN##64) IMM) {	\
1616 			insn += insn->off;			\
1617 			CONT_JMP;				\
1618 		}						\
1619 		CONT;						\
1620 	JMP32_##OPCODE##_K:					\
1621 		if ((SIGN##32) DST CMP_OP (SIGN##32) IMM) {	\
1622 			insn += insn->off;			\
1623 			CONT_JMP;				\
1624 		}						\
1625 		CONT;
1626 	COND_JMP(u, JEQ, ==)
1627 	COND_JMP(u, JNE, !=)
1628 	COND_JMP(u, JGT, >)
1629 	COND_JMP(u, JLT, <)
1630 	COND_JMP(u, JGE, >=)
1631 	COND_JMP(u, JLE, <=)
1632 	COND_JMP(u, JSET, &)
1633 	COND_JMP(s, JSGT, >)
1634 	COND_JMP(s, JSLT, <)
1635 	COND_JMP(s, JSGE, >=)
1636 	COND_JMP(s, JSLE, <=)
1637 #undef COND_JMP
1638 	/* ST, STX and LDX*/
1639 	ST_NOSPEC:
1640 		/* Speculation barrier for mitigating Speculative Store Bypass.
1641 		 * In case of arm64, we rely on the firmware mitigation as
1642 		 * controlled via the ssbd kernel parameter. Whenever the
1643 		 * mitigation is enabled, it works for all of the kernel code
1644 		 * with no need to provide any additional instructions here.
1645 		 * In case of x86, we use 'lfence' insn for mitigation. We
1646 		 * reuse preexisting logic from Spectre v1 mitigation that
1647 		 * happens to produce the required code on x86 for v4 as well.
1648 		 */
1649 		barrier_nospec();
1650 		CONT;
1651 #define LDST(SIZEOP, SIZE)						\
1652 	STX_MEM_##SIZEOP:						\
1653 		*(SIZE *)(unsigned long) (DST + insn->off) = SRC;	\
1654 		CONT;							\
1655 	ST_MEM_##SIZEOP:						\
1656 		*(SIZE *)(unsigned long) (DST + insn->off) = IMM;	\
1657 		CONT;							\
1658 	LDX_MEM_##SIZEOP:						\
1659 		DST = *(SIZE *)(unsigned long) (SRC + insn->off);	\
1660 		CONT;							\
1661 	LDX_PROBE_MEM_##SIZEOP:						\
1662 		bpf_probe_read_kernel(&DST, sizeof(SIZE),		\
1663 				      (const void *)(long) (SRC + insn->off));	\
1664 		DST = *((SIZE *)&DST);					\
1665 		CONT;
1666 
1667 	LDST(B,   u8)
1668 	LDST(H,  u16)
1669 	LDST(W,  u32)
1670 	LDST(DW, u64)
1671 #undef LDST
1672 
1673 	STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
1674 		atomic_add((u32) SRC, (atomic_t *)(unsigned long)
1675 			   (DST + insn->off));
1676 		CONT;
1677 	STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
1678 		atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
1679 			     (DST + insn->off));
1680 		CONT;
1681 
1682 	default_label:
1683 		/* If we ever reach this, we have a bug somewhere. Die hard here
1684 		 * instead of just returning 0; we could be somewhere in a subprog,
1685 		 * so execution could continue otherwise which we do /not/ want.
1686 		 *
1687 		 * Note, verifier whitelists all opcodes in bpf_opcode_in_insntable().
1688 		 */
1689 		pr_warn("BPF interpreter: unknown opcode %02x\n", insn->code);
1690 		BUG_ON(1);
1691 		return 0;
1692 }
1693 
1694 #define PROG_NAME(stack_size) __bpf_prog_run##stack_size
1695 #define DEFINE_BPF_PROG_RUN(stack_size) \
1696 static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
1697 { \
1698 	u64 stack[stack_size / sizeof(u64)]; \
1699 	u64 regs[MAX_BPF_EXT_REG]; \
1700 \
1701 	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
1702 	ARG1 = (u64) (unsigned long) ctx; \
1703 	return ___bpf_prog_run(regs, insn, stack); \
1704 }
1705 
1706 #define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
1707 #define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
1708 static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
1709 				      const struct bpf_insn *insn) \
1710 { \
1711 	u64 stack[stack_size / sizeof(u64)]; \
1712 	u64 regs[MAX_BPF_EXT_REG]; \
1713 \
1714 	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
1715 	BPF_R1 = r1; \
1716 	BPF_R2 = r2; \
1717 	BPF_R3 = r3; \
1718 	BPF_R4 = r4; \
1719 	BPF_R5 = r5; \
1720 	return ___bpf_prog_run(regs, insn, stack); \
1721 }
1722 
1723 #define EVAL1(FN, X) FN(X)
1724 #define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
1725 #define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
1726 #define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
1727 #define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
1728 #define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)
1729 
1730 EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
1731 EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
1732 EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);
1733 
1734 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
1735 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
1736 EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);
1737 
1738 #define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),
1739 
1740 static unsigned int (*interpreters[])(const void *ctx,
1741 				      const struct bpf_insn *insn) = {
1742 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
1743 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
1744 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
1745 };
1746 #undef PROG_NAME_LIST
1747 #define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
1748 static u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
1749 				  const struct bpf_insn *insn) = {
1750 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
1751 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
1752 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
1753 };
1754 #undef PROG_NAME_LIST
1755 
1756 void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
1757 {
1758 	stack_depth = max_t(u32, stack_depth, 1);
1759 	insn->off = (s16) insn->imm;
1760 	insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
1761 		__bpf_call_base_args;
1762 	insn->code = BPF_JMP | BPF_CALL_ARGS;
1763 }
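
/* Rewrite sketch for bpf_patch_call_args() (values are illustrative):
 * a BPF_CALL insn whose imm holds the subprog offset becomes
 *
 *	insn->code = BPF_JMP | BPF_CALL_ARGS;
 *	insn->off  = original imm (the subprog offset);
 *	insn->imm  = __bpf_prog_run_args64 - __bpf_call_base_args;
 *
 * assuming here a stack_depth in the 33..64 byte bucket, which selects
 * interpreters_args[1], i.e. __bpf_prog_run_args64.
 */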
1764 
1765 #else
1766 static unsigned int __bpf_prog_ret0_warn(const void *ctx,
1767 					 const struct bpf_insn *insn)
1768 {
1769 	/* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
1770 	 * is not working properly, so warn about it!
1771 	 */
1772 	WARN_ON_ONCE(1);
1773 	return 0;
1774 }
1775 #endif
1776 
1777 bool bpf_prog_array_compatible(struct bpf_array *array,
1778 			       const struct bpf_prog *fp)
1779 {
1780 	bool ret;
1781 	struct bpf_prog_aux *aux = fp->aux;
1782 
1783 	if (fp->kprobe_override)
1784 		return false;
1785 
1786 	spin_lock(&array->aux->owner.lock);
1787 
1788 	if (!array->aux->owner.type) {
1789 		/* There's no owner yet where we could check for
1790 		 * compatibility.
1791 		 */
1792 		array->aux->owner.type  = fp->type;
1793 		array->aux->owner.jited = fp->jited;
1794 		array->aux->owner.attach_func_proto = aux->attach_func_proto;
1795 		ret = true;
1796 	} else {
1797 		ret = array->aux->owner.type  == fp->type &&
1798 		      array->aux->owner.jited == fp->jited;
1799 		if (ret &&
1800 		    array->aux->owner.attach_func_proto != aux->attach_func_proto) {
1801 			switch (fp->type) {
1802 			case BPF_PROG_TYPE_TRACING:
1803 			case BPF_PROG_TYPE_LSM:
1804 			case BPF_PROG_TYPE_EXT:
1805 			case BPF_PROG_TYPE_STRUCT_OPS:
1806 				ret = false;
1807 				break;
1808 			default:
1809 				break;
1810 			}
1811 		}
1812 	}
1813 	spin_unlock(&array->aux->owner.lock);
1814 	return ret;
1815 }
1816 
1817 static int bpf_check_tail_call(const struct bpf_prog *fp)
1818 {
1819 	struct bpf_prog_aux *aux = fp->aux;
1820 	int i, ret = 0;
1821 
1822 	mutex_lock(&aux->used_maps_mutex);
1823 	for (i = 0; i < aux->used_map_cnt; i++) {
1824 		struct bpf_map *map = aux->used_maps[i];
1825 		struct bpf_array *array;
1826 
1827 		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
1828 			continue;
1829 
1830 		array = container_of(map, struct bpf_array, map);
1831 		if (!bpf_prog_array_compatible(array, fp)) {
1832 			ret = -EINVAL;
1833 			goto out;
1834 		}
1835 	}
1836 
1837 out:
1838 	mutex_unlock(&aux->used_maps_mutex);
1839 	return ret;
1840 }
1841 
1842 static void bpf_prog_select_func(struct bpf_prog *fp)
1843 {
1844 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
1845 	u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
1846 
1847 	fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
1848 #else
1849 	fp->bpf_func = __bpf_prog_ret0_warn;
1850 #endif
1851 }
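
/* Bucketing example for the selection above: a program with
 * fp->aux->stack_depth == 100 rounds up to 128, selecting
 * interpreters[3], i.e. __bpf_prog_run128 with a 128 byte stack.
 */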
1852 
1853 /**
1854  *	bpf_prog_select_runtime - select exec runtime for BPF program
1855  *	@fp: bpf_prog populated with internal BPF program
1856  *	@err: pointer to error variable
1857  *
1858  * Try to JIT the eBPF program; if JIT is not available, use the interpreter.
1859  * The BPF program will be executed via the BPF_PROG_RUN() macro.
1860  */
1861 struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
1862 {
1863 	/* In case of BPF-to-BPF calls, the verifier did all the prep
1864 	 * work with regard to JITing, etc.
1865 	 */
1866 	if (fp->bpf_func)
1867 		goto finalize;
1868 
1869 	bpf_prog_select_func(fp);
1870 
1871 	/* eBPF JITs can rewrite the program in case constant
1872 	 * blinding is active. However, in case of error during
1873 	 * blinding, bpf_int_jit_compile() must always return a
1874 	 * valid program, which in this case would simply not
1875 	 * be JITed, but fall back to the interpreter.
1876 	 */
1877 	if (!bpf_prog_is_dev_bound(fp->aux)) {
1878 		*err = bpf_prog_alloc_jited_linfo(fp);
1879 		if (*err)
1880 			return fp;
1881 
1882 		fp = bpf_int_jit_compile(fp);
1883 		if (!fp->jited) {
1884 			bpf_prog_free_jited_linfo(fp);
1885 #ifdef CONFIG_BPF_JIT_ALWAYS_ON
1886 			*err = -ENOTSUPP;
1887 			return fp;
1888 #endif
1889 		} else {
1890 			bpf_prog_free_unused_jited_linfo(fp);
1891 		}
1892 	} else {
1893 		*err = bpf_prog_offload_compile(fp);
1894 		if (*err)
1895 			return fp;
1896 	}
1897 
1898 finalize:
1899 	*err = bpf_prog_lock_ro(fp);
1900 	if (*err)
1901 		return fp;
1902 
1903 	/* The tail call compatibility check can only be done at
1904 	 * this late stage, as we need to determine whether we deal
1905 	 * with JITed or non-JITed program concatenations, and not
1906 	 * all eBPF JITs might immediately support all features.
1907 	 */
1908 	*err = bpf_check_tail_call(fp);
1909 
1910 	return fp;
1911 }
1912 EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
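
/* Typical call site (a sketch, modelled on the bpf(2) program load
 * path; the error label is illustrative):
 *
 *	prog = bpf_prog_select_runtime(prog, &err);
 *	if (err < 0)
 *		goto free_used_maps;
 *
 * Note that a prog pointer is returned even on error, so callers must
 * check *err rather than the returned pointer.
 */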
1913 
1914 static unsigned int __bpf_prog_ret1(const void *ctx,
1915 				    const struct bpf_insn *insn)
1916 {
1917 	return 1;
1918 }
1919 
1920 static struct bpf_prog_dummy {
1921 	struct bpf_prog prog;
1922 } dummy_bpf_prog = {
1923 	.prog = {
1924 		.bpf_func = __bpf_prog_ret1,
1925 	},
1926 };
1927 
1928 /* To avoid allocating an empty bpf_prog_array for cgroups that
1929  * don't have a bpf program attached, use one global 'empty_prog_array'.
1930  * It will not be modified by the caller of bpf_prog_array_alloc()
1931  * (since the caller requested prog_cnt == 0); that pointer should
1932  * still be 'freed' by bpf_prog_array_free().
1933  */
1934 static struct {
1935 	struct bpf_prog_array hdr;
1936 	struct bpf_prog *null_prog;
1937 } empty_prog_array = {
1938 	.null_prog = NULL,
1939 };
1940 
1941 struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
1942 {
1943 	if (prog_cnt)
1944 		return kzalloc(sizeof(struct bpf_prog_array) +
1945 			       sizeof(struct bpf_prog_array_item) *
1946 			       (prog_cnt + 1),
1947 			       flags);
1948 
1949 	return &empty_prog_array.hdr;
1950 }
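
/* Layout sketch: bpf_prog_array_alloc(2, GFP_KERNEL) returns an array
 * with prog_cnt + 1 items, the last one acting as the NULL sentinel:
 *
 *	array->items[0].prog = <prog>;
 *	array->items[1].prog = <prog>;
 *	array->items[2].prog = NULL;	(terminates iteration)
 *
 * while bpf_prog_array_alloc(0, ...) hands out the shared
 * empty_prog_array.hdr, which bpf_prog_array_free() knows to skip.
 */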
1951 
1952 void bpf_prog_array_free(struct bpf_prog_array *progs)
1953 {
1954 	if (!progs || progs == &empty_prog_array.hdr)
1955 		return;
1956 	kfree_rcu(progs, rcu);
1957 }
1958 
1959 int bpf_prog_array_length(struct bpf_prog_array *array)
1960 {
1961 	struct bpf_prog_array_item *item;
1962 	u32 cnt = 0;
1963 
1964 	for (item = array->items; item->prog; item++)
1965 		if (item->prog != &dummy_bpf_prog.prog)
1966 			cnt++;
1967 	return cnt;
1968 }
1969 
1970 bool bpf_prog_array_is_empty(struct bpf_prog_array *array)
1971 {
1972 	struct bpf_prog_array_item *item;
1973 
1974 	for (item = array->items; item->prog; item++)
1975 		if (item->prog != &dummy_bpf_prog.prog)
1976 			return false;
1977 	return true;
1978 }
1979 
1980 static bool bpf_prog_array_copy_core(struct bpf_prog_array *array,
1981 				     u32 *prog_ids,
1982 				     u32 request_cnt)
1983 {
1984 	struct bpf_prog_array_item *item;
1985 	int i = 0;
1986 
1987 	for (item = array->items; item->prog; item++) {
1988 		if (item->prog == &dummy_bpf_prog.prog)
1989 			continue;
1990 		prog_ids[i] = item->prog->aux->id;
1991 		if (++i == request_cnt) {
1992 			item++;
1993 			break;
1994 		}
1995 	}
1996 
1997 	return !!(item->prog);
1998 }
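
/* Return value example: for an array holding progs {A, B, C} and
 * request_cnt == 2, prog_ids receives {A->aux->id, B->aux->id} and
 * the function returns true since C did not fit; callers map that
 * to -ENOSPC.
 */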
1999 
2000 int bpf_prog_array_copy_to_user(struct bpf_prog_array *array,
2001 				__u32 __user *prog_ids, u32 cnt)
2002 {
2003 	unsigned long err = 0;
2004 	bool nospc;
2005 	u32 *ids;
2006 
2007 	/* users of this function are doing:
2008 	 * cnt = bpf_prog_array_length();
2009 	 * if (cnt > 0)
2010 	 *     bpf_prog_array_copy_to_user(..., cnt);
2011 	 * so the kcalloc below doesn't need an extra cnt > 0 check.
2012 	 */
2013 	ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
2014 	if (!ids)
2015 		return -ENOMEM;
2016 	nospc = bpf_prog_array_copy_core(array, ids, cnt);
2017 	err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
2018 	kfree(ids);
2019 	if (err)
2020 		return -EFAULT;
2021 	if (nospc)
2022 		return -ENOSPC;
2023 	return 0;
2024 }
2025 
2026 void bpf_prog_array_delete_safe(struct bpf_prog_array *array,
2027 				struct bpf_prog *old_prog)
2028 {
2029 	struct bpf_prog_array_item *item;
2030 
2031 	for (item = array->items; item->prog; item++)
2032 		if (item->prog == old_prog) {
2033 			WRITE_ONCE(item->prog, &dummy_bpf_prog.prog);
2034 			break;
2035 		}
2036 }
2037 
2038 /**
2039  * bpf_prog_array_delete_safe_at() - Replaces the program at the given
2040  *                                   index into the program array with
2041  *                                   a dummy no-op program.
2042  * @array: a bpf_prog_array
2043  * @index: the index of the program to replace
2044  *
2045  * Skips over dummy programs, by not counting them, when calculating
2046  * the position of the program to replace.
2047  *
2048  * Return:
2049  * * 0		- Success
2050  * * -EINVAL	- Invalid index value. Must be a non-negative integer.
2051  * * -ENOENT	- Index out of range
2052  */
2053 int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index)
2054 {
2055 	return bpf_prog_array_update_at(array, index, &dummy_bpf_prog.prog);
2056 }
2057 
2058 /**
2059  * bpf_prog_array_update_at() - Updates the program at the given index
2060  *                              into the program array.
2061  * @array: a bpf_prog_array
2062  * @index: the index of the program to update
2063  * @prog: the program to insert into the array
2064  *
2065  * Skips over dummy programs, by not counting them, when calculating
2066  * the position of the program to update.
2067  *
2068  * Return:
2069  * * 0		- Success
2070  * * -EINVAL	- Invalid index value. Must be a non-negative integer.
2071  * * -ENOENT	- Index out of range
2072  */
2073 int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
2074 			     struct bpf_prog *prog)
2075 {
2076 	struct bpf_prog_array_item *item;
2077 
2078 	if (unlikely(index < 0))
2079 		return -EINVAL;
2080 
2081 	for (item = array->items; item->prog; item++) {
2082 		if (item->prog == &dummy_bpf_prog.prog)
2083 			continue;
2084 		if (!index) {
2085 			WRITE_ONCE(item->prog, prog);
2086 			return 0;
2087 		}
2088 		index--;
2089 	}
2090 	return -ENOENT;
2091 }
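
/* Index semantics example: for items {A, dummy, B, NULL} the dummy
 * slot is not counted, so bpf_prog_array_delete_safe_at(array, 1)
 * replaces B (not the dummy) with &dummy_bpf_prog.prog.
 */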
2092 
2093 int bpf_prog_array_copy(struct bpf_prog_array *old_array,
2094 			struct bpf_prog *exclude_prog,
2095 			struct bpf_prog *include_prog,
2096 			struct bpf_prog_array **new_array)
2097 {
2098 	int new_prog_cnt, carry_prog_cnt = 0;
2099 	struct bpf_prog_array_item *existing;
2100 	struct bpf_prog_array *array;
2101 	bool found_exclude = false;
2102 	int new_prog_idx = 0;
2103 
2104 	/* Figure out how many existing progs we need to carry over to
2105 	 * the new array.
2106 	 */
2107 	if (old_array) {
2108 		existing = old_array->items;
2109 		for (; existing->prog; existing++) {
2110 			if (existing->prog == exclude_prog) {
2111 				found_exclude = true;
2112 				continue;
2113 			}
2114 			if (existing->prog != &dummy_bpf_prog.prog)
2115 				carry_prog_cnt++;
2116 			if (existing->prog == include_prog)
2117 				return -EEXIST;
2118 		}
2119 	}
2120 
2121 	if (exclude_prog && !found_exclude)
2122 		return -ENOENT;
2123 
2124 	/* How many progs (not NULL) will be in the new array? */
2125 	new_prog_cnt = carry_prog_cnt;
2126 	if (include_prog)
2127 		new_prog_cnt += 1;
2128 
2129 	/* Do we have any prog (not NULL) in the new array? */
2130 	if (!new_prog_cnt) {
2131 		*new_array = NULL;
2132 		return 0;
2133 	}
2134 
2135 	/* +1 as the end of prog_array is marked with NULL */
2136 	array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
2137 	if (!array)
2138 		return -ENOMEM;
2139 
2140 	/* Fill in the new prog array */
2141 	if (carry_prog_cnt) {
2142 		existing = old_array->items;
2143 		for (; existing->prog; existing++)
2144 			if (existing->prog != exclude_prog &&
2145 			    existing->prog != &dummy_bpf_prog.prog) {
2146 				array->items[new_prog_idx++].prog =
2147 					existing->prog;
2148 			}
2149 	}
2150 	if (include_prog)
2151 		array->items[new_prog_idx++].prog = include_prog;
2152 	array->items[new_prog_idx].prog = NULL;
2153 	*new_array = array;
2154 	return 0;
2155 }
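
/* Worked example: for old_array items {A, dummy, B} with
 * exclude_prog == A and include_prog == C, the freshly allocated
 * array ends up as {B, C, NULL}; dummy entries are dropped and the
 * excluded prog is not carried over.
 */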
2156 
2157 int bpf_prog_array_copy_info(struct bpf_prog_array *array,
2158 			     u32 *prog_ids, u32 request_cnt,
2159 			     u32 *prog_cnt)
2160 {
2161 	u32 cnt = 0;
2162 
2163 	if (array)
2164 		cnt = bpf_prog_array_length(array);
2165 
2166 	*prog_cnt = cnt;
2167 
2168 	/* return early if the user requested only the program count or there is nothing to copy */
2169 	if (!request_cnt || !cnt)
2170 		return 0;
2171 
2172 	/* this function is called under trace/bpf_trace.c: bpf_event_mutex */
2173 	return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? -ENOSPC
2174 								     : 0;
2175 }
2176 
2177 void __bpf_free_used_maps(struct bpf_prog_aux *aux,
2178 			  struct bpf_map **used_maps, u32 len)
2179 {
2180 	struct bpf_map *map;
2181 	u32 i;
2182 
2183 	for (i = 0; i < len; i++) {
2184 		map = used_maps[i];
2185 		if (map->ops->map_poke_untrack)
2186 			map->ops->map_poke_untrack(map, aux);
2187 		bpf_map_put(map);
2188 	}
2189 }
2190 
2191 static void bpf_free_used_maps(struct bpf_prog_aux *aux)
2192 {
2193 	__bpf_free_used_maps(aux, aux->used_maps, aux->used_map_cnt);
2194 	kfree(aux->used_maps);
2195 }
2196 
2197 static void bpf_prog_free_deferred(struct work_struct *work)
2198 {
2199 	struct bpf_prog_aux *aux;
2200 	int i;
2201 
2202 	aux = container_of(work, struct bpf_prog_aux, work);
2203 	bpf_free_used_maps(aux);
2204 	if (bpf_prog_is_dev_bound(aux))
2205 		bpf_prog_offload_destroy(aux->prog);
2206 #ifdef CONFIG_PERF_EVENTS
2207 	if (aux->prog->has_callchain_buf)
2208 		put_callchain_buffers();
2209 #endif
2210 	if (aux->dst_trampoline)
2211 		bpf_trampoline_put(aux->dst_trampoline);
2212 	for (i = 0; i < aux->func_cnt; i++) {
2213 		/* We can just unlink the subprog poke descriptor table as
2214 		 * it was originally linked to the main program and is also
2215 		 * released along with it.
2216 		 */
2217 		aux->func[i]->aux->poke_tab = NULL;
2218 		bpf_jit_free(aux->func[i]);
2219 	}
2220 	if (aux->func_cnt) {
2221 		kfree(aux->func);
2222 		bpf_prog_unlock_free(aux->prog);
2223 	} else {
2224 		bpf_jit_free(aux->prog);
2225 	}
2226 }
2227 
2228 /* Free internal BPF program */
2229 void bpf_prog_free(struct bpf_prog *fp)
2230 {
2231 	struct bpf_prog_aux *aux = fp->aux;
2232 
2233 	if (aux->dst_prog)
2234 		bpf_prog_put(aux->dst_prog);
2235 	INIT_WORK(&aux->work, bpf_prog_free_deferred);
2236 	schedule_work(&aux->work);
2237 }
2238 EXPORT_SYMBOL_GPL(bpf_prog_free);
2239 
2240 /* RNG for unprivileged user space with separated state from prandom_u32(). */
2241 static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);
2242 
2243 void bpf_user_rnd_init_once(void)
2244 {
2245 	prandom_init_once(&bpf_user_rnd_state);
2246 }
2247 
2248 BPF_CALL_0(bpf_user_rnd_u32)
2249 {
2250 	/* Should someone ever have the rather unwise idea to use some
2251 	 * of the registers passed into this function, then note that
2252 	 * this function is called from native eBPF and classic-to-eBPF
2253 	 * transformations. Register assignments from both sides are
2254 	 * different; e.g., classic always sets fn(ctx, A, X) here.
2255 	 */
2256 	struct rnd_state *state;
2257 	u32 res;
2258 
2259 	state = &get_cpu_var(bpf_user_rnd_state);
2260 	res = prandom_u32_state(state);
2261 	put_cpu_var(bpf_user_rnd_state);
2262 
2263 	return res;
2264 }
2265 
2266 BPF_CALL_0(bpf_get_raw_cpu_id)
2267 {
2268 	return raw_smp_processor_id();
2269 }
2270 
2271 /* Weak definitions of helper functions in case we don't have bpf syscall. */
2272 const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
2273 const struct bpf_func_proto bpf_map_update_elem_proto __weak;
2274 const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
2275 const struct bpf_func_proto bpf_map_push_elem_proto __weak;
2276 const struct bpf_func_proto bpf_map_pop_elem_proto __weak;
2277 const struct bpf_func_proto bpf_map_peek_elem_proto __weak;
2278 const struct bpf_func_proto bpf_spin_lock_proto __weak;
2279 const struct bpf_func_proto bpf_spin_unlock_proto __weak;
2280 const struct bpf_func_proto bpf_jiffies64_proto __weak;
2281 
2282 const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
2283 const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
2284 const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
2285 const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
2286 const struct bpf_func_proto bpf_ktime_get_boot_ns_proto __weak;
2287 
2288 const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
2289 const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
2290 const struct bpf_func_proto bpf_get_current_comm_proto __weak;
2291 const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
2292 const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto __weak;
2293 const struct bpf_func_proto bpf_get_local_storage_proto __weak;
2294 const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto __weak;
2295 const struct bpf_func_proto bpf_snprintf_btf_proto __weak;
2296 const struct bpf_func_proto bpf_seq_printf_btf_proto __weak;
2297 
2298 const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
2299 {
2300 	return NULL;
2301 }
2302 
2303 u64 __weak
2304 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
2305 		 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
2306 {
2307 	return -ENOTSUPP;
2308 }
2309 EXPORT_SYMBOL_GPL(bpf_event_output);
2310 
2311 /* Always built-in helper functions. */
2312 const struct bpf_func_proto bpf_tail_call_proto = {
2313 	.func		= NULL,
2314 	.gpl_only	= false,
2315 	.ret_type	= RET_VOID,
2316 	.arg1_type	= ARG_PTR_TO_CTX,
2317 	.arg2_type	= ARG_CONST_MAP_PTR,
2318 	.arg3_type	= ARG_ANYTHING,
2319 };
2320 
2321 /* Stub for JITs that only support cBPF. eBPF programs are interpreted.
2322  * It is encouraged to implement bpf_int_jit_compile() instead, so that
2323  * eBPF and implicitly also cBPF can get JITed!
2324  */
2325 struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
2326 {
2327 	return prog;
2328 }
2329 
2330 /* Stub for JITs that support eBPF. All cBPF code gets transformed into
2331  * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
2332  */
2333 void __weak bpf_jit_compile(struct bpf_prog *prog)
2334 {
2335 }
2336 
2337 bool __weak bpf_helper_changes_pkt_data(enum bpf_func_id func_id)
2338 {
2339 	return false;
2340 }
2341 
2342 /* Return TRUE if the JIT backend wants the verifier to enable sub-register
2343  * usage analysis and wants explicit zero extensions inserted by the verifier.
2344  * Otherwise, return FALSE.
2345  */
2346 bool __weak bpf_jit_needs_zext(void)
2347 {
2348 	return false;
2349 }
2350 
2351 /* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
2352  * skb_copy_bits(), so provide a weak definition of it for NET-less config.
2353  */
2354 int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
2355 			 int len)
2356 {
2357 	return -EFAULT;
2358 }
2359 
2360 int __weak bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
2361 			      void *addr1, void *addr2)
2362 {
2363 	return -ENOTSUPP;
2364 }
2365 
2366 DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
2367 EXPORT_SYMBOL(bpf_stats_enabled_key);
2368 
2369 /* All definitions of tracepoints related to BPF. */
2370 #define CREATE_TRACE_POINTS
2371 #include <linux/bpf_trace.h>
2372 
2373 EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);
2374 EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_bulk_tx);
2375