1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Linux Socket Filter - Kernel level socket filtering
4  *
5  * Based on the design of the Berkeley Packet Filter. The new
6  * internal format has been designed by PLUMgrid:
7  *
8  *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
9  *
10  * Authors:
11  *
12  *	Jay Schulist <jschlst@samba.org>
13  *	Alexei Starovoitov <ast@plumgrid.com>
14  *	Daniel Borkmann <dborkman@redhat.com>
15  *
16  * Andi Kleen - Fix a few bad bugs and races.
17  * Kris Katterjohn - Added many additional checks in bpf_check_classic()
18  */
19 
20 #include <uapi/linux/btf.h>
21 #include <linux/filter.h>
22 #include <linux/skbuff.h>
23 #include <linux/vmalloc.h>
24 #include <linux/random.h>
25 #include <linux/moduleloader.h>
26 #include <linux/bpf.h>
27 #include <linux/btf.h>
28 #include <linux/frame.h>
29 #include <linux/rbtree_latch.h>
30 #include <linux/kallsyms.h>
31 #include <linux/rcupdate.h>
32 #include <linux/perf_event.h>
33 #include <linux/nospec.h>
34 
35 #include <asm/barrier.h>
36 #include <asm/unaligned.h>
37 
38 /* Registers */
39 #define BPF_R0	regs[BPF_REG_0]
40 #define BPF_R1	regs[BPF_REG_1]
41 #define BPF_R2	regs[BPF_REG_2]
42 #define BPF_R3	regs[BPF_REG_3]
43 #define BPF_R4	regs[BPF_REG_4]
44 #define BPF_R5	regs[BPF_REG_5]
45 #define BPF_R6	regs[BPF_REG_6]
46 #define BPF_R7	regs[BPF_REG_7]
47 #define BPF_R8	regs[BPF_REG_8]
48 #define BPF_R9	regs[BPF_REG_9]
49 #define BPF_R10	regs[BPF_REG_10]
50 
51 /* Named registers */
52 #define DST	regs[insn->dst_reg]
53 #define SRC	regs[insn->src_reg]
54 #define FP	regs[BPF_REG_FP]
55 #define AX	regs[BPF_REG_AX]
56 #define ARG1	regs[BPF_REG_ARG1]
57 #define CTX	regs[BPF_REG_CTX]
58 #define IMM	insn->imm
59 
60 /* No hurry in this branch
61  *
62  * Exported for the bpf jit load helper.
63  */
64 void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
65 {
66 	u8 *ptr = NULL;
67 
68 	if (k >= SKF_NET_OFF) {
69 		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
70 	} else if (k >= SKF_LL_OFF) {
71 		if (unlikely(!skb_mac_header_was_set(skb)))
72 			return NULL;
73 		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
74 	}
75 	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
76 		return ptr;
77 
78 	return NULL;
79 }
80 
81 struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags)
82 {
83 	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
84 	struct bpf_prog_aux *aux;
85 	struct bpf_prog *fp;
86 
87 	size = round_up(size, PAGE_SIZE);
88 	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
89 	if (fp == NULL)
90 		return NULL;
91 
92 	aux = kzalloc(sizeof(*aux), GFP_KERNEL | gfp_extra_flags);
93 	if (aux == NULL) {
94 		vfree(fp);
95 		return NULL;
96 	}
97 
98 	fp->pages = size / PAGE_SIZE;
99 	fp->aux = aux;
100 	fp->aux->prog = fp;
101 	fp->jit_requested = ebpf_jit_enabled();
102 
103 	INIT_LIST_HEAD_RCU(&fp->aux->ksym_lnode);
104 
105 	return fp;
106 }
107 
108 struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
109 {
110 	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
111 	struct bpf_prog *prog;
112 	int cpu;
113 
114 	prog = bpf_prog_alloc_no_stats(size, gfp_extra_flags);
115 	if (!prog)
116 		return NULL;
117 
118 	prog->aux->stats = alloc_percpu_gfp(struct bpf_prog_stats, gfp_flags);
119 	if (!prog->aux->stats) {
120 		kfree(prog->aux);
121 		vfree(prog);
122 		return NULL;
123 	}
124 
125 	for_each_possible_cpu(cpu) {
126 		struct bpf_prog_stats *pstats;
127 
128 		pstats = per_cpu_ptr(prog->aux->stats, cpu);
129 		u64_stats_init(&pstats->syncp);
130 	}
131 	return prog;
132 }
133 EXPORT_SYMBOL_GPL(bpf_prog_alloc);
134 
135 int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog)
136 {
137 	if (!prog->aux->nr_linfo || !prog->jit_requested)
138 		return 0;
139 
140 	prog->aux->jited_linfo = kcalloc(prog->aux->nr_linfo,
141 					 sizeof(*prog->aux->jited_linfo),
142 					 GFP_KERNEL | __GFP_NOWARN);
143 	if (!prog->aux->jited_linfo)
144 		return -ENOMEM;
145 
146 	return 0;
147 }
148 
149 void bpf_prog_free_jited_linfo(struct bpf_prog *prog)
150 {
151 	kfree(prog->aux->jited_linfo);
152 	prog->aux->jited_linfo = NULL;
153 }
154 
155 void bpf_prog_free_unused_jited_linfo(struct bpf_prog *prog)
156 {
157 	if (prog->aux->jited_linfo && !prog->aux->jited_linfo[0])
158 		bpf_prog_free_jited_linfo(prog);
159 }
160 
161 /* The jit engine is responsible to provide an array
162  * for insn_off to the jited_off mapping (insn_to_jit_off).
163  *
164  * The idx to this array is the insn_off.  Hence, the insn_off
165  * here is relative to the prog itself instead of the main prog.
166  * This array has one entry for each xlated bpf insn.
167  *
168  * jited_off is the byte off to the last byte of the jited insn.
169  *
170  * Hence, with
171  * insn_start:
172  *      The first bpf insn off of the prog.  The insn off
173  *      here is relative to the main prog.
174  *      e.g. if prog is a subprog, insn_start > 0
175  * linfo_idx:
176  *      The prog's idx to prog->aux->linfo and jited_linfo
177  *
178  * jited_linfo[linfo_idx] = prog->bpf_func
179  *
180  * For i > linfo_idx,
181  *
182  * jited_linfo[i] = prog->bpf_func +
183  *	insn_to_jit_off[linfo[i].insn_off - insn_start - 1]
184  */
185 void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
186 			       const u32 *insn_to_jit_off)
187 {
188 	u32 linfo_idx, insn_start, insn_end, nr_linfo, i;
189 	const struct bpf_line_info *linfo;
190 	void **jited_linfo;
191 
192 	if (!prog->aux->jited_linfo)
193 		/* Userspace did not provide linfo */
194 		return;
195 
196 	linfo_idx = prog->aux->linfo_idx;
197 	linfo = &prog->aux->linfo[linfo_idx];
198 	insn_start = linfo[0].insn_off;
199 	insn_end = insn_start + prog->len;
200 
201 	jited_linfo = &prog->aux->jited_linfo[linfo_idx];
202 	jited_linfo[0] = prog->bpf_func;
203 
204 	nr_linfo = prog->aux->nr_linfo - linfo_idx;
205 
206 	for (i = 1; i < nr_linfo && linfo[i].insn_off < insn_end; i++)
207 		/* The verifier ensures that linfo[i].insn_off is
208 		 * strictly increasing
209 		 */
210 		jited_linfo[i] = prog->bpf_func +
211 			insn_to_jit_off[linfo[i].insn_off - insn_start - 1];
212 }
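
/* A small worked example of the mapping above, with made-up numbers:
 * take a subprog whose first insn sits at insn_start = 10 in the main
 * prog, with linfo/jited_linfo already advanced by linfo_idx as done
 * in bpf_prog_fill_jited_linfo(). If linfo[1].insn_off = 12, then
 *
 *	jited_linfo[0] = prog->bpf_func
 *	jited_linfo[1] = prog->bpf_func + insn_to_jit_off[12 - 10 - 1]
 *		       = prog->bpf_func + insn_to_jit_off[1]
 *
 * i.e. the byte offset of the end of the jited code for the subprog's
 * second xlated insn, which is where the code for insn_off 12 begins.
 */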
213 
214 void bpf_prog_free_linfo(struct bpf_prog *prog)
215 {
216 	bpf_prog_free_jited_linfo(prog);
217 	kvfree(prog->aux->linfo);
218 }
219 
220 struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
221 				  gfp_t gfp_extra_flags)
222 {
223 	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
224 	struct bpf_prog *fp;
225 	u32 pages, delta;
226 	int ret;
227 
228 	BUG_ON(fp_old == NULL);
229 
230 	size = round_up(size, PAGE_SIZE);
231 	pages = size / PAGE_SIZE;
232 	if (pages <= fp_old->pages)
233 		return fp_old;
234 
235 	delta = pages - fp_old->pages;
236 	ret = __bpf_prog_charge(fp_old->aux->user, delta);
237 	if (ret)
238 		return NULL;
239 
240 	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
241 	if (fp == NULL) {
242 		__bpf_prog_uncharge(fp_old->aux->user, delta);
243 	} else {
244 		memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
245 		fp->pages = pages;
246 		fp->aux->prog = fp;
247 
248 		/* We keep fp->aux from fp_old around in the new
249 		 * reallocated structure.
250 		 */
251 		fp_old->aux = NULL;
252 		__bpf_prog_free(fp_old);
253 	}
254 
255 	return fp;
256 }
257 
258 void __bpf_prog_free(struct bpf_prog *fp)
259 {
260 	if (fp->aux) {
261 		free_percpu(fp->aux->stats);
262 		kfree(fp->aux);
263 	}
264 	vfree(fp);
265 }
266 
267 int bpf_prog_calc_tag(struct bpf_prog *fp)
268 {
269 	const u32 bits_offset = SHA_MESSAGE_BYTES - sizeof(__be64);
270 	u32 raw_size = bpf_prog_tag_scratch_size(fp);
271 	u32 digest[SHA_DIGEST_WORDS];
272 	u32 ws[SHA_WORKSPACE_WORDS];
273 	u32 i, bsize, psize, blocks;
274 	struct bpf_insn *dst;
275 	bool was_ld_map;
276 	u8 *raw, *todo;
277 	__be32 *result;
278 	__be64 *bits;
279 
280 	raw = vmalloc(raw_size);
281 	if (!raw)
282 		return -ENOMEM;
283 
284 	sha_init(digest);
285 	memset(ws, 0, sizeof(ws));
286 
287 	/* We need to take out the map fd for the digest calculation
288 	 * since they are unstable from user space side.
289 	 */
290 	dst = (void *)raw;
291 	for (i = 0, was_ld_map = false; i < fp->len; i++) {
292 		dst[i] = fp->insnsi[i];
293 		if (!was_ld_map &&
294 		    dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
295 		    (dst[i].src_reg == BPF_PSEUDO_MAP_FD ||
296 		     dst[i].src_reg == BPF_PSEUDO_MAP_VALUE)) {
297 			was_ld_map = true;
298 			dst[i].imm = 0;
299 		} else if (was_ld_map &&
300 			   dst[i].code == 0 &&
301 			   dst[i].dst_reg == 0 &&
302 			   dst[i].src_reg == 0 &&
303 			   dst[i].off == 0) {
304 			was_ld_map = false;
305 			dst[i].imm = 0;
306 		} else {
307 			was_ld_map = false;
308 		}
309 	}
310 
311 	psize = bpf_prog_insn_size(fp);
312 	memset(&raw[psize], 0, raw_size - psize);
313 	raw[psize++] = 0x80;
314 
315 	bsize  = round_up(psize, SHA_MESSAGE_BYTES);
316 	blocks = bsize / SHA_MESSAGE_BYTES;
317 	todo   = raw;
318 	if (bsize - psize >= sizeof(__be64)) {
319 		bits = (__be64 *)(todo + bsize - sizeof(__be64));
320 	} else {
321 		bits = (__be64 *)(todo + bsize + bits_offset);
322 		blocks++;
323 	}
324 	*bits = cpu_to_be64((psize - 1) << 3);
325 
326 	while (blocks--) {
327 		sha_transform(digest, todo, ws);
328 		todo += SHA_MESSAGE_BYTES;
329 	}
330 
331 	result = (__force __be32 *)digest;
332 	for (i = 0; i < SHA_DIGEST_WORDS; i++)
333 		result[i] = cpu_to_be32(digest[i]);
334 	memcpy(fp->tag, result, sizeof(fp->tag));
335 
336 	vfree(raw);
337 	return 0;
338 }
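
/* A quick sanity check of the padding math above with made-up sizes:
 * for a 7-insn program, psize = 7 * 8 = 56 bytes. After appending the
 * 0x80 terminator, psize = 57, which rounds up to bsize = 64
 * (SHA_MESSAGE_BYTES), i.e. one block. Since bsize - psize = 7 is
 * smaller than the 8-byte length field, the length moves into an
 * extra block (blocks = 2), and *bits encodes (57 - 1) * 8 = 448,
 * the message length in bits as required by SHA-1 padding.
 */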
339 
340 static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old,
341 				s32 end_new, s32 curr, const bool probe_pass)
342 {
343 	const s64 imm_min = S32_MIN, imm_max = S32_MAX;
344 	s32 delta = end_new - end_old;
345 	s64 imm = insn->imm;
346 
347 	if (curr < pos && curr + imm + 1 >= end_old)
348 		imm += delta;
349 	else if (curr >= end_new && curr + imm + 1 < end_new)
350 		imm -= delta;
351 	if (imm < imm_min || imm > imm_max)
352 		return -ERANGE;
353 	if (!probe_pass)
354 		insn->imm = imm;
355 	return 0;
356 }
357 
358 static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
359 				s32 end_new, s32 curr, const bool probe_pass)
360 {
361 	const s32 off_min = S16_MIN, off_max = S16_MAX;
362 	s32 delta = end_new - end_old;
363 	s32 off = insn->off;
364 
365 	if (curr < pos && curr + off + 1 >= end_old)
366 		off += delta;
367 	else if (curr >= end_new && curr + off + 1 < end_new)
368 		off -= delta;
369 	if (off < off_min || off > off_max)
370 		return -ERANGE;
371 	if (!probe_pass)
372 		insn->off = off;
373 	return 0;
374 }
375 
376 static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, s32 end_old,
377 			    s32 end_new, const bool probe_pass)
378 {
379 	u32 i, insn_cnt = prog->len + (probe_pass ? end_new - end_old : 0);
380 	struct bpf_insn *insn = prog->insnsi;
381 	int ret = 0;
382 
383 	for (i = 0; i < insn_cnt; i++, insn++) {
384 		u8 code;
385 
386 		/* In the probing pass we still operate on the original,
387 		 * unpatched image in order to check overflows before we
388 		 * do any other adjustments. Therefore skip the patchlet.
389 		 */
390 		if (probe_pass && i == pos) {
391 			i = end_new;
392 			insn = prog->insnsi + end_old;
393 		}
394 		code = insn->code;
395 		if ((BPF_CLASS(code) != BPF_JMP &&
396 		     BPF_CLASS(code) != BPF_JMP32) ||
397 		    BPF_OP(code) == BPF_EXIT)
398 			continue;
399 		/* Adjust offset of jmps if we cross patch boundaries. */
400 		if (BPF_OP(code) == BPF_CALL) {
401 			if (insn->src_reg != BPF_PSEUDO_CALL)
402 				continue;
403 			ret = bpf_adj_delta_to_imm(insn, pos, end_old,
404 						   end_new, i, probe_pass);
405 		} else {
406 			ret = bpf_adj_delta_to_off(insn, pos, end_old,
407 						   end_new, i, probe_pass);
408 		}
409 		if (ret)
410 			break;
411 	}
412 
413 	return ret;
414 }
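
/* Example of the offset fixup performed above, with made-up insn
 * indices: suppose insn 2 is "JMP +3" (targeting insn 2 + 3 + 1 = 6)
 * and the patch replaces the single insn at pos = 4 with len = 3 new
 * ones, so end_old = 5, end_new = 7 and delta = 2. The jump sits
 * before the patch (curr = 2 < pos) and its target lies at or beyond
 * end_old, so its offset grows by delta to +5, landing on the shifted
 * target at insn 8.
 */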
415 
416 static void bpf_adj_linfo(struct bpf_prog *prog, u32 off, u32 delta)
417 {
418 	struct bpf_line_info *linfo;
419 	u32 i, nr_linfo;
420 
421 	nr_linfo = prog->aux->nr_linfo;
422 	if (!nr_linfo || !delta)
423 		return;
424 
425 	linfo = prog->aux->linfo;
426 
427 	for (i = 0; i < nr_linfo; i++)
428 		if (off < linfo[i].insn_off)
429 			break;
430 
431 	/* Push all off < linfo[i].insn_off by delta */
432 	for (; i < nr_linfo; i++)
433 		linfo[i].insn_off += delta;
434 }
435 
436 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
437 				       const struct bpf_insn *patch, u32 len)
438 {
439 	u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
440 	const u32 cnt_max = S16_MAX;
441 	struct bpf_prog *prog_adj;
442 	int err;
443 
444 	/* Since our patchlet doesn't expand the image, we're done. */
445 	if (insn_delta == 0) {
446 		memcpy(prog->insnsi + off, patch, sizeof(*patch));
447 		return prog;
448 	}
449 
450 	insn_adj_cnt = prog->len + insn_delta;
451 
452 	/* Reject anything that would potentially let the insn->off
453 	 * target overflow when we have excessive program expansions.
454 	 * We need to probe here before we do any reallocation where
455 	 * we afterwards may not fail anymore.
456 	 */
457 	if (insn_adj_cnt > cnt_max &&
458 	    (err = bpf_adj_branches(prog, off, off + 1, off + len, true)))
459 		return ERR_PTR(err);
460 
461 	/* Several new instructions need to be inserted. Make room
462 	 * for them. Likely, there's no need for a new allocation as
463 	 * last page could have large enough tailroom.
464 	 */
465 	prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
466 				    GFP_USER);
467 	if (!prog_adj)
468 		return ERR_PTR(-ENOMEM);
469 
470 	prog_adj->len = insn_adj_cnt;
471 
472 	/* Patching happens in 3 steps:
473 	 *
474 	 * 1) Move over tail of insnsi from next instruction onwards,
475 	 *    so we can patch the single target insn with one or more
476 	 *    new ones (patching is always from 1 to n insns, n > 0).
477 	 * 2) Inject new instructions at the target location.
478 	 * 3) Adjust branch offsets if necessary.
479 	 */
480 	insn_rest = insn_adj_cnt - off - len;
481 
482 	memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
483 		sizeof(*patch) * insn_rest);
484 	memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);
485 
486 	/* We are guaranteed to not fail at this point, otherwise
487 	 * the ship has sailed to reverse to the original state. An
488 	 * overflow cannot happen at this point.
489 	 */
490 	BUG_ON(bpf_adj_branches(prog_adj, off, off + 1, off + len, false));
491 
492 	bpf_adj_linfo(prog_adj, off, insn_delta);
493 
494 	return prog_adj;
495 }
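
/* Typical (sketched) use by a rewrite pass: replace the single insn at
 * index off with a two-insn patchlet and continue with the returned
 * program, since the insnsi array may have been reallocated:
 *
 *	struct bpf_insn patch[] = {
 *		BPF_MOV64_REG(BPF_REG_AX, BPF_REG_1),
 *		BPF_ALU64_IMM(BPF_ADD, BPF_REG_AX, 4),
 *	};
 *	new_prog = bpf_patch_insn_single(prog, off, patch, ARRAY_SIZE(patch));
 *	if (IS_ERR(new_prog))
 *		return PTR_ERR(new_prog);
 *	prog = new_prog;
 */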
496 
497 int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt)
498 {
499 	/* Branch offsets can't overflow when program is shrinking, no need
500 	 * to call bpf_adj_branches(..., true) here
501 	 */
502 	memmove(prog->insnsi + off, prog->insnsi + off + cnt,
503 		sizeof(struct bpf_insn) * (prog->len - off - cnt));
504 	prog->len -= cnt;
505 
506 	return WARN_ON_ONCE(bpf_adj_branches(prog, off, off + cnt, off, false));
507 }
508 
509 static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
510 {
511 	int i;
512 
513 	for (i = 0; i < fp->aux->func_cnt; i++)
514 		bpf_prog_kallsyms_del(fp->aux->func[i]);
515 }
516 
517 void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
518 {
519 	bpf_prog_kallsyms_del_subprogs(fp);
520 	bpf_prog_kallsyms_del(fp);
521 }
522 
523 #ifdef CONFIG_BPF_JIT
524 /* All BPF JIT sysctl knobs here. */
525 int bpf_jit_enable   __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON);
526 int bpf_jit_harden   __read_mostly;
527 int bpf_jit_kallsyms __read_mostly;
528 long bpf_jit_limit   __read_mostly;
529 long bpf_jit_limit_max __read_mostly;
530 
531 static __always_inline void
532 bpf_get_prog_addr_region(const struct bpf_prog *prog,
533 			 unsigned long *symbol_start,
534 			 unsigned long *symbol_end)
535 {
536 	const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog);
537 	unsigned long addr = (unsigned long)hdr;
538 
539 	WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));
540 
541 	*symbol_start = addr;
542 	*symbol_end   = addr + hdr->pages * PAGE_SIZE;
543 }
544 
545 void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
546 {
547 	const char *end = sym + KSYM_NAME_LEN;
548 	const struct btf_type *type;
549 	const char *func_name;
550 
551 	BUILD_BUG_ON(sizeof("bpf_prog_") +
552 		     sizeof(prog->tag) * 2 +
553 		     /* name has been null terminated.
554 		      * We should need +1 for the '_' preceding
555 		      * the name.  However, the null character
556 		      * is double counted between the name and the
557 		      * sizeof("bpf_prog_") above, so we omit
558 		      * the +1 here.
559 		      */
560 		     sizeof(prog->aux->name) > KSYM_NAME_LEN);
561 
562 	sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
563 	sym  = bin2hex(sym, prog->tag, sizeof(prog->tag));
564 
565 	/* prog->aux->name will be ignored if full btf name is available */
566 	if (prog->aux->func_info_cnt) {
567 		type = btf_type_by_id(prog->aux->btf,
568 				      prog->aux->func_info[prog->aux->func_idx].type_id);
569 		func_name = btf_name_by_offset(prog->aux->btf, type->name_off);
570 		snprintf(sym, (size_t)(end - sym), "_%s", func_name);
571 		return;
572 	}
573 
574 	if (prog->aux->name[0])
575 		snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
576 	else
577 		*sym = 0;
578 }
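
/* The resulting symbol looks like "bpf_prog_<16 hex tag chars>_<name>",
 * e.g. "bpf_prog_5a1b3cc0d6e4f7a8_my_prog" (made-up tag), or just
 * "bpf_prog_<tag>" when neither BTF func info nor aux->name is set.
 */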
579 
580 static __always_inline unsigned long
581 bpf_get_prog_addr_start(struct latch_tree_node *n)
582 {
583 	unsigned long symbol_start, symbol_end;
584 	const struct bpf_prog_aux *aux;
585 
586 	aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
587 	bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
588 
589 	return symbol_start;
590 }
591 
592 static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
593 					  struct latch_tree_node *b)
594 {
595 	return bpf_get_prog_addr_start(a) < bpf_get_prog_addr_start(b);
596 }
597 
598 static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
599 {
600 	unsigned long val = (unsigned long)key;
601 	unsigned long symbol_start, symbol_end;
602 	const struct bpf_prog_aux *aux;
603 
604 	aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
605 	bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
606 
607 	if (val < symbol_start)
608 		return -1;
609 	if (val >= symbol_end)
610 		return  1;
611 
612 	return 0;
613 }
614 
615 static const struct latch_tree_ops bpf_tree_ops = {
616 	.less	= bpf_tree_less,
617 	.comp	= bpf_tree_comp,
618 };
619 
620 static DEFINE_SPINLOCK(bpf_lock);
621 static LIST_HEAD(bpf_kallsyms);
622 static struct latch_tree_root bpf_tree __cacheline_aligned;
623 
624 static void bpf_prog_ksym_node_add(struct bpf_prog_aux *aux)
625 {
626 	WARN_ON_ONCE(!list_empty(&aux->ksym_lnode));
627 	list_add_tail_rcu(&aux->ksym_lnode, &bpf_kallsyms);
628 	latch_tree_insert(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
629 }
630 
631 static void bpf_prog_ksym_node_del(struct bpf_prog_aux *aux)
632 {
633 	if (list_empty(&aux->ksym_lnode))
634 		return;
635 
636 	latch_tree_erase(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
637 	list_del_rcu(&aux->ksym_lnode);
638 }
639 
640 static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
641 {
642 	return fp->jited && !bpf_prog_was_classic(fp);
643 }
644 
645 static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
646 {
647 	return list_empty(&fp->aux->ksym_lnode) ||
648 	       fp->aux->ksym_lnode.prev == LIST_POISON2;
649 }
650 
651 void bpf_prog_kallsyms_add(struct bpf_prog *fp)
652 {
653 	if (!bpf_prog_kallsyms_candidate(fp) ||
654 	    !capable(CAP_SYS_ADMIN))
655 		return;
656 
657 	spin_lock_bh(&bpf_lock);
658 	bpf_prog_ksym_node_add(fp->aux);
659 	spin_unlock_bh(&bpf_lock);
660 }
661 
662 void bpf_prog_kallsyms_del(struct bpf_prog *fp)
663 {
664 	if (!bpf_prog_kallsyms_candidate(fp))
665 		return;
666 
667 	spin_lock_bh(&bpf_lock);
668 	bpf_prog_ksym_node_del(fp->aux);
669 	spin_unlock_bh(&bpf_lock);
670 }
671 
672 static struct bpf_prog *bpf_prog_kallsyms_find(unsigned long addr)
673 {
674 	struct latch_tree_node *n;
675 
676 	if (!bpf_jit_kallsyms_enabled())
677 		return NULL;
678 
679 	n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
680 	return n ?
681 	       container_of(n, struct bpf_prog_aux, ksym_tnode)->prog :
682 	       NULL;
683 }
684 
685 const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
686 				 unsigned long *off, char *sym)
687 {
688 	unsigned long symbol_start, symbol_end;
689 	struct bpf_prog *prog;
690 	char *ret = NULL;
691 
692 	rcu_read_lock();
693 	prog = bpf_prog_kallsyms_find(addr);
694 	if (prog) {
695 		bpf_get_prog_addr_region(prog, &symbol_start, &symbol_end);
696 		bpf_get_prog_name(prog, sym);
697 
698 		ret = sym;
699 		if (size)
700 			*size = symbol_end - symbol_start;
701 		if (off)
702 			*off  = addr - symbol_start;
703 	}
704 	rcu_read_unlock();
705 
706 	return ret;
707 }
708 
709 bool is_bpf_text_address(unsigned long addr)
710 {
711 	bool ret;
712 
713 	rcu_read_lock();
714 	ret = bpf_prog_kallsyms_find(addr) != NULL;
715 	rcu_read_unlock();
716 
717 	return ret;
718 }
719 
720 int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
721 		    char *sym)
722 {
723 	struct bpf_prog_aux *aux;
724 	unsigned int it = 0;
725 	int ret = -ERANGE;
726 
727 	if (!bpf_jit_kallsyms_enabled())
728 		return ret;
729 
730 	rcu_read_lock();
731 	list_for_each_entry_rcu(aux, &bpf_kallsyms, ksym_lnode) {
732 		if (it++ != symnum)
733 			continue;
734 
735 		bpf_get_prog_name(aux->prog, sym);
736 
737 		*value = (unsigned long)aux->prog->bpf_func;
738 		*type  = BPF_SYM_ELF_TYPE;
739 
740 		ret = 0;
741 		break;
742 	}
743 	rcu_read_unlock();
744 
745 	return ret;
746 }
747 
748 static atomic_long_t bpf_jit_current;
749 
750 /* Can be overridden by an arch's JIT compiler if it has a custom,
751  * dedicated BPF backend memory area, or if neither of the two
752  * below apply.
753  */
754 u64 __weak bpf_jit_alloc_exec_limit(void)
755 {
756 #if defined(MODULES_VADDR)
757 	return MODULES_END - MODULES_VADDR;
758 #else
759 	return VMALLOC_END - VMALLOC_START;
760 #endif
761 }
762 
763 static int __init bpf_jit_charge_init(void)
764 {
765 	/* Only used as heuristic here to derive limit. */
766 	bpf_jit_limit_max = bpf_jit_alloc_exec_limit();
767 	bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 1,
768 					    PAGE_SIZE), LONG_MAX);
769 	return 0;
770 }
771 pure_initcall(bpf_jit_charge_init);
772 
773 static int bpf_jit_charge_modmem(u32 pages)
774 {
775 	if (atomic_long_add_return(pages, &bpf_jit_current) >
776 	    (bpf_jit_limit >> PAGE_SHIFT)) {
777 		if (!capable(CAP_SYS_ADMIN)) {
778 			atomic_long_sub(pages, &bpf_jit_current);
779 			return -EPERM;
780 		}
781 	}
782 
783 	return 0;
784 }
785 
786 static void bpf_jit_uncharge_modmem(u32 pages)
787 {
788 	atomic_long_sub(pages, &bpf_jit_current);
789 }
790 
791 void *__weak bpf_jit_alloc_exec(unsigned long size)
792 {
793 	return module_alloc(size);
794 }
795 
796 void __weak bpf_jit_free_exec(void *addr)
797 {
798 	module_memfree(addr);
799 }
800 
801 #if IS_ENABLED(CONFIG_BPF_JIT) && IS_ENABLED(CONFIG_CFI_CLANG)
802 bool __weak arch_bpf_jit_check_func(const struct bpf_prog *prog)
803 {
804 	return true;
805 }
806 EXPORT_SYMBOL_GPL(arch_bpf_jit_check_func);
807 #endif
808 
809 struct bpf_binary_header *
810 bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
811 		     unsigned int alignment,
812 		     bpf_jit_fill_hole_t bpf_fill_ill_insns)
813 {
814 	struct bpf_binary_header *hdr;
815 	u32 size, hole, start, pages;
816 
817 	/* Most of BPF filters are really small, but if some of them
818 	 * fill a page, allow at least 128 extra bytes to insert a
819 	 * random section of illegal instructions.
820 	 */
821 	size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
822 	pages = size / PAGE_SIZE;
823 
824 	if (bpf_jit_charge_modmem(pages))
825 		return NULL;
826 	hdr = bpf_jit_alloc_exec(size);
827 	if (!hdr) {
828 		bpf_jit_uncharge_modmem(pages);
829 		return NULL;
830 	}
831 
832 	/* Fill space with illegal/arch-dep instructions. */
833 	bpf_fill_ill_insns(hdr, size);
834 
835 	bpf_jit_set_header_magic(hdr);
836 	hdr->pages = pages;
837 	hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
838 		     PAGE_SIZE - sizeof(*hdr));
839 	start = (get_random_int() % hole) & ~(alignment - 1);
840 
841 	/* Leave a random number of instructions before BPF code. */
842 	*image_ptr = &hdr->image[start];
843 
844 	return hdr;
845 }
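
/* Rough example of the sizing above: for proglen = 3000 on a 4K-page
 * system, size rounds up to 4096 and pages = 1. The hole left for the
 * random start offset is min(4096 - (3000 + sizeof(*hdr)),
 * 4096 - sizeof(*hdr)), i.e. a bit under 1100 bytes, and the image
 * begins at a random, alignment-masked offset inside that hole so the
 * JITed code does not sit at a predictable address within the page.
 */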
846 
847 void bpf_jit_binary_free(struct bpf_binary_header *hdr)
848 {
849 	u32 pages = hdr->pages;
850 
851 	bpf_jit_free_exec(hdr);
852 	bpf_jit_uncharge_modmem(pages);
853 }
854 
855 /* This symbol is only overridden by archs that have different
856  * requirements than the usual eBPF JITs, f.e. when they only
857  * implement cBPF JIT, do not set images read-only, etc.
858  */
859 void __weak bpf_jit_free(struct bpf_prog *fp)
860 {
861 	if (fp->jited) {
862 		struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);
863 
864 		bpf_jit_binary_free(hdr);
865 
866 		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
867 	}
868 
869 	bpf_prog_unlock_free(fp);
870 }
871 
872 int bpf_jit_get_func_addr(const struct bpf_prog *prog,
873 			  const struct bpf_insn *insn, bool extra_pass,
874 			  u64 *func_addr, bool *func_addr_fixed)
875 {
876 	s16 off = insn->off;
877 	s32 imm = insn->imm;
878 	u8 *addr;
879 
880 	*func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL;
881 	if (!*func_addr_fixed) {
882 		/* Place-holder address till the last pass has collected
883 		 * all addresses for JITed subprograms in which case we
884 		 * can pick them up from prog->aux.
885 		 */
886 		if (!extra_pass)
887 			addr = NULL;
888 		else if (prog->aux->func &&
889 			 off >= 0 && off < prog->aux->func_cnt)
890 			addr = (u8 *)prog->aux->func[off]->bpf_func;
891 		else
892 			return -EINVAL;
893 	} else {
894 		/* Address of a BPF helper call. Since part of the core
895 		 * kernel, it's always at a fixed location. __bpf_call_base
896 		 * and the helper with imm relative to it are both in core
897 		 * kernel.
898 		 */
899 		addr = (u8 *)__bpf_call_base + imm;
900 	}
901 
902 	*func_addr = (unsigned long)addr;
903 	return 0;
904 }
905 
906 static int bpf_jit_blind_insn(const struct bpf_insn *from,
907 			      const struct bpf_insn *aux,
908 			      struct bpf_insn *to_buff,
909 			      bool emit_zext)
910 {
911 	struct bpf_insn *to = to_buff;
912 	u32 imm_rnd = get_random_int();
913 	s16 off;
914 
915 	BUILD_BUG_ON(BPF_REG_AX  + 1 != MAX_BPF_JIT_REG);
916 	BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);
917 
918 	/* Constraints on AX register:
919 	 *
920 	 * AX register is inaccessible from user space. It is mapped in
921 	 * all JITs, and used here for constant blinding rewrites. It is
922 	 * typically "stateless" meaning its contents are only valid within
923 	 * the executed instruction, but not across several instructions.
924 	 * There are a few exceptions however which are further detailed
925 	 * below.
926 	 *
927 	 * Constant blinding is only used by JITs, not in the interpreter.
928 	 * The interpreter uses AX in some occasions as a local temporary
929 	 * register e.g. in DIV or MOD instructions.
930 	 *
931 	 * In restricted circumstances, the verifier can also use the AX
932 	 * register for rewrites as long as they do not interfere with
933 	 * the above cases!
934 	 */
935 	if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX)
936 		goto out;
937 
938 	if (from->imm == 0 &&
939 	    (from->code == (BPF_ALU   | BPF_MOV | BPF_K) ||
940 	     from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
941 		*to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
942 		goto out;
943 	}
944 
945 	switch (from->code) {
946 	case BPF_ALU | BPF_ADD | BPF_K:
947 	case BPF_ALU | BPF_SUB | BPF_K:
948 	case BPF_ALU | BPF_AND | BPF_K:
949 	case BPF_ALU | BPF_OR  | BPF_K:
950 	case BPF_ALU | BPF_XOR | BPF_K:
951 	case BPF_ALU | BPF_MUL | BPF_K:
952 	case BPF_ALU | BPF_MOV | BPF_K:
953 	case BPF_ALU | BPF_DIV | BPF_K:
954 	case BPF_ALU | BPF_MOD | BPF_K:
955 		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
956 		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
957 		*to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX);
958 		break;
959 
960 	case BPF_ALU64 | BPF_ADD | BPF_K:
961 	case BPF_ALU64 | BPF_SUB | BPF_K:
962 	case BPF_ALU64 | BPF_AND | BPF_K:
963 	case BPF_ALU64 | BPF_OR  | BPF_K:
964 	case BPF_ALU64 | BPF_XOR | BPF_K:
965 	case BPF_ALU64 | BPF_MUL | BPF_K:
966 	case BPF_ALU64 | BPF_MOV | BPF_K:
967 	case BPF_ALU64 | BPF_DIV | BPF_K:
968 	case BPF_ALU64 | BPF_MOD | BPF_K:
969 		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
970 		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
971 		*to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX);
972 		break;
973 
974 	case BPF_JMP | BPF_JEQ  | BPF_K:
975 	case BPF_JMP | BPF_JNE  | BPF_K:
976 	case BPF_JMP | BPF_JGT  | BPF_K:
977 	case BPF_JMP | BPF_JLT  | BPF_K:
978 	case BPF_JMP | BPF_JGE  | BPF_K:
979 	case BPF_JMP | BPF_JLE  | BPF_K:
980 	case BPF_JMP | BPF_JSGT | BPF_K:
981 	case BPF_JMP | BPF_JSLT | BPF_K:
982 	case BPF_JMP | BPF_JSGE | BPF_K:
983 	case BPF_JMP | BPF_JSLE | BPF_K:
984 	case BPF_JMP | BPF_JSET | BPF_K:
985 		/* Accommodate for extra offset in case of a backjump. */
986 		off = from->off;
987 		if (off < 0)
988 			off -= 2;
989 		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
990 		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
991 		*to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
992 		break;
993 
994 	case BPF_JMP32 | BPF_JEQ  | BPF_K:
995 	case BPF_JMP32 | BPF_JNE  | BPF_K:
996 	case BPF_JMP32 | BPF_JGT  | BPF_K:
997 	case BPF_JMP32 | BPF_JLT  | BPF_K:
998 	case BPF_JMP32 | BPF_JGE  | BPF_K:
999 	case BPF_JMP32 | BPF_JLE  | BPF_K:
1000 	case BPF_JMP32 | BPF_JSGT | BPF_K:
1001 	case BPF_JMP32 | BPF_JSLT | BPF_K:
1002 	case BPF_JMP32 | BPF_JSGE | BPF_K:
1003 	case BPF_JMP32 | BPF_JSLE | BPF_K:
1004 	case BPF_JMP32 | BPF_JSET | BPF_K:
1005 		/* Accommodate for extra offset in case of a backjump. */
1006 		off = from->off;
1007 		if (off < 0)
1008 			off -= 2;
1009 		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1010 		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1011 		*to++ = BPF_JMP32_REG(from->code, from->dst_reg, BPF_REG_AX,
1012 				      off);
1013 		break;
1014 
1015 	case BPF_LD | BPF_IMM | BPF_DW:
1016 		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
1017 		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1018 		*to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
1019 		*to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
1020 		break;
1021 	case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
1022 		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
1023 		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1024 		if (emit_zext)
1025 			*to++ = BPF_ZEXT_REG(BPF_REG_AX);
1026 		*to++ = BPF_ALU64_REG(BPF_OR,  aux[0].dst_reg, BPF_REG_AX);
1027 		break;
1028 
1029 	case BPF_ST | BPF_MEM | BPF_DW:
1030 	case BPF_ST | BPF_MEM | BPF_W:
1031 	case BPF_ST | BPF_MEM | BPF_H:
1032 	case BPF_ST | BPF_MEM | BPF_B:
1033 		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1034 		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1035 		*to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
1036 		break;
1037 	}
1038 out:
1039 	return to - to_buff;
1040 }
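
/* Example of a blinded rewrite from the ALU64 | BPF_K case above, with
 * a made-up random value: "r1 += 16" with imm_rnd = 0x1234 becomes
 *
 *	AX  = 0x1234 ^ 16;
 *	AX ^= 0x1234;		// AX == 16 again
 *	r1 += AX;
 *
 * so the constant 16 never appears literally in the JITed image, which
 * keeps user-controlled immediates from being sprayed into executable
 * memory.
 */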
1041 
1042 static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
1043 					      gfp_t gfp_extra_flags)
1044 {
1045 	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
1046 	struct bpf_prog *fp;
1047 
1048 	fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags, PAGE_KERNEL);
1049 	if (fp != NULL) {
1050 		/* aux->prog still points to the fp_other one, so
1051 		 * when promoting the clone to the real program,
1052 		 * this still needs to be adapted.
1053 		 */
1054 		memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
1055 	}
1056 
1057 	return fp;
1058 }
1059 
1060 static void bpf_prog_clone_free(struct bpf_prog *fp)
1061 {
1062 	/* aux was stolen by the other clone, so we cannot free
1063 	 * it from this path! It will be freed eventually by the
1064 	 * other program on release.
1065 	 *
1066 	 * At this point, we don't need a deferred release since
1067 	 * clone is guaranteed to not be locked.
1068 	 */
1069 	fp->aux = NULL;
1070 	__bpf_prog_free(fp);
1071 }
1072 
1073 void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
1074 {
1075 	/* We have to repoint aux->prog to self, as we don't
1076 	 * know whether fp here is the clone or the original.
1077 	 */
1078 	fp->aux->prog = fp;
1079 	bpf_prog_clone_free(fp_other);
1080 }
1081 
1082 struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
1083 {
1084 	struct bpf_insn insn_buff[16], aux[2];
1085 	struct bpf_prog *clone, *tmp;
1086 	int insn_delta, insn_cnt;
1087 	struct bpf_insn *insn;
1088 	int i, rewritten;
1089 
1090 	if (!bpf_jit_blinding_enabled(prog) || prog->blinded)
1091 		return prog;
1092 
1093 	clone = bpf_prog_clone_create(prog, GFP_USER);
1094 	if (!clone)
1095 		return ERR_PTR(-ENOMEM);
1096 
1097 	insn_cnt = clone->len;
1098 	insn = clone->insnsi;
1099 
1100 	for (i = 0; i < insn_cnt; i++, insn++) {
1101 		/* We temporarily need to hold the original ld64 insn
1102 		 * so that we can still access the first part in the
1103 		 * second blinding run.
1104 		 */
1105 		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
1106 		    insn[1].code == 0)
1107 			memcpy(aux, insn, sizeof(aux));
1108 
1109 		rewritten = bpf_jit_blind_insn(insn, aux, insn_buff,
1110 						clone->aux->verifier_zext);
1111 		if (!rewritten)
1112 			continue;
1113 
1114 		tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
1115 		if (IS_ERR(tmp)) {
1116 			/* Patching may have repointed aux->prog during
1117 			 * realloc from the original one, so we need to
1118 			 * fix it up here on error.
1119 			 */
1120 			bpf_jit_prog_release_other(prog, clone);
1121 			return tmp;
1122 		}
1123 
1124 		clone = tmp;
1125 		insn_delta = rewritten - 1;
1126 
1127 		/* Walk new program and skip insns we just inserted. */
1128 		insn = clone->insnsi + i + insn_delta;
1129 		insn_cnt += insn_delta;
1130 		i        += insn_delta;
1131 	}
1132 
1133 	clone->blinded = 1;
1134 	return clone;
1135 }
1136 #endif /* CONFIG_BPF_JIT */
1137 
1138 /* Base function for offset calculation. Needs to go into .text section,
1139  * therefore keeping it non-static as well; will also be used by JITs
1140  * anyway later on, so do not let the compiler omit it. This also needs
1141  * to go into kallsyms for correlation from e.g. bpftool, so naming
1142  * must not change.
1143  */
1144 noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
1145 {
1146 	return 0;
1147 }
1148 EXPORT_SYMBOL_GPL(__bpf_call_base);
1149 
1150 /* All UAPI available opcodes. */
1151 #define BPF_INSN_MAP(INSN_2, INSN_3)		\
1152 	/* 32 bit ALU operations. */		\
1153 	/*   Register based. */			\
1154 	INSN_3(ALU, ADD,  X),			\
1155 	INSN_3(ALU, SUB,  X),			\
1156 	INSN_3(ALU, AND,  X),			\
1157 	INSN_3(ALU, OR,   X),			\
1158 	INSN_3(ALU, LSH,  X),			\
1159 	INSN_3(ALU, RSH,  X),			\
1160 	INSN_3(ALU, XOR,  X),			\
1161 	INSN_3(ALU, MUL,  X),			\
1162 	INSN_3(ALU, MOV,  X),			\
1163 	INSN_3(ALU, ARSH, X),			\
1164 	INSN_3(ALU, DIV,  X),			\
1165 	INSN_3(ALU, MOD,  X),			\
1166 	INSN_2(ALU, NEG),			\
1167 	INSN_3(ALU, END, TO_BE),		\
1168 	INSN_3(ALU, END, TO_LE),		\
1169 	/*   Immediate based. */		\
1170 	INSN_3(ALU, ADD,  K),			\
1171 	INSN_3(ALU, SUB,  K),			\
1172 	INSN_3(ALU, AND,  K),			\
1173 	INSN_3(ALU, OR,   K),			\
1174 	INSN_3(ALU, LSH,  K),			\
1175 	INSN_3(ALU, RSH,  K),			\
1176 	INSN_3(ALU, XOR,  K),			\
1177 	INSN_3(ALU, MUL,  K),			\
1178 	INSN_3(ALU, MOV,  K),			\
1179 	INSN_3(ALU, ARSH, K),			\
1180 	INSN_3(ALU, DIV,  K),			\
1181 	INSN_3(ALU, MOD,  K),			\
1182 	/* 64 bit ALU operations. */		\
1183 	/*   Register based. */			\
1184 	INSN_3(ALU64, ADD,  X),			\
1185 	INSN_3(ALU64, SUB,  X),			\
1186 	INSN_3(ALU64, AND,  X),			\
1187 	INSN_3(ALU64, OR,   X),			\
1188 	INSN_3(ALU64, LSH,  X),			\
1189 	INSN_3(ALU64, RSH,  X),			\
1190 	INSN_3(ALU64, XOR,  X),			\
1191 	INSN_3(ALU64, MUL,  X),			\
1192 	INSN_3(ALU64, MOV,  X),			\
1193 	INSN_3(ALU64, ARSH, X),			\
1194 	INSN_3(ALU64, DIV,  X),			\
1195 	INSN_3(ALU64, MOD,  X),			\
1196 	INSN_2(ALU64, NEG),			\
1197 	/*   Immediate based. */		\
1198 	INSN_3(ALU64, ADD,  K),			\
1199 	INSN_3(ALU64, SUB,  K),			\
1200 	INSN_3(ALU64, AND,  K),			\
1201 	INSN_3(ALU64, OR,   K),			\
1202 	INSN_3(ALU64, LSH,  K),			\
1203 	INSN_3(ALU64, RSH,  K),			\
1204 	INSN_3(ALU64, XOR,  K),			\
1205 	INSN_3(ALU64, MUL,  K),			\
1206 	INSN_3(ALU64, MOV,  K),			\
1207 	INSN_3(ALU64, ARSH, K),			\
1208 	INSN_3(ALU64, DIV,  K),			\
1209 	INSN_3(ALU64, MOD,  K),			\
1210 	/* Call instruction. */			\
1211 	INSN_2(JMP, CALL),			\
1212 	/* Exit instruction. */			\
1213 	INSN_2(JMP, EXIT),			\
1214 	/* 32-bit Jump instructions. */		\
1215 	/*   Register based. */			\
1216 	INSN_3(JMP32, JEQ,  X),			\
1217 	INSN_3(JMP32, JNE,  X),			\
1218 	INSN_3(JMP32, JGT,  X),			\
1219 	INSN_3(JMP32, JLT,  X),			\
1220 	INSN_3(JMP32, JGE,  X),			\
1221 	INSN_3(JMP32, JLE,  X),			\
1222 	INSN_3(JMP32, JSGT, X),			\
1223 	INSN_3(JMP32, JSLT, X),			\
1224 	INSN_3(JMP32, JSGE, X),			\
1225 	INSN_3(JMP32, JSLE, X),			\
1226 	INSN_3(JMP32, JSET, X),			\
1227 	/*   Immediate based. */		\
1228 	INSN_3(JMP32, JEQ,  K),			\
1229 	INSN_3(JMP32, JNE,  K),			\
1230 	INSN_3(JMP32, JGT,  K),			\
1231 	INSN_3(JMP32, JLT,  K),			\
1232 	INSN_3(JMP32, JGE,  K),			\
1233 	INSN_3(JMP32, JLE,  K),			\
1234 	INSN_3(JMP32, JSGT, K),			\
1235 	INSN_3(JMP32, JSLT, K),			\
1236 	INSN_3(JMP32, JSGE, K),			\
1237 	INSN_3(JMP32, JSLE, K),			\
1238 	INSN_3(JMP32, JSET, K),			\
1239 	/* Jump instructions. */		\
1240 	/*   Register based. */			\
1241 	INSN_3(JMP, JEQ,  X),			\
1242 	INSN_3(JMP, JNE,  X),			\
1243 	INSN_3(JMP, JGT,  X),			\
1244 	INSN_3(JMP, JLT,  X),			\
1245 	INSN_3(JMP, JGE,  X),			\
1246 	INSN_3(JMP, JLE,  X),			\
1247 	INSN_3(JMP, JSGT, X),			\
1248 	INSN_3(JMP, JSLT, X),			\
1249 	INSN_3(JMP, JSGE, X),			\
1250 	INSN_3(JMP, JSLE, X),			\
1251 	INSN_3(JMP, JSET, X),			\
1252 	/*   Immediate based. */		\
1253 	INSN_3(JMP, JEQ,  K),			\
1254 	INSN_3(JMP, JNE,  K),			\
1255 	INSN_3(JMP, JGT,  K),			\
1256 	INSN_3(JMP, JLT,  K),			\
1257 	INSN_3(JMP, JGE,  K),			\
1258 	INSN_3(JMP, JLE,  K),			\
1259 	INSN_3(JMP, JSGT, K),			\
1260 	INSN_3(JMP, JSLT, K),			\
1261 	INSN_3(JMP, JSGE, K),			\
1262 	INSN_3(JMP, JSLE, K),			\
1263 	INSN_3(JMP, JSET, K),			\
1264 	INSN_2(JMP, JA),			\
1265 	/* Store instructions. */		\
1266 	/*   Register based. */			\
1267 	INSN_3(STX, MEM,  B),			\
1268 	INSN_3(STX, MEM,  H),			\
1269 	INSN_3(STX, MEM,  W),			\
1270 	INSN_3(STX, MEM,  DW),			\
1271 	INSN_3(STX, XADD, W),			\
1272 	INSN_3(STX, XADD, DW),			\
1273 	/*   Immediate based. */		\
1274 	INSN_3(ST, MEM, B),			\
1275 	INSN_3(ST, MEM, H),			\
1276 	INSN_3(ST, MEM, W),			\
1277 	INSN_3(ST, MEM, DW),			\
1278 	/* Load instructions. */		\
1279 	/*   Register based. */			\
1280 	INSN_3(LDX, MEM, B),			\
1281 	INSN_3(LDX, MEM, H),			\
1282 	INSN_3(LDX, MEM, W),			\
1283 	INSN_3(LDX, MEM, DW),			\
1284 	/*   Immediate based. */		\
1285 	INSN_3(LD, IMM, DW)
1286 
1287 bool bpf_opcode_in_insntable(u8 code)
1288 {
1289 #define BPF_INSN_2_TBL(x, y)    [BPF_##x | BPF_##y] = true
1290 #define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true
1291 	static const bool public_insntable[256] = {
1292 		[0 ... 255] = false,
1293 		/* Now overwrite non-defaults ... */
1294 		BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL),
1295 		/* UAPI exposed, but rewritten opcodes. cBPF carry-over. */
1296 		[BPF_LD | BPF_ABS | BPF_B] = true,
1297 		[BPF_LD | BPF_ABS | BPF_H] = true,
1298 		[BPF_LD | BPF_ABS | BPF_W] = true,
1299 		[BPF_LD | BPF_IND | BPF_B] = true,
1300 		[BPF_LD | BPF_IND | BPF_H] = true,
1301 		[BPF_LD | BPF_IND | BPF_W] = true,
1302 	};
1303 #undef BPF_INSN_3_TBL
1304 #undef BPF_INSN_2_TBL
1305 	return public_insntable[code];
1306 }
1307 
1308 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
1309 /**
1310  *	__bpf_prog_run - run eBPF program on a given context
1311  *	@regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers
1312  *	@insn: is the array of eBPF instructions
1313  *	@stack: is the eBPF storage stack
1314  *
1315  * Decode and execute eBPF instructions.
1316  */
1317 static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
1318 {
1319 #define BPF_INSN_2_LBL(x, y)    [BPF_##x | BPF_##y] = &&x##_##y
1320 #define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
1321 	static const void * const jumptable[256] __annotate_jump_table = {
1322 		[0 ... 255] = &&default_label,
1323 		/* Now overwrite non-defaults ... */
1324 		BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
1325 		/* Non-UAPI available opcodes. */
1326 		[BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
1327 		[BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
1328 		[BPF_ST  | BPF_NOSPEC] = &&ST_NOSPEC,
1329 	};
1330 #undef BPF_INSN_3_LBL
1331 #undef BPF_INSN_2_LBL
1332 	u32 tail_call_cnt = 0;
1333 
1334 #define CONT	 ({ insn++; goto select_insn; })
1335 #define CONT_JMP ({ insn++; goto select_insn; })
1336 
1337 select_insn:
1338 	goto *jumptable[insn->code];
1339 
1340 	/* Explicitly mask the register-based shift amounts with 63 or 31
1341 	 * to avoid undefined behavior. Normally this won't affect the
1342 	 * generated code, for example, in case of native 64 bit archs such
1343 	 * as x86-64 or arm64, the compiler is optimizing the AND away for
1344 	 * the interpreter. In case of JITs, each of the JIT backends compiles
1345 	 * the BPF shift operations to machine instructions which produce
1346 	 * implementation-defined results in such a case; the resulting
1347 	 * contents of the register may be arbitrary, but program behaviour
1348 	 * as a whole remains defined. In other words, in case of JIT backends,
1349 	 * the AND must /not/ be added to the emitted LSH/RSH/ARSH translation.
1350 	 */
1351 	/* ALU (shifts) */
1352 #define SHT(OPCODE, OP)					\
1353 	ALU64_##OPCODE##_X:				\
1354 		DST = DST OP (SRC & 63);		\
1355 		CONT;					\
1356 	ALU_##OPCODE##_X:				\
1357 		DST = (u32) DST OP ((u32) SRC & 31);	\
1358 		CONT;					\
1359 	ALU64_##OPCODE##_K:				\
1360 		DST = DST OP IMM;			\
1361 		CONT;					\
1362 	ALU_##OPCODE##_K:				\
1363 		DST = (u32) DST OP (u32) IMM;		\
1364 		CONT;
1365 	/* ALU (rest) */
1366 #define ALU(OPCODE, OP)					\
1367 	ALU64_##OPCODE##_X:				\
1368 		DST = DST OP SRC;			\
1369 		CONT;					\
1370 	ALU_##OPCODE##_X:				\
1371 		DST = (u32) DST OP (u32) SRC;		\
1372 		CONT;					\
1373 	ALU64_##OPCODE##_K:				\
1374 		DST = DST OP IMM;			\
1375 		CONT;					\
1376 	ALU_##OPCODE##_K:				\
1377 		DST = (u32) DST OP (u32) IMM;		\
1378 		CONT;
1379 	ALU(ADD,  +)
1380 	ALU(SUB,  -)
1381 	ALU(AND,  &)
1382 	ALU(OR,   |)
1383 	ALU(XOR,  ^)
1384 	ALU(MUL,  *)
1385 	SHT(LSH, <<)
1386 	SHT(RSH, >>)
1387 #undef SHT
1388 #undef ALU
1389 	ALU_NEG:
1390 		DST = (u32) -DST;
1391 		CONT;
1392 	ALU64_NEG:
1393 		DST = -DST;
1394 		CONT;
1395 	ALU_MOV_X:
1396 		DST = (u32) SRC;
1397 		CONT;
1398 	ALU_MOV_K:
1399 		DST = (u32) IMM;
1400 		CONT;
1401 	ALU64_MOV_X:
1402 		DST = SRC;
1403 		CONT;
1404 	ALU64_MOV_K:
1405 		DST = IMM;
1406 		CONT;
1407 	LD_IMM_DW:
1408 		DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
1409 		insn++;
1410 		CONT;
1411 	ALU_ARSH_X:
1412 		DST = (u64) (u32) (((s32) DST) >> (SRC & 31));
1413 		CONT;
1414 	ALU_ARSH_K:
1415 		DST = (u64) (u32) (((s32) DST) >> IMM);
1416 		CONT;
1417 	ALU64_ARSH_X:
1418 		(*(s64 *) &DST) >>= (SRC & 63);
1419 		CONT;
1420 	ALU64_ARSH_K:
1421 		(*(s64 *) &DST) >>= IMM;
1422 		CONT;
1423 	ALU64_MOD_X:
1424 		div64_u64_rem(DST, SRC, &AX);
1425 		DST = AX;
1426 		CONT;
1427 	ALU_MOD_X:
1428 		AX = (u32) DST;
1429 		DST = do_div(AX, (u32) SRC);
1430 		CONT;
1431 	ALU64_MOD_K:
1432 		div64_u64_rem(DST, IMM, &AX);
1433 		DST = AX;
1434 		CONT;
1435 	ALU_MOD_K:
1436 		AX = (u32) DST;
1437 		DST = do_div(AX, (u32) IMM);
1438 		CONT;
1439 	ALU64_DIV_X:
1440 		DST = div64_u64(DST, SRC);
1441 		CONT;
1442 	ALU_DIV_X:
1443 		AX = (u32) DST;
1444 		do_div(AX, (u32) SRC);
1445 		DST = (u32) AX;
1446 		CONT;
1447 	ALU64_DIV_K:
1448 		DST = div64_u64(DST, IMM);
1449 		CONT;
1450 	ALU_DIV_K:
1451 		AX = (u32) DST;
1452 		do_div(AX, (u32) IMM);
1453 		DST = (u32) AX;
1454 		CONT;
1455 	ALU_END_TO_BE:
1456 		switch (IMM) {
1457 		case 16:
1458 			DST = (__force u16) cpu_to_be16(DST);
1459 			break;
1460 		case 32:
1461 			DST = (__force u32) cpu_to_be32(DST);
1462 			break;
1463 		case 64:
1464 			DST = (__force u64) cpu_to_be64(DST);
1465 			break;
1466 		}
1467 		CONT;
1468 	ALU_END_TO_LE:
1469 		switch (IMM) {
1470 		case 16:
1471 			DST = (__force u16) cpu_to_le16(DST);
1472 			break;
1473 		case 32:
1474 			DST = (__force u32) cpu_to_le32(DST);
1475 			break;
1476 		case 64:
1477 			DST = (__force u64) cpu_to_le64(DST);
1478 			break;
1479 		}
1480 		CONT;
1481 
1482 	/* CALL */
1483 	JMP_CALL:
1484 		/* Function call scratches BPF_R1-BPF_R5 registers,
1485 		 * preserves BPF_R6-BPF_R9, and stores return value
1486 		 * into BPF_R0.
1487 		 */
1488 		BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
1489 						       BPF_R4, BPF_R5);
1490 		CONT;
1491 
1492 	JMP_CALL_ARGS:
1493 		BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
1494 							    BPF_R3, BPF_R4,
1495 							    BPF_R5,
1496 							    insn + insn->off + 1);
1497 		CONT;
1498 
1499 	JMP_TAIL_CALL: {
1500 		struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
1501 		struct bpf_array *array = container_of(map, struct bpf_array, map);
1502 		struct bpf_prog *prog;
1503 		u32 index = BPF_R3;
1504 
1505 		if (unlikely(index >= array->map.max_entries))
1506 			goto out;
1507 		if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT))
1508 			goto out;
1509 
1510 		tail_call_cnt++;
1511 
1512 		prog = READ_ONCE(array->ptrs[index]);
1513 		if (!prog)
1514 			goto out;
1515 
1516 		/* ARG1 at this point is guaranteed to point to CTX from
1517 		 * the verifier side due to the fact that the tail call is
1518 		 * handled like a helper, that is, bpf_tail_call_proto,
1519 		 * where arg1_type is ARG_PTR_TO_CTX.
1520 		 */
1521 		insn = prog->insnsi;
1522 		goto select_insn;
1523 out:
1524 		CONT;
1525 	}
1526 	JMP_JA:
1527 		insn += insn->off;
1528 		CONT;
1529 	JMP_EXIT:
1530 		return BPF_R0;
1531 	/* JMP */
1532 #define COND_JMP(SIGN, OPCODE, CMP_OP)				\
1533 	JMP_##OPCODE##_X:					\
1534 		if ((SIGN##64) DST CMP_OP (SIGN##64) SRC) {	\
1535 			insn += insn->off;			\
1536 			CONT_JMP;				\
1537 		}						\
1538 		CONT;						\
1539 	JMP32_##OPCODE##_X:					\
1540 		if ((SIGN##32) DST CMP_OP (SIGN##32) SRC) {	\
1541 			insn += insn->off;			\
1542 			CONT_JMP;				\
1543 		}						\
1544 		CONT;						\
1545 	JMP_##OPCODE##_K:					\
1546 		if ((SIGN##64) DST CMP_OP (SIGN##64) IMM) {	\
1547 			insn += insn->off;			\
1548 			CONT_JMP;				\
1549 		}						\
1550 		CONT;						\
1551 	JMP32_##OPCODE##_K:					\
1552 		if ((SIGN##32) DST CMP_OP (SIGN##32) IMM) {	\
1553 			insn += insn->off;			\
1554 			CONT_JMP;				\
1555 		}						\
1556 		CONT;
1557 	COND_JMP(u, JEQ, ==)
1558 	COND_JMP(u, JNE, !=)
1559 	COND_JMP(u, JGT, >)
1560 	COND_JMP(u, JLT, <)
1561 	COND_JMP(u, JGE, >=)
1562 	COND_JMP(u, JLE, <=)
1563 	COND_JMP(u, JSET, &)
1564 	COND_JMP(s, JSGT, >)
1565 	COND_JMP(s, JSLT, <)
1566 	COND_JMP(s, JSGE, >=)
1567 	COND_JMP(s, JSLE, <=)
1568 #undef COND_JMP
1569 	/* ST, STX and LDX*/
1570 	ST_NOSPEC:
1571 		/* Speculation barrier for mitigating Speculative Store Bypass.
1572 		 * In case of arm64, we rely on the firmware mitigation as
1573 		 * controlled via the ssbd kernel parameter. Whenever the
1574 		 * mitigation is enabled, it works for all of the kernel code
1575 		 * with no need to provide any additional instructions here.
1576 		 * In case of x86, we use 'lfence' insn for mitigation. We
1577 		 * reuse preexisting logic from Spectre v1 mitigation that
1578 		 * happens to produce the required code on x86 for v4 as well.
1579 		 */
1580 		barrier_nospec();
1581 		CONT;
1582 #define LDST(SIZEOP, SIZE)						\
1583 	STX_MEM_##SIZEOP:						\
1584 		*(SIZE *)(unsigned long) (DST + insn->off) = SRC;	\
1585 		CONT;							\
1586 	ST_MEM_##SIZEOP:						\
1587 		*(SIZE *)(unsigned long) (DST + insn->off) = IMM;	\
1588 		CONT;							\
1589 	LDX_MEM_##SIZEOP:						\
1590 		DST = *(SIZE *)(unsigned long) (SRC + insn->off);	\
1591 		CONT;
1592 
1593 	LDST(B,   u8)
1594 	LDST(H,  u16)
1595 	LDST(W,  u32)
1596 	LDST(DW, u64)
1597 #undef LDST
1598 	STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
1599 		atomic_add((u32) SRC, (atomic_t *)(unsigned long)
1600 			   (DST + insn->off));
1601 		CONT;
1602 	STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
1603 		atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
1604 			     (DST + insn->off));
1605 		CONT;
1606 
1607 	default_label:
1608 		/* If we ever reach this, we have a bug somewhere. Die hard here
1609 		 * instead of just returning 0; we could be somewhere in a subprog,
1610 		 * so execution could continue otherwise which we do /not/ want.
1611 		 *
1612 		 * Note, verifier whitelists all opcodes in bpf_opcode_in_insntable().
1613 		 */
1614 		pr_warn("BPF interpreter: unknown opcode %02x\n", insn->code);
1615 		BUG_ON(1);
1616 		return 0;
1617 }
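
/* For a feel of the dispatch above, a minimal two-insn program
 *
 *	BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 42),
 *	BPF_EXIT_INSN(),
 *
 * takes the ALU64_MOV_K label (DST = IMM, i.e. R0 = 42), CONT advances
 * insn and jumps back to select_insn, and JMP_EXIT returns BPF_R0 = 42
 * to the caller.
 */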
1618 
1619 #define PROG_NAME(stack_size) __bpf_prog_run##stack_size
1620 #define DEFINE_BPF_PROG_RUN(stack_size) \
1621 static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
1622 { \
1623 	u64 stack[stack_size / sizeof(u64)]; \
1624 	u64 regs[MAX_BPF_EXT_REG]; \
1625 \
1626 	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
1627 	ARG1 = (u64) (unsigned long) ctx; \
1628 	return ___bpf_prog_run(regs, insn, stack); \
1629 }
1630 
1631 #define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
1632 #define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
1633 static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
1634 				      const struct bpf_insn *insn) \
1635 { \
1636 	u64 stack[stack_size / sizeof(u64)]; \
1637 	u64 regs[MAX_BPF_EXT_REG]; \
1638 \
1639 	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
1640 	BPF_R1 = r1; \
1641 	BPF_R2 = r2; \
1642 	BPF_R3 = r3; \
1643 	BPF_R4 = r4; \
1644 	BPF_R5 = r5; \
1645 	return ___bpf_prog_run(regs, insn, stack); \
1646 }
1647 
1648 #define EVAL1(FN, X) FN(X)
1649 #define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
1650 #define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
1651 #define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
1652 #define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
1653 #define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)
1654 
1655 EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
1656 EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
1657 EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);
1658 
1659 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
1660 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
1661 EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);
1662 
1663 #define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),
1664 
1665 static unsigned int (*interpreters[])(const void *ctx,
1666 				      const struct bpf_insn *insn) = {
1667 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
1668 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
1669 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
1670 };
1671 #undef PROG_NAME_LIST
1672 #define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
1673 static u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
1674 				  const struct bpf_insn *insn) = {
1675 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
1676 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
1677 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
1678 };
1679 #undef PROG_NAME_LIST
1680 
1681 void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
1682 {
1683 	stack_depth = max_t(u32, stack_depth, 1);
1684 	insn->off = (s16) insn->imm;
1685 	insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
1686 		__bpf_call_base_args;
1687 	insn->code = BPF_JMP | BPF_CALL_ARGS;
1688 }
1689 
1690 #else
1691 static unsigned int __bpf_prog_ret0_warn(const void *ctx,
1692 					 const struct bpf_insn *insn)
1693 {
1694 	/* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
1695 	 * is not working properly, so warn about it!
1696 	 */
1697 	WARN_ON_ONCE(1);
1698 	return 0;
1699 }
1700 #endif
1701 
1702 bool bpf_prog_array_compatible(struct bpf_array *array,
1703 			       const struct bpf_prog *fp)
1704 {
1705 	if (fp->kprobe_override)
1706 		return false;
1707 
1708 	if (!array->owner_prog_type) {
1709 		/* There's no owner yet where we could check for
1710 		 * compatibility.
1711 		 */
1712 		array->owner_prog_type = fp->type;
1713 		array->owner_jited = fp->jited;
1714 
1715 		return true;
1716 	}
1717 
1718 	return array->owner_prog_type == fp->type &&
1719 	       array->owner_jited == fp->jited;
1720 }
1721 
1722 static int bpf_check_tail_call(const struct bpf_prog *fp)
1723 {
1724 	struct bpf_prog_aux *aux = fp->aux;
1725 	int i;
1726 
1727 	for (i = 0; i < aux->used_map_cnt; i++) {
1728 		struct bpf_map *map = aux->used_maps[i];
1729 		struct bpf_array *array;
1730 
1731 		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
1732 			continue;
1733 
1734 		array = container_of(map, struct bpf_array, map);
1735 		if (!bpf_prog_array_compatible(array, fp))
1736 			return -EINVAL;
1737 	}
1738 
1739 	return 0;
1740 }
1741 
1742 static void bpf_prog_select_func(struct bpf_prog *fp)
1743 {
1744 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
1745 	u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
1746 
1747 	fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
1748 #else
1749 	fp->bpf_func = __bpf_prog_ret0_warn;
1750 #endif
1751 }
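
/* A worked example of the bucket selection above, assuming a hypothetical
 * program with aux->stack_depth == 70: round_up(70, 32) is 96, divided by
 * 32 gives 3, minus 1 gives index 2, so interpreters[2], i.e. the 96-byte
 * stack variant __bpf_prog_run96(), becomes fp->bpf_func.
 */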
1752 
1753 /**
1754  *	bpf_prog_select_runtime - select exec runtime for BPF program
1755  *	@fp: bpf_prog populated with internal BPF program
1756  *	@err: pointer to error variable
1757  *
1758  * Try to JIT the eBPF program; if the JIT is not available, use the interpreter.
1759  * The BPF program will be executed via BPF_PROG_RUN() macro.
1760  */
1761 struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
1762 {
1763 	/* In case of BPF-to-BPF calls, the verifier did all the prep
1764 	 * work with regard to JITing, etc.
1765 	 */
1766 	if (fp->bpf_func)
1767 		goto finalize;
1768 
1769 	bpf_prog_select_func(fp);
1770 
1771 	/* eBPF JITs can rewrite the program in case constant
1772 	 * blinding is active. However, in case of error during
1773 	 * blinding, bpf_int_jit_compile() must always return a
1774 	 * valid program, which in this case is simply not JITed
1775 	 * and falls back to the interpreter.
1776 	 */
1777 	if (!bpf_prog_is_dev_bound(fp->aux)) {
1778 		*err = bpf_prog_alloc_jited_linfo(fp);
1779 		if (*err)
1780 			return fp;
1781 
1782 		fp = bpf_int_jit_compile(fp);
1783 		if (!fp->jited) {
1784 			bpf_prog_free_jited_linfo(fp);
1785 #ifdef CONFIG_BPF_JIT_ALWAYS_ON
1786 			*err = -ENOTSUPP;
1787 			return fp;
1788 #endif
1789 		} else {
1790 			bpf_prog_free_unused_jited_linfo(fp);
1791 		}
1792 	} else {
1793 		*err = bpf_prog_offload_compile(fp);
1794 		if (*err)
1795 			return fp;
1796 	}
1797 
1798 finalize:
1799 	bpf_prog_lock_ro(fp);
1800 
1801 	/* The tail call compatibility check can only be done at
1802 	 * this late stage, as we need to determine whether we are
1803 	 * dealing with JITed or non-JITed program concatenations,
1804 	 * and not all eBPF JITs might immediately support all features.
1805 	 */
1806 	*err = bpf_check_tail_call(fp);
1807 
1808 	return fp;
1809 }
1810 EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
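
/* A minimal caller-side sketch (hypothetical, loosely modelled on what a
 * program loader would do) showing the intended error handling around
 * bpf_prog_select_runtime():
 *
 *	int err = 0;
 *
 *	fp = bpf_prog_select_runtime(fp, &err);
 *	if (err < 0)
 *		goto free_prog;		(fp is still valid and must be freed)
 *
 * On success fp->bpf_func is set up and BPF_PROG_RUN(fp, ctx) dispatches
 * either to the JITed image or to one of the interpreter variants above.
 */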
1811 
1812 static unsigned int __bpf_prog_ret1(const void *ctx,
1813 				    const struct bpf_insn *insn)
1814 {
1815 	return 1;
1816 }
1817 
1818 static struct bpf_prog_dummy {
1819 	struct bpf_prog prog;
1820 } dummy_bpf_prog = {
1821 	.prog = {
1822 		.bpf_func = __bpf_prog_ret1,
1823 	},
1824 };
1825 
1826 /* To avoid allocating an empty bpf_prog_array for cgroups that
1827  * don't have a bpf program attached, use one global 'empty_prog_array'.
1828  * It will not be modified by the caller of bpf_prog_array_alloc()
1829  * (since the caller requested prog_cnt == 0); that pointer should
1830  * still be 'freed' via bpf_prog_array_free().
1831  */
1832 static struct {
1833 	struct bpf_prog_array hdr;
1834 	struct bpf_prog *null_prog;
1835 } empty_prog_array = {
1836 	.null_prog = NULL,
1837 };
1838 
1839 struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
1840 {
1841 	if (prog_cnt)
1842 		return kzalloc(sizeof(struct bpf_prog_array) +
1843 			       sizeof(struct bpf_prog_array_item) *
1844 			       (prog_cnt + 1),
1845 			       flags);
1846 
1847 	return &empty_prog_array.hdr;
1848 }
1849 
1850 void bpf_prog_array_free(struct bpf_prog_array *progs)
1851 {
1852 	if (!progs || progs == &empty_prog_array.hdr)
1853 		return;
1854 	kfree_rcu(progs, rcu);
1855 }
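
/* A hedged usage sketch for the two helpers above (hypothetical caller):
 *
 *	struct bpf_prog_array *arr;
 *
 *	arr = bpf_prog_array_alloc(0, GFP_KERNEL);	(shared empty array)
 *	...
 *	bpf_prog_array_free(arr);			(no-op for that array)
 *
 * A non-zero prog_cnt instead kzalloc()s prog_cnt + 1 items, the extra,
 * zeroed item serving as the NULL terminator that the iterators below
 * rely on.
 */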
1856 
1857 int bpf_prog_array_length(struct bpf_prog_array *array)
1858 {
1859 	struct bpf_prog_array_item *item;
1860 	u32 cnt = 0;
1861 
1862 	for (item = array->items; item->prog; item++)
1863 		if (item->prog != &dummy_bpf_prog.prog)
1864 			cnt++;
1865 	return cnt;
1866 }
1867 
1868 bool bpf_prog_array_is_empty(struct bpf_prog_array *array)
1869 {
1870 	struct bpf_prog_array_item *item;
1871 
1872 	for (item = array->items; item->prog; item++)
1873 		if (item->prog != &dummy_bpf_prog.prog)
1874 			return false;
1875 	return true;
1876 }
1877 
1878 static bool bpf_prog_array_copy_core(struct bpf_prog_array *array,
1879 				     u32 *prog_ids,
1880 				     u32 request_cnt)
1881 {
1882 	struct bpf_prog_array_item *item;
1883 	int i = 0;
1884 
1885 	for (item = array->items; item->prog; item++) {
1886 		if (item->prog == &dummy_bpf_prog.prog)
1887 			continue;
1888 		prog_ids[i] = item->prog->aux->id;
1889 		if (++i == request_cnt) {
1890 			item++;
1891 			break;
1892 		}
1893 	}
1894 
1895 	return !!(item->prog);
1896 }
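
/* For example (hedged): with three real programs in the array and
 * request_cnt == 2, the loop above copies two ids, leaves item pointing
 * at the third entry and returns true; the callers below turn that into
 * -ENOSPC so user space knows its buffer was too small.
 */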
1897 
1898 int bpf_prog_array_copy_to_user(struct bpf_prog_array *array,
1899 				__u32 __user *prog_ids, u32 cnt)
1900 {
1901 	unsigned long err = 0;
1902 	bool nospc;
1903 	u32 *ids;
1904 
1905 	/* users of this function are doing:
1906 	 * cnt = bpf_prog_array_length();
1907 	 * if (cnt > 0)
1908 	 *     bpf_prog_array_copy_to_user(..., cnt);
1909 	 * so the kcalloc below doesn't need an extra cnt > 0 check.
1910 	 */
1911 	ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
1912 	if (!ids)
1913 		return -ENOMEM;
1914 	nospc = bpf_prog_array_copy_core(array, ids, cnt);
1915 	err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
1916 	kfree(ids);
1917 	if (err)
1918 		return -EFAULT;
1919 	if (nospc)
1920 		return -ENOSPC;
1921 	return 0;
1922 }
1923 
1924 void bpf_prog_array_delete_safe(struct bpf_prog_array *array,
1925 				struct bpf_prog *old_prog)
1926 {
1927 	struct bpf_prog_array_item *item;
1928 
1929 	for (item = array->items; item->prog; item++)
1930 		if (item->prog == old_prog) {
1931 			WRITE_ONCE(item->prog, &dummy_bpf_prog.prog);
1932 			break;
1933 		}
1934 }
1935 
1936 int bpf_prog_array_copy(struct bpf_prog_array *old_array,
1937 			struct bpf_prog *exclude_prog,
1938 			struct bpf_prog *include_prog,
1939 			struct bpf_prog_array **new_array)
1940 {
1941 	int new_prog_cnt, carry_prog_cnt = 0;
1942 	struct bpf_prog_array_item *existing;
1943 	struct bpf_prog_array *array;
1944 	bool found_exclude = false;
1945 	int new_prog_idx = 0;
1946 
1947 	/* Figure out how many existing progs we need to carry over to
1948 	 * the new array.
1949 	 */
1950 	if (old_array) {
1951 		existing = old_array->items;
1952 		for (; existing->prog; existing++) {
1953 			if (existing->prog == exclude_prog) {
1954 				found_exclude = true;
1955 				continue;
1956 			}
1957 			if (existing->prog != &dummy_bpf_prog.prog)
1958 				carry_prog_cnt++;
1959 			if (existing->prog == include_prog)
1960 				return -EEXIST;
1961 		}
1962 	}
1963 
1964 	if (exclude_prog && !found_exclude)
1965 		return -ENOENT;
1966 
1967 	/* How many progs (not NULL) will be in the new array? */
1968 	new_prog_cnt = carry_prog_cnt;
1969 	if (include_prog)
1970 		new_prog_cnt += 1;
1971 
1972 	/* Do we have any prog (not NULL) in the new array? */
1973 	if (!new_prog_cnt) {
1974 		*new_array = NULL;
1975 		return 0;
1976 	}
1977 
1978 	/* +1 as the end of prog_array is marked with NULL */
1979 	array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
1980 	if (!array)
1981 		return -ENOMEM;
1982 
1983 	/* Fill in the new prog array */
1984 	if (carry_prog_cnt) {
1985 		existing = old_array->items;
1986 		for (; existing->prog; existing++)
1987 			if (existing->prog != exclude_prog &&
1988 			    existing->prog != &dummy_bpf_prog.prog) {
1989 				array->items[new_prog_idx++].prog =
1990 					existing->prog;
1991 			}
1992 	}
1993 	if (include_prog)
1994 		array->items[new_prog_idx++].prog = include_prog;
1995 	array->items[new_prog_idx].prog = NULL;
1996 	*new_array = array;
1997 	return 0;
1998 }
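
/* A hypothetical attach-side sketch of how this is typically used (the
 * names owner->progs, old_array and new_array are illustrative): copy the
 * current array while adding the new program, publish the copy, then drop
 * the old one:
 *
 *	err = bpf_prog_array_copy(old_array, NULL, prog, &new_array);
 *	if (err < 0)
 *		return err;
 *	rcu_assign_pointer(owner->progs, new_array);
 *	bpf_prog_array_free(old_array);
 *
 * Passing an exclude_prog (and a NULL include_prog) instead is the
 * detach-side counterpart.
 */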
1999 
2000 int bpf_prog_array_copy_info(struct bpf_prog_array *array,
2001 			     u32 *prog_ids, u32 request_cnt,
2002 			     u32 *prog_cnt)
2003 {
2004 	u32 cnt = 0;
2005 
2006 	if (array)
2007 		cnt = bpf_prog_array_length(array);
2008 
2009 	*prog_cnt = cnt;
2010 
2011 	/* return early if user requested only program count or nothing to copy */
2012 	if (!request_cnt || !cnt)
2013 		return 0;
2014 
2015 	/* this function is called under trace/bpf_trace.c: bpf_event_mutex */
2016 	return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? -ENOSPC
2017 								     : 0;
2018 }
2019 
2020 static void bpf_prog_free_deferred(struct work_struct *work)
2021 {
2022 	struct bpf_prog_aux *aux;
2023 	int i;
2024 
2025 	aux = container_of(work, struct bpf_prog_aux, work);
2026 	if (bpf_prog_is_dev_bound(aux))
2027 		bpf_prog_offload_destroy(aux->prog);
2028 #ifdef CONFIG_PERF_EVENTS
2029 	if (aux->prog->has_callchain_buf)
2030 		put_callchain_buffers();
2031 #endif
2032 	for (i = 0; i < aux->func_cnt; i++)
2033 		bpf_jit_free(aux->func[i]);
2034 	if (aux->func_cnt) {
2035 		kfree(aux->func);
2036 		bpf_prog_unlock_free(aux->prog);
2037 	} else {
2038 		bpf_jit_free(aux->prog);
2039 	}
2040 }
2041 
2042 /* Free internal BPF program */
2043 void bpf_prog_free(struct bpf_prog *fp)
2044 {
2045 	struct bpf_prog_aux *aux = fp->aux;
2046 
2047 	INIT_WORK(&aux->work, bpf_prog_free_deferred);
2048 	schedule_work(&aux->work);
2049 }
2050 EXPORT_SYMBOL_GPL(bpf_prog_free);
2051 
2052 /* RNG for unprivileged user space with separated state from prandom_u32(). */
2053 static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);
2054 
2055 void bpf_user_rnd_init_once(void)
2056 {
2057 	prandom_init_once(&bpf_user_rnd_state);
2058 }
2059 
2060 BPF_CALL_0(bpf_user_rnd_u32)
2061 {
2062 	/* Should someone ever have the rather unwise idea to use some
2063 	 * of the registers passed into this function, then note that
2064 	 * this function is called from native eBPF and classic-to-eBPF
2065 	 * transformations. Register assignments from both sides are
2066 	 * different, e.g. classic always sets fn(ctx, A, X) here.
2067 	 */
2068 	struct rnd_state *state;
2069 	u32 res;
2070 
2071 	state = &get_cpu_var(bpf_user_rnd_state);
2072 	res = prandom_u32_state(state);
2073 	put_cpu_var(bpf_user_rnd_state);
2074 
2075 	return res;
2076 }
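
/* A hedged sketch of how a BPF_CALL_0() helper like the one above is
 * typically exposed to programs through a bpf_func_proto (the real
 * bpf_get_prandom_u32_proto lives in another file; the field values here
 * are illustrative):
 *
 *	const struct bpf_func_proto bpf_get_prandom_u32_proto = {
 *		.func		= bpf_user_rnd_u32,
 *		.gpl_only	= false,
 *		.ret_type	= RET_INTEGER,
 *	};
 */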
2077 
2078 /* Weak definitions of helper functions in case we don't have bpf syscall. */
2079 const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
2080 const struct bpf_func_proto bpf_map_update_elem_proto __weak;
2081 const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
2082 const struct bpf_func_proto bpf_map_push_elem_proto __weak;
2083 const struct bpf_func_proto bpf_map_pop_elem_proto __weak;
2084 const struct bpf_func_proto bpf_map_peek_elem_proto __weak;
2085 const struct bpf_func_proto bpf_spin_lock_proto __weak;
2086 const struct bpf_func_proto bpf_spin_unlock_proto __weak;
2087 
2088 const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
2089 const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
2090 const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
2091 const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
2092 const struct bpf_func_proto bpf_ktime_get_boot_ns_proto __weak;
2093 
2094 const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
2095 const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
2096 const struct bpf_func_proto bpf_get_current_comm_proto __weak;
2097 const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
2098 const struct bpf_func_proto bpf_get_local_storage_proto __weak;
2099 
2100 const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
2101 {
2102 	return NULL;
2103 }
2104 
2105 u64 __weak
2106 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
2107 		 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
2108 {
2109 	return -ENOTSUPP;
2110 }
2111 EXPORT_SYMBOL_GPL(bpf_event_output);
2112 
2113 /* Always built-in helper functions. */
2114 const struct bpf_func_proto bpf_tail_call_proto = {
2115 	.func		= NULL,
2116 	.gpl_only	= false,
2117 	.ret_type	= RET_VOID,
2118 	.arg1_type	= ARG_PTR_TO_CTX,
2119 	.arg2_type	= ARG_CONST_MAP_PTR,
2120 	.arg3_type	= ARG_ANYTHING,
2121 };
2122 
2123 /* Stub for JITs that only support cBPF. eBPF programs are interpreted.
2124  * It is encouraged to implement bpf_int_jit_compile() instead, so that
2125  * eBPF and implicitly also cBPF can get JITed!
2126  */
2127 struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
2128 {
2129 	return prog;
2130 }
2131 
2132 /* Stub for JITs that support eBPF. All cBPF code gets transformed into
2133  * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
2134  */
2135 void __weak bpf_jit_compile(struct bpf_prog *prog)
2136 {
2137 }
2138 
2139 bool __weak bpf_helper_changes_pkt_data(void *func)
2140 {
2141 	return false;
2142 }
2143 
2144 /* Return TRUE if the JIT backend wants the verifier to enable sub-register
2145  * usage analysis code and wants explicit zero extension inserted by the
2146  * verifier. Otherwise, return FALSE.
2147  */
2148 bool __weak bpf_jit_needs_zext(void)
2149 {
2150 	return false;
2151 }
2152 
2153 /* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
2154  * skb_copy_bits(), so provide a weak definition of it for NET-less config.
2155  */
2156 int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
2157 			 int len)
2158 {
2159 	return -EFAULT;
2160 }
2161 
2162 DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
2163 EXPORT_SYMBOL(bpf_stats_enabled_key);
2164 
2165 /* All definitions of tracepoints related to BPF. */
2166 #define CREATE_TRACE_POINTS
2167 #include <linux/bpf_trace.h>
2168 
2169 EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);
2170 EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_bulk_tx);
2171