1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Linux Socket Filter - Kernel level socket filtering
4 *
5 * Based on the design of the Berkeley Packet Filter. The new
6 * internal format has been designed by PLUMgrid:
7 *
8 * Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
9 *
10 * Authors:
11 *
12 * Jay Schulist <jschlst@samba.org>
13 * Alexei Starovoitov <ast@plumgrid.com>
14 * Daniel Borkmann <dborkman@redhat.com>
15 *
16 * Andi Kleen - Fix a few bad bugs and races.
17 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
18 */
19
20 #include <uapi/linux/btf.h>
21 #include <linux/filter.h>
22 #include <linux/skbuff.h>
23 #include <linux/vmalloc.h>
24 #include <linux/random.h>
25 #include <linux/moduleloader.h>
26 #include <linux/bpf.h>
27 #include <linux/btf.h>
28 #include <linux/objtool.h>
29 #include <linux/rbtree_latch.h>
30 #include <linux/kallsyms.h>
31 #include <linux/rcupdate.h>
32 #include <linux/perf_event.h>
33 #include <linux/extable.h>
34 #include <linux/log2.h>
35 #include <linux/nospec.h>
36
37 #include <asm/barrier.h>
38 #include <asm/unaligned.h>
39
40 #include <trace/hooks/memory.h>
41
42 /* Registers */
43 #define BPF_R0 regs[BPF_REG_0]
44 #define BPF_R1 regs[BPF_REG_1]
45 #define BPF_R2 regs[BPF_REG_2]
46 #define BPF_R3 regs[BPF_REG_3]
47 #define BPF_R4 regs[BPF_REG_4]
48 #define BPF_R5 regs[BPF_REG_5]
49 #define BPF_R6 regs[BPF_REG_6]
50 #define BPF_R7 regs[BPF_REG_7]
51 #define BPF_R8 regs[BPF_REG_8]
52 #define BPF_R9 regs[BPF_REG_9]
53 #define BPF_R10 regs[BPF_REG_10]
54
55 /* Named registers */
56 #define DST regs[insn->dst_reg]
57 #define SRC regs[insn->src_reg]
58 #define FP regs[BPF_REG_FP]
59 #define AX regs[BPF_REG_AX]
60 #define ARG1 regs[BPF_REG_ARG1]
61 #define CTX regs[BPF_REG_CTX]
62 #define IMM insn->imm
63
64 /* No hurry in this branch
65 *
66 * Exported for the bpf jit load helper.
67 */
68 void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
69 {
70 u8 *ptr = NULL;
71
72 if (k >= SKF_NET_OFF) {
73 ptr = skb_network_header(skb) + k - SKF_NET_OFF;
74 } else if (k >= SKF_LL_OFF) {
75 if (unlikely(!skb_mac_header_was_set(skb)))
76 return NULL;
77 ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
78 }
79 if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
80 return ptr;
81
82 return NULL;
83 }
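/* Example for the helper above (illustrative, offsets assumed): a classic BPF
 * filter doing "ld [SKF_NET_OFF + 12]" reaches this helper with a negative
 * k == SKF_NET_OFF + 12, which is resolved to skb_network_header(skb) + 12,
 * i.e. byte 12 of the network header. NULL is returned whenever the requested
 * bytes fall outside the linear skb data.
 */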
84
85 struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags)
86 {
87 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
88 struct bpf_prog_aux *aux;
89 struct bpf_prog *fp;
90
91 size = round_up(size, PAGE_SIZE);
92 fp = __vmalloc(size, gfp_flags);
93 if (fp == NULL)
94 return NULL;
95
96 aux = kzalloc(sizeof(*aux), GFP_KERNEL | gfp_extra_flags);
97 if (aux == NULL) {
98 vfree(fp);
99 return NULL;
100 }
101
102 fp->pages = size / PAGE_SIZE;
103 fp->aux = aux;
104 fp->aux->prog = fp;
105 fp->jit_requested = ebpf_jit_enabled();
106
107 INIT_LIST_HEAD_RCU(&fp->aux->ksym.lnode);
108 mutex_init(&fp->aux->used_maps_mutex);
109 mutex_init(&fp->aux->dst_mutex);
110
111 return fp;
112 }
113
114 struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
115 {
116 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
117 struct bpf_prog *prog;
118 int cpu;
119
120 prog = bpf_prog_alloc_no_stats(size, gfp_extra_flags);
121 if (!prog)
122 return NULL;
123
124 prog->aux->stats = alloc_percpu_gfp(struct bpf_prog_stats, gfp_flags);
125 if (!prog->aux->stats) {
126 kfree(prog->aux);
127 vfree(prog);
128 return NULL;
129 }
130
131 for_each_possible_cpu(cpu) {
132 struct bpf_prog_stats *pstats;
133
134 pstats = per_cpu_ptr(prog->aux->stats, cpu);
135 u64_stats_init(&pstats->syncp);
136 }
137 return prog;
138 }
139 EXPORT_SYMBOL_GPL(bpf_prog_alloc);
140
141 int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog)
142 {
143 if (!prog->aux->nr_linfo || !prog->jit_requested)
144 return 0;
145
146 prog->aux->jited_linfo = kcalloc(prog->aux->nr_linfo,
147 sizeof(*prog->aux->jited_linfo),
148 GFP_KERNEL | __GFP_NOWARN);
149 if (!prog->aux->jited_linfo)
150 return -ENOMEM;
151
152 return 0;
153 }
154
155 void bpf_prog_free_jited_linfo(struct bpf_prog *prog)
156 {
157 kfree(prog->aux->jited_linfo);
158 prog->aux->jited_linfo = NULL;
159 }
160
161 void bpf_prog_free_unused_jited_linfo(struct bpf_prog *prog)
162 {
163 if (prog->aux->jited_linfo && !prog->aux->jited_linfo[0])
164 bpf_prog_free_jited_linfo(prog);
165 }
166
167 /* The jit engine is responsible for providing an array
168 * for the insn_off to jited_off mapping (insn_to_jit_off).
169 *
170 * The idx to this array is the insn_off. Hence, the insn_off
171 * here is relative to the prog itself instead of the main prog.
172 * This array has one entry for each xlated bpf insn.
173 *
174 * jited_off is the byte off to the last byte of the jited insn.
175 *
176 * Hence, with
177 * insn_start:
178 * The first bpf insn off of the prog. The insn off
179 * here is relative to the main prog.
180 * e.g. if prog is a subprog, insn_start > 0
181 * linfo_idx:
182 * The prog's idx to prog->aux->linfo and jited_linfo
183 *
184 * jited_linfo[linfo_idx] = prog->bpf_func
185 *
186 * For i > linfo_idx,
187 *
188 * jited_linfo[i] = prog->bpf_func +
189 * insn_to_jit_off[linfo[i].insn_off - insn_start - 1]
190 */
191 void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
192 const u32 *insn_to_jit_off)
193 {
194 u32 linfo_idx, insn_start, insn_end, nr_linfo, i;
195 const struct bpf_line_info *linfo;
196 void **jited_linfo;
197
198 if (!prog->aux->jited_linfo)
199 /* Userspace did not provide linfo */
200 return;
201
202 linfo_idx = prog->aux->linfo_idx;
203 linfo = &prog->aux->linfo[linfo_idx];
204 insn_start = linfo[0].insn_off;
205 insn_end = insn_start + prog->len;
206
207 jited_linfo = &prog->aux->jited_linfo[linfo_idx];
208 jited_linfo[0] = prog->bpf_func;
209
210 nr_linfo = prog->aux->nr_linfo - linfo_idx;
211
212 for (i = 1; i < nr_linfo && linfo[i].insn_off < insn_end; i++)
213 /* The verifier ensures that linfo[i].insn_off is
214 * strictly increasing
215 */
216 jited_linfo[i] = prog->bpf_func +
217 insn_to_jit_off[linfo[i].insn_off - insn_start - 1];
218 }
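/* Worked example for the mapping above (numbers are made up): for a subprog
 * whose first line info entry has insn_off == 20, insn_start is 20. A later
 * entry with insn_off == 23 is then mapped as
 *
 *	jited_linfo[i] = prog->bpf_func + insn_to_jit_off[23 - 20 - 1]
 *
 * i.e. insn_to_jit_off[2], the byte offset of the end of xlated insn 22,
 * which is exactly where the JITed code for insn 23 begins.
 */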
219
220 void bpf_prog_free_linfo(struct bpf_prog *prog)
221 {
222 bpf_prog_free_jited_linfo(prog);
223 kvfree(prog->aux->linfo);
224 }
225
226 struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
227 gfp_t gfp_extra_flags)
228 {
229 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
230 struct bpf_prog *fp;
231 u32 pages, delta;
232 int ret;
233
234 size = round_up(size, PAGE_SIZE);
235 pages = size / PAGE_SIZE;
236 if (pages <= fp_old->pages)
237 return fp_old;
238
239 delta = pages - fp_old->pages;
240 ret = __bpf_prog_charge(fp_old->aux->user, delta);
241 if (ret)
242 return NULL;
243
244 fp = __vmalloc(size, gfp_flags);
245 if (fp == NULL) {
246 __bpf_prog_uncharge(fp_old->aux->user, delta);
247 } else {
248 memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
249 fp->pages = pages;
250 fp->aux->prog = fp;
251
252 /* We keep fp->aux from fp_old around in the new
253 * reallocated structure.
254 */
255 fp_old->aux = NULL;
256 __bpf_prog_free(fp_old);
257 }
258
259 return fp;
260 }
261
262 void __bpf_prog_free(struct bpf_prog *fp)
263 {
264 if (fp->aux) {
265 mutex_destroy(&fp->aux->used_maps_mutex);
266 mutex_destroy(&fp->aux->dst_mutex);
267 free_percpu(fp->aux->stats);
268 kfree(fp->aux->poke_tab);
269 kfree(fp->aux);
270 }
271 vfree(fp);
272 }
273
274 int bpf_prog_calc_tag(struct bpf_prog *fp)
275 {
276 const u32 bits_offset = SHA1_BLOCK_SIZE - sizeof(__be64);
277 u32 raw_size = bpf_prog_tag_scratch_size(fp);
278 u32 digest[SHA1_DIGEST_WORDS];
279 u32 ws[SHA1_WORKSPACE_WORDS];
280 u32 i, bsize, psize, blocks;
281 struct bpf_insn *dst;
282 bool was_ld_map;
283 u8 *raw, *todo;
284 __be32 *result;
285 __be64 *bits;
286
287 raw = vmalloc(raw_size);
288 if (!raw)
289 return -ENOMEM;
290
291 sha1_init(digest);
292 memset(ws, 0, sizeof(ws));
293
294 /* We need to take out the map fd for the digest calculation
295 * since they are unstable from user space side.
296 */
297 dst = (void *)raw;
298 for (i = 0, was_ld_map = false; i < fp->len; i++) {
299 dst[i] = fp->insnsi[i];
300 if (!was_ld_map &&
301 dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
302 (dst[i].src_reg == BPF_PSEUDO_MAP_FD ||
303 dst[i].src_reg == BPF_PSEUDO_MAP_VALUE)) {
304 was_ld_map = true;
305 dst[i].imm = 0;
306 } else if (was_ld_map &&
307 dst[i].code == 0 &&
308 dst[i].dst_reg == 0 &&
309 dst[i].src_reg == 0 &&
310 dst[i].off == 0) {
311 was_ld_map = false;
312 dst[i].imm = 0;
313 } else {
314 was_ld_map = false;
315 }
316 }
317
318 psize = bpf_prog_insn_size(fp);
319 memset(&raw[psize], 0, raw_size - psize);
320 raw[psize++] = 0x80;
321
322 bsize = round_up(psize, SHA1_BLOCK_SIZE);
323 blocks = bsize / SHA1_BLOCK_SIZE;
324 todo = raw;
325 if (bsize - psize >= sizeof(__be64)) {
326 bits = (__be64 *)(todo + bsize - sizeof(__be64));
327 } else {
328 bits = (__be64 *)(todo + bsize + bits_offset);
329 blocks++;
330 }
331 *bits = cpu_to_be64((psize - 1) << 3);
332
333 while (blocks--) {
334 sha1_transform(digest, todo, ws);
335 todo += SHA1_BLOCK_SIZE;
336 }
337
338 result = (__force __be32 *)digest;
339 for (i = 0; i < SHA1_DIGEST_WORDS; i++)
340 result[i] = cpu_to_be32(digest[i]);
341 memcpy(fp->tag, result, sizeof(fp->tag));
342
343 vfree(raw);
344 return 0;
345 }
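/* The resulting tag is what user space sees as the program tag, e.g. in
 * bpftool output or in the prog fd's fdinfo. Sketch of the scratch buffer
 * hashed above:
 *
 *	[ insns, map fd immediates zeroed | 0x80 | zero pad | be64 bit count ]
 *
 * which is standard SHA-1 message padding applied by hand.
 */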
346
347 static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old,
348 s32 end_new, s32 curr, const bool probe_pass)
349 {
350 const s64 imm_min = S32_MIN, imm_max = S32_MAX;
351 s32 delta = end_new - end_old;
352 s64 imm = insn->imm;
353
354 if (curr < pos && curr + imm + 1 >= end_old)
355 imm += delta;
356 else if (curr >= end_new && curr + imm + 1 < end_new)
357 imm -= delta;
358 if (imm < imm_min || imm > imm_max)
359 return -ERANGE;
360 if (!probe_pass)
361 insn->imm = imm;
362 return 0;
363 }
364
365 static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
366 s32 end_new, s32 curr, const bool probe_pass)
367 {
368 const s32 off_min = S16_MIN, off_max = S16_MAX;
369 s32 delta = end_new - end_old;
370 s32 off = insn->off;
371
372 if (curr < pos && curr + off + 1 >= end_old)
373 off += delta;
374 else if (curr >= end_new && curr + off + 1 < end_new)
375 off -= delta;
376 if (off < off_min || off > off_max)
377 return -ERANGE;
378 if (!probe_pass)
379 insn->off = off;
380 return 0;
381 }
382
383 static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, s32 end_old,
384 s32 end_new, const bool probe_pass)
385 {
386 u32 i, insn_cnt = prog->len + (probe_pass ? end_new - end_old : 0);
387 struct bpf_insn *insn = prog->insnsi;
388 int ret = 0;
389
390 for (i = 0; i < insn_cnt; i++, insn++) {
391 u8 code;
392
393 /* In the probing pass we still operate on the original,
394 * unpatched image in order to check overflows before we
395 * do any other adjustments. Therefore skip the patchlet.
396 */
397 if (probe_pass && i == pos) {
398 i = end_new;
399 insn = prog->insnsi + end_old;
400 }
401 code = insn->code;
402 if ((BPF_CLASS(code) != BPF_JMP &&
403 BPF_CLASS(code) != BPF_JMP32) ||
404 BPF_OP(code) == BPF_EXIT)
405 continue;
406 /* Adjust offset of jmps if we cross patch boundaries. */
407 if (BPF_OP(code) == BPF_CALL) {
408 if (insn->src_reg != BPF_PSEUDO_CALL)
409 continue;
410 ret = bpf_adj_delta_to_imm(insn, pos, end_old,
411 end_new, i, probe_pass);
412 } else {
413 ret = bpf_adj_delta_to_off(insn, pos, end_old,
414 end_new, i, probe_pass);
415 }
416 if (ret)
417 break;
418 }
419
420 return ret;
421 }
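/* Example of the adjustment above (indexes are made up): with a patch at
 * pos == 5 growing one insn into three (end_old == 6, end_new == 8, so
 * delta == 2), a jump at insn 2 with off == 4 originally targets insn 7
 * (2 + 4 + 1), which lies behind the patched area, so its offset is bumped
 * to 6. Jumps that both start and target before the patch stay untouched.
 */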
422
423 static void bpf_adj_linfo(struct bpf_prog *prog, u32 off, u32 delta)
424 {
425 struct bpf_line_info *linfo;
426 u32 i, nr_linfo;
427
428 nr_linfo = prog->aux->nr_linfo;
429 if (!nr_linfo || !delta)
430 return;
431
432 linfo = prog->aux->linfo;
433
434 for (i = 0; i < nr_linfo; i++)
435 if (off < linfo[i].insn_off)
436 break;
437
438 /* Push all off < linfo[i].insn_off by delta */
439 for (; i < nr_linfo; i++)
440 linfo[i].insn_off += delta;
441 }
442
443 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
444 const struct bpf_insn *patch, u32 len)
445 {
446 u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
447 const u32 cnt_max = S16_MAX;
448 struct bpf_prog *prog_adj;
449 int err;
450
451 /* Since our patchlet doesn't expand the image, we're done. */
452 if (insn_delta == 0) {
453 memcpy(prog->insnsi + off, patch, sizeof(*patch));
454 return prog;
455 }
456
457 insn_adj_cnt = prog->len + insn_delta;
458
459 /* Reject anything that would potentially let the insn->off
460 * target overflow when we have excessive program expansions.
461 * We need to probe here before we do any reallocation where
462 * we afterwards may not fail anymore.
463 */
464 if (insn_adj_cnt > cnt_max &&
465 (err = bpf_adj_branches(prog, off, off + 1, off + len, true)))
466 return ERR_PTR(err);
467
468 /* Several new instructions need to be inserted. Make room
469 * for them. Likely, there's no need for a new allocation as
470 * last page could have large enough tailroom.
471 */
472 prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
473 GFP_USER);
474 if (!prog_adj)
475 return ERR_PTR(-ENOMEM);
476
477 prog_adj->len = insn_adj_cnt;
478
479 /* Patching happens in 3 steps:
480 *
481 * 1) Move over tail of insnsi from next instruction onwards,
482 * so we can patch the single target insn with one or more
483 * new ones (patching is always from 1 to n insns, n > 0).
484 * 2) Inject new instructions at the target location.
485 * 3) Adjust branch offsets if necessary.
486 */
487 insn_rest = insn_adj_cnt - off - len;
488
489 memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
490 sizeof(*patch) * insn_rest);
491 memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);
492
493 /* We are guaranteed to not fail at this point, otherwise
494 * the ship has sailed and there is no way back to the
495 * original state. An overflow cannot happen at this point.
496 */
497 BUG_ON(bpf_adj_branches(prog_adj, off, off + 1, off + len, false));
498
499 bpf_adj_linfo(prog_adj, off, insn_delta);
500
501 return prog_adj;
502 }
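/* Usage sketch (illustrative only, not a call site in this file): replacing
 * the instruction at 'off' with a two insn sequence, the way verifier
 * rewrites typically do:
 *
 *	struct bpf_insn patch[] = {
 *		BPF_MOV64_REG(BPF_REG_AX, BPF_REG_1),
 *		BPF_ALU64_IMM(BPF_ADD, BPF_REG_AX, 8),
 *	};
 *
 *	prog = bpf_patch_insn_single(prog, off, patch, ARRAY_SIZE(patch));
 *	if (IS_ERR(prog))
 *		return PTR_ERR(prog);
 *
 * On success the old prog pointer may have been freed by the realloc path
 * and must not be used anymore.
 */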
503
504 int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt)
505 {
506 /* Branch offsets can't overflow when program is shrinking, no need
507 * to call bpf_adj_branches(..., true) here
508 */
509 memmove(prog->insnsi + off, prog->insnsi + off + cnt,
510 sizeof(struct bpf_insn) * (prog->len - off - cnt));
511 prog->len -= cnt;
512
513 return WARN_ON_ONCE(bpf_adj_branches(prog, off, off + cnt, off, false));
514 }
515
516 static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
517 {
518 int i;
519
520 for (i = 0; i < fp->aux->func_cnt; i++)
521 bpf_prog_kallsyms_del(fp->aux->func[i]);
522 }
523
524 void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
525 {
526 bpf_prog_kallsyms_del_subprogs(fp);
527 bpf_prog_kallsyms_del(fp);
528 }
529
530 #ifdef CONFIG_BPF_JIT
531 /* All BPF JIT sysctl knobs here. */
532 int bpf_jit_enable __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
533 int bpf_jit_kallsyms __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
534 int bpf_jit_harden __read_mostly;
535 long bpf_jit_limit __read_mostly;
536 long bpf_jit_limit_max __read_mostly;
537
538 static void
539 bpf_prog_ksym_set_addr(struct bpf_prog *prog)
540 {
541 const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog);
542 unsigned long addr = (unsigned long)hdr;
543
544 WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));
545
546 prog->aux->ksym.start = (unsigned long) prog->bpf_func;
547 prog->aux->ksym.end = addr + hdr->pages * PAGE_SIZE;
548 }
549
550 static void
551 bpf_prog_ksym_set_name(struct bpf_prog *prog)
552 {
553 char *sym = prog->aux->ksym.name;
554 const char *end = sym + KSYM_NAME_LEN;
555 const struct btf_type *type;
556 const char *func_name;
557
558 BUILD_BUG_ON(sizeof("bpf_prog_") +
559 sizeof(prog->tag) * 2 +
560 /* name has been null terminated.
561 * We would need +1 for the '_' preceding
562 * the name. However, the null character
563 * is double counted between the name and the
564 * sizeof("bpf_prog_") above, so we omit
565 * the +1 here.
566 */
567 sizeof(prog->aux->name) > KSYM_NAME_LEN);
568
569 sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
570 sym = bin2hex(sym, prog->tag, sizeof(prog->tag));
571
572 /* prog->aux->name will be ignored if full btf name is available */
573 if (prog->aux->func_info_cnt) {
574 type = btf_type_by_id(prog->aux->btf,
575 prog->aux->func_info[prog->aux->func_idx].type_id);
576 func_name = btf_name_by_offset(prog->aux->btf, type->name_off);
577 snprintf(sym, (size_t)(end - sym), "_%s", func_name);
578 return;
579 }
580
581 if (prog->aux->name[0])
582 snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
583 else
584 *sym = 0;
585 }
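/* The resulting kallsyms entry looks like (tag and name assumed for the
 * example):
 *
 *	bpf_prog_8937c458c1a3a6b2_my_prog
 *
 * i.e. "bpf_prog_" followed by the 8-byte tag in hex and, if available,
 * the BTF function name or prog->aux->name.
 */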
586
587 static unsigned long bpf_get_ksym_start(struct latch_tree_node *n)
588 {
589 return container_of(n, struct bpf_ksym, tnode)->start;
590 }
591
592 static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
593 struct latch_tree_node *b)
594 {
595 return bpf_get_ksym_start(a) < bpf_get_ksym_start(b);
596 }
597
598 static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
599 {
600 unsigned long val = (unsigned long)key;
601 const struct bpf_ksym *ksym;
602
603 ksym = container_of(n, struct bpf_ksym, tnode);
604
605 if (val < ksym->start)
606 return -1;
607 /* Ensure that we detect return addresses as part of the program, when
608 * the final instruction is a call for a program part of the stack
609 * trace. Therefore, do val > ksym->end instead of val >= ksym->end.
610 */
611 if (val > ksym->end)
612 return 1;
613
614 return 0;
615 }
616
617 static const struct latch_tree_ops bpf_tree_ops = {
618 .less = bpf_tree_less,
619 .comp = bpf_tree_comp,
620 };
621
622 static DEFINE_SPINLOCK(bpf_lock);
623 static LIST_HEAD(bpf_kallsyms);
624 static struct latch_tree_root bpf_tree __cacheline_aligned;
625
626 void bpf_ksym_add(struct bpf_ksym *ksym)
627 {
628 spin_lock_bh(&bpf_lock);
629 WARN_ON_ONCE(!list_empty(&ksym->lnode));
630 list_add_tail_rcu(&ksym->lnode, &bpf_kallsyms);
631 latch_tree_insert(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
632 spin_unlock_bh(&bpf_lock);
633 }
634
635 static void __bpf_ksym_del(struct bpf_ksym *ksym)
636 {
637 if (list_empty(&ksym->lnode))
638 return;
639
640 latch_tree_erase(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
641 list_del_rcu(&ksym->lnode);
642 }
643
644 void bpf_ksym_del(struct bpf_ksym *ksym)
645 {
646 spin_lock_bh(&bpf_lock);
647 __bpf_ksym_del(ksym);
648 spin_unlock_bh(&bpf_lock);
649 }
650
651 static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
652 {
653 return fp->jited && !bpf_prog_was_classic(fp);
654 }
655
656 static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
657 {
658 return list_empty(&fp->aux->ksym.lnode) ||
659 fp->aux->ksym.lnode.prev == LIST_POISON2;
660 }
661
662 void bpf_prog_kallsyms_add(struct bpf_prog *fp)
663 {
664 if (!bpf_prog_kallsyms_candidate(fp) ||
665 !bpf_capable())
666 return;
667
668 bpf_prog_ksym_set_addr(fp);
669 bpf_prog_ksym_set_name(fp);
670 fp->aux->ksym.prog = true;
671
672 bpf_ksym_add(&fp->aux->ksym);
673 }
674
675 void bpf_prog_kallsyms_del(struct bpf_prog *fp)
676 {
677 if (!bpf_prog_kallsyms_candidate(fp))
678 return;
679
680 bpf_ksym_del(&fp->aux->ksym);
681 }
682
683 static struct bpf_ksym *bpf_ksym_find(unsigned long addr)
684 {
685 struct latch_tree_node *n;
686
687 n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
688 return n ? container_of(n, struct bpf_ksym, tnode) : NULL;
689 }
690
691 const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
692 unsigned long *off, char *sym)
693 {
694 struct bpf_ksym *ksym;
695 char *ret = NULL;
696
697 rcu_read_lock();
698 ksym = bpf_ksym_find(addr);
699 if (ksym) {
700 unsigned long symbol_start = ksym->start;
701 unsigned long symbol_end = ksym->end;
702
703 strncpy(sym, ksym->name, KSYM_NAME_LEN);
704
705 ret = sym;
706 if (size)
707 *size = symbol_end - symbol_start;
708 if (off)
709 *off = addr - symbol_start;
710 }
711 rcu_read_unlock();
712
713 return ret;
714 }
715
716 bool is_bpf_text_address(unsigned long addr)
717 {
718 bool ret;
719
720 rcu_read_lock();
721 ret = bpf_ksym_find(addr) != NULL;
722 rcu_read_unlock();
723
724 return ret;
725 }
726
727 static struct bpf_prog *bpf_prog_ksym_find(unsigned long addr)
728 {
729 struct bpf_ksym *ksym = bpf_ksym_find(addr);
730
731 return ksym && ksym->prog ?
732 container_of(ksym, struct bpf_prog_aux, ksym)->prog :
733 NULL;
734 }
735
736 const struct exception_table_entry *search_bpf_extables(unsigned long addr)
737 {
738 const struct exception_table_entry *e = NULL;
739 struct bpf_prog *prog;
740
741 rcu_read_lock();
742 prog = bpf_prog_ksym_find(addr);
743 if (!prog)
744 goto out;
745 if (!prog->aux->num_exentries)
746 goto out;
747
748 e = search_extable(prog->aux->extable, prog->aux->num_exentries, addr);
749 out:
750 rcu_read_unlock();
751 return e;
752 }
753
754 int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
755 char *sym)
756 {
757 struct bpf_ksym *ksym;
758 unsigned int it = 0;
759 int ret = -ERANGE;
760
761 if (!bpf_jit_kallsyms_enabled())
762 return ret;
763
764 rcu_read_lock();
765 list_for_each_entry_rcu(ksym, &bpf_kallsyms, lnode) {
766 if (it++ != symnum)
767 continue;
768
769 strncpy(sym, ksym->name, KSYM_NAME_LEN);
770
771 *value = ksym->start;
772 *type = BPF_SYM_ELF_TYPE;
773
774 ret = 0;
775 break;
776 }
777 rcu_read_unlock();
778
779 return ret;
780 }
781
782 int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
783 struct bpf_jit_poke_descriptor *poke)
784 {
785 struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
786 static const u32 poke_tab_max = 1024;
787 u32 slot = prog->aux->size_poke_tab;
788 u32 size = slot + 1;
789
790 if (size > poke_tab_max)
791 return -ENOSPC;
792 if (poke->tailcall_target || poke->tailcall_target_stable ||
793 poke->tailcall_bypass || poke->adj_off || poke->bypass_addr)
794 return -EINVAL;
795
796 switch (poke->reason) {
797 case BPF_POKE_REASON_TAIL_CALL:
798 if (!poke->tail_call.map)
799 return -EINVAL;
800 break;
801 default:
802 return -EINVAL;
803 }
804
805 tab = krealloc(tab, size * sizeof(*poke), GFP_KERNEL);
806 if (!tab)
807 return -ENOMEM;
808
809 memcpy(&tab[slot], poke, sizeof(*poke));
810 prog->aux->size_poke_tab = size;
811 prog->aux->poke_tab = tab;
812
813 return slot;
814 }
815
816 static atomic_long_t bpf_jit_current;
817
818 /* Can be overridden by an arch's JIT compiler if it has a custom,
819 * dedicated BPF backend memory area, or if neither of the two
820 * below apply.
821 */
822 u64 __weak bpf_jit_alloc_exec_limit(void)
823 {
824 #if defined(MODULES_VADDR)
825 return MODULES_END - MODULES_VADDR;
826 #else
827 return VMALLOC_END - VMALLOC_START;
828 #endif
829 }
830
831 static int __init bpf_jit_charge_init(void)
832 {
833 /* Only used as heuristic here to derive limit. */
834 bpf_jit_limit_max = bpf_jit_alloc_exec_limit();
835 bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 1,
836 PAGE_SIZE), LONG_MAX);
837 return 0;
838 }
839 pure_initcall(bpf_jit_charge_init);
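/* Example (sizes assumed): with a 1 GiB module area, bpf_jit_limit_max is
 * 1 GiB and the default bpf_jit_limit comes out at 512 MiB, i.e. half the
 * backend area rounded up to a page. bpf_jit_limit is exposed as the
 * net.core.bpf_jit_limit sysctl; bpf_jit_limit_max serves as the upper bound
 * when that sysctl is written.
 */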
840
841 int bpf_jit_charge_modmem(u32 pages)
842 {
843 if (atomic_long_add_return(pages, &bpf_jit_current) >
844 (bpf_jit_limit >> PAGE_SHIFT)) {
845 if (!bpf_capable()) {
846 atomic_long_sub(pages, &bpf_jit_current);
847 return -EPERM;
848 }
849 }
850
851 return 0;
852 }
853
854 void bpf_jit_uncharge_modmem(u32 pages)
855 {
856 atomic_long_sub(pages, &bpf_jit_current);
857 }
858
859 void *__weak bpf_jit_alloc_exec(unsigned long size)
860 {
861 return module_alloc(size);
862 }
863
864 void __weak bpf_jit_free_exec(void *addr)
865 {
866 module_memfree(addr);
867 }
868
869 struct bpf_binary_header *
870 bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
871 unsigned int alignment,
872 bpf_jit_fill_hole_t bpf_fill_ill_insns)
873 {
874 struct bpf_binary_header *hdr;
875 u32 size, hole, start, pages;
876
877 WARN_ON_ONCE(!is_power_of_2(alignment) ||
878 alignment > BPF_IMAGE_ALIGNMENT);
879
880 /* Most BPF filters are really small, but if some of them
881 * fill a page, allow at least 128 extra bytes to insert a
882 * random section of illegal instructions.
883 */
884 size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
885 pages = size / PAGE_SIZE;
886
887 if (bpf_jit_charge_modmem(pages))
888 return NULL;
889 hdr = bpf_jit_alloc_exec(size);
890 if (!hdr) {
891 bpf_jit_uncharge_modmem(pages);
892 return NULL;
893 }
894
895 /* Fill space with illegal/arch-dep instructions. */
896 bpf_fill_ill_insns(hdr, size);
897
898 hdr->pages = pages;
899 hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
900 PAGE_SIZE - sizeof(*hdr));
901 start = (get_random_int() % hole) & ~(alignment - 1);
902
903 /* Leave a random number of instructions before BPF code. */
904 *image_ptr = &hdr->image[start];
905
906 return hdr;
907 }
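/* Resulting image layout (sketch):
 *
 *	+--------------------------+ <- hdr, page aligned
 *	| struct bpf_binary_header |
 *	| random hole              |
 *	| JITed image              | <- *image_ptr
 *	| illegal insn filler      |
 *	+--------------------------+ <- hdr + pages * PAGE_SIZE
 *
 * The random start offset and the illegal instruction filler make it harder
 * to guess where the JITed code actually lives.
 */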
908
909 void bpf_jit_binary_free(struct bpf_binary_header *hdr)
910 {
911 u32 pages = hdr->pages;
912
913 trace_android_vh_set_memory_rw((unsigned long)hdr, pages);
914 trace_android_vh_set_memory_nx((unsigned long)hdr, pages);
915 bpf_jit_free_exec(hdr);
916 bpf_jit_uncharge_modmem(pages);
917 }
918
919 /* This symbol is only overridden by archs that have different
920 * requirements than the usual eBPF JITs, f.e. when they only
921 * implement cBPF JIT, do not set images read-only, etc.
922 */
923 void __weak bpf_jit_free(struct bpf_prog *fp)
924 {
925 if (fp->jited) {
926 struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);
927
928 bpf_jit_binary_free(hdr);
929
930 WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
931 }
932
933 bpf_prog_unlock_free(fp);
934 }
935
936 int bpf_jit_get_func_addr(const struct bpf_prog *prog,
937 const struct bpf_insn *insn, bool extra_pass,
938 u64 *func_addr, bool *func_addr_fixed)
939 {
940 s16 off = insn->off;
941 s32 imm = insn->imm;
942 u8 *addr;
943
944 *func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL;
945 if (!*func_addr_fixed) {
946 /* Place-holder address till the last pass has collected
947 * all addresses for JITed subprograms in which case we
948 * can pick them up from prog->aux.
949 */
950 if (!extra_pass)
951 addr = NULL;
952 else if (prog->aux->func &&
953 off >= 0 && off < prog->aux->func_cnt)
954 addr = (u8 *)prog->aux->func[off]->bpf_func;
955 else
956 return -EINVAL;
957 } else {
958 /* Address of a BPF helper call. Since part of the core
959 * kernel, it's always at a fixed location. __bpf_call_base
960 * and the helper with imm relative to it are both in core
961 * kernel.
962 */
963 addr = (u8 *)__bpf_call_base + imm;
964 }
965
966 *func_addr = (unsigned long)addr;
967 return 0;
968 }
969
970 static int bpf_jit_blind_insn(const struct bpf_insn *from,
971 const struct bpf_insn *aux,
972 struct bpf_insn *to_buff,
973 bool emit_zext)
974 {
975 struct bpf_insn *to = to_buff;
976 u32 imm_rnd = get_random_int();
977 s16 off;
978
979 BUILD_BUG_ON(BPF_REG_AX + 1 != MAX_BPF_JIT_REG);
980 BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);
981
982 /* Constraints on AX register:
983 *
984 * AX register is inaccessible from user space. It is mapped in
985 * all JITs, and used here for constant blinding rewrites. It is
986 * typically "stateless" meaning its contents are only valid within
987 * the executed instruction, but not across several instructions.
988 * There are a few exceptions however which are further detailed
989 * below.
990 *
991 * Constant blinding is only used by JITs, not in the interpreter.
992 * The interpreter uses AX in some occasions as a local temporary
993 * register e.g. in DIV or MOD instructions.
994 *
995 * In restricted circumstances, the verifier can also use the AX
996 * register for rewrites as long as they do not interfere with
997 * the above cases!
998 */
999 if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX)
1000 goto out;
1001
1002 if (from->imm == 0 &&
1003 (from->code == (BPF_ALU | BPF_MOV | BPF_K) ||
1004 from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
1005 *to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
1006 goto out;
1007 }
1008
1009 switch (from->code) {
1010 case BPF_ALU | BPF_ADD | BPF_K:
1011 case BPF_ALU | BPF_SUB | BPF_K:
1012 case BPF_ALU | BPF_AND | BPF_K:
1013 case BPF_ALU | BPF_OR | BPF_K:
1014 case BPF_ALU | BPF_XOR | BPF_K:
1015 case BPF_ALU | BPF_MUL | BPF_K:
1016 case BPF_ALU | BPF_MOV | BPF_K:
1017 case BPF_ALU | BPF_DIV | BPF_K:
1018 case BPF_ALU | BPF_MOD | BPF_K:
1019 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1020 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1021 *to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX);
1022 break;
1023
1024 case BPF_ALU64 | BPF_ADD | BPF_K:
1025 case BPF_ALU64 | BPF_SUB | BPF_K:
1026 case BPF_ALU64 | BPF_AND | BPF_K:
1027 case BPF_ALU64 | BPF_OR | BPF_K:
1028 case BPF_ALU64 | BPF_XOR | BPF_K:
1029 case BPF_ALU64 | BPF_MUL | BPF_K:
1030 case BPF_ALU64 | BPF_MOV | BPF_K:
1031 case BPF_ALU64 | BPF_DIV | BPF_K:
1032 case BPF_ALU64 | BPF_MOD | BPF_K:
1033 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1034 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1035 *to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX);
1036 break;
1037
1038 case BPF_JMP | BPF_JEQ | BPF_K:
1039 case BPF_JMP | BPF_JNE | BPF_K:
1040 case BPF_JMP | BPF_JGT | BPF_K:
1041 case BPF_JMP | BPF_JLT | BPF_K:
1042 case BPF_JMP | BPF_JGE | BPF_K:
1043 case BPF_JMP | BPF_JLE | BPF_K:
1044 case BPF_JMP | BPF_JSGT | BPF_K:
1045 case BPF_JMP | BPF_JSLT | BPF_K:
1046 case BPF_JMP | BPF_JSGE | BPF_K:
1047 case BPF_JMP | BPF_JSLE | BPF_K:
1048 case BPF_JMP | BPF_JSET | BPF_K:
1049 /* Accommodate for extra offset in case of a backjump. */
1050 off = from->off;
1051 if (off < 0)
1052 off -= 2;
1053 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1054 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1055 *to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
1056 break;
1057
1058 case BPF_JMP32 | BPF_JEQ | BPF_K:
1059 case BPF_JMP32 | BPF_JNE | BPF_K:
1060 case BPF_JMP32 | BPF_JGT | BPF_K:
1061 case BPF_JMP32 | BPF_JLT | BPF_K:
1062 case BPF_JMP32 | BPF_JGE | BPF_K:
1063 case BPF_JMP32 | BPF_JLE | BPF_K:
1064 case BPF_JMP32 | BPF_JSGT | BPF_K:
1065 case BPF_JMP32 | BPF_JSLT | BPF_K:
1066 case BPF_JMP32 | BPF_JSGE | BPF_K:
1067 case BPF_JMP32 | BPF_JSLE | BPF_K:
1068 case BPF_JMP32 | BPF_JSET | BPF_K:
1069 /* Accommodate for extra offset in case of a backjump. */
1070 off = from->off;
1071 if (off < 0)
1072 off -= 2;
1073 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1074 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1075 *to++ = BPF_JMP32_REG(from->code, from->dst_reg, BPF_REG_AX,
1076 off);
1077 break;
1078
1079 case BPF_LD | BPF_IMM | BPF_DW:
1080 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
1081 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1082 *to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
1083 *to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
1084 break;
1085 case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
1086 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
1087 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1088 if (emit_zext)
1089 *to++ = BPF_ZEXT_REG(BPF_REG_AX);
1090 *to++ = BPF_ALU64_REG(BPF_OR, aux[0].dst_reg, BPF_REG_AX);
1091 break;
1092
1093 case BPF_ST | BPF_MEM | BPF_DW:
1094 case BPF_ST | BPF_MEM | BPF_W:
1095 case BPF_ST | BPF_MEM | BPF_H:
1096 case BPF_ST | BPF_MEM | BPF_B:
1097 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1098 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1099 *to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
1100 break;
1101 }
1102 out:
1103 return to - to_buff;
1104 }
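/* Blinding example (the random value is per-insn): an insn like
 *
 *	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1234)
 *
 * is rewritten into
 *
 *	BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ 0x1234)
 *	BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd)
 *	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_AX)
 *
 * so the user-controlled constant never appears literally in the JITed
 * image, only xor-masked with a random value.
 */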
1105
1106 static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
1107 gfp_t gfp_extra_flags)
1108 {
1109 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
1110 struct bpf_prog *fp;
1111
1112 fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags);
1113 if (fp != NULL) {
1114 /* aux->prog still points to the fp_other one, so
1115 * when promoting the clone to the real program,
1116 * this still needs to be adapted.
1117 */
1118 memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
1119 }
1120
1121 return fp;
1122 }
1123
1124 static void bpf_prog_clone_free(struct bpf_prog *fp)
1125 {
1126 /* aux was stolen by the other clone, so we cannot free
1127 * it from this path! It will be freed eventually by the
1128 * other program on release.
1129 *
1130 * At this point, we don't need a deferred release since
1131 * clone is guaranteed to not be locked.
1132 */
1133 fp->aux = NULL;
1134 __bpf_prog_free(fp);
1135 }
1136
1137 void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
1138 {
1139 /* We have to repoint aux->prog to self, as we don't
1140 * know whether fp here is the clone or the original.
1141 */
1142 fp->aux->prog = fp;
1143 bpf_prog_clone_free(fp_other);
1144 }
1145
1146 struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
1147 {
1148 struct bpf_insn insn_buff[16], aux[2];
1149 struct bpf_prog *clone, *tmp;
1150 int insn_delta, insn_cnt;
1151 struct bpf_insn *insn;
1152 int i, rewritten;
1153
1154 if (!bpf_jit_blinding_enabled(prog) || prog->blinded)
1155 return prog;
1156
1157 clone = bpf_prog_clone_create(prog, GFP_USER);
1158 if (!clone)
1159 return ERR_PTR(-ENOMEM);
1160
1161 insn_cnt = clone->len;
1162 insn = clone->insnsi;
1163
1164 for (i = 0; i < insn_cnt; i++, insn++) {
1165 /* We temporarily need to hold the original ld64 insn
1166 * so that we can still access the first part in the
1167 * second blinding run.
1168 */
1169 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
1170 insn[1].code == 0)
1171 memcpy(aux, insn, sizeof(aux));
1172
1173 rewritten = bpf_jit_blind_insn(insn, aux, insn_buff,
1174 clone->aux->verifier_zext);
1175 if (!rewritten)
1176 continue;
1177
1178 tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
1179 if (IS_ERR(tmp)) {
1180 /* Patching may have repointed aux->prog during
1181 * realloc from the original one, so we need to
1182 * fix it up here on error.
1183 */
1184 bpf_jit_prog_release_other(prog, clone);
1185 return tmp;
1186 }
1187
1188 clone = tmp;
1189 insn_delta = rewritten - 1;
1190
1191 /* Walk new program and skip insns we just inserted. */
1192 insn = clone->insnsi + i + insn_delta;
1193 insn_cnt += insn_delta;
1194 i += insn_delta;
1195 }
1196
1197 clone->blinded = 1;
1198 return clone;
1199 }
1200 #endif /* CONFIG_BPF_JIT */
1201
1202 /* Base function for offset calculation. Needs to go into .text section,
1203 * therefore keeping it non-static as well; will also be used by JITs
1204 * anyway later on, so do not let the compiler omit it. This also needs
1205 * to go into kallsyms for correlation from e.g. bpftool, so naming
1206 * must not change.
1207 */
1208 noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
1209 {
1210 return 0;
1211 }
1212 EXPORT_SYMBOL_GPL(__bpf_call_base);
1213
1214 /* All UAPI available opcodes. */
1215 #define BPF_INSN_MAP(INSN_2, INSN_3) \
1216 /* 32 bit ALU operations. */ \
1217 /* Register based. */ \
1218 INSN_3(ALU, ADD, X), \
1219 INSN_3(ALU, SUB, X), \
1220 INSN_3(ALU, AND, X), \
1221 INSN_3(ALU, OR, X), \
1222 INSN_3(ALU, LSH, X), \
1223 INSN_3(ALU, RSH, X), \
1224 INSN_3(ALU, XOR, X), \
1225 INSN_3(ALU, MUL, X), \
1226 INSN_3(ALU, MOV, X), \
1227 INSN_3(ALU, ARSH, X), \
1228 INSN_3(ALU, DIV, X), \
1229 INSN_3(ALU, MOD, X), \
1230 INSN_2(ALU, NEG), \
1231 INSN_3(ALU, END, TO_BE), \
1232 INSN_3(ALU, END, TO_LE), \
1233 /* Immediate based. */ \
1234 INSN_3(ALU, ADD, K), \
1235 INSN_3(ALU, SUB, K), \
1236 INSN_3(ALU, AND, K), \
1237 INSN_3(ALU, OR, K), \
1238 INSN_3(ALU, LSH, K), \
1239 INSN_3(ALU, RSH, K), \
1240 INSN_3(ALU, XOR, K), \
1241 INSN_3(ALU, MUL, K), \
1242 INSN_3(ALU, MOV, K), \
1243 INSN_3(ALU, ARSH, K), \
1244 INSN_3(ALU, DIV, K), \
1245 INSN_3(ALU, MOD, K), \
1246 /* 64 bit ALU operations. */ \
1247 /* Register based. */ \
1248 INSN_3(ALU64, ADD, X), \
1249 INSN_3(ALU64, SUB, X), \
1250 INSN_3(ALU64, AND, X), \
1251 INSN_3(ALU64, OR, X), \
1252 INSN_3(ALU64, LSH, X), \
1253 INSN_3(ALU64, RSH, X), \
1254 INSN_3(ALU64, XOR, X), \
1255 INSN_3(ALU64, MUL, X), \
1256 INSN_3(ALU64, MOV, X), \
1257 INSN_3(ALU64, ARSH, X), \
1258 INSN_3(ALU64, DIV, X), \
1259 INSN_3(ALU64, MOD, X), \
1260 INSN_2(ALU64, NEG), \
1261 /* Immediate based. */ \
1262 INSN_3(ALU64, ADD, K), \
1263 INSN_3(ALU64, SUB, K), \
1264 INSN_3(ALU64, AND, K), \
1265 INSN_3(ALU64, OR, K), \
1266 INSN_3(ALU64, LSH, K), \
1267 INSN_3(ALU64, RSH, K), \
1268 INSN_3(ALU64, XOR, K), \
1269 INSN_3(ALU64, MUL, K), \
1270 INSN_3(ALU64, MOV, K), \
1271 INSN_3(ALU64, ARSH, K), \
1272 INSN_3(ALU64, DIV, K), \
1273 INSN_3(ALU64, MOD, K), \
1274 /* Call instruction. */ \
1275 INSN_2(JMP, CALL), \
1276 /* Exit instruction. */ \
1277 INSN_2(JMP, EXIT), \
1278 /* 32-bit Jump instructions. */ \
1279 /* Register based. */ \
1280 INSN_3(JMP32, JEQ, X), \
1281 INSN_3(JMP32, JNE, X), \
1282 INSN_3(JMP32, JGT, X), \
1283 INSN_3(JMP32, JLT, X), \
1284 INSN_3(JMP32, JGE, X), \
1285 INSN_3(JMP32, JLE, X), \
1286 INSN_3(JMP32, JSGT, X), \
1287 INSN_3(JMP32, JSLT, X), \
1288 INSN_3(JMP32, JSGE, X), \
1289 INSN_3(JMP32, JSLE, X), \
1290 INSN_3(JMP32, JSET, X), \
1291 /* Immediate based. */ \
1292 INSN_3(JMP32, JEQ, K), \
1293 INSN_3(JMP32, JNE, K), \
1294 INSN_3(JMP32, JGT, K), \
1295 INSN_3(JMP32, JLT, K), \
1296 INSN_3(JMP32, JGE, K), \
1297 INSN_3(JMP32, JLE, K), \
1298 INSN_3(JMP32, JSGT, K), \
1299 INSN_3(JMP32, JSLT, K), \
1300 INSN_3(JMP32, JSGE, K), \
1301 INSN_3(JMP32, JSLE, K), \
1302 INSN_3(JMP32, JSET, K), \
1303 /* Jump instructions. */ \
1304 /* Register based. */ \
1305 INSN_3(JMP, JEQ, X), \
1306 INSN_3(JMP, JNE, X), \
1307 INSN_3(JMP, JGT, X), \
1308 INSN_3(JMP, JLT, X), \
1309 INSN_3(JMP, JGE, X), \
1310 INSN_3(JMP, JLE, X), \
1311 INSN_3(JMP, JSGT, X), \
1312 INSN_3(JMP, JSLT, X), \
1313 INSN_3(JMP, JSGE, X), \
1314 INSN_3(JMP, JSLE, X), \
1315 INSN_3(JMP, JSET, X), \
1316 /* Immediate based. */ \
1317 INSN_3(JMP, JEQ, K), \
1318 INSN_3(JMP, JNE, K), \
1319 INSN_3(JMP, JGT, K), \
1320 INSN_3(JMP, JLT, K), \
1321 INSN_3(JMP, JGE, K), \
1322 INSN_3(JMP, JLE, K), \
1323 INSN_3(JMP, JSGT, K), \
1324 INSN_3(JMP, JSLT, K), \
1325 INSN_3(JMP, JSGE, K), \
1326 INSN_3(JMP, JSLE, K), \
1327 INSN_3(JMP, JSET, K), \
1328 INSN_2(JMP, JA), \
1329 /* Store instructions. */ \
1330 /* Register based. */ \
1331 INSN_3(STX, MEM, B), \
1332 INSN_3(STX, MEM, H), \
1333 INSN_3(STX, MEM, W), \
1334 INSN_3(STX, MEM, DW), \
1335 INSN_3(STX, XADD, W), \
1336 INSN_3(STX, XADD, DW), \
1337 /* Immediate based. */ \
1338 INSN_3(ST, MEM, B), \
1339 INSN_3(ST, MEM, H), \
1340 INSN_3(ST, MEM, W), \
1341 INSN_3(ST, MEM, DW), \
1342 /* Load instructions. */ \
1343 /* Register based. */ \
1344 INSN_3(LDX, MEM, B), \
1345 INSN_3(LDX, MEM, H), \
1346 INSN_3(LDX, MEM, W), \
1347 INSN_3(LDX, MEM, DW), \
1348 /* Immediate based. */ \
1349 INSN_3(LD, IMM, DW)
1350
1351 bool bpf_opcode_in_insntable(u8 code)
1352 {
1353 #define BPF_INSN_2_TBL(x, y) [BPF_##x | BPF_##y] = true
1354 #define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true
1355 static const bool public_insntable[256] = {
1356 [0 ... 255] = false,
1357 /* Now overwrite non-defaults ... */
1358 BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL),
1359 /* UAPI exposed, but rewritten opcodes. cBPF carry-over. */
1360 [BPF_LD | BPF_ABS | BPF_B] = true,
1361 [BPF_LD | BPF_ABS | BPF_H] = true,
1362 [BPF_LD | BPF_ABS | BPF_W] = true,
1363 [BPF_LD | BPF_IND | BPF_B] = true,
1364 [BPF_LD | BPF_IND | BPF_H] = true,
1365 [BPF_LD | BPF_IND | BPF_W] = true,
1366 };
1367 #undef BPF_INSN_3_TBL
1368 #undef BPF_INSN_2_TBL
1369 return public_insntable[code];
1370 }
1371
1372 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
1373 u64 __weak bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)
1374 {
1375 memset(dst, 0, size);
1376 return -EFAULT;
1377 }
1378
1379 /**
1380 * ___bpf_prog_run - run eBPF program on a given context
1381 * @regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers
1382 * @insn: is the array of eBPF instructions
1383 * @stack: is the eBPF storage stack
1384 *
1385 * Decode and execute eBPF instructions.
1386 */
1387 static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
1388 {
1389 #define BPF_INSN_2_LBL(x, y) [BPF_##x | BPF_##y] = &&x##_##y
1390 #define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
1391 static const void * const jumptable[256] __annotate_jump_table = {
1392 [0 ... 255] = &&default_label,
1393 /* Now overwrite non-defaults ... */
1394 BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
1395 /* Non-UAPI available opcodes. */
1396 [BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
1397 [BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
1398 [BPF_ST | BPF_NOSPEC] = &&ST_NOSPEC,
1399 [BPF_LDX | BPF_PROBE_MEM | BPF_B] = &&LDX_PROBE_MEM_B,
1400 [BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H,
1401 [BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W,
1402 [BPF_LDX | BPF_PROBE_MEM | BPF_DW] = &&LDX_PROBE_MEM_DW,
1403 };
1404 #undef BPF_INSN_3_LBL
1405 #undef BPF_INSN_2_LBL
1406 u32 tail_call_cnt = 0;
1407
1408 #define CONT ({ insn++; goto select_insn; })
1409 #define CONT_JMP ({ insn++; goto select_insn; })
1410
1411 select_insn:
1412 goto *jumptable[insn->code];
1413
1414 /* Explicitly mask the register-based shift amounts with 63 or 31
1415 * to avoid undefined behavior. Normally this won't affect the
1416 * generated code, for example, in case of native 64 bit archs such
1417 * as x86-64 or arm64, the compiler is optimizing the AND away for
1418 * the interpreter. In case of JITs, each of the JIT backends compiles
1419 * the BPF shift operations to machine instructions which produce
1420 * implementation-defined results in such a case; the resulting
1421 * contents of the register may be arbitrary, but program behaviour
1422 * as a whole remains defined. In other words, in case of JIT backends,
1423 * the AND must /not/ be added to the emitted LSH/RSH/ARSH translation.
1424 */
1425 /* ALU (shifts) */
1426 #define SHT(OPCODE, OP) \
1427 ALU64_##OPCODE##_X: \
1428 DST = DST OP (SRC & 63); \
1429 CONT; \
1430 ALU_##OPCODE##_X: \
1431 DST = (u32) DST OP ((u32) SRC & 31); \
1432 CONT; \
1433 ALU64_##OPCODE##_K: \
1434 DST = DST OP IMM; \
1435 CONT; \
1436 ALU_##OPCODE##_K: \
1437 DST = (u32) DST OP (u32) IMM; \
1438 CONT;
1439 /* ALU (rest) */
1440 #define ALU(OPCODE, OP) \
1441 ALU64_##OPCODE##_X: \
1442 DST = DST OP SRC; \
1443 CONT; \
1444 ALU_##OPCODE##_X: \
1445 DST = (u32) DST OP (u32) SRC; \
1446 CONT; \
1447 ALU64_##OPCODE##_K: \
1448 DST = DST OP IMM; \
1449 CONT; \
1450 ALU_##OPCODE##_K: \
1451 DST = (u32) DST OP (u32) IMM; \
1452 CONT;
1453 ALU(ADD, +)
1454 ALU(SUB, -)
1455 ALU(AND, &)
1456 ALU(OR, |)
1457 ALU(XOR, ^)
1458 ALU(MUL, *)
1459 SHT(LSH, <<)
1460 SHT(RSH, >>)
1461 #undef SHT
1462 #undef ALU
1463 ALU_NEG:
1464 DST = (u32) -DST;
1465 CONT;
1466 ALU64_NEG:
1467 DST = -DST;
1468 CONT;
1469 ALU_MOV_X:
1470 DST = (u32) SRC;
1471 CONT;
1472 ALU_MOV_K:
1473 DST = (u32) IMM;
1474 CONT;
1475 ALU64_MOV_X:
1476 DST = SRC;
1477 CONT;
1478 ALU64_MOV_K:
1479 DST = IMM;
1480 CONT;
1481 LD_IMM_DW:
1482 DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
1483 insn++;
1484 CONT;
1485 ALU_ARSH_X:
1486 DST = (u64) (u32) (((s32) DST) >> (SRC & 31));
1487 CONT;
1488 ALU_ARSH_K:
1489 DST = (u64) (u32) (((s32) DST) >> IMM);
1490 CONT;
1491 ALU64_ARSH_X:
1492 (*(s64 *) &DST) >>= (SRC & 63);
1493 CONT;
1494 ALU64_ARSH_K:
1495 (*(s64 *) &DST) >>= IMM;
1496 CONT;
1497 ALU64_MOD_X:
1498 div64_u64_rem(DST, SRC, &AX);
1499 DST = AX;
1500 CONT;
1501 ALU_MOD_X:
1502 AX = (u32) DST;
1503 DST = do_div(AX, (u32) SRC);
1504 CONT;
1505 ALU64_MOD_K:
1506 div64_u64_rem(DST, IMM, &AX);
1507 DST = AX;
1508 CONT;
1509 ALU_MOD_K:
1510 AX = (u32) DST;
1511 DST = do_div(AX, (u32) IMM);
1512 CONT;
1513 ALU64_DIV_X:
1514 DST = div64_u64(DST, SRC);
1515 CONT;
1516 ALU_DIV_X:
1517 AX = (u32) DST;
1518 do_div(AX, (u32) SRC);
1519 DST = (u32) AX;
1520 CONT;
1521 ALU64_DIV_K:
1522 DST = div64_u64(DST, IMM);
1523 CONT;
1524 ALU_DIV_K:
1525 AX = (u32) DST;
1526 do_div(AX, (u32) IMM);
1527 DST = (u32) AX;
1528 CONT;
1529 ALU_END_TO_BE:
1530 switch (IMM) {
1531 case 16:
1532 DST = (__force u16) cpu_to_be16(DST);
1533 break;
1534 case 32:
1535 DST = (__force u32) cpu_to_be32(DST);
1536 break;
1537 case 64:
1538 DST = (__force u64) cpu_to_be64(DST);
1539 break;
1540 }
1541 CONT;
1542 ALU_END_TO_LE:
1543 switch (IMM) {
1544 case 16:
1545 DST = (__force u16) cpu_to_le16(DST);
1546 break;
1547 case 32:
1548 DST = (__force u32) cpu_to_le32(DST);
1549 break;
1550 case 64:
1551 DST = (__force u64) cpu_to_le64(DST);
1552 break;
1553 }
1554 CONT;
1555
1556 /* CALL */
1557 JMP_CALL:
1558 /* Function call scratches BPF_R1-BPF_R5 registers,
1559 * preserves BPF_R6-BPF_R9, and stores return value
1560 * into BPF_R0.
1561 */
1562 BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
1563 BPF_R4, BPF_R5);
1564 CONT;
1565
1566 JMP_CALL_ARGS:
1567 BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
1568 BPF_R3, BPF_R4,
1569 BPF_R5,
1570 insn + insn->off + 1);
1571 CONT;
1572
1573 JMP_TAIL_CALL: {
1574 struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
1575 struct bpf_array *array = container_of(map, struct bpf_array, map);
1576 struct bpf_prog *prog;
1577 u32 index = BPF_R3;
1578
1579 if (unlikely(index >= array->map.max_entries))
1580 goto out;
1581 if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT))
1582 goto out;
1583
1584 tail_call_cnt++;
1585
1586 prog = READ_ONCE(array->ptrs[index]);
1587 if (!prog)
1588 goto out;
1589
1590 /* ARG1 at this point is guaranteed to point to CTX from
1591 * the verifier side due to the fact that the tail call is
1592 * handled like a helper, that is, bpf_tail_call_proto,
1593 * where arg1_type is ARG_PTR_TO_CTX.
1594 */
1595 insn = prog->insnsi;
1596 goto select_insn;
1597 out:
1598 CONT;
1599 }
1600 JMP_JA:
1601 insn += insn->off;
1602 CONT;
1603 JMP_EXIT:
1604 return BPF_R0;
1605 /* JMP */
1606 #define COND_JMP(SIGN, OPCODE, CMP_OP) \
1607 JMP_##OPCODE##_X: \
1608 if ((SIGN##64) DST CMP_OP (SIGN##64) SRC) { \
1609 insn += insn->off; \
1610 CONT_JMP; \
1611 } \
1612 CONT; \
1613 JMP32_##OPCODE##_X: \
1614 if ((SIGN##32) DST CMP_OP (SIGN##32) SRC) { \
1615 insn += insn->off; \
1616 CONT_JMP; \
1617 } \
1618 CONT; \
1619 JMP_##OPCODE##_K: \
1620 if ((SIGN##64) DST CMP_OP (SIGN##64) IMM) { \
1621 insn += insn->off; \
1622 CONT_JMP; \
1623 } \
1624 CONT; \
1625 JMP32_##OPCODE##_K: \
1626 if ((SIGN##32) DST CMP_OP (SIGN##32) IMM) { \
1627 insn += insn->off; \
1628 CONT_JMP; \
1629 } \
1630 CONT;
1631 COND_JMP(u, JEQ, ==)
1632 COND_JMP(u, JNE, !=)
1633 COND_JMP(u, JGT, >)
1634 COND_JMP(u, JLT, <)
1635 COND_JMP(u, JGE, >=)
1636 COND_JMP(u, JLE, <=)
1637 COND_JMP(u, JSET, &)
1638 COND_JMP(s, JSGT, >)
1639 COND_JMP(s, JSLT, <)
1640 COND_JMP(s, JSGE, >=)
1641 COND_JMP(s, JSLE, <=)
1642 #undef COND_JMP
1643 /* ST, STX and LDX*/
1644 ST_NOSPEC:
1645 /* Speculation barrier for mitigating Speculative Store Bypass.
1646 * In case of arm64, we rely on the firmware mitigation as
1647 * controlled via the ssbd kernel parameter. Whenever the
1648 * mitigation is enabled, it works for all of the kernel code
1649 * with no need to provide any additional instructions here.
1650 * In case of x86, we use 'lfence' insn for mitigation. We
1651 * reuse preexisting logic from Spectre v1 mitigation that
1652 * happens to produce the required code on x86 for v4 as well.
1653 */
1654 barrier_nospec();
1655 CONT;
1656 #define LDST(SIZEOP, SIZE) \
1657 STX_MEM_##SIZEOP: \
1658 *(SIZE *)(unsigned long) (DST + insn->off) = SRC; \
1659 CONT; \
1660 ST_MEM_##SIZEOP: \
1661 *(SIZE *)(unsigned long) (DST + insn->off) = IMM; \
1662 CONT; \
1663 LDX_MEM_##SIZEOP: \
1664 DST = *(SIZE *)(unsigned long) (SRC + insn->off); \
1665 CONT; \
1666 LDX_PROBE_MEM_##SIZEOP: \
1667 bpf_probe_read_kernel(&DST, sizeof(SIZE), \
1668 (const void *)(long) (SRC + insn->off)); \
1669 DST = *((SIZE *)&DST); \
1670 CONT;
1671
1672 LDST(B, u8)
1673 LDST(H, u16)
1674 LDST(W, u32)
1675 LDST(DW, u64)
1676 #undef LDST
1677
1678 STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
1679 atomic_add((u32) SRC, (atomic_t *)(unsigned long)
1680 (DST + insn->off));
1681 CONT;
1682 STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
1683 atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
1684 (DST + insn->off));
1685 CONT;
1686
1687 default_label:
1688 /* If we ever reach this, we have a bug somewhere. Die hard here
1689 * instead of just returning 0; we could be somewhere in a subprog,
1690 * so execution could continue otherwise which we do /not/ want.
1691 *
1692 * Note, verifier whitelists all opcodes in bpf_opcode_in_insntable().
1693 */
1694 pr_warn("BPF interpreter: unknown opcode %02x\n", insn->code);
1695 BUG_ON(1);
1696 return 0;
1697 }
1698
1699 #define PROG_NAME(stack_size) __bpf_prog_run##stack_size
1700 #define DEFINE_BPF_PROG_RUN(stack_size) \
1701 static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
1702 { \
1703 u64 stack[stack_size / sizeof(u64)]; \
1704 u64 regs[MAX_BPF_EXT_REG]; \
1705 \
1706 FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
1707 ARG1 = (u64) (unsigned long) ctx; \
1708 return ___bpf_prog_run(regs, insn, stack); \
1709 }
1710
1711 #define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
1712 #define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
1713 static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
1714 const struct bpf_insn *insn) \
1715 { \
1716 u64 stack[stack_size / sizeof(u64)]; \
1717 u64 regs[MAX_BPF_EXT_REG]; \
1718 \
1719 FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
1720 BPF_R1 = r1; \
1721 BPF_R2 = r2; \
1722 BPF_R3 = r3; \
1723 BPF_R4 = r4; \
1724 BPF_R5 = r5; \
1725 return ___bpf_prog_run(regs, insn, stack); \
1726 }
1727
1728 #define EVAL1(FN, X) FN(X)
1729 #define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
1730 #define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
1731 #define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
1732 #define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
1733 #define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)
1734
1735 EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
1736 EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
1737 EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);
1738
1739 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
1740 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
1741 EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);
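/* The EVAL*() lines above expand DEFINE_BPF_PROG_RUN(_ARGS) once per stack
 * size, generating __bpf_prog_run32() ... __bpf_prog_run512() and the
 * corresponding __bpf_prog_run_args*() variants, each with a fixed on-stack
 * BPF stack of that many bytes. bpf_prog_select_func() later picks the
 * variant matching the verifier-computed stack depth, rounded up to a
 * multiple of 32 bytes.
 */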
1742
1743 #define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),
1744
1745 static unsigned int (*interpreters[])(const void *ctx,
1746 const struct bpf_insn *insn) = {
1747 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
1748 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
1749 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
1750 };
1751 #undef PROG_NAME_LIST
1752 #define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
1753 static u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
1754 const struct bpf_insn *insn) = {
1755 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
1756 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
1757 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
1758 };
1759 #undef PROG_NAME_LIST
1760
1761 void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
1762 {
1763 stack_depth = max_t(u32, stack_depth, 1);
1764 insn->off = (s16) insn->imm;
1765 insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
1766 __bpf_call_base_args;
1767 insn->code = BPF_JMP | BPF_CALL_ARGS;
1768 }
1769
1770 #else
1771 static unsigned int __bpf_prog_ret0_warn(const void *ctx,
1772 const struct bpf_insn *insn)
1773 {
1774 /* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
1775 * is not working properly, so warn about it!
1776 */
1777 WARN_ON_ONCE(1);
1778 return 0;
1779 }
1780 #endif
1781
1782 bool bpf_prog_array_compatible(struct bpf_array *array,
1783 const struct bpf_prog *fp)
1784 {
1785 bool ret;
1786
1787 if (fp->kprobe_override)
1788 return false;
1789
1790 spin_lock(&array->aux->owner.lock);
1791
1792 if (!array->aux->owner.type) {
1793 /* There's no owner yet where we could check for
1794 * compatibility.
1795 */
1796 array->aux->owner.type = fp->type;
1797 array->aux->owner.jited = fp->jited;
1798 ret = true;
1799 } else {
1800 ret = array->aux->owner.type == fp->type &&
1801 array->aux->owner.jited == fp->jited;
1802 }
1803 spin_unlock(&array->aux->owner.lock);
1804 return ret;
1805 }
1806
1807 static int bpf_check_tail_call(const struct bpf_prog *fp)
1808 {
1809 struct bpf_prog_aux *aux = fp->aux;
1810 int i, ret = 0;
1811
1812 mutex_lock(&aux->used_maps_mutex);
1813 for (i = 0; i < aux->used_map_cnt; i++) {
1814 struct bpf_map *map = aux->used_maps[i];
1815 struct bpf_array *array;
1816
1817 if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
1818 continue;
1819
1820 array = container_of(map, struct bpf_array, map);
1821 if (!bpf_prog_array_compatible(array, fp)) {
1822 ret = -EINVAL;
1823 goto out;
1824 }
1825 }
1826
1827 out:
1828 mutex_unlock(&aux->used_maps_mutex);
1829 return ret;
1830 }
1831
1832 static void bpf_prog_select_func(struct bpf_prog *fp)
1833 {
1834 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
1835 u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
1836
1837 fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
1838 #else
1839 fp->bpf_func = __bpf_prog_ret0_warn;
1840 #endif
1841 }
1842
1843 /**
1844 * bpf_prog_select_runtime - select exec runtime for BPF program
1845 * @fp: bpf_prog populated with internal BPF program
1846 * @err: pointer to error variable
1847 *
1848 * Try to JIT the eBPF program; if JIT is not available, fall back to the interpreter.
1849 * The BPF program will be executed via the BPF_PROG_RUN() macro.
1850 */
1851 struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
1852 {
1853 /* In case of BPF-to-BPF calls, the verifier already did all the
1854  * prep work with regard to JITing, etc.
1855 */
1856 if (fp->bpf_func)
1857 goto finalize;
1858
1859 bpf_prog_select_func(fp);
1860
1861 /* eBPF JITs can rewrite the program in case constant
1862 * blinding is active. However, in case of error during
1863 * blinding, bpf_int_jit_compile() must always return a
1864 * valid program, which in this case would simply not
1865  * be JITed and would fall back to the interpreter.
1866 */
1867 if (!bpf_prog_is_dev_bound(fp->aux)) {
1868 *err = bpf_prog_alloc_jited_linfo(fp);
1869 if (*err)
1870 return fp;
1871
1872 fp = bpf_int_jit_compile(fp);
1873 if (!fp->jited) {
1874 bpf_prog_free_jited_linfo(fp);
1875 #ifdef CONFIG_BPF_JIT_ALWAYS_ON
1876 *err = -ENOTSUPP;
1877 return fp;
1878 #endif
1879 } else {
1880 bpf_prog_free_unused_jited_linfo(fp);
1881 }
1882 } else {
1883 *err = bpf_prog_offload_compile(fp);
1884 if (*err)
1885 return fp;
1886 }
1887
1888 finalize:
1889 bpf_prog_lock_ro(fp);
1890
1891 /* The tail call compatibility check can only be done at
1892  * this late stage, as we need to determine whether we deal
1893  * with JITed or non-JITed program concatenations, and not
1894 * all eBPF JITs might immediately support all features.
1895 */
1896 *err = bpf_check_tail_call(fp);
1897
1898 return fp;
1899 }
1900 EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
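/* A minimal caller sketch (the load path and the free_prog label are
 * hypothetical here; the real caller lives in the bpf syscall code):
 *
 *	fp = bpf_prog_select_runtime(fp, &err);
 *	if (err)
 *		goto free_prog;
 *
 * The function always returns a program pointer, so even on error fp
 * remains valid and must be freed by the caller; failure is reported
 * solely through *err.
 */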
1901
1902 static unsigned int __bpf_prog_ret1(const void *ctx,
1903 const struct bpf_insn *insn)
1904 {
1905 return 1;
1906 }
1907
1908 static struct bpf_prog_dummy {
1909 struct bpf_prog prog;
1910 } dummy_bpf_prog = {
1911 .prog = {
1912 .bpf_func = __bpf_prog_ret1,
1913 },
1914 };
1915
1916 /* To avoid allocating an empty bpf_prog_array for cgroups that
1917  * don't have a bpf program attached, use one global 'empty_prog_array'.
1918  * It will not be modified by the caller of bpf_prog_array_alloc()
1919  * (since the caller requested prog_cnt == 0);
1920  * that pointer should still be 'freed' via bpf_prog_array_free().
1921  */
1922 static struct {
1923 struct bpf_prog_array hdr;
1924 struct bpf_prog *null_prog;
1925 } empty_prog_array = {
1926 .null_prog = NULL,
1927 };
1928
1929 struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
1930 {
1931 if (prog_cnt)
1932 return kzalloc(sizeof(struct bpf_prog_array) +
1933 sizeof(struct bpf_prog_array_item) *
1934 (prog_cnt + 1),
1935 flags);
1936
1937 return &empty_prog_array.hdr;
1938 }
1939
1940 void bpf_prog_array_free(struct bpf_prog_array *progs)
1941 {
1942 if (!progs || progs == &empty_prog_array.hdr)
1943 return;
1944 kfree_rcu(progs, rcu);
1945 }
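/* A minimal usage sketch: requesting zero programs hands back the
 * shared empty_prog_array above instead of a fresh allocation, and
 * bpf_prog_array_free() recognizes that shared object:
 *
 *	struct bpf_prog_array *arr;
 *
 *	arr = bpf_prog_array_alloc(0, GFP_KERNEL);
 *	...
 *	bpf_prog_array_free(arr);	(no kfree_rcu for the shared object)
 */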
1946
1947 int bpf_prog_array_length(struct bpf_prog_array *array)
1948 {
1949 struct bpf_prog_array_item *item;
1950 u32 cnt = 0;
1951
1952 for (item = array->items; item->prog; item++)
1953 if (item->prog != &dummy_bpf_prog.prog)
1954 cnt++;
1955 return cnt;
1956 }
1957
1958 bool bpf_prog_array_is_empty(struct bpf_prog_array *array)
1959 {
1960 struct bpf_prog_array_item *item;
1961
1962 for (item = array->items; item->prog; item++)
1963 if (item->prog != &dummy_bpf_prog.prog)
1964 return false;
1965 return true;
1966 }
1967
1968 static bool bpf_prog_array_copy_core(struct bpf_prog_array *array,
1969 u32 *prog_ids,
1970 u32 request_cnt)
1971 {
1972 struct bpf_prog_array_item *item;
1973 int i = 0;
1974
1975 for (item = array->items; item->prog; item++) {
1976 if (item->prog == &dummy_bpf_prog.prog)
1977 continue;
1978 prog_ids[i] = item->prog->aux->id;
1979 if (++i == request_cnt) {
1980 item++;
1981 break;
1982 }
1983 }
1984
1985 return !!(item->prog);
1986 }
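/* A worked example: with an array holding programs {A, B, C} and no
 * dummy entries, request_cnt == 2 stores the ids of A and B and
 * returns true because C is left over; the callers below turn that
 * "more entries remain" result into -ENOSPC.
 */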
1987
1988 int bpf_prog_array_copy_to_user(struct bpf_prog_array *array,
1989 __u32 __user *prog_ids, u32 cnt)
1990 {
1991 unsigned long err = 0;
1992 bool nospc;
1993 u32 *ids;
1994
1995 /* users of this function are doing:
1996 * cnt = bpf_prog_array_length();
1997 * if (cnt > 0)
1998 * bpf_prog_array_copy_to_user(..., cnt);
1999 * so below kcalloc doesn't need extra cnt > 0 check.
2000 */
2001 ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
2002 if (!ids)
2003 return -ENOMEM;
2004 nospc = bpf_prog_array_copy_core(array, ids, cnt);
2005 err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
2006 kfree(ids);
2007 if (err)
2008 return -EFAULT;
2009 if (nospc)
2010 return -ENOSPC;
2011 return 0;
2012 }
2013
2014 void bpf_prog_array_delete_safe(struct bpf_prog_array *array,
2015 struct bpf_prog *old_prog)
2016 {
2017 struct bpf_prog_array_item *item;
2018
2019 for (item = array->items; item->prog; item++)
2020 if (item->prog == old_prog) {
2021 WRITE_ONCE(item->prog, &dummy_bpf_prog.prog);
2022 break;
2023 }
2024 }
2025
2026 /**
2027 * bpf_prog_array_delete_safe_at() - Replaces the program at the given
2028 * index into the program array with
2029 * a dummy no-op program.
2030 * @array: a bpf_prog_array
2031 * @index: the index of the program to replace
2032 *
2033 * Skips over dummy programs, by not counting them, when calculating
2034 * the position of the program to replace.
2035 *
2036 * Return:
2037 * * 0 - Success
2038 * * -EINVAL - Invalid index value. Must be a non-negative integer.
2039 * * -ENOENT - Index out of range
2040 */
2041 int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index)
2042 {
2043 return bpf_prog_array_update_at(array, index, &dummy_bpf_prog.prog);
2044 }
2045
2046 /**
2047 * bpf_prog_array_update_at() - Updates the program at the given index
2048 * into the program array.
2049 * @array: a bpf_prog_array
2050 * @index: the index of the program to update
2051 * @prog: the program to insert into the array
2052 *
2053 * Skips over dummy programs, by not counting them, when calculating
2054 * the position of the program to update.
2055 *
2056 * Return:
2057 * * 0 - Success
2058 * * -EINVAL - Invalid index value. Must be a non-negative integer.
2059 * * -ENOENT - Index out of range
2060 */
2061 int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
2062 struct bpf_prog *prog)
2063 {
2064 struct bpf_prog_array_item *item;
2065
2066 if (unlikely(index < 0))
2067 return -EINVAL;
2068
2069 for (item = array->items; item->prog; item++) {
2070 if (item->prog == &dummy_bpf_prog.prog)
2071 continue;
2072 if (!index) {
2073 WRITE_ONCE(item->prog, prog);
2074 return 0;
2075 }
2076 index--;
2077 }
2078 return -ENOENT;
2079 }
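/* A worked example of the index semantics: for an array whose items
 * are {A, dummy, B, NULL}, index 0 refers to A and index 1 refers to
 * B, because dummy entries are skipped rather than counted. The same
 * rule applies to bpf_prog_array_delete_safe_at(), which goes through
 * this function.
 */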
2080
2081 int bpf_prog_array_copy(struct bpf_prog_array *old_array,
2082 struct bpf_prog *exclude_prog,
2083 struct bpf_prog *include_prog,
2084 struct bpf_prog_array **new_array)
2085 {
2086 int new_prog_cnt, carry_prog_cnt = 0;
2087 struct bpf_prog_array_item *existing;
2088 struct bpf_prog_array *array;
2089 bool found_exclude = false;
2090 int new_prog_idx = 0;
2091
2092 /* Figure out how many existing progs we need to carry over to
2093 * the new array.
2094 */
2095 if (old_array) {
2096 existing = old_array->items;
2097 for (; existing->prog; existing++) {
2098 if (existing->prog == exclude_prog) {
2099 found_exclude = true;
2100 continue;
2101 }
2102 if (existing->prog != &dummy_bpf_prog.prog)
2103 carry_prog_cnt++;
2104 if (existing->prog == include_prog)
2105 return -EEXIST;
2106 }
2107 }
2108
2109 if (exclude_prog && !found_exclude)
2110 return -ENOENT;
2111
2112 /* How many progs (not NULL) will be in the new array? */
2113 new_prog_cnt = carry_prog_cnt;
2114 if (include_prog)
2115 new_prog_cnt += 1;
2116
2117 /* Do we have any prog (not NULL) in the new array? */
2118 if (!new_prog_cnt) {
2119 *new_array = NULL;
2120 return 0;
2121 }
2122
2123 /* +1 as the end of prog_array is marked with NULL */
2124 array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
2125 if (!array)
2126 return -ENOMEM;
2127
2128 /* Fill in the new prog array */
2129 if (carry_prog_cnt) {
2130 existing = old_array->items;
2131 for (; existing->prog; existing++)
2132 if (existing->prog != exclude_prog &&
2133 existing->prog != &dummy_bpf_prog.prog) {
2134 array->items[new_prog_idx++].prog =
2135 existing->prog;
2136 }
2137 }
2138 if (include_prog)
2139 array->items[new_prog_idx++].prog = include_prog;
2140 array->items[new_prog_idx].prog = NULL;
2141 *new_array = array;
2142 return 0;
2143 }
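/* A worked example: with old_array holding {A, B}, exclude_prog == A
 * and include_prog == C, the new array becomes {B, C, NULL}. If C were
 * already present, -EEXIST is returned; if A were requested for
 * exclusion but absent, -ENOENT; and if nothing is carried over and
 * nothing is included, *new_array is set to NULL and 0 is returned.
 */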
2144
2145 int bpf_prog_array_copy_info(struct bpf_prog_array *array,
2146 u32 *prog_ids, u32 request_cnt,
2147 u32 *prog_cnt)
2148 {
2149 u32 cnt = 0;
2150
2151 if (array)
2152 cnt = bpf_prog_array_length(array);
2153
2154 *prog_cnt = cnt;
2155
2156 /* return early if user requested only program count or nothing to copy */
2157 if (!request_cnt || !cnt)
2158 return 0;
2159
2160 /* this function is called under trace/bpf_trace.c: bpf_event_mutex */
2161 return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? -ENOSPC
2162 : 0;
2163 }
2164
2165 void __bpf_free_used_maps(struct bpf_prog_aux *aux,
2166 struct bpf_map **used_maps, u32 len)
2167 {
2168 struct bpf_map *map;
2169 u32 i;
2170
2171 for (i = 0; i < len; i++) {
2172 map = used_maps[i];
2173 if (map->ops->map_poke_untrack)
2174 map->ops->map_poke_untrack(map, aux);
2175 bpf_map_put(map);
2176 }
2177 }
2178
2179 static void bpf_free_used_maps(struct bpf_prog_aux *aux)
2180 {
2181 __bpf_free_used_maps(aux, aux->used_maps, aux->used_map_cnt);
2182 kfree(aux->used_maps);
2183 }
2184
2185 static void bpf_prog_free_deferred(struct work_struct *work)
2186 {
2187 struct bpf_prog_aux *aux;
2188 int i;
2189
2190 aux = container_of(work, struct bpf_prog_aux, work);
2191 bpf_free_used_maps(aux);
2192 if (bpf_prog_is_dev_bound(aux))
2193 bpf_prog_offload_destroy(aux->prog);
2194 #ifdef CONFIG_PERF_EVENTS
2195 if (aux->prog->has_callchain_buf)
2196 put_callchain_buffers();
2197 #endif
2198 if (aux->dst_trampoline)
2199 bpf_trampoline_put(aux->dst_trampoline);
2200 for (i = 0; i < aux->func_cnt; i++)
2201 bpf_jit_free(aux->func[i]);
2202 if (aux->func_cnt) {
2203 kfree(aux->func);
2204 bpf_prog_unlock_free(aux->prog);
2205 } else {
2206 bpf_jit_free(aux->prog);
2207 }
2208 }
2209
2210 /* Free internal BPF program */
2211 void bpf_prog_free(struct bpf_prog *fp)
2212 {
2213 struct bpf_prog_aux *aux = fp->aux;
2214
2215 if (aux->dst_prog)
2216 bpf_prog_put(aux->dst_prog);
2217 INIT_WORK(&aux->work, bpf_prog_free_deferred);
2218 schedule_work(&aux->work);
2219 }
2220 EXPORT_SYMBOL_GPL(bpf_prog_free);
2221
2222 /* RNG for unprivileged user space with separate state from prandom_u32(). */
2223 static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);
2224
2225 void bpf_user_rnd_init_once(void)
2226 {
2227 prandom_init_once(&bpf_user_rnd_state);
2228 }
2229
2230 BPF_CALL_0(bpf_user_rnd_u32)
2231 {
2232 /* Should someone ever have the rather unwise idea to use some
2233 * of the registers passed into this function, then note that
2234 * this function is called from native eBPF and classic-to-eBPF
2235 * transformations. Register assignments from both sides are
2236  * different, e.g. classic always sets fn(ctx, A, X) here.
2237 */
2238 struct rnd_state *state;
2239 u32 res;
2240
2241 state = &get_cpu_var(bpf_user_rnd_state);
2242 res = prandom_u32_state(state);
2243 put_cpu_var(bpf_user_rnd_state);
2244
2245 return res;
2246 }
2247
2248 BPF_CALL_0(bpf_get_raw_cpu_id)
2249 {
2250 return raw_smp_processor_id();
2251 }
2252
2253 /* Weak definitions of helper functions in case we don't have bpf syscall. */
2254 const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
2255 const struct bpf_func_proto bpf_map_update_elem_proto __weak;
2256 const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
2257 const struct bpf_func_proto bpf_map_push_elem_proto __weak;
2258 const struct bpf_func_proto bpf_map_pop_elem_proto __weak;
2259 const struct bpf_func_proto bpf_map_peek_elem_proto __weak;
2260 const struct bpf_func_proto bpf_spin_lock_proto __weak;
2261 const struct bpf_func_proto bpf_spin_unlock_proto __weak;
2262 const struct bpf_func_proto bpf_jiffies64_proto __weak;
2263
2264 const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
2265 const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
2266 const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
2267 const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
2268 const struct bpf_func_proto bpf_ktime_get_boot_ns_proto __weak;
2269
2270 const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
2271 const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
2272 const struct bpf_func_proto bpf_get_current_comm_proto __weak;
2273 const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
2274 const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto __weak;
2275 const struct bpf_func_proto bpf_get_local_storage_proto __weak;
2276 const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto __weak;
2277 const struct bpf_func_proto bpf_snprintf_btf_proto __weak;
2278 const struct bpf_func_proto bpf_seq_printf_btf_proto __weak;
2279
2280 const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
2281 {
2282 return NULL;
2283 }
2284
2285 u64 __weak
2286 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
2287 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
2288 {
2289 return -ENOTSUPP;
2290 }
2291 EXPORT_SYMBOL_GPL(bpf_event_output);
2292
2293 /* Always built-in helper functions. */
2294 const struct bpf_func_proto bpf_tail_call_proto = {
2295 .func = NULL,
2296 .gpl_only = false,
2297 .ret_type = RET_VOID,
2298 .arg1_type = ARG_PTR_TO_CTX,
2299 .arg2_type = ARG_CONST_MAP_PTR,
2300 .arg3_type = ARG_ANYTHING,
2301 };
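/* Note: .func is intentionally NULL; tail calls are not dispatched as
 * ordinary helper calls but are emitted inline by the interpreter and
 * the JITs, so only the argument and return types of this proto are
 * consulted (by the verifier).
 */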
2302
2303 /* Stub for JITs that only support cBPF. eBPF programs are interpreted.
2304 * It is encouraged to implement bpf_int_jit_compile() instead, so that
2305 * eBPF and implicitly also cBPF can get JITed!
2306 */
2307 struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
2308 {
2309 return prog;
2310 }
2311
2312 /* Stub for JITs that support eBPF. All cBPF code gets transformed into
2313 * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
2314 */
2315 void __weak bpf_jit_compile(struct bpf_prog *prog)
2316 {
2317 }
2318
2319 bool __weak bpf_helper_changes_pkt_data(void *func)
2320 {
2321 return false;
2322 }
2323
2324 /* Return TRUE if the JIT backend wants verifier to enable sub-register usage
2325 * analysis code and wants explicit zero extension inserted by verifier.
2326 * Otherwise, return FALSE.
2327 */
2328 bool __weak bpf_jit_needs_zext(void)
2329 {
2330 return false;
2331 }
2332
2333 /* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
2334 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
2335 */
2336 int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
2337 int len)
2338 {
2339 return -EFAULT;
2340 }
2341
2342 int __weak bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
2343 void *addr1, void *addr2)
2344 {
2345 return -ENOTSUPP;
2346 }
2347
2348 DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
2349 EXPORT_SYMBOL(bpf_stats_enabled_key);
2350
2351 /* All definitions of tracepoints related to BPF. */
2352 #undef TRACE_INCLUDE_PATH
2353 #define CREATE_TRACE_POINTS
2354 #include <linux/bpf_trace.h>
2355
2356 EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);
2357 EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_bulk_tx);
2358