1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Linux Socket Filter - Kernel level socket filtering
4 *
5 * Based on the design of the Berkeley Packet Filter. The new
6 * internal format has been designed by PLUMgrid:
7 *
8 * Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
9 *
10 * Authors:
11 *
12 * Jay Schulist <jschlst@samba.org>
13 * Alexei Starovoitov <ast@plumgrid.com>
14 * Daniel Borkmann <dborkman@redhat.com>
15 *
16 * Andi Kleen - Fix a few bad bugs and races.
17 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
18 */
19
20 #include <uapi/linux/btf.h>
21 #include <linux/filter.h>
22 #include <linux/skbuff.h>
23 #include <linux/vmalloc.h>
24 #include <linux/random.h>
25 #include <linux/moduleloader.h>
26 #include <linux/bpf.h>
27 #include <linux/btf.h>
28 #include <linux/objtool.h>
29 #include <linux/rbtree_latch.h>
30 #include <linux/kallsyms.h>
31 #include <linux/rcupdate.h>
32 #include <linux/perf_event.h>
33 #include <linux/extable.h>
34 #include <linux/log2.h>
35 #include <linux/bpf_verifier.h>
36 #include <linux/nodemask.h>
37 #include <linux/nospec.h>
38
39 #include <asm/barrier.h>
40 #include <asm/unaligned.h>
41
42 /* Registers */
43 #define BPF_R0 regs[BPF_REG_0]
44 #define BPF_R1 regs[BPF_REG_1]
45 #define BPF_R2 regs[BPF_REG_2]
46 #define BPF_R3 regs[BPF_REG_3]
47 #define BPF_R4 regs[BPF_REG_4]
48 #define BPF_R5 regs[BPF_REG_5]
49 #define BPF_R6 regs[BPF_REG_6]
50 #define BPF_R7 regs[BPF_REG_7]
51 #define BPF_R8 regs[BPF_REG_8]
52 #define BPF_R9 regs[BPF_REG_9]
53 #define BPF_R10 regs[BPF_REG_10]
54
55 /* Named registers */
56 #define DST regs[insn->dst_reg]
57 #define SRC regs[insn->src_reg]
58 #define FP regs[BPF_REG_FP]
59 #define AX regs[BPF_REG_AX]
60 #define ARG1 regs[BPF_REG_ARG1]
61 #define CTX regs[BPF_REG_CTX]
62 #define IMM insn->imm
63
64 /* No hurry in this branch
65 *
66 * Exported for the bpf jit load helper.
67 */
68 void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
69 {
70 u8 *ptr = NULL;
71
72 if (k >= SKF_NET_OFF) {
73 ptr = skb_network_header(skb) + k - SKF_NET_OFF;
74 } else if (k >= SKF_LL_OFF) {
75 if (unlikely(!skb_mac_header_was_set(skb)))
76 return NULL;
77 ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
78 }
79 if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
80 return ptr;
81
82 return NULL;
83 }
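/* Illustrative example (hypothetical offset): SKF_NET_OFF and SKF_LL_OFF are
 * large negative constants, so a classic BPF load such as
 * "ld [SKF_NET_OFF + 9]" arrives here with k = SKF_NET_OFF + 9 and resolves
 * to byte 9 past the network header, i.e. the protocol field of an IPv4
 * packet. Offsets relative to SKF_LL_OFF resolve against the MAC header
 * instead.
 */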
84
85 struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags)
86 {
87 gfp_t gfp_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO | gfp_extra_flags;
88 struct bpf_prog_aux *aux;
89 struct bpf_prog *fp;
90
91 size = round_up(size, PAGE_SIZE);
92 fp = __vmalloc(size, gfp_flags);
93 if (fp == NULL)
94 return NULL;
95
96 aux = kzalloc(sizeof(*aux), GFP_KERNEL_ACCOUNT | gfp_extra_flags);
97 if (aux == NULL) {
98 vfree(fp);
99 return NULL;
100 }
101 fp->active = alloc_percpu_gfp(int, GFP_KERNEL_ACCOUNT | gfp_extra_flags);
102 if (!fp->active) {
103 vfree(fp);
104 kfree(aux);
105 return NULL;
106 }
107
108 fp->pages = size / PAGE_SIZE;
109 fp->aux = aux;
110 fp->aux->prog = fp;
111 fp->jit_requested = ebpf_jit_enabled();
112 fp->blinding_requested = bpf_jit_blinding_enabled(fp);
113 #ifdef CONFIG_CGROUP_BPF
114 aux->cgroup_atype = CGROUP_BPF_ATTACH_TYPE_INVALID;
115 #endif
116
117 INIT_LIST_HEAD_RCU(&fp->aux->ksym.lnode);
118 mutex_init(&fp->aux->used_maps_mutex);
119 mutex_init(&fp->aux->dst_mutex);
120
121 return fp;
122 }
123
124 struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
125 {
126 gfp_t gfp_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO | gfp_extra_flags;
127 struct bpf_prog *prog;
128 int cpu;
129
130 prog = bpf_prog_alloc_no_stats(size, gfp_extra_flags);
131 if (!prog)
132 return NULL;
133
134 prog->stats = alloc_percpu_gfp(struct bpf_prog_stats, gfp_flags);
135 if (!prog->stats) {
136 free_percpu(prog->active);
137 kfree(prog->aux);
138 vfree(prog);
139 return NULL;
140 }
141
142 for_each_possible_cpu(cpu) {
143 struct bpf_prog_stats *pstats;
144
145 pstats = per_cpu_ptr(prog->stats, cpu);
146 u64_stats_init(&pstats->syncp);
147 }
148 return prog;
149 }
150 EXPORT_SYMBOL_GPL(bpf_prog_alloc);
151
152 int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog)
153 {
154 if (!prog->aux->nr_linfo || !prog->jit_requested)
155 return 0;
156
157 prog->aux->jited_linfo = kvcalloc(prog->aux->nr_linfo,
158 sizeof(*prog->aux->jited_linfo),
159 GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
160 if (!prog->aux->jited_linfo)
161 return -ENOMEM;
162
163 return 0;
164 }
165
166 void bpf_prog_jit_attempt_done(struct bpf_prog *prog)
167 {
168 if (prog->aux->jited_linfo &&
169 (!prog->jited || !prog->aux->jited_linfo[0])) {
170 kvfree(prog->aux->jited_linfo);
171 prog->aux->jited_linfo = NULL;
172 }
173
174 kfree(prog->aux->kfunc_tab);
175 prog->aux->kfunc_tab = NULL;
176 }
177
178 /* The JIT engine is responsible for providing an array
179 * for the insn_off to jited_off mapping (insn_to_jit_off).
180 *
181 * The idx to this array is the insn_off. Hence, the insn_off
182 * here is relative to the prog itself instead of the main prog.
183 * This array has one entry for each xlated bpf insn.
184 *
185 * jited_off is the byte off to the end of the jited insn.
186 *
187 * Hence, with
188 * insn_start:
189 * The first bpf insn off of the prog. The insn off
190 * here is relative to the main prog.
191 * e.g. if prog is a subprog, insn_start > 0
192 * linfo_idx:
193 * The prog's idx to prog->aux->linfo and jited_linfo
194 *
195 * jited_linfo[linfo_idx] = prog->bpf_func
196 *
197 * For i > linfo_idx,
198 *
199 * jited_linfo[i] = prog->bpf_func +
200 * insn_to_jit_off[linfo[i].insn_off - insn_start - 1]
201 */
202 void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
203 const u32 *insn_to_jit_off)
204 {
205 u32 linfo_idx, insn_start, insn_end, nr_linfo, i;
206 const struct bpf_line_info *linfo;
207 void **jited_linfo;
208
209 if (!prog->aux->jited_linfo)
210 /* Userspace did not provide linfo */
211 return;
212
213 linfo_idx = prog->aux->linfo_idx;
214 linfo = &prog->aux->linfo[linfo_idx];
215 insn_start = linfo[0].insn_off;
216 insn_end = insn_start + prog->len;
217
218 jited_linfo = &prog->aux->jited_linfo[linfo_idx];
219 jited_linfo[0] = prog->bpf_func;
220
221 nr_linfo = prog->aux->nr_linfo - linfo_idx;
222
223 for (i = 1; i < nr_linfo && linfo[i].insn_off < insn_end; i++)
224 /* The verifier ensures that linfo[i].insn_off is
225 * strictly increasing
226 */
227 jited_linfo[i] = prog->bpf_func +
228 insn_to_jit_off[linfo[i].insn_off - insn_start - 1];
229 }
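/* Worked example with hypothetical numbers: for a subprog whose first line
 * info entry has insn_off == 10 (so insn_start == 10) and whose second entry
 * has insn_off == 12, jited_linfo[0] is prog->bpf_func itself and
 * jited_linfo[1] is prog->bpf_func + insn_to_jit_off[12 - 10 - 1], i.e. the
 * end of the jited code for xlated insn 11, which is exactly where the jited
 * code for insn 12 begins.
 */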
230
231 struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
232 gfp_t gfp_extra_flags)
233 {
234 gfp_t gfp_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO | gfp_extra_flags;
235 struct bpf_prog *fp;
236 u32 pages;
237
238 size = round_up(size, PAGE_SIZE);
239 pages = size / PAGE_SIZE;
240 if (pages <= fp_old->pages)
241 return fp_old;
242
243 fp = __vmalloc(size, gfp_flags);
244 if (fp) {
245 memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
246 fp->pages = pages;
247 fp->aux->prog = fp;
248
249 /* We keep fp->aux from fp_old around in the new
250 * reallocated structure.
251 */
252 fp_old->aux = NULL;
253 fp_old->stats = NULL;
254 fp_old->active = NULL;
255 __bpf_prog_free(fp_old);
256 }
257
258 return fp;
259 }
260
261 void __bpf_prog_free(struct bpf_prog *fp)
262 {
263 if (fp->aux) {
264 mutex_destroy(&fp->aux->used_maps_mutex);
265 mutex_destroy(&fp->aux->dst_mutex);
266 kfree(fp->aux->poke_tab);
267 kfree(fp->aux);
268 }
269 free_percpu(fp->stats);
270 free_percpu(fp->active);
271 vfree(fp);
272 }
273
274 int bpf_prog_calc_tag(struct bpf_prog *fp)
275 {
276 const u32 bits_offset = SHA1_BLOCK_SIZE - sizeof(__be64);
277 u32 raw_size = bpf_prog_tag_scratch_size(fp);
278 u32 digest[SHA1_DIGEST_WORDS];
279 u32 ws[SHA1_WORKSPACE_WORDS];
280 u32 i, bsize, psize, blocks;
281 struct bpf_insn *dst;
282 bool was_ld_map;
283 u8 *raw, *todo;
284 __be32 *result;
285 __be64 *bits;
286
287 raw = vmalloc(raw_size);
288 if (!raw)
289 return -ENOMEM;
290
291 sha1_init(digest);
292 memset(ws, 0, sizeof(ws));
293
294 /* We need to take out the map fds for the digest calculation
295 * since they are unstable from the user space side.
296 */
297 dst = (void *)raw;
298 for (i = 0, was_ld_map = false; i < fp->len; i++) {
299 dst[i] = fp->insnsi[i];
300 if (!was_ld_map &&
301 dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
302 (dst[i].src_reg == BPF_PSEUDO_MAP_FD ||
303 dst[i].src_reg == BPF_PSEUDO_MAP_VALUE)) {
304 was_ld_map = true;
305 dst[i].imm = 0;
306 } else if (was_ld_map &&
307 dst[i].code == 0 &&
308 dst[i].dst_reg == 0 &&
309 dst[i].src_reg == 0 &&
310 dst[i].off == 0) {
311 was_ld_map = false;
312 dst[i].imm = 0;
313 } else {
314 was_ld_map = false;
315 }
316 }
317
318 psize = bpf_prog_insn_size(fp);
319 memset(&raw[psize], 0, raw_size - psize);
320 raw[psize++] = 0x80;
321
322 bsize = round_up(psize, SHA1_BLOCK_SIZE);
323 blocks = bsize / SHA1_BLOCK_SIZE;
324 todo = raw;
325 if (bsize - psize >= sizeof(__be64)) {
326 bits = (__be64 *)(todo + bsize - sizeof(__be64));
327 } else {
328 bits = (__be64 *)(todo + bsize + bits_offset);
329 blocks++;
330 }
331 *bits = cpu_to_be64((psize - 1) << 3);
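/* Note: the 0x80 byte appended above and the big-endian bit length stored
 * here implement standard SHA-1 message padding; psize - 1 is the message
 * length in bytes, so (psize - 1) << 3 is its length in bits.
 */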
332
333 while (blocks--) {
334 sha1_transform(digest, todo, ws);
335 todo += SHA1_BLOCK_SIZE;
336 }
337
338 result = (__force __be32 *)digest;
339 for (i = 0; i < SHA1_DIGEST_WORDS; i++)
340 result[i] = cpu_to_be32(digest[i]);
341 memcpy(fp->tag, result, sizeof(fp->tag));
342
343 vfree(raw);
344 return 0;
345 }
346
347 static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old,
348 s32 end_new, s32 curr, const bool probe_pass)
349 {
350 const s64 imm_min = S32_MIN, imm_max = S32_MAX;
351 s32 delta = end_new - end_old;
352 s64 imm = insn->imm;
353
354 if (curr < pos && curr + imm + 1 >= end_old)
355 imm += delta;
356 else if (curr >= end_new && curr + imm + 1 < end_new)
357 imm -= delta;
358 if (imm < imm_min || imm > imm_max)
359 return -ERANGE;
360 if (!probe_pass)
361 insn->imm = imm;
362 return 0;
363 }
364
365 static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
366 s32 end_new, s32 curr, const bool probe_pass)
367 {
368 const s32 off_min = S16_MIN, off_max = S16_MAX;
369 s32 delta = end_new - end_old;
370 s32 off = insn->off;
371
372 if (curr < pos && curr + off + 1 >= end_old)
373 off += delta;
374 else if (curr >= end_new && curr + off + 1 < end_new)
375 off -= delta;
376 if (off < off_min || off > off_max)
377 return -ERANGE;
378 if (!probe_pass)
379 insn->off = off;
380 return 0;
381 }
382
383 static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, s32 end_old,
384 s32 end_new, const bool probe_pass)
385 {
386 u32 i, insn_cnt = prog->len + (probe_pass ? end_new - end_old : 0);
387 struct bpf_insn *insn = prog->insnsi;
388 int ret = 0;
389
390 for (i = 0; i < insn_cnt; i++, insn++) {
391 u8 code;
392
393 /* In the probing pass we still operate on the original,
394 * unpatched image in order to check overflows before we
395 * do any other adjustments. Therefore skip the patchlet.
396 */
397 if (probe_pass && i == pos) {
398 i = end_new;
399 insn = prog->insnsi + end_old;
400 }
401 if (bpf_pseudo_func(insn)) {
402 ret = bpf_adj_delta_to_imm(insn, pos, end_old,
403 end_new, i, probe_pass);
404 if (ret)
405 return ret;
406 continue;
407 }
408 code = insn->code;
409 if ((BPF_CLASS(code) != BPF_JMP &&
410 BPF_CLASS(code) != BPF_JMP32) ||
411 BPF_OP(code) == BPF_EXIT)
412 continue;
413 /* Adjust offset of jmps if we cross patch boundaries. */
414 if (BPF_OP(code) == BPF_CALL) {
415 if (insn->src_reg != BPF_PSEUDO_CALL)
416 continue;
417 ret = bpf_adj_delta_to_imm(insn, pos, end_old,
418 end_new, i, probe_pass);
419 } else {
420 ret = bpf_adj_delta_to_off(insn, pos, end_old,
421 end_new, i, probe_pass);
422 }
423 if (ret)
424 break;
425 }
426
427 return ret;
428 }
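/* Worked example with hypothetical numbers: patching one insn at pos 5 into
 * three insns gives end_old = 6, end_new = 8 and delta = 2. A jump at insn 2
 * with off = 4 used to target insn 7 (2 + 4 + 1 >= end_old), i.e. it crossed
 * the patched region, so its offset is bumped to 6 in order to reach the
 * same logical insn, which now lives at index 9.
 */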
429
430 static void bpf_adj_linfo(struct bpf_prog *prog, u32 off, u32 delta)
431 {
432 struct bpf_line_info *linfo;
433 u32 i, nr_linfo;
434
435 nr_linfo = prog->aux->nr_linfo;
436 if (!nr_linfo || !delta)
437 return;
438
439 linfo = prog->aux->linfo;
440
441 for (i = 0; i < nr_linfo; i++)
442 if (off < linfo[i].insn_off)
443 break;
444
445 /* Push all off < linfo[i].insn_off by delta */
446 for (; i < nr_linfo; i++)
447 linfo[i].insn_off += delta;
448 }
449
450 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
451 const struct bpf_insn *patch, u32 len)
452 {
453 u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
454 const u32 cnt_max = S16_MAX;
455 struct bpf_prog *prog_adj;
456 int err;
457
458 /* Since our patchlet doesn't expand the image, we're done. */
459 if (insn_delta == 0) {
460 memcpy(prog->insnsi + off, patch, sizeof(*patch));
461 return prog;
462 }
463
464 insn_adj_cnt = prog->len + insn_delta;
465
466 /* Reject anything that would potentially let the insn->off
467 * target overflow when we have excessive program expansions.
468 * We need to probe here before we do any reallocation where
469 * we afterwards may not fail anymore.
470 */
471 if (insn_adj_cnt > cnt_max &&
472 (err = bpf_adj_branches(prog, off, off + 1, off + len, true)))
473 return ERR_PTR(err);
474
475 /* Several new instructions need to be inserted. Make room
476 * for them. Likely, there's no need for a new allocation as
477 * the last page could have large enough tailroom.
478 */
479 prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
480 GFP_USER);
481 if (!prog_adj)
482 return ERR_PTR(-ENOMEM);
483
484 prog_adj->len = insn_adj_cnt;
485
486 /* Patching happens in 3 steps:
487 *
488 * 1) Move over tail of insnsi from next instruction onwards,
489 * so we can patch the single target insn with one or more
490 * new ones (patching is always from 1 to n insns, n > 0).
491 * 2) Inject new instructions at the target location.
492 * 3) Adjust branch offsets if necessary.
493 */
494 insn_rest = insn_adj_cnt - off - len;
495
496 memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
497 sizeof(*patch) * insn_rest);
498 memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);
499
500 /* We are guaranteed to not fail at this point; otherwise the
501 * ship has sailed and we cannot revert to the original state. An
502 * overflow cannot happen at this point.
503 */
504 BUG_ON(bpf_adj_branches(prog_adj, off, off + 1, off + len, false));
505
506 bpf_adj_linfo(prog_adj, off, insn_delta);
507
508 return prog_adj;
509 }
510
511 int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt)
512 {
513 /* Branch offsets can't overflow when program is shrinking, no need
514 * to call bpf_adj_branches(..., true) here
515 */
516 memmove(prog->insnsi + off, prog->insnsi + off + cnt,
517 sizeof(struct bpf_insn) * (prog->len - off - cnt));
518 prog->len -= cnt;
519
520 return WARN_ON_ONCE(bpf_adj_branches(prog, off, off + cnt, off, false));
521 }
522
523 static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
524 {
525 int i;
526
527 for (i = 0; i < fp->aux->func_cnt; i++)
528 bpf_prog_kallsyms_del(fp->aux->func[i]);
529 }
530
531 void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
532 {
533 bpf_prog_kallsyms_del_subprogs(fp);
534 bpf_prog_kallsyms_del(fp);
535 }
536
537 #ifdef CONFIG_BPF_JIT
538 /* All BPF JIT sysctl knobs here. */
539 int bpf_jit_enable __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
540 int bpf_jit_kallsyms __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
541 int bpf_jit_harden __read_mostly;
542 long bpf_jit_limit __read_mostly;
543 long bpf_jit_limit_max __read_mostly;
544
545 static void
546 bpf_prog_ksym_set_addr(struct bpf_prog *prog)
547 {
548 WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));
549
550 prog->aux->ksym.start = (unsigned long) prog->bpf_func;
551 prog->aux->ksym.end = prog->aux->ksym.start + prog->jited_len;
552 }
553
554 static void
555 bpf_prog_ksym_set_name(struct bpf_prog *prog)
556 {
557 char *sym = prog->aux->ksym.name;
558 const char *end = sym + KSYM_NAME_LEN;
559 const struct btf_type *type;
560 const char *func_name;
561
562 BUILD_BUG_ON(sizeof("bpf_prog_") +
563 sizeof(prog->tag) * 2 +
564 /* name has been null terminated.
565 * We should need +1 for the '_' preceding
566 * the name. However, the null character
567 * is double counted between the name and the
568 * sizeof("bpf_prog_") above, so we omit
569 * the +1 here.
570 */
571 sizeof(prog->aux->name) > KSYM_NAME_LEN);
572
573 sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
574 sym = bin2hex(sym, prog->tag, sizeof(prog->tag));
575
576 /* prog->aux->name will be ignored if full btf name is available */
577 if (prog->aux->func_info_cnt) {
578 type = btf_type_by_id(prog->aux->btf,
579 prog->aux->func_info[prog->aux->func_idx].type_id);
580 func_name = btf_name_by_offset(prog->aux->btf, type->name_off);
581 snprintf(sym, (size_t)(end - sym), "_%s", func_name);
582 return;
583 }
584
585 if (prog->aux->name[0])
586 snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
587 else
588 *sym = 0;
589 }
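/* The resulting symbol has the form "bpf_prog_<tag>[_<name>]", e.g. a
 * (hypothetical) "bpf_prog_8937c458564ba118_myprog", where <tag> is the
 * program tag printed as 16 hex characters.
 */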
590
591 static unsigned long bpf_get_ksym_start(struct latch_tree_node *n)
592 {
593 return container_of(n, struct bpf_ksym, tnode)->start;
594 }
595
596 static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
597 struct latch_tree_node *b)
598 {
599 return bpf_get_ksym_start(a) < bpf_get_ksym_start(b);
600 }
601
602 static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
603 {
604 unsigned long val = (unsigned long)key;
605 const struct bpf_ksym *ksym;
606
607 ksym = container_of(n, struct bpf_ksym, tnode);
608
609 if (val < ksym->start)
610 return -1;
611 /* Ensure that we detect return addresses as part of the program, when
612 * the final instruction is a call for a program that is part of the stack
613 * trace. Therefore, do val > ksym->end instead of val >= ksym->end.
614 */
615 if (val > ksym->end)
616 return 1;
617
618 return 0;
619 }
620
621 static const struct latch_tree_ops bpf_tree_ops = {
622 .less = bpf_tree_less,
623 .comp = bpf_tree_comp,
624 };
625
626 static DEFINE_SPINLOCK(bpf_lock);
627 static LIST_HEAD(bpf_kallsyms);
628 static struct latch_tree_root bpf_tree __cacheline_aligned;
629
630 void bpf_ksym_add(struct bpf_ksym *ksym)
631 {
632 spin_lock_bh(&bpf_lock);
633 WARN_ON_ONCE(!list_empty(&ksym->lnode));
634 list_add_tail_rcu(&ksym->lnode, &bpf_kallsyms);
635 latch_tree_insert(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
636 spin_unlock_bh(&bpf_lock);
637 }
638
639 static void __bpf_ksym_del(struct bpf_ksym *ksym)
640 {
641 if (list_empty(&ksym->lnode))
642 return;
643
644 latch_tree_erase(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
645 list_del_rcu(&ksym->lnode);
646 }
647
648 void bpf_ksym_del(struct bpf_ksym *ksym)
649 {
650 spin_lock_bh(&bpf_lock);
651 __bpf_ksym_del(ksym);
652 spin_unlock_bh(&bpf_lock);
653 }
654
655 static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
656 {
657 return fp->jited && !bpf_prog_was_classic(fp);
658 }
659
660 void bpf_prog_kallsyms_add(struct bpf_prog *fp)
661 {
662 if (!bpf_prog_kallsyms_candidate(fp) ||
663 !bpf_capable())
664 return;
665
666 bpf_prog_ksym_set_addr(fp);
667 bpf_prog_ksym_set_name(fp);
668 fp->aux->ksym.prog = true;
669
670 bpf_ksym_add(&fp->aux->ksym);
671 }
672
673 void bpf_prog_kallsyms_del(struct bpf_prog *fp)
674 {
675 if (!bpf_prog_kallsyms_candidate(fp))
676 return;
677
678 bpf_ksym_del(&fp->aux->ksym);
679 }
680
681 static struct bpf_ksym *bpf_ksym_find(unsigned long addr)
682 {
683 struct latch_tree_node *n;
684
685 n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
686 return n ? container_of(n, struct bpf_ksym, tnode) : NULL;
687 }
688
689 const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
690 unsigned long *off, char *sym)
691 {
692 struct bpf_ksym *ksym;
693 char *ret = NULL;
694
695 rcu_read_lock();
696 ksym = bpf_ksym_find(addr);
697 if (ksym) {
698 unsigned long symbol_start = ksym->start;
699 unsigned long symbol_end = ksym->end;
700
701 strncpy(sym, ksym->name, KSYM_NAME_LEN);
702
703 ret = sym;
704 if (size)
705 *size = symbol_end - symbol_start;
706 if (off)
707 *off = addr - symbol_start;
708 }
709 rcu_read_unlock();
710
711 return ret;
712 }
713
714 bool is_bpf_text_address(unsigned long addr)
715 {
716 bool ret;
717
718 rcu_read_lock();
719 ret = bpf_ksym_find(addr) != NULL;
720 rcu_read_unlock();
721
722 return ret;
723 }
724
725 static struct bpf_prog *bpf_prog_ksym_find(unsigned long addr)
726 {
727 struct bpf_ksym *ksym = bpf_ksym_find(addr);
728
729 return ksym && ksym->prog ?
730 container_of(ksym, struct bpf_prog_aux, ksym)->prog :
731 NULL;
732 }
733
734 const struct exception_table_entry *search_bpf_extables(unsigned long addr)
735 {
736 const struct exception_table_entry *e = NULL;
737 struct bpf_prog *prog;
738
739 rcu_read_lock();
740 prog = bpf_prog_ksym_find(addr);
741 if (!prog)
742 goto out;
743 if (!prog->aux->num_exentries)
744 goto out;
745
746 e = search_extable(prog->aux->extable, prog->aux->num_exentries, addr);
747 out:
748 rcu_read_unlock();
749 return e;
750 }
751
752 int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
753 char *sym)
754 {
755 struct bpf_ksym *ksym;
756 unsigned int it = 0;
757 int ret = -ERANGE;
758
759 if (!bpf_jit_kallsyms_enabled())
760 return ret;
761
762 rcu_read_lock();
763 list_for_each_entry_rcu(ksym, &bpf_kallsyms, lnode) {
764 if (it++ != symnum)
765 continue;
766
767 strncpy(sym, ksym->name, KSYM_NAME_LEN);
768
769 *value = ksym->start;
770 *type = BPF_SYM_ELF_TYPE;
771
772 ret = 0;
773 break;
774 }
775 rcu_read_unlock();
776
777 return ret;
778 }
779
780 int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
781 struct bpf_jit_poke_descriptor *poke)
782 {
783 struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
784 static const u32 poke_tab_max = 1024;
785 u32 slot = prog->aux->size_poke_tab;
786 u32 size = slot + 1;
787
788 if (size > poke_tab_max)
789 return -ENOSPC;
790 if (poke->tailcall_target || poke->tailcall_target_stable ||
791 poke->tailcall_bypass || poke->adj_off || poke->bypass_addr)
792 return -EINVAL;
793
794 switch (poke->reason) {
795 case BPF_POKE_REASON_TAIL_CALL:
796 if (!poke->tail_call.map)
797 return -EINVAL;
798 break;
799 default:
800 return -EINVAL;
801 }
802
803 tab = krealloc(tab, size * sizeof(*poke), GFP_KERNEL);
804 if (!tab)
805 return -ENOMEM;
806
807 memcpy(&tab[slot], poke, sizeof(*poke));
808 prog->aux->size_poke_tab = size;
809 prog->aux->poke_tab = tab;
810
811 return slot;
812 }
813
814 /*
815 * BPF program pack allocator.
816 *
817 * Most BPF programs are pretty small. Allocating a whole page for each
818 * program is sometimes a waste. Many small BPF programs also add pressure
819 * to the instruction TLB. To solve this issue, we introduce a BPF program pack
820 * allocator. The prog_pack allocator uses HPAGE_PMD_SIZE page (2MB on x86)
821 * to host BPF programs.
822 */
823 #define BPF_PROG_CHUNK_SHIFT 6
824 #define BPF_PROG_CHUNK_SIZE (1 << BPF_PROG_CHUNK_SHIFT)
825 #define BPF_PROG_CHUNK_MASK (~(BPF_PROG_CHUNK_SIZE - 1))
826
827 struct bpf_prog_pack {
828 struct list_head list;
829 void *ptr;
830 unsigned long bitmap[];
831 };
832
833 void bpf_jit_fill_hole_with_zero(void *area, unsigned int size)
834 {
835 memset(area, 0, size);
836 }
837
838 #define BPF_PROG_SIZE_TO_NBITS(size) (round_up(size, BPF_PROG_CHUNK_SIZE) / BPF_PROG_CHUNK_SIZE)
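/* Example (hypothetical size): a 300 byte image needs
 * BPF_PROG_SIZE_TO_NBITS(300) = round_up(300, 64) / 64 = 5 chunks,
 * i.e. 320 bytes of a pack.
 */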
839
840 static DEFINE_MUTEX(pack_mutex);
841 static LIST_HEAD(pack_list);
842
843 /* PMD_SIZE is not available in some special config, e.g. ARCH=arm with
844 * CONFIG_MMU=n. Use PAGE_SIZE in these cases.
845 */
846 #ifdef PMD_SIZE
847 #define BPF_PROG_PACK_SIZE (PMD_SIZE * num_possible_nodes())
848 #else
849 #define BPF_PROG_PACK_SIZE PAGE_SIZE
850 #endif
851
852 #define BPF_PROG_CHUNK_COUNT (BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE)
853
854 static struct bpf_prog_pack *alloc_new_pack(bpf_jit_fill_hole_t bpf_fill_ill_insns)
855 {
856 struct bpf_prog_pack *pack;
857
858 pack = kzalloc(struct_size(pack, bitmap, BITS_TO_LONGS(BPF_PROG_CHUNK_COUNT)),
859 GFP_KERNEL);
860 if (!pack)
861 return NULL;
862 pack->ptr = module_alloc(BPF_PROG_PACK_SIZE);
863 if (!pack->ptr) {
864 kfree(pack);
865 return NULL;
866 }
867 bpf_fill_ill_insns(pack->ptr, BPF_PROG_PACK_SIZE);
868 bitmap_zero(pack->bitmap, BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE);
869 list_add_tail(&pack->list, &pack_list);
870
871 set_vm_flush_reset_perms(pack->ptr);
872 set_memory_ro((unsigned long)pack->ptr, BPF_PROG_PACK_SIZE / PAGE_SIZE);
873 set_memory_x((unsigned long)pack->ptr, BPF_PROG_PACK_SIZE / PAGE_SIZE);
874 return pack;
875 }
876
877 void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insns)
878 {
879 unsigned int nbits = BPF_PROG_SIZE_TO_NBITS(size);
880 struct bpf_prog_pack *pack;
881 unsigned long pos;
882 void *ptr = NULL;
883
884 mutex_lock(&pack_mutex);
885 if (size > BPF_PROG_PACK_SIZE) {
886 size = round_up(size, PAGE_SIZE);
887 ptr = module_alloc(size);
888 if (ptr) {
889 bpf_fill_ill_insns(ptr, size);
890 set_vm_flush_reset_perms(ptr);
891 set_memory_ro((unsigned long)ptr, size / PAGE_SIZE);
892 set_memory_x((unsigned long)ptr, size / PAGE_SIZE);
893 }
894 goto out;
895 }
896 list_for_each_entry(pack, &pack_list, list) {
897 pos = bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0,
898 nbits, 0);
899 if (pos < BPF_PROG_CHUNK_COUNT)
900 goto found_free_area;
901 }
902
903 pack = alloc_new_pack(bpf_fill_ill_insns);
904 if (!pack)
905 goto out;
906
907 pos = 0;
908
909 found_free_area:
910 bitmap_set(pack->bitmap, pos, nbits);
911 ptr = (void *)(pack->ptr) + (pos << BPF_PROG_CHUNK_SHIFT);
912
913 out:
914 mutex_unlock(&pack_mutex);
915 return ptr;
916 }
917
918 void bpf_prog_pack_free(struct bpf_binary_header *hdr)
919 {
920 struct bpf_prog_pack *pack = NULL, *tmp;
921 unsigned int nbits;
922 unsigned long pos;
923
924 mutex_lock(&pack_mutex);
925 if (hdr->size > BPF_PROG_PACK_SIZE) {
926 module_memfree(hdr);
927 goto out;
928 }
929
930 list_for_each_entry(tmp, &pack_list, list) {
931 if ((void *)hdr >= tmp->ptr && (tmp->ptr + BPF_PROG_PACK_SIZE) > (void *)hdr) {
932 pack = tmp;
933 break;
934 }
935 }
936
937 if (WARN_ONCE(!pack, "bpf_prog_pack bug\n"))
938 goto out;
939
940 nbits = BPF_PROG_SIZE_TO_NBITS(hdr->size);
941 pos = ((unsigned long)hdr - (unsigned long)pack->ptr) >> BPF_PROG_CHUNK_SHIFT;
942
943 WARN_ONCE(bpf_arch_text_invalidate(hdr, hdr->size),
944 "bpf_prog_pack bug: missing bpf_arch_text_invalidate?\n");
945
946 bitmap_clear(pack->bitmap, pos, nbits);
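/* If the whole pack is now unused (a zero area of BPF_PROG_CHUNK_COUNT
 * chunks is found starting at bit 0), release it back to the module
 * allocator.
 */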
947 if (bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0,
948 BPF_PROG_CHUNK_COUNT, 0) == 0) {
949 list_del(&pack->list);
950 module_memfree(pack->ptr);
951 kfree(pack);
952 }
953 out:
954 mutex_unlock(&pack_mutex);
955 }
956
957 static atomic_long_t bpf_jit_current;
958
959 /* Can be overridden by an arch's JIT compiler if it has a custom,
960 * dedicated BPF backend memory area, or if neither of the two
961 * below apply.
962 */
963 u64 __weak bpf_jit_alloc_exec_limit(void)
964 {
965 #if defined(MODULES_VADDR)
966 return MODULES_END - MODULES_VADDR;
967 #else
968 return VMALLOC_END - VMALLOC_START;
969 #endif
970 }
971
972 static int __init bpf_jit_charge_init(void)
973 {
974 /* Only used as heuristic here to derive limit. */
975 bpf_jit_limit_max = bpf_jit_alloc_exec_limit();
976 bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 1,
977 PAGE_SIZE), LONG_MAX);
978 return 0;
979 }
980 pure_initcall(bpf_jit_charge_init);
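/* Example (hypothetical arch numbers): with a 1 GiB executable region
 * reported by bpf_jit_alloc_exec_limit(), the default bpf_jit_limit comes
 * out as round_up(1 GiB >> 1, PAGE_SIZE) = 512 MiB.
 */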
981
982 int bpf_jit_charge_modmem(u32 size)
983 {
984 if (atomic_long_add_return(size, &bpf_jit_current) > READ_ONCE(bpf_jit_limit)) {
985 if (!bpf_capable()) {
986 atomic_long_sub(size, &bpf_jit_current);
987 return -EPERM;
988 }
989 }
990
991 return 0;
992 }
993
994 void bpf_jit_uncharge_modmem(u32 size)
995 {
996 atomic_long_sub(size, &bpf_jit_current);
997 }
998
999 void *__weak bpf_jit_alloc_exec(unsigned long size)
1000 {
1001 return module_alloc(size);
1002 }
1003
1004 void __weak bpf_jit_free_exec(void *addr)
1005 {
1006 module_memfree(addr);
1007 }
1008
1009 struct bpf_binary_header *
1010 bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
1011 unsigned int alignment,
1012 bpf_jit_fill_hole_t bpf_fill_ill_insns)
1013 {
1014 struct bpf_binary_header *hdr;
1015 u32 size, hole, start;
1016
1017 WARN_ON_ONCE(!is_power_of_2(alignment) ||
1018 alignment > BPF_IMAGE_ALIGNMENT);
1019
1020 /* Most BPF filters are really small, but if some of them
1021 * fill a page, allow at least 128 extra bytes to insert a
1022 * random section of illegal instructions.
1023 */
1024 size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
1025
1026 if (bpf_jit_charge_modmem(size))
1027 return NULL;
1028 hdr = bpf_jit_alloc_exec(size);
1029 if (!hdr) {
1030 bpf_jit_uncharge_modmem(size);
1031 return NULL;
1032 }
1033
1034 /* Fill space with illegal/arch-dep instructions. */
1035 bpf_fill_ill_insns(hdr, size);
1036
1037 hdr->size = size;
1038 hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
1039 PAGE_SIZE - sizeof(*hdr));
1040 start = prandom_u32_max(hole) & ~(alignment - 1);
1041
1042 /* Leave a random number of instructions before BPF code. */
1043 *image_ptr = &hdr->image[start];
1044
1045 return hdr;
1046 }
1047
bpf_jit_binary_free(struct bpf_binary_header * hdr)1048 void bpf_jit_binary_free(struct bpf_binary_header *hdr)
1049 {
1050 u32 size = hdr->size;
1051
1052 bpf_jit_free_exec(hdr);
1053 bpf_jit_uncharge_modmem(size);
1054 }
1055
1056 /* Allocate jit binary from bpf_prog_pack allocator.
1057 * Since the allocated memory is RO+X, the JIT engine cannot write directly
1058 * to the memory. To solve this problem, a RW buffer is also allocated at
1059 * the same time. The JIT engine should calculate offsets based on the
1060 * RO memory address, but write JITed program to the RW buffer. Once the
1061 * JIT engine finishes, it calls bpf_jit_binary_pack_finalize, which copies
1062 * the JITed program to the RO memory.
1063 */
1064 struct bpf_binary_header *
1065 bpf_jit_binary_pack_alloc(unsigned int proglen, u8 **image_ptr,
1066 unsigned int alignment,
1067 struct bpf_binary_header **rw_header,
1068 u8 **rw_image,
1069 bpf_jit_fill_hole_t bpf_fill_ill_insns)
1070 {
1071 struct bpf_binary_header *ro_header;
1072 u32 size, hole, start;
1073
1074 WARN_ON_ONCE(!is_power_of_2(alignment) ||
1075 alignment > BPF_IMAGE_ALIGNMENT);
1076
1077 /* add 16 bytes for a random section of illegal instructions */
1078 size = round_up(proglen + sizeof(*ro_header) + 16, BPF_PROG_CHUNK_SIZE);
1079
1080 if (bpf_jit_charge_modmem(size))
1081 return NULL;
1082 ro_header = bpf_prog_pack_alloc(size, bpf_fill_ill_insns);
1083 if (!ro_header) {
1084 bpf_jit_uncharge_modmem(size);
1085 return NULL;
1086 }
1087
1088 *rw_header = kvmalloc(size, GFP_KERNEL);
1089 if (!*rw_header) {
1090 bpf_arch_text_copy(&ro_header->size, &size, sizeof(size));
1091 bpf_prog_pack_free(ro_header);
1092 bpf_jit_uncharge_modmem(size);
1093 return NULL;
1094 }
1095
1096 /* Fill space with illegal/arch-dep instructions. */
1097 bpf_fill_ill_insns(*rw_header, size);
1098 (*rw_header)->size = size;
1099
1100 hole = min_t(unsigned int, size - (proglen + sizeof(*ro_header)),
1101 BPF_PROG_CHUNK_SIZE - sizeof(*ro_header));
1102 start = prandom_u32_max(hole) & ~(alignment - 1);
1103
1104 *image_ptr = &ro_header->image[start];
1105 *rw_image = &(*rw_header)->image[start];
1106
1107 return ro_header;
1108 }
1109
1110 /* Copy JITed text from rw_header to its final location, the ro_header. */
1111 int bpf_jit_binary_pack_finalize(struct bpf_prog *prog,
1112 struct bpf_binary_header *ro_header,
1113 struct bpf_binary_header *rw_header)
1114 {
1115 void *ptr;
1116
1117 ptr = bpf_arch_text_copy(ro_header, rw_header, rw_header->size);
1118
1119 kvfree(rw_header);
1120
1121 if (IS_ERR(ptr)) {
1122 bpf_prog_pack_free(ro_header);
1123 return PTR_ERR(ptr);
1124 }
1125 return 0;
1126 }
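/* Sketch of the intended usage by a JIT backend (variable names are
 * illustrative only):
 *
 *	ro_header = bpf_jit_binary_pack_alloc(proglen, &image, align,
 *					      &rw_header, &rw_image, fill_insns);
 *	... emit instructions into rw_image, using image for offsets ...
 *	if (!bpf_jit_binary_pack_finalize(prog, ro_header, rw_header))
 *		prog->bpf_func = (void *)image;
 */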
1127
1128 /* bpf_jit_binary_pack_free is called in two different scenarios:
1129 * 1) when the program is freed after the JIT has completed;
1130 * 2) when the JIT engine fails (before bpf_jit_binary_pack_finalize).
1131 * For case 2), we need to free both the RO memory and the RW buffer.
1132 *
1133 * bpf_jit_binary_pack_free requires proper ro_header->size. However,
1134 * bpf_jit_binary_pack_alloc does not set it. Therefore, ro_header->size
1135 * must be set with either bpf_jit_binary_pack_finalize (normal path) or
1136 * bpf_arch_text_copy (when jit fails).
1137 */
1138 void bpf_jit_binary_pack_free(struct bpf_binary_header *ro_header,
1139 struct bpf_binary_header *rw_header)
1140 {
1141 u32 size = ro_header->size;
1142
1143 bpf_prog_pack_free(ro_header);
1144 kvfree(rw_header);
1145 bpf_jit_uncharge_modmem(size);
1146 }
1147
1148 struct bpf_binary_header *
1149 bpf_jit_binary_pack_hdr(const struct bpf_prog *fp)
1150 {
1151 unsigned long real_start = (unsigned long)fp->bpf_func;
1152 unsigned long addr;
1153
1154 addr = real_start & BPF_PROG_CHUNK_MASK;
1155 return (void *)addr;
1156 }
1157
1158 static inline struct bpf_binary_header *
1159 bpf_jit_binary_hdr(const struct bpf_prog *fp)
1160 {
1161 unsigned long real_start = (unsigned long)fp->bpf_func;
1162 unsigned long addr;
1163
1164 addr = real_start & PAGE_MASK;
1165 return (void *)addr;
1166 }
1167
1168 /* This symbol is only overridden by archs that have different
1169 * requirements than the usual eBPF JITs, f.e. when they only
1170 * implement cBPF JIT, do not set images read-only, etc.
1171 */
1172 void __weak bpf_jit_free(struct bpf_prog *fp)
1173 {
1174 if (fp->jited) {
1175 struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);
1176
1177 bpf_jit_binary_free(hdr);
1178 WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
1179 }
1180
1181 bpf_prog_unlock_free(fp);
1182 }
1183
1184 int bpf_jit_get_func_addr(const struct bpf_prog *prog,
1185 const struct bpf_insn *insn, bool extra_pass,
1186 u64 *func_addr, bool *func_addr_fixed)
1187 {
1188 s16 off = insn->off;
1189 s32 imm = insn->imm;
1190 u8 *addr;
1191
1192 *func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL;
1193 if (!*func_addr_fixed) {
1194 /* Place-holder address till the last pass has collected
1195 * all addresses for JITed subprograms in which case we
1196 * can pick them up from prog->aux.
1197 */
1198 if (!extra_pass)
1199 addr = NULL;
1200 else if (prog->aux->func &&
1201 off >= 0 && off < prog->aux->func_cnt)
1202 addr = (u8 *)prog->aux->func[off]->bpf_func;
1203 else
1204 return -EINVAL;
1205 } else {
1206 /* Address of a BPF helper call. Since part of the core
1207 * kernel, it's always at a fixed location. __bpf_call_base
1208 * and the helper with imm relative to it are both in core
1209 * kernel.
1210 */
1211 addr = (u8 *)__bpf_call_base + imm;
1212 }
1213
1214 *func_addr = (unsigned long)addr;
1215 return 0;
1216 }
1217
1218 static int bpf_jit_blind_insn(const struct bpf_insn *from,
1219 const struct bpf_insn *aux,
1220 struct bpf_insn *to_buff,
1221 bool emit_zext)
1222 {
1223 struct bpf_insn *to = to_buff;
1224 u32 imm_rnd = get_random_u32();
1225 s16 off;
1226
1227 BUILD_BUG_ON(BPF_REG_AX + 1 != MAX_BPF_JIT_REG);
1228 BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);
1229
1230 /* Constraints on AX register:
1231 *
1232 * AX register is inaccessible from user space. It is mapped in
1233 * all JITs, and used here for constant blinding rewrites. It is
1234 * typically "stateless" meaning its contents are only valid within
1235 * the executed instruction, but not across several instructions.
1236 * There are a few exceptions however which are further detailed
1237 * below.
1238 *
1239 * Constant blinding is only used by JITs, not in the interpreter.
1240 * The interpreter uses AX in some occasions as a local temporary
1241 * register e.g. in DIV or MOD instructions.
1242 *
1243 * In restricted circumstances, the verifier can also use the AX
1244 * register for rewrites as long as they do not interfere with
1245 * the above cases!
1246 */
1247 if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX)
1248 goto out;
1249
1250 if (from->imm == 0 &&
1251 (from->code == (BPF_ALU | BPF_MOV | BPF_K) ||
1252 from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
1253 *to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
1254 goto out;
1255 }
1256
1257 switch (from->code) {
1258 case BPF_ALU | BPF_ADD | BPF_K:
1259 case BPF_ALU | BPF_SUB | BPF_K:
1260 case BPF_ALU | BPF_AND | BPF_K:
1261 case BPF_ALU | BPF_OR | BPF_K:
1262 case BPF_ALU | BPF_XOR | BPF_K:
1263 case BPF_ALU | BPF_MUL | BPF_K:
1264 case BPF_ALU | BPF_MOV | BPF_K:
1265 case BPF_ALU | BPF_DIV | BPF_K:
1266 case BPF_ALU | BPF_MOD | BPF_K:
1267 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1268 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1269 *to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX);
1270 break;
1271
1272 case BPF_ALU64 | BPF_ADD | BPF_K:
1273 case BPF_ALU64 | BPF_SUB | BPF_K:
1274 case BPF_ALU64 | BPF_AND | BPF_K:
1275 case BPF_ALU64 | BPF_OR | BPF_K:
1276 case BPF_ALU64 | BPF_XOR | BPF_K:
1277 case BPF_ALU64 | BPF_MUL | BPF_K:
1278 case BPF_ALU64 | BPF_MOV | BPF_K:
1279 case BPF_ALU64 | BPF_DIV | BPF_K:
1280 case BPF_ALU64 | BPF_MOD | BPF_K:
1281 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1282 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1283 *to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX);
1284 break;
1285
1286 case BPF_JMP | BPF_JEQ | BPF_K:
1287 case BPF_JMP | BPF_JNE | BPF_K:
1288 case BPF_JMP | BPF_JGT | BPF_K:
1289 case BPF_JMP | BPF_JLT | BPF_K:
1290 case BPF_JMP | BPF_JGE | BPF_K:
1291 case BPF_JMP | BPF_JLE | BPF_K:
1292 case BPF_JMP | BPF_JSGT | BPF_K:
1293 case BPF_JMP | BPF_JSLT | BPF_K:
1294 case BPF_JMP | BPF_JSGE | BPF_K:
1295 case BPF_JMP | BPF_JSLE | BPF_K:
1296 case BPF_JMP | BPF_JSET | BPF_K:
1297 /* Accommodate for extra offset in case of a backjump. */
1298 off = from->off;
1299 if (off < 0)
1300 off -= 2;
1301 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1302 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1303 *to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
1304 break;
1305
1306 case BPF_JMP32 | BPF_JEQ | BPF_K:
1307 case BPF_JMP32 | BPF_JNE | BPF_K:
1308 case BPF_JMP32 | BPF_JGT | BPF_K:
1309 case BPF_JMP32 | BPF_JLT | BPF_K:
1310 case BPF_JMP32 | BPF_JGE | BPF_K:
1311 case BPF_JMP32 | BPF_JLE | BPF_K:
1312 case BPF_JMP32 | BPF_JSGT | BPF_K:
1313 case BPF_JMP32 | BPF_JSLT | BPF_K:
1314 case BPF_JMP32 | BPF_JSGE | BPF_K:
1315 case BPF_JMP32 | BPF_JSLE | BPF_K:
1316 case BPF_JMP32 | BPF_JSET | BPF_K:
1317 /* Accommodate for extra offset in case of a backjump. */
1318 off = from->off;
1319 if (off < 0)
1320 off -= 2;
1321 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1322 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1323 *to++ = BPF_JMP32_REG(from->code, from->dst_reg, BPF_REG_AX,
1324 off);
1325 break;
1326
1327 case BPF_LD | BPF_IMM | BPF_DW:
1328 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
1329 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1330 *to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
1331 *to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
1332 break;
1333 case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
1334 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
1335 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1336 if (emit_zext)
1337 *to++ = BPF_ZEXT_REG(BPF_REG_AX);
1338 *to++ = BPF_ALU64_REG(BPF_OR, aux[0].dst_reg, BPF_REG_AX);
1339 break;
1340
1341 case BPF_ST | BPF_MEM | BPF_DW:
1342 case BPF_ST | BPF_MEM | BPF_W:
1343 case BPF_ST | BPF_MEM | BPF_H:
1344 case BPF_ST | BPF_MEM | BPF_B:
1345 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1346 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1347 *to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
1348 break;
1349 }
1350 out:
1351 return to - to_buff;
1352 }
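/* Blinding example with a hypothetical immediate: an ALU64 "r1 += 0x1234"
 * is rewritten as
 *
 *	AX  = imm_rnd ^ 0x1234
 *	AX ^= imm_rnd	(AX holds 0x1234 again)
 *	r1 += AX
 *
 * so the original constant never appears verbatim in the image the JIT
 * emits.
 */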
1353
1354 static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
1355 gfp_t gfp_extra_flags)
1356 {
1357 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
1358 struct bpf_prog *fp;
1359
1360 fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags);
1361 if (fp != NULL) {
1362 /* aux->prog still points to the fp_other one, so
1363 * when promoting the clone to the real program,
1364 * this still needs to be adapted.
1365 */
1366 memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
1367 }
1368
1369 return fp;
1370 }
1371
1372 static void bpf_prog_clone_free(struct bpf_prog *fp)
1373 {
1374 /* aux was stolen by the other clone, so we cannot free
1375 * it from this path! It will be freed eventually by the
1376 * other program on release.
1377 *
1378 * At this point, we don't need a deferred release since
1379 * clone is guaranteed to not be locked.
1380 */
1381 fp->aux = NULL;
1382 fp->stats = NULL;
1383 fp->active = NULL;
1384 __bpf_prog_free(fp);
1385 }
1386
1387 void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
1388 {
1389 /* We have to repoint aux->prog to self, as we don't
1390 * know whether fp here is the clone or the original.
1391 */
1392 fp->aux->prog = fp;
1393 bpf_prog_clone_free(fp_other);
1394 }
1395
1396 struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
1397 {
1398 struct bpf_insn insn_buff[16], aux[2];
1399 struct bpf_prog *clone, *tmp;
1400 int insn_delta, insn_cnt;
1401 struct bpf_insn *insn;
1402 int i, rewritten;
1403
1404 if (!prog->blinding_requested || prog->blinded)
1405 return prog;
1406
1407 clone = bpf_prog_clone_create(prog, GFP_USER);
1408 if (!clone)
1409 return ERR_PTR(-ENOMEM);
1410
1411 insn_cnt = clone->len;
1412 insn = clone->insnsi;
1413
1414 for (i = 0; i < insn_cnt; i++, insn++) {
1415 if (bpf_pseudo_func(insn)) {
1416 /* ld_imm64 with an address of bpf subprog is not
1417 * a user controlled constant. Don't randomize it,
1418 * since it will conflict with jit_subprogs() logic.
1419 */
1420 insn++;
1421 i++;
1422 continue;
1423 }
1424
1425 /* We temporarily need to hold the original ld64 insn
1426 * so that we can still access the first part in the
1427 * second blinding run.
1428 */
1429 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
1430 insn[1].code == 0)
1431 memcpy(aux, insn, sizeof(aux));
1432
1433 rewritten = bpf_jit_blind_insn(insn, aux, insn_buff,
1434 clone->aux->verifier_zext);
1435 if (!rewritten)
1436 continue;
1437
1438 tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
1439 if (IS_ERR(tmp)) {
1440 /* Patching may have repointed aux->prog during
1441 * realloc from the original one, so we need to
1442 * fix it up here on error.
1443 */
1444 bpf_jit_prog_release_other(prog, clone);
1445 return tmp;
1446 }
1447
1448 clone = tmp;
1449 insn_delta = rewritten - 1;
1450
1451 /* Walk new program and skip insns we just inserted. */
1452 insn = clone->insnsi + i + insn_delta;
1453 insn_cnt += insn_delta;
1454 i += insn_delta;
1455 }
1456
1457 clone->blinded = 1;
1458 return clone;
1459 }
1460 #endif /* CONFIG_BPF_JIT */
1461
1462 /* Base function for offset calculation. Needs to go into .text section,
1463 * therefore keeping it non-static as well; will also be used by JITs
1464 * anyway later on, so do not let the compiler omit it. This also needs
1465 * to go into kallsyms for correlation from e.g. bpftool, so naming
1466 * must not change.
1467 */
1468 noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
1469 {
1470 return 0;
1471 }
1472 EXPORT_SYMBOL_GPL(__bpf_call_base);
1473
1474 /* All UAPI available opcodes. */
1475 #define BPF_INSN_MAP(INSN_2, INSN_3) \
1476 /* 32 bit ALU operations. */ \
1477 /* Register based. */ \
1478 INSN_3(ALU, ADD, X), \
1479 INSN_3(ALU, SUB, X), \
1480 INSN_3(ALU, AND, X), \
1481 INSN_3(ALU, OR, X), \
1482 INSN_3(ALU, LSH, X), \
1483 INSN_3(ALU, RSH, X), \
1484 INSN_3(ALU, XOR, X), \
1485 INSN_3(ALU, MUL, X), \
1486 INSN_3(ALU, MOV, X), \
1487 INSN_3(ALU, ARSH, X), \
1488 INSN_3(ALU, DIV, X), \
1489 INSN_3(ALU, MOD, X), \
1490 INSN_2(ALU, NEG), \
1491 INSN_3(ALU, END, TO_BE), \
1492 INSN_3(ALU, END, TO_LE), \
1493 /* Immediate based. */ \
1494 INSN_3(ALU, ADD, K), \
1495 INSN_3(ALU, SUB, K), \
1496 INSN_3(ALU, AND, K), \
1497 INSN_3(ALU, OR, K), \
1498 INSN_3(ALU, LSH, K), \
1499 INSN_3(ALU, RSH, K), \
1500 INSN_3(ALU, XOR, K), \
1501 INSN_3(ALU, MUL, K), \
1502 INSN_3(ALU, MOV, K), \
1503 INSN_3(ALU, ARSH, K), \
1504 INSN_3(ALU, DIV, K), \
1505 INSN_3(ALU, MOD, K), \
1506 /* 64 bit ALU operations. */ \
1507 /* Register based. */ \
1508 INSN_3(ALU64, ADD, X), \
1509 INSN_3(ALU64, SUB, X), \
1510 INSN_3(ALU64, AND, X), \
1511 INSN_3(ALU64, OR, X), \
1512 INSN_3(ALU64, LSH, X), \
1513 INSN_3(ALU64, RSH, X), \
1514 INSN_3(ALU64, XOR, X), \
1515 INSN_3(ALU64, MUL, X), \
1516 INSN_3(ALU64, MOV, X), \
1517 INSN_3(ALU64, ARSH, X), \
1518 INSN_3(ALU64, DIV, X), \
1519 INSN_3(ALU64, MOD, X), \
1520 INSN_2(ALU64, NEG), \
1521 /* Immediate based. */ \
1522 INSN_3(ALU64, ADD, K), \
1523 INSN_3(ALU64, SUB, K), \
1524 INSN_3(ALU64, AND, K), \
1525 INSN_3(ALU64, OR, K), \
1526 INSN_3(ALU64, LSH, K), \
1527 INSN_3(ALU64, RSH, K), \
1528 INSN_3(ALU64, XOR, K), \
1529 INSN_3(ALU64, MUL, K), \
1530 INSN_3(ALU64, MOV, K), \
1531 INSN_3(ALU64, ARSH, K), \
1532 INSN_3(ALU64, DIV, K), \
1533 INSN_3(ALU64, MOD, K), \
1534 /* Call instruction. */ \
1535 INSN_2(JMP, CALL), \
1536 /* Exit instruction. */ \
1537 INSN_2(JMP, EXIT), \
1538 /* 32-bit Jump instructions. */ \
1539 /* Register based. */ \
1540 INSN_3(JMP32, JEQ, X), \
1541 INSN_3(JMP32, JNE, X), \
1542 INSN_3(JMP32, JGT, X), \
1543 INSN_3(JMP32, JLT, X), \
1544 INSN_3(JMP32, JGE, X), \
1545 INSN_3(JMP32, JLE, X), \
1546 INSN_3(JMP32, JSGT, X), \
1547 INSN_3(JMP32, JSLT, X), \
1548 INSN_3(JMP32, JSGE, X), \
1549 INSN_3(JMP32, JSLE, X), \
1550 INSN_3(JMP32, JSET, X), \
1551 /* Immediate based. */ \
1552 INSN_3(JMP32, JEQ, K), \
1553 INSN_3(JMP32, JNE, K), \
1554 INSN_3(JMP32, JGT, K), \
1555 INSN_3(JMP32, JLT, K), \
1556 INSN_3(JMP32, JGE, K), \
1557 INSN_3(JMP32, JLE, K), \
1558 INSN_3(JMP32, JSGT, K), \
1559 INSN_3(JMP32, JSLT, K), \
1560 INSN_3(JMP32, JSGE, K), \
1561 INSN_3(JMP32, JSLE, K), \
1562 INSN_3(JMP32, JSET, K), \
1563 /* Jump instructions. */ \
1564 /* Register based. */ \
1565 INSN_3(JMP, JEQ, X), \
1566 INSN_3(JMP, JNE, X), \
1567 INSN_3(JMP, JGT, X), \
1568 INSN_3(JMP, JLT, X), \
1569 INSN_3(JMP, JGE, X), \
1570 INSN_3(JMP, JLE, X), \
1571 INSN_3(JMP, JSGT, X), \
1572 INSN_3(JMP, JSLT, X), \
1573 INSN_3(JMP, JSGE, X), \
1574 INSN_3(JMP, JSLE, X), \
1575 INSN_3(JMP, JSET, X), \
1576 /* Immediate based. */ \
1577 INSN_3(JMP, JEQ, K), \
1578 INSN_3(JMP, JNE, K), \
1579 INSN_3(JMP, JGT, K), \
1580 INSN_3(JMP, JLT, K), \
1581 INSN_3(JMP, JGE, K), \
1582 INSN_3(JMP, JLE, K), \
1583 INSN_3(JMP, JSGT, K), \
1584 INSN_3(JMP, JSLT, K), \
1585 INSN_3(JMP, JSGE, K), \
1586 INSN_3(JMP, JSLE, K), \
1587 INSN_3(JMP, JSET, K), \
1588 INSN_2(JMP, JA), \
1589 /* Store instructions. */ \
1590 /* Register based. */ \
1591 INSN_3(STX, MEM, B), \
1592 INSN_3(STX, MEM, H), \
1593 INSN_3(STX, MEM, W), \
1594 INSN_3(STX, MEM, DW), \
1595 INSN_3(STX, ATOMIC, W), \
1596 INSN_3(STX, ATOMIC, DW), \
1597 /* Immediate based. */ \
1598 INSN_3(ST, MEM, B), \
1599 INSN_3(ST, MEM, H), \
1600 INSN_3(ST, MEM, W), \
1601 INSN_3(ST, MEM, DW), \
1602 /* Load instructions. */ \
1603 /* Register based. */ \
1604 INSN_3(LDX, MEM, B), \
1605 INSN_3(LDX, MEM, H), \
1606 INSN_3(LDX, MEM, W), \
1607 INSN_3(LDX, MEM, DW), \
1608 /* Immediate based. */ \
1609 INSN_3(LD, IMM, DW)
1610
1611 bool bpf_opcode_in_insntable(u8 code)
1612 {
1613 #define BPF_INSN_2_TBL(x, y) [BPF_##x | BPF_##y] = true
1614 #define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true
1615 static const bool public_insntable[256] = {
1616 [0 ... 255] = false,
1617 /* Now overwrite non-defaults ... */
1618 BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL),
1619 /* UAPI exposed, but rewritten opcodes. cBPF carry-over. */
1620 [BPF_LD | BPF_ABS | BPF_B] = true,
1621 [BPF_LD | BPF_ABS | BPF_H] = true,
1622 [BPF_LD | BPF_ABS | BPF_W] = true,
1623 [BPF_LD | BPF_IND | BPF_B] = true,
1624 [BPF_LD | BPF_IND | BPF_H] = true,
1625 [BPF_LD | BPF_IND | BPF_W] = true,
1626 };
1627 #undef BPF_INSN_3_TBL
1628 #undef BPF_INSN_2_TBL
1629 return public_insntable[code];
1630 }
1631
1632 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
1633 u64 __weak bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)
1634 {
1635 memset(dst, 0, size);
1636 return -EFAULT;
1637 }
1638
1639 /**
1640 * ___bpf_prog_run - run eBPF program on a given context
1641 * @regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers
1642 * @insn: is the array of eBPF instructions
1643 *
1644 * Decode and execute eBPF instructions.
1645 *
1646 * Return: whatever value is in %BPF_R0 at program exit
1647 */
1648 static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
1649 {
1650 #define BPF_INSN_2_LBL(x, y) [BPF_##x | BPF_##y] = &&x##_##y
1651 #define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
1652 static const void * const jumptable[256] __annotate_jump_table = {
1653 [0 ... 255] = &&default_label,
1654 /* Now overwrite non-defaults ... */
1655 BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
1656 /* Non-UAPI available opcodes. */
1657 [BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
1658 [BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
1659 [BPF_ST | BPF_NOSPEC] = &&ST_NOSPEC,
1660 [BPF_LDX | BPF_PROBE_MEM | BPF_B] = &&LDX_PROBE_MEM_B,
1661 [BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H,
1662 [BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W,
1663 [BPF_LDX | BPF_PROBE_MEM | BPF_DW] = &&LDX_PROBE_MEM_DW,
1664 };
1665 #undef BPF_INSN_3_LBL
1666 #undef BPF_INSN_2_LBL
1667 u32 tail_call_cnt = 0;
1668
1669 #define CONT ({ insn++; goto select_insn; })
1670 #define CONT_JMP ({ insn++; goto select_insn; })
1671
1672 select_insn:
1673 goto *jumptable[insn->code];
1674
1675 /* Explicitly mask the register-based shift amounts with 63 or 31
1676 * to avoid undefined behavior. Normally this won't affect the
1677 * generated code, for example, in case of native 64 bit archs such
1678 * as x86-64 or arm64, the compiler is optimizing the AND away for
1679 * the interpreter. In case of JITs, each of the JIT backends compiles
1680 * the BPF shift operations to machine instructions which produce
1681 * implementation-defined results in such a case; the resulting
1682 * contents of the register may be arbitrary, but program behaviour
1683 * as a whole remains defined. In other words, in case of JIT backends,
1684 * the AND must /not/ be added to the emitted LSH/RSH/ARSH translation.
1685 */
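/* For example, a 64-bit LSH by a register holding 70 shifts by 70 & 63 = 6
 * in the interpreter, while the result a JIT produces for such an
 * out-of-range amount is left to the architecture.
 */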
1686 /* ALU (shifts) */
1687 #define SHT(OPCODE, OP) \
1688 ALU64_##OPCODE##_X: \
1689 DST = DST OP (SRC & 63); \
1690 CONT; \
1691 ALU_##OPCODE##_X: \
1692 DST = (u32) DST OP ((u32) SRC & 31); \
1693 CONT; \
1694 ALU64_##OPCODE##_K: \
1695 DST = DST OP IMM; \
1696 CONT; \
1697 ALU_##OPCODE##_K: \
1698 DST = (u32) DST OP (u32) IMM; \
1699 CONT;
1700 /* ALU (rest) */
1701 #define ALU(OPCODE, OP) \
1702 ALU64_##OPCODE##_X: \
1703 DST = DST OP SRC; \
1704 CONT; \
1705 ALU_##OPCODE##_X: \
1706 DST = (u32) DST OP (u32) SRC; \
1707 CONT; \
1708 ALU64_##OPCODE##_K: \
1709 DST = DST OP IMM; \
1710 CONT; \
1711 ALU_##OPCODE##_K: \
1712 DST = (u32) DST OP (u32) IMM; \
1713 CONT;
1714 ALU(ADD, +)
1715 ALU(SUB, -)
1716 ALU(AND, &)
1717 ALU(OR, |)
1718 ALU(XOR, ^)
1719 ALU(MUL, *)
1720 SHT(LSH, <<)
1721 SHT(RSH, >>)
1722 #undef SHT
1723 #undef ALU
1724 ALU_NEG:
1725 DST = (u32) -DST;
1726 CONT;
1727 ALU64_NEG:
1728 DST = -DST;
1729 CONT;
1730 ALU_MOV_X:
1731 DST = (u32) SRC;
1732 CONT;
1733 ALU_MOV_K:
1734 DST = (u32) IMM;
1735 CONT;
1736 ALU64_MOV_X:
1737 DST = SRC;
1738 CONT;
1739 ALU64_MOV_K:
1740 DST = IMM;
1741 CONT;
1742 LD_IMM_DW:
1743 DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
1744 insn++;
1745 CONT;
1746 ALU_ARSH_X:
1747 DST = (u64) (u32) (((s32) DST) >> (SRC & 31));
1748 CONT;
1749 ALU_ARSH_K:
1750 DST = (u64) (u32) (((s32) DST) >> IMM);
1751 CONT;
1752 ALU64_ARSH_X:
1753 (*(s64 *) &DST) >>= (SRC & 63);
1754 CONT;
1755 ALU64_ARSH_K:
1756 (*(s64 *) &DST) >>= IMM;
1757 CONT;
1758 ALU64_MOD_X:
1759 div64_u64_rem(DST, SRC, &AX);
1760 DST = AX;
1761 CONT;
1762 ALU_MOD_X:
1763 AX = (u32) DST;
1764 DST = do_div(AX, (u32) SRC);
1765 CONT;
1766 ALU64_MOD_K:
1767 div64_u64_rem(DST, IMM, &AX);
1768 DST = AX;
1769 CONT;
1770 ALU_MOD_K:
1771 AX = (u32) DST;
1772 DST = do_div(AX, (u32) IMM);
1773 CONT;
1774 ALU64_DIV_X:
1775 DST = div64_u64(DST, SRC);
1776 CONT;
1777 ALU_DIV_X:
1778 AX = (u32) DST;
1779 do_div(AX, (u32) SRC);
1780 DST = (u32) AX;
1781 CONT;
1782 ALU64_DIV_K:
1783 DST = div64_u64(DST, IMM);
1784 CONT;
1785 ALU_DIV_K:
1786 AX = (u32) DST;
1787 do_div(AX, (u32) IMM);
1788 DST = (u32) AX;
1789 CONT;
1790 ALU_END_TO_BE:
1791 switch (IMM) {
1792 case 16:
1793 DST = (__force u16) cpu_to_be16(DST);
1794 break;
1795 case 32:
1796 DST = (__force u32) cpu_to_be32(DST);
1797 break;
1798 case 64:
1799 DST = (__force u64) cpu_to_be64(DST);
1800 break;
1801 }
1802 CONT;
1803 ALU_END_TO_LE:
1804 switch (IMM) {
1805 case 16:
1806 DST = (__force u16) cpu_to_le16(DST);
1807 break;
1808 case 32:
1809 DST = (__force u32) cpu_to_le32(DST);
1810 break;
1811 case 64:
1812 DST = (__force u64) cpu_to_le64(DST);
1813 break;
1814 }
1815 CONT;
1816
1817 /* CALL */
1818 JMP_CALL:
1819 /* Function call scratches BPF_R1-BPF_R5 registers,
1820 * preserves BPF_R6-BPF_R9, and stores return value
1821 * into BPF_R0.
1822 */
1823 BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
1824 BPF_R4, BPF_R5);
1825 CONT;
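/* Illustrative sketch only (no such helper is defined here): a helper
 * reached via JMP_CALL behaves as if declared as
 *
 *	u64 some_helper(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
 *
 * so a program must treat R1-R5 as clobbered after the call, keep any
 * live values in R6-R9 or on the stack, and read the result from R0.
 */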
1826
1827 JMP_CALL_ARGS:
1828 BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
1829 BPF_R3, BPF_R4,
1830 BPF_R5,
1831 insn + insn->off + 1);
1832 CONT;
1833
1834 JMP_TAIL_CALL: {
1835 struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
1836 struct bpf_array *array = container_of(map, struct bpf_array, map);
1837 struct bpf_prog *prog;
1838 u32 index = BPF_R3;
1839
1840 if (unlikely(index >= array->map.max_entries))
1841 goto out;
1842
1843 if (unlikely(tail_call_cnt >= MAX_TAIL_CALL_CNT))
1844 goto out;
1845
1846 tail_call_cnt++;
1847
1848 prog = READ_ONCE(array->ptrs[index]);
1849 if (!prog)
1850 goto out;
1851
1852 /* ARG1 at this point is guaranteed to point to CTX from
1853 * the verifier side because the tail call is handled like
1854 * a helper, that is, bpf_tail_call_proto, where arg1_type
1855 * is ARG_PTR_TO_CTX.
1856 */
1857 insn = prog->insnsi;
1858 goto select_insn;
1859 out:
1860 CONT;
1861 }
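/* Hedged usage sketch from the BPF program side (map and index names are
 * hypothetical): a program calls
 *
 *	bpf_tail_call(ctx, &prog_array_map, index);
 *
 * which ends up in JMP_TAIL_CALL above. On success control transfers to
 * the target program and never returns to the caller; on an out-of-range
 * index, an empty slot or more than MAX_TAIL_CALL_CNT chained calls,
 * execution falls through to the next instruction via the "out" label.
 */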
1862 JMP_JA:
1863 insn += insn->off;
1864 CONT;
1865 JMP_EXIT:
1866 return BPF_R0;
1867 /* JMP */
1868 #define COND_JMP(SIGN, OPCODE, CMP_OP) \
1869 JMP_##OPCODE##_X: \
1870 if ((SIGN##64) DST CMP_OP (SIGN##64) SRC) { \
1871 insn += insn->off; \
1872 CONT_JMP; \
1873 } \
1874 CONT; \
1875 JMP32_##OPCODE##_X: \
1876 if ((SIGN##32) DST CMP_OP (SIGN##32) SRC) { \
1877 insn += insn->off; \
1878 CONT_JMP; \
1879 } \
1880 CONT; \
1881 JMP_##OPCODE##_K: \
1882 if ((SIGN##64) DST CMP_OP (SIGN##64) IMM) { \
1883 insn += insn->off; \
1884 CONT_JMP; \
1885 } \
1886 CONT; \
1887 JMP32_##OPCODE##_K: \
1888 if ((SIGN##32) DST CMP_OP (SIGN##32) IMM) { \
1889 insn += insn->off; \
1890 CONT_JMP; \
1891 } \
1892 CONT;
1893 COND_JMP(u, JEQ, ==)
1894 COND_JMP(u, JNE, !=)
1895 COND_JMP(u, JGT, >)
1896 COND_JMP(u, JLT, <)
1897 COND_JMP(u, JGE, >=)
1898 COND_JMP(u, JLE, <=)
1899 COND_JMP(u, JSET, &)
1900 COND_JMP(s, JSGT, >)
1901 COND_JMP(s, JSLT, <)
1902 COND_JMP(s, JSGE, >=)
1903 COND_JMP(s, JSLE, <=)
1904 #undef COND_JMP
1905 /* ST, STX and LDX */
1906 ST_NOSPEC:
1907 /* Speculation barrier for mitigating Speculative Store Bypass.
1908 * In case of arm64, we rely on the firmware mitigation as
1909 * controlled via the ssbd kernel parameter. Whenever the
1910 * mitigation is enabled, it works for all of the kernel code
1911 * with no need to provide any additional instructions here.
1912 * In case of x86, we use 'lfence' insn for mitigation. We
1913 * reuse preexisting logic from Spectre v1 mitigation that
1914 * happens to produce the required code on x86 for v4 as well.
1915 */
1916 barrier_nospec();
1917 CONT;
1918 #define LDST(SIZEOP, SIZE) \
1919 STX_MEM_##SIZEOP: \
1920 *(SIZE *)(unsigned long) (DST + insn->off) = SRC; \
1921 CONT; \
1922 ST_MEM_##SIZEOP: \
1923 *(SIZE *)(unsigned long) (DST + insn->off) = IMM; \
1924 CONT; \
1925 LDX_MEM_##SIZEOP: \
1926 DST = *(SIZE *)(unsigned long) (SRC + insn->off); \
1927 CONT; \
1928 LDX_PROBE_MEM_##SIZEOP: \
1929 bpf_probe_read_kernel(&DST, sizeof(SIZE), \
1930 (const void *)(long) (SRC + insn->off)); \
1931 DST = *((SIZE *)&DST); \
1932 CONT;
1933
1934 LDST(B, u8)
1935 LDST(H, u16)
1936 LDST(W, u32)
1937 LDST(DW, u64)
1938 #undef LDST
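/* For reference, one LDST() expansion, e.g. LDST(W, u32), yields the 32-bit
 * store, immediate store, load and probe-load handlers along the lines of
 *
 *	STX_MEM_W:	*(u32 *)(unsigned long) (DST + insn->off) = SRC;
 *	ST_MEM_W:	*(u32 *)(unsigned long) (DST + insn->off) = IMM;
 *	LDX_MEM_W:	DST = *(u32 *)(unsigned long) (SRC + insn->off);
 *
 * with LDX_PROBE_MEM_W additionally going through bpf_probe_read_kernel().
 */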
1939
1940 #define ATOMIC_ALU_OP(BOP, KOP) \
1941 case BOP: \
1942 if (BPF_SIZE(insn->code) == BPF_W) \
1943 atomic_##KOP((u32) SRC, (atomic_t *)(unsigned long) \
1944 (DST + insn->off)); \
1945 else \
1946 atomic64_##KOP((u64) SRC, (atomic64_t *)(unsigned long) \
1947 (DST + insn->off)); \
1948 break; \
1949 case BOP | BPF_FETCH: \
1950 if (BPF_SIZE(insn->code) == BPF_W) \
1951 SRC = (u32) atomic_fetch_##KOP( \
1952 (u32) SRC, \
1953 (atomic_t *)(unsigned long) (DST + insn->off)); \
1954 else \
1955 SRC = (u64) atomic64_fetch_##KOP( \
1956 (u64) SRC, \
1957 (atomic64_t *)(unsigned long) (DST + insn->off)); \
1958 break;
1959
1960 STX_ATOMIC_DW:
1961 STX_ATOMIC_W:
1962 switch (IMM) {
1963 ATOMIC_ALU_OP(BPF_ADD, add)
1964 ATOMIC_ALU_OP(BPF_AND, and)
1965 ATOMIC_ALU_OP(BPF_OR, or)
1966 ATOMIC_ALU_OP(BPF_XOR, xor)
1967 #undef ATOMIC_ALU_OP
1968
1969 case BPF_XCHG:
1970 if (BPF_SIZE(insn->code) == BPF_W)
1971 SRC = (u32) atomic_xchg(
1972 (atomic_t *)(unsigned long) (DST + insn->off),
1973 (u32) SRC);
1974 else
1975 SRC = (u64) atomic64_xchg(
1976 (atomic64_t *)(unsigned long) (DST + insn->off),
1977 (u64) SRC);
1978 break;
1979 case BPF_CMPXCHG:
1980 if (BPF_SIZE(insn->code) == BPF_W)
1981 BPF_R0 = (u32) atomic_cmpxchg(
1982 (atomic_t *)(unsigned long) (DST + insn->off),
1983 (u32) BPF_R0, (u32) SRC);
1984 else
1985 BPF_R0 = (u64) atomic64_cmpxchg(
1986 (atomic64_t *)(unsigned long) (DST + insn->off),
1987 (u64) BPF_R0, (u64) SRC);
1988 break;
1989
1990 default:
1991 goto default_label;
1992 }
1993 CONT;
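/* Semantics sketch for the atomic handlers above (64-bit add as example):
 * without BPF_FETCH only memory is updated,
 *
 *	atomic64_add(SRC, (atomic64_t *)(DST + insn->off));
 *
 * whereas with BPF_FETCH the previous memory value is additionally
 * returned in the source register via atomic64_fetch_add().
 */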
1994
1995 default_label:
1996 /* If we ever reach this, we have a bug somewhere. Die hard here
1997 * instead of just returning 0; we could be somewhere in a subprog,
1998 * so execution could otherwise continue, which we do /not/ want.
1999 *
2000 * Note that the verifier whitelists all opcodes in bpf_opcode_in_insntable().
2001 */
2002 pr_warn("BPF interpreter: unknown opcode %02x (imm: 0x%x)\n",
2003 insn->code, insn->imm);
2004 BUG_ON(1);
2005 return 0;
2006 }
2007
2008 #define PROG_NAME(stack_size) __bpf_prog_run##stack_size
2009 #define DEFINE_BPF_PROG_RUN(stack_size) \
2010 static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
2011 { \
2012 u64 stack[stack_size / sizeof(u64)]; \
2013 u64 regs[MAX_BPF_EXT_REG] = {}; \
2014 \
2015 FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
2016 ARG1 = (u64) (unsigned long) ctx; \
2017 return ___bpf_prog_run(regs, insn); \
2018 }
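/* As an illustration, DEFINE_BPF_PROG_RUN(512) expands (macro plumbing
 * aside) to roughly
 *
 *	static unsigned int __bpf_prog_run512(const void *ctx,
 *					      const struct bpf_insn *insn)
 *	{
 *		u64 stack[512 / sizeof(u64)];
 *		u64 regs[MAX_BPF_EXT_REG] = {};
 *
 *		FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)];
 *		ARG1 = (u64) (unsigned long) ctx;
 *		return ___bpf_prog_run(regs, insn);
 *	}
 *
 * i.e. one interpreter entry point per supported stack size.
 */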
2019
2020 #define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
2021 #define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
2022 static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
2023 const struct bpf_insn *insn) \
2024 { \
2025 u64 stack[stack_size / sizeof(u64)]; \
2026 u64 regs[MAX_BPF_EXT_REG]; \
2027 \
2028 FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
2029 BPF_R1 = r1; \
2030 BPF_R2 = r2; \
2031 BPF_R3 = r3; \
2032 BPF_R4 = r4; \
2033 BPF_R5 = r5; \
2034 return ___bpf_prog_run(regs, insn); \
2035 }
2036
2037 #define EVAL1(FN, X) FN(X)
2038 #define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
2039 #define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
2040 #define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
2041 #define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
2042 #define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)
2043
2044 EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
2045 EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
2046 EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);
2047
2048 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
2049 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
2050 EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);
2051
2052 #define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),
2053
2054 static unsigned int (*interpreters[])(const void *ctx,
2055 const struct bpf_insn *insn) = {
2056 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
2057 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
2058 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
2059 };
2060 #undef PROG_NAME_LIST
2061 #define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
2062 static u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
2063 const struct bpf_insn *insn) = {
2064 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
2065 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
2066 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
2067 };
2068 #undef PROG_NAME_LIST
2069
2070 void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
2071 {
2072 stack_depth = max_t(u32, stack_depth, 1);
2073 insn->off = (s16) insn->imm;
2074 insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
2075 __bpf_call_base_args;
2076 insn->code = BPF_JMP | BPF_CALL_ARGS;
2077 }
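/* Worked example of the index math above: a stack_depth of 65 rounds up to
 * 96, so interpreters_args[96 / 32 - 1] selects __bpf_prog_run_args96 as
 * the call target encoded into insn->imm.
 */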
2078
2079 #else
2080 static unsigned int __bpf_prog_ret0_warn(const void *ctx,
2081 const struct bpf_insn *insn)
2082 {
2083 /* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
2084 * is not working properly, so warn about it!
2085 */
2086 WARN_ON_ONCE(1);
2087 return 0;
2088 }
2089 #endif
2090
2091 bool bpf_prog_map_compatible(struct bpf_map *map,
2092 const struct bpf_prog *fp)
2093 {
2094 enum bpf_prog_type prog_type = resolve_prog_type(fp);
2095 bool ret;
2096
2097 if (fp->kprobe_override)
2098 return false;
2099
2100 spin_lock(&map->owner.lock);
2101 if (!map->owner.type) {
2102 /* There's no owner yet where we could check for
2103 * compatibility.
2104 */
2105 map->owner.type = prog_type;
2106 map->owner.jited = fp->jited;
2107 map->owner.xdp_has_frags = fp->aux->xdp_has_frags;
2108 ret = true;
2109 } else {
2110 ret = map->owner.type == prog_type &&
2111 map->owner.jited == fp->jited &&
2112 map->owner.xdp_has_frags == fp->aux->xdp_has_frags;
2113 }
2114 spin_unlock(&map->owner.lock);
2115
2116 return ret;
2117 }
2118
2119 static int bpf_check_tail_call(const struct bpf_prog *fp)
2120 {
2121 struct bpf_prog_aux *aux = fp->aux;
2122 int i, ret = 0;
2123
2124 mutex_lock(&aux->used_maps_mutex);
2125 for (i = 0; i < aux->used_map_cnt; i++) {
2126 struct bpf_map *map = aux->used_maps[i];
2127
2128 if (!map_type_contains_progs(map))
2129 continue;
2130
2131 if (!bpf_prog_map_compatible(map, fp)) {
2132 ret = -EINVAL;
2133 goto out;
2134 }
2135 }
2136
2137 out:
2138 mutex_unlock(&aux->used_maps_mutex);
2139 return ret;
2140 }
2141
2142 static void bpf_prog_select_func(struct bpf_prog *fp)
2143 {
2144 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
2145 u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
2146
2147 fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
2148 #else
2149 fp->bpf_func = __bpf_prog_ret0_warn;
2150 #endif
2151 }
2152
2153 /**
2154 * bpf_prog_select_runtime - select exec runtime for BPF program
2155 * @fp: bpf_prog populated with BPF program
2156 * @err: pointer to error variable
2157 *
2158 * Try to JIT the eBPF program; if JIT is not available, use the interpreter.
2159 * The BPF program will be executed via bpf_prog_run() function.
2160 *
2161 * Return: the &fp argument along with &err set to 0 for success or
2162 * a negative errno code on failure
2163 */
2164 struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
2165 {
2166 /* In case of BPF to BPF calls, the verifier did all the prep
2167 * work with regard to JITing, etc.
2168 */
2169 bool jit_needed = false;
2170
2171 if (fp->bpf_func)
2172 goto finalize;
2173
2174 if (IS_ENABLED(CONFIG_BPF_JIT_ALWAYS_ON) ||
2175 bpf_prog_has_kfunc_call(fp))
2176 jit_needed = true;
2177
2178 bpf_prog_select_func(fp);
2179
2180 /* eBPF JITs can rewrite the program in case constant
2181 * blinding is active. However, in case of error during
2182 * blinding, bpf_int_jit_compile() must always return a
2183 * valid program, which in this case would simply not
2184 * be JITed, but falls back to the interpreter.
2185 */
2186 if (!bpf_prog_is_dev_bound(fp->aux)) {
2187 *err = bpf_prog_alloc_jited_linfo(fp);
2188 if (*err)
2189 return fp;
2190
2191 fp = bpf_int_jit_compile(fp);
2192 bpf_prog_jit_attempt_done(fp);
2193 if (!fp->jited && jit_needed) {
2194 *err = -ENOTSUPP;
2195 return fp;
2196 }
2197 } else {
2198 *err = bpf_prog_offload_compile(fp);
2199 if (*err)
2200 return fp;
2201 }
2202
2203 finalize:
2204 bpf_prog_lock_ro(fp);
2205
2206 /* The tail call compatibility check can only be done at
2207 * this late stage as we need to determine whether we deal
2208 * with JITed or non-JITed program concatenations, and not
2209 * all eBPF JITs might immediately support all features.
2210 */
2211 *err = bpf_check_tail_call(fp);
2212
2213 return fp;
2214 }
2215 EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
2216
2217 static unsigned int __bpf_prog_ret1(const void *ctx,
2218 const struct bpf_insn *insn)
2219 {
2220 return 1;
2221 }
2222
2223 static struct bpf_prog_dummy {
2224 struct bpf_prog prog;
2225 } dummy_bpf_prog = {
2226 .prog = {
2227 .bpf_func = __bpf_prog_ret1,
2228 },
2229 };
2230
2231 struct bpf_empty_prog_array bpf_empty_prog_array = {
2232 .null_prog = NULL,
2233 };
2234 EXPORT_SYMBOL(bpf_empty_prog_array);
2235
2236 struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
2237 {
2238 if (prog_cnt)
2239 return kzalloc(sizeof(struct bpf_prog_array) +
2240 sizeof(struct bpf_prog_array_item) *
2241 (prog_cnt + 1),
2242 flags);
2243
2244 return &bpf_empty_prog_array.hdr;
2245 }
2246
2247 void bpf_prog_array_free(struct bpf_prog_array *progs)
2248 {
2249 if (!progs || progs == &bpf_empty_prog_array.hdr)
2250 return;
2251 kfree_rcu(progs, rcu);
2252 }
2253
2254 static void __bpf_prog_array_free_sleepable_cb(struct rcu_head *rcu)
2255 {
2256 struct bpf_prog_array *progs;
2257
2258 progs = container_of(rcu, struct bpf_prog_array, rcu);
2259 kfree_rcu(progs, rcu);
2260 }
2261
2262 void bpf_prog_array_free_sleepable(struct bpf_prog_array *progs)
2263 {
2264 if (!progs || progs == &bpf_empty_prog_array.hdr)
2265 return;
2266 call_rcu_tasks_trace(&progs->rcu, __bpf_prog_array_free_sleepable_cb);
2267 }
2268
2269 int bpf_prog_array_length(struct bpf_prog_array *array)
2270 {
2271 struct bpf_prog_array_item *item;
2272 u32 cnt = 0;
2273
2274 for (item = array->items; item->prog; item++)
2275 if (item->prog != &dummy_bpf_prog.prog)
2276 cnt++;
2277 return cnt;
2278 }
2279
2280 bool bpf_prog_array_is_empty(struct bpf_prog_array *array)
2281 {
2282 struct bpf_prog_array_item *item;
2283
2284 for (item = array->items; item->prog; item++)
2285 if (item->prog != &dummy_bpf_prog.prog)
2286 return false;
2287 return true;
2288 }
2289
2290 static bool bpf_prog_array_copy_core(struct bpf_prog_array *array,
2291 u32 *prog_ids,
2292 u32 request_cnt)
2293 {
2294 struct bpf_prog_array_item *item;
2295 int i = 0;
2296
2297 for (item = array->items; item->prog; item++) {
2298 if (item->prog == &dummy_bpf_prog.prog)
2299 continue;
2300 prog_ids[i] = item->prog->aux->id;
2301 if (++i == request_cnt) {
2302 item++;
2303 break;
2304 }
2305 }
2306
2307 return !!(item->prog);
2308 }
2309
2310 int bpf_prog_array_copy_to_user(struct bpf_prog_array *array,
2311 __u32 __user *prog_ids, u32 cnt)
2312 {
2313 unsigned long err = 0;
2314 bool nospc;
2315 u32 *ids;
2316
2317 /* users of this function are doing:
2318 * cnt = bpf_prog_array_length();
2319 * if (cnt > 0)
2320 * bpf_prog_array_copy_to_user(..., cnt);
2321 * so the kcalloc below doesn't need an extra cnt > 0 check.
2322 */
2323 ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
2324 if (!ids)
2325 return -ENOMEM;
2326 nospc = bpf_prog_array_copy_core(array, ids, cnt);
2327 err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
2328 kfree(ids);
2329 if (err)
2330 return -EFAULT;
2331 if (nospc)
2332 return -ENOSPC;
2333 return 0;
2334 }
2335
2336 void bpf_prog_array_delete_safe(struct bpf_prog_array *array,
2337 struct bpf_prog *old_prog)
2338 {
2339 struct bpf_prog_array_item *item;
2340
2341 for (item = array->items; item->prog; item++)
2342 if (item->prog == old_prog) {
2343 WRITE_ONCE(item->prog, &dummy_bpf_prog.prog);
2344 break;
2345 }
2346 }
2347
2348 /**
2349 * bpf_prog_array_delete_safe_at() - Replaces the program at the given
2350 * index into the program array with
2351 * a dummy no-op program.
2352 * @array: a bpf_prog_array
2353 * @index: the index of the program to replace
2354 *
2355 * Skips over dummy programs, by not counting them, when calculating
2356 * the position of the program to replace.
2357 *
2358 * Return:
2359 * * 0 - Success
2360 * * -EINVAL - Invalid index value. Must be a non-negative integer.
2361 * * -ENOENT - Index out of range
2362 */
2363 int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index)
2364 {
2365 return bpf_prog_array_update_at(array, index, &dummy_bpf_prog.prog);
2366 }
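/* Illustrative example of the index semantics: for an array laid out as
 * { progA, dummy, progB, NULL }, index 1 refers to progB because the dummy
 * entry is not counted, so bpf_prog_array_delete_safe_at(array, 1) replaces
 * progB with the dummy no-op program.
 */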
2367
2368 /**
2369 * bpf_prog_array_update_at() - Updates the program at the given index
2370 * into the program array.
2371 * @array: a bpf_prog_array
2372 * @index: the index of the program to update
2373 * @prog: the program to insert into the array
2374 *
2375 * Skips over dummy programs, by not counting them, when calculating
2376 * the position of the program to update.
2377 *
2378 * Return:
2379 * * 0 - Success
2380 * * -EINVAL - Invalid index value. Must be a non-negative integer.
2381 * * -ENOENT - Index out of range
2382 */
2383 int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
2384 struct bpf_prog *prog)
2385 {
2386 struct bpf_prog_array_item *item;
2387
2388 if (unlikely(index < 0))
2389 return -EINVAL;
2390
2391 for (item = array->items; item->prog; item++) {
2392 if (item->prog == &dummy_bpf_prog.prog)
2393 continue;
2394 if (!index) {
2395 WRITE_ONCE(item->prog, prog);
2396 return 0;
2397 }
2398 index--;
2399 }
2400 return -ENOENT;
2401 }
2402
2403 int bpf_prog_array_copy(struct bpf_prog_array *old_array,
2404 struct bpf_prog *exclude_prog,
2405 struct bpf_prog *include_prog,
2406 u64 bpf_cookie,
2407 struct bpf_prog_array **new_array)
2408 {
2409 int new_prog_cnt, carry_prog_cnt = 0;
2410 struct bpf_prog_array_item *existing, *new;
2411 struct bpf_prog_array *array;
2412 bool found_exclude = false;
2413
2414 /* Figure out how many existing progs we need to carry over to
2415 * the new array.
2416 */
2417 if (old_array) {
2418 existing = old_array->items;
2419 for (; existing->prog; existing++) {
2420 if (existing->prog == exclude_prog) {
2421 found_exclude = true;
2422 continue;
2423 }
2424 if (existing->prog != &dummy_bpf_prog.prog)
2425 carry_prog_cnt++;
2426 if (existing->prog == include_prog)
2427 return -EEXIST;
2428 }
2429 }
2430
2431 if (exclude_prog && !found_exclude)
2432 return -ENOENT;
2433
2434 /* How many progs (not NULL) will be in the new array? */
2435 new_prog_cnt = carry_prog_cnt;
2436 if (include_prog)
2437 new_prog_cnt += 1;
2438
2439 /* Do we have any prog (not NULL) in the new array? */
2440 if (!new_prog_cnt) {
2441 *new_array = NULL;
2442 return 0;
2443 }
2444
2445 /* +1 as the end of prog_array is marked with NULL */
2446 array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
2447 if (!array)
2448 return -ENOMEM;
2449 new = array->items;
2450
2451 /* Fill in the new prog array */
2452 if (carry_prog_cnt) {
2453 existing = old_array->items;
2454 for (; existing->prog; existing++) {
2455 if (existing->prog == exclude_prog ||
2456 existing->prog == &dummy_bpf_prog.prog)
2457 continue;
2458
2459 new->prog = existing->prog;
2460 new->bpf_cookie = existing->bpf_cookie;
2461 new++;
2462 }
2463 }
2464 if (include_prog) {
2465 new->prog = include_prog;
2466 new->bpf_cookie = bpf_cookie;
2467 new++;
2468 }
2469 new->prog = NULL;
2470 *new_array = array;
2471 return 0;
2472 }
2473
2474 int bpf_prog_array_copy_info(struct bpf_prog_array *array,
2475 u32 *prog_ids, u32 request_cnt,
2476 u32 *prog_cnt)
2477 {
2478 u32 cnt = 0;
2479
2480 if (array)
2481 cnt = bpf_prog_array_length(array);
2482
2483 *prog_cnt = cnt;
2484
2485 /* return early if user requested only program count or nothing to copy */
2486 if (!request_cnt || !cnt)
2487 return 0;
2488
2489 /* this function is called under trace/bpf_trace.c: bpf_event_mutex */
2490 return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? -ENOSPC
2491 : 0;
2492 }
2493
2494 void __bpf_free_used_maps(struct bpf_prog_aux *aux,
2495 struct bpf_map **used_maps, u32 len)
2496 {
2497 struct bpf_map *map;
2498 u32 i;
2499
2500 for (i = 0; i < len; i++) {
2501 map = used_maps[i];
2502 if (map->ops->map_poke_untrack)
2503 map->ops->map_poke_untrack(map, aux);
2504 bpf_map_put(map);
2505 }
2506 }
2507
2508 static void bpf_free_used_maps(struct bpf_prog_aux *aux)
2509 {
2510 __bpf_free_used_maps(aux, aux->used_maps, aux->used_map_cnt);
2511 kfree(aux->used_maps);
2512 }
2513
2514 void __bpf_free_used_btfs(struct bpf_prog_aux *aux,
2515 struct btf_mod_pair *used_btfs, u32 len)
2516 {
2517 #ifdef CONFIG_BPF_SYSCALL
2518 struct btf_mod_pair *btf_mod;
2519 u32 i;
2520
2521 for (i = 0; i < len; i++) {
2522 btf_mod = &used_btfs[i];
2523 if (btf_mod->module)
2524 module_put(btf_mod->module);
2525 btf_put(btf_mod->btf);
2526 }
2527 #endif
2528 }
2529
2530 static void bpf_free_used_btfs(struct bpf_prog_aux *aux)
2531 {
2532 __bpf_free_used_btfs(aux, aux->used_btfs, aux->used_btf_cnt);
2533 kfree(aux->used_btfs);
2534 }
2535
2536 static void bpf_prog_free_deferred(struct work_struct *work)
2537 {
2538 struct bpf_prog_aux *aux;
2539 int i;
2540
2541 aux = container_of(work, struct bpf_prog_aux, work);
2542 #ifdef CONFIG_BPF_SYSCALL
2543 bpf_free_kfunc_btf_tab(aux->kfunc_btf_tab);
2544 #endif
2545 #ifdef CONFIG_CGROUP_BPF
2546 if (aux->cgroup_atype != CGROUP_BPF_ATTACH_TYPE_INVALID)
2547 bpf_cgroup_atype_put(aux->cgroup_atype);
2548 #endif
2549 bpf_free_used_maps(aux);
2550 bpf_free_used_btfs(aux);
2551 if (bpf_prog_is_dev_bound(aux))
2552 bpf_prog_offload_destroy(aux->prog);
2553 #ifdef CONFIG_PERF_EVENTS
2554 if (aux->prog->has_callchain_buf)
2555 put_callchain_buffers();
2556 #endif
2557 if (aux->dst_trampoline)
2558 bpf_trampoline_put(aux->dst_trampoline);
2559 for (i = 0; i < aux->func_cnt; i++) {
2560 /* We can just unlink the subprog poke descriptor table as
2561 * it was originally linked to the main program and is also
2562 * released along with it.
2563 */
2564 aux->func[i]->aux->poke_tab = NULL;
2565 bpf_jit_free(aux->func[i]);
2566 }
2567 if (aux->func_cnt) {
2568 kfree(aux->func);
2569 bpf_prog_unlock_free(aux->prog);
2570 } else {
2571 bpf_jit_free(aux->prog);
2572 }
2573 }
2574
2575 void bpf_prog_free(struct bpf_prog *fp)
2576 {
2577 struct bpf_prog_aux *aux = fp->aux;
2578
2579 if (aux->dst_prog)
2580 bpf_prog_put(aux->dst_prog);
2581 INIT_WORK(&aux->work, bpf_prog_free_deferred);
2582 schedule_work(&aux->work);
2583 }
2584 EXPORT_SYMBOL_GPL(bpf_prog_free);
2585
2586 /* RNG for unprivileged user space with separated state from prandom_u32(). */
2587 static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);
2588
2589 void bpf_user_rnd_init_once(void)
2590 {
2591 prandom_init_once(&bpf_user_rnd_state);
2592 }
2593
2594 BPF_CALL_0(bpf_user_rnd_u32)
2595 {
2596 /* Should someone ever have the rather unwise idea to use some
2597 * of the registers passed into this function, then note that
2598 * this function is called from native eBPF and classic-to-eBPF
2599 * transformations. Register assignments from both sides are
2600 * different, e.g. classic always sets fn(ctx, A, X) here.
2601 */
2602 struct rnd_state *state;
2603 u32 res;
2604
2605 state = &get_cpu_var(bpf_user_rnd_state);
2606 res = prandom_u32_state(state);
2607 put_cpu_var(bpf_user_rnd_state);
2608
2609 return res;
2610 }
2611
2612 BPF_CALL_0(bpf_get_raw_cpu_id)
2613 {
2614 return raw_smp_processor_id();
2615 }
2616
2617 /* Weak definitions of helper functions in case we don't have bpf syscall. */
2618 const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
2619 const struct bpf_func_proto bpf_map_update_elem_proto __weak;
2620 const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
2621 const struct bpf_func_proto bpf_map_push_elem_proto __weak;
2622 const struct bpf_func_proto bpf_map_pop_elem_proto __weak;
2623 const struct bpf_func_proto bpf_map_peek_elem_proto __weak;
2624 const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto __weak;
2625 const struct bpf_func_proto bpf_spin_lock_proto __weak;
2626 const struct bpf_func_proto bpf_spin_unlock_proto __weak;
2627 const struct bpf_func_proto bpf_jiffies64_proto __weak;
2628
2629 const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
2630 const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
2631 const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
2632 const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
2633 const struct bpf_func_proto bpf_ktime_get_boot_ns_proto __weak;
2634 const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto __weak;
2635 const struct bpf_func_proto bpf_ktime_get_tai_ns_proto __weak;
2636
2637 const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
2638 const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
2639 const struct bpf_func_proto bpf_get_current_comm_proto __weak;
2640 const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
2641 const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto __weak;
2642 const struct bpf_func_proto bpf_get_local_storage_proto __weak;
2643 const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto __weak;
2644 const struct bpf_func_proto bpf_snprintf_btf_proto __weak;
2645 const struct bpf_func_proto bpf_seq_printf_btf_proto __weak;
2646 const struct bpf_func_proto bpf_set_retval_proto __weak;
2647 const struct bpf_func_proto bpf_get_retval_proto __weak;
2648
2649 const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
2650 {
2651 return NULL;
2652 }
2653
2654 const struct bpf_func_proto * __weak bpf_get_trace_vprintk_proto(void)
2655 {
2656 return NULL;
2657 }
2658
2659 u64 __weak
2660 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
2661 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
2662 {
2663 return -ENOTSUPP;
2664 }
2665 EXPORT_SYMBOL_GPL(bpf_event_output);
2666
2667 /* Always built-in helper functions. */
2668 const struct bpf_func_proto bpf_tail_call_proto = {
2669 .func = NULL,
2670 .gpl_only = false,
2671 .ret_type = RET_VOID,
2672 .arg1_type = ARG_PTR_TO_CTX,
2673 .arg2_type = ARG_CONST_MAP_PTR,
2674 .arg3_type = ARG_ANYTHING,
2675 };
2676
2677 /* Stub for JITs that only support cBPF. eBPF programs are interpreted.
2678 * It is encouraged to implement bpf_int_jit_compile() instead, so that
2679 * eBPF and implicitly also cBPF can get JITed!
2680 */
2681 struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
2682 {
2683 return prog;
2684 }
2685
2686 /* Stub for JITs that support eBPF. All cBPF code gets transformed into
2687 * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
2688 */
2689 void __weak bpf_jit_compile(struct bpf_prog *prog)
2690 {
2691 }
2692
2693 bool __weak bpf_helper_changes_pkt_data(void *func)
2694 {
2695 return false;
2696 }
2697
2698 /* Return TRUE if the JIT backend wants verifier to enable sub-register usage
2699 * analysis code and wants explicit zero extension inserted by verifier.
2700 * Otherwise, return FALSE.
2701 *
2702 * The verifier inserts an explicit zero extension after BPF_CMPXCHGs even if
2703 * you don't override this. JITs that don't want these extra insns can detect
2704 * them using insn_is_zext.
2705 */
2706 bool __weak bpf_jit_needs_zext(void)
2707 {
2708 return false;
2709 }
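/* Rough illustration of the effect (behaviour, not exact encoding): when a
 * JIT returns true here, the verifier follows 32-bit subregister writes,
 * e.g. a 32-bit BPF_MOV like
 *
 *	w1 = w2
 *
 * with an explicit zero-extension on the same register wherever the upper
 * 32 bits must be clear, and a JIT can recognize these inserted
 * instructions via insn_is_zext().
 */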
2710
2711 /* Return TRUE if the JIT backend supports mixing bpf2bpf and tailcalls. */
2712 bool __weak bpf_jit_supports_subprog_tailcalls(void)
2713 {
2714 return false;
2715 }
2716
2717 bool __weak bpf_jit_supports_kfunc_call(void)
2718 {
2719 return false;
2720 }
2721
2722 /* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
2723 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
2724 */
2725 int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
2726 int len)
2727 {
2728 return -EFAULT;
2729 }
2730
2731 int __weak bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
2732 void *addr1, void *addr2)
2733 {
2734 return -ENOTSUPP;
2735 }
2736
2737 void * __weak bpf_arch_text_copy(void *dst, void *src, size_t len)
2738 {
2739 return ERR_PTR(-ENOTSUPP);
2740 }
2741
2742 int __weak bpf_arch_text_invalidate(void *dst, size_t len)
2743 {
2744 return -ENOTSUPP;
2745 }
2746
2747 DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
2748 EXPORT_SYMBOL(bpf_stats_enabled_key);
2749
2750 /* All definitions of tracepoints related to BPF. */
2751 #define CREATE_TRACE_POINTS
2752 #include <linux/bpf_trace.h>
2753
2754 EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);
2755 EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_bulk_tx);
2756