// SPDX-License-Identifier: GPL-2.0-only
/*
 * BPF JIT compiler for ARM64
 *
 * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
 */

#define pr_fmt(fmt) "bpf_jit: " fmt

#include <linux/bitfield.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/printk.h>
#include <linux/slab.h>

#include <asm/byteorder.h>
#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/set_memory.h>
#include <trace/hooks/memory.h>

#include "bpf_jit.h"

#define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
#define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
#define TCALL_CNT (MAX_BPF_JIT_REG + 2)
#define TMP_REG_3 (MAX_BPF_JIT_REG + 3)

/* Map BPF registers to A64 registers */
static const int bpf2a64[] = {
	/* return value from in-kernel function, and exit value from eBPF */
	[BPF_REG_0] = A64_R(7),
	/* arguments from eBPF program to in-kernel function */
	[BPF_REG_1] = A64_R(0),
	[BPF_REG_2] = A64_R(1),
	[BPF_REG_3] = A64_R(2),
	[BPF_REG_4] = A64_R(3),
	[BPF_REG_5] = A64_R(4),
	/* callee saved registers that in-kernel function will preserve */
	[BPF_REG_6] = A64_R(19),
	[BPF_REG_7] = A64_R(20),
	[BPF_REG_8] = A64_R(21),
	[BPF_REG_9] = A64_R(22),
	/* read-only frame pointer to access stack */
	[BPF_REG_FP] = A64_R(25),
	/* temporary registers for internal BPF JIT */
	[TMP_REG_1] = A64_R(10),
	[TMP_REG_2] = A64_R(11),
	[TMP_REG_3] = A64_R(12),
	/* tail_call_cnt */
	[TCALL_CNT] = A64_R(26),
	/* temporary register for blinding constants */
	[BPF_REG_AX] = A64_R(9),
};

struct jit_ctx {
	const struct bpf_prog *prog;	/* BPF program being JITed */
	int idx;			/* index of the next A64 instruction to emit */
	int epilogue_offset;		/* A64 index of the epilogue */
	int *offset;			/* BPF insn index -> A64 insn index */
	int exentry_idx;		/* next free exception table entry */
	__le32 *image;			/* JITed image (NULL during the sizing pass) */
	u32 stack_size;			/* 16-byte aligned BPF stack size */
};

static inline void emit(const u32 insn, struct jit_ctx *ctx)
{
	if (ctx->image != NULL)
		ctx->image[ctx->idx] = cpu_to_le32(insn);

	ctx->idx++;
}

static inline void emit_a64_mov_i(const int is64, const int reg,
				  const s32 val, struct jit_ctx *ctx)
{
	u16 hi = val >> 16;
	u16 lo = val & 0xffff;

	if (hi & 0x8000) {
		if (hi == 0xffff) {
			emit(A64_MOVN(is64, reg, (u16)~lo, 0), ctx);
		} else {
			emit(A64_MOVN(is64, reg, (u16)~hi, 16), ctx);
			if (lo != 0xffff)
				emit(A64_MOVK(is64, reg, lo, 0), ctx);
		}
	} else {
		emit(A64_MOVZ(is64, reg, lo, 0), ctx);
		if (hi)
			emit(A64_MOVK(is64, reg, hi, 16), ctx);
	}
}
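
/*
 * For example, emit_a64_mov_i(1, reg, 0x12345678, ctx) produces
 *	movz	reg, #0x5678
 *	movk	reg, #0x1234, lsl #16
 * while a sign-extended value such as 0xffff8000 takes the MOVN path and
 * becomes a single "movn reg, #0x7fff" (i.e. reg = ~0x7fff).
 */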

static int i64_i16_blocks(const u64 val, bool inverse)
{
	return (((val >> 0) & 0xffff) != (inverse ? 0xffff : 0x0000)) +
	       (((val >> 16) & 0xffff) != (inverse ? 0xffff : 0x0000)) +
	       (((val >> 32) & 0xffff) != (inverse ? 0xffff : 0x0000)) +
	       (((val >> 48) & 0xffff) != (inverse ? 0xffff : 0x0000));
}

static inline void emit_a64_mov_i64(const int reg, const u64 val,
				    struct jit_ctx *ctx)
{
	u64 nrm_tmp = val, rev_tmp = ~val;
	bool inverse;
	int shift;

	if (!(nrm_tmp >> 32))
		return emit_a64_mov_i(0, reg, (u32)val, ctx);

	inverse = i64_i16_blocks(nrm_tmp, true) < i64_i16_blocks(nrm_tmp, false);
	shift = max(round_down((inverse ? (fls64(rev_tmp) - 1) :
				(fls64(nrm_tmp) - 1)), 16), 0);
	if (inverse)
		emit(A64_MOVN(1, reg, (rev_tmp >> shift) & 0xffff, shift), ctx);
	else
		emit(A64_MOVZ(1, reg, (nrm_tmp >> shift) & 0xffff, shift), ctx);
	shift -= 16;
	while (shift >= 0) {
		if (((nrm_tmp >> shift) & 0xffff) != (inverse ? 0xffff : 0x0000))
			emit(A64_MOVK(1, reg, (nrm_tmp >> shift) & 0xffff, shift), ctx);
		shift -= 16;
	}
}
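
/*
 * For example, val = 0xfffffffffffff000 has only one 16-bit chunk that
 * differs from 0xffff, so the inverse form wins and a single
 * "movn reg, #0xfff" suffices, whereas val = 0x0000000180001000 is built
 * up the normal way:
 *	movz	reg, #0x1, lsl #32
 *	movk	reg, #0x8000, lsl #16
 *	movk	reg, #0x1000
 */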

/*
 * Kernel addresses in the vmalloc space use at most 48 bits, and the
 * remaining bits are guaranteed to be 0x1. So we can compose the address
 * with a fixed length movn/movk/movk sequence.
 */
static inline void emit_addr_mov_i64(const int reg, const u64 val,
				     struct jit_ctx *ctx)
{
	u64 tmp = val;
	int shift = 0;

	emit(A64_MOVN(1, reg, ~tmp & 0xffff, shift), ctx);
	while (shift < 32) {
		tmp >>= 16;
		shift += 16;
		emit(A64_MOVK(1, reg, tmp & 0xffff, shift), ctx);
	}
}
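
/*
 * E.g. for a (hypothetical) vmalloc address 0xffff800010123456 this emits
 *	movn	reg, #0xcba9		// reg = ~0xcba9 = 0x...ffff3456
 *	movk	reg, #0x1012, lsl #16
 *	movk	reg, #0x8000, lsl #32
 * leaving bits [63:48] as all-ones, as required.
 */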

static inline int bpf2a64_offset(int bpf_insn, int off,
				 const struct jit_ctx *ctx)
{
	/* BPF JMP offset is relative to the next instruction */
	bpf_insn++;
	/*
	 * Whereas arm64 branch instructions encode the offset
	 * from the branch itself, so we must subtract 1 from the
	 * instruction offset.
	 */
	return ctx->offset[bpf_insn + off] - (ctx->offset[bpf_insn] - 1);
}
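
/*
 * In other words: a branch in BPF insn i targets BPF insn i + 1 + off,
 * whose first A64 instruction sits at ctx->offset[i + 1 + off]; the branch
 * itself is the last A64 instruction emitted for insn i, i.e. at index
 * ctx->offset[i + 1] - 1, and the difference is the A64 branch immediate.
 */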

static void jit_fill_hole(void *area, unsigned int size)
{
	__le32 *ptr;
	/* We are guaranteed to have aligned memory. */
	for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
		*ptr++ = cpu_to_le32(AARCH64_BREAK_FAULT);
}

static inline int epilogue_offset(const struct jit_ctx *ctx)
{
	int to = ctx->epilogue_offset;
	int from = ctx->idx;

	return to - from;
}

static bool is_addsub_imm(u32 imm)
{
	/* Either imm12 or shifted imm12. */
	return !(imm & ~0xfff) || !(imm & ~0xfff000);
}
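
/*
 * For example, 0xfff and 0xfff000 are directly encodable ADD/SUB
 * immediates, while 0x1001 is not and must be moved into a temporary
 * register first.
 */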

/* Stack must be a multiple of 16B */
#define STACK_ALIGN(sz) (((sz) + 15) & ~15)

/* Tail call offset to jump into */
#if IS_ENABLED(CONFIG_ARM64_BTI_KERNEL)
#define PROLOGUE_OFFSET 8
#else
#define PROLOGUE_OFFSET 7
#endif
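
/*
 * PROLOGUE_OFFSET counts the instructions build_prologue() emits before the
 * tail-call landing point: BTI C (when enabled), the FP/LR push, the SP-to-FP
 * move, three callee-saved pushes, the BPF frame pointer setup and the
 * tail_call_cnt MOVZ.  build_prologue() verifies the count at JIT time and
 * fails if it ever goes out of sync.
 */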

static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
{
	const struct bpf_prog *prog = ctx->prog;
	const u8 r6 = bpf2a64[BPF_REG_6];
	const u8 r7 = bpf2a64[BPF_REG_7];
	const u8 r8 = bpf2a64[BPF_REG_8];
	const u8 r9 = bpf2a64[BPF_REG_9];
	const u8 fp = bpf2a64[BPF_REG_FP];
	const u8 tcc = bpf2a64[TCALL_CNT];
	const int idx0 = ctx->idx;
	int cur_offset;

	/*
	 * BPF prog stack layout
	 *
	 *                         high
	 * original A64_SP =>   0:+-----+ BPF prologue
	 *                        |FP/LR|
	 * current A64_FP =>  -16:+-----+
	 *                        | ... | callee saved registers
	 * BPF fp register => -64:+-----+ <= (BPF_FP)
	 *                        |     |
	 *                        | ... | BPF prog stack
	 *                        |     |
	 *                        +-----+ <= (BPF_FP - prog->aux->stack_depth)
	 *                        |RSVD | padding
	 * current A64_SP =>      +-----+ <= (BPF_FP - ctx->stack_size)
	 *                        |     |
	 *                        | ... | Function call stack
	 *                        |     |
	 *                        +-----+
	 *                          low
	 *
	 */

	/* BTI landing pad */
	if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL))
		emit(A64_BTI_C, ctx);

	/* Save FP and LR registers to stay aligned with the ARM64 AAPCS */
	emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
	emit(A64_MOV(1, A64_FP, A64_SP), ctx);

	/* Save callee-saved registers */
	emit(A64_PUSH(r6, r7, A64_SP), ctx);
	emit(A64_PUSH(r8, r9, A64_SP), ctx);
	emit(A64_PUSH(fp, tcc, A64_SP), ctx);

	/* Set up BPF prog stack base register */
	emit(A64_MOV(1, fp, A64_SP), ctx);

	if (!ebpf_from_cbpf) {
		/* Initialize tail_call_cnt */
		emit(A64_MOVZ(1, tcc, 0, 0), ctx);

		cur_offset = ctx->idx - idx0;
		if (cur_offset != PROLOGUE_OFFSET) {
			pr_err_once("PROLOGUE_OFFSET = %d, expected %d!\n",
				    cur_offset, PROLOGUE_OFFSET);
			return -1;
		}

		/* BTI landing pad for the tail call, done with a BR */
		if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL))
			emit(A64_BTI_J, ctx);
	}

	ctx->stack_size = STACK_ALIGN(prog->aux->stack_depth);

	/* Set up function call stack */
	emit(A64_SUB_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
	return 0;
}

static int out_offset = -1; /* initialized on the first pass of build_body() */
static int emit_bpf_tail_call(struct jit_ctx *ctx)
{
	/* bpf_tail_call(void *prog_ctx, struct bpf_array *array, u64 index) */
	const u8 r2 = bpf2a64[BPF_REG_2];
	const u8 r3 = bpf2a64[BPF_REG_3];

	const u8 tmp = bpf2a64[TMP_REG_1];
	const u8 prg = bpf2a64[TMP_REG_2];
	const u8 tcc = bpf2a64[TCALL_CNT];
	const int idx0 = ctx->idx;
#define cur_offset (ctx->idx - idx0)
#define jmp_offset (out_offset - (cur_offset))
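	/*
	 * Note: on the very first pass out_offset is still -1 and jmp_offset is
	 * meaningless, which is harmless because ctx->image is NULL and emit()
	 * only counts instructions.  out_offset is recorded at the "out:" label
	 * below, so the real branch offsets get encoded on the later passes.
	 */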
	size_t off;

	/* if (index >= array->map.max_entries)
	 *     goto out;
	 */
	off = offsetof(struct bpf_array, map.max_entries);
	emit_a64_mov_i64(tmp, off, ctx);
	emit(A64_LDR32(tmp, r2, tmp), ctx);
	emit(A64_MOV(0, r3, r3), ctx);
	emit(A64_CMP(0, r3, tmp), ctx);
	emit(A64_B_(A64_COND_CS, jmp_offset), ctx);

	/* if (tail_call_cnt > MAX_TAIL_CALL_CNT)
	 *     goto out;
	 * tail_call_cnt++;
	 */
	emit_a64_mov_i64(tmp, MAX_TAIL_CALL_CNT, ctx);
	emit(A64_CMP(1, tcc, tmp), ctx);
	emit(A64_B_(A64_COND_HI, jmp_offset), ctx);
	emit(A64_ADD_I(1, tcc, tcc, 1), ctx);

	/* prog = array->ptrs[index];
	 * if (prog == NULL)
	 *     goto out;
	 */
	off = offsetof(struct bpf_array, ptrs);
	emit_a64_mov_i64(tmp, off, ctx);
	emit(A64_ADD(1, tmp, r2, tmp), ctx);
	emit(A64_LSL(1, prg, r3, 3), ctx);
	emit(A64_LDR64(prg, tmp, prg), ctx);
	emit(A64_CBZ(1, prg, jmp_offset), ctx);

	/* goto *(prog->bpf_func + prologue_offset); */
	off = offsetof(struct bpf_prog, bpf_func);
	emit_a64_mov_i64(tmp, off, ctx);
	emit(A64_LDR64(tmp, prg, tmp), ctx);
	emit(A64_ADD_I(1, tmp, tmp, sizeof(u32) * PROLOGUE_OFFSET), ctx);
	emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
	emit(A64_BR(tmp), ctx);

	/* out: */
	if (out_offset == -1)
		out_offset = cur_offset;
	if (cur_offset != out_offset) {
		pr_err_once("tail_call out_offset = %d, expected %d!\n",
			    cur_offset, out_offset);
		return -1;
	}
	return 0;
#undef cur_offset
#undef jmp_offset
}

static void build_epilogue(struct jit_ctx *ctx)
{
	const u8 r0 = bpf2a64[BPF_REG_0];
	const u8 r6 = bpf2a64[BPF_REG_6];
	const u8 r7 = bpf2a64[BPF_REG_7];
	const u8 r8 = bpf2a64[BPF_REG_8];
	const u8 r9 = bpf2a64[BPF_REG_9];
	const u8 fp = bpf2a64[BPF_REG_FP];

	/* We're done with BPF stack */
	emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);

	/* Restore fp (x25) and x26 */
	emit(A64_POP(fp, A64_R(26), A64_SP), ctx);

	/* Restore callee-saved registers */
	emit(A64_POP(r8, r9, A64_SP), ctx);
	emit(A64_POP(r6, r7, A64_SP), ctx);

	/* Restore FP/LR registers */
	emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);

	/* Set return value */
	emit(A64_MOV(1, A64_R(0), r0), ctx);

	emit(A64_RET(A64_LR), ctx);
}

#define BPF_FIXUP_OFFSET_MASK	GENMASK(26, 0)
#define BPF_FIXUP_REG_MASK	GENMASK(31, 27)
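
/*
 * Layout of the ex->fixup word built by add_exception_handler(): bits
 * [31:27] hold the A64 destination register to zero on a fault and bits
 * [26:0] hold the (positive) distance from &ex->fixup back to the
 * instruction following the faulting load.
 */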

int arm64_bpf_fixup_exception(const struct exception_table_entry *ex,
			      struct pt_regs *regs)
{
	off_t offset = FIELD_GET(BPF_FIXUP_OFFSET_MASK, ex->fixup);
	int dst_reg = FIELD_GET(BPF_FIXUP_REG_MASK, ex->fixup);

	regs->regs[dst_reg] = 0;
	regs->pc = (unsigned long)&ex->fixup - offset;
	return 1;
}

/* For accesses to BTF pointers, add an entry to the exception table */
static int add_exception_handler(const struct bpf_insn *insn,
				 struct jit_ctx *ctx,
				 int dst_reg)
{
	off_t offset;
	unsigned long pc;
	struct exception_table_entry *ex;

	if (!ctx->image)
		/* First pass */
		return 0;

	if (BPF_MODE(insn->code) != BPF_PROBE_MEM)
		return 0;

	if (!ctx->prog->aux->extable ||
	    WARN_ON_ONCE(ctx->exentry_idx >= ctx->prog->aux->num_exentries))
		return -EINVAL;

	ex = &ctx->prog->aux->extable[ctx->exentry_idx];
	pc = (unsigned long)&ctx->image[ctx->idx - 1];

	offset = pc - (long)&ex->insn;
	if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
		return -ERANGE;
	ex->insn = offset;

	/*
	 * Since the extable follows the program, the fixup offset is always
	 * negative and limited to BPF_JIT_REGION_SIZE. Store a positive value
	 * to keep things simple, and put the destination register in the upper
	 * bits. We don't need to worry about buildtime or runtime sort
	 * modifying the upper bits because the table is already sorted, and
	 * isn't part of the main exception table.
	 */
	offset = (long)&ex->fixup - (pc + AARCH64_INSN_SIZE);
	if (!FIELD_FIT(BPF_FIXUP_OFFSET_MASK, offset))
		return -ERANGE;

	ex->fixup = FIELD_PREP(BPF_FIXUP_OFFSET_MASK, offset) |
		    FIELD_PREP(BPF_FIXUP_REG_MASK, dst_reg);

	ctx->exentry_idx++;
	return 0;
}

/* JITs an eBPF instruction.
 * Returns:
 * 0 - successfully JITed an 8-byte eBPF instruction.
 * >0 - successfully JITed a 16-byte eBPF instruction.
 * <0 - failed to JIT.
 */
static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
		      bool extra_pass)
{
	const u8 code = insn->code;
	const u8 dst = bpf2a64[insn->dst_reg];
	const u8 src = bpf2a64[insn->src_reg];
	const u8 tmp = bpf2a64[TMP_REG_1];
	const u8 tmp2 = bpf2a64[TMP_REG_2];
	const u8 tmp3 = bpf2a64[TMP_REG_3];
	const s16 off = insn->off;
	const s32 imm = insn->imm;
	const int i = insn - ctx->prog->insnsi;
	const bool is64 = BPF_CLASS(code) == BPF_ALU64 ||
			  BPF_CLASS(code) == BPF_JMP;
	const bool isdw = BPF_SIZE(code) == BPF_DW;
	u8 jmp_cond, reg;
	s32 jmp_offset;
	u32 a64_insn;
	int ret;

#define check_imm(bits, imm) do {				\
	if ((((imm) > 0) && ((imm) >> (bits))) ||		\
	    (((imm) < 0) && (~(imm) >> (bits)))) {		\
		pr_info("[%2d] imm=%d(0x%x) out of range\n",	\
			i, imm, imm);				\
		return -EINVAL;					\
	}							\
} while (0)
#define check_imm19(imm) check_imm(19, imm)
#define check_imm26(imm) check_imm(26, imm)
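
	/*
	 * check_imm19() and check_imm26() match the signed immediate fields of
	 * A64 conditional branches/CBZ (19 bits, roughly +/-1 MiB) and the
	 * unconditional B (26 bits, roughly +/-128 MiB), both counted in
	 * 4-byte instructions.
	 */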

	switch (code) {
	/* dst = src */
	case BPF_ALU | BPF_MOV | BPF_X:
	case BPF_ALU64 | BPF_MOV | BPF_X:
		emit(A64_MOV(is64, dst, src), ctx);
		break;
	/* dst = dst OP src */
	case BPF_ALU | BPF_ADD | BPF_X:
	case BPF_ALU64 | BPF_ADD | BPF_X:
		emit(A64_ADD(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_SUB | BPF_X:
	case BPF_ALU64 | BPF_SUB | BPF_X:
		emit(A64_SUB(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_AND | BPF_X:
	case BPF_ALU64 | BPF_AND | BPF_X:
		emit(A64_AND(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_OR | BPF_X:
	case BPF_ALU64 | BPF_OR | BPF_X:
		emit(A64_ORR(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_XOR | BPF_X:
	case BPF_ALU64 | BPF_XOR | BPF_X:
		emit(A64_EOR(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_MUL | BPF_X:
	case BPF_ALU64 | BPF_MUL | BPF_X:
		emit(A64_MUL(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_DIV | BPF_X:
	case BPF_ALU64 | BPF_DIV | BPF_X:
	case BPF_ALU | BPF_MOD | BPF_X:
	case BPF_ALU64 | BPF_MOD | BPF_X:
		switch (BPF_OP(code)) {
		case BPF_DIV:
			emit(A64_UDIV(is64, dst, dst, src), ctx);
			break;
		case BPF_MOD:
			emit(A64_UDIV(is64, tmp, dst, src), ctx);
			emit(A64_MSUB(is64, dst, dst, tmp, src), ctx);
			break;
		}
		break;
	case BPF_ALU | BPF_LSH | BPF_X:
	case BPF_ALU64 | BPF_LSH | BPF_X:
		emit(A64_LSLV(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_RSH | BPF_X:
	case BPF_ALU64 | BPF_RSH | BPF_X:
		emit(A64_LSRV(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_ARSH | BPF_X:
	case BPF_ALU64 | BPF_ARSH | BPF_X:
		emit(A64_ASRV(is64, dst, dst, src), ctx);
		break;
	/* dst = -dst */
	case BPF_ALU | BPF_NEG:
	case BPF_ALU64 | BPF_NEG:
		emit(A64_NEG(is64, dst, dst), ctx);
		break;
	/* dst = BSWAP##imm(dst) */
	case BPF_ALU | BPF_END | BPF_FROM_LE:
	case BPF_ALU | BPF_END | BPF_FROM_BE:
#ifdef CONFIG_CPU_BIG_ENDIAN
		if (BPF_SRC(code) == BPF_FROM_BE)
			goto emit_bswap_uxt;
#else /* !CONFIG_CPU_BIG_ENDIAN */
		if (BPF_SRC(code) == BPF_FROM_LE)
			goto emit_bswap_uxt;
#endif
		switch (imm) {
		case 16:
			emit(A64_REV16(is64, dst, dst), ctx);
			/* zero-extend 16 bits into 64 bits */
			emit(A64_UXTH(is64, dst, dst), ctx);
			break;
		case 32:
			emit(A64_REV32(is64, dst, dst), ctx);
			/* upper 32 bits already cleared */
			break;
		case 64:
			emit(A64_REV64(dst, dst), ctx);
			break;
		}
		break;
emit_bswap_uxt:
		switch (imm) {
		case 16:
			/* zero-extend 16 bits into 64 bits */
			emit(A64_UXTH(is64, dst, dst), ctx);
			break;
		case 32:
			/* zero-extend 32 bits into 64 bits */
			emit(A64_UXTW(is64, dst, dst), ctx);
			break;
		case 64:
			/* nop */
			break;
		}
		break;
	/* dst = imm */
	case BPF_ALU | BPF_MOV | BPF_K:
	case BPF_ALU64 | BPF_MOV | BPF_K:
		emit_a64_mov_i(is64, dst, imm, ctx);
		break;
	/* dst = dst OP imm */
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_ADD | BPF_K:
		if (is_addsub_imm(imm)) {
			emit(A64_ADD_I(is64, dst, dst, imm), ctx);
		} else if (is_addsub_imm(-imm)) {
			emit(A64_SUB_I(is64, dst, dst, -imm), ctx);
		} else {
			emit_a64_mov_i(is64, tmp, imm, ctx);
			emit(A64_ADD(is64, dst, dst, tmp), ctx);
		}
		break;
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_K:
		if (is_addsub_imm(imm)) {
			emit(A64_SUB_I(is64, dst, dst, imm), ctx);
		} else if (is_addsub_imm(-imm)) {
			emit(A64_ADD_I(is64, dst, dst, -imm), ctx);
		} else {
			emit_a64_mov_i(is64, tmp, imm, ctx);
			emit(A64_SUB(is64, dst, dst, tmp), ctx);
		}
		break;
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_AND | BPF_K:
		a64_insn = A64_AND_I(is64, dst, dst, imm);
		if (a64_insn != AARCH64_BREAK_FAULT) {
			emit(a64_insn, ctx);
		} else {
			emit_a64_mov_i(is64, tmp, imm, ctx);
			emit(A64_AND(is64, dst, dst, tmp), ctx);
		}
		break;
	case BPF_ALU | BPF_OR | BPF_K:
	case BPF_ALU64 | BPF_OR | BPF_K:
		a64_insn = A64_ORR_I(is64, dst, dst, imm);
		if (a64_insn != AARCH64_BREAK_FAULT) {
			emit(a64_insn, ctx);
		} else {
			emit_a64_mov_i(is64, tmp, imm, ctx);
			emit(A64_ORR(is64, dst, dst, tmp), ctx);
		}
		break;
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_K:
		a64_insn = A64_EOR_I(is64, dst, dst, imm);
		if (a64_insn != AARCH64_BREAK_FAULT) {
			emit(a64_insn, ctx);
		} else {
			emit_a64_mov_i(is64, tmp, imm, ctx);
			emit(A64_EOR(is64, dst, dst, tmp), ctx);
		}
		break;
	case BPF_ALU | BPF_MUL | BPF_K:
	case BPF_ALU64 | BPF_MUL | BPF_K:
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_MUL(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_DIV | BPF_K:
	case BPF_ALU64 | BPF_DIV | BPF_K:
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_UDIV(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_MOD | BPF_K:
	case BPF_ALU64 | BPF_MOD | BPF_K:
		emit_a64_mov_i(is64, tmp2, imm, ctx);
		emit(A64_UDIV(is64, tmp, dst, tmp2), ctx);
		emit(A64_MSUB(is64, dst, dst, tmp, tmp2), ctx);
		break;
	case BPF_ALU | BPF_LSH | BPF_K:
	case BPF_ALU64 | BPF_LSH | BPF_K:
		emit(A64_LSL(is64, dst, dst, imm), ctx);
		break;
	case BPF_ALU | BPF_RSH | BPF_K:
	case BPF_ALU64 | BPF_RSH | BPF_K:
		emit(A64_LSR(is64, dst, dst, imm), ctx);
		break;
	case BPF_ALU | BPF_ARSH | BPF_K:
	case BPF_ALU64 | BPF_ARSH | BPF_K:
		emit(A64_ASR(is64, dst, dst, imm), ctx);
		break;

	/* JUMP off */
	case BPF_JMP | BPF_JA:
		jmp_offset = bpf2a64_offset(i, off, ctx);
		check_imm26(jmp_offset);
		emit(A64_B(jmp_offset), ctx);
		break;
	/* IF (dst COND src) JUMP off */
	case BPF_JMP | BPF_JEQ | BPF_X:
	case BPF_JMP | BPF_JGT | BPF_X:
	case BPF_JMP | BPF_JLT | BPF_X:
	case BPF_JMP | BPF_JGE | BPF_X:
	case BPF_JMP | BPF_JLE | BPF_X:
	case BPF_JMP | BPF_JNE | BPF_X:
	case BPF_JMP | BPF_JSGT | BPF_X:
	case BPF_JMP | BPF_JSLT | BPF_X:
	case BPF_JMP | BPF_JSGE | BPF_X:
	case BPF_JMP | BPF_JSLE | BPF_X:
	case BPF_JMP32 | BPF_JEQ | BPF_X:
	case BPF_JMP32 | BPF_JGT | BPF_X:
	case BPF_JMP32 | BPF_JLT | BPF_X:
	case BPF_JMP32 | BPF_JGE | BPF_X:
	case BPF_JMP32 | BPF_JLE | BPF_X:
	case BPF_JMP32 | BPF_JNE | BPF_X:
	case BPF_JMP32 | BPF_JSGT | BPF_X:
	case BPF_JMP32 | BPF_JSLT | BPF_X:
	case BPF_JMP32 | BPF_JSGE | BPF_X:
	case BPF_JMP32 | BPF_JSLE | BPF_X:
		emit(A64_CMP(is64, dst, src), ctx);
emit_cond_jmp:
		jmp_offset = bpf2a64_offset(i, off, ctx);
		check_imm19(jmp_offset);
		switch (BPF_OP(code)) {
		case BPF_JEQ:
			jmp_cond = A64_COND_EQ;
			break;
		case BPF_JGT:
			jmp_cond = A64_COND_HI;
			break;
		case BPF_JLT:
			jmp_cond = A64_COND_CC;
			break;
		case BPF_JGE:
			jmp_cond = A64_COND_CS;
			break;
		case BPF_JLE:
			jmp_cond = A64_COND_LS;
			break;
		case BPF_JSET:
		case BPF_JNE:
			jmp_cond = A64_COND_NE;
			break;
		case BPF_JSGT:
			jmp_cond = A64_COND_GT;
			break;
		case BPF_JSLT:
			jmp_cond = A64_COND_LT;
			break;
		case BPF_JSGE:
			jmp_cond = A64_COND_GE;
			break;
		case BPF_JSLE:
			jmp_cond = A64_COND_LE;
			break;
		default:
			return -EFAULT;
		}
		emit(A64_B_(jmp_cond, jmp_offset), ctx);
		break;
	case BPF_JMP | BPF_JSET | BPF_X:
	case BPF_JMP32 | BPF_JSET | BPF_X:
		emit(A64_TST(is64, dst, src), ctx);
		goto emit_cond_jmp;
	/* IF (dst COND imm) JUMP off */
	case BPF_JMP | BPF_JEQ | BPF_K:
	case BPF_JMP | BPF_JGT | BPF_K:
	case BPF_JMP | BPF_JLT | BPF_K:
	case BPF_JMP | BPF_JGE | BPF_K:
	case BPF_JMP | BPF_JLE | BPF_K:
	case BPF_JMP | BPF_JNE | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSLT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
	case BPF_JMP | BPF_JSLE | BPF_K:
	case BPF_JMP32 | BPF_JEQ | BPF_K:
	case BPF_JMP32 | BPF_JGT | BPF_K:
	case BPF_JMP32 | BPF_JLT | BPF_K:
	case BPF_JMP32 | BPF_JGE | BPF_K:
	case BPF_JMP32 | BPF_JLE | BPF_K:
	case BPF_JMP32 | BPF_JNE | BPF_K:
	case BPF_JMP32 | BPF_JSGT | BPF_K:
	case BPF_JMP32 | BPF_JSLT | BPF_K:
	case BPF_JMP32 | BPF_JSGE | BPF_K:
	case BPF_JMP32 | BPF_JSLE | BPF_K:
		if (is_addsub_imm(imm)) {
			emit(A64_CMP_I(is64, dst, imm), ctx);
		} else if (is_addsub_imm(-imm)) {
			emit(A64_CMN_I(is64, dst, -imm), ctx);
		} else {
			emit_a64_mov_i(is64, tmp, imm, ctx);
			emit(A64_CMP(is64, dst, tmp), ctx);
		}
		goto emit_cond_jmp;
	case BPF_JMP | BPF_JSET | BPF_K:
	case BPF_JMP32 | BPF_JSET | BPF_K:
		a64_insn = A64_TST_I(is64, dst, imm);
		if (a64_insn != AARCH64_BREAK_FAULT) {
			emit(a64_insn, ctx);
		} else {
			emit_a64_mov_i(is64, tmp, imm, ctx);
			emit(A64_TST(is64, dst, tmp), ctx);
		}
		goto emit_cond_jmp;
	/* function call */
	case BPF_JMP | BPF_CALL:
	{
		const u8 r0 = bpf2a64[BPF_REG_0];
		bool func_addr_fixed;
		u64 func_addr;

		ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass,
					    &func_addr, &func_addr_fixed);
		if (ret < 0)
			return ret;
		emit_addr_mov_i64(tmp, func_addr, ctx);
		emit(A64_BLR(tmp), ctx);
		emit(A64_MOV(1, r0, A64_R(0)), ctx);
		break;
	}
	/* tail call */
	case BPF_JMP | BPF_TAIL_CALL:
		if (emit_bpf_tail_call(ctx))
			return -EFAULT;
		break;
	/* function return */
	case BPF_JMP | BPF_EXIT:
		/* Optimization: when last instruction is EXIT,
		   simply fallthrough to epilogue. */
		if (i == ctx->prog->len - 1)
			break;
		jmp_offset = epilogue_offset(ctx);
		check_imm26(jmp_offset);
		emit(A64_B(jmp_offset), ctx);
		break;

	/* dst = imm64 */
	case BPF_LD | BPF_IMM | BPF_DW:
	{
		const struct bpf_insn insn1 = insn[1];
		u64 imm64;

		imm64 = (u64)insn1.imm << 32 | (u32)imm;
		emit_a64_mov_i64(dst, imm64, ctx);

		return 1;
	}

	/* LDX: dst = *(size *)(src + off) */
	case BPF_LDX | BPF_MEM | BPF_W:
	case BPF_LDX | BPF_MEM | BPF_H:
	case BPF_LDX | BPF_MEM | BPF_B:
	case BPF_LDX | BPF_MEM | BPF_DW:
	case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
	case BPF_LDX | BPF_PROBE_MEM | BPF_W:
	case BPF_LDX | BPF_PROBE_MEM | BPF_H:
	case BPF_LDX | BPF_PROBE_MEM | BPF_B:
		emit_a64_mov_i(1, tmp, off, ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			emit(A64_LDR32(dst, src, tmp), ctx);
			break;
		case BPF_H:
			emit(A64_LDRH(dst, src, tmp), ctx);
			break;
		case BPF_B:
			emit(A64_LDRB(dst, src, tmp), ctx);
			break;
		case BPF_DW:
			emit(A64_LDR64(dst, src, tmp), ctx);
			break;
		}

		ret = add_exception_handler(insn, ctx, dst);
		if (ret)
			return ret;
		break;

	/* speculation barrier */
	case BPF_ST | BPF_NOSPEC:
		/*
		 * Nothing required here.
		 *
		 * In case of arm64, we rely on the firmware mitigation of
		 * Speculative Store Bypass as controlled via the ssbd kernel
		 * parameter. Whenever the mitigation is enabled, it works
		 * for all of the kernel code with no need to provide any
		 * additional instructions.
		 */
		break;

	/* ST: *(size *)(dst + off) = imm */
	case BPF_ST | BPF_MEM | BPF_W:
	case BPF_ST | BPF_MEM | BPF_H:
	case BPF_ST | BPF_MEM | BPF_B:
	case BPF_ST | BPF_MEM | BPF_DW:
		/* Load imm to a register then store it */
		emit_a64_mov_i(1, tmp2, off, ctx);
		emit_a64_mov_i(1, tmp, imm, ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			emit(A64_STR32(tmp, dst, tmp2), ctx);
			break;
		case BPF_H:
			emit(A64_STRH(tmp, dst, tmp2), ctx);
			break;
		case BPF_B:
			emit(A64_STRB(tmp, dst, tmp2), ctx);
			break;
		case BPF_DW:
			emit(A64_STR64(tmp, dst, tmp2), ctx);
			break;
		}
		break;

	/* STX: *(size *)(dst + off) = src */
	case BPF_STX | BPF_MEM | BPF_W:
	case BPF_STX | BPF_MEM | BPF_H:
	case BPF_STX | BPF_MEM | BPF_B:
	case BPF_STX | BPF_MEM | BPF_DW:
		emit_a64_mov_i(1, tmp, off, ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			emit(A64_STR32(src, dst, tmp), ctx);
			break;
		case BPF_H:
			emit(A64_STRH(src, dst, tmp), ctx);
			break;
		case BPF_B:
			emit(A64_STRB(src, dst, tmp), ctx);
			break;
		case BPF_DW:
			emit(A64_STR64(src, dst, tmp), ctx);
			break;
		}
		break;

	/* STX XADD: lock *(u32 *)(dst + off) += src */
	case BPF_STX | BPF_XADD | BPF_W:
	/* STX XADD: lock *(u64 *)(dst + off) += src */
	case BPF_STX | BPF_XADD | BPF_DW:
		if (!off) {
			reg = dst;
		} else {
			emit_a64_mov_i(1, tmp, off, ctx);
			emit(A64_ADD(1, tmp, tmp, dst), ctx);
			reg = tmp;
		}
		if (cpus_have_cap(ARM64_HAS_LSE_ATOMICS)) {
			emit(A64_STADD(isdw, reg, src), ctx);
		} else {
			emit(A64_LDXR(isdw, tmp2, reg), ctx);
			emit(A64_ADD(isdw, tmp2, tmp2, src), ctx);
			emit(A64_STXR(isdw, tmp2, reg, tmp3), ctx);
			/* Branch back 3 insns to the LDXR and retry if the
			 * store-exclusive failed.
			 */
			jmp_offset = -3;
			check_imm19(jmp_offset);
			emit(A64_CBNZ(0, tmp3, jmp_offset), ctx);
		}
		break;

	default:
		pr_err_once("unknown opcode %02x\n", code);
		return -EINVAL;
	}

	return 0;
}

static int build_body(struct jit_ctx *ctx, bool extra_pass)
{
	const struct bpf_prog *prog = ctx->prog;
	int i;

	/*
	 * - offset[0] offset of the end of prologue,
	 *   start of the 1st instruction.
	 * - offset[1] - offset of the end of 1st instruction,
	 *   start of the 2nd instruction
	 * [....]
	 * - offset[3] - offset of the end of 3rd instruction,
	 *   start of 4th instruction
	 */
	for (i = 0; i < prog->len; i++) {
		const struct bpf_insn *insn = &prog->insnsi[i];
		int ret;

		if (ctx->image == NULL)
			ctx->offset[i] = ctx->idx;
		ret = build_insn(insn, ctx, extra_pass);
		if (ret > 0) {
			i++;
			if (ctx->image == NULL)
				ctx->offset[i] = ctx->idx;
			continue;
		}
		if (ret)
			return ret;
	}
	/*
	 * offset is allocated with prog->len + 1 so fill in
	 * the last element with the offset after the last
	 * instruction (end of program)
	 */
	if (ctx->image == NULL)
		ctx->offset[i] = ctx->idx;

	return 0;
}

static int validate_code(struct jit_ctx *ctx)
{
	int i;

	for (i = 0; i < ctx->idx; i++) {
		u32 a64_insn = le32_to_cpu(ctx->image[i]);

		if (a64_insn == AARCH64_BREAK_FAULT)
			return -1;
	}

	if (WARN_ON_ONCE(ctx->exentry_idx != ctx->prog->aux->num_exentries))
		return -1;

	return 0;
}

static inline void bpf_flush_icache(void *start, void *end)
{
	flush_icache_range((unsigned long)start, (unsigned long)end);
}

struct arm64_jit_data {
	struct bpf_binary_header *header;
	u8 *image;
	struct jit_ctx ctx;
};

struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	int image_size, prog_size, extable_size;
	struct bpf_prog *tmp, *orig_prog = prog;
	struct bpf_binary_header *header;
	struct arm64_jit_data *jit_data;
	bool was_classic = bpf_prog_was_classic(prog);
	bool tmp_blinded = false;
	bool extra_pass = false;
	struct jit_ctx ctx;
	u8 *image_ptr;

	if (!prog->jit_requested)
		return orig_prog;

	tmp = bpf_jit_blind_constants(prog);
	/* If blinding was requested and we failed during blinding,
	 * we must fall back to the interpreter.
	 */
	if (IS_ERR(tmp))
		return orig_prog;
	if (tmp != prog) {
		tmp_blinded = true;
		prog = tmp;
	}

	jit_data = prog->aux->jit_data;
	if (!jit_data) {
		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
		if (!jit_data) {
			prog = orig_prog;
			goto out;
		}
		prog->aux->jit_data = jit_data;
	}
	if (jit_data->ctx.offset) {
		ctx = jit_data->ctx;
		image_ptr = jit_data->image;
		header = jit_data->header;
		extra_pass = true;
		prog_size = sizeof(u32) * ctx.idx;
		goto skip_init_ctx;
	}
	memset(&ctx, 0, sizeof(ctx));
	ctx.prog = prog;

	ctx.offset = kcalloc(prog->len + 1, sizeof(int), GFP_KERNEL);
	if (ctx.offset == NULL) {
		prog = orig_prog;
		goto out_off;
	}

	/*
	 * 1. Initial fake pass to compute ctx->idx and ctx->offset.
	 *
	 * BPF line info needs ctx->offset[i] to be the offset of
	 * instruction[i] in jited image, so build prologue first.
	 */
	if (build_prologue(&ctx, was_classic)) {
		prog = orig_prog;
		goto out_off;
	}

	if (build_body(&ctx, extra_pass)) {
		prog = orig_prog;
		goto out_off;
	}

	ctx.epilogue_offset = ctx.idx;
	build_epilogue(&ctx);

	extable_size = prog->aux->num_exentries *
		sizeof(struct exception_table_entry);

	/* Now we know the actual image size. */
	prog_size = sizeof(u32) * ctx.idx;
	image_size = prog_size + extable_size;
	header = bpf_jit_binary_alloc(image_size, &image_ptr,
				      sizeof(u32), jit_fill_hole);
	if (header == NULL) {
		prog = orig_prog;
		goto out_off;
	}

	/* 2. Now, the actual pass. */

	ctx.image = (__le32 *)image_ptr;
	if (extable_size)
		prog->aux->extable = (void *)image_ptr + prog_size;
skip_init_ctx:
	ctx.idx = 0;
	ctx.exentry_idx = 0;

	build_prologue(&ctx, was_classic);

	if (build_body(&ctx, extra_pass)) {
		bpf_jit_binary_free(header);
		prog = orig_prog;
		goto out_off;
	}

	build_epilogue(&ctx);

	/* 3. Extra pass to validate JITed code. */
	if (validate_code(&ctx)) {
		bpf_jit_binary_free(header);
		prog = orig_prog;
		goto out_off;
	}

	/* And we're done. */
	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, prog_size, 2, ctx.image);

	bpf_flush_icache(header, ctx.image + ctx.idx);

	if (!prog->is_func || extra_pass) {
		if (extra_pass && ctx.idx != jit_data->ctx.idx) {
			pr_err_once("multi-func JIT bug %d != %d\n",
				    ctx.idx, jit_data->ctx.idx);
			bpf_jit_binary_free(header);
			prog->bpf_func = NULL;
			prog->jited = 0;
			prog->jited_len = 0;
			goto out_off;
		}
		bpf_jit_binary_lock_ro(header);
		trace_android_vh_set_memory_ro((unsigned long)header, header->pages);
		trace_android_vh_set_memory_x((unsigned long)header, header->pages);
	} else {
		jit_data->ctx = ctx;
		jit_data->image = image_ptr;
		jit_data->header = header;
	}
	prog->bpf_func = (void *)ctx.image;
	prog->jited = 1;
	prog->jited_len = prog_size;

	if (!prog->is_func || extra_pass) {
		int i;

		/* offset[prog->len] is the size of program */
		for (i = 0; i <= prog->len; i++)
			ctx.offset[i] *= AARCH64_INSN_SIZE;
		bpf_prog_fill_jited_linfo(prog, ctx.offset + 1);
out_off:
		kfree(ctx.offset);
		kfree(jit_data);
		prog->aux->jit_data = NULL;
	}
out:
	if (tmp_blinded)
		bpf_jit_prog_release_other(prog, prog == orig_prog ?
					   tmp : orig_prog);
	return prog;
}

u64 bpf_jit_alloc_exec_limit(void)
{
	return VMALLOC_END - VMALLOC_START;
}

void *bpf_jit_alloc_exec(unsigned long size)
{
	return vmalloc(size);
}

void bpf_jit_free_exec(void *addr)
{
	return vfree(addr);
}