// SPDX-License-Identifier: GPL-2.0-only
/*
 * BPF JIT compiler for ARM64
 *
 * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
 */

#define pr_fmt(fmt) "bpf_jit: " fmt

#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/printk.h>
#include <linux/slab.h>

#include <asm/byteorder.h>
#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/set_memory.h>

#include "bpf_jit.h"

#define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
#define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
#define TCALL_CNT (MAX_BPF_JIT_REG + 2)
#define TMP_REG_3 (MAX_BPF_JIT_REG + 3)

/* Map BPF registers to A64 registers */
static const int bpf2a64[] = {
	/* return value from in-kernel function, and exit value from eBPF */
	[BPF_REG_0] = A64_R(7),
	/* arguments from eBPF program to in-kernel function */
	[BPF_REG_1] = A64_R(0),
	[BPF_REG_2] = A64_R(1),
	[BPF_REG_3] = A64_R(2),
	[BPF_REG_4] = A64_R(3),
	[BPF_REG_5] = A64_R(4),
	/* callee saved registers that in-kernel function will preserve */
	[BPF_REG_6] = A64_R(19),
	[BPF_REG_7] = A64_R(20),
	[BPF_REG_8] = A64_R(21),
	[BPF_REG_9] = A64_R(22),
	/* read-only frame pointer to access stack */
	[BPF_REG_FP] = A64_R(25),
	/* temporary registers for internal BPF JIT */
	[TMP_REG_1] = A64_R(10),
	[TMP_REG_2] = A64_R(11),
	[TMP_REG_3] = A64_R(12),
	/* tail_call_cnt */
	[TCALL_CNT] = A64_R(26),
	/* temporary register for blinding constants */
	[BPF_REG_AX] = A64_R(9),
};

struct jit_ctx {
	const struct bpf_prog *prog;
	int idx;
	int epilogue_offset;
	int *offset;
	__le32 *image;
	u32 stack_size;
};

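/*
 * emit() serves both JIT passes: during the initial sizing pass
 * ctx->image is NULL, so only ctx->idx is advanced; during the real
 * pass the encoded instruction is also written to the image.
 */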
static inline void emit(const u32 insn, struct jit_ctx *ctx)
{
	if (ctx->image != NULL)
		ctx->image[ctx->idx] = cpu_to_le32(insn);

	ctx->idx++;
}

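/*
 * Load a 32-bit immediate with at most two instructions: MOVZ/MOVK for
 * "mostly zero" values and MOVN/MOVK for "mostly one" (negative) values.
 * For example, 0x12345678 becomes MOVZ #0x5678 followed by
 * MOVK #0x1234, lsl #16, while -5 (0xfffffffb) is a single MOVN #0x4.
 */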
static inline void emit_a64_mov_i(const int is64, const int reg,
				  const s32 val, struct jit_ctx *ctx)
{
	u16 hi = val >> 16;
	u16 lo = val & 0xffff;

	if (hi & 0x8000) {
		if (hi == 0xffff) {
			emit(A64_MOVN(is64, reg, (u16)~lo, 0), ctx);
		} else {
			emit(A64_MOVN(is64, reg, (u16)~hi, 16), ctx);
			if (lo != 0xffff)
				emit(A64_MOVK(is64, reg, lo, 0), ctx);
		}
	} else {
		emit(A64_MOVZ(is64, reg, lo, 0), ctx);
		if (hi)
			emit(A64_MOVK(is64, reg, hi, 16), ctx);
	}
}

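/*
 * Count the 16-bit chunks of val that differ from the "filler" value
 * (0x0000 for a MOVZ-based sequence, 0xffff for a MOVN-based one), i.e.
 * the number of instructions each strategy would need. The caller picks
 * whichever is cheaper.
 */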
static int i64_i16_blocks(const u64 val, bool inverse)
{
	return (((val >>  0) & 0xffff) != (inverse ? 0xffff : 0x0000)) +
	       (((val >> 16) & 0xffff) != (inverse ? 0xffff : 0x0000)) +
	       (((val >> 32) & 0xffff) != (inverse ? 0xffff : 0x0000)) +
	       (((val >> 48) & 0xffff) != (inverse ? 0xffff : 0x0000));
}

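/*
 * Load a 64-bit immediate: start with MOVZ (or MOVN, if that is cheaper)
 * for the most significant non-filler chunk, then MOVK the remaining
 * chunks that differ from the filler. E.g. 0x0000cafe00001234 becomes
 * MOVZ #0xcafe, lsl #32 followed by MOVK #0x1234.
 */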
static inline void emit_a64_mov_i64(const int reg, const u64 val,
				    struct jit_ctx *ctx)
{
	u64 nrm_tmp = val, rev_tmp = ~val;
	bool inverse;
	int shift;

	if (!(nrm_tmp >> 32))
		return emit_a64_mov_i(0, reg, (u32)val, ctx);

	inverse = i64_i16_blocks(nrm_tmp, true) < i64_i16_blocks(nrm_tmp, false);
	shift = max(round_down((inverse ? (fls64(rev_tmp) - 1) :
					  (fls64(nrm_tmp) - 1)), 16), 0);
	if (inverse)
		emit(A64_MOVN(1, reg, (rev_tmp >> shift) & 0xffff, shift), ctx);
	else
		emit(A64_MOVZ(1, reg, (nrm_tmp >> shift) & 0xffff, shift), ctx);
	shift -= 16;
	while (shift >= 0) {
		if (((nrm_tmp >> shift) & 0xffff) != (inverse ? 0xffff : 0x0000))
			emit(A64_MOVK(1, reg, (nrm_tmp >> shift) & 0xffff, shift), ctx);
		shift -= 16;
	}
}

/*
 * Kernel addresses in the vmalloc space use at most 48 bits, and the
 * remaining (upper) bits are guaranteed to be 1. So we can compose the
 * address with a fixed length movn/movk/movk sequence.
 */
static inline void emit_addr_mov_i64(const int reg, const u64 val,
				     struct jit_ctx *ctx)
{
	u64 tmp = val;
	int shift = 0;

	emit(A64_MOVN(1, reg, ~tmp & 0xffff, shift), ctx);
	while (shift < 32) {
		tmp >>= 16;
		shift += 16;
		emit(A64_MOVK(1, reg, tmp & 0xffff, shift), ctx);
	}
}

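/*
 * ctx->offset[i] holds the index of the first arm64 instruction emitted
 * for BPF instruction i, and the branch is the last instruction emitted
 * for the current BPF instruction. E.g. a BPF_JA with off == 0 (branch
 * to the next BPF instruction) yields an arm64 offset of 1.
 */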
static inline int bpf2a64_offset(int bpf_insn, int off,
				 const struct jit_ctx *ctx)
{
	/* BPF JMP offset is relative to the next instruction */
	bpf_insn++;
	/*
	 * Whereas arm64 branch instructions encode the offset
	 * from the branch itself, so we must subtract 1 from the
	 * instruction offset.
	 */
	return ctx->offset[bpf_insn + off] - (ctx->offset[bpf_insn] - 1);
}

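/*
 * Used by bpf_jit_binary_alloc() to pre-fill the image: padding and any
 * not-yet-written slots contain AARCH64_BREAK_FAULT (BRK) so stray
 * execution traps instead of running garbage.
 */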
static void jit_fill_hole(void *area, unsigned int size)
{
	__le32 *ptr;
	/* We are guaranteed to have aligned memory. */
	for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
		*ptr++ = cpu_to_le32(AARCH64_BREAK_FAULT);
}

static inline int epilogue_offset(const struct jit_ctx *ctx)
{
	int to = ctx->epilogue_offset;
	int from = ctx->idx;

	return to - from;
}

/* Stack must be multiples of 16B */
#define STACK_ALIGN(sz) (((sz) + 15) & ~15)
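/* e.g. STACK_ALIGN(20) == 32, STACK_ALIGN(16) == 16 */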

/* Tail call offset to jump into */
#define PROLOGUE_OFFSET 7
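/*
 * A tail call branches to prog->bpf_func + PROLOGUE_OFFSET instructions
 * (see emit_bpf_tail_call()), skipping the callee-saved register pushes
 * and the tail_call_cnt initialization of the target program so that the
 * current stack frame and tail call counter are reused.
 */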

static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
{
	const struct bpf_prog *prog = ctx->prog;
	const u8 r6 = bpf2a64[BPF_REG_6];
	const u8 r7 = bpf2a64[BPF_REG_7];
	const u8 r8 = bpf2a64[BPF_REG_8];
	const u8 r9 = bpf2a64[BPF_REG_9];
	const u8 fp = bpf2a64[BPF_REG_FP];
	const u8 tcc = bpf2a64[TCALL_CNT];
	const int idx0 = ctx->idx;
	int cur_offset;

	/*
	 * BPF prog stack layout
	 *
	 *                         high
	 * original A64_SP =>   0:+-----+ BPF prologue
	 *                        |FP/LR|
	 * current A64_FP =>  -16:+-----+
	 *                        | ... | callee saved registers
	 * BPF fp register => -64:+-----+ <= (BPF_FP)
	 *                        |     |
	 *                        | ... | BPF prog stack
	 *                        |     |
	 *                        +-----+ <= (BPF_FP - prog->aux->stack_depth)
	 *                        |RSVD | padding
	 * current A64_SP =>      +-----+ <= (BPF_FP - ctx->stack_size)
	 *                        |     |
	 *                        | ... | Function call stack
	 *                        |     |
	 *                        +-----+
	 *                          low
	 *
	 */

	/* Save FP and LR registers to comply with the ARM64 AAPCS */
	emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
	emit(A64_MOV(1, A64_FP, A64_SP), ctx);

	/* Save callee-saved registers */
	emit(A64_PUSH(r6, r7, A64_SP), ctx);
	emit(A64_PUSH(r8, r9, A64_SP), ctx);
	emit(A64_PUSH(fp, tcc, A64_SP), ctx);

	/* Set up BPF prog stack base register */
	emit(A64_MOV(1, fp, A64_SP), ctx);

	if (!ebpf_from_cbpf) {
		/* Initialize tail_call_cnt */
		emit(A64_MOVZ(1, tcc, 0, 0), ctx);

		cur_offset = ctx->idx - idx0;
		if (cur_offset != PROLOGUE_OFFSET) {
			pr_err_once("PROLOGUE_OFFSET = %d, expected %d!\n",
				    cur_offset, PROLOGUE_OFFSET);
			return -1;
		}
	}

	ctx->stack_size = STACK_ALIGN(prog->aux->stack_depth);

	/* Set up function call stack */
	emit(A64_SUB_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
	return 0;
}

static int out_offset = -1; /* initialized on the first pass of build_body() */
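/*
 * out_offset is the distance (in instructions) from the start of the
 * tail-call sequence to its "out:" label. It is unknown during the first
 * pass, recorded once that pass reaches "out:", and then verified to be
 * identical on every later pass.
 */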
static int emit_bpf_tail_call(struct jit_ctx *ctx)
{
	/* bpf_tail_call(void *prog_ctx, struct bpf_array *array, u64 index) */
	const u8 r2 = bpf2a64[BPF_REG_2];
	const u8 r3 = bpf2a64[BPF_REG_3];

	const u8 tmp = bpf2a64[TMP_REG_1];
	const u8 prg = bpf2a64[TMP_REG_2];
	const u8 tcc = bpf2a64[TCALL_CNT];
	const int idx0 = ctx->idx;
#define cur_offset (ctx->idx - idx0)
#define jmp_offset (out_offset - (cur_offset))
	size_t off;

	/* if (index >= array->map.max_entries)
	 *     goto out;
	 */
	off = offsetof(struct bpf_array, map.max_entries);
	emit_a64_mov_i64(tmp, off, ctx);
	emit(A64_LDR32(tmp, r2, tmp), ctx);
	emit(A64_MOV(0, r3, r3), ctx);
	emit(A64_CMP(0, r3, tmp), ctx);
	emit(A64_B_(A64_COND_CS, jmp_offset), ctx);

	/* if (tail_call_cnt > MAX_TAIL_CALL_CNT)
	 *     goto out;
	 * tail_call_cnt++;
	 */
	emit_a64_mov_i64(tmp, MAX_TAIL_CALL_CNT, ctx);
	emit(A64_CMP(1, tcc, tmp), ctx);
	emit(A64_B_(A64_COND_HI, jmp_offset), ctx);
	emit(A64_ADD_I(1, tcc, tcc, 1), ctx);

	/* prog = array->ptrs[index];
	 * if (prog == NULL)
	 *     goto out;
	 */
	off = offsetof(struct bpf_array, ptrs);
	emit_a64_mov_i64(tmp, off, ctx);
	emit(A64_ADD(1, tmp, r2, tmp), ctx);
	emit(A64_LSL(1, prg, r3, 3), ctx);
	emit(A64_LDR64(prg, tmp, prg), ctx);
	emit(A64_CBZ(1, prg, jmp_offset), ctx);

	/* goto *(prog->bpf_func + prologue_offset); */
	off = offsetof(struct bpf_prog, bpf_func);
	emit_a64_mov_i64(tmp, off, ctx);
	emit(A64_LDR64(tmp, prg, tmp), ctx);
	emit(A64_ADD_I(1, tmp, tmp, sizeof(u32) * PROLOGUE_OFFSET), ctx);
	emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
	emit(A64_BR(tmp), ctx);

	/* out: */
	if (out_offset == -1)
		out_offset = cur_offset;
	if (cur_offset != out_offset) {
		pr_err_once("tail_call out_offset = %d, expected %d!\n",
			    cur_offset, out_offset);
		return -1;
	}
	return 0;
#undef cur_offset
#undef jmp_offset
}

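/*
 * The epilogue undoes the prologue in reverse order: drop the BPF stack,
 * pop the callee-saved registers and FP/LR, and move the BPF return
 * value (r7) into x0 before returning.
 */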
static void build_epilogue(struct jit_ctx *ctx)
{
	const u8 r0 = bpf2a64[BPF_REG_0];
	const u8 r6 = bpf2a64[BPF_REG_6];
	const u8 r7 = bpf2a64[BPF_REG_7];
	const u8 r8 = bpf2a64[BPF_REG_8];
	const u8 r9 = bpf2a64[BPF_REG_9];
	const u8 fp = bpf2a64[BPF_REG_FP];

	/* We're done with BPF stack */
	emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);

	/* Restore fp (x25) and x26 */
	emit(A64_POP(fp, A64_R(26), A64_SP), ctx);

	/* Restore callee-saved registers */
	emit(A64_POP(r8, r9, A64_SP), ctx);
	emit(A64_POP(r6, r7, A64_SP), ctx);

	/* Restore FP/LR registers */
	emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);

	/* Set return value */
	emit(A64_MOV(1, A64_R(0), r0), ctx);

	emit(A64_RET(A64_LR), ctx);
}

/* JITs an eBPF instruction.
 * Returns:
 * 0  - successfully JITed an 8-byte eBPF instruction.
 * >0 - successfully JITed a 16-byte eBPF instruction.
 * <0 - failed to JIT.
 */
static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
		      bool extra_pass)
{
	const u8 code = insn->code;
	const u8 dst = bpf2a64[insn->dst_reg];
	const u8 src = bpf2a64[insn->src_reg];
	const u8 tmp = bpf2a64[TMP_REG_1];
	const u8 tmp2 = bpf2a64[TMP_REG_2];
	const u8 tmp3 = bpf2a64[TMP_REG_3];
	const s16 off = insn->off;
	const s32 imm = insn->imm;
	const int i = insn - ctx->prog->insnsi;
	const bool is64 = BPF_CLASS(code) == BPF_ALU64 ||
			  BPF_CLASS(code) == BPF_JMP;
	const bool isdw = BPF_SIZE(code) == BPF_DW;
	u8 jmp_cond, reg;
	s32 jmp_offset;

#define check_imm(bits, imm) do {				\
	if ((((imm) > 0) && ((imm) >> (bits))) ||		\
	    (((imm) < 0) && (~(imm) >> (bits)))) {		\
		pr_info("[%2d] imm=%d(0x%x) out of range\n",	\
			i, imm, imm);				\
		return -EINVAL;					\
	}							\
} while (0)
#define check_imm19(imm) check_imm(19, imm)
#define check_imm26(imm) check_imm(26, imm)
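/*
 * check_imm19/check_imm26 reject branch displacements that do not fit
 * the signed immediate field of the target instruction: 19 bits for
 * conditional branches (B.cond, CBZ/CBNZ), 26 bits for unconditional B.
 */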

	switch (code) {
	/* dst = src */
	case BPF_ALU | BPF_MOV | BPF_X:
	case BPF_ALU64 | BPF_MOV | BPF_X:
		emit(A64_MOV(is64, dst, src), ctx);
		break;
	/* dst = dst OP src */
	case BPF_ALU | BPF_ADD | BPF_X:
	case BPF_ALU64 | BPF_ADD | BPF_X:
		emit(A64_ADD(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_SUB | BPF_X:
	case BPF_ALU64 | BPF_SUB | BPF_X:
		emit(A64_SUB(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_AND | BPF_X:
	case BPF_ALU64 | BPF_AND | BPF_X:
		emit(A64_AND(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_OR | BPF_X:
	case BPF_ALU64 | BPF_OR | BPF_X:
		emit(A64_ORR(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_XOR | BPF_X:
	case BPF_ALU64 | BPF_XOR | BPF_X:
		emit(A64_EOR(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_MUL | BPF_X:
	case BPF_ALU64 | BPF_MUL | BPF_X:
		emit(A64_MUL(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_DIV | BPF_X:
	case BPF_ALU64 | BPF_DIV | BPF_X:
	case BPF_ALU | BPF_MOD | BPF_X:
	case BPF_ALU64 | BPF_MOD | BPF_X:
		switch (BPF_OP(code)) {
		case BPF_DIV:
			emit(A64_UDIV(is64, dst, dst, src), ctx);
			break;
		case BPF_MOD:
			emit(A64_UDIV(is64, tmp, dst, src), ctx);
			emit(A64_MSUB(is64, dst, dst, tmp, src), ctx);
			break;
		}
		break;
	case BPF_ALU | BPF_LSH | BPF_X:
	case BPF_ALU64 | BPF_LSH | BPF_X:
		emit(A64_LSLV(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_RSH | BPF_X:
	case BPF_ALU64 | BPF_RSH | BPF_X:
		emit(A64_LSRV(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_ARSH | BPF_X:
	case BPF_ALU64 | BPF_ARSH | BPF_X:
		emit(A64_ASRV(is64, dst, dst, src), ctx);
		break;
	/* dst = -dst */
	case BPF_ALU | BPF_NEG:
	case BPF_ALU64 | BPF_NEG:
		emit(A64_NEG(is64, dst, dst), ctx);
		break;
	/* dst = BSWAP##imm(dst) */
	case BPF_ALU | BPF_END | BPF_FROM_LE:
	case BPF_ALU | BPF_END | BPF_FROM_BE:
#ifdef CONFIG_CPU_BIG_ENDIAN
		if (BPF_SRC(code) == BPF_FROM_BE)
			goto emit_bswap_uxt;
#else /* !CONFIG_CPU_BIG_ENDIAN */
		if (BPF_SRC(code) == BPF_FROM_LE)
			goto emit_bswap_uxt;
#endif
		switch (imm) {
		case 16:
			emit(A64_REV16(is64, dst, dst), ctx);
			/* zero-extend 16 bits into 64 bits */
			emit(A64_UXTH(is64, dst, dst), ctx);
			break;
		case 32:
			emit(A64_REV32(is64, dst, dst), ctx);
			/* upper 32 bits already cleared */
			break;
		case 64:
			emit(A64_REV64(dst, dst), ctx);
			break;
		}
		break;
emit_bswap_uxt:
		switch (imm) {
		case 16:
			/* zero-extend 16 bits into 64 bits */
			emit(A64_UXTH(is64, dst, dst), ctx);
			break;
		case 32:
			/* zero-extend 32 bits into 64 bits */
			emit(A64_UXTW(is64, dst, dst), ctx);
			break;
		case 64:
			/* nop */
			break;
		}
		break;
	/* dst = imm */
	case BPF_ALU | BPF_MOV | BPF_K:
	case BPF_ALU64 | BPF_MOV | BPF_K:
		emit_a64_mov_i(is64, dst, imm, ctx);
		break;
	/* dst = dst OP imm */
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_ADD | BPF_K:
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_ADD(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_K:
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_SUB(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_AND | BPF_K:
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_AND(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_OR | BPF_K:
	case BPF_ALU64 | BPF_OR | BPF_K:
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_ORR(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_K:
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_EOR(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_MUL | BPF_K:
	case BPF_ALU64 | BPF_MUL | BPF_K:
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_MUL(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_DIV | BPF_K:
	case BPF_ALU64 | BPF_DIV | BPF_K:
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_UDIV(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_MOD | BPF_K:
	case BPF_ALU64 | BPF_MOD | BPF_K:
		emit_a64_mov_i(is64, tmp2, imm, ctx);
		emit(A64_UDIV(is64, tmp, dst, tmp2), ctx);
		emit(A64_MSUB(is64, dst, dst, tmp, tmp2), ctx);
		break;
	case BPF_ALU | BPF_LSH | BPF_K:
	case BPF_ALU64 | BPF_LSH | BPF_K:
		emit(A64_LSL(is64, dst, dst, imm), ctx);
		break;
	case BPF_ALU | BPF_RSH | BPF_K:
	case BPF_ALU64 | BPF_RSH | BPF_K:
		emit(A64_LSR(is64, dst, dst, imm), ctx);
		break;
	case BPF_ALU | BPF_ARSH | BPF_K:
	case BPF_ALU64 | BPF_ARSH | BPF_K:
		emit(A64_ASR(is64, dst, dst, imm), ctx);
		break;

	/* JUMP off */
	case BPF_JMP | BPF_JA:
		jmp_offset = bpf2a64_offset(i, off, ctx);
		check_imm26(jmp_offset);
		emit(A64_B(jmp_offset), ctx);
		break;
	/* IF (dst COND src) JUMP off */
	case BPF_JMP | BPF_JEQ | BPF_X:
	case BPF_JMP | BPF_JGT | BPF_X:
	case BPF_JMP | BPF_JLT | BPF_X:
	case BPF_JMP | BPF_JGE | BPF_X:
	case BPF_JMP | BPF_JLE | BPF_X:
	case BPF_JMP | BPF_JNE | BPF_X:
	case BPF_JMP | BPF_JSGT | BPF_X:
	case BPF_JMP | BPF_JSLT | BPF_X:
	case BPF_JMP | BPF_JSGE | BPF_X:
	case BPF_JMP | BPF_JSLE | BPF_X:
	case BPF_JMP32 | BPF_JEQ | BPF_X:
	case BPF_JMP32 | BPF_JGT | BPF_X:
	case BPF_JMP32 | BPF_JLT | BPF_X:
	case BPF_JMP32 | BPF_JGE | BPF_X:
	case BPF_JMP32 | BPF_JLE | BPF_X:
	case BPF_JMP32 | BPF_JNE | BPF_X:
	case BPF_JMP32 | BPF_JSGT | BPF_X:
	case BPF_JMP32 | BPF_JSLT | BPF_X:
	case BPF_JMP32 | BPF_JSGE | BPF_X:
	case BPF_JMP32 | BPF_JSLE | BPF_X:
		emit(A64_CMP(is64, dst, src), ctx);
emit_cond_jmp:
		jmp_offset = bpf2a64_offset(i, off, ctx);
		check_imm19(jmp_offset);
		switch (BPF_OP(code)) {
		case BPF_JEQ:
			jmp_cond = A64_COND_EQ;
			break;
		case BPF_JGT:
			jmp_cond = A64_COND_HI;
			break;
		case BPF_JLT:
			jmp_cond = A64_COND_CC;
			break;
		case BPF_JGE:
			jmp_cond = A64_COND_CS;
			break;
		case BPF_JLE:
			jmp_cond = A64_COND_LS;
			break;
		case BPF_JSET:
		case BPF_JNE:
			jmp_cond = A64_COND_NE;
			break;
		case BPF_JSGT:
			jmp_cond = A64_COND_GT;
			break;
		case BPF_JSLT:
			jmp_cond = A64_COND_LT;
			break;
		case BPF_JSGE:
			jmp_cond = A64_COND_GE;
			break;
		case BPF_JSLE:
			jmp_cond = A64_COND_LE;
			break;
		default:
			return -EFAULT;
		}
		emit(A64_B_(jmp_cond, jmp_offset), ctx);
		break;
	case BPF_JMP | BPF_JSET | BPF_X:
	case BPF_JMP32 | BPF_JSET | BPF_X:
		emit(A64_TST(is64, dst, src), ctx);
		goto emit_cond_jmp;
	/* IF (dst COND imm) JUMP off */
	case BPF_JMP | BPF_JEQ | BPF_K:
	case BPF_JMP | BPF_JGT | BPF_K:
	case BPF_JMP | BPF_JLT | BPF_K:
	case BPF_JMP | BPF_JGE | BPF_K:
	case BPF_JMP | BPF_JLE | BPF_K:
	case BPF_JMP | BPF_JNE | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSLT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
	case BPF_JMP | BPF_JSLE | BPF_K:
	case BPF_JMP32 | BPF_JEQ | BPF_K:
	case BPF_JMP32 | BPF_JGT | BPF_K:
	case BPF_JMP32 | BPF_JLT | BPF_K:
	case BPF_JMP32 | BPF_JGE | BPF_K:
	case BPF_JMP32 | BPF_JLE | BPF_K:
	case BPF_JMP32 | BPF_JNE | BPF_K:
	case BPF_JMP32 | BPF_JSGT | BPF_K:
	case BPF_JMP32 | BPF_JSLT | BPF_K:
	case BPF_JMP32 | BPF_JSGE | BPF_K:
	case BPF_JMP32 | BPF_JSLE | BPF_K:
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_CMP(is64, dst, tmp), ctx);
		goto emit_cond_jmp;
	case BPF_JMP | BPF_JSET | BPF_K:
	case BPF_JMP32 | BPF_JSET | BPF_K:
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_TST(is64, dst, tmp), ctx);
		goto emit_cond_jmp;
	/* function call */
	case BPF_JMP | BPF_CALL:
	{
		const u8 r0 = bpf2a64[BPF_REG_0];
		bool func_addr_fixed;
		u64 func_addr;
		int ret;

		ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass,
					    &func_addr, &func_addr_fixed);
		if (ret < 0)
			return ret;
		emit_addr_mov_i64(tmp, func_addr, ctx);
		emit(A64_BLR(tmp), ctx);
		emit(A64_MOV(1, r0, A64_R(0)), ctx);
		break;
	}
	/* tail call */
	case BPF_JMP | BPF_TAIL_CALL:
		if (emit_bpf_tail_call(ctx))
			return -EFAULT;
		break;
	/* function return */
	case BPF_JMP | BPF_EXIT:
		/* Optimization: when the last instruction is EXIT,
		   simply fall through to the epilogue. */
		if (i == ctx->prog->len - 1)
			break;
		jmp_offset = epilogue_offset(ctx);
		check_imm26(jmp_offset);
		emit(A64_B(jmp_offset), ctx);
		break;

	/* dst = imm64 */
	case BPF_LD | BPF_IMM | BPF_DW:
	{
		const struct bpf_insn insn1 = insn[1];
		u64 imm64;

		imm64 = (u64)insn1.imm << 32 | (u32)imm;
		emit_a64_mov_i64(dst, imm64, ctx);

		return 1;
	}

	/* LDX: dst = *(size *)(src + off) */
	case BPF_LDX | BPF_MEM | BPF_W:
	case BPF_LDX | BPF_MEM | BPF_H:
	case BPF_LDX | BPF_MEM | BPF_B:
	case BPF_LDX | BPF_MEM | BPF_DW:
		emit_a64_mov_i(1, tmp, off, ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			emit(A64_LDR32(dst, src, tmp), ctx);
			break;
		case BPF_H:
			emit(A64_LDRH(dst, src, tmp), ctx);
			break;
		case BPF_B:
			emit(A64_LDRB(dst, src, tmp), ctx);
			break;
		case BPF_DW:
			emit(A64_LDR64(dst, src, tmp), ctx);
			break;
		}
		break;

	/* speculation barrier */
	case BPF_ST | BPF_NOSPEC:
		/*
		 * Nothing required here.
		 *
		 * In case of arm64, we rely on the firmware mitigation of
		 * Speculative Store Bypass as controlled via the ssbd kernel
		 * parameter. Whenever the mitigation is enabled, it works
		 * for all of the kernel code with no need to provide any
		 * additional instructions.
		 */
		break;

	/* ST: *(size *)(dst + off) = imm */
	case BPF_ST | BPF_MEM | BPF_W:
	case BPF_ST | BPF_MEM | BPF_H:
	case BPF_ST | BPF_MEM | BPF_B:
	case BPF_ST | BPF_MEM | BPF_DW:
		/* Load imm to a register then store it */
		emit_a64_mov_i(1, tmp2, off, ctx);
		emit_a64_mov_i(1, tmp, imm, ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			emit(A64_STR32(tmp, dst, tmp2), ctx);
			break;
		case BPF_H:
			emit(A64_STRH(tmp, dst, tmp2), ctx);
			break;
		case BPF_B:
			emit(A64_STRB(tmp, dst, tmp2), ctx);
			break;
		case BPF_DW:
			emit(A64_STR64(tmp, dst, tmp2), ctx);
			break;
		}
		break;

	/* STX: *(size *)(dst + off) = src */
	case BPF_STX | BPF_MEM | BPF_W:
	case BPF_STX | BPF_MEM | BPF_H:
	case BPF_STX | BPF_MEM | BPF_B:
	case BPF_STX | BPF_MEM | BPF_DW:
		emit_a64_mov_i(1, tmp, off, ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			emit(A64_STR32(src, dst, tmp), ctx);
			break;
		case BPF_H:
			emit(A64_STRH(src, dst, tmp), ctx);
			break;
		case BPF_B:
			emit(A64_STRB(src, dst, tmp), ctx);
			break;
		case BPF_DW:
			emit(A64_STR64(src, dst, tmp), ctx);
			break;
		}
		break;

	/* STX XADD: lock *(u32 *)(dst + off) += src */
	case BPF_STX | BPF_XADD | BPF_W:
	/* STX XADD: lock *(u64 *)(dst + off) += src */
	case BPF_STX | BPF_XADD | BPF_DW:
		if (!off) {
			reg = dst;
		} else {
			emit_a64_mov_i(1, tmp, off, ctx);
			emit(A64_ADD(1, tmp, tmp, dst), ctx);
			reg = tmp;
		}
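		/*
		 * CPUs with LSE atomics can do the add with a single STADD;
		 * otherwise fall back to an exclusive load/add/store loop
		 * that retries (CBNZ on the status register) until the
		 * store-exclusive succeeds.
		 */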
		if (cpus_have_cap(ARM64_HAS_LSE_ATOMICS)) {
			emit(A64_STADD(isdw, reg, src), ctx);
		} else {
			emit(A64_LDXR(isdw, tmp2, reg), ctx);
			emit(A64_ADD(isdw, tmp2, tmp2, src), ctx);
			emit(A64_STXR(isdw, tmp2, reg, tmp3), ctx);
			jmp_offset = -3;
			check_imm19(jmp_offset);
			emit(A64_CBNZ(0, tmp3, jmp_offset), ctx);
		}
		break;

	default:
		pr_err_once("unknown opcode %02x\n", code);
		return -EINVAL;
	}

	return 0;
}

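/*
 * build_body() runs once per JIT pass: with ctx->image == NULL it only
 * sizes the program and records per-instruction offsets, and on the
 * real pass it emits the instructions into the image.
 */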
static int build_body(struct jit_ctx *ctx, bool extra_pass)
{
	const struct bpf_prog *prog = ctx->prog;
	int i;

	/*
	 * - offset[0] - offset of the end of prologue,
	 *   start of the 1st instruction.
	 * - offset[1] - offset of the end of 1st instruction,
	 *   start of the 2nd instruction
	 * [....]
	 * - offset[3] - offset of the end of 3rd instruction,
	 *   start of 4th instruction
	 */
	for (i = 0; i < prog->len; i++) {
		const struct bpf_insn *insn = &prog->insnsi[i];
		int ret;

		if (ctx->image == NULL)
			ctx->offset[i] = ctx->idx;
		ret = build_insn(insn, ctx, extra_pass);
		if (ret > 0) {
			i++;
			if (ctx->image == NULL)
				ctx->offset[i] = ctx->idx;
			continue;
		}
		if (ret)
			return ret;
	}
	/*
	 * offset is allocated with prog->len + 1 so fill in
	 * the last element with the offset after the last
	 * instruction (end of program)
	 */
	if (ctx->image == NULL)
		ctx->offset[i] = ctx->idx;

	return 0;
}

static int validate_code(struct jit_ctx *ctx)
{
	int i;

	for (i = 0; i < ctx->idx; i++) {
		u32 a64_insn = le32_to_cpu(ctx->image[i]);

		if (a64_insn == AARCH64_BREAK_FAULT)
			return -1;
	}

	return 0;
}

static inline void bpf_flush_icache(void *start, void *end)
{
	flush_icache_range((unsigned long)start, (unsigned long)end);
}

struct arm64_jit_data {
	struct bpf_binary_header *header;
	u8 *image;
	struct jit_ctx ctx;
};

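/*
 * For multi-function programs (prog->is_func) the JIT can be entered
 * again for an extra pass once the final callee addresses are known; the
 * context, image and header from the first invocation are stashed in
 * prog->aux->jit_data so that pass can regenerate an image of identical
 * layout.
 */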
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	struct bpf_prog *tmp, *orig_prog = prog;
	struct bpf_binary_header *header;
	struct arm64_jit_data *jit_data;
	bool was_classic = bpf_prog_was_classic(prog);
	bool tmp_blinded = false;
	bool extra_pass = false;
	struct jit_ctx ctx;
	int image_size;
	u8 *image_ptr;

	if (!prog->jit_requested)
		return orig_prog;

	tmp = bpf_jit_blind_constants(prog);
	/* If blinding was requested and we failed during blinding,
	 * we must fall back to the interpreter.
	 */
	if (IS_ERR(tmp))
		return orig_prog;
	if (tmp != prog) {
		tmp_blinded = true;
		prog = tmp;
	}

	jit_data = prog->aux->jit_data;
	if (!jit_data) {
		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
		if (!jit_data) {
			prog = orig_prog;
			goto out;
		}
		prog->aux->jit_data = jit_data;
	}
	if (jit_data->ctx.offset) {
		ctx = jit_data->ctx;
		image_ptr = jit_data->image;
		header = jit_data->header;
		extra_pass = true;
		image_size = sizeof(u32) * ctx.idx;
		goto skip_init_ctx;
	}
	memset(&ctx, 0, sizeof(ctx));
	ctx.prog = prog;

	ctx.offset = kcalloc(prog->len + 1, sizeof(int), GFP_KERNEL);
	if (ctx.offset == NULL) {
		prog = orig_prog;
		goto out_off;
	}

	/*
	 * 1. Initial fake pass to compute ctx->idx and ctx->offset.
	 *
	 * BPF line info needs ctx->offset[i] to be the offset of
	 * instruction[i] in jited image, so build prologue first.
	 */
	if (build_prologue(&ctx, was_classic)) {
		prog = orig_prog;
		goto out_off;
	}

	if (build_body(&ctx, extra_pass)) {
		prog = orig_prog;
		goto out_off;
	}

	ctx.epilogue_offset = ctx.idx;
	build_epilogue(&ctx);

	/* Now we know the actual image size. */
	image_size = sizeof(u32) * ctx.idx;
	header = bpf_jit_binary_alloc(image_size, &image_ptr,
				      sizeof(u32), jit_fill_hole);
	if (header == NULL) {
		prog = orig_prog;
		goto out_off;
	}

	/* 2. Now, the actual pass. */

	ctx.image = (__le32 *)image_ptr;
skip_init_ctx:
	ctx.idx = 0;

	build_prologue(&ctx, was_classic);

	if (build_body(&ctx, extra_pass)) {
		bpf_jit_binary_free(header);
		prog = orig_prog;
		goto out_off;
	}

	build_epilogue(&ctx);

	/* 3. Extra pass to validate JITed code. */
	if (validate_code(&ctx)) {
		bpf_jit_binary_free(header);
		prog = orig_prog;
		goto out_off;
	}

	/* And we're done. */
	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, image_size, 2, ctx.image);

	bpf_flush_icache(header, ctx.image + ctx.idx);

	if (!prog->is_func || extra_pass) {
		if (extra_pass && ctx.idx != jit_data->ctx.idx) {
			pr_err_once("multi-func JIT bug %d != %d\n",
				    ctx.idx, jit_data->ctx.idx);
			bpf_jit_binary_free(header);
			prog->bpf_func = NULL;
			prog->jited = 0;
			prog->jited_len = 0;
			goto out_off;
		}
		bpf_jit_binary_lock_ro(header);
	} else {
		jit_data->ctx = ctx;
		jit_data->image = image_ptr;
		jit_data->header = header;
	}
	prog->bpf_func = (void *)ctx.image;
	prog->jited = 1;
	prog->jited_len = image_size;

	if (!prog->is_func || extra_pass) {
		int i;

		/* offset[prog->len] is the size of program */
		for (i = 0; i <= prog->len; i++)
			ctx.offset[i] *= AARCH64_INSN_SIZE;
		bpf_prog_fill_jited_linfo(prog, ctx.offset + 1);
out_off:
		kfree(ctx.offset);
		kfree(jit_data);
		prog->aux->jit_data = NULL;
	}
out:
	if (tmp_blinded)
		bpf_jit_prog_release_other(prog, prog == orig_prog ?
					   tmp : orig_prog);
	return prog;
}

u64 bpf_jit_alloc_exec_limit(void)
{
	return BPF_JIT_REGION_SIZE;
}

void *bpf_jit_alloc_exec(unsigned long size)
{
	return __vmalloc_node_range(size, PAGE_SIZE, BPF_JIT_REGION_START,
				    BPF_JIT_REGION_END, GFP_KERNEL,
				    PAGE_KERNEL, 0, NUMA_NO_NODE,
				    __builtin_return_address(0));
}

void bpf_jit_free_exec(void *addr)
{
	return vfree(addr);
}

#ifdef CONFIG_CFI_CLANG
bool arch_bpf_jit_check_func(const struct bpf_prog *prog)
{
	const uintptr_t func = (const uintptr_t)prog->bpf_func;

	/* bpf_func must be correctly aligned and within the BPF JIT region */
	return (func >= BPF_JIT_REGION_START && func < BPF_JIT_REGION_END &&
		IS_ALIGNED(func, sizeof(u32)));
}
#endif