/*
 * BPF JIT compiler for ARM64
 *
 * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) "bpf_jit: " fmt

#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/printk.h>
#include <linux/skbuff.h>
#include <linux/slab.h>

#include <asm/byteorder.h>
#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>

#include "bpf_jit.h"

int bpf_jit_enable __read_mostly;

#define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
#define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
#define TCALL_CNT (MAX_BPF_JIT_REG + 2)

/* Map BPF registers to A64 registers */
static const int bpf2a64[] = {
	/* return value from in-kernel function, and exit value from eBPF */
	[BPF_REG_0] = A64_R(7),
	/* arguments from eBPF program to in-kernel function */
	[BPF_REG_1] = A64_R(0),
	[BPF_REG_2] = A64_R(1),
	[BPF_REG_3] = A64_R(2),
	[BPF_REG_4] = A64_R(3),
	[BPF_REG_5] = A64_R(4),
	/* callee saved registers that in-kernel function will preserve */
	[BPF_REG_6] = A64_R(19),
	[BPF_REG_7] = A64_R(20),
	[BPF_REG_8] = A64_R(21),
	[BPF_REG_9] = A64_R(22),
	/* read-only frame pointer to access stack */
	[BPF_REG_FP] = A64_R(25),
	/* temporary registers for internal BPF JIT */
	[TMP_REG_1] = A64_R(10),
	[TMP_REG_2] = A64_R(11),
	/* tail_call_cnt */
	[TCALL_CNT] = A64_R(26),
	/* temporary register for blinding constants */
	[BPF_REG_AX] = A64_R(9),
};
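
/*
 * Note that BPF_REG_1..BPF_REG_5 land directly in the AAPCS64 argument
 * registers x0-x4, so calling an in-kernel helper needs no argument
 * shuffling; only the result has to be copied from x0 back into x7
 * (BPF_REG_0), which the BPF_JMP | BPF_CALL case below does explicitly.
 */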

struct jit_ctx {
	const struct bpf_prog *prog;
	int idx;
	int epilogue_offset;
	int *offset;
	u32 *image;
};

static inline void emit(const u32 insn, struct jit_ctx *ctx)
{
	if (ctx->image != NULL)
		ctx->image[ctx->idx] = cpu_to_le32(insn);

	ctx->idx++;
}

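/*
 * Build an arbitrary 64-bit immediate from 16-bit chunks: one MOVZ for the
 * low halfword, then a MOVK for each remaining non-zero halfword.  For
 * example, 0x12345678 becomes "movz reg, #0x5678" followed by
 * "movk reg, #0x1234, lsl #16"; all-zero upper halfwords emit nothing.
 */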
static inline void emit_a64_mov_i64(const int reg, const u64 val,
				    struct jit_ctx *ctx)
{
	u64 tmp = val;
	int shift = 0;

	emit(A64_MOVZ(1, reg, tmp & 0xffff, shift), ctx);
	tmp >>= 16;
	shift += 16;
	while (tmp) {
		if (tmp & 0xffff)
			emit(A64_MOVK(1, reg, tmp & 0xffff, shift), ctx);
		tmp >>= 16;
		shift += 16;
	}
}

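/*
 * 32-bit (sign-extended) immediates take at most two instructions: MOVN
 * when the upper halfword has its sign bit set (e.g. -5 = 0xfffffffb is a
 * single MOVN of ~0xfffb), otherwise MOVZ plus an optional MOVK.
 */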
static inline void emit_a64_mov_i(const int is64, const int reg,
				  const s32 val, struct jit_ctx *ctx)
{
	u16 hi = val >> 16;
	u16 lo = val & 0xffff;

	if (hi & 0x8000) {
		if (hi == 0xffff) {
			emit(A64_MOVN(is64, reg, (u16)~lo, 0), ctx);
		} else {
			emit(A64_MOVN(is64, reg, (u16)~hi, 16), ctx);
			emit(A64_MOVK(is64, reg, lo, 0), ctx);
		}
	} else {
		emit(A64_MOVZ(is64, reg, lo, 0), ctx);
		if (hi)
			emit(A64_MOVK(is64, reg, hi, 16), ctx);
	}
}

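/*
 * Branch offsets are measured in A64 instructions (4-byte units), not in
 * bytes; they are derived from the per-BPF-instruction offsets recorded
 * by build_body() during the first pass.
 */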
static inline int bpf2a64_offset(int bpf_to, int bpf_from,
				 const struct jit_ctx *ctx)
{
	int to = ctx->offset[bpf_to];
	/* -1 to account for the Branch instruction */
	int from = ctx->offset[bpf_from] - 1;

	return to - from;
}

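/*
 * Unused space in the image is filled with AARCH64_BREAK_FAULT (an A64
 * breakpoint encoding), so stray execution into padding traps and
 * validate_code() can detect any slot that was never overwritten.
 */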
static void jit_fill_hole(void *area, unsigned int size)
{
	u32 *ptr;
	/* We are guaranteed to have aligned memory. */
	for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
		*ptr++ = cpu_to_le32(AARCH64_BREAK_FAULT);
}

static inline int epilogue_offset(const struct jit_ctx *ctx)
{
	int to = ctx->epilogue_offset;
	int from = ctx->idx;

	return to - from;
}

/* Stack must be a multiple of 16B */
#define STACK_ALIGN(sz) (((sz) + 15) & ~15)

#define _STACK_SIZE \
	(MAX_BPF_STACK \
	 + 4 /* extra for skb_copy_bits buffer */)

#define STACK_SIZE STACK_ALIGN(_STACK_SIZE)
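
/*
 * Assuming the usual MAX_BPF_STACK of 512, _STACK_SIZE is 516 and
 * STACK_SIZE rounds it up to a 16-byte-aligned 528 bytes below BPF_FP.
 */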

#define PROLOGUE_OFFSET 8
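/*
 * PROLOGUE_OFFSET is the number of A64 instructions build_prologue()
 * emits; emit_bpf_tail_call() jumps past exactly this many instructions
 * when entering the target program.
 */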

static int build_prologue(struct jit_ctx *ctx)
{
	const u8 r6 = bpf2a64[BPF_REG_6];
	const u8 r7 = bpf2a64[BPF_REG_7];
	const u8 r8 = bpf2a64[BPF_REG_8];
	const u8 r9 = bpf2a64[BPF_REG_9];
	const u8 fp = bpf2a64[BPF_REG_FP];
	const u8 tcc = bpf2a64[TCALL_CNT];
	const int idx0 = ctx->idx;
	int cur_offset;

	/*
	 * BPF prog stack layout
	 *
	 *                         high
	 * original A64_SP =>   0:+-----+ BPF prologue
	 *                        |FP/LR|
	 * current A64_FP =>  -16:+-----+
	 *                        | ... | callee saved registers
	 * BPF fp register => -64:+-----+ <= (BPF_FP)
	 *                        |     |
	 *                        | ... | BPF prog stack
	 *                        |     |
	 *                        +-----+ <= (BPF_FP - MAX_BPF_STACK)
	 *                        |RSVD | JIT scratchpad
	 * current A64_SP =>      +-----+ <= (BPF_FP - STACK_SIZE)
	 *                        |     |
	 *                        | ... | Function call stack
	 *                        |     |
	 *                        +-----+
	 *                          low
	 *
	 */

	/* Save FP and LR registers to stay aligned with ARM64 AAPCS */
	emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
	emit(A64_MOV(1, A64_FP, A64_SP), ctx);

	/* Save callee-saved registers */
	emit(A64_PUSH(r6, r7, A64_SP), ctx);
	emit(A64_PUSH(r8, r9, A64_SP), ctx);
	emit(A64_PUSH(fp, tcc, A64_SP), ctx);

	/* Set up BPF prog stack base register */
	emit(A64_MOV(1, fp, A64_SP), ctx);

	/* Initialize tail_call_cnt */
	emit(A64_MOVZ(1, tcc, 0, 0), ctx);

	/* Set up function call stack */
	emit(A64_SUB_I(1, A64_SP, A64_SP, STACK_SIZE), ctx);

	cur_offset = ctx->idx - idx0;
	if (cur_offset != PROLOGUE_OFFSET) {
		pr_err_once("PROLOGUE_OFFSET = %d, expected %d!\n",
			    cur_offset, PROLOGUE_OFFSET);
		return -1;
	}
	return 0;
}

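/*
 * out_offset is learned on the first (sizing) pass: the forward branches
 * to the "out:" label below cannot know their target until the whole
 * sequence has been emitted once, so the first pass records the label's
 * offset and later passes reuse it and verify that it still matches.
 */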
static int out_offset = -1; /* initialized on the first pass of build_body() */
static int emit_bpf_tail_call(struct jit_ctx *ctx)
{
	/* bpf_tail_call(void *prog_ctx, struct bpf_array *array, u64 index) */
	const u8 r2 = bpf2a64[BPF_REG_2];
	const u8 r3 = bpf2a64[BPF_REG_3];

	const u8 tmp = bpf2a64[TMP_REG_1];
	const u8 prg = bpf2a64[TMP_REG_2];
	const u8 tcc = bpf2a64[TCALL_CNT];
	const int idx0 = ctx->idx;
#define cur_offset (ctx->idx - idx0)
#define jmp_offset (out_offset - (cur_offset))
	size_t off;

	/* if (index >= array->map.max_entries)
	 *     goto out;
	 */
	off = offsetof(struct bpf_array, map.max_entries);
	emit_a64_mov_i64(tmp, off, ctx);
	emit(A64_LDR32(tmp, r2, tmp), ctx);
	emit(A64_MOV(0, r3, r3), ctx);
	emit(A64_CMP(0, r3, tmp), ctx);
	emit(A64_B_(A64_COND_CS, jmp_offset), ctx);

	/* if (tail_call_cnt > MAX_TAIL_CALL_CNT)
	 *     goto out;
	 * tail_call_cnt++;
	 */
	emit_a64_mov_i64(tmp, MAX_TAIL_CALL_CNT, ctx);
	emit(A64_CMP(1, tcc, tmp), ctx);
	emit(A64_B_(A64_COND_HI, jmp_offset), ctx);
	emit(A64_ADD_I(1, tcc, tcc, 1), ctx);

	/* prog = array->ptrs[index];
	 * if (prog == NULL)
	 *     goto out;
	 */
	off = offsetof(struct bpf_array, ptrs);
	emit_a64_mov_i64(tmp, off, ctx);
	emit(A64_ADD(1, tmp, r2, tmp), ctx);
	emit(A64_LSL(1, prg, r3, 3), ctx);
	emit(A64_LDR64(prg, tmp, prg), ctx);
	emit(A64_CBZ(1, prg, jmp_offset), ctx);

	/* goto *(prog->bpf_func + prologue_size); */
	off = offsetof(struct bpf_prog, bpf_func);
	emit_a64_mov_i64(tmp, off, ctx);
	emit(A64_LDR64(tmp, prg, tmp), ctx);
	emit(A64_ADD_I(1, tmp, tmp, sizeof(u32) * PROLOGUE_OFFSET), ctx);
	emit(A64_BR(tmp), ctx);

	/* out: */
	if (out_offset == -1)
		out_offset = cur_offset;
	if (cur_offset != out_offset) {
		pr_err_once("tail_call out_offset = %d, expected %d!\n",
			    cur_offset, out_offset);
		return -1;
	}
	return 0;
#undef cur_offset
#undef jmp_offset
}

static void build_epilogue(struct jit_ctx *ctx)
{
	const u8 r0 = bpf2a64[BPF_REG_0];
	const u8 r6 = bpf2a64[BPF_REG_6];
	const u8 r7 = bpf2a64[BPF_REG_7];
	const u8 r8 = bpf2a64[BPF_REG_8];
	const u8 r9 = bpf2a64[BPF_REG_9];
	const u8 fp = bpf2a64[BPF_REG_FP];

	/* We're done with BPF stack */
	emit(A64_ADD_I(1, A64_SP, A64_SP, STACK_SIZE), ctx);

	/* Restore fp (x25) and x26 */
	emit(A64_POP(fp, A64_R(26), A64_SP), ctx);

	/* Restore callee-saved register */
	emit(A64_POP(r8, r9, A64_SP), ctx);
	emit(A64_POP(r6, r7, A64_SP), ctx);

	/* Restore FP/LR registers */
	emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);

	/* Set return value */
	emit(A64_MOV(1, A64_R(0), r0), ctx);

	emit(A64_RET(A64_LR), ctx);
}

/* JITs an eBPF instruction.
 * Returns:
 * 0  - successfully JITed an 8-byte eBPF instruction.
 * >0 - successfully JITed a 16-byte eBPF instruction.
 * <0 - failed to JIT.
 */
static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
{
	const u8 code = insn->code;
	const u8 dst = bpf2a64[insn->dst_reg];
	const u8 src = bpf2a64[insn->src_reg];
	const u8 tmp = bpf2a64[TMP_REG_1];
	const u8 tmp2 = bpf2a64[TMP_REG_2];
	const s16 off = insn->off;
	const s32 imm = insn->imm;
	const int i = insn - ctx->prog->insnsi;
	const bool is64 = BPF_CLASS(code) == BPF_ALU64;
	u8 jmp_cond;
	s32 jmp_offset;

#define check_imm(bits, imm) do {				\
	if ((((imm) > 0) && ((imm) >> (bits))) ||		\
	    (((imm) < 0) && (~(imm) >> (bits)))) {		\
		pr_info("[%2d] imm=%d(0x%x) out of range\n",	\
			i, imm, imm);				\
		return -EINVAL;					\
	}							\
} while (0)
#define check_imm19(imm) check_imm(19, imm)
#define check_imm26(imm) check_imm(26, imm)
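
/*
 * check_imm19() guards the signed 19-bit offset of conditional branches
 * (B.cond/CBZ/CBNZ, roughly +/-1 MiB), check_imm26() the signed 26-bit
 * offset of unconditional B (roughly +/-128 MiB); both are counted in
 * instructions, not bytes.
 */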

	switch (code) {
	/* dst = src */
	case BPF_ALU | BPF_MOV | BPF_X:
	case BPF_ALU64 | BPF_MOV | BPF_X:
		emit(A64_MOV(is64, dst, src), ctx);
		break;
	/* dst = dst OP src */
	case BPF_ALU | BPF_ADD | BPF_X:
	case BPF_ALU64 | BPF_ADD | BPF_X:
		emit(A64_ADD(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_SUB | BPF_X:
	case BPF_ALU64 | BPF_SUB | BPF_X:
		emit(A64_SUB(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_AND | BPF_X:
	case BPF_ALU64 | BPF_AND | BPF_X:
		emit(A64_AND(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_OR | BPF_X:
	case BPF_ALU64 | BPF_OR | BPF_X:
		emit(A64_ORR(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_XOR | BPF_X:
	case BPF_ALU64 | BPF_XOR | BPF_X:
		emit(A64_EOR(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_MUL | BPF_X:
	case BPF_ALU64 | BPF_MUL | BPF_X:
		emit(A64_MUL(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_DIV | BPF_X:
	case BPF_ALU64 | BPF_DIV | BPF_X:
	case BPF_ALU | BPF_MOD | BPF_X:
	case BPF_ALU64 | BPF_MOD | BPF_X:
	{
		const u8 r0 = bpf2a64[BPF_REG_0];

		/* if (src == 0) return 0 */
		jmp_offset = 3; /* skip ahead to else path */
		check_imm19(jmp_offset);
		emit(A64_CBNZ(is64, src, jmp_offset), ctx);
		emit(A64_MOVZ(1, r0, 0, 0), ctx);
		jmp_offset = epilogue_offset(ctx);
		check_imm26(jmp_offset);
		emit(A64_B(jmp_offset), ctx);
		/* else */
		switch (BPF_OP(code)) {
		case BPF_DIV:
			emit(A64_UDIV(is64, dst, dst, src), ctx);
			break;
		case BPF_MOD:
			emit(A64_UDIV(is64, tmp, dst, src), ctx);
			emit(A64_MUL(is64, tmp, tmp, src), ctx);
			emit(A64_SUB(is64, dst, dst, tmp), ctx);
			break;
		}
		break;
	}
	case BPF_ALU | BPF_LSH | BPF_X:
	case BPF_ALU64 | BPF_LSH | BPF_X:
		emit(A64_LSLV(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_RSH | BPF_X:
	case BPF_ALU64 | BPF_RSH | BPF_X:
		emit(A64_LSRV(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_ARSH | BPF_X:
	case BPF_ALU64 | BPF_ARSH | BPF_X:
		emit(A64_ASRV(is64, dst, dst, src), ctx);
		break;
	/* dst = -dst */
	case BPF_ALU | BPF_NEG:
	case BPF_ALU64 | BPF_NEG:
		emit(A64_NEG(is64, dst, dst), ctx);
		break;
	/* dst = BSWAP##imm(dst) */
	case BPF_ALU | BPF_END | BPF_FROM_LE:
	case BPF_ALU | BPF_END | BPF_FROM_BE:
#ifdef CONFIG_CPU_BIG_ENDIAN
		if (BPF_SRC(code) == BPF_FROM_BE)
			goto emit_bswap_uxt;
#else /* !CONFIG_CPU_BIG_ENDIAN */
		if (BPF_SRC(code) == BPF_FROM_LE)
			goto emit_bswap_uxt;
#endif
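		/*
		 * Requested endianness differs from the host's, so a real
		 * byte swap (REV16/REV32/REV64) is needed; the matching
		 * case branched to emit_bswap_uxt above and only
		 * zero-extends.
		 */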
		switch (imm) {
		case 16:
			emit(A64_REV16(is64, dst, dst), ctx);
			/* zero-extend 16 bits into 64 bits */
			emit(A64_UXTH(is64, dst, dst), ctx);
			break;
		case 32:
			emit(A64_REV32(is64, dst, dst), ctx);
			/* upper 32 bits already cleared */
			break;
		case 64:
			emit(A64_REV64(dst, dst), ctx);
			break;
		}
		break;
emit_bswap_uxt:
		switch (imm) {
		case 16:
			/* zero-extend 16 bits into 64 bits */
			emit(A64_UXTH(is64, dst, dst), ctx);
			break;
		case 32:
			/* zero-extend 32 bits into 64 bits */
			emit(A64_UXTW(is64, dst, dst), ctx);
			break;
		case 64:
			/* nop */
			break;
		}
		break;
	/* dst = imm */
	case BPF_ALU | BPF_MOV | BPF_K:
	case BPF_ALU64 | BPF_MOV | BPF_K:
		emit_a64_mov_i(is64, dst, imm, ctx);
		break;
	/* dst = dst OP imm */
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_ADD | BPF_K:
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_ADD(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_K:
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_SUB(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_AND | BPF_K:
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_AND(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_OR | BPF_K:
	case BPF_ALU64 | BPF_OR | BPF_K:
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_ORR(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_K:
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_EOR(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_MUL | BPF_K:
	case BPF_ALU64 | BPF_MUL | BPF_K:
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_MUL(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_DIV | BPF_K:
	case BPF_ALU64 | BPF_DIV | BPF_K:
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_UDIV(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_MOD | BPF_K:
	case BPF_ALU64 | BPF_MOD | BPF_K:
		emit_a64_mov_i(is64, tmp2, imm, ctx);
		emit(A64_UDIV(is64, tmp, dst, tmp2), ctx);
		emit(A64_MUL(is64, tmp, tmp, tmp2), ctx);
		emit(A64_SUB(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_LSH | BPF_K:
	case BPF_ALU64 | BPF_LSH | BPF_K:
		emit(A64_LSL(is64, dst, dst, imm), ctx);
		break;
	case BPF_ALU | BPF_RSH | BPF_K:
	case BPF_ALU64 | BPF_RSH | BPF_K:
		emit(A64_LSR(is64, dst, dst, imm), ctx);
		break;
	case BPF_ALU | BPF_ARSH | BPF_K:
	case BPF_ALU64 | BPF_ARSH | BPF_K:
		emit(A64_ASR(is64, dst, dst, imm), ctx);
		break;

	/* JUMP off */
	case BPF_JMP | BPF_JA:
		jmp_offset = bpf2a64_offset(i + off, i, ctx);
		check_imm26(jmp_offset);
		emit(A64_B(jmp_offset), ctx);
		break;
	/* IF (dst COND src) JUMP off */
	case BPF_JMP | BPF_JEQ | BPF_X:
	case BPF_JMP | BPF_JGT | BPF_X:
	case BPF_JMP | BPF_JGE | BPF_X:
	case BPF_JMP | BPF_JNE | BPF_X:
	case BPF_JMP | BPF_JSGT | BPF_X:
	case BPF_JMP | BPF_JSGE | BPF_X:
		emit(A64_CMP(1, dst, src), ctx);
emit_cond_jmp:
		jmp_offset = bpf2a64_offset(i + off, i, ctx);
		check_imm19(jmp_offset);
		switch (BPF_OP(code)) {
		case BPF_JEQ:
			jmp_cond = A64_COND_EQ;
			break;
		case BPF_JGT:
			jmp_cond = A64_COND_HI;
			break;
		case BPF_JGE:
			jmp_cond = A64_COND_CS;
			break;
		case BPF_JSET:
		case BPF_JNE:
			jmp_cond = A64_COND_NE;
			break;
		case BPF_JSGT:
			jmp_cond = A64_COND_GT;
			break;
		case BPF_JSGE:
			jmp_cond = A64_COND_GE;
			break;
		default:
			return -EFAULT;
		}
		emit(A64_B_(jmp_cond, jmp_offset), ctx);
		break;
	case BPF_JMP | BPF_JSET | BPF_X:
		emit(A64_TST(1, dst, src), ctx);
		goto emit_cond_jmp;
	/* IF (dst COND imm) JUMP off */
	case BPF_JMP | BPF_JEQ | BPF_K:
	case BPF_JMP | BPF_JGT | BPF_K:
	case BPF_JMP | BPF_JGE | BPF_K:
	case BPF_JMP | BPF_JNE | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
		emit_a64_mov_i(1, tmp, imm, ctx);
		emit(A64_CMP(1, dst, tmp), ctx);
		goto emit_cond_jmp;
	case BPF_JMP | BPF_JSET | BPF_K:
		emit_a64_mov_i(1, tmp, imm, ctx);
		emit(A64_TST(1, dst, tmp), ctx);
		goto emit_cond_jmp;
	/* function call */
	case BPF_JMP | BPF_CALL:
	{
		const u8 r0 = bpf2a64[BPF_REG_0];
		const u64 func = (u64)__bpf_call_base + imm;

		emit_a64_mov_i64(tmp, func, ctx);
		emit(A64_BLR(tmp), ctx);
		emit(A64_MOV(1, r0, A64_R(0)), ctx);
		break;
	}
	/* tail call */
	case BPF_JMP | BPF_CALL | BPF_X:
		if (emit_bpf_tail_call(ctx))
			return -EFAULT;
		break;
	/* function return */
	case BPF_JMP | BPF_EXIT:
		/* Optimization: when last instruction is EXIT,
		   simply fallthrough to epilogue. */
		if (i == ctx->prog->len - 1)
			break;
		jmp_offset = epilogue_offset(ctx);
		check_imm26(jmp_offset);
		emit(A64_B(jmp_offset), ctx);
		break;

	/* dst = imm64 */
	case BPF_LD | BPF_IMM | BPF_DW:
	{
		const struct bpf_insn insn1 = insn[1];
		u64 imm64;

		if (insn1.code != 0 || insn1.src_reg != 0 ||
		    insn1.dst_reg != 0 || insn1.off != 0) {
			/* Note: verifier in BPF core must catch invalid
			 * instructions.
			 */
			pr_err_once("Invalid BPF_LD_IMM64 instruction\n");
			return -EINVAL;
		}

		imm64 = (u64)insn1.imm << 32 | (u32)imm;
		emit_a64_mov_i64(dst, imm64, ctx);

		return 1;
	}

	/* LDX: dst = *(size *)(src + off) */
	case BPF_LDX | BPF_MEM | BPF_W:
	case BPF_LDX | BPF_MEM | BPF_H:
	case BPF_LDX | BPF_MEM | BPF_B:
	case BPF_LDX | BPF_MEM | BPF_DW:
		emit_a64_mov_i(1, tmp, off, ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			emit(A64_LDR32(dst, src, tmp), ctx);
			break;
		case BPF_H:
			emit(A64_LDRH(dst, src, tmp), ctx);
			break;
		case BPF_B:
			emit(A64_LDRB(dst, src, tmp), ctx);
			break;
		case BPF_DW:
			emit(A64_LDR64(dst, src, tmp), ctx);
			break;
		}
		break;

	/* ST: *(size *)(dst + off) = imm */
	case BPF_ST | BPF_MEM | BPF_W:
	case BPF_ST | BPF_MEM | BPF_H:
	case BPF_ST | BPF_MEM | BPF_B:
	case BPF_ST | BPF_MEM | BPF_DW:
		/* Load imm to a register then store it */
		emit_a64_mov_i(1, tmp2, off, ctx);
		emit_a64_mov_i(1, tmp, imm, ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			emit(A64_STR32(tmp, dst, tmp2), ctx);
			break;
		case BPF_H:
			emit(A64_STRH(tmp, dst, tmp2), ctx);
			break;
		case BPF_B:
			emit(A64_STRB(tmp, dst, tmp2), ctx);
			break;
		case BPF_DW:
			emit(A64_STR64(tmp, dst, tmp2), ctx);
			break;
		}
		break;

	/* STX: *(size *)(dst + off) = src */
	case BPF_STX | BPF_MEM | BPF_W:
	case BPF_STX | BPF_MEM | BPF_H:
	case BPF_STX | BPF_MEM | BPF_B:
	case BPF_STX | BPF_MEM | BPF_DW:
		emit_a64_mov_i(1, tmp, off, ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			emit(A64_STR32(src, dst, tmp), ctx);
			break;
		case BPF_H:
			emit(A64_STRH(src, dst, tmp), ctx);
			break;
		case BPF_B:
			emit(A64_STRB(src, dst, tmp), ctx);
			break;
		case BPF_DW:
			emit(A64_STR64(src, dst, tmp), ctx);
			break;
		}
		break;
	/* STX XADD: lock *(u32 *)(dst + off) += src */
	case BPF_STX | BPF_XADD | BPF_W:
	/* STX XADD: lock *(u64 *)(dst + off) += src */
	case BPF_STX | BPF_XADD | BPF_DW:
		goto notyet;

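	/*
	 * Classic packet loads: call bpf_load_pointer(), passing the small
	 * JIT scratchpad below the BPF stack (fp - STACK_SIZE) as the
	 * bounce buffer, and bail out to the epilogue (returning 0) if the
	 * helper returns NULL.
	 */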
	/* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */
	case BPF_LD | BPF_ABS | BPF_W:
	case BPF_LD | BPF_ABS | BPF_H:
	case BPF_LD | BPF_ABS | BPF_B:
	/* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + src + imm)) */
	case BPF_LD | BPF_IND | BPF_W:
	case BPF_LD | BPF_IND | BPF_H:
	case BPF_LD | BPF_IND | BPF_B:
	{
		const u8 r0 = bpf2a64[BPF_REG_0]; /* r0 = return value */
		const u8 r6 = bpf2a64[BPF_REG_6]; /* r6 = pointer to sk_buff */
		const u8 fp = bpf2a64[BPF_REG_FP];
		const u8 r1 = bpf2a64[BPF_REG_1]; /* r1: struct sk_buff *skb */
		const u8 r2 = bpf2a64[BPF_REG_2]; /* r2: int k */
		const u8 r3 = bpf2a64[BPF_REG_3]; /* r3: unsigned int size */
		const u8 r4 = bpf2a64[BPF_REG_4]; /* r4: void *buffer */
		const u8 r5 = bpf2a64[BPF_REG_5]; /* r5: void *(*func)(...) */
		int size;

		emit(A64_MOV(1, r1, r6), ctx);
		emit_a64_mov_i(0, r2, imm, ctx);
		if (BPF_MODE(code) == BPF_IND)
			emit(A64_ADD(0, r2, r2, src), ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			size = 4;
			break;
		case BPF_H:
			size = 2;
			break;
		case BPF_B:
			size = 1;
			break;
		default:
			return -EINVAL;
		}
		emit_a64_mov_i64(r3, size, ctx);
		emit(A64_SUB_I(1, r4, fp, STACK_SIZE), ctx);
		emit_a64_mov_i64(r5, (unsigned long)bpf_load_pointer, ctx);
		emit(A64_BLR(r5), ctx);
		emit(A64_MOV(1, r0, A64_R(0)), ctx);

		jmp_offset = epilogue_offset(ctx);
		check_imm19(jmp_offset);
		emit(A64_CBZ(1, r0, jmp_offset), ctx);
		emit(A64_MOV(1, r5, r0), ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			emit(A64_LDR32(r0, r5, A64_ZR), ctx);
#ifndef CONFIG_CPU_BIG_ENDIAN
			emit(A64_REV32(0, r0, r0), ctx);
#endif
			break;
		case BPF_H:
			emit(A64_LDRH(r0, r5, A64_ZR), ctx);
#ifndef CONFIG_CPU_BIG_ENDIAN
			emit(A64_REV16(0, r0, r0), ctx);
#endif
			break;
		case BPF_B:
			emit(A64_LDRB(r0, r5, A64_ZR), ctx);
			break;
		}
		break;
	}
notyet:
		pr_info_once("*** NOT YET: opcode %02x ***\n", code);
		return -EFAULT;

	default:
		pr_err_once("unknown opcode %02x\n", code);
		return -EINVAL;
	}

	return 0;
}

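/*
 * Translate the whole program.  On the first pass (ctx->image == NULL)
 * this only sizes the image and records in ctx->offset[] the A64 index
 * reached after each BPF instruction; the second pass writes the actual
 * instructions into the allocated image.
 */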
static int build_body(struct jit_ctx *ctx)
{
	const struct bpf_prog *prog = ctx->prog;
	int i;

	for (i = 0; i < prog->len; i++) {
		const struct bpf_insn *insn = &prog->insnsi[i];
		int ret;

		ret = build_insn(insn, ctx);
		if (ret > 0) {
			i++;
			if (ctx->image == NULL)
				ctx->offset[i] = ctx->idx;
			continue;
		}
		if (ctx->image == NULL)
			ctx->offset[i] = ctx->idx;
		if (ret)
			return ret;
	}

	return 0;
}

static int validate_code(struct jit_ctx *ctx)
{
	int i;

	for (i = 0; i < ctx->idx; i++) {
		u32 a64_insn = le32_to_cpu(ctx->image[i]);

		if (a64_insn == AARCH64_BREAK_FAULT)
			return -1;
	}

	return 0;
}

static inline void bpf_flush_icache(void *start, void *end)
{
	flush_icache_range((unsigned long)start, (unsigned long)end);
}

void bpf_jit_compile(struct bpf_prog *prog)
{
	/* Nothing to do here. We support Internal BPF. */
}

struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	struct bpf_prog *tmp, *orig_prog = prog;
	struct bpf_binary_header *header;
	bool tmp_blinded = false;
	struct jit_ctx ctx;
	int image_size;
	u8 *image_ptr;

	if (!bpf_jit_enable)
		return orig_prog;

	tmp = bpf_jit_blind_constants(prog);
	/* If blinding was requested and we failed during blinding,
	 * we must fall back to the interpreter.
	 */
	if (IS_ERR(tmp))
		return orig_prog;
	if (tmp != prog) {
		tmp_blinded = true;
		prog = tmp;
	}

	memset(&ctx, 0, sizeof(ctx));
	ctx.prog = prog;

	ctx.offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
	if (ctx.offset == NULL) {
		prog = orig_prog;
		goto out;
	}

	/* 1. Initial fake pass to compute ctx->idx. */

	/* Fake pass to fill in ctx->offset. */
	if (build_body(&ctx)) {
		prog = orig_prog;
		goto out_off;
	}

	if (build_prologue(&ctx)) {
		prog = orig_prog;
		goto out_off;
	}

	ctx.epilogue_offset = ctx.idx;
	build_epilogue(&ctx);

	/* Now we know the actual image size. */
	image_size = sizeof(u32) * ctx.idx;
	header = bpf_jit_binary_alloc(image_size, &image_ptr,
				      sizeof(u32), jit_fill_hole);
	if (header == NULL) {
		prog = orig_prog;
		goto out_off;
	}

	/* 2. Now, the actual pass. */

	ctx.image = (u32 *)image_ptr;
	ctx.idx = 0;

	build_prologue(&ctx);

	if (build_body(&ctx)) {
		bpf_jit_binary_free(header);
		prog = orig_prog;
		goto out_off;
	}

	build_epilogue(&ctx);

	/* 3. Extra pass to validate JITed code. */
	if (validate_code(&ctx)) {
		bpf_jit_binary_free(header);
		prog = orig_prog;
		goto out_off;
	}

	/* And we're done. */
	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, image_size, 2, ctx.image);

	bpf_flush_icache(header, ctx.image + ctx.idx);

	set_memory_ro((unsigned long)header, header->pages);
	prog->bpf_func = (void *)ctx.image;
	prog->jited = 1;

out_off:
	kfree(ctx.offset);
out:
	if (tmp_blinded)
		bpf_jit_prog_release_other(prog, prog == orig_prog ?
					   tmp : orig_prog);
	return prog;
}

void bpf_jit_free(struct bpf_prog *prog)
{
	unsigned long addr = (unsigned long)prog->bpf_func & PAGE_MASK;
	struct bpf_binary_header *header = (void *)addr;

	if (!prog->jited)
		goto free_filter;

	set_memory_rw(addr, header->pages);
	bpf_jit_binary_free(header);

free_filter:
	bpf_prog_unlock_free(prog);
}