// SPDX-License-Identifier: GPL-2.0-only
/*
 * BPF JIT compiler for ARM64
 *
 * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
 */

#define pr_fmt(fmt) "bpf_jit: " fmt

#include <linux/bitfield.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/printk.h>
#include <linux/slab.h>

#include <asm/byteorder.h>
#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/insn.h>
#include <asm/set_memory.h>

#include "bpf_jit.h"

#define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
#define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
#define TCALL_CNT (MAX_BPF_JIT_REG + 2)
#define TMP_REG_3 (MAX_BPF_JIT_REG + 3)

/* Map BPF registers to A64 registers */
static const int bpf2a64[] = {
	/* return value from in-kernel function, and exit value from eBPF */
	[BPF_REG_0] = A64_R(7),
	/* arguments from eBPF program to in-kernel function */
	[BPF_REG_1] = A64_R(0),
	[BPF_REG_2] = A64_R(1),
	[BPF_REG_3] = A64_R(2),
	[BPF_REG_4] = A64_R(3),
	[BPF_REG_5] = A64_R(4),
	/* callee saved registers that in-kernel function will preserve */
	[BPF_REG_6] = A64_R(19),
	[BPF_REG_7] = A64_R(20),
	[BPF_REG_8] = A64_R(21),
	[BPF_REG_9] = A64_R(22),
	/* read-only frame pointer to access stack */
	[BPF_REG_FP] = A64_R(25),
	/* temporary registers for internal BPF JIT */
	[TMP_REG_1] = A64_R(10),
	[TMP_REG_2] = A64_R(11),
	[TMP_REG_3] = A64_R(12),
	/* tail_call_cnt */
	[TCALL_CNT] = A64_R(26),
	/* temporary register for blinding constants */
	[BPF_REG_AX] = A64_R(9),
};

struct jit_ctx {
	const struct bpf_prog *prog;	/* BPF program being JITed */
	int idx;			/* index of the next A64 instruction slot */
	int epilogue_offset;		/* instruction offset of the epilogue */
	int *offset;			/* per-BPF-insn offsets into the JITed image */
	int exentry_idx;		/* next free exception table entry */
	__le32 *image;			/* JITed image (NULL during the sizing pass) */
	u32 stack_size;			/* rounded-up BPF stack depth */
};

static inline void emit(const u32 insn, struct jit_ctx *ctx)
{
	if (ctx->image != NULL)
		ctx->image[ctx->idx] = cpu_to_le32(insn);

	ctx->idx++;
}

static inline void emit_a64_mov_i(const int is64, const int reg,
				  const s32 val, struct jit_ctx *ctx)
{
	u16 hi = val >> 16;
	u16 lo = val & 0xffff;

	if (hi & 0x8000) {
		if (hi == 0xffff) {
			emit(A64_MOVN(is64, reg, (u16)~lo, 0), ctx);
		} else {
			emit(A64_MOVN(is64, reg, (u16)~hi, 16), ctx);
			if (lo != 0xffff)
				emit(A64_MOVK(is64, reg, lo, 0), ctx);
		}
	} else {
		emit(A64_MOVZ(is64, reg, lo, 0), ctx);
		if (hi)
			emit(A64_MOVK(is64, reg, hi, 16), ctx);
	}
}

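/*
 * Count the 16-bit chunks of @val that differ from the "background" pattern:
 * all-ones chunks when @inverse is true (MOVN-based sequence), all-zeros
 * chunks otherwise (MOVZ-based sequence). A smaller count means fewer MOVKs.
 */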
static int i64_i16_blocks(const u64 val, bool inverse)
{
	return (((val >>  0) & 0xffff) != (inverse ? 0xffff : 0x0000)) +
	       (((val >> 16) & 0xffff) != (inverse ? 0xffff : 0x0000)) +
	       (((val >> 32) & 0xffff) != (inverse ? 0xffff : 0x0000)) +
	       (((val >> 48) & 0xffff) != (inverse ? 0xffff : 0x0000));
}

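/*
 * Load an arbitrary 64-bit immediate: start from the wide move (MOVZ or MOVN)
 * that leaves the fewest 16-bit chunks to patch, then fill in the remaining
 * chunks with MOVK.
 */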
static inline void emit_a64_mov_i64(const int reg, const u64 val,
				    struct jit_ctx *ctx)
{
	u64 nrm_tmp = val, rev_tmp = ~val;
	bool inverse;
	int shift;

	if (!(nrm_tmp >> 32))
		return emit_a64_mov_i(0, reg, (u32)val, ctx);

	inverse = i64_i16_blocks(nrm_tmp, true) < i64_i16_blocks(nrm_tmp, false);
	shift = max(round_down((inverse ? (fls64(rev_tmp) - 1) :
					  (fls64(nrm_tmp) - 1)), 16), 0);
	if (inverse)
		emit(A64_MOVN(1, reg, (rev_tmp >> shift) & 0xffff, shift), ctx);
	else
		emit(A64_MOVZ(1, reg, (nrm_tmp >> shift) & 0xffff, shift), ctx);
	shift -= 16;
	while (shift >= 0) {
		if (((nrm_tmp >> shift) & 0xffff) != (inverse ? 0xffff : 0x0000))
			emit(A64_MOVK(1, reg, (nrm_tmp >> shift) & 0xffff, shift), ctx);
		shift -= 16;
	}
}

/*
 * Kernel addresses in the vmalloc space use at most 48 bits, and the
 * remaining bits are guaranteed to be all ones. So we can compose the
 * address with a fixed length movn/movk/movk sequence.
 */
static inline void emit_addr_mov_i64(const int reg, const u64 val,
				     struct jit_ctx *ctx)
{
	u64 tmp = val;
	int shift = 0;

	emit(A64_MOVN(1, reg, ~tmp & 0xffff, shift), ctx);
	while (shift < 32) {
		tmp >>= 16;
		shift += 16;
		emit(A64_MOVK(1, reg, tmp & 0xffff, shift), ctx);
	}
}

static inline int bpf2a64_offset(int bpf_insn, int off,
				 const struct jit_ctx *ctx)
{
	/* BPF JMP offset is relative to the next instruction */
	bpf_insn++;
	/*
	 * Whereas arm64 branch instructions encode the offset
	 * from the branch itself, so we must subtract 1 from the
	 * instruction offset.
	 */
	return ctx->offset[bpf_insn + off] - (ctx->offset[bpf_insn] - 1);
}

static void jit_fill_hole(void *area, unsigned int size)
{
	__le32 *ptr;
	/* We are guaranteed to have aligned memory. */
	for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
		*ptr++ = cpu_to_le32(AARCH64_BREAK_FAULT);
}

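/* Branch offset, in instructions, from the current position to the epilogue */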
static inline int epilogue_offset(const struct jit_ctx *ctx)
{
	int to = ctx->epilogue_offset;
	int from = ctx->idx;

	return to - from;
}

static bool is_addsub_imm(u32 imm)
{
	/* Either imm12 or shifted imm12. */
	return !(imm & ~0xfff) || !(imm & ~0xfff000);
}

/* Tail call offset to jump into */
#if IS_ENABLED(CONFIG_ARM64_BTI_KERNEL)
#define PROLOGUE_OFFSET 8
#else
#define PROLOGUE_OFFSET 7
#endif

static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
{
	const struct bpf_prog *prog = ctx->prog;
	const u8 r6 = bpf2a64[BPF_REG_6];
	const u8 r7 = bpf2a64[BPF_REG_7];
	const u8 r8 = bpf2a64[BPF_REG_8];
	const u8 r9 = bpf2a64[BPF_REG_9];
	const u8 fp = bpf2a64[BPF_REG_FP];
	const u8 tcc = bpf2a64[TCALL_CNT];
	const int idx0 = ctx->idx;
	int cur_offset;

	/*
	 * BPF prog stack layout
	 *
	 *                         high
	 * original A64_SP =>   0:+-----+ BPF prologue
	 *                        |FP/LR|
	 * current A64_FP =>  -16:+-----+
	 *                        | ... | callee saved registers
	 * BPF fp register => -64:+-----+ <= (BPF_FP)
	 *                        |     |
	 *                        | ... | BPF prog stack
	 *                        |     |
	 *                        +-----+ <= (BPF_FP - prog->aux->stack_depth)
	 *                        |RSVD | padding
	 * current A64_SP =>      +-----+ <= (BPF_FP - ctx->stack_size)
	 *                        |     |
	 *                        | ... | Function call stack
	 *                        |     |
	 *                        +-----+
	 *                          low
	 *
	 */

	/* BTI landing pad */
	if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL))
		emit(A64_BTI_C, ctx);

	/* Save FP and LR registers to stay aligned with the ARM64 AAPCS */
	emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
	emit(A64_MOV(1, A64_FP, A64_SP), ctx);

	/* Save callee-saved registers */
	emit(A64_PUSH(r6, r7, A64_SP), ctx);
	emit(A64_PUSH(r8, r9, A64_SP), ctx);
	emit(A64_PUSH(fp, tcc, A64_SP), ctx);

	/* Set up BPF prog stack base register */
	emit(A64_MOV(1, fp, A64_SP), ctx);

	if (!ebpf_from_cbpf) {
		/* Initialize tail_call_cnt */
		emit(A64_MOVZ(1, tcc, 0, 0), ctx);

		cur_offset = ctx->idx - idx0;
		if (cur_offset != PROLOGUE_OFFSET) {
			pr_err_once("PROLOGUE_OFFSET = %d, expected %d!\n",
				    cur_offset, PROLOGUE_OFFSET);
			return -1;
		}

		/* BTI landing pad for the tail call, done with a BR */
		if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL))
			emit(A64_BTI_J, ctx);
	}

	/* Stack size must be a multiple of 16 bytes */
	ctx->stack_size = round_up(prog->aux->stack_depth, 16);

	/* Set up function call stack */
	emit(A64_SUB_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
	return 0;
}

static int out_offset = -1; /* initialized on the first pass of build_body() */
static int emit_bpf_tail_call(struct jit_ctx *ctx)
{
	/* bpf_tail_call(void *prog_ctx, struct bpf_array *array, u64 index) */
	const u8 r2 = bpf2a64[BPF_REG_2];
	const u8 r3 = bpf2a64[BPF_REG_3];

	const u8 tmp = bpf2a64[TMP_REG_1];
	const u8 prg = bpf2a64[TMP_REG_2];
	const u8 tcc = bpf2a64[TCALL_CNT];
	const int idx0 = ctx->idx;
#define cur_offset (ctx->idx - idx0)
#define jmp_offset (out_offset - (cur_offset))
	size_t off;

	/* if (index >= array->map.max_entries)
	 *     goto out;
	 */
	off = offsetof(struct bpf_array, map.max_entries);
	emit_a64_mov_i64(tmp, off, ctx);
	emit(A64_LDR32(tmp, r2, tmp), ctx);
	emit(A64_MOV(0, r3, r3), ctx);
	emit(A64_CMP(0, r3, tmp), ctx);
	emit(A64_B_(A64_COND_CS, jmp_offset), ctx);

	/* if (tail_call_cnt > MAX_TAIL_CALL_CNT)
	 *     goto out;
	 * tail_call_cnt++;
	 */
	emit_a64_mov_i64(tmp, MAX_TAIL_CALL_CNT, ctx);
	emit(A64_CMP(1, tcc, tmp), ctx);
	emit(A64_B_(A64_COND_HI, jmp_offset), ctx);
	emit(A64_ADD_I(1, tcc, tcc, 1), ctx);

	/* prog = array->ptrs[index];
	 * if (prog == NULL)
	 *     goto out;
	 */
	off = offsetof(struct bpf_array, ptrs);
	emit_a64_mov_i64(tmp, off, ctx);
	emit(A64_ADD(1, tmp, r2, tmp), ctx);
	emit(A64_LSL(1, prg, r3, 3), ctx);
	emit(A64_LDR64(prg, tmp, prg), ctx);
	emit(A64_CBZ(1, prg, jmp_offset), ctx);

	/* goto *(prog->bpf_func + prologue_offset); */
	off = offsetof(struct bpf_prog, bpf_func);
	emit_a64_mov_i64(tmp, off, ctx);
	emit(A64_LDR64(tmp, prg, tmp), ctx);
	emit(A64_ADD_I(1, tmp, tmp, sizeof(u32) * PROLOGUE_OFFSET), ctx);
	emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
	emit(A64_BR(tmp), ctx);

	/* out: */
	if (out_offset == -1)
		out_offset = cur_offset;
	if (cur_offset != out_offset) {
		pr_err_once("tail_call out_offset = %d, expected %d!\n",
			    cur_offset, out_offset);
		return -1;
	}
	return 0;
#undef cur_offset
#undef jmp_offset
}

static void build_epilogue(struct jit_ctx *ctx)
{
	const u8 r0 = bpf2a64[BPF_REG_0];
	const u8 r6 = bpf2a64[BPF_REG_6];
	const u8 r7 = bpf2a64[BPF_REG_7];
	const u8 r8 = bpf2a64[BPF_REG_8];
	const u8 r9 = bpf2a64[BPF_REG_9];
	const u8 fp = bpf2a64[BPF_REG_FP];

	/* We're done with BPF stack */
	emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);

	/* Restore fp (x25) and x26 */
	emit(A64_POP(fp, A64_R(26), A64_SP), ctx);

	/* Restore callee-saved registers */
	emit(A64_POP(r8, r9, A64_SP), ctx);
	emit(A64_POP(r6, r7, A64_SP), ctx);

	/* Restore FP/LR registers */
	emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);

	/* Set return value */
	emit(A64_MOV(1, A64_R(0), r0), ctx);

	emit(A64_RET(A64_LR), ctx);
}

#define BPF_FIXUP_OFFSET_MASK	GENMASK(26, 0)
#define BPF_FIXUP_REG_MASK	GENMASK(31, 27)

int arm64_bpf_fixup_exception(const struct exception_table_entry *ex,
			      struct pt_regs *regs)
{
	off_t offset = FIELD_GET(BPF_FIXUP_OFFSET_MASK, ex->fixup);
	int dst_reg = FIELD_GET(BPF_FIXUP_REG_MASK, ex->fixup);

	regs->regs[dst_reg] = 0;
	regs->pc = (unsigned long)&ex->fixup - offset;
	return 1;
}

/* For accesses to BTF pointers, add an entry to the exception table */
static int add_exception_handler(const struct bpf_insn *insn,
				 struct jit_ctx *ctx,
				 int dst_reg)
{
	off_t offset;
	unsigned long pc;
	struct exception_table_entry *ex;

	if (!ctx->image)
		/* First pass */
		return 0;

	if (BPF_MODE(insn->code) != BPF_PROBE_MEM)
		return 0;

	if (!ctx->prog->aux->extable ||
	    WARN_ON_ONCE(ctx->exentry_idx >= ctx->prog->aux->num_exentries))
		return -EINVAL;

	ex = &ctx->prog->aux->extable[ctx->exentry_idx];
	pc = (unsigned long)&ctx->image[ctx->idx - 1];

	offset = pc - (long)&ex->insn;
	if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
		return -ERANGE;
	ex->insn = offset;

	/*
	 * Since the extable follows the program, the fixup offset is always
	 * negative and limited to BPF_JIT_REGION_SIZE. Store a positive value
	 * to keep things simple, and put the destination register in the upper
	 * bits. We don't need to worry about buildtime or runtime sort
	 * modifying the upper bits because the table is already sorted, and
	 * isn't part of the main exception table.
	 */
	offset = (long)&ex->fixup - (pc + AARCH64_INSN_SIZE);
	if (!FIELD_FIT(BPF_FIXUP_OFFSET_MASK, offset))
		return -ERANGE;

	ex->fixup = FIELD_PREP(BPF_FIXUP_OFFSET_MASK, offset) |
		    FIELD_PREP(BPF_FIXUP_REG_MASK, dst_reg);

	ctx->exentry_idx++;
	return 0;
}

/* JITs an eBPF instruction.
 * Returns:
 * 0  - successfully JITed an 8-byte eBPF instruction.
 * >0 - successfully JITed a 16-byte eBPF instruction.
 * <0 - failed to JIT.
 */
static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
		      bool extra_pass)
{
	const u8 code = insn->code;
	const u8 dst = bpf2a64[insn->dst_reg];
	const u8 src = bpf2a64[insn->src_reg];
	const u8 tmp = bpf2a64[TMP_REG_1];
	const u8 tmp2 = bpf2a64[TMP_REG_2];
	const u8 tmp3 = bpf2a64[TMP_REG_3];
	const s16 off = insn->off;
	const s32 imm = insn->imm;
	const int i = insn - ctx->prog->insnsi;
	const bool is64 = BPF_CLASS(code) == BPF_ALU64 ||
			  BPF_CLASS(code) == BPF_JMP;
	const bool isdw = BPF_SIZE(code) == BPF_DW;
	u8 jmp_cond, reg;
	s32 jmp_offset;
	u32 a64_insn;
	int ret;

#define check_imm(bits, imm) do {				\
	if ((((imm) > 0) && ((imm) >> (bits))) ||		\
	    (((imm) < 0) && (~(imm) >> (bits)))) {		\
		pr_info("[%2d] imm=%d(0x%x) out of range\n",	\
			i, imm, imm);				\
		return -EINVAL;					\
	}							\
} while (0)
#define check_imm19(imm) check_imm(19, imm)
#define check_imm26(imm) check_imm(26, imm)

	switch (code) {
	/* dst = src */
	case BPF_ALU | BPF_MOV | BPF_X:
	case BPF_ALU64 | BPF_MOV | BPF_X:
		emit(A64_MOV(is64, dst, src), ctx);
		break;
	/* dst = dst OP src */
	case BPF_ALU | BPF_ADD | BPF_X:
	case BPF_ALU64 | BPF_ADD | BPF_X:
		emit(A64_ADD(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_SUB | BPF_X:
	case BPF_ALU64 | BPF_SUB | BPF_X:
		emit(A64_SUB(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_AND | BPF_X:
	case BPF_ALU64 | BPF_AND | BPF_X:
		emit(A64_AND(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_OR | BPF_X:
	case BPF_ALU64 | BPF_OR | BPF_X:
		emit(A64_ORR(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_XOR | BPF_X:
	case BPF_ALU64 | BPF_XOR | BPF_X:
		emit(A64_EOR(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_MUL | BPF_X:
	case BPF_ALU64 | BPF_MUL | BPF_X:
		emit(A64_MUL(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_DIV | BPF_X:
	case BPF_ALU64 | BPF_DIV | BPF_X:
		emit(A64_UDIV(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_MOD | BPF_X:
	case BPF_ALU64 | BPF_MOD | BPF_X:
		emit(A64_UDIV(is64, tmp, dst, src), ctx);
		emit(A64_MSUB(is64, dst, dst, tmp, src), ctx);
		break;
	case BPF_ALU | BPF_LSH | BPF_X:
	case BPF_ALU64 | BPF_LSH | BPF_X:
		emit(A64_LSLV(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_RSH | BPF_X:
	case BPF_ALU64 | BPF_RSH | BPF_X:
		emit(A64_LSRV(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_ARSH | BPF_X:
	case BPF_ALU64 | BPF_ARSH | BPF_X:
		emit(A64_ASRV(is64, dst, dst, src), ctx);
		break;
	/* dst = -dst */
	case BPF_ALU | BPF_NEG:
	case BPF_ALU64 | BPF_NEG:
		emit(A64_NEG(is64, dst, dst), ctx);
		break;
	/* dst = BSWAP##imm(dst) */
	case BPF_ALU | BPF_END | BPF_FROM_LE:
	case BPF_ALU | BPF_END | BPF_FROM_BE:
#ifdef CONFIG_CPU_BIG_ENDIAN
		if (BPF_SRC(code) == BPF_FROM_BE)
			goto emit_bswap_uxt;
#else /* !CONFIG_CPU_BIG_ENDIAN */
		if (BPF_SRC(code) == BPF_FROM_LE)
			goto emit_bswap_uxt;
#endif
		switch (imm) {
		case 16:
			emit(A64_REV16(is64, dst, dst), ctx);
			/* zero-extend 16 bits into 64 bits */
			emit(A64_UXTH(is64, dst, dst), ctx);
			break;
		case 32:
			emit(A64_REV32(is64, dst, dst), ctx);
			/* upper 32 bits already cleared */
			break;
		case 64:
			emit(A64_REV64(dst, dst), ctx);
			break;
		}
		break;
emit_bswap_uxt:
		switch (imm) {
		case 16:
			/* zero-extend 16 bits into 64 bits */
			emit(A64_UXTH(is64, dst, dst), ctx);
			break;
		case 32:
			/* zero-extend 32 bits into 64 bits */
			emit(A64_UXTW(is64, dst, dst), ctx);
			break;
		case 64:
			/* nop */
			break;
		}
		break;
	/* dst = imm */
	case BPF_ALU | BPF_MOV | BPF_K:
	case BPF_ALU64 | BPF_MOV | BPF_K:
		emit_a64_mov_i(is64, dst, imm, ctx);
		break;
	/* dst = dst OP imm */
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_ADD | BPF_K:
		if (is_addsub_imm(imm)) {
			emit(A64_ADD_I(is64, dst, dst, imm), ctx);
		} else if (is_addsub_imm(-imm)) {
			emit(A64_SUB_I(is64, dst, dst, -imm), ctx);
		} else {
			emit_a64_mov_i(is64, tmp, imm, ctx);
			emit(A64_ADD(is64, dst, dst, tmp), ctx);
		}
		break;
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_K:
		if (is_addsub_imm(imm)) {
			emit(A64_SUB_I(is64, dst, dst, imm), ctx);
		} else if (is_addsub_imm(-imm)) {
			emit(A64_ADD_I(is64, dst, dst, -imm), ctx);
		} else {
			emit_a64_mov_i(is64, tmp, imm, ctx);
			emit(A64_SUB(is64, dst, dst, tmp), ctx);
		}
		break;
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_AND | BPF_K:
		a64_insn = A64_AND_I(is64, dst, dst, imm);
		if (a64_insn != AARCH64_BREAK_FAULT) {
			emit(a64_insn, ctx);
		} else {
			emit_a64_mov_i(is64, tmp, imm, ctx);
			emit(A64_AND(is64, dst, dst, tmp), ctx);
		}
		break;
	case BPF_ALU | BPF_OR | BPF_K:
	case BPF_ALU64 | BPF_OR | BPF_K:
		a64_insn = A64_ORR_I(is64, dst, dst, imm);
		if (a64_insn != AARCH64_BREAK_FAULT) {
			emit(a64_insn, ctx);
		} else {
			emit_a64_mov_i(is64, tmp, imm, ctx);
			emit(A64_ORR(is64, dst, dst, tmp), ctx);
		}
		break;
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_K:
		a64_insn = A64_EOR_I(is64, dst, dst, imm);
		if (a64_insn != AARCH64_BREAK_FAULT) {
			emit(a64_insn, ctx);
		} else {
			emit_a64_mov_i(is64, tmp, imm, ctx);
			emit(A64_EOR(is64, dst, dst, tmp), ctx);
		}
		break;
	case BPF_ALU | BPF_MUL | BPF_K:
	case BPF_ALU64 | BPF_MUL | BPF_K:
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_MUL(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_DIV | BPF_K:
	case BPF_ALU64 | BPF_DIV | BPF_K:
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_UDIV(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_MOD | BPF_K:
	case BPF_ALU64 | BPF_MOD | BPF_K:
		emit_a64_mov_i(is64, tmp2, imm, ctx);
		emit(A64_UDIV(is64, tmp, dst, tmp2), ctx);
		emit(A64_MSUB(is64, dst, dst, tmp, tmp2), ctx);
		break;
	case BPF_ALU | BPF_LSH | BPF_K:
	case BPF_ALU64 | BPF_LSH | BPF_K:
		emit(A64_LSL(is64, dst, dst, imm), ctx);
		break;
	case BPF_ALU | BPF_RSH | BPF_K:
	case BPF_ALU64 | BPF_RSH | BPF_K:
		emit(A64_LSR(is64, dst, dst, imm), ctx);
		break;
	case BPF_ALU | BPF_ARSH | BPF_K:
	case BPF_ALU64 | BPF_ARSH | BPF_K:
		emit(A64_ASR(is64, dst, dst, imm), ctx);
		break;

	/* JUMP off */
	case BPF_JMP | BPF_JA:
		jmp_offset = bpf2a64_offset(i, off, ctx);
		check_imm26(jmp_offset);
		emit(A64_B(jmp_offset), ctx);
		break;
	/* IF (dst COND src) JUMP off */
	case BPF_JMP | BPF_JEQ | BPF_X:
	case BPF_JMP | BPF_JGT | BPF_X:
	case BPF_JMP | BPF_JLT | BPF_X:
	case BPF_JMP | BPF_JGE | BPF_X:
	case BPF_JMP | BPF_JLE | BPF_X:
	case BPF_JMP | BPF_JNE | BPF_X:
	case BPF_JMP | BPF_JSGT | BPF_X:
	case BPF_JMP | BPF_JSLT | BPF_X:
	case BPF_JMP | BPF_JSGE | BPF_X:
	case BPF_JMP | BPF_JSLE | BPF_X:
	case BPF_JMP32 | BPF_JEQ | BPF_X:
	case BPF_JMP32 | BPF_JGT | BPF_X:
	case BPF_JMP32 | BPF_JLT | BPF_X:
	case BPF_JMP32 | BPF_JGE | BPF_X:
	case BPF_JMP32 | BPF_JLE | BPF_X:
	case BPF_JMP32 | BPF_JNE | BPF_X:
	case BPF_JMP32 | BPF_JSGT | BPF_X:
	case BPF_JMP32 | BPF_JSLT | BPF_X:
	case BPF_JMP32 | BPF_JSGE | BPF_X:
	case BPF_JMP32 | BPF_JSLE | BPF_X:
		emit(A64_CMP(is64, dst, src), ctx);
emit_cond_jmp:
		jmp_offset = bpf2a64_offset(i, off, ctx);
		check_imm19(jmp_offset);
		switch (BPF_OP(code)) {
		case BPF_JEQ:
			jmp_cond = A64_COND_EQ;
			break;
		case BPF_JGT:
			jmp_cond = A64_COND_HI;
			break;
		case BPF_JLT:
			jmp_cond = A64_COND_CC;
			break;
		case BPF_JGE:
			jmp_cond = A64_COND_CS;
			break;
		case BPF_JLE:
			jmp_cond = A64_COND_LS;
			break;
		case BPF_JSET:
		case BPF_JNE:
			jmp_cond = A64_COND_NE;
			break;
		case BPF_JSGT:
			jmp_cond = A64_COND_GT;
			break;
		case BPF_JSLT:
			jmp_cond = A64_COND_LT;
			break;
		case BPF_JSGE:
			jmp_cond = A64_COND_GE;
			break;
		case BPF_JSLE:
			jmp_cond = A64_COND_LE;
			break;
		default:
			return -EFAULT;
		}
		emit(A64_B_(jmp_cond, jmp_offset), ctx);
		break;
	case BPF_JMP | BPF_JSET | BPF_X:
	case BPF_JMP32 | BPF_JSET | BPF_X:
		emit(A64_TST(is64, dst, src), ctx);
		goto emit_cond_jmp;
	/* IF (dst COND imm) JUMP off */
	case BPF_JMP | BPF_JEQ | BPF_K:
	case BPF_JMP | BPF_JGT | BPF_K:
	case BPF_JMP | BPF_JLT | BPF_K:
	case BPF_JMP | BPF_JGE | BPF_K:
	case BPF_JMP | BPF_JLE | BPF_K:
	case BPF_JMP | BPF_JNE | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSLT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
	case BPF_JMP | BPF_JSLE | BPF_K:
	case BPF_JMP32 | BPF_JEQ | BPF_K:
	case BPF_JMP32 | BPF_JGT | BPF_K:
	case BPF_JMP32 | BPF_JLT | BPF_K:
	case BPF_JMP32 | BPF_JGE | BPF_K:
	case BPF_JMP32 | BPF_JLE | BPF_K:
	case BPF_JMP32 | BPF_JNE | BPF_K:
	case BPF_JMP32 | BPF_JSGT | BPF_K:
	case BPF_JMP32 | BPF_JSLT | BPF_K:
	case BPF_JMP32 | BPF_JSGE | BPF_K:
	case BPF_JMP32 | BPF_JSLE | BPF_K:
		if (is_addsub_imm(imm)) {
			emit(A64_CMP_I(is64, dst, imm), ctx);
		} else if (is_addsub_imm(-imm)) {
			emit(A64_CMN_I(is64, dst, -imm), ctx);
		} else {
			emit_a64_mov_i(is64, tmp, imm, ctx);
			emit(A64_CMP(is64, dst, tmp), ctx);
		}
		goto emit_cond_jmp;
	case BPF_JMP | BPF_JSET | BPF_K:
	case BPF_JMP32 | BPF_JSET | BPF_K:
		a64_insn = A64_TST_I(is64, dst, imm);
		if (a64_insn != AARCH64_BREAK_FAULT) {
			emit(a64_insn, ctx);
		} else {
			emit_a64_mov_i(is64, tmp, imm, ctx);
			emit(A64_TST(is64, dst, tmp), ctx);
		}
		goto emit_cond_jmp;
	/* function call */
	case BPF_JMP | BPF_CALL:
	{
		const u8 r0 = bpf2a64[BPF_REG_0];
		bool func_addr_fixed;
		u64 func_addr;

		ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass,
					    &func_addr, &func_addr_fixed);
		if (ret < 0)
			return ret;
		emit_addr_mov_i64(tmp, func_addr, ctx);
		emit(A64_BLR(tmp), ctx);
		emit(A64_MOV(1, r0, A64_R(0)), ctx);
		break;
	}
	/* tail call */
	case BPF_JMP | BPF_TAIL_CALL:
		if (emit_bpf_tail_call(ctx))
			return -EFAULT;
		break;
	/* function return */
	case BPF_JMP | BPF_EXIT:
		/* Optimization: when the last instruction is EXIT,
		   simply fall through to the epilogue. */
		if (i == ctx->prog->len - 1)
			break;
		jmp_offset = epilogue_offset(ctx);
		check_imm26(jmp_offset);
		emit(A64_B(jmp_offset), ctx);
		break;

	/* dst = imm64 */
	case BPF_LD | BPF_IMM | BPF_DW:
	{
		const struct bpf_insn insn1 = insn[1];
		u64 imm64;

		imm64 = (u64)insn1.imm << 32 | (u32)imm;
		if (bpf_pseudo_func(insn))
			emit_addr_mov_i64(dst, imm64, ctx);
		else
			emit_a64_mov_i64(dst, imm64, ctx);

		return 1;
	}

	/* LDX: dst = *(size *)(src + off) */
	case BPF_LDX | BPF_MEM | BPF_W:
	case BPF_LDX | BPF_MEM | BPF_H:
	case BPF_LDX | BPF_MEM | BPF_B:
	case BPF_LDX | BPF_MEM | BPF_DW:
	case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
	case BPF_LDX | BPF_PROBE_MEM | BPF_W:
	case BPF_LDX | BPF_PROBE_MEM | BPF_H:
	case BPF_LDX | BPF_PROBE_MEM | BPF_B:
		emit_a64_mov_i(1, tmp, off, ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			emit(A64_LDR32(dst, src, tmp), ctx);
			break;
		case BPF_H:
			emit(A64_LDRH(dst, src, tmp), ctx);
			break;
		case BPF_B:
			emit(A64_LDRB(dst, src, tmp), ctx);
			break;
		case BPF_DW:
			emit(A64_LDR64(dst, src, tmp), ctx);
			break;
		}

		ret = add_exception_handler(insn, ctx, dst);
		if (ret)
			return ret;
		break;

	/* speculation barrier */
	case BPF_ST | BPF_NOSPEC:
		/*
		 * Nothing required here.
		 *
		 * On arm64, we rely on the firmware mitigation of
		 * Speculative Store Bypass as controlled via the ssbd kernel
		 * parameter. Whenever the mitigation is enabled, it works
		 * for all of the kernel code with no need to provide any
		 * additional instructions.
		 */
		break;

	/* ST: *(size *)(dst + off) = imm */
	case BPF_ST | BPF_MEM | BPF_W:
	case BPF_ST | BPF_MEM | BPF_H:
	case BPF_ST | BPF_MEM | BPF_B:
	case BPF_ST | BPF_MEM | BPF_DW:
		/* Load imm to a register then store it */
		emit_a64_mov_i(1, tmp2, off, ctx);
		emit_a64_mov_i(1, tmp, imm, ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			emit(A64_STR32(tmp, dst, tmp2), ctx);
			break;
		case BPF_H:
			emit(A64_STRH(tmp, dst, tmp2), ctx);
			break;
		case BPF_B:
			emit(A64_STRB(tmp, dst, tmp2), ctx);
			break;
		case BPF_DW:
			emit(A64_STR64(tmp, dst, tmp2), ctx);
			break;
		}
		break;

	/* STX: *(size *)(dst + off) = src */
	case BPF_STX | BPF_MEM | BPF_W:
	case BPF_STX | BPF_MEM | BPF_H:
	case BPF_STX | BPF_MEM | BPF_B:
	case BPF_STX | BPF_MEM | BPF_DW:
		emit_a64_mov_i(1, tmp, off, ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			emit(A64_STR32(src, dst, tmp), ctx);
			break;
		case BPF_H:
			emit(A64_STRH(src, dst, tmp), ctx);
			break;
		case BPF_B:
			emit(A64_STRB(src, dst, tmp), ctx);
			break;
		case BPF_DW:
			emit(A64_STR64(src, dst, tmp), ctx);
			break;
		}
		break;

	case BPF_STX | BPF_ATOMIC | BPF_W:
	case BPF_STX | BPF_ATOMIC | BPF_DW:
		if (insn->imm != BPF_ADD) {
			pr_err_once("unknown atomic op code %02x\n", insn->imm);
			return -EINVAL;
		}

		/* STX XADD: lock *(u32 *)(dst + off) += src
		 * and
		 * STX XADD: lock *(u64 *)(dst + off) += src
		 */

		if (!off) {
			reg = dst;
		} else {
			emit_a64_mov_i(1, tmp, off, ctx);
			emit(A64_ADD(1, tmp, tmp, dst), ctx);
			reg = tmp;
		}
		if (cpus_have_cap(ARM64_HAS_LSE_ATOMICS)) {
			emit(A64_STADD(isdw, reg, src), ctx);
		} else {
			emit(A64_LDXR(isdw, tmp2, reg), ctx);
			emit(A64_ADD(isdw, tmp2, tmp2, src), ctx);
			emit(A64_STXR(isdw, tmp2, reg, tmp3), ctx);
			jmp_offset = -3;
			check_imm19(jmp_offset);
			emit(A64_CBNZ(0, tmp3, jmp_offset), ctx);
		}
		break;

	default:
		pr_err_once("unknown opcode %02x\n", code);
		return -EINVAL;
	}

	return 0;
}

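/*
 * Translate the whole program once per JIT pass. On the sizing pass
 * (ctx->image == NULL) this also records, in ctx->offset[], the A64
 * instruction index at which each BPF instruction starts.
 */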
static int build_body(struct jit_ctx *ctx, bool extra_pass)
{
	const struct bpf_prog *prog = ctx->prog;
	int i;

	/*
	 * - offset[0] - offset of the end of prologue,
	 *   start of the 1st instruction.
	 * - offset[1] - offset of the end of 1st instruction,
	 *   start of the 2nd instruction
	 * [....]
	 * - offset[3] - offset of the end of 3rd instruction,
	 *   start of 4th instruction
	 */
	for (i = 0; i < prog->len; i++) {
		const struct bpf_insn *insn = &prog->insnsi[i];
		int ret;

		if (ctx->image == NULL)
			ctx->offset[i] = ctx->idx;
		ret = build_insn(insn, ctx, extra_pass);
		if (ret > 0) {
			i++;
			if (ctx->image == NULL)
				ctx->offset[i] = ctx->idx;
			continue;
		}
		if (ret)
			return ret;
	}
	/*
	 * offset is allocated with prog->len + 1 so fill in
	 * the last element with the offset after the last
	 * instruction (end of program)
	 */
	if (ctx->image == NULL)
		ctx->offset[i] = ctx->idx;

	return 0;
}

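/* Reject images that still contain the AARCH64_BREAK_FAULT filler or whose
 * exception table was not fully populated.
 */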
static int validate_code(struct jit_ctx *ctx)
{
	int i;

	for (i = 0; i < ctx->idx; i++) {
		u32 a64_insn = le32_to_cpu(ctx->image[i]);

		if (a64_insn == AARCH64_BREAK_FAULT)
			return -1;
	}

	if (WARN_ON_ONCE(ctx->exentry_idx != ctx->prog->aux->num_exentries))
		return -1;

	return 0;
}

static inline void bpf_flush_icache(void *start, void *end)
{
	flush_icache_range((unsigned long)start, (unsigned long)end);
}

struct arm64_jit_data {
	struct bpf_binary_header *header;
	u8 *image;
	struct jit_ctx ctx;
};

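/*
 * Main JIT entry point: a sizing pass computes instruction offsets and the
 * image size, the real pass emits code into the allocated image, and a final
 * pass validates the result (see the numbered steps below).
 */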
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	int image_size, prog_size, extable_size;
	struct bpf_prog *tmp, *orig_prog = prog;
	struct bpf_binary_header *header;
	struct arm64_jit_data *jit_data;
	bool was_classic = bpf_prog_was_classic(prog);
	bool tmp_blinded = false;
	bool extra_pass = false;
	struct jit_ctx ctx;
	u8 *image_ptr;

	if (!prog->jit_requested)
		return orig_prog;

	tmp = bpf_jit_blind_constants(prog);
	/* If blinding was requested and we failed during blinding,
	 * we must fall back to the interpreter.
	 */
	if (IS_ERR(tmp))
		return orig_prog;
	if (tmp != prog) {
		tmp_blinded = true;
		prog = tmp;
	}

	jit_data = prog->aux->jit_data;
	if (!jit_data) {
		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
		if (!jit_data) {
			prog = orig_prog;
			goto out;
		}
		prog->aux->jit_data = jit_data;
	}
	if (jit_data->ctx.offset) {
		ctx = jit_data->ctx;
		image_ptr = jit_data->image;
		header = jit_data->header;
		extra_pass = true;
		prog_size = sizeof(u32) * ctx.idx;
		goto skip_init_ctx;
	}
	memset(&ctx, 0, sizeof(ctx));
	ctx.prog = prog;

	ctx.offset = kcalloc(prog->len + 1, sizeof(int), GFP_KERNEL);
	if (ctx.offset == NULL) {
		prog = orig_prog;
		goto out_off;
	}

	/*
	 * 1. Initial fake pass to compute ctx->idx and ctx->offset.
	 *
	 * BPF line info needs ctx->offset[i] to be the offset of
	 * instruction[i] in jited image, so build prologue first.
	 */
	if (build_prologue(&ctx, was_classic)) {
		prog = orig_prog;
		goto out_off;
	}

	if (build_body(&ctx, extra_pass)) {
		prog = orig_prog;
		goto out_off;
	}

	ctx.epilogue_offset = ctx.idx;
	build_epilogue(&ctx);

	extable_size = prog->aux->num_exentries *
		sizeof(struct exception_table_entry);

	/* Now we know the actual image size. */
	prog_size = sizeof(u32) * ctx.idx;
	image_size = prog_size + extable_size;
	header = bpf_jit_binary_alloc(image_size, &image_ptr,
				      sizeof(u32), jit_fill_hole);
	if (header == NULL) {
		prog = orig_prog;
		goto out_off;
	}

	/* 2. Now, the actual pass. */

	ctx.image = (__le32 *)image_ptr;
	if (extable_size)
		prog->aux->extable = (void *)image_ptr + prog_size;
skip_init_ctx:
	ctx.idx = 0;
	ctx.exentry_idx = 0;

	build_prologue(&ctx, was_classic);

	if (build_body(&ctx, extra_pass)) {
		bpf_jit_binary_free(header);
		prog = orig_prog;
		goto out_off;
	}

	build_epilogue(&ctx);

	/* 3. Extra pass to validate JITed code. */
	if (validate_code(&ctx)) {
		bpf_jit_binary_free(header);
		prog = orig_prog;
		goto out_off;
	}

	/* And we're done. */
	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, prog_size, 2, ctx.image);

	bpf_flush_icache(header, ctx.image + ctx.idx);

	if (!prog->is_func || extra_pass) {
		if (extra_pass && ctx.idx != jit_data->ctx.idx) {
			pr_err_once("multi-func JIT bug %d != %d\n",
				    ctx.idx, jit_data->ctx.idx);
			bpf_jit_binary_free(header);
			prog->bpf_func = NULL;
			prog->jited = 0;
			prog->jited_len = 0;
			goto out_off;
		}
		bpf_jit_binary_lock_ro(header);
	} else {
		jit_data->ctx = ctx;
		jit_data->image = image_ptr;
		jit_data->header = header;
	}
	prog->bpf_func = (void *)ctx.image;
	prog->jited = 1;
	prog->jited_len = prog_size;

	if (!prog->is_func || extra_pass) {
		int i;

		/* offset[prog->len] is the size of program */
		for (i = 0; i <= prog->len; i++)
			ctx.offset[i] *= AARCH64_INSN_SIZE;
		bpf_prog_fill_jited_linfo(prog, ctx.offset + 1);
out_off:
		kfree(ctx.offset);
		kfree(jit_data);
		prog->aux->jit_data = NULL;
	}
out:
	if (tmp_blinded)
		bpf_jit_prog_release_other(prog, prog == orig_prog ?
					   tmp : orig_prog);
	return prog;
}

u64 bpf_jit_alloc_exec_limit(void)
{
	return VMALLOC_END - VMALLOC_START;
}

void *bpf_jit_alloc_exec(unsigned long size)
{
	/* Memory is intended to be executable, reset the pointer tag. */
	return kasan_reset_tag(vmalloc(size));
}

void bpf_jit_free_exec(void *addr)
{
	return vfree(addr);
}