// SPDX-License-Identifier: GPL-2.0-only
/*
 * Just-In-Time compiler for eBPF filters on 32bit ARM
 *
 * Copyright (c) 2017 Shubham Bansal <illusionist.neo@gmail.com>
 * Copyright (c) 2011 Mircea Gherzan <mgherzan@gmail.com>
 */

#include <linux/bpf.h>
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/filter.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>

#include <asm/cacheflush.h>
#include <asm/hwcap.h>
#include <asm/opcodes.h>
#include <asm/system_info.h>

#include "bpf_jit_32.h"

/*
 * eBPF prog stack layout:
 *
 *                         high
 * original ARM_SP =>     +-----+
 *                        |     | callee saved registers
 *                        +-----+ <= (BPF_FP + SCRATCH_SIZE)
 *                        | ... | eBPF JIT scratch space
 * eBPF fp register =>    +-----+
 *   (BPF_FP)             | ... | eBPF prog stack
 *                        +-----+
 *                        |RSVD | JIT scratchpad
 * current ARM_SP =>      +-----+ <= (BPF_FP - STACK_SIZE + SCRATCH_SIZE)
 *                        |     |
 *                        | ... | Function call stack
 *                        |     |
 *                        +-----+
 *                         low
 *
 * The callee saved registers depend on whether frame pointers are enabled.
 * With frame pointers (to be compliant with the ABI):
 *
 *                              high
 * original ARM_SP =>     +--------------+ \
 *                        |      pc      | |
 * current ARM_FP =>      +--------------+ } callee saved registers
 *                        |r4-r9,fp,ip,lr| |
 *                        +--------------+ /
 *                              low
 *
 * Without frame pointers:
 *
 *                              high
 * original ARM_SP =>     +--------------+
 *                        | r4-r9,fp,lr  | callee saved registers
 * current ARM_FP =>      +--------------+
 *                              low
 *
 * When popping registers off the stack at the end of a BPF function, we
 * reference them via the current ARM_FP register.
 */
#define CALLEE_MASK	(1 << ARM_R4 | 1 << ARM_R5 | 1 << ARM_R6 | \
			 1 << ARM_R7 | 1 << ARM_R8 | 1 << ARM_R9 | \
			 1 << ARM_FP)
#define CALLEE_PUSH_MASK (CALLEE_MASK | 1 << ARM_LR)
#define CALLEE_POP_MASK  (CALLEE_MASK | 1 << ARM_PC)
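
/*
 * Note: the only difference between the push and pop masks is that the
 * return address is pushed from ARM_LR but popped straight into ARM_PC,
 * so the epilogue's final pop doubles as the function return.
 */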

enum {
	/* Stack layout - these are offsets from (top of stack - 4) */
	BPF_R2_HI,
	BPF_R2_LO,
	BPF_R3_HI,
	BPF_R3_LO,
	BPF_R4_HI,
	BPF_R4_LO,
	BPF_R5_HI,
	BPF_R5_LO,
	BPF_R7_HI,
	BPF_R7_LO,
	BPF_R8_HI,
	BPF_R8_LO,
	BPF_R9_HI,
	BPF_R9_LO,
	BPF_FP_HI,
	BPF_FP_LO,
	BPF_TC_HI,
	BPF_TC_LO,
	BPF_AX_HI,
	BPF_AX_LO,
	/* Stack space for BPF_REG_2, BPF_REG_3, BPF_REG_4,
	 * BPF_REG_5, BPF_REG_7, BPF_REG_8, BPF_REG_9,
	 * BPF_REG_FP and the tail call count.
	 */
	BPF_JIT_SCRATCH_REGS,
};

/*
 * Negative "register" values indicate that the register is stored on the
 * stack; the value is the offset from the top of the eBPF JIT scratch space.
 */
#define STACK_OFFSET(k)	(-4 - (k) * 4)
#define SCRATCH_SIZE	(BPF_JIT_SCRATCH_REGS * 4)

#ifdef CONFIG_FRAME_POINTER
#define EBPF_SCRATCH_TO_ARM_FP(x) ((x) - 4 * hweight16(CALLEE_PUSH_MASK) - 4)
#else
#define EBPF_SCRATCH_TO_ARM_FP(x) (x)
#endif
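
/*
 * Worked example: BPF_R2_HI is enum value 0, so STACK_OFFSET(0) = -4 puts
 * its slot 4 bytes below the top of the scratch space, BPF_R2_LO at -8,
 * and so on. With CONFIG_FRAME_POINTER, hweight16(CALLEE_PUSH_MASK)
 * counts the 8 registers r4-r9, fp and lr, so
 * EBPF_SCRATCH_TO_ARM_FP(-4) = -4 - 32 - 4 = -40: the slot sits 40 bytes
 * below ARM_FP, underneath the ten words pushed by the prologue (those
 * 8 registers plus the saved ip and pc).
 */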

#define TMP_REG_1	(MAX_BPF_JIT_REG + 0)	/* TEMP Register 1 */
#define TMP_REG_2	(MAX_BPF_JIT_REG + 1)	/* TEMP Register 2 */
#define TCALL_CNT	(MAX_BPF_JIT_REG + 2)	/* Tail Call Count */

#define FLAG_IMM_OVERFLOW	(1 << 0)

/*
 * Map eBPF registers to ARM 32bit registers or stack scratch space.
 *
 * 1. The first argument is passed in ARM 32bit registers; the remaining
 *    arguments are passed on the stack scratch space.
 * 2. The first callee-saved register is mapped to ARM 32bit registers;
 *    the rest are mapped to the stack scratch space.
 * 3. We need two 64 bit temp registers to do complex operations on eBPF
 *    registers.
 *
 * As the eBPF registers are all 64 bit and ARM has only 32 bit registers,
 * we map each eBPF register to a pair of ARM 32 bit registers or to
 * scratch memory space, and build each 64 bit eBPF register from those.
 */
static const s8 bpf2a32[][2] = {
	/* return value from in-kernel function, and exit value from eBPF */
	[BPF_REG_0] = {ARM_R1, ARM_R0},
	/* arguments from eBPF program to in-kernel function */
	[BPF_REG_1] = {ARM_R3, ARM_R2},
	/* Stored on stack scratch space */
	[BPF_REG_2] = {STACK_OFFSET(BPF_R2_HI), STACK_OFFSET(BPF_R2_LO)},
	[BPF_REG_3] = {STACK_OFFSET(BPF_R3_HI), STACK_OFFSET(BPF_R3_LO)},
	[BPF_REG_4] = {STACK_OFFSET(BPF_R4_HI), STACK_OFFSET(BPF_R4_LO)},
	[BPF_REG_5] = {STACK_OFFSET(BPF_R5_HI), STACK_OFFSET(BPF_R5_LO)},
	/* callee saved registers that in-kernel function will preserve */
	[BPF_REG_6] = {ARM_R5, ARM_R4},
	/* Stored on stack scratch space */
	[BPF_REG_7] = {STACK_OFFSET(BPF_R7_HI), STACK_OFFSET(BPF_R7_LO)},
	[BPF_REG_8] = {STACK_OFFSET(BPF_R8_HI), STACK_OFFSET(BPF_R8_LO)},
	[BPF_REG_9] = {STACK_OFFSET(BPF_R9_HI), STACK_OFFSET(BPF_R9_LO)},
	/* Read only Frame Pointer to access Stack */
	[BPF_REG_FP] = {STACK_OFFSET(BPF_FP_HI), STACK_OFFSET(BPF_FP_LO)},
	/* Temporary Register for internal BPF JIT, can be used
	 * for constant blinding and other purposes.
	 */
	[TMP_REG_1] = {ARM_R7, ARM_R6},
	[TMP_REG_2] = {ARM_R9, ARM_R8},
	/* Tail call count. Stored on stack scratch space. */
	[TCALL_CNT] = {STACK_OFFSET(BPF_TC_HI), STACK_OFFSET(BPF_TC_LO)},
	/* temporary register for blinding constants.
	 * Stored on stack scratch space.
	 */
	[BPF_REG_AX] = {STACK_OFFSET(BPF_AX_HI), STACK_OFFSET(BPF_AX_LO)},
};

#define dst_lo	dst[1]
#define dst_hi	dst[0]
#define src_lo	src[1]
#define src_hi	src[0]

/*
 * JIT Context:
 *
 * prog			:	bpf_prog
 * idx			:	index of the current last JITed instruction.
 * prologue_bytes	:	bytes used in prologue.
 * epilogue_offset	:	offset at which the epilogue starts.
 * offsets		:	array of eBPF instruction offsets in
 *				JITed code.
 * target		:	final JITed code.
 * epilogue_bytes	:	number of bytes used in epilogue.
 * imm_count		:	number of immediate values used for global
 *				variables.
 * imms			:	array of global variable addresses.
 */

struct jit_ctx {
	const struct bpf_prog *prog;
	unsigned int idx;
	unsigned int prologue_bytes;
	unsigned int epilogue_offset;
	unsigned int cpu_architecture;
	u32 flags;
	u32 *offsets;
	u32 *target;
	u32 stack_size;
#if __LINUX_ARM_ARCH__ < 7
	u16 epilogue_bytes;
	u16 imm_count;
	u32 *imms;
#endif
};

/*
 * Wrappers which handle both OABI and EABI and assure Thumb2 interworking
 * (where the assembly routines like __aeabi_uidiv could cause problems).
 */
static u32 jit_udiv32(u32 dividend, u32 divisor)
{
	return dividend / divisor;
}

static u32 jit_mod32(u32 dividend, u32 divisor)
{
	return dividend % divisor;
}

static inline void _emit(int cond, u32 inst, struct jit_ctx *ctx)
{
	inst |= (cond << 28);
	inst = __opcode_to_mem_arm(inst);

	if (ctx->target != NULL)
		ctx->target[ctx->idx] = inst;

	ctx->idx++;
}

/*
 * Emit an instruction that will be executed unconditionally.
 */
static inline void emit(u32 inst, struct jit_ctx *ctx)
{
	_emit(ARM_COND_AL, inst, ctx);
}

/*
 * This is rather horrid, but necessary to convert an integer constant
 * to an immediate operand for the opcodes, and be able to detect at
 * build time whether the constant can't be converted (iow, usable in
 * BUILD_BUG_ON()).
 */
#define imm12val(v, s) (rol32(v, (s)) | (s) << 7)
#define const_imm8m(x)				\
	({ int r;				\
	   u32 v = (x);				\
	   if (!(v & ~0x000000ff))		\
		r = imm12val(v, 0);		\
	   else if (!(v & ~0xc000003f))		\
		r = imm12val(v, 2);		\
	   else if (!(v & ~0xf000000f))		\
		r = imm12val(v, 4);		\
	   else if (!(v & ~0xfc000003))		\
		r = imm12val(v, 6);		\
	   else if (!(v & ~0xff000000))		\
		r = imm12val(v, 8);		\
	   else if (!(v & ~0x3fc00000))		\
		r = imm12val(v, 10);		\
	   else if (!(v & ~0x0ff00000))		\
		r = imm12val(v, 12);		\
	   else if (!(v & ~0x03fc0000))		\
		r = imm12val(v, 14);		\
	   else if (!(v & ~0x00ff0000))		\
		r = imm12val(v, 16);		\
	   else if (!(v & ~0x003fc000))		\
		r = imm12val(v, 18);		\
	   else if (!(v & ~0x000ff000))		\
		r = imm12val(v, 20);		\
	   else if (!(v & ~0x0003fc00))		\
		r = imm12val(v, 22);		\
	   else if (!(v & ~0x0000ff00))		\
		r = imm12val(v, 24);		\
	   else if (!(v & ~0x00003fc0))		\
		r = imm12val(v, 26);		\
	   else if (!(v & ~0x00000ff0))		\
		r = imm12val(v, 28);		\
	   else if (!(v & ~0x000003fc))		\
		r = imm12val(v, 30);		\
	   else					\
		r = -1;				\
	   r; })

/*
 * Check whether an immediate value can be encoded as an ARM rotated
 * immediate (imm12: an 8-bit value rotated right by an even amount).
 */
static int imm8m(u32 x)
{
	u32 rot;

	for (rot = 0; rot < 16; rot++)
		if ((x & ~ror32(0xff, 2 * rot)) == 0)
			return rol32(x, 2 * rot) | (rot << 8);
	return -1;
}

#define imm8m(x) (__builtin_constant_p(x) ? const_imm8m(x) : imm8m(x))
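
/*
 * Worked example: imm8m(0xff000000) succeeds at rot = 4, because
 * ror32(0xff, 8) == 0xff000000 covers the value. It returns
 * rol32(0xff000000, 8) | (4 << 8) = 0x4ff, i.e. imm8 = 0xff with a
 * rotation field of 4 (rotate right by 8). A value like 0x101 spans
 * more than 8 contiguous even-aligned bits and yields -1, so the
 * caller must fall back to emit_mov_i_no8m().
 */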

static u32 arm_bpf_ldst_imm12(u32 op, u8 rt, u8 rn, s16 imm12)
{
	op |= rt << 12 | rn << 16;
	if (imm12 >= 0)
		op |= ARM_INST_LDST__U;
	else
		imm12 = -imm12;
	return op | (imm12 & ARM_INST_LDST__IMM12);
}

static u32 arm_bpf_ldst_imm8(u32 op, u8 rt, u8 rn, s16 imm8)
{
	op |= rt << 12 | rn << 16;
	if (imm8 >= 0)
		op |= ARM_INST_LDST__U;
	else
		imm8 = -imm8;
	return op | (imm8 & 0xf0) << 4 | (imm8 & 0x0f);
}

#define ARM_LDR_I(rt, rn, off)	arm_bpf_ldst_imm12(ARM_INST_LDR_I, rt, rn, off)
#define ARM_LDRB_I(rt, rn, off)	arm_bpf_ldst_imm12(ARM_INST_LDRB_I, rt, rn, off)
#define ARM_LDRD_I(rt, rn, off)	arm_bpf_ldst_imm8(ARM_INST_LDRD_I, rt, rn, off)
#define ARM_LDRH_I(rt, rn, off)	arm_bpf_ldst_imm8(ARM_INST_LDRH_I, rt, rn, off)

#define ARM_STR_I(rt, rn, off)	arm_bpf_ldst_imm12(ARM_INST_STR_I, rt, rn, off)
#define ARM_STRB_I(rt, rn, off)	arm_bpf_ldst_imm12(ARM_INST_STRB_I, rt, rn, off)
#define ARM_STRD_I(rt, rn, off)	arm_bpf_ldst_imm8(ARM_INST_STRD_I, rt, rn, off)
#define ARM_STRH_I(rt, rn, off)	arm_bpf_ldst_imm8(ARM_INST_STRH_I, rt, rn, off)

/*
 * Initializes the JIT space with undefined instructions.
 */
static void jit_fill_hole(void *area, unsigned int size)
{
	u32 *ptr;
	/* We are guaranteed to have aligned memory. */
	for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
		*ptr++ = __opcode_to_mem_arm(ARM_INST_UDF);
}

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
/* EABI requires the stack to be aligned to 64-bit boundaries */
#define STACK_ALIGNMENT	8
#else
/* Stack must be aligned to 32-bit boundaries */
#define STACK_ALIGNMENT	4
#endif

/* total stack size used in JITed code */
#define _STACK_SIZE	(ctx->prog->aux->stack_depth + SCRATCH_SIZE)
#define STACK_SIZE	ALIGN(_STACK_SIZE, STACK_ALIGNMENT)

#if __LINUX_ARM_ARCH__ < 7

static u16 imm_offset(u32 k, struct jit_ctx *ctx)
{
	unsigned int i = 0, offset;
	u16 imm;

	/* on the "fake" run we just count them (duplicates included) */
	if (ctx->target == NULL) {
		ctx->imm_count++;
		return 0;
	}

	while ((i < ctx->imm_count) && ctx->imms[i]) {
		if (ctx->imms[i] == k)
			break;
		i++;
	}

	if (ctx->imms[i] == 0)
		ctx->imms[i] = k;

	/* constants go just after the epilogue */
	offset = ctx->offsets[ctx->prog->len - 1] * 4;
	offset += ctx->prologue_bytes;
	offset += ctx->epilogue_bytes;
	offset += i * 4;

	ctx->target[offset / 4] = k;

	/* PC in ARM mode == address of the instruction + 8 */
	imm = offset - (8 + ctx->idx * 4);

	if (imm & ~0xfff) {
		/*
		 * literal pool is too far, signal it into flags. we
		 * can only detect it on the second pass unfortunately.
		 */
		ctx->flags |= FLAG_IMM_OVERFLOW;
		return 0;
	}

	return imm;
}

#endif /* __LINUX_ARM_ARCH__ */
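
/*
 * Literal pool sketch (pre-ARMv7 only): a constant first used at word
 * index idx and stored in pool slot i lives at byte offset
 * prologue_bytes + body + epilogue_bytes + i * 4, and is fetched with a
 * PC-relative LDR whose 12-bit immediate is pool_offset - (idx * 4 + 8).
 * Programs whose remaining body plus epilogue exceed that 4 KiB reach
 * set FLAG_IMM_OVERFLOW and are punted to the interpreter.
 */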

static inline int bpf2a32_offset(int bpf_to, int bpf_from,
				 const struct jit_ctx *ctx) {
	int to, from;

	if (ctx->target == NULL)
		return 0;
	to = ctx->offsets[bpf_to];
	from = ctx->offsets[bpf_from];

	return to - from - 1;
}

/*
 * Move an immediate that's not an imm8m to a core register.
 */
static inline void emit_mov_i_no8m(const u8 rd, u32 val, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ < 7
	emit(ARM_LDR_I(rd, ARM_PC, imm_offset(val, ctx)), ctx);
#else
	emit(ARM_MOVW(rd, val & 0xffff), ctx);
	if (val > 0xffff)
		emit(ARM_MOVT(rd, val >> 16), ctx);
#endif
}

static inline void emit_mov_i(const u8 rd, u32 val, struct jit_ctx *ctx)
{
	int imm12 = imm8m(val);

	if (imm12 >= 0)
		emit(ARM_MOV_I(rd, imm12), ctx);
	else
		emit_mov_i_no8m(rd, val, ctx);
}
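
/*
 * For example, emit_mov_i(rd, 0x12345678) finds no rotated-immediate
 * encoding, so on ARMv7+ it expands to the pair
 *	movw	rd, #0x5678
 *	movt	rd, #0x1234
 * while emit_mov_i(rd, 0xff000000) fits imm8m() and becomes a single
 *	mov	rd, #0xff000000
 */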

static void emit_bx_r(u8 tgt_reg, struct jit_ctx *ctx)
{
	if (elf_hwcap & HWCAP_THUMB)
		emit(ARM_BX(tgt_reg), ctx);
	else
		emit(ARM_MOV_R(ARM_PC, tgt_reg), ctx);
}

static inline void emit_blx_r(u8 tgt_reg, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ < 5
	emit(ARM_MOV_R(ARM_LR, ARM_PC), ctx);
	emit_bx_r(tgt_reg, ctx);
#else
	emit(ARM_BLX_R(tgt_reg), ctx);
#endif
}

static inline int epilogue_offset(const struct jit_ctx *ctx)
{
	int to, from;
	/* No need for 1st dummy run */
	if (ctx->target == NULL)
		return 0;
	to = ctx->epilogue_offset;
	from = ctx->idx;

	return to - from - 2;
}

static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx, u8 op)
{
	const s8 *tmp = bpf2a32[TMP_REG_1];

#if __LINUX_ARM_ARCH__ == 7
	if (elf_hwcap & HWCAP_IDIVA) {
		if (op == BPF_DIV)
			emit(ARM_UDIV(rd, rm, rn), ctx);
		else {
			emit(ARM_UDIV(ARM_IP, rm, rn), ctx);
			emit(ARM_MLS(rd, rn, ARM_IP, rm), ctx);
		}
		return;
	}
#endif

	/*
	 * For BPF_ALU | BPF_DIV | BPF_K instructions, ARM_R1 and ARM_R0
	 * hold the first argument of the BPF function, so we must save
	 * them on the caller side to keep the callee from clobbering
	 * them. After the return from the callee, we restore ARM_R0
	 * and ARM_R1.
	 */
	if (rn != ARM_R1) {
		emit(ARM_MOV_R(tmp[0], ARM_R1), ctx);
		emit(ARM_MOV_R(ARM_R1, rn), ctx);
	}
	if (rm != ARM_R0) {
		emit(ARM_MOV_R(tmp[1], ARM_R0), ctx);
		emit(ARM_MOV_R(ARM_R0, rm), ctx);
	}

	/* Call appropriate function */
	emit_mov_i(ARM_IP, op == BPF_DIV ?
		   (u32)jit_udiv32 : (u32)jit_mod32, ctx);
	emit_blx_r(ARM_IP, ctx);

	/* Save return value */
	if (rd != ARM_R0)
		emit(ARM_MOV_R(rd, ARM_R0), ctx);

	/* Restore ARM_R0 and ARM_R1 */
	if (rn != ARM_R1)
		emit(ARM_MOV_R(ARM_R1, tmp[0]), ctx);
	if (rm != ARM_R0)
		emit(ARM_MOV_R(ARM_R0, tmp[1]), ctx);
}
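
/*
 * On cores with hardware divide, the modulo path above computes the
 * remainder without a helper call:
 *	udiv	ip, rm, rn		@ ip = rm / rn
 *	mls	rd, rn, ip, rm		@ rd = rm - rn * ip = rm % rn
 * e.g. rm = 17, rn = 5: ip = 3, rd = 17 - 15 = 2.
 */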

/* Is the translated BPF register on stack? */
static bool is_stacked(s8 reg)
{
	return reg < 0;
}

/* If a BPF register is on the stack (is_stacked() is true), load it to
 * the supplied temporary register and return the temporary register
 * for subsequent operations, otherwise just use the CPU register.
 */
static s8 arm_bpf_get_reg32(s8 reg, s8 tmp, struct jit_ctx *ctx)
{
	if (is_stacked(reg)) {
		emit(ARM_LDR_I(tmp, ARM_FP, EBPF_SCRATCH_TO_ARM_FP(reg)), ctx);
		reg = tmp;
	}
	return reg;
}

static const s8 *arm_bpf_get_reg64(const s8 *reg, const s8 *tmp,
				   struct jit_ctx *ctx)
{
	if (is_stacked(reg[1])) {
		if (__LINUX_ARM_ARCH__ >= 6 ||
		    ctx->cpu_architecture >= CPU_ARCH_ARMv5TE) {
			emit(ARM_LDRD_I(tmp[1], ARM_FP,
					EBPF_SCRATCH_TO_ARM_FP(reg[1])), ctx);
		} else {
			emit(ARM_LDR_I(tmp[1], ARM_FP,
				       EBPF_SCRATCH_TO_ARM_FP(reg[1])), ctx);
			emit(ARM_LDR_I(tmp[0], ARM_FP,
				       EBPF_SCRATCH_TO_ARM_FP(reg[0])), ctx);
		}
		reg = tmp;
	}
	return reg;
}

/* If a BPF register is on the stack (is_stacked() is true), save the
 * register back to the stack. If the source register is not the same,
 * then move it into the correct register.
 */
static void arm_bpf_put_reg32(s8 reg, s8 src, struct jit_ctx *ctx)
{
	if (is_stacked(reg))
		emit(ARM_STR_I(src, ARM_FP, EBPF_SCRATCH_TO_ARM_FP(reg)), ctx);
	else if (reg != src)
		emit(ARM_MOV_R(reg, src), ctx);
}

static void arm_bpf_put_reg64(const s8 *reg, const s8 *src,
			      struct jit_ctx *ctx)
{
	if (is_stacked(reg[1])) {
		if (__LINUX_ARM_ARCH__ >= 6 ||
		    ctx->cpu_architecture >= CPU_ARCH_ARMv5TE) {
			emit(ARM_STRD_I(src[1], ARM_FP,
					EBPF_SCRATCH_TO_ARM_FP(reg[1])), ctx);
		} else {
			emit(ARM_STR_I(src[1], ARM_FP,
				       EBPF_SCRATCH_TO_ARM_FP(reg[1])), ctx);
			emit(ARM_STR_I(src[0], ARM_FP,
				       EBPF_SCRATCH_TO_ARM_FP(reg[0])), ctx);
		}
	} else {
		if (reg[1] != src[1])
			emit(ARM_MOV_R(reg[1], src[1]), ctx);
		if (reg[0] != src[0])
			emit(ARM_MOV_R(reg[0], src[0]), ctx);
	}
}

static inline void emit_a32_mov_i(const s8 dst, const u32 val,
				  struct jit_ctx *ctx)
{
	const s8 *tmp = bpf2a32[TMP_REG_1];

	if (is_stacked(dst)) {
		emit_mov_i(tmp[1], val, ctx);
		arm_bpf_put_reg32(dst, tmp[1], ctx);
	} else {
		emit_mov_i(dst, val, ctx);
	}
}

static void emit_a32_mov_i64(const s8 dst[], u64 val, struct jit_ctx *ctx)
{
	const s8 *tmp = bpf2a32[TMP_REG_1];
	const s8 *rd = is_stacked(dst_lo) ? tmp : dst;

	emit_mov_i(rd[1], (u32)val, ctx);
	emit_mov_i(rd[0], val >> 32, ctx);

	arm_bpf_put_reg64(dst, rd, ctx);
}

/* Sign extended move */
static inline void emit_a32_mov_se_i64(const bool is64, const s8 dst[],
				       const u32 val, struct jit_ctx *ctx) {
	u64 val64 = val;

	if (is64 && (val & (1<<31)))
		val64 |= 0xffffffff00000000ULL;
	emit_a32_mov_i64(dst, val64, ctx);
}

static inline void emit_a32_add_r(const u8 dst, const u8 src,
				  const bool is64, const bool hi,
				  struct jit_ctx *ctx) {
	/* 64 bit :
	 *	adds dst_lo, dst_lo, src_lo
	 *	adc dst_hi, dst_hi, src_hi
	 * 32 bit :
	 *	add dst_lo, dst_lo, src_lo
	 */
	if (!hi && is64)
		emit(ARM_ADDS_R(dst, dst, src), ctx);
	else if (hi && is64)
		emit(ARM_ADC_R(dst, dst, src), ctx);
	else
		emit(ARM_ADD_R(dst, dst, src), ctx);
}

static inline void emit_a32_sub_r(const u8 dst, const u8 src,
				  const bool is64, const bool hi,
				  struct jit_ctx *ctx) {
	/* 64 bit :
	 *	subs dst_lo, dst_lo, src_lo
	 *	sbc dst_hi, dst_hi, src_hi
	 * 32 bit :
	 *	sub dst_lo, dst_lo, src_lo
	 */
	if (!hi && is64)
		emit(ARM_SUBS_R(dst, dst, src), ctx);
	else if (hi && is64)
		emit(ARM_SBC_R(dst, dst, src), ctx);
	else
		emit(ARM_SUB_R(dst, dst, src), ctx);
}

static inline void emit_alu_r(const u8 dst, const u8 src, const bool is64,
			      const bool hi, const u8 op, struct jit_ctx *ctx){
	switch (BPF_OP(op)) {
	/* dst = dst + src */
	case BPF_ADD:
		emit_a32_add_r(dst, src, is64, hi, ctx);
		break;
	/* dst = dst - src */
	case BPF_SUB:
		emit_a32_sub_r(dst, src, is64, hi, ctx);
		break;
	/* dst = dst | src */
	case BPF_OR:
		emit(ARM_ORR_R(dst, dst, src), ctx);
		break;
	/* dst = dst & src */
	case BPF_AND:
		emit(ARM_AND_R(dst, dst, src), ctx);
		break;
	/* dst = dst ^ src */
	case BPF_XOR:
		emit(ARM_EOR_R(dst, dst, src), ctx);
		break;
	/* dst = dst * src */
	case BPF_MUL:
		emit(ARM_MUL(dst, dst, src), ctx);
		break;
	/* dst = dst << src */
	case BPF_LSH:
		emit(ARM_LSL_R(dst, dst, src), ctx);
		break;
	/* dst = dst >> src */
	case BPF_RSH:
		emit(ARM_LSR_R(dst, dst, src), ctx);
		break;
	/* dst = dst >> src (signed) */
	case BPF_ARSH:
		emit(ARM_MOV_SR(dst, dst, SRTYPE_ASR, src), ctx);
		break;
	}
}

/* ALU operation (32 bit)
 * dst = dst (op) src
 */
static inline void emit_a32_alu_r(const s8 dst, const s8 src,
				  struct jit_ctx *ctx, const bool is64,
				  const bool hi, const u8 op) {
	const s8 *tmp = bpf2a32[TMP_REG_1];
	s8 rn, rd;

	rn = arm_bpf_get_reg32(src, tmp[1], ctx);
	rd = arm_bpf_get_reg32(dst, tmp[0], ctx);
	/* ALU operation */
	emit_alu_r(rd, rn, is64, hi, op, ctx);
	arm_bpf_put_reg32(dst, rd, ctx);
}

/* ALU operation (64 bit) */
static inline void emit_a32_alu_r64(const bool is64, const s8 dst[],
				    const s8 src[], struct jit_ctx *ctx,
				    const u8 op) {
	const s8 *tmp = bpf2a32[TMP_REG_1];
	const s8 *tmp2 = bpf2a32[TMP_REG_2];
	const s8 *rd;

	rd = arm_bpf_get_reg64(dst, tmp, ctx);
	if (is64) {
		const s8 *rs;

		rs = arm_bpf_get_reg64(src, tmp2, ctx);

		/* ALU operation */
		emit_alu_r(rd[1], rs[1], true, false, op, ctx);
		emit_alu_r(rd[0], rs[0], true, true, op, ctx);
	} else {
		s8 rs;

		rs = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);

		/* ALU operation */
		emit_alu_r(rd[1], rs, true, false, op, ctx);
		if (!ctx->prog->aux->verifier_zext)
			emit_a32_mov_i(rd[0], 0, ctx);
	}

	arm_bpf_put_reg64(dst, rd, ctx);
}

/* dst = src (4 bytes)*/
static inline void emit_a32_mov_r(const s8 dst, const s8 src,
				  struct jit_ctx *ctx) {
	const s8 *tmp = bpf2a32[TMP_REG_1];
	s8 rt;

	rt = arm_bpf_get_reg32(src, tmp[0], ctx);
	arm_bpf_put_reg32(dst, rt, ctx);
}

/* dst = src */
static inline void emit_a32_mov_r64(const bool is64, const s8 dst[],
				    const s8 src[],
				    struct jit_ctx *ctx) {
	if (!is64) {
		emit_a32_mov_r(dst_lo, src_lo, ctx);
		if (!ctx->prog->aux->verifier_zext)
			/* Zero out high 4 bytes */
			emit_a32_mov_i(dst_hi, 0, ctx);
	} else if (__LINUX_ARM_ARCH__ < 6 &&
		   ctx->cpu_architecture < CPU_ARCH_ARMv5TE) {
		/* complete 8 byte move */
		emit_a32_mov_r(dst_lo, src_lo, ctx);
		emit_a32_mov_r(dst_hi, src_hi, ctx);
	} else if (is_stacked(src_lo) && is_stacked(dst_lo)) {
		const u8 *tmp = bpf2a32[TMP_REG_1];

		emit(ARM_LDRD_I(tmp[1], ARM_FP, EBPF_SCRATCH_TO_ARM_FP(src_lo)), ctx);
		emit(ARM_STRD_I(tmp[1], ARM_FP, EBPF_SCRATCH_TO_ARM_FP(dst_lo)), ctx);
	} else if (is_stacked(src_lo)) {
		emit(ARM_LDRD_I(dst[1], ARM_FP, EBPF_SCRATCH_TO_ARM_FP(src_lo)), ctx);
	} else if (is_stacked(dst_lo)) {
		emit(ARM_STRD_I(src[1], ARM_FP, EBPF_SCRATCH_TO_ARM_FP(dst_lo)), ctx);
	} else {
		emit(ARM_MOV_R(dst[0], src[0]), ctx);
		emit(ARM_MOV_R(dst[1], src[1]), ctx);
	}
}

/* Shift and negate operations (32 bit) */
static inline void emit_a32_alu_i(const s8 dst, const u32 val,
				  struct jit_ctx *ctx, const u8 op) {
	const s8 *tmp = bpf2a32[TMP_REG_1];
	s8 rd;

	rd = arm_bpf_get_reg32(dst, tmp[0], ctx);

	/* Do shift operation */
	switch (op) {
	case BPF_LSH:
		emit(ARM_LSL_I(rd, rd, val), ctx);
		break;
	case BPF_RSH:
		emit(ARM_LSR_I(rd, rd, val), ctx);
		break;
	case BPF_NEG:
		emit(ARM_RSB_I(rd, rd, val), ctx);
		break;
	}

	arm_bpf_put_reg32(dst, rd, ctx);
}

/* dst = -dst (64 bit) */
static inline void emit_a32_neg64(const s8 dst[],
				  struct jit_ctx *ctx){
	const s8 *tmp = bpf2a32[TMP_REG_1];
	const s8 *rd;

	/* Setup Operand */
	rd = arm_bpf_get_reg64(dst, tmp, ctx);

	/* Do Negate Operation */
	emit(ARM_RSBS_I(rd[1], rd[1], 0), ctx);
	emit(ARM_RSC_I(rd[0], rd[0], 0), ctx);

	arm_bpf_put_reg64(dst, rd, ctx);
}

/* dst = dst << src */
static inline void emit_a32_lsh_r64(const s8 dst[], const s8 src[],
				    struct jit_ctx *ctx) {
	const s8 *tmp = bpf2a32[TMP_REG_1];
	const s8 *tmp2 = bpf2a32[TMP_REG_2];
	const s8 *rd;
	s8 rt;

	/* Setup Operands */
	rt = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
	rd = arm_bpf_get_reg64(dst, tmp, ctx);

	/* Do LSH operation */
	emit(ARM_SUB_I(ARM_IP, rt, 32), ctx);
	emit(ARM_RSB_I(tmp2[0], rt, 32), ctx);
	emit(ARM_MOV_SR(ARM_LR, rd[0], SRTYPE_ASL, rt), ctx);
	emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd[1], SRTYPE_ASL, ARM_IP), ctx);
	emit(ARM_ORR_SR(ARM_IP, ARM_LR, rd[1], SRTYPE_LSR, tmp2[0]), ctx);
	emit(ARM_MOV_SR(ARM_LR, rd[1], SRTYPE_ASL, rt), ctx);

	arm_bpf_put_reg32(dst_lo, ARM_LR, ctx);
	arm_bpf_put_reg32(dst_hi, ARM_IP, ctx);
}
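
/*
 * The branch-free trick above relies on ARM register-specified shifts
 * using the bottom byte of the shift register and producing 0 for
 * amounts >= 32. The new high word is built from three terms:
 *	hi << rt  |  lo << (rt - 32)  |  lo >> (32 - rt)
 * For rt < 32 the middle term's count wraps to a large unsigned byte,
 * so it contributes 0; for rt >= 32 the first and last terms vanish
 * instead. Either way no conditional branch is needed.
 */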

/* dst = dst >> src (signed)*/
static inline void emit_a32_arsh_r64(const s8 dst[], const s8 src[],
				     struct jit_ctx *ctx) {
	const s8 *tmp = bpf2a32[TMP_REG_1];
	const s8 *tmp2 = bpf2a32[TMP_REG_2];
	const s8 *rd;
	s8 rt;

	/* Setup Operands */
	rt = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
	rd = arm_bpf_get_reg64(dst, tmp, ctx);

	/* Do the ARSH operation */
	emit(ARM_RSB_I(ARM_IP, rt, 32), ctx);
	emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx);
	emit(ARM_MOV_SR(ARM_LR, rd[1], SRTYPE_LSR, rt), ctx);
	emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd[0], SRTYPE_ASL, ARM_IP), ctx);
	_emit(ARM_COND_MI, ARM_B(0), ctx);
	emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd[0], SRTYPE_ASR, tmp2[0]), ctx);
	emit(ARM_MOV_SR(ARM_IP, rd[0], SRTYPE_ASR, rt), ctx);

	arm_bpf_put_reg32(dst_lo, ARM_LR, ctx);
	arm_bpf_put_reg32(dst_hi, ARM_IP, ctx);
}

/* dst = dst >> src */
static inline void emit_a32_rsh_r64(const s8 dst[], const s8 src[],
				    struct jit_ctx *ctx) {
	const s8 *tmp = bpf2a32[TMP_REG_1];
	const s8 *tmp2 = bpf2a32[TMP_REG_2];
	const s8 *rd;
	s8 rt;

	/* Setup Operands */
	rt = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
	rd = arm_bpf_get_reg64(dst, tmp, ctx);

	/* Do RSH operation */
	emit(ARM_RSB_I(ARM_IP, rt, 32), ctx);
	emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx);
	emit(ARM_MOV_SR(ARM_LR, rd[1], SRTYPE_LSR, rt), ctx);
	emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd[0], SRTYPE_ASL, ARM_IP), ctx);
	emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd[0], SRTYPE_LSR, tmp2[0]), ctx);
	emit(ARM_MOV_SR(ARM_IP, rd[0], SRTYPE_LSR, rt), ctx);

	arm_bpf_put_reg32(dst_lo, ARM_LR, ctx);
	arm_bpf_put_reg32(dst_hi, ARM_IP, ctx);
}

/* dst = dst << val */
static inline void emit_a32_lsh_i64(const s8 dst[],
				    const u32 val, struct jit_ctx *ctx){
	const s8 *tmp = bpf2a32[TMP_REG_1];
	const s8 *tmp2 = bpf2a32[TMP_REG_2];
	const s8 *rd;

	/* Setup operands */
	rd = arm_bpf_get_reg64(dst, tmp, ctx);

	/* Do LSH operation */
	if (val < 32) {
		emit(ARM_MOV_SI(tmp2[0], rd[0], SRTYPE_ASL, val), ctx);
		emit(ARM_ORR_SI(rd[0], tmp2[0], rd[1], SRTYPE_LSR, 32 - val), ctx);
		emit(ARM_MOV_SI(rd[1], rd[1], SRTYPE_ASL, val), ctx);
	} else {
		if (val == 32)
			emit(ARM_MOV_R(rd[0], rd[1]), ctx);
		else
			emit(ARM_MOV_SI(rd[0], rd[1], SRTYPE_ASL, val - 32), ctx);
		emit(ARM_EOR_R(rd[1], rd[1], rd[1]), ctx);
	}

	arm_bpf_put_reg64(dst, rd, ctx);
}

/* dst = dst >> val */
static inline void emit_a32_rsh_i64(const s8 dst[],
				    const u32 val, struct jit_ctx *ctx) {
	const s8 *tmp = bpf2a32[TMP_REG_1];
	const s8 *tmp2 = bpf2a32[TMP_REG_2];
	const s8 *rd;

	/* Setup operands */
	rd = arm_bpf_get_reg64(dst, tmp, ctx);

	/* Do LSR operation */
	if (val < 32) {
		emit(ARM_MOV_SI(tmp2[1], rd[1], SRTYPE_LSR, val), ctx);
		emit(ARM_ORR_SI(rd[1], tmp2[1], rd[0], SRTYPE_ASL, 32 - val), ctx);
		emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_LSR, val), ctx);
	} else if (val == 32) {
		emit(ARM_MOV_R(rd[1], rd[0]), ctx);
		emit(ARM_MOV_I(rd[0], 0), ctx);
	} else {
		emit(ARM_MOV_SI(rd[1], rd[0], SRTYPE_LSR, val - 32), ctx);
		emit(ARM_MOV_I(rd[0], 0), ctx);
	}

	arm_bpf_put_reg64(dst, rd, ctx);
}
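
/*
 * Immediate shifts are fully resolved at JIT time, e.g. for val = 8:
 *	lo = (lo >> 8) | (hi << 24);  hi = hi >> 8;
 * and for val = 40 the whole high word moves down:
 *	lo = hi >> 8;  hi = 0;
 * so at most three instructions are emitted, with no branches.
 */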

/* dst = dst >> val (signed) */
static inline void emit_a32_arsh_i64(const s8 dst[],
				     const u32 val, struct jit_ctx *ctx){
	const s8 *tmp = bpf2a32[TMP_REG_1];
	const s8 *tmp2 = bpf2a32[TMP_REG_2];
	const s8 *rd;

	/* Setup operands */
	rd = arm_bpf_get_reg64(dst, tmp, ctx);

	/* Do ARSH operation */
	if (val < 32) {
		emit(ARM_MOV_SI(tmp2[1], rd[1], SRTYPE_LSR, val), ctx);
		emit(ARM_ORR_SI(rd[1], tmp2[1], rd[0], SRTYPE_ASL, 32 - val), ctx);
		emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_ASR, val), ctx);
	} else if (val == 32) {
		emit(ARM_MOV_R(rd[1], rd[0]), ctx);
		emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_ASR, 31), ctx);
	} else {
		emit(ARM_MOV_SI(rd[1], rd[0], SRTYPE_ASR, val - 32), ctx);
		emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_ASR, 31), ctx);
	}

	arm_bpf_put_reg64(dst, rd, ctx);
}

static inline void emit_a32_mul_r64(const s8 dst[], const s8 src[],
				    struct jit_ctx *ctx) {
	const s8 *tmp = bpf2a32[TMP_REG_1];
	const s8 *tmp2 = bpf2a32[TMP_REG_2];
	const s8 *rd, *rt;

	/* Setup operands for multiplication */
	rd = arm_bpf_get_reg64(dst, tmp, ctx);
	rt = arm_bpf_get_reg64(src, tmp2, ctx);

	/* Do Multiplication */
	emit(ARM_MUL(ARM_IP, rd[1], rt[0]), ctx);
	emit(ARM_MUL(ARM_LR, rd[0], rt[1]), ctx);
	emit(ARM_ADD_R(ARM_LR, ARM_IP, ARM_LR), ctx);

	emit(ARM_UMULL(ARM_IP, rd[0], rd[1], rt[1]), ctx);
	emit(ARM_ADD_R(rd[0], ARM_LR, rd[0]), ctx);

	arm_bpf_put_reg32(dst_lo, ARM_IP, ctx);
	arm_bpf_put_reg32(dst_hi, rd[0], ctx);
}
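
/*
 * This is schoolbook multiplication truncated to 64 bits:
 *	dst * src = dst_lo * src_lo
 *		  + ((dst_lo * src_hi + dst_hi * src_lo) << 32)
 * UMULL produces the full 64-bit dst_lo * src_lo product, then the two
 * 32-bit cross products are added into its high word. The
 * dst_hi * src_hi term would only affect bits 64-127, so it is dropped.
 */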

/* *(size *)(dst + off) = src */
static inline void emit_str_r(const s8 dst, const s8 src[],
			      s32 off, struct jit_ctx *ctx, const u8 sz){
	const s8 *tmp = bpf2a32[TMP_REG_1];
	s32 off_max;
	s8 rd;

	rd = arm_bpf_get_reg32(dst, tmp[1], ctx);

	if (sz == BPF_H)
		off_max = 0xff;
	else
		off_max = 0xfff;

	if (off < 0 || off > off_max) {
		emit_a32_mov_i(tmp[0], off, ctx);
		emit(ARM_ADD_R(tmp[0], tmp[0], rd), ctx);
		rd = tmp[0];
		off = 0;
	}
	switch (sz) {
	case BPF_B:
		/* Store a Byte */
		emit(ARM_STRB_I(src_lo, rd, off), ctx);
		break;
	case BPF_H:
		/* Store a HalfWord */
		emit(ARM_STRH_I(src_lo, rd, off), ctx);
		break;
	case BPF_W:
		/* Store a Word */
		emit(ARM_STR_I(src_lo, rd, off), ctx);
		break;
	case BPF_DW:
		/* Store a Double Word */
		emit(ARM_STR_I(src_lo, rd, off), ctx);
		emit(ARM_STR_I(src_hi, rd, off + 4), ctx);
		break;
	}
}

/* dst = *(size*)(src + off) */
static inline void emit_ldx_r(const s8 dst[], const s8 src,
			      s32 off, struct jit_ctx *ctx, const u8 sz){
	const s8 *tmp = bpf2a32[TMP_REG_1];
	const s8 *rd = is_stacked(dst_lo) ? tmp : dst;
	s8 rm = src;
	s32 off_max;

	if (sz == BPF_H)
		off_max = 0xff;
	else
		off_max = 0xfff;

	if (off < 0 || off > off_max) {
		emit_a32_mov_i(tmp[0], off, ctx);
		emit(ARM_ADD_R(tmp[0], tmp[0], src), ctx);
		rm = tmp[0];
		off = 0;
	} else if (rd[1] == rm) {
		emit(ARM_MOV_R(tmp[0], rm), ctx);
		rm = tmp[0];
	}
	switch (sz) {
	case BPF_B:
		/* Load a Byte */
		emit(ARM_LDRB_I(rd[1], rm, off), ctx);
		if (!ctx->prog->aux->verifier_zext)
			emit_a32_mov_i(rd[0], 0, ctx);
		break;
	case BPF_H:
		/* Load a HalfWord */
		emit(ARM_LDRH_I(rd[1], rm, off), ctx);
		if (!ctx->prog->aux->verifier_zext)
			emit_a32_mov_i(rd[0], 0, ctx);
		break;
	case BPF_W:
		/* Load a Word */
		emit(ARM_LDR_I(rd[1], rm, off), ctx);
		if (!ctx->prog->aux->verifier_zext)
			emit_a32_mov_i(rd[0], 0, ctx);
		break;
	case BPF_DW:
		/* Load a Double Word */
		emit(ARM_LDR_I(rd[1], rm, off), ctx);
		emit(ARM_LDR_I(rd[0], rm, off + 4), ctx);
		break;
	}
	arm_bpf_put_reg64(dst, rd, ctx);
}

/* Arithmetic Operation */
static inline void emit_ar_r(const u8 rd, const u8 rt, const u8 rm,
			     const u8 rn, struct jit_ctx *ctx, u8 op,
			     bool is_jmp64) {
	switch (op) {
	case BPF_JSET:
		if (is_jmp64) {
			emit(ARM_AND_R(ARM_IP, rt, rn), ctx);
			emit(ARM_AND_R(ARM_LR, rd, rm), ctx);
			emit(ARM_ORRS_R(ARM_IP, ARM_LR, ARM_IP), ctx);
		} else {
			emit(ARM_ANDS_R(ARM_IP, rt, rn), ctx);
		}
		break;
	case BPF_JEQ:
	case BPF_JNE:
	case BPF_JGT:
	case BPF_JGE:
	case BPF_JLE:
	case BPF_JLT:
		if (is_jmp64) {
			emit(ARM_CMP_R(rd, rm), ctx);
			/* Only compare the low half if the high halves are equal. */
			_emit(ARM_COND_EQ, ARM_CMP_R(rt, rn), ctx);
		} else {
			emit(ARM_CMP_R(rt, rn), ctx);
		}
		break;
	case BPF_JSLE:
	case BPF_JSGT:
		emit(ARM_CMP_R(rn, rt), ctx);
		if (is_jmp64)
			emit(ARM_SBCS_R(ARM_IP, rm, rd), ctx);
		break;
	case BPF_JSLT:
	case BPF_JSGE:
		emit(ARM_CMP_R(rt, rn), ctx);
		if (is_jmp64)
			emit(ARM_SBCS_R(ARM_IP, rd, rm), ctx);
		break;
	}
}
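
/*
 * The signed 64-bit comparisons use the classic multiword compare: CMP
 * subtracts the low words to set the borrow, then SBCS subtracts the
 * high words with that borrow while updating N and V. After the pair,
 * the GE/LT condition codes reflect the signed relation of the full
 * 64-bit values, so a single conditional branch suffices.
 */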

static int out_offset = -1; /* initialized on the first pass of build_body() */
static int emit_bpf_tail_call(struct jit_ctx *ctx)
{

	/* bpf_tail_call(void *prog_ctx, struct bpf_array *array, u64 index) */
	const s8 *r2 = bpf2a32[BPF_REG_2];
	const s8 *r3 = bpf2a32[BPF_REG_3];
	const s8 *tmp = bpf2a32[TMP_REG_1];
	const s8 *tmp2 = bpf2a32[TMP_REG_2];
	const s8 *tcc = bpf2a32[TCALL_CNT];
	const s8 *tc;
	const int idx0 = ctx->idx;
#define cur_offset (ctx->idx - idx0)
#define jmp_offset (out_offset - (cur_offset) - 2)
	u32 lo, hi;
	s8 r_array, r_index;
	int off;

	/* if (index >= array->map.max_entries)
	 *	goto out;
	 */
	BUILD_BUG_ON(offsetof(struct bpf_array, map.max_entries) >
		     ARM_INST_LDST__IMM12);
	off = offsetof(struct bpf_array, map.max_entries);
	r_array = arm_bpf_get_reg32(r2[1], tmp2[0], ctx);
	/* index is 32-bit for arrays */
	r_index = arm_bpf_get_reg32(r3[1], tmp2[1], ctx);
	/* array->map.max_entries */
	emit(ARM_LDR_I(tmp[1], r_array, off), ctx);
	/* index >= array->map.max_entries */
	emit(ARM_CMP_R(r_index, tmp[1]), ctx);
	_emit(ARM_COND_CS, ARM_B(jmp_offset), ctx);

	/* tmp2[0] = array, tmp2[1] = index */

	/* if (tail_call_cnt > MAX_TAIL_CALL_CNT)
	 *	goto out;
	 * tail_call_cnt++;
	 */
	lo = (u32)MAX_TAIL_CALL_CNT;
	hi = (u32)((u64)MAX_TAIL_CALL_CNT >> 32);
	tc = arm_bpf_get_reg64(tcc, tmp, ctx);
	emit(ARM_CMP_I(tc[0], hi), ctx);
	_emit(ARM_COND_EQ, ARM_CMP_I(tc[1], lo), ctx);
	_emit(ARM_COND_HI, ARM_B(jmp_offset), ctx);
	emit(ARM_ADDS_I(tc[1], tc[1], 1), ctx);
	emit(ARM_ADC_I(tc[0], tc[0], 0), ctx);
	arm_bpf_put_reg64(tcc, tmp, ctx);

	/* prog = array->ptrs[index]
	 * if (prog == NULL)
	 *	goto out;
	 */
	BUILD_BUG_ON(imm8m(offsetof(struct bpf_array, ptrs)) < 0);
	off = imm8m(offsetof(struct bpf_array, ptrs));
	emit(ARM_ADD_I(tmp[1], r_array, off), ctx);
	emit(ARM_LDR_R_SI(tmp[1], tmp[1], r_index, SRTYPE_ASL, 2), ctx);
	emit(ARM_CMP_I(tmp[1], 0), ctx);
	_emit(ARM_COND_EQ, ARM_B(jmp_offset), ctx);

	/* goto *(prog->bpf_func + prologue_size); */
	BUILD_BUG_ON(offsetof(struct bpf_prog, bpf_func) >
		     ARM_INST_LDST__IMM12);
	off = offsetof(struct bpf_prog, bpf_func);
	emit(ARM_LDR_I(tmp[1], tmp[1], off), ctx);
	emit(ARM_ADD_I(tmp[1], tmp[1], ctx->prologue_bytes), ctx);
	emit_bx_r(tmp[1], ctx);

	/* out: */
	if (out_offset == -1)
		out_offset = cur_offset;
	if (cur_offset != out_offset) {
		pr_err_once("tail_call out_offset = %d, expected %d!\n",
			    cur_offset, out_offset);
		return -1;
	}
	return 0;
#undef cur_offset
#undef jmp_offset
}
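
/*
 * All three guard branches above target the same "out" label, whose
 * offset is only known once the whole sequence has been emitted. The
 * JIT therefore requires the sequence to have the same length on every
 * pass: out_offset is recorded on the first pass and verified on later
 * ones, so a length mismatch fails the build rather than emitting
 * branches to the wrong place.
 */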

/* 0xabcd => 0xcdab */
static inline void emit_rev16(const u8 rd, const u8 rn, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ < 6
	const s8 *tmp2 = bpf2a32[TMP_REG_2];

	emit(ARM_AND_I(tmp2[1], rn, 0xff), ctx);
	emit(ARM_MOV_SI(tmp2[0], rn, SRTYPE_LSR, 8), ctx);
	emit(ARM_AND_I(tmp2[0], tmp2[0], 0xff), ctx);
	emit(ARM_ORR_SI(rd, tmp2[0], tmp2[1], SRTYPE_LSL, 8), ctx);
#else /* ARMv6+ */
	emit(ARM_REV16(rd, rn), ctx);
#endif
}

/* 0xabcdefgh => 0xghefcdab */
static inline void emit_rev32(const u8 rd, const u8 rn, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ < 6
	const s8 *tmp2 = bpf2a32[TMP_REG_2];

	emit(ARM_AND_I(tmp2[1], rn, 0xff), ctx);
	emit(ARM_MOV_SI(tmp2[0], rn, SRTYPE_LSR, 24), ctx);
	emit(ARM_ORR_SI(ARM_IP, tmp2[0], tmp2[1], SRTYPE_LSL, 24), ctx);

	emit(ARM_MOV_SI(tmp2[1], rn, SRTYPE_LSR, 8), ctx);
	emit(ARM_AND_I(tmp2[1], tmp2[1], 0xff), ctx);
	emit(ARM_MOV_SI(tmp2[0], rn, SRTYPE_LSR, 16), ctx);
	emit(ARM_AND_I(tmp2[0], tmp2[0], 0xff), ctx);
	emit(ARM_MOV_SI(tmp2[0], tmp2[0], SRTYPE_LSL, 8), ctx);
	emit(ARM_ORR_SI(tmp2[0], tmp2[0], tmp2[1], SRTYPE_LSL, 16), ctx);
	emit(ARM_ORR_R(rd, ARM_IP, tmp2[0]), ctx);

#else /* ARMv6+ */
	emit(ARM_REV(rd, rn), ctx);
#endif
}
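
/*
 * For example, emit_rev32() turns 0x12345678 into 0x78563412: a single
 * REV on ARMv6+, or the mask-shift-or sequence above on older cores,
 * which swaps bytes 0<->3 into ARM_IP and bytes 1<->2 into tmp2[0]
 * before merging the two halves.
 */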

/* push a 64-bit BPF register pair onto the stack */
static inline void emit_push_r64(const s8 src[], struct jit_ctx *ctx)
{
	const s8 *tmp2 = bpf2a32[TMP_REG_2];
	const s8 *rt;
	u16 reg_set = 0;

	rt = arm_bpf_get_reg64(src, tmp2, ctx);

	reg_set = (1 << rt[1]) | (1 << rt[0]);
	emit(ARM_PUSH(reg_set), ctx);
}
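
/*
 * ARM PUSH (STMDB) stores the lowest-numbered register at the lowest
 * address, and every pair in bpf2a32 maps the low word to the
 * lower-numbered register (e.g. TMP_REG_2 is {R9, R8}), so the pushed
 * pair lands on the stack in little-endian word order, matching the
 * AAPCS layout a called helper expects for a 64-bit stack argument.
 */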

static void build_prologue(struct jit_ctx *ctx)
{
	const s8 r0 = bpf2a32[BPF_REG_0][1];
	const s8 r2 = bpf2a32[BPF_REG_1][1];
	const s8 r3 = bpf2a32[BPF_REG_1][0];
	const s8 r4 = bpf2a32[BPF_REG_6][1];
	const s8 fplo = bpf2a32[BPF_REG_FP][1];
	const s8 fphi = bpf2a32[BPF_REG_FP][0];
	const s8 *tcc = bpf2a32[TCALL_CNT];

	/* Save callee saved registers. */
#ifdef CONFIG_FRAME_POINTER
	u16 reg_set = CALLEE_PUSH_MASK | 1 << ARM_IP | 1 << ARM_PC;
	emit(ARM_MOV_R(ARM_IP, ARM_SP), ctx);
	emit(ARM_PUSH(reg_set), ctx);
	emit(ARM_SUB_I(ARM_FP, ARM_IP, 4), ctx);
#else
	emit(ARM_PUSH(CALLEE_PUSH_MASK), ctx);
	emit(ARM_MOV_R(ARM_FP, ARM_SP), ctx);
#endif
	/* Compute the BPF_FP value (bottom of the JIT scratch space) */
	emit(ARM_SUB_I(ARM_IP, ARM_SP, SCRATCH_SIZE), ctx);

	ctx->stack_size = imm8m(STACK_SIZE);

	/* Set up function call stack */
	emit(ARM_SUB_I(ARM_SP, ARM_SP, ctx->stack_size), ctx);

	/* Set up BPF prog stack base register */
	emit_a32_mov_r(fplo, ARM_IP, ctx);
	emit_a32_mov_i(fphi, 0, ctx);

	/* mov r4, 0 */
	emit(ARM_MOV_I(r4, 0), ctx);

	/* Move BPF_CTX to BPF_R1 */
	emit(ARM_MOV_R(r3, r4), ctx);
	emit(ARM_MOV_R(r2, r0), ctx);
	/* Initialize tail call count */
	emit(ARM_STR_I(r4, ARM_FP, EBPF_SCRATCH_TO_ARM_FP(tcc[0])), ctx);
	emit(ARM_STR_I(r4, ARM_FP, EBPF_SCRATCH_TO_ARM_FP(tcc[1])), ctx);
	/* end of prologue */
}

/* restore callee saved registers. */
static void build_epilogue(struct jit_ctx *ctx)
{
#ifdef CONFIG_FRAME_POINTER
	/* When using frame pointers, some additional registers need to
	 * be loaded. */
	u16 reg_set = CALLEE_POP_MASK | 1 << ARM_SP;
	emit(ARM_SUB_I(ARM_SP, ARM_FP, hweight16(reg_set) * 4), ctx);
	emit(ARM_LDM(ARM_SP, reg_set), ctx);
#else
	/* Restore callee saved registers. */
	emit(ARM_MOV_R(ARM_SP, ARM_FP), ctx);
	emit(ARM_POP(CALLEE_POP_MASK), ctx);
#endif
}

/*
 * Convert an eBPF instruction to native instruction, i.e.,
 * JITs an eBPF instruction.
 * Returns:
 *	0  - Successfully JITed an 8-byte eBPF instruction
 *	>0 - Successfully JITed a 16-byte eBPF instruction
 *	<0 - Failed to JIT.
 */
static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
{
	const u8 code = insn->code;
	const s8 *dst = bpf2a32[insn->dst_reg];
	const s8 *src = bpf2a32[insn->src_reg];
	const s8 *tmp = bpf2a32[TMP_REG_1];
	const s8 *tmp2 = bpf2a32[TMP_REG_2];
	const s16 off = insn->off;
	const s32 imm = insn->imm;
	const int i = insn - ctx->prog->insnsi;
	const bool is64 = BPF_CLASS(code) == BPF_ALU64;
	const s8 *rd, *rs;
	s8 rd_lo, rt, rm, rn;
	s32 jmp_offset;

#define check_imm(bits, imm) do {				\
	if ((imm) >= (1 << ((bits) - 1)) ||			\
	    (imm) < -(1 << ((bits) - 1))) {			\
		pr_info("[%2d] imm=%d(0x%x) out of range\n",	\
			i, imm, imm);				\
		return -EINVAL;					\
	}							\
} while (0)
#define check_imm24(imm) check_imm(24, imm)
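
	/*
	 * check_imm24() bounds the word offset of an ARM branch: the B
	 * encoding has a 24-bit signed immediate counted in instructions,
	 * i.e. roughly +/-32 MB of code, far beyond any JITed BPF program.
	 */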

	switch (code) {
	/* ALU operations */

	/* dst = src */
	case BPF_ALU | BPF_MOV | BPF_K:
	case BPF_ALU | BPF_MOV | BPF_X:
	case BPF_ALU64 | BPF_MOV | BPF_K:
	case BPF_ALU64 | BPF_MOV | BPF_X:
		switch (BPF_SRC(code)) {
		case BPF_X:
			if (imm == 1) {
				/* Special mov32 for zext */
				emit_a32_mov_i(dst_hi, 0, ctx);
				break;
			}
			emit_a32_mov_r64(is64, dst, src, ctx);
			break;
		case BPF_K:
			/* Sign-extend immediate value to destination reg */
			emit_a32_mov_se_i64(is64, dst, imm, ctx);
			break;
		}
		break;
	/* dst = dst + src/imm */
	/* dst = dst - src/imm */
	/* dst = dst | src/imm */
	/* dst = dst & src/imm */
	/* dst = dst ^ src/imm */
	/* dst = dst * src/imm */
	/* dst = dst << src */
	/* dst = dst >> src */
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU | BPF_ADD | BPF_X:
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU | BPF_SUB | BPF_X:
	case BPF_ALU | BPF_OR | BPF_K:
	case BPF_ALU | BPF_OR | BPF_X:
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU | BPF_AND | BPF_X:
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU | BPF_XOR | BPF_X:
	case BPF_ALU | BPF_MUL | BPF_K:
	case BPF_ALU | BPF_MUL | BPF_X:
	case BPF_ALU | BPF_LSH | BPF_X:
	case BPF_ALU | BPF_RSH | BPF_X:
	case BPF_ALU | BPF_ARSH | BPF_K:
	case BPF_ALU | BPF_ARSH | BPF_X:
	case BPF_ALU64 | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_ADD | BPF_X:
	case BPF_ALU64 | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_X:
	case BPF_ALU64 | BPF_OR | BPF_K:
	case BPF_ALU64 | BPF_OR | BPF_X:
	case BPF_ALU64 | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_AND | BPF_X:
	case BPF_ALU64 | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_X:
		switch (BPF_SRC(code)) {
		case BPF_X:
			emit_a32_alu_r64(is64, dst, src, ctx, BPF_OP(code));
			break;
		case BPF_K:
			/* Move the immediate to the temporary register,
			 * sign-extending it to 64 bits, so the ALU
			 * operation can safely be done on the full
			 * temporary register pair.
			 */
			emit_a32_mov_se_i64(is64, tmp2, imm, ctx);
			emit_a32_alu_r64(is64, dst, tmp2, ctx, BPF_OP(code));
			break;
		}
		break;
	/* dst = dst / src(imm) */
	/* dst = dst % src(imm) */
	case BPF_ALU | BPF_DIV | BPF_K:
	case BPF_ALU | BPF_DIV | BPF_X:
	case BPF_ALU | BPF_MOD | BPF_K:
	case BPF_ALU | BPF_MOD | BPF_X:
		rd_lo = arm_bpf_get_reg32(dst_lo, tmp2[1], ctx);
		switch (BPF_SRC(code)) {
		case BPF_X:
			rt = arm_bpf_get_reg32(src_lo, tmp2[0], ctx);
			break;
		case BPF_K:
			rt = tmp2[0];
			emit_a32_mov_i(rt, imm, ctx);
			break;
		default:
			rt = src_lo;
			break;
		}
		emit_udivmod(rd_lo, rd_lo, rt, ctx, BPF_OP(code));
		arm_bpf_put_reg32(dst_lo, rd_lo, ctx);
		if (!ctx->prog->aux->verifier_zext)
			emit_a32_mov_i(dst_hi, 0, ctx);
		break;
	case BPF_ALU64 | BPF_DIV | BPF_K:
	case BPF_ALU64 | BPF_DIV | BPF_X:
	case BPF_ALU64 | BPF_MOD | BPF_K:
	case BPF_ALU64 | BPF_MOD | BPF_X:
		goto notyet;
	/* dst = dst >> imm */
	/* dst = dst << imm */
	case BPF_ALU | BPF_RSH | BPF_K:
	case BPF_ALU | BPF_LSH | BPF_K:
		if (unlikely(imm > 31))
			return -EINVAL;
		if (imm)
			emit_a32_alu_i(dst_lo, imm, ctx, BPF_OP(code));
		if (!ctx->prog->aux->verifier_zext)
			emit_a32_mov_i(dst_hi, 0, ctx);
		break;
	/* dst = dst << imm */
	case BPF_ALU64 | BPF_LSH | BPF_K:
		if (unlikely(imm > 63))
			return -EINVAL;
		emit_a32_lsh_i64(dst, imm, ctx);
		break;
	/* dst = dst >> imm */
	case BPF_ALU64 | BPF_RSH | BPF_K:
		if (unlikely(imm > 63))
			return -EINVAL;
		emit_a32_rsh_i64(dst, imm, ctx);
		break;
	/* dst = dst << src */
	case BPF_ALU64 | BPF_LSH | BPF_X:
		emit_a32_lsh_r64(dst, src, ctx);
		break;
	/* dst = dst >> src */
	case BPF_ALU64 | BPF_RSH | BPF_X:
		emit_a32_rsh_r64(dst, src, ctx);
		break;
	/* dst = dst >> src (signed) */
	case BPF_ALU64 | BPF_ARSH | BPF_X:
		emit_a32_arsh_r64(dst, src, ctx);
		break;
	/* dst = dst >> imm (signed) */
	case BPF_ALU64 | BPF_ARSH | BPF_K:
		if (unlikely(imm > 63))
			return -EINVAL;
		emit_a32_arsh_i64(dst, imm, ctx);
		break;
	/* dst = -dst */
	case BPF_ALU | BPF_NEG:
		emit_a32_alu_i(dst_lo, 0, ctx, BPF_OP(code));
		if (!ctx->prog->aux->verifier_zext)
			emit_a32_mov_i(dst_hi, 0, ctx);
		break;
	/* dst = -dst (64 bit) */
	case BPF_ALU64 | BPF_NEG:
		emit_a32_neg64(dst, ctx);
		break;
	/* dst = dst * src/imm */
	case BPF_ALU64 | BPF_MUL | BPF_X:
	case BPF_ALU64 | BPF_MUL | BPF_K:
		switch (BPF_SRC(code)) {
		case BPF_X:
			emit_a32_mul_r64(dst, src, ctx);
			break;
		case BPF_K:
			/* Move the immediate to the temporary register,
			 * sign-extending it to 64 bits, so the
			 * multiplication can safely be done on the full
			 * temporary register pair.
			 */
			emit_a32_mov_se_i64(is64, tmp2, imm, ctx);
			emit_a32_mul_r64(dst, tmp2, ctx);
			break;
		}
		break;
	/* dst = htole(dst) */
	/* dst = htobe(dst) */
	case BPF_ALU | BPF_END | BPF_FROM_LE:
	case BPF_ALU | BPF_END | BPF_FROM_BE:
		rd = arm_bpf_get_reg64(dst, tmp, ctx);
		if (BPF_SRC(code) == BPF_FROM_LE)
			goto emit_bswap_uxt;
		switch (imm) {
		case 16:
			emit_rev16(rd[1], rd[1], ctx);
			goto emit_bswap_uxt;
		case 32:
			emit_rev32(rd[1], rd[1], ctx);
			goto emit_bswap_uxt;
		case 64:
			emit_rev32(ARM_LR, rd[1], ctx);
			emit_rev32(rd[1], rd[0], ctx);
			emit(ARM_MOV_R(rd[0], ARM_LR), ctx);
			break;
		}
		goto exit;
emit_bswap_uxt:
		switch (imm) {
		case 16:
			/* zero-extend 16 bits into 64 bits */
#if __LINUX_ARM_ARCH__ < 6
			emit_a32_mov_i(tmp2[1], 0xffff, ctx);
			emit(ARM_AND_R(rd[1], rd[1], tmp2[1]), ctx);
#else /* ARMv6+ */
			emit(ARM_UXTH(rd[1], rd[1]), ctx);
#endif
			if (!ctx->prog->aux->verifier_zext)
				emit(ARM_EOR_R(rd[0], rd[0], rd[0]), ctx);
			break;
		case 32:
			/* zero-extend 32 bits into 64 bits */
			if (!ctx->prog->aux->verifier_zext)
				emit(ARM_EOR_R(rd[0], rd[0], rd[0]), ctx);
			break;
		case 64:
			/* nop */
			break;
		}
exit:
		arm_bpf_put_reg64(dst, rd, ctx);
		break;
	/* dst = imm64 */
	case BPF_LD | BPF_IMM | BPF_DW:
	{
		u64 val = (u32)imm | (u64)insn[1].imm << 32;

		emit_a32_mov_i64(dst, val, ctx);

		return 1;
	}
	/* LDX: dst = *(size *)(src + off) */
	case BPF_LDX | BPF_MEM | BPF_W:
	case BPF_LDX | BPF_MEM | BPF_H:
	case BPF_LDX | BPF_MEM | BPF_B:
	case BPF_LDX | BPF_MEM | BPF_DW:
		rn = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
		emit_ldx_r(dst, rn, off, ctx, BPF_SIZE(code));
		break;
	/* ST: *(size *)(dst + off) = imm */
	case BPF_ST | BPF_MEM | BPF_W:
	case BPF_ST | BPF_MEM | BPF_H:
	case BPF_ST | BPF_MEM | BPF_B:
	case BPF_ST | BPF_MEM | BPF_DW:
		switch (BPF_SIZE(code)) {
		case BPF_DW:
			/* Sign-extend immediate value into temp reg */
			emit_a32_mov_se_i64(true, tmp2, imm, ctx);
			break;
		case BPF_W:
		case BPF_H:
		case BPF_B:
			emit_a32_mov_i(tmp2[1], imm, ctx);
			break;
		}
		emit_str_r(dst_lo, tmp2, off, ctx, BPF_SIZE(code));
		break;
	/* STX XADD: lock *(u32 *)(dst + off) += src */
	case BPF_STX | BPF_XADD | BPF_W:
	/* STX XADD: lock *(u64 *)(dst + off) += src */
	case BPF_STX | BPF_XADD | BPF_DW:
		goto notyet;
	/* STX: *(size *)(dst + off) = src */
	case BPF_STX | BPF_MEM | BPF_W:
	case BPF_STX | BPF_MEM | BPF_H:
	case BPF_STX | BPF_MEM | BPF_B:
	case BPF_STX | BPF_MEM | BPF_DW:
		rs = arm_bpf_get_reg64(src, tmp2, ctx);
		emit_str_r(dst_lo, rs, off, ctx, BPF_SIZE(code));
		break;
	/* PC += off if dst == src */
	/* PC += off if dst > src */
	/* PC += off if dst >= src */
	/* PC += off if dst < src */
	/* PC += off if dst <= src */
	/* PC += off if dst != src */
	/* PC += off if dst > src (signed) */
	/* PC += off if dst >= src (signed) */
	/* PC += off if dst < src (signed) */
	/* PC += off if dst <= src (signed) */
	/* PC += off if dst & src */
	case BPF_JMP | BPF_JEQ | BPF_X:
	case BPF_JMP | BPF_JGT | BPF_X:
	case BPF_JMP | BPF_JGE | BPF_X:
	case BPF_JMP | BPF_JNE | BPF_X:
	case BPF_JMP | BPF_JSGT | BPF_X:
	case BPF_JMP | BPF_JSGE | BPF_X:
	case BPF_JMP | BPF_JSET | BPF_X:
	case BPF_JMP | BPF_JLE | BPF_X:
	case BPF_JMP | BPF_JLT | BPF_X:
	case BPF_JMP | BPF_JSLT | BPF_X:
	case BPF_JMP | BPF_JSLE | BPF_X:
	case BPF_JMP32 | BPF_JEQ | BPF_X:
	case BPF_JMP32 | BPF_JGT | BPF_X:
	case BPF_JMP32 | BPF_JGE | BPF_X:
	case BPF_JMP32 | BPF_JNE | BPF_X:
	case BPF_JMP32 | BPF_JSGT | BPF_X:
	case BPF_JMP32 | BPF_JSGE | BPF_X:
	case BPF_JMP32 | BPF_JSET | BPF_X:
	case BPF_JMP32 | BPF_JLE | BPF_X:
	case BPF_JMP32 | BPF_JLT | BPF_X:
	case BPF_JMP32 | BPF_JSLT | BPF_X:
	case BPF_JMP32 | BPF_JSLE | BPF_X:
		/* Setup source registers */
		rm = arm_bpf_get_reg32(src_hi, tmp2[0], ctx);
		rn = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
		goto go_jmp;
	/* PC += off if dst == imm */
	/* PC += off if dst > imm */
	/* PC += off if dst >= imm */
	/* PC += off if dst < imm */
	/* PC += off if dst <= imm */
	/* PC += off if dst != imm */
	/* PC += off if dst > imm (signed) */
	/* PC += off if dst >= imm (signed) */
	/* PC += off if dst < imm (signed) */
	/* PC += off if dst <= imm (signed) */
	/* PC += off if dst & imm */
	case BPF_JMP | BPF_JEQ | BPF_K:
	case BPF_JMP | BPF_JGT | BPF_K:
	case BPF_JMP | BPF_JGE | BPF_K:
	case BPF_JMP | BPF_JNE | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
	case BPF_JMP | BPF_JSET | BPF_K:
	case BPF_JMP | BPF_JLT | BPF_K:
	case BPF_JMP | BPF_JLE | BPF_K:
	case BPF_JMP | BPF_JSLT | BPF_K:
	case BPF_JMP | BPF_JSLE | BPF_K:
	case BPF_JMP32 | BPF_JEQ | BPF_K:
	case BPF_JMP32 | BPF_JGT | BPF_K:
	case BPF_JMP32 | BPF_JGE | BPF_K:
	case BPF_JMP32 | BPF_JNE | BPF_K:
	case BPF_JMP32 | BPF_JSGT | BPF_K:
	case BPF_JMP32 | BPF_JSGE | BPF_K:
	case BPF_JMP32 | BPF_JSET | BPF_K:
	case BPF_JMP32 | BPF_JLT | BPF_K:
	case BPF_JMP32 | BPF_JLE | BPF_K:
	case BPF_JMP32 | BPF_JSLT | BPF_K:
	case BPF_JMP32 | BPF_JSLE | BPF_K:
		if (off == 0)
			break;
		rm = tmp2[0];
		rn = tmp2[1];
		/* Sign-extend immediate value */
		emit_a32_mov_se_i64(true, tmp2, imm, ctx);
go_jmp:
		/* Setup destination register */
		rd = arm_bpf_get_reg64(dst, tmp, ctx);

		/* Check for the condition */
		emit_ar_r(rd[0], rd[1], rm, rn, ctx, BPF_OP(code),
			  BPF_CLASS(code) == BPF_JMP);

		/* Setup JUMP instruction */
		jmp_offset = bpf2a32_offset(i+off, i, ctx);
		switch (BPF_OP(code)) {
		case BPF_JNE:
		case BPF_JSET:
			_emit(ARM_COND_NE, ARM_B(jmp_offset), ctx);
			break;
		case BPF_JEQ:
			_emit(ARM_COND_EQ, ARM_B(jmp_offset), ctx);
			break;
		case BPF_JGT:
			_emit(ARM_COND_HI, ARM_B(jmp_offset), ctx);
			break;
		case BPF_JGE:
			_emit(ARM_COND_CS, ARM_B(jmp_offset), ctx);
			break;
		case BPF_JSGT:
			_emit(ARM_COND_LT, ARM_B(jmp_offset), ctx);
			break;
		case BPF_JSGE:
			_emit(ARM_COND_GE, ARM_B(jmp_offset), ctx);
			break;
		case BPF_JLE:
			_emit(ARM_COND_LS, ARM_B(jmp_offset), ctx);
			break;
		case BPF_JLT:
			_emit(ARM_COND_CC, ARM_B(jmp_offset), ctx);
			break;
		case BPF_JSLT:
			_emit(ARM_COND_LT, ARM_B(jmp_offset), ctx);
			break;
		case BPF_JSLE:
			_emit(ARM_COND_GE, ARM_B(jmp_offset), ctx);
			break;
		}
		break;
	/* JMP OFF */
	case BPF_JMP | BPF_JA:
	{
		if (off == 0)
			break;
		jmp_offset = bpf2a32_offset(i+off, i, ctx);
		check_imm24(jmp_offset);
		emit(ARM_B(jmp_offset), ctx);
		break;
	}
	/* tail call */
	case BPF_JMP | BPF_TAIL_CALL:
		if (emit_bpf_tail_call(ctx))
			return -EFAULT;
		break;
	/* function call */
	case BPF_JMP | BPF_CALL:
	{
		const s8 *r0 = bpf2a32[BPF_REG_0];
		const s8 *r1 = bpf2a32[BPF_REG_1];
		const s8 *r2 = bpf2a32[BPF_REG_2];
		const s8 *r3 = bpf2a32[BPF_REG_3];
		const s8 *r4 = bpf2a32[BPF_REG_4];
		const s8 *r5 = bpf2a32[BPF_REG_5];
		const u32 func = (u32)__bpf_call_base + (u32)imm;

		emit_a32_mov_r64(true, r0, r1, ctx);
		emit_a32_mov_r64(true, r1, r2, ctx);
		emit_push_r64(r5, ctx);
		emit_push_r64(r4, ctx);
		emit_push_r64(r3, ctx);

		emit_a32_mov_i(tmp[1], func, ctx);
		emit_blx_r(tmp[1], ctx);

		/* pop the three argument pairs pushed above */
		emit(ARM_ADD_I(ARM_SP, ARM_SP, imm8m(24)), ctx);
		break;
	}
	/* function return */
	case BPF_JMP | BPF_EXIT:
		/* Optimization: when the last instruction is EXIT,
		 * simply fall through to the epilogue.
		 */
		if (i == ctx->prog->len - 1)
			break;
		jmp_offset = epilogue_offset(ctx);
		check_imm24(jmp_offset);
		emit(ARM_B(jmp_offset), ctx);
		break;
notyet:
		pr_info_once("*** NOT YET: opcode %02x ***\n", code);
		return -EFAULT;
	default:
		pr_err_once("unknown opcode %02x\n", code);
		return -EINVAL;
	}

	if (ctx->flags & FLAG_IMM_OVERFLOW)
		/*
		 * this instruction generated an overflow when
		 * trying to access the literal pool, so
		 * delegate this filter to the kernel interpreter.
		 */
		return -1;
	return 0;
}

static int build_body(struct jit_ctx *ctx)
{
	const struct bpf_prog *prog = ctx->prog;
	unsigned int i;

	for (i = 0; i < prog->len; i++) {
		const struct bpf_insn *insn = &(prog->insnsi[i]);
		int ret;

		ret = build_insn(insn, ctx);

		/* It's used with loading the 64 bit immediate value. */
		if (ret > 0) {
			i++;
			if (ctx->target == NULL)
				ctx->offsets[i] = ctx->idx;
			continue;
		}

		if (ctx->target == NULL)
			ctx->offsets[i] = ctx->idx;

		/* If unsuccessful, return with error code */
		if (ret)
			return ret;
	}
	return 0;
}

static int validate_code(struct jit_ctx *ctx)
{
	int i;

	for (i = 0; i < ctx->idx; i++) {
		if (ctx->target[i] == __opcode_to_mem_arm(ARM_INST_UDF))
			return -1;
	}

	return 0;
}

void bpf_jit_compile(struct bpf_prog *prog)
{
	/* Nothing to do here. We support Internal BPF. */
}

bool bpf_jit_needs_zext(void)
{
	return true;
}

struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	struct bpf_prog *tmp, *orig_prog = prog;
	struct bpf_binary_header *header;
	bool tmp_blinded = false;
	struct jit_ctx ctx;
	unsigned int tmp_idx;
	unsigned int image_size;
	u8 *image_ptr;

	/* If BPF JIT was not enabled then we must fall back to
	 * the interpreter.
	 */
	if (!prog->jit_requested)
		return orig_prog;

	/* If constant blinding was enabled and we failed during blinding
	 * then we must fall back to the interpreter. Otherwise, we save
	 * the new JITed code.
	 */
	tmp = bpf_jit_blind_constants(prog);

	if (IS_ERR(tmp))
		return orig_prog;
	if (tmp != prog) {
		tmp_blinded = true;
		prog = tmp;
	}

	memset(&ctx, 0, sizeof(ctx));
	ctx.prog = prog;
	ctx.cpu_architecture = cpu_architecture();

	/* If we cannot allocate memory for offsets[], we must fall back
	 * to the interpreter.
	 */
	ctx.offsets = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
	if (ctx.offsets == NULL) {
		prog = orig_prog;
		goto out;
	}

	/* 1) fake pass to find the length of the JITed code,
	 * to compute ctx->offsets and other context variables
	 * needed to compute final JITed code.
	 * Also, calculate random starting pointer/start of JITed code
	 * which is prefixed by a random number of fault instructions.
	 *
	 * If the first pass fails then there is no chance of it
	 * being successful in the second pass, so just fall back
	 * to the interpreter.
	 */
	if (build_body(&ctx)) {
		prog = orig_prog;
		goto out_off;
	}

	tmp_idx = ctx.idx;
	build_prologue(&ctx);
	ctx.prologue_bytes = (ctx.idx - tmp_idx) * 4;

	ctx.epilogue_offset = ctx.idx;

#if __LINUX_ARM_ARCH__ < 7
	tmp_idx = ctx.idx;
	build_epilogue(&ctx);
	ctx.epilogue_bytes = (ctx.idx - tmp_idx) * 4;

	ctx.idx += ctx.imm_count;
	if (ctx.imm_count) {
		ctx.imms = kcalloc(ctx.imm_count, sizeof(u32), GFP_KERNEL);
		if (ctx.imms == NULL) {
			prog = orig_prog;
			goto out_off;
		}
	}
#else
	/* there's nothing special about the epilogue on ARMv7 */
	build_epilogue(&ctx);
#endif
	/* Now we can get the actual image size of the JITed arm code.
	 * Currently, we are not considering the THUMB-2 instructions
	 * for jit, although it can decrease the size of the image.
	 *
	 * As each arm instruction is 32 bits long, we translate the
	 * number of JITed instructions into the size required to store
	 * the JITed code.
	 */
	image_size = sizeof(u32) * ctx.idx;

	/* Now we know the size of the structure to make */
	header = bpf_jit_binary_alloc(image_size, &image_ptr,
				      sizeof(u32), jit_fill_hole);
	/* If we cannot allocate memory for the header, we must fall back
	 * to the interpreter.
	 */
	if (header == NULL) {
		prog = orig_prog;
		goto out_imms;
	}

	/* 2.) Actual pass to generate final JIT code */
	ctx.target = (u32 *) image_ptr;
	ctx.idx = 0;

	build_prologue(&ctx);

	/* If building the body of the JITed code fails somehow,
	 * we fall back to the interpreter.
	 */
	if (build_body(&ctx) < 0) {
		image_ptr = NULL;
		bpf_jit_binary_free(header);
		prog = orig_prog;
		goto out_imms;
	}
	build_epilogue(&ctx);

	/* 3.) Extra pass to validate JITed code */
	if (validate_code(&ctx)) {
		image_ptr = NULL;
		bpf_jit_binary_free(header);
		prog = orig_prog;
		goto out_imms;
	}
	flush_icache_range((u32)header, (u32)(ctx.target + ctx.idx));

	if (bpf_jit_enable > 1)
		/* there are 2 passes here */
		bpf_jit_dump(prog->len, image_size, 2, ctx.target);

	bpf_jit_binary_lock_ro(header);
	prog->bpf_func = (void *)ctx.target;
	prog->jited = 1;
	prog->jited_len = image_size;

out_imms:
#if __LINUX_ARM_ARCH__ < 7
	if (ctx.imm_count)
		kfree(ctx.imms);
#endif
out_off:
	kfree(ctx.offsets);
out:
	if (tmp_blinded)
		bpf_jit_prog_release_other(prog, prog == orig_prog ?
					   tmp : orig_prog);
	return prog;
}