// SPDX-License-Identifier: GPL-2.0-only
/*
 * Just-In-Time compiler for eBPF filters on MIPS
 *
 * Copyright (c) 2017 Cavium, Inc.
 *
 * Based on code from:
 *
 * Copyright (c) 2014 Imagination Technologies Ltd.
 * Author: Markos Chandras <markos.chandras@imgtec.com>
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/slab.h>
#include <asm/bitops.h>
#include <asm/byteorder.h>
#include <asm/cacheflush.h>
#include <asm/cpu-features.h>
#include <asm/isa-rev.h>
#include <asm/uasm.h>

/* Registers used by JIT */
#define MIPS_R_ZERO	0
#define MIPS_R_AT	1
#define MIPS_R_V0	2	/* BPF_R0 */
#define MIPS_R_V1	3
#define MIPS_R_A0	4	/* BPF_R1 */
#define MIPS_R_A1	5	/* BPF_R2 */
#define MIPS_R_A2	6	/* BPF_R3 */
#define MIPS_R_A3	7	/* BPF_R4 */
#define MIPS_R_A4	8	/* BPF_R5 */
#define MIPS_R_T4	12	/* BPF_AX */
#define MIPS_R_T5	13
#define MIPS_R_T6	14
#define MIPS_R_T7	15
#define MIPS_R_S0	16	/* BPF_R6 */
#define MIPS_R_S1	17	/* BPF_R7 */
#define MIPS_R_S2	18	/* BPF_R8 */
#define MIPS_R_S3	19	/* BPF_R9 */
#define MIPS_R_S4	20	/* BPF_TCC */
#define MIPS_R_S5	21
#define MIPS_R_S6	22
#define MIPS_R_S7	23
#define MIPS_R_T8	24
#define MIPS_R_T9	25
#define MIPS_R_SP	29
#define MIPS_R_RA	31

/* eBPF flags */
#define EBPF_SAVE_S0	BIT(0)
#define EBPF_SAVE_S1	BIT(1)
#define EBPF_SAVE_S2	BIT(2)
#define EBPF_SAVE_S3	BIT(3)
#define EBPF_SAVE_S4	BIT(4)
#define EBPF_SAVE_RA	BIT(5)
#define EBPF_SEEN_FP	BIT(6)
#define EBPF_SEEN_TC	BIT(7)
#define EBPF_TCC_IN_V1	BIT(8)

/*
 * For the mips64 ISA, we need to track the value range or type for
 * each JIT register.  The BPF machine requires zero extended 32-bit
 * values, but the mips64 ISA requires sign extended 32-bit values.
 * At each point in the BPF program we track the state of every
 * register so that we can zero extend or sign extend as the BPF
 * semantics require.
 */
enum reg_val_type {
	/* uninitialized */
	REG_UNKNOWN,
	/* not known to be 32-bit compatible. */
	REG_64BIT,
	/* 32-bit compatible, no truncation needed for 64-bit ops. */
	REG_64BIT_32BIT,
	/* 32-bit compatible, need truncation for 64-bit ops. */
	REG_32BIT,
	/* 32-bit no sign/zero extension needed. */
	REG_32BIT_POS
};

/*
 * The high bit of an offsets[] entry indicates that a long branch
 * conversion was done at this insn.
 */
#define OFFSETS_B_CONV	BIT(31)

/**
 * struct jit_ctx - JIT context
 * @skf:		The BPF program being compiled
 * @stack_size:		eBPF stack size
 * @idx:		Instruction index
 * @flags:		JIT flags
 * @offsets:		Instruction offsets
 * @target:		Memory location for the compiled filter
 * @reg_val_types:	Packed enum reg_val_type for each register
 * @long_b_conversion:	Set when a branch was converted to a long sequence
 * @gen_b_offsets:	Compute branch offsets on this pass
 * @use_bbit_insns:	CPU has the Octeon bbit0/bbit1 branch instructions
 */
struct jit_ctx {
	const struct bpf_prog *skf;
	int stack_size;
	u32 idx;
	u32 flags;
	u32 *offsets;
	u32 *target;
	u64 *reg_val_types;
	unsigned int long_b_conversion:1;
	unsigned int gen_b_offsets:1;
	unsigned int use_bbit_insns:1;
};

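/*
 * Each register's reg_val_type occupies 3 bits of the packed u64, at
 * bit position reg * 3; the helpers below update and read one slot.
 */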
static void set_reg_val_type(u64 *rvt, int reg, enum reg_val_type type)
{
	*rvt &= ~(7ull << (reg * 3));
	*rvt |= ((u64)type << (reg * 3));
}

static enum reg_val_type get_reg_val_type(const struct jit_ctx *ctx,
					  int index, int reg)
{
	return (ctx->reg_val_types[index] >> (reg * 3)) & 7;
}

/* Simply emit the instruction if the JIT memory space has been allocated */
#define emit_instr_long(ctx, func64, func32, ...)		\
do {								\
	if ((ctx)->target != NULL) {				\
		u32 *p = &(ctx)->target[ctx->idx];		\
		if (IS_ENABLED(CONFIG_64BIT))			\
			uasm_i_##func64(&p, ##__VA_ARGS__);	\
		else						\
			uasm_i_##func32(&p, ##__VA_ARGS__);	\
	}							\
	(ctx)->idx++;						\
} while (0)

#define emit_instr(ctx, func, ...)				\
	emit_instr_long(ctx, func, func, ##__VA_ARGS__)

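/*
 * Compute the 28-bit target field of a MIPS 'j' instruction.  An
 * absolute jump can only reach addresses within the same 256 MB
 * segment, so return (unsigned int)-1 when the target lies outside
 * the segment containing the JIT image.
 */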
static unsigned int j_target(struct jit_ctx *ctx, int target_idx)
{
	unsigned long target_va, base_va;
	unsigned int r;

	if (!ctx->target)
		return 0;

	base_va = (unsigned long)ctx->target;
	target_va = base_va + (ctx->offsets[target_idx] & ~OFFSETS_B_CONV);

	if ((base_va & ~0x0ffffffful) != (target_va & ~0x0ffffffful))
		return (unsigned int)-1;
	r = target_va & 0x0ffffffful;
	return r;
}

/* Compute the immediate value for PC-relative branches. */
static u32 b_imm(unsigned int tgt, struct jit_ctx *ctx)
{
	if (!ctx->gen_b_offsets)
		return 0;

	/*
	 * We want a pc-relative branch.  tgt is the instruction offset
	 * we want to jump to.
	 *
	 * Branch on MIPS:
	 * I: target_offset <- sign_extend(offset)
	 * I+1: PC += target_offset (delay slot)
	 *
	 * ctx->idx currently points to the branch instruction
	 * but the offset is added to the delay slot so we need
	 * to subtract 4.
	 */
	return (ctx->offsets[tgt] & ~OFFSETS_B_CONV) -
		(ctx->idx * 4) - 4;
}

enum which_ebpf_reg {
	src_reg,
	src_reg_no_fp,
	dst_reg,
	dst_reg_fp_ok
};

/*
 * For eBPF, the register mapping naturally falls out of the
 * requirements of eBPF and the MIPS n64 ABI.  We don't maintain a
 * separate frame pointer, so BPF_REG_10 relative accesses are
 * adjusted to be $sp relative.
 */
static int ebpf_to_mips_reg(struct jit_ctx *ctx,
			    const struct bpf_insn *insn,
			    enum which_ebpf_reg w)
{
	int ebpf_reg = (w == src_reg || w == src_reg_no_fp) ?
		insn->src_reg : insn->dst_reg;

	switch (ebpf_reg) {
	case BPF_REG_0:
		return MIPS_R_V0;
	case BPF_REG_1:
		return MIPS_R_A0;
	case BPF_REG_2:
		return MIPS_R_A1;
	case BPF_REG_3:
		return MIPS_R_A2;
	case BPF_REG_4:
		return MIPS_R_A3;
	case BPF_REG_5:
		return MIPS_R_A4;
	case BPF_REG_6:
		ctx->flags |= EBPF_SAVE_S0;
		return MIPS_R_S0;
	case BPF_REG_7:
		ctx->flags |= EBPF_SAVE_S1;
		return MIPS_R_S1;
	case BPF_REG_8:
		ctx->flags |= EBPF_SAVE_S2;
		return MIPS_R_S2;
	case BPF_REG_9:
		ctx->flags |= EBPF_SAVE_S3;
		return MIPS_R_S3;
	case BPF_REG_10:
		if (w == dst_reg || w == src_reg_no_fp)
			goto bad_reg;
		ctx->flags |= EBPF_SEEN_FP;
		/*
		 * Needs special handling, return something that
		 * cannot be clobbered just in case.
		 */
		return MIPS_R_ZERO;
	case BPF_REG_AX:
		return MIPS_R_T4;
	default:
bad_reg:
		WARN(1, "Illegal bpf reg: %d\n", ebpf_reg);
		return -EINVAL;
	}
}
/*
 * eBPF stack frame will be something like:
 *
 *  Entry $sp ------>   +--------------------------------+
 *                      |   $ra  (optional)              |
 *                      +--------------------------------+
 *                      |   $s0  (optional)              |
 *                      +--------------------------------+
 *                      |   $s1  (optional)              |
 *                      +--------------------------------+
 *                      |   $s2  (optional)              |
 *                      +--------------------------------+
 *                      |   $s3  (optional)              |
 *                      +--------------------------------+
 *                      |   $s4  (optional)              |
 *                      +--------------------------------+
 *                      |   tmp-storage  (if $ra saved)  |
 * $sp + tmp_offset --> +--------------------------------+ <--BPF_REG_10
 *                      |   BPF_REG_10 relative storage  |
 *                      |    MAX_BPF_STACK (optional)    |
 *                      |      .                         |
 *                      |      .                         |
 *                      |      .                         |
 *     $sp -------->    +--------------------------------+
 *
 * If BPF_REG_10 is never referenced, then the MAX_BPF_STACK sized
 * area is not allocated.
 */
static int gen_int_prologue(struct jit_ctx *ctx)
{
	int stack_adjust = 0;
	int store_offset;
	int locals_size;

	if (ctx->flags & EBPF_SAVE_RA)
		/*
		 * If RA is being saved, we are doing a function call
		 * and may need an extra 8-byte tmp area.
		 */
		stack_adjust += 2 * sizeof(long);
	if (ctx->flags & EBPF_SAVE_S0)
		stack_adjust += sizeof(long);
	if (ctx->flags & EBPF_SAVE_S1)
		stack_adjust += sizeof(long);
	if (ctx->flags & EBPF_SAVE_S2)
		stack_adjust += sizeof(long);
	if (ctx->flags & EBPF_SAVE_S3)
		stack_adjust += sizeof(long);
	if (ctx->flags & EBPF_SAVE_S4)
		stack_adjust += sizeof(long);

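	/* MAX_BPF_STACK must be a multiple of 8 so the frame stays aligned. */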
	BUILD_BUG_ON(MAX_BPF_STACK & 7);
	locals_size = (ctx->flags & EBPF_SEEN_FP) ? MAX_BPF_STACK : 0;

	stack_adjust += locals_size;

	ctx->stack_size = stack_adjust;

	/*
	 * First instruction initializes the tail call count (TCC).
	 * On tail call we skip this instruction, and the TCC is
	 * passed in $v1 from the caller.
	 */
	emit_instr(ctx, addiu, MIPS_R_V1, MIPS_R_ZERO, MAX_TAIL_CALL_CNT);
	if (stack_adjust)
		emit_instr_long(ctx, daddiu, addiu,
					MIPS_R_SP, MIPS_R_SP, -stack_adjust);
	else
		return 0;

	store_offset = stack_adjust - sizeof(long);

	if (ctx->flags & EBPF_SAVE_RA) {
		emit_instr_long(ctx, sd, sw,
					MIPS_R_RA, store_offset, MIPS_R_SP);
		store_offset -= sizeof(long);
	}
	if (ctx->flags & EBPF_SAVE_S0) {
		emit_instr_long(ctx, sd, sw,
					MIPS_R_S0, store_offset, MIPS_R_SP);
		store_offset -= sizeof(long);
	}
	if (ctx->flags & EBPF_SAVE_S1) {
		emit_instr_long(ctx, sd, sw,
					MIPS_R_S1, store_offset, MIPS_R_SP);
		store_offset -= sizeof(long);
	}
	if (ctx->flags & EBPF_SAVE_S2) {
		emit_instr_long(ctx, sd, sw,
					MIPS_R_S2, store_offset, MIPS_R_SP);
		store_offset -= sizeof(long);
	}
	if (ctx->flags & EBPF_SAVE_S3) {
		emit_instr_long(ctx, sd, sw,
					MIPS_R_S3, store_offset, MIPS_R_SP);
		store_offset -= sizeof(long);
	}
	if (ctx->flags & EBPF_SAVE_S4) {
		emit_instr_long(ctx, sd, sw,
					MIPS_R_S4, store_offset, MIPS_R_SP);
		store_offset -= sizeof(long);
	}

	if ((ctx->flags & EBPF_SEEN_TC) && !(ctx->flags & EBPF_TCC_IN_V1))
		emit_instr_long(ctx, daddu, addu,
					MIPS_R_S4, MIPS_R_V1, MIPS_R_ZERO);

	return 0;
}

static int build_int_epilogue(struct jit_ctx *ctx, int dest_reg)
{
	const struct bpf_prog *prog = ctx->skf;
	int stack_adjust = ctx->stack_size;
	int store_offset = stack_adjust - sizeof(long);
	enum reg_val_type td;
	int r0 = MIPS_R_V0;

	if (dest_reg == MIPS_R_RA) {
		/* Don't let zero extended value escape. */
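		/* On MIPS64, sll with shift 0 sign-extends the low 32 bits. */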
		td = get_reg_val_type(ctx, prog->len, BPF_REG_0);
		if (td == REG_64BIT)
			emit_instr(ctx, sll, r0, r0, 0);
	}

	if (ctx->flags & EBPF_SAVE_RA) {
		emit_instr_long(ctx, ld, lw,
					MIPS_R_RA, store_offset, MIPS_R_SP);
		store_offset -= sizeof(long);
	}
	if (ctx->flags & EBPF_SAVE_S0) {
		emit_instr_long(ctx, ld, lw,
					MIPS_R_S0, store_offset, MIPS_R_SP);
		store_offset -= sizeof(long);
	}
	if (ctx->flags & EBPF_SAVE_S1) {
		emit_instr_long(ctx, ld, lw,
					MIPS_R_S1, store_offset, MIPS_R_SP);
		store_offset -= sizeof(long);
	}
	if (ctx->flags & EBPF_SAVE_S2) {
		emit_instr_long(ctx, ld, lw,
					MIPS_R_S2, store_offset, MIPS_R_SP);
		store_offset -= sizeof(long);
	}
	if (ctx->flags & EBPF_SAVE_S3) {
		emit_instr_long(ctx, ld, lw,
					MIPS_R_S3, store_offset, MIPS_R_SP);
		store_offset -= sizeof(long);
	}
	if (ctx->flags & EBPF_SAVE_S4) {
		emit_instr_long(ctx, ld, lw,
					MIPS_R_S4, store_offset, MIPS_R_SP);
		store_offset -= sizeof(long);
	}
	emit_instr(ctx, jr, dest_reg);

	if (stack_adjust)
		emit_instr_long(ctx, daddiu, addiu,
					MIPS_R_SP, MIPS_R_SP, stack_adjust);
	else
		emit_instr(ctx, nop);

	return 0;
}

static void gen_imm_to_reg(const struct bpf_insn *insn, int reg,
			   struct jit_ctx *ctx)
{
	if (insn->imm >= S16_MIN && insn->imm <= S16_MAX) {
		emit_instr(ctx, addiu, reg, MIPS_R_ZERO, insn->imm);
	} else {
		int lower = (s16)(insn->imm & 0xffff);
		int upper = insn->imm - lower;

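		/*
		 * addiu sign-extends its 16-bit immediate, so take the
		 * sign-extended low half and fold the correction into
		 * the lui value: upper + lower == insn->imm exactly.
		 */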
		emit_instr(ctx, lui, reg, upper >> 16);
		emit_instr(ctx, addiu, reg, reg, lower);
	}
}

static int gen_imm_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
			int idx)
{
	int upper_bound, lower_bound;
	int dst = ebpf_to_mips_reg(ctx, insn, dst_reg);

	if (dst < 0)
		return dst;

	switch (BPF_OP(insn->code)) {
	case BPF_MOV:
	case BPF_ADD:
		upper_bound = S16_MAX;
		lower_bound = S16_MIN;
		break;
	case BPF_SUB:
		upper_bound = -(int)S16_MIN;
		lower_bound = -(int)S16_MAX;
		break;
	case BPF_AND:
	case BPF_OR:
	case BPF_XOR:
		upper_bound = 0xffff;
		lower_bound = 0;
		break;
	case BPF_RSH:
	case BPF_LSH:
	case BPF_ARSH:
		/* Shift amounts are truncated, no need for bounds */
		upper_bound = S32_MAX;
		lower_bound = S32_MIN;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * Immediate move clobbers the register, so no sign/zero
	 * extension needed.
	 */
	if (BPF_CLASS(insn->code) == BPF_ALU64 &&
	    BPF_OP(insn->code) != BPF_MOV &&
	    get_reg_val_type(ctx, idx, insn->dst_reg) == REG_32BIT)
		emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
	/* BPF_ALU | BPF_LSH doesn't need separate sign extension */
	if (BPF_CLASS(insn->code) == BPF_ALU &&
	    BPF_OP(insn->code) != BPF_LSH &&
	    BPF_OP(insn->code) != BPF_MOV &&
	    get_reg_val_type(ctx, idx, insn->dst_reg) != REG_32BIT)
		emit_instr(ctx, sll, dst, dst, 0);

	if (insn->imm >= lower_bound && insn->imm <= upper_bound) {
		/* single insn immediate case */
		switch (BPF_OP(insn->code) | BPF_CLASS(insn->code)) {
		case BPF_ALU64 | BPF_MOV:
			emit_instr(ctx, daddiu, dst, MIPS_R_ZERO, insn->imm);
			break;
		case BPF_ALU64 | BPF_AND:
		case BPF_ALU | BPF_AND:
			emit_instr(ctx, andi, dst, dst, insn->imm);
			break;
		case BPF_ALU64 | BPF_OR:
		case BPF_ALU | BPF_OR:
			emit_instr(ctx, ori, dst, dst, insn->imm);
			break;
		case BPF_ALU64 | BPF_XOR:
		case BPF_ALU | BPF_XOR:
			emit_instr(ctx, xori, dst, dst, insn->imm);
			break;
		case BPF_ALU64 | BPF_ADD:
			emit_instr(ctx, daddiu, dst, dst, insn->imm);
			break;
		case BPF_ALU64 | BPF_SUB:
			emit_instr(ctx, daddiu, dst, dst, -insn->imm);
			break;
		case BPF_ALU64 | BPF_RSH:
			emit_instr(ctx, dsrl_safe, dst, dst, insn->imm & 0x3f);
			break;
		case BPF_ALU | BPF_RSH:
			emit_instr(ctx, srl, dst, dst, insn->imm & 0x1f);
			break;
		case BPF_ALU64 | BPF_LSH:
			emit_instr(ctx, dsll_safe, dst, dst, insn->imm & 0x3f);
			break;
		case BPF_ALU | BPF_LSH:
			emit_instr(ctx, sll, dst, dst, insn->imm & 0x1f);
			break;
		case BPF_ALU64 | BPF_ARSH:
			emit_instr(ctx, dsra_safe, dst, dst, insn->imm & 0x3f);
			break;
		case BPF_ALU | BPF_ARSH:
			emit_instr(ctx, sra, dst, dst, insn->imm & 0x1f);
			break;
		case BPF_ALU | BPF_MOV:
			emit_instr(ctx, addiu, dst, MIPS_R_ZERO, insn->imm);
			break;
		case BPF_ALU | BPF_ADD:
			emit_instr(ctx, addiu, dst, dst, insn->imm);
			break;
		case BPF_ALU | BPF_SUB:
			emit_instr(ctx, addiu, dst, dst, -insn->imm);
			break;
		default:
			return -EINVAL;
		}
	} else {
		/* multi insn immediate case */
		if (BPF_OP(insn->code) == BPF_MOV) {
			gen_imm_to_reg(insn, dst, ctx);
		} else {
			gen_imm_to_reg(insn, MIPS_R_AT, ctx);
			switch (BPF_OP(insn->code) | BPF_CLASS(insn->code)) {
			case BPF_ALU64 | BPF_AND:
			case BPF_ALU | BPF_AND:
				emit_instr(ctx, and, dst, dst, MIPS_R_AT);
				break;
			case BPF_ALU64 | BPF_OR:
			case BPF_ALU | BPF_OR:
				emit_instr(ctx, or, dst, dst, MIPS_R_AT);
				break;
			case BPF_ALU64 | BPF_XOR:
			case BPF_ALU | BPF_XOR:
				emit_instr(ctx, xor, dst, dst, MIPS_R_AT);
				break;
			case BPF_ALU64 | BPF_ADD:
				emit_instr(ctx, daddu, dst, dst, MIPS_R_AT);
				break;
			case BPF_ALU64 | BPF_SUB:
				emit_instr(ctx, dsubu, dst, dst, MIPS_R_AT);
				break;
			case BPF_ALU | BPF_ADD:
				emit_instr(ctx, addu, dst, dst, MIPS_R_AT);
				break;
			case BPF_ALU | BPF_SUB:
				emit_instr(ctx, subu, dst, dst, MIPS_R_AT);
				break;
			default:
				return -EINVAL;
			}
		}
	}

	return 0;
}

static void emit_const_to_reg(struct jit_ctx *ctx, int dst, u64 value)
{
	if (value >= 0xffffffffffff8000ull || value < 0x8000ull) {
		emit_instr(ctx, daddiu, dst, MIPS_R_ZERO, (int)value);
	} else if (value >= 0xffffffff80000000ull ||
		   (value < 0x80000000 && value > 0xffff)) {
		emit_instr(ctx, lui, dst, (s32)(s16)(value >> 16));
		emit_instr(ctx, ori, dst, dst, (unsigned int)(value & 0xffff));
	} else {
		int i;
		bool seen_part = false;
		int needed_shift = 0;

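		/*
		 * Emit the constant 16 bits at a time, most-significant
		 * halfword first: lui or ori materializes each non-zero
		 * part, with accumulated left shifts between parts.
		 */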
		for (i = 0; i < 4; i++) {
			u64 part = (value >> (16 * (3 - i))) & 0xffff;

			if (seen_part && needed_shift > 0 && (part || i == 3)) {
				emit_instr(ctx, dsll_safe, dst, dst, needed_shift);
				needed_shift = 0;
			}
			if (part) {
				if (i == 0 || (!seen_part && i < 3 && part < 0x8000)) {
					emit_instr(ctx, lui, dst, (s32)(s16)part);
					needed_shift = -16;
				} else {
					emit_instr(ctx, ori, dst,
						   seen_part ? dst : MIPS_R_ZERO,
						   (unsigned int)part);
				}
				seen_part = true;
			}
			if (seen_part)
				needed_shift += 16;
		}
	}
}

static int emit_bpf_tail_call(struct jit_ctx *ctx, int this_idx)
{
	int off, b_off;
	int tcc_reg;

	ctx->flags |= EBPF_SEEN_TC;
	/*
	 * if (index >= array->map.max_entries)
	 *     goto out;
	 */
	off = offsetof(struct bpf_array, map.max_entries);
	emit_instr(ctx, lwu, MIPS_R_T5, off, MIPS_R_A1);
	emit_instr(ctx, sltu, MIPS_R_AT, MIPS_R_T5, MIPS_R_A2);
	b_off = b_imm(this_idx + 1, ctx);
	emit_instr(ctx, bne, MIPS_R_AT, MIPS_R_ZERO, b_off);
	/*
	 * if (TCC-- < 0)
	 *     goto out;
	 */
	/* Delay slot */
	tcc_reg = (ctx->flags & EBPF_TCC_IN_V1) ? MIPS_R_V1 : MIPS_R_S4;
	emit_instr(ctx, daddiu, MIPS_R_T5, tcc_reg, -1);
	b_off = b_imm(this_idx + 1, ctx);
	emit_instr(ctx, bltz, tcc_reg, b_off);
	/*
	 * prog = array->ptrs[index];
	 * if (prog == NULL)
	 *     goto out;
	 */
	/* Delay slot */
	emit_instr(ctx, dsll, MIPS_R_T8, MIPS_R_A2, 3);
	emit_instr(ctx, daddu, MIPS_R_T8, MIPS_R_T8, MIPS_R_A1);
	off = offsetof(struct bpf_array, ptrs);
	emit_instr(ctx, ld, MIPS_R_AT, off, MIPS_R_T8);
	b_off = b_imm(this_idx + 1, ctx);
	emit_instr(ctx, beq, MIPS_R_AT, MIPS_R_ZERO, b_off);
	/* Delay slot */
	emit_instr(ctx, nop);

	/* goto *(prog->bpf_func + 4); */
	off = offsetof(struct bpf_prog, bpf_func);
	emit_instr(ctx, ld, MIPS_R_T9, off, MIPS_R_AT);
	/* All systems are go... propagate TCC */
	emit_instr(ctx, daddu, MIPS_R_V1, MIPS_R_T5, MIPS_R_ZERO);
	/* Skip first instruction (TCC initialization) */
	emit_instr(ctx, daddiu, MIPS_R_T9, MIPS_R_T9, 4);
	return build_int_epilogue(ctx, MIPS_R_T9);
}

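/* Branch immediates span 18 signed bits: +/-128 KiB from the delay slot. */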
static bool is_bad_offset(int b_off)
{
	return b_off > 0x1ffff || b_off < -0x20000;
}

/* Returns the number of insn slots consumed. */
static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
			  int this_idx, int exit_idx)
{
	int src, dst, r, td, ts, mem_off, b_off;
	bool need_swap, did_move, cmp_eq;
	unsigned int target = 0;
	u64 t64;
	s64 t64s;
	int bpf_op = BPF_OP(insn->code);

	if (IS_ENABLED(CONFIG_32BIT) && ((BPF_CLASS(insn->code) == BPF_ALU64)
						|| (bpf_op == BPF_DW)))
		return -EINVAL;

	switch (insn->code) {
	case BPF_ALU64 | BPF_ADD | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_SUB | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_OR | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_AND | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_LSH | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_RSH | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_XOR | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_ARSH | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_MOV | BPF_K: /* ALU64_IMM */
	case BPF_ALU | BPF_MOV | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_ADD | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_SUB | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_OR | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_AND | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_LSH | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_RSH | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_XOR | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_ARSH | BPF_K: /* ALU32_IMM */
		r = gen_imm_insn(insn, ctx, this_idx);
		if (r < 0)
			return r;
		break;
	case BPF_ALU64 | BPF_MUL | BPF_K: /* ALU64_IMM */
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
			emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
		if (insn->imm == 1) /* Mult by 1 is a nop */
			break;
		gen_imm_to_reg(insn, MIPS_R_AT, ctx);
		if (MIPS_ISA_REV >= 6) {
			emit_instr(ctx, dmulu, dst, dst, MIPS_R_AT);
		} else {
			emit_instr(ctx, dmultu, MIPS_R_AT, dst);
			emit_instr(ctx, mflo, dst);
		}
		break;
	case BPF_ALU64 | BPF_NEG | BPF_K: /* ALU64_IMM */
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
			emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
		emit_instr(ctx, dsubu, dst, MIPS_R_ZERO, dst);
		break;
	case BPF_ALU | BPF_MUL | BPF_K: /* ALU_IMM */
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
		if (td == REG_64BIT) {
			/* sign extend */
			emit_instr(ctx, sll, dst, dst, 0);
		}
		if (insn->imm == 1) /* Mult by 1 is a nop */
			break;
		gen_imm_to_reg(insn, MIPS_R_AT, ctx);
		if (MIPS_ISA_REV >= 6) {
			emit_instr(ctx, mulu, dst, dst, MIPS_R_AT);
		} else {
			emit_instr(ctx, multu, dst, MIPS_R_AT);
			emit_instr(ctx, mflo, dst);
		}
		break;
	case BPF_ALU | BPF_NEG | BPF_K: /* ALU_IMM */
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
		if (td == REG_64BIT) {
			/* sign extend */
			emit_instr(ctx, sll, dst, dst, 0);
		}
		emit_instr(ctx, subu, dst, MIPS_R_ZERO, dst);
		break;
	case BPF_ALU | BPF_DIV | BPF_K: /* ALU_IMM */
	case BPF_ALU | BPF_MOD | BPF_K: /* ALU_IMM */
		if (insn->imm == 0)
			return -EINVAL;
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
		if (td == REG_64BIT)
			/* sign extend */
			emit_instr(ctx, sll, dst, dst, 0);
		if (insn->imm == 1) {
			/* div by 1 is a nop, mod by 1 is zero */
			if (bpf_op == BPF_MOD)
				emit_instr(ctx, addu, dst, MIPS_R_ZERO, MIPS_R_ZERO);
			break;
		}
		gen_imm_to_reg(insn, MIPS_R_AT, ctx);
		if (MIPS_ISA_REV >= 6) {
			if (bpf_op == BPF_DIV)
				emit_instr(ctx, divu_r6, dst, dst, MIPS_R_AT);
			else
				emit_instr(ctx, modu, dst, dst, MIPS_R_AT);
			break;
		}
		emit_instr(ctx, divu, dst, MIPS_R_AT);
		if (bpf_op == BPF_DIV)
			emit_instr(ctx, mflo, dst);
		else
			emit_instr(ctx, mfhi, dst);
		break;
	case BPF_ALU64 | BPF_DIV | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_MOD | BPF_K: /* ALU64_IMM */
		if (insn->imm == 0)
			return -EINVAL;
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
			emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
		if (insn->imm == 1) {
			/* div by 1 is a nop, mod by 1 is zero */
			if (bpf_op == BPF_MOD)
				emit_instr(ctx, addu, dst, MIPS_R_ZERO, MIPS_R_ZERO);
			break;
		}
		gen_imm_to_reg(insn, MIPS_R_AT, ctx);
		if (MIPS_ISA_REV >= 6) {
			if (bpf_op == BPF_DIV)
				emit_instr(ctx, ddivu_r6, dst, dst, MIPS_R_AT);
			else
				emit_instr(ctx, modu, dst, dst, MIPS_R_AT);
			break;
		}
		emit_instr(ctx, ddivu, dst, MIPS_R_AT);
		if (bpf_op == BPF_DIV)
			emit_instr(ctx, mflo, dst);
		else
			emit_instr(ctx, mfhi, dst);
		break;
	case BPF_ALU64 | BPF_MOV | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_ADD | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_SUB | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_XOR | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_OR | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_AND | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_MUL | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_DIV | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_MOD | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_LSH | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_RSH | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_ARSH | BPF_X: /* ALU64_REG */
		src = ebpf_to_mips_reg(ctx, insn, src_reg);
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (src < 0 || dst < 0)
			return -EINVAL;
		if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
			emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
		did_move = false;
		if (insn->src_reg == BPF_REG_10) {
			if (bpf_op == BPF_MOV) {
				emit_instr(ctx, daddiu, dst, MIPS_R_SP, MAX_BPF_STACK);
				did_move = true;
			} else {
				emit_instr(ctx, daddiu, MIPS_R_AT, MIPS_R_SP, MAX_BPF_STACK);
				src = MIPS_R_AT;
			}
		} else if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
			int tmp_reg = MIPS_R_AT;

			if (bpf_op == BPF_MOV) {
				tmp_reg = dst;
				did_move = true;
			}
			emit_instr(ctx, daddu, tmp_reg, src, MIPS_R_ZERO);
			emit_instr(ctx, dinsu, tmp_reg, MIPS_R_ZERO, 32, 32);
			src = MIPS_R_AT;
		}
		switch (bpf_op) {
		case BPF_MOV:
			if (!did_move)
				emit_instr(ctx, daddu, dst, src, MIPS_R_ZERO);
			break;
		case BPF_ADD:
			emit_instr(ctx, daddu, dst, dst, src);
			break;
		case BPF_SUB:
			emit_instr(ctx, dsubu, dst, dst, src);
			break;
		case BPF_XOR:
			emit_instr(ctx, xor, dst, dst, src);
			break;
		case BPF_OR:
			emit_instr(ctx, or, dst, dst, src);
			break;
		case BPF_AND:
			emit_instr(ctx, and, dst, dst, src);
			break;
		case BPF_MUL:
			if (MIPS_ISA_REV >= 6) {
				emit_instr(ctx, dmulu, dst, dst, src);
			} else {
				emit_instr(ctx, dmultu, dst, src);
				emit_instr(ctx, mflo, dst);
			}
			break;
		case BPF_DIV:
		case BPF_MOD:
			if (MIPS_ISA_REV >= 6) {
				if (bpf_op == BPF_DIV)
					emit_instr(ctx, ddivu_r6,
							dst, dst, src);
				else
					emit_instr(ctx, modu, dst, dst, src);
				break;
			}
			emit_instr(ctx, ddivu, dst, src);
			if (bpf_op == BPF_DIV)
				emit_instr(ctx, mflo, dst);
			else
				emit_instr(ctx, mfhi, dst);
			break;
		case BPF_LSH:
			emit_instr(ctx, dsllv, dst, dst, src);
			break;
		case BPF_RSH:
			emit_instr(ctx, dsrlv, dst, dst, src);
			break;
		case BPF_ARSH:
			emit_instr(ctx, dsrav, dst, dst, src);
			break;
		default:
			pr_err("ALU64_REG NOT HANDLED\n");
			return -EINVAL;
		}
		break;
	case BPF_ALU | BPF_MOV | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_ADD | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_SUB | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_XOR | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_OR | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_AND | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_MUL | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_DIV | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_MOD | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_LSH | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_RSH | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_ARSH | BPF_X: /* ALU_REG */
		src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (src < 0 || dst < 0)
			return -EINVAL;
		td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
		if (td == REG_64BIT) {
			/* sign extend */
			emit_instr(ctx, sll, dst, dst, 0);
		}
		did_move = false;
		ts = get_reg_val_type(ctx, this_idx, insn->src_reg);
		if (ts == REG_64BIT) {
			int tmp_reg = MIPS_R_AT;

			if (bpf_op == BPF_MOV) {
				tmp_reg = dst;
				did_move = true;
			}
			/* sign extend */
			emit_instr(ctx, sll, tmp_reg, src, 0);
			src = MIPS_R_AT;
		}
		switch (bpf_op) {
		case BPF_MOV:
			if (!did_move)
				emit_instr(ctx, addu, dst, src, MIPS_R_ZERO);
			break;
		case BPF_ADD:
			emit_instr(ctx, addu, dst, dst, src);
			break;
		case BPF_SUB:
			emit_instr(ctx, subu, dst, dst, src);
			break;
		case BPF_XOR:
			emit_instr(ctx, xor, dst, dst, src);
			break;
		case BPF_OR:
			emit_instr(ctx, or, dst, dst, src);
			break;
		case BPF_AND:
			emit_instr(ctx, and, dst, dst, src);
			break;
		case BPF_MUL:
			emit_instr(ctx, mul, dst, dst, src);
			break;
		case BPF_DIV:
		case BPF_MOD:
			if (MIPS_ISA_REV >= 6) {
				if (bpf_op == BPF_DIV)
					emit_instr(ctx, divu_r6, dst, dst, src);
				else
					emit_instr(ctx, modu, dst, dst, src);
				break;
			}
			emit_instr(ctx, divu, dst, src);
			if (bpf_op == BPF_DIV)
				emit_instr(ctx, mflo, dst);
			else
				emit_instr(ctx, mfhi, dst);
			break;
		case BPF_LSH:
			emit_instr(ctx, sllv, dst, dst, src);
			break;
		case BPF_RSH:
			emit_instr(ctx, srlv, dst, dst, src);
			break;
		case BPF_ARSH:
			emit_instr(ctx, srav, dst, dst, src);
			break;
		default:
			pr_err("ALU_REG NOT HANDLED\n");
			return -EINVAL;
		}
		break;
	case BPF_JMP | BPF_EXIT:
		if (this_idx + 1 < exit_idx) {
			b_off = b_imm(exit_idx, ctx);
			if (is_bad_offset(b_off))
				return -E2BIG;
			emit_instr(ctx, beq, MIPS_R_ZERO, MIPS_R_ZERO, b_off);
			emit_instr(ctx, nop);
		}
		break;
	case BPF_JMP | BPF_JEQ | BPF_K: /* JMP_IMM */
	case BPF_JMP | BPF_JNE | BPF_K: /* JMP_IMM */
		cmp_eq = (bpf_op == BPF_JEQ);
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
		if (dst < 0)
			return dst;
		if (insn->imm == 0) {
			src = MIPS_R_ZERO;
		} else {
			gen_imm_to_reg(insn, MIPS_R_AT, ctx);
			src = MIPS_R_AT;
		}
		goto jeq_common;
	case BPF_JMP | BPF_JEQ | BPF_X: /* JMP_REG */
	case BPF_JMP | BPF_JNE | BPF_X:
	case BPF_JMP | BPF_JSLT | BPF_X:
	case BPF_JMP | BPF_JSLE | BPF_X:
	case BPF_JMP | BPF_JSGT | BPF_X:
	case BPF_JMP | BPF_JSGE | BPF_X:
	case BPF_JMP | BPF_JLT | BPF_X:
	case BPF_JMP | BPF_JLE | BPF_X:
	case BPF_JMP | BPF_JGT | BPF_X:
	case BPF_JMP | BPF_JGE | BPF_X:
	case BPF_JMP | BPF_JSET | BPF_X:
		src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (src < 0 || dst < 0)
			return -EINVAL;
		td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
		ts = get_reg_val_type(ctx, this_idx, insn->src_reg);
		if (td == REG_32BIT && ts != REG_32BIT) {
			emit_instr(ctx, sll, MIPS_R_AT, src, 0);
			src = MIPS_R_AT;
		} else if (ts == REG_32BIT && td != REG_32BIT) {
			emit_instr(ctx, sll, MIPS_R_AT, dst, 0);
			dst = MIPS_R_AT;
		}
		if (bpf_op == BPF_JSET) {
			emit_instr(ctx, and, MIPS_R_AT, dst, src);
			cmp_eq = false;
			dst = MIPS_R_AT;
			src = MIPS_R_ZERO;
		} else if (bpf_op == BPF_JSGT || bpf_op == BPF_JSLE) {
			emit_instr(ctx, dsubu, MIPS_R_AT, dst, src);
			if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
				b_off = b_imm(exit_idx, ctx);
				if (is_bad_offset(b_off))
					return -E2BIG;
				if (bpf_op == BPF_JSGT)
					emit_instr(ctx, blez, MIPS_R_AT, b_off);
				else
					emit_instr(ctx, bgtz, MIPS_R_AT, b_off);
				emit_instr(ctx, nop);
				return 2; /* We consumed the exit. */
			}
			b_off = b_imm(this_idx + insn->off + 1, ctx);
			if (is_bad_offset(b_off))
				return -E2BIG;
			if (bpf_op == BPF_JSGT)
				emit_instr(ctx, bgtz, MIPS_R_AT, b_off);
			else
				emit_instr(ctx, blez, MIPS_R_AT, b_off);
			emit_instr(ctx, nop);
			break;
		} else if (bpf_op == BPF_JSGE || bpf_op == BPF_JSLT) {
			emit_instr(ctx, slt, MIPS_R_AT, dst, src);
			cmp_eq = bpf_op == BPF_JSGE;
			dst = MIPS_R_AT;
			src = MIPS_R_ZERO;
		} else if (bpf_op == BPF_JGT || bpf_op == BPF_JLE) {
			/* dst or src could be AT */
			emit_instr(ctx, dsubu, MIPS_R_T8, dst, src);
			emit_instr(ctx, sltu, MIPS_R_AT, dst, src);
			/* SP known to be non-zero, movz becomes boolean not */
			if (MIPS_ISA_REV >= 6) {
				emit_instr(ctx, seleqz, MIPS_R_T9,
						MIPS_R_SP, MIPS_R_T8);
			} else {
				emit_instr(ctx, movz, MIPS_R_T9,
						MIPS_R_SP, MIPS_R_T8);
				emit_instr(ctx, movn, MIPS_R_T9,
						MIPS_R_ZERO, MIPS_R_T8);
			}
			emit_instr(ctx, or, MIPS_R_AT, MIPS_R_T9, MIPS_R_AT);
			cmp_eq = bpf_op == BPF_JGT;
			dst = MIPS_R_AT;
			src = MIPS_R_ZERO;
		} else if (bpf_op == BPF_JGE || bpf_op == BPF_JLT) {
			emit_instr(ctx, sltu, MIPS_R_AT, dst, src);
			cmp_eq = bpf_op == BPF_JGE;
			dst = MIPS_R_AT;
			src = MIPS_R_ZERO;
		} else { /* JNE/JEQ case */
			cmp_eq = (bpf_op == BPF_JEQ);
		}
jeq_common:
		/*
		 * If the next insn is EXIT and we are jumping around
		 * only it, invert the sense of the compare and
		 * conditionally jump to the exit.  Poor man's branch
		 * chaining.
		 */
		if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
			b_off = b_imm(exit_idx, ctx);
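			/*
			 * If the target is beyond the 18-bit branch
			 * range, invert the condition and branch over
			 * an absolute jump instead: b_off of 3 words
			 * hops the delay-slot nop, the j, and its nop.
			 */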
			if (is_bad_offset(b_off)) {
				target = j_target(ctx, exit_idx);
				if (target == (unsigned int)-1)
					return -E2BIG;
				cmp_eq = !cmp_eq;
				b_off = 4 * 3;
				if (!(ctx->offsets[this_idx] & OFFSETS_B_CONV)) {
					ctx->offsets[this_idx] |= OFFSETS_B_CONV;
					ctx->long_b_conversion = 1;
				}
			}

			if (cmp_eq)
				emit_instr(ctx, bne, dst, src, b_off);
			else
				emit_instr(ctx, beq, dst, src, b_off);
			emit_instr(ctx, nop);
			if (ctx->offsets[this_idx] & OFFSETS_B_CONV) {
				emit_instr(ctx, j, target);
				emit_instr(ctx, nop);
			}
			return 2; /* We consumed the exit. */
		}
		b_off = b_imm(this_idx + insn->off + 1, ctx);
		if (is_bad_offset(b_off)) {
			target = j_target(ctx, this_idx + insn->off + 1);
			if (target == (unsigned int)-1)
				return -E2BIG;
			cmp_eq = !cmp_eq;
			b_off = 4 * 3;
			if (!(ctx->offsets[this_idx] & OFFSETS_B_CONV)) {
				ctx->offsets[this_idx] |= OFFSETS_B_CONV;
				ctx->long_b_conversion = 1;
			}
		}

		if (cmp_eq)
			emit_instr(ctx, beq, dst, src, b_off);
		else
			emit_instr(ctx, bne, dst, src, b_off);
		emit_instr(ctx, nop);
		if (ctx->offsets[this_idx] & OFFSETS_B_CONV) {
			emit_instr(ctx, j, target);
			emit_instr(ctx, nop);
		}
		break;
	case BPF_JMP | BPF_JSGT | BPF_K: /* JMP_IMM */
	case BPF_JMP | BPF_JSGE | BPF_K: /* JMP_IMM */
	case BPF_JMP | BPF_JSLT | BPF_K: /* JMP_IMM */
	case BPF_JMP | BPF_JSLE | BPF_K: /* JMP_IMM */
		cmp_eq = (bpf_op == BPF_JSGE);
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
		if (dst < 0)
			return dst;

		if (insn->imm == 0) {
			if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
				b_off = b_imm(exit_idx, ctx);
				if (is_bad_offset(b_off))
					return -E2BIG;
				switch (bpf_op) {
				case BPF_JSGT:
					emit_instr(ctx, blez, dst, b_off);
					break;
				case BPF_JSGE:
					emit_instr(ctx, bltz, dst, b_off);
					break;
				case BPF_JSLT:
					emit_instr(ctx, bgez, dst, b_off);
					break;
				case BPF_JSLE:
					emit_instr(ctx, bgtz, dst, b_off);
					break;
				}
				emit_instr(ctx, nop);
				return 2; /* We consumed the exit. */
			}
			b_off = b_imm(this_idx + insn->off + 1, ctx);
			if (is_bad_offset(b_off))
				return -E2BIG;
			switch (bpf_op) {
			case BPF_JSGT:
				emit_instr(ctx, bgtz, dst, b_off);
				break;
			case BPF_JSGE:
				emit_instr(ctx, bgez, dst, b_off);
				break;
			case BPF_JSLT:
				emit_instr(ctx, bltz, dst, b_off);
				break;
			case BPF_JSLE:
				emit_instr(ctx, blez, dst, b_off);
				break;
			}
			emit_instr(ctx, nop);
			break;
		}
		/*
		 * Only a "less than" compare is available, so use
		 * imm + 1: "dst > imm" becomes "!(dst < imm + 1)" and
		 * "dst <= imm" becomes "dst < imm + 1".
		 */
		if (bpf_op == BPF_JSGT)
			t64s = insn->imm + 1;
		else if (bpf_op == BPF_JSLE)
			t64s = insn->imm + 1;
		else
			t64s = insn->imm;

		cmp_eq = bpf_op == BPF_JSGT || bpf_op == BPF_JSGE;
		if (t64s >= S16_MIN && t64s <= S16_MAX) {
			emit_instr(ctx, slti, MIPS_R_AT, dst, (int)t64s);
			src = MIPS_R_AT;
			dst = MIPS_R_ZERO;
			goto jeq_common;
		}
		emit_const_to_reg(ctx, MIPS_R_AT, (u64)t64s);
		emit_instr(ctx, slt, MIPS_R_AT, dst, MIPS_R_AT);
		src = MIPS_R_AT;
		dst = MIPS_R_ZERO;
		goto jeq_common;

	case BPF_JMP | BPF_JGT | BPF_K:
	case BPF_JMP | BPF_JGE | BPF_K:
	case BPF_JMP | BPF_JLT | BPF_K:
	case BPF_JMP | BPF_JLE | BPF_K:
		cmp_eq = (bpf_op == BPF_JGE);
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
		if (dst < 0)
			return dst;
		/*
		 * Only a "less than" compare is available, so use
		 * imm + 1: "dst > imm" becomes "!(dst < imm + 1)" and
		 * "dst <= imm" becomes "dst < imm + 1".
		 */
		if (bpf_op == BPF_JGT)
			t64s = (u64)(u32)(insn->imm) + 1;
		else if (bpf_op == BPF_JLE)
			t64s = (u64)(u32)(insn->imm) + 1;
		else
			t64s = (u64)(u32)(insn->imm);

		cmp_eq = bpf_op == BPF_JGT || bpf_op == BPF_JGE;

		emit_const_to_reg(ctx, MIPS_R_AT, (u64)t64s);
		emit_instr(ctx, sltu, MIPS_R_AT, dst, MIPS_R_AT);
		src = MIPS_R_AT;
		dst = MIPS_R_ZERO;
		goto jeq_common;

	case BPF_JMP | BPF_JSET | BPF_K: /* JMP_IMM */
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
		if (dst < 0)
			return dst;

		if (ctx->use_bbit_insns && hweight32((u32)insn->imm) == 1) {
			if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
				b_off = b_imm(exit_idx, ctx);
				if (is_bad_offset(b_off))
					return -E2BIG;
				emit_instr(ctx, bbit0, dst, ffs((u32)insn->imm) - 1, b_off);
				emit_instr(ctx, nop);
				return 2; /* We consumed the exit. */
			}
			b_off = b_imm(this_idx + insn->off + 1, ctx);
			if (is_bad_offset(b_off))
				return -E2BIG;
			emit_instr(ctx, bbit1, dst, ffs((u32)insn->imm) - 1, b_off);
			emit_instr(ctx, nop);
			break;
		}
		t64 = (u32)insn->imm;
		emit_const_to_reg(ctx, MIPS_R_AT, t64);
		emit_instr(ctx, and, MIPS_R_AT, dst, MIPS_R_AT);
		src = MIPS_R_AT;
		dst = MIPS_R_ZERO;
		cmp_eq = false;
		goto jeq_common;

	case BPF_JMP | BPF_JA:
		/*
		 * Prefer relative branch for easier debugging, but
		 * fall back if needed.
		 */
		b_off = b_imm(this_idx + insn->off + 1, ctx);
		if (is_bad_offset(b_off)) {
			target = j_target(ctx, this_idx + insn->off + 1);
			if (target == (unsigned int)-1)
				return -E2BIG;
			emit_instr(ctx, j, target);
		} else {
			emit_instr(ctx, b, b_off);
		}
		emit_instr(ctx, nop);
		break;
	case BPF_LD | BPF_DW | BPF_IMM:
		if (insn->src_reg != 0)
			return -EINVAL;
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		t64 = ((u64)(u32)insn->imm) | ((u64)(insn + 1)->imm << 32);
		emit_const_to_reg(ctx, dst, t64);
		return 2; /* Double slot insn */

	case BPF_JMP | BPF_CALL:
		ctx->flags |= EBPF_SAVE_RA;
		t64s = (s64)insn->imm + (long)__bpf_call_base;
		emit_const_to_reg(ctx, MIPS_R_T9, (u64)t64s);
		emit_instr(ctx, jalr, MIPS_R_RA, MIPS_R_T9);
		/* delay slot */
		emit_instr(ctx, nop);
		break;

	case BPF_JMP | BPF_TAIL_CALL:
		if (emit_bpf_tail_call(ctx, this_idx))
			return -EINVAL;
		break;

	case BPF_ALU | BPF_END | BPF_FROM_BE:
	case BPF_ALU | BPF_END | BPF_FROM_LE:
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
		if (insn->imm == 64 && td == REG_32BIT)
			emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);

		if (insn->imm != 64 && td == REG_64BIT) {
			/* sign extend */
			emit_instr(ctx, sll, dst, dst, 0);
		}

#ifdef __BIG_ENDIAN
		need_swap = (BPF_SRC(insn->code) == BPF_FROM_LE);
#else
		need_swap = (BPF_SRC(insn->code) == BPF_FROM_BE);
#endif
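		/*
		 * wsbh swaps the bytes within each halfword; rotr by 16
		 * then completes a 32-bit byte swap.  dsbh + dshd
		 * together perform the full 64-bit byte swap.
		 */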
		if (insn->imm == 16) {
			if (need_swap)
				emit_instr(ctx, wsbh, dst, dst);
			emit_instr(ctx, andi, dst, dst, 0xffff);
		} else if (insn->imm == 32) {
			if (need_swap) {
				emit_instr(ctx, wsbh, dst, dst);
				emit_instr(ctx, rotr, dst, dst, 16);
			}
		} else { /* 64-bit */
			if (need_swap) {
				emit_instr(ctx, dsbh, dst, dst);
				emit_instr(ctx, dshd, dst, dst);
			}
		}
		break;

	case BPF_ST | BPF_NOSPEC: /* speculation barrier */
		break;

	case BPF_ST | BPF_B | BPF_MEM:
	case BPF_ST | BPF_H | BPF_MEM:
	case BPF_ST | BPF_W | BPF_MEM:
	case BPF_ST | BPF_DW | BPF_MEM:
		if (insn->dst_reg == BPF_REG_10) {
			ctx->flags |= EBPF_SEEN_FP;
			dst = MIPS_R_SP;
			mem_off = insn->off + MAX_BPF_STACK;
		} else {
			dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
			if (dst < 0)
				return dst;
			mem_off = insn->off;
		}
		gen_imm_to_reg(insn, MIPS_R_AT, ctx);
		switch (BPF_SIZE(insn->code)) {
		case BPF_B:
			emit_instr(ctx, sb, MIPS_R_AT, mem_off, dst);
			break;
		case BPF_H:
			emit_instr(ctx, sh, MIPS_R_AT, mem_off, dst);
			break;
		case BPF_W:
			emit_instr(ctx, sw, MIPS_R_AT, mem_off, dst);
			break;
		case BPF_DW:
			emit_instr(ctx, sd, MIPS_R_AT, mem_off, dst);
			break;
		}
		break;

	case BPF_LDX | BPF_B | BPF_MEM:
	case BPF_LDX | BPF_H | BPF_MEM:
	case BPF_LDX | BPF_W | BPF_MEM:
	case BPF_LDX | BPF_DW | BPF_MEM:
		if (insn->src_reg == BPF_REG_10) {
			ctx->flags |= EBPF_SEEN_FP;
			src = MIPS_R_SP;
			mem_off = insn->off + MAX_BPF_STACK;
		} else {
			src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
			if (src < 0)
				return src;
			mem_off = insn->off;
		}
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		switch (BPF_SIZE(insn->code)) {
		case BPF_B:
			emit_instr(ctx, lbu, dst, mem_off, src);
			break;
		case BPF_H:
			emit_instr(ctx, lhu, dst, mem_off, src);
			break;
		case BPF_W:
			emit_instr(ctx, lw, dst, mem_off, src);
			break;
		case BPF_DW:
			emit_instr(ctx, ld, dst, mem_off, src);
			break;
		}
		break;

	case BPF_STX | BPF_B | BPF_MEM:
	case BPF_STX | BPF_H | BPF_MEM:
	case BPF_STX | BPF_W | BPF_MEM:
	case BPF_STX | BPF_DW | BPF_MEM:
	case BPF_STX | BPF_W | BPF_XADD:
	case BPF_STX | BPF_DW | BPF_XADD:
		if (insn->dst_reg == BPF_REG_10) {
			ctx->flags |= EBPF_SEEN_FP;
			dst = MIPS_R_SP;
			mem_off = insn->off + MAX_BPF_STACK;
		} else {
			dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
			if (dst < 0)
				return dst;
			mem_off = insn->off;
		}
		src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
		if (src < 0)
			return src;
		if (BPF_MODE(insn->code) == BPF_XADD) {
			/*
			 * If mem_off does not fit within the 9 bit ll/sc
			 * instruction immediate field, use a temp reg.
			 */
			if (MIPS_ISA_REV >= 6 &&
			    (mem_off >= BIT(8) || mem_off < -BIT(8))) {
				emit_instr(ctx, daddiu, MIPS_R_T6,
						dst, mem_off);
				mem_off = 0;
				dst = MIPS_R_T6;
			}
			switch (BPF_SIZE(insn->code)) {
			case BPF_W:
				if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
					emit_instr(ctx, sll, MIPS_R_AT, src, 0);
					src = MIPS_R_AT;
				}
				emit_instr(ctx, ll, MIPS_R_T8, mem_off, dst);
				emit_instr(ctx, addu, MIPS_R_T8, MIPS_R_T8, src);
				emit_instr(ctx, sc, MIPS_R_T8, mem_off, dst);
				/*
				 * On failure back up to LL (-4
				 * instructions of 4 bytes each).
				 */
				emit_instr(ctx, beq, MIPS_R_T8, MIPS_R_ZERO, -4 * 4);
				emit_instr(ctx, nop);
				break;
			case BPF_DW:
				if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
					emit_instr(ctx, daddu, MIPS_R_AT, src, MIPS_R_ZERO);
					emit_instr(ctx, dinsu, MIPS_R_AT, MIPS_R_ZERO, 32, 32);
					src = MIPS_R_AT;
				}
				emit_instr(ctx, lld, MIPS_R_T8, mem_off, dst);
				emit_instr(ctx, daddu, MIPS_R_T8, MIPS_R_T8, src);
				emit_instr(ctx, scd, MIPS_R_T8, mem_off, dst);
				emit_instr(ctx, beq, MIPS_R_T8, MIPS_R_ZERO, -4 * 4);
				emit_instr(ctx, nop);
				break;
			}
		} else { /* BPF_MEM */
			switch (BPF_SIZE(insn->code)) {
			case BPF_B:
				emit_instr(ctx, sb, src, mem_off, dst);
				break;
			case BPF_H:
				emit_instr(ctx, sh, src, mem_off, dst);
				break;
			case BPF_W:
				emit_instr(ctx, sw, src, mem_off, dst);
				break;
			case BPF_DW:
				if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
					emit_instr(ctx, daddu, MIPS_R_AT, src, MIPS_R_ZERO);
					emit_instr(ctx, dinsu, MIPS_R_AT, MIPS_R_ZERO, 32, 32);
					src = MIPS_R_AT;
				}
				emit_instr(ctx, sd, src, mem_off, dst);
				break;
			}
		}
		break;

	default:
		pr_err("NOT HANDLED %d - (%02x)\n",
		       this_idx, (unsigned int)insn->code);
		return -EINVAL;
	}
	return 1;
}

#define RVT_VISITED_MASK 0xc000000000000000ull
#define RVT_FALL_THROUGH 0x4000000000000000ull
#define RVT_BRANCH_TAKEN 0x8000000000000000ull
#define RVT_DONE (RVT_FALL_THROUGH | RVT_BRANCH_TAKEN)
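/*
 * Bits 62-63 of each reg_val_types entry record which control-flow
 * edges out of that insn have already been followed during value
 * propagation; an entry with neither bit set was never reached.
 */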

static int build_int_body(struct jit_ctx *ctx)
{
	const struct bpf_prog *prog = ctx->skf;
	const struct bpf_insn *insn;
	int i, r;

	for (i = 0; i < prog->len; ) {
		insn = prog->insnsi + i;
		if ((ctx->reg_val_types[i] & RVT_VISITED_MASK) == 0) {
			/* dead instruction, don't emit it. */
			i++;
			continue;
		}

		if (ctx->target == NULL)
			ctx->offsets[i] = (ctx->offsets[i] & OFFSETS_B_CONV) | (ctx->idx * 4);

		r = build_one_insn(insn, ctx, i, prog->len);
		if (r < 0)
			return r;
		i += r;
	}
	/* epilogue offset */
	if (ctx->target == NULL)
		ctx->offsets[i] = ctx->idx * 4;

	/*
	 * All exits have an offset of the epilogue, some offsets may
	 * not have been set due to branch-around threading, so set
	 * them now.
	 */
	if (ctx->target == NULL)
		for (i = 0; i < prog->len; i++) {
			insn = prog->insnsi + i;
			if (insn->code == (BPF_JMP | BPF_EXIT))
				ctx->offsets[i] = ctx->idx * 4;
		}
	return 0;
}

/* return the last idx processed, or negative for error */
static int reg_val_propagate_range(struct jit_ctx *ctx, u64 initial_rvt,
				   int start_idx, bool follow_taken)
{
	const struct bpf_prog *prog = ctx->skf;
	const struct bpf_insn *insn;
	u64 exit_rvt = initial_rvt;
	u64 *rvt = ctx->reg_val_types;
	int idx;
	int reg;

	for (idx = start_idx; idx < prog->len; idx++) {
		rvt[idx] = (rvt[idx] & RVT_VISITED_MASK) | exit_rvt;
		insn = prog->insnsi + idx;
		switch (BPF_CLASS(insn->code)) {
		case BPF_ALU:
			switch (BPF_OP(insn->code)) {
			case BPF_ADD:
			case BPF_SUB:
			case BPF_MUL:
			case BPF_DIV:
			case BPF_OR:
			case BPF_AND:
			case BPF_LSH:
			case BPF_RSH:
			case BPF_NEG:
			case BPF_MOD:
			case BPF_XOR:
				set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
				break;
			case BPF_MOV:
				if (BPF_SRC(insn->code)) {
					set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
				} else {
					/* IMM to REG move */
					if (insn->imm >= 0)
						set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
					else
						set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
				}
				break;
			case BPF_END:
				if (insn->imm == 64)
					set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
				else if (insn->imm == 32)
					set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
				else /* insn->imm == 16 */
					set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
				break;
			}
			rvt[idx] |= RVT_DONE;
			break;
		case BPF_ALU64:
			switch (BPF_OP(insn->code)) {
			case BPF_MOV:
				if (BPF_SRC(insn->code)) {
					/* REG to REG move */
					set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
				} else {
					/* IMM to REG move */
					if (insn->imm >= 0)
						set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
					else
						set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT_32BIT);
				}
				break;
			default:
				set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
			}
			rvt[idx] |= RVT_DONE;
			break;
		case BPF_LD:
			switch (BPF_SIZE(insn->code)) {
			case BPF_DW:
				if (BPF_MODE(insn->code) == BPF_IMM) {
					s64 val;

					val = (s64)((u32)insn->imm | ((u64)(insn + 1)->imm << 32));
					if (val > 0 && val <= S32_MAX)
						set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
					else if (val >= S32_MIN && val <= S32_MAX)
						set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT_32BIT);
					else
						set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
					rvt[idx] |= RVT_DONE;
					idx++;
				} else {
					set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
				}
				break;
			case BPF_B:
			case BPF_H:
				set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
				break;
			case BPF_W:
				if (BPF_MODE(insn->code) == BPF_IMM)
					set_reg_val_type(&exit_rvt, insn->dst_reg,
							 insn->imm >= 0 ? REG_32BIT_POS : REG_32BIT);
				else
					set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
				break;
			}
			rvt[idx] |= RVT_DONE;
			break;
		case BPF_LDX:
			switch (BPF_SIZE(insn->code)) {
			case BPF_DW:
				set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
				break;
			case BPF_B:
			case BPF_H:
				set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
				break;
			case BPF_W:
				set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
				break;
			}
			rvt[idx] |= RVT_DONE;
			break;
		case BPF_JMP:
			switch (BPF_OP(insn->code)) {
			case BPF_EXIT:
				rvt[idx] = RVT_DONE | exit_rvt;
				rvt[prog->len] = exit_rvt;
				return idx;
			case BPF_JA:
				rvt[idx] |= RVT_DONE;
				idx += insn->off;
				break;
			case BPF_JEQ:
			case BPF_JGT:
			case BPF_JGE:
			case BPF_JLT:
			case BPF_JLE:
			case BPF_JSET:
			case BPF_JNE:
			case BPF_JSGT:
			case BPF_JSGE:
			case BPF_JSLT:
			case BPF_JSLE:
				if (follow_taken) {
					rvt[idx] |= RVT_BRANCH_TAKEN;
					idx += insn->off;
					follow_taken = false;
				} else {
					rvt[idx] |= RVT_FALL_THROUGH;
				}
				break;
			case BPF_CALL:
				set_reg_val_type(&exit_rvt, BPF_REG_0, REG_64BIT);
				/* Upon call return, argument registers are clobbered. */
				for (reg = BPF_REG_0; reg <= BPF_REG_5; reg++)
					set_reg_val_type(&exit_rvt, reg, REG_64BIT);

				rvt[idx] |= RVT_DONE;
				break;
			default:
				WARN(1, "Unhandled BPF_JMP case.\n");
				rvt[idx] |= RVT_DONE;
				break;
			}
			break;
		default:
			rvt[idx] |= RVT_DONE;
			break;
		}
	}
	return idx;
}

/*
 * Track the value range (i.e. 32-bit vs. 64-bit) of each register at
 * each eBPF insn.  This allows unneeded sign and zero extension
 * operations to be omitted.
 *
 * Doesn't yet handle confluence of control paths with conflicting
 * ranges, but it is good enough for most sane code.
 */
static int reg_val_propagate(struct jit_ctx *ctx)
{
	const struct bpf_prog *prog = ctx->skf;
	u64 exit_rvt;
	int reg;
	int i;

	/*
	 * 11 registers * 3 bits/reg leaves the top bits free for other
	 * uses.  Bits 62..63 are used to see if we have visited an insn.
	 */
	exit_rvt = 0;

	/* Upon entry, argument registers are 64-bit. */
	for (reg = BPF_REG_1; reg <= BPF_REG_5; reg++)
		set_reg_val_type(&exit_rvt, reg, REG_64BIT);

	/*
	 * First follow all conditional branches on the fall-through
	 * edge of control flow.
	 */
	reg_val_propagate_range(ctx, exit_rvt, 0, false);
restart_search:
	/*
	 * Then repeatedly find the first conditional branch where
	 * both edges of control flow have not been taken, and follow
	 * the branch taken edge.  We will end up restarting the
	 * search once per conditional branch insn.
	 */
	for (i = 0; i < prog->len; i++) {
		u64 rvt = ctx->reg_val_types[i];

		if ((rvt & RVT_VISITED_MASK) == RVT_DONE ||
		    (rvt & RVT_VISITED_MASK) == 0)
			continue;
		if ((rvt & RVT_VISITED_MASK) == RVT_FALL_THROUGH) {
			reg_val_propagate_range(ctx, rvt & ~RVT_VISITED_MASK, i, true);
		} else { /* RVT_BRANCH_TAKEN */
			WARN(1, "Unexpected RVT_BRANCH_TAKEN case.\n");
			reg_val_propagate_range(ctx, rvt & ~RVT_VISITED_MASK, i, false);
		}
		goto restart_search;
	}
	/*
	 * Eventually all conditional branches have been followed on
	 * both branches and we are done.  Any insn that has not been
	 * visited at this point is dead.
	 */

	return 0;
}

static void jit_fill_hole(void *area, unsigned int size)
{
	u32 *p;

	/* We are guaranteed to have aligned memory. */
	for (p = area; size >= sizeof(u32); size -= sizeof(u32))
		uasm_i_break(&p, BRK_BUG); /* Increments p */
}

struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	struct bpf_prog *orig_prog = prog;
	bool tmp_blinded = false;
	struct bpf_prog *tmp;
	struct bpf_binary_header *header = NULL;
	struct jit_ctx ctx;
	unsigned int image_size;
	u8 *image_ptr;

	if (!prog->jit_requested)
		return prog;

	tmp = bpf_jit_blind_constants(prog);
	/* If blinding was requested and we failed during blinding,
	 * we must fall back to the interpreter.
	 */
	if (IS_ERR(tmp))
		return orig_prog;
	if (tmp != prog) {
		tmp_blinded = true;
		prog = tmp;
	}

	memset(&ctx, 0, sizeof(ctx));

	preempt_disable();
	switch (current_cpu_type()) {
	case CPU_CAVIUM_OCTEON:
	case CPU_CAVIUM_OCTEON_PLUS:
	case CPU_CAVIUM_OCTEON2:
	case CPU_CAVIUM_OCTEON3:
		ctx.use_bbit_insns = 1;
		break;
	default:
		ctx.use_bbit_insns = 0;
	}
	preempt_enable();

	ctx.offsets = kcalloc(prog->len + 1, sizeof(*ctx.offsets), GFP_KERNEL);
	if (ctx.offsets == NULL)
		goto out_err;

	ctx.reg_val_types = kcalloc(prog->len + 1, sizeof(*ctx.reg_val_types), GFP_KERNEL);
	if (ctx.reg_val_types == NULL)
		goto out_err;

	ctx.skf = prog;

	if (reg_val_propagate(&ctx))
		goto out_err;

	/*
	 * First pass discovers used resources and instruction offsets
	 * assuming short branches are used.
	 */
	if (build_int_body(&ctx))
		goto out_err;

	/*
	 * If no calls are made (EBPF_SAVE_RA not set), the tail call
	 * count can stay in $v1, else we must save it in $s4.
	 */
	if (ctx.flags & EBPF_SEEN_TC) {
		if (ctx.flags & EBPF_SAVE_RA)
			ctx.flags |= EBPF_SAVE_S4;
		else
			ctx.flags |= EBPF_TCC_IN_V1;
	}

	/*
	 * Second pass generates offsets.  If any branches are out of
	 * range, a jump-around long sequence is generated, and we have
	 * to try again from the beginning to generate the new
	 * offsets.  This is done until no additional conversions are
	 * necessary.
	 */
	do {
		ctx.idx = 0;
		ctx.gen_b_offsets = 1;
		ctx.long_b_conversion = 0;
		if (gen_int_prologue(&ctx))
			goto out_err;
		if (build_int_body(&ctx))
			goto out_err;
		if (build_int_epilogue(&ctx, MIPS_R_RA))
			goto out_err;
	} while (ctx.long_b_conversion);

	image_size = 4 * ctx.idx;

	header = bpf_jit_binary_alloc(image_size, &image_ptr,
				      sizeof(u32), jit_fill_hole);
	if (header == NULL)
		goto out_err;

	ctx.target = (u32 *)image_ptr;

	/* Third pass generates the code */
	ctx.idx = 0;
	if (gen_int_prologue(&ctx))
		goto out_err;
	if (build_int_body(&ctx))
		goto out_err;
	if (build_int_epilogue(&ctx, MIPS_R_RA))
		goto out_err;

	/* Update the icache */
	flush_icache_range((unsigned long)ctx.target,
			   (unsigned long)&ctx.target[ctx.idx]);

	if (bpf_jit_enable > 1)
		/* Dump JIT code */
		bpf_jit_dump(prog->len, image_size, 2, ctx.target);

	bpf_jit_binary_lock_ro(header);
	prog->bpf_func = (void *)ctx.target;
	prog->jited = 1;
	prog->jited_len = image_size;
out_normal:
	if (tmp_blinded)
		bpf_jit_prog_release_other(prog, prog == orig_prog ?
					   tmp : orig_prog);
	kfree(ctx.offsets);
	kfree(ctx.reg_val_types);

	return prog;

out_err:
	prog = orig_prog;
	if (header)
		bpf_jit_binary_free(header);
	goto out_normal;
}
1934