1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * bpf_jit_comp64.c: eBPF JIT compiler
4  *
5  * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
6  *		  IBM Corporation
7  *
8  * Based on the powerpc classic BPF JIT compiler by Matt Evans
9  */
10 #include <linux/moduleloader.h>
11 #include <asm/cacheflush.h>
12 #include <asm/asm-compat.h>
13 #include <linux/netdevice.h>
14 #include <linux/filter.h>
15 #include <linux/if_vlan.h>
16 #include <asm/kprobes.h>
17 #include <linux/bpf.h>
18 #include <asm/security_features.h>
19 
20 #include "bpf_jit64.h"
21 
22 static void bpf_jit_fill_ill_insns(void *area, unsigned int size)
23 {
24 	memset32(area, BREAKPOINT_INSTRUCTION, size/4);
25 }
26 
27 static inline void bpf_flush_icache(void *start, void *end)
28 {
29 	smp_wmb();
30 	flush_icache_range((unsigned long)start, (unsigned long)end);
31 }
32 
33 static inline bool bpf_is_seen_register(struct codegen_context *ctx, int i)
34 {
35 	return (ctx->seen & (1 << (31 - b2p[i])));
36 }
37 
38 static inline void bpf_set_seen_register(struct codegen_context *ctx, int i)
39 {
40 	ctx->seen |= (1 << (31 - b2p[i]));
41 }
42 
43 static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
44 {
45 	/*
46 	 * We only need a stack frame if:
47 	 * - we call other functions (kernel helpers), or
48 	 * - the bpf program uses its stack area
49 	 * The latter condition is deduced from the usage of BPF_REG_FP
50 	 */
51 	return ctx->seen & SEEN_FUNC || bpf_is_seen_register(ctx, BPF_REG_FP);
52 }
53 
54 /*
55  * When not setting up our own stackframe, the redzone usage is:
56  *
57  *		[	prev sp		] <-------------
58  *		[	  ...       	] 		|
59  * sp (r1) --->	[    stack pointer	] --------------
60  *		[   nv gpr save area	] 5*8
61  *		[    tail_call_cnt	] 8
62  *		[    local_tmp_var	] 16
63  *		[   unused red zone	] 208 bytes protected
64  */
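/*
 * Without a stack frame, the JIT's scratch data therefore lives in the
 * redzone at the offsets shown above: local_tmp_var at
 * -(BPF_PPC_STACK_SAVE + 24) and tail_call_cnt at -(BPF_PPC_STACK_SAVE + 8),
 * which is what bpf_jit_stack_local() and bpf_jit_stack_tailcallcnt() below
 * compute (BPF_PPC_STACK_SAVE itself comes from bpf_jit64.h).
 */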
65 static int bpf_jit_stack_local(struct codegen_context *ctx)
66 {
67 	if (bpf_has_stack_frame(ctx))
68 		return STACK_FRAME_MIN_SIZE + ctx->stack_size;
69 	else
70 		return -(BPF_PPC_STACK_SAVE + 24);
71 }
72 
73 static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
74 {
75 	return bpf_jit_stack_local(ctx) + 16;
76 }
77 
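/*
 * Offset (relative to r1) at which non-volatile GPR 'reg' is saved: within
 * our stack frame, just below the previous stack pointer, if we have one,
 * otherwise in the "nv gpr save area" of the redzone shown above.
 */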
78 static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
79 {
80 	if (reg >= BPF_PPC_NVR_MIN && reg < 32)
81 		return (bpf_has_stack_frame(ctx) ?
82 			(BPF_PPC_STACKFRAME + ctx->stack_size) : 0)
83 				- (8 * (32 - reg));
84 
85 	pr_err("BPF JIT is asking about unknown registers");
86 	BUG();
87 }
88 
89 static void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
90 {
91 	int i;
92 
93 	/*
94 	 * Initialize tail_call_cnt if we do tail calls.
95 	 * Otherwise, put in NOPs so that it can be skipped when we are
96 	 * invoked through a tail call.
97 	 */
98 	if (ctx->seen & SEEN_TAILCALL) {
99 		EMIT(PPC_RAW_LI(b2p[TMP_REG_1], 0));
100 		/* this goes in the redzone */
101 		PPC_BPF_STL(b2p[TMP_REG_1], 1, -(BPF_PPC_STACK_SAVE + 8));
102 	} else {
103 		EMIT(PPC_RAW_NOP());
104 		EMIT(PPC_RAW_NOP());
105 	}
106 
107 #define BPF_TAILCALL_PROLOGUE_SIZE	8
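/*
 * The two instructions emitted above (li+std, or two nops) are 8 bytes;
 * tail calls jump past them, see bpf_jit_emit_tail_call().
 */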
108 
109 	if (bpf_has_stack_frame(ctx)) {
110 		/*
111 		 * We need a stack frame, but we don't necessarily need to
112 		 * save/restore LR unless we call other functions
113 		 */
114 		if (ctx->seen & SEEN_FUNC) {
115 			EMIT(PPC_INST_MFLR | __PPC_RT(R0));
116 			PPC_BPF_STL(0, 1, PPC_LR_STKOFF);
117 		}
118 
119 		PPC_BPF_STLU(1, 1, -(BPF_PPC_STACKFRAME + ctx->stack_size));
120 	}
121 
122 	/*
123 	 * Back up non-volatile regs -- BPF registers 6-10
124 	 * If we haven't created our own stack frame, we save these
125 	 * in the protected zone below the previous stack frame
126 	 */
127 	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
128 		if (bpf_is_seen_register(ctx, i))
129 			PPC_BPF_STL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i]));
130 
131 	/* Setup frame pointer to point to the bpf stack area */
132 	if (bpf_is_seen_register(ctx, BPF_REG_FP))
133 		EMIT(PPC_RAW_ADDI(b2p[BPF_REG_FP], 1,
134 				STACK_FRAME_MIN_SIZE + ctx->stack_size));
135 }
136 
137 static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
138 {
139 	int i;
140 
141 	/* Restore NVRs */
142 	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
143 		if (bpf_is_seen_register(ctx, i))
144 			PPC_BPF_LL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i]));
145 
146 	/* Tear down our stack frame */
147 	if (bpf_has_stack_frame(ctx)) {
148 		EMIT(PPC_RAW_ADDI(1, 1, BPF_PPC_STACKFRAME + ctx->stack_size));
149 		if (ctx->seen & SEEN_FUNC) {
150 			PPC_BPF_LL(0, 1, PPC_LR_STKOFF);
151 			EMIT(PPC_RAW_MTLR(0));
152 		}
153 	}
154 }
155 
156 static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
157 {
158 	bpf_jit_emit_common_epilogue(image, ctx);
159 
160 	/* Move result to r3 */
161 	EMIT(PPC_RAW_MR(3, b2p[BPF_REG_0]));
162 
163 	EMIT(PPC_RAW_BLR());
164 }
165 
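/*
 * Emit a call to a helper at a known, fixed address. For bpf-to-bpf calls,
 * whose address may only be known at the extra pass, see
 * bpf_jit_emit_func_call_rel() below.
 */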
166 static void bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx,
167 				       u64 func)
168 {
169 #ifdef PPC64_ELF_ABI_v1
170 	/* func points to the function descriptor */
171 	PPC_LI64(b2p[TMP_REG_2], func);
172 	/* Load actual entry point from function descriptor */
173 	PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_2], 0);
174 	/* ... and move it to LR */
175 	EMIT(PPC_RAW_MTLR(b2p[TMP_REG_1]));
176 	/*
177 	 * Load TOC from function descriptor at offset 8.
178 	 * We can clobber r2 since we get called through a
179 	 * function pointer (so caller will save/restore r2)
180 	 * and since we don't use a TOC ourself.
181 	 */
182 	PPC_BPF_LL(2, b2p[TMP_REG_2], 8);
183 #else
184 	/* We can clobber r12 */
185 	PPC_FUNC_ADDR(12, func);
186 	EMIT(PPC_RAW_MTLR(12));
187 #endif
188 	EMIT(PPC_RAW_BLRL());
189 }
190 
191 static void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx,
192 				       u64 func)
193 {
194 	unsigned int i, ctx_idx = ctx->idx;
195 
196 	/* Load function address into r12 */
197 	PPC_LI64(12, func);
198 
199 	/* For bpf-to-bpf function calls, the callee's address is unknown
200 	 * until the last extra pass. As seen above, we use PPC_LI64() to
201 	 * load the callee's address, but this may optimize the number of
202 	 * instructions required based on the nature of the address.
203 	 *
204 	 * Since we don't want the number of instructions emitted to change,
205 	 * we pad the optimized PPC_LI64() call with NOPs to guarantee that
206 	 * we always have a five-instruction sequence, which is the maximum
207 	 * that PPC_LI64() can emit.
208 	 */
209 	for (i = ctx->idx - ctx_idx; i < 5; i++)
210 		EMIT(PPC_RAW_NOP());
211 
212 #ifdef PPC64_ELF_ABI_v1
213 	/*
214 	 * Load TOC from function descriptor at offset 8.
215 	 * We can clobber r2 since we get called through a
216 	 * function pointer (so caller will save/restore r2)
217 	 * and since we don't use a TOC ourself.
218 	 */
219 	PPC_BPF_LL(2, 12, 8);
220 	/* Load actual entry point from function descriptor */
221 	PPC_BPF_LL(12, 12, 0);
222 #endif
223 
224 	EMIT(PPC_RAW_MTLR(12));
225 	EMIT(PPC_RAW_BLRL());
226 }
227 
228 static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
229 {
230 	/*
231 	 * By now, the eBPF program has already setup parameters in r3, r4 and r5
232 	 * r3/BPF_REG_1 - pointer to ctx -- passed as is to the next bpf program
233 	 * r4/BPF_REG_2 - pointer to bpf_array
234 	 * r5/BPF_REG_3 - index in bpf_array
235 	 */
236 	int b2p_bpf_array = b2p[BPF_REG_2];
237 	int b2p_index = b2p[BPF_REG_3];
238 
239 	/*
240 	 * if (index >= array->map.max_entries)
241 	 *   goto out;
242 	 */
243 	EMIT(PPC_RAW_LWZ(b2p[TMP_REG_1], b2p_bpf_array, offsetof(struct bpf_array, map.max_entries)));
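	/* The index in BPF_REG_3 is a u32: clear the upper 32 bits before the
	 * comparison below and before using it to index into array->ptrs[].
	 */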
244 	EMIT(PPC_RAW_RLWINM(b2p_index, b2p_index, 0, 0, 31));
245 	EMIT(PPC_RAW_CMPLW(b2p_index, b2p[TMP_REG_1]));
246 	PPC_BCC(COND_GE, out);
247 
248 	/*
249 	 * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
250 	 *   goto out;
251 	 */
252 	PPC_BPF_LL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
253 	EMIT(PPC_RAW_CMPLWI(b2p[TMP_REG_1], MAX_TAIL_CALL_CNT));
254 	PPC_BCC(COND_GT, out);
255 
256 	/*
257 	 * tail_call_cnt++;
258 	 */
259 	EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], 1));
260 	PPC_BPF_STL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
261 
262 	/* prog = array->ptrs[index]; */
263 	EMIT(PPC_RAW_MULI(b2p[TMP_REG_1], b2p_index, 8));
264 	EMIT(PPC_RAW_ADD(b2p[TMP_REG_1], b2p[TMP_REG_1], b2p_bpf_array));
265 	PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs));
266 
267 	/*
268 	 * if (prog == NULL)
269 	 *   goto out;
270 	 */
271 	EMIT(PPC_RAW_CMPLDI(b2p[TMP_REG_1], 0));
272 	PPC_BCC(COND_EQ, out);
273 
274 	/* goto *(prog->bpf_func + prologue_size); */
275 	PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func));
276 #ifdef PPC64_ELF_ABI_v1
277 	/* skip past the function descriptor */
278 	EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1],
279 			FUNCTION_DESCR_SIZE + BPF_TAILCALL_PROLOGUE_SIZE));
280 #else
281 	EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], BPF_TAILCALL_PROLOGUE_SIZE));
282 #endif
283 	EMIT(PPC_RAW_MTCTR(b2p[TMP_REG_1]));
284 
285 	/* tear down stack, restore NVRs, ... */
286 	bpf_jit_emit_common_epilogue(image, ctx);
287 
288 	EMIT(PPC_RAW_BCTR());
289 
290 	/* out: */
291 	return 0;
292 }
293 
294 /*
295  * We spill into the redzone always, even if the bpf program has its own stackframe.
296  * Offsets hardcoded based on BPF_PPC_STACK_SAVE -- see bpf_jit_stack_local()
297  */
298 void bpf_stf_barrier(void);
299 
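/*
 * Fallback store-forwarding barrier sequence, used by the
 * STF_BARRIER_FALLBACK case in bpf_jit_build_body() below: spill two
 * non-volatile GPRs to the redzone, sync, reload them, then a stream of
 * taken branches before returning.
 */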
300 asm (
301 "		.global bpf_stf_barrier		;"
302 "	bpf_stf_barrier:			;"
303 "		std	21,-64(1)		;"
304 "		std	22,-56(1)		;"
305 "		sync				;"
306 "		ld	21,-64(1)		;"
307 "		ld	22,-56(1)		;"
308 "		ori	31,31,0			;"
309 "		.rept 14			;"
310 "		b	1f			;"
311 "	1:					;"
312 "		.endr				;"
313 "		blr				;"
314 );
315 
316 /* Assemble the body code between the prologue & epilogue */
317 static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
318 			      struct codegen_context *ctx,
319 			      u32 *addrs, bool extra_pass)
320 {
321 	enum stf_barrier_type stf_barrier = stf_barrier_type_get();
322 	const struct bpf_insn *insn = fp->insnsi;
323 	int flen = fp->len;
324 	int i, ret;
325 
326 	/* Start of epilogue code - will only be valid 2nd pass onwards */
327 	u32 exit_addr = addrs[flen];
328 
329 	for (i = 0; i < flen; i++) {
330 		u32 code = insn[i].code;
331 		u32 dst_reg = b2p[insn[i].dst_reg];
332 		u32 src_reg = b2p[insn[i].src_reg];
333 		s16 off = insn[i].off;
334 		s32 imm = insn[i].imm;
335 		bool func_addr_fixed;
336 		u64 func_addr;
337 		u64 imm64;
338 		u32 true_cond;
339 		u32 tmp_idx;
340 
341 		/*
342 		 * addrs[] maps a BPF bytecode address into a real offset from
343 		 * the start of the body code.
344 		 */
345 		addrs[i] = ctx->idx * 4;
346 
347 		/*
348 		 * As an optimization, we note down which non-volatile registers
349 		 * are used so that we can only save/restore those in our
350 		 * prologue and epilogue. We do this here regardless of whether
351 		 * the actual BPF instruction uses src/dst registers or not
352 		 * (for instance, BPF_CALL does not use them). The expectation
353 		 * is that those instructions will have src_reg/dst_reg set to
354 		 * 0. Even otherwise, we just lose some prologue/epilogue
355 		 * optimization but everything else should work without
356 		 * any issues.
357 		 */
358 		if (dst_reg >= BPF_PPC_NVR_MIN && dst_reg < 32)
359 			bpf_set_seen_register(ctx, insn[i].dst_reg);
360 		if (src_reg >= BPF_PPC_NVR_MIN && src_reg < 32)
361 			bpf_set_seen_register(ctx, insn[i].src_reg);
362 
363 		switch (code) {
364 		/*
365 		 * Arithmetic operations: ADD/SUB/MUL/DIV/MOD/NEG
366 		 */
367 		case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */
368 		case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */
369 			EMIT(PPC_RAW_ADD(dst_reg, dst_reg, src_reg));
370 			goto bpf_alu32_trunc;
371 		case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */
372 		case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */
373 			EMIT(PPC_RAW_SUB(dst_reg, dst_reg, src_reg));
374 			goto bpf_alu32_trunc;
375 		case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
376 		case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
377 			if (!imm) {
378 				goto bpf_alu32_trunc;
379 			} else if (imm >= -32768 && imm < 32768) {
380 				EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
381 			} else {
382 				PPC_LI32(b2p[TMP_REG_1], imm);
383 				EMIT(PPC_RAW_ADD(dst_reg, dst_reg, b2p[TMP_REG_1]));
384 			}
385 			goto bpf_alu32_trunc;
386 		case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
387 		case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
388 			if (!imm) {
389 				goto bpf_alu32_trunc;
390 			} else if (imm > -32768 && imm <= 32768) {
391 				EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(-imm)));
392 			} else {
393 				PPC_LI32(b2p[TMP_REG_1], imm);
394 				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]));
395 			}
396 			goto bpf_alu32_trunc;
397 		case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
398 		case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
399 			if (BPF_CLASS(code) == BPF_ALU)
400 				EMIT(PPC_RAW_MULW(dst_reg, dst_reg, src_reg));
401 			else
402 				EMIT(PPC_RAW_MULD(dst_reg, dst_reg, src_reg));
403 			goto bpf_alu32_trunc;
404 		case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */
405 		case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */
406 			if (imm >= -32768 && imm < 32768)
407 				EMIT(PPC_RAW_MULI(dst_reg, dst_reg, IMM_L(imm)));
408 			else {
409 				PPC_LI32(b2p[TMP_REG_1], imm);
410 				if (BPF_CLASS(code) == BPF_ALU)
411 					EMIT(PPC_RAW_MULW(dst_reg, dst_reg,
412 							b2p[TMP_REG_1]));
413 				else
414 					EMIT(PPC_RAW_MULD(dst_reg, dst_reg,
415 							b2p[TMP_REG_1]));
416 			}
417 			goto bpf_alu32_trunc;
418 		case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
419 		case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
420 			if (BPF_OP(code) == BPF_MOD) {
421 				EMIT(PPC_RAW_DIVWU(b2p[TMP_REG_1], dst_reg, src_reg));
422 				EMIT(PPC_RAW_MULW(b2p[TMP_REG_1], src_reg,
423 						b2p[TMP_REG_1]));
424 				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]));
425 			} else
426 				EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, src_reg));
427 			goto bpf_alu32_trunc;
428 		case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
429 		case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
430 			if (BPF_OP(code) == BPF_MOD) {
431 				EMIT(PPC_RAW_DIVDU(b2p[TMP_REG_1], dst_reg, src_reg));
432 				EMIT(PPC_RAW_MULD(b2p[TMP_REG_1], src_reg,
433 						b2p[TMP_REG_1]));
434 				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]));
435 			} else
436 				EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg, src_reg));
437 			break;
438 		case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
439 		case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
440 		case BPF_ALU64 | BPF_MOD | BPF_K: /* dst %= imm */
441 		case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
442 			if (imm == 0)
443 				return -EINVAL;
444 			if (imm == 1) {
445 				if (BPF_OP(code) == BPF_DIV) {
446 					goto bpf_alu32_trunc;
447 				} else {
448 					EMIT(PPC_RAW_LI(dst_reg, 0));
449 					break;
450 				}
451 			}
452 
453 			PPC_LI32(b2p[TMP_REG_1], imm);
454 			switch (BPF_CLASS(code)) {
455 			case BPF_ALU:
456 				if (BPF_OP(code) == BPF_MOD) {
457 					EMIT(PPC_RAW_DIVWU(b2p[TMP_REG_2],
458 							dst_reg,
459 							b2p[TMP_REG_1]));
460 					EMIT(PPC_RAW_MULW(b2p[TMP_REG_1],
461 							b2p[TMP_REG_1],
462 							b2p[TMP_REG_2]));
463 					EMIT(PPC_RAW_SUB(dst_reg, dst_reg,
464 							b2p[TMP_REG_1]));
465 				} else
466 					EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg,
467 							b2p[TMP_REG_1]));
468 				break;
469 			case BPF_ALU64:
470 				if (BPF_OP(code) == BPF_MOD) {
471 					EMIT(PPC_RAW_DIVDU(b2p[TMP_REG_2],
472 							dst_reg,
473 							b2p[TMP_REG_1]));
474 					EMIT(PPC_RAW_MULD(b2p[TMP_REG_1],
475 							b2p[TMP_REG_1],
476 							b2p[TMP_REG_2]));
477 					EMIT(PPC_RAW_SUB(dst_reg, dst_reg,
478 							b2p[TMP_REG_1]));
479 				} else
480 					EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg,
481 							b2p[TMP_REG_1]));
482 				break;
483 			}
484 			goto bpf_alu32_trunc;
485 		case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */
486 		case BPF_ALU64 | BPF_NEG: /* dst = -dst */
487 			EMIT(PPC_RAW_NEG(dst_reg, dst_reg));
488 			goto bpf_alu32_trunc;
489 
490 		/*
491 		 * Logical operations: AND/OR/XOR/[A]LSH/[A]RSH
492 		 */
493 		case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */
494 		case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
495 			EMIT(PPC_RAW_AND(dst_reg, dst_reg, src_reg));
496 			goto bpf_alu32_trunc;
497 		case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */
498 		case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
499 			if (!IMM_H(imm))
500 				EMIT(PPC_RAW_ANDI(dst_reg, dst_reg, IMM_L(imm)));
501 			else {
502 				/* Sign-extended */
503 				PPC_LI32(b2p[TMP_REG_1], imm);
504 				EMIT(PPC_RAW_AND(dst_reg, dst_reg, b2p[TMP_REG_1]));
505 			}
506 			goto bpf_alu32_trunc;
507 		case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
508 		case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
509 			EMIT(PPC_RAW_OR(dst_reg, dst_reg, src_reg));
510 			goto bpf_alu32_trunc;
511 		case BPF_ALU | BPF_OR | BPF_K:/* dst = (u32) dst | (u32) imm */
512 		case BPF_ALU64 | BPF_OR | BPF_K:/* dst = dst | imm */
513 			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
514 				/* Sign-extended */
515 				PPC_LI32(b2p[TMP_REG_1], imm);
516 				EMIT(PPC_RAW_OR(dst_reg, dst_reg, b2p[TMP_REG_1]));
517 			} else {
518 				if (IMM_L(imm))
519 					EMIT(PPC_RAW_ORI(dst_reg, dst_reg, IMM_L(imm)));
520 				if (IMM_H(imm))
521 					EMIT(PPC_RAW_ORIS(dst_reg, dst_reg, IMM_H(imm)));
522 			}
523 			goto bpf_alu32_trunc;
524 		case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */
525 		case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */
526 			EMIT(PPC_RAW_XOR(dst_reg, dst_reg, src_reg));
527 			goto bpf_alu32_trunc;
528 		case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */
529 		case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */
530 			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
531 				/* Sign-extended */
532 				PPC_LI32(b2p[TMP_REG_1], imm);
533 				EMIT(PPC_RAW_XOR(dst_reg, dst_reg, b2p[TMP_REG_1]));
534 			} else {
535 				if (IMM_L(imm))
536 					EMIT(PPC_RAW_XORI(dst_reg, dst_reg, IMM_L(imm)));
537 				if (IMM_H(imm))
538 					EMIT(PPC_RAW_XORIS(dst_reg, dst_reg, IMM_H(imm)));
539 			}
540 			goto bpf_alu32_trunc;
541 		case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */
542 			/* slw clears top 32 bits */
543 			EMIT(PPC_RAW_SLW(dst_reg, dst_reg, src_reg));
544 			/* skip zero extension move, but set address map. */
545 			if (insn_is_zext(&insn[i + 1]))
546 				addrs[++i] = ctx->idx * 4;
547 			break;
548 		case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */
549 			EMIT(PPC_RAW_SLD(dst_reg, dst_reg, src_reg));
550 			break;
551 		case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<= (u32) imm */
552 			/* with imm 0, we still need to clear top 32 bits */
553 			EMIT(PPC_RAW_SLWI(dst_reg, dst_reg, imm));
554 			if (insn_is_zext(&insn[i + 1]))
555 				addrs[++i] = ctx->idx * 4;
556 			break;
557 		case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<= imm */
558 			if (imm != 0)
559 				EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, imm));
560 			break;
561 		case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */
562 			EMIT(PPC_RAW_SRW(dst_reg, dst_reg, src_reg));
563 			if (insn_is_zext(&insn[i + 1]))
564 				addrs[++i] = ctx->idx * 4;
565 			break;
566 		case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */
567 			EMIT(PPC_RAW_SRD(dst_reg, dst_reg, src_reg));
568 			break;
569 		case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */
570 			EMIT(PPC_RAW_SRWI(dst_reg, dst_reg, imm));
571 			if (insn_is_zext(&insn[i + 1]))
572 				addrs[++i] = ctx->idx * 4;
573 			break;
574 		case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */
575 			if (imm != 0)
576 				EMIT(PPC_RAW_SRDI(dst_reg, dst_reg, imm));
577 			break;
578 		case BPF_ALU | BPF_ARSH | BPF_X: /* (s32) dst >>= src */
579 			EMIT(PPC_RAW_SRAW(dst_reg, dst_reg, src_reg));
580 			goto bpf_alu32_trunc;
581 		case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
582 			EMIT(PPC_RAW_SRAD(dst_reg, dst_reg, src_reg));
583 			break;
584 		case BPF_ALU | BPF_ARSH | BPF_K: /* (s32) dst >>= imm */
585 			EMIT(PPC_RAW_SRAWI(dst_reg, dst_reg, imm));
586 			goto bpf_alu32_trunc;
587 		case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
588 			if (imm != 0)
589 				EMIT(PPC_RAW_SRADI(dst_reg, dst_reg, imm));
590 			break;
591 
592 		/*
593 		 * MOV
594 		 */
595 		case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
596 		case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
597 			if (imm == 1) {
598 				/* special mov32 for zext */
599 				EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
600 				break;
601 			}
602 			EMIT(PPC_RAW_MR(dst_reg, src_reg));
603 			goto bpf_alu32_trunc;
604 		case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */
605 		case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */
606 			PPC_LI32(dst_reg, imm);
607 			if (imm < 0)
608 				goto bpf_alu32_trunc;
609 			else if (insn_is_zext(&insn[i + 1]))
610 				addrs[++i] = ctx->idx * 4;
611 			break;
612 
613 bpf_alu32_trunc:
614 		/* Truncate to 32-bits */
615 		if (BPF_CLASS(code) == BPF_ALU && !fp->aux->verifier_zext)
616 			EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
617 		break;
618 
619 		/*
620 		 * BPF_FROM_BE/LE
621 		 */
622 		case BPF_ALU | BPF_END | BPF_FROM_LE:
623 		case BPF_ALU | BPF_END | BPF_FROM_BE:
624 #ifdef __BIG_ENDIAN__
625 			if (BPF_SRC(code) == BPF_FROM_BE)
626 				goto emit_clear;
627 #else /* !__BIG_ENDIAN__ */
628 			if (BPF_SRC(code) == BPF_FROM_LE)
629 				goto emit_clear;
630 #endif
631 			switch (imm) {
632 			case 16:
633 				/* Rotate 8 bits left & mask with 0x0000ff00 */
634 				EMIT(PPC_RAW_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 16, 23));
635 				/* Rotate 8 bits right & insert LSB to reg */
636 				EMIT(PPC_RAW_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 24, 31));
637 				/* Move result back to dst_reg */
638 				EMIT(PPC_RAW_MR(dst_reg, b2p[TMP_REG_1]));
639 				break;
640 			case 32:
641 				/*
642 				 * Rotate word left by 8 bits:
643 				 * 2 bytes are already in their final position
644 				 * -- byte 2 and 4 (of bytes 1, 2, 3 and 4)
645 				 */
646 				EMIT(PPC_RAW_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 0, 31));
647 				/* Rotate 24 bits and insert byte 1 */
648 				EMIT(PPC_RAW_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 0, 7));
649 				/* Rotate 24 bits and insert byte 3 */
650 				EMIT(PPC_RAW_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 16, 23));
651 				EMIT(PPC_RAW_MR(dst_reg, b2p[TMP_REG_1]));
652 				break;
653 			case 64:
654 				/* Store the value to stack and then use byte-reverse loads */
655 				PPC_BPF_STL(dst_reg, 1, bpf_jit_stack_local(ctx));
656 				EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx)));
657 				if (cpu_has_feature(CPU_FTR_ARCH_206)) {
658 					EMIT(PPC_RAW_LDBRX(dst_reg, 0, b2p[TMP_REG_1]));
659 				} else {
660 					EMIT(PPC_RAW_LWBRX(dst_reg, 0, b2p[TMP_REG_1]));
661 					if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
662 						EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, 32));
663 					EMIT(PPC_RAW_LI(b2p[TMP_REG_2], 4));
664 					EMIT(PPC_RAW_LWBRX(b2p[TMP_REG_2], b2p[TMP_REG_2], b2p[TMP_REG_1]));
665 					if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
666 						EMIT(PPC_RAW_SLDI(b2p[TMP_REG_2], b2p[TMP_REG_2], 32));
667 					EMIT(PPC_RAW_OR(dst_reg, dst_reg, b2p[TMP_REG_2]));
668 				}
669 				break;
670 			}
671 			break;
672 
673 emit_clear:
674 			switch (imm) {
675 			case 16:
676 				/* zero-extend 16 bits into 64 bits */
677 				EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 48));
678 				if (insn_is_zext(&insn[i + 1]))
679 					addrs[++i] = ctx->idx * 4;
680 				break;
681 			case 32:
682 				if (!fp->aux->verifier_zext)
683 					/* zero-extend 32 bits into 64 bits */
684 					EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 32));
685 				break;
686 			case 64:
687 				/* nop */
688 				break;
689 			}
690 			break;
691 
692 		/*
693 		 * BPF_ST NOSPEC (speculation barrier)
694 		 */
695 		case BPF_ST | BPF_NOSPEC:
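			/*
			 * If the speculation mitigation is not in effect for
			 * this configuration, no barrier needs to be emitted.
			 */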
696 			if (!security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) ||
697 					(!security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR) &&
698 					 (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) || !cpu_has_feature(CPU_FTR_HVMODE))))
699 				break;
700 
701 			switch (stf_barrier) {
702 			case STF_BARRIER_EIEIO:
703 				EMIT(0x7c0006ac | 0x02000000);
704 				break;
705 			case STF_BARRIER_SYNC_ORI:
706 				EMIT(PPC_INST_SYNC);
707 				EMIT(PPC_RAW_LD(b2p[TMP_REG_1], 13, 0));
708 				EMIT(PPC_RAW_ORI(31, 31, 0));
709 				break;
710 			case STF_BARRIER_FALLBACK:
711 				EMIT(PPC_INST_MFLR | ___PPC_RT(b2p[TMP_REG_1]));
712 				PPC_LI64(12, dereference_kernel_function_descriptor(bpf_stf_barrier));
713 				EMIT(PPC_RAW_MTCTR(12));
714 				EMIT(PPC_INST_BCTR | 0x1);
715 				EMIT(PPC_RAW_MTLR(b2p[TMP_REG_1]));
716 				break;
717 			case STF_BARRIER_NONE:
718 				break;
719 			}
720 			break;
721 
722 		/*
723 		 * BPF_ST(X)
724 		 */
725 		case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
726 		case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
727 			if (BPF_CLASS(code) == BPF_ST) {
728 				EMIT(PPC_RAW_LI(b2p[TMP_REG_1], imm));
729 				src_reg = b2p[TMP_REG_1];
730 			}
731 			EMIT(PPC_RAW_STB(src_reg, dst_reg, off));
732 			break;
733 		case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
734 		case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
735 			if (BPF_CLASS(code) == BPF_ST) {
736 				EMIT(PPC_RAW_LI(b2p[TMP_REG_1], imm));
737 				src_reg = b2p[TMP_REG_1];
738 			}
739 			EMIT(PPC_RAW_STH(src_reg, dst_reg, off));
740 			break;
741 		case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
742 		case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
743 			if (BPF_CLASS(code) == BPF_ST) {
744 				PPC_LI32(b2p[TMP_REG_1], imm);
745 				src_reg = b2p[TMP_REG_1];
746 			}
747 			EMIT(PPC_RAW_STW(src_reg, dst_reg, off));
748 			break;
749 		case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
750 		case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
751 			if (BPF_CLASS(code) == BPF_ST) {
752 				PPC_LI32(b2p[TMP_REG_1], imm);
753 				src_reg = b2p[TMP_REG_1];
754 			}
755 			PPC_BPF_STL(src_reg, dst_reg, off);
756 			break;
757 
758 		/*
759 		 * BPF_STX XADD (atomic_add)
760 		 */
761 		/* *(u32 *)(dst + off) += src */
762 		case BPF_STX | BPF_XADD | BPF_W:
763 			/* Get EA into TMP_REG_1 */
764 			EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], dst_reg, off));
765 			tmp_idx = ctx->idx * 4;
766 			/* load value from memory into TMP_REG_2 */
767 			EMIT(PPC_RAW_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0));
768 			/* add value from src_reg into this */
769 			EMIT(PPC_RAW_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg));
770 			/* store result back */
771 			EMIT(PPC_RAW_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]));
772 			/* we're done if this succeeded */
773 			PPC_BCC_SHORT(COND_NE, tmp_idx);
774 			break;
775 		/* *(u64 *)(dst + off) += src */
776 		case BPF_STX | BPF_XADD | BPF_DW:
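			/* Same as the word-sized case above, but using
			 * ldarx/stdcx. for a 64-bit atomic add.
			 */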
777 			EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], dst_reg, off));
778 			tmp_idx = ctx->idx * 4;
779 			EMIT(PPC_RAW_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0));
780 			EMIT(PPC_RAW_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg));
781 			EMIT(PPC_RAW_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]));
782 			PPC_BCC_SHORT(COND_NE, tmp_idx);
783 			break;
784 
785 		/*
786 		 * BPF_LDX
787 		 */
788 		/* dst = *(u8 *)(ul) (src + off) */
789 		case BPF_LDX | BPF_MEM | BPF_B:
790 			EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
791 			if (insn_is_zext(&insn[i + 1]))
792 				addrs[++i] = ctx->idx * 4;
793 			break;
794 		/* dst = *(u16 *)(ul) (src + off) */
795 		case BPF_LDX | BPF_MEM | BPF_H:
796 			EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off));
797 			if (insn_is_zext(&insn[i + 1]))
798 				addrs[++i] = ctx->idx * 4;
799 			break;
800 		/* dst = *(u32 *)(ul) (src + off) */
801 		case BPF_LDX | BPF_MEM | BPF_W:
802 			EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
803 			if (insn_is_zext(&insn[i + 1]))
804 				addrs[++i] = ctx->idx * 4;
805 			break;
806 		/* dst = *(u64 *)(ul) (src + off) */
807 		case BPF_LDX | BPF_MEM | BPF_DW:
808 			PPC_BPF_LL(dst_reg, src_reg, off);
809 			break;
810 
811 		/*
812 		 * Doubleword load
813 		 * 16 byte instruction that uses two 'struct bpf_insn'
814 		 */
815 		case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
816 			imm64 = ((u64)(u32) insn[i].imm) |
817 				    (((u64)(u32) insn[i+1].imm) << 32);
818 			/* Adjust for two bpf instructions */
819 			addrs[++i] = ctx->idx * 4;
820 			PPC_LI64(dst_reg, imm64);
821 			break;
822 
823 		/*
824 		 * Return/Exit
825 		 */
826 		case BPF_JMP | BPF_EXIT:
827 			/*
828 			 * If this isn't the very last instruction, branch to
829 			 * the epilogue. If we _are_ the last instruction,
830 			 * we'll just fall through to the epilogue.
831 			 */
832 			if (i != flen - 1)
833 				PPC_JMP(exit_addr);
834 			/* else fall through to the epilogue */
835 			break;
836 
837 		/*
838 		 * Call kernel helper or bpf function
839 		 */
840 		case BPF_JMP | BPF_CALL:
841 			ctx->seen |= SEEN_FUNC;
842 
843 			ret = bpf_jit_get_func_addr(fp, &insn[i], extra_pass,
844 						    &func_addr, &func_addr_fixed);
845 			if (ret < 0)
846 				return ret;
847 
848 			if (func_addr_fixed)
849 				bpf_jit_emit_func_call_hlp(image, ctx, func_addr);
850 			else
851 				bpf_jit_emit_func_call_rel(image, ctx, func_addr);
852 			/* move return value from r3 to BPF_REG_0 */
853 			EMIT(PPC_RAW_MR(b2p[BPF_REG_0], 3));
854 			break;
855 
856 		/*
857 		 * Jumps and branches
858 		 */
859 		case BPF_JMP | BPF_JA:
860 			PPC_JMP(addrs[i + 1 + off]);
861 			break;
862 
863 		case BPF_JMP | BPF_JGT | BPF_K:
864 		case BPF_JMP | BPF_JGT | BPF_X:
865 		case BPF_JMP | BPF_JSGT | BPF_K:
866 		case BPF_JMP | BPF_JSGT | BPF_X:
867 		case BPF_JMP32 | BPF_JGT | BPF_K:
868 		case BPF_JMP32 | BPF_JGT | BPF_X:
869 		case BPF_JMP32 | BPF_JSGT | BPF_K:
870 		case BPF_JMP32 | BPF_JSGT | BPF_X:
871 			true_cond = COND_GT;
872 			goto cond_branch;
873 		case BPF_JMP | BPF_JLT | BPF_K:
874 		case BPF_JMP | BPF_JLT | BPF_X:
875 		case BPF_JMP | BPF_JSLT | BPF_K:
876 		case BPF_JMP | BPF_JSLT | BPF_X:
877 		case BPF_JMP32 | BPF_JLT | BPF_K:
878 		case BPF_JMP32 | BPF_JLT | BPF_X:
879 		case BPF_JMP32 | BPF_JSLT | BPF_K:
880 		case BPF_JMP32 | BPF_JSLT | BPF_X:
881 			true_cond = COND_LT;
882 			goto cond_branch;
883 		case BPF_JMP | BPF_JGE | BPF_K:
884 		case BPF_JMP | BPF_JGE | BPF_X:
885 		case BPF_JMP | BPF_JSGE | BPF_K:
886 		case BPF_JMP | BPF_JSGE | BPF_X:
887 		case BPF_JMP32 | BPF_JGE | BPF_K:
888 		case BPF_JMP32 | BPF_JGE | BPF_X:
889 		case BPF_JMP32 | BPF_JSGE | BPF_K:
890 		case BPF_JMP32 | BPF_JSGE | BPF_X:
891 			true_cond = COND_GE;
892 			goto cond_branch;
893 		case BPF_JMP | BPF_JLE | BPF_K:
894 		case BPF_JMP | BPF_JLE | BPF_X:
895 		case BPF_JMP | BPF_JSLE | BPF_K:
896 		case BPF_JMP | BPF_JSLE | BPF_X:
897 		case BPF_JMP32 | BPF_JLE | BPF_K:
898 		case BPF_JMP32 | BPF_JLE | BPF_X:
899 		case BPF_JMP32 | BPF_JSLE | BPF_K:
900 		case BPF_JMP32 | BPF_JSLE | BPF_X:
901 			true_cond = COND_LE;
902 			goto cond_branch;
903 		case BPF_JMP | BPF_JEQ | BPF_K:
904 		case BPF_JMP | BPF_JEQ | BPF_X:
905 		case BPF_JMP32 | BPF_JEQ | BPF_K:
906 		case BPF_JMP32 | BPF_JEQ | BPF_X:
907 			true_cond = COND_EQ;
908 			goto cond_branch;
909 		case BPF_JMP | BPF_JNE | BPF_K:
910 		case BPF_JMP | BPF_JNE | BPF_X:
911 		case BPF_JMP32 | BPF_JNE | BPF_K:
912 		case BPF_JMP32 | BPF_JNE | BPF_X:
913 			true_cond = COND_NE;
914 			goto cond_branch;
915 		case BPF_JMP | BPF_JSET | BPF_K:
916 		case BPF_JMP | BPF_JSET | BPF_X:
917 		case BPF_JMP32 | BPF_JSET | BPF_K:
918 		case BPF_JMP32 | BPF_JSET | BPF_X:
919 			true_cond = COND_NE;
920 			/* Fall through */
921 
922 cond_branch:
923 			switch (code) {
924 			case BPF_JMP | BPF_JGT | BPF_X:
925 			case BPF_JMP | BPF_JLT | BPF_X:
926 			case BPF_JMP | BPF_JGE | BPF_X:
927 			case BPF_JMP | BPF_JLE | BPF_X:
928 			case BPF_JMP | BPF_JEQ | BPF_X:
929 			case BPF_JMP | BPF_JNE | BPF_X:
930 			case BPF_JMP32 | BPF_JGT | BPF_X:
931 			case BPF_JMP32 | BPF_JLT | BPF_X:
932 			case BPF_JMP32 | BPF_JGE | BPF_X:
933 			case BPF_JMP32 | BPF_JLE | BPF_X:
934 			case BPF_JMP32 | BPF_JEQ | BPF_X:
935 			case BPF_JMP32 | BPF_JNE | BPF_X:
936 				/* unsigned comparison */
937 				if (BPF_CLASS(code) == BPF_JMP32)
938 					EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
939 				else
940 					EMIT(PPC_RAW_CMPLD(dst_reg, src_reg));
941 				break;
942 			case BPF_JMP | BPF_JSGT | BPF_X:
943 			case BPF_JMP | BPF_JSLT | BPF_X:
944 			case BPF_JMP | BPF_JSGE | BPF_X:
945 			case BPF_JMP | BPF_JSLE | BPF_X:
946 			case BPF_JMP32 | BPF_JSGT | BPF_X:
947 			case BPF_JMP32 | BPF_JSLT | BPF_X:
948 			case BPF_JMP32 | BPF_JSGE | BPF_X:
949 			case BPF_JMP32 | BPF_JSLE | BPF_X:
950 				/* signed comparison */
951 				if (BPF_CLASS(code) == BPF_JMP32)
952 					EMIT(PPC_RAW_CMPW(dst_reg, src_reg));
953 				else
954 					EMIT(PPC_RAW_CMPD(dst_reg, src_reg));
955 				break;
956 			case BPF_JMP | BPF_JSET | BPF_X:
957 			case BPF_JMP32 | BPF_JSET | BPF_X:
958 				if (BPF_CLASS(code) == BPF_JMP) {
959 					EMIT(PPC_RAW_AND_DOT(b2p[TMP_REG_1], dst_reg,
960 						    src_reg));
961 				} else {
962 					int tmp_reg = b2p[TMP_REG_1];
963 
964 					EMIT(PPC_RAW_AND(tmp_reg, dst_reg, src_reg));
965 					EMIT(PPC_RAW_RLWINM_DOT(tmp_reg, tmp_reg, 0, 0,
966 						       31));
967 				}
968 				break;
969 			case BPF_JMP | BPF_JNE | BPF_K:
970 			case BPF_JMP | BPF_JEQ | BPF_K:
971 			case BPF_JMP | BPF_JGT | BPF_K:
972 			case BPF_JMP | BPF_JLT | BPF_K:
973 			case BPF_JMP | BPF_JGE | BPF_K:
974 			case BPF_JMP | BPF_JLE | BPF_K:
975 			case BPF_JMP32 | BPF_JNE | BPF_K:
976 			case BPF_JMP32 | BPF_JEQ | BPF_K:
977 			case BPF_JMP32 | BPF_JGT | BPF_K:
978 			case BPF_JMP32 | BPF_JLT | BPF_K:
979 			case BPF_JMP32 | BPF_JGE | BPF_K:
980 			case BPF_JMP32 | BPF_JLE | BPF_K:
981 			{
982 				bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;
983 
984 				/*
985 				 * Need sign-extended load, so only positive
986 				 * values can be used as imm in cmpldi
987 				 */
988 				if (imm >= 0 && imm < 32768) {
989 					if (is_jmp32)
990 						EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
991 					else
992 						EMIT(PPC_RAW_CMPLDI(dst_reg, imm));
993 				} else {
994 					/* sign-extending load */
995 					PPC_LI32(b2p[TMP_REG_1], imm);
996 					/* ... but unsigned comparison */
997 					if (is_jmp32)
998 						EMIT(PPC_RAW_CMPLW(dst_reg,
999 							  b2p[TMP_REG_1]));
1000 					else
1001 						EMIT(PPC_RAW_CMPLD(dst_reg,
1002 							  b2p[TMP_REG_1]));
1003 				}
1004 				break;
1005 			}
1006 			case BPF_JMP | BPF_JSGT | BPF_K:
1007 			case BPF_JMP | BPF_JSLT | BPF_K:
1008 			case BPF_JMP | BPF_JSGE | BPF_K:
1009 			case BPF_JMP | BPF_JSLE | BPF_K:
1010 			case BPF_JMP32 | BPF_JSGT | BPF_K:
1011 			case BPF_JMP32 | BPF_JSLT | BPF_K:
1012 			case BPF_JMP32 | BPF_JSGE | BPF_K:
1013 			case BPF_JMP32 | BPF_JSLE | BPF_K:
1014 			{
1015 				bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;
1016 
1017 				/*
1018 				 * signed comparison, so any 16-bit value
1019 				 * can be used in cmpdi
1020 				 */
1021 				if (imm >= -32768 && imm < 32768) {
1022 					if (is_jmp32)
1023 						EMIT(PPC_RAW_CMPWI(dst_reg, imm));
1024 					else
1025 						EMIT(PPC_RAW_CMPDI(dst_reg, imm));
1026 				} else {
1027 					PPC_LI32(b2p[TMP_REG_1], imm);
1028 					if (is_jmp32)
1029 						EMIT(PPC_RAW_CMPW(dst_reg,
1030 							 b2p[TMP_REG_1]));
1031 					else
1032 						EMIT(PPC_RAW_CMPD(dst_reg,
1033 							 b2p[TMP_REG_1]));
1034 				}
1035 				break;
1036 			}
1037 			case BPF_JMP | BPF_JSET | BPF_K:
1038 			case BPF_JMP32 | BPF_JSET | BPF_K:
1039 				/* andi does not sign-extend the immediate */
1040 				if (imm >= 0 && imm < 32768)
1041 					/* PPC_ANDI is _only/always_ dot-form */
1042 					EMIT(PPC_RAW_ANDI(b2p[TMP_REG_1], dst_reg, imm));
1043 				else {
1044 					int tmp_reg = b2p[TMP_REG_1];
1045 
1046 					PPC_LI32(tmp_reg, imm);
1047 					if (BPF_CLASS(code) == BPF_JMP) {
1048 						EMIT(PPC_RAW_AND_DOT(tmp_reg, dst_reg,
1049 							    tmp_reg));
1050 					} else {
1051 						EMIT(PPC_RAW_AND(tmp_reg, dst_reg,
1052 							tmp_reg));
1053 						EMIT(PPC_RAW_RLWINM_DOT(tmp_reg, tmp_reg,
1054 							       0, 0, 31));
1055 					}
1056 				}
1057 				break;
1058 			}
1059 			PPC_BCC(true_cond, addrs[i + 1 + off]);
1060 			break;
1061 
1062 		/*
1063 		 * Tail call
1064 		 */
1065 		case BPF_JMP | BPF_TAIL_CALL:
1066 			ctx->seen |= SEEN_TAILCALL;
1067 			ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
1068 			if (ret < 0)
1069 				return ret;
1070 			break;
1071 
1072 		default:
1073 			/*
1074 			 * The filter contains something cruel & unusual.
1075 			 * We don't handle it, but also there shouldn't be
1076 			 * anything missing from our list.
1077 			 */
1078 			pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n",
1079 					code, i);
1080 			return -ENOTSUPP;
1081 		}
1082 	}
1083 
1084 	/* Set end-of-body-code address for exit. */
1085 	addrs[i] = ctx->idx * 4;
1086 
1087 	return 0;
1088 }
1089 
1090 /* Fix the branch target addresses for subprog calls */
1091 static int bpf_jit_fixup_subprog_calls(struct bpf_prog *fp, u32 *image,
1092 				       struct codegen_context *ctx, u32 *addrs)
1093 {
1094 	const struct bpf_insn *insn = fp->insnsi;
1095 	bool func_addr_fixed;
1096 	u64 func_addr;
1097 	u32 tmp_idx;
1098 	int i, ret;
1099 
1100 	for (i = 0; i < fp->len; i++) {
1101 		/*
1102 		 * During the extra pass, only the branch target addresses for
1103 		 * the subprog calls need to be fixed. All other instructions
1104 		 * can be left untouched.
1105 		 *
1106 		 * The JITed image length does not change because we already
1107 		 * ensure that the JITed instruction sequence for these calls
1108 		 * are of fixed length by padding them with NOPs.
1109 		 */
1110 		if (insn[i].code == (BPF_JMP | BPF_CALL) &&
1111 		    insn[i].src_reg == BPF_PSEUDO_CALL) {
1112 			ret = bpf_jit_get_func_addr(fp, &insn[i], true,
1113 						    &func_addr,
1114 						    &func_addr_fixed);
1115 			if (ret < 0)
1116 				return ret;
1117 
1118 			/*
1119 			 * Save ctx->idx as this would currently point to the
1120 			 * end of the JITed image and set it to the offset of
1121 			 * the instruction sequence corresponding to the
1122 			 * subprog call temporarily.
1123 			 */
1124 			tmp_idx = ctx->idx;
1125 			ctx->idx = addrs[i] / 4;
1126 			bpf_jit_emit_func_call_rel(image, ctx, func_addr);
1127 
1128 			/*
1129 			 * Restore ctx->idx here. This is safe as the length
1130 			 * of the JITed sequence remains unchanged.
1131 			 */
1132 			ctx->idx = tmp_idx;
1133 		}
1134 	}
1135 
1136 	return 0;
1137 }
1138 
1139 struct powerpc64_jit_data {
1140 	struct bpf_binary_header *header;
1141 	u32 *addrs;
1142 	u8 *image;
1143 	u32 proglen;
1144 	struct codegen_context ctx;
1145 };
1146 
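/*
 * Ask the verifier to insert explicit zero-extension (mov32) instructions;
 * the 32-bit cases above that already clear the upper bits then skip over
 * the verifier-inserted zext instruction (see the insn_is_zext() checks).
 */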
1147 bool bpf_jit_needs_zext(void)
1148 {
1149 	return true;
1150 }
1151 
1152 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
1153 {
1154 	u32 proglen;
1155 	u32 alloclen;
1156 	u8 *image = NULL;
1157 	u32 *code_base;
1158 	u32 *addrs;
1159 	struct powerpc64_jit_data *jit_data;
1160 	struct codegen_context cgctx;
1161 	int pass;
1162 	int flen;
1163 	struct bpf_binary_header *bpf_hdr;
1164 	struct bpf_prog *org_fp = fp;
1165 	struct bpf_prog *tmp_fp;
1166 	bool bpf_blinded = false;
1167 	bool extra_pass = false;
1168 
1169 	if (!fp->jit_requested)
1170 		return org_fp;
1171 
1172 	tmp_fp = bpf_jit_blind_constants(org_fp);
1173 	if (IS_ERR(tmp_fp))
1174 		return org_fp;
1175 
1176 	if (tmp_fp != org_fp) {
1177 		bpf_blinded = true;
1178 		fp = tmp_fp;
1179 	}
1180 
1181 	jit_data = fp->aux->jit_data;
1182 	if (!jit_data) {
1183 		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
1184 		if (!jit_data) {
1185 			fp = org_fp;
1186 			goto out;
1187 		}
1188 		fp->aux->jit_data = jit_data;
1189 	}
1190 
1191 	flen = fp->len;
1192 	addrs = jit_data->addrs;
1193 	if (addrs) {
1194 		cgctx = jit_data->ctx;
1195 		image = jit_data->image;
1196 		bpf_hdr = jit_data->header;
1197 		proglen = jit_data->proglen;
1198 		alloclen = proglen + FUNCTION_DESCR_SIZE;
1199 		extra_pass = true;
1200 		goto skip_init_ctx;
1201 	}
1202 
1203 	addrs = kcalloc(flen + 1, sizeof(*addrs), GFP_KERNEL);
1204 	if (addrs == NULL) {
1205 		fp = org_fp;
1206 		goto out_addrs;
1207 	}
1208 
1209 	memset(&cgctx, 0, sizeof(struct codegen_context));
1210 
1211 	/* Make sure that the stack is quadword aligned. */
1212 	cgctx.stack_size = round_up(fp->aux->stack_depth, 16);
1213 
1214 	/* Scouting faux-generate pass 0 */
1215 	if (bpf_jit_build_body(fp, 0, &cgctx, addrs, false)) {
1216 		/* We hit something illegal or unsupported. */
1217 		fp = org_fp;
1218 		goto out_addrs;
1219 	}
1220 
1221 	/*
1222 	 * If we have seen a tail call, we need a second pass.
1223 	 * This is because bpf_jit_emit_common_epilogue() is called
1224 	 * from bpf_jit_emit_tail_call() with a not yet stable ctx->seen.
1225 	 */
1226 	if (cgctx.seen & SEEN_TAILCALL) {
1227 		cgctx.idx = 0;
1228 		if (bpf_jit_build_body(fp, 0, &cgctx, addrs, false)) {
1229 			fp = org_fp;
1230 			goto out_addrs;
1231 		}
1232 	}
1233 
1234 	/*
1235 	 * Pretend to build prologue, given the features we've seen.  This will
1236 	 * update cgctx.idx as it pretends to output instructions, then we can
1237 	 * calculate total size from idx.
1238 	 */
1239 	bpf_jit_build_prologue(0, &cgctx);
1240 	bpf_jit_build_epilogue(0, &cgctx);
1241 
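	/*
	 * Total image size: the JITed code plus room for the ELFv1 function
	 * descriptor that is filled in after code generation below; the code
	 * itself starts at image + FUNCTION_DESCR_SIZE.
	 */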
1242 	proglen = cgctx.idx * 4;
1243 	alloclen = proglen + FUNCTION_DESCR_SIZE;
1244 
1245 	bpf_hdr = bpf_jit_binary_alloc(alloclen, &image, 4,
1246 			bpf_jit_fill_ill_insns);
1247 	if (!bpf_hdr) {
1248 		fp = org_fp;
1249 		goto out_addrs;
1250 	}
1251 
1252 skip_init_ctx:
1253 	code_base = (u32 *)(image + FUNCTION_DESCR_SIZE);
1254 
1255 	if (extra_pass) {
1256 		/*
1257 		 * Do not touch the prologue and epilogue as they will remain
1258 		 * unchanged. Only fix the branch target address for subprog
1259 		 * calls in the body.
1260 		 *
1261 		 * This does not change the offsets and lengths of the subprog
1262 		 * call instruction sequences and hence, the size of the JITed
1263 		 * image as well.
1264 		 */
1265 		bpf_jit_fixup_subprog_calls(fp, code_base, &cgctx, addrs);
1266 
1267 		/* There is no need to perform the usual passes. */
1268 		goto skip_codegen_passes;
1269 	}
1270 
1271 	/* Code generation passes 1-2 */
1272 	for (pass = 1; pass < 3; pass++) {
1273 		/* Now build the prologue, body code & epilogue for real. */
1274 		cgctx.idx = 0;
1275 		bpf_jit_build_prologue(code_base, &cgctx);
1276 		bpf_jit_build_body(fp, code_base, &cgctx, addrs, extra_pass);
1277 		bpf_jit_build_epilogue(code_base, &cgctx);
1278 
1279 		if (bpf_jit_enable > 1)
1280 			pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
1281 				proglen - (cgctx.idx * 4), cgctx.seen);
1282 	}
1283 
1284 skip_codegen_passes:
1285 	if (bpf_jit_enable > 1)
1286 		/*
1287 		 * Note that we output the base address of the code_base
1288 		 * rather than image, since opcodes are in code_base.
1289 		 */
1290 		bpf_jit_dump(flen, proglen, pass, code_base);
1291 
1292 #ifdef PPC64_ELF_ABI_v1
1293 	/* Function descriptor nastiness: Address + TOC */
1294 	((u64 *)image)[0] = (u64)code_base;
1295 	((u64 *)image)[1] = local_paca->kernel_toc;
1296 #endif
1297 
1298 	fp->bpf_func = (void *)image;
1299 	fp->jited = 1;
1300 	fp->jited_len = alloclen;
1301 
1302 	bpf_flush_icache(bpf_hdr, (u8 *)bpf_hdr + (bpf_hdr->pages * PAGE_SIZE));
1303 	if (!fp->is_func || extra_pass) {
1304 		bpf_prog_fill_jited_linfo(fp, addrs);
1305 out_addrs:
1306 		kfree(addrs);
1307 		kfree(jit_data);
1308 		fp->aux->jit_data = NULL;
1309 	} else {
1310 		jit_data->addrs = addrs;
1311 		jit_data->ctx = cgctx;
1312 		jit_data->proglen = proglen;
1313 		jit_data->image = image;
1314 		jit_data->header = bpf_hdr;
1315 	}
1316 
1317 out:
1318 	if (bpf_blinded)
1319 		bpf_jit_prog_release_other(fp, fp == org_fp ? tmp_fp : org_fp);
1320 
1321 	return fp;
1322 }
1323 
1324 /* Overriding bpf_jit_free() as we don't set images read-only. */
1325 void bpf_jit_free(struct bpf_prog *fp)
1326 {
1327 	unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
1328 	struct bpf_binary_header *bpf_hdr = (void *)addr;
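	/*
	 * bpf_jit_binary_alloc() places the bpf_binary_header at the start of
	 * the page-aligned allocation, with the code starting within that same
	 * first page, so masking the code address with PAGE_MASK recovers it.
	 */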
1329 
1330 	if (fp->jited)
1331 		bpf_jit_binary_free(bpf_hdr);
1332 
1333 	bpf_prog_unlock_free(fp);
1334 }
1335