/*
 * bpf_jit_comp64.c: eBPF JIT compiler
 *
 * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
 *		  IBM Corporation
 *
 * Based on the powerpc classic BPF JIT compiler by Matt Evans
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <asm/kprobes.h>
#include <linux/bpf.h>

#include "bpf_jit64.h"

static void bpf_jit_fill_ill_insns(void *area, unsigned int size)
{
	memset32(area, BREAKPOINT_INSTRUCTION, size/4);
}

static inline void bpf_flush_icache(void *start, void *end)
{
	smp_wmb();
	flush_icache_range((unsigned long)start, (unsigned long)end);
}
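
/*
 * A note on the two helpers above: filling the buffer with trap
 * instructions means a stray branch into unpopulated JIT space traps
 * instead of executing stale bytes, and the write barrier orders the
 * instruction stores before the icache is invalidated.
 */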

static inline bool bpf_is_seen_register(struct codegen_context *ctx, int i)
{
	return (ctx->seen & (1 << (31 - b2p[i])));
}

static inline void bpf_set_seen_register(struct codegen_context *ctx, int i)
{
	ctx->seen |= (1 << (31 - b2p[i]));
}
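
/*
 * A sketch of the bookkeeping above, assuming the usual b2p[] mapping
 * from bpf_jit64.h (e.g. BPF_REG_6 in r27): seeing BPF_REG_6 sets
 * 1 << (31 - 27) in ctx->seen, i.e. MSB-0 bit 27, so each GPR is
 * tracked by the seen bit matching its register number.
 */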

static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
{
	/*
	 * We only need a stack frame if:
	 * - we call other functions (kernel helpers), or
	 * - the bpf program uses its stack area
	 * The latter condition is deduced from the usage of BPF_REG_FP
	 */
	return ctx->seen & SEEN_FUNC || bpf_is_seen_register(ctx, BPF_REG_FP);
}

/*
 * When not setting up our own stackframe, the redzone usage is:
 *
 *		[	prev sp		] <-------------
 *		[	  ...		]		|
 * sp (r1) --->	[    stack pointer	] --------------
 *		[   nv gpr save area	] 8*8
 *		[    tail_call_cnt	] 8
 *		[    local_tmp_var	] 8
 *		[   unused red zone	] 208 bytes protected
 */
static int bpf_jit_stack_local(struct codegen_context *ctx)
{
	if (bpf_has_stack_frame(ctx))
		return STACK_FRAME_MIN_SIZE + MAX_BPF_STACK;
	else
		return -(BPF_PPC_STACK_SAVE + 16);
}
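
/*
 * With the red zone layout above, and assuming BPF_PPC_STACK_SAVE is
 * the 8*8-byte GPR save area, the local temp slot sits at sp - 80 and
 * the tail call count, returned below, one slot above it at sp - 72.
 */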

static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
{
	return bpf_jit_stack_local(ctx) + 8;
}

static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
{
	if (reg >= BPF_PPC_NVR_MIN && reg < 32)
		return (bpf_has_stack_frame(ctx) ? BPF_PPC_STACKFRAME : 0)
							- (8 * (32 - reg));

	pr_err("BPF JIT is asking about unknown registers");
	BUG();
}

static void bpf_jit_emit_skb_loads(u32 *image, struct codegen_context *ctx)
{
	/*
	 * Load skb->len and skb->data_len
	 * r3 points to skb
	 */
	PPC_LWZ(b2p[SKB_HLEN_REG], 3, offsetof(struct sk_buff, len));
	PPC_LWZ(b2p[TMP_REG_1], 3, offsetof(struct sk_buff, data_len));
	/* header_len = len - data_len */
	PPC_SUB(b2p[SKB_HLEN_REG], b2p[SKB_HLEN_REG], b2p[TMP_REG_1]);

	/* skb->data pointer */
	PPC_BPF_LL(b2p[SKB_DATA_REG], 3, offsetof(struct sk_buff, data));
}

static void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
{
	int i;

	/*
	 * Initialize tail_call_cnt if we do tail calls.
	 * Otherwise, put in NOPs so that it can be skipped when we are
	 * invoked through a tail call.
	 */
	if (ctx->seen & SEEN_TAILCALL) {
		PPC_LI(b2p[TMP_REG_1], 0);
		/* this goes in the redzone */
		PPC_BPF_STL(b2p[TMP_REG_1], 1, -(BPF_PPC_STACK_SAVE + 8));
	} else {
		PPC_NOP();
		PPC_NOP();
	}

#define BPF_TAILCALL_PROLOGUE_SIZE	8
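
	/*
	 * The two instructions above (li + std, or the two nops) are the 8
	 * bytes BPF_TAILCALL_PROLOGUE_SIZE accounts for: tail calls enter at
	 * bpf_func + this offset so that a callee reached via tail call does
	 * not reset tail_call_cnt.
	 */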

	if (bpf_has_stack_frame(ctx)) {
		/*
		 * We need a stack frame, but we don't necessarily need to
		 * save/restore LR unless we call other functions
		 */
		if (ctx->seen & SEEN_FUNC) {
			EMIT(PPC_INST_MFLR | __PPC_RT(R0));
			PPC_BPF_STL(0, 1, PPC_LR_STKOFF);
		}

		PPC_BPF_STLU(1, 1, -BPF_PPC_STACKFRAME);
	}

	/*
	 * Back up non-volatile regs -- BPF registers 6-10
	 * If we haven't created our own stack frame, we save these
	 * in the protected zone below the previous stack frame
	 */
	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
		if (bpf_is_seen_register(ctx, i))
			PPC_BPF_STL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i]));

	/*
	 * Save additional non-volatile regs if we cache skb
	 * Also, setup skb data
	 */
	if (ctx->seen & SEEN_SKB) {
		PPC_BPF_STL(b2p[SKB_HLEN_REG], 1,
				bpf_jit_stack_offsetof(ctx, b2p[SKB_HLEN_REG]));
		PPC_BPF_STL(b2p[SKB_DATA_REG], 1,
				bpf_jit_stack_offsetof(ctx, b2p[SKB_DATA_REG]));
		bpf_jit_emit_skb_loads(image, ctx);
	}

	/* Setup frame pointer to point to the bpf stack area */
	if (bpf_is_seen_register(ctx, BPF_REG_FP))
		PPC_ADDI(b2p[BPF_REG_FP], 1,
				STACK_FRAME_MIN_SIZE + MAX_BPF_STACK);
}

static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
{
	int i;

	/* Restore NVRs */
	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
		if (bpf_is_seen_register(ctx, i))
			PPC_BPF_LL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i]));

	/* Restore non-volatile registers used for skb cache */
	if (ctx->seen & SEEN_SKB) {
		PPC_BPF_LL(b2p[SKB_HLEN_REG], 1,
				bpf_jit_stack_offsetof(ctx, b2p[SKB_HLEN_REG]));
		PPC_BPF_LL(b2p[SKB_DATA_REG], 1,
				bpf_jit_stack_offsetof(ctx, b2p[SKB_DATA_REG]));
	}

	/* Tear down our stack frame */
	if (bpf_has_stack_frame(ctx)) {
		PPC_ADDI(1, 1, BPF_PPC_STACKFRAME);
		if (ctx->seen & SEEN_FUNC) {
			PPC_BPF_LL(0, 1, PPC_LR_STKOFF);
			PPC_MTLR(0);
		}
	}
}

static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
	bpf_jit_emit_common_epilogue(image, ctx);

	/* Move result to r3 */
	PPC_MR(3, b2p[BPF_REG_0]);

	PPC_BLR();
}

static void bpf_jit_emit_func_call(u32 *image, struct codegen_context *ctx, u64 func)
{
	unsigned int i, ctx_idx = ctx->idx;

	/* Load function address into r12 */
	PPC_LI64(12, func);

	/* For bpf-to-bpf function calls, the callee's address is unknown
	 * until the last extra pass. As seen above, we use PPC_LI64() to
	 * load the callee's address, but it may emit fewer than five
	 * instructions for some addresses.
	 *
	 * Since we don't want the number of instructions emitted to change
	 * between passes, we pad the optimized PPC_LI64() call with NOPs to
	 * guarantee that we always have a five-instruction sequence, which
	 * is the maximum that PPC_LI64() can emit.
	 */
	for (i = ctx->idx - ctx_idx; i < 5; i++)
		PPC_NOP();

#ifdef PPC64_ELF_ABI_v1
	/*
	 * Load TOC from function descriptor at offset 8.
	 * We can clobber r2 since we get called through a
	 * function pointer (so caller will save/restore r2)
	 * and since we don't use a TOC ourself.
	 */
	PPC_BPF_LL(2, 12, 8);
	/* Load actual entry point from function descriptor */
	PPC_BPF_LL(12, 12, 0);
#endif

	PPC_MTLR(12);
	PPC_BLRL();
}
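
/*
 * For reference, a sketch of the worst-case PPC_LI64() expansion that
 * the padding above accounts for (assuming the helper from bpf_jit.h):
 *
 *	lis  r12, addr[63:48]
 *	ori  r12, r12, addr[47:32]
 *	sldi r12, r12, 32
 *	oris r12, r12, addr[31:16]
 *	ori  r12, r12, addr[15:0]
 */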

static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
{
	/*
	 * By now, the eBPF program has already set up parameters in r3, r4 and r5
	 * r3/BPF_REG_1 - pointer to ctx -- passed as is to the next bpf program
	 * r4/BPF_REG_2 - pointer to bpf_array
	 * r5/BPF_REG_3 - index in bpf_array
	 */
	int b2p_bpf_array = b2p[BPF_REG_2];
	int b2p_index = b2p[BPF_REG_3];

	/*
	 * if (index >= array->map.max_entries)
	 *   goto out;
	 */
	PPC_LWZ(b2p[TMP_REG_1], b2p_bpf_array, offsetof(struct bpf_array, map.max_entries));
	PPC_RLWINM(b2p_index, b2p_index, 0, 0, 31);
	PPC_CMPLW(b2p_index, b2p[TMP_REG_1]);
	PPC_BCC(COND_GE, out);

	/*
	 * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
	 *   goto out;
	 */
	PPC_BPF_LL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
	PPC_CMPLWI(b2p[TMP_REG_1], MAX_TAIL_CALL_CNT);
	PPC_BCC(COND_GT, out);

	/*
	 * tail_call_cnt++;
	 */
	PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], 1);
	PPC_BPF_STL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));

	/* prog = array->ptrs[index]; */
	PPC_MULI(b2p[TMP_REG_1], b2p_index, 8);
	PPC_ADD(b2p[TMP_REG_1], b2p[TMP_REG_1], b2p_bpf_array);
	PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs));

	/*
	 * if (prog == NULL)
	 *   goto out;
	 */
	PPC_CMPLDI(b2p[TMP_REG_1], 0);
	PPC_BCC(COND_EQ, out);

	/* goto *(prog->bpf_func + prologue_size); */
	PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func));
#ifdef PPC64_ELF_ABI_v1
	/* skip past the function descriptor */
	PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1],
			FUNCTION_DESCR_SIZE + BPF_TAILCALL_PROLOGUE_SIZE);
#else
	PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], BPF_TAILCALL_PROLOGUE_SIZE);
#endif
	PPC_MTCTR(b2p[TMP_REG_1]);

	/* tear down stack, restore NVRs, ... */
	bpf_jit_emit_common_epilogue(image, ctx);

	PPC_BCTR();
	/* out: */
}
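
/*
 * Note that the bctr above does not return: a tail call replaces the
 * running program, so we tear down our frame first and enter the callee
 * BPF_TAILCALL_PROLOGUE_SIZE bytes past its entry point, skipping its
 * tail_call_cnt initialization so the count carries across the chain.
 */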

/* Assemble the body code between the prologue & epilogue */
static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
			      struct codegen_context *ctx,
			      u32 *addrs)
{
	const struct bpf_insn *insn = fp->insnsi;
	int flen = fp->len;
	int i;

	/* Start of epilogue code - will only be valid 2nd pass onwards */
	u32 exit_addr = addrs[flen];

	for (i = 0; i < flen; i++) {
		u32 code = insn[i].code;
		u32 dst_reg = b2p[insn[i].dst_reg];
		u32 src_reg = b2p[insn[i].src_reg];
		s16 off = insn[i].off;
		s32 imm = insn[i].imm;
		u64 imm64;
		u8 *func;
		u32 true_cond;
		u32 tmp_idx;

		/*
		 * addrs[] maps a BPF bytecode address into a real offset from
		 * the start of the body code.
		 */
		addrs[i] = ctx->idx * 4;

		/*
		 * As an optimization, we note down which non-volatile registers
		 * are used so that we can only save/restore those in our
		 * prologue and epilogue. We do this here regardless of whether
		 * the actual BPF instruction uses src/dst registers or not
		 * (for instance, BPF_CALL does not use them). The expectation
		 * is that those instructions will have src_reg/dst_reg set to
		 * 0. Even otherwise, we just lose some prologue/epilogue
		 * optimization but everything else should work without
		 * any issues.
		 */
		if (dst_reg >= BPF_PPC_NVR_MIN && dst_reg < 32)
			bpf_set_seen_register(ctx, insn[i].dst_reg);
		if (src_reg >= BPF_PPC_NVR_MIN && src_reg < 32)
			bpf_set_seen_register(ctx, insn[i].src_reg);

		switch (code) {
		/*
		 * Arithmetic operations: ADD/SUB/MUL/DIV/MOD/NEG
		 */
		case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */
		case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */
			PPC_ADD(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */
		case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */
			PPC_SUB(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
		case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
		case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
		case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
			if (BPF_OP(code) == BPF_SUB)
				imm = -imm;
			if (imm) {
				if (imm >= -32768 && imm < 32768)
					PPC_ADDI(dst_reg, dst_reg, IMM_L(imm));
				else {
					PPC_LI32(b2p[TMP_REG_1], imm);
					PPC_ADD(dst_reg, dst_reg, b2p[TMP_REG_1]);
				}
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
		case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
			if (BPF_CLASS(code) == BPF_ALU)
				PPC_MULW(dst_reg, dst_reg, src_reg);
			else
				PPC_MULD(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */
		case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */
			if (imm >= -32768 && imm < 32768)
				PPC_MULI(dst_reg, dst_reg, IMM_L(imm));
			else {
				PPC_LI32(b2p[TMP_REG_1], imm);
				if (BPF_CLASS(code) == BPF_ALU)
					PPC_MULW(dst_reg, dst_reg,
							b2p[TMP_REG_1]);
				else
					PPC_MULD(dst_reg, dst_reg,
							b2p[TMP_REG_1]);
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
		case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
			PPC_CMPWI(src_reg, 0);
			PPC_BCC_SHORT(COND_NE, (ctx->idx * 4) + 12);
			PPC_LI(b2p[BPF_REG_0], 0);
			PPC_JMP(exit_addr);
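			/*
			 * The conditional branch above skips 12 bytes:
			 * itself plus the li + b "return 0" pair, so a
			 * non-zero divisor falls through to the divide.
			 * The 64-bit variant below uses the same guard.
			 */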
			if (BPF_OP(code) == BPF_MOD) {
				PPC_DIVWU(b2p[TMP_REG_1], dst_reg, src_reg);
				PPC_MULW(b2p[TMP_REG_1], src_reg,
						b2p[TMP_REG_1]);
				PPC_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]);
			} else
				PPC_DIVWU(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
		case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
			PPC_CMPDI(src_reg, 0);
			PPC_BCC_SHORT(COND_NE, (ctx->idx * 4) + 12);
			PPC_LI(b2p[BPF_REG_0], 0);
			PPC_JMP(exit_addr);
			if (BPF_OP(code) == BPF_MOD) {
				PPC_DIVDU(b2p[TMP_REG_1], dst_reg, src_reg);
				PPC_MULD(b2p[TMP_REG_1], src_reg,
						b2p[TMP_REG_1]);
				PPC_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]);
			} else
				PPC_DIVDU(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
		case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
		case BPF_ALU64 | BPF_MOD | BPF_K: /* dst %= imm */
		case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
			if (imm == 0)
				return -EINVAL;
			else if (imm == 1)
				goto bpf_alu32_trunc;

			PPC_LI32(b2p[TMP_REG_1], imm);
			switch (BPF_CLASS(code)) {
			case BPF_ALU:
				if (BPF_OP(code) == BPF_MOD) {
					PPC_DIVWU(b2p[TMP_REG_2], dst_reg,
							b2p[TMP_REG_1]);
					PPC_MULW(b2p[TMP_REG_1],
							b2p[TMP_REG_1],
							b2p[TMP_REG_2]);
					PPC_SUB(dst_reg, dst_reg,
							b2p[TMP_REG_1]);
				} else
					PPC_DIVWU(dst_reg, dst_reg,
							b2p[TMP_REG_1]);
				break;
			case BPF_ALU64:
				if (BPF_OP(code) == BPF_MOD) {
					PPC_DIVDU(b2p[TMP_REG_2], dst_reg,
							b2p[TMP_REG_1]);
					PPC_MULD(b2p[TMP_REG_1],
							b2p[TMP_REG_1],
							b2p[TMP_REG_2]);
					PPC_SUB(dst_reg, dst_reg,
							b2p[TMP_REG_1]);
				} else
					PPC_DIVDU(dst_reg, dst_reg,
							b2p[TMP_REG_1]);
				break;
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */
		case BPF_ALU64 | BPF_NEG: /* dst = -dst */
			PPC_NEG(dst_reg, dst_reg);
			goto bpf_alu32_trunc;

		/*
		 * Logical operations: AND/OR/XOR/[A]LSH/[A]RSH
		 */
		case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */
		case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
			PPC_AND(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */
		case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
			if (!IMM_H(imm))
				PPC_ANDI(dst_reg, dst_reg, IMM_L(imm));
			else {
				/* Sign-extended */
				PPC_LI32(b2p[TMP_REG_1], imm);
				PPC_AND(dst_reg, dst_reg, b2p[TMP_REG_1]);
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
		case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
			PPC_OR(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_OR | BPF_K: /* dst = (u32) dst | (u32) imm */
		case BPF_ALU64 | BPF_OR | BPF_K: /* dst = dst | imm */
			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
				/* Sign-extended */
				PPC_LI32(b2p[TMP_REG_1], imm);
				PPC_OR(dst_reg, dst_reg, b2p[TMP_REG_1]);
			} else {
				if (IMM_L(imm))
					PPC_ORI(dst_reg, dst_reg, IMM_L(imm));
				if (IMM_H(imm))
					PPC_ORIS(dst_reg, dst_reg, IMM_H(imm));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */
		case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */
			PPC_XOR(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */
		case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */
			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
				/* Sign-extended */
				PPC_LI32(b2p[TMP_REG_1], imm);
				PPC_XOR(dst_reg, dst_reg, b2p[TMP_REG_1]);
			} else {
				if (IMM_L(imm))
					PPC_XORI(dst_reg, dst_reg, IMM_L(imm));
				if (IMM_H(imm))
					PPC_XORIS(dst_reg, dst_reg, IMM_H(imm));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */
			/* slw clears top 32 bits */
			PPC_SLW(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */
			PPC_SLD(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<= (u32) imm */
			/* with imm 0, we still need to clear top 32 bits */
			PPC_SLWI(dst_reg, dst_reg, imm);
			break;
		case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<= imm */
			if (imm != 0)
				PPC_SLDI(dst_reg, dst_reg, imm);
			break;
		case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */
			PPC_SRW(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */
			PPC_SRD(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */
			PPC_SRWI(dst_reg, dst_reg, imm);
			break;
		case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */
			if (imm != 0)
				PPC_SRDI(dst_reg, dst_reg, imm);
			break;
		case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
			PPC_SRAD(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
			if (imm != 0)
				PPC_SRADI(dst_reg, dst_reg, imm);
			break;

		/*
		 * MOV
		 */
		case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
		case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
			PPC_MR(dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */
		case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */
			PPC_LI32(dst_reg, imm);
			if (imm < 0)
				goto bpf_alu32_trunc;
			break;

bpf_alu32_trunc:
		/* Truncate to 32-bits */
		if (BPF_CLASS(code) == BPF_ALU)
			PPC_RLWINM(dst_reg, dst_reg, 0, 0, 31);
		break;
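
		/*
		 * Note: rlwinm rd,rs,0,0,31 above keeps MSB-0 bits 0-31,
		 * i.e. the low word, so 32-bit ALU results are
		 * zero-extended into the 64-bit register.
		 */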

		/*
		 * BPF_FROM_BE/LE
		 */
		case BPF_ALU | BPF_END | BPF_FROM_LE:
		case BPF_ALU | BPF_END | BPF_FROM_BE:
#ifdef __BIG_ENDIAN__
			if (BPF_SRC(code) == BPF_FROM_BE)
				goto emit_clear;
#else /* !__BIG_ENDIAN__ */
			if (BPF_SRC(code) == BPF_FROM_LE)
				goto emit_clear;
#endif
			switch (imm) {
			case 16:
				/* Rotate 8 bits left & mask with 0x0000ff00 */
				PPC_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 16, 23);
				/* Rotate 8 bits right & insert LSB to reg */
				PPC_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 24, 31);
				/* Move result back to dst_reg */
				PPC_MR(dst_reg, b2p[TMP_REG_1]);
				break;
			case 32:
				/*
				 * Rotate word left by 8 bits:
				 * 2 bytes are already in their final position
				 * -- byte 2 and 4 (of bytes 1, 2, 3 and 4)
				 */
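				/*
				 * For example, with dst = 0x11223344 the
				 * rotate gives 0x22334411, inserting byte 1
				 * gives 0x44334411, and inserting byte 3
				 * yields the swapped value 0x44332211.
				 */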
				PPC_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 0, 31);
				/* Rotate 24 bits and insert byte 1 */
				PPC_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 0, 7);
				/* Rotate 24 bits and insert byte 3 */
				PPC_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 16, 23);
				PPC_MR(dst_reg, b2p[TMP_REG_1]);
				break;
			case 64:
				/*
				 * Way easier and faster(?) to store the value
				 * into stack and then use ldbrx
				 *
				 * ctx->seen will be reliable in pass2, but
				 * the instructions generated will remain the
				 * same across all passes
				 */
				PPC_BPF_STL(dst_reg, 1, bpf_jit_stack_local(ctx));
				PPC_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx));
				PPC_LDBRX(dst_reg, 0, b2p[TMP_REG_1]);
				break;
			}
			break;

emit_clear:
			switch (imm) {
			case 16:
				/* zero-extend 16 bits into 64 bits */
				PPC_RLDICL(dst_reg, dst_reg, 0, 48);
				break;
			case 32:
				/* zero-extend 32 bits into 64 bits */
				PPC_RLDICL(dst_reg, dst_reg, 0, 32);
				break;
			case 64:
				/* nop */
				break;
			}
			break;

		/*
		 * BPF_ST(X)
		 */
		case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI(b2p[TMP_REG_1], imm);
				src_reg = b2p[TMP_REG_1];
			}
			PPC_STB(src_reg, dst_reg, off);
			break;
		case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI(b2p[TMP_REG_1], imm);
				src_reg = b2p[TMP_REG_1];
			}
			PPC_STH(src_reg, dst_reg, off);
			break;
		case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI32(b2p[TMP_REG_1], imm);
				src_reg = b2p[TMP_REG_1];
			}
			PPC_STW(src_reg, dst_reg, off);
			break;
		case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI32(b2p[TMP_REG_1], imm);
				src_reg = b2p[TMP_REG_1];
			}
			PPC_BPF_STL(src_reg, dst_reg, off);
			break;

		/*
		 * BPF_STX XADD (atomic_add)
		 */
		/* *(u32 *)(dst + off) += src */
		case BPF_STX | BPF_XADD | BPF_W:
			/* Get EA into TMP_REG_1 */
			PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
			tmp_idx = ctx->idx * 4;
			/* load value from memory into TMP_REG_2 */
			PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
			/* add value from src_reg into this */
			PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
			/* store result back */
			PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
			/* we're done if this succeeded */
			PPC_BCC_SHORT(COND_NE, tmp_idx);
			break;
		/* *(u64 *)(dst + off) += src */
		case BPF_STX | BPF_XADD | BPF_DW:
			PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
			tmp_idx = ctx->idx * 4;
			PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
			PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
			PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
			PPC_BCC_SHORT(COND_NE, tmp_idx);
			break;
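
		/*
		 * A note on the XADD sequences above: stwcx./stdcx. leave
		 * CR0 "ne" if the reservation taken by the paired
		 * lwarx/ldarx was lost, so the bcc loops back to tmp_idx
		 * and the load-add-store is retried until it commits
		 * atomically.
		 */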

		/*
		 * BPF_LDX
		 */
		/* dst = *(u8 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_B:
			PPC_LBZ(dst_reg, src_reg, off);
			break;
		/* dst = *(u16 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_H:
			PPC_LHZ(dst_reg, src_reg, off);
			break;
		/* dst = *(u32 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_W:
			PPC_LWZ(dst_reg, src_reg, off);
			break;
		/* dst = *(u64 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_DW:
			PPC_BPF_LL(dst_reg, src_reg, off);
			break;

		/*
		 * Doubleword load
		 * 16 byte instruction that uses two 'struct bpf_insn'
		 */
		case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
			imm64 = ((u64)(u32) insn[i].imm) |
				    (((u64)(u32) insn[i+1].imm) << 32);
			/* Adjust for two bpf instructions */
			addrs[++i] = ctx->idx * 4;
			PPC_LI64(dst_reg, imm64);
			break;
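
		/*
		 * Recording an addrs[] entry for the second half of the
		 * pair above keeps later addrs[i + 1 + off] branch-target
		 * lookups consistent, since BPF offsets count both halves.
		 */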

		/*
		 * Return/Exit
		 */
		case BPF_JMP | BPF_EXIT:
			/*
			 * If this isn't the very last instruction, branch to
			 * the epilogue. If we _are_ the last instruction,
			 * we'll just fall through to the epilogue.
			 */
			if (i != flen - 1)
				PPC_JMP(exit_addr);
			/* else fall through to the epilogue */
			break;

		/*
		 * Call kernel helper
		 */
		case BPF_JMP | BPF_CALL:
			ctx->seen |= SEEN_FUNC;
			func = (u8 *) __bpf_call_base + imm;

			/* Save skb pointer if we need to re-cache skb data */
			if ((ctx->seen & SEEN_SKB) &&
			    bpf_helper_changes_pkt_data(func))
				PPC_BPF_STL(3, 1, bpf_jit_stack_local(ctx));

			bpf_jit_emit_func_call(image, ctx, (u64)func);

			/* move return value from r3 to BPF_REG_0 */
			PPC_MR(b2p[BPF_REG_0], 3);

			/* refresh skb cache */
			if ((ctx->seen & SEEN_SKB) &&
			    bpf_helper_changes_pkt_data(func)) {
				/* reload skb pointer to r3 */
				PPC_BPF_LL(3, 1, bpf_jit_stack_local(ctx));
				bpf_jit_emit_skb_loads(image, ctx);
			}
			break;

		/*
		 * Jumps and branches
		 */
		case BPF_JMP | BPF_JA:
			PPC_JMP(addrs[i + 1 + off]);
			break;

		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSGT | BPF_K:
		case BPF_JMP | BPF_JSGT | BPF_X:
			true_cond = COND_GT;
			goto cond_branch;
		case BPF_JMP | BPF_JLT | BPF_K:
		case BPF_JMP | BPF_JLT | BPF_X:
		case BPF_JMP | BPF_JSLT | BPF_K:
		case BPF_JMP | BPF_JSLT | BPF_X:
			true_cond = COND_LT;
			goto cond_branch;
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JSGE | BPF_K:
		case BPF_JMP | BPF_JSGE | BPF_X:
			true_cond = COND_GE;
			goto cond_branch;
		case BPF_JMP | BPF_JLE | BPF_K:
		case BPF_JMP | BPF_JLE | BPF_X:
		case BPF_JMP | BPF_JSLE | BPF_K:
		case BPF_JMP | BPF_JSLE | BPF_X:
			true_cond = COND_LE;
			goto cond_branch;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
			true_cond = COND_EQ;
			goto cond_branch;
		case BPF_JMP | BPF_JNE | BPF_K:
		case BPF_JMP | BPF_JNE | BPF_X:
			true_cond = COND_NE;
			goto cond_branch;
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			true_cond = COND_NE;
			/* Fall through */

cond_branch:
			switch (code) {
			case BPF_JMP | BPF_JGT | BPF_X:
			case BPF_JMP | BPF_JLT | BPF_X:
			case BPF_JMP | BPF_JGE | BPF_X:
			case BPF_JMP | BPF_JLE | BPF_X:
			case BPF_JMP | BPF_JEQ | BPF_X:
			case BPF_JMP | BPF_JNE | BPF_X:
				/* unsigned comparison */
				PPC_CMPLD(dst_reg, src_reg);
				break;
			case BPF_JMP | BPF_JSGT | BPF_X:
			case BPF_JMP | BPF_JSLT | BPF_X:
			case BPF_JMP | BPF_JSGE | BPF_X:
			case BPF_JMP | BPF_JSLE | BPF_X:
				/* signed comparison */
				PPC_CMPD(dst_reg, src_reg);
				break;
			case BPF_JMP | BPF_JSET | BPF_X:
				PPC_AND_DOT(b2p[TMP_REG_1], dst_reg, src_reg);
				break;
			case BPF_JMP | BPF_JNE | BPF_K:
			case BPF_JMP | BPF_JEQ | BPF_K:
			case BPF_JMP | BPF_JGT | BPF_K:
			case BPF_JMP | BPF_JLT | BPF_K:
			case BPF_JMP | BPF_JGE | BPF_K:
			case BPF_JMP | BPF_JLE | BPF_K:
				/*
				 * Need sign-extended load, so only positive
				 * values can be used as imm in cmpldi
				 */
				if (imm >= 0 && imm < 32768)
					PPC_CMPLDI(dst_reg, imm);
				else {
					/* sign-extending load */
					PPC_LI32(b2p[TMP_REG_1], imm);
					/* ... but unsigned comparison */
					PPC_CMPLD(dst_reg, b2p[TMP_REG_1]);
				}
				break;
			case BPF_JMP | BPF_JSGT | BPF_K:
			case BPF_JMP | BPF_JSLT | BPF_K:
			case BPF_JMP | BPF_JSGE | BPF_K:
			case BPF_JMP | BPF_JSLE | BPF_K:
				/*
				 * signed comparison, so any 16-bit value
				 * can be used in cmpdi
				 */
				if (imm >= -32768 && imm < 32768)
					PPC_CMPDI(dst_reg, imm);
				else {
					PPC_LI32(b2p[TMP_REG_1], imm);
					PPC_CMPD(dst_reg, b2p[TMP_REG_1]);
				}
				break;
			case BPF_JMP | BPF_JSET | BPF_K:
				/* andi does not sign-extend the immediate */
				if (imm >= 0 && imm < 32768)
					/* PPC_ANDI is _only/always_ dot-form */
					PPC_ANDI(b2p[TMP_REG_1], dst_reg, imm);
				else {
					PPC_LI32(b2p[TMP_REG_1], imm);
					PPC_AND_DOT(b2p[TMP_REG_1], dst_reg,
						    b2p[TMP_REG_1]);
				}
				break;
			}
			PPC_BCC(true_cond, addrs[i + 1 + off]);
			break;

		/*
		 * Loads from packet header/data
		 * Assume 32-bit input value in imm and X (src_reg)
		 */

		/* Absolute loads */
		case BPF_LD | BPF_W | BPF_ABS:
			func = (u8 *)CHOOSE_LOAD_FUNC(imm, sk_load_word);
			goto common_load_abs;
		case BPF_LD | BPF_H | BPF_ABS:
			func = (u8 *)CHOOSE_LOAD_FUNC(imm, sk_load_half);
			goto common_load_abs;
		case BPF_LD | BPF_B | BPF_ABS:
			func = (u8 *)CHOOSE_LOAD_FUNC(imm, sk_load_byte);
common_load_abs:
			/*
			 * Load from [imm]
			 * Load into r4, which can just be passed onto
			 *  skb load helpers as the second parameter
			 */
			PPC_LI32(4, imm);
			goto common_load;

		/* Indirect loads */
		case BPF_LD | BPF_W | BPF_IND:
			func = (u8 *)sk_load_word;
			goto common_load_ind;
		case BPF_LD | BPF_H | BPF_IND:
			func = (u8 *)sk_load_half;
			goto common_load_ind;
		case BPF_LD | BPF_B | BPF_IND:
			func = (u8 *)sk_load_byte;
common_load_ind:
			/*
			 * Load from [src_reg + imm]
			 * Treat src_reg as a 32-bit value
			 */
			PPC_EXTSW(4, src_reg);
			if (imm) {
				if (imm >= -32768 && imm < 32768)
					PPC_ADDI(4, 4, IMM_L(imm));
				else {
					PPC_LI32(b2p[TMP_REG_1], imm);
					PPC_ADD(4, 4, b2p[TMP_REG_1]);
				}
			}

common_load:
			ctx->seen |= SEEN_SKB;
			ctx->seen |= SEEN_FUNC;
			bpf_jit_emit_func_call(image, ctx, (u64)func);

			/*
			 * Helper returns 'lt' condition on error, and an
			 * appropriate return value in BPF_REG_0
			 */
			PPC_BCC(COND_LT, exit_addr);
			break;

		/*
		 * Tail call
		 */
		case BPF_JMP | BPF_TAIL_CALL:
			ctx->seen |= SEEN_TAILCALL;
			bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
			break;

		default:
			/*
			 * The filter contains something cruel & unusual.
			 * We don't handle it, but also there shouldn't be
			 * anything missing from our list.
			 */
			pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n",
					code, i);
			return -ENOTSUPP;
		}
	}

	/* Set end-of-body-code address for exit. */
	addrs[i] = ctx->idx * 4;

	return 0;
}

struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
{
	u32 proglen;
	u32 alloclen;
	u8 *image = NULL;
	u32 *code_base;
	u32 *addrs;
	struct codegen_context cgctx;
	int pass;
	int flen;
	struct bpf_binary_header *bpf_hdr;
	struct bpf_prog *org_fp = fp;
	struct bpf_prog *tmp_fp;
	bool bpf_blinded = false;

	if (!bpf_jit_enable)
		return org_fp;

	tmp_fp = bpf_jit_blind_constants(org_fp);
	if (IS_ERR(tmp_fp))
		return org_fp;

	if (tmp_fp != org_fp) {
		bpf_blinded = true;
		fp = tmp_fp;
	}

	flen = fp->len;
	addrs = kzalloc((flen+1) * sizeof(*addrs), GFP_KERNEL);
	if (addrs == NULL) {
		fp = org_fp;
		goto out;
	}

	memset(&cgctx, 0, sizeof(struct codegen_context));

	/* Scouting faux-generate pass 0 */
	if (bpf_jit_build_body(fp, 0, &cgctx, addrs)) {
		/* We hit something illegal or unsupported. */
		fp = org_fp;
		goto out;
	}

	/*
	 * Pretend to build prologue, given the features we've seen.  This will
	 * update cgctx.idx as it pretends to output instructions, then we can
	 * calculate total size from idx.
	 */
	bpf_jit_build_prologue(0, &cgctx);
	bpf_jit_build_epilogue(0, &cgctx);

	proglen = cgctx.idx * 4;
	alloclen = proglen + FUNCTION_DESCR_SIZE;

	bpf_hdr = bpf_jit_binary_alloc(alloclen, &image, 4,
			bpf_jit_fill_ill_insns);
	if (!bpf_hdr) {
		fp = org_fp;
		goto out;
	}

	code_base = (u32 *)(image + FUNCTION_DESCR_SIZE);

	/* Code generation passes 1-2 */
	for (pass = 1; pass < 3; pass++) {
		/* Now build the prologue, body code & epilogue for real. */
		cgctx.idx = 0;
		bpf_jit_build_prologue(code_base, &cgctx);
		bpf_jit_build_body(fp, code_base, &cgctx, addrs);
		bpf_jit_build_epilogue(code_base, &cgctx);

		if (bpf_jit_enable > 1)
			pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
				proglen - (cgctx.idx * 4), cgctx.seen);
	}
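
	/*
	 * Two real passes are needed because forward branches read addrs[]
	 * entries that are only finalized once the whole body has been
	 * emitted; pass 2 re-emits everything with all targets known.
	 */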

	if (bpf_jit_enable > 1)
		/*
		 * Note that we dump code_base rather than image, since
		 * the opcodes live in code_base.
		 */
		bpf_jit_dump(flen, proglen, pass, code_base);

#ifdef PPC64_ELF_ABI_v1
	/* Function descriptor nastiness: Address + TOC */
	((u64 *)image)[0] = (u64)code_base;
	((u64 *)image)[1] = local_paca->kernel_toc;
#endif
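
	/*
	 * Under the ELFv1 ABI a function pointer refers to a descriptor
	 * (entry address plus TOC pointer) rather than to code, which is
	 * why FUNCTION_DESCR_SIZE bytes were reserved ahead of code_base
	 * and why bpf_jit_emit_func_call() dereferences the descriptor
	 * before branching.
	 */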

	fp->bpf_func = (void *)image;
	fp->jited = 1;
	fp->jited_len = alloclen;

	bpf_flush_icache(bpf_hdr, (u8 *)bpf_hdr + (bpf_hdr->pages * PAGE_SIZE));

out:
	kfree(addrs);

	if (bpf_blinded)
		bpf_jit_prog_release_other(fp, fp == org_fp ? tmp_fp : org_fp);

	return fp;
}

/* Overriding bpf_jit_free() as we don't set images read-only. */
void bpf_jit_free(struct bpf_prog *fp)
{
	unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
	struct bpf_binary_header *bpf_hdr = (void *)addr;

	if (fp->jited)
		bpf_jit_binary_free(bpf_hdr);

	bpf_prog_unlock_free(fp);
}