/* bpf_jit_comp.c : BPF JIT compiler
 *
 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
 * Internal BPF Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <asm/cacheflush.h>
#include <asm/set_memory.h>
#include <asm/nospec-branch.h>
#include <linux/bpf.h>

/*
 * assembly code in arch/x86/net/bpf_jit.S
 */
extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
extern u8 sk_load_word_positive_offset[], sk_load_half_positive_offset[];
extern u8 sk_load_byte_positive_offset[];
extern u8 sk_load_word_negative_offset[], sk_load_half_negative_offset[];
extern u8 sk_load_byte_negative_offset[];

static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
{
	if (len == 1)
		*ptr = bytes;
	else if (len == 2)
		*(u16 *)ptr = bytes;
	else {
		*(u32 *)ptr = bytes;
		barrier();
	}
	return ptr + len;
}

#define EMIT(bytes, len) \
	do { prog = emit_code(prog, bytes, len); cnt += len; } while (0)

#define EMIT1(b1)		EMIT(b1, 1)
#define EMIT2(b1, b2)		EMIT((b1) + ((b2) << 8), 2)
#define EMIT3(b1, b2, b3)	EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
#define EMIT4(b1, b2, b3, b4)   EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
#define EMIT1_off32(b1, off) \
	do { EMIT1(b1); EMIT(off, 4); } while (0)
#define EMIT2_off32(b1, b2, off) \
	do { EMIT2(b1, b2); EMIT(off, 4); } while (0)
#define EMIT3_off32(b1, b2, b3, off) \
	do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
#define EMIT4_off32(b1, b2, b3, b4, off) \
	do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
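
/* Usage sketch (illustrative): the EMITn macros pack up to four bytes,
 * little-endian, into one u32 before handing it to emit_code(). For
 * example EMIT3(0x48, 0x89, 0xC2) packs 0x48 + (0x89 << 8) + (0xC2 << 16)
 * and emits the 3-byte instruction 'mov rdx, rax' (REX.W prefix, MOV
 * opcode 0x89, ModRM byte 0xC2), advancing prog and cnt by 3.
 */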

static bool is_imm8(int value)
{
	return value <= 127 && value >= -128;
}

static bool is_simm32(s64 value)
{
	return value == (s64) (s32) value;
}
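
/* These predicates choose between short and long x86 encodings:
 * is_imm8(127) and is_imm8(-128) are true, so such values fit an 8-bit
 * displacement or immediate, while is_imm8(128) forces the 32-bit form;
 * is_simm32() checks that a 64-bit offset survives the sign extension
 * x86 applies to 32-bit immediates and displacements.
 */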

/* mov dst, src */
#define EMIT_mov(DST, SRC) \
	do { if (DST != SRC) \
		EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
	} while (0)

static int bpf_size_to_x86_bytes(int bpf_size)
{
	if (bpf_size == BPF_W)
		return 4;
	else if (bpf_size == BPF_H)
		return 2;
	else if (bpf_size == BPF_B)
		return 1;
	else if (bpf_size == BPF_DW)
		return 4; /* imm32 */
	else
		return 0;
}

/* list of x86 conditional jump opcodes (. + s8)
 * Add 0x10 (and an extra 0x0f prefix) to generate the near form (. + s32)
 */
#define X86_JB  0x72
#define X86_JAE 0x73
#define X86_JE  0x74
#define X86_JNE 0x75
#define X86_JBE 0x76
#define X86_JA  0x77
#define X86_JL  0x7C
#define X86_JGE 0x7D
#define X86_JLE 0x7E
#define X86_JG  0x7F
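
/* Example of the conversion described above: X86_JE is 0x74 ('je .+s8');
 * adding 0x10 and prefixing 0x0f yields 0x0f 0x84 ('je .+s32'), which is
 * exactly how the emit_cond_jmp code in do_jit() emits jumps whose target
 * does not fit in a signed byte.
 */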

static void bpf_flush_icache(void *start, void *end)
{
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	smp_wmb();
	flush_icache_range((unsigned long)start, (unsigned long)end);
	set_fs(old_fs);
}

#define CHOOSE_LOAD_FUNC(K, func) \
	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
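
/* A sketch of how CHOOSE_LOAD_FUNC dispatches among the bpf_jit.S entry
 * points declared at the top of this file: a known non-negative offset K
 * picks func##_positive_offset, a negative K down to SKF_LL_OFF picks
 * func##_negative_offset, and any other negative K falls back to the
 * generic func, which sorts out the offset at runtime.
 */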

/* pick a register outside of BPF range for JIT internal work */
#define AUX_REG (MAX_BPF_JIT_REG + 1)

/* The following table maps BPF registers to x64 registers.
 *
 * x64 register r12 is unused: as a base address register in
 * load/store instructions it always needs an extra byte of
 * encoding, and it is callee saved.
 *
 *  r9 caches skb->len - skb->data_len
 * r10 caches skb->data, and is used for blinding (if enabled)
 */
static const int reg2hex[] = {
	[BPF_REG_0] = 0,  /* rax */
	[BPF_REG_1] = 7,  /* rdi */
	[BPF_REG_2] = 6,  /* rsi */
	[BPF_REG_3] = 2,  /* rdx */
	[BPF_REG_4] = 1,  /* rcx */
	[BPF_REG_5] = 0,  /* r8 */
	[BPF_REG_6] = 3,  /* rbx callee saved */
	[BPF_REG_7] = 5,  /* r13 callee saved */
	[BPF_REG_8] = 6,  /* r14 callee saved */
	[BPF_REG_9] = 7,  /* r15 callee saved */
	[BPF_REG_FP] = 5, /* rbp readonly */
	[BPF_REG_AX] = 2, /* r10 temp register */
	[AUX_REG] = 3,    /* r11 temp register */
};
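
/* Only the low 3 bits of the x64 register number are stored above:
 * BPF_REG_0 (rax) and BPF_REG_5 (r8) both encode as 0, and the REX
 * prefix bits added by add_1mod()/add_2mod() below supply the fourth
 * bit that distinguishes them.
 */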

/* is_ereg() == true if BPF register 'reg' maps to x64 r8..r15,
 * which need an extra byte of encoding.
 * rax, rcx, ..., rbp have simpler encoding.
 */
static bool is_ereg(u32 reg)
{
	return (1 << reg) & (BIT(BPF_REG_5) |
			     BIT(AUX_REG) |
			     BIT(BPF_REG_7) |
			     BIT(BPF_REG_8) |
			     BIT(BPF_REG_9) |
			     BIT(BPF_REG_AX));
}

/* add modifiers if 'reg' maps to x64 registers r8..r15 */
static u8 add_1mod(u8 byte, u32 reg)
{
	if (is_ereg(reg))
		byte |= 1;
	return byte;
}

static u8 add_2mod(u8 byte, u32 r1, u32 r2)
{
	if (is_ereg(r1))
		byte |= 1;
	if (is_ereg(r2))
		byte |= 4;
	return byte;
}

/* encode 'dst_reg' register into x64 opcode 'byte' */
static u8 add_1reg(u8 byte, u32 dst_reg)
{
	return byte + reg2hex[dst_reg];
}

/* encode 'dst_reg' and 'src_reg' registers into x64 opcode 'byte' */
static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
{
	return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
}
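
/* Worked example (illustrative) combining the helpers above: for
 * 'mov rax, rdi' with DST == BPF_REG_0 and SRC == BPF_REG_1,
 *
 *	EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC));
 *
 * evaluates to EMIT3(0x48, 0x89, 0xF8): REX.W, the MOV r/m64,r64 opcode
 * and ModRM 0xC0 + 0 + (7 << 3). This is exactly what the EMIT_mov macro
 * expands to.
 */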

static void jit_fill_hole(void *area, unsigned int size)
{
	/* fill whole space with int3 instructions */
	memset(area, 0xcc, size);
}
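
/* Padding with int3 (0xcc) means a stray jump into the unused tail of
 * the JIT image traps immediately instead of executing whatever bytes
 * happen to be there.
 */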

struct jit_context {
	int cleanup_addr; /* epilogue code offset */
	bool seen_ld_abs;
	bool seen_ax_reg;
};

/* maximum number of bytes emitted while JITing one eBPF insn */
#define BPF_MAX_INSN_SIZE	128
#define BPF_INSN_SAFETY		64
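
/* do_jit() emits each instruction into a local buffer of
 * BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY bytes before copying it into the
 * image; the extra BPF_INSN_SAFETY bytes of slack let its
 * 'ilen > BPF_MAX_INSN_SIZE' check fire before anything outside the
 * buffer is overwritten.
 */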

#define AUX_STACK_SPACE \
	(32 /* space for rbx, r13, r14, r15 */ + \
	 8 /* space for skb_copy_bits() buffer */)

#define PROLOGUE_SIZE 37

/* emit x64 prologue code for BPF program and check its size.
 * bpf_tail_call helper will skip it while jumping into another program
 */
static void emit_prologue(u8 **pprog, u32 stack_depth)
{
	u8 *prog = *pprog;
	int cnt = 0;

	EMIT1(0x55); /* push rbp */
	EMIT3(0x48, 0x89, 0xE5); /* mov rbp,rsp */

	/* sub rsp, rounded_stack_depth + AUX_STACK_SPACE */
	EMIT3_off32(0x48, 0x81, 0xEC,
		    round_up(stack_depth, 8) + AUX_STACK_SPACE);

	/* sub rbp, AUX_STACK_SPACE */
	EMIT4(0x48, 0x83, 0xED, AUX_STACK_SPACE);

	/* all classic BPF filters use R6(rbx), so save it */

	/* mov qword ptr [rbp+0],rbx */
	EMIT4(0x48, 0x89, 0x5D, 0);

	/* bpf_convert_filter() maps classic BPF register X to R7 and uses R8
	 * as a temporary, so all tcpdump filters need to spill/fill R7(r13)
	 * and R8(r14). The R9(r15) spill could be made conditional, but there
	 * is only one 'bpf_error' return path out of the helper functions
	 * inside bpf_jit.S. The overhead of the extra spill is negligible for
	 * any filter other than synthetic ones, so it is not worth the added
	 * complexity.
	 */

	/* mov qword ptr [rbp+8],r13 */
	EMIT4(0x4C, 0x89, 0x6D, 8);
	/* mov qword ptr [rbp+16],r14 */
	EMIT4(0x4C, 0x89, 0x75, 16);
	/* mov qword ptr [rbp+24],r15 */
	EMIT4(0x4C, 0x89, 0x7D, 24);

	/* Clear the tail call counter (tail_call_cnt): for eBPF tail calls
	 * we need to reset the counter to 0. It's done in two instructions:
	 * resetting rax to 0 (xor on eax gets zero extended), and
	 * moving it to the counter location.
	 */

	/* xor eax, eax */
	EMIT2(0x31, 0xc0);
	/* mov qword ptr [rbp+32], rax */
	EMIT4(0x48, 0x89, 0x45, 32);

	BUILD_BUG_ON(cnt != PROLOGUE_SIZE);
	*pprog = prog;
}
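
/* Frame sketch after the prologue above (offsets relative to the
 * adjusted rbp):
 *
 *	rbp +  0: saved rbx
 *	rbp +  8: saved r13
 *	rbp + 16: saved r14
 *	rbp + 24: saved r15
 *	rbp + 32: qword zeroed above; emit_bpf_tail_call() below keeps
 *		  the tail-call counter in the dword at rbp + 36
 *
 * The BPF program's own stack (rounded_stack_depth bytes, addressed
 * through BPF_REG_FP) lies below the adjusted rbp.
 */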

/* generate the following code:
 * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
 *   if (index >= array->map.max_entries)
 *     goto out;
 *   if (++tail_call_cnt > MAX_TAIL_CALL_CNT)
 *     goto out;
 *   prog = array->ptrs[index];
 *   if (prog == NULL)
 *     goto out;
 *   goto *(prog->bpf_func + prologue_size);
 * out:
 */
static void emit_bpf_tail_call(u8 **pprog)
{
	u8 *prog = *pprog;
	int label1, label2, label3;
	int cnt = 0;

	/* rdi - pointer to ctx
	 * rsi - pointer to bpf_array
	 * rdx - index in bpf_array
	 */

	/* if (index >= array->map.max_entries)
	 *   goto out;
	 */
	EMIT2(0x89, 0xD2);                        /* mov edx, edx */
	EMIT3(0x39, 0x56,                         /* cmp dword ptr [rsi + 16], edx */
	      offsetof(struct bpf_array, map.max_entries));
#define OFFSET1 (41 + RETPOLINE_RAX_BPF_JIT_SIZE) /* number of bytes to jump */
	EMIT2(X86_JBE, OFFSET1);                  /* jbe out */
	label1 = cnt;

	/* if (tail_call_cnt > MAX_TAIL_CALL_CNT)
	 *   goto out;
	 */
	EMIT2_off32(0x8B, 0x85, 36);              /* mov eax, dword ptr [rbp + 36] */
	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);     /* cmp eax, MAX_TAIL_CALL_CNT */
#define OFFSET2 (30 + RETPOLINE_RAX_BPF_JIT_SIZE)
	EMIT2(X86_JA, OFFSET2);                   /* ja out */
	label2 = cnt;
	EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
	EMIT2_off32(0x89, 0x85, 36);              /* mov dword ptr [rbp + 36], eax */

	/* prog = array->ptrs[index]; */
	EMIT4_off32(0x48, 0x8B, 0x84, 0xD6,       /* mov rax, [rsi + rdx * 8 + offsetof(...)] */
		    offsetof(struct bpf_array, ptrs));

	/* if (prog == NULL)
	 *   goto out;
	 */
	EMIT3(0x48, 0x85, 0xC0);		  /* test rax,rax */
#define OFFSET3 (8 + RETPOLINE_RAX_BPF_JIT_SIZE)
	EMIT2(X86_JE, OFFSET3);                   /* je out */
	label3 = cnt;

	/* goto *(prog->bpf_func + prologue_size); */
	EMIT4(0x48, 0x8B, 0x40,                   /* mov rax, qword ptr [rax + 32] */
	      offsetof(struct bpf_prog, bpf_func));
	EMIT4(0x48, 0x83, 0xC0, PROLOGUE_SIZE);   /* add rax, prologue_size */

	/* now we're ready to jump into next BPF program
	 * rdi == ctx (1st arg)
	 * rax == prog->bpf_func + prologue_size
	 */
	RETPOLINE_RAX_BPF_JIT();

	/* out: */
	BUILD_BUG_ON(cnt - label1 != OFFSET1);
	BUILD_BUG_ON(cnt - label2 != OFFSET2);
	BUILD_BUG_ON(cnt - label3 != OFFSET3);
	*pprog = prog;
}
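
/* The OFFSET[1-3] constants above hard-code the distance from each
 * conditional jump to the common 'out' label, including the
 * RETPOLINE_RAX_BPF_JIT_SIZE bytes of the indirect-jump thunk; the
 * BUILD_BUG_ON() checks break the build if the emitted sequence and
 * these constants ever drift apart.
 */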


static void emit_load_skb_data_hlen(u8 **pprog)
{
	u8 *prog = *pprog;
	int cnt = 0;

	/* r9d = skb->len - skb->data_len (headlen)
	 * r10 = skb->data
	 */
	/* mov %r9d, off32(%rdi) */
	EMIT3_off32(0x44, 0x8b, 0x8f, offsetof(struct sk_buff, len));

	/* sub %r9d, off32(%rdi) */
	EMIT3_off32(0x44, 0x2b, 0x8f, offsetof(struct sk_buff, data_len));

	/* mov %r10, off32(%rdi) */
	EMIT3_off32(0x4c, 0x8b, 0x97, offsetof(struct sk_buff, data));
	*pprog = prog;
}

static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
		  int oldproglen, struct jit_context *ctx)
{
	struct bpf_insn *insn = bpf_prog->insnsi;
	int insn_cnt = bpf_prog->len;
	bool seen_ld_abs = ctx->seen_ld_abs | (oldproglen == 0);
	bool seen_ax_reg = ctx->seen_ax_reg | (oldproglen == 0);
	bool seen_exit = false;
	u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
	int i, cnt = 0;
	int proglen = 0;
	u8 *prog = temp;

	emit_prologue(&prog, bpf_prog->aux->stack_depth);

	if (seen_ld_abs)
		emit_load_skb_data_hlen(&prog);

	for (i = 0; i < insn_cnt; i++, insn++) {
		const s32 imm32 = insn->imm;
		u32 dst_reg = insn->dst_reg;
		u32 src_reg = insn->src_reg;
		u8 b1 = 0, b2 = 0, b3 = 0;
		s64 jmp_offset;
		u8 jmp_cond;
		bool reload_skb_data;
		int ilen;
		u8 *func;

		if (dst_reg == BPF_REG_AX || src_reg == BPF_REG_AX)
			ctx->seen_ax_reg = seen_ax_reg = true;

		switch (insn->code) {
			/* ALU */
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU64 | BPF_ADD | BPF_X:
		case BPF_ALU64 | BPF_SUB | BPF_X:
		case BPF_ALU64 | BPF_AND | BPF_X:
		case BPF_ALU64 | BPF_OR | BPF_X:
		case BPF_ALU64 | BPF_XOR | BPF_X:
			switch (BPF_OP(insn->code)) {
			case BPF_ADD: b2 = 0x01; break;
			case BPF_SUB: b2 = 0x29; break;
			case BPF_AND: b2 = 0x21; break;
			case BPF_OR: b2 = 0x09; break;
			case BPF_XOR: b2 = 0x31; break;
			}
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_2mod(0x48, dst_reg, src_reg));
			else if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT1(add_2mod(0x40, dst_reg, src_reg));
			EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
			break;
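
			/* Worked example (illustrative): BPF_ALU64 | BPF_ADD | BPF_X
			 * with dst_reg == BPF_REG_1 (rdi) and src_reg ==
			 * BPF_REG_2 (rsi) selects b2 = 0x01 and emits
			 * 48 01 f7, i.e. 'add rdi, rsi'.
			 */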

			/* mov dst, src */
		case BPF_ALU64 | BPF_MOV | BPF_X:
			EMIT_mov(dst_reg, src_reg);
			break;

			/* mov32 dst, src */
		case BPF_ALU | BPF_MOV | BPF_X:
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT1(add_2mod(0x40, dst_reg, src_reg));
			EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
			break;

			/* neg dst */
		case BPF_ALU | BPF_NEG:
		case BPF_ALU64 | BPF_NEG:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));
			EMIT2(0xF7, add_1reg(0xD8, dst_reg));
			break;

		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU64 | BPF_ADD | BPF_K:
		case BPF_ALU64 | BPF_SUB | BPF_K:
		case BPF_ALU64 | BPF_AND | BPF_K:
		case BPF_ALU64 | BPF_OR | BPF_K:
		case BPF_ALU64 | BPF_XOR | BPF_K:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			switch (BPF_OP(insn->code)) {
			case BPF_ADD: b3 = 0xC0; break;
			case BPF_SUB: b3 = 0xE8; break;
			case BPF_AND: b3 = 0xE0; break;
			case BPF_OR: b3 = 0xC8; break;
			case BPF_XOR: b3 = 0xF0; break;
			}

			if (is_imm8(imm32))
				EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
			else
				EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
			break;

		case BPF_ALU64 | BPF_MOV | BPF_K:
			/* optimization: if imm32 is positive,
			 * use 'mov eax, imm32' (which zero-extends imm32)
			 * to save 2 bytes
			 */
			if (imm32 < 0) {
				/* 'mov rax, imm32' sign extends imm32 */
				b1 = add_1mod(0x48, dst_reg);
				b2 = 0xC7;
				b3 = 0xC0;
				EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
				break;
			}
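			/* for a non-negative imm32, fall through to the
			 * mov32 case below: 'mov eax, imm32' zero-extends
			 * into the full 64-bit register
			 */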

		case BPF_ALU | BPF_MOV | BPF_K:
			/* optimization: if imm32 is zero, use 'xor <dst>,<dst>'
			 * to save 3 bytes.
			 */
			if (imm32 == 0) {
				if (is_ereg(dst_reg))
					EMIT1(add_2mod(0x40, dst_reg, dst_reg));
				b2 = 0x31; /* xor */
				b3 = 0xC0;
				EMIT2(b2, add_2reg(b3, dst_reg, dst_reg));
				break;
			}

			/* mov %eax, imm32 */
			if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));
			EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
			break;

		case BPF_LD | BPF_IMM | BPF_DW:
			/* optimization: if imm64 is zero, use 'xor <dst>,<dst>'
			 * to save 7 bytes.
			 */
			if (insn[0].imm == 0 && insn[1].imm == 0) {
				b1 = add_2mod(0x48, dst_reg, dst_reg);
				b2 = 0x31; /* xor */
				b3 = 0xC0;
				EMIT3(b1, b2, add_2reg(b3, dst_reg, dst_reg));

				insn++;
				i++;
				break;
			}

			/* movabsq %rax, imm64 */
			EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
			EMIT(insn[0].imm, 4);
			EMIT(insn[1].imm, 4);

			insn++;
			i++;
			break;

			/* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
		case BPF_ALU | BPF_MOD | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_MOD | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU64 | BPF_MOD | BPF_X:
		case BPF_ALU64 | BPF_DIV | BPF_X:
		case BPF_ALU64 | BPF_MOD | BPF_K:
		case BPF_ALU64 | BPF_DIV | BPF_K:
			EMIT1(0x50); /* push rax */
			EMIT1(0x52); /* push rdx */

			if (BPF_SRC(insn->code) == BPF_X)
				/* mov r11, src_reg */
				EMIT_mov(AUX_REG, src_reg);
			else
				/* mov r11, imm32 */
				EMIT3_off32(0x49, 0xC7, 0xC3, imm32);

			/* mov rax, dst_reg */
			EMIT_mov(BPF_REG_0, dst_reg);

			/* xor edx, edx
			 * equivalent to 'xor rdx, rdx', but one byte less
			 */
			EMIT2(0x31, 0xd2);

			if (BPF_SRC(insn->code) == BPF_X) {
				/* if (src_reg == 0) return 0 */

				/* cmp r11, 0 */
				EMIT4(0x49, 0x83, 0xFB, 0x00);

				/* jne .+9 (skip over pop, pop, xor and jmp) */
				EMIT2(X86_JNE, 1 + 1 + 2 + 5);
				EMIT1(0x5A); /* pop rdx */
				EMIT1(0x58); /* pop rax */
				EMIT2(0x31, 0xc0); /* xor eax, eax */

				/* jmp cleanup_addr
				 * addrs[i] - 11, because there are 11 bytes
				 * after this insn: div, mov, pop, pop, mov
				 */
				jmp_offset = ctx->cleanup_addr - (addrs[i] - 11);
				EMIT1_off32(0xE9, jmp_offset);
			}

			if (BPF_CLASS(insn->code) == BPF_ALU64)
				/* div r11 */
				EMIT3(0x49, 0xF7, 0xF3);
			else
				/* div r11d */
				EMIT3(0x41, 0xF7, 0xF3);

			if (BPF_OP(insn->code) == BPF_MOD)
				/* mov r11, rdx */
				EMIT3(0x49, 0x89, 0xD3);
			else
				/* mov r11, rax */
				EMIT3(0x49, 0x89, 0xC3);

			EMIT1(0x5A); /* pop rdx */
			EMIT1(0x58); /* pop rax */

			/* mov dst_reg, r11 */
			EMIT_mov(dst_reg, AUX_REG);
			break;
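
			/* Background on the sequence above: x86 div takes
			 * its dividend implicitly in rdx:rax and leaves the
			 * quotient in rax and the remainder in rdx, which is
			 * why rax/rdx are saved around the operation, rdx is
			 * zeroed beforehand, and BPF_MOD copies rdx while
			 * BPF_DIV copies rax into r11.
			 */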

		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU64 | BPF_MUL | BPF_K:
		case BPF_ALU64 | BPF_MUL | BPF_X:
			EMIT1(0x50); /* push rax */
			EMIT1(0x52); /* push rdx */

			/* mov r11, dst_reg */
			EMIT_mov(AUX_REG, dst_reg);

			if (BPF_SRC(insn->code) == BPF_X)
				/* mov rax, src_reg */
				EMIT_mov(BPF_REG_0, src_reg);
			else
				/* mov rax, imm32 */
				EMIT3_off32(0x48, 0xC7, 0xC0, imm32);

			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, AUX_REG));
			else if (is_ereg(AUX_REG))
				EMIT1(add_1mod(0x40, AUX_REG));
			/* mul(q) r11 */
			EMIT2(0xF7, add_1reg(0xE0, AUX_REG));

			/* mov r11, rax */
			EMIT_mov(AUX_REG, BPF_REG_0);

			EMIT1(0x5A); /* pop rdx */
			EMIT1(0x58); /* pop rax */

			/* mov dst_reg, r11 */
			EMIT_mov(dst_reg, AUX_REG);
			break;

			/* shifts */
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_ARSH | BPF_K:
		case BPF_ALU64 | BPF_LSH | BPF_K:
		case BPF_ALU64 | BPF_RSH | BPF_K:
		case BPF_ALU64 | BPF_ARSH | BPF_K:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			switch (BPF_OP(insn->code)) {
			case BPF_LSH: b3 = 0xE0; break;
			case BPF_RSH: b3 = 0xE8; break;
			case BPF_ARSH: b3 = 0xF8; break;
			}
			EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
			break;

		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_ARSH | BPF_X:
		case BPF_ALU64 | BPF_LSH | BPF_X:
		case BPF_ALU64 | BPF_RSH | BPF_X:
		case BPF_ALU64 | BPF_ARSH | BPF_X:

			/* check for bad case when dst_reg == rcx */
			if (dst_reg == BPF_REG_4) {
				/* mov r11, dst_reg */
				EMIT_mov(AUX_REG, dst_reg);
				dst_reg = AUX_REG;
			}

			if (src_reg != BPF_REG_4) { /* common case */
				EMIT1(0x51); /* push rcx */

				/* mov rcx, src_reg */
				EMIT_mov(BPF_REG_4, src_reg);
			}

			/* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			switch (BPF_OP(insn->code)) {
			case BPF_LSH: b3 = 0xE0; break;
			case BPF_RSH: b3 = 0xE8; break;
			case BPF_ARSH: b3 = 0xF8; break;
			}
			EMIT2(0xD3, add_1reg(b3, dst_reg));

			if (src_reg != BPF_REG_4)
				EMIT1(0x59); /* pop rcx */

			if (insn->dst_reg == BPF_REG_4)
				/* mov dst_reg, r11 */
				EMIT_mov(insn->dst_reg, AUX_REG);
			break;
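
			/* The rcx juggling above is needed because x86
			 * variable-count shifts take their count only in
			 * %cl: the shift count is moved into rcx, and a
			 * dst_reg that already lives in rcx (BPF_REG_4
			 * maps to rcx) is first moved aside into r11.
			 */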

		case BPF_ALU | BPF_END | BPF_FROM_BE:
			switch (imm32) {
			case 16:
				/* emit 'ror %ax, 8' to swap lower 2 bytes */
				EMIT1(0x66);
				if (is_ereg(dst_reg))
					EMIT1(0x41);
				EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);

				/* emit 'movzwl eax, ax' */
				if (is_ereg(dst_reg))
					EMIT3(0x45, 0x0F, 0xB7);
				else
					EMIT2(0x0F, 0xB7);
				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 32:
				/* emit 'bswap eax' to swap lower 4 bytes */
				if (is_ereg(dst_reg))
					EMIT2(0x41, 0x0F);
				else
					EMIT1(0x0F);
				EMIT1(add_1reg(0xC8, dst_reg));
				break;
			case 64:
				/* emit 'bswap rax' to swap 8 bytes */
				EMIT3(add_1mod(0x48, dst_reg), 0x0F,
				      add_1reg(0xC8, dst_reg));
				break;
			}
			break;

		case BPF_ALU | BPF_END | BPF_FROM_LE:
			switch (imm32) {
			case 16:
				/* emit 'movzwl eax, ax' to zero extend 16-bit
				 * into 64 bit
				 */
				if (is_ereg(dst_reg))
					EMIT3(0x45, 0x0F, 0xB7);
				else
					EMIT2(0x0F, 0xB7);
				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 32:
				/* emit 'mov eax, eax' to clear upper 32-bits */
				if (is_ereg(dst_reg))
					EMIT1(0x45);
				EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 64:
				/* nop */
				break;
			}
			break;

			/* ST: *(u8*)(dst_reg + off) = imm */
		case BPF_ST | BPF_MEM | BPF_B:
			if (is_ereg(dst_reg))
				EMIT2(0x41, 0xC6);
			else
				EMIT1(0xC6);
			goto st;
		case BPF_ST | BPF_MEM | BPF_H:
			if (is_ereg(dst_reg))
				EMIT3(0x66, 0x41, 0xC7);
			else
				EMIT2(0x66, 0xC7);
			goto st;
		case BPF_ST | BPF_MEM | BPF_W:
			if (is_ereg(dst_reg))
				EMIT2(0x41, 0xC7);
			else
				EMIT1(0xC7);
			goto st;
		case BPF_ST | BPF_MEM | BPF_DW:
			EMIT2(add_1mod(0x48, dst_reg), 0xC7);

st:			if (is_imm8(insn->off))
				EMIT2(add_1reg(0x40, dst_reg), insn->off);
			else
				EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);

			EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
			break;

			/* STX: *(u8*)(dst_reg + off) = src_reg */
		case BPF_STX | BPF_MEM | BPF_B:
			/* emit 'mov byte ptr [rax + off], al' */
			if (is_ereg(dst_reg) || is_ereg(src_reg) ||
			    /* have to add extra byte for x86 SIL, DIL regs */
			    src_reg == BPF_REG_1 || src_reg == BPF_REG_2)
				EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
			else
				EMIT1(0x88);
			goto stx;
		case BPF_STX | BPF_MEM | BPF_H:
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
			else
				EMIT2(0x66, 0x89);
			goto stx;
		case BPF_STX | BPF_MEM | BPF_W:
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
			else
				EMIT1(0x89);
			goto stx;
		case BPF_STX | BPF_MEM | BPF_DW:
			EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
stx:			if (is_imm8(insn->off))
				EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
			else
				EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
					    insn->off);
			break;

			/* LDX: dst_reg = *(u8*)(src_reg + off) */
		case BPF_LDX | BPF_MEM | BPF_B:
			/* emit 'movzx rax, byte ptr [rax + off]' */
			EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
			goto ldx;
		case BPF_LDX | BPF_MEM | BPF_H:
			/* emit 'movzx rax, word ptr [rax + off]' */
			EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
			goto ldx;
		case BPF_LDX | BPF_MEM | BPF_W:
			/* emit 'mov eax, dword ptr [rax+0x14]' */
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
			else
				EMIT1(0x8B);
			goto ldx;
		case BPF_LDX | BPF_MEM | BPF_DW:
			/* emit 'mov rax, qword ptr [rax+0x14]' */
			EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
ldx:			/* if insn->off == 0 we can save one extra byte, but
			 * special case of x86 r13 which always needs an offset
			 * is not worth the hassle
			 */
			if (is_imm8(insn->off))
				EMIT2(add_2reg(0x40, src_reg, dst_reg), insn->off);
			else
				EMIT1_off32(add_2reg(0x80, src_reg, dst_reg),
					    insn->off);
			break;

			/* STX XADD: lock *(u32*)(dst_reg + off) += src_reg */
		case BPF_STX | BPF_XADD | BPF_W:
			/* emit 'lock add dword ptr [rax + off], eax' */
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT3(0xF0, add_2mod(0x40, dst_reg, src_reg), 0x01);
			else
				EMIT2(0xF0, 0x01);
			goto xadd;
		case BPF_STX | BPF_XADD | BPF_DW:
			EMIT3(0xF0, add_2mod(0x48, dst_reg, src_reg), 0x01);
xadd:			if (is_imm8(insn->off))
				EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
			else
				EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
					    insn->off);
			break;

			/* call */
		case BPF_JMP | BPF_CALL:
			func = (u8 *) __bpf_call_base + imm32;
			jmp_offset = func - (image + addrs[i]);
			if (seen_ld_abs) {
				reload_skb_data = bpf_helper_changes_pkt_data(func);
				if (reload_skb_data) {
					EMIT1(0x57); /* push %rdi */
					jmp_offset += 22; /* pop, mov, sub, mov */
				} else {
					EMIT2(0x41, 0x52); /* push %r10 */
					EMIT2(0x41, 0x51); /* push %r9 */
					/* need to adjust jmp offset, since
					 * pop %r9, pop %r10 take 4 bytes after call insn
					 */
					jmp_offset += 4;
				}
			}
			if (!imm32 || !is_simm32(jmp_offset)) {
				pr_err("unsupported bpf func %d addr %p image %p\n",
				       imm32, func, image);
				return -EINVAL;
			}
			EMIT1_off32(0xE8, jmp_offset);
			if (seen_ld_abs) {
				if (reload_skb_data) {
					EMIT1(0x5F); /* pop %rdi */
					emit_load_skb_data_hlen(&prog);
				} else {
					EMIT2(0x41, 0x59); /* pop %r9 */
					EMIT2(0x41, 0x5A); /* pop %r10 */
				}
			}
			break;
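
			/* Helpers are reached with a plain 'call rel32'
			 * (opcode 0xE8), so the helper must lie within
			 * +/- 2GB of the image; jmp_offset is taken
			 * relative to addrs[i], the end of this insn,
			 * which is where the CPU computes rel32 from, and
			 * the is_simm32() check above rejects anything
			 * further away.
			 */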

		case BPF_JMP | BPF_TAIL_CALL:
			emit_bpf_tail_call(&prog);
			break;

			/* cond jump */
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JNE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JLT | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JLE | BPF_X:
		case BPF_JMP | BPF_JSGT | BPF_X:
		case BPF_JMP | BPF_JSLT | BPF_X:
		case BPF_JMP | BPF_JSGE | BPF_X:
		case BPF_JMP | BPF_JSLE | BPF_X:
			/* cmp dst_reg, src_reg */
			EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x39,
			      add_2reg(0xC0, dst_reg, src_reg));
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JSET | BPF_X:
			/* test dst_reg, src_reg */
			EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x85,
			      add_2reg(0xC0, dst_reg, src_reg));
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JSET | BPF_K:
			/* test dst_reg, imm32 */
			EMIT1(add_1mod(0x48, dst_reg));
			EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JNE | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JLT | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JLE | BPF_K:
		case BPF_JMP | BPF_JSGT | BPF_K:
		case BPF_JMP | BPF_JSLT | BPF_K:
		case BPF_JMP | BPF_JSGE | BPF_K:
		case BPF_JMP | BPF_JSLE | BPF_K:
			/* cmp dst_reg, imm8/32 */
			EMIT1(add_1mod(0x48, dst_reg));

			if (is_imm8(imm32))
				EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
			else
				EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);

emit_cond_jmp:		/* convert BPF opcode to x86 */
			switch (BPF_OP(insn->code)) {
			case BPF_JEQ:
				jmp_cond = X86_JE;
				break;
			case BPF_JSET:
			case BPF_JNE:
				jmp_cond = X86_JNE;
				break;
			case BPF_JGT:
				/* GT is unsigned '>', JA in x86 */
				jmp_cond = X86_JA;
				break;
			case BPF_JLT:
				/* LT is unsigned '<', JB in x86 */
				jmp_cond = X86_JB;
				break;
			case BPF_JGE:
				/* GE is unsigned '>=', JAE in x86 */
				jmp_cond = X86_JAE;
				break;
			case BPF_JLE:
				/* LE is unsigned '<=', JBE in x86 */
				jmp_cond = X86_JBE;
				break;
			case BPF_JSGT:
				/* signed '>', GT in x86 */
				jmp_cond = X86_JG;
				break;
			case BPF_JSLT:
				/* signed '<', LT in x86 */
				jmp_cond = X86_JL;
				break;
			case BPF_JSGE:
				/* signed '>=', GE in x86 */
				jmp_cond = X86_JGE;
				break;
			case BPF_JSLE:
				/* signed '<=', LE in x86 */
				jmp_cond = X86_JLE;
				break;
			default: /* to silence gcc warning */
				return -EFAULT;
			}
			jmp_offset = addrs[i + insn->off] - addrs[i];
			if (is_imm8(jmp_offset)) {
				EMIT2(jmp_cond, jmp_offset);
			} else if (is_simm32(jmp_offset)) {
				EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
			} else {
				pr_err("cond_jmp gen bug %llx\n", jmp_offset);
				return -EFAULT;
			}

			break;

		case BPF_JMP | BPF_JA:
			jmp_offset = addrs[i + insn->off] - addrs[i];
			if (!jmp_offset)
				/* optimize out nop jumps */
				break;
emit_jmp:
			if (is_imm8(jmp_offset)) {
				EMIT2(0xEB, jmp_offset);
			} else if (is_simm32(jmp_offset)) {
				EMIT1_off32(0xE9, jmp_offset);
			} else {
				pr_err("jmp gen bug %llx\n", jmp_offset);
				return -EFAULT;
			}
			break;

		case BPF_LD | BPF_IND | BPF_W:
			func = sk_load_word;
			goto common_load;
		case BPF_LD | BPF_ABS | BPF_W:
			func = CHOOSE_LOAD_FUNC(imm32, sk_load_word);
common_load:
			ctx->seen_ld_abs = seen_ld_abs = true;
			jmp_offset = func - (image + addrs[i]);
			if (!func || !is_simm32(jmp_offset)) {
				pr_err("unsupported bpf func %d addr %p image %p\n",
				       imm32, func, image);
				return -EINVAL;
			}
			if (BPF_MODE(insn->code) == BPF_ABS) {
				/* mov %esi, imm32 */
				EMIT1_off32(0xBE, imm32);
			} else {
				/* mov %rsi, src_reg */
				EMIT_mov(BPF_REG_2, src_reg);
				if (imm32) {
					if (is_imm8(imm32))
						/* add %esi, imm8 */
						EMIT3(0x83, 0xC6, imm32);
					else
						/* add %esi, imm32 */
						EMIT2_off32(0x81, 0xC6, imm32);
				}
			}
			/* skb pointer is in R6 (%rbx), it will be copied into
			 * %rdi if skb_copy_bits() call is necessary.
			 * sk_load_* helpers also use %r10 and %r9d.
			 * See bpf_jit.S
			 */
			if (seen_ax_reg)
				/* r10 = skb->data, mov %r10, off32(%rbx) */
				EMIT3_off32(0x4c, 0x8b, 0x93,
					    offsetof(struct sk_buff, data));
			EMIT1_off32(0xE8, jmp_offset); /* call */
			break;

		case BPF_LD | BPF_IND | BPF_H:
			func = sk_load_half;
			goto common_load;
		case BPF_LD | BPF_ABS | BPF_H:
			func = CHOOSE_LOAD_FUNC(imm32, sk_load_half);
			goto common_load;
		case BPF_LD | BPF_IND | BPF_B:
			func = sk_load_byte;
			goto common_load;
		case BPF_LD | BPF_ABS | BPF_B:
			func = CHOOSE_LOAD_FUNC(imm32, sk_load_byte);
			goto common_load;

		case BPF_JMP | BPF_EXIT:
			if (seen_exit) {
				jmp_offset = ctx->cleanup_addr - addrs[i];
				goto emit_jmp;
			}
			seen_exit = true;
			/* update cleanup_addr */
			ctx->cleanup_addr = proglen;
			/* mov rbx, qword ptr [rbp+0] */
			EMIT4(0x48, 0x8B, 0x5D, 0);
			/* mov r13, qword ptr [rbp+8] */
			EMIT4(0x4C, 0x8B, 0x6D, 8);
			/* mov r14, qword ptr [rbp+16] */
			EMIT4(0x4C, 0x8B, 0x75, 16);
			/* mov r15, qword ptr [rbp+24] */
			EMIT4(0x4C, 0x8B, 0x7D, 24);

			/* add rbp, AUX_STACK_SPACE */
			EMIT4(0x48, 0x83, 0xC5, AUX_STACK_SPACE);
			EMIT1(0xC9); /* leave */
			EMIT1(0xC3); /* ret */
			break;

		default:
			/* By design the x64 JIT should support all BPF
			 * instructions. This error will be seen if a new
			 * instruction was added to the interpreter but not
			 * to the JIT, or if there is junk in bpf_prog.
			 */
			pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
			return -EINVAL;
		}

		ilen = prog - temp;
		if (ilen > BPF_MAX_INSN_SIZE) {
			pr_err("bpf_jit: fatal insn size error\n");
			return -EFAULT;
		}

		if (image) {
			if (unlikely(proglen + ilen > oldproglen)) {
				pr_err("bpf_jit: fatal error\n");
				return -EFAULT;
			}
			memcpy(image + proglen, temp, ilen);
		}
		proglen += ilen;
		addrs[i] = proglen;
		prog = temp;
	}
	return proglen;
}
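
/* do_jit() is invoked repeatedly by bpf_int_jit_compile() below. On each
 * pass, addrs[i] records the offset of the end of insn i from the
 * previous pass, so jump targets are computed as
 * addrs[i + insn->off] - addrs[i]; as instructions settle into their
 * short encodings these offsets shrink, until the image size stops
 * changing and a final pass writes out the image.
 */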

struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	struct bpf_binary_header *header = NULL;
	struct bpf_prog *tmp, *orig_prog = prog;
	int proglen, oldproglen = 0;
	struct jit_context ctx = {};
	bool tmp_blinded = false;
	u8 *image = NULL;
	int *addrs;
	int pass;
	int i;

	if (!bpf_jit_enable)
		return orig_prog;

	tmp = bpf_jit_blind_constants(prog);
	/* If blinding was requested and we failed during blinding,
	 * we must fall back to the interpreter.
	 */
	if (IS_ERR(tmp))
		return orig_prog;
	if (tmp != prog) {
		tmp_blinded = true;
		prog = tmp;
	}

	addrs = kmalloc(prog->len * sizeof(*addrs), GFP_KERNEL);
	if (!addrs) {
		prog = orig_prog;
		goto out;
	}

	/* Before the first pass, make a rough estimate of addrs[]:
	 * each BPF instruction is translated to less than 64 bytes.
	 */
	for (proglen = 0, i = 0; i < prog->len; i++) {
		proglen += 64;
		addrs[i] = proglen;
	}
	ctx.cleanup_addr = proglen;

	/* The JITed image shrinks with every pass and the loop iterates
	 * until the image stops shrinking. Very large BPF programs may
	 * converge only on the last pass; in such a case do one more
	 * pass to emit the final image.
	 */
	for (pass = 0; pass < 20 || image; pass++) {
		proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
		if (proglen <= 0) {
out_image:
			image = NULL;
			if (header)
				bpf_jit_binary_free(header);
			prog = orig_prog;
			goto out_addrs;
		}
		if (image) {
			if (proglen != oldproglen) {
				pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
				       proglen, oldproglen);
				goto out_image;
			}
			break;
		}
		if (proglen == oldproglen) {
			header = bpf_jit_binary_alloc(proglen, &image,
						      1, jit_fill_hole);
			if (!header) {
				prog = orig_prog;
				goto out_addrs;
			}
		}
		oldproglen = proglen;
		cond_resched();
	}

	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, proglen, pass + 1, image);

	if (image) {
		bpf_flush_icache(header, image + proglen);
		bpf_jit_binary_lock_ro(header);
		prog->bpf_func = (void *)image;
		prog->jited = 1;
		prog->jited_len = proglen;
	} else {
		prog = orig_prog;
	}

out_addrs:
	kfree(addrs);
out:
	if (tmp_blinded)
		bpf_jit_prog_release_other(prog, prog == orig_prog ?
					   tmp : orig_prog);
	return prog;
}