/* bpf_jit_comp.c: BPF JIT compiler for PPC64
 *
 * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
 *
 * Based on the x86 BPF compiler, by Eric Dumazet (eric.dumazet@gmail.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>

#include "bpf_jit.h"

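/*
 * Controlled by the net.core.bpf_jit_enable sysctl: 0 disables the JIT,
 * 1 enables it, and 2 additionally dumps the generated image (see the
 * bpf_jit_enable > 1 checks in bpf_jit_compile() below).
 */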
int bpf_jit_enable __read_mostly;

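/*
 * Order the stores of the freshly written instructions before making the
 * range coherent for instruction fetch, so no CPU can execute stale code.
 */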
static inline void bpf_flush_icache(void *start, void *end)
{
	smp_wmb();
	flush_icache_range((unsigned long)start, (unsigned long)end);
}

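/*
 * Emit the function prologue. A stack frame is only built when the filter
 * needs scratch memory (M[]) or calls the skb data load helpers; simple
 * filters run entirely in volatile registers.
 */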
static void bpf_jit_build_prologue(struct bpf_prog *fp, u32 *image,
				   struct codegen_context *ctx)
{
	int i;
	const struct sock_filter *filter = fp->insns;

	if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
		/* Make stackframe */
		if (ctx->seen & SEEN_DATAREF) {
			/* If we call any helpers (for loads), save LR */
			EMIT(PPC_INST_MFLR | __PPC_RT(R0));
			PPC_STD(0, 1, 16);

			/* Back up non-volatile regs. */
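			/*
			 * GPR i is saved 8*(32-i) bytes below the current
			 * stack pointer, matching the slots restored in
			 * bpf_jit_build_epilogue().
			 */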
			PPC_STD(r_D, 1, -(8*(32-r_D)));
			PPC_STD(r_HL, 1, -(8*(32-r_HL)));
		}
		if (ctx->seen & SEEN_MEM) {
			/*
			 * Conditionally save regs r15-r31 as some will be used
			 * for M[] data.
			 */
			for (i = r_M; i < (r_M+16); i++) {
				if (ctx->seen & (1 << (i-r_M)))
					PPC_STD(i, 1, -(8*(32-i)));
			}
		}
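		/*
		 * stdu r1,-BPF_PPC_STACKFRAME(r1): allocate the frame and
		 * store the back chain pointer in a single instruction.
		 */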
		EMIT(PPC_INST_STDU | __PPC_RS(R1) | __PPC_RA(R1) |
		     (-BPF_PPC_STACKFRAME & 0xfffc));
	}

	if (ctx->seen & SEEN_DATAREF) {
		/*
		 * If this filter needs to access skb data,
		 * prepare r_D and r_HL:
		 * r_HL = skb->len - skb->data_len
		 * r_D = skb->data
		 */
		PPC_LWZ_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
							 data_len));
		PPC_LWZ_OFFS(r_HL, r_skb, offsetof(struct sk_buff, len));
		PPC_SUB(r_HL, r_HL, r_scratch1);
		PPC_LD_OFFS(r_D, r_skb, offsetof(struct sk_buff, data));
	}

	if (ctx->seen & SEEN_XREG) {
		/*
		 * TODO: Could also detect whether first instr. sets X and
		 * avoid this (as below, with A).
		 */
		PPC_LI(r_X, 0);
	}

	/* make sure we don't leak kernel information to user */
	if (bpf_needs_clear_a(&filter[0]))
		PPC_LI(r_A, 0);
}

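/*
 * Undo the prologue: pop the stack frame if one was created, restore LR and
 * any saved non-volatile registers, then return with the result in r3.
 */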
static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
	int i;

	if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
		PPC_ADDI(1, 1, BPF_PPC_STACKFRAME);
		if (ctx->seen & SEEN_DATAREF) {
			PPC_LD(0, 1, 16);
			PPC_MTLR(0);
			PPC_LD(r_D, 1, -(8*(32-r_D)));
			PPC_LD(r_HL, 1, -(8*(32-r_HL)));
		}
		if (ctx->seen & SEEN_MEM) {
			/* Restore any saved non-vol registers */
			for (i = r_M; i < (r_M+16); i++) {
				if (ctx->seen & (1 << (i-r_M)))
					PPC_LD(i, 1, -(8*(32-i)));
			}
		}
	}
	/* The RETs have left a return value in R3. */

	PPC_BLR();
}

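/*
 * For a constant offset K, pick the most specific skb-load helper:
 * non-negative offsets use the _positive_offset variant, offsets in
 * [SKF_LL_OFF, 0) the _negative_offset variant, and anything lower falls
 * back to the generic helper.
 */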
#define CHOOSE_LOAD_FUNC(K, func) \
	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)

/* Assemble the body code between the prologue & epilogue. */
static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
			      struct codegen_context *ctx,
			      unsigned int *addrs)
{
	const struct sock_filter *filter = fp->insns;
	int flen = fp->len;
	u8 *func;
	unsigned int true_cond;
	int i;

	/* Start of epilogue code */
	unsigned int exit_addr = addrs[flen];

	for (i = 0; i < flen; i++) {
		unsigned int K = filter[i].k;
		u16 code = bpf_anc_helper(&filter[i]);

		/*
		 * addrs[] maps a BPF bytecode address into a real offset from
		 * the start of the body code.
		 */
		addrs[i] = ctx->idx * 4;

		switch (code) {
		/*** ALU ops ***/
		case BPF_ALU | BPF_ADD | BPF_X: /* A += X; */
			ctx->seen |= SEEN_XREG;
			PPC_ADD(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_ADD | BPF_K: /* A += K; */
			if (!K)
				break;
			PPC_ADDI(r_A, r_A, IMM_L(K));
			if (K >= 32768)
				PPC_ADDIS(r_A, r_A, IMM_HA(K));
			break;
		case BPF_ALU | BPF_SUB | BPF_X: /* A -= X; */
			ctx->seen |= SEEN_XREG;
			PPC_SUB(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_SUB | BPF_K: /* A -= K */
			if (!K)
				break;
			PPC_ADDI(r_A, r_A, IMM_L(-K));
			if (K >= 32768)
				PPC_ADDIS(r_A, r_A, IMM_HA(-K));
			break;
		case BPF_ALU | BPF_MUL | BPF_X: /* A *= X; */
			ctx->seen |= SEEN_XREG;
			PPC_MUL(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_MUL | BPF_K: /* A *= K */
			if (K < 32768)
				PPC_MULI(r_A, r_A, K);
			else {
				PPC_LI32(r_scratch1, K);
				PPC_MUL(r_A, r_A, r_scratch1);
			}
			break;
		case BPF_ALU | BPF_MOD | BPF_X: /* A %= X; */
			ctx->seen |= SEEN_XREG;
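			/*
			 * A %= X: trap X == 0 (return 0), then compute
			 * A - (A / X) * X.
			 */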
			PPC_CMPWI(r_X, 0);
			if (ctx->pc_ret0 != -1) {
				PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
			} else {
				PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
				PPC_LI(r_ret, 0);
				PPC_JMP(exit_addr);
			}
			PPC_DIVWU(r_scratch1, r_A, r_X);
			PPC_MUL(r_scratch1, r_X, r_scratch1);
			PPC_SUB(r_A, r_A, r_scratch1);
			break;
		case BPF_ALU | BPF_MOD | BPF_K: /* A %= K; */
			PPC_LI32(r_scratch2, K);
			PPC_DIVWU(r_scratch1, r_A, r_scratch2);
			PPC_MUL(r_scratch1, r_scratch2, r_scratch1);
			PPC_SUB(r_A, r_A, r_scratch1);
			break;
		case BPF_ALU | BPF_DIV | BPF_X: /* A /= X; */
			ctx->seen |= SEEN_XREG;
			PPC_CMPWI(r_X, 0);
			if (ctx->pc_ret0 != -1) {
				PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
			} else {
				/*
				 * Exit, returning 0; first pass hits here
				 * (longer worst-case code size).
				 */
				PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
				PPC_LI(r_ret, 0);
				PPC_JMP(exit_addr);
			}
			PPC_DIVWU(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_DIV | BPF_K: /* A /= K */
			if (K == 1)
				break;
			PPC_LI32(r_scratch1, K);
			PPC_DIVWU(r_A, r_A, r_scratch1);
			break;
		case BPF_ALU | BPF_AND | BPF_X:
			ctx->seen |= SEEN_XREG;
			PPC_AND(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_AND | BPF_K:
			if (!IMM_H(K))
				PPC_ANDI(r_A, r_A, K);
			else {
				PPC_LI32(r_scratch1, K);
				PPC_AND(r_A, r_A, r_scratch1);
			}
			break;
		case BPF_ALU | BPF_OR | BPF_X:
			ctx->seen |= SEEN_XREG;
			PPC_OR(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_OR | BPF_K:
			if (IMM_L(K))
				PPC_ORI(r_A, r_A, IMM_L(K));
			if (K >= 65536)
				PPC_ORIS(r_A, r_A, IMM_H(K));
			break;
		case BPF_ANC | SKF_AD_ALU_XOR_X:
		case BPF_ALU | BPF_XOR | BPF_X: /* A ^= X */
			ctx->seen |= SEEN_XREG;
			PPC_XOR(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_XOR | BPF_K: /* A ^= K */
			if (IMM_L(K))
				PPC_XORI(r_A, r_A, IMM_L(K));
			if (K >= 65536)
				PPC_XORIS(r_A, r_A, IMM_H(K));
			break;
		case BPF_ALU | BPF_LSH | BPF_X: /* A <<= X; */
			ctx->seen |= SEEN_XREG;
			PPC_SLW(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_LSH | BPF_K:
			if (K == 0)
				break;
			else
				PPC_SLWI(r_A, r_A, K);
			break;
		case BPF_ALU | BPF_RSH | BPF_X: /* A >>= X; */
			ctx->seen |= SEEN_XREG;
			PPC_SRW(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_RSH | BPF_K: /* A >>= K; */
			if (K == 0)
				break;
			else
				PPC_SRWI(r_A, r_A, K);
			break;
		case BPF_ALU | BPF_NEG:
			PPC_NEG(r_A, r_A);
			break;
		case BPF_RET | BPF_K:
			PPC_LI32(r_ret, K);
			if (!K) {
				if (ctx->pc_ret0 == -1)
					ctx->pc_ret0 = i;
			}
			/*
			 * If this isn't the very last instruction, branch to
			 * the epilogue if we have state to clean up.
			 * Otherwise, if there's nothing to tidy, just return.
			 * If we /are/ the last instruction, we're about to
			 * fall through to the epilogue to return.
			 */
			if (i != flen - 1) {
				/*
				 * Note: 'seen' is properly valid only on pass
				 * #2. Both parts of this conditional are the
				 * same instruction size though, meaning the
				 * first pass will still correctly determine
				 * the code size/addresses.
				 */
				if (ctx->seen)
					PPC_JMP(exit_addr);
				else
					PPC_BLR();
			}
			break;
		case BPF_RET | BPF_A:
			PPC_MR(r_ret, r_A);
			if (i != flen - 1) {
				if (ctx->seen)
					PPC_JMP(exit_addr);
				else
					PPC_BLR();
			}
			break;
		case BPF_MISC | BPF_TAX: /* X = A */
			PPC_MR(r_X, r_A);
			break;
		case BPF_MISC | BPF_TXA: /* A = X */
			ctx->seen |= SEEN_XREG;
			PPC_MR(r_A, r_X);
			break;

		/*** Constant loads/M[] access ***/
		case BPF_LD | BPF_IMM: /* A = K */
			PPC_LI32(r_A, K);
			break;
		case BPF_LDX | BPF_IMM: /* X = K */
			PPC_LI32(r_X, K);
			break;
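		/*
		 * M[] slots live in non-volatile GPRs r_M..r_M+15; the low 16
		 * bits of ctx->seen record which slots are used so the
		 * prologue/epilogue save and restore only those registers.
		 */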
		case BPF_LD | BPF_MEM: /* A = mem[K] */
			PPC_MR(r_A, r_M + (K & 0xf));
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_LDX | BPF_MEM: /* X = mem[K] */
			PPC_MR(r_X, r_M + (K & 0xf));
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_ST: /* mem[K] = A */
			PPC_MR(r_M + (K & 0xf), r_A);
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_STX: /* mem[K] = X */
			PPC_MR(r_M + (K & 0xf), r_X);
			ctx->seen |= SEEN_XREG | SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_LD | BPF_W | BPF_LEN: /* A = skb->len; */
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len));
			break;
		case BPF_LDX | BPF_W | BPF_LEN: /* X = skb->len; */
			PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len));
			break;

		/*** Ancillary info loads ***/
		case BPF_ANC | SKF_AD_PROTOCOL: /* A = ntohs(skb->protocol); */
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  protocol) != 2);
			PPC_NTOHS_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							    protocol));
			break;
		case BPF_ANC | SKF_AD_IFINDEX:
			PPC_LD_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
								dev));
			PPC_CMPDI(r_scratch1, 0);
			if (ctx->pc_ret0 != -1) {
				PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
			} else {
				/* Exit, returning 0; first pass hits here. */
				PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
				PPC_LI(r_ret, 0);
				PPC_JMP(exit_addr);
			}
			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
						  ifindex) != 4);
			PPC_LWZ_OFFS(r_A, r_scratch1,
				     offsetof(struct net_device, ifindex));
			break;
		case BPF_ANC | SKF_AD_MARK:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  mark));
			break;
		case BPF_ANC | SKF_AD_RXHASH:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  hash));
			break;
		case BPF_ANC | SKF_AD_VLAN_TAG:
		case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
			BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);

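			/*
			 * vlan_tci keeps the "tag present" flag in bit 12
			 * (VLAN_TAG_PRESENT): mask it off for the TAG value,
			 * or isolate and shift it down for TAG_PRESENT.
			 */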
			PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  vlan_tci));
			if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) {
				PPC_ANDI(r_A, r_A, ~VLAN_TAG_PRESENT);
			} else {
				PPC_ANDI(r_A, r_A, VLAN_TAG_PRESENT);
				PPC_SRWI(r_A, r_A, 12);
			}
			break;
		case BPF_ANC | SKF_AD_QUEUE:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  queue_mapping) != 2);
			PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  queue_mapping));
			break;
		case BPF_ANC | SKF_AD_CPU:
#ifdef CONFIG_SMP
			/*
			 * PACA ptr is r13:
			 * raw_smp_processor_id() = local_paca->paca_index
			 */
			BUILD_BUG_ON(FIELD_SIZEOF(struct paca_struct,
						  paca_index) != 2);
			PPC_LHZ_OFFS(r_A, 13,
				     offsetof(struct paca_struct, paca_index));
#else
			PPC_LI(r_A, 0);
#endif
			break;

		/*** Absolute loads from packet header/data ***/
		case BPF_LD | BPF_W | BPF_ABS:
			func = CHOOSE_LOAD_FUNC(K, sk_load_word);
			goto common_load;
		case BPF_LD | BPF_H | BPF_ABS:
			func = CHOOSE_LOAD_FUNC(K, sk_load_half);
			goto common_load;
		case BPF_LD | BPF_B | BPF_ABS:
			func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
		common_load:
			/* Load from [K]. */
			ctx->seen |= SEEN_DATAREF;
			PPC_LI64(r_scratch1, func);
			PPC_MTLR(r_scratch1);
			PPC_LI32(r_addr, K);
			PPC_BLRL();
			/*
			 * Helper returns 'lt' condition on error, and an
			 * appropriate return value in r3
			 */
			PPC_BCC(COND_LT, exit_addr);
			break;

		/*** Indirect loads from packet header/data ***/
		case BPF_LD | BPF_W | BPF_IND:
			func = sk_load_word;
			goto common_load_ind;
		case BPF_LD | BPF_H | BPF_IND:
			func = sk_load_half;
			goto common_load_ind;
		case BPF_LD | BPF_B | BPF_IND:
			func = sk_load_byte;
		common_load_ind:
			/*
			 * Load from [X + K]. Negative offsets are tested for
			 * in the helper functions.
			 */
			ctx->seen |= SEEN_DATAREF | SEEN_XREG;
			PPC_LI64(r_scratch1, func);
			PPC_MTLR(r_scratch1);
			PPC_ADDI(r_addr, r_X, IMM_L(K));
			if (K >= 32768)
				PPC_ADDIS(r_addr, r_addr, IMM_HA(K));
			PPC_BLRL();
			/* If error, cr0.LT set */
			PPC_BCC(COND_LT, exit_addr);
			break;

		case BPF_LDX | BPF_B | BPF_MSH:
			func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh);
			goto common_load;
			break;

		/*** Jump and branches ***/
		case BPF_JMP | BPF_JA:
			if (K != 0)
				PPC_JMP(addrs[i + 1 + K]);
			break;

		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
			true_cond = COND_GT;
			goto cond_branch;
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
			true_cond = COND_GE;
			goto cond_branch;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
			true_cond = COND_EQ;
			goto cond_branch;
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			true_cond = COND_NE;
			/* Fall through */
		cond_branch:
			/* same targets, can avoid doing the test :) */
			if (filter[i].jt == filter[i].jf) {
				if (filter[i].jt > 0)
					PPC_JMP(addrs[i + 1 + filter[i].jt]);
				break;
			}

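			/*
			 * BPF comparisons are unsigned, hence the logical
			 * (cmplw/cmplwi) compare forms below.
			 */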
			switch (code) {
			case BPF_JMP | BPF_JGT | BPF_X:
			case BPF_JMP | BPF_JGE | BPF_X:
			case BPF_JMP | BPF_JEQ | BPF_X:
				ctx->seen |= SEEN_XREG;
				PPC_CMPLW(r_A, r_X);
				break;
			case BPF_JMP | BPF_JSET | BPF_X:
				ctx->seen |= SEEN_XREG;
				PPC_AND_DOT(r_scratch1, r_A, r_X);
				break;
			case BPF_JMP | BPF_JEQ | BPF_K:
			case BPF_JMP | BPF_JGT | BPF_K:
			case BPF_JMP | BPF_JGE | BPF_K:
				if (K < 32768)
					PPC_CMPLWI(r_A, K);
				else {
					PPC_LI32(r_scratch1, K);
					PPC_CMPLW(r_A, r_scratch1);
				}
				break;
			case BPF_JMP | BPF_JSET | BPF_K:
				if (K < 32768)
					/* PPC_ANDI is /only/ dot-form */
					PPC_ANDI(r_scratch1, r_A, K);
				else {
					PPC_LI32(r_scratch1, K);
					PPC_AND_DOT(r_scratch1, r_A,
						    r_scratch1);
				}
				break;
			}
			/* Sometimes branches are constructed "backward", with
			 * the false path being the branch and true path being
			 * a fallthrough to the next instruction.
			 */
			if (filter[i].jt == 0)
				/* Swap the sense of the branch */
				PPC_BCC(true_cond ^ COND_CMP_TRUE,
					addrs[i + 1 + filter[i].jf]);
			else {
				PPC_BCC(true_cond, addrs[i + 1 + filter[i].jt]);
				if (filter[i].jf != 0)
					PPC_JMP(addrs[i + 1 + filter[i].jf]);
			}
			break;
		default:
			/* The filter contains something cruel & unusual.
			 * We don't handle it, but also there shouldn't be
			 * anything missing from our list.
			 */
			if (printk_ratelimit())
				pr_err("BPF filter opcode %04x (@%d) unsupported\n",
				       filter[i].code, i);
			return -ENOTSUPP;
		}

	}
	/* Set end-of-body-code address for exit. */
	addrs[i] = ctx->idx * 4;

	return 0;
}

void bpf_jit_compile(struct bpf_prog *fp)
{
	unsigned int proglen;
	unsigned int alloclen;
	u32 *image = NULL;
	u32 *code_base;
	unsigned int *addrs;
	struct codegen_context cgctx;
	int pass;
	int flen = fp->len;

	if (!bpf_jit_enable)
		return;

	addrs = kzalloc((flen+1) * sizeof(*addrs), GFP_KERNEL);
	if (addrs == NULL)
		return;

	/*
	 * There are multiple assembly passes as the generated code will change
	 * size as it settles down, figuring out the max branch offsets/exit
	 * paths required.
	 *
	 * The range of standard conditional branches is +/- 32Kbytes. Since
	 * BPF_MAXINSNS = 4096, a worst-case jump from start to finish stays in
	 * range only if each instruction emits at most 8 bytes of code -- not
	 * feasible, so long jumps are used, distinct from short branches.
	 *
	 * Current:
	 *
	 * For now, both branch types assemble to 2 words (short branches padded
	 * with a NOP); this is less efficient, but assembly will always complete
	 * after exactly 3 passes:
	 *
	 * First pass: No code buffer; Program is "faux-generated" -- no code
	 * emitted but maximum size of output determined (and addrs[] filled
	 * in). Also, we note whether we use M[], whether we use skb data, etc.
	 * All generation choices assumed to be 'worst-case', e.g. branches all
	 * far (2 instructions), return path code reduction not available, etc.
	 *
	 * Second pass: Code buffer allocated with size determined previously.
	 * Prologue generated to support features we have seen used. Exit paths
	 * determined and addrs[] is filled in again, as code may be slightly
	 * smaller as a result.
	 *
	 * Third pass: Code generated 'for real', and branch destinations
	 * determined from now-accurate addrs[] map.
	 *
	 * Ideal:
	 *
	 * If we optimise this, near branches will be shorter. On the
	 * first assembly pass, we should err on the side of caution and
	 * generate the biggest code. On subsequent passes, branches will be
	 * generated short or long and code size will reduce. With smaller
	 * code, more branches may fall into the short category, and code will
	 * reduce more.
	 *
	 * Finally, if we see one pass generate code the same size as the
	 * previous pass we have converged and should now generate code for
	 * real. Allocating at the end will also save the memory that would
	 * otherwise be wasted by the (small) current code shrinkage.
	 * Preferably, we should do a small number of passes (e.g. 5) and if we
	 * haven't converged by then, get impatient and force code to generate
	 * as-is, even if the odd branch would be left long. The chances of a
	 * long jump are tiny with all but the most enormous of BPF filter
	 * inputs, so we should usually converge on the third pass.
	 */

	cgctx.idx = 0;
	cgctx.seen = 0;
	cgctx.pc_ret0 = -1;
	/* Scouting faux-generate pass 0 */
	if (bpf_jit_build_body(fp, 0, &cgctx, addrs))
		/* We hit something illegal or unsupported. */
		goto out;

	/*
	 * Pretend to build prologue, given the features we've seen. This will
	 * update cgctx.idx as it pretends to output instructions, then we can
	 * calculate total size from idx.
	 */
	bpf_jit_build_prologue(fp, 0, &cgctx);
	bpf_jit_build_epilogue(0, &cgctx);

	proglen = cgctx.idx * 4;
	alloclen = proglen + FUNCTION_DESCR_SIZE;
	image = module_alloc(alloclen);
	if (!image)
		goto out;

	code_base = image + (FUNCTION_DESCR_SIZE/4);

	/* Code generation passes 1-2 */
	for (pass = 1; pass < 3; pass++) {
		/* Now build the prologue, body code & epilogue for real. */
		cgctx.idx = 0;
		bpf_jit_build_prologue(fp, code_base, &cgctx);
		bpf_jit_build_body(fp, code_base, &cgctx, addrs);
		bpf_jit_build_epilogue(code_base, &cgctx);

		if (bpf_jit_enable > 1)
			pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
				proglen - (cgctx.idx * 4), cgctx.seen);
	}

	if (bpf_jit_enable > 1)
		/* Note that we output the base address of the code_base
		 * rather than image, since opcodes are in code_base.
		 */
		bpf_jit_dump(flen, proglen, pass, code_base);

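	/*
	 * On 64-bit ABIv1 a function pointer points to a function descriptor
	 * (entry address followed by the TOC pointer), not to the code, so
	 * build one at the start of the image.
	 */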
	if (image) {
		bpf_flush_icache(code_base, code_base + (proglen/4));
		/* Function descriptor nastiness: Address + TOC */
		((u64 *)image)[0] = (u64)code_base;
		((u64 *)image)[1] = local_paca->kernel_toc;
		fp->bpf_func = (void *)image;
		fp->jited = true;
	}
out:
	kfree(addrs);
	return;
}

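/*
 * The image was handed out by module_alloc() in bpf_jit_compile(), so it is
 * released with module_free(); bpf_prog_unlock_free() then frees the filter
 * itself.
 */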
void bpf_jit_free(struct bpf_prog *fp)
{
	if (fp->jited)
		module_free(NULL, fp->bpf_func);

	bpf_prog_unlock_free(fp);
}