// SPDX-License-Identifier: GPL-2.0-only
/*
 * BPF JIT compiler for LoongArch
 *
 * Copyright (C) 2022 Loongson Technology Corporation Limited
 */
#include "bpf_jit.h"

#define REG_TCC		LOONGARCH_GPR_A6
#define TCC_SAVED	LOONGARCH_GPR_S5

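/*
 * Flags recorded in ctx->flags while scanning the program body:
 * SAVE_RA marks programs that make calls (so $ra must be preserved),
 * SAVE_TCC marks programs that use tail calls.
 */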
#define SAVE_RA		BIT(0)
#define SAVE_TCC	BIT(1)

static const int regmap[] = {
	/* return value from in-kernel function, and exit value for eBPF program */
	[BPF_REG_0] = LOONGARCH_GPR_A5,
	/* arguments from eBPF program to in-kernel function */
	[BPF_REG_1] = LOONGARCH_GPR_A0,
	[BPF_REG_2] = LOONGARCH_GPR_A1,
	[BPF_REG_3] = LOONGARCH_GPR_A2,
	[BPF_REG_4] = LOONGARCH_GPR_A3,
	[BPF_REG_5] = LOONGARCH_GPR_A4,
	/* callee saved registers that in-kernel function will preserve */
	[BPF_REG_6] = LOONGARCH_GPR_S0,
	[BPF_REG_7] = LOONGARCH_GPR_S1,
	[BPF_REG_8] = LOONGARCH_GPR_S2,
	[BPF_REG_9] = LOONGARCH_GPR_S3,
	/* read-only frame pointer to access stack */
	[BPF_REG_FP] = LOONGARCH_GPR_S4,
	/* temporary register for blinding constants */
	[BPF_REG_AX] = LOONGARCH_GPR_T0,
};

static void mark_call(struct jit_ctx *ctx)
{
	ctx->flags |= SAVE_RA;
}

static void mark_tail_call(struct jit_ctx *ctx)
{
	ctx->flags |= SAVE_TCC;
}

static bool seen_call(struct jit_ctx *ctx)
{
	return (ctx->flags & SAVE_RA);
}

static bool seen_tail_call(struct jit_ctx *ctx)
{
	return (ctx->flags & SAVE_TCC);
}

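/*
 * If the program also makes calls, REG_TCC ($a6) may be clobbered by
 * the callee, so the prologue copies it to TCC_SAVED ($s5) and tail
 * calls read the count from there instead.
 */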
static u8 tail_call_reg(struct jit_ctx *ctx)
{
	if (seen_call(ctx))
		return TCC_SAVED;

	return REG_TCC;
}

/*
 * eBPF prog stack layout:
 *
 *                                        high
 * original $sp ------------> +-------------------------+ <--LOONGARCH_GPR_FP
 *                            |           $ra           |
 *                            +-------------------------+
 *                            |           $fp           |
 *                            +-------------------------+
 *                            |           $s0           |
 *                            +-------------------------+
 *                            |           $s1           |
 *                            +-------------------------+
 *                            |           $s2           |
 *                            +-------------------------+
 *                            |           $s3           |
 *                            +-------------------------+
 *                            |           $s4           |
 *                            +-------------------------+
 *                            |           $s5           |
 *                            +-------------------------+ <--BPF_REG_FP
 *                            |  prog->aux->stack_depth |
 *                            |        (optional)       |
 * current $sp -------------> +-------------------------+
 *                                        low
 */
static void build_prologue(struct jit_ctx *ctx)
{
	int stack_adjust = 0, store_offset, bpf_stack_adjust;

	bpf_stack_adjust = round_up(ctx->prog->aux->stack_depth, 16);

	/* To store ra, fp, s0, s1, s2, s3, s4 and s5. */
	stack_adjust += sizeof(long) * 8;

	stack_adjust = round_up(stack_adjust, 16);
	stack_adjust += bpf_stack_adjust;

	/*
	 * First instruction initializes the tail call count (TCC).
	 * On tail call we skip this instruction, and the TCC is
	 * passed in REG_TCC from the caller.
	 */
	emit_insn(ctx, addid, REG_TCC, LOONGARCH_GPR_ZERO, MAX_TAIL_CALL_CNT);

	emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, -stack_adjust);

	store_offset = stack_adjust - sizeof(long);
	emit_insn(ctx, std, LOONGARCH_GPR_RA, LOONGARCH_GPR_SP, store_offset);

	store_offset -= sizeof(long);
	emit_insn(ctx, std, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, store_offset);

	store_offset -= sizeof(long);
	emit_insn(ctx, std, LOONGARCH_GPR_S0, LOONGARCH_GPR_SP, store_offset);

	store_offset -= sizeof(long);
	emit_insn(ctx, std, LOONGARCH_GPR_S1, LOONGARCH_GPR_SP, store_offset);

	store_offset -= sizeof(long);
	emit_insn(ctx, std, LOONGARCH_GPR_S2, LOONGARCH_GPR_SP, store_offset);

	store_offset -= sizeof(long);
	emit_insn(ctx, std, LOONGARCH_GPR_S3, LOONGARCH_GPR_SP, store_offset);

	store_offset -= sizeof(long);
	emit_insn(ctx, std, LOONGARCH_GPR_S4, LOONGARCH_GPR_SP, store_offset);

	store_offset -= sizeof(long);
	emit_insn(ctx, std, LOONGARCH_GPR_S5, LOONGARCH_GPR_SP, store_offset);

	emit_insn(ctx, addid, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, stack_adjust);

	if (bpf_stack_adjust)
		emit_insn(ctx, addid, regmap[BPF_REG_FP], LOONGARCH_GPR_SP, bpf_stack_adjust);

	/*
	 * Program contains calls and tail calls, so REG_TCC needs
	 * to be saved across calls.
	 */
	if (seen_tail_call(ctx) && seen_call(ctx))
		move_reg(ctx, TCC_SAVED, REG_TCC);
	else
		emit_insn(ctx, nop);

	ctx->stack_size = stack_adjust;
}

static void __build_epilogue(struct jit_ctx *ctx, bool is_tail_call)
{
	int stack_adjust = ctx->stack_size;
	int load_offset;

	load_offset = stack_adjust - sizeof(long);
	emit_insn(ctx, ldd, LOONGARCH_GPR_RA, LOONGARCH_GPR_SP, load_offset);

	load_offset -= sizeof(long);
	emit_insn(ctx, ldd, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, load_offset);

	load_offset -= sizeof(long);
	emit_insn(ctx, ldd, LOONGARCH_GPR_S0, LOONGARCH_GPR_SP, load_offset);

	load_offset -= sizeof(long);
	emit_insn(ctx, ldd, LOONGARCH_GPR_S1, LOONGARCH_GPR_SP, load_offset);

	load_offset -= sizeof(long);
	emit_insn(ctx, ldd, LOONGARCH_GPR_S2, LOONGARCH_GPR_SP, load_offset);

	load_offset -= sizeof(long);
	emit_insn(ctx, ldd, LOONGARCH_GPR_S3, LOONGARCH_GPR_SP, load_offset);

	load_offset -= sizeof(long);
	emit_insn(ctx, ldd, LOONGARCH_GPR_S4, LOONGARCH_GPR_SP, load_offset);

	load_offset -= sizeof(long);
	emit_insn(ctx, ldd, LOONGARCH_GPR_S5, LOONGARCH_GPR_SP, load_offset);

	emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, stack_adjust);

	if (!is_tail_call) {
		/* Set return value */
		emit_insn(ctx, addiw, LOONGARCH_GPR_A0, regmap[BPF_REG_0], 0);
		/* Return to the caller */
		emit_insn(ctx, jirl, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_RA, 0);
	} else {
		/*
		 * Call the next bpf prog and skip the first instruction
		 * of TCC initialization.
		 */
		emit_insn(ctx, jirl, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_T3, 1);
	}
}

static void build_epilogue(struct jit_ctx *ctx)
{
	__build_epilogue(ctx, false);
}

bool bpf_jit_supports_kfunc_call(void)
{
	return true;
}

/* initialized on the first pass of build_body() */
static int out_offset = -1;
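/*
 * The emitted tail call sequence must have the same length everywhere,
 * so that out_offset (the distance to the "out:" label, recorded on the
 * first emission) is valid for every later emission; the check at the
 * end of emit_bpf_tail_call() enforces this.
 */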
static int emit_bpf_tail_call(struct jit_ctx *ctx)
{
	int off;
	u8 tcc = tail_call_reg(ctx);
	u8 a1 = LOONGARCH_GPR_A1;
	u8 a2 = LOONGARCH_GPR_A2;
	u8 t1 = LOONGARCH_GPR_T1;
	u8 t2 = LOONGARCH_GPR_T2;
	u8 t3 = LOONGARCH_GPR_T3;
	const int idx0 = ctx->idx;

#define cur_offset (ctx->idx - idx0)
#define jmp_offset (out_offset - (cur_offset))

	/*
	 * a0: &ctx
	 * a1: &array
	 * a2: index
	 *
	 * if (index >= array->map.max_entries)
	 *	 goto out;
	 */
	off = offsetof(struct bpf_array, map.max_entries);
	emit_insn(ctx, ldwu, t1, a1, off);
	/* bgeu $a2, $t1, jmp_offset */
	if (emit_tailcall_jmp(ctx, BPF_JGE, a2, t1, jmp_offset) < 0)
		goto toofar;

	/*
	 * if (--TCC < 0)
	 *	 goto out;
	 */
	emit_insn(ctx, addid, REG_TCC, tcc, -1);
	if (emit_tailcall_jmp(ctx, BPF_JSLT, REG_TCC, LOONGARCH_GPR_ZERO, jmp_offset) < 0)
		goto toofar;

	/*
	 * prog = array->ptrs[index];
	 * if (!prog)
	 *	 goto out;
	 */
	emit_insn(ctx, alsld, t2, a2, a1, 2);
	off = offsetof(struct bpf_array, ptrs);
	emit_insn(ctx, ldd, t2, t2, off);
	/* beq $t2, $zero, jmp_offset */
	if (emit_tailcall_jmp(ctx, BPF_JEQ, t2, LOONGARCH_GPR_ZERO, jmp_offset) < 0)
		goto toofar;

	/* goto *(prog->bpf_func + 4); */
	off = offsetof(struct bpf_prog, bpf_func);
	emit_insn(ctx, ldd, t3, t2, off);
	__build_epilogue(ctx, true);

	/* out: */
	if (out_offset == -1)
		out_offset = cur_offset;
	if (cur_offset != out_offset) {
		pr_err_once("tail_call out_offset = %d, expected %d!\n",
			    cur_offset, out_offset);
		return -1;
	}

	return 0;

toofar:
	pr_info_once("tail_call: jump too far\n");
	return -1;
#undef cur_offset
#undef jmp_offset
}

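/*
 * Lower BPF_STX | BPF_ATOMIC to LoongArch AM* (and LL/SC for cmpxchg)
 * instructions. The effective address dst + off is computed into $t1,
 * and 32-bit fetch/xchg/cmpxchg results are zero-extended to match
 * eBPF semantics.
 */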
static void emit_atomic(const struct bpf_insn *insn, struct jit_ctx *ctx)
{
	const u8 t1 = LOONGARCH_GPR_T1;
	const u8 t2 = LOONGARCH_GPR_T2;
	const u8 t3 = LOONGARCH_GPR_T3;
	const u8 r0 = regmap[BPF_REG_0];
	const u8 src = regmap[insn->src_reg];
	const u8 dst = regmap[insn->dst_reg];
	const s16 off = insn->off;
	const s32 imm = insn->imm;
	const bool isdw = BPF_SIZE(insn->code) == BPF_DW;

	move_imm(ctx, t1, off, false);
	emit_insn(ctx, addd, t1, dst, t1);
	move_reg(ctx, t3, src);

	switch (imm) {
	/* lock *(size *)(dst + off) <op>= src */
	case BPF_ADD:
		if (isdw)
			emit_insn(ctx, amaddd, t2, t1, src);
		else
			emit_insn(ctx, amaddw, t2, t1, src);
		break;
	case BPF_AND:
		if (isdw)
			emit_insn(ctx, amandd, t2, t1, src);
		else
			emit_insn(ctx, amandw, t2, t1, src);
		break;
	case BPF_OR:
		if (isdw)
			emit_insn(ctx, amord, t2, t1, src);
		else
			emit_insn(ctx, amorw, t2, t1, src);
		break;
	case BPF_XOR:
		if (isdw)
			emit_insn(ctx, amxord, t2, t1, src);
		else
			emit_insn(ctx, amxorw, t2, t1, src);
		break;
	/* src = atomic_fetch_<op>(dst + off, src) */
	case BPF_ADD | BPF_FETCH:
		if (isdw) {
			emit_insn(ctx, amaddd, src, t1, t3);
		} else {
			emit_insn(ctx, amaddw, src, t1, t3);
			emit_zext_32(ctx, src, true);
		}
		break;
	case BPF_AND | BPF_FETCH:
		if (isdw) {
			emit_insn(ctx, amandd, src, t1, t3);
		} else {
			emit_insn(ctx, amandw, src, t1, t3);
			emit_zext_32(ctx, src, true);
		}
		break;
	case BPF_OR | BPF_FETCH:
		if (isdw) {
			emit_insn(ctx, amord, src, t1, t3);
		} else {
			emit_insn(ctx, amorw, src, t1, t3);
			emit_zext_32(ctx, src, true);
		}
		break;
	case BPF_XOR | BPF_FETCH:
		if (isdw) {
			emit_insn(ctx, amxord, src, t1, t3);
		} else {
			emit_insn(ctx, amxorw, src, t1, t3);
			emit_zext_32(ctx, src, true);
		}
		break;
	/* src = atomic_xchg(dst + off, src); */
	case BPF_XCHG:
		if (isdw) {
			emit_insn(ctx, amswapd, src, t1, t3);
		} else {
			emit_insn(ctx, amswapw, src, t1, t3);
			emit_zext_32(ctx, src, true);
		}
		break;
	/* r0 = atomic_cmpxchg(dst + off, r0, src); */
	case BPF_CMPXCHG:
		move_reg(ctx, t2, r0);
		if (isdw) {
			emit_insn(ctx, lld, r0, t1, 0);
			emit_insn(ctx, bne, t2, r0, 4);
			move_reg(ctx, t3, src);
			emit_insn(ctx, scd, t3, t1, 0);
			emit_insn(ctx, beq, t3, LOONGARCH_GPR_ZERO, -4);
		} else {
			emit_insn(ctx, llw, r0, t1, 0);
			emit_zext_32(ctx, t2, true);
			emit_zext_32(ctx, r0, true);
			emit_insn(ctx, bne, t2, r0, 4);
			move_reg(ctx, t3, src);
			emit_insn(ctx, scw, t3, t1, 0);
			emit_insn(ctx, beq, t3, LOONGARCH_GPR_ZERO, -6);
			emit_zext_32(ctx, r0, true);
		}
		break;
	}
}

static bool is_signed_bpf_cond(u8 cond)
{
	return cond == BPF_JSGT || cond == BPF_JSLT ||
	       cond == BPF_JSGE || cond == BPF_JSLE;
}

#define BPF_FIXUP_REG_MASK	GENMASK(31, 27)
#define BPF_FIXUP_OFFSET_MASK	GENMASK(26, 0)

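/*
 * Exception fixup for a faulting BPF_PROBE_MEM load: clear the
 * destination register and resume at the address recorded relative to
 * &ex->fixup by add_exception_handler().
 */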
bool ex_handler_bpf(const struct exception_table_entry *ex,
		    struct pt_regs *regs)
{
	int dst_reg = FIELD_GET(BPF_FIXUP_REG_MASK, ex->fixup);
	off_t offset = FIELD_GET(BPF_FIXUP_OFFSET_MASK, ex->fixup);

	regs->regs[dst_reg] = 0;
	regs->csr_era = (unsigned long)&ex->fixup - offset;

	return true;
}

/* For accesses to BTF pointers, add an entry to the exception table */
static int add_exception_handler(const struct bpf_insn *insn,
				 struct jit_ctx *ctx,
				 int dst_reg)
{
	unsigned long pc;
	off_t offset;
	struct exception_table_entry *ex;

	if (!ctx->image || !ctx->prog->aux->extable || BPF_MODE(insn->code) != BPF_PROBE_MEM)
		return 0;

	if (WARN_ON_ONCE(ctx->num_exentries >= ctx->prog->aux->num_exentries))
		return -EINVAL;

	ex = &ctx->prog->aux->extable[ctx->num_exentries];
	pc = (unsigned long)&ctx->image[ctx->idx - 1];

	offset = pc - (long)&ex->insn;
	if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
		return -ERANGE;

	ex->insn = offset;

	/*
	 * Since the extable follows the program, the fixup offset is always
	 * negative and limited to BPF_JIT_REGION_SIZE. Store a positive value
	 * to keep things simple, and put the destination register in the upper
	 * bits. We don't need to worry about buildtime or runtime sort
	 * modifying the upper bits because the table is already sorted, and
	 * isn't part of the main exception table.
	 */
	offset = (long)&ex->fixup - (pc + LOONGARCH_INSN_SIZE);
	if (!FIELD_FIT(BPF_FIXUP_OFFSET_MASK, offset))
		return -ERANGE;

	ex->type = EX_TYPE_BPF;
	ex->fixup = FIELD_PREP(BPF_FIXUP_OFFSET_MASK, offset) | FIELD_PREP(BPF_FIXUP_REG_MASK, dst_reg);

	ctx->num_exentries++;

	return 0;
}

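/*
 * Translate a single eBPF instruction. Returns 1 if the instruction
 * occupies two slots (BPF_LD | BPF_IMM | BPF_DW), 0 on success and a
 * negative error code on failure.
 */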
static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool extra_pass)
{
	u8 tm = -1;
	u64 func_addr;
	bool func_addr_fixed;
	int i = insn - ctx->prog->insnsi;
	int ret, jmp_offset;
	const u8 code = insn->code;
	const u8 cond = BPF_OP(code);
	const u8 t1 = LOONGARCH_GPR_T1;
	const u8 t2 = LOONGARCH_GPR_T2;
	const u8 src = regmap[insn->src_reg];
	const u8 dst = regmap[insn->dst_reg];
	const s16 off = insn->off;
	const s32 imm = insn->imm;
	const bool is32 = BPF_CLASS(insn->code) == BPF_ALU || BPF_CLASS(insn->code) == BPF_JMP32;

	switch (code) {
	/* dst = src */
	case BPF_ALU | BPF_MOV | BPF_X:
	case BPF_ALU64 | BPF_MOV | BPF_X:
		move_reg(ctx, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = imm */
	case BPF_ALU | BPF_MOV | BPF_K:
	case BPF_ALU64 | BPF_MOV | BPF_K:
		move_imm(ctx, dst, imm, is32);
		break;

	/* dst = dst + src */
	case BPF_ALU | BPF_ADD | BPF_X:
	case BPF_ALU64 | BPF_ADD | BPF_X:
		emit_insn(ctx, addd, dst, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst + imm */
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_ADD | BPF_K:
		if (is_signed_imm12(imm)) {
			emit_insn(ctx, addid, dst, dst, imm);
		} else {
			move_imm(ctx, t1, imm, is32);
			emit_insn(ctx, addd, dst, dst, t1);
		}
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst - src */
	case BPF_ALU | BPF_SUB | BPF_X:
	case BPF_ALU64 | BPF_SUB | BPF_X:
		emit_insn(ctx, subd, dst, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst - imm */
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_K:
		if (is_signed_imm12(-imm)) {
			emit_insn(ctx, addid, dst, dst, -imm);
		} else {
			move_imm(ctx, t1, imm, is32);
			emit_insn(ctx, subd, dst, dst, t1);
		}
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst * src */
	case BPF_ALU | BPF_MUL | BPF_X:
	case BPF_ALU64 | BPF_MUL | BPF_X:
		emit_insn(ctx, muld, dst, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst * imm */
	case BPF_ALU | BPF_MUL | BPF_K:
	case BPF_ALU64 | BPF_MUL | BPF_K:
		move_imm(ctx, t1, imm, is32);
		emit_insn(ctx, muld, dst, dst, t1);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst / src */
	case BPF_ALU | BPF_DIV | BPF_X:
	case BPF_ALU64 | BPF_DIV | BPF_X:
		emit_zext_32(ctx, dst, is32);
		move_reg(ctx, t1, src);
		emit_zext_32(ctx, t1, is32);
		emit_insn(ctx, divdu, dst, dst, t1);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst / imm */
	case BPF_ALU | BPF_DIV | BPF_K:
	case BPF_ALU64 | BPF_DIV | BPF_K:
		move_imm(ctx, t1, imm, is32);
		emit_zext_32(ctx, dst, is32);
		emit_insn(ctx, divdu, dst, dst, t1);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst % src */
	case BPF_ALU | BPF_MOD | BPF_X:
	case BPF_ALU64 | BPF_MOD | BPF_X:
		emit_zext_32(ctx, dst, is32);
		move_reg(ctx, t1, src);
		emit_zext_32(ctx, t1, is32);
		emit_insn(ctx, moddu, dst, dst, t1);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst % imm */
	case BPF_ALU | BPF_MOD | BPF_K:
	case BPF_ALU64 | BPF_MOD | BPF_K:
		move_imm(ctx, t1, imm, is32);
		emit_zext_32(ctx, dst, is32);
		emit_insn(ctx, moddu, dst, dst, t1);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = -dst */
	case BPF_ALU | BPF_NEG:
	case BPF_ALU64 | BPF_NEG:
		move_imm(ctx, t1, imm, is32);
		emit_insn(ctx, subd, dst, LOONGARCH_GPR_ZERO, dst);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst & src */
	case BPF_ALU | BPF_AND | BPF_X:
	case BPF_ALU64 | BPF_AND | BPF_X:
		emit_insn(ctx, and, dst, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst & imm */
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_AND | BPF_K:
		if (is_unsigned_imm12(imm)) {
			emit_insn(ctx, andi, dst, dst, imm);
		} else {
			move_imm(ctx, t1, imm, is32);
			emit_insn(ctx, and, dst, dst, t1);
		}
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst | src */
	case BPF_ALU | BPF_OR | BPF_X:
	case BPF_ALU64 | BPF_OR | BPF_X:
		emit_insn(ctx, or, dst, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst | imm */
	case BPF_ALU | BPF_OR | BPF_K:
	case BPF_ALU64 | BPF_OR | BPF_K:
		if (is_unsigned_imm12(imm)) {
			emit_insn(ctx, ori, dst, dst, imm);
		} else {
			move_imm(ctx, t1, imm, is32);
			emit_insn(ctx, or, dst, dst, t1);
		}
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst ^ src */
	case BPF_ALU | BPF_XOR | BPF_X:
	case BPF_ALU64 | BPF_XOR | BPF_X:
		emit_insn(ctx, xor, dst, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst ^ imm */
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_K:
		if (is_unsigned_imm12(imm)) {
			emit_insn(ctx, xori, dst, dst, imm);
		} else {
			move_imm(ctx, t1, imm, is32);
			emit_insn(ctx, xor, dst, dst, t1);
		}
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst << src (logical) */
	case BPF_ALU | BPF_LSH | BPF_X:
		emit_insn(ctx, sllw, dst, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;

	case BPF_ALU64 | BPF_LSH | BPF_X:
		emit_insn(ctx, slld, dst, dst, src);
		break;

	/* dst = dst << imm (logical) */
	case BPF_ALU | BPF_LSH | BPF_K:
		emit_insn(ctx, slliw, dst, dst, imm);
		emit_zext_32(ctx, dst, is32);
		break;

	case BPF_ALU64 | BPF_LSH | BPF_K:
		emit_insn(ctx, sllid, dst, dst, imm);
		break;

	/* dst = dst >> src (logical) */
	case BPF_ALU | BPF_RSH | BPF_X:
		emit_insn(ctx, srlw, dst, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;

	case BPF_ALU64 | BPF_RSH | BPF_X:
		emit_insn(ctx, srld, dst, dst, src);
		break;

	/* dst = dst >> imm (logical) */
	case BPF_ALU | BPF_RSH | BPF_K:
		emit_insn(ctx, srliw, dst, dst, imm);
		emit_zext_32(ctx, dst, is32);
		break;

	case BPF_ALU64 | BPF_RSH | BPF_K:
		emit_insn(ctx, srlid, dst, dst, imm);
		break;

	/* dst = dst >> src (arithmetic) */
	case BPF_ALU | BPF_ARSH | BPF_X:
		emit_insn(ctx, sraw, dst, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;

	case BPF_ALU64 | BPF_ARSH | BPF_X:
		emit_insn(ctx, srad, dst, dst, src);
		break;

	/* dst = dst >> imm (arithmetic) */
	case BPF_ALU | BPF_ARSH | BPF_K:
		emit_insn(ctx, sraiw, dst, dst, imm);
		emit_zext_32(ctx, dst, is32);
		break;

	case BPF_ALU64 | BPF_ARSH | BPF_K:
		emit_insn(ctx, sraid, dst, dst, imm);
		break;

	/* dst = BSWAP##imm(dst) */
	case BPF_ALU | BPF_END | BPF_FROM_LE:
		switch (imm) {
		case 16:
			/* zero-extend 16 bits into 64 bits */
			emit_insn(ctx, bstrpickd, dst, dst, 15, 0);
			break;
		case 32:
			/* zero-extend 32 bits into 64 bits */
			emit_zext_32(ctx, dst, is32);
			break;
		case 64:
			/* do nothing */
			break;
		}
		break;

	case BPF_ALU | BPF_END | BPF_FROM_BE:
		switch (imm) {
		case 16:
			emit_insn(ctx, revb2h, dst, dst);
			/* zero-extend 16 bits into 64 bits */
			emit_insn(ctx, bstrpickd, dst, dst, 15, 0);
			break;
		case 32:
			emit_insn(ctx, revb2w, dst, dst);
			/* zero-extend 32 bits into 64 bits */
			emit_zext_32(ctx, dst, is32);
			break;
		case 64:
			emit_insn(ctx, revbd, dst, dst);
			break;
		}
		break;

	/* PC += off if dst cond src */
	case BPF_JMP | BPF_JEQ | BPF_X:
	case BPF_JMP | BPF_JNE | BPF_X:
	case BPF_JMP | BPF_JGT | BPF_X:
	case BPF_JMP | BPF_JGE | BPF_X:
	case BPF_JMP | BPF_JLT | BPF_X:
	case BPF_JMP | BPF_JLE | BPF_X:
	case BPF_JMP | BPF_JSGT | BPF_X:
	case BPF_JMP | BPF_JSGE | BPF_X:
	case BPF_JMP | BPF_JSLT | BPF_X:
	case BPF_JMP | BPF_JSLE | BPF_X:
	case BPF_JMP32 | BPF_JEQ | BPF_X:
	case BPF_JMP32 | BPF_JNE | BPF_X:
	case BPF_JMP32 | BPF_JGT | BPF_X:
	case BPF_JMP32 | BPF_JGE | BPF_X:
	case BPF_JMP32 | BPF_JLT | BPF_X:
	case BPF_JMP32 | BPF_JLE | BPF_X:
	case BPF_JMP32 | BPF_JSGT | BPF_X:
	case BPF_JMP32 | BPF_JSGE | BPF_X:
	case BPF_JMP32 | BPF_JSLT | BPF_X:
	case BPF_JMP32 | BPF_JSLE | BPF_X:
		jmp_offset = bpf2la_offset(i, off, ctx);
		move_reg(ctx, t1, dst);
		move_reg(ctx, t2, src);
		if (is_signed_bpf_cond(BPF_OP(code))) {
			emit_sext_32(ctx, t1, is32);
			emit_sext_32(ctx, t2, is32);
		} else {
			emit_zext_32(ctx, t1, is32);
			emit_zext_32(ctx, t2, is32);
		}
		if (emit_cond_jmp(ctx, cond, t1, t2, jmp_offset) < 0)
			goto toofar;
		break;

	/* PC += off if dst cond imm */
	case BPF_JMP | BPF_JEQ | BPF_K:
	case BPF_JMP | BPF_JNE | BPF_K:
	case BPF_JMP | BPF_JGT | BPF_K:
	case BPF_JMP | BPF_JGE | BPF_K:
	case BPF_JMP | BPF_JLT | BPF_K:
	case BPF_JMP | BPF_JLE | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
	case BPF_JMP | BPF_JSLT | BPF_K:
	case BPF_JMP | BPF_JSLE | BPF_K:
	case BPF_JMP32 | BPF_JEQ | BPF_K:
	case BPF_JMP32 | BPF_JNE | BPF_K:
	case BPF_JMP32 | BPF_JGT | BPF_K:
	case BPF_JMP32 | BPF_JGE | BPF_K:
	case BPF_JMP32 | BPF_JLT | BPF_K:
	case BPF_JMP32 | BPF_JLE | BPF_K:
	case BPF_JMP32 | BPF_JSGT | BPF_K:
	case BPF_JMP32 | BPF_JSGE | BPF_K:
	case BPF_JMP32 | BPF_JSLT | BPF_K:
	case BPF_JMP32 | BPF_JSLE | BPF_K:
		jmp_offset = bpf2la_offset(i, off, ctx);
		if (imm) {
			move_imm(ctx, t1, imm, false);
			tm = t1;
		} else {
			/* If imm is 0, simply use zero register. */
			tm = LOONGARCH_GPR_ZERO;
		}
		move_reg(ctx, t2, dst);
		if (is_signed_bpf_cond(BPF_OP(code))) {
			emit_sext_32(ctx, tm, is32);
			emit_sext_32(ctx, t2, is32);
		} else {
			emit_zext_32(ctx, tm, is32);
			emit_zext_32(ctx, t2, is32);
		}
		if (emit_cond_jmp(ctx, cond, t2, tm, jmp_offset) < 0)
			goto toofar;
		break;

	/* PC += off if dst & src */
	case BPF_JMP | BPF_JSET | BPF_X:
	case BPF_JMP32 | BPF_JSET | BPF_X:
		jmp_offset = bpf2la_offset(i, off, ctx);
		emit_insn(ctx, and, t1, dst, src);
		emit_zext_32(ctx, t1, is32);
		if (emit_cond_jmp(ctx, cond, t1, LOONGARCH_GPR_ZERO, jmp_offset) < 0)
			goto toofar;
		break;

	/* PC += off if dst & imm */
	case BPF_JMP | BPF_JSET | BPF_K:
	case BPF_JMP32 | BPF_JSET | BPF_K:
		jmp_offset = bpf2la_offset(i, off, ctx);
		move_imm(ctx, t1, imm, is32);
		emit_insn(ctx, and, t1, dst, t1);
		emit_zext_32(ctx, t1, is32);
		if (emit_cond_jmp(ctx, cond, t1, LOONGARCH_GPR_ZERO, jmp_offset) < 0)
			goto toofar;
		break;

	/* PC += off */
	case BPF_JMP | BPF_JA:
		jmp_offset = bpf2la_offset(i, off, ctx);
		if (emit_uncond_jmp(ctx, jmp_offset) < 0)
			goto toofar;
		break;

	/* function call */
	case BPF_JMP | BPF_CALL:
		mark_call(ctx);
		ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass,
					    &func_addr, &func_addr_fixed);
		if (ret < 0)
			return ret;

		move_addr(ctx, t1, func_addr);
		emit_insn(ctx, jirl, LOONGARCH_GPR_RA, t1, 0);

		if (insn->src_reg != BPF_PSEUDO_CALL)
			move_reg(ctx, regmap[BPF_REG_0], LOONGARCH_GPR_A0);

		break;

	/* tail call */
	case BPF_JMP | BPF_TAIL_CALL:
		mark_tail_call(ctx);
		if (emit_bpf_tail_call(ctx) < 0)
			return -EINVAL;
		break;

	/* function return */
	case BPF_JMP | BPF_EXIT:
		if (i == ctx->prog->len - 1)
			break;

		jmp_offset = epilogue_offset(ctx);
		if (emit_uncond_jmp(ctx, jmp_offset) < 0)
			goto toofar;
		break;

	/* dst = imm64 */
	case BPF_LD | BPF_IMM | BPF_DW:
	{
		const u64 imm64 = (u64)(insn + 1)->imm << 32 | (u32)insn->imm;

		if (bpf_pseudo_func(insn))
			move_addr(ctx, dst, imm64);
		else
			move_imm(ctx, dst, imm64, is32);
		return 1;
	}

	/* dst = *(size *)(src + off) */
	case BPF_LDX | BPF_MEM | BPF_B:
	case BPF_LDX | BPF_MEM | BPF_H:
	case BPF_LDX | BPF_MEM | BPF_W:
	case BPF_LDX | BPF_MEM | BPF_DW:
	case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
	case BPF_LDX | BPF_PROBE_MEM | BPF_W:
	case BPF_LDX | BPF_PROBE_MEM | BPF_H:
	case BPF_LDX | BPF_PROBE_MEM | BPF_B:
		switch (BPF_SIZE(code)) {
		case BPF_B:
			if (is_signed_imm12(off)) {
				emit_insn(ctx, ldbu, dst, src, off);
			} else {
				move_imm(ctx, t1, off, is32);
				emit_insn(ctx, ldxbu, dst, src, t1);
			}
			break;
		case BPF_H:
			if (is_signed_imm12(off)) {
				emit_insn(ctx, ldhu, dst, src, off);
			} else {
				move_imm(ctx, t1, off, is32);
				emit_insn(ctx, ldxhu, dst, src, t1);
			}
			break;
		case BPF_W:
			if (is_signed_imm12(off)) {
				emit_insn(ctx, ldwu, dst, src, off);
			} else if (is_signed_imm14(off)) {
				emit_insn(ctx, ldptrw, dst, src, off);
			} else {
				move_imm(ctx, t1, off, is32);
				emit_insn(ctx, ldxwu, dst, src, t1);
			}
			break;
		case BPF_DW:
			move_imm(ctx, t1, off, is32);
			emit_insn(ctx, ldxd, dst, src, t1);
			break;
		}

		ret = add_exception_handler(insn, ctx, dst);
		if (ret)
			return ret;
		break;

	/* *(size *)(dst + off) = imm */
	case BPF_ST | BPF_MEM | BPF_B:
	case BPF_ST | BPF_MEM | BPF_H:
	case BPF_ST | BPF_MEM | BPF_W:
	case BPF_ST | BPF_MEM | BPF_DW:
		switch (BPF_SIZE(code)) {
		case BPF_B:
			move_imm(ctx, t1, imm, is32);
			if (is_signed_imm12(off)) {
				emit_insn(ctx, stb, t1, dst, off);
			} else {
				move_imm(ctx, t2, off, is32);
				emit_insn(ctx, stxb, t1, dst, t2);
			}
			break;
		case BPF_H:
			move_imm(ctx, t1, imm, is32);
			if (is_signed_imm12(off)) {
				emit_insn(ctx, sth, t1, dst, off);
			} else {
				move_imm(ctx, t2, off, is32);
				emit_insn(ctx, stxh, t1, dst, t2);
			}
			break;
		case BPF_W:
			move_imm(ctx, t1, imm, is32);
			if (is_signed_imm12(off)) {
				emit_insn(ctx, stw, t1, dst, off);
			} else if (is_signed_imm14(off)) {
				emit_insn(ctx, stptrw, t1, dst, off);
			} else {
				move_imm(ctx, t2, off, is32);
				emit_insn(ctx, stxw, t1, dst, t2);
			}
			break;
		case BPF_DW:
			move_imm(ctx, t1, imm, is32);
			if (is_signed_imm12(off)) {
				emit_insn(ctx, std, t1, dst, off);
			} else if (is_signed_imm14(off)) {
				emit_insn(ctx, stptrd, t1, dst, off);
			} else {
				move_imm(ctx, t2, off, is32);
				emit_insn(ctx, stxd, t1, dst, t2);
			}
			break;
		}
		break;

	/* *(size *)(dst + off) = src */
	case BPF_STX | BPF_MEM | BPF_B:
	case BPF_STX | BPF_MEM | BPF_H:
	case BPF_STX | BPF_MEM | BPF_W:
	case BPF_STX | BPF_MEM | BPF_DW:
		switch (BPF_SIZE(code)) {
		case BPF_B:
			if (is_signed_imm12(off)) {
				emit_insn(ctx, stb, src, dst, off);
			} else {
				move_imm(ctx, t1, off, is32);
				emit_insn(ctx, stxb, src, dst, t1);
			}
			break;
		case BPF_H:
			if (is_signed_imm12(off)) {
				emit_insn(ctx, sth, src, dst, off);
			} else {
				move_imm(ctx, t1, off, is32);
				emit_insn(ctx, stxh, src, dst, t1);
			}
			break;
		case BPF_W:
			if (is_signed_imm12(off)) {
				emit_insn(ctx, stw, src, dst, off);
			} else if (is_signed_imm14(off)) {
				emit_insn(ctx, stptrw, src, dst, off);
			} else {
				move_imm(ctx, t1, off, is32);
				emit_insn(ctx, stxw, src, dst, t1);
			}
			break;
		case BPF_DW:
			if (is_signed_imm12(off)) {
				emit_insn(ctx, std, src, dst, off);
			} else if (is_signed_imm14(off)) {
				emit_insn(ctx, stptrd, src, dst, off);
			} else {
				move_imm(ctx, t1, off, is32);
				emit_insn(ctx, stxd, src, dst, t1);
			}
			break;
		}
		break;

	case BPF_STX | BPF_ATOMIC | BPF_W:
	case BPF_STX | BPF_ATOMIC | BPF_DW:
		emit_atomic(insn, ctx);
		break;

	/* Speculation barrier */
	case BPF_ST | BPF_NOSPEC:
		break;

	default:
		pr_err("bpf_jit: unknown opcode %02x\n", code);
		return -EINVAL;
	}

	return 0;

toofar:
	pr_info_once("bpf_jit: opcode %02x, jump too far\n", code);
	return -E2BIG;
}

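/*
 * On the first pass (ctx->image == NULL) this only records the offset
 * of each BPF instruction and sets ctx->flags; the second pass emits
 * the actual machine code into the allocated image.
 */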
static int build_body(struct jit_ctx *ctx, bool extra_pass)
{
	int i;
	const struct bpf_prog *prog = ctx->prog;

	for (i = 0; i < prog->len; i++) {
		const struct bpf_insn *insn = &prog->insnsi[i];
		int ret;

		if (ctx->image == NULL)
			ctx->offset[i] = ctx->idx;

		ret = build_insn(insn, ctx, extra_pass);
		if (ret > 0) {
			i++;
			if (ctx->image == NULL)
				ctx->offset[i] = ctx->idx;
			continue;
		}
		if (ret)
			return ret;
	}

	if (ctx->image == NULL)
		ctx->offset[i] = ctx->idx;

	return 0;
}

/* Fill space with break instructions */
static void jit_fill_hole(void *area, unsigned int size)
{
	u32 *ptr;

	/* We are guaranteed to have aligned memory */
	for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
		*ptr++ = INSN_BREAK;
}

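/*
 * Reject an image that still contains INSN_BREAK filler (i.e. a hole
 * left by jit_fill_hole() that was never overwritten), or whose number
 * of exception table entries does not match what was accounted for.
 */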
static int validate_code(struct jit_ctx *ctx)
{
	int i;
	union loongarch_instruction insn;

	for (i = 0; i < ctx->idx; i++) {
		insn = ctx->image[i];
		/* Check INSN_BREAK */
		if (insn.word == INSN_BREAK)
			return -1;
	}

	if (WARN_ON_ONCE(ctx->num_exentries != ctx->prog->aux->num_exentries))
		return -1;

	return 0;
}

struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	bool tmp_blinded = false, extra_pass = false;
	u8 *image_ptr;
	int image_size, prog_size, extable_size;
	struct jit_ctx ctx;
	struct jit_data *jit_data;
	struct bpf_binary_header *header;
	struct bpf_prog *tmp, *orig_prog = prog;

	/*
	 * If BPF JIT was not enabled then we must fall back to
	 * the interpreter.
	 */
	if (!prog->jit_requested)
		return orig_prog;

	tmp = bpf_jit_blind_constants(prog);
	/*
	 * If blinding was requested and we failed during blinding,
	 * we must fall back to the interpreter. Otherwise, we save
	 * the new JITed code.
	 */
	if (IS_ERR(tmp))
		return orig_prog;

	if (tmp != prog) {
		tmp_blinded = true;
		prog = tmp;
	}

	jit_data = prog->aux->jit_data;
	if (!jit_data) {
		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
		if (!jit_data) {
			prog = orig_prog;
			goto out;
		}
		prog->aux->jit_data = jit_data;
	}
	if (jit_data->ctx.offset) {
		ctx = jit_data->ctx;
		image_ptr = jit_data->image;
		header = jit_data->header;
		extra_pass = true;
		prog_size = sizeof(u32) * ctx.idx;
		goto skip_init_ctx;
	}

	memset(&ctx, 0, sizeof(ctx));
	ctx.prog = prog;

	ctx.offset = kvcalloc(prog->len + 1, sizeof(u32), GFP_KERNEL);
	if (ctx.offset == NULL) {
		prog = orig_prog;
		goto out_offset;
	}

	/* 1. Initial fake pass to compute ctx->idx and set ctx->flags */
	build_prologue(&ctx);
	if (build_body(&ctx, extra_pass)) {
		prog = orig_prog;
		goto out_offset;
	}
	ctx.epilogue_offset = ctx.idx;
	build_epilogue(&ctx);

	extable_size = prog->aux->num_exentries * sizeof(struct exception_table_entry);

	/*
	 * Now we know the actual image size.
	 * As each LoongArch instruction is 32 bits long, we translate
	 * the number of JITed instructions into the size required to
	 * store the JITed code.
	 */
	prog_size = sizeof(u32) * ctx.idx;
	image_size = prog_size + extable_size;
	/* Now we know the size of the structure to make */
	header = bpf_jit_binary_alloc(image_size, &image_ptr,
				      sizeof(u32), jit_fill_hole);
	if (header == NULL) {
		prog = orig_prog;
		goto out_offset;
	}

	/* 2. Now, the actual pass to generate final JIT code */
	ctx.image = (union loongarch_instruction *)image_ptr;
	if (extable_size)
		prog->aux->extable = (void *)image_ptr + prog_size;

skip_init_ctx:
	ctx.idx = 0;
	ctx.num_exentries = 0;

	build_prologue(&ctx);
	if (build_body(&ctx, extra_pass)) {
		bpf_jit_binary_free(header);
		prog = orig_prog;
		goto out_offset;
	}
	build_epilogue(&ctx);

	/* 3. Extra pass to validate JITed code */
	if (validate_code(&ctx)) {
		bpf_jit_binary_free(header);
		prog = orig_prog;
		goto out_offset;
	}

	/* And we're done */
	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, prog_size, 2, ctx.image);

	/* Update the icache */
	flush_icache_range((unsigned long)header, (unsigned long)(ctx.image + ctx.idx));

	if (!prog->is_func || extra_pass) {
		if (extra_pass && ctx.idx != jit_data->ctx.idx) {
			pr_err_once("multi-func JIT bug %d != %d\n",
				    ctx.idx, jit_data->ctx.idx);
			bpf_jit_binary_free(header);
			prog->bpf_func = NULL;
			prog->jited = 0;
			prog->jited_len = 0;
			goto out_offset;
		}
		bpf_jit_binary_lock_ro(header);
	} else {
		jit_data->ctx = ctx;
		jit_data->image = image_ptr;
		jit_data->header = header;
	}
	prog->jited = 1;
	prog->jited_len = prog_size;
	prog->bpf_func = (void *)ctx.image;

	if (!prog->is_func || extra_pass) {
		int i;

		/* offset[prog->len] is the size of program */
		for (i = 0; i <= prog->len; i++)
			ctx.offset[i] *= LOONGARCH_INSN_SIZE;
		bpf_prog_fill_jited_linfo(prog, ctx.offset + 1);

out_offset:
		kvfree(ctx.offset);
		kfree(jit_data);
		prog->aux->jit_data = NULL;
	}

out:
	if (tmp_blinded)
		bpf_jit_prog_release_other(prog, prog == orig_prog ? tmp : orig_prog);

	out_offset = -1;

	return prog;
}

/* Indicate the JIT backend supports mixing bpf2bpf and tailcalls. */
bool bpf_jit_supports_subprog_tailcalls(void)
{
	return true;
}