Lines Matching refs:insn
(References to the identifier "insn" in kernel/bpf/verifier.c; each entry gives the source line number, the matching code, the enclosing function, and the reference kind, argument or local.)
236 static bool bpf_pseudo_call(const struct bpf_insn *insn) in bpf_pseudo_call() argument
238 return insn->code == (BPF_JMP | BPF_CALL) && in bpf_pseudo_call()
239 insn->src_reg == BPF_PSEUDO_CALL; in bpf_pseudo_call()
242 static bool bpf_pseudo_kfunc_call(const struct bpf_insn *insn) in bpf_pseudo_kfunc_call() argument
244 return insn->code == (BPF_JMP | BPF_CALL) && in bpf_pseudo_kfunc_call()
245 insn->src_reg == BPF_PSEUDO_KFUNC_CALL; in bpf_pseudo_kfunc_call()
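The two predicates above decide call dispatch for the whole verifier: a BPF_JMP|BPF_CALL instruction is a subprogram call when src_reg carries the BPF_PSEUDO_CALL sentinel, a kfunc call under BPF_PSEUDO_KFUNC_CALL, and a plain helper call when src_reg is 0. A minimal userspace sketch of the same test, assuming the Linux uapi <linux/bpf.h> header; is_pseudo_call() below mirrors the kernel's bpf_pseudo_call() and is not the kernel function itself:

#include <linux/bpf.h>   /* struct bpf_insn, BPF_JMP, BPF_CALL, BPF_PSEUDO_CALL */
#include <stdbool.h>
#include <stdio.h>

/* Illustrative mirror of the kernel predicate: a call whose src_reg
 * sentinel marks it as a call into another BPF subprogram. */
static bool is_pseudo_call(const struct bpf_insn *insn)
{
        return insn->code == (BPF_JMP | BPF_CALL) &&
               insn->src_reg == BPF_PSEUDO_CALL;
}

int main(void)
{
        /* "call +5": enter the subprogram five instructions ahead. */
        struct bpf_insn call = {
                .code    = BPF_JMP | BPF_CALL,
                .src_reg = BPF_PSEUDO_CALL,
                .imm     = 5,
        };
        /* src_reg == 0: an ordinary helper call by function id. */
        struct bpf_insn helper = {
                .code = BPF_JMP | BPF_CALL,
                .imm  = BPF_FUNC_map_lookup_elem,
        };

        printf("call   -> subprog call? %d\n", is_pseudo_call(&call));   /* 1 */
        printf("helper -> subprog call? %d\n", is_pseudo_call(&helper)); /* 0 */
        return 0;
}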
538 static bool is_cmpxchg_insn(const struct bpf_insn *insn) in is_cmpxchg_insn() argument
540 return BPF_CLASS(insn->code) == BPF_STX && in is_cmpxchg_insn()
541 BPF_MODE(insn->code) == BPF_ATOMIC && in is_cmpxchg_insn()
542 insn->imm == BPF_CMPXCHG; in is_cmpxchg_insn()
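is_cmpxchg_insn() keys on three fields at once: class BPF_STX, mode BPF_ATOMIC, and imm, which for atomic instructions encodes the operation rather than a constant. A sketch of the encoding it matches, assuming the uapi <linux/bpf.h> header (is_cmpxchg() is an illustrative mirror, not the kernel code):

#include <linux/bpf.h>   /* BPF_STX, BPF_ATOMIC, BPF_DW, BPF_CMPXCHG */
#include <stdbool.h>
#include <stdio.h>

static bool is_cmpxchg(const struct bpf_insn *insn)
{
        return BPF_CLASS(insn->code) == BPF_STX &&
               BPF_MODE(insn->code) == BPF_ATOMIC &&
               insn->imm == BPF_CMPXCHG;
}

int main(void)
{
        /* lock cmpxchg [r2 + 0], r3: compare R0 with the memory word,
         * store r3 on match; the operation lives in imm, not in code. */
        struct bpf_insn insn = {
                .code    = BPF_STX | BPF_ATOMIC | BPF_DW,
                .dst_reg = BPF_REG_2,
                .src_reg = BPF_REG_3,
                .imm     = BPF_CMPXCHG,
        };

        printf("cmpxchg? %d\n", is_cmpxchg(&insn)); /* 1 */
        return 0;
}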
2176 const struct bpf_insn *insn) in bpf_jit_find_kfunc_model() argument
2179 .imm = insn->imm, in bpf_jit_find_kfunc_model()
2194 struct bpf_insn *insn = env->prog->insnsi; in add_subprog_and_kfunc() local
2202 for (i = 0; i < insn_cnt; i++, insn++) { in add_subprog_and_kfunc()
2203 if (!bpf_pseudo_func(insn) && !bpf_pseudo_call(insn) && in add_subprog_and_kfunc()
2204 !bpf_pseudo_kfunc_call(insn)) in add_subprog_and_kfunc()
2212 if (bpf_pseudo_func(insn) || bpf_pseudo_call(insn)) in add_subprog_and_kfunc()
2213 ret = add_subprog(env, i + insn->imm + 1); in add_subprog_and_kfunc()
2215 ret = add_kfunc_call(env, insn->imm, insn->off); in add_subprog_and_kfunc()
2237 struct bpf_insn *insn = env->prog->insnsi; in check_subprogs() local
2244 u8 code = insn[i].code; in check_subprogs()
2247 insn[i].imm == BPF_FUNC_tail_call && in check_subprogs()
2248 insn[i].src_reg != BPF_PSEUDO_CALL) in check_subprogs()
2257 off = i + insn[i].off + 1; in check_subprogs()
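add_subprog_and_kfunc() and check_subprogs() resolve relative targets with the same arithmetic: both jump offsets (insn->off) and pseudo-call offsets (insn->imm) count from the instruction after the current one, hence the recurring i + offset + 1. A tiny worked example of that convention (the values are purely illustrative):

#include <stdio.h>

int main(void)
{
        int i = 10;    /* index of the branch or call instruction */
        int off = -3;  /* relative offset taken from off or imm   */

        /* Offsets are relative to the *next* instruction, so an
         * offset of 0 falls through and -1 re-executes the branch. */
        int target = i + off + 1;

        printf("insn %d with offset %d targets insn %d\n", i, off, target); /* 8 */
        return 0;
}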
2337 static bool is_reg64(struct bpf_verifier_env *env, struct bpf_insn *insn, in is_reg64() argument
2342 code = insn->code; in is_reg64()
2357 if (insn->src_reg == BPF_PSEUDO_CALL) in is_reg64()
2371 (class == BPF_ALU && op == BPF_END && insn->imm == 64)) in is_reg64()
2422 static int insn_def_regno(const struct bpf_insn *insn) in insn_def_regno() argument
2424 switch (BPF_CLASS(insn->code)) { in insn_def_regno()
2430 if (BPF_MODE(insn->code) == BPF_ATOMIC && in insn_def_regno()
2431 (insn->imm & BPF_FETCH)) { in insn_def_regno()
2432 if (insn->imm == BPF_CMPXCHG) in insn_def_regno()
2435 return insn->src_reg; in insn_def_regno()
2440 return insn->dst_reg; in insn_def_regno()
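insn_def_regno() answers which register an instruction defines, and the atomic cases are the subtle ones: fetch-style atomics return the old memory value through src_reg, except BPF_CMPXCHG, which always returns it in R0. A simplified mirror of that decision, assuming the uapi <linux/bpf.h> header (def_regno() is illustrative; -1 stands for "defines no register"):

#include <linux/bpf.h>   /* struct bpf_insn, BPF_CLASS, atomic flags */
#include <stdio.h>

static int def_regno(const struct bpf_insn *insn)
{
        switch (BPF_CLASS(insn->code)) {
        case BPF_STX:
                /* Only fetching atomics write a value back to a register. */
                if (BPF_MODE(insn->code) == BPF_ATOMIC &&
                    (insn->imm & BPF_FETCH)) {
                        if (insn->imm == BPF_CMPXCHG)
                                return BPF_REG_0;   /* old value goes to R0  */
                        return insn->src_reg;       /* old value goes to src */
                }
                return -1;
        case BPF_ALU:
        case BPF_ALU64:
        case BPF_LDX:
                return insn->dst_reg;
        default:
                return -1;
        }
}

int main(void)
{
        struct bpf_insn fetch_add = {
                .code = BPF_STX | BPF_ATOMIC | BPF_DW,
                .dst_reg = BPF_REG_1, .src_reg = BPF_REG_2,
                .imm = BPF_ADD | BPF_FETCH,
        };
        struct bpf_insn cmpxchg = {
                .code = BPF_STX | BPF_ATOMIC | BPF_DW,
                .dst_reg = BPF_REG_1, .src_reg = BPF_REG_2,
                .imm = BPF_CMPXCHG,
        };

        printf("fetch-add defines r%d\n", def_regno(&fetch_add)); /* r2 */
        printf("cmpxchg   defines r%d\n", def_regno(&cmpxchg));   /* r0 */
        return 0;
}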
2445 static bool insn_has_def32(struct bpf_verifier_env *env, struct bpf_insn *insn) in insn_has_def32() argument
2447 int dst_reg = insn_def_regno(insn); in insn_has_def32()
2452 return !is_reg64(env, insn, dst_reg, NULL, DST_OP); in insn_has_def32()
2473 struct bpf_insn *insn = env->prog->insnsi + env->insn_idx; in check_reg_arg() local
2485 rw64 = is_reg64(env, insn, regno, reg, t); in check_reg_arg()
2569 static const char *disasm_kfunc_name(void *data, const struct bpf_insn *insn) in disasm_kfunc_name() argument
2574 if (insn->src_reg != BPF_PSEUDO_KFUNC_CALL) in disasm_kfunc_name()
2577 desc_btf = find_kfunc_desc_btf(data, insn->off); in disasm_kfunc_name()
2581 func = btf_type_by_id(desc_btf, insn->imm); in disasm_kfunc_name()
2597 struct bpf_insn *insn = env->prog->insnsi + idx; in backtrack_insn() local
2598 u8 class = BPF_CLASS(insn->code); in backtrack_insn()
2599 u8 opcode = BPF_OP(insn->code); in backtrack_insn()
2600 u8 mode = BPF_MODE(insn->code); in backtrack_insn()
2601 u32 dreg = 1u << insn->dst_reg; in backtrack_insn()
2602 u32 sreg = 1u << insn->src_reg; in backtrack_insn()
2605 if (insn->code == 0) in backtrack_insn()
2610 print_bpf_insn(&cbs, insn, env->allow_ptr_leaks); in backtrack_insn()
2622 if (BPF_SRC(insn->code) == BPF_X) { in backtrack_insn()
2639 if (BPF_SRC(insn->code) == BPF_X) { in backtrack_insn()
2660 if (insn->src_reg != BPF_REG_FP) in backtrack_insn()
2667 spi = (-insn->off - 1) / BPF_REG_SIZE; in backtrack_insn()
2682 if (insn->dst_reg != BPF_REG_FP) in backtrack_insn()
2684 spi = (-insn->off - 1) / BPF_REG_SIZE; in backtrack_insn()
2697 if (insn->src_reg == BPF_PSEUDO_CALL) in backtrack_insn()
2703 if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL && insn->imm == 0) in backtrack_insn()
2708 if (insn->src_reg == 0 && is_callback_calling_function(insn->imm)) in backtrack_insn()
2722 } else if (BPF_SRC(insn->code) == BPF_X) { in backtrack_insn()
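backtrack_insn() works on one-hot register masks: dreg and sreg above are 1u << regno bits that the precision-tracking walk sets and clears as it moves backwards through the program. A toy sketch of that mask discipline for a register-to-register move (reg_mask and the register numbers are illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t reg_mask = 0;
        int dst = 3, src = 5;

        /* Mark r3 as needing precise bounds... */
        reg_mask |= 1u << dst;

        /* ...then walk back over "r3 = r5": the value came from r5,
         * so precision transfers from the destination to the source. */
        if (reg_mask & (1u << dst)) {
                reg_mask &= ~(1u << dst);
                reg_mask |= 1u << src;
        }

        printf("precise mask: %#x\n", reg_mask); /* 0x20, i.e. r5 */
        return 0;
}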
3243 static bool is_bpf_st_mem(struct bpf_insn *insn) in is_bpf_st_mem() argument
3245 return BPF_CLASS(insn->code) == BPF_ST && BPF_MODE(insn->code) == BPF_MEM; in is_bpf_st_mem()
3259 struct bpf_insn *insn = &env->prog->insnsi[insn_idx]; in check_stack_write_fixed_off() local
3261 u32 dst_reg = insn->dst_reg; in check_stack_write_fixed_off()
3313 } else if (!reg && !(off % BPF_REG_SIZE) && is_bpf_st_mem(insn) && in check_stack_write_fixed_off()
3314 insn->imm != 0 && env->bpf_capable) { in check_stack_write_fixed_off()
3317 __mark_reg_known(&fake_reg, insn->imm); in check_stack_write_fixed_off()
3355 (!reg && is_bpf_st_mem(insn) && insn->imm == 0)) { in check_stack_write_fixed_off()
4001 struct bpf_insn *insn = &env->prog->insnsi[insn_idx]; in check_map_kptr_access() local
4002 int class = BPF_CLASS(insn->code); in check_map_kptr_access()
4012 if (BPF_MODE(insn->code) != BPF_MEM) { in check_map_kptr_access()
4040 if (insn->imm) { in check_map_kptr_access()
4473 struct bpf_insn *insn = env->prog->insnsi; in check_max_stack_depth_subprog() local
4521 if (!bpf_pseudo_call(insn + i) && !bpf_pseudo_func(insn + i)) in check_max_stack_depth_subprog()
4528 next_insn = i + insn[i].imm + 1; in check_max_stack_depth_subprog()
4541 if (!bpf_pseudo_call(insn + i)) in check_max_stack_depth_subprog()
4599 const struct bpf_insn *insn, int idx) in get_callee_stack_depth() argument
4601 int start = idx + insn->imm + 1, subprog; in get_callee_stack_depth()
5213 static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn) in check_atomic() argument
5218 switch (insn->imm) { in check_atomic()
5231 verbose(env, "BPF_ATOMIC uses invalid atomic opcode %02x\n", insn->imm); in check_atomic()
5235 if (BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) { in check_atomic()
5241 err = check_reg_arg(env, insn->src_reg, SRC_OP); in check_atomic()
5246 err = check_reg_arg(env, insn->dst_reg, SRC_OP); in check_atomic()
5250 if (insn->imm == BPF_CMPXCHG) { in check_atomic()
5264 if (is_pointer_value(env, insn->src_reg)) { in check_atomic()
5265 verbose(env, "R%d leaks addr into mem\n", insn->src_reg); in check_atomic()
5269 if (is_ctx_reg(env, insn->dst_reg) || in check_atomic()
5270 is_pkt_reg(env, insn->dst_reg) || in check_atomic()
5271 is_flow_key_reg(env, insn->dst_reg) || in check_atomic()
5272 is_sk_reg(env, insn->dst_reg)) { in check_atomic()
5274 insn->dst_reg, in check_atomic()
5275 reg_type_str(env, reg_state(env, insn->dst_reg)->type)); in check_atomic()
5279 if (insn->imm & BPF_FETCH) { in check_atomic()
5280 if (insn->imm == BPF_CMPXCHG) in check_atomic()
5283 load_reg = insn->src_reg; in check_atomic()
5299 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, in check_atomic()
5300 BPF_SIZE(insn->code), BPF_READ, -1, true); in check_atomic()
5302 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, in check_atomic()
5303 BPF_SIZE(insn->code), BPF_READ, load_reg, in check_atomic()
5309 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, in check_atomic()
5310 BPF_SIZE(insn->code), BPF_WRITE, -1, true); in check_atomic()
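Before any memory checks, check_atomic() whitelists insn->imm: the four ALU ops BPF_ADD/AND/OR/XOR (each optionally with BPF_FETCH) plus BPF_XCHG and BPF_CMPXCHG, and only BPF_W or BPF_DW sizes. A compact sketch of that validation, assuming the uapi <linux/bpf.h> header (atomic_op_ok() is an illustrative mirror of the switch above):

#include <linux/bpf.h>
#include <stdbool.h>
#include <stdio.h>

static bool atomic_op_ok(const struct bpf_insn *insn)
{
        /* Only 32- and 64-bit atomics exist. */
        if (BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW)
                return false;

        switch (insn->imm) {
        case BPF_ADD: case BPF_ADD | BPF_FETCH:
        case BPF_AND: case BPF_AND | BPF_FETCH:
        case BPF_OR:  case BPF_OR  | BPF_FETCH:
        case BPF_XOR: case BPF_XOR | BPF_FETCH:
        case BPF_XCHG:
        case BPF_CMPXCHG:
                return true;
        default:
                return false;   /* e.g. an atomic BPF_SUB is invalid */
        }
}

int main(void)
{
        struct bpf_insn ok  = { .code = BPF_STX | BPF_ATOMIC | BPF_W,
                                .imm  = BPF_OR | BPF_FETCH };
        struct bpf_insn bad = { .code = BPF_STX | BPF_ATOMIC | BPF_DW,
                                .imm  = BPF_SUB };

        printf("or|fetch ok? %d, sub ok? %d\n",
               atomic_op_ok(&ok), atomic_op_ok(&bad)); /* 1, 0 */
        return 0;
}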
6912 static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn, in __check_func_call() argument
6966 if (set_callee_state_cb != set_callee_state && !is_callback_calling_function(insn->imm)) { in __check_func_call()
6968 func_id_name(insn->imm), insn->imm); in __check_func_call()
6972 if (insn->code == (BPF_JMP | BPF_CALL) && in __check_func_call()
6973 insn->src_reg == 0 && in __check_func_call()
6974 insn->imm == BPF_FUNC_timer_set_callback) { in __check_func_call()
7085 static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn, in check_func_call() argument
7090 target_insn = *insn_idx + insn->imm + 1; in check_func_call()
7098 return __check_func_call(env, insn, insn_idx, subprog, set_callee_state); in check_func_call()
7522 static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn, in check_helper_call() argument
7536 func_id = insn->imm; in check_helper_call()
7660 err = __check_func_call(env, insn, insn_idx_p, meta.subprogno, in check_helper_call()
7664 err = __check_func_call(env, insn, insn_idx_p, meta.subprogno, in check_helper_call()
7668 err = __check_func_call(env, insn, insn_idx_p, meta.subprogno, in check_helper_call()
7676 err = __check_func_call(env, insn, insn_idx_p, meta.subprogno, in check_helper_call()
7722 err = __check_func_call(env, insn, insn_idx_p, meta.subprogno, in check_helper_call()
7946 static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, in check_kfunc_call() argument
7961 if (!insn->imm) in check_kfunc_call()
7964 desc_btf = find_kfunc_desc_btf(env, insn->off); in check_kfunc_call()
7968 func_id = insn->imm; in check_kfunc_call()
8196 const struct bpf_insn *insn) in can_skip_alu_sanitation() argument
8198 return env->bypass_spec_v1 || BPF_SRC(insn->code) == BPF_K; in can_skip_alu_sanitation()
8219 struct bpf_insn *insn) in sanitize_val_alu() argument
8223 if (can_skip_alu_sanitation(env, insn)) in sanitize_val_alu()
8241 const struct bpf_insn *insn, in sanitize_speculative_path() argument
8248 if (branch && insn) { in sanitize_speculative_path()
8250 if (BPF_SRC(insn->code) == BPF_K) { in sanitize_speculative_path()
8251 mark_reg_unknown(env, regs, insn->dst_reg); in sanitize_speculative_path()
8252 } else if (BPF_SRC(insn->code) == BPF_X) { in sanitize_speculative_path()
8253 mark_reg_unknown(env, regs, insn->dst_reg); in sanitize_speculative_path()
8254 mark_reg_unknown(env, regs, insn->src_reg); in sanitize_speculative_path()
8261 struct bpf_insn *insn, in sanitize_ptr_alu() argument
8273 u8 opcode = BPF_OP(insn->code); in sanitize_ptr_alu()
8279 if (can_skip_alu_sanitation(env, insn)) in sanitize_ptr_alu()
8370 const struct bpf_insn *insn, int reason, in sanitize_err() argument
8375 const char *op = BPF_OP(insn->code) == BPF_ADD ? "add" : "sub"; in sanitize_err()
8376 u32 dst = insn->dst_reg, src = insn->src_reg; in sanitize_err()
8443 const struct bpf_insn *insn, in sanitize_check_bounds() argument
8446 u32 dst = insn->dst_reg; in sanitize_check_bounds()
8480 struct bpf_insn *insn, in adjust_ptr_min_max_vals() argument
8493 u8 opcode = BPF_OP(insn->code); in adjust_ptr_min_max_vals()
8494 u32 dst = insn->dst_reg; in adjust_ptr_min_max_vals()
8508 if (BPF_CLASS(insn->code) != BPF_ALU64) { in adjust_ptr_min_max_vals()
8563 ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg, in adjust_ptr_min_max_vals()
8566 return sanitize_err(env, insn, ret, off_reg, dst_reg); in adjust_ptr_min_max_vals()
8697 if (sanitize_check_bounds(env, insn, dst_reg) < 0) in adjust_ptr_min_max_vals()
8700 ret = sanitize_ptr_alu(env, insn, dst_reg, off_reg, dst_reg, in adjust_ptr_min_max_vals()
8703 return sanitize_err(env, insn, ret, off_reg, dst_reg); in adjust_ptr_min_max_vals()
9281 struct bpf_insn *insn, in adjust_scalar_min_max_vals() argument
9286 u8 opcode = BPF_OP(insn->code); in adjust_scalar_min_max_vals()
9292 u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32; in adjust_scalar_min_max_vals()
9293 bool alu32 = (BPF_CLASS(insn->code) != BPF_ALU64); in adjust_scalar_min_max_vals()
9337 ret = sanitize_val_alu(env, insn); in adjust_scalar_min_max_vals()
9339 return sanitize_err(env, insn, ret, NULL, NULL); in adjust_scalar_min_max_vals()
9392 mark_reg_unknown(env, regs, insn->dst_reg); in adjust_scalar_min_max_vals()
9405 mark_reg_unknown(env, regs, insn->dst_reg); in adjust_scalar_min_max_vals()
9418 mark_reg_unknown(env, regs, insn->dst_reg); in adjust_scalar_min_max_vals()
9427 mark_reg_unknown(env, regs, insn->dst_reg); in adjust_scalar_min_max_vals()
9442 struct bpf_insn *insn) in adjust_reg_min_max_vals() argument
9448 u8 opcode = BPF_OP(insn->code); in adjust_reg_min_max_vals()
9451 dst_reg = &regs[insn->dst_reg]; in adjust_reg_min_max_vals()
9460 if (BPF_SRC(insn->code) == BPF_X) { in adjust_reg_min_max_vals()
9461 src_reg = &regs[insn->src_reg]; in adjust_reg_min_max_vals()
9469 mark_reg_unknown(env, regs, insn->dst_reg); in adjust_reg_min_max_vals()
9473 insn->dst_reg, in adjust_reg_min_max_vals()
9481 err = mark_chain_precision(env, insn->dst_reg); in adjust_reg_min_max_vals()
9484 return adjust_ptr_min_max_vals(env, insn, in adjust_reg_min_max_vals()
9489 err = mark_chain_precision(env, insn->src_reg); in adjust_reg_min_max_vals()
9492 return adjust_ptr_min_max_vals(env, insn, in adjust_reg_min_max_vals()
9496 err = mark_chain_precision(env, insn->src_reg); in adjust_reg_min_max_vals()
9505 __mark_reg_known(&off_reg, insn->imm); in adjust_reg_min_max_vals()
9508 return adjust_ptr_min_max_vals(env, insn, in adjust_reg_min_max_vals()
9523 return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg); in adjust_reg_min_max_vals()
9527 static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) in check_alu_op() argument
9530 u8 opcode = BPF_OP(insn->code); in check_alu_op()
9535 if (BPF_SRC(insn->code) != BPF_K || in check_alu_op()
9536 insn->src_reg != BPF_REG_0 || in check_alu_op()
9537 insn->off != 0 || insn->imm != 0) { in check_alu_op()
9542 if (insn->src_reg != BPF_REG_0 || insn->off != 0 || in check_alu_op()
9543 (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) || in check_alu_op()
9544 BPF_CLASS(insn->code) == BPF_ALU64) { in check_alu_op()
9551 err = check_reg_arg(env, insn->dst_reg, SRC_OP); in check_alu_op()
9555 if (is_pointer_value(env, insn->dst_reg)) { in check_alu_op()
9557 insn->dst_reg); in check_alu_op()
9562 err = check_reg_arg(env, insn->dst_reg, DST_OP); in check_alu_op()
9568 if (BPF_SRC(insn->code) == BPF_X) { in check_alu_op()
9569 if (insn->imm != 0 || insn->off != 0) { in check_alu_op()
9575 err = check_reg_arg(env, insn->src_reg, SRC_OP); in check_alu_op()
9579 if (insn->src_reg != BPF_REG_0 || insn->off != 0) { in check_alu_op()
9586 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); in check_alu_op()
9590 if (BPF_SRC(insn->code) == BPF_X) { in check_alu_op()
9591 struct bpf_reg_state *src_reg = regs + insn->src_reg; in check_alu_op()
9592 struct bpf_reg_state *dst_reg = regs + insn->dst_reg; in check_alu_op()
9594 if (BPF_CLASS(insn->code) == BPF_ALU64) { in check_alu_op()
9609 if (is_pointer_value(env, insn->src_reg)) { in check_alu_op()
9612 insn->src_reg); in check_alu_op()
9625 insn->dst_reg); in check_alu_op()
9635 mark_reg_unknown(env, regs, insn->dst_reg); in check_alu_op()
9636 regs[insn->dst_reg].type = SCALAR_VALUE; in check_alu_op()
9637 if (BPF_CLASS(insn->code) == BPF_ALU64) { in check_alu_op()
9638 __mark_reg_known(regs + insn->dst_reg, in check_alu_op()
9639 insn->imm); in check_alu_op()
9641 __mark_reg_known(regs + insn->dst_reg, in check_alu_op()
9642 (u32)insn->imm); in check_alu_op()
9652 if (BPF_SRC(insn->code) == BPF_X) { in check_alu_op()
9653 if (insn->imm != 0 || insn->off != 0) { in check_alu_op()
9658 err = check_reg_arg(env, insn->src_reg, SRC_OP); in check_alu_op()
9662 if (insn->src_reg != BPF_REG_0 || insn->off != 0) { in check_alu_op()
9669 err = check_reg_arg(env, insn->dst_reg, SRC_OP); in check_alu_op()
9674 BPF_SRC(insn->code) == BPF_K && insn->imm == 0) { in check_alu_op()
9680 opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) { in check_alu_op()
9681 int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32; in check_alu_op()
9683 if (insn->imm < 0 || insn->imm >= size) { in check_alu_op()
9684 verbose(env, "invalid shift %d\n", insn->imm); in check_alu_op()
9690 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); in check_alu_op()
9694 return adjust_reg_min_max_vals(env, insn); in check_alu_op()
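The shift validation near the end of check_alu_op() enforces that an immediate shift count lies in [0, width): shifting a 32-bit subregister by 32 or more, or by a negative count, is undefined on most ISAs, so the verifier rejects it outright. A minimal sketch of the rule, assuming the uapi <linux/bpf.h> header (shift_imm_ok() is illustrative):

#include <linux/bpf.h>
#include <stdbool.h>
#include <stdio.h>

static bool shift_imm_ok(const struct bpf_insn *insn)
{
        int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;

        /* The count must be a valid bit position for the operand width. */
        return insn->imm >= 0 && insn->imm < size;
}

int main(void)
{
        struct bpf_insn a = { .code = BPF_ALU   | BPF_LSH | BPF_K, .imm = 32 };
        struct bpf_insn b = { .code = BPF_ALU64 | BPF_LSH | BPF_K, .imm = 32 };

        printf("32-bit lsh by 32 ok? %d\n", shift_imm_ok(&a)); /* 0: rejected */
        printf("64-bit lsh by 32 ok? %d\n", shift_imm_ok(&b)); /* 1 */
        return 0;
}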
10298 static bool try_match_pkt_pointers(const struct bpf_insn *insn, in try_match_pkt_pointers() argument
10304 if (BPF_SRC(insn->code) != BPF_X) in try_match_pkt_pointers()
10308 if (BPF_CLASS(insn->code) == BPF_JMP32) in try_match_pkt_pointers()
10311 switch (BPF_OP(insn->code)) { in try_match_pkt_pointers()
10320 mark_pkt_end(other_branch, insn->dst_reg, true); in try_match_pkt_pointers()
10328 mark_pkt_end(this_branch, insn->src_reg, false); in try_match_pkt_pointers()
10341 mark_pkt_end(this_branch, insn->dst_reg, false); in try_match_pkt_pointers()
10349 mark_pkt_end(other_branch, insn->src_reg, true); in try_match_pkt_pointers()
10362 mark_pkt_end(other_branch, insn->dst_reg, false); in try_match_pkt_pointers()
10370 mark_pkt_end(this_branch, insn->src_reg, true); in try_match_pkt_pointers()
10383 mark_pkt_end(this_branch, insn->dst_reg, true); in try_match_pkt_pointers()
10391 mark_pkt_end(other_branch, insn->src_reg, false); in try_match_pkt_pointers()
10416 struct bpf_insn *insn, int *insn_idx) in check_cond_jmp_op() argument
10422 u8 opcode = BPF_OP(insn->code); in check_cond_jmp_op()
10434 err = check_reg_arg(env, insn->dst_reg, SRC_OP); in check_cond_jmp_op()
10438 dst_reg = &regs[insn->dst_reg]; in check_cond_jmp_op()
10439 if (BPF_SRC(insn->code) == BPF_X) { in check_cond_jmp_op()
10440 if (insn->imm != 0) { in check_cond_jmp_op()
10446 err = check_reg_arg(env, insn->src_reg, SRC_OP); in check_cond_jmp_op()
10450 src_reg = &regs[insn->src_reg]; in check_cond_jmp_op()
10452 is_pointer_value(env, insn->src_reg)) { in check_cond_jmp_op()
10454 insn->src_reg); in check_cond_jmp_op()
10458 if (insn->src_reg != BPF_REG_0) { in check_cond_jmp_op()
10464 is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32; in check_cond_jmp_op()
10466 if (BPF_SRC(insn->code) == BPF_K) { in check_cond_jmp_op()
10467 pred = is_branch_taken(dst_reg, insn->imm, opcode, is_jmp32); in check_cond_jmp_op()
10491 err = mark_chain_precision(env, insn->dst_reg); in check_cond_jmp_op()
10492 if (BPF_SRC(insn->code) == BPF_X && !err && in check_cond_jmp_op()
10494 err = mark_chain_precision(env, insn->src_reg); in check_cond_jmp_op()
10505 !sanitize_speculative_path(env, insn, *insn_idx + 1, in check_cond_jmp_op()
10510 *insn_idx += insn->off; in check_cond_jmp_op()
10518 !sanitize_speculative_path(env, insn, in check_cond_jmp_op()
10519 *insn_idx + insn->off + 1, in check_cond_jmp_op()
10527 other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx, in check_cond_jmp_op()
10540 if (BPF_SRC(insn->code) == BPF_X) { in check_cond_jmp_op()
10541 struct bpf_reg_state *src_reg = &regs[insn->src_reg]; in check_cond_jmp_op()
10548 reg_set_min_max(&other_branch_regs[insn->dst_reg], in check_cond_jmp_op()
10556 reg_set_min_max_inv(&other_branch_regs[insn->src_reg], in check_cond_jmp_op()
10564 reg_combine_min_max(&other_branch_regs[insn->src_reg], in check_cond_jmp_op()
10565 &other_branch_regs[insn->dst_reg], in check_cond_jmp_op()
10568 !WARN_ON_ONCE(src_reg->id != other_branch_regs[insn->src_reg].id)) { in check_cond_jmp_op()
10570 find_equal_scalars(other_branch, &other_branch_regs[insn->src_reg]); in check_cond_jmp_op()
10575 reg_set_min_max(&other_branch_regs[insn->dst_reg], in check_cond_jmp_op()
10576 dst_reg, insn->imm, (u32)insn->imm, in check_cond_jmp_op()
10581 !WARN_ON_ONCE(dst_reg->id != other_branch_regs[insn->dst_reg].id)) { in check_cond_jmp_op()
10583 find_equal_scalars(other_branch, &other_branch_regs[insn->dst_reg]); in check_cond_jmp_op()
10590 if (!is_jmp32 && BPF_SRC(insn->code) == BPF_K && in check_cond_jmp_op()
10591 insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) && in check_cond_jmp_op()
10596 mark_ptr_or_null_regs(this_branch, insn->dst_reg, in check_cond_jmp_op()
10598 mark_ptr_or_null_regs(other_branch, insn->dst_reg, in check_cond_jmp_op()
10601 } else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg], in check_cond_jmp_op()
10602 is_pointer_value(env, insn->dst_reg)) { in check_cond_jmp_op()
10604 insn->dst_reg); in check_cond_jmp_op()
10613 static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn) in check_ld_imm() argument
10621 if (BPF_SIZE(insn->code) != BPF_DW) { in check_ld_imm()
10625 if (insn->off != 0) { in check_ld_imm()
10630 err = check_reg_arg(env, insn->dst_reg, DST_OP); in check_ld_imm()
10634 dst_reg = &regs[insn->dst_reg]; in check_ld_imm()
10635 if (insn->src_reg == 0) { in check_ld_imm()
10636 u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm; in check_ld_imm()
10639 __mark_reg_known(®s[insn->dst_reg], imm); in check_ld_imm()
10647 mark_reg_known_zero(env, regs, insn->dst_reg); in check_ld_imm()
10649 if (insn->src_reg == BPF_PSEUDO_BTF_ID) { in check_ld_imm()
10666 if (insn->src_reg == BPF_PSEUDO_FUNC) { in check_ld_imm()
10669 env->insn_idx + insn->imm + 1); in check_ld_imm()
10688 if (insn->src_reg == BPF_PSEUDO_MAP_VALUE || in check_ld_imm()
10689 insn->src_reg == BPF_PSEUDO_MAP_IDX_VALUE) { in check_ld_imm()
10694 } else if (insn->src_reg == BPF_PSEUDO_MAP_FD || in check_ld_imm()
10695 insn->src_reg == BPF_PSEUDO_MAP_IDX) { in check_ld_imm()
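check_ld_imm() handles the one two-slot instruction in the ISA: BPF_LD|BPF_IMM|BPF_DW spreads a 64-bit constant across the imm fields of two consecutive instructions, low half first, which is exactly the ((u64)(insn + 1)->imm << 32) | (u32)insn->imm recombination above. A sketch of encoding and decoding such a pair, assuming the uapi <linux/bpf.h> header:

#include <linux/bpf.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t value = 0x1122334455667788ULL;

        /* Slot 0 carries the opcode and the low 32 bits; slot 1 is a
         * zeroed continuation holding only the high 32 bits. */
        struct bpf_insn pair[2] = {
                { .code = BPF_LD | BPF_IMM | BPF_DW,
                  .dst_reg = BPF_REG_1,
                  .imm = (uint32_t)value },
                { .imm = (uint32_t)(value >> 32) },
        };

        uint64_t imm = ((uint64_t)(uint32_t)pair[1].imm << 32) |
                       (uint32_t)pair[0].imm;

        printf("reassembled: %#llx\n", (unsigned long long)imm);
        return 0;
}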
10732 static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn) in check_ld_abs() argument
10736 u8 mode = BPF_MODE(insn->code); in check_ld_abs()
10749 if (insn->dst_reg != BPF_REG_0 || insn->off != 0 || in check_ld_abs()
10750 BPF_SIZE(insn->code) == BPF_DW || in check_ld_abs()
10751 (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) { in check_ld_abs()
10784 err = check_reg_arg(env, insn->src_reg, SRC_OP); in check_ld_abs()
11764 static void clean_live_states(struct bpf_verifier_env *env, int insn, in clean_live_states() argument
11770 sl = *explored_state(env, insn); in clean_live_states()
11774 if (sl->state.insn_idx != insn || in clean_live_states()
12466 struct bpf_insn *insn; in do_check() local
12477 insn = &insns[env->insn_idx]; in do_check()
12478 class = BPF_CLASS(insn->code); in do_check()
12532 print_bpf_insn(&cbs, insn, env->allow_ptr_leaks); in do_check()
12549 err = check_alu_op(env, insn); in do_check()
12559 err = check_reg_arg(env, insn->src_reg, SRC_OP); in do_check()
12563 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); in do_check()
12567 src_reg_type = regs[insn->src_reg].type; in do_check()
12572 err = check_mem_access(env, env->insn_idx, insn->src_reg, in do_check()
12573 insn->off, BPF_SIZE(insn->code), in do_check()
12574 BPF_READ, insn->dst_reg, false); in do_check()
12602 if (BPF_MODE(insn->code) == BPF_ATOMIC) { in do_check()
12603 err = check_atomic(env, env->insn_idx, insn); in do_check()
12610 if (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0) { in do_check()
12616 err = check_reg_arg(env, insn->src_reg, SRC_OP); in do_check()
12620 err = check_reg_arg(env, insn->dst_reg, SRC_OP); in do_check()
12624 dst_reg_type = regs[insn->dst_reg].type; in do_check()
12627 err = check_mem_access(env, env->insn_idx, insn->dst_reg, in do_check()
12628 insn->off, BPF_SIZE(insn->code), in do_check()
12629 BPF_WRITE, insn->src_reg, false); in do_check()
12643 if (BPF_MODE(insn->code) != BPF_MEM || in do_check()
12644 insn->src_reg != BPF_REG_0) { in do_check()
12649 err = check_reg_arg(env, insn->dst_reg, SRC_OP); in do_check()
12653 if (is_ctx_reg(env, insn->dst_reg)) { in do_check()
12655 insn->dst_reg, in do_check()
12656 reg_type_str(env, reg_state(env, insn->dst_reg)->type)); in do_check()
12661 err = check_mem_access(env, env->insn_idx, insn->dst_reg, in do_check()
12662 insn->off, BPF_SIZE(insn->code), in do_check()
12668 u8 opcode = BPF_OP(insn->code); in do_check()
12672 if (BPF_SRC(insn->code) != BPF_K || in do_check()
12673 (insn->src_reg != BPF_PSEUDO_KFUNC_CALL in do_check()
12674 && insn->off != 0) || in do_check()
12675 (insn->src_reg != BPF_REG_0 && in do_check()
12676 insn->src_reg != BPF_PSEUDO_CALL && in do_check()
12677 insn->src_reg != BPF_PSEUDO_KFUNC_CALL) || in do_check()
12678 insn->dst_reg != BPF_REG_0 || in do_check()
12685 (insn->src_reg == BPF_PSEUDO_CALL || in do_check()
12686 insn->imm != BPF_FUNC_spin_unlock)) { in do_check()
12690 if (insn->src_reg == BPF_PSEUDO_CALL) in do_check()
12691 err = check_func_call(env, insn, &env->insn_idx); in do_check()
12692 else if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) in do_check()
12693 err = check_kfunc_call(env, insn, &env->insn_idx); in do_check()
12695 err = check_helper_call(env, insn, &env->insn_idx); in do_check()
12699 if (BPF_SRC(insn->code) != BPF_K || in do_check()
12700 insn->imm != 0 || in do_check()
12701 insn->src_reg != BPF_REG_0 || in do_check()
12702 insn->dst_reg != BPF_REG_0 || in do_check()
12708 env->insn_idx += insn->off + 1; in do_check()
12712 if (BPF_SRC(insn->code) != BPF_K || in do_check()
12713 insn->imm != 0 || in do_check()
12714 insn->src_reg != BPF_REG_0 || in do_check()
12715 insn->dst_reg != BPF_REG_0 || in do_check()
12762 err = check_cond_jmp_op(env, insn, &env->insn_idx); in do_check()
12767 u8 mode = BPF_MODE(insn->code); in do_check()
12770 err = check_ld_abs(env, insn); in do_check()
12775 err = check_ld_imm(env, insn); in do_check()
12828 struct bpf_insn *insn, in check_pseudo_btf_id() argument
12837 u32 type, id = insn->imm; in check_pseudo_btf_id()
12843 btf_fd = insn[1].imm; in check_pseudo_btf_id()
12892 insn[0].imm = (u32)addr; in check_pseudo_btf_id()
12893 insn[1].imm = addr >> 32; in check_pseudo_btf_id()
13053 struct bpf_insn *insn = env->prog->insnsi; in resolve_pseudo_ldimm64() local
13061 for (i = 0; i < insn_cnt; i++, insn++) { in resolve_pseudo_ldimm64()
13062 if (BPF_CLASS(insn->code) == BPF_LDX && in resolve_pseudo_ldimm64()
13063 (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) { in resolve_pseudo_ldimm64()
13068 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) { in resolve_pseudo_ldimm64()
13075 if (i == insn_cnt - 1 || insn[1].code != 0 || in resolve_pseudo_ldimm64()
13076 insn[1].dst_reg != 0 || insn[1].src_reg != 0 || in resolve_pseudo_ldimm64()
13077 insn[1].off != 0) { in resolve_pseudo_ldimm64()
13082 if (insn[0].src_reg == 0) in resolve_pseudo_ldimm64()
13086 if (insn[0].src_reg == BPF_PSEUDO_BTF_ID) { in resolve_pseudo_ldimm64()
13088 err = check_pseudo_btf_id(env, insn, aux); in resolve_pseudo_ldimm64()
13094 if (insn[0].src_reg == BPF_PSEUDO_FUNC) { in resolve_pseudo_ldimm64()
13103 switch (insn[0].src_reg) { in resolve_pseudo_ldimm64()
13109 if (insn[1].imm == 0) in resolve_pseudo_ldimm64()
13117 switch (insn[0].src_reg) { in resolve_pseudo_ldimm64()
13125 insn[0].imm * sizeof(fd), in resolve_pseudo_ldimm64()
13130 fd = insn[0].imm; in resolve_pseudo_ldimm64()
13138 insn[0].imm); in resolve_pseudo_ldimm64()
13149 if (insn[0].src_reg == BPF_PSEUDO_MAP_FD || in resolve_pseudo_ldimm64()
13150 insn[0].src_reg == BPF_PSEUDO_MAP_IDX) { in resolve_pseudo_ldimm64()
13153 u32 off = insn[1].imm; in resolve_pseudo_ldimm64()
13179 insn[0].imm = (u32)addr; in resolve_pseudo_ldimm64()
13180 insn[1].imm = addr >> 32; in resolve_pseudo_ldimm64()
13215 insn++; in resolve_pseudo_ldimm64()
13221 if (!bpf_opcode_in_insntable(insn->code)) { in resolve_pseudo_ldimm64()
13222 verbose(env, "unknown opcode %02x\n", insn->code); in resolve_pseudo_ldimm64()
13251 struct bpf_insn *insn = env->prog->insnsi; in convert_pseudo_ld_imm64() local
13255 for (i = 0; i < insn_cnt; i++, insn++) { in convert_pseudo_ld_imm64()
13256 if (insn->code != (BPF_LD | BPF_IMM | BPF_DW)) in convert_pseudo_ld_imm64()
13258 if (insn->src_reg == BPF_PSEUDO_FUNC) in convert_pseudo_ld_imm64()
13260 insn->src_reg = 0; in convert_pseudo_ld_imm64()
13273 struct bpf_insn *insn = new_prog->insnsi; in adjust_insn_aux_data() local
13282 old_data[off].zext_dst = insn_has_def32(env, insn + off + cnt - 1); in adjust_insn_aux_data()
13294 new_data[i].zext_dst = insn_has_def32(env, insn + i); in adjust_insn_aux_data()
13518 struct bpf_insn *insn = env->prog->insnsi; in sanitize_dead_code() local
13525 memcpy(insn + i, &trap, sizeof(trap)); in sanitize_dead_code()
13548 struct bpf_insn *insn = env->prog->insnsi; in opt_hard_wire_dead_code_branches() local
13552 for (i = 0; i < insn_cnt; i++, insn++) { in opt_hard_wire_dead_code_branches()
13553 if (!insn_is_cond_jump(insn->code)) in opt_hard_wire_dead_code_branches()
13557 ja.off = insn->off; in opt_hard_wire_dead_code_branches()
13558 else if (!aux_data[i + 1 + insn->off].seen) in opt_hard_wire_dead_code_branches()
13566 memcpy(insn, &ja, sizeof(ja)); in opt_hard_wire_dead_code_branches()
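opt_hard_wire_dead_code_branches() turns a conditional jump whose outcome the verifier proved constant into an unconditional BPF_JA: it keeps the branch offset when only the taken path was ever marked seen, and uses offset 0 (plain fall-through) when only the not-taken path was. A sketch of the rewrite, assuming the uapi <linux/bpf.h> header; the aux_data "seen" bookkeeping is collapsed into one illustrative boolean, and the both-paths-live case (no rewrite) is omitted:

#include <linux/bpf.h>
#include <stdbool.h>
#include <stdio.h>

/* Rewrite a conditional jump in place once one side is known dead. */
static void hard_wire(struct bpf_insn *insn, bool taken_path_live)
{
        struct bpf_insn ja = {
                .code = BPF_JMP | BPF_JA,
                .off  = taken_path_live ? insn->off : 0,
        };
        *insn = ja;
}

int main(void)
{
        struct bpf_insn insn = { .code = BPF_JMP | BPF_JEQ | BPF_K,
                                 .dst_reg = BPF_REG_1, .off = 7 };

        hard_wire(&insn, true);   /* fall-through proved dead */
        printf("code=%#x off=%d\n", (unsigned)insn.code, insn.off); /* ja +7 */
        return 0;
}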
13597 struct bpf_insn *insn = env->prog->insnsi; in opt_remove_nops() local
13602 if (memcmp(&insn[i], &ja, sizeof(ja))) in opt_remove_nops()
13632 struct bpf_insn insn; in opt_subreg_zext_lo32_rnd_hi32() local
13635 insn = insns[adj_idx]; in opt_subreg_zext_lo32_rnd_hi32()
13636 load_reg = insn_def_regno(&insn); in opt_subreg_zext_lo32_rnd_hi32()
13644 code = insn.code; in opt_subreg_zext_lo32_rnd_hi32()
13653 if (is_reg64(env, &insn, load_reg, NULL, DST_OP)) { in opt_subreg_zext_lo32_rnd_hi32()
13666 rnd_hi32_patch[0] = insn; in opt_subreg_zext_lo32_rnd_hi32()
13684 if (!bpf_jit_needs_zext() && !is_cmpxchg_insn(&insn)) in opt_subreg_zext_lo32_rnd_hi32()
13688 if (bpf_pseudo_kfunc_call(&insn)) in opt_subreg_zext_lo32_rnd_hi32()
13696 zext_patch[0] = insn; in opt_subreg_zext_lo32_rnd_hi32()
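opt_subreg_zext_lo32_rnd_hi32() leans on the BPF invariant that a 32-bit subregister write zeroes the upper 32 bits; where the JIT cannot guarantee that for free (bpf_jit_needs_zext()), it patches an explicit zero-extension after each 32-bit definition found via insn_def_regno(). The invariant itself in plain C, as an illustrative sketch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t r1 = 0xdeadbeefcafef00dULL;

        /* A 32-bit ALU op like "w1 += 0": the result is the low 32 bits,
         * and the high 32 bits must read back as zero afterwards. */
        r1 = (uint32_t)(r1 + 0);

        printf("r1 = %#llx\n", (unsigned long long)r1); /* 0xcafef00d */
        return 0;
}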
13724 struct bpf_insn insn_buf[16], *insn; in convert_ctx_accesses() local
13753 insn = env->prog->insnsi + delta; in convert_ctx_accesses()
13755 for (i = 0; i < insn_cnt; i++, insn++) { in convert_ctx_accesses()
13759 if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) || in convert_ctx_accesses()
13760 insn->code == (BPF_LDX | BPF_MEM | BPF_H) || in convert_ctx_accesses()
13761 insn->code == (BPF_LDX | BPF_MEM | BPF_W) || in convert_ctx_accesses()
13762 insn->code == (BPF_LDX | BPF_MEM | BPF_DW)) { in convert_ctx_accesses()
13765 } else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) || in convert_ctx_accesses()
13766 insn->code == (BPF_STX | BPF_MEM | BPF_H) || in convert_ctx_accesses()
13767 insn->code == (BPF_STX | BPF_MEM | BPF_W) || in convert_ctx_accesses()
13768 insn->code == (BPF_STX | BPF_MEM | BPF_DW) || in convert_ctx_accesses()
13769 insn->code == (BPF_ST | BPF_MEM | BPF_B) || in convert_ctx_accesses()
13770 insn->code == (BPF_ST | BPF_MEM | BPF_H) || in convert_ctx_accesses()
13771 insn->code == (BPF_ST | BPF_MEM | BPF_W) || in convert_ctx_accesses()
13772 insn->code == (BPF_ST | BPF_MEM | BPF_DW)) { in convert_ctx_accesses()
13774 ctx_access = BPF_CLASS(insn->code) == BPF_STX; in convert_ctx_accesses()
13782 *insn, in convert_ctx_accesses()
13793 insn = new_prog->insnsi + i + delta; in convert_ctx_accesses()
13819 insn->code = BPF_LDX | BPF_PROBE_MEM | in convert_ctx_accesses()
13820 BPF_SIZE((insn)->code); in convert_ctx_accesses()
13829 size = BPF_LDST_BYTES(insn); in convert_ctx_accesses()
13838 off = insn->off; in convert_ctx_accesses()
13853 insn->off = off & ~(size_default - 1); in convert_ctx_accesses()
13854 insn->code = BPF_LDX | BPF_MEM | size_code; in convert_ctx_accesses()
13858 cnt = convert_ctx_access(type, insn, insn_buf, env->prog, in convert_ctx_accesses()
13876 insn->dst_reg, in convert_ctx_accesses()
13878 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg, in convert_ctx_accesses()
13883 insn->dst_reg, in convert_ctx_accesses()
13885 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg, in convert_ctx_accesses()
13898 insn = new_prog->insnsi + i + delta; in convert_ctx_accesses()
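The narrow-load path of convert_ctx_accesses() (source lines 13829-13885 above) is plain bit arithmetic: round the offset down to the field's natural alignment, load the full field, shift the wanted bytes down, and mask to the requested width. A little-endian sketch of the same computation on an ordinary integer (illustrative; assumes size_default == 8):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t field = 0x8877665544332211ULL; /* full 8-byte ctx field */
        int off = 5, size = 2;                  /* u16 load at offset 5  */
        int size_default = 8;

        /* Widen: load from the aligned offset (off & ~7 == 0 here),
         * then extract bytes 5..6 of the little-endian value. */
        int shift = (off & (size_default - 1)) * 8;
        uint64_t mask = (1ULL << (size * 8)) - 1;
        uint64_t narrow = (field >> shift) & mask;

        printf("narrow load = %#llx\n", (unsigned long long)narrow); /* 0x7766 */
        return 0;
}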
13909 struct bpf_insn *insn; in jit_subprogs() local
13916 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { in jit_subprogs()
13917 if (!bpf_pseudo_func(insn) && !bpf_pseudo_call(insn)) in jit_subprogs()
13924 subprog = find_subprog(env, i + insn->imm + 1); in jit_subprogs()
13927 i + insn->imm + 1); in jit_subprogs()
13933 insn->off = subprog; in jit_subprogs()
13937 env->insn_aux_data[i].call_imm = insn->imm; in jit_subprogs()
13939 insn->imm = 1; in jit_subprogs()
13940 if (bpf_pseudo_func(insn)) in jit_subprogs()
13945 insn[1].imm = 1; in jit_subprogs()
14005 insn = func[i]->insnsi; in jit_subprogs()
14006 for (j = 0; j < func[i]->len; j++, insn++) { in jit_subprogs()
14007 if (BPF_CLASS(insn->code) == BPF_LDX && in jit_subprogs()
14008 BPF_MODE(insn->code) == BPF_PROBE_MEM) in jit_subprogs()
14026 insn = func[i]->insnsi; in jit_subprogs()
14027 for (j = 0; j < func[i]->len; j++, insn++) { in jit_subprogs()
14028 if (bpf_pseudo_func(insn)) { in jit_subprogs()
14029 subprog = insn->off; in jit_subprogs()
14030 insn[0].imm = (u32)(long)func[subprog]->bpf_func; in jit_subprogs()
14031 insn[1].imm = ((u64)(long)func[subprog]->bpf_func) >> 32; in jit_subprogs()
14034 if (!bpf_pseudo_call(insn)) in jit_subprogs()
14036 subprog = insn->off; in jit_subprogs()
14037 insn->imm = BPF_CALL_IMM(func[subprog]->bpf_func); in jit_subprogs()
14078 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { in jit_subprogs()
14079 if (bpf_pseudo_func(insn)) { in jit_subprogs()
14080 insn[0].imm = env->insn_aux_data[i].call_imm; in jit_subprogs()
14081 insn[1].imm = insn->off; in jit_subprogs()
14082 insn->off = 0; in jit_subprogs()
14085 if (!bpf_pseudo_call(insn)) in jit_subprogs()
14087 insn->off = env->insn_aux_data[i].call_imm; in jit_subprogs()
14088 subprog = find_subprog(env, i + insn->off + 1); in jit_subprogs()
14089 insn->imm = subprog; in jit_subprogs()
14125 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { in jit_subprogs()
14126 if (!bpf_pseudo_call(insn)) in jit_subprogs()
14128 insn->off = 0; in jit_subprogs()
14129 insn->imm = env->insn_aux_data[i].call_imm; in jit_subprogs()
14139 struct bpf_insn *insn = prog->insnsi; in fixup_call_args() local
14165 for (i = 0; i < prog->len; i++, insn++) { in fixup_call_args()
14166 if (bpf_pseudo_func(insn)) { in fixup_call_args()
14174 if (!bpf_pseudo_call(insn)) in fixup_call_args()
14176 depth = get_callee_stack_depth(env, insn, i); in fixup_call_args()
14179 bpf_patch_call_args(insn, depth); in fixup_call_args()
14187 struct bpf_insn *insn) in fixup_kfunc_call() argument
14191 if (!insn->imm) { in fixup_kfunc_call()
14199 desc = find_kfunc_desc(env->prog, insn->imm, insn->off); in fixup_kfunc_call()
14202 insn->imm); in fixup_kfunc_call()
14206 insn->imm = desc->imm; in fixup_kfunc_call()
14219 struct bpf_insn *insn = prog->insnsi; in do_misc_fixups() local
14229 for (i = 0; i < insn_cnt; i++, insn++) { in do_misc_fixups()
14231 if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) || in do_misc_fixups()
14232 insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) || in do_misc_fixups()
14233 insn->code == (BPF_ALU | BPF_MOD | BPF_X) || in do_misc_fixups()
14234 insn->code == (BPF_ALU | BPF_DIV | BPF_X)) { in do_misc_fixups()
14235 bool is64 = BPF_CLASS(insn->code) == BPF_ALU64; in do_misc_fixups()
14236 bool isdiv = BPF_OP(insn->code) == BPF_DIV; in do_misc_fixups()
14241 BPF_JNE | BPF_K, insn->src_reg, in do_misc_fixups()
14243 BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg), in do_misc_fixups()
14245 *insn, in do_misc_fixups()
14250 BPF_JEQ | BPF_K, insn->src_reg, in do_misc_fixups()
14252 *insn, in do_misc_fixups()
14254 BPF_MOV32_REG(insn->dst_reg, insn->dst_reg), in do_misc_fixups()
14267 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
14272 if (BPF_CLASS(insn->code) == BPF_LD && in do_misc_fixups()
14273 (BPF_MODE(insn->code) == BPF_ABS || in do_misc_fixups()
14274 BPF_MODE(insn->code) == BPF_IND)) { in do_misc_fixups()
14275 cnt = env->ops->gen_ld_abs(insn, insn_buf); in do_misc_fixups()
14287 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
14292 if (insn->code == (BPF_ALU64 | BPF_ADD | BPF_X) || in do_misc_fixups()
14293 insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) { in do_misc_fixups()
14310 off_reg = issrc ? insn->src_reg : insn->dst_reg; in do_misc_fixups()
14324 *patch++ = BPF_MOV64_REG(insn->dst_reg, insn->src_reg); in do_misc_fixups()
14325 insn->src_reg = BPF_REG_AX; in do_misc_fixups()
14327 insn->code = insn->code == code_add ? in do_misc_fixups()
14329 *patch++ = *insn; in do_misc_fixups()
14340 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
14344 if (insn->code != (BPF_JMP | BPF_CALL)) in do_misc_fixups()
14346 if (insn->src_reg == BPF_PSEUDO_CALL) in do_misc_fixups()
14348 if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) { in do_misc_fixups()
14349 ret = fixup_kfunc_call(env, insn); in do_misc_fixups()
14355 if (insn->imm == BPF_FUNC_get_route_realm) in do_misc_fixups()
14357 if (insn->imm == BPF_FUNC_get_prandom_u32) in do_misc_fixups()
14359 if (insn->imm == BPF_FUNC_override_return) in do_misc_fixups()
14361 if (insn->imm == BPF_FUNC_tail_call) { in do_misc_fixups()
14377 insn->imm = 0; in do_misc_fixups()
14378 insn->code = BPF_JMP | BPF_TAIL_CALL; in do_misc_fixups()
14399 insn->imm = ret + 1; in do_misc_fixups()
14424 insn_buf[2] = *insn; in do_misc_fixups()
14432 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
14436 if (insn->imm == BPF_FUNC_timer_set_callback) { in do_misc_fixups()
14456 insn_buf[2] = *insn; in do_misc_fixups()
14465 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
14469 if (insn->imm == BPF_FUNC_task_storage_get || in do_misc_fixups()
14470 insn->imm == BPF_FUNC_sk_storage_get || in do_misc_fixups()
14471 insn->imm == BPF_FUNC_inode_storage_get) { in do_misc_fixups()
14476 insn_buf[1] = *insn; in do_misc_fixups()
14485 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
14494 (insn->imm == BPF_FUNC_map_lookup_elem || in do_misc_fixups()
14495 insn->imm == BPF_FUNC_map_update_elem || in do_misc_fixups()
14496 insn->imm == BPF_FUNC_map_delete_elem || in do_misc_fixups()
14497 insn->imm == BPF_FUNC_map_push_elem || in do_misc_fixups()
14498 insn->imm == BPF_FUNC_map_pop_elem || in do_misc_fixups()
14499 insn->imm == BPF_FUNC_map_peek_elem || in do_misc_fixups()
14500 insn->imm == BPF_FUNC_redirect_map || in do_misc_fixups()
14501 insn->imm == BPF_FUNC_for_each_map_elem || in do_misc_fixups()
14502 insn->imm == BPF_FUNC_map_lookup_percpu_elem)) { in do_misc_fixups()
14509 if (insn->imm == BPF_FUNC_map_lookup_elem && in do_misc_fixups()
14526 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
14555 switch (insn->imm) { in do_misc_fixups()
14557 insn->imm = BPF_CALL_IMM(ops->map_lookup_elem); in do_misc_fixups()
14560 insn->imm = BPF_CALL_IMM(ops->map_update_elem); in do_misc_fixups()
14563 insn->imm = BPF_CALL_IMM(ops->map_delete_elem); in do_misc_fixups()
14566 insn->imm = BPF_CALL_IMM(ops->map_push_elem); in do_misc_fixups()
14569 insn->imm = BPF_CALL_IMM(ops->map_pop_elem); in do_misc_fixups()
14572 insn->imm = BPF_CALL_IMM(ops->map_peek_elem); in do_misc_fixups()
14575 insn->imm = BPF_CALL_IMM(ops->map_redirect); in do_misc_fixups()
14578 insn->imm = BPF_CALL_IMM(ops->map_for_each_callback); in do_misc_fixups()
14581 insn->imm = BPF_CALL_IMM(ops->map_lookup_percpu_elem); in do_misc_fixups()
14590 insn->imm == BPF_FUNC_jiffies64) { in do_misc_fixups()
14609 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
14615 insn->imm == BPF_FUNC_get_func_arg) { in do_misc_fixups()
14634 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
14640 insn->imm == BPF_FUNC_get_func_ret) { in do_misc_fixups()
14662 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
14668 insn->imm == BPF_FUNC_get_func_arg_cnt) { in do_misc_fixups()
14677 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
14683 insn->imm == BPF_FUNC_get_func_ip) { in do_misc_fixups()
14692 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
14697 fn = env->ops->get_func_proto(insn->imm, env->prog); in do_misc_fixups()
14704 func_id_name(insn->imm), insn->imm); in do_misc_fixups()
14707 insn->imm = fn->func - __bpf_call_base; in do_misc_fixups()
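The first rewrite in do_misc_fixups() (source lines 14231-14254 above) is the divide-by-zero guard: BPF division and modulo must never trap, so DIV_X/MOD_X are patched with a branch sequence giving dst = 0 for a zero divisor in DIV and leaving dst unchanged in MOD. The 64-bit semantics of the patched sequence as a C sketch (bpf_div64()/bpf_mod64() are illustrative names):

#include <stdint.h>
#include <stdio.h>

/* No trap: x/0 == 0 and x%0 == x, matching the branches the
 * verifier emits around the original instruction. */
static uint64_t bpf_div64(uint64_t dst, uint64_t src)
{
        return src ? dst / src : 0;
}

static uint64_t bpf_mod64(uint64_t dst, uint64_t src)
{
        return src ? dst % src : dst;
}

int main(void)
{
        printf("10/0 = %llu, 10%%0 = %llu\n",
               (unsigned long long)bpf_div64(10, 0),
               (unsigned long long)bpf_mod64(10, 0)); /* 0, 10 */
        return 0;
}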
14807 static bool is_bpf_loop_call(struct bpf_insn *insn) in is_bpf_loop_call() argument
14809 return insn->code == (BPF_JMP | BPF_CALL) && in is_bpf_loop_call()
14810 insn->src_reg == 0 && in is_bpf_loop_call()
14811 insn->imm == BPF_FUNC_loop; in is_bpf_loop_call()
14827 struct bpf_insn *insn = env->prog->insnsi; in optimize_bpf_loop() local
14833 for (i = 0; i < insn_cnt; i++, insn++) { in optimize_bpf_loop()
14837 if (is_bpf_loop_call(insn) && inline_state->fit_for_inline) { in optimize_bpf_loop()
14851 insn = new_prog->insnsi + i + delta; in optimize_bpf_loop()