Lines Matching refs:regs

All references to regs in the BPF verifier, kernel/bpf/verifier.c. Each match shows the source line number, the matching line, and the enclosing function; a trailing "argument" or "local" tag marks lines where regs is declared as a function parameter or a local variable.

856 		reg = &state->regs[i];  in print_verifier_state()
1394 struct bpf_reg_state *regs, u32 regno) in mark_reg_known_zero() argument
1400 __mark_reg_not_init(env, regs + regno); in mark_reg_known_zero()
1403 __mark_reg_known_zero(regs + regno); in mark_reg_known_zero()
1711 struct bpf_reg_state *regs, u32 regno) in mark_reg_unknown() argument
1717 __mark_reg_not_init(env, regs + regno); in mark_reg_unknown()
1720 __mark_reg_unknown(env, regs + regno); in mark_reg_unknown()
1731 struct bpf_reg_state *regs, u32 regno) in mark_reg_not_init() argument
1737 __mark_reg_not_init(env, regs + regno); in mark_reg_not_init()
1740 __mark_reg_not_init(env, regs + regno); in mark_reg_not_init()
1744 struct bpf_reg_state *regs, u32 regno, in mark_btf_ld_reg() argument
1750 mark_reg_unknown(env, regs, regno); in mark_btf_ld_reg()
1753 mark_reg_known_zero(env, regs, regno); in mark_btf_ld_reg()
1754 regs[regno].type = PTR_TO_BTF_ID | flag; in mark_btf_ld_reg()
1755 regs[regno].btf = btf; in mark_btf_ld_reg()
1756 regs[regno].btf_id = btf_id; in mark_btf_ld_reg()
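
The mark_reg_*() matches above (lines 1394-1756) all come from one wrapper pattern: bounds-check regno and, if the check fails, poison every register rather than continue on corrupt state; otherwise delegate to the corresponding __mark_reg_*() helper. A sketch of mark_reg_known_zero(), with the parts that don't mention regs (the bounds check and diagnostic) filled in from context:

	static void mark_reg_known_zero(struct bpf_verifier_env *env,
					struct bpf_reg_state *regs, u32 regno)
	{
		if (WARN_ON(regno >= MAX_BPF_REG)) {
			verbose(env, "mark_reg_known_zero(regs, %u)\n", regno);
			/* something went very wrong: kill all regs */
			for (regno = 0; regno < MAX_BPF_REG; regno++)
				__mark_reg_not_init(env, regs + regno);	/* line 1400 */
			return;
		}
		__mark_reg_known_zero(regs + regno);			/* line 1403 */
	}

mark_reg_unknown() and mark_reg_not_init() differ only in the final __mark_reg_*() call, which is why line 1740 looks like a duplicate of 1737: the first is the kill-all fallback, the second the normal path. mark_btf_ld_reg() (1744-1756) builds on these: a SCALAR_VALUE load becomes mark_reg_unknown(), anything else becomes a known-zero-offset PTR_TO_BTF_ID with the btf and btf_id recorded.
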
1763 struct bpf_reg_state *regs = state->regs; in init_reg_state() local
1767 mark_reg_not_init(env, regs, i); in init_reg_state()
1768 regs[i].live = REG_LIVE_NONE; in init_reg_state()
1769 regs[i].parent = NULL; in init_reg_state()
1770 regs[i].subreg_def = DEF_NOT_SUBREG; in init_reg_state()
1774 regs[BPF_REG_FP].type = PTR_TO_STACK; in init_reg_state()
1775 mark_reg_known_zero(env, regs, BPF_REG_FP); in init_reg_state()
1776 regs[BPF_REG_FP].frameno = state->frameno; in init_reg_state()
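
Lines 1763-1776 are nearly the whole of init_reg_state(), which puts a fresh frame into its starting state: every register uninitialized except the frame pointer. Assembled, with the loop header (which doesn't itself mention regs) reconstructed:

	static void init_reg_state(struct bpf_verifier_env *env,
				   struct bpf_func_state *state)
	{
		struct bpf_reg_state *regs = state->regs;
		int i;

		for (i = 0; i < MAX_BPF_REG; i++) {
			mark_reg_not_init(env, regs, i);	/* line 1767 */
			regs[i].live = REG_LIVE_NONE;
			regs[i].parent = NULL;
			regs[i].subreg_def = DEF_NOT_SUBREG;
		}

		/* frame pointer: the only register with a known type at entry */
		regs[BPF_REG_FP].type = PTR_TO_STACK;		/* line 1774 */
		mark_reg_known_zero(env, regs, BPF_REG_FP);
		regs[BPF_REG_FP].frameno = state->frameno;
	}
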
2474 struct bpf_reg_state *reg, *regs = state->regs; in check_reg_arg() local
2484 reg = &regs[regno]; in check_reg_arg()
2510 mark_reg_unknown(env, regs, regno); in check_reg_arg()
2822 reg = &func->regs[j]; in mark_all_scalars_precise()
2848 reg = &func->regs[j]; in mark_all_scalars_imprecise()
2974 reg = &func->regs[regno]; in __mark_chain_precision()
3020 reg = &st->frame[0]->regs[i]; in __mark_chain_precision()
3078 reg = &func->regs[i]; in __mark_chain_precision()
3278 reg = &cur->regs[value_regno]; in check_stack_write_fixed_off()
3407 ptr_reg = &cur->regs[ptr_regno]; in check_stack_write_var_off()
3411 value_reg = &cur->regs[value_regno]; in check_stack_write_var_off()
3511 __mark_reg_const_zero(&state->regs[dst_regno]); in mark_reg_stack_read()
3522 state->regs[dst_regno].precise = true; in mark_reg_stack_read()
3525 mark_reg_unknown(env, state->regs, dst_regno); in mark_reg_stack_read()
3527 state->regs[dst_regno].live |= REG_LIVE_WRITTEN; in mark_reg_stack_read()
3574 s32 subreg_def = state->regs[dst_regno].subreg_def; in check_stack_read_fixed_off()
3576 copy_register_state(&state->regs[dst_regno], reg); in check_stack_read_fixed_off()
3577 state->regs[dst_regno].subreg_def = subreg_def; in check_stack_read_fixed_off()
3589 mark_reg_unknown(env, state->regs, dst_regno); in check_stack_read_fixed_off()
3591 state->regs[dst_regno].live |= REG_LIVE_WRITTEN; in check_stack_read_fixed_off()
3597 copy_register_state(&state->regs[dst_regno], reg); in check_stack_read_fixed_off()
3602 state->regs[dst_regno].live |= REG_LIVE_WRITTEN; in check_stack_read_fixed_off()
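
One detail worth calling out in check_stack_read_fixed_off(): lines 3574-3577 save and restore subreg_def around the register copy, because copy_register_state() would otherwise clobber the subreg_def the earlier check_reg_arg() already decided for this instruction. The surrounding conditions (an exact-size fill of a spilled register) are elided in the listing; the core is:

	/* filling a register from a spill slot of the same size */
	s32 subreg_def = state->regs[dst_regno].subreg_def;	/* line 3574 */

	copy_register_state(&state->regs[dst_regno], reg);
	state->regs[dst_regno].subreg_def = subreg_def;
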
3776 struct bpf_reg_state *regs = cur_regs(env); in check_map_access_type() local
3777 struct bpf_map *map = regs[regno].map_ptr; in check_map_access_type()
3838 struct bpf_reg_state *reg = &state->regs[regno]; in check_mem_region_access()
4059 struct bpf_reg_state *reg = &state->regs[regno]; in check_map_access()
4171 struct bpf_reg_state *regs = cur_regs(env); in check_packet_access() local
4172 struct bpf_reg_state *reg = &regs[regno]; in check_packet_access()
4263 struct bpf_reg_state *regs = cur_regs(env); in check_sock_access() local
4264 struct bpf_reg_state *reg = &regs[regno]; in check_sock_access()
4761 struct bpf_reg_state *regs, in check_ptr_to_btf_access() argument
4766 struct bpf_reg_state *reg = regs + regno; in check_ptr_to_btf_access()
4826 mark_btf_ld_reg(env, regs, value_regno, ret, reg->btf, btf_id, flag); in check_ptr_to_btf_access()
4832 struct bpf_reg_state *regs, in check_ptr_to_map_access() argument
4837 struct bpf_reg_state *reg = regs + regno; in check_ptr_to_map_access()
4882 mark_btf_ld_reg(env, regs, value_regno, ret, btf_vmlinux, btf_id, flag); in check_ptr_to_map_access()
4919 struct bpf_reg_state *regs = cur_regs(env); in check_stack_access_within_bounds() local
4920 struct bpf_reg_state *reg = regs + regno; in check_stack_access_within_bounds()
4977 struct bpf_reg_state *regs = cur_regs(env); in check_mem_access() local
4978 struct bpf_reg_state *reg = regs + regno; in check_mem_access()
5005 mark_reg_unknown(env, regs, value_regno); in check_mem_access()
5041 regs[value_regno].type = SCALAR_VALUE; in check_mem_access()
5042 __mark_reg_known(&regs[value_regno], val); in check_mem_access()
5044 mark_reg_unknown(env, regs, value_regno); in check_mem_access()
5071 mark_reg_unknown(env, regs, value_regno); in check_mem_access()
5097 mark_reg_unknown(env, regs, value_regno); in check_mem_access()
5099 mark_reg_known_zero(env, regs, in check_mem_access()
5102 regs[value_regno].id = ++env->id_gen; in check_mem_access()
5108 regs[value_regno].subreg_def = DEF_NOT_SUBREG; in check_mem_access()
5110 regs[value_regno].btf = btf; in check_mem_access()
5111 regs[value_regno].btf_id = btf_id; in check_mem_access()
5114 regs[value_regno].type = reg_type; in check_mem_access()
5147 mark_reg_unknown(env, regs, value_regno); in check_mem_access()
5158 mark_reg_unknown(env, regs, value_regno); in check_mem_access()
5167 mark_reg_unknown(env, regs, value_regno); in check_mem_access()
5171 mark_reg_unknown(env, regs, value_regno); in check_mem_access()
5174 err = check_ptr_to_btf_access(env, regs, regno, off, size, t, in check_mem_access()
5177 err = check_ptr_to_map_access(env, regs, regno, off, size, t, in check_mem_access()
5198 mark_reg_unknown(env, regs, value_regno); in check_mem_access()
5206 regs[value_regno].type == SCALAR_VALUE) { in check_mem_access()
5208 coerce_reg_to_size(&regs[value_regno], size); in check_mem_access()
5458 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; in check_helper_mem_access() local
5681 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; in process_spin_lock() local
5743 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; in process_timer_func() local
5791 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; in process_kptr_func() local
6004 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; in check_reg_type() local
6156 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; in check_func_arg() local
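
The checkers from line 5458 on all open with the same idiom: regs = cur_regs(env), *reg = &regs[regno]. cur_regs() simply resolves the register file of the innermost frame of the state being verified; roughly (a sketch, check against the source):

	static struct bpf_func_state *cur_func(struct bpf_verifier_env *env)
	{
		return env->cur_state->frame[env->cur_state->curframe];
	}

	static struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env)
	{
		return cur_func(env)->regs;
	}
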
6847 struct bpf_reg_state *reg = &state->regs[regn]; in mark_pkt_end()
6892 struct bpf_reg_state *regs) in clear_caller_saved_regs() argument
6898 mark_reg_not_init(env, regs, caller_saved[i]); in clear_caller_saved_regs()
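
clear_caller_saved_regs() (6892-6898) encodes the BPF calling convention: r0-r5 are clobbered by any call. Only the mark_reg_not_init() line references regs; the liveness-clearing call in the same loop is filled in from context, so treat this reconstruction as a sketch:

	static void clear_caller_saved_regs(struct bpf_verifier_env *env,
					    struct bpf_reg_state *regs)
	{
		int i;

		/* after a call, r0 - r5 hold unknown garbage */
		for (i = 0; i < CALLER_SAVED_REGS; i++) {
			mark_reg_not_init(env, regs, caller_saved[i]);	/* line 6898 */
			check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
		}
	}
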
6938 err = btf_check_subprog_call(env, subprog, caller->regs); in __check_func_call()
6951 clear_caller_saved_regs(env, caller->regs); in __check_func_call()
6954 mark_reg_unknown(env, caller->regs, BPF_REG_0); in __check_func_call()
6955 caller->regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG; in __check_func_call()
6991 clear_caller_saved_regs(env, caller->regs); in __check_func_call()
6992 mark_reg_unknown(env, caller->regs, BPF_REG_0); in __check_func_call()
6993 caller->regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG; in __check_func_call()
7022 clear_caller_saved_regs(env, caller->regs); in __check_func_call()
7053 callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1]; in map_set_for_each_callback_args()
7055 callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY; in map_set_for_each_callback_args()
7056 __mark_reg_known_zero(&callee->regs[BPF_REG_2]); in map_set_for_each_callback_args()
7057 callee->regs[BPF_REG_2].map_ptr = caller->regs[BPF_REG_1].map_ptr; in map_set_for_each_callback_args()
7059 callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE; in map_set_for_each_callback_args()
7060 __mark_reg_known_zero(&callee->regs[BPF_REG_3]); in map_set_for_each_callback_args()
7061 callee->regs[BPF_REG_3].map_ptr = caller->regs[BPF_REG_1].map_ptr; in map_set_for_each_callback_args()
7064 callee->regs[BPF_REG_4] = caller->regs[BPF_REG_3]; in map_set_for_each_callback_args()
7067 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); in map_set_for_each_callback_args()
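
Lines 7053-7067 are effectively the full body of map_set_for_each_callback_args(), which fabricates the callee frame for a bpf_for_each_map_elem() callback. Assembled, with the signature and comments reconstructed:

	static int map_set_for_each_callback_args(struct bpf_verifier_env *env,
						  struct bpf_func_state *caller,
						  struct bpf_func_state *callee)
	{
		/* helper:   bpf_for_each_map_elem(map, callback_fn, callback_ctx, flags)
		 * callback: fn(struct bpf_map *map, void *key, void *value, void *ctx)
		 */
		callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1];	/* the map */

		callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY;
		__mark_reg_known_zero(&callee->regs[BPF_REG_2]);
		callee->regs[BPF_REG_2].map_ptr = caller->regs[BPF_REG_1].map_ptr;

		callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE;
		__mark_reg_known_zero(&callee->regs[BPF_REG_3]);
		callee->regs[BPF_REG_3].map_ptr = caller->regs[BPF_REG_1].map_ptr;

		/* callback_ctx: pointer to caller stack, or NULL */
		callee->regs[BPF_REG_4] = caller->regs[BPF_REG_3];

		/* unused */
		__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
		return 0;
	}
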
7081 callee->regs[i] = caller->regs[i]; in set_callee_state()
7140 callee->regs[BPF_REG_1].type = SCALAR_VALUE; in set_loop_callback_state()
7141 callee->regs[BPF_REG_2] = caller->regs[BPF_REG_3]; in set_loop_callback_state()
7144 __mark_reg_not_init(env, &callee->regs[BPF_REG_3]); in set_loop_callback_state()
7145 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); in set_loop_callback_state()
7146 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); in set_loop_callback_state()
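
set_loop_callback_state() (7140-7146) does the same job for bpf_loop(): r1 is the loop index, typed as an unknown scalar, and r2 is the caller's callback_ctx. The trailing bookkeeping below (in_callback_fn, callback_ret_range) does not appear in the matches and is filled in from context; treat it as an assumption:

	/* callback: fn(u32 index, void *ctx) */
	callee->regs[BPF_REG_1].type = SCALAR_VALUE;	/* loop index */
	callee->regs[BPF_REG_2] = caller->regs[BPF_REG_3];

	/* unused */
	__mark_reg_not_init(env, &callee->regs[BPF_REG_3]);
	__mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);

	callee->in_callback_fn = true;
	callee->callback_ret_range = tnum_range(0, 1);	/* assumption: callback must return 0 or 1 */
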
7158 struct bpf_map *map_ptr = caller->regs[BPF_REG_1].map_ptr; in set_timer_callback_state()
7163 callee->regs[BPF_REG_1].type = CONST_PTR_TO_MAP; in set_timer_callback_state()
7164 __mark_reg_known_zero(&callee->regs[BPF_REG_1]); in set_timer_callback_state()
7165 callee->regs[BPF_REG_1].map_ptr = map_ptr; in set_timer_callback_state()
7167 callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY; in set_timer_callback_state()
7168 __mark_reg_known_zero(&callee->regs[BPF_REG_2]); in set_timer_callback_state()
7169 callee->regs[BPF_REG_2].map_ptr = map_ptr; in set_timer_callback_state()
7171 callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE; in set_timer_callback_state()
7172 __mark_reg_known_zero(&callee->regs[BPF_REG_3]); in set_timer_callback_state()
7173 callee->regs[BPF_REG_3].map_ptr = map_ptr; in set_timer_callback_state()
7176 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); in set_timer_callback_state()
7177 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); in set_timer_callback_state()
7193 callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1]; in set_find_vma_callback_state()
7195 callee->regs[BPF_REG_2].type = PTR_TO_BTF_ID; in set_find_vma_callback_state()
7196 __mark_reg_known_zero(&callee->regs[BPF_REG_2]); in set_find_vma_callback_state()
7197 callee->regs[BPF_REG_2].btf = btf_vmlinux; in set_find_vma_callback_state()
7198 callee->regs[BPF_REG_2].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_VMA], in set_find_vma_callback_state()
7201 callee->regs[BPF_REG_3] = caller->regs[BPF_REG_4]; in set_find_vma_callback_state()
7204 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); in set_find_vma_callback_state()
7205 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); in set_find_vma_callback_state()
7220 __mark_reg_not_init(env, &callee->regs[BPF_REG_0]); in set_user_ringbuf_callback_state()
7221 callee->regs[BPF_REG_1].type = PTR_TO_DYNPTR | DYNPTR_TYPE_LOCAL; in set_user_ringbuf_callback_state()
7222 __mark_reg_known_zero(&callee->regs[BPF_REG_1]); in set_user_ringbuf_callback_state()
7223 callee->regs[BPF_REG_2] = caller->regs[BPF_REG_3]; in set_user_ringbuf_callback_state()
7226 __mark_reg_not_init(env, &callee->regs[BPF_REG_3]); in set_user_ringbuf_callback_state()
7227 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); in set_user_ringbuf_callback_state()
7228 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); in set_user_ringbuf_callback_state()
7243 r0 = &callee->regs[BPF_REG_0]; in prepare_func_exit()
7277 caller->regs[BPF_REG_0] = *r0; in prepare_func_exit()
7305 static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type, in do_refine_retval_range() argument
7309 struct bpf_reg_state *ret_reg = &regs[BPF_REG_0]; in do_refine_retval_range()
7377 struct bpf_reg_state *regs = cur_regs(env), *reg; in record_func_key() local
7389 reg = &regs[BPF_REG_3]; in record_func_key()
7429 struct bpf_reg_state *regs) in check_bpf_snprintf_call() argument
7431 struct bpf_reg_state *fmt_reg = &regs[BPF_REG_3]; in check_bpf_snprintf_call()
7432 struct bpf_reg_state *data_len_reg = &regs[BPF_REG_5]; in check_bpf_snprintf_call()
7494 struct bpf_reg_state *regs = cur_regs(env); in loop_flag_is_zero() local
7495 struct bpf_reg_state *reg = &regs[BPF_REG_4]; in loop_flag_is_zero()
7529 struct bpf_reg_state *regs; in check_helper_call() local
7606 regs = cur_regs(env); in check_helper_call()
7617 err = mark_stack_slots_dynptr(env, &regs[meta.uninit_dynptr_regno], in check_helper_call()
7627 err = unmark_stack_slots_dynptr(env, &regs[meta.release_regno]); in check_helper_call()
7633 else if (register_is_null(&regs[meta.release_regno])) in check_helper_call()
7654 if (!register_is_null(&regs[BPF_REG_2])) { in check_helper_call()
7672 err = check_bpf_snprintf_call(env, regs); in check_helper_call()
7680 if (regs[BPF_REG_1].type != PTR_TO_MAP_VALUE) { in check_helper_call()
7682 reg_type_str(env, regs[BPF_REG_1].type)); in check_helper_call()
7701 struct bpf_reg_state *reg = &regs[BPF_REG_1 + i]; in check_helper_call()
7732 mark_reg_not_init(env, regs, caller_saved[i]); in check_helper_call()
7737 regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG; in check_helper_call()
7746 mark_reg_unknown(env, regs, BPF_REG_0); in check_helper_call()
7749 regs[BPF_REG_0].type = NOT_INIT; in check_helper_call()
7753 mark_reg_known_zero(env, regs, BPF_REG_0); in check_helper_call()
7763 regs[BPF_REG_0].map_ptr = meta.map_ptr; in check_helper_call()
7764 regs[BPF_REG_0].map_uid = meta.map_uid; in check_helper_call()
7765 regs[BPF_REG_0].type = PTR_TO_MAP_VALUE | ret_flag; in check_helper_call()
7768 regs[BPF_REG_0].id = ++env->id_gen; in check_helper_call()
7772 mark_reg_known_zero(env, regs, BPF_REG_0); in check_helper_call()
7773 regs[BPF_REG_0].type = PTR_TO_SOCKET | ret_flag; in check_helper_call()
7776 mark_reg_known_zero(env, regs, BPF_REG_0); in check_helper_call()
7777 regs[BPF_REG_0].type = PTR_TO_SOCK_COMMON | ret_flag; in check_helper_call()
7780 mark_reg_known_zero(env, regs, BPF_REG_0); in check_helper_call()
7781 regs[BPF_REG_0].type = PTR_TO_TCP_SOCK | ret_flag; in check_helper_call()
7784 mark_reg_known_zero(env, regs, BPF_REG_0); in check_helper_call()
7785 regs[BPF_REG_0].type = PTR_TO_MEM | ret_flag; in check_helper_call()
7786 regs[BPF_REG_0].mem_size = meta.mem_size; in check_helper_call()
7792 mark_reg_known_zero(env, regs, BPF_REG_0); in check_helper_call()
7807 regs[BPF_REG_0].type = PTR_TO_MEM | ret_flag; in check_helper_call()
7808 regs[BPF_REG_0].mem_size = tsize; in check_helper_call()
7817 regs[BPF_REG_0].type = PTR_TO_BTF_ID | ret_flag; in check_helper_call()
7818 regs[BPF_REG_0].btf = meta.ret_btf; in check_helper_call()
7819 regs[BPF_REG_0].btf_id = meta.ret_btf_id; in check_helper_call()
7828 mark_reg_known_zero(env, regs, BPF_REG_0); in check_helper_call()
7829 regs[BPF_REG_0].type = PTR_TO_BTF_ID | ret_flag; in check_helper_call()
7849 regs[BPF_REG_0].btf = ret_btf; in check_helper_call()
7850 regs[BPF_REG_0].btf_id = ret_btf_id; in check_helper_call()
7859 if (type_may_be_null(regs[BPF_REG_0].type)) in check_helper_call()
7860 regs[BPF_REG_0].id = ++env->id_gen; in check_helper_call()
7870 regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id; in check_helper_call()
7877 regs[BPF_REG_0].id = id; in check_helper_call()
7879 regs[BPF_REG_0].ref_obj_id = id; in check_helper_call()
7882 do_refine_retval_range(regs, fn->ret_type, func_id, &meta); in check_helper_call()
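
The long run of regs[BPF_REG_0] writes in check_helper_call() (7746-7882) is one switch over the helper's declared return type: integer returns become unknown scalars, pointer returns become known-zero-offset pointers of the matching type, and a nullable return gets a fresh id so a later NULL check can narrow both branches. A condensed sketch of the shape (most cases abridged):

	switch (base_type(fn->ret_type)) {
	case RET_INTEGER:
		mark_reg_unknown(env, regs, BPF_REG_0);		/* line 7746 */
		break;
	case RET_VOID:
		regs[BPF_REG_0].type = NOT_INIT;		/* line 7749 */
		break;
	case RET_PTR_TO_MAP_VALUE:
		mark_reg_known_zero(env, regs, BPF_REG_0);
		regs[BPF_REG_0].type = PTR_TO_MAP_VALUE | ret_flag;
		regs[BPF_REG_0].map_ptr = meta.map_ptr;
		regs[BPF_REG_0].map_uid = meta.map_uid;
		break;
	/* PTR_TO_SOCKET / SOCK_COMMON / TCP_SOCK / MEM / BTF_ID cases elided */
	}

	if (type_may_be_null(regs[BPF_REG_0].type))
		regs[BPF_REG_0].id = ++env->id_gen;		/* line 7860 */
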
7950 struct bpf_reg_state *regs = cur_regs(env); in check_kfunc_call() local
7989 err = btf_check_kfunc_arg_match(env, desc_btf, func_id, regs, &meta); in check_kfunc_call()
7996 err = release_reference(env, regs[err].ref_obj_id); in check_kfunc_call()
8005 mark_reg_not_init(env, regs, caller_saved[i]); in check_kfunc_call()
8016 mark_reg_unknown(env, regs, BPF_REG_0); in check_kfunc_call()
8033 mark_reg_known_zero(env, regs, BPF_REG_0); in check_kfunc_call()
8034 regs[BPF_REG_0].type = PTR_TO_MEM; in check_kfunc_call()
8035 regs[BPF_REG_0].mem_size = meta.r0_size; in check_kfunc_call()
8038 regs[BPF_REG_0].type |= MEM_RDONLY; in check_kfunc_call()
8042 regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id; in check_kfunc_call()
8044 mark_reg_known_zero(env, regs, BPF_REG_0); in check_kfunc_call()
8045 regs[BPF_REG_0].btf = desc_btf; in check_kfunc_call()
8046 regs[BPF_REG_0].type = PTR_TO_BTF_ID; in check_kfunc_call()
8047 regs[BPF_REG_0].btf_id = ptr_type_id; in check_kfunc_call()
8050 regs[BPF_REG_0].type |= PTR_MAYBE_NULL; in check_kfunc_call()
8052 regs[BPF_REG_0].id = ++env->id_gen; in check_kfunc_call()
8060 regs[BPF_REG_0].id = id; in check_kfunc_call()
8061 regs[BPF_REG_0].ref_obj_id = id; in check_kfunc_call()
8245 struct bpf_reg_state *regs; in sanitize_speculative_path() local
8249 regs = branch->frame[branch->curframe]->regs; in sanitize_speculative_path()
8251 mark_reg_unknown(env, regs, insn->dst_reg); in sanitize_speculative_path()
8253 mark_reg_unknown(env, regs, insn->dst_reg); in sanitize_speculative_path()
8254 mark_reg_unknown(env, regs, insn->src_reg); in sanitize_speculative_path()
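
sanitize_speculative_path() (8245-8254) is part of the Spectre v1 mitigation: it pushes the mispredicted branch onto the verification stack and widens the registers the instruction writes, so the speculative path is checked against worst-case values. Reconstructed around the matches (the push_stack() arguments are filled in from context):

	static struct bpf_verifier_state *
	sanitize_speculative_path(struct bpf_verifier_env *env,
				  const struct bpf_insn *insn,
				  u32 next_idx, u32 curr_idx)
	{
		struct bpf_verifier_state *branch;
		struct bpf_reg_state *regs;

		branch = push_stack(env, next_idx, curr_idx, true);
		if (branch && insn) {
			regs = branch->frame[branch->curframe]->regs;	/* line 8249 */
			if (BPF_SRC(insn->code) == BPF_K) {
				mark_reg_unknown(env, regs, insn->dst_reg);
			} else if (BPF_SRC(insn->code) == BPF_X) {
				mark_reg_unknown(env, regs, insn->dst_reg);
				mark_reg_unknown(env, regs, insn->src_reg);
			}
		}
		return branch;
	}
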
8486 struct bpf_reg_state *regs = state->regs, *dst_reg; in adjust_ptr_min_max_vals() local
8497 dst_reg = &regs[dst]; in adjust_ptr_min_max_vals()
9285 struct bpf_reg_state *regs = cur_regs(env); in adjust_scalar_min_max_vals() local
9392 mark_reg_unknown(env, regs, insn->dst_reg); in adjust_scalar_min_max_vals()
9405 mark_reg_unknown(env, regs, insn->dst_reg); in adjust_scalar_min_max_vals()
9418 mark_reg_unknown(env, regs, insn->dst_reg); in adjust_scalar_min_max_vals()
9427 mark_reg_unknown(env, regs, insn->dst_reg); in adjust_scalar_min_max_vals()
9446 struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg; in adjust_reg_min_max_vals() local
9451 dst_reg = &regs[insn->dst_reg]; in adjust_reg_min_max_vals()
9461 src_reg = &regs[insn->src_reg]; in adjust_reg_min_max_vals()
9469 mark_reg_unknown(env, regs, insn->dst_reg); in adjust_reg_min_max_vals()
9529 struct bpf_reg_state *regs = cur_regs(env); in check_alu_op() local
9591 struct bpf_reg_state *src_reg = regs + insn->src_reg; in check_alu_op()
9592 struct bpf_reg_state *dst_reg = regs + insn->dst_reg; in check_alu_op()
9624 mark_reg_unknown(env, regs, in check_alu_op()
9635 mark_reg_unknown(env, regs, insn->dst_reg); in check_alu_op()
9636 regs[insn->dst_reg].type = SCALAR_VALUE; in check_alu_op()
9638 __mark_reg_known(regs + insn->dst_reg, in check_alu_op()
9641 __mark_reg_known(regs + insn->dst_reg, in check_alu_op()
10282 struct bpf_reg_state *regs = state->regs, *reg; in mark_ptr_or_null_regs() local
10283 u32 ref_obj_id = regs[regno].ref_obj_id; in mark_ptr_or_null_regs()
10284 u32 id = regs[regno].id; in mark_ptr_or_null_regs()
10420 struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs; in check_cond_jmp_op() local
10438 dst_reg = &regs[insn->dst_reg]; in check_cond_jmp_op()
10450 src_reg = &regs[insn->src_reg]; in check_cond_jmp_op()
10531 other_branch_regs = other_branch->frame[other_branch->curframe]->regs; in check_cond_jmp_op()
10541 struct bpf_reg_state *src_reg = &regs[insn->src_reg]; in check_cond_jmp_op()
10600 } else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg], in check_cond_jmp_op()
10616 struct bpf_reg_state *regs = cur_regs(env); in check_ld_imm() local
10634 dst_reg = &regs[insn->dst_reg]; in check_ld_imm()
10639 __mark_reg_known(&regs[insn->dst_reg], imm); in check_ld_imm()
10647 mark_reg_known_zero(env, regs, insn->dst_reg); in check_ld_imm()
10734 struct bpf_reg_state *regs = cur_regs(env); in check_ld_abs() local
10776 if (regs[ctx_reg].type != PTR_TO_CTX) { in check_ld_abs()
10789 err = check_ptr_off_reg(env, &regs[ctx_reg], ctx_reg); in check_ld_abs()
10795 mark_reg_not_init(env, regs, caller_saved[i]); in check_ld_abs()
10803 mark_reg_unknown(env, regs, BPF_REG_0); in check_ld_abs()
10805 regs[BPF_REG_0].subreg_def = env->insn_idx + 1; in check_ld_abs()
11697 live = st->regs[i].live; in clean_func_state()
11699 st->regs[i].live |= REG_LIVE_DONE; in clean_func_state()
11704 __mark_reg_not_init(env, &st->regs[i]); in clean_func_state()
11724 if (st->frame[0]->regs[0].live & REG_LIVE_DONE) in clean_verifier_state()
12009 if (!regsafe(env, &old->regs[i], &cur->regs[i], in func_states_equal()
12106 parent_reg = parent->regs; in propagate_liveness()
12107 state_reg = state->regs; in propagate_liveness()
12144 state_reg = state->regs; in propagate_precision()
12188 if (memcmp(&fold->regs[i], &fcur->regs[i], in states_maybe_looping()
12315 if (sl->state.frame[0]->regs[0].live & REG_LIVE_DONE) { in is_state_visited()
12402 cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i]; in is_state_visited()
12404 cur->frame[j]->regs[i].live = REG_LIVE_NONE; in is_state_visited()
12460 struct bpf_reg_state *regs; in do_check() local
12544 regs = cur_regs(env); in do_check()
12567 src_reg_type = regs[insn->src_reg].type; in do_check()
12624 dst_reg_type = regs[insn->dst_reg].type; in do_check()
14902 struct bpf_reg_state *regs; in do_check_common() local
14927 regs = state->frame[state->curframe]->regs; in do_check_common()
14929 ret = btf_prepare_func_args(env, subprog, regs); in do_check_common()
14933 if (regs[i].type == PTR_TO_CTX) in do_check_common()
14934 mark_reg_known_zero(env, regs, i); in do_check_common()
14935 else if (regs[i].type == SCALAR_VALUE) in do_check_common()
14936 mark_reg_unknown(env, regs, i); in do_check_common()
14937 else if (base_type(regs[i].type) == PTR_TO_MEM) { in do_check_common()
14938 const u32 mem_size = regs[i].mem_size; in do_check_common()
14940 mark_reg_known_zero(env, regs, i); in do_check_common()
14941 regs[i].mem_size = mem_size; in do_check_common()
14942 regs[i].id = ++env->id_gen; in do_check_common()
14947 regs[BPF_REG_1].type = PTR_TO_CTX; in do_check_common()
14948 mark_reg_known_zero(env, regs, BPF_REG_1); in do_check_common()
14949 ret = btf_check_subprog_arg_match(env, subprog, regs); in do_check_common()
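
Finally, the do_check_common() matches (14902-14949) show how the entry state of each verification pass is seeded. For a subprogram verified in isolation, the argument registers r1-r5 are typed from its BTF signature; for the main program, only r1 (the context pointer) is set. A sketch with error handling and the exact branch condition abridged:

	regs = state->frame[state->curframe]->regs;		/* line 14927 */
	if (subprog) {	/* condition abridged */
		ret = btf_prepare_func_args(env, subprog, regs);
		for (i = BPF_REG_1; i <= BPF_REG_5; i++) {
			if (regs[i].type == PTR_TO_CTX)
				mark_reg_known_zero(env, regs, i);
			else if (regs[i].type == SCALAR_VALUE)
				mark_reg_unknown(env, regs, i);
			else if (base_type(regs[i].type) == PTR_TO_MEM) {
				/* mark_reg_known_zero() clears the union that
				 * holds mem_size, hence the save/restore
				 */
				const u32 mem_size = regs[i].mem_size;

				mark_reg_known_zero(env, regs, i);
				regs[i].mem_size = mem_size;
				regs[i].id = ++env->id_gen;
			}
		}
	} else {
		/* main program: first argument is the context */
		regs[BPF_REG_1].type = PTR_TO_CTX;		/* line 14947 */
		mark_reg_known_zero(env, regs, BPF_REG_1);
		ret = btf_check_subprog_arg_match(env, subprog, regs);
	}
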