Lines matching refs:regs in kernel/bpf/verifier.c (each hit shows the kernel source line number, the matching code, and the enclosing function; the trailing argument/local tags mark declaration sites)
1366 reg = &state->regs[i]; in print_verifier_state()
2146 struct bpf_reg_state *regs, u32 regno) in mark_reg_known_zero() argument
2152 __mark_reg_not_init(env, regs + regno); in mark_reg_known_zero()
2155 __mark_reg_known_zero(regs + regno); in mark_reg_known_zero()
2200 static void mark_reg_graph_node(struct bpf_reg_state *regs, u32 regno, in mark_reg_graph_node() argument
2203 __mark_reg_known_zero(&regs[regno]); in mark_reg_graph_node()
2204 regs[regno].type = PTR_TO_BTF_ID | MEM_ALLOC; in mark_reg_graph_node()
2205 regs[regno].btf = ds_head->btf; in mark_reg_graph_node()
2206 regs[regno].btf_id = ds_head->value_btf_id; in mark_reg_graph_node()
2207 regs[regno].off = ds_head->node_offset; in mark_reg_graph_node()
2496 struct bpf_reg_state *regs, u32 regno) in mark_reg_unknown() argument
2502 __mark_reg_not_init(env, regs + regno); in mark_reg_unknown()
2505 __mark_reg_unknown(env, regs + regno); in mark_reg_unknown()
2516 struct bpf_reg_state *regs, u32 regno) in mark_reg_not_init() argument
2522 __mark_reg_not_init(env, regs + regno); in mark_reg_not_init()
2525 __mark_reg_not_init(env, regs + regno); in mark_reg_not_init()
2529 struct bpf_reg_state *regs, u32 regno, in mark_btf_ld_reg() argument
2535 mark_reg_unknown(env, regs, regno); in mark_btf_ld_reg()
2538 mark_reg_known_zero(env, regs, regno); in mark_btf_ld_reg()
2539 regs[regno].type = PTR_TO_BTF_ID | flag; in mark_btf_ld_reg()
2540 regs[regno].btf = btf; in mark_btf_ld_reg()
2541 regs[regno].btf_id = btf_id; in mark_btf_ld_reg()
2543 regs[regno].id = ++env->id_gen; in mark_btf_ld_reg()
2550 struct bpf_reg_state *regs = state->regs; in init_reg_state() local
2554 mark_reg_not_init(env, regs, i); in init_reg_state()
2555 regs[i].live = REG_LIVE_NONE; in init_reg_state()
2556 regs[i].parent = NULL; in init_reg_state()
2557 regs[i].subreg_def = DEF_NOT_SUBREG; in init_reg_state()
2561 regs[BPF_REG_FP].type = PTR_TO_STACK; in init_reg_state()
2562 mark_reg_known_zero(env, regs, BPF_REG_FP); in init_reg_state()
2563 regs[BPF_REG_FP].frameno = state->frameno; in init_reg_state()
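
The cluster above is the core idiom of the whole file: each helper takes the per-frame regs array plus a register number and rewrites that single slot (regs + regno and &regs[regno] interchangeably), and init_reg_state() starts every frame with all registers uninitialized except the frame pointer. A minimal standalone sketch of that idiom follows; the struct and helpers are simplified stand-ins, not the kernel's definitions (the real struct bpf_reg_state also tracks tnums, value ranges, liveness, and ids).

#include <stdio.h>

#define MAX_BPF_REG 11		/* R0..R10, as in BPF */
#define BPF_REG_FP  10		/* read-only frame pointer */

enum reg_type { NOT_INIT, SCALAR_VALUE, PTR_TO_STACK };	/* tiny subset */

struct reg_state {
	enum reg_type type;
	int known;		/* stands in for the kernel's tnum tracking */
	long long val;
	int frameno;
};

/* helpers mirror the kernel's (env, regs, regno) convention; env omitted */
static void mark_reg_not_init(struct reg_state *regs, unsigned regno)
{
	regs[regno] = (struct reg_state){ .type = NOT_INIT };
}

static void mark_reg_known_zero(struct reg_state *regs, unsigned regno)
{
	regs[regno].known = 1;
	regs[regno].val = 0;
}

/* mirrors init_reg_state(): everything unreadable except FP */
static void init_reg_state(struct reg_state *regs, int frameno)
{
	for (unsigned i = 0; i < MAX_BPF_REG; i++)
		mark_reg_not_init(regs, i);
	regs[BPF_REG_FP].type = PTR_TO_STACK;
	mark_reg_known_zero(regs, BPF_REG_FP);
	regs[BPF_REG_FP].frameno = frameno;
}

int main(void)
{
	struct reg_state regs[MAX_BPF_REG];

	init_reg_state(regs, 0);
	printf("FP: type=%d known=%d\n", regs[BPF_REG_FP].type,
	       regs[BPF_REG_FP].known);
	return 0;
}
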
3338 static int __check_reg_arg(struct bpf_verifier_env *env, struct bpf_reg_state *regs, u32 regno, in __check_reg_arg() argument
3352 reg = &regs[regno]; in __check_reg_arg()
3378 mark_reg_unknown(env, regs, regno); in __check_reg_arg()
3389 return __check_reg_arg(env, state->regs, regno, t); in check_reg_arg()
3995 reg = &func->regs[j]; in mark_all_scalars_precise()
4029 reg = &func->regs[j]; in mark_all_scalars_imprecise()
4088 reg = &func->regs[i]; in mark_precise_scalar_ids()
4113 reg = &func->regs[i]; in mark_precise_scalar_ids()
4246 reg = &func->regs[regno]; in __mark_chain_precision()
4304 reg = &st->frame[0]->regs[i]; in __mark_chain_precision()
4362 reg = &func->regs[i]; in __mark_chain_precision()
4570 reg = &cur->regs[value_regno]; in check_stack_write_fixed_off()
4704 ptr_reg = &cur->regs[ptr_regno]; in check_stack_write_var_off()
4708 value_reg = &cur->regs[value_regno]; in check_stack_write_var_off()
4814 __mark_reg_const_zero(&state->regs[dst_regno]); in mark_reg_stack_read()
4825 state->regs[dst_regno].precise = true; in mark_reg_stack_read()
4828 mark_reg_unknown(env, state->regs, dst_regno); in mark_reg_stack_read()
4830 state->regs[dst_regno].live |= REG_LIVE_WRITTEN; in mark_reg_stack_read()
4879 s32 subreg_def = state->regs[dst_regno].subreg_def; in check_stack_read_fixed_off()
4881 copy_register_state(&state->regs[dst_regno], reg); in check_stack_read_fixed_off()
4882 state->regs[dst_regno].subreg_def = subreg_def; in check_stack_read_fixed_off()
4896 mark_reg_unknown(env, state->regs, dst_regno); in check_stack_read_fixed_off()
4898 state->regs[dst_regno].live |= REG_LIVE_WRITTEN; in check_stack_read_fixed_off()
4904 copy_register_state(&state->regs[dst_regno], reg); in check_stack_read_fixed_off()
4909 state->regs[dst_regno].live |= REG_LIVE_WRITTEN; in check_stack_read_fixed_off()
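
The check_stack_read_fixed_off() hits above show the two outcomes of a stack fill: if the slot holds a spilled register, copy_register_state() transplants the spilled state into the destination register; otherwise the destination becomes an unknown scalar via mark_reg_unknown(). A hedged sketch of that decision, with simplified stand-in types (hypothetical, not the kernel's code):

#include <stdbool.h>
#include <stdio.h>

enum reg_type { NOT_INIT, SCALAR_VALUE, PTR_TO_MAP_VALUE };

struct reg_state {
	enum reg_type type;
	bool known;
	long long val;
};

struct stack_slot {
	bool is_spilled_reg;		/* full register spilled here? */
	struct reg_state spilled;	/* valid only if is_spilled_reg */
};

static void mark_reg_unknown(struct reg_state *regs, unsigned regno)
{
	regs[regno] = (struct reg_state){ .type = SCALAR_VALUE, .known = false };
}

static void check_stack_read(struct reg_state *regs, unsigned dst_regno,
			     const struct stack_slot *slot)
{
	if (slot->is_spilled_reg)
		regs[dst_regno] = slot->spilled;   /* ~copy_register_state() */
	else
		mark_reg_unknown(regs, dst_regno); /* raw bytes: unknown scalar */
}

int main(void)
{
	struct reg_state regs[11] = { 0 };
	struct stack_slot spill = {
		.is_spilled_reg = true,
		.spilled = { .type = PTR_TO_MAP_VALUE },
	};
	struct stack_slot data = { .is_spilled_reg = false };

	check_stack_read(regs, 1, &spill);
	check_stack_read(regs, 2, &data);
	printf("r1 type=%d, r2 type=%d\n", regs[1].type, regs[2].type);
	return 0;
}
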
5085 struct bpf_reg_state *regs = cur_regs(env); in check_map_access_type() local
5086 struct bpf_map *map = regs[regno].map_ptr; in check_map_access_type()
5147 struct bpf_reg_state *reg = &state->regs[regno]; in check_mem_region_access()
5403 struct bpf_reg_state *reg = &state->regs[regno]; in check_map_access()
5504 struct bpf_reg_state *regs = cur_regs(env); in check_packet_access() local
5505 struct bpf_reg_state *reg = &regs[regno]; in check_packet_access()
5596 struct bpf_reg_state *regs = cur_regs(env); in check_sock_access() local
5597 struct bpf_reg_state *reg = &regs[regno]; in check_sock_access()
6383 struct bpf_reg_state *regs, in check_ptr_to_btf_access() argument
6388 struct bpf_reg_state *reg = regs + regno; in check_ptr_to_btf_access()
6527 mark_btf_ld_reg(env, regs, value_regno, ret, reg->btf, btf_id, flag); in check_ptr_to_btf_access()
6533 struct bpf_reg_state *regs, in check_ptr_to_map_access() argument
6538 struct bpf_reg_state *reg = regs + regno; in check_ptr_to_map_access()
6587 mark_btf_ld_reg(env, regs, value_regno, ret, btf_vmlinux, btf_id, flag); in check_ptr_to_map_access()
6625 struct bpf_reg_state *regs = cur_regs(env); in check_stack_access_within_bounds() local
6626 struct bpf_reg_state *reg = regs + regno; in check_stack_access_within_bounds()
6690 struct bpf_reg_state *regs = cur_regs(env); in check_mem_access() local
6691 struct bpf_reg_state *reg = regs + regno; in check_mem_access()
6717 mark_reg_unknown(env, regs, value_regno); in check_mem_access()
6752 regs[value_regno].type = SCALAR_VALUE; in check_mem_access()
6753 __mark_reg_known(&regs[value_regno], val); in check_mem_access()
6755 mark_reg_unknown(env, regs, value_regno); in check_mem_access()
6782 mark_reg_unknown(env, regs, value_regno); in check_mem_access()
6808 mark_reg_unknown(env, regs, value_regno); in check_mem_access()
6810 mark_reg_known_zero(env, regs, in check_mem_access()
6813 regs[value_regno].id = ++env->id_gen; in check_mem_access()
6819 regs[value_regno].subreg_def = DEF_NOT_SUBREG; in check_mem_access()
6821 regs[value_regno].btf = btf; in check_mem_access()
6822 regs[value_regno].btf_id = btf_id; in check_mem_access()
6825 regs[value_regno].type = reg_type; in check_mem_access()
6853 mark_reg_unknown(env, regs, value_regno); in check_mem_access()
6864 mark_reg_unknown(env, regs, value_regno); in check_mem_access()
6873 mark_reg_unknown(env, regs, value_regno); in check_mem_access()
6877 mark_reg_unknown(env, regs, value_regno); in check_mem_access()
6880 err = check_ptr_to_btf_access(env, regs, regno, off, size, t, in check_mem_access()
6883 err = check_ptr_to_map_access(env, regs, regno, off, size, t, in check_mem_access()
6904 mark_reg_unknown(env, regs, value_regno); in check_mem_access()
6912 regs[value_regno].type == SCALAR_VALUE) { in check_mem_access()
6915 coerce_reg_to_size(&regs[value_regno], size); in check_mem_access()
6917 coerce_reg_to_size_sx(&regs[value_regno], size); in check_mem_access()
7196 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; in check_helper_mem_access() local
7251 return check_ptr_to_btf_access(env, regs, regno, reg->off, in check_helper_mem_access()
7425 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; in process_spin_lock() local
7502 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; in process_timer_func() local
7539 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; in process_kptr_func() local
7603 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; in process_dynptr_func() local
7711 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; in process_iter_arg() local
7829 &fold->regs[i], in widen_imprecise_scalars()
7830 &fcur->regs[i], in widen_imprecise_scalars()
7971 mark_ptr_not_null_reg(&queued_fr->regs[BPF_REG_0]); in process_iter_next_call()
7977 __mark_reg_const_zero(&cur_fr->regs[BPF_REG_0]); in process_iter_next_call()
8160 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; in check_reg_type() local
8384 struct bpf_reg_state *regs) in get_dynptr_arg_reg() argument
8395 state = &regs[BPF_REG_1 + i]; in get_dynptr_arg_reg()
8454 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; in check_func_arg() local
9143 struct bpf_reg_state *reg = &state->regs[regn]; in mark_pkt_end()
9195 struct bpf_reg_state *regs) in clear_caller_saved_regs() argument
9201 mark_reg_not_init(env, regs, caller_saved[i]); in clear_caller_saved_regs()
9202 __check_reg_arg(env, regs, caller_saved[i], DST_OP_NO_MARK); in clear_caller_saved_regs()
9275 err = btf_check_subprog_call(env, subprog, caller->regs); in push_callback_call()
9350 err = btf_check_subprog_call(env, subprog, caller->regs); in check_func_call()
9361 clear_caller_saved_regs(env, caller->regs); in check_func_call()
9364 mark_reg_unknown(env, caller->regs, BPF_REG_0); in check_func_call()
9365 caller->regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG; in check_func_call()
9378 clear_caller_saved_regs(env, caller->regs); in check_func_call()
9402 callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1]; in map_set_for_each_callback_args()
9404 callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY; in map_set_for_each_callback_args()
9405 __mark_reg_known_zero(&callee->regs[BPF_REG_2]); in map_set_for_each_callback_args()
9406 callee->regs[BPF_REG_2].map_ptr = caller->regs[BPF_REG_1].map_ptr; in map_set_for_each_callback_args()
9408 callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE; in map_set_for_each_callback_args()
9409 __mark_reg_known_zero(&callee->regs[BPF_REG_3]); in map_set_for_each_callback_args()
9410 callee->regs[BPF_REG_3].map_ptr = caller->regs[BPF_REG_1].map_ptr; in map_set_for_each_callback_args()
9413 callee->regs[BPF_REG_4] = caller->regs[BPF_REG_3]; in map_set_for_each_callback_args()
9416 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); in map_set_for_each_callback_args()
9430 callee->regs[i] = caller->regs[i]; in set_callee_state()
9473 callee->regs[BPF_REG_1].type = SCALAR_VALUE; in set_loop_callback_state()
9474 callee->regs[BPF_REG_2] = caller->regs[BPF_REG_3]; in set_loop_callback_state()
9477 __mark_reg_not_init(env, &callee->regs[BPF_REG_3]); in set_loop_callback_state()
9478 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); in set_loop_callback_state()
9479 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); in set_loop_callback_state()
9491 struct bpf_map *map_ptr = caller->regs[BPF_REG_1].map_ptr; in set_timer_callback_state()
9496 callee->regs[BPF_REG_1].type = CONST_PTR_TO_MAP; in set_timer_callback_state()
9497 __mark_reg_known_zero(&callee->regs[BPF_REG_1]); in set_timer_callback_state()
9498 callee->regs[BPF_REG_1].map_ptr = map_ptr; in set_timer_callback_state()
9500 callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY; in set_timer_callback_state()
9501 __mark_reg_known_zero(&callee->regs[BPF_REG_2]); in set_timer_callback_state()
9502 callee->regs[BPF_REG_2].map_ptr = map_ptr; in set_timer_callback_state()
9504 callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE; in set_timer_callback_state()
9505 __mark_reg_known_zero(&callee->regs[BPF_REG_3]); in set_timer_callback_state()
9506 callee->regs[BPF_REG_3].map_ptr = map_ptr; in set_timer_callback_state()
9509 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); in set_timer_callback_state()
9510 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); in set_timer_callback_state()
9526 callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1]; in set_find_vma_callback_state()
9528 callee->regs[BPF_REG_2].type = PTR_TO_BTF_ID; in set_find_vma_callback_state()
9529 __mark_reg_known_zero(&callee->regs[BPF_REG_2]); in set_find_vma_callback_state()
9530 callee->regs[BPF_REG_2].btf = btf_vmlinux; in set_find_vma_callback_state()
9531 callee->regs[BPF_REG_2].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_VMA], in set_find_vma_callback_state()
9534 callee->regs[BPF_REG_3] = caller->regs[BPF_REG_4]; in set_find_vma_callback_state()
9537 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); in set_find_vma_callback_state()
9538 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); in set_find_vma_callback_state()
9553 __mark_reg_not_init(env, &callee->regs[BPF_REG_0]); in set_user_ringbuf_callback_state()
9554 mark_dynptr_cb_reg(env, &callee->regs[BPF_REG_1], BPF_DYNPTR_TYPE_LOCAL); in set_user_ringbuf_callback_state()
9555 callee->regs[BPF_REG_2] = caller->regs[BPF_REG_3]; in set_user_ringbuf_callback_state()
9558 __mark_reg_not_init(env, &callee->regs[BPF_REG_3]); in set_user_ringbuf_callback_state()
9559 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); in set_user_ringbuf_callback_state()
9560 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); in set_user_ringbuf_callback_state()
9581 field = reg_find_field_offset(&caller->regs[BPF_REG_1], caller->regs[BPF_REG_1].off, in set_rbtree_add_callback_state()
9586 mark_reg_graph_node(callee->regs, BPF_REG_1, &field->graph_root); in set_rbtree_add_callback_state()
9587 ref_set_non_owning(env, &callee->regs[BPF_REG_1]); in set_rbtree_add_callback_state()
9588 mark_reg_graph_node(callee->regs, BPF_REG_2, &field->graph_root); in set_rbtree_add_callback_state()
9589 ref_set_non_owning(env, &callee->regs[BPF_REG_2]); in set_rbtree_add_callback_state()
9591 __mark_reg_not_init(env, &callee->regs[BPF_REG_3]); in set_rbtree_add_callback_state()
9592 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); in set_rbtree_add_callback_state()
9593 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); in set_rbtree_add_callback_state()
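
The set_*_callback_state() hits above all follow one recipe: populate the callee frame's argument registers from the caller's state, then poison the unused argument registers as not-init so the callback body cannot read them. A sketch modeled loosely on set_timer_callback_state() (simplified stand-in types; hypothetical, not the kernel's code):

#include <stdio.h>

enum reg_type { NOT_INIT, CONST_PTR_TO_MAP, PTR_TO_MAP_KEY, PTR_TO_MAP_VALUE };

struct reg_state {
	enum reg_type type;
	const void *map_ptr;
};

struct func_state {
	struct reg_state regs[11];
};

static void set_timer_callback_state(struct func_state *caller,
				     struct func_state *callee)
{
	const void *map_ptr = caller->regs[1].map_ptr;

	/* callback(map, key, value) */
	callee->regs[1] = (struct reg_state){ CONST_PTR_TO_MAP, map_ptr };
	callee->regs[2] = (struct reg_state){ PTR_TO_MAP_KEY, map_ptr };
	callee->regs[3] = (struct reg_state){ PTR_TO_MAP_VALUE, map_ptr };
	/* unused args must not be readable inside the callback */
	callee->regs[4] = (struct reg_state){ NOT_INIT, NULL };
	callee->regs[5] = (struct reg_state){ NOT_INIT, NULL };
}

int main(void)
{
	static int dummy_map;
	struct func_state caller = { .regs[1] = { CONST_PTR_TO_MAP, &dummy_map } };
	struct func_state callee = { 0 };

	set_timer_callback_state(&caller, &callee);
	printf("callee r3 type=%d\n", callee.regs[3].type);
	return 0;
}
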
9633 r0 = &callee->regs[BPF_REG_0]; in prepare_func_exit()
9672 caller->regs[BPF_REG_0] = *r0; in prepare_func_exit()
9727 static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type, in do_refine_retval_range() argument
9731 struct bpf_reg_state *ret_reg = &regs[BPF_REG_0]; in do_refine_retval_range()
9813 struct bpf_reg_state *regs = cur_regs(env), *reg; in record_func_key() local
9825 reg = &regs[BPF_REG_3]; in record_func_key()
9865 struct bpf_reg_state *regs) in check_bpf_snprintf_call() argument
9867 struct bpf_reg_state *fmt_reg = &regs[BPF_REG_3]; in check_bpf_snprintf_call()
9868 struct bpf_reg_state *data_len_reg = &regs[BPF_REG_5]; in check_bpf_snprintf_call()
9930 struct bpf_reg_state *regs = cur_regs(env); in loop_flag_is_zero() local
9931 struct bpf_reg_state *reg = &regs[BPF_REG_4]; in loop_flag_is_zero()
9965 struct bpf_reg_state *regs; in check_helper_call() local
10058 regs = cur_regs(env); in check_helper_call()
10067 if (regs[meta.release_regno].type == CONST_PTR_TO_DYNPTR) { in check_helper_call()
10071 err = unmark_stack_slots_dynptr(env, &regs[meta.release_regno]); in check_helper_call()
10074 } else if (register_is_null(&regs[meta.release_regno])) { in check_helper_call()
10099 if (!register_is_null(&regs[BPF_REG_2])) { in check_helper_call()
10117 err = check_bpf_snprintf_call(env, regs); in check_helper_call()
10127 if (cur_func(env)->callback_depth < regs[BPF_REG_1].umax_value) { in check_helper_call()
10138 if (regs[BPF_REG_1].type != PTR_TO_MAP_VALUE) { in check_helper_call()
10140 reg_type_str(env, regs[BPF_REG_1].type)); in check_helper_call()
10161 reg = get_dynptr_arg_reg(env, fn, regs); in check_helper_call()
10197 reg = get_dynptr_arg_reg(env, fn, regs); in check_helper_call()
10224 mark_reg_not_init(env, regs, caller_saved[i]); in check_helper_call()
10229 regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG; in check_helper_call()
10238 mark_reg_unknown(env, regs, BPF_REG_0); in check_helper_call()
10241 regs[BPF_REG_0].type = NOT_INIT; in check_helper_call()
10245 mark_reg_known_zero(env, regs, BPF_REG_0); in check_helper_call()
10255 regs[BPF_REG_0].map_ptr = meta.map_ptr; in check_helper_call()
10256 regs[BPF_REG_0].map_uid = meta.map_uid; in check_helper_call()
10257 regs[BPF_REG_0].type = PTR_TO_MAP_VALUE | ret_flag; in check_helper_call()
10260 regs[BPF_REG_0].id = ++env->id_gen; in check_helper_call()
10264 mark_reg_known_zero(env, regs, BPF_REG_0); in check_helper_call()
10265 regs[BPF_REG_0].type = PTR_TO_SOCKET | ret_flag; in check_helper_call()
10268 mark_reg_known_zero(env, regs, BPF_REG_0); in check_helper_call()
10269 regs[BPF_REG_0].type = PTR_TO_SOCK_COMMON | ret_flag; in check_helper_call()
10272 mark_reg_known_zero(env, regs, BPF_REG_0); in check_helper_call()
10273 regs[BPF_REG_0].type = PTR_TO_TCP_SOCK | ret_flag; in check_helper_call()
10276 mark_reg_known_zero(env, regs, BPF_REG_0); in check_helper_call()
10277 regs[BPF_REG_0].type = PTR_TO_MEM | ret_flag; in check_helper_call()
10278 regs[BPF_REG_0].mem_size = meta.mem_size; in check_helper_call()
10284 mark_reg_known_zero(env, regs, BPF_REG_0); in check_helper_call()
10299 regs[BPF_REG_0].type = PTR_TO_MEM | ret_flag; in check_helper_call()
10300 regs[BPF_REG_0].mem_size = tsize; in check_helper_call()
10309 regs[BPF_REG_0].type = PTR_TO_BTF_ID | ret_flag; in check_helper_call()
10310 regs[BPF_REG_0].btf = meta.ret_btf; in check_helper_call()
10311 regs[BPF_REG_0].btf_id = meta.ret_btf_id; in check_helper_call()
10320 mark_reg_known_zero(env, regs, BPF_REG_0); in check_helper_call()
10321 regs[BPF_REG_0].type = PTR_TO_BTF_ID | ret_flag; in check_helper_call()
10326 regs[BPF_REG_0].type |= MEM_ALLOC; in check_helper_call()
10343 regs[BPF_REG_0].btf = ret_btf; in check_helper_call()
10344 regs[BPF_REG_0].btf_id = ret_btf_id; in check_helper_call()
10353 if (type_may_be_null(regs[BPF_REG_0].type)) in check_helper_call()
10354 regs[BPF_REG_0].id = ++env->id_gen; in check_helper_call()
10363 regs[BPF_REG_0].dynptr_id = meta.dynptr_id; in check_helper_call()
10367 regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id; in check_helper_call()
10374 regs[BPF_REG_0].id = id; in check_helper_call()
10376 regs[BPF_REG_0].ref_obj_id = id; in check_helper_call()
10379 do_refine_retval_range(regs, fn->ret_type, func_id, &meta); in check_helper_call()
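
The long check_helper_call() run above implements return-value typing: caller-saved R0-R5 are clobbered to not-init, then R0 is rebuilt from the helper's declared return type, and maybe-null pointers get a fresh id (++env->id_gen) so a later null check can refine every copy at once. A hedged sketch of that flow (simplified stand-in types; hypothetical, not the kernel's code):

#include <stdio.h>

enum ret_type { RET_INTEGER, RET_PTR_TO_MAP_VALUE_OR_NULL };
enum reg_type { NOT_INIT, SCALAR_VALUE, PTR_TO_MAP_VALUE_OR_NULL };

struct reg_state {
	enum reg_type type;
	unsigned id;	/* links all copies of a maybe-null pointer */
};

static unsigned id_gen;

static void retype_r0(struct reg_state *regs, enum ret_type ret)
{
	/* helpers clobber the caller-saved registers R0-R5 */
	for (int i = 0; i <= 5; i++)
		regs[i] = (struct reg_state){ .type = NOT_INIT };

	switch (ret) {
	case RET_INTEGER:
		regs[0].type = SCALAR_VALUE;	/* unknown scalar result */
		break;
	case RET_PTR_TO_MAP_VALUE_OR_NULL:
		regs[0].type = PTR_TO_MAP_VALUE_OR_NULL;
		regs[0].id = ++id_gen;	/* a null check refines all copies */
		break;
	}
}

int main(void)
{
	struct reg_state regs[11] = { 0 };

	retype_r0(regs, RET_PTR_TO_MAP_VALUE_OR_NULL);
	printf("r0 type=%d id=%u\n", regs[0].type, regs[0].id);
	return 0;
}
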
10782 struct bpf_reg_state *regs = cur_regs(env); in get_kfunc_ptr_arg_type() local
10783 struct bpf_reg_state *reg = &regs[regno]; in get_kfunc_ptr_arg_type()
10835 (is_kfunc_arg_mem_size(meta->btf, &args[argno + 1], &regs[regno + 1]) || in get_kfunc_ptr_arg_type()
10836 is_kfunc_arg_const_mem_size(meta->btf, &args[argno + 1], &regs[regno + 1]))) in get_kfunc_ptr_arg_type()
11299 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[i + 1]; in check_kfunc_args() local
11616 struct bpf_reg_state *buff_reg = &regs[regno]; in check_kfunc_args()
11618 struct bpf_reg_state *size_reg = &regs[regno + 1]; in check_kfunc_args()
11734 struct bpf_reg_state *regs = cur_regs(env); in check_kfunc_call() local
11822 err = release_reference(env, regs[meta.release_regno].ref_obj_id); in check_kfunc_call()
11833 release_ref_obj_id = regs[BPF_REG_2].ref_obj_id; in check_kfunc_call()
11834 insn_aux->insert_off = regs[BPF_REG_2].off; in check_kfunc_call()
11852 mark_reg_not_init(env, regs, caller_saved[i]); in check_kfunc_call()
11868 mark_reg_unknown(env, regs, BPF_REG_0); in check_kfunc_call()
11901 mark_reg_known_zero(env, regs, BPF_REG_0); in check_kfunc_call()
11902 regs[BPF_REG_0].type = PTR_TO_BTF_ID | MEM_ALLOC; in check_kfunc_call()
11903 regs[BPF_REG_0].btf = ret_btf; in check_kfunc_call()
11904 regs[BPF_REG_0].btf_id = ret_btf_id; in check_kfunc_call()
11910 mark_reg_known_zero(env, regs, BPF_REG_0); in check_kfunc_call()
11911 regs[BPF_REG_0].type = PTR_TO_BTF_ID | MEM_ALLOC; in check_kfunc_call()
11912 regs[BPF_REG_0].btf = meta.arg_btf; in check_kfunc_call()
11913 regs[BPF_REG_0].btf_id = meta.arg_btf_id; in check_kfunc_call()
11922 mark_reg_graph_node(regs, BPF_REG_0, &field->graph_root); in check_kfunc_call()
11927 mark_reg_graph_node(regs, BPF_REG_0, &field->graph_root); in check_kfunc_call()
11929 mark_reg_known_zero(env, regs, BPF_REG_0); in check_kfunc_call()
11930 regs[BPF_REG_0].type = PTR_TO_BTF_ID | PTR_TRUSTED; in check_kfunc_call()
11931 regs[BPF_REG_0].btf = desc_btf; in check_kfunc_call()
11932 regs[BPF_REG_0].btf_id = meta.ret_btf_id; in check_kfunc_call()
11941 mark_reg_known_zero(env, regs, BPF_REG_0); in check_kfunc_call()
11942 regs[BPF_REG_0].type = PTR_TO_BTF_ID | PTR_UNTRUSTED; in check_kfunc_call()
11943 regs[BPF_REG_0].btf = desc_btf; in check_kfunc_call()
11944 regs[BPF_REG_0].btf_id = meta.arg_constant.value; in check_kfunc_call()
11949 mark_reg_known_zero(env, regs, BPF_REG_0); in check_kfunc_call()
11956 regs[BPF_REG_0].mem_size = meta.arg_constant.value; in check_kfunc_call()
11959 regs[BPF_REG_0].type = PTR_TO_MEM | type_flag; in check_kfunc_call()
11962 regs[BPF_REG_0].type |= MEM_RDONLY; in check_kfunc_call()
11975 regs[BPF_REG_0].dynptr_id = meta.initialized_dynptr.id; in check_kfunc_call()
12006 mark_reg_known_zero(env, regs, BPF_REG_0); in check_kfunc_call()
12007 regs[BPF_REG_0].type = PTR_TO_MEM; in check_kfunc_call()
12008 regs[BPF_REG_0].mem_size = meta.r0_size; in check_kfunc_call()
12011 regs[BPF_REG_0].type |= MEM_RDONLY; in check_kfunc_call()
12015 regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id; in check_kfunc_call()
12017 mark_reg_known_zero(env, regs, BPF_REG_0); in check_kfunc_call()
12018 regs[BPF_REG_0].btf = desc_btf; in check_kfunc_call()
12019 regs[BPF_REG_0].type = PTR_TO_BTF_ID; in check_kfunc_call()
12020 regs[BPF_REG_0].btf_id = ptr_type_id; in check_kfunc_call()
12024 regs[BPF_REG_0].type |= PTR_MAYBE_NULL; in check_kfunc_call()
12026 regs[BPF_REG_0].id = ++env->id_gen; in check_kfunc_call()
12035 regs[BPF_REG_0].id = id; in check_kfunc_call()
12036 regs[BPF_REG_0].ref_obj_id = id; in check_kfunc_call()
12038 ref_set_non_owning(env, &regs[BPF_REG_0]); in check_kfunc_call()
12041 if (reg_may_point_to_spin_lock(&regs[BPF_REG_0]) && !regs[BPF_REG_0].id) in check_kfunc_call()
12042 regs[BPF_REG_0].id = ++env->id_gen; in check_kfunc_call()
12239 struct bpf_reg_state *regs; in sanitize_speculative_path() local
12243 regs = branch->frame[branch->curframe]->regs; in sanitize_speculative_path()
12245 mark_reg_unknown(env, regs, insn->dst_reg); in sanitize_speculative_path()
12247 mark_reg_unknown(env, regs, insn->dst_reg); in sanitize_speculative_path()
12248 mark_reg_unknown(env, regs, insn->src_reg); in sanitize_speculative_path()
12480 struct bpf_reg_state *regs = state->regs, *dst_reg; in adjust_ptr_min_max_vals() local
12491 dst_reg = &regs[dst]; in adjust_ptr_min_max_vals()
13279 struct bpf_reg_state *regs = cur_regs(env); in adjust_scalar_min_max_vals() local
13386 mark_reg_unknown(env, regs, insn->dst_reg); in adjust_scalar_min_max_vals()
13399 mark_reg_unknown(env, regs, insn->dst_reg); in adjust_scalar_min_max_vals()
13412 mark_reg_unknown(env, regs, insn->dst_reg); in adjust_scalar_min_max_vals()
13421 mark_reg_unknown(env, regs, insn->dst_reg); in adjust_scalar_min_max_vals()
13440 struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg; in adjust_reg_min_max_vals() local
13445 dst_reg = &regs[insn->dst_reg]; in adjust_reg_min_max_vals()
13455 src_reg = &regs[insn->src_reg]; in adjust_reg_min_max_vals()
13463 mark_reg_unknown(env, regs, insn->dst_reg); in adjust_reg_min_max_vals()
13523 struct bpf_reg_state *regs = cur_regs(env); in check_alu_op() local
13599 struct bpf_reg_state *src_reg = regs + insn->src_reg; in check_alu_op()
13600 struct bpf_reg_state *dst_reg = regs + insn->dst_reg; in check_alu_op()
13638 mark_reg_unknown(env, regs, insn->dst_reg); in check_alu_op()
13677 mark_reg_unknown(env, regs, in check_alu_op()
13688 mark_reg_unknown(env, regs, insn->dst_reg); in check_alu_op()
13689 regs[insn->dst_reg].type = SCALAR_VALUE; in check_alu_op()
13691 __mark_reg_known(regs + insn->dst_reg, in check_alu_op()
13694 __mark_reg_known(regs + insn->dst_reg, in check_alu_op()
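
The check_alu_op() hits above show the MOV-immediate fast path: the destination is first wiped with mark_reg_unknown(), then pinned to the exact constant with __mark_reg_known(). A small sketch of that wipe-then-pin pattern (simplified stand-in types; hypothetical, not the kernel's code):

#include <stdbool.h>
#include <stdio.h>

struct reg_state {
	bool known;
	long long val;	/* valid only when known */
};

static void mark_reg_unknown(struct reg_state *regs, unsigned regno)
{
	regs[regno] = (struct reg_state){ .known = false };
}

static void mark_reg_known(struct reg_state *regs, unsigned regno, long long imm)
{
	regs[regno] = (struct reg_state){ .known = true, .val = imm };
}

/* rX = imm: wipe whatever the register held, then pin the exact value */
static void check_mov_imm(struct reg_state *regs, unsigned dst, long long imm)
{
	mark_reg_unknown(regs, dst);
	mark_reg_known(regs, dst, imm);
}

int main(void)
{
	struct reg_state regs[11] = { 0 };

	check_mov_imm(regs, 3, 42);
	printf("r3 known=%d val=%lld\n", regs[3].known, regs[3].val);
	return 0;
}
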
14350 struct bpf_reg_state *regs = state->regs, *reg; in mark_ptr_or_null_regs() local
14351 u32 ref_obj_id = regs[regno].ref_obj_id; in mark_ptr_or_null_regs()
14352 u32 id = regs[regno].id; in mark_ptr_or_null_regs()
14488 struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs; in check_cond_jmp_op() local
14507 dst_reg = &regs[insn->dst_reg]; in check_cond_jmp_op()
14519 src_reg = &regs[insn->src_reg]; in check_cond_jmp_op()
14612 other_branch_regs = other_branch->frame[other_branch->curframe]->regs; in check_cond_jmp_op()
14622 struct bpf_reg_state *src_reg = &regs[insn->src_reg]; in check_cond_jmp_op()
14690 eq_branch_regs = regs; in check_cond_jmp_op()
14718 } else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg], in check_cond_jmp_op()
14734 struct bpf_reg_state *regs = cur_regs(env); in check_ld_imm() local
14752 dst_reg = &regs[insn->dst_reg]; in check_ld_imm()
14757 __mark_reg_known(&regs[insn->dst_reg], imm); in check_ld_imm()
14765 mark_reg_known_zero(env, regs, insn->dst_reg); in check_ld_imm()
14852 struct bpf_reg_state *regs = cur_regs(env); in check_ld_abs() local
14899 if (regs[ctx_reg].type != PTR_TO_CTX) { in check_ld_abs()
14912 err = check_ptr_off_reg(env, &regs[ctx_reg], ctx_reg); in check_ld_abs()
14918 mark_reg_not_init(env, regs, caller_saved[i]); in check_ld_abs()
14926 mark_reg_unknown(env, regs, BPF_REG_0); in check_ld_abs()
14928 regs[BPF_REG_0].subreg_def = env->insn_idx + 1; in check_ld_abs()
15896 live = st->regs[i].live; in clean_func_state()
15898 st->regs[i].live |= REG_LIVE_DONE; in clean_func_state()
15903 __mark_reg_not_init(env, &st->regs[i]); in clean_func_state()
15923 if (st->frame[0]->regs[0].live & REG_LIVE_DONE) in clean_verifier_state()
16272 if (!regsafe(env, &old->regs[i], &cur->regs[i], in func_states_equal()
16391 parent_reg = parent->regs; in propagate_liveness()
16392 state_reg = state->regs; in propagate_liveness()
16430 state_reg = state->regs; in propagate_precision()
16488 if (memcmp(&fold->regs[i], &fcur->regs[i], in states_maybe_looping()
16677 iter_reg = &cur_frame->regs[BPF_REG_1]; in is_state_visited()
16806 if (sl->state.frame[0]->regs[0].live & REG_LIVE_DONE && in is_state_visited()
16895 cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i]; in is_state_visited()
16897 cur->frame[j]->regs[i].live = REG_LIVE_NONE; in is_state_visited()
16991 struct bpf_reg_state *regs; in do_check() local
17085 regs = cur_regs(env); in do_check()
17108 src_reg_type = regs[insn->src_reg].type; in do_check()
17148 dst_reg_type = regs[insn->dst_reg].type; in do_check()
17173 dst_reg_type = regs[insn->dst_reg].type; in do_check()
19577 struct bpf_reg_state *regs; in do_check_common() local
19602 regs = state->frame[state->curframe]->regs; in do_check_common()
19604 ret = btf_prepare_func_args(env, subprog, regs); in do_check_common()
19608 if (regs[i].type == PTR_TO_CTX) in do_check_common()
19609 mark_reg_known_zero(env, regs, i); in do_check_common()
19610 else if (regs[i].type == SCALAR_VALUE) in do_check_common()
19611 mark_reg_unknown(env, regs, i); in do_check_common()
19612 else if (base_type(regs[i].type) == PTR_TO_MEM) { in do_check_common()
19613 const u32 mem_size = regs[i].mem_size; in do_check_common()
19615 mark_reg_known_zero(env, regs, i); in do_check_common()
19616 regs[i].mem_size = mem_size; in do_check_common()
19617 regs[i].id = ++env->id_gen; in do_check_common()
19622 regs[BPF_REG_1].type = PTR_TO_CTX; in do_check_common()
19623 mark_reg_known_zero(env, regs, BPF_REG_1); in do_check_common()
19624 ret = btf_check_subprog_arg_match(env, subprog, regs); in do_check_common()
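
The do_check_common() hits close the listing: before walking a program or subprogram, the entry registers are seeded, with the main program receiving R1 = PTR_TO_CTX (known zero) and a subprogram's BTF-declared arguments becoming known-zero pointers or unknown scalars. A hedged sketch of that seeding (simplified stand-in types; hypothetical, not the kernel's code):

#include <stdbool.h>
#include <stdio.h>

enum reg_type { NOT_INIT, SCALAR_VALUE, PTR_TO_CTX, PTR_TO_MEM };

struct reg_state {
	enum reg_type type;
	bool known_zero;
	unsigned mem_size;
};

static void seed_entry_regs(struct reg_state *regs, bool is_subprog,
			    const enum reg_type *arg_types, int nargs)
{
	if (!is_subprog) {
		/* main program: single context argument in R1 */
		regs[1] = (struct reg_state){ .type = PTR_TO_CTX,
					      .known_zero = true };
		return;
	}
	for (int i = 0; i < nargs; i++) {
		struct reg_state *r = &regs[1 + i];

		r->type = arg_types[i];
		if (arg_types[i] == PTR_TO_CTX || arg_types[i] == PTR_TO_MEM)
			r->known_zero = true;	/* pointer with offset 0 */
		/* SCALAR_VALUE stays unknown: any value allowed */
	}
}

int main(void)
{
	struct reg_state regs[11] = { 0 };
	enum reg_type args[] = { PTR_TO_CTX, SCALAR_VALUE };

	seed_entry_regs(regs, true, args, 2);
	printf("r1 type=%d, r2 type=%d\n", regs[1].type, regs[2].type);
	return 0;
}
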