Lines matching refs:state (symbol references to "state") in kernel/bpf/verifier.c
228 static void bpf_map_key_store(struct bpf_insn_aux_data *aux, u64 state) in bpf_map_key_store() argument
232 aux->map_key_state = state | BPF_MAP_KEY_SEEN | in bpf_map_key_store()
627 static bool is_spi_bounds_valid(struct bpf_func_state *state, int spi, int nr_slots) in is_spi_bounds_valid() argument
629 int allocated_slots = state->allocated_stack / BPF_REG_SIZE; in is_spi_bounds_valid()
712 struct bpf_func_state *state = func(env, reg); in mark_stack_slots_dynptr() local
718 if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS)) in mark_stack_slots_dynptr()
722 state->stack[spi].slot_type[i] = STACK_DYNPTR; in mark_stack_slots_dynptr()
723 state->stack[spi - 1].slot_type[i] = STACK_DYNPTR; in mark_stack_slots_dynptr()
730 state->stack[spi].spilled_ptr.dynptr.first_slot = true; in mark_stack_slots_dynptr()
731 state->stack[spi].spilled_ptr.dynptr.type = type; in mark_stack_slots_dynptr()
732 state->stack[spi - 1].spilled_ptr.dynptr.type = type; in mark_stack_slots_dynptr()
740 state->stack[spi].spilled_ptr.id = id; in mark_stack_slots_dynptr()
741 state->stack[spi - 1].spilled_ptr.id = id; in mark_stack_slots_dynptr()
749 struct bpf_func_state *state = func(env, reg); in unmark_stack_slots_dynptr() local
754 if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS)) in unmark_stack_slots_dynptr()
758 state->stack[spi].slot_type[i] = STACK_INVALID; in unmark_stack_slots_dynptr()
759 state->stack[spi - 1].slot_type[i] = STACK_INVALID; in unmark_stack_slots_dynptr()
763 if (dynptr_type_refcounted(state->stack[spi].spilled_ptr.dynptr.type)) { in unmark_stack_slots_dynptr()
764 release_reference(env, state->stack[spi].spilled_ptr.id); in unmark_stack_slots_dynptr()
765 state->stack[spi].spilled_ptr.id = 0; in unmark_stack_slots_dynptr()
766 state->stack[spi - 1].spilled_ptr.id = 0; in unmark_stack_slots_dynptr()
769 state->stack[spi].spilled_ptr.dynptr.first_slot = false; in unmark_stack_slots_dynptr()
770 state->stack[spi].spilled_ptr.dynptr.type = 0; in unmark_stack_slots_dynptr()
771 state->stack[spi - 1].spilled_ptr.dynptr.type = 0; in unmark_stack_slots_dynptr()
778 struct bpf_func_state *state = func(env, reg); in is_dynptr_reg_valid_uninit() local
782 if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS)) in is_dynptr_reg_valid_uninit()
786 if (state->stack[spi].slot_type[i] == STACK_DYNPTR || in is_dynptr_reg_valid_uninit()
787 state->stack[spi - 1].slot_type[i] == STACK_DYNPTR) in is_dynptr_reg_valid_uninit()
797 struct bpf_func_state *state = func(env, reg); in is_dynptr_reg_valid_init() local
801 if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS) || in is_dynptr_reg_valid_init()
802 !state->stack[spi].spilled_ptr.dynptr.first_slot) in is_dynptr_reg_valid_init()
806 if (state->stack[spi].slot_type[i] != STACK_DYNPTR || in is_dynptr_reg_valid_init()
807 state->stack[spi - 1].slot_type[i] != STACK_DYNPTR) in is_dynptr_reg_valid_init()
818 struct bpf_func_state *state = func(env, reg); in is_dynptr_type_expected() local
828 return state->stack[spi].spilled_ptr.dynptr.type == dynptr_type; in is_dynptr_type_expected()
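The cluster above (mark_stack_slots_dynptr through is_dynptr_type_expected) is the verifier's two-slot dynptr bookkeeping: a dynptr occupies two adjacent 8-byte stack slots (BPF_DYNPTR_NR_SLOTS), every byte of both slots is tagged STACK_DYNPTR, the dynptr type is recorded on both slots, and first_slot is set only on the first slot so a pointer into the middle of a dynptr fails validation. A minimal standalone sketch of that pattern (constants and names mirror the listing; this is an illustration, not the kernel code):

    #include <stdbool.h>
    #include <stdio.h>

    #define REG_SIZE  8     /* mirrors BPF_REG_SIZE */
    #define MAX_SLOTS 64    /* arbitrary frame size for the sketch */

    enum slot_tag { SLOT_INVALID, SLOT_DYNPTR };

    struct slot {
        enum slot_tag tag[REG_SIZE]; /* one tag per byte, like slot_type[] */
        int dynptr_type;             /* valid while tagged SLOT_DYNPTR */
        bool first_slot;             /* set only on the first of the pair */
    };

    static struct slot stack[MAX_SLOTS];

    /* a dynptr spans slots spi and spi - 1, exactly as in the listing */
    static void mark_dynptr(int spi, int dynptr_type)
    {
        for (int i = 0; i < REG_SIZE; i++) {
            stack[spi].tag[i] = SLOT_DYNPTR;
            stack[spi - 1].tag[i] = SLOT_DYNPTR;
        }
        stack[spi].dynptr_type = dynptr_type;
        stack[spi - 1].dynptr_type = dynptr_type;
        stack[spi].first_slot = true;
    }

    static bool is_valid_init(int spi)
    {
        if (!stack[spi].first_slot)  /* reject pointers into the middle */
            return false;
        for (int i = 0; i < REG_SIZE; i++)
            if (stack[spi].tag[i] != SLOT_DYNPTR ||
                stack[spi - 1].tag[i] != SLOT_DYNPTR)
                return false;
        return true;
    }

    int main(void)
    {
        mark_dynptr(5, 1);
        printf("spi=5: %d\n", is_valid_init(5)); /* 1: first slot */
        printf("spi=4: %d\n", is_valid_init(4)); /* 0: second slot */
        return 0;
    }

The id stored alongside (lines 740-741) ties refcounted dynptrs into the reference tracking that appears further down in the listing.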
846 const struct bpf_func_state *state, in print_verifier_state() argument
853 if (state->frameno) in print_verifier_state()
854 verbose(env, " frame%d:", state->frameno); in print_verifier_state()
856 reg = &state->regs[i]; in print_verifier_state()
940 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { in print_verifier_state()
946 if (state->stack[i].slot_type[j] != STACK_INVALID) in print_verifier_state()
949 state->stack[i].slot_type[j]]; in print_verifier_state()
957 print_liveness(env, state->stack[i].spilled_ptr.live); in print_verifier_state()
958 if (is_spilled_reg(&state->stack[i])) { in print_verifier_state()
959 reg = &state->stack[i].spilled_ptr; in print_verifier_state()
970 if (state->acquired_refs && state->refs[0].id) { in print_verifier_state()
971 verbose(env, " refs=%d", state->refs[0].id); in print_verifier_state()
972 for (i = 1; i < state->acquired_refs; i++) in print_verifier_state()
973 if (state->refs[i].id) in print_verifier_state()
974 verbose(env, ",%d", state->refs[i].id); in print_verifier_state()
976 if (state->in_callback_fn) in print_verifier_state()
978 if (state->in_async_callback_fn) in print_verifier_state()
992 const struct bpf_func_state *state) in print_insn_state() argument
1001 print_verifier_state(env, state, false); in print_insn_state()
1087 static int resize_reference_state(struct bpf_func_state *state, size_t n) in resize_reference_state() argument
1089 state->refs = realloc_array(state->refs, state->acquired_refs, n, in resize_reference_state()
1091 if (!state->refs) in resize_reference_state()
1094 state->acquired_refs = n; in resize_reference_state()
1098 static int grow_stack_state(struct bpf_func_state *state, int size) in grow_stack_state() argument
1100 size_t old_n = state->allocated_stack / BPF_REG_SIZE, n = size / BPF_REG_SIZE; in grow_stack_state()
1105 state->stack = realloc_array(state->stack, old_n, n, sizeof(struct bpf_stack_state)); in grow_stack_state()
1106 if (!state->stack) in grow_stack_state()
1109 state->allocated_stack = size; in grow_stack_state()
1120 struct bpf_func_state *state = cur_func(env); in acquire_reference_state() local
1121 int new_ofs = state->acquired_refs; in acquire_reference_state()
1124 err = resize_reference_state(state, state->acquired_refs + 1); in acquire_reference_state()
1128 state->refs[new_ofs].id = id; in acquire_reference_state()
1129 state->refs[new_ofs].insn_idx = insn_idx; in acquire_reference_state()
1130 state->refs[new_ofs].callback_ref = state->in_callback_fn ? state->frameno : 0; in acquire_reference_state()
1136 static int release_reference_state(struct bpf_func_state *state, int ptr_id) in release_reference_state() argument
1140 last_idx = state->acquired_refs - 1; in release_reference_state()
1141 for (i = 0; i < state->acquired_refs; i++) { in release_reference_state()
1142 if (state->refs[i].id == ptr_id) { in release_reference_state()
1144 if (state->in_callback_fn && state->refs[i].callback_ref != state->frameno) in release_reference_state()
1147 memcpy(&state->refs[i], &state->refs[last_idx], in release_reference_state()
1148 sizeof(*state->refs)); in release_reference_state()
1149 memset(&state->refs[last_idx], 0, sizeof(*state->refs)); in release_reference_state()
1150 state->acquired_refs--; in release_reference_state()
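resize_reference_state through release_reference_state (lines 1087-1150) maintain the acquired-references array: acquiring grows the array by one entry and records the id and the acquiring insn_idx; releasing scans linearly and removes the match by copying the last entry over it, zeroing the vacated tail, and shrinking the count. Order is not preserved, which is harmless because the array is only ever scanned. A hedged sketch of that swap-with-last pattern:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct ref { int id; int insn_idx; };

    struct ref_state {
        struct ref *refs;
        int acquired;
    };

    static int acquire_ref(struct ref_state *s, int id, int insn_idx)
    {
        struct ref *r = realloc(s->refs, (s->acquired + 1) * sizeof(*r));

        if (!r)
            return -1;
        s->refs = r;
        s->refs[s->acquired].id = id;
        s->refs[s->acquired].insn_idx = insn_idx;
        s->acquired++;
        return 0;
    }

    static int release_ref(struct ref_state *s, int id)
    {
        int last = s->acquired - 1;

        for (int i = 0; i < s->acquired; i++) {
            if (s->refs[i].id != id)
                continue;
            /* swap-with-last removal: O(1), order not preserved */
            memcpy(&s->refs[i], &s->refs[last], sizeof(*s->refs));
            memset(&s->refs[last], 0, sizeof(*s->refs));
            s->acquired--;
            return 0;
        }
        return -1; /* releasing an unacquired id is a verifier error */
    }

    int main(void)
    {
        struct ref_state s = { 0 };

        acquire_ref(&s, 1, 10);
        acquire_ref(&s, 2, 20);
        release_ref(&s, 1);
        printf("remaining: id=%d (count=%d)\n", s.refs[0].id, s.acquired);
        free(s.refs);
        return 0;
    }

check_reference_leak (lines 7411-7422, below) is the consumer of this array: any id still present at program exit is reported as a leak.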
1157 static void free_func_state(struct bpf_func_state *state) in free_func_state() argument
1159 if (!state) in free_func_state()
1161 kfree(state->refs); in free_func_state()
1162 kfree(state->stack); in free_func_state()
1163 kfree(state); in free_func_state()
1166 static void clear_jmp_history(struct bpf_verifier_state *state) in clear_jmp_history() argument
1168 kfree(state->jmp_history); in clear_jmp_history()
1169 state->jmp_history = NULL; in clear_jmp_history()
1170 state->jmp_history_cnt = 0; in clear_jmp_history()
1173 static void free_verifier_state(struct bpf_verifier_state *state, in free_verifier_state() argument
1178 for (i = 0; i <= state->curframe; i++) { in free_verifier_state()
1179 free_func_state(state->frame[i]); in free_verifier_state()
1180 state->frame[i] = NULL; in free_verifier_state()
1182 clear_jmp_history(state); in free_verifier_state()
1184 kfree(state); in free_verifier_state()
1761 struct bpf_func_state *state) in init_reg_state() argument
1763 struct bpf_reg_state *regs = state->regs; in init_reg_state()
1776 regs[BPF_REG_FP].frameno = state->frameno; in init_reg_state()
1781 struct bpf_func_state *state, in init_func_state() argument
1784 state->callsite = callsite; in init_func_state()
1785 state->frameno = frameno; in init_func_state()
1786 state->subprogno = subprogno; in init_func_state()
1787 state->callback_ret_range = tnum_range(0, 0); in init_func_state()
1788 init_reg_state(env, state); in init_func_state()
2286 const struct bpf_reg_state *state, in mark_reg_read() argument
2289 bool writes = parent == state->parent; /* Observe write marks */ in mark_reg_read()
2294 if (writes && state->live & REG_LIVE_WRITTEN) in mark_reg_read()
2322 state = parent; in mark_reg_read()
2323 parent = state->parent; in mark_reg_read()
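mark_reg_read (lines 2286-2323) propagates a read mark up the chain of parent verifier states: each parent's copy of the register is marked as read until the walk reaches a state that already wrote the register, since a write mark (REG_LIVE_WRITTEN) screens older values off from this read. A simplified sketch of that walk; the real function also handles 32-bit subregister liveness and the "writes" subtlety visible at line 2289:

    #include <stdio.h>

    #define LIVE_READ    1
    #define LIVE_WRITTEN 2

    struct reg {
        int live;
        struct reg *parent; /* same register in the parent state */
    };

    /* propagate a read mark upward until a write mark screens it off */
    static void mark_read(struct reg *state, struct reg *parent)
    {
        while (parent) {
            if (state->live & LIVE_WRITTEN)
                break;             /* this state wrote the reg first */
            parent->live |= LIVE_READ; /* parent's value was observed */
            state = parent;
            parent = state->parent;
        }
    }

    int main(void)
    {
        struct reg grandparent = { 0, NULL };
        struct reg parent = { 0, &grandparent };
        struct reg cur = { 0, &parent };

        mark_read(&cur, cur.parent);
        printf("parent read=%d grandparent read=%d\n",
               parent.live & LIVE_READ, grandparent.live & LIVE_READ);
        return 0;
    }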
2472 struct bpf_func_state *state = vstate->frame[vstate->curframe]; in check_reg_arg() local
2474 struct bpf_reg_state *reg, *regs = state->regs; in check_reg_arg()
3225 static void save_register_state(struct bpf_func_state *state, in save_register_state() argument
3231 copy_register_state(&state->stack[spi].spilled_ptr, reg); in save_register_state()
3233 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; in save_register_state()
3236 state->stack[spi].slot_type[i - 1] = STACK_SPILL; in save_register_state()
3240 scrub_spilled_slot(&state->stack[spi].slot_type[i - 1]); in save_register_state()
3253 struct bpf_func_state *state, in check_stack_write_fixed_off() argument
3263 err = grow_stack_state(state, round_up(slot + 1, BPF_REG_SIZE)); in check_stack_write_fixed_off()
3270 is_spilled_reg(&state->stack[spi]) && in check_stack_write_fixed_off()
3283 u8 type = state->stack[spi].slot_type[i]; in check_stack_write_fixed_off()
3309 save_register_state(state, spi, reg, size); in check_stack_write_fixed_off()
3312 state->stack[spi].spilled_ptr.id = 0; in check_stack_write_fixed_off()
3319 save_register_state(state, spi, &fake_reg, size); in check_stack_write_fixed_off()
3327 if (state != cur && reg->type == PTR_TO_STACK) { in check_stack_write_fixed_off()
3331 save_register_state(state, spi, reg, size); in check_stack_write_fixed_off()
3336 state->stack[spi].spilled_ptr.type = NOT_INIT; in check_stack_write_fixed_off()
3338 if (is_spilled_reg(&state->stack[spi])) in check_stack_write_fixed_off()
3340 scrub_spilled_slot(&state->stack[spi].slot_type[i]); in check_stack_write_fixed_off()
3351 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; in check_stack_write_fixed_off()
3365 state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] = in check_stack_write_fixed_off()
3392 struct bpf_func_state *state, in check_stack_write_var_off() argument
3415 err = grow_stack_state(state, round_up(-min_off, BPF_REG_SIZE)); in check_stack_write_var_off()
3427 stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE]; in check_stack_write_var_off()
3448 state->stack[spi].spilled_ptr.type = NOT_INIT; in check_stack_write_var_off()
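In the stack-write path, save_register_state (lines 3225-3240) copies the spilled register into stack[spi].spilled_ptr and retags the slot's per-byte markers: the spilled bytes at the high end of the slot become STACK_SPILL and the remainder is scrubbed so no stale tag survives. My reading of scrub_spilled_slot is that it downgrades any already-initialized byte to STACK_MISC; that assumption is flagged in the comment below. A sketch of just the tagging loop:

    #include <stdio.h>

    #define REG_SIZE 8

    enum { STACK_INVALID, STACK_MISC, STACK_SPILL };

    /*
     * Retag one 8-byte slot after spilling `size` register bytes into it.
     * The spilled bytes sit at the high end of the slot; the rest is
     * scrubbed. Assumption: scrubbing downgrades initialized bytes to
     * STACK_MISC and leaves STACK_INVALID alone.
     */
    static void tag_spill(int tag[REG_SIZE], int size)
    {
        int i;

        for (i = REG_SIZE; i > REG_SIZE - size; i--)
            tag[i - 1] = STACK_SPILL;
        for (; i; i--)
            if (tag[i - 1] != STACK_INVALID)
                tag[i - 1] = STACK_MISC; /* scrub stale tags */
    }

    int main(void)
    {
        int tag[REG_SIZE];

        for (int i = 0; i < REG_SIZE; i++)
            tag[i] = STACK_SPILL;   /* leftover full 8-byte spill */
        tag_spill(tag, 4);          /* overwrite with a 4-byte spill */
        for (int i = 0; i < REG_SIZE; i++)
            printf("%d", tag[i]);   /* 11112222: scrubbed, then spilled */
        printf("\n");
        return 0;
    }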
3494 struct bpf_func_state *state = vstate->frame[vstate->curframe]; in mark_reg_stack_read() local
3511 __mark_reg_const_zero(&state->regs[dst_regno]); in mark_reg_stack_read()
3522 state->regs[dst_regno].precise = true; in mark_reg_stack_read()
3525 mark_reg_unknown(env, state->regs, dst_regno); in mark_reg_stack_read()
3527 state->regs[dst_regno].live |= REG_LIVE_WRITTEN; in mark_reg_stack_read()
3545 struct bpf_func_state *state = vstate->frame[vstate->curframe]; in check_stack_read_fixed_off() local
3574 s32 subreg_def = state->regs[dst_regno].subreg_def; in check_stack_read_fixed_off()
3576 copy_register_state(&state->regs[dst_regno], reg); in check_stack_read_fixed_off()
3577 state->regs[dst_regno].subreg_def = subreg_def; in check_stack_read_fixed_off()
3589 mark_reg_unknown(env, state->regs, dst_regno); in check_stack_read_fixed_off()
3591 state->regs[dst_regno].live |= REG_LIVE_WRITTEN; in check_stack_read_fixed_off()
3597 copy_register_state(&state->regs[dst_regno], reg); in check_stack_read_fixed_off()
3602 state->regs[dst_regno].live |= REG_LIVE_WRITTEN; in check_stack_read_fixed_off()
3698 struct bpf_func_state *state = func(env, reg); in check_stack_read() local
3726 err = check_stack_read_fixed_off(env, state, off, size, in check_stack_read()
3755 struct bpf_func_state *state = func(env, reg); in check_stack_write() local
3760 err = check_stack_write_fixed_off(env, state, off, size, in check_stack_write()
3766 err = check_stack_write_var_off(env, state, in check_stack_write()
3837 struct bpf_func_state *state = vstate->frame[vstate->curframe]; in check_mem_region_access() local
3838 struct bpf_reg_state *reg = &state->regs[regno]; in check_mem_region_access()
4058 struct bpf_func_state *state = vstate->frame[vstate->curframe]; in check_map_access() local
4059 struct bpf_reg_state *reg = &state->regs[regno]; in check_map_access()
4894 struct bpf_func_state *state, in check_stack_slot_within_bounds() argument
4902 min_valid_off = -state->allocated_stack; in check_stack_slot_within_bounds()
4921 struct bpf_func_state *state = func(env, reg); in check_stack_access_within_bounds() local
4948 err = check_stack_slot_within_bounds(min_off, state, type); in check_stack_access_within_bounds()
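check_stack_slot_within_bounds (lines 4894-4902) accepts only offsets inside the frame's currently allocated window: stack offsets are negative relative to the frame pointer, so the valid range is [-allocated_stack, -1]. If I recall correctly, helper-induced accesses are instead checked against the hard MAX_BPF_STACK limit because they may still grow the stack; that detail is modeled by a flag in this sketch:

    #include <stdio.h>

    #define MAX_BPF_STACK 512

    /*
     * Offsets are fp-relative and the stack grows down, so valid slots
     * live in [-allocated, -1]. The helper_access flag models the
     * (assumed) relaxed check against the hard limit.
     */
    static int slot_within_bounds(int off, int allocated, int helper_access)
    {
        int min_valid_off = helper_access ? -MAX_BPF_STACK : -allocated;

        if (off < min_valid_off || off > -1)
            return -1; /* out of bounds: reject the access */
        return 0;
    }

    int main(void)
    {
        printf("%d\n", slot_within_bounds(-8, 64, 0));   /*  0: ok */
        printf("%d\n", slot_within_bounds(-128, 64, 0)); /* -1: below window */
        printf("%d\n", slot_within_bounds(0, 64, 0));    /* -1: fp[0] invalid */
        return 0;
    }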
4979 struct bpf_func_state *state; in check_mem_access() local
5123 state = func(env, reg); in check_mem_access()
5124 err = update_stack_depth(env, state, off); in check_mem_access()
5332 struct bpf_func_state *state = func(env, reg); in check_stack_range_initialized() local
5402 if (state->allocated_stack <= slot) in check_stack_range_initialized()
5404 stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE]; in check_stack_range_initialized()
5415 if (is_spilled_reg(&state->stack[spi]) && in check_stack_range_initialized()
5416 (state->stack[spi].spilled_ptr.type == SCALAR_VALUE || in check_stack_range_initialized()
5419 __mark_reg_unknown(env, &state->stack[spi].spilled_ptr); in check_stack_range_initialized()
5421 scrub_spilled_slot(&state->stack[spi].slot_type[j]); in check_stack_range_initialized()
5442 mark_reg_read(env, &state->stack[spi].spilled_ptr, in check_stack_range_initialized()
5443 state->stack[spi].spilled_ptr.parent, in check_stack_range_initialized()
5451 return update_stack_depth(env, state, min_off); in check_stack_range_initialized()
6145 struct bpf_func_state *state = func(env, reg); in stack_slot_get_id() local
6148 return state->stack[spi].spilled_ptr.id; in stack_slot_get_id()
6211 struct bpf_func_state *state = func(env, reg); in check_func_arg() local
6214 if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS) || in check_func_arg()
6215 !state->stack[spi].spilled_ptr.id) { in check_func_arg()
6830 struct bpf_func_state *state; in clear_all_pkt_pointers() local
6833 bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({ in clear_all_pkt_pointers()
6846 struct bpf_func_state *state = vstate->frame[vstate->curframe]; in mark_pkt_end() local
6847 struct bpf_reg_state *reg = &state->regs[regn]; in mark_pkt_end()
6871 struct bpf_func_state *state; in release_reference() local
6879 bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({ in release_reference()
6916 struct bpf_verifier_state *state = env->cur_state; in __check_func_call() local
6922 if (state->curframe + 1 >= MAX_CALL_FRAMES) { in __check_func_call()
6924 state->curframe + 2); in __check_func_call()
6928 caller = state->frame[state->curframe]; in __check_func_call()
6929 if (state->frame[state->curframe + 1]) { in __check_func_call()
6931 state->curframe + 1); in __check_func_call()
7001 state->frame[state->curframe + 1] = callee; in __check_func_call()
7010 state->curframe + 1 /* frameno within this callchain */, in __check_func_call()
7025 state->curframe++; in __check_func_call()
7040 state->frame[state->curframe + 1] = NULL; in __check_func_call()
7237 struct bpf_verifier_state *state = env->cur_state; in prepare_func_exit() local
7242 callee = state->frame[state->curframe]; in prepare_func_exit()
7255 caller = state->frame[state->curframe - 1]; in prepare_func_exit()
7301 state->frame[state->curframe--] = NULL; in prepare_func_exit()
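__check_func_call and prepare_func_exit (lines 6916-7301) are the push and pop of the verifier's simulated call stack: a call allocates a fresh bpf_func_state, rejects chains that would exceed MAX_CALL_FRAMES, installs it at frame[curframe + 1], and bumps curframe; the matching exit copies r0 back to the caller and pops with frame[curframe--] = NULL. A minimal sketch of that discipline:

    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_CALL_FRAMES 8

    struct func_state { int callsite; int frameno; };

    struct verifier_state {
        struct func_state *frame[MAX_CALL_FRAMES];
        int curframe;
    };

    static int push_frame(struct verifier_state *st, int callsite)
    {
        struct func_state *callee;

        if (st->curframe + 1 >= MAX_CALL_FRAMES)
            return -1; /* call chain too deep: reject the program */
        callee = calloc(1, sizeof(*callee));
        if (!callee)
            return -1;
        callee->callsite = callsite;
        callee->frameno = st->curframe + 1;
        st->frame[st->curframe + 1] = callee;
        st->curframe++;
        return 0;
    }

    static int pop_frame(struct verifier_state *st)
    {
        if (st->curframe == 0)
            return -1; /* nothing to return from */
        free(st->frame[st->curframe]);
        st->frame[st->curframe--] = NULL;
        return 0;
    }

    int main(void)
    {
        struct verifier_state st = { 0 };

        st.frame[0] = calloc(1, sizeof(struct func_state));
        push_frame(&st, 42);
        printf("depth after call: %d\n", st.curframe); /* 1 */
        pop_frame(&st);
        printf("depth after exit: %d\n", st.curframe); /* 0 */
        free(st.frame[0]);
        return 0;
    }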
7411 struct bpf_func_state *state = cur_func(env); in check_reference_leak() local
7415 if (state->frameno && !state->in_callback_fn) in check_reference_leak()
7418 for (i = 0; i < state->acquired_refs; i++) { in check_reference_leak()
7419 if (state->in_callback_fn && state->refs[i].callback_ref != state->frameno) in check_reference_leak()
7422 state->refs[i].id, state->refs[i].insn_idx); in check_reference_leak()
7506 struct bpf_loop_inline_state *state = &cur_aux(env)->loop_inline_state; in update_loop_inline_state() local
7508 if (!state->initialized) { in update_loop_inline_state()
7509 state->initialized = 1; in update_loop_inline_state()
7510 state->fit_for_inline = loop_flag_is_zero(env); in update_loop_inline_state()
7511 state->callback_subprogno = subprogno; in update_loop_inline_state()
7515 if (!state->fit_for_inline) in update_loop_inline_state()
7518 state->fit_for_inline = (loop_flag_is_zero(env) && in update_loop_inline_state()
7519 state->callback_subprogno == subprogno); in update_loop_inline_state()
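update_loop_inline_state (lines 7506-7519) is a small monotonic latch per bpf_loop callsite: the first visit records whether the flags argument is provably zero and which callback subprog is passed; each later visit can only AND the verdict down, so a callsite once seen with nonzero flags or a different callback is permanently unfit for inlining. Sketch:

    #include <stdbool.h>
    #include <stdio.h>

    struct loop_inline_state {
        bool initialized;
        bool fit_for_inline;
        int callback_subprogno;
    };

    /* monotonic latch: once unfit, a callsite stays unfit */
    static void update(struct loop_inline_state *s, bool flag_is_zero,
                       int subprogno)
    {
        if (!s->initialized) {
            s->initialized = true;
            s->fit_for_inline = flag_is_zero;
            s->callback_subprogno = subprogno;
            return;
        }
        if (!s->fit_for_inline)
            return;
        s->fit_for_inline = flag_is_zero &&
                            s->callback_subprogno == subprogno;
    }

    int main(void)
    {
        struct loop_inline_state s = { 0 };

        update(&s, true, 3);
        update(&s, true, 3);
        printf("fit: %d\n", s.fit_for_inline); /* 1 */
        update(&s, true, 4);                   /* different callback */
        printf("fit: %d\n", s.fit_for_inline); /* 0, and stays 0 */
        return 0;
    }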
8485 struct bpf_func_state *state = vstate->frame[vstate->curframe]; in adjust_ptr_min_max_vals() local
8486 struct bpf_reg_state *regs = state->regs, *dst_reg; in adjust_ptr_min_max_vals()
9445 struct bpf_func_state *state = vstate->frame[vstate->curframe]; in adjust_reg_min_max_vals() local
9446 struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg; in adjust_reg_min_max_vals()
9514 print_verifier_state(env, state, true); in adjust_reg_min_max_vals()
9519 print_verifier_state(env, state, true); in adjust_reg_min_max_vals()
9705 struct bpf_func_state *state; in find_good_pkt_pointers() local
9772 bpf_for_each_reg_in_vstate(vstate, state, reg, ({ in find_good_pkt_pointers()
10233 static void mark_ptr_or_null_reg(struct bpf_func_state *state, in mark_ptr_or_null_reg() argument
10281 struct bpf_func_state *state = vstate->frame[vstate->curframe]; in mark_ptr_or_null_regs() local
10282 struct bpf_reg_state *regs = state->regs, *reg; in mark_ptr_or_null_regs()
10291 WARN_ON_ONCE(release_reference_state(state, id)); in mark_ptr_or_null_regs()
10293 bpf_for_each_reg_in_vstate(vstate, state, reg, ({ in mark_ptr_or_null_regs()
10294 mark_ptr_or_null_reg(state, reg, id, is_null); in mark_ptr_or_null_regs()
10406 struct bpf_func_state *state; in find_equal_scalars() local
10409 bpf_for_each_reg_in_vstate(vstate, state, reg, ({ in find_equal_scalars()
11021 struct bpf_func_state *state = cur->frame[cur->curframe]; in explored_state() local
11023 return &env->explored_states[(idx ^ state->callsite) % state_htab_size(env)]; in explored_state()
11772 if (sl->state.branches) in clean_live_states()
11774 if (sl->state.insn_idx != insn || in clean_live_states()
11775 sl->state.curframe != cur->curframe) in clean_live_states()
11778 if (sl->state.frame[i]->callsite != cur->frame[i]->callsite) in clean_live_states()
11780 clean_verifier_state(env, &sl->state); in clean_live_states()
12093 struct bpf_func_state *state, *parent; in propagate_liveness() local
12105 state = vstate->frame[frame]; in propagate_liveness()
12107 state_reg = state->regs; in propagate_liveness()
12119 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE && in propagate_liveness()
12122 state_reg = &state->stack[i].spilled_ptr; in propagate_liveness()
12139 struct bpf_func_state *state; in propagate_precision() local
12143 state = old->frame[fr]; in propagate_precision()
12144 state_reg = state->regs; in propagate_precision()
12157 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { in propagate_precision()
12158 if (!is_spilled_reg(&state->stack[i])) in propagate_precision()
12160 state_reg = &state->stack[i].spilled_ptr; in propagate_precision()
12229 if (sl->state.insn_idx != insn_idx) in is_state_visited()
12232 if (sl->state.branches) { in is_state_visited()
12233 struct bpf_func_state *frame = sl->state.frame[sl->state.curframe]; in is_state_visited()
12248 } else if (states_maybe_looping(&sl->state, cur) && in is_state_visited()
12249 states_equal(env, &sl->state, cur)) { in is_state_visited()
12271 if (states_equal(env, &sl->state, cur)) { in is_state_visited()
12283 err = propagate_liveness(env, &sl->state, cur); in is_state_visited()
12291 err = err ? : propagate_precision(env, &sl->state); in is_state_visited()
12315 if (sl->state.frame[0]->regs[0].live & REG_LIVE_DONE) { in is_state_visited()
12316 u32 br = sl->state.branches; in is_state_visited()
12321 free_verifier_state(&sl->state, false); in is_state_visited()
12371 new = &new_sl->state; in is_state_visited()
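explored_state (line 11023) hashes insn_idx ^ callsite into the explored_states table, and is_state_visited (lines 12229 onward) scans that bucket: entries with branches != 0 are still being explored and are checked for looping rather than used for pruning, while a fully explored equivalent state (states_equal) lets the verifier cut the current path after propagating liveness and precision marks back from it (lines 12283-12292). A sketch of the bucket walk and prune decision, with states_equal reduced to plain field equality:

    #include <stdbool.h>
    #include <stdio.h>

    #define HTAB_SIZE 1024

    struct vstate { int insn_idx; int callsite; int branches; };

    struct state_list {
        struct vstate state;
        struct state_list *next;
    };

    static struct state_list *explored[HTAB_SIZE];

    static unsigned bucket(int insn_idx, int callsite)
    {
        return (unsigned)(insn_idx ^ callsite) % HTAB_SIZE;
    }

    /* stand-in for the real states_equal(): here, exact field equality */
    static bool states_equal(const struct vstate *a, const struct vstate *b)
    {
        return a->insn_idx == b->insn_idx && a->callsite == b->callsite;
    }

    /* true if `cur` can be pruned against a fully explored state */
    static bool is_state_visited(const struct vstate *cur)
    {
        struct state_list *sl =
            explored[bucket(cur->insn_idx, cur->callsite)];

        for (; sl; sl = sl->next) {
            if (sl->state.insn_idx != cur->insn_idx)
                continue;
            if (sl->state.branches)
                continue; /* still being explored: cannot prune yet */
            if (states_equal(&sl->state, cur))
                return true; /* equivalent state already verified */
        }
        return false;
    }

    int main(void)
    {
        static struct state_list done = { .state = { 7, 0, 0 } };
        struct vstate cur = { 7, 0, 1 };

        explored[bucket(7, 0)] = &done;
        printf("pruned: %d\n", is_state_visited(&cur)); /* 1 */
        return 0;
    }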
12458 struct bpf_verifier_state *state = env->cur_state; in do_check() local
12515 print_verifier_state(env, state->frame[state->curframe], true); in do_check()
12527 print_insn_state(env, state->frame[state->curframe]); in do_check()
12736 if (state->curframe) { in do_check()
14876 free_verifier_state(&sl->state, false); in free_states()
14890 free_verifier_state(&sl->state, false); in free_states()
14901 struct bpf_verifier_state *state; in do_check_common() local
14908 state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL); in do_check_common()
14909 if (!state) in do_check_common()
14911 state->curframe = 0; in do_check_common()
14912 state->speculative = false; in do_check_common()
14913 state->branches = 1; in do_check_common()
14914 state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL); in do_check_common()
14915 if (!state->frame[0]) { in do_check_common()
14916 kfree(state); in do_check_common()
14919 env->cur_state = state; in do_check_common()
14920 init_func_state(env, state->frame[0], in do_check_common()
14924 state->first_insn_idx = env->subprog_info[subprog].start; in do_check_common()
14925 state->last_insn_idx = -1; in do_check_common()
14927 regs = state->frame[state->curframe]->regs; in do_check_common()
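Finally, do_check_common (lines 14901-14927) builds the initial state for a subprog: one zeroed bpf_verifier_state with curframe = 0 and branches = 1 (the single live path), a single frame set up by init_func_state, and first_insn_idx at the subprog's entry. A sketch of that setup; the callsite value -1 for the entry frame is my assumption:

    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_CALL_FRAMES 8

    struct func_state { int callsite; int frameno; int subprogno; };

    struct verifier_state {
        struct func_state *frame[MAX_CALL_FRAMES];
        int curframe;
        int branches;
        int first_insn_idx;
    };

    static struct verifier_state *make_initial_state(int subprog_start,
                                                     int subprogno)
    {
        struct verifier_state *st = calloc(1, sizeof(*st));

        if (!st)
            return NULL;
        st->curframe = 0;
        st->branches = 1; /* exactly one path is alive at the start */
        st->frame[0] = calloc(1, sizeof(struct func_state));
        if (!st->frame[0]) {
            free(st);
            return NULL;
        }
        /* assumption: the entry frame uses -1 ("no caller") as callsite */
        st->frame[0]->callsite = -1;
        st->frame[0]->frameno = 0;
        st->frame[0]->subprogno = subprogno;
        st->first_insn_idx = subprog_start;
        return st;
    }

    int main(void)
    {
        struct verifier_state *st = make_initial_state(0, 0);

        printf("branches=%d frameno=%d\n",
               st->branches, st->frame[0]->frameno);
        free(st->frame[0]);
        free(st);
        return 0;
    }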