Lines matching refs:state (kernel/bpf/verifier.c)
434 const struct bpf_func_state *state) in print_verifier_state() argument
440 if (state->frameno) in print_verifier_state()
441 verbose(env, " frame%d:", state->frameno); in print_verifier_state()
443 reg = &state->regs[i]; in print_verifier_state()
501 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { in print_verifier_state()
507 if (state->stack[i].slot_type[j] != STACK_INVALID) in print_verifier_state()
510 state->stack[i].slot_type[j]]; in print_verifier_state()
516 print_liveness(env, state->stack[i].spilled_ptr.live); in print_verifier_state()
517 if (state->stack[i].slot_type[0] == STACK_SPILL) { in print_verifier_state()
518 reg = &state->stack[i].spilled_ptr; in print_verifier_state()
529 if (state->acquired_refs && state->refs[0].id) { in print_verifier_state()
530 verbose(env, " refs=%d", state->refs[0].id); in print_verifier_state()
531 for (i = 1; i < state->acquired_refs; i++) in print_verifier_state()
532 if (state->refs[i].id) in print_verifier_state()
533 verbose(env, ",%d", state->refs[i].id); in print_verifier_state()
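The block above is print_verifier_state() walking one bpf_func_state: the frame number, each register, the per-byte type tags of every allocated stack slot, and finally the list of acquired reference ids (the refs=%d then ,%d loop at lines 529-533). Below is a minimal user-space sketch of the same dump loop; mini_func_state, print_state and the MINI_* constants are invented stand-ins for the kernel structs, and the register printout is omitted to keep it short. The '?', 'r', 'm' letters mirror the verifier log's slot_type characters for invalid, spilled and miscellaneous stack bytes.

    #include <stdio.h>

    #define MINI_REG_SIZE 8          /* one stack slot is 8 bytes, as in BPF */
    #define MINI_NR_SLOTS 4
    #define MINI_NR_REFS  4

    enum slot_type { SLOT_INVALID, SLOT_SPILL, SLOT_MISC };

    struct mini_func_state {
        int frameno;                            /* depth in the call chain */
        char slot_type[MINI_NR_SLOTS][MINI_REG_SIZE];
        int refs[MINI_NR_REFS];                 /* acquired ref ids, 0 = unused */
        int acquired_refs;
    };

    static void print_state(const struct mini_func_state *state)
    {
        static const char types[] = { '?', 'r', 'm' }; /* invalid/spill/misc */
        int i, j;

        if (state->frameno)
            printf(" frame%d:", state->frameno);

        /* one "fp-N=..." entry per stack slot that holds anything valid */
        for (i = 0; i < MINI_NR_SLOTS; i++) {
            int valid = 0;
            for (j = 0; j < MINI_REG_SIZE; j++)
                if (state->slot_type[i][j] != SLOT_INVALID)
                    valid = 1;
            if (!valid)
                continue;
            printf(" fp-%d=", (i + 1) * MINI_REG_SIZE);
            for (j = 0; j < MINI_REG_SIZE; j++)
                putchar(types[(int)state->slot_type[i][j]]);
        }

        /* refs=ID,ID,... exactly like the refs[] loop in the listing */
        if (state->acquired_refs && state->refs[0]) {
            printf(" refs=%d", state->refs[0]);
            for (i = 1; i < state->acquired_refs; i++)
                if (state->refs[i])
                    printf(",%d", state->refs[i]);
        }
        putchar('\n');
    }

    int main(void)
    {
        struct mini_func_state s = { .frameno = 1, .acquired_refs = 2,
                                     .refs = { 7, 12 } };
        int j;

        for (j = 0; j < MINI_REG_SIZE; j++)
            s.slot_type[0][j] = SLOT_SPILL;
        print_state(&s);    /* prints: " frame1: fp-8=rrrrrrrr refs=7,12" */
        return 0;
    }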
560 static int realloc_##NAME##_state(struct bpf_func_state *state, int size, \ in COPY_STATE_FN()
563 u32 old_size = state->COUNT; \ in COPY_STATE_FN()
570 state->COUNT = slot * SIZE; \ in COPY_STATE_FN()
572 kfree(state->FIELD); \ in COPY_STATE_FN()
573 state->FIELD = NULL; \ in COPY_STATE_FN()
582 if (state->FIELD) \ in COPY_STATE_FN()
583 memcpy(new_##FIELD, state->FIELD, \ in COPY_STATE_FN()
588 state->COUNT = slot * SIZE; \ in COPY_STATE_FN()
589 kfree(state->FIELD); \ in COPY_STATE_FN()
590 state->FIELD = new_##FIELD; \ in COPY_STATE_FN()
606 static int realloc_func_state(struct bpf_func_state *state, int stack_size,
609 int err = realloc_reference_state(state, refs_size, copy_old);
612 return realloc_stack_state(state, stack_size, copy_old);
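Lines 560-612 are the macro-generated realloc_##NAME##_state() helpers (shown under COPY_STATE_FN() above) plus realloc_func_state(), which simply chains the reference-array and stack-array helpers, failing fast if the first one fails. The contract each helper implements: round the request up to whole slots, shrink by truncating the count (freeing outright at size 0), and grow by allocating a fresh zeroed buffer, optionally copying the old contents, then swapping it in. A hedged sketch of that contract for one array; mini_state, realloc_mini_state and SLOT_SIZE are invented names, with malloc/calloc/free in place of the kernel allocators:

    #include <stdlib.h>
    #include <string.h>

    #define SLOT_SIZE 8     /* stands in for BPF_REG_SIZE / the element size */

    struct mini_state {
        unsigned int count;   /* bytes currently tracked (state->COUNT) */
        char *field;          /* the resizable array     (state->FIELD) */
    };

    /* Returns 0 or a negative errno-style code, like the kernel helpers. */
    static int realloc_mini_state(struct mini_state *state, unsigned int size,
                                  int copy_old)
    {
        unsigned int old_size = state->count;
        unsigned int slot = (size + SLOT_SIZE - 1) / SLOT_SIZE;
        char *new_field;

        if (size <= old_size) {
            /* shrink: drop the tail; free entirely when size hits 0 */
            if (!size) {
                free(state->field);
                state->field = NULL;
            }
            state->count = slot * SLOT_SIZE;
            return 0;
        }

        /* grow: fresh zeroed buffer, keep the old contents only on request */
        new_field = calloc(slot, SLOT_SIZE);
        if (!new_field)
            return -12;                 /* -ENOMEM */
        if (copy_old && state->field)
            memcpy(new_field, state->field, old_size);
        state->count = slot * SLOT_SIZE;
        free(state->field);
        state->field = new_field;
        return 0;
    }

    int main(void)
    {
        struct mini_state s = { 0, NULL };

        realloc_mini_state(&s, 20, 0);  /* rounds up to 24 bytes */
        realloc_mini_state(&s, 0, 0);   /* frees the buffer, count back to 0 */
        return 0;
    }

Generating one such helper per array with a macro keeps the grow/shrink policy identical for refs and stack while letting each keep its own element size and field names.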
622 struct bpf_func_state *state = cur_func(env); in acquire_reference_state() local
623 int new_ofs = state->acquired_refs; in acquire_reference_state()
626 err = realloc_reference_state(state, state->acquired_refs + 1, true); in acquire_reference_state()
630 state->refs[new_ofs].id = id; in acquire_reference_state()
631 state->refs[new_ofs].insn_idx = insn_idx; in acquire_reference_state()
637 static int release_reference_state(struct bpf_func_state *state, int ptr_id) in release_reference_state() argument
641 last_idx = state->acquired_refs - 1; in release_reference_state()
642 for (i = 0; i < state->acquired_refs; i++) { in release_reference_state()
643 if (state->refs[i].id == ptr_id) { in release_reference_state()
645 memcpy(&state->refs[i], &state->refs[last_idx], in release_reference_state()
646 sizeof(*state->refs)); in release_reference_state()
647 memset(&state->refs[last_idx], 0, sizeof(*state->refs)); in release_reference_state()
648 state->acquired_refs--; in release_reference_state()
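acquire_reference_state() and release_reference_state() manage the per-frame table of acquired kernel references: acquire grows the refs array by one and records {id, insn_idx}; release linear-scans for the id, overwrites the match with the last entry, clears the vacated tail slot, and shrinks the count. The same table is scanned at program exit by check_reference_leak() (lines 3942-3949 further down). A self-contained sketch of all three, using a fixed-size array where the kernel reallocates; ref_state, acquire_reference, release_reference and check_leak are invented names:

    #include <stdio.h>
    #include <string.h>

    #define MAX_REFS 16

    struct ref_entry { int id; int insn_idx; };

    struct ref_state {
        struct ref_entry refs[MAX_REFS];  /* fixed here; the kernel grows it */
        int acquired_refs;
        int id_gen;                        /* stands in for env->id_gen */
    };

    /* Record a newly acquired reference and hand back its id. */
    static int acquire_reference(struct ref_state *s, int insn_idx)
    {
        int new_ofs = s->acquired_refs;

        if (new_ofs == MAX_REFS)
            return -1;
        s->refs[new_ofs].id = ++s->id_gen;
        s->refs[new_ofs].insn_idx = insn_idx;
        s->acquired_refs++;
        return s->refs[new_ofs].id;
    }

    /* Release by id: overwrite the match with the last entry (order does
     * not matter), clear the vacated slot, shrink the count. */
    static int release_reference(struct ref_state *s, int ptr_id)
    {
        int last_idx = s->acquired_refs - 1;
        int i;

        for (i = 0; i < s->acquired_refs; i++) {
            if (s->refs[i].id != ptr_id)
                continue;
            if (last_idx && i != last_idx)
                memcpy(&s->refs[i], &s->refs[last_idx], sizeof(*s->refs));
            memset(&s->refs[last_idx], 0, sizeof(*s->refs));
            s->acquired_refs--;
            return 0;
        }
        return -22;     /* -EINVAL: id was never acquired (or double free) */
    }

    /* Exit-time scan, like check_reference_leak(): leftovers are leaks. */
    static int check_leak(const struct ref_state *s)
    {
        int i;

        for (i = 0; i < s->acquired_refs; i++)
            printf("unreleased reference id=%d alloc_insn=%d\n",
                   s->refs[i].id, s->refs[i].insn_idx);
        return s->acquired_refs ? -22 : 0;
    }

    int main(void)
    {
        struct ref_state s = { 0 };
        int a = acquire_reference(&s, 10);
        int b = acquire_reference(&s, 14);

        release_reference(&s, a);
        (void)b;                        /* deliberately not released */
        return check_leak(&s) ? 1 : 0;  /* reports b as leaked */
    }

Swap-with-last keeps release cheap and needs no ordering; the insn_idx is kept purely so the leak report can point at the instruction that acquired the reference.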
667 static void free_func_state(struct bpf_func_state *state) in free_func_state() argument
669 if (!state) in free_func_state()
671 kfree(state->refs); in free_func_state()
672 kfree(state->stack); in free_func_state()
673 kfree(state); in free_func_state()
676 static void clear_jmp_history(struct bpf_verifier_state *state) in clear_jmp_history() argument
678 kfree(state->jmp_history); in clear_jmp_history()
679 state->jmp_history = NULL; in clear_jmp_history()
680 state->jmp_history_cnt = 0; in clear_jmp_history()
683 static void free_verifier_state(struct bpf_verifier_state *state, in free_verifier_state() argument
688 for (i = 0; i <= state->curframe; i++) { in free_verifier_state()
689 free_func_state(state->frame[i]); in free_verifier_state()
690 state->frame[i] = NULL; in free_verifier_state()
692 clear_jmp_history(state); in free_verifier_state()
694 kfree(state); in free_verifier_state()
1054 struct bpf_func_state *state) in init_reg_state() argument
1056 struct bpf_reg_state *regs = state->regs; in init_reg_state()
1069 regs[BPF_REG_FP].frameno = state->frameno; in init_reg_state()
1078 struct bpf_func_state *state, in init_func_state() argument
1081 state->callsite = callsite; in init_func_state()
1082 state->frameno = frameno; in init_func_state()
1083 state->subprogno = subprogno; in init_func_state()
1084 init_reg_state(env, state); in init_func_state()
1208 const struct bpf_reg_state *state, in mark_reg_read() argument
1211 bool writes = parent == state->parent; /* Observe write marks */ in mark_reg_read()
1216 if (writes && state->live & REG_LIVE_WRITTEN) in mark_reg_read()
1244 state = parent; in mark_reg_read()
1245 parent = state->parent; in mark_reg_read()
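mark_reg_read() propagates a read mark up the chain of parent states: each register's ->parent points at the same register in the state it was forked from, and the walk stops at the first state that wrote the register (REG_LIVE_WRITTEN), since older states did not supply the value being read. A sketch of that walk with a toy mini_reg; the kernel's extra "writes" bookkeeping for skip-level parents (line 1211 above) is reduced to a constant here:

    #include <stdio.h>

    #define LIVE_READ    1
    #define LIVE_WRITTEN 2

    /* One register slot, duplicated per verifier state; ->parent points at
     * the same slot in the state this one was forked from. */
    struct mini_reg {
        int live;
        struct mini_reg *parent;
    };

    /* Propagate a read mark up the parent chain. A state that wrote the
     * register screens off its ancestors: they did not supply the value,
     * so the walk stops there. */
    static void mark_reg_read(const struct mini_reg *state,
                              struct mini_reg *parent)
    {
        int writes = 1;   /* simplified; the kernel derives this */

        while (parent) {
            if (writes && (state->live & LIVE_WRITTEN))
                break;
            parent->live |= LIVE_READ;
            state = parent;
            parent = state->parent;
        }
    }

    int main(void)
    {
        struct mini_reg grandparent = { 0, NULL };
        struct mini_reg parent = { LIVE_WRITTEN, &grandparent };
        struct mini_reg cur = { 0, &parent };

        mark_reg_read(&cur, cur.parent);
        /* parent gets the read mark; grandparent is screened off by the
         * write mark in parent */
        printf("parent=%d grandparent=%d\n",
               parent.live & LIVE_READ, grandparent.live & LIVE_READ);
        return 0;
    }

These read marks are what later lets state pruning treat never-read registers as irrelevant when comparing states.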
1374 struct bpf_func_state *state = vstate->frame[vstate->curframe]; in check_reg_arg() local
1376 struct bpf_reg_state *reg, *regs = state->regs; in check_reg_arg()
1881 static void save_register_state(struct bpf_func_state *state, in save_register_state() argument
1886 state->stack[spi].spilled_ptr = *reg; in save_register_state()
1887 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; in save_register_state()
1890 state->stack[spi].slot_type[i] = STACK_SPILL; in save_register_state()
1897 struct bpf_func_state *state, /* func where register points to */ in check_stack_write() argument
1905 err = realloc_func_state(state, round_up(slot + 1, BPF_REG_SIZE), in check_stack_write()
1906 state->acquired_refs, true); in check_stack_write()
1913 state->stack[spi].slot_type[0] == STACK_SPILL && in check_stack_write()
1936 save_register_state(state, spi, reg); in check_stack_write()
1945 if (state != cur && reg->type == PTR_TO_STACK) { in check_stack_write()
1953 if (state->stack[spi].slot_type[0] == STACK_SPILL && in check_stack_write()
1954 register_is_const(&state->stack[spi].spilled_ptr)) in check_stack_write()
1957 if (state->stack[spi].slot_type[i] == STACK_MISC) { in check_stack_write()
1985 save_register_state(state, spi, reg); in check_stack_write()
1990 state->stack[spi].spilled_ptr.type = NOT_INIT; in check_stack_write()
1992 if (state->stack[spi].slot_type[0] == STACK_SPILL) in check_stack_write()
1994 state->stack[spi].slot_type[i] = STACK_MISC; in check_stack_write()
2005 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; in check_stack_write()
2018 state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] = in check_stack_write()
2029 struct bpf_func_state *state = vstate->frame[vstate->curframe]; in check_stack_read() local
2050 mark_reg_unknown(env, state->regs, value_regno); in check_stack_read()
2051 state->regs[value_regno].live |= REG_LIVE_WRITTEN; in check_stack_read()
2065 state->regs[value_regno] = *reg; in check_stack_read()
2070 state->regs[value_regno].live |= REG_LIVE_WRITTEN; in check_stack_read()
2093 __mark_reg_const_zero(&state->regs[value_regno]); in check_stack_read()
2104 state->regs[value_regno].precise = true; in check_stack_read()
2107 mark_reg_unknown(env, state->regs, value_regno); in check_stack_read()
2109 state->regs[value_regno].live |= REG_LIVE_WRITTEN; in check_stack_read()
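check_stack_write() and check_stack_read() (with save_register_state() between them) track the stack as 8-byte slots carrying one type tag per byte. A full-width, aligned register store is a spill: an exact copy of the register is kept in spilled_ptr and all eight bytes are tagged STACK_SPILL, so a later full-width load restores the register exactly. Partial or variable stores degrade the bytes to STACK_MISC and drop the remembered register, and a load from such a slot only yields an unknown scalar. A toy model of that spill/fill logic; mini_reg, stack_write_spill and friends are invented names, and the precision, liveness and pointer-leak checks are omitted:

    #include <stdio.h>
    #include <string.h>

    #define REG_SIZE 8
    #define NR_SLOTS 8

    enum stype { STACK_INVALID, STACK_SPILL, STACK_MISC };

    struct mini_reg { int known; long value; };   /* toy register state */

    struct slot {
        struct mini_reg spilled_ptr;     /* register copy kept on spill */
        char type[REG_SIZE];             /* one tag per byte of the slot */
    };

    static struct slot stack[NR_SLOTS];

    /* Full-width register store at an aligned offset: remember the whole
     * register so a later load can restore it exactly. */
    static void stack_write_spill(int spi, const struct mini_reg *reg)
    {
        stack[spi].spilled_ptr = *reg;
        memset(stack[spi].type, STACK_SPILL, REG_SIZE);
    }

    /* Narrow or unknown store: the touched bytes degrade to MISC and the
     * remembered register, if any, is dropped. */
    static void stack_write_misc(int spi, int off, int size)
    {
        int i;

        stack[spi].spilled_ptr.known = 0;
        for (i = 0; i < size; i++)
            stack[spi].type[off + i] = STACK_MISC;
    }

    /* Load: a fully spilled slot restores the saved register; anything
     * else only yields an unknown scalar (mark_reg_unknown equivalent). */
    static void stack_read(int spi, struct mini_reg *dst)
    {
        if (stack[spi].type[0] == STACK_SPILL) {
            *dst = stack[spi].spilled_ptr;
            return;
        }
        dst->known = 0;
        dst->value = 0;
    }

    int main(void)
    {
        struct mini_reg r = { 1, 42 }, out;

        stack_write_spill(0, &r);
        stack_read(0, &out);
        printf("restored: known=%d value=%ld\n", out.known, out.value);

        stack_write_misc(0, 0, 4);      /* partial overwrite kills the spill */
        stack_read(0, &out);
        printf("after misc write: known=%d\n", out.known);
        return 0;
    }

Per-byte tags are what let the verifier cope with unaligned and sub-slot stack accesses without giving up on the rest of the slot.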
2183 struct bpf_func_state *state = vstate->frame[vstate->curframe]; in check_map_access() local
2184 struct bpf_reg_state *reg = &state->regs[regno]; in check_map_access()
2192 print_verifier_state(env, state); in check_map_access()
2766 struct bpf_func_state *state; in check_mem_access() local
2836 state = func(env, reg); in check_mem_access()
2837 err = update_stack_depth(env, state, off); in check_mem_access()
2842 err = check_stack_write(env, state, off, size, in check_mem_access()
2845 err = check_stack_read(env, state, off, size, in check_mem_access()
2979 struct bpf_func_state *state = func(env, reg); in check_stack_boundary() local
3058 if (state->allocated_stack <= slot) in check_stack_boundary()
3060 stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE]; in check_stack_boundary()
3068 if (state->stack[spi].slot_type[0] == STACK_SPILL && in check_stack_boundary()
3069 state->stack[spi].spilled_ptr.type == SCALAR_VALUE) { in check_stack_boundary()
3070 __mark_reg_unknown(env, &state->stack[spi].spilled_ptr); in check_stack_boundary()
3072 state->stack[spi].slot_type[j] = STACK_MISC; in check_stack_boundary()
3092 mark_reg_read(env, &state->stack[spi].spilled_ptr, in check_stack_boundary()
3093 state->stack[spi].spilled_ptr.parent, in check_stack_boundary()
3096 return update_stack_depth(env, state, min_off); in check_stack_boundary()
3697 struct bpf_func_state *state) in __clear_all_pkt_pointers() argument
3699 struct bpf_reg_state *regs = state->regs, *reg; in __clear_all_pkt_pointers()
3706 bpf_for_each_spilled_reg(i, state, reg) { in __clear_all_pkt_pointers()
3724 struct bpf_func_state *state, in release_reg_references() argument
3727 struct bpf_reg_state *regs = state->regs, *reg; in release_reg_references()
3734 bpf_for_each_spilled_reg(i, state, reg) { in release_reg_references()
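__clear_all_pkt_pointers() and release_reg_references() share one walk shape: visit the register file in regs[], then every register image spilled to the stack (the bpf_for_each_spilled_reg() iterator), and clobber whatever matches the invalidation condition. __find_good_pkt_pointers() and __mark_ptr_or_null_regs() further down reuse the same shape with different predicates. A sketch of the walk for the packet-pointer case, with invented miniature types:

    #include <stdio.h>

    #define NR_REGS  4
    #define NR_SLOTS 4

    enum rtype { NOT_INIT, SCALAR, PTR_TO_PACKET };

    struct mini_reg { enum rtype type; };

    struct mini_func_state {
        struct mini_reg regs[NR_REGS];
        struct mini_reg spilled[NR_SLOTS];  /* spilled_ptr of each slot */
        int slot_is_spill[NR_SLOTS];
    };

    /* Invalidate every packet pointer, wherever it lives: the register
     * file first, then each spilled register image on the stack. */
    static void clear_pkt_pointers(struct mini_func_state *state)
    {
        int i;

        for (i = 0; i < NR_REGS; i++)
            if (state->regs[i].type == PTR_TO_PACKET)
                state->regs[i].type = SCALAR;    /* mark unknown */

        /* bpf_for_each_spilled_reg() equivalent */
        for (i = 0; i < NR_SLOTS; i++) {
            if (!state->slot_is_spill[i])
                continue;
            if (state->spilled[i].type == PTR_TO_PACKET)
                state->spilled[i].type = SCALAR;
        }
    }

    int main(void)
    {
        struct mini_func_state s = { 0 };

        s.regs[1].type = PTR_TO_PACKET;
        s.slot_is_spill[0] = 1;
        s.spilled[0].type = PTR_TO_PACKET;
        clear_pkt_pointers(&s);
        printf("r1=%d slot0=%d\n", s.regs[1].type, s.spilled[0].type);
        return 0;
    }

Covering spilled copies is the point: a pointer saved to the stack before a helper call must go stale together with its live siblings, or a later fill would smuggle the invalidated pointer back.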
3765 struct bpf_verifier_state *state = env->cur_state; in check_func_call() local
3769 if (state->curframe + 1 >= MAX_CALL_FRAMES) { in check_func_call()
3771 state->curframe + 2); in check_func_call()
3783 caller = state->frame[state->curframe]; in check_func_call()
3784 if (state->frame[state->curframe + 1]) { in check_func_call()
3786 state->curframe + 1); in check_func_call()
3793 state->frame[state->curframe + 1] = callee; in check_func_call()
3802 state->curframe + 1 /* frameno within this callchain */, in check_func_call()
3823 state->curframe++; in check_func_call()
3839 struct bpf_verifier_state *state = env->cur_state; in prepare_func_exit() local
3844 callee = state->frame[state->curframe]; in prepare_func_exit()
3857 state->curframe--; in prepare_func_exit()
3858 caller = state->frame[state->curframe]; in prepare_func_exit()
3876 state->frame[state->curframe + 1] = NULL; in prepare_func_exit()
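check_func_call() and prepare_func_exit() treat BPF-to-BPF calls as a stack of bpf_func_state frames: a call refuses to exceed MAX_CALL_FRAMES, allocates the callee frame, passes r1-r5 by copying register state, and bumps curframe; exit copies r0 back into the caller, drops curframe, and clears the callee slot. A user-space sketch of that push/pop; func_frame, push_frame and pop_frame are invented names, and the kernel additionally transfers reference state and validates the return value:

    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_FRAMES 8

    struct func_frame {
        int callsite;    /* insn index to resume after on return */
        long regs[11];   /* r0..r10, toy values */
    };

    struct verifier_state {
        struct func_frame *frame[MAX_FRAMES];
        int curframe;
    };

    /* Push a callee frame, like check_func_call(). */
    static int push_frame(struct verifier_state *st, int callsite)
    {
        struct func_frame *caller, *callee;
        int i;

        if (st->curframe + 1 >= MAX_FRAMES)
            return -7;              /* -E2BIG: call stack too deep */
        caller = st->frame[st->curframe];
        callee = calloc(1, sizeof(*callee));
        if (!callee)
            return -12;             /* -ENOMEM */
        callee->callsite = callsite;
        for (i = 1; i <= 5; i++)    /* BPF argument registers r1-r5 */
            callee->regs[i] = caller->regs[i];
        st->frame[++st->curframe] = callee;
        return 0;
    }

    /* Pop on exit, like prepare_func_exit(): r0 goes back to the caller,
     * curframe drops, the callee slot is freed and cleared. */
    static int pop_frame(struct verifier_state *st)
    {
        struct func_frame *callee = st->frame[st->curframe];
        struct func_frame *caller;
        int callsite = callee->callsite;

        if (st->curframe == 0)
            return -22;             /* the main frame has no caller */
        caller = st->frame[--st->curframe];
        caller->regs[0] = callee->regs[0];   /* return value */
        free(callee);
        st->frame[st->curframe + 1] = NULL;
        return callsite;
    }

    int main(void)
    {
        struct verifier_state st = { .curframe = 0 };
        int resume;

        st.frame[0] = calloc(1, sizeof(struct func_frame));
        if (!st.frame[0])
            return 1;
        st.frame[0]->regs[1] = 99;           /* argument for the callee */
        push_frame(&st, 123);
        st.frame[st.curframe]->regs[0] = 7;  /* callee computes r0 */
        resume = pop_frame(&st);
        printf("resume at %d, r0=%ld\n", resume, st.frame[0]->regs[0]);
        free(st.frame[0]);
        return 0;
    }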
3942 struct bpf_func_state *state = cur_func(env); in check_reference_leak() local
3945 for (i = 0; i < state->acquired_refs; i++) { in check_reference_leak()
3947 state->refs[i].id, state->refs[i].insn_idx); in check_reference_leak()
3949 return state->acquired_refs ? -EINVAL : 0; in check_reference_leak()
4343 struct bpf_func_state *state = vstate->frame[vstate->curframe]; in adjust_ptr_min_max_vals() local
4344 struct bpf_reg_state *regs = state->regs, *dst_reg; in adjust_ptr_min_max_vals()
4867 struct bpf_func_state *state = vstate->frame[vstate->curframe]; in adjust_reg_min_max_vals() local
4868 struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg; in adjust_reg_min_max_vals()
4926 print_verifier_state(env, state); in adjust_reg_min_max_vals()
4931 print_verifier_state(env, state); in adjust_reg_min_max_vals()
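adjust_reg_min_max_vals() (and its pointer sibling just above) updates each register's tracked value range after every ALU operation, dumping the state on the failure paths shown. The core move, sketched below for unsigned addition only: add the bounds pairwise, and if either sum wraps, declare the result unbounded rather than track a wrapped range. bounds and bounds_add are invented names; the kernel also tracks signed bounds and known bits (tnums):

    #include <stdio.h>

    typedef unsigned long long u64;
    #define U64_MAX 0xffffffffffffffffULL

    struct bounds { u64 umin, umax; };   /* toy: unsigned range only */

    /* BPF_ADD-style range arithmetic: pairwise sums, with a conservative
     * fallback to "completely unknown" on overflow of either bound. */
    static struct bounds bounds_add(struct bounds a, struct bounds b)
    {
        struct bounds r;

        if (a.umin + b.umin < a.umin || a.umax + b.umax < a.umax) {
            r.umin = 0;                  /* wrapped: give up on the range */
            r.umax = U64_MAX;
        } else {
            r.umin = a.umin + b.umin;
            r.umax = a.umax + b.umax;
        }
        return r;
    }

    int main(void)
    {
        struct bounds x = { 0, 100 }, y = { 4, 4 };
        struct bounds big = { 0, U64_MAX }, one = { 1, 1 };
        struct bounds s;

        s = bounds_add(x, y);
        printf("[%llu, %llu]\n", s.umin, s.umax);   /* [4, 104] */
        s = bounds_add(big, one);                   /* wraps: unbounded */
        printf("[%llu, %llu]\n", s.umin, s.umax);
        return 0;
    }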
5100 static void __find_good_pkt_pointers(struct bpf_func_state *state, in __find_good_pkt_pointers() argument
5108 reg = &state->regs[i]; in __find_good_pkt_pointers()
5114 bpf_for_each_spilled_reg(i, state, reg) { in __find_good_pkt_pointers()
5627 static void mark_ptr_or_null_reg(struct bpf_func_state *state, in mark_ptr_or_null_reg() argument
5680 static void __mark_ptr_or_null_regs(struct bpf_func_state *state, u32 id, in __mark_ptr_or_null_regs() argument
5687 mark_ptr_or_null_reg(state, &state->regs[i], id, is_null); in __mark_ptr_or_null_regs()
5689 bpf_for_each_spilled_reg(i, state, reg) { in __mark_ptr_or_null_regs()
5692 mark_ptr_or_null_reg(state, reg, id, is_null); in __mark_ptr_or_null_regs()
5702 struct bpf_func_state *state = vstate->frame[vstate->curframe]; in mark_ptr_or_null_regs() local
5703 struct bpf_reg_state *regs = state->regs; in mark_ptr_or_null_regs()
5713 WARN_ON_ONCE(release_reference_state(state, id)); in mark_ptr_or_null_regs()
6241 struct bpf_func_state *state = cur->frame[cur->curframe]; in explored_state() local
6243 return &env->explored_states[(idx ^ state->callsite) % state_htab_size(env)]; in explored_state()
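explored_state() picks the hash bucket for previously verified states by mixing the instruction index with the current frame's callsite, so the same insn reached through different call chains keeps separate candidate lists. The bucket computation in isolation; HTAB_SIZE stands in for state_htab_size(env):

    #include <stdio.h>

    #define HTAB_SIZE 1024   /* state_htab_size() stand-in */

    /* Bucket selection as in explored_state(): insn index XOR callsite,
     * reduced modulo the table size. */
    static unsigned int explored_bucket(unsigned int insn_idx,
                                        unsigned int callsite)
    {
        return (insn_idx ^ callsite) % HTAB_SIZE;
    }

    int main(void)
    {
        /* same insn, two different callsites: usually two buckets */
        printf("%u %u\n", explored_bucket(100, 37), explored_bucket(100, 91));
        return 0;
    }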
6830 if (sl->state.branches) in clean_live_states()
6832 if (sl->state.insn_idx != insn || in clean_live_states()
6833 sl->state.curframe != cur->curframe) in clean_live_states()
6836 if (sl->state.frame[i]->callsite != cur->frame[i]->callsite) in clean_live_states()
6838 clean_verifier_state(env, &sl->state); in clean_live_states()
7159 struct bpf_func_state *state, *parent; in propagate_liveness() local
7171 state = vstate->frame[frame]; in propagate_liveness()
7173 state_reg = state->regs; in propagate_liveness()
7185 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE && in propagate_liveness()
7188 state_reg = &state->stack[i].spilled_ptr; in propagate_liveness()
7205 struct bpf_func_state *state; in propagate_precision() local
7208 state = old->frame[old->curframe]; in propagate_precision()
7209 state_reg = state->regs; in propagate_precision()
7221 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { in propagate_precision()
7222 if (state->stack[i].slot_type[0] != STACK_SPILL) in propagate_precision()
7224 state_reg = &state->stack[i].spilled_ptr; in propagate_precision()
7291 if (sl->state.insn_idx != insn_idx) in is_state_visited()
7293 if (sl->state.branches) { in is_state_visited()
7294 if (states_maybe_looping(&sl->state, cur) && in is_state_visited()
7295 states_equal(env, &sl->state, cur)) { in is_state_visited()
7317 if (states_equal(env, &sl->state, cur)) { in is_state_visited()
7329 err = propagate_liveness(env, &sl->state, cur); in is_state_visited()
7337 err = err ? : propagate_precision(env, &sl->state); in is_state_visited()
7361 if (sl->state.frame[0]->regs[0].live & REG_LIVE_DONE) { in is_state_visited()
7362 u32 br = sl->state.branches; in is_state_visited()
7367 free_verifier_state(&sl->state, false); in is_state_visited()
7413 new = &new_sl->state; in is_state_visited()
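is_state_visited() walks the bucket of states already verified around this insn: entries for other instructions are skipped; if states_equal() finds an equivalent earlier state, the current path is pruned (after propagating liveness and precision marks back, lines 7329-7337); otherwise the current state is copied into the list so later paths can prune against it. A heavily simplified sketch of that loop; branches accounting, loop detection and the real states_equal() are all elided, and mini_state with its fingerprint field is invented:

    #include <stdio.h>
    #include <stdlib.h>

    struct mini_state { int insn_idx; long fingerprint; };

    struct state_list_entry {
        struct mini_state state;
        struct state_list_entry *next;
    };

    static struct state_list_entry *bucket;   /* one bucket, for brevity */

    /* Grossly simplified states_equal(): the real one compares registers,
     * stack contents, acquired refs and id mappings. */
    static int states_equal(const struct mini_state *a,
                            const struct mini_state *b)
    {
        return a->fingerprint == b->fingerprint;
    }

    /* The shape of is_state_visited(): prune if an equivalent state was
     * already verified here, otherwise remember the current one. */
    static int is_state_visited(const struct mini_state *cur)
    {
        struct state_list_entry *sl, *new_sl;

        for (sl = bucket; sl; sl = sl->next) {
            if (sl->state.insn_idx != cur->insn_idx)
                continue;
            if (states_equal(&sl->state, cur))
                return 1;            /* prune: nothing new on this path */
        }

        new_sl = malloc(sizeof(*new_sl));
        if (!new_sl)
            return 0;                /* no memory: just keep verifying */
        new_sl->state = *cur;
        new_sl->next = bucket;
        bucket = new_sl;
        return 0;
    }

    int main(void)
    {
        struct mini_state a = { 10, 0xabc }, b = { 10, 0xabc };
        int first = is_state_visited(&a);
        int second = is_state_visited(&b);

        printf("%d %d\n", first, second);   /* 0 1: second path pruned */
        return 0;
    }

This memoization is what keeps verification tractable: without pruning, every branch would multiply the number of paths to walk.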
7501 struct bpf_verifier_state *state; in do_check() local
7510 state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL); in do_check()
7511 if (!state) in do_check()
7513 state->curframe = 0; in do_check()
7514 state->speculative = false; in do_check()
7515 state->branches = 1; in do_check()
7516 state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL); in do_check()
7517 if (!state->frame[0]) { in do_check()
7518 kfree(state); in do_check()
7521 env->cur_state = state; in do_check()
7522 init_func_state(env, state->frame[0], in do_check()
7581 print_verifier_state(env, state->frame[state->curframe]); in do_check()
7777 if (state->curframe) { in do_check()
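do_check() builds the root state by hand: a zeroed bpf_verifier_state with branches = 1, a zeroed frame 0 for the main program, and init_func_state() to stamp callsite/frameno/subprogno; free_verifier_state() (lines 683-694 above) tears it all down frame by frame. A sketch of that pairing, with calloc/free in place of kzalloc/kfree and invented struct names; -1 as the root callsite mirrors the kernel's BPF_MAIN_FUNC marker:

    #include <stdlib.h>

    #define MAX_FRAMES 8

    struct func_state { int callsite, frameno, subprogno; };

    struct verifier_state {
        struct func_state *frame[MAX_FRAMES];
        int curframe;
        int branches;
        int speculative;
    };

    /* Root-state setup in the shape of do_check(). */
    static struct verifier_state *alloc_root_state(void)
    {
        struct verifier_state *state = calloc(1, sizeof(*state));

        if (!state)
            return NULL;
        state->curframe = 0;
        state->speculative = 0;
        state->branches = 1;          /* one path outstanding: this one */
        state->frame[0] = calloc(1, sizeof(struct func_state));
        if (!state->frame[0]) {
            free(state);
            return NULL;
        }
        /* init_func_state() equivalent for the main program */
        state->frame[0]->callsite = -1;   /* no caller (BPF_MAIN_FUNC) */
        state->frame[0]->frameno = 0;
        state->frame[0]->subprogno = 0;
        return state;
    }

    /* Teardown in the shape of free_verifier_state(): every frame up to
     * curframe, then the top-level state itself. */
    static void free_root_state(struct verifier_state *state)
    {
        int i;

        if (!state)
            return;
        for (i = 0; i <= state->curframe; i++) {
            free(state->frame[i]);
            state->frame[i] = NULL;
        }
        free(state);
    }

    int main(void)
    {
        struct verifier_state *st = alloc_root_state();

        free_root_state(st);
        return 0;
    }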
9197 free_verifier_state(&sl->state, false); in free_states()
9210 free_verifier_state(&sl->state, false); in free_states()