Lines matching refs:insn_idx (BPF verifier, kernel/bpf/verifier.c)

173 	int insn_idx;  member
191 static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx);
793 enum bpf_arg_type arg_type, int insn_idx) in mark_stack_slots_dynptr() argument
833 id = acquire_reference_state(env, insn_idx); in mark_stack_slots_dynptr()
1196 verbose(env, "%d:", env->insn_idx); in print_insn_state()
1315 static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx) in acquire_reference_state() argument
1326 state->refs[new_ofs].insn_idx = insn_idx; in acquire_reference_state()
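
The verifier stamps every acquired reference with the instruction that acquired it: acquire_reference_state() (1315-1326) stores insn_idx next to the new reference id, and check_reference_leak() (8023, further down) prints both when a reference is never released. A minimal standalone sketch of that bookkeeping, using invented names rather than the kernel structures:

/* Simplified model of reference-id bookkeeping keyed by insn_idx.
 * Names (ref_entry, ref_table, acquire_ref, report_leaks) are
 * illustrative, not the kernel's. */
#include <stdio.h>
#include <stdlib.h>

struct ref_entry {
    int id;        /* unique reference id handed back to the caller */
    int insn_idx;  /* instruction that acquired the reference */
};

struct ref_table {
    struct ref_entry *refs;
    int cnt;
    int next_id;
};

/* Record a new reference and remember which instruction acquired it. */
static int acquire_ref(struct ref_table *t, int insn_idx)
{
    struct ref_entry *p = realloc(t->refs, (t->cnt + 1) * sizeof(*p));

    if (!p)
        return -1;
    t->refs = p;
    t->refs[t->cnt].id = ++t->next_id;
    t->refs[t->cnt].insn_idx = insn_idx;
    t->cnt++;
    return t->next_id;
}

/* At program exit, any entry still present is a leak; the stored
 * insn_idx lets the report point at the acquiring instruction. */
static void report_leaks(const struct ref_table *t)
{
    for (int i = 0; i < t->cnt; i++)
        printf("unreleased reference id=%d alloc_insn=%d\n",
               t->refs[i].id, t->refs[i].insn_idx);
}

int main(void)
{
    struct ref_table t = {0};

    acquire_ref(&t, 42);   /* e.g. a reference-acquiring call at insn 42 */
    report_leaks(&t);
    free(t.refs);
    return 0;
}
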
1459 int *insn_idx, bool pop_log) in pop_stack() argument
1475 if (insn_idx) in pop_stack()
1476 *insn_idx = head->insn_idx; in pop_stack()
1488 int insn_idx, int prev_insn_idx, in push_stack() argument
1499 elem->insn_idx = insn_idx; in push_stack()
2019 int insn_idx, int prev_insn_idx, in push_async_cb() argument
2029 elem->insn_idx = insn_idx; in push_async_cb()
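
pop_stack() (1459-1476) and push_stack() (1488-1499) show the second recurring pattern: when both outcomes of a branch must be explored, the verifier saves the instruction index of the deferred path together with the index it branched from, and resumes from it later. A self-contained toy version of that pending-branch stack (all names are illustrative):

/* Toy model of the verifier's pending-branch stack: each element
 * remembers where to resume (insn_idx) and where the branch was
 * taken (prev_insn_idx). */
#include <stdio.h>
#include <stdlib.h>

struct stack_elem {
    int insn_idx;       /* instruction to resume verification at */
    int prev_insn_idx;  /* instruction that created this branch */
    struct stack_elem *next;
};

static struct stack_elem *head;

static int push_branch(int insn_idx, int prev_insn_idx)
{
    struct stack_elem *e = malloc(sizeof(*e));

    if (!e)
        return -1;
    e->insn_idx = insn_idx;
    e->prev_insn_idx = prev_insn_idx;
    e->next = head;
    head = e;
    return 0;
}

/* Returns the saved resume point, or -1 once every path was explored. */
static int pop_branch(int *prev_insn_idx)
{
    struct stack_elem *e = head;
    int idx;

    if (!e)
        return -1;
    if (prev_insn_idx)
        *prev_insn_idx = e->prev_insn_idx;
    idx = e->insn_idx;
    head = e->next;
    free(e);
    return idx;
}

int main(void)
{
    int prev;

    push_branch(10, 5);          /* defer the path at insn 10; branch at insn 5 */
    int idx = pop_branch(&prev);
    printf("resume at %d (came from insn %d)\n", idx, prev);
    return 0;
}
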
2730 struct bpf_insn *insn = env->prog->insnsi + env->insn_idx; in check_reg_arg()
2765 reg->subreg_def = rw64 ? DEF_NOT_SUBREG : env->insn_idx + 1; in check_reg_arg()
2777 static bool is_jmp_point(struct bpf_verifier_env *env, int insn_idx) in is_jmp_point() argument
2779 return env->insn_aux_data[insn_idx].jmp_point; in is_jmp_point()
2790 if (!is_jmp_point(env, env->insn_idx)) in push_jmp_history()
2798 p[cnt - 1].idx = env->insn_idx; in push_jmp_history()
3189 int last_idx = env->insn_idx; in __mark_chain_precision()
3488 int insn_idx) in check_stack_write_fixed_off() argument
3492 struct bpf_insn *insn = &env->prog->insnsi[insn_idx]; in check_stack_write_fixed_off()
3525 env->insn_aux_data[insn_idx].sanitize_stack_spill = true; in check_stack_write_fixed_off()
3557 verbose_linfo(env, insn_idx, "; "); in check_stack_write_fixed_off()
3628 int value_regno, int insn_idx) in check_stack_write_var_off() argument
3634 struct bpf_insn *insn = &env->prog->insnsi[insn_idx]; in check_stack_write_var_off()
3687 insn_idx, i); in check_stack_write_var_off()
3710 insn_idx, i); in check_stack_write_var_off()
3805 verbose_linfo(env, env->insn_idx, "; "); in check_stack_read_fixed_off()
4000 int value_regno, int insn_idx) in check_stack_write() argument
4009 value_regno, insn_idx); in check_stack_write()
4016 value_regno, insn_idx); in check_stack_write()
4246 int value_regno, int insn_idx, in check_map_kptr_access() argument
4249 struct bpf_insn *insn = &env->prog->insnsi[insn_idx]; in check_map_kptr_access()
4448 static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size, in check_ctx_access() argument
4472 env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size; in check_ctx_access()
4496 static int check_sock_access(struct bpf_verifier_env *env, int insn_idx, in check_sock_access() argument
4530 env->insn_aux_data[insn_idx].ctx_field_size = in check_sock_access()
5299 static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno, in check_mem_access() argument
5350 err = check_map_kptr_access(env, regno, value_regno, insn_idx, kptr_field); in check_mem_access()
5412 err = check_ctx_access(env, insn_idx, off, size, t, &reg_type, &btf, in check_mem_access()
5415 verbose_linfo(env, insn_idx, "; "); in check_mem_access()
5458 value_regno, insn_idx); in check_mem_access()
5490 err = check_sock_access(env, insn_idx, regno, off, size, t); in check_mem_access()
5538 static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn) in check_atomic() argument
5624 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, in check_atomic()
5627 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, in check_atomic()
5634 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, in check_atomic()
5876 return check_mem_access(env, env->insn_idx, regno, offset, BPF_B, in check_helper_mem_access()
7448 int insn_idx);
7452 struct bpf_func_state *callee, int insn_idx);
7457 int *insn_idx, int subprog, in __check_func_call() argument
7532 *insn_idx, subprog); in __check_func_call()
7539 err = set_callee_state_cb(env, caller, callee, *insn_idx); in __check_func_call()
7561 *insn_idx /* callsite */, in __check_func_call()
7570 err = set_callee_state_cb(env, caller, callee, *insn_idx); in __check_func_call()
7580 *insn_idx = env->subprog_info[subprog].start - 1; in __check_func_call()
7625 struct bpf_func_state *callee, int insn_idx) in set_callee_state() argument
7638 int *insn_idx) in check_func_call() argument
7642 target_insn = *insn_idx + insn->imm + 1; in check_func_call()
7650 return __check_func_call(env, insn, insn_idx, subprog, set_callee_state); in check_func_call()
7656 int insn_idx) in set_map_elem_callback_state() argument
7658 struct bpf_insn_aux_data *insn_aux = &env->insn_aux_data[insn_idx]; in set_map_elem_callback_state()
7686 int insn_idx) in set_loop_callback_state() argument
7708 int insn_idx) in set_timer_callback_state() argument
7738 int insn_idx) in set_find_vma_callback_state() argument
7766 int insn_idx) in set_user_ringbuf_callback_state() argument
7789 int insn_idx) in set_rbtree_add_callback_state() argument
7843 static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx) in prepare_func_exit() argument
7893 *insn_idx = callee->callsite + 1; in prepare_func_exit()
7897 verbose(env, "to caller at %d:\n", *insn_idx); in prepare_func_exit()
7929 int func_id, int insn_idx) in record_func_map() argument
7931 struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx]; in record_func_map()
7975 int func_id, int insn_idx) in record_func_key() argument
7977 struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx]; in record_func_key()
8023 state->refs[i].id, state->refs[i].insn_idx); in check_reference_leak()
8090 return &env->insn_aux_data[env->insn_idx]; in cur_aux()
8132 int insn_idx = *insn_idx_p; in check_helper_call() local
8194 env->insn_aux_data[insn_idx].storage_get_func_atomic = true; in check_helper_call()
8205 err = record_func_map(env, &meta, func_id, insn_idx); in check_helper_call()
8209 err = record_func_key(env, &meta, func_id, insn_idx); in check_helper_call()
8217 err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B, in check_helper_call()
8236 err = check_mem_access(env, insn_idx, meta.uninit_dynptr_regno, in check_helper_call()
8244 insn_idx); in check_helper_call()
8524 int id = acquire_reference_state(env, insn_idx); in check_helper_call()
8586 DEF_NOT_SUBREG : env->insn_idx + 1; in mark_btf_func_reg_size()
9784 int err, insn_idx = *insn_idx_p; in check_kfunc_call() local
9960 env->insn_aux_data[insn_idx].obj_new_size = ret_t->size; in check_kfunc_call()
9961 env->insn_aux_data[insn_idx].kptr_struct_meta = in check_kfunc_call()
9964 env->insn_aux_data[insn_idx].kptr_struct_meta = in check_kfunc_call()
10035 int id = acquire_reference_state(env, insn_idx); in check_kfunc_call()
10337 ret = sanitize_speculative_path(env, NULL, env->insn_idx + 1, in sanitize_ptr_alu()
10338 env->insn_idx); in sanitize_ptr_alu()
10354 env->insn_aux_data[env->insn_idx].seen = env->pass_cnt; in sanitize_mark_insn_seen()
11606 dst_reg->subreg_def = env->insn_idx + 1; in check_alu_op()
12405 struct bpf_insn *insn, int *insn_idx) in check_cond_jmp_op() argument
12494 !sanitize_speculative_path(env, insn, *insn_idx + 1, in check_cond_jmp_op()
12495 *insn_idx)) in check_cond_jmp_op()
12497 *insn_idx += insn->off; in check_cond_jmp_op()
12506 *insn_idx + insn->off + 1, in check_cond_jmp_op()
12507 *insn_idx)) in check_cond_jmp_op()
12512 other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx, in check_cond_jmp_op()
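
check_cond_jmp_op() (12405-12512) is where insn_idx forks: a branch whose outcome is provable just moves the index, while an unpredictable one pushes the jump-target path at *insn_idx + insn->off + 1 for later (12512) and keeps verifying the fall-through. A small sketch of the combined effect; save_branch() below is only a stand-in for the real push_stack():

/* Branch handling in one function: either redirect insn_idx when the
 * outcome is known, or record the other path for later and fall
 * through. Models the net effect, not the kernel's exact code. */
#include <stdio.h>

static void save_branch(int insn_idx, int prev_insn_idx)
{
    printf("queued path starting at %d (branch at %d)\n",
           insn_idx, prev_insn_idx);
}

/* Returns the next instruction index to verify on the current path. */
static int cond_jmp(int insn_idx, int off, int always_taken, int never_taken)
{
    if (always_taken)
        return insn_idx + off + 1;   /* only the jump target is reachable */
    if (never_taken)
        return insn_idx + 1;         /* only the fall-through is reachable */

    /* Both outcomes possible: explore the target later, keep
     * verifying the fall-through now. */
    save_branch(insn_idx + off + 1, insn_idx);
    return insn_idx + 1;
}

int main(void)
{
    printf("next: %d\n", cond_jmp(20, 5, 0, 0));  /* queues insn 26, continues at 21 */
    return 0;
}
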
12691 env->insn_idx + insn->imm + 1); in check_ld_imm()
12832 regs[BPF_REG_0].subreg_def = env->insn_idx + 1; in check_ld_abs()
13058 static bool is_prune_point(struct bpf_verifier_env *env, int insn_idx) in is_prune_point() argument
13060 return env->insn_aux_data[insn_idx].prune_point; in is_prune_point()
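
is_jmp_point() (2777-2779) and is_prune_point() (13058-13060) are plain lookups into a per-instruction side table indexed by insn_idx; the flags themselves are set by earlier analysis passes. Sketch with invented names:

/* Per-instruction flag table indexed by insn_idx; the checks are
 * one-line lookups. */
#include <stdbool.h>
#include <stdio.h>

struct insn_aux {
    bool jmp_point;    /* instruction is a jump target */
    bool prune_point;  /* worth comparing against explored states here */
};

#define PROG_LEN 4
static struct insn_aux aux[PROG_LEN];

static bool is_jmp_point(int insn_idx)
{
    return aux[insn_idx].jmp_point;
}

static bool is_prune_point(int insn_idx)
{
    return aux[insn_idx].prune_point;
}

int main(void)
{
    aux[2].jmp_point = true;     /* marked by an earlier analysis pass */
    aux[2].prune_point = true;

    for (int i = 0; i < PROG_LEN; i++)
        printf("insn %d: jmp=%d prune=%d\n", i,
               is_jmp_point(i), is_prune_point(i));
    return 0;
}
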
13809 if (sl->state.insn_idx != insn || in clean_live_states()
14250 static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) in is_state_visited() argument
14270 pprev = explored_state(env, insn_idx); in is_state_visited()
14273 clean_live_states(env, insn_idx, cur); in is_state_visited()
14277 if (sl->state.insn_idx != insn_idx) in is_state_visited()
14298 verbose_linfo(env, insn_idx, "; "); in is_state_visited()
14299 verbose(env, "infinite loop detected at insn %d\n", insn_idx); in is_state_visited()
14426 new->insn_idx = insn_idx; in is_state_visited()
14428 "BUG is_state_visited:branches_to_explore=%d insn %d\n", new->branches, insn_idx); in is_state_visited()
14431 cur->first_insn_idx = insn_idx; in is_state_visited()
14433 new_sl->next = *explored_state(env, insn_idx); in is_state_visited()
14434 *explored_state(env, insn_idx) = new_sl; in is_state_visited()
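
is_state_visited() (14250-14434) walks the states previously recorded at this insn_idx, skips entries that belong to a different instruction (14277), and either prunes the current path or appends a new entry keyed by insn_idx (14426-14434). A compressed model, with the verifier state reduced to a single int purely for illustration:

/* Skeleton of state pruning: explored states are linked per
 * instruction, and a new state at insn_idx is only interesting if no
 * equivalent state was already verified there. */
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

struct explored_state {
    int insn_idx;   /* instruction this state was recorded at */
    int state;      /* stand-in for the real verifier state */
    struct explored_state *next;
};

static struct explored_state *explored;

/* Returns true when an equivalent state was already verified at this
 * instruction, i.e. the current path can be pruned. */
static bool is_state_visited(int insn_idx, int cur_state)
{
    for (struct explored_state *sl = explored; sl; sl = sl->next) {
        if (sl->insn_idx != insn_idx)
            continue;           /* recorded at another instruction */
        if (sl->state == cur_state)
            return true;        /* safe: prune this path */
    }

    /* Nothing equivalent found: remember the state for later paths. */
    struct explored_state *new_sl = malloc(sizeof(*new_sl));
    if (new_sl) {
        new_sl->insn_idx = insn_idx;
        new_sl->state = cur_state;
        new_sl->next = explored;
        explored = new_sl;
    }
    return false;
}

int main(void)
{
    printf("%d\n", is_state_visited(7, 123)); /* 0: first visit at insn 7 */
    printf("%d\n", is_state_visited(7, 123)); /* 1: pruned */
    return 0;
}
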
14519 if (env->insn_idx >= insn_cnt) { in do_check()
14521 env->insn_idx, insn_cnt); in do_check()
14525 insn = &insns[env->insn_idx]; in do_check()
14537 if (is_prune_point(env, env->insn_idx)) { in do_check()
14538 err = is_state_visited(env, env->insn_idx); in do_check()
14546 env->prev_insn_idx, env->insn_idx, in do_check()
14550 verbose(env, "%d: safe\n", env->insn_idx); in do_check()
14556 if (is_jmp_point(env, env->insn_idx)) { in do_check()
14570 env->prev_insn_idx, env->insn_idx, in do_check()
14587 verbose_linfo(env, env->insn_idx, "; "); in do_check()
14589 verbose(env, "%d: ", env->insn_idx); in do_check()
14596 err = bpf_prog_offload_verify_insn(env, env->insn_idx, in do_check()
14604 prev_insn_idx = env->insn_idx; in do_check()
14630 err = check_mem_access(env, env->insn_idx, insn->src_reg, in do_check()
14636 prev_src_type = &env->insn_aux_data[env->insn_idx].ptr_type; in do_check()
14661 err = check_atomic(env, env->insn_idx, insn); in do_check()
14664 env->insn_idx++; in do_check()
14685 err = check_mem_access(env, env->insn_idx, insn->dst_reg, in do_check()
14691 prev_dst_type = &env->insn_aux_data[env->insn_idx].ptr_type; in do_check()
14719 err = check_mem_access(env, env->insn_idx, insn->dst_reg, in do_check()
14752 err = check_func_call(env, insn, &env->insn_idx); in do_check()
14754 err = check_kfunc_call(env, insn, &env->insn_idx); in do_check()
14756 err = check_helper_call(env, insn, &env->insn_idx); in do_check()
14769 env->insn_idx += insn->off + 1; in do_check()
14805 err = prepare_func_exit(env, &env->insn_idx); in do_check()
14819 &env->insn_idx, pop_log); in do_check()
14829 err = check_cond_jmp_op(env, insn, &env->insn_idx); in do_check()
14846 env->insn_idx++; in do_check()
14857 env->insn_idx++; in do_check()
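
Inside do_check() (14519-14857), env->insn_idx is effectively the verifier's program counter: fetch the instruction at insn_idx (14525), dispatch on it, then advance by one, add a jump offset (14769), or reload the index from a saved branch at exit (14805-14819). A stripped-down loop showing only that control flow, with toy opcodes rather than BPF:

/* Minimal control-flow skeleton of the main verification loop: the
 * instruction index is advanced, redirected by jumps, and checked
 * against the program length before each fetch. */
#include <stdio.h>

enum toy_op { OP_NOP, OP_JA, OP_EXIT };

struct toy_insn {
    enum toy_op op;
    int off;           /* jump offset for OP_JA */
};

int main(void)
{
    struct toy_insn prog[] = {
        { OP_NOP,  0 },
        { OP_JA,   1 },  /* skip the next instruction */
        { OP_NOP,  0 },
        { OP_NOP,  0 },
        { OP_EXIT, 0 },
    };
    int insn_cnt = sizeof(prog) / sizeof(prog[0]);
    int insn_idx = 0;

    for (;;) {
        if (insn_idx >= insn_cnt) {
            printf("invalid insn idx %d, cnt %d\n", insn_idx, insn_cnt);
            return 1;
        }

        struct toy_insn *insn = &prog[insn_idx];

        printf("%d: op=%d\n", insn_idx, insn->op);

        switch (insn->op) {
        case OP_NOP:
            insn_idx++;                 /* fall through to the next insn */
            break;
        case OP_JA:
            insn_idx += insn->off + 1;  /* unconditional jump */
            break;
        case OP_EXIT:
            /* The real loop would pop a pending branch here and
             * continue; with none saved, verification is done. */
            return 0;
        }
    }
}
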
15398 if (desc->insn_idx <= off) in adjust_poke_descs()
15400 desc->insn_idx += len - 1; in adjust_poke_descs()
16072 if (poke->insn_idx < subprog_end && in jit_subprogs()
16073 poke->insn_idx >= subprog_start) in jit_subprogs()
16267 struct bpf_insn *insn_buf, int insn_idx, int *cnt) in fixup_kfunc_call() argument
16303 struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta; in fixup_kfunc_call()
16305 u64 obj_new_size = env->insn_aux_data[insn_idx].obj_new_size; in fixup_kfunc_call()
16313 struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta; in fixup_kfunc_call()
16517 .insn_idx = i + delta, in do_misc_fixups()
17133 env->insn_idx = env->subprog_info[i].start; in do_check_subprogs()
17134 WARN_ON_ONCE(env->insn_idx == 0); in do_check_subprogs()
17151 env->insn_idx = 0; in do_check_main()
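
Finally, do_check_subprogs() and do_check_main() (17133-17151) seed env->insn_idx before the main loop runs: each subprogram is verified from its own start index (never 0), while the main program starts at instruction 0. A trivial sketch of that seeding, with a made-up subprogram table:

/* Each subprogram is verified from its own entry index; the main
 * program always starts at 0. */
#include <stdio.h>

struct subprog_info { int start; };

int main(void)
{
    struct subprog_info subprog[] = { {0}, {12}, {40} };
    int nr = sizeof(subprog) / sizeof(subprog[0]);

    for (int i = 1; i < nr; i++) {
        int insn_idx = subprog[i].start;   /* never 0 for a subprogram */
        printf("verifying subprog %d from insn %d\n", i, insn_idx);
    }
    return 0;
}
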