Lines matching refs:callee in kernel/bpf/verifier.c (BPF verifier: subprogram call and callback handling)

7447 				   struct bpf_func_state *callee,
7452 struct bpf_func_state *callee, int insn_idx);
7462 struct bpf_func_state *caller, *callee; in __check_func_call() local
7535 callee = async_cb->frame[0]; in __check_func_call()
7536 callee->async_entry_cnt = caller->async_entry_cnt + 1; in __check_func_call()
7539 err = set_callee_state_cb(env, caller, callee, *insn_idx); in __check_func_call()
7550 callee = kzalloc(sizeof(*callee), GFP_KERNEL); in __check_func_call()
7551 if (!callee) in __check_func_call()
7553 state->frame[state->curframe + 1] = callee; in __check_func_call()
7559 init_func_state(env, callee, in __check_func_call()
7566 err = copy_reference_state(callee, caller); in __check_func_call()
7570 err = set_callee_state_cb(env, caller, callee, *insn_idx); in __check_func_call()
7586 print_verifier_state(env, callee, true); in __check_func_call()
7591 free_func_state(callee); in __check_func_call()
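
__check_func_call() is the common entry point for verifying both direct bpf-to-bpf calls and helper/kfunc callbacks: async callbacks (bpf_timer) are verified in their own frame[0] state with async_entry_cnt bumped, while everything else gets a freshly kzalloc'd bpf_func_state installed at frame[curframe + 1], inherits the caller's reference state via copy_reference_state(), and has its argument registers seeded by the per-call set_callee_state_cb hook. For a plain subprogram call that hook is set_callee_state() (below), which simply copies R1-R5 from the caller. A minimal program exercising this path, as a sketch assuming a libbpf toolchain (names are illustrative):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

/* A static, non-inlined subprogram: the verifier reaches the call via
 * __check_func_call(), pushes a new frame for it, and seeds R1 (x)
 * from the caller with set_callee_state().
 */
static __noinline int add_one(int x)
{
	return x + 1;
}

SEC("tp/syscalls/sys_enter_getpid")
int caller_prog(void *ctx)
{
	return add_one(41) == 42 ? 0 : 1;
}

char LICENSE[] SEC("license") = "GPL";
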
7598 struct bpf_func_state *callee) in map_set_for_each_callback_args() argument
7605 callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1]; in map_set_for_each_callback_args()
7607 callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY; in map_set_for_each_callback_args()
7608 __mark_reg_known_zero(&callee->regs[BPF_REG_2]); in map_set_for_each_callback_args()
7609 callee->regs[BPF_REG_2].map_ptr = caller->regs[BPF_REG_1].map_ptr; in map_set_for_each_callback_args()
7611 callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE; in map_set_for_each_callback_args()
7612 __mark_reg_known_zero(&callee->regs[BPF_REG_3]); in map_set_for_each_callback_args()
7613 callee->regs[BPF_REG_3].map_ptr = caller->regs[BPF_REG_1].map_ptr; in map_set_for_each_callback_args()
7616 callee->regs[BPF_REG_4] = caller->regs[BPF_REG_3]; in map_set_for_each_callback_args()
7619 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); in map_set_for_each_callback_args()
7625 struct bpf_func_state *callee, int insn_idx) in set_callee_state() argument
7633 callee->regs[i] = caller->regs[i]; in set_callee_state()
7655 struct bpf_func_state *callee, in set_map_elem_callback_state() argument
7674 err = map->ops->map_set_for_each_callback_args(env, caller, callee); in set_map_elem_callback_state()
7678 callee->in_callback_fn = true; in set_map_elem_callback_state()
7679 callee->callback_ret_range = tnum_range(0, 1); in set_map_elem_callback_state()
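
Taken together, map_set_for_each_callback_args() and set_map_elem_callback_state() define the contract for a bpf_for_each_map_elem() callback: R1 = the map (copied from the caller's R1), R2 = PTR_TO_MAP_KEY, R3 = PTR_TO_MAP_VALUE, R4 = callback_ctx (the caller's R3), R5 poisoned, and a return value constrained to [0, 1] (0 = continue, 1 = stop). A callback written against that layout, as a sketch (assumes a bpftool-generated vmlinux.h; names are illustrative):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 8);
	__type(key, __u32);
	__type(value, __u64);
} counts SEC(".maps");

/* Argument order mirrors the register setup above:
 * R1 = map, R2 = key, R3 = value, R4 = callback_ctx.
 */
static long sum_cb(struct bpf_map *map, __u32 *key, __u64 *val, void *ctx)
{
	*(__u64 *)ctx += *val;
	return 0;
}

SEC("tp/syscalls/sys_enter_getpid")
int iterate(void *ctx)
{
	__u64 sum = 0;

	bpf_for_each_map_elem(&counts, sum_cb, &sum, 0);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
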
7685 struct bpf_func_state *callee, in set_loop_callback_state() argument
7692 callee->regs[BPF_REG_1].type = SCALAR_VALUE; in set_loop_callback_state()
7693 callee->regs[BPF_REG_2] = caller->regs[BPF_REG_3]; in set_loop_callback_state()
7696 __mark_reg_not_init(env, &callee->regs[BPF_REG_3]); in set_loop_callback_state()
7697 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); in set_loop_callback_state()
7698 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); in set_loop_callback_state()
7700 callee->in_callback_fn = true; in set_loop_callback_state()
7701 callee->callback_ret_range = tnum_range(0, 1); in set_loop_callback_state()
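
set_loop_callback_state() gives a bpf_loop() callback only R1 = the current iteration index (a scalar) and R2 = callback_ctx (the caller's R3); R3-R5 are poisoned. The [0, 1] return range means returning 1 breaks out of the loop early. Sketch (names are illustrative):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

/* R1 = loop index, R2 = callback_ctx; return 1 to stop early. */
static long step(__u32 index, void *ctx)
{
	__u64 *acc = ctx;

	*acc += index;
	return *acc > 100 ? 1 : 0;
}

SEC("tp/syscalls/sys_enter_getpid")
int run_loop(void *ctx)
{
	__u64 acc = 0;

	bpf_loop(32, step, &acc, 0);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
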
7707 struct bpf_func_state *callee, in set_timer_callback_state() argument
7715 callee->regs[BPF_REG_1].type = CONST_PTR_TO_MAP; in set_timer_callback_state()
7716 __mark_reg_known_zero(&callee->regs[BPF_REG_1]); in set_timer_callback_state()
7717 callee->regs[BPF_REG_1].map_ptr = map_ptr; in set_timer_callback_state()
7719 callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY; in set_timer_callback_state()
7720 __mark_reg_known_zero(&callee->regs[BPF_REG_2]); in set_timer_callback_state()
7721 callee->regs[BPF_REG_2].map_ptr = map_ptr; in set_timer_callback_state()
7723 callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE; in set_timer_callback_state()
7724 __mark_reg_known_zero(&callee->regs[BPF_REG_3]); in set_timer_callback_state()
7725 callee->regs[BPF_REG_3].map_ptr = map_ptr; in set_timer_callback_state()
7728 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); in set_timer_callback_state()
7729 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); in set_timer_callback_state()
7730 callee->in_async_callback_fn = true; in set_timer_callback_state()
7731 callee->callback_ret_range = tnum_range(0, 1); in set_timer_callback_state()
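
set_timer_callback_state() seeds a bpf_timer_set_callback() callback with R1 = the map (CONST_PTR_TO_MAP), R2 = PTR_TO_MAP_KEY and R3 = PTR_TO_MAP_VALUE of the element embedding the timer, and sets in_async_callback_fn rather than in_callback_fn: the callback fires asynchronously, which is why __check_func_call() (top of this listing) verifies it in a separate frame[0] state. Usage sketch (assumes vmlinux.h; CLOCK_MONOTONIC is defined locally because it is a UAPI macro, not a BTF type):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

#define CLOCK_MONOTONIC 1

struct elem {
	struct bpf_timer t;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct elem);
} timers SEC(".maps");

/* Async callback: R1 = map, R2 = key, R3 = value, per the setup above. */
static int timer_cb(void *map, int *key, struct elem *val)
{
	return 0;
}

SEC("tp/syscalls/sys_enter_getpid")
int arm_timer(void *ctx)
{
	int key = 0;
	struct elem *val = bpf_map_lookup_elem(&timers, &key);

	if (!val)
		return 0;
	bpf_timer_init(&val->t, &timers, CLOCK_MONOTONIC);
	bpf_timer_set_callback(&val->t, timer_cb);
	bpf_timer_start(&val->t, 0 /* fire ASAP */, 0);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
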
7737 struct bpf_func_state *callee, in set_find_vma_callback_state() argument
7745 callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1]; in set_find_vma_callback_state()
7747 callee->regs[BPF_REG_2].type = PTR_TO_BTF_ID; in set_find_vma_callback_state()
7748 __mark_reg_known_zero(&callee->regs[BPF_REG_2]); in set_find_vma_callback_state()
7749 callee->regs[BPF_REG_2].btf = btf_vmlinux; in set_find_vma_callback_state()
7750 callee->regs[BPF_REG_2].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_VMA], in set_find_vma_callback_state()
7753 callee->regs[BPF_REG_3] = caller->regs[BPF_REG_4]; in set_find_vma_callback_state()
7756 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); in set_find_vma_callback_state()
7757 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); in set_find_vma_callback_state()
7758 callee->in_callback_fn = true; in set_find_vma_callback_state()
7759 callee->callback_ret_range = tnum_range(0, 1); in set_find_vma_callback_state()
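
set_find_vma_callback_state() passes the bpf_find_vma() callback R1 = the task (the caller's R1), R2 = the matched vm_area_struct as a PTR_TO_BTF_ID (btf_tracing_ids[BTF_TRACING_TYPE_VMA]), and R3 = callback_ctx (the caller's R4). Note the trailing comma at line 7750 above: it is the C comma operator and behaves like a semicolon here. Usage sketch (names are illustrative):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

/* R1 = task, R2 = vma (trusted BTF pointer), R3 = callback_ctx. */
static long vma_cb(struct task_struct *task, struct vm_area_struct *vma,
		   void *ctx)
{
	*(__u64 *)ctx = vma->vm_flags;
	return 0;
}

SEC("tp/syscalls/sys_enter_getpid")
int find_my_vma(void *ctx)
{
	struct task_struct *task = bpf_get_current_task_btf();
	__u64 flags = 0;

	bpf_find_vma(task, 0x1000 /* addr; illustrative */, vma_cb, &flags, 0);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
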
7765 struct bpf_func_state *callee, in set_user_ringbuf_callback_state() argument
7772 __mark_reg_not_init(env, &callee->regs[BPF_REG_0]); in set_user_ringbuf_callback_state()
7773 mark_dynptr_cb_reg(env, &callee->regs[BPF_REG_1], BPF_DYNPTR_TYPE_LOCAL); in set_user_ringbuf_callback_state()
7774 callee->regs[BPF_REG_2] = caller->regs[BPF_REG_3]; in set_user_ringbuf_callback_state()
7777 __mark_reg_not_init(env, &callee->regs[BPF_REG_3]); in set_user_ringbuf_callback_state()
7778 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); in set_user_ringbuf_callback_state()
7779 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); in set_user_ringbuf_callback_state()
7781 callee->in_callback_fn = true; in set_user_ringbuf_callback_state()
7782 callee->callback_ret_range = tnum_range(0, 1); in set_user_ringbuf_callback_state()
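
set_user_ringbuf_callback_state() hands the bpf_user_ringbuf_drain() callback R1 = a local dynptr over the sampled record (mark_dynptr_cb_reg() with BPF_DYNPTR_TYPE_LOCAL) and R2 = callback_ctx (the caller's R3); note that it also explicitly marks R0 uninitialized. Sketch (names are illustrative):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_USER_RINGBUF);
	__uint(max_entries, 4096);
} user_rb SEC(".maps");

/* R1 = dynptr over one user-submitted sample, R2 = callback_ctx;
 * return 0 to keep draining, 1 to stop.
 */
static long drain_cb(struct bpf_dynptr *dynptr, void *ctx)
{
	(*(__u64 *)ctx)++;
	return 0;
}

SEC("tp/syscalls/sys_enter_getpid")
int drain(void *ctx)
{
	__u64 seen = 0;

	bpf_user_ringbuf_drain(&user_rb, drain_cb, &seen, 0);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
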
7788 struct bpf_func_state *callee, in set_rbtree_add_callback_state() argument
7805 mark_reg_graph_node(callee->regs, BPF_REG_1, &field->graph_root); in set_rbtree_add_callback_state()
7806 ref_set_non_owning(env, &callee->regs[BPF_REG_1]); in set_rbtree_add_callback_state()
7807 mark_reg_graph_node(callee->regs, BPF_REG_2, &field->graph_root); in set_rbtree_add_callback_state()
7808 ref_set_non_owning(env, &callee->regs[BPF_REG_2]); in set_rbtree_add_callback_state()
7810 __mark_reg_not_init(env, &callee->regs[BPF_REG_3]); in set_rbtree_add_callback_state()
7811 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); in set_rbtree_add_callback_state()
7812 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); in set_rbtree_add_callback_state()
7813 callee->in_callback_fn = true; in set_rbtree_add_callback_state()
7814 callee->callback_ret_range = tnum_range(0, 1); in set_rbtree_add_callback_state()
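
set_rbtree_add_callback_state() marks R1 and R2 of the bpf_rbtree_add() less() callback as non-owning references to the graph node field (mark_reg_graph_node() plus ref_set_non_owning()). A sketch in the style of the kernel selftests, assuming their bpf_experimental.h for the kfunc declarations and the __contains()/bpf_obj_new() macros:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include "bpf_experimental.h"

struct node {
	struct bpf_rb_node rb; /* first member, so a cast recovers it */
	__u64 key;
};

#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
private(R) struct bpf_spin_lock lock;
private(R) struct bpf_rb_root root __contains(node, rb);

/* R1 and R2 are non-owning node references, per the setup above. */
static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
{
	return ((struct node *)a)->key < ((struct node *)b)->key;
}

SEC("tp/syscalls/sys_enter_getpid")
int add_node(void *ctx)
{
	struct node *n = bpf_obj_new(typeof(*n));

	if (!n)
		return 0;
	n->key = 42;
	bpf_spin_lock(&lock);
	bpf_rbtree_add(&root, &n->rb, less); /* lock must be held */
	bpf_spin_unlock(&lock);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
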
7828 struct bpf_func_state *callee; in in_rbtree_lock_required_cb() local
7834 callee = state->frame[state->curframe]; in in_rbtree_lock_required_cb()
7836 if (!callee->in_callback_fn) in in_rbtree_lock_required_cb()
7839 kfunc_btf_id = insn[callee->callsite].imm; in in_rbtree_lock_required_cb()
7846 struct bpf_func_state *caller, *callee; in prepare_func_exit() local
7850 callee = state->frame[state->curframe]; in prepare_func_exit()
7851 r0 = &callee->regs[BPF_REG_0]; in prepare_func_exit()
7864 if (callee->in_callback_fn) { in prepare_func_exit()
7866 struct tnum range = callee->callback_ret_range; in prepare_func_exit()
7886 if (!callee->in_callback_fn) { in prepare_func_exit()
7888 err = copy_reference_state(caller, callee); in prepare_func_exit()
7893 *insn_idx = callee->callsite + 1; in prepare_func_exit()
7896 print_verifier_state(env, callee, true); in prepare_func_exit()
7901 free_func_state(callee); in prepare_func_exit()
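
prepare_func_exit() pops the frame: for callback frames (in_callback_fn) R0 is checked against callee->callback_ret_range rather than treated as an ordinary return value, and only non-callback frames copy their reference state back into the caller before execution resumes at callsite + 1. The range check itself reduces to tnum containment; a minimal kernel-side sketch with the tnum API from include/linux/tnum.h (callback_retval_ok() is a hypothetical wrapper, not a verifier function):

#include <linux/tnum.h>
#include <linux/bpf_verifier.h>

/* R0's tracked value (r0->var_off) must lie inside the callback's
 * allowed return range, i.e. tnum_range(0, 1) for the setters above.
 */
static bool callback_retval_ok(const struct bpf_reg_state *r0,
			       struct tnum range)
{
	return tnum_in(range, r0->var_off);
}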