Lines Matching refs:prog

435 struct bpf_func_info_aux *aux = env->prog->aux->func_info_aux; in subprog_is_global()
444 if (!env->prog->aux->func_info) in subprog_name()
447 info = &env->prog->aux->func_info[subprog]; in subprog_name()
448 return btf_type_name(env->prog->aux->btf, info->type_id); in subprog_name()
557 return is_may_goto_insn(&env->prog->insnsi[insn_idx]); in is_may_goto_insn_at()
1783 return env->prog->len; in state_htab_size()
2966 if (off >= env->prog->len || off < 0 || env->subprog_cnt == 0) in find_containing_subprog()
2994 int insn_cnt = env->prog->len; in add_subprog()
3017 struct bpf_prog_aux *aux = env->prog->aux; in bpf_find_exception_callback_insn_off()
3127 find_kfunc_desc(const struct bpf_prog *prog, u32 func_id, u16 offset) in find_kfunc_desc() argument
3135 tab = prog->aux->kfunc_tab; in find_kfunc_desc()
3140 int bpf_get_kfunc_addr(const struct bpf_prog *prog, u32 func_id, in bpf_get_kfunc_addr() argument
3145 desc = find_kfunc_desc(prog, func_id, btf_fd_idx); in bpf_get_kfunc_addr()
3163 tab = env->prog->aux->kfunc_btf_tab; in __find_kfunc_desc_btf()
3258 prog_aux = env->prog->aux; in add_kfunc_call()
3267 if (!env->prog->jit_requested) { in add_kfunc_call()
3277 if (!env->prog->gpl_compatible) { in add_kfunc_call()
3310 if (find_kfunc_desc(env->prog, func_id, offset)) in add_kfunc_call()
3384 static void sort_kfunc_descs_by_imm_off(struct bpf_prog *prog) in sort_kfunc_descs_by_imm_off() argument
3388 tab = prog->aux->kfunc_tab; in sort_kfunc_descs_by_imm_off()
3396 bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog) in bpf_prog_has_kfunc_call() argument
3398 return !!prog->aux->kfunc_tab; in bpf_prog_has_kfunc_call()
3402 bpf_jit_find_kfunc_model(const struct bpf_prog *prog, in bpf_jit_find_kfunc_model() argument
3412 tab = prog->aux->kfunc_tab; in bpf_jit_find_kfunc_model()
3437 int i, ret, insn_cnt = env->prog->len, ex_cb_insn; in add_subprog_and_kfunc()
3438 struct bpf_insn *insn = env->prog->insnsi; in add_subprog_and_kfunc()
3510 struct bpf_insn *insn = env->prog->insnsi; in check_subprogs()
3511 int insn_cnt = env->prog->len; in check_subprogs()
3803 struct bpf_insn *insn = env->prog->insnsi + env->insn_idx; in __check_reg_arg()
4267 struct bpf_insn *insn = env->prog->insnsi + idx; in backtrack_insn()
4500 bpf_pseudo_call(&env->prog->insnsi[subseq_idx - 1]) && in backtrack_insn()
4865 if (i >= env->prog->len) { in __mark_chain_precision()
5117 struct bpf_insn *insn = &env->prog->insnsi[insn_idx]; in check_stack_write_fixed_off()
5264 struct bpf_insn *insn = &env->prog->insnsi[insn_idx]; in check_stack_write_var_off()
5913 return env->prog->sleepable || in in_sleepable()
6015 struct bpf_insn *insn = &env->prog->insnsi[insn_idx]; in check_map_kptr_access()
6148 enum bpf_prog_type prog_type = resolve_prog_type(env->prog); in may_access_direct_pkt_data()
6221 env->prog->aux->max_pkt_offset = in check_packet_access()
6222 max_t(u32, env->prog->aux->max_pkt_offset, in check_packet_access()
6233 env->ops->is_valid_access(off, size, t, env->prog, info)) { in check_ctx_access()
6252 if (env->prog->aux->max_ctx_offset < off + size) in check_ctx_access()
6253 env->prog->aux->max_ctx_offset = off + size; in check_ctx_access()
6536 static enum priv_stack_mode bpf_enable_priv_stack(struct bpf_prog *prog) in bpf_enable_priv_stack() argument
6545 switch (prog->type) { in bpf_enable_priv_stack()
6554 if (prog->aux->priv_stack_requested || bpf_prog_check_recur(prog)) in bpf_enable_priv_stack()
6566 if (env->prog->jit_requested) in round_up_stack_depth()
6585 struct bpf_insn *insn = env->prog->insnsi; in check_max_stack_depth_subprog()
6727 env->prog->aux->tail_call_reachable = true; in check_max_stack_depth_subprog()
6757 priv_stack_mode = bpf_enable_priv_stack(env->prog); in check_max_stack_depth()
6778 env->prog->aux->jits_use_priv_stack = true; in check_max_stack_depth()
6833 if (off + size > env->prog->aux->max_tp_access) in check_tp_buffer_access()
6834 env->prog->aux->max_tp_access = off + size; in check_tp_buffer_access()
7231 if (!env->prog->gpl_compatible && btf_is_kernel(reg->btf)) { in check_ptr_to_btf_access()
7518 static bool get_func_retval_range(struct bpf_prog *prog, in get_func_retval_range() argument
7521 if (prog->type == BPF_PROG_TYPE_LSM && in get_func_retval_range()
7522 prog->expected_attach_type == BPF_LSM_MAC && in get_func_retval_range()
7523 !bpf_lsm_get_retval_range(prog, range)) { in get_func_retval_range()
7665 if (info.is_retval && get_func_retval_range(env->prog, &range)) { in check_mem_access()
7760 max_access = &env->prog->aux->max_rdonly_access; in check_mem_access()
7762 max_access = &env->prog->aux->max_rdwr_access; in check_mem_access()
8217 max_access = &env->prog->aux->max_rdonly_access; in check_helper_mem_access()
8219 max_access = &env->prog->aux->max_rdwr_access; in check_helper_mem_access()
9917 enum bpf_attach_type eatype = env->prog->expected_attach_type; in may_update_sockmap()
9918 enum bpf_prog_type type = resolve_prog_type(env->prog); in may_update_sockmap()
9955 return env->prog->jit_requested && in allow_tail_call_in_subprogs()
10557 struct bpf_prog *prog = env->prog; in btf_check_subprog_call() local
10558 struct btf *btf = prog->aux->btf; in btf_check_subprog_call()
10562 if (!prog->aux->func_info) in btf_check_subprog_call()
10565 btf_id = prog->aux->func_info[subprog].type_id; in btf_check_subprog_call()
10569 if (prog->aux->func_info_aux[subprog].unreliable) in btf_check_subprog_call()
10578 prog->aux->func_info_aux[subprog].unreliable = true; in btf_check_subprog_call()
10941 struct bpf_insn *insn = env->prog->insnsi; in in_rbtree_lock_required_cb()
11181 enum bpf_prog_type type = resolve_prog_type(env->prog); in check_reference_leak()
11278 enum bpf_prog_type type = resolve_prog_type(env->prog); in check_get_func_ip()
11282 if (!bpf_prog_has_trampoline(env->prog)) { in check_get_func_ip()
11356 *ptr = env->ops->get_func_proto(func_id, env->prog); in get_helper_proto()
11363 enum bpf_prog_type prog_type = resolve_prog_type(env->prog); in check_helper_call()
11389 if (!env->prog->gpl_compatible && fn->gpl_only) { in check_helper_call()
11394 if (fn->allowed && !fn->allowed(env->prog)) { in check_helper_call()
11585 env->prog->expected_attach_type == BPF_LSM_CGROUP) { in check_helper_call()
11586 if (!env->prog->aux->attach_func_proto->type) { in check_helper_call()
11857 !env->prog->has_callchain_buf) { in check_helper_call()
11872 env->prog->has_callchain_buf = true; in check_helper_call()
11876 env->prog->call_get_stack = true; in check_helper_call()
11881 env->prog->call_get_func_ip = true; in check_helper_call()
12372 if (btf_is_prog_ctx_type(&env->log, meta->btf, t, resolve_prog_type(env->prog), argno)) in get_kfunc_ptr_arg_type()
12991 enum bpf_prog_type prog_type = resolve_prog_type(env->prog); in check_css_task_iter_allowlist()
12997 if (env->prog->expected_attach_type == BPF_TRACE_ITER) in check_css_task_iter_allowlist()
13211 ret = get_kern_ctx_btf_id(&env->log, resolve_prog_type(env->prog)); in check_kfunc_args()
13549 kfunc_flags = btf_kfunc_id_set_contains(desc_btf, func_id, env->prog); in fetch_kfunc_meta()
13593 ret_btf = env->prog->aux->btf; in check_special_kfunc()
15601 if (!env->prog->aux->arena) { in check_alu_op()
16918 struct bpf_prog_aux *aux = env->prog->aux; in check_ld_imm()
16994 if (!may_access_skb(resolve_prog_type(env->prog))) { in check_ld_abs()
17061 const struct bpf_prog *prog = env->prog; in check_return_code() local
17064 enum bpf_prog_type prog_type = resolve_prog_type(env->prog); in check_return_code()
17075 if (prog->expected_attach_type == BPF_LSM_CGROUP) in check_return_code()
17078 if (!prog->aux->attach_func_proto->type) in check_return_code()
17082 if (!prog->aux->attach_func_proto->type) in check_return_code()
17093 ret_type = btf_type_resolve_ptr(prog->aux->attach_btf, in check_return_code()
17094 prog->aux->attach_func_proto->type, in check_return_code()
17137 if (env->prog->expected_attach_type == BPF_CGROUP_UDP4_RECVMSG || in check_return_code()
17138 env->prog->expected_attach_type == BPF_CGROUP_UDP6_RECVMSG || in check_return_code()
17139 env->prog->expected_attach_type == BPF_CGROUP_UNIX_RECVMSG || in check_return_code()
17140 env->prog->expected_attach_type == BPF_CGROUP_INET4_GETPEERNAME || in check_return_code()
17141 env->prog->expected_attach_type == BPF_CGROUP_INET6_GETPEERNAME || in check_return_code()
17142 env->prog->expected_attach_type == BPF_CGROUP_UNIX_GETPEERNAME || in check_return_code()
17143 env->prog->expected_attach_type == BPF_CGROUP_INET4_GETSOCKNAME || in check_return_code()
17144 env->prog->expected_attach_type == BPF_CGROUP_INET6_GETSOCKNAME || in check_return_code()
17145 env->prog->expected_attach_type == BPF_CGROUP_UNIX_GETSOCKNAME) in check_return_code()
17147 if (env->prog->expected_attach_type == BPF_CGROUP_INET4_BIND || in check_return_code()
17148 env->prog->expected_attach_type == BPF_CGROUP_INET6_BIND) in check_return_code()
17152 if (env->prog->expected_attach_type == BPF_CGROUP_INET_EGRESS) { in check_return_code()
17164 if (!env->prog->aux->attach_btf_id) in check_return_code()
17169 switch (env->prog->expected_attach_type) { in check_return_code()
17184 switch (env->prog->expected_attach_type) { in check_return_code()
17198 if (env->prog->expected_attach_type != BPF_LSM_CGROUP) { in check_return_code()
17200 if (!get_func_retval_range(env->prog, &range)) in check_return_code()
17206 } else if (!env->prog->aux->attach_func_proto->type) { in check_return_code()
17244 prog->expected_attach_type == BPF_LSM_CGROUP && in check_return_code()
17246 !prog->aux->attach_func_proto->type) in check_return_code()
17253 env->prog->enforce_expected_attach_type = 1; in check_return_code()
17380 if (w < 0 || w >= env->prog->len) { in push_insn()
17396 if (env->cfg.cur_stack >= env->prog->len) in push_insn()
17454 return env->prog->jit_requested && bpf_jit_supports_percpu_insn(); in verifier_inlines_helper_call()
17589 struct bpf_insn *insns = env->prog->insnsi, *stx, *ldx; in mark_fastcall_pattern_for_call()
17590 struct bpf_insn *call = &env->prog->insnsi[insn_idx]; in mark_fastcall_pattern_for_call()
17620 if (insn_idx - i < 0 || insn_idx + i >= env->prog->len) in mark_fastcall_pattern_for_call()
17683 insn = env->prog->insnsi + i; in mark_fastcall_patterns()
17691 insn = env->prog->insnsi + i; in mark_fastcall_patterns()
17707 struct bpf_insn *insns = env->prog->insnsi, *insn = &insns[t]; in visit_insn()
17827 int insn_cnt = env->prog->len; in check_cfg()
17893 struct bpf_insn *insn = &env->prog->insnsi[i]; in check_cfg()
17910 env->prog->aux->changes_pkt_data = env->subprog_info[0].changes_pkt_data; in check_cfg()
17911 env->prog->aux->might_sleep = env->subprog_info[0].might_sleep; in check_cfg()
17949 struct bpf_prog *prog; in check_btf_func_early() local
17970 prog = env->prog; in check_btf_func_early()
17971 btf = prog->aux->btf; in check_btf_func_early()
18034 prog->aux->func_info = krecord; in check_btf_func_early()
18035 prog->aux->func_info_cnt = nfuncs; in check_btf_func_early()
18051 struct bpf_prog *prog; in check_btf_func() local
18070 prog = env->prog; in check_btf_func()
18071 btf = prog->aux->btf; in check_btf_func()
18075 krecord = prog->aux->func_info; in check_btf_func()
18110 prog->aux->func_info_aux = info_aux; in check_btf_func()
18120 struct bpf_prog_aux *aux = env->prog->aux; in adjust_btf_func()
18141 struct bpf_prog *prog; in check_btf_line() local
18166 prog = env->prog; in check_btf_line()
18167 btf = prog->aux->btf; in check_btf_line()
18204 linfo[i].insn_off >= prog->len) { in check_btf_line()
18207 prog->len); in check_btf_line()
18212 if (!prog->insnsi[linfo[i].insn_off].code) { in check_btf_line()
18249 prog->aux->linfo = linfo; in check_btf_line()
18250 prog->aux->nr_linfo = nr_linfo; in check_btf_line()
18268 struct bpf_prog *prog = env->prog; in check_core_relo() local
18269 const struct btf *btf = prog->aux->btf; in check_core_relo()
18315 if (core_relo.insn_off % 8 || core_relo.insn_off / 8 >= prog->len) { in check_core_relo()
18317 i, core_relo.insn_off, prog->len); in check_core_relo()
18323 &prog->insnsi[core_relo.insn_off / 8]); in check_core_relo()
18351 env->prog->aux->btf = btf; in check_btf_info_early()
19832 struct bpf_insn *insn = &env->prog->insnsi[env->insn_idx]; in do_check_insn()
19992 struct bpf_insn *insns = env->prog->insnsi; in do_check()
19993 int insn_cnt = env->prog->len; in do_check()
20077 if (bpf_prog_is_offloaded(env->prog->aux)) { in do_check()
20358 struct bpf_prog *prog) in check_map_prog_compatibility() argument
20361 enum bpf_prog_type prog_type = resolve_prog_type(prog); in check_map_prog_compatibility()
20397 if ((bpf_prog_is_offloaded(prog->aux) || bpf_map_is_offloaded(map)) && in check_map_prog_compatibility()
20398 !bpf_offload_prog_map_match(prog, map)) { in check_map_prog_compatibility()
20408 if (prog->sleepable) in check_map_prog_compatibility()
20435 bpf_cgroup_storage_assign(env->prog->aux, map)) { in check_map_prog_compatibility()
20441 if (env->prog->aux->arena) { in check_map_prog_compatibility()
20449 if (!env->prog->jit_requested) { in check_map_prog_compatibility()
20457 env->prog->aux->arena = (void *)map; in check_map_prog_compatibility()
20458 if (!bpf_arena_get_user_vm_start(env->prog->aux->arena)) { in check_map_prog_compatibility()
20482 err = check_map_prog_compatibility(env, map, env->prog); in __add_used_map()
20486 if (env->prog->sleepable) in __add_used_map()
20528 struct bpf_insn *insn = env->prog->insnsi; in resolve_pseudo_ldimm64()
20529 int insn_cnt = env->prog->len; in resolve_pseudo_ldimm64()
20532 err = bpf_prog_calc_tag(env->prog); in resolve_pseudo_ldimm64()
20671 __bpf_free_used_maps(env->prog->aux, env->used_maps, in release_maps()
20684 struct bpf_insn *insn = env->prog->insnsi; in convert_pseudo_ld_imm64()
20685 int insn_cnt = env->prog->len; in convert_pseudo_ld_imm64()
20747 static void adjust_poke_descs(struct bpf_prog *prog, u32 off, u32 len) in adjust_poke_descs() argument
20749 struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab; in adjust_poke_descs()
20750 int i, sz = prog->aux->size_poke_tab; in adjust_poke_descs()
20768 new_data = vzalloc(array_size(env->prog->len + len - 1, in bpf_patch_insn_data()
20774 new_prog = bpf_patch_insn_single(env->prog, off, patch, len); in bpf_patch_insn_data()
20793 static int adjust_jmp_off(struct bpf_prog *prog, u32 tgt_idx, u32 delta) in adjust_jmp_off() argument
20795 struct bpf_insn *insn = prog->insnsi; in adjust_jmp_off()
20796 u32 insn_cnt = prog->len, i; in adjust_jmp_off()
20847 struct bpf_prog_aux *aux = env->prog->aux; in adjust_subprog_starts_after_remove()
20886 struct bpf_prog *prog = env->prog; in bpf_adj_linfo_after_remove() local
20890 nr_linfo = prog->aux->nr_linfo; in bpf_adj_linfo_after_remove()
20894 linfo = prog->aux->linfo; in bpf_adj_linfo_after_remove()
20913 if (prog->len != off && l_cnt && in bpf_adj_linfo_after_remove()
20924 prog->aux->nr_linfo -= l_cnt; in bpf_adj_linfo_after_remove()
20925 nr_linfo = prog->aux->nr_linfo; in bpf_adj_linfo_after_remove()
20950 unsigned int orig_prog_len = env->prog->len; in verifier_remove_insns()
20953 if (bpf_prog_is_offloaded(env->prog->aux)) in verifier_remove_insns()
20956 err = bpf_remove_insns(env->prog, off, cnt); in verifier_remove_insns()
20989 struct bpf_insn *insn = env->prog->insnsi; in sanitize_dead_code()
20990 const int insn_cnt = env->prog->len; in sanitize_dead_code()
21019 struct bpf_insn *insn = env->prog->insnsi; in opt_hard_wire_dead_code_branches()
21020 const int insn_cnt = env->prog->len; in opt_hard_wire_dead_code_branches()
21034 if (bpf_prog_is_offloaded(env->prog->aux)) in opt_hard_wire_dead_code_branches()
21044 int insn_cnt = env->prog->len; in opt_remove_dead_code()
21059 insn_cnt = env->prog->len; in opt_remove_dead_code()
21070 struct bpf_insn *insn = env->prog->insnsi; in opt_remove_nops()
21071 int insn_cnt = env->prog->len; in opt_remove_nops()
21101 int i, patch_len, delta = 0, len = env->prog->len; in opt_subreg_zext_lo32_rnd_hi32()
21102 struct bpf_insn *insns = env->prog->insnsi; in opt_subreg_zext_lo32_rnd_hi32()
21185 env->prog = new_prog; in opt_subreg_zext_lo32_rnd_hi32()
21204 const int insn_cnt = env->prog->len; in convert_ctx_accesses()
21215 epilogue_cnt = ops->gen_epilogue(epilogue_buf, env->prog, in convert_ctx_accesses()
21226 insn_buf[cnt++] = env->prog->insnsi[0]; in convert_ctx_accesses()
21230 env->prog = new_prog; in convert_ctx_accesses()
21245 env->prog); in convert_ctx_accesses()
21254 env->prog = new_prog; in convert_ctx_accesses()
21264 WARN_ON(adjust_jmp_off(env->prog, 0, delta)); in convert_ctx_accesses()
21266 if (bpf_prog_is_offloaded(env->prog->aux)) in convert_ctx_accesses()
21269 insn = env->prog->insnsi + delta; in convert_ctx_accesses()
21287 env->prog = new_prog; in convert_ctx_accesses()
21320 env->prog->aux->num_exentries++; in convert_ctx_accesses()
21360 env->prog = new_prog; in convert_ctx_accesses()
21398 env->prog->aux->num_exentries++; in convert_ctx_accesses()
21407 env->prog->aux->num_exentries++; in convert_ctx_accesses()
21444 cnt = convert_ctx_access(type, insn, insn_buf, env->prog, in convert_ctx_accesses()
21488 env->prog = new_prog; in convert_ctx_accesses()
21497 struct bpf_prog *prog = env->prog, **func, *tmp; in jit_subprogs() local
21507 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { in jit_subprogs()
21544 err = bpf_prog_alloc_jited_linfo(prog); in jit_subprogs()
21549 func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL); in jit_subprogs()
21566 memcpy(func[i]->insnsi, &prog->insnsi[subprog_start], in jit_subprogs()
21568 func[i]->type = prog->type; in jit_subprogs()
21573 func[i]->sleepable = prog->sleepable; in jit_subprogs()
21576 func[i]->aux->btf = prog->aux->btf; in jit_subprogs()
21577 func[i]->aux->func_info = prog->aux->func_info; in jit_subprogs()
21578 func[i]->aux->func_info_cnt = prog->aux->func_info_cnt; in jit_subprogs()
21579 func[i]->aux->poke_tab = prog->aux->poke_tab; in jit_subprogs()
21580 func[i]->aux->size_poke_tab = prog->aux->size_poke_tab; in jit_subprogs()
21582 for (j = 0; j < prog->aux->size_poke_tab; j++) { in jit_subprogs()
21585 poke = &prog->aux->poke_tab[j]; in jit_subprogs()
21597 func[i]->blinding_requested = prog->blinding_requested; in jit_subprogs()
21598 func[i]->aux->kfunc_tab = prog->aux->kfunc_tab; in jit_subprogs()
21599 func[i]->aux->kfunc_btf_tab = prog->aux->kfunc_btf_tab; in jit_subprogs()
21600 func[i]->aux->linfo = prog->aux->linfo; in jit_subprogs()
21601 func[i]->aux->nr_linfo = prog->aux->nr_linfo; in jit_subprogs()
21602 func[i]->aux->jited_linfo = prog->aux->jited_linfo; in jit_subprogs()
21604 func[i]->aux->arena = prog->aux->arena; in jit_subprogs()
21698 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { in jit_subprogs()
21712 prog->jited = 1; in jit_subprogs()
21713 prog->bpf_func = func[0]->bpf_func; in jit_subprogs()
21714 prog->jited_len = func[0]->jited_len; in jit_subprogs()
21715 prog->aux->extable = func[0]->aux->extable; in jit_subprogs()
21716 prog->aux->num_exentries = func[0]->aux->num_exentries; in jit_subprogs()
21717 prog->aux->func = func; in jit_subprogs()
21718 prog->aux->func_cnt = env->subprog_cnt - env->hidden_subprog_cnt; in jit_subprogs()
21719 prog->aux->real_func_cnt = env->subprog_cnt; in jit_subprogs()
21720 prog->aux->bpf_exception_cb = (void *)func[env->exception_callback_subprog]->bpf_func; in jit_subprogs()
21721 prog->aux->exception_boundary = func[0]->aux->exception_boundary; in jit_subprogs()
21722 bpf_prog_jit_attempt_done(prog); in jit_subprogs()
21729 for (i = 0; i < prog->aux->size_poke_tab; i++) { in jit_subprogs()
21730 map_ptr = prog->aux->poke_tab[i].tail_call.map; in jit_subprogs()
21731 map_ptr->ops->map_poke_untrack(map_ptr, prog->aux); in jit_subprogs()
21746 prog->jit_requested = 0; in jit_subprogs()
21747 prog->blinding_requested = 0; in jit_subprogs()
21748 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { in jit_subprogs()
21754 bpf_prog_jit_attempt_done(prog); in jit_subprogs()
21761 struct bpf_prog *prog = env->prog; in fixup_call_args() local
21762 struct bpf_insn *insn = prog->insnsi; in fixup_call_args()
21763 bool has_kfunc_call = bpf_prog_has_kfunc_call(prog); in fixup_call_args()
21768 if (env->prog->jit_requested && in fixup_call_args()
21769 !bpf_prog_is_offloaded(env->prog->aux)) { in fixup_call_args()
21781 if (env->subprog_cnt > 1 && env->prog->aux->tail_call_reachable) { in fixup_call_args()
21788 for (i = 0; i < prog->len; i++, insn++) { in fixup_call_args()
21813 struct bpf_prog *prog = env->prog; in specialize_kfunc() local
21819 xdp_kfunc = bpf_dev_bound_resolve_kfunc(prog, func_id); in specialize_kfunc()
21844 bpf_lsm_has_d_inode_locked(prog)) in specialize_kfunc()
21848 bpf_lsm_has_d_inode_locked(prog)) in specialize_kfunc()
21885 desc = find_kfunc_desc(env->prog, insn->imm, insn->off); in fixup_kfunc_call()
21965 struct bpf_insn ld_addrs[2] = { BPF_LD_IMM64(regno, (long)env->prog->aux) }; in fixup_kfunc_call()
21981 struct bpf_prog *prog; in add_hidden_subprog() local
21992 prog = bpf_patch_insn_data(env, env->prog->len - 1, patch, len); in add_hidden_subprog()
21993 if (!prog) in add_hidden_subprog()
21995 env->prog = prog; in add_hidden_subprog()
21997 info[cnt].start = prog->len - len + 1; in add_hidden_subprog()
22008 struct bpf_prog *prog = env->prog; in do_misc_fixups() local
22009 enum bpf_attach_type eatype = prog->expected_attach_type; in do_misc_fixups()
22010 enum bpf_prog_type prog_type = resolve_prog_type(prog); in do_misc_fixups()
22011 struct bpf_insn *insn = prog->insnsi; in do_misc_fixups()
22013 const int insn_cnt = prog->len; in do_misc_fixups()
22027 *patch++ = env->prog->insnsi[insn_cnt - 1]; in do_misc_fixups()
22033 prog = env->prog; in do_misc_fixups()
22034 insn = prog->insnsi; in do_misc_fixups()
22044 (((struct bpf_map *)env->prog->aux->arena)->map_flags & BPF_F_NO_USER_CONV)) { in do_misc_fixups()
22082 env->prog = prog = new_prog; in do_misc_fixups()
22173 env->prog = prog = new_prog; in do_misc_fixups()
22203 env->prog = prog = new_prog; in do_misc_fixups()
22223 env->prog = prog = new_prog; in do_misc_fixups()
22276 env->prog = prog = new_prog; in do_misc_fixups()
22321 env->prog = prog = new_prog; in do_misc_fixups()
22342 env->prog = prog = new_prog; in do_misc_fixups()
22363 env->prog = prog = new_prog; in do_misc_fixups()
22373 prog->dst_needed = 1; in do_misc_fixups()
22377 prog->kprobe_override = 1; in do_misc_fixups()
22384 prog->cb_access = 1; in do_misc_fixups()
22386 prog->aux->stack_depth = MAX_BPF_STACK; in do_misc_fixups()
22387 prog->aux->max_pkt_offset = MAX_PACKET_OFF; in do_misc_fixups()
22398 if (env->bpf_capable && !prog->blinding_requested && in do_misc_fixups()
22399 prog->jit_requested && in do_misc_fixups()
22410 ret = bpf_jit_add_poke_descriptor(prog, &desc); in do_misc_fixups()
22448 env->prog = prog = new_prog; in do_misc_fixups()
22468 BPF_LD_IMM64(BPF_REG_3, (long)prog->aux), in do_misc_fixups()
22481 env->prog = prog = new_prog; in do_misc_fixups()
22500 env->prog = prog = new_prog; in do_misc_fixups()
22519 env->prog = prog = new_prog; in do_misc_fixups()
22528 if (prog->jit_requested && BITS_PER_LONG == 64 && in do_misc_fixups()
22560 env->prog = prog = new_prog; in do_misc_fixups()
22624 if (prog->jit_requested && BITS_PER_LONG == 64 && in do_misc_fixups()
22643 env->prog = prog = new_prog; in do_misc_fixups()
22671 env->prog = prog = new_prog; in do_misc_fixups()
22696 env->prog = prog = new_prog; in do_misc_fixups()
22724 env->prog = prog = new_prog; in do_misc_fixups()
22739 env->prog = prog = new_prog; in do_misc_fixups()
22754 env->prog = prog = new_prog; in do_misc_fixups()
22761 prog->jit_requested && BITS_PER_LONG == 64 && in do_misc_fixups()
22810 env->prog = prog = new_prog; in do_misc_fixups()
22816 if (prog->jit_requested && BITS_PER_LONG == 64 && in do_misc_fixups()
22828 env->prog = prog = new_prog; in do_misc_fixups()
22833 fn = env->ops->get_func_proto(insn->imm, env->prog); in do_misc_fixups()
22850 if (stack_depth > MAX_BPF_STACK && !prog->jit_requested) { in do_misc_fixups()
22863 env->prog->aux->stack_depth = subprogs[0].stack_depth; in do_misc_fixups()
22889 insn_buf[cnt++] = env->prog->insnsi[subprog_start]; in do_misc_fixups()
22894 env->prog = prog = new_prog; in do_misc_fixups()
22901 WARN_ON(adjust_jmp_off(env->prog, subprog_start, delta)); in do_misc_fixups()
22905 for (i = 0; i < prog->aux->size_poke_tab; i++) { in do_misc_fixups()
22906 map_ptr = prog->aux->poke_tab[i].tail_call.map; in do_misc_fixups()
22914 ret = map_ptr->ops->map_poke_track(map_ptr, prog->aux); in do_misc_fixups()
22921 sort_kfunc_descs_by_imm_off(env->prog); in do_misc_fixups()
23022 struct bpf_insn *insn = env->prog->insnsi; in optimize_bpf_loop()
23023 int insn_cnt = env->prog->len; in optimize_bpf_loop()
23045 env->prog = new_prog; in optimize_bpf_loop()
23058 env->prog->aux->stack_depth = env->subprog_info[0].stack_depth; in optimize_bpf_loop()
23070 struct bpf_insn *insn = env->prog->insnsi; in remove_fastcall_spills_fills()
23071 int insn_cnt = env->prog->len; in remove_fastcall_spills_fills()
23144 struct bpf_prog_aux *aux = env->prog->aux; in do_check_common()
23172 if (subprog || env->prog->type == BPF_PROG_TYPE_EXT) { in do_check_common()
23242 if (env->prog->aux->func_info_aux) { in do_check_common()
23245 env->prog->aux->func_info_aux[0].unreliable = true; in do_check_common()
23254 if (!subprog && env->prog->type == BPF_PROG_TYPE_STRUCT_OPS) { in do_check_common()
23290 struct bpf_prog_aux *aux = env->prog->aux; in do_check_subprogs()
23345 env->prog->aux->stack_depth = env->subprog_info[0].stack_depth; in do_check_main()
23374 int bpf_prog_ctx_arg_info_init(struct bpf_prog *prog, in bpf_prog_ctx_arg_info_init() argument
23377 prog->aux->ctx_arg_info = kmemdup_array(info, cnt, sizeof(*info), GFP_KERNEL_ACCOUNT); in bpf_prog_ctx_arg_info_init()
23378 prog->aux->ctx_arg_info_size = cnt; in bpf_prog_ctx_arg_info_init()
23380 return prog->aux->ctx_arg_info ? 0 : -ENOMEM; in bpf_prog_ctx_arg_info_init()
23389 struct bpf_prog *prog = env->prog; in check_struct_ops_btf_id() local
23396 if (!prog->gpl_compatible) { in check_struct_ops_btf_id()
23401 if (!prog->aux->attach_btf_id) in check_struct_ops_btf_id()
23404 btf = prog->aux->attach_btf; in check_struct_ops_btf_id()
23415 btf_id = prog->aux->attach_btf_id; in check_struct_ops_btf_id()
23425 member_idx = prog->expected_attach_type; in check_struct_ops_btf_id()
23451 err = st_ops->check_member(t, member, prog); in check_struct_ops_btf_id()
23460 if (prog->aux->priv_stack_requested && !bpf_jit_supports_private_stack()) { in check_struct_ops_btf_id()
23482 prog->aux->st_ops = st_ops; in check_struct_ops_btf_id()
23483 prog->aux->attach_st_ops_member_off = member_off; in check_struct_ops_btf_id()
23485 prog->aux->attach_func_proto = func_proto; in check_struct_ops_btf_id()
23486 prog->aux->attach_func_name = mname; in check_struct_ops_btf_id()
23489 return bpf_prog_ctx_arg_info_init(prog, st_ops_desc->arg_info[member_idx].info, in check_struct_ops_btf_id()
23525 const struct bpf_prog *prog, in bpf_check_attach_target() argument
23530 bool prog_extension = prog->type == BPF_PROG_TYPE_EXT; in bpf_check_attach_target()
23531 bool prog_tracing = prog->type == BPF_PROG_TYPE_TRACING; in bpf_check_attach_target()
23547 btf = tgt_prog ? tgt_prog->aux->btf : prog->aux->attach_btf; in bpf_check_attach_target()
23568 if (bpf_prog_is_dev_bound(prog->aux) && in bpf_check_attach_target()
23569 !bpf_prog_dev_bound_match(prog, tgt_prog)) { in bpf_check_attach_target()
23596 if (!prog->jit_requested) { in bpf_check_attach_target()
23604 if (prog->aux->changes_pkt_data && !tgt_changes_pkt_data) { in bpf_check_attach_target()
23613 if (prog->aux->might_sleep && !tgt_might_sleep) { in bpf_check_attach_target()
23633 } else if (tgt_prog->type == prog->type) { in bpf_check_attach_target()
23670 switch (prog->expected_attach_type) { in bpf_check_attach_target()
23750 btf_check_type_match(log, prog, btf, t)) in bpf_check_attach_target()
23756 if ((prog->aux->saved_dst_prog_type || prog->aux->saved_dst_attach_type) && in bpf_check_attach_target()
23757 (!tgt_prog || prog->aux->saved_dst_prog_type != tgt_prog->type || in bpf_check_attach_target()
23758 prog->aux->saved_dst_attach_type != tgt_prog->expected_attach_type)) in bpf_check_attach_target()
23792 if (prog->sleepable) { in bpf_check_attach_target()
23794 switch (prog->type) { in bpf_check_attach_target()
23808 prog); in bpf_check_attach_target()
23829 } else if (prog->expected_attach_type == BPF_MODIFY_RETURN) { in bpf_check_attach_target()
23836 if (btf_kfunc_is_modify_return(btf, btf_id, prog) || in bpf_check_attach_target()
23901 static bool can_be_sleepable(struct bpf_prog *prog) in BTF_SET_START()
23903 if (prog->type == BPF_PROG_TYPE_TRACING) { in BTF_SET_START()
23904 switch (prog->expected_attach_type) { in BTF_SET_START()
23914 return prog->type == BPF_PROG_TYPE_LSM || in BTF_SET_START()
23915 prog->type == BPF_PROG_TYPE_KPROBE /* only for uprobes */ || in BTF_SET_START()
23916 prog->type == BPF_PROG_TYPE_STRUCT_OPS; in BTF_SET_START()
23921 struct bpf_prog *prog = env->prog; in check_attach_btf_id() local
23922 struct bpf_prog *tgt_prog = prog->aux->dst_prog; in check_attach_btf_id()
23924 u32 btf_id = prog->aux->attach_btf_id; in check_attach_btf_id()
23929 if (prog->type == BPF_PROG_TYPE_SYSCALL) { in check_attach_btf_id()
23930 if (prog->sleepable) in check_attach_btf_id()
23937 if (prog->sleepable && !can_be_sleepable(prog)) { in check_attach_btf_id()
23942 if (prog->type == BPF_PROG_TYPE_STRUCT_OPS) in check_attach_btf_id()
23945 if (prog->type != BPF_PROG_TYPE_TRACING && in check_attach_btf_id()
23946 prog->type != BPF_PROG_TYPE_LSM && in check_attach_btf_id()
23947 prog->type != BPF_PROG_TYPE_EXT) in check_attach_btf_id()
23950 ret = bpf_check_attach_target(&env->log, prog, tgt_prog, btf_id, &tgt_info); in check_attach_btf_id()
23954 if (tgt_prog && prog->type == BPF_PROG_TYPE_EXT) { in check_attach_btf_id()
23960 prog->expected_attach_type = tgt_prog->expected_attach_type; in check_attach_btf_id()
23964 prog->aux->attach_func_proto = tgt_info.tgt_type; in check_attach_btf_id()
23965 prog->aux->attach_func_name = tgt_info.tgt_name; in check_attach_btf_id()
23966 prog->aux->mod = tgt_info.tgt_mod; in check_attach_btf_id()
23969 prog->aux->saved_dst_prog_type = tgt_prog->type; in check_attach_btf_id()
23970 prog->aux->saved_dst_attach_type = tgt_prog->expected_attach_type; in check_attach_btf_id()
23973 if (prog->expected_attach_type == BPF_TRACE_RAW_TP) { in check_attach_btf_id()
23974 prog->aux->attach_btf_trace = true; in check_attach_btf_id()
23976 } else if (prog->expected_attach_type == BPF_TRACE_ITER) { in check_attach_btf_id()
23977 return bpf_iter_prog_supported(prog); in check_attach_btf_id()
23980 if (prog->type == BPF_PROG_TYPE_LSM) { in check_attach_btf_id()
23981 ret = bpf_lsm_verify_prog(&env->log, prog); in check_attach_btf_id()
23984 } else if (prog->type == BPF_PROG_TYPE_TRACING && in check_attach_btf_id()
23989 } else if ((prog->expected_attach_type == BPF_TRACE_FEXIT || in check_attach_btf_id()
23990 prog->expected_attach_type == BPF_MODIFY_RETURN) && in check_attach_btf_id()
23997 key = bpf_trampoline_compute_key(tgt_prog, prog->aux->attach_btf, btf_id); in check_attach_btf_id()
24005 prog->aux->dst_trampoline = tr; in check_attach_btf_id()
24129 static int insn_successors(struct bpf_prog *prog, u32 idx, u32 succ[2]) in insn_successors() argument
24131 struct bpf_insn *insn = &prog->insnsi[idx]; in insn_successors()
24136 if (can_fallthrough(insn) && idx + 1 < prog->len) in insn_successors()
24300 struct bpf_insn *insns = env->prog->insnsi; in compute_live_registers()
24302 int insn_cnt = env->prog->len; in compute_live_registers()
24345 succ_num = insn_successors(env->prog, insn_idx, succ); in compute_live_registers()
24400 const u32 insn_cnt = env->prog->len; in compute_scc()
24514 succ_cnt = insn_successors(env->prog, w, succ); in compute_scc()
24568 int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u32 uattr_size) in bpf_check() argument
24591 len = (*prog)->len; in bpf_check()
24599 env->prog = *prog; in bpf_check()
24600 env->ops = bpf_verifier_ops[env->prog->type]; in bpf_check()
24602 env->allow_ptr_leaks = bpf_allow_ptr_leaks(env->prog->aux->token); in bpf_check()
24603 env->allow_uninit_stack = bpf_allow_uninit_stack(env->prog->aux->token); in bpf_check()
24604 env->bypass_spec_v1 = bpf_bypass_spec_v1(env->prog->aux->token); in bpf_check()
24605 env->bypass_spec_v4 = bpf_bypass_spec_v4(env->prog->aux->token); in bpf_check()
24606 env->bpf_capable = is_priv = bpf_token_capable(env->prog->aux->token, CAP_BPF); in bpf_check()
24677 if (bpf_prog_is_offloaded(env->prog->aux)) { in bpf_check()
24678 ret = bpf_prog_offload_verifier_prep(env->prog); in bpf_check()
24706 if (ret == 0 && bpf_prog_is_offloaded(env->prog->aux)) in bpf_check()
24747 if (ret == 0 && !bpf_prog_is_offloaded(env->prog->aux)) { in bpf_check()
24749 env->prog->aux->verifier_zext = bpf_jit_needs_zext() ? !ret in bpf_check()
24758 env->prog->aux->verified_insns = env->insn_processed; in bpf_check()
24777 env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt, in bpf_check()
24781 if (!env->prog->aux->used_maps) { in bpf_check()
24786 memcpy(env->prog->aux->used_maps, env->used_maps, in bpf_check()
24788 env->prog->aux->used_map_cnt = env->used_map_cnt; in bpf_check()
24792 env->prog->aux->used_btfs = kmalloc_array(env->used_btf_cnt, in bpf_check()
24795 if (!env->prog->aux->used_btfs) { in bpf_check()
24800 memcpy(env->prog->aux->used_btfs, env->used_btfs, in bpf_check()
24802 env->prog->aux->used_btf_cnt = env->used_btf_cnt; in bpf_check()
24814 if (!env->prog->aux->used_maps) in bpf_check()
24819 if (!env->prog->aux->used_btfs) in bpf_check()
24825 if (env->prog->type == BPF_PROG_TYPE_EXT) in bpf_check()
24826 env->prog->expected_attach_type = 0; in bpf_check()
24828 *prog = env->prog; in bpf_check()
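
For orientation, the references above cluster around a few access patterns: verifier code reaches the program under verification through env->prog, its instruction array through env->prog->insnsi and env->prog->len, and per-program metadata through env->prog->aux. The sketch below is a minimal, user-space illustration of that nesting only; the struct definitions are simplified stand-ins, not the kernel's struct bpf_prog, struct bpf_prog_aux, or struct bpf_verifier_env.

/*
 * Minimal sketch of the env->prog->aux access pattern seen in the
 * listing above. All types here are simplified stand-ins invented
 * for illustration; they are not the kernel definitions.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_prog_aux {              /* stand-in for struct bpf_prog_aux */
	unsigned int func_info_cnt;
	unsigned int max_ctx_offset;
};

struct fake_prog {                  /* stand-in for struct bpf_prog */
	unsigned int len;           /* instruction count, cf. prog->len */
	bool gpl_compatible;
	bool jit_requested;
	struct fake_prog_aux *aux;  /* per-program metadata, cf. prog->aux */
};

struct fake_verifier_env {          /* stand-in for struct bpf_verifier_env */
	struct fake_prog *prog;     /* program being verified, cf. env->prog */
};

/* Mirrors the shape of checks like check_ctx_access(): widen a recorded
 * maximum offset when an access reaches past it. */
static void record_ctx_access(struct fake_verifier_env *env,
			      unsigned int off, unsigned int size)
{
	if (env->prog->aux->max_ctx_offset < off + size)
		env->prog->aux->max_ctx_offset = off + size;
}

int main(void)
{
	struct fake_prog_aux aux = { .func_info_cnt = 1 };
	struct fake_prog prog = {
		.len = 4, .gpl_compatible = true, .jit_requested = true, .aux = &aux,
	};
	struct fake_verifier_env env = { .prog = &prog };

	record_ctx_access(&env, 8, 4);
	printf("len=%u max_ctx_offset=%u\n",
	       env.prog->len, env.prog->aux->max_ctx_offset);
	return 0;
}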