/kernel/bpf/
bpf_lsm.c
     44  BTF_ID(func, bpf_lsm_getselfattr)        in BTF_SET_END()
     45  BTF_ID(func, bpf_lsm_getprocattr)        in BTF_SET_END()
     46  BTF_ID(func, bpf_lsm_setprocattr)        in BTF_SET_END()
     53  BTF_ID(func, bpf_lsm_ismaclabel)         in BTF_SET_END()
    156  .func = bpf_bprm_opts_set,
    177  .func = bpf_ima_inode_hash,
    196  .func = bpf_ima_file_hash,
    287  BTF_ID(func, bpf_lsm_bpf)                in BTF_ID() argument
    303  BTF_ID(func, bpf_lsm_capget)             in BTF_ID()
    304  BTF_ID(func, bpf_lsm_capset)             in BTF_ID()
    [all …]
|
cpumask.c
    481  BTF_ID_FLAGS(func, bpf_cpumask_first, KF_RCU)
    489  BTF_ID_FLAGS(func, bpf_cpumask_setall, KF_RCU)
    490  BTF_ID_FLAGS(func, bpf_cpumask_clear, KF_RCU)
    491  BTF_ID_FLAGS(func, bpf_cpumask_and, KF_RCU)
    492  BTF_ID_FLAGS(func, bpf_cpumask_or, KF_RCU)
    493  BTF_ID_FLAGS(func, bpf_cpumask_xor, KF_RCU)
    494  BTF_ID_FLAGS(func, bpf_cpumask_equal, KF_RCU)
    497  BTF_ID_FLAGS(func, bpf_cpumask_empty, KF_RCU)
    498  BTF_ID_FLAGS(func, bpf_cpumask_full, KF_RCU)
    499  BTF_ID_FLAGS(func, bpf_cpumask_copy, KF_RCU)
    [all …]
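The `BTF_ID_FLAGS(func, ...)` entries above build the BTF ID set through which these cpumask kfuncs are exposed to the BPF verifier. A minimal sketch of that registration idiom, assuming the current `BTF_KFUNCS_START`/`register_btf_kfunc_id_set()` API (older trees spell the set macros `BTF_SET8_START`/`BTF_SET8_END`, and the real file registers against specific program types):

```c
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/init.h>
#include <linux/module.h>

/* Sketch only: two of the kfuncs from the listing above; the real
 * file registers the full set. */
BTF_KFUNCS_START(cpumask_kfunc_btf_ids)
BTF_ID_FLAGS(func, bpf_cpumask_first, KF_RCU)
BTF_ID_FLAGS(func, bpf_cpumask_setall, KF_RCU)
BTF_KFUNCS_END(cpumask_kfunc_btf_ids)

static const struct btf_kfunc_id_set cpumask_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &cpumask_kfunc_btf_ids,
};

static int __init cpumask_kfunc_init(void)
{
	/* UNSPEC makes the set visible to all program types */
	return register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC,
					 &cpumask_kfunc_set);
}
late_initcall(cpumask_kfunc_init);
```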
|
helpers.c
     97  .func = bpf_map_push_elem,
    112  .func = bpf_map_pop_elem,
    150  .func = bpf_user_rnd_u32,
    185  .func = bpf_ktime_get_ns,
    348  .func = bpf_spin_lock,
    371  .func = bpf_spin_unlock,
    400  .func = bpf_jiffies64,
    538  .func = bpf_strtol,
    566  .func = bpf_strtoul,
    582  .func = bpf_strncmp,
    [all …]
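Each `.func = ...` line here initializes a `struct bpf_func_proto`, the table entry that ties a helper's BPF-visible signature to its kernel implementation. A sketch of the idiom using `bpf_jiffies64` (listed above at line 400); this follows the usual `BPF_CALL_n`/proto pattern, with the flag values illustrative:

```c
#include <linux/bpf.h>
#include <linux/jiffies.h>

/* BPF_CALL_0 expands to the calling-convention wrapper that
 * verifier-generated code invokes through .func. */
BPF_CALL_0(bpf_jiffies64)
{
	return get_jiffies_64();
}

const struct bpf_func_proto bpf_jiffies64_proto = {
	.func		= bpf_jiffies64,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};
```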
|
trampoline.c
    180  void *ip = tr->func.addr;                in unregister_fentry()
    183  if (tr->func.ftrace_managed)             in unregister_fentry()
    194  void *ip = tr->func.addr;                in modify_fentry()
    197  if (tr->func.ftrace_managed) {           in modify_fentry()
    211  void *ip = tr->func.addr;                in register_fentry()
    219  tr->func.ftrace_managed = true;          in register_fentry()
    222  if (tr->func.ftrace_managed) {           in register_fentry()
    441  tlinks, tr->func.addr);                  in bpf_trampoline_update()
    460  tr->func.addr);                          in bpf_trampoline_update()
    483  tr->fops->func = NULL;                   in bpf_trampoline_update()
    [all …]
|
/kernel/livepatch/
patch.c
     28  struct klp_func *func;                          in klp_find_ops() local
     46  struct klp_func *func;                          in klp_ftrace_handler() local
     70  if (WARN_ON_ONCE(!func))                        in klp_ftrace_handler()
    106  func = list_entry_rcu(func->stack_node.next,    in klp_ftrace_handler()
    118  if (func->nop)                                  in klp_ftrace_handler()
    157  func->patched = false;                          in klp_unpatch_func()
    178  func->old_name);                                in klp_patch_func()
    219  func->patched = true;                           in klp_patch_func()
    232  struct klp_func *func;                          in __klp_unpatch_object() local
    238  if (func->patched)                              in __klp_unpatch_object()
    [all …]
|
core.c
     93  return func;                                    in klp_find_func()
    543  kfree(func);                                    in klp_free_func_nop()
    551  func = kzalloc(sizeof(*func), GFP_KERNEL);      in klp_alloc_func_nop()
    552  if (!func)                                      in klp_alloc_func_nop()
    558  kfree(func);                                    in klp_alloc_func_nop()
    571  return func;                                    in klp_alloc_func_nop()
    590  if (func)                                       in klp_add_object_nops()
    594  if (!func)                                      in klp_add_object_nops()
    803  if (!func->new_func && !func->nop)              in klp_init_func()
    820  func->old_sympos ? func->old_sympos : 1);       in klp_init_func()
    [all …]
|
transition.c
     84  struct klp_func *func;                          in klp_complete_transition() local
    114  klp_for_each_func(obj, func)                    in klp_complete_transition()
    115  func->transition = false;                       in klp_complete_transition()
    218  func_size = func->new_size;                     in klp_check_stack_func()
    229  func_size = func->old_size;                     in klp_check_stack_func()
    258  struct klp_func *func;                          in klp_check_stack() local
    272  klp_for_each_func(obj, func) {                  in klp_check_stack()
    275  *oldname = func->old_name;                      in klp_check_stack()
    557  struct klp_func *func;                          in klp_init_transition() local
    617  klp_for_each_func(obj, func)                    in klp_init_transition()
    [all …]
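All three livepatch files walk `struct klp_func` entries supplied by a livepatch module. A minimal sketch of where those entries come from, modeled on `samples/livepatch/livepatch-sample.c` (the replacement function body here is illustrative):

```c
#include <linux/livepatch.h>
#include <linux/module.h>
#include <linux/seq_file.h>

static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "patched\n");
	return 0;
}

static struct klp_func funcs[] = {
	{
		.old_name = "cmdline_proc_show",
		.new_func = livepatch_cmdline_proc_show,
	}, { }
};

static struct klp_object objs[] = {
	{
		/* name == NULL patches vmlinux itself */
		.funcs = funcs,
	}, { }
};

static struct klp_patch patch = {
	.mod  = THIS_MODULE,
	.objs = objs,
};

static int livepatch_init(void)
{
	return klp_enable_patch(&patch);
}
module_init(livepatch_init);
MODULE_LICENSE("GPL");
MODULE_INFO(livepatch, "Y");
```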
|
/kernel/
tracepoint.c
    152  if (old[iter_probes].func == tp_func->func &&   in func_add()
    199  if (tp_func->func) {                            in func_remove()
    201  if ((old[nr_probes].func == tp_func->func &&    in func_remove()
    224  if ((old[i].func != tp_func->func ||            in func_remove()
    237  if (old[i].func == tp_func->func &&             in func_remove()
    255  if (!tp_funcs[1].func)                          in nr_func_state()
    257  if (!tp_funcs[2].func)                          in nr_func_state()
    270  func = tp_funcs[0].func;                        in tracepoint_update_call()
    432  tp_func.func = probe;                           in tracepoint_probe_register_prio_may_exist()
    461  tp_func.func = probe;                           in tracepoint_probe_register_prio()
    [all …]
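`func_add()`/`func_remove()` maintain the `(func, data)` pairs that `tracepoint_probe_register()` hands in. From the caller's side, the idiom looks roughly like this sketch, assuming the `sched_switch` tracepoint; the probe signature must match that tracepoint's `TP_PROTO`, which has varied across kernel versions:

```c
#include <linux/init.h>
#include <linux/tracepoint.h>
#include <trace/events/sched.h>

/* First argument is the 'data' passed at registration time; the rest
 * mirrors the tracepoint's TP_PROTO (here, the post-5.18 layout). */
static void my_switch_probe(void *data, bool preempt,
			    struct task_struct *prev,
			    struct task_struct *next,
			    unsigned int prev_state)
{
	/* runs on every context switch */
}

static int __init probe_init(void)
{
	/* thin generated wrapper around tracepoint_probe_register() */
	return register_trace_sched_switch(my_switch_probe, NULL);
}
```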
|
smp.c
     136  func(info);                                    in csd_do_func()
     280  cpu, csd->func, csd->info);                    in csd_lock_wait_toolong()
     391  smp_call_func_t func;                          in __smp_call_single_queue() local
     428  smp_call_func_t func = csd->func;              in generic_exec_single() local
     514  csd->func);                                    in __flush_smp_call_function_queue()
     536  smp_call_func_t func = csd->func;              in __flush_smp_call_function_queue() local
     572  smp_call_func_t func = csd->func;              in __flush_smp_call_function_queue() local
     677  csd->func = func;                              in smp_call_function_single()
     832  csd->func = func;                              in smp_call_function_many_cond()
    1107  int (*func)(void *);                           member
    [all …]
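The `csd->func = func` stores above are what carry a cross-CPU call to its target. From the API side, a minimal sketch:

```c
#include <linux/smp.h>

static void whoami(void *info)
{
	/* runs in IPI context on the target CPU, IRQs disabled */
	*(int *)info = smp_processor_id();
}

static int query_cpu(int cpu)
{
	int id = -1;
	int ret;

	/* wait=1: block until the target CPU has run whoami() */
	ret = smp_call_function_single(cpu, whoami, &id, 1);
	return ret ? ret : id;
}
```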
|
async.c
     74  async_func_t func;                              member
    126  entry->func, task_pid_nr(current));             in async_run_entry_fn()
    129  entry->func(entry->data, entry->cookie);        in async_run_entry_fn()
    132  (long long)entry->cookie, entry->func,          in async_run_entry_fn()
    150  static async_cookie_t __async_schedule_node_domain(async_func_t func,   in __async_schedule_node_domain() argument
    161  entry->func = func;                             in __async_schedule_node_domain()
    200  async_cookie_t async_schedule_node_domain(async_func_t func, void *data,   in async_schedule_node_domain() argument
    221  func(data, newcookie);                          in async_schedule_node_domain()
    225  return __async_schedule_node_domain(func, data, node, domain, entry);   in async_schedule_node_domain()
    260  bool async_schedule_dev_nocall(async_func_t func, struct device *dev)   in async_schedule_dev_nocall() argument
    [all …]
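`entry->func` holds an `async_func_t`, invoked as `func(data, cookie)` from a worker thread. A usage sketch:

```c
#include <linux/async.h>

static void probe_slow_hw(void *data, async_cookie_t cookie)
{
	/* long-running init, possibly concurrent with other entries */
}

static void kick_off(void)
{
	async_schedule(probe_slow_hw, NULL);

	/* later: wait for all outstanding async work to finish */
	async_synchronize_full();
}
```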
|
up.c
    12  int smp_call_function_single(int cpu, void (*func) (void *info), void *info,   in smp_call_function_single()
    21  func(info);                                      in smp_call_function_single()
    33  csd->func(csd->info);                            in smp_call_function_single_async()
    43  void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,   in on_each_cpu_cond_mask() argument
    51  func(info);                                      in on_each_cpu_cond_mask()
    58  int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)   in smp_call_on_cpu()
    67  ret = func(par);                                 in smp_call_on_cpu()
|
static_call_inline.c
    134  void __static_call_update(struct static_call_key *key, void *tramp, void *func)   in __static_call_update() argument
    142  if (key->func == func)                           in __static_call_update()
    145  key->func = func;                                in __static_call_update()
    147  arch_static_call_transform(NULL, tramp, func, false);   in __static_call_update()
    209  arch_static_call_transform(site_addr, tramp, func,   in __static_call_update()
    286  arch_static_call_transform(site_addr, NULL, key->func,   in __static_call_init()
    540  int (*func)(int);                                member
    556  if (scd->func)                                   in test_static_call_init()
    557  static_call_update(sc_selftest, scd->func);      in test_static_call_init()
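`__static_call_update()` re-patches every inline call site (or the trampoline) to the new target kept in `key->func`. The public face of that machinery, as a sketch with illustrative function names:

```c
#include <linux/static_call.h>

static int slow_hash(int x) { return x * 31; }
static int fast_hash(int x) { return x << 5; }

DEFINE_STATIC_CALL(my_hash, slow_hash);

static int use_it(int x)
{
	/* compiles to a direct call; no function-pointer load */
	return static_call(my_hash)(x);
}

static void go_fast(void)
{
	/* lands in __static_call_update(), patching all call sites */
	static_call_update(my_hash, fast_hash);
}
```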
|
task_work.c
    142  return cb->func == data;                         in task_work_func_match()
    157  task_work_cancel_func(struct task_struct *task, task_work_func_t func)   in task_work_cancel_func() argument
    159  return task_work_cancel_match(task, task_work_func_match, func);   in task_work_cancel_func()
    227  work->func(work);                                in task_work_run()
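`work->func(work)` at line 227 is the deferred callback a task runs on its way back to userspace; `task_work_cancel_func()` matches pending work by that same pointer. A usage sketch:

```c
#include <linux/task_work.h>

static void on_return_to_user(struct callback_head *cb)
{
	/* runs in the target task's own context */
}

static struct callback_head my_work;

static int queue_for(struct task_struct *task)
{
	init_task_work(&my_work, on_return_to_user);
	/* TWA_RESUME: run before the next return to userspace */
	return task_work_add(task, &my_work, TWA_RESUME);
}
```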
|
/kernel/trace/
trace_functions.c
     69  ops->func = function_trace_call;                in ftrace_allocate_ftrace_ops()
    148  ftrace_func_t func;                             in function_trace_init() local
    158  if (!func)                                      in function_trace_init()
    458  ftrace_func_t func;                             in func_set_flag() local
    471  if (!func)                                      in func_set_flag()
    475  if (tr->ops->func == func)                      in func_set_flag()
    482  tr->ops->func = func;                           in func_set_flag()
    826  .func = ftrace_dump_probe,
    838  .func = ftrace_traceon,
    843  .func = ftrace_traceoff,
    [all …]
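`ops->func` is the per-tracer callback that every traced function funnels into. A registration sketch; the callback signature shown matches recent kernels, while older trees pass `struct pt_regs *` instead of `struct ftrace_regs *`:

```c
#include <linux/ftrace.h>
#include <linux/init.h>

static void my_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	/* called at the entry of every traced function; keep it cheap
	 * and reentrancy-safe */
}

static struct ftrace_ops my_ops = {
	.func = my_trace_call,
};

static int __init start_tracing(void)
{
	return register_ftrace_function(&my_ops);
}
```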
|
trace_entries.h
     82  __field_packed( unsigned long, graph_ent, func )
     87  F_printk("--> %ps (%u)", (void *)__entry->func, __entry->depth)
     99  __field_packed( unsigned long, graph_ent, func )
    104  F_printk("--> %ps (%u) <- %ps", (void *)__entry->func, __entry->depth,
    125  __field_packed( unsigned long, ret, func )
    134  (void *)__entry->func, __entry->depth,
    148  __field_packed( unsigned long, ret, func )
    156  (void *)__entry->func, __entry->depth,
    357  __array( char, func, TRACE_FUNC_SIZE+1 )
    365  __entry->func, __entry->file, __entry->correct,
|
ftrace.c
    1336  ftrace_mod->func = kstrdup(func, GFP_KERNEL);   in ftrace_add_mod()
    4897  if (func) {                                     in match_records()
    5017  func++;                                         in cache_mod()
    5027  strcmp(ftrace_mod->func, func) == 0)) {         in cache_mod()
    5073  func = kstrdup(ftrace_mod->func, GFP_KERNEL);   in process_mod_list()
    5084  ftrace_mod->func = func;                        in process_mod_list()
    5091  func = ftrace_mod->func;                        in process_mod_list()
    5094  match_records(new_hash, func, strlen(func), mod);   in process_mod_list()
    5152  if (!func)                                      in ftrace_mod_callback()
    7803  func = kmalloc(sizeof(*func), GFP_KERNEL);      in add_to_clear_hash_list()
    [all …]
|
trace_functions_graph.c
     169  entry->graph_ent.func = trace->func;           in __trace_graph_retaddr_entry()
     289  .func = ip,                                    in __trace_graph_function()
     293  .func = ip,                                    in __trace_graph_function()
     650  curr->graph_ent.func != next->ret.func)        in get_return_for_leaf()
     859  trace_seq_printf(s, "%ps", func);              in print_graph_retval()
     871  trace_seq_printf(s, "} /* %ps", func);         in print_graph_retval()
     978  unsigned long func;                            in print_graph_entry_nested() local
    1002  func = call->func + iter->tr->text_delta;      in print_graph_entry_nested()
    1004  trace_seq_printf(s, "%ps", (void *)func);      in print_graph_entry_nested()
    1243  unsigned long func;                            in print_graph_return() local
    [all …]
|
bpf_trace.c
     388  .func = bpf_trace_printk,
     486  .func = bpf_seq_printf,
     503  .func = bpf_seq_write,
     885  .func = bpf_send_signal,
     941  BTF_ID(func, vfs_truncate)                     in BTF_ID()
     943  BTF_ID(func, dentry_open)                      in BTF_ID()
     944  BTF_ID(func, vfs_getattr)                      in BTF_ID()
     945  BTF_ID(func, filp_close)                       in BTF_ID()
     964  .func = bpf_d_path,
    1207  .func = get_func_arg,
    [all …]
|
fgraph.c
     628  ret_stack->func = func;                        in ftrace_push_return_trace()
     661  bit = ftrace_test_recursion_trylock(func, ret);   in function_graph_enter_regs()
     665  trace.func = func;                             in function_graph_enter_regs()
     759  (void *)ret_stack->func,                       in ftrace_pop_return_trace()
     768  trace->func = ret_stack->func;                 in ftrace_pop_return_trace()
     991  .func = ftrace_graph_func,
    1279  trace_func_graph_ent_t func = NULL;            in ftrace_graph_enable_direct() local
    1284  func = gops->entryfunc;                        in ftrace_graph_enable_direct()
    1290  func = fgraph_array[i]->entryfunc;             in ftrace_graph_enable_direct()
    1295  if (WARN_ON_ONCE(!func))                       in ftrace_graph_enable_direct()
    [all …]
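Here `gops->entryfunc` and `fgraph_array[i]->entryfunc` come from registered `struct fgraph_ops`. A rough registration sketch; the handler signatures have changed several times (recent trees, matching the `function_graph_enter_regs()` seen above, pass a `struct ftrace_regs *` to both handlers), so treat this as an approximation for a current kernel:

```c
#include <linux/ftrace.h>
#include <linux/init.h>

static int my_entry(struct ftrace_graph_ent *trace,
		    struct fgraph_ops *gops,
		    struct ftrace_regs *fregs)
{
	return 1;	/* nonzero: also trace the matching return */
}

static void my_return(struct ftrace_graph_ret *trace,
		      struct fgraph_ops *gops,
		      struct ftrace_regs *fregs)
{
	/* entry/return pairing is reconstructed via ret_stack->func */
}

static struct fgraph_ops my_gops = {
	.entryfunc = my_entry,
	.retfunc   = my_return,
};

static int __init start_graph(void)
{
	return register_ftrace_graph(&my_gops);
}
```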
|
trace_event_perf.c
    250  char *func = NULL;                              in perf_kprobe_init() local
    254  func = strndup_user(u64_to_user_ptr(p_event->attr.kprobe_func),   in perf_kprobe_init()
    256  if (IS_ERR(func)) {                             in perf_kprobe_init()
    257  ret = PTR_ERR(func);                            in perf_kprobe_init()
    261  if (func[0] == '\0') {                          in perf_kprobe_init()
    262  kfree(func);                                    in perf_kprobe_init()
    263  func = NULL;                                    in perf_kprobe_init()
    268  func, (void *)(unsigned long)(p_event->attr.kprobe_addr),   in perf_kprobe_init()
    281  kfree(func);                                    in perf_kprobe_init()
    491  ops->func = perf_ftrace_function_call;          in perf_ftrace_function_register()
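`perf_kprobe_init()` consumes an attribute block built in userspace: `attr.kprobe_func` is a user pointer to a symbol name, `attr.kprobe_addr` an optional raw address. A userspace sketch, assuming the dynamic PMU type is read from `/sys/bus/event_source/devices/kprobe/type` and that `do_sys_openat2` is just an illustrative probe target:

```c
#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int open_kprobe(int kprobe_pmu_type)
{
	struct perf_event_attr attr;
	const char *sym = "do_sys_openat2";	/* illustrative target */

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = kprobe_pmu_type;		/* from sysfs, see above */
	attr.kprobe_func = (__u64)(unsigned long)sym;
	attr.probe_offset = 0;			/* or set attr.kprobe_addr */

	return syscall(SYS_perf_event_open, &attr, -1 /* any pid */,
		       0 /* cpu 0 */, -1 /* no group */, 0);
}
```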
|
/kernel/debug/kdb/
kdb_main.c
    2575  .func = kdb_md,
    2582  .func = kdb_md,
    2588  .func = kdb_md,
    2594  .func = kdb_md,
    2600  .func = kdb_mm,
    2606  .func = kdb_go,
    2614  .func = kdb_rd,
    2620  .func = kdb_rm,
    2626  .func = kdb_ef,
    2632  .func = kdb_bt,
    [all …]
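Each `.func = kdb_*` entry fills a `kdbtab_t`, kdb's command descriptor. A sketch of how an extra command would be wired up, assuming the current one-argument `kdb_register()` (older kernels took the name, handler, usage, and help strings as separate parameters):

```c
#include <linux/init.h>
#include <linux/kdb.h>

static int kdb_hello(int argc, const char **argv)
{
	kdb_printf("hello from kdb\n");
	return 0;
}

static kdbtab_t hello_cmd = {
	.name  = "hello",
	.func  = kdb_hello,
	.usage = "hello",
	.help  = "print a greeting",
};

static int __init hello_register(void)
{
	return kdb_register(&hello_cmd);
}
```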
|
kdb_bp.c
    529  .func = kdb_bp,
    535  .func = kdb_bp,
    541  .func = kdb_bc,
    547  .func = kdb_bc,
    553  .func = kdb_bc,
    559  .func = kdb_ss,
    569  .func = kdb_bp,
|
/kernel/rcu/
tiny.c
     92  f = head->func;                                 in rcu_reclaim_tiny()
     94  WRITE_ONCE(head->func, (rcu_callback_t)0L);     in rcu_reclaim_tiny()
    160  void call_rcu(struct rcu_head *head, rcu_callback_t func)   in call_rcu() argument
    167  pr_err("%s(): Double-freed CB %p->%pS()!!! ", __func__, head, head->func);   in call_rcu()
    173  head->func = func;                              in call_rcu()
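The `head->func` stored at line 173 is supplied by callers using the canonical free-after-grace-period pattern:

```c
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct item {
	int value;
	struct rcu_head rcu;
};

static void item_free_rcu(struct rcu_head *head)
{
	/* invoked via head->func once a grace period has elapsed */
	kfree(container_of(head, struct item, rcu));
}

static void item_retire(struct item *it)
{
	call_rcu(&it->rcu, item_free_rcu);	/* stores it->rcu.func */
}
```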
|
/kernel/sched/
cpufreq.c
    31  void (*func)(struct update_util_data *data, u64 time,   in cpufreq_add_update_util_hook()
    34  if (WARN_ON(!data || !func))                     in cpufreq_add_update_util_hook()
    40  data->func = func;                               in cpufreq_add_update_util_hook()
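`data->func` is the hook the scheduler invokes on every utilization update for that CPU. A sketch of a governor-style registration (real users such as schedutil keep the `update_util_data` per-CPU rather than in a single static):

```c
#include <linux/sched/cpufreq.h>

static struct update_util_data my_hook_data;

static void my_util_update(struct update_util_data *data, u64 time,
			   unsigned int flags)
{
	/* scheduler context: no sleeping, keep it short */
}

static void hook_cpu(int cpu)
{
	cpufreq_add_update_util_hook(cpu, &my_hook_data, my_util_update);
}
```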
|
ext_idle.c
    1259  BTF_ID_FLAGS(func, scx_bpf_cpu_node)
    1261  BTF_ID_FLAGS(func, scx_bpf_get_idle_cpumask, KF_ACQUIRE)
    1263  BTF_ID_FLAGS(func, scx_bpf_get_idle_smtmask, KF_ACQUIRE)
    1264  BTF_ID_FLAGS(func, scx_bpf_put_idle_cpumask, KF_RELEASE)
    1265  BTF_ID_FLAGS(func, scx_bpf_test_and_clear_cpu_idle)
    1266  BTF_ID_FLAGS(func, scx_bpf_pick_idle_cpu_node, KF_RCU)
    1267  BTF_ID_FLAGS(func, scx_bpf_pick_idle_cpu, KF_RCU)
    1268  BTF_ID_FLAGS(func, scx_bpf_pick_any_cpu_node, KF_RCU)
    1269  BTF_ID_FLAGS(func, scx_bpf_pick_any_cpu, KF_RCU)
    1270  BTF_ID_FLAGS(func, scx_bpf_select_cpu_and, KF_RCU)
    [all …]
|