| /kernel/trace/ |
| trace_events.c |
     705  call);  in trace_event_reg()
     709  call);  in trace_event_reg()
     812  ret = call->class->reg(call, TRACE_REG_UNREGISTER, file);  in __ftrace_event_enable_disable()
     860  ret = call->class->reg(call, TRACE_REG_REGISTER, file);  in __ftrace_event_enable_disable()
    1336  if (!name || !call->class || !call->class->reg)  in __ftrace_set_clr_event_nolock()
    1549  if (call->class && call->class->reg &&  in t_next()
    1859  !trace_event_name(call) || !call->class || !call->class->reg)  in trace_events_enabled()
    2933  if (call->event.type && call->class->reg &&  in event_callback()
    2957  if (call->event.type && call->class->reg &&  in event_callback()
    3122  ret = call->class->raw_init(call);  in event_init()
    [all …]
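The dominant pattern in these hits is dispatch through the per-class reg hook: each event class supplies one registration callback, and callers NULL-check it before invoking it with TRACE_REG_REGISTER or TRACE_REG_UNREGISTER. Below is a minimal user-space sketch of that shape; the enum constants mirror the kernel's names, but `struct event` and `struct event_class` are simplified stand-ins, not the kernel's definitions.

```c
#include <stdio.h>

enum trace_reg { TRACE_REG_REGISTER, TRACE_REG_UNREGISTER };

struct event; /* forward declaration so the class can point at it */

struct event_class {
	/* per-class (un)registration hook, as in call->class->reg */
	int (*reg)(struct event *call, enum trace_reg type, void *data);
};

struct event {
	const char *name;
	struct event_class *class;
};

static int demo_reg(struct event *call, enum trace_reg type, void *data)
{
	printf("%s: %s\n", call->name,
	       type == TRACE_REG_REGISTER ? "register" : "unregister");
	return 0;
}

/* Mirrors the guard-then-dispatch shape of __ftrace_event_enable_disable():
 * classes without a hook are skipped rather than dereferenced. */
static int event_enable_disable(struct event *call, int enable)
{
	if (!call->class || !call->class->reg)
		return -1;
	return call->class->reg(call,
				enable ? TRACE_REG_REGISTER
				       : TRACE_REG_UNREGISTER, NULL);
}

int main(void)
{
	struct event_class cls = { .reg = demo_reg };
	struct event ev = { .name = "demo", .class = &cls };

	event_enable_disable(&ev, 1);
	event_enable_disable(&ev, 0);
	return 0;
}
```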
|
| trace_syscalls.c |
     243  if (entry->enter_event != call) {  in set_syscall_print_fmt()
     257  call->print_fmt = print_fmt;  in set_syscall_print_fmt()
     266  if (entry->enter_event == call)  in free_syscall_print_fmt()
     267  kfree(call->print_fmt);  in free_syscall_print_fmt()
     383  struct trace_event_call *call)  in reg_event_syscall_enter() (argument)
     421  struct trace_event_call *call)  in reg_event_syscall_exit() (argument)
     470  if (set_syscall_print_fmt(call) < 0)  in init_syscall_trace()
     473  id = trace_event_raw_init(call);  in init_syscall_trace()
     476  free_syscall_print_fmt(call);  in init_syscall_trace()
     586  return trace_call_bpf(call, &param);  in perf_call_bpf_enter()
    [all …]
|
| trace_export.c |
      18  static int ftrace_event_register(struct trace_event_call *call,  in ftrace_event_register() (argument)
     174  .fields_array = ftrace_event_fields_##call, \
     175  .fields = LIST_HEAD_INIT(event_class_ftrace_##call.fields),\
     179  struct trace_event_call __used event_##call = { \
     180  .class = &event_class_ftrace_##call, \
     182  .name = #call, \
     189  __section("_ftrace_events") *__event_##call = &event_##call;
     192  #define FTRACE_ENTRY(call, struct_name, etype, tstruct, print) \  (argument)
     193  FTRACE_ENTRY_REG(call, struct_name, etype, \
     196  bool ftrace_event_is_function(struct trace_event_call *call)  in ftrace_event_is_function() (argument)
    [all …]
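The `##` and `#` preprocessor operators do the heavy lifting in these macros: one invocation stamps out a per-event struct whose identifier and .name string are derived from the macro argument (the kernel additionally plants a pointer in the `_ftrace_events` linker section so the core can enumerate events at boot, per line 189). A stand-alone sketch of just the token-pasting part; `struct event_call` and `DEFINE_EVENT` are invented for the demo:

```c
#include <stdio.h>

struct event_call {
	const char *name;
	int type;
};

/* One invocation defines a global named event_<call>; "#call"
 * stringizes the token, "##" pastes it into the identifier. */
#define DEFINE_EVENT(call, etype)                     \
	struct event_call event_##call = {            \
		.name = #call,                        \
		.type = (etype),                      \
	}

DEFINE_EVENT(function, 1); /* defines struct event_call event_function */
DEFINE_EVENT(wakeup, 2);   /* defines struct event_call event_wakeup   */

int main(void)
{
	printf("%s (%d)\n", event_function.name, event_function.type);
	printf("%s (%d)\n", event_wakeup.name, event_wakeup.type);
	return 0;
}
```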
|
| trace_events_inject.c |
      36  parse_field(char *str, struct trace_event_call *call,  in parse_field() (argument)
      60  field = trace_find_event_field(call, field_name);  in parse_field()
     144  head = trace_get_fields(call);  in trace_get_entry_size()
     155  int entry_size = trace_get_entry_size(call);  in trace_alloc_entry()
     165  head = trace_get_fields(call);  in trace_alloc_entry()
     204  entry = trace_alloc_entry(call, &entry_size);  in parse_entry()
     209  tracing_generic_entry_update(entry, call->event.type,  in parse_entry()
     212  while ((len = parse_field(str, call, &field, &val)) > 0) {  in parse_entry()
     287  struct trace_event_call *call;  in event_inject_write() (local)
     304  call = file->event_call;  in event_inject_write()
    [all …]
|
| trace_dynevent.c |
      24  struct trace_event_call *call;  in trace_event_dyn_try_get_ref() (local)
      31  list_for_each_entry(call, &ftrace_events, list) {  in trace_event_dyn_try_get_ref()
      32  if (call == dyn_call) {  in trace_event_dyn_try_get_ref()
      41  void trace_event_dyn_put_ref(struct trace_event_call *call)  in trace_event_dyn_put_ref() (argument)
      43  if (WARN_ON_ONCE(!(call->flags & TRACE_EVENT_FL_DYNAMIC)))  in trace_event_dyn_put_ref()
      46  if (WARN_ON_ONCE(atomic_read(&call->refcnt) <= 0)) {  in trace_event_dyn_put_ref()
      47  atomic_set(&call->refcnt, 0);  in trace_event_dyn_put_ref()
      51  atomic_dec(&call->refcnt);  in trace_event_dyn_put_ref()
      54  bool trace_event_dyn_busy(struct trace_event_call *call)  in trace_event_dyn_busy() (argument)
      56  return atomic_read(&call->refcnt) != 0;  in trace_event_dyn_busy()
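These hits outline a try-get/put refcount: the get side only bumps the count if the event is still on the live list, the put side warns and clamps on underflow, and a busy check gates teardown. A user-space sketch of the same shape with C11 atomics; `on_list` stands in for membership of the kernel's locked ftrace_events list, and all names here are simplified:

```c
#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

struct dyn_event {
	atomic_int refcnt;
	bool on_list; /* stand-in for being found on the event list */
};

static bool dyn_try_get_ref(struct dyn_event *ev)
{
	/* The real code walks the list under a mutex; only events
	 * still present may gain a reference. */
	if (!ev->on_list)
		return false;
	atomic_fetch_add(&ev->refcnt, 1);
	return true;
}

static void dyn_put_ref(struct dyn_event *ev)
{
	/* Mirrors the WARN_ON_ONCE underflow check: clamp to zero
	 * instead of decrementing past it. */
	if (atomic_load(&ev->refcnt) <= 0) {
		atomic_store(&ev->refcnt, 0);
		return;
	}
	atomic_fetch_sub(&ev->refcnt, 1);
}

static bool dyn_busy(struct dyn_event *ev)
{
	return atomic_load(&ev->refcnt) != 0; /* nonzero blocks removal */
}

int main(void)
{
	struct dyn_event ev = { .on_list = true };

	assert(dyn_try_get_ref(&ev));
	assert(dyn_busy(&ev));
	dyn_put_ref(&ev);
	assert(!dyn_busy(&ev));
	return 0;
}
```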
|
| trace_events_synth.c |
     591  if (call) {  in free_synth_event_print_fmt()
     592  kfree(call->print_fmt);  in free_synth_event_print_fmt()
     593  call->print_fmt = NULL;  in free_synth_event_print_fmt()
     650  call->print_fmt = print_fmt;  in set_synth_event_print_fmt()
     855  struct synth_event *event = container_of(call, struct synth_event, call);  in synth_event_reg()
     886  struct trace_event_call *call = &event->call;  in register_synth_event() (local)
     915  call->data = event;  in register_synth_event()
     916  call->tp = event->tp;  in register_synth_event()
     921  trace_event_name(call));  in register_synth_event()
     928  trace_remove_event_call(call);  in register_synth_event()
    [all …]
|
| trace_probe.h |
     214  bool trace_kprobe_on_func_entry(struct trace_event_call *call);
     215  bool trace_kprobe_error_injectable(struct trace_event_call *call);
     217  static inline bool trace_kprobe_on_func_entry(struct trace_event_call *call)  in trace_kprobe_on_func_entry() (argument)
     254  struct trace_event_call call;  (member)
     299  return trace_event_name(&tp->event->call);  in trace_probe_name()
     304  return tp->event->call.class->system;  in trace_probe_group_name()
     310  return &tp->event->call;  in trace_probe_event_call()
     316  return container_of(event_call, struct trace_probe_event, call);  in trace_probe_event_from_call()
     320  trace_probe_primary_from_call(struct trace_event_call *call)  in trace_probe_primary_from_call() (argument)
     322  struct trace_probe_event *tpe = trace_probe_event_from_call(call);  in trace_probe_primary_from_call()
    [all …]
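Lines 254 and 316 show the embed-and-recover idiom behind trace_probe_event_from_call(): a generic `struct trace_event_call` is embedded in the larger probe struct, pointers to the member are handed out, and `container_of()` subtracts the member's offset to get the container back. A self-contained user-space sketch; the struct names are shortened stand-ins for the kernel's:

```c
#include <assert.h>
#include <stddef.h>

/* The classic container_of: step back from a member pointer to the
 * enclosing object using the member's compile-time offset. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct event_call { int type; };

struct probe_event {
	unsigned int flags;
	struct event_call call; /* embedded, as at trace_probe.h:254 */
};

static struct probe_event *probe_event_from_call(struct event_call *call)
{
	return container_of(call, struct probe_event, call);
}

int main(void)
{
	struct probe_event pe = { .flags = 7 };
	struct event_call *call = &pe.call; /* only the member escapes */

	assert(probe_event_from_call(call) == &pe);
	assert(probe_event_from_call(call)->flags == 7);
	return 0;
}
```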
|
| trace_kprobe.c |
     219  tp = trace_probe_primary_from_call(call);  in trace_kprobe_primary_from_call()
     364  tp = trace_probe_primary_from_call(call);  in enable_trace_kprobe()
     411  tp = trace_probe_primary_from_call(call);  in disable_trace_kprobe()
    1679  if (bpf_prog_array_valid(call)) {  in kprobe_perf_func()
    1683  ret = trace_call_bpf(call, regs);  in kprobe_perf_func()
    1696  head = this_cpu_ptr(call->perf_events);  in kprobe_perf_func()
    1729  if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))  in kretprobe_perf_func()
    1882  call->event.funcs = &kretprobe_funcs;  in init_trace_event_call()
    1885  call->event.funcs = &kprobe_funcs;  in init_trace_event_call()
    1889  call->flags = TRACE_EVENT_FL_KPROBE;  in init_trace_event_call()
    [all …]
|
| trace_fprobe.c |
     329  if (WARN_ON_ONCE(call != trace_file->event_call))  in NOKPROBE_SYMBOL()
     415  if (WARN_ON_ONCE(call != trace_file->event_call))  in NOKPROBE_SYMBOL()
     460  head = this_cpu_ptr(call->perf_events);  in fentry_perf_func()
     496  head = this_cpu_ptr(call->perf_events);  in fexit_perf_func()
     741  call->event.funcs = &fexit_funcs;  in init_trace_event_call()
     742  call->class->fields_array = fexit_fields_array;  in init_trace_event_call()
     744  call->event.funcs = &fentry_funcs;  in init_trace_event_call()
     748  call->flags = TRACE_EVENT_FL_FPROBE;  in init_trace_event_call()
     749  call->class->reg = fprobe_register;  in init_trace_event_call()
    1464  tp = trace_probe_primary_from_call(call);  in enable_trace_fprobe()
    [all …]
|
| trace_uprobe.c |
     325  tp = trace_probe_primary_from_call(call);  in trace_uprobe_primary_from_call()
    1022  WARN_ON(call != trace_file->event_call);  in __uprobe_trace_func()
    1162  tp = trace_probe_primary_from_call(call);  in probe_event_enable()
    1219  tp = trace_probe_primary_from_call(call);  in probe_event_disable()
    1338  tp = trace_probe_primary_from_call(call);  in uprobe_perf_close()
    1362  tp = trace_probe_primary_from_call(call);  in uprobe_perf_open()
    1373  uprobe_perf_close(call, event);  in uprobe_perf_open()
    1417  if (bpf_prog_array_valid(call)) {  in __uprobe_perf_func()
    1439  head = this_cpu_ptr(call->perf_events);  in __uprobe_perf_func()
    1617  call->event.funcs = &uprobe_funcs;  in init_trace_event_call()
    [all …]
|
| trace_events_user.c |
      87  struct trace_event_call call;  (member)
    1434  user->call.print_fmt = print_fmt;  in user_event_create_print_fmt()
    1508  kfree(user->call.print_fmt);  in destroy_user_event()
    1726  call->class->probe,  in user_event_reg()
    1734  call->class->probe,  in user_event_reg()
    1741  call->class->perf_probe,  in user_event_reg()
    1749  call->class->perf_probe,  in user_event_reg()
    1978  user->call.name = multi_name;  in user_event_set_tp_name()
    2146  user->call.data = user;  in user_event_parse()
    2147  user->call.class = &user->class;  in user_event_parse()
    [all …]
|
| trace_functions_graph.c |
     906  struct ftrace_graph_ent *call;  in print_graph_entry_leaf() (local)
     916  call = &entry->graph_ent;  in print_graph_entry_leaf()
     929  cpu_data->depth = call->depth - 1;  in print_graph_entry_leaf()
     933  !WARN_ON_ONCE(call->depth < 0))  in print_graph_entry_leaf()
     934  cpu_data->enter_funcs[call->depth] = 0;  in print_graph_entry_leaf()
     987  cpu_data->depth = call->depth;  in print_graph_entry_nested()
     991  !WARN_ON_ONCE(call->depth < 0))  in print_graph_entry_nested()
     992  cpu_data->enter_funcs[call->depth] = call->func;  in print_graph_entry_nested()
    1184  struct ftrace_graph_ent *call;  in print_graph_entry() (local)
    1204  call = &entry->graph_ent;  in print_graph_entry()
    [all …]
|
| trace_eprobe.c |
     432  if (WARN_ON_ONCE(call != edata->file->event_call))  in NOKPROBE_SYMBOL()
     650  static int enable_trace_eprobe(struct trace_event_call *call,  in enable_trace_eprobe() (argument)
     659  tp = trace_probe_primary_from_call(call);  in enable_trace_eprobe()
     707  static int disable_trace_eprobe(struct trace_event_call *call,  in disable_trace_eprobe() (argument)
     713  tp = trace_probe_primary_from_call(call);  in disable_trace_eprobe()
     769  struct trace_event_call *call = trace_probe_event_call(&ep->tp);  in init_trace_eprobe_call() (local)
     771  call->flags = TRACE_EVENT_FL_EPROBE;  in init_trace_eprobe_call()
     772  call->event.funcs = &eprobe_funcs;  in init_trace_eprobe_call()
     773  call->class->fields_array = eprobe_fields_array;  in init_trace_eprobe_call()
     774  call->class->reg = eprobe_register;  in init_trace_eprobe_call()
    [all …]
|
| trace_events_filter.c |
    1632  struct trace_event_call *call = data;  in parse_pred() (local)
    1670  field = trace_find_event_field(call, field_name);  in parse_pred()
    1783  } else if (ftrace_event_is_function(call)) {  in parse_pred()
    2234  parse_pred, call, pe);  in process_preds()
    2392  struct trace_event_call *call,  in create_filter() (argument)
    2416  struct trace_event_call *call,  in create_event_filter() (argument)
    2711  struct trace_event_call *call;  in ftrace_profile_set_filter() (local)
    2715  call = event->tp_event;  in ftrace_profile_set_filter()
    2717  if (!call)  in ftrace_profile_set_filter()
    2727  if (ftrace_event_is_function(call))  in ftrace_profile_set_filter()
    [all …]
|
| trace_dynevent.h |
      80  struct trace_event_call *call)  in dyn_event_add() (argument)
      87  call->flags |= TRACE_EVENT_FL_DYNAMIC;  in dyn_event_add()
|
| trace.h |
    1284  bool ftrace_event_is_function(struct trace_event_call *call);
    1676  struct trace_event_call *call,
    1682  trace_find_event_field(struct trace_event_call *call, char *name);
    2096  #define FTRACE_ENTRY(call, struct_name, id, tstruct, print) \  (argument)
    2098  __aligned(4) event_##call;
    2100  #define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print) \  (argument)
    2101  FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
    2103  #define FTRACE_ENTRY_PACKED(call, struct_name, id, tstruct, print) \  (argument)
    2104  FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
    2109  int perf_ftrace_event_register(struct trace_event_call *call,
|
| trace_probe.c |
    1980  call->print_fmt = print_fmt;  in traceprobe_set_print_fmt()
    2013  kfree(tpe->call.name);  in trace_probe_event_free()
    2014  kfree(tpe->call.print_fmt);  in trace_probe_event_free()
    2060  struct trace_event_call *call;  in trace_probe_init() (local)
    2080  call = trace_probe_event_call(tp);  in trace_probe_init()
    2081  call->class = &tp->event->class;  in trace_probe_init()
    2082  call->name = kstrdup(event, GFP_KERNEL);  in trace_probe_init()
    2083  if (!call->name) {  in trace_probe_init()
    2136  ret = register_trace_event(&call->event);  in trace_probe_register_event_call()
    2140  ret = trace_add_event_call(call);  in trace_probe_register_event_call()
    [all …]
|
| trace_output.c |
     943  static void print_fields(struct trace_iterator *iter, struct trace_event_call *call,  in print_fields() (argument)
    1073  struct trace_event_call *call;  in print_event_fields() (local)
    1082  list_for_each_entry(call, &ftrace_events, list) {  in print_event_fields()
    1083  if (call->event.type == event->type) {  in print_event_fields()
    1088  if (call->event.type > __TRACE_LAST_TYPE)  in print_event_fields()
    1096  call = container_of(event, struct trace_event_call, event);  in print_event_fields()
    1098  head = trace_get_fields(call);  in print_event_fields()
    1100  trace_seq_printf(&iter->seq, "%s:", trace_event_name(call));  in print_event_fields()
    1103  print_fields(iter, call, head);  in print_event_fields()
|
| /kernel/ |
| Makefile |
      46  CFLAGS_kcov.o := $(call cc-option, -fno-conserve-stack) -fno-stack-protector
     143  CFLAGS_kstack_erase.o += $(call cc-option,-mgeneral-regs-only)
     155  $(call if_changed,gzip)
     160  $(call filechk,cat)
     170  $(call cmd_and_savecmd,kheaders_data)
     171  $(call cmd,kheaders_data_dep)
     176  $(call if_changed_rule,kheaders_data)
     188  $(call filechk,kheaders_objlist)
     197  $(call filechk,kheaders_srclist)
     204  $(call filechk,kheaders_md5sum)
|
| Kconfig.kexec |
      21  bool "Enable kexec system call"
      25  kexec is a system call that implements the ability to shutdown your
      30  The name comes from the similarity to the exec system call.
      39  bool "Enable kexec file based system call"
      44  This is new version of kexec system call. This system call is
      45  file based and takes file descriptors as system call argument
      47  accepted by kexec system call.
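From user space, the file-based variant these options enable is reached via kexec_file_load(2), which glibc does not wrap. A hedged sketch, assuming a kernel built with CONFIG_KEXEC_FILE on an architecture that defines SYS_kexec_file_load; the /boot paths and command line are placeholders, the caller needs CAP_SYS_BOOT, and this only stages the kernel (a later `kexec -e` or reboot(LINUX_REBOOT_CMD_KEXEC) actually jumps into it):

```c
#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	int kernel_fd = open("/boot/vmlinuz", O_RDONLY);    /* placeholder */
	int initrd_fd = open("/boot/initrd.img", O_RDONLY); /* placeholder */
	const char cmdline[] = "root=/dev/sda1 ro";         /* placeholder */

	if (kernel_fd < 0 || initrd_fd < 0) {
		perror("open");
		return 1;
	}

	/* kexec_file_load(kernel_fd, initrd_fd, cmdline_len, cmdline, flags);
	 * cmdline_len counts the terminating NUL, flags 0 = plain load. */
	if (syscall(SYS_kexec_file_load, kernel_fd, initrd_fd,
		    sizeof(cmdline), cmdline, 0UL) < 0) {
		perror("kexec_file_load");
		return 1;
	}
	puts("new kernel staged; run `kexec -e` to switch into it");
	return 0;
}
```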
|
| /kernel/bpf/preload/iterators/ |
| Makefile |
      43  $(call msg,CLEAN)
      47  $(call msg,GEN-SKEL,$@)
      51  $(call msg,BPF,$@)
      58  $(call msg,MKDIR,$@)
|
| /kernel/kcsan/ |
| Makefile |
      10  CFLAGS_core.o := $(call cc-option,-fno-conserve-stack) \
      11  $(call cc-option,-mno-outline-atomics) \
|
| /kernel/trace/rv/monitors/sts/ |
| Kconfig |
      13  * each call to the scheduler has up to one switch
      15  * each call to the scheduler disables interrupts to switch
|
| /kernel/rcu/ |
| rcutorture.c |
     413  call_rcu_func_t call;  (member)
     621  .call = call_rcu_hurry,
     681  .call = call_rcu_busted,
     880  .call = srcu_torture_call,
     928  .call = srcu_torture_call,
     954  .call = srcu_torture_call,
    1049  .call = call_rcu_tasks,
    1131  .call = call_rcu_tasks_trace,
    2401  if (cur_ops->call) {  in rcu_torture_timer()
    3303  if (!cur_ops->call)  in rcu_torture_fwd_prog_cr()
    [all …]
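The pattern here is an ops table: each torture "flavor" fills the same function-pointer slot named call with its own grace-period primitive, and users must treat the slot as optional (the hits at 2401 and 3303 NULL-check cur_ops->call before use). A user-space sketch of that shape; the types below are simplified stand-ins, not the kernel's callback machinery:

```c
#include <stdio.h>

struct callback { void (*func)(struct callback *cb); };

typedef void (*call_func_t)(struct callback *cb,
			    void (*func)(struct callback *cb));

/* One flavor's implementation; the demo just invokes immediately
 * rather than after a grace period. */
static void flavor_a_call(struct callback *cb,
			  void (*func)(struct callback *cb))
{
	cb->func = func;
	cb->func(cb);
}

struct torture_ops {
	const char *name;
	call_func_t call; /* optional: may be NULL for some flavors */
};

static void done(struct callback *cb) { printf("callback ran\n"); }

int main(void)
{
	struct torture_ops flavors[] = {
		{ .name = "flavor_a", .call = flavor_a_call },
		{ .name = "flavor_b", .call = NULL },
	};
	struct callback cb;

	for (int i = 0; i < 2; i++) {
		/* mirrors the `if (!cur_ops->call)` guards above */
		if (!flavors[i].call) {
			printf("%s: no call op, skipping\n",
			       flavors[i].name);
			continue;
		}
		flavors[i].call(&cb, done);
	}
	return 0;
}
```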
|
| /kernel/bpf/ |
| Kconfig |
      28  bool "Enable bpf() system call"
      39  Enable the bpf() system call that allows to manipulate BPF programs
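What this option turns on is the multiplexed bpf(2) system call, which again has no glibc wrapper. A minimal sketch, assuming a kernel with CONFIG_BPF_SYSCALL=y and sufficient privilege (typically CAP_BPF or CAP_SYS_ADMIN): creating a trivial map is the smallest useful demo, and program loading goes through the same syscall with BPF_PROG_LOAD.

```c
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

int main(void)
{
	union bpf_attr attr;

	/* The attr union must be zeroed; the kernel rejects stray bits. */
	memset(&attr, 0, sizeof(attr));
	attr.map_type    = BPF_MAP_TYPE_ARRAY;
	attr.key_size    = sizeof(int);
	attr.value_size  = sizeof(long long);
	attr.max_entries = 1;

	int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
	if (map_fd < 0) {
		perror("bpf(BPF_MAP_CREATE)"); /* ENOSYS if the option is off */
		return 1;
	}
	printf("created array map, fd=%d\n", map_fd);
	close(map_fd);
	return 0;
}
```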
|