/kernel/

auditsc.c
    1038 context->context = AUDIT_CTX_UNUSED; in audit_alloc_context()
    1684 switch (context->context) { in audit_log_exit()
    1690 context->arch, context->major); in audit_log_exit()
    1756 context->fds[0], context->fds[1]); in audit_log_exit()
    1806 if (context->context == AUDIT_CTX_SYSCALL) in audit_log_exit()
    2018 if (context->context != AUDIT_CTX_UNUSED || context->name_count) { in __audit_syscall_entry()
    2040 context->context = AUDIT_CTX_SYSCALL; in __audit_syscall_entry()
    2060 if (!context || context->dummy || in __audit_syscall_exit()
    2228 if (context->context == AUDIT_CTX_UNUSED) in __audit_getname()
    2300 if (context->context == AUDIT_CTX_UNUSED) in __audit_inode()
    [all …]
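
The hits above trace the audit context's lifecycle: it is allocated in the AUDIT_CTX_UNUSED state, promoted to AUDIT_CTX_SYSCALL on syscall entry, and consulted before any name or inode is recorded. Below is a minimal self-contained sketch of that state-field pattern; all names (audit_ctx, ctx_state, record_name) are hypothetical stand-ins, not the kernel's types.

```c
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for AUDIT_CTX_UNUSED / AUDIT_CTX_SYSCALL. */
enum ctx_state { CTX_UNUSED, CTX_SYSCALL };

struct audit_ctx {
	enum ctx_state state;
	int major;                         /* syscall number, like context->major */
};

static struct audit_ctx *ctx_alloc(void)
{
	struct audit_ctx *ctx = calloc(1, sizeof(*ctx));

	if (ctx)
		ctx->state = CTX_UNUSED;   /* mirrors audit_alloc_context() */
	return ctx;
}

static void syscall_entry(struct audit_ctx *ctx, int major)
{
	ctx->major = major;
	ctx->state = CTX_SYSCALL;          /* mirrors __audit_syscall_entry() */
}

static void record_name(struct audit_ctx *ctx, const char *name)
{
	if (ctx->state == CTX_UNUSED)      /* mirrors the __audit_getname() check */
		return;                    /* no syscall in flight: nothing to audit */
	printf("audit: name=%s major=%d\n", name, ctx->major);
}

int main(void)
{
	struct audit_ctx *ctx = ctx_alloc();

	record_name(ctx, "/etc/passwd");   /* dropped: state is CTX_UNUSED */
	syscall_entry(ctx, 2);
	record_name(ctx, "/etc/passwd");   /* logged */
	free(ctx);
	return 0;
}
```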
|
audit.h
    109 } context; member
    300 extern void audit_kill_trees(struct audit_context *context);
    331 #define audit_kill_trees(context) BUG() argument
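
Line 331 shows a common kernel idiom: when the feature (here, audit tree watches) is configured out, the function collapses to a macro that calls BUG(), so any unexpected caller fails loudly rather than silently. A header-fragment sketch of the idiom, with a hypothetical CONFIG_EXAMPLE_FEATURE option standing in for the real config symbol:

```c
/* Conditional-stub idiom, as in audit.h; CONFIG_EXAMPLE_FEATURE and
 * example_kill_trees() are hypothetical. */
#ifdef CONFIG_EXAMPLE_FEATURE
extern void example_kill_trees(struct example_context *context);
#else
/* Feature compiled out: any caller reaching this is a bug, so crash loudly. */
#define example_kill_trees(context) BUG()
#endif
```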
|
audit_tree.c
    525 static void audit_tree_log_remove_rule(struct audit_context *context, in audit_tree_log_remove_rule() argument
    532 ab = audit_log_start(context, GFP_KERNEL, AUDIT_CONFIG_CHANGE); in audit_tree_log_remove_rule()
    542 static void kill_rules(struct audit_context *context, struct audit_tree *tree) in kill_rules() argument
    553 audit_tree_log_remove_rule(context, rule); in kill_rules()
    977 void audit_kill_trees(struct audit_context *context) in audit_kill_trees() argument
    979 struct list_head *list = &context->killed_trees; in audit_kill_trees()
    988 kill_rules(context, victim); in audit_kill_trees()
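
audit_kill_trees() drains context->killed_trees, invoking kill_rules() on each victim before detaching it. A hedged kernel-style sketch of that drain loop; everything other than the list_* helpers is hypothetical:

```c
#include <linux/list.h>

/* Hypothetical element type; audit_kill_trees() drains struct audit_tree
 * entries from context->killed_trees in the same way. */
struct victim {
	struct list_head list;
};

static void kill_one(struct victim *v)
{
	/* tear down the victim, like kill_rules(context, victim) */
}

static void drain_list(struct list_head *list)
{
	while (!list_empty(list)) {
		struct victim *v = list_entry(list->next, struct victim, list);

		kill_one(v);
		list_del_init(&v->list);   /* detach before the next iteration */
	}
}
```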
|
audit.c
    1086 static void audit_log_common_recv_msg(struct audit_context *context, in audit_log_common_recv_msg() argument
    1097 *ab = audit_log_start(context, GFP_KERNEL, msg_type); in audit_log_common_recv_msg()
    1490 memcpy(sig_data->ctx, lsmctx.context, lsmctx.len); in audit_receive_msg()
    2197 audit_log_format(ab, " subj=%s", ctx.context); in audit_log_task_context()
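
Both audit_log_start() hits (here and in audit_tree.c above) follow the standard three-step audit record API: audit_log_start() allocates a buffer and may return NULL when auditing is disabled or allocation fails, audit_log_format() appends printf-style text, and audit_log_end() queues the record for the audit daemon. A minimal kernel-style sketch, assuming process context so GFP_KERNEL is safe; the record contents are illustrative:

```c
#include <linux/audit.h>

static void log_example_event(int res)
{
	struct audit_buffer *ab;

	/* NULL means auditing is off or the buffer could not be allocated. */
	ab = audit_log_start(audit_context(), GFP_KERNEL, AUDIT_CONFIG_CHANGE);
	if (!ab)
		return;
	audit_log_format(ab, "op=example-op res=%d", res);
	audit_log_end(ab);   /* hand the finished record to the audit daemon */
}
```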
|
kprobes.c
    1862 static int kretprobe_init_inst(void *nod, void *context) in kretprobe_init_inst() argument
    1866 ri->rph = context; in kretprobe_init_inst()
    1869 static int kretprobe_fini_pool(struct objpool_head *head, void *context) in kretprobe_fini_pool() argument
    1871 kfree(context); in kretprobe_fini_pool()
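
kretprobe_init_inst() and kretprobe_fini_pool() are objpool callbacks: the pool hands the same opaque context pointer to the per-object init callback and back to the teardown callback, which frees it (rethook.c below uses the identical pattern). A self-contained userspace sketch of that void-pointer cookie convention; all names are hypothetical:

```c
#include <stdlib.h>

/* Callback signatures modeled on the objpool ones in the matches above. */
typedef int (*obj_init_cb)(void *obj, void *context);
typedef void (*pool_fini_cb)(void *context);

struct pool {
	void *objs[4];
	void *context;          /* opaque cookie handed back to the callbacks */
	pool_fini_cb fini;
};

static int pool_init(struct pool *p, size_t objsz, void *context,
		     obj_init_cb init, pool_fini_cb fini)
{
	p->context = context;
	p->fini = fini;
	for (int i = 0; i < 4; i++) {
		p->objs[i] = calloc(1, objsz);
		if (!p->objs[i] || init(p->objs[i], context))
			return -1;
	}
	return 0;
}

static void pool_fini(struct pool *p)
{
	for (int i = 0; i < 4; i++)
		free(p->objs[i]);
	p->fini(p->context);    /* like kretprobe_fini_pool(): free the cookie */
}

struct node { void *owner; };

static int node_init(void *obj, void *context)
{
	((struct node *)obj)->owner = context;   /* like ri->rph = context */
	return 0;
}

static void owner_free(void *context) { free(context); }

int main(void)
{
	struct pool p;
	void *owner = malloc(64);   /* shared state all objects point back to */

	if (!owner || pool_init(&p, sizeof(struct node), owner, node_init, owner_free))
		return 1;
	pool_fini(&p);
	return 0;
}
```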
|
ptrace.c
    1299 tmp = mm->context.exec_fdpic_loadmap; in ptrace_request()
    1302 tmp = mm->context.interp_fdpic_loadmap; in ptrace_request()
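
These two hits implement PTRACE_GETFDPIC, which copies the FDPIC executable or interpreter loadmap address out of mm->context into a tracer-supplied word. A hedged userspace sketch; it is only meaningful on FDPIC architectures (e.g. no-MMU ARM), and the constants are defined locally in case the libc headers lack them:

```c
#include <sys/ptrace.h>
#include <sys/types.h>

#ifndef PTRACE_GETFDPIC              /* from <linux/ptrace.h> on FDPIC targets */
#define PTRACE_GETFDPIC        31
#define PTRACE_GETFDPIC_EXEC   0     /* mm->context.exec_fdpic_loadmap */
#define PTRACE_GETFDPIC_INTERP 1     /* mm->context.interp_fdpic_loadmap */
#endif

/* Read the tracee's FDPIC executable loadmap address.  Assumes `pid` is
 * already traced and stopped; returns 0 on success.  On non-FDPIC
 * architectures the request simply fails. */
static int get_exec_loadmap(pid_t pid, unsigned long *loadmap)
{
	/* addr selects which map (EXEC vs INTERP); data receives the value. */
	return (int)ptrace(PTRACE_GETFDPIC, pid, PTRACE_GETFDPIC_EXEC, loadmap);
}
```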
|
/kernel/time/

Kconfig
    52 # and not from the timer interrupt context
    155 tickless cputime accounting. The former case relies on context
    159 bool "Force user context tracking"
    164 support the user context tracking subsystem. But there are also
    169 user context tracking backend but doesn't yet fulfill all the
    172 for user context tracking and the subsystems that rely on it: RCU
    175 dynticks subsystem by forcing the user context tracking on all
    179 architecture backend for the user context tracking.
|
/kernel/trace/

rethook.c
    71 static int rethook_init_node(void *nod, void *context) in rethook_init_node() argument
    75 node->rethook = context; in rethook_init_node()
    79 static int rethook_fini_pool(struct objpool_head *head, void *context) in rethook_fini_pool() argument
    81 kfree(context); in rethook_fini_pool()
|
trace_entries.h
    450 __field( int, context )
    456 __entry->context,
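
Here `__field(int, context)` declares a member of the timerlat ring-buffer entry and `__entry->context` fills it. trace_entries.h uses the internal FTRACE_ENTRY() machinery, but the same __field()/__entry pairing is the documented TRACE_EVENT() interface; a hedged sketch with a made-up event (the CREATE_TRACE_POINTS boilerplate a real event also needs is omitted):

```c
#include <linux/tracepoint.h>

/* Hypothetical tracepoint illustrating the __field()/__entry-> pairing;
 * not one of the events actually defined in trace_entries.h. */
TRACE_EVENT(sample_context,

	TP_PROTO(int context),

	TP_ARGS(context),

	TP_STRUCT__entry(
		__field(int, context)            /* reserves a slot in the entry */
	),

	TP_fast_assign(
		__entry->context = context;      /* fills it when the event fires */
	),

	TP_printk("context=%d", __entry->context)
);
```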
|
trace_osnoise.c
    563 entry->context = sample->context; in __record_timerlat_sample()
    1779 s.context = IRQ_CONTEXT; in timerlat_irq()
    1898 s.context = THREAD_CONTEXT; in timerlat_main()
    2503 s.context = THREAD_URET; in timerlat_fd_read()
    2538 s.context = THREAD_CONTEXT; in timerlat_fd_read()
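
The timerlat tracer stamps each sample with the context it was taken in (IRQ_CONTEXT, THREAD_CONTEXT, or THREAD_URET for the return-to-user path), and trace_output.c below renders that value through the timerlat_lat_context[] string table. A self-contained sketch of the enum-plus-lookup-table pattern, with hypothetical names:

```c
#include <stdio.h>

/* Mirrors the sample contexts seen in trace_osnoise.c. */
enum sample_context { IRQ_CTX, THREAD_CTX, USER_RET_CTX };

/* Index-by-enum string table, as trace_output.c does with
 * timerlat_lat_context[field->context]. */
static const char * const context_name[] = {
	[IRQ_CTX]      = "irq",
	[THREAD_CTX]   = "thread",
	[USER_RET_CTX] = "user-ret",
};

int main(void)
{
	enum sample_context c = THREAD_CTX;

	printf("timerlat sample context: %s\n", context_name[c]);
	return 0;
}
```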
|
Kconfig
    163 # enabled by all tracers (context switch and event tracer) they select TRACING.
    490 In the context of high-performance computing (HPC), the Operating
    493 context of Linux, NMIs, IRQs, SoftIRQs, and any other system thread
    536 timer latency observed at the hardirq context before the
    545 stacktrace at the IRQ context, which helps to identify the code
    562 bool "Trace process context switches and events"
    866 generally used outside of that context, and is normally
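
The help text above describes the osnoise/timerlat tracers, which are selected at run time through tracefs. A small sketch that enables the osnoise tracer, assuming tracefs is mounted at /sys/kernel/tracing, the kernel was built with the tracer, and the program runs as root:

```c
#include <stdio.h>

int main(void)
{
	/* Writing a tracer name to current_tracer activates it. */
	FILE *f = fopen("/sys/kernel/tracing/current_tracer", "w");

	if (!f) {
		perror("current_tracer");
		return 1;
	}
	fputs("osnoise\n", f);
	return fclose(f) ? 1 : 0;
}
```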
|
trace_events_user.c
    1642 int context; in user_event_perf() local
    1645 &regs, &context); in user_event_perf()
    1659 perf_trace_buf_submit(perf_entry, size, context, in user_event_perf()
    1666 perf_swevent_put_recursion_context(context); in user_event_perf()
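
user_event_perf() brackets its buffer use with a recursion context: the allocation call returns an index through &context, and perf_swevent_put_recursion_context() releases it, so re-entry on the same CPU (say, an IRQ firing mid-event) gets its own slot instead of corrupting the buffer in use. A self-contained sketch of those semantics, not the perf API itself; all names are hypothetical:

```c
#include <stdio.h>

/* One buffer slot per nesting level, loosely like perf's per-CPU slots. */
#define MAX_LEVELS 4
static int  level_busy[MAX_LEVELS];
static char level_buf[MAX_LEVELS][128];

/* Claim the first free level; -1 means "nested too deep, drop the event". */
static int get_recursion_context(void)
{
	for (int i = 0; i < MAX_LEVELS; i++) {
		if (!level_busy[i]) {
			level_busy[i] = 1;
			return i;
		}
	}
	return -1;
}

static void put_recursion_context(int ctx)
{
	level_busy[ctx] = 0;   /* like perf_swevent_put_recursion_context() */
}

int main(void)
{
	int ctx = get_recursion_context();

	if (ctx < 0)
		return 1;
	snprintf(level_buf[ctx], sizeof(level_buf[ctx]), "event data");
	printf("level %d: %s\n", ctx, level_buf[ctx]);
	put_recursion_context(ctx);
	return 0;
}
```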
|
trace_events_hist.c
    3408 struct snapshot_context *context = cond_data; in cond_snapshot_update() local
    3417 track_val = get_track_val(track_data->hist_data, context->elt, in cond_snapshot_update()
    3424 memcpy(track_data->key, context->key, track_data->key_len); in cond_snapshot_update()
    3426 elt_data = context->elt->private_data; in cond_snapshot_update()
    3444 struct snapshot_context context; in save_track_data_snapshot() local
    3446 context.elt = elt; in save_track_data_snapshot()
    3447 context.key = key; in save_track_data_snapshot()
    3449 tracing_snapshot_cond(file->tr, &context); in save_track_data_snapshot()
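
save_track_data_snapshot() must pass two values (elt and key) through the single void *cond_data parameter of tracing_snapshot_cond(), so it packs them into a stack-allocated struct snapshot_context that cond_snapshot_update() casts back. A self-contained sketch of the pattern with hypothetical names; the stack allocation is safe because the callee does not retain the pointer:

```c
#include <stdio.h>

/* Carries several values through a single void * parameter, like
 * struct snapshot_context in trace_events_hist.c. */
struct snapshot_ctx {
	int         elt;
	const char *key;
};

/* Consumer side: cast the cookie back, as cond_snapshot_update() does. */
static int cond_update(void *cond_data)
{
	struct snapshot_ctx *ctx = cond_data;

	printf("snapshot: elt=%d key=%s\n", ctx->elt, ctx->key);
	return 1;
}

/* Producer side: pack the values and pass a pointer down, as
 * save_track_data_snapshot() does. */
static void save_snapshot(int elt, const char *key)
{
	struct snapshot_ctx ctx = { .elt = elt, .key = key };

	cond_update(&ctx);
}

int main(void)
{
	save_snapshot(42, "cpu=3");
	return 0;
}
```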
|
trace_output.c
    1608 timerlat_lat_context[field->context], in trace_timerlat_print()
    1625 field->context, in trace_timerlat_raw()
|
/kernel/events/

hw_breakpoint.c
    743 void *context, in register_user_hw_breakpoint() argument
    747 context); in register_user_hw_breakpoint()
    844 void *context) in register_wide_hw_breakpoint() argument
    857 triggered, context); in register_wide_hw_breakpoint()
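
register_user_hw_breakpoint() and register_wide_hw_breakpoint() both accept an opaque void *context that ends up stored with the resulting perf event (see the perf_event_alloc() hits below). A hedged module-style sketch of arming a system-wide write watchpoint, loosely modeled on samples/hw_breakpoint/data_breakpoint.c:

```c
#include <linux/module.h>
#include <linux/err.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>

static int watched;                        /* the variable being watched */
static struct perf_event * __percpu *wp;   /* one breakpoint event per CPU */

static void wp_handler(struct perf_event *bp, struct perf_sample_data *data,
		       struct pt_regs *regs)
{
	pr_info("write to &watched observed\n");
}

static int __init wp_init(void)
{
	struct perf_event_attr attr;

	hw_breakpoint_init(&attr);             /* fill in breakpoint defaults */
	attr.bp_addr = (unsigned long)&watched;
	attr.bp_len  = HW_BREAKPOINT_LEN_4;
	attr.bp_type = HW_BREAKPOINT_W;        /* fire on writes only */

	/* context is NULL here; a driver could pass private state instead. */
	wp = register_wide_hw_breakpoint(&attr, wp_handler, NULL);
	if (IS_ERR((void __force *)wp))
		return PTR_ERR((void __force *)wp);
	return 0;
}

static void __exit wp_exit(void)
{
	unregister_wide_hw_breakpoint(wp);
}

module_init(wp_init);
module_exit(wp_exit);
MODULE_LICENSE("GPL");
```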
|
core.c
    12824 void *context, int cgroup_fd) in perf_event_alloc() argument
    12908 context = parent_event->overflow_handler_context; in perf_event_alloc()
    12921 event->overflow_handler_context = context; in perf_event_alloc()
    13767 void *context) in perf_event_create_kernel_counter() argument
    13788 overflow_handler, context, -1); in perf_event_create_kernel_counter()
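
perf_event_create_kernel_counter() opens a perf event for in-kernel use with no file descriptor; the void *context flows into perf_event_alloc() and is stored as event->overflow_handler_context, where the overflow handler can read it back. A hedged kernel-style sketch; the attribute values and names are illustrative:

```c
#include <linux/perf_event.h>

struct my_state { u64 overflows; };

static struct my_state state;

static void my_overflow(struct perf_event *event,
			struct perf_sample_data *data, struct pt_regs *regs)
{
	/* The context passed at creation time comes back via the event. */
	struct my_state *st = event->overflow_handler_context;

	st->overflows++;
}

static struct perf_event *open_cycles_counter(int cpu)
{
	struct perf_event_attr attr = {
		.type          = PERF_TYPE_HARDWARE,
		.config        = PERF_COUNT_HW_CPU_CYCLES,
		.size          = sizeof(attr),
		.sample_period = 1000000,   /* overflow every 1M cycles */
	};

	/* task == NULL: count on `cpu` regardless of what runs there. */
	return perf_event_create_kernel_counter(&attr, cpu, NULL,
						my_overflow, &state);
}
```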
|
/kernel/trace/rv/monitors/sco/

Kconfig
    10 Monitor to ensure sched_set_state happens only in thread context.
|
/kernel/trace/rv/monitors/snroc/

Kconfig
    10 Monitor to ensure sched_set_state happens only in the respective task's context.
|
/kernel/debug/kdb/

kdb_cmds
    2 # These commands are executed in kdb_init() context, no SMP, no
|
/kernel/rcu/

Kconfig
    97 that uses only voluntary context switch (not preemption!),
    117 that uses only context switch (including preemption) and
    119 context switches on all online CPUs, including idle ones,
    262 workloads will incur significant increases in context-switch
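
The "voluntary context switch (not preemption!)" wording describes RCU Tasks: a grace period completes only once every task has voluntarily switched (or sits in idle or userspace), which makes it safe to free code such as tracing trampolines that a merely-preempted task could still be executing. A hedged kernel-style sketch of the update side, assuming CONFIG_TASKS_RCU; the trampoline type is hypothetical:

```c
#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Hypothetical trampoline-style object retired under RCU Tasks rules. */
struct trampoline { void *code; };

static void retire_trampoline(struct trampoline *tr)
{
	/* Returns only after every task has passed a voluntary context
	 * switch (or is idle/in userspace), so no task can still be
	 * executing inside tr->code. */
	synchronize_rcu_tasks();
	kfree(tr);
}
```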
|
Kconfig.debug
    62 reader state for each segment against the actual context.
|
/kernel/irq/

Kconfig
    30 # Support for delayed migration from interrupt context
|