| /kernel/bpf/ |
| tnum.c |
    39  return TNUM(a.value << shift, a.mask << shift);  in tnum_lshift()
    44  return TNUM(a.value >> shift, a.mask >> shift);  in tnum_rshift()
    95  alpha = a.value | a.mask;  in tnum_and()
    132  while (a.value || a.mask) {  in tnum_mul()
    134  if (a.value & 1)  in tnum_mul()
    140  a = tnum_rshift(a, 1);  in tnum_mul()
    162  return a;  in tnum_cast()
    169  return !((a.value | a.mask) & (size - 1));  in tnum_is_aligned()
    186  if (a.mask & 1)  in tnum_sbin()
    193  a.mask >>= 1;  in tnum_sbin()
    [all …]
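The tnum.c hits are the BPF verifier's tristate-number helpers. As a rough illustration only, here is a standalone userspace sketch modeled on the fragments above (not the kernel implementation): a tnum is a value/mask pair where mask bits are "unknown", shifts move both halves together, and alignment holds only if no known-one or unknown bit falls below the alignment size.

```c
#include <stdint.h>
#include <stdio.h>

/* Standalone model: bits set in 'mask' are unknown, the rest are fixed
 * to the corresponding bits of 'value'. */
struct tnum { uint64_t value; uint64_t mask; };

/* Shifting left moves known and unknown bits together. */
static struct tnum tnum_lshift(struct tnum a, uint8_t shift)
{
	return (struct tnum){ a.value << shift, a.mask << shift };
}

static struct tnum tnum_rshift(struct tnum a, uint8_t shift)
{
	return (struct tnum){ a.value >> shift, a.mask >> shift };
}

/* Aligned to 'size' iff no known-one and no unknown bit lies below 'size'. */
static int tnum_is_aligned(struct tnum a, uint64_t size)
{
	return !((a.value | a.mask) & (size - 1));
}

int main(void)
{
	struct tnum a = { 0x4, 0x3 };		/* binary 1xx: any of 4..7 */
	struct tnum b = tnum_lshift(a, 2);	/* 1xx00: any of 16,20,24,28 */

	printf("value=%#llx mask=%#llx 4-aligned=%d\n",
	       (unsigned long long)b.value, (unsigned long long)b.mask,
	       tnum_is_aligned(b, 4));
	return 0;
}
```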
|
| /kernel/trace/ |
| tracing_map.c |
    146  return (a > b) ? 1 : ((a < b) ? -1 : 0);  in tracing_map_cmp_atomic64()
    155  return (a > b) ? 1 : ((a < b) ? -1 : 0); \
    302  if (!a)  in tracing_map_array_free()
    318  kfree(a);  in tracing_map_array_free()
    327  a = kzalloc(sizeof(*a), GFP_KERNEL);  in tracing_map_array_alloc()
    328  if (!a)  in tracing_map_array_alloc()
    333  a->n_pages = n_elts / a->entries_per_page;  in tracing_map_array_alloc()
    336  a->entry_shift = fls(a->entries_per_page) - 1;  in tracing_map_array_alloc()
    337  a->entry_mask = (1 << a->entry_shift) - 1;  in tracing_map_array_alloc()
    350  return a;  in tracing_map_array_alloc()
    [all …]
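The tracing_map_array_alloc() fragments hint at a paged element store: entries live in fixed-size pages, and because the per-page entry count is a power of two, an index splits into a page number (>> entry_shift) and an offset (& entry_mask). A hedged userspace sketch of that layout follows; the paged_array names are hypothetical, error handling is minimal, and the GCC/Clang __builtin_clz() stands in for the kernel's fls().

```c
#include <stdio.h>
#include <stdlib.h>

#define SKETCH_PAGE_SIZE 4096

struct paged_array {
	unsigned int entries_per_page;	/* rounded down to a power of two */
	unsigned int entry_shift;	/* log2(entries_per_page) */
	unsigned int entry_mask;	/* entries_per_page - 1 */
	unsigned int n_pages;
	size_t entry_size;
	void **pages;
};

/* Split the logical index into page number and in-page offset. */
static void *paged_array_elt(struct paged_array *a, unsigned int idx)
{
	return (char *)a->pages[idx >> a->entry_shift] +
	       (idx & a->entry_mask) * a->entry_size;
}

static struct paged_array *paged_array_alloc(unsigned int n_elts, size_t entry_size)
{
	struct paged_array *a = calloc(1, sizeof(*a));
	unsigned int i;

	if (!a)
		return NULL;
	a->entry_size = entry_size;
	a->entries_per_page = SKETCH_PAGE_SIZE / entry_size;
	a->entry_shift = 31 - __builtin_clz(a->entries_per_page); /* ~ fls() - 1 */
	a->entries_per_page = 1u << a->entry_shift;
	a->entry_mask = a->entries_per_page - 1;
	a->n_pages = (n_elts + a->entry_mask) >> a->entry_shift;
	a->pages = calloc(a->n_pages, sizeof(void *));
	if (!a->pages)
		return NULL;
	for (i = 0; i < a->n_pages; i++)
		a->pages[i] = calloc(1, SKETCH_PAGE_SIZE);
	return a;
}

int main(void)
{
	struct paged_array *a = paged_array_alloc(1000, 48);

	if (!a)
		return 1;
	printf("entries/page=%u shift=%u mask=%#x pages=%u elt999=%p\n",
	       a->entries_per_page, a->entry_shift, a->entry_mask,
	       a->n_pages, paged_array_elt(a, 999));
	return 0;
}
```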
|
| Kconfig |
    236  by using a compiler feature to insert a small, 5-byte No-Operation
    354  When a 1 is echoed into this file profiling begins, and when a
    460  if a system is reliable for Real Time tasks.
    579  bool "Create a snapshot trace buffer"
    593  Allow doing a snapshot of a single CPU buffer instead of a
    736  kernel function entry or a tracepoint.
    808  address into a string.
    1014  that triggered a recursion.
    1040  also has a noticeable overhead when enabled.
    1074  bool "Perform a startup test on ftrace"
    [all …]
|
| trace_events_filter_test.h |
    12  TP_PROTO(int a, int b, int c, int d, int e, int f, int g, int h),
    14  TP_ARGS(a, b, c, d, e, f, g, h),
    17  __field(int, a)
    28  __entry->a = a;
    39  __entry->a, __entry->b, __entry->c, __entry->d,
|
| trace_branch.c |
    331  const struct ftrace_branch_data *a = p1;  in annotated_branch_stat_cmp() local
    336  percent_a = get_incorrect_percent(a);  in annotated_branch_stat_cmp()
    344  if (a->incorrect < b->incorrect)  in annotated_branch_stat_cmp()
    346  if (a->incorrect > b->incorrect)  in annotated_branch_stat_cmp()
    354  if (a->correct > b->correct)  in annotated_branch_stat_cmp()
    356  if (a->correct < b->correct)  in annotated_branch_stat_cmp()
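annotated_branch_stat_cmp() orders branch statistics by the percentage of mispredicted hits and falls back to the raw counts as tie-breakers. Below is a hedged userspace sketch of that comparator pattern wired into qsort(); the branch_stat and percent_incorrect names are mine, not the kernel's.

```c
#include <stdio.h>
#include <stdlib.h>

struct branch_stat { unsigned long correct, incorrect; };

/* Percentage of taken-the-unlikely-path hits; -1 if never hit. */
static long percent_incorrect(const struct branch_stat *s)
{
	unsigned long total = s->correct + s->incorrect;

	return total ? (long)((s->incorrect * 100) / total) : -1;
}

static int branch_stat_cmp(const void *p1, const void *p2)
{
	const struct branch_stat *a = p1, *b = p2;
	long pa = percent_incorrect(a), pb = percent_incorrect(b);

	if (pa != pb)
		return pa < pb ? -1 : 1;
	if (a->incorrect != b->incorrect)
		return a->incorrect < b->incorrect ? -1 : 1;
	/* More correct predictions sorts earlier when everything else ties. */
	if (a->correct != b->correct)
		return a->correct > b->correct ? -1 : 1;
	return 0;
}

int main(void)
{
	struct branch_stat stats[] = { { 90, 10 }, { 50, 50 }, { 99, 1 } };

	qsort(stats, 3, sizeof(stats[0]), branch_stat_cmp);
	printf("least to most mispredicted: %ld%%, %ld%%, %ld%%\n",
	       percent_incorrect(&stats[0]), percent_incorrect(&stats[1]),
	       percent_incorrect(&stats[2]));
	return 0;
}
```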
|
| trace_probe.c |
    21  #define C(a, b) b  argument
    2202  if (a->nr_args < b->nr_args)  in trace_probe_compare_arg_type()
    2203  return a->nr_args + 1;  in trace_probe_compare_arg_type()
    2204  if (a->nr_args > b->nr_args)  in trace_probe_compare_arg_type()
    2207  for (i = 0; i < a->nr_args; i++) {  in trace_probe_compare_arg_type()
    2260  struct probe_arg *a = args + i;  in trace_probe_print_args() local
    2263  if (likely(!a->count)) {  in trace_probe_print_args()
    2264  if (!a->type->print(s, data + a->offset, field))  in trace_probe_print_args()
    2269  p = data + a->offset;  in trace_probe_print_args()
    2270  for (j = 0; j < a->count; j++) {  in trace_probe_print_args()
    [all …]
|
| /kernel/ |
| Kconfig.preempt |
    31  Select this option if you are building a kernel for a server or
    48  This allows reaction to interactive events by allowing a
    50  is in kernel mode executing a system call. This allows
    54  Select this if you are building a kernel for a desktop system.
    62  all kernel code (that is not executing in a critical section)
    65  even if it is in kernel mode executing a system call and would
    66  otherwise not be about to reach a natural preemption point.
    69  and a slight runtime overhead to kernel code.
    71  Select this if you are building a kernel for a desktop or
    81  This option provides a scheduler driven preemption model that
    [all …]
|
| Kconfig.kexec |
    25  kexec is a system call that implements the ability to shutdown your
    26  current kernel, and to start another kernel. It is like a reboot
    27  but it is independent of the system firmware. And like a reboot
    32  It is an ongoing process to be certain the hardware in a machine
    57  there's a signature that we can check, then it must be valid.
    64  bool "Require a valid signature in kexec_file_load() syscall"
    122  a specially reserved region and then later executed after
    123  a crash by kdump/kexec. The crash dump kernel must be compiled
    124  to a memory address not used by the main kernel or BIOS using
    125  PHYSICAL_START, or it must be built as a relocatable image
    [all …]
|
| Kconfig.hz |
    13  a fast response for user interaction and that may experience bus
    14  contention and cacheline bounces as a result of timer interrupts.
    23  100 Hz is a typical choice for servers, SMP and NUMA systems
    30  250 Hz is a good compromise choice allowing server performance
    38  300 Hz is a good compromise choice allowing server performance
|
| auditfilter.c |
    710  if (a->flags != b->flags ||  in audit_compare_rule()
    711  a->pflags != b->pflags ||  in audit_compare_rule()
    712  a->listnr != b->listnr ||  in audit_compare_rule()
    713  a->action != b->action ||  in audit_compare_rule()
    714  a->field_count != b->field_count)  in audit_compare_rule()
    717  for (i = 0; i < a->field_count; i++) {  in audit_compare_rule()
    719  a->fields[i].op != b->fields[i].op)  in audit_compare_rule()
    722  switch (a->fields[i].type) {  in audit_compare_rule()
    742  if (strcmp(audit_tree_path(a->tree),  in audit_compare_rule()
    753  if (strcmp(audit_mark_path(a->exe),  in audit_compare_rule()
    [all …]
|
| static_call_inline.c |
    78  const struct static_call_site *a = _a;  in static_call_site_cmp() local
    80  const struct static_call_key *key_a = static_call_key(a);  in static_call_site_cmp()
    95  struct static_call_site *a = _a;  in static_call_site_swap() local
    97  struct static_call_site tmp = *a;  in static_call_site_swap()
    99  a->addr = b->addr - delta;  in static_call_site_swap()
    100  a->key = b->key - delta;  in static_call_site_swap()
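The static_call_site_swap() fragments show the trick behind sorting a table of position-relative entries: each entry stores its targets as offsets relative to the entry itself, so swapping two entries must re-bias those offsets by the distance between the slots ("delta"). A hedged standalone sketch of the idea follows; rel_site is a made-up type, and a full intptr_t offset is used here where the kernel packs a 32-bit one.

```c
#include <stdint.h>
#include <stdio.h>

struct rel_site { intptr_t addr; };	/* offset from &site->addr to target */

static void *rel_site_target(const struct rel_site *s)
{
	return (void *)((intptr_t)&s->addr + s->addr);
}

static void rel_site_set(struct rel_site *s, void *target)
{
	s->addr = (intptr_t)target - (intptr_t)&s->addr;
}

/* Swap the contents of two slots while keeping each offset pointing at
 * the same absolute target it pointed at before the swap. */
static void rel_site_swap(struct rel_site *a, struct rel_site *b)
{
	intptr_t delta = (intptr_t)a - (intptr_t)b;
	struct rel_site tmp = *a;

	a->addr = b->addr - delta;	/* b's contents, re-based to slot a */
	b->addr = tmp.addr + delta;	/* a's contents, re-based to slot b */
}

int main(void)
{
	static int x, y;		/* stand-in targets */
	struct rel_site sites[2];

	rel_site_set(&sites[0], &x);
	rel_site_set(&sites[1], &y);
	rel_site_swap(&sites[0], &sites[1]);
	printf("site0->y: %d, site1->x: %d\n",
	       rel_site_target(&sites[0]) == (void *)&y,
	       rel_site_target(&sites[1]) == (void *)&x);	/* 1, 1 */
	return 0;
}
```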
|
| cred.c |
    493  int cred_fscmp(const struct cred *a, const struct cred *b)  in cred_fscmp() argument
    498  if (a == b)  in cred_fscmp()
    500  if (uid_lt(a->fsuid, b->fsuid))  in cred_fscmp()
    502  if (uid_gt(a->fsuid, b->fsuid))  in cred_fscmp()
    505  if (gid_lt(a->fsgid, b->fsgid))  in cred_fscmp()
    507  if (gid_gt(a->fsgid, b->fsgid))  in cred_fscmp()
    510  ga = a->group_info;  in cred_fscmp()
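cred_fscmp() builds a total order over filesystem credentials by comparing one field at a time and returning at the first difference. A minimal sketch of that field-by-field comparator pattern, under the assumption of plain integer IDs (the fs_cred type and field set here are illustrative, not the kernel's struct cred):

```c
#include <stdio.h>

struct fs_cred { unsigned int fsuid, fsgid; };

/* Return -1, 0 or 1 so the result can drive sorting or equality checks. */
static int fs_cred_cmp(const struct fs_cred *a, const struct fs_cred *b)
{
	if (a == b)
		return 0;
	if (a->fsuid < b->fsuid)
		return -1;
	if (a->fsuid > b->fsuid)
		return 1;
	if (a->fsgid < b->fsgid)
		return -1;
	if (a->fsgid > b->fsgid)
		return 1;
	return 0;
}

int main(void)
{
	struct fs_cred x = { 1000, 1000 }, y = { 1000, 100 };

	printf("%d\n", fs_cred_cmp(&x, &y));	/* 1: x orders after y */
	return 0;
}
```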
|
| sys.c |
    90  # define SET_FPEMU_CTL(a, b) (-EINVAL)  argument
    93  # define GET_FPEMU_CTL(a, b) (-EINVAL)  argument
    96  # define SET_FPEXC_CTL(a, b) (-EINVAL)  argument
    102  # define GET_ENDIAN(a, b) (-EINVAL)  argument
    105  # define SET_ENDIAN(a, b) (-EINVAL)  argument
    108  # define GET_TSC_CTL(a) (-EINVAL)  argument
    111  # define SET_TSC_CTL(a) (-EINVAL)  argument
    114  # define GET_FP_MODE(a) (-EINVAL)  argument
    117  # define SET_FP_MODE(a,b) (-EINVAL)  argument
    120  # define SVE_SET_VL(a) (-EINVAL)  argument
    [all …]
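These sys.c hits are fallback macros: when an architecture does not implement a given prctl() operation, the helper is defined to evaluate to -EINVAL so the common dispatch code compiles unchanged and simply reports "not supported". A hedged sketch of that pattern is below; the PRCTL_* option constants and do_prctl() are illustrative stand-ins, not the kernel's definitions.

```c
#include <errno.h>
#include <stdio.h>

/* Arch-specific builds would define these before this point; otherwise the
 * operation degrades to an -EINVAL stub. */
#ifndef GET_TSC_CTL
# define GET_TSC_CTL(adr)	(-EINVAL)
#endif
#ifndef SET_TSC_CTL
# define SET_TSC_CTL(val)	(-EINVAL)
#endif

#define PRCTL_GET_TSC	25	/* made-up option numbers for the demo */
#define PRCTL_SET_TSC	26

static long do_prctl(int option, unsigned long arg2)
{
	switch (option) {
	case PRCTL_GET_TSC:
		return GET_TSC_CTL(arg2);
	case PRCTL_SET_TSC:
		return SET_TSC_CTL(arg2);
	default:
		return -EINVAL;
	}
}

int main(void)
{
	/* Without an arch-specific definition this prints -22 (-EINVAL). */
	printf("%ld\n", do_prctl(PRCTL_GET_TSC, 0));
	return 0;
}
```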
|
| /kernel/power/ |
| Kconfig |
    30  user-space before invoking suspend. There's a run-time switch
    79  will get corrupted in a nasty way.
    164  Allow the kernel to trigger a system transition into a global sleep
    192  objects with the help of a sysfs-based interface.
    211  wake-up event or a driver's request.
    253  Sets up a watchdog timer to capture drivers that are
    254  locked up attempting to suspend/resume a device.
    272  different then a non-fatal warning (with a stack trace of
    293  <asm/resume-trace.h> header with a TRACE_RESUME() macro.
    296  dependent, x86 will print the information during a
    [all …]
|
| /kernel/module/ |
| Kconfig |
    69  loaded before trying to load a module there is a small time window in
    90  users of request_module() do want a proper return value. If a call
    130  is usually a really bad idea.
    155  This option allows you to maintain a record of each unloaded
    156  module that tainted the kernel. In addition to displaying a
    244  field inserted into their modinfo section, which contains a
    246  see exactly which source was used to build a module (since
    248  the version). With this option, such a "srcversion" field
    351  might have a limited selection of the supported types.
    412  a namespace. A module that makes use of a symbol exported with such a
    [all …]
|
| /kernel/rcu/ |
| refscale.c |
    642  unsigned int a;  member
    663  WRITE_ONCE(rtsp->a, rtsp->a + 1);  in typesafe_ref_release()
    702  unsigned int a;  in typesafe_delay_section() local
    716  a = READ_ONCE(rtsp->a);  in typesafe_delay_section()
    721  if (a != READ_ONCE(rtsp->a)) {  in typesafe_delay_section()
    727  b = READ_ONCE(rtsp->a);  in typesafe_delay_section()
    733  WARN_ONCE(a != b, "Re-read of ->a changed from %u to %u.\n", a, b);  in typesafe_delay_section()
    736  WARN_ON_ONCE(a * a != b);  in typesafe_delay_section()
    758  WRITE_ONCE(rtsp->a, rtsp->a + 1);  in typesafe_alloc_one()
    759  WRITE_ONCE(rtsp->b, rtsp->a * rtsp->a);  in typesafe_alloc_one()
    [all …]
|
| Kconfig.debug |
    18  need to be converted to pass a lockdep expression. To prevent
    32  This option provides a kernel module that runs performance
    38  Say M if you want the RCU performance tests to build as a module.
    47  This option provides a kernel module that runs torture tests
    53  Say M if you want the RCU torture tests to build as a module.
    107  This option provides a kernel module that runs performance tests
    113  Say M if you want to build it as a module instead.
    122  If a given RCU grace period extends more than the specified
    123  number of seconds, a CPU stall warning is printed. If the
    133  If a given expedited RCU grace period extends more than the
    [all …]
|
| Kconfig |
    96  This option force-enables a task-based RCU implementation
    116  This option force-enables a task-based RCU implementation
    133  This option enables a task-based RCU implementation that uses
    173  Select a specific number if testing RCU itself.
    198  fanout to a large number will likely cause problematic
    202  Select a specific number if testing RCU itself.
    231  a given grace period before priority-boosting preempted RCU
    238  bool "Perform RCU expedited work in a real-time kthread"
    267  For each such CPU, a kthread ("rcuox/N") will be created to
    282  sqrt(nr_cpu_ids) can be a bit of a blunt instrument.
    [all …]
|
| /kernel/time/ |
| timeconst.bc |
    5  define gcd(a,b) {
    9  b = a % b;
    10  a = t;
    12  return a;
    20  /* Adjustment factor when a ceiling value is used. Use as:
    29  /* Compute the appropriate mul/adj values as well as a shift count,
    31  a shift value will be correct in the signed integer range and off
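timeconst.bc is the bc script that precomputes the HZ/millisecond/microsecond conversion constants at build time; the gcd() fragment above is plain Euclid's algorithm, used to reduce the conversion ratio before the multiply/adjust/shift constants are derived. A C rendering of that reduction step, as a sketch (the printed names only loosely follow the generated timeconst.h constants):

```c
#include <stdio.h>

/* Euclid's algorithm, mirroring the gcd() definition in timeconst.bc. */
static unsigned long gcd(unsigned long a, unsigned long b)
{
	while (b) {
		unsigned long t = b;

		b = a % b;
		a = t;
	}
	return a;
}

int main(void)
{
	unsigned long hz = 24;			/* example tick rate */
	unsigned long g = gcd(1000, hz);

	/* 1000/24 reduces to 125/3: one jiffy is 125/3 milliseconds. */
	printf("HZ_TO_MSEC  numerator=%lu denominator=%lu\n", 1000 / g, hz / g);
	printf("MSEC_TO_HZ  numerator=%lu denominator=%lu\n", hz / g, 1000 / g);
	return 0;
}
```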
|
| Kconfig |
    28  # Architecture can handle broadcast in a driver-agnostic way
    105  This option keeps the tick running periodically at a constant
    112  This option enables a tickless idle system: timer interrupts
    133  the CPU is running tasks. Typically this requires running a single
    145  If you're a distro say Y.
    188  We keep it around for a little while to enforce backward
    207  The default is based on a half-second clocksource watchdog
|
| /kernel/trace/rv/ |
| Kconfig |
    30  Enable the kernel runtime verification infrastructure. RV is a
    34  actual execution, comparing it against a formal specification of
    76  monitor can cause a reaction to the detection of an exception
    86  Enables the printk reactor. The printk reactor emits a printk()
    94  Enables the panic reactor. The panic reactor emits a printk()
|
| /kernel/kcsan/ |
| .kunitconfig |
    3  # option to configure a machine with several cores. For example:
    11  # Need some level of concurrency to test a concurrency sanitizer.
    22  # (or alter) this, in conjunction with setting a different test timeout with,
|
| /kernel/dma/ |
| Kconfig |
    100  may have to specify a smaller size of the initial pool using
    118  This enables support for restricted DMA pools which provide a level of
    129  # The only thing that is really required is a way to set an uncached bit
    207  Allocator as a percentage of the total memory in the system.
    236  size. This works well for buffers up to a few hundreds kilobytes, but
    237  for larger buffers it just a memory waste. With this parameter you can
    240  expressed as a power of two multiplied by the PAGE_SIZE.
    258  This option causes a performance degradation. Use only if you want to
|
| /kernel/gcov/ |
| Kconfig |
    18  directories, add a line similar to the following to the respective
    21  For a single file (e.g. main.o):
    51  Note that a kernel compiled with profiling flags will be significantly
|
| /kernel/locking/ |
| ww_mutex.h |
    228  __ww_ctx_less(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)  in __ww_ctx_less() argument
    237  int a_prio = a->task->prio;  in __ww_ctx_less()
    252  a->task->dl.deadline))  in __ww_ctx_less()
    255  if (dl_time_before(a->task->dl.deadline,  in __ww_ctx_less()
    265  return (signed long)(a->stamp - b->stamp) > 0;  in __ww_ctx_less()
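__ww_ctx_less() first prefers deadline/priority ordering and only then falls back to the acquire-context stamps; the last fragment is the classic wraparound-safe comparison of monotonically increasing unsigned stamps, done by subtracting and interpreting the difference as signed. A small sketch of just that stamp comparison (stamp_after() is a name I made up; it assumes the usual two's-complement wrap, as the kernel does):

```c
#include <limits.h>
#include <stdio.h>

/* Wraparound-safe "was a issued after b?" for unsigned sequence stamps. */
static int stamp_after(unsigned long a, unsigned long b)
{
	return (long)(a - b) > 0;
}

int main(void)
{
	unsigned long old = ULONG_MAX - 1;	/* issued just before the wrap */
	unsigned long new = 2;			/* issued just after the wrap */

	printf("naive compare says newer: %d\n", new > old);		/* 0: wrong */
	printf("wrap-safe compare says newer: %d\n", stamp_after(new, old)); /* 1 */
	return 0;
}
```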
|