**/kernel/trace/**
- **trace_probe_tmpl.h**
  - 11: `*(u8 *)buf = (u8)val;` (in `fetch_store_raw()`)
  - 14: `*(u16 *)buf = (u16)val;` (in `fetch_store_raw()`)
  - 17: `*(u32 *)buf = (u32)val;` (in `fetch_store_raw()`)
  - 21: `*(u64 *)buf = (u64)val;` (in `fetch_store_raw()`)
  - 107: `*val = code->immediate;` (in `process_common_fetch_insn()`)
  - 135: `lval = val;` (in `process_fetch_insn_bottom()`)
  - 136–137: `ret = probe_mem_read(&val, (void *)val + code->offset, sizeof(val));` (in `process_fetch_insn_bottom()`)
  - 139: `lval = val;` (in `process_fetch_insn_bottom()`)
  - 141: `(void *)val + code->offset, sizeof(val));` (continuation, in `process_fetch_insn_bottom()`)
  - … (remaining references truncated)
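The `fetch_store_raw()` hits above all narrow a register-width value to the probe argument's declared size before writing it into the trace buffer. A minimal userspace sketch of that dispatch, with invented names and `memcpy` standing in for the kernel's direct casted stores:

```c
#include <stdint.h>
#include <string.h>

/*
 * Hypothetical sketch of the fetch_store_raw() pattern: a fetched
 * register-width value is truncated to the argument's declared size
 * and copied into the output buffer. Names are illustrative.
 */
static void store_raw(unsigned long val, void *buf, int size)
{
	switch (size) {
	case 1: { uint8_t  v = (uint8_t)val;  memcpy(buf, &v, 1); break; }
	case 2: { uint16_t v = (uint16_t)val; memcpy(buf, &v, 2); break; }
	case 4: { uint32_t v = (uint32_t)val; memcpy(buf, &v, 4); break; }
	default:{ uint64_t v = (uint64_t)val; memcpy(buf, &v, 8); break; }
	}
}
```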
|
- **fgraph.c**
  - 227: `return val & FGRAPH_FRAME_OFFSET_MASK;` (in `__get_offset()`)
  - 282: `offset -= __get_data_size(val);` (in `get_data_type_data()`)
  - 311: `gvals[idx] = val;` (in `ret_stack_set_task_var()`)
  - 348: `unsigned long val;` (local, in `fgraph_reserve_data()`)
  - 366: `val = make_data_type_val(idx, data_size, __get_offset(val) + data_size + 1);` (in `fgraph_reserve_data()`)
  - 468: `unsigned long val;` (local, in `fgraph_retrieve_parent_data()`)
  - 489: `if (__get_data_index(val) == idx)` (in `fgraph_retrieve_parent_data()`)
  - 491: `offset -= __get_data_size(val) + 1;` (in `fgraph_retrieve_parent_data()`)
  - 567: `unsigned long val;` (local, in `ftrace_push_return_trace()`)
  - 601: `current->ret_stack[offset] = val;` (in `ftrace_push_return_trace()`)
  - … (remaining references truncated)
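The fgraph references show a single `unsigned long` shadow-stack word carrying an index, a data size, and a frame offset at once (`make_data_type_val()`, `__get_offset()`, `__get_data_size()`). A sketch of that bit-packing idea; the field widths and helper names below are invented, not the kernel's `FGRAPH_*` layout:

```c
#include <stdio.h>

/* Illustrative field widths; the kernel defines its own shifts/masks. */
#define OFFSET_BITS	10
#define SIZE_BITS	6
#define OFFSET_MASK	((1UL << OFFSET_BITS) - 1)
#define SIZE_MASK	((1UL << SIZE_BITS) - 1)

/* Pack a type index, data size, and frame offset into one word. */
static unsigned long make_val(unsigned long idx, unsigned long size,
			      unsigned long offset)
{
	return (idx << (OFFSET_BITS + SIZE_BITS)) |
	       (size << OFFSET_BITS) | offset;
}

static unsigned long get_offset(unsigned long val) { return val & OFFSET_MASK; }
static unsigned long get_size(unsigned long val)   { return (val >> OFFSET_BITS) & SIZE_MASK; }
static unsigned long get_index(unsigned long val)  { return val >> (OFFSET_BITS + SIZE_BITS); }

int main(void)
{
	unsigned long v = make_val(3, 16, 42);

	printf("idx=%lu size=%lu offset=%lu\n",
	       get_index(v), get_size(v), get_offset(v));
	return 0;
}
```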
|
- **trace_eprobe.c**
  - 301: `unsigned long val;` (local, in `get_event_field()`)
  - 315: `val = (unsigned long)addr;` (in `get_event_field()`)
  - 324: `return val;` (in `get_event_field()`)
  - 330: `val = *(char *)addr;` (in `get_event_field()`)
  - 336: `val = *(short *)addr;` (in `get_event_field()`)
  - 342: `val = *(int *)addr;` (in `get_event_field()`)
  - 349: `val = *(long *)addr;` (in `get_event_field()`)
  - 355: `val = (unsigned long)addr;` (in `get_event_field()`)
  - 358: `return val;` (in `get_event_field()`)
  - 370: `unsigned long val;` (local, in `get_eprobe_size()`)
  - … (remaining references truncated)
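`get_event_field()` widens a field of known byte size to `unsigned long`, loading through signed types so that small signed fields sign-extend, as the `*(char *)` / `*(short *)` / `*(int *)` casts above do. A hedged sketch of the same dispatch with fixed-width types:

```c
#include <stdint.h>

/*
 * Sketch of the get_event_field() size dispatch. Loading through a
 * signed type of the right width sign-extends on the widening
 * conversion to unsigned long. Illustrative only.
 */
static unsigned long load_field(const void *addr, int size)
{
	switch (size) {
	case 1: return (unsigned long)*(const int8_t *)addr;
	case 2: return (unsigned long)*(const int16_t *)addr;
	case 4: return (unsigned long)*(const int32_t *)addr;
	default: return (unsigned long)*(const long *)addr;
	}
}
```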
|
- **trace_events_inject.c**
  - 43: `u64 val;` (local, in `parse_field()`)
  - 95: `ret = kstrtoll(num, 0, &val);` (in `parse_field()`)
  - 97: `ret = kstrtoull(num, 0, &val);` (in `parse_field()`)
  - 102: `*pv = val;` (in `parse_field()`)
  - 201: `u64 val = 0;` (local, in `parse_entry()`)
  - 212: `while ((len = parse_field(str, call, &field, &val)) > 0) {` (in `parse_entry()`)
  - 217: `char *addr = (char *)(unsigned long) val;` (in `parse_entry()`)
  - 249: `u8 tmp = (u8) val;` (in `parse_entry()`)
  - 255: `u16 tmp = (u16) val;` (in `parse_entry()`)
  - 261: `u32 tmp = (u32) val;` (in `parse_entry()`)
  - … (remaining references truncated)
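`parse_field()` parses the user-supplied number into a wide `u64`, choosing `kstrtoll()` or `kstrtoull()` by the field's signedness, and `parse_entry()` later truncates it to the field width with plain casts (`(u8) val`, `(u16) val`, ...). A userspace sketch of the parse step, with `strtoll`/`strtoull` standing in for the kernel helpers:

```c
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

/*
 * Sketch of the parse_field() numeric parse: signed fields go
 * through the signed parser, everything lands in a u64, and the
 * caller truncates to the destination width. Illustrative names.
 */
static int parse_num(const char *num, int is_signed, uint64_t *pv)
{
	char *end;

	errno = 0;
	if (is_signed)
		*pv = (uint64_t)strtoll(num, &end, 0);
	else
		*pv = strtoull(num, &end, 0);
	return (errno || end == num || *end) ? -1 : 0;
}
```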
|
- **trace_sched_switch.c**
  - 209: `orig_size = sizeof(*s) + val * SAVED_CMDLINE_MAP_ELEMENT_SIZE(s);` (in `allocate_cmdlines_buffer()`)
  - 221: `val = (size - sizeof(*s)) / SAVED_CMDLINE_MAP_ELEMENT_SIZE(s);` (in `allocate_cmdlines_buffer()`)
  - 222: `s->cmdline_num = val;` (in `allocate_cmdlines_buffer()`)
  - 225: `s->map_cmdline_to_pid = (unsigned *)&s->saved_cmdlines[val * TASK_COMM_LEN];` (in `allocate_cmdlines_buffer()`)
  - 231: `val * sizeof(*s->map_cmdline_to_pid));` (continuation, in `allocate_cmdlines_buffer()`)
  - 618: `static int tracing_resize_saved_cmdlines(unsigned int val)` (signature)
  - 622: `s = allocate_cmdlines_buffer(val);` (in `tracing_resize_saved_cmdlines()`)
  - 641: `unsigned long val;` (local, in `tracing_saved_cmdlines_size_write()`)
  - 644: `ret = kstrtoul_from_user(ubuf, cnt, 10, &val);` (in `tracing_saved_cmdlines_size_write()`)
  - 649: `if (!val || val > PID_MAX_DEFAULT)` (in `tracing_saved_cmdlines_size_write()`)
  - … (remaining references truncated)
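`allocate_cmdlines_buffer()` carves a header plus two parallel arrays out of one allocation, and can derive the element count back from a byte size. A simplified sketch of that layout arithmetic; the struct and sizes are illustrative, not the kernel's exact ones:

```c
#include <stdint.h>
#include <stdlib.h>

#define COMM_LEN 16	/* stands in for TASK_COMM_LEN */

/*
 * Simplified sketch: one block holds the header, then num comm
 * strings, then a num-entry pid map whose base is computed from the
 * element count, as on lines 225/231 above.
 */
struct saved_cmdlines {
	unsigned int cmdline_num;
	unsigned int *map_cmdline_to_pid;	/* points into this block */
	char saved_cmdlines[];			/* num * COMM_LEN bytes, then the map */
};

static struct saved_cmdlines *alloc_cmdlines(unsigned int num)
{
	size_t elem = COMM_LEN + sizeof(unsigned int);
	struct saved_cmdlines *s = malloc(sizeof(*s) + num * elem);

	if (!s)
		return NULL;
	s->cmdline_num = num;
	s->map_cmdline_to_pid =
		(unsigned int *)&s->saved_cmdlines[num * COMM_LEN];
	return s;
}
```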
|
- **trace_branch.c**
  - 31: `probe_likely_condition(struct ftrace_likely_data *f, int val, int expect)` (signature)
  - 78: `entry->correct = val == expect;` (in `probe_likely_condition()`)
  - 88: `void trace_likely_condition(struct ftrace_likely_data *f, int val, int expect)` (signature)
  - 93: `probe_likely_condition(f, val, expect);` (in `trace_likely_condition()`)
  - 194: `void trace_likely_condition(struct ftrace_likely_data *f, int val, int expect)` (signature)
  - 199: `void ftrace_likely_update(struct ftrace_likely_data *f, int val,` (signature, continues)
  - 207: `val = expect;` (in `ftrace_likely_update()`)
  - 215: `trace_likely_condition(f, val, expect);` (in `ftrace_likely_update()`)
  - 218: `if (val == expect)` (in `ftrace_likely_update()`)
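`ftrace_likely_update()` records whether the branch's actual truth value matched its `likely()`/`unlikely()` annotation (`entry->correct = val == expect;`). A minimal sketch of that accounting with simplified types:

```c
/*
 * Sketch of the branch-profiler bookkeeping: each annotated branch
 * counts hits and misses of its prediction annotation. The kernel's
 * struct carries more context (file, line, function); this is the
 * core comparison only.
 */
struct likely_data {
	unsigned long correct;
	unsigned long incorrect;
};

static void likely_update(struct likely_data *f, int val, int expect)
{
	if (val == expect)	/* annotation predicted this branch correctly */
		f->correct++;
	else
		f->incorrect++;
}
```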
|
- **trace_events_synth.c**
  - 563: `u64 val = var_ref_vals[val_idx];` (local, in `trace_event_raw_event_synth()`)
  - 567: `entry->fields[n_u64].as_u8 = (u8)val;` (in `trace_event_raw_event_synth()`)
  - 579: `entry->fields[n_u64].as_u64 = val;` (in `trace_event_raw_event_synth()`)
  - 1754: `u64 val = va_arg(args, u64);` (local, in `synth_event_trace()`)
  - 1758: `char *str_val = (char *)(long)val;` (in `synth_event_trace()`)
  - 1780: `u64 val;` (local, in `synth_event_trace()`)
  - 1782: `val = va_arg(args, u64);` (in `synth_event_trace()`)
  - 1785: `char *str_val = (char *)(long)val;` (in `synth_event_trace()`)
  - 1885: `u64 val = vals[i];` (local, in `synth_event_trace_array()`)
  - 2018: `char *str_val = (char *)(long)val;` (in `__synth_event_add_val()`)
  - … (remaining references truncated)
|
**/kernel/locking/**
- **qspinlock.c**
  - 150: `if (val == _Q_PENDING_VAL) {` (in `queued_spin_lock_slowpath()`)
  - 152: `val = atomic_cond_read_relaxed(&lock->val,` (continues, in `queued_spin_lock_slowpath()`)
  - 159: `if (val & ~_Q_LOCKED_MASK)` (in `queued_spin_lock_slowpath()`)
  - 167: `val = queued_fetch_set_pending_acquire(lock);` (in `queued_spin_lock_slowpath()`)
  - 176: `if (unlikely(val & ~_Q_LOCKED_MASK)) {` (in `queued_spin_lock_slowpath()`)
  - 179: `if (!(val & _Q_PENDING_MASK))` (in `queued_spin_lock_slowpath()`)
  - 196: `if (val & _Q_LOCKED_MASK)` (in `queued_spin_lock_slowpath()`)
  - 325: `if ((val = pv_wait_head_or_lock(lock, node)))` (in `queued_spin_lock_slowpath()`)
  - 328: `val = atomic_cond_read_acquire(&lock->val, !(VAL & _Q_LOCKED_PENDING_MASK));` (in `queued_spin_lock_slowpath()`)
  - 352: `if ((val & _Q_TAIL_MASK) == tail) {` (in `queued_spin_lock_slowpath()`)
  - … (remaining references truncated)
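Lines 152 and 328 use the `atomic_cond_read_*()` idiom: spin rereading `lock->val` until a predicate on it holds, then act on the final value. A C11 sketch of the idiom with an illustrative bit mask; the real slowpath layers pending/tail encoding and MCS queueing on top of this:

```c
#include <stdatomic.h>

#define LOCKED_PENDING_MASK 0x3u	/* illustrative bit layout */

/*
 * Sketch of atomic_cond_read_acquire(): spin until the locked and
 * pending bits drain, then return the value that satisfied the
 * predicate. The kernel version also relaxes the CPU while spinning.
 */
static unsigned int cond_read_acquire(atomic_uint *lock)
{
	unsigned int val;

	do {
		val = atomic_load_explicit(lock, memory_order_acquire);
	} while (val & LOCKED_PENDING_MASK);
	return val;
}
```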
|
- **qspinlock_paravirt.h**
  - 88: `int val = atomic_read(&lock->val);` (local, in `pv_hybrid_queued_unfair_trylock()`)
  - 91: `if (!(val & _Q_LOCKED_PENDING_MASK) &&` (continues, in `pv_hybrid_queued_unfair_trylock()`)
  - 96: `if (!(val & _Q_TAIL_MASK) || (val & _Q_PENDING_MASK))` (in `pv_hybrid_queued_unfair_trylock()`)
  - 130: `atomic_or(_Q_PENDING_VAL, &lock->val);` (in `set_pending()`)
  - 137: `old = atomic_read(&lock->val);` (in `trylock_clear_pending()`)
  - 145: `} while (!atomic_try_cmpxchg_acquire(&lock->val, &old, new));` (in `trylock_clear_pending()`)
  - 481: `return (u32)(atomic_read(&lock->val) | _Q_LOCKED_VAL);` (in `pv_wait_head_or_lock()`)
  - 506: `(unsigned long)lock, atomic_read(&lock->val));` (continuation, in `__pv_queued_spin_unlock_slowpath()`)
|
- **qspinlock.h**
  - 134: `atomic_andnot(_Q_PENDING_VAL, &lock->val);` (in `clear_pending()`)
  - 145: `atomic_add(-_Q_PENDING_VAL + _Q_LOCKED_VAL, &lock->val);` (in `clear_pending_set_locked()`)
  - 162: `old = atomic_read(&lock->val);` (in `xchg_tail()`)
  - 170: `} while (!atomic_try_cmpxchg_relaxed(&lock->val, &old, new));` (in `xchg_tail()`)
  - 186: `return atomic_fetch_or_acquire(_Q_PENDING_VAL, &lock->val);` (in `queued_fetch_set_pending_acquire()`)
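`xchg_tail()` (lines 162–170) is the classic read-modify-cmpxchg loop: compute a new lock word from the observed one and retry until no other CPU raced in between. A C11 sketch with an invented tail mask; like the kernel's `atomic_try_cmpxchg_relaxed()`, a failed compare-exchange reloads `old` with the current value:

```c
#include <stdatomic.h>
#include <stdint.h>

#define TAIL_MASK 0xffff0000u	/* illustrative, not the kernel encoding */

/* Swap our tail bits into the lock word, returning the previous word. */
static uint32_t xchg_tail_sketch(_Atomic uint32_t *lock, uint32_t tail)
{
	uint32_t old = atomic_load_explicit(lock, memory_order_relaxed);
	uint32_t new;

	do {
		new = (old & ~TAIL_MASK) | tail;
		/* on failure, old is refreshed with the current value */
	} while (!atomic_compare_exchange_weak_explicit(lock, &old, new,
							memory_order_relaxed,
							memory_order_relaxed));
	return old;
}
```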
|
**/kernel/**
- **params.c**
  - 43: `char val[];` (member)
  - 60: `return p->val;` (in `kmalloc_parameter()`)
  - 70: `if (p->val == param) {` (in `maybe_kfree_parameter()`)
  - 118: `char *val,` (argument, in `parse_one()`)
  - 136: `if (!val &&` (continues, in `parse_one()`)
  - 250: `if (!val)` (in `param_set_uint_minmax()`)
  - 313: `if (!val) val = "1";` (in `param_set_bool()`)
  - 418: `const char *val,` (argument, in `param_array()`)
  - 446: `save = val[len];` (in `param_array()`)
  - 449: `ret = set(val, &kp);` (in `param_array()`)
  - … (remaining references truncated)
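`param_array()` (lines 446–449) hands each comma-separated element to the `set()` callback without copying: it saves the delimiter, temporarily NUL-terminates the element in place, and restores the byte afterwards. A userspace sketch of that tokenizing; the callback and signature are illustrative:

```c
#include <stdio.h>
#include <string.h>

static int set(const char *val) { printf("elem: %s\n", val); return 0; }

/*
 * Sketch of the param_array() in-place split: terminate each element
 * with '\0' for the callback, then put the saved delimiter back so
 * the caller's string is unchanged.
 */
static int parse_array(char *val)
{
	while (*val) {
		size_t len = strcspn(val, ",");
		char save = val[len];	/* remember delimiter (or '\0') */
		int ret;

		val[len] = '\0';
		ret = set(val);
		val[len] = save;	/* restore the caller's string */
		if (ret)
			return ret;
		val += len + (save ? 1 : 0);
	}
	return 0;
}
```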
|
- **sysctl.c**
  - 373: `if (val < 0) {` (in `do_proc_dointvec_conv()`)
  - 640: `int res, val;` (locals, in `proc_dobool()`)
  - 648: `tmp.data = &val;` (in `proc_dobool()`)
  - 875: `tmp.data = &val;` (in `proc_dou8vec_minmax()`)
  - 935: `val = convmul * val / convdiv;` (in `__do_proc_doulongvec_minmax()`)
  - 936: `if ((min && val < *min) || (max && val > *max)) {` (in `__do_proc_doulongvec_minmax()`)
  - 1030: `if (val < 0) {` (in `do_proc_dointvec_jiffies_conv()`)
  - 1053: `if (val < 0) {` (in `do_proc_dointvec_userhz_jiffies_conv()`)
  - 1078: `if (val < 0) {` (in `do_proc_dointvec_ms_jiffies_conv()`)
  - 1432: `int val, ret;` (locals, in `proc_do_static_key()`)
  - … (remaining references truncated)
|
- **notifier.c**
  - 66: `unsigned long val, void *v,` (argument, in `notifier_call_chain()`)
  - 85: `ret = nb->notifier_call(nb, val, v);` (in `notifier_call_chain()`)
  - 218: `unsigned long val, void *v)` (argument, in `atomic_notifier_call_chain()`)
  - 223: `ret = notifier_call_chain(&nh->head, val, v, -1, NULL);` (in `atomic_notifier_call_chain()`)
  - 369: `unsigned long val, void *v)` (argument, in `blocking_notifier_call_chain()`)
  - 380: `ret = notifier_call_chain(&nh->head, val, v, -1, NULL);` (in `blocking_notifier_call_chain()`)
  - 451: `unsigned long val, void *v)` (argument, in `raw_notifier_call_chain()`)
  - 453: `return notifier_call_chain(&nh->head, val, v, -1, NULL);` (in `raw_notifier_call_chain()`)
  - 540: `unsigned long val, void *v)` (argument, in `srcu_notifier_call_chain()`)
  - 546: `ret = notifier_call_chain(&nh->head, val, v, -1, NULL);` (in `srcu_notifier_call_chain()`)
  - … (remaining references truncated)
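All the call-chain variants above funnel into `notifier_call_chain()`, which walks the registered blocks and invokes each callback with the event value and an opaque pointer, stopping early when a callback asks for it. A minimal sketch; the `NOTIFY_*` constants mirror the kernel's values, the rest is simplified:

```c
#include <stddef.h>

#define NOTIFY_DONE	 0x0000
#define NOTIFY_STOP_MASK 0x8000

struct notifier_block {
	int (*notifier_call)(struct notifier_block *nb,
			     unsigned long val, void *v);
	struct notifier_block *next;
};

/*
 * Sketch of notifier_call_chain(): invoke each callback in turn,
 * breaking out if one returns a NOTIFY_STOP_MASK result.
 */
static int call_chain(struct notifier_block *nb, unsigned long val, void *v)
{
	int ret = NOTIFY_DONE;

	while (nb) {
		ret = nb->notifier_call(nb, val, v);
		if (ret & NOTIFY_STOP_MASK)	/* callback consumed/vetoed */
			break;
		nb = nb->next;
	}
	return ret;
}
```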
|
- **reboot.c**
  - 1196: `const char *val;` (local, in `mode_show()`)
  - 1200: `val = REBOOT_COLD_STR;` (in `mode_show()`)
  - 1203: `val = REBOOT_WARM_STR;` (in `mode_show()`)
  - 1206: `val = REBOOT_HARD_STR;` (in `mode_show()`)
  - 1209: `val = REBOOT_SOFT_STR;` (in `mode_show()`)
  - 1270: `const char *val;` (local, in `type_show()`)
  - 1277: `val = BOOT_KBD_STR;` (in `type_show()`)
  - 1280: `val = BOOT_BIOS_STR;` (in `type_show()`)
  - 1283: `val = BOOT_ACPI_STR;` (in `type_show()`)
  - 1286: `val = BOOT_EFI_STR;` (in `type_show()`)
  - … (remaining references truncated)
|
- **capability.c**
  - 164: `kdata[0].effective = pE.val; kdata[1].effective = pE.val >> 32;` (in `SYSCALL_DEFINE2()`)
  - 165: `kdata[0].permitted = pP.val; kdata[1].permitted = pP.val >> 32;` (in `SYSCALL_DEFINE2()`)
  - 166: `kdata[0].inheritable = pI.val; kdata[1].inheritable = pI.val >> 32;` (in `SYSCALL_DEFINE2()`)
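Lines 164–166 return 64-bit capability sets to userspace as two 32-bit words, low half first. A tiny sketch of that split with a simplified struct (the real userspace header carries effective, permitted, and inheritable together):

```c
#include <stdint.h>

struct user_cap_data { uint32_t effective; };	/* simplified */

/* Split a 64-bit capability set into the two-word ABI layout. */
static void split_caps(uint64_t eff, struct user_cap_data kdata[2])
{
	kdata[0].effective = (uint32_t)eff;		/* bits 0..31 */
	kdata[1].effective = (uint32_t)(eff >> 32);	/* bits 32..63 */
}
```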
|
- **fail_function.c**
  - 107: `static int fei_retval_set(void *data, u64 val)` (signature)
  - 110: `unsigned long retv = (unsigned long)val;` (in `fei_retval_set()`)
  - 126: `val) != retv)` (continuation, in `fei_retval_set()`)
  - 130: `attr->retval = val;` (in `fei_retval_set()`)
  - 137: `static int fei_retval_get(void *data, u64 *val)` (signature)
  - 147: `*val = attr->retval;` (in `fei_retval_get()`)
|
**/kernel/bpf/**
- **rqspinlock.c**
  - 278: `int val, ret = 0;` (locals, in `resilient_tas_spin_lock()`)
  - 290: `val = atomic_read(&lock->val);` (in `resilient_tas_spin_lock()`)
  - 292: `if (val || !atomic_try_cmpxchg(&lock->val, &val, 1)) {` (in `resilient_tas_spin_lock()`)
  - 368: `if (val == _Q_PENDING_VAL) {` (in `resilient_queued_spin_lock_slowpath()`)
  - 370: `val = atomic_cond_read_relaxed(&lock->val,` (continues, in `resilient_queued_spin_lock_slowpath()`)
  - 377: `if (val & ~_Q_LOCKED_MASK)` (in `resilient_queued_spin_lock_slowpath()`)
  - 397: `if (!(val & _Q_PENDING_MASK))` (in `resilient_queued_spin_lock_slowpath()`)
  - 419: `if (val & _Q_LOCKED_MASK) {` (in `resilient_queued_spin_lock_slowpath()`)
  - 534: `int val;` (local, in `resilient_queued_spin_lock_slowpath()`)
  - 575: `val = res_atomic_cond_read_acquire(&lock->val, !(VAL & _Q_LOCKED_PENDING_MASK) ||` (continues, in `resilient_queued_spin_lock_slowpath()`)
  - … (remaining references truncated)
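`resilient_tas_spin_lock()` (lines 290–292) reads the lock word first and only attempts the 0→1 compare-exchange when it looks free, which keeps the cache line in shared state while spinning. A C11 sketch that omits the deadlock and timeout checks rqspinlock adds on top:

```c
#include <stdatomic.h>

/*
 * Test-and-test-and-set sketch: spin on a plain load, attempt the
 * atomic 0 -> 1 transition only when the lock appears free.
 */
static void tas_lock(atomic_int *lock)
{
	for (;;) {
		int val = atomic_load_explicit(lock, memory_order_relaxed);

		if (!val && atomic_compare_exchange_strong_explicit(
				lock, &val, 1,
				memory_order_acquire, memory_order_relaxed))
			return;
	}
}
```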
|
- **devmap.c**
  - 71: `struct bpf_devmap_val val;` (member)
  - 799: `return obj ? &obj->val : NULL;` (in `dev_map_lookup_elem()`)
  - 806: `return obj ? &obj->val : NULL;` (in `dev_map_hash_lookup_elem()`)
  - 861: `struct bpf_devmap_val *val,` (argument, in `__dev_map_alloc_node()`)
  - 877: `if (val->bpf_prog.fd > 0) {` (in `__dev_map_alloc_node()`)
  - 893: `dev->val.bpf_prog.id = 0;` (in `__dev_map_alloc_node()`)
  - 895: `dev->val.ifindex = val->ifindex;` (in `__dev_map_alloc_node()`)
  - 912: `struct bpf_devmap_val val = {};` (local, in `__dev_map_update_elem()`)
  - 925: `if (!val.ifindex) {` (in `__dev_map_update_elem()`)
  - 928: `if (val.bpf_prog.fd > 0)` (in `__dev_map_update_elem()`)
  - … (remaining references truncated)
|
**/kernel/cgroup/**
- **cpuset-v1.c**
  - 68: `fmp->val = 0;` (in `fmeter_init()`)
  - 87: `fmp->val = (FM_COEF * fmp->val) / FM_SCALE;` (in `fmeter_update()`)
  - 106: `int val;` (local, in `fmeter_getrate()`)
  - 110: `val = fmp->val;` (in `fmeter_getrate()`)
  - 112: `return val;` (in `fmeter_getrate()`)
  - 151: `if (val < -1 || val > sched_domain_level_max + 1)` (in `update_relax_domain_level()`)
  - 155: `if (val != cs->relax_domain_level) {` (in `update_relax_domain_level()`)
  - 156: `cs->relax_domain_level = val;` (in `update_relax_domain_level()`)
  - 166: `s64 val)` (argument, in `cpuset_write_s64()`)
  - 451: `u64 val)` (argument, in `cpuset_write_u64()`)
  - … (remaining references truncated)
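`fmeter_update()` (line 87) implements a fixed-point exponential decay: each elapsed tick multiplies the stored rate by `FM_COEF / FM_SCALE`. A sketch using the same constants; with `933/1000` per tick the value halves in roughly ten ticks, and the event fold-in at the end is simplified relative to the kernel's scaled version:

```c
#define FM_COEF  933	/* ~half-life of 10 ticks: 0.933^10 ~= 0.5 */
#define FM_SCALE 1000

struct fmeter { int cnt; int val; };	/* simplified */

/*
 * Decay the rate once per elapsed tick, then fold in the events
 * counted since the last update (the kernel scales this term).
 */
static void fmeter_decay(struct fmeter *fmp, int ticks)
{
	while (ticks-- > 0)
		fmp->val = (FM_COEF * fmp->val) / FM_SCALE;
	fmp->val += fmp->cnt;
	fmp->cnt = 0;
}
```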
|
**/kernel/power/**
- **main.c**
  - 133: `unsigned long val;` (local, in `pm_async_store()`)
  - 138: `if (val > 1)` (in `pm_async_store()`)
  - 239: `unsigned long val;` (local, in `sync_on_suspend_store()`)
  - 244: `if (val > 1)` (in `sync_on_suspend_store()`)
  - 594: `unsigned long val;` (local, in `pm_print_times_store()`)
  - 599: `if (val > 1)` (in `pm_print_times_store()`)
  - 648: `if (val > 1)` (in `pm_debug_messages_store()`)
  - 797: `unsigned int val;` (local, in `wakeup_count_show()`)
  - 807: `unsigned int val;` (local, in `wakeup_count_store()`)
  - 928: `int val;` (local, in `pm_trace_store()`)
  - … (remaining references truncated)
|
**/kernel/futex/**
- **syscalls.c**
  - 84: `long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,` (signature, continues)
  - 102: `return futex_wait(uaddr, flags, val, timeout, val3);` (in `do_futex()`)
  - 107: `return futex_wake(uaddr, flags, val, val3);` (in `do_futex()`)
  - 113: `return futex_wake_op(uaddr, flags, uaddr2, val, val2, val3);` (in `do_futex()`)
  - 125: `return futex_wait_requeue_pi(uaddr, flags, val, timeout, val3,` (continues, in `do_futex()`)
  - 160: `SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,` (signature, continues)
  - 213: `if (!futex_validate_input(flags, aux.val))` (in `futex_parse_waitv()`)
  - 217: `futexv[i].w.val = aux.val;` (in `futex_parse_waitv()`)
  - 372: `unsigned long, val,` (argument, in `SYSCALL_DEFINE6()`)
  - 388: `if (!futex_validate_input(flags, val) ||` (continues, in `SYSCALL_DEFINE6()`)
  - … (remaining references truncated)
|
- **waitwake.c**
  - 446: `u32 val = vs[i].w.val;` (local, in `futex_wait_multiple_setup()`)
  - 454: `if (!ret && uval == val) {` (in `futex_wait_multiple_setup()`)
  - 492: `if (uval != val)` (in `futex_wait_multiple_setup()`)
  - 591: `int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags,` (signature, continues)
  - 642: `if (uval != val) {` (in `futex_wait_setup()`)
  - 666: `int __futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,` (signature, continues)
  - 682: `ret = futex_wait_setup(uaddr, val, flags, &q, NULL, current);` (in `__futex_wait()`)
  - 706: `int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val, ktime_t *abs_time, u32 bitset)` (signature)
  - 715: `ret = __futex_wait(uaddr, flags, val, to, bitset);` (in `futex_wait()`)
  - 727: `restart->futex.val = val;` (in `futex_wait()`)
  - … (remaining references truncated)
|
- **futex.h**
  - 110: `static inline bool futex_validate_input(unsigned int flags, u64 val)` (signature)
  - 114: `if (bits < 64 && (val >> bits))` (in `futex_validate_input()`)
  - 263: `extern int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags,` (declaration, continues)
  - 303: `u32 val;` (local, in `futex_get_value()`)
  - 309: `unsafe_get_user(val, from, Efault);` (in `futex_get_value()`)
  - 311: `*dest = val;` (in `futex_get_value()`)
  - 318: `static __always_inline int futex_put_value(u32 val, u32 __user *to)` (signature)
  - 324: `unsafe_put_user(val, to, Efault);` (in `futex_put_value()`)
  - 454: `val, ktime_t *abs_time, u32 bitset, u32 __user` (continuation of a declaration)
  - 462: `extern int __futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,` (declaration, continues)
  - … (remaining references truncated)
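`futex_validate_input()` (line 114) rejects a futex value whose set bits do not fit the operand size encoded in the flags. A sketch of just that check, with the flag-to-size decoding elided:

```c
#include <stdbool.h>
#include <stdint.h>

/*
 * Sketch of the line-114 check: for operand sizes narrower than 64
 * bits, any bit at or above the operand width makes the value
 * invalid. size_bits would be derived from the futex flags.
 */
static bool validate_input(unsigned int size_bits, uint64_t val)
{
	return !(size_bits < 64 && (val >> size_bits));
}
/* e.g. validate_input(32, 0x1ffffffffULL) == false: bit 32 set on a u32 futex */
```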
|
**/kernel/debug/kdb/**
- **kdb_main.c**
  - 293: `unsigned long val;` (local, in `kdbgetintenv()`)
  - 298: `*value = (int) val;` (in `kdbgetintenv()`)
  - 2276: `unsigned long val;` (local, in `kdb_pid()`)
  - 2380: `memset(val, 0, sizeof(*val));` (in `kdb_sysinfo()`)
  - 2386: `si_meminfo(val);` (in `kdb_sysinfo()`)
  - 2411: `kdb_sysinfo(&val);` (in `kdb_summary()`)
  - 2418: `kdb_printf("%02ld:%02ld\n", val.uptime/(60*60), (val.uptime/60)%60);` (in `kdb_summary()`)
  - 2421–2423: `LOAD_INT(val.loads[0]), LOAD_FRAC(val.loads[0]), LOAD_INT(val.loads[1]), LOAD_FRAC(val.loads[1]), LOAD_INT(val.loads[2]), LOAD_FRAC(val.loads[2]));` (continuation, in `kdb_summary()`)
  - … (remaining references truncated)
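`kdb_summary()` (lines 2421–2423) prints load averages kept as fixed-point numbers with `FSHIFT` (11) fractional bits; `LOAD_INT()`/`LOAD_FRAC()` split them into an integer part and a two-digit fraction. A standalone sketch using the kernel's definitions from `<linux/sched/loadavg.h>`:

```c
#include <stdio.h>

/* These match the kernel's fixed-point loadavg macros. */
#define FSHIFT	11
#define FIXED_1	(1 << FSHIFT)
#define LOAD_INT(x)  ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1 - 1)) * 100)

int main(void)
{
	unsigned long load = 3 * FIXED_1 + FIXED_1 / 2;	/* 3.50 in fixed point */

	printf("load: %lu.%02lu\n", LOAD_INT(load), LOAD_FRAC(load));
	return 0;
}
```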
|
**/kernel/sched/**
- **cpuacct.c**
  - 194: `u64 val)` (argument, in `cpuusage_write()`)
  - 202: `if (val)` (in `cpuusage_write()`)
  - 266: `u64 val[CPUACCT_STAT_NSTATS];` (local, in `cpuacct_stats_show()`)
  - 284: `&val[CPUACCT_STAT_USER], &val[CPUACCT_STAT_SYSTEM]);` (continuation, in `cpuacct_stats_show()`)
  - 288: `nsec_to_clock_t(val[stat]));` (continuation, in `cpuacct_stats_show()`)
  - 352: `void cpuacct_account_field(struct task_struct *tsk, int index, u64 val)` (signature)
  - 357: `__this_cpu_add(ca->cpustat->cpustat[index], val);` (in `cpuacct_account_field()`)
|