/kernel/sched/
cputime.c
    83: static u64 irqtime_tick_accounted(u64 maxtime)  in irqtime_tick_accounted()
    86: u64 delta;  in irqtime_tick_accounted()
    96: static u64 irqtime_tick_accounted(u64 dummy)  in irqtime_tick_accounted()
    258: u64 steal;  in steal_account_process_time()
    275: static inline u64 account_other_time(u64 max)  in account_other_time()
    297: u64 ns;  in read_sum_exec_runtime()
    445: u64 *ut, u64 *st)  in cputime_adjust()
    547: u64 *ut, u64 *st)  in cputime_adjust()
    660: u64 other;  in get_vtime_delta()
    822: u64 gtime;  in task_gtime()
    [all …]
|
clock.c
    92: u64 tick_raw;
    93: u64 tick_gtod;
    94: u64 clock;
    250: static __always_inline u64 wrap_min(u64 x, u64 y)  in wrap_min()
    255: static __always_inline u64 wrap_max(u64 x, u64 y)  in wrap_max()
    301: u64 clock;  in local_clock_noinstr()
    314: u64 local_clock(void)  in local_clock()
    316: u64 now;  in local_clock()
    327: u64 this_clock, remote_clock;  in sched_clock_remote()
    328: u64 *ptr, old_val, val;  in sched_clock_remote()
    [all …]
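The wrap_min()/wrap_max() entries above are wrap-safe comparators: sched_clock values are free-running u64 counters, so a plain `<` misorders two values that straddle a wraparound. A minimal user-space sketch of the idiom (the cast-to-signed trick matches the shape of the listed helpers; the surrounding kernel context is not reproduced):

```c
#include <stdint.h>

/*
 * Wrap-safe min/max for free-running 64-bit counters: casting the unsigned
 * difference to signed correctly orders any two values that are less than
 * half the counter range apart, even across a wraparound.
 */
static inline uint64_t wrap_min(uint64_t x, uint64_t y)
{
	return (int64_t)(x - y) < 0 ? x : y;
}

static inline uint64_t wrap_max(uint64_t x, uint64_t y)
{
	return (int64_t)(x - y) > 0 ? x : y;
}
```

For example, wrap_min(5, UINT64_MAX) yields UINT64_MAX: 5 is treated as lying just past the wrap, i.e. later.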
|
pelt.h
    10: int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq);
    11: int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
    12: int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
    16: int update_hw_load_avg(u64 now, struct rq *rq, u64 capacity);
    18: static inline u64 hw_load_avg(struct rq *rq)  in hw_load_avg()
    24: update_hw_load_avg(u64 now, struct rq *rq, u64 capacity)  in update_hw_load_avg()
    29: static inline u64 hw_load_avg(struct rq *rq)  in hw_load_avg()
    36: int update_irq_load_avg(struct rq *rq, u64 running);
    39: update_irq_load_avg(struct rq *rq, u64 running)  in update_irq_load_avg()
    69: static inline u64 rq_clock_pelt(struct rq *rq)  in rq_clock_pelt()
    [all …]
|
pelt.c
    32: static u64 decay_load(u64 val, u64 n)  in decay_load()
    65: c1 = decay_load((u64)d1, periods);  in __accumulate_pelt_segments()
    103: accumulate_sum(u64 delta, struct sched_avg *sa,  in accumulate_sum()
    107: u64 periods;  in accumulate_sum()
    119: sa->util_sum = decay_load((u64)(sa->util_sum), periods);  in accumulate_sum()
    181: ___update_load_sum(u64 now, struct sched_avg *sa,  in ___update_load_sum()
    184: u64 delta;  in ___update_load_sum()
    321: int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq)  in __update_load_avg_cfs_rq()
    404: int update_hw_load_avg(u64 now, struct rq *rq, u64 capacity)  in update_hw_load_avg()
    431: int update_irq_load_avg(struct rq *rq, u64 running)  in update_irq_load_avg()
    [all …]
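decay_load() above scales a PELT running sum by y^n, where y is chosen so that y^32 = 1/2: the n/32 whole half-lives become a right shift, and only the remainder needs a fixed-point multiply. A hedged user-space sketch of that split (the kernel keeps the fractional factors in a precomputed table and uses mul_u64_u32_shr(); here libm and a 128-bit multiply stand in):

```c
#include <math.h>
#include <stdint.h>
#include <stdio.h>

/* Geometric decay with y^32 == 1/2: shift for whole half-lives, one
 * 0.32 fixed-point multiply for the fractional remainder. */
static uint64_t decay_load_sketch(uint64_t val, uint64_t n)
{
	uint32_t frac;

	if (n > 32 * 63)
		return 0;			/* decayed below one unit */
	val >>= n / 32;				/* 32 periods == one halving */
	n %= 32;
	/* y^n as a 0.32 fixed-point factor (tabulated in the kernel) */
	frac = (uint32_t)(pow(0.5, (double)n / 32.0) * (double)UINT32_MAX);
	return (uint64_t)(((unsigned __int128)val * frac) >> 32);
}

int main(void)
{
	/* a sum of 1024 decayed by one full half-life: prints ~512 */
	printf("%llu\n", (unsigned long long)decay_load_sketch(1024, 32));
	return 0;
}
```

(Build with -lm; `unsigned __int128` assumes a 64-bit GCC/Clang target.)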
|
cpuacct.c
    29: u64 __percpu *cpuusage;
    49: static DEFINE_PER_CPU(u64, root_cpuacct_cpuusage);
    68: ca->cpuusage = alloc_percpu(u64);  in cpuacct_css_alloc()
    99: u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);  in cpuacct_cpuusage_read()
    100: u64 *cpustat = per_cpu_ptr(ca->cpustat, cpu)->cpustat;  in cpuacct_cpuusage_read()
    101: u64 data;  in cpuacct_cpuusage_read()
    139: u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);  in cpuacct_cpuusage_write()
    167: u64 totalcpuusage = 0;  in __cpuusage_read()
    194: u64 val)  in cpuusage_write()
    215: u64 percpu;  in __cpuacct_percpu_seq_show()
    [all …]
|
sched.h
    238: static inline void update_avg(u64 *avg, u64 sample)  in update_avg()
    347: u64 bw;
    395: u64 runtime, u64 period, bool init);
    423: u64 quota;
    425: u64 burst;
    846: u64 curr;
    847: u64 next;
    882: u64 max_bw;
    1158: u64 clock;
    2667: extern unsigned long to_ratio(u64 period, u64 runtime);
    [all …]
|
/kernel/time/
timecounter.c
    10: u64 start_tstamp)  in timecounter_init()
    31: static u64 timecounter_read_delta(struct timecounter *tc)  in timecounter_read_delta()
    33: u64 cycle_now, cycle_delta;  in timecounter_read_delta()
    34: u64 ns_offset;  in timecounter_read_delta()
    52: u64 timecounter_read(struct timecounter *tc)  in timecounter_read()
    54: u64 nsec;  in timecounter_read()
    70: u64 cycles, u64 mask, u64 frac)  in cc_cyc2ns_backwards()
    72: u64 ns = (u64) cycles;  in cc_cyc2ns_backwards()
    79: u64 timecounter_cyc2time(const struct timecounter *tc,  in timecounter_cyc2time()
    80: u64 cycle_tstamp)  in timecounter_cyc2time()
    [all …]
|
posix-cpu-timers.c
    122: static u64 bump_cpu_timer(struct k_itimer *timer, u64 now)  in bump_cpu_timer()
    214: static inline void store_samples(u64 *samples, u64 stime, u64 utime, u64 rtime)  in store_samples()
    362: u64 t;  in posix_cpu_clock_get()
    781: u64 now;  in posix_cpu_timer_get()
    808: u64 expires;  in collect_timerqueue()
    916: u64 *expires, u64 cur_time, int signo)  in check_cpu_itimer()
    986: u64 softns = (u64)soft * NSEC_PER_SEC;  in check_process_timers()
    987: u64 hardns = (u64)hard * NSEC_PER_SEC;  in check_process_timers()
    1021: u64 now;  in posix_cpu_timer_rearm()
    1439: u64 *newval, u64 *oldval)  in set_process_cpu_timer()
    [all …]
|
timekeeping.c
    440: u64 now;  in __ktime_get_fast_ns()
    791: u64 nsecs;  in ktime_get_real_ts64()
    813: u64 nsecs;  in ktime_get()
    856: u64 nsecs;  in ktime_get_with_offset()
    877: u64 nsecs;  in ktime_get_coarse_with_offset()
    928: u64 nsecs;  in ktime_get_raw()
    954: u64 nsec;  in ktime_get_ts64()
    1049: u64 now;  in ktime_get_snapshot()
    1076: static int scale64_check_overflow(u64 mult, u64 div, u64 *base)  in scale64_check_overflow()
    1176: static bool timestamp_in_interval(u64 start, u64 end, u64 ts)  in timestamp_in_interval()
    [all …]
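Most of the readers listed above (ktime_get(), ktime_get_real_ts64(), ktime_get_raw(), ...) share one shape: sample a sequence count, read the time fields, and retry if a writer intervened. A simplified user-space sketch of that lockless retry loop; the struct and field names are invented for illustration, and the real code layers clocksource deltas and finer memory-ordering control on top:

```c
#include <stdatomic.h>
#include <stdint.h>

/* Simplified seqcount-protected timekeeper, loosely modeled on the retry
 * pattern the readers above share.  Field names are assumptions. */
struct tk_sketch {
	_Atomic unsigned int seq;	/* odd while an update is in flight */
	_Atomic uint64_t base_ns;	/* time at the last writer update */
	_Atomic uint64_t offset_ns;	/* delta accumulated since then */
};

static uint64_t read_time_ns(struct tk_sketch *tk)
{
	unsigned int seq;
	uint64_t base, off;

	do {
		while ((seq = atomic_load(&tk->seq)) & 1)
			;				/* writer active: wait */
		base = atomic_load(&tk->base_ns);
		off = atomic_load(&tk->offset_ns);
	} while (atomic_load(&tk->seq) != seq);		/* raced: retry */

	return base + off;
}
```

Readers never block writers and take no lock; they simply discard a torn snapshot and try again.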
|
timer_migration.h
    65: u64 next_expiry;
    105: u64 wakeup;
    137: extern u64 tmigr_cpu_deactivate(u64 nextevt);
    138: extern u64 tmigr_cpu_new_timer(u64 nextevt);
    139: extern u64 tmigr_quick_check(u64 nextevt);
|
timekeeping_internal.h
    33: static inline u64 clocksource_delta(u64 now, u64 last, u64 mask, u64 max_delta)  in clocksource_delta()
    35: u64 ret = (now - last) & mask;  in clocksource_delta()
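The body shown above, `(now - last) & mask`, is the standard wraparound-safe delta for a hardware counter narrower than 64 bits. A tiny self-contained sketch with a worked example:

```c
#include <assert.h>
#include <stdint.h>

/* Modular subtraction within the counter's width: correct even after the
 * hardware counter wraps, as long as under one full period has elapsed. */
static uint64_t masked_delta(uint64_t now, uint64_t last, uint64_t mask)
{
	return (now - last) & mask;
}

int main(void)
{
	/* 32-bit counter wrapped from 0xfffffff0 to 0x10: 0x20 ticks elapsed */
	assert(masked_delta(0x10, 0xfffffff0, 0xffffffff) == 0x20);
	return 0;
}
```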
|
jiffies.c
    16: static u64 jiffies_read(struct clocksource *cs)  in jiffies_read()
    18: return (u64) jiffies;  in jiffies_read()
    48: u64 get_jiffies_64(void)  in get_jiffies_64()
    51: u64 ret;  in get_jiffies_64()
    80: u64 nsec_per_tick, shift_hz;  in register_refined_jiffies()
    90: shift_hz = (u64)cycles_per_second << 8;  in register_refined_jiffies()
    94: nsec_per_tick = (u64)NSEC_PER_SEC << 8;  in register_refined_jiffies()
|
sched_clock.c
    44: u64 (*actual_read_sched_clock)(void);
    52: static u64 notrace jiffy_sched_clock_read(void)  in jiffy_sched_clock_read()
    58: return (u64)(jiffies - INITIAL_JIFFIES);  in jiffy_sched_clock_read()
    67: static __always_inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift)  in cyc_to_ns()
    87: u64 cyc, res;  in __sched_clock()
    154: u64 cyc;  in update_sched_clock()
    155: u64 ns;  in update_sched_clock()
    178: sched_clock_register(u64 (*read)(void), int bits, unsigned long rate)  in sched_clock_register()
    180: u64 res, wrap, new_mask, new_epoch, cyc, ns;  in sched_clock_register()
    281: static u64 notrace suspended_sched_clock_read(void)  in suspended_sched_clock_read()
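cyc_to_ns() above is the classic mult/shift conversion: ns = (cyc * mult) >> shift, where mult and shift approximate NSEC_PER_SEC / rate as a binary fraction. A sketch (the example constants are assumptions, not values the kernel picks):

```c
#include <stdint.h>

/*
 * Cycles-to-nanoseconds via a fixed-point rational: mult / 2^shift is a
 * binary approximation of nanoseconds-per-cycle.  Callers must keep cyc
 * small enough (e.g. by masking a bounded delta) that cyc * mult fits 64 bits.
 */
static inline uint64_t cyc_to_ns(uint64_t cyc, uint32_t mult, uint32_t shift)
{
	return (cyc * mult) >> shift;
}
```

For a 1 GHz counter, mult = 1024 and shift = 10 map each cycle to exactly 1 ns; slower clocks get proportionally larger mult values (these numbers are purely illustrative).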
|
/kernel/trace/
synth_event_gen_test.c
    48: u64 vals[7];  in test_gen_synth_cmd()
    139: u64 vals[7];  in test_empty_synth_event()
    255: u64 vals[9];  in test_create_synth_event()
    426: (u64)444, /* next_pid_field */  in test_trace_synth_event()
    427: (u64)(long)"clackers", /* next_comm_field */  in test_trace_synth_event()
    428: (u64)1000000, /* ts_ns */  in test_trace_synth_event()
    430: (u64)1000, /* ts_ms */  in test_trace_synth_event()
    431: (u64)raw_smp_processor_id(), /* cpu */  in test_trace_synth_event()
    432: (u64)(long)"Thneed", /* my_string_field */  in test_trace_synth_event()
    433: (u64)(long)"yoyos", /* dynstring_field_2 */  in test_trace_synth_event()
    [all …]
|
trace_benchmark.c
    14: static u64 bm_total;
    16: static u64 bm_last;
    17: static u64 bm_max;
    18: static u64 bm_min;
    20: static u64 bm_cnt;
    39: u64 start;  in trace_do_benchmark()
    40: u64 stop;  in trace_do_benchmark()
    41: u64 delta;  in trace_do_benchmark()
    42: u64 stddev;  in trace_do_benchmark()
    43: u64 seed;  in trace_do_benchmark()
    [all …]
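The bm_* globals above accumulate running statistics over per-iteration deltas. A compact sketch of that style of bookkeeping, assuming a single-threaded caller and invented names (the kernel's version also derives a standard deviation, omitted here):

```c
#include <stdint.h>

/* Running total/count/max/min over u64 deltas; zero-initialize before use,
 * e.g. `struct bench_stats s = {0};`. */
struct bench_stats {
	uint64_t total, cnt, max, min;
};

static void bench_record(struct bench_stats *s, uint64_t delta)
{
	s->total += delta;
	if (!s->cnt || delta > s->max)
		s->max = delta;
	if (!s->cnt || delta < s->min)
		s->min = delta;
	s->cnt++;
}

static uint64_t bench_avg(const struct bench_stats *s)
{
	return s->cnt ? s->total / s->cnt : 0;
}
```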
|
trace_clock.c
    32: u64 notrace trace_clock_local(void)  in trace_clock_local()
    34: u64 clock;  in trace_clock_local()
    57: u64 notrace trace_clock(void)  in trace_clock()
    70: u64 notrace trace_clock_jiffies(void)  in trace_clock_jiffies()
    87: u64 prev_time;
    94: u64 notrace trace_clock_global(void)  in trace_clock_global()
    98: u64 now, prev_time;  in trace_clock_global()
    155: u64 notrace trace_clock_counter(void)  in trace_clock_counter()
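trace_clock_counter() above is not a time source at all: it returns successive values of a shared atomic counter, giving concurrent trace events a total order that unsynchronized per-CPU clocks cannot. A user-space sketch of the same idea:

```c
#include <stdatomic.h>
#include <stdint.h>

/* An ordering "clock": each call returns a value unique across threads,
 * so events can be globally ordered without reading real time. */
static _Atomic uint64_t trace_counter;

static uint64_t counter_clock(void)
{
	/* fetch_add returns the old value; +1 yields the post-increment */
	return atomic_fetch_add(&trace_counter, 1) + 1;
}
```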
|
bpf_trace.c
    84: u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
    85: u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
    364: u64, arg2, u64, arg3)  in BPF_CALL_5()
    539: u64 *value, u64 *enabled, u64 *running)  in get_map_perf_counter()
    659: u64, flags, void *, data, u64, size)  in BPF_CALL_5() argument
    714: u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,  in bpf_event_output()
    734: u64 ret;  in bpf_event_output()
    1198: u64 nr_args = ((u64 *)ctx)[-1];  in BPF_CALL_3()
    1218: u64 nr_args = ((u64 *)ctx)[-1];  in BPF_CALL_2()
    1547: u64, flags, void *, data, u64, size)  in BPF_CALL_5() argument
    [all …]
|
trace_osnoise.c
    169: u64 count;
    177: u64 count;
    189: u64 count;
    198: u64 count;
    246: u64 count;
    803: static u64
    826: static u64
    857: u64 duration;  in trace_osnoise_callback()
    1720: u64 now;  in timerlat_irq()
    1721: u64 diff;  in timerlat_irq()
    [all …]
|
/kernel/irq/
timings.c
    290: u64 last_ts;
    291: u64 ema_time[PREDICTION_BUFFER_SIZE];
    300: static u64 irq_timings_ema_new(u64 value, u64 ema_old)  in irq_timings_ema_new()
    382: static u64 __irq_timings_next_event(struct irqt_stat *irqs, int irq, u64 now)  in __irq_timings_next_event()
    447: u64 interval)  in __irq_timings_store()
    475: u64 old_ts = irqs->last_ts;  in irq_timings_store()
    476: u64 interval;  in irq_timings_store()
    537: u64 irq_timings_next_event(u64 now)  in irq_timings_next_event()
    542: u64 ts, next_evt = U64_MAX;  in irq_timings_next_event()
    639: u64 *intervals;
    [all …]
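irq_timings_ema_new() above maintains an exponential moving average of IRQ intervals in pure integer arithmetic. A hedged sketch of that scheme; the alpha constants here are illustrative assumptions, not the kernel's:

```c
#include <stdint.h>

/*
 * Integer EMA: ema += (value - ema) * alpha, with alpha = EMA_NUM / 2^EMA_SHIFT.
 * Relies on arithmetic right shift of the signed intermediate (true on all
 * mainstream compilers/targets).
 */
#define EMA_NUM		64
#define EMA_SHIFT	7	/* alpha = 64/128 = 0.5, an assumed weight */

static uint64_t ema_new(uint64_t value, uint64_t ema_old)
{
	if (!ema_old)
		return value;	/* first sample seeds the average */
	return ema_old + ((((int64_t)(value - ema_old)) * EMA_NUM) >> EMA_SHIFT);
}
```

E.g. ema_new(100, 200) returns 150: the average moves halfway toward the new sample with alpha = 0.5.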
|
/kernel/bpf/
stackmap.c
    23: u64 data[];
    48: (u64)smap->map.value_size;  in prealloc_elems_and_freelist()
    74: u64 cost, n_buckets;  in stack_map_alloc()
    211: u64 *to = entry->ip;  in get_callchain_entry_for_task()
    216: to[i] = (u64)(from[i]);  in get_callchain_entry_for_task()
    235: u64 *ips;  in __bpf_get_stackid()
    301: u64, flags)  in BPF_CALL_3() argument
    417: u64 *ips;  in __bpf_get_stack()
    497: u64, flags)  in BPF_CALL_4() argument
    513: u64, flags)  in BPF_CALL_4() argument
    [all …]
|
tnum.c
    16: struct tnum tnum_const(u64 value)  in tnum_const()
    21: struct tnum tnum_range(u64 min, u64 max)  in tnum_range()
    23: u64 chi = min ^ max, delta;  in tnum_range()
    64: u64 sm, sv, sigma, chi, mu;  in tnum_add()
    76: u64 dv, alpha, beta, chi, mu;  in tnum_sub()
    93: u64 alpha, beta, v;  in tnum_and()
    103: u64 v, mu;  in tnum_or()
    112: u64 v, mu;  in tnum_xor()
    129: u64 acc_v = a.value * b.value;  in tnum_mul()
    151: u64 v, mu;  in tnum_intersect()
    [all …]
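The functions listed above implement "tnums" (tracked numbers), the BPF verifier's bit-level abstract domain: `value` holds the bits known to be set, `mask` the unknown bits. A user-space sketch of the two constructors, mirroring the chi/delta shape visible in tnum_range() (the builtin and struct name here are assumptions for illustration):

```c
#include <stdint.h>

/* value = known bits, mask = unknown bits; a bit may not be set in both. */
struct tnum_sketch { uint64_t value; uint64_t mask; };

static struct tnum_sketch tnum_const_sketch(uint64_t value)
{
	return (struct tnum_sketch){ .value = value, .mask = 0 };
}

/* Every bit at or below the highest bit where min and max differ
 * becomes unknown; the bits above it are known from min. */
static struct tnum_sketch tnum_range_sketch(uint64_t min, uint64_t max)
{
	uint64_t chi = min ^ max, delta;
	int bits;

	if (!chi)
		return tnum_const_sketch(min);	/* single-value range */
	bits = 64 - __builtin_clzll(chi);	/* 1-based top differing bit */
	delta = (bits == 64) ? ~0ULL : (1ULL << bits) - 1;
	return (struct tnum_sketch){ .value = min & ~delta, .mask = delta };
}
```

For example, tnum_range_sketch(4, 6) gives value 4, mask 3, i.e. the pattern "1xx" covering {4,5,6,7}: a superset of the range, which is the sound over-approximation the verifier needs.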
|
/kernel/printk/
printk_ringbuffer.h
    19: u64 seq; /* sequence number */
    20: u64 ts_nsec; /* timestamp in nanoseconds */
    264: .seq = -(u64)_DESCS_COUNT(descbits), \
    393: bool prb_read_valid(struct printk_ringbuffer *rb, u64 seq,
    395: bool prb_read_valid_info(struct printk_ringbuffer *rb, u64 seq,
    398: u64 prb_first_seq(struct printk_ringbuffer *rb);
    399: u64 prb_first_valid_seq(struct printk_ringbuffer *rb);
    400: u64 prb_next_seq(struct printk_ringbuffer *rb);
    401: u64 prb_next_reserve_seq(struct printk_ringbuffer *rb);
    416: u64 rb_first_seq = prb_first_seq(rb);  in __ulseq_to_u64seq()
    [all …]
|
/kernel/cgroup/
misc.c
    44: static u64 misc_res_capacity[MISC_CG_RES_TYPES];
    86: int misc_cg_set_capacity(enum misc_res_type type, u64 capacity)  in misc_cg_set_capacity()
    105: u64 amount)  in misc_cg_cancel_charge()
    114: u64 old;  in misc_cg_update_watermark()
    157: u64 new_usage;  in misc_cg_try_charge()
    220: u64 max;  in misc_cg_max_show()
    258: u64 max;  in misc_cg_max_write()
    308: u64 usage;  in misc_cg_current_show()
    331: u64 watermark;  in misc_cg_peak_show()
    356: u64 cap;  in misc_cg_capacity_show()
    [all …]
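misc_cg_try_charge() and misc_cg_cancel_charge() above suggest the usual optimistic charge pattern: add first, roll back if the limit was crossed. A single-counter user-space sketch (the real code additionally walks the cgroup hierarchy and tracks a watermark):

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* Optimistically charge `amount` against `usage`; cancel on overrun.
 * The add-then-check order keeps the fast path to one atomic op. */
static bool try_charge(_Atomic uint64_t *usage, uint64_t max, uint64_t amount)
{
	uint64_t new_usage = atomic_fetch_add(usage, amount) + amount;

	if (new_usage > max) {
		atomic_fetch_sub(usage, amount);	/* cancel the charge */
		return false;
	}
	return true;
}
```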
|
/kernel/
kcov.c
    82: u64 handle;
    239: static void notrace write_comp_data(u64 type, u64 arg1, u64 arg2, u64 ip)  in write_comp_data()
    242: u64 *area;  in write_comp_data()
    255: area = (u64 *)t->kcov_area;  in write_comp_data()
    328: u64 i;  in __sanitizer_cov_trace_switch()
    329: u64 *cases = arg;  in __sanitizer_cov_trace_switch()
    330: u64 count = cases[0];  in __sanitizer_cov_trace_switch()
    331: u64 size = cases[1];  in __sanitizer_cov_trace_switch()
    332: u64 type = KCOV_CMP_CONST;  in __sanitizer_cov_trace_switch()
    925: *(u64 *)area = 0;  in kcov_remote_start()
    [all …]
|
/kernel/kcsan/
kcsan.h
    133: int watchpoint_idx, u64 old, u64 new, u64 mask);
    140: unsigned long ip, u64 old, u64 new, u64 mask);
|