Searched refs:SCHED_CAPACITY_SHIFT (Results 1 – 15 of 15) sorted by relevance
/linux/arch/arm64/kernel/
  topology.c
       90  …EFINE_PER_CPU_READ_MOSTLY(unsigned long, arch_max_freq_scale) = 1UL << (2 * SCHED_CAPACITY_SHIFT);
      143  ratio = ref_rate << (2 * SCHED_CAPACITY_SHIFT);           in freq_inv_set_max_ratio()
      180  scale = div64_u64(scale >> SCHED_CAPACITY_SHIFT,          in amu_scale_freq_tick()
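The arm64 hits keep the maximum-frequency ratio at double precision (2 * SCHED_CAPACITY_SHIFT) so that a full shift's worth of precision is still left after the multiply-and-shift in amu_scale_freq_tick(). Below is a minimal user-space sketch of that double-shift arithmetic, assuming SCHED_CAPACITY_SHIFT == 10; the counter rates, deltas and variable names are invented for illustration, this is not the kernel code.

    #include <stdint.h>
    #include <stdio.h>

    #define SCHED_CAPACITY_SHIFT 10                      /* assumed value (SCHED_FIXEDPOINT_SHIFT) */
    #define SCHED_CAPACITY_SCALE (1UL << SCHED_CAPACITY_SHIFT)

    int main(void)
    {
        /* Hypothetical counter rates, not real hardware values. */
        uint64_t ref_rate = 100000000ULL;                /* constant reference counter rate */
        uint64_t max_rate = 3000000000ULL;               /* maximum core clock */

        /* Ratio kept at 2*SHIFT precision, in the spirit of freq_inv_set_max_ratio(). */
        uint64_t max_ratio = (ref_rate << (2 * SCHED_CAPACITY_SHIFT)) / max_rate;

        /* Invented per-tick deltas of the core and constant counters. */
        uint64_t core_delta  = 2400000ULL;
        uint64_t const_delta = 100000ULL;

        /* One shift is consumed here, leaving a SHIFT-scaled frequency scale. */
        uint64_t scale = ((core_delta * max_ratio) >> SCHED_CAPACITY_SHIFT) / const_delta;
        if (scale > SCHED_CAPACITY_SCALE)                /* clamp for the sketch: never above full scale */
            scale = SCHED_CAPACITY_SCALE;

        printf("freq scale = %llu / %lu\n", (unsigned long long)scale, SCHED_CAPACITY_SCALE);
        return 0;
    }

With a core running at 2.4 GHz against a 3 GHz maximum, this prints 819, i.e. roughly 0.8 * 1024; the double shift is what lets the intermediate ratio keep that resolution.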
/linux/arch/arm/kernel/
  topology.c
      150  >> (SCHED_CAPACITY_SHIFT+1);                              in parse_dt_topology()
      153  >> (SCHED_CAPACITY_SHIFT-1)) + 1;                         in parse_dt_topology()
/linux/arch/x86/kernel/cpu/
  aperfmperf.c
      415  div_u64(cap << SCHED_CAPACITY_SHIFT, max_cap));           in arch_set_cpu_capacity()
      417  div_u64(cap_freq << SCHED_CAPACITY_SHIFT, base_freq));    in arch_set_cpu_capacity()
      439  if (check_shl_overflow(acnt, 2*SCHED_CAPACITY_SHIFT, &acnt))   in scale_freq_tick()
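These hits show the two common patterns side by side: forming a ratio on the capacity scale with "value << SCHED_CAPACITY_SHIFT, then divide", and guarding a large shift before spending 2 * SCHED_CAPACITY_SHIFT bits of extra precision. A standalone sketch of both, assuming SCHED_CAPACITY_SHIFT == 10; shl_overflows_u64() is my stand-in for the kernel's check_shl_overflow() and the perf values are invented.

    #include <stdint.h>
    #include <stdio.h>

    #define SCHED_CAPACITY_SHIFT 10                      /* assumed: SCHED_FIXEDPOINT_SHIFT */

    /* Stand-in for the kernel helper: does a << shift overflow a u64? */
    static int shl_overflows_u64(uint64_t a, unsigned int shift)
    {
        return a > (UINT64_MAX >> shift);
    }

    int main(void)
    {
        uint64_t acnt = 123456789ULL;                    /* made-up APERF delta */

        if (!shl_overflows_u64(acnt, 2 * SCHED_CAPACITY_SHIFT))
            acnt <<= 2 * SCHED_CAPACITY_SHIFT;           /* keep extra precision for a later division */

        /* Scale a capacity ratio into the 0..1024 range, as the arch_set_cpu_capacity() hits do. */
        uint64_t cap = 35, max_cap = 53;                 /* made-up per-CPU performance levels */
        uint64_t scaled = (cap << SCHED_CAPACITY_SHIFT) / max_cap;

        printf("scaled capacity = %llu / 1024\n", (unsigned long long)scaled);   /* 676 */
        return 0;
    }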
/linux/kernel/sched/
  pelt.c
      144  sa->runnable_sum += runnable * contrib << SCHED_CAPACITY_SHIFT;   in accumulate_sum()
      146  sa->util_sum += contrib << SCHED_CAPACITY_SHIFT;                  in accumulate_sum()
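PELT accumulates running time left-shifted by SCHED_CAPACITY_SHIFT so that a later division by the (unshifted) elapsed period yields a utilization in the 0..1024 range with ten fractional bits of precision. A deliberately simplified sketch of that idea, assuming SCHED_CAPACITY_SHIFT == 10 and ignoring the geometric decay the real accumulate_sum() applies:

    #include <stdint.h>
    #include <stdio.h>

    #define SCHED_CAPACITY_SHIFT 10        /* assumed: SCHED_FIXEDPOINT_SHIFT */

    int main(void)
    {
        /* Invented numbers: the task ran 784us out of a 1000us window. */
        uint64_t running_us = 784, window_us = 1000;

        /* Accumulate time shifted up, in the spirit of util_sum += contrib << SHIFT. */
        uint64_t util_sum = running_us << SCHED_CAPACITY_SHIFT;

        /* Dividing by the plain window length leaves a 0..1024 utilization. */
        uint64_t util_avg = util_sum / window_us;

        printf("util_avg = %llu of 1024\n", (unsigned long long)util_avg);   /* 802, i.e. ~78% */
        return 0;
    }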
  pelt.h
      136  u32 divider = ((LOAD_AVG_MAX - 1024) << SCHED_CAPACITY_SHIFT) - LOAD_AVG_MAX;   in update_idle_rq_clock_pelt()
  cpufreq_schedutil.c
      326  return (sg_cpu->iowait_boost * max_cap) >> SCHED_CAPACITY_SHIFT;   in sugov_iowait_apply()
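iowait_boost is itself a value on the 0..SCHED_CAPACITY_SCALE scale, so multiplying by max_cap and shifting right by SCHED_CAPACITY_SHIFT takes that fraction of the CPU's current maximum capacity. A tiny worked example with invented numbers, assuming the 1024 scale:

    #include <stdio.h>

    #define SCHED_CAPACITY_SHIFT 10         /* assumed: scale of 1024 */

    int main(void)
    {
        unsigned long iowait_boost = 512;   /* hypothetical: half of full scale */
        unsigned long max_cap = 800;        /* hypothetical capacity of a mid-size core */

        /* (boost / 1024) * max_cap, done in integer fixed point. */
        unsigned long boosted = (iowait_boost * max_cap) >> SCHED_CAPACITY_SHIFT;

        printf("boosted util = %lu\n", boosted);   /* 512/1024 of 800 -> 400 */
        return 0;
    }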
  sched.h
      241  #define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
     3305  return cap >= p->dl.dl_density >> (BW_SHIFT - SCHED_CAPACITY_SHIFT);   in dl_task_fits_capacity()
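cap_scale() is the generic form of the multiply-then-shift used above, and the dl_task_fits_capacity() hit shows the related trick of converting between fixed-point scales: deadline bandwidth/density is kept with BW_SHIFT bits of precision (20 in current kernels, an assumption here), so shifting right by (BW_SHIFT - SCHED_CAPACITY_SHIFT) re-expresses it on the capacity scale. A sketch with invented task parameters:

    #include <stdint.h>
    #include <stdio.h>

    #define SCHED_CAPACITY_SHIFT 10                      /* assumed */
    #define BW_SHIFT             20                      /* assumed current kernel value */
    #define cap_scale(v, s)      ((v)*(s) >> SCHED_CAPACITY_SHIFT)

    int main(void)
    {
        /* Hypothetical deadline task needing 3ms of CPU every 10ms: density ~0.3 in 2^20 units. */
        uint64_t dl_density = (3ULL << BW_SHIFT) / 10;

        /* Re-express the density on the 0..1024 capacity scale. */
        uint64_t needed_cap = dl_density >> (BW_SHIFT - SCHED_CAPACITY_SHIFT);   /* ~307 */

        uint64_t cpu_cap = 446;                          /* invented capacity of a little core */
        printf("fits: %s\n", cpu_cap >= needed_cap ? "yes" : "no");

        /* cap_scale(): e.g. ~80% (819/1024) of a 512-capacity CPU. */
        printf("cap_scale(819, 512) = %llu\n",
               (unsigned long long)cap_scale(819ULL, 512ULL));   /* 409 */
        return 0;
    }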
  deadline.c
      160  return dl_bw_cpus(i) << SCHED_CAPACITY_SHIFT;   in dl_bw_capacity()
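When every CPU in the domain is treated as having full capacity, the total capacity is simply the CPU count shifted left by SCHED_CAPACITY_SHIFT, i.e. nr_cpus * 1024. A one-line illustration with an invented CPU count, assuming the 1024 scale:

    #include <stdio.h>

    #define SCHED_CAPACITY_SHIFT 10    /* assumed: SCHED_CAPACITY_SCALE == 1024 */

    int main(void)
    {
        unsigned long nr_cpus = 8;                                  /* invented */
        unsigned long total_cap = nr_cpus << SCHED_CAPACITY_SHIFT;
        printf("total capacity = %lu\n", total_cap);                /* 8 * 1024 = 8192 */
        return 0;
    }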
  fair.c
     4468  running_sum = se->avg.util_sum >> SCHED_CAPACITY_SHIFT;            in update_tg_cfs_load()
     4718  -(long)(removed_runnable * divider) >> SCHED_CAPACITY_SHIFT);      in update_cfs_rq_load_avg()
  core.c
     9168  req.util = req.percent << SCHED_CAPACITY_SHIFT;   in capacity_from_percent()
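Here a utilization clamp given as a percentage is converted onto the capacity scale: the percentage is shifted up by SCHED_CAPACITY_SHIFT first so that the subsequent division by the percent scale keeps full 0..1024 resolution. A sketch assuming SCHED_CAPACITY_SHIFT == 10 and, for simplicity, a plain 0..100 percent scale (the real interface divides by its own percent-scale constant, which is not reproduced here):

    #include <stdio.h>

    #define SCHED_CAPACITY_SHIFT 10     /* assumed */

    /* Round-to-nearest division helper for the sketch. */
    static unsigned long div_round_closest(unsigned long n, unsigned long d)
    {
        return (n + d / 2) / d;
    }

    int main(void)
    {
        unsigned long percent = 75;                      /* invented clamp request */

        /* Shift up before dividing so the result keeps its fractional precision. */
        unsigned long util = percent << SCHED_CAPACITY_SHIFT;
        util = div_round_closest(util, 100);

        printf("util clamp = %lu of 1024\n", util);      /* 75% of 1024 -> 768 */
        return 0;
    }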
/linux/drivers/base/
  arch_topology.c
      150  scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq;   in topology_set_freq_scale()
      303  capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT,   in topology_normalize_cpu_scale()
      407  capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT,   in topology_init_cpu_capacity_cppc()
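topology_set_freq_scale() and the normalization paths use the same idiom: express one quantity as a fraction of another in 1024ths by shifting the numerator up before dividing. A minimal sketch with invented frequencies and capacities, assuming SCHED_CAPACITY_SHIFT == 10:

    #include <stdint.h>
    #include <stdio.h>

    #define SCHED_CAPACITY_SHIFT 10                 /* assumed */

    int main(void)
    {
        /* Invented example: CPU currently at 1.8 GHz out of a 2.4 GHz maximum. */
        uint64_t cur_freq = 1800000, max_freq = 2400000;   /* kHz */

        uint64_t freq_scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq;
        printf("freq scale = %llu / 1024\n", (unsigned long long)freq_scale);   /* 768 */

        /* Same pattern for normalizing raw capacities against the biggest CPU. */
        uint64_t raw_cap = 540, raw_max = 780;              /* invented DT/CPPC-derived numbers */
        uint64_t capacity = (raw_cap << SCHED_CAPACITY_SHIFT) / raw_max;
        printf("normalized capacity = %llu\n", (unsigned long long)capacity);   /* ~708 */
        return 0;
    }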
/linux/drivers/cpufreq/
  amd-pstate.c
      914  boost_ratio = div_u64(numerator << SCHED_CAPACITY_SHIFT, nominal_perf);   in amd_pstate_init_freq()
      915  max_freq = (nominal_freq * boost_ratio >> SCHED_CAPACITY_SHIFT) * 1000;   in amd_pstate_init_freq()
      918  lowest_nonlinear_ratio = div_u64(lowest_nonlinear_perf << SCHED_CAPACITY_SHIFT,   in amd_pstate_init_freq()
      920  lowest_nonlinear_freq = (nominal_freq * lowest_nonlinear_ratio >> SCHED_CAPACITY_SHIFT) * 1000;   in amd_pstate_init_freq()
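amd-pstate derives its boosted maximum frequency by first forming a perf ratio in 1024ths and then applying it to the nominal frequency, shifting the fixed-point bits back out. A standalone sketch of that arithmetic with invented perf levels and frequencies, assuming SCHED_CAPACITY_SHIFT == 10; this illustrates the math, not the driver itself.

    #include <stdint.h>
    #include <stdio.h>

    #define SCHED_CAPACITY_SHIFT 10                  /* assumed */

    int main(void)
    {
        /* Invented CPPC-style performance levels and nominal frequency. */
        uint64_t highest_perf = 228, nominal_perf = 154;
        uint64_t nominal_freq = 3800;                /* MHz, invented */

        /* Ratio of boost vs nominal, kept in 1024ths. */
        uint64_t boost_ratio = (highest_perf << SCHED_CAPACITY_SHIFT) / nominal_perf;

        /* Apply the ratio, drop the fixed-point bits; the *1000 mirrors the unit
         * conversion visible in the snippet above. */
        uint64_t max_freq = ((nominal_freq * boost_ratio) >> SCHED_CAPACITY_SHIFT) * 1000;

        printf("boost_ratio = %llu/1024, max_freq = %llu\n",
               (unsigned long long)boost_ratio, (unsigned long long)max_freq);
        return 0;
    }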
  acpi-cpufreq.c
      668  return div_u64(highest_perf << SCHED_CAPACITY_SHIFT, nominal_perf);          in get_max_boost_ratio()
      843  policy->cpuinfo.max_freq = freq * max_boost_ratio >> SCHED_CAPACITY_SHIFT;   in acpi_cpufreq_cpu_init()
  cppc_cpufreq.c
      123  perf <<= SCHED_CAPACITY_SHIFT;   in cppc_scale_freq_workfn()
/linux/include/linux/
  sched.h
      423  # define SCHED_CAPACITY_SHIFT SCHED_FIXEDPOINT_SHIFT      (macro definition)
      424  # define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT)
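This is the definition everything above builds on: SCHED_CAPACITY_SHIFT is an alias for SCHED_FIXEDPOINT_SHIFT (10 in current kernels, treated as an assumption here), so SCHED_CAPACITY_SCALE is 1024 and "one fully loaded, full-speed CPU" is represented as 1024, leaving ten fractional bits for all of the ratios above. A minimal standalone recap:

    #include <stdio.h>

    #define SCHED_FIXEDPOINT_SHIFT 10                          /* assumed current kernel value */
    #define SCHED_CAPACITY_SHIFT   SCHED_FIXEDPOINT_SHIFT
    #define SCHED_CAPACITY_SCALE   (1L << SCHED_CAPACITY_SHIFT)

    int main(void)
    {
        /* 1024 == one full CPU's worth of capacity or utilization. */
        printf("SCHED_CAPACITY_SCALE = %ld\n", SCHED_CAPACITY_SCALE);

        /* Example: 25% utilization expressed on the capacity scale. */
        long quarter = SCHED_CAPACITY_SCALE / 4;
        printf("25%% utilization = %ld\n", quarter);           /* 256 */
        return 0;
    }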
Completed in 87 milliseconds