Lines Matching refs: cpu

29 static bool __init acpi_cpu_is_threaded(int cpu)  in acpi_cpu_is_threaded()  argument
31 int is_threaded = acpi_pptt_cpu_is_thread(cpu); in acpi_cpu_is_threaded()
58 int cpu, topology_id; in parse_acpi_topology() local
65 for_each_possible_cpu(cpu) { in parse_acpi_topology()
66 topology_id = find_acpi_cpu_topology(cpu, 0); in parse_acpi_topology()
70 if (acpi_cpu_is_threaded(cpu)) { in parse_acpi_topology()
71 cpu_topology[cpu].thread_id = topology_id; in parse_acpi_topology()
72 topology_id = find_acpi_cpu_topology(cpu, 1); in parse_acpi_topology()
73 cpu_topology[cpu].core_id = topology_id; in parse_acpi_topology()
87 hetero_id = find_acpi_cpu_topology_hetero_id(cpu); in parse_acpi_topology()
103 cpu_topology[cpu].thread_id = -1; in parse_acpi_topology()
104 cpu_topology[cpu].core_id = topology_id; in parse_acpi_topology()
106 topology_id = find_acpi_cpu_topology_cluster(cpu); in parse_acpi_topology()
107 cpu_topology[cpu].cluster_id = topology_id; in parse_acpi_topology()
108 topology_id = find_acpi_cpu_topology_package(cpu); in parse_acpi_topology()
109 cpu_topology[cpu].package_id = topology_id; in parse_acpi_topology()
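The fragments above (acpi_cpu_is_threaded() and parse_acpi_topology(), apparently from arch/arm64/kernel/topology.c) show how the ACPI/PPTT path fills cpu_topology[] for every possible CPU: the leaf PPTT node becomes either the thread or the core ID depending on whether the PPTT marks the CPU as a hardware thread, while cluster and package IDs come from their own lookups. A minimal sketch of that loop follows; the error check on the first lookup and the find_acpi_cpu_topology_hetero_id() handling are not visible in the listing, so treat those parts as assumptions.

	for_each_possible_cpu(cpu) {
		/* Leaf PPTT node for this CPU (level 0). */
		topology_id = find_acpi_cpu_topology(cpu, 0);
		if (topology_id < 0)		/* assumed error handling */
			return topology_id;

		if (acpi_cpu_is_threaded(cpu)) {
			/* Leaf is a thread; its level-1 parent is the core. */
			cpu_topology[cpu].thread_id = topology_id;
			topology_id = find_acpi_cpu_topology(cpu, 1);
			cpu_topology[cpu].core_id = topology_id;
		} else {
			cpu_topology[cpu].thread_id = -1;
			cpu_topology[cpu].core_id = topology_id;
		}
		cpu_topology[cpu].cluster_id = find_acpi_cpu_topology_cluster(cpu);
		cpu_topology[cpu].package_id = find_acpi_cpu_topology_package(cpu);
	}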
164 static inline bool freq_counters_valid(int cpu) in freq_counters_valid() argument
166 struct amu_cntr_sample *amu_sample = per_cpu_ptr(&cpu_amu_samples, cpu); in freq_counters_valid()
168 if ((cpu >= nr_cpu_ids) || !cpumask_test_cpu(cpu, cpu_present_mask)) in freq_counters_valid()
171 if (!cpu_has_amu_feat(cpu)) { in freq_counters_valid()
172 pr_debug("CPU%d: counters are not supported.\n", cpu); in freq_counters_valid()
178 pr_debug("CPU%d: cycle counters are not enabled.\n", cpu); in freq_counters_valid()
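freq_counters_valid() gates the activity-monitor (AMU) based frequency invariance: a CPU only qualifies if it is within range and present, advertises the AMU feature, and its cycle counters have already produced a sample. A sketch of that check, with the per-CPU sample field names (arch_const_cycles_prev, arch_core_cycles_prev) assumed rather than taken from the listing:

static inline bool freq_counters_valid(int cpu)
{
	struct amu_cntr_sample *amu_sample = per_cpu_ptr(&cpu_amu_samples, cpu);

	if ((cpu >= nr_cpu_ids) || !cpumask_test_cpu(cpu, cpu_present_mask))
		return false;

	if (!cpu_has_amu_feat(cpu)) {
		pr_debug("CPU%d: counters are not supported.\n", cpu);
		return false;
	}

	/* Assumed field names: a zero snapshot means the counters never ran. */
	if (unlikely(!amu_sample->arch_const_cycles_prev ||
		     !amu_sample->arch_core_cycles_prev)) {
		pr_debug("CPU%d: cycle counters are not enabled.\n", cpu);
		return false;
	}

	return true;
}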
185 void freq_inv_set_max_ratio(int cpu, u64 max_rate) in freq_inv_set_max_ratio() argument
191 cpu); in freq_inv_set_max_ratio()
215 WRITE_ONCE(per_cpu(arch_max_freq_scale, cpu), (unsigned long)ratio); in freq_inv_set_max_ratio()
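freq_inv_set_max_ratio() pre-computes the fixed ratio between the constant reference counter rate and the CPU's maximum frequency, which later scales the per-tick counter deltas into a capacity-style value. A sketch of the arithmetic, assuming the reference rate comes from arch_timer_get_rate() and that the doubled SCHED_CAPACITY_SHIFT exists to keep resolution when the reference frequency is low:

	u64 ratio, ref_rate = arch_timer_get_rate();	/* assumed source of ref_rate */

	if (WARN_ONCE(!max_rate || !ref_rate,
		      "CPU%d: invalid maximum or reference frequency.\n", cpu))
		return;

	/* arch_max_freq_scale = (ref_rate / max_rate) * SCHED_CAPACITY_SCALE^2 */
	ratio = ref_rate << (2 * SCHED_CAPACITY_SHIFT);
	ratio = div64_u64(ratio, max_rate);
	if (!ratio) {
		WARN_ONCE(1, "Reference frequency too low.\n");
		return;
	}

	WRITE_ONCE(per_cpu(arch_max_freq_scale, cpu), (unsigned long)ratio);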
264 static __always_inline bool amu_fie_cpu_supported(unsigned int cpu) in amu_fie_cpu_supported() argument
267 cpumask_test_cpu(cpu, amu_fie_cpus); in amu_fie_cpu_supported()
272 unsigned int cpu = smp_processor_id(); in arch_cpu_idle_enter() local
274 if (!amu_fie_cpu_supported(cpu)) in arch_cpu_idle_enter()
278 if (housekeeping_cpu(cpu, HK_TYPE_TICK) && in arch_cpu_idle_enter()
279 time_is_before_jiffies(per_cpu(cpu_amu_samples.last_scale_update, cpu))) in arch_cpu_idle_enter()
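arch_cpu_idle_enter() makes a housekeeping CPU refresh its AMU sample before going idle if the tick has not already done so recently; amu_fie_cpu_supported() is simply a mask test against amu_fie_cpus. A sketch, with the name of the refresh helper (amu_scale_freq_tick()) taken as an assumption since it does not appear in the listing:

void arch_cpu_idle_enter(void)
{
	unsigned int cpu = smp_processor_id();

	if (!amu_fie_cpu_supported(cpu))
		return;

	/* Refresh the sample only if the tick has not updated it this jiffy. */
	if (housekeeping_cpu(cpu, HK_TYPE_TICK) &&
	    time_is_before_jiffies(per_cpu(cpu_amu_samples.last_scale_update, cpu)))
		amu_scale_freq_tick();	/* assumed helper name */
}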
285 int arch_freq_get_on_cpu(int cpu) in arch_freq_get_on_cpu() argument
288 unsigned int start_cpu = cpu; in arch_freq_get_on_cpu()
293 if (!amu_fie_cpu_supported(cpu) || !arch_scale_freq_ref(cpu)) in arch_freq_get_on_cpu()
298 amu_sample = per_cpu_ptr(&cpu_amu_samples, cpu); in arch_freq_get_on_cpu()
308 if (!housekeeping_cpu(cpu, HK_TYPE_TICK) || in arch_freq_get_on_cpu()
310 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); in arch_freq_get_on_cpu()
322 for_each_cpu_wrap(ref_cpu, policy->cpus, cpu + 1) { in arch_freq_get_on_cpu()
338 cpu = ref_cpu; in arch_freq_get_on_cpu()
348 scale = arch_scale_freq_capacity(cpu); in arch_freq_get_on_cpu()
349 freq = scale * arch_scale_freq_ref(cpu); in arch_freq_get_on_cpu()
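arch_freq_get_on_cpu() reports an average frequency derived from the AMU counters rather than the cpufreq-requested one. If the target CPU is not a housekeeping CPU, or its sample is stale (for instance because it has been idle), it walks policy->cpus with for_each_cpu_wrap() looking for another CPU with a fresh sample, then converts that CPU's scale factor back into a frequency. The final conversion, with the closing SCHED_CAPACITY_SHIFT step assumed since it is not visible in the listing:

	/*
	 * Reverse of the tick-time computation: arch_scale_freq_capacity()
	 * is freq/ref scaled by SCHED_CAPACITY_SCALE, so multiplying by the
	 * reference frequency and shifting back yields the frequency.
	 */
	scale = arch_scale_freq_capacity(cpu);
	freq = scale * arch_scale_freq_ref(cpu);

	return freq >> SCHED_CAPACITY_SHIFT;	/* assumed final shift */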
356 int cpu; in amu_fie_setup() local
363 for_each_cpu(cpu, cpus) in amu_fie_setup()
364 if (!freq_counters_valid(cpu)) in amu_fie_setup()
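amu_fie_setup() only enables counter-based frequency invariance for a cpufreq policy if every CPU in the supplied mask passes freq_counters_valid(); one CPU without usable counters disqualifies the whole mask. A sketch of that all-or-nothing check, with the accumulation into amu_fie_cpus assumed:

	for_each_cpu(cpu, cpus)
		if (!freq_counters_valid(cpu))
			return;	/* one bad CPU disables FIE for the whole mask */

	cpumask_or(amu_fie_cpus, amu_fie_cpus, cpus);	/* assumed accumulation */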
441 int counters_read_on_cpu(int cpu, smp_call_func_t func, u64 *val) in counters_read_on_cpu() argument
447 if (!cpu_has_amu_feat(cpu)) in counters_read_on_cpu()
453 smp_call_function_single(cpu, func, val, 1); in counters_read_on_cpu()
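counters_read_on_cpu() is the cross-CPU read primitive used by the CPPC FFH backend: it bails out on CPUs without AMU support and otherwise runs the supplied reader (func) on the target CPU via a synchronous IPI. A sketch, with the exact error code assumed:

int counters_read_on_cpu(int cpu, smp_call_func_t func, u64 *val)
{
	/* Counterless CPUs cannot serve the request at all. */
	if (!cpu_has_amu_feat(cpu))
		return -EOPNOTSUPP;	/* assumed error code */

	/* Run the reader on the target CPU and wait for completion. */
	smp_call_function_single(cpu, func, val, 1);

	return 0;
}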
464 int cpu = get_cpu_with_amu_feat(); in cpc_ffh_supported() local
476 if ((cpu >= nr_cpu_ids) || !cpumask_test_cpu(cpu, cpu_present_mask)) in cpc_ffh_supported()
482 int cpc_read_ffh(int cpu, struct cpc_reg *reg, u64 *val) in cpc_read_ffh() argument
488 ret = counters_read_on_cpu(cpu, cpu_read_corecnt, val); in cpc_read_ffh()
491 ret = counters_read_on_cpu(cpu, cpu_read_constcnt, val); in cpc_read_ffh()
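cpc_ffh_supported() reports FFH support only when get_cpu_with_amu_feat() returns a valid, present CPU, and cpc_read_ffh() then maps the FFH register addresses onto the core and constant cycle counter readers. A sketch of that dispatch; the 0x0/0x1 address values are an assumption inferred from the two readers that appear in the listing:

int cpc_read_ffh(int cpu, struct cpc_reg *reg, u64 *val)
{
	int ret = -EOPNOTSUPP;

	switch ((u64)reg->address) {
	case 0x0:	/* assumed: delivered (core) cycles */
		ret = counters_read_on_cpu(cpu, cpu_read_corecnt, val);
		break;
	case 0x1:	/* assumed: reference (constant) cycles */
		ret = counters_read_on_cpu(cpu, cpu_read_constcnt, val);
		break;
	}

	return ret;
}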