// SPDX-License-Identifier: GPL-2.0-only
/*
 * cppc.c: CPPC Interface for x86
 * Copyright (c) 2016, Intel Corporation.
 */

#include <acpi/cppc_acpi.h>
#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/topology.h>

/* Refer to drivers/acpi/cppc_acpi.c for the description of functions */

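/*
 * A few AMD/Hygon family/model ranges are allowed here before checking
 * X86_FEATURE_CPPC, presumably because those parts support CPPC without
 * advertising the CPUID feature bit.
 */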
bool cpc_supported_by_cpu(void)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
	case X86_VENDOR_HYGON:
		if (boot_cpu_data.x86 == 0x19 && ((boot_cpu_data.x86_model <= 0x0f) ||
		    (boot_cpu_data.x86_model >= 0x20 && boot_cpu_data.x86_model <= 0x2f)))
			return true;
		else if (boot_cpu_data.x86 == 0x17 &&
			 boot_cpu_data.x86_model >= 0x70 && boot_cpu_data.x86_model <= 0x7f)
			return true;
		return boot_cpu_has(X86_FEATURE_CPPC);
	}
	return false;
}

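/* FFH (Functional Fixed Hardware) register accesses are backed by MSRs here. */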
bool cpc_ffh_supported(void)
{
	return true;
}

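/*
 * Read the MSR at @reg->address on @cpunum and extract the field described
 * by @reg->bit_offset and @reg->bit_width into @val.
 */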
int cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
{
	int err;

	err = rdmsrl_safe_on_cpu(cpunum, reg->address, val);
	if (!err) {
		u64 mask = GENMASK_ULL(reg->bit_offset + reg->bit_width - 1,
				       reg->bit_offset);

		*val &= mask;
		*val >>= reg->bit_offset;
	}
	return err;
}

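/*
 * Read-modify-write: update only the @reg->bit_offset/@reg->bit_width field
 * of the MSR at @reg->address, preserving the remaining bits.
 */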
int cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
{
	u64 rd_val;
	int err;

	err = rdmsrl_safe_on_cpu(cpunum, reg->address, &rd_val);
	if (!err) {
		u64 mask = GENMASK_ULL(reg->bit_offset + reg->bit_width - 1,
				       reg->bit_offset);

		val <<= reg->bit_offset;
		val &= mask;
		rd_val &= ~mask;
		rd_val |= val;
		err = wrmsrl_safe_on_cpu(cpunum, reg->address, rd_val);
	}
	return err;
}

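/*
 * Compute the ratio handed to freq_invariance_set_perf_ratio(): the midpoint
 * between the highest(boost)/nominal performance ratio and 1, expressed in
 * SCHED_CAPACITY_SCALE units.
 */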
static void amd_set_max_freq_ratio(void)
{
	struct cppc_perf_caps perf_caps;
	u64 highest_perf, nominal_perf;
	u64 perf_ratio;
	int rc;

	rc = cppc_get_perf_caps(0, &perf_caps);
	if (rc) {
		pr_debug("Could not retrieve perf counters (%d)\n", rc);
		return;
	}

	highest_perf = amd_get_highest_perf();
	nominal_perf = perf_caps.nominal_perf;

	if (!highest_perf || !nominal_perf) {
		pr_debug("Could not retrieve highest or nominal performance\n");
		return;
	}

	perf_ratio = div_u64(highest_perf * SCHED_CAPACITY_SCALE, nominal_perf);
	/* midpoint between max_boost and max_P */
	perf_ratio = (perf_ratio + SCHED_CAPACITY_SCALE) >> 1;
	if (!perf_ratio) {
		pr_debug("Non-zero highest/nominal perf values led to a 0 ratio\n");
		return;
	}

	freq_invariance_set_perf_ratio(perf_ratio, false);
}

static DEFINE_MUTEX(freq_invariance_lock);

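/*
 * Guarded so the max-frequency ratio is computed only once even if this is
 * called multiple times; limited to AMD parts with APERF/MPERF support.
 */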
void init_freq_invariance_cppc(void)
{
	static bool init_done;

	if (!cpu_feature_enabled(X86_FEATURE_APERFMPERF))
		return;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return;

	mutex_lock(&freq_invariance_lock);
	if (!init_done)
		amd_set_max_freq_ratio();
	init_done = true;
	mutex_unlock(&freq_invariance_lock);
}