// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM PMU support for AMD
 *
 * Copyright 2015, Red Hat, Inc. and/or its affiliates.
 *
 * Author:
 *   Wei Huang <wei@redhat.com>
 *
 * Implementation is based on the pmu_intel.c file.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"
#include "svm.h"

enum pmu_type {
        PMU_TYPE_COUNTER = 0,
        PMU_TYPE_EVNTSEL,
};

static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
        unsigned int num_counters = pmu->nr_arch_gp_counters;

        if (pmc_idx >= num_counters)
                return NULL;

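        /*
         * array_index_nospec() clamps the index under speculative
         * execution so that a mispredicted bounds check above can't be
         * used to read past gp_counters[] (Spectre v1 hardening).
         */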
        return &pmu->gp_counters[array_index_nospec(pmc_idx, num_counters)];
}

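/*
 * AMD guests see two MSR families for the core PMU: the legacy
 * MSR_K7_EVNTSEL0..3/MSR_K7_PERFCTR0..3 ranges (four counters), and,
 * when PERFCTR_CORE is enumerated, the MSR_F15H_PERF_CTL0/CTR0 through
 * CTL5/CTR5 range, in which each counter's CTL and CTR MSRs sit at
 * consecutive addresses.
 */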
static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
                                             enum pmu_type type)
{
        struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
        unsigned int idx;

        if (!vcpu->kvm->arch.enable_pmu)
                return NULL;

        switch (msr) {
        case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5:
                if (!guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
                        return NULL;
                /*
                 * Each PMU counter has a pair of CTL and CTR MSRs. CTLn
                 * MSRs (accessed via EVNTSEL) are even, CTRn MSRs are odd.
                 */
                idx = (unsigned int)((msr - MSR_F15H_PERF_CTL0) / 2);
                if (!(msr & 0x1) != (type == PMU_TYPE_EVNTSEL))
                        return NULL;
                break;
        case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
                if (type != PMU_TYPE_EVNTSEL)
                        return NULL;
                idx = msr - MSR_K7_EVNTSEL0;
                break;
        case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
                if (type != PMU_TYPE_COUNTER)
                        return NULL;
                idx = msr - MSR_K7_PERFCTR0;
                break;
        default:
                return NULL;
        }

        return amd_pmc_idx_to_pmc(pmu, idx);
}

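/*
 * Unlike Intel, AMD doesn't enumerate per-event availability via CPUID,
 * so no filtering is done here; any event the guest encodes is treated
 * as available.
 */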
static bool amd_hw_event_available(struct kvm_pmc *pmc)
{
        return true;
}

/*
 * Check if a PMC is enabled by comparing it against global_ctrl bits.
 * Because AMD CPUs don't have a global_ctrl MSR, all PMCs are enabled
 * (return true).
 */
static bool amd_pmc_is_enabled(struct kvm_pmc *pmc)
{
        return true;
}

static bool amd_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

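        /*
         * Bits [31:30] of the RDPMC index are type flags (Intel uses bit
         * 30 to select fixed counters); AMD has no fixed counters, so
         * strip the flags before range-checking the counter number.
         */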
        idx &= ~(3u << 30);

        return idx < pmu->nr_arch_gp_counters;
}

/* idx is the ECX register of the RDPMC instruction */
static struct kvm_pmc *amd_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
                                            unsigned int idx, u64 *mask)
{
        return amd_pmc_idx_to_pmc(vcpu_to_pmu(vcpu), idx & ~(3u << 30));
}

static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
        /* All MSRs refer to exactly one PMC, so msr_idx_to_pmc is enough. */
        return false;
}

static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc;

        pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
        pmc = pmc ? pmc : get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);

        return pmc;
}

static int amd_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc;
        u32 msr = msr_info->index;

        /* MSR_PERFCTRn */
        pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
        if (pmc) {
                msr_info->data = pmc_read_counter(pmc);
                return 0;
        }
        /* MSR_EVNTSELn */
        pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
        if (pmc) {
                msr_info->data = pmc->eventsel;
                return 0;
        }

        return 1;
}

static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc;
        u32 msr = msr_info->index;
        u64 data = msr_info->data;

        /* MSR_PERFCTRn */
        pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
        if (pmc) {
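                /*
                 * Apply the guest's write as a delta against the current
                 * emulated count so the backing perf event keeps running
                 * instead of being torn down and recreated.
                 */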
                pmc->counter += data - pmc_read_counter(pmc);
                pmc_update_sample_period(pmc);
                return 0;
        }
        /* MSR_EVNTSELn */
        pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
        if (pmc) {
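                /*
                 * Reserved bits are silently cleared rather than causing
                 * a fault, and the perf event is only reprogrammed when
                 * the selector actually changes.
                 */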
                data &= ~pmu->reserved_bits;
                if (data != pmc->eventsel) {
                        pmc->eventsel = data;
                        kvm_pmu_request_counter_reprogam(pmc);
                }
                return 0;
        }

        return 1;
}

static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

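        /* PERFCTR_CORE exposes six counters, the legacy PMU only four. */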
        if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
                pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS_CORE;
        else
                pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS;

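        /* AMD core performance counters are 48 bits wide. */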
        pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
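        /*
         * PERF_CTL bits 19 and 21 are reserved, as is everything above
         * bit 35 (the top of the extended event-select field) from the
         * guest's point of view, hence the 0xfffffff000280000 mask below.
         */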
        pmu->reserved_bits = 0xfffffff000280000ull;
        pmu->raw_event_mask = AMD64_RAW_EVENT_MASK;
        pmu->version = 1;
        /* Not applicable to AMD; clear them to prevent any fallout. */
        pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
        pmu->nr_arch_fixed_counters = 0;
        pmu->global_status = 0;
        bitmap_set(pmu->all_valid_pmc_idx, 0, pmu->nr_arch_gp_counters);
}

static void amd_pmu_init(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        int i;

        BUILD_BUG_ON(KVM_AMD_PMC_MAX_GENERIC > AMD64_NUM_COUNTERS_CORE);
        BUILD_BUG_ON(KVM_AMD_PMC_MAX_GENERIC > INTEL_PMC_MAX_GENERIC);

        for (i = 0; i < KVM_AMD_PMC_MAX_GENERIC; i++) {
                pmu->gp_counters[i].type = KVM_PMC_GP;
                pmu->gp_counters[i].vcpu = vcpu;
                pmu->gp_counters[i].idx = i;
                pmu->gp_counters[i].current_config = 0;
        }
}

static void amd_pmu_reset(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        int i;

        for (i = 0; i < KVM_AMD_PMC_MAX_GENERIC; i++) {
                struct kvm_pmc *pmc = &pmu->gp_counters[i];

                pmc_stop_counter(pmc);
                pmc->counter = pmc->prev_counter = pmc->eventsel = 0;
        }
}

struct kvm_pmu_ops amd_pmu_ops __initdata = {
        .hw_event_available = amd_hw_event_available,
        .pmc_is_enabled = amd_pmc_is_enabled,
        .pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
        .rdpmc_ecx_to_pmc = amd_rdpmc_ecx_to_pmc,
        .msr_idx_to_pmc = amd_msr_idx_to_pmc,
        .is_valid_rdpmc_ecx = amd_is_valid_rdpmc_ecx,
        .is_valid_msr = amd_is_valid_msr,
        .get_msr = amd_pmu_get_msr,
        .set_msr = amd_pmu_set_msr,
        .refresh = amd_pmu_refresh,
        .init = amd_pmu_init,
        .reset = amd_pmu_reset,
        .EVENTSEL_EVENT = AMD64_EVENTSEL_EVENT,
        .MAX_NR_GP_COUNTERS = KVM_AMD_PMC_MAX_GENERIC,
};