/*
 * Copyright 2019 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "feature_id.h"

#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/types.h"
#include "hf/vm.h"

#include "msr.h"
#include "sysregs.h"

/* clang-format off */

/**
 * Definitions of read-only feature ID (group 3) registers' encodings.
 * See Arm Architecture Reference Manual Armv8-A, Table D1-52 and D12-2.
 * NAME, op0, op1, crn, crm, op2
 */
#define FEATURE_ID_REGISTERS_READ			\
	X(ID_PFR0_EL1       , 3, 0, 0, 1, 0)		\
	X(ID_PFR1_EL1       , 3, 0, 0, 1, 1)		\
	X(ID_DFR0_EL1       , 3, 0, 0, 1, 2)		\
	X(ID_AFR0_EL1       , 3, 0, 0, 1, 3)		\
	X(ID_MMFR0_EL1      , 3, 0, 0, 1, 4)		\
	X(ID_MMFR1_EL1      , 3, 0, 0, 1, 5)		\
	X(ID_MMFR2_EL1      , 3, 0, 0, 1, 6)		\
	X(ID_MMFR3_EL1      , 3, 0, 0, 1, 7)		\
	X(ID_ISAR0_EL1      , 3, 0, 0, 2, 0)		\
	X(ID_ISAR1_EL1      , 3, 0, 0, 2, 1)		\
	X(ID_ISAR2_EL1      , 3, 0, 0, 2, 2)		\
	X(ID_ISAR3_EL1      , 3, 0, 0, 2, 3)		\
	X(ID_ISAR4_EL1      , 3, 0, 0, 2, 4)		\
	X(ID_ISAR5_EL1      , 3, 0, 0, 2, 5)		\
	X(ID_MMFR4_EL1      , 3, 0, 0, 2, 6)		\
							\
	X(MVFR0_EL1         , 3, 0, 0, 3, 0)		\
	X(MVFR1_EL1         , 3, 0, 0, 3, 1)		\
	X(MVFR2_EL1         , 3, 0, 0, 3, 2)		\
							\
	X(ID_AA64PFR0_EL1   , 3, 0, 0, 4, 0)		\
	X(ID_AA64PFR1_EL1   , 3, 0, 0, 4, 1)		\
							\
	X(ID_AA64DFR0_EL1   , 3, 0, 0, 5, 0)		\
	X(ID_AA64DFR1_EL1   , 3, 0, 0, 5, 1)		\
							\
	X(ID_AA64AFR0_EL1   , 3, 0, 0, 5, 4)		\
	X(ID_AA64AFR1_EL1   , 3, 0, 0, 5, 5)		\
							\
	X(ID_AA64ISAR0_EL1  , 3, 0, 0, 6, 0)		\
	X(ID_AA64ISAR1_EL1  , 3, 0, 0, 6, 1)		\
							\
	X(ID_AA64MMFR0_EL1  , 3, 0, 0, 7, 0)		\
	X(ID_AA64MMFR1_EL1  , 3, 0, 0, 7, 1)		\
	X(ID_AA64MMFR2_EL1  , 3, 0, 0, 7, 2)

/* clang-format on */

enum {
#define X(reg_name, op0, op1, crn, crm, op2) \
	reg_name##_ENC = GET_ISS_ENCODING(op0, op1, crn, crm, op2),
	FEATURE_ID_REGISTERS_READ
#undef X
};
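
/*
 * Illustrative expansion of the X-macro above: the ID_AA64PFR0_EL1 entry
 * produces the enumerator
 *
 *	ID_AA64PFR0_EL1_ENC = GET_ISS_ENCODING(3, 0, 0, 4, 0)
 *
 * which is compared against the system register encoding extracted from the
 * ESR in feature_id_process_access() below.
 */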

/**
 * Returns true if the ESR register shows an access to a feature ID group 3
 * register.
 */
bool feature_id_is_register_access(uintreg_t esr)
{
	uintreg_t op0 = GET_ISS_OP0(esr);
	uintreg_t op1 = GET_ISS_OP1(esr);
	uintreg_t crn = GET_ISS_CRN(esr);
	uintreg_t crm = GET_ISS_CRM(esr);

	/* From the Arm Architecture Reference Manual Table D12-2. */
	return op0 == 3 && op1 == 0 && crn == 0 && crm >= 1 && crm <= 7;
}

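/*
 * Masks for the feature ID register fields that Hafnium may hide from a VM.
 * Each field is 4 bits wide; when feature_set_traps() clears the
 * corresponding bits in vm->arch.tid3_masks, the field reads as zero
 * (feature not implemented) in the value returned by
 * feature_id_process_access().
 */
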
/**
 * RAS-related. RES0 when RAS is not implemented.
 */
#define ID_AA64MMFR1_EL1_SPEC_SEI (UINT64_C(0xf) << 24)

/**
 * Indicates support for LORegions.
 */
#define ID_AA64MMFR1_EL1_LO (UINT64_C(0xf) << 16)

/**
 * RAS Extension version.
 */
#define ID_AA64PFR0_EL1_RAS (UINT64_C(0xf) << 28)

/**
 * Self-hosted Trace Extension version.
 */
#define ID_AA64DFR0_EL1_TRACE_FILT (UINT64_C(0xf) << 40)

/**
 * OS Double Lock implemented.
 */
#define ID_AA64DFR0_EL1_DOUBLE_LOCK (UINT64_C(0xf) << 36)

/**
 * Statistical Profiling Extension version.
 */
#define ID_AA64DFR0_EL1_PMS_VER (UINT64_C(0xf) << 32)

/**
 * Performance Monitors Extension version.
 */
#define ID_AA64DFR0_EL1_PMU_VER (UINT64_C(0xf) << 8)

/**
 * Indicates whether the System register interface to the trace unit is
 * implemented.
 */
#define ID_AA64DFR0_EL1_TRACE_VER (UINT64_C(0xf) << 4)

/**
 * Debug architecture version.
 */
#define ID_AA64DFR0_EL1_DEBUG_VER (UINT64_C(0xf))

/**
 * PAuth: whether an implementation defined algorithm for generic code
 * authentication is implemented.
 */
#define ID_AA64ISAR1_EL1_GPI (UINT64_C(0xf) << 28)

/**
 * PAuth: whether the QARMA or Architected algorithm for generic code
 * authentication is implemented.
 */
#define ID_AA64ISAR1_EL1_GPA (UINT64_C(0xf) << 24)

/**
 * PAuth: whether an implementation defined algorithm for address
 * authentication is implemented.
 */
#define ID_AA64ISAR1_EL1_API (UINT64_C(0xf) << 8)

/**
 * PAuth: whether the QARMA or Architected algorithm for address
 * authentication is implemented.
 */
#define ID_AA64ISAR1_EL1_APA (UINT64_C(0xf) << 4)

void feature_set_traps(struct vm *vm, struct arch_regs *regs)
{
	arch_features_t features = vm->arch.trapped_features;

	if (features & ~HF_FEATURE_ALL) {
		panic("features has undefined bits 0x%x", features);
	}

	/* By default do not mask out any features. */
	vm->arch.tid3_masks.id_aa64mmfr1_el1 = ~0ULL;
	vm->arch.tid3_masks.id_aa64pfr0_el1 = ~0ULL;
	vm->arch.tid3_masks.id_aa64dfr0_el1 = ~0ULL;
	vm->arch.tid3_masks.id_aa64isar1_el1 = ~0ULL;

	/*
	 * Always mask the VHE feature. There is no nested virtualization
	 * support at this point, so there is no need to expose VHE to guests.
	 */
	vm->arch.tid3_masks.id_aa64mmfr1_el1 &=
		~(ID_AA64MMFR1_EL1_VH_MASK << ID_AA64MMFR1_EL1_VH_SHIFT);

	if (features & HF_FEATURE_RAS) {
		regs->hyp_state.hcr_el2 |= HCR_EL2_TERR;
		vm->arch.tid3_masks.id_aa64mmfr1_el1 &=
			~ID_AA64MMFR1_EL1_SPEC_SEI;
		vm->arch.tid3_masks.id_aa64pfr0_el1 &= ~ID_AA64PFR0_EL1_RAS;
	}

	if (features & HF_FEATURE_SPE) {
		/*
		 * Trap VM accesses to Statistical Profiling Extension (SPE)
		 * registers.
		 */
		regs->lazy.mdcr_el2 |= MDCR_EL2_TPMS;

		/*
		 * Set E2PB to 0b00. This ensures that accesses to the
		 * Profiling Buffer controls at EL1 are trapped to EL2.
		 */
		regs->lazy.mdcr_el2 &= ~MDCR_EL2_E2PB;

		vm->arch.tid3_masks.id_aa64dfr0_el1 &= ~ID_AA64DFR0_EL1_PMS_VER;
	}

	if (features & HF_FEATURE_DEBUG) {
		regs->lazy.mdcr_el2 |=
			MDCR_EL2_TDRA | MDCR_EL2_TDOSA | MDCR_EL2_TDA;

		vm->arch.tid3_masks.id_aa64dfr0_el1 &=
			~ID_AA64DFR0_EL1_DOUBLE_LOCK;
	}

	if (features & HF_FEATURE_TRACE) {
		regs->lazy.mdcr_el2 |= MDCR_EL2_TTRF;

		vm->arch.tid3_masks.id_aa64dfr0_el1 &=
			~ID_AA64DFR0_EL1_TRACE_FILT;
		vm->arch.tid3_masks.id_aa64dfr0_el1 &=
			~ID_AA64DFR0_EL1_TRACE_VER;
	}

	if (features & HF_FEATURE_PERFMON) {
		regs->lazy.mdcr_el2 |= MDCR_EL2_TPM | MDCR_EL2_TPMCR;

		vm->arch.tid3_masks.id_aa64dfr0_el1 &= ~ID_AA64DFR0_EL1_PMU_VER;
	}

	if (features & HF_FEATURE_LOR) {
		regs->hyp_state.hcr_el2 |= HCR_EL2_TLOR;

		vm->arch.tid3_masks.id_aa64mmfr1_el1 &= ~ID_AA64MMFR1_EL1_LO;
	}

	if (features & HF_FEATURE_PAUTH) {
		/* APK and API bits *enable* trapping when cleared. */
		regs->hyp_state.hcr_el2 &= ~(HCR_EL2_APK | HCR_EL2_API);

		vm->arch.tid3_masks.id_aa64isar1_el1 &= ~ID_AA64ISAR1_EL1_GPI;
		vm->arch.tid3_masks.id_aa64isar1_el1 &= ~ID_AA64ISAR1_EL1_GPA;
		vm->arch.tid3_masks.id_aa64isar1_el1 &= ~ID_AA64ISAR1_EL1_API;
		vm->arch.tid3_masks.id_aa64isar1_el1 &= ~ID_AA64ISAR1_EL1_APA;
	}
}
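
/*
 * Note: the tid3_masks values configured above take effect in
 * feature_id_process_access() below, which applies them to the value
 * returned for a trapped guest read of the corresponding feature ID
 * register (reads of the group 3 ID registers from EL1 trap to EL2 when
 * HCR_EL2.TID3 is set).
 */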

/**
 * Processes an access (mrs) to a feature ID register.
 * Returns true if the access was allowed and performed, false otherwise.
 */
bool feature_id_process_access(struct vcpu *vcpu, uintreg_t esr)
{
	const struct vm *vm = vcpu->vm;
	uintreg_t sys_register = GET_ISS_SYSREG(esr);
	uintreg_t rt_register = GET_ISS_RT(esr);
	uintreg_t value;

	/* +1 because Rt can access register XZR */
	CHECK(rt_register < NUM_GP_REGS + 1);

	if (!ISS_IS_READ(esr)) {
		dlog_notice(
			"Unsupported feature ID register write: "
			"op0=%d, op1=%d, crn=%d, crm=%d, op2=%d, rt=%d.\n",
			GET_ISS_OP0(esr), GET_ISS_OP1(esr), GET_ISS_CRN(esr),
			GET_ISS_CRM(esr), GET_ISS_OP2(esr), GET_ISS_RT(esr));
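		/*
		 * The feature ID registers are read-only, so the write is
		 * ignored; it is still reported as a handled access rather
		 * than a fault.
		 */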
		return true;
	}

	switch (sys_register) {
#define X(reg_name, op0, op1, crn, crm, op2)              \
	case (GET_ISS_ENCODING(op0, op1, crn, crm, op2)): \
		value = read_msr(reg_name);               \
		break;
	FEATURE_ID_REGISTERS_READ
#undef X
	default:
		/* Reserved registers should be read as zero (raz). */
		value = 0;
		dlog_notice(
			"Unsupported feature ID register read: "
			"op0=%d, op1=%d, crn=%d, crm=%d, op2=%d, rt=%d.\n",
			GET_ISS_OP0(esr), GET_ISS_OP1(esr), GET_ISS_CRN(esr),
			GET_ISS_CRM(esr), GET_ISS_OP2(esr), GET_ISS_RT(esr));
		break;
	}

	/* Mask values for features Hafnium might restrict. */
	switch (sys_register) {
	case ID_AA64MMFR1_EL1_ENC:
		value &= vm->arch.tid3_masks.id_aa64mmfr1_el1;
		break;
	case ID_AA64PFR0_EL1_ENC:
		value &= vm->arch.tid3_masks.id_aa64pfr0_el1;
		break;
	case ID_AA64DFR0_EL1_ENC:
		value &= vm->arch.tid3_masks.id_aa64dfr0_el1;
		break;
	case ID_AA64ISAR1_EL1_ENC:
		value &= vm->arch.tid3_masks.id_aa64isar1_el1;
		break;
	default:
		break;
	}

	if (rt_register != RT_REG_XZR) {
		vcpu->regs.r[rt_register] = value;
	}

	return true;
}