1 /*
2  * Copyright 2019 The Hafnium Authors.
3  *
4  * Use of this source code is governed by a BSD-style
5  * license that can be found in the LICENSE file or at
6  * https://opensource.org/licenses/BSD-3-Clause.
7  */
8 
9 #include "feature_id.h"
10 
11 #include "hf/check.h"
12 #include "hf/dlog.h"
13 #include "hf/types.h"
14 #include "hf/vm.h"
15 
16 #include "msr.h"
17 #include "sysregs.h"
18 
19 /* clang-format off */
20 
21 /**
22  * Definitions of read-only feature ID (group 3) registers' encodings.
23  * See Arm Architecture Reference Manual Armv8-A, Table D1-52 and D12-2.
24  * NAME, op0, op1, crn, crm, op2
25  */
26 #define FEATURE_ID_REGISTERS_READ              \
27 	X(ID_PFR0_EL1       , 3, 0,  0,  1, 0) \
28 	X(ID_PFR1_EL1       , 3, 0,  0,  1, 1) \
29 	X(ID_DFR0_EL1       , 3, 0,  0,  1, 2) \
30 	X(ID_AFR0_EL1       , 3, 0,  0,  1, 3) \
31 	X(ID_MMFR0_EL1      , 3, 0,  0,  1, 4) \
32 	X(ID_MMFR1_EL1      , 3, 0,  0,  1, 5) \
33 	X(ID_MMFR2_EL1      , 3, 0,  0,  1, 6) \
34 	X(ID_MMFR3_EL1      , 3, 0,  0,  1, 7) \
35 	X(ID_ISAR0_EL1      , 3, 0,  0,  2, 0) \
36 	X(ID_ISAR1_EL1      , 3, 0,  0,  2, 1) \
37 	X(ID_ISAR2_EL1      , 3, 0,  0,  2, 2) \
38 	X(ID_ISAR3_EL1      , 3, 0,  0,  2, 3) \
39 	X(ID_ISAR4_EL1      , 3, 0,  0,  2, 4) \
40 	X(ID_ISAR5_EL1      , 3, 0,  0,  2, 5) \
41 	X(ID_MMFR4_EL1      , 3, 0,  0,  2, 6) \
42 	\
43 	X(MVFR0_EL1         , 3, 0,  0,  3, 0) \
44 	X(MVFR1_EL1         , 3, 0,  0,  3, 1) \
45 	X(MVFR2_EL1         , 3, 0,  0,  3, 2) \
46 	\
47 	X(ID_AA64PFR0_EL1   , 3, 0,  0,  4, 0) \
48 	X(ID_AA64PFR1_EL1   , 3, 0,  0,  4, 1) \
49 	\
50 	X(ID_AA64DFR0_EL1   , 3, 0,  0,  5, 0) \
51 	X(ID_AA64DFR1_EL1   , 3, 0,  0,  5, 1) \
52 	\
53 	X(ID_AA64AFR0_EL1   , 3, 0,  0,  5, 4) \
54 	X(ID_AA64AFR1_EL1   , 3, 0,  0,  5, 5) \
55 	\
56 	X(ID_AA64ISAR0_EL1  , 3, 0,  0,  6, 0) \
57 	X(ID_AA64ISAR1_EL1  , 3, 0,  0,  6, 1) \
58 	\
59 	X(ID_AA64MMFR0_EL1  , 3, 0,  0,  7, 0) \
60 	X(ID_AA64MMFR1_EL1  , 3, 0,  0,  7, 1) \
61 	X(ID_AA64MMFR2_EL1  , 3, 0,  0,  7, 2)
62 
63 /* clang-format on */
64 
/*
 * Expand the register table into named ISS encoding constants
 * (e.g. ID_AA64MMFR1_EL1_ENC) used to match the system register field of a
 * trapped ESR value below.
 */
enum {
#define X(reg_name, op0, op1, crn, crm, op2) \
	reg_name##_ENC = GET_ISS_ENCODING(op0, op1, crn, crm, op2),
	FEATURE_ID_REGISTERS_READ
#undef X
};
71 
72 /**
73  * Returns true if the ESR register shows an access to a feature ID group 3
74  * register.
75  */
feature_id_is_register_access(uintreg_t esr)76 bool feature_id_is_register_access(uintreg_t esr)
77 {
78 	uintreg_t op0 = GET_ISS_OP0(esr);
79 	uintreg_t op1 = GET_ISS_OP1(esr);
80 	uintreg_t crn = GET_ISS_CRN(esr);
81 	uintreg_t crm = GET_ISS_CRM(esr);
82 
83 	/* From the Arm Architecture Reference Manual Table D12-2. */
84 	return op0 == 3 && op1 == 0 && crn == 0 && crm >= 1 && crm <= 7;
85 }
86 
87 /**
88  * RAS-related. RES0 when RAS is not implemented.
89  */
90 #define ID_AA64MMFR1_EL1_SPEC_SEI (UINT64_C(0xf) << 24)
91 
92 /**
93  * Indicates support for LORegions.
94  */
95 #define ID_AA64MMFR1_EL1_LO (UINT64_C(0xf) << 24)
96 
97 /**
98  * RAS Extension version.
99  */
100 #define ID_AA64PFR0_EL1_RAS (UINT64_C(0xf) << 28)
101 
102 /**
103  * Activity Monitor Unit.
104  */
105 #define ID_AA64PFR0_EL1_AMU (UINT64_C(0xf) << 44)
106 
107 /**
108  * Self-hosted Trace Extension Version
109  */
110 #define ID_AA64DFR0_EL1_TRACE_FILT (UINT64_C(0xf) << 40)
111 
112 /**
113  * OS Double Lock implemented.
114  */
115 #define ID_AA64DFR0_EL1_DOUBLE_LOCK (UINT64_C(0xf) << 36)
116 
117 /**
118  * Statistical Profiling Extension version.
119  */
120 #define ID_AA64DFR0_EL1_PMS_VER (UINT64_C(0xf) << 32)
121 
122 /**
123  * Performance Monitors Extension version.
124  */
125 #define ID_AA64DFR0_EL1_PMU_VER (UINT64_C(0xf) << 8)
126 
127 /**
128  * Indicates whether System register interface to trace unit is implemented.
129  */
130 #define ID_AA64DFR0_EL1_TRACE_VER (UINT64_C(0xf) << 4)
131 
132 /**
133  * Debug architecture version.
134  */
135 #define ID_AA64DFR0_EL1_DEBUG_VER (UINT64_C(0xf))
136 
137 /**
138  * PAuth: whether an implementation defined algorithm for generic code
139  * authentication is implemented.
140  */
141 #define ID_AA64ISAR1_EL1_GPI (UINT64_C(0xf) << 28)
142 
143 /**
144  * PAuth: whether QARMA or Architected algorithm for generic code authentication
145  * is implemented.
146  */
147 #define ID_AA64ISAR1_EL1_GPA (UINT64_C(0xf) << 24)
148 
149 /**
150  * PAuth: whether an implementation defined algorithm for address authentication
151  * is implemented.
152  */
153 #define ID_AA64ISAR1_EL1_API (UINT64_C(0xf) << 8)
154 
155 /**
156  * PAuth: whether QARMA or Architected algorithm for address authentication is
157  * implemented.
158  */
159 #define ID_AA64ISAR1_EL1_APA (UINT64_C(0xf) << 24)
160 
feature_set_traps(struct vm * vm,struct arch_regs * regs)161 void feature_set_traps(struct vm *vm, struct arch_regs *regs)
162 {
163 	arch_features_t features = vm->arch.trapped_features;
164 
165 	if (features & ~HF_FEATURE_ALL) {
166 		panic("features has undefined bits 0x%x", features);
167 	}
168 
169 	/* By default do not mask out any features. */
170 	vm->arch.tid3_masks.id_aa64mmfr1_el1 = ~0ULL;
171 	vm->arch.tid3_masks.id_aa64pfr0_el1 = ~0ULL;
172 	vm->arch.tid3_masks.id_aa64pfr1_el1 = ~0ULL;
173 	vm->arch.tid3_masks.id_aa64dfr0_el1 = ~0ULL;
174 	vm->arch.tid3_masks.id_aa64isar1_el1 = ~0ULL;
175 
176 	/*
177 	 * Always mask VHE feature. No nested virualization support at this
178 	 * point so there is no need to expose VHE to guests.
179 	 */
180 	vm->arch.tid3_masks.id_aa64mmfr1_el1 &=
181 		~(ID_AA64MMFR1_EL1_VH_MASK << ID_AA64MMFR1_EL1_VH_SHIFT);
182 
183 	if (features & HF_FEATURE_SVE) {
184 		vm->arch.tid3_masks.id_aa64pfr0_el1 &= ~(
185 			ID_AA64PFR0_EL1_SVE_MASK << ID_AA64PFR0_EL1_SVE_SHIFT);
186 	}
187 
188 	if (features & HF_FEATURE_SME) {
189 		vm->arch.tid3_masks.id_aa64pfr1_el1 &= ~(
190 			ID_AA64PFR1_EL1_SME_MASK << ID_AA64PFR1_EL1_SME_SHIFT);
191 	}
192 
193 	if (features & HF_FEATURE_RAS) {
194 		regs->hyp_state.hcr_el2 |= HCR_EL2_TERR;
195 		vm->arch.tid3_masks.id_aa64mmfr1_el1 &=
196 			~ID_AA64MMFR1_EL1_SPEC_SEI;
197 		vm->arch.tid3_masks.id_aa64pfr0_el1 &= ~ID_AA64PFR0_EL1_RAS;
198 	}
199 
200 	if (features & HF_FEATURE_SPE) {
201 		/*
202 		 * Trap VM accesses to Statistical Profiling Extension (SPE)
203 		 * registers.
204 		 */
205 		regs->lazy.mdcr_el2 |= MDCR_EL2_TPMS;
206 
207 		/*
208 		 * Set E2PB to 0b00. This ensures that accesses to Profiling
209 		 * Buffer controls at EL1 are trapped to EL2.
210 		 */
211 		regs->lazy.mdcr_el2 &= ~MDCR_EL2_E2PB;
212 
213 		vm->arch.tid3_masks.id_aa64dfr0_el1 &= ~ID_AA64DFR0_EL1_PMS_VER;
214 	}
215 
216 	if (features & HF_FEATURE_DEBUG) {
217 		regs->lazy.mdcr_el2 |=
218 			MDCR_EL2_TDRA | MDCR_EL2_TDOSA | MDCR_EL2_TDA;
219 
220 		vm->arch.tid3_masks.id_aa64dfr0_el1 &=
221 			~ID_AA64DFR0_EL1_DOUBLE_LOCK;
222 	}
223 
224 	if (features & HF_FEATURE_TRACE) {
225 		regs->lazy.mdcr_el2 |= MDCR_EL2_TTRF;
226 
227 		vm->arch.tid3_masks.id_aa64dfr0_el1 &=
228 			~ID_AA64DFR0_EL1_TRACE_FILT;
229 		vm->arch.tid3_masks.id_aa64dfr0_el1 &=
230 			~ID_AA64DFR0_EL1_TRACE_VER;
231 	}
232 
233 	if (features & HF_FEATURE_PERFMON) {
234 		regs->lazy.mdcr_el2 |= MDCR_EL2_TPM | MDCR_EL2_TPMCR;
235 
236 		vm->arch.tid3_masks.id_aa64dfr0_el1 &= ~ID_AA64DFR0_EL1_PMU_VER;
237 	}
238 
239 	if (features & HF_FEATURE_LOR) {
240 		regs->hyp_state.hcr_el2 |= HCR_EL2_TLOR;
241 
242 		vm->arch.tid3_masks.id_aa64mmfr1_el1 &= ~ID_AA64MMFR1_EL1_LO;
243 	}
244 
245 	if (features & HF_FEATURE_PAUTH) {
246 		/* APK and API bits *enable* trapping when cleared. */
247 		regs->hyp_state.hcr_el2 &= ~(HCR_EL2_APK | HCR_EL2_API);
248 
249 		vm->arch.tid3_masks.id_aa64isar1_el1 &= ~ID_AA64ISAR1_EL1_GPI;
250 		vm->arch.tid3_masks.id_aa64isar1_el1 &= ~ID_AA64ISAR1_EL1_GPA;
251 		vm->arch.tid3_masks.id_aa64isar1_el1 &= ~ID_AA64ISAR1_EL1_API;
252 		vm->arch.tid3_masks.id_aa64isar1_el1 &= ~ID_AA64ISAR1_EL1_APA;
253 	}
254 
255 	if (features & HF_FEATURE_AMU) {
256 		vm->arch.tid3_masks.id_aa64pfr0_el1 &= ~ID_AA64PFR0_EL1_AMU;
257 	}
258 }
259 
260 /**
261  * Processes an access (mrs) to a feature ID register.
262  * Returns true if the access was allowed and performed, false otherwise.
263  */
feature_id_process_access(struct vcpu * vcpu,uintreg_t esr)264 bool feature_id_process_access(struct vcpu *vcpu, uintreg_t esr)
265 {
266 	const struct vm *vm = vcpu->vm;
267 	uintreg_t sys_register = GET_ISS_SYSREG(esr);
268 	uintreg_t rt_register = GET_ISS_RT(esr);
269 	uintreg_t value;
270 
271 	/* +1 because Rt can access register XZR */
272 	CHECK(rt_register < NUM_GP_REGS + 1);
273 
274 	if (!ISS_IS_READ(esr)) {
275 		dlog_notice(
276 			"Unsupported feature ID register write: "
277 			"op0=%lu, op1=%lu, crn=%lu, crm=%lu, op2=%lu, "
278 			"rt=%lu.\n",
279 			GET_ISS_OP0(esr), GET_ISS_OP1(esr), GET_ISS_CRN(esr),
280 			GET_ISS_CRM(esr), GET_ISS_OP2(esr), GET_ISS_RT(esr));
281 		return true;
282 	}
283 
284 	switch (sys_register) {
285 #define X(reg_name, op0, op1, crn, crm, op2)              \
286 	case (GET_ISS_ENCODING(op0, op1, crn, crm, op2)): \
287 		value = read_msr(reg_name);               \
288 		break;
289 		FEATURE_ID_REGISTERS_READ
290 #undef X
291 	default:
292 		/* Reserved registers should be read as zero (raz). */
293 		value = 0;
294 		dlog_notice(
295 			"Unsupported feature ID register read: "
296 			"op0=%lu, op1=%lu, crn=%lu, crm=%lu, op2=%lu, "
297 			"rt=%lu.\n",
298 			GET_ISS_OP0(esr), GET_ISS_OP1(esr), GET_ISS_CRN(esr),
299 			GET_ISS_CRM(esr), GET_ISS_OP2(esr), GET_ISS_RT(esr));
300 		break;
301 	}
302 
303 	/* Mask values for features Hafnium might restrict. */
304 	switch (sys_register) {
305 	case ID_AA64MMFR1_EL1_ENC:
306 		value &= vm->arch.tid3_masks.id_aa64mmfr1_el1;
307 		break;
308 	case ID_AA64PFR0_EL1_ENC:
309 		value &= vm->arch.tid3_masks.id_aa64pfr0_el1;
310 		break;
311 	case ID_AA64PFR1_EL1_ENC:
312 		value &= vm->arch.tid3_masks.id_aa64pfr1_el1;
313 		break;
314 	case ID_AA64DFR0_EL1_ENC:
315 		value &= vm->arch.tid3_masks.id_aa64dfr0_el1;
316 		break;
317 	case ID_AA64ISAR1_EL1_ENC:
318 		value &= vm->arch.tid3_masks.id_aa64isar1_el1;
319 		break;
320 	default:
321 		break;
322 	}
323 
324 	if (rt_register != RT_REG_XZR) {
325 		vcpu->regs.r[rt_register] = value;
326 	}
327 
328 	return true;
329 }
330