1 /*
2 * Copyright 2018 The Hafnium Authors.
3 *
4 * Use of this source code is governed by a BSD-style
5 * license that can be found in the LICENSE file or at
6 * https://opensource.org/licenses/BSD-3-Clause.
7 */
8
9 #include "hf/arch/cpu.h"
10
11 #include <stddef.h>
12 #include <stdint.h>
13
14 #include "hf/arch/gicv3.h"
15 #include "hf/arch/host_timer.h"
16 #include "hf/arch/plat/psci.h"
17 #include "hf/arch/types.h"
18
19 #include "hf/addr.h"
20 #include "hf/check.h"
21 #include "hf/ffa.h"
22 #include "hf/hf_ipi.h"
23 #include "hf/plat/interrupts.h"
24 #include "hf/std.h"
25 #include "hf/vm.h"
26
27 #include "feature_id.h"
28 #include "msr.h"
29 #include "perfmon.h"
30 #include "plat/prng/prng.h"
31 #include "sysregs.h"
32
#if BRANCH_PROTECTION

/*
 * Pointer authentication (FEAT_PAuth) instruction key A used at EL2.
 * NOTE(review): presumably seeded during boot elsewhere — confirm against
 * the PAuth setup code.
 */
__uint128_t pauth_apia_key;

#endif

#if ENABLE_MTE

/* MTE hypervisor seed. */
uintptr_t mte_seed;

#endif

/**
 * The LO field indicates whether LORegions are supported.
 */
#define ID_AA64MMFR1_EL1_LO (UINT64_C(1) << 16)
50
lor_disable(void)51 static void lor_disable(void)
52 {
53 #if SECURE_WORLD == 0
54 /*
55 * Accesses to LORC_EL1 are undefined if LORegions are not supported.
56 */
57 if (read_msr(ID_AA64MMFR1_EL1) & ID_AA64MMFR1_EL1_LO) {
58 write_msr(MSR_LORC_EL1, 0);
59 }
60 #endif
61 }
62
/**
 * Initializes the GIC-related register state held in the vCPU context.
 * The primary VM gets direct EL1 access to the GIC system registers;
 * secondary VMs have those accesses trapped.
 */
static void gic_regs_reset(struct arch_regs *r, bool is_primary)
{
	(void)r;
	(void)is_primary;

#if GIC_VERSION == 3 || GIC_VERSION == 4
	/* SRE: enable ICH_*/ICC_* at EL2; DIB/DFB: disable IRQ/FIQ bypass. */
	uint32_t sre = (1U << 0) | (0x3 << 1);
	uint32_t hcr;

	if (is_primary) {
		/* Enable EL1 access to ICC_SRE_EL1. */
		sre |= 1U << 3;
		hcr = 0;
	} else {
		/* TDIR, TSEI, TALL1, TALL0, TC: trap EL1 GICv3 sysregs. */
		hcr = 0x1fU << 10;
	}

	r->gic.ich_hcr_el2 = hcr;
	r->gic.icc_sre_el2 = sre;
#endif
}
85
/**
 * Seeds fresh pointer-authentication APIA keys for an EL0 partition from
 * the platform PRNG. No-op when PAuth is unsupported or branch protection
 * is compiled out.
 */
static void pauth_el0_keys_reset(struct arch_regs *r)
{
	(void)r;

#if BRANCH_PROTECTION
	if (!is_arch_feat_pauth_supported()) {
		return;
	}

	__uint128_t el0_key = plat_prng_get_number();

	/* Split the 128-bit key across the lo/hi key register pair. */
	r->pac.apiakeylo_el1 = (uint64_t)(el0_key & UINT64_MAX);
	r->pac.apiakeyhi_el1 = (uint64_t)(el0_key >> 64);
#endif
}
100
/**
 * Resets the architected register state of the given vCPU to boot-time
 * values: wipes the whole register file (preserving only the entry point
 * in pc and the boot argument in x0), then reprograms EL2 control, traps,
 * stage-1/stage-2 translation bases and GIC state according to the VM
 * type (primary vs. secondary, EL0 vs. EL1 partition, secure world or
 * not).
 */
void arch_regs_reset(struct vcpu *vcpu)
{
	ffa_id_t vm_id = vcpu->vm->id;
	bool is_primary = vm_is_primary(vcpu->vm);
	/* Primary vCPUs are pinned: use the physical CPU id; others use
	 * their index within the VM. */
	cpu_id_t vcpu_id = is_primary ? vcpu->cpu->id : vcpu_index(vcpu);

	paddr_t table = pa_init((uintpaddr_t)vcpu->vm->ptable.root_tables);
	struct arch_regs *r = &vcpu->regs;
	/* Save pc/x0 so they survive the memset below. */
	uintreg_t pc = r->pc;
	uintreg_t arg = r->r[0];
	uintreg_t cnthctl;

	memset_s(r, sizeof(*r), 0, sizeof(*r));

	r->pc = pc;
	r->r[0] = arg;

	cnthctl = 0;

	/*
	 * EL0PTEN = 0: Trap EL0 access to physical timer registers.
	 * EL0PCTEN = 1: Don't trap EL0 access to physical counter and
	 * frequency register.
	 * EL1PCEN = 0: Trap EL1 access to physical timer registers.
	 * EL1PCTEN = 1: Don't trap EL1 access to physical counter.
	 */
	if (vcpu->vm->el0_partition) {
		cnthctl |= CNTHCTL_EL2_VHE_EL0PCTEN;
	} else {
		cnthctl |= CNTHCTL_EL2_VHE_EL1PCTEN;
	}

	r->hyp_state.cptr_el2 = get_cptr_el2_value();
	if (is_primary) {
		/* Do not trap FPU/Adv. SIMD/SVE/SME in the primary VM. */
		if (has_vhe_support()) {
			r->hyp_state.cptr_el2 |=
				(CPTR_EL2_VHE_ZEN | CPTR_EL2_VHE_FPEN |
				 CPTR_EL2_SME_VHE_SMEN);
		} else {
			r->hyp_state.cptr_el2 &=
				~(CPTR_EL2_TFP | CPTR_EL2_TZ | CPTR_EL2_TSM);
		}
	}

	r->hyp_state.hcr_el2 =
		get_hcr_el2_value(vm_id, vcpu->vm->el0_partition);
	r->hyp_state.sctlr_el2 = get_sctlr_el2_value(vcpu->vm->el0_partition);
	r->lazy.cnthctl_el2 = cnthctl;
	if (vcpu->vm->el0_partition) {
		pauth_el0_keys_reset(r);

		/* EL0 partitions are only supported with VHE. */
		CHECK(has_vhe_support());
		/*
		 * AArch64 hafnium only uses 8 bit ASIDs at the moment.
		 * TCR_EL2.AS is set to 0, and per the Arm ARM, the upper 8 bits
		 * are ignored and treated as 0. There is no need to mask the
		 * VMID (used as asid) to only 8 bits.
		 */
		r->hyp_state.ttbr0_el2 =
			pa_addr(table) | ((uint64_t)vm_id << 48);
		r->spsr = PSR_PE_MODE_EL0T;
	} else {
		/* EL1 partitions run under the hypervisor's own stage-1. */
		r->hyp_state.ttbr0_el2 = read_msr(ttbr0_el2);
		r->lazy.vtcr_el2 = arch_mm_get_vtcr_el2();
#if SECURE_WORLD == 0
		/*
		 * For a VM managed by the Hypervisor a single set
		 * of NS S2 PT exists.
		 * vttbr_el2 points to the single S2 root PT.
		 */
		r->lazy.vttbr_el2 = pa_addr(table) | ((uint64_t)vm_id << 48);
#else
		/*
		 * For a SP managed by the SPMC both sets of NS and secure
		 * S2 PTs exist.
		 * vttbr_el2 points to the NS S2 root PT.
		 * vsttbr_el2 points to secure S2 root PT.
		 */
		r->lazy.vttbr_el2 =
			(uintpaddr_t)(vcpu->vm->arch.ptable_ns.root_tables) |
			((uint64_t)vm_id << 48);
		r->lazy.vstcr_el2 = arch_mm_get_vstcr_el2();
		r->lazy.vsttbr_el2 = pa_addr(table);
#endif

		/* Virtual MPIDR: CPU id for the primary VM, vCPU index
		 * otherwise (see vcpu_id above). */
		r->lazy.vmpidr_el2 = vcpu_id;
		/* Mask (disable) interrupts and run in EL1h mode. */
		r->spsr = PSR_D | PSR_A | PSR_I | PSR_F | PSR_PE_MODE_EL1H;

		r->lazy.mdcr_el2 = get_mdcr_el2_value();

		/*
		 * NOTE: It is important that MDSCR_EL1.MDE (bit 15) is set to 0
		 * for secondary VMs as long as Hafnium does not support debug
		 * register access for secondary VMs. If adding Hafnium support
		 * for secondary VM debug register accesses, then on context
		 * switches Hafnium needs to save/restore EL1 debug register
		 * state that either might change, or that needs to be
		 * protected.
		 */
		r->lazy.mdscr_el1 = 0x0U & ~(0x1U << 15);

		/* Disable cycle counting on initialization. */
		r->lazy.pmccfiltr_el0 =
			perfmon_get_pmccfiltr_el0_init_value(vm_id);

		/* Set feature-specific register values. */
		feature_set_traps(vcpu->vm, r);
	}

	gic_regs_reset(r, is_primary);
}
214
/**
 * Sets the vCPU's entry point and the argument delivered in x0.
 */
void arch_regs_set_pc_arg(struct arch_regs *r, ipaddr_t pc, uintreg_t arg)
{
	r->r[0] = arg;
	r->pc = ipa_addr(pc);
}
220
arch_regs_reg_num_valid(const unsigned int gp_reg_num)221 bool arch_regs_reg_num_valid(const unsigned int gp_reg_num)
222 {
223 return gp_reg_num < NUM_GP_REGS;
224 }
225
arch_regs_set_gp_reg(struct arch_regs * r,const uintreg_t value,const unsigned int gp_reg_num)226 void arch_regs_set_gp_reg(struct arch_regs *r, const uintreg_t value,
227 const unsigned int gp_reg_num)
228 {
229 assert(arch_regs_reg_num_valid(gp_reg_num));
230 r->r[gp_reg_num] = value;
231 }
232
arch_regs_set_retval(struct arch_regs * r,struct ffa_value v)233 void arch_regs_set_retval(struct arch_regs *r, struct ffa_value v)
234 {
235 r->r[0] = v.func;
236 r->r[1] = v.arg1;
237 r->r[2] = v.arg2;
238 r->r[3] = v.arg3;
239 r->r[4] = v.arg4;
240 r->r[5] = v.arg5;
241 r->r[6] = v.arg6;
242 r->r[7] = v.arg7;
243
244 if (v.extended_val.valid) {
245 r->r[8] = v.extended_val.arg8;
246 r->r[9] = v.extended_val.arg9;
247 r->r[10] = v.extended_val.arg10;
248 r->r[11] = v.extended_val.arg11;
249 r->r[12] = v.extended_val.arg12;
250 r->r[13] = v.extended_val.arg13;
251 r->r[14] = v.extended_val.arg14;
252 r->r[15] = v.extended_val.arg15;
253 r->r[16] = v.extended_val.arg16;
254 r->r[17] = v.extended_val.arg17;
255 }
256 }
257
arch_regs_get_args_ext(struct arch_regs * regs)258 static struct ffa_value arch_regs_get_args_ext(struct arch_regs *regs)
259 {
260 return (struct ffa_value){
261 .func = regs->r[0],
262 .arg1 = regs->r[1],
263 .arg2 = regs->r[2],
264 .arg3 = regs->r[3],
265 .arg4 = regs->r[4],
266 .arg5 = regs->r[5],
267 .arg6 = regs->r[6],
268 .arg7 = regs->r[7],
269 .extended_val.valid = true,
270 .extended_val.arg8 = regs->r[8],
271 .extended_val.arg9 = regs->r[9],
272 .extended_val.arg10 = regs->r[10],
273 .extended_val.arg11 = regs->r[11],
274 .extended_val.arg12 = regs->r[12],
275 .extended_val.arg13 = regs->r[13],
276 .extended_val.arg14 = regs->r[14],
277 .extended_val.arg15 = regs->r[15],
278 .extended_val.arg16 = regs->r[16],
279 .extended_val.arg17 = regs->r[17],
280 };
281 }
282
arch_regs_get_args(struct arch_regs * regs)283 struct ffa_value arch_regs_get_args(struct arch_regs *regs)
284 {
285 uint32_t func_id = regs->r[0];
286
287 if (func_id == FFA_MSG_SEND_DIRECT_REQ2_64 ||
288 func_id == FFA_MSG_SEND_DIRECT_RESP2_64 ||
289 (func_id == FFA_CONSOLE_LOG_64 &&
290 FFA_VERSION_1_2 <= FFA_VERSION_COMPILED)) {
291 return arch_regs_get_args_ext(regs);
292 }
293
294 return (struct ffa_value){
295 .func = func_id,
296 .arg1 = regs->r[1],
297 .arg2 = regs->r[2],
298 .arg3 = regs->r[3],
299 .arg4 = regs->r[4],
300 .arg5 = regs->r[5],
301 .arg6 = regs->r[6],
302 .arg7 = regs->r[7],
303 .extended_val.valid = false,
304 };
305 }
306
/**
 * Performs per-CPU architectural initialization: disables LORegions,
 * programs the EL2 trap controls and virtual counter offset, and brings
 * up the interrupt controller, the physical timer interrupt and IPIs for
 * the current core. The ordering here matters (see the isb() barrier).
 */
void arch_cpu_init(struct cpu *c)
{
	/*
	 * Linux expects LORegions to be disabled, hence if the current system
	 * supports them, Hafnium ensures that they are disabled.
	 */
	lor_disable();

	write_msr(CPTR_EL2, get_cptr_el2_value());

	/* Initialize counter-timer virtual offset register to 0. */
	write_msr(CNTVOFF_EL2, 0);
	/* Ensure the MSR writes above take effect before continuing. */
	isb();

	plat_interrupts_controller_hw_init(c);

	/*
	 * Initialize the interrupt associated with S-EL2 physical timer for
	 * running core.
	 */
	host_timer_init();

	/* Initialise IPIs for the current cpu. */
	hf_ipi_init_interrupt();
}
332
/**
 * Returns the vCPU that the platform PSCI layer selects to run on the
 * given CPU when it resumes.
 */
struct vcpu *arch_vcpu_resume(struct cpu *c)
{
	struct vcpu *next = plat_psci_cpu_resume(c);

	return next;
}
337
arch_affinity_to_core_pos(uint64_t reg)338 uint32_t arch_affinity_to_core_pos(uint64_t reg)
339 {
340 struct cpu *this_cpu;
341 uint32_t core_id;
342
343 this_cpu = cpu_find(reg & MPIDR_AFFINITY_MASK);
344
345 if (this_cpu == NULL) {
346 /*
347 * There might be holes in all redistributor frames (some CPUs
348 * don't exist). For these CPUs, return MAX_CPUS, so that the
349 * caller has a chance to recover.
350 */
351 core_id = MAX_CPUS;
352 } else {
353 core_id = cpu_index(this_cpu);
354 }
355
356 return core_id;
357 }
358
arch_find_core_pos(void)359 uint32_t arch_find_core_pos(void)
360 {
361 uint32_t core_id;
362
363 core_id = arch_affinity_to_core_pos(read_msr(MPIDR_EL1));
364 CHECK(core_id < MAX_CPUS);
365
366 return core_id;
367 }
368