1 /*
2 * Copyright 2018 The Hafnium Authors.
3 *
4 * Use of this source code is governed by a BSD-style
5 * license that can be found in the LICENSE file or at
6 * https://opensource.org/licenses/BSD-3-Clause.
7 */
8
9 #include "hf/arch/cpu.h"
10
11 #include <stdbool.h>
12 #include <stddef.h>
13 #include <stdint.h>
14
15 #include "hf/arch/plat/psci.h"
16
17 #include "hf/addr.h"
18 #include "hf/check.h"
19 #include "hf/ffa.h"
20 #include "hf/plat/interrupts.h"
21 #include "hf/std.h"
22 #include "hf/vm.h"
23
24 #include "feature_id.h"
25 #include "msr.h"
26 #include "perfmon.h"
27 #include "sysregs.h"
28
29 #if BRANCH_PROTECTION
30
31 __uint128_t pauth_apia_key;
32
33 #endif
34
35 #if ENABLE_MTE
36
37 /* MTE hypervisor seed. */
38 uintptr_t mte_seed;
39
40 #endif
41
42 /**
43 * The LO field indicates whether LORegions are supported.
44 */
45 #define ID_AA64MMFR1_EL1_LO (UINT64_C(1) << 16)
46
/**
 * Disables LORegions (Limited Ordering Regions) on the current CPU by
 * clearing LORC_EL1, when the feature is implemented.
 *
 * Linux expects LORegions to be disabled at boot (see arch_cpu_init), so
 * this must run on every CPU before handing control to the primary VM.
 */
static void lor_disable(void)
{
#if SECURE_WORLD == 0
	/*
	 * Accesses to LORC_EL1 are undefined if LORegions are not supported.
	 * ID_AA64MMFR1_EL1.LO reports whether the feature is present.
	 */
	if (read_msr(ID_AA64MMFR1_EL1) & ID_AA64MMFR1_EL1_LO) {
		write_msr(MSR_LORC_EL1, 0);
	}
#endif
}
58
/**
 * Resets the saved GIC CPU-interface control registers for a vCPU.
 * Compiled in only for GICv3/GICv4 builds; a no-op otherwise.
 *
 * The primary VM is granted direct access to the GIC system registers,
 * whereas secondary VMs have their accesses trapped to the hypervisor.
 */
static void gic_regs_reset(struct arch_regs *r, bool is_primary)
{
#if GIC_VERSION == 3 || GIC_VERSION == 4
	uint32_t ich_hcr = 0;
	uint32_t icc_sre_el2 =
		(1U << 0) | /* SRE, enable ICH_* and ICC_* at EL2. */
		(0x3 << 1); /* DIB and DFB, disable IRQ/FIQ bypass. */

	if (is_primary) {
		icc_sre_el2 |= 1U << 3; /* Enable EL1 access to ICC_SRE_EL1. */
	} else {
		/* Trap EL1 access to GICv3 system registers. */
		ich_hcr =
			(0x1fU << 10); /* TDIR, TSEI, TALL1, TALL0, TC bits. */
	}
	r->gic.ich_hcr_el2 = ich_hcr;
	r->gic.icc_sre_el2 = icc_sre_el2;
#endif
}
78
/**
 * Resets the architectural register state of the given vCPU to its boot
 * values. Only the entry point (pc) and the boot argument (r[0]), which
 * were programmed beforehand via arch_regs_set_pc_arg, survive the reset.
 */
void arch_regs_reset(struct vcpu *vcpu)
{
	ffa_vm_id_t vm_id = vcpu->vm->id;
	bool is_primary = vm_id == HF_PRIMARY_VM_ID;
	/*
	 * Primary-VM vCPUs are pinned to physical CPUs, so they report the
	 * physical CPU id; secondary vCPUs report their index within the VM.
	 */
	cpu_id_t vcpu_id = is_primary ? vcpu->cpu->id : vcpu_index(vcpu);

	paddr_t table = vcpu->vm->ptable.root;
	struct arch_regs *r = &vcpu->regs;
	uintreg_t pc = r->pc;
	uintreg_t arg = r->r[0];
	uintreg_t cnthctl;

	/* Zero everything, then restore the entry point and boot argument. */
	memset_s(r, sizeof(*r), 0, sizeof(*r));

	r->pc = pc;
	r->r[0] = arg;

	cnthctl = 0;

	if (is_primary) {
		/*
		 * cnthctl_el2 is redefined when VHE is enabled.
		 * EL1PCTEN, don't trap phys cnt access.
		 * EL1PCEN, don't trap phys timer access.
		 */
		if (has_vhe_support()) {
			cnthctl |= (1U << 10) | (1U << 11);
		} else {
			cnthctl |= (1U << 0) | (1U << 1);
		}
	}

	r->hyp_state.hcr_el2 =
		get_hcr_el2_value(vm_id, vcpu->vm->el0_partition);
	r->hyp_state.sctlr_el2 = get_sctlr_el2_value(vcpu->vm->el0_partition);
	r->lazy.cnthctl_el2 = cnthctl;
	if (vcpu->vm->el0_partition) {
		/* EL0 partitions (S-EL0/EL0 apps) require VHE. */
		CHECK(has_vhe_support());
		/*
		 * AArch64 hafnium only uses 8 bit ASIDs at the moment.
		 * TCR_EL2.AS is set to 0, and per the Arm ARM, the upper 8 bits
		 * are ignored and treated as 0. There is no need to mask the
		 * VMID (used as asid) to only 8 bits.
		 */
		r->hyp_state.ttbr0_el2 =
			pa_addr(table) | ((uint64_t)vm_id << 48);
		r->spsr = PSR_PE_MODE_EL0T;
	} else {
		/* Full VMs: keep the hypervisor's own EL2 translation table. */
		r->hyp_state.ttbr0_el2 = read_msr(ttbr0_el2);
		r->lazy.vtcr_el2 = arch_mm_get_vtcr_el2();
		/* Stage-2 table base, with the VM id used as the VMID field. */
		r->lazy.vttbr_el2 = pa_addr(table) | ((uint64_t)vm_id << 48);
#if SECURE_WORLD == 1
		r->lazy.vstcr_el2 = arch_mm_get_vstcr_el2();
		r->lazy.vsttbr_el2 = pa_addr(table);
#endif
		r->lazy.vmpidr_el2 = vcpu_id;
		/* Mask (disable) interrupts and run in EL1h mode. */
		r->spsr = PSR_D | PSR_A | PSR_I | PSR_F | PSR_PE_MODE_EL1H;

		r->lazy.mdcr_el2 = get_mdcr_el2_value();

		/*
		 * NOTE: It is important that MDSCR_EL1.MDE (bit 15) is set to 0
		 * for secondary VMs as long as Hafnium does not support debug
		 * register access for secondary VMs. If adding Hafnium support
		 * for secondary VM debug register accesses, then on context
		 * switches Hafnium needs to save/restore EL1 debug register
		 * state that either might change, or that needs to be
		 * protected.
		 */
		r->lazy.mdscr_el1 = 0x0U & ~(0x1U << 15);

		/* Disable cycle counting on initialization. */
		r->lazy.pmccfiltr_el0 =
			perfmon_get_pmccfiltr_el0_init_value(vm_id);

		/* Set feature-specific register values. */
		feature_set_traps(vcpu->vm, r);
	}

	gic_regs_reset(r, is_primary);
}
161
/**
 * Programs the entry point and the single boot/resume argument for a vCPU:
 * the saved program counter is taken from the given IPA and the argument
 * is placed in the first general purpose register (x0).
 */
void arch_regs_set_pc_arg(struct arch_regs *r, ipaddr_t pc, uintreg_t arg)
{
	/* x0 carries the argument when the vCPU starts executing. */
	r->r[0] = arg;
	r->pc = ipa_addr(pc);
}
167
arch_regs_reg_num_valid(const unsigned int gp_reg_num)168 bool arch_regs_reg_num_valid(const unsigned int gp_reg_num)
169 {
170 return gp_reg_num < NUM_GP_REGS;
171 }
172
/**
 * Sets one saved general purpose register of a vCPU context.
 * The register index must already have been validated with
 * arch_regs_reg_num_valid(); this is only re-checked via assert.
 */
void arch_regs_set_gp_reg(struct arch_regs *r, const uintreg_t value,
			  const unsigned int gp_reg_num)
{
	assert(arch_regs_reg_num_valid(gp_reg_num));
	r->r[gp_reg_num] = value;
}
179
arch_regs_set_retval(struct arch_regs * r,struct ffa_value v)180 void arch_regs_set_retval(struct arch_regs *r, struct ffa_value v)
181 {
182 r->r[0] = v.func;
183 r->r[1] = v.arg1;
184 r->r[2] = v.arg2;
185 r->r[3] = v.arg3;
186 r->r[4] = v.arg4;
187 r->r[5] = v.arg5;
188 r->r[6] = v.arg6;
189 r->r[7] = v.arg7;
190 }
191
arch_regs_get_args(struct arch_regs * regs)192 struct ffa_value arch_regs_get_args(struct arch_regs *regs)
193 {
194 return (struct ffa_value){
195 .func = regs->r[0],
196 .arg1 = regs->r[1],
197 .arg2 = regs->r[2],
198 .arg3 = regs->r[3],
199 .arg4 = regs->r[4],
200 .arg5 = regs->r[5],
201 .arg6 = regs->r[6],
202 .arg7 = regs->r[7],
203 };
204 }
205
/*
 * Returns the SVE implemented VL in bytes (constrained by ZCR_EL3.LEN).
 * Must only be called when SVE is supported (see is_arch_feat_sve_supported
 * in arch_cpu_init).
 */
static uint64_t arch_cpu_sve_len_get(void)
{
	uint64_t vl;

	/* RDVL with multiplier #1 reads the current vector length in bytes. */
	__asm__ volatile(
		".arch_extension sve;"
		"rdvl %0, #1;"
		".arch_extension nosve;"
		: "=r"(vl));

	return vl;
}
219
/**
 * Discovers the maximum SVE vector length permitted by EL3 and programs
 * ZCR_EL2.LEN so that EL2 and lower ELs are constrained to that value.
 */
static void arch_cpu_sve_configure_sve_vector_length(void)
{
	uint64_t vl_bits;
	uint32_t zcr_len;

	/*
	 * Set ZCR_EL2.LEN to the maximum vector length permitted by the
	 * architecture which applies to EL2 and lower ELs (limited by the
	 * HW implementation).
	 * This is done so that the VL read by arch_cpu_sve_len_get isn't
	 * constrained by EL2 and thus indirectly retrieves the value
	 * constrained by EL3 which applies to EL3 and lower ELs (limited by
	 * the HW implementation).
	 */
	write_msr(MSR_ZCR_EL2, ZCR_LEN_MAX);
	isb();

	/* Convert the reported VL from bytes to bits (<< 3). */
	vl_bits = arch_cpu_sve_len_get() << 3;
	/* ZCR LEN encodes the VL in units of 128 bits, minus one. */
	zcr_len = (vl_bits >> 7) - 1;

	/*
	 * Set ZCR_EL2.LEN to the discovered value which constrains the VL at
	 * EL2 and lower ELs to the value set by EL3.
	 */
	write_msr(MSR_ZCR_EL2, zcr_len & ZCR_LEN_MASK);
	isb();
}
247
/**
 * Performs per-CPU architectural initialisation: resumes the CPU through
 * the platform PSCI hook, disables LORegions, programs EL2 trap controls
 * and the virtual counter offset, configures the SVE vector length when
 * SVE is present, and initialises the interrupt controller.
 */
void arch_cpu_init(struct cpu *c, ipaddr_t entry_point)
{
	plat_psci_cpu_resume(c, entry_point);

	/*
	 * Linux expects LORegions to be disabled, hence if the current system
	 * supports them, Hafnium ensures that they are disabled.
	 */
	lor_disable();

	write_msr(CPTR_EL2, get_cptr_el2_value());

	/* Initialize counter-timer virtual offset register to 0. */
	write_msr(CNTVOFF_EL2, 0);
	isb();

	if (is_arch_feat_sve_supported()) {
		arch_cpu_sve_configure_sve_vector_length();
	}

	plat_interrupts_controller_hw_init(c);
}
270