// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012-2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM64_KVM_HYP_SYSREG_SR_H__
#define __ARM64_KVM_HYP_SYSREG_SR_H__

#include <linux/compiler.h>
#include <linux/kvm_host.h>

#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>

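/* Save the "common" state, currently only MDSCR_EL1. */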
static inline void __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
{
	ctxt_sys_reg(ctxt, MDSCR_EL1) = read_sysreg(mdscr_el1);
}

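/* Save the EL0 thread ID registers, used by userspace for TLS. */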
static inline void __sysreg_save_user_state(struct kvm_cpu_context *ctxt)
{
	ctxt_sys_reg(ctxt, TPIDR_EL0) = read_sysreg(tpidr_el0);
	ctxt_sys_reg(ctxt, TPIDRRO_EL0) = read_sysreg(tpidrro_el0);
}

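/*
 * Check whether the vCPU this context belongs to has MTE enabled.
 * A host context carries a pointer to the currently running vCPU;
 * a guest context is embedded in the vCPU itself, so recover it
 * with container_of().
 */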
static inline bool ctxt_has_mte(struct kvm_cpu_context *ctxt)
{
	struct kvm_vcpu *vcpu = ctxt->__hyp_running_vcpu;

	if (!vcpu)
		vcpu = container_of(ctxt, struct kvm_vcpu, arch.ctxt);

	return kvm_has_mte(kern_hyp_va(vcpu->kvm));
}

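/*
 * Save the EL1 system register state, including the EL1 exception
 * return state (SP_EL1, ELR_EL1, SPSR_EL1).
 */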
static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
{
	ctxt_sys_reg(ctxt, SCTLR_EL1) = read_sysreg_el1(SYS_SCTLR);
	ctxt_sys_reg(ctxt, CPACR_EL1) = read_sysreg_el1(SYS_CPACR);
	ctxt_sys_reg(ctxt, TTBR0_EL1) = read_sysreg_el1(SYS_TTBR0);
	ctxt_sys_reg(ctxt, TTBR1_EL1) = read_sysreg_el1(SYS_TTBR1);
	ctxt_sys_reg(ctxt, TCR_EL1) = read_sysreg_el1(SYS_TCR);
	ctxt_sys_reg(ctxt, ESR_EL1) = read_sysreg_el1(SYS_ESR);
	ctxt_sys_reg(ctxt, AFSR0_EL1) = read_sysreg_el1(SYS_AFSR0);
	ctxt_sys_reg(ctxt, AFSR1_EL1) = read_sysreg_el1(SYS_AFSR1);
	ctxt_sys_reg(ctxt, FAR_EL1) = read_sysreg_el1(SYS_FAR);
	ctxt_sys_reg(ctxt, MAIR_EL1) = read_sysreg_el1(SYS_MAIR);
	ctxt_sys_reg(ctxt, VBAR_EL1) = read_sysreg_el1(SYS_VBAR);
	ctxt_sys_reg(ctxt, CONTEXTIDR_EL1) = read_sysreg_el1(SYS_CONTEXTIDR);
	ctxt_sys_reg(ctxt, AMAIR_EL1) = read_sysreg_el1(SYS_AMAIR);
	ctxt_sys_reg(ctxt, CNTKCTL_EL1) = read_sysreg_el1(SYS_CNTKCTL);
	ctxt_sys_reg(ctxt, PAR_EL1) = read_sysreg_par();
	ctxt_sys_reg(ctxt, TPIDR_EL1) = read_sysreg(tpidr_el1);

	if (ctxt_has_mte(ctxt)) {
		ctxt_sys_reg(ctxt, TFSR_EL1) = read_sysreg_el1(SYS_TFSR);
		ctxt_sys_reg(ctxt, TFSRE0_EL1) = read_sysreg_s(SYS_TFSRE0_EL1);
	}

	ctxt_sys_reg(ctxt, SP_EL1) = read_sysreg(sp_el1);
	ctxt_sys_reg(ctxt, ELR_EL1) = read_sysreg_el1(SYS_ELR);
	ctxt_sys_reg(ctxt, SPSR_EL1) = read_sysreg_el1(SYS_SPSR);
}

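/*
 * Save the EL2 exception return state (PC/PSTATE), plus the virtual
 * SError state (VDISR_EL2) when RAS is available.
 */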
static inline void __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt)
{
	ctxt->regs.pc = read_sysreg_el2(SYS_ELR);
	/*
	 * Guest PSTATE gets saved at guest fixup time in all
	 * cases. We still need to handle the nVHE host side here.
	 */
	if (!has_vhe() && ctxt->__hyp_running_vcpu)
		ctxt->regs.pstate = read_sysreg_el2(SYS_SPSR);

	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
		ctxt_sys_reg(ctxt, DISR_EL1) = read_sysreg_s(SYS_VDISR_EL2);
}

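/* Restore the "common" state saved by __sysreg_save_common_state(). */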
static inline void __sysreg_restore_common_state(struct kvm_cpu_context *ctxt)
{
	write_sysreg(ctxt_sys_reg(ctxt, MDSCR_EL1), mdscr_el1);
}

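/* Restore the EL0 thread ID registers. */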
static inline void __sysreg_restore_user_state(struct kvm_cpu_context *ctxt)
{
	write_sysreg(ctxt_sys_reg(ctxt, TPIDR_EL0), tpidr_el0);
	write_sysreg(ctxt_sys_reg(ctxt, TPIDRRO_EL0), tpidrro_el0);
}

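/*
 * Restore the EL1 system register state. CPUs affected by
 * ARM64_WORKAROUND_SPECULATIVE_AT need SCTLR_EL1 and TCR_EL1 restored
 * in a carefully ordered sequence, see below.
 */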
static inline void __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
{
	write_sysreg(ctxt_sys_reg(ctxt, MPIDR_EL1), vmpidr_el2);

	if (has_vhe() ||
	    !cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		write_sysreg_el1(ctxt_sys_reg(ctxt, SCTLR_EL1), SYS_SCTLR);
		write_sysreg_el1(ctxt_sys_reg(ctxt, TCR_EL1), SYS_TCR);
	} else if (!ctxt->__hyp_running_vcpu) {
		/*
		 * Must only be done for guest registers, hence the context
		 * test. We're coming from the host, so SCTLR.M is already
		 * set. Pairs with nVHE's __activate_traps().
		 */
		write_sysreg_el1((ctxt_sys_reg(ctxt, TCR_EL1) |
				  TCR_EPD1_MASK | TCR_EPD0_MASK),
				 SYS_TCR);
		isb();
	}

	write_sysreg_el1(ctxt_sys_reg(ctxt, CPACR_EL1), SYS_CPACR);
	write_sysreg_el1(ctxt_sys_reg(ctxt, TTBR0_EL1), SYS_TTBR0);
	write_sysreg_el1(ctxt_sys_reg(ctxt, TTBR1_EL1), SYS_TTBR1);
	write_sysreg_el1(ctxt_sys_reg(ctxt, ESR_EL1), SYS_ESR);
	write_sysreg_el1(ctxt_sys_reg(ctxt, AFSR0_EL1), SYS_AFSR0);
	write_sysreg_el1(ctxt_sys_reg(ctxt, AFSR1_EL1), SYS_AFSR1);
	write_sysreg_el1(ctxt_sys_reg(ctxt, FAR_EL1), SYS_FAR);
	write_sysreg_el1(ctxt_sys_reg(ctxt, MAIR_EL1), SYS_MAIR);
	write_sysreg_el1(ctxt_sys_reg(ctxt, VBAR_EL1), SYS_VBAR);
	write_sysreg_el1(ctxt_sys_reg(ctxt, CONTEXTIDR_EL1), SYS_CONTEXTIDR);
	write_sysreg_el1(ctxt_sys_reg(ctxt, AMAIR_EL1), SYS_AMAIR);
	write_sysreg_el1(ctxt_sys_reg(ctxt, CNTKCTL_EL1), SYS_CNTKCTL);
	write_sysreg(ctxt_sys_reg(ctxt, PAR_EL1), par_el1);
	write_sysreg(ctxt_sys_reg(ctxt, TPIDR_EL1), tpidr_el1);

	if (ctxt_has_mte(ctxt)) {
		write_sysreg_el1(ctxt_sys_reg(ctxt, TFSR_EL1), SYS_TFSR);
		write_sysreg_s(ctxt_sys_reg(ctxt, TFSRE0_EL1), SYS_TFSRE0_EL1);
	}

	if (!has_vhe() &&
	    cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT) &&
	    ctxt->__hyp_running_vcpu) {
		/*
		 * Must only be done for host registers, hence the context
		 * test. Pairs with nVHE's __deactivate_traps().
		 */
		isb();
		/*
		 * At this stage, and thanks to the above isb(), S2 is
		 * deconfigured and disabled. We can now restore the host's
		 * S1 configuration: SCTLR, and only then TCR.
		 */
		write_sysreg_el1(ctxt_sys_reg(ctxt, SCTLR_EL1), SYS_SCTLR);
		isb();
		write_sysreg_el1(ctxt_sys_reg(ctxt, TCR_EL1), SYS_TCR);
	}

	write_sysreg(ctxt_sys_reg(ctxt, SP_EL1), sp_el1);
	write_sysreg_el1(ctxt_sys_reg(ctxt, ELR_EL1), SYS_ELR);
	write_sysreg_el1(ctxt_sys_reg(ctxt, SPSR_EL1), SYS_SPSR);
}

/* Read the VCPU state's PSTATE, but translate (v)EL2 to EL1. */
static inline u64 to_hw_pstate(const struct kvm_cpu_context *ctxt)
{
	u64 mode = ctxt->regs.pstate & (PSR_MODE_MASK | PSR_MODE32_BIT);

	switch (mode) {
	case PSR_MODE_EL2t:
		mode = PSR_MODE_EL1t;
		break;
	case PSR_MODE_EL2h:
		mode = PSR_MODE_EL1h;
		break;
	}

	return (ctxt->regs.pstate & ~(PSR_MODE_MASK | PSR_MODE32_BIT)) | mode;
}

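/*
 * Program the EL2 exception return state so that the next ERET takes
 * the CPU into the guest at the vCPU's PC and (sanitised) PSTATE.
 */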
static inline void __sysreg_restore_el2_return_state(struct kvm_cpu_context *ctxt)
{
	u64 pstate = to_hw_pstate(ctxt);
	u64 mode = pstate & PSR_AA32_MODE_MASK;

	/*
	 * Safety check to ensure we're setting the CPU up to enter the guest
	 * in a less privileged mode.
	 *
	 * If we are attempting a return to EL2 or higher in AArch64 state,
	 * program SPSR_EL2 with M=EL2h and the IL bit set which ensures that
	 * we'll take an illegal exception state exception immediately after
	 * the ERET to the guest. Attempts to return to AArch32 Hyp will
	 * result in an illegal exception return because EL2's execution state
	 * is determined by SCR_EL3.RW.
	 */
	if (!(mode & PSR_MODE32_BIT) && mode >= PSR_MODE_EL2t)
		pstate = PSR_MODE_EL2h | PSR_IL_BIT;

	write_sysreg_el2(ctxt->regs.pc, SYS_ELR);
	write_sysreg_el2(pstate, SYS_SPSR);

	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
		write_sysreg_s(ctxt_sys_reg(ctxt, DISR_EL1), SYS_VDISR_EL2);
}

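/* Save the AArch32 banked SPSRs and fault state of a 32bit guest. */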
static inline void __sysreg32_save_state(struct kvm_vcpu *vcpu)
{
	if (!vcpu_el1_is_32bit(vcpu))
		return;

	vcpu->arch.ctxt.spsr_abt = read_sysreg(spsr_abt);
	vcpu->arch.ctxt.spsr_und = read_sysreg(spsr_und);
	vcpu->arch.ctxt.spsr_irq = read_sysreg(spsr_irq);
	vcpu->arch.ctxt.spsr_fiq = read_sysreg(spsr_fiq);

	__vcpu_sys_reg(vcpu, DACR32_EL2) = read_sysreg(dacr32_el2);
	__vcpu_sys_reg(vcpu, IFSR32_EL2) = read_sysreg(ifsr32_el2);

	if (has_vhe() || vcpu_get_flag(vcpu, DEBUG_DIRTY))
		__vcpu_sys_reg(vcpu, DBGVCR32_EL2) = read_sysreg(dbgvcr32_el2);
}

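/* Restore the AArch32 state saved by __sysreg32_save_state(). */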
static inline void __sysreg32_restore_state(struct kvm_vcpu *vcpu)
{
	if (!vcpu_el1_is_32bit(vcpu))
		return;

	write_sysreg(vcpu->arch.ctxt.spsr_abt, spsr_abt);
	write_sysreg(vcpu->arch.ctxt.spsr_und, spsr_und);
	write_sysreg(vcpu->arch.ctxt.spsr_irq, spsr_irq);
	write_sysreg(vcpu->arch.ctxt.spsr_fiq, spsr_fiq);

	write_sysreg(__vcpu_sys_reg(vcpu, DACR32_EL2), dacr32_el2);
	write_sysreg(__vcpu_sys_reg(vcpu, IFSR32_EL2), ifsr32_el2);

	if (has_vhe() || vcpu_get_flag(vcpu, DEBUG_DIRTY))
		write_sysreg(__vcpu_sys_reg(vcpu, DBGVCR32_EL2), dbgvcr32_el2);
}

#endif /* __ARM64_KVM_HYP_SYSREG_SR_H__ */