// © 2021 Qualcomm Innovation Center, Inc. All rights reserved.
//
// SPDX-License-Identifier: BSD-3-Clause

#if defined(INTERFACE_VCPU)
#include <assert.h>
#include <hyptypes.h>

#include <hypregisters.h>

#include <addrspace.h>
#include <atomic.h>
#include <platform_mem.h>
#include <rcu.h>
#include <spinlock.h>
#include <thread.h>

#include <asm/barrier.h>

#include "event_handlers.h"

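// Check whether a break-before-make (BBM) sequence might currently be in
// progress on this address space's page table.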
static bool
addrspace_undergoing_bbm(addrspace_t *addrspace)
{
	bool ret;

	if (addrspace->platform_pgtable) {
		ret = platform_pgtable_undergoing_bbm();
	} else {
#if (CPU_PGTABLE_BBM_LEVEL == 0) && !defined(PLATFORM_PGTABLE_AVOID_BBM)
		// We use break-before-make for block splits and merges,
		// which might affect addresses outside the operation range
		// and therefore might cause faults that should be hidden.
		if (!spinlock_trylock(&addrspace->pgtable_lock)) {
			ret = true;
		} else {
			spinlock_release(&addrspace->pgtable_lock);
			ret = false;
		}
#else
		// Break-before-make is only used when changing the output
		// address or cache attributes, which shouldn't happen while
		// the affected pages are being accessed.
		ret = false;
#endif
	}

	return ret;
}

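// Handle a TLB conflict abort taken from a guest: invalidate any TLB entries
// that might conflict for the faulting address (by IPA in stage 2 when the
// IPA is known, and always by VA for S1+S2 or S1-only entries), then retry
// the faulting access.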
static vcpu_trap_result_t
addrspace_handle_guest_tlb_conflict(vmaddr_result_t ipa, FAR_EL2_t far,
				    bool s1ptw)
{
	// If this fault was not on a stage 1 PT walk, the ipa argument is not
	// valid, because the architecture allows the TLB to avoid caching it.
	// We can do a lookup on the VA to try to find it. This may fail if the
	// CPU caches S1-only translations and the conflict is in that cache.
	//
	// For a fault on a stage 1 PT walk, the ipa argument is always valid.
	if (!s1ptw) {
		ipa = addrspace_va_to_ipa_read(
			FAR_EL2_get_VirtualAddress(&far));
	} else {
		assert(ipa.e == OK);
	}

	asm_ordering_dummy_t tlbi_s2_ordering;
	if (ipa.e == OK) {
		// If the IPA is valid, the conflict may have been between S2
		// TLB entries, so flush the IPA from the S2 TLB. Note that if
		// our IPA lookup above failed, the conflict must be in S1+S2
		// or S1-only entries, so no S2 flush is needed.
		vmsa_tlbi_ipa_input_t ipa_input = vmsa_tlbi_ipa_input_default();
		vmsa_tlbi_ipa_input_set_IPA(&ipa_input, ipa.r);
		__asm__ volatile(
			"tlbi IPAS2E1, %[VA]"
			: "=m"(tlbi_s2_ordering)
			: [VA] "r"(vmsa_tlbi_ipa_input_raw(ipa_input)));
	}

	// Regardless of whether the IPA is valid, there is always a possibility
	// that the conflict was on S1+S2 or S1-only entries. So we always flush
	// by VA. If the fault was on a stage 1 page table walk, the fault may
	// have been on a cached next-level entry, so we flush those too.
	asm_ordering_dummy_t tlbi_s1_ordering;
	vmsa_tlbi_vaa_input_t va_input = vmsa_tlbi_vaa_input_default();
	vmsa_tlbi_vaa_input_set_VA(&va_input, FAR_EL2_get_VirtualAddress(&far));
	if (s1ptw) {
		__asm__ volatile("tlbi VAAE1, %[VA]"
				 : "=m"(tlbi_s1_ordering)
				 : [VA] "r"(vmsa_tlbi_vaa_input_raw(va_input)));
	} else {
		__asm__ volatile("tlbi VAALE1, %[VA]"
				 : "=m"(tlbi_s1_ordering)
				 : [VA] "r"(vmsa_tlbi_vaa_input_raw(va_input)));
	}

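	// Wait for the TLB invalidations above to complete before returning
	// to retry the faulting access.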
	__asm__ volatile("dsb nsh" ::"m"(tlbi_s1_ordering),
			 "m"(tlbi_s2_ordering));

	return VCPU_TRAP_RESULT_RETRY;
}

// Retry faults if they may have been caused by break-before-make during block
// splits in the direct physical access region.
static vcpu_trap_result_t
addrspace_handle_guest_translation_fault(FAR_EL2_t far)
{
	vcpu_trap_result_t ret;

	uintptr_t addr = FAR_EL2_get_VirtualAddress(&far);

	thread_t *current = thread_get_self();
	assert(current != NULL);

	addrspace_t *addrspace = current->addrspace;
	assert(addrspace != NULL);

	rcu_read_start();
	if (!addrspace_undergoing_bbm(addrspace)) {
		// There is no BBM in progress, but there might have been when
		// the fault occurred. Perform a lookup to see whether the
		// accessed address is now mapped in S2.
		//
		// If the accessed address no longer faults in stage 2, we can
		// just retry the faulting access. Otherwise we can consider
		// the fault to be fatal, because there is no BBM operation
		// still in progress.
		ret = (addrspace_va_to_pa_read(addr).e != ERROR_DENIED)
			      ? VCPU_TRAP_RESULT_RETRY
			      : VCPU_TRAP_RESULT_UNHANDLED;
	} else {
		// A map operation is in progress, so retry until it finishes.
		// Note that we might get stuck here if the page table is
		// corrupt!
		ret = VCPU_TRAP_RESULT_RETRY;
	}
	rcu_read_finish();

	return ret;
}

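// Data abort handler for guest faults: handles TLB conflict aborts and
// translation faults that may have been caused by break-before-make.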
vcpu_trap_result_t
addrspace_handle_vcpu_trap_data_abort_guest(ESR_EL2_t esr, vmaddr_result_t ipa,
					    FAR_EL2_t far)
{
	vcpu_trap_result_t ret = VCPU_TRAP_RESULT_UNHANDLED;

	ESR_EL2_ISS_DATA_ABORT_t iss =
		ESR_EL2_ISS_DATA_ABORT_cast(ESR_EL2_get_ISS(&esr));
	iss_da_ia_fsc_t fsc = ESR_EL2_ISS_DATA_ABORT_get_DFSC(&iss);

	if (fsc == ISS_DA_IA_FSC_TLB_CONFLICT) {
		ret = addrspace_handle_guest_tlb_conflict(
			ipa, far, ESR_EL2_ISS_DATA_ABORT_get_S1PTW(&iss));
	}

	// Only translation faults can be caused by BBM
	if ((fsc == ISS_DA_IA_FSC_TRANSLATION_1) ||
	    (fsc == ISS_DA_IA_FSC_TRANSLATION_2) ||
	    (fsc == ISS_DA_IA_FSC_TRANSLATION_3)) {
		ret = addrspace_handle_guest_translation_fault(far);
	}

	return ret;
}

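// Instruction (prefetch) abort handler for guest faults: same handling as for
// data aborts, using the instruction abort ISS encoding.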
vcpu_trap_result_t
addrspace_handle_vcpu_trap_pf_abort_guest(ESR_EL2_t esr, vmaddr_result_t ipa,
					  FAR_EL2_t far)
{
	vcpu_trap_result_t ret = VCPU_TRAP_RESULT_UNHANDLED;

	ESR_EL2_ISS_INST_ABORT_t iss =
		ESR_EL2_ISS_INST_ABORT_cast(ESR_EL2_get_ISS(&esr));
	iss_da_ia_fsc_t fsc = ESR_EL2_ISS_INST_ABORT_get_IFSC(&iss);

	if (fsc == ISS_DA_IA_FSC_TLB_CONFLICT) {
		ret = addrspace_handle_guest_tlb_conflict(
			ipa, far, ESR_EL2_ISS_INST_ABORT_get_S1PTW(&iss));
	}

	// Only translation faults can be caused by BBM
	if ((fsc == ISS_DA_IA_FSC_TRANSLATION_1) ||
	    (fsc == ISS_DA_IA_FSC_TRANSLATION_2) ||
	    (fsc == ISS_DA_IA_FSC_TRANSLATION_3)) {
		ret = addrspace_handle_guest_translation_fault(far);
	}

	return ret;
}
#else
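// Dummy declaration; ISO C requires a translation unit to contain at least
// one declaration.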
extern char unused;
#endif