/*
 * Copyright 2019 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/arch/vm.h"

#include "hf/arch/mmu.h"

#include "hf/dlog.h"

#include "hypervisor/feature_id.h"

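/*
 * Select which architectural features are trapped when accessed by the given
 * VM.
 */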
void arch_vm_features_set(struct vm *vm)
{
	/* Features to trap for all VMs. */

	/*
	 * It is not safe to enable this yet, in part, because the feature's
	 * registers are not context switched in Hafnium.
	 */
	vm->arch.trapped_features |= HF_FEATURE_LOR;

	vm->arch.trapped_features |= HF_FEATURE_SPE;

	vm->arch.trapped_features |= HF_FEATURE_TRACE;

	vm->arch.trapped_features |= HF_FEATURE_DEBUG;

	vm->arch.trapped_features |= HF_FEATURE_SVE;

	vm->arch.trapped_features |= HF_FEATURE_SME;

	if (!vm_is_primary(vm)) {
		/*
		 * Features to trap only for the secondary VMs (and Secure
		 * Partitions).
		 */

		vm->arch.trapped_features |= HF_FEATURE_AMU;

		vm->arch.trapped_features |= HF_FEATURE_PERFMON;

		/*
		 * TODO(b/132395845): Access to RAS registers is not trapped at
		 * the moment for the primary VM, only for the secondaries. RAS
		 * register access isn't needed now, but it might be required
		 * for debugging. When Hafnium introduces debug vs release
		 * builds, trap accesses for primary VMs in release builds, but
		 * do not trap them in debug builds.
		 */
		vm->arch.trapped_features |= HF_FEATURE_RAS;

#if !BRANCH_PROTECTION
		/*
		 * When branch protection is enabled in the build
		 * (BRANCH_PROTECTION=1), the primary VM, secondary VMs and SPs
		 * are allowed to enable and use pointer authentication. When
		 * branch protection is disabled, only the primary VM is
		 * allowed to. Secondary VMs and SPs shall trap on accessing
		 * PAuth key registers.
		 */
		vm->arch.trapped_features |= HF_FEATURE_PAUTH;
#endif
	}
}

/*
 * Allow the partition manager to perform the necessary steps to enforce access
 * control, with the help of the IOMMU, for DMA accesses on behalf of a given
 * partition.
 */
bool arch_vm_iommu_init_mm(struct vm *vm, struct mpool *ppool)
{
	bool ret = true;

	/*
	 * No support to enforce access control through (stage 1) address
	 * translation for memory accesses by a DMA device on behalf of an
	 * EL0/S-EL0 partition.
	 */
	if (vm->el0_partition) {
		return true;
	}

	for (uint8_t k = 0; k < vm->dma_device_count; k++) {
		/*
		 * Hafnium maintains an independent set of page tables for each
		 * DMA device that is upstream of a given VM. This is necessary
		 * to enforce static DMA isolation.
		 */
		ret = ret && mm_ptable_init(&vm->iommu_ptables[k], vm->id,
					    false, ppool);
#if SECURE_WORLD == 1
		ret = ret && mm_ptable_init(&vm->arch.iommu_ptables_ns[k],
					    vm->id, false, ppool);
#endif
		if (!ret) {
			dlog_error(
				"Failed to allocate entries for DMA page "
				"tables. Consider increasing heap page "
				"count.\n");
			return ret;
		}
	}

	return ret;
}

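/*
 * Initialize the page tables that translate memory accesses made by the given
 * partition: stage-1 tables for an EL0 partition, stage-2 tables otherwise.
 */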
bool arch_vm_init_mm(struct vm *vm, struct mpool *ppool)
{
	bool ret;

	if (vm->el0_partition) {
		return mm_ptable_init(&vm->ptable, vm->id, true, ppool);
	}

	ret = mm_vm_init(&vm->ptable, vm->id, ppool);

#if SECURE_WORLD == 1
	ret = ret && mm_vm_init(&vm->arch.ptable_ns, vm->id, ppool);
#endif

	return ret;
}

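/*
 * Prepare the VM's page tables for an identity mapping of the given physical
 * address range, without committing the mapping.
 */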
bool arch_vm_identity_prepare(struct vm_locked vm_locked, paddr_t begin,
			      paddr_t end, mm_mode_t mode, struct mpool *ppool)
{
	struct mm_ptable *ptable = &vm_locked.vm->ptable;

	if (vm_locked.vm->el0_partition) {
		return mm_identity_prepare(ptable, begin, end, mode, ppool);
	}

#if SECURE_WORLD == 1
	if (0 != (mode & MM_MODE_NS)) {
		ptable = &vm_locked.vm->arch.ptable_ns;
	}
#endif

	return mm_vm_identity_prepare(ptable, begin, end, mode, ppool);
}

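/*
 * Commit an identity mapping previously prepared with
 * arch_vm_identity_prepare(), returning the resulting IPA when requested.
 */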
void arch_vm_identity_commit(struct vm_locked vm_locked, paddr_t begin,
			     paddr_t end, mm_mode_t mode, struct mpool *ppool,
			     ipaddr_t *ipa)
{
	struct mm_ptable *ptable = &vm_locked.vm->ptable;

	if (vm_locked.vm->el0_partition) {
		mm_identity_commit(&vm_locked.vm->ptable, begin, end, mode,
				   ppool);
		if (ipa != NULL) {
			/*
			 * EL0 partitions are modeled as lightweight VMs to
			 * promote code reuse. The statement below returns the
			 * mapped PA as an IPA; however, for an EL0 partition,
			 * this is really a VA.
			 */
			*ipa = ipa_from_pa(begin);
		}
	} else {
#if SECURE_WORLD == 1
		if (0 != (mode & MM_MODE_NS)) {
			ptable = &vm_locked.vm->arch.ptable_ns;
		}
#endif

		mm_vm_identity_commit(ptable, begin, end, mode, ppool, ipa);
	}
}

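/*
 * Unmap the given physical address range from the VM's page tables.
 */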
bool arch_vm_unmap(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
		   struct mpool *ppool)
{
	bool ret;
	mm_mode_t mode = MM_MODE_UNMAPPED_MASK;

	ret = vm_identity_map(vm_locked, begin, end, mode, ppool, NULL);

#if SECURE_WORLD == 1
	ret = ret && vm_identity_map(vm_locked, begin, end, mode | MM_MODE_NS,
				     ppool, NULL);
#endif

	return ret;
}

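/*
 * Defragment the page tables owned by the given VM.
 */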
void arch_vm_ptable_defrag(struct vm_locked vm_locked, struct mpool *ppool)
{
	if (vm_locked.vm->el0_partition) {
		mm_stage1_defrag(&vm_locked.vm->ptable, ppool);
	} else {
		mm_vm_defrag(&vm_locked.vm->ptable, ppool, false);
#if SECURE_WORLD == 1
		/*
		 * TODO: check if this can be better optimized (pass the
		 * security state?).
		 */
		mm_vm_defrag(&vm_locked.vm->arch.ptable_ns, ppool, true);
#endif
	}
}

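/*
 * Retrieve the mapping mode of the given address range in the VM's page
 * tables.
 */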
bool arch_vm_mem_get_mode(struct vm_locked vm_locked, ipaddr_t begin,
			  ipaddr_t end, mm_mode_t *mode)
{
	bool ret;

	if (vm_locked.vm->el0_partition) {
		return mm_get_mode(&vm_locked.vm->ptable,
				   va_from_pa(pa_from_ipa(begin)),
				   va_from_pa(pa_from_ipa(end)), mode);
	}

	ret = mm_vm_get_mode(&vm_locked.vm->ptable, begin, end, mode);

#if SECURE_WORLD == 1
	mm_mode_t mode2;
	const mm_mode_t mask =
		MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;

	/* If the region is fully unmapped in the secure IPA space. */
	if (ret && ((*mode & mask) == mask)) {
		/* Look up the non-secure IPA space. */
		ret = mm_vm_get_mode(&vm_locked.vm->arch.ptable_ns, begin, end,
				     &mode2);

		/* If the region is fully mapped in the non-secure IPA space. */
		if (ret && ((mode2 & mask) != mask)) {
			*mode = mode2;
		}
	}
#endif

	return ret;
}

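/*
 * Prepare the page tables of the given DMA device for an identity mapping of
 * the given physical address range, without committing the mapping.
 */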
static bool arch_vm_iommu_mm_prepare(struct vm_locked vm_locked, paddr_t begin,
				     paddr_t end, mm_mode_t mode,
				     struct mpool *ppool, uint8_t dma_device_id)
{
	struct mm_ptable *ptable = &vm_locked.vm->iommu_ptables[dma_device_id];

#if SECURE_WORLD == 1
	if (0 != (mode & MM_MODE_NS)) {
		ptable = &vm_locked.vm->arch.iommu_ptables_ns[dma_device_id];
	}
#endif

	return mm_vm_identity_prepare(ptable, begin, end, mode, ppool);
}

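/*
 * Commit an identity mapping previously prepared in the page tables of the
 * given DMA device.
 */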
static void arch_vm_iommu_mm_commit(struct vm_locked vm_locked, paddr_t begin,
				    paddr_t end, mm_mode_t mode,
				    struct mpool *ppool, ipaddr_t *ipa,
				    uint8_t dma_device_id)
{
	struct mm_ptable *ptable = &vm_locked.vm->iommu_ptables[dma_device_id];

#if SECURE_WORLD == 1
	if (0 != (mode & MM_MODE_NS)) {
		ptable = &vm_locked.vm->arch.iommu_ptables_ns[dma_device_id];
	}
#endif

	mm_vm_identity_commit(ptable, begin, end, mode, ppool, ipa);
}

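/*
 * Identity map the given physical address range in the page tables that
 * translate DMA accesses made by the given device on behalf of the VM.
 */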
bool arch_vm_iommu_mm_identity_map(struct vm_locked vm_locked, paddr_t begin,
				   paddr_t end, mm_mode_t mode,
				   struct mpool *ppool, ipaddr_t *ipa,
				   uint8_t dma_device_id)
{
	/*
	 * No support to enforce access control through (stage 1) address
	 * translation for memory accesses by a DMA device on behalf of an
	 * EL0/S-EL0 partition.
	 */
	if (vm_locked.vm->el0_partition) {
		return true;
	}

	if (dma_device_id >= vm_locked.vm->dma_device_count) {
		dlog_error("Illegal DMA device specified.\n");
		return false;
	}

	if (!arch_vm_iommu_mm_prepare(vm_locked, begin, end, mode, ppool,
				      dma_device_id)) {
		return false;
	}

	arch_vm_iommu_mm_commit(vm_locked, begin, end, mode, ppool, ipa,
				dma_device_id);

	return true;
}