// © 2021 Qualcomm Innovation Center, Inc. All rights reserved.
//
// SPDX-License-Identifier: BSD-3-Clause

#include <assert.h>
#include <hyptypes.h>
#include <string.h>

#include <hypregisters.h>

#include <compiler.h>
#include <partition.h>
#include <pgtable.h>
#include <rcu.h>
#include <util.h>

#include <asm/barrier.h>
#include <asm/cache.h>
#include <asm/cpu.h>

#include "useraccess.h"

static void
useraccess_clean_range(const uint8_t *va, size_t size)
{
        CACHE_CLEAN_RANGE(va, size);
}

static void
useraccess_clean_invalidate_range(const uint8_t *va, size_t size)
{
        CACHE_CLEAN_INVALIDATE_RANGE(va, size);
}

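// Copy between a hypervisor buffer and the guest page whose translation is
// already held in the given PAR_EL1 value. The page is temporarily mapped at
// EL2; cache maintenance is performed when the guest's mapping is not
// writeback cacheable. Returns the number of bytes copied, which is at most
// the remainder of the page.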
static size_t
useraccess_copy_from_to_translated_pa(PAR_EL1_t par, gvaddr_t guest_va,
                                      size_t page_size, size_t page_offset,
                                      bool from_guest, void *hyp_buf,
                                      size_t remaining)
{
        paddr_t guest_pa = PAR_EL1_F0_get_PA(&par.f0);
        guest_pa |= (paddr_t)guest_va & (page_size - 1U);

        size_t mapped_size = page_size - page_offset;
        void *va = partition_phys_map(guest_pa, mapped_size);

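        // Cache maintenance can only be skipped if the guest maps this page
        // as writeback cacheable. ORing in the allocation hint bits treats
        // every normal writeback attribute, whatever its allocation hints,
        // as MAIR_ATTR_NORMAL_WB.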
        MAIR_ATTR_t attr = PAR_EL1_F0_get_ATTR(&par.f0);
        bool writeback =
                ((index_t)attr | (index_t)MAIR_ATTR_ALLOC_HINT_MASK) ==
                (index_t)MAIR_ATTR_NORMAL_WB;
#if defined(ARCH_ARM_FEAT_MTE)
        writeback = writeback || (attr == MAIR_ATTR_TAGGED_NORMAL_WB);
#endif

        partition_phys_access_enable(va);

        if (compiler_unexpected(from_guest && !writeback)) {
                useraccess_clean_range((uint8_t *)va,
                                       util_min(remaining, mapped_size));
        }

        size_t copied_size;
        if (from_guest) {
                copied_size = memscpy(hyp_buf, remaining, va, mapped_size);
        } else {
                copied_size = memscpy(va, mapped_size, hyp_buf, remaining);
        }

        if (compiler_unexpected(!from_guest && !writeback)) {
                useraccess_clean_invalidate_range((uint8_t *)va, copied_size);
        }

        partition_phys_access_disable(va);

        partition_phys_unmap(va, guest_pa, mapped_size);

        return copied_size;
}

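// Copy between a hypervisor buffer and a guest virtual address range, one
// 4KiB page at a time. Each page is translated with an AT S12E1 instruction;
// PAR_EL1 is saved before the loop and restored afterwards. Returns the
// number of bytes copied, together with an error if any page fails to
// translate.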
static size_result_t
useraccess_copy_from_to_guest_va(gvaddr_t gvaddr, void *hvaddr, size_t size,
                                 bool from_guest, bool force_access)
{
        error_t ret = OK;
        size_t remaining = size;
        gvaddr_t guest_va = gvaddr;
        void *hyp_buf = hvaddr;

        assert(hyp_buf != NULL);
        assert(remaining != 0U);

        if (util_add_overflows((uintptr_t)hvaddr, size - 1U) ||
            util_add_overflows(gvaddr, size - 1U)) {
                ret = ERROR_ADDR_OVERFLOW;
                goto out;
        }

        PAR_EL1_base_t saved_par =
                register_PAR_EL1_base_read_volatile_ordered(&asm_ordering);

        const size_t page_size = 4096U;
        size_t page_offset = gvaddr & (page_size - 1U);

        do {
                // Guest stage 2 lookups are in RCU read-side critical sections
                // so that unmap or access change operations can wait for them
                // to complete.
                rcu_read_start();

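                // Translate the guest VA through stage 1 and stage 2 as a
                // read (S12E1R) or a write (S12E1W). When force_access is set
                // for a copy into the guest, the read translation is used so
                // that the guest's write permissions are not enforced.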
                if (from_guest || force_access) {
                        __asm__ volatile("at S12E1R, %[guest_va];"
                                         "isb ;"
                                         : "+m"(asm_ordering)
                                         : [guest_va] "r"(guest_va));
                } else {
                        __asm__ volatile("at S12E1W, %[guest_va];"
                                         "isb ;"
                                         : "+m"(asm_ordering)
                                         : [guest_va] "r"(guest_va));
                }

                PAR_EL1_t par = {
                        .base = register_PAR_EL1_base_read_volatile_ordered(
                                &asm_ordering),
                };

                if (compiler_expected(!PAR_EL1_base_get_F(&par.base))) {
                        // No fault; copy to/from the translated PA
                        size_t copied_size =
                                useraccess_copy_from_to_translated_pa(
                                        par, guest_va, page_size, page_offset,
                                        from_guest, hyp_buf, remaining);
                        assert(copied_size > 0U);
                        guest_va += copied_size;
                        hyp_buf = (void *)((uintptr_t)hyp_buf + copied_size);
                        remaining -= copied_size;
                        page_offset = 0U;
                } else if (!PAR_EL1_F1_get_S(&par.f1)) {
                        // Stage 1 fault (reason is not distinguished here)
                        ret = ERROR_ARGUMENT_INVALID;
                } else {
                        // Stage 2 fault; return DENIED for permission faults,
                        // ADDR_INVALID otherwise
                        iss_da_ia_fsc_t fst = PAR_EL1_F1_get_FST(&par.f1);
                        ret = ((fst == ISS_DA_IA_FSC_PERMISSION_1) ||
                               (fst == ISS_DA_IA_FSC_PERMISSION_2) ||
                               (fst == ISS_DA_IA_FSC_PERMISSION_3))
                                      ? ERROR_DENIED
                                      : ERROR_ADDR_INVALID;
                }

                rcu_read_finish();
        } while ((remaining != 0U) && (ret == OK));

        register_PAR_EL1_base_write_ordered(saved_par, &asm_ordering);

out:
        return (size_result_t){ .e = ret, .r = size - remaining };
}

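// Copy gsize bytes from the guest virtual address range at guest_va into the
// hypervisor buffer at hyp_va. Fails with ERROR_ARGUMENT_SIZE if gsize is
// zero or larger than the destination buffer.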
size_result_t
useraccess_copy_from_guest_va(void *hyp_va, size_t hsize, gvaddr_t guest_va,
                              size_t gsize)
{
        size_result_t ret;
        bool force_access = false; // only writes to the guest VA need this
        if ((gsize == 0U) || (hsize < gsize)) {
                ret = size_result_error(ERROR_ARGUMENT_SIZE);
        } else {
                ret = useraccess_copy_from_to_guest_va(guest_va, hyp_va, gsize,
                                                       true, force_access);
        }
        return ret;
}

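// Copy hsize bytes from the hypervisor buffer at hyp_va to the guest virtual
// address range at guest_va. When force_access is set, the address is
// translated for read rather than write, so the guest's write permissions
// are not enforced. Fails with ERROR_ARGUMENT_SIZE if hsize is zero or
// larger than the guest range.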
size_result_t
useraccess_copy_to_guest_va(gvaddr_t guest_va, size_t gsize, const void *hyp_va,
                            size_t hsize, bool force_access)
{
        size_result_t ret;
        if ((hsize == 0U) || (gsize < hsize)) {
                ret = size_result_error(ERROR_ARGUMENT_SIZE);
        } else {
                ret = useraccess_copy_from_to_guest_va(
                        guest_va, (void *)(uintptr_t)hyp_va, hsize, false,
                        force_access);
        }
        return ret;
}

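// Copy between a hypervisor buffer and a guest IPA range by walking the
// address space's stage 2 page table directly, one mapping at a time. Unless
// force_access is set, the VM kernel's access rights are checked: read for
// copies from the guest, write for copies to it. Cache maintenance is
// performed when the mapping is not normal writeback memory or when
// force_coherent is set.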
static size_result_t
useraccess_copy_from_to_guest_ipa(addrspace_t *addrspace, vmaddr_t ipa,
                                  void *hvaddr, size_t size, bool from_guest,
                                  bool force_access, bool force_coherent)
{
        error_t ret = OK;
        size_t offset = 0U;

        if (util_add_overflows((uintptr_t)hvaddr, size - 1U) ||
            util_add_overflows(ipa, size - 1U)) {
                ret = ERROR_ADDR_OVERFLOW;
                goto out;
        }

        while (offset < size) {
                paddr_t mapped_base;
                size_t mapped_size;
                pgtable_vm_memtype_t mapped_memtype;
                pgtable_access_t mapped_vm_kernel_access;
                pgtable_access_t mapped_vm_user_access;

                // Guest stage 2 lookups are in RCU read-side critical sections
                // so that unmap or access change operations can wait for them
                // to complete.
                rcu_read_start();

                if (!pgtable_vm_lookup(
                            &addrspace->vm_pgtable, ipa + offset, &mapped_base,
                            &mapped_size, &mapped_memtype,
                            &mapped_vm_kernel_access, &mapped_vm_user_access)) {
                        rcu_read_finish();
                        ret = ERROR_ADDR_INVALID;
                        break;
                }

                if (!force_access &&
                    !pgtable_access_check(mapped_vm_kernel_access,
                                          (from_guest ? PGTABLE_ACCESS_R
                                                      : PGTABLE_ACCESS_W))) {
                        rcu_read_finish();
                        ret = ERROR_DENIED;
                        break;
                }

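                // The lookup returns the base and size of the containing
                // block or page mapping; masking with (mapped_size - 1U)
                // relies on mapped_size being a power-of-two block or page
                // size, and gives the offset of the target IPA within it.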
                size_t mapping_offset = (ipa + offset) & (mapped_size - 1U);
                mapped_base += mapping_offset;
                mapped_size -= mapping_offset;

                uint8_t *vm_addr = partition_phys_map(mapped_base, mapped_size);
                partition_phys_access_enable(vm_addr);

                uint8_t *hyp_va = (uint8_t *)hvaddr + offset;
                size_t hyp_size = size - offset;
                size_t copied_size;

                if (from_guest) {
                        if (force_coherent ||
                            (mapped_memtype != PGTABLE_VM_MEMTYPE_NORMAL_WB)) {
                                useraccess_clean_invalidate_range(
                                        vm_addr,
                                        util_min(mapped_size, hyp_size));
                        }

                        copied_size =
                                memscpy(hyp_va, hyp_size, vm_addr, mapped_size);
                } else {
                        copied_size =
                                memscpy(vm_addr, mapped_size, hyp_va, hyp_size);

                        if (force_coherent ||
                            (mapped_memtype != PGTABLE_VM_MEMTYPE_NORMAL_WB)) {
                                useraccess_clean_range(vm_addr, copied_size);
                        }
                }

                partition_phys_access_disable(vm_addr);
                partition_phys_unmap(vm_addr, mapped_base, mapped_size);

                rcu_read_finish();

                offset += copied_size;
        }

out:
        return (size_result_t){ .e = ret, .r = offset };
}

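// Copy gsize bytes from the guest IPA range at guest_ipa into the hypervisor
// buffer at hyp_va. Fails with ERROR_ARGUMENT_SIZE if gsize is zero or larger
// than the destination buffer.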
size_result_t
useraccess_copy_from_guest_ipa(addrspace_t *addrspace, void *hyp_va,
                               size_t hsize, vmaddr_t guest_ipa, size_t gsize,
                               bool force_access, bool force_coherent)
{
        size_result_t ret;
        if ((gsize == 0U) || (hsize < gsize)) {
                ret = size_result_error(ERROR_ARGUMENT_SIZE);
        } else {
                ret = useraccess_copy_from_to_guest_ipa(addrspace, guest_ipa,
                                                        hyp_va, gsize, true,
                                                        force_access,
                                                        force_coherent);
        }
        return ret;
}

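// Copy hsize bytes from the hypervisor buffer at hyp_va to the guest IPA
// range at guest_ipa. Fails with ERROR_ARGUMENT_SIZE if hsize is zero or
// larger than the guest range.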
size_result_t
useraccess_copy_to_guest_ipa(addrspace_t *addrspace, vmaddr_t guest_ipa,
                             size_t gsize, const void *hyp_va, size_t hsize,
                             bool force_access, bool force_coherent)
{
        size_result_t ret;
        if ((hsize == 0U) || (gsize < hsize)) {
                ret = size_result_error(ERROR_ARGUMENT_SIZE);
        } else {
                ret = useraccess_copy_from_to_guest_ipa(
                        addrspace, guest_ipa, (void *)(uintptr_t)hyp_va, hsize,
                        false, force_access, force_coherent);
        }
        return ret;
}