// © 2021 Qualcomm Innovation Center, Inc. All rights reserved.
//
// SPDX-License-Identifier: BSD-3-Clause

#include <assert.h>
#include <hyptypes.h>

#include <allocator.h>
#include <compiler.h>
#include <hyp_aspace.h>
#include <log.h>
#include <memdb.h>
#include <object.h>
#include <panic.h>
#include <partition.h>
#include <pgtable.h>
#include <spinlock.h>
#include <trace.h>
#include <util.h>

#include <events/allocator.h>

#include "event_handlers.h"

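// Allocate memory from a partition's heap allocator.
//
// On success the returned void_ptr_result_t has its error field (e) set to
// OK and its result field (r) pointing to the allocated memory.
//
// Illustrative use only; my_object_t is a placeholder type, not part of
// this file:
//
//	void_ptr_result_t r =
//		partition_alloc(p, sizeof(my_object_t), alignof(my_object_t));
//	if (r.e == OK) {
//		my_object_t *obj = (my_object_t *)r.r;
//		...
//	}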
void_ptr_result_t NOINLINE
partition_alloc(partition_t *partition, size_t bytes, size_t min_alignment)
{
	void_ptr_result_t ret;

	assert(bytes > 0U);

	ret = allocator_allocate_object(&partition->allocator, bytes,
					min_alignment);

	if (compiler_expected(ret.e == OK)) {
		assert(ret.r != NULL);
	}
	return ret;
}

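// Free memory previously obtained from partition_alloc().
//
// The size is passed through to the allocator, so it is expected to match the
// size of the original allocation, and the memory must lie within one of the
// partition's mapped ranges.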
error_t
partition_free(partition_t *partition, void *mem, size_t bytes)
{
	error_t ret;
	assert((bytes > 0U) && !util_add_overflows((uintptr_t)mem, bytes - 1U));
	assert(partition_virt_to_phys(partition, (uintptr_t)mem) !=
	       PADDR_INVALID);

	ret = allocator_deallocate_object(&partition->allocator, mem, bytes);

	return ret;
}

// FIXME: partition->mapped_ranges is not updated atomically. It's not an
// issue yet since it's only done during single-threaded init. Once we support
// dynamic heap adjustment, it will become a problem.
// FIXME:

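// Look up the hypervisor virtual address of a physical range by searching the
// partition's mapped ranges. Returns VADDR_INVALID if the range is not fully
// contained in a single mapped range.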
static uintptr_t
phys_to_virt(partition_t *partition, paddr_t phys, size_t size)
{
	uintptr_t virt = VADDR_INVALID;

	assert(!util_add_overflows(phys, size - 1U));

	for (count_t i = 0U; i < util_array_size(partition->mapped_ranges);
	     i++) {
		partition_mapped_range_t *mr = &partition->mapped_ranges[i];
		if (mr->size == 0U) {
			continue;
		}
		if ((phys >= mr->phys) &&
		    ((phys + (size - 1U)) <= (mr->phys + (mr->size - 1U)))) {
			virt = (uintptr_t)(phys - mr->phys) + mr->virt;
			break;
		}
	}

	return virt;
}

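// Free partition heap memory identified by its physical address. Panics if
// the range is not mapped in the partition.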
error_t
partition_free_phys(partition_t *partition, paddr_t phys, size_t bytes)
{
	uintptr_t virt = phys_to_virt(partition, phys, bytes);

	if (virt == VADDR_INVALID) {
		panic("Attempt to free memory not in partition");
	}

	return partition_free(partition, (void *)virt, bytes);
}

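// Translate a hypervisor virtual address inside one of the partition's mapped
// ranges back to a physical address. Returns PADDR_INVALID if the address is
// not mapped.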
paddr_t
partition_virt_to_phys(partition_t *partition, uintptr_t addr)
{
	paddr_t phys = PADDR_INVALID;

	for (count_t i = 0U; i < util_array_size(partition->mapped_ranges);
	     i++) {
		partition_mapped_range_t *mr = &partition->mapped_ranges[i];
		if (mr->size == 0U) {
			continue;
		}
		if ((addr >= mr->virt) &&
		    (addr <= (mr->virt + (mr->size - 1U)))) {
			phys = (paddr_t)(addr - mr->virt) + mr->phys;
			break;
		}
	}

	return phys;
}

error_t
partition_standard_handle_object_create_partition(partition_create_t create)
{
	partition_t *partition = create.partition;
	assert(partition != NULL);

	return allocator_init(&partition->allocator);
}

error_t
partition_standard_handle_object_activate_partition(partition_t *partition)
{
	error_t err;

	assert(partition->header.partition != NULL);
	assert(partition->header.partition != partition);

	if (partition_option_flags_get_privileged(&partition->options) &&
	    !partition_option_flags_get_privileged(
		    &partition->header.partition->options)) {
		err = ERROR_DENIED;
		goto out;
	}

	// Partitions hold a reference to themselves to prevent asynchronous
	// destruction when the last capability is deleted.
	//
	// Partitions must be explicitly destroyed to ensure that all objects
	// in them are deactivated synchronously, especially threads which
	// might still be executing on other CPUs; this self-reference will be
	// deleted after that is done. This destruction operation is not yet
	// implemented.
	(void)object_get_partition_additional(partition);

	err = OK;
out:
	return err;
}

noreturn void
partition_standard_handle_object_deactivate_partition(void)
{
	// This is currently not implemented and not needed. The self-reference
	// taken in activate() above should prevent this, but we panic here to
	// ensure that it doesn't happen by accident.
	panic("Partition deactivation attempted");
}

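// Donate a physical memory range from one partition to another.
//
// If from_heap is true the range is currently owned by the source partition's
// heap allocator, otherwise by the source partition itself; in both cases the
// memory database entry is transferred to the destination partition.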
error_t
partition_mem_donate(partition_t *src_partition, paddr_t base, size_t size,
		     partition_t *dst_partition, bool from_heap)
{
	error_t ret;

	partition_t *hyp_partition = partition_get_private();

	if ((size != 0U) && (!util_add_overflows(base, size - 1U))) {
		if (from_heap) {
			ret = memdb_update(hyp_partition, base,
					   base + (size - 1U),
					   (uintptr_t)dst_partition,
					   MEMDB_TYPE_PARTITION,
					   (uintptr_t)&src_partition->allocator,
					   MEMDB_TYPE_ALLOCATOR);
		} else {
			ret = memdb_update(
				hyp_partition, base, base + (size - 1U),
				(uintptr_t)dst_partition, MEMDB_TYPE_PARTITION,
				(uintptr_t)src_partition, MEMDB_TYPE_PARTITION);
		}
	} else {
		ret = ERROR_ARGUMENT_SIZE;
	}

	return ret;
}

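// Add an already-mapped physical range to a partition's heap allocator.
//
// Ownership in the memory database is moved from the partition to its
// allocator, and the allocator is then notified of the new RAM range. The
// range must already be covered by one of the partition's mapped ranges.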
error_t
partition_add_heap(partition_t *partition, paddr_t base, size_t size)
{
	error_t ret;

	assert(partition != NULL);
	assert(size != 0U);

	partition_t *hyp_partition = partition_get_private();

	if ((size != 0U) && (!util_add_overflows(base, size - 1U))) {
		ret = memdb_update(hyp_partition, base, base + (size - 1U),
				   (uintptr_t)&partition->allocator,
				   MEMDB_TYPE_ALLOCATOR, (uintptr_t)partition,
				   MEMDB_TYPE_PARTITION);
	} else {
		ret = ERROR_ARGUMENT_SIZE;
	}

	if (ret == OK) {
		uintptr_t virt = phys_to_virt(partition, base, size);
		assert(virt != VADDR_INVALID);
		ret = trigger_allocator_add_ram_range_event(partition, base,
							    virt, size);
	}

	return ret;
}

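// Map a newly donated physical range into the hypervisor address space,
// record it in a free slot of the partition's mapped_ranges array, and hand
// it to the partition's allocator. Called with the partition header lock
// held.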
static error_t
new_memory_add(partition_t *partition, partition_t *hyp_partition, paddr_t phys,
	       size_t size)
{
	error_t ret = OK;
	uintptr_t virt;

	partition_mapped_range_t *mr = NULL;
	for (count_t i = 0U; i < util_array_size(partition->mapped_ranges);
	     i++) {
		if (partition->mapped_ranges[i].size == 0U) {
			mr = &partition->mapped_ranges[i];
			break;
		}
	}

	if (mr == NULL) {
		ret = ERROR_NORESOURCES;
		goto out;
	}

	// Use large page size for virt-phys alignment.
	paddr_t phys_align_base =
		util_balign_down(phys, PGTABLE_HYP_LARGE_PAGE_SIZE);
	size_t phys_align_offset = phys - phys_align_base;
	size_t phys_align_size = phys_align_offset + size;

	virt_range_result_t vr = hyp_aspace_allocate(phys_align_size);
	if (vr.e != OK) {
		ret = vr.e;
		goto out;
	}

	virt = vr.r.base + phys_align_offset;

	pgtable_hyp_start();
	// FIXME:
	ret = pgtable_hyp_map(hyp_partition, virt, size, phys,
			      PGTABLE_HYP_MEMTYPE_WRITEBACK, PGTABLE_ACCESS_RW,
			      VMSA_SHAREABILITY_INNER_SHAREABLE);
	pgtable_hyp_commit();
	if (ret == OK) {
		ret = trigger_allocator_add_ram_range_event(partition, phys,
							    virt, size);
	}
	if (ret != OK) {
		// FIXME:
		// This should unmap the failed range, freeing it back to the
		// target partition while preserving the levels that were
		// preallocated, and then unmap the preserved tables (if they
		// are empty), freeing them back to the hyp_partition.
		pgtable_hyp_start();
		pgtable_hyp_unmap(hyp_partition, virt, size,
				  PGTABLE_HYP_UNMAP_PRESERVE_NONE);
		pgtable_hyp_commit();
		hyp_aspace_deallocate(partition, vr.r);
	} else {
		mr->virt = virt;
		mr->phys = phys;
		mr->size = size;

		LOG(DEBUG, INFO,
		    "added heap: partition {:#x}, virt {:#x}, phys {:#x}, size {:#x}",
		    (uintptr_t)partition, virt, phys, size);
	}

out:
	return ret;
}

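// Take ownership of a physical range, map it into the hypervisor address
// space and add it to the partition's heap allocator.
//
// The range must be page-aligned and must not already be mapped in the
// partition. If mapping fails, the memory database update is rolled back.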
error_t
partition_map_and_add_heap(partition_t *partition, paddr_t phys, size_t size)
{
	error_t ret;
	error_t err = OK;

	assert(partition != NULL);
	assert(size != 0U);

	// This should not be called for memory already mapped.
	if (phys_to_virt(partition, phys, size) != VADDR_INVALID) {
		panic("Attempt to add memory already in partition");
	}

	// FIXME:
	// Mapping the partition should preallocate top page-table levels from
	// the hyp partition and then map with the target partition, but we
	// have a chicken-and-egg problem to solve: if the target partition has
	// no memory yet (because it is new) then it can't allocate page
	// tables. We will probably need to seed new partition allocators with
	// some memory from the parent partition.
	partition_t *hyp_partition = partition_get_private();

	if ((size == 0U) || (util_add_overflows(phys, size - 1U))) {
		ret = ERROR_ARGUMENT_SIZE;
		goto out;
	}

	if (!util_is_baligned(phys, PGTABLE_HYP_PAGE_SIZE) ||
	    !util_is_baligned(size, PGTABLE_HYP_PAGE_SIZE)) {
		ret = ERROR_ARGUMENT_ALIGNMENT;
		goto out;
	}

	ret = memdb_update(hyp_partition, phys, phys + (size - 1U),
			   (uintptr_t)&partition->allocator,
			   MEMDB_TYPE_ALLOCATOR, (uintptr_t)partition,
			   MEMDB_TYPE_PARTITION);
	if (ret != OK) {
		goto out;
	}

	spinlock_acquire(&partition->header.lock);

	// Add a new mapped range for the memory.
	ret = new_memory_add(partition, hyp_partition, phys, size);

	spinlock_release(&partition->header.lock);

	if (ret != OK) {
		err = memdb_update(hyp_partition, phys, phys + (size - 1U),
				   (uintptr_t)partition, MEMDB_TYPE_PARTITION,
				   (uintptr_t)&partition->allocator,
				   MEMDB_TYPE_ALLOCATOR);
		if (err != OK) {
			panic("Error updating memdb.");
		}
	}
out:
	return ret;
}

#if defined(PLATFORM_TRACE_STANDALONE_REGION)

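// Map a physical range for the standalone trace region and record it in a
// free slot of the partition's mapped_ranges array. On success the chosen
// virtual address is returned through virt_ret; on failure the claimed slot
// (if any) is left for the caller to clear via *mr.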
static error_t
new_memory_add_trace(partition_t *partition, paddr_t phys, size_t size,
		     partition_mapped_range_t **mr, uintptr_result_t *virt_ret)
{
	error_t ret = OK;
	uintptr_t virt;

	for (count_t i = 0U; i < util_array_size(partition->mapped_ranges);
	     i++) {
		if (partition->mapped_ranges[i].size == 0U) {
			*mr = &partition->mapped_ranges[i];
			break;
		}
	}

	if (*mr == NULL) {
		ret = ERROR_NORESOURCES;
		goto out;
	}

	// Use large page size for virt-phys alignment.
	paddr_t phys_align_base =
		util_balign_down(phys, PGTABLE_HYP_LARGE_PAGE_SIZE);
	size_t phys_align_offset = phys - phys_align_base;
	size_t phys_align_size = phys_align_offset + size;

	virt_range_result_t vr = hyp_aspace_allocate(phys_align_size);
	if (vr.e != OK) {
		ret = vr.e;
		goto out;
	}

	virt = vr.r.base + phys_align_offset;
	(*mr)->virt = virt;
	(*mr)->phys = phys;
	(*mr)->size = size;

	pgtable_hyp_start();
	ret = pgtable_hyp_map(partition, virt, size, phys,
			      PGTABLE_HYP_MEMTYPE_WRITEBACK, PGTABLE_ACCESS_RW,
			      VMSA_SHAREABILITY_INNER_SHAREABLE);
	if (ret == OK) {
		pgtable_hyp_commit();
	} else {
		pgtable_hyp_unmap(partition, virt, size,
				  PGTABLE_HYP_UNMAP_PRESERVE_NONE);
		pgtable_hyp_commit();
		hyp_aspace_deallocate(partition, vr.r);
	}
	if (ret == OK) {
		(*virt_ret).r = virt;
		LOG(DEBUG, INFO,
		    "added trace: partition {:#x}, virt {:#x}, phys {:#x}, size {:#x}",
		    (uintptr_t)partition, (*virt_ret).r, phys, size);
	}

out:
	return ret;
}

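// Map a physical range into the hypervisor address space for the standalone
// trace region and return the resulting virtual address.
//
// The range must be page-aligned. Its memory database entry is changed from
// the partition to the trace type, and rolled back if the mapping fails.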
uintptr_result_t
partition_map_and_add_trace(partition_t *partition, paddr_t phys, size_t size)
{
	error_t ret;
	error_t err = OK;
	uintptr_result_t virt_ret = { 0 };

	assert(partition != NULL);
	assert(size != 0U);

	if ((size == 0U) || (util_add_overflows(phys, size - 1U))) {
		ret = ERROR_ARGUMENT_SIZE;
		goto out;
	}

	if (!util_is_baligned(phys, PGTABLE_HYP_PAGE_SIZE) ||
	    !util_is_baligned(size, PGTABLE_HYP_PAGE_SIZE)) {
		ret = ERROR_ARGUMENT_ALIGNMENT;
		goto out;
	}

	partition_t *hyp_partition = partition_get_private();
	ret = memdb_update(hyp_partition, phys, phys + (size - 1U),
			   (uintptr_t)NULL, MEMDB_TYPE_TRACE,
			   (uintptr_t)partition, MEMDB_TYPE_PARTITION);
	if (ret != OK) {
		goto out;
	}

	// Add a new mapped range for the memory.
	partition_mapped_range_t *mr = NULL;

	ret = new_memory_add_trace(partition, phys, size, &mr, &virt_ret);

	if (ret != OK) {
		err = memdb_update(hyp_partition, phys, phys + (size - 1U),
				   (uintptr_t)partition, MEMDB_TYPE_PARTITION,
				   (uintptr_t)NULL, MEMDB_TYPE_TRACE);
		if (err != OK) {
			panic("Error updating memdb.");
		}

		if (mr != NULL) {
			mr->virt = 0U;
			mr->phys = 0U;
			mr->size = 0U;
		}
	}
out:
	virt_ret.e = ret;
	return virt_ret;
}
#endif