1 /*
2 * Copyright 2019 The Hafnium Authors.
3 *
4 * Use of this source code is governed by a BSD-style
5 * license that can be found in the LICENSE file or at
6 * https://opensource.org/licenses/BSD-3-Clause.
7 */
8
9 #include "hf/arch/vm/mm.h"
10
11 #include "test/hftest.h"
12
13 /* Number of pages reserved for page tables. Increase if necessary. */
14 #define PTABLE_PAGES 4
15
/**
 * Start the stage-1 address space mapping at 0x1000 so that the mm library
 * creates an L2 table to which the first L1 descriptor points.
 * Provided the SPMC and SP images reside below 1GB, same as peripherals, this
 * prevents a case in which the mm library would have to break down the first
 * L1 block descriptor while currently executing from a region within
 * the same L1 descriptor. This is not architecturally possible.
 */
24 #define HFTEST_STAGE1_START_ADDRESS (0x1000)
25
26 alignas(alignof(struct mm_page_table)) static char ptable_buf
27 [sizeof(struct mm_page_table) * PTABLE_PAGES];
28
29 static struct mpool ppool;
30 static struct mm_ptable ptable;
31
hftest_mm_get_stage1(void)32 struct mm_stage1_locked hftest_mm_get_stage1(void)
33 {
34 return (struct mm_stage1_locked){.ptable = &ptable};
35 }
36
/**
 * Returns the file-local page pool backing the hftest page tables.
 * The pool is populated from the static ptable_buf by hftest_mm_init().
 */
struct mpool *hftest_mm_get_ppool(void)
{
	return &ppool;
}
41
hftest_mm_init(void)42 bool hftest_mm_init(void)
43 {
44 struct mm_stage1_locked stage1_locked;
45
46 /* Call arch init before calling below mapping routines */
47 if (!arch_vm_mm_init()) {
48 return false;
49 }
50
51 mpool_init(&ppool, sizeof(struct mm_page_table));
52 if (!mpool_add_chunk(&ppool, ptable_buf, sizeof(ptable_buf))) {
53 HFTEST_FAIL(true, "Failed to add buffer to page-table pool.");
54 }
55
56 if (!mm_ptable_init(&ptable, 0, MM_FLAG_STAGE1, &ppool)) {
57 HFTEST_FAIL(true, "Unable to allocate memory for page table.");
58 }
59
60 stage1_locked = hftest_mm_get_stage1();
61 mm_identity_map(stage1_locked,
62 pa_init((uintptr_t)HFTEST_STAGE1_START_ADDRESS),
63 pa_init(mm_ptable_addr_space_end(MM_FLAG_STAGE1)),
64 MM_MODE_R | MM_MODE_W | MM_MODE_X, &ppool);
65
66 arch_vm_mm_enable(ptable.root);
67
68 return true;
69 }
70
/**
 * Identity-maps [base, base + size) into the hftest stage-1 page table with
 * the given MM_MODE_* flags, drawing intermediate tables from the file-local
 * page pool. Aborts the test if the mapping cannot be created.
 */
void hftest_mm_identity_map(const void *base, size_t size, uint32_t mode)
{
	paddr_t begin = pa_from_va(va_from_ptr(base));
	paddr_t limit = pa_add(begin, size);
	struct mm_stage1_locked locked = hftest_mm_get_stage1();
	void *mapped = mm_identity_map(locked, begin, limit, mode, &ppool);

	/* mm_identity_map() returns the mapped base on success. */
	if (mapped != base) {
		FAIL("Could not add new page table mapping. Try increasing "
		     "size of the page table buffer.");
	}
}
82
/**
 * Enables stage-1 address translation on the current vCPU using the root
 * page table already built by hftest_mm_init(). Must only be called after
 * hftest_mm_init() has succeeded.
 */
void hftest_mm_vcpu_init(void)
{
	arch_vm_mm_enable(ptable.root);
}
87