/*
 * Copyright (c) 2020 Travis Geiselbrecht
 *
 * Use of this source code is governed by a MIT-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/MIT
 */
#if ARCH_HAS_MMU

#include <arch/mmu.h>

#include <lk/cpp.h>
#include <lk/debug.h>
#include <lk/err.h>
#include <lib/unittest.h>
#include <kernel/vm.h>

namespace {

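// verify that a user address space can be created and destroyed, or that
// creation fails cleanly on architectures without user aspace support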
bool create_user_aspace() {
    BEGIN_TEST;

    if (arch_mmu_supports_user_aspaces()) {
        arch_aspace_t as;
        status_t err = arch_mmu_init_aspace(&as, USER_ASPACE_BASE, USER_ASPACE_SIZE, 0);
        ASSERT_EQ(NO_ERROR, err, "init aspace");

        err = arch_mmu_destroy_aspace(&as);
        EXPECT_EQ(NO_ERROR, err, "destroy");
    } else {
        arch_aspace_t as;
        status_t err = arch_mmu_init_aspace(&as, USER_ASPACE_BASE, USER_ASPACE_SIZE, 0);
        ASSERT_EQ(ERR_NOT_SUPPORTED, err, "init aspace");
    }

    END_TEST;
}

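// allocate a few physical pages, map them into a fresh user aspace, query the
// mappings back, then destroy the aspace and free the pages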
bool map_user_pages() {
    BEGIN_TEST;

    if (arch_mmu_supports_user_aspaces()) {
        arch_aspace_t as;
        status_t err = arch_mmu_init_aspace(&as, USER_ASPACE_BASE, USER_ASPACE_SIZE, 0);
        ASSERT_EQ(NO_ERROR, err, "init aspace");

        auto aspace_cleanup = lk::make_auto_call([&]() { arch_mmu_destroy_aspace(&as); });

        // allocate a batch of pages
        struct list_node pages = LIST_INITIAL_VALUE(pages);
        size_t count = pmm_alloc_pages(4, &pages);
        ASSERT_EQ(4U, count, "alloc pages");
        ASSERT_EQ(4U, list_length(&pages), "page list");

        auto pages_cleanup = lk::make_auto_call([&]() { pmm_free(&pages); });

        // map the pages into the address space
        vaddr_t va = USER_ASPACE_BASE;
        vm_page_t *p;
        list_for_every_entry(&pages, p, vm_page_t, node) {
            err = arch_mmu_map(&as, va, vm_page_to_paddr(p), 1, ARCH_MMU_FLAG_PERM_USER);
            EXPECT_LE(NO_ERROR, err, "map page");
            va += PAGE_SIZE;
        }

        // query the pages to make sure they match
        va = USER_ASPACE_BASE;
        list_for_every_entry(&pages, p, vm_page_t, node) {
            paddr_t pa;
            uint flags;
            err = arch_mmu_query(&as, va, &pa, &flags);
            EXPECT_EQ(NO_ERROR, err, "query");
            EXPECT_EQ(vm_page_to_paddr(p), pa, "pa");
            EXPECT_EQ(ARCH_MMU_FLAG_PERM_USER, flags, "flags");
            va += PAGE_SIZE;

            //unittest_printf("\npa %#lx, flags %#x", pa, flags);
        }

        // destroy the aspace with the pages mapped
        aspace_cleanup.cancel();
        err = arch_mmu_destroy_aspace(&as);
        EXPECT_EQ(NO_ERROR, err, "destroy");

        // free the pages we allocated before
        pages_cleanup.cancel();
        size_t freed = pmm_free(&pages);
        ASSERT_EQ(count, freed, "free");
    }

    END_TEST;
}

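// helper: map a page-sized region with the given arch flags in the passed in
// aspace and verify arch_mmu_query reports the same flags, before and after
// the region is freed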
bool map_region_query_result(vmm_aspace_t *aspace, uint arch_flags) {
    BEGIN_TEST;
    void *ptr = NULL;

    // map a page-sized region in the passed in address space
    EXPECT_EQ(NO_ERROR, vmm_alloc(aspace, "test region", PAGE_SIZE, &ptr, 0, /* vmm_flags */ 0, arch_flags), "map region");
    EXPECT_NONNULL(ptr, "not null");

    // query the page and verify the mapping and flags look correct
    {
        paddr_t pa = 0;
        uint flags = ~arch_flags;
        EXPECT_EQ(NO_ERROR, arch_mmu_query(&aspace->arch_aspace, (vaddr_t)ptr, &pa, &flags), "arch_query");
        EXPECT_NE(0U, pa, "valid pa");
        EXPECT_EQ(arch_flags, flags, "query flags");
    }

    // free this region we made
    EXPECT_EQ(NO_ERROR, vmm_free_region(aspace, (vaddr_t)ptr), "free region");

    // query again to verify the mapping is gone
    {
        paddr_t pa = 0;
        uint flags = ~arch_flags;
        EXPECT_EQ(ERR_NOT_FOUND, arch_mmu_query(&aspace->arch_aspace, (vaddr_t)ptr, &pa, &flags), "arch_query");
    }

    END_TEST;
}

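// helper: attempt to map a page-sized region with arch flags the architecture
// should reject and verify the expected error is returned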
bool map_region_expect_failure(vmm_aspace_t *aspace, uint arch_flags, int expected_error) {
    BEGIN_TEST;
    void *ptr = NULL;

    // attempt to create a page-sized region with arch flags that should be rejected
    EXPECT_EQ(expected_error, vmm_alloc(aspace, "test region", PAGE_SIZE, &ptr, 0, /* vmm_flags */ 0, arch_flags), "map region");
    EXPECT_NULL(ptr, "null");

    END_TEST;
}

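// exercise the helpers above against the kernel aspace with various
// permission combinations, including no-execute when supported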
bool map_query_pages() {
    BEGIN_TEST;

    vmm_aspace_t *kaspace = vmm_get_kernel_aspace();
    ASSERT_NONNULL(kaspace, "kaspace");

    // try mapping pages in the kernel address space with various permissions and read them back via arch query
    EXPECT_TRUE(map_region_query_result(kaspace, 0), "0");
    EXPECT_TRUE(map_region_query_result(kaspace, ARCH_MMU_FLAG_PERM_RO), "1");
    if (arch_mmu_supports_nx_mappings()) {
        EXPECT_TRUE(map_region_query_result(kaspace, ARCH_MMU_FLAG_PERM_NO_EXECUTE), "2");
        EXPECT_TRUE(map_region_query_result(kaspace, ARCH_MMU_FLAG_PERM_RO | ARCH_MMU_FLAG_PERM_NO_EXECUTE), "3");
    } else {
        EXPECT_TRUE(map_region_expect_failure(kaspace, ARCH_MMU_FLAG_PERM_NO_EXECUTE, ERR_INVALID_ARGS), "2");
        EXPECT_TRUE(map_region_expect_failure(kaspace, ARCH_MMU_FLAG_PERM_RO | ARCH_MMU_FLAG_PERM_NO_EXECUTE, ERR_INVALID_ARGS), "3");
    }

    EXPECT_TRUE(map_region_query_result(kaspace, ARCH_MMU_FLAG_PERM_USER), "4");
    EXPECT_TRUE(map_region_query_result(kaspace, ARCH_MMU_FLAG_PERM_USER | ARCH_MMU_FLAG_PERM_RO), "5");
    if (arch_mmu_supports_nx_mappings()) {
        EXPECT_TRUE(map_region_query_result(kaspace, ARCH_MMU_FLAG_PERM_USER | ARCH_MMU_FLAG_PERM_NO_EXECUTE), "6");
        EXPECT_TRUE(map_region_query_result(kaspace, ARCH_MMU_FLAG_PERM_USER | ARCH_MMU_FLAG_PERM_RO | ARCH_MMU_FLAG_PERM_NO_EXECUTE), "7");
    } else {
        EXPECT_TRUE(map_region_expect_failure(kaspace, ARCH_MMU_FLAG_PERM_USER | ARCH_MMU_FLAG_PERM_NO_EXECUTE, ERR_INVALID_ARGS), "6");
        EXPECT_TRUE(map_region_expect_failure(kaspace, ARCH_MMU_FLAG_PERM_USER | ARCH_MMU_FLAG_PERM_RO | ARCH_MMU_FLAG_PERM_NO_EXECUTE, ERR_INVALID_ARGS), "7");
    }

    END_TEST;
}

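// create a user aspace, context switch to it, map a page, and verify the same
// memory is visible through both the user mapping and the kernel's mapping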
bool context_switch() {
    BEGIN_TEST;

    // create a user space, map a page or two and access it
    // NOTE: this assumes that kernel code can directly access user space, which isn't necessarily true
    // on all architectures. See SMAP on x86, PAN on ARM, and SUM on RISC-V.
    if (arch_mmu_supports_user_aspaces()) {
        arch_aspace_t as;
        status_t err = arch_mmu_init_aspace(&as, USER_ASPACE_BASE, USER_ASPACE_SIZE, 0);
        ASSERT_EQ(NO_ERROR, err, "init aspace");
        auto aspace_cleanup = lk::make_auto_call([&]() { arch_mmu_destroy_aspace(&as); });

        // switch to the address space
        arch_mmu_context_switch(&as);
        auto cleanup_switch = lk::make_auto_call([&]() { arch_mmu_context_switch(NULL); });

        // allocate and map a page, then verify it can be read through the user mapping
        vm_page_t *p = pmm_alloc_page();
        ASSERT_NONNULL(p, "page");
        auto page_cleanup = lk::make_auto_call([&]() { pmm_free_page(p); });

        // map it
        err = arch_mmu_map(&as, USER_ASPACE_BASE, vm_page_to_paddr(p), 1, ARCH_MMU_FLAG_PERM_USER);
        ASSERT_LE(NO_ERROR, err, "map");

        // write a known value to the page through its kernel virtual address
        volatile int *kv = static_cast<volatile int *>(paddr_to_kvaddr(vm_page_to_paddr(p)));
        *kv = 99;

        // read the data back from the page
        volatile int *ptr = reinterpret_cast<volatile int *>(USER_ASPACE_BASE);
        volatile int foo = *ptr;

        EXPECT_EQ(99, foo, "readback");
        *kv = 0xaa;
        foo = *ptr;
        EXPECT_EQ(0xaa, foo, "readback 2");

        // write to the page and read it back from the kernel side
        *ptr = 0x55;
        foo = *kv;
        EXPECT_EQ(0x55, foo, "readback 3");

        // switch back to kernel aspace
        cleanup_switch.cancel();
        arch_mmu_context_switch(NULL);

        // destroy it
        aspace_cleanup.cancel();
        err = arch_mmu_destroy_aspace(&as);
        EXPECT_EQ(NO_ERROR, err, "destroy");

        // free the page
        page_cleanup.cancel();
        size_t c = pmm_free_page(p);
        EXPECT_EQ(1U, c, "free");
    }

    END_TEST;
}

BEGIN_TEST_CASE(arch_mmu_tests)
RUN_TEST(create_user_aspace);
RUN_TEST(map_user_pages);
RUN_TEST(map_query_pages);
RUN_TEST(context_switch);
END_TEST_CASE(arch_mmu_tests)

} // namespace

#endif // ARCH_HAS_MMU