// Copyright 2016 The Fuchsia Authors
//
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file or at
// https://opensource.org/licenses/MIT

#include <assert.h>
#include <err.h>
#include <fbl/alloc_checker.h>
#include <fbl/array.h>
#include <ktl/move.h>
#include <lib/unittest/unittest.h>
#include <vm/physmap.h>
#include <vm/vm.h>
#include <vm/vm_address_region.h>
#include <vm/vm_aspace.h>
#include <vm/vm_object.h>
#include <vm/vm_object_paged.h>
#include <vm/vm_object_physical.h>
#include <zircon/types.h>

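// Read/write MMU permission flags applied to every test mapping below.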
static const uint kArchRwFlags = ARCH_MMU_FLAG_PERM_READ | ARCH_MMU_FLAG_PERM_WRITE;

// Allocates a single page, translates it to a vm_page_t and frees it.
static bool pmm_smoke_test() {
    BEGIN_TEST;
    paddr_t pa;
    vm_page_t* page;

    zx_status_t status = pmm_alloc_page(0, &page, &pa);
    ASSERT_EQ(ZX_OK, status, "pmm_alloc single page");
    ASSERT_NONNULL(page, "pmm_alloc single page");
    ASSERT_NE(0u, pa, "pmm_alloc single page");

    vm_page_t* page2 = paddr_to_vm_page(pa);
    ASSERT_EQ(page2, page, "paddr_to_vm_page on single page");

    pmm_free_page(page);
    END_TEST;
}

// Allocates more than one page and frees them
static bool pmm_multi_alloc_test() {
    BEGIN_TEST;
    list_node list = LIST_INITIAL_VALUE(list);

    static const size_t alloc_count = 16;

    zx_status_t status = pmm_alloc_pages(alloc_count, 0, &list);
    EXPECT_EQ(ZX_OK, status, "pmm_alloc_pages a few pages");
    EXPECT_EQ(alloc_count, list_length(&list),
              "pmm_alloc_pages a few pages list count");

    pmm_free(&list);
    END_TEST;
}

// Allocates too many pages and makes sure it fails nicely.
static bool pmm_oversized_alloc_test() {
    BEGIN_TEST;
    list_node list = LIST_INITIAL_VALUE(list);

    static const size_t alloc_count =
        (128 * 1024 * 1024 * 1024ULL) / PAGE_SIZE; // 128GB

    zx_status_t status = pmm_alloc_pages(alloc_count, 0, &list);
    EXPECT_EQ(ZX_ERR_NO_MEMORY, status, "pmm_alloc_pages failed to alloc");
    EXPECT_TRUE(list_is_empty(&list), "pmm_alloc_pages list is empty");

    pmm_free(&list);
    END_TEST;
}

// Allocates one page and frees it.
static bool pmm_alloc_contiguous_one_test() {
    BEGIN_TEST;
    list_node list = LIST_INITIAL_VALUE(list);
    paddr_t pa;
    size_t count = 1U;
    zx_status_t status = pmm_alloc_contiguous(count, 0, PAGE_SIZE_SHIFT, &pa, &list);
    ASSERT_EQ(ZX_OK, status, "pmm_alloc_contiguous returned failure\n");
    ASSERT_EQ(count, list_length(&list), "pmm_alloc_contiguous list size is wrong");
    ASSERT_NONNULL(paddr_to_physmap(pa), "");
    pmm_free(&list);
    END_TEST;
}

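// Step a simple 32-bit linear congruential generator (the classic Numerical
// Recipes constants), so fill_region/test_region pairs can regenerate the same
// pattern from the same seed.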
static uint32_t test_rand(uint32_t seed) {
    return (seed = seed * 1664525 + 1013904223);
}

// fill a region of memory with a pattern based on the address of the region
static void fill_region(uintptr_t seed, void* _ptr, size_t len) {
    uint32_t* ptr = (uint32_t*)_ptr;

    ASSERT(IS_ALIGNED((uintptr_t)ptr, 4));

    uint32_t val = (uint32_t)seed;
#if UINTPTR_MAX > UINT32_MAX
    val ^= (uint32_t)(seed >> 32);
#endif
    for (size_t i = 0; i < len / 4; i++) {
        ptr[i] = val;

        val = test_rand(val);
    }
}

// test a region of memory against a known pattern
static bool test_region(uintptr_t seed, void* _ptr, size_t len) {
    uint32_t* ptr = (uint32_t*)_ptr;

    ASSERT(IS_ALIGNED((uintptr_t)ptr, 4));

    uint32_t val = (uint32_t)seed;
#if UINTPTR_MAX > UINT32_MAX
    val ^= (uint32_t)(seed >> 32);
#endif
    for (size_t i = 0; i < len / 4; i++) {
        if (ptr[i] != val) {
            unittest_printf("value at %p (%zu) is incorrect: 0x%x vs 0x%x\n", &ptr[i], i, ptr[i],
                            val);
            return false;
        }

        val = test_rand(val);
    }

    return true;
}

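// Helper: fill a region with the address-seeded pattern, then verify it reads
// back intact; reports corruption through the unittest framework and returns
// the pass/fail result to the caller.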
static bool fill_and_test(void* ptr, size_t len) {
    BEGIN_TEST;

    // fill it with a pattern
    fill_region((uintptr_t)ptr, ptr, len);

    // test that the pattern is read back properly
    auto result = test_region((uintptr_t)ptr, ptr, len);
    EXPECT_TRUE(result, "testing region for corruption");

    END_TEST;
}

// Allocates a region in kernel space, reads/writes it, then destroys it.
static bool vmm_alloc_smoke_test() {
    BEGIN_TEST;
    static const size_t alloc_size = 256 * 1024;

    // allocate a region of memory
    void* ptr;
    auto kaspace = VmAspace::kernel_aspace();
    auto err = kaspace->Alloc(
        "test", alloc_size, &ptr, 0, 0, kArchRwFlags);
    ASSERT_EQ(ZX_OK, err, "VmAspace::Alloc region of memory");
    ASSERT_NONNULL(ptr, "VmAspace::Alloc region of memory");

    // fill with known pattern and test
    if (!fill_and_test(ptr, alloc_size)) {
        all_ok = false;
    }

    // free the region
    err = kaspace->FreeRegion(reinterpret_cast<vaddr_t>(ptr));
    EXPECT_EQ(ZX_OK, err, "VmAspace::FreeRegion region of memory");
    END_TEST;
}

// Allocates a contiguous region in kernel space, reads/writes it,
// then destroys it.
static bool vmm_alloc_contiguous_smoke_test() {
    BEGIN_TEST;
    static const size_t alloc_size = 256 * 1024;

    // allocate a region of memory
    void* ptr;
    auto kaspace = VmAspace::kernel_aspace();
    auto err = kaspace->AllocContiguous("test",
                                        alloc_size, &ptr, 0,
                                        VmAspace::VMM_FLAG_COMMIT, kArchRwFlags);
    ASSERT_EQ(ZX_OK, err, "VmAspace::AllocContiguous region of memory");
    ASSERT_NONNULL(ptr, "VmAspace::AllocContiguous region of memory");

    // fill with known pattern and test
    if (!fill_and_test(ptr, alloc_size)) {
        all_ok = false;
    }

    // test that it is indeed contiguous
    unittest_printf("testing that region is contiguous\n");
    paddr_t last_pa = 0;
    for (size_t i = 0; i < alloc_size / PAGE_SIZE; i++) {
        paddr_t pa = vaddr_to_paddr((uint8_t*)ptr + i * PAGE_SIZE);
        if (last_pa != 0) {
            EXPECT_EQ(pa, last_pa + PAGE_SIZE, "region is contiguous");
        }

        last_pa = pa;
    }

    // free the region
    err = kaspace->FreeRegion(reinterpret_cast<vaddr_t>(ptr));
    EXPECT_EQ(ZX_OK, err, "VmAspace::FreeRegion region of memory");
    END_TEST;
}

// Allocates a new address space and creates a few regions in it,
// then destroys it.
static bool multiple_regions_test() {
    BEGIN_TEST;
    void* ptr;
    static const size_t alloc_size = 16 * 1024;

    fbl::RefPtr<VmAspace> aspace = VmAspace::Create(0, "test aspace");
    ASSERT_NONNULL(aspace, "VmAspace::Create pointer");

    vmm_aspace_t* old_aspace = get_current_thread()->aspace;
    vmm_set_active_aspace(reinterpret_cast<vmm_aspace_t*>(aspace.get()));

    // allocate region 0
    zx_status_t err = aspace->Alloc("test0", alloc_size, &ptr, 0, 0, kArchRwFlags);
    ASSERT_EQ(ZX_OK, err, "VmAspace::Alloc region of memory");
    ASSERT_NONNULL(ptr, "VmAspace::Alloc region of memory");

    // fill with known pattern and test
    if (!fill_and_test(ptr, alloc_size)) {
        all_ok = false;
    }

    // allocate region 1
    err = aspace->Alloc("test1", 16384, &ptr, 0, 0, kArchRwFlags);
    ASSERT_EQ(ZX_OK, err, "VmAspace::Alloc region of memory");
    ASSERT_NONNULL(ptr, "VmAspace::Alloc region of memory");

    // fill with known pattern and test
    if (!fill_and_test(ptr, alloc_size)) {
        all_ok = false;
    }

    // allocate region 2
    err = aspace->Alloc("test2", 16384, &ptr, 0, 0, kArchRwFlags);
    ASSERT_EQ(ZX_OK, err, "VmAspace::Alloc region of memory");
    ASSERT_NONNULL(ptr, "VmAspace::Alloc region of memory");

    // fill with known pattern and test
    if (!fill_and_test(ptr, alloc_size)) {
        all_ok = false;
    }

    vmm_set_active_aspace(old_aspace);

    // free the address space all at once
    err = aspace->Destroy();
    EXPECT_EQ(ZX_OK, err, "VmAspace::Destroy");
    END_TEST;
}

static bool vmm_alloc_zero_size_fails() {
    BEGIN_TEST;
    const size_t zero_size = 0;
    void* ptr;
    zx_status_t err = VmAspace::kernel_aspace()->Alloc(
        "test", zero_size, &ptr, 0, 0, kArchRwFlags);
    ASSERT_EQ(ZX_ERR_INVALID_ARGS, err, "");
    END_TEST;
}

static bool vmm_alloc_bad_specific_pointer_fails() {
    BEGIN_TEST;
    // bad specific pointer
    void* ptr = (void*)1;
    zx_status_t err = VmAspace::kernel_aspace()->Alloc(
        "test", 16384, &ptr, 0,
        VmAspace::VMM_FLAG_VALLOC_SPECIFIC | VmAspace::VMM_FLAG_COMMIT, kArchRwFlags);
    ASSERT_EQ(ZX_ERR_INVALID_ARGS, err, "");
    END_TEST;
}

static bool vmm_alloc_contiguous_missing_flag_commit_fails() {
    BEGIN_TEST;
    // should have VmAspace::VMM_FLAG_COMMIT
    const uint zero_vmm_flags = 0;
    void* ptr;
    zx_status_t err = VmAspace::kernel_aspace()->AllocContiguous(
        "test", 4096, &ptr, 0, zero_vmm_flags, kArchRwFlags);
    ASSERT_EQ(ZX_ERR_INVALID_ARGS, err, "");
    END_TEST;
}

static bool vmm_alloc_contiguous_zero_size_fails() {
    BEGIN_TEST;
    const size_t zero_size = 0;
    void* ptr;
    zx_status_t err = VmAspace::kernel_aspace()->AllocContiguous(
        "test", zero_size, &ptr, 0, VmAspace::VMM_FLAG_COMMIT, kArchRwFlags);
    ASSERT_EQ(ZX_ERR_INVALID_ARGS, err, "");
    END_TEST;
}

// Allocates a vm address space object directly, allows it to go out of scope.
static bool vmaspace_create_smoke_test() {
    BEGIN_TEST;
    auto aspace = VmAspace::Create(0, "test aspace");
    zx_status_t err = aspace->Destroy();
    EXPECT_EQ(ZX_OK, err, "VmAspace::Destroy");
    END_TEST;
}

// Allocates a vm address space object directly, maps something on it,
// allows it to go out of scope.
static bool vmaspace_alloc_smoke_test() {
    BEGIN_TEST;
    auto aspace = VmAspace::Create(0, "test aspace2");

    void* ptr;
    auto err = aspace->Alloc("test", PAGE_SIZE, &ptr, 0, 0, kArchRwFlags);
    ASSERT_EQ(ZX_OK, err, "allocating region\n");

    // destroy the aspace, which should drop all the internal refs to it
    err = aspace->Destroy();
    EXPECT_EQ(ZX_OK, err, "VmAspace::Destroy");

    // drop the ref held by this pointer
    aspace.reset();
    END_TEST;
}

// Doesn't do anything, just prints all aspaces.
// Should be run after all other tests so that people can manually comb
// through the output for leaked test aspaces.
static bool dump_all_aspaces() {
    BEGIN_TEST;
    unittest_printf("verify there are no test aspaces left around\n");
    DumpAllAspaces(/*verbose*/ true);
    END_TEST;
}

// Creates a vm object.
static bool vmo_create_test() {
    BEGIN_TEST;
    fbl::RefPtr<VmObject> vmo;
    zx_status_t status = VmObjectPaged::Create(PMM_ALLOC_FLAG_ANY, 0u, PAGE_SIZE, &vmo);
    ASSERT_EQ(status, ZX_OK, "");
    ASSERT_TRUE(vmo, "");
    EXPECT_FALSE(vmo->is_contiguous(), "vmo is not contig\n");
    EXPECT_FALSE(vmo->is_resizable(), "vmo is not resizable\n");
    END_TEST;
}

// Creates a vm object, commits memory.
static bool vmo_commit_test() {
    BEGIN_TEST;
    static const size_t alloc_size = PAGE_SIZE * 16;
    fbl::RefPtr<VmObject> vmo;
    zx_status_t status = VmObjectPaged::Create(PMM_ALLOC_FLAG_ANY, 0u, alloc_size, &vmo);
    ASSERT_EQ(status, ZX_OK, "vmobject creation\n");
    ASSERT_TRUE(vmo, "vmobject creation\n");

    auto ret = vmo->CommitRange(0, alloc_size);
    ASSERT_EQ(ZX_OK, ret, "committing vm object\n");
    EXPECT_EQ(ROUNDUP_PAGE_SIZE(alloc_size),
              PAGE_SIZE * vmo->AllocatedPages(),
              "committing vm object\n");
    END_TEST;
}

// Creates a paged VMO, pins it, and tries operations that should unpin it.
static bool vmo_pin_test() {
    BEGIN_TEST;

    static const size_t alloc_size = PAGE_SIZE * 16;
    fbl::RefPtr<VmObject> vmo;
    zx_status_t status = VmObjectPaged::Create(
        PMM_ALLOC_FLAG_ANY, VmObjectPaged::kResizable, alloc_size, &vmo);
    ASSERT_EQ(status, ZX_OK, "vmobject creation\n");
    ASSERT_TRUE(vmo, "vmobject creation\n");

    status = vmo->Pin(PAGE_SIZE, alloc_size);
    EXPECT_EQ(ZX_ERR_OUT_OF_RANGE, status, "pinning out of range\n");
    status = vmo->Pin(PAGE_SIZE, 0);
    EXPECT_EQ(ZX_OK, status, "pinning range of len 0\n");
    status = vmo->Pin(alloc_size + PAGE_SIZE, 0);
    EXPECT_EQ(ZX_ERR_OUT_OF_RANGE, status, "pinning out-of-range of len 0\n");

    status = vmo->Pin(PAGE_SIZE, 3 * PAGE_SIZE);
    EXPECT_EQ(ZX_ERR_NOT_FOUND, status, "pinning uncommitted range\n");
    status = vmo->Pin(0, alloc_size);
    EXPECT_EQ(ZX_ERR_NOT_FOUND, status, "pinning uncommitted range\n");

    status = vmo->CommitRange(PAGE_SIZE, 3 * PAGE_SIZE);
    EXPECT_EQ(ZX_OK, status, "committing range\n");

    status = vmo->Pin(0, alloc_size);
    EXPECT_EQ(ZX_ERR_NOT_FOUND, status, "pinning uncommitted range\n");
    status = vmo->Pin(PAGE_SIZE, 4 * PAGE_SIZE);
    EXPECT_EQ(ZX_ERR_NOT_FOUND, status, "pinning uncommitted range\n");
    status = vmo->Pin(0, 4 * PAGE_SIZE);
    EXPECT_EQ(ZX_ERR_NOT_FOUND, status, "pinning uncommitted range\n");

    status = vmo->Pin(PAGE_SIZE, 3 * PAGE_SIZE);
    EXPECT_EQ(ZX_OK, status, "pinning committed range\n");

    status = vmo->DecommitRange(PAGE_SIZE, 3 * PAGE_SIZE);
    EXPECT_EQ(ZX_ERR_BAD_STATE, status, "decommitting pinned range\n");
    status = vmo->DecommitRange(PAGE_SIZE, PAGE_SIZE);
    EXPECT_EQ(ZX_ERR_BAD_STATE, status, "decommitting pinned range\n");
    status = vmo->DecommitRange(3 * PAGE_SIZE, PAGE_SIZE);
    EXPECT_EQ(ZX_ERR_BAD_STATE, status, "decommitting pinned range\n");

    vmo->Unpin(PAGE_SIZE, 3 * PAGE_SIZE);

    status = vmo->DecommitRange(PAGE_SIZE, 3 * PAGE_SIZE);
    EXPECT_EQ(ZX_OK, status, "decommitting unpinned range\n");

    status = vmo->CommitRange(PAGE_SIZE, 3 * PAGE_SIZE);
    EXPECT_EQ(ZX_OK, status, "committing range\n");
    status = vmo->Pin(PAGE_SIZE, 3 * PAGE_SIZE);
    EXPECT_EQ(ZX_OK, status, "pinning committed range\n");

    status = vmo->Resize(0);
    EXPECT_EQ(ZX_ERR_BAD_STATE, status, "resizing pinned range\n");

    vmo->Unpin(PAGE_SIZE, 3 * PAGE_SIZE);

    status = vmo->Resize(0);
    EXPECT_EQ(ZX_OK, status, "resizing unpinned range\n");

    END_TEST;
}

// Creates a paged VMO and pins the same pages multiple times
static bool vmo_multiple_pin_test() {
    BEGIN_TEST;

    static const size_t alloc_size = PAGE_SIZE * 16;
    fbl::RefPtr<VmObject> vmo;
    zx_status_t status = VmObjectPaged::Create(PMM_ALLOC_FLAG_ANY, 0u, alloc_size, &vmo);
    ASSERT_EQ(status, ZX_OK, "vmobject creation\n");
    ASSERT_TRUE(vmo, "vmobject creation\n");

    status = vmo->CommitRange(0, alloc_size);
    EXPECT_EQ(ZX_OK, status, "committing range\n");

    status = vmo->Pin(0, alloc_size);
    EXPECT_EQ(ZX_OK, status, "pinning whole range\n");
    status = vmo->Pin(PAGE_SIZE, 4 * PAGE_SIZE);
    EXPECT_EQ(ZX_OK, status, "pinning subrange\n");

    for (unsigned int i = 1; i < VM_PAGE_OBJECT_MAX_PIN_COUNT; ++i) {
        status = vmo->Pin(0, PAGE_SIZE);
        EXPECT_EQ(ZX_OK, status, "pinning first page max times\n");
    }
    status = vmo->Pin(0, PAGE_SIZE);
    EXPECT_EQ(ZX_ERR_UNAVAILABLE, status, "page is pinned too much\n");

    vmo->Unpin(0, alloc_size);
    status = vmo->DecommitRange(PAGE_SIZE, 4 * PAGE_SIZE);
    EXPECT_EQ(ZX_ERR_BAD_STATE, status, "decommitting pinned range\n");
    status = vmo->DecommitRange(5 * PAGE_SIZE, alloc_size - 5 * PAGE_SIZE);
    EXPECT_EQ(ZX_OK, status, "decommitting unpinned range\n");

    vmo->Unpin(PAGE_SIZE, 4 * PAGE_SIZE);
    status = vmo->DecommitRange(PAGE_SIZE, 4 * PAGE_SIZE);
    EXPECT_EQ(ZX_OK, status, "decommitting unpinned range\n");

    for (unsigned int i = 2; i < VM_PAGE_OBJECT_MAX_PIN_COUNT; ++i) {
        vmo->Unpin(0, PAGE_SIZE);
    }
    status = vmo->DecommitRange(0, PAGE_SIZE);
    EXPECT_EQ(ZX_ERR_BAD_STATE, status, "decommitting unpinned range\n");

    vmo->Unpin(0, PAGE_SIZE);
    status = vmo->DecommitRange(0, PAGE_SIZE);
    EXPECT_EQ(ZX_OK, status, "decommitting unpinned range\n");

    END_TEST;
}

// Creates a vm object, commits odd sized memory.
static bool vmo_odd_size_commit_test() {
    BEGIN_TEST;
    static const size_t alloc_size = 15;
    fbl::RefPtr<VmObject> vmo;
    zx_status_t status = VmObjectPaged::Create(PMM_ALLOC_FLAG_ANY, 0u, alloc_size, &vmo);
    ASSERT_EQ(status, ZX_OK, "vmobject creation\n");
    ASSERT_TRUE(vmo, "vmobject creation\n");

    auto ret = vmo->CommitRange(0, alloc_size);
    EXPECT_EQ(ZX_OK, ret, "committing vm object\n");
    EXPECT_EQ(ROUNDUP_PAGE_SIZE(alloc_size),
              PAGE_SIZE * vmo->AllocatedPages(),
              "committing vm object\n");
    END_TEST;
}

static bool vmo_create_physical_test() {
    BEGIN_TEST;

    paddr_t pa;
    vm_page_t* vm_page;
    zx_status_t status = pmm_alloc_page(0, &vm_page, &pa);
    uint32_t cache_policy;

    ASSERT_EQ(ZX_OK, status, "vm page allocation\n");
    ASSERT_TRUE(vm_page, "");

    fbl::RefPtr<VmObject> vmo;
    status = VmObjectPhysical::Create(pa, PAGE_SIZE, &vmo);
    ASSERT_EQ(status, ZX_OK, "vmobject creation\n");
    ASSERT_TRUE(vmo, "vmobject creation\n");
    cache_policy = vmo->GetMappingCachePolicy();
    EXPECT_EQ(ARCH_MMU_FLAG_UNCACHED, cache_policy, "check initial cache policy");
    EXPECT_TRUE(vmo->is_contiguous(), "check contiguous");

    pmm_free_page(vm_page);

    END_TEST;
}

// Creates a vm object that commits contiguous memory.
static bool vmo_create_contiguous_test() {
    BEGIN_TEST;
    static const size_t alloc_size = PAGE_SIZE * 16;
    fbl::RefPtr<VmObject> vmo;
    zx_status_t status = VmObjectPaged::CreateContiguous(PMM_ALLOC_FLAG_ANY, alloc_size, 0, &vmo);
    ASSERT_EQ(status, ZX_OK, "vmobject creation\n");
    ASSERT_TRUE(vmo, "vmobject creation\n");

    EXPECT_TRUE(vmo->is_contiguous(), "vmo is contig\n");

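    // Walk the VMO's backing pages with Lookup and check that each physical
    // address follows the previous one by exactly PAGE_SIZE.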
    paddr_t last_pa;
    auto lookup_func = [](void* ctx, size_t offset, size_t index, paddr_t pa) {
        paddr_t* last_pa = static_cast<paddr_t*>(ctx);
        if (index != 0 && *last_pa + PAGE_SIZE != pa) {
            return ZX_ERR_BAD_STATE;
        }
        *last_pa = pa;
        return ZX_OK;
    };
    status = vmo->Lookup(0, alloc_size, lookup_func, &last_pa);
    EXPECT_EQ(status, ZX_OK, "vmo lookup\n");

    END_TEST;
}

// Make sure decommitting is disallowed
static bool vmo_contiguous_decommit_test() {
    BEGIN_TEST;

    static const size_t alloc_size = PAGE_SIZE * 16;
    fbl::RefPtr<VmObject> vmo;
    zx_status_t status = VmObjectPaged::CreateContiguous(PMM_ALLOC_FLAG_ANY, alloc_size, 0, &vmo);
    ASSERT_EQ(status, ZX_OK, "vmobject creation\n");
    ASSERT_TRUE(vmo, "vmobject creation\n");

    status = vmo->DecommitRange(PAGE_SIZE, 4 * PAGE_SIZE);
    ASSERT_EQ(status, ZX_ERR_NOT_SUPPORTED, "decommit fails due to pinned pages\n");
    status = vmo->DecommitRange(0, 4 * PAGE_SIZE);
    ASSERT_EQ(status, ZX_ERR_NOT_SUPPORTED, "decommit fails due to pinned pages\n");
    status = vmo->DecommitRange(alloc_size - PAGE_SIZE, PAGE_SIZE);
    ASSERT_EQ(status, ZX_ERR_NOT_SUPPORTED, "decommit fails due to pinned pages\n");

    // Make sure all pages are still present and contiguous
    paddr_t last_pa;
    auto lookup_func = [](void* ctx, size_t offset, size_t index, paddr_t pa) {
        paddr_t* last_pa = static_cast<paddr_t*>(ctx);
        if (index != 0 && *last_pa + PAGE_SIZE != pa) {
            return ZX_ERR_BAD_STATE;
        }
        *last_pa = pa;
        return ZX_OK;
    };
    status = vmo->Lookup(0, alloc_size, lookup_func, &last_pa);
    ASSERT_EQ(status, ZX_OK, "vmo lookup\n");

    END_TEST;
}

// Creates a vm object, maps it, precommitted.
static bool vmo_precommitted_map_test() {
    BEGIN_TEST;
    static const size_t alloc_size = PAGE_SIZE * 16;
    fbl::RefPtr<VmObject> vmo;
    zx_status_t status = VmObjectPaged::Create(PMM_ALLOC_FLAG_ANY, 0, alloc_size, &vmo);
    ASSERT_EQ(status, ZX_OK, "vmobject creation\n");
    ASSERT_TRUE(vmo, "vmobject creation\n");

    auto ka = VmAspace::kernel_aspace();
    void* ptr;
    auto ret = ka->MapObjectInternal(vmo, "test", 0, alloc_size, &ptr,
                                     0, VmAspace::VMM_FLAG_COMMIT, kArchRwFlags);
    ASSERT_EQ(ZX_OK, ret, "mapping object");

    // fill with known pattern and test
    if (!fill_and_test(ptr, alloc_size)) {
        all_ok = false;
    }

    auto err = ka->FreeRegion((vaddr_t)ptr);
    EXPECT_EQ(ZX_OK, err, "unmapping object");
    END_TEST;
}

// Creates a vm object, maps it, demand paged.
static bool vmo_demand_paged_map_test() {
    BEGIN_TEST;
    static const size_t alloc_size = PAGE_SIZE * 16;
    fbl::RefPtr<VmObject> vmo;
    zx_status_t status = VmObjectPaged::Create(PMM_ALLOC_FLAG_ANY, 0u, alloc_size, &vmo);
    ASSERT_EQ(status, ZX_OK, "vmobject creation\n");
    ASSERT_TRUE(vmo, "vmobject creation\n");

    auto ka = VmAspace::kernel_aspace();
    void* ptr;
    auto ret = ka->MapObjectInternal(vmo, "test", 0, alloc_size, &ptr,
                                     0, 0, kArchRwFlags);
    ASSERT_EQ(ret, ZX_OK, "mapping object");

    // fill with known pattern and test
    if (!fill_and_test(ptr, alloc_size)) {
        all_ok = false;
    }

    auto err = ka->FreeRegion((vaddr_t)ptr);
    EXPECT_EQ(ZX_OK, err, "unmapping object");
    END_TEST;
}

// Creates a vm object, maps it, drops ref before unmapping.
static bool vmo_dropped_ref_test() {
    BEGIN_TEST;
    static const size_t alloc_size = PAGE_SIZE * 16;
    fbl::RefPtr<VmObject> vmo;
    zx_status_t status = VmObjectPaged::Create(PMM_ALLOC_FLAG_ANY, 0u, alloc_size, &vmo);
    ASSERT_EQ(status, ZX_OK, "vmobject creation\n");
    ASSERT_TRUE(vmo, "vmobject creation\n");

    auto ka = VmAspace::kernel_aspace();
    void* ptr;
    auto ret = ka->MapObjectInternal(ktl::move(vmo), "test", 0, alloc_size, &ptr,
                                     0, VmAspace::VMM_FLAG_COMMIT, kArchRwFlags);
    ASSERT_EQ(ret, ZX_OK, "mapping object");

    EXPECT_NULL(vmo, "dropped ref to object");

    // fill with known pattern and test
    if (!fill_and_test(ptr, alloc_size)) {
        all_ok = false;
    }

    auto err = ka->FreeRegion((vaddr_t)ptr);
    EXPECT_EQ(ZX_OK, err, "unmapping object");
    END_TEST;
}

// Creates a vm object, maps it, fills it with data, unmaps,
// maps again somewhere else.
static bool vmo_remap_test() {
    BEGIN_TEST;
    static const size_t alloc_size = PAGE_SIZE * 16;
    fbl::RefPtr<VmObject> vmo;
    zx_status_t status = VmObjectPaged::Create(PMM_ALLOC_FLAG_ANY, 0u, alloc_size, &vmo);
    ASSERT_EQ(status, ZX_OK, "vmobject creation\n");
    ASSERT_TRUE(vmo, "vmobject creation\n");

    auto ka = VmAspace::kernel_aspace();
    void* ptr;
    auto ret = ka->MapObjectInternal(vmo, "test", 0, alloc_size, &ptr,
                                     0, VmAspace::VMM_FLAG_COMMIT, kArchRwFlags);
    ASSERT_EQ(ZX_OK, ret, "mapping object");

    // fill with known pattern and test
    if (!fill_and_test(ptr, alloc_size)) {
        all_ok = false;
    }

    auto err = ka->FreeRegion((vaddr_t)ptr);
    EXPECT_EQ(ZX_OK, err, "unmapping object");

    // map it again
    ret = ka->MapObjectInternal(vmo, "test", 0, alloc_size, &ptr,
                                0, VmAspace::VMM_FLAG_COMMIT, kArchRwFlags);
    ASSERT_EQ(ret, ZX_OK, "mapping object");

    // test that the pattern is still valid
    bool result = test_region((uintptr_t)ptr, ptr, alloc_size);
    EXPECT_TRUE(result, "testing region for corruption");

    err = ka->FreeRegion((vaddr_t)ptr);
    EXPECT_EQ(ZX_OK, err, "unmapping object");
    END_TEST;
}

// Creates a vm object, maps it, fills it with data, maps it a second time and
// third time somewhere else.
static bool vmo_double_remap_test() {
    BEGIN_TEST;
    static const size_t alloc_size = PAGE_SIZE * 16;
    fbl::RefPtr<VmObject> vmo;
    zx_status_t status = VmObjectPaged::Create(PMM_ALLOC_FLAG_ANY, 0u, alloc_size, &vmo);
    ASSERT_EQ(status, ZX_OK, "vmobject creation\n");
    ASSERT_TRUE(vmo, "vmobject creation\n");

    auto ka = VmAspace::kernel_aspace();
    void* ptr;
    auto ret = ka->MapObjectInternal(vmo, "test0", 0, alloc_size, &ptr,
                                     0, 0, kArchRwFlags);
    ASSERT_EQ(ZX_OK, ret, "mapping object");

    // fill with known pattern and test
    if (!fill_and_test(ptr, alloc_size)) {
        all_ok = false;
    }

    // map it again
    void* ptr2;
    ret = ka->MapObjectInternal(vmo, "test1", 0, alloc_size, &ptr2,
                                0, 0, kArchRwFlags);
    ASSERT_EQ(ret, ZX_OK, "mapping object second time");
    EXPECT_NE(ptr, ptr2, "second mapping is different");

    // test that the pattern is still valid
    bool result = test_region((uintptr_t)ptr, ptr2, alloc_size);
    EXPECT_TRUE(result, "testing region for corruption");

    // map it a third time with an offset
    void* ptr3;
    static const size_t alloc_offset = PAGE_SIZE;
    ret = ka->MapObjectInternal(vmo, "test2", alloc_offset, alloc_size - alloc_offset,
                                &ptr3, 0, 0, kArchRwFlags);
    ASSERT_EQ(ret, ZX_OK, "mapping object third time");
    EXPECT_NE(ptr3, ptr2, "third mapping is different");
    EXPECT_NE(ptr3, ptr, "third mapping is different");

    // test that the pattern is still valid
    int mc =
        memcmp((uint8_t*)ptr + alloc_offset, ptr3, alloc_size - alloc_offset);
    EXPECT_EQ(0, mc, "testing region for corruption");

    ret = ka->FreeRegion((vaddr_t)ptr3);
    EXPECT_EQ(ZX_OK, ret, "unmapping object third time");

    ret = ka->FreeRegion((vaddr_t)ptr2);
    EXPECT_EQ(ZX_OK, ret, "unmapping object second time");

    ret = ka->FreeRegion((vaddr_t)ptr);
    EXPECT_EQ(ZX_OK, ret, "unmapping object");
    END_TEST;
}

static bool vmo_read_write_smoke_test() {
    BEGIN_TEST;
    static const size_t alloc_size = PAGE_SIZE * 16;

    // create object
    fbl::RefPtr<VmObject> vmo;
    zx_status_t status = VmObjectPaged::Create(PMM_ALLOC_FLAG_ANY, 0, alloc_size, &vmo);
    ASSERT_EQ(status, ZX_OK, "vmobject creation\n");
    ASSERT_TRUE(vmo, "vmobject creation\n");

    // create test buffer
    fbl::AllocChecker ac;
    fbl::Array<uint8_t> a(new (&ac) uint8_t[alloc_size], alloc_size);
    ASSERT_TRUE(ac.check(), "");
    fill_region(99, a.get(), alloc_size);

    // write to it, make sure it seems to work with valid args
    zx_status_t err = vmo->Write(a.get(), 0, 0);
    EXPECT_EQ(ZX_OK, err, "writing to object");

    err = vmo->Write(a.get(), 0, 37);
    EXPECT_EQ(ZX_OK, err, "writing to object");

    err = vmo->Write(a.get(), 99, 37);
    EXPECT_EQ(ZX_OK, err, "writing to object");

    // can't write past end
    err = vmo->Write(a.get(), 0, alloc_size + 47);
    EXPECT_EQ(ZX_ERR_OUT_OF_RANGE, err, "writing to object");

    // can't write past end
    err = vmo->Write(a.get(), 31, alloc_size + 47);
    EXPECT_EQ(ZX_ERR_OUT_OF_RANGE, err, "writing to object");

    // should return an error because out of range
    err = vmo->Write(a.get(), alloc_size + 99, 42);
    EXPECT_EQ(ZX_ERR_OUT_OF_RANGE, err, "writing to object");

    // map the object
    auto ka = VmAspace::kernel_aspace();
    uint8_t* ptr;
    err = ka->MapObjectInternal(vmo, "test", 0, alloc_size, (void**)&ptr,
                                0, 0, kArchRwFlags);
    ASSERT_EQ(ZX_OK, err, "mapping object");

    // write to it at odd offsets
    err = vmo->Write(a.get(), 31, 4197);
    EXPECT_EQ(ZX_OK, err, "writing to object");
    int cmpres = memcmp(ptr + 31, a.get(), 4197);
    EXPECT_EQ(0, cmpres, "reading from object");

    // write to it, filling the object completely
    err = vmo->Write(a.get(), 0, alloc_size);
    EXPECT_EQ(ZX_OK, err, "writing to object");

    // test that the data was actually written to it
    bool result = test_region(99, ptr, alloc_size);
    EXPECT_TRUE(result, "writing to object");

    // unmap it
    ka->FreeRegion((vaddr_t)ptr);

    // test that we can read from it
    fbl::Array<uint8_t> b(new (&ac) uint8_t[alloc_size], alloc_size);
    ASSERT_TRUE(ac.check(), "can't allocate buffer");

    err = vmo->Read(b.get(), 0, alloc_size);
    EXPECT_EQ(ZX_OK, err, "reading from object");

    // validate the buffer is valid
    cmpres = memcmp(b.get(), a.get(), alloc_size);
    EXPECT_EQ(0, cmpres, "reading from object");

    // read from it at an offset
    err = vmo->Read(b.get(), 31, 4197);
    EXPECT_EQ(ZX_OK, err, "reading from object");
    cmpres = memcmp(b.get(), a.get() + 31, 4197);
    EXPECT_EQ(0, cmpres, "reading from object");
    END_TEST;
}

static bool vmo_cache_test() {
    BEGIN_TEST;

    paddr_t pa;
    vm_page_t* vm_page;
    zx_status_t status = pmm_alloc_page(0, &vm_page, &pa);
    auto ka = VmAspace::kernel_aspace();
    uint32_t cache_policy = ARCH_MMU_FLAG_UNCACHED_DEVICE;
    uint32_t cache_policy_get;
    void* ptr;

    ASSERT_TRUE(vm_page, "");
    // Test that the flags set/get properly
    {
        fbl::RefPtr<VmObject> vmo;
        status = VmObjectPhysical::Create(pa, PAGE_SIZE, &vmo);
        ASSERT_EQ(status, ZX_OK, "vmobject creation\n");
        ASSERT_TRUE(vmo, "vmobject creation\n");
        cache_policy_get = vmo->GetMappingCachePolicy();
        EXPECT_NE(cache_policy, cache_policy_get, "check initial cache policy");
        EXPECT_EQ(ZX_OK, vmo->SetMappingCachePolicy(cache_policy), "try set");
        cache_policy_get = vmo->GetMappingCachePolicy();
        EXPECT_EQ(cache_policy, cache_policy_get, "compare flags");
    }

    // Test valid flags
    for (uint32_t i = 0; i <= ARCH_MMU_FLAG_CACHE_MASK; i++) {
        fbl::RefPtr<VmObject> vmo;
        status = VmObjectPhysical::Create(pa, PAGE_SIZE, &vmo);
        ASSERT_EQ(status, ZX_OK, "vmobject creation\n");
        ASSERT_TRUE(vmo, "vmobject creation\n");
        EXPECT_EQ(ZX_OK, vmo->SetMappingCachePolicy(i), "try setting valid flags");
    }

    // Test invalid flags
    for (uint32_t i = ARCH_MMU_FLAG_CACHE_MASK + 1; i < 32; i++) {
        fbl::RefPtr<VmObject> vmo;
        status = VmObjectPhysical::Create(pa, PAGE_SIZE, &vmo);
        ASSERT_EQ(status, ZX_OK, "vmobject creation\n");
        ASSERT_TRUE(vmo, "vmobject creation\n");
        EXPECT_EQ(ZX_ERR_INVALID_ARGS, vmo->SetMappingCachePolicy(i), "try set with invalid flags");
    }

    // Test valid flags with invalid flags
    {
        fbl::RefPtr<VmObject> vmo;
        status = VmObjectPhysical::Create(pa, PAGE_SIZE, &vmo);
        ASSERT_EQ(status, ZX_OK, "vmobject creation\n");
        ASSERT_TRUE(vmo, "vmobject creation\n");
        EXPECT_EQ(ZX_ERR_INVALID_ARGS, vmo->SetMappingCachePolicy(cache_policy | 0x5), "bad 0x5");
        EXPECT_EQ(ZX_ERR_INVALID_ARGS, vmo->SetMappingCachePolicy(cache_policy | 0xA), "bad 0xA");
        EXPECT_EQ(ZX_ERR_INVALID_ARGS, vmo->SetMappingCachePolicy(cache_policy | 0x55), "bad 0x55");
        EXPECT_EQ(ZX_ERR_INVALID_ARGS, vmo->SetMappingCachePolicy(cache_policy | 0xAA), "bad 0xAA");
    }

    // Test that changing policy while mapped is blocked
    {
        fbl::RefPtr<VmObject> vmo;
        status = VmObjectPhysical::Create(pa, PAGE_SIZE, &vmo);
        ASSERT_EQ(status, ZX_OK, "vmobject creation\n");
        ASSERT_TRUE(vmo, "vmobject creation\n");
        ASSERT_EQ(ZX_OK, ka->MapObjectInternal(vmo, "test", 0, PAGE_SIZE, (void**)&ptr, 0, 0,
                                               kArchRwFlags),
                  "map vmo");
        EXPECT_EQ(ZX_ERR_BAD_STATE, vmo->SetMappingCachePolicy(cache_policy),
                  "set flags while mapped");
        EXPECT_EQ(ZX_OK, ka->FreeRegion((vaddr_t)ptr), "unmap vmo");
        EXPECT_EQ(ZX_OK, vmo->SetMappingCachePolicy(cache_policy), "set flags after unmapping");
        ASSERT_EQ(ZX_OK, ka->MapObjectInternal(vmo, "test", 0, PAGE_SIZE, (void**)&ptr, 0, 0,
                                               kArchRwFlags),
                  "map vmo again");
        EXPECT_EQ(ZX_OK, ka->FreeRegion((vaddr_t)ptr), "unmap vmo");
    }

    pmm_free_page(vm_page);
    END_TEST;
}

static bool vmo_lookup_test() {
    BEGIN_TEST;

    static const size_t alloc_size = PAGE_SIZE * 16;
    fbl::RefPtr<VmObject> vmo;
    zx_status_t status = VmObjectPaged::Create(PMM_ALLOC_FLAG_ANY, 0u, alloc_size, &vmo);
    ASSERT_EQ(status, ZX_OK, "vmobject creation\n");
    ASSERT_TRUE(vmo, "vmobject creation\n");

    size_t pages_seen = 0;
    auto lookup_fn = [](void* context, size_t offset, size_t index, paddr_t pa) {
        size_t* pages_seen = static_cast<size_t*>(context);
        (*pages_seen)++;
        return ZX_OK;
    };
    status = vmo->Lookup(0, alloc_size, lookup_fn, &pages_seen);
    EXPECT_EQ(ZX_ERR_NO_MEMORY, status, "lookup on uncommitted pages\n");
    EXPECT_EQ(0u, pages_seen, "lookup on uncommitted pages\n");
    pages_seen = 0;

    status = vmo->CommitRange(PAGE_SIZE, PAGE_SIZE);
    EXPECT_EQ(ZX_OK, status, "committing vm object\n");
    EXPECT_EQ(static_cast<size_t>(1), vmo->AllocatedPages(),
              "committing vm object\n");

    // Should fail, since first page isn't mapped
    status = vmo->Lookup(0, alloc_size, lookup_fn, &pages_seen);
    EXPECT_EQ(ZX_ERR_NO_MEMORY, status, "lookup on partially committed pages\n");
    EXPECT_EQ(0u, pages_seen, "lookup on partially committed pages\n");
    pages_seen = 0;

    // Should fail, but see the mapped page
    status = vmo->Lookup(PAGE_SIZE, alloc_size - PAGE_SIZE, lookup_fn, &pages_seen);
    EXPECT_EQ(ZX_ERR_NO_MEMORY, status, "lookup on partially committed pages\n");
    EXPECT_EQ(1u, pages_seen, "lookup on partially committed pages\n");
    pages_seen = 0;

    // Should succeed
    status = vmo->Lookup(PAGE_SIZE, PAGE_SIZE, lookup_fn, &pages_seen);
    EXPECT_EQ(ZX_OK, status, "lookup on partially committed pages\n");
    EXPECT_EQ(1u, pages_seen, "lookup on partially committed pages\n");
    pages_seen = 0;

    // Commit the rest
    status = vmo->CommitRange(0, alloc_size);
    EXPECT_EQ(ZX_OK, status, "committing vm object\n");
    EXPECT_EQ(alloc_size, PAGE_SIZE * vmo->AllocatedPages(), "committing vm object\n");

    status = vmo->Lookup(0, alloc_size, lookup_fn, &pages_seen);
    EXPECT_EQ(ZX_OK, status, "lookup on partially committed pages\n");
    EXPECT_EQ(alloc_size / PAGE_SIZE, pages_seen, "lookup on partially committed pages\n");

    END_TEST;
}

// TODO(ZX-1431): The ARM code's error codes are always ZX_ERR_INTERNAL, so
// special case that.
#if ARCH_ARM64
#define MMU_EXPECT_EQ(exp, act, msg) EXPECT_EQ(ZX_ERR_INTERNAL, act, msg)
#else
#define MMU_EXPECT_EQ(exp, act, msg) EXPECT_EQ(exp, act, msg)
#endif

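// Maps a discontiguous set of physical pages directly through an ArchVmAspace
// and checks Query results, double-map failures, and cleanup.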
static bool arch_noncontiguous_map() {
    BEGIN_TEST;

    // Get some phys pages to test on
    paddr_t phys[3];
    struct list_node phys_list = LIST_INITIAL_VALUE(phys_list);
    zx_status_t status = pmm_alloc_pages(fbl::count_of(phys), 0, &phys_list);
    ASSERT_EQ(ZX_OK, status, "non contig map alloc");
    {
        size_t i = 0;
        vm_page_t* p;
        list_for_every_entry (&phys_list, p, vm_page_t, queue_node) {
            phys[i] = p->paddr();
            ++i;
        }
    }

    {
        ArchVmAspace aspace;
        status = aspace.Init(USER_ASPACE_BASE, USER_ASPACE_SIZE, 0);
        ASSERT_EQ(ZX_OK, status, "failed to init aspace\n");

        // Attempt to map a set of vm_page_t
        size_t mapped;
        vaddr_t base = USER_ASPACE_BASE + 10 * PAGE_SIZE;
        status = aspace.Map(base, phys, fbl::count_of(phys), ARCH_MMU_FLAG_PERM_READ, &mapped);
        ASSERT_EQ(ZX_OK, status, "failed first map\n");
        EXPECT_EQ(fbl::count_of(phys), mapped, "weird first map\n");
        for (size_t i = 0; i < fbl::count_of(phys); ++i) {
            paddr_t paddr;
            uint mmu_flags;
            status = aspace.Query(base + i * PAGE_SIZE, &paddr, &mmu_flags);
            EXPECT_EQ(ZX_OK, status, "bad first map\n");
            EXPECT_EQ(phys[i], paddr, "bad first map\n");
            EXPECT_EQ(ARCH_MMU_FLAG_PERM_READ, mmu_flags, "bad first map\n");
        }

        // Attempt to map again, should fail
        status = aspace.Map(base, phys, fbl::count_of(phys), ARCH_MMU_FLAG_PERM_READ, &mapped);
        MMU_EXPECT_EQ(ZX_ERR_ALREADY_EXISTS, status, "double map\n");

        // Attempt to map partially overlapping, should fail
        status = aspace.Map(base + 2 * PAGE_SIZE, phys, fbl::count_of(phys),
                            ARCH_MMU_FLAG_PERM_READ, &mapped);
        MMU_EXPECT_EQ(ZX_ERR_ALREADY_EXISTS, status, "double map\n");
        status = aspace.Map(base - 2 * PAGE_SIZE, phys, fbl::count_of(phys),
                            ARCH_MMU_FLAG_PERM_READ, &mapped);
        MMU_EXPECT_EQ(ZX_ERR_ALREADY_EXISTS, status, "double map\n");

        // No entries should have been created by the partial failures
        status = aspace.Query(base - 2 * PAGE_SIZE, nullptr, nullptr);
        EXPECT_EQ(ZX_ERR_NOT_FOUND, status, "bad first map\n");
        status = aspace.Query(base - PAGE_SIZE, nullptr, nullptr);
        EXPECT_EQ(ZX_ERR_NOT_FOUND, status, "bad first map\n");
        status = aspace.Query(base + 3 * PAGE_SIZE, nullptr, nullptr);
        EXPECT_EQ(ZX_ERR_NOT_FOUND, status, "bad first map\n");
        status = aspace.Query(base + 4 * PAGE_SIZE, nullptr, nullptr);
        EXPECT_EQ(ZX_ERR_NOT_FOUND, status, "bad first map\n");

        status = aspace.Destroy();
        EXPECT_EQ(ZX_OK, status, "failed to destroy aspace\n");
    }

    pmm_free(&phys_list);

    END_TEST;
}

// Basic test that checks adding/removing a page
static bool vmpl_add_remove_page_test() {
    BEGIN_TEST;

    VmPageList pl;
    vm_page_t test_page{};
    pl.AddPage(&test_page, 0);

    EXPECT_EQ(&test_page, pl.GetPage(0), "unexpected page\n");

    vm_page* remove_page;
    EXPECT_TRUE(pl.RemovePage(0, &remove_page), "remove failure\n");
    EXPECT_EQ(&test_page, remove_page, "unexpected page\n");

    END_TEST;
}

// Test for freeing a range of pages
static bool vmpl_free_pages_test() {
    BEGIN_TEST;

    vm_page_t* first_page = nullptr;
    vm_page_t* last_page = nullptr;

    VmPageList pl;
    constexpr uint32_t kCount = 3 * VmPageListNode::kPageFanOut;
    for (uint32_t i = 0; i < kCount; i++) {
        paddr_t pa;
        vm_page_t* page;

        zx_status_t status = pmm_alloc_page(0, &page, &pa);
        ASSERT_EQ(ZX_OK, status, "pmm_alloc single page");
        ASSERT_NONNULL(page, "pmm_alloc single page");
        ASSERT_NE(0u, pa, "pmm_alloc single page");

        pl.AddPage(page, i * PAGE_SIZE);

        if (i == 0) {
            first_page = page;
        } else if (i == kCount - 1) {
            last_page = page;
        }
    }

    pl.FreePages(PAGE_SIZE, (kCount - 1) * PAGE_SIZE);

    for (uint32_t i = 0; i < kCount; i++) {
        vm_page* remove_page;
        bool res = pl.RemovePage(i * PAGE_SIZE, &remove_page);
        if (i == 0) {
            EXPECT_TRUE(res, "missing page\n");
            EXPECT_EQ(first_page, remove_page, "unexpected page\n");
        } else if (i == kCount - 1) {
            EXPECT_TRUE(res, "missing page\n");
            EXPECT_EQ(last_page, remove_page, "unexpected page\n");
        } else {
            EXPECT_FALSE(res, "extra page\n");
        }
    }

    END_TEST;
}

// Tests freeing the last page in a list
static bool vmpl_free_pages_last_page_test() {
    BEGIN_TEST;

    paddr_t pa;
    vm_page_t* page;

    zx_status_t status = pmm_alloc_page(0, &page, &pa);
    ASSERT_EQ(ZX_OK, status, "pmm_alloc single page");
    ASSERT_NONNULL(page, "pmm_alloc single page");
    ASSERT_NE(0u, pa, "pmm_alloc single page");

    VmPageList pl;
    pl.AddPage(page, 0);

    EXPECT_EQ(page, pl.GetPage(0), "unexpected page\n");

    pl.FreePages(0, PAGE_SIZE);
    EXPECT_TRUE(pl.IsEmpty(), "not empty\n");

    END_TEST;
}

// Tests taking a page from the start of a VmPageListNode
static bool vmpl_take_single_page_even_test() {
    BEGIN_TEST;

    VmPageList pl;
    vm_page_t test_page{};
    vm_page_t test_page2{};
    pl.AddPage(&test_page, 0);
    pl.AddPage(&test_page2, PAGE_SIZE);

    VmPageSpliceList splice = pl.TakePages(0, PAGE_SIZE);

    EXPECT_EQ(&test_page, splice.Pop(), "wrong page\n");
    EXPECT_TRUE(splice.IsDone(), "extra page\n");
    EXPECT_NULL(pl.GetPage(0), "duplicate page\n");

    vm_page* remove_page;
    EXPECT_TRUE(pl.RemovePage(PAGE_SIZE, &remove_page), "remove failure\n");
    EXPECT_EQ(&test_page2, remove_page, "unexpected page\n");

    END_TEST;
}

// Tests taking a page from the middle of a VmPageListNode
static bool vmpl_take_single_page_odd_test() {
    BEGIN_TEST;

    VmPageList pl;
    vm_page_t test_page{};
    vm_page_t test_page2{};
    pl.AddPage(&test_page, 0);
    pl.AddPage(&test_page2, PAGE_SIZE);

    VmPageSpliceList splice = pl.TakePages(PAGE_SIZE, PAGE_SIZE);

    EXPECT_EQ(&test_page2, splice.Pop(), "wrong page\n");
    EXPECT_TRUE(splice.IsDone(), "extra page\n");
    EXPECT_NULL(pl.GetPage(PAGE_SIZE), "duplicate page\n");

    vm_page* remove_page;
    EXPECT_TRUE(pl.RemovePage(0, &remove_page), "remove failure\n");
    EXPECT_EQ(&test_page, remove_page, "unexpected page\n");

    END_TEST;
}

// Tests taking all the pages from a range of VmPageListNodes
static bool vmpl_take_all_pages_test() {
    BEGIN_TEST;

    VmPageList pl;
    constexpr uint32_t kCount = 3 * VmPageListNode::kPageFanOut;
    vm_page_t test_pages[kCount] = {};
    for (uint32_t i = 0; i < kCount; i++) {
        pl.AddPage(test_pages + i, i * PAGE_SIZE);
    }

    VmPageSpliceList splice = pl.TakePages(0, kCount * PAGE_SIZE);
    EXPECT_TRUE(pl.IsEmpty(), "non-empty list\n");

    for (uint32_t i = 0; i < kCount; i++) {
        EXPECT_EQ(test_pages + i, splice.Pop(), "wrong page\n");
    }
    EXPECT_TRUE(splice.IsDone(), "extra pages\n");

    END_TEST;
}

// Tests taking the middle pages from a range of VmPageListNodes
static bool vmpl_take_middle_pages_test() {
    BEGIN_TEST;

    VmPageList pl;
    constexpr uint32_t kCount = 3 * VmPageListNode::kPageFanOut;
    vm_page_t test_pages[kCount] = {};
    for (uint32_t i = 0; i < kCount; i++) {
        pl.AddPage(test_pages + i, i * PAGE_SIZE);
    }

    constexpr uint32_t kTakeOffset = VmPageListNode::kPageFanOut - 1;
    constexpr uint32_t kTakeCount = VmPageListNode::kPageFanOut + 2;
    VmPageSpliceList splice = pl.TakePages(kTakeOffset * PAGE_SIZE, kTakeCount * PAGE_SIZE);
    EXPECT_FALSE(pl.IsEmpty(), "non-empty list\n");

    for (uint32_t i = 0; i < kCount; i++) {
        if (kTakeOffset <= i && i < kTakeOffset + kTakeCount) {
            EXPECT_EQ(test_pages + i, splice.Pop(), "wrong page\n");
        } else {
            vm_page* remove_page;
            EXPECT_TRUE(pl.RemovePage(i * PAGE_SIZE, &remove_page), "remove failure\n");
            EXPECT_EQ(test_pages + i, remove_page, "wrong page\n");
        }
    }
    EXPECT_TRUE(splice.IsDone(), "extra pages\n");

    END_TEST;
}

// Tests that gaps are preserved in the list
static bool vmpl_take_gap_test() {
    BEGIN_TEST;

    VmPageList pl;
    constexpr uint32_t kCount = VmPageListNode::kPageFanOut;
    constexpr uint32_t kGapSize = 2;
    vm_page_t test_pages[VmPageListNode::kPageFanOut] = {};
    for (uint32_t i = 0; i < kCount; i++) {
        uint64_t offset = (i * (kGapSize + 1)) * PAGE_SIZE;
        pl.AddPage(test_pages + i, offset);
    }

    constexpr uint32_t kListStart = PAGE_SIZE;
    constexpr uint32_t kListLen = (kCount * (kGapSize + 1) - 2) * PAGE_SIZE;
    VmPageSpliceList splice = pl.TakePages(kListStart, kListLen);

    vm_page* page;
    EXPECT_TRUE(pl.RemovePage(0, &page), "wrong page\n");
    EXPECT_EQ(test_pages, page, "wrong page\n");
    EXPECT_FALSE(pl.RemovePage(kListLen, &page), "wrong page\n");

    for (uint64_t offset = kListStart; offset < kListStart + kListLen; offset += PAGE_SIZE) {
        auto page_idx = offset / PAGE_SIZE;
        if (page_idx % (kGapSize + 1) == 0) {
            EXPECT_EQ(test_pages + (page_idx / (kGapSize + 1)), splice.Pop(), "wrong page\n");
        } else {
            EXPECT_NULL(splice.Pop(), "wrong page\n");
        }
    }
    EXPECT_TRUE(splice.IsDone(), "extra pages\n");

    END_TEST;
}

// Tests that cleaning up a splice list doesn't blow up
static bool vmpl_take_cleanup_test() {
    BEGIN_TEST;

    paddr_t pa;
    vm_page_t* page;

    zx_status_t status = pmm_alloc_page(0, &page, &pa);
    ASSERT_EQ(ZX_OK, status, "pmm_alloc single page");
    ASSERT_NONNULL(page, "pmm_alloc single page");
    ASSERT_NE(0u, pa, "pmm_alloc single page");

    page->state = VM_PAGE_STATE_OBJECT;
    page->object.pin_count = 0;

    VmPageList pl;
    pl.AddPage(page, 0);

    VmPageSpliceList splice = pl.TakePages(0, PAGE_SIZE);
    EXPECT_TRUE(!splice.IsDone(), "missing page\n");

    END_TEST;
}

// Use the function name as the test name
#define VM_UNITTEST(fname) UNITTEST(#fname, fname)

UNITTEST_START_TESTCASE(vm_tests)
VM_UNITTEST(vmm_alloc_smoke_test)
VM_UNITTEST(vmm_alloc_contiguous_smoke_test)
VM_UNITTEST(multiple_regions_test)
VM_UNITTEST(vmm_alloc_zero_size_fails)
VM_UNITTEST(vmm_alloc_bad_specific_pointer_fails)
VM_UNITTEST(vmm_alloc_contiguous_missing_flag_commit_fails)
VM_UNITTEST(vmm_alloc_contiguous_zero_size_fails)
VM_UNITTEST(vmaspace_create_smoke_test)
VM_UNITTEST(vmaspace_alloc_smoke_test)
VM_UNITTEST(vmo_create_test)
VM_UNITTEST(vmo_pin_test)
VM_UNITTEST(vmo_multiple_pin_test)
VM_UNITTEST(vmo_commit_test)
VM_UNITTEST(vmo_odd_size_commit_test)
VM_UNITTEST(vmo_create_physical_test)
VM_UNITTEST(vmo_create_contiguous_test)
VM_UNITTEST(vmo_contiguous_decommit_test)
VM_UNITTEST(vmo_precommitted_map_test)
VM_UNITTEST(vmo_demand_paged_map_test)
VM_UNITTEST(vmo_dropped_ref_test)
VM_UNITTEST(vmo_remap_test)
VM_UNITTEST(vmo_double_remap_test)
VM_UNITTEST(vmo_read_write_smoke_test)
VM_UNITTEST(vmo_cache_test)
VM_UNITTEST(vmo_lookup_test)
VM_UNITTEST(arch_noncontiguous_map)
// Uncomment for debugging
// VM_UNITTEST(dump_all_aspaces) // Run last
UNITTEST_END_TESTCASE(vm_tests, "vm", "Virtual memory tests");

UNITTEST_START_TESTCASE(pmm_tests)
VM_UNITTEST(pmm_smoke_test)
VM_UNITTEST(pmm_alloc_contiguous_one_test)
VM_UNITTEST(pmm_multi_alloc_test)
// runs the system out of memory, uncomment for debugging
//VM_UNITTEST(pmm_oversized_alloc_test)
UNITTEST_END_TESTCASE(pmm_tests, "pmm", "Physical memory manager tests");

UNITTEST_START_TESTCASE(vm_page_list_tests)
VM_UNITTEST(vmpl_add_remove_page_test)
VM_UNITTEST(vmpl_free_pages_test)
VM_UNITTEST(vmpl_free_pages_last_page_test)
VM_UNITTEST(vmpl_take_single_page_even_test)
VM_UNITTEST(vmpl_take_single_page_odd_test)
VM_UNITTEST(vmpl_take_all_pages_test)
VM_UNITTEST(vmpl_take_middle_pages_test)
VM_UNITTEST(vmpl_take_gap_test)
VM_UNITTEST(vmpl_take_cleanup_test)
UNITTEST_END_TESTCASE(vm_page_list_tests, "vmpl", "VmPageList tests");