/*
 * Copyright (c) 2014 Travis Geiselbrecht
 *
 * Use of this source code is governed by a MIT-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/MIT
 */
#include <assert.h>
#include <kernel/mutex.h>
#include <kernel/vm.h>
#include <limits.h>
#include <lk/console_cmd.h>
#include <lk/err.h>
#include <lk/trace.h>
#include <stdlib.h>
#include <string.h>
#include "vm_priv.h"

#define LOCAL_TRACE 0

static struct list_node aspace_list = LIST_INITIAL_VALUE(aspace_list);
static mutex_t vmm_lock = MUTEX_INITIAL_VALUE(vmm_lock);

vmm_aspace_t _kernel_aspace;

static void dump_aspace(const vmm_aspace_t *a);
static void dump_region(const vmm_region_t *r);

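/* set up the kernel address space early in boot, before the heap is available */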
void vmm_init_preheap(void) {
    /* initialize the kernel address space */
    strlcpy(_kernel_aspace.name, "kernel", sizeof(_kernel_aspace.name));
    _kernel_aspace.base = KERNEL_ASPACE_BASE;
    _kernel_aspace.size = KERNEL_ASPACE_SIZE;
    _kernel_aspace.flags = VMM_ASPACE_FLAG_KERNEL;
    list_initialize(&_kernel_aspace.region_list);

    arch_mmu_init_aspace(&_kernel_aspace.arch_aspace, KERNEL_ASPACE_BASE, KERNEL_ASPACE_SIZE, ARCH_ASPACE_FLAG_KERNEL);

    list_add_head(&aspace_list, &_kernel_aspace.node);
}

void vmm_init(void) {
}

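/* returns true if vaddr lies within the address space */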
static inline bool is_inside_aspace(const vmm_aspace_t *aspace, vaddr_t vaddr) {
    return (vaddr >= aspace->base && vaddr <= aspace->base + aspace->size - 1);
}

static bool is_region_inside_aspace(const vmm_aspace_t *aspace, vaddr_t vaddr, size_t size) {
    /* is the starting address within the address space */
    if (!is_inside_aspace(aspace, vaddr))
        return false;

    if (size == 0)
        return true;

    /* check whether adding the size wraps the address around */
    if (vaddr + size - 1 < vaddr)
        return false;

    /* check that the end address is also within the address space */
    if (vaddr + size - 1 > aspace->base + aspace->size - 1)
        return false;

    return true;
}

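/* clamp size so that [vaddr, vaddr + size) does not extend past the end of the address space */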
static size_t trim_to_aspace(const vmm_aspace_t *aspace, vaddr_t vaddr, size_t size) {
    DEBUG_ASSERT(is_inside_aspace(aspace, vaddr));

    if (size == 0)
        return size;

    size_t offset = vaddr - aspace->base;

    //LTRACEF("vaddr 0x%lx size 0x%zx offset 0x%zx aspace base 0x%lx aspace size 0x%zx\n",
    //        vaddr, size, offset, aspace->base, aspace->size);

    if (offset + size < offset)
        size = ULONG_MAX - offset - 1;

    //LTRACEF("size now 0x%zx\n", size);

    if (offset + size >= aspace->size - 1)
        size = aspace->size - offset;

    //LTRACEF("size now 0x%zx\n", size);

    return size;
}

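/* allocate and initialize a region structure; the caller is responsible for inserting it into an aspace */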
static vmm_region_t *alloc_region_struct(const char *name, vaddr_t base, size_t size,
                                         uint flags, uint arch_mmu_flags) {
    DEBUG_ASSERT(name);

    vmm_region_t *r = calloc(1, sizeof(vmm_region_t));
    if (!r)
        return NULL;

    strlcpy(r->name, name, sizeof(r->name));
    r->base = base;
    r->size = size;
    r->flags = flags;
    r->arch_mmu_flags = arch_mmu_flags;
    list_initialize(&r->page_list);

    return r;
}


/* add a region to the appropriate spot in the address space list,
 * testing to see if there's a space */
static status_t add_region_to_aspace(vmm_aspace_t *aspace, vmm_region_t *r) {
    DEBUG_ASSERT(aspace);
    DEBUG_ASSERT(r);

    LTRACEF("aspace %p base 0x%lx size 0x%zx r %p base 0x%lx size 0x%zx\n",
            aspace, aspace->base, aspace->size, r, r->base, r->size);

    /* only try if the region will at least fit in the address space */
    if (r->size == 0 || !is_region_inside_aspace(aspace, r->base, r->size)) {
        LTRACEF("region was out of range\n");
        return ERR_OUT_OF_RANGE;
    }

    vaddr_t r_end = r->base + r->size - 1;

    /* does it fit in front */
    vmm_region_t *last;
    last = list_peek_head_type(&aspace->region_list, vmm_region_t, node);
    if (!last || r_end < last->base) {
        /* empty list or not empty and fits before the first element */
        list_add_head(&aspace->region_list, &r->node);
        return NO_ERROR;
    }

    /* walk the list, finding the right spot to put it */
    list_for_every_entry(&aspace->region_list, last, vmm_region_t, node) {
        /* does it go after last? */
        if (r->base > last->base + last->size - 1) {
            /* get the next element in the list */
            vmm_region_t *next = list_next_type(&aspace->region_list, &last->node, vmm_region_t, node);
            if (!next || (r_end < next->base)) {
                /* end of the list or next exists and it goes between them */
                list_add_after(&last->node, &r->node);
                return NO_ERROR;
            }
        }
    }

    LTRACEF("couldn't find spot\n");
    return ERR_NO_MEMORY;
}


/*
 * Try to pick the spot within the specified gap.
 *
 * The arch can override this to impose its own restrictions.
 */
__WEAK vaddr_t arch_mmu_pick_spot(arch_aspace_t *aspace, vaddr_t base, uint prev_region_arch_mmu_flags,
                                  vaddr_t end, uint next_region_arch_mmu_flags,
                                  vaddr_t align, size_t size, uint arch_mmu_flags) {
    /* just align it by default */
    return ALIGN(base, align);
}


/*
 * Returns true if the caller should stop searching.
 */
static inline bool check_gap(vmm_aspace_t *aspace,
                             vmm_region_t *prev, vmm_region_t *next,
                             vaddr_t *pva, vaddr_t align, size_t size,
                             uint arch_mmu_flags) {
    vaddr_t gap_beg; /* first byte of a gap */
    vaddr_t gap_end; /* last byte of a gap */

    DEBUG_ASSERT(pva);

    if (prev)
        gap_beg = prev->base + prev->size;
    else
        gap_beg = aspace->base;

    if (next) {
        if (gap_beg == next->base)
            goto next_gap; /* no gap between regions */
        gap_end = next->base - 1;
    } else {
        if (gap_beg == (aspace->base + aspace->size))
            goto not_found; /* no gap at the end of the address space; stop the search */
        gap_end = aspace->base + aspace->size - 1;
    }

    *pva = arch_mmu_pick_spot(&aspace->arch_aspace, gap_beg, prev ? prev->arch_mmu_flags : ARCH_MMU_FLAG_INVALID,
                              gap_end, next ? next->arch_mmu_flags : ARCH_MMU_FLAG_INVALID,
                              align, size, arch_mmu_flags);
    if (*pva < gap_beg)
        goto not_found; /* address wrapped around */

    if (*pva < gap_end && ((gap_end - *pva + 1) >= size)) {
        /* we have enough room */
        return true; /* found spot, stop search */
    }

next_gap:
    return false; /* continue search */

not_found:
    *pva = -1;
    return true; /* not found, stop search */
}

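/* search the sorted region list for a gap large enough to hold an allocation of 'size' bytes at the
 * requested alignment. Returns the chosen virtual address, or -1 if nothing fits. If 'before' is
 * non-NULL it is set to the list node the new region should be inserted after. */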
static vaddr_t alloc_spot(vmm_aspace_t *aspace, size_t size, uint8_t align_pow2,
                          uint arch_mmu_flags, struct list_node **before) {
    DEBUG_ASSERT(aspace);
    DEBUG_ASSERT(size > 0 && IS_PAGE_ALIGNED(size));

    LTRACEF("aspace %p size 0x%zx align %hhu\n", aspace, size, align_pow2);

    if (align_pow2 < PAGE_SIZE_SHIFT)
        align_pow2 = PAGE_SIZE_SHIFT;
    vaddr_t align = 1UL << align_pow2;

    vaddr_t spot;
    vmm_region_t *r = NULL;

    /* try to pick spot at the beginning of address space */
    if (check_gap(aspace, NULL,
                  list_peek_head_type(&aspace->region_list, vmm_region_t, node),
                  &spot, align, size, arch_mmu_flags))
        goto done;

    /* search the middle of the list */
    list_for_every_entry(&aspace->region_list, r, vmm_region_t, node) {
        if (check_gap(aspace, r,
                      list_next_type(&aspace->region_list, &r->node, vmm_region_t, node),
                      &spot, align, size, arch_mmu_flags))
            goto done;
    }

    /* couldn't find anything */
    return -1;

done:
    if (before)
        *before = r ? &r->node : &aspace->region_list;
    return spot;
}

/* allocate a region structure and stick it in the address space */
static vmm_region_t *alloc_region(vmm_aspace_t *aspace, const char *name, size_t size,
                                  vaddr_t vaddr, uint8_t align_pow2,
                                  uint vmm_flags, uint region_flags, uint arch_mmu_flags) {
    /* make a region struct for it and stick it in the list */
    vmm_region_t *r = alloc_region_struct(name, vaddr, size, region_flags, arch_mmu_flags);
    if (!r)
        return NULL;

    /* if they ask us for a specific spot, put it there */
    if (vmm_flags & VMM_FLAG_VALLOC_SPECIFIC) {
        /* stick it in the list, checking to see if it fits */
        if (add_region_to_aspace(aspace, r) < 0) {
            /* didn't fit */
            free(r);
            return NULL;
        }
    } else {
        /* allocate a virtual slot for it */
        struct list_node *before = NULL;

        vaddr = alloc_spot(aspace, size, align_pow2, arch_mmu_flags, &before);
        LTRACEF("alloc_spot returns 0x%lx, before %p\n", vaddr, before);

        if (vaddr == (vaddr_t)-1) {
            LTRACEF("failed to find spot\n");
            free(r);
            return NULL;
        }

        DEBUG_ASSERT(before != NULL);

        r->base = (vaddr_t)vaddr;

        /* add it to the region list */
        list_add_after(before, &r->node);
    }

    return r;
}

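/* reserve a page-aligned range of the address space, recording it as a region with whatever
 * mmu flags it is currently mapped with */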
status_t vmm_reserve_space(vmm_aspace_t *aspace, const char *name, size_t size, vaddr_t vaddr) {
    LTRACEF("aspace %p name '%s' size 0x%zx vaddr 0x%lx\n", aspace, name, size, vaddr);

    DEBUG_ASSERT(aspace);
    DEBUG_ASSERT(IS_PAGE_ALIGNED(vaddr));
    DEBUG_ASSERT(IS_PAGE_ALIGNED(size));

    if (!name)
        name = "";

    if (!aspace)
        return ERR_INVALID_ARGS;
    if (size == 0)
        return NO_ERROR;
    if (!IS_PAGE_ALIGNED(vaddr) || !IS_PAGE_ALIGNED(size))
        return ERR_INVALID_ARGS;

    if (!is_inside_aspace(aspace, vaddr))
        return ERR_OUT_OF_RANGE;

    /* trim the size */
    size = trim_to_aspace(aspace, vaddr, size);

    mutex_acquire(&vmm_lock);

    /* look up how it's already mapped */
    uint arch_mmu_flags = 0;
    arch_mmu_query(&aspace->arch_aspace, vaddr, NULL, &arch_mmu_flags);

    /* build a new region structure */
    vmm_region_t *r = alloc_region(aspace, name, size, vaddr, 0,
                                   VMM_FLAG_VALLOC_SPECIFIC, VMM_REGION_FLAG_RESERVED, arch_mmu_flags);

    mutex_release(&vmm_lock);
    return r ? NO_ERROR : ERR_NO_MEMORY;
}

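/* map an existing run of physical pages at a virtual address chosen by the vmm
 * (or by the caller with VMM_FLAG_VALLOC_SPECIFIC) */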
status_t vmm_alloc_physical(vmm_aspace_t *aspace, const char *name, size_t size,
                            void **ptr, uint8_t align_log2, paddr_t paddr, uint vmm_flags, uint arch_mmu_flags) {
    status_t ret;

    LTRACEF("aspace %p name '%s' size 0x%zx ptr %p paddr 0x%lx vmm_flags 0x%x arch_mmu_flags 0x%x\n",
            aspace, name, size, ptr ? *ptr : 0, paddr, vmm_flags, arch_mmu_flags);

    DEBUG_ASSERT(aspace);
    DEBUG_ASSERT(IS_PAGE_ALIGNED(paddr));
    DEBUG_ASSERT(IS_PAGE_ALIGNED(size));

    if (!name)
        name = "";

    if (!aspace)
        return ERR_INVALID_ARGS;
    if (size == 0)
        return NO_ERROR;
    if (!IS_PAGE_ALIGNED(paddr) || !IS_PAGE_ALIGNED(size))
        return ERR_INVALID_ARGS;

    vaddr_t vaddr = 0;

    /* if they're asking for a specific spot, copy the address */
    if (vmm_flags & VMM_FLAG_VALLOC_SPECIFIC) {
        /* can't ask for a specific spot and then not provide one */
        if (!ptr) {
            return ERR_INVALID_ARGS;
        }
        vaddr = (vaddr_t)*ptr;
    }

    mutex_acquire(&vmm_lock);

    /* allocate a region and put it in the aspace list */
    vmm_region_t *r = alloc_region(aspace, name, size, vaddr, align_log2, vmm_flags,
                                   VMM_REGION_FLAG_PHYSICAL, arch_mmu_flags);
    if (!r) {
        ret = ERR_NO_MEMORY;
        goto err_alloc_region;
    }

    /* return the vaddr if requested */
    if (ptr)
        *ptr = (void *)r->base;

    /* map all of the pages */
    int err = arch_mmu_map(&aspace->arch_aspace, r->base, paddr, size / PAGE_SIZE, arch_mmu_flags);
    LTRACEF("arch_mmu_map returns %d\n", err);

    ret = NO_ERROR;

err_alloc_region:
    mutex_release(&vmm_lock);
    return ret;
}

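/* allocate a physically contiguous run of pages and map it into the address space */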
status_t vmm_alloc_contiguous(vmm_aspace_t *aspace, const char *name, size_t size, void **ptr,
                              uint8_t align_pow2, uint vmm_flags, uint arch_mmu_flags) {
    status_t err = NO_ERROR;

    LTRACEF("aspace %p name '%s' size 0x%zx ptr %p align %hhu vmm_flags 0x%x arch_mmu_flags 0x%x\n",
            aspace, name, size, ptr ? *ptr : 0, align_pow2, vmm_flags, arch_mmu_flags);

    DEBUG_ASSERT(aspace);

    size = ROUNDUP(size, PAGE_SIZE);
    if (size == 0)
        return ERR_INVALID_ARGS;

    if (!name)
        name = "";

    vaddr_t vaddr = 0;

    /* if they're asking for a specific spot, copy the address */
    if (vmm_flags & VMM_FLAG_VALLOC_SPECIFIC) {
        /* can't ask for a specific spot and then not provide one */
        if (!ptr) {
            err = ERR_INVALID_ARGS;
            goto err;
        }
        vaddr = (vaddr_t)*ptr;
    }

    /* allocate physical memory up front, in case it can't be satisfied */
    struct list_node page_list;
    list_initialize(&page_list);

    paddr_t pa = 0;
    /* allocate a run of physical pages */
    size_t count = pmm_alloc_contiguous(size / PAGE_SIZE, align_pow2, &pa, &page_list);
    if (count < size / PAGE_SIZE) {
        DEBUG_ASSERT(count == 0); /* check that the pmm didn't allocate a partial run */
        err = ERR_NO_MEMORY;
        goto err;
    }

    mutex_acquire(&vmm_lock);

    /* allocate a region and put it in the aspace list */
    vmm_region_t *r = alloc_region(aspace, name, size, vaddr, align_pow2, vmm_flags,
                                   VMM_REGION_FLAG_PHYSICAL, arch_mmu_flags);
    if (!r) {
        err = ERR_NO_MEMORY;
        goto err1;
    }

    /* return the vaddr if requested */
    if (ptr)
        *ptr = (void *)r->base;

    /* map all of the pages */
    arch_mmu_map(&aspace->arch_aspace, r->base, pa, size / PAGE_SIZE, arch_mmu_flags);
    // XXX deal with error mapping here

    vm_page_t *p;
    while ((p = list_remove_head_type(&page_list, vm_page_t, node))) {
        list_add_tail(&r->page_list, &p->node);
    }

    mutex_release(&vmm_lock);
    return NO_ERROR;

err1:
    mutex_release(&vmm_lock);
    pmm_free(&page_list);
err:
    return err;
}

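/* allocate a virtually contiguous region backed by whatever physical pages the pmm hands back,
 * mapping them one page at a time */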
status_t vmm_alloc(vmm_aspace_t *aspace, const char *name, size_t size, void **ptr,
                   uint8_t align_pow2, uint vmm_flags, uint arch_mmu_flags) {
    status_t err = NO_ERROR;

    LTRACEF("aspace %p name '%s' size 0x%zx ptr %p align %hhu vmm_flags 0x%x arch_mmu_flags 0x%x\n",
            aspace, name, size, ptr ? *ptr : 0, align_pow2, vmm_flags, arch_mmu_flags);

    DEBUG_ASSERT(aspace);

    size = ROUNDUP(size, PAGE_SIZE);
    if (size == 0)
        return ERR_INVALID_ARGS;

    if (!name)
        name = "";

    vaddr_t vaddr = 0;

    /* if they're asking for a specific spot, copy the address */
    if (vmm_flags & VMM_FLAG_VALLOC_SPECIFIC) {
        /* can't ask for a specific spot and then not provide one */
        if (!ptr) {
            err = ERR_INVALID_ARGS;
            goto err;
        }
        vaddr = (vaddr_t)*ptr;
    }

    /* allocate physical memory up front, in case it can't be satisfied */

    /* allocate a random pile of pages */
    struct list_node page_list;
    list_initialize(&page_list);

    size_t count = pmm_alloc_pages(size / PAGE_SIZE, &page_list);
    DEBUG_ASSERT(count <= size);
    if (count < size / PAGE_SIZE) {
        LTRACEF("failed to allocate enough pages (asked for %zu, got %zu)\n", size / PAGE_SIZE, count);
        pmm_free(&page_list);
        err = ERR_NO_MEMORY;
        goto err;
    }

    mutex_acquire(&vmm_lock);

    /* allocate a region and put it in the aspace list */
    vmm_region_t *r = alloc_region(aspace, name, size, vaddr, align_pow2, vmm_flags,
                                   VMM_REGION_FLAG_PHYSICAL, arch_mmu_flags);
    if (!r) {
        err = ERR_NO_MEMORY;
        goto err1;
    }

    /* return the vaddr if requested */
    if (ptr)
        *ptr = (void *)r->base;

    /* map all of the pages */
    /* XXX use smarter algorithm that tries to build runs */
    vm_page_t *p;
    vaddr_t va = r->base;
    DEBUG_ASSERT(IS_PAGE_ALIGNED(va));
    while ((p = list_remove_head_type(&page_list, vm_page_t, node))) {
        DEBUG_ASSERT(va <= r->base + r->size - 1);

        paddr_t pa = vm_page_to_paddr(p);
        DEBUG_ASSERT(IS_PAGE_ALIGNED(pa));

        arch_mmu_map(&aspace->arch_aspace, va, pa, 1, arch_mmu_flags);
        // XXX deal with error mapping here

        list_add_tail(&r->page_list, &p->node);

        va += PAGE_SIZE;
    }

    mutex_release(&vmm_lock);
    return NO_ERROR;

err1:
    mutex_release(&vmm_lock);
    pmm_free(&page_list);
err:
    return err;
}

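/* find the region containing vaddr; the caller must hold vmm_lock */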
static vmm_region_t *vmm_find_region(const vmm_aspace_t *aspace, vaddr_t vaddr) {
    vmm_region_t *r;

    DEBUG_ASSERT(aspace);

    if (!aspace)
        return NULL;

    /* search the region list */
    list_for_every_entry(&aspace->region_list, r, vmm_region_t, node) {
        if ((vaddr >= r->base) && (vaddr <= r->base + r->size - 1))
            return r;
    }

    return NULL;
}

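/* unmap the region containing vaddr, return its pages to the pmm, and free the region structure */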
status_t vmm_free_region(vmm_aspace_t *aspace, vaddr_t vaddr) {
    mutex_acquire(&vmm_lock);

    vmm_region_t *r = vmm_find_region(aspace, vaddr);
    if (!r) {
        mutex_release(&vmm_lock);
        return ERR_NOT_FOUND;
    }

    /* remove it from aspace */
    list_delete(&r->node);

    /* unmap it */
    arch_mmu_unmap(&aspace->arch_aspace, r->base, r->size / PAGE_SIZE);

    mutex_release(&vmm_lock);

    /* return physical pages if any */
    pmm_free(&r->page_list);

    /* free it */
    free(r);

    return NO_ERROR;
}

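/* create a new (kernel or user) address space and add it to the global aspace list */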
status_t vmm_create_aspace(vmm_aspace_t **_aspace, const char *name, uint flags) {
    status_t err;

    vmm_aspace_t *aspace = calloc(1, sizeof(vmm_aspace_t));
    if (!aspace)
        return ERR_NO_MEMORY;

    if (name)
        strlcpy(aspace->name, name, sizeof(aspace->name));
    else
        strlcpy(aspace->name, "unnamed", sizeof(aspace->name));

    aspace->flags = flags;

    if (aspace->flags & VMM_ASPACE_FLAG_KERNEL) {
        aspace->base = KERNEL_ASPACE_BASE;
        aspace->size = KERNEL_ASPACE_SIZE;
    } else {
        aspace->base = USER_ASPACE_BASE;
        aspace->size = USER_ASPACE_SIZE;
    }

    /* initialize the arch specific component of our address space */
    err = arch_mmu_init_aspace(&aspace->arch_aspace, aspace->base, aspace->size,
                               (aspace->flags & VMM_ASPACE_FLAG_KERNEL) ? ARCH_ASPACE_FLAG_KERNEL : 0);
    if (err < 0) {
        free(aspace);
        return err;
    }

    list_clear_node(&aspace->node);
    list_initialize(&aspace->region_list);

    mutex_acquire(&vmm_lock);
    list_add_head(&aspace_list, &aspace->node);
    mutex_release(&vmm_lock);

    *_aspace = aspace;

    return NO_ERROR;
}

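/* tear down an address space: unmap and free every region, detach it from the current thread if it
 * is the active aspace, then destroy the arch-specific state */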
status_t vmm_free_aspace(vmm_aspace_t *aspace) {
    /* pop it out of the global aspace list */
    mutex_acquire(&vmm_lock);
    if (!list_in_list(&aspace->node)) {
        mutex_release(&vmm_lock);
        return ERR_INVALID_ARGS;
    }
    list_delete(&aspace->node);

    /* free all of the regions */
    struct list_node region_list = LIST_INITIAL_VALUE(region_list);

    vmm_region_t *r;
    while ((r = list_remove_head_type(&aspace->region_list, vmm_region_t, node))) {
        /* add it to our temporary list */
        list_add_tail(&region_list, &r->node);

        /* unmap it */
        arch_mmu_unmap(&aspace->arch_aspace, r->base, r->size / PAGE_SIZE);
    }
    mutex_release(&vmm_lock);

    /* without the vmm lock held, free all of the pmm pages and the structures */
    while ((r = list_remove_head_type(&region_list, vmm_region_t, node))) {
        /* return physical pages if any */
        pmm_free(&r->page_list);

        /* free it */
        free(r);
    }

    /* make sure the current thread is not using the aspace */
    thread_t *current_thread = get_current_thread();
    if (current_thread->aspace == aspace) {
        THREAD_LOCK(state);
        current_thread->aspace = NULL;
        vmm_context_switch(aspace, NULL);
        THREAD_UNLOCK(state);
    }

    /* destroy the arch portion of the aspace */
    arch_mmu_destroy_aspace(&aspace->arch_aspace);

    /* free the aspace */
    free(aspace);

    return NO_ERROR;
}

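/* switch the hardware mmu context; the thread lock must be held */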
void vmm_context_switch(vmm_aspace_t *oldspace, vmm_aspace_t *newaspace) {
    DEBUG_ASSERT(thread_lock_held());

    arch_mmu_context_switch(newaspace ? &newaspace->arch_aspace : NULL);
}

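/* make 'aspace' the active address space for the current thread, switching mmu context if needed */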
void vmm_set_active_aspace(vmm_aspace_t *aspace) {
    LTRACEF("aspace %p\n", aspace);

    thread_t *t = get_current_thread();
    DEBUG_ASSERT(t);

    if (aspace == t->aspace)
        return;

    /* grab the thread lock and switch to the new address space */
    THREAD_LOCK(state);
    vmm_aspace_t *old = t->aspace;
    t->aspace = aspace;
    vmm_context_switch(old, t->aspace);
    THREAD_UNLOCK(state);
}

static void dump_region(const vmm_region_t *r) {
    printf("\tregion %p: name '%s' range 0x%lx - 0x%lx size 0x%zx flags 0x%x mmu_flags 0x%x\n",
           r, r->name, r->base, r->base + r->size - 1, r->size, r->flags, r->arch_mmu_flags);
}

static void dump_aspace(const vmm_aspace_t *a) {
    printf("aspace %p: name '%s' range 0x%lx - 0x%lx size 0x%zx flags 0x%x\n",
           a, a->name, a->base, a->base + a->size - 1, a->size, a->flags);

    printf("regions:\n");
    vmm_region_t *r;
    list_for_every_entry(&a->region_list, r, vmm_region_t, node) {
        dump_region(r);
    }
}

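/* console 'vmm' command: dump address spaces and exercise the allocation paths from the shell */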
static int cmd_vmm(int argc, const console_cmd_args *argv) {
    if (argc < 2) {
notenoughargs:
        printf("not enough arguments\n");
usage:
        printf("usage:\n");
        printf("%s aspaces\n", argv[0].str);
        printf("%s alloc <size> <align_pow2>\n", argv[0].str);
        printf("%s alloc_physical <paddr> <size> <align_pow2>\n", argv[0].str);
        printf("%s alloc_contig <size> <align_pow2>\n", argv[0].str);
        printf("%s free_region <address>\n", argv[0].str);
        printf("%s create_aspace\n", argv[0].str);
        printf("%s create_test_aspace\n", argv[0].str);
        printf("%s free_aspace <address>\n", argv[0].str);
        printf("%s set_test_aspace <address>\n", argv[0].str);
        return ERR_GENERIC;
    }

    static vmm_aspace_t *test_aspace;
    if (!test_aspace)
        test_aspace = vmm_get_kernel_aspace();

    if (!strcmp(argv[1].str, "aspaces")) {
        vmm_aspace_t *a;
        list_for_every_entry(&aspace_list, a, vmm_aspace_t, node) {
            dump_aspace(a);
        }
    } else if (!strcmp(argv[1].str, "alloc")) {
        if (argc < 4) goto notenoughargs;

        void *ptr = (void *)0x99;
        status_t err = vmm_alloc(test_aspace, "alloc test", argv[2].u, &ptr, argv[3].u, 0, 0);
        printf("vmm_alloc returns %d, ptr %p\n", err, ptr);
    } else if (!strcmp(argv[1].str, "alloc_physical")) {
        if (argc < 5) goto notenoughargs;

        void *ptr = (void *)0x99;
        status_t err = vmm_alloc_physical(test_aspace, "physical test", argv[3].u, &ptr,
                                          argv[4].u, argv[2].u, 0, ARCH_MMU_FLAG_UNCACHED_DEVICE);
        printf("vmm_alloc_physical returns %d, ptr %p\n", err, ptr);
    } else if (!strcmp(argv[1].str, "alloc_contig")) {
        if (argc < 4) goto notenoughargs;

        void *ptr = (void *)0x99;
        status_t err = vmm_alloc_contiguous(test_aspace, "contig test", argv[2].u, &ptr, argv[3].u, 0, 0);
        printf("vmm_alloc_contig returns %d, ptr %p\n", err, ptr);
    } else if (!strcmp(argv[1].str, "free_region")) {
        if (argc < 3) goto notenoughargs;

        status_t err = vmm_free_region(test_aspace, (vaddr_t)argv[2].u);
        printf("vmm_free_region returns %d\n", err);
    } else if (!strcmp(argv[1].str, "create_aspace")) {
        vmm_aspace_t *aspace;
        status_t err = vmm_create_aspace(&aspace, "test", 0);
        printf("vmm_create_aspace returns %d, aspace %p\n", err, aspace);
    } else if (!strcmp(argv[1].str, "create_test_aspace")) {
        vmm_aspace_t *aspace;
        status_t err = vmm_create_aspace(&aspace, "test", 0);
        printf("vmm_create_aspace returns %d, aspace %p\n", err, aspace);
        if (err < 0)
            return err;

        test_aspace = aspace;
        get_current_thread()->aspace = aspace;
        thread_sleep(1); // XXX hack to force it to reschedule and thus load the aspace
    } else if (!strcmp(argv[1].str, "free_aspace")) {
        if (argc < 3) goto notenoughargs;

        vmm_aspace_t *aspace = (void *)argv[2].u;
        if (test_aspace == aspace)
            test_aspace = NULL;

        if (get_current_thread()->aspace == aspace) {
            get_current_thread()->aspace = NULL;
            thread_sleep(1); // hack
        }

        status_t err = vmm_free_aspace(aspace);
        printf("vmm_free_aspace returns %d\n", err);
    } else if (!strcmp(argv[1].str, "set_test_aspace")) {
        if (argc < 3) goto notenoughargs;

        test_aspace = (void *)argv[2].u;
        get_current_thread()->aspace = test_aspace;
        thread_sleep(1); // XXX hack to force it to reschedule and thus load the aspace
    } else {
        printf("unknown command\n");
        goto usage;
    }

    return NO_ERROR;
}

STATIC_COMMAND_START
#if LK_DEBUGLEVEL > 0
STATIC_COMMAND("vmm", "virtual memory manager", &cmd_vmm)
#endif
STATIC_COMMAND_END(vmm);