1 /*
2  * Copyright (c) 2014 Travis Geiselbrecht
3  *
4  * Use of this source code is governed by a MIT-style
5  * license that can be found in the LICENSE file or at
6  * https://opensource.org/licenses/MIT
7  */
8 #include <kernel/vm.h>
9 
10 #include <arch/mmu.h>
11 #include <kernel/thread.h>
12 #include <lk/console_cmd.h>
13 #include <lk/debug.h>
14 #include <lk/err.h>
15 #include <lk/init.h>
16 #include <lk/trace.h>
17 #include <string.h>
18 
19 #include "vm_priv.h"
20 
21 #define LOCAL_TRACE 0
22 
23 extern int _start;
24 extern int _end;
25 
26 /* mark the physical pages backing a range of virtual as in use.
27  * allocate the physical pages and throw them away */
mark_pages_in_use(vaddr_t va,size_t len)28 static void mark_pages_in_use(vaddr_t va, size_t len) {
29     LTRACEF("va 0x%lx, len 0x%zx\n", va, len);
30 
31     struct list_node list;
32     list_initialize(&list);
33 
34     /* make sure we are inclusive of all of the pages in the address range */
35     len = PAGE_ALIGN(len + (va & (PAGE_SIZE - 1)));
36     va = ROUNDDOWN(va, PAGE_SIZE);
37 
38     LTRACEF("aligned va 0x%lx, len 0x%zx\n", va, len);
39 
40     for (size_t offset = 0; offset < len; offset += PAGE_SIZE) {
41         uint flags;
42         paddr_t pa;
43 
44         status_t err = arch_mmu_query(&vmm_get_kernel_aspace()->arch_aspace, va + offset, &pa, &flags);
45         if (err >= 0) {
46             //LTRACEF("va 0x%x, pa 0x%x, flags 0x%x, err %d\n", va + offset, pa, flags, err);
47 
48             /* alloate the range, throw the results away */
49             pmm_alloc_range(pa, 1, &list);
50         } else {
51             panic("Could not find pa for va 0x%lx\n", va);
52         }
53     }
54 }
55 
/* init hook run just before the heap comes up: reserve physical pages
 * that are already in use so the pmm never hands them out */
static void vm_init_preheap(uint level) {
    LTRACE_ENTRY;

    /* allow the vmm a shot at initializing some of its data structures */
    vmm_init_preheap();

    /* the kernel image itself sits on physical pages; claim them */
    LTRACEF("marking all kernel pages as used\n");
    uintptr_t kernel_start = (uintptr_t)&_start;
    uintptr_t kernel_end = (uintptr_t)&_end;
    mark_pages_in_use((vaddr_t)kernel_start, kernel_end - kernel_start);

    /* claim anything handed out by the boot time allocator */
    if (boot_alloc_start != boot_alloc_end) {
        LTRACEF("marking boot alloc used from 0x%lx to 0x%lx\n", boot_alloc_start, boot_alloc_end);
        mark_pages_in_use(boot_alloc_start, boot_alloc_end - boot_alloc_start);
    }
}
73 
/* init hook run once the heap is available: bring up the vmm proper and
 * account for the mappings that already exist */
static void vm_init_postheap(uint level) {
    LTRACE_ENTRY;

    vmm_init();

    /* create vmm regions to cover what is already there from the initial mapping table */
    for (struct mmu_initial_mapping *m = mmu_initial_mappings; m->size > 0; m++) {
        if (m->flags & MMU_INITIAL_MAPPING_TEMPORARY)
            continue;

        vmm_reserve_space(vmm_get_kernel_aspace(), m->name, m->size, m->virt);
    }
}
89 
/* return the base virtual address of the first entry in the initial
 * mapping table, storing its size through size_return */
void *kvaddr_get_range(size_t *size_return) {
    struct mmu_initial_mapping *first = mmu_initial_mappings;

    *size_return = first->size;
    return (void *)first->virt;
}
94 
/* translate a physical address to a kernel virtual address by scanning
 * the initial mapping table (slow reverse lookup); NULL if not covered */
void *paddr_to_kvaddr(paddr_t pa) {
    for (struct mmu_initial_mapping *m = mmu_initial_mappings; m->size > 0; m++) {
        /* skip mappings that were only valid during early boot */
        if (m->flags & MMU_INITIAL_MAPPING_TEMPORARY)
            continue;

        /* inclusive range check: [phys, phys + size - 1] */
        if (pa >= m->phys && pa <= m->phys + m->size - 1)
            return (void *)(m->virt + (pa - m->phys));
    }

    return NULL;
}
108 
vaddr_to_paddr(void * ptr)109 paddr_t vaddr_to_paddr(void *ptr) {
110     vmm_aspace_t *aspace = vaddr_to_aspace(ptr);
111     if (!aspace)
112         return (paddr_t)NULL;
113 
114     paddr_t pa;
115     status_t rc = arch_mmu_query(&aspace->arch_aspace, (vaddr_t)ptr, &pa, NULL);
116     if (rc)
117         return (paddr_t)NULL;
118 
119     return pa;
120 }
121 
vaddr_to_aspace(void * ptr)122 vmm_aspace_t *vaddr_to_aspace(void *ptr) {
123     if (is_kernel_address((vaddr_t)ptr)) {
124         return vmm_get_kernel_aspace();
125     } else if (is_user_address((vaddr_t)ptr)) {
126         return get_current_thread()->aspace;
127     } else {
128         return NULL;
129     }
130 }
131 
cmd_vm(int argc,const console_cmd_args * argv)132 static int cmd_vm(int argc, const console_cmd_args *argv) {
133     if (argc < 2) {
134 notenoughargs:
135         printf("not enough arguments\n");
136 usage:
137         printf("usage:\n");
138         printf("%s phys2virt <address>\n", argv[0].str);
139         printf("%s virt2phys <address>\n", argv[0].str);
140         printf("%s map <phys> <virt> <count> <flags>\n", argv[0].str);
141         printf("%s unmap <virt> <count>\n", argv[0].str);
142         return ERR_GENERIC;
143     }
144 
145     if (!strcmp(argv[1].str, "phys2virt")) {
146         if (argc < 3) goto notenoughargs;
147 
148         void *ptr = paddr_to_kvaddr((paddr_t)argv[2].u);
149         printf("paddr_to_kvaddr returns %p\n", ptr);
150     } else if (!strcmp(argv[1].str, "virt2phys")) {
151         if (argc < 3) goto notenoughargs;
152 
153         vmm_aspace_t *aspace = vaddr_to_aspace((void *)argv[2].u);
154         if (!aspace) {
155             printf("ERROR: outside of any address space\n");
156             return -1;
157         }
158 
159         paddr_t pa;
160         uint flags;
161         status_t err = arch_mmu_query(&aspace->arch_aspace, argv[2].u, &pa, &flags);
162         printf("arch_mmu_query returns %d\n", err);
163         if (err >= 0) {
164             printf("\tpa 0x%lx, flags 0x%x\n", pa, flags);
165         }
166     } else if (!strcmp(argv[1].str, "map")) {
167         if (argc < 6) goto notenoughargs;
168 
169         vmm_aspace_t *aspace = vaddr_to_aspace((void *)argv[2].u);
170         if (!aspace) {
171             printf("ERROR: outside of any address space\n");
172             return -1;
173         }
174 
175         int err = arch_mmu_map(&aspace->arch_aspace, argv[3].u, argv[2].u, argv[4].u, argv[5].u);
176         printf("arch_mmu_map returns %d\n", err);
177     } else if (!strcmp(argv[1].str, "unmap")) {
178         if (argc < 4) goto notenoughargs;
179 
180         vmm_aspace_t *aspace = vaddr_to_aspace((void *)argv[2].u);
181         if (!aspace) {
182             printf("ERROR: outside of any address space\n");
183             return -1;
184         }
185 
186         int err = arch_mmu_unmap(&aspace->arch_aspace, argv[2].u, argv[3].u);
187         printf("arch_mmu_unmap returns %d\n", err);
188     } else {
189         printf("unknown command\n");
190         goto usage;
191     }
192 
193     return NO_ERROR;
194 }
195 
/* register the 'vm' console command (debug builds only) */
STATIC_COMMAND_START
#if LK_DEBUGLEVEL > 0
STATIC_COMMAND("vm", "vm commands", &cmd_vm)
#endif
STATIC_COMMAND_END(vm);

/* vm_init_preheap runs one level before the heap comes up;
 * vm_init_postheap runs at the vm init level, after the heap exists */
LK_INIT_HOOK(vm_preheap, &vm_init_preheap, LK_INIT_LEVEL_HEAP - 1);
LK_INIT_HOOK(vm, &vm_init_postheap, LK_INIT_LEVEL_VM);
204