// Copyright 2016 The Fuchsia Authors
// Copyright (c) 2014 Travis Geiselbrecht
//
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file or at
// https://opensource.org/licenses/MIT

#include "vm_priv.h"
#include <arch/mmu.h>
#include <assert.h>
#include <debug.h>
#include <err.h>
#include <fbl/algorithm.h>
#include <inttypes.h>
#include <kernel/thread.h>
#include <lib/console.h>
#include <lib/crypto/global_prng.h>
#include <string.h>
#include <trace.h>
#include <vm/bootalloc.h>
#include <vm/init.h>
#include <vm/physmap.h>
#include <vm/pmm.h>
#include <vm/vm.h>
#include <vm/vm_aspace.h>
#include <zircon/types.h>

#define LOCAL_TRACE MAX(VM_GLOBAL_TRACE, 0)

// boot-time allocated page full of zeros
vm_page_t* zero_page;
paddr_t zero_page_paddr;

// set early in arch code to record the start address of the kernel
paddr_t kernel_base_phys;

namespace {

// mark a range of physical pages as WIRED
void MarkPagesInUsePhys(paddr_t pa, size_t len) {
    LTRACEF("pa %#" PRIxPTR ", len %#zx\n", pa, len);

    // make sure we are inclusive of all of the pages in the address range
    len = PAGE_ALIGN(len + (pa & (PAGE_SIZE - 1)));
    pa = ROUNDDOWN(pa, PAGE_SIZE);

    LTRACEF("aligned pa %#" PRIxPTR ", len %#zx\n", pa, len);

    list_node list = LIST_INITIAL_VALUE(list);

    // pull the exact pages in [pa, pa + len) out of the pmm free list
    zx_status_t status = pmm_alloc_range(pa, len / PAGE_SIZE, &list);
    ASSERT_MSG(status == ZX_OK,
               "failed to reserve memory range [%#" PRIxPTR ", %#" PRIxPTR "]\n",
               pa, pa + len - 1);

    // mark all of the pages we allocated as WIRED
    vm_page_t* p;
    list_for_every_entry (&list, p, vm_page_t, queue_node) {
        p->state = VM_PAGE_STATE_WIRED;
    }
}

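// Apply |arch_mmu_flags| to the mapping in |aspace| that contains |va|. Note
// that the entire containing mapping is reprotected, not just the page at |va|.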
zx_status_t ProtectRegion(VmAspace* aspace, vaddr_t va, uint arch_mmu_flags) {
    auto r = aspace->FindRegion(va);
    if (!r) {
        return ZX_ERR_NOT_FOUND;
    }

    auto vm_mapping = r->as_vm_mapping();
    if (!vm_mapping) {
        return ZX_ERR_NOT_FOUND;
    }

    return vm_mapping->Protect(vm_mapping->base(), vm_mapping->size(), arch_mmu_flags);
}

} // namespace

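// Early VM initialization, called before the kernel heap is available. Wires
// down the memory handed out by the boot allocator and sets up the zero page.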
void vm_init_preheap() {
    LTRACE_ENTRY;

    // allow the vmm a shot at initializing some of its data structures
    VmAspace::KernelAspaceInitPreHeap();

    // mark the physical pages used by the boot time allocator
    if (boot_alloc_end != boot_alloc_start) {
        dprintf(INFO, "VM: marking boot alloc used range [%#" PRIxPTR ", %#" PRIxPTR ")\n", boot_alloc_start,
                boot_alloc_end);

        MarkPagesInUsePhys(boot_alloc_start, boot_alloc_end - boot_alloc_start);
    }

    zx_status_t status;

#if !DISABLE_KASLR // set DISABLE_KASLR to remove the random memory padding
    // Reserve up to 15 pages as random padding in the kernel physical mapping
    uchar entropy;
    crypto::GlobalPRNG::GetInstance()->Draw(&entropy, sizeof(entropy));
    struct list_node list;
    list_initialize(&list);
    size_t page_count = entropy % 16;
    status = pmm_alloc_pages(page_count, 0, &list);
    DEBUG_ASSERT(status == ZX_OK);
    LTRACEF("physical mapping padding page count %#zx\n", page_count);
#endif

    // grab a page and mark it as the zero page
    status = pmm_alloc_page(0, &zero_page, &zero_page_paddr);
    DEBUG_ASSERT(status == ZX_OK);

    void* ptr = paddr_to_physmap(zero_page_paddr);
    DEBUG_ASSERT(ptr);

    arch_zero_page(ptr);
}

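// Main VM initialization: replace the temporary kernel mappings with
// permanent, properly protected regions and reserve the remaining fixed
// areas of the kernel address space.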
void vm_init() {
    LTRACE_ENTRY;

    VmAspace* aspace = VmAspace::kernel_aspace();

    // the kernel is expected to be in a temporary mapping; define the
    // permanent regions for it now
    struct temp_region {
        const char* name;
        vaddr_t base;
        size_t size;
        uint arch_mmu_flags;
    } regions[] = {
        {
            .name = "kernel_code",
            .base = (vaddr_t)__code_start,
            .size = ROUNDUP((uintptr_t)__code_end - (uintptr_t)__code_start, PAGE_SIZE),
            .arch_mmu_flags = ARCH_MMU_FLAG_PERM_READ | ARCH_MMU_FLAG_PERM_EXECUTE,
        },
        {
            .name = "kernel_rodata",
            .base = (vaddr_t)__rodata_start,
            .size = ROUNDUP((uintptr_t)__rodata_end - (uintptr_t)__rodata_start, PAGE_SIZE),
            .arch_mmu_flags = ARCH_MMU_FLAG_PERM_READ,
        },
        {
            .name = "kernel_data",
            .base = (vaddr_t)__data_start,
            .size = ROUNDUP((uintptr_t)__data_end - (uintptr_t)__data_start, PAGE_SIZE),
            .arch_mmu_flags = ARCH_MMU_FLAG_PERM_READ | ARCH_MMU_FLAG_PERM_WRITE,
        },
        {
            .name = "kernel_bss",
            .base = (vaddr_t)__bss_start,
            .size = ROUNDUP((uintptr_t)_end - (uintptr_t)__bss_start, PAGE_SIZE),
            .arch_mmu_flags = ARCH_MMU_FLAG_PERM_READ | ARCH_MMU_FLAG_PERM_WRITE,
        },
    };

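    // reserve each kernel region in the aspace, then tighten its MMU
    // permissions to match the flags chosen above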
    for (uint i = 0; i < fbl::count_of(regions); ++i) {
        temp_region* region = &regions[i];
        ASSERT(IS_PAGE_ALIGNED(region->base));

        dprintf(INFO, "VM: reserving kernel region [%#" PRIxPTR ", %#" PRIxPTR ") flags %#x name '%s'\n",
                region->base, region->base + region->size, region->arch_mmu_flags, region->name);

        zx_status_t status = aspace->ReserveSpace(region->name, region->size, region->base);
        ASSERT(status == ZX_OK);
        status = ProtectRegion(aspace, region->base, region->arch_mmu_flags);
        ASSERT(status == ZX_OK);
    }

    // reserve the kernel aspace where the physmap is
    aspace->ReserveSpace("physmap", PHYSMAP_SIZE, PHYSMAP_BASE);

#if !DISABLE_KASLR // set DISABLE_KASLR to remove the random memory padding
    // Reserve up to 64GB of random padding after the physmap so that the
    // adjacent memory mappings (kstack_vmar, arena:handles and others) land
    // at non-static virtual addresses.
    size_t entropy;
    crypto::GlobalPRNG::GetInstance()->Draw(&entropy, sizeof(entropy));

    size_t random_size = PAGE_ALIGN(entropy % (64ULL * GB));
    zx_status_t status = aspace->ReserveSpace("random_padding", random_size, PHYSMAP_BASE + PHYSMAP_SIZE);
    ASSERT(status == ZX_OK);
    LTRACEF("VM: aspace random padding size: %#zx\n", random_size);
#endif
}

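// Translate a kernel virtual address to a physical address. Addresses within
// the physmap are translated arithmetically; anything else is resolved by
// querying the arch MMU of the owning aspace. Returns 0 if the address is
// not mapped.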
paddr_t vaddr_to_paddr(const void* ptr) {
    if (is_physmap_addr(ptr)) {
        return physmap_to_paddr(ptr);
    }

    auto aspace = VmAspace::vaddr_to_aspace(reinterpret_cast<uintptr_t>(ptr));
    if (!aspace) {
        return (paddr_t) nullptr;
    }

    paddr_t pa;
    zx_status_t rc = aspace->arch_aspace().Query((vaddr_t)ptr, &pa, nullptr);
    if (rc) {
        return (paddr_t) nullptr;
    }

    return pa;
}

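// kernel console command for poking at the VM layer; see the usage text
// below for the supported verbs. Addresses in these examples are
// illustrative:
//   vm virt2phys 0xffffffff80000000
//   vm phys2virt 0x100000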
static int cmd_vm(int argc, const cmd_args* argv, uint32_t flags) {
    if (argc < 2) {
    notenoughargs:
        printf("not enough arguments\n");
    usage:
        printf("usage:\n");
        printf("%s phys2virt <address>\n", argv[0].str);
        printf("%s virt2phys <address>\n", argv[0].str);
        printf("%s map <phys> <virt> <count> <flags>\n", argv[0].str);
        printf("%s unmap <virt> <count>\n", argv[0].str);
        return ZX_ERR_INTERNAL;
    }

    if (!strcmp(argv[1].str, "phys2virt")) {
        if (argc < 3) {
            goto notenoughargs;
        }

        if (!is_physmap_phys_addr(argv[2].u)) {
            printf("address isn't in physmap\n");
            return -1;
        }

        void* ptr = paddr_to_physmap((paddr_t)argv[2].u);
        printf("paddr_to_physmap returns %p\n", ptr);
    } else if (!strcmp(argv[1].str, "virt2phys")) {
        if (argc < 3) {
            goto notenoughargs;
        }

        VmAspace* aspace = VmAspace::vaddr_to_aspace(argv[2].u);
        if (!aspace) {
            printf("ERROR: outside of any address space\n");
            return -1;
        }

        paddr_t pa;
        uint flags;
        zx_status_t err = aspace->arch_aspace().Query(argv[2].u, &pa, &flags);
        printf("arch_mmu_query returns %d\n", err);
        if (err >= 0) {
            printf("\tpa %#" PRIxPTR ", flags %#x\n", pa, flags);
        }
    } else if (!strcmp(argv[1].str, "map")) {
        if (argc < 6) {
            goto notenoughargs;
        }

        // look up the aspace from the virtual address being mapped
        VmAspace* aspace = VmAspace::vaddr_to_aspace(argv[3].u);
        if (!aspace) {
            printf("ERROR: outside of any address space\n");
            return -1;
        }

        size_t mapped;
        auto err =
            aspace->arch_aspace().MapContiguous(argv[3].u, argv[2].u, (uint)argv[4].u,
                                                (uint)argv[5].u, &mapped);
        printf("arch_mmu_map returns %d, mapped %zu\n", err, mapped);
    } else if (!strcmp(argv[1].str, "unmap")) {
        if (argc < 4) {
            goto notenoughargs;
        }

        VmAspace* aspace = VmAspace::vaddr_to_aspace(argv[2].u);
        if (!aspace) {
            printf("ERROR: outside of any address space\n");
            return -1;
        }

        size_t unmapped;
        auto err = aspace->arch_aspace().Unmap(argv[2].u, (uint)argv[3].u, &unmapped);
        printf("arch_mmu_unmap returns %d, unmapped %zu\n", err, unmapped);
    } else {
        printf("unknown command\n");
        goto usage;
    }

    return ZX_OK;
}

STATIC_COMMAND_START
#if LK_DEBUGLEVEL > 0
STATIC_COMMAND("vm", "vm commands", &cmd_vm)
#endif
STATIC_COMMAND_END(vm);