/*
 * Copyright (c) 2014 Travis Geiselbrecht
 *
 * Use of this source code is governed by a MIT-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/MIT
 */
#pragma once

/* some assembly #defines, need to match the structure below */
#if IS_64BIT
#define __MMU_INITIAL_MAPPING_PHYS_OFFSET 0
#define __MMU_INITIAL_MAPPING_VIRT_OFFSET 8
#define __MMU_INITIAL_MAPPING_SIZE_OFFSET 16
#define __MMU_INITIAL_MAPPING_FLAGS_OFFSET 24
#define __MMU_INITIAL_MAPPING_SIZE        40
#else
#define __MMU_INITIAL_MAPPING_PHYS_OFFSET 0
#define __MMU_INITIAL_MAPPING_VIRT_OFFSET 4
#define __MMU_INITIAL_MAPPING_SIZE_OFFSET 8
#define __MMU_INITIAL_MAPPING_FLAGS_OFFSET 12
#define __MMU_INITIAL_MAPPING_SIZE        20
#endif

/* flags for initial mapping struct */
#define MMU_INITIAL_MAPPING_TEMPORARY     (0x1)
#define MMU_INITIAL_MAPPING_FLAG_UNCACHED (0x2)
#define MMU_INITIAL_MAPPING_FLAG_DEVICE   (0x4)
#define MMU_INITIAL_MAPPING_FLAG_DYNAMIC  (0x8)  /* entry has to be patched up by platform_reset */

#if !ARCH_HAS_MMU
#error ARCH needs to declare mmu support
#endif

#ifndef ASSEMBLY

#include <arch.h>
#include <arch/mmu.h>
#include <lk/compiler.h>
#include <lk/list.h>
#include <stdint.h>
#include <stdlib.h>
#include <sys/types.h>

__BEGIN_CDECLS

#define PAGE_ALIGN(x) ALIGN(x, PAGE_SIZE)
#define IS_PAGE_ALIGNED(x) IS_ALIGNED(x, PAGE_SIZE)
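
/*
 * Quick illustration (a sketch; assumes ALIGN() rounds up and a 4KB PAGE_SIZE
 * on this configuration):
 *
 *   PAGE_ALIGN(0x1001)      -> 0x2000
 *   IS_PAGE_ALIGNED(0x2000) -> true
 */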

struct mmu_initial_mapping {
    paddr_t phys;
    vaddr_t virt;
    size_t  size;
    unsigned int flags;
    const char *name;
};

/* Assert that the assembly macros above match this struct. */
STATIC_ASSERT(__offsetof(struct mmu_initial_mapping, phys) == __MMU_INITIAL_MAPPING_PHYS_OFFSET);
STATIC_ASSERT(__offsetof(struct mmu_initial_mapping, virt) == __MMU_INITIAL_MAPPING_VIRT_OFFSET);
STATIC_ASSERT(__offsetof(struct mmu_initial_mapping, size) == __MMU_INITIAL_MAPPING_SIZE_OFFSET);
STATIC_ASSERT(__offsetof(struct mmu_initial_mapping, flags) == __MMU_INITIAL_MAPPING_FLAGS_OFFSET);
STATIC_ASSERT(sizeof(struct mmu_initial_mapping) == __MMU_INITIAL_MAPPING_SIZE);

/* The platform or target must fill out one of these to set up the initial memory map
 * for the kernel and enough IO space to boot.
 */
extern struct mmu_initial_mapping mmu_initial_mappings[];
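
/*
 * Illustrative sketch of what a platform might provide; the addresses, sizes,
 * and names here are hypothetical (KERNEL_BASE stands in for wherever the
 * platform places the kernel), and the list is conventionally terminated by a
 * zero-size entry:
 *
 *   struct mmu_initial_mapping mmu_initial_mappings[] = {
 *       { .phys = 0x00000000, .virt = KERNEL_BASE, .size = 512*1024*1024,
 *         .flags = 0, .name = "memory" },
 *       { .phys = 0xe0000000, .virt = 0xe0000000, .size = 16*1024*1024,
 *         .flags = MMU_INITIAL_MAPPING_FLAG_DEVICE, .name = "peripherals" },
 *       { 0 }  // zero-size terminator
 *   };
 */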

/* core per page structure */
typedef struct vm_page {
    struct list_node node;

    uint flags : 8;
    uint ref : 24;
} vm_page_t;

#define VM_PAGE_FLAG_NONFREE  (0x1)

/* kernel address space */
#ifndef KERNEL_ASPACE_BASE
#define KERNEL_ASPACE_BASE ((vaddr_t)0x80000000UL)
#endif
#ifndef KERNEL_ASPACE_SIZE
#define KERNEL_ASPACE_SIZE ((vaddr_t)0x80000000UL)
#endif

STATIC_ASSERT(KERNEL_ASPACE_BASE + (KERNEL_ASPACE_SIZE - 1) > KERNEL_ASPACE_BASE);

static inline bool is_kernel_address(vaddr_t va) {
    return (va >= (vaddr_t)KERNEL_ASPACE_BASE && va <= ((vaddr_t)KERNEL_ASPACE_BASE + ((vaddr_t)KERNEL_ASPACE_SIZE - 1)));
}

/* user address space, defaults to below kernel space with a 16MB guard gap on either side */
#ifndef USER_ASPACE_BASE
#define USER_ASPACE_BASE ((vaddr_t)0x01000000UL)
#endif
#ifndef USER_ASPACE_SIZE
#define USER_ASPACE_SIZE ((vaddr_t)KERNEL_ASPACE_BASE - USER_ASPACE_BASE - 0x01000000UL)
#endif

STATIC_ASSERT(USER_ASPACE_BASE + (USER_ASPACE_SIZE - 1) > USER_ASPACE_BASE);

static inline bool is_user_address(vaddr_t va) {
    return (va >= USER_ASPACE_BASE && va <= (USER_ASPACE_BASE + (USER_ASPACE_SIZE - 1)));
}

/* physical allocator */
typedef struct pmm_arena {
    struct list_node node;
    const char *name;

    uint flags;
    uint priority;

    paddr_t base;
    size_t  size;

    size_t free_count;

    struct vm_page *page_array;
    struct list_node free_list;
} pmm_arena_t;

#define PMM_ARENA_FLAG_KMAP (0x1) /* this arena is already mapped and useful for kallocs */

/* Add a pre-filled memory arena to the physical allocator. */
status_t pmm_add_arena(pmm_arena_t *arena) __NONNULL((1));
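
/*
 * Usage sketch (the values and arena name are hypothetical):
 *
 *   static pmm_arena_t sdram_arena = {
 *       .name     = "sdram",
 *       .base     = 0x80000000,
 *       .size     = 256*1024*1024,
 *       .priority = 1,
 *       .flags    = PMM_ARENA_FLAG_KMAP,
 *   };
 *
 *   pmm_add_arena(&sdram_arena);
 */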

/* Allocate count pages of physical memory, adding to the tail of the passed list.
 * The list must be initialized.
 * Returns the number of pages allocated.
 */
size_t pmm_alloc_pages(uint count, struct list_node *list) __NONNULL((2));
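
/*
 * Usage sketch (hedged; error handling abbreviated):
 *
 *   struct list_node pages = LIST_INITIAL_VALUE(pages);
 *   size_t allocated = pmm_alloc_pages(4, &pages);
 *   if (allocated < 4)
 *       pmm_free(&pages);   // partial allocation, return what we got
 */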

/* Allocate a single page */
vm_page_t *pmm_alloc_page(void);

/* Allocate a specific range of physical pages, adding to the tail of the passed list.
 * The list must be initialized.
 * Returns the number of pages allocated.
 */
size_t pmm_alloc_range(paddr_t address, uint count, struct list_node *list) __NONNULL((3));

/* Free a list of physical pages.
 * Returns the number of pages freed.
 */
size_t pmm_free(struct list_node *list) __NONNULL((1));

/* Helper routine for the above. */
size_t pmm_free_page(vm_page_t *page) __NONNULL((1));

/* Allocate a run of contiguous pages, aligned on a 2^align_log2 byte boundary (align_log2 0-31).
 * If the optional physical address pointer is passed, return the base address through it.
 * If the optional list is passed, append the allocated page structures to the tail of the list.
 */
size_t pmm_alloc_contiguous(uint count, uint8_t align_log2, paddr_t *pa, struct list_node *list);
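
/*
 * Usage sketch (hedged): 16 contiguous pages aligned to a 64KB (1 << 16) boundary.
 *
 *   paddr_t pa;
 *   size_t got = pmm_alloc_contiguous(16, 16, &pa, NULL);
 *   if (got == 16) {
 *       // pa holds the base physical address of the run
 *   }
 */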

/* Allocate a run of pages out of the kernel area and return the pointer in kernel space.
 * If the optional list is passed, append the allocated page structures to the tail of the list.
 */
void *pmm_alloc_kpages(uint count, struct list_node *list);

/* Helper routine for pmm_alloc_kpages. */
static inline void *pmm_alloc_kpage(void) { return pmm_alloc_kpages(1, NULL); }

size_t pmm_free_kpages(void *ptr, uint count);
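
/*
 * Usage sketch (hedged):
 *
 *   void *buf = pmm_alloc_kpages(8, NULL);
 *   if (buf) {
 *       // ... use the 8-page kernel-mapped buffer ...
 *       pmm_free_kpages(buf, 8);
 *   }
 */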

/* physical to virtual */
void *paddr_to_kvaddr(paddr_t pa);

/* a hint as to which virtual addresses will be returned by pmm_alloc_kpages */
void *kvaddr_get_range(size_t *size_return);

/* virtual to physical */
paddr_t vaddr_to_paddr(void *va);

/* vm_page_t to physical address */
paddr_t vm_page_to_paddr(const vm_page_t *page);

/* paddr to vm_page_t */
vm_page_t *paddr_to_vm_page(paddr_t addr);

/* virtual allocator */
typedef struct vmm_aspace {
    struct list_node node;
    char name[32];

    uint flags;

    vaddr_t base;
    size_t  size;

    struct list_node region_list;

    arch_aspace_t arch_aspace;
} vmm_aspace_t;

#define VMM_ASPACE_FLAG_KERNEL 0x1

typedef struct vmm_region {
    struct list_node node;
    char name[32];

    uint flags;
    uint arch_mmu_flags;

    vaddr_t base;
    size_t  size;

    struct list_node page_list;
} vmm_region_t;

#define VMM_REGION_FLAG_RESERVED 0x1
#define VMM_REGION_FLAG_PHYSICAL 0x2

/* grab a handle to the kernel address space */
extern vmm_aspace_t _kernel_aspace;
static inline vmm_aspace_t *vmm_get_kernel_aspace(void) {
    return &_kernel_aspace;
}

/* virtual to container address space */
struct vmm_aspace *vaddr_to_aspace(void *ptr);

/* reserve a chunk of address space to prevent allocations from that space */
status_t vmm_reserve_space(vmm_aspace_t *aspace, const char *name, size_t size, vaddr_t vaddr)
__NONNULL((1));

/* allocate a region of virtual space that maps a physical piece of address space.
   the physical pages that back this are not allocated from the pmm. */
status_t vmm_alloc_physical(vmm_aspace_t *aspace, const char *name, size_t size, void **ptr, uint8_t align_log2, paddr_t paddr, uint vmm_flags, uint arch_mmu_flags)
__NONNULL((1));
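
/*
 * Usage sketch (hedged; the device address is hypothetical and the mmu flag is
 * one of the ARCH_MMU_FLAG_* values from arch/mmu.h):
 *
 *   void *regs;
 *   status_t err = vmm_alloc_physical(vmm_get_kernel_aspace(), "uart", PAGE_SIZE,
 *                                     &regs, 0, 0x09000000, 0,
 *                                     ARCH_MMU_FLAG_UNCACHED_DEVICE);
 *   if (err == NO_ERROR) {
 *       // regs now points at the mapped device registers
 *   }
 */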

/* allocate a region of memory backed by newly allocated contiguous physical memory */
status_t vmm_alloc_contiguous(vmm_aspace_t *aspace, const char *name, size_t size, void **ptr, uint8_t align_log2, uint vmm_flags, uint arch_mmu_flags)
__NONNULL((1));

/* allocate a region of memory backed by newly allocated physical memory */
status_t vmm_alloc(vmm_aspace_t *aspace, const char *name, size_t size, void **ptr, uint8_t align_log2, uint vmm_flags, uint arch_mmu_flags)
__NONNULL((1));
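
/*
 * Usage sketch (hedged): a 64KB kernel buffer backed by freshly allocated pages.
 *
 *   void *buf;
 *   status_t err = vmm_alloc(vmm_get_kernel_aspace(), "scratch", 64*1024,
 *                            &buf, 0, 0, ARCH_MMU_FLAG_CACHED);
 *   if (err == NO_ERROR) {
 *       // buf is mapped and ready; release it later with vmm_free_region()
 *   }
 */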

/* Unmap previously allocated region and free physical memory pages backing it (if any) */
status_t vmm_free_region(vmm_aspace_t *aspace, vaddr_t va);

/* For the above region creation routines. Allocate virtual space at the passed in pointer. */
#define VMM_FLAG_VALLOC_SPECIFIC 0x1

/* allocate a new address space */
status_t vmm_create_aspace(vmm_aspace_t **aspace, const char *name, uint flags)
__NONNULL((1));

/* destroy everything in the address space */
status_t vmm_free_aspace(vmm_aspace_t *aspace)
__NONNULL((1));
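
/*
 * Usage sketch (hedged):
 *
 *   vmm_aspace_t *as;
 *   if (vmm_create_aspace(&as, "user", 0) == NO_ERROR) {
 *       // ... populate with the vmm_alloc* routines above ...
 *       vmm_free_aspace(as);
 *   }
 */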

/* internal routine used by the scheduler to swap mmu contexts */
void vmm_context_switch(vmm_aspace_t *oldspace, vmm_aspace_t *newaspace);

/* set the current user aspace as active on the current thread.
   NULL is a valid argument, which unmaps the current user address space */
void vmm_set_active_aspace(vmm_aspace_t *aspace);

__END_CDECLS

#endif // !ASSEMBLY
