1 /*
2 * Copyright (c) 2015 Google, Inc. All rights reserved
3 *
4 * Use of this source code is governed by a MIT-style
5 * license that can be found in the LICENSE file or at
6 * https://opensource.org/licenses/MIT
7 */
8
9 #include "kernel/novm.h"
10
11 #include <assert.h>
12 #include <kernel/mutex.h>
13 #include <lk/console_cmd.h>
14 #include <lk/err.h>
15 #include <lk/init.h>
16 #include <lk/trace.h>
17 #include <stdlib.h>
18 #include <string.h>
19
20 #define LOCAL_TRACE 0
21
/*
 * A single physical memory arena managed without a VM.
 * Allocation state is a simple byte map: one byte per page, nonzero == in use.
 */
struct novm_arena {
    mutex_t lock;       // protects the allocation map
    const char *name;   // human-readable name for debug dumps
    size_t pages;       // number of pages tracked by |map|
    char *map;          // one byte per page; 0 = free, nonzero = allocated
    char *base;         // first page-aligned byte of allocatable memory
    size_t size;        // size in bytes of the page-aligned region at |base|

    // We divide the memory up into pages. If there is memory we can use before
    // the first aligned page address, then we record it here and the heap will use
    // it.
#define MINIMUM_USEFUL_UNALIGNED_SIZE 64
    void *unaligned_area;
    size_t unaligned_size;
};
37

/* not a static vm, not using the kernel vm */
/* linker-provided symbols; only their addresses are meaningful */
extern int _end;        // presumably end of the kernel image — placed by the linker script
extern int _end_of_ram; // presumably end of usable ram — placed by the linker script

#define END_OF_KERNEL ((uintptr_t)&_end)
#define MEM_START END_OF_KERNEL
#define MEM_SIZE ((MEMBASE + MEMSIZE) - MEM_START)
// one map byte per page of the platform's total memory
#define DEFAULT_MAP_SIZE (MEMSIZE >> PAGE_SIZE_SHIFT)

/* a static list of arenas */
#ifndef NOVM_MAX_ARENAS
#define NOVM_MAX_ARENAS 1
#endif
struct novm_arena arena[NOVM_MAX_ARENAS];

/* construct a default arena based on platform #defines at boot */
#ifndef NOVM_DEFAULT_ARENA
#define NOVM_DEFAULT_ARENA 1
#endif
58
novm_get_arenas(struct page_range * ranges,int number_of_ranges)59 int novm_get_arenas(struct page_range *ranges, int number_of_ranges) {
60 int ranges_found = 0;
61 for (int i = 0; i < number_of_ranges && i < NOVM_MAX_ARENAS; i++) {
62 if (arena[i].pages > 0) ranges_found = i + 1;
63 ranges[i].address = (void *)arena[i].base;
64 ranges[i].size = arena[i].pages << PAGE_SIZE_SHIFT;
65 }
66 return ranges_found;
67 }
68
novm_alloc_unaligned(size_t * size_return)69 void *novm_alloc_unaligned(size_t *size_return) {
70 /* only do the unaligned thing in the first arena */
71 if (arena[0].unaligned_area != NULL) {
72 *size_return = arena[0].unaligned_size;
73 void *result = arena[0].unaligned_area;
74 arena[0].unaligned_area = NULL;
75 arena[0].unaligned_size = 0;
76 return result;
77 }
78 *size_return = PAGE_SIZE;
79 return novm_alloc_pages(1, NOVM_ARENA_ANY);
80 }
81
in_arena(struct novm_arena * n,void * p)82 static bool in_arena(struct novm_arena *n, void *p) {
83 if (n->size == 0)
84 return false;
85
86 char *ptr = (char *)p;
87 char *base = n->base;
88 return ptr >= base && ptr < base + n->size;
89 }
90
/*
 * Set up one arena over [arena_start, arena_start + arena_size).
 *
 * The usable region is trimmed to whole pages. The allocation map (one byte
 * per page) is either the caller-supplied |default_map| (if big enough) or is
 * carved out of the front of the arena itself. Any leftover bytes between the
 * end of the map and the first aligned page — or between arena_start and the
 * first aligned page when a static map is used — are recorded as the
 * "unaligned area" for the heap to consume, if at least
 * MINIMUM_USEFUL_UNALIGNED_SIZE bytes.
 */
static void novm_init_helper(struct novm_arena *n, const char *name,
                             uintptr_t arena_start, uintptr_t arena_size,
                             char *default_map, size_t default_map_size) {
    // trim to whole pages: first aligned address, then whole pages that fit
    uintptr_t start = ROUNDUP(arena_start, PAGE_SIZE);
    uintptr_t size = ROUNDDOWN(arena_start + arena_size, PAGE_SIZE) - start;

    mutex_init(&n->lock);

    size_t map_size = size >> PAGE_SIZE_SHIFT;  // one map byte per page
    char *map = default_map;
    if (map == NULL || default_map_size < map_size) {
        // allocate the map out of the arena itself
        map = (char *)arena_start;

        // Grab enough map for 16Mbyte of arena each time around the loop.
        // (each page-sized chunk of map covers PAGE_SIZE pages of arena,
        // i.e. 16MB with 4K pages — TODO confirm for other page sizes)
        while (start - arena_start < map_size) {
            start += PAGE_SIZE;
            size -= PAGE_SIZE;
            map_size--;
        }

        // leftover bytes between the (4-byte rounded) end of the map and the
        // first allocatable page become the unaligned area, if big enough
        if ((char *)start - (map + ROUNDUP(map_size, 4)) >= MINIMUM_USEFUL_UNALIGNED_SIZE) {
            n->unaligned_area = map + ROUNDUP(map_size, 4);
            n->unaligned_size = (char *)start - (map + ROUNDUP(map_size, 4));
        }
    } else if (start - arena_start >= MINIMUM_USEFUL_UNALIGNED_SIZE) {
        // static map: the slack before the first aligned page is usable
        n->unaligned_area = (char *)arena_start;
        n->unaligned_size = start - arena_start;
    }
    n->name = name;
    n->map = map;
    memset(n->map, 0, map_size);  // all pages start out free
    n->pages = map_size;
    n->base = (char *)start;
    n->size = size;
}
127
novm_add_arena(const char * name,uintptr_t arena_start,uintptr_t arena_size)128 void novm_add_arena(const char *name, uintptr_t arena_start, uintptr_t arena_size) {
129 LTRACEF("name '%s' start %#lx size %#lx\n", name, arena_start, arena_size);
130 for (uint i = 0; i < NOVM_MAX_ARENAS; i++) {
131 if (arena[i].pages == 0) {
132 // if this arena covers where the kernel is, bump start to MEM_START
133 if (arena_start < END_OF_KERNEL && arena_start + arena_size > END_OF_KERNEL) {
134 arena_size -= END_OF_KERNEL - arena_start;
135 arena_start = END_OF_KERNEL;
136 LTRACEF("trimming arena to %#lx size %#lx\n", arena_start, arena_size);
137 }
138
139 novm_init_helper(&arena[i], name, arena_start, arena_size, NULL, 0);
140 return;
141 }
142 }
143 panic("novm_add_arena: too many arenas added, bump NOVM_MAX_ARENAS!\n");
144 }
145
#if NOVM_DEFAULT_ARENA
/*
 * Boot-time construction of the default "main" arena, covering everything
 * from the end of the kernel to MEMBASE + MEMSIZE. The allocation map lives
 * in static storage so no arena memory is consumed by it.
 */
static void novm_init(uint level) {
    static char mem_allocation_map[DEFAULT_MAP_SIZE];
    novm_init_helper(&arena[0], "main", MEM_START, MEM_SIZE, mem_allocation_map, DEFAULT_MAP_SIZE);
}

/* run just before PLATFORM_EARLY so allocations work during platform init */
LK_INIT_HOOK(novm, &novm_init, LK_INIT_LEVEL_PLATFORM_EARLY - 1);
#endif
154
novm_alloc_helper(struct novm_arena * n,size_t pages)155 static void *novm_alloc_helper(struct novm_arena *n, size_t pages) {
156 if (pages == 0 || pages > n->pages)
157 return NULL;
158
159 mutex_acquire(&n->lock);
160 for (size_t i = 0; i <= n->pages - pages; i++) {
161 bool found = true;
162 for (size_t j = 0; j < pages; j++) {
163 if (n->map[i + j] != 0) {
164 i += j;
165 found = false;
166 break;
167 }
168 }
169 if (found) {
170 memset(n->map + i, 1, pages);
171 mutex_release(&n->lock);
172 return n->base + (i << PAGE_SIZE_SHIFT);
173 }
174 }
175 mutex_release(&n->lock);
176
177 return NULL;
178 }
179
novm_alloc_pages(size_t pages,uint32_t arena_bitmap)180 void *novm_alloc_pages(size_t pages, uint32_t arena_bitmap) {
181 LTRACEF("pages %zu\n", pages);
182
183 /* allocate from any arena */
184 for (uint i = 0; i < NOVM_MAX_ARENAS; i++) {
185 if (arena_bitmap & (1U << i)) {
186 void *result = novm_alloc_helper(&arena[i], pages);
187 if (result)
188 return result;
189 }
190 }
191
192 return NULL;
193 }
194
novm_free_pages(void * address,size_t pages)195 void novm_free_pages(void *address, size_t pages) {
196 LTRACEF("address %p, pages %zu\n", address, pages);
197
198 struct novm_arena *n = NULL;
199 for (uint i = 0; i < NOVM_MAX_ARENAS; i++) {
200 if (in_arena(&arena[i], address)) {
201 n = &arena[i];
202 break;
203 }
204 }
205 if (!n)
206 return;
207
208 DEBUG_ASSERT(in_arena(n, address));
209
210 size_t index = ((char *)address - (char *)(n->base)) >> PAGE_SIZE_SHIFT;
211 char *map = n->map;
212
213 mutex_acquire(&n->lock);
214 for (size_t i = 0; i < pages; i++) map[index + i] = 0;
215 mutex_release(&n->lock);
216 }
217
novm_alloc_specific_pages(void * address,size_t pages)218 status_t novm_alloc_specific_pages(void *address, size_t pages) {
219 LTRACEF("address %p, pages %zu\n", address, pages);
220
221 struct novm_arena *n = NULL;
222 for (uint i = 0; i < NOVM_MAX_ARENAS; i++) {
223 if (in_arena(&arena[i], address)) {
224 n = &arena[i];
225 break;
226 }
227 }
228 if (!n)
229 return ERR_NOT_FOUND;
230
231 size_t index = ((char *)address - (char *)(n->base)) >> PAGE_SIZE_SHIFT;
232 char *map = n->map;
233
234 status_t err = NO_ERROR;
235
236 mutex_acquire(&n->lock);
237 for (size_t i = 0; i < pages; i++) {
238 if (map[index + i] != 0) {
239 err = ERR_NO_MEMORY;
240 break;
241 }
242 }
243 if (err == NO_ERROR) {
244 memset(map + index, 1, pages);
245 }
246 mutex_release(&n->lock);
247
248 return err;
249 }
250
251
#if LK_DEBUGLEVEL > 1

/* debug console plumbing; only compiled into debug builds */
static int cmd_novm(int argc, const console_cmd_args *argv);
static void novm_dump(void);

STATIC_COMMAND_START
STATIC_COMMAND("novm", "page allocator (for devices without VM support) debug commands", &cmd_novm)
STATIC_COMMAND_END(novm);
260
/*
 * Console command handler for 'novm'.
 * Subcommands: 'info' (dump arenas), 'alloc <pages> [arena bitmap]',
 * 'free <address> [pages]'. Returns -1 on usage error, 0 otherwise.
 */
static int cmd_novm(int argc, const console_cmd_args *argv) {
    if (argc < 2) {
        // note: these labels are jumped into from the subcommand handlers below
notenoughargs:
        printf("not enough arguments\n");
usage:
        printf("usage:\n");
        printf("\t%s info\n", argv[0].str);
        printf("\t%s alloc <numberofpages> [arena bitmap]\n", argv[0].str);
        printf("\t%s free <address> [numberofpages]\n", argv[0].str);
        return -1;
    }

    if (strcmp(argv[1].str, "info") == 0) {
        novm_dump();
    } else if (strcmp(argv[1].str, "alloc") == 0) {
        if (argc < 3) goto notenoughargs;

        // arena bitmap defaults to "any arena" when not given
        uint32_t arena_bitmap = (argc >= 4) ? argv[3].u : NOVM_ARENA_ANY;
        void *ptr = novm_alloc_pages(argv[2].u, arena_bitmap);
        printf("novm_alloc_pages returns %p\n", ptr);
    } else if (strcmp(argv[1].str, "free") == 0) {
        if (argc < 3) goto notenoughargs;
        // page count defaults to 1 when not given
        size_t pages = (argc >= 4) ? argv[3].u : 1;
        novm_free_pages(argv[2].p, pages);
        printf("novm_free_pages: %zd pages at %p\n", pages, argv[2].p);
    } else {
        printf("unrecognized command\n");
        goto usage;
    }

    return 0;
}
293
novm_dump_arena(struct novm_arena * n)294 static void novm_dump_arena(struct novm_arena *n) {
295 if (n->pages == 0) {
296 return;
297 }
298
299 mutex_acquire(&n->lock);
300 printf("name '%s', %zu pages, each %zdk (%zdk in all)\n", n->name, n->pages, (size_t)PAGE_SIZE >> 10, (size_t)(PAGE_SIZE * n->pages) >> 10);
301 printf(" range: %p-%p\n", (void *)n->base, (char *)n->base + n->size);
302 printf(" unaligned range: %p-%p\n", n->unaligned_area, n->unaligned_area + n->unaligned_size);
303 unsigned i;
304 size_t in_use = 0;
305 for (i = 0; i < n->pages; i++) if (n->map[i] != 0) in_use++;
306 printf(" %zd/%zd in use\n", in_use, n->pages);
307 #define MAX_PRINT 1024u
308 for (i = 0; i < MAX_PRINT && i < n->pages; i++) {
309 if ((i & 63) == 0) printf(" ");
310 printf("%c", n->map[i] ? '*' : '.');
311 if ((i & 63) == 63) printf("\n");
312 }
313 if (i == MAX_PRINT && n->pages > MAX_PRINT) {
314 printf(" etc., %zd more pages.", n->pages - MAX_PRINT);
315 }
316 printf("\n");
317 mutex_release(&n->lock);
318 }
319
novm_dump(void)320 static void novm_dump(void) {
321 for (uint i = 0; i < NOVM_MAX_ARENAS; i++) {
322 novm_dump_arena(&arena[i]);
323 }
324 }
325
326 #endif
327
328