/*
 * Copyright (c) 2014 Travis Geiselbrecht
 *
 * Use of this source code is governed by a MIT-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/MIT
 */
#include <kernel/vm.h>

#include <assert.h>
#include <kernel/mutex.h>
#include <lk/console_cmd.h>
#include <lk/err.h>
#include <lk/list.h>
#include <lk/pow2.h>
#include <lk/trace.h>
#include <stdlib.h>
#include <string.h>

#include "vm_priv.h"

#define LOCAL_TRACE 0

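/* all registered physical memory arenas, kept sorted by ascending priority.
 * the mutex below serializes page allocation and free operations across them.
 */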
static struct list_node arena_list = LIST_INITIAL_VALUE(arena_list);
static mutex_t lock = MUTEX_INITIAL_VALUE(lock);

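/* helpers for testing whether a vm_page_t or physical address falls within an
 * arena and for converting between the two representations */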
#define PAGE_BELONGS_TO_ARENA(page, arena) \
    (((uintptr_t)(page) >= (uintptr_t)(arena)->page_array) && \
     ((uintptr_t)(page) < ((uintptr_t)(arena)->page_array + (arena)->size / PAGE_SIZE * sizeof(vm_page_t))))

#define PAGE_ADDRESS_FROM_ARENA(page, arena) \
    ((paddr_t)(((uintptr_t)(page) - (uintptr_t)(arena)->page_array) / sizeof(vm_page_t)) * PAGE_SIZE + (arena)->base)

#define ADDRESS_IN_ARENA(address, arena) \
    ((address) >= (arena)->base && (address) <= (arena)->base + (arena)->size - 1)

static inline bool page_is_free(const vm_page_t *page) {
    return !(page->flags & VM_PAGE_FLAG_NONFREE);
}

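/* translate a vm_page_t pointer back to the physical address it describes */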
paddr_t vm_page_to_paddr(const vm_page_t *page) {
    pmm_arena_t *a;
    list_for_every_entry(&arena_list, a, pmm_arena_t, node) {
        if (PAGE_BELONGS_TO_ARENA(page, a)) {
            return PAGE_ADDRESS_FROM_ARENA(page, a);
        }
    }
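    /* page is not part of any arena */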
    return -1;
}

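/* find the vm_page_t that corresponds to a physical address, or NULL if the
 * address is not covered by any arena */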
vm_page_t *paddr_to_vm_page(paddr_t addr) {
    pmm_arena_t *a;
    list_for_every_entry(&arena_list, a, pmm_arena_t, node) {
        if (addr >= a->base && addr <= a->base + a->size - 1) {
            size_t index = (addr - a->base) / PAGE_SIZE;
            return &a->page_array[index];
        }
    }
    return NULL;
}

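/* register an arena with the physical memory manager; its page array is
 * boot-allocated and every page starts out on the arena's free list */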
status_t pmm_add_arena(pmm_arena_t *arena) {
    LTRACEF("arena %p name '%s' base 0x%lx size 0x%zx\n", arena, arena->name, arena->base, arena->size);

    DEBUG_ASSERT(IS_PAGE_ALIGNED(arena->base));
    DEBUG_ASSERT(IS_PAGE_ALIGNED(arena->size));
    DEBUG_ASSERT(arena->size > 0);

    /* walk the arena list and add arena based on priority order */
    pmm_arena_t *a;
    list_for_every_entry(&arena_list, a, pmm_arena_t, node) {
        if (a->priority > arena->priority) {
            list_add_before(&a->node, &arena->node);
            goto done_add;
        }
    }

    /* walked off the end, add it to the end of the list */
    list_add_tail(&arena_list, &arena->node);

done_add:

    /* zero out some of the structure */
    arena->free_count = 0;
    list_initialize(&arena->free_list);

    /* allocate an array of pages to back this one */
    size_t page_count = arena->size / PAGE_SIZE;
    arena->page_array = boot_alloc_mem(page_count * sizeof(vm_page_t));

    /* initialize all of the pages */
    memset(arena->page_array, 0, page_count * sizeof(vm_page_t));

    /* add them to the free list */
    for (size_t i = 0; i < page_count; i++) {
        vm_page_t *p = &arena->page_array[i];

        list_add_tail(&arena->free_list, &p->node);

        arena->free_count++;
    }

    return NO_ERROR;
}

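/* allocate count pages from any arena, appending them to list; returns the
 * number of pages actually allocated, which may be less than requested */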
size_t pmm_alloc_pages(uint count, struct list_node *list) {
    LTRACEF("count %u\n", count);

    /* list must be initialized prior to calling this */
    DEBUG_ASSERT(list);

    uint allocated = 0;
    if (count == 0)
        return 0;

    mutex_acquire(&lock);

    /* walk the arenas in order, allocating as many pages as we can from each */
    pmm_arena_t *a;
    list_for_every_entry(&arena_list, a, pmm_arena_t, node) {
        while (allocated < count) {
            vm_page_t *page = list_remove_head_type(&a->free_list, vm_page_t, node);
            if (!page) {
                /* this arena is exhausted, move on to the next one */
                break;
            }

            a->free_count--;

            page->flags |= VM_PAGE_FLAG_NONFREE;
            list_add_tail(list, &page->node);

            allocated++;
        }

        if (allocated == count)
            break;
    }

    mutex_release(&lock);
    return allocated;
}

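/* allocate a single page, returning a pointer to its vm_page_t or NULL */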
vm_page_t *pmm_alloc_page(void) {
    struct list_node list = LIST_INITIAL_VALUE(list);

    size_t ret = pmm_alloc_pages(1, &list);
    if (ret == 0) {
        return NULL;
    }

    DEBUG_ASSERT(ret == 1);

    return list_peek_head_type(&list, vm_page_t, node);
}

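/* allocate a specific run of physical pages starting at address, stopping
 * early if a page in the range is already allocated; returns the number of
 * pages claimed */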
size_t pmm_alloc_range(paddr_t address, uint count, struct list_node *list) {
    LTRACEF("address 0x%lx, count %u\n", address, count);

    DEBUG_ASSERT(list);

    uint allocated = 0;
    if (count == 0)
        return 0;

    address = ROUNDDOWN(address, PAGE_SIZE);

    mutex_acquire(&lock);

    /* walk the arenas, looking for the one that contains the requested range */
    pmm_arena_t *a;
    list_for_every_entry(&arena_list, a, pmm_arena_t, node) {
        while (allocated < count && ADDRESS_IN_ARENA(address, a)) {
            size_t index = (address - a->base) / PAGE_SIZE;

            DEBUG_ASSERT(index < a->size / PAGE_SIZE);

            vm_page_t *page = &a->page_array[index];
            if (page->flags & VM_PAGE_FLAG_NONFREE) {
                /* we hit an allocated page */
                break;
            }

            DEBUG_ASSERT(list_in_list(&page->node));

            list_delete(&page->node);
            page->flags |= VM_PAGE_FLAG_NONFREE;
            list_add_tail(list, &page->node);

            a->free_count--;
            allocated++;
            address += PAGE_SIZE;
        }

        if (allocated == count)
            break;
    }

    mutex_release(&lock);
    return allocated;
}

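/* return every page on list to the free list of the arena it belongs to;
 * returns the number of pages freed */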
size_t pmm_free(struct list_node *list) {
    LTRACEF("list %p\n", list);

    DEBUG_ASSERT(list);

    mutex_acquire(&lock);

    size_t count = 0;
    while (!list_is_empty(list)) {
        vm_page_t *page = list_remove_head_type(list, vm_page_t, node);

        DEBUG_ASSERT(!list_in_list(&page->node));
        DEBUG_ASSERT(page->flags & VM_PAGE_FLAG_NONFREE);

        /* see which arena this page belongs to and add it */
        pmm_arena_t *a;
        list_for_every_entry(&arena_list, a, pmm_arena_t, node) {
            if (PAGE_BELONGS_TO_ARENA(page, a)) {
                page->flags &= ~VM_PAGE_FLAG_NONFREE;

                list_add_head(&a->free_list, &page->node);
                a->free_count++;
                count++;
                break;
            }
        }
    }

    mutex_release(&lock);
    return count;
}

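/* convenience wrapper to free a single page */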
size_t pmm_free_page(vm_page_t *page) {
    struct list_node list;
    list_initialize(&list);

    list_add_head(&list, &page->node);

    return pmm_free(&list);
}

/* physically allocate a run from arenas marked as KMAP */
void *pmm_alloc_kpages(uint count, struct list_node *list) {
    LTRACEF("count %u\n", count);

    /* fast path for single page */
    if (count == 1) {
        vm_page_t *p = pmm_alloc_page();
        if (!p) {
            return NULL;
        }

        return paddr_to_kvaddr(vm_page_to_paddr(p));
    }

    paddr_t pa;
    size_t alloc_count = pmm_alloc_contiguous(count, PAGE_SIZE_SHIFT, &pa, list);
    if (alloc_count == 0)
        return NULL;

    return paddr_to_kvaddr(pa);
}

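/* free count kernel-mapped pages starting at virtual address _ptr */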
size_t pmm_free_kpages(void *_ptr, uint count) {
    LTRACEF("ptr %p, count %u\n", _ptr, count);

    uint8_t *ptr = (uint8_t *)_ptr;

    struct list_node list;
    list_initialize(&list);

    while (count > 0) {
        vm_page_t *p = paddr_to_vm_page(vaddr_to_paddr(ptr));
        if (p) {
            list_add_tail(&list, &p->node);
        }

        ptr += PAGE_SIZE;
        count--;
    }

    return pmm_free(&list);
}

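/* allocate a physically contiguous run of count pages, aligned to
 * 2^alignment_log2 bytes, from an arena marked PMM_ARENA_FLAG_KMAP; the
 * starting physical address is returned through pa and the pages are
 * optionally appended to list */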
size_t pmm_alloc_contiguous(uint count, uint8_t alignment_log2, paddr_t *pa, struct list_node *list) {
    LTRACEF("count %u, align %u\n", count, alignment_log2);

    if (count == 0)
        return 0;
    if (alignment_log2 < PAGE_SIZE_SHIFT)
        alignment_log2 = PAGE_SIZE_SHIFT;

    mutex_acquire(&lock);

    pmm_arena_t *a;
    list_for_every_entry(&arena_list, a, pmm_arena_t, node) {
        // XXX make this a flag to only search kmap?
        if (a->flags & PMM_ARENA_FLAG_KMAP) {
            /* walk the list starting at alignment boundaries.
             * calculate the starting offset into this arena, based on the
             * base address of the arena to handle the case where the arena
             * is not aligned on the same boundary requested.
             */
            paddr_t rounded_base = ROUNDUP(a->base, 1UL << alignment_log2);
            if (rounded_base < a->base || rounded_base > a->base + a->size - 1)
                continue;

            uint aligned_offset = (rounded_base - a->base) / PAGE_SIZE;
            uint start = aligned_offset;
            LTRACEF("starting search at aligned offset %u\n", start);
            LTRACEF("arena base 0x%lx size %zu\n", a->base, a->size);

retry:
            /* search while we're still within the arena and have a chance of finding a slot
               (start + count <= number of pages in the arena) */
            while ((start < a->size / PAGE_SIZE) &&
                    ((start + count) <= a->size / PAGE_SIZE)) {
                vm_page_t *p = &a->page_array[start];
                for (uint i = 0; i < count; i++) {
                    if (p->flags & VM_PAGE_FLAG_NONFREE) {
                        /* this run is broken, break out of the inner loop.
                         * start over at the next alignment boundary
                         */
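                        /* the ROUNDUP below advances just past the busy page at
                         * index i to the next alignment boundary, computed relative
                         * to aligned_offset so the arena's own misalignment is
                         * taken into account */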
                        start = ROUNDUP(start - aligned_offset + i + 1, 1UL << (alignment_log2 - PAGE_SIZE_SHIFT)) + aligned_offset;
                        goto retry;
                    }
                    p++;
                }

                /* we found a run */
                LTRACEF("found run from pn %u to %u\n", start, start + count);

                /* remove the pages from the run out of the free list */
                for (uint i = start; i < start + count; i++) {
                    p = &a->page_array[i];
                    DEBUG_ASSERT(!(p->flags & VM_PAGE_FLAG_NONFREE));
                    DEBUG_ASSERT(list_in_list(&p->node));

                    list_delete(&p->node);
                    p->flags |= VM_PAGE_FLAG_NONFREE;
                    a->free_count--;

                    if (list)
                        list_add_tail(list, &p->node);
                }

                if (pa)
                    *pa = a->base + start * PAGE_SIZE;

                mutex_release(&lock);

                return count;
            }
        }
    }

    mutex_release(&lock);

    LTRACEF("couldn't find run\n");
    return 0;
}

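/* console debug helpers */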
static void dump_page(const vm_page_t *page) {
    printf("page %p: address 0x%lx flags 0x%x\n", page, vm_page_to_paddr(page), page->flags);
}

static void dump_arena(const pmm_arena_t *arena, bool dump_pages) {
    printf("arena %p: name '%s' base 0x%lx size 0x%zx priority %u flags 0x%x\n",
           arena, arena->name, arena->base, arena->size, arena->priority, arena->flags);
    printf("\tpage_array %p, free_count %zu\n",
           arena->page_array, arena->free_count);

    /* dump all of the pages */
    if (dump_pages) {
        for (size_t i = 0; i < arena->size / PAGE_SIZE; i++) {
            dump_page(&arena->page_array[i]);
        }
    }

    /* dump the free pages */
    printf("\tfree ranges:\n");
    ssize_t last = -1;
    for (size_t i = 0; i < arena->size / PAGE_SIZE; i++) {
        if (page_is_free(&arena->page_array[i])) {
            if (last == -1) {
                last = i;
            }
        } else {
            if (last != -1) {
                printf("\t\t0x%lx - 0x%lx\n", arena->base + last * PAGE_SIZE, arena->base + i * PAGE_SIZE);
            }
            last = -1;
        }
    }

    if (last != -1) {
        printf("\t\t0x%lx - 0x%lx\n", arena->base + last * PAGE_SIZE, arena->base + arena->size);
    }
}

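/* console 'pmm' command: poke at the physical memory manager from the shell */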
static int cmd_pmm(int argc, const console_cmd_args *argv) {
    if (argc < 2) {
notenoughargs:
        printf("not enough arguments\n");
usage:
        printf("usage:\n");
        printf("%s arenas\n", argv[0].str);
        printf("%s alloc <count>\n", argv[0].str);
        printf("%s alloc_range <address> <count>\n", argv[0].str);
        printf("%s alloc_kpages <count>\n", argv[0].str);
        printf("%s alloc_contig <count> <alignment log2>\n", argv[0].str);
        printf("%s dump_alloced\n", argv[0].str);
        printf("%s free_alloced\n", argv[0].str);
        return ERR_GENERIC;
    }

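    /* pages handed out by this command are tracked here so 'free_alloced'
     * can return them to the pmm later */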
    static struct list_node allocated = LIST_INITIAL_VALUE(allocated);

    if (!strcmp(argv[1].str, "arenas")) {
        pmm_arena_t *a;
        list_for_every_entry(&arena_list, a, pmm_arena_t, node) {
            dump_arena(a, false);
        }
    } else if (!strcmp(argv[1].str, "alloc")) {
        if (argc < 3) goto notenoughargs;

        struct list_node list;
        list_initialize(&list);

        uint count = pmm_alloc_pages(argv[2].u, &list);
        printf("alloc returns %u\n", count);

        vm_page_t *p;
        list_for_every_entry(&list, p, vm_page_t, node) {
            printf("\tpage %p, address 0x%lx\n", p, vm_page_to_paddr(p));
        }

        /* add the pages to the local allocated list */
        struct list_node *node;
        while ((node = list_remove_head(&list))) {
            list_add_tail(&allocated, node);
        }
    } else if (!strcmp(argv[1].str, "dump_alloced")) {
        vm_page_t *page;

        list_for_every_entry(&allocated, page, vm_page_t, node) {
            dump_page(page);
        }
    } else if (!strcmp(argv[1].str, "alloc_range")) {
        if (argc < 4) goto notenoughargs;

        struct list_node list;
        list_initialize(&list);

        uint count = pmm_alloc_range(argv[2].u, argv[3].u, &list);
        printf("alloc returns %u\n", count);

        vm_page_t *p;
        list_for_every_entry(&list, p, vm_page_t, node) {
            printf("\tpage %p, address 0x%lx\n", p, vm_page_to_paddr(p));
        }

        /* add the pages to the local allocated list */
        struct list_node *node;
        while ((node = list_remove_head(&list))) {
            list_add_tail(&allocated, node);
        }
    } else if (!strcmp(argv[1].str, "alloc_kpages")) {
        if (argc < 3) goto notenoughargs;

        void *ptr = pmm_alloc_kpages(argv[2].u, NULL);
        printf("pmm_alloc_kpages returns %p\n", ptr);
    } else if (!strcmp(argv[1].str, "alloc_contig")) {
        if (argc < 4) goto notenoughargs;

        struct list_node list;
        list_initialize(&list);

        paddr_t pa;
        size_t ret = pmm_alloc_contiguous(argv[2].u, argv[3].u, &pa, &list);
        printf("pmm_alloc_contiguous returns %zu, address 0x%lx\n", ret, pa);
        printf("address %% align = 0x%lx\n", pa % (1UL << argv[3].u));

        /* add the pages to the local allocated list */
        struct list_node *node;
        while ((node = list_remove_head(&list))) {
            list_add_tail(&allocated, node);
        }
    } else if (!strcmp(argv[1].str, "free_alloced")) {
        size_t err = pmm_free(&allocated);
        printf("pmm_free returns %zu\n", err);
    } else {
        printf("unknown command\n");
        goto usage;
    }

    return NO_ERROR;
}

STATIC_COMMAND_START
#if LK_DEBUGLEVEL > 0
STATIC_COMMAND("pmm", "physical memory manager", &cmd_pmm)
#endif
STATIC_COMMAND_END(pmm);