// Copyright 2016 The Fuchsia Authors
// Copyright (c) 2014 Travis Geiselbrecht
//
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file or at
// https://opensource.org/licenses/MIT

#include <vm/pmm.h>

#include <assert.h>
#include <err.h>
#include <inttypes.h>
#include <kernel/mp.h>
#include <kernel/timer.h>
#include <lib/console.h>
#include <lk/init.h>
#include <new>
#include <platform.h>
#include <pow2.h>
#include <stdlib.h>
#include <string.h>
#include <trace.h>
#include <vm/bootalloc.h>
#include <vm/physmap.h>
#include <vm/vm.h>

#include "pmm_arena.h"
#include "pmm_node.h"
#include "vm_priv.h"

#include <fbl/auto_lock.h>
#include <fbl/intrusive_double_list.h>
#include <fbl/mutex.h>
#include <zircon/thread_annotations.h>
#include <zircon/time.h>
#include <zircon/types.h>

#define LOCAL_TRACE MAX(VM_GLOBAL_TRACE, 0)

// The (currently) one and only pmm node
static PmmNode pmm_node;

#if PMM_ENABLE_FREE_FILL
static void pmm_enforce_fill(uint level) {
    pmm_node.EnforceFill();
}
LK_INIT_HOOK(pmm_fill, &pmm_enforce_fill, LK_INIT_LEVEL_VM);
#endif

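// Look up the vm_page_t that tracks the physical address |addr|.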
vm_page_t* paddr_to_vm_page(paddr_t addr) {
    return pmm_node.PaddrToPage(addr);
}

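// Add the arena of physical memory described by |info| to the PMM.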
zx_status_t pmm_add_arena(const pmm_arena_info_t* info) {
    return pmm_node.AddArena(info);
}

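// Allocate a single page. The overloads below optionally return the vm_page_t
// and/or its physical address.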
zx_status_t pmm_alloc_page(uint alloc_flags, paddr_t* pa) {
    return pmm_node.AllocPage(alloc_flags, nullptr, pa);
}

zx_status_t pmm_alloc_page(uint alloc_flags, vm_page_t** page) {
    return pmm_node.AllocPage(alloc_flags, page, nullptr);
}

zx_status_t pmm_alloc_page(uint alloc_flags, vm_page_t** page, paddr_t* pa) {
    return pmm_node.AllocPage(alloc_flags, page, pa);
}

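// Allocate |count| pages (not necessarily contiguous) and add them to |list|.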
zx_status_t pmm_alloc_pages(size_t count, uint alloc_flags, list_node* list) {
    return pmm_node.AllocPages(count, alloc_flags, list);
}

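// Allocate the specific run of |count| pages starting at physical |address|
// and add them to |list|.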
zx_status_t pmm_alloc_range(paddr_t address, size_t count, list_node* list) {
    return pmm_node.AllocRange(address, count, list);
}

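// Allocate |count| physically contiguous pages aligned to 2^|alignment_log2|
// bytes. The base physical address is returned through |pa| and the pages are
// added to |list|.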
zx_status_t pmm_alloc_contiguous(size_t count, uint alloc_flags, uint8_t alignment_log2, paddr_t* pa,
                                 list_node* list) {
    // if we're called with a single page, just fall through to the regular allocation routine
    if (unlikely(count == 1 && alignment_log2 <= PAGE_SIZE_SHIFT)) {
        vm_page_t* page;
        zx_status_t status = pmm_node.AllocPage(alloc_flags, &page, pa);
        if (status != ZX_OK) {
            return status;
        }
        list_add_tail(list, &page->queue_node);
        return ZX_OK;
    }

    return pmm_node.AllocContiguous(count, alloc_flags, alignment_log2, pa, list);
}

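// Return every page on |list| to the PMM.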
void pmm_free(list_node* list) {
    pmm_node.FreeList(list);
}

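// Return a single page to the PMM.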
void pmm_free_page(vm_page* page) {
    pmm_node.FreePage(page);
}

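// Return the number of pages currently sitting on the free list.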
uint64_t pmm_count_free_pages() {
    return pmm_node.CountFreePages();
}

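// Return the total number of bytes managed by the PMM across all arenas.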
uint64_t pmm_count_total_bytes() {
    return pmm_node.CountTotalBytes();
}

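// Fill |state_count| with a per-state tally of all pages managed by the PMM.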
void pmm_count_total_states(size_t state_count[VM_PAGE_STATE_COUNT_]) {
    pmm_node.CountTotalStates(state_count);
}

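// Timer callback used by the 'pmm free' console command: dump the free count
// and re-arm the timer one second out.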
static void pmm_dump_timer(struct timer* t, zx_time_t now, void*) {
    zx_time_t deadline = zx_time_add_duration(now, ZX_SEC(1));
    timer_set_oneshot(t, deadline, &pmm_dump_timer, nullptr);
    pmm_node.DumpFree();
}

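// Console 'pmm' command. 'dump' prints the PMM state (the only subcommand
// available from the panic shell); 'free' toggles a once-per-second dump of
// the free page count.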
static int cmd_pmm(int argc, const cmd_args* argv, uint32_t flags) {
    bool is_panic = flags & CMD_FLAG_PANIC;

    if (argc < 2) {
        printf("not enough arguments\n");
    usage:
        printf("usage:\n");
        printf("%s dump\n", argv[0].str);
        if (!is_panic) {
            printf("%s free\n", argv[0].str);
        }
        return ZX_ERR_INTERNAL;
    }

    if (!strcmp(argv[1].str, "dump")) {
        pmm_node.Dump(is_panic);
    } else if (is_panic) {
        // No other operations will work during a panic.
        printf("Only the \"dump\" command is available during a panic.\n");
        goto usage;
    } else if (!strcmp(argv[1].str, "free")) {
        static bool show_mem = false;
        static timer_t timer;

        if (!show_mem) {
            printf("pmm free: issue the same command to stop.\n");
            timer_init(&timer);
            zx_time_t deadline = zx_time_add_duration(current_time(), ZX_SEC(1));
            const TimerSlack slack{ZX_MSEC(20), TIMER_SLACK_CENTER};
            timer_set(&timer, deadline, slack, &pmm_dump_timer, nullptr);
            show_mem = true;
        } else {
            timer_cancel(&timer);
            show_mem = false;
        }
    } else {
        printf("unknown command\n");
        goto usage;
    }

    return ZX_OK;
}

STATIC_COMMAND_START
#if LK_DEBUGLEVEL > 0
STATIC_COMMAND_MASKED("pmm", "physical memory manager", &cmd_pmm, CMD_AVAIL_ALWAYS)
#endif
STATIC_COMMAND_END(pmm);