// Copyright 2016 The Fuchsia Authors
// Copyright (c) 2008-2015 Travis Geiselbrecht
//
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file or at
// https://opensource.org/licenses/MIT

#include <lib/heap.h>

#include <arch/ops.h>
#include <assert.h>
#include <debug.h>
#include <err.h>
#include <inttypes.h>
#include <kernel/auto_lock.h>
#include <kernel/spinlock.h>
#include <lib/cmpctmalloc.h>
#include <lib/console.h>
#include <list.h>
#include <stdlib.h>
#include <string.h>
#include <trace.h>
#include <vm/physmap.h>
#include <vm/pmm.h>
#include <vm/vm.h>

#define LOCAL_TRACE 0

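// Unless overridden, panic on allocation failure at higher debug levels so
// failures surface immediately rather than as downstream null dereferences.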
#ifndef HEAP_PANIC_ON_ALLOC_FAIL
#if LK_DEBUGLEVEL > 2
#define HEAP_PANIC_ON_ALLOC_FAIL 1
#else
#define HEAP_PANIC_ON_ALLOC_FAIL 0
#endif
#endif

// Heap tracing: when enabled, every allocation and free prints its caller and
// result. Runtime-toggleable at LK_DEBUGLEVEL > 1, compiled out otherwise.
#if LK_DEBUGLEVEL > 1
static bool heap_trace = false;
#else
#define heap_trace (false)
#endif

// Track unique caller:size allocation sites in an MRU-ordered list.
namespace {

struct alloc_stat {
    list_node node;

    void* caller;
    size_t size;

    uint64_t count;
};

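// Stat records live in a fixed-size static pool rather than on the heap
// itself, so collecting stats never recurses into the allocator.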
const size_t num_stats = 1024;
size_t next_unused_stat = 0;
alloc_stat stats[num_stats];

list_node stat_list = LIST_INITIAL_VALUE(stat_list);
SpinLock stat_lock;

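// Record one allocation of |size| bytes attributed to |caller|. An existing
// caller:size record is bumped and moved to the head of the list (MRU order);
// otherwise a record is taken from the static pool, if any remain.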
void add_stat(void* caller, size_t size) {
    if (!HEAP_COLLECT_STATS) {
        return;
    }

    AutoSpinLock guard(&stat_lock);

    // look for an existing stat, bump the count and move to head if found
    alloc_stat* s;
    list_for_every_entry (&stat_list, s, alloc_stat, node) {
        if (s->caller == caller && s->size == size) {
            s->count++;
            list_delete(&s->node);
            list_add_head(&stat_list, &s->node);
            return;
        }
    }

    // allocate a new one and add it to the list
    if (unlikely(next_unused_stat >= num_stats))
        return;

    s = &stats[next_unused_stat++];
    s->caller = caller;
    s->size = size;
    s->count = 1;
    list_add_head(&stat_list, &s->node);
}

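// Print every recorded caller:size site, sorted by descending size. The sort
// happens in place: the list is drained, then each record is reinserted in
// order.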
void dump_stats() {
    if (!HEAP_COLLECT_STATS) {
        return;
    }

    AutoSpinLock guard(&stat_lock);

    // remove all of them from the list
    alloc_stat* s;
    while ((s = list_remove_head_type(&stat_list, alloc_stat, node)))
        ;

    // reinsert all of the entries, sorted by size
    for (size_t i = 0; i < next_unused_stat; i++) {
        bool added = false;
        list_for_every_entry (&stat_list, s, alloc_stat, node) {
            if (stats[i].size >= s->size) {
                list_add_before(&s->node, &stats[i].node);
                added = true;
                break;
            }
        }
        // fell off the end
        if (!added) {
            list_add_tail(&stat_list, &stats[i].node);
        }
    }

    // dump the list of stats
    list_for_every_entry (&stat_list, s, alloc_stat, node) {
        printf("size %8zu count %8" PRIu64 " caller %p\n", s->size, s->count, s->caller);
    }

    if (next_unused_stat >= num_stats) {
        printf("WARNING: max number of unique records hit, some statistics were likely lost\n");
    }
}

} // namespace

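// The wrappers below delegate to cmpctmalloc, layering on stats collection,
// optional tracing, and the panic-on-failure policy.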
void heap_init() {
    cmpct_init();
}

void heap_trim() {
    cmpct_trim();
}

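// The heap may block internally, so allocation is only legal from contexts
// where blocking is allowed; the DEBUG_ASSERT below catches calls from
// interrupt context or with a spinlock held.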
void* malloc(size_t size) {
    DEBUG_ASSERT(!arch_blocking_disallowed());

    LTRACEF("size %zu\n", size);

    add_stat(__GET_CALLER(), size);

    void* ptr = cmpct_alloc(size);
    if (unlikely(heap_trace)) {
        printf("caller %p malloc %zu -> %p\n", __GET_CALLER(), size, ptr);
    }

    if (HEAP_PANIC_ON_ALLOC_FAIL && unlikely(!ptr)) {
        panic("malloc of size %zu failed\n", size);
    }

    return ptr;
}

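// Same as malloc(), but attributes the allocation to an explicit |caller|
// address, so that wrappers (e.g. operator new) can report the original call
// site in stats and traces rather than the wrapper itself.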
void* malloc_debug_caller_(size_t size, void* caller) {
    DEBUG_ASSERT(!arch_blocking_disallowed());

    LTRACEF("size %zu\n", size);

    add_stat(caller, size);

    void* ptr = cmpct_alloc(size);
    if (unlikely(heap_trace)) {
        printf("caller %p malloc %zu -> %p\n", caller, size, ptr);
    }

    if (HEAP_PANIC_ON_ALLOC_FAIL && unlikely(!ptr)) {
        panic("malloc of size %zu failed\n", size);
    }

    return ptr;
}

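// Allocate |size| bytes aligned to |boundary|. As with standard memalign,
// |boundary| is expected to be a power of two.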
void* memalign(size_t boundary, size_t size) {
    DEBUG_ASSERT(!arch_blocking_disallowed());

    LTRACEF("boundary %zu, size %zu\n", boundary, size);

    add_stat(__GET_CALLER(), size);

    void* ptr = cmpct_memalign(size, boundary);
    if (unlikely(heap_trace)) {
        printf("caller %p memalign %zu, %zu -> %p\n", __GET_CALLER(), boundary, size, ptr);
    }

    if (HEAP_PANIC_ON_ALLOC_FAIL && unlikely(!ptr)) {
        panic("memalign of size %zu align %zu failed\n", size, boundary);
    }

    return ptr;
}

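// Allocate zeroed storage for |count| elements of |size| bytes each. The
// count * size product is checked for overflow below, per standard calloc
// semantics; note that this path does not panic on allocation failure.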
void* calloc(size_t count, size_t size) {
    DEBUG_ASSERT(!arch_blocking_disallowed());

    LTRACEF("count %zu, size %zu\n", count, size);

    add_stat(__GET_CALLER(), size);

    // Guard against overflow in count * size; calloc must fail rather than
    // allocate a too-small block.
    size_t realsize;
    if (unlikely(__builtin_mul_overflow(count, size, &realsize))) {
        return nullptr;
    }

    void* ptr = cmpct_alloc(realsize);
    if (likely(ptr)) {
        memset(ptr, 0, realsize);
    }
    if (unlikely(heap_trace)) {
        printf("caller %p calloc %zu, %zu -> %p\n", __GET_CALLER(), count, size, ptr);
    }
    return ptr;
}

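// Standard realloc semantics, forwarded to cmpct_realloc.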
void* realloc(void* ptr, size_t size) {
    DEBUG_ASSERT(!arch_blocking_disallowed());

    LTRACEF("ptr %p, size %zu\n", ptr, size);

    add_stat(__GET_CALLER(), size);

    void* ptr2 = cmpct_realloc(ptr, size);
    if (unlikely(heap_trace)) {
        printf("caller %p realloc %p, %zu -> %p\n", __GET_CALLER(), ptr, size, ptr2);
    }

    if (HEAP_PANIC_ON_ALLOC_FAIL && unlikely(!ptr2)) {
        panic("realloc of size %zu old ptr %p failed\n", size, ptr);
    }

    return ptr2;
}

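// Return |ptr| to the heap. Only the free itself is forwarded; stats are
// collected on allocation only, not updated on free.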
void free(void* ptr) {
    DEBUG_ASSERT(!arch_blocking_disallowed());

    LTRACEF("ptr %p\n", ptr);
    if (unlikely(heap_trace)) {
        printf("caller %p free %p\n", __GET_CALLER(), ptr);
    }

    cmpct_free(ptr);
}

static void heap_dump(bool panic_time) {
    cmpct_dump(panic_time);
}

void heap_get_info(size_t* size_bytes, size_t* free_bytes) {
    cmpct_get_info(size_bytes, free_bytes);
}

static void heap_test() {
    cmpct_test();
}

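// Allocate |pages| physically contiguous pages directly from the PMM,
// bypassing cmpctmalloc; this is the page-level back end the heap draws on
// when it needs to grow. Returns the physmap virtual address of the run, or
// nullptr on failure.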
void* heap_page_alloc(size_t pages) {
    DEBUG_ASSERT(pages > 0);

    list_node list = LIST_INITIAL_VALUE(list);

    paddr_t pa;
    zx_status_t status = pmm_alloc_contiguous(pages, 0, PAGE_SIZE_SHIFT, &pa, &list);
    if (status != ZX_OK) {
        return nullptr;
    }

    // mark all of the allocated pages as HEAP
    vm_page_t *p, *temp;
    list_for_every_entry_safe (&list, p, temp, vm_page_t, queue_node) {
        list_delete(&p->queue_node);
        p->state = VM_PAGE_STATE_HEAP;
    }

    LTRACEF("pages %zu: pa %#lx, va %p\n", pages, pa, paddr_to_physmap(pa));

    return paddr_to_physmap(pa);
}

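// Return |pages| pages starting at |_ptr| (as returned by heap_page_alloc)
// to the PMM. The pages are gathered into a list and freed in one batch.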
void heap_page_free(void* _ptr, size_t pages) {
    DEBUG_ASSERT(IS_PAGE_ALIGNED((uintptr_t)_ptr));
    DEBUG_ASSERT(pages > 0);

    LTRACEF("ptr %p, pages %zu\n", _ptr, pages);

    uint8_t* ptr = (uint8_t*)_ptr;

    list_node list;
    list_initialize(&list);

    while (pages > 0) {
        vm_page_t* p = paddr_to_vm_page(vaddr_to_paddr(ptr));
        if (p) {
            DEBUG_ASSERT(p->state == VM_PAGE_STATE_HEAP);
            DEBUG_ASSERT(!list_in_list(&p->queue_node));

            list_add_tail(&list, &p->queue_node);
        }

        ptr += PAGE_SIZE;
        pages--;
    }

    pmm_free(&list);
}

#if LK_DEBUGLEVEL > 1

#include <lib/console.h>

static int cmd_heap(int argc, const cmd_args* argv, uint32_t flags);

STATIC_COMMAND_START
STATIC_COMMAND_MASKED("heap", "heap debug commands", &cmd_heap, CMD_AVAIL_ALWAYS)
STATIC_COMMAND_END(heap);

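// Console front end for heap debugging (e.g. `heap trace` toggles
// per-allocation logging). `heap info` and `heap stats` work even at panic
// time; the mutating subcommands (trace, trim, test) are disabled when
// CMD_FLAG_PANIC is set.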
static int cmd_heap(int argc, const cmd_args* argv, uint32_t flags) {
    if (argc < 2) {
    usage:
        printf("usage:\n");
        printf("\t%s info\n", argv[0].str);
        if (HEAP_COLLECT_STATS) {
            printf("\t%s stats\n", argv[0].str);
        }
        if (!(flags & CMD_FLAG_PANIC)) {
            printf("\t%s trace\n", argv[0].str);
            printf("\t%s trim\n", argv[0].str);
            printf("\t%s test\n", argv[0].str);
        }
        return -1;
    }

    if (strcmp(argv[1].str, "info") == 0) {
        heap_dump(flags & CMD_FLAG_PANIC);
    } else if (HEAP_COLLECT_STATS && strcmp(argv[1].str, "stats") == 0) {
        dump_stats();
    } else if (!(flags & CMD_FLAG_PANIC) && strcmp(argv[1].str, "test") == 0) {
        heap_test();
    } else if (!(flags & CMD_FLAG_PANIC) && strcmp(argv[1].str, "trace") == 0) {
        heap_trace = !heap_trace;
        printf("heap trace is now %s\n", heap_trace ? "on" : "off");
    } else if (!(flags & CMD_FLAG_PANIC) && strcmp(argv[1].str, "trim") == 0) {
        heap_trim();
    } else {
        printf("unrecognized command\n");
        goto usage;
    }

    return 0;
}

#endif