/*
 * Copyright (c) 2008-2009,2012-2015 Travis Geiselbrecht
 * Copyright (c) 2009 Corey Tabaka
 *
 * Use of this source code is governed by a MIT-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/MIT
 */
#include <lk/debug.h>
#include <lk/trace.h>
#include <assert.h>
#include <lk/err.h>
#include <lk/list.h>
#include <rand.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <kernel/mutex.h>
#include <lib/miniheap.h>
#include <lib/heap.h>
#include <lib/page_alloc.h>

#define LOCAL_TRACE 0

#define DEBUG_HEAP 0
#define ALLOC_FILL 0x99
#define FREE_FILL 0x77
#define PADDING_FILL 0x55
#define PADDING_SIZE 64

// whether or not the heap will try to trim itself every time a free happens
#ifndef MINIHEAP_AUTOTRIM
#define MINIHEAP_AUTOTRIM 0
#endif

#define HEAP_MAGIC (0x48454150)  // 'HEAP'

struct free_heap_chunk {
    struct list_node node;
    size_t len;
};

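// Global heap bookkeeping. 'base' and 'len' describe the overall address range
// the heap has ever spanned (it may contain holes after trimming); 'remaining'
// counts bytes currently sitting on the free list and 'low_watermark' records
// the smallest value 'remaining' has reached. The mutex guards the free list
// and the accounting fields. The free list itself is kept sorted by address so
// adjacent chunks can be coalesced on insert.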
struct heap {
    void *base;
    size_t len;
    size_t remaining;
    size_t low_watermark;
    mutex_t lock;
    struct list_node free_list;
};

// heap static vars
static struct heap theheap;

// structure placed at the beginning of every allocation
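// Layout of a live allocation, from low to high address:
//   [ slack used for alignment ][ struct alloc_struct_begin ][ user data ]
// 'ptr' records the base of the underlying chunk pulled off the free list so
// free() can rebuild a free_heap_chunk there, and 'size' is the full length of
// that chunk (header and alignment slack included), not just what the caller
// asked for.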
struct alloc_struct_begin {
#if LK_DEBUGLEVEL > 1
    unsigned int magic;
#endif
    void *ptr;
    size_t size;
#if DEBUG_HEAP
    void *padding_start;
    size_t padding_size;
#endif
};

static ssize_t heap_grow(size_t len);

static void dump_free_chunk(struct free_heap_chunk *chunk) {
    dprintf(INFO, "\t\tbase %p, end 0x%lx, len 0x%zx\n", chunk, (vaddr_t)chunk + chunk->len, chunk->len);
}

void miniheap_dump(void) {
    dprintf(INFO, "Heap dump (using miniheap):\n");
    dprintf(INFO, "\tbase %p, len 0x%zx\n", theheap.base, theheap.len);
    dprintf(INFO, "\tfree list:\n");

    mutex_acquire(&theheap.lock);

    struct free_heap_chunk *chunk;
    list_for_every_entry(&theheap.free_list, chunk, struct free_heap_chunk, node) {
        dump_free_chunk(chunk);
    }
    mutex_release(&theheap.lock);
}

// try to insert this free chunk into the free list, consuming the chunk by merging it with
// nearby ones if possible. Returns the base of whatever chunk it ended up part of in the list.
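// The free list is kept sorted by ascending address; that ordering is what makes
// the merge-with-previous / merge-with-next checks below a constant-time test
// against the chunk's immediate list neighbors.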
static struct free_heap_chunk *heap_insert_free_chunk(struct free_heap_chunk *chunk) {
    vaddr_t chunk_end = (vaddr_t)chunk + chunk->len;

    LTRACEF("chunk ptr %p, size 0x%zx\n", chunk, chunk->len);

    struct free_heap_chunk *next_chunk;
    struct free_heap_chunk *last_chunk;

    mutex_acquire(&theheap.lock);

    theheap.remaining += chunk->len;

    // walk through the list, finding the node to insert before
    list_for_every_entry(&theheap.free_list, next_chunk, struct free_heap_chunk, node) {
        if (chunk < next_chunk) {
            DEBUG_ASSERT(chunk_end <= (vaddr_t)next_chunk);

            list_add_before(&next_chunk->node, &chunk->node);

            goto try_merge;
        }
    }

    // walked off the end of the list, add it at the tail
    list_add_tail(&theheap.free_list, &chunk->node);

    // try to merge with the previous chunk
try_merge:
    last_chunk = list_prev_type(&theheap.free_list, &chunk->node, struct free_heap_chunk, node);
    if (last_chunk) {
        if ((vaddr_t)last_chunk + last_chunk->len == (vaddr_t)chunk) {
            // easy, just extend the previous chunk
            last_chunk->len += chunk->len;

            // remove ourself from the list
            list_delete(&chunk->node);

            // set the chunk pointer to the newly extended chunk, in case
            // it needs to merge with the next chunk below
            chunk = last_chunk;
        }
    }

    // try to merge with the next chunk
    if (next_chunk) {
        if ((vaddr_t)chunk + chunk->len == (vaddr_t)next_chunk) {
            // extend our chunk
            chunk->len += next_chunk->len;

            // remove them from the list
            list_delete(&next_chunk->node);
        }
    }

    mutex_release(&theheap.lock);

    return chunk;
}

static struct free_heap_chunk *heap_create_free_chunk(void *ptr, size_t len, bool allow_debug) {
    DEBUG_ASSERT((len % sizeof(void *)) == 0); // size must be aligned on pointer boundary
    DEBUG_ASSERT(len > sizeof(struct free_heap_chunk));

#if DEBUG_HEAP
    if (allow_debug)
        memset(ptr, FREE_FILL, len);
#endif

    struct free_heap_chunk *chunk = (struct free_heap_chunk *)ptr;
    chunk->len = len;

    return chunk;
}

void *miniheap_alloc(size_t size, unsigned int alignment) {
    void *ptr;
#if DEBUG_HEAP
    size_t original_size = size;
#endif

    LTRACEF("size %zd, align %d\n", size, alignment);

    // alignment must be power of 2
    if (alignment & (alignment - 1))
        return NULL;

    // we always put a size field + base pointer + magic in front of the allocation
    size += sizeof(struct alloc_struct_begin);
#if DEBUG_HEAP
    size += PADDING_SIZE;
#endif

    // make sure we allocate at least the size of a struct free_heap_chunk so that
    // when we free it, we can build a struct free_heap_chunk in its place
    if (size < sizeof(struct free_heap_chunk))
        size = sizeof(struct free_heap_chunk);

    // round up size to a multiple of native pointer size
    size = ROUNDUP(size, sizeof(void *));

    // deal with nonzero alignments
    if (alignment > 0) {
        if (alignment < 16)
            alignment = 16;

        // add alignment for worst case fit
        size += alignment;
    }
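    // note on the worst-case fit above: rounding the returned pointer up to the
    // requested alignment consumes at most alignment - 1 bytes, which the extra
    // 'alignment' bytes added to the request cover, so the header and the full
    // requested size still fit inside whatever chunk is chosen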

    int retry_count = 0;
retry:
    mutex_acquire(&theheap.lock);

    // walk through the list
    ptr = NULL;
    struct free_heap_chunk *chunk;
    list_for_every_entry(&theheap.free_list, chunk, struct free_heap_chunk, node) {
        DEBUG_ASSERT((chunk->len % sizeof(void *)) == 0); // len should always be a multiple of pointer size

        // is it big enough to service our allocation?
        if (chunk->len >= size) {
            ptr = chunk;

            // remove it from the list
            struct list_node *next_node = list_next(&theheap.free_list, &chunk->node);
            list_delete(&chunk->node);

            if (chunk->len > size + sizeof(struct free_heap_chunk)) {
                // there's enough space in this chunk to create a new one after the allocation
                struct free_heap_chunk *newchunk = heap_create_free_chunk((uint8_t *)ptr + size, chunk->len - size, true);

                // truncate this chunk to the allocated size
                chunk->len = size;

                // add the new one where chunk used to be
                if (next_node)
                    list_add_before(next_node, &newchunk->node);
                else
                    list_add_tail(&theheap.free_list, &newchunk->node);
            }

            // the allocated size is actually the length of this chunk, not the size requested
            DEBUG_ASSERT(chunk->len >= size);
            size = chunk->len;

#if DEBUG_HEAP
            memset(ptr, ALLOC_FILL, size);
#endif

            ptr = (void *)((addr_t)ptr + sizeof(struct alloc_struct_begin));

            // align the output if requested
            if (alignment > 0) {
                ptr = (void *)ROUNDUP((addr_t)ptr, (addr_t)alignment);
            }

            struct alloc_struct_begin *as = (struct alloc_struct_begin *)ptr;
            as--;
#if LK_DEBUGLEVEL > 1
            as->magic = HEAP_MAGIC;
#endif
            as->ptr = (void *)chunk;
            as->size = size;
            theheap.remaining -= size;

            if (theheap.remaining < theheap.low_watermark) {
                theheap.low_watermark = theheap.remaining;
            }
#if DEBUG_HEAP
            as->padding_start = ((uint8_t *)ptr + original_size);
            as->padding_size = (((addr_t)chunk + size) - ((addr_t)ptr + original_size));
//          printf("padding start %p, size %u, chunk %p, size %u\n", as->padding_start, as->padding_size, chunk, size);

            memset(as->padding_start, PADDING_FILL, as->padding_size);
#endif

            break;
        }
    }

    mutex_release(&theheap.lock);

    /* try to grow the heap if we can */
    if (ptr == NULL && retry_count == 0) {
        ssize_t err = heap_grow(size);
        if (err >= 0) {
            retry_count++;
            goto retry;
        }
    }

    LTRACEF("returning ptr %p\n", ptr);

    return ptr;
}

void *miniheap_realloc(void *ptr, size_t size) {
    /* slow implementation */
    if (!ptr)
        return miniheap_alloc(size, 0);
    if (size == 0) {
        miniheap_free(ptr);
        return NULL;
    }

    // XXX better implementation
    void *p = miniheap_alloc(size, 0);
    if (!p)
        return NULL;

    // copy no more than the old allocation can actually provide: the header just
    // before ptr records the base and full length of the underlying chunk, so
    // the usable space behind ptr runs to the end of that chunk
    struct alloc_struct_begin *as = (struct alloc_struct_begin *)ptr;
    as--;
    size_t old_size = ((addr_t)as->ptr + as->size) - (addr_t)ptr;
    memcpy(p, ptr, (size < old_size) ? size : old_size);
    miniheap_free(ptr);

    return p;
}

void miniheap_free(void *ptr) {
    if (!ptr)
        return;

    LTRACEF("ptr %p\n", ptr);

    // recover the allocation header placed just before the pointer we handed out
    struct alloc_struct_begin *as = (struct alloc_struct_begin *)ptr;
    as--;

    DEBUG_ASSERT_COND(as->magic == HEAP_MAGIC);

#if DEBUG_HEAP
    {
        uint i;
        uint8_t *pad = (uint8_t *)as->padding_start;

        for (i = 0; i < as->padding_size; i++) {
            if (pad[i] != PADDING_FILL) {
                printf("free at %p scribbled outside the lines:\n", ptr);
                hexdump(pad, as->padding_size);
                panic("die\n");
            }
        }
    }
#endif

    LTRACEF("allocation was %zd bytes long at ptr %p\n", as->size, as->ptr);

    // looks good, create a free chunk and add it to the pool
    heap_insert_free_chunk(heap_create_free_chunk(as->ptr, as->size, true));

#if MINIHEAP_AUTOTRIM
    miniheap_trim();
#endif
}

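// Walk the free list and hand any fully free, page-aligned spans inside the
// free chunks back to the page allocator. A chunk is only trimmed if whatever
// is left over on either side of the released pages is still large enough to
// hold a struct free_heap_chunk header; otherwise the boundary is pulled in by
// a page and the check is retried.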
void miniheap_trim(void) {
    LTRACE_ENTRY;

    mutex_acquire(&theheap.lock);

    // walk through the list, finding free chunks that can be returned to the page allocator
    struct free_heap_chunk *chunk;
    struct free_heap_chunk *next_chunk;
    list_for_every_entry_safe(&theheap.free_list, chunk, next_chunk, struct free_heap_chunk, node) {
        LTRACEF("looking at chunk %p, len 0x%zx\n", chunk, chunk->len);

        uintptr_t start = (uintptr_t)chunk;
        uintptr_t end = start + chunk->len;
        DEBUG_ASSERT(end > start); // make sure it doesn't wrap the address space and has a positive len

        // compute the page aligned region in this free block (if any)
        uintptr_t start_page = ROUNDUP(start, PAGE_SIZE);
        uintptr_t end_page = ROUNDDOWN(end, PAGE_SIZE);
        DEBUG_ASSERT(end_page <= end);
        DEBUG_ASSERT(start_page >= start);

        LTRACEF("start page 0x%lx, end page 0x%lx\n", start_page, end_page);

retry:
        // see if the free block encompasses at least one page
        if (unlikely(end_page > start_page)) {
            LTRACEF("could trim: start 0x%lx, end 0x%lx\n", start_page, end_page);

            // cases where the start of the block is already page aligned
            if (start_page == start) {
                // look for special case, we're going to completely remove the chunk
                if (end_page == end) {
                    LTRACEF("special case, free chunk completely covers page(s)\n");
                    list_delete(&chunk->node);
                    goto free_chunk;
                }
            } else {
                // start of block is not page aligned,
                // will there be enough space before the block if we trim?
                if (start_page - start < sizeof(struct free_heap_chunk)) {
                    LTRACEF("not enough space for free chunk before\n");
                    start_page += PAGE_SIZE;
                    goto retry;
                }
            }

            // do we need to split the free block and create a new block afterwards?
            if (end_page < end) {
                size_t new_chunk_size = end - end_page;
                LTRACEF("will have to split, new chunk will be 0x%zx bytes long\n", new_chunk_size);

                // if there's not enough space afterwards for a free chunk, we can't free the last page
                if (new_chunk_size < sizeof(struct free_heap_chunk)) {
                    LTRACEF("not enough space for free chunk afterwards\n");
                    end_page -= PAGE_SIZE;
                    goto retry;
                }

                // trim the new space off the end of the current chunk
                chunk->len -= new_chunk_size;
                end = end_page;

                // create a new chunk after the one we're trimming
                struct free_heap_chunk *new_chunk = heap_create_free_chunk((void *)end_page, new_chunk_size, false);

                // link it with the current block
                list_add_after(&chunk->node, &new_chunk->node);
            }

            // check again to see if we are now completely covering a block
            if (start_page == start && end_page == end) {
                LTRACEF("special case, after splitting off new chunk, free chunk completely covers page(s)\n");
                list_delete(&chunk->node);
                goto free_chunk;
            }

            // trim the size of the block
            chunk->len -= end_page - start_page;

free_chunk:
            // return it to the allocator
            LTRACEF("returning %p size 0x%lx to the page allocator\n", (void *)start_page, end_page - start_page);
            page_free((void *)start_page, (end_page - start_page) / PAGE_SIZE);

            // tweak accounting
            theheap.remaining -= end_page - start_page;
        }
    }

    mutex_release(&theheap.lock);
}

void miniheap_get_stats(struct miniheap_stats *ptr) {
    struct free_heap_chunk *chunk;

    ptr->heap_start = theheap.base;
    ptr->heap_len = theheap.len;
    ptr->heap_free = 0;
    ptr->heap_max_chunk = 0;

    mutex_acquire(&theheap.lock);

    list_for_every_entry(&theheap.free_list, chunk, struct free_heap_chunk, node) {
        ptr->heap_free += chunk->len;

        if (chunk->len > ptr->heap_max_chunk) {
            ptr->heap_max_chunk = chunk->len;
        }
    }

    ptr->heap_low_watermark = theheap.low_watermark;

    mutex_release(&theheap.lock);
}

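// Grow the heap by pulling whole pages from the page allocator and threading
// them onto the free list. The pages handed back are not necessarily contiguous
// with the existing heap, so theheap.base/len only bound the range rather than
// describe one solid block.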
static ssize_t heap_grow(size_t size) {
    size = ROUNDUP(size, PAGE_SIZE);
    void *ptr = page_alloc(size / PAGE_SIZE, PAGE_ALLOC_ANY_ARENA);
    if (!ptr) {
        TRACEF("failed to grow kernel heap by 0x%zx bytes\n", size);
        return ERR_NO_MEMORY;
    }

    LTRACEF("growing heap by 0x%zx bytes, new ptr %p\n", size, ptr);

    heap_insert_free_chunk(heap_create_free_chunk(ptr, size, true));

    /* change the heap start and end variables */
    if ((uintptr_t)ptr < (uintptr_t)theheap.base || theheap.base == 0)
        theheap.base = ptr;

    uintptr_t endptr = (uintptr_t)ptr + size;
    if (endptr > (uintptr_t)theheap.base + theheap.len) {
        theheap.len = (uintptr_t)endptr - (uintptr_t)theheap.base;
    }

    return size;
}

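// Rough usage sketch (normally the lib/heap wrapper drives these calls rather
// than client code doing so directly):
//
//   miniheap_init(NULL, 0);            // no initial region; grow on demand
//   void *p = miniheap_alloc(64, 0);   // unaligned 64-byte allocation
//   void *q = miniheap_alloc(64, 32);  // 32-byte-aligned allocation
//   miniheap_free(q);
//   miniheap_free(p);
//   miniheap_trim();                   // return any whole free pages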
void miniheap_init(void *ptr, size_t len) {
    LTRACEF("ptr %p, len %zu\n", ptr, len);

    // initialize the lock
    mutex_init(&theheap.lock);

    // initialize the free list
    list_initialize(&theheap.free_list);

    // align the buffer to a pointer boundary
    if (((uintptr_t)ptr % sizeof(uintptr_t)) > 0) {
        uintptr_t aligned_ptr = ROUNDUP((uintptr_t)ptr, sizeof(uintptr_t));

        DEBUG_ASSERT((aligned_ptr - (uintptr_t)ptr) < len);

        len -= aligned_ptr - (uintptr_t)ptr;
        ptr = (void *)aligned_ptr;

        LTRACEF("(aligned) ptr %p, len %zu\n", ptr, len);
    }

    // set the heap range
    theheap.base = ptr;
    theheap.len = len;
    theheap.remaining = 0; // will get set by heap_insert_free_chunk()
    theheap.low_watermark = 0;

    // if passed an initial range, use it
    if (len > 0)
        heap_insert_free_chunk(heap_create_free_chunk(ptr, len, true));
}