/*
 * Copyright (c) 2015 Google, Inc. All rights reserved
 *
 * Use of this source code is governed by a MIT-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/MIT
 */
#include <lk/debug.h>
#include <lk/trace.h>
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <kernel/thread.h>
#include <kernel/mutex.h>
#include <kernel/spinlock.h>
#include <lib/cmpctmalloc.h>
#include <lib/heap.h>
#include <lib/page_alloc.h>

// Malloc implementation tuned for space.
//
// Allocation is done under a single global mutex. Free-list entries are
// kept in linked lists with 8 different sizes per binary order of
// magnitude. The header is two words, and blocks are eagerly coalesced
// on free.
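//
// Each area's header is two words: a pointer to the previous area in memory
// order ("left") and the size of the area including the header. The low bit
// of the left pointer tags an area as free (see tag_as_free()/untag()
// below). Illustrative layout of one allocated area:
//
//   +--------+--------+------------------------------------+
//   | left   | size   | payload (returned by cmpct_alloc)  |
//   +--------+--------+------------------------------------+
//   ^ header_t                                             ^ next header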

#ifdef DEBUG
#define CMPCT_DEBUG
#endif

#define LOCAL_TRACE 0

#define ALLOC_FILL 0x99
#define FREE_FILL 0x77
#define PADDING_FILL 0x55

#if WITH_KERNEL_VM && !defined(HEAP_GROW_SIZE)
#define HEAP_GROW_SIZE (1 * 1024 * 1024) /* Grow aggressively */
#elif !defined(HEAP_GROW_SIZE)
#define HEAP_GROW_SIZE (4 * 1024) /* Grow less aggressively */
#endif

STATIC_ASSERT(IS_PAGE_ALIGNED(HEAP_GROW_SIZE));

// Individual allocations above 4Mbytes (1 << HEAP_ALLOC_VIRTUAL_BITS) bypass
// the free lists and are fetched directly from the page allocator.
#define HEAP_ALLOC_VIRTUAL_BITS 22

// When we grow the heap we have to have somewhere in the freelist to put the
// resulting freelist entry, so the freelist has to have a certain number of
// buckets.
STATIC_ASSERT(HEAP_GROW_SIZE <= (1u << HEAP_ALLOC_VIRTUAL_BITS));

// Buckets for allocations. The smallest 15 buckets are 8, 16, 24, etc. up to
// 120 bytes. After that we round up to the nearest size that can be written
// /^0*1...0*$/, giving 8 buckets per order of binary magnitude. The freelist
// entries in a given bucket have at least the given size, plus the header
// size. On 64 bit, the 8 byte bucket is useless, since a free_t is 16 bytes
// larger than a header_t, so no free area that small can exist, but we keep
// the bucket for simplicity.
#define NUMBER_OF_BUCKETS (1 + 15 + (HEAP_ALLOC_VIRTUAL_BITS - 7) * 8)
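// For example, with HEAP_ALLOC_VIRTUAL_BITS == 22 this works out to
// 1 + 15 + 15 * 8 == 136 buckets:
//   buckets 0-14:  sizes 8, 16, ..., 120 (8 byte spacing)
//   buckets 15-22: sizes 128, 144, ..., 240 (16 byte spacing)
//   buckets 23-30: sizes 256, 288, ..., 480 (32 byte spacing)
//   ...and so on, 8 buckets per power of two, up to the top bucket
//   (index 135) at 1 << 22 bytes.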

// All individual memory areas on the heap start with this.
typedef struct header_struct {
    struct header_struct *left;  // Pointer to the previous area in memory order (low bit: free tag).
    size_t size;                 // Size of this area, including the header.
} header_t;

typedef struct free_struct {
    header_t header;
    struct free_struct *next;
    struct free_struct *prev;
} free_t;

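// Central bookkeeping for the heap. "size" is the gross number of bytes
// obtained from the page allocator, and "remaining" is the number of bytes
// currently held in the free lists; both are maintained under the lock.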
struct heap {
    size_t size;
    size_t remaining;
    mutex_t lock;
    free_t *free_lists[NUMBER_OF_BUCKETS];
    // We have some 32 bit words that tell us whether there is an entry in the
    // freelist.
#define BUCKET_WORDS (((NUMBER_OF_BUCKETS) + 31) >> 5)
    uint32_t free_list_bits[BUCKET_WORDS];
};

// Heap static vars.
static struct heap theheap;

static ssize_t heap_grow(size_t len, free_t **bucket);

static void lock(void) {
    mutex_acquire(&theheap.lock);
}

static void unlock(void) {
    mutex_release(&theheap.lock);
}

static void dump_free(header_t *header) {
    dprintf(INFO, "\t\tbase %p, end 0x%lx, len 0x%zx\n", header, (vaddr_t)header + header->size, header->size);
}

void cmpct_dump(void) {
    lock();
    dprintf(INFO, "Heap dump (using cmpctmalloc):\n");
    dprintf(INFO, "\tsize %lu, remaining %lu\n",
            (unsigned long)theheap.size,
            (unsigned long)theheap.remaining);

    dprintf(INFO, "\tfree list:\n");
    for (int i = 0; i < NUMBER_OF_BUCKETS; i++) {
        bool header_printed = false;
        free_t *free_area = theheap.free_lists[i];
        for (; free_area != NULL; free_area = free_area->next) {
            ASSERT(free_area != free_area->next);
            if (!header_printed) {
                dprintf(INFO, "\tbucket %d\n", i);
                header_printed = true;
            }
            dump_free(&free_area->header);
        }
    }
    unlock();
}

// Operates in sizes that don't include the allocation header.
static int size_to_index_helper(
    size_t size, size_t *rounded_up_out, int adjust, int increment) {
    // First buckets are simply 8-spaced up to 128.
    if (size <= 128) {
        if (sizeof(size_t) == 8u && size <= sizeof(free_t) - sizeof(header_t)) {
            *rounded_up_out = sizeof(free_t) - sizeof(header_t);
        } else {
            *rounded_up_out = size;
        }
        // No allocation is smaller than 8 bytes, so the first bucket is for 8
        // byte spaces (not including the header). For 64 bit, the free list
        // struct is 16 bytes larger than the header, so no allocation can be
        // smaller than that (otherwise it couldn't be linked into a free
        // list when freed), but we have empty 8 and 16 byte buckets for
        // simplicity.
        return (size >> 3) - 1;
    }

    // We are going to go up to the next size to round up, but if we hit a
    // bucket size exactly we don't want to go up. By subtracting 8 here, we
    // will do the right thing (the carry propagates up for the round numbers
    // we are interested in).
    size += adjust;
    // After 128 the buckets are logarithmically spaced, every 16 up to 256,
    // every 32 up to 512 etc. This can be thought of as rows of 8 buckets.
    // GCC intrinsic count-leading-zeros.
    // E.g. on 32 bit, 128-255 has 24 leading zeros and we want row to be 4.
    unsigned row = sizeof(size_t) * 8 - 4 - __builtin_clzl(size);
    // For row 4 we want to shift down 4 bits.
    unsigned column = (size >> row) & 7;
    int row_column = (row << 3) | column;
    row_column += increment;
    size = (8 + (row_column & 7)) << (row_column >> 3);
    *rounded_up_out = size;
    // We start with 15 buckets, 8, 16, 24, 32, 40, 48, 56, 64, 72, 80, 88, 96,
    // 104, 112, 120. Then we have row 4, sizes 128 and up, with the
    // row-column 8 and up.
    int answer = row_column + 15 - 32;
    DEBUG_ASSERT(answer < NUMBER_OF_BUCKETS);
    return answer;
}
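
// A worked example of the bucket math above (illustrative): a request for
// 200 bytes is already 8-aligned, so size_to_index_allocating() calls the
// helper with 200. The adjust of -8 gives 192, which has row 4 and column
// 4, so row_column is 36; the increment bumps it to 37, i.e. a rounded-up
// size of (8 + 5) << 4 == 208 and bucket 37 + 15 - 32 == 20. Freeing a
// 208 byte area runs the same math with no adjustment and lands in the
// same bucket, 20, so entries found there are always big enough for a
// 208 byte allocation.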

// Round up size to next bucket when allocating.
static int size_to_index_allocating(size_t size, size_t *rounded_up_out) {
    size_t rounded = ROUNDUP(size, 8);
    return size_to_index_helper(rounded, rounded_up_out, -8, 1);
}

// Round down size to next bucket when freeing.
static int size_to_index_freeing(size_t size) {
    size_t dummy;
    return size_to_index_helper(size, &dummy, 0, 0);
}

static inline header_t *tag_as_free(void *left) {
    return (header_t *)((uintptr_t)left | 1);
}

static inline bool is_tagged_as_free(header_t *header) {
    return ((uintptr_t)(header->left) & 1) != 0;
}

static inline header_t *untag(void *left) {
    return (header_t *)((uintptr_t)left & ~1);
}

static inline header_t *right_header(header_t *header) {
    return (header_t *)((char *)header + header->size);
}

static inline void set_free_list_bit(int index) {
    theheap.free_list_bits[index >> 5] |= (1u << (31 - (index & 0x1f)));
}

static inline void clear_free_list_bit(int index) {
    theheap.free_list_bits[index >> 5] &= ~(1u << (31 - (index & 0x1f)));
}
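
// The bitmap is most-significant-bit first: bucket 0 maps to bit 31 of
// word 0, bucket 31 to bit 0 of word 0, bucket 32 to bit 31 of word 1,
// and so on. This allows find_nonempty_bucket() below to turn
// __builtin_clz() results directly into bucket indices.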

static int find_nonempty_bucket(int index) {
    uint32_t mask = (1u << (31 - (index & 0x1f))) - 1;
    mask = mask * 2 + 1;
    mask &= theheap.free_list_bits[index >> 5];
    if (mask != 0) return (index & ~0x1f) + __builtin_clz(mask);
    for (index = ROUNDUP(index + 1, 32); index <= NUMBER_OF_BUCKETS; index += 32) {
        mask = theheap.free_list_bits[index >> 5];
        if (mask != 0u) return index + __builtin_clz(mask);
    }
    return -1;
}
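
// Worked example for find_nonempty_bucket() (illustrative): for index 3
// the mask is built as ((1 << 28) - 1) * 2 + 1 == 0x1fffffff, covering
// buckets 3..31 of word 0 (the two-step construction avoids an undefined
// 32-bit shift when index is a multiple of 32). If any of those buckets
// is non-empty, the leading-zero count of the masked word gives the first
// one; otherwise the loop scans whole 32-bucket words for a set bit.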

static bool is_start_of_os_allocation(header_t *header) {
    return header->left == untag(NULL);
}

static void create_free_area(void *address, void *left, size_t size, free_t **bucket) {
    free_t *free_area = (free_t *)address;
    free_area->header.size = size;
    free_area->header.left = tag_as_free(left);
    if (bucket == NULL) {
        int index = size_to_index_freeing(size - sizeof(header_t));
        set_free_list_bit(index);
        bucket = &theheap.free_lists[index];
    }
    free_t *old_head = *bucket;
    if (old_head != NULL) old_head->prev = free_area;
    free_area->next = old_head;
    free_area->prev = NULL;
    *bucket = free_area;
    theheap.remaining += size;
#ifdef CMPCT_DEBUG
    memset(free_area + 1, FREE_FILL, size - sizeof(free_t));
#endif
}

static bool is_end_of_os_allocation(char *address) {
    return ((header_t *)address)->size == 0;
}

static void free_to_os(header_t *header, size_t size) {
    DEBUG_ASSERT(IS_PAGE_ALIGNED(size));
    page_free(header, size >> PAGE_SIZE_SHIFT);
    theheap.size -= size;
}

static void free_memory(void *address, void *left, size_t size) {
    left = untag(left);
    if (IS_PAGE_ALIGNED(left) &&
        is_start_of_os_allocation(left) &&
        is_end_of_os_allocation((char *)address + size)) {
        free_to_os(left, size + ((header_t *)left)->size + sizeof(header_t));
    } else {
        create_free_area(address, left, size, NULL);
    }
}

static void unlink_free(free_t *free_area, int bucket) {
    theheap.remaining -= free_area->header.size;
    // Guard against underflow of the remaining-bytes counter.
    ASSERT(theheap.remaining < 4000000000u);
    free_t *next = free_area->next;
    free_t *prev = free_area->prev;
    if (theheap.free_lists[bucket] == free_area) {
        theheap.free_lists[bucket] = next;
        if (next == NULL) clear_free_list_bit(bucket);
    }
    if (prev != NULL) prev->next = next;
    if (next != NULL) next->prev = prev;
}

static void unlink_free_unknown_bucket(free_t *free_area) {
    unlink_free(free_area, size_to_index_freeing(free_area->header.size - sizeof(header_t)));
}

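// Writes an in-use allocation header at the given offset from address and
// returns the payload pointer just past the header (this is what
// cmpct_alloc() hands back to the caller). The stored left pointer is
// untagged, i.e. the new area is marked as in use.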
static void *create_allocation_header(
    void *address, size_t offset, size_t size, void *left) {
    header_t *standalone = (header_t *)((char *)address + offset);
    standalone->left = untag(left);
    standalone->size = size;
    return standalone + 1;
}

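// Repoints the left pointer of a header at a new left neighbor while
// preserving whatever free/in-use tag bit is currently stored in it.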
static void FixLeftPointer(header_t *right, header_t *new_left) {
    int tag = (uintptr_t)right->left & 1;
    right->left = (header_t *)(((uintptr_t)new_left & ~1) | tag);
}

static void WasteFreeMemory(void) {
    while (theheap.remaining != 0) cmpct_alloc(1);
}

// If we just make one big allocation it gets rounded up. If we actually
// want to use a reasonably accurate amount of memory for test purposes, we
// have to do many small allocations.
static void *TestTrimHelper(ssize_t target) {
    char *answer = NULL;
    size_t remaining = theheap.remaining;
    while (theheap.remaining - target > 512) {
        char *next_block = cmpct_alloc(8 + ((theheap.remaining - target) >> 2));
        *(char **)next_block = answer;
        answer = next_block;
        if (theheap.remaining > remaining) return answer;
        // Abandon the attempt to hit a particular freelist entry size if we
        // accidentally got more memory from the OS.
        remaining = theheap.remaining;
    }
    return answer;
}

static void TestTrimFreeHelper(char *block) {
    while (block) {
        char *next_block = *(char **)block;
        cmpct_free(block);
        block = next_block;
    }
}

static void cmpct_test_trim(void) {
    WasteFreeMemory();

    size_t test_sizes[200];
    int sizes = 0;

    for (size_t s = 1; s < PAGE_SIZE * 4; s = (s + 1) * 1.1) {
        test_sizes[sizes++] = s;
        ASSERT(sizes < 200);
    }
    for (ssize_t s = -32; s <= 32; s += 8) {
        test_sizes[sizes++] = PAGE_SIZE + s;
        ASSERT(sizes < 200);
    }

    // Test allocations at the start of an OS allocation.
    for (int with_second_alloc = 0; with_second_alloc < 2; with_second_alloc++) {
        for (int i = 0; i < sizes; i++) {
            size_t s = test_sizes[i];

            char *a, *a2 = NULL;
            a = cmpct_alloc(s);
            if (with_second_alloc) {
                a2 = cmpct_alloc(1);
                if (s < PAGE_SIZE >> 1) {
                    // The intention of the test is that a is at the start of
                    // an OS allocation and that a2 is "right after" it;
                    // otherwise we are not testing what we intended. OS
                    // allocations are certainly not smaller than a page, so
                    // check in that case.
                    ASSERT((uintptr_t)(a2 - a) < s * 1.13 + 48);
                }
            }
            cmpct_trim();
            size_t remaining = theheap.remaining;
            // We should have < 1 page on either side of the a allocation.
            ASSERT(remaining < PAGE_SIZE * 2);
            cmpct_free(a);
            if (with_second_alloc) {
                // Now only a2 is holding onto the OS allocation.
                ASSERT(theheap.remaining > remaining);
            } else {
                ASSERT(theheap.remaining == 0);
            }
            remaining = theheap.remaining;
            cmpct_trim();
            ASSERT(theheap.remaining <= remaining);
            // If a was at least one page then the trim should have freed up
            // that page.
            if (s >= PAGE_SIZE && with_second_alloc) ASSERT(theheap.remaining < remaining);
            if (with_second_alloc) cmpct_free(a2);
        }
        ASSERT(theheap.remaining == 0);
    }

    ASSERT(theheap.remaining == 0);

    // Now test allocations near the end of an OS allocation.
    for (ssize_t wobble = -64; wobble <= 64; wobble += 8) {
        for (int i = 0; i < sizes; i++) {
            size_t s = test_sizes[i];

            if ((ssize_t)s + wobble < 0) continue;

            char *start_of_os_alloc = cmpct_alloc(1);

            // If the OS allocations are very small this test does not make
            // sense.
            if (theheap.remaining <= s + wobble) {
                cmpct_free(start_of_os_alloc);
                continue;
            }

            char *big_bit_in_the_middle = TestTrimHelper(s + wobble);
            size_t remaining = theheap.remaining;

            // If the remaining is big we started a new OS allocation and the
            // test makes no sense.
            if (remaining > 128 + s * 1.13 + wobble) {
                cmpct_free(start_of_os_alloc);
                TestTrimFreeHelper(big_bit_in_the_middle);
                continue;
            }

            cmpct_free(start_of_os_alloc);
            remaining = theheap.remaining;

            // This trim should sometimes trim a page off the end of the OS
            // allocation.
            cmpct_trim();
            ASSERT(theheap.remaining <= remaining);
            remaining = theheap.remaining;

            // We should have < 1 page on either side of the big allocation.
            ASSERT(remaining < PAGE_SIZE * 2);

            TestTrimFreeHelper(big_bit_in_the_middle);
        }
    }
}

static void cmpct_test_buckets(void) {
    size_t rounded;
    unsigned bucket;
    // Check for the 8-spaced buckets up to 128.
    for (unsigned i = 1; i <= 128; i++) {
        // Round up when allocating.
        bucket = size_to_index_allocating(i, &rounded);
        unsigned expected = (ROUNDUP(i, 8) >> 3) - 1;
        ASSERT(bucket == expected);
        ASSERT(IS_ALIGNED(rounded, 8));
        ASSERT(rounded >= i);
        if (i >= sizeof(free_t) - sizeof(header_t)) {
            // Once we get above the size of the free area struct (4 words),
            // we won't round up much for these small sizes.
            ASSERT(rounded - i < 8);
        }
        // Only rounded sizes are freed.
        if ((i & 7) == 0) {
            // Up to size 128 we have exact buckets for each multiple of 8.
            ASSERT(bucket == (unsigned)size_to_index_freeing(i));
        }
    }
    int bucket_base = 7;
    for (unsigned j = 16; j < 1024; j *= 2, bucket_base += 8) {
        // Note the "<=", so that we test the powers of 2 twice, checking
        // that both ways of calculating the bucket number match.
        for (unsigned i = j * 8; i <= j * 16; i++) {
            // Round up to j multiple in this range when allocating.
            bucket = size_to_index_allocating(i, &rounded);
            unsigned expected = bucket_base + ROUNDUP(i, j) / j;
            ASSERT(bucket == expected);
            ASSERT(IS_ALIGNED(rounded, j));
            ASSERT(rounded >= i);
            ASSERT(rounded - i < j);
            // Only 8-rounded sizes are freed or chopped off the end of a
            // free area when allocating.
            if ((i & 7) == 0) {
                // When freeing, if we don't hit the size of the bucket
                // precisely, we have to put the free space into a smaller
                // bucket, because the buckets have entries that will always
                // be big enough for the corresponding allocation size (so we
                // don't have to traverse the free chains to find a big
                // enough one).
                if ((i % j) == 0) {
                    ASSERT((int)bucket == size_to_index_freeing(i));
                } else {
                    ASSERT((int)bucket - 1 == size_to_index_freeing(i));
                }
            }
        }
    }
}

static void cmpct_test_get_back_newly_freed_helper(size_t size) {
    void *allocated = cmpct_alloc(size);
    if (allocated == NULL) return;
    char *allocated2 = cmpct_alloc(8);
    char *expected_position = (char *)allocated + size;
    if (allocated2 < expected_position || allocated2 > expected_position + 128) {
        // If the allocated2 allocation is not in the same OS allocation as
        // the first allocation then the test may not work as expected (the
        // memory may be returned to the OS when we free the first
        // allocation, and we might not get it back).
        cmpct_free(allocated);
        cmpct_free(allocated2);
        return;
    }

    cmpct_free(allocated);
    void *allocated3 = cmpct_alloc(size);
    // To avoid churn and fragmentation we would want to get the newly freed
    // memory back again when we allocate the same size shortly after.
    ASSERT(allocated3 == allocated);
    cmpct_free(allocated2);
    cmpct_free(allocated3);
}

static void cmpct_test_get_back_newly_freed(void) {
    size_t increment = 16;
    for (size_t i = 128; i <= 0x8000000; i *= 2, increment *= 2) {
        for (size_t j = i; j < i * 2; j += increment) {
            cmpct_test_get_back_newly_freed_helper(j - 8);
            cmpct_test_get_back_newly_freed_helper(j);
            cmpct_test_get_back_newly_freed_helper(j + 1);
        }
    }
    for (size_t i = 1024; i <= 2048; i++) {
        cmpct_test_get_back_newly_freed_helper(i);
    }
}

static void cmpct_test_return_to_os(void) {
    cmpct_trim();
    size_t remaining = theheap.remaining;
    // This goes in a new OS allocation since the trim above removed any free
    // area big enough to contain it.
    void *a = cmpct_alloc(5000);
    void *b = cmpct_alloc(2500);
    cmpct_free(a);
    cmpct_free(b);
    // If things work as expected the new allocation is at the start of an OS
    // allocation. There's just one sentinel and one header to the left of
    // it. If that's not the case then the allocation was met from some space
    // in the middle of an OS allocation, and our test won't work as
    // expected, so bail out.
    if (((uintptr_t)a & (PAGE_SIZE - 1)) != sizeof(header_t) * 2) return;
    // No trim needed when the entire OS allocation is free.
    ASSERT(remaining == theheap.remaining);
}

void cmpct_test(void) {
    cmpct_test_buckets();
    cmpct_test_get_back_newly_freed();
    cmpct_test_return_to_os();
    cmpct_test_trim();
    cmpct_dump();
    void *ptr[16];

    ptr[0] = cmpct_alloc(8);
    ptr[1] = cmpct_alloc(32);
    ptr[2] = cmpct_alloc(7);
    cmpct_trim();
    ptr[3] = cmpct_alloc(0);
    ptr[4] = cmpct_alloc(98713);
    ptr[5] = cmpct_alloc(16);

    cmpct_free(ptr[5]);
    cmpct_free(ptr[1]);
    cmpct_free(ptr[3]);
    cmpct_free(ptr[0]);
    cmpct_free(ptr[4]);
    cmpct_free(ptr[2]);

    cmpct_dump();
    cmpct_trim();
    cmpct_dump();

    int i;
    for (i = 0; i < 16; i++)
        ptr[i] = 0;

    for (i = 0; i < 32768; i++) {
        unsigned int index = (unsigned int)rand() % 16;

        if ((i % (16 * 1024)) == 0)
            printf("pass %d\n", i);

        // printf("index 0x%x\n", index);
        if (ptr[index]) {
            // printf("freeing ptr[0x%x] = %p\n", index, ptr[index]);
            cmpct_free(ptr[index]);
            ptr[index] = 0;
        }
        unsigned int align = 1 << ((unsigned int)rand() % 8);
        ptr[index] = cmpct_memalign((unsigned int)rand() % 32768, align);
        // printf("ptr[0x%x] = %p, align 0x%x\n", index, ptr[index], align);

        DEBUG_ASSERT(((addr_t)ptr[index] % align) == 0);
        // cmpct_dump();
    }

    for (i = 0; i < 16; i++) {
        if (ptr[i])
            cmpct_free(ptr[i]);
    }

    cmpct_dump();
}

static void *large_alloc(size_t size) {
#ifdef CMPCT_DEBUG
    size_t requested_size = size;
#endif
    size = ROUNDUP(size, 8);
    free_t *free_area = NULL;
    lock();
    if (heap_grow(size, &free_area) < 0) {
        // Don't leave the lock held on the failure path.
        unlock();
        return NULL;
    }
    void *result =
        create_allocation_header(free_area, 0, free_area->header.size, free_area->header.left);
    // Normally the 'remaining free space' counter would be decremented when
    // we unlink the free area from its bucket. However in this case the free
    // area was too big to go in any bucket and we had it in our own
    // "free_area" variable so there is no unlinking and we have to adjust
    // the counter here.
    theheap.remaining -= free_area->header.size;
    unlock();
#ifdef CMPCT_DEBUG
    memset(result, ALLOC_FILL, requested_size);
    memset((char *)result + requested_size, PADDING_FILL,
           free_area->header.size - (requested_size + sizeof(header_t)));
#endif
    return result;
}

void cmpct_trim(void) {
    // Look at free list entries that are at least as large as one page plus
    // a header. They might be at the start or the end of a block, so we can
    // trim them and free the page(s).
    lock();
    for (int bucket = size_to_index_freeing(PAGE_SIZE);
         bucket < NUMBER_OF_BUCKETS;
         bucket++) {
        free_t *next;
        for (free_t *free_area = theheap.free_lists[bucket];
             free_area != NULL;
             free_area = next) {
            DEBUG_ASSERT(free_area->header.size >= PAGE_SIZE + sizeof(header_t));
            next = free_area->next;
            header_t *right = right_header(&free_area->header);
            if (is_end_of_os_allocation((char *)right)) {
                char *old_os_allocation_end = (char *)ROUNDUP((uintptr_t)right, PAGE_SIZE);
                // The page will end with a smaller free list entry and a
                // header-sized sentinel.
                char *new_os_allocation_end = (char *)
                    ROUNDUP((uintptr_t)free_area + sizeof(header_t) + sizeof(free_t), PAGE_SIZE);
                size_t freed_up = old_os_allocation_end - new_os_allocation_end;
                DEBUG_ASSERT(IS_PAGE_ALIGNED(freed_up));
                // Rare, because we only look at large freelist entries, but
                // unlucky rounding could mean we can't actually free
                // anything here.
                if (freed_up == 0) continue;
                unlink_free(free_area, bucket);
                size_t new_free_size = free_area->header.size - freed_up;
                DEBUG_ASSERT(new_free_size >= sizeof(free_t));
                // Right sentinel, not free, stops attempts to coalesce right.
                create_allocation_header(free_area, new_free_size, 0, free_area);
                // Also puts it in the correct bucket.
                create_free_area(free_area, untag(free_area->header.left), new_free_size, NULL);
                page_free(new_os_allocation_end, freed_up >> PAGE_SIZE_SHIFT);
                theheap.size -= freed_up;
            } else if (is_start_of_os_allocation(untag(free_area->header.left))) {
                char *old_os_allocation_start =
                    (char *)ROUNDDOWN((uintptr_t)free_area, PAGE_SIZE);
                // For the sentinel, we need at least one header-size of
                // space between the page edge and the first allocation to
                // the right of the free area.
                char *new_os_allocation_start =
                    (char *)ROUNDDOWN((uintptr_t)(right - 1), PAGE_SIZE);
                size_t freed_up = new_os_allocation_start - old_os_allocation_start;
                DEBUG_ASSERT(IS_PAGE_ALIGNED(freed_up));
                // This should not happen because we only look at the large
                // free list buckets.
                if (freed_up == 0) continue;
                unlink_free(free_area, bucket);
                size_t sentinel_size = sizeof(header_t);
                size_t new_free_size = free_area->header.size - freed_up;
                if (new_free_size < sizeof(free_t)) {
                    sentinel_size += new_free_size;
                    new_free_size = 0;
                }
                // Left sentinel, not free, stops attempts to coalesce left.
                create_allocation_header(new_os_allocation_start, 0, sentinel_size, NULL);
                if (new_free_size == 0) {
                    FixLeftPointer(right, (header_t *)new_os_allocation_start);
                } else {
                    DEBUG_ASSERT(new_free_size >= sizeof(free_t));
                    char *new_free = new_os_allocation_start + sentinel_size;
                    // Also puts it in the correct bucket.
                    create_free_area(new_free, new_os_allocation_start, new_free_size, NULL);
                    FixLeftPointer(right, (header_t *)new_free);
                }
                page_free(old_os_allocation_start, freed_up >> PAGE_SIZE_SHIFT);
                theheap.size -= freed_up;
            }
        }
    }
    unlock();
}

void *cmpct_alloc(size_t size) {
    if (size == 0u) return NULL;

    if (size + sizeof(header_t) > (1u << HEAP_ALLOC_VIRTUAL_BITS)) return large_alloc(size);

    size_t rounded_up;
    int start_bucket = size_to_index_allocating(size, &rounded_up);

    rounded_up += sizeof(header_t);

    lock();
    int bucket = find_nonempty_bucket(start_bucket);
    if (bucket == -1) {
        // Grow heap by at least 12% if we can.
        size_t growby = MIN(1u << HEAP_ALLOC_VIRTUAL_BITS,
                            MAX(theheap.size >> 3,
                                MAX(HEAP_GROW_SIZE, rounded_up)));
        while (heap_grow(growby, NULL) < 0) {
            if (growby <= rounded_up) {
                unlock();
                return NULL;
            }
            growby = MAX(growby >> 1, rounded_up);
        }
        bucket = find_nonempty_bucket(start_bucket);
    }
    free_t *head = theheap.free_lists[bucket];
    size_t left_over = head->header.size - rounded_up;
    // We can't carve off the rest for a new free space if it's smaller than
    // the free-list linked structure. We also don't carve it off if it's
    // less than 1.6% of the size of the allocation. This is to avoid small
    // long-lived allocations being placed right next to large allocations,
    // hindering coalescing and returning pages to the OS.
    if (left_over >= sizeof(free_t) && left_over > (size >> 6)) {
        header_t *right = right_header(&head->header);
        unlink_free(head, bucket);
        void *free = (char *)head + rounded_up;
        create_free_area(free, head, left_over, NULL);
        FixLeftPointer(right, (header_t *)free);
        head->header.size -= left_over;
    } else {
        unlink_free(head, bucket);
    }
    void *result =
        create_allocation_header(head, 0, head->header.size, head->header.left);
#ifdef CMPCT_DEBUG
    memset(result, ALLOC_FILL, size);
    memset(((char *)result) + size, PADDING_FILL, rounded_up - size - sizeof(header_t));
#endif
    unlock();
    return result;
}

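// Aligned allocation works by over-allocating (the alignment plus enough
// bookkeeping space that the carved-off front piece is always big enough
// to live on a free list), then handing the misaligned front back via
// cmpct_free(). Illustrative sketch of the result when carving happens:
//
//   +--------+---------------------+--------+--------------------+
//   | header | freed-back front    | header | aligned payload    |
//   +--------+---------------------+--------+--------------------+
//              ^ unaligned                    ^ returned payload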
void *cmpct_memalign(size_t size, size_t alignment) {
    if (alignment < 8) return cmpct_alloc(size);
    size_t padded_size =
        size + alignment + sizeof(free_t) + sizeof(header_t);
    char *unaligned = (char *)cmpct_alloc(padded_size);
    if (unaligned == NULL) return NULL;
    lock();
    size_t mask = alignment - 1;
    uintptr_t payload_int = (uintptr_t)unaligned + sizeof(free_t) +
                            sizeof(header_t) + mask;
    char *payload = (char *)(payload_int & ~mask);
    if (unaligned != payload) {
        header_t *unaligned_header = (header_t *)unaligned - 1;
        header_t *header = (header_t *)payload - 1;
        size_t left_over = payload - unaligned;
        create_allocation_header(
            header, 0, unaligned_header->size - left_over, unaligned_header);
        header_t *right = right_header(unaligned_header);
        unaligned_header->size = left_over;
        FixLeftPointer(right, header);
        unlock();
        cmpct_free(unaligned);
    } else {
        unlock();
    }
    // TODO: Free the part after the aligned allocation.
    return payload;
}

void cmpct_free(void *payload) {
    if (payload == NULL) return;
    header_t *header = (header_t *)payload - 1;
    DEBUG_ASSERT(!is_tagged_as_free(header)); // Double free!
    size_t size = header->size;
    lock();
    header_t *left = header->left;
    if (left != NULL && is_tagged_as_free(left)) {
        // Coalesce with left free object.
        unlink_free_unknown_bucket((free_t *)left);
        header_t *right = right_header(header);
        if (is_tagged_as_free(right)) {
            // Coalesce both sides.
            unlink_free_unknown_bucket((free_t *)right);
            header_t *right_right = right_header(right);
            FixLeftPointer(right_right, left);
            free_memory(left, left->left, left->size + size + right->size);
        } else {
            // Coalesce only left.
            FixLeftPointer(right, left);
            free_memory(left, left->left, left->size + size);
        }
    } else {
        header_t *right = right_header(header);
        if (is_tagged_as_free(right)) {
            // Coalesce only right.
            header_t *right_right = right_header(right);
            unlink_free_unknown_bucket((free_t *)right);
            FixLeftPointer(right_right, header);
            free_memory(header, left, size + right->size);
        } else {
            free_memory(header, left, size);
        }
    }
    unlock();
}

void *cmpct_realloc(void *payload, size_t size) {
    if (payload == NULL) return cmpct_alloc(size);
    header_t *header = (header_t *)payload - 1;
    size_t old_size = header->size - sizeof(header_t);
    void *new_payload = cmpct_alloc(size);
    // Don't lose the old block if the new allocation fails.
    if (new_payload == NULL) return NULL;
    memcpy(new_payload, payload, MIN(size, old_size));
    cmpct_free(payload);
    return new_payload;
}

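// add_to_heap() below carves a fresh region from the page allocator into
// the heap format: an in-use left sentinel header, one big free area, and
// a zero-sized in-use right sentinel. Illustrative layout:
//
//   new_area                                                  top
//   v                                                           v
//   +----------------+-------------------------------+----------+
//   | left sentinel  | free area                     | right    |
//   | (header_t)     | (size - 2 * sizeof(header_t)) | sentinel |
//   +----------------+-------------------------------+----------+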
static void add_to_heap(void *new_area, size_t size, free_t **bucket) {
    void *top = (char *)new_area + size;
    header_t *left_sentinel = (header_t *)new_area;
    // Not free, stops attempts to coalesce left.
    create_allocation_header(left_sentinel, 0, sizeof(header_t), NULL);
    header_t *new_header = left_sentinel + 1;
    size_t free_size = size - 2 * sizeof(header_t);
    create_free_area(new_header, left_sentinel, free_size, bucket);
    header_t *right_sentinel = (header_t *)(top - sizeof(header_t));
    // Not free, stops attempts to coalesce right.
    create_allocation_header(right_sentinel, 0, 0, new_header);
}

// Create a new free-list entry of at least size bytes (including the
// allocation header). Called with the lock held, apart from during init.
static ssize_t heap_grow(size_t size, free_t **bucket) {
    // The new free list entry will have a header on each side (the
    // sentinels) so we need to grow the gross heap size by this much more.
    size += 2 * sizeof(header_t);
    size = ROUNDUP(size, PAGE_SIZE);
    void *ptr = page_alloc(size >> PAGE_SIZE_SHIFT, PAGE_ALLOC_ANY_ARENA);
    if (ptr == NULL) return -1;
    theheap.size += size;
    LTRACEF("growing heap by 0x%zx bytes, new ptr %p\n", size, ptr);
    add_to_heap(ptr, size, bucket);
    return size;
}

void cmpct_init(void) {
    LTRACE_ENTRY;

    // Create a mutex.
    mutex_init(&theheap.lock);

    // Initialize the free list.
    for (int i = 0; i < NUMBER_OF_BUCKETS; i++) {
        theheap.free_lists[i] = NULL;
    }
    for (int i = 0; i < BUCKET_WORDS; i++) {
        theheap.free_list_bits[i] = 0;
    }

    size_t initial_alloc = HEAP_GROW_SIZE - 2 * sizeof(header_t);

    theheap.remaining = 0;

    heap_grow(initial_alloc, NULL);
}