// © 2021 Qualcomm Innovation Center, Inc. All rights reserved.
//
// SPDX-License-Identifier: BSD-3-Clause

// The following copyright has been added because the code in this file is
// based on the objmanager_simple allocator from OKL4 3.0

// Australian Public Licence B (OZPLB)
//
// Version 1-0
//
// Copyright (c) 2006-2010, Open Kernel Labs, Inc.
//
// All rights reserved.
//
// Developed by: Embedded, Real-time and Operating Systems Program (ERTOS)
//               National ICT Australia
//               http://www.ertos.nicta.com.au
//
// Permission is granted by Open Kernel Labs, Inc., free of charge, to
// any person obtaining a copy of this software and any associated
// documentation files (the "Software") to deal with the Software without
// restriction, including (without limitation) the rights to use, copy,
// modify, adapt, merge, publish, distribute, communicate to the public,
// sublicense, and/or sell, lend or rent out copies of the Software, and
// to permit persons to whom the Software is furnished to do so, subject
// to the following conditions:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimers.
//
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimers in the documentation and/or other materials provided
//       with the distribution.
//
//     * Neither the name of Open Kernel Labs, Inc., nor the names of its
//       contributors, may be used to endorse or promote products derived
//       from this Software without specific prior written permission.
//
// EXCEPT AS EXPRESSLY STATED IN THIS LICENCE AND TO THE FULL EXTENT
// PERMITTED BY APPLICABLE LAW, THE SOFTWARE IS PROVIDED "AS-IS", AND
// NATIONAL ICT AUSTRALIA AND ITS CONTRIBUTORS MAKE NO REPRESENTATIONS,
// WARRANTIES OR CONDITIONS OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
// BUT NOT LIMITED TO ANY REPRESENTATIONS, WARRANTIES OR CONDITIONS
// REGARDING THE CONTENTS OR ACCURACY OF THE SOFTWARE, OR OF TITLE,
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT,
// THE ABSENCE OF LATENT OR OTHER DEFECTS, OR THE PRESENCE OR ABSENCE OF
// ERRORS, WHETHER OR NOT DISCOVERABLE.
//
// TO THE FULL EXTENT PERMITTED BY APPLICABLE LAW, IN NO EVENT SHALL
// NATIONAL ICT AUSTRALIA OR ITS CONTRIBUTORS BE LIABLE ON ANY LEGAL
// THEORY (INCLUDING, WITHOUT LIMITATION, IN AN ACTION OF CONTRACT,
// NEGLIGENCE OR OTHERWISE) FOR ANY CLAIM, LOSS, DAMAGES OR OTHER
// LIABILITY, INCLUDING (WITHOUT LIMITATION) LOSS OF PRODUCTION OR
// OPERATION TIME, LOSS, DAMAGE OR CORRUPTION OF DATA OR RECORDS; OR LOSS
// OF ANTICIPATED SAVINGS, OPPORTUNITY, REVENUE, PROFIT OR GOODWILL, OR
// OTHER ECONOMIC LOSS; OR ANY SPECIAL, INCIDENTAL, INDIRECT,
// CONSEQUENTIAL, PUNITIVE OR EXEMPLARY DAMAGES, ARISING OUT OF OR IN
// CONNECTION WITH THIS LICENCE, THE SOFTWARE OR THE USE OF OR OTHER
// DEALINGS WITH THE SOFTWARE, EVEN IF NATIONAL ICT AUSTRALIA OR ITS
// CONTRIBUTORS HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH CLAIM, LOSS,
// DAMAGES OR OTHER LIABILITY.
//
// If applicable legislation implies representations, warranties, or
// conditions, or imposes obligations or liability on Open Kernel Labs, Inc.
// or one of its contributors in respect of the Software that
// cannot be wholly or partly excluded, restricted or modified, the
// liability of Open Kernel Labs, Inc. or the contributor is limited, to
// the full extent permitted by the applicable legislation, at its
// option, to:
// a.  in the case of goods, any one or more of the following:
//     i.   the replacement of the goods or the supply of equivalent goods;
//     ii.  the repair of the goods;
//     iii. the payment of the cost of replacing the goods or of acquiring
//          equivalent goods;
//     iv.  the payment of the cost of having the goods repaired; or
// b.  in the case of services:
//     i.   the supplying of the services again; or
//     ii.  the payment of the cost of having the services supplied again.
//
// The construction, validity and performance of this licence is governed
// by the laws in force in New South Wales, Australia.

#include <assert.h>
#include <hyptypes.h>
#include <string.h>

#include <allocator.h>
#include <attributes.h>
#include <spinlock.h>
#include <util.h>

#include "event_handlers.h"

// Maximum supported heap allocation size or alignment. Very large requests
// are rejected up front so that the size arithmetic below has no overflow
// corner cases to consider.
#define MAX_ALLOC_SIZE (256UL * 1024UL * 1024UL)
#define MAX_ALIGNMENT_SIZE (16UL * 1024UL * 1024UL)

#define NODE_HEADER_SIZE (sizeof(allocator_node_t))

// Minimum allocation size and alignment for the heap.
#define HEAP_MIN_ALLOC NODE_HEADER_SIZE
#define HEAP_MIN_ALIGN NODE_HEADER_SIZE
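// For example, on a typical 64-bit build where allocator_node_t holds a
// next pointer and a size_t (16 bytes; an assumption for illustration), a
// 20-byte request is rounded up to 32 bytes and served with at least
// 16-byte alignment.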

// -------------- DEBUGGING --------------
#if defined(ALLOCATOR_DEBUG)
#define OVERFLOW_DEBUG
#define OVERFLOW_REDZONE_SIZE NODE_HEADER_SIZE
// #define DEBUG_PRINT
#endif
// ---------------------------------------
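
// When OVERFLOW_DEBUG is enabled, each allocation is padded with a redzone
// of OVERFLOW_REDZONE_SIZE bytes on each side, and the whole block is
// filled with known patterns so that buffer overflows can be detected
// (a sketch of the resulting layout):
//
//   [ 0xe7 redzone ][ 0xa5 payload (returned to caller) ][ 0xe8 redzone ]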

#if defined(ALLOCATOR_DEBUG)
#define CHECK_HEAP(x) check_heap_consistency(x)
#else
#define CHECK_HEAP(x)
#endif

#if defined(ALLOCATOR_DEBUG)
// Check heap consistency:
// - Each block must start at a virtual address after the previous block.
// - Blocks must not overlap; adjacent blocks should have been merged.
// - Each block must be aligned to the node header size, and be at least
//   one node header in size.
static void
check_heap_consistency(allocator_node_t *head)
{
	if (head != NULL) {
		allocator_node_t *previous = head;
		allocator_node_t *current = head->next;

		while (current != NULL) {
#if defined(DEBUG_PRINT)
			printf("[%p : %lx] -> ", current, current->size);
#endif

			assert((uint64_t)previous < (uint64_t)current);
			assert(((uint64_t)previous + previous->size) <=
			       (uint64_t)current);
			assert(((uint64_t)current % NODE_HEADER_SIZE) == 0UL);
			assert(current->size >= NODE_HEADER_SIZE);

			previous = current;
			current = current->next;
		}
	}
}
#endif

// Valid cases when adding a free range to the sorted freelist:
//   1. Add head to an empty list.
//   2. Prepend before head, with a gap to head.
//   3. Prepend and merge with head (range ends exactly at head).
//   4. Merge with previous (range starts exactly at previous's end).
//   5. Merge with both previous and current, joining them into one node.
//   6. Insert between previous and current, with gaps on both sides.
//   7. Merge with current (range ends exactly at current's start).
//   8. Merge with the last block (range starts exactly at its end).
//   9. Append after the last block, with a gap.
// Any other placement overlaps an existing free block and is an error.
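//
// For example (illustrative addresses, 16-byte node headers assumed):
// freeing [0x1000, 0x1040) into a list holding [0x0f00, 0x1000) and
// [0x1040, 0x2000) matches case 5, coalescing all three ranges into a
// single node covering [0x0f00, 0x2000).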
static error_t
list_add(allocator_node_t **head, allocator_node_t *node, size_t size)
{
	error_t ret = ERROR_ALLOCATOR_RANGE_OVERLAPPING;

	if (*head == NULL) {
		// 1. Add head to empty list
		*head = node;
		(*head)->size = size;
		(*head)->next = NULL;
	} else if (((uint64_t)node + size) < (uint64_t)(*head)) {
		// 2. Prepend to head if address range is before head
		node->next = *head;
		node->size = size;
		*head = node;
	} else if (((uint64_t)node + size) == (uint64_t)(*head)) {
		// 3. Merge with head
		node->size = size + (*head)->size;
		node->next = (*head)->next;
		*head = node;
	} else {
		allocator_node_t *previous = *head;
		allocator_node_t *current = (*head)->next;

		while ((current != NULL) &&
		       ((uint64_t)node >= (uint64_t)current)) {
			previous = current;
			current = current->next;
		}

		if (current != NULL) {
			if (((uint64_t)previous + previous->size) ==
			    (uint64_t)node) {
				if (((uint64_t)node + size) <
				    (uint64_t)current) {
					// 4. Merge with previous
					previous->size += size;
				} else if (((uint64_t)node + size) ==
					   (uint64_t)current) {
					// 5. Merge with previous & current
					previous->size += size + current->size;
					previous->next = current->next;
				} else {
					goto out;
				}
			} else if (((uint64_t)previous + previous->size) <
				   (uint64_t)node) {
				if (((uint64_t)node + size) <
				    (uint64_t)current) {
					// 6. Add between previous & current
					node->next = current;
					node->size = size;
					previous->next = node;
				} else if (((uint64_t)node + size) ==
					   (uint64_t)current) {
					// 7. Merge with current
					node->size = size + current->size;
					node->next = current->next;
					previous->next = node;
				} else {
					goto out;
				}
			} else {
				goto out;
			}
		} else {
			if (((uint64_t)previous + previous->size) ==
			    (uint64_t)node) {
				// 8. Merge with previous
				previous->size += size;
			} else if (((uint64_t)previous + previous->size) <
				   (uint64_t)node) {
				// 9. Append node to list
				node->next = NULL;
				node->size = size;
				previous->next = node;
			} else {
				goto out;
			}
		}
	}

	ret = OK;

out:
#if defined(DEBUG_PRINT)
	if (ret != OK) {
		printf("ERROR: failed due to addresses overlapping\n");
	}
#endif

	return ret;
}

static error_t NOINLINE
allocator_heap_add_memory(allocator_t *allocator, uintptr_t addr, size_t size)
{
	allocator_node_t *block;
	error_t ret = OK;

	assert(addr != 0U);

	// Trim the range so that it starts and ends on node header
	// boundaries.
	if (!util_is_baligned(addr, NODE_HEADER_SIZE)) {
		uintptr_t new_addr = util_balign_up(addr, NODE_HEADER_SIZE);
		size -= (new_addr - addr);
		addr = new_addr;
	}
	if (!util_is_baligned(size, NODE_HEADER_SIZE)) {
		size = util_balign_down(size, NODE_HEADER_SIZE);
	}
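	// For example (illustrative values, 16-byte node headers assumed):
	// addr 0x1004 / size 0x104 is trimmed to addr 0x1010 / size 0xf0,
	// so the added block starts and ends on a header boundary.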
	if (util_add_overflows(addr, size)) {
		ret = ERROR_ADDR_OVERFLOW;
	} else if (size < (2UL * NODE_HEADER_SIZE)) {
		ret = ERROR_ARGUMENT_SIZE;
	} else {
		// FIXME: Check if added memory is in kernel address space

		block = (allocator_node_t *)addr;

		// Add memory to the freelist
		spinlock_acquire(&allocator->lock);

		ret = list_add(&allocator->heap, block, size);
		if (ret == OK) {
			allocator->total_size += size;
		}

		spinlock_release(&allocator->lock);
	}

	return ret;
}

error_t
allocator_list_handle_allocator_add_ram_range(partition_t *owner,
					      paddr_t phys_base,
					      uintptr_t virt_base, size_t size)
{
	assert(owner != NULL);

	(void)phys_base;

	return allocator_heap_add_memory(&owner->allocator, virt_base, size);
}

// Allocation cases within a free node, where X marks aligned_alloc_start
// and V marks aligned_alloc_end relative to [node_start, node_end):
//   1. The allocation spans the entire node: unlink the node.
//   2. The allocation ends at node_end: shrink the node from the end.
//   3. The allocation starts at node_start: move the node's start up to
//      aligned_alloc_end.
//   4. The allocation sits in the middle: split the node in two around it.
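//
// For example (illustrative, with OVERFLOW_DEBUG disabled): a request for
// 0x40 bytes at 0x100 alignment from a free node covering [0x1010, 0x1200)
// returns 0x1100 and hits case 4, splitting the node into [0x1010, 0x1100)
// and [0x1140, 0x1200).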
static void_ptr_result_t
allocate_from_node(allocator_node_t **head, allocator_node_t **previous,
		   allocator_node_t **current, size_t alloc_size,
		   size_t alloc_alignment)
{
	void_ptr_result_t ret;

	assert(*current != NULL);
	assert(util_is_p2(alloc_alignment));
	assert(alloc_size >= NODE_HEADER_SIZE);
	assert((alloc_size % NODE_HEADER_SIZE) == 0UL);

	uint64_t node_start = (uint64_t)*current;
	uint64_t node_end = (uint64_t)*current + (*current)->size;
#if defined(OVERFLOW_DEBUG)
	node_start += OVERFLOW_REDZONE_SIZE;
#endif
	uint64_t aligned_alloc_start =
		util_balign_up(node_start, alloc_alignment);
#if defined(OVERFLOW_DEBUG)
	node_start -= OVERFLOW_REDZONE_SIZE;
	aligned_alloc_start -= OVERFLOW_REDZONE_SIZE;
#endif
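	// Note: the redzone bias above aligns the payload address (one
	// redzone past the block start) rather than the block itself,
	// because the pointer eventually handed to the caller skips the
	// leading redzone.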

	uint64_t aligned_alloc_end = aligned_alloc_start + alloc_size;

	if (util_add_overflows(aligned_alloc_start, alloc_size)) {
		ret = void_ptr_result_error(ERROR_ADDR_OVERFLOW);
	} else if ((aligned_alloc_end > node_end) ||
		   (aligned_alloc_start > node_end)) {
		ret = void_ptr_result_error(ERROR_NOMEM);
	} else {
		if (node_end == aligned_alloc_end) {
			if (node_start == aligned_alloc_start) {
				// 1. Allocate the entire node; remove it
				// from the list
				if (*previous != NULL) {
					(*previous)->next = (*current)->next;
				} else {
					*head = (*current)->next;
				}
			} else {
				// 2. Allocate from the end of the node
				(*current)->size -= alloc_size;
			}
		} else {
			if (node_start == aligned_alloc_start) {
				// 3. Allocate from the start of the node;
				// move its start address up
				allocator_node_t *next =
					(allocator_node_t *)aligned_alloc_end;

				next->next = (*current)->next;
				next->size = (*current)->size - alloc_size;

				if (*previous != NULL) {
					(*previous)->next = next;
				} else {
					*head = next;
				}
			} else {
				// 4. Allocate from the middle of the node;
				// create a new node after the allocation
				allocator_node_t *next =
					(allocator_node_t *)aligned_alloc_end;

				next->next = (*current)->next;
				next->size = node_end - aligned_alloc_end;
				(*current)->next = next;
				(*current)->size =
					aligned_alloc_start - node_start;
			}
		}

		ret = void_ptr_result_ok((void *)aligned_alloc_start);
	}

	return ret;
}

static void_ptr_result_t
allocate_block(allocator_node_t **head, size_t size, size_t alignment)
{
	void_ptr_result_t ret;

	assert(*head != NULL);
	assert((size % NODE_HEADER_SIZE) == 0UL);
	assert(size > 0UL);
	assert(util_is_p2(alignment));
	assert(alignment >= (size_t)sizeof(size_t));

	allocator_node_t *previous = NULL;
	allocator_node_t *current = *head;

#if defined(DEBUG_PRINT)
	printf("%s: head %p: size %zu (0x%lx), alignment %zu\n", __func__,
	       *head, size, size, alignment);
#endif

	while (current != NULL) {
		void_ptr_result_t result = allocate_from_node(
			head, &previous, &current, size, alignment);

		if (result.e == OK) {
#if defined(DEBUG_PRINT)
			printf(" -- allocated %p\n", result.r);
#endif
			ret = result;
			goto out;
		}

		previous = current;
		current = current->next;
	}

#if defined(DEBUG_PRINT)
	printf(" -- out of memory\n");
#endif
	ret = void_ptr_result_error(ERROR_NOMEM);

out:
	return ret;
}

void_ptr_result_t
allocator_allocate_object(allocator_t *allocator, size_t size,
			  size_t min_alignment)
{
	void_ptr_result_t ret;

	size_t alignment = util_max(min_alignment, alignof(size_t));

	spinlock_acquire(&allocator->lock);

	if (allocator->heap == NULL) {
		ret = void_ptr_result_error(ERROR_NOMEM);
		goto error;
	}

	assert(size > 0UL);
	assert(alignment > 0UL);
	assert((alignment & (alignment - 1UL)) == 0UL);
	assert(alignment >= (size_t)sizeof(size_t));

#if defined(DEBUG_PRINT)
	printf("%s:\nheap %p: size %zu, alignment %zu\n", __func__,
	       allocator->heap, size, alignment);
#endif
	if ((size > MAX_ALLOC_SIZE) || (alignment > MAX_ALIGNMENT_SIZE)) {
		ret = void_ptr_result_error(ERROR_ARGUMENT_INVALID);
		goto error;
	}

	size = util_balign_up(size, HEAP_MIN_ALLOC);

	if (alignment < HEAP_MIN_ALIGN) {
		alignment = HEAP_MIN_ALIGN;
	}

#if defined(DEBUG_PRINT)
	printf("After alignment. size: %zu alignment: %zu\n", size, alignment);
#endif

#if defined(OVERFLOW_DEBUG)
	size += 2UL * OVERFLOW_REDZONE_SIZE;
#endif

	CHECK_HEAP(allocator->heap);
	ret = allocate_block(&allocator->heap, size, alignment);
	CHECK_HEAP(allocator->heap);

	if (ret.e != OK) {
		goto error;
	}

	allocator->alloc_size += size;

#if defined(ALLOCATOR_DEBUG)
	char *data = (char *)ret.r;
	size_t start = 0UL;
	size_t end = size;

#if defined(OVERFLOW_DEBUG)
	// Fill the leading redzone
	end = OVERFLOW_REDZONE_SIZE;
	memset(&data[start], 0xe7, end - start);
	start = end;
	end = size - OVERFLOW_REDZONE_SIZE;
#endif
	// Fill the payload with a known pattern
	memset(&data[start], 0xa5, end - start);
#if defined(OVERFLOW_DEBUG)
	// Fill the trailing redzone
	start = end;
	end = size;
	memset(&data[start], 0xe8, end - start);

	// Return the address after the leading redzone
	ret.r = (void *)&data[OVERFLOW_REDZONE_SIZE];
#endif
#endif

error:
	spinlock_release(&allocator->lock);
	return ret;
}
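
// A sketch of typical usage (my_object_t and its use here are illustrative
// only, not an API defined in this file):
//
//	void_ptr_result_t r = allocator_allocate_object(
//		&partition->allocator, sizeof(my_object_t),
//		alignof(my_object_t));
//	if (r.e != OK) {
//		return r.e;
//	}
//	my_object_t *obj = (my_object_t *)r.r;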

// list_remove() and allocator_heap_remove_memory() are currently unused in
// normal operation, since memory is only ever added to a partition's heap;
// removal may be needed later when deleting a partition.
static void
list_remove(allocator_node_t **head, allocator_node_t *remove,
	    allocator_node_t *previous)
{
	if (previous == NULL) {
		*head = remove->next;
	} else {
		previous->next = remove->next;
	}
}

// TODO: Exported only for test code currently
error_t
allocator_heap_remove_memory(allocator_t *allocator, void *obj, size_t size);

// Returns ERROR_ALLOCATOR_MEM_INUSE if part of the range is still
// allocated and therefore cannot be removed from the heap.
error_t NOINLINE
allocator_heap_remove_memory(allocator_t *allocator, void *obj, size_t size)
{
	error_t ret = ERROR_ALLOCATOR_MEM_INUSE;

	assert(obj != NULL);
	assert(allocator->heap != NULL);

	allocator_node_t *previous = NULL;
	allocator_node_t *current = allocator->heap;
	uint64_t object_location;
	uint64_t current_location;
	uint64_t previous_location;
	uint64_t aligned_alloc_end;

	spinlock_acquire(&allocator->lock);

	size = util_balign_up(size, HEAP_MIN_ALLOC);

	while ((current != NULL) && ((uint64_t)obj > (uint64_t)current)) {
		assert((uint64_t)previous < (uint64_t)obj);
		previous = current;
		current = current->next;
	}

	object_location = (uint64_t)obj;
	current_location = (uint64_t)current;
	previous_location = (uint64_t)previous;

	assert(!util_add_overflows(object_location, size));
	aligned_alloc_end = object_location + size;

	assert((object_location <= current_location) ||
	       (current_location == 0UL));
	assert(object_location > previous_location);

	if (current_location == object_location) {
		if ((current_location + current->size) < aligned_alloc_end) {
			goto out;
		} else if ((current_location + current->size) ==
			   aligned_alloc_end) {
			list_remove(&allocator->heap, current, previous);
		} else {
			// Divide current into 2 nodes and remove the first
			allocator_node_t *new;

			new = (allocator_node_t *)aligned_alloc_end;
			new->next = current->next;
			new->size = current->size - size;
			current->next = new;
			current->size = size;
			list_remove(&allocator->heap, current, previous);
		}
	} else if (previous != NULL) {
		if ((previous_location + previous->size) < aligned_alloc_end) {
			goto out;
		}

		if ((previous_location + previous->size) == aligned_alloc_end) {
			// Reduce the size of previous
			previous->size -= size;
		} else {
			// Divide previous into 3 nodes & remove the middle
			allocator_node_t *new;

			new = (allocator_node_t *)aligned_alloc_end;
			new->next = current;
			new->size = previous_location + previous->size -
				    aligned_alloc_end;

			previous->next = new;
			previous->size = object_location - previous_location;
		}
	} else {
		goto out;
	}

	ret = OK;
	allocator->total_size -= size;

out:
	spinlock_release(&allocator->lock);
	return ret;
}

static void
deallocate_block(allocator_node_t **head, void *object, size_t size)
{
	assert(object != NULL);
	assert(size >= NODE_HEADER_SIZE);
	assert((size % NODE_HEADER_SIZE) == 0UL);

	allocator_node_t *previous = NULL;
	allocator_node_t *next = NULL;
	allocator_node_t *freed_node = NULL;

	uint64_t object_location;
	uint64_t next_location;
	uint64_t previous_location;

	if (*head == NULL) {
		freed_node = object;
		freed_node->size = size;
		freed_node->next = NULL;
		*head = freed_node;
		goto out;
	}

	previous = *head;
	next = (*head)->next;

	while ((next != NULL) && ((uint64_t)object > (uint64_t)next)) {
		assert((uint64_t)previous < (uint64_t)next);
		previous = next;
		next = next->next;
	}

	object_location = (uint64_t)object;
	next_location = (uint64_t)next;
	if (previous != NULL) {
		previous_location = (uint64_t)previous;
	} else {
		previous_location = (uint64_t)~0UL;
	}

	assert((object_location <= next_location) || (next_location == 0UL));

	if ((previous != NULL) &&
	    ((previous_location + previous->size) == object_location)) {
		// Combine the free memory into the previous node.
		assert(!util_add_overflows(previous->size, size));
		previous->size += size;
		freed_node = previous;

		// If necessary, connect the freed node with the next one.
		if ((next != NULL) && (((uint64_t)freed_node +
					freed_node->size) == next_location)) {
			freed_node->size += next->size;
			freed_node->next = next->next;
		}
	} else if ((previous == NULL) ||
		   (object_location < previous_location)) {
		// Create node as head
		freed_node = object;
		freed_node->size = size;
		freed_node->next = previous;
		*head = freed_node;

		// If necessary, connect the freed node with the next one.
		if ((previous != NULL) &&
		    (((uint64_t)freed_node + freed_node->size) ==
		     previous_location)) {
			freed_node->size += previous->size;
			freed_node->next = previous->next;
		}
	} else {
		assert(previous != NULL);

		// Create a new header in the object.
		freed_node = object;
		freed_node->size = size;
		freed_node->next = next;
		previous->next = freed_node;

		// If necessary, connect the freed node with the next one.
		if ((next != NULL) && (((uint64_t)freed_node +
					freed_node->size) == next_location)) {
			freed_node->size += next->size;
			freed_node->next = next->next;
		}
	}

out:
	return;
}

error_t
allocator_deallocate_object(allocator_t *allocator, void *object, size_t size)
{
	assert(object != NULL);
	assert(size > 0UL);

	spinlock_acquire(&allocator->lock);

#if defined(DEBUG_PRINT)
	if (allocator->heap != NULL) {
		printf("%s: heap %p: size %zu / --> %p\n", __func__,
		       allocator->heap, size, object);
	}
#endif

	size = util_balign_up(size, HEAP_MIN_ALLOC);

#if defined(OVERFLOW_DEBUG)
	// Grow the size by 2 * OVERFLOW_REDZONE_SIZE so the redzones before
	// and after the object are freed as well, and move the pointer back
	// to where the leading redzone starts.
	size += 2UL * OVERFLOW_REDZONE_SIZE;
	object = (void *)((uintptr_t)object - OVERFLOW_REDZONE_SIZE);
#endif

#if defined(ALLOCATOR_DEBUG)
	// Poison the freed memory with a known pattern
	memset(object, 0xe3, size);
#endif

	CHECK_HEAP(allocator->heap);
	deallocate_block(&allocator->heap, object, size);
	CHECK_HEAP(allocator->heap);

	allocator->alloc_size -= size;

	spinlock_release(&allocator->lock);

	return OK;
}
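
// Continuing the usage sketch above: the object is freed with the same size
// that was requested at allocation time; the allocator re-applies the
// rounding and, if enabled, the redzone adjustment internally:
//
//	(void)allocator_deallocate_object(&partition->allocator, obj,
//					  sizeof(my_object_t));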

error_t
allocator_init(allocator_t *allocator)
{
	assert(allocator->heap == NULL);

	allocator->total_size = 0UL;
	allocator->alloc_size = 0UL;

	spinlock_init(&allocator->lock);
	return OK;
}