1 /*
2 * Copyright (c) 2017 Intel Corporation
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include <zephyr/kernel.h>
8 #include <string.h>
9 #include <zephyr/sys/math_extras.h>
10 #include <zephyr/sys/util.h>
11
/*
 * Signature shared by sys_heap_aligned_alloc() and sys_heap_noalign_alloc(),
 * letting the helpers below pick the allocation strategy at the call site.
 */
typedef void * (sys_heap_allocator_t)(struct sys_heap *heap, size_t align, size_t bytes);
13
/*
 * Common allocation path for k_malloc()/k_aligned_alloc() and the
 * thread-pool variants: reserve room for a back-pointer to the owning
 * k_heap immediately below the returned block so k_free()/k_realloc()
 * can recover the heap from the pointer alone.
 *
 * @param heap               Heap to allocate from.
 * @param align              Requested alignment; 0 or a power of 2.
 * @param size               Number of bytes requested by the caller.
 * @param sys_heap_allocator Underlying sys_heap_*() allocation routine.
 *
 * @return Pointer to the usable (aligned) memory, or NULL on failure.
 */
static void *z_alloc_helper(struct k_heap *heap, size_t align, size_t size,
			    sys_heap_allocator_t sys_heap_allocator)
{
	void *mem;
	struct k_heap **heap_ref;
	/* Renamed from "__align": identifiers starting with a double
	 * underscore are reserved for the implementation in C.
	 */
	size_t eff_align;
	k_spinlock_key_t key;

	/* A power of 2 as well as 0 is OK */
	__ASSERT((align & (align - 1)) == 0,
		 "align must be a power of 2");

	/*
	 * Adjust the size to make room for our heap reference.
	 * Merge a rewind bit with align value (see sys_heap_aligned_alloc()).
	 * This allows for storing the heap pointer right below the aligned
	 * boundary without wasting any memory.
	 */
	if (size_add_overflow(size, sizeof(heap_ref), &size)) {
		return NULL;
	}
	eff_align = align | sizeof(heap_ref);

	/*
	 * No point calling k_heap_malloc/k_heap_aligned_alloc with K_NO_WAIT.
	 * Better bypass them and go directly to sys_heap_*() instead.
	 */
	key = k_spin_lock(&heap->lock);
	mem = sys_heap_allocator(&heap->heap, eff_align, size);
	k_spin_unlock(&heap->lock, key);

	if (mem == NULL) {
		return NULL;
	}

	/* Stash the owning heap, then return the memory right above it. */
	heap_ref = mem;
	*heap_ref = heap;
	mem = ++heap_ref;
	__ASSERT(align == 0 || ((uintptr_t)mem & (align - 1)) == 0,
		 "misaligned memory at %p (align = %zu)", mem, align);

	return mem;
}
57
k_free(void * ptr)58 void k_free(void *ptr)
59 {
60 struct k_heap **heap_ref;
61
62 if (ptr != NULL) {
63 heap_ref = ptr;
64 --heap_ref;
65 ptr = heap_ref;
66
67 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_heap_sys, k_free, *heap_ref, heap_ref);
68
69 k_heap_free(*heap_ref, ptr);
70
71 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_heap_sys, k_free, *heap_ref, heap_ref);
72 }
73 }
74
#if (K_HEAP_MEM_POOL_SIZE > 0)

/* Kernel-wide system heap backing k_malloc() and friends. */
K_HEAP_DEFINE(_system_heap, K_HEAP_MEM_POOL_SIZE);
#define _SYSTEM_HEAP (&_system_heap)
79
/*
 * Allocate @size bytes from the system heap with the requested
 * alignment (0 or a power of 2). Returns NULL on failure.
 */
void *k_aligned_alloc(size_t align, size_t size)
{
	void *mem;

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_heap_sys, k_aligned_alloc, _SYSTEM_HEAP);

	mem = z_alloc_helper(_SYSTEM_HEAP, align, size, sys_heap_aligned_alloc);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_heap_sys, k_aligned_alloc, _SYSTEM_HEAP, mem);

	return mem;
}
90
/*
 * Allocate @size bytes from the system heap with no particular
 * alignment constraint. Returns NULL on failure.
 */
void *k_malloc(size_t size)
{
	void *mem;

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_heap_sys, k_malloc, _SYSTEM_HEAP);

	mem = z_alloc_helper(_SYSTEM_HEAP, 0, size, sys_heap_noalign_alloc);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_heap_sys, k_malloc, _SYSTEM_HEAP, mem);

	return mem;
}
101
/*
 * Allocate and zero-fill an array of @nmemb elements of @size bytes
 * each from the system heap. Returns NULL on failure or if
 * nmemb * size overflows size_t.
 */
void *k_calloc(size_t nmemb, size_t size)
{
	void *mem = NULL;
	size_t total;

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_heap_sys, k_calloc, _SYSTEM_HEAP);

	/* Reject requests whose total byte count does not fit in size_t. */
	if (!size_mul_overflow(nmemb, size, &total)) {
		mem = k_malloc(total);
		if (mem != NULL) {
			(void)memset(mem, 0, total);
		}
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_heap_sys, k_calloc, _SYSTEM_HEAP, mem);

	return mem;
}
124
/*
 * Resize a block previously returned by k_malloc()/k_aligned_alloc()/
 * k_calloc()/k_realloc(). Follows C realloc() conventions:
 * size == 0 frees @ptr and returns NULL; ptr == NULL behaves like
 * k_malloc(). On failure the original block is left untouched and
 * NULL is returned.
 */
void *k_realloc(void *ptr, size_t size)
{
	struct k_heap **heap_ref;
	struct k_heap *heap;
	k_spinlock_key_t key;
	void *new_mem;

	if (size == 0) {
		k_free(ptr);
		return NULL;
	}
	if (ptr == NULL) {
		return k_malloc(size);
	}

	/* Rewind to the heap back-pointer stored by z_alloc_helper(). */
	heap_ref = ptr;
	ptr = --heap_ref;
	heap = *heap_ref;

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_heap_sys, k_realloc, heap, ptr);

	/* The back-pointer must fit in the resized block as well. */
	if (size_add_overflow(size, sizeof(heap_ref), &size)) {
		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_heap_sys, k_realloc, heap, ptr, NULL);
		return NULL;
	}

	/*
	 * No point calling k_heap_realloc() with K_NO_WAIT here.
	 * Better bypass it and go directly to sys_heap_realloc() instead.
	 */
	key = k_spin_lock(&heap->lock);
	new_mem = sys_heap_realloc(&heap->heap, ptr, size);
	k_spin_unlock(&heap->lock, key);

	/* On success, skip past the (preserved) heap back-pointer. */
	if (new_mem != NULL) {
		heap_ref = new_mem;
		new_mem = ++heap_ref;
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_heap_sys, k_realloc, heap, ptr, new_mem);

	return new_mem;
}
166
/*
 * Make @thread use the kernel system heap as its resource pool, so
 * z_thread_malloc()/z_thread_aligned_alloc() on its behalf draw from it.
 */
void k_thread_system_pool_assign(struct k_thread *thread)
{
	thread->resource_pool = _SYSTEM_HEAP;
}
171 #else
172 #define _SYSTEM_HEAP NULL
173 #endif /* K_HEAP_MEM_POOL_SIZE */
174
/*
 * Allocate from the current thread's resource pool, or from the system
 * heap when called from an ISR (where there is no thread context).
 * Returns NULL when no pool is available or the allocation fails.
 */
static void *z_thread_alloc_helper(size_t align, size_t size,
				   sys_heap_allocator_t sys_heap_allocator)
{
	struct k_heap *pool;

	/* ISRs have no current thread; fall back to the system heap
	 * (which is NULL when K_HEAP_MEM_POOL_SIZE == 0).
	 */
	pool = k_is_in_isr() ? _SYSTEM_HEAP : _current->resource_pool;

	if (pool == NULL) {
		return NULL;
	}

	return z_alloc_helper(pool, align, size, sys_heap_allocator);
}
195
/* Aligned allocation from the current thread's resource pool. */
void *z_thread_aligned_alloc(size_t align, size_t size)
{
	void *mem = z_thread_alloc_helper(align, size, sys_heap_aligned_alloc);

	return mem;
}
200
/* Unaligned allocation from the current thread's resource pool. */
void *z_thread_malloc(size_t size)
{
	void *mem = z_thread_alloc_helper(0, size, sys_heap_noalign_alloc);

	return mem;
}
205