// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2018-2019, Linaro Limited
 */

#include <assert.h>
#include <compiler.h>
#include <malloc.h>
#include <mempool.h>
#include <string.h>
#include <util.h>

#if defined(__KERNEL__)
#include <kernel/mutex.h>
#include <kernel/panic.h>
#endif
/*
 * Allocation of temporary memory buffers which are used in a stack-like
 * fashion. One example is when a Big Number is needed as a temporary
 * variable in a Big Number computation: Big Number operations (add, ...),
 * crypto algorithms (rsa, ecc, ...).
 *
 * The allocation algorithm takes memory buffers from a pool,
 * characterized by (cf. struct mempool):
 * - the total size (in bytes) of the pool
 * - the offset of the last item allocated in the pool (struct
 *   mempool_item). This offset is -1 if nothing is allocated yet.
 *
 * Each item consists of (struct mempool_item):
 * - the size of the item
 * - the offsets, in the pool, of the previous and next items
 *
 * The allocation allocates an item of a given size.
 * The allocation is performed in the pool after the last
 * allocated item. This means:
 * - the heap is never used.
 * - there is no assumption on the size of the allocated memory buffers. Only
 *   the size of the pool will limit the allocation.
 * - allocation and free are constant time as there is no list scan.
 * - but the memory is potentially fragmented as the allocation does not take
 *   into account "holes" in the pool (allocation is performed after the last
 *   allocated variable). Indeed, this interface is meant to be used
 *   with stack-like allocations to avoid this issue. This means that
 *   allocated items:
 *   - should have a short life cycle
 *   - if an item A is allocated before another item B, then A should be
 *     released after B.
 *   So the potential fragmentation is mitigated (see the usage sketch below).
 */
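
/*
 * Illustrative usage sketch (not part of this file's API, it only shows the
 * expected stack-like discipline; "pool" stands for any pool created with
 * mempool_alloc_pool()):
 *
 *	void *a = mempool_alloc(pool, 256);
 *	void *b = mempool_alloc(pool, 512);
 *
 *	// ... use b, then a ...
 *
 *	mempool_free(pool, b);	// release in reverse order of allocation
 *	mempool_free(pool, a);
 */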

struct mempool {
	size_t size;	/* size of the memory pool, in bytes */
	vaddr_t data;	/* start address of the pool buffer */
	struct malloc_ctx *mctx; /* raw malloc context placed inside the pool */
#ifdef CFG_MEMPOOL_REPORT_LAST_OFFSET
	size_t max_allocated;	/* peak memory usage reported for this pool */
#endif
#if defined(__KERNEL__)
	/* Optional hook to release the pool memory when it becomes unused */
	void (*release_mem)(void *ptr, size_t size);
	struct recursive_mutex mu; /* serializes access to the pool */
#endif
};

#if defined(__KERNEL__)
struct mempool *mempool_default;
#endif

static void init_mpool(struct mempool *pool)
{
	size_t sz = pool->size - raw_malloc_get_ctx_size();
	vaddr_t v = ROUNDDOWN(pool->data + sz, sizeof(long) * 2);

	/*
	 * v is placed as close to the end of the data pool as possible
	 * where the struct malloc_ctx can fit. This location is selected
	 * as an optimization for the pager case to get better data
	 * locality since raw_malloc() starts to allocate from the end of
	 * the supplied data pool.
	 */
	assert(v > pool->data);
	pool->mctx = (struct malloc_ctx *)v;
	raw_malloc_init_ctx(pool->mctx);
	raw_malloc_add_pool(pool->mctx, (void *)pool->data, v - pool->data);
}
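
/*
 * A sketch of the data pool layout set up by init_mpool() (low addresses on
 * the left):
 *
 *	[ raw malloc pool: pool->data .. v ) [ struct malloc_ctx at v ] [ pad ]
 *
 * raw_malloc() serves items from the first region; the padding, if any, is
 * due to rounding v down to a multiple of sizeof(long) * 2.
 */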

static void get_pool(struct mempool *pool __maybe_unused)
{
#if defined(__KERNEL__)
	mutex_lock_recursive(&pool->mu);
	if (!pool->mctx)
		init_mpool(pool);
#endif
}

static void put_pool(struct mempool *pool __maybe_unused)
{
#if defined(__KERNEL__)
	if (mutex_get_recursive_lock_depth(&pool->mu) == 1) {
		/*
		 * As the lock depth is about to become 0 there should be
		 * no items left
		 */
		if (pool->release_mem) {
			pool->mctx = NULL;
			pool->release_mem((void *)pool->data, pool->size);
		}
	}
	mutex_unlock_recursive(&pool->mu);
#endif
}

struct mempool *
mempool_alloc_pool(void *data, size_t size,
		   void (*release_mem)(void *ptr, size_t size) __maybe_unused)
{
	struct mempool *pool = calloc(1, sizeof(*pool));

	COMPILE_TIME_ASSERT(MEMPOOL_ALIGN >= __alignof__(struct mempool_item));
	assert(!((vaddr_t)data & (MEMPOOL_ALIGN - 1)));

	if (pool) {
		pool->size = size;
		pool->data = (vaddr_t)data;
#if defined(__KERNEL__)
		pool->release_mem = release_mem;
		mutex_init_recursive(&pool->mu);
#else
		init_mpool(pool);
#endif
	}

	return pool;
}
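
/*
 * Example of creating a pool (a sketch only; "pool_data" and its size are
 * hypothetical and not defined in this file, they just illustrate the
 * alignment requirement and the optional release_mem argument):
 *
 *	static uint8_t pool_data[4096] __aligned(MEMPOOL_ALIGN);
 *
 *	struct mempool *p = mempool_alloc_pool(pool_data, sizeof(pool_data),
 *					       NULL);
 *	if (!p)
 *		panic();
 */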

void *mempool_alloc(struct mempool *pool, size_t size)
{
	void *p = NULL;

	/*
	 * On success the pool is kept (the recursive mutex remains held in
	 * the kernel case) until the matching mempool_free().
	 */
	get_pool(pool);

	p = raw_malloc(0, 0, size, pool->mctx);
	if (p) {
#ifdef CFG_MEMPOOL_REPORT_LAST_OFFSET
		struct malloc_stats stats = { };

		raw_malloc_get_stats(pool->mctx, &stats);
		if (stats.max_allocated > pool->max_allocated) {
			pool->max_allocated = stats.max_allocated;
			DMSG("Max memory usage increased to %zu",
			     pool->max_allocated);
		}
#endif
		return p;
	}

	EMSG("Failed to allocate %zu bytes, please tune the pool size", size);
	put_pool(pool);
	return NULL;
}

void *mempool_calloc(struct mempool *pool, size_t nmemb, size_t size)
{
	size_t sz = 0;
	void *p = NULL;

	if (MUL_OVERFLOW(nmemb, size, &sz))
		return NULL;

	p = mempool_alloc(pool, sz);
	if (p)
		memset(p, 0, sz);

	return p;
}

void mempool_free(struct mempool *pool, void *ptr)
{
	raw_free(ptr, pool->mctx, false /*!wipe*/);
	put_pool(pool);
}