// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2018-2019, Linaro Limited
 */

#include <assert.h>
#include <compiler.h>
#include <malloc.h>
#include <mempool.h>
#include <pta_stats.h>
#include <string.h>
#include <trace.h>
#include <util.h>

#if defined(__KERNEL__)
#include <kernel/mutex.h>
#include <kernel/panic.h>
#endif

/*
 * Allocation of temporary memory buffers which are used in a stack-like
 * fashion. One example is when a Big Number is needed as a temporary
 * variable in a Big Number computation: Big Number operations (add, ...),
 * crypto algorithms (RSA, ECC, ...).
 *
 * The allocation algorithm takes memory buffers from a pool,
 * characterized by (cf. struct mempool):
 * - the total size (in bytes) of the pool
 * - the offset of the last item allocated in the pool (struct
 *   mempool_item). This offset is -1 if nothing is allocated yet.
 *
 * Each item consists of (struct mempool_item):
 * - the size of the item
 * - the offsets, in the pool, of the previous and next items
 *
 * An allocation creates an item of the requested size, placed in the
 * pool right after the last allocated item. This means:
 * - the heap is never used
 * - there is no assumption on the size of the allocated memory buffers,
 *   only the size of the pool limits the allocation
 * - allocation and free run in constant time as there is no list scan
 * - but memory can get fragmented, since the allocation does not take
 *   "holes" in the pool into account (allocation is performed after the
 *   last allocated item). This interface is therefore meant for
 *   stack-like allocations, which avoid the issue, meaning that
 *   allocated items:
 *   - should have a short life cycle
 *   - if an item A is allocated before another item B, then A should be
 *     released after B
 *   This mitigates the potential fragmentation.
 */
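
/*
 * A minimal usage sketch, for illustration only (error handling omitted,
 * sizes arbitrary); pool is any pool created with mempool_alloc_pool(),
 * e.g. mempool_default in kernel builds. Buffers are released in the
 * reverse order of allocation, matching the stack-like discipline
 * described above:
 *
 *	void *a = mempool_alloc(pool, 64);
 *	void *b = mempool_calloc(pool, 4, 32);
 *
 *	// ... use a and b as short-lived temporaries ...
 *
 *	mempool_free(pool, b);	// free b first, it was allocated last
 *	mempool_free(pool, a);	// then a
 */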

struct mempool {
	size_t size;	/* size of the memory pool, in bytes */
	vaddr_t data;
	struct malloc_ctx *mctx;
#ifdef CFG_MEMPOOL_REPORT_LAST_OFFSET
	size_t max_allocated;
#endif
#if defined(__KERNEL__)
	void (*release_mem)(void *ptr, size_t size);
	struct recursive_mutex mu;
#endif
};

#if defined(__KERNEL__)
struct mempool *mempool_default;
#endif

static void init_mpool(struct mempool *pool)
{
	size_t sz = pool->size - raw_malloc_get_ctx_size();
	vaddr_t v = ROUNDDOWN(pool->data + sz, sizeof(long) * 2);

	/*
	 * v is placed as close to the end of the data pool as possible
	 * where the struct malloc_ctx can be placed. This location is
	 * selected as an optimization for the pager case to get better
	 * data locality, since raw_malloc() starts to allocate from the
	 * end of the supplied data pool.
	 */
	assert(v > pool->data);
	pool->mctx = (struct malloc_ctx *)v;
	raw_malloc_init_ctx(pool->mctx);
	raw_malloc_add_pool(pool->mctx, (void *)pool->data, v - pool->data);
}

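/*
 * Grab the pool before using its malloc context: in kernel builds this
 * takes the recursive mutex and lazily initializes the context on first
 * use; in user mode the context is set up at pool creation and this is
 * a no-op.
 */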
static void get_pool(struct mempool *pool __maybe_unused)
{
#if defined(__KERNEL__)
	mutex_lock_recursive(&pool->mu);
	if (!pool->mctx)
		init_mpool(pool);
#endif
}

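/*
 * Drop one level of the recursive lock: if this is the last level and a
 * release_mem callback was provided, discard the malloc context and hand
 * the backing memory back via the callback. No-op in user mode.
 */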
static void put_pool(struct mempool *pool __maybe_unused)
{
#if defined(__KERNEL__)
	if (mutex_get_recursive_lock_depth(&pool->mu) == 1) {
		/*
		 * As the lock depth (acting as a refcount) is about to
		 * become 0 there should be no items left
		 */
		if (pool->release_mem) {
			pool->mctx = NULL;
			pool->release_mem((void *)pool->data, pool->size);
		}
	}
	mutex_unlock_recursive(&pool->mu);
#endif
}

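/*
 * Create a pool descriptor for the @size bytes of memory starting at
 * @data. @data must be aligned to MEMPOOL_ALIGN. In kernel builds the
 * internal malloc context is set up lazily on first allocation and, when
 * @release_mem is provided, the backing memory can be released while the
 * pool is idle. Returns NULL if the descriptor cannot be allocated.
 */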
struct mempool *
mempool_alloc_pool(void *data, size_t size,
		   void (*release_mem)(void *ptr, size_t size) __maybe_unused)
{
	struct mempool *pool = calloc(1, sizeof(*pool));

	COMPILE_TIME_ASSERT(MEMPOOL_ALIGN >= __alignof__(struct mempool_item));
	assert(!((vaddr_t)data & (MEMPOOL_ALIGN - 1)));

	if (pool) {
		pool->size = size;
		pool->data = (vaddr_t)data;
#if defined(__KERNEL__)
		pool->release_mem = release_mem;
		mutex_init_recursive(&pool->mu);
#else
		init_mpool(pool);
#endif
	}

	return pool;
}

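/*
 * Allocate @size bytes from @pool. On success the pool stays held until
 * the buffer is returned with mempool_free(). Returns NULL and logs an
 * error if the pool cannot satisfy the request.
 */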
void *mempool_alloc(struct mempool *pool, size_t size)
{
	void *p = NULL;

	get_pool(pool);

	p = raw_malloc(0, 0, size, pool->mctx);
	if (p) {
#ifdef CFG_MEMPOOL_REPORT_LAST_OFFSET
		struct pta_stats_alloc stats = { };

		raw_malloc_get_stats(pool->mctx, &stats);
		if (stats.max_allocated > pool->max_allocated) {
			pool->max_allocated = stats.max_allocated;
			DMSG("Max memory usage increased to %zu",
			     pool->max_allocated);
		}
#endif
		return p;
	}

	EMSG("Failed to allocate %zu bytes, please tune the pool size", size);
	put_pool(pool);
	return NULL;
}

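/*
 * Allocate zero-initialized memory for an array of @nmemb elements of
 * @size bytes each, guarding against multiplication overflow. Returns
 * NULL on overflow or if the pool is exhausted.
 */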
void *mempool_calloc(struct mempool *pool, size_t nmemb, size_t size)
{
	size_t sz;
	void *p;

	if (MUL_OVERFLOW(nmemb, size, &sz))
		return NULL;

	p = mempool_alloc(pool, sz);
	if (p)
		memset(p, 0, sz);

	return p;
}

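/*
 * Return a buffer obtained with mempool_alloc()/mempool_calloc() to
 * @pool and drop the hold taken at allocation time. Calling this with a
 * NULL @ptr is a no-op.
 */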
void mempool_free(struct mempool *pool, void *ptr)
{
	if (ptr) {
		raw_free(ptr, pool->mctx, false /*!wipe*/);
		put_pool(pool);
	}
}