/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/mpool.h"

#include "hf/arch/std.h"

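/*
 * A chunk of free memory, tracked in place: this header is written at the
 * start of the free region itself, and the chunk covers the half-open range
 * [chunk, chunk->limit).
 */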
struct mpool_chunk {
	struct mpool_chunk *next_chunk;
	struct mpool_chunk *limit;
};

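/*
 * A single free entry, also tracked in place: this pointer is written into
 * the freed entry itself, linking it into the pool's free list.
 */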
struct mpool_entry {
	struct mpool_entry *next;
};

static bool mpool_locks_enabled = false;

/**
 * Enables the locks protecting memory pools. Before this function is called,
 * the locks are disabled, that is, acquiring/releasing them is a no-op.
 */
void mpool_enable_locks(void)
{
	mpool_locks_enabled = true;
}

/**
 * Acquires the lock protecting the given memory pool, if locks are enabled.
 */
static void mpool_lock(struct mpool *p)
{
	if (mpool_locks_enabled) {
		sl_lock(&p->lock);
	}
}

/**
 * Releases the lock protecting the given memory pool, if locks are enabled.
 */
static void mpool_unlock(struct mpool *p)
{
	if (mpool_locks_enabled) {
		sl_unlock(&p->lock);
	}
}

/**
 * Initialises the given memory pool with the given entry size, which must be
 * at least the size of two pointers.
 *
 * All entries stored in the memory pool will be aligned to at least the entry
 * size.
 */
void mpool_init(struct mpool *p, size_t entry_size)
{
	p->entry_size = entry_size;
	p->chunk_list = NULL;
	p->entry_list = NULL;
	p->fallback = NULL;
	sl_init(&p->lock);
}
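
/*
 * Typical usage, as an illustrative sketch only (`buffer`, `ppool` and
 * `MPOOL_ENTRY_SIZE` are hypothetical names, not part of this file):
 *
 *	static char buffer[16 * MPOOL_ENTRY_SIZE];
 *	struct mpool ppool;
 *
 *	mpool_init(&ppool, MPOOL_ENTRY_SIZE);
 *	mpool_add_chunk(&ppool, buffer, sizeof(buffer));
 *	void *entry = mpool_alloc(&ppool);
 *	...
 *	mpool_free(&ppool, entry);
 *
 * mpool_add_chunk trims the buffer to entry-size alignment, so the backing
 * array does not itself need any particular alignment.
 */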

/**
 * Initialises the given memory pool by replicating the properties of `from`.
 * It also pulls the chunk and free lists from `from`, consuming all its
 * resources and making them available via the new memory pool.
 */
void mpool_init_from(struct mpool *p, struct mpool *from)
{
	mpool_init(p, from->entry_size);

	mpool_lock(from);
	p->chunk_list = from->chunk_list;
	p->entry_list = from->entry_list;
	p->fallback = from->fallback;

	from->chunk_list = NULL;
	from->entry_list = NULL;
	from->fallback = NULL;
	mpool_unlock(from);
}

/**
 * Initialises the given memory pool with a fallback memory pool to be used if
 * this pool runs out of memory.
 */
void mpool_init_with_fallback(struct mpool *p, struct mpool *fallback)
{
	mpool_init(p, fallback->entry_size);
	p->fallback = fallback;
}
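
/*
 * Note: the fallback must outlive this pool; mpool_fini should be called on
 * this pool to return its resources to the fallback before the fallback is
 * itself finalised or freed.
 */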

/**
 * Finishes the given memory pool, giving all free memory to the fallback pool
 * if there is one.
 */
void mpool_fini(struct mpool *p)
{
	struct mpool_entry *entry;
	struct mpool_chunk *chunk;

	if (!p->fallback) {
		return;
	}

	mpool_lock(p);

	/* Merge the free list into the fallback. */
	entry = p->entry_list;
	while (entry != NULL) {
		void *ptr = entry;

		entry = entry->next;
		mpool_free(p->fallback, ptr);
	}

	/* Merge the chunk list into the fallback. */
	chunk = p->chunk_list;
	while (chunk != NULL) {
		void *ptr = chunk;
		size_t size = (uintptr_t)chunk->limit - (uintptr_t)chunk;

		chunk = chunk->next_chunk;
		mpool_add_chunk(p->fallback, ptr, size);
	}

	p->chunk_list = NULL;
	p->entry_list = NULL;
	p->fallback = NULL;

	mpool_unlock(p);
}

/**
 * Adds a contiguous chunk of memory to the given memory pool. The chunk will
 * eventually be broken up into entries of the size held by the memory pool.
 *
 * Only the portions aligned to the entry size will be added to the pool.
 *
 * Returns true if at least a portion of the chunk was added to the pool, or
 * false if none of the buffer was usable in the pool.
 */
bool mpool_add_chunk(struct mpool *p, void *begin, size_t size)
{
	struct mpool_chunk *chunk;
	char *new_begin;
	char *new_end;

	/* Round begin address up, and end address down. */
	new_begin = (void *)align_up((char *)begin, p->entry_size);
	new_end = (void *)align_down((char *)begin + size, p->entry_size);

	/* Nothing to do if there isn't enough room for an entry. */
	if (new_begin >= new_end ||
	    (size_t)(new_end - new_begin) < p->entry_size) {
		return false;
	}

	/* Write the chunk header in place and push it onto the chunk list. */
	chunk = (struct mpool_chunk *)new_begin;
	chunk->limit = (struct mpool_chunk *)new_end;

	mpool_lock(p);
	chunk->next_chunk = p->chunk_list;
	p->chunk_list = chunk;
	mpool_unlock(p);

	return true;
}
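
/*
 * Worked example of the trimming above, with illustrative numbers: for an
 * entry size of 16, a call with begin = 0x1004 and size = 0x40 rounds the
 * start up to 0x1010 and the end (0x1044) down to 0x1040, so 0x30 bytes
 * (three entries) are added and the misaligned edges are discarded.
 */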

/**
 * Allocates an entry from the given memory pool, if one is available. The
 * fallback will not be used even if there is one.
 */
static void *mpool_alloc_no_fallback(struct mpool *p)
{
	void *ret;
	struct mpool_chunk *chunk;
	struct mpool_chunk *new_chunk;

	/* Fetch an entry from the free list if one is available. */
	mpool_lock(p);
	if (p->entry_list != NULL) {
		struct mpool_entry *entry = p->entry_list;

		p->entry_list = entry->next;
		ret = entry;
		goto exit;
	}

	/* There was no free list available. Try a chunk instead. */
	chunk = p->chunk_list;
	if (chunk == NULL) {
		/* The chunk list is also empty, we're out of entries. */
		ret = NULL;
		goto exit;
	}

	/*
	 * Carve one entry off the front of the chunk: if that exhausts the
	 * chunk, unlink it; otherwise move the chunk header forward past the
	 * allocated entry.
	 */
	new_chunk = (struct mpool_chunk *)((char *)chunk + p->entry_size);
	if (new_chunk >= chunk->limit) {
		p->chunk_list = chunk->next_chunk;
	} else {
		*new_chunk = *chunk;
		p->chunk_list = new_chunk;
	}

	ret = chunk;

exit:
	mpool_unlock(p);

	return ret;
}

/**
 * Allocates an entry from the given memory pool, if one is available. If there
 * isn't one available, try to allocate from the fallback if there is one.
 */
void *mpool_alloc(struct mpool *p)
{
	do {
		void *ret = mpool_alloc_no_fallback(p);

		if (ret != NULL) {
			return ret;
		}

		p = p->fallback;
	} while (p != NULL);

	return NULL;
}

/**
 * Frees an entry back into the memory pool, making it available for reuse.
 *
 * This is meant to be used for freeing single entries. To free multiple
 * entries, one must call mpool_add_chunk instead.
 */
void mpool_free(struct mpool *p, void *ptr)
{
	struct mpool_entry *e = ptr;

	/* Store the newly freed entry at the front of the free list. */
	mpool_lock(p);
	e->next = p->entry_list;
	p->entry_list = e;
	mpool_unlock(p);
}

/**
 * Allocates a number of contiguous and aligned entries. If a suitable
 * allocation could not be found, the fallback will not be used even if there
 * is one.
 */
void *mpool_alloc_contiguous_no_fallback(struct mpool *p, size_t count,
					 size_t align)
{
	struct mpool_chunk **prev;
	void *ret = NULL;

	/* Convert the alignment from a number of entries to bytes. */
	align *= p->entry_size;

	mpool_lock(p);

	/*
	 * Go through the chunk list in search of one with enough room for the
	 * requested allocation.
	 */
	prev = &p->chunk_list;
	while (*prev != NULL) {
		char *start;
		struct mpool_chunk *new_chunk;
		struct mpool_chunk *chunk = *prev;

		/* Round the start address up to the required alignment. */
		start = (void *)align_up((char *)chunk, align);

		/*
		 * Calculate where the new chunk would be if we consume the
		 * requested number of entries. Then check if this chunk is big
		 * enough to satisfy the request.
		 */
		new_chunk =
			(struct mpool_chunk *)(start + (count * p->entry_size));
		if (new_chunk <= chunk->limit) {
			/* Remove the consumed area. */
			if (new_chunk == chunk->limit) {
				*prev = chunk->next_chunk;
			} else {
				*new_chunk = *chunk;
				*prev = new_chunk;
			}

			/*
			 * Add back the space consumed by the alignment
			 * requirement, if it's big enough to fit an entry.
			 */
			if ((size_t)(start - (char *)chunk) >= p->entry_size) {
				chunk->next_chunk = *prev;
				*prev = chunk;
				chunk->limit = (struct mpool_chunk *)start;
			}

			ret = (void *)start;
			break;
		}

		prev = &chunk->next_chunk;
	}

	mpool_unlock(p);

	return ret;
}
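
/*
 * Worked example of the entry-based alignment above, with illustrative
 * numbers: for an entry size of 16, count = 4 and align = 4, the byte
 * alignment becomes 4 * 16 = 64, so the search above looks for a
 * 64-byte-aligned run of 64 bytes within a chunk, splitting that chunk and
 * returning any sufficiently large leading remainder to the list.
 */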

/**
 * Allocates a number of contiguous and aligned entries. This is a best-effort
 * operation and only succeeds if such entries can be found in the chunks list
 * or the chunks of the fallbacks (i.e., the entry list is never used to
 * satisfy these allocations).
 *
 * The alignment is specified as the number of entries, that is, if `align` is
 * 4, the alignment in bytes will be 4 * entry_size.
 *
 * The caller can eventually free the returned entries by calling
 * mpool_add_chunk.
 */
void *mpool_alloc_contiguous(struct mpool *p, size_t count, size_t align)
{
	do {
		void *ret = mpool_alloc_contiguous_no_fallback(p, count, align);

		if (ret != NULL) {
			return ret;
		}

		p = p->fallback;
	} while (p != NULL);

	return NULL;
}