// Copyright 2016 The Fuchsia Authors
// Copyright (c) 2016, Google, Inc. All rights reserved
//
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file or at
// https://opensource.org/licenses/MIT

#include <lib/pow2_range_allocator.h>

#include <assert.h>
#include <debug.h>
#include <fbl/auto_call.h>
#include <fbl/auto_lock.h>
#include <pow2.h>
#include <stdlib.h>
#include <string.h>
#include <trace.h>

#define LOCAL_TRACE 0

typedef struct p2ra_block {
    struct list_node node;
    uint bucket;
    uint start;
} p2ra_block_t;

typedef struct p2ra_range {
    struct list_node node;
    uint start, len;
} p2ra_range_t;
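
/*
 * Note on representation: a p2ra_block_t describes a naturally aligned,
 * power-of-two sized run of IDs covering [start, start + 2^bucket - 1].  For
 * example, a block with start == 8 and bucket == 2 covers the four IDs
 * [8, 11].  Free blocks live in free_block_buckets[bucket], sorted by start.
 */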

static inline p2ra_block_t* p2ra_get_unused_block(p2ra_state_t* state) {
    DEBUG_ASSERT(state);

    if (!list_is_empty(&state->unused_blocks))
        return list_remove_head_type(&state->unused_blocks, p2ra_block_t, node);

    return (p2ra_block_t*)calloc(1, sizeof(p2ra_block_t));
}

static inline void p2ra_free_block_list(struct list_node* block_list) {
    p2ra_block_t* block;
    while ((block = list_remove_head_type(block_list, p2ra_block_t, node)) != NULL)
        free(block);
}

static inline void p2ra_free_range_list(struct list_node* range_list) {
    p2ra_range_t* range;
    while ((range = list_remove_head_type(range_list, p2ra_range_t, node)) != NULL)
        free(range);
}

static void p2ra_return_free_block(p2ra_state_t* state,
                                   p2ra_block_t* block,
                                   bool merge_allowed) {
    DEBUG_ASSERT(state);
    DEBUG_ASSERT(block);
    DEBUG_ASSERT(block->bucket < state->bucket_count);
    DEBUG_ASSERT(!list_in_list(&block->node));
    DEBUG_ASSERT(!(block->start & ((1u << block->bucket) - 1)));

    /* Return the block to its proper free bucket, sorted by base ID.  Start by
     * finding the block which should come after this block in the list. */
    struct list_node* l = &state->free_block_buckets[block->bucket];
    p2ra_block_t* after = list_peek_head_type(l, p2ra_block_t, node);
    uint block_len = 1u << block->bucket;

    while (after) {
        /* We do not allow ranges to overlap */
        __UNUSED uint after_len = 1u << after->bucket;
        DEBUG_ASSERT((block->start >= (after->start + after_len)) ||
                     (after->start >= (block->start + block_len)));

        if (after->start > block->start) {
            list_add_before(&after->node, &block->node);
            break;
        }

        /* Advance the iterator */
        after = list_next_type(l, &after->node, p2ra_block_t, node);
    }

    /* If no block comes after this one, it goes on the end of the list */
    if (!after)
        list_add_tail(l, &block->node);

    /* Don't merge blocks in the largest bucket. */
    if (block->bucket + 1 == state->bucket_count)
        return;

    /* Check to see if we should be merging this block into a larger aligned block. */
    p2ra_block_t* first;
    p2ra_block_t* second;
    if (block->start & ((block_len << 1) - 1)) {
        /* Odd alignment.  This might be the second block of a merge pair */
        second = block;
        first = list_prev_type(l, &block->node, p2ra_block_t, node);
    } else {
        /* Even alignment.  This might be the first block of a merge pair */
        first = block;
        second = list_next_type(l, &block->node, p2ra_block_t, node);
    }
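
    /* Worked example: with block_len == 4 the pair mask is
     * (block_len << 1) - 1 == 7.  A block starting at 12 has 12 & 7 == 4, so
     * it can only be the second (odd) half of the pair covering [8, 15],
     * while a block starting at 8 has 8 & 7 == 0 and can only be the first
     * (even) half.  Only a first/second pair whose ranges abut exactly is
     * merged below. */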

    /* Do these chunks fit together? */
    if (first && second) {
        uint first_len = 1u << first->bucket;
        if ((first->start + first_len) == second->start) {
            /* Assert that we are allowed to perform a merge.  If the caller is
             * not expecting us to have to merge anything, then there is a fatal
             * bookkeeping error somewhere */
            DEBUG_ASSERT(merge_allowed);
            DEBUG_ASSERT(first->bucket == second->bucket);

            /* Remove the two blocks' bookkeeping from their bucket */
            list_delete(&first->node);
            list_delete(&second->node);

            /* Place one half of the bookkeeping back on the unused list */
            list_add_tail(&state->unused_blocks, &second->node);

            /* Reuse the other half to track the newly merged block, and place
             * it in the next bucket size up. */
            first->bucket++;
            p2ra_return_free_block(state, first, merge_allowed);
        }
    }
}

zx_status_t p2ra_init(p2ra_state_t* state, uint max_alloc_size) {
    if (!state)
        return ZX_ERR_INVALID_ARGS;

    if (!max_alloc_size || !ispow2(max_alloc_size)) {
        TRACEF("max_alloc_size (%u) is not an integer power of two!\n", max_alloc_size);
        return ZX_ERR_INVALID_ARGS;
    }

    /* Allocate the storage for our free buckets */
    state->bucket_count = log2_uint_floor(max_alloc_size) + 1;
    const size_t size = state->bucket_count * sizeof(state->free_block_buckets[0]);
    state->free_block_buckets = static_cast<list_node*>(malloc(size));
    if (!state->free_block_buckets) {
        TRACEF("Failed to allocate storage for %u free bucket lists!\n", state->bucket_count);
        return ZX_ERR_NO_MEMORY;
    }

    /* Initialize the rest of our bookkeeping */
    mutex_init(&state->lock);
    list_initialize(&state->ranges);
    list_initialize(&state->unused_blocks);
    list_initialize(&state->allocated_blocks);
    for (uint i = 0; i < state->bucket_count; ++i)
        list_initialize(&state->free_block_buckets[i]);

    return ZX_OK;
}
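
/*
 * Usage sketch (illustrative only): the expected lifecycle is init, add one
 * or more ranges, allocate/free, then tear down.  For a caller managing IDs
 * [0, 63]:
 *
 *   p2ra_state_t state;
 *   if (p2ra_init(&state, 64) == ZX_OK) {
 *       p2ra_add_range(&state, 0, 64);
 *
 *       uint start;
 *       if (p2ra_allocate_range(&state, 8, &start) == ZX_OK) {
 *           // start is 8-aligned; the caller owns [start, start + 7]
 *           p2ra_free_range(&state, start, 8);
 *       }
 *
 *       p2ra_free(&state);
 *   }
 */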

void p2ra_free(p2ra_state_t* state) {
    DEBUG_ASSERT(state);
    DEBUG_ASSERT(state->bucket_count);
    DEBUG_ASSERT(state->free_block_buckets);
    DEBUG_ASSERT(list_is_empty(&state->allocated_blocks));

    p2ra_free_range_list(&state->ranges);
    p2ra_free_block_list(&state->unused_blocks);
    p2ra_free_block_list(&state->allocated_blocks);
    for (uint i = 0; i < state->bucket_count; ++i)
        p2ra_free_block_list(&state->free_block_buckets[i]);

    mutex_destroy(&state->lock);
    memset(state, 0, sizeof(*state));
}

zx_status_t p2ra_add_range(p2ra_state_t* state, uint range_start, uint range_len) {
    LTRACEF("Adding range [%u, %u]\n", range_start, range_start + range_len - 1);

    if (!state ||
        !range_len ||
        ((range_start + range_len) < range_start))
        return ZX_ERR_INVALID_ARGS;

    zx_status_t ret = ZX_OK;
    p2ra_range_t* new_range = NULL;
    struct list_node new_blocks;
    list_initialize(&new_blocks);

    // if we're exiting with a failure, clean up anything we've allocated
    auto ac = fbl::MakeAutoCall([&]() {
        if (ret != ZX_OK) {
            if (new_range) {
                DEBUG_ASSERT(!list_in_list(&new_range->node));
                free(new_range);
            }

            p2ra_free_block_list(&new_blocks);
        }
    });

    /* Enter the lock and check for overlap with pre-existing ranges */
    fbl::AutoLock guard(&state->lock);

    p2ra_range_t* range;
    list_for_every_entry (&state->ranges, range, p2ra_range_t, node) {
        if (((range->start >= range_start) && (range->start < (range_start + range_len))) ||
            ((range_start >= range->start) && (range_start < (range->start + range->len)))) {
            TRACEF("Range [%u, %u] overlaps with existing range [%u, %u].\n",
                   range_start, range_start + range_len - 1,
                   range->start, range->start + range->len - 1);
            ret = ZX_ERR_ALREADY_EXISTS;
            return ret;
        }
    }

    /* Allocate our range state */
    new_range = static_cast<p2ra_range_t*>(calloc(1, sizeof(*new_range)));
    if (!new_range) {
        ret = ZX_ERR_NO_MEMORY;
        return ret;
    }
    new_range->start = range_start;
    new_range->len = range_len;

    /* Break the range we were given into power of two aligned chunks, and place
     * them on the new blocks list to be added to the free-blocks buckets */
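    /* Worked example: given range_start == 3 and range_len == 10 (with a
     * large enough max_alloc_size), this loop produces four chunks: [3]
     * (size 1), [4, 7] (size 4), [8, 11] (size 4), and [12] (size 1).  Each
     * chunk is the largest naturally aligned power-of-two block that still
     * fits in what remains of the range. */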
    DEBUG_ASSERT(state->bucket_count && state->free_block_buckets);
    uint bucket = state->bucket_count - 1;
    uint csize = (1u << bucket);
    uint max_csize = csize;
    while (range_len) {
        /* Shrink the chunk size until it is aligned with the start of the
         * range, and not larger than the number of IDs we have left. */
        bool shrunk = false;
        while ((range_start & (csize - 1)) || (range_len < csize)) {
            csize >>= 1;
            bucket--;
            shrunk = true;
        }

        /* If we didn't need to shrink the chunk size, perhaps we can grow it
         * instead. */
        if (!shrunk) {
            uint tmp = csize << 1;
            while ((tmp <= max_csize) &&
                   (tmp <= range_len) &&
                   (!(range_start & (tmp - 1)))) {
                bucket++;
                csize = tmp;
                tmp <<= 1;
                DEBUG_ASSERT(bucket < state->bucket_count);
            }
        }

        /* Break off a chunk of the range */
        DEBUG_ASSERT((1u << bucket) == csize);
        DEBUG_ASSERT(bucket < state->bucket_count);
        DEBUG_ASSERT(!(range_start & (csize - 1)));
        DEBUG_ASSERT(csize <= range_len);
        DEBUG_ASSERT(csize);

        p2ra_block_t* block = p2ra_get_unused_block(state);
        if (!block) {
            TRACEF("WARNING! Failed to allocate block bookkeeping with sub-range "
                   "[%u, %u] still left to track.\n",
                   range_start, range_start + range_len - 1);
            ret = ZX_ERR_NO_MEMORY;
            return ret;
        }

        block->bucket = bucket;
        block->start = range_start;
        list_add_tail(&new_blocks, &block->node);

        range_start += csize;
        range_len -= csize;
    }

    /* Looks like we managed to allocate everything we needed to.  Go ahead and
     * add all of our newly allocated bookkeeping to the state. */
    list_add_tail(&state->ranges, &new_range->node);

    p2ra_block_t* block;
    while ((block = list_remove_head_type(&new_blocks, p2ra_block_t, node)) != NULL)
        p2ra_return_free_block(state, block, false);

    return ret;
}

zx_status_t p2ra_allocate_range(p2ra_state_t* state, uint size, uint* out_range_start) {
    if (!state || !out_range_start)
        return ZX_ERR_INVALID_ARGS;

    if (!size || !ispow2(size)) {
        TRACEF("Size (%u) is not an integer power of 2.\n", size);
        return ZX_ERR_INVALID_ARGS;
    }

    uint orig_bucket = log2_uint_floor(size);
    uint bucket = orig_bucket;
    if (bucket >= state->bucket_count) {
        TRACEF("Invalid size (%u).  Valid sizes are integer powers of 2 from [1, %u]\n",
               size, 1u << (state->bucket_count - 1));
        return ZX_ERR_INVALID_ARGS;
    }

    /* Lock state during allocation */
    p2ra_block_t* block = NULL;

    fbl::AutoLock guard(&state->lock);

    /* Find the smallest sized chunk which can hold the allocation and is
     * compatible with the requested addressing capabilities */
    while (bucket < state->bucket_count) {
        block = list_remove_head_type(&state->free_block_buckets[bucket], p2ra_block_t, node);
        if (block)
            break;
        bucket++;
    }

    /* Nothing found, unlock and get out */
    if (!block) {
        return ZX_ERR_NO_RESOURCES;
    }

    /* Looks like we have a chunk which can satisfy this allocation request.
     * Split it as many times as needed to match the requested size. */
    DEBUG_ASSERT(block->bucket == bucket);
    DEBUG_ASSERT(bucket >= orig_bucket);

    while (bucket > orig_bucket) {
        p2ra_block_t* split_block = p2ra_get_unused_block(state);

        /* If we failed to allocate bookkeeping for the split block, put the block
         * we failed to split back into the free list (merging if required),
         * then fail the allocation */
        if (!split_block) {
            TRACEF("Failed to allocate a free bookkeeping block when attempting to "
                   "split for allocation\n");
            p2ra_return_free_block(state, block, true);
            return ZX_ERR_NO_MEMORY;
        }

        DEBUG_ASSERT(bucket);
        bucket--;

        /* Cut the first chunk in half */
        block->bucket = bucket;

        /* Fill out the bookkeeping for the second half of the chunk */
        split_block->start = block->start + (1u << block->bucket);
        split_block->bucket = bucket;

        /* Return the second half of the chunk to the free pool */
        p2ra_return_free_block(state, split_block, false);
    }
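
    /* Worked example: if the only free block was [8, 15] (bucket 3) and the
     * caller asked for size 2 (bucket 1), the loop above split twice,
     * returning [12, 15] to bucket 2 and [10, 11] to bucket 1, leaving this
     * block as [8, 9], whose start is handed back to the caller. */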

    /* Success! Mark the block as allocated and return the block to the user */
    list_add_head(&state->allocated_blocks, &block->node);
    *out_range_start = block->start;

    return ZX_OK;
}

void p2ra_free_range(p2ra_state_t* state, uint range_start, uint size) {
    DEBUG_ASSERT(state);
    DEBUG_ASSERT(size && ispow2(size));

    uint bucket = log2_uint_floor(size);

    fbl::AutoLock guard(&state->lock);

    /* In a debug build, find the specific block being returned in the list of
     * allocated blocks and use it as the bookkeeping for returning to the free
     * bucket.  Because this is an O(n) operation, and serves only as a sanity
     * check, we only do this in debug builds.  In release builds, we just grab
     * any piece of bookkeeping memory off the allocated_blocks list and use
     * that instead. */
    p2ra_block_t* block;
#if DEBUG_ASSERT_IMPLEMENTED
    block = list_peek_head_type(&state->allocated_blocks, p2ra_block_t, node);
    while (block) {
        if ((block->start == range_start) && (block->bucket == bucket)) {
            list_delete(&block->node);
            break;
        }
        block = list_next_type(&state->allocated_blocks, &block->node, p2ra_block_t, node);
    }
    ASSERT(block);
#else
    block = list_remove_head_type(&state->allocated_blocks, p2ra_block_t, node);
    ASSERT(block);
    block->start = range_start;
    block->bucket = bucket;
#endif

    /* Return the block to the free buckets (merging as needed) and we are done */
    p2ra_return_free_block(state, block, true);
}
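
/*
 * Note: callers must return exactly the ranges they were granted.  If
 * range_start and size do not match a prior successful p2ra_allocate_range()
 * call, the debug-build search above will fail its ASSERT, and a release
 * build will quietly leave the free-bucket bookkeeping inconsistent.
 */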