// Copyright 2016 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#pragma once

#include <stdbool.h>
#include <stddef.h>
#include <zircon/compiler.h>
#include <zircon/types.h>

// RegionAllocator
//
// == Overview ==
// A RegionAllocator is a utility class designed to help with the bookkeeping
// involved in managing the allocation/partitioning of a 64-bit space into
// non-overlapping "Regions". In addition to the RegionAllocator, there are two
// other classes involved in the use of a RegionAllocator:
// RegionAllocator::Region and RegionAllocator::RegionPool.
//
// A Region consists of an unsigned 64-bit base address and an unsigned 64-bit
// size. A Region is considered valid iff its size is non-zero, and it does not
// wrap its 64-bit space.
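// For example, { .base = 0xFFFFFFFFFFFFF000, .size = 0x2000 } is invalid:
// base + size wraps past the end of the 64-bit space.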
//
// See the "Memory Allocation" section for a discussion of the RegionPool.
//
// RegionAllocator users can create an allocator and then add any number of
// non-overlapping Regions to its pool of regions available for allocation.
// They may then request that regions be allocated from the pool either by
// requesting that a region be allocated with a particular size/alignment, or
// by asking for a specific base/size. The RegionAllocator will manage all of
// the bookkeeping involved in breaking available regions into smaller chunks,
// tracking allocated regions, and re-merging regions when they are returned to
// the allocator.
//
// == Memory Allocation ==
// RegionAllocators require dynamically allocated memory in order to store the
// bookkeeping required for managing available regions. In order to control
// heap fragmentation and the frequency of heap interaction, a RegionPool object
// is used to allocate bookkeeping overhead in larger slabs which are carved up
// and placed on a free list to be used by a RegionAllocator. RegionPools use a
// fixed slab size (REGION_POOL_SLAB_SIZE) and are created with a maximum
// memory limit. The pool will initially allocate a single slab, but will
// attempt to grow any time bookkeeping is needed but the free list is empty
// and the allocation of another slab would not push the allocator over its
// maximum memory limit.
//
// RegionPools are ref-counted objects that may be shared by multiple
// RegionAllocators. This allows sub-systems which use multiple allocators to
// impose system-wide limits on bookkeeping overhead. A RegionPool must be
// assigned to a RegionAllocator before it can be used, and the pool may not be
// re-assigned while the allocator is using any bookkeeping from the pool.
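//
// As a sketch of sharing a pool (the names and the 64KB limit here are
// illustrative only):
//
//   auto pool = RegionAllocator::RegionPool::Create(64 << 10);
//   RegionAllocator alloc_a(pool);  // both allocators draw their bookkeeping
//   RegionAllocator alloc_b(pool);  // from the same 64KB budget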
//
// == APIs and Object lifecycle management ==
// Both C and C++ APIs are provided for using the RegionAllocator. The C++ API
// makes use of managed pointer types in order to simplify lifecycle
// management. RegionPools are managed with fbl::RefPtr, while Regions are
// handed out via unique_ptr (std::unique_ptr, or ktl::unique_ptr in the
// kernel). RegionAllocators themselves impose no lifecycle restrictions and
// may be heap allocated, stack allocated, or embedded directly in objects as
// the user sees fit. It is an error to allow a RegionAllocator to destruct
// while there are allocations in flight.
//
// The C API is a wrapper over the C++ API. Because of this, automatic
// lifecycle management is lost. Users must take care to manually return their
// allocated Regions to their RegionAllocator, to manually destroy their
// RegionAllocators, and to manually release their reference on their RegionPool
// when they are finished using them. In addition, because the C compiler
// cannot know the size of a RegionAllocator object, it is not possible to
// either stack allocate or embed a RegionAllocator via the C API. Dynamic
// allocation is the only option.
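//
// A minimal sketch of the C lifecycle follows (the variable names and the
// 32KB limit are illustrative, not part of the API):
//
//   ralloc_pool_t* pool = NULL;
//   ralloc_allocator_t* alloc = NULL;
//
//   if (ralloc_create_pool(32 << 10, &pool) != ZX_OK) { /* handle error */ }
//   if (ralloc_create_allocator(&alloc) != ZX_OK) { /* handle error */ }
//   ralloc_set_region_pool(alloc, pool);
//
//   const ralloc_region_t range = { .base = 0x10000, .size = 0x10000 };
//   ralloc_add_region(alloc, &range, false);
//
//   const ralloc_region_t* r = ralloc_get_sized_region(alloc, 256, 16);
//   if (r != NULL) {
//       /* ... use r->base and r->size ... */
//       ralloc_put_region(r);  /* manually return the region */
//   }
//
//   ralloc_destroy_allocator(alloc);  /* tear down: allocator first, */
//   ralloc_release_pool(pool);        /* then release the pool reference */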
//
// == Dependencies ==
// The RegionAllocator depends only on malloc/free and fbl. The fbl
// dependency is not visible to users of the C API. new/delete implementations
// are provided internally; no global new/delete behavior needs to be defined
// by the user.
//
// == Thread Safety ==
// RegionAllocator and RegionPools use fbl::Mutex objects to provide thread
// safety in multi-threaded environments. As such, RegionAllocators are not
// currently suitable for use in code which may run at IRQ context, or which
// must never block.
//
// Each RegionAllocator has its own mutex allowing for concurrent access across
// multiple allocators, even when the allocators share the same RegionPool.
// RegionPools also hold their own mutex which may be obtained by an Allocator
// while holding the Allocator's Mutex.
//
// == Simple Usage Example ==
//
// /* Create a pool and assign it to a stack allocated allocator. Limit the
//  * bookkeeping memory to 32KB. The pool allocates its first slab up front
//  * and grows one slab at a time, up to the limit.
//  */
// RegionAllocator alloc(RegionAllocator::RegionPool::Create(32 << 10));
//
// /* Add regions to the pool which can be allocated from */
// alloc.AddRegion({ .base = 0xC0000000, .size = 0x40000000 });   // [3GB, 4GB)
// alloc.AddRegion({ .base = 0x4000000000, .size = 0x40000000 }); // [256GB, 257GB)
//
// /* Grab some specific regions out of the available regions.
//  * Note: auto here will expand to RegionAllocator::Region::UPtr. Feel free
//  * to add your own using statement to lessen the C++ naming pain.
//  */
// auto r1 = alloc.GetRegion({ 0xC0100000, 0x100000 });   // [3GB + 1MB, 3GB + 2MB)
// auto r2 = alloc.GetRegion({ 0x4000100000, 0x100000 }); // [256GB + 1MB, 256GB + 2MB)
//
// /* Grab some pointer aligned regions of various sizes */
// auto r3 = alloc.GetRegion(1024);
// auto r4 = alloc.GetRegion(75);
// auto r5 = alloc.GetRegion(80000);
//
// /* Grab some page aligned regions of various sizes */
// auto r6 = alloc.GetRegion(1024, 4 << 10);
// auto r7 = alloc.GetRegion(75, 4 << 10);
// auto r8 = alloc.GetRegion(80000, 4 << 10);
//
// /* Print some stuff about some of the allocations */
// ZX_DEBUG_ASSERT(r3 != nullptr);
// printf("R3 base %" PRIx64 " size %" PRIx64 "\n", r3->base, r3->size);
//
// ZX_DEBUG_ASSERT(r8 != nullptr);
// printf("R8 base %" PRIx64 " size %" PRIx64 "\n", r8->base, r8->size);
//
// /* No need to clean up. Regions will automatically be returned to the
//  * allocator as they go out of scope. Then the allocator will return all of
//  * its available regions to the pool when it goes out of scope. Finally, the
//  * pool will free all of its memory as the allocator releases its reference
//  * to the pool.
//  */
//
__BEGIN_CDECLS

// C API

// C Version of RegionAllocator::RegionPool
//
// This type is opaque to users; 'struct ralloc_pool' is not actually
// defined anywhere, but this provides a distinct type for pointers.
typedef struct ralloc_pool ralloc_pool_t;

// C Version of RegionAllocator
//
// This type is opaque to users; 'struct ralloc_allocator' is not actually
// defined anywhere, but this provides a distinct type for pointers.
typedef struct ralloc_allocator ralloc_allocator_t;

// C Version of RegionAllocator::Region
typedef struct ralloc_region {
    uint64_t base;
    uint64_t size;
} ralloc_region_t;

// RegionAllocator::RegionPool interface. Valid operations are...
//
// ++ Create (specify the maximum memory limit at the time of creation)
// ++ Release (release the C reference to the object)
//
#define REGION_POOL_SLAB_SIZE (4u << 10)
zx_status_t ralloc_create_pool(size_t max_memory, ralloc_pool_t** out_pool);
void ralloc_release_pool(ralloc_pool_t* pool);

// RegionAllocator interface. Valid operations are...
//
// ++ Create
// ++ SetRegionPool
// ++ Reset (returns all regions which are available for allocation to the pool)
// ++ Destroy
// ++ AddRegion (adds a region to the set of regions available for allocation)
// ++ SubtractRegion (subtracts a region from the set of regions available for allocation)
// ++ GetBySize (allocates a region based on size/alignment requirements)
// ++ GetSpecific (allocates a region based on specific base/size requirements)
//
zx_status_t ralloc_create_allocator(ralloc_allocator_t** out_allocator);
zx_status_t ralloc_set_region_pool(ralloc_allocator_t* allocator, ralloc_pool_t* pool);
void ralloc_reset_allocator(ralloc_allocator_t* allocator);
void ralloc_destroy_allocator(ralloc_allocator_t* allocator);
zx_status_t ralloc_add_region(ralloc_allocator_t* allocator,
                              const ralloc_region_t* region,
                              bool allow_overlap);
zx_status_t ralloc_sub_region(ralloc_allocator_t* allocator,
                              const ralloc_region_t* region,
                              bool allow_incomplete);

zx_status_t ralloc_get_sized_region_ex(ralloc_allocator_t* allocator,
                                       uint64_t size,
                                       uint64_t alignment,
                                       const ralloc_region_t** out_region);

zx_status_t ralloc_get_specific_region_ex(ralloc_allocator_t* allocator,
                                          const ralloc_region_t* requested_region,
                                          const ralloc_region_t** out_region);

// Wrapper versions of the _ex functions for those who don't care about the
// specific reason for failure (NULL is returned on failure).
static inline const ralloc_region_t* ralloc_get_sized_region(ralloc_allocator_t* allocator,
                                                             uint64_t size,
                                                             uint64_t alignment) {
    const ralloc_region_t* ret = NULL;
    ralloc_get_sized_region_ex(allocator, size, alignment, &ret);
    return ret;
}

static inline const ralloc_region_t* ralloc_get_specific_region(
        ralloc_allocator_t* allocator, const ralloc_region_t* requested_region) {
    const ralloc_region_t* ret = NULL;
    ralloc_get_specific_region_ex(allocator, requested_region, &ret);
    return ret;
}

// Report the number of regions which are currently allocated, or which are
// available for allocation.
size_t ralloc_get_allocated_region_count(const ralloc_allocator_t* allocator);
size_t ralloc_get_available_region_count(const ralloc_allocator_t* allocator);

// Walk the allocated region list and call region_walk_cb for each region
// found. The walk stops early if the callback returns false.
typedef bool (*region_walk_cb)(const ralloc_region_t* region, void* ctx);
zx_status_t ralloc_walk_allocated_regions(const ralloc_allocator_t* allocator,
                                          region_walk_cb cb,
                                          void* ctx);
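
// A sketch of a walk callback (the names count_cb and count are illustrative;
// alloc is an allocator created as in the lifecycle sketch above):
//
//   static bool count_cb(const ralloc_region_t* r, void* ctx) {
//       (void)r;             /* counting only; the region contents are unused */
//       (*(size_t*)ctx)++;
//       return true;         /* returning false would stop the walk early */
//   }
//
//   size_t count = 0;
//   ralloc_walk_allocated_regions(alloc, count_cb, &count);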

// RegionAllocator::Region interface. In addition to the base/size members
// which may be used to determine the location of the allocation, valid
// operations are...
//
// ++ Put (return an allocated region to its allocator)
//
void ralloc_put_region(const ralloc_region_t* region);

__END_CDECLS

#ifdef __cplusplus

// The fbl synchronization headers are C++-only, so they are pulled in here
// rather than in the C-visible portion of this header.
#include <fbl/auto_lock.h>
#include <fbl/intrusive_single_list.h>
#include <fbl/intrusive_wavl_tree.h>
#include <fbl/mutex.h>
#include <fbl/ref_counted.h>
#include <fbl/ref_ptr.h>
#include <fbl/slab_allocator.h>

#ifdef _KERNEL
#include <ktl/unique_ptr.h>
#else
#include <memory>
#endif

#include <utility>

// C++ API
class RegionAllocator {
public:
    class Region;
    using RegionSlabTraits = fbl::ManualDeleteSlabAllocatorTraits<Region*, REGION_POOL_SLAB_SIZE>;

    class Region : public ralloc_region_t,
                   public fbl::SlabAllocated<RegionSlabTraits> {
    private:
        struct RegionDeleter;

    public:
#ifdef _KERNEL
        using UPtr = ktl::unique_ptr<const Region, RegionDeleter>;
#else
        using UPtr = std::unique_ptr<const Region, RegionDeleter>;
#endif

    private:
        using WAVLTreeNodeState = fbl::WAVLTreeNodeState<Region*>;
        using KeyTraitsSortByBase = fbl::DefaultKeyedObjectTraits<uint64_t, Region>;

        struct WAVLTreeNodeTraitsSortByBase {
            static WAVLTreeNodeState& node_state(Region& r) { return r.ns_tree_sort_by_base_; }
        };

        struct WAVLTreeNodeTraitsSortBySize {
            static WAVLTreeNodeState& node_state(Region& r) { return r.ns_tree_sort_by_size_; }
        };

        struct KeyTraitsSortBySize {
            static const ralloc_region_t& GetKey(const Region& r) { return r; }

            static bool LessThan(const ralloc_region_t& k1, const ralloc_region_t& k2) {
                return (k1.size < k2.size) || ((k1.size == k2.size) && (k1.base < k2.base));
            }

            static bool EqualTo(const ralloc_region_t& k1, const ralloc_region_t& k2) {
                return (k1.size == k2.size) && (k1.base == k2.base);
            }
        };

        // When a user's unique_ptr<> reference to this region goes out of
        // scope, don't actually delete the region. Instead, recycle it back
        // into the set of available regions. The memory for the bookkeeping
        // will eventually be deleted when it merges with another available
        // region, or when the allocator finally shuts down.
        struct RegionDeleter {
            void operator()(const Region* region) const noexcept {
                // Note: The external std::unique_ptrs that we hand out to our
                // users are deliberately limited to being const Region*s. For
                // our users, regions are read only constructs; they check them
                // out from the allocator and can read the bookkeeping values,
                // but not change them.
                //
                // When the region is finally returned to our pool via the
                // deleter, the std::unique_ptr will hand it to the deleter
                // class as a const pointer (it has no choice). We need to cast
                // that const away here. When the region re-joins the pool, its
                // bookkeeping information (which is logically owned by the
                // pool) needs to be mutable in order to merge with other
                // regions in the pool.
                ZX_DEBUG_ASSERT(region->owner_ != nullptr);
                Region* r = const_cast<Region*>(region);
                r->owner_->ReleaseRegion(r);
            }
        };

        using WAVLTreeSortByBase = fbl::WAVLTree<uint64_t, Region*,
                                                 KeyTraitsSortByBase,
                                                 WAVLTreeNodeTraitsSortByBase>;
        using WAVLTreeSortBySize = fbl::WAVLTree<ralloc_region_t, Region*,
                                                 KeyTraitsSortBySize,
                                                 WAVLTreeNodeTraitsSortBySize>;

        // Used by SortByBase key traits
        uint64_t GetKey() const { return base; }

        // So many friends! I'm the most popular class in the build!!
        friend class RegionAllocator;
        friend class RegionPool;
        friend KeyTraitsSortByBase;
        friend struct KeyTraitsSortBySize;
        friend struct WAVLTreeNodeTraitsSortByBase;
        friend struct WAVLTreeNodeTraitsSortBySize;
        friend class fbl::SlabAllocator<RegionSlabTraits>;

        // Regions can only be placement new'ed by the RegionPool slab
        // allocator. They cannot be copied, assigned, or deleted. Externally,
        // they should only be handled by their unique_ptr<>s.
        explicit Region(RegionAllocator* owner) : owner_(owner) { }
        ~Region() = default;
        DISALLOW_COPY_ASSIGN_AND_MOVE(Region);

        RegionAllocator* owner_;
        WAVLTreeNodeState ns_tree_sort_by_base_;
        WAVLTreeNodeState ns_tree_sort_by_size_;
    };

    class RegionPool : public fbl::RefCounted<RegionPool>,
                       public fbl::SlabAllocator<RegionSlabTraits> {
    public:
        using RefPtr = fbl::RefPtr<RegionPool>;

        static constexpr size_t SLAB_SIZE = RegionSlabTraits::SLAB_SIZE;

        static RefPtr Create(size_t max_memory);

    private:
        // Only our RefPtr's are allowed to destroy us.
        friend fbl::RefPtr<RegionPool>;

        // Attempt to allocate at least one slab up front when we are created.
        explicit RegionPool(size_t num_slabs)
            : fbl::SlabAllocator<RegionSlabTraits>(num_slabs, true) { }

        ~RegionPool() { }

        // No one may copy, assign or move us.
        DISALLOW_COPY_ASSIGN_AND_MOVE(RegionPool);
    };

    RegionAllocator() { }
    explicit RegionAllocator(const RegionPool::RefPtr& region_pool)
        : region_pool_(region_pool) { }
    explicit RegionAllocator(RegionPool::RefPtr&& region_pool)
        : region_pool_(std::move(region_pool)) { }
    RegionAllocator(const RegionAllocator& c) = delete;
    RegionAllocator& operator=(const RegionAllocator& c) = delete;

    ~RegionAllocator();

    // Set the RegionPool this RegionAllocator will obtain bookkeeping structures from.
    //
    // Possible return values
    // ++ ZX_ERR_BAD_STATE : The RegionAllocator currently has a RegionPool
    //    assigned and currently has allocations from this pool.
    zx_status_t SetRegionPool(const RegionPool::RefPtr& region_pool) __TA_EXCLUDES(alloc_lock_);
    zx_status_t SetRegionPool(RegionPool::RefPtr&& region_pool) __TA_EXCLUDES(alloc_lock_) {
        RegionPool::RefPtr ref(std::move(region_pool));
        return SetRegionPool(ref);
    }
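
    // A sketch of assigning a pool after construction (the 4KB limit is
    // illustrative only):
    //
    //   RegionAllocator ra;
    //   zx_status_t status =
    //       ra.SetRegionPool(RegionAllocator::RegionPool::Create(4 << 10));
    //   ZX_DEBUG_ASSERT(status == ZX_OK);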

    // Reset allocator. Returns all available regions back to the RegionPool.
    // Has no effect on currently allocated regions.
    void Reset() __TA_EXCLUDES(alloc_lock_);

    // Add a region to the set of allocatable regions.
    //
    // If allow_overlap is false, the added region may not overlap with any
    // previously added region and will be rejected if it does. If
    // allow_overlap is true, the added region will be union'ed with existing
    // available regions, provided it does not intersect any currently allocated
    // region.
    //
    // Possible return values
    // ++ ZX_ERR_BAD_STATE : Allocator has no RegionPool assigned.
    // ++ ZX_ERR_NO_MEMORY : not enough bookkeeping memory available in our
    //    assigned region pool to add the region.
    // ++ ZX_ERR_INVALID_ARGS : One of the following conditions applies.
    // ++++ The region is invalid (wraps the space, or size is zero)
    // ++++ The region being added intersects one or more currently
    //      allocated regions.
    // ++++ The region being added intersects one or more of the currently
    //      available regions, and allow_overlap is false.
    zx_status_t AddRegion(const ralloc_region_t& region, bool allow_overlap = false)
        __TA_EXCLUDES(alloc_lock_);

    // Subtract a region from the set of allocatable regions.
    //
    // If allow_incomplete is false, the subtracted region must exist entirely
    // within the set of available regions. If allow_incomplete is true, the
    // subtracted region will remove any portion of any available region it
    // intersects with (see the sketch following this method's declaration).
    //
    // Regardless of the value of the allow_incomplete flag, it is illegal to
    // attempt to subtract a region which intersects any currently allocated
    // region.
    //
    // Possible return values
    // ++ ZX_ERR_BAD_STATE : Allocator has no RegionPool assigned.
    // ++ ZX_ERR_NO_MEMORY : not enough bookkeeping memory available in our
    //    assigned region pool to subtract the region.
    // ++ ZX_ERR_INVALID_ARGS : One of the following conditions applies.
    // ++++ The region is invalid (wraps the space, or size is zero)
    // ++++ The region being subtracted intersects one or more currently
    //      allocated regions.
    // ++++ The region being subtracted intersects portions of the space which
    //      are absent from both the allocated and available sets, and
    //      allow_incomplete is false.
    zx_status_t SubtractRegion(const ralloc_region_t& region, bool allow_incomplete = false)
        __TA_EXCLUDES(alloc_lock_);
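
    // A sketch of punching a hole in an available range (addresses are
    // illustrative only):
    //
    //   RegionAllocator ra(RegionAllocator::RegionPool::Create(4 << 10));
    //   ra.AddRegion({ .base = 0x0000, .size = 0x10000 });      // [0, 64KB)
    //   ra.SubtractRegion({ .base = 0x4000, .size = 0x1000 });  // remove [16KB, 20KB)
    //
    //   // This overlaps the hole just punched, so it succeeds only because
    //   // allow_incomplete is true.
    //   ra.SubtractRegion({ .base = 0x4000, .size = 0x2000 }, true);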

    // Get a region out of the set of currently available regions which has a
    // specified size and alignment. Note: the alignment must be a power of
    // two. Pass 1 if alignment does not matter.
    //
    // Possible return values
    // ++ ZX_ERR_BAD_STATE : Allocator has no RegionPool assigned.
    // ++ ZX_ERR_NO_MEMORY : not enough bookkeeping memory available in our
    //    assigned region pool to perform the allocation.
    // ++ ZX_ERR_INVALID_ARGS : size is zero, or alignment is not a power of two.
    // ++ ZX_ERR_NOT_FOUND : No suitable region could be found in the set of
    //    currently available regions which can satisfy the request.
    zx_status_t GetRegion(uint64_t size, uint64_t alignment, Region::UPtr& out_region)
        __TA_EXCLUDES(alloc_lock_);

    // Get a region with a specific location and size out of the set of
    // currently available regions.
    //
    // Possible return values
    // ++ ZX_ERR_BAD_STATE : Allocator has no RegionPool assigned.
    // ++ ZX_ERR_NO_MEMORY : not enough bookkeeping memory available in our
    //    assigned region pool to perform the allocation.
    // ++ ZX_ERR_INVALID_ARGS : The size of the requested region is zero.
    // ++ ZX_ERR_NOT_FOUND : No suitable region could be found in the set of
    //    currently available regions which can satisfy the request.
    zx_status_t GetRegion(const ralloc_region_t& requested_region, Region::UPtr& out_region)
        __TA_EXCLUDES(alloc_lock_);

    // Helper which defaults the alignment of a size/alignment based allocation
    // to pointer-aligned.
    zx_status_t GetRegion(uint64_t size, Region::UPtr& out_region) __TA_EXCLUDES(alloc_lock_) {
        return GetRegion(size, sizeof(void*), out_region);
    }

    // Helper versions of the GetRegion methods for those who don't care
    // about the specific reason for failure (nullptr will be returned on
    // failure).
    Region::UPtr GetRegion(uint64_t size, uint64_t alignment) __TA_EXCLUDES(alloc_lock_) {
        Region::UPtr ret;
        GetRegion(size, alignment, ret);
        return ret;
    }

    Region::UPtr GetRegion(uint64_t size) __TA_EXCLUDES(alloc_lock_) {
        Region::UPtr ret;
        GetRegion(size, ret);
        return ret;
    }

    Region::UPtr GetRegion(const ralloc_region_t& requested_region) __TA_EXCLUDES(alloc_lock_) {
        Region::UPtr ret;
        GetRegion(requested_region, ret);
        return ret;
    }
    size_t AllocatedRegionCount() const __TA_EXCLUDES(alloc_lock_) {
        fbl::AutoLock alloc_lock(&alloc_lock_);
        return allocated_regions_by_base_.size();
    }

    size_t AvailableRegionCount() const __TA_EXCLUDES(alloc_lock_) {
        fbl::AutoLock alloc_lock(&alloc_lock_);
        return avail_regions_by_base_.size();
    }

    // Walk the allocated regions and call the user provided callback for each
    // entry. Stop when out of entries or the callback returns false.
    //
    // *** It is absolutely required that the user callback not call into any other
    // RegionAllocator public APIs, and should likely not acquire any locks of any
    // kind. This method cannot protect against deadlocks and lock inversions that
    // are possible by acquiring the allocation lock before calling the user provided
    // callback.
    template <typename WalkCallback>
    void WalkAllocatedRegions(WalkCallback&& cb) const __TA_EXCLUDES(alloc_lock_) {
        fbl::AutoLock alloc_lock(&alloc_lock_);
        for (const auto& region : allocated_regions_by_base_) {
            if (!cb(&region)) {
                break;
            }
        }
    }
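
    // A sketch of a walk using a lambda (PRIx64 comes from <inttypes.h>; the
    // allocator name ra is illustrative):
    //
    //   ra.WalkAllocatedRegions([](const ralloc_region_t* r) -> bool {
    //       printf("allocated [0x%" PRIx64 ", 0x%" PRIx64 ")\n",
    //              r->base, r->base + r->size);
    //       return true;  // keep walking
    //   });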

private:
    zx_status_t AddSubtractSanityCheckLocked(const ralloc_region_t& region)
        __TA_REQUIRES(alloc_lock_);
    void ReleaseRegion(Region* region) __TA_EXCLUDES(alloc_lock_);
    void AddRegionToAvailLocked(Region* region, bool allow_overlap = false)
        __TA_REQUIRES(alloc_lock_);

    zx_status_t AllocFromAvailLocked(Region::WAVLTreeSortBySize::iterator source,
                                     Region::UPtr& out_region,
                                     uint64_t base,
                                     uint64_t size) __TA_REQUIRES(alloc_lock_);

    bool IntersectsLocked(const Region::WAVLTreeSortByBase& tree,
                          const ralloc_region_t& region) __TA_REQUIRES(alloc_lock_);

    /* Locking notes:
     *
     * alloc_lock_ protects all of the bookkeeping members of the
     * RegionAllocator. This includes the allocated index, the available
     * indices (by base and by size) and the region pool.
     *
     * The alloc_lock_ may be held while calling into a RegionAllocator's
     * assigned RegionPool, but code from the RegionPool will never call into
     * the RegionAllocator.
     */
    mutable fbl::Mutex alloc_lock_;
    Region::WAVLTreeSortByBase allocated_regions_by_base_ __TA_GUARDED(alloc_lock_);
    Region::WAVLTreeSortByBase avail_regions_by_base_ __TA_GUARDED(alloc_lock_);
    Region::WAVLTreeSortBySize avail_regions_by_size_ __TA_GUARDED(alloc_lock_);
    RegionPool::RefPtr region_pool_ __TA_GUARDED(alloc_lock_);
};

// If this is C++, clear out this pre-processor constant. People can get to the
// constant using more C++-ish methods (like RegionAllocator::RegionPool::SLAB_SIZE)
#undef REGION_POOL_SLAB_SIZE

#endif  // ifdef __cplusplus