// Copyright 2016 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <zircon/listnode.h> // for containerof
#include <region-alloc/region-alloc.h>

extern "C" {

ralloc_create_pool(size_t max_memory,ralloc_pool_t ** out_pool)10 zx_status_t ralloc_create_pool(size_t max_memory, ralloc_pool_t** out_pool) {
11 if (out_pool == nullptr)
12 return ZX_ERR_INVALID_ARGS;
13
14 auto pool = RegionAllocator::RegionPool::Create(max_memory);
15 if (pool == nullptr)
16 return ZX_ERR_NO_MEMORY;
17
18 // Everything looks good. Deliberately leak our reference out into the cold
19 // cruel world of C. I sure hope that it comes back some day...
20 *out_pool = reinterpret_cast<ralloc_pool_t*>(pool.leak_ref());
21
22 return ZX_OK;
23 }
24
ralloc_release_pool(ralloc_pool_t * pool)25 void ralloc_release_pool(ralloc_pool_t* pool) {
26 ZX_DEBUG_ASSERT(pool != nullptr);
27
28 // Reclaim our reference back from the land of C by turning the pointer
29 // back into a RefPtr, then deliberately let it go out of scope, dropping
30 // its reference and destructing the RegionPool if need be.
31 auto release_me = fbl::internal::MakeRefPtrNoAdopt(
32 reinterpret_cast<RegionAllocator::RegionPool*>(pool));
33 }
34
ralloc_create_allocator(ralloc_allocator_t ** out_allocator)35 zx_status_t ralloc_create_allocator(ralloc_allocator_t** out_allocator) {
36 if (!out_allocator)
37 return ZX_ERR_INVALID_ARGS;
38
39 void* mem = ::malloc(sizeof(RegionAllocator));
40 if (!mem)
41 return ZX_ERR_NO_MEMORY;
42
43 *out_allocator = reinterpret_cast<ralloc_allocator_t*>(new (mem) RegionAllocator());
44 return ZX_OK;
45 }
46
ralloc_set_region_pool(ralloc_allocator_t * allocator,ralloc_pool * pool)47 zx_status_t ralloc_set_region_pool(ralloc_allocator_t* allocator, ralloc_pool* pool) {
48 if (!allocator || !pool)
49 return ZX_ERR_INVALID_ARGS;
50
51 RegionAllocator& alloc = *(reinterpret_cast<RegionAllocator*>(allocator));
52
53 // Turn our C-style pointer back into a RefPtr<> without adding a reference,
54 // then use it to call the RegionAllocator::SetRegionPool method. Finally,
55 // deliberately leak the reference again so we are not accidentally removing
56 // the unmanaged reference held by our C user.
57 auto pool_ref = fbl::internal::MakeRefPtrNoAdopt(
58 reinterpret_cast<RegionAllocator::RegionPool*>(pool));
59 zx_status_t ret = alloc.SetRegionPool(pool_ref);
60 __UNUSED auto leak = pool_ref.leak_ref();
61
62 return ret;
63 }
64
ralloc_reset_allocator(ralloc_allocator_t * allocator)65 void ralloc_reset_allocator(ralloc_allocator_t* allocator) {
66 ZX_DEBUG_ASSERT(allocator);
67 reinterpret_cast<RegionAllocator*>(allocator)->Reset();
68 }
69
ralloc_destroy_allocator(ralloc_allocator_t * allocator)70 void ralloc_destroy_allocator(ralloc_allocator_t* allocator) {
71 ZX_DEBUG_ASSERT(allocator);
72
73 RegionAllocator* alloc = reinterpret_cast<RegionAllocator*>(allocator);
74 alloc->~RegionAllocator();
75 ::free(alloc);
76 }
77
ralloc_add_region(ralloc_allocator_t * allocator,const ralloc_region_t * region,bool allow_overlap)78 zx_status_t ralloc_add_region(ralloc_allocator_t* allocator,
79 const ralloc_region_t* region,
80 bool allow_overlap) {
81 if (!allocator || !region)
82 return ZX_ERR_INVALID_ARGS;
83
84 return reinterpret_cast<RegionAllocator*>(allocator)->AddRegion(*region, allow_overlap);
85 }
86
ralloc_sub_region(ralloc_allocator_t * allocator,const ralloc_region_t * region,bool allow_incomplete)87 zx_status_t ralloc_sub_region(ralloc_allocator_t* allocator,
88 const ralloc_region_t* region,
89 bool allow_incomplete) {
90 if (!allocator || !region)
91 return ZX_ERR_INVALID_ARGS;
92
93 return reinterpret_cast<RegionAllocator*>(allocator)->SubtractRegion(*region, allow_incomplete);
94 }
95
ralloc_get_sized_region_ex(ralloc_allocator_t * allocator,uint64_t size,uint64_t alignment,const ralloc_region_t ** out_region)96 zx_status_t ralloc_get_sized_region_ex(ralloc_allocator_t* allocator,
97 uint64_t size,
98 uint64_t alignment,
99 const ralloc_region_t** out_region) {
100 if (!allocator || !out_region)
101 return ZX_ERR_INVALID_ARGS;
102
103 RegionAllocator::Region::UPtr managed_region;
104 RegionAllocator& alloc = *(reinterpret_cast<RegionAllocator*>(allocator));
105 zx_status_t result = alloc.GetRegion(size, alignment, managed_region);
106
107 if (result == ZX_OK) {
108 // Everything looks good. Detach the managed_region our unique_ptr<>
109 // and send the unmanaged pointer to the inner ralloc_region_t back
110 // to the caller.
111 ZX_DEBUG_ASSERT(managed_region != nullptr);
112 const RegionAllocator::Region* raw_region = managed_region.release();
113 *out_region = static_cast<const ralloc_region_t*>(raw_region);
114 } else {
115 ZX_DEBUG_ASSERT(managed_region == nullptr);
116 *out_region = nullptr;
117 }
118
119 return result;
120 }
121
ralloc_get_specific_region_ex(ralloc_allocator_t * allocator,const ralloc_region_t * requested_region,const ralloc_region_t ** out_region)122 zx_status_t ralloc_get_specific_region_ex(
123 ralloc_allocator_t* allocator,
124 const ralloc_region_t* requested_region,
125 const ralloc_region_t** out_region) {
126 if (!allocator || !requested_region || !out_region)
127 return ZX_ERR_INVALID_ARGS;
128
129 RegionAllocator::Region::UPtr managed_region;
130 RegionAllocator& alloc = *(reinterpret_cast<RegionAllocator*>(allocator));
131 zx_status_t result = alloc.GetRegion(*requested_region, managed_region);
132
133 if (result == ZX_OK) {
134 // Everything looks good. Detach the managed_region our unique_ptr<>
135 // and send the unmanaged pointer to the inner ralloc_region_t back
136 // to the caller.
137 ZX_DEBUG_ASSERT(managed_region != nullptr);
138 const RegionAllocator::Region* raw_region = managed_region.release();
139 *out_region = static_cast<const ralloc_region_t*>(raw_region);
140 } else {
141 ZX_DEBUG_ASSERT(managed_region == nullptr);
142 *out_region = nullptr;
143 }
144
145 return result;
146 }
147
ralloc_get_allocated_region_count(const ralloc_allocator_t * allocator)148 size_t ralloc_get_allocated_region_count(const ralloc_allocator_t* allocator) {
149 ZX_DEBUG_ASSERT(allocator != nullptr);
150 const RegionAllocator& alloc = *(reinterpret_cast<const RegionAllocator*>(allocator));
151 return alloc.AllocatedRegionCount();
152 }
153
ralloc_get_available_region_count(const ralloc_allocator_t * allocator)154 size_t ralloc_get_available_region_count(const ralloc_allocator_t* allocator) {
155 ZX_DEBUG_ASSERT(allocator != nullptr);
156 const RegionAllocator& alloc = *(reinterpret_cast<const RegionAllocator*>(allocator));
157 return alloc.AvailableRegionCount();
158 }
159
ralloc_put_region(const ralloc_region_t * region)160 void ralloc_put_region(const ralloc_region_t* region) {
161 ZX_DEBUG_ASSERT(region);
162
163 // Reclaim our reference back from the land of C by turning the pointer
164 // back into a unique_ptr, then deliberately let it go out of scope, destroying the
165 // RegionAllocator::Region in the process..
166 auto raw_region = static_cast<const RegionAllocator::Region*>(region);
167 RegionAllocator::Region::UPtr release_me(raw_region);
168 }
169
ralloc_walk_allocated_regions(const ralloc_allocator_t * allocator,region_walk_cb cb,void * ctx)170 zx_status_t ralloc_walk_allocated_regions(const ralloc_allocator_t* allocator,
171 region_walk_cb cb,
172 void* ctx) {
173 ZX_DEBUG_ASSERT(allocator != nullptr);
174 if (cb == NULL) {
175 return ZX_ERR_INVALID_ARGS;
176 }
177
178 const RegionAllocator& alloc = *(reinterpret_cast<const RegionAllocator*>(allocator));
179 alloc.WalkAllocatedRegions([cb, ctx](const ralloc_region_t* r) -> bool {
180 return cb(r, ctx);
181 });
182
183 return ZX_OK;
184 }
185
}  // extern "C"