// Copyright 2016 The Fuchsia Authors
//
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file or at
// https://opensource.org/licenses/MIT

#include <fbl/arena.h>

#include <assert.h>
#include <err.h>
#include <new>
#include <stdio.h>
#include <string.h>
#include <trace.h>

#include <vm/vm.h>
#include <vm/vm_aspace.h>
#include <vm/vm_object_paged.h>
#include <fbl/auto_call.h>

#define LOCAL_TRACE 0

namespace fbl {

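// Rough usage sketch (illustrative only; MyObject and the capacity are
// hypothetical names, not defined in this file). An Arena is Init()ed once
// with a fixed slot size and capacity, then hands out and reclaims slots:
//
//   fbl::Arena arena;
//   if (arena.Init("objects", sizeof(MyObject), 1024u) == ZX_OK) {
//       void* slot = arena.Alloc();  // nullptr once all slots are in use
//       if (slot != nullptr) {
//           // ... placement-construct and use a MyObject in |slot| ...
//           arena.Free(slot);
//       }
//   }
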
Arena::~Arena() {
    free_.clear();
    if (vmar_ != nullptr) {
        // Unmap all of our memory.
        // The VMAR's parent holds a ref, so it won't be destroyed
        // automatically when we're destroyed.
        vmar_->Destroy();
        vmar_.reset();
    }
}

zx_status_t Arena::Init(const char* name, size_t ob_size, size_t count) {
    if ((ob_size == 0) || (ob_size > PAGE_SIZE))
        return ZX_ERR_INVALID_ARGS;
    if (!count)
        return ZX_ERR_INVALID_ARGS;
    LTRACEF("Arena '%s': ob_size %zu, count %zu\n", name, ob_size, count);

    // Carve out the memory:
    // - Kernel root VMAR
    //   + Sub VMAR
    //     + Control pool mapping
    //     + Unmapped guard page
    //     + Data pool mapping
    // Both mappings are backed by a single VMO.
    const size_t control_mem_sz = ROUNDUP(count * sizeof(Node), PAGE_SIZE);
    const size_t data_mem_sz = ROUNDUP(count * ob_size, PAGE_SIZE);
    const size_t guard_sz = PAGE_SIZE;
    const size_t vmo_sz = control_mem_sz + data_mem_sz;
    const size_t vmar_sz = vmo_sz + guard_sz;
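
    // Illustrative layout of the sub-VMAR, using the offsets chosen below:
    //   [0, control_mem_sz)                           control pool mapping
    //   [control_mem_sz, control_mem_sz + guard_sz)   unmapped guard page
    //   [control_mem_sz + guard_sz, vmar_sz)          data pool mapping
    // The guard page consumes address space only, so the backing VMO needs
    // just vmo_sz = control_mem_sz + data_mem_sz bytes.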

    // Create the VMO.
    fbl::RefPtr<VmObject> vmo;
    zx_status_t status = VmObjectPaged::Create(PMM_ALLOC_FLAG_ANY, 0u, vmo_sz, &vmo);
    if (status != ZX_OK) {
        LTRACEF("Arena '%s': can't create %zu-byte VMO\n", name, vmo_sz);
        return status;
    }

    auto kspace = VmAspace::kernel_aspace();
    DEBUG_ASSERT(kspace != nullptr);
    auto root_vmar = kspace->RootVmar();
    DEBUG_ASSERT(root_vmar != nullptr);

    char vname[32];
    snprintf(vname, sizeof(vname), "arena:%s", name);
    vmo->set_name(vname, sizeof(vname));

    // Create the VMAR.
    fbl::RefPtr<VmAddressRegion> vmar;
    zx_status_t st = root_vmar->CreateSubVmar(0, // offset (ignored)
                                              vmar_sz,
                                              false, // align_pow2
                                              VMAR_FLAG_CAN_MAP_READ |
                                                  VMAR_FLAG_CAN_MAP_WRITE |
                                                  VMAR_FLAG_CAN_MAP_SPECIFIC,
                                              vname, &vmar);
    if (st != ZX_OK || vmar == nullptr) {
        LTRACEF("Arena '%s': can't create %zu-byte VMAR (%d)\n",
                name, vmar_sz, st);
        return ZX_ERR_NO_MEMORY;
    }
    // The VMAR's parent holds a ref, so it won't be destroyed
    // automatically when we return.
    auto destroy_vmar = fbl::MakeAutoCall([&vmar]() { vmar->Destroy(); });

    // Create a mapping for the control pool.
    fbl::RefPtr<VmMapping> control_mapping;
    st = vmar->CreateVmMapping(0, // mapping_offset
                               control_mem_sz,
                               false, // align_pow2
                               VMAR_FLAG_SPECIFIC,
                               vmo,
                               0, // vmo_offset
                               ARCH_MMU_FLAG_PERM_READ |
                                   ARCH_MMU_FLAG_PERM_WRITE,
                               "control", &control_mapping);
    if (st != ZX_OK || control_mapping == nullptr) {
        LTRACEF("Arena '%s': can't create %zu-byte control mapping (%d)\n",
                name, control_mem_sz, st);
        return ZX_ERR_NO_MEMORY;
    }

    // Create a mapping for the data pool, leaving an unmapped gap
    // between it and the control pool.
    fbl::RefPtr<VmMapping> data_mapping;
    st = vmar->CreateVmMapping(control_mem_sz + guard_sz, // mapping_offset
                               data_mem_sz,
                               false, // align_pow2
                               VMAR_FLAG_SPECIFIC,
                               vmo,
                               control_mem_sz, // vmo_offset
                               ARCH_MMU_FLAG_PERM_READ |
                                   ARCH_MMU_FLAG_PERM_WRITE,
                               "data", &data_mapping);
    if (st != ZX_OK || data_mapping == nullptr) {
        LTRACEF("Arena '%s': can't create %zu-byte data mapping (%d)\n",
                name, data_mem_sz, st);
        return ZX_ERR_NO_MEMORY;
    }

    // TODO(dbort): Add a VmMapping flag that says "do not demand page",
    // requiring and ensuring that we commit our pages manually.

    control_.Init("control", control_mapping, sizeof(Node));
    data_.Init("data", data_mapping, ob_size);

    count_ = 0u;

    vmar_ = vmar;
    destroy_vmar.cancel();

    if (LOCAL_TRACE) {
        Dump();
    }
    return ZX_OK;
}

void Arena::Pool::Init(const char* name, fbl::RefPtr<VmMapping> mapping,
                       size_t slot_size) {
    DEBUG_ASSERT(mapping != nullptr);
    DEBUG_ASSERT(slot_size > 0);

    strlcpy(const_cast<char*>(name_), name, sizeof(name_));
    slot_size_ = slot_size;
    mapping_ = mapping;
    committed_max_ = committed_ = top_ = start_ =
        reinterpret_cast<char*>(mapping_->base());
    end_ = start_ + mapping_->size();

    DEBUG_ASSERT(IS_PAGE_ALIGNED(start_));
    DEBUG_ASSERT(IS_PAGE_ALIGNED(end_));
}

// Pick values that avoid lots of commits + decommits when
// right on the edge.
static const size_t kPoolCommitIncrease = 4 * PAGE_SIZE;
static const size_t kPoolDecommitThreshold = 8 * PAGE_SIZE;
static_assert(kPoolCommitIncrease < kPoolDecommitThreshold, "");
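// For example (assuming 4 KiB pages; purely illustrative): a pool grows its
// committed region in 16 KiB steps, but does not start decommitting until at
// least 32 KiB of committed space sits above the top of the stack, so a
// workload oscillating around a page boundary doesn't ping-pong between
// MapRange() and DecommitRange().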

void* Arena::Pool::Pop() {
    if (static_cast<size_t>(end_ - top_) < slot_size_) {
        LTRACEF("%s: no room\n", name_);
        return nullptr;
    }
    if (top_ + slot_size_ > committed_) {
        // We've hit the end of our committed pages; commit some more.
        char* nc = committed_ + kPoolCommitIncrease;
        if (nc > end_) {
            nc = end_;
        }
        LTRACEF("%s: commit 0x%p..0x%p\n", name_, committed_, nc);
        const size_t offset =
            reinterpret_cast<vaddr_t>(committed_) - mapping_->base();
        const size_t len = nc - committed_;
        zx_status_t st = mapping_->MapRange(offset, len, /* commit */ true);
        if (st != ZX_OK) {
            LTRACEF("%s: can't map range 0x%p..0x%p: %d\n",
                    name_, committed_, nc, st);
            // Try to clean up any committed pages, but don't require
            // that it succeeds.
            mapping_->DecommitRange(offset, len);
            return nullptr;
        }
        committed_ = nc;
        if (committed_ > committed_max_) {
            committed_max_ = committed_;
        }
    }
    char* slot = top_;
    top_ += slot_size_;
    return slot;
}

void Arena::Pool::Push(void* p) {
    // Can only push the most-recently-popped slot.
    ASSERT(reinterpret_cast<char*>(p) + slot_size_ == top_);
    top_ -= slot_size_;
    if (static_cast<size_t>(committed_ - top_) >= kPoolDecommitThreshold) {
        char* nc = reinterpret_cast<char*>(
            ROUNDUP(reinterpret_cast<uintptr_t>(top_ + kPoolCommitIncrease),
                    PAGE_SIZE));
        if (nc > end_) {
            nc = end_;
        }
        if (nc >= committed_) {
            return;
        }
        LTRACEF("%s: decommit 0x%p..0x%p\n", name_, nc, committed_);
        const size_t offset = reinterpret_cast<vaddr_t>(nc) - mapping_->base();
        const size_t len = committed_ - nc;
        // If this fails or decommits less than we asked for, oh well.
        mapping_->DecommitRange(offset, len);
        committed_ = nc;
    }
}

void Arena::Pool::Dump() const {
    printf("  pool '%s' slot size %zu, %zu pages committed:\n",
           name_, slot_size_, mapping_->AllocatedPages());
    printf("  |     start 0x%p\n", start_);
    size_t nslots = static_cast<size_t>(top_ - start_) / slot_size_;
    printf("  |       top 0x%p (%zu slots popped)\n", top_, nslots);
    const size_t np = static_cast<size_t>(committed_ - start_) / PAGE_SIZE;
    const size_t npmax = static_cast<size_t>(committed_max_ - start_) / PAGE_SIZE;
    printf("  | committed 0x%p (%zu pages; %zu pages max)\n",
           committed_, np, npmax);
    nslots = static_cast<size_t>(end_ - start_) / slot_size_;
    printf("  \\       end 0x%p (%zu slots total)\n", end_, nslots);
}

void* Arena::Alloc() {
    DEBUG_ASSERT(vmar_ != nullptr);
    // Prefers to return the most-recently-freed slot in the hopes that
    // it is still hot in the cache.
    void* allocation;
    if (!free_.is_empty()) {
        // TODO(dbort): Replace this linked list with a stack of offsets into
        // the data pool; simpler, and uses less memory.
        Node* node = free_.pop_front();
        auto slot = node->slot;
        control_.Push(node);
        allocation = slot;
    } else {
        allocation = data_.Pop();
    }
    if (allocation != nullptr) {
        ++count_;
    }
    return allocation;
}

void Arena::Free(void* addr) {
    DEBUG_ASSERT(vmar_ != nullptr);
    if (addr == nullptr) {
        return;
    }
    --count_;
    DEBUG_ASSERT(data_.InRange(addr));
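    // Take a slot from the control pool and placement-construct a Node in it
    // that remembers the freed data slot; Alloc() will later return the data
    // slot and push the Node's storage back onto the control pool.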
    Node* node = new (reinterpret_cast<void*>(control_.Pop())) Node{addr};
    free_.push_front(node);
}

void Arena::Dump() const {
    DEBUG_ASSERT(vmar_ != nullptr);
    printf("%s mappings:\n", vmar_->name());
    // Not completely safe, since we don't hold the VMAR's aspace
    // lock, but we're the only ones who know about the mappings
    // and we're not modifying anything.
    vmar_->Dump(/* depth */ 1, /* verbose */ true);
    printf("%s pools:\n", vmar_->name());
    control_.Dump();
    data_.Dump();
    printf("%s free list: %zu nodes\n", vmar_->name(), free_.size_slow());
}

} // namespace fbl