// Copyright 2016 The Fuchsia Authors
//
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file or at
// https://opensource.org/licenses/MIT

#include <fbl/arena.h>

#include <stdio.h>

#include <fbl/alloc_checker.h>
#include <ktl/unique_ptr.h>
#include <lib/unittest/unittest.h>
#include <vm/vm_aspace.h>

using fbl::Arena;

struct TestObj {
    int xx, yy, zz;
};
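
// The tests below size their pools in terms of sizeof(TestObj); pin the
// layout down so a change to TestObj fails here rather than as a confusing
// pool-size mismatch. (Three ints pack with no tail padding on supported
// targets.)
static_assert(sizeof(TestObj) == 3 * sizeof(int), "unexpected TestObj padding");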

static bool init_null_name_succeeds() {
    BEGIN_TEST;
    Arena arena;
    EXPECT_EQ(ZX_OK, arena.Init(nullptr, sizeof(TestObj), 16), "");
    END_TEST;
}

static bool init_zero_ob_size_fails() {
    BEGIN_TEST;
    Arena arena;
    EXPECT_EQ(ZX_ERR_INVALID_ARGS, arena.Init("name", 0, 16), "");
    END_TEST;
}

static bool init_large_ob_size_fails() {
    BEGIN_TEST;
    Arena arena;
    EXPECT_EQ(ZX_ERR_INVALID_ARGS, arena.Init("name", PAGE_SIZE + 1, 16), "");
    END_TEST;
}

static bool init_zero_count_fails() {
    BEGIN_TEST;
    Arena arena;
    EXPECT_EQ(ZX_ERR_INVALID_ARGS, arena.Init("name", sizeof(TestObj), 0), "");
    END_TEST;
}

static bool start_and_end_look_good() {
    BEGIN_TEST;
    static const size_t num_slots = (2 * PAGE_SIZE) / sizeof(TestObj);
    static const size_t expected_size = num_slots * sizeof(TestObj);

    Arena arena;
    EXPECT_EQ(ZX_OK, arena.Init("name", sizeof(TestObj), num_slots), "");

    EXPECT_NONNULL(arena.start(), "");
    EXPECT_NONNULL(arena.end(), "");
    auto start = reinterpret_cast<vaddr_t>(arena.start());
    auto end = reinterpret_cast<vaddr_t>(arena.end());
    EXPECT_LT(start, end, "");
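    // The pool is likely backed by whole pages, so the usable range can be
    // larger than the exact request; hence GE rather than EQ.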
    EXPECT_GE(end - start, expected_size, "");
    END_TEST;
}

static bool in_range_tests() {
    BEGIN_TEST;
    static const size_t num_slots = (2 * PAGE_SIZE) / sizeof(TestObj);

    Arena arena;
    EXPECT_EQ(ZX_OK, arena.Init("name", sizeof(TestObj), num_slots), "");

    auto start = reinterpret_cast<char*>(arena.start());

    // Nothing is allocated yet, so not even the start address
    // should be in range.
    EXPECT_FALSE(arena.in_range(start), "");

    // Allocate some objects, and check that each is within range.
    static const int nobjs = 16;
    void* objs[nobjs];
    for (int i = 0; i < nobjs; i++) {
        char msg[32];
        snprintf(msg, sizeof(msg), "[%d]", i);
        objs[i] = arena.Alloc();
        EXPECT_NONNULL(objs[i], msg);
        // The allocated object should be in range.
        EXPECT_TRUE(arena.in_range(objs[i]), msg);
        // The slot just after this object should not be in range.
        // FRAGILE: assumes that objects are allocated in increasing order.
        EXPECT_FALSE(
            arena.in_range(reinterpret_cast<TestObj*>(objs[i]) + 1), msg);
        // The count should correspond to the number of times we have
        // allocated.
        EXPECT_EQ(static_cast<size_t>(i + 1), arena.DiagnosticCount(), msg);
    }

    // Deallocate the objects and check whether they're in range.
    for (int i = nobjs - 1; i >= 0; i--) {
        char msg[32];
        snprintf(msg, sizeof(msg), "[%d]", i);
        // The object should still be in range.
        EXPECT_TRUE(arena.in_range(objs[i]), msg);

        arena.Free(objs[i]);

        // The free slot will still be in range.
        // NOTE: If Arena ever learns to coalesce and decommit whole pages of
        // free objects, this test will need to change.
        EXPECT_TRUE(arena.in_range(objs[i]), msg);

        // The count should correspond to the number of times we have
        // deallocated.
        EXPECT_EQ(static_cast<size_t>(i), arena.DiagnosticCount(), msg);
    }
    END_TEST;
}

static bool out_of_memory() {
    BEGIN_TEST;
    static const size_t num_slots = (2 * PAGE_SIZE) / sizeof(TestObj);

    Arena arena;
    EXPECT_EQ(ZX_OK, arena.Init("name", sizeof(TestObj), num_slots), "");

    // Allocate all of the data objects.
    fbl::AllocChecker ac;
    ktl::unique_ptr<void*[]> objs(new (&ac) void*[num_slots]);
    EXPECT_TRUE(ac.check(), "");
    void** top = &objs[0];
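    // |top| is a stack pointer into |objs|: "*top++ = ..." pushes, "*--top"
    // pops, and top[-1] is the most recently pushed pointer.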
    for (size_t i = 0; i < num_slots; i++) {
        char msg[32];
        snprintf(msg, sizeof(msg), "[%zu]", i);
        *top++ = arena.Alloc();
        EXPECT_NONNULL(top[-1], msg);
    }

    // Any further allocations should return nullptr.
    EXPECT_NULL(arena.Alloc(), "");
    EXPECT_NULL(arena.Alloc(), "");
    EXPECT_NULL(arena.Alloc(), "");
    EXPECT_NULL(arena.Alloc(), "");

    // Free two objects.
    arena.Free(*--top);
    arena.Free(*--top);

    // Two allocations should succeed; any further should fail.
    *top++ = arena.Alloc();
    EXPECT_NONNULL(top[-1], "");
    *top++ = arena.Alloc();
    EXPECT_NONNULL(top[-1], "");
    EXPECT_NULL(arena.Alloc(), "");
    EXPECT_NULL(arena.Alloc(), "");

    // Free all objects.
    // Nothing much to check except that it doesn't crash.
    while (top > objs.get()) {
        arena.Free(*--top);
    }

    END_TEST;
}

// Test helper. Counts the number of committed and uncommitted pages in the
// range. Returns {*committed, *uncommitted} = {0, 0} if |start| doesn't
// correspond to a live VmMapping.
static bool count_committed_pages(
    vaddr_t start, vaddr_t end, size_t* committed, size_t* uncommitted) {
    BEGIN_TEST; // Not a test, but we need these guards to use ASSERT_*
    *committed = 0;
    *uncommitted = 0;

    // Find the VmMapping that covers |start|. Assume that it covers |end-1|.
    const auto region = VmAspace::kernel_aspace()->FindRegion(start);
    ASSERT_NONNULL(region, "FindRegion");
    const auto mapping = region->as_vm_mapping();
    if (mapping == nullptr) {
        // It's a VMAR, not a mapping, so no pages are committed.
        // Return 0/0.
        return true;
    }

    // Ask the VMO how many pages it's allocated within the range.
    auto start_off = ROUNDDOWN(start, PAGE_SIZE) - mapping->base();
    auto end_off = ROUNDUP(end, PAGE_SIZE) - mapping->base();
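    // These offsets are relative to the mapping's base; adding the mapping's
    // offset into its VMO converts the start to a VMO-relative offset.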
    *committed = mapping->vmo()->AllocatedPagesInRange(
        start_off + mapping->object_offset(), end_off - start_off);
    *uncommitted = (end_off - start_off) / PAGE_SIZE - *committed;
    END_TEST;
}

static bool committing_tests() {
    BEGIN_TEST;
    static const size_t num_slots = (64 * PAGE_SIZE) / sizeof(TestObj);

    Arena arena;
    EXPECT_EQ(ZX_OK, arena.Init("name", sizeof(TestObj), num_slots), "");

    auto start = reinterpret_cast<vaddr_t>(arena.start());
    auto end = reinterpret_cast<vaddr_t>(arena.end());

    // Nothing is allocated yet, so no pages should be committed.
    size_t committed;
    size_t uncommitted;
    EXPECT_TRUE(
        count_committed_pages(start, end, &committed, &uncommitted), "");
    EXPECT_EQ(0u, committed, "");
    EXPECT_GT(uncommitted, 0u, "");

    // Allocate an object.
    auto obj = reinterpret_cast<vaddr_t>(arena.Alloc());
    EXPECT_NE(0u, obj, "");
    size_t atotal = sizeof(TestObj);
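    // |atotal| tracks how many bytes of data slots have been handed out, so
    // we can tell when the already-committed pages should be full.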

    // The page containing the object should be committed.
    auto ps = ROUNDDOWN(obj, PAGE_SIZE);
    auto pe = ROUNDUP(obj + sizeof(TestObj), PAGE_SIZE);
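    // [ps, pe) is the smallest page-aligned range containing the object.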
    EXPECT_TRUE(count_committed_pages(ps, pe, &committed, &uncommitted), "");
    EXPECT_GT(committed, 0u, "");
    EXPECT_EQ(0u, uncommitted, "");

    // The first handful of pages should also become committed, but the rest
    // should stay uncommitted.
    EXPECT_TRUE(
        count_committed_pages(start, end, &committed, &uncommitted), "");
    EXPECT_GT(committed, 0u, "");
    EXPECT_GT(uncommitted, 0u, "");

    // Fill the committed pages with objects; the set of committed pages
    // shouldn't change.
    auto orig_committed = committed;
    auto orig_uncommitted = uncommitted;
    while (atotal + sizeof(TestObj) <= orig_committed * PAGE_SIZE) {
        EXPECT_NONNULL(arena.Alloc(), "");
        atotal += sizeof(TestObj);
    }
    EXPECT_TRUE(
        count_committed_pages(start, end, &committed, &uncommitted), "");
    EXPECT_EQ(orig_committed, committed, "");
    EXPECT_EQ(orig_uncommitted, uncommitted, "");

    // Allocating one more object should cause more pages to be committed.
    EXPECT_NONNULL(arena.Alloc(), "");
    atotal += sizeof(TestObj);
    EXPECT_TRUE(
        count_committed_pages(start, end, &committed, &uncommitted), "");
    EXPECT_LT(orig_committed, committed, "");
    EXPECT_GT(orig_uncommitted, uncommitted, "");

    // TODO(dbort): Test uncommitting if Arena ever learns to decommit pages.
    END_TEST;
}

// Friend class that can see inside an Arena.
namespace fbl {
class ArenaTestFriend {
public:
    ArenaTestFriend(const Arena& arena)
        : arena_(arena) {}

    constexpr static size_t control_slot_size() {
        return sizeof(Arena::Node);
    }

    vaddr_t control_start() const {
        return reinterpret_cast<vaddr_t>(arena_.control_.start());
    }

    vaddr_t control_end() const {
        return reinterpret_cast<vaddr_t>(arena_.control_.end());
    }

private:
    const Arena& arena_;
};
} // namespace fbl
using fbl::ArenaTestFriend;

// Hit the decommit code path. We can't observe it without peeking inside the
// control pool, since the data pool doesn't currently decommit.
static bool uncommitting_tests() {
    BEGIN_TEST;
    // Create an arena with a 16-page control pool.
    static const size_t num_pages = 16;
    static const size_t num_slots =
        (num_pages * PAGE_SIZE) / ArenaTestFriend::control_slot_size();

    Arena arena;
    // Use a small data slot size (1) to keep our memory usage down.
    EXPECT_EQ(ZX_OK, arena.Init("name", 1, num_slots), "");
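
    // The control pool holds the arena's free-list nodes: freeing a data
    // object consumes a control slot, and allocating a data object releases
    // one. The commit/decommit checks below rely on that inversion.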

    // Get the extent of the control pool.
    vaddr_t start;
    vaddr_t end;
    {
        ArenaTestFriend atf(arena);
        start = reinterpret_cast<vaddr_t>(atf.control_start());
        end = reinterpret_cast<vaddr_t>(atf.control_end());
    }

    // Nothing is allocated yet, so no control pages should be committed.
    size_t committed;
    size_t uncommitted;
    EXPECT_TRUE(
        count_committed_pages(start, end, &committed, &uncommitted), "");
    EXPECT_EQ(num_pages, committed + uncommitted, "");
    EXPECT_EQ(0u, committed, "");
    EXPECT_GT(uncommitted, 0u, "");

    // Allocate all of the data objects. Hold onto the pointers so we can free
    // them.
    fbl::AllocChecker ac;
    ktl::unique_ptr<void*[]> objs(new (&ac) void*[num_slots]);
    EXPECT_TRUE(ac.check(), "");
    void** top = &objs[0];
    for (size_t i = 0; i < num_slots; i++) {
        char msg[32];
        snprintf(msg, sizeof(msg), "[%zu]", i);
        *top++ = arena.Alloc();
        EXPECT_NONNULL(top[-1], msg);
    }

    // We still shouldn't see any control pages committed, because no objects
    // have been freed yet.
    EXPECT_TRUE(
        count_committed_pages(start, end, &committed, &uncommitted), "");
    EXPECT_EQ(num_pages, committed + uncommitted, "");
    EXPECT_EQ(0u, committed, "");
    EXPECT_GT(uncommitted, 0u, "");

    // Demonstrate that we've allocated all of the data objects.
    void* no = arena.Alloc();
    EXPECT_NULL(no, "");

    // Free a data object.
    arena.Free(*--top);

    // We should now see some committed pages inside the control pool.
    EXPECT_TRUE(
        count_committed_pages(start, end, &committed, &uncommitted), "");
    EXPECT_EQ(num_pages, committed + uncommitted, "");
    EXPECT_GT(committed, 0u, "");
    EXPECT_GT(uncommitted, 0u, "");

    // Free all of the data objects.
    while (top > objs.get()) {
        arena.Free(*--top);
    }

    // All of the control pages should be committed.
    EXPECT_TRUE(
        count_committed_pages(start, end, &committed, &uncommitted), "");
    EXPECT_EQ(num_pages, committed + uncommitted, "");
    EXPECT_EQ(committed, num_pages, "");
    EXPECT_EQ(uncommitted, 0u, "");

    // Allocate half of the data objects, freeing up half of the free nodes
    // (and thus half of the control slots).
    auto orig_committed = committed;
    ASSERT_EQ(top, objs.get(), "");
    for (size_t i = 0; i < num_slots / 2; i++) {
        char msg[32];
        snprintf(msg, sizeof(msg), "[%zu]", i);
        *top++ = arena.Alloc();
        EXPECT_NONNULL(top[-1], msg);
    }

    // The number of committed pages should have dropped.
    EXPECT_TRUE(
        count_committed_pages(start, end, &committed, &uncommitted), "");
    EXPECT_EQ(num_pages, committed + uncommitted, "");
    EXPECT_LT(committed, orig_committed, "");
    EXPECT_GT(uncommitted, 0u, "");

    // Free more control slots (by allocating data objects) until we see more
    // decommitting happen.
    orig_committed = committed;
    for (size_t i = num_slots / 2; i < num_slots; i++) {
        char msg[32];
        snprintf(msg, sizeof(msg), "[%zu]", i);
        *top++ = arena.Alloc();
        EXPECT_NONNULL(top[-1], msg);

        EXPECT_TRUE(
            count_committed_pages(start, end, &committed, &uncommitted), "");
        if (committed != orig_committed) {
            break;
        }
    }
    EXPECT_GT(orig_committed, committed, "");

    // Allocating one more control slot (by freeing a data object) should not
    // cause the number of committed pages to change: there should be some
    // hysteresis built into the system to avoid flickering back and forth.
    orig_committed = committed;
    arena.Free(*--top);
    EXPECT_TRUE(
        count_committed_pages(start, end, &committed, &uncommitted), "");
    EXPECT_EQ(num_pages, committed + uncommitted, "");
    EXPECT_EQ(committed, orig_committed, "");
    EXPECT_GT(uncommitted, 0u, "");

    // Same for freeing a couple more control slots (by allocating more data
    // objects).
    *top++ = arena.Alloc();
    *top++ = arena.Alloc();
    EXPECT_TRUE(
        count_committed_pages(start, end, &committed, &uncommitted), "");
    EXPECT_EQ(num_pages, committed + uncommitted, "");
    EXPECT_EQ(committed, orig_committed, "");
    EXPECT_GT(uncommitted, 0u, "");

    END_TEST;
}

// Checks that destroying an arena unmaps all of its pages.
static bool memory_cleanup() {
    BEGIN_TEST;
    static const size_t num_slots = (16 * PAGE_SIZE) / sizeof(TestObj);

    fbl::AllocChecker ac;
    Arena* arena = new (&ac) Arena();
    EXPECT_TRUE(ac.check(), "");
    EXPECT_EQ(ZX_OK, arena->Init("name", sizeof(TestObj), num_slots), "");

    auto start = reinterpret_cast<vaddr_t>(arena->start());
    auto end = reinterpret_cast<vaddr_t>(arena->end());

    // Allocate and leak a bunch of objects.
    for (size_t i = 0; i < num_slots; i++) {
        char msg[32];
        snprintf(msg, sizeof(msg), "[%zu]", i);
        EXPECT_NONNULL(arena->Alloc(), msg);
    }

    // Should see some committed pages.
    size_t committed;
    size_t uncommitted;
    EXPECT_TRUE(
        count_committed_pages(start, end, &committed, &uncommitted), "");
    EXPECT_GT(committed, 0u, "");

    // Destroying the Arena should destroy the underlying VmMapping,
    // along with all of its pages.
    delete arena;
    EXPECT_TRUE(
        count_committed_pages(start, end, &committed, &uncommitted), "");
    // 0/0 means "no mapping at this address".
    // FLAKY: Another thread could come in and allocate a mapping at the
    // old location.
    EXPECT_EQ(committed, 0u, "");
    EXPECT_EQ(uncommitted, 0u, "");
    END_TEST;
}

// Basic checks that the contents of allocated objects stick around and
// aren't stomped on.
static bool content_preservation() {
    BEGIN_TEST;
    Arena arena;
    zx_status_t s = arena.Init("arena_tests", sizeof(TestObj), 1000);
    ASSERT_EQ(ZX_OK, s, "arena.Init()");

    const int count = 30;

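    // Run several rounds of allocate/stamp/free so that later rounds reuse
    // slots dirtied by earlier ones.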
    for (int times = 0; times != 5; ++times) {
        TestObj* afp[count] = {};

        for (int ix = 0; ix != count; ++ix) {
            afp[ix] = reinterpret_cast<TestObj*>(arena.Alloc());
            ASSERT_NONNULL(afp[ix], "arena.Alloc()");
            *afp[ix] = {17, 5, ix + 100};
        }

        arena.Free(afp[3]);
        arena.Free(afp[4]);
        arena.Free(afp[5]);
        afp[3] = afp[4] = afp[5] = nullptr;

        afp[4] = reinterpret_cast<TestObj*>(arena.Alloc());
        ASSERT_NONNULL(afp[4], "arena.Alloc()");
        *afp[4] = {17, 5, 104};

        for (int ix = 0; ix != count; ++ix) {
            if (!afp[ix])
                continue;

            EXPECT_EQ(17, afp[ix]->xx, "");
            EXPECT_EQ(5, afp[ix]->yy, "");
            EXPECT_EQ(ix + 100, afp[ix]->zz, "");

            arena.Free(afp[ix]);
        }

        // Leak a few objects.
        for (int ix = 0; ix != 7; ++ix) {
            TestObj* leak = reinterpret_cast<TestObj*>(arena.Alloc());
            ASSERT_NONNULL(leak, "arena.Alloc()");
            *leak = {2121, 77, 55};
        }
    }
    END_TEST;
}

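// Registers |fname| with the unittest framework, using the function's own
// name as the test name.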
#define ARENA_UNITTEST(fname) UNITTEST(#fname, fname)

UNITTEST_START_TESTCASE(arena_tests)
ARENA_UNITTEST(init_null_name_succeeds)
ARENA_UNITTEST(init_zero_ob_size_fails)
ARENA_UNITTEST(init_large_ob_size_fails)
ARENA_UNITTEST(init_zero_count_fails)
ARENA_UNITTEST(start_and_end_look_good)
ARENA_UNITTEST(in_range_tests)
ARENA_UNITTEST(out_of_memory)
ARENA_UNITTEST(committing_tests)
ARENA_UNITTEST(uncommitting_tests)
ARENA_UNITTEST(memory_cleanup)
ARENA_UNITTEST(content_preservation)
UNITTEST_END_TESTCASE(arena_tests, "arenatests", "Arena allocator test");