// Copyright 2018 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <blobfs/allocator.h>
#include <unittest/unittest.h>

#include "utils.h"

namespace blobfs {
namespace {

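// Test that an allocator initialized from empty block and node maps rejects
// all reservation requests.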
bool NullTest() {
    BEGIN_TEST;

    MockSpaceManager space_manager;
    RawBitmap block_map;
    fzl::ResizeableVmoMapper node_map;
    Allocator allocator(&space_manager, std::move(block_map), std::move(node_map));
    allocator.SetLogging(false);

    fbl::Vector<ReservedExtent> extents;
    ASSERT_EQ(ZX_ERR_NO_SPACE, allocator.ReserveBlocks(1, &extents));
    ASSERT_FALSE(allocator.ReserveNode());

    END_TEST;
}

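// Test that an allocator backed by a single block and a single node can
// reserve one unit of each.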
bool SingleTest() {
    BEGIN_TEST;

    MockSpaceManager space_manager;
    fbl::unique_ptr<Allocator> allocator;
    ASSERT_TRUE(InitializeAllocator(1, 1, &space_manager, &allocator));

    // We can allocate a single unit.
    fbl::Vector<ReservedExtent> extents;
    ASSERT_EQ(ZX_OK, allocator->ReserveBlocks(1, &extents));
    std::optional<ReservedNode> node = allocator->ReserveNode();
    ASSERT_TRUE(node);

    END_TEST;
}

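// Test that reservations collide with both in-flight reservations and
// committed allocations, and that freed space becomes reusable.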
bool SingleCollisionTest() {
    BEGIN_TEST;

    MockSpaceManager space_manager;
    fbl::unique_ptr<Allocator> allocator;
    ASSERT_TRUE(InitializeAllocator(1, 1, &space_manager, &allocator));

    fbl::Vector<ReservedExtent> extents;
    ASSERT_EQ(ZX_OK, allocator->ReserveBlocks(1, &extents));
    std::optional<ReservedNode> maybe_node = allocator->ReserveNode();
    ASSERT_TRUE(maybe_node);
    ReservedNode& node = *maybe_node;

    // Check the situation where allocation intersects with the in-flight reservation map.
    fbl::Vector<ReservedExtent> failed_extents;
    ASSERT_EQ(ZX_ERR_NO_SPACE, allocator->ReserveBlocks(1, &failed_extents));
    ASSERT_FALSE(allocator->ReserveNode());

    // Check the situation where allocation intersects with the committed map.
    allocator->MarkBlocksAllocated(extents[0]);
    allocator->MarkInodeAllocated(node);
    ASSERT_EQ(ZX_ERR_NO_SPACE, allocator->ReserveBlocks(1, &failed_extents));
    ASSERT_FALSE(allocator->ReserveNode());

    // Check that freeing the space (and releasing the reservation) makes it
    // available for use once more.
    allocator->FreeBlocks(extents[0].extent());
    allocator->FreeNode(node.index());
    node.Reset();
    extents.reset();
    ASSERT_EQ(ZX_OK, allocator->ReserveBlocks(1, &extents));
    ASSERT_TRUE(allocator->ReserveNode());

    END_TEST;
}

// Test the condition where we cannot allocate because (while looking for
// blocks) we hit an already-allocated prefix of reserved / committed blocks.
bool PrefixCollisionTest() {
    BEGIN_TEST;

    MockSpaceManager space_manager;
    fbl::unique_ptr<Allocator> allocator;
    ASSERT_TRUE(InitializeAllocator(4, 4, &space_manager, &allocator));

    // Allocate a single extent of two blocks.
    fbl::Vector<ReservedExtent> extents;
    ASSERT_EQ(ZX_OK, allocator->ReserveBlocks(2, &extents));
    ASSERT_EQ(1, extents.size());

    // We have two blocks left; we cannot allocate three blocks.
    fbl::Vector<ReservedExtent> failed_extents;
    ASSERT_EQ(ZX_ERR_NO_SPACE, allocator->ReserveBlocks(3, &failed_extents));
    allocator->MarkBlocksAllocated(extents[0]);
    Extent extent = extents[0].extent();
    extents.reset();

    // After the extents are committed (and unreserved), we still cannot
    // utilize their space.
    ASSERT_EQ(ZX_ERR_NO_SPACE, allocator->ReserveBlocks(3, &failed_extents));

    // After freeing the allocated blocks, we can re-allocate.
    allocator->FreeBlocks(extent);
    ASSERT_EQ(ZX_OK, allocator->ReserveBlocks(3, &extents));

    END_TEST;
}

// Test the condition where we cannot allocate because (while looking for
// blocks) we hit an already-allocated suffix of reserved / committed blocks.
bool SuffixCollisionTest() {
    BEGIN_TEST;

    MockSpaceManager space_manager;
    fbl::unique_ptr<Allocator> allocator;
    ASSERT_TRUE(InitializeAllocator(4, 4, &space_manager, &allocator));

    // Allocate a single extent of two blocks.
    fbl::Vector<ReservedExtent> prefix_extents;
    ASSERT_EQ(ZX_OK, allocator->ReserveBlocks(2, &prefix_extents));
    ASSERT_EQ(1, prefix_extents.size());

    // Allocate another extent of two blocks.
    fbl::Vector<ReservedExtent> suffix_extents;
    ASSERT_EQ(ZX_OK, allocator->ReserveBlocks(2, &suffix_extents));
    ASSERT_EQ(1, suffix_extents.size());

    // Release the prefix allocation so we can test against the suffix.
    prefix_extents.reset();

    // We have two blocks left; we cannot allocate three blocks.
    fbl::Vector<ReservedExtent> failed_extents;
    ASSERT_EQ(ZX_ERR_NO_SPACE, allocator->ReserveBlocks(3, &failed_extents));
    allocator->MarkBlocksAllocated(suffix_extents[0]);
    Extent extent = suffix_extents[0].extent();
    suffix_extents.reset();

    // After the extents are committed (and unreserved), we still cannot
    // utilize their space.
    ASSERT_EQ(ZX_ERR_NO_SPACE, allocator->ReserveBlocks(3, &failed_extents));

    // After freeing the allocated blocks, we can re-allocate.
    allocator->FreeBlocks(extent);
    ASSERT_EQ(ZX_OK, allocator->ReserveBlocks(3, &suffix_extents));
    END_TEST;
}

// Test the condition where our allocation request overlaps with both a
// previously allocated region and a reserved region, with the allocated
// region appearing first.
bool AllocatedBeforeReservedTest() {
    BEGIN_TEST;

    MockSpaceManager space_manager;
    fbl::unique_ptr<Allocator> allocator;
    ASSERT_TRUE(InitializeAllocator(4, 4, &space_manager, &allocator));

    // Allocate a single extent of one block.
    {
        fbl::Vector<ReservedExtent> prefix_extents;
        ASSERT_EQ(ZX_OK, allocator->ReserveBlocks(1, &prefix_extents));
        ASSERT_EQ(1, prefix_extents.size());
        allocator->MarkBlocksAllocated(prefix_extents[0]);
    }

    // Reserve another extent of one block.
    fbl::Vector<ReservedExtent> suffix_extents;
    ASSERT_EQ(ZX_OK, allocator->ReserveBlocks(1, &suffix_extents));
    ASSERT_EQ(1, suffix_extents.size());

    // We should still be able to reserve the remaining two blocks in a single
    // extent.
    fbl::Vector<ReservedExtent> extents;
    ASSERT_EQ(ZX_OK, allocator->ReserveBlocks(2, &extents));
    ASSERT_EQ(1, extents.size());

    END_TEST;
}

// Test the condition where our allocation request overlaps with both a
// previously allocated region and a reserved region, with the reserved
// region appearing first.
bool ReservedBeforeAllocatedTest() {
    BEGIN_TEST;

    MockSpaceManager space_manager;
    fbl::unique_ptr<Allocator> allocator;
    ASSERT_TRUE(InitializeAllocator(4, 4, &space_manager, &allocator));

    // Reserve an extent of one block.
    fbl::Vector<ReservedExtent> reserved_extents;
    ASSERT_EQ(ZX_OK, allocator->ReserveBlocks(1, &reserved_extents));
    ASSERT_EQ(1, reserved_extents.size());

    // Allocate a single extent of one block, immediately following the prior
    // reservation.
    {
        fbl::Vector<ReservedExtent> committed_extents;
        ASSERT_EQ(ZX_OK, allocator->ReserveBlocks(1, &committed_extents));
        ASSERT_EQ(1, committed_extents.size());
        allocator->MarkBlocksAllocated(committed_extents[0]);
    }

    // We should still be able to reserve the remaining two blocks in a single
    // extent.
    fbl::Vector<ReservedExtent> extents;
    ASSERT_EQ(ZX_OK, allocator->ReserveBlocks(2, &extents));
    ASSERT_EQ(1, extents.size());

    END_TEST;
}

// Tests a case where navigation between multiple reserved and committed blocks
// requires non-trivial logic.
//
// This acts as a regression test against a bug encountered during prototyping,
// where navigating reserved blocks could unintentionally ignore collisions with
// the committed blocks.
bool InterleavedReservationTest() {
    BEGIN_TEST;

    MockSpaceManager space_manager;
    fbl::unique_ptr<Allocator> allocator;
    ASSERT_TRUE(InitializeAllocator(10, 5, &space_manager, &allocator));

    // R: Reserved
    // C: Committed
    // F: Free
    //
    // [R F F F F F F F F F]
    // Reserve an extent of one block.
    fbl::Vector<ReservedExtent> reservation_group_a;
    ASSERT_EQ(ZX_OK, allocator->ReserveBlocks(1, &reservation_group_a));
    ASSERT_EQ(1, reservation_group_a.size());

    // [R R F F F F F F F F]
    // Reserve an extent of one block.
    fbl::Vector<ReservedExtent> reservation_group_b;
    ASSERT_EQ(ZX_OK, allocator->ReserveBlocks(1, &reservation_group_b));
    ASSERT_EQ(1, reservation_group_b.size());

    // [R R C F F F F F F F]
    // Allocate a single extent of one block, immediately following the prior
    // reservations.
    {
        fbl::Vector<ReservedExtent> committed_extents;
        ASSERT_EQ(ZX_OK, allocator->ReserveBlocks(1, &committed_extents));
        ASSERT_EQ(1, committed_extents.size());
        allocator->MarkBlocksAllocated(committed_extents[0]);
    }

    // [R R C R F F F F F F]
    // Reserve an extent of one block.
    fbl::Vector<ReservedExtent> reservation_group_c;
    ASSERT_EQ(ZX_OK, allocator->ReserveBlocks(1, &reservation_group_c));
    ASSERT_EQ(1, reservation_group_c.size());

    // [F R C R F F F F F F]
    // Free the first extent.
    reservation_group_a.reset();

    // We should still be able to reserve four more blocks, split into two
    // extents around the remaining reservations and the committed block.
    fbl::Vector<ReservedExtent> extents;
    ASSERT_EQ(ZX_OK, allocator->ReserveBlocks(4, &extents));
    ASSERT_EQ(2, extents.size());

    END_TEST;
}

// Create a highly fragmented allocation pool by allocating every other block,
// and observe that even in the presence of fragmentation we can still achieve
// 100% space utilization.
//
// |EvensReserved| selects whether the even-indexed or odd-indexed blocks
// remain reserved after half of the extents are freed.
template <bool EvensReserved>
bool FragmentationTest() {
    BEGIN_TEST;

    MockSpaceManager space_manager;
    fbl::unique_ptr<Allocator> allocator;
    constexpr uint64_t kBlockCount = 16;
    ASSERT_TRUE(InitializeAllocator(kBlockCount, 4, &space_manager, &allocator));

    // Allocate kBlockCount extents of length one.
    fbl::Vector<ReservedExtent> fragmentation_extents[kBlockCount];
    for (uint64_t i = 0; i < kBlockCount; i++) {
        ASSERT_EQ(ZX_OK, allocator->ReserveBlocks(1, &fragmentation_extents[i]));
    }

    // At this point, there shouldn't be a single block of space left.
    fbl::Vector<ReservedExtent> failed_extents;
    ASSERT_EQ(ZX_ERR_NO_SPACE, allocator->ReserveBlocks(1, &failed_extents));

    // Free half of the extents, and demonstrate that we can use all the
    // remaining fragmented space.
    fbl::Vector<ReservedExtent> big_extent;
    static_assert(kBlockCount % 2 == 0, "Test assumes an even-sized allocation pool");
    for (uint64_t i = EvensReserved ? 1 : 0; i < kBlockCount; i += 2) {
        fragmentation_extents[i].reset();
    }
    ASSERT_EQ(ZX_OK, allocator->ReserveBlocks(kBlockCount / 2, &big_extent));
    big_extent.reset();

    // Commit the reserved extents, and observe that our ability to allocate
    // fragmented extents still persists.
    for (uint64_t i = EvensReserved ? 0 : 1; i < kBlockCount; i += 2) {
        ASSERT_EQ(1, fragmentation_extents[i].size());
        allocator->MarkBlocksAllocated(fragmentation_extents[i][0]);
        fragmentation_extents[i].reset();
    }
    ASSERT_EQ(ZX_OK, allocator->ReserveBlocks(kBlockCount / 2, &big_extent));
    ASSERT_EQ(kBlockCount / 2, big_extent.size());

    // After the big extent is reserved (or committed), however, we cannot reserve
    // anything more.
    ASSERT_EQ(ZX_ERR_NO_SPACE, allocator->ReserveBlocks(1, &failed_extents));
    for (uint64_t i = 0; i < big_extent.size(); i++) {
        allocator->MarkBlocksAllocated(big_extent[i]);
    }
    big_extent.reset();
    ASSERT_EQ(ZX_ERR_NO_SPACE, allocator->ReserveBlocks(1, &failed_extents));

    END_TEST;
}

// Test a case of allocation where we try allocating more blocks than can fit
// within a single extent.
bool MaxExtentTest() {
    BEGIN_TEST;

    MockSpaceManager space_manager;
    fbl::unique_ptr<Allocator> allocator;
    constexpr uint64_t kBlockCount = kBlockCountMax * 2;
    ASSERT_TRUE(InitializeAllocator(kBlockCount, 4, &space_manager, &allocator));

    // Allocate a region which can be contained within one extent.
    fbl::Vector<ReservedExtent> extents;
    ASSERT_EQ(ZX_OK, allocator->ReserveBlocks(kBlockCountMax, &extents));
    ASSERT_EQ(1, extents.size());
    extents.reset();

    // Allocate a region which cannot be contained within one extent.
    ASSERT_EQ(ZX_OK, allocator->ReserveBlocks(kBlockCountMax + 1, &extents));
    ASSERT_EQ(2, extents.size());

    // Demonstrate that the remaining blocks are still available.
    fbl::Vector<ReservedExtent> remainder;
    ASSERT_EQ(ZX_OK, allocator->ReserveBlocks(kBlockCount - (kBlockCountMax + 1), &remainder));

    // But nothing more.
    fbl::Vector<ReservedExtent> failed_extent;
    ASSERT_EQ(ZX_ERR_NO_SPACE, allocator->ReserveBlocks(1, &failed_extent));

    END_TEST;
}

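// Helper which verifies that exactly |size| nodes can be reserved from |allocator|.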
bool CheckNodeMapSize(Allocator* allocator, uint64_t size) {
    BEGIN_HELPER;

    // Verify that we can allocate |size| nodes...
    fbl::Vector<ReservedNode> nodes;
    ASSERT_EQ(ZX_OK, allocator->ReserveNodes(size, &nodes));

    // ... But no more.
    ASSERT_FALSE(allocator->ReserveNode());
    ASSERT_EQ(size, allocator->ReservedNodeCount());

    END_HELPER;
}

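// Helper which verifies that exactly |size| blocks can be reserved from |allocator|.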
bool CheckBlockMapSize(Allocator* allocator, uint64_t size) {
    BEGIN_HELPER;

    // Verify that nothing is reserved yet, and that we can allocate |size| blocks...
    ASSERT_EQ(0, allocator->ReservedBlockCount());
    fbl::Vector<ReservedExtent> extents;
    ASSERT_EQ(ZX_OK, allocator->ReserveBlocks(size, &extents));

    // ... But no more.
    fbl::Vector<ReservedExtent> failed_extents;
    ASSERT_EQ(ZX_ERR_NO_SPACE, allocator->ReserveBlocks(size, &failed_extents));

    END_HELPER;
}

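// Helper which initializes an allocator sized for |before_blocks| blocks and
// |before_nodes| nodes, then resizes the maps to |after_blocks| / |after_nodes|
// and verifies the new capacities.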
bool ResetSizeHelper(uint64_t before_blocks, uint64_t before_nodes,
                     uint64_t after_blocks, uint64_t after_nodes) {
    BEGIN_HELPER;

    // Initialize the allocator with a given size.
    MockSpaceManager space_manager;
    RawBitmap block_map;
    ASSERT_EQ(ZX_OK, block_map.Reset(before_blocks));
    fzl::ResizeableVmoMapper node_map;
    size_t map_size = fbl::round_up(before_nodes * kBlobfsInodeSize, kBlobfsBlockSize);
    ASSERT_EQ(ZX_OK, node_map.CreateAndMap(map_size, "node map"));
    space_manager.MutableInfo().inode_count = before_nodes;
    space_manager.MutableInfo().data_block_count = before_blocks;
    Allocator allocator(&space_manager, std::move(block_map), std::move(node_map));
    allocator.SetLogging(false);
    ASSERT_TRUE(CheckNodeMapSize(&allocator, before_nodes));
    ASSERT_TRUE(CheckBlockMapSize(&allocator, before_blocks));

    // Update the superblock and reset the sizes.
    space_manager.MutableInfo().inode_count = after_nodes;
    space_manager.MutableInfo().data_block_count = after_blocks;
    ASSERT_EQ(ZX_OK, allocator.ResetBlockMapSize());
    ASSERT_EQ(ZX_OK, allocator.ResetNodeMapSize());

    ASSERT_TRUE(CheckNodeMapSize(&allocator, after_nodes));
    ASSERT_TRUE(CheckBlockMapSize(&allocator, after_blocks));

    END_HELPER;
}

// Test the functions which can alter the size of the block / node maps after
// initialization.
bool ResetSizeTest() {
    BEGIN_TEST;

    constexpr uint64_t kNodesPerBlock = kBlobfsBlockSize / kBlobfsInodeSize;

    // Test no changes in size.
    ASSERT_TRUE(ResetSizeHelper(1, kNodesPerBlock, 1, kNodesPerBlock));
    // Test 2x growth.
    ASSERT_TRUE(ResetSizeHelper(1, kNodesPerBlock, 2, kNodesPerBlock * 2));
    // Test 8x growth.
    ASSERT_TRUE(ResetSizeHelper(1, kNodesPerBlock, 8, kNodesPerBlock * 8));
    // Test 2048x growth.
    ASSERT_TRUE(ResetSizeHelper(1, kNodesPerBlock, 2048, kNodesPerBlock * 2048));

    // Test 2x shrinking.
    ASSERT_TRUE(ResetSizeHelper(2, kNodesPerBlock * 2, 1, kNodesPerBlock));
    // Test 8x shrinking.
    ASSERT_TRUE(ResetSizeHelper(8, kNodesPerBlock * 8, 1, kNodesPerBlock));
    // Test 2048x shrinking.
    ASSERT_TRUE(ResetSizeHelper(2048, kNodesPerBlock * 2048, 1, kNodesPerBlock));

    END_TEST;
}

} // namespace
} // namespace blobfs

BEGIN_TEST_CASE(blobfsAllocatorTests)
RUN_TEST(blobfs::NullTest)
RUN_TEST(blobfs::SingleTest)
RUN_TEST(blobfs::SingleCollisionTest)
RUN_TEST(blobfs::PrefixCollisionTest)
RUN_TEST(blobfs::SuffixCollisionTest)
RUN_TEST(blobfs::AllocatedBeforeReservedTest)
RUN_TEST(blobfs::ReservedBeforeAllocatedTest)
RUN_TEST(blobfs::InterleavedReservationTest)
RUN_TEST(blobfs::FragmentationTest</* EvensReserved = */ true>)
RUN_TEST(blobfs::FragmentationTest</* EvensReserved = */ false>)
RUN_TEST(blobfs::MaxExtentTest)
RUN_TEST(blobfs::ResetSizeTest)
END_TEST_CASE(blobfsAllocatorTests);