// Copyright 2018 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <blobfs/iterator/allocated-extent-iterator.h>
#include <blobfs/iterator/block-iterator.h>
#include <blobfs/iterator/node-populator.h>
#include <unittest/unittest.h>

#include "utils.h"

namespace blobfs {
namespace {

// Allocates a blob with the provided number of extents / nodes.
//
// Returns the allocator, the extents, and nodes used.
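// If |fragmented| is true, the underlying block map is pre-fragmented so that
// every reserved extent ends up a single block long.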
bool TestSetup(size_t allocated_blocks, size_t allocated_nodes, bool fragmented,
               MockSpaceManager* space_manager, fbl::unique_ptr<Allocator>* out_allocator,
               fbl::Vector<Extent>* out_extents, fbl::Vector<uint32_t>* out_nodes) {
    BEGIN_HELPER;

    // Block count is large enough to allow for both fragmentation and the
    // allocation of |allocated_blocks| extents.
    size_t block_count = 3 * allocated_blocks;
    ASSERT_TRUE(InitializeAllocator(block_count, allocated_nodes, space_manager, out_allocator));
    if (fragmented) {
        ASSERT_TRUE(ForceFragmentation(out_allocator->get(), block_count));
    }

    // Allocate the initial nodes and blocks.
    fbl::Vector<ReservedNode> nodes;
    fbl::Vector<ReservedExtent> extents;
    ASSERT_EQ(ZX_OK, (*out_allocator)->ReserveNodes(allocated_nodes, &nodes));
    ASSERT_EQ(ZX_OK, (*out_allocator)->ReserveBlocks(allocated_blocks, &extents));
    if (fragmented) {
        ASSERT_EQ(allocated_blocks, extents.size());
    }

    // Keep a copy of the nodes and blocks, since we are passing both to the
    // node populator, but want to verify them afterwards.
    CopyExtents(extents, out_extents);
    CopyNodes(nodes, out_nodes);

    // Actually populate the node with the provided extents and nodes.
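    // The |on_node| and |on_extent| callbacks are intentionally no-ops: these
    // tests only inspect the allocator and inode state after the walk completes.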
    auto on_node = [&](const ReservedNode& node) {};
    auto on_extent = [&](ReservedExtent& extent) {
        return NodePopulator::IterationCommand::Continue;
    };
    NodePopulator populator(out_allocator->get(), std::move(extents), std::move(nodes));
    ASSERT_EQ(ZX_OK, populator.Walk(on_node, on_extent));

    END_HELPER;
}

// Iterate over the null blob.
bool NullTest() {
    BEGIN_TEST;

    MockSpaceManager space_manager;
    fbl::unique_ptr<Allocator> allocator;
    fbl::Vector<Extent> allocated_extents;
    fbl::Vector<uint32_t> allocated_nodes;
    constexpr size_t kAllocatedExtents = 0;
    constexpr size_t kAllocatedNodes = 1;

    ASSERT_TRUE(TestSetup(kAllocatedExtents, kAllocatedNodes, /* fragmented=*/ true, &space_manager,
                          &allocator, &allocated_extents, &allocated_nodes));

    // After walking, observe that the inode is allocated.
    const uint32_t node_index = allocated_nodes[0];
    const Inode* inode = allocator->GetNode(node_index);
    ASSERT_TRUE(inode->header.IsAllocated());
    ASSERT_EQ(kAllocatedExtents, inode->extent_count);

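    // The null blob has an allocated inode but no extents, so the iterator
    // starts out exhausted.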
    AllocatedExtentIterator iter(allocator.get(), node_index);
    ASSERT_TRUE(iter.Done());
    ASSERT_EQ(0, iter.BlockIndex());
    ASSERT_EQ(0, iter.ExtentIndex());

    END_TEST;
}

// Iterate over a blob with inline extents.
bool InlineNodeTest() {
    BEGIN_TEST;

    MockSpaceManager space_manager;
    fbl::unique_ptr<Allocator> allocator;
    fbl::Vector<Extent> allocated_extents;
    fbl::Vector<uint32_t> allocated_nodes;
    constexpr size_t kAllocatedExtents = kInlineMaxExtents;
    constexpr size_t kAllocatedNodes = 1;

    ASSERT_TRUE(TestSetup(kAllocatedExtents, kAllocatedNodes, /* fragmented=*/ true, &space_manager,
                          &allocator, &allocated_extents, &allocated_nodes));

    // After walking, observe that the inode is allocated.
    const uint32_t node_index = allocated_nodes[0];
    const Inode* inode = allocator->GetNode(node_index);
    ASSERT_TRUE(inode->header.IsAllocated());
    ASSERT_EQ(kAllocatedExtents, inode->extent_count);

    AllocatedExtentIterator iter(allocator.get(), node_index);
    ASSERT_EQ(0, iter.BlockIndex());
    uint32_t blocks_seen = 0;

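    // All of the extents fit inline within the inode, so the node index should
    // never change as the iterator advances.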
    for (size_t i = 0; i < allocated_extents.size(); i++) {
        ASSERT_FALSE(iter.Done());
        ASSERT_EQ(node_index, iter.NodeIndex());
        ASSERT_EQ(i, iter.ExtentIndex());
        ASSERT_EQ(blocks_seen, iter.BlockIndex());

        const Extent* extent;
        ASSERT_EQ(ZX_OK, iter.Next(&extent));
        ASSERT_TRUE(allocated_extents[i] == *extent);
        blocks_seen += extent->Length();
    }

    ASSERT_TRUE(iter.Done());
    ASSERT_EQ(allocated_extents.size(), iter.ExtentIndex());
    ASSERT_EQ(blocks_seen, iter.BlockIndex());

    END_TEST;
}

// Iterate over a blob with multiple nodes.
bool MultiNodeTest() {
    BEGIN_TEST;

    MockSpaceManager space_manager;
    fbl::unique_ptr<Allocator> allocator;
    fbl::Vector<Extent> allocated_extents;
    fbl::Vector<uint32_t> allocated_nodes;
    constexpr size_t kAllocatedExtents = kInlineMaxExtents + kContainerMaxExtents + 1;
    constexpr size_t kAllocatedNodes = 3;

    ASSERT_TRUE(TestSetup(kAllocatedExtents, kAllocatedNodes, /* fragmented=*/ true, &space_manager,
                          &allocator, &allocated_extents, &allocated_nodes));

    // After walking, observe that the inode is allocated.
    const uint32_t node_index = allocated_nodes[0];
    const Inode* inode = allocator->GetNode(node_index);
    ASSERT_TRUE(inode->header.IsAllocated());
    ASSERT_EQ(kAllocatedExtents, inode->extent_count);

    AllocatedExtentIterator iter(allocator.get(), node_index);
    ASSERT_EQ(0, iter.ExtentIndex());
    ASSERT_EQ(0, iter.BlockIndex());
    uint32_t blocks_seen = 0;

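    // The extents are spread across all three nodes: the first kInlineMaxExtents
    // live in the inode itself, the next kContainerMaxExtents in the first
    // extent container, and the final extent in the second container. Verify
    // that the iterator reports the correct node as it walks.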
    for (size_t i = 0; i < allocated_extents.size(); i++) {
        ASSERT_FALSE(iter.Done());
        if (i < kInlineMaxExtents) {
            ASSERT_EQ(allocated_nodes[0], iter.NodeIndex());
        } else if (i < kInlineMaxExtents + kContainerMaxExtents) {
            ASSERT_EQ(allocated_nodes[1], iter.NodeIndex());
        } else {
            ASSERT_EQ(allocated_nodes[2], iter.NodeIndex());
        }
        ASSERT_EQ(i, iter.ExtentIndex());
        ASSERT_EQ(blocks_seen, iter.BlockIndex());

        const Extent* extent;
        ASSERT_EQ(ZX_OK, iter.Next(&extent));
        ASSERT_TRUE(allocated_extents[i] == *extent);
        blocks_seen += extent->Length();
    }

    ASSERT_TRUE(iter.Done());
    ASSERT_EQ(allocated_extents.size(), iter.ExtentIndex());
    ASSERT_EQ(blocks_seen, iter.BlockIndex());

    END_TEST;
}

// Demonstrate that the allocated extent iterator won't let us access invalid
// nodes.
bool BadInodeNextNodeTest() {
    BEGIN_TEST;

    MockSpaceManager space_manager;
    fbl::unique_ptr<Allocator> allocator;
    fbl::Vector<Extent> allocated_extents;
    fbl::Vector<uint32_t> allocated_nodes;
    constexpr size_t kAllocatedExtents = kInlineMaxExtents + kContainerMaxExtents + 1;
    constexpr size_t kAllocatedNodes = 4;

    ASSERT_TRUE(TestSetup(kAllocatedExtents, kAllocatedNodes, /* fragmented=*/ true, &space_manager,
                          &allocator, &allocated_extents, &allocated_nodes));

    // After walking, observe that the inode is allocated.
    const uint32_t node_index = allocated_nodes[0];
    Inode* inode = allocator->GetNode(node_index);
    ASSERT_TRUE(inode->header.IsAllocated());
    ASSERT_EQ(kAllocatedExtents, inode->extent_count);

    // Manually corrupt the next inode to point to itself.
    inode->header.next_node = node_index;

    // The iterator should reflect this corruption by returning an error during
    // traversal from the node to the container.
    {
        AllocatedExtentIterator iter(allocator.get(), node_index);
        ASSERT_TRUE(!iter.Done());
        const Extent* extent;
        for (size_t i = 0; i < kInlineMaxExtents - 1; i++) {
            ASSERT_EQ(ZX_OK, iter.Next(&extent));
        }
        ASSERT_EQ(ZX_ERR_IO_DATA_INTEGRITY, iter.Next(&extent));
    }

    // Manually corrupt the next inode to point to an unallocated (but otherwise
    // valid) node.
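    // (Only three of the four reserved nodes were needed to hold the blob's
    // extents, so the last reserved node is valid but remains unallocated.)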
    inode->header.next_node = allocated_nodes[kAllocatedNodes - 1];

    // The iterator should reflect this corruption by returning an error during
    // traversal from the node to the container.
    {
        AllocatedExtentIterator iter(allocator.get(), node_index);
        ASSERT_TRUE(!iter.Done());
        const Extent* extent;
        for (size_t i = 0; i < kInlineMaxExtents - 1; i++) {
            ASSERT_EQ(ZX_OK, iter.Next(&extent));
        }
        ASSERT_EQ(ZX_ERR_IO_DATA_INTEGRITY, iter.Next(&extent));
    }

    // TODO(smklein): Currently, this fails because Allocator::GetNode asserts on failure,
    // rather than returning a status.
    //
    // // Manually corrupt the next inode to point to a completely invalid node.
    // inode->header.next_node = 0xFFFFFFFF;
    //
    // // The iterator should reflect this corruption by returning an error during
    // // traversal from the node to the container.
    // {
    //     AllocatedExtentIterator iter(allocator.get(), node_index);
    //     ASSERT_TRUE(!iter.Done());
    //     const Extent* extent;
    //     for (size_t i = 0; i < kInlineMaxExtents - 1; i++) {
    //         ASSERT_EQ(ZX_OK, iter.Next(&extent));
    //     }
    //     ASSERT_EQ(ZX_ERR_IO_DATA_INTEGRITY, iter.Next(&extent));
    // }

    END_TEST;
}

// Test utilization of the BlockIterator over the allocated extent iterator
// while the underlying storage is maximally fragmented.
bool BlockIteratorFragmentedTest() {
    BEGIN_TEST;

    MockSpaceManager space_manager;
    fbl::unique_ptr<Allocator> allocator;
    fbl::Vector<Extent> allocated_extents;
    fbl::Vector<uint32_t> allocated_nodes;
    constexpr size_t kAllocatedExtents = kInlineMaxExtents + kContainerMaxExtents + 1;
    constexpr size_t kAllocatedNodes = 3;

    ASSERT_TRUE(TestSetup(kAllocatedExtents, kAllocatedNodes, /* fragmented=*/ true, &space_manager,
                          &allocator, &allocated_extents, &allocated_nodes));

    // After walking, observe that the inode is allocated.
    const uint32_t node_index = allocated_nodes[0];
    const Inode* inode = allocator->GetNode(node_index);
    ASSERT_TRUE(inode->header.IsAllocated());
    ASSERT_EQ(kAllocatedExtents, inode->extent_count);

    AllocatedExtentIterator allocated_iter(allocator.get(), node_index);
    BlockIterator iter(&allocated_iter);
    ASSERT_EQ(0, iter.BlockIndex());
    ASSERT_FALSE(iter.Done());

    // Since we are maximally fragmented, we're polling for single-block
    // extents. This means that each call to "Next" will return at most one block.
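    // (ForceFragmentation in TestSetup leaves every reserved extent exactly one
    // block long.)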
    uint32_t blocks_seen = 0;

    for (size_t i = 0; i < allocated_extents.size(); i++) {
        ASSERT_FALSE(iter.Done());
        uint32_t actual_length;
        uint64_t actual_start;
        // The request size |i + 1| is arbitrary, but it exercises a request for
        // "at least one" block as well as several larger sizes. The size doesn't
        // matter in the fragmented case, since |actual_length| should always be one.
        ASSERT_EQ(ZX_OK, iter.Next(static_cast<uint32_t>(i + 1), &actual_length, &actual_start));
        ASSERT_EQ(1, actual_length);
        ASSERT_EQ(allocated_extents[i].Start(), actual_start);
        blocks_seen += actual_length;
        ASSERT_EQ(blocks_seen, iter.BlockIndex());
    }

    ASSERT_TRUE(iter.Done());
    END_TEST;
}

// Test utilization of the BlockIterator over the allocated extent iterator
// while the underlying storage is unfragmented.
bool BlockIteratorUnfragmentedTest() {
    BEGIN_TEST;

    MockSpaceManager space_manager;
    fbl::unique_ptr<Allocator> allocator;
    fbl::Vector<Extent> allocated_extents;
    fbl::Vector<uint32_t> allocated_nodes;
    constexpr size_t kAllocatedBlocks = 100;
    constexpr size_t kAllocatedNodes = 1;

    ASSERT_TRUE(TestSetup(kAllocatedBlocks, kAllocatedNodes, /* fragmented=*/ false,
                          &space_manager, &allocator, &allocated_extents, &allocated_nodes));

    // After walking, observe that the inode is allocated.
    const uint32_t node_index = allocated_nodes[0];
    const Inode* inode = allocator->GetNode(node_index);
    ASSERT_TRUE(inode->header.IsAllocated());
    ASSERT_EQ(1, inode->extent_count);

    // The allocation is contiguous, so the number of blocks we see is
    // completely dependent on the amount we ask for.

    // Try asking for all the blocks.
    {
        AllocatedExtentIterator allocated_iter(allocator.get(), node_index);
        BlockIterator iter(&allocated_iter);
        ASSERT_EQ(0, iter.BlockIndex());
        ASSERT_FALSE(iter.Done());
        uint32_t actual_length;
        uint64_t actual_start;
        ASSERT_EQ(ZX_OK, iter.Next(10000, &actual_length, &actual_start));
        ASSERT_EQ(kAllocatedBlocks, actual_length);
        ASSERT_EQ(allocated_extents[0].Start(), actual_start);
        ASSERT_TRUE(iter.Done());
    }

    // Try asking for some of the blocks (in a linearly increasing size).
    {
        AllocatedExtentIterator allocated_iter(allocator.get(), node_index);
        BlockIterator iter(&allocated_iter);
        ASSERT_EQ(0, iter.BlockIndex());
        ASSERT_FALSE(iter.Done());

        uint32_t blocks_seen = 0;
        size_t request_size = 1;
        while (!iter.Done()) {
            uint32_t actual_length;
            uint64_t actual_start;
            ASSERT_EQ(ZX_OK, iter.Next(static_cast<uint32_t>(request_size),
                                       &actual_length, &actual_start));
            ASSERT_EQ(fbl::min(request_size, kAllocatedBlocks - blocks_seen), actual_length);
            ASSERT_EQ(allocated_extents[0].Start() + blocks_seen, actual_start);
            request_size++;
            blocks_seen += actual_length;
        }
        ASSERT_EQ(kAllocatedBlocks, iter.BlockIndex());
    }

    END_TEST;
}

// TODO(smklein): Test against chains of extents which cause loops, such as:
// - Inode -> Itself
// - Inode -> Container A -> Container B -> Container A
// TODO(smklein): Test that we can defend against manually corrupted extent counts.

} // namespace
} // namespace blobfs

BEGIN_TEST_CASE(blobfsAllocatedExtentIteratorTests)
RUN_TEST(blobfs::NullTest)
RUN_TEST(blobfs::InlineNodeTest)
RUN_TEST(blobfs::MultiNodeTest)
RUN_TEST(blobfs::BadInodeNextNodeTest)
RUN_TEST(blobfs::BlockIteratorFragmentedTest)
RUN_TEST(blobfs::BlockIteratorUnfragmentedTest)
END_TEST_CASE(blobfsAllocatedExtentIteratorTests);