// Copyright 2017 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <errno.h>
#include <fcntl.h>
#include <stdint.h>
#include <sys/stat.h>

#include <blobfs/format.h>
#include <digest/digest.h>
#include <digest/merkle-tree.h>
#include <fbl/algorithm.h>
#include <fbl/function.h>
#include <fbl/string.h>
#include <fbl/string_buffer.h>
#include <fbl/string_printf.h>
#include <fbl/unique_fd.h>
#include <fs-management/mount.h>
#include <fs-test-utils/fixture.h>
#include <fs-test-utils/perftest.h>
#include <perftest/perftest.h>
#include <unittest/unittest.h>

#include <utility>

namespace {

using digest::Digest;
using digest::MerkleTree;
using fs_test_utils::Fixture;
using fs_test_utils::FixtureOptions;
using fs_test_utils::PerformanceTestOptions;
using fs_test_utils::TestCaseInfo;
using fs_test_utils::TestInfo;

// Supported read orders for this benchmark.
enum class ReadOrder {
    // Blobs are read in the order they were written.
    kSequentialForward,
    // Blobs are read in the inverse of the order in which they were written.
    kSequentialReverse,
    // Blobs are read in a random order.
    kRandom,
};

// An in-memory representation of a blob.
struct BlobInfo {
    // Path to the generated blob.
    fbl::StringBuffer<fs_test_utils::kPathSize> path;

    fbl::unique_ptr<char[]> merkle;
    size_t size_merkle;

    fbl::unique_ptr<char[]> data;
    size_t size_data;
};

// Describes the parameters of the test case.
struct BlobfsInfo {
    // Total number of blobs in blobfs.
    ssize_t blob_count;

    // Size in bytes of each blob in blobfs.
    size_t blob_size;

    // Path to every blob in blobfs.
    fbl::Vector<fbl::StringBuffer<fs_test_utils::kPathSize>> paths;

    // Order in which to read the blobs from blobfs.
    fbl::Vector<uint64_t> path_index;
};

// Helper for streaming operations (such as read and write) that may need to be repeated
// multiple times to transfer all of the requested bytes.
template <typename T, typename U>
inline int StreamAll(T func, int fd, U* buf, size_t max) {
    size_t n = 0;
    while (n != max) {
        ssize_t d = func(fd, &buf[n], max - n);
        if (d < 0) {
            return -1;
        }
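        // A return of zero (for example, EOF on read) is not treated as an error; callers are
        // expected to request only as many bytes as are actually available.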
        n += d;
    }
    return 0;
}

// Get a readable name for a given number of bytes.
fbl::String GetNameForSize(size_t size_in_bytes) {
    static const char* const kUnits[] = {"bytes", "Kbytes", "Mbytes", "Gbytes"};
    size_t current_unit = 0;
    size_t current_size = size_in_bytes;
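    // Find the largest unit in which the size amounts to at least one whole unit.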
    while (current_unit < fbl::count_of(kUnits) - 1 && current_size >= (1u << 10)) {
        current_size >>= 10;
        ++current_unit;
    }

    size_t size = size_in_bytes >> (10 * current_unit);
    return fbl::StringPrintf("%lu%s", size, kUnits[current_unit]);
}

fbl::String GetNameForOrder(ReadOrder order) {
    switch (order) {
    case ReadOrder::kSequentialForward:
        return "Sequential";
    case ReadOrder::kSequentialReverse:
        return "Reverse";
    case ReadOrder::kRandom:
        return "Random";
    }

    return "";
}

// Creates an in-memory blob.
bool MakeBlob(fbl::String fs_path, size_t blob_size, unsigned int* seed,
              fbl::unique_ptr<BlobInfo>* out) {
    BEGIN_HELPER;
    // Generate a blob of random data.
    fbl::AllocChecker ac;
    fbl::unique_ptr<BlobInfo> info(new (&ac) BlobInfo);
    EXPECT_EQ(ac.check(), true);
    info->data.reset(new (&ac) char[blob_size]);
    EXPECT_EQ(ac.check(), true);
    // rand_r produces a cyclic sequence. Rather than consuming one element of the shared
    // sequence per byte (which previously exhausted the cycle, produced identical blobs, and
    // showed up as test failures), draw a single value from the shared sequence and use it to
    // seed this blob's own generator.
    unsigned int initial_seed = rand_r(seed);
    for (size_t i = 0; i < blob_size; i++) {
        info->data[i] = static_cast<char>(rand_r(&initial_seed));
    }
    info->size_data = blob_size;

    // Generate the Merkle tree.
    info->size_merkle = MerkleTree::GetTreeLength(blob_size);
    if (info->size_merkle == 0) {
        info->merkle = nullptr;
    } else {
        info->merkle.reset(new (&ac) char[info->size_merkle]);
        ASSERT_EQ(ac.check(), true);
    }
    Digest digest;
    ASSERT_EQ(MerkleTree::Create(&info->data[0], info->size_data, &info->merkle[0],
                                 info->size_merkle, &digest),
              ZX_OK, "Couldn't create Merkle Tree");
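    // In blobfs a blob's name is the hex encoding of its Merkle root digest, so the blob's path
    // is derived from the digest computed above.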
    fbl::StringBuffer<sizeof(info->path)> path;
    path.AppendPrintf("%s/", fs_path.c_str());
    digest.ToString(path.data() + path.size(), path.capacity() - path.size());
    strcpy(info->path.data(), path.c_str());

    // Sanity-check the Merkle tree.
    ASSERT_EQ(MerkleTree::Verify(&info->data[0], info->size_data, &info->merkle[0],
                                 info->size_merkle, 0, info->size_data, digest),
              ZX_OK, "Failed to validate Merkle Tree");

    *out = std::move(info);
    END_HELPER;
}

// Returns a path within the filesystem that has the shape of a valid blob path but does not
// correspond to any written blob. The generated path is 'root_path/0...0'.
fbl::String GetNegativeLookupPath(const fbl::String& fs_path) {
    fbl::String negative_path =
        fbl::StringPrintf("%s/%0*d", fs_path.c_str(), static_cast<int>(2 * Digest::kLength), 0);
    return negative_path;
}

class BlobfsTest {
public:
    BlobfsTest(BlobfsInfo&& info)
        : info_(std::move(info)) {}

    // Measures how long each filesystem operation takes for a known blob size. First blobfs is
    // pre-populated with the requested number of blobs, then each operation is measured in turn.
    bool ApiTest(perftest::RepeatState* state, Fixture* fixture) {
        BEGIN_HELPER;

        // Pre-populate blobfs with the requested number of blobs.
        fbl::unique_ptr<BlobInfo> new_blob;

        for (int64_t curr = 0; curr < info_.blob_count; ++curr) {
            ASSERT_TRUE(MakeBlob(fixture->fs_path(), info_.blob_size, fixture->mutable_seed(),
                                 &new_blob));
            fbl::unique_fd fd(open(new_blob->path.c_str(), O_CREAT | O_RDWR));
            ASSERT_TRUE(fd, strerror(errno));
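            // Blobfs requires the blob's size to be set with ftruncate before data is written.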
            ASSERT_EQ(ftruncate(fd.get(), info_.blob_size), 0, strerror(errno));
            ASSERT_EQ(StreamAll(write, fd.get(), new_blob->data.get(), new_blob->size_data), 0,
                      strerror(errno));
            info_.paths.push_back(new_blob->path);
            info_.path_index.push_back(curr);
            new_blob.reset();
        }

        fbl::AllocChecker ac;
        fbl::unique_ptr<char[]> buffer(new (&ac) char[info_.blob_size]);
        ASSERT_TRUE(ac.check());

        state->DeclareStep("generate_blob");
        state->DeclareStep("create");
        state->DeclareStep("truncate");
        state->DeclareStep("write");
        state->DeclareStep("close_write_fd");
        state->DeclareStep("open");
        state->DeclareStep("read");
        state->DeclareStep("unlink");
        state->DeclareStep("close_read_fd");

        // With the filesystem in this state, measure the average time it takes to perform each
        // of the operations declared above.
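        // Each interval between NextStep() calls in the loop below is attributed to the
        // corresponding step declared above.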
        while (state->KeepRunning()) {
            ASSERT_TRUE(MakeBlob(fixture->fs_path(), info_.blob_size, fixture->mutable_seed(),
                                 &new_blob));
            state->NextStep();

            fbl::unique_fd fd(open(new_blob->path.c_str(), O_CREAT | O_RDWR));
            ASSERT_TRUE(fd);
            state->NextStep();

            ASSERT_EQ(ftruncate(fd.get(), info_.blob_size), 0);
            state->NextStep();

            ASSERT_EQ(StreamAll(write, fd.get(), new_blob->data.get(), info_.blob_size), 0,
                      "Failed to write Data");
            // Force pending writes to be sent to the underlying device.
            ASSERT_EQ(fsync(fd.get()), 0);
            state->NextStep();

            ASSERT_EQ(close(fd.release()), 0);
            state->NextStep();

            fd.reset(open(new_blob->path.c_str(), O_RDONLY));
            ASSERT_TRUE(fd);
            state->NextStep();

            ASSERT_EQ(StreamAll(read, fd.get(), &buffer[0], info_.blob_size), 0);
            ASSERT_EQ(memcmp(buffer.get(), new_blob->data.get(), new_blob->size_data), 0);
            state->NextStep();

            unlink(new_blob->path.c_str());
            ASSERT_EQ(fsync(fd.get()), 0);

            state->NextStep();
            ASSERT_EQ(close(fd.release()), 0);
        }
        END_HELPER;
    }

    // After the API test, reuse the written blobs to measure lookup, read, and negative lookup.
    bool ReadTest(ReadOrder order, perftest::RepeatState* state, Fixture* fixture) {
        BEGIN_HELPER;
        state->DeclareStep("lookup");
        state->DeclareStep("read");
        state->DeclareStep("negative_lookup");
        ASSERT_EQ(info_.path_index.size(), info_.paths.size());
        ASSERT_GT(info_.path_index.size(), 0);
        SortPathsByOrder(order, fixture->mutable_seed());

        fbl::AllocChecker ac;
        fbl::unique_ptr<char[]> buffer(new (&ac) char[info_.blob_size]);
        ASSERT_TRUE(ac.check());

        uint64_t current = 0;
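        // The negative-lookup target is a correctly sized blob name (the all-zeroes digest) that
        // no generated blob is expected to have.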
        fbl::String negative_path = GetNegativeLookupPath(fixture->fs_path());

        while (state->KeepRunning()) {
            size_t path_index = info_.path_index[current % info_.paths.size()];
            fbl::unique_fd fd(open(info_.paths[path_index].c_str(), O_RDONLY));
            ASSERT_TRUE(fd);
            state->NextStep();
            ASSERT_EQ(StreamAll(read, fd.get(), &buffer[0], info_.blob_size), 0);
            state->NextStep();
            fbl::unique_fd no_fd(open(negative_path.c_str(), O_RDONLY));
            ASSERT_FALSE(no_fd);
            ++current;
        }
        END_HELPER;
    }

private:
    void SortPathsByOrder(ReadOrder order, unsigned int* seed) {
        switch (order) {
        case ReadOrder::kSequentialForward:
            for (uint64_t curr = 0; curr < info_.paths.size(); ++curr) {
                info_.path_index[curr] = curr;
            }
            break;

        case ReadOrder::kSequentialReverse:
            for (uint64_t curr = 0; curr < info_.paths.size(); ++curr) {
                info_.path_index[curr] = info_.paths.size() - curr - 1;
            }
            break;

        case ReadOrder::kRandom:
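            // Randomize the read order with one random pairwise swap per entry. This is a simple
            // shuffle, not a uniform Fisher-Yates permutation.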
            int64_t swaps = info_.paths.size();
            while (swaps > 0) {
                size_t src = rand_r(seed) % info_.paths.size();
                size_t target = rand_r(seed) % info_.paths.size();
                uint64_t tmp = info_.path_index[src];
                info_.path_index[src] = info_.path_index[target];
                info_.path_index[target] = tmp;
                --swaps;
            }
            break;
        }
    }

    BlobfsInfo info_;
};

bool RunBenchmark(int argc, char** argv) {
    FixtureOptions f_opts = FixtureOptions::Default(DISK_FORMAT_BLOBFS);
    PerformanceTestOptions p_opts;
    // 100 samples for each operation at each stage.
    constexpr uint32_t kSampleCount = 100;
    const size_t blob_sizes[] = {
        128,         // 128 bytes
        128 * 1024,  // 128 KiB
        1024 * 1024, // 1 MiB
    };
    const size_t blob_counts[] = {
        10,
        100,
        1000,
        10000,
    };
    const ReadOrder orders[] = {
        ReadOrder::kSequentialForward,
        ReadOrder::kSequentialReverse,
        ReadOrder::kRandom,
    };

    if (!fs_test_utils::ParseCommandLineArgs(argc, argv, &f_opts, &p_opts)) {
        return false;
    }

    fbl::Vector<TestCaseInfo> testcases;
    fbl::Vector<BlobfsTest> blobfs_tests;
    size_t test_index = 0;
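    // Build one test case per (blob size, blob count) pair; each carries an Api test plus one
    // read test per read order.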
    for (auto blob_size : blob_sizes) {
        for (auto blob_count : blob_counts) {
            BlobfsInfo fs_info;
            fs_info.blob_count = (p_opts.is_unittest) ? 1 : blob_count;
            fs_info.blob_size = blob_size;
            blobfs_tests.push_back(std::move(fs_info));
            TestCaseInfo testcase;
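            // Skip teardown so the blobs written by ApiTest stay on disk for the read tests that
            // follow in the same test case.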
            testcase.teardown = false;
            testcase.sample_count = kSampleCount;

            fbl::String size = GetNameForSize(blob_size);

            TestInfo api_test;
            api_test.name =
                fbl::StringPrintf("%s/%s/%luBlobs/Api", disk_format_string_[f_opts.fs_type],
                                  size.c_str(), blob_count);
            // There should be enough space for each blob, the merkle tree nodes, and the inodes.
            api_test.required_disk_space =
                blob_count * (blob_size + 2 * MerkleTree::kNodeSize + blobfs::kBlobfsInodeSize);
            api_test.test_fn = [test_index, &blobfs_tests](perftest::RepeatState* state,
                                                           fs_test_utils::Fixture* fixture) {
                return blobfs_tests[test_index].ApiTest(state, fixture);
            };
            testcase.tests.push_back(std::move(api_test));

            if (blob_count > 0) {
                for (auto order : orders) {
                    TestInfo read_test;
                    read_test.name = fbl::StringPrintf(
                        "%s/%s/%luBlobs/Read%s", disk_format_string_[f_opts.fs_type], size.c_str(),
                        blob_count, GetNameForOrder(order).c_str());
                    read_test.test_fn = [test_index, order,
                                         &blobfs_tests](perftest::RepeatState* state,
                                                        fs_test_utils::Fixture* fixture) {
                        return blobfs_tests[test_index].ReadTest(order, state, fixture);
                    };
                    read_test.required_disk_space =
                        blob_count *
                        (blob_size + 2 * MerkleTree::kNodeSize + blobfs::kBlobfsInodeSize);
                    testcase.tests.push_back(std::move(read_test));
                }
            }
            testcases.push_back(std::move(testcase));
            ++test_index;
        }
    }

    return fs_test_utils::RunTestCases(f_opts, p_opts, testcases);
}

} // namespace

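// RunWithMemFs provides a memfs-backed scratch area for the fixture while the benchmark runs.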
int main(int argc, char** argv) {
    return fs_test_utils::RunWithMemFs(
        [argc, argv]() { return RunBenchmark(argc, argv) ? 0 : -1; });
}