// Copyright 2018 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <perftest/perftest.h>
#include <perftest/runner.h>
#include <unittest/unittest.h>
#include <zircon/assert.h>

#include <fbl/algorithm.h>

#include <utility>

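// For reference, this file drives test functions with the same signature
// that ordinary perf tests use. In normal usage (a sketch for context, not
// something this file relies on), a perf test registers itself with the
// perftest library roughly like this:
//
//   static bool MyTest(perftest::RepeatState* state) {
//       while (state->KeepRunning()) {
//           // Code being measured goes here.
//       }
//       return true;
//   }
//
//   void RegisterTests() {
//       perftest::RegisterTest("MyTest", MyTest);
//   }
//   PERFTEST_CTOR(RegisterTests)
//
// See perftest.h for the exact registration interface.
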
// This is a helper for creating a FILE* that we can redirect output to, in
// order to make the tests below less noisy. We don't look at the output
// that is sent to the stream.
class DummyOutputStream {
public:
    DummyOutputStream() {
        fp_ = fmemopen(buf_, sizeof(buf_), "w+");
        ZX_ASSERT(fp_);
    }
    ~DummyOutputStream() {
        ZX_ASSERT(fclose(fp_) == 0);
    }

    FILE* fp() { return fp_; }

private:
    FILE* fp_;
    // Non-zero-size dummy buffer that fmemopen() will accept.
    char buf_[1];
};

// Example of a valid test that passes.
static bool NoOpTest(perftest::RepeatState* state) {
    while (state->KeepRunning()) {}
    return true;
}

// Example of a test that fails by returning false.
static bool FailingTest(perftest::RepeatState* state) {
    while (state->KeepRunning()) {}
    return false;
}

// Sanity-check the time values in a test case's results.
static bool check_times(perftest::TestCaseResults* test_case) {
    for (auto time_taken : test_case->values) {
        EXPECT_GE(time_taken, 0);
        // Check for unreasonably large values, which suggest that we
        // subtracted timestamps incorrectly.
        EXPECT_LT(time_taken, static_cast<double>(1ULL << 60));
    }
    return true;
}

// Test that a successful run of a perf test produces sensible results.
static bool TestResults() {
    BEGIN_TEST;

    perftest::internal::TestList test_list;
    perftest::internal::NamedTest test{"no_op_example_test", NoOpTest};
    test_list.push_back(std::move(test));

    const uint32_t kRunCount = 7;
    perftest::ResultsSet results;
    DummyOutputStream out;
    EXPECT_TRUE(perftest::internal::RunTests(
        "test-suite", &test_list, kRunCount, "", out.fp(), &results));

    auto* test_cases = results.results();
    ASSERT_EQ(test_cases->size(), 1);
    // The output should have time values for the number of runs we requested.
    auto* test_case = &(*test_cases)[0];
    EXPECT_EQ(test_case->values.size(), kRunCount);
    EXPECT_STR_EQ(test_case->label.c_str(), "no_op_example_test");
    EXPECT_TRUE(check_times(test_case));
    EXPECT_EQ(test_case->bytes_processed_per_run, 0);

    END_TEST;
}

// Test that if a perf test fails by returning "false", the failure gets
// propagated correctly.
static bool TestFailingTest() {
    BEGIN_TEST;

    perftest::internal::TestList test_list;
    perftest::internal::NamedTest test{"example_test", FailingTest};
    test_list.push_back(std::move(test));

    const uint32_t kRunCount = 7;
    perftest::ResultsSet results;
    DummyOutputStream out;
    EXPECT_FALSE(perftest::internal::RunTests(
        "test-suite", &test_list, kRunCount, "", out.fp(), &results));
    EXPECT_EQ(results.results()->size(), 0);

    END_TEST;
}

// Test that we report a test as failed if it calls KeepRunning() too many
// or too few times. Make sure that we don't overrun the array of
// timestamps or report uninitialized data from that array.
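// (For a run count of N, a correctly written test calls KeepRunning()
// exactly N + 1 times: it returns true for each of the N timed runs and
// false on the final call. The loop below therefore only matches the
// protocol when actual_runs == kRunCount.)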
static bool TestBadKeepRunningCalls() {
    BEGIN_TEST;

    for (int actual_runs = 0; actual_runs < 10; ++actual_runs) {
        // Example test function which might call KeepRunning() the wrong
        // number of times.
        auto test_func = [=](perftest::RepeatState* state) {
            for (int i = 0; i < actual_runs + 1; ++i)
                state->KeepRunning();
            return true;
        };

        perftest::internal::TestList test_list;
        perftest::internal::NamedTest test{"example_bad_test", test_func};
        test_list.push_back(std::move(test));

        const uint32_t kRunCount = 5;
        perftest::ResultsSet results;
        DummyOutputStream out;
        bool success = perftest::internal::RunTests(
            "test-suite", &test_list, kRunCount, "", out.fp(), &results);
        EXPECT_EQ(success, kRunCount == actual_runs);
        EXPECT_EQ(results.results()->size(),
                  (size_t)(kRunCount == actual_runs ? 1 : 0));
    }

    END_TEST;
}

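// Example of a multi-step test. Each DeclareStep() call declares a named
// phase of the test, and NextStep() marks the transition between
// consecutive phases within a single timed run.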
static bool MultistepTest(perftest::RepeatState* state) {
    state->DeclareStep("step1");
    state->DeclareStep("step2");
    state->DeclareStep("step3");
    while (state->KeepRunning()) {
        // Step 1 would go here.
        state->NextStep();
        // Step 2 would go here.
        state->NextStep();
        // Step 3 would go here.
    }
    return true;
}

// Test the results for a simple multi-step test.
static bool TestMultistepTest() {
    BEGIN_TEST;

    perftest::internal::TestList test_list;
    perftest::internal::NamedTest test{"example_test", MultistepTest};
    test_list.push_back(std::move(test));

    const uint32_t kRunCount = 7;
    perftest::ResultsSet results;
    DummyOutputStream out;
    EXPECT_TRUE(perftest::internal::RunTests(
        "test-suite", &test_list, kRunCount, "", out.fp(), &results));
    ASSERT_EQ(results.results()->size(), 3);
    EXPECT_STR_EQ((*results.results())[0].label.c_str(), "example_test.step1");
    EXPECT_STR_EQ((*results.results())[1].label.c_str(), "example_test.step2");
    EXPECT_STR_EQ((*results.results())[2].label.c_str(), "example_test.step3");
    for (auto& test_case : *results.results()) {
        EXPECT_EQ(test_case.values.size(), kRunCount);
        EXPECT_TRUE(check_times(&test_case));
    }

    END_TEST;
}

// Test that we report a test as failed if it calls NextStep() before
// KeepRunning(), which is invalid.
static bool TestNextStepCalledBeforeKeepRunning() {
    BEGIN_TEST;

    bool keeprunning_retval = true;
    // Invalid test function that calls NextStep() at the wrong time,
    // before calling KeepRunning().
    auto test_func = [&](perftest::RepeatState* state) {
        state->NextStep();
        keeprunning_retval = state->KeepRunning();
        return true;
    };

    perftest::internal::TestList test_list;
    perftest::internal::NamedTest test{"example_bad_test", test_func};
    test_list.push_back(std::move(test));
    const uint32_t kRunCount = 5;
    perftest::ResultsSet results;
    DummyOutputStream out;
    bool success = perftest::internal::RunTests(
        "test-suite", &test_list, kRunCount, "", out.fp(), &results);
    EXPECT_FALSE(success);
    EXPECT_FALSE(keeprunning_retval);

    END_TEST;
}

// Test that we report a test as failed if it calls NextStep() too many or
// too few times.
static bool TestBadNextStepCalls() {
    BEGIN_TEST;

    for (int actual_calls = 0; actual_calls < 10; ++actual_calls) {
        // Example test function which might call NextStep() the wrong
        // number of times.
        auto test_func = [=](perftest::RepeatState* state) {
            state->DeclareStep("step1");
            state->DeclareStep("step2");
            state->DeclareStep("step3");
            while (state->KeepRunning()) {
                for (int i = 0; i < actual_calls; ++i) {
                    state->NextStep();
                }
            }
            return true;
        };

        perftest::internal::TestList test_list;
        perftest::internal::NamedTest test{"example_bad_test", test_func};
        test_list.push_back(std::move(test));

        const uint32_t kRunCount = 5;
        perftest::ResultsSet results;
        DummyOutputStream out;
        bool success = perftest::internal::RunTests(
            "test-suite", &test_list, kRunCount, "", out.fp(), &results);
        const int kCorrectNumberOfCalls = 2;
        EXPECT_EQ(success, actual_calls == kCorrectNumberOfCalls);
        EXPECT_EQ(results.results()->size(),
                  static_cast<size_t>(
                      actual_calls == kCorrectNumberOfCalls ? 3 : 0));
    }

    END_TEST;
}

// Check that the bytes_processed_per_run parameter is propagated through.
static bool TestBytesProcessedParameter() {
    BEGIN_TEST;

    auto test_func = [&](perftest::RepeatState* state) {
        state->SetBytesProcessedPerRun(1234);
        while (state->KeepRunning()) {}
        return true;
    };
    perftest::internal::TestList test_list;
    perftest::internal::NamedTest test{"throughput_test", test_func};
    test_list.push_back(std::move(test));

    const uint32_t kRunCount = 5;
    perftest::ResultsSet results;
    DummyOutputStream out;
    EXPECT_TRUE(perftest::internal::RunTests(
        "test-suite", &test_list, kRunCount, "", out.fp(), &results));
    auto* test_cases = results.results();
    ASSERT_EQ(test_cases->size(), 1);
    EXPECT_EQ((*test_cases)[0].bytes_processed_per_run, 1234);

    END_TEST;
}

// If we have a multi-step test that specifies a bytes_processed_per_run
// parameter, we should get a result reported for the overall times with a
// bytes_processed_per_run value. The results for the individual steps
// should not report bytes_processed_per_run.
static bool TestBytesProcessedParameterMultistep() {
    BEGIN_TEST;

    auto test_func = [&](perftest::RepeatState* state) {
        state->SetBytesProcessedPerRun(1234);
        state->DeclareStep("step1");
        state->DeclareStep("step2");
        while (state->KeepRunning()) {
            state->NextStep();
        }
        return true;
    };
    perftest::internal::TestList test_list;
    perftest::internal::NamedTest test{"throughput_test", test_func};
    test_list.push_back(std::move(test));

    const uint32_t kRunCount = 5;
    perftest::ResultsSet results;
    DummyOutputStream out;
    EXPECT_TRUE(perftest::internal::RunTests(
        "test-suite", &test_list, kRunCount, "", out.fp(), &results));
    auto* test_cases = results.results();
    ASSERT_EQ(test_cases->size(), 3);
    EXPECT_STR_EQ((*test_cases)[0].label.c_str(), "throughput_test");
    EXPECT_STR_EQ((*test_cases)[1].label.c_str(), "throughput_test.step1");
    EXPECT_STR_EQ((*test_cases)[2].label.c_str(), "throughput_test.step2");
    EXPECT_EQ((*test_cases)[0].bytes_processed_per_run, 1234);
    EXPECT_EQ((*test_cases)[1].bytes_processed_per_run, 0);
    EXPECT_EQ((*test_cases)[2].bytes_processed_per_run, 0);

    END_TEST;
}

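// Test that ParseCommandArgs() recognizes each of the supported
// command-line options, in both "--flag value" and "--flag=value" forms.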
static bool TestParsingCommandArgs() {
    BEGIN_TEST;

    const char* argv[] = {"unused_argv0", "--runs", "123", "--out", "dest_file",
                          "--filter", "some_regex", "--enable-tracing",
                          "--startup-delay=456"};
    perftest::internal::CommandArgs args;
    perftest::internal::ParseCommandArgs(
        fbl::count_of(argv), const_cast<char**>(argv), &args);
    EXPECT_EQ(args.run_count, 123);
    EXPECT_STR_EQ(args.output_filename, "dest_file");
    EXPECT_STR_EQ(args.filter_regex, "some_regex");
    EXPECT_TRUE(args.enable_tracing);
    EXPECT_EQ(args.startup_delay_seconds, 456);

    END_TEST;
}

BEGIN_TEST_CASE(perftest_runner_test)
RUN_TEST(TestResults)
RUN_TEST(TestFailingTest)
RUN_TEST(TestBadKeepRunningCalls)
RUN_TEST(TestMultistepTest)
RUN_TEST(TestNextStepCalledBeforeKeepRunning)
RUN_TEST(TestBadNextStepCalls)
RUN_TEST(TestBytesProcessedParameter)
RUN_TEST(TestBytesProcessedParameterMultistep)
RUN_TEST(TestParsingCommandArgs)
END_TEST_CASE(perftest_runner_test)

int main(int argc, char** argv) {
    return perftest::PerfTestMain(argc, argv, "fuchsia.zircon.perf_test");
}