// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "benchmark_runner.h"

#include "benchmark/benchmark.h"
#include "benchmark_api_internal.h"
#include "internal_macros.h"

#ifndef BENCHMARK_OS_WINDOWS
#if !defined(BENCHMARK_OS_FUCHSIA) && !defined(BENCHMARK_OS_QURT)
#include <sys/resource.h>
#endif
#include <sys/time.h>
#include <unistd.h>
#endif

#include <algorithm>
#include <atomic>
#include <climits>
#include <cmath>
#include <condition_variable>
#include <cstdio>
#include <cstdlib>
#include <fstream>
#include <iostream>
#include <limits>
#include <memory>
#include <string>
#include <thread>
#include <utility>

#include "check.h"
#include "colorprint.h"
#include "commandlineflags.h"
#include "complexity.h"
#include "counter.h"
#include "internal_macros.h"
#include "log.h"
#include "mutex.h"
#include "perf_counters.h"
#include "re.h"
#include "statistics.h"
#include "string_util.h"
#include "thread_manager.h"
#include "thread_timer.h"

namespace benchmark {

namespace internal {

MemoryManager* memory_manager = nullptr;

namespace {

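// Hard cap on the iteration count of a single run: PredictNumItersNeeded()
// never requests more than this, and a run that reaches it is always reported.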
static constexpr IterationCount kMaxIterations = 1000000000;
const double kDefaultMinTime =
    std::strtod(::benchmark::kDefaultMinTimeStr, /*p_end*/ nullptr);

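// Builds the BenchmarkReporter::Run for one repetition of benchmark `b` from
// the per-thread results accumulated by the ThreadManager, filling in timings,
// counters, complexity info and (when available) memory statistics.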
BenchmarkReporter::Run CreateRunReport(
    const benchmark::internal::BenchmarkInstance& b,
    const internal::ThreadManager::Result& results,
    IterationCount memory_iterations,
    const MemoryManager::Result* memory_result, double seconds,
    int64_t repetition_index, int64_t repeats) {
  // Create report about this benchmark run.
  BenchmarkReporter::Run report;

  report.run_name = b.name();
  report.family_index = b.family_index();
  report.per_family_instance_index = b.per_family_instance_index();
  report.skipped = results.skipped_;
  report.skip_message = results.skip_message_;
  report.report_label = results.report_label_;
  // This is the total iterations across all threads.
  report.iterations = results.iterations;
  report.time_unit = b.time_unit();
  report.threads = b.threads();
  report.repetition_index = repetition_index;
  report.repetitions = repeats;

  if (!report.skipped) {
    if (b.use_manual_time()) {
      report.real_accumulated_time = results.manual_time_used;
    } else {
      report.real_accumulated_time = results.real_time_used;
    }
    report.cpu_accumulated_time = results.cpu_time_used;
    report.complexity_n = results.complexity_n;
    report.complexity = b.complexity();
    report.complexity_lambda = b.complexity_lambda();
    report.statistics = &b.statistics();
    report.counters = results.counters;

    if (memory_iterations > 0) {
      assert(memory_result != nullptr);
      report.memory_result = memory_result;
      report.allocs_per_iter =
          memory_iterations ? static_cast<double>(memory_result->num_allocs) /
                                  memory_iterations
                            : 0;
    }

    internal::Finish(&report.counters, results.iterations, seconds,
                     b.threads());
  }
  return report;
}

// Execute one thread of benchmark b for the specified number of iterations.
// Adds the stats collected for the thread into manager->results.
void RunInThread(const BenchmarkInstance* b, IterationCount iters,
                 int thread_id, ThreadManager* manager,
                 PerfCountersMeasurement* perf_counters_measurement) {
  internal::ThreadTimer timer(
      b->measure_process_cpu_time()
          ? internal::ThreadTimer::CreateProcessCpuTime()
          : internal::ThreadTimer::Create());

  State st =
      b->Run(iters, thread_id, &timer, manager, perf_counters_measurement);
  BM_CHECK(st.skipped() || st.iterations() >= st.max_iterations)
      << "Benchmark returned before State::KeepRunning() returned false!";
  {
    MutexLock l(manager->GetBenchmarkMutex());
    internal::ThreadManager::Result& results = manager->results;
    results.iterations += st.iterations();
    results.cpu_time_used += timer.cpu_time_used();
    results.real_time_used += timer.real_time_used();
    results.manual_time_used += timer.manual_time_used();
    results.complexity_n += st.complexity_length_n();
    internal::Increment(&results.counters, st.counters);
  }
  manager->NotifyThreadComplete();
}

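// Returns the minimum running time to target for benchmark `b`: the
// per-benchmark MinTime() if one was set, otherwise the time carried by the
// --benchmark_min_time flag (or kDefaultMinTime when that flag specified an
// iteration count rather than a time).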
double ComputeMinTime(const benchmark::internal::BenchmarkInstance& b,
                      const BenchTimeType& iters_or_time) {
  if (!IsZero(b.min_time())) return b.min_time();
  // If the flag was used to specify number of iters, then return the default
  // min_time.
  if (iters_or_time.tag == BenchTimeType::ITERS) return kDefaultMinTime;

  return iters_or_time.time;
}

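// Returns the explicit iteration count to run: the per-benchmark Iterations()
// value if one was set, otherwise the count parsed from a
// --benchmark_min_time=<N>x flag.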
IterationCount ComputeIters(const benchmark::internal::BenchmarkInstance& b,
                            const BenchTimeType& iters_or_time) {
  if (b.iterations() != 0) return b.iterations();

  // We've already concluded that this flag is currently used to pass
  // iters but do a check here again anyway.
  BM_CHECK(iters_or_time.tag == BenchTimeType::ITERS);
  return iters_or_time.iters;
}

}  // end namespace

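// Parses the --benchmark_min_time flag value. Two forms are accepted:
// "<integer>x" (an explicit iteration count) and "<float>s" (a minimum time in
// seconds; a bare "<float>" is tolerated with a warning). For example, "1000x"
// yields {ITERS, 1000}, "2.5s" yields {TIME, 2.5}, and an empty string yields
// {TIME, 0.0}.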
BenchTimeType ParseBenchMinTime(const std::string& value) {
  BenchTimeType ret;

  if (value.empty()) {
    ret.tag = BenchTimeType::TIME;
    ret.time = 0.0;
    return ret;
  }

  if (value.back() == 'x') {
    char* p_end;
    // Reset errno before it's changed by strtol.
    errno = 0;
    IterationCount num_iters = std::strtol(value.c_str(), &p_end, 10);

    // After a valid parse, p_end should have been set to
    // point to the 'x' suffix.
    BM_CHECK(errno == 0 && p_end != nullptr && *p_end == 'x')
        << "Malformed iters value passed to --benchmark_min_time: `" << value
        << "`. Expected --benchmark_min_time=<integer>x.";

    ret.tag = BenchTimeType::ITERS;
    ret.iters = num_iters;
    return ret;
  }

  bool has_suffix = value.back() == 's';
  if (!has_suffix) {
    BM_VLOG(0) << "Value passed to --benchmark_min_time should have a suffix. "
                  "E.g., `30s` for 30 seconds.";
  }

  char* p_end;
  // Reset errno before it's changed by strtod.
  errno = 0;
  double min_time = std::strtod(value.c_str(), &p_end);

  // After a successful parse, p_end should point to the suffix 's',
  // or the end of the string if the suffix was omitted.
  BM_CHECK(errno == 0 && p_end != nullptr &&
           ((has_suffix && *p_end == 's') || *p_end == '\0'))
      << "Malformed seconds value passed to --benchmark_min_time: `" << value
      << "`. Expected --benchmark_min_time=<float>s.";

  ret.tag = BenchTimeType::TIME;
  ret.time = min_time;

  return ret;
}

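// Per-benchmark settings (MinTime(), MinWarmUpTime(), Repetitions(),
// Iterations()) take precedence over the corresponding --benchmark_* flags;
// when no explicit iteration count is given, the runner starts at a single
// iteration and lets DoOneRepetition() grow it until the minimum time is met.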
BenchmarkRunner::BenchmarkRunner(
    const benchmark::internal::BenchmarkInstance& b_,
    PerfCountersMeasurement* pcm_,
    BenchmarkReporter::PerFamilyRunReports* reports_for_family_)
    : b(b_),
      reports_for_family(reports_for_family_),
      parsed_benchtime_flag(ParseBenchMinTime(FLAGS_benchmark_min_time)),
      min_time(ComputeMinTime(b_, parsed_benchtime_flag)),
      min_warmup_time((!IsZero(b.min_time()) && b.min_warmup_time() > 0.0)
                          ? b.min_warmup_time()
                          : FLAGS_benchmark_min_warmup_time),
      warmup_done(!(min_warmup_time > 0.0)),
      repeats(b.repetitions() != 0 ? b.repetitions()
                                   : FLAGS_benchmark_repetitions),
      has_explicit_iteration_count(b.iterations() != 0 ||
                                   parsed_benchtime_flag.tag ==
                                       BenchTimeType::ITERS),
      pool(b.threads() - 1),
      iters(has_explicit_iteration_count
                ? ComputeIters(b_, parsed_benchtime_flag)
                : 1),
      perf_counters_measurement_ptr(pcm_) {
  run_results.display_report_aggregates_only =
      (FLAGS_benchmark_report_aggregates_only ||
       FLAGS_benchmark_display_aggregates_only);
  run_results.file_report_aggregates_only =
      FLAGS_benchmark_report_aggregates_only;
  if (b.aggregation_report_mode() != internal::ARM_Unspecified) {
    run_results.display_report_aggregates_only =
        (b.aggregation_report_mode() &
         internal::ARM_DisplayReportAggregatesOnly);
    run_results.file_report_aggregates_only =
        (b.aggregation_report_mode() & internal::ARM_FileReportAggregatesOnly);
    BM_CHECK(FLAGS_benchmark_perf_counters.empty() ||
             (perf_counters_measurement_ptr->num_counters() == 0))
        << "Perf counters were requested but could not be set up.";
  }
}

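// Runs `iters` iterations of the benchmark on b.threads() threads (the calling
// thread plus the worker pool) and returns the measurements merged across all
// threads, with the per-thread real/manual times averaged.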
BenchmarkRunner::IterationResults BenchmarkRunner::DoNIterations() {
  BM_VLOG(2) << "Running " << b.name().str() << " for " << iters << "\n";

  std::unique_ptr<internal::ThreadManager> manager;
  manager.reset(new internal::ThreadManager(b.threads()));

  // Run all but one thread in separate threads
  for (std::size_t ti = 0; ti < pool.size(); ++ti) {
    pool[ti] = std::thread(&RunInThread, &b, iters, static_cast<int>(ti + 1),
                           manager.get(), perf_counters_measurement_ptr);
  }
  // And run one thread here directly.
  // (If we were asked to run just one thread, we don't create new threads.)
  // Yes, we need to do this here *after* we start the separate threads.
  RunInThread(&b, iters, 0, manager.get(), perf_counters_measurement_ptr);

  // The main thread has finished. Now let's wait for the other threads.
  manager->WaitForAllThreads();
  for (std::thread& thread : pool) thread.join();

  IterationResults i;
  // Acquire the measurements/counters from the manager, UNDER THE LOCK!
  {
    MutexLock l(manager->GetBenchmarkMutex());
    i.results = manager->results;
  }

  // And get rid of the manager.
  manager.reset();

  // Adjust real/manual time stats since they were reported per thread.
  i.results.real_time_used /= b.threads();
  i.results.manual_time_used /= b.threads();
  // If we were measuring whole-process CPU usage, adjust the CPU time too.
  if (b.measure_process_cpu_time()) i.results.cpu_time_used /= b.threads();

  BM_VLOG(2) << "Ran in " << i.results.cpu_time_used << "/"
             << i.results.real_time_used << "\n";

  // By using KeepRunningBatch a benchmark can iterate more times than
  // requested, so take the iteration count from i.results.
  i.iters = i.results.iterations / b.threads();

  // Base decisions off of real time if requested by this benchmark.
  i.seconds = i.results.cpu_time_used;
  if (b.use_manual_time()) {
    i.seconds = i.results.manual_time_used;
  } else if (b.use_real_time()) {
    i.seconds = i.results.real_time_used;
  }

  return i;
}

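// Given the result of the last run, predict how many iterations the next run
// should request so that it lasts roughly GetMinTimeToApply() * 1.4.
// For example (numbers chosen purely for illustration): with a min time of 1s,
// a run of i.iters = 1000 that took i.seconds = 0.2s gives
// multiplier = 1.0 * 1.4 / 0.2 = 7, so the next run requests 7000 iterations.
// Runs shorter than 10% of the min time are grown by at most 10x, and the
// result is always capped at kMaxIterations.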
IterationCount BenchmarkRunner::PredictNumItersNeeded(
    const IterationResults& i) const {
  // See by how much the iteration count should be increased.
  // Note: Avoid division by zero with max(seconds, 1ns).
  double multiplier = GetMinTimeToApply() * 1.4 / std::max(i.seconds, 1e-9);
  // If our last run was at least 10% of FLAGS_benchmark_min_time then we
  // use the multiplier directly.
  // Otherwise we use at most 10 times expansion.
  // NOTE: When the last run was at least 10% of the min time the max
  // expansion should be 14x.
  const bool is_significant = (i.seconds / GetMinTimeToApply()) > 0.1;
  multiplier = is_significant ? multiplier : 10.0;

  // So what seems to be the sufficiently-large iteration count? Round up.
  const IterationCount max_next_iters = static_cast<IterationCount>(
      std::lround(std::max(multiplier * static_cast<double>(i.iters),
                           static_cast<double>(i.iters) + 1.0)));
  // But we do have *some* limits though..
  const IterationCount next_iters = std::min(max_next_iters, kMaxIterations);

  BM_VLOG(3) << "Next iters: " << next_iters << ", " << multiplier << "\n";
  return next_iters;  // round up before conversion to integer.
}

bool BenchmarkRunner::ShouldReportIterationResults(
    const IterationResults& i) const {
  // Determine if this run should be reported:
  // either it has run for a sufficient amount of time,
  // or an error/skip was reported.
  return i.results.skipped_ ||
         i.iters >= kMaxIterations ||  // Too many iterations already.
         i.seconds >=
             GetMinTimeToApply() ||  // The elapsed time is large enough.
         // CPU time is specified but the elapsed real time greatly exceeds
         // the minimum time.
         // Note that user-provided timers are exempt from this test.
         ((i.results.real_time_used >= 5 * GetMinTimeToApply()) &&
          !b.use_manual_time());
}

double BenchmarkRunner::GetMinTimeToApply() const {
  // In order to reuse the machinery that runs and measures benchmarks for the
  // warmup phase as well, we need a way of telling whether to apply min_time
  // or min_warmup_time. This function figures out whether we are still in the
  // warmup phase (and therefore need to apply min_warmup_time) or whether we
  // are already in the benchmarking phase (and min_time needs to be applied).
  return warmup_done ? min_time : min_warmup_time;
}

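// Ends the warmup phase: marks it as done and sets the iteration count that
// the first measured run should start from.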
void BenchmarkRunner::FinishWarmUp(const IterationCount& i) {
  warmup_done = true;
  iters = i;
}

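// Runs the warmup phase using the same grow-and-retry loop as a measured
// repetition, but against min_warmup_time; the measurements are discarded and
// the pre-warmup iteration count is restored afterwards.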
void BenchmarkRunner::RunWarmUp() {
  // Use the same mechanisms for warming up the benchmark as used for actually
  // running and measuring the benchmark.
  IterationResults i_warmup;
  // Don't use the iteration count determined in the warmup phase for the
  // actual measured benchmark phase. While it might be a good starting point
  // (and would remove the need to figure out how many iterations are needed
  // when min_time is applied again), it may also be a completely wrong guess,
  // since the warmup loops might be considerably slower (e.g. because of
  // caching effects).
  const IterationCount i_backup = iters;

  for (;;) {
    b.Setup();
    i_warmup = DoNIterations();
    b.Teardown();

    const bool finish = ShouldReportIterationResults(i_warmup);

    if (finish) {
      FinishWarmUp(i_backup);
      break;
    }

    // Although we are running "only" a warmup phase, where running enough
    // iterations at once without measuring time isn't as important as it is
    // for the benchmarking phase, we still do it the same way; otherwise it
    // would be very confusing for the user to figure out how to choose a
    // proper value for min_warmup_time if a different approach were used for
    // running it.
    iters = PredictNumItersNeeded(i_warmup);
    assert(iters > i_warmup.iters &&
           "if we did more iterations than we want to do the next time, "
           "then we should have accepted the current iteration run.");
  }
}

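// Performs one repetition: runs the warmup phase if needed, repeatedly runs
// the benchmark while growing the iteration count until the results are
// significant, optionally collects memory measurements, and appends the
// resulting report to run_results.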
void BenchmarkRunner::DoOneRepetition() {
  assert(HasRepeatsRemaining() && "Already done all repetitions?");

  const bool is_the_first_repetition = num_repetitions_done == 0;

  // In case a warmup phase is requested by the benchmark, run it now.
  // After running the warmup phase the BenchmarkRunner should be in a state
  // as if this warmup never happened, except for the fact that warmup_done is
  // set. Every other manipulation of the BenchmarkRunner instance would be a
  // bug! Please fix it.
  if (!warmup_done) RunWarmUp();

  IterationResults i;
  // We *may* be gradually increasing the length (iteration count)
  // of the benchmark until we decide the results are significant.
  // And once we do, we report those last results and exit.
  // Please do note that if there are repetitions, the iteration count
  // is *only* calculated for the *first* repetition, and other repetitions
  // simply use that precomputed iteration count.
  for (;;) {
    b.Setup();
    i = DoNIterations();
    b.Teardown();

    // Do we consider the results to be significant?
    // If we are doing repetitions, and the first repetition was already done,
    // it has calculated the correct iteration time, so we have run that very
    // iteration count just now. No need to calculate anything. Just report.
    // Else, the normal rules apply.
    const bool results_are_significant = !is_the_first_repetition ||
                                         has_explicit_iteration_count ||
                                         ShouldReportIterationResults(i);

    if (results_are_significant) break;  // Good, let's report them!

    // Nope, bad iteration. Let's re-estimate the hopefully-sufficient
    // iteration count, and run the benchmark again...

    iters = PredictNumItersNeeded(i);
    assert(iters > i.iters &&
           "if we did more iterations than we want to do the next time, "
           "then we should have accepted the current iteration run.");
  }

  // Oh, one last thing, we need to also produce the 'memory measurements'..
  MemoryManager::Result* memory_result = nullptr;
  IterationCount memory_iterations = 0;
  if (memory_manager != nullptr) {
    // TODO(vyng): Consider making BenchmarkReporter::Run::memory_result an
    // optional so we don't have to own the Result here.
    // Can't do it now due to cxx03.
    memory_results.push_back(MemoryManager::Result());
    memory_result = &memory_results.back();
    // Only run a few iterations to reduce the impact of one-time
    // allocations in benchmarks that are not properly managed.
    memory_iterations = std::min<IterationCount>(16, iters);
    memory_manager->Start();
    std::unique_ptr<internal::ThreadManager> manager;
    manager.reset(new internal::ThreadManager(1));
    b.Setup();
    RunInThread(&b, memory_iterations, 0, manager.get(),
                perf_counters_measurement_ptr);
    manager->WaitForAllThreads();
    manager.reset();
    b.Teardown();
    memory_manager->Stop(*memory_result);
  }

  // Ok, now actually report.
  BenchmarkReporter::Run report =
      CreateRunReport(b, i.results, memory_iterations, memory_result,
                      i.seconds, num_repetitions_done, repeats);

  if (reports_for_family) {
    ++reports_for_family->num_runs_done;
    if (!report.skipped) reports_for_family->Runs.push_back(report);
  }

  run_results.non_aggregates.push_back(report);

  ++num_repetitions_done;
}

RunResults&& BenchmarkRunner::GetResults() {
  assert(!HasRepeatsRemaining() && "Did not run all repetitions yet?");

  // Calculate additional statistics over the repetitions of this instance.
  run_results.aggregates_only = ComputeStats(run_results.non_aggregates);

  return std::move(run_results);
}

}  // end namespace internal

}  // end namespace benchmark