// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "benchmark/benchmark.h"

#include "benchmark_api_internal.h"
#include "benchmark_runner.h"
#include "internal_macros.h"

#ifndef BENCHMARK_OS_WINDOWS
#if !defined(BENCHMARK_OS_FUCHSIA) && !defined(BENCHMARK_OS_QURT)
#include <sys/resource.h>
#endif
#include <sys/time.h>
#include <unistd.h>
#endif

#include <algorithm>
#include <atomic>
#include <condition_variable>
#include <cstdio>
#include <cstdlib>
#include <fstream>
#include <iostream>
#include <limits>
#include <map>
#include <memory>
#include <random>
#include <string>
#include <thread>
#include <utility>

#include "check.h"
#include "colorprint.h"
#include "commandlineflags.h"
#include "complexity.h"
#include "counter.h"
#include "log.h"
#include "mutex.h"
#include "perf_counters.h"
#include "re.h"
#include "statistics.h"
#include "string_util.h"
#include "thread_manager.h"
#include "thread_timer.h"
namespace benchmark {
// Print a list of benchmarks. This option overrides all other options.
BM_DEFINE_bool(benchmark_list_tests, false);

// A regular expression that specifies the set of benchmarks to execute. If
// this flag is empty, or if this flag is the string "all", all benchmarks
// linked into the binary are run.
BM_DEFINE_string(benchmark_filter, "");

// Specification of how long to run the benchmark.
//
// It can be either an exact number of iterations (specified as `<integer>x`),
// or a minimum number of seconds (specified as `<float>s`). If the latter
// format (i.e., min seconds) is used, the system may run the benchmark longer
// until the results are considered significant.
//
// For backward compatibility, the `s` suffix may be omitted, in which case
// the specified number is interpreted as the number of seconds.
//
// For cpu-time based tests, this is the lower bound
// on the total cpu time used by all threads that make up the test. For
// real-time based tests, this is the lower bound on the elapsed time of the
// benchmark execution, regardless of the number of threads.
BM_DEFINE_string(benchmark_min_time, kDefaultMinTimeStr);
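
// For illustration, some hypothetical command lines exercising the formats
// described above (the binary name is assumed):
//   ./mybench --benchmark_min_time=1000x  (run exactly 1000 iterations)
//   ./mybench --benchmark_min_time=2.5s   (run for at least 2.5 seconds)
//   ./mybench --benchmark_min_time=2.5    (legacy form: also 2.5 seconds)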

// Minimum number of seconds a benchmark should be run before results are
// taken into account. This can be necessary, e.g., for benchmarks of code
// that needs to fill some form of cache before its performance is of
// interest.
// Note: results gathered within this period are discarded and are not used
// for the reported results.
BM_DEFINE_double(benchmark_min_warmup_time, 0.0);
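
// For illustration (hypothetical command line): `--benchmark_min_warmup_time=0.5`
// runs each benchmark for at least 0.5 seconds of unmeasured warmup before the
// measured runs begin.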

// The number of runs of each benchmark. If greater than 1, the mean and
// standard deviation of the runs will be reported.
BM_DEFINE_int32(benchmark_repetitions, 1);

// If set, enable random interleaving of repetitions of all benchmarks.
// See http://github.com/google/benchmark/issues/1051 for details.
BM_DEFINE_bool(benchmark_enable_random_interleaving, false);

// Whether to report the result of each individual benchmark repetition. When
// 'true' is specified, only the mean, standard deviation, and other
// statistics are reported for repeated benchmarks. Affects all reporters.
BM_DEFINE_bool(benchmark_report_aggregates_only, false);

// Whether to display the result of each individual benchmark repetition. When
// 'true' is specified, only the mean, standard deviation, and other statistics
// are displayed for repeated benchmarks. Unlike
// benchmark_report_aggregates_only, this only affects the display reporter,
// but *NOT* the file reporter, which will still contain all the output.
BM_DEFINE_bool(benchmark_display_aggregates_only, false);

// The format to use for console output.
// Valid values are 'console', 'json', or 'csv'.
BM_DEFINE_string(benchmark_format, "console");

// The format to use for file output.
// Valid values are 'console', 'json', or 'csv'.
BM_DEFINE_string(benchmark_out_format, "json");

// The file to write additional output to.
BM_DEFINE_string(benchmark_out, "");

// Whether to use colors in the output. Valid values:
// 'true'/'yes'/1, 'false'/'no'/0, and 'auto'. 'auto' means to use colors if
// the output is being sent to a terminal and the TERM environment variable is
// set to a terminal type that supports colors.
BM_DEFINE_string(benchmark_color, "auto");

// Whether to use tabular format when printing user counters to the console.
// Valid values: 'true'/'yes'/1, 'false'/'no'/0. Defaults to false.
BM_DEFINE_bool(benchmark_counters_tabular, false);

// List of additional perf counters to collect, in libpfm format. For more
// information about libpfm: https://man7.org/linux/man-pages/man3/libpfm.3.html
BM_DEFINE_string(benchmark_perf_counters, "");
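
// For illustration (counter names depend on the host PMU and the libpfm
// build, so treat these as hypothetical):
//   ./mybench --benchmark_perf_counters=CYCLES,INSTRUCTIONS
// would collect cycle and instruction counts alongside the timings.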

// Extra context to include in the output, formatted as comma-separated
// key-value pairs. Kept internal, as it's only used for parsing from the
// environment/command line.
BM_DEFINE_kvpairs(benchmark_context, {});
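
// For illustration (hypothetical keys and values):
//   ./mybench --benchmark_context=compiler=clang,branch=dev
// adds the pairs compiler=clang and branch=dev to the reported context.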

// Set the default time unit to use for reports.
// Valid values are 'ns', 'us', 'ms' or 's'.
BM_DEFINE_string(benchmark_time_unit, "");

// The level of verbose logging to output.
BM_DEFINE_int32(v, 0);

namespace internal {

std::map<std::string, std::string>* global_context = nullptr;

BENCHMARK_EXPORT std::map<std::string, std::string>*& GetGlobalContext() {
  return global_context;
}

// FIXME: wouldn't LTO mess this up?
void UseCharPointer(char const volatile*) {}

}  // namespace internal

State::State(std::string name, IterationCount max_iters,
             const std::vector<int64_t>& ranges, int thread_i, int n_threads,
             internal::ThreadTimer* timer, internal::ThreadManager* manager,
             internal::PerfCountersMeasurement* perf_counters_measurement)
    : total_iterations_(0),
      batch_leftover_(0),
      max_iterations(max_iters),
      started_(false),
      finished_(false),
      skipped_(internal::NotSkipped),
      range_(ranges),
      complexity_n_(0),
      name_(std::move(name)),
      thread_index_(thread_i),
      threads_(n_threads),
      timer_(timer),
      manager_(manager),
      perf_counters_measurement_(perf_counters_measurement) {
  BM_CHECK(max_iterations != 0) << "At least one iteration must be run";
  BM_CHECK_LT(thread_index_, threads_)
      << "thread_index must be less than threads";

  // Add counters with correct flag now. If added with `counters[name]` in
  // `PauseTiming`, a new `Counter` will be inserted the first time, which
  // won't have the flag. Inserting them now also reduces the allocations
  // during the benchmark.
  if (perf_counters_measurement_) {
    for (const std::string& counter_name :
         perf_counters_measurement_->names()) {
      counters[counter_name] = Counter(0.0, Counter::kAvgIterations);
    }
  }

  // Note: The use of offsetof below is technically undefined until C++17
  // because State is not a standard layout type. However, all compilers
  // currently provide well-defined behavior as an extension (which is
  // demonstrated since constexpr evaluation must diagnose all undefined
  // behavior). However, GCC and Clang also warn about this use of offsetof,
  // which must be suppressed.
#if defined(__INTEL_COMPILER)
#pragma warning push
#pragma warning(disable : 1875)
#elif defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Winvalid-offsetof"
#endif
#if defined(__NVCC__)
#pragma nv_diagnostic push
#pragma nv_diag_suppress 1427
#endif
#if defined(__NVCOMPILER)
#pragma diagnostic push
#pragma diag_suppress offset_in_non_POD_nonstandard
#endif
  // Offset tests to ensure commonly accessed data is on the first cache line.
  const int cache_line_size = 64;
  static_assert(
      offsetof(State, skipped_) <= (cache_line_size - sizeof(skipped_)), "");
#if defined(__INTEL_COMPILER)
#pragma warning pop
#elif defined(__GNUC__)
#pragma GCC diagnostic pop
#endif
#if defined(__NVCC__)
#pragma nv_diagnostic pop
#endif
#if defined(__NVCOMPILER)
#pragma diagnostic pop
#endif
}

void State::PauseTiming() {
  // Add in time accumulated so far
  BM_CHECK(started_ && !finished_ && !skipped());
  timer_->StopTimer();
  if (perf_counters_measurement_) {
    std::vector<std::pair<std::string, double>> measurements;
    if (!perf_counters_measurement_->Stop(measurements)) {
      BM_CHECK(false) << "Failed to read perf counter values.";
    }
    for (const auto& name_and_measurement : measurements) {
      const std::string& name = name_and_measurement.first;
      const double measurement = name_and_measurement.second;
      // Counter was inserted with `kAvgIterations` flag by the constructor.
      assert(counters.find(name) != counters.end());
      counters[name].value += measurement;
    }
  }
}

void State::ResumeTiming() {
  BM_CHECK(started_ && !finished_ && !skipped());
  timer_->StartTimer();
  if (perf_counters_measurement_) {
    perf_counters_measurement_->Start();
  }
}
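
// A minimal sketch of how PauseTiming()/ResumeTiming() are typically used
// from a benchmark body to exclude per-iteration setup from the timing
// (BM_WithSetup and its helpers are hypothetical):
//
//   static void BM_WithSetup(benchmark::State& state) {
//     for (auto _ : state) {
//       state.PauseTiming();   // stop the timer while preparing inputs
//       auto input = MakeExpensiveInput();  // hypothetical helper
//       state.ResumeTiming();  // measure only the code under test
//       benchmark::DoNotOptimize(Process(input));  // hypothetical Process()
//     }
//   }
//   BENCHMARK(BM_WithSetup);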

void State::SkipWithMessage(const std::string& msg) {
  skipped_ = internal::SkippedWithMessage;
  {
    MutexLock l(manager_->GetBenchmarkMutex());
    if (internal::NotSkipped == manager_->results.skipped_) {
      manager_->results.skip_message_ = msg;
      manager_->results.skipped_ = skipped_;
    }
  }
  total_iterations_ = 0;
  if (timer_->running()) timer_->StopTimer();
}

void State::SkipWithError(const std::string& msg) {
  skipped_ = internal::SkippedWithError;
  {
    MutexLock l(manager_->GetBenchmarkMutex());
    if (internal::NotSkipped == manager_->results.skipped_) {
      manager_->results.skip_message_ = msg;
      manager_->results.skipped_ = skipped_;
    }
  }
  total_iterations_ = 0;
  if (timer_->running()) timer_->StopTimer();
}
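
// A minimal sketch of skipping from a benchmark body (the precondition check
// is hypothetical). Note the early return: SkipWithError() does not itself
// exit the benchmark function.
//
//   static void BM_NeedsResource(benchmark::State& state) {
//     if (!ResourceAvailable()) {  // hypothetical check
//       state.SkipWithError("resource not available");
//       return;
//     }
//     for (auto _ : state) { /* code under test */ }
//   }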

void State::SetIterationTime(double seconds) {
  timer_->SetIterationTime(seconds);
}
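
// A minimal sketch of manual timing, assuming the benchmark is registered
// with UseManualTime(); the timed work and clock choice are illustrative:
//
//   static void BM_ManualTiming(benchmark::State& state) {
//     for (auto _ : state) {
//       auto start = std::chrono::steady_clock::now();
//       DoWork();  // hypothetical
//       auto end = std::chrono::steady_clock::now();
//       state.SetIterationTime(
//           std::chrono::duration<double>(end - start).count());
//     }
//   }
//   BENCHMARK(BM_ManualTiming)->UseManualTime();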

void State::SetLabel(const std::string& label) {
  MutexLock l(manager_->GetBenchmarkMutex());
  manager_->results.report_label_ = label;
}

void State::StartKeepRunning() {
  BM_CHECK(!started_ && !finished_);
  started_ = true;
  total_iterations_ = skipped() ? 0 : max_iterations;
  manager_->StartStopBarrier();
  if (!skipped()) ResumeTiming();
}

void State::FinishKeepRunning() {
  BM_CHECK(started_ && (!finished_ || skipped()));
  if (!skipped()) {
    PauseTiming();
  }
  // Total iterations has now wrapped around past 0. Fix this.
  total_iterations_ = 0;
  finished_ = true;
  manager_->StartStopBarrier();
}
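
// Both hooks above are driven by State's iteration interface: in the
// range-for form below, StartKeepRunning() runs when iteration begins and
// FinishKeepRunning() when it completes (a minimal sketch; BM_Example is
// hypothetical):
//
//   static void BM_Example(benchmark::State& state) {
//     for (auto _ : state) {
//       /* code under test */
//     }
//   }
//   BENCHMARK(BM_Example);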

namespace internal {
namespace {

// Flushes streams after invoking reporter methods that write to them. This
// ensures users get timely updates even when streams are not line-buffered.
void FlushStreams(BenchmarkReporter* reporter) {
  if (!reporter) return;
  std::flush(reporter->GetOutputStream());
  std::flush(reporter->GetErrorStream());
}

// Reports in both display and file reporters.
void Report(BenchmarkReporter* display_reporter,
            BenchmarkReporter* file_reporter, const RunResults& run_results) {
  auto report_one = [](BenchmarkReporter* reporter, bool aggregates_only,
                       const RunResults& results) {
    assert(reporter);
    // If there are no aggregates, do output non-aggregates.
    aggregates_only &= !results.aggregates_only.empty();
    if (!aggregates_only) reporter->ReportRuns(results.non_aggregates);
    if (!results.aggregates_only.empty())
      reporter->ReportRuns(results.aggregates_only);
  };

  report_one(display_reporter, run_results.display_report_aggregates_only,
             run_results);
  if (file_reporter)
    report_one(file_reporter, run_results.file_report_aggregates_only,
               run_results);

  FlushStreams(display_reporter);
  FlushStreams(file_reporter);
}

void RunBenchmarks(const std::vector<BenchmarkInstance>& benchmarks,
                   BenchmarkReporter* display_reporter,
                   BenchmarkReporter* file_reporter) {
  // Note the file_reporter can be null.
  BM_CHECK(display_reporter != nullptr);

  // Determine the width of the name field using a minimum width of 10.
  bool might_have_aggregates = FLAGS_benchmark_repetitions > 1;
  size_t name_field_width = 10;
  size_t stat_field_width = 0;
  for (const BenchmarkInstance& benchmark : benchmarks) {
    name_field_width =
        std::max<size_t>(name_field_width, benchmark.name().str().size());
    might_have_aggregates |= benchmark.repetitions() > 1;

    for (const auto& Stat : benchmark.statistics())
      stat_field_width = std::max<size_t>(stat_field_width, Stat.name_.size());
  }
  if (might_have_aggregates) name_field_width += 1 + stat_field_width;

  // Print header here
  BenchmarkReporter::Context context;
  context.name_field_width = name_field_width;

  // Keep track of running times of all instances of each benchmark family.
  std::map<int /*family_index*/, BenchmarkReporter::PerFamilyRunReports>
      per_family_reports;

  if (display_reporter->ReportContext(context) &&
      (!file_reporter || file_reporter->ReportContext(context))) {
    FlushStreams(display_reporter);
    FlushStreams(file_reporter);

    size_t num_repetitions_total = 0;

    // This perfcounters object needs to be created before the runners vector
    // below so it outlasts their lifetime.
    PerfCountersMeasurement perfcounters(
        StrSplit(FLAGS_benchmark_perf_counters, ','));

    // Vector of benchmarks to run
    std::vector<internal::BenchmarkRunner> runners;
    runners.reserve(benchmarks.size());

    // Count the number of benchmarks with threads to warn the user in case
    // performance counters are used.
    int benchmarks_with_threads = 0;

    // Loop through all benchmarks
    for (const BenchmarkInstance& benchmark : benchmarks) {
      BenchmarkReporter::PerFamilyRunReports* reports_for_family = nullptr;
      if (benchmark.complexity() != oNone)
        reports_for_family = &per_family_reports[benchmark.family_index()];
      benchmarks_with_threads += (benchmark.threads() > 1);
      runners.emplace_back(benchmark, &perfcounters, reports_for_family);
      int num_repeats_of_this_instance = runners.back().GetNumRepeats();
      num_repetitions_total += num_repeats_of_this_instance;
      if (reports_for_family)
        reports_for_family->num_runs_total += num_repeats_of_this_instance;
    }
    assert(runners.size() == benchmarks.size() && "Unexpected runner count.");

    // The use of performance counters with threads would be unintuitive for
    // the average user, so we need to warn them about this case.
    if ((benchmarks_with_threads > 0) && (perfcounters.num_counters() > 0)) {
      GetErrorLogInstance()
          << "***WARNING*** There are " << benchmarks_with_threads
          << " benchmarks with threads and " << perfcounters.num_counters()
          << " performance counters were requested. Beware counters will "
             "reflect the combined usage across all "
             "threads.\n";
    }

    std::vector<size_t> repetition_indices;
    repetition_indices.reserve(num_repetitions_total);
    for (size_t runner_index = 0, num_runners = runners.size();
         runner_index != num_runners; ++runner_index) {
      const internal::BenchmarkRunner& runner = runners[runner_index];
      std::fill_n(std::back_inserter(repetition_indices),
                  runner.GetNumRepeats(), runner_index);
    }
    assert(repetition_indices.size() == num_repetitions_total &&
           "Unexpected number of repetition indexes.");

    if (FLAGS_benchmark_enable_random_interleaving) {
      std::random_device rd;
      std::mt19937 g(rd());
      std::shuffle(repetition_indices.begin(), repetition_indices.end(), g);
    }

    for (size_t repetition_index : repetition_indices) {
      internal::BenchmarkRunner& runner = runners[repetition_index];
      runner.DoOneRepetition();
      if (runner.HasRepeatsRemaining()) continue;
      // FIXME: report each repetition separately, not all of them in bulk.

      display_reporter->ReportRunsConfig(
          runner.GetMinTime(), runner.HasExplicitIters(), runner.GetIters());
      if (file_reporter)
        file_reporter->ReportRunsConfig(
            runner.GetMinTime(), runner.HasExplicitIters(), runner.GetIters());

      RunResults run_results = runner.GetResults();

      // Maybe calculate complexity report
      if (const auto* reports_for_family = runner.GetReportsForFamily()) {
        if (reports_for_family->num_runs_done ==
            reports_for_family->num_runs_total) {
          auto additional_run_stats = ComputeBigO(reports_for_family->Runs);
          run_results.aggregates_only.insert(run_results.aggregates_only.end(),
                                             additional_run_stats.begin(),
                                             additional_run_stats.end());
          per_family_reports.erase(
              static_cast<int>(reports_for_family->Runs.front().family_index));
        }
      }

      Report(display_reporter, file_reporter, run_results);
    }
  }
  display_reporter->Finalize();
  if (file_reporter) file_reporter->Finalize();
  FlushStreams(display_reporter);
  FlushStreams(file_reporter);
}

// Disable deprecated warnings temporarily because we need to reference
// CSVReporter but don't want to trigger -Werror=-Wdeprecated-declarations.
BENCHMARK_DISABLE_DEPRECATED_WARNING

std::unique_ptr<BenchmarkReporter> CreateReporter(
    std::string const& name, ConsoleReporter::OutputOptions output_opts) {
  typedef std::unique_ptr<BenchmarkReporter> PtrType;
  if (name == "console") {
    return PtrType(new ConsoleReporter(output_opts));
  }
  if (name == "json") {
    return PtrType(new JSONReporter());
  }
  if (name == "csv") {
    return PtrType(new CSVReporter());
  }
  std::cerr << "Unexpected format: '" << name << "'\n";
  std::exit(1);
}

BENCHMARK_RESTORE_DEPRECATED_WARNING

} // end namespace

bool IsZero(double n) {
  return std::abs(n) < std::numeric_limits<double>::epsilon();
}

ConsoleReporter::OutputOptions GetOutputOptions(bool force_no_color) {
  int output_opts = ConsoleReporter::OO_Defaults;
  auto is_benchmark_color = [force_no_color]() -> bool {
    if (force_no_color) {
      return false;
    }
    if (FLAGS_benchmark_color == "auto") {
      return IsColorTerminal();
    }
    return IsTruthyFlagValue(FLAGS_benchmark_color);
  };
  if (is_benchmark_color()) {
    output_opts |= ConsoleReporter::OO_Color;
  } else {
    output_opts &= ~ConsoleReporter::OO_Color;
  }
  if (FLAGS_benchmark_counters_tabular) {
    output_opts |= ConsoleReporter::OO_Tabular;
  } else {
    output_opts &= ~ConsoleReporter::OO_Tabular;
  }
  return static_cast<ConsoleReporter::OutputOptions>(output_opts);
}

} // end namespace internal

BenchmarkReporter* CreateDefaultDisplayReporter() {
  static auto default_display_reporter =
      internal::CreateReporter(FLAGS_benchmark_format,
                               internal::GetOutputOptions())
          .release();
  return default_display_reporter;
}

size_t RunSpecifiedBenchmarks() {
  return RunSpecifiedBenchmarks(nullptr, nullptr, FLAGS_benchmark_filter);
}

size_t RunSpecifiedBenchmarks(std::string spec) {
  return RunSpecifiedBenchmarks(nullptr, nullptr, std::move(spec));
}

size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter) {
  return RunSpecifiedBenchmarks(display_reporter, nullptr,
                                FLAGS_benchmark_filter);
}

size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter,
                              std::string spec) {
  return RunSpecifiedBenchmarks(display_reporter, nullptr, std::move(spec));
}

size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter,
                              BenchmarkReporter* file_reporter) {
  return RunSpecifiedBenchmarks(display_reporter, file_reporter,
                                FLAGS_benchmark_filter);
}

size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter,
                              BenchmarkReporter* file_reporter,
                              std::string spec) {
  if (spec.empty() || spec == "all")
    spec = "."; // Regexp that matches all benchmarks

  // Set up the reporters.
  std::ofstream output_file;
  std::unique_ptr<BenchmarkReporter> default_display_reporter;
  std::unique_ptr<BenchmarkReporter> default_file_reporter;
  if (!display_reporter) {
    default_display_reporter.reset(CreateDefaultDisplayReporter());
    display_reporter = default_display_reporter.get();
  }
  auto& Out = display_reporter->GetOutputStream();
  auto& Err = display_reporter->GetErrorStream();

  std::string const& fname = FLAGS_benchmark_out;
  if (fname.empty() && file_reporter) {
    Err << "A custom file reporter was provided but "
           "--benchmark_out=<file> was not specified."
        << std::endl;
    std::exit(1);
  }
  if (!fname.empty()) {
    output_file.open(fname);
    if (!output_file.is_open()) {
      Err << "invalid file name: '" << fname << "'" << std::endl;
      std::exit(1);
    }
    if (!file_reporter) {
      default_file_reporter = internal::CreateReporter(
          FLAGS_benchmark_out_format, FLAGS_benchmark_counters_tabular
                                          ? ConsoleReporter::OO_Tabular
                                          : ConsoleReporter::OO_None);
      file_reporter = default_file_reporter.get();
    }
    file_reporter->SetOutputStream(&output_file);
    file_reporter->SetErrorStream(&output_file);
  }

  std::vector<internal::BenchmarkInstance> benchmarks;
  if (!FindBenchmarksInternal(spec, &benchmarks, &Err)) return 0;

  if (benchmarks.empty()) {
    Err << "Failed to match any benchmarks against regex: " << spec << "\n";
    return 0;
  }

  if (FLAGS_benchmark_list_tests) {
    for (auto const& benchmark : benchmarks)
      Out << benchmark.name().str() << "\n";
  } else {
    internal::RunBenchmarks(benchmarks, display_reporter, file_reporter);
  }

  return benchmarks.size();
}

namespace {
// stores the time unit benchmarks use by default
TimeUnit default_time_unit = kNanosecond;
} // namespace

TimeUnit GetDefaultTimeUnit() { return default_time_unit; }

void SetDefaultTimeUnit(TimeUnit unit) { default_time_unit = unit; }

std::string GetBenchmarkFilter() { return FLAGS_benchmark_filter; }

void SetBenchmarkFilter(std::string value) {
  FLAGS_benchmark_filter = std::move(value);
}
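
// For illustration, programmatic filtering before running (the pattern is
// hypothetical):
//   benchmark::SetBenchmarkFilter("BM_memcpy/.*");
//   benchmark::RunSpecifiedBenchmarks();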

int32_t GetBenchmarkVerbosity() { return FLAGS_v; }

void RegisterMemoryManager(MemoryManager* manager) {
  internal::memory_manager = manager;
}

void AddCustomContext(const std::string& key, const std::string& value) {
  if (internal::global_context == nullptr) {
    internal::global_context = new std::map<std::string, std::string>();
  }
  if (!internal::global_context->emplace(key, value).second) {
    std::cerr << "Failed to add custom context \"" << key << "\" = \""
              << value << "\": the key already exists.\n";
  }
}
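
// For illustration (hypothetical key and value), this adds a line to the
// reported benchmark context, e.g. in JSON output:
//   benchmark::AddCustomContext("machine", "lab-42");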

namespace internal {

void (*HelperPrintf)();

void PrintUsageAndExit() {
  HelperPrintf();
  exit(0);
}

void SetDefaultTimeUnitFromFlag(const std::string& time_unit_flag) {
  if (time_unit_flag == "s") {
    return SetDefaultTimeUnit(kSecond);
  }
  if (time_unit_flag == "ms") {
    return SetDefaultTimeUnit(kMillisecond);
  }
  if (time_unit_flag == "us") {
    return SetDefaultTimeUnit(kMicrosecond);
  }
  if (time_unit_flag == "ns") {
    return SetDefaultTimeUnit(kNanosecond);
  }
  if (!time_unit_flag.empty()) {
    PrintUsageAndExit();
  }
}

void ParseCommandLineFlags(int* argc, char** argv) {
  using namespace benchmark;
  BenchmarkReporter::Context::executable_name =
      (argc && *argc > 0) ? argv[0] : "unknown";
  for (int i = 1; argc && i < *argc; ++i) {
    if (ParseBoolFlag(argv[i], "benchmark_list_tests",
                      &FLAGS_benchmark_list_tests) ||
        ParseStringFlag(argv[i], "benchmark_filter", &FLAGS_benchmark_filter) ||
        ParseStringFlag(argv[i], "benchmark_min_time",
                        &FLAGS_benchmark_min_time) ||
        ParseDoubleFlag(argv[i], "benchmark_min_warmup_time",
                        &FLAGS_benchmark_min_warmup_time) ||
        ParseInt32Flag(argv[i], "benchmark_repetitions",
                       &FLAGS_benchmark_repetitions) ||
        ParseBoolFlag(argv[i], "benchmark_enable_random_interleaving",
                      &FLAGS_benchmark_enable_random_interleaving) ||
        ParseBoolFlag(argv[i], "benchmark_report_aggregates_only",
                      &FLAGS_benchmark_report_aggregates_only) ||
        ParseBoolFlag(argv[i], "benchmark_display_aggregates_only",
                      &FLAGS_benchmark_display_aggregates_only) ||
        ParseStringFlag(argv[i], "benchmark_format", &FLAGS_benchmark_format) ||
        ParseStringFlag(argv[i], "benchmark_out", &FLAGS_benchmark_out) ||
        ParseStringFlag(argv[i], "benchmark_out_format",
                        &FLAGS_benchmark_out_format) ||
        ParseStringFlag(argv[i], "benchmark_color", &FLAGS_benchmark_color) ||
        ParseBoolFlag(argv[i], "benchmark_counters_tabular",
                      &FLAGS_benchmark_counters_tabular) ||
        ParseStringFlag(argv[i], "benchmark_perf_counters",
                        &FLAGS_benchmark_perf_counters) ||
        ParseKeyValueFlag(argv[i], "benchmark_context",
                          &FLAGS_benchmark_context) ||
        ParseStringFlag(argv[i], "benchmark_time_unit",
                        &FLAGS_benchmark_time_unit) ||
        ParseInt32Flag(argv[i], "v", &FLAGS_v)) {
      for (int j = i; j != *argc - 1; ++j) argv[j] = argv[j + 1];

      --(*argc);
      --i;
    } else if (IsFlag(argv[i], "help")) {
      PrintUsageAndExit();
    }
  }
  for (auto const* flag :
       {&FLAGS_benchmark_format, &FLAGS_benchmark_out_format}) {
    if (*flag != "console" && *flag != "json" && *flag != "csv") {
      PrintUsageAndExit();
    }
  }
  SetDefaultTimeUnitFromFlag(FLAGS_benchmark_time_unit);
  if (FLAGS_benchmark_color.empty()) {
    PrintUsageAndExit();
  }
  for (const auto& kv : FLAGS_benchmark_context) {
    AddCustomContext(kv.first, kv.second);
  }
}

int InitializeStreams() {
  static std::ios_base::Init init;
  return 0;
}

} // end namespace internal

void PrintDefaultHelp() {
  fprintf(stdout,
          "benchmark"
          " [--benchmark_list_tests={true|false}]\n"
          "          [--benchmark_filter=<regex>]\n"
          "          [--benchmark_min_time=`<integer>x` OR `<float>s` ]\n"
          "          [--benchmark_min_warmup_time=<min_warmup_time>]\n"
          "          [--benchmark_repetitions=<num_repetitions>]\n"
          "          [--benchmark_enable_random_interleaving={true|false}]\n"
          "          [--benchmark_report_aggregates_only={true|false}]\n"
          "          [--benchmark_display_aggregates_only={true|false}]\n"
          "          [--benchmark_format=<console|json|csv>]\n"
          "          [--benchmark_out=<filename>]\n"
          "          [--benchmark_out_format=<json|console|csv>]\n"
          "          [--benchmark_color={auto|true|false}]\n"
          "          [--benchmark_counters_tabular={true|false}]\n"
#if defined HAVE_LIBPFM
          "          [--benchmark_perf_counters=<counter>,...]\n"
#endif
          "          [--benchmark_context=<key>=<value>,...]\n"
          "          [--benchmark_time_unit={ns|us|ms|s}]\n"
          "          [--v=<verbosity>]\n");
}

void Initialize(int* argc, char** argv, void (*HelperPrintf)()) {
  internal::HelperPrintf = HelperPrintf;
  internal::ParseCommandLineFlags(argc, argv);
  internal::LogLevel() = FLAGS_v;
}
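
// A minimal sketch of the canonical initialization sequence, roughly what the
// BENCHMARK_MAIN() macro expands to:
//
//   int main(int argc, char** argv) {
//     benchmark::Initialize(&argc, argv);
//     if (benchmark::ReportUnrecognizedArguments(argc, argv)) return 1;
//     benchmark::RunSpecifiedBenchmarks();
//     benchmark::Shutdown();
//     return 0;
//   }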

void Shutdown() { delete internal::global_context; }

bool ReportUnrecognizedArguments(int argc, char** argv) {
  for (int i = 1; i < argc; ++i) {
    fprintf(stderr, "%s: error: unrecognized command-line flag: %s\n", argv[0],
            argv[i]);
  }
  return argc > 1;
}

} // end namespace benchmark