// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "benchmark/benchmark.h"
#include "benchmark_api_internal.h"
#include "internal_macros.h"

#ifndef BENCHMARK_OS_WINDOWS
#include <sys/resource.h>
#include <sys/time.h>
#include <unistd.h>
#endif

#include <algorithm>
#include <atomic>
#include <condition_variable>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <iostream>
#include <memory>
#include <thread>

#include "check.h"
#include "colorprint.h"
#include "commandlineflags.h"
#include "complexity.h"
#include "counter.h"
#include "log.h"
#include "mutex.h"
#include "re.h"
#include "stat.h"
#include "string_util.h"
#include "sysinfo.h"
#include "timers.h"
DEFINE_bool(benchmark_list_tests, false,
            "Print a list of benchmarks. This option overrides all other "
            "options.");

DEFINE_string(benchmark_filter, ".",
              "A regular expression that specifies the set of benchmarks "
              "to execute. If this flag is empty, or if this flag is the "
              "string \"all\", all benchmarks linked into the process are "
              "run.");

DEFINE_double(benchmark_min_time, 0.5,
              "Minimum number of seconds we should run benchmark before "
              "results are considered significant. For cpu-time based "
              "tests, this is the lower bound on the total cpu time "
              "used by all threads that make up the test. For real-time "
              "based tests, this is the lower bound on the elapsed time "
              "of the benchmark execution, regardless of number of "
              "threads.");

DEFINE_int32(benchmark_repetitions, 1,
             "The number of runs of each benchmark. If greater than 1, the "
             "mean and standard deviation of the runs will be reported.");

DEFINE_bool(benchmark_report_aggregates_only, false,
            "Report the result of each benchmark repetition. When 'true' is "
            "specified, only the mean, standard deviation, and other "
            "statistics are reported for repeated benchmarks.");

DEFINE_string(benchmark_format, "console",
              "The format to use for console output. Valid values are "
              "'console', 'json', or 'csv'.");

DEFINE_string(benchmark_out_format, "json",
              "The format to use for file output. Valid values are "
              "'console', 'json', or 'csv'.");

DEFINE_string(benchmark_out, "", "The file to write additional output to");

DEFINE_string(benchmark_color, "auto",
              "Whether to use colors in the output. Valid values: "
              "'true'/'yes'/1, 'false'/'no'/0, and 'auto'. 'auto' means to use "
              "colors if the output is being sent to a terminal and the TERM "
              "environment variable is set to a terminal type that supports "
              "colors.");

DEFINE_bool(benchmark_counters_tabular, false,
            "Whether to use tabular format when printing user counters to "
            "the console. Valid values: 'true'/'yes'/1, 'false'/'no'/0. "
            "Defaults to false.");

DEFINE_int32(v, 0, "The level of verbose logging to output");
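
// Example invocation exercising these flags (a sketch; the binary name and
// the benchmark filter are hypothetical):
//
//   ./my_benchmarks --benchmark_filter=BM_memcpy.* \
//                   --benchmark_repetitions=3 \
//                   --benchmark_out=results.json --benchmark_out_format=json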

namespace benchmark {
namespace internal {

void UseCharPointer(char const volatile*) {}

} // end namespace internal

namespace {

static const size_t kMaxIterations = 1000000000;

} // end namespace

namespace internal {

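// Coordinates the threads of a multi-threaded benchmark run: it barriers the
// workers at start and stop, serializes access to the shared Result via
// GetBenchmarkMutex(), and lets the main thread block in WaitForAllThreads()
// until every worker has called NotifyThreadComplete().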
class ThreadManager {
 public:
  ThreadManager(int num_threads)
      : alive_threads_(num_threads), start_stop_barrier_(num_threads) {}

  Mutex& GetBenchmarkMutex() const RETURN_CAPABILITY(benchmark_mutex_) {
    return benchmark_mutex_;
  }

  bool StartStopBarrier() EXCLUDES(end_cond_mutex_) {
    return start_stop_barrier_.wait();
  }

  void NotifyThreadComplete() EXCLUDES(end_cond_mutex_) {
    start_stop_barrier_.removeThread();
    if (--alive_threads_ == 0) {
      MutexLock lock(end_cond_mutex_);
      end_condition_.notify_all();
    }
  }

  void WaitForAllThreads() EXCLUDES(end_cond_mutex_) {
    MutexLock lock(end_cond_mutex_);
    end_condition_.wait(lock.native_handle(),
                        [this]() { return alive_threads_ == 0; });
  }

 public:
  struct Result {
    double real_time_used = 0;
    double cpu_time_used = 0;
    double manual_time_used = 0;
    int64_t bytes_processed = 0;
    int64_t items_processed = 0;
    int complexity_n = 0;
    std::string report_label_;
    std::string error_message_;
    bool has_error_ = false;
    UserCounters counters;
  };
  GUARDED_BY(GetBenchmarkMutex()) Result results;

 private:
  mutable Mutex benchmark_mutex_;
  std::atomic<int> alive_threads_;
  Barrier start_stop_barrier_;
  Mutex end_cond_mutex_;
  Condition end_condition_;
};

// Timer management class
class ThreadTimer {
 public:
  ThreadTimer() = default;

  // Called by each thread
  void StartTimer() {
    running_ = true;
    start_real_time_ = ChronoClockNow();
    start_cpu_time_ = ThreadCPUUsage();
  }

  // Called by each thread
  void StopTimer() {
    CHECK(running_);
    running_ = false;
    real_time_used_ += ChronoClockNow() - start_real_time_;
    cpu_time_used_ += ThreadCPUUsage() - start_cpu_time_;
  }

  // Called by each thread
  void SetIterationTime(double seconds) { manual_time_used_ += seconds; }

  bool running() const { return running_; }

  // REQUIRES: timer is not running
  double real_time_used() {
    CHECK(!running_);
    return real_time_used_;
  }

  // REQUIRES: timer is not running
  double cpu_time_used() {
    CHECK(!running_);
    return cpu_time_used_;
  }

  // REQUIRES: timer is not running
  double manual_time_used() {
    CHECK(!running_);
    return manual_time_used_;
  }

 private:
  bool running_ = false;        // Is the timer running
  double start_real_time_ = 0;  // If running_
  double start_cpu_time_ = 0;   // If running_

  // Accumulated time so far (does not contain current slice if running_)
  double real_time_used_ = 0;
  double cpu_time_used_ = 0;
  // Manually set iteration time. User sets this with SetIterationTime(seconds).
  double manual_time_used_ = 0;
};

namespace {

BenchmarkReporter::Run CreateRunReport(
    const benchmark::internal::Benchmark::Instance& b,
    const internal::ThreadManager::Result& results, size_t iters,
    double seconds) {
  // Create report about this benchmark run.
  BenchmarkReporter::Run report;

  report.benchmark_name = b.name;
  report.error_occurred = results.has_error_;
  report.error_message = results.error_message_;
  report.report_label = results.report_label_;
  // Report the total iterations across all threads.
  report.iterations = static_cast<int64_t>(iters) * b.threads;
  report.time_unit = b.time_unit;

  if (!report.error_occurred) {
    double bytes_per_second = 0;
    if (results.bytes_processed > 0 && seconds > 0.0) {
      bytes_per_second = (results.bytes_processed / seconds);
    }
    double items_per_second = 0;
    if (results.items_processed > 0 && seconds > 0.0) {
      items_per_second = (results.items_processed / seconds);
    }

    if (b.use_manual_time) {
      report.real_accumulated_time = results.manual_time_used;
    } else {
      report.real_accumulated_time = results.real_time_used;
    }
    report.cpu_accumulated_time = results.cpu_time_used;
    report.bytes_per_second = bytes_per_second;
    report.items_per_second = items_per_second;
    report.complexity_n = results.complexity_n;
    report.complexity = b.complexity;
    report.complexity_lambda = b.complexity_lambda;
    report.counters = results.counters;
    internal::Finish(&report.counters, seconds, b.threads);
  }
  return report;
}

// Execute one thread of benchmark b for the specified number of iterations.
// Adds the stats collected for the thread into manager->results.
void RunInThread(const benchmark::internal::Benchmark::Instance* b,
                 size_t iters, int thread_id,
                 internal::ThreadManager* manager) {
  internal::ThreadTimer timer;
  State st(iters, b->arg, thread_id, b->threads, &timer, manager);
  b->benchmark->Run(st);
  CHECK(st.iterations() == st.max_iterations)
      << "Benchmark returned before State::KeepRunning() returned false!";
  {
    MutexLock l(manager->GetBenchmarkMutex());
    internal::ThreadManager::Result& results = manager->results;
    results.cpu_time_used += timer.cpu_time_used();
    results.real_time_used += timer.real_time_used();
    results.manual_time_used += timer.manual_time_used();
    results.bytes_processed += st.bytes_processed();
    results.items_processed += st.items_processed();
    results.complexity_n += st.complexity_length_n();
    internal::Increment(&results.counters, st.counters);
  }
  manager->NotifyThreadComplete();
}

std::vector<BenchmarkReporter::Run> RunBenchmark(
    const benchmark::internal::Benchmark::Instance& b,
    std::vector<BenchmarkReporter::Run>* complexity_reports) {
  std::vector<BenchmarkReporter::Run> reports; // return value

  const bool has_explicit_iteration_count = b.iterations != 0;
  size_t iters = has_explicit_iteration_count ? b.iterations : 1;
  std::unique_ptr<internal::ThreadManager> manager;
  std::vector<std::thread> pool(b.threads - 1);
  const int repeats =
      b.repetitions != 0 ? b.repetitions : FLAGS_benchmark_repetitions;
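  // Report only the aggregated statistics when repetitions are requested and
  // either this benchmark instance asked for aggregates-only or, with the
  // mode unspecified, --benchmark_report_aggregates_only is set.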
  const bool report_aggregates_only =
      repeats != 1 &&
      (b.report_mode == internal::RM_Unspecified
           ? FLAGS_benchmark_report_aggregates_only
           : b.report_mode == internal::RM_ReportAggregatesOnly);
  for (int repetition_num = 0; repetition_num < repeats; repetition_num++) {
    for (;;) {
      // Try benchmark
      VLOG(2) << "Running " << b.name << " for " << iters << "\n";

      manager.reset(new internal::ThreadManager(b.threads));
      for (std::size_t ti = 0; ti < pool.size(); ++ti) {
        pool[ti] = std::thread(&RunInThread, &b, iters,
                               static_cast<int>(ti + 1), manager.get());
      }
      RunInThread(&b, iters, 0, manager.get());
      manager->WaitForAllThreads();
      for (std::thread& thread : pool) thread.join();
      internal::ThreadManager::Result results;
      {
        MutexLock l(manager->GetBenchmarkMutex());
        results = manager->results;
      }
      manager.reset();
      // Adjust real/manual time stats since they were reported per thread.
      results.real_time_used /= b.threads;
      results.manual_time_used /= b.threads;

      VLOG(2) << "Ran in " << results.cpu_time_used << "/"
              << results.real_time_used << "\n";

      // Base decisions off of real time if requested by this benchmark.
      double seconds = results.cpu_time_used;
      if (b.use_manual_time) {
        seconds = results.manual_time_used;
      } else if (b.use_real_time) {
        seconds = results.real_time_used;
      }

      const double min_time =
          !IsZero(b.min_time) ? b.min_time : FLAGS_benchmark_min_time;

      // Determine if this run should be reported; either it has run for a
      // sufficient amount of time or an error was reported.
      const bool should_report = repetition_num > 0
          || has_explicit_iteration_count  // An exact iteration count was requested
          || results.has_error_
          || iters >= kMaxIterations
          || seconds >= min_time  // The elapsed time is large enough.
          // CPU time is specified but the elapsed real time greatly exceeds
          // the minimum time. Note that user-provided timers are exempt from
          // this sanity check.
          || ((results.real_time_used >= 5 * min_time) && !b.use_manual_time);

      if (should_report) {
        BenchmarkReporter::Run report =
            CreateRunReport(b, results, iters, seconds);
        if (!report.error_occurred && b.complexity != oNone)
          complexity_reports->push_back(report);
        reports.push_back(report);
        break;
      }

      // Determine how much the iteration count should be increased by.
      // Note: Avoid division by zero with max(seconds, 1ns).
      double multiplier = min_time * 1.4 / std::max(seconds, 1e-9);
      // If our last run was at least 10% of FLAGS_benchmark_min_time then we
      // use the multiplier directly. Otherwise we use at most 10 times
      // expansion.
      // NOTE: When the last run was at least 10% of the min time the max
      // expansion should be 14x.
      bool is_significant = (seconds / min_time) > 0.1;
      multiplier = is_significant ? multiplier : std::min(10.0, multiplier);
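      // Worked example: with min_time = 0.5s, a last run of seconds = 0.01s
      // gives multiplier = 0.5 * 1.4 / 0.01 = 70, but 0.01 / 0.5 = 0.02 is
      // not above 0.1, so the expansion is capped at 10x. At seconds = 0.06s
      // the ratio 0.12 clears the threshold and 0.5 * 1.4 / 0.06 (about
      // 11.7x) is used directly.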
      if (multiplier <= 1.0) multiplier = 2.0;
      double next_iters = std::max(multiplier * iters, iters + 1.0);
      if (next_iters > kMaxIterations) {
        next_iters = kMaxIterations;
      }
      VLOG(3) << "Next iters: " << next_iters << ", " << multiplier << "\n";
      iters = static_cast<int>(next_iters + 0.5);
    }
  }
  // Calculate additional statistics
  auto stat_reports = ComputeStats(reports);
  if ((b.complexity != oNone) && b.last_benchmark_instance) {
    auto additional_run_stats = ComputeBigO(*complexity_reports);
    stat_reports.insert(stat_reports.end(), additional_run_stats.begin(),
                        additional_run_stats.end());
    complexity_reports->clear();
  }

  if (report_aggregates_only) reports.clear();
  reports.insert(reports.end(), stat_reports.begin(), stat_reports.end());
  return reports;
}

} // namespace
} // namespace internal

State::State(size_t max_iters, const std::vector<int>& ranges, int thread_i,
             int n_threads, internal::ThreadTimer* timer,
             internal::ThreadManager* manager)
    : started_(false),
      finished_(false),
      total_iterations_(0),
      range_(ranges),
      bytes_processed_(0),
      items_processed_(0),
      complexity_n_(0),
      error_occurred_(false),
      counters(),
      thread_index(thread_i),
      threads(n_threads),
      max_iterations(max_iters),
      timer_(timer),
      manager_(manager) {
  CHECK(max_iterations != 0) << "At least one iteration must be run";
  CHECK_LT(thread_index, threads) << "thread_index must be less than threads";
}

void State::PauseTiming() {
  // Add in time accumulated so far
  CHECK(started_ && !finished_ && !error_occurred_);
  timer_->StopTimer();
}

void State::ResumeTiming() {
  CHECK(started_ && !finished_ && !error_occurred_);
  timer_->StartTimer();
}
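
// Example (a sketch, adapted from the library's documentation): using
// PauseTiming()/ResumeTiming() to exclude per-iteration setup from the
// measurement. ConstructRandomSet() and RandomNumber() are hypothetical
// helpers.
//
//   static void BM_SetInsert(benchmark::State& state) {
//     while (state.KeepRunning()) {
//       state.PauseTiming();
//       std::set<int> data = ConstructRandomSet(state.range(0));
//       state.ResumeTiming();
//       for (int j = 0; j < state.range(1); ++j) data.insert(RandomNumber());
//     }
//   }
//   BENCHMARK(BM_SetInsert)->Ranges({{1 << 10, 8 << 10}, {1, 512}});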

void State::SkipWithError(const char* msg) {
  CHECK(msg);
  error_occurred_ = true;
  {
    MutexLock l(manager_->GetBenchmarkMutex());
    if (manager_->results.has_error_ == false) {
      manager_->results.error_message_ = msg;
      manager_->results.has_error_ = true;
    }
  }
  total_iterations_ = max_iterations;
  if (timer_->running()) timer_->StopTimer();
}
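
// Example (a sketch): skipping a benchmark when a resource is unavailable.
// GetResource() and its interface are hypothetical.
//
//   static void BM_ReadData(benchmark::State& state) {
//     auto resource = GetResource();
//     if (!resource.good()) {
//       state.SkipWithError("resource is not good");
//       // The KeepRunning() loop will not be entered.
//     }
//     while (state.KeepRunning()) {
//       auto data = resource.read_data();
//       if (!resource.good()) {
//         state.SkipWithError("failed to read data");
//         break;  // Needed to skip the rest of this iteration.
//       }
//       benchmark::DoNotOptimize(data);
//     }
//   }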

void State::SetIterationTime(double seconds) {
  timer_->SetIterationTime(seconds);
}
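
// Example (a sketch): pairing SetIterationTime() with UseManualTime() to
// time work whose cost is not captured by the wall or CPU time measured
// here, e.g. work offloaded to a device. DoAsyncWorkAndWait() is a
// hypothetical workload.
//
//   static void BM_ManualTiming(benchmark::State& state) {
//     while (state.KeepRunning()) {
//       auto start = std::chrono::high_resolution_clock::now();
//       DoAsyncWorkAndWait();
//       auto end = std::chrono::high_resolution_clock::now();
//       state.SetIterationTime(
//           std::chrono::duration<double>(end - start).count());
//     }
//   }
//   BENCHMARK(BM_ManualTiming)->UseManualTime();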

void State::SetLabel(const char* label) {
  MutexLock l(manager_->GetBenchmarkMutex());
  manager_->results.report_label_ = label;
}
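
// Example (a sketch): attaching a custom label to the reported results; the
// workload and the ratio shown are hypothetical.
//
//   static void BM_Compress(benchmark::State& state) {
//     while (state.KeepRunning()) { /* ... compress ... */ }
//     state.SetLabel("ratio=2.7x");  // appended to this benchmark's result line
//   }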

void State::StartKeepRunning() {
  CHECK(!started_ && !finished_);
  started_ = true;
  manager_->StartStopBarrier();
  if (!error_occurred_) ResumeTiming();
}

void State::FinishKeepRunning() {
  CHECK(started_ && (!finished_ || error_occurred_));
  if (!error_occurred_) {
    PauseTiming();
  }
  // Total iterations now is one greater than max iterations. Fix this.
  total_iterations_ = max_iterations;
  finished_ = true;
  manager_->StartStopBarrier();
}

namespace internal {
namespace {

void RunBenchmarks(const std::vector<Benchmark::Instance>& benchmarks,
                   BenchmarkReporter* console_reporter,
                   BenchmarkReporter* file_reporter) {
  // Note the file_reporter can be null.
  CHECK(console_reporter != nullptr);

  // Determine the width of the name field using a minimum width of 10.
  bool has_repetitions = FLAGS_benchmark_repetitions > 1;
  size_t name_field_width = 10;
  for (const Benchmark::Instance& benchmark : benchmarks) {
    name_field_width =
        std::max<size_t>(name_field_width, benchmark.name.size());
    has_repetitions |= benchmark.repetitions > 1;
  }
  if (has_repetitions) name_field_width += std::strlen("_stddev");

  // Print header here
  BenchmarkReporter::Context context;
  context.num_cpus = NumCPUs();
  context.mhz_per_cpu = CyclesPerSecond() / 1000000.0f;

  context.cpu_scaling_enabled = CpuScalingEnabled();
  context.name_field_width = name_field_width;

  // Keep track of running times of all instances of the current benchmark.
  std::vector<BenchmarkReporter::Run> complexity_reports;

  // We flush streams after invoking reporter methods that write to them. This
  // ensures users get timely updates even when streams are not line-buffered.
  auto flushStreams = [](BenchmarkReporter* reporter) {
    if (!reporter) return;
    std::flush(reporter->GetOutputStream());
    std::flush(reporter->GetErrorStream());
  };

  if (console_reporter->ReportContext(context) &&
      (!file_reporter || file_reporter->ReportContext(context))) {
    flushStreams(console_reporter);
    flushStreams(file_reporter);
    for (const auto& benchmark : benchmarks) {
      std::vector<BenchmarkReporter::Run> reports =
          RunBenchmark(benchmark, &complexity_reports);
      console_reporter->ReportRuns(reports);
      if (file_reporter) file_reporter->ReportRuns(reports);
      flushStreams(console_reporter);
      flushStreams(file_reporter);
    }
  }
  console_reporter->Finalize();
  if (file_reporter) file_reporter->Finalize();
  flushStreams(console_reporter);
  flushStreams(file_reporter);
}

std::unique_ptr<BenchmarkReporter> CreateReporter(
    std::string const& name, ConsoleReporter::OutputOptions output_opts) {
  typedef std::unique_ptr<BenchmarkReporter> PtrType;
  if (name == "console") {
    return PtrType(new ConsoleReporter(output_opts));
  } else if (name == "json") {
    return PtrType(new JSONReporter);
  } else if (name == "csv") {
    return PtrType(new CSVReporter);
  } else {
    std::cerr << "Unexpected format: '" << name << "'\n";
    std::exit(1);
  }
}

} // end namespace

bool IsZero(double n) {
  return std::abs(n) < std::numeric_limits<double>::epsilon();
}

ConsoleReporter::OutputOptions GetOutputOptions(bool force_no_color) {
  int output_opts = ConsoleReporter::OO_Defaults;
  if ((FLAGS_benchmark_color == "auto" && IsColorTerminal()) ||
      IsTruthyFlagValue(FLAGS_benchmark_color)) {
    output_opts |= ConsoleReporter::OO_Color;
  } else {
    output_opts &= ~ConsoleReporter::OO_Color;
  }
  if (force_no_color) {
    output_opts &= ~ConsoleReporter::OO_Color;
  }
  if (FLAGS_benchmark_counters_tabular) {
    output_opts |= ConsoleReporter::OO_Tabular;
  } else {
    output_opts &= ~ConsoleReporter::OO_Tabular;
  }
  return static_cast<ConsoleReporter::OutputOptions>(output_opts);
}

} // end namespace internal

size_t RunSpecifiedBenchmarks() {
  return RunSpecifiedBenchmarks(nullptr, nullptr);
}

size_t RunSpecifiedBenchmarks(BenchmarkReporter* console_reporter) {
  return RunSpecifiedBenchmarks(console_reporter, nullptr);
}

size_t RunSpecifiedBenchmarks(BenchmarkReporter* console_reporter,
                              BenchmarkReporter* file_reporter) {
  std::string spec = FLAGS_benchmark_filter;
  if (spec.empty() || spec == "all")
    spec = "."; // Regexp that matches all benchmarks

  // Setup the reporters
  std::ofstream output_file;
  std::unique_ptr<BenchmarkReporter> default_console_reporter;
  std::unique_ptr<BenchmarkReporter> default_file_reporter;
  if (!console_reporter) {
    default_console_reporter = internal::CreateReporter(
        FLAGS_benchmark_format, internal::GetOutputOptions());
    console_reporter = default_console_reporter.get();
  }
  auto& Out = console_reporter->GetOutputStream();
  auto& Err = console_reporter->GetErrorStream();

  std::string const& fname = FLAGS_benchmark_out;
  if (fname == "" && file_reporter) {
    Err << "A custom file reporter was provided but "
           "--benchmark_out=<file> was not specified."
        << std::endl;
    std::exit(1);
  }
  if (fname != "") {
    output_file.open(fname);
    if (!output_file.is_open()) {
      Err << "invalid file name: '" << fname << "'" << std::endl;
      std::exit(1);
    }
    if (!file_reporter) {
      default_file_reporter = internal::CreateReporter(
          FLAGS_benchmark_out_format, ConsoleReporter::OO_None);
      file_reporter = default_file_reporter.get();
    }
    file_reporter->SetOutputStream(&output_file);
    file_reporter->SetErrorStream(&output_file);
  }

  std::vector<internal::Benchmark::Instance> benchmarks;
  if (!FindBenchmarksInternal(spec, &benchmarks, &Err)) return 0;

  if (benchmarks.empty()) {
    Err << "Failed to match any benchmarks against regex: " << spec << "\n";
    return 0;
  }

  if (FLAGS_benchmark_list_tests) {
    for (auto const& benchmark : benchmarks) Out << benchmark.name << "\n";
  } else {
    internal::RunBenchmarks(benchmarks, console_reporter, file_reporter);
  }

  return benchmarks.size();
}

namespace internal {

void PrintUsageAndExit() {
  fprintf(stdout,
          "benchmark"
          " [--benchmark_list_tests={true|false}]\n"
          "          [--benchmark_filter=<regex>]\n"
          "          [--benchmark_min_time=<min_time>]\n"
          "          [--benchmark_repetitions=<num_repetitions>]\n"
645 " [--benchmark_report_aggregates_only={true|false}\n"
646 " [--benchmark_format=<console|json|csv>]\n"
647 " [--benchmark_out=<filename>]\n"
648 " [--benchmark_out_format=<json|console|csv>]\n"
649 " [--benchmark_color={auto|true|false}]\n"
650 " [--benchmark_counters_tabular={true|false}]\n"
651 " [--v=<verbosity>]\n");
652 exit(0);
653 }
654
ParseCommandLineFlags(int * argc,char ** argv)655 void ParseCommandLineFlags(int* argc, char** argv) {
656 using namespace benchmark;
657 for (int i = 1; i < *argc; ++i) {
658 if (ParseBoolFlag(argv[i], "benchmark_list_tests",
659 &FLAGS_benchmark_list_tests) ||
660 ParseStringFlag(argv[i], "benchmark_filter", &FLAGS_benchmark_filter) ||
661 ParseDoubleFlag(argv[i], "benchmark_min_time",
662 &FLAGS_benchmark_min_time) ||
663 ParseInt32Flag(argv[i], "benchmark_repetitions",
664 &FLAGS_benchmark_repetitions) ||
665 ParseBoolFlag(argv[i], "benchmark_report_aggregates_only",
666 &FLAGS_benchmark_report_aggregates_only) ||
667 ParseStringFlag(argv[i], "benchmark_format", &FLAGS_benchmark_format) ||
668 ParseStringFlag(argv[i], "benchmark_out", &FLAGS_benchmark_out) ||
669 ParseStringFlag(argv[i], "benchmark_out_format",
670 &FLAGS_benchmark_out_format) ||
671 ParseStringFlag(argv[i], "benchmark_color", &FLAGS_benchmark_color) ||
672 // "color_print" is the deprecated name for "benchmark_color".
673 // TODO: Remove this.
674 ParseStringFlag(argv[i], "color_print", &FLAGS_benchmark_color) ||
675 ParseBoolFlag(argv[i], "benchmark_counters_tabular",
676 &FLAGS_benchmark_counters_tabular) ||
677 ParseInt32Flag(argv[i], "v", &FLAGS_v)) {
678 for (int j = i; j != *argc - 1; ++j) argv[j] = argv[j + 1];
679
680 --(*argc);
681 --i;
682 } else if (IsFlag(argv[i], "help")) {
683 PrintUsageAndExit();
684 }
685 }
686 for (auto const* flag :
687 {&FLAGS_benchmark_format, &FLAGS_benchmark_out_format})
688 if (*flag != "console" && *flag != "json" && *flag != "csv") {
689 PrintUsageAndExit();
690 }
691 if (FLAGS_benchmark_color.empty()) {
692 PrintUsageAndExit();
693 }
694 }
695
InitializeStreams()696 int InitializeStreams() {
697 static std::ios_base::Init init;
698 return 0;
699 }
700
701 } // end namespace internal
702
Initialize(int * argc,char ** argv)703 void Initialize(int* argc, char** argv) {
704 internal::ParseCommandLineFlags(argc, argv);
705 internal::LogLevel() = FLAGS_v;
706 }
707
ReportUnrecognizedArguments(int argc,char ** argv)708 bool ReportUnrecognizedArguments(int argc, char** argv) {
709 for (int i = 1; i < argc; ++i) {
710 fprintf(stderr, "%s: error: unrecognized command-line flag: %s\n", argv[0], argv[i]);
711 }
712 return argc > 1;
713 }
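
// Example (a sketch): a typical main() wiring these entry points together;
// BENCHMARK_MAIN() expands to essentially this:
//
//   int main(int argc, char** argv) {
//     benchmark::Initialize(&argc, argv);
//     if (benchmark::ReportUnrecognizedArguments(argc, argv)) return 1;
//     benchmark::RunSpecifiedBenchmarks();
//     return 0;
//   }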

} // end namespace benchmark