1 // Copyright 2015 Google Inc. All rights reserved.
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 // http://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14
15 // Support for registering benchmarks for functions.
16
17 /* Example usage:
18 // Define a function that executes the code to be measured a
19 // specified number of times:
20 static void BM_StringCreation(benchmark::State& state) {
21 for (auto _ : state)
22 std::string empty_string;
23 }
24
25 // Register the function as a benchmark
26 BENCHMARK(BM_StringCreation);
27
28 // Define another benchmark
29 static void BM_StringCopy(benchmark::State& state) {
30 std::string x = "hello";
31 for (auto _ : state)
32 std::string copy(x);
33 }
34 BENCHMARK(BM_StringCopy);
35
36 // Augment the main() program to invoke benchmarks if specified
37 // via the --benchmark_filter command line flag. E.g.,
38 // my_unittest --benchmark_filter=all
39 // my_unittest --benchmark_filter=BM_StringCreation
40 // my_unittest --benchmark_filter=String
41 // my_unittest --benchmark_filter='Copy|Creation'
42 int main(int argc, char** argv) {
43 benchmark::Initialize(&argc, argv);
44 benchmark::RunSpecifiedBenchmarks();
45 benchmark::Shutdown();
46 return 0;
47 }
48
49 // Sometimes a family of microbenchmarks can be implemented with
50 // just one routine that takes an extra argument to specify which
51 // one of the family of benchmarks to run. For example, the following
52 // code defines a family of microbenchmarks for measuring the speed
53 // of memcpy() calls of different lengths:
54
55 static void BM_memcpy(benchmark::State& state) {
56 char* src = new char[state.range(0)]; char* dst = new char[state.range(0)];
57 memset(src, 'x', state.range(0));
58 for (auto _ : state)
59 memcpy(dst, src, state.range(0));
60 state.SetBytesProcessed(state.iterations() * state.range(0));
61 delete[] src; delete[] dst;
62 }
63 BENCHMARK(BM_memcpy)->Arg(8)->Arg(64)->Arg(512)->Arg(1<<10)->Arg(8<<10);
64
65 // The preceding code is quite repetitive, and can be replaced with the
66 // following short-hand. The following invocation will pick a few
67 // appropriate arguments in the specified range and will generate a
68 // microbenchmark for each such argument.
69 BENCHMARK(BM_memcpy)->Range(8, 8<<10);
70
71 // You might have a microbenchmark that depends on two inputs. For
72 // example, the following code defines a family of microbenchmarks for
73 // measuring the speed of set insertion.
74 static void BM_SetInsert(benchmark::State& state) {
75 set<int> data;
76 for (auto _ : state) {
77 state.PauseTiming();
78 data = ConstructRandomSet(state.range(0));
79 state.ResumeTiming();
80 for (int j = 0; j < state.range(1); ++j)
81 data.insert(RandomNumber());
82 }
83 }
84 BENCHMARK(BM_SetInsert)
85 ->Args({1<<10, 128})
86 ->Args({2<<10, 128})
87 ->Args({4<<10, 128})
88 ->Args({8<<10, 128})
89 ->Args({1<<10, 512})
90 ->Args({2<<10, 512})
91 ->Args({4<<10, 512})
92 ->Args({8<<10, 512});
93
94 // The preceding code is quite repetitive, and can be replaced with
95 // the following short-hand. The following macro will pick a few
96 // appropriate arguments in the product of the two specified ranges
97 // and will generate a microbenchmark for each such pair.
98 BENCHMARK(BM_SetInsert)->Ranges({{1<<10, 8<<10}, {128, 512}});
99
100 // For more complex patterns of inputs, passing a custom function
101 // to Apply allows programmatic specification of an
102 // arbitrary set of arguments to run the microbenchmark on.
103 // The following example enumerates a dense range on
104 // one parameter, and a sparse range on the second.
105 static void CustomArguments(benchmark::internal::Benchmark* b) {
106 for (int i = 0; i <= 10; ++i)
107 for (int j = 32; j <= 1024*1024; j *= 8)
108 b->Args({i, j});
109 }
110 BENCHMARK(BM_SetInsert)->Apply(CustomArguments);
111
112 // Templated microbenchmarks work the same way:
113 // Produce then consume 'size' messages 'iters' times
114 // Measures throughput in the absence of multiprogramming.
115 template <class Q> void BM_Sequential(benchmark::State& state) {
116 Q q;
117 typename Q::value_type v;
118 for (auto _ : state) {
119 for (int i = state.range(0); i--; )
120 q.push(v);
121 for (int e = state.range(0); e--; )
122 q.Wait(&v);
123 }
124 // actually messages, not bytes:
125 state.SetBytesProcessed(state.iterations() * state.range(0));
126 }
127 BENCHMARK_TEMPLATE(BM_Sequential, WaitQueue<int>)->Range(1<<0, 1<<10);
128
129 Use `Benchmark::MinTime(double t)` to set the minimum time used to run the
130 benchmark. This option overrides the `benchmark_min_time` flag.
131
132 void BM_test(benchmark::State& state) {
133 ... body ...
134 }
135 BENCHMARK(BM_test)->MinTime(2.0); // Run for at least 2 seconds.
136
137 In a multithreaded test, it is guaranteed that none of the threads will start
138 until all have reached the loop start, and all will have finished before any
139 thread exits the loop body. As such, any global setup or teardown you want to
140 do can be wrapped in a check against the thread index:
141
142 static void BM_MultiThreaded(benchmark::State& state) {
143 if (state.thread_index() == 0) {
144 // Setup code here.
145 }
146 for (auto _ : state) {
147 // Run the test as normal.
148 }
149 if (state.thread_index() == 0) {
150 // Teardown code here.
151 }
152 }
153 BENCHMARK(BM_MultiThreaded)->Threads(4);
154
155
156 If a benchmark runs for only a few milliseconds, it may be hard to visually
157 compare the measured times, since the output is given in nanoseconds by
158 default. To change this, you can specify the time unit explicitly:
159
160 BENCHMARK(BM_test)->Unit(benchmark::kMillisecond);
161 */
162
163 #ifndef BENCHMARK_BENCHMARK_H_
164 #define BENCHMARK_BENCHMARK_H_
165
166 // The _MSVC_LANG check should detect Visual Studio 2015 Update 3 and newer.
167 #if __cplusplus >= 201103L || (defined(_MSVC_LANG) && _MSVC_LANG >= 201103L)
168 #define BENCHMARK_HAS_CXX11
169 #endif
170
171 // This _MSC_VER check should detect VS 2017 v15.3 and newer.
172 #if __cplusplus >= 201703L || \
173 (defined(_MSC_VER) && _MSC_VER >= 1911 && _MSVC_LANG >= 201703L)
174 #define BENCHMARK_HAS_CXX17
175 #endif
176
177 #include <stdint.h>
178
179 #include <algorithm>
180 #include <cassert>
181 #include <cstddef>
182 #include <iosfwd>
183 #include <limits>
184 #include <map>
185 #include <set>
186 #include <string>
187 #include <utility>
188 #include <vector>
189
190 #include "benchmark/export.h"
191
192 #if defined(BENCHMARK_HAS_CXX11)
193 #include <atomic>
194 #include <initializer_list>
195 #include <type_traits>
196 #include <utility>
197 #endif
198
199 #if defined(_MSC_VER)
200 #include <intrin.h> // for _ReadWriteBarrier
201 #endif
202
203 #ifndef BENCHMARK_HAS_CXX11
204 #define BENCHMARK_DISALLOW_COPY_AND_ASSIGN(TypeName) \
205 TypeName(const TypeName&); \
206 TypeName& operator=(const TypeName&)
207 #else
208 #define BENCHMARK_DISALLOW_COPY_AND_ASSIGN(TypeName) \
209 TypeName(const TypeName&) = delete; \
210 TypeName& operator=(const TypeName&) = delete
211 #endif
212
213 #ifdef BENCHMARK_HAS_CXX17
214 #define BENCHMARK_UNUSED [[maybe_unused]]
215 #elif defined(__GNUC__) || defined(__clang__)
216 #define BENCHMARK_UNUSED __attribute__((unused))
217 #else
218 #define BENCHMARK_UNUSED
219 #endif
220
221 // Used to annotate functions, methods and classes so they
222 // are not optimized by the compiler. Useful for tests
223 // where you expect loops to stay in place churning cycles
224 #if defined(__clang__)
225 #define BENCHMARK_DONT_OPTIMIZE __attribute__((optnone))
226 #elif defined(__GNUC__) || defined(__GNUG__)
227 #define BENCHMARK_DONT_OPTIMIZE __attribute__((optimize(0)))
228 #else
229 // MSVC & Intel do not have a no-optimize attribute, only line pragmas
230 #define BENCHMARK_DONT_OPTIMIZE
231 #endif
232
233 #if defined(__GNUC__) || defined(__clang__)
234 #define BENCHMARK_ALWAYS_INLINE __attribute__((always_inline))
235 #elif defined(_MSC_VER) && !defined(__clang__)
236 #define BENCHMARK_ALWAYS_INLINE __forceinline
237 #define __func__ __FUNCTION__
238 #else
239 #define BENCHMARK_ALWAYS_INLINE
240 #endif
241
242 #define BENCHMARK_INTERNAL_TOSTRING2(x) #x
243 #define BENCHMARK_INTERNAL_TOSTRING(x) BENCHMARK_INTERNAL_TOSTRING2(x)
244
245 // clang-format off
246 #if (defined(__GNUC__) && !defined(__NVCC__) && !defined(__NVCOMPILER)) || defined(__clang__)
247 #define BENCHMARK_BUILTIN_EXPECT(x, y) __builtin_expect(x, y)
248 #define BENCHMARK_DEPRECATED_MSG(msg) __attribute__((deprecated(msg)))
249 #define BENCHMARK_DISABLE_DEPRECATED_WARNING \
250 _Pragma("GCC diagnostic push") \
251 _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"")
252 #define BENCHMARK_RESTORE_DEPRECATED_WARNING _Pragma("GCC diagnostic pop")
253 #elif defined(__NVCOMPILER)
254 #define BENCHMARK_BUILTIN_EXPECT(x, y) __builtin_expect(x, y)
255 #define BENCHMARK_DEPRECATED_MSG(msg) __attribute__((deprecated(msg)))
256 #define BENCHMARK_DISABLE_DEPRECATED_WARNING \
257 _Pragma("diagnostic push") \
258 _Pragma("diag_suppress deprecated_entity_with_custom_message")
259 #define BENCHMARK_RESTORE_DEPRECATED_WARNING _Pragma("diagnostic pop")
260 #else
261 #define BENCHMARK_BUILTIN_EXPECT(x, y) x
262 #define BENCHMARK_DEPRECATED_MSG(msg)
263 #define BENCHMARK_WARNING_MSG(msg) \
264 __pragma(message(__FILE__ "(" BENCHMARK_INTERNAL_TOSTRING( \
265 __LINE__) ") : warning note: " msg))
266 #define BENCHMARK_DISABLE_DEPRECATED_WARNING
267 #define BENCHMARK_RESTORE_DEPRECATED_WARNING
268 #endif
269 // clang-format on
270
271 #if defined(__GNUC__) && !defined(__clang__)
272 #define BENCHMARK_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
273 #endif
274
275 #ifndef __has_builtin
276 #define __has_builtin(x) 0
277 #endif
278
279 #if defined(__GNUC__) || __has_builtin(__builtin_unreachable)
280 #define BENCHMARK_UNREACHABLE() __builtin_unreachable()
281 #elif defined(_MSC_VER)
282 #define BENCHMARK_UNREACHABLE() __assume(false)
283 #else
284 #define BENCHMARK_UNREACHABLE() ((void)0)
285 #endif
286
287 #ifdef BENCHMARK_HAS_CXX11
288 #define BENCHMARK_OVERRIDE override
289 #else
290 #define BENCHMARK_OVERRIDE
291 #endif
292
293 #if defined(_MSC_VER)
294 #pragma warning(push)
295 // C4251: <symbol> needs to have dll-interface to be used by clients of class
296 #pragma warning(disable : 4251)
297 #endif
298
299 namespace benchmark {
300 class BenchmarkReporter;
301
302 // Default minimum benchmark running time, in seconds.
303 const char kDefaultMinTimeStr[] = "0.5s";
304
305 BENCHMARK_EXPORT void PrintDefaultHelp();
306
307 BENCHMARK_EXPORT void Initialize(int* argc, char** argv,
308 void (*HelperPrinterf)() = PrintDefaultHelp);
309 BENCHMARK_EXPORT void Shutdown();
310
311 // Report to stdout all arguments in 'argv' as unrecognized except the first.
312 // Returns true if there is at least one unrecognized argument (i.e. 'argc' > 1).
313 BENCHMARK_EXPORT bool ReportUnrecognizedArguments(int argc, char** argv);
314
315 // Returns the current value of --benchmark_filter.
316 BENCHMARK_EXPORT std::string GetBenchmarkFilter();
317
318 // Sets a new value to --benchmark_filter. (This will override this flag's
319 // current value).
320 // Should be called after `benchmark::Initialize()`, as
321 // `benchmark::Initialize()` will override the flag's value.
322 BENCHMARK_EXPORT void SetBenchmarkFilter(std::string value);
323
324 // Returns the current value of --v (command line value for verbosity).
325 BENCHMARK_EXPORT int32_t GetBenchmarkVerbosity();
326
327 // Creates a default display reporter. Used by the library when no display
328 // reporter is provided, but also made available for external use in case a
329 // custom reporter should respect the `--benchmark_format` flag as a fallback
330 BENCHMARK_EXPORT BenchmarkReporter* CreateDefaultDisplayReporter();
331
332 // Generate a list of benchmarks matching the specified --benchmark_filter flag
333 // and if --benchmark_list_tests is specified return after printing the name
334 // of each matching benchmark. Otherwise run each matching benchmark and
335 // report the results.
336 //
337 // spec : Specify the benchmarks to run. If users do not specify this arg,
338 // then the value of FLAGS_benchmark_filter
339 // will be used.
340 //
341 // The second and third overloads use the specified 'display_reporter' and
342 // 'file_reporter', respectively. 'file_reporter' will write to the file
343 // specified by '--benchmark_output'.
344 // If '--benchmark_output' is not given, the
345 // 'file_reporter' is ignored.
346 //
347 // RETURNS: The number of matching benchmarks.
348 BENCHMARK_EXPORT size_t RunSpecifiedBenchmarks();
349 BENCHMARK_EXPORT size_t RunSpecifiedBenchmarks(std::string spec);
350
351 BENCHMARK_EXPORT size_t
352 RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter);
353 BENCHMARK_EXPORT size_t
354 RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter, std::string spec);
355
356 BENCHMARK_EXPORT size_t RunSpecifiedBenchmarks(
357 BenchmarkReporter* display_reporter, BenchmarkReporter* file_reporter);
358 BENCHMARK_EXPORT size_t
359 RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter,
360 BenchmarkReporter* file_reporter, std::string spec);
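// For illustration, a minimal sketch (not part of this header) of pairing a
// spec with an explicit display reporter; it assumes the ConsoleReporter
// declared further below in this header:
//   int main(int argc, char** argv) {
//     benchmark::Initialize(&argc, argv);
//     benchmark::ConsoleReporter display_reporter;
//     benchmark::RunSpecifiedBenchmarks(&display_reporter, "BM_memcpy.*");
//     benchmark::Shutdown();
//     return 0;
//   }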
361
362 // TimeUnit is passed to a benchmark in order to specify the order of magnitude
363 // for the measured time.
364 enum TimeUnit { kNanosecond, kMicrosecond, kMillisecond, kSecond };
365
366 BENCHMARK_EXPORT TimeUnit GetDefaultTimeUnit();
367
368 // Sets the default time unit benchmarks use.
369 // Has to be called before the benchmark loop to take effect.
370 BENCHMARK_EXPORT void SetDefaultTimeUnit(TimeUnit unit);
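// A brief illustrative sketch: report every benchmark in milliseconds unless
// a benchmark overrides its own unit via Benchmark::Unit():
//   benchmark::SetDefaultTimeUnit(benchmark::kMillisecond);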
371
372 // If a MemoryManager is registered (via RegisterMemoryManager()),
373 // it can be used to collect and report allocation metrics for a run of the
374 // benchmark.
375 class MemoryManager {
376 public:
377 static const int64_t TombstoneValue;
378
379 struct Result {
380 Result()
381 : num_allocs(0),
382 max_bytes_used(0),
383 total_allocated_bytes(TombstoneValue),
384 net_heap_growth(TombstoneValue) {}
385
386 // The number of allocations made in total between Start and Stop.
387 int64_t num_allocs;
388
389 // The peak memory use between Start and Stop.
390 int64_t max_bytes_used;
391
392 // The total memory allocated, in bytes, between Start and Stop.
393 // Init'ed to TombstoneValue if metric not available.
394 int64_t total_allocated_bytes;
395
396 // The net changes in memory, in bytes, between Start and Stop.
397 // i.e., total_allocated_bytes - total_deallocated_bytes.
398 // Init'ed to TombstoneValue if metric not available.
399 int64_t net_heap_growth;
400 };
401
402 virtual ~MemoryManager() {}
403
404 // Implement this to start recording allocation information.
405 virtual void Start() = 0;
406
407 // Implement this to stop recording and fill out the given Result structure.
408 virtual void Stop(Result& result) = 0;
409 };
410
411 // Register a MemoryManager instance that will be used to collect and report
412 // allocation measurements for benchmark runs.
413 BENCHMARK_EXPORT
414 void RegisterMemoryManager(MemoryManager* memory_manager);
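// An illustrative sketch of hooking in a custom MemoryManager; the values
// written below are placeholders that would normally come from your own
// allocator hooks:
//   class MyMemoryManager : public benchmark::MemoryManager {
//    public:
//     void Start() override { /* begin tracking allocations */ }
//     void Stop(Result& result) override {
//       result.num_allocs = 0;      // placeholder: allocations observed
//       result.max_bytes_used = 0;  // placeholder: peak bytes observed
//     }
//   };
//   // Register before RunSpecifiedBenchmarks(); the manager must outlive the run.
//   static MyMemoryManager mm;
//   benchmark::RegisterMemoryManager(&mm);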
415
416 // Add a key-value pair to output as part of the context stanza in the report.
417 BENCHMARK_EXPORT
418 void AddCustomContext(const std::string& key, const std::string& value);
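// Illustrative sketch (key and value are arbitrary examples): tag the report's
// context with build information before running the benchmarks:
//   benchmark::AddCustomContext("compiler", "clang 15.0");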
419
420 namespace internal {
421 class Benchmark;
422 class BenchmarkImp;
423 class BenchmarkFamilies;
424
425 BENCHMARK_EXPORT std::map<std::string, std::string>*& GetGlobalContext();
426
427 BENCHMARK_EXPORT
428 void UseCharPointer(char const volatile*);
429
430 // Take ownership of the pointer and register the benchmark. Return the
431 // registered benchmark.
432 BENCHMARK_EXPORT Benchmark* RegisterBenchmarkInternal(Benchmark*);
433
434 // Ensure that the standard streams are properly initialized in every TU.
435 BENCHMARK_EXPORT int InitializeStreams();
436 BENCHMARK_UNUSED static int stream_init_anchor = InitializeStreams();
437
438 } // namespace internal
439
440 #if (!defined(__GNUC__) && !defined(__clang__)) || defined(__pnacl__) || \
441 defined(__EMSCRIPTEN__)
442 #define BENCHMARK_HAS_NO_INLINE_ASSEMBLY
443 #endif
444
445 // Force the compiler to flush pending writes to global memory. Acts as an
446 // effective read/write barrier
447 #ifdef BENCHMARK_HAS_CXX11
448 inline BENCHMARK_ALWAYS_INLINE void ClobberMemory() {
449 std::atomic_signal_fence(std::memory_order_acq_rel);
450 }
451 #endif
452
453 // The DoNotOptimize(...) function can be used to prevent a value or
454 // expression from being optimized away by the compiler. This function is
455 // intended to add little to no overhead.
456 // See: https://youtu.be/nXaxk27zwlk?t=2441
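// An illustrative sketch of the usual pattern (mirrors the project docs):
// DoNotOptimize() keeps a value "live" and ClobberMemory() forces pending
// writes to memory so the loop body is not optimized away:
//   static void BM_VectorPushBack(benchmark::State& state) {
//     for (auto _ : state) {
//       std::vector<int> v;
//       v.reserve(1);
//       auto data = v.data();            // allow v.data() to be clobbered
//       benchmark::DoNotOptimize(data);  // pass as non-const lvalue
//       v.push_back(42);
//       benchmark::ClobberMemory();      // force 42 to be written to memory
//     }
//   }
//   BENCHMARK(BM_VectorPushBack);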
457 #ifndef BENCHMARK_HAS_NO_INLINE_ASSEMBLY
458 #if !defined(__GNUC__) || defined(__llvm__) || defined(__INTEL_COMPILER)
459 template <class Tp>
460 BENCHMARK_DEPRECATED_MSG(
461 "The const-ref version of this method can permit "
462 "undesired compiler optimizations in benchmarks")
463 inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp const& value) {
464 asm volatile("" : : "r,m"(value) : "memory");
465 }
466
467 template <class Tp>
468 inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp& value) {
469 #if defined(__clang__)
470 asm volatile("" : "+r,m"(value) : : "memory");
471 #else
472 asm volatile("" : "+m,r"(value) : : "memory");
473 #endif
474 }
475
476 #ifdef BENCHMARK_HAS_CXX11
477 template <class Tp>
478 inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp&& value) {
479 #if defined(__clang__)
480 asm volatile("" : "+r,m"(value) : : "memory");
481 #else
482 asm volatile("" : "+m,r"(value) : : "memory");
483 #endif
484 }
485 #endif
486 #elif defined(BENCHMARK_HAS_CXX11) && (__GNUC__ >= 5)
487 // Workaround for a GCC bug that incurs the overhead of a full argument copy.
488 // See: #1340 and https://gcc.gnu.org/bugzilla/show_bug.cgi?id=105519
489 template <class Tp>
490 BENCHMARK_DEPRECATED_MSG(
491 "The const-ref version of this method can permit "
492 "undesired compiler optimizations in benchmarks")
493 inline BENCHMARK_ALWAYS_INLINE
494 typename std::enable_if<std::is_trivially_copyable<Tp>::value &&
495 (sizeof(Tp) <= sizeof(Tp*))>::type
496 DoNotOptimize(Tp const& value) {
497 asm volatile("" : : "r,m"(value) : "memory");
498 }
499
500 template <class Tp>
501 BENCHMARK_DEPRECATED_MSG(
502 "The const-ref version of this method can permit "
503 "undesired compiler optimizations in benchmarks")
504 inline BENCHMARK_ALWAYS_INLINE
505 typename std::enable_if<!std::is_trivially_copyable<Tp>::value ||
506 (sizeof(Tp) > sizeof(Tp*))>::type
507 DoNotOptimize(Tp const& value) {
508 asm volatile("" : : "m"(value) : "memory");
509 }
510
511 template <class Tp>
512 inline BENCHMARK_ALWAYS_INLINE
513 typename std::enable_if<std::is_trivially_copyable<Tp>::value &&
514 (sizeof(Tp) <= sizeof(Tp*))>::type
515 DoNotOptimize(Tp& value) {
516 asm volatile("" : "+m,r"(value) : : "memory");
517 }
518
519 template <class Tp>
520 inline BENCHMARK_ALWAYS_INLINE
521 typename std::enable_if<!std::is_trivially_copyable<Tp>::value ||
522 (sizeof(Tp) > sizeof(Tp*))>::type
523 DoNotOptimize(Tp& value) {
524 asm volatile("" : "+m"(value) : : "memory");
525 }
526
527 template <class Tp>
528 inline BENCHMARK_ALWAYS_INLINE
529 typename std::enable_if<std::is_trivially_copyable<Tp>::value &&
530 (sizeof(Tp) <= sizeof(Tp*))>::type
531 DoNotOptimize(Tp&& value) {
532 asm volatile("" : "+m,r"(value) : : "memory");
533 }
534
535 template <class Tp>
536 inline BENCHMARK_ALWAYS_INLINE
537 typename std::enable_if<!std::is_trivially_copyable<Tp>::value ||
538 (sizeof(Tp) > sizeof(Tp*))>::type
539 DoNotOptimize(Tp&& value) {
540 asm volatile("" : "+m"(value) : : "memory");
541 }
542
543 #else
544 // Fallback for GCC < 5. Can add some overhead because the compiler is forced
545 // to use memory operations instead of operations with registers.
546 // TODO: Remove once GCC < 5 is no longer supported.
547 template <class Tp>
548 BENCHMARK_DEPRECATED_MSG(
549 "The const-ref version of this method can permit "
550 "undesired compiler optimizations in benchmarks")
551 inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp const& value) {
552 asm volatile("" : : "m"(value) : "memory");
553 }
554
555 template <class Tp>
556 inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp& value) {
557 asm volatile("" : "+m"(value) : : "memory");
558 }
559
560 #ifdef BENCHMARK_HAS_CXX11
561 template <class Tp>
562 inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp&& value) {
563 asm volatile("" : "+m"(value) : : "memory");
564 }
565 #endif
566 #endif
567
568 #ifndef BENCHMARK_HAS_CXX11
569 inline BENCHMARK_ALWAYS_INLINE void ClobberMemory() {
570 asm volatile("" : : : "memory");
571 }
572 #endif
573 #elif defined(_MSC_VER)
574 template <class Tp>
575 BENCHMARK_DEPRECATED_MSG(
576 "The const-ref version of this method can permit "
577 "undesired compiler optimizations in benchmarks")
578 inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp const& value) {
579 internal::UseCharPointer(&reinterpret_cast<char const volatile&>(value));
580 _ReadWriteBarrier();
581 }
582
583 #ifndef BENCHMARK_HAS_CXX11
584 inline BENCHMARK_ALWAYS_INLINE void ClobberMemory() { _ReadWriteBarrier(); }
585 #endif
586 #else
587 template <class Tp>
588 BENCHMARK_DEPRECATED_MSG(
589 "The const-ref version of this method can permit "
590 "undesired compiler optimizations in benchmarks")
591 inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp const& value) {
592 internal::UseCharPointer(&reinterpret_cast<char const volatile&>(value));
593 }
594 // FIXME Add ClobberMemory() for non-gnu and non-msvc compilers, before C++11.
595 #endif
596
597 // This class is used for user-defined counters.
598 class Counter {
599 public:
600 enum Flags {
601 kDefaults = 0,
602 // Mark the counter as a rate. It will be presented divided
603 // by the duration of the benchmark.
604 kIsRate = 1 << 0,
605 // Mark the counter as a thread-average quantity. It will be
606 // presented divided by the number of threads.
607 kAvgThreads = 1 << 1,
608 // Mark the counter as a thread-average rate. See above.
609 kAvgThreadsRate = kIsRate | kAvgThreads,
610 // Mark the counter as a constant value, valid/same for *every* iteration.
611 // When reporting, it will be *multiplied* by the iteration count.
612 kIsIterationInvariant = 1 << 2,
613 // Mark the counter as a constant rate.
614 // When reporting, it will be *multiplied* by the iteration count
615 // and then divided by the duration of the benchmark.
616 kIsIterationInvariantRate = kIsRate | kIsIterationInvariant,
617 // Mark the counter as an iteration-average quantity.
618 // It will be presented divided by the number of iterations.
619 kAvgIterations = 1 << 3,
620 // Mark the counter as an iteration-average rate. See above.
621 kAvgIterationsRate = kIsRate | kAvgIterations,
622
623 // In the end, invert the result. This is always done last!
624 kInvert = 1 << 31
625 };
626
627 enum OneK {
628 // 1'000 items per 1k
629 kIs1000 = 1000,
630 // 1'024 items per 1k
631 kIs1024 = 1024
632 };
633
634 double value;
635 Flags flags;
636 OneK oneK;
637
638 BENCHMARK_ALWAYS_INLINE
639 Counter(double v = 0., Flags f = kDefaults, OneK k = kIs1000)
640 : value(v), flags(f), oneK(k) {}
641
642 BENCHMARK_ALWAYS_INLINE operator double const &() const { return value; }
643 BENCHMARK_ALWAYS_INLINE operator double&() { return value; }
644 };
645
646 // A helper for user code to create unforeseen combinations of Flags, without
647 // having to do this cast manually each time, or providing this operator.
648 Counter::Flags inline operator|(const Counter::Flags& LHS,
649 const Counter::Flags& RHS) {
650 return static_cast<Counter::Flags>(static_cast<int>(LHS) |
651 static_cast<int>(RHS));
652 }
653
654 // This is the container for the user-defined counters.
655 typedef std::map<std::string, Counter> UserCounters;
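// An illustrative sketch of user counters (DoSomeWork() is a placeholder):
// values written into State::counters, declared further below, are reported
// alongside the built-in metrics:
//   static void BM_Process(benchmark::State& state) {
//     double items = 0;
//     for (auto _ : state) items += DoSomeWork();
//     state.counters["Items"] =
//         benchmark::Counter(items, benchmark::Counter::kIsRate);
//   }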
656
657 // BigO is passed to a benchmark in order to specify the asymptotic
658 // computational complexity for the benchmark.
659 // In case oAuto is selected, complexity will be calculated automatically
660 // to the best fit.
661 enum BigO { oNone, o1, oN, oNSquared, oNCubed, oLogN, oNLogN, oAuto, oLambda };
662
663 typedef int64_t IterationCount;
664
665 enum StatisticUnit { kTime, kPercentage };
666
667 // BigOFunc is passed to a benchmark in order to specify the asymptotic
668 // computational complexity for the benchmark.
669 typedef double(BigOFunc)(IterationCount);
670
671 // StatisticsFunc is passed to a benchmark in order to compute some descriptive
672 // statistics over all the measurements of some type
673 typedef double(StatisticsFunc)(const std::vector<double>&);
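// An illustrative sketch of a custom statistic computed across repetitions,
// registered via Benchmark::ComputeStatistics() (declared further below):
//   double BenchmarkMax(const std::vector<double>& v) {
//     return *std::max_element(v.begin(), v.end());
//   }
//   BENCHMARK(BM_memcpy)->Repetitions(10)->ComputeStatistics("max", BenchmarkMax);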
674
675 namespace internal {
676 struct Statistics {
677 std::string name_;
678 StatisticsFunc* compute_;
679 StatisticUnit unit_;
680
681 Statistics(const std::string& name, StatisticsFunc* compute,
682 StatisticUnit unit = kTime)
683 : name_(name), compute_(compute), unit_(unit) {}
684 };
685
686 class BenchmarkInstance;
687 class ThreadTimer;
688 class ThreadManager;
689 class PerfCountersMeasurement;
690
691 enum AggregationReportMode
692 #if defined(BENCHMARK_HAS_CXX11)
693 : unsigned
694 #else
695 #endif
696 {
697 // The mode has not been manually specified
698 ARM_Unspecified = 0,
699 // The mode is user-specified.
700 // This may or may not be set when the following bit-flags are set.
701 ARM_Default = 1U << 0U,
702 // File reporter should only output aggregates.
703 ARM_FileReportAggregatesOnly = 1U << 1U,
704 // Display reporter should only output aggregates
705 ARM_DisplayReportAggregatesOnly = 1U << 2U,
706 // Both reporters should only display aggregates.
707 ARM_ReportAggregatesOnly =
708 ARM_FileReportAggregatesOnly | ARM_DisplayReportAggregatesOnly
709 };
710
711 enum Skipped
712 #if defined(BENCHMARK_HAS_CXX11)
713 : unsigned
714 #endif
715 {
716 NotSkipped = 0,
717 SkippedWithMessage,
718 SkippedWithError
719 };
720
721 } // namespace internal
722
723 // State is passed to a running Benchmark and contains state for the
724 // benchmark to use.
725 class BENCHMARK_EXPORT State {
726 public:
727 struct StateIterator;
728 friend struct StateIterator;
729
730 // Returns iterators used to run each iteration of a benchmark using a
731 // C++11 range-based for loop. These functions should not be called directly.
732 //
733 // REQUIRES: The benchmark has not started running yet. Neither begin nor end
734 // have been called previously.
735 //
736 // NOTE: KeepRunning may not be used after calling either of these functions.
737 BENCHMARK_ALWAYS_INLINE StateIterator begin();
738 BENCHMARK_ALWAYS_INLINE StateIterator end();
739
740 // Returns true if the benchmark should continue through another iteration.
741 // NOTE: A benchmark may not return from the test until KeepRunning() has
742 // returned false.
743 bool KeepRunning();
744
745 // Returns true iff the benchmark should run n more iterations.
746 // REQUIRES: 'n' > 0.
747 // NOTE: A benchmark must not return from the test until KeepRunningBatch()
748 // has returned false.
749 // NOTE: KeepRunningBatch() may overshoot by up to 'n' iterations.
750 //
751 // Intended usage:
752 // while (state.KeepRunningBatch(1000)) {
753 // // process 1000 elements
754 // }
755 bool KeepRunningBatch(IterationCount n);
756
757 // REQUIRES: timer is running and 'SkipWithMessage(...)' or
758 // 'SkipWithError(...)' has not been called by the current thread.
759 // Stop the benchmark timer. If not called, the timer will be
760 // automatically stopped after the last iteration of the benchmark loop.
761 //
762 // For threaded benchmarks the PauseTiming() function only pauses the timing
763 // for the current thread.
764 //
765 // NOTE: The "real time" measurement is per-thread. If different threads
766 // report different measurements the largest one is reported.
767 //
768 // NOTE: PauseTiming()/ResumeTiming() are relatively
769 // heavyweight, and so their use should generally be avoided
770 // within each benchmark iteration, if possible.
771 void PauseTiming();
772
773 // REQUIRES: timer is not running and 'SkipWithMessage(...)' or
774 // 'SkipWithError(...)' has not been called by the current thread.
775 // Start the benchmark timer. The timer is NOT running on entrance to the
776 // benchmark function. It begins running after control flow enters the
777 // benchmark loop.
778 //
779 // NOTE: PauseTiming()/ResumeTiming() are relatively
780 // heavyweight, and so their use should generally be avoided
781 // within each benchmark iteration, if possible.
782 void ResumeTiming();
783
784 // REQUIRES: 'SkipWithMessage(...)' or 'SkipWithError(...)' has not been
785 // called previously by the current thread.
786 // Report the benchmark as resulting in being skipped with the specified
787 // 'msg'.
788 // After this call the user may explicitly 'return' from the benchmark.
789 //
790 // If the ranged-for style of benchmark loop is used, the user must explicitly
791 // break from the loop, otherwise all future iterations will be run.
792 // If the 'KeepRunning()' loop is used the current thread will automatically
793 // exit the loop at the end of the current iteration.
794 //
795 // For threaded benchmarks only the current thread stops executing and future
796 // calls to `KeepRunning()` will block until all threads have completed
797 // the `KeepRunning()` loop. If multiple threads report being skipped only the
798 // first skip message is used.
799 //
800 // NOTE: Calling 'SkipWithMessage(...)' does not cause the benchmark to exit
801 // the current scope immediately. If the function is called from within
802 // the 'KeepRunning()' loop the current iteration will finish. It is the
803 // user's responsibility to exit the scope as needed.
804 void SkipWithMessage(const std::string& msg);
805
806 // REQUIRES: 'SkipWithMessage(...)' or 'SkipWithError(...)' has not been
807 // called previously by the current thread.
808 // Report the benchmark as resulting in an error with the specified 'msg'.
809 // After this call the user may explicitly 'return' from the benchmark.
810 //
811 // If the ranged-for style of benchmark loop is used, the user must explicitly
812 // break from the loop, otherwise all future iterations will be run.
813 // If the 'KeepRunning()' loop is used the current thread will automatically
814 // exit the loop at the end of the current iteration.
815 //
816 // For threaded benchmarks only the current thread stops executing and future
817 // calls to `KeepRunning()` will block until all threads have completed
818 // the `KeepRunning()` loop. If multiple threads report an error only the
819 // first error message is used.
820 //
821 // NOTE: Calling 'SkipWithError(...)' does not cause the benchmark to exit
822 // the current scope immediately. If the function is called from within
823 // the 'KeepRunning()' loop the current iteration will finish. It is the
824 // user's responsibility to exit the scope as needed.
825 void SkipWithError(const std::string& msg);
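// An illustrative sketch (OpenResource()/ReadChunk() are placeholders) of
// reporting an error and breaking out of the ranged-for loop:
//   static void BM_ReadFile(benchmark::State& state) {
//     Resource* res = OpenResource();
//     if (res == nullptr) {
//       state.SkipWithError("could not open resource");
//       return;  // nothing has been measured yet, returning is fine
//     }
//     for (auto _ : state) {
//       if (!ReadChunk(res)) {
//         state.SkipWithError("read failed");
//         break;  // the ranged-for loop requires an explicit break
//       }
//     }
//   }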
826
827 // Returns true if 'SkipWithMessage(...)' or 'SkipWithError(...)' was called.
828 bool skipped() const { return internal::NotSkipped != skipped_; }
829
830 // Returns true if an error has been reported with 'SkipWithError(...)'.
831 bool error_occurred() const { return internal::SkippedWithError == skipped_; }
832
833 // REQUIRES: called exactly once per iteration of the benchmarking loop.
834 // Set the manually measured time for this benchmark iteration, which
835 // is used instead of automatically measured time if UseManualTime() was
836 // specified.
837 //
838 // For threaded benchmarks the final value will be set to the largest
839 // reported value.
840 void SetIterationTime(double seconds);
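// An illustrative sketch of manual timing (LaunchKernelAndWait() is a
// placeholder); pair it with Benchmark::UseManualTime(), declared further
// below:
//   static void BM_Gpu(benchmark::State& state) {
//     for (auto _ : state) {
//       auto start = std::chrono::high_resolution_clock::now();
//       LaunchKernelAndWait();
//       auto end = std::chrono::high_resolution_clock::now();
//       state.SetIterationTime(
//           std::chrono::duration<double>(end - start).count());
//     }
//   }
//   BENCHMARK(BM_Gpu)->UseManualTime();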
841
842 // Set the number of bytes processed by the current benchmark
843 // execution. This routine is typically called once at the end of a
844 // throughput oriented benchmark.
845 //
846 // REQUIRES: a benchmark has exited its benchmarking loop.
847 BENCHMARK_ALWAYS_INLINE
848 void SetBytesProcessed(int64_t bytes) {
849 counters["bytes_per_second"] =
850 Counter(static_cast<double>(bytes), Counter::kIsRate, Counter::kIs1024);
851 }
852
853 BENCHMARK_ALWAYS_INLINE
854 int64_t bytes_processed() const {
855 if (counters.find("bytes_per_second") != counters.end())
856 return static_cast<int64_t>(counters.at("bytes_per_second"));
857 return 0;
858 }
859
860 // If this routine is called with complexity_n > 0 and a complexity report is
861 // requested for the family of benchmarks, then the current benchmark will be
862 // part of the computation and complexity_n will represent the length of N.
865 BENCHMARK_ALWAYS_INLINE
866 void SetComplexityN(int64_t complexity_n) { complexity_n_ = complexity_n; }
867
868 BENCHMARK_ALWAYS_INLINE
869 int64_t complexity_length_n() const { return complexity_n_; }
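// An illustrative sketch of complexity reporting: record N each run and ask
// for a fit via Benchmark::Complexity(), declared further below:
//   static void BM_StringCompare(benchmark::State& state) {
//     std::string s1(state.range(0), '-');
//     std::string s2(state.range(0), '-');
//     for (auto _ : state) benchmark::DoNotOptimize(s1.compare(s2));
//     state.SetComplexityN(state.range(0));
//   }
//   BENCHMARK(BM_StringCompare)
//       ->RangeMultiplier(2)->Range(1 << 10, 1 << 18)->Complexity(benchmark::oN);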
870
871 // If this routine is called with items > 0, then an items/s
872 // label is printed on the benchmark report line for the currently
873 // executing benchmark. It is typically called at the end of a processing
874 // benchmark where a processing items/second output is desired.
875 //
876 // REQUIRES: a benchmark has exited its benchmarking loop.
877 BENCHMARK_ALWAYS_INLINE
878 void SetItemsProcessed(int64_t items) {
879 counters["items_per_second"] =
880 Counter(static_cast<double>(items), benchmark::Counter::kIsRate);
881 }
882
883 BENCHMARK_ALWAYS_INLINE
884 int64_t items_processed() const {
885 if (counters.find("items_per_second") != counters.end())
886 return static_cast<int64_t>(counters.at("items_per_second"));
887 return 0;
888 }
889
890 // If this routine is called, the specified label is printed at the
891 // end of the benchmark report line for the currently executing
892 // benchmark. Example:
893 // static void BM_Compress(benchmark::State& state) {
894 // ...
895 // double compress = input_size / output_size;
896 // state.SetLabel(StrFormat("compress:%.1f%%", 100.0*compression));
897 // }
898 // Produces output that looks like:
899 // BM_Compress 50 50 14115038 compress:27.3%
900 //
901 // REQUIRES: a benchmark has exited its benchmarking loop.
902 void SetLabel(const std::string& label);
903
904 // Range arguments for this run. CHECKs if the argument has been set.
905 BENCHMARK_ALWAYS_INLINE
906 int64_t range(std::size_t pos = 0) const {
907 assert(range_.size() > pos);
908 return range_[pos];
909 }
910
911 BENCHMARK_DEPRECATED_MSG("use 'range(0)' instead")
912 int64_t range_x() const { return range(0); }
913
914 BENCHMARK_DEPRECATED_MSG("use 'range(1)' instead")
915 int64_t range_y() const { return range(1); }
916
917 // Number of threads concurrently executing the benchmark.
918 BENCHMARK_ALWAYS_INLINE
919 int threads() const { return threads_; }
920
921 // Index of the executing thread. Values from [0, threads).
922 BENCHMARK_ALWAYS_INLINE
923 int thread_index() const { return thread_index_; }
924
925 BENCHMARK_ALWAYS_INLINE
926 IterationCount iterations() const {
927 if (BENCHMARK_BUILTIN_EXPECT(!started_, false)) {
928 return 0;
929 }
930 return max_iterations - total_iterations_ + batch_leftover_;
931 }
932
933 BENCHMARK_ALWAYS_INLINE
934 std::string name() const { return name_; }
935
936 private:
937 // items we expect on the first cache line (ie 64 bytes of the struct)
938 // When total_iterations_ is 0, KeepRunning() and friends will return false.
939 // May be larger than max_iterations.
940 IterationCount total_iterations_;
941
942 // When using KeepRunningBatch(), batch_leftover_ holds the number of
943 // iterations beyond max_iters that were run. Used to track
944 // completed_iterations_ accurately.
945 IterationCount batch_leftover_;
946
947 public:
948 const IterationCount max_iterations;
949
950 private:
951 bool started_;
952 bool finished_;
953 internal::Skipped skipped_;
954
955 // items we don't need on the first cache line
956 std::vector<int64_t> range_;
957
958 int64_t complexity_n_;
959
960 public:
961 // Container for user-defined counters.
962 UserCounters counters;
963
964 private:
965 State(std::string name, IterationCount max_iters,
966 const std::vector<int64_t>& ranges, int thread_i, int n_threads,
967 internal::ThreadTimer* timer, internal::ThreadManager* manager,
968 internal::PerfCountersMeasurement* perf_counters_measurement);
969
970 void StartKeepRunning();
971 // Implementation of KeepRunning() and KeepRunningBatch().
972 // is_batch must be true unless n is 1.
973 bool KeepRunningInternal(IterationCount n, bool is_batch);
974 void FinishKeepRunning();
975
976 const std::string name_;
977 const int thread_index_;
978 const int threads_;
979
980 internal::ThreadTimer* const timer_;
981 internal::ThreadManager* const manager_;
982 internal::PerfCountersMeasurement* const perf_counters_measurement_;
983
984 friend class internal::BenchmarkInstance;
985 };
986
987 inline BENCHMARK_ALWAYS_INLINE bool State::KeepRunning() {
988 return KeepRunningInternal(1, /*is_batch=*/false);
989 }
990
991 inline BENCHMARK_ALWAYS_INLINE bool State::KeepRunningBatch(IterationCount n) {
992 return KeepRunningInternal(n, /*is_batch=*/true);
993 }
994
995 inline BENCHMARK_ALWAYS_INLINE bool State::KeepRunningInternal(IterationCount n,
996 bool is_batch) {
997 // total_iterations_ is set to 0 by the constructor, and always set to a
998 // nonzero value by StartKeepRunning().
999 assert(n > 0);
1000 // n must be 1 unless is_batch is true.
1001 assert(is_batch || n == 1);
1002 if (BENCHMARK_BUILTIN_EXPECT(total_iterations_ >= n, true)) {
1003 total_iterations_ -= n;
1004 return true;
1005 }
1006 if (!started_) {
1007 StartKeepRunning();
1008 if (!skipped() && total_iterations_ >= n) {
1009 total_iterations_ -= n;
1010 return true;
1011 }
1012 }
1013 // For non-batch runs, total_iterations_ must be 0 by now.
1014 if (is_batch && total_iterations_ != 0) {
1015 batch_leftover_ = n - total_iterations_;
1016 total_iterations_ = 0;
1017 return true;
1018 }
1019 FinishKeepRunning();
1020 return false;
1021 }
1022
1023 struct State::StateIterator {
1024 struct BENCHMARK_UNUSED Value {};
1025 typedef std::forward_iterator_tag iterator_category;
1026 typedef Value value_type;
1027 typedef Value reference;
1028 typedef Value pointer;
1029 typedef std::ptrdiff_t difference_type;
1030
1031 private:
1032 friend class State;
1033 BENCHMARK_ALWAYS_INLINE
1034 StateIterator() : cached_(0), parent_() {}
1035
1036 BENCHMARK_ALWAYS_INLINE
1037 explicit StateIterator(State* st)
1038 : cached_(st->skipped() ? 0 : st->max_iterations), parent_(st) {}
1039
1040 public:
1041 BENCHMARK_ALWAYS_INLINE
1042 Value operator*() const { return Value(); }
1043
1044 BENCHMARK_ALWAYS_INLINE
1045 StateIterator& operator++() {
1046 assert(cached_ > 0);
1047 --cached_;
1048 return *this;
1049 }
1050
1051 BENCHMARK_ALWAYS_INLINE
1052 bool operator!=(StateIterator const&) const {
1053 if (BENCHMARK_BUILTIN_EXPECT(cached_ != 0, true)) return true;
1054 parent_->FinishKeepRunning();
1055 return false;
1056 }
1057
1058 private:
1059 IterationCount cached_;
1060 State* const parent_;
1061 };
1062
1063 inline BENCHMARK_ALWAYS_INLINE State::StateIterator State::begin() {
1064 return StateIterator(this);
1065 }
1066 inline BENCHMARK_ALWAYS_INLINE State::StateIterator State::end() {
1067 StartKeepRunning();
1068 return StateIterator();
1069 }
1070
1071 namespace internal {
1072
1073 typedef void(Function)(State&);
1074
1075 // ------------------------------------------------------
1076 // Benchmark registration object. The BENCHMARK() macro expands
1077 // into an internal::Benchmark* object. Various methods can
1078 // be called on this object to change the properties of the benchmark.
1079 // Each method returns "this" so that multiple method calls can be
1080 // chained into one expression.
1081 class BENCHMARK_EXPORT Benchmark {
1082 public:
1083 virtual ~Benchmark();
1084
1085 // Note: the following methods all return "this" so that multiple
1086 // method calls can be chained together in one expression.
1087
1088 // Specify the name of the benchmark
1089 Benchmark* Name(const std::string& name);
1090
1091 // Run this benchmark once with "x" as the extra argument passed
1092 // to the function.
1093 // REQUIRES: The function passed to the constructor must accept an arg1.
1094 Benchmark* Arg(int64_t x);
1095
1096 // Run this benchmark with the given time unit for the generated output report
1097 Benchmark* Unit(TimeUnit unit);
1098
1099 // Run this benchmark once for a number of values picked from the
1100 // range [start..limit]. (start and limit are always picked.)
1101 // REQUIRES: The function passed to the constructor must accept an arg1.
1102 Benchmark* Range(int64_t start, int64_t limit);
1103
1104 // Run this benchmark once for all values in the range [start..limit] with
1105 // a specific step.
1106 // REQUIRES: The function passed to the constructor must accept an arg1.
1107 Benchmark* DenseRange(int64_t start, int64_t limit, int step = 1);
1108
1109 // Run this benchmark once with "args" as the extra arguments passed
1110 // to the function.
1111 // REQUIRES: The function passed to the constructor must accept arg1, arg2 ...
1112 Benchmark* Args(const std::vector<int64_t>& args);
1113
1114 // Equivalent to Args({x, y})
1115 // NOTE: This is a legacy C++03 interface provided for compatibility only.
1116 // New code should use 'Args'.
1117 Benchmark* ArgPair(int64_t x, int64_t y) {
1118 std::vector<int64_t> args;
1119 args.push_back(x);
1120 args.push_back(y);
1121 return Args(args);
1122 }
1123
1124 // Run this benchmark once for a number of values picked from the
1125 // ranges [start..limit]. (starts and limits are always picked.)
1126 // REQUIRES: The function passed to the constructor must accept arg1, arg2 ...
1127 Benchmark* Ranges(const std::vector<std::pair<int64_t, int64_t> >& ranges);
1128
1129 // Run this benchmark once for each combination of values in the (cartesian)
1130 // product of the supplied argument lists.
1131 // REQUIRES: The function passed to the constructor must accept arg1, arg2 ...
1132 Benchmark* ArgsProduct(const std::vector<std::vector<int64_t> >& arglists);
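// For illustration, the cartesian product below registers one benchmark per
// combination, equivalent to six separate Args({...}) calls:
//   BENCHMARK(BM_SetInsert)
//       ->ArgsProduct({{1 << 10, 3 << 10, 8 << 10}, {128, 512}});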
1133
1134 // Equivalent to ArgNames({name})
1135 Benchmark* ArgName(const std::string& name);
1136
1137 // Set the argument names to display in the benchmark name. If not called,
1138 // only argument values will be shown.
1139 Benchmark* ArgNames(const std::vector<std::string>& names);
1140
1141 // Equivalent to Ranges({{lo1, hi1}, {lo2, hi2}}).
1142 // NOTE: This is a legacy C++03 interface provided for compatibility only.
1143 // New code should use 'Ranges'.
1144 Benchmark* RangePair(int64_t lo1, int64_t hi1, int64_t lo2, int64_t hi2) {
1145 std::vector<std::pair<int64_t, int64_t> > ranges;
1146 ranges.push_back(std::make_pair(lo1, hi1));
1147 ranges.push_back(std::make_pair(lo2, hi2));
1148 return Ranges(ranges);
1149 }
1150
1151 // Have "setup" and/or "teardown" invoked once for every benchmark run.
1152 // If the benchmark is multi-threaded (will run in k threads concurrently),
1153 // the setup callback will be invoked exactly once (not k times) before
1154 // each run with k threads. Time allowing (e.g. for a short benchmark), there
1155 // may be multiple such runs per benchmark, each run with its own
1156 // "setup"/"teardown".
1157 //
1158 // If the benchmark uses different size groups of threads (e.g. via
1159 // ThreadRange), the above will be true for each size group.
1160 //
1161 // The callback will be passed a State object, which includes the number
1162 // of threads, thread-index, benchmark arguments, etc.
1163 //
1164 // The callback must not be NULL or self-deleting.
1165 Benchmark* Setup(void (*setup)(const benchmark::State&));
1166 Benchmark* Teardown(void (*teardown)(const benchmark::State&));
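// An illustrative sketch (DoSetup()/DoTeardown() are placeholders) of per-run
// setup and teardown callbacks:
//   static void DoSetup(const benchmark::State& state) { /* open resources */ }
//   static void DoTeardown(const benchmark::State& state) { /* release them */ }
//   BENCHMARK(BM_memcpy)->Setup(DoSetup)->Teardown(DoTeardown)->Threads(8);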
1167
1168 // Pass this benchmark object to *func, which can customize
1169 // the benchmark by calling various methods like Arg, Args,
1170 // Threads, etc.
1171 Benchmark* Apply(void (*func)(Benchmark* benchmark));
1172
1173 // Set the range multiplier for non-dense range. If not called, the range
1174 // multiplier kRangeMultiplier will be used.
1175 Benchmark* RangeMultiplier(int multiplier);
1176
1177 // Set the minimum amount of time to use when running this benchmark. This
1178 // option overrides the `benchmark_min_time` flag.
1179 // REQUIRES: `t > 0` and `Iterations` has not been called on this benchmark.
1180 Benchmark* MinTime(double t);
1181
1182 // Set the minimum amount of time to run the benchmark before taking runtimes
1183 // of this benchmark into account. This
1184 // option overrides the `benchmark_min_warmup_time` flag.
1185 // REQUIRES: `t >= 0` and `Iterations` has not been called on this benchmark.
1186 Benchmark* MinWarmUpTime(double t);
1187
1188 // Specify the number of iterations that should be run by this benchmark.
1189 // This option overrides the `benchmark_min_time` flag.
1190 // REQUIRES: 'n > 0' and `MinTime` has not been called on this benchmark.
1191 //
1192 // NOTE: This function should only be used when *exact* iteration control is
1193 // needed and never to control or limit how long a benchmark runs, where
1194 // `--benchmark_min_time=<N>s` or `MinTime(...)` should be used instead.
1195 Benchmark* Iterations(IterationCount n);
1196
1197 // Specify the number of times to repeat this benchmark. This option overrides
1198 // the `benchmark_repetitions` flag.
1199 // REQUIRES: `n > 0`
1200 Benchmark* Repetitions(int n);
1201
1202 // Specify if each repetition of the benchmark should be reported separately
1203 // or if only the final statistics should be reported. If the benchmark
1204 // is not repeated then the single result is always reported.
1205 // Applies to *ALL* reporters (display and file).
1206 Benchmark* ReportAggregatesOnly(bool value = true);
1207
1208 // Same as ReportAggregatesOnly(), but applies to display reporter only.
1209 Benchmark* DisplayAggregatesOnly(bool value = true);
1210
1211 // By default, the CPU time is measured only for the main thread, which may
1212 // be unrepresentative if the benchmark uses threads internally. If this
1213 // method is called, the total CPU time spent by all the threads will be
1214 // measured instead.
1215 Benchmark* MeasureProcessCPUTime();
1216
1217 // If a particular benchmark should use the Wall clock instead of the CPU time
1218 // (be it either the CPU time of the main thread only (default), or the
1219 // total CPU usage of the benchmark), call this method. If called, the elapsed
1220 // (wall) time will be used to control how many iterations are run, and in the
1221 // printing of items/second or MB/seconds values.
1222 // If not called, the CPU time used by the benchmark will be used.
1223 Benchmark* UseRealTime();
1224
1225 // If a benchmark must measure time manually (e.g. if GPU execution time is
1226 // being measured), call this method. If called, each benchmark iteration
1227 // should call SetIterationTime(seconds) to report the measured time, which
1228 // will be used to control how many iterations are run, and in the printing
1229 // of items/second or MB/second values.
1232 Benchmark* UseManualTime();
1233
1234 // Set the asymptotic computational complexity for the benchmark. If called
1235 // the asymptotic computational complexity will be shown on the output.
1236 Benchmark* Complexity(BigO complexity = benchmark::oAuto);
1237
1238 // Set the asymptotic computational complexity for the benchmark. If called
1239 // the asymptotic computational complexity will be shown on the output.
1240 Benchmark* Complexity(BigOFunc* complexity);
1241
1242 // Add a statistic to be computed over all the measurements of the benchmark run.
1243 Benchmark* ComputeStatistics(const std::string& name,
1244 StatisticsFunc* statistics,
1245 StatisticUnit unit = kTime);
1246
1247 // Support for running multiple copies of the same benchmark concurrently
1248 // in multiple threads. This may be useful when measuring the scaling
1249 // of some piece of code.
1250
1251 // Run one instance of this benchmark concurrently in t threads.
1252 Benchmark* Threads(int t);
1253
1254 // Pick a set of values T from [min_threads,max_threads].
1255 // min_threads and max_threads are always included in T. Run this
1256 // benchmark once for each value in T. The benchmark run for a
1257 // particular value t consists of t threads running the benchmark
1258 // function concurrently. For example, consider:
1259 // BENCHMARK(Foo)->ThreadRange(1,16);
1260 // This will run the following benchmarks:
1261 // Foo in 1 thread
1262 // Foo in 2 threads
1263 // Foo in 4 threads
1264 // Foo in 8 threads
1265 // Foo in 16 threads
1266 Benchmark* ThreadRange(int min_threads, int max_threads);
1267
1268 // For each value n in the range, run this benchmark once using n threads.
1269 // min_threads and max_threads are always included in the range.
1270 // stride specifies the increment. E.g. DenseThreadRange(1, 8, 3) starts
1271 // a benchmark with 1, 4, 7 and 8 threads.
1272 Benchmark* DenseThreadRange(int min_threads, int max_threads, int stride = 1);
1273
1274 // Equivalent to ThreadRange(NumCPUs(), NumCPUs())
1275 Benchmark* ThreadPerCpu();
1276
1277 virtual void Run(State& state) = 0;
1278
1279 TimeUnit GetTimeUnit() const;
1280
1281 protected:
1282 explicit Benchmark(const std::string& name);
1283 void SetName(const std::string& name);
1284
1285 public:
1286 const char* GetName() const;
1287 int ArgsCnt() const;
1288 const char* GetArgName(int arg) const;
1289
1290 private:
1291 friend class BenchmarkFamilies;
1292 friend class BenchmarkInstance;
1293
1294 std::string name_;
1295 AggregationReportMode aggregation_report_mode_;
1296 std::vector<std::string> arg_names_; // Args for all benchmark runs
1297 std::vector<std::vector<int64_t> > args_; // Args for all benchmark runs
1298
1299 TimeUnit time_unit_;
1300 bool use_default_time_unit_;
1301
1302 int range_multiplier_;
1303 double min_time_;
1304 double min_warmup_time_;
1305 IterationCount iterations_;
1306 int repetitions_;
1307 bool measure_process_cpu_time_;
1308 bool use_real_time_;
1309 bool use_manual_time_;
1310 BigO complexity_;
1311 BigOFunc* complexity_lambda_;
1312 std::vector<Statistics> statistics_;
1313 std::vector<int> thread_counts_;
1314
1315 typedef void (*callback_function)(const benchmark::State&);
1316 callback_function setup_;
1317 callback_function teardown_;
1318
1319 Benchmark(Benchmark const&)
1320 #if defined(BENCHMARK_HAS_CXX11)
1321 = delete
1322 #endif
1323 ;
1324
1325 Benchmark& operator=(Benchmark const&)
1326 #if defined(BENCHMARK_HAS_CXX11)
1327 = delete
1328 #endif
1329 ;
1330 };
1331
1332 } // namespace internal
1333
1334 // Create and register a benchmark with the specified 'name' that invokes
1335 // the specified functor 'fn'.
1336 //
1337 // RETURNS: A pointer to the registered benchmark.
1338 internal::Benchmark* RegisterBenchmark(const std::string& name,
1339 internal::Function* fn);
1340
1341 #if defined(BENCHMARK_HAS_CXX11)
1342 template <class Lambda>
1343 internal::Benchmark* RegisterBenchmark(const std::string& name, Lambda&& fn);
1344 #endif
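// An illustrative sketch of programmatic registration, useful when the set of
// benchmarks is only known at runtime (the names here are arbitrary examples):
//   int main(int argc, char** argv) {
//     for (auto name : {"apple", "banana", "cherry"}) {
//       benchmark::RegisterBenchmark(name, [](benchmark::State& st) {
//         for (auto _ : st) { /* measured work */ }
//       });
//     }
//     benchmark::Initialize(&argc, argv);
//     benchmark::RunSpecifiedBenchmarks();
//     benchmark::Shutdown();
//     return 0;
//   }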
1345
1346 // Remove all registered benchmarks. All pointers to previously registered
1347 // benchmarks are invalidated.
1348 BENCHMARK_EXPORT void ClearRegisteredBenchmarks();
1349
1350 namespace internal {
1351 // The class used to hold all Benchmarks created from static functions
1352 // (i.e. those created using the BENCHMARK(...) macros).
1353 class BENCHMARK_EXPORT FunctionBenchmark : public Benchmark {
1354 public:
1355 FunctionBenchmark(const std::string& name, Function* func)
1356 : Benchmark(name), func_(func) {}
1357
1358 void Run(State& st) BENCHMARK_OVERRIDE;
1359
1360 private:
1361 Function* func_;
1362 };
1363
1364 #ifdef BENCHMARK_HAS_CXX11
1365 template <class Lambda>
1366 class LambdaBenchmark : public Benchmark {
1367 public:
1368 void Run(State& st) BENCHMARK_OVERRIDE { lambda_(st); }
1369
1370 private:
1371 template <class OLambda>
1372 LambdaBenchmark(const std::string& name, OLambda&& lam)
1373 : Benchmark(name), lambda_(std::forward<OLambda>(lam)) {}
1374
1375 LambdaBenchmark(LambdaBenchmark const&) = delete;
1376
1377 template <class Lam> // NOLINTNEXTLINE(readability-redundant-declaration)
1378 friend Benchmark* ::benchmark::RegisterBenchmark(const std::string&, Lam&&);
1379
1380 Lambda lambda_;
1381 };
1382 #endif
1383 } // namespace internal
1384
1385 inline internal::Benchmark* RegisterBenchmark(const std::string& name,
1386 internal::Function* fn) {
1387 // FIXME: this should be a `std::make_unique<>()` but we don't have C++14.
1388 // codechecker_intentional [cplusplus.NewDeleteLeaks]
1389 return internal::RegisterBenchmarkInternal(
1390 ::new internal::FunctionBenchmark(name, fn));
1391 }
1392
1393 #ifdef BENCHMARK_HAS_CXX11
1394 template <class Lambda>
1395 internal::Benchmark* RegisterBenchmark(const std::string& name, Lambda&& fn) {
1396 using BenchType =
1397 internal::LambdaBenchmark<typename std::decay<Lambda>::type>;
1398 // FIXME: this should be a `std::make_unique<>()` but we don't have C++14.
1399 // codechecker_intentional [cplusplus.NewDeleteLeaks]
1400 return internal::RegisterBenchmarkInternal(
1401 ::new BenchType(name, std::forward<Lambda>(fn)));
1402 }
1403 #endif
1404
1405 #if defined(BENCHMARK_HAS_CXX11) && \
1406 (!defined(BENCHMARK_GCC_VERSION) || BENCHMARK_GCC_VERSION >= 409)
1407 template <class Lambda, class... Args>
1408 internal::Benchmark* RegisterBenchmark(const std::string& name, Lambda&& fn,
1409 Args&&... args) {
1410 return benchmark::RegisterBenchmark(
1411 name, [=](benchmark::State& st) { fn(st, args...); });
1412 }
1413 #else
1414 #define BENCHMARK_HAS_NO_VARIADIC_REGISTER_BENCHMARK
1415 #endif
1416
1417 // The base class for all fixture tests.
1418 class Fixture : public internal::Benchmark {
1419 public:
1420 Fixture() : internal::Benchmark("") {}
1421
1422 void Run(State& st) BENCHMARK_OVERRIDE {
1423 this->SetUp(st);
1424 this->BenchmarkCase(st);
1425 this->TearDown(st);
1426 }
1427
1428 // These will be deprecated ...
1429 virtual void SetUp(const State&) {}
1430 virtual void TearDown(const State&) {}
1431 // ... In favor of these.
1432 virtual void SetUp(State& st) { SetUp(const_cast<const State&>(st)); }
1433 virtual void TearDown(State& st) { TearDown(const_cast<const State&>(st)); }
1434
1435 protected:
1436 virtual void BenchmarkCase(State&) = 0;
1437 };
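// An illustrative sketch of a fixture-based benchmark; the BENCHMARK_F macro
// used here is defined later in this header:
//   class MyFixture : public benchmark::Fixture {
//    public:
//     void SetUp(::benchmark::State& state) override { /* per-run setup */ }
//     void TearDown(::benchmark::State& state) override { /* per-run teardown */ }
//   };
//   BENCHMARK_F(MyFixture, FooTest)(benchmark::State& st) {
//     for (auto _ : st) { /* measured work */ }
//   }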
}  // namespace benchmark

// ------------------------------------------------------
// Macro to register benchmarks

// Check that __COUNTER__ is defined and that __COUNTER__ increases by 1
// every time it is expanded. X + 1 == X + 0 is used in case X is defined to be
// empty. If X is empty the expression becomes (+1 == +0).
#if defined(__COUNTER__) && (__COUNTER__ + 1 == __COUNTER__ + 0)
#define BENCHMARK_PRIVATE_UNIQUE_ID __COUNTER__
#else
#define BENCHMARK_PRIVATE_UNIQUE_ID __LINE__
#endif

// Helpers for generating unique variable names
#ifdef BENCHMARK_HAS_CXX11
#define BENCHMARK_PRIVATE_NAME(...)                                      \
  BENCHMARK_PRIVATE_CONCAT(benchmark_uniq_, BENCHMARK_PRIVATE_UNIQUE_ID, \
                           __VA_ARGS__)
#else
#define BENCHMARK_PRIVATE_NAME(n) \
  BENCHMARK_PRIVATE_CONCAT(benchmark_uniq_, BENCHMARK_PRIVATE_UNIQUE_ID, n)
#endif  // BENCHMARK_HAS_CXX11

#define BENCHMARK_PRIVATE_CONCAT(a, b, c) BENCHMARK_PRIVATE_CONCAT2(a, b, c)
#define BENCHMARK_PRIVATE_CONCAT2(a, b, c) a##b##c
// Helper for concatenation with macro name expansion
#define BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method) \
  BaseClass##_##Method##_Benchmark

#define BENCHMARK_PRIVATE_DECLARE(n)                                 \
  static ::benchmark::internal::Benchmark* BENCHMARK_PRIVATE_NAME(n) \
      BENCHMARK_UNUSED

#ifdef BENCHMARK_HAS_CXX11
#define BENCHMARK(...)                                               \
  BENCHMARK_PRIVATE_DECLARE(_benchmark_) =                           \
      (::benchmark::internal::RegisterBenchmarkInternal(             \
          new ::benchmark::internal::FunctionBenchmark(#__VA_ARGS__, \
                                                       __VA_ARGS__)))
#else
#define BENCHMARK(n)                                     \
  BENCHMARK_PRIVATE_DECLARE(n) =                         \
      (::benchmark::internal::RegisterBenchmarkInternal( \
          new ::benchmark::internal::FunctionBenchmark(#n, n)))
#endif  // BENCHMARK_HAS_CXX11

// Old-style macros
#define BENCHMARK_WITH_ARG(n, a) BENCHMARK(n)->Arg((a))
#define BENCHMARK_WITH_ARG2(n, a1, a2) BENCHMARK(n)->Args({(a1), (a2)})
#define BENCHMARK_WITH_UNIT(n, t) BENCHMARK(n)->Unit((t))
#define BENCHMARK_RANGE(n, lo, hi) BENCHMARK(n)->Range((lo), (hi))
#define BENCHMARK_RANGE2(n, l1, h1, l2, h2) \
  BENCHMARK(n)->RangePair({{(l1), (h1)}, {(l2), (h2)}})
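
// These expand directly to the builder calls shown above; for example,
//   BENCHMARK_RANGE(BM_memcpy, 8, 8 << 10);
// is equivalent to
//   BENCHMARK(BM_memcpy)->Range(8, 8 << 10);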

#ifdef BENCHMARK_HAS_CXX11

// Register a benchmark which invokes the function specified by `func`
// with the additional arguments specified by `...`.
//
// For example:
//
// template <class ...ExtraArgs>
// void BM_takes_args(benchmark::State& state, ExtraArgs&&... extra_args) {
//   [...]
// }
// /* Registers a benchmark named "BM_takes_args/int_string_test" */
// BENCHMARK_CAPTURE(BM_takes_args, int_string_test, 42, std::string("abc"));
#define BENCHMARK_CAPTURE(func, test_case_name, ...)     \
  BENCHMARK_PRIVATE_DECLARE(func) =                      \
      (::benchmark::internal::RegisterBenchmarkInternal( \
          new ::benchmark::internal::FunctionBenchmark(  \
              #func "/" #test_case_name,                 \
              [](::benchmark::State& st) { func(st, __VA_ARGS__); })))

#endif  // BENCHMARK_HAS_CXX11

// This will register a benchmark for a templatized function. For example:
//
// template <int arg>
// void BM_Foo(benchmark::State& state);
//
// BENCHMARK_TEMPLATE(BM_Foo, 1);
//
// will register BM_Foo<1> as a benchmark.
#define BENCHMARK_TEMPLATE1(n, a)                        \
  BENCHMARK_PRIVATE_DECLARE(n) =                         \
      (::benchmark::internal::RegisterBenchmarkInternal( \
          new ::benchmark::internal::FunctionBenchmark(#n "<" #a ">", n<a>)))

#define BENCHMARK_TEMPLATE2(n, a, b)                                         \
  BENCHMARK_PRIVATE_DECLARE(n) =                                             \
      (::benchmark::internal::RegisterBenchmarkInternal(                     \
          new ::benchmark::internal::FunctionBenchmark(#n "<" #a "," #b ">", \
                                                       n<a, b>)))

#ifdef BENCHMARK_HAS_CXX11
#define BENCHMARK_TEMPLATE(n, ...)                       \
  BENCHMARK_PRIVATE_DECLARE(n) =                         \
      (::benchmark::internal::RegisterBenchmarkInternal( \
          new ::benchmark::internal::FunctionBenchmark(  \
              #n "<" #__VA_ARGS__ ">", n<__VA_ARGS__>)))
#else
#define BENCHMARK_TEMPLATE(n, a) BENCHMARK_TEMPLATE1(n, a)
#endif

#define BENCHMARK_PRIVATE_DECLARE_F(BaseClass, Method)          \
  class BaseClass##_##Method##_Benchmark : public BaseClass {   \
   public:                                                      \
    BaseClass##_##Method##_Benchmark() {                        \
      this->SetName(#BaseClass "/" #Method);                    \
    }                                                           \
                                                                \
   protected:                                                   \
    void BenchmarkCase(::benchmark::State&) BENCHMARK_OVERRIDE; \
  };

#define BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(BaseClass, Method, a) \
  class BaseClass##_##Method##_Benchmark : public BaseClass<a> {    \
   public:                                                          \
    BaseClass##_##Method##_Benchmark() {                            \
      this->SetName(#BaseClass "<" #a ">/" #Method);                \
    }                                                               \
                                                                    \
   protected:                                                       \
    void BenchmarkCase(::benchmark::State&) BENCHMARK_OVERRIDE;     \
  };

#define BENCHMARK_TEMPLATE2_PRIVATE_DECLARE_F(BaseClass, Method, a, b) \
  class BaseClass##_##Method##_Benchmark : public BaseClass<a, b> {    \
   public:                                                             \
    BaseClass##_##Method##_Benchmark() {                               \
      this->SetName(#BaseClass "<" #a "," #b ">/" #Method);            \
    }                                                                  \
                                                                       \
   protected:                                                          \
    void BenchmarkCase(::benchmark::State&) BENCHMARK_OVERRIDE;        \
  };

#ifdef BENCHMARK_HAS_CXX11
#define BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(BaseClass, Method, ...)       \
  class BaseClass##_##Method##_Benchmark : public BaseClass<__VA_ARGS__> { \
   public:                                                                 \
    BaseClass##_##Method##_Benchmark() {                                   \
      this->SetName(#BaseClass "<" #__VA_ARGS__ ">/" #Method);             \
    }                                                                      \
                                                                           \
   protected:                                                              \
    void BenchmarkCase(::benchmark::State&) BENCHMARK_OVERRIDE;            \
  };
#else
#define BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(n, a) \
  BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(n, a)
#endif

#define BENCHMARK_DEFINE_F(BaseClass, Method)    \
  BENCHMARK_PRIVATE_DECLARE_F(BaseClass, Method) \
  void BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method)::BenchmarkCase

#define BENCHMARK_TEMPLATE1_DEFINE_F(BaseClass, Method, a)    \
  BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(BaseClass, Method, a) \
  void BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method)::BenchmarkCase

#define BENCHMARK_TEMPLATE2_DEFINE_F(BaseClass, Method, a, b)    \
  BENCHMARK_TEMPLATE2_PRIVATE_DECLARE_F(BaseClass, Method, a, b) \
  void BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method)::BenchmarkCase

#ifdef BENCHMARK_HAS_CXX11
#define BENCHMARK_TEMPLATE_DEFINE_F(BaseClass, Method, ...)            \
  BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(BaseClass, Method, __VA_ARGS__) \
  void BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method)::BenchmarkCase
#else
#define BENCHMARK_TEMPLATE_DEFINE_F(BaseClass, Method, a) \
  BENCHMARK_TEMPLATE1_DEFINE_F(BaseClass, Method, a)
#endif

#define BENCHMARK_REGISTER_F(BaseClass, Method) \
  BENCHMARK_PRIVATE_REGISTER_F(BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method))

#define BENCHMARK_PRIVATE_REGISTER_F(TestName) \
  BENCHMARK_PRIVATE_DECLARE(TestName) =        \
      (::benchmark::internal::RegisterBenchmarkInternal(new TestName()))
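
// Example of defining and registering a fixture benchmark separately, which
// allows further configuration at registration time (a minimal sketch;
// MyFixture and BarTest are illustrative names, not part of the library):
//
//   BENCHMARK_DEFINE_F(MyFixture, BarTest)(benchmark::State& st) {
//     for (auto _ : st) {
//       /* measured code using the fixture's members */
//     }
//   }
//   BENCHMARK_REGISTER_F(MyFixture, BarTest)->Threads(2);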

// This macro will define and register a benchmark within a fixture class.
#define BENCHMARK_F(BaseClass, Method)           \
  BENCHMARK_PRIVATE_DECLARE_F(BaseClass, Method) \
  BENCHMARK_REGISTER_F(BaseClass, Method);       \
  void BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method)::BenchmarkCase

#define BENCHMARK_TEMPLATE1_F(BaseClass, Method, a)           \
  BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(BaseClass, Method, a) \
  BENCHMARK_REGISTER_F(BaseClass, Method);                    \
  void BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method)::BenchmarkCase

#define BENCHMARK_TEMPLATE2_F(BaseClass, Method, a, b)           \
  BENCHMARK_TEMPLATE2_PRIVATE_DECLARE_F(BaseClass, Method, a, b) \
  BENCHMARK_REGISTER_F(BaseClass, Method);                       \
  void BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method)::BenchmarkCase

#ifdef BENCHMARK_HAS_CXX11
#define BENCHMARK_TEMPLATE_F(BaseClass, Method, ...)                   \
  BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(BaseClass, Method, __VA_ARGS__) \
  BENCHMARK_REGISTER_F(BaseClass, Method);                             \
  void BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method)::BenchmarkCase
#else
#define BENCHMARK_TEMPLATE_F(BaseClass, Method, a) \
  BENCHMARK_TEMPLATE1_F(BaseClass, Method, a)
#endif

// Helper macro to create a main routine in a test that runs the benchmarks
// Note the workaround for Hexagon simulator passing argc != 0, argv = NULL.
#define BENCHMARK_MAIN()                                                \
  int main(int argc, char** argv) {                                     \
    char arg0_default[] = "benchmark";                                  \
    char* args_default = arg0_default;                                  \
    if (!argv) {                                                        \
      argc = 1;                                                         \
      argv = &args_default;                                             \
    }                                                                   \
    ::benchmark::Initialize(&argc, argv);                               \
    if (::benchmark::ReportUnrecognizedArguments(argc, argv)) return 1; \
    ::benchmark::RunSpecifiedBenchmarks();                              \
    ::benchmark::Shutdown();                                            \
    return 0;                                                           \
  }                                                                     \
  int main(int, char**)

// ------------------------------------------------------
// Benchmark Reporters

namespace benchmark {

struct BENCHMARK_EXPORT CPUInfo {
  struct CacheInfo {
    std::string type;
    int level;
    int size;
    int num_sharing;
  };

  enum Scaling { UNKNOWN, ENABLED, DISABLED };

  int num_cpus;
  Scaling scaling;
  double cycles_per_second;
  std::vector<CacheInfo> caches;
  std::vector<double> load_avg;

  static const CPUInfo& Get();

 private:
  CPUInfo();
  BENCHMARK_DISALLOW_COPY_AND_ASSIGN(CPUInfo);
};
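
// A custom reporter (or any other client) can inspect the host description
// gathered above, e.g. (illustrative only):
//
//   const benchmark::CPUInfo& info = benchmark::CPUInfo::Get();
//   std::cerr << info.num_cpus << " CPUs @ " << info.cycles_per_second
//             << " cycles/s\n";
//   for (const auto& cache : info.caches)
//     std::cerr << "L" << cache.level << " " << cache.type << " cache, size "
//               << cache.size << "\n";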

// System information about the host (currently just its name).
struct BENCHMARK_EXPORT SystemInfo {
  std::string name;
  static const SystemInfo& Get();

 private:
  SystemInfo();
  BENCHMARK_DISALLOW_COPY_AND_ASSIGN(SystemInfo);
};

// BenchmarkName contains the components of the Benchmark's name
// which allows individual fields to be modified or cleared before
// building the final name using 'str()'.
struct BENCHMARK_EXPORT BenchmarkName {
  std::string function_name;
  std::string args;
  std::string min_time;
  std::string min_warmup_time;
  std::string iterations;
  std::string repetitions;
  std::string time_type;
  std::string threads;

  // Return the full name of the benchmark with each non-empty
  // field separated by a '/'
  std::string str() const;
};
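
// For example (illustrative field values), a BenchmarkName whose
// function_name is "BM_SetInsert", args is "1024/128" and threads is
// "threads:2", with all other fields empty, yields
// str() == "BM_SetInsert/1024/128/threads:2".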

// Interface for custom benchmark result printers.
// By default, benchmark reports are printed to stdout. However, an application
// can control the destination of the reports by calling
// RunSpecifiedBenchmarks and passing it a custom reporter object.
// The reporter object must implement the following interface.
class BENCHMARK_EXPORT BenchmarkReporter {
 public:
  struct Context {
    CPUInfo const& cpu_info;
    SystemInfo const& sys_info;
    // The number of chars in the longest benchmark name.
    size_t name_field_width;
    static const char* executable_name;
    Context();
  };

  struct BENCHMARK_EXPORT Run {
    static const int64_t no_repetition_index = -1;
    enum RunType { RT_Iteration, RT_Aggregate };

    Run()
        : run_type(RT_Iteration),
          aggregate_unit(kTime),
          skipped(internal::NotSkipped),
          iterations(1),
          threads(1),
          time_unit(GetDefaultTimeUnit()),
          real_accumulated_time(0),
          cpu_accumulated_time(0),
          max_heapbytes_used(0),
          complexity(oNone),
          complexity_lambda(),
          complexity_n(0),
          report_big_o(false),
          report_rms(false),
          memory_result(NULL),
          allocs_per_iter(0.0) {}

    std::string benchmark_name() const;
    BenchmarkName run_name;
    int64_t family_index;
    int64_t per_family_instance_index;
    RunType run_type;
    std::string aggregate_name;
    StatisticUnit aggregate_unit;
    std::string report_label;  // Empty if not set by benchmark.
    internal::Skipped skipped;
    std::string skip_message;

    IterationCount iterations;
    int64_t threads;
    int64_t repetition_index;
    int64_t repetitions;
    TimeUnit time_unit;
    double real_accumulated_time;
    double cpu_accumulated_time;

    // Return a value representing the real time per iteration in the unit
    // specified by 'time_unit'.
    // NOTE: If 'iterations' is zero the returned value represents the
    // accumulated time.
    double GetAdjustedRealTime() const;

    // Return a value representing the cpu time per iteration in the unit
    // specified by 'time_unit'.
    // NOTE: If 'iterations' is zero the returned value represents the
    // accumulated time.
    double GetAdjustedCPUTime() const;

    // This is set to 0.0 if memory tracing is not enabled.
    double max_heapbytes_used;

    // Keep track of arguments to compute asymptotic complexity
    BigO complexity;
    BigOFunc* complexity_lambda;
    int64_t complexity_n;

    // what statistics to compute from the measurements
    const std::vector<internal::Statistics>* statistics;

    // Inform print function whether the current run is a complexity report
    bool report_big_o;
    bool report_rms;

    UserCounters counters;

    // Memory metrics.
    const MemoryManager::Result* memory_result;
    double allocs_per_iter;
  };

  struct PerFamilyRunReports {
    PerFamilyRunReports() : num_runs_total(0), num_runs_done(0) {}

    // How many runs will all instances of this benchmark perform?
    int num_runs_total;

    // How many runs have happened already?
    int num_runs_done;

    // The reports about (non-erroneous!) runs of this family.
    std::vector<BenchmarkReporter::Run> Runs;
  };

  // Construct a BenchmarkReporter with the output stream set to 'std::cout'
  // and the error stream set to 'std::cerr'
  BenchmarkReporter();

  // Called once for every suite of benchmarks run.
  // The parameter "context" contains information that the
  // reporter may wish to use when generating its report, for example the
  // platform under which the benchmarks are running. The benchmark run is
  // never started if this function returns false, allowing the reporter
  // to skip runs based on the context information.
  virtual bool ReportContext(const Context& context) = 0;

  // Called once for each group of benchmark runs, gives information about
  // the configurations of the runs.
  virtual void ReportRunsConfig(double /*min_time*/,
                                bool /*has_explicit_iters*/,
                                IterationCount /*iters*/) {}

  // Called once for each group of benchmark runs, gives information about
  // cpu-time and heap memory usage during the benchmark run. If the group
  // of runs contained more than two entries then 'report' contains additional
  // elements representing the mean and standard deviation of those runs.
  // Additionally if this group of runs was the last in a family of benchmarks
  // 'reports' contains additional entries representing the asymptotic
  // complexity and RMS of that benchmark family.
  virtual void ReportRuns(const std::vector<Run>& report) = 0;

  // Called once and only once after every group of benchmarks is run and
  // reported.
  virtual void Finalize() {}

  // REQUIRES: The object referenced by 'out' is valid for the lifetime
  // of the reporter.
  void SetOutputStream(std::ostream* out) {
    assert(out);
    output_stream_ = out;
  }

  // REQUIRES: The object referenced by 'err' is valid for the lifetime
  // of the reporter.
  void SetErrorStream(std::ostream* err) {
    assert(err);
    error_stream_ = err;
  }

  std::ostream& GetOutputStream() const { return *output_stream_; }

  std::ostream& GetErrorStream() const { return *error_stream_; }

  virtual ~BenchmarkReporter();

  // Write a human readable string to 'out' representing the specified
  // 'context'.
  // REQUIRES: 'out' is non-null.
  static void PrintBasicContext(std::ostream* out, Context const& context);

 private:
  std::ostream* output_stream_;
  std::ostream* error_stream_;
};
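
// Example of a minimal custom reporter (a sketch; NullReporter is an
// illustrative name, not part of the library):
//
//   class NullReporter : public benchmark::BenchmarkReporter {
//    public:
//     bool ReportContext(const Context&) BENCHMARK_OVERRIDE { return true; }
//     void ReportRuns(const std::vector<Run>&) BENCHMARK_OVERRIDE {}
//   };
//
//   NullReporter null;
//   benchmark::RunSpecifiedBenchmarks(&null);  // suppresses console output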

// Simple reporter that outputs benchmark data to the console. This is the
// default reporter used by RunSpecifiedBenchmarks().
class BENCHMARK_EXPORT ConsoleReporter : public BenchmarkReporter {
 public:
  enum OutputOptions {
    OO_None = 0,
    OO_Color = 1,
    OO_Tabular = 2,
    OO_ColorTabular = OO_Color | OO_Tabular,
    OO_Defaults = OO_ColorTabular
  };
  explicit ConsoleReporter(OutputOptions opts_ = OO_Defaults)
      : output_options_(opts_), name_field_width_(0), printed_header_(false) {}

  bool ReportContext(const Context& context) BENCHMARK_OVERRIDE;
  void ReportRuns(const std::vector<Run>& reports) BENCHMARK_OVERRIDE;

 protected:
  virtual void PrintRunData(const Run& report);
  virtual void PrintHeader(const Run& report);

  OutputOptions output_options_;
  size_t name_field_width_;
  UserCounters prev_counters_;
  bool printed_header_;
};

class BENCHMARK_EXPORT JSONReporter : public BenchmarkReporter {
 public:
  JSONReporter() : first_report_(true) {}
  bool ReportContext(const Context& context) BENCHMARK_OVERRIDE;
  void ReportRuns(const std::vector<Run>& reports) BENCHMARK_OVERRIDE;
  void Finalize() BENCHMARK_OVERRIDE;

 private:
  void PrintRunData(const Run& report);

  bool first_report_;
};

class BENCHMARK_EXPORT BENCHMARK_DEPRECATED_MSG(
    "The CSV Reporter will be removed in a future release") CSVReporter
    : public BenchmarkReporter {
 public:
  CSVReporter() : printed_header_(false) {}
  bool ReportContext(const Context& context) BENCHMARK_OVERRIDE;
  void ReportRuns(const std::vector<Run>& reports) BENCHMARK_OVERRIDE;

 private:
  void PrintRunData(const Run& report);

  bool printed_header_;
  std::set<std::string> user_counter_names_;
};

inline const char* GetTimeUnitString(TimeUnit unit) {
  switch (unit) {
    case kSecond:
      return "s";
    case kMillisecond:
      return "ms";
    case kMicrosecond:
      return "us";
    case kNanosecond:
      return "ns";
  }
  BENCHMARK_UNREACHABLE();
}

inline double GetTimeUnitMultiplier(TimeUnit unit) {
  switch (unit) {
    case kSecond:
      return 1;
    case kMillisecond:
      return 1e3;
    case kMicrosecond:
      return 1e6;
    case kNanosecond:
      return 1e9;
  }
  BENCHMARK_UNREACHABLE();
}

// Creates a list of integer values for the given range and multiplier.
// This can be used together with ArgsProduct() to allow multiple ranges
// with different multipliers.
// Example:
// ArgsProduct({
//   CreateRange(0, 1024, /*multi=*/32),
//   CreateRange(0, 100, /*multi=*/4),
//   CreateDenseRange(0, 4, /*step=*/1),
// });
BENCHMARK_EXPORT
std::vector<int64_t> CreateRange(int64_t lo, int64_t hi, int multi);

// Creates a list of integer values for the given range and step.
BENCHMARK_EXPORT
std::vector<int64_t> CreateDenseRange(int64_t start, int64_t limit, int step);
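
// For instance, assuming the same inclusive-endpoint behavior as
// Range()/DenseRange(), the helpers above would produce roughly:
//   CreateRange(8, 128, /*multi=*/2)   -> {8, 16, 32, 64, 128}
//   CreateDenseRange(1, 4, /*step=*/1) -> {1, 2, 3, 4}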

}  // namespace benchmark

#if defined(_MSC_VER)
#pragma warning(pop)
#endif

#endif  // BENCHMARK_BENCHMARK_H_