#include <cassert>
#include <iterator>
#include <type_traits>

#include "benchmark/benchmark.h"

#define BASIC_BENCHMARK_TEST(x) BENCHMARK(x)->Arg(8)->Arg(512)->Arg(8192)

void BM_empty(benchmark::State& state) {
  for (auto _ : state) {
    benchmark::DoNotOptimize(state.iterations());
  }
}
BENCHMARK(BM_empty);
BENCHMARK(BM_empty)->ThreadPerCpu();

void BM_spin_empty(benchmark::State& state) {
  for (auto _ : state) {
    for (auto x = 0; x < state.range(0); ++x) {
      benchmark::DoNotOptimize(x);
    }
  }
}
BASIC_BENCHMARK_TEST(BM_spin_empty);
BASIC_BENCHMARK_TEST(BM_spin_empty)->ThreadPerCpu();

// Spin outside the timed loop, before measurement begins.
void BM_spin_pause_before(benchmark::State& state) {
  for (auto i = 0; i < state.range(0); ++i) {
    benchmark::DoNotOptimize(i);
  }
  for (auto _ : state) {
    for (auto i = 0; i < state.range(0); ++i) {
      benchmark::DoNotOptimize(i);
    }
  }
}
BASIC_BENCHMARK_TEST(BM_spin_pause_before);
BASIC_BENCHMARK_TEST(BM_spin_pause_before)->ThreadPerCpu();

// Pause and resume timing around a spin inside the timed loop.
void BM_spin_pause_during(benchmark::State& state) {
  for (auto _ : state) {
    state.PauseTiming();
    for (auto i = 0; i < state.range(0); ++i) {
      benchmark::DoNotOptimize(i);
    }
    state.ResumeTiming();
    for (auto i = 0; i < state.range(0); ++i) {
      benchmark::DoNotOptimize(i);
    }
  }
}
BASIC_BENCHMARK_TEST(BM_spin_pause_during);
BASIC_BENCHMARK_TEST(BM_spin_pause_during)->ThreadPerCpu();

void BM_pause_during(benchmark::State& state) {
  for (auto _ : state) {
    state.PauseTiming();
    state.ResumeTiming();
  }
}
BENCHMARK(BM_pause_during);
BENCHMARK(BM_pause_during)->ThreadPerCpu();
BENCHMARK(BM_pause_during)->UseRealTime();
BENCHMARK(BM_pause_during)->UseRealTime()->ThreadPerCpu();

// Spin outside the timed loop, after measurement ends.
void BM_spin_pause_after(benchmark::State& state) {
  for (auto _ : state) {
    for (auto i = 0; i < state.range(0); ++i) {
      benchmark::DoNotOptimize(i);
    }
  }
  for (auto i = 0; i < state.range(0); ++i) {
    benchmark::DoNotOptimize(i);
  }
}
BASIC_BENCHMARK_TEST(BM_spin_pause_after);
BASIC_BENCHMARK_TEST(BM_spin_pause_after)->ThreadPerCpu();

void BM_spin_pause_before_and_after(benchmark::State& state) {
  for (auto i = 0; i < state.range(0); ++i) {
    benchmark::DoNotOptimize(i);
  }
  for (auto _ : state) {
    for (auto i = 0; i < state.range(0); ++i) {
      benchmark::DoNotOptimize(i);
    }
  }
  for (auto i = 0; i < state.range(0); ++i) {
    benchmark::DoNotOptimize(i);
  }
}
BASIC_BENCHMARK_TEST(BM_spin_pause_before_and_after);
BASIC_BENCHMARK_TEST(BM_spin_pause_before_and_after)->ThreadPerCpu();

void BM_empty_stop_start(benchmark::State& state) {
  for (auto _ : state) {
  }
}
BENCHMARK(BM_empty_stop_start);
BENCHMARK(BM_empty_stop_start)->ThreadPerCpu();

// Verify that KeepRunning() and state.iterations() stay in agreement.
void BM_KeepRunning(benchmark::State& state) {
  benchmark::IterationCount iter_count = 0;
  assert(iter_count == state.iterations());
  while (state.KeepRunning()) {
    ++iter_count;
  }
  assert(iter_count == state.iterations());
}
BENCHMARK(BM_KeepRunning);

void BM_KeepRunningBatch(benchmark::State& state) {
  // Choose a batch size >1000 to skip the typical runs with iteration
  // targets of 10, 100 and 1000.  If these are not actually skipped, the
  // bug would be detectable as consecutive runs with the same iteration
  // count.  Below we assert that this does not happen.
  const benchmark::IterationCount batch_size = 1009;

  static benchmark::IterationCount prior_iter_count = 0;
  benchmark::IterationCount iter_count = 0;
  while (state.KeepRunningBatch(batch_size)) {
    iter_count += batch_size;
  }
  assert(state.iterations() == iter_count);

  // Verify that the iteration count always increases across runs (see
  // comment above).
  assert(iter_count == batch_size            // max_iterations == 1
         || iter_count > prior_iter_count);  // max_iterations > batch_size
  prior_iter_count = iter_count;
}
// Register with a fixed repetition count to establish the invariant that
// the iteration count should always change across runs.  This overrides
// the --benchmark_repetitions command line flag, which would otherwise
// cause this test to fail if set > 1.
BENCHMARK(BM_KeepRunningBatch)->Repetitions(1);

// Verify that the ranged-for loop runs exactly max_iterations iterations.
void BM_RangedFor(benchmark::State& state) {
  benchmark::IterationCount iter_count = 0;
  for (auto _ : state) {
    ++iter_count;
  }
  assert(iter_count == state.max_iterations);
}
BENCHMARK(BM_RangedFor);

#ifdef BENCHMARK_HAS_CXX11
template <typename T>
void BM_OneTemplateFunc(benchmark::State& state) {
  auto arg = state.range(0);
  T sum = 0;
  for (auto _ : state) {
    sum += arg;
  }
}
BENCHMARK(BM_OneTemplateFunc<int>)->Arg(1);
BENCHMARK(BM_OneTemplateFunc<double>)->Arg(1);

template <typename A, typename B>
void BM_TwoTemplateFunc(benchmark::State& state) {
  auto arg = state.range(0);
  A sum = 0;
  B prod = 1;
  for (auto _ : state) {
    sum += arg;
    prod *= arg;
  }
}
BENCHMARK(BM_TwoTemplateFunc<int, double>)->Arg(1);
BENCHMARK(BM_TwoTemplateFunc<double, int>)->Arg(1);

#endif  // BENCHMARK_HAS_CXX11

// Ensure that StateIterator provides all the necessary typedefs required to
// instantiate std::iterator_traits.
static_assert(
    std::is_same<typename std::iterator_traits<
                     benchmark::State::StateIterator>::value_type,
                 typename benchmark::State::StateIterator::value_type>::value,
    "");

BENCHMARK_MAIN();