// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef ABSL_RANDOM_INTERNAL_NANOBENCHMARK_H_
#define ABSL_RANDOM_INTERNAL_NANOBENCHMARK_H_

// Benchmarks functions of a single integer argument with realistic branch
// prediction hit rates. Uses a robust estimator to summarize the measurements.
// The precision is about 0.2%.
//
// Examples: see nanobenchmark_test.cc.
//
// Background: Microbenchmarks such as http://github.com/google/benchmark
// can measure elapsed times on the order of a microsecond. Shorter functions
// are typically measured by repeating them thousands of times and dividing
// the total elapsed time by this count. Unfortunately, repetition (especially
// with the same input parameter!) influences the runtime. In time-critical
// code, it is reasonable to expect warm instruction/data caches and TLBs,
// but a perfect record of which branches will be taken is unrealistic.
// Unless the application also repeatedly invokes the measured function with
// the same parameter, the benchmark is measuring something very different -
// a best-case result, almost as if the parameter were made a compile-time
// constant. This may lead to erroneous conclusions about branch-heavy
// algorithms outperforming branch-free alternatives.
//
// Our approach differs in three ways. Adding fences to the timer functions
// reduces variability due to instruction reordering, improving the timer
// resolution to about 40 CPU cycles. However, shorter functions must still
// be invoked repeatedly. For more realistic branch prediction performance,
// we vary the input parameter according to a user-specified distribution.
// Thus, instead of VaryInputs(Measure(Repeat(func))), we change the
// loop nesting to Measure(Repeat(VaryInputs(func))). We also estimate the
// central tendency of the measurement samples with the "half sample mode",
// which is more robust to outliers and skewed data than the mean or median.

// NOTE: for compatibility with multiple translation units compiled with
// distinct flags, avoid #including headers that define functions.

#include <stddef.h>
#include <stdint.h>

#include "absl/base/config.h"

namespace absl {
ABSL_NAMESPACE_BEGIN
namespace random_internal_nanobenchmark {

// Input influencing the function being measured (e.g. number of bytes to
// copy).
using FuncInput = size_t;

// "Proof of work" returned by Func to ensure the compiler does not elide it.
using FuncOutput = uint64_t;

// Function to measure: either 1) a captureless lambda or function with two
// arguments or 2) a lambda with capture, in which case the first argument
// is reserved for use by MeasureClosure.
using Func = FuncOutput (*)(const void*, FuncInput);

// Internal parameters that determine precision/resolution/measuring time.
struct Params {
  // For measuring timer overhead/resolution. Used in a nested loop =>
  // quadratic time, acceptable because we know timer overhead is "low".
  // constexpr because this is used to define array bounds.
  static constexpr size_t kTimerSamples = 256;

  // Best-case precision, expressed as a divisor of the timer resolution.
  // Larger => more calls to Func and higher precision.
  size_t precision_divisor = 1024;

  // Ratio between full and subset input distribution sizes. Cannot be less
  // than 2; larger values increase measurement time but more faithfully
  // model the given input distribution.
  size_t subset_ratio = 2;

  // Together with the estimated Func duration, determines how many times to
  // call Func before checking the sample variability. Larger values increase
  // measurement time, memory/cache use and precision.
  double seconds_per_eval = 4E-3;

  // The minimum number of samples before estimating the central tendency.
  size_t min_samples_per_eval = 7;

  // The mode is better than median for estimating the central tendency of
  // skewed/fat-tailed distributions, but it requires sufficient samples
  // relative to the width of half-ranges.
  size_t min_mode_samples = 64;

  // Maximum permissible variability (= median absolute deviation / center).
  double target_rel_mad = 0.002;

  // Abort after this many evals without reaching target_rel_mad. This
  // prevents infinite loops.
  size_t max_evals = 9;

  // Retry the measure loop up to this many times.
  size_t max_measure_retries = 2;

  // Whether to print additional statistics to stdout.
  bool verbose = true;
};
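
// Illustrative sketch (not taken from nanobenchmark_test.cc): callers who need
// a tighter precision target or quieter output can adjust fields before
// passing the struct to Measure(); the particular values below are arbitrary.
//
//   Params params;
//   params.target_rel_mad = 0.001;  // accept at most 0.1% relative MAD
//   params.max_evals = 12;          // allow more evals to reach that target
//   params.verbose = false;         // suppress extra statistics on stdout
//   // ... then pass `params` as the last argument to Measure/MeasureClosure.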

// Measurement result for each unique input.
struct Result {
  FuncInput input;

  // Robust estimate (mode or median) of duration.
  float ticks;

  // Measure of variability (median absolute deviation relative to "ticks").
  float variability;
};

// Ensures the thread is running on the specified cpu, and no others.
// Reduces noise due to desynchronized socket RDTSC and context switches.
// If "cpu" is negative, pin to the currently running core.
void PinThreadToCPU(const int cpu = -1);

// Returns tick rate, useful for converting measurements to seconds. Invariant
// means the tick counter frequency is independent of CPU throttling or sleep.
// This call may be expensive; callers should cache the result.
double InvariantTicksPerSecond();
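
// Illustrative sketch: converting a measured tick count to nanoseconds via the
// invariant tick rate. The variable names are placeholders for this example.
//
//   static const double ticks_per_second = InvariantTicksPerSecond();
//   const double duration_ns = result.ticks / ticks_per_second * 1E9;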

// Precisely measures the number of ticks elapsed when calling "func" with the
// given inputs, shuffled to ensure realistic branch prediction hit rates.
//
// "func" returns a 'proof of work' to ensure its computations are not elided.
// "arg" is passed to Func, or reserved for internal use by MeasureClosure.
// "inputs" is an array of "num_inputs" (not necessarily unique) arguments to
// "func". The values should be chosen to maximize coverage of "func". This
// represents a distribution, so a value's frequency should reflect its
// probability in the real application. Order does not matter; for example, a
// uniform distribution over [0, 4) could be represented as {3,0,2,1}.
// Returns how many Result were written to "results": one per unique input, or
// zero if the measurement failed (an error message goes to stderr).
size_t Measure(const Func func, const void* arg, const FuncInput* inputs,
               const size_t num_inputs, Result* results,
               const Params& p = Params());
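
// Illustrative sketch (see nanobenchmark_test.cc for real usage): measuring a
// free function whose signature matches Func. The function name and input
// values below are made up for this example.
//
//   FuncOutput SquareLow8(const void* /*arg*/, FuncInput input) {
//     return (input & 0xFF) * (input & 0xFF);  // proof of work
//   }
//   ...
//   const FuncInput inputs[4] = {3, 0, 2, 1};  // uniform over [0, 4)
//   Result results[4];                         // one slot per unique input
//   const size_t num_results =
//       Measure(&SquareLow8, nullptr, inputs, 4, results);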

// Calls operator() of the given closure (lambda function).
template <class Closure>
static FuncOutput CallClosure(const void* f, const FuncInput input) {
  return (*reinterpret_cast<const Closure*>(f))(input);
}

// Same as Measure, except "closure" is typically a lambda function of
// FuncInput -> FuncOutput with a capture list.
template <class Closure>
static inline size_t MeasureClosure(const Closure& closure,
                                    const FuncInput* inputs,
                                    const size_t num_inputs, Result* results,
                                    const Params& p = Params()) {
  return Measure(reinterpret_cast<Func>(&CallClosure<Closure>),
                 reinterpret_cast<const void*>(&closure), inputs, num_inputs,
                 results, p);
}
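
// Illustrative sketch: the same kind of measurement with a capturing lambda
// via MeasureClosure; `table` stands in for some hypothetical captured state.
//
//   const auto lookup = [&table](FuncInput input) -> FuncOutput {
//     return table[input];  // proof of work depends on the captured table
//   };
//   const FuncInput inputs[4] = {3, 0, 2, 1};
//   Result results[4];
//   const size_t num_results = MeasureClosure(lookup, inputs, 4, results);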

}  // namespace random_internal_nanobenchmark
ABSL_NAMESPACE_END
}  // namespace absl

#endif  // ABSL_RANDOM_INTERNAL_NANOBENCHMARK_H_