// Copyright 2019 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <array>
#include <string>
#include <vector>

#include "benchmark/benchmark.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/macros.h"
#include "absl/container/inlined_vector.h"
#include "absl/strings/str_cat.h"

namespace {

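// Note: benchmark::DoNotOptimize() is used throughout this file to force the
// compiler to materialize its argument, preventing it from optimizing away
// the operations under measurement.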
void BM_InlinedVectorFill(benchmark::State& state) {
  const int len = state.range(0);
  absl::InlinedVector<int, 8> v;
  v.reserve(len);
  for (auto _ : state) {
    // Use resize(0) rather than clear(): InlinedVector releases its heap
    // allocation on clear().
    v.resize(0);
    for (int i = 0; i < len; ++i) {
      v.push_back(i);
    }
    benchmark::DoNotOptimize(v);
  }
}
BENCHMARK(BM_InlinedVectorFill)->Range(1, 256);

void BM_InlinedVectorFillRange(benchmark::State& state) {
  const int len = state.range(0);
  const std::vector<int> src(len, len);
  absl::InlinedVector<int, 8> v;
  v.reserve(len);
  for (auto _ : state) {
    benchmark::DoNotOptimize(src);
    v.assign(src.begin(), src.end());
    benchmark::DoNotOptimize(v);
  }
}
BENCHMARK(BM_InlinedVectorFillRange)->Range(1, 256);

void BM_StdVectorFill(benchmark::State& state) {
  const int len = state.range(0);
  std::vector<int> v;
  v.reserve(len);
  for (auto _ : state) {
    v.clear();
    for (int i = 0; i < len; ++i) {
      v.push_back(i);
    }
    benchmark::DoNotOptimize(v);
  }
}
BENCHMARK(BM_StdVectorFill)->Range(1, 256);

// The purpose of the next two benchmarks is to verify that
// absl::InlinedVector is efficient when moving is more efficient than
// copying. To do so, we use strings that are larger than the short
// string optimization buffer.
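// StringRepresentedInline detects the representation heuristically by moving
// from the string: moving a heap-allocated string steals its buffer (so the
// data pointer survives the move), while an inline (SSO) string's buffer
// lives inside the object itself, so the data pointer necessarily changes.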
bool StringRepresentedInline(std::string s) {
  const char* chars = s.data();
  std::string s1 = std::move(s);
  return s1.data() != chars;
}

int GetNonShortStringOptimizationSize() {
  for (int i = 24; i <= 192; i *= 2) {
    if (!StringRepresentedInline(std::string(i, 'A'))) {
      return i;
    }
  }
  ABSL_RAW_LOG(FATAL,
               "Failed to find a std::string larger than the short string "
               "optimization");
  return -1;
}

void BM_InlinedVectorFillString(benchmark::State& state) {
  const int len = state.range(0);
  const int no_sso = GetNonShortStringOptimizationSize();
  std::string strings[4] = {std::string(no_sso, 'A'), std::string(no_sso, 'B'),
                            std::string(no_sso, 'C'), std::string(no_sso, 'D')};

  for (auto _ : state) {
    absl::InlinedVector<std::string, 8> v;
    for (int i = 0; i < len; i++) {
      v.push_back(strings[i & 3]);
    }
  }
  state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * len);
}
BENCHMARK(BM_InlinedVectorFillString)->Range(0, 1024);

void BM_StdVectorFillString(benchmark::State& state) {
  const int len = state.range(0);
  const int no_sso = GetNonShortStringOptimizationSize();
  std::string strings[4] = {std::string(no_sso, 'A'), std::string(no_sso, 'B'),
                            std::string(no_sso, 'C'), std::string(no_sso, 'D')};

  for (auto _ : state) {
    std::vector<std::string> v;
    for (int i = 0; i < len; i++) {
      v.push_back(strings[i & 3]);
    }
  }
  state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * len);
}
BENCHMARK(BM_StdVectorFillString)->Range(0, 1024);

struct Buffer {  // some arbitrary structure for benchmarking.
  char* base;
  int length;
  int capacity;
  void* user_data;
};

void BM_InlinedVectorAssignments(benchmark::State& state) {
  const int len = state.range(0);
  using BufferVec = absl::InlinedVector<Buffer, 2>;

  BufferVec src;
  src.resize(len);

  BufferVec dst;
  for (auto _ : state) {
    benchmark::DoNotOptimize(dst);
    benchmark::DoNotOptimize(src);
    dst = src;
  }
}
BENCHMARK(BM_InlinedVectorAssignments)
    ->Arg(0)
    ->Arg(1)
    ->Arg(2)
    ->Arg(3)
    ->Arg(4)
    ->Arg(20);

void BM_CreateFromContainer(benchmark::State& state) {
  for (auto _ : state) {
    absl::InlinedVector<int, 4> src{1, 2, 3};
    benchmark::DoNotOptimize(src);
    absl::InlinedVector<int, 4> dst(std::move(src));
    benchmark::DoNotOptimize(dst);
  }
}
BENCHMARK(BM_CreateFromContainer);

struct LargeCopyableOnly {
  LargeCopyableOnly() : d(1024, 17) {}
  LargeCopyableOnly(const LargeCopyableOnly& o) = default;
  LargeCopyableOnly& operator=(const LargeCopyableOnly& o) = default;

  std::vector<int> d;
};

struct LargeCopyableSwappable {
  LargeCopyableSwappable() : d(1024, 17) {}

  LargeCopyableSwappable(const LargeCopyableSwappable& o) = default;

  LargeCopyableSwappable& operator=(LargeCopyableSwappable o) {
    using std::swap;
    swap(*this, o);
    return *this;
  }

  friend void swap(LargeCopyableSwappable& a, LargeCopyableSwappable& b) {
    using std::swap;
    swap(a.d, b.d);
  }

  std::vector<int> d;
};

struct LargeCopyableMovable {
  LargeCopyableMovable() : d(1024, 17) {}
  // Use implicitly defined copy and move.

  std::vector<int> d;
};

struct LargeCopyableMovableSwappable {
  LargeCopyableMovableSwappable() : d(1024, 17) {}
  LargeCopyableMovableSwappable(const LargeCopyableMovableSwappable& o) =
      default;
  LargeCopyableMovableSwappable(LargeCopyableMovableSwappable&& o) = default;

  LargeCopyableMovableSwappable& operator=(LargeCopyableMovableSwappable o) {
    using std::swap;
    swap(*this, o);
    return *this;
  }
  LargeCopyableMovableSwappable& operator=(LargeCopyableMovableSwappable&& o) =
      default;

  friend void swap(LargeCopyableMovableSwappable& a,
                   LargeCopyableMovableSwappable& b) {
    using std::swap;
    swap(a.d, b.d);
  }

  std::vector<int> d;
};

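// BM_SwapElements swaps two InlinedVectors whose element types offer
// different mixes of copy, move, and ADL swap support, exercising the
// different strategies swap can fall back on.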
template <typename ElementType>
void BM_SwapElements(benchmark::State& state) {
  const int len = state.range(0);
  using Vec = absl::InlinedVector<ElementType, 32>;
  Vec a(len);
  Vec b;
  for (auto _ : state) {
    using std::swap;
    benchmark::DoNotOptimize(a);
    benchmark::DoNotOptimize(b);
    swap(a, b);
  }
}
BENCHMARK_TEMPLATE(BM_SwapElements, LargeCopyableOnly)->Range(0, 1024);
BENCHMARK_TEMPLATE(BM_SwapElements, LargeCopyableSwappable)->Range(0, 1024);
BENCHMARK_TEMPLATE(BM_SwapElements, LargeCopyableMovable)->Range(0, 1024);
BENCHMARK_TEMPLATE(BM_SwapElements, LargeCopyableMovableSwappable)
    ->Range(0, 1024);

// The following benchmark is meant to track the vector's memory footprint
// (sizeof) as a function of stored type via the benchmark label. It is not
// meant to produce useful timings of the sizeof operator. The loop is a dummy
// operation to fulfill the requirement of running the benchmark.
template <typename VecType>
void BM_Sizeof(benchmark::State& state) {
  int size = 0;
  for (auto _ : state) {
    VecType vec;
    size = sizeof(vec);
  }
  state.SetLabel(absl::StrCat("sz=", size));
}
BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<char, 1>);
BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<char, 4>);
BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<char, 7>);
BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<char, 8>);

BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<int, 1>);
BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<int, 4>);
BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<int, 7>);
BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<int, 8>);

BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<void*, 1>);
BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<void*, 4>);
BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<void*, 7>);
BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<void*, 8>);

BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<std::string, 1>);
BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<std::string, 4>);
BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<std::string, 7>);
BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<std::string, 8>);

void BM_InlinedVectorIndexInlined(benchmark::State& state) {
  absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7};
  for (auto _ : state) {
    benchmark::DoNotOptimize(v);
    benchmark::DoNotOptimize(v[4]);
  }
}
BENCHMARK(BM_InlinedVectorIndexInlined);

void BM_InlinedVectorIndexExternal(benchmark::State& state) {
  absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
  for (auto _ : state) {
    benchmark::DoNotOptimize(v);
    benchmark::DoNotOptimize(v[4]);
  }
}
BENCHMARK(BM_InlinedVectorIndexExternal);

void BM_StdVectorIndex(benchmark::State& state) {
  std::vector<int> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
  for (auto _ : state) {
    benchmark::DoNotOptimize(v);
    benchmark::DoNotOptimize(v[4]);
  }
}
BENCHMARK(BM_StdVectorIndex);

void BM_InlinedVectorDataInlined(benchmark::State& state) {
  absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7};
  for (auto _ : state) {
    benchmark::DoNotOptimize(v);
    benchmark::DoNotOptimize(v.data());
  }
}
BENCHMARK(BM_InlinedVectorDataInlined);

void BM_InlinedVectorDataExternal(benchmark::State& state) {
  absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
  for (auto _ : state) {
    benchmark::DoNotOptimize(v);
    benchmark::DoNotOptimize(v.data());
  }
  state.SetItemsProcessed(16 * static_cast<int64_t>(state.iterations()));
}
BENCHMARK(BM_InlinedVectorDataExternal);

void BM_StdVectorData(benchmark::State& state) {
  std::vector<int> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
  for (auto _ : state) {
    benchmark::DoNotOptimize(v);
    benchmark::DoNotOptimize(v.data());
  }
  state.SetItemsProcessed(16 * static_cast<int64_t>(state.iterations()));
}
BENCHMARK(BM_StdVectorData);

void BM_InlinedVectorSizeInlined(benchmark::State& state) {
  absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7};
  for (auto _ : state) {
    benchmark::DoNotOptimize(v);
    benchmark::DoNotOptimize(v.size());
  }
}
BENCHMARK(BM_InlinedVectorSizeInlined);

void BM_InlinedVectorSizeExternal(benchmark::State& state) {
  absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
  for (auto _ : state) {
    benchmark::DoNotOptimize(v);
    benchmark::DoNotOptimize(v.size());
  }
}
BENCHMARK(BM_InlinedVectorSizeExternal);

void BM_StdVectorSize(benchmark::State& state) {
  std::vector<int> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
  for (auto _ : state) {
    benchmark::DoNotOptimize(v);
    benchmark::DoNotOptimize(v.size());
  }
}
BENCHMARK(BM_StdVectorSize);

void BM_InlinedVectorEmptyInlined(benchmark::State& state) {
  absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7};
  for (auto _ : state) {
    benchmark::DoNotOptimize(v);
    benchmark::DoNotOptimize(v.empty());
  }
}
BENCHMARK(BM_InlinedVectorEmptyInlined);

void BM_InlinedVectorEmptyExternal(benchmark::State& state) {
  absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
  for (auto _ : state) {
    benchmark::DoNotOptimize(v);
    benchmark::DoNotOptimize(v.empty());
  }
}
BENCHMARK(BM_InlinedVectorEmptyExternal);

void BM_StdVectorEmpty(benchmark::State& state) {
  std::vector<int> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
  for (auto _ : state) {
    benchmark::DoNotOptimize(v);
    benchmark::DoNotOptimize(v.empty());
  }
}
BENCHMARK(BM_StdVectorEmpty);

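// kLargeSize exceeds the inlined capacity, so vectors of that size spill to
// heap allocation; kSmallSize fits within the inlined storage.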
constexpr size_t kInlinedCapacity = 4;
constexpr size_t kLargeSize = kInlinedCapacity * 2;
constexpr size_t kSmallSize = kInlinedCapacity / 2;
constexpr size_t kBatchSize = 100;

#define ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_FunctionTemplate, T) \
  BENCHMARK_TEMPLATE(BM_FunctionTemplate, T, kLargeSize);        \
  BENCHMARK_TEMPLATE(BM_FunctionTemplate, T, kSmallSize)

#define ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_FunctionTemplate, T)      \
  BENCHMARK_TEMPLATE(BM_FunctionTemplate, T, kLargeSize, kLargeSize); \
  BENCHMARK_TEMPLATE(BM_FunctionTemplate, T, kLargeSize, kSmallSize); \
  BENCHMARK_TEMPLATE(BM_FunctionTemplate, T, kSmallSize, kLargeSize); \
  BENCHMARK_TEMPLATE(BM_FunctionTemplate, T, kSmallSize, kSmallSize)

template <typename T>
using InlVec = absl::InlinedVector<T, kInlinedCapacity>;

struct TrivialType {
  size_t val;
};

class NontrivialType {
 public:
  ABSL_ATTRIBUTE_NOINLINE NontrivialType() : val_() {
    benchmark::DoNotOptimize(*this);
  }

  ABSL_ATTRIBUTE_NOINLINE NontrivialType(const NontrivialType& other)
      : val_(other.val_) {
    benchmark::DoNotOptimize(*this);
  }

  ABSL_ATTRIBUTE_NOINLINE NontrivialType& operator=(
      const NontrivialType& other) {
    val_ = other.val_;
    benchmark::DoNotOptimize(*this);
    return *this;
  }

  ABSL_ATTRIBUTE_NOINLINE ~NontrivialType() noexcept {
    benchmark::DoNotOptimize(*this);
  }

 private:
  size_t val_;
};

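// BatchedBenchmark runs each timing in batches of kBatchSize vectors:
// prepare_vec sets up every vector while the timer is paused, and test_vec is
// then applied to each one while the timer runs, so per-iteration setup cost
// is excluded from the measurement.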
template <typename T, typename PrepareVecFn, typename TestVecFn>
void BatchedBenchmark(benchmark::State& state, PrepareVecFn prepare_vec,
                      TestVecFn test_vec) {
  std::array<InlVec<T>, kBatchSize> vector_batch{};

  while (state.KeepRunningBatch(kBatchSize)) {
    // Prepare batch
    state.PauseTiming();
    for (size_t i = 0; i < kBatchSize; ++i) {
      prepare_vec(vector_batch.data() + i, i);
    }
    benchmark::DoNotOptimize(vector_batch);
    state.ResumeTiming();

    // Test batch
    for (size_t i = 0; i < kBatchSize; ++i) {
      test_vec(vector_batch.data() + i, i);
    }
  }
}

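// The BM_ConstructFrom* benchmarks below measure constructors in isolation:
// prepare_vec destroys each vector in place (leaving raw storage behind), and
// test_vec reconstructs it on that storage with placement new.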
template <typename T, size_t ToSize>
void BM_ConstructFromSize(benchmark::State& state) {
  using VecT = InlVec<T>;
  auto size = ToSize;
  BatchedBenchmark<T>(
      state,
      /* prepare_vec = */ [](InlVec<T>* vec, size_t) { vec->~VecT(); },
      /* test_vec = */
      [&](void* ptr, size_t) {
        benchmark::DoNotOptimize(size);
        ::new (ptr) VecT(size);
      });
}
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromSize, TrivialType);
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromSize, NontrivialType);

template <typename T, size_t ToSize>
void BM_ConstructFromSizeRef(benchmark::State& state) {
  using VecT = InlVec<T>;
  auto size = ToSize;
  auto ref = T();
  BatchedBenchmark<T>(
      state,
      /* prepare_vec = */ [](InlVec<T>* vec, size_t) { vec->~VecT(); },
      /* test_vec = */
      [&](void* ptr, size_t) {
        benchmark::DoNotOptimize(size);
        benchmark::DoNotOptimize(ref);
        ::new (ptr) VecT(size, ref);
      });
}
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromSizeRef, TrivialType);
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromSizeRef, NontrivialType);

template <typename T, size_t ToSize>
void BM_ConstructFromRange(benchmark::State& state) {
  using VecT = InlVec<T>;
  std::array<T, ToSize> arr{};
  BatchedBenchmark<T>(
      state,
      /* prepare_vec = */ [](InlVec<T>* vec, size_t) { vec->~VecT(); },
      /* test_vec = */
      [&](void* ptr, size_t) {
        benchmark::DoNotOptimize(arr);
        ::new (ptr) VecT(arr.begin(), arr.end());
      });
}
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromRange, TrivialType);
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromRange, NontrivialType);

template <typename T, size_t ToSize>
void BM_ConstructFromCopy(benchmark::State& state) {
  using VecT = InlVec<T>;
  VecT other_vec(ToSize);
  BatchedBenchmark<T>(
      state,
      /* prepare_vec = */
      [](InlVec<T>* vec, size_t) { vec->~VecT(); },
      /* test_vec = */
      [&](void* ptr, size_t) {
        benchmark::DoNotOptimize(other_vec);
        ::new (ptr) VecT(other_vec);
      });
}
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromCopy, TrivialType);
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromCopy, NontrivialType);

template <typename T, size_t ToSize>
void BM_ConstructFromMove(benchmark::State& state) {
  using VecT = InlVec<T>;
  std::array<VecT, kBatchSize> vector_batch{};
  BatchedBenchmark<T>(
      state,
      /* prepare_vec = */
      [&](InlVec<T>* vec, size_t i) {
        vector_batch[i].clear();
        vector_batch[i].resize(ToSize);
        vec->~VecT();
      },
      /* test_vec = */
      [&](void* ptr, size_t i) {
        benchmark::DoNotOptimize(vector_batch[i]);
        ::new (ptr) VecT(std::move(vector_batch[i]));
      });
}
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromMove, TrivialType);
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromMove, NontrivialType);

template <typename T, size_t FromSize, size_t ToSize>
void BM_AssignSizeRef(benchmark::State& state) {
  auto size = ToSize;
  auto ref = T();
  BatchedBenchmark<T>(
      state,
      /* prepare_vec = */ [](InlVec<T>* vec, size_t) { vec->resize(FromSize); },
      /* test_vec = */
      [&](InlVec<T>* vec, size_t) {
        benchmark::DoNotOptimize(size);
        benchmark::DoNotOptimize(ref);
        vec->assign(size, ref);
      });
}
ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignSizeRef, TrivialType);
ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignSizeRef, NontrivialType);

template <typename T, size_t FromSize, size_t ToSize>
void BM_AssignRange(benchmark::State& state) {
  std::array<T, ToSize> arr{};
  BatchedBenchmark<T>(
      state,
      /* prepare_vec = */ [](InlVec<T>* vec, size_t) { vec->resize(FromSize); },
      /* test_vec = */
      [&](InlVec<T>* vec, size_t) {
        benchmark::DoNotOptimize(arr);
        vec->assign(arr.begin(), arr.end());
      });
}
ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignRange, TrivialType);
ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignRange, NontrivialType);

template <typename T, size_t FromSize, size_t ToSize>
void BM_AssignFromCopy(benchmark::State& state) {
  InlVec<T> other_vec(ToSize);
  BatchedBenchmark<T>(
      state,
      /* prepare_vec = */ [](InlVec<T>* vec, size_t) { vec->resize(FromSize); },
      /* test_vec = */
      [&](InlVec<T>* vec, size_t) {
        benchmark::DoNotOptimize(other_vec);
        *vec = other_vec;
      });
}
ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignFromCopy, TrivialType);
ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignFromCopy, NontrivialType);

template <typename T, size_t FromSize, size_t ToSize>
void BM_AssignFromMove(benchmark::State& state) {
  using VecT = InlVec<T>;
  std::array<VecT, kBatchSize> vector_batch{};
  BatchedBenchmark<T>(
      state,
      /* prepare_vec = */
      [&](InlVec<T>* vec, size_t i) {
        vector_batch[i].clear();
        vector_batch[i].resize(ToSize);
        vec->resize(FromSize);
      },
      /* test_vec = */
      [&](InlVec<T>* vec, size_t i) {
        benchmark::DoNotOptimize(vector_batch[i]);
        *vec = std::move(vector_batch[i]);
      });
}
ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignFromMove, TrivialType);
ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignFromMove, NontrivialType);

template <typename T, size_t FromSize, size_t ToSize>
void BM_ResizeSize(benchmark::State& state) {
  BatchedBenchmark<T>(
      state,
      /* prepare_vec = */
      [](InlVec<T>* vec, size_t) {
        vec->clear();
        vec->resize(FromSize);
      },
      /* test_vec = */
      [](InlVec<T>* vec, size_t) { vec->resize(ToSize); });
}
ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_ResizeSize, TrivialType);
ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_ResizeSize, NontrivialType);

template <typename T, size_t FromSize, size_t ToSize>
void BM_ResizeSizeRef(benchmark::State& state) {
  auto t = T();
  BatchedBenchmark<T>(
      state,
      /* prepare_vec = */
      [](InlVec<T>* vec, size_t) {
        vec->clear();
        vec->resize(FromSize);
      },
      /* test_vec = */
      [&](InlVec<T>* vec, size_t) {
        benchmark::DoNotOptimize(t);
        vec->resize(ToSize, t);
      });
}
ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_ResizeSizeRef, TrivialType);
ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_ResizeSizeRef, NontrivialType);

template <typename T, size_t FromSize, size_t ToSize>
void BM_InsertSizeRef(benchmark::State& state) {
  auto t = T();
  BatchedBenchmark<T>(
      state,
      /* prepare_vec = */
      [](InlVec<T>* vec, size_t) {
        vec->clear();
        vec->resize(FromSize);
      },
      /* test_vec = */
      [&](InlVec<T>* vec, size_t) {
        benchmark::DoNotOptimize(t);
        auto* pos = vec->data() + (vec->size() / 2);
        vec->insert(pos, t);
      });
}
ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_InsertSizeRef, TrivialType);
ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_InsertSizeRef, NontrivialType);

template <typename T, size_t FromSize, size_t ToSize>
void BM_InsertRange(benchmark::State& state) {
  InlVec<T> other_vec(ToSize);
  BatchedBenchmark<T>(
      state,
      /* prepare_vec = */
      [](InlVec<T>* vec, size_t) {
        vec->clear();
        vec->resize(FromSize);
      },
      /* test_vec = */
      [&](InlVec<T>* vec, size_t) {
        benchmark::DoNotOptimize(other_vec);
        auto* pos = vec->data() + (vec->size() / 2);
        vec->insert(pos, other_vec.begin(), other_vec.end());
      });
}
ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_InsertRange, TrivialType);
ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_InsertRange, NontrivialType);

template <typename T, size_t FromSize>
void BM_EmplaceBack(benchmark::State& state) {
  BatchedBenchmark<T>(
      state,
      /* prepare_vec = */
      [](InlVec<T>* vec, size_t) {
        vec->clear();
        vec->resize(FromSize);
      },
      /* test_vec = */
      [](InlVec<T>* vec, size_t) { vec->emplace_back(); });
}
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_EmplaceBack, TrivialType);
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_EmplaceBack, NontrivialType);

template <typename T, size_t FromSize>
void BM_PopBack(benchmark::State& state) {
  BatchedBenchmark<T>(
      state,
      /* prepare_vec = */
      [](InlVec<T>* vec, size_t) {
        vec->clear();
        vec->resize(FromSize);
      },
      /* test_vec = */
      [](InlVec<T>* vec, size_t) { vec->pop_back(); });
}
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_PopBack, TrivialType);
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_PopBack, NontrivialType);

template <typename T, size_t FromSize>
void BM_EraseOne(benchmark::State& state) {
  BatchedBenchmark<T>(
      state,
      /* prepare_vec = */
      [](InlVec<T>* vec, size_t) {
        vec->clear();
        vec->resize(FromSize);
      },
      /* test_vec = */
      [](InlVec<T>* vec, size_t) {
        auto* pos = vec->data() + (vec->size() / 2);
        vec->erase(pos);
      });
}
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_EraseOne, TrivialType);
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_EraseOne, NontrivialType);

template <typename T, size_t FromSize>
void BM_EraseRange(benchmark::State& state) {
  BatchedBenchmark<T>(
      state,
      /* prepare_vec = */
      [](InlVec<T>* vec, size_t) {
        vec->clear();
        vec->resize(FromSize);
      },
      /* test_vec = */
      [](InlVec<T>* vec, size_t) {
        auto* pos = vec->data() + (vec->size() / 2);
        vec->erase(pos, pos + 1);
      });
}
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_EraseRange, TrivialType);
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_EraseRange, NontrivialType);

template <typename T, size_t FromSize>
void BM_Clear(benchmark::State& state) {
  BatchedBenchmark<T>(
      state,
      /* prepare_vec = */ [](InlVec<T>* vec, size_t) { vec->resize(FromSize); },
      /* test_vec = */ [](InlVec<T>* vec, size_t) { vec->clear(); });
}
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_Clear, TrivialType);
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_Clear, NontrivialType);

template <typename T, size_t FromSize, size_t ToCapacity>
void BM_Reserve(benchmark::State& state) {
  BatchedBenchmark<T>(
      state,
      /* prepare_vec = */
      [](InlVec<T>* vec, size_t) {
        vec->clear();
        vec->resize(FromSize);
      },
      /* test_vec = */
      [](InlVec<T>* vec, size_t) { vec->reserve(ToCapacity); });
}
ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_Reserve, TrivialType);
ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_Reserve, NontrivialType);

template <typename T, size_t FromCapacity, size_t ToCapacity>
void BM_ShrinkToFit(benchmark::State& state) {
  BatchedBenchmark<T>(
      state,
      /* prepare_vec = */
      [](InlVec<T>* vec, size_t) {
        vec->clear();
        vec->resize(ToCapacity);
        vec->reserve(FromCapacity);
      },
      /* test_vec = */ [](InlVec<T>* vec, size_t) { vec->shrink_to_fit(); });
}
ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_ShrinkToFit, TrivialType);
ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_ShrinkToFit, NontrivialType);

template <typename T, size_t FromSize, size_t ToSize>
void BM_Swap(benchmark::State& state) {
  using VecT = InlVec<T>;
  std::array<VecT, kBatchSize> vector_batch{};
  BatchedBenchmark<T>(
      state,
      /* prepare_vec = */
      [&](InlVec<T>* vec, size_t i) {
        vector_batch[i].clear();
        vector_batch[i].resize(ToSize);
        vec->resize(FromSize);
      },
      /* test_vec = */
      [&](InlVec<T>* vec, size_t i) {
        using std::swap;
        benchmark::DoNotOptimize(vector_batch[i]);
        swap(*vec, vector_batch[i]);
      });
}
ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_Swap, TrivialType);
ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_Swap, NontrivialType);

}  // namespace