// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <stdint.h>

#include <algorithm>
#include <array>
#include <functional>
#include <map>
#include <memory>
#include <numeric>
#include <random>
#include <set>
#include <string>
#include <type_traits>
#include <unordered_map>
#include <unordered_set>
#include <vector>

#include "benchmark/benchmark.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/container/btree_map.h"
#include "absl/container/btree_set.h"
#include "absl/container/btree_test.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/internal/hashtable_debug.h"
#include "absl/flags/flag.h"
#include "absl/hash/hash.h"
#include "absl/memory/memory.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_format.h"
#include "absl/time/time.h"

namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
namespace {

constexpr size_t kBenchmarkValues = 1 << 20;

// How many times we add and remove sub-batches in one batch of *AddRem
// benchmarks.
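//
// For example, with kBenchmarkValues == 1 << 20, each KeepRunningBatch() call
// in BM_QueueAddRem below accounts for (kBenchmarkValues / 2) *
// kAddRemBatchSize == 1 << 21 erase/insert pairs, performed as
// kAddRemBatchSize sub-batches of kBenchmarkValues / 2 pairs each.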
constexpr size_t kAddRemBatchSize = 1 << 2;

// Generates n values in the range [0, 4 * n].
template <typename V>
std::vector<V> GenerateValues(int n) {
  constexpr int kSeed = 23;
  return GenerateValuesWithSeed<V>(n, 4 * n, kSeed);
}

// Benchmark insertion of values into a container.
template <typename T>
void BM_InsertImpl(benchmark::State& state, bool sorted) {
  using V = typename remove_pair_const<typename T::value_type>::type;
  typename KeyOfValue<typename T::key_type, V>::type key_of_value;

  std::vector<V> values = GenerateValues<V>(kBenchmarkValues);
  if (sorted) {
    std::sort(values.begin(), values.end());
  }
  T container(values.begin(), values.end());

  // Remove and re-insert 10% of the keys per batch.
  const int batch_size = (kBenchmarkValues + 9) / 10;
  while (state.KeepRunningBatch(batch_size)) {
    state.PauseTiming();
    const auto i = static_cast<int>(state.iterations());

    for (int j = i; j < i + batch_size; j++) {
      int x = j % kBenchmarkValues;
      container.erase(key_of_value(values[x]));
    }

    state.ResumeTiming();

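    // Only this loop is timed: the erasures above ran with the timer paused,
    // so each reported iteration is one insertion into a container of roughly
    // kBenchmarkValues elements.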
    for (int j = i; j < i + batch_size; j++) {
      int x = j % kBenchmarkValues;
      container.insert(values[x]);
    }
  }
}

template <typename T>
void BM_Insert(benchmark::State& state) {
  BM_InsertImpl<T>(state, false);
}

template <typename T>
void BM_InsertSorted(benchmark::State& state) {
  BM_InsertImpl<T>(state, true);
}

// Benchmark inserting the first few elements in a container. In b-tree, this is
// when the root node grows.
template <typename T>
void BM_InsertSmall(benchmark::State& state) {
  using V = typename remove_pair_const<typename T::value_type>::type;

  const int kSize = 8;
  std::vector<V> values = GenerateValues<V>(kSize);
  T container;

  while (state.KeepRunningBatch(kSize)) {
    for (int i = 0; i < kSize; ++i) {
      benchmark::DoNotOptimize(container.insert(values[i]));
    }
    state.PauseTiming();
    // Do not measure the time it takes to clear the container.
    container.clear();
    state.ResumeTiming();
  }
}

template <typename T>
void BM_LookupImpl(benchmark::State& state, bool sorted) {
  using V = typename remove_pair_const<typename T::value_type>::type;
  typename KeyOfValue<typename T::key_type, V>::type key_of_value;

  std::vector<V> values = GenerateValues<V>(kBenchmarkValues);
  if (sorted) {
    std::sort(values.begin(), values.end());
  }
  T container(values.begin(), values.end());

  while (state.KeepRunning()) {
    int idx = state.iterations() % kBenchmarkValues;
    benchmark::DoNotOptimize(container.find(key_of_value(values[idx])));
  }
}

// Benchmark lookup of values in a container.
template <typename T>
void BM_Lookup(benchmark::State& state) {
  BM_LookupImpl<T>(state, false);
}

// Benchmark lookup of values in a full container, meaning that values
// are inserted in-order to take advantage of biased insertion, which
// yields a full tree.
template <typename T>
void BM_FullLookup(benchmark::State& state) {
  BM_LookupImpl<T>(state, true);
}

// Benchmark deletion of values from a container.
template <typename T>
void BM_Delete(benchmark::State& state) {
  using V = typename remove_pair_const<typename T::value_type>::type;
  typename KeyOfValue<typename T::key_type, V>::type key_of_value;
  std::vector<V> values = GenerateValues<V>(kBenchmarkValues);
  T container(values.begin(), values.end());

  // Remove and re-insert 10% of the keys per batch.
  const int batch_size = (kBenchmarkValues + 9) / 10;
  while (state.KeepRunningBatch(batch_size)) {
    const auto i = static_cast<int>(state.iterations());

    for (int j = i; j < i + batch_size; j++) {
      int x = j % kBenchmarkValues;
      container.erase(key_of_value(values[x]));
    }

    state.PauseTiming();
    for (int j = i; j < i + batch_size; j++) {
      int x = j % kBenchmarkValues;
      container.insert(values[x]);
    }
    state.ResumeTiming();
  }
}

// Benchmark deletion of multiple values from a container.
template <typename T>
void BM_DeleteRange(benchmark::State& state) {
  using V = typename remove_pair_const<typename T::value_type>::type;
  typename KeyOfValue<typename T::key_type, V>::type key_of_value;
  std::vector<V> values = GenerateValues<V>(kBenchmarkValues);
  T container(values.begin(), values.end());

  // Remove and re-insert 10% of the keys per batch.
  const int batch_size = (kBenchmarkValues + 9) / 10;
  while (state.KeepRunningBatch(batch_size)) {
    const auto i = static_cast<int>(state.iterations());

    const int start_index = i % kBenchmarkValues;

    state.PauseTiming();
    {
      std::vector<V> removed;
      removed.reserve(batch_size);
      auto itr = container.find(key_of_value(values[start_index]));
      auto start = itr;
      for (int j = 0; j < batch_size; j++) {
        if (itr == container.end()) {
          // We ran off the end of the container: erase the range collected
          // so far (timed), then continue collecting from the beginning.
          state.ResumeTiming();
          container.erase(start, itr);
          state.PauseTiming();
          itr = container.begin();
          start = itr;
        }
        removed.push_back(*itr++);
      }

      state.ResumeTiming();
      container.erase(start, itr);
      state.PauseTiming();

      container.insert(removed.begin(), removed.end());
    }
    state.ResumeTiming();
  }
}

// Benchmark steady-state insert (into first half of range) and remove (from
// second half of range), treating the container approximately like a queue with
// log-time access for all elements. This benchmark does not test the case where
// insertion and removal happen in the same region of the tree.  This benchmark
// counts two value constructors.
template <typename T>
void BM_QueueAddRem(benchmark::State& state) {
  using V = typename remove_pair_const<typename T::value_type>::type;
  typename KeyOfValue<typename T::key_type, V>::type key_of_value;

  ABSL_RAW_CHECK(kBenchmarkValues % 2 == 0, "for performance");

  T container;

  const size_t half = kBenchmarkValues / 2;
  std::vector<int> remove_keys(half);
  std::vector<int> add_keys(half);

  // We want to do the exact same work repeatedly, and the benchmark can end
  // after a different number of iterations depending on the speed of the
  // individual run, so we use a large batch size here and ensure that we do
  // deterministic work every batch.
  while (state.KeepRunningBatch(half * kAddRemBatchSize)) {
    state.PauseTiming();

    container.clear();

    for (size_t i = 0; i < half; ++i) {
      remove_keys[i] = i;
      add_keys[i] = i;
    }
    constexpr int kSeed = 5;
    std::mt19937_64 rand(kSeed);
    std::shuffle(remove_keys.begin(), remove_keys.end(), rand);
    std::shuffle(add_keys.begin(), add_keys.end(), rand);

    // Note: this needs lazy generation of values.
    Generator<V> g(kBenchmarkValues * kAddRemBatchSize);

    for (size_t i = 0; i < half; ++i) {
      container.insert(g(add_keys[i]));
      container.insert(g(half + remove_keys[i]));
    }

    // There are three parts, each of size "half":
    // 1 is being deleted from  [offset - half, offset)
    // 2 is standing            [offset, offset + half)
    // 3 is being inserted into [offset + half, offset + 2 * half)
    size_t offset = 0;

    for (size_t i = 0; i < kAddRemBatchSize; ++i) {
      std::shuffle(remove_keys.begin(), remove_keys.end(), rand);
      std::shuffle(add_keys.begin(), add_keys.end(), rand);
      offset += half;

      state.ResumeTiming();
      for (size_t idx = 0; idx < half; ++idx) {
        container.erase(key_of_value(g(offset - half + remove_keys[idx])));
        container.insert(g(offset + half + add_keys[idx]));
      }
      state.PauseTiming();
    }
    state.ResumeTiming();
  }
}

// Mixed insertion and deletion in the same range using pre-constructed values.
template <typename T>
void BM_MixedAddRem(benchmark::State& state) {
  using V = typename remove_pair_const<typename T::value_type>::type;
  typename KeyOfValue<typename T::key_type, V>::type key_of_value;

  ABSL_RAW_CHECK(kBenchmarkValues % 2 == 0, "for performance");

  T container;

  // Create two random shuffles.
  std::vector<int> remove_keys(kBenchmarkValues);
  std::vector<int> add_keys(kBenchmarkValues);

  // We want to do the exact same work repeatedly, and the benchmark can end
  // after a different number of iterations depending on the speed of the
  // individual run, so we use a large batch size here and ensure that we do
  // deterministic work every batch.
  while (state.KeepRunningBatch(kBenchmarkValues * kAddRemBatchSize)) {
    state.PauseTiming();

    container.clear();

    constexpr int kSeed = 7;
    std::mt19937_64 rand(kSeed);

    std::vector<V> values = GenerateValues<V>(kBenchmarkValues * 2);

    // Insert the first half of the values (already in random order).
    container.insert(values.begin(), values.begin() + kBenchmarkValues);

    // Set up the key indices for the first round.
    for (size_t i = 0; i < kBenchmarkValues; ++i) {
      // remove_keys and add_keys will be swapped before each round,
      // therefore fill add_keys here with the keys just inserted, so
      // they'll be the first to be removed.
      remove_keys[i] = i + kBenchmarkValues;
      add_keys[i] = i;
    }

    for (size_t i = 0; i < kAddRemBatchSize; ++i) {
      remove_keys.swap(add_keys);
      std::shuffle(remove_keys.begin(), remove_keys.end(), rand);
      std::shuffle(add_keys.begin(), add_keys.end(), rand);

      state.ResumeTiming();
      for (size_t idx = 0; idx < kBenchmarkValues; ++idx) {
        container.erase(key_of_value(values[remove_keys[idx]]));
        container.insert(values[add_keys[idx]]);
      }
      state.PauseTiming();
    }
    state.ResumeTiming();
  }
}

// Insertion at end, removal from the beginning.  This benchmark
// counts two value constructors.
// TODO(ezb): we could add a GenerateNext version of generator that could reduce
// noise for string-like types.
template <typename T>
void BM_Fifo(benchmark::State& state) {
  using V = typename remove_pair_const<typename T::value_type>::type;

  T container;
  // Need lazy generation of values as state.max_iterations is large.
  Generator<V> g(kBenchmarkValues + state.max_iterations);

  for (int i = 0; i < kBenchmarkValues; i++) {
    container.insert(g(i));
  }

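  // Each iteration below pops the current smallest element and appends a new
  // largest one; passing container.end() as the insertion hint lets ordered
  // containers place the element without a full search from the root when the
  // hint is correct.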
  while (state.KeepRunning()) {
    container.erase(container.begin());
    container.insert(container.end(), g(state.iterations() + kBenchmarkValues));
  }
}

// Iteration (forward) through the tree.
template <typename T>
void BM_FwdIter(benchmark::State& state) {
  using V = typename remove_pair_const<typename T::value_type>::type;
  using R = typename T::value_type const*;

  std::vector<V> values = GenerateValues<V>(kBenchmarkValues);
  T container(values.begin(), values.end());

  auto iter = container.end();

  R r = nullptr;

  while (state.KeepRunning()) {
    if (iter == container.end()) iter = container.begin();
    r = &(*iter);
    ++iter;
  }

  // Prevent the compiler from optimizing away r and, with it, the traversal
  // loop above.
  benchmark::DoNotOptimize(r);
}

// Benchmark random range-construction of a container.
template <typename T>
void BM_RangeConstructionImpl(benchmark::State& state, bool sorted) {
  using V = typename remove_pair_const<typename T::value_type>::type;

  std::vector<V> values = GenerateValues<V>(kBenchmarkValues);
  if (sorted) {
    std::sort(values.begin(), values.end());
  }
  {
    // Construct and destroy one container outside the timing loop up front
    // (e.g. to warm up the allocator before measurement).
    T container(values.begin(), values.end());
  }

  while (state.KeepRunning()) {
    T container(values.begin(), values.end());
    benchmark::DoNotOptimize(container);
  }
}

template <typename T>
void BM_InsertRangeRandom(benchmark::State& state) {
  BM_RangeConstructionImpl<T>(state, false);
}

template <typename T>
void BM_InsertRangeSorted(benchmark::State& state) {
  BM_RangeConstructionImpl<T>(state, true);
}

#define STL_ORDERED_TYPES(value)                     \
  using stl_set_##value = std::set<value>;           \
  using stl_map_##value = std::map<value, intptr_t>; \
  using stl_multiset_##value = std::multiset<value>; \
  using stl_multimap_##value = std::multimap<value, intptr_t>

using StdString = std::string;
STL_ORDERED_TYPES(int32_t);
STL_ORDERED_TYPES(int64_t);
STL_ORDERED_TYPES(StdString);
STL_ORDERED_TYPES(Cord);
STL_ORDERED_TYPES(Time);

#define STL_UNORDERED_TYPES(value)                                       \
  using stl_unordered_set_##value = std::unordered_set<value>;           \
  using stl_unordered_map_##value = std::unordered_map<value, intptr_t>; \
  using flat_hash_set_##value = flat_hash_set<value>;                    \
  using flat_hash_map_##value = flat_hash_map<value, intptr_t>;          \
  using stl_unordered_multiset_##value = std::unordered_multiset<value>; \
  using stl_unordered_multimap_##value =                                 \
      std::unordered_multimap<value, intptr_t>

#define STL_UNORDERED_TYPES_CUSTOM_HASH(value, hash)                           \
  using stl_unordered_set_##value = std::unordered_set<value, hash>;           \
  using stl_unordered_map_##value = std::unordered_map<value, intptr_t, hash>; \
  using flat_hash_set_##value = flat_hash_set<value, hash>;                    \
  using flat_hash_map_##value = flat_hash_map<value, intptr_t, hash>;          \
  using stl_unordered_multiset_##value = std::unordered_multiset<value, hash>; \
  using stl_unordered_multimap_##value =                                       \
      std::unordered_multimap<value, intptr_t, hash>

STL_UNORDERED_TYPES_CUSTOM_HASH(Cord, absl::Hash<absl::Cord>);

STL_UNORDERED_TYPES(int32_t);
STL_UNORDERED_TYPES(int64_t);
STL_UNORDERED_TYPES(StdString);
STL_UNORDERED_TYPES_CUSTOM_HASH(Time, absl::Hash<absl::Time>);

// The btree_256_ prefix refers to the default 256-byte target node size.
#define BTREE_TYPES(value)                                            \
  using btree_256_set_##value =                                       \
      btree_set<value, std::less<value>, std::allocator<value>>;      \
  using btree_256_map_##value =                                       \
      btree_map<value, intptr_t, std::less<value>,                    \
                std::allocator<std::pair<const value, intptr_t>>>;    \
  using btree_256_multiset_##value =                                  \
      btree_multiset<value, std::less<value>, std::allocator<value>>; \
  using btree_256_multimap_##value =                                  \
      btree_multimap<value, intptr_t, std::less<value>,               \
                     std::allocator<std::pair<const value, intptr_t>>>

BTREE_TYPES(int32_t);
BTREE_TYPES(int64_t);
BTREE_TYPES(StdString);
BTREE_TYPES(Cord);
BTREE_TYPES(Time);

#define MY_BENCHMARK4(type, func)                                              \
  void BM_##type##_##func(benchmark::State& state) { BM_##func<type>(state); } \
  BENCHMARK(BM_##type##_##func)
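
// For example, MY_BENCHMARK4(stl_set_int32_t, Lookup) expands to a wrapper
// function BM_stl_set_int32_t_Lookup that runs BM_Lookup<stl_set_int32_t> and
// registers it with BENCHMARK().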

#define MY_BENCHMARK3(type)               \
  MY_BENCHMARK4(type, Insert);            \
  MY_BENCHMARK4(type, InsertSorted);      \
  MY_BENCHMARK4(type, InsertSmall);       \
  MY_BENCHMARK4(type, Lookup);            \
  MY_BENCHMARK4(type, FullLookup);        \
  MY_BENCHMARK4(type, Delete);            \
  MY_BENCHMARK4(type, DeleteRange);       \
  MY_BENCHMARK4(type, QueueAddRem);       \
  MY_BENCHMARK4(type, MixedAddRem);       \
  MY_BENCHMARK4(type, Fifo);              \
  MY_BENCHMARK4(type, FwdIter);           \
  MY_BENCHMARK4(type, InsertRangeRandom); \
  MY_BENCHMARK4(type, InsertRangeSorted)

#define MY_BENCHMARK2_SUPPORTS_MULTI_ONLY(type) \
  MY_BENCHMARK3(stl_##type);                    \
  MY_BENCHMARK3(stl_unordered_##type);          \
  MY_BENCHMARK3(btree_256_##type)

#define MY_BENCHMARK2(type)                \
  MY_BENCHMARK2_SUPPORTS_MULTI_ONLY(type); \
  MY_BENCHMARK3(flat_hash_##type)

// Define MULTI_TESTING to see benchmarks for multi-containers also.
//
// You can use --copt=-DMULTI_TESTING.
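// With Bazel that would look something like
//   bazel run -c opt --copt=-DMULTI_TESTING <benchmark target>
// where the target name depends on your build setup.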
#ifdef MULTI_TESTING
#define MY_BENCHMARK(type)                            \
  MY_BENCHMARK2(set_##type);                          \
  MY_BENCHMARK2(map_##type);                          \
  MY_BENCHMARK2_SUPPORTS_MULTI_ONLY(multiset_##type); \
  MY_BENCHMARK2_SUPPORTS_MULTI_ONLY(multimap_##type)
#else
#define MY_BENCHMARK(type)   \
  MY_BENCHMARK2(set_##type); \
  MY_BENCHMARK2(map_##type)
#endif

MY_BENCHMARK(int32_t);
MY_BENCHMARK(int64_t);
MY_BENCHMARK(StdString);
MY_BENCHMARK(Cord);
MY_BENCHMARK(Time);

// Define a type whose size and cost of moving are independently customizable.
// When sizeof(value_type) increases, we expect btree to no longer have as much
// cache-locality advantage over STL. When cost of moving increases, we expect
// btree to actually do more work than STL because it has to move values around
// and STL doesn't have to.
template <int Size, int Copies>
struct BigType {
  BigType() : BigType(0) {}
  explicit BigType(int x) { std::iota(values.begin(), values.end(), x); }

  void Copy(const BigType& other) {
    for (int i = 0; i < Size && i < Copies; ++i) values[i] = other.values[i];
    // If Copies > Size, do extra copies.
    for (int i = Size, idx = 0; i < Copies; ++i) {
      int64_t tmp = other.values[idx];
      benchmark::DoNotOptimize(tmp);
      idx = idx + 1 == Size ? 0 : idx + 1;
    }
  }

  BigType(const BigType& other) { Copy(other); }
  BigType& operator=(const BigType& other) {
    Copy(other);
    return *this;
  }

  // Compare only the first Copies elements if Copies is less than Size.
  bool operator<(const BigType& other) const {
    return std::lexicographical_compare(
        values.begin(), values.begin() + std::min(Size, Copies),
        other.values.begin(), other.values.begin() + std::min(Size, Copies));
  }
  bool operator==(const BigType& other) const {
    return std::equal(values.begin(), values.begin() + std::min(Size, Copies),
                      other.values.begin());
  }

  // Support absl::Hash.
  template <typename State>
  friend State AbslHashValue(State h, const BigType& b) {
    for (int i = 0; i < Size && i < Copies; ++i)
      h = State::combine(std::move(h), b.values[i]);
    return h;
  }

  std::array<int64_t, Size> values;
};
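
// A small sanity check of the intent above: Copies changes only the work done
// by Copy(), not the footprint, so two instantiations with the same Size must
// have the same size.
static_assert(sizeof(BigType<32, 1>) == sizeof(BigType<32, 32>),
              "Copies should not affect sizeof(BigType)");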

#define BIG_TYPE_BENCHMARKS(SIZE, COPIES)                                     \
  using stl_set_size##SIZE##copies##COPIES = std::set<BigType<SIZE, COPIES>>; \
  using stl_map_size##SIZE##copies##COPIES =                                  \
      std::map<BigType<SIZE, COPIES>, intptr_t>;                              \
  using stl_multiset_size##SIZE##copies##COPIES =                             \
      std::multiset<BigType<SIZE, COPIES>>;                                   \
  using stl_multimap_size##SIZE##copies##COPIES =                             \
      std::multimap<BigType<SIZE, COPIES>, intptr_t>;                         \
  using stl_unordered_set_size##SIZE##copies##COPIES =                        \
      std::unordered_set<BigType<SIZE, COPIES>,                               \
                         absl::Hash<BigType<SIZE, COPIES>>>;                  \
  using stl_unordered_map_size##SIZE##copies##COPIES =                        \
      std::unordered_map<BigType<SIZE, COPIES>, intptr_t,                     \
                         absl::Hash<BigType<SIZE, COPIES>>>;                  \
  using flat_hash_set_size##SIZE##copies##COPIES =                            \
      flat_hash_set<BigType<SIZE, COPIES>>;                                   \
  using flat_hash_map_size##SIZE##copies##COPIES =                            \
      flat_hash_map<BigType<SIZE, COPIES>, intptr_t>;                         \
  using stl_unordered_multiset_size##SIZE##copies##COPIES =                   \
      std::unordered_multiset<BigType<SIZE, COPIES>,                          \
                              absl::Hash<BigType<SIZE, COPIES>>>;             \
  using stl_unordered_multimap_size##SIZE##copies##COPIES =                   \
      std::unordered_multimap<BigType<SIZE, COPIES>, intptr_t,                \
                              absl::Hash<BigType<SIZE, COPIES>>>;             \
  using btree_256_set_size##SIZE##copies##COPIES =                            \
      btree_set<BigType<SIZE, COPIES>>;                                       \
  using btree_256_map_size##SIZE##copies##COPIES =                            \
      btree_map<BigType<SIZE, COPIES>, intptr_t>;                             \
  using btree_256_multiset_size##SIZE##copies##COPIES =                       \
      btree_multiset<BigType<SIZE, COPIES>>;                                  \
  using btree_256_multimap_size##SIZE##copies##COPIES =                       \
      btree_multimap<BigType<SIZE, COPIES>, intptr_t>;                        \
  MY_BENCHMARK(size##SIZE##copies##COPIES)

// Define BIG_TYPE_TESTING to see benchmarks for more big types.
//
// You can use --copt=-DBIG_TYPE_TESTING.
#ifndef NODESIZE_TESTING
#ifdef BIG_TYPE_TESTING
BIG_TYPE_BENCHMARKS(1, 4);
BIG_TYPE_BENCHMARKS(4, 1);
BIG_TYPE_BENCHMARKS(4, 4);
BIG_TYPE_BENCHMARKS(1, 8);
BIG_TYPE_BENCHMARKS(8, 1);
BIG_TYPE_BENCHMARKS(8, 8);
BIG_TYPE_BENCHMARKS(1, 16);
BIG_TYPE_BENCHMARKS(16, 1);
BIG_TYPE_BENCHMARKS(16, 16);
BIG_TYPE_BENCHMARKS(1, 32);
BIG_TYPE_BENCHMARKS(32, 1);
BIG_TYPE_BENCHMARKS(32, 32);
#else
BIG_TYPE_BENCHMARKS(32, 32);
#endif
#endif

// Benchmark using unique_ptrs to large value types. In order to be able to use
// the same benchmark code as the other types, use a type that holds a
// unique_ptr and has a copy constructor.
template <int Size>
struct BigTypePtr {
  BigTypePtr() : BigTypePtr(0) {}
  explicit BigTypePtr(int x) {
    ptr = absl::make_unique<BigType<Size, Size>>(x);
  }
  BigTypePtr(const BigTypePtr& other) {
    ptr = absl::make_unique<BigType<Size, Size>>(*other.ptr);
  }
  BigTypePtr(BigTypePtr&& other) noexcept = default;
  BigTypePtr& operator=(const BigTypePtr& other) {
    ptr = absl::make_unique<BigType<Size, Size>>(*other.ptr);
    return *this;
  }
  BigTypePtr& operator=(BigTypePtr&& other) noexcept = default;

  bool operator<(const BigTypePtr& other) const { return *ptr < *other.ptr; }
  bool operator==(const BigTypePtr& other) const { return *ptr == *other.ptr; }

  std::unique_ptr<BigType<Size, Size>> ptr;
};

template <int Size>
double ContainerInfo(const btree_set<BigTypePtr<Size>>& b) {
  const double bytes_used =
      b.bytes_used() + b.size() * sizeof(BigType<Size, Size>);
  const double bytes_per_value = bytes_used / b.size();
  BtreeContainerInfoLog(b, bytes_used, bytes_per_value);
  return bytes_per_value;
}
template <int Size>
double ContainerInfo(const btree_map<int, BigTypePtr<Size>>& b) {
  const double bytes_used =
      b.bytes_used() + b.size() * sizeof(BigType<Size, Size>);
  const double bytes_per_value = bytes_used / b.size();
  BtreeContainerInfoLog(b, bytes_used, bytes_per_value);
  return bytes_per_value;
}
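
// In both ContainerInfo overloads above, b.bytes_used() measures only the
// tree itself, whose elements are BigTypePtr (a single pointer each), so the
// heap-allocated BigType payloads are added on top to report the true
// per-value footprint.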

#define BIG_TYPE_PTR_BENCHMARKS(SIZE)                                          \
  using stl_set_size##SIZE##copies##SIZE##ptr = std::set<BigType<SIZE, SIZE>>; \
  using stl_map_size##SIZE##copies##SIZE##ptr =                                \
      std::map<int, BigType<SIZE, SIZE>>;                                      \
  using stl_unordered_set_size##SIZE##copies##SIZE##ptr =                      \
      std::unordered_set<BigType<SIZE, SIZE>,                                  \
                         absl::Hash<BigType<SIZE, SIZE>>>;                     \
  using stl_unordered_map_size##SIZE##copies##SIZE##ptr =                      \
      std::unordered_map<int, BigType<SIZE, SIZE>>;                            \
  using flat_hash_set_size##SIZE##copies##SIZE##ptr =                          \
      flat_hash_set<BigType<SIZE, SIZE>>;                                      \
  using flat_hash_map_size##SIZE##copies##SIZE##ptr =                          \
      flat_hash_map<int, BigTypePtr<SIZE>>;                                    \
  using btree_256_set_size##SIZE##copies##SIZE##ptr =                          \
      btree_set<BigTypePtr<SIZE>>;                                             \
  using btree_256_map_size##SIZE##copies##SIZE##ptr =                          \
      btree_map<int, BigTypePtr<SIZE>>;                                        \
  MY_BENCHMARK3(stl_set_size##SIZE##copies##SIZE##ptr);                        \
  MY_BENCHMARK3(stl_unordered_set_size##SIZE##copies##SIZE##ptr);              \
  MY_BENCHMARK3(flat_hash_set_size##SIZE##copies##SIZE##ptr);                  \
  MY_BENCHMARK3(btree_256_set_size##SIZE##copies##SIZE##ptr);                  \
  MY_BENCHMARK3(stl_map_size##SIZE##copies##SIZE##ptr);                        \
  MY_BENCHMARK3(stl_unordered_map_size##SIZE##copies##SIZE##ptr);              \
  MY_BENCHMARK3(flat_hash_map_size##SIZE##copies##SIZE##ptr);                  \
  MY_BENCHMARK3(btree_256_map_size##SIZE##copies##SIZE##ptr)

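// Note the asymmetry in the macro above: the stl_* aliases and flat_hash_set
// store BigType by value, while flat_hash_map and the btree_* aliases store
// BigTypePtr, presumably because containers that relocate elements internally
// are the ones that benefit from the unique_ptr indirection.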
BIG_TYPE_PTR_BENCHMARKS(32);

}  // namespace
}  // namespace container_internal
ABSL_NAMESPACE_END
}  // namespace absl