// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "absl/container/internal/raw_hash_set.h"

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <deque>
#include <functional>
#include <map>
#include <memory>
#include <numeric>
#include <ostream>
#include <random>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/internal/cycleclock.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/container/internal/container_memory.h"
#include "absl/container/internal/hash_function_defaults.h"
#include "absl/container/internal/hash_policy_testing.h"
#include "absl/container/internal/hashtable_debug.h"
#include "absl/strings/string_view.h"

namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {

struct RawHashSetTestOnlyAccess {
  template <typename C>
  static auto GetSlots(const C& c) -> decltype(c.slots_) {
    return c.slots_;
  }
};

namespace {

using ::testing::DoubleNear;
using ::testing::ElementsAre;
using ::testing::Ge;
using ::testing::Lt;
using ::testing::Optional;
using ::testing::Pair;
using ::testing::UnorderedElementsAre;

TEST(Util, NormalizeCapacity) {
  EXPECT_EQ(1, NormalizeCapacity(0));
  EXPECT_EQ(1, NormalizeCapacity(1));
  EXPECT_EQ(3, NormalizeCapacity(2));
  EXPECT_EQ(3, NormalizeCapacity(3));
  EXPECT_EQ(7, NormalizeCapacity(4));
  EXPECT_EQ(7, NormalizeCapacity(7));
  EXPECT_EQ(15, NormalizeCapacity(8));
  EXPECT_EQ(15, NormalizeCapacity(15));
  EXPECT_EQ(15 * 2 + 1, NormalizeCapacity(15 + 1));
  EXPECT_EQ(15 * 2 + 1, NormalizeCapacity(15 + 2));
}
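
// As the expectations above show, valid capacities have the form 2^k - 1
// (all-ones bit patterns): NormalizeCapacity rounds a requested capacity up
// to the next such value, so capacity + 1 is a power of two and probe
// offsets can wrap with a simple bitwise mask.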

TEST(Util, GrowthAndCapacity) {
  // Verify that GrowthToLowerboundCapacity (after normalization) gives the
  // minimum capacity that has enough growth.
  for (size_t growth = 0; growth < 10000; ++growth) {
    SCOPED_TRACE(growth);
    size_t capacity = NormalizeCapacity(GrowthToLowerboundCapacity(growth));
    // The capacity is large enough for `growth`.
    EXPECT_THAT(CapacityToGrowth(capacity), Ge(growth));
    if (growth != 0 && capacity > 1) {
      // There is no smaller capacity that works.
      EXPECT_THAT(CapacityToGrowth(capacity / 2), Lt(growth));
    }
  }

  for (size_t capacity = Group::kWidth - 1; capacity < 10000;
       capacity = 2 * capacity + 1) {
    SCOPED_TRACE(capacity);
    size_t growth = CapacityToGrowth(capacity);
    EXPECT_THAT(growth, Lt(capacity));
    EXPECT_LE(GrowthToLowerboundCapacity(growth), capacity);
    EXPECT_EQ(NormalizeCapacity(GrowthToLowerboundCapacity(growth)), capacity);
  }
}

TEST(Util, probe_seq) {
  probe_seq<16> seq(0, 127);
  auto gen = [&]() {
    size_t res = seq.offset();
    seq.next();
    return res;
  };
  std::vector<size_t> offsets(8);
  std::generate_n(offsets.begin(), 8, gen);
  EXPECT_THAT(offsets, ElementsAre(0, 16, 48, 96, 32, 112, 80, 64));
  seq = probe_seq<16>(128, 127);
  std::generate_n(offsets.begin(), 8, gen);
  EXPECT_THAT(offsets, ElementsAre(0, 16, 48, 96, 32, 112, 80, 64));
}
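
// A minimal sketch (not part of the original suite) of the recurrence the
// offsets above follow: each call to next() advances by one more Width than
// the previous one, so the i-th offset is hash + Width * i * (i + 1) / 2,
// taken modulo capacity + 1 (here 128). This quadratic probing visits every
// group exactly once before wrapping.
TEST(Util, ProbeSeqTriangularSketch) {
  probe_seq<16> seq(0, 127);
  size_t expected = 0;
  for (size_t i = 1; i <= 8; ++i) {
    EXPECT_EQ(expected, seq.offset());
    expected = (expected + i * 16) & 127;  // add the i-th stride, wrap at 128
    seq.next();
  }
}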

TEST(BitMask, Smoke) {
  EXPECT_FALSE((BitMask<uint8_t, 8>(0)));
  EXPECT_TRUE((BitMask<uint8_t, 8>(5)));

  EXPECT_THAT((BitMask<uint8_t, 8>(0)), ElementsAre());
  EXPECT_THAT((BitMask<uint8_t, 8>(0x1)), ElementsAre(0));
  EXPECT_THAT((BitMask<uint8_t, 8>(0x2)), ElementsAre(1));
  EXPECT_THAT((BitMask<uint8_t, 8>(0x3)), ElementsAre(0, 1));
  EXPECT_THAT((BitMask<uint8_t, 8>(0x4)), ElementsAre(2));
  EXPECT_THAT((BitMask<uint8_t, 8>(0x5)), ElementsAre(0, 2));
  EXPECT_THAT((BitMask<uint8_t, 8>(0x55)), ElementsAre(0, 2, 4, 6));
  EXPECT_THAT((BitMask<uint8_t, 8>(0xAA)), ElementsAre(1, 3, 5, 7));
}

TEST(BitMask, WithShift) {
  // See the non-SSE version of Group for details on what this math is for.
  uint64_t ctrl = 0x1716151413121110;
  uint64_t hash = 0x12;
  constexpr uint64_t msbs = 0x8080808080808080ULL;
  constexpr uint64_t lsbs = 0x0101010101010101ULL;
  auto x = ctrl ^ (lsbs * hash);
  uint64_t mask = (x - lsbs) & ~x & msbs;
  EXPECT_EQ(0x0000000080800000, mask);

  BitMask<uint64_t, 8, 3> b(mask);
  EXPECT_EQ(*b, 2);
}
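
// A note on the math above (a reading, not normative documentation):
// `lsbs * hash` broadcasts the 7-bit hash into every byte, so each byte of
// `x` is zero exactly where the control byte equals the hash. The classic
// SWAR expression `(x - lsbs) & ~x & msbs` then sets the high bit of every
// zero byte. Borrow propagation out of a zero byte can also set the high bit
// of the next-higher byte (here byte 3, giving mask 0x0000000080800000 with
// two bits set for a single true match); such false positives are harmless
// because every candidate slot is confirmed with a full key comparison.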

TEST(BitMask, LeadingTrailing) {
  EXPECT_EQ((BitMask<uint32_t, 16>(0x00001a40).LeadingZeros()), 3);
  EXPECT_EQ((BitMask<uint32_t, 16>(0x00001a40).TrailingZeros()), 6);

  EXPECT_EQ((BitMask<uint32_t, 16>(0x00000001).LeadingZeros()), 15);
  EXPECT_EQ((BitMask<uint32_t, 16>(0x00000001).TrailingZeros()), 0);

  EXPECT_EQ((BitMask<uint32_t, 16>(0x00008000).LeadingZeros()), 0);
  EXPECT_EQ((BitMask<uint32_t, 16>(0x00008000).TrailingZeros()), 15);

  EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000008080808000).LeadingZeros()), 3);
  EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000008080808000).TrailingZeros()), 1);

  EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000000000000080).LeadingZeros()), 7);
  EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000000000000080).TrailingZeros()), 0);

  EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x8000000000000000).LeadingZeros()), 0);
  EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x8000000000000000).TrailingZeros()), 7);
}

TEST(Group, EmptyGroup) {
  for (h2_t h = 0; h != 128; ++h) EXPECT_FALSE(Group{EmptyGroup()}.Match(h));
}

TEST(Group, Match) {
  if (Group::kWidth == 16) {
    ctrl_t group[] = {kEmpty, 1, kDeleted, 3, kEmpty, 5, kSentinel, 7,
                      7,      5, 3,        1, 1,      1, 1,         1};
    EXPECT_THAT(Group{group}.Match(0), ElementsAre());
    EXPECT_THAT(Group{group}.Match(1), ElementsAre(1, 11, 12, 13, 14, 15));
    EXPECT_THAT(Group{group}.Match(3), ElementsAre(3, 10));
    EXPECT_THAT(Group{group}.Match(5), ElementsAre(5, 9));
    EXPECT_THAT(Group{group}.Match(7), ElementsAre(7, 8));
  } else if (Group::kWidth == 8) {
    ctrl_t group[] = {kEmpty, 1, 2, kDeleted, 2, 1, kSentinel, 1};
    EXPECT_THAT(Group{group}.Match(0), ElementsAre());
    EXPECT_THAT(Group{group}.Match(1), ElementsAre(1, 5, 7));
    EXPECT_THAT(Group{group}.Match(2), ElementsAre(2, 4));
  } else {
    FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth;
  }
}

TEST(Group, MatchEmpty) {
  if (Group::kWidth == 16) {
    ctrl_t group[] = {kEmpty, 1, kDeleted, 3, kEmpty, 5, kSentinel, 7,
                      7,      5, 3,        1, 1,      1, 1,         1};
    EXPECT_THAT(Group{group}.MatchEmpty(), ElementsAre(0, 4));
  } else if (Group::kWidth == 8) {
    ctrl_t group[] = {kEmpty, 1, 2, kDeleted, 2, 1, kSentinel, 1};
    EXPECT_THAT(Group{group}.MatchEmpty(), ElementsAre(0));
  } else {
    FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth;
  }
}

TEST(Group, MatchEmptyOrDeleted) {
  if (Group::kWidth == 16) {
    ctrl_t group[] = {kEmpty, 1, kDeleted, 3, kEmpty, 5, kSentinel, 7,
                      7,      5, 3,        1, 1,      1, 1,         1};
    EXPECT_THAT(Group{group}.MatchEmptyOrDeleted(), ElementsAre(0, 2, 4));
  } else if (Group::kWidth == 8) {
    ctrl_t group[] = {kEmpty, 1, 2, kDeleted, 2, 1, kSentinel, 1};
    EXPECT_THAT(Group{group}.MatchEmptyOrDeleted(), ElementsAre(0, 3));
  } else {
    FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth;
  }
}

TEST(Batch, DropDeletes) {
  constexpr size_t kCapacity = 63;
  constexpr size_t kGroupWidth = container_internal::Group::kWidth;
  std::vector<ctrl_t> ctrl(kCapacity + 1 + kGroupWidth);
  ctrl[kCapacity] = kSentinel;
  std::vector<ctrl_t> pattern = {kEmpty, 2, kDeleted, 2, kEmpty, 1, kDeleted};
  for (size_t i = 0; i != kCapacity; ++i) {
    ctrl[i] = pattern[i % pattern.size()];
    if (i < kGroupWidth - 1)
      ctrl[i + kCapacity + 1] = pattern[i % pattern.size()];
  }
  ConvertDeletedToEmptyAndFullToDeleted(ctrl.data(), kCapacity);
  ASSERT_EQ(ctrl[kCapacity], kSentinel);
  for (size_t i = 0; i < kCapacity + 1 + kGroupWidth; ++i) {
    ctrl_t expected = pattern[i % (kCapacity + 1) % pattern.size()];
    if (i == kCapacity) expected = kSentinel;
    if (expected == kDeleted) expected = kEmpty;
    if (IsFull(expected)) expected = kDeleted;
    EXPECT_EQ(ctrl[i], expected)
        << i << " " << int{pattern[i % pattern.size()]};
  }
}
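
// As the expectations above encode, ConvertDeletedToEmptyAndFullToDeleted
// rewrites the control array in place: kDeleted becomes kEmpty, full bytes
// become kDeleted (marking their slots for re-insertion during an in-place
// rehash), and the sentinel plus the cloned trailing group are preserved.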

TEST(Group, CountLeadingEmptyOrDeleted) {
  const std::vector<ctrl_t> empty_examples = {kEmpty, kDeleted};
  const std::vector<ctrl_t> full_examples = {0, 1, 2, 3, 5, 9, 127, kSentinel};

  for (ctrl_t empty : empty_examples) {
    std::vector<ctrl_t> e(Group::kWidth, empty);
    EXPECT_EQ(Group::kWidth, Group{e.data()}.CountLeadingEmptyOrDeleted());
    for (ctrl_t full : full_examples) {
      for (size_t i = 0; i != Group::kWidth; ++i) {
        std::vector<ctrl_t> f(Group::kWidth, empty);
        f[i] = full;
        EXPECT_EQ(i, Group{f.data()}.CountLeadingEmptyOrDeleted());
      }
      std::vector<ctrl_t> f(Group::kWidth, empty);
      f[Group::kWidth * 2 / 3] = full;
      f[Group::kWidth / 2] = full;
      EXPECT_EQ(
          Group::kWidth / 2, Group{f.data()}.CountLeadingEmptyOrDeleted());
    }
  }
}

struct IntPolicy {
  using slot_type = int64_t;
  using key_type = int64_t;
  using init_type = int64_t;

  static void construct(void*, int64_t* slot, int64_t v) { *slot = v; }
  static void destroy(void*, int64_t*) {}
  static void transfer(void*, int64_t* new_slot, int64_t* old_slot) {
    *new_slot = *old_slot;
  }

  static int64_t& element(slot_type* slot) { return *slot; }

  template <class F>
  static auto apply(F&& f, int64_t x) -> decltype(std::forward<F>(f)(x, x)) {
    return std::forward<F>(f)(x, x);
  }
};
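
// IntPolicy (and StringPolicy below) implement the hash-policy interface
// that raw_hash_set is parameterized on: construct/destroy/transfer manage
// slot lifetime, element maps a slot to its stored value, and apply
// decomposes an argument into a (key, value-initializer) pair so lookups
// and insertions can run against the key alone.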

class StringPolicy {
  template <class F, class K, class V,
            class = typename std::enable_if<
                std::is_convertible<const K&, absl::string_view>::value>::type>
  decltype(std::declval<F>()(
      std::declval<const absl::string_view&>(), std::piecewise_construct,
      std::declval<std::tuple<K>>(),
      std::declval<V>())) static apply_impl(F&& f,
                                            std::pair<std::tuple<K>, V> p) {
    const absl::string_view& key = std::get<0>(p.first);
    return std::forward<F>(f)(key, std::piecewise_construct, std::move(p.first),
                              std::move(p.second));
  }

 public:
  struct slot_type {
    struct ctor {};

    template <class... Ts>
    slot_type(ctor, Ts&&... ts) : pair(std::forward<Ts>(ts)...) {}

    std::pair<std::string, std::string> pair;
  };

  using key_type = std::string;
  using init_type = std::pair<std::string, std::string>;

  template <class allocator_type, class... Args>
  static void construct(allocator_type* alloc, slot_type* slot, Args... args) {
    std::allocator_traits<allocator_type>::construct(
        *alloc, slot, typename slot_type::ctor(), std::forward<Args>(args)...);
  }

  template <class allocator_type>
  static void destroy(allocator_type* alloc, slot_type* slot) {
    std::allocator_traits<allocator_type>::destroy(*alloc, slot);
  }

  template <class allocator_type>
  static void transfer(allocator_type* alloc, slot_type* new_slot,
                       slot_type* old_slot) {
    construct(alloc, new_slot, std::move(old_slot->pair));
    destroy(alloc, old_slot);
  }

  static std::pair<std::string, std::string>& element(slot_type* slot) {
    return slot->pair;
  }

  template <class F, class... Args>
  static auto apply(F&& f, Args&&... args)
      -> decltype(apply_impl(std::forward<F>(f),
                             PairArgs(std::forward<Args>(args)...))) {
    return apply_impl(std::forward<F>(f),
                      PairArgs(std::forward<Args>(args)...));
  }
};

struct StringHash : absl::Hash<absl::string_view> {
  using is_transparent = void;
};
struct StringEq : std::equal_to<absl::string_view> {
  using is_transparent = void;
};

struct StringTable
    : raw_hash_set<StringPolicy, StringHash, StringEq, std::allocator<int>> {
  using Base = typename StringTable::raw_hash_set;
  StringTable() {}
  using Base::Base;
};

struct IntTable
    : raw_hash_set<IntPolicy, container_internal::hash_default_hash<int64_t>,
                   std::equal_to<int64_t>, std::allocator<int64_t>> {
  using Base = typename IntTable::raw_hash_set;
  using Base::Base;
};

template <typename T>
struct CustomAlloc : std::allocator<T> {
  CustomAlloc() {}

  template <typename U>
  CustomAlloc(const CustomAlloc<U>& other) {}

  template <class U>
  struct rebind {
    using other = CustomAlloc<U>;
  };
};

struct CustomAllocIntTable
    : raw_hash_set<IntPolicy, container_internal::hash_default_hash<int64_t>,
                   std::equal_to<int64_t>, CustomAlloc<int64_t>> {
  using Base = typename CustomAllocIntTable::raw_hash_set;
  using Base::Base;
};

struct BadFastHash {
  template <class T>
  size_t operator()(const T&) const {
    return 0;
  }
};

struct BadTable : raw_hash_set<IntPolicy, BadFastHash, std::equal_to<int>,
                               std::allocator<int>> {
  using Base = typename BadTable::raw_hash_set;
  BadTable() {}
  using Base::Base;
};

TEST(Table, EmptyFunctorOptimization) {
  static_assert(std::is_empty<std::equal_to<absl::string_view>>::value, "");
  static_assert(std::is_empty<std::allocator<int>>::value, "");

  struct MockTable {
    void* ctrl;
    void* slots;
    size_t size;
    size_t capacity;
    size_t growth_left;
    void* infoz;
  };
  struct StatelessHash {
    size_t operator()(absl::string_view) const { return 0; }
  };
  struct StatefulHash : StatelessHash {
    size_t dummy;
  };

  EXPECT_EQ(
      sizeof(MockTable),
      sizeof(
          raw_hash_set<StringPolicy, StatelessHash,
                       std::equal_to<absl::string_view>, std::allocator<int>>));

  EXPECT_EQ(
      sizeof(MockTable) + sizeof(StatefulHash),
      sizeof(
          raw_hash_set<StringPolicy, StatefulHash,
                       std::equal_to<absl::string_view>, std::allocator<int>>));
}
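
// The sizeof checks above rely on empty function objects occupying no
// storage (empty-base optimization inside the table's internals): stateless
// hash, equality, and allocator functors add nothing to the table's
// footprint, while the stateful StatefulHash adds exactly its own size.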

TEST(Table, Empty) {
  IntTable t;
  EXPECT_EQ(0, t.size());
  EXPECT_TRUE(t.empty());
}

TEST(Table, LookupEmpty) {
  IntTable t;
  auto it = t.find(0);
  EXPECT_TRUE(it == t.end());
}

TEST(Table, Insert1) {
  IntTable t;
  EXPECT_TRUE(t.find(0) == t.end());
  auto res = t.emplace(0);
  EXPECT_TRUE(res.second);
  EXPECT_THAT(*res.first, 0);
  EXPECT_EQ(1, t.size());
  EXPECT_THAT(*t.find(0), 0);
}

TEST(Table, Insert2) {
  IntTable t;
  EXPECT_TRUE(t.find(0) == t.end());
  auto res = t.emplace(0);
  EXPECT_TRUE(res.second);
  EXPECT_THAT(*res.first, 0);
  EXPECT_EQ(1, t.size());
  EXPECT_TRUE(t.find(1) == t.end());
  res = t.emplace(1);
  EXPECT_TRUE(res.second);
  EXPECT_THAT(*res.first, 1);
  EXPECT_EQ(2, t.size());
  EXPECT_THAT(*t.find(0), 0);
  EXPECT_THAT(*t.find(1), 1);
}

TEST(Table, InsertCollision) {
  BadTable t;
  EXPECT_TRUE(t.find(1) == t.end());
  auto res = t.emplace(1);
  EXPECT_TRUE(res.second);
  EXPECT_THAT(*res.first, 1);
  EXPECT_EQ(1, t.size());

  EXPECT_TRUE(t.find(2) == t.end());
  res = t.emplace(2);
  EXPECT_THAT(*res.first, 2);
  EXPECT_TRUE(res.second);
  EXPECT_EQ(2, t.size());

  EXPECT_THAT(*t.find(1), 1);
  EXPECT_THAT(*t.find(2), 2);
}

// Test that we do not insert an already-present element even when the search
// has to walk through many groups containing deleted elements.
TEST(Table, InsertCollisionAndFindAfterDelete) {
  BadTable t;  // all elements go to the same group.
  // Have at least 2 groups with Group::kWidth collisions
  // plus some extra collisions in the last group.
  constexpr size_t kNumInserts = Group::kWidth * 2 + 5;
  for (size_t i = 0; i < kNumInserts; ++i) {
    auto res = t.emplace(i);
    EXPECT_TRUE(res.second);
    EXPECT_THAT(*res.first, i);
    EXPECT_EQ(i + 1, t.size());
  }

  // Remove elements one by one and check
  // that we can still find all other elements.
  for (size_t i = 0; i < kNumInserts; ++i) {
    EXPECT_EQ(1, t.erase(i)) << i;
    for (size_t j = i + 1; j < kNumInserts; ++j) {
      EXPECT_THAT(*t.find(j), j);
      auto res = t.emplace(j);
      EXPECT_FALSE(res.second) << i << " " << j;
      EXPECT_THAT(*res.first, j);
      EXPECT_EQ(kNumInserts - i - 1, t.size());
    }
  }
  EXPECT_TRUE(t.empty());
}

TEST(Table, LazyEmplace) {
  StringTable t;
  bool called = false;
  auto it = t.lazy_emplace("abc", [&](const StringTable::constructor& f) {
    called = true;
    f("abc", "ABC");
  });
  EXPECT_TRUE(called);
  EXPECT_THAT(*it, Pair("abc", "ABC"));
  called = false;
  it = t.lazy_emplace("abc", [&](const StringTable::constructor& f) {
    called = true;
    f("abc", "DEF");
  });
  EXPECT_FALSE(called);
  EXPECT_THAT(*it, Pair("abc", "ABC"));
}

TEST(Table, ContainsEmpty) {
  IntTable t;

  EXPECT_FALSE(t.contains(0));
}

TEST(Table, Contains1) {
  IntTable t;

  EXPECT_TRUE(t.insert(0).second);
  EXPECT_TRUE(t.contains(0));
  EXPECT_FALSE(t.contains(1));

  EXPECT_EQ(1, t.erase(0));
  EXPECT_FALSE(t.contains(0));
}

TEST(Table, Contains2) {
  IntTable t;

  EXPECT_TRUE(t.insert(0).second);
  EXPECT_TRUE(t.contains(0));
  EXPECT_FALSE(t.contains(1));

  t.clear();
  EXPECT_FALSE(t.contains(0));
}

int decompose_constructed;
struct DecomposeType {
  DecomposeType(int i) : i(i) {  // NOLINT
    ++decompose_constructed;
  }

  explicit DecomposeType(const char* d) : DecomposeType(*d) {}

  int i;
};

struct DecomposeHash {
  using is_transparent = void;
  size_t operator()(DecomposeType a) const { return a.i; }
  size_t operator()(int a) const { return a; }
  size_t operator()(const char* a) const { return *a; }
};

struct DecomposeEq {
  using is_transparent = void;
  bool operator()(DecomposeType a, DecomposeType b) const { return a.i == b.i; }
  bool operator()(DecomposeType a, int b) const { return a.i == b; }
  bool operator()(DecomposeType a, const char* b) const { return a.i == *b; }
};

struct DecomposePolicy {
  using slot_type = DecomposeType;
  using key_type = DecomposeType;
  using init_type = DecomposeType;

  template <typename T>
  static void construct(void*, DecomposeType* slot, T&& v) {
    *slot = DecomposeType(std::forward<T>(v));
  }
  static void destroy(void*, DecomposeType*) {}
  static DecomposeType& element(slot_type* slot) { return *slot; }

  template <class F, class T>
  static auto apply(F&& f, const T& x) -> decltype(std::forward<F>(f)(x, x)) {
    return std::forward<F>(f)(x, x);
  }
};

template <typename Hash, typename Eq>
void TestDecompose(bool construct_three) {
  DecomposeType elem{0};
  const int one = 1;
  const char* three_p = "3";
  const auto& three = three_p;

  raw_hash_set<DecomposePolicy, Hash, Eq, std::allocator<int>> set1;

  decompose_constructed = 0;
  int expected_constructed = 0;
  EXPECT_EQ(expected_constructed, decompose_constructed);
  set1.insert(elem);
  EXPECT_EQ(expected_constructed, decompose_constructed);
  set1.insert(1);
  EXPECT_EQ(++expected_constructed, decompose_constructed);
  set1.emplace("3");
  EXPECT_EQ(++expected_constructed, decompose_constructed);
  EXPECT_EQ(expected_constructed, decompose_constructed);

  {  // insert(T&&)
    set1.insert(1);
    EXPECT_EQ(expected_constructed, decompose_constructed);
  }

  {  // insert(const T&)
    set1.insert(one);
    EXPECT_EQ(expected_constructed, decompose_constructed);
  }

  {  // insert(hint, T&&)
    set1.insert(set1.begin(), 1);
    EXPECT_EQ(expected_constructed, decompose_constructed);
  }

  {  // insert(hint, const T&)
    set1.insert(set1.begin(), one);
    EXPECT_EQ(expected_constructed, decompose_constructed);
  }

  {  // emplace(...)
    set1.emplace(1);
    EXPECT_EQ(expected_constructed, decompose_constructed);
    set1.emplace("3");
    expected_constructed += construct_three;
    EXPECT_EQ(expected_constructed, decompose_constructed);
    set1.emplace(one);
    EXPECT_EQ(expected_constructed, decompose_constructed);
    set1.emplace(three);
    expected_constructed += construct_three;
    EXPECT_EQ(expected_constructed, decompose_constructed);
  }

  {  // emplace_hint(...)
    set1.emplace_hint(set1.begin(), 1);
    EXPECT_EQ(expected_constructed, decompose_constructed);
    set1.emplace_hint(set1.begin(), "3");
    expected_constructed += construct_three;
    EXPECT_EQ(expected_constructed, decompose_constructed);
    set1.emplace_hint(set1.begin(), one);
    EXPECT_EQ(expected_constructed, decompose_constructed);
    set1.emplace_hint(set1.begin(), three);
    expected_constructed += construct_three;
    EXPECT_EQ(expected_constructed, decompose_constructed);
  }
}

TEST(Table, Decompose) {
  TestDecompose<DecomposeHash, DecomposeEq>(false);

  struct TransparentHashIntOverload {
    size_t operator()(DecomposeType a) const { return a.i; }
    size_t operator()(int a) const { return a; }
  };
  struct TransparentEqIntOverload {
    bool operator()(DecomposeType a, DecomposeType b) const {
      return a.i == b.i;
    }
    bool operator()(DecomposeType a, int b) const { return a.i == b; }
  };
  TestDecompose<TransparentHashIntOverload, DecomposeEq>(true);
  TestDecompose<TransparentHashIntOverload, TransparentEqIntOverload>(true);
  TestDecompose<DecomposeHash, TransparentEqIntOverload>(true);
}
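
// Summary of the cases above: a heterogeneous insert or emplace constructs a
// DecomposeType only when the hash or equality functor lacks an overload for
// the raw argument type. DecomposeHash and DecomposeEq both handle
// const char* directly, so construct_three is false; the int-only overload
// sets force "3" to be converted to DecomposeType first, so construct_three
// is true.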

// Returns the largest m such that a table with m elements has the same number
// of buckets as a table with n elements.
size_t MaxDensitySize(size_t n) {
  IntTable t;
  t.reserve(n);
  for (size_t i = 0; i != n; ++i) t.emplace(i);
  const size_t c = t.bucket_count();
  while (c == t.bucket_count()) t.emplace(n++);
  return t.size() - 1;
}

struct Modulo1000Hash {
  size_t operator()(int x) const { return x % 1000; }
};

struct Modulo1000HashTable
    : public raw_hash_set<IntPolicy, Modulo1000Hash, std::equal_to<int>,
                          std::allocator<int>> {};

// Test that a rehash with no resize happens when there are many deleted
// slots.
TEST(Table, RehashWithNoResize) {
  Modulo1000HashTable t;
  // Add keys that all collide (multiples of 1000 share the same
  // Modulo1000Hash value) so that we get at least kMinFullGroups groups with
  // Group::kWidth collisions. Then fill up to MaxDensitySize.
  const size_t kMinFullGroups = 7;
  std::vector<int> keys;
  for (size_t i = 0; i < MaxDensitySize(Group::kWidth * kMinFullGroups); ++i) {
    int k = i * 1000;
    t.emplace(k);
    keys.push_back(k);
  }
  const size_t capacity = t.capacity();

  // Remove elements from all groups except the first and the last one.
  // All elements removed from full groups will be marked as kDeleted.
  const size_t erase_begin = Group::kWidth / 2;
  const size_t erase_end = (t.size() / Group::kWidth - 1) * Group::kWidth;
  for (size_t i = erase_begin; i < erase_end; ++i) {
    EXPECT_EQ(1, t.erase(keys[i])) << i;
  }
  keys.erase(keys.begin() + erase_begin, keys.begin() + erase_end);

  auto last_key = keys.back();
  size_t last_key_num_probes = GetHashtableDebugNumProbes(t, last_key);

  // Make sure that we have to make a lot of probes for the last key.
  ASSERT_GT(last_key_num_probes, kMinFullGroups);

  int x = 1;
  // Insert and erase one element at a time until an in-place rehash happens.
  while (last_key_num_probes == GetHashtableDebugNumProbes(t, last_key)) {
    t.emplace(x);
    ASSERT_EQ(capacity, t.capacity());
    // All elements should be there.
    ASSERT_TRUE(t.find(x) != t.end()) << x;
    for (const auto& k : keys) {
      ASSERT_TRUE(t.find(k) != t.end()) << k;
    }
    t.erase(x);
    ++x;
  }
}

TEST(Table, InsertEraseStressTest) {
  IntTable t;
  const size_t kMinElementCount = 250;
  std::deque<int> keys;
  size_t i = 0;
  for (; i < MaxDensitySize(kMinElementCount); ++i) {
    t.emplace(i);
    keys.push_back(i);
  }
  const size_t kNumIterations = 1000000;
  for (; i < kNumIterations; ++i) {
    ASSERT_EQ(1, t.erase(keys.front()));
    keys.pop_front();
    t.emplace(i);
    keys.push_back(i);
  }
}

TEST(Table, InsertOverloads) {
  StringTable t;
  // These should all trigger the insert(init_type) overload.
  t.insert({{}, {}});
  t.insert({"ABC", {}});
  t.insert({"DEF", "!!!"});

  EXPECT_THAT(t, UnorderedElementsAre(Pair("", ""), Pair("ABC", ""),
                                      Pair("DEF", "!!!")));
}

TEST(Table, LargeTable) {
  IntTable t;
  for (int64_t i = 0; i != 100000; ++i) t.emplace(i << 40);
  for (int64_t i = 0; i != 100000; ++i) ASSERT_EQ(i << 40, *t.find(i << 40));
}

// Times out if copying is quadratic, as it once was in Rust's hash map.
TEST(Table, EnsureNonQuadraticAsInRust) {
  static const size_t kLargeSize = 1 << 15;

  IntTable t;
  for (size_t i = 0; i != kLargeSize; ++i) {
    t.insert(i);
  }

  // If this is quadratic, the test will time out.
  IntTable t2;
  for (const auto& entry : t) t2.insert(entry);
}

TEST(Table, ClearBug) {
  IntTable t;
  constexpr size_t capacity = container_internal::Group::kWidth - 1;
  constexpr size_t max_size = capacity / 2 + 1;
  for (size_t i = 0; i < max_size; ++i) {
    t.insert(i);
  }
  ASSERT_EQ(capacity, t.capacity());
  intptr_t original = reinterpret_cast<intptr_t>(&*t.find(2));
  t.clear();
  ASSERT_EQ(capacity, t.capacity());
  for (size_t i = 0; i < max_size; ++i) {
    t.insert(i);
  }
  ASSERT_EQ(capacity, t.capacity());
  intptr_t second = reinterpret_cast<intptr_t>(&*t.find(2));
  // We are checking that original and second are close enough to each other
  // that they are probably still in the same group.  This is not strictly
  // guaranteed.
  EXPECT_LT(std::abs(original - second),
            capacity * sizeof(IntTable::value_type));
}

TEST(Table, Erase) {
  IntTable t;
  EXPECT_TRUE(t.find(0) == t.end());
  auto res = t.emplace(0);
  EXPECT_TRUE(res.second);
  EXPECT_EQ(1, t.size());
  t.erase(res.first);
  EXPECT_EQ(0, t.size());
  EXPECT_TRUE(t.find(0) == t.end());
}

TEST(Table, EraseMaintainsValidIterator) {
  IntTable t;
  const int kNumElements = 100;
  for (int i = 0; i < kNumElements; ++i) {
    EXPECT_TRUE(t.emplace(i).second);
  }
  EXPECT_EQ(t.size(), kNumElements);

  int num_erase_calls = 0;
  auto it = t.begin();
  while (it != t.end()) {
    t.erase(it++);
    num_erase_calls++;
  }

  EXPECT_TRUE(t.empty());
  EXPECT_EQ(num_erase_calls, kNumElements);
}

// Collect N bad keys with the following algorithm:
// 1. Create an empty table and reserve it to 2 * N.
// 2. Insert N elements.
// 3. Take the first Group::kWidth - 1 keys into the bad_keys array.
// 4. Clear the table without resizing.
// 5. Repeat from step 2 until N keys are collected.
std::vector<int64_t> CollectBadMergeKeys(size_t N) {
  static constexpr int kGroupSize = Group::kWidth - 1;

  auto topk_range = [](size_t b, size_t e,
                       IntTable* t) -> std::vector<int64_t> {
    for (size_t i = b; i != e; ++i) {
      t->emplace(i);
    }
    std::vector<int64_t> res;
    res.reserve(kGroupSize);
    auto it = t->begin();
    for (size_t i = b; i != e && i != b + kGroupSize; ++i, ++it) {
      res.push_back(*it);
    }
    return res;
  };

  std::vector<int64_t> bad_keys;
  bad_keys.reserve(N);
  IntTable t;
  t.reserve(N * 2);

  for (size_t b = 0; bad_keys.size() < N; b += N) {
    auto keys = topk_range(b, b + N, &t);
    bad_keys.insert(bad_keys.end(), keys.begin(), keys.end());
    t.erase(t.begin(), t.end());
    EXPECT_TRUE(t.empty());
  }
  return bad_keys;
}

struct ProbeStats {
  // Number of elements with specific probe length over all tested tables.
  std::vector<size_t> all_probes_histogram;
  // Ratios total_probe_length/size for every tested table.
  std::vector<double> single_table_ratios;

  friend ProbeStats operator+(const ProbeStats& a, const ProbeStats& b) {
    ProbeStats res = a;
    res.all_probes_histogram.resize(std::max(res.all_probes_histogram.size(),
                                             b.all_probes_histogram.size()));
    std::transform(b.all_probes_histogram.begin(), b.all_probes_histogram.end(),
                   res.all_probes_histogram.begin(),
                   res.all_probes_histogram.begin(), std::plus<size_t>());
    res.single_table_ratios.insert(res.single_table_ratios.end(),
                                   b.single_table_ratios.begin(),
                                   b.single_table_ratios.end());
    return res;
  }

  // Average ratio total_probe_length/size over tables.
  double AvgRatio() const {
    return std::accumulate(single_table_ratios.begin(),
                           single_table_ratios.end(), 0.0) /
           single_table_ratios.size();
  }

  // Maximum ratio total_probe_length/size over tables.
  double MaxRatio() const {
    return *std::max_element(single_table_ratios.begin(),
                             single_table_ratios.end());
  }

  // Percentile ratio total_probe_length/size over tables.
  double PercentileRatio(double Percentile = 0.95) const {
    auto r = single_table_ratios;
    auto mid = r.begin() + static_cast<size_t>(r.size() * Percentile);
    if (mid != r.end()) {
      std::nth_element(r.begin(), mid, r.end());
      return *mid;
    } else {
      return MaxRatio();
    }
  }

  // Maximum probe length over all elements and all tables.
  size_t MaxProbe() const { return all_probes_histogram.size(); }

  // Fraction of elements with specified probe length.
  std::vector<double> ProbeNormalizedHistogram() const {
    double total_elements = std::accumulate(all_probes_histogram.begin(),
                                            all_probes_histogram.end(), 0ull);
    std::vector<double> res;
    for (size_t p : all_probes_histogram) {
      res.push_back(p / total_elements);
    }
    return res;
  }

  size_t PercentileProbe(double Percentile = 0.99) const {
    size_t idx = 0;
    for (double p : ProbeNormalizedHistogram()) {
      if (Percentile > p) {
        Percentile -= p;
        ++idx;
      } else {
        return idx;
      }
    }
    return idx;
  }

  friend std::ostream& operator<<(std::ostream& out, const ProbeStats& s) {
    out << "{AvgRatio:" << s.AvgRatio() << ", MaxRatio:" << s.MaxRatio()
        << ", PercentileRatio:" << s.PercentileRatio()
        << ", MaxProbe:" << s.MaxProbe() << ", Probes=[";
    for (double p : s.ProbeNormalizedHistogram()) {
      out << p << ",";
    }
    out << "]}";

    return out;
  }
};

struct ExpectedStats {
  double avg_ratio;
  double max_ratio;
  std::vector<std::pair<double, double>> percentile_ratios;
  std::vector<std::pair<double, double>> percentile_probes;

  friend std::ostream& operator<<(std::ostream& out, const ExpectedStats& s) {
    out << "{AvgRatio:" << s.avg_ratio << ", MaxRatio:" << s.max_ratio
        << ", PercentileRatios: [";
    for (auto el : s.percentile_ratios) {
      out << el.first << ":" << el.second << ", ";
    }
    out << "], PercentileProbes: [";
    for (auto el : s.percentile_probes) {
      out << el.first << ":" << el.second << ", ";
    }
    out << "]}";

    return out;
  }
};

void VerifyStats(size_t size, const ExpectedStats& exp,
                 const ProbeStats& stats) {
  EXPECT_LT(stats.AvgRatio(), exp.avg_ratio) << size << " " << stats;
  EXPECT_LT(stats.MaxRatio(), exp.max_ratio) << size << " " << stats;
  for (auto pr : exp.percentile_ratios) {
    EXPECT_LE(stats.PercentileRatio(pr.first), pr.second)
        << size << " " << pr.first << " " << stats;
  }

  for (auto pr : exp.percentile_probes) {
    EXPECT_LE(stats.PercentileProbe(pr.first), pr.second)
        << size << " " << pr.first << " " << stats;
  }
}

using ProbeStatsPerSize = std::map<size_t, ProbeStats>;

// Collect total ProbeStats on num_iters iterations of the following
// algorithm:
// 1. Create a new table and reserve it to keys.size() * 2.
// 2. Insert all keys XORed with the seed.
// 3. Collect ProbeStats from the final table.
ProbeStats CollectProbeStatsOnKeysXoredWithSeed(
    const std::vector<int64_t>& keys, size_t num_iters) {
  const size_t reserve_size = keys.size() * 2;

  ProbeStats stats;

  int64_t seed = 0x71b1a19b907d6e33;
  while (num_iters--) {
    seed = static_cast<int64_t>(static_cast<uint64_t>(seed) * 17 + 13);
    IntTable t1;
    t1.reserve(reserve_size);
    for (const auto& key : keys) {
      t1.emplace(key ^ seed);
    }

    auto probe_histogram = GetHashtableDebugNumProbesHistogram(t1);
    stats.all_probes_histogram.resize(
        std::max(stats.all_probes_histogram.size(), probe_histogram.size()));
    std::transform(probe_histogram.begin(), probe_histogram.end(),
                   stats.all_probes_histogram.begin(),
                   stats.all_probes_histogram.begin(), std::plus<size_t>());

    size_t total_probe_seq_length = 0;
    for (size_t i = 0; i < probe_histogram.size(); ++i) {
      total_probe_seq_length += i * probe_histogram[i];
    }
    stats.single_table_ratios.push_back(total_probe_seq_length * 1.0 /
                                        keys.size());
    t1.erase(t1.begin(), t1.end());
  }
  return stats;
}

ExpectedStats XorSeedExpectedStats() {
  constexpr bool kRandomizesInserts =
#ifdef NDEBUG
      false;
#else   // NDEBUG
      true;
#endif  // NDEBUG

  // The effective load factor is larger in non-opt mode because we insert
  // elements out of order.
  switch (container_internal::Group::kWidth) {
    case 8:
      if (kRandomizesInserts) {
        return {0.05,
                1.0,
                {{0.95, 0.5}},
                {{0.95, 0}, {0.99, 2}, {0.999, 4}, {0.9999, 10}}};
      } else {
        return {0.05,
                2.0,
                {{0.95, 0.1}},
                {{0.95, 0}, {0.99, 2}, {0.999, 4}, {0.9999, 10}}};
      }
    case 16:
      if (kRandomizesInserts) {
        return {0.1,
                1.0,
                {{0.95, 0.1}},
                {{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}};
      } else {
        return {0.05,
                1.0,
                {{0.95, 0.05}},
                {{0.95, 0}, {0.99, 1}, {0.999, 4}, {0.9999, 10}}};
      }
  }
  ABSL_RAW_LOG(FATAL, "%s", "Unknown Group width");
  return {};
}

TEST(Table, DISABLED_EnsureNonQuadraticTopNXorSeedByProbeSeqLength) {
  ProbeStatsPerSize stats;
  std::vector<size_t> sizes = {Group::kWidth << 5, Group::kWidth << 10};
  for (size_t size : sizes) {
    stats[size] =
        CollectProbeStatsOnKeysXoredWithSeed(CollectBadMergeKeys(size), 200);
  }
  auto expected = XorSeedExpectedStats();
  for (size_t size : sizes) {
    auto& stat = stats[size];
    VerifyStats(size, expected, stat);
  }
}

// Collect total ProbeStats on num_iters iterations of the following
// algorithm:
// 1. Create a new table.
// 2. Select 10% of the keys and, for each, insert the 10 elements
//    key * 17 + j * 13 for j in [0, 10).
// 3. Collect ProbeStats from the final table.
ProbeStats CollectProbeStatsOnLinearlyTransformedKeys(
    const std::vector<int64_t>& keys, size_t num_iters) {
  ProbeStats stats;

  std::random_device rd;
  std::mt19937 rng(rd());
  auto linear_transform = [](size_t x, size_t y) { return x * 17 + y * 13; };
  std::uniform_int_distribution<size_t> dist(0, keys.size() - 1);
  while (num_iters--) {
    IntTable t1;
    size_t num_keys = keys.size() / 10;
    size_t start = dist(rng);
    for (size_t i = 0; i != num_keys; ++i) {
      for (size_t j = 0; j != 10; ++j) {
        t1.emplace(linear_transform(keys[(i + start) % keys.size()], j));
      }
    }

    auto probe_histogram = GetHashtableDebugNumProbesHistogram(t1);
    stats.all_probes_histogram.resize(
        std::max(stats.all_probes_histogram.size(), probe_histogram.size()));
    std::transform(probe_histogram.begin(), probe_histogram.end(),
                   stats.all_probes_histogram.begin(),
                   stats.all_probes_histogram.begin(), std::plus<size_t>());

    size_t total_probe_seq_length = 0;
    for (size_t i = 0; i < probe_histogram.size(); ++i) {
      total_probe_seq_length += i * probe_histogram[i];
    }
    stats.single_table_ratios.push_back(total_probe_seq_length * 1.0 /
                                        t1.size());
    t1.erase(t1.begin(), t1.end());
  }
  return stats;
}

ExpectedStats LinearTransformExpectedStats() {
  constexpr bool kRandomizesInserts =
#ifdef NDEBUG
      false;
#else   // NDEBUG
      true;
#endif  // NDEBUG

  // The effective load factor is larger in non-opt mode because we insert
  // elements out of order.
  switch (container_internal::Group::kWidth) {
    case 8:
      if (kRandomizesInserts) {
        return {0.1,
                0.5,
                {{0.95, 0.3}},
                {{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}};
      } else {
        return {0.15,
                0.5,
                {{0.95, 0.3}},
                {{0.95, 0}, {0.99, 3}, {0.999, 15}, {0.9999, 25}}};
      }
    case 16:
      if (kRandomizesInserts) {
        return {0.1,
                0.4,
                {{0.95, 0.3}},
                {{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}};
      } else {
        return {0.05,
                0.2,
                {{0.95, 0.1}},
                {{0.95, 0}, {0.99, 1}, {0.999, 6}, {0.9999, 10}}};
      }
  }
  ABSL_RAW_LOG(FATAL, "%s", "Unknown Group width");
  return {};
}

TEST(Table, DISABLED_EnsureNonQuadraticTopNLinearTransformByProbeSeqLength) {
  ProbeStatsPerSize stats;
  std::vector<size_t> sizes = {Group::kWidth << 5, Group::kWidth << 10};
  for (size_t size : sizes) {
    stats[size] = CollectProbeStatsOnLinearlyTransformedKeys(
        CollectBadMergeKeys(size), 300);
  }
  auto expected = LinearTransformExpectedStats();
  for (size_t size : sizes) {
    auto& stat = stats[size];
    VerifyStats(size, expected, stat);
  }
}

TEST(Table, EraseCollision) {
  BadTable t;

  // 1 2 3
  t.emplace(1);
  t.emplace(2);
  t.emplace(3);
  EXPECT_THAT(*t.find(1), 1);
  EXPECT_THAT(*t.find(2), 2);
  EXPECT_THAT(*t.find(3), 3);
  EXPECT_EQ(3, t.size());

  // 1 DELETED 3
  t.erase(t.find(2));
  EXPECT_THAT(*t.find(1), 1);
  EXPECT_TRUE(t.find(2) == t.end());
  EXPECT_THAT(*t.find(3), 3);
  EXPECT_EQ(2, t.size());

  // DELETED DELETED 3
  t.erase(t.find(1));
  EXPECT_TRUE(t.find(1) == t.end());
  EXPECT_TRUE(t.find(2) == t.end());
  EXPECT_THAT(*t.find(3), 3);
  EXPECT_EQ(1, t.size());

  // DELETED DELETED DELETED
  t.erase(t.find(3));
  EXPECT_TRUE(t.find(1) == t.end());
  EXPECT_TRUE(t.find(2) == t.end());
  EXPECT_TRUE(t.find(3) == t.end());
  EXPECT_EQ(0, t.size());
}

TEST(Table, EraseInsertProbing) {
  BadTable t(100);

  // 1 2 3 4
  t.emplace(1);
  t.emplace(2);
  t.emplace(3);
  t.emplace(4);

  // 1 DELETED 3 DELETED
  t.erase(t.find(2));
  t.erase(t.find(4));

  // 1 10 3 11 12
  t.emplace(10);
  t.emplace(11);
  t.emplace(12);

  EXPECT_EQ(5, t.size());
  EXPECT_THAT(t, UnorderedElementsAre(1, 10, 3, 11, 12));
}

TEST(Table, Clear) {
  IntTable t;
  EXPECT_TRUE(t.find(0) == t.end());
  t.clear();
  EXPECT_TRUE(t.find(0) == t.end());
  auto res = t.emplace(0);
  EXPECT_TRUE(res.second);
  EXPECT_EQ(1, t.size());
  t.clear();
  EXPECT_EQ(0, t.size());
  EXPECT_TRUE(t.find(0) == t.end());
}

TEST(Table, Swap) {
  IntTable t;
  EXPECT_TRUE(t.find(0) == t.end());
  auto res = t.emplace(0);
  EXPECT_TRUE(res.second);
  EXPECT_EQ(1, t.size());
  IntTable u;
  t.swap(u);
  EXPECT_EQ(0, t.size());
  EXPECT_EQ(1, u.size());
  EXPECT_TRUE(t.find(0) == t.end());
  EXPECT_THAT(*u.find(0), 0);
}

TEST(Table, Rehash) {
  IntTable t;
  EXPECT_TRUE(t.find(0) == t.end());
  t.emplace(0);
  t.emplace(1);
  EXPECT_EQ(2, t.size());
  t.rehash(128);
  EXPECT_EQ(2, t.size());
  EXPECT_THAT(*t.find(0), 0);
  EXPECT_THAT(*t.find(1), 1);
}

TEST(Table, RehashDoesNotRehashWhenNotNecessary) {
  IntTable t;
  t.emplace(0);
  t.emplace(1);
  auto* p = &*t.find(0);
  t.rehash(1);
  EXPECT_EQ(p, &*t.find(0));
}

TEST(Table, RehashZeroDoesNotAllocateOnEmptyTable) {
  IntTable t;
  t.rehash(0);
  EXPECT_EQ(0, t.bucket_count());
}

TEST(Table, RehashZeroDeallocatesEmptyTable) {
  IntTable t;
  t.emplace(0);
  t.clear();
  EXPECT_NE(0, t.bucket_count());
  t.rehash(0);
  EXPECT_EQ(0, t.bucket_count());
}

TEST(Table, RehashZeroForcesRehash) {
  IntTable t;
  t.emplace(0);
  t.emplace(1);
  auto* p = &*t.find(0);
  t.rehash(0);
  EXPECT_NE(p, &*t.find(0));
}

TEST(Table, ConstructFromInitList) {
  using P = std::pair<std::string, std::string>;
  struct Q {
    operator P() const { return {}; }
  };
  StringTable t = {P(), Q(), {}, {{}, {}}};
}

TEST(Table, CopyConstruct) {
  IntTable t;
  t.emplace(0);
  EXPECT_EQ(1, t.size());
  {
    IntTable u(t);
    EXPECT_EQ(1, u.size());
    EXPECT_THAT(*u.find(0), 0);
  }
  {
    IntTable u{t};
    EXPECT_EQ(1, u.size());
    EXPECT_THAT(*u.find(0), 0);
  }
  {
    IntTable u = t;
    EXPECT_EQ(1, u.size());
    EXPECT_THAT(*u.find(0), 0);
  }
}

TEST(Table, CopyConstructWithAlloc) {
  StringTable t;
  t.emplace("a", "b");
  EXPECT_EQ(1, t.size());
  StringTable u(t, Alloc<std::pair<std::string, std::string>>());
  EXPECT_EQ(1, u.size());
  EXPECT_THAT(*u.find("a"), Pair("a", "b"));
}

struct ExplicitAllocIntTable
    : raw_hash_set<IntPolicy, container_internal::hash_default_hash<int64_t>,
                   std::equal_to<int64_t>, Alloc<int64_t>> {
  ExplicitAllocIntTable() {}
};

TEST(Table, AllocWithExplicitCtor) {
  ExplicitAllocIntTable t;
  EXPECT_EQ(0, t.size());
}

TEST(Table, MoveConstruct) {
  {
    StringTable t;
    t.emplace("a", "b");
    EXPECT_EQ(1, t.size());

    StringTable u(std::move(t));
    EXPECT_EQ(1, u.size());
    EXPECT_THAT(*u.find("a"), Pair("a", "b"));
  }
  {
    StringTable t;
    t.emplace("a", "b");
    EXPECT_EQ(1, t.size());

    StringTable u{std::move(t)};
    EXPECT_EQ(1, u.size());
    EXPECT_THAT(*u.find("a"), Pair("a", "b"));
  }
  {
    StringTable t;
    t.emplace("a", "b");
    EXPECT_EQ(1, t.size());

    StringTable u = std::move(t);
    EXPECT_EQ(1, u.size());
    EXPECT_THAT(*u.find("a"), Pair("a", "b"));
  }
}

TEST(Table, MoveConstructWithAlloc) {
  StringTable t;
  t.emplace("a", "b");
  EXPECT_EQ(1, t.size());
  StringTable u(std::move(t), Alloc<std::pair<std::string, std::string>>());
  EXPECT_EQ(1, u.size());
  EXPECT_THAT(*u.find("a"), Pair("a", "b"));
}

TEST(Table, CopyAssign) {
  StringTable t;
  t.emplace("a", "b");
  EXPECT_EQ(1, t.size());
  StringTable u;
  u = t;
  EXPECT_EQ(1, u.size());
  EXPECT_THAT(*u.find("a"), Pair("a", "b"));
}

TEST(Table, CopySelfAssign) {
  StringTable t;
  t.emplace("a", "b");
  EXPECT_EQ(1, t.size());
  t = *&t;
  EXPECT_EQ(1, t.size());
  EXPECT_THAT(*t.find("a"), Pair("a", "b"));
}

TEST(Table, MoveAssign) {
  StringTable t;
  t.emplace("a", "b");
  EXPECT_EQ(1, t.size());
  StringTable u;
  u = std::move(t);
  EXPECT_EQ(1, u.size());
  EXPECT_THAT(*u.find("a"), Pair("a", "b"));
}

TEST(Table, Equality) {
  StringTable t;
  std::vector<std::pair<std::string, std::string>> v = {{"a", "b"},
                                                        {"aa", "bb"}};
  t.insert(std::begin(v), std::end(v));
  StringTable u = t;
  EXPECT_EQ(u, t);
}

TEST(Table, Equality2) {
  StringTable t;
  std::vector<std::pair<std::string, std::string>> v1 = {{"a", "b"},
                                                         {"aa", "bb"}};
  t.insert(std::begin(v1), std::end(v1));
  StringTable u;
  std::vector<std::pair<std::string, std::string>> v2 = {{"a", "a"},
                                                         {"aa", "aa"}};
  u.insert(std::begin(v2), std::end(v2));
  EXPECT_NE(u, t);
}

TEST(Table, Equality3) {
  StringTable t;
  std::vector<std::pair<std::string, std::string>> v1 = {{"b", "b"},
                                                         {"bb", "bb"}};
  t.insert(std::begin(v1), std::end(v1));
  StringTable u;
  std::vector<std::pair<std::string, std::string>> v2 = {{"a", "a"},
                                                         {"aa", "aa"}};
  u.insert(std::begin(v2), std::end(v2));
  EXPECT_NE(u, t);
}

TEST(Table, NumDeletedRegression) {
  IntTable t;
  t.emplace(0);
  t.erase(t.find(0));
  // Construct over a deleted slot.
  t.emplace(0);
  t.clear();
}

TEST(Table, FindFullDeletedRegression) {
  IntTable t;
  for (int i = 0; i < 1000; ++i) {
    t.emplace(i);
    t.erase(t.find(i));
  }
  EXPECT_EQ(0, t.size());
}

TEST(Table, ReplacingDeletedSlotDoesNotRehash) {
  size_t n;
  {
    // Compute n such that n is the maximum number of elements before rehash.
    IntTable t;
    t.emplace(0);
    size_t c = t.bucket_count();
    for (n = 1; c == t.bucket_count(); ++n) t.emplace(n);
    --n;
  }
  IntTable t;
  t.rehash(n);
  const size_t c = t.bucket_count();
  for (size_t i = 0; i != n; ++i) t.emplace(i);
  EXPECT_EQ(c, t.bucket_count()) << "rehashing threshold = " << n;
  t.erase(0);
  t.emplace(0);
  EXPECT_EQ(c, t.bucket_count()) << "rehashing threshold = " << n;
}

TEST(Table, NoThrowMoveConstruct) {
  ASSERT_TRUE(
      std::is_nothrow_copy_constructible<absl::Hash<absl::string_view>>::value);
  ASSERT_TRUE(std::is_nothrow_copy_constructible<
              std::equal_to<absl::string_view>>::value);
  ASSERT_TRUE(std::is_nothrow_copy_constructible<std::allocator<int>>::value);
  EXPECT_TRUE(std::is_nothrow_move_constructible<StringTable>::value);
}

TEST(Table, NoThrowMoveAssign) {
  ASSERT_TRUE(
      std::is_nothrow_move_assignable<absl::Hash<absl::string_view>>::value);
  ASSERT_TRUE(
      std::is_nothrow_move_assignable<std::equal_to<absl::string_view>>::value);
  ASSERT_TRUE(std::is_nothrow_move_assignable<std::allocator<int>>::value);
  ASSERT_TRUE(
      absl::allocator_traits<std::allocator<int>>::is_always_equal::value);
  EXPECT_TRUE(std::is_nothrow_move_assignable<StringTable>::value);
}

TEST(Table, NoThrowSwappable) {
  ASSERT_TRUE(
      container_internal::IsNoThrowSwappable<absl::Hash<absl::string_view>>());
  ASSERT_TRUE(container_internal::IsNoThrowSwappable<
              std::equal_to<absl::string_view>>());
  ASSERT_TRUE(container_internal::IsNoThrowSwappable<std::allocator<int>>());
  EXPECT_TRUE(container_internal::IsNoThrowSwappable<StringTable>());
}
1529 
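// Without is_transparent on the hasher and equality functor, lookup arguments
// are converted to the table's key type first; with it, the argument type is
// used directly. The ADD_FAILURE overloads prove that the double overloads are
// never invoked in the non-transparent case.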
TEST(Table, HeterogeneousLookup) {
  struct Hash {
    size_t operator()(int64_t i) const { return i; }
    size_t operator()(double i) const {
      ADD_FAILURE();
      return i;
    }
  };
  struct Eq {
    bool operator()(int64_t a, int64_t b) const { return a == b; }
    bool operator()(double a, int64_t b) const {
      ADD_FAILURE();
      return a == b;
    }
    bool operator()(int64_t a, double b) const {
      ADD_FAILURE();
      return a == b;
    }
    bool operator()(double a, double b) const {
      ADD_FAILURE();
      return a == b;
    }
  };

  struct THash {
    using is_transparent = void;
    size_t operator()(int64_t i) const { return i; }
    size_t operator()(double i) const { return i; }
  };
  struct TEq {
    using is_transparent = void;
    bool operator()(int64_t a, int64_t b) const { return a == b; }
    bool operator()(double a, int64_t b) const { return a == b; }
    bool operator()(int64_t a, double b) const { return a == b; }
    bool operator()(double a, double b) const { return a == b; }
  };

  raw_hash_set<IntPolicy, Hash, Eq, Alloc<int64_t>> s{0, 1, 2};
  // It will convert to int64_t before the query.
  EXPECT_EQ(1, *s.find(double{1.1}));

  raw_hash_set<IntPolicy, THash, TEq, Alloc<int64_t>> ts{0, 1, 2};
  // It will try to use the double, and fail to find the object.
  EXPECT_TRUE(ts.find(1.1) == ts.end());
}

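// Detection idiom: CallFoo<Table> is well-formed only if Table accepts an int
// argument for the corresponding member function, and VerifyResultOf turns
// that into a compile-time boolean via absl::void_t/SFINAE.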
template <class Table>
using CallFind = decltype(std::declval<Table&>().find(17));

template <class Table>
using CallErase = decltype(std::declval<Table&>().erase(17));

template <class Table>
using CallExtract = decltype(std::declval<Table&>().extract(17));

template <class Table>
using CallPrefetch = decltype(std::declval<Table&>().prefetch(17));

template <class Table>
using CallCount = decltype(std::declval<Table&>().count(17));

template <template <typename> class C, class Table, class = void>
struct VerifyResultOf : std::false_type {};

template <template <typename> class C, class Table>
struct VerifyResultOf<C, Table, absl::void_t<C<Table>>> : std::true_type {};

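// The heterogeneous overloads should be available exactly when both the
// hasher and the equality functor are transparent.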
TEST(Table, HeterogeneousLookupOverloads) {
  using NonTransparentTable =
      raw_hash_set<StringPolicy, absl::Hash<absl::string_view>,
                   std::equal_to<absl::string_view>, std::allocator<int>>;

  EXPECT_FALSE((VerifyResultOf<CallFind, NonTransparentTable>()));
  EXPECT_FALSE((VerifyResultOf<CallErase, NonTransparentTable>()));
  EXPECT_FALSE((VerifyResultOf<CallExtract, NonTransparentTable>()));
  EXPECT_FALSE((VerifyResultOf<CallPrefetch, NonTransparentTable>()));
  EXPECT_FALSE((VerifyResultOf<CallCount, NonTransparentTable>()));

  using TransparentTable = raw_hash_set<
      StringPolicy,
      absl::container_internal::hash_default_hash<absl::string_view>,
      absl::container_internal::hash_default_eq<absl::string_view>,
      std::allocator<int>>;

  EXPECT_TRUE((VerifyResultOf<CallFind, TransparentTable>()));
  EXPECT_TRUE((VerifyResultOf<CallErase, TransparentTable>()));
  EXPECT_TRUE((VerifyResultOf<CallExtract, TransparentTable>()));
  EXPECT_TRUE((VerifyResultOf<CallPrefetch, TransparentTable>()));
  EXPECT_TRUE((VerifyResultOf<CallCount, TransparentTable>()));
}

// TODO(alkis): Expand iterator tests.
TEST(Iterator, IsDefaultConstructible) {
  StringTable::iterator i;
  EXPECT_TRUE(i == StringTable::iterator());
}

TEST(ConstIterator, IsDefaultConstructible) {
  StringTable::const_iterator i;
  EXPECT_TRUE(i == StringTable::const_iterator());
}

TEST(Iterator, ConvertsToConstIterator) {
  StringTable::iterator i;
  EXPECT_TRUE(i == StringTable::const_iterator());
}

TEST(Iterator, Iterates) {
  IntTable t;
  for (size_t i = 3; i != 6; ++i) EXPECT_TRUE(t.emplace(i).second);
  EXPECT_THAT(t, UnorderedElementsAre(3, 4, 5));
}

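// merge() moves over only the elements whose keys are absent from the
// destination; elements whose keys collide stay behind in the source.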
TEST(Table, Merge) {
  StringTable t1, t2;
  t1.emplace("0", "-0");
  t1.emplace("1", "-1");
  t2.emplace("0", "~0");
  t2.emplace("2", "~2");

  EXPECT_THAT(t1, UnorderedElementsAre(Pair("0", "-0"), Pair("1", "-1")));
  EXPECT_THAT(t2, UnorderedElementsAre(Pair("0", "~0"), Pair("2", "~2")));

  t1.merge(t2);
  EXPECT_THAT(t1, UnorderedElementsAre(Pair("0", "-0"), Pair("1", "-1"),
                                       Pair("2", "~2")));
  EXPECT_THAT(t2, UnorderedElementsAre(Pair("0", "~0")));
}

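// A default-constructed node handle is empty, converts to false, and exposes
// the table's allocator_type.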
TEST(Nodes, EmptyNodeType) {
  using node_type = StringTable::node_type;
  node_type n;
  EXPECT_FALSE(n);
  EXPECT_TRUE(n.empty());

  EXPECT_TRUE((std::is_same<node_type::allocator_type,
                            StringTable::allocator_type>::value));
}

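// Exercises the extract()/insert() node-handle protocol: a successful insert
// consumes the node, inserting an empty node is a no-op, and inserting a
// duplicate key hands the node back unconsumed.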
TEST(Nodes, ExtractInsert) {
  constexpr char k0[] = "Very long string zero.";
  constexpr char k1[] = "Very long string one.";
  constexpr char k2[] = "Very long string two.";
  StringTable t = {{k0, ""}, {k1, ""}, {k2, ""}};
  EXPECT_THAT(t,
              UnorderedElementsAre(Pair(k0, ""), Pair(k1, ""), Pair(k2, "")));

  auto node = t.extract(k0);
  EXPECT_THAT(t, UnorderedElementsAre(Pair(k1, ""), Pair(k2, "")));
  EXPECT_TRUE(node);
  EXPECT_FALSE(node.empty());

  StringTable t2;
  StringTable::insert_return_type res = t2.insert(std::move(node));
  EXPECT_TRUE(res.inserted);
  EXPECT_THAT(*res.position, Pair(k0, ""));
  EXPECT_FALSE(res.node);
  EXPECT_THAT(t2, UnorderedElementsAre(Pair(k0, "")));

  // Not there.
  EXPECT_THAT(t, UnorderedElementsAre(Pair(k1, ""), Pair(k2, "")));
  node = t.extract("Not there!");
  EXPECT_THAT(t, UnorderedElementsAre(Pair(k1, ""), Pair(k2, "")));
  EXPECT_FALSE(node);

  // Inserting nothing.
  res = t2.insert(std::move(node));
  EXPECT_FALSE(res.inserted);
  EXPECT_EQ(res.position, t2.end());
  EXPECT_FALSE(res.node);
  EXPECT_THAT(t2, UnorderedElementsAre(Pair(k0, "")));

  t.emplace(k0, "1");
  node = t.extract(k0);

  // Insert duplicate.
  res = t2.insert(std::move(node));
  EXPECT_FALSE(res.inserted);
  EXPECT_THAT(*res.position, Pair(k0, ""));
  EXPECT_TRUE(res.node);
  EXPECT_FALSE(node);
}

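// Helpers for the iteration-order tests below: build a table containing
// 0..size-1 and snapshot its iteration order.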
IntTable MakeSimpleTable(size_t size) {
  IntTable t;
  while (t.size() < size) t.insert(t.size());
  return t;
}

std::vector<int> OrderOfIteration(const IntTable& t) {
  return {t.begin(), t.end()};
}

// These IterationOrderChanges tests depend on non-deterministic behavior.
// We inject non-determinism from the table's pointer, but in a way where only
// the memory page matters. We have to retry enough times to make sure we are
// touching different memory pages to cause the ordering to change. We also
// need to keep the old tables around to avoid getting the same memory blocks
// over and over.
TEST(Table, IterationOrderChangesByInstance) {
  for (size_t size : {2, 6, 12, 20}) {
    const auto reference_table = MakeSimpleTable(size);
    const auto reference = OrderOfIteration(reference_table);

    std::vector<IntTable> tables;
    bool found_difference = false;
    for (int i = 0; !found_difference && i < 5000; ++i) {
      tables.push_back(MakeSimpleTable(size));
      found_difference = OrderOfIteration(tables.back()) != reference;
    }
    if (!found_difference) {
      FAIL()
          << "Iteration order remained the same across many attempts with size "
          << size;
    }
  }
}

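// Even rehashing to the same size should change the iteration order: rehash
// allocates a new backing array, and (per the comment above) the order is
// salted by its address.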
TEST(Table, IterationOrderChangesOnRehash) {
  std::vector<IntTable> garbage;
  for (int i = 0; i < 5000; ++i) {
    auto t = MakeSimpleTable(20);
    const auto reference = OrderOfIteration(t);
    // Force rehash to the same size.
    t.rehash(0);
    auto trial = OrderOfIteration(t);
    if (trial != reference) {
      // We are done.
      return;
    }
    garbage.push_back(std::move(t));
  }
  FAIL() << "Iteration order remained the same across many attempts.";
}

// Verify that pointers are invalidated as soon as a second element is
// inserted. This prevents code from depending on pointer stability in small
// tables.
TEST(Table, UnstablePointers) {
  IntTable table;

  const auto addr = [&](int i) {
    return reinterpret_cast<uintptr_t>(&*table.find(i));
  };

  table.insert(0);
  const uintptr_t old_ptr = addr(0);

  // Inserting a second element forces a rehash, which invalidates pointers.
  table.insert(1);

  EXPECT_NE(old_ptr, addr(0));
}

// Confirm that we assert if we try to erase() end().
TEST(TableDeathTest, EraseOfEndAsserts) {
  // Use an assert with side-effects to figure out if they are actually enabled.
  bool assert_enabled = false;
  assert([&]() {
    assert_enabled = true;
    return true;
  }());
  if (!assert_enabled) return;

  IntTable t;
  // Extra simple "regexp" as regexp support is highly varied across platforms.
  constexpr char kDeathMsg[] = "Invalid operation on iterator";
  EXPECT_DEATH_IF_SUPPORTED(t.erase(t.end()), kDeathMsg);
}

#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
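// With a sample parameter of 100, roughly 1 in 100 tables should be sampled,
// so the observed rate is expected to land near 0.01.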
TEST(RawHashSamplerTest, Sample) {
  // Enable the feature even if the prod default is off.
  SetHashtablezEnabled(true);
  SetHashtablezSampleParameter(100);

  auto& sampler = HashtablezSampler::Global();
  size_t start_size = 0;
  start_size += sampler.Iterate([&](const HashtablezInfo&) { ++start_size; });

  std::vector<IntTable> tables;
  for (int i = 0; i < 1000000; ++i) {
    tables.emplace_back();
    tables.back().insert(1);
  }
  size_t end_size = 0;
  end_size += sampler.Iterate([&](const HashtablezInfo&) { ++end_size; });

  EXPECT_NEAR((end_size - start_size) / static_cast<double>(tables.size()),
              0.01, 0.005);
}
#endif  // ABSL_INTERNAL_HASHTABLEZ_SAMPLE

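// Tables using a custom allocator should be excluded from hashtablez sampling
// entirely, so the observed rate should be near zero.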
TEST(RawHashSamplerTest, DoNotSampleCustomAllocators) {
  // Enable the feature even if the prod default is off.
  SetHashtablezEnabled(true);
  SetHashtablezSampleParameter(100);

  auto& sampler = HashtablezSampler::Global();
  size_t start_size = 0;
  start_size += sampler.Iterate([&](const HashtablezInfo&) { ++start_size; });

  std::vector<CustomAllocIntTable> tables;
  for (int i = 0; i < 1000000; ++i) {
    tables.emplace_back();
    tables.back().insert(1);
  }
  size_t end_size = 0;
  end_size += sampler.Iterate([&](const HashtablezInfo&) { ++end_size; });

  EXPECT_NEAR((end_size - start_size) / static_cast<double>(tables.size()),
              0.00, 0.001);
}

#ifdef ABSL_HAVE_ADDRESS_SANITIZER
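// Under ASan, every slot except the single occupied one should be poisoned
// after the allocation.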
TEST(Sanitizer, PoisoningUnused) {
  IntTable t;
  t.reserve(5);
  // Insert something to force an allocation.
  int64_t& v1 = *t.insert(0).first;

  // Make sure there is something to test.
  ASSERT_GT(t.capacity(), 1);

  int64_t* slots = RawHashSetTestOnlyAccess::GetSlots(t);
  for (size_t i = 0; i < t.capacity(); ++i) {
    EXPECT_EQ(slots + i != &v1, __asan_address_is_poisoned(slots + i));
  }
}

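// Erasing an element should re-poison its slot.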
TEST(Sanitizer, PoisoningOnErase) {
  IntTable t;
  int64_t& v = *t.insert(0).first;

  EXPECT_FALSE(__asan_address_is_poisoned(&v));
  t.erase(0);
  EXPECT_TRUE(__asan_address_is_poisoned(&v));
}
#endif  // ABSL_HAVE_ADDRESS_SANITIZER

}  // namespace
}  // namespace container_internal
ABSL_NAMESPACE_END
}  // namespace absl