• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright 2018 The Abseil Authors.
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 //      https://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14 
15 #include "absl/container/internal/raw_hash_set.h"
16 
17 #include <cmath>
18 #include <cstdint>
19 #include <deque>
20 #include <functional>
21 #include <memory>
22 #include <numeric>
23 #include <random>
24 #include <string>
25 
26 #include "gmock/gmock.h"
27 #include "gtest/gtest.h"
28 #include "absl/base/attributes.h"
29 #include "absl/base/internal/cycleclock.h"
30 #include "absl/base/internal/raw_logging.h"
31 #include "absl/container/internal/container_memory.h"
32 #include "absl/container/internal/hash_function_defaults.h"
33 #include "absl/container/internal/hash_policy_testing.h"
34 #include "absl/container/internal/hashtable_debug.h"
35 #include "absl/strings/string_view.h"
36 
37 namespace absl {
38 ABSL_NAMESPACE_BEGIN
39 namespace container_internal {
40 
// Test-only backdoor (befriended by raw_hash_set) that exposes the private
// slots_ pointer so tests can reason about slot addresses.
struct RawHashSetTestOnlyAccess {
  template <typename C>
  static auto GetSlots(const C& c) -> decltype(c.slots_) {
    return c.slots_;
  }
};
47 
48 namespace {
49 
50 using ::testing::DoubleNear;
51 using ::testing::ElementsAre;
52 using ::testing::Ge;
53 using ::testing::Lt;
54 using ::testing::Optional;
55 using ::testing::Pair;
56 using ::testing::UnorderedElementsAre;
57 
// NormalizeCapacity rounds any requested capacity up to the next value of
// the form 2^k - 1 (minimum 1), which is what the control-byte layout needs.
TEST(Util, NormalizeCapacity) {
  struct Case {
    size_t expected;
    size_t input;
  };
  const Case kCases[] = {
      {1, 0},  {1, 1},  {3, 2},   {3, 3},
      {7, 4},  {7, 7},  {15, 8},  {15, 15},
      {15 * 2 + 1, 15 + 1},       {15 * 2 + 1, 15 + 2},
  };
  for (const Case& c : kCases) {
    EXPECT_EQ(c.expected, NormalizeCapacity(c.input));
  }
}
70 
// Round-trip consistency between CapacityToGrowth, GrowthToLowerboundCapacity
// and NormalizeCapacity in both directions.
TEST(Util, GrowthAndCapacity) {
  // Verify that GrowthToCapacity gives the minimum capacity that has enough
  // growth.
  for (size_t growth = 0; growth < 10000; ++growth) {
    SCOPED_TRACE(growth);
    size_t capacity = NormalizeCapacity(GrowthToLowerboundCapacity(growth));
    // The capacity is large enough for `growth`
    EXPECT_THAT(CapacityToGrowth(capacity), Ge(growth));
    if (growth != 0 && capacity > 1) {
      // There is no smaller capacity that works.
      EXPECT_THAT(CapacityToGrowth(capacity / 2), Lt(growth));
    }
  }

  // Walk valid capacities (2^k - 1) and check the reverse mapping round-trips.
  for (size_t capacity = Group::kWidth - 1; capacity < 10000;
       capacity = 2 * capacity + 1) {
    SCOPED_TRACE(capacity);
    size_t growth = CapacityToGrowth(capacity);
    EXPECT_THAT(growth, Lt(capacity));
    EXPECT_LE(GrowthToLowerboundCapacity(growth), capacity);
    EXPECT_EQ(NormalizeCapacity(GrowthToLowerboundCapacity(growth)), capacity);
  }
}
94 
// The quadratic probe sequence over 16-byte groups must visit each group
// offset exactly once per cycle, and the sequence depends only on
// hash & mask (so hash 128 with mask 127 starts like hash 0).
TEST(Util, probe_seq) {
  probe_seq<16> seq(0, 127);
  auto gen = [&]() {
    size_t res = seq.offset();
    seq.next();
    return res;
  };
  std::vector<size_t> offsets(8);
  std::generate_n(offsets.begin(), 8, gen);
  EXPECT_THAT(offsets, ElementsAre(0, 16, 48, 96, 32, 112, 80, 64));
  // 128 & 127 == 0, so the probe sequence is identical to the first one.
  seq = probe_seq<16>(128, 127);
  std::generate_n(offsets.begin(), 8, gen);
  EXPECT_THAT(offsets, ElementsAre(0, 16, 48, 96, 32, 112, 80, 64));
}
109 
// BitMask<uint8_t, 8> iterates the indices of set bits in ascending order
// and converts to true iff any bit is set.
TEST(BitMask, Smoke) {
  EXPECT_FALSE((BitMask<uint8_t, 8>(0)));
  EXPECT_TRUE((BitMask<uint8_t, 8>(5)));

  EXPECT_THAT((BitMask<uint8_t, 8>(0)), ElementsAre());
  EXPECT_THAT((BitMask<uint8_t, 8>(0x1)), ElementsAre(0));
  EXPECT_THAT((BitMask<uint8_t, 8>(0x2)), ElementsAre(1));
  EXPECT_THAT((BitMask<uint8_t, 8>(0x3)), ElementsAre(0, 1));
  EXPECT_THAT((BitMask<uint8_t, 8>(0x4)), ElementsAre(2));
  EXPECT_THAT((BitMask<uint8_t, 8>(0x5)), ElementsAre(0, 2));
  EXPECT_THAT((BitMask<uint8_t, 8>(0x55)), ElementsAre(0, 2, 4, 6));
  EXPECT_THAT((BitMask<uint8_t, 8>(0xAA)), ElementsAre(1, 3, 5, 7));
}
123 
// Exercises a BitMask with Shift=3: each matching byte contributes its high
// bit (0x80), and the iteration index is the byte position, not the bit
// position.
TEST(BitMask, WithShift) {
  // See the non-SSE version of Group for details on what this math is for.
  uint64_t ctrl = 0x1716151413121110;
  uint64_t hash = 0x12;
  constexpr uint64_t msbs = 0x8080808080808080ULL;
  constexpr uint64_t lsbs = 0x0101010101010101ULL;
  // SWAR byte-equality trick: bytes equal to `hash` become 0 in x, and only
  // those produce a set msb in (x - lsbs) & ~x & msbs.
  auto x = ctrl ^ (lsbs * hash);
  uint64_t mask = (x - lsbs) & ~x & msbs;
  EXPECT_EQ(0x0000000080800000, mask);

  BitMask<uint64_t, 8, 3> b(mask);
  // The lowest matching byte is byte 2 (value 0x12 at that position).
  EXPECT_EQ(*b, 2);
}
137 
// LeadingZeros/TrailingZeros are measured in logical slot positions: raw bits
// for the 16-wide mask, and bytes (Shift=3) for the 8-wide mask.
TEST(BitMask, LeadingTrailing) {
  EXPECT_EQ((BitMask<uint32_t, 16>(0x00001a40).LeadingZeros()), 3);
  EXPECT_EQ((BitMask<uint32_t, 16>(0x00001a40).TrailingZeros()), 6);

  EXPECT_EQ((BitMask<uint32_t, 16>(0x00000001).LeadingZeros()), 15);
  EXPECT_EQ((BitMask<uint32_t, 16>(0x00000001).TrailingZeros()), 0);

  EXPECT_EQ((BitMask<uint32_t, 16>(0x00008000).LeadingZeros()), 0);
  EXPECT_EQ((BitMask<uint32_t, 16>(0x00008000).TrailingZeros()), 15);

  EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000008080808000).LeadingZeros()), 3);
  EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000008080808000).TrailingZeros()), 1);

  EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000000000000080).LeadingZeros()), 7);
  EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000000000000080).TrailingZeros()), 0);

  EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x8000000000000000).LeadingZeros()), 0);
  EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x8000000000000000).TrailingZeros()), 7);
}
157 
// A group made entirely of empty control bytes must match no h2 hash value.
TEST(Group, EmptyGroup) {
  for (h2_t hash = 0; hash != 128; ++hash) {
    EXPECT_FALSE(Group{EmptyGroup()}.Match(hash));
  }
}
161 
// Group::Match(h) yields the positions whose control byte equals h; control
// sentinels (kEmpty, kDeleted, kSentinel) never match. Branches cover both
// supported group widths.
TEST(Group, Match) {
  if (Group::kWidth == 16) {
    ctrl_t group[] = {kEmpty, 1, kDeleted, 3, kEmpty, 5, kSentinel, 7,
                      7,      5, 3,        1, 1,      1, 1,         1};
    EXPECT_THAT(Group{group}.Match(0), ElementsAre());
    EXPECT_THAT(Group{group}.Match(1), ElementsAre(1, 11, 12, 13, 14, 15));
    EXPECT_THAT(Group{group}.Match(3), ElementsAre(3, 10));
    EXPECT_THAT(Group{group}.Match(5), ElementsAre(5, 9));
    EXPECT_THAT(Group{group}.Match(7), ElementsAre(7, 8));
  } else if (Group::kWidth == 8) {
    ctrl_t group[] = {kEmpty, 1, 2, kDeleted, 2, 1, kSentinel, 1};
    EXPECT_THAT(Group{group}.Match(0), ElementsAre());
    EXPECT_THAT(Group{group}.Match(1), ElementsAre(1, 5, 7));
    EXPECT_THAT(Group{group}.Match(2), ElementsAre(2, 4));
  } else {
    FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth;
  }
}
180 
// MatchEmpty must report only kEmpty positions — not kDeleted or kSentinel.
TEST(Group, MatchEmpty) {
  if (Group::kWidth == 16) {
    ctrl_t group[] = {kEmpty, 1, kDeleted, 3, kEmpty, 5, kSentinel, 7,
                      7,      5, 3,        1, 1,      1, 1,         1};
    EXPECT_THAT(Group{group}.MatchEmpty(), ElementsAre(0, 4));
  } else if (Group::kWidth == 8) {
    ctrl_t group[] = {kEmpty, 1, 2, kDeleted, 2, 1, kSentinel, 1};
    EXPECT_THAT(Group{group}.MatchEmpty(), ElementsAre(0));
  } else {
    FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth;
  }
}
193 
// MatchEmptyOrDeleted must report kEmpty and kDeleted positions but must
// exclude kSentinel and full slots.
TEST(Group, MatchEmptyOrDeleted) {
  if (Group::kWidth == 16) {
    ctrl_t group[] = {kEmpty, 1, kDeleted, 3, kEmpty, 5, kSentinel, 7,
                      7,      5, 3,        1, 1,      1, 1,         1};
    EXPECT_THAT(Group{group}.MatchEmptyOrDeleted(), ElementsAre(0, 2, 4));
  } else if (Group::kWidth == 8) {
    ctrl_t group[] = {kEmpty, 1, 2, kDeleted, 2, 1, kSentinel, 1};
    EXPECT_THAT(Group{group}.MatchEmptyOrDeleted(), ElementsAre(0, 3));
  } else {
    FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth;
  }
}
206 
// ConvertDeletedToEmptyAndFullToDeleted must map kDeleted -> kEmpty and full
// -> kDeleted across the whole table, preserve the kSentinel byte, and mirror
// the first kWidth - 1 control bytes into the cloned bytes past the sentinel.
TEST(Batch, DropDeletes) {
  constexpr size_t kCapacity = 63;
  constexpr size_t kGroupWidth = container_internal::Group::kWidth;
  std::vector<ctrl_t> ctrl(kCapacity + 1 + kGroupWidth);
  ctrl[kCapacity] = kSentinel;
  std::vector<ctrl_t> pattern = {kEmpty, 2, kDeleted, 2, kEmpty, 1, kDeleted};
  for (size_t i = 0; i != kCapacity; ++i) {
    ctrl[i] = pattern[i % pattern.size()];
    // Replicate the leading control bytes into the cloned trailing region,
    // as the real table layout does.
    if (i < kGroupWidth - 1)
      ctrl[i + kCapacity + 1] = pattern[i % pattern.size()];
  }
  ConvertDeletedToEmptyAndFullToDeleted(ctrl.data(), kCapacity);
  ASSERT_EQ(ctrl[kCapacity], kSentinel);
  for (size_t i = 0; i < kCapacity + 1 + kGroupWidth; ++i) {
    ctrl_t expected = pattern[i % (kCapacity + 1) % pattern.size()];
    if (i == kCapacity) expected = kSentinel;
    if (expected == kDeleted) expected = kEmpty;
    if (IsFull(expected)) expected = kDeleted;
    EXPECT_EQ(ctrl[i], expected)
        << i << " " << int{pattern[i % pattern.size()]};
  }
}
229 
// CountLeadingEmptyOrDeleted returns the length of the prefix of
// empty/deleted control bytes, stopping at the first full (or sentinel) slot.
TEST(Group, CountLeadingEmptyOrDeleted) {
  const std::vector<ctrl_t> empty_examples = {kEmpty, kDeleted};
  const std::vector<ctrl_t> full_examples = {0, 1, 2, 3, 5, 9, 127, kSentinel};

  for (ctrl_t empty : empty_examples) {
    std::vector<ctrl_t> e(Group::kWidth, empty);
    EXPECT_EQ(Group::kWidth, Group{e.data()}.CountLeadingEmptyOrDeleted());
    for (ctrl_t full : full_examples) {
      // A single full byte at position i truncates the count to i.
      for (size_t i = 0; i != Group::kWidth; ++i) {
        std::vector<ctrl_t> f(Group::kWidth, empty);
        f[i] = full;
        EXPECT_EQ(i, Group{f.data()}.CountLeadingEmptyOrDeleted());
      }
      // With two full bytes, the earlier one wins.
      std::vector<ctrl_t> f(Group::kWidth, empty);
      f[Group::kWidth * 2 / 3] = full;
      f[Group::kWidth / 2] = full;
      EXPECT_EQ(
          Group::kWidth / 2, Group{f.data()}.CountLeadingEmptyOrDeleted());
    }
  }
}
251 
// Minimal raw_hash_set policy that stores an int64_t directly in the slot.
struct IntPolicy {
  using slot_type = int64_t;
  using key_type = int64_t;
  using init_type = int64_t;

  static void construct(void*, int64_t* slot, int64_t v) { *slot = v; }
  static void destroy(void*, int64_t*) {}
  static void transfer(void*, int64_t* new_slot, int64_t* old_slot) {
    *new_slot = *old_slot;
  }

  static int64_t& element(slot_type* slot) { return *slot; }

  // Invokes `f` with the value as both key and value.
  template <class F>
  static auto apply(F&& f, int64_t x) -> decltype(std::forward<F>(f)(x, x)) {
    return std::forward<F>(f)(x, x);
  }
};
270 
// raw_hash_set policy for a map-like table of (string, string) pairs stored
// in a wrapper slot_type; construction goes through the element allocator.
class StringPolicy {
  // Splits the decomposed pair back into (key_view, piecewise ctor args)
  // and forwards to `f`. Enabled only when K converts to string_view.
  template <class F, class K, class V,
            class = typename std::enable_if<
                std::is_convertible<const K&, absl::string_view>::value>::type>
  decltype(std::declval<F>()(
      std::declval<const absl::string_view&>(), std::piecewise_construct,
      std::declval<std::tuple<K>>(),
      std::declval<V>())) static apply_impl(F&& f,
                                            std::pair<std::tuple<K>, V> p) {
    const absl::string_view& key = std::get<0>(p.first);
    return std::forward<F>(f)(key, std::piecewise_construct, std::move(p.first),
                              std::move(p.second));
  }

 public:
  struct slot_type {
    // Tag type so the forwarding constructor is never confused with copy/move.
    struct ctor {};

    template <class... Ts>
    slot_type(ctor, Ts&&... ts) : pair(std::forward<Ts>(ts)...) {}

    std::pair<std::string, std::string> pair;
  };

  using key_type = std::string;
  using init_type = std::pair<std::string, std::string>;

  template <class allocator_type, class... Args>
  static void construct(allocator_type* alloc, slot_type* slot, Args... args) {
    std::allocator_traits<allocator_type>::construct(
        *alloc, slot, typename slot_type::ctor(), std::forward<Args>(args)...);
  }

  template <class allocator_type>
  static void destroy(allocator_type* alloc, slot_type* slot) {
    std::allocator_traits<allocator_type>::destroy(*alloc, slot);
  }

  // Transfer is implemented as move-construct into the new slot followed by
  // destroying the old one.
  template <class allocator_type>
  static void transfer(allocator_type* alloc, slot_type* new_slot,
                       slot_type* old_slot) {
    construct(alloc, new_slot, std::move(old_slot->pair));
    destroy(alloc, old_slot);
  }

  static std::pair<std::string, std::string>& element(slot_type* slot) {
    return slot->pair;
  }

  template <class F, class... Args>
  static auto apply(F&& f, Args&&... args)
      -> decltype(apply_impl(std::forward<F>(f),
                             PairArgs(std::forward<Args>(args)...))) {
    return apply_impl(std::forward<F>(f),
                      PairArgs(std::forward<Args>(args)...));
  }
};
328 
// Transparent hash/equality functors so StringTable supports heterogeneous
// lookup by absl::string_view without constructing a std::string.
struct StringHash : absl::Hash<absl::string_view> {
  using is_transparent = void;
};
struct StringEq : std::equal_to<absl::string_view> {
  using is_transparent = void;
};
335 
// Convenience alias-like subclass: string->string raw_hash_set used by tests.
struct StringTable
    : raw_hash_set<StringPolicy, StringHash, StringEq, std::allocator<int>> {
  using Base = typename StringTable::raw_hash_set;
  StringTable() {}
  using Base::Base;
};
342 
// Convenience subclass: int64_t raw_hash_set with default hash/eq.
struct IntTable
    : raw_hash_set<IntPolicy, container_internal::hash_default_hash<int64_t>,
                   std::equal_to<int64_t>, std::allocator<int64_t>> {
  using Base = typename IntTable::raw_hash_set;
  using Base::Base;
};
349 
350 template <typename T>
351 struct CustomAlloc : std::allocator<T> {
CustomAllocabsl::container_internal::__anon46dfdfe20111::CustomAlloc352   CustomAlloc() {}
353 
354   template <typename U>
CustomAllocabsl::container_internal::__anon46dfdfe20111::CustomAlloc355   CustomAlloc(const CustomAlloc<U>& other) {}
356 
357   template<class U> struct rebind {
358     using other = CustomAlloc<U>;
359   };
360 };
361 
// IntTable variant that allocates through CustomAlloc.
struct CustomAllocIntTable
    : raw_hash_set<IntPolicy, container_internal::hash_default_hash<int64_t>,
                   std::equal_to<int64_t>, CustomAlloc<int64_t>> {
  using Base = typename CustomAllocIntTable::raw_hash_set;
  using Base::Base;
};
368 
// Pathological hash: every key hashes to 0, forcing all elements into one
// probe sequence. Used to exercise collision handling.
struct BadFastHash {
  template <class T>
  size_t operator()(const T&) const {
    return 0;
  }
};
375 
// Int table with the constant hash above — maximal collisions by design.
struct BadTable : raw_hash_set<IntPolicy, BadFastHash, std::equal_to<int>,
                               std::allocator<int>> {
  using Base = typename BadTable::raw_hash_set;
  BadTable() {}
  using Base::Base;
};
382 
// Empty hash/eq/alloc functors must not enlarge the table object (empty-base
// optimization): its size should equal a plain struct with the same fields,
// and a stateful hash should add exactly its own size.
TEST(Table, EmptyFunctorOptimization) {
  static_assert(std::is_empty<std::equal_to<absl::string_view>>::value, "");
  static_assert(std::is_empty<std::allocator<int>>::value, "");

  // Mirrors the expected field layout of raw_hash_set.
  struct MockTable {
    void* ctrl;
    void* slots;
    size_t size;
    size_t capacity;
    size_t growth_left;
    void* infoz;
  };
  struct StatelessHash {
    size_t operator()(absl::string_view) const { return 0; }
  };
  struct StatefulHash : StatelessHash {
    size_t dummy;
  };

  EXPECT_EQ(
      sizeof(MockTable),
      sizeof(
          raw_hash_set<StringPolicy, StatelessHash,
                       std::equal_to<absl::string_view>, std::allocator<int>>));

  EXPECT_EQ(
      sizeof(MockTable) + sizeof(StatefulHash),
      sizeof(
          raw_hash_set<StringPolicy, StatefulHash,
                       std::equal_to<absl::string_view>, std::allocator<int>>));
}
414 
// A default-constructed table is empty with size 0.
TEST(Table, Empty) {
  IntTable table;
  EXPECT_TRUE(table.empty());
  EXPECT_EQ(0, table.size());
}
420 
// find() on an empty table returns end().
TEST(Table, LookupEmpty) {
  IntTable table;
  EXPECT_TRUE(table.find(0) == table.end());
}
426 
// A single emplace succeeds, grows size to 1, and makes the key findable.
TEST(Table, Insert1) {
  IntTable table;
  EXPECT_TRUE(table.find(0) == table.end());
  const auto insert_result = table.emplace(0);
  EXPECT_TRUE(insert_result.second);
  EXPECT_THAT(*insert_result.first, 0);
  EXPECT_EQ(1, table.size());
  EXPECT_THAT(*table.find(0), 0);
}
436 
// Two distinct keys can be emplaced; both stay findable and size tracks.
TEST(Table, Insert2) {
  IntTable table;
  EXPECT_TRUE(table.find(0) == table.end());
  const auto first_insert = table.emplace(0);
  EXPECT_TRUE(first_insert.second);
  EXPECT_THAT(*first_insert.first, 0);
  EXPECT_EQ(1, table.size());
  EXPECT_TRUE(table.find(1) == table.end());
  const auto second_insert = table.emplace(1);
  EXPECT_TRUE(second_insert.second);
  EXPECT_THAT(*second_insert.first, 1);
  EXPECT_EQ(2, table.size());
  EXPECT_THAT(*table.find(0), 0);
  EXPECT_THAT(*table.find(1), 1);
}
452 
// With a constant hash (BadTable), colliding keys must still insert and be
// found independently.
TEST(Table, InsertCollision) {
  BadTable table;
  EXPECT_TRUE(table.find(1) == table.end());
  const auto first_insert = table.emplace(1);
  EXPECT_TRUE(first_insert.second);
  EXPECT_THAT(*first_insert.first, 1);
  EXPECT_EQ(1, table.size());

  EXPECT_TRUE(table.find(2) == table.end());
  const auto second_insert = table.emplace(2);
  EXPECT_THAT(*second_insert.first, 2);
  EXPECT_TRUE(second_insert.second);
  EXPECT_EQ(2, table.size());

  EXPECT_THAT(*table.find(1), 1);
  EXPECT_THAT(*table.find(2), 2);
}
470 
471 // Test that we do not add existent element in case we need to search through
472 // many groups with deleted elements
TEST(Table, InsertCollisionAndFindAfterDelete) {
  BadTable t;  // all elements go to the same group.
  // Have at least 2 groups with Group::kWidth collisions
  // plus some extra collisions in the last group.
  constexpr size_t kNumInserts = Group::kWidth * 2 + 5;
  for (size_t i = 0; i < kNumInserts; ++i) {
    auto res = t.emplace(i);
    EXPECT_TRUE(res.second);
    EXPECT_THAT(*res.first, i);
    EXPECT_EQ(i + 1, t.size());
  }

  // Remove elements one by one and check
  // that we still can find all other elements.
  for (size_t i = 0; i < kNumInserts; ++i) {
    EXPECT_EQ(1, t.erase(i)) << i;
    for (size_t j = i + 1; j < kNumInserts; ++j) {
      EXPECT_THAT(*t.find(j), j);
      // Re-emplacing an existing key must be a no-op even though the probe
      // path now crosses kDeleted slots.
      auto res = t.emplace(j);
      EXPECT_FALSE(res.second) << i << " " << j;
      EXPECT_THAT(*res.first, j);
      EXPECT_EQ(kNumInserts - i - 1, t.size());
    }
  }
  EXPECT_TRUE(t.empty());
}
499 
// lazy_emplace invokes the factory callback only when the key is absent;
// on a hit it returns the existing element untouched.
TEST(Table, LazyEmplace) {
  StringTable t;
  bool called = false;
  auto it = t.lazy_emplace("abc", [&](const StringTable::constructor& f) {
    called = true;
    f("abc", "ABC");
  });
  EXPECT_TRUE(called);
  EXPECT_THAT(*it, Pair("abc", "ABC"));
  called = false;
  // Key already present: the callback must not run and the old value stays.
  it = t.lazy_emplace("abc", [&](const StringTable::constructor& f) {
    called = true;
    f("abc", "DEF");
  });
  EXPECT_FALSE(called);
  EXPECT_THAT(*it, Pair("abc", "ABC"));
}
517 
// contains() on an empty table is always false.
TEST(Table, ContainsEmpty) {
  IntTable table;
  EXPECT_FALSE(table.contains(0));
}
523 
// contains() tracks insert and erase of a single key.
TEST(Table, Contains1) {
  IntTable table;

  EXPECT_TRUE(table.insert(0).second);
  EXPECT_TRUE(table.contains(0));
  EXPECT_FALSE(table.contains(1));

  EXPECT_EQ(1, table.erase(0));
  EXPECT_FALSE(table.contains(0));
}
534 
// contains() reports false after clear() removes everything.
TEST(Table, Contains2) {
  IntTable table;

  EXPECT_TRUE(table.insert(0).second);
  EXPECT_TRUE(table.contains(0));
  EXPECT_FALSE(table.contains(1));

  table.clear();
  EXPECT_FALSE(table.contains(0));
}
545 
// Counts how many DecomposeType objects were constructed, so tests can verify
// that heterogeneous lookup avoids key construction.
int decompose_constructed;
struct DecomposeType {
  DecomposeType(int i) : i(i) {  // NOLINT
    ++decompose_constructed;
  }

  // Constructing from a C string delegates to the int constructor using the
  // first character's value.
  explicit DecomposeType(const char* d) : DecomposeType(*d) {}

  int i;
};
556 
// Transparent hash accepting DecomposeType, int and const char* so lookups
// need not materialize a DecomposeType key.
struct DecomposeHash {
  using is_transparent = void;
  size_t operator()(DecomposeType a) const { return a.i; }
  size_t operator()(int a) const { return a; }
  size_t operator()(const char* a) const { return *a; }
};

// Matching transparent equality for the same three key forms.
struct DecomposeEq {
  using is_transparent = void;
  bool operator()(DecomposeType a, DecomposeType b) const { return a.i == b.i; }
  bool operator()(DecomposeType a, int b) const { return a.i == b; }
  bool operator()(DecomposeType a, const char* b) const { return a.i == *b; }
};
570 
// Policy storing a DecomposeType per slot; forwards arbitrary argument types
// into the slot so construction counting stays observable.
struct DecomposePolicy {
  using slot_type = DecomposeType;
  using key_type = DecomposeType;
  using init_type = DecomposeType;

  template <typename T>
  static void construct(void*, DecomposeType* slot, T&& v) {
    *slot = DecomposeType(std::forward<T>(v));
  }
  static void destroy(void*, DecomposeType*) {}
  static DecomposeType& element(slot_type* slot) { return *slot; }

  template <class F, class T>
  static auto apply(F&& f, const T& x) -> decltype(std::forward<F>(f)(x, x)) {
    return std::forward<F>(f)(x, x);
  }
};
588 
// Drives every insert/emplace entry point and checks how many DecomposeType
// constructions each performs. `construct_three` says whether the given
// Hash/Eq pair can look up a const char* key without constructing a
// DecomposeType (false) or must construct one (true).
template <typename Hash, typename Eq>
void TestDecompose(bool construct_three) {
  DecomposeType elem{0};
  const int one = 1;
  const char* three_p = "3";
  const auto& three = three_p;

  raw_hash_set<DecomposePolicy, Hash, Eq, std::allocator<int>> set1;

  decompose_constructed = 0;
  int expected_constructed = 0;
  EXPECT_EQ(expected_constructed, decompose_constructed);
  // Inserting an existing DecomposeType copies bits; no new construction.
  set1.insert(elem);
  EXPECT_EQ(expected_constructed, decompose_constructed);
  // First insertion of a new key constructs exactly one element.
  set1.insert(1);
  EXPECT_EQ(++expected_constructed, decompose_constructed);
  set1.emplace("3");
  EXPECT_EQ(++expected_constructed, decompose_constructed);
  EXPECT_EQ(expected_constructed, decompose_constructed);

  // From here on every key already exists, so a duplicate insert/emplace
  // should construct nothing — except the const char* forms, which cost
  // one construction when the functors are not transparent for it.
  {  // insert(T&&)
    set1.insert(1);
    EXPECT_EQ(expected_constructed, decompose_constructed);
  }

  {  // insert(const T&)
    set1.insert(one);
    EXPECT_EQ(expected_constructed, decompose_constructed);
  }

  {  // insert(hint, T&&)
    set1.insert(set1.begin(), 1);
    EXPECT_EQ(expected_constructed, decompose_constructed);
  }

  {  // insert(hint, const T&)
    set1.insert(set1.begin(), one);
    EXPECT_EQ(expected_constructed, decompose_constructed);
  }

  {  // emplace(...)
    set1.emplace(1);
    EXPECT_EQ(expected_constructed, decompose_constructed);
    set1.emplace("3");
    expected_constructed += construct_three;
    EXPECT_EQ(expected_constructed, decompose_constructed);
    set1.emplace(one);
    EXPECT_EQ(expected_constructed, decompose_constructed);
    set1.emplace(three);
    expected_constructed += construct_three;
    EXPECT_EQ(expected_constructed, decompose_constructed);
  }

  {  // emplace_hint(...)
    set1.emplace_hint(set1.begin(), 1);
    EXPECT_EQ(expected_constructed, decompose_constructed);
    set1.emplace_hint(set1.begin(), "3");
    expected_constructed += construct_three;
    EXPECT_EQ(expected_constructed, decompose_constructed);
    set1.emplace_hint(set1.begin(), one);
    EXPECT_EQ(expected_constructed, decompose_constructed);
    set1.emplace_hint(set1.begin(), three);
    expected_constructed += construct_three;
    EXPECT_EQ(expected_constructed, decompose_constructed);
  }
}
655 
// Runs TestDecompose with fully-transparent functors (no construction for
// "3") and with int-only-transparent ones (construction required for "3").
TEST(Table, Decompose) {
  TestDecompose<DecomposeHash, DecomposeEq>(false);

  struct TransparentHashIntOverload {
    size_t operator()(DecomposeType a) const { return a.i; }
    size_t operator()(int a) const { return a; }
  };
  struct TransparentEqIntOverload {
    bool operator()(DecomposeType a, DecomposeType b) const {
      return a.i == b.i;
    }
    bool operator()(DecomposeType a, int b) const { return a.i == b; }
  };
  // Missing a const char* overload on either side forces key construction.
  TestDecompose<TransparentHashIntOverload, DecomposeEq>(true);
  TestDecompose<TransparentHashIntOverload, TransparentEqIntOverload>(true);
  TestDecompose<DecomposeHash, TransparentEqIntOverload>(true);
}
673 
674 // Returns the largest m such that a table with m elements has the same number
675 // of buckets as a table with n elements.
// Returns the largest element count that still fits in the same bucket_count
// as a table holding n elements (i.e. the max load before the next rehash).
size_t MaxDensitySize(size_t n) {
  IntTable t;
  t.reserve(n);
  for (size_t i = 0; i != n; ++i) t.emplace(i);
  const size_t c = t.bucket_count();
  // Keep inserting until the bucket count changes, then back off by one.
  while (c == t.bucket_count()) t.emplace(n++);
  return t.size() - 1;
}
684 
// Hash with only 1000 distinct values, so keys that are multiples of 1000
// all collide; used to build long probe chains deliberately.
struct Modulo1000Hash {
  size_t operator()(int x) const { return x % 1000; }
};

struct Modulo1000HashTable
    : public raw_hash_set<IntPolicy, Modulo1000Hash, std::equal_to<int>,
                          std::allocator<int>> {};
692 
693 // Test that rehash with no resize happen in case of many deleted slots.
// Verifies that when many slots are kDeleted, the table rehashes in place
// (capacity unchanged) rather than growing, and that no element is lost.
TEST(Table, RehashWithNoResize) {
  Modulo1000HashTable t;
  // Adding the same length (and the same hash) strings
  // to have at least kMinFullGroups groups
  // with Group::kWidth collisions. Then fill up to MaxDensitySize;
  const size_t kMinFullGroups = 7;
  std::vector<int> keys;
  for (size_t i = 0; i < MaxDensitySize(Group::kWidth * kMinFullGroups); ++i) {
    int k = i * 1000;  // multiples of 1000 share a hash under Modulo1000Hash
    t.emplace(k);
    keys.push_back(k);
  }
  const size_t capacity = t.capacity();

  // Remove elements from all groups except the first and the last one.
  // All elements removed from full groups will be marked as kDeleted.
  const size_t erase_begin = Group::kWidth / 2;
  const size_t erase_end = (t.size() / Group::kWidth - 1) * Group::kWidth;
  for (size_t i = erase_begin; i < erase_end; ++i) {
    EXPECT_EQ(1, t.erase(keys[i])) << i;
  }
  keys.erase(keys.begin() + erase_begin, keys.begin() + erase_end);

  auto last_key = keys.back();
  size_t last_key_num_probes = GetHashtableDebugNumProbes(t, last_key);

  // Make sure that we have to make a lot of probes for last key.
  ASSERT_GT(last_key_num_probes, kMinFullGroups);

  int x = 1;
  // Insert and erase one element, before inplace rehash happen.
  // The probe count for last_key drops once the in-place rehash clears the
  // kDeleted slots.
  while (last_key_num_probes == GetHashtableDebugNumProbes(t, last_key)) {
    t.emplace(x);
    ASSERT_EQ(capacity, t.capacity());
    // All elements should be there.
    ASSERT_TRUE(t.find(x) != t.end()) << x;
    for (const auto& k : keys) {
      ASSERT_TRUE(t.find(k) != t.end()) << k;
    }
    t.erase(x);
    ++x;
  }
}
737 
// Stress test: fill the table to max density, then churn (erase oldest,
// insert newest) many times at constant size. Guards against pathological
// accumulation of deleted slots.
TEST(Table, InsertEraseStressTest) {
  IntTable t;
  const size_t kMinElementCount = 250;
  std::deque<int> keys;  // FIFO of live keys
  size_t i = 0;
  for (; i < MaxDensitySize(kMinElementCount); ++i) {
    t.emplace(i);
    keys.push_back(i);
  }
  const size_t kNumIterations = 1000000;
  for (; i < kNumIterations; ++i) {
    ASSERT_EQ(1, t.erase(keys.front()));
    keys.pop_front();
    t.emplace(i);
    keys.push_back(i);
  }
}
755 
// Braced-initializer arguments should all resolve to insert(init_type).
TEST(Table, InsertOverloads) {
  StringTable table;
  // These should all trigger the insert(init_type) overload.
  table.insert({{}, {}});
  table.insert({"ABC", {}});
  table.insert({"DEF", "!!!"});

  EXPECT_THAT(table, UnorderedElementsAre(Pair("", ""), Pair("ABC", ""),
                                          Pair("DEF", "!!!")));
}
766 
// 100k widely-spread keys (high bits set) insert and look up correctly.
TEST(Table, LargeTable) {
  IntTable table;
  for (int64_t i = 0; i != 100000; ++i) {
    table.emplace(i << 40);
  }
  for (int64_t i = 0; i != 100000; ++i) {
    ASSERT_EQ(i << 40, *table.find(i << 40));
  }
}
772 
773 // Timeout if copy is quadratic as it was in Rust.
// Element-by-element copy into a fresh table must be near-linear; a
// quadratic copy (as once seen in Rust's hash map) would hit the timeout.
TEST(Table, EnsureNonQuadraticAsInRust) {
  static const size_t kLargeSize = 1 << 15;

  IntTable source;
  for (size_t i = 0; i != kLargeSize; ++i) {
    source.insert(i);
  }

  // If this is quadratic, the test will timeout.
  IntTable destination;
  for (const auto& entry : source) {
    destination.insert(entry);
  }
}
786 
// Regression test: clear() must keep the capacity and reuse the same storage
// region, so re-inserted elements land near their previous addresses.
TEST(Table, ClearBug) {
  IntTable t;
  constexpr size_t capacity = container_internal::Group::kWidth - 1;
  constexpr size_t max_size = capacity / 2 + 1;
  for (size_t i = 0; i < max_size; ++i) {
    t.insert(i);
  }
  ASSERT_EQ(capacity, t.capacity());
  intptr_t original = reinterpret_cast<intptr_t>(&*t.find(2));
  t.clear();
  ASSERT_EQ(capacity, t.capacity());
  for (size_t i = 0; i < max_size; ++i) {
    t.insert(i);
  }
  ASSERT_EQ(capacity, t.capacity());
  intptr_t second = reinterpret_cast<intptr_t>(&*t.find(2));
  // We are checking that original and second are close enough to each other
  // that they are probably still in the same group.  This is not strictly
  // guaranteed.
  EXPECT_LT(std::abs(original - second),
            capacity * sizeof(IntTable::value_type));
}
809 
// Erasing via iterator removes the element and makes it unfindable.
TEST(Table, Erase) {
  IntTable table;
  EXPECT_TRUE(table.find(0) == table.end());
  const auto inserted = table.emplace(0);
  EXPECT_TRUE(inserted.second);
  EXPECT_EQ(1, table.size());
  table.erase(inserted.first);
  EXPECT_EQ(0, table.size());
  EXPECT_TRUE(table.find(0) == table.end());
}
820 
// The erase(it++) idiom must leave the post-incremented iterator valid, so
// a full sweep erases every element exactly once.
TEST(Table, EraseMaintainsValidIterator) {
  IntTable table;
  const int kNumElements = 100;
  for (int i = 0; i < kNumElements; ++i) {
    EXPECT_TRUE(table.emplace(i).second);
  }
  EXPECT_EQ(table.size(), kNumElements);

  int erase_count = 0;
  for (auto it = table.begin(); it != table.end();) {
    table.erase(it++);
    ++erase_count;
  }

  EXPECT_TRUE(table.empty());
  EXPECT_EQ(erase_count, kNumElements);
}
839 
840 // Collect N bad keys by following algorithm:
841 // 1. Create an empty table and reserve it to 2 * N.
842 // 2. Insert N random elements.
843 // 3. Take first Group::kWidth - 1 to bad_keys array.
844 // 4. Clear the table without resize.
845 // 5. Go to point 2 while N keys not collected
CollectBadMergeKeys(size_t N)846 std::vector<int64_t> CollectBadMergeKeys(size_t N) {
847   static constexpr int kGroupSize = Group::kWidth - 1;
848 
849   auto topk_range = [](size_t b, size_t e, IntTable* t) -> std::vector<int64_t> {
850     for (size_t i = b; i != e; ++i) {
851       t->emplace(i);
852     }
853     std::vector<int64_t> res;
854     res.reserve(kGroupSize);
855     auto it = t->begin();
856     for (size_t i = b; i != e && i != b + kGroupSize; ++i, ++it) {
857       res.push_back(*it);
858     }
859     return res;
860   };
861 
862   std::vector<int64_t> bad_keys;
863   bad_keys.reserve(N);
864   IntTable t;
865   t.reserve(N * 2);
866 
867   for (size_t b = 0; bad_keys.size() < N; b += N) {
868     auto keys = topk_range(b, b + N, &t);
869     bad_keys.insert(bad_keys.end(), keys.begin(), keys.end());
870     t.erase(t.begin(), t.end());
871     EXPECT_TRUE(t.empty());
872   }
873   return bad_keys;
874 }
875 
// Probe-length statistics accumulated over a set of trial tables.
struct ProbeStats {
  // Number of elements with specific probe length over all tested tables.
  std::vector<size_t> all_probes_histogram;
  // Ratios total_probe_length/size for every tested table.
  std::vector<double> single_table_ratios;

  // Merges the histograms element-wise and concatenates per-table ratios.
  friend ProbeStats operator+(const ProbeStats& a, const ProbeStats& b) {
    ProbeStats res = a;
    res.all_probes_histogram.resize(std::max(res.all_probes_histogram.size(),
                                             b.all_probes_histogram.size()));
    std::transform(b.all_probes_histogram.begin(), b.all_probes_histogram.end(),
                   res.all_probes_histogram.begin(),
                   res.all_probes_histogram.begin(), std::plus<size_t>());
    res.single_table_ratios.insert(res.single_table_ratios.end(),
                                   b.single_table_ratios.begin(),
                                   b.single_table_ratios.end());
    return res;
  }

  // Average ratio total_probe_length/size over tables.
  // Returns 0 when no tables were recorded (avoids 0/0 -> NaN).
  double AvgRatio() const {
    if (single_table_ratios.empty()) return 0.0;
    return std::accumulate(single_table_ratios.begin(),
                           single_table_ratios.end(), 0.0) /
           single_table_ratios.size();
  }

  // Maximum ratio total_probe_length/size over tables.
  // Returns 0 when no tables were recorded; previously this dereferenced
  // max_element() of an empty range, which is undefined behavior.
  double MaxRatio() const {
    if (single_table_ratios.empty()) return 0.0;
    return *std::max_element(single_table_ratios.begin(),
                             single_table_ratios.end());
  }

  // Percentile ratio total_probe_length/size over tables.
  double PercentileRatio(double Percentile = 0.95) const {
    auto r = single_table_ratios;
    auto mid = r.begin() + static_cast<size_t>(r.size() * Percentile);
    if (mid != r.end()) {
      // Partially sort so *mid is the Percentile-th smallest ratio.
      std::nth_element(r.begin(), mid, r.end());
      return *mid;
    } else {
      return MaxRatio();
    }
  }

  // Maximum probe length over all elements and all tables.
  size_t MaxProbe() const { return all_probes_histogram.size(); }

  // Fraction of elements with specified probe length.
  std::vector<double> ProbeNormalizedHistogram() const {
    double total_elements = std::accumulate(all_probes_histogram.begin(),
                                            all_probes_histogram.end(), 0ull);
    std::vector<double> res;
    res.reserve(all_probes_histogram.size());
    for (size_t p : all_probes_histogram) {
      res.push_back(p / total_elements);
    }
    return res;
  }

  // Smallest probe length whose cumulative frequency covers Percentile.
  size_t PercentileProbe(double Percentile = 0.99) const {
    size_t idx = 0;
    for (double p : ProbeNormalizedHistogram()) {
      if (Percentile > p) {
        Percentile -= p;
        ++idx;
      } else {
        return idx;
      }
    }
    return idx;
  }

  friend std::ostream& operator<<(std::ostream& out, const ProbeStats& s) {
    out << "{AvgRatio:" << s.AvgRatio() << ", MaxRatio:" << s.MaxRatio()
        << ", PercentileRatio:" << s.PercentileRatio()
        << ", MaxProbe:" << s.MaxProbe() << ", Probes=[";
    for (double p : s.ProbeNormalizedHistogram()) {
      out << p << ",";
    }
    out << "]}";

    return out;
  }
};
959 
// Thresholds a collected ProbeStats must stay under; see VerifyStats.
// (Member names keep the original "pecentile" spelling because other code in
// this file refers to them.)
struct ExpectedStats {
  double avg_ratio;
  double max_ratio;
  std::vector<std::pair<double, double>> pecentile_ratios;
  std::vector<std::pair<double, double>> pecentile_probes;

  friend std::ostream& operator<<(std::ostream& out, const ExpectedStats& s) {
    out << "{AvgRatio:" << s.avg_ratio << ", MaxRatio:" << s.max_ratio
        << ", PercentileRatios: [";
    for (const auto& ratio : s.pecentile_ratios) {
      out << ratio.first << ":" << ratio.second << ", ";
    }
    out << "], PercentileProbes: [";
    for (const auto& probe : s.pecentile_probes) {
      out << probe.first << ":" << probe.second << ", ";
    }
    out << "]}";

    return out;
  }
};
981 
VerifyStats(size_t size,const ExpectedStats & exp,const ProbeStats & stats)982 void VerifyStats(size_t size, const ExpectedStats& exp,
983                  const ProbeStats& stats) {
984   EXPECT_LT(stats.AvgRatio(), exp.avg_ratio) << size << " " << stats;
985   EXPECT_LT(stats.MaxRatio(), exp.max_ratio) << size << " " << stats;
986   for (auto pr : exp.pecentile_ratios) {
987     EXPECT_LE(stats.PercentileRatio(pr.first), pr.second)
988         << size << " " << pr.first << " " << stats;
989   }
990 
991   for (auto pr : exp.pecentile_probes) {
992     EXPECT_LE(stats.PercentileProbe(pr.first), pr.second)
993         << size << " " << pr.first << " " << stats;
994   }
995 }
996 
997 using ProbeStatsPerSize = std::map<size_t, ProbeStats>;
998 
999 // Collect total ProbeStats on num_iters iterations of the following algorithm:
1000 // 1. Create new table and reserve it to keys.size() * 2
1001 // 2. Insert all keys xored with seed
1002 // 3. Collect ProbeStats from final table.
CollectProbeStatsOnKeysXoredWithSeed(const std::vector<int64_t> & keys,size_t num_iters)1003 ProbeStats CollectProbeStatsOnKeysXoredWithSeed(const std::vector<int64_t>& keys,
1004                                                 size_t num_iters) {
1005   const size_t reserve_size = keys.size() * 2;
1006 
1007   ProbeStats stats;
1008 
1009   int64_t seed = 0x71b1a19b907d6e33;
1010   while (num_iters--) {
1011     seed = static_cast<int64_t>(static_cast<uint64_t>(seed) * 17 + 13);
1012     IntTable t1;
1013     t1.reserve(reserve_size);
1014     for (const auto& key : keys) {
1015       t1.emplace(key ^ seed);
1016     }
1017 
1018     auto probe_histogram = GetHashtableDebugNumProbesHistogram(t1);
1019     stats.all_probes_histogram.resize(
1020         std::max(stats.all_probes_histogram.size(), probe_histogram.size()));
1021     std::transform(probe_histogram.begin(), probe_histogram.end(),
1022                    stats.all_probes_histogram.begin(),
1023                    stats.all_probes_histogram.begin(), std::plus<size_t>());
1024 
1025     size_t total_probe_seq_length = 0;
1026     for (size_t i = 0; i < probe_histogram.size(); ++i) {
1027       total_probe_seq_length += i * probe_histogram[i];
1028     }
1029     stats.single_table_ratios.push_back(total_probe_seq_length * 1.0 /
1030                                         keys.size());
1031     t1.erase(t1.begin(), t1.end());
1032   }
1033   return stats;
1034 }
1035 
XorSeedExpectedStats()1036 ExpectedStats XorSeedExpectedStats() {
1037   constexpr bool kRandomizesInserts =
1038 #ifdef NDEBUG
1039       false;
1040 #else   // NDEBUG
1041       true;
1042 #endif  // NDEBUG
1043 
1044   // The effective load factor is larger in non-opt mode because we insert
1045   // elements out of order.
1046   switch (container_internal::Group::kWidth) {
1047     case 8:
1048       if (kRandomizesInserts) {
1049   return {0.05,
1050           1.0,
1051           {{0.95, 0.5}},
1052           {{0.95, 0}, {0.99, 2}, {0.999, 4}, {0.9999, 10}}};
1053       } else {
1054   return {0.05,
1055           2.0,
1056           {{0.95, 0.1}},
1057           {{0.95, 0}, {0.99, 2}, {0.999, 4}, {0.9999, 10}}};
1058       }
1059     case 16:
1060       if (kRandomizesInserts) {
1061         return {0.1,
1062                 1.0,
1063                 {{0.95, 0.1}},
1064                 {{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}};
1065       } else {
1066         return {0.05,
1067                 1.0,
1068                 {{0.95, 0.05}},
1069                 {{0.95, 0}, {0.99, 1}, {0.999, 4}, {0.9999, 10}}};
1070       }
1071   }
1072   ABSL_RAW_LOG(FATAL, "%s", "Unknown Group width");
1073   return {};
1074 }
1075 
TEST(Table, DISABLED_EnsureNonQuadraticTopNXorSeedByProbeSeqLength) {
  ProbeStatsPerSize stats;
  std::vector<size_t> sizes = {Group::kWidth << 5, Group::kWidth << 10};
  // Accumulate probe stats over 200 trials per size using bad merge keys.
  for (size_t size : sizes) {
    stats[size] =
        CollectProbeStatsOnKeysXoredWithSeed(CollectBadMergeKeys(size), 200);
  }
  auto expected = XorSeedExpectedStats();
  for (size_t size : sizes) {
    VerifyStats(size, expected, stats[size]);
  }
}
1089 
1090 // Collect total ProbeStats on num_iters iterations of the following algorithm:
1091 // 1. Create new table
1092 // 2. Select 10% of keys and insert 10 elements key * 17 + j * 13
1093 // 3. Collect ProbeStats from final table
CollectProbeStatsOnLinearlyTransformedKeys(const std::vector<int64_t> & keys,size_t num_iters)1094 ProbeStats CollectProbeStatsOnLinearlyTransformedKeys(
1095     const std::vector<int64_t>& keys, size_t num_iters) {
1096   ProbeStats stats;
1097 
1098   std::random_device rd;
1099   std::mt19937 rng(rd());
1100   auto linear_transform = [](size_t x, size_t y) { return x * 17 + y * 13; };
1101   std::uniform_int_distribution<size_t> dist(0, keys.size()-1);
1102   while (num_iters--) {
1103     IntTable t1;
1104     size_t num_keys = keys.size() / 10;
1105     size_t start = dist(rng);
1106     for (size_t i = 0; i != num_keys; ++i) {
1107       for (size_t j = 0; j != 10; ++j) {
1108         t1.emplace(linear_transform(keys[(i + start) % keys.size()], j));
1109       }
1110     }
1111 
1112     auto probe_histogram = GetHashtableDebugNumProbesHistogram(t1);
1113     stats.all_probes_histogram.resize(
1114         std::max(stats.all_probes_histogram.size(), probe_histogram.size()));
1115     std::transform(probe_histogram.begin(), probe_histogram.end(),
1116                    stats.all_probes_histogram.begin(),
1117                    stats.all_probes_histogram.begin(), std::plus<size_t>());
1118 
1119     size_t total_probe_seq_length = 0;
1120     for (size_t i = 0; i < probe_histogram.size(); ++i) {
1121       total_probe_seq_length += i * probe_histogram[i];
1122     }
1123     stats.single_table_ratios.push_back(total_probe_seq_length * 1.0 /
1124                                         t1.size());
1125     t1.erase(t1.begin(), t1.end());
1126   }
1127   return stats;
1128 }
1129 
LinearTransformExpectedStats()1130 ExpectedStats LinearTransformExpectedStats() {
1131   constexpr bool kRandomizesInserts =
1132 #ifdef NDEBUG
1133       false;
1134 #else   // NDEBUG
1135       true;
1136 #endif  // NDEBUG
1137 
1138   // The effective load factor is larger in non-opt mode because we insert
1139   // elements out of order.
1140   switch (container_internal::Group::kWidth) {
1141     case 8:
1142       if (kRandomizesInserts) {
1143         return {0.1,
1144                 0.5,
1145                 {{0.95, 0.3}},
1146                 {{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}};
1147       } else {
1148         return {0.15,
1149                 0.5,
1150                 {{0.95, 0.3}},
1151                 {{0.95, 0}, {0.99, 3}, {0.999, 15}, {0.9999, 25}}};
1152       }
1153     case 16:
1154       if (kRandomizesInserts) {
1155         return {0.1,
1156                 0.4,
1157                 {{0.95, 0.3}},
1158                 {{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}};
1159       } else {
1160         return {0.05,
1161                 0.2,
1162                 {{0.95, 0.1}},
1163                 {{0.95, 0}, {0.99, 1}, {0.999, 6}, {0.9999, 10}}};
1164       }
1165   }
1166   ABSL_RAW_LOG(FATAL, "%s", "Unknown Group width");
1167   return {};
1168 }
1169 
TEST(Table, DISABLED_EnsureNonQuadraticTopNLinearTransformByProbeSeqLength) {
  ProbeStatsPerSize stats;
  std::vector<size_t> sizes = {Group::kWidth << 5, Group::kWidth << 10};
  // Accumulate probe stats over 300 trials per size using bad merge keys.
  for (size_t size : sizes) {
    stats[size] = CollectProbeStatsOnLinearlyTransformedKeys(
        CollectBadMergeKeys(size), 300);
  }
  auto expected = LinearTransformExpectedStats();
  for (size_t size : sizes) {
    VerifyStats(size, expected, stats[size]);
  }
}
1183 
TEST(Table, EraseCollision) {
  // BadTable forces all keys into one probe chain, so erasures leave
  // tombstones in front of live elements.
  BadTable t;

  // 1 2 3
  t.emplace(1);
  t.emplace(2);
  t.emplace(3);
  EXPECT_THAT(*t.find(1), 1);
  EXPECT_THAT(*t.find(2), 2);
  EXPECT_THAT(*t.find(3), 3);
  EXPECT_EQ(3, t.size());

  // 1 DELETED 3
  t.erase(t.find(2));
  EXPECT_THAT(*t.find(1), 1);
  EXPECT_TRUE(t.find(2) == t.end());
  EXPECT_THAT(*t.find(3), 3);
  EXPECT_EQ(2, t.size());

  // DELETED DELETED 3
  t.erase(t.find(1));
  EXPECT_TRUE(t.find(1) == t.end());
  EXPECT_TRUE(t.find(2) == t.end());
  EXPECT_THAT(*t.find(3), 3);
  EXPECT_EQ(1, t.size());

  // DELETED DELETED DELETED
  t.erase(t.find(3));
  EXPECT_TRUE(t.find(1) == t.end());
  EXPECT_TRUE(t.find(2) == t.end());
  EXPECT_TRUE(t.find(3) == t.end());
  EXPECT_EQ(0, t.size());
}
1217 
TEST(Table, EraseInsertProbing) {
  BadTable t(100);

  // 1 2 3 4
  t.emplace(1);
  t.emplace(2);
  t.emplace(3);
  t.emplace(4);

  // 1 DELETED 3 DELETED
  t.erase(t.find(2));
  t.erase(t.find(4));

  // New insertions may reuse the deleted slots: 1 10 3 11 12
  t.emplace(10);
  t.emplace(11);
  t.emplace(12);

  EXPECT_EQ(5, t.size());
  EXPECT_THAT(t, UnorderedElementsAre(1, 10, 3, 11, 12));
}
1239 
TEST(Table, Clear) {
  IntTable t;
  EXPECT_TRUE(t.find(0) == t.end());
  // clear() on an empty table is a no-op.
  t.clear();
  EXPECT_TRUE(t.find(0) == t.end());
  auto insertion = t.emplace(0);
  EXPECT_TRUE(insertion.second);
  EXPECT_EQ(1, t.size());
  t.clear();
  EXPECT_EQ(0, t.size());
  EXPECT_TRUE(t.find(0) == t.end());
}
1252 
TEST(Table, Swap) {
  IntTable t;
  EXPECT_TRUE(t.find(0) == t.end());
  auto insertion = t.emplace(0);
  EXPECT_TRUE(insertion.second);
  EXPECT_EQ(1, t.size());
  IntTable u;
  // After swapping, the element lives in u and t is empty.
  t.swap(u);
  EXPECT_EQ(0, t.size());
  EXPECT_EQ(1, u.size());
  EXPECT_TRUE(t.find(0) == t.end());
  EXPECT_THAT(*u.find(0), 0);
}
1266 
TEST(Table, Rehash) {
  IntTable t;
  EXPECT_TRUE(t.find(0) == t.end());
  t.emplace(0);
  t.emplace(1);
  EXPECT_EQ(2, t.size());
  // Growing the table preserves the stored elements.
  t.rehash(128);
  EXPECT_EQ(2, t.size());
  EXPECT_THAT(*t.find(0), 0);
  EXPECT_THAT(*t.find(1), 1);
}
1278 
TEST(Table, RehashDoesNotRehashWhenNotNecessary) {
  IntTable t;
  t.emplace(0);
  t.emplace(1);
  auto* element_addr = &*t.find(0);
  // rehash(1) is already satisfied, so elements must not move.
  t.rehash(1);
  EXPECT_EQ(element_addr, &*t.find(0));
}
1287 
TEST(Table, RehashZeroDoesNotAllocateOnEmptyTable) {
  IntTable t;
  // rehash(0) on a never-used table must not allocate a backing array.
  t.rehash(0);
  EXPECT_EQ(0, t.bucket_count());
}
1293 
TEST(Table, RehashZeroDeallocatesEmptyTable) {
  IntTable t;
  t.emplace(0);
  t.clear();
  EXPECT_NE(0, t.bucket_count());
  // rehash(0) on an empty table releases the backing array.
  t.rehash(0);
  EXPECT_EQ(0, t.bucket_count());
}
1302 
TEST(Table, RehashZeroForcesRehash) {
  IntTable t;
  t.emplace(0);
  t.emplace(1);
  auto* element_addr = &*t.find(0);
  // rehash(0) on a non-empty table forces a rehash, moving elements.
  t.rehash(0);
  EXPECT_NE(element_addr, &*t.find(0));
}
1311 
TEST(Table, ConstructFromInitList) {
  using P = std::pair<std::string, std::string>;
  struct Q {
    operator P() const { return {}; }
  };
  // Compile-time check: initializer lists accept the value type, anything
  // convertible to it, and nested braces.
  StringTable t = {P(), Q(), {}, {{}, {}}};
}
1319 
TEST(Table, CopyConstruct) {
  IntTable t;
  t.emplace(0);
  EXPECT_EQ(1, t.size());
  {
    // Direct initialization.
    IntTable u(t);
    EXPECT_EQ(1, u.size());
    EXPECT_THAT(*u.find(0), 0);
  }
  {
    // Braced initialization.
    IntTable u{t};
    EXPECT_EQ(1, u.size());
    EXPECT_THAT(*u.find(0), 0);
  }
  {
    // Copy initialization.
    IntTable u = t;
    EXPECT_EQ(1, u.size());
    EXPECT_THAT(*u.find(0), 0);
  }
}
1340 
TEST(Table, CopyConstructWithAlloc) {
  StringTable t;
  t.emplace("a", "b");
  EXPECT_EQ(1, t.size());
  // Copy construction with an explicitly supplied allocator.
  StringTable u(t, Alloc<std::pair<std::string, std::string>>());
  EXPECT_EQ(1, u.size());
  EXPECT_THAT(*u.find("a"), Pair("a", "b"));
}
1349 
1350 struct ExplicitAllocIntTable
1351     : raw_hash_set<IntPolicy, container_internal::hash_default_hash<int64_t>,
1352                    std::equal_to<int64_t>, Alloc<int64_t>> {
ExplicitAllocIntTableabsl::container_internal::__anon46dfdfe20111::ExplicitAllocIntTable1353   ExplicitAllocIntTable() {}
1354 };
1355 
TEST(Table, AllocWithExplicitCtor) {
  ExplicitAllocIntTable t;
  EXPECT_EQ(0, t.size());
}
1360 
TEST(Table, MoveConstruct) {
  {
    // Direct initialization.
    StringTable t;
    t.emplace("a", "b");
    EXPECT_EQ(1, t.size());

    StringTable u(std::move(t));
    EXPECT_EQ(1, u.size());
    EXPECT_THAT(*u.find("a"), Pair("a", "b"));
  }
  {
    // Braced initialization.
    StringTable t;
    t.emplace("a", "b");
    EXPECT_EQ(1, t.size());

    StringTable u{std::move(t)};
    EXPECT_EQ(1, u.size());
    EXPECT_THAT(*u.find("a"), Pair("a", "b"));
  }
  {
    // Copy initialization from an rvalue.
    StringTable t;
    t.emplace("a", "b");
    EXPECT_EQ(1, t.size());

    StringTable u = std::move(t);
    EXPECT_EQ(1, u.size());
    EXPECT_THAT(*u.find("a"), Pair("a", "b"));
  }
}
1390 
TEST(Table, MoveConstructWithAlloc) {
  StringTable t;
  t.emplace("a", "b");
  EXPECT_EQ(1, t.size());
  // Move construction with an explicitly supplied allocator.
  StringTable u(std::move(t), Alloc<std::pair<std::string, std::string>>());
  EXPECT_EQ(1, u.size());
  EXPECT_THAT(*u.find("a"), Pair("a", "b"));
}
1399 
TEST(Table, CopyAssign) {
  StringTable t;
  t.emplace("a", "b");
  EXPECT_EQ(1, t.size());
  StringTable u;
  u = t;
  EXPECT_EQ(1, u.size());
  EXPECT_THAT(*u.find("a"), Pair("a", "b"));
}
1409 
TEST(Table, CopySelfAssign) {
  StringTable t;
  t.emplace("a", "b");
  EXPECT_EQ(1, t.size());
  // The *& dance defeats compiler self-assignment warnings.
  t = *&t;
  EXPECT_EQ(1, t.size());
  EXPECT_THAT(*t.find("a"), Pair("a", "b"));
}
1418 
TEST(Table, MoveAssign) {
  StringTable t;
  t.emplace("a", "b");
  EXPECT_EQ(1, t.size());
  StringTable u;
  u = std::move(t);
  EXPECT_EQ(1, u.size());
  EXPECT_THAT(*u.find("a"), Pair("a", "b"));
}
1428 
TEST(Table, Equality) {
  StringTable t;
  std::vector<std::pair<std::string, std::string>> v = {{"a", "b"},
                                                        {"aa", "bb"}};
  t.insert(std::begin(v), std::end(v));
  // A copy compares equal to the original.
  StringTable u = t;
  EXPECT_EQ(u, t);
}
1437 
TEST(Table, Equality2) {
  // Same keys, different mapped values: tables must compare unequal.
  StringTable t;
  std::vector<std::pair<std::string, std::string>> v1 = {{"a", "b"},
                                                         {"aa", "bb"}};
  t.insert(std::begin(v1), std::end(v1));
  StringTable u;
  std::vector<std::pair<std::string, std::string>> v2 = {{"a", "a"},
                                                         {"aa", "aa"}};
  u.insert(std::begin(v2), std::end(v2));
  EXPECT_NE(u, t);
}
1449 
TEST(Table, Equality3) {
  // Disjoint key sets: tables must compare unequal.
  StringTable t;
  std::vector<std::pair<std::string, std::string>> v1 = {{"b", "b"},
                                                         {"bb", "bb"}};
  t.insert(std::begin(v1), std::end(v1));
  StringTable u;
  std::vector<std::pair<std::string, std::string>> v2 = {{"a", "a"},
                                                         {"aa", "aa"}};
  u.insert(std::begin(v2), std::end(v2));
  EXPECT_NE(u, t);
}
1461 
TEST(Table, NumDeletedRegression) {
  IntTable t;
  t.emplace(0);
  t.erase(t.find(0));
  // construct over a deleted slot.
  t.emplace(0);
  t.clear();
}
1470 
TEST(Table, FindFullDeletedRegression) {
  IntTable t;
  // Repeated insert/erase cycles accumulate deleted slots; lookups must
  // still terminate and the table must end up empty.
  for (int i = 0; i < 1000; ++i) {
    t.emplace(i);
    t.erase(t.find(i));
  }
  EXPECT_EQ(0, t.size());
}
1479 
TEST(Table, ReplacingDeletedSlotDoesNotRehash) {
  size_t n;
  {
    // Compute n such that n is the maximum number of elements before rehash.
    IntTable t;
    t.emplace(0);
    size_t c = t.bucket_count();
    for (n = 1; c == t.bucket_count(); ++n) t.emplace(n);
    --n;
  }
  // Fill a fresh table exactly to the rehash threshold.
  IntTable t;
  t.rehash(n);
  const size_t c = t.bucket_count();
  for (size_t i = 0; i != n; ++i) t.emplace(i);
  EXPECT_EQ(c, t.bucket_count()) << "rehashing threshold = " << n;
  // Erase + re-insert should reuse the deleted slot, not trigger a rehash.
  t.erase(0);
  t.emplace(0);
  EXPECT_EQ(c, t.bucket_count()) << "rehashing threshold = " << n;
}
1499 
TEST(Table, NoThrowMoveConstruct) {
  // The table's nothrow move depends on its hash, eq and allocator being
  // nothrow copyable; check the prerequisites first.
  ASSERT_TRUE(
      std::is_nothrow_copy_constructible<absl::Hash<absl::string_view>>::value);
  ASSERT_TRUE(std::is_nothrow_copy_constructible<
              std::equal_to<absl::string_view>>::value);
  ASSERT_TRUE(std::is_nothrow_copy_constructible<std::allocator<int>>::value);
  EXPECT_TRUE(std::is_nothrow_move_constructible<StringTable>::value);
}
1508 
TEST(Table, NoThrowMoveAssign) {
  // Check the prerequisites for the table's nothrow move assignment.
  ASSERT_TRUE(
      std::is_nothrow_move_assignable<absl::Hash<absl::string_view>>::value);
  ASSERT_TRUE(
      std::is_nothrow_move_assignable<std::equal_to<absl::string_view>>::value);
  ASSERT_TRUE(std::is_nothrow_move_assignable<std::allocator<int>>::value);
  ASSERT_TRUE(
      absl::allocator_traits<std::allocator<int>>::is_always_equal::value);
  EXPECT_TRUE(std::is_nothrow_move_assignable<StringTable>::value);
}
1519 
TEST(Table, NoThrowSwappable) {
  // Check the prerequisites for the table's nothrow swap.
  ASSERT_TRUE(
      container_internal::IsNoThrowSwappable<absl::Hash<absl::string_view>>());
  ASSERT_TRUE(container_internal::IsNoThrowSwappable<
              std::equal_to<absl::string_view>>());
  ASSERT_TRUE(container_internal::IsNoThrowSwappable<std::allocator<int>>());
  EXPECT_TRUE(container_internal::IsNoThrowSwappable<StringTable>());
}
1528 
TEST(Table, HeterogeneousLookup) {
  // Non-transparent functors: any use of the double overloads is a bug.
  struct Hash {
    size_t operator()(int64_t i) const { return i; }
    size_t operator()(double i) const {
      ADD_FAILURE();
      return i;
    }
  };
  struct Eq {
    bool operator()(int64_t a, int64_t b) const { return a == b; }
    bool operator()(double a, int64_t b) const {
      ADD_FAILURE();
      return a == b;
    }
    bool operator()(int64_t a, double b) const {
      ADD_FAILURE();
      return a == b;
    }
    bool operator()(double a, double b) const {
      ADD_FAILURE();
      return a == b;
    }
  };

  // Transparent counterparts: the double overloads are expected to be used.
  struct THash {
    using is_transparent = void;
    size_t operator()(int64_t i) const { return i; }
    size_t operator()(double i) const { return i; }
  };
  struct TEq {
    using is_transparent = void;
    bool operator()(int64_t a, int64_t b) const { return a == b; }
    bool operator()(double a, int64_t b) const { return a == b; }
    bool operator()(int64_t a, double b) const { return a == b; }
    bool operator()(double a, double b) const { return a == b; }
  };

  raw_hash_set<IntPolicy, Hash, Eq, Alloc<int64_t>> s{0, 1, 2};
  // It will convert to int64_t before the query.
  EXPECT_EQ(1, *s.find(double{1.1}));

  raw_hash_set<IntPolicy, THash, TEq, Alloc<int64_t>> ts{0, 1, 2};
  // It will try to use the double, and fail to find the object.
  EXPECT_TRUE(ts.find(1.1) == ts.end());
}
1574 
1575 template <class Table>
1576 using CallFind = decltype(std::declval<Table&>().find(17));
1577 
1578 template <class Table>
1579 using CallErase = decltype(std::declval<Table&>().erase(17));
1580 
1581 template <class Table>
1582 using CallExtract = decltype(std::declval<Table&>().extract(17));
1583 
1584 template <class Table>
1585 using CallPrefetch = decltype(std::declval<Table&>().prefetch(17));
1586 
1587 template <class Table>
1588 using CallCount = decltype(std::declval<Table&>().count(17));
1589 
1590 template <template <typename> class C, class Table, class = void>
1591 struct VerifyResultOf : std::false_type {};
1592 
1593 template <template <typename> class C, class Table>
1594 struct VerifyResultOf<C, Table, absl::void_t<C<Table>>> : std::true_type {};
1595 
TEST(Table, HeterogeneousLookupOverloads) {
  using NonTransparentTable =
      raw_hash_set<StringPolicy, absl::Hash<absl::string_view>,
                   std::equal_to<absl::string_view>, std::allocator<int>>;

  // Without transparent hash/eq, int-keyed calls must not compile.
  EXPECT_FALSE((VerifyResultOf<CallFind, NonTransparentTable>()));
  EXPECT_FALSE((VerifyResultOf<CallErase, NonTransparentTable>()));
  EXPECT_FALSE((VerifyResultOf<CallExtract, NonTransparentTable>()));
  EXPECT_FALSE((VerifyResultOf<CallPrefetch, NonTransparentTable>()));
  EXPECT_FALSE((VerifyResultOf<CallCount, NonTransparentTable>()));

  using TransparentTable = raw_hash_set<
      StringPolicy,
      absl::container_internal::hash_default_hash<absl::string_view>,
      absl::container_internal::hash_default_eq<absl::string_view>,
      std::allocator<int>>;

  // With transparent hash/eq, the same calls are available.
  EXPECT_TRUE((VerifyResultOf<CallFind, TransparentTable>()));
  EXPECT_TRUE((VerifyResultOf<CallErase, TransparentTable>()));
  EXPECT_TRUE((VerifyResultOf<CallExtract, TransparentTable>()));
  EXPECT_TRUE((VerifyResultOf<CallPrefetch, TransparentTable>()));
  EXPECT_TRUE((VerifyResultOf<CallCount, TransparentTable>()));
}
1619 
1620 // TODO(alkis): Expand iterator tests.
TEST(Iterator,IsDefaultConstructible)1621 TEST(Iterator, IsDefaultConstructible) {
1622   StringTable::iterator i;
1623   EXPECT_TRUE(i == StringTable::iterator());
1624 }
1625 
TEST(ConstIterator, IsDefaultConstructible) {
  StringTable::const_iterator i;
  EXPECT_TRUE(i == StringTable::const_iterator());
}
1630 
TEST(Iterator, ConvertsToConstIterator) {
  // Comparing iterator against const_iterator exercises the implicit
  // iterator -> const_iterator conversion.
  StringTable::iterator i;
  EXPECT_TRUE(i == StringTable::const_iterator());
}
1635 
TEST(Iterator, Iterates) {
  IntTable t;
  for (size_t i = 3; i != 6; ++i) EXPECT_TRUE(t.emplace(i).second);
  EXPECT_THAT(t, UnorderedElementsAre(3, 4, 5));
}
1641 
TEST(Table, Merge) {
  StringTable t1, t2;
  t1.emplace("0", "-0");
  t1.emplace("1", "-1");
  t2.emplace("0", "~0");
  t2.emplace("2", "~2");

  EXPECT_THAT(t1, UnorderedElementsAre(Pair("0", "-0"), Pair("1", "-1")));
  EXPECT_THAT(t2, UnorderedElementsAre(Pair("0", "~0"), Pair("2", "~2")));

  // merge() moves keys not already in t1; the colliding key "0" stays in t2.
  t1.merge(t2);
  EXPECT_THAT(t1, UnorderedElementsAre(Pair("0", "-0"), Pair("1", "-1"),
                                       Pair("2", "~2")));
  EXPECT_THAT(t2, UnorderedElementsAre(Pair("0", "~0")));
}
1657 
TEST(Nodes, EmptyNodeType) {
  using node_type = StringTable::node_type;
  // A default-constructed node handle is empty and falsy.
  node_type n;
  EXPECT_FALSE(n);
  EXPECT_TRUE(n.empty());

  EXPECT_TRUE((std::is_same<node_type::allocator_type,
                            StringTable::allocator_type>::value));
}
1667 
TEST(Nodes, ExtractInsert) {
  constexpr char k0[] = "Very long std::string zero.";
  constexpr char k1[] = "Very long std::string one.";
  constexpr char k2[] = "Very long std::string two.";
  StringTable t = {{k0, ""}, {k1, ""}, {k2, ""}};
  EXPECT_THAT(t,
              UnorderedElementsAre(Pair(k0, ""), Pair(k1, ""), Pair(k2, "")));

  // Extracting an existing key yields a non-empty node handle.
  auto node = t.extract(k0);
  EXPECT_THAT(t, UnorderedElementsAre(Pair(k1, ""), Pair(k2, "")));
  EXPECT_TRUE(node);
  EXPECT_FALSE(node.empty());

  StringTable t2;
  StringTable::insert_return_type res = t2.insert(std::move(node));
  EXPECT_TRUE(res.inserted);
  EXPECT_THAT(*res.position, Pair(k0, ""));
  EXPECT_FALSE(res.node);
  EXPECT_THAT(t2, UnorderedElementsAre(Pair(k0, "")));

  // Not there.
  EXPECT_THAT(t, UnorderedElementsAre(Pair(k1, ""), Pair(k2, "")));
  node = t.extract("Not there!");
  EXPECT_THAT(t, UnorderedElementsAre(Pair(k1, ""), Pair(k2, "")));
  EXPECT_FALSE(node);

  // Inserting nothing.
  res = t2.insert(std::move(node));
  EXPECT_FALSE(res.inserted);
  EXPECT_EQ(res.position, t2.end());
  EXPECT_FALSE(res.node);
  EXPECT_THAT(t2, UnorderedElementsAre(Pair(k0, "")));

  t.emplace(k0, "1");
  node = t.extract(k0);

  // Insert duplicate: the node is returned unconsumed.
  res = t2.insert(std::move(node));
  EXPECT_FALSE(res.inserted);
  EXPECT_THAT(*res.position, Pair(k0, ""));
  EXPECT_TRUE(res.node);
  EXPECT_FALSE(node);
}
1711 
MakeSimpleTable(size_t size)1712 IntTable MakeSimpleTable(size_t size) {
1713   IntTable t;
1714   while (t.size() < size) t.insert(t.size());
1715   return t;
1716 }
1717 
OrderOfIteration(const IntTable & t)1718 std::vector<int> OrderOfIteration(const IntTable& t) {
1719   return {t.begin(), t.end()};
1720 }
1721 
1722 // These IterationOrderChanges tests depend on non-deterministic behavior.
1723 // We are injecting non-determinism from the pointer of the table, but do so in
1724 // a way that only the page matters. We have to retry enough times to make sure
1725 // we are touching different memory pages to cause the ordering to change.
1726 // We also need to keep the old tables around to avoid getting the same memory
1727 // blocks over and over.
TEST(Table, IterationOrderChangesByInstance) {
  for (size_t size : {2, 6, 12, 20}) {
    const auto reference_table = MakeSimpleTable(size);
    const auto reference = OrderOfIteration(reference_table);

    // Keep every trial table alive so each new one is allocated on fresh
    // memory rather than reusing the block just freed.
    std::vector<IntTable> tables;
    bool found_difference = false;
    for (int attempt = 0; attempt < 5000 && !found_difference; ++attempt) {
      tables.push_back(MakeSimpleTable(size));
      found_difference = OrderOfIteration(tables.back()) != reference;
    }
    if (!found_difference) {
      FAIL()
          << "Iteration order remained the same across many attempts with size "
          << size;
    }
  }
}
1746 
TEST(Table, IterationOrderChangesOnRehash) {
  // Retain old tables so repeated trials do not land on the same allocation.
  std::vector<IntTable> garbage;
  for (int attempt = 0; attempt < 5000; ++attempt) {
    auto table = MakeSimpleTable(20);
    const auto before = OrderOfIteration(table);
    // Force rehash to the same size.
    table.rehash(0);
    if (OrderOfIteration(table) != before) {
      // Order changed; the test has succeeded.
      return;
    }
    garbage.push_back(std::move(table));
  }
  FAIL() << "Iteration order remained the same across many attempts.";
}
1763 
1764 // Verify that pointers are invalidated as soon as a second element is inserted.
1765 // This prevents dependency on pointer stability on small tables.
TEST(Table, UnstablePointers) {
  IntTable table;

  const auto address_of = [&](int value) {
    return reinterpret_cast<uintptr_t>(&*table.find(value));
  };

  table.insert(0);
  const uintptr_t first_address = address_of(0);

  // Going from one element to two causes a rehash, which must move the
  // element to different storage.
  table.insert(1);

  EXPECT_NE(first_address, address_of(0));
}
1781 
1782 // Confirm that we assert if we try to erase() end().
TEST(TableDeathTest,EraseOfEndAsserts)1783 TEST(TableDeathTest, EraseOfEndAsserts) {
1784   // Use an assert with side-effects to figure out if they are actually enabled.
1785   bool assert_enabled = false;
1786   assert([&]() {
1787     assert_enabled = true;
1788     return true;
1789   }());
1790   if (!assert_enabled) return;
1791 
1792   IntTable t;
1793   // Extra simple "regexp" as regexp support is highly varied across platforms.
1794   constexpr char kDeathMsg[] = "IsFull";
1795   EXPECT_DEATH_IF_SUPPORTED(t.erase(t.end()), kDeathMsg);
1796 }
1797 
1798 #if defined(ABSL_HASHTABLEZ_SAMPLE)
TEST(RawHashSamplerTest, Sample) {
  // Enable the feature even if the prod default is off.
  SetHashtablezEnabled(true);
  SetHashtablezSampleParameter(100);

  auto& sampler = HashtablezSampler::Global();
  // Count samples already present before this test creates any tables;
  // Iterate's return value is folded in together with the lambda's count.
  size_t samples_before = 0;
  samples_before +=
      sampler.Iterate([&](const HashtablezInfo&) { ++samples_before; });

  std::vector<IntTable> tables;
  for (int i = 0; i < 1000000; ++i) {
    tables.emplace_back();
    tables.back().insert(1);
  }
  size_t samples_after = 0;
  samples_after +=
      sampler.Iterate([&](const HashtablezInfo&) { ++samples_after; });

  // With a sample parameter of 100, roughly 1% of the tables should have
  // been sampled.
  EXPECT_NEAR(
      (samples_after - samples_before) / static_cast<double>(tables.size()),
      0.01, 0.005);
}
#endif  // ABSL_HASHTABLEZ_SAMPLE
1820 
TEST(RawHashSamplerTest, DoNotSampleCustomAllocators) {
  // Enable the feature even if the prod default is off.
  SetHashtablezEnabled(true);
  SetHashtablezSampleParameter(100);

  auto& sampler = HashtablezSampler::Global();
  // Count samples already present before this test creates any tables;
  // Iterate's return value is folded in together with the lambda's count.
  size_t samples_before = 0;
  samples_before +=
      sampler.Iterate([&](const HashtablezInfo&) { ++samples_before; });

  // Tables using a custom allocator must not be sampled, so the sample count
  // should stay (essentially) flat.
  std::vector<CustomAllocIntTable> tables;
  for (int i = 0; i < 1000000; ++i) {
    tables.emplace_back();
    tables.back().insert(1);
  }
  size_t samples_after = 0;
  samples_after +=
      sampler.Iterate([&](const HashtablezInfo&) { ++samples_after; });

  EXPECT_NEAR(
      (samples_after - samples_before) / static_cast<double>(tables.size()),
      0.00, 0.001);
}
1841 
1842 #ifdef ADDRESS_SANITIZER
// Every slot except the one holding a live element must be poisoned under
// ASan.
TEST(Sanitizer, PoisoningUnused) {
  IntTable table;
  table.reserve(5);
  // Insert something to force an allocation.
  int64_t& live = *table.insert(0).first;

  // Make sure there is something to test.
  ASSERT_GT(table.capacity(), 1);

  int64_t* slots = RawHashSetTestOnlyAccess::GetSlots(table);
  for (size_t i = 0; i != table.capacity(); ++i) {
    int64_t* slot = slots + i;
    EXPECT_EQ(slot != &live, __asan_address_is_poisoned(slot));
  }
}
1857 
// Erasing an element must re-poison its slot.
TEST(Sanitizer, PoisoningOnErase) {
  IntTable table;
  int64_t& slot = *table.insert(0).first;

  EXPECT_FALSE(__asan_address_is_poisoned(&slot));
  table.erase(0);
  EXPECT_TRUE(__asan_address_is_poisoned(&slot));
}
1866 #endif  // ADDRESS_SANITIZER
1867 
1868 }  // namespace
1869 }  // namespace container_internal
1870 ABSL_NAMESPACE_END
1871 }  // namespace absl
1872