1 // Copyright 2018 The Abseil Authors.
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 //      https://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14 
15 #include "absl/container/internal/raw_hash_set.h"
16 
17 #include <algorithm>
18 #include <array>
19 #include <atomic>
20 #include <cmath>
21 #include <cstddef>
22 #include <cstdint>
23 #include <deque>
24 #include <functional>
25 #include <iostream>
26 #include <iterator>
27 #include <list>
28 #include <map>
29 #include <memory>
30 #include <numeric>
31 #include <ostream>
32 #include <random>
33 #include <string>
34 #include <tuple>
35 #include <type_traits>
36 #include <unordered_map>
37 #include <unordered_set>
38 #include <utility>
39 #include <vector>
40 
41 #include "gmock/gmock.h"
42 #include "gtest/gtest.h"
43 #include "absl/base/attributes.h"
44 #include "absl/base/config.h"
45 #include "absl/base/internal/cycleclock.h"
46 #include "absl/base/prefetch.h"
47 #include "absl/container/flat_hash_map.h"
48 #include "absl/container/flat_hash_set.h"
49 #include "absl/container/internal/container_memory.h"
50 #include "absl/container/internal/hash_function_defaults.h"
51 #include "absl/container/internal/hash_policy_testing.h"
52 #include "absl/container/internal/hashtable_debug.h"
53 #include "absl/container/internal/hashtablez_sampler.h"
54 #include "absl/container/internal/test_allocator.h"
55 #include "absl/functional/function_ref.h"
56 #include "absl/hash/hash.h"
57 #include "absl/log/check.h"
58 #include "absl/log/log.h"
59 #include "absl/memory/memory.h"
60 #include "absl/meta/type_traits.h"
61 #include "absl/strings/string_view.h"
62 
63 namespace absl {
64 ABSL_NAMESPACE_BEGIN
65 namespace container_internal {
66 
67 struct RawHashSetTestOnlyAccess {
68   template <typename C>
69   static auto GetCommon(const C& c) -> decltype(c.common()) {
70     return c.common();
71   }
72   template <typename C>
73   static auto GetSlots(const C& c) -> decltype(c.slot_array()) {
74     return c.slot_array();
75   }
76   template <typename C>
77   static size_t CountTombstones(const C& c) {
78     return c.common().TombstonesCount();
79   }
80 };
81 
82 namespace {
83 
84 using ::testing::ElementsAre;
85 using ::testing::ElementsAreArray;
86 using ::testing::Eq;
87 using ::testing::Ge;
88 using ::testing::Lt;
89 using ::testing::Pair;
90 using ::testing::UnorderedElementsAre;
91 
92 // Convenience function to static cast to ctrl_t.
93 ctrl_t CtrlT(int i) { return static_cast<ctrl_t>(i); }
94 
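// GrowthInfo packs the remaining growth budget (growth left) together with a
// flag recording whether the table contains kDeleted slots. The tests below
// only rely on that observable behavior, not on the exact bit layout.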
95 TEST(GrowthInfoTest, GetGrowthLeft) {
96   GrowthInfo gi;
97   gi.InitGrowthLeftNoDeleted(5);
98   EXPECT_EQ(gi.GetGrowthLeft(), 5);
99   gi.OverwriteFullAsDeleted();
100   EXPECT_EQ(gi.GetGrowthLeft(), 5);
101 }
102 
103 TEST(GrowthInfoTest, HasNoDeleted) {
104   GrowthInfo gi;
105   gi.InitGrowthLeftNoDeleted(5);
106   EXPECT_TRUE(gi.HasNoDeleted());
107   gi.OverwriteFullAsDeleted();
108   EXPECT_FALSE(gi.HasNoDeleted());
109   // After reinitialization we have no deleted slots.
110   gi.InitGrowthLeftNoDeleted(5);
111   EXPECT_TRUE(gi.HasNoDeleted());
112 }
113 
114 TEST(GrowthInfoTest, HasNoDeletedAndGrowthLeft) {
115   GrowthInfo gi;
116   gi.InitGrowthLeftNoDeleted(5);
117   EXPECT_TRUE(gi.HasNoDeletedAndGrowthLeft());
118   gi.OverwriteFullAsDeleted();
119   EXPECT_FALSE(gi.HasNoDeletedAndGrowthLeft());
120   gi.InitGrowthLeftNoDeleted(0);
121   EXPECT_FALSE(gi.HasNoDeletedAndGrowthLeft());
122   gi.OverwriteFullAsDeleted();
123   EXPECT_FALSE(gi.HasNoDeletedAndGrowthLeft());
124   // After reinitialization we have no deleted slots.
125   gi.InitGrowthLeftNoDeleted(5);
126   EXPECT_TRUE(gi.HasNoDeletedAndGrowthLeft());
127 }
128 
129 TEST(GrowthInfoTest, HasNoGrowthLeftAndNoDeleted) {
130   GrowthInfo gi;
131   gi.InitGrowthLeftNoDeleted(1);
132   EXPECT_FALSE(gi.HasNoGrowthLeftAndNoDeleted());
133   gi.OverwriteEmptyAsFull();
134   EXPECT_TRUE(gi.HasNoGrowthLeftAndNoDeleted());
135   gi.OverwriteFullAsDeleted();
136   EXPECT_FALSE(gi.HasNoGrowthLeftAndNoDeleted());
137   gi.OverwriteFullAsEmpty();
138   EXPECT_FALSE(gi.HasNoGrowthLeftAndNoDeleted());
139   gi.InitGrowthLeftNoDeleted(0);
140   EXPECT_TRUE(gi.HasNoGrowthLeftAndNoDeleted());
141   gi.OverwriteFullAsEmpty();
142   EXPECT_FALSE(gi.HasNoGrowthLeftAndNoDeleted());
143 }
144 
145 TEST(GrowthInfoTest, OverwriteFullAsEmpty) {
146   GrowthInfo gi;
147   gi.InitGrowthLeftNoDeleted(5);
148   gi.OverwriteFullAsEmpty();
149   EXPECT_EQ(gi.GetGrowthLeft(), 6);
150   gi.OverwriteFullAsDeleted();
151   EXPECT_EQ(gi.GetGrowthLeft(), 6);
152   gi.OverwriteFullAsEmpty();
153   EXPECT_EQ(gi.GetGrowthLeft(), 7);
154   EXPECT_FALSE(gi.HasNoDeleted());
155 }
156 
157 TEST(GrowthInfoTest, OverwriteEmptyAsFull) {
158   GrowthInfo gi;
159   gi.InitGrowthLeftNoDeleted(5);
160   gi.OverwriteEmptyAsFull();
161   EXPECT_EQ(gi.GetGrowthLeft(), 4);
162   gi.OverwriteFullAsDeleted();
163   EXPECT_EQ(gi.GetGrowthLeft(), 4);
164   gi.OverwriteEmptyAsFull();
165   EXPECT_EQ(gi.GetGrowthLeft(), 3);
166   EXPECT_FALSE(gi.HasNoDeleted());
167 }
168 
169 TEST(GrowthInfoTest, OverwriteControlAsFull) {
170   GrowthInfo gi;
171   gi.InitGrowthLeftNoDeleted(5);
172   gi.OverwriteControlAsFull(ctrl_t::kEmpty);
173   EXPECT_EQ(gi.GetGrowthLeft(), 4);
174   gi.OverwriteControlAsFull(ctrl_t::kDeleted);
175   EXPECT_EQ(gi.GetGrowthLeft(), 4);
176   gi.OverwriteFullAsDeleted();
177   gi.OverwriteControlAsFull(ctrl_t::kDeleted);
178   // We do not count the number of deleted slots, so the bit sticks until the next rehash.
179   EXPECT_FALSE(gi.HasNoDeletedAndGrowthLeft());
180   EXPECT_FALSE(gi.HasNoDeleted());
181 }
182 
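// NormalizeCapacity rounds a requested capacity up to the next value of the
// form 2^k - 1 (1, 3, 7, 15, ...); that is exactly what the expectations below
// encode.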
183 TEST(Util, NormalizeCapacity) {
184   EXPECT_EQ(1, NormalizeCapacity(0));
185   EXPECT_EQ(1, NormalizeCapacity(1));
186   EXPECT_EQ(3, NormalizeCapacity(2));
187   EXPECT_EQ(3, NormalizeCapacity(3));
188   EXPECT_EQ(7, NormalizeCapacity(4));
189   EXPECT_EQ(7, NormalizeCapacity(7));
190   EXPECT_EQ(15, NormalizeCapacity(8));
191   EXPECT_EQ(15, NormalizeCapacity(15));
192   EXPECT_EQ(15 * 2 + 1, NormalizeCapacity(15 + 1));
193   EXPECT_EQ(15 * 2 + 1, NormalizeCapacity(15 + 2));
194 }
195 
196 TEST(Util, GrowthAndCapacity) {
197   // Verify that GrowthToCapacity gives the minimum capacity that has enough
198   // growth.
199   for (size_t growth = 0; growth < 10000; ++growth) {
200     SCOPED_TRACE(growth);
201     size_t capacity = NormalizeCapacity(GrowthToLowerboundCapacity(growth));
202     // The capacity is large enough for `growth`.
203     EXPECT_THAT(CapacityToGrowth(capacity), Ge(growth));
204     // For (capacity+1) < kWidth, growth should equal capacity.
205     if (capacity + 1 < Group::kWidth) {
206       EXPECT_THAT(CapacityToGrowth(capacity), Eq(capacity));
207     } else {
208       EXPECT_THAT(CapacityToGrowth(capacity), Lt(capacity));
209     }
210     if (growth != 0 && capacity > 1) {
211       // There is no smaller capacity that works.
212       EXPECT_THAT(CapacityToGrowth(capacity / 2), Lt(growth));
213     }
214   }
215 
216   for (size_t capacity = Group::kWidth - 1; capacity < 10000;
217        capacity = 2 * capacity + 1) {
218     SCOPED_TRACE(capacity);
219     size_t growth = CapacityToGrowth(capacity);
220     EXPECT_THAT(growth, Lt(capacity));
221     EXPECT_LE(GrowthToLowerboundCapacity(growth), capacity);
222     EXPECT_EQ(NormalizeCapacity(GrowthToLowerboundCapacity(growth)), capacity);
223   }
224 }
225 
226 TEST(Util, probe_seq) {
227   probe_seq<16> seq(0, 127);
228   auto gen = [&]() {
229     size_t res = seq.offset();
230     seq.next();
231     return res;
232   };
233   std::vector<size_t> offsets(8);
234   std::generate_n(offsets.begin(), 8, gen);
235   EXPECT_THAT(offsets, ElementsAre(0, 16, 48, 96, 32, 112, 80, 64));
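  // The offsets above follow triangular probing in units of the probe width:
  // offset(i) = 16 * i * (i + 1) / 2, reduced modulo capacity + 1, i.e.
  // 0, 16, 48, 96, 160 % 128 = 32, 240 % 128 = 112, 336 % 128 = 80, 448 % 128 = 64.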
236   seq = probe_seq<16>(128, 127);
237   std::generate_n(offsets.begin(), 8, gen);
238   EXPECT_THAT(offsets, ElementsAre(0, 16, 48, 96, 32, 112, 80, 64));
239 }
240 
241 TEST(BitMask, Smoke) {
242   EXPECT_FALSE((BitMask<uint8_t, 8>(0)));
243   EXPECT_TRUE((BitMask<uint8_t, 8>(5)));
244 
245   EXPECT_THAT((BitMask<uint8_t, 8>(0)), ElementsAre());
246   EXPECT_THAT((BitMask<uint8_t, 8>(0x1)), ElementsAre(0));
247   EXPECT_THAT((BitMask<uint8_t, 8>(0x2)), ElementsAre(1));
248   EXPECT_THAT((BitMask<uint8_t, 8>(0x3)), ElementsAre(0, 1));
249   EXPECT_THAT((BitMask<uint8_t, 8>(0x4)), ElementsAre(2));
250   EXPECT_THAT((BitMask<uint8_t, 8>(0x5)), ElementsAre(0, 2));
251   EXPECT_THAT((BitMask<uint8_t, 8>(0x55)), ElementsAre(0, 2, 4, 6));
252   EXPECT_THAT((BitMask<uint8_t, 8>(0xAA)), ElementsAre(1, 3, 5, 7));
253 }
254 
255 TEST(BitMask, WithShift_MatchPortable) {
256   // See the non-SSE version of Group for details on what this math is for.
257   uint64_t ctrl = 0x1716151413121110;
258   uint64_t hash = 0x12;
259   constexpr uint64_t lsbs = 0x0101010101010101ULL;
260   auto x = ctrl ^ (lsbs * hash);
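  // `x` has a zero byte exactly where a ctrl byte equals `hash`; the next line
  // is the classic SWAR zero-byte test, which sets the high bit of every zero
  // byte. The borrow out of byte 2 also sets the bit for byte 3 below, a false
  // positive that real lookups tolerate because candidates are re-checked with
  // the key comparison.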
261   uint64_t mask = (x - lsbs) & ~x & kMsbs8Bytes;
262   EXPECT_EQ(0x0000000080800000, mask);
263 
264   BitMask<uint64_t, 8, 3> b(mask);
265   EXPECT_EQ(*b, 2);
266 }
267 
268 constexpr uint64_t kSome8BytesMask = /*  */ 0x8000808080008000ULL;
269 constexpr uint64_t kSome8BytesMaskAllOnes = 0xff00ffffff00ff00ULL;
270 constexpr auto kSome8BytesMaskBits = std::array<int, 5>{1, 3, 4, 5, 7};
271 
272 
273 TEST(BitMask, WithShift_FullMask) {
274   EXPECT_THAT((BitMask<uint64_t, 8, 3>(kMsbs8Bytes)),
275               ElementsAre(0, 1, 2, 3, 4, 5, 6, 7));
276   EXPECT_THAT(
277       (BitMask<uint64_t, 8, 3, /*NullifyBitsOnIteration=*/true>(kMsbs8Bytes)),
278       ElementsAre(0, 1, 2, 3, 4, 5, 6, 7));
279   EXPECT_THAT(
280       (BitMask<uint64_t, 8, 3, /*NullifyBitsOnIteration=*/true>(~uint64_t{0})),
281       ElementsAre(0, 1, 2, 3, 4, 5, 6, 7));
282 }
283 
284 TEST(BitMask, WithShift_EmptyMask) {
285   EXPECT_THAT((BitMask<uint64_t, 8, 3>(0)), ElementsAre());
286   EXPECT_THAT((BitMask<uint64_t, 8, 3, /*NullifyBitsOnIteration=*/true>(0)),
287               ElementsAre());
288 }
289 
290 TEST(BitMask, WithShift_SomeMask) {
291   EXPECT_THAT((BitMask<uint64_t, 8, 3>(kSome8BytesMask)),
292               ElementsAreArray(kSome8BytesMaskBits));
293   EXPECT_THAT((BitMask<uint64_t, 8, 3, /*NullifyBitsOnIteration=*/true>(
294                   kSome8BytesMask)),
295               ElementsAreArray(kSome8BytesMaskBits));
296   EXPECT_THAT((BitMask<uint64_t, 8, 3, /*NullifyBitsOnIteration=*/true>(
297                   kSome8BytesMaskAllOnes)),
298               ElementsAreArray(kSome8BytesMaskBits));
299 }
300 
301 TEST(BitMask, WithShift_SomeMaskExtraBitsForNullify) {
302   // Verify that adding extra bits into non-zero bytes is fine.
303   uint64_t extra_bits = 77;
304   for (int i = 0; i < 100; ++i) {
305     // Add extra bits, but keep zero bytes untouched.
306     uint64_t extra_mask = extra_bits & kSome8BytesMaskAllOnes;
307     EXPECT_THAT((BitMask<uint64_t, 8, 3, /*NullifyBitsOnIteration=*/true>(
308                     kSome8BytesMask | extra_mask)),
309                 ElementsAreArray(kSome8BytesMaskBits))
310         << i << " " << extra_mask;
311     extra_bits = (extra_bits + 1) * 3;
312   }
313 }
314 
315 TEST(BitMask, LeadingTrailing) {
316   EXPECT_EQ((BitMask<uint32_t, 16>(0x00001a40).LeadingZeros()), 3);
317   EXPECT_EQ((BitMask<uint32_t, 16>(0x00001a40).TrailingZeros()), 6);
318 
319   EXPECT_EQ((BitMask<uint32_t, 16>(0x00000001).LeadingZeros()), 15);
320   EXPECT_EQ((BitMask<uint32_t, 16>(0x00000001).TrailingZeros()), 0);
321 
322   EXPECT_EQ((BitMask<uint32_t, 16>(0x00008000).LeadingZeros()), 0);
323   EXPECT_EQ((BitMask<uint32_t, 16>(0x00008000).TrailingZeros()), 15);
324 
325   EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000008080808000).LeadingZeros()), 3);
326   EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000008080808000).TrailingZeros()), 1);
327 
328   EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000000000000080).LeadingZeros()), 7);
329   EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000000000000080).TrailingZeros()), 0);
330 
331   EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x8000000000000000).LeadingZeros()), 0);
332   EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x8000000000000000).TrailingZeros()), 7);
333 }
334 
335 TEST(Group, EmptyGroup) {
336   for (h2_t h = 0; h != 128; ++h) EXPECT_FALSE(Group{EmptyGroup()}.Match(h));
337 }
338 
339 TEST(Group, Match) {
340   if (Group::kWidth == 16) {
341     ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), ctrl_t::kDeleted,  CtrlT(3),
342                       ctrl_t::kEmpty, CtrlT(5), ctrl_t::kSentinel, CtrlT(7),
343                       CtrlT(7),       CtrlT(5), CtrlT(3),          CtrlT(1),
344                       CtrlT(1),       CtrlT(1), CtrlT(1),          CtrlT(1)};
345     EXPECT_THAT(Group{group}.Match(0), ElementsAre());
346     EXPECT_THAT(Group{group}.Match(1), ElementsAre(1, 11, 12, 13, 14, 15));
347     EXPECT_THAT(Group{group}.Match(3), ElementsAre(3, 10));
348     EXPECT_THAT(Group{group}.Match(5), ElementsAre(5, 9));
349     EXPECT_THAT(Group{group}.Match(7), ElementsAre(7, 8));
350   } else if (Group::kWidth == 8) {
351     ctrl_t group[] = {ctrl_t::kEmpty,    CtrlT(1), CtrlT(2),
352                       ctrl_t::kDeleted,  CtrlT(2), CtrlT(1),
353                       ctrl_t::kSentinel, CtrlT(1)};
354     EXPECT_THAT(Group{group}.Match(0), ElementsAre());
355     EXPECT_THAT(Group{group}.Match(1), ElementsAre(1, 5, 7));
356     EXPECT_THAT(Group{group}.Match(2), ElementsAre(2, 4));
357   } else {
358     FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth;
359   }
360 }
361 
362 TEST(Group, MaskEmpty) {
363   if (Group::kWidth == 16) {
364     ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), ctrl_t::kDeleted,  CtrlT(3),
365                       ctrl_t::kEmpty, CtrlT(5), ctrl_t::kSentinel, CtrlT(7),
366                       CtrlT(7),       CtrlT(5), CtrlT(3),          CtrlT(1),
367                       CtrlT(1),       CtrlT(1), CtrlT(1),          CtrlT(1)};
368     EXPECT_THAT(Group{group}.MaskEmpty().LowestBitSet(), 0);
369     EXPECT_THAT(Group{group}.MaskEmpty().HighestBitSet(), 4);
370   } else if (Group::kWidth == 8) {
371     ctrl_t group[] = {ctrl_t::kEmpty,    CtrlT(1), CtrlT(2),
372                       ctrl_t::kDeleted,  CtrlT(2), CtrlT(1),
373                       ctrl_t::kSentinel, CtrlT(1)};
374     EXPECT_THAT(Group{group}.MaskEmpty().LowestBitSet(), 0);
375     EXPECT_THAT(Group{group}.MaskEmpty().HighestBitSet(), 0);
376   } else {
377     FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth;
378   }
379 }
380 
381 TEST(Group, MaskFull) {
382   if (Group::kWidth == 16) {
383     ctrl_t group[] = {
384         ctrl_t::kEmpty, CtrlT(1),          ctrl_t::kDeleted,  CtrlT(3),
385         ctrl_t::kEmpty, CtrlT(5),          ctrl_t::kSentinel, CtrlT(7),
386         CtrlT(7),       CtrlT(5),          ctrl_t::kDeleted,  CtrlT(1),
387         CtrlT(1),       ctrl_t::kSentinel, ctrl_t::kEmpty,    CtrlT(1)};
388     EXPECT_THAT(Group{group}.MaskFull(),
389                 ElementsAre(1, 3, 5, 7, 8, 9, 11, 12, 15));
390   } else if (Group::kWidth == 8) {
391     ctrl_t group[] = {ctrl_t::kEmpty,    CtrlT(1), ctrl_t::kEmpty,
392                       ctrl_t::kDeleted,  CtrlT(2), ctrl_t::kSentinel,
393                       ctrl_t::kSentinel, CtrlT(1)};
394     EXPECT_THAT(Group{group}.MaskFull(), ElementsAre(1, 4, 7));
395   } else {
396     FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth;
397   }
398 }
399 
400 TEST(Group, MaskNonFull) {
401   if (Group::kWidth == 16) {
402     ctrl_t group[] = {
403         ctrl_t::kEmpty, CtrlT(1),          ctrl_t::kDeleted,  CtrlT(3),
404         ctrl_t::kEmpty, CtrlT(5),          ctrl_t::kSentinel, CtrlT(7),
405         CtrlT(7),       CtrlT(5),          ctrl_t::kDeleted,  CtrlT(1),
406         CtrlT(1),       ctrl_t::kSentinel, ctrl_t::kEmpty,    CtrlT(1)};
407     EXPECT_THAT(Group{group}.MaskNonFull(),
408                 ElementsAre(0, 2, 4, 6, 10, 13, 14));
409   } else if (Group::kWidth == 8) {
410     ctrl_t group[] = {ctrl_t::kEmpty,    CtrlT(1), ctrl_t::kEmpty,
411                       ctrl_t::kDeleted,  CtrlT(2), ctrl_t::kSentinel,
412                       ctrl_t::kSentinel, CtrlT(1)};
413     EXPECT_THAT(Group{group}.MaskNonFull(), ElementsAre(0, 2, 3, 5, 6));
414   } else {
415     FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth;
416   }
417 }
418 
419 TEST(Group, MaskEmptyOrDeleted) {
420   if (Group::kWidth == 16) {
421     ctrl_t group[] = {ctrl_t::kEmpty,   CtrlT(1), ctrl_t::kEmpty,    CtrlT(3),
422                       ctrl_t::kDeleted, CtrlT(5), ctrl_t::kSentinel, CtrlT(7),
423                       CtrlT(7),         CtrlT(5), CtrlT(3),          CtrlT(1),
424                       CtrlT(1),         CtrlT(1), CtrlT(1),          CtrlT(1)};
425     EXPECT_THAT(Group{group}.MaskEmptyOrDeleted().LowestBitSet(), 0);
426     EXPECT_THAT(Group{group}.MaskEmptyOrDeleted().HighestBitSet(), 4);
427   } else if (Group::kWidth == 8) {
428     ctrl_t group[] = {ctrl_t::kEmpty,    CtrlT(1), CtrlT(2),
429                       ctrl_t::kDeleted,  CtrlT(2), CtrlT(1),
430                       ctrl_t::kSentinel, CtrlT(1)};
431     EXPECT_THAT(Group{group}.MaskEmptyOrDeleted().LowestBitSet(), 0);
432     EXPECT_THAT(Group{group}.MaskEmptyOrDeleted().HighestBitSet(), 3);
433   } else {
434     FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth;
435   }
436 }
437 
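// ConvertDeletedToEmptyAndFullToDeleted prepares an in-place rehash ("drop
// deletes without resizing"): kDeleted slots become kEmpty, full slots become
// kDeleted (so their elements can be re-placed), and the cloned control bytes
// after the sentinel must stay in sync with the start of the table. The
// expectations below encode exactly that transformation.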
438 TEST(Batch, DropDeletes) {
439   constexpr size_t kCapacity = 63;
440   constexpr size_t kGroupWidth = container_internal::Group::kWidth;
441   std::vector<ctrl_t> ctrl(kCapacity + 1 + kGroupWidth);
442   ctrl[kCapacity] = ctrl_t::kSentinel;
443   std::vector<ctrl_t> pattern = {
444       ctrl_t::kEmpty, CtrlT(2), ctrl_t::kDeleted, CtrlT(2),
445       ctrl_t::kEmpty, CtrlT(1), ctrl_t::kDeleted};
446   for (size_t i = 0; i != kCapacity; ++i) {
447     ctrl[i] = pattern[i % pattern.size()];
448     if (i < kGroupWidth - 1)
449       ctrl[i + kCapacity + 1] = pattern[i % pattern.size()];
450   }
451   ConvertDeletedToEmptyAndFullToDeleted(ctrl.data(), kCapacity);
452   ASSERT_EQ(ctrl[kCapacity], ctrl_t::kSentinel);
453   for (size_t i = 0; i < kCapacity + kGroupWidth; ++i) {
454     ctrl_t expected = pattern[i % (kCapacity + 1) % pattern.size()];
455     if (i == kCapacity) expected = ctrl_t::kSentinel;
456     if (expected == ctrl_t::kDeleted) expected = ctrl_t::kEmpty;
457     if (IsFull(expected)) expected = ctrl_t::kDeleted;
458     EXPECT_EQ(ctrl[i], expected)
459         << i << " " << static_cast<int>(pattern[i % pattern.size()]);
460   }
461 }
462 
463 TEST(Group, CountLeadingEmptyOrDeleted) {
464   const std::vector<ctrl_t> empty_examples = {ctrl_t::kEmpty, ctrl_t::kDeleted};
465   const std::vector<ctrl_t> full_examples = {
466       CtrlT(0), CtrlT(1), CtrlT(2),   CtrlT(3),
467       CtrlT(5), CtrlT(9), CtrlT(127), ctrl_t::kSentinel};
468 
469   for (ctrl_t empty : empty_examples) {
470     std::vector<ctrl_t> e(Group::kWidth, empty);
471     EXPECT_EQ(Group::kWidth, Group{e.data()}.CountLeadingEmptyOrDeleted());
472     for (ctrl_t full : full_examples) {
473       for (size_t i = 0; i != Group::kWidth; ++i) {
474         std::vector<ctrl_t> f(Group::kWidth, empty);
475         f[i] = full;
476         EXPECT_EQ(i, Group{f.data()}.CountLeadingEmptyOrDeleted());
477       }
478       std::vector<ctrl_t> f(Group::kWidth, empty);
479       f[Group::kWidth * 2 / 3] = full;
480       f[Group::kWidth / 2] = full;
481       EXPECT_EQ(Group::kWidth / 2,
482                 Group{f.data()}.CountLeadingEmptyOrDeleted());
483     }
484   }
485 }
486 
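// Minimal slot policy used by most tables in this file. Roughly speaking, a
// policy tells raw_hash_set how to construct, destroy, and transfer slots, how
// to reach the stored value (element), and how to decompose emplace arguments
// into a key plus constructor arguments (apply). kTransferable and kSoo select
// the transferable-slot and small-object-optimization variants that the typed
// tests exercise.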
487 template <class T, bool kTransferable = false, bool kSoo = false>
488 struct ValuePolicy {
489   using slot_type = T;
490   using key_type = T;
491   using init_type = T;
492 
493   template <class Allocator, class... Args>
494   static void construct(Allocator* alloc, slot_type* slot, Args&&... args) {
495     absl::allocator_traits<Allocator>::construct(*alloc, slot,
496                                                  std::forward<Args>(args)...);
497   }
498 
499   template <class Allocator>
500   static void destroy(Allocator* alloc, slot_type* slot) {
501     absl::allocator_traits<Allocator>::destroy(*alloc, slot);
502   }
503 
504   template <class Allocator>
505   static std::integral_constant<bool, kTransferable> transfer(
506       Allocator* alloc, slot_type* new_slot, slot_type* old_slot) {
507     construct(alloc, new_slot, std::move(*old_slot));
508     destroy(alloc, old_slot);
509     return {};
510   }
511 
512   static T& element(slot_type* slot) { return *slot; }
513 
514   template <class F, class... Args>
515   static decltype(absl::container_internal::DecomposeValue(
516       std::declval<F>(), std::declval<Args>()...))
517   apply(F&& f, Args&&... args) {
518     return absl::container_internal::DecomposeValue(
519         std::forward<F>(f), std::forward<Args>(args)...);
520   }
521 
522   template <class Hash>
523   static constexpr HashSlotFn get_hash_slot_fn() {
524     return nullptr;
525   }
526 
527   static constexpr bool soo_enabled() { return kSoo; }
528 };
529 
530 using IntPolicy = ValuePolicy<int64_t>;
531 using Uint8Policy = ValuePolicy<uint8_t>;
532 
533 using TranferableIntPolicy = ValuePolicy<int64_t, /*kTransferable=*/true>;
534 
535 // For testing SOO.
536 template <int N>
537 class SizedValue {
538  public:
539   SizedValue(int64_t v) {  // NOLINT
540     vals_[0] = v;
541   }
542   SizedValue() : SizedValue(0) {}
543   SizedValue(const SizedValue&) = default;
544   SizedValue& operator=(const SizedValue&) = default;
545 
546   int64_t operator*() const {
547     // Suppress erroneous uninitialized memory errors on GCC.
548 #if !defined(__clang__) && defined(__GNUC__)
549 #pragma GCC diagnostic push
550 #pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
551 #endif
552     return vals_[0];
553 #if !defined(__clang__) && defined(__GNUC__)
554 #pragma GCC diagnostic pop
555 #endif
556   }
557   explicit operator int() const { return **this; }
558   explicit operator int64_t() const { return **this; }
559 
560   template <typename H>
561   friend H AbslHashValue(H h, SizedValue sv) {
562     return H::combine(std::move(h), *sv);
563   }
564   bool operator==(const SizedValue& rhs) const { return **this == *rhs; }
565 
566  private:
567   int64_t vals_[N / sizeof(int64_t)];
568 };
569 template <int N, bool kSoo>
570 using SizedValuePolicy =
571     ValuePolicy<SizedValue<N>, /*kTransferable=*/true, kSoo>;
572 
573 class StringPolicy {
574   template <class F, class K, class V,
575             class = typename std::enable_if<
576                 std::is_convertible<const K&, absl::string_view>::value>::type>
577   decltype(std::declval<F>()(
578       std::declval<const absl::string_view&>(), std::piecewise_construct,
579       std::declval<std::tuple<K>>(),
580       std::declval<V>())) static apply_impl(F&& f,
581                                             std::pair<std::tuple<K>, V> p) {
582     const absl::string_view& key = std::get<0>(p.first);
583     return std::forward<F>(f)(key, std::piecewise_construct, std::move(p.first),
584                               std::move(p.second));
585   }
586 
587  public:
588   struct slot_type {
589     struct ctor {};
590 
591     template <class... Ts>
592     explicit slot_type(ctor, Ts&&... ts) : pair(std::forward<Ts>(ts)...) {}
593 
594     std::pair<std::string, std::string> pair;
595   };
596 
597   using key_type = std::string;
598   using init_type = std::pair<std::string, std::string>;
599 
600   template <class allocator_type, class... Args>
601   static void construct(allocator_type* alloc, slot_type* slot, Args... args) {
602     std::allocator_traits<allocator_type>::construct(
603         *alloc, slot, typename slot_type::ctor(), std::forward<Args>(args)...);
604   }
605 
606   template <class allocator_type>
607   static void destroy(allocator_type* alloc, slot_type* slot) {
608     std::allocator_traits<allocator_type>::destroy(*alloc, slot);
609   }
610 
611   template <class allocator_type>
612   static void transfer(allocator_type* alloc, slot_type* new_slot,
613                        slot_type* old_slot) {
614     construct(alloc, new_slot, std::move(old_slot->pair));
615     destroy(alloc, old_slot);
616   }
617 
618   static std::pair<std::string, std::string>& element(slot_type* slot) {
619     return slot->pair;
620   }
621 
622   template <class F, class... Args>
623   static auto apply(F&& f, Args&&... args)
624       -> decltype(apply_impl(std::forward<F>(f),
625                              PairArgs(std::forward<Args>(args)...))) {
626     return apply_impl(std::forward<F>(f),
627                       PairArgs(std::forward<Args>(args)...));
628   }
629 
630   template <class Hash>
631   static constexpr HashSlotFn get_hash_slot_fn() {
632     return nullptr;
633   }
634 };
635 
636 struct StringHash : absl::Hash<absl::string_view> {
637   using is_transparent = void;
638 };
639 struct StringEq : std::equal_to<absl::string_view> {
640   using is_transparent = void;
641 };
642 
643 struct StringTable
644     : raw_hash_set<StringPolicy, StringHash, StringEq, std::allocator<int>> {
645   using Base = typename StringTable::raw_hash_set;
646   StringTable() = default;
647   using Base::Base;
648 };
649 
650 template <typename T, bool kTransferable = false, bool kSoo = false>
651 struct ValueTable
652     : raw_hash_set<ValuePolicy<T, kTransferable, kSoo>, hash_default_hash<T>,
653                    std::equal_to<T>, std::allocator<T>> {
654   using Base = typename ValueTable::raw_hash_set;
655   using Base::Base;
656 };
657 
658 using IntTable = ValueTable<int64_t>;
659 using Uint8Table = ValueTable<uint8_t>;
660 
661 using TransferableIntTable = ValueTable<int64_t, /*kTransferable=*/true>;
662 
663 constexpr size_t kNonSooSize = sizeof(HeapOrSoo) + 8;
664 static_assert(sizeof(SizedValue<kNonSooSize>) >= kNonSooSize, "too small");
665 using NonSooIntTable = ValueTable<SizedValue<kNonSooSize>>;
666 using SooIntTable = ValueTable<int64_t, /*kTransferable=*/true, /*kSoo=*/true>;
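// These aliases behave like ordinary sets in the tests, e.g. (illustrative
// sketch only):
//   IntTable t;
//   t.insert(7);
//   EXPECT_TRUE(t.contains(7));
// NonSooIntTable stores values too large for inline (SOO) storage, while
// SooIntTable opts into keeping small tables inline.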
667 
668 template <typename T>
669 struct CustomAlloc : std::allocator<T> {
670   CustomAlloc() = default;
671 
672   template <typename U>
673   explicit CustomAlloc(const CustomAlloc<U>& /*other*/) {}
674 
675   template <class U>
676   struct rebind {
677     using other = CustomAlloc<U>;
678   };
679 };
680 
681 struct CustomAllocIntTable
682     : raw_hash_set<IntPolicy, hash_default_hash<int64_t>,
683                    std::equal_to<int64_t>, CustomAlloc<int64_t>> {
684   using Base = typename CustomAllocIntTable::raw_hash_set;
685   using Base::Base;
686 };
687 
688 struct MinimumAlignmentUint8Table
689     : raw_hash_set<Uint8Policy, hash_default_hash<uint8_t>,
690                    std::equal_to<uint8_t>, MinimumAlignmentAlloc<uint8_t>> {
691   using Base = typename MinimumAlignmentUint8Table::raw_hash_set;
692   using Base::Base;
693 };
694 
695 // Allows for freezing the allocator to expect no further allocations.
696 template <typename T>
697 struct FreezableAlloc : std::allocator<T> {
698   explicit FreezableAlloc(bool* f) : frozen(f) {}
699 
700   template <typename U>
701   explicit FreezableAlloc(const FreezableAlloc<U>& other)
702       : frozen(other.frozen) {}
703 
704   template <class U>
705   struct rebind {
706     using other = FreezableAlloc<U>;
707   };
708 
709   T* allocate(size_t n) {
710     EXPECT_FALSE(*frozen);
711     return std::allocator<T>::allocate(n);
712   }
713 
714   bool* frozen;
715 };
716 
717 template <int N>
718 struct FreezableSizedValueSooTable
719     : raw_hash_set<SizedValuePolicy<N, /*kSoo=*/true>,
720                    container_internal::hash_default_hash<SizedValue<N>>,
721                    std::equal_to<SizedValue<N>>,
722                    FreezableAlloc<SizedValue<N>>> {
723   using Base = typename FreezableSizedValueSooTable::raw_hash_set;
724   using Base::Base;
725 };
726 
727 struct BadFastHash {
728   template <class T>
729   size_t operator()(const T&) const {
730     return 0;
731   }
732 };
733 
734 struct BadHashFreezableIntTable
735     : raw_hash_set<IntPolicy, BadFastHash, std::equal_to<int64_t>,
736                    FreezableAlloc<int64_t>> {
737   using Base = typename BadHashFreezableIntTable::raw_hash_set;
738   using Base::Base;
739 };
740 
741 struct BadTable : raw_hash_set<IntPolicy, BadFastHash, std::equal_to<int>,
742                                std::allocator<int>> {
743   using Base = typename BadTable::raw_hash_set;
744   BadTable() = default;
745   using Base::Base;
746 };
747 
748 TEST(Table, EmptyFunctorOptimization) {
749   static_assert(std::is_empty<std::equal_to<absl::string_view>>::value, "");
750   static_assert(std::is_empty<std::allocator<int>>::value, "");
751 
752   struct MockTable {
753     void* ctrl;
754     void* slots;
755     size_t size;
756     size_t capacity;
757   };
758   struct StatelessHash {
759     size_t operator()(absl::string_view) const { return 0; }
760   };
761   struct StatefulHash : StatelessHash {
762     size_t dummy;
763   };
764 
765   struct GenerationData {
766     size_t reserved_growth;
767     size_t reservation_size;
768     GenerationType* generation;
769   };
770 
771 // Ignore unreachable-code warning. Compiler thinks one branch of each ternary
772 // conditional is unreachable.
773 #if defined(__clang__)
774 #pragma clang diagnostic push
775 #pragma clang diagnostic ignored "-Wunreachable-code"
776 #endif
777   constexpr size_t mock_size = sizeof(MockTable);
778   constexpr size_t generation_size =
779       SwisstableGenerationsEnabled() ? sizeof(GenerationData) : 0;
780 #if defined(__clang__)
781 #pragma clang diagnostic pop
782 #endif
783 
784   EXPECT_EQ(
785       mock_size + generation_size,
786       sizeof(
787           raw_hash_set<StringPolicy, StatelessHash,
788                        std::equal_to<absl::string_view>, std::allocator<int>>));
789 
790   EXPECT_EQ(
791       mock_size + sizeof(StatefulHash) + generation_size,
792       sizeof(
793           raw_hash_set<StringPolicy, StatefulHash,
794                        std::equal_to<absl::string_view>, std::allocator<int>>));
795 }
796 
797 template <class TableType>
798 class SooTest : public testing::Test {};
799 
800 TYPED_TEST_SUITE_P(SooTest);
801 
802 TYPED_TEST_P(SooTest, Empty) {
803   TypeParam t;
804   EXPECT_EQ(0, t.size());
805   EXPECT_TRUE(t.empty());
806 }
807 
808 TYPED_TEST_P(SooTest, LookupEmpty) {
809   TypeParam t;
810   auto it = t.find(0);
811   EXPECT_TRUE(it == t.end());
812 }
813 
814 TYPED_TEST_P(SooTest, Insert1) {
815   TypeParam t;
816   EXPECT_TRUE(t.find(0) == t.end());
817   auto res = t.emplace(0);
818   EXPECT_TRUE(res.second);
819   EXPECT_THAT(*res.first, 0);
820   EXPECT_EQ(1, t.size());
821   EXPECT_THAT(*t.find(0), 0);
822 }
823 
824 TYPED_TEST_P(SooTest, Insert2) {
825   TypeParam t;
826   EXPECT_TRUE(t.find(0) == t.end());
827   auto res = t.emplace(0);
828   EXPECT_TRUE(res.second);
829   EXPECT_THAT(*res.first, 0);
830   EXPECT_EQ(1, t.size());
831   EXPECT_TRUE(t.find(1) == t.end());
832   res = t.emplace(1);
833   EXPECT_TRUE(res.second);
834   EXPECT_THAT(*res.first, 1);
835   EXPECT_EQ(2, t.size());
836   EXPECT_THAT(*t.find(0), 0);
837   EXPECT_THAT(*t.find(1), 1);
838 }
839 
840 TEST(Table, InsertCollision) {
841   BadTable t;
842   EXPECT_TRUE(t.find(1) == t.end());
843   auto res = t.emplace(1);
844   EXPECT_TRUE(res.second);
845   EXPECT_THAT(*res.first, 1);
846   EXPECT_EQ(1, t.size());
847 
848   EXPECT_TRUE(t.find(2) == t.end());
849   res = t.emplace(2);
850   EXPECT_THAT(*res.first, 2);
851   EXPECT_TRUE(res.second);
852   EXPECT_EQ(2, t.size());
853 
854   EXPECT_THAT(*t.find(1), 1);
855   EXPECT_THAT(*t.find(2), 2);
856 }
857 
858 // Test that we do not add an already-present element when the probe has to
859 // search through many groups with deleted elements.
860 TEST(Table, InsertCollisionAndFindAfterDelete) {
861   BadTable t;  // all elements go to the same group.
862   // Have at least 2 groups with Group::kWidth collisions
863   // plus some extra collisions in the last group.
864   constexpr size_t kNumInserts = Group::kWidth * 2 + 5;
865   for (size_t i = 0; i < kNumInserts; ++i) {
866     auto res = t.emplace(i);
867     EXPECT_TRUE(res.second);
868     EXPECT_THAT(*res.first, i);
869     EXPECT_EQ(i + 1, t.size());
870   }
871 
872   // Remove elements one by one and check
873   // that we can still find all the other elements.
874   for (size_t i = 0; i < kNumInserts; ++i) {
875     EXPECT_EQ(1, t.erase(i)) << i;
876     for (size_t j = i + 1; j < kNumInserts; ++j) {
877       EXPECT_THAT(*t.find(j), j);
878       auto res = t.emplace(j);
879       EXPECT_FALSE(res.second) << i << " " << j;
880       EXPECT_THAT(*res.first, j);
881       EXPECT_EQ(kNumInserts - i - 1, t.size());
882     }
883   }
884   EXPECT_TRUE(t.empty());
885 }
886 
887 TYPED_TEST_P(SooTest, EraseInSmallTables) {
888   for (int64_t size = 0; size < 64; ++size) {
889     TypeParam t;
890     for (int64_t i = 0; i < size; ++i) {
891       t.insert(i);
892     }
893     for (int64_t i = 0; i < size; ++i) {
894       t.erase(i);
895       EXPECT_EQ(t.size(), size - i - 1);
896       for (int64_t j = i + 1; j < size; ++j) {
897         EXPECT_THAT(*t.find(j), j);
898       }
899     }
900     EXPECT_TRUE(t.empty());
901   }
902 }
903 
904 TYPED_TEST_P(SooTest, InsertWithinCapacity) {
905   TypeParam t;
906   t.reserve(10);
907   const size_t original_capacity = t.capacity();
908   const auto addr = [&](int i) {
909     return reinterpret_cast<uintptr_t>(&*t.find(i));
910   };
911   // Inserting an element does not change capacity.
912   t.insert(0);
913   EXPECT_THAT(t.capacity(), original_capacity);
914   const uintptr_t original_addr_0 = addr(0);
915   // Inserting another element does not rehash.
916   t.insert(1);
917   EXPECT_THAT(t.capacity(), original_capacity);
918   EXPECT_THAT(addr(0), original_addr_0);
919   // Inserting lots of duplicate elements does not rehash.
920   for (int i = 0; i < 100; ++i) {
921     t.insert(i % 10);
922   }
923   EXPECT_THAT(t.capacity(), original_capacity);
924   EXPECT_THAT(addr(0), original_addr_0);
925   // Inserting a range of duplicate elements does not rehash.
926   std::vector<int> dup_range;
927   for (int i = 0; i < 100; ++i) {
928     dup_range.push_back(i % 10);
929   }
930   t.insert(dup_range.begin(), dup_range.end());
931   EXPECT_THAT(t.capacity(), original_capacity);
932   EXPECT_THAT(addr(0), original_addr_0);
933 }
934 
935 template <class TableType>
936 class SmallTableResizeTest : public testing::Test {};
937 
938 TYPED_TEST_SUITE_P(SmallTableResizeTest);
939 
940 TYPED_TEST_P(SmallTableResizeTest, InsertIntoSmallTable) {
941   TypeParam t;
942   for (int i = 0; i < 32; ++i) {
943     t.insert(i);
944     ASSERT_EQ(t.size(), i + 1);
945     for (int j = 0; j < i + 1; ++j) {
946       EXPECT_TRUE(t.find(j) != t.end());
947       EXPECT_EQ(*t.find(j), j);
948     }
949   }
950 }
951 
952 TYPED_TEST_P(SmallTableResizeTest, ResizeGrowSmallTables) {
953   for (size_t source_size = 0; source_size < 32; ++source_size) {
954     for (size_t target_size = source_size; target_size < 32; ++target_size) {
955       for (bool rehash : {false, true}) {
956         TypeParam t;
957         for (size_t i = 0; i < source_size; ++i) {
958           t.insert(static_cast<int>(i));
959         }
960         if (rehash) {
961           t.rehash(target_size);
962         } else {
963           t.reserve(target_size);
964         }
965         for (size_t i = 0; i < source_size; ++i) {
966           EXPECT_TRUE(t.find(static_cast<int>(i)) != t.end());
967           EXPECT_EQ(*t.find(static_cast<int>(i)), static_cast<int>(i));
968         }
969       }
970     }
971   }
972 }
973 
974 TYPED_TEST_P(SmallTableResizeTest, ResizeReduceSmallTables) {
975   for (size_t source_size = 0; source_size < 32; ++source_size) {
976     for (size_t target_size = 0; target_size <= source_size; ++target_size) {
977       TypeParam t;
978       size_t inserted_count = std::min<size_t>(source_size, 5);
979       for (size_t i = 0; i < inserted_count; ++i) {
980         t.insert(static_cast<int>(i));
981       }
982       const size_t minimum_capacity = t.capacity();
983       t.reserve(source_size);
984       t.rehash(target_size);
985       if (target_size == 0) {
986         EXPECT_EQ(t.capacity(), minimum_capacity)
987             << "rehash(0) must resize to the minimum capacity";
988       }
989       for (size_t i = 0; i < inserted_count; ++i) {
990         EXPECT_TRUE(t.find(static_cast<int>(i)) != t.end());
991         EXPECT_EQ(*t.find(static_cast<int>(i)), static_cast<int>(i));
992       }
993     }
994   }
995 }
996 
997 REGISTER_TYPED_TEST_SUITE_P(SmallTableResizeTest, InsertIntoSmallTable,
998                             ResizeGrowSmallTables, ResizeReduceSmallTables);
999 using SmallTableTypes =
1000     ::testing::Types<IntTable, TransferableIntTable, SooIntTable>;
1001 INSTANTIATE_TYPED_TEST_SUITE_P(InstanceSmallTableResizeTest,
1002                                SmallTableResizeTest, SmallTableTypes);
1003 
1004 TEST(Table, LazyEmplace) {
1005   StringTable t;
1006   bool called = false;
1007   auto it = t.lazy_emplace("abc", [&](const StringTable::constructor& f) {
1008     called = true;
1009     f("abc", "ABC");
1010   });
1011   EXPECT_TRUE(called);
1012   EXPECT_THAT(*it, Pair("abc", "ABC"));
1013   called = false;
1014   it = t.lazy_emplace("abc", [&](const StringTable::constructor& f) {
1015     called = true;
1016     f("abc", "DEF");
1017   });
1018   EXPECT_FALSE(called);
1019   EXPECT_THAT(*it, Pair("abc", "ABC"));
1020 }
1021 
1022 TYPED_TEST_P(SooTest, ContainsEmpty) {
1023   TypeParam t;
1024 
1025   EXPECT_FALSE(t.contains(0));
1026 }
1027 
1028 TYPED_TEST_P(SooTest, Contains1) {
1029   TypeParam t;
1030 
1031   EXPECT_TRUE(t.insert(0).second);
1032   EXPECT_TRUE(t.contains(0));
1033   EXPECT_FALSE(t.contains(1));
1034 
1035   EXPECT_EQ(1, t.erase(0));
1036   EXPECT_FALSE(t.contains(0));
1037 }
1038 
1039 TYPED_TEST_P(SooTest, Contains2) {
1040   TypeParam t;
1041 
1042   EXPECT_TRUE(t.insert(0).second);
1043   EXPECT_TRUE(t.contains(0));
1044   EXPECT_FALSE(t.contains(1));
1045 
1046   t.clear();
1047   EXPECT_FALSE(t.contains(0));
1048 }
1049 
1050 int decompose_constructed;
1051 int decompose_copy_constructed;
1052 int decompose_copy_assigned;
1053 int decompose_move_constructed;
1054 int decompose_move_assigned;
1055 struct DecomposeType {
1056   DecomposeType(int i = 0) : i(i) {  // NOLINT
1057     ++decompose_constructed;
1058   }
1059 
1060   explicit DecomposeType(const char* d) : DecomposeType(*d) {}
1061 
1062   DecomposeType(const DecomposeType& other) : i(other.i) {
1063     ++decompose_copy_constructed;
1064   }
1065   DecomposeType& operator=(const DecomposeType& other) {
1066     ++decompose_copy_assigned;
1067     i = other.i;
1068     return *this;
1069   }
1070   DecomposeType(DecomposeType&& other) : i(other.i) {
1071     ++decompose_move_constructed;
1072   }
1073   DecomposeType& operator=(DecomposeType&& other) {
1074     ++decompose_move_assigned;
1075     i = other.i;
1076     return *this;
1077   }
1078 
1079   int i;
1080 };
1081 
1082 struct DecomposeHash {
1083   using is_transparent = void;
1084   size_t operator()(const DecomposeType& a) const { return a.i; }
1085   size_t operator()(int a) const { return a; }
1086   size_t operator()(const char* a) const { return *a; }
1087 };
1088 
1089 struct DecomposeEq {
1090   using is_transparent = void;
1091   bool operator()(const DecomposeType& a, const DecomposeType& b) const {
1092     return a.i == b.i;
1093   }
1094   bool operator()(const DecomposeType& a, int b) const { return a.i == b; }
1095   bool operator()(const DecomposeType& a, const char* b) const {
1096     return a.i == *b;
1097   }
1098 };
1099 
1100 struct DecomposePolicy {
1101   using slot_type = DecomposeType;
1102   using key_type = DecomposeType;
1103   using init_type = DecomposeType;
1104 
1105   template <typename T>
1106   static void construct(void*, DecomposeType* slot, T&& v) {
1107     ::new (slot) DecomposeType(std::forward<T>(v));
1108   }
1109   static void destroy(void*, DecomposeType* slot) { slot->~DecomposeType(); }
1110   static DecomposeType& element(slot_type* slot) { return *slot; }
1111 
1112   template <class F, class T>
1113   static auto apply(F&& f, const T& x) -> decltype(std::forward<F>(f)(x, x)) {
1114     return std::forward<F>(f)(x, x);
1115   }
1116 
1117   template <class Hash>
1118   static constexpr HashSlotFn get_hash_slot_fn() {
1119     return nullptr;
1120   }
1121 };
1122 
1123 template <typename Hash, typename Eq>
1124 void TestDecompose(bool construct_three) {
1125   DecomposeType elem{0};
1126   const int one = 1;
1127   const char* three_p = "3";
1128   const auto& three = three_p;
1129   const int elem_vector_count = 256;
1130   std::vector<DecomposeType> elem_vector(elem_vector_count, DecomposeType{0});
1131   std::iota(elem_vector.begin(), elem_vector.end(), 0);
1132 
1133   using DecomposeSet =
1134       raw_hash_set<DecomposePolicy, Hash, Eq, std::allocator<int>>;
1135   DecomposeSet set1;
1136 
1137   decompose_constructed = 0;
1138   int expected_constructed = 0;
1139   EXPECT_EQ(expected_constructed, decompose_constructed);
1140   set1.insert(elem);
1141   EXPECT_EQ(expected_constructed, decompose_constructed);
1142   set1.insert(1);
1143   EXPECT_EQ(++expected_constructed, decompose_constructed);
1144   set1.emplace("3");
1145   EXPECT_EQ(++expected_constructed, decompose_constructed);
1146   EXPECT_EQ(expected_constructed, decompose_constructed);
1147 
1148   {  // insert(T&&)
1149     set1.insert(1);
1150     EXPECT_EQ(expected_constructed, decompose_constructed);
1151   }
1152 
1153   {  // insert(const T&)
1154     set1.insert(one);
1155     EXPECT_EQ(expected_constructed, decompose_constructed);
1156   }
1157 
1158   {  // insert(hint, T&&)
1159     set1.insert(set1.begin(), 1);
1160     EXPECT_EQ(expected_constructed, decompose_constructed);
1161   }
1162 
1163   {  // insert(hint, const T&)
1164     set1.insert(set1.begin(), one);
1165     EXPECT_EQ(expected_constructed, decompose_constructed);
1166   }
1167 
1168   {  // emplace(...)
1169     set1.emplace(1);
1170     EXPECT_EQ(expected_constructed, decompose_constructed);
1171     set1.emplace("3");
1172     expected_constructed += construct_three;
1173     EXPECT_EQ(expected_constructed, decompose_constructed);
1174     set1.emplace(one);
1175     EXPECT_EQ(expected_constructed, decompose_constructed);
1176     set1.emplace(three);
1177     expected_constructed += construct_three;
1178     EXPECT_EQ(expected_constructed, decompose_constructed);
1179   }
1180 
1181   {  // emplace_hint(...)
1182     set1.emplace_hint(set1.begin(), 1);
1183     EXPECT_EQ(expected_constructed, decompose_constructed);
1184     set1.emplace_hint(set1.begin(), "3");
1185     expected_constructed += construct_three;
1186     EXPECT_EQ(expected_constructed, decompose_constructed);
1187     set1.emplace_hint(set1.begin(), one);
1188     EXPECT_EQ(expected_constructed, decompose_constructed);
1189     set1.emplace_hint(set1.begin(), three);
1190     expected_constructed += construct_three;
1191     EXPECT_EQ(expected_constructed, decompose_constructed);
1192   }
1193 
1194   decompose_copy_constructed = 0;
1195   decompose_copy_assigned = 0;
1196   decompose_move_constructed = 0;
1197   decompose_move_assigned = 0;
1198   int expected_copy_constructed = 0;
1199   int expected_move_constructed = 0;
1200   {  // raw_hash_set(first, last) with random-access iterators
1201     DecomposeSet set2(elem_vector.begin(), elem_vector.end());
1202     // Expect exactly one copy-constructor call for each element if no
1203     // rehashing is done.
1204     expected_copy_constructed += elem_vector_count;
1205     EXPECT_EQ(expected_copy_constructed, decompose_copy_constructed);
1206     EXPECT_EQ(expected_move_constructed, decompose_move_constructed);
1207     EXPECT_EQ(0, decompose_move_assigned);
1208     EXPECT_EQ(0, decompose_copy_assigned);
1209   }
1210 
1211   {  // raw_hash_set(first, last) with forward iterators
1212     std::list<DecomposeType> elem_list(elem_vector.begin(), elem_vector.end());
1213     expected_copy_constructed = decompose_copy_constructed;
1214     DecomposeSet set2(elem_list.begin(), elem_list.end());
1215     // Expect exactly N elements copied into set, expect at most 2*N elements
1216     // moving internally for all resizing needed (for a growth factor of 2).
1217     expected_copy_constructed += elem_vector_count;
1218     EXPECT_EQ(expected_copy_constructed, decompose_copy_constructed);
1219     expected_move_constructed += elem_vector_count;
1220     EXPECT_LT(expected_move_constructed, decompose_move_constructed);
1221     expected_move_constructed += elem_vector_count;
1222     EXPECT_GE(expected_move_constructed, decompose_move_constructed);
1223     EXPECT_EQ(0, decompose_move_assigned);
1224     EXPECT_EQ(0, decompose_copy_assigned);
1225     expected_copy_constructed = decompose_copy_constructed;
1226     expected_move_constructed = decompose_move_constructed;
1227   }
1228 
1229   {  // insert(first, last)
1230     DecomposeSet set2;
1231     set2.insert(elem_vector.begin(), elem_vector.end());
1232     // Expect exactly N elements copied into set, expect at most 2*N elements
1233     // moving internally for all resizing needed (for a growth factor of 2).
1234     const int expected_new_elements = elem_vector_count;
1235     const int expected_max_element_moves = 2 * elem_vector_count;
1236     expected_copy_constructed += expected_new_elements;
1237     EXPECT_EQ(expected_copy_constructed, decompose_copy_constructed);
1238     expected_move_constructed += expected_max_element_moves;
1239     EXPECT_GE(expected_move_constructed, decompose_move_constructed);
1240     EXPECT_EQ(0, decompose_move_assigned);
1241     EXPECT_EQ(0, decompose_copy_assigned);
1242     expected_copy_constructed = decompose_copy_constructed;
1243     expected_move_constructed = decompose_move_constructed;
1244   }
1245 }
1246 
1247 TEST(Table, Decompose) {
1248   if (SwisstableGenerationsEnabled()) {
1249     GTEST_SKIP() << "Generations being enabled causes extra rehashes.";
1250   }
1251 
1252   TestDecompose<DecomposeHash, DecomposeEq>(false);
1253 
1254   struct TransparentHashIntOverload {
1255     size_t operator()(const DecomposeType& a) const { return a.i; }
1256     size_t operator()(int a) const { return a; }
1257   };
1258   struct TransparentEqIntOverload {
1259     bool operator()(const DecomposeType& a, const DecomposeType& b) const {
1260       return a.i == b.i;
1261     }
1262     bool operator()(const DecomposeType& a, int b) const { return a.i == b; }
1263   };
1264   TestDecompose<TransparentHashIntOverload, DecomposeEq>(true);
1265   TestDecompose<TransparentHashIntOverload, TransparentEqIntOverload>(true);
1266   TestDecompose<DecomposeHash, TransparentEqIntOverload>(true);
1267 }
1268 
1269 // Returns the largest m such that a table with m elements has the same number
1270 // of buckets as a table with n elements.
1271 size_t MaxDensitySize(size_t n) {
1272   IntTable t;
1273   t.reserve(n);
1274   for (size_t i = 0; i != n; ++i) t.emplace(i);
1275   const size_t c = t.bucket_count();
1276   while (c == t.bucket_count()) t.emplace(n++);
1277   return t.size() - 1;
1278 }
1279 
1280 struct Modulo1000Hash {
1281   size_t operator()(int64_t x) const { return static_cast<size_t>(x) % 1000; }
1282 };
1283 
1284 struct Modulo1000HashTable
1285     : public raw_hash_set<IntPolicy, Modulo1000Hash, std::equal_to<int>,
1286                           std::allocator<int>> {};
1287 
1288 // Test that a rehash without resizing happens when there are many deleted slots.
1289 TEST(Table, RehashWithNoResize) {
1290   if (SwisstableGenerationsEnabled()) {
1291     GTEST_SKIP() << "Generations being enabled causes extra rehashes.";
1292   }
1293 
1294   Modulo1000HashTable t;
1295   // Add keys that all collide (same hash) so that there are at least
1296   // kMinFullGroups groups with Group::kWidth collisions each.
1297   // Then fill up to MaxDensitySize.
1298   const size_t kMinFullGroups = 7;
1299   std::vector<int> keys;
1300   for (size_t i = 0; i < MaxDensitySize(Group::kWidth * kMinFullGroups); ++i) {
1301     int k = i * 1000;
1302     t.emplace(k);
1303     keys.push_back(k);
1304   }
1305   const size_t capacity = t.capacity();
1306 
1307   // Remove elements from all groups except the first and the last one.
1308   // All elements removed from full groups will be marked as ctrl_t::kDeleted.
1309   const size_t erase_begin = Group::kWidth / 2;
1310   const size_t erase_end = (t.size() / Group::kWidth - 1) * Group::kWidth;
1311   for (size_t i = erase_begin; i < erase_end; ++i) {
1312     EXPECT_EQ(1, t.erase(keys[i])) << i;
1313   }
1314   keys.erase(keys.begin() + erase_begin, keys.begin() + erase_end);
1315 
1316   auto last_key = keys.back();
1317   size_t last_key_num_probes = GetHashtableDebugNumProbes(t, last_key);
1318 
1319   // Make sure that we have to make a lot of probes for the last key.
1320   ASSERT_GT(last_key_num_probes, kMinFullGroups);
1321 
1322   int x = 1;
1323   // Insert and erase one element at a time until an in-place rehash happens.
1324   while (last_key_num_probes == GetHashtableDebugNumProbes(t, last_key)) {
1325     t.emplace(x);
1326     ASSERT_EQ(capacity, t.capacity());
1327     // All elements should be there.
1328     ASSERT_TRUE(t.find(x) != t.end()) << x;
1329     for (const auto& k : keys) {
1330       ASSERT_TRUE(t.find(k) != t.end()) << k;
1331     }
1332     t.erase(x);
1333     ++x;
1334   }
1335 }
1336 
1337 TYPED_TEST_P(SooTest, InsertEraseStressTest) {
1338   TypeParam t;
1339   const size_t kMinElementCount = 250;
1340   std::deque<int> keys;
1341   size_t i = 0;
1342   for (; i < MaxDensitySize(kMinElementCount); ++i) {
1343     t.emplace(i);
1344     keys.push_back(i);
1345   }
1346   const size_t kNumIterations = 1000000;
1347   for (; i < kNumIterations; ++i) {
1348     ASSERT_EQ(1, t.erase(keys.front()));
1349     keys.pop_front();
1350     t.emplace(i);
1351     keys.push_back(i);
1352   }
1353 }
1354 
1355 TEST(Table, InsertOverloads) {
1356   StringTable t;
1357   // These should all trigger the insert(init_type) overload.
1358   t.insert({{}, {}});
1359   t.insert({"ABC", {}});
1360   t.insert({"DEF", "!!!"});
1361 
1362   EXPECT_THAT(t, UnorderedElementsAre(Pair("", ""), Pair("ABC", ""),
1363                                       Pair("DEF", "!!!")));
1364 }
1365 
1366 TYPED_TEST_P(SooTest, LargeTable) {
1367   TypeParam t;
1368   for (int64_t i = 0; i != 100000; ++i) t.emplace(i << 40);
1369   for (int64_t i = 0; i != 100000; ++i)
1370     ASSERT_EQ(i << 40, static_cast<int64_t>(*t.find(i << 40)));
1371 }
1372 
1373 // Timeout if copy is quadratic as it was in Rust.
1374 TYPED_TEST_P(SooTest, EnsureNonQuadraticAsInRust) {
1375   static const size_t kLargeSize = 1 << 15;
1376 
1377   TypeParam t;
1378   for (size_t i = 0; i != kLargeSize; ++i) {
1379     t.insert(i);
1380   }
1381 
1382   // If this is quadratic, the test will timeout.
1383   TypeParam t2;
1384   for (const auto& entry : t) t2.insert(entry);
1385 }
1386 
1387 TYPED_TEST_P(SooTest, ClearBug) {
1388   if (SwisstableGenerationsEnabled()) {
1389     GTEST_SKIP() << "Generations being enabled causes extra rehashes.";
1390   }
1391 
1392   TypeParam t;
1393   constexpr size_t capacity = container_internal::Group::kWidth - 1;
1394   constexpr size_t max_size = capacity / 2 + 1;
1395   for (size_t i = 0; i < max_size; ++i) {
1396     t.insert(i);
1397   }
1398   ASSERT_EQ(capacity, t.capacity());
1399   intptr_t original = reinterpret_cast<intptr_t>(&*t.find(2));
1400   t.clear();
1401   ASSERT_EQ(capacity, t.capacity());
1402   for (size_t i = 0; i < max_size; ++i) {
1403     t.insert(i);
1404   }
1405   ASSERT_EQ(capacity, t.capacity());
1406   intptr_t second = reinterpret_cast<intptr_t>(&*t.find(2));
1407   // We are checking that original and second are close enough to each other
1408   // that they are probably still in the same group.  This is not strictly
1409   // guaranteed.
1410   EXPECT_LT(static_cast<size_t>(std::abs(original - second)),
1411             capacity * sizeof(typename TypeParam::value_type));
1412 }
1413 
TYPED_TEST_P(SooTest,Erase)1414 TYPED_TEST_P(SooTest, Erase) {
1415   TypeParam t;
1416   EXPECT_TRUE(t.find(0) == t.end());
1417   auto res = t.emplace(0);
1418   EXPECT_TRUE(res.second);
1419   EXPECT_EQ(1, t.size());
1420   t.erase(res.first);
1421   EXPECT_EQ(0, t.size());
1422   EXPECT_TRUE(t.find(0) == t.end());
1423 }
1424 
TYPED_TEST_P(SooTest,EraseMaintainsValidIterator)1425 TYPED_TEST_P(SooTest, EraseMaintainsValidIterator) {
1426   TypeParam t;
1427   const int kNumElements = 100;
1428   for (int i = 0; i < kNumElements; i++) {
1429     EXPECT_TRUE(t.emplace(i).second);
1430   }
1431   EXPECT_EQ(t.size(), kNumElements);
1432 
1433   int num_erase_calls = 0;
1434   auto it = t.begin();
1435   while (it != t.end()) {
1436     t.erase(it++);
1437     num_erase_calls++;
1438   }
1439 
1440   EXPECT_TRUE(t.empty());
1441   EXPECT_EQ(num_erase_calls, kNumElements);
1442 }
1443 
TYPED_TEST_P(SooTest,EraseBeginEnd)1444 TYPED_TEST_P(SooTest, EraseBeginEnd) {
1445   TypeParam t;
1446   for (int i = 0; i < 10; ++i) t.insert(i);
1447   EXPECT_EQ(t.size(), 10);
1448   t.erase(t.begin(), t.end());
1449   EXPECT_EQ(t.size(), 0);
1450 }
1451 
1452 // Collect N bad keys with the following algorithm:
1453 // 1. Create an empty table and reserve it to 2 * N.
1454 // 2. Insert the next N sequential keys.
1455 // 3. Append the first Group::kWidth - 1 elements (in iteration order) to bad_keys.
1456 // 4. Clear the table without resizing (erase all elements, keep the capacity).
1457 // 5. Repeat from step 2 until at least N keys have been collected.
CollectBadMergeKeys(size_t N)1458 std::vector<int64_t> CollectBadMergeKeys(size_t N) {
1459   static constexpr int kGroupSize = Group::kWidth - 1;
1460 
1461   auto topk_range = [](size_t b, size_t e,
1462                        IntTable* t) -> std::vector<int64_t> {
1463     for (size_t i = b; i != e; ++i) {
1464       t->emplace(i);
1465     }
1466     std::vector<int64_t> res;
1467     res.reserve(kGroupSize);
1468     auto it = t->begin();
1469     for (size_t i = b; i != e && i != b + kGroupSize; ++i, ++it) {
1470       res.push_back(*it);
1471     }
1472     return res;
1473   };
1474 
1475   std::vector<int64_t> bad_keys;
1476   bad_keys.reserve(N);
1477   IntTable t;
1478   t.reserve(N * 2);
1479 
1480   for (size_t b = 0; bad_keys.size() < N; b += N) {
1481     auto keys = topk_range(b, b + N, &t);
1482     bad_keys.insert(bad_keys.end(), keys.begin(), keys.end());
1483     t.erase(t.begin(), t.end());
1484     EXPECT_TRUE(t.empty());
1485   }
1486   return bad_keys;
1487 }
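
// A small sanity-check sketch (added for illustration) of the contract the
// callers below rely on: because each batch draws from a disjoint [b, b + N)
// range, the helper returns at least N keys and never returns a duplicate.
TEST(CollectBadMergeKeysExample, ReturnsAtLeastNDistinctKeys) {
  constexpr size_t kN = 64;
  std::vector<int64_t> keys = CollectBadMergeKeys(kN);
  EXPECT_GE(keys.size(), kN);
  absl::flat_hash_set<int64_t> unique(keys.begin(), keys.end());
  EXPECT_EQ(unique.size(), keys.size());
}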
1488 
1489 struct ProbeStats {
1490   // Number of elements with each probe length, aggregated over all tested tables.
1491   std::vector<size_t> all_probes_histogram;
1492   // Ratios total_probe_length/size for every tested table.
1493   std::vector<double> single_table_ratios;
1494 
1495   // Average ratio total_probe_length/size over tables.
AvgRatioabsl::container_internal::__anon2239e51d0111::ProbeStats1496   double AvgRatio() const {
1497     return std::accumulate(single_table_ratios.begin(),
1498                            single_table_ratios.end(), 0.0) /
1499            single_table_ratios.size();
1500   }
1501 
1502   // Maximum ratio total_probe_length/size over tables.
MaxRatioabsl::container_internal::__anon2239e51d0111::ProbeStats1503   double MaxRatio() const {
1504     return *std::max_element(single_table_ratios.begin(),
1505                              single_table_ratios.end());
1506   }
1507 
1508   // Percentile ratio total_probe_length/size over tables.
PercentileRatioabsl::container_internal::__anon2239e51d0111::ProbeStats1509   double PercentileRatio(double Percentile = 0.95) const {
1510     auto r = single_table_ratios;
1511     auto mid = r.begin() + static_cast<size_t>(r.size() * Percentile);
1512     if (mid != r.end()) {
1513       std::nth_element(r.begin(), mid, r.end());
1514       return *mid;
1515     } else {
1516       return MaxRatio();
1517     }
1518   }
1519 
1520   // Maximum probe length over all elements and all tables.
MaxProbeabsl::container_internal::__anon2239e51d0111::ProbeStats1521   size_t MaxProbe() const { return all_probes_histogram.size(); }
1522 
1523   // Fraction of elements with specified probe length.
ProbeNormalizedHistogramabsl::container_internal::__anon2239e51d0111::ProbeStats1524   std::vector<double> ProbeNormalizedHistogram() const {
1525     double total_elements = std::accumulate(all_probes_histogram.begin(),
1526                                             all_probes_histogram.end(), 0ull);
1527     std::vector<double> res;
1528     for (size_t p : all_probes_histogram) {
1529       res.push_back(p / total_elements);
1530     }
1531     return res;
1532   }
1533 
PercentileProbeabsl::container_internal::__anon2239e51d0111::ProbeStats1534   size_t PercentileProbe(double Percentile = 0.99) const {
1535     size_t idx = 0;
1536     for (double p : ProbeNormalizedHistogram()) {
1537       if (Percentile > p) {
1538         Percentile -= p;
1539         ++idx;
1540       } else {
1541         return idx;
1542       }
1543     }
1544     return idx;
1545   }
1546 
operator <<(std::ostream & out,const ProbeStats & s)1547   friend std::ostream& operator<<(std::ostream& out, const ProbeStats& s) {
1548     out << "{AvgRatio:" << s.AvgRatio() << ", MaxRatio:" << s.MaxRatio()
1549         << ", PercentileRatio:" << s.PercentileRatio()
1550         << ", MaxProbe:" << s.MaxProbe() << ", Probes=[";
1551     for (double p : s.ProbeNormalizedHistogram()) {
1552       out << p << ",";
1553     }
1554     out << "]}";
1555 
1556     return out;
1557   }
1558 };
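
// An illustrative test with hand-picked numbers (added for exposition only)
// to make the statistics above concrete: a histogram of {90, 8, 2} means 90
// elements needed probe length 0, 8 needed length 1, and 2 needed length 2,
// so the normalized histogram is {0.90, 0.08, 0.02} and the 99th-percentile
// probe length is 2. The ratio accessors operate on single_table_ratios.
TEST(ProbeStatsExample, StatisticsFromHandPickedValues) {
  ProbeStats stats;
  stats.all_probes_histogram = {90, 8, 2};
  stats.single_table_ratios = {0.04, 0.12, 0.20};

  EXPECT_EQ(stats.MaxProbe(), 3);  // Size of the histogram.
  EXPECT_EQ(stats.PercentileProbe(0.99), 2);
  EXPECT_NEAR(stats.AvgRatio(), 0.12, 1e-9);
  EXPECT_DOUBLE_EQ(stats.MaxRatio(), 0.20);
  EXPECT_DOUBLE_EQ(stats.PercentileRatio(0.95), 0.20);
}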
1559 
1560 struct ExpectedStats {
1561   double avg_ratio;
1562   double max_ratio;
1563   std::vector<std::pair<double, double>> pecentile_ratios;
1564   std::vector<std::pair<double, double>> pecentile_probes;
1565 
operator <<(std::ostream & out,const ExpectedStats & s)1566   friend std::ostream& operator<<(std::ostream& out, const ExpectedStats& s) {
1567     out << "{AvgRatio:" << s.avg_ratio << ", MaxRatio:" << s.max_ratio
1568         << ", PercentileRatios: [";
1569     for (auto el : s.pecentile_ratios) {
1570       out << el.first << ":" << el.second << ", ";
1571     }
1572     out << "], PercentileProbes: [";
1573     for (auto el : s.pecentile_probes) {
1574       out << el.first << ":" << el.second << ", ";
1575     }
1576     out << "]}";
1577 
1578     return out;
1579   }
1580 };
1581 
VerifyStats(size_t size,const ExpectedStats & exp,const ProbeStats & stats)1582 void VerifyStats(size_t size, const ExpectedStats& exp,
1583                  const ProbeStats& stats) {
1584   EXPECT_LT(stats.AvgRatio(), exp.avg_ratio) << size << " " << stats;
1585   EXPECT_LT(stats.MaxRatio(), exp.max_ratio) << size << " " << stats;
1586   for (auto pr : exp.pecentile_ratios) {
1587     EXPECT_LE(stats.PercentileRatio(pr.first), pr.second)
1588         << size << " " << pr.first << " " << stats;
1589   }
1590 
1591   for (auto pr : exp.pecentile_probes) {
1592     EXPECT_LE(stats.PercentileProbe(pr.first), pr.second)
1593         << size << " " << pr.first << " " << stats;
1594   }
1595 }
1596 
1597 using ProbeStatsPerSize = std::map<size_t, ProbeStats>;
1598 
1599 // Collect aggregate ProbeStats over num_iters iterations of the following algorithm:
1600 // 1. Create a new table and reserve it to keys.size() * 2.
1601 // 2. Insert all keys, each XORed with the iteration's seed.
1602 // 3. Collect ProbeStats from the final table.
CollectProbeStatsOnKeysXoredWithSeed(const std::vector<int64_t> & keys,size_t num_iters)1603 ProbeStats CollectProbeStatsOnKeysXoredWithSeed(
1604     const std::vector<int64_t>& keys, size_t num_iters) {
1605   const size_t reserve_size = keys.size() * 2;
1606 
1607   ProbeStats stats;
1608 
1609   int64_t seed = 0x71b1a19b907d6e33;
1610   while (num_iters--) {
1611     seed = static_cast<int64_t>(static_cast<uint64_t>(seed) * 17 + 13);
1612     IntTable t1;
1613     t1.reserve(reserve_size);
1614     for (const auto& key : keys) {
1615       t1.emplace(key ^ seed);
1616     }
1617 
1618     auto probe_histogram = GetHashtableDebugNumProbesHistogram(t1);
1619     stats.all_probes_histogram.resize(
1620         std::max(stats.all_probes_histogram.size(), probe_histogram.size()));
1621     std::transform(probe_histogram.begin(), probe_histogram.end(),
1622                    stats.all_probes_histogram.begin(),
1623                    stats.all_probes_histogram.begin(), std::plus<size_t>());
1624 
1625     size_t total_probe_seq_length = 0;
1626     for (size_t i = 0; i < probe_histogram.size(); ++i) {
1627       total_probe_seq_length += i * probe_histogram[i];
1628     }
1629     stats.single_table_ratios.push_back(total_probe_seq_length * 1.0 /
1630                                         keys.size());
1631     t1.erase(t1.begin(), t1.end());
1632   }
1633   return stats;
1634 }
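
// Worked example with assumed numbers, to make the bookkeeping above concrete:
// if the 100 inserted keys produce the probe-length histogram {90, 8, 2}, then
// total_probe_seq_length = 0*90 + 1*8 + 2*2 = 12 and the value appended to
// single_table_ratios is 12 / 100 = 0.12.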
1635 
XorSeedExpectedStats()1636 ExpectedStats XorSeedExpectedStats() {
1637   constexpr bool kRandomizesInserts =
1638 #ifdef NDEBUG
1639       false;
1640 #else   // NDEBUG
1641       true;
1642 #endif  // NDEBUG
1643 
1644   // The effective load factor is larger in non-opt mode because we insert
1645   // elements out of order.
1646   switch (container_internal::Group::kWidth) {
1647     case 8:
1648       if (kRandomizesInserts) {
1649         return {0.05,
1650                 1.0,
1651                 {{0.95, 0.5}},
1652                 {{0.95, 0}, {0.99, 2}, {0.999, 4}, {0.9999, 10}}};
1653       } else {
1654         return {0.05,
1655                 2.0,
1656                 {{0.95, 0.1}},
1657                 {{0.95, 0}, {0.99, 2}, {0.999, 4}, {0.9999, 10}}};
1658       }
1659     case 16:
1660       if (kRandomizesInserts) {
1661         return {0.1,
1662                 2.0,
1663                 {{0.95, 0.1}},
1664                 {{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}};
1665       } else {
1666         return {0.05,
1667                 1.0,
1668                 {{0.95, 0.05}},
1669                 {{0.95, 0}, {0.99, 1}, {0.999, 4}, {0.9999, 10}}};
1670       }
1671   }
1672   LOG(FATAL) << "Unknown Group width";
1673   return {};
1674 }
1675 
1676 // TODO(b/80415403): Figure out why this test is so flaky, esp. on MSVC
TEST(Table,DISABLED_EnsureNonQuadraticTopNXorSeedByProbeSeqLength)1677 TEST(Table, DISABLED_EnsureNonQuadraticTopNXorSeedByProbeSeqLength) {
1678   ProbeStatsPerSize stats;
1679   std::vector<size_t> sizes = {Group::kWidth << 5, Group::kWidth << 10};
1680   for (size_t size : sizes) {
1681     stats[size] =
1682         CollectProbeStatsOnKeysXoredWithSeed(CollectBadMergeKeys(size), 200);
1683   }
1684   auto expected = XorSeedExpectedStats();
1685   for (size_t size : sizes) {
1686     auto& stat = stats[size];
1687     VerifyStats(size, expected, stat);
1688     LOG(INFO) << size << " " << stat;
1689   }
1690 }
1691 
1692 // Collect aggregate ProbeStats over num_iters iterations of the following algorithm:
1693 // 1. Create a new table.
1694 // 2. Select a random 10% window of the keys and, for each selected key, insert the 10 elements key * 17 + j * 13 (j in [0, 10)).
1695 // 3. Collect ProbeStats from the final table.
CollectProbeStatsOnLinearlyTransformedKeys(const std::vector<int64_t> & keys,size_t num_iters)1696 ProbeStats CollectProbeStatsOnLinearlyTransformedKeys(
1697     const std::vector<int64_t>& keys, size_t num_iters) {
1698   ProbeStats stats;
1699 
1700   std::random_device rd;
1701   std::mt19937 rng(rd());
1702   auto linear_transform = [](size_t x, size_t y) { return x * 17 + y * 13; };
1703   std::uniform_int_distribution<size_t> dist(0, keys.size() - 1);
1704   while (num_iters--) {
1705     IntTable t1;
1706     size_t num_keys = keys.size() / 10;
1707     size_t start = dist(rng);
1708     for (size_t i = 0; i != num_keys; ++i) {
1709       for (size_t j = 0; j != 10; ++j) {
1710         t1.emplace(linear_transform(keys[(i + start) % keys.size()], j));
1711       }
1712     }
1713 
1714     auto probe_histogram = GetHashtableDebugNumProbesHistogram(t1);
1715     stats.all_probes_histogram.resize(
1716         std::max(stats.all_probes_histogram.size(), probe_histogram.size()));
1717     std::transform(probe_histogram.begin(), probe_histogram.end(),
1718                    stats.all_probes_histogram.begin(),
1719                    stats.all_probes_histogram.begin(), std::plus<size_t>());
1720 
1721     size_t total_probe_seq_length = 0;
1722     for (size_t i = 0; i < probe_histogram.size(); ++i) {
1723       total_probe_seq_length += i * probe_histogram[i];
1724     }
1725     stats.single_table_ratios.push_back(total_probe_seq_length * 1.0 /
1726                                         t1.size());
1727     t1.erase(t1.begin(), t1.end());
1728   }
1729   return stats;
1730 }
1731 
LinearTransformExpectedStats()1732 ExpectedStats LinearTransformExpectedStats() {
1733   constexpr bool kRandomizesInserts =
1734 #ifdef NDEBUG
1735       false;
1736 #else   // NDEBUG
1737       true;
1738 #endif  // NDEBUG
1739 
1740   // The effective load factor is larger in non-opt mode because we insert
1741   // elements out of order.
1742   switch (container_internal::Group::kWidth) {
1743     case 8:
1744       if (kRandomizesInserts) {
1745         return {0.1,
1746                 0.5,
1747                 {{0.95, 0.3}},
1748                 {{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}};
1749       } else {
1750         return {0.4,
1751                 0.6,
1752                 {{0.95, 0.5}},
1753                 {{0.95, 1}, {0.99, 14}, {0.999, 23}, {0.9999, 26}}};
1754       }
1755     case 16:
1756       if (kRandomizesInserts) {
1757         return {0.1,
1758                 0.4,
1759                 {{0.95, 0.3}},
1760                 {{0.95, 1}, {0.99, 2}, {0.999, 9}, {0.9999, 15}}};
1761       } else {
1762         return {0.05,
1763                 0.2,
1764                 {{0.95, 0.1}},
1765                 {{0.95, 0}, {0.99, 1}, {0.999, 6}, {0.9999, 10}}};
1766       }
1767   }
1768   LOG(FATAL) << "Unknown Group width";
1769   return {};
1770 }
1771 
1772 // TODO(b/80415403): Figure out why this test is so flaky.
TEST(Table,DISABLED_EnsureNonQuadraticTopNLinearTransformByProbeSeqLength)1773 TEST(Table, DISABLED_EnsureNonQuadraticTopNLinearTransformByProbeSeqLength) {
1774   ProbeStatsPerSize stats;
1775   std::vector<size_t> sizes = {Group::kWidth << 5, Group::kWidth << 10};
1776   for (size_t size : sizes) {
1777     stats[size] = CollectProbeStatsOnLinearlyTransformedKeys(
1778         CollectBadMergeKeys(size), 300);
1779   }
1780   auto expected = LinearTransformExpectedStats();
1781   for (size_t size : sizes) {
1782     auto& stat = stats[size];
1783     VerifyStats(size, expected, stat);
1784     LOG(INFO) << size << " " << stat;
1785   }
1786 }
1787 
TEST(Table,EraseCollision)1788 TEST(Table, EraseCollision) {
1789   BadTable t;
1790 
1791   // 1 2 3
1792   t.emplace(1);
1793   t.emplace(2);
1794   t.emplace(3);
1795   EXPECT_THAT(*t.find(1), 1);
1796   EXPECT_THAT(*t.find(2), 2);
1797   EXPECT_THAT(*t.find(3), 3);
1798   EXPECT_EQ(3, t.size());
1799 
1800   // 1 DELETED 3
1801   t.erase(t.find(2));
1802   EXPECT_THAT(*t.find(1), 1);
1803   EXPECT_TRUE(t.find(2) == t.end());
1804   EXPECT_THAT(*t.find(3), 3);
1805   EXPECT_EQ(2, t.size());
1806 
1807   // DELETED DELETED 3
1808   t.erase(t.find(1));
1809   EXPECT_TRUE(t.find(1) == t.end());
1810   EXPECT_TRUE(t.find(2) == t.end());
1811   EXPECT_THAT(*t.find(3), 3);
1812   EXPECT_EQ(1, t.size());
1813 
1814   // DELETED DELETED DELETED
1815   t.erase(t.find(3));
1816   EXPECT_TRUE(t.find(1) == t.end());
1817   EXPECT_TRUE(t.find(2) == t.end());
1818   EXPECT_TRUE(t.find(3) == t.end());
1819   EXPECT_EQ(0, t.size());
1820 }
1821 
TEST(Table,EraseInsertProbing)1822 TEST(Table, EraseInsertProbing) {
1823   BadTable t(100);
1824 
1825   // 1 2 3 4
1826   t.emplace(1);
1827   t.emplace(2);
1828   t.emplace(3);
1829   t.emplace(4);
1830 
1831   // 1 DELETED 3 DELETED
1832   t.erase(t.find(2));
1833   t.erase(t.find(4));
1834 
1835   // 1 10 3 11 12
1836   t.emplace(10);
1837   t.emplace(11);
1838   t.emplace(12);
1839 
1840   EXPECT_EQ(5, t.size());
1841   EXPECT_THAT(t, UnorderedElementsAre(1, 10, 3, 11, 12));
1842 }
1843 
TEST(Table,GrowthInfoDeletedBit)1844 TEST(Table, GrowthInfoDeletedBit) {
1845   BadTable t;
1846   EXPECT_TRUE(
1847       RawHashSetTestOnlyAccess::GetCommon(t).growth_info().HasNoDeleted());
1848   int64_t init_count = static_cast<int64_t>(
1849       CapacityToGrowth(NormalizeCapacity(Group::kWidth + 1)));
1850   for (int64_t i = 0; i < init_count; ++i) {
1851     t.insert(i);
1852   }
1853   EXPECT_TRUE(
1854       RawHashSetTestOnlyAccess::GetCommon(t).growth_info().HasNoDeleted());
1855   t.erase(0);
1856   EXPECT_EQ(RawHashSetTestOnlyAccess::CountTombstones(t), 1);
1857   EXPECT_FALSE(
1858       RawHashSetTestOnlyAccess::GetCommon(t).growth_info().HasNoDeleted());
1859   t.rehash(0);
1860   EXPECT_EQ(RawHashSetTestOnlyAccess::CountTombstones(t), 0);
1861   EXPECT_TRUE(
1862       RawHashSetTestOnlyAccess::GetCommon(t).growth_info().HasNoDeleted());
1863 }
1864 
TYPED_TEST_P(SooTest,Clear)1865 TYPED_TEST_P(SooTest, Clear) {
1866   TypeParam t;
1867   EXPECT_TRUE(t.find(0) == t.end());
1868   t.clear();
1869   EXPECT_TRUE(t.find(0) == t.end());
1870   auto res = t.emplace(0);
1871   EXPECT_TRUE(res.second);
1872   EXPECT_EQ(1, t.size());
1873   t.clear();
1874   EXPECT_EQ(0, t.size());
1875   EXPECT_TRUE(t.find(0) == t.end());
1876 }
1877 
TYPED_TEST_P(SooTest,Swap)1878 TYPED_TEST_P(SooTest, Swap) {
1879   TypeParam t;
1880   EXPECT_TRUE(t.find(0) == t.end());
1881   auto res = t.emplace(0);
1882   EXPECT_TRUE(res.second);
1883   EXPECT_EQ(1, t.size());
1884   TypeParam u;
1885   t.swap(u);
1886   EXPECT_EQ(0, t.size());
1887   EXPECT_EQ(1, u.size());
1888   EXPECT_TRUE(t.find(0) == t.end());
1889   EXPECT_THAT(*u.find(0), 0);
1890 }
1891 
TYPED_TEST_P(SooTest,Rehash)1892 TYPED_TEST_P(SooTest, Rehash) {
1893   TypeParam t;
1894   EXPECT_TRUE(t.find(0) == t.end());
1895   t.emplace(0);
1896   t.emplace(1);
1897   EXPECT_EQ(2, t.size());
1898   t.rehash(128);
1899   EXPECT_EQ(2, t.size());
1900   EXPECT_THAT(*t.find(0), 0);
1901   EXPECT_THAT(*t.find(1), 1);
1902 }
1903 
TYPED_TEST_P(SooTest,RehashDoesNotRehashWhenNotNecessary)1904 TYPED_TEST_P(SooTest, RehashDoesNotRehashWhenNotNecessary) {
1905   TypeParam t;
1906   t.emplace(0);
1907   t.emplace(1);
1908   auto* p = &*t.find(0);
1909   t.rehash(1);
1910   EXPECT_EQ(p, &*t.find(0));
1911 }
1912 
1913 // The following two tests use a non-SOO table because they test for 0 capacity.
TEST(Table,RehashZeroDoesNotAllocateOnEmptyTable)1914 TEST(Table, RehashZeroDoesNotAllocateOnEmptyTable) {
1915   NonSooIntTable t;
1916   t.rehash(0);
1917   EXPECT_EQ(0, t.bucket_count());
1918 }
1919 
TEST(Table,RehashZeroDeallocatesEmptyTable)1920 TEST(Table, RehashZeroDeallocatesEmptyTable) {
1921   NonSooIntTable t;
1922   t.emplace(0);
1923   t.clear();
1924   EXPECT_NE(0, t.bucket_count());
1925   t.rehash(0);
1926   EXPECT_EQ(0, t.bucket_count());
1927 }
1928 
TYPED_TEST_P(SooTest,RehashZeroForcesRehash)1929 TYPED_TEST_P(SooTest, RehashZeroForcesRehash) {
1930   TypeParam t;
1931   t.emplace(0);
1932   t.emplace(1);
1933   auto* p = &*t.find(0);
1934   t.rehash(0);
1935   EXPECT_NE(p, &*t.find(0));
1936 }
1937 
TEST(Table,ConstructFromInitList)1938 TEST(Table, ConstructFromInitList) {
1939   using P = std::pair<std::string, std::string>;
1940   struct Q {
1941     operator P() const { return {}; }  // NOLINT
1942   };
1943   StringTable t = {P(), Q(), {}, {{}, {}}};
1944 }
1945 
TYPED_TEST_P(SooTest,CopyConstruct)1946 TYPED_TEST_P(SooTest, CopyConstruct) {
1947   TypeParam t;
1948   t.emplace(0);
1949   EXPECT_EQ(1, t.size());
1950   {
1951     TypeParam u(t);
1952     EXPECT_EQ(1, u.size());
1953     EXPECT_THAT(*u.find(0), 0);
1954   }
1955   {
1956     TypeParam u{t};
1957     EXPECT_EQ(1, u.size());
1958     EXPECT_THAT(*u.find(0), 0);
1959   }
1960   {
1961     TypeParam u = t;
1962     EXPECT_EQ(1, u.size());
1963     EXPECT_THAT(*u.find(0), 0);
1964   }
1965 }
1966 
TYPED_TEST_P(SooTest,CopyDifferentSizes)1967 TYPED_TEST_P(SooTest, CopyDifferentSizes) {
1968   TypeParam t;
1969 
1970   for (int i = 0; i < 100; ++i) {
1971     t.emplace(i);
1972     TypeParam c = t;
1973     for (int j = 0; j <= i; ++j) {
1974       ASSERT_TRUE(c.find(j) != c.end()) << "i=" << i << " j=" << j;
1975     }
1976     // Testing find miss to verify that table is not full.
1977     ASSERT_TRUE(c.find(-1) == c.end());
1978   }
1979 }
1980 
TYPED_TEST_P(SooTest,CopyDifferentCapacities)1981 TYPED_TEST_P(SooTest, CopyDifferentCapacities) {
1982   for (int cap = 1; cap < 100; cap = cap * 2 + 1) {
1983     TypeParam t;
1984     t.reserve(static_cast<size_t>(cap));
1985     for (int i = 0; i <= cap; ++i) {
1986       t.emplace(i);
1987       if (i != cap && i % 5 != 0) {
1988         continue;
1989       }
1990       TypeParam c = t;
1991       for (int j = 0; j <= i; ++j) {
1992         ASSERT_TRUE(c.find(j) != c.end())
1993             << "cap=" << cap << " i=" << i << " j=" << j;
1994       }
1995       // Testing find miss to verify that table is not full.
1996       ASSERT_TRUE(c.find(-1) == c.end());
1997     }
1998   }
1999 }
2000 
TEST(Table,CopyConstructWithAlloc)2001 TEST(Table, CopyConstructWithAlloc) {
2002   StringTable t;
2003   t.emplace("a", "b");
2004   EXPECT_EQ(1, t.size());
2005   StringTable u(t, Alloc<std::pair<std::string, std::string>>());
2006   EXPECT_EQ(1, u.size());
2007   EXPECT_THAT(*u.find("a"), Pair("a", "b"));
2008 }
2009 
2010 struct ExplicitAllocIntTable
2011     : raw_hash_set<IntPolicy, hash_default_hash<int64_t>,
2012                    std::equal_to<int64_t>, Alloc<int64_t>> {
2013   ExplicitAllocIntTable() = default;
2014 };
2015 
TEST(Table,AllocWithExplicitCtor)2016 TEST(Table, AllocWithExplicitCtor) {
2017   ExplicitAllocIntTable t;
2018   EXPECT_EQ(0, t.size());
2019 }
2020 
TEST(Table,MoveConstruct)2021 TEST(Table, MoveConstruct) {
2022   {
2023     StringTable t;
2024     t.emplace("a", "b");
2025     EXPECT_EQ(1, t.size());
2026 
2027     StringTable u(std::move(t));
2028     EXPECT_EQ(1, u.size());
2029     EXPECT_THAT(*u.find("a"), Pair("a", "b"));
2030   }
2031   {
2032     StringTable t;
2033     t.emplace("a", "b");
2034     EXPECT_EQ(1, t.size());
2035 
2036     StringTable u{std::move(t)};
2037     EXPECT_EQ(1, u.size());
2038     EXPECT_THAT(*u.find("a"), Pair("a", "b"));
2039   }
2040   {
2041     StringTable t;
2042     t.emplace("a", "b");
2043     EXPECT_EQ(1, t.size());
2044 
2045     StringTable u = std::move(t);
2046     EXPECT_EQ(1, u.size());
2047     EXPECT_THAT(*u.find("a"), Pair("a", "b"));
2048   }
2049 }
2050 
TEST(Table,MoveConstructWithAlloc)2051 TEST(Table, MoveConstructWithAlloc) {
2052   StringTable t;
2053   t.emplace("a", "b");
2054   EXPECT_EQ(1, t.size());
2055   StringTable u(std::move(t), Alloc<std::pair<std::string, std::string>>());
2056   EXPECT_EQ(1, u.size());
2057   EXPECT_THAT(*u.find("a"), Pair("a", "b"));
2058 }
2059 
TEST(Table,CopyAssign)2060 TEST(Table, CopyAssign) {
2061   StringTable t;
2062   t.emplace("a", "b");
2063   EXPECT_EQ(1, t.size());
2064   StringTable u;
2065   u = t;
2066   EXPECT_EQ(1, u.size());
2067   EXPECT_THAT(*u.find("a"), Pair("a", "b"));
2068 }
2069 
TEST(Table,CopySelfAssign)2070 TEST(Table, CopySelfAssign) {
2071   StringTable t;
2072   t.emplace("a", "b");
2073   EXPECT_EQ(1, t.size());
2074   t = *&t;
2075   EXPECT_EQ(1, t.size());
2076   EXPECT_THAT(*t.find("a"), Pair("a", "b"));
2077 }
2078 
TEST(Table,MoveAssign)2079 TEST(Table, MoveAssign) {
2080   StringTable t;
2081   t.emplace("a", "b");
2082   EXPECT_EQ(1, t.size());
2083   StringTable u;
2084   u = std::move(t);
2085   EXPECT_EQ(1, u.size());
2086   EXPECT_THAT(*u.find("a"), Pair("a", "b"));
2087 }
2088 
TEST(Table,MoveSelfAssign)2089 TEST(Table, MoveSelfAssign) {
2090   StringTable t;
2091   t.emplace("a", "b");
2092   EXPECT_EQ(1, t.size());
2093   t = std::move(*&t);
2094   // As long as we don't crash, it's fine.
2095 }
2096 
TEST(Table,Equality)2097 TEST(Table, Equality) {
2098   StringTable t;
2099   std::vector<std::pair<std::string, std::string>> v = {{"a", "b"},
2100                                                         {"aa", "bb"}};
2101   t.insert(std::begin(v), std::end(v));
2102   StringTable u = t;
2103   EXPECT_EQ(u, t);
2104 }
2105 
TEST(Table,Equality2)2106 TEST(Table, Equality2) {
2107   StringTable t;
2108   std::vector<std::pair<std::string, std::string>> v1 = {{"a", "b"},
2109                                                          {"aa", "bb"}};
2110   t.insert(std::begin(v1), std::end(v1));
2111   StringTable u;
2112   std::vector<std::pair<std::string, std::string>> v2 = {{"a", "a"},
2113                                                          {"aa", "aa"}};
2114   u.insert(std::begin(v2), std::end(v2));
2115   EXPECT_NE(u, t);
2116 }
2117 
TEST(Table,Equality3)2118 TEST(Table, Equality3) {
2119   StringTable t;
2120   std::vector<std::pair<std::string, std::string>> v1 = {{"b", "b"},
2121                                                          {"bb", "bb"}};
2122   t.insert(std::begin(v1), std::end(v1));
2123   StringTable u;
2124   std::vector<std::pair<std::string, std::string>> v2 = {{"a", "a"},
2125                                                          {"aa", "aa"}};
2126   u.insert(std::begin(v2), std::end(v2));
2127   EXPECT_NE(u, t);
2128 }
2129 
TYPED_TEST_P(SooTest,NumDeletedRegression)2130 TYPED_TEST_P(SooTest, NumDeletedRegression) {
2131   TypeParam t;
2132   t.emplace(0);
2133   t.erase(t.find(0));
2134   // construct over a deleted slot.
2135   t.emplace(0);
2136   t.clear();
2137 }
2138 
TYPED_TEST_P(SooTest,FindFullDeletedRegression)2139 TYPED_TEST_P(SooTest, FindFullDeletedRegression) {
2140   TypeParam t;
2141   for (int i = 0; i < 1000; ++i) {
2142     t.emplace(i);
2143     t.erase(t.find(i));
2144   }
2145   EXPECT_EQ(0, t.size());
2146 }
2147 
TYPED_TEST_P(SooTest,ReplacingDeletedSlotDoesNotRehash)2148 TYPED_TEST_P(SooTest, ReplacingDeletedSlotDoesNotRehash) {
2149   // We need to disable hashtablez to avoid issues related to SOO and sampling.
2150   SetHashtablezEnabled(false);
2151 
2152   size_t n;
2153   {
2154     // Compute n such that n is the maximum number of elements before rehash.
2155     TypeParam t;
2156     t.emplace(0);
2157     size_t c = t.bucket_count();
2158     for (n = 1; c == t.bucket_count(); ++n) t.emplace(n);
2159     --n;
2160   }
2161   TypeParam t;
2162   t.rehash(n);
2163   const size_t c = t.bucket_count();
2164   for (size_t i = 0; i != n; ++i) t.emplace(i);
2165   EXPECT_EQ(c, t.bucket_count()) << "rehashing threshold = " << n;
2166   t.erase(0);
2167   t.emplace(0);
2168   EXPECT_EQ(c, t.bucket_count()) << "rehashing threshold = " << n;
2169 }
2170 
TEST(Table,NoThrowMoveConstruct)2171 TEST(Table, NoThrowMoveConstruct) {
2172   ASSERT_TRUE(
2173       std::is_nothrow_copy_constructible<absl::Hash<absl::string_view>>::value);
2174   ASSERT_TRUE(std::is_nothrow_copy_constructible<
2175               std::equal_to<absl::string_view>>::value);
2176   ASSERT_TRUE(std::is_nothrow_copy_constructible<std::allocator<int>>::value);
2177   EXPECT_TRUE(std::is_nothrow_move_constructible<StringTable>::value);
2178 }
2179 
TEST(Table,NoThrowMoveAssign)2180 TEST(Table, NoThrowMoveAssign) {
2181   ASSERT_TRUE(
2182       std::is_nothrow_move_assignable<absl::Hash<absl::string_view>>::value);
2183   ASSERT_TRUE(
2184       std::is_nothrow_move_assignable<std::equal_to<absl::string_view>>::value);
2185   ASSERT_TRUE(std::is_nothrow_move_assignable<std::allocator<int>>::value);
2186   ASSERT_TRUE(
2187       absl::allocator_traits<std::allocator<int>>::is_always_equal::value);
2188   EXPECT_TRUE(std::is_nothrow_move_assignable<StringTable>::value);
2189 }
2190 
TEST(Table,NoThrowSwappable)2191 TEST(Table, NoThrowSwappable) {
2192   ASSERT_TRUE(
2193       container_internal::IsNoThrowSwappable<absl::Hash<absl::string_view>>());
2194   ASSERT_TRUE(container_internal::IsNoThrowSwappable<
2195               std::equal_to<absl::string_view>>());
2196   ASSERT_TRUE(container_internal::IsNoThrowSwappable<std::allocator<int>>());
2197   EXPECT_TRUE(container_internal::IsNoThrowSwappable<StringTable>());
2198 }
2199 
TEST(Table,HeterogeneousLookup)2200 TEST(Table, HeterogeneousLookup) {
2201   struct Hash {
2202     size_t operator()(int64_t i) const { return i; }
2203     size_t operator()(double i) const {
2204       ADD_FAILURE();
2205       return i;
2206     }
2207   };
2208   struct Eq {
2209     bool operator()(int64_t a, int64_t b) const { return a == b; }
2210     bool operator()(double a, int64_t b) const {
2211       ADD_FAILURE();
2212       return a == b;
2213     }
2214     bool operator()(int64_t a, double b) const {
2215       ADD_FAILURE();
2216       return a == b;
2217     }
2218     bool operator()(double a, double b) const {
2219       ADD_FAILURE();
2220       return a == b;
2221     }
2222   };
2223 
2224   struct THash {
2225     using is_transparent = void;
2226     size_t operator()(int64_t i) const { return i; }
2227     size_t operator()(double i) const { return i; }
2228   };
2229   struct TEq {
2230     using is_transparent = void;
2231     bool operator()(int64_t a, int64_t b) const { return a == b; }
2232     bool operator()(double a, int64_t b) const { return a == b; }
2233     bool operator()(int64_t a, double b) const { return a == b; }
2234     bool operator()(double a, double b) const { return a == b; }
2235   };
2236 
2237   raw_hash_set<IntPolicy, Hash, Eq, Alloc<int64_t>> s{0, 1, 2};
2238   // It will convert to int64_t before the query.
2239   EXPECT_EQ(1, *s.find(double{1.1}));
2240 
2241   raw_hash_set<IntPolicy, THash, TEq, Alloc<int64_t>> ts{0, 1, 2};
2242   // It will try to use the double, and fail to find the object.
2243   EXPECT_TRUE(ts.find(1.1) == ts.end());
2244 }
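
// The same is_transparent mechanism is what the public containers rely on.
// A brief illustration (using absl::flat_hash_set rather than raw_hash_set;
// its default hasher and equality for std::string are transparent): lookups
// may take an absl::string_view key without materializing a std::string.
TEST(Table, HeterogeneousLookupFlatHashSetExample) {
  absl::flat_hash_set<std::string> s = {"abc", "def"};
  EXPECT_TRUE(s.find(absl::string_view("abc")) != s.end());
  EXPECT_TRUE(s.find(absl::string_view("xyz")) == s.end());
  EXPECT_EQ(s.count(absl::string_view("def")), 1);
}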
2245 
2246 template <class Table>
2247 using CallFind = decltype(std::declval<Table&>().find(17));
2248 
2249 template <class Table>
2250 using CallErase = decltype(std::declval<Table&>().erase(17));
2251 
2252 template <class Table>
2253 using CallExtract = decltype(std::declval<Table&>().extract(17));
2254 
2255 template <class Table>
2256 using CallPrefetch = decltype(std::declval<Table&>().prefetch(17));
2257 
2258 template <class Table>
2259 using CallCount = decltype(std::declval<Table&>().count(17));
2260 
2261 template <template <typename> class C, class Table, class = void>
2262 struct VerifyResultOf : std::false_type {};
2263 
2264 template <template <typename> class C, class Table>
2265 struct VerifyResultOf<C, Table, absl::void_t<C<Table>>> : std::true_type {};
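
// VerifyResultOf is the standard SFINAE detection idiom: C<Table> is only
// well-formed if the probed expression compiles, and absl::void_t collapses it
// to void so the partial specialization above is selected. A minimal
// illustration against a standard container (CallSize is a hypothetical probe
// added here purely for exposition):
template <class T>
using CallSize = decltype(std::declval<T&>().size());
static_assert(VerifyResultOf<CallSize, std::vector<int>>::value,
              "std::vector<int> has a size() member");
static_assert(!VerifyResultOf<CallSize, int>::value,
              "int has no size() member");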
2266 
TEST(Table,HeterogeneousLookupOverloads)2267 TEST(Table, HeterogeneousLookupOverloads) {
2268   using NonTransparentTable =
2269       raw_hash_set<StringPolicy, absl::Hash<absl::string_view>,
2270                    std::equal_to<absl::string_view>, std::allocator<int>>;
2271 
2272   EXPECT_FALSE((VerifyResultOf<CallFind, NonTransparentTable>()));
2273   EXPECT_FALSE((VerifyResultOf<CallErase, NonTransparentTable>()));
2274   EXPECT_FALSE((VerifyResultOf<CallExtract, NonTransparentTable>()));
2275   EXPECT_FALSE((VerifyResultOf<CallPrefetch, NonTransparentTable>()));
2276   EXPECT_FALSE((VerifyResultOf<CallCount, NonTransparentTable>()));
2277 
2278   using TransparentTable =
2279       raw_hash_set<StringPolicy, hash_default_hash<absl::string_view>,
2280                    hash_default_eq<absl::string_view>, std::allocator<int>>;
2281 
2282   EXPECT_TRUE((VerifyResultOf<CallFind, TransparentTable>()));
2283   EXPECT_TRUE((VerifyResultOf<CallErase, TransparentTable>()));
2284   EXPECT_TRUE((VerifyResultOf<CallExtract, TransparentTable>()));
2285   EXPECT_TRUE((VerifyResultOf<CallPrefetch, TransparentTable>()));
2286   EXPECT_TRUE((VerifyResultOf<CallCount, TransparentTable>()));
2287 }
2288 
TEST(Iterator,IsDefaultConstructible)2289 TEST(Iterator, IsDefaultConstructible) {
2290   StringTable::iterator i;
2291   EXPECT_TRUE(i == StringTable::iterator());
2292 }
2293 
TEST(ConstIterator,IsDefaultConstructible)2294 TEST(ConstIterator, IsDefaultConstructible) {
2295   StringTable::const_iterator i;
2296   EXPECT_TRUE(i == StringTable::const_iterator());
2297 }
2298 
TEST(Iterator,ConvertsToConstIterator)2299 TEST(Iterator, ConvertsToConstIterator) {
2300   StringTable::iterator i;
2301   EXPECT_TRUE(i == StringTable::const_iterator());
2302 }
2303 
TEST(Iterator,Iterates)2304 TEST(Iterator, Iterates) {
2305   IntTable t;
2306   for (size_t i = 3; i != 6; ++i) EXPECT_TRUE(t.emplace(i).second);
2307   EXPECT_THAT(t, UnorderedElementsAre(3, 4, 5));
2308 }
2309 
TEST(Table,Merge)2310 TEST(Table, Merge) {
2311   StringTable t1, t2;
2312   t1.emplace("0", "-0");
2313   t1.emplace("1", "-1");
2314   t2.emplace("0", "~0");
2315   t2.emplace("2", "~2");
2316 
2317   EXPECT_THAT(t1, UnorderedElementsAre(Pair("0", "-0"), Pair("1", "-1")));
2318   EXPECT_THAT(t2, UnorderedElementsAre(Pair("0", "~0"), Pair("2", "~2")));
2319 
2320   t1.merge(t2);
2321   EXPECT_THAT(t1, UnorderedElementsAre(Pair("0", "-0"), Pair("1", "-1"),
2322                                        Pair("2", "~2")));
2323   EXPECT_THAT(t2, UnorderedElementsAre(Pair("0", "~0")));
2324 }
2325 
TEST(Table,IteratorEmplaceConstructibleRequirement)2326 TEST(Table, IteratorEmplaceConstructibleRequirement) {
2327   struct Value {
2328     explicit Value(absl::string_view view) : value(view) {}
2329     std::string value;
2330 
2331     bool operator==(const Value& other) const { return value == other.value; }
2332   };
2333   struct H {
2334     size_t operator()(const Value& v) const {
2335       return absl::Hash<std::string>{}(v.value);
2336     }
2337   };
2338 
2339   struct Table : raw_hash_set<ValuePolicy<Value>, H, std::equal_to<Value>,
2340                               std::allocator<Value>> {
2341     using Base = typename Table::raw_hash_set;
2342     using Base::Base;
2343   };
2344 
2345   std::string input[3]{"A", "B", "C"};
2346 
2347   Table t(std::begin(input), std::end(input));
2348   EXPECT_THAT(t, UnorderedElementsAre(Value{"A"}, Value{"B"}, Value{"C"}));
2349 
2350   input[0] = "D";
2351   input[1] = "E";
2352   input[2] = "F";
2353   t.insert(std::begin(input), std::end(input));
2354   EXPECT_THAT(t, UnorderedElementsAre(Value{"A"}, Value{"B"}, Value{"C"},
2355                                       Value{"D"}, Value{"E"}, Value{"F"}));
2356 }
2357 
TEST(Nodes,EmptyNodeType)2358 TEST(Nodes, EmptyNodeType) {
2359   using node_type = StringTable::node_type;
2360   node_type n;
2361   EXPECT_FALSE(n);
2362   EXPECT_TRUE(n.empty());
2363 
2364   EXPECT_TRUE((std::is_same<node_type::allocator_type,
2365                             StringTable::allocator_type>::value));
2366 }
2367 
TEST(Nodes,ExtractInsert)2368 TEST(Nodes, ExtractInsert) {
2369   constexpr char k0[] = "Very long string zero.";
2370   constexpr char k1[] = "Very long string one.";
2371   constexpr char k2[] = "Very long string two.";
2372   StringTable t = {{k0, ""}, {k1, ""}, {k2, ""}};
2373   EXPECT_THAT(t,
2374               UnorderedElementsAre(Pair(k0, ""), Pair(k1, ""), Pair(k2, "")));
2375 
2376   auto node = t.extract(k0);
2377   EXPECT_THAT(t, UnorderedElementsAre(Pair(k1, ""), Pair(k2, "")));
2378   EXPECT_TRUE(node);
2379   EXPECT_FALSE(node.empty());
2380 
2381   StringTable t2;
2382   StringTable::insert_return_type res = t2.insert(std::move(node));
2383   EXPECT_TRUE(res.inserted);
2384   EXPECT_THAT(*res.position, Pair(k0, ""));
2385   EXPECT_FALSE(res.node);
2386   EXPECT_THAT(t2, UnorderedElementsAre(Pair(k0, "")));
2387 
2388   // Not there.
2389   EXPECT_THAT(t, UnorderedElementsAre(Pair(k1, ""), Pair(k2, "")));
2390   node = t.extract("Not there!");
2391   EXPECT_THAT(t, UnorderedElementsAre(Pair(k1, ""), Pair(k2, "")));
2392   EXPECT_FALSE(node);
2393 
2394   // Inserting nothing.
2395   res = t2.insert(std::move(node));
2396   EXPECT_FALSE(res.inserted);
2397   EXPECT_EQ(res.position, t2.end());
2398   EXPECT_FALSE(res.node);
2399   EXPECT_THAT(t2, UnorderedElementsAre(Pair(k0, "")));
2400 
2401   t.emplace(k0, "1");
2402   node = t.extract(k0);
2403 
2404   // Insert duplicate.
2405   res = t2.insert(std::move(node));
2406   EXPECT_FALSE(res.inserted);
2407   EXPECT_THAT(*res.position, Pair(k0, ""));
2408   EXPECT_TRUE(res.node);
2409   EXPECT_FALSE(node);  // NOLINT(bugprone-use-after-move)
2410 }
2411 
TYPED_TEST_P(SooTest,HintInsert)2412 TYPED_TEST_P(SooTest, HintInsert) {
2413   TypeParam t = {1, 2, 3};
2414   auto node = t.extract(1);
2415   EXPECT_THAT(t, UnorderedElementsAre(2, 3));
2416   auto it = t.insert(t.begin(), std::move(node));
2417   EXPECT_THAT(t, UnorderedElementsAre(1, 2, 3));
2418   EXPECT_EQ(*it, 1);
2419   EXPECT_FALSE(node);  // NOLINT(bugprone-use-after-move)
2420 
2421   node = t.extract(2);
2422   EXPECT_THAT(t, UnorderedElementsAre(1, 3));
2423   // reinsert 2 to make the next insert fail.
2424   t.insert(2);
2425   EXPECT_THAT(t, UnorderedElementsAre(1, 2, 3));
2426   it = t.insert(t.begin(), std::move(node));
2427   EXPECT_EQ(*it, 2);
2428   // The node was not emptied by the insert call.
2429   EXPECT_TRUE(node);  // NOLINT(bugprone-use-after-move)
2430 }
2431 
2432 template <typename T>
MakeSimpleTable(size_t size)2433 T MakeSimpleTable(size_t size) {
2434   T t;
2435   while (t.size() < size) t.insert(t.size());
2436   return t;
2437 }
2438 
2439 template <typename T>
OrderOfIteration(const T & t)2440 std::vector<int> OrderOfIteration(const T& t) {
2441   std::vector<int> res;
2442   for (auto i : t) res.push_back(static_cast<int>(i));
2443   return res;
2444 }
2445 
2446 // These IterationOrderChanges tests depend on non-deterministic behavior.
2447 // We are injecting non-determinism from the pointer of the table, but do so in
2448 // a way that only the page matters. We have to retry enough times to make sure
2449 // we are touching different memory pages to cause the ordering to change.
2450 // We also need to keep the old tables around to avoid getting the same memory
2451 // blocks over and over.
TYPED_TEST_P(SooTest,IterationOrderChangesByInstance)2452 TYPED_TEST_P(SooTest, IterationOrderChangesByInstance) {
2453   for (size_t size : {2, 6, 12, 20}) {
2454     const auto reference_table = MakeSimpleTable<TypeParam>(size);
2455     const auto reference = OrderOfIteration(reference_table);
2456 
2457     std::vector<TypeParam> tables;
2458     bool found_difference = false;
2459     for (int i = 0; !found_difference && i < 5000; ++i) {
2460       tables.push_back(MakeSimpleTable<TypeParam>(size));
2461       found_difference = OrderOfIteration(tables.back()) != reference;
2462     }
2463     if (!found_difference) {
2464       FAIL()
2465           << "Iteration order remained the same across many attempts with size "
2466           << size;
2467     }
2468   }
2469 }
2470 
TYPED_TEST_P(SooTest,IterationOrderChangesOnRehash)2471 TYPED_TEST_P(SooTest, IterationOrderChangesOnRehash) {
2472   // We test different sizes with many small numbers, because small table
2473   // resize has a different codepath.
2474   // Note: iteration order for size() <= 1 is always the same.
2475   for (size_t size : std::vector<size_t>{2, 3, 6, 7, 12, 15, 20, 50}) {
2476     for (size_t rehash_size : {
2477              size_t{0},  // Force rehash is guaranteed.
2478              size * 10   // Rehash to the larger capacity is guaranteed.
2479          }) {
2480       std::vector<TypeParam> garbage;
2481       bool ok = false;
2482       for (int i = 0; i < 5000; ++i) {
2483         auto t = MakeSimpleTable<TypeParam>(size);
2484         const auto reference = OrderOfIteration(t);
2485         // Force rehash.
2486         t.rehash(rehash_size);
2487         auto trial = OrderOfIteration(t);
2488         if (trial != reference) {
2489           // We are done.
2490           ok = true;
2491           break;
2492         }
2493         garbage.push_back(std::move(t));
2494       }
2495       EXPECT_TRUE(ok)
2496           << "Iteration order remained the same across many attempts " << size
2497           << "->" << rehash_size << ".";
2498     }
2499   }
2500 }
2501 
2502 // Verify that pointers are invalidated as soon as a second element is inserted.
2503 // This prevents code from depending on pointer stability in small tables.
TYPED_TEST_P(SooTest,UnstablePointers)2504 TYPED_TEST_P(SooTest, UnstablePointers) {
2505   // We need to disable hashtablez to avoid issues related to SOO and sampling.
2506   SetHashtablezEnabled(false);
2507 
2508   TypeParam table;
2509 
2510   const auto addr = [&](int i) {
2511     return reinterpret_cast<uintptr_t>(&*table.find(i));
2512   };
2513 
2514   table.insert(0);
2515   const uintptr_t old_ptr = addr(0);
2516 
2517   // This causes a rehash.
2518   table.insert(1);
2519 
2520   EXPECT_NE(old_ptr, addr(0));
2521 }
2522 
TEST(TableDeathTest,InvalidIteratorAsserts)2523 TEST(TableDeathTest, InvalidIteratorAsserts) {
2524   if (!IsAssertEnabled() && !SwisstableGenerationsEnabled())
2525     GTEST_SKIP() << "Assertions not enabled.";
2526 
2527   NonSooIntTable t;
2528   // Extra simple "regexp" as regexp support is highly varied across platforms.
2529   EXPECT_DEATH_IF_SUPPORTED(t.erase(t.end()),
2530                             "erase.* called on end.. iterator.");
2531   typename NonSooIntTable::iterator iter;
2532   EXPECT_DEATH_IF_SUPPORTED(
2533       ++iter, "operator.* called on default-constructed iterator.");
2534   t.insert(0);
2535   iter = t.begin();
2536   t.erase(iter);
2537   const char* const kErasedDeathMessage =
2538       SwisstableGenerationsEnabled()
2539           ? "operator.* called on invalid iterator.*was likely erased"
2540           : "operator.* called on invalid iterator.*might have been "
2541             "erased.*config=asan";
2542   EXPECT_DEATH_IF_SUPPORTED(++iter, kErasedDeathMessage);
2543 }
2544 
TEST(TableDeathTest,InvalidIteratorAssertsSoo)2545 TEST(TableDeathTest, InvalidIteratorAssertsSoo) {
2546   if (!IsAssertEnabled() && !SwisstableGenerationsEnabled())
2547     GTEST_SKIP() << "Assertions not enabled.";
2548 
2549   SooIntTable t;
2550   // Extra simple "regexp" as regexp support is highly varied across platforms.
2551   EXPECT_DEATH_IF_SUPPORTED(t.erase(t.end()),
2552                             "erase.* called on end.. iterator.");
2553   typename SooIntTable::iterator iter;
2554   EXPECT_DEATH_IF_SUPPORTED(
2555       ++iter, "operator.* called on default-constructed iterator.");
2556 
2557   // We can't detect the erased iterator case as invalid in SOO mode because
2558   // the control bytes are a static constant.
2559 }
2560 
2561 // Invalid iterator use can trigger use-after-free in asan/hwasan,
2562 // use-of-uninitialized-value in msan, or invalidated iterator assertions.
2563 constexpr const char* kInvalidIteratorDeathMessage =
2564     "use-after-free|use-of-uninitialized-value|invalidated "
2565     "iterator|Invalid iterator|invalid iterator";
2566 
2567 // MSVC doesn't support | in regex.
2568 #if defined(_MSC_VER)
2569 constexpr bool kMsvc = true;
2570 #else
2571 constexpr bool kMsvc = false;
2572 #endif
2573 
TYPED_TEST_P(SooTest,IteratorInvalidAssertsEqualityOperator)2574 TYPED_TEST_P(SooTest, IteratorInvalidAssertsEqualityOperator) {
2575   if (!IsAssertEnabled() && !SwisstableGenerationsEnabled())
2576     GTEST_SKIP() << "Assertions not enabled.";
2577 
2578   TypeParam t;
2579   t.insert(1);
2580   t.insert(2);
2581   t.insert(3);
2582   auto iter1 = t.begin();
2583   auto iter2 = std::next(iter1);
2584   ASSERT_NE(iter1, t.end());
2585   ASSERT_NE(iter2, t.end());
2586   t.erase(iter1);
2587   // Extra simple "regexp" as regexp support is highly varied across platforms.
2588   const char* const kErasedDeathMessage =
2589       SwisstableGenerationsEnabled()
2590           ? "Invalid iterator comparison.*was likely erased"
2591           : "Invalid iterator comparison.*might have been erased.*config=asan";
2592   EXPECT_DEATH_IF_SUPPORTED(void(iter1 == iter2), kErasedDeathMessage);
2593   EXPECT_DEATH_IF_SUPPORTED(void(iter2 != iter1), kErasedDeathMessage);
2594   t.erase(iter2);
2595   EXPECT_DEATH_IF_SUPPORTED(void(iter1 == iter2), kErasedDeathMessage);
2596 
2597   TypeParam t1, t2;
2598   t1.insert(0);
2599   t2.insert(0);
2600   iter1 = t1.begin();
2601   iter2 = t2.begin();
2602   const char* const kContainerDiffDeathMessage =
2603       SwisstableGenerationsEnabled()
2604           ? "Invalid iterator comparison.*iterators from different.* hashtables"
2605           : "Invalid iterator comparison.*may be from different "
2606             ".*containers.*config=asan";
2607   EXPECT_DEATH_IF_SUPPORTED(void(iter1 == iter2), kContainerDiffDeathMessage);
2608   EXPECT_DEATH_IF_SUPPORTED(void(iter2 == iter1), kContainerDiffDeathMessage);
2609 }
2610 
TYPED_TEST_P(SooTest,IteratorInvalidAssertsEqualityOperatorRehash)2611 TYPED_TEST_P(SooTest, IteratorInvalidAssertsEqualityOperatorRehash) {
2612   if (!IsAssertEnabled() && !SwisstableGenerationsEnabled())
2613     GTEST_SKIP() << "Assertions not enabled.";
2614   if (kMsvc) GTEST_SKIP() << "MSVC doesn't support | in regex.";
2615 #ifdef ABSL_HAVE_THREAD_SANITIZER
2616   GTEST_SKIP() << "ThreadSanitizer test runs fail on use-after-free even in "
2617                   "EXPECT_DEATH.";
2618 #endif
2619 
2620   TypeParam t;
2621   t.insert(0);
2622   auto iter = t.begin();
2623 
2624   // Trigger a rehash in t.
2625   for (int i = 0; i < 10; ++i) t.insert(i);
2626 
2627   const char* const kRehashedDeathMessage =
2628       SwisstableGenerationsEnabled()
2629           ? kInvalidIteratorDeathMessage
2630           : "Invalid iterator comparison.*might have rehashed.*config=asan";
2631   EXPECT_DEATH_IF_SUPPORTED(void(iter == t.begin()), kRehashedDeathMessage);
2632 }
2633 
2634 #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
2635 template <typename T>
2636 class RawHashSamplerTest : public testing::Test {};
2637 TYPED_TEST_SUITE_P(RawHashSamplerTest);
2638 
TYPED_TEST_P(RawHashSamplerTest,Sample)2639 TYPED_TEST_P(RawHashSamplerTest, Sample) {
2640   constexpr bool soo_enabled = std::is_same<SooIntTable, TypeParam>::value;
2641   // Enable the feature even if the prod default is off.
2642   SetHashtablezEnabled(true);
2643   SetHashtablezSampleParameter(100);  // Sample ~1% of tables.
2644 
2645   auto& sampler = GlobalHashtablezSampler();
2646   size_t start_size = 0;
2647   absl::flat_hash_set<const HashtablezInfo*> preexisting_info;
2648   start_size += sampler.Iterate([&](const HashtablezInfo& info) {
2649     preexisting_info.insert(&info);
2650     ++start_size;
2651   });
2652 
2653   std::vector<TypeParam> tables;
2654   for (int i = 0; i < 1000000; ++i) {
2655     tables.emplace_back();
2656 
2657     const bool do_reserve = (i % 10 > 5);
2658     const bool do_rehash = !do_reserve && (i % 10 > 0);
2659 
2660     if (do_reserve) {
2661       // Don't reserve on all tables.
2662       tables.back().reserve(10 * (i % 10));
2663     }
2664 
2665     tables.back().insert(1);
2666     tables.back().insert(i % 5);
2667 
2668     if (do_rehash) {
2669       // Rehash some other tables.
2670       tables.back().rehash(10 * (i % 10));
2671     }
2672   }
2673   size_t end_size = 0;
2674   absl::flat_hash_map<size_t, int> observed_checksums;
2675   absl::flat_hash_map<ssize_t, int> reservations;
2676   end_size += sampler.Iterate([&](const HashtablezInfo& info) {
2677     ++end_size;
2678     if (preexisting_info.contains(&info)) return;
2679     observed_checksums[info.hashes_bitwise_xor.load(
2680         std::memory_order_relaxed)]++;
2681     reservations[info.max_reserve.load(std::memory_order_relaxed)]++;
2682     EXPECT_EQ(info.inline_element_size, sizeof(typename TypeParam::value_type));
2683     EXPECT_EQ(info.key_size, sizeof(typename TypeParam::key_type));
2684     EXPECT_EQ(info.value_size, sizeof(typename TypeParam::value_type));
2685 
2686     if (soo_enabled) {
2687       EXPECT_EQ(info.soo_capacity, SooCapacity());
2688     } else {
2689       EXPECT_EQ(info.soo_capacity, 0);
2690     }
2691   });
2692 
2693   // Expect that we sampled at the requested sampling rate of ~1%.
2694   EXPECT_NEAR((end_size - start_size) / static_cast<double>(tables.size()),
2695               0.01, 0.005);
2696   EXPECT_EQ(observed_checksums.size(), 5);
2697   for (const auto& [_, count] : observed_checksums) {
2698     EXPECT_NEAR((100 * count) / static_cast<double>(tables.size()), 0.2, 0.05);
2699   }
2700 
2701   EXPECT_EQ(reservations.size(), 10);
2702   for (const auto& [reservation, count] : reservations) {
2703     EXPECT_GE(reservation, 0);
2704     EXPECT_LT(reservation, 100);
2705 
2706     EXPECT_NEAR((100 * count) / static_cast<double>(tables.size()), 0.1, 0.05)
2707         << reservation;
2708   }
2709 }
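
// For intuition on the expectations above (approximate arithmetic, assuming
// the ~1% sampling rate requested via SetHashtablezSampleParameter(100)):
// roughly 0.01 * 1,000,000 = 10,000 tables should be sampled, so the measured
// fraction should be near 0.01. Every table contains {1, i % 5}, so there are
// exactly 5 distinct element sets and hence 5 observed checksums, each
// covering ~20% of the sampled tables:
// (100 * count) / 1,000,000 ~= 100 * 0.01 * 0.2 = 0.2.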
2710 
2711 REGISTER_TYPED_TEST_SUITE_P(RawHashSamplerTest, Sample);
2712 using RawHashSamplerTestTypes = ::testing::Types<SooIntTable, NonSooIntTable>;
2713 INSTANTIATE_TYPED_TEST_SUITE_P(My, RawHashSamplerTest, RawHashSamplerTestTypes);
2714 
SampleSooMutation(absl::FunctionRef<void (SooIntTable &)> mutate_table)2715 std::vector<const HashtablezInfo*> SampleSooMutation(
2716     absl::FunctionRef<void(SooIntTable&)> mutate_table) {
2717   // Enable the feature even if the prod default is off.
2718   SetHashtablezEnabled(true);
2719   SetHashtablezSampleParameter(100);  // Sample ~1% of tables.
2720 
2721   auto& sampler = GlobalHashtablezSampler();
2722   size_t start_size = 0;
2723   absl::flat_hash_set<const HashtablezInfo*> preexisting_info;
2724   start_size += sampler.Iterate([&](const HashtablezInfo& info) {
2725     preexisting_info.insert(&info);
2726     ++start_size;
2727   });
2728 
2729   std::vector<SooIntTable> tables;
2730   for (int i = 0; i < 1000000; ++i) {
2731     tables.emplace_back();
2732     mutate_table(tables.back());
2733   }
2734   size_t end_size = 0;
2735   std::vector<const HashtablezInfo*> infos;
2736   end_size += sampler.Iterate([&](const HashtablezInfo& info) {
2737     ++end_size;
2738     if (preexisting_info.contains(&info)) return;
2739     infos.push_back(&info);
2740   });
2741 
2742   // Expect that we sampled at the requested sampling rate of ~1%.
2743   EXPECT_NEAR((end_size - start_size) / static_cast<double>(tables.size()),
2744               0.01, 0.005);
2745   return infos;
2746 }
2747 
TEST(RawHashSamplerTest,SooTableInsertToEmpty)2748 TEST(RawHashSamplerTest, SooTableInsertToEmpty) {
2749   if (SooIntTable().capacity() != SooCapacity()) {
2750     CHECK_LT(sizeof(void*), 8) << "missing SOO coverage";
2751     GTEST_SKIP() << "not SOO on this platform";
2752   }
2753   std::vector<const HashtablezInfo*> infos =
2754       SampleSooMutation([](SooIntTable& t) { t.insert(1); });
2755 
2756   for (const HashtablezInfo* info : infos) {
2757     ASSERT_EQ(info->inline_element_size,
2758               sizeof(typename SooIntTable::value_type));
2759     ASSERT_EQ(info->soo_capacity, SooCapacity());
2760     ASSERT_EQ(info->capacity, NextCapacity(SooCapacity()));
2761     ASSERT_EQ(info->size, 1);
2762     ASSERT_EQ(info->max_reserve, 0);
2763     ASSERT_EQ(info->num_erases, 0);
2764     ASSERT_EQ(info->max_probe_length, 0);
2765     ASSERT_EQ(info->total_probe_length, 0);
2766   }
2767 }
2768 
TEST(RawHashSamplerTest,SooTableReserveToEmpty)2769 TEST(RawHashSamplerTest, SooTableReserveToEmpty) {
2770   if (SooIntTable().capacity() != SooCapacity()) {
2771     CHECK_LT(sizeof(void*), 8) << "missing SOO coverage";
2772     GTEST_SKIP() << "not SOO on this platform";
2773   }
2774   std::vector<const HashtablezInfo*> infos =
2775       SampleSooMutation([](SooIntTable& t) { t.reserve(100); });
2776 
2777   for (const HashtablezInfo* info : infos) {
2778     ASSERT_EQ(info->inline_element_size,
2779               sizeof(typename SooIntTable::value_type));
2780     ASSERT_EQ(info->soo_capacity, SooCapacity());
2781     ASSERT_GE(info->capacity, 100);
2782     ASSERT_EQ(info->size, 0);
2783     ASSERT_EQ(info->max_reserve, 100);
2784     ASSERT_EQ(info->num_erases, 0);
2785     ASSERT_EQ(info->max_probe_length, 0);
2786     ASSERT_EQ(info->total_probe_length, 0);
2787   }
2788 }
2789 
2790 // This tests that reserve on a full SOO table doesn't incorrectly result in new
2791 // (over-)sampling.
TEST(RawHashSamplerTest,SooTableReserveToFullSoo)2792 TEST(RawHashSamplerTest, SooTableReserveToFullSoo) {
2793   if (SooIntTable().capacity() != SooCapacity()) {
2794     CHECK_LT(sizeof(void*), 8) << "missing SOO coverage";
2795     GTEST_SKIP() << "not SOO on this platform";
2796   }
2797   std::vector<const HashtablezInfo*> infos =
2798       SampleSooMutation([](SooIntTable& t) {
2799         t.insert(1);
2800         t.reserve(100);
2801       });
2802 
2803   for (const HashtablezInfo* info : infos) {
2804     ASSERT_EQ(info->inline_element_size,
2805               sizeof(typename SooIntTable::value_type));
2806     ASSERT_EQ(info->soo_capacity, SooCapacity());
2807     ASSERT_GE(info->capacity, 100);
2808     ASSERT_EQ(info->size, 1);
2809     ASSERT_EQ(info->max_reserve, 100);
2810     ASSERT_EQ(info->num_erases, 0);
2811     ASSERT_EQ(info->max_probe_length, 0);
2812     ASSERT_EQ(info->total_probe_length, 0);
2813   }
2814 }
2815 
2816 // This tests that rehash(0) on a sampled table with size that fits in SOO
2817 // doesn't incorrectly result in losing sampling.
TEST(RawHashSamplerTest,SooTableRehashShrinkWhenSizeFitsInSoo)2818 TEST(RawHashSamplerTest, SooTableRehashShrinkWhenSizeFitsInSoo) {
2819   if (SooIntTable().capacity() != SooCapacity()) {
2820     CHECK_LT(sizeof(void*), 8) << "missing SOO coverage";
2821     GTEST_SKIP() << "not SOO on this platform";
2822   }
2823   std::vector<const HashtablezInfo*> infos =
2824       SampleSooMutation([](SooIntTable& t) {
2825         t.reserve(100);
2826         t.insert(1);
2827         EXPECT_GE(t.capacity(), 100);
2828         t.rehash(0);
2829       });
2830 
2831   for (const HashtablezInfo* info : infos) {
2832     ASSERT_EQ(info->inline_element_size,
2833               sizeof(typename SooIntTable::value_type));
2834     ASSERT_EQ(info->soo_capacity, SooCapacity());
2835     ASSERT_EQ(info->capacity, NextCapacity(SooCapacity()));
2836     ASSERT_EQ(info->size, 1);
2837     ASSERT_EQ(info->max_reserve, 100);
2838     ASSERT_EQ(info->num_erases, 0);
2839     ASSERT_EQ(info->max_probe_length, 0);
2840     ASSERT_EQ(info->total_probe_length, 0);
2841   }
2842 }
2843 #endif  // ABSL_INTERNAL_HASHTABLEZ_SAMPLE
2844 
TEST(RawHashSamplerTest,DoNotSampleCustomAllocators)2845 TEST(RawHashSamplerTest, DoNotSampleCustomAllocators) {
2846   // Enable the feature even if the prod default is off.
2847   SetHashtablezEnabled(true);
2848   SetHashtablezSampleParameter(100);  // Sample ~1% of tables.
2849 
2850   auto& sampler = GlobalHashtablezSampler();
2851   size_t start_size = 0;
2852   start_size += sampler.Iterate([&](const HashtablezInfo&) { ++start_size; });
2853 
2854   std::vector<CustomAllocIntTable> tables;
2855   for (int i = 0; i < 1000000; ++i) {
2856     tables.emplace_back();
2857     tables.back().insert(1);
2858   }
2859   size_t end_size = 0;
2860   end_size += sampler.Iterate([&](const HashtablezInfo&) { ++end_size; });
2861 
2862   EXPECT_NEAR((end_size - start_size) / static_cast<double>(tables.size()),
2863               0.00, 0.001);
2864 }
2865 
2866 #ifdef ABSL_HAVE_ADDRESS_SANITIZER
2867 template <class TableType>
2868 class SanitizerTest : public testing::Test {};
2869 
2870 TYPED_TEST_SUITE_P(SanitizerTest);
2871 
TYPED_TEST_P(SanitizerTest,PoisoningUnused)2872 TYPED_TEST_P(SanitizerTest, PoisoningUnused) {
2873   TypeParam t;
2874   for (size_t reserve_size = 2; reserve_size < 1024;
2875        reserve_size = reserve_size * 3 / 2) {
2876     t.reserve(reserve_size);
2877     // Insert something to force an allocation.
2878     int64_t& v = *t.insert(0).first;
2879 
2880     // Make sure there is something to test.
2881     ASSERT_GT(t.capacity(), 1);
2882 
2883     int64_t* slots = RawHashSetTestOnlyAccess::GetSlots(t);
2884     for (size_t i = 0; i < t.capacity(); ++i) {
2885       EXPECT_EQ(slots + i != &v, __asan_address_is_poisoned(slots + i)) << i;
2886     }
2887   }
2888 }
2889 
2890 REGISTER_TYPED_TEST_SUITE_P(SanitizerTest, PoisoningUnused);
2891 using SanitizerTableTypes = ::testing::Types<IntTable, TransferableIntTable>;
2892 INSTANTIATE_TYPED_TEST_SUITE_P(InstanceSanitizerTest, SanitizerTest,
2893                                SanitizerTableTypes);
2894 
2895 // TODO(b/289225379): poison inline space when empty SOO.
2896 TEST(Sanitizer, PoisoningOnErase) {
2897   NonSooIntTable t;
2898   auto& v = *t.insert(0).first;
2899 
2900   EXPECT_FALSE(__asan_address_is_poisoned(&v));
2901   t.erase(0);
2902   EXPECT_TRUE(__asan_address_is_poisoned(&v));
2903 }
2904 #endif  // ABSL_HAVE_ADDRESS_SANITIZER
2905 
2906 template <typename T>
2907 class AlignOneTest : public ::testing::Test {};
2908 using AlignOneTestTypes =
2909     ::testing::Types<Uint8Table, MinimumAlignmentUint8Table>;
2910 TYPED_TEST_SUITE(AlignOneTest, AlignOneTestTypes);
2911 
2912 TYPED_TEST(AlignOneTest, AlignOne) {
2913   // We previously had a bug in which we were copying a control byte over the
2914   // first slot when alignof(value_type) is 1. We test repeated
2915   // insertions/erases and verify that the behavior is correct.
2916   TypeParam t;
2917   std::unordered_set<uint8_t> verifier;  // NOLINT
2918 
2919   // Do repeated insertions/erases from the table.
2920   for (int64_t i = 0; i < 100000; ++i) {
2921     SCOPED_TRACE(i);
2922     const uint8_t u = (i * -i) & 0xFF;
2923     auto it = t.find(u);
2924     auto verifier_it = verifier.find(u);
2925     if (it == t.end()) {
2926       ASSERT_EQ(verifier_it, verifier.end());
2927       t.insert(u);
2928       verifier.insert(u);
2929     } else {
2930       ASSERT_NE(verifier_it, verifier.end());
2931       t.erase(it);
2932       verifier.erase(verifier_it);
2933     }
2934   }
2935 
2936   EXPECT_EQ(t.size(), verifier.size());
2937   for (uint8_t u : t) {
2938     EXPECT_EQ(verifier.count(u), 1);
2939   }
2940 }
2941 
2942 TEST(Iterator, InvalidUseCrashesWithSanitizers) {
2943   if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled.";
2944   if (kMsvc) GTEST_SKIP() << "MSVC doesn't support | in regexp.";
2945 
2946   NonSooIntTable t;
2947   // Start with 1 element so that `it` is never an end iterator.
2948   t.insert(-1);
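  // No growth is reserved below, so with generations enabled each insertion is
  // expected to invalidate `it`, which the death checks should catch.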
2949   for (int i = 0; i < 10; ++i) {
2950     auto it = t.begin();
2951     t.insert(i);
2952     EXPECT_DEATH_IF_SUPPORTED(*it, kInvalidIteratorDeathMessage);
2953     EXPECT_DEATH_IF_SUPPORTED(void(it == t.begin()),
2954                               kInvalidIteratorDeathMessage);
2955   }
2956 }
2957 
2958 TEST(Iterator, InvalidUseWithReserveCrashesWithSanitizers) {
2959   if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled.";
2960   if (kMsvc) GTEST_SKIP() << "MSVC doesn't support | in regexp.";
2961 
2962   IntTable t;
2963   t.reserve(10);
2964   t.insert(0);
2965   auto it = t.begin();
2966   // Reserved growth can't rehash.
2967   for (int i = 1; i < 10; ++i) {
2968     t.insert(i);
2969     EXPECT_EQ(*it, 0);
2970   }
2971   // ptr will become invalidated on rehash.
2972   const int64_t* ptr = &*it;
2973   (void)ptr;
2974 
2975   // erase() decreases size but does not decrease reserved growth, so the next
2976   // insertion still invalidates iterators.
2977   t.erase(0);
2978   // Once reserved growth reaches 0, the first insert is guaranteed to rehash
2979   // when generations are enabled.
2980   t.insert(10);
2981   EXPECT_DEATH_IF_SUPPORTED(*it, kInvalidIteratorDeathMessage);
2982   EXPECT_DEATH_IF_SUPPORTED(void(it == t.begin()),
2983                             kInvalidIteratorDeathMessage);
2984 #ifdef ABSL_HAVE_ADDRESS_SANITIZER
2985   EXPECT_DEATH_IF_SUPPORTED(std::cout << *ptr, "heap-use-after-free");
2986 #endif
2987 }
2988 
2989 TEST(Iterator, InvalidUseWithMoveCrashesWithSanitizers) {
2990   if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled.";
2991   if (kMsvc) GTEST_SKIP() << "MSVC doesn't support | in regexp.";
2992 
2993   NonSooIntTable t1, t2;
2994   t1.insert(1);
2995   auto it = t1.begin();
2996   // ptr is expected to be invalidated by the move below.
2997   const auto* ptr = &*it;
2998   (void)ptr;
2999 
3000   t2 = std::move(t1);
3001   EXPECT_DEATH_IF_SUPPORTED(*it, kInvalidIteratorDeathMessage);
3002   EXPECT_DEATH_IF_SUPPORTED(void(it == t2.begin()),
3003                             kInvalidIteratorDeathMessage);
3004 #ifdef ABSL_HAVE_ADDRESS_SANITIZER
3005   EXPECT_DEATH_IF_SUPPORTED(std::cout << **ptr, "heap-use-after-free");
3006 #endif
3007 }
3008 
3009 TYPED_TEST_P(SooTest, ReservedGrowthUpdatesWhenTableDoesntGrow) {
3010   TypeParam t;
3011   for (int i = 0; i < 8; ++i) t.insert(i);
3012   // Want to insert twice without invalidating iterators so reserve.
3013   const size_t cap = t.capacity();
3014   t.reserve(t.size() + 2);
3015   // We want to test the case in which the reserve doesn't grow the table.
3016   ASSERT_EQ(cap, t.capacity());
3017   auto it = t.find(0);
3018   t.insert(100);
3019   t.insert(200);
3020   // `it` shouldn't have been invalidated.
3021   EXPECT_EQ(*it, 0);
3022 }
3023 
3024 TEST(Table, EraseBeginEndResetsReservedGrowth) {
3025   bool frozen = false;
3026   BadHashFreezableIntTable t{FreezableAlloc<int64_t>(&frozen)};
3027   t.reserve(100);
3028   const size_t cap = t.capacity();
3029   frozen = true;  // no further allocs allowed
3030 
3031   for (int i = 0; i < 10; ++i) {
3032     // Create a long run (hash function returns constant).
3033     for (int j = 0; j < 100; ++j) t.insert(j);
3034     // Erase elements from the middle of the long run, which creates tombstones.
3035     for (int j = 30; j < 60; ++j) t.erase(j);
3036     EXPECT_EQ(t.size(), 70);
3037     EXPECT_EQ(t.capacity(), cap);
3038     ASSERT_EQ(RawHashSetTestOnlyAccess::CountTombstones(t), 30);
3039 
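    // Erasing the entire range should clear all tombstones and reset reserved
    // growth in place; the frozen allocator ensures no reallocation happens.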
3040     t.erase(t.begin(), t.end());
3041 
3042     EXPECT_EQ(t.size(), 0);
3043     EXPECT_EQ(t.capacity(), cap);
3044     ASSERT_EQ(RawHashSetTestOnlyAccess::CountTombstones(t), 0);
3045   }
3046 }
3047 
3048 TEST(Table, GenerationInfoResetsOnClear) {
3049   if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled.";
3050   if (kMsvc) GTEST_SKIP() << "MSVC doesn't support | in regexp.";
3051 
3052   NonSooIntTable t;
3053   for (int i = 0; i < 1000; ++i) t.insert(i);
3054   t.reserve(t.size() + 100);
3055 
3056   t.clear();
3057 
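  // The reservation made above must not carry over past clear(): the second
  // insert below may rehash, and the generation check should flag the stale
  // iterator.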
3058   t.insert(0);
3059   auto it = t.begin();
3060   t.insert(1);
3061   EXPECT_DEATH_IF_SUPPORTED(*it, kInvalidIteratorDeathMessage);
3062 }
3063 
3064 TEST(Table, InvalidReferenceUseCrashesWithSanitizers) {
3065   if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled.";
3066 #ifdef ABSL_HAVE_MEMORY_SANITIZER
3067   GTEST_SKIP() << "MSan fails to detect some of these rehashes.";
3068 #endif
3069 
3070   NonSooIntTable t;
3071   t.insert(0);
3072   // Rehashing is guaranteed on every insertion while capacity is less than
3073   // RehashProbabilityConstant().
3074   int i = 0;
3075   while (t.capacity() <= RehashProbabilityConstant()) {
3076     // ptr will become invalidated on rehash.
3077     const auto* ptr = &*t.begin();
3078     t.insert(++i);
3079     EXPECT_DEATH_IF_SUPPORTED(std::cout << **ptr, "use-after-free") << i;
3080   }
3081 }
3082 
3083 TEST(Iterator, InvalidComparisonDifferentTables) {
3084   if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled.";
3085 
3086   NonSooIntTable t1, t2;
3087   NonSooIntTable::iterator default_constructed_iter;
3088   // We randomly use one of N empty generations for generations from empty
3089   // hashtables. In general, we won't always detect when iterators from
3090   // different empty hashtables are compared, but in this test case, we
3091   // should deterministically detect the error due to our randomness yielding
3092   // consecutive random generations.
3093   EXPECT_DEATH_IF_SUPPORTED(void(t1.end() == t2.end()),
3094                             "Invalid iterator comparison.*empty hashtables");
3095   EXPECT_DEATH_IF_SUPPORTED(void(t1.end() == default_constructed_iter),
3096                             "Invalid iterator comparison.*default-constructed");
3097   t1.insert(0);
3098   EXPECT_DEATH_IF_SUPPORTED(void(t1.begin() == t2.end()),
3099                             "Invalid iterator comparison.*empty hashtable");
3100   EXPECT_DEATH_IF_SUPPORTED(void(t1.begin() == default_constructed_iter),
3101                             "Invalid iterator comparison.*default-constructed");
3102   t2.insert(0);
3103   EXPECT_DEATH_IF_SUPPORTED(void(t1.begin() == t2.end()),
3104                             "Invalid iterator comparison.*end.. iterator");
3105   EXPECT_DEATH_IF_SUPPORTED(void(t1.begin() == t2.begin()),
3106                             "Invalid iterator comparison.*non-end");
3107 }
3108 
3109 template <typename Alloc>
3110 using RawHashSetAlloc = raw_hash_set<IntPolicy, hash_default_hash<int64_t>,
3111                                      std::equal_to<int64_t>, Alloc>;
3112 
3113 TEST(Table, AllocatorPropagation) { TestAllocPropagation<RawHashSetAlloc>(); }
3114 
3115 struct CountedHash {
3116   size_t operator()(int64_t value) const {
3117     ++count;
3118     return static_cast<size_t>(value);
3119   }
3120   mutable int count = 0;
3121 };
3122 
3123 struct CountedHashIntTable
3124     : raw_hash_set<IntPolicy, CountedHash, std::equal_to<int>,
3125                    std::allocator<int>> {
3126   using Base = typename CountedHashIntTable::raw_hash_set;
3127   using Base::Base;
3128 };
3129 
3130 TEST(Table, CountedHash) {
3131   // Verify that raw_hash_set does not compute redundant hashes.
3132 #ifdef NDEBUG
3133   constexpr bool kExpectMinimumHashes = true;
3134 #else
3135   constexpr bool kExpectMinimumHashes = false;
3136 #endif
3137   if (!kExpectMinimumHashes) {
3138     GTEST_SKIP() << "Only run under NDEBUG: `assert` statements may cause "
3139                     "redundant hashing.";
3140   }
3141 
3142   using Table = CountedHashIntTable;
3143   auto HashCount = [](const Table& t) { return t.hash_function().count; };
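  // Each block below performs one hashed operation at a time (insert, erase,
  // extract, node re-insert, emplace, merge) and checks that the hash count
  // grows by exactly one per operation, i.e. no redundant hashing.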
3144   {
3145     Table t;
3146     EXPECT_EQ(HashCount(t), 0);
3147   }
3148   {
3149     Table t;
3150     t.insert(1);
3151     EXPECT_EQ(HashCount(t), 1);
3152     t.erase(1);
3153     EXPECT_EQ(HashCount(t), 2);
3154   }
3155   {
3156     Table t;
3157     t.insert(3);
3158     EXPECT_EQ(HashCount(t), 1);
3159     auto node = t.extract(3);
3160     EXPECT_EQ(HashCount(t), 2);
3161     t.insert(std::move(node));
3162     EXPECT_EQ(HashCount(t), 3);
3163   }
3164   {
3165     Table t;
3166     t.emplace(5);
3167     EXPECT_EQ(HashCount(t), 1);
3168   }
3169   {
3170     Table src;
3171     src.insert(7);
3172     Table dst;
3173     dst.merge(src);
3174     EXPECT_EQ(HashCount(dst), 1);
3175   }
3176 }
3177 
3178 // IterateOverFullSlots doesn't support SOO.
3179 TEST(Table, IterateOverFullSlotsEmpty) {
3180   NonSooIntTable t;
3181   auto fail_if_any = [](const ctrl_t*, auto* i) {
3182     FAIL() << "expected no slots " << **i;
3183   };
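  // The table stays empty throughout, so the callback must never fire, no
  // matter how much capacity reserve() provisions below.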
3184   container_internal::IterateOverFullSlots(
3185       RawHashSetTestOnlyAccess::GetCommon(t),
3186       RawHashSetTestOnlyAccess::GetSlots(t), fail_if_any);
3187   for (size_t i = 0; i < 256; ++i) {
3188     t.reserve(i);
3189     container_internal::IterateOverFullSlots(
3190         RawHashSetTestOnlyAccess::GetCommon(t),
3191         RawHashSetTestOnlyAccess::GetSlots(t), fail_if_any);
3192   }
3193 }
3194 
3195 TEST(Table, IterateOverFullSlotsFull) {
3196   NonSooIntTable t;
3197 
3198   std::vector<int64_t> expected_slots;
3199   for (int64_t i = 0; i < 128; ++i) {
3200     t.insert(i);
3201     expected_slots.push_back(i);
3202 
3203     std::vector<int64_t> slots;
3204     container_internal::IterateOverFullSlots(
3205         RawHashSetTestOnlyAccess::GetCommon(t),
3206         RawHashSetTestOnlyAccess::GetSlots(t),
3207         [&t, &slots](const ctrl_t* ctrl, auto* i) {
3208           ptrdiff_t ctrl_offset =
3209               ctrl - RawHashSetTestOnlyAccess::GetCommon(t).control();
3210           ptrdiff_t slot_offset = i - RawHashSetTestOnlyAccess::GetSlots(t);
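          // The control byte and the slot handed to the callback must refer to
          // the same index within the table.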
3211           ASSERT_EQ(ctrl_offset, slot_offset);
3212           slots.push_back(**i);
3213         });
3214     EXPECT_THAT(slots, testing::UnorderedElementsAreArray(expected_slots));
3215   }
3216 }
3217 
3218 template <typename T>
3219 class SooTable : public testing::Test {};
3220 TYPED_TEST_SUITE_P(SooTable);
3221 
3222 TYPED_TEST_P(SooTable, Basic) {
3223   bool frozen = true;
3224   TypeParam t{FreezableAlloc<typename TypeParam::value_type>(&frozen)};
3225   if (t.capacity() != SooCapacity()) {
3226     CHECK_LT(sizeof(void*), 8) << "missing SOO coverage";
3227     GTEST_SKIP() << "not SOO on this platform";
3228   }
3229 
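  // The allocator is frozen, so it is presumably set up to reject any heap
  // allocation; everything below should stay within the inline (SOO) storage
  // of capacity 1.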
3230   t.insert(0);
3231   EXPECT_EQ(t.capacity(), 1);
3232   auto it = t.find(0);
3233   EXPECT_EQ(it, t.begin());
3234   ASSERT_NE(it, t.end());
3235   EXPECT_EQ(*it, 0);
3236   EXPECT_EQ(++it, t.end());
3237   EXPECT_EQ(t.find(1), t.end());
3238   EXPECT_EQ(t.size(), 1);
3239 
3240   t.erase(0);
3241   EXPECT_EQ(t.size(), 0);
3242   t.insert(1);
3243   it = t.find(1);
3244   EXPECT_EQ(it, t.begin());
3245   ASSERT_NE(it, t.end());
3246   EXPECT_EQ(*it, 1);
3247 
3248   t.clear();
3249   EXPECT_EQ(t.size(), 0);
3250 }
3251 
3252 REGISTER_TYPED_TEST_SUITE_P(SooTable, Basic);
3253 using FreezableSooTableTypes =
3254     ::testing::Types<FreezableSizedValueSooTable<8>,
3255                      FreezableSizedValueSooTable<16>>;
3256 INSTANTIATE_TYPED_TEST_SUITE_P(My, SooTable, FreezableSooTableTypes);
3257 
3258 TEST(Table, RehashToSooUnsampled) {
3259   SooIntTable t;
3260   if (t.capacity() != SooCapacity()) {
3261     CHECK_LT(sizeof(void*), 8) << "missing SOO coverage";
3262     GTEST_SKIP() << "not SOO on this platform";
3263   }
3264 
3265   // We disable hashtablez sampling for this test to ensure that the table isn't
3266   // sampled. When the table is sampled, it won't rehash down to SOO.
3267   SetHashtablezEnabled(false);
3268 
3269   t.reserve(100);
3270   t.insert(0);
3271   EXPECT_EQ(*t.begin(), 0);
3272 
3273   t.rehash(0);  // Rehash back down to SOO table.
3274 
3275   EXPECT_EQ(t.capacity(), SooCapacity());
3276   EXPECT_EQ(t.size(), 1);
3277   EXPECT_EQ(*t.begin(), 0);
3278   EXPECT_EQ(t.find(0), t.begin());
3279   EXPECT_EQ(t.find(1), t.end());
3280 }
3281 
3282 TEST(Table, ReserveToNonSoo) {
3283   for (int reserve_capacity : {8, 100000}) {
3284     SooIntTable t;
3285     t.insert(0);
3286 
3287     t.reserve(reserve_capacity);
3288 
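    // After reserving well past SooCapacity(), the element lives in heap-backed
    // storage; it must still be found and iterated correctly.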
3289     EXPECT_EQ(t.find(0), t.begin());
3290     EXPECT_EQ(t.size(), 1);
3291     EXPECT_EQ(*t.begin(), 0);
3292     EXPECT_EQ(t.find(1), t.end());
3293   }
3294 }
3295 
3296 REGISTER_TYPED_TEST_SUITE_P(
3297     SooTest, Empty, Clear, ClearBug, Contains1, Contains2, ContainsEmpty,
3298     CopyConstruct, CopyDifferentCapacities, CopyDifferentSizes,
3299     EnsureNonQuadraticAsInRust, Erase, EraseBeginEnd, EraseInSmallTables,
3300     EraseMaintainsValidIterator, FindFullDeletedRegression, HintInsert, Insert1,
3301     Insert2, InsertEraseStressTest, InsertWithinCapacity,
3302     IterationOrderChangesByInstance, IterationOrderChangesOnRehash,
3303     IteratorInvalidAssertsEqualityOperator,
3304     IteratorInvalidAssertsEqualityOperatorRehash, LargeTable, LookupEmpty,
3305     NumDeletedRegression, Rehash, RehashDoesNotRehashWhenNotNecessary,
3306     RehashZeroForcesRehash, ReplacingDeletedSlotDoesNotRehash,
3307     ReservedGrowthUpdatesWhenTableDoesntGrow, Swap, UnstablePointers);
3308 using SooTableTypes = ::testing::Types<SooIntTable, NonSooIntTable>;
3309 INSTANTIATE_TYPED_TEST_SUITE_P(My, SooTest, SooTableTypes);
3310 
3311 }  // namespace
3312 }  // namespace container_internal
3313 ABSL_NAMESPACE_END
3314 }  // namespace absl
3315