1 // Copyright 2018 The Abseil Authors.
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 // https://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14
15 #include "absl/container/internal/raw_hash_set.h"
16
17 #include <algorithm>
18 #include <atomic>
19 #include <cmath>
20 #include <cstddef>
21 #include <cstdint>
22 #include <deque>
23 #include <functional>
24 #include <iostream>
25 #include <iterator>
26 #include <list>
27 #include <map>
28 #include <memory>
29 #include <numeric>
30 #include <ostream>
31 #include <random>
32 #include <string>
33 #include <tuple>
34 #include <type_traits>
35 #include <unordered_map>
36 #include <unordered_set>
37 #include <utility>
38 #include <vector>
39
40 #include "gmock/gmock.h"
41 #include "gtest/gtest.h"
42 #include "absl/base/attributes.h"
43 #include "absl/base/config.h"
44 #include "absl/base/internal/cycleclock.h"
45 #include "absl/base/prefetch.h"
46 #include "absl/container/flat_hash_map.h"
47 #include "absl/container/flat_hash_set.h"
48 #include "absl/container/internal/container_memory.h"
49 #include "absl/container/internal/hash_function_defaults.h"
50 #include "absl/container/internal/hash_policy_testing.h"
51 #include "absl/container/internal/hashtable_debug.h"
52 #include "absl/container/internal/hashtablez_sampler.h"
53 #include "absl/container/internal/test_allocator.h"
54 #include "absl/hash/hash.h"
55 #include "absl/log/log.h"
56 #include "absl/memory/memory.h"
57 #include "absl/meta/type_traits.h"
58 #include "absl/strings/string_view.h"
59
60 namespace absl {
61 ABSL_NAMESPACE_BEGIN
62 namespace container_internal {
63
64 struct RawHashSetTestOnlyAccess {
65 template <typename C>
66 static auto GetSlots(const C& c) -> decltype(c.slot_array()) {
67 return c.slot_array();
68 }
69 template <typename C>
70 static size_t CountTombstones(const C& c) {
71 return c.common().TombstonesCount();
72 }
73 };
74
75 namespace {
76
77 using ::testing::ElementsAre;
78 using ::testing::Eq;
79 using ::testing::Ge;
80 using ::testing::Lt;
81 using ::testing::Pair;
82 using ::testing::UnorderedElementsAre;
83
84 // Convenience function to static cast to ctrl_t.
85 ctrl_t CtrlT(int i) { return static_cast<ctrl_t>(i); }
86
87 TEST(Util, NormalizeCapacity) {
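// Normalized capacities are always of the form 2^k - 1, so that capacity + 1
// is a power of two and the probe sequence can wrap with a simple mask.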
88 EXPECT_EQ(1, NormalizeCapacity(0));
89 EXPECT_EQ(1, NormalizeCapacity(1));
90 EXPECT_EQ(3, NormalizeCapacity(2));
91 EXPECT_EQ(3, NormalizeCapacity(3));
92 EXPECT_EQ(7, NormalizeCapacity(4));
93 EXPECT_EQ(7, NormalizeCapacity(7));
94 EXPECT_EQ(15, NormalizeCapacity(8));
95 EXPECT_EQ(15, NormalizeCapacity(15));
96 EXPECT_EQ(15 * 2 + 1, NormalizeCapacity(15 + 1));
97 EXPECT_EQ(15 * 2 + 1, NormalizeCapacity(15 + 2));
98 }
99
100 TEST(Util, GrowthAndCapacity) {
101 // Verify that GrowthToLowerboundCapacity, rounded up by NormalizeCapacity,
102 // gives the minimum capacity that has enough growth.
103 for (size_t growth = 0; growth < 10000; ++growth) {
104 SCOPED_TRACE(growth);
105 size_t capacity = NormalizeCapacity(GrowthToLowerboundCapacity(growth));
106 // The capacity is large enough for `growth`.
107 EXPECT_THAT(CapacityToGrowth(capacity), Ge(growth));
108 // For (capacity+1) < kWidth, growth should equal capacity.
109 if (capacity + 1 < Group::kWidth) {
110 EXPECT_THAT(CapacityToGrowth(capacity), Eq(capacity));
111 } else {
112 EXPECT_THAT(CapacityToGrowth(capacity), Lt(capacity));
113 }
114 if (growth != 0 && capacity > 1) {
115 // There is no smaller capacity that works.
116 EXPECT_THAT(CapacityToGrowth(capacity / 2), Lt(growth));
117 }
118 }
119
120 for (size_t capacity = Group::kWidth - 1; capacity < 10000;
121 capacity = 2 * capacity + 1) {
122 SCOPED_TRACE(capacity);
123 size_t growth = CapacityToGrowth(capacity);
124 EXPECT_THAT(growth, Lt(capacity));
125 EXPECT_LE(GrowthToLowerboundCapacity(growth), capacity);
126 EXPECT_EQ(NormalizeCapacity(GrowthToLowerboundCapacity(growth)), capacity);
127 }
128 }
129
130 TEST(Util, probe_seq) {
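// The probe sequence is quadratic: each step advances by one more group width
// (16, 32, 48, ...) and wraps with the mask (127), so every group offset is
// visited exactly once. A hash of 128 aliases to 0 under the mask, which is
// why the second sequence below is identical to the first.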
131 probe_seq<16> seq(0, 127);
132 auto gen = [&]() {
133 size_t res = seq.offset();
134 seq.next();
135 return res;
136 };
137 std::vector<size_t> offsets(8);
138 std::generate_n(offsets.begin(), 8, gen);
139 EXPECT_THAT(offsets, ElementsAre(0, 16, 48, 96, 32, 112, 80, 64));
140 seq = probe_seq<16>(128, 127);
141 std::generate_n(offsets.begin(), 8, gen);
142 EXPECT_THAT(offsets, ElementsAre(0, 16, 48, 96, 32, 112, 80, 64));
143 }
144
145 TEST(BitMask, Smoke) {
146 EXPECT_FALSE((BitMask<uint8_t, 8>(0)));
147 EXPECT_TRUE((BitMask<uint8_t, 8>(5)));
148
149 EXPECT_THAT((BitMask<uint8_t, 8>(0)), ElementsAre());
150 EXPECT_THAT((BitMask<uint8_t, 8>(0x1)), ElementsAre(0));
151 EXPECT_THAT((BitMask<uint8_t, 8>(0x2)), ElementsAre(1));
152 EXPECT_THAT((BitMask<uint8_t, 8>(0x3)), ElementsAre(0, 1));
153 EXPECT_THAT((BitMask<uint8_t, 8>(0x4)), ElementsAre(2));
154 EXPECT_THAT((BitMask<uint8_t, 8>(0x5)), ElementsAre(0, 2));
155 EXPECT_THAT((BitMask<uint8_t, 8>(0x55)), ElementsAre(0, 2, 4, 6));
156 EXPECT_THAT((BitMask<uint8_t, 8>(0xAA)), ElementsAre(1, 3, 5, 7));
157 }
158
159 TEST(BitMask, WithShift) {
160 // See the non-SSE version of Group for details on what this math is for.
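// A sketch of what this verifies: the SWAR expression sets the high bit of
// every byte of `ctrl` that equals `hash` (byte 2 here). Borrow propagation
// can also set the bit of the byte just above a match (byte 3 here); such
// false positives are harmless because match candidates are re-checked
// against the stored keys, and the lowest set bit still names the true match.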
161 uint64_t ctrl = 0x1716151413121110;
162 uint64_t hash = 0x12;
163 constexpr uint64_t msbs = 0x8080808080808080ULL;
164 constexpr uint64_t lsbs = 0x0101010101010101ULL;
165 auto x = ctrl ^ (lsbs * hash);
166 uint64_t mask = (x - lsbs) & ~x & msbs;
167 EXPECT_EQ(0x0000000080800000, mask);
168
169 BitMask<uint64_t, 8, 3> b(mask);
170 EXPECT_EQ(*b, 2);
171 }
172
173 TEST(BitMask, LeadingTrailing) {
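// With the 64-bit (non-SSE) representation each slot owns one byte of the
// mask, so the Shift of 3 makes LeadingZeros()/TrailingZeros() count whole
// slots rather than raw bits.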
174 EXPECT_EQ((BitMask<uint32_t, 16>(0x00001a40).LeadingZeros()), 3);
175 EXPECT_EQ((BitMask<uint32_t, 16>(0x00001a40).TrailingZeros()), 6);
176
177 EXPECT_EQ((BitMask<uint32_t, 16>(0x00000001).LeadingZeros()), 15);
178 EXPECT_EQ((BitMask<uint32_t, 16>(0x00000001).TrailingZeros()), 0);
179
180 EXPECT_EQ((BitMask<uint32_t, 16>(0x00008000).LeadingZeros()), 0);
181 EXPECT_EQ((BitMask<uint32_t, 16>(0x00008000).TrailingZeros()), 15);
182
183 EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000008080808000).LeadingZeros()), 3);
184 EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000008080808000).TrailingZeros()), 1);
185
186 EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000000000000080).LeadingZeros()), 7);
187 EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000000000000080).TrailingZeros()), 0);
188
189 EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x8000000000000000).LeadingZeros()), 0);
190 EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x8000000000000000).TrailingZeros()), 7);
191 }
192
193 TEST(Group, EmptyGroup) {
194 for (h2_t h = 0; h != 128; ++h) EXPECT_FALSE(Group{EmptyGroup()}.Match(h));
195 }
196
197 TEST(Group, Match) {
198 if (Group::kWidth == 16) {
199 ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), ctrl_t::kDeleted, CtrlT(3),
200 ctrl_t::kEmpty, CtrlT(5), ctrl_t::kSentinel, CtrlT(7),
201 CtrlT(7), CtrlT(5), CtrlT(3), CtrlT(1),
202 CtrlT(1), CtrlT(1), CtrlT(1), CtrlT(1)};
203 EXPECT_THAT(Group{group}.Match(0), ElementsAre());
204 EXPECT_THAT(Group{group}.Match(1), ElementsAre(1, 11, 12, 13, 14, 15));
205 EXPECT_THAT(Group{group}.Match(3), ElementsAre(3, 10));
206 EXPECT_THAT(Group{group}.Match(5), ElementsAre(5, 9));
207 EXPECT_THAT(Group{group}.Match(7), ElementsAre(7, 8));
208 } else if (Group::kWidth == 8) {
209 ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), CtrlT(2),
210 ctrl_t::kDeleted, CtrlT(2), CtrlT(1),
211 ctrl_t::kSentinel, CtrlT(1)};
212 EXPECT_THAT(Group{group}.Match(0), ElementsAre());
213 EXPECT_THAT(Group{group}.Match(1), ElementsAre(1, 5, 7));
214 EXPECT_THAT(Group{group}.Match(2), ElementsAre(2, 4));
215 } else {
216 FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth;
217 }
218 }
219
220 TEST(Group, MaskEmpty) {
221 if (Group::kWidth == 16) {
222 ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), ctrl_t::kDeleted, CtrlT(3),
223 ctrl_t::kEmpty, CtrlT(5), ctrl_t::kSentinel, CtrlT(7),
224 CtrlT(7), CtrlT(5), CtrlT(3), CtrlT(1),
225 CtrlT(1), CtrlT(1), CtrlT(1), CtrlT(1)};
226 EXPECT_THAT(Group{group}.MaskEmpty().LowestBitSet(), 0);
227 EXPECT_THAT(Group{group}.MaskEmpty().HighestBitSet(), 4);
228 } else if (Group::kWidth == 8) {
229 ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), CtrlT(2),
230 ctrl_t::kDeleted, CtrlT(2), CtrlT(1),
231 ctrl_t::kSentinel, CtrlT(1)};
232 EXPECT_THAT(Group{group}.MaskEmpty().LowestBitSet(), 0);
233 EXPECT_THAT(Group{group}.MaskEmpty().HighestBitSet(), 0);
234 } else {
235 FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth;
236 }
237 }
238
239 TEST(Group, MaskFull) {
240 if (Group::kWidth == 16) {
241 ctrl_t group[] = {
242 ctrl_t::kEmpty, CtrlT(1), ctrl_t::kDeleted, CtrlT(3),
243 ctrl_t::kEmpty, CtrlT(5), ctrl_t::kSentinel, CtrlT(7),
244 CtrlT(7), CtrlT(5), ctrl_t::kDeleted, CtrlT(1),
245 CtrlT(1), ctrl_t::kSentinel, ctrl_t::kEmpty, CtrlT(1)};
246 EXPECT_THAT(Group{group}.MaskFull(),
247 ElementsAre(1, 3, 5, 7, 8, 9, 11, 12, 15));
248 } else if (Group::kWidth == 8) {
249 ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), ctrl_t::kEmpty,
250 ctrl_t::kDeleted, CtrlT(2), ctrl_t::kSentinel,
251 ctrl_t::kSentinel, CtrlT(1)};
252 EXPECT_THAT(Group{group}.MaskFull(), ElementsAre(1, 4, 7));
253 } else {
254 FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth;
255 }
256 }
257
258 TEST(Group, MaskEmptyOrDeleted) {
259 if (Group::kWidth == 16) {
260 ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), ctrl_t::kEmpty, CtrlT(3),
261 ctrl_t::kDeleted, CtrlT(5), ctrl_t::kSentinel, CtrlT(7),
262 CtrlT(7), CtrlT(5), CtrlT(3), CtrlT(1),
263 CtrlT(1), CtrlT(1), CtrlT(1), CtrlT(1)};
264 EXPECT_THAT(Group{group}.MaskEmptyOrDeleted().LowestBitSet(), 0);
265 EXPECT_THAT(Group{group}.MaskEmptyOrDeleted().HighestBitSet(), 4);
266 } else if (Group::kWidth == 8) {
267 ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), CtrlT(2),
268 ctrl_t::kDeleted, CtrlT(2), CtrlT(1),
269 ctrl_t::kSentinel, CtrlT(1)};
270 EXPECT_THAT(Group{group}.MaskEmptyOrDeleted().LowestBitSet(), 0);
271 EXPECT_THAT(Group{group}.MaskEmptyOrDeleted().HighestBitSet(), 3);
272 } else {
273 FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth;
274 }
275 }
276
277 TEST(Batch, DropDeletes) {
278 constexpr size_t kCapacity = 63;
279 constexpr size_t kGroupWidth = container_internal::Group::kWidth;
280 std::vector<ctrl_t> ctrl(kCapacity + 1 + kGroupWidth);
281 ctrl[kCapacity] = ctrl_t::kSentinel;
282 std::vector<ctrl_t> pattern = {
283 ctrl_t::kEmpty, CtrlT(2), ctrl_t::kDeleted, CtrlT(2),
284 ctrl_t::kEmpty, CtrlT(1), ctrl_t::kDeleted};
285 for (size_t i = 0; i != kCapacity; ++i) {
286 ctrl[i] = pattern[i % pattern.size()];
287 if (i < kGroupWidth - 1)
288 ctrl[i + kCapacity + 1] = pattern[i % pattern.size()];
289 }
290 ConvertDeletedToEmptyAndFullToDeleted(ctrl.data(), kCapacity);
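// ConvertDeletedToEmptyAndFullToDeleted is the preparation step for rehashing
// in place: deleted slots become empty, full slots become deleted (meaning
// "must be re-inserted"), the sentinel stays put, and the cloned control bytes
// at the end mirror the first group. The loop below checks exactly that.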
291 ASSERT_EQ(ctrl[kCapacity], ctrl_t::kSentinel);
292 for (size_t i = 0; i < kCapacity + kGroupWidth; ++i) {
293 ctrl_t expected = pattern[i % (kCapacity + 1) % pattern.size()];
294 if (i == kCapacity) expected = ctrl_t::kSentinel;
295 if (expected == ctrl_t::kDeleted) expected = ctrl_t::kEmpty;
296 if (IsFull(expected)) expected = ctrl_t::kDeleted;
297 EXPECT_EQ(ctrl[i], expected)
298 << i << " " << static_cast<int>(pattern[i % pattern.size()]);
299 }
300 }
301
302 TEST(Group, CountLeadingEmptyOrDeleted) {
303 const std::vector<ctrl_t> empty_examples = {ctrl_t::kEmpty, ctrl_t::kDeleted};
304 const std::vector<ctrl_t> full_examples = {
305 CtrlT(0), CtrlT(1), CtrlT(2), CtrlT(3),
306 CtrlT(5), CtrlT(9), CtrlT(127), ctrl_t::kSentinel};
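// Note that kSentinel is intentionally part of full_examples: it is neither
// empty nor deleted, so it terminates a leading run just like a full slot.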
307
308 for (ctrl_t empty : empty_examples) {
309 std::vector<ctrl_t> e(Group::kWidth, empty);
310 EXPECT_EQ(Group::kWidth, Group{e.data()}.CountLeadingEmptyOrDeleted());
311 for (ctrl_t full : full_examples) {
312 for (size_t i = 0; i != Group::kWidth; ++i) {
313 std::vector<ctrl_t> f(Group::kWidth, empty);
314 f[i] = full;
315 EXPECT_EQ(i, Group{f.data()}.CountLeadingEmptyOrDeleted());
316 }
317 std::vector<ctrl_t> f(Group::kWidth, empty);
318 f[Group::kWidth * 2 / 3] = full;
319 f[Group::kWidth / 2] = full;
320 EXPECT_EQ(Group::kWidth / 2,
321 Group{f.data()}.CountLeadingEmptyOrDeleted());
322 }
323 }
324 }
325
326 template <class T, bool kTransferable = false>
327 struct ValuePolicy {
328 using slot_type = T;
329 using key_type = T;
330 using init_type = T;
331
332 template <class Allocator, class... Args>
333 static void construct(Allocator* alloc, slot_type* slot, Args&&... args) {
334 absl::allocator_traits<Allocator>::construct(*alloc, slot,
335 std::forward<Args>(args)...);
336 }
337
338 template <class Allocator>
339 static void destroy(Allocator* alloc, slot_type* slot) {
340 absl::allocator_traits<Allocator>::destroy(*alloc, slot);
341 }
342
343 template <class Allocator>
344 static std::integral_constant<bool, kTransferable> transfer(
345 Allocator* alloc, slot_type* new_slot, slot_type* old_slot) {
346 construct(alloc, new_slot, std::move(*old_slot));
347 destroy(alloc, old_slot);
348 return {};
349 }
350
351 static T& element(slot_type* slot) { return *slot; }
352
353 template <class F, class... Args>
354 static decltype(absl::container_internal::DecomposeValue(
355 std::declval<F>(), std::declval<Args>()...))
356 apply(F&& f, Args&&... args) {
357 return absl::container_internal::DecomposeValue(
358 std::forward<F>(f), std::forward<Args>(args)...);
359 }
360 };
361
362 using IntPolicy = ValuePolicy<int64_t>;
363 using Uint8Policy = ValuePolicy<uint8_t>;
364
365 using TranferableIntPolicy = ValuePolicy<int64_t, /*kTransferable=*/true>;
366
367 class StringPolicy {
368 template <class F, class K, class V,
369 class = typename std::enable_if<
370 std::is_convertible<const K&, absl::string_view>::value>::type>
371 decltype(std::declval<F>()(
372 std::declval<const absl::string_view&>(), std::piecewise_construct,
373 std::declval<std::tuple<K>>(),
374 std::declval<V>())) static apply_impl(F&& f,
375 std::pair<std::tuple<K>, V> p) {
376 const absl::string_view& key = std::get<0>(p.first);
377 return std::forward<F>(f)(key, std::piecewise_construct, std::move(p.first),
378 std::move(p.second));
379 }
380
381 public:
382 struct slot_type {
383 struct ctor {};
384
385 template <class... Ts>
386 explicit slot_type(ctor, Ts&&... ts) : pair(std::forward<Ts>(ts)...) {}
387
388 std::pair<std::string, std::string> pair;
389 };
390
391 using key_type = std::string;
392 using init_type = std::pair<std::string, std::string>;
393
394 template <class allocator_type, class... Args>
395 static void construct(allocator_type* alloc, slot_type* slot, Args... args) {
396 std::allocator_traits<allocator_type>::construct(
397 *alloc, slot, typename slot_type::ctor(), std::forward<Args>(args)...);
398 }
399
400 template <class allocator_type>
401 static void destroy(allocator_type* alloc, slot_type* slot) {
402 std::allocator_traits<allocator_type>::destroy(*alloc, slot);
403 }
404
405 template <class allocator_type>
406 static void transfer(allocator_type* alloc, slot_type* new_slot,
407 slot_type* old_slot) {
408 construct(alloc, new_slot, std::move(old_slot->pair));
409 destroy(alloc, old_slot);
410 }
411
412 static std::pair<std::string, std::string>& element(slot_type* slot) {
413 return slot->pair;
414 }
415
416 template <class F, class... Args>
417 static auto apply(F&& f, Args&&... args)
418 -> decltype(apply_impl(std::forward<F>(f),
419 PairArgs(std::forward<Args>(args)...))) {
420 return apply_impl(std::forward<F>(f),
421 PairArgs(std::forward<Args>(args)...));
422 }
423 };
424
425 struct StringHash : absl::Hash<absl::string_view> {
426 using is_transparent = void;
427 };
428 struct StringEq : std::equal_to<absl::string_view> {
429 using is_transparent = void;
430 };
431
432 struct StringTable
433 : raw_hash_set<StringPolicy, StringHash, StringEq, std::allocator<int>> {
434 using Base = typename StringTable::raw_hash_set;
435 StringTable() = default;
436 using Base::Base;
437 };
438
439 template <typename T, bool kTransferable = false>
440 struct ValueTable
441 : raw_hash_set<ValuePolicy<T, kTransferable>, hash_default_hash<T>,
442 std::equal_to<T>, std::allocator<T>> {
443 using Base = typename ValueTable::raw_hash_set;
444 using Base::Base;
445 };
446
447 using IntTable = ValueTable<int64_t>;
448 using Uint8Table = ValueTable<uint8_t>;
449
450 using TransferableIntTable = ValueTable<int64_t, /*kTransferable=*/true>;
451
452 template <typename T>
453 struct CustomAlloc : std::allocator<T> {
454 CustomAlloc() = default;
455
456 template <typename U>
457 explicit CustomAlloc(const CustomAlloc<U>& /*other*/) {}
458
459 template <class U>
460 struct rebind {
461 using other = CustomAlloc<U>;
462 };
463 };
464
465 struct CustomAllocIntTable
466 : raw_hash_set<IntPolicy, hash_default_hash<int64_t>,
467 std::equal_to<int64_t>, CustomAlloc<int64_t>> {
468 using Base = typename CustomAllocIntTable::raw_hash_set;
469 using Base::Base;
470 };
471
472 struct MinimumAlignmentUint8Table
473 : raw_hash_set<Uint8Policy, hash_default_hash<uint8_t>,
474 std::equal_to<uint8_t>, MinimumAlignmentAlloc<uint8_t>> {
475 using Base = typename MinimumAlignmentUint8Table::raw_hash_set;
476 using Base::Base;
477 };
478
479 // Allows for freezing the allocator to expect no further allocations.
480 template <typename T>
481 struct FreezableAlloc : std::allocator<T> {
482 explicit FreezableAlloc(bool* f) : frozen(f) {}
483
484 template <typename U>
485 explicit FreezableAlloc(const FreezableAlloc<U>& other)
486 : frozen(other.frozen) {}
487
488 template <class U>
489 struct rebind {
490 using other = FreezableAlloc<U>;
491 };
492
493 T* allocate(size_t n) {
494 EXPECT_FALSE(*frozen);
495 return std::allocator<T>::allocate(n);
496 }
497
498 bool* frozen;
499 };
500
501 struct BadFastHash {
502 template <class T>
503 size_t operator()(const T&) const {
504 return 0;
505 }
506 };
507
508 struct BadHashFreezableIntTable
509 : raw_hash_set<IntPolicy, BadFastHash, std::equal_to<int64_t>,
510 FreezableAlloc<int64_t>> {
511 using Base = typename BadHashFreezableIntTable::raw_hash_set;
512 using Base::Base;
513 };
514
515 struct BadTable : raw_hash_set<IntPolicy, BadFastHash, std::equal_to<int>,
516 std::allocator<int>> {
517 using Base = typename BadTable::raw_hash_set;
518 BadTable() = default;
519 using Base::Base;
520 };
521
522 TEST(Table, EmptyFunctorOptimization) {
523 static_assert(std::is_empty<std::equal_to<absl::string_view>>::value, "");
524 static_assert(std::is_empty<std::allocator<int>>::value, "");
525
526 struct MockTable {
527 void* ctrl;
528 void* slots;
529 size_t size;
530 size_t capacity;
531 };
532 struct StatelessHash {
533 size_t operator()(absl::string_view) const { return 0; }
534 };
535 struct StatefulHash : StatelessHash {
536 size_t dummy;
537 };
538
539 struct GenerationData {
540 size_t reserved_growth;
541 size_t reservation_size;
542 GenerationType* generation;
543 };
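// MockTable (plus GenerationData when generations are enabled) mirrors the
// data members we expect raw_hash_set to contain. If the stateless hasher,
// equality functor, and allocator were not elided via the empty base
// optimization, the sizeof comparisons below would fail.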
544
545 // Ignore unreachable-code warning. Compiler thinks one branch of each ternary
546 // conditional is unreachable.
547 #if defined(__clang__)
548 #pragma clang diagnostic push
549 #pragma clang diagnostic ignored "-Wunreachable-code"
550 #endif
551 constexpr size_t mock_size = sizeof(MockTable);
552 constexpr size_t generation_size =
553 SwisstableGenerationsEnabled() ? sizeof(GenerationData) : 0;
554 #if defined(__clang__)
555 #pragma clang diagnostic pop
556 #endif
557
558 EXPECT_EQ(
559 mock_size + generation_size,
560 sizeof(
561 raw_hash_set<StringPolicy, StatelessHash,
562 std::equal_to<absl::string_view>, std::allocator<int>>));
563
564 EXPECT_EQ(
565 mock_size + sizeof(StatefulHash) + generation_size,
566 sizeof(
567 raw_hash_set<StringPolicy, StatefulHash,
568 std::equal_to<absl::string_view>, std::allocator<int>>));
569 }
570
571 TEST(Table, Empty) {
572 IntTable t;
573 EXPECT_EQ(0, t.size());
574 EXPECT_TRUE(t.empty());
575 }
576
577 TEST(Table, LookupEmpty) {
578 IntTable t;
579 auto it = t.find(0);
580 EXPECT_TRUE(it == t.end());
581 }
582
583 TEST(Table, Insert1) {
584 IntTable t;
585 EXPECT_TRUE(t.find(0) == t.end());
586 auto res = t.emplace(0);
587 EXPECT_TRUE(res.second);
588 EXPECT_THAT(*res.first, 0);
589 EXPECT_EQ(1, t.size());
590 EXPECT_THAT(*t.find(0), 0);
591 }
592
593 TEST(Table, Insert2) {
594 IntTable t;
595 EXPECT_TRUE(t.find(0) == t.end());
596 auto res = t.emplace(0);
597 EXPECT_TRUE(res.second);
598 EXPECT_THAT(*res.first, 0);
599 EXPECT_EQ(1, t.size());
600 EXPECT_TRUE(t.find(1) == t.end());
601 res = t.emplace(1);
602 EXPECT_TRUE(res.second);
603 EXPECT_THAT(*res.first, 1);
604 EXPECT_EQ(2, t.size());
605 EXPECT_THAT(*t.find(0), 0);
606 EXPECT_THAT(*t.find(1), 1);
607 }
608
609 TEST(Table, InsertCollision) {
610 BadTable t;
611 EXPECT_TRUE(t.find(1) == t.end());
612 auto res = t.emplace(1);
613 EXPECT_TRUE(res.second);
614 EXPECT_THAT(*res.first, 1);
615 EXPECT_EQ(1, t.size());
616
617 EXPECT_TRUE(t.find(2) == t.end());
618 res = t.emplace(2);
619 EXPECT_THAT(*res.first, 2);
620 EXPECT_TRUE(res.second);
621 EXPECT_EQ(2, t.size());
622
623 EXPECT_THAT(*t.find(1), 1);
624 EXPECT_THAT(*t.find(2), 2);
625 }
626
627 // Test that we do not re-add an existing element when the lookup has to
628 // search through many groups that contain deleted elements.
629 TEST(Table, InsertCollisionAndFindAfterDelete) {
630 BadTable t; // all elements go to the same group.
631 // Have at least 2 groups with Group::kWidth collisions
632 // plus some extra collisions in the last group.
633 constexpr size_t kNumInserts = Group::kWidth * 2 + 5;
634 for (size_t i = 0; i < kNumInserts; ++i) {
635 auto res = t.emplace(i);
636 EXPECT_TRUE(res.second);
637 EXPECT_THAT(*res.first, i);
638 EXPECT_EQ(i + 1, t.size());
639 }
640
641 // Remove elements one by one and check
642 // that we still can find all other elements.
643 for (size_t i = 0; i < kNumInserts; ++i) {
644 EXPECT_EQ(1, t.erase(i)) << i;
645 for (size_t j = i + 1; j < kNumInserts; ++j) {
646 EXPECT_THAT(*t.find(j), j);
647 auto res = t.emplace(j);
648 EXPECT_FALSE(res.second) << i << " " << j;
649 EXPECT_THAT(*res.first, j);
650 EXPECT_EQ(kNumInserts - i - 1, t.size());
651 }
652 }
653 EXPECT_TRUE(t.empty());
654 }
655
656 TEST(Table, EraseInSmallTables) {
657 for (int64_t size = 0; size < 64; ++size) {
658 IntTable t;
659 for (int64_t i = 0; i < size; ++i) {
660 t.insert(i);
661 }
662 for (int64_t i = 0; i < size; ++i) {
663 t.erase(i);
664 EXPECT_EQ(t.size(), size - i - 1);
665 for (int64_t j = i + 1; j < size; ++j) {
666 EXPECT_THAT(*t.find(j), j);
667 }
668 }
669 EXPECT_TRUE(t.empty());
670 }
671 }
672
673 TEST(Table, InsertWithinCapacity) {
674 IntTable t;
675 t.reserve(10);
676 const size_t original_capacity = t.capacity();
677 const auto addr = [&](int i) {
678 return reinterpret_cast<uintptr_t>(&*t.find(i));
679 };
680 // Inserting an element does not change capacity.
681 t.insert(0);
682 EXPECT_THAT(t.capacity(), original_capacity);
683 const uintptr_t original_addr_0 = addr(0);
684 // Inserting another element does not rehash.
685 t.insert(1);
686 EXPECT_THAT(t.capacity(), original_capacity);
687 EXPECT_THAT(addr(0), original_addr_0);
688 // Inserting lots of duplicate elements does not rehash.
689 for (int i = 0; i < 100; ++i) {
690 t.insert(i % 10);
691 }
692 EXPECT_THAT(t.capacity(), original_capacity);
693 EXPECT_THAT(addr(0), original_addr_0);
694 // Inserting a range of duplicate elements does not rehash.
695 std::vector<int> dup_range;
696 for (int i = 0; i < 100; ++i) {
697 dup_range.push_back(i % 10);
698 }
699 t.insert(dup_range.begin(), dup_range.end());
700 EXPECT_THAT(t.capacity(), original_capacity);
701 EXPECT_THAT(addr(0), original_addr_0);
702 }
703
704 template <class TableType>
705 class SmallTableResizeTest : public testing::Test {};
706
707 TYPED_TEST_SUITE_P(SmallTableResizeTest);
708
709 TYPED_TEST_P(SmallTableResizeTest, InsertIntoSmallTable) {
710 TypeParam t;
711 for (int i = 0; i < 32; ++i) {
712 t.insert(i);
713 ASSERT_EQ(t.size(), i + 1);
714 for (int j = 0; j < i + 1; ++j) {
715 EXPECT_TRUE(t.find(j) != t.end());
716 EXPECT_EQ(*t.find(j), j);
717 }
718 }
719 }
720
721 TYPED_TEST_P(SmallTableResizeTest, ResizeGrowSmallTables) {
722 TypeParam t;
723 for (size_t source_size = 0; source_size < 32; ++source_size) {
724 for (size_t target_size = source_size; target_size < 32; ++target_size) {
725 for (bool rehash : {false, true}) {
726 for (size_t i = 0; i < source_size; ++i) {
727 t.insert(static_cast<int>(i));
728 }
729 if (rehash) {
730 t.rehash(target_size);
731 } else {
732 t.reserve(target_size);
733 }
734 for (size_t i = 0; i < source_size; ++i) {
735 EXPECT_TRUE(t.find(static_cast<int>(i)) != t.end());
736 EXPECT_EQ(*t.find(static_cast<int>(i)), static_cast<int>(i));
737 }
738 }
739 }
740 }
741 }
742
743 TYPED_TEST_P(SmallTableResizeTest, ResizeReduceSmallTables) {
744 TypeParam t;
745 for (size_t source_size = 0; source_size < 32; ++source_size) {
746 for (size_t target_size = 0; target_size <= source_size; ++target_size) {
747 size_t inserted_count = std::min<size_t>(source_size, 5);
748 for (size_t i = 0; i < inserted_count; ++i) {
749 t.insert(static_cast<int>(i));
750 }
751 t.rehash(target_size);
752 for (size_t i = 0; i < inserted_count; ++i) {
753 EXPECT_TRUE(t.find(static_cast<int>(i)) != t.end());
754 EXPECT_EQ(*t.find(static_cast<int>(i)), static_cast<int>(i));
755 }
756 }
757 }
758 }
759
760 REGISTER_TYPED_TEST_SUITE_P(SmallTableResizeTest, InsertIntoSmallTable,
761 ResizeGrowSmallTables, ResizeReduceSmallTables);
762 using SmallTableTypes = ::testing::Types<IntTable, TransferableIntTable>;
763 INSTANTIATE_TYPED_TEST_SUITE_P(InstanceSmallTableResizeTest,
764 SmallTableResizeTest, SmallTableTypes);
765
766 TEST(Table, LazyEmplace) {
767 StringTable t;
768 bool called = false;
769 auto it = t.lazy_emplace("abc", [&](const StringTable::constructor& f) {
770 called = true;
771 f("abc", "ABC");
772 });
773 EXPECT_TRUE(called);
774 EXPECT_THAT(*it, Pair("abc", "ABC"));
775 called = false;
776 it = t.lazy_emplace("abc", [&](const StringTable::constructor& f) {
777 called = true;
778 f("abc", "DEF");
779 });
780 EXPECT_FALSE(called);
781 EXPECT_THAT(*it, Pair("abc", "ABC"));
782 }
783
784 TEST(Table, ContainsEmpty) {
785 IntTable t;
786
787 EXPECT_FALSE(t.contains(0));
788 }
789
790 TEST(Table, Contains1) {
791 IntTable t;
792
793 EXPECT_TRUE(t.insert(0).second);
794 EXPECT_TRUE(t.contains(0));
795 EXPECT_FALSE(t.contains(1));
796
797 EXPECT_EQ(1, t.erase(0));
798 EXPECT_FALSE(t.contains(0));
799 }
800
801 TEST(Table, Contains2) {
802 IntTable t;
803
804 EXPECT_TRUE(t.insert(0).second);
805 EXPECT_TRUE(t.contains(0));
806 EXPECT_FALSE(t.contains(1));
807
808 t.clear();
809 EXPECT_FALSE(t.contains(0));
810 }
811
812 int decompose_constructed;
813 int decompose_copy_constructed;
814 int decompose_copy_assigned;
815 int decompose_move_constructed;
816 int decompose_move_assigned;
817 struct DecomposeType {
818 DecomposeType(int i = 0) : i(i) { // NOLINT
819 ++decompose_constructed;
820 }
821
822 explicit DecomposeType(const char* d) : DecomposeType(*d) {}
823
824 DecomposeType(const DecomposeType& other) : i(other.i) {
825 ++decompose_copy_constructed;
826 }
827 DecomposeType& operator=(const DecomposeType& other) {
828 ++decompose_copy_assigned;
829 i = other.i;
830 return *this;
831 }
832 DecomposeType(DecomposeType&& other) : i(other.i) {
833 ++decompose_move_constructed;
834 }
835 DecomposeType& operator=(DecomposeType&& other) {
836 ++decompose_move_assigned;
837 i = other.i;
838 return *this;
839 }
840
841 int i;
842 };
843
844 struct DecomposeHash {
845 using is_transparent = void;
846 size_t operator()(const DecomposeType& a) const { return a.i; }
847 size_t operator()(int a) const { return a; }
848 size_t operator()(const char* a) const { return *a; }
849 };
850
851 struct DecomposeEq {
852 using is_transparent = void;
853 bool operator()(const DecomposeType& a, const DecomposeType& b) const {
854 return a.i == b.i;
855 }
856 bool operator()(const DecomposeType& a, int b) const { return a.i == b; }
857 bool operator()(const DecomposeType& a, const char* b) const {
858 return a.i == *b;
859 }
860 };
861
862 struct DecomposePolicy {
863 using slot_type = DecomposeType;
864 using key_type = DecomposeType;
865 using init_type = DecomposeType;
866
867 template <typename T>
868 static void construct(void*, DecomposeType* slot, T&& v) {
869 ::new (slot) DecomposeType(std::forward<T>(v));
870 }
871 static void destroy(void*, DecomposeType* slot) { slot->~DecomposeType(); }
872 static DecomposeType& element(slot_type* slot) { return *slot; }
873
874 template <class F, class T>
875 static auto apply(F&& f, const T& x) -> decltype(std::forward<F>(f)(x, x)) {
876 return std::forward<F>(f)(x, x);
877 }
878 };
879
880 template <typename Hash, typename Eq>
881 void TestDecompose(bool construct_three) {
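// `construct_three` states whether emplacing the `const char*` key "3" is
// expected to construct a DecomposeType; it is false only when both the
// hasher and the equality functor provide transparent const char* overloads.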
882 DecomposeType elem{0};
883 const int one = 1;
884 const char* three_p = "3";
885 const auto& three = three_p;
886 const int elem_vector_count = 256;
887 std::vector<DecomposeType> elem_vector(elem_vector_count, DecomposeType{0});
888 std::iota(elem_vector.begin(), elem_vector.end(), 0);
889
890 using DecomposeSet =
891 raw_hash_set<DecomposePolicy, Hash, Eq, std::allocator<int>>;
892 DecomposeSet set1;
893
894 decompose_constructed = 0;
895 int expected_constructed = 0;
896 EXPECT_EQ(expected_constructed, decompose_constructed);
897 set1.insert(elem);
898 EXPECT_EQ(expected_constructed, decompose_constructed);
899 set1.insert(1);
900 EXPECT_EQ(++expected_constructed, decompose_constructed);
901 set1.emplace("3");
902 EXPECT_EQ(++expected_constructed, decompose_constructed);
903 EXPECT_EQ(expected_constructed, decompose_constructed);
904
905 { // insert(T&&)
906 set1.insert(1);
907 EXPECT_EQ(expected_constructed, decompose_constructed);
908 }
909
910 { // insert(const T&)
911 set1.insert(one);
912 EXPECT_EQ(expected_constructed, decompose_constructed);
913 }
914
915 { // insert(hint, T&&)
916 set1.insert(set1.begin(), 1);
917 EXPECT_EQ(expected_constructed, decompose_constructed);
918 }
919
920 { // insert(hint, const T&)
921 set1.insert(set1.begin(), one);
922 EXPECT_EQ(expected_constructed, decompose_constructed);
923 }
924
925 { // emplace(...)
926 set1.emplace(1);
927 EXPECT_EQ(expected_constructed, decompose_constructed);
928 set1.emplace("3");
929 expected_constructed += construct_three;
930 EXPECT_EQ(expected_constructed, decompose_constructed);
931 set1.emplace(one);
932 EXPECT_EQ(expected_constructed, decompose_constructed);
933 set1.emplace(three);
934 expected_constructed += construct_three;
935 EXPECT_EQ(expected_constructed, decompose_constructed);
936 }
937
938 { // emplace_hint(...)
939 set1.emplace_hint(set1.begin(), 1);
940 EXPECT_EQ(expected_constructed, decompose_constructed);
941 set1.emplace_hint(set1.begin(), "3");
942 expected_constructed += construct_three;
943 EXPECT_EQ(expected_constructed, decompose_constructed);
944 set1.emplace_hint(set1.begin(), one);
945 EXPECT_EQ(expected_constructed, decompose_constructed);
946 set1.emplace_hint(set1.begin(), three);
947 expected_constructed += construct_three;
948 EXPECT_EQ(expected_constructed, decompose_constructed);
949 }
950
951 decompose_copy_constructed = 0;
952 decompose_copy_assigned = 0;
953 decompose_move_constructed = 0;
954 decompose_move_assigned = 0;
955 int expected_copy_constructed = 0;
956 int expected_move_constructed = 0;
957 { // raw_hash_set(first, last) with random-access iterators
958 DecomposeSet set2(elem_vector.begin(), elem_vector.end());
959 // Expect exactly one copy-constructor call for each element if no
960 // rehashing is done.
961 expected_copy_constructed += elem_vector_count;
962 EXPECT_EQ(expected_copy_constructed, decompose_copy_constructed);
963 EXPECT_EQ(expected_move_constructed, decompose_move_constructed);
964 EXPECT_EQ(0, decompose_move_assigned);
965 EXPECT_EQ(0, decompose_copy_assigned);
966 }
967
968 { // raw_hash_set(first, last) with forward iterators
969 std::list<DecomposeType> elem_list(elem_vector.begin(), elem_vector.end());
970 expected_copy_constructed = decompose_copy_constructed;
971 DecomposeSet set2(elem_list.begin(), elem_list.end());
972 // Expect exactly N elements copied into set, expect at most 2*N elements
973 // moving internally for all resizing needed (for a growth factor of 2).
974 expected_copy_constructed += elem_vector_count;
975 EXPECT_EQ(expected_copy_constructed, decompose_copy_constructed);
976 expected_move_constructed += elem_vector_count;
977 EXPECT_LT(expected_move_constructed, decompose_move_constructed);
978 expected_move_constructed += elem_vector_count;
979 EXPECT_GE(expected_move_constructed, decompose_move_constructed);
980 EXPECT_EQ(0, decompose_move_assigned);
981 EXPECT_EQ(0, decompose_copy_assigned);
982 expected_copy_constructed = decompose_copy_constructed;
983 expected_move_constructed = decompose_move_constructed;
984 }
985
986 { // insert(first, last)
987 DecomposeSet set2;
988 set2.insert(elem_vector.begin(), elem_vector.end());
989 // Expect exactly N elements copied into set, expect at most 2*N elements
990 // moving internally for all resizing needed (for a growth factor of 2).
991 const int expected_new_elements = elem_vector_count;
992 const int expected_max_element_moves = 2 * elem_vector_count;
993 expected_copy_constructed += expected_new_elements;
994 EXPECT_EQ(expected_copy_constructed, decompose_copy_constructed);
995 expected_move_constructed += expected_max_element_moves;
996 EXPECT_GE(expected_move_constructed, decompose_move_constructed);
997 EXPECT_EQ(0, decompose_move_assigned);
998 EXPECT_EQ(0, decompose_copy_assigned);
999 expected_copy_constructed = decompose_copy_constructed;
1000 expected_move_constructed = decompose_move_constructed;
1001 }
1002 }
1003
1004 TEST(Table, Decompose) {
1005 if (SwisstableGenerationsEnabled()) {
1006 GTEST_SKIP() << "Generations being enabled causes extra rehashes.";
1007 }
1008
1009 TestDecompose<DecomposeHash, DecomposeEq>(false);
1010
1011 struct TransparentHashIntOverload {
1012 size_t operator()(const DecomposeType& a) const { return a.i; }
1013 size_t operator()(int a) const { return a; }
1014 };
1015 struct TransparentEqIntOverload {
1016 bool operator()(const DecomposeType& a, const DecomposeType& b) const {
1017 return a.i == b.i;
1018 }
1019 bool operator()(const DecomposeType& a, int b) const { return a.i == b; }
1020 };
1021 TestDecompose<TransparentHashIntOverload, DecomposeEq>(true);
1022 TestDecompose<TransparentHashIntOverload, TransparentEqIntOverload>(true);
1023 TestDecompose<DecomposeHash, TransparentEqIntOverload>(true);
1024 }
1025
1026 // Returns the largest m such that a table with m elements has the same number
1027 // of buckets as a table with n elements.
1028 size_t MaxDensitySize(size_t n) {
1029 IntTable t;
1030 t.reserve(n);
1031 for (size_t i = 0; i != n; ++i) t.emplace(i);
1032 const size_t c = t.bucket_count();
1033 while (c == t.bucket_count()) t.emplace(n++);
1034 return t.size() - 1;
1035 }
1036
1037 struct Modulo1000Hash {
1038 size_t operator()(int x) const { return x % 1000; }
1039 };
1040
1041 struct Modulo1000HashTable
1042 : public raw_hash_set<IntPolicy, Modulo1000Hash, std::equal_to<int>,
1043 std::allocator<int>> {};
1044
1045 // Test that a rehash without resizing happens when there are many deleted slots.
1046 TEST(Table, RehashWithNoResize) {
1047 if (SwisstableGenerationsEnabled()) {
1048 GTEST_SKIP() << "Generations being enabled causes extra rehashes.";
1049 }
1050
1051 Modulo1000HashTable t;
1052 // Add keys that all hash to the same value so that there are at least
1053 // kMinFullGroups groups with Group::kWidth collisions each.
1054 // Then fill up to MaxDensitySize.
1055 const size_t kMinFullGroups = 7;
1056 std::vector<int> keys;
1057 for (size_t i = 0; i < MaxDensitySize(Group::kWidth * kMinFullGroups); ++i) {
1058 int k = i * 1000;
1059 t.emplace(k);
1060 keys.push_back(k);
1061 }
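// Every key is a multiple of 1000, so Modulo1000Hash maps them all to 0 and
// they share a single probe sequence.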
1062 const size_t capacity = t.capacity();
1063
1064 // Remove elements from all groups except the first and the last one.
1065 // All elements removed from full groups will be marked as ctrl_t::kDeleted.
1066 const size_t erase_begin = Group::kWidth / 2;
1067 const size_t erase_end = (t.size() / Group::kWidth - 1) * Group::kWidth;
1068 for (size_t i = erase_begin; i < erase_end; ++i) {
1069 EXPECT_EQ(1, t.erase(keys[i])) << i;
1070 }
1071 keys.erase(keys.begin() + erase_begin, keys.begin() + erase_end);
1072
1073 auto last_key = keys.back();
1074 size_t last_key_num_probes = GetHashtableDebugNumProbes(t, last_key);
1075
1076 // Make sure that we have to make a lot of probes for last key.
1077 ASSERT_GT(last_key_num_probes, kMinFullGroups);
1078
1079 int x = 1;
1080 // Insert and erase one element, before inplace rehash happen.
1081 while (last_key_num_probes == GetHashtableDebugNumProbes(t, last_key)) {
1082 t.emplace(x);
1083 ASSERT_EQ(capacity, t.capacity());
1084 // All elements should be there.
1085 ASSERT_TRUE(t.find(x) != t.end()) << x;
1086 for (const auto& k : keys) {
1087 ASSERT_TRUE(t.find(k) != t.end()) << k;
1088 }
1089 t.erase(x);
1090 ++x;
1091 }
1092 }
1093
1094 TEST(Table, InsertEraseStressTest) {
1095 IntTable t;
1096 const size_t kMinElementCount = 250;
1097 std::deque<int> keys;
1098 size_t i = 0;
1099 for (; i < MaxDensitySize(kMinElementCount); ++i) {
1100 t.emplace(i);
1101 keys.push_back(i);
1102 }
1103 const size_t kNumIterations = 1000000;
1104 for (; i < kNumIterations; ++i) {
1105 ASSERT_EQ(1, t.erase(keys.front()));
1106 keys.pop_front();
1107 t.emplace(i);
1108 keys.push_back(i);
1109 }
1110 }
1111
1112 TEST(Table, InsertOverloads) {
1113 StringTable t;
1114 // These should all trigger the insert(init_type) overload.
1115 t.insert({{}, {}});
1116 t.insert({"ABC", {}});
1117 t.insert({"DEF", "!!!"});
1118
1119 EXPECT_THAT(t, UnorderedElementsAre(Pair("", ""), Pair("ABC", ""),
1120 Pair("DEF", "!!!")));
1121 }
1122
1123 TEST(Table, LargeTable) {
1124 IntTable t;
1125 for (int64_t i = 0; i != 100000; ++i) t.emplace(i << 40);
1126 for (int64_t i = 0; i != 100000; ++i) ASSERT_EQ(i << 40, *t.find(i << 40));
1127 }
1128
1129 // Timeout if copy is quadratic as it was in Rust.
1130 TEST(Table, EnsureNonQuadraticAsInRust) {
1131 static const size_t kLargeSize = 1 << 15;
1132
1133 IntTable t;
1134 for (size_t i = 0; i != kLargeSize; ++i) {
1135 t.insert(i);
1136 }
1137
1138 // If this is quadratic, the test will timeout.
1139 IntTable t2;
1140 for (const auto& entry : t) t2.insert(entry);
1141 }
1142
1143 TEST(Table, ClearBug) {
1144 if (SwisstableGenerationsEnabled()) {
1145 GTEST_SKIP() << "Generations being enabled causes extra rehashes.";
1146 }
1147
1148 IntTable t;
1149 constexpr size_t capacity = container_internal::Group::kWidth - 1;
1150 constexpr size_t max_size = capacity / 2 + 1;
1151 for (size_t i = 0; i < max_size; ++i) {
1152 t.insert(i);
1153 }
1154 ASSERT_EQ(capacity, t.capacity());
1155 intptr_t original = reinterpret_cast<intptr_t>(&*t.find(2));
1156 t.clear();
1157 ASSERT_EQ(capacity, t.capacity());
1158 for (size_t i = 0; i < max_size; ++i) {
1159 t.insert(i);
1160 }
1161 ASSERT_EQ(capacity, t.capacity());
1162 intptr_t second = reinterpret_cast<intptr_t>(&*t.find(2));
1163 // We are checking that original and second are close enough to each other
1164 // that they are probably still in the same group. This is not strictly
1165 // guaranteed.
1166 EXPECT_LT(static_cast<size_t>(std::abs(original - second)),
1167 capacity * sizeof(IntTable::value_type));
1168 }
1169
1170 TEST(Table, Erase) {
1171 IntTable t;
1172 EXPECT_TRUE(t.find(0) == t.end());
1173 auto res = t.emplace(0);
1174 EXPECT_TRUE(res.second);
1175 EXPECT_EQ(1, t.size());
1176 t.erase(res.first);
1177 EXPECT_EQ(0, t.size());
1178 EXPECT_TRUE(t.find(0) == t.end());
1179 }
1180
1181 TEST(Table, EraseMaintainsValidIterator) {
1182 IntTable t;
1183 const int kNumElements = 100;
1184 for (int i = 0; i < kNumElements; i++) {
1185 EXPECT_TRUE(t.emplace(i).second);
1186 }
1187 EXPECT_EQ(t.size(), kNumElements);
1188
1189 int num_erase_calls = 0;
1190 auto it = t.begin();
1191 while (it != t.end()) {
1192 t.erase(it++);
1193 num_erase_calls++;
1194 }
1195
1196 EXPECT_TRUE(t.empty());
1197 EXPECT_EQ(num_erase_calls, kNumElements);
1198 }
1199
1200 TEST(Table, EraseBeginEnd) {
1201 IntTable t;
1202 for (int i = 0; i < 10; ++i) t.insert(i);
1203 EXPECT_EQ(t.size(), 10);
1204 t.erase(t.begin(), t.end());
1205 EXPECT_EQ(t.size(), 0);
1206 }
1207
1208 // Collect N bad keys with the following algorithm:
1209 // 1. Create an empty table and reserve it to 2 * N.
1210 // 2. Insert N new elements.
1211 // 3. Take the first Group::kWidth - 1 keys (in iteration order) into the bad_keys array.
1212 // 4. Clear the table without resizing.
1213 // 5. Repeat from step 2 until N keys have been collected.
1214 std::vector<int64_t> CollectBadMergeKeys(size_t N) {
1215 static constexpr int kGroupSize = Group::kWidth - 1;
1216
1217 auto topk_range = [](size_t b, size_t e,
1218 IntTable* t) -> std::vector<int64_t> {
1219 for (size_t i = b; i != e; ++i) {
1220 t->emplace(i);
1221 }
1222 std::vector<int64_t> res;
1223 res.reserve(kGroupSize);
1224 auto it = t->begin();
1225 for (size_t i = b; i != e && i != b + kGroupSize; ++i, ++it) {
1226 res.push_back(*it);
1227 }
1228 return res;
1229 };
1230
1231 std::vector<int64_t> bad_keys;
1232 bad_keys.reserve(N);
1233 IntTable t;
1234 t.reserve(N * 2);
1235
1236 for (size_t b = 0; bad_keys.size() < N; b += N) {
1237 auto keys = topk_range(b, b + N, &t);
1238 bad_keys.insert(bad_keys.end(), keys.begin(), keys.end());
1239 t.erase(t.begin(), t.end());
1240 EXPECT_TRUE(t.empty());
1241 }
1242 return bad_keys;
1243 }
1244
1245 struct ProbeStats {
1246 // Number of elements with specific probe length over all tested tables.
1247 std::vector<size_t> all_probes_histogram;
1248 // Ratios total_probe_length/size for every tested table.
1249 std::vector<double> single_table_ratios;
1250
1251 // Average ratio total_probe_length/size over tables.
1252 double AvgRatio() const {
1253 return std::accumulate(single_table_ratios.begin(),
1254 single_table_ratios.end(), 0.0) /
1255 single_table_ratios.size();
1256 }
1257
1258 // Maximum ratio total_probe_length/size over tables.
1259 double MaxRatio() const {
1260 return *std::max_element(single_table_ratios.begin(),
1261 single_table_ratios.end());
1262 }
1263
1264 // Percentile ratio total_probe_length/size over tables.
1265 double PercentileRatio(double Percentile = 0.95) const {
1266 auto r = single_table_ratios;
1267 auto mid = r.begin() + static_cast<size_t>(r.size() * Percentile);
1268 if (mid != r.end()) {
1269 std::nth_element(r.begin(), mid, r.end());
1270 return *mid;
1271 } else {
1272 return MaxRatio();
1273 }
1274 }
1275
1276 // Maximum probe length over all elements and all tables.
1277 size_t MaxProbe() const { return all_probes_histogram.size(); }
1278
1279 // Fraction of elements with specified probe length.
1280 std::vector<double> ProbeNormalizedHistogram() const {
1281 double total_elements = std::accumulate(all_probes_histogram.begin(),
1282 all_probes_histogram.end(), 0ull);
1283 std::vector<double> res;
1284 for (size_t p : all_probes_histogram) {
1285 res.push_back(p / total_elements);
1286 }
1287 return res;
1288 }
1289
1290 size_t PercentileProbe(double Percentile = 0.99) const {
1291 size_t idx = 0;
1292 for (double p : ProbeNormalizedHistogram()) {
1293 if (Percentile > p) {
1294 Percentile -= p;
1295 ++idx;
1296 } else {
1297 return idx;
1298 }
1299 }
1300 return idx;
1301 }
1302
1303 friend std::ostream& operator<<(std::ostream& out, const ProbeStats& s) {
1304 out << "{AvgRatio:" << s.AvgRatio() << ", MaxRatio:" << s.MaxRatio()
1305 << ", PercentileRatio:" << s.PercentileRatio()
1306 << ", MaxProbe:" << s.MaxProbe() << ", Probes=[";
1307 for (double p : s.ProbeNormalizedHistogram()) {
1308 out << p << ",";
1309 }
1310 out << "]}";
1311
1312 return out;
1313 }
1314 };
1315
1316 struct ExpectedStats {
1317 double avg_ratio;
1318 double max_ratio;
1319 std::vector<std::pair<double, double>> pecentile_ratios;
1320 std::vector<std::pair<double, double>> pecentile_probes;
1321
1322 friend std::ostream& operator<<(std::ostream& out, const ExpectedStats& s) {
1323 out << "{AvgRatio:" << s.avg_ratio << ", MaxRatio:" << s.max_ratio
1324 << ", PercentileRatios: [";
1325 for (auto el : s.pecentile_ratios) {
1326 out << el.first << ":" << el.second << ", ";
1327 }
1328 out << "], PercentileProbes: [";
1329 for (auto el : s.pecentile_probes) {
1330 out << el.first << ":" << el.second << ", ";
1331 }
1332 out << "]}";
1333
1334 return out;
1335 }
1336 };
1337
1338 void VerifyStats(size_t size, const ExpectedStats& exp,
1339 const ProbeStats& stats) {
1340 EXPECT_LT(stats.AvgRatio(), exp.avg_ratio) << size << " " << stats;
1341 EXPECT_LT(stats.MaxRatio(), exp.max_ratio) << size << " " << stats;
1342 for (auto pr : exp.pecentile_ratios) {
1343 EXPECT_LE(stats.PercentileRatio(pr.first), pr.second)
1344 << size << " " << pr.first << " " << stats;
1345 }
1346
1347 for (auto pr : exp.pecentile_probes) {
1348 EXPECT_LE(stats.PercentileProbe(pr.first), pr.second)
1349 << size << " " << pr.first << " " << stats;
1350 }
1351 }
1352
1353 using ProbeStatsPerSize = std::map<size_t, ProbeStats>;
1354
1355 // Collect total ProbeStats over num_iters iterations of the following algorithm:
1356 // 1. Create a new table and reserve it to keys.size() * 2.
1357 // 2. Insert all keys, each XORed with the iteration's seed.
1358 // 3. Collect ProbeStats from the final table.
1359 ProbeStats CollectProbeStatsOnKeysXoredWithSeed(
1360 const std::vector<int64_t>& keys, size_t num_iters) {
1361 const size_t reserve_size = keys.size() * 2;
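// Reserving twice the number of keys keeps the table below its maximum load
// factor for the whole run, so no rehash disturbs the measured probe lengths.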
1362
1363 ProbeStats stats;
1364
1365 int64_t seed = 0x71b1a19b907d6e33;
1366 while (num_iters--) {
1367 seed = static_cast<int64_t>(static_cast<uint64_t>(seed) * 17 + 13);
1368 IntTable t1;
1369 t1.reserve(reserve_size);
1370 for (const auto& key : keys) {
1371 t1.emplace(key ^ seed);
1372 }
1373
1374 auto probe_histogram = GetHashtableDebugNumProbesHistogram(t1);
1375 stats.all_probes_histogram.resize(
1376 std::max(stats.all_probes_histogram.size(), probe_histogram.size()));
1377 std::transform(probe_histogram.begin(), probe_histogram.end(),
1378 stats.all_probes_histogram.begin(),
1379 stats.all_probes_histogram.begin(), std::plus<size_t>());
1380
1381 size_t total_probe_seq_length = 0;
1382 for (size_t i = 0; i < probe_histogram.size(); ++i) {
1383 total_probe_seq_length += i * probe_histogram[i];
1384 }
1385 stats.single_table_ratios.push_back(total_probe_seq_length * 1.0 /
1386 keys.size());
1387 t1.erase(t1.begin(), t1.end());
1388 }
1389 return stats;
1390 }
1391
1392 ExpectedStats XorSeedExpectedStats() {
1393 constexpr bool kRandomizesInserts =
1394 #ifdef NDEBUG
1395 false;
1396 #else // NDEBUG
1397 true;
1398 #endif // NDEBUG
1399
1400 // The effective load factor is larger in non-opt mode because we insert
1401 // elements out of order.
1402 switch (container_internal::Group::kWidth) {
1403 case 8:
1404 if (kRandomizesInserts) {
1405 return {0.05,
1406 1.0,
1407 {{0.95, 0.5}},
1408 {{0.95, 0}, {0.99, 2}, {0.999, 4}, {0.9999, 10}}};
1409 } else {
1410 return {0.05,
1411 2.0,
1412 {{0.95, 0.1}},
1413 {{0.95, 0}, {0.99, 2}, {0.999, 4}, {0.9999, 10}}};
1414 }
1415 case 16:
1416 if (kRandomizesInserts) {
1417 return {0.1,
1418 2.0,
1419 {{0.95, 0.1}},
1420 {{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}};
1421 } else {
1422 return {0.05,
1423 1.0,
1424 {{0.95, 0.05}},
1425 {{0.95, 0}, {0.99, 1}, {0.999, 4}, {0.9999, 10}}};
1426 }
1427 }
1428 LOG(FATAL) << "Unknown Group width";
1429 return {};
1430 }
1431
1432 // TODO(b/80415403): Figure out why this test is so flaky, esp. on MSVC
1433 TEST(Table, DISABLED_EnsureNonQuadraticTopNXorSeedByProbeSeqLength) {
1434 ProbeStatsPerSize stats;
1435 std::vector<size_t> sizes = {Group::kWidth << 5, Group::kWidth << 10};
1436 for (size_t size : sizes) {
1437 stats[size] =
1438 CollectProbeStatsOnKeysXoredWithSeed(CollectBadMergeKeys(size), 200);
1439 }
1440 auto expected = XorSeedExpectedStats();
1441 for (size_t size : sizes) {
1442 auto& stat = stats[size];
1443 VerifyStats(size, expected, stat);
1444 LOG(INFO) << size << " " << stat;
1445 }
1446 }
1447
1448 // Collect total ProbeStats over num_iters iterations of the following algorithm:
1449 // 1. Create a new table.
1450 // 2. Select 10% of the keys and, for each selected key, insert the 10 elements key * 17 + j * 13.
1451 // 3. Collect ProbeStats from the final table.
1452 ProbeStats CollectProbeStatsOnLinearlyTransformedKeys(
1453 const std::vector<int64_t>& keys, size_t num_iters) {
1454 ProbeStats stats;
1455
1456 std::random_device rd;
1457 std::mt19937 rng(rd());
1458 auto linear_transform = [](size_t x, size_t y) { return x * 17 + y * 13; };
1459 std::uniform_int_distribution<size_t> dist(0, keys.size() - 1);
1460 while (num_iters--) {
1461 IntTable t1;
1462 size_t num_keys = keys.size() / 10;
1463 size_t start = dist(rng);
1464 for (size_t i = 0; i != num_keys; ++i) {
1465 for (size_t j = 0; j != 10; ++j) {
1466 t1.emplace(linear_transform(keys[(i + start) % keys.size()], j));
1467 }
1468 }
1469
1470 auto probe_histogram = GetHashtableDebugNumProbesHistogram(t1);
1471 stats.all_probes_histogram.resize(
1472 std::max(stats.all_probes_histogram.size(), probe_histogram.size()));
1473 std::transform(probe_histogram.begin(), probe_histogram.end(),
1474 stats.all_probes_histogram.begin(),
1475 stats.all_probes_histogram.begin(), std::plus<size_t>());
1476
1477 size_t total_probe_seq_length = 0;
1478 for (size_t i = 0; i < probe_histogram.size(); ++i) {
1479 total_probe_seq_length += i * probe_histogram[i];
1480 }
1481 stats.single_table_ratios.push_back(total_probe_seq_length * 1.0 /
1482 t1.size());
1483 t1.erase(t1.begin(), t1.end());
1484 }
1485 return stats;
1486 }
1487
1488 ExpectedStats LinearTransformExpectedStats() {
1489 constexpr bool kRandomizesInserts =
1490 #ifdef NDEBUG
1491 false;
1492 #else // NDEBUG
1493 true;
1494 #endif // NDEBUG
1495
1496 // The effective load factor is larger in non-opt mode because we insert
1497 // elements out of order.
1498 switch (container_internal::Group::kWidth) {
1499 case 8:
1500 if (kRandomizesInserts) {
1501 return {0.1,
1502 0.5,
1503 {{0.95, 0.3}},
1504 {{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}};
1505 } else {
1506 return {0.4,
1507 0.6,
1508 {{0.95, 0.5}},
1509 {{0.95, 1}, {0.99, 14}, {0.999, 23}, {0.9999, 26}}};
1510 }
1511 case 16:
1512 if (kRandomizesInserts) {
1513 return {0.1,
1514 0.4,
1515 {{0.95, 0.3}},
1516 {{0.95, 1}, {0.99, 2}, {0.999, 9}, {0.9999, 15}}};
1517 } else {
1518 return {0.05,
1519 0.2,
1520 {{0.95, 0.1}},
1521 {{0.95, 0}, {0.99, 1}, {0.999, 6}, {0.9999, 10}}};
1522 }
1523 }
1524 LOG(FATAL) << "Unknown Group width";
1525 return {};
1526 }
1527
1528 // TODO(b/80415403): Figure out why this test is so flaky.
1529 TEST(Table, DISABLED_EnsureNonQuadraticTopNLinearTransformByProbeSeqLength) {
1530 ProbeStatsPerSize stats;
1531 std::vector<size_t> sizes = {Group::kWidth << 5, Group::kWidth << 10};
1532 for (size_t size : sizes) {
1533 stats[size] = CollectProbeStatsOnLinearlyTransformedKeys(
1534 CollectBadMergeKeys(size), 300);
1535 }
1536 auto expected = LinearTransformExpectedStats();
1537 for (size_t size : sizes) {
1538 auto& stat = stats[size];
1539 VerifyStats(size, expected, stat);
1540 LOG(INFO) << size << " " << stat;
1541 }
1542 }
1543
1544 TEST(Table, EraseCollision) {
1545 BadTable t;
1546
1547 // 1 2 3
1548 t.emplace(1);
1549 t.emplace(2);
1550 t.emplace(3);
1551 EXPECT_THAT(*t.find(1), 1);
1552 EXPECT_THAT(*t.find(2), 2);
1553 EXPECT_THAT(*t.find(3), 3);
1554 EXPECT_EQ(3, t.size());
1555
1556 // 1 DELETED 3
1557 t.erase(t.find(2));
1558 EXPECT_THAT(*t.find(1), 1);
1559 EXPECT_TRUE(t.find(2) == t.end());
1560 EXPECT_THAT(*t.find(3), 3);
1561 EXPECT_EQ(2, t.size());
1562
1563 // DELETED DELETED 3
1564 t.erase(t.find(1));
1565 EXPECT_TRUE(t.find(1) == t.end());
1566 EXPECT_TRUE(t.find(2) == t.end());
1567 EXPECT_THAT(*t.find(3), 3);
1568 EXPECT_EQ(1, t.size());
1569
1570 // DELETED DELETED DELETED
1571 t.erase(t.find(3));
1572 EXPECT_TRUE(t.find(1) == t.end());
1573 EXPECT_TRUE(t.find(2) == t.end());
1574 EXPECT_TRUE(t.find(3) == t.end());
1575 EXPECT_EQ(0, t.size());
1576 }
1577
1578 TEST(Table, EraseInsertProbing) {
1579 BadTable t(100);
1580
1581 // 1 2 3 4
1582 t.emplace(1);
1583 t.emplace(2);
1584 t.emplace(3);
1585 t.emplace(4);
1586
1587 // 1 DELETED 3 DELETED
1588 t.erase(t.find(2));
1589 t.erase(t.find(4));
1590
1591 // 1 10 3 11 12
1592 t.emplace(10);
1593 t.emplace(11);
1594 t.emplace(12);
1595
1596 EXPECT_EQ(5, t.size());
1597 EXPECT_THAT(t, UnorderedElementsAre(1, 10, 3, 11, 12));
1598 }
1599
1600 TEST(Table, Clear) {
1601 IntTable t;
1602 EXPECT_TRUE(t.find(0) == t.end());
1603 t.clear();
1604 EXPECT_TRUE(t.find(0) == t.end());
1605 auto res = t.emplace(0);
1606 EXPECT_TRUE(res.second);
1607 EXPECT_EQ(1, t.size());
1608 t.clear();
1609 EXPECT_EQ(0, t.size());
1610 EXPECT_TRUE(t.find(0) == t.end());
1611 }
1612
1613 TEST(Table, Swap) {
1614 IntTable t;
1615 EXPECT_TRUE(t.find(0) == t.end());
1616 auto res = t.emplace(0);
1617 EXPECT_TRUE(res.second);
1618 EXPECT_EQ(1, t.size());
1619 IntTable u;
1620 t.swap(u);
1621 EXPECT_EQ(0, t.size());
1622 EXPECT_EQ(1, u.size());
1623 EXPECT_TRUE(t.find(0) == t.end());
1624 EXPECT_THAT(*u.find(0), 0);
1625 }
1626
1627 TEST(Table, Rehash) {
1628 IntTable t;
1629 EXPECT_TRUE(t.find(0) == t.end());
1630 t.emplace(0);
1631 t.emplace(1);
1632 EXPECT_EQ(2, t.size());
1633 t.rehash(128);
1634 EXPECT_EQ(2, t.size());
1635 EXPECT_THAT(*t.find(0), 0);
1636 EXPECT_THAT(*t.find(1), 1);
1637 }
1638
1639 TEST(Table, RehashDoesNotRehashWhenNotNecessary) {
1640 IntTable t;
1641 t.emplace(0);
1642 t.emplace(1);
1643 auto* p = &*t.find(0);
1644 t.rehash(1);
1645 EXPECT_EQ(p, &*t.find(0));
1646 }
1647
1648 TEST(Table, RehashZeroDoesNotAllocateOnEmptyTable) {
1649 IntTable t;
1650 t.rehash(0);
1651 EXPECT_EQ(0, t.bucket_count());
1652 }
1653
1654 TEST(Table, RehashZeroDeallocatesEmptyTable) {
1655 IntTable t;
1656 t.emplace(0);
1657 t.clear();
1658 EXPECT_NE(0, t.bucket_count());
1659 t.rehash(0);
1660 EXPECT_EQ(0, t.bucket_count());
1661 }
1662
1663 TEST(Table, RehashZeroForcesRehash) {
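// rehash(0) with live elements forces a full rehash, so element addresses
// must change even though the capacity is already sufficient.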
1664 IntTable t;
1665 t.emplace(0);
1666 t.emplace(1);
1667 auto* p = &*t.find(0);
1668 t.rehash(0);
1669 EXPECT_NE(p, &*t.find(0));
1670 }
1671
1672 TEST(Table, ConstructFromInitList) {
1673 using P = std::pair<std::string, std::string>;
1674 struct Q {
1675 operator P() const { return {}; } // NOLINT
1676 };
1677 StringTable t = {P(), Q(), {}, {{}, {}}};
1678 }
1679
1680 TEST(Table, CopyConstruct) {
1681 IntTable t;
1682 t.emplace(0);
1683 EXPECT_EQ(1, t.size());
1684 {
1685 IntTable u(t);
1686 EXPECT_EQ(1, u.size());
1687 EXPECT_THAT(*u.find(0), 0);
1688 }
1689 {
1690 IntTable u{t};
1691 EXPECT_EQ(1, u.size());
1692 EXPECT_THAT(*u.find(0), 0);
1693 }
1694 {
1695 IntTable u = t;
1696 EXPECT_EQ(1, u.size());
1697 EXPECT_THAT(*u.find(0), 0);
1698 }
1699 }
1700
1701 TEST(Table, CopyConstructWithAlloc) {
1702 StringTable t;
1703 t.emplace("a", "b");
1704 EXPECT_EQ(1, t.size());
1705 StringTable u(t, Alloc<std::pair<std::string, std::string>>());
1706 EXPECT_EQ(1, u.size());
1707 EXPECT_THAT(*u.find("a"), Pair("a", "b"));
1708 }
1709
1710 struct ExplicitAllocIntTable
1711 : raw_hash_set<IntPolicy, hash_default_hash<int64_t>,
1712 std::equal_to<int64_t>, Alloc<int64_t>> {
1713 ExplicitAllocIntTable() = default;
1714 };
1715
1716 TEST(Table, AllocWithExplicitCtor) {
1717 ExplicitAllocIntTable t;
1718 EXPECT_EQ(0, t.size());
1719 }
1720
1721 TEST(Table, MoveConstruct) {
1722 {
1723 StringTable t;
1724 t.emplace("a", "b");
1725 EXPECT_EQ(1, t.size());
1726
1727 StringTable u(std::move(t));
1728 EXPECT_EQ(1, u.size());
1729 EXPECT_THAT(*u.find("a"), Pair("a", "b"));
1730 }
1731 {
1732 StringTable t;
1733 t.emplace("a", "b");
1734 EXPECT_EQ(1, t.size());
1735
1736 StringTable u{std::move(t)};
1737 EXPECT_EQ(1, u.size());
1738 EXPECT_THAT(*u.find("a"), Pair("a", "b"));
1739 }
1740 {
1741 StringTable t;
1742 t.emplace("a", "b");
1743 EXPECT_EQ(1, t.size());
1744
1745 StringTable u = std::move(t);
1746 EXPECT_EQ(1, u.size());
1747 EXPECT_THAT(*u.find("a"), Pair("a", "b"));
1748 }
1749 }
1750
1751 TEST(Table, MoveConstructWithAlloc) {
1752 StringTable t;
1753 t.emplace("a", "b");
1754 EXPECT_EQ(1, t.size());
1755 StringTable u(std::move(t), Alloc<std::pair<std::string, std::string>>());
1756 EXPECT_EQ(1, u.size());
1757 EXPECT_THAT(*u.find("a"), Pair("a", "b"));
1758 }
1759
1760 TEST(Table, CopyAssign) {
1761 StringTable t;
1762 t.emplace("a", "b");
1763 EXPECT_EQ(1, t.size());
1764 StringTable u;
1765 u = t;
1766 EXPECT_EQ(1, u.size());
1767 EXPECT_THAT(*u.find("a"), Pair("a", "b"));
1768 }
1769
1770 TEST(Table, CopySelfAssign) {
1771 StringTable t;
1772 t.emplace("a", "b");
1773 EXPECT_EQ(1, t.size());
1774 t = *&t;
1775 EXPECT_EQ(1, t.size());
1776 EXPECT_THAT(*t.find("a"), Pair("a", "b"));
1777 }
1778
1779 TEST(Table, MoveAssign) {
1780 StringTable t;
1781 t.emplace("a", "b");
1782 EXPECT_EQ(1, t.size());
1783 StringTable u;
1784 u = std::move(t);
1785 EXPECT_EQ(1, u.size());
1786 EXPECT_THAT(*u.find("a"), Pair("a", "b"));
1787 }
1788
1789 TEST(Table, MoveSelfAssign) {
1790 StringTable t;
1791 t.emplace("a", "b");
1792 EXPECT_EQ(1, t.size());
1793 t = std::move(*&t);
1794 // As long as we don't crash, it's fine.
1795 }
1796
1797 TEST(Table, Equality) {
1798 StringTable t;
1799 std::vector<std::pair<std::string, std::string>> v = {{"a", "b"},
1800 {"aa", "bb"}};
1801 t.insert(std::begin(v), std::end(v));
1802 StringTable u = t;
1803 EXPECT_EQ(u, t);
1804 }
1805
1806 TEST(Table, Equality2) {
1807 StringTable t;
1808 std::vector<std::pair<std::string, std::string>> v1 = {{"a", "b"},
1809 {"aa", "bb"}};
1810 t.insert(std::begin(v1), std::end(v1));
1811 StringTable u;
1812 std::vector<std::pair<std::string, std::string>> v2 = {{"a", "a"},
1813 {"aa", "aa"}};
1814 u.insert(std::begin(v2), std::end(v2));
1815 EXPECT_NE(u, t);
1816 }
1817
1818 TEST(Table, Equality3) {
1819 StringTable t;
1820 std::vector<std::pair<std::string, std::string>> v1 = {{"b", "b"},
1821 {"bb", "bb"}};
1822 t.insert(std::begin(v1), std::end(v1));
1823 StringTable u;
1824 std::vector<std::pair<std::string, std::string>> v2 = {{"a", "a"},
1825 {"aa", "aa"}};
1826 u.insert(std::begin(v2), std::end(v2));
1827 EXPECT_NE(u, t);
1828 }
1829
1830 TEST(Table, NumDeletedRegression) {
1831 IntTable t;
1832 t.emplace(0);
1833 t.erase(t.find(0));
1834 // construct over a deleted slot.
1835 t.emplace(0);
1836 t.clear();
1837 }
1838
1839 TEST(Table, FindFullDeletedRegression) {
1840 IntTable t;
1841 for (int i = 0; i < 1000; ++i) {
1842 t.emplace(i);
1843 t.erase(t.find(i));
1844 }
1845 EXPECT_EQ(0, t.size());
1846 }
1847
1848 TEST(Table, ReplacingDeletedSlotDoesNotRehash) {
1849 size_t n;
1850 {
1851 // Compute n such that n is the maximum number of elements before rehash.
1852 IntTable t;
1853 t.emplace(0);
1854 size_t c = t.bucket_count();
1855 for (n = 1; c == t.bucket_count(); ++n) t.emplace(n);
1856 --n;
1857 }
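// Fill a fresh table up to that threshold, then verify that erasing and
// re-inserting a key reuses the deleted slot instead of growing the table.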
1858 IntTable t;
1859 t.rehash(n);
1860 const size_t c = t.bucket_count();
1861 for (size_t i = 0; i != n; ++i) t.emplace(i);
1862 EXPECT_EQ(c, t.bucket_count()) << "rehashing threshold = " << n;
1863 t.erase(0);
1864 t.emplace(0);
1865 EXPECT_EQ(c, t.bucket_count()) << "rehashing threshold = " << n;
1866 }
1867
1868 TEST(Table, NoThrowMoveConstruct) {
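// First check that the hash, equality, and allocator types are themselves
// nothrow copy constructible; only then is the expectation on StringTable
// below meaningful.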
1869 ASSERT_TRUE(
1870 std::is_nothrow_copy_constructible<absl::Hash<absl::string_view>>::value);
1871 ASSERT_TRUE(std::is_nothrow_copy_constructible<
1872 std::equal_to<absl::string_view>>::value);
1873 ASSERT_TRUE(std::is_nothrow_copy_constructible<std::allocator<int>>::value);
1874 EXPECT_TRUE(std::is_nothrow_move_constructible<StringTable>::value);
1875 }
1876
1877 TEST(Table, NoThrowMoveAssign) {
1878 ASSERT_TRUE(
1879 std::is_nothrow_move_assignable<absl::Hash<absl::string_view>>::value);
1880 ASSERT_TRUE(
1881 std::is_nothrow_move_assignable<std::equal_to<absl::string_view>>::value);
1882 ASSERT_TRUE(std::is_nothrow_move_assignable<std::allocator<int>>::value);
1883 ASSERT_TRUE(
1884 absl::allocator_traits<std::allocator<int>>::is_always_equal::value);
1885 EXPECT_TRUE(std::is_nothrow_move_assignable<StringTable>::value);
1886 }
1887
1888 TEST(Table, NoThrowSwappable) {
1889 ASSERT_TRUE(
1890 container_internal::IsNoThrowSwappable<absl::Hash<absl::string_view>>());
1891 ASSERT_TRUE(container_internal::IsNoThrowSwappable<
1892 std::equal_to<absl::string_view>>());
1893 ASSERT_TRUE(container_internal::IsNoThrowSwappable<std::allocator<int>>());
1894 EXPECT_TRUE(container_internal::IsNoThrowSwappable<StringTable>());
1895 }
1896
1897 TEST(Table, HeterogeneousLookup) {
1898 struct Hash {
1899 size_t operator()(int64_t i) const { return i; }
1900 size_t operator()(double i) const {
1901 ADD_FAILURE();
1902 return i;
1903 }
1904 };
1905 struct Eq {
1906 bool operator()(int64_t a, int64_t b) const { return a == b; }
1907 bool operator()(double a, int64_t b) const {
1908 ADD_FAILURE();
1909 return a == b;
1910 }
1911 bool operator()(int64_t a, double b) const {
1912 ADD_FAILURE();
1913 return a == b;
1914 }
1915 bool operator()(double a, double b) const {
1916 ADD_FAILURE();
1917 return a == b;
1918 }
1919 };
1920
1921 struct THash {
1922 using is_transparent = void;
1923 size_t operator()(int64_t i) const { return i; }
1924 size_t operator()(double i) const { return i; }
1925 };
1926 struct TEq {
1927 using is_transparent = void;
1928 bool operator()(int64_t a, int64_t b) const { return a == b; }
1929 bool operator()(double a, int64_t b) const { return a == b; }
1930 bool operator()(int64_t a, double b) const { return a == b; }
1931 bool operator()(double a, double b) const { return a == b; }
1932 };
1933
1934 raw_hash_set<IntPolicy, Hash, Eq, Alloc<int64_t>> s{0, 1, 2};
1935 // It will convert to int64_t before the query.
1936 EXPECT_EQ(1, *s.find(double{1.1}));
1937
1938 raw_hash_set<IntPolicy, THash, TEq, Alloc<int64_t>> ts{0, 1, 2};
1939 // It will try to use the double, and fail to find the object.
1940 EXPECT_TRUE(ts.find(1.1) == ts.end());
1941 }
1942
1943 template <class Table>
1944 using CallFind = decltype(std::declval<Table&>().find(17));
1945
1946 template <class Table>
1947 using CallErase = decltype(std::declval<Table&>().erase(17));
1948
1949 template <class Table>
1950 using CallExtract = decltype(std::declval<Table&>().extract(17));
1951
1952 template <class Table>
1953 using CallPrefetch = decltype(std::declval<Table&>().prefetch(17));
1954
1955 template <class Table>
1956 using CallCount = decltype(std::declval<Table&>().count(17));
1957
1958 template <template <typename> class C, class Table, class = void>
1959 struct VerifyResultOf : std::false_type {};
1960
1961 template <template <typename> class C, class Table>
1962 struct VerifyResultOf<C, Table, absl::void_t<C<Table>>> : std::true_type {};
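// Detection idiom: VerifyResultOf<C, Table> is true iff the call expression
// named by C<Table> is well-formed, i.e. the heterogeneous overload exists.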
1963
1964 TEST(Table, HeterogeneousLookupOverloads) {
1965 using NonTransparentTable =
1966 raw_hash_set<StringPolicy, absl::Hash<absl::string_view>,
1967 std::equal_to<absl::string_view>, std::allocator<int>>;
1968
1969 EXPECT_FALSE((VerifyResultOf<CallFind, NonTransparentTable>()));
1970 EXPECT_FALSE((VerifyResultOf<CallErase, NonTransparentTable>()));
1971 EXPECT_FALSE((VerifyResultOf<CallExtract, NonTransparentTable>()));
1972 EXPECT_FALSE((VerifyResultOf<CallPrefetch, NonTransparentTable>()));
1973 EXPECT_FALSE((VerifyResultOf<CallCount, NonTransparentTable>()));
1974
1975 using TransparentTable =
1976 raw_hash_set<StringPolicy, hash_default_hash<absl::string_view>,
1977 hash_default_eq<absl::string_view>, std::allocator<int>>;
1978
1979 EXPECT_TRUE((VerifyResultOf<CallFind, TransparentTable>()));
1980 EXPECT_TRUE((VerifyResultOf<CallErase, TransparentTable>()));
1981 EXPECT_TRUE((VerifyResultOf<CallExtract, TransparentTable>()));
1982 EXPECT_TRUE((VerifyResultOf<CallPrefetch, TransparentTable>()));
1983 EXPECT_TRUE((VerifyResultOf<CallCount, TransparentTable>()));
1984 }
1985
1986 TEST(Iterator, IsDefaultConstructible) {
1987 StringTable::iterator i;
1988 EXPECT_TRUE(i == StringTable::iterator());
1989 }
1990
1991 TEST(ConstIterator, IsDefaultConstructible) {
1992 StringTable::const_iterator i;
1993 EXPECT_TRUE(i == StringTable::const_iterator());
1994 }
1995
1996 TEST(Iterator, ConvertsToConstIterator) {
1997 StringTable::iterator i;
1998 EXPECT_TRUE(i == StringTable::const_iterator());
1999 }
2000
2001 TEST(Iterator, Iterates) {
2002 IntTable t;
2003 for (size_t i = 3; i != 6; ++i) EXPECT_TRUE(t.emplace(i).second);
2004 EXPECT_THAT(t, UnorderedElementsAre(3, 4, 5));
2005 }
2006
2007 TEST(Table, Merge) {
2008 StringTable t1, t2;
2009 t1.emplace("0", "-0");
2010 t1.emplace("1", "-1");
2011 t2.emplace("0", "~0");
2012 t2.emplace("2", "~2");
2013
2014 EXPECT_THAT(t1, UnorderedElementsAre(Pair("0", "-0"), Pair("1", "-1")));
2015 EXPECT_THAT(t2, UnorderedElementsAre(Pair("0", "~0"), Pair("2", "~2")));
2016
2017 t1.merge(t2);
2018 EXPECT_THAT(t1, UnorderedElementsAre(Pair("0", "-0"), Pair("1", "-1"),
2019 Pair("2", "~2")));
2020 EXPECT_THAT(t2, UnorderedElementsAre(Pair("0", "~0")));
2021 }
2022
2023 TEST(Table, IteratorEmplaceConstructibleRequirement) {
2024 struct Value {
2025 explicit Value(absl::string_view view) : value(view) {}
2026 std::string value;
2027
2028 bool operator==(const Value& other) const { return value == other.value; }
2029 };
2030 struct H {
2031 size_t operator()(const Value& v) const {
2032 return absl::Hash<std::string>{}(v.value);
2033 }
2034 };
2035
2036 struct Table : raw_hash_set<ValuePolicy<Value>, H, std::equal_to<Value>,
2037 std::allocator<Value>> {
2038 using Base = typename Table::raw_hash_set;
2039 using Base::Base;
2040 };
2041
2042 std::string input[3]{"A", "B", "C"};
2043
2044 Table t(std::begin(input), std::end(input));
2045 EXPECT_THAT(t, UnorderedElementsAre(Value{"A"}, Value{"B"}, Value{"C"}));
2046
2047 input[0] = "D";
2048 input[1] = "E";
2049 input[2] = "F";
2050 t.insert(std::begin(input), std::end(input));
2051 EXPECT_THAT(t, UnorderedElementsAre(Value{"A"}, Value{"B"}, Value{"C"},
2052 Value{"D"}, Value{"E"}, Value{"F"}));
2053 }
2054
2055 TEST(Nodes, EmptyNodeType) {
2056 using node_type = StringTable::node_type;
2057 node_type n;
2058 EXPECT_FALSE(n);
2059 EXPECT_TRUE(n.empty());
2060
2061 EXPECT_TRUE((std::is_same<node_type::allocator_type,
2062 StringTable::allocator_type>::value));
2063 }
2064
2065 TEST(Nodes, ExtractInsert) {
2066 constexpr char k0[] = "Very long string zero.";
2067 constexpr char k1[] = "Very long string one.";
2068 constexpr char k2[] = "Very long string two.";
2069 StringTable t = {{k0, ""}, {k1, ""}, {k2, ""}};
2070 EXPECT_THAT(t,
2071 UnorderedElementsAre(Pair(k0, ""), Pair(k1, ""), Pair(k2, "")));
2072
2073 auto node = t.extract(k0);
2074 EXPECT_THAT(t, UnorderedElementsAre(Pair(k1, ""), Pair(k2, "")));
2075 EXPECT_TRUE(node);
2076 EXPECT_FALSE(node.empty());
2077
2078 StringTable t2;
2079 StringTable::insert_return_type res = t2.insert(std::move(node));
2080 EXPECT_TRUE(res.inserted);
2081 EXPECT_THAT(*res.position, Pair(k0, ""));
2082 EXPECT_FALSE(res.node);
2083 EXPECT_THAT(t2, UnorderedElementsAre(Pair(k0, "")));
2084
2085 // Not there.
2086 EXPECT_THAT(t, UnorderedElementsAre(Pair(k1, ""), Pair(k2, "")));
2087 node = t.extract("Not there!");
2088 EXPECT_THAT(t, UnorderedElementsAre(Pair(k1, ""), Pair(k2, "")));
2089 EXPECT_FALSE(node);
2090
2091 // Inserting nothing.
2092 res = t2.insert(std::move(node));
2093 EXPECT_FALSE(res.inserted);
2094 EXPECT_EQ(res.position, t2.end());
2095 EXPECT_FALSE(res.node);
2096 EXPECT_THAT(t2, UnorderedElementsAre(Pair(k0, "")));
2097
2098 t.emplace(k0, "1");
2099 node = t.extract(k0);
2100
2101 // Insert duplicate.
2102 res = t2.insert(std::move(node));
2103 EXPECT_FALSE(res.inserted);
2104 EXPECT_THAT(*res.position, Pair(k0, ""));
2105 EXPECT_TRUE(res.node);
2106 EXPECT_FALSE(node); // NOLINT(bugprone-use-after-move)
2107 }
2108
2109 TEST(Nodes, HintInsert) {
2110 IntTable t = {1, 2, 3};
2111 auto node = t.extract(1);
2112 EXPECT_THAT(t, UnorderedElementsAre(2, 3));
2113 auto it = t.insert(t.begin(), std::move(node));
2114 EXPECT_THAT(t, UnorderedElementsAre(1, 2, 3));
2115 EXPECT_EQ(*it, 1);
2116 EXPECT_FALSE(node); // NOLINT(bugprone-use-after-move)
2117
2118 node = t.extract(2);
2119 EXPECT_THAT(t, UnorderedElementsAre(1, 3));
2120 // reinsert 2 to make the next insert fail.
2121 t.insert(2);
2122 EXPECT_THAT(t, UnorderedElementsAre(1, 2, 3));
2123 it = t.insert(t.begin(), std::move(node));
2124 EXPECT_EQ(*it, 2);
2125 // The node was not emptied by the insert call.
2126 EXPECT_TRUE(node); // NOLINT(bugprone-use-after-move)
2127 }
2128
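// Returns a table containing the values [0, size).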
2129 IntTable MakeSimpleTable(size_t size) {
2130 IntTable t;
2131 while (t.size() < size) t.insert(t.size());
2132 return t;
2133 }
2134
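// Returns the elements of the table in iteration order.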
2135 std::vector<int> OrderOfIteration(const IntTable& t) {
2136 return {t.begin(), t.end()};
2137 }
2138
2139 // These IterationOrderChanges tests depend on non-deterministic behavior.
2140 // We inject non-determinism from the table's pointer, but do so in a way
2141 // where only the memory page matters. We have to retry enough times to make sure
2142 // we are touching different memory pages to cause the ordering to change.
2143 // We also need to keep the old tables around to avoid getting the same memory
2144 // blocks over and over.
2145 TEST(Table, IterationOrderChangesByInstance) {
2146 for (size_t size : {2, 6, 12, 20}) {
2147 const auto reference_table = MakeSimpleTable(size);
2148 const auto reference = OrderOfIteration(reference_table);
2149
2150 std::vector<IntTable> tables;
2151 bool found_difference = false;
2152 for (int i = 0; !found_difference && i < 5000; ++i) {
2153 tables.push_back(MakeSimpleTable(size));
2154 found_difference = OrderOfIteration(tables.back()) != reference;
2155 }
2156 if (!found_difference) {
2157 FAIL()
2158 << "Iteration order remained the same across many attempts with size "
2159 << size;
2160 }
2161 }
2162 }
2163
2164 TEST(Table, IterationOrderChangesOnRehash) {
2165 std::vector<IntTable> garbage;
2166 for (int i = 0; i < 5000; ++i) {
2167 auto t = MakeSimpleTable(20);
2168 const auto reference = OrderOfIteration(t);
2169 // Force rehash to the same size.
2170 t.rehash(0);
2171 auto trial = OrderOfIteration(t);
2172 if (trial != reference) {
2173 // We are done.
2174 return;
2175 }
2176 garbage.push_back(std::move(t));
2177 }
2178 FAIL() << "Iteration order remained the same across many attempts.";
2179 }
2180
2181 // Verify that pointers are invalidated as soon as a second element is inserted.
2182 // This prevents dependency on pointer stability on small tables.
2183 TEST(Table, UnstablePointers) {
2184 IntTable table;
2185
2186 const auto addr = [&](int i) {
2187 return reinterpret_cast<uintptr_t>(&*table.find(i));
2188 };
2189
2190 table.insert(0);
2191 const uintptr_t old_ptr = addr(0);
2192
2193 // This causes a rehash.
2194 table.insert(1);
2195
2196 EXPECT_NE(old_ptr, addr(0));
2197 }
2198
2199 TEST(TableDeathTest, InvalidIteratorAsserts) {
2200 if (!IsAssertEnabled() && !SwisstableGenerationsEnabled())
2201 GTEST_SKIP() << "Assertions not enabled.";
2202
2203 IntTable t;
2204 // Extra simple "regexp" as regexp support is highly varied across platforms.
2205 EXPECT_DEATH_IF_SUPPORTED(t.erase(t.end()),
2206 "erase.* called on end.. iterator.");
2207 typename IntTable::iterator iter;
2208 EXPECT_DEATH_IF_SUPPORTED(
2209 ++iter, "operator.* called on default-constructed iterator.");
2210 t.insert(0);
2211 iter = t.begin();
2212 t.erase(iter);
2213 const char* const kErasedDeathMessage =
2214 SwisstableGenerationsEnabled()
2215 ? "operator.* called on invalid iterator.*was likely erased"
2216 : "operator.* called on invalid iterator.*might have been "
2217 "erased.*config=asan";
2218 EXPECT_DEATH_IF_SUPPORTED(++iter, kErasedDeathMessage);
2219 }
2220
2221 // Invalid iterator use can trigger use-after-free in asan/hwasan,
2222 // use-of-uninitialized-value in msan, or invalidated iterator assertions.
2223 constexpr const char* kInvalidIteratorDeathMessage =
2224 "use-after-free|use-of-uninitialized-value|invalidated "
2225 "iterator|Invalid iterator|invalid iterator";
2226
2227 // MSVC doesn't support | in regex.
2228 #if defined(_MSC_VER)
2229 constexpr bool kMsvc = true;
2230 #else
2231 constexpr bool kMsvc = false;
2232 #endif
2233
2234 TEST(TableDeathTest, IteratorInvalidAssertsEqualityOperator) {
2235 if (!IsAssertEnabled() && !SwisstableGenerationsEnabled())
2236 GTEST_SKIP() << "Assertions not enabled.";
2237
2238 IntTable t;
2239 t.insert(1);
2240 t.insert(2);
2241 t.insert(3);
2242 auto iter1 = t.begin();
2243 auto iter2 = std::next(iter1);
2244 ASSERT_NE(iter1, t.end());
2245 ASSERT_NE(iter2, t.end());
2246 t.erase(iter1);
2247 // Extra simple "regexp" as regexp support is highly varied across platforms.
2248 const char* const kErasedDeathMessage =
2249 SwisstableGenerationsEnabled()
2250 ? "Invalid iterator comparison.*was likely erased"
2251 : "Invalid iterator comparison.*might have been erased.*config=asan";
2252 EXPECT_DEATH_IF_SUPPORTED(void(iter1 == iter2), kErasedDeathMessage);
2253 EXPECT_DEATH_IF_SUPPORTED(void(iter2 != iter1), kErasedDeathMessage);
2254 t.erase(iter2);
2255 EXPECT_DEATH_IF_SUPPORTED(void(iter1 == iter2), kErasedDeathMessage);
2256
2257 IntTable t1, t2;
2258 t1.insert(0);
2259 t2.insert(0);
2260 iter1 = t1.begin();
2261 iter2 = t2.begin();
2262 const char* const kContainerDiffDeathMessage =
2263 SwisstableGenerationsEnabled()
2264 ? "Invalid iterator comparison.*iterators from different hashtables"
2265 : "Invalid iterator comparison.*may be from different "
2266 ".*containers.*config=asan";
2267 EXPECT_DEATH_IF_SUPPORTED(void(iter1 == iter2), kContainerDiffDeathMessage);
2268 EXPECT_DEATH_IF_SUPPORTED(void(iter2 == iter1), kContainerDiffDeathMessage);
2269
2270 for (int i = 0; i < 10; ++i) t1.insert(i);
2271 // There should have been a rehash in t1.
2272 if (kMsvc) return; // MSVC doesn't support | in regex.
2273
2274 // NOTE(b/293887834): After rehashing, iterators will contain pointers to
2275 // freed memory, which may be detected by ThreadSanitizer.
2276 const char* const kRehashedDeathMessage =
2277 SwisstableGenerationsEnabled()
2278 ? kInvalidIteratorDeathMessage
2279 : "Invalid iterator comparison.*might have rehashed.*config=asan"
2280 "|ThreadSanitizer: heap-use-after-free";
2281 EXPECT_DEATH_IF_SUPPORTED(void(iter1 == t1.begin()), kRehashedDeathMessage);
2282 }
2283
2284 #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
2285 TEST(RawHashSamplerTest, Sample) {
2286 // Enable the feature even if the prod default is off.
2287 SetHashtablezEnabled(true);
2288 SetHashtablezSampleParameter(100);
2289
2290 auto& sampler = GlobalHashtablezSampler();
2291 size_t start_size = 0;
2292 absl::flat_hash_set<const HashtablezInfo*> preexisting_info;
2293 start_size += sampler.Iterate([&](const HashtablezInfo& info) {
2294 preexisting_info.insert(&info);
2295 ++start_size;
2296 });
2297
2298 std::vector<IntTable> tables;
2299 for (int i = 0; i < 1000000; ++i) {
2300 tables.emplace_back();
2301
2302 const bool do_reserve = (i % 10 > 5);
2303 const bool do_rehash = !do_reserve && (i % 10 > 0);
2304
2305 if (do_reserve) {
2306 // Don't reserve on all tables.
2307 tables.back().reserve(10 * (i % 10));
2308 }
2309
2310 tables.back().insert(1);
2311 tables.back().insert(i % 5);
2312
2313 if (do_rehash) {
2314 // Rehash some other tables.
2315 tables.back().rehash(10 * (i % 10));
2316 }
2317 }
2318 size_t end_size = 0;
2319 absl::flat_hash_map<size_t, int> observed_checksums;
2320 absl::flat_hash_map<ssize_t, int> reservations;
2321 end_size += sampler.Iterate([&](const HashtablezInfo& info) {
2322 if (preexisting_info.count(&info) == 0) {
2323 observed_checksums[info.hashes_bitwise_xor.load(
2324 std::memory_order_relaxed)]++;
2325 reservations[info.max_reserve.load(std::memory_order_relaxed)]++;
2326 }
2327 EXPECT_EQ(info.inline_element_size, sizeof(int64_t));
2328 ++end_size;
2329 });
2330
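// With a sample parameter of 100, roughly 1 in 100 tables should be sampled,
// hence the 0.01 target ratio below.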
2331 EXPECT_NEAR((end_size - start_size) / static_cast<double>(tables.size()),
2332 0.01, 0.005);
2333 EXPECT_EQ(observed_checksums.size(), 5);
2334 for (const auto& [_, count] : observed_checksums) {
2335 EXPECT_NEAR((100 * count) / static_cast<double>(tables.size()), 0.2, 0.05);
2336 }
2337
2338 EXPECT_EQ(reservations.size(), 10);
2339 for (const auto& [reservation, count] : reservations) {
2340 EXPECT_GE(reservation, 0);
2341 EXPECT_LT(reservation, 100);
2342
2343 EXPECT_NEAR((100 * count) / static_cast<double>(tables.size()), 0.1, 0.05)
2344 << reservation;
2345 }
2346 }
2347 #endif // ABSL_INTERNAL_HASHTABLEZ_SAMPLE
2348
2349 TEST(RawHashSamplerTest, DoNotSampleCustomAllocators) {
2350 // Enable the feature even if the prod default is off.
2351 SetHashtablezEnabled(true);
2352 SetHashtablezSampleParameter(100);
2353
2354 auto& sampler = GlobalHashtablezSampler();
2355 size_t start_size = 0;
2356 start_size += sampler.Iterate([&](const HashtablezInfo&) { ++start_size; });
2357
2358 std::vector<CustomAllocIntTable> tables;
2359 for (int i = 0; i < 1000000; ++i) {
2360 tables.emplace_back();
2361 tables.back().insert(1);
2362 }
2363 size_t end_size = 0;
2364 end_size += sampler.Iterate([&](const HashtablezInfo&) { ++end_size; });
2365
2366 EXPECT_NEAR((end_size - start_size) / static_cast<double>(tables.size()),
2367 0.00, 0.001);
2368 }
2369
2370 #ifdef ABSL_HAVE_ADDRESS_SANITIZER
2371 template <class TableType>
2372 class SanitizerTest : public testing::Test {};
2373
2374 TYPED_TEST_SUITE_P(SanitizerTest);
2375
2376 TYPED_TEST_P(SanitizerTest, PoisoningUnused) {
2377 TypeParam t;
2378 for (size_t reserve_size = 2; reserve_size < 1024;
2379 reserve_size = reserve_size * 3 / 2) {
2380 t.reserve(reserve_size);
2381 // Insert something to force an allocation.
2382 int64_t& v = *t.insert(0).first;
2383
2384 // Make sure there is something to test.
2385 ASSERT_GT(t.capacity(), 1);
2386
2387 int64_t* slots = RawHashSetTestOnlyAccess::GetSlots(t);
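// Every slot except the one holding the inserted value should be poisoned.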
2388 for (size_t i = 0; i < t.capacity(); ++i) {
2389 EXPECT_EQ(slots + i != &v, __asan_address_is_poisoned(slots + i)) << i;
2390 }
2391 }
2392 }
2393
2394 REGISTER_TYPED_TEST_SUITE_P(SanitizerTest, PoisoningUnused);
2395 using SanitizerTableTypes = ::testing::Types<IntTable, TransferableIntTable>;
2396 INSTANTIATE_TYPED_TEST_SUITE_P(InstanceSanitizerTest, SanitizerTest,
2397 SanitizerTableTypes);
2398
2399 TEST(Sanitizer, PoisoningOnErase) {
2400 IntTable t;
2401 int64_t& v = *t.insert(0).first;
2402
2403 EXPECT_FALSE(__asan_address_is_poisoned(&v));
2404 t.erase(0);
2405 EXPECT_TRUE(__asan_address_is_poisoned(&v));
2406 }
2407 #endif // ABSL_HAVE_ADDRESS_SANITIZER
2408
2409 template <typename T>
2410 class AlignOneTest : public ::testing::Test {};
2411 using AlignOneTestTypes =
2412 ::testing::Types<Uint8Table, MinimumAlignmentUint8Table>;
2413 TYPED_TEST_SUITE(AlignOneTest, AlignOneTestTypes);
2414
2415 TYPED_TEST(AlignOneTest, AlignOne) {
2416 // We previously had a bug in which we were copying a control byte over the
2417 // first slot when alignof(value_type) is 1. We test repeated
2418 // insertions/erases and verify that the behavior is correct.
2419 TypeParam t;
2420 std::unordered_set<uint8_t> verifier; // NOLINT
2421
2422 // Do repeated insertions/erases from the table.
2423 for (int64_t i = 0; i < 100000; ++i) {
2424 SCOPED_TRACE(i);
2425 const uint8_t u = (i * -i) & 0xFF;
2426 auto it = t.find(u);
2427 auto verifier_it = verifier.find(u);
2428 if (it == t.end()) {
2429 ASSERT_EQ(verifier_it, verifier.end());
2430 t.insert(u);
2431 verifier.insert(u);
2432 } else {
2433 ASSERT_NE(verifier_it, verifier.end());
2434 t.erase(it);
2435 verifier.erase(verifier_it);
2436 }
2437 }
2438
2439 EXPECT_EQ(t.size(), verifier.size());
2440 for (uint8_t u : t) {
2441 EXPECT_EQ(verifier.count(u), 1);
2442 }
2443 }
2444
2445 TEST(Iterator, InvalidUseCrashesWithSanitizers) {
2446 if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled.";
2447 if (kMsvc) GTEST_SKIP() << "MSVC doesn't support | in regexp.";
2448
2449 IntTable t;
2450 // Start with 1 element so that `it` is never an end iterator.
2451 t.insert(-1);
2452 for (int i = 0; i < 10; ++i) {
2453 auto it = t.begin();
2454 t.insert(i);
2455 EXPECT_DEATH_IF_SUPPORTED(*it, kInvalidIteratorDeathMessage);
2456 EXPECT_DEATH_IF_SUPPORTED(void(it == t.begin()),
2457 kInvalidIteratorDeathMessage);
2458 }
2459 }
2460
2461 TEST(Iterator, InvalidUseWithReserveCrashesWithSanitizers) {
2462 if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled.";
2463 if (kMsvc) GTEST_SKIP() << "MSVC doesn't support | in regexp.";
2464
2465 IntTable t;
2466 t.reserve(10);
2467 t.insert(0);
2468 auto it = t.begin();
2469 // Reserved growth can't rehash.
2470 for (int i = 1; i < 10; ++i) {
2471 t.insert(i);
2472 EXPECT_EQ(*it, 0);
2473 }
2474 // ptr will become invalidated on rehash.
2475 const int64_t* ptr = &*it;
2476 (void)ptr;
2477
2478 // erase decreases size but does not decrease reserved growth so the next
2479 // insertion still invalidates iterators.
2480 t.erase(0);
2481 // Once the reserved growth reaches zero, the next insert is guaranteed to
2482 // rehash when generations are enabled.
2483 t.insert(10);
2484 EXPECT_DEATH_IF_SUPPORTED(*it, kInvalidIteratorDeathMessage);
2485 EXPECT_DEATH_IF_SUPPORTED(void(it == t.begin()),
2486 kInvalidIteratorDeathMessage);
2487 #ifdef ABSL_HAVE_ADDRESS_SANITIZER
2488 EXPECT_DEATH_IF_SUPPORTED(std::cout << *ptr, "heap-use-after-free");
2489 #endif
2490 }
2491
2492 TEST(Iterator, InvalidUseWithMoveCrashesWithSanitizers) {
2493 if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled.";
2494 if (kMsvc) GTEST_SKIP() << "MSVC doesn't support | in regexp.";
2495
2496 IntTable t1, t2;
2497 t1.insert(1);
2498 auto it = t1.begin();
2499 // ptr will become invalidated on rehash.
2500 const int64_t* ptr = &*it;
2501 (void)ptr;
2502
2503 t2 = std::move(t1);
2504 EXPECT_DEATH_IF_SUPPORTED(*it, kInvalidIteratorDeathMessage);
2505 EXPECT_DEATH_IF_SUPPORTED(void(it == t2.begin()),
2506 kInvalidIteratorDeathMessage);
2507 #ifdef ABSL_HAVE_ADDRESS_SANITIZER
2508 EXPECT_DEATH_IF_SUPPORTED(std::cout << *ptr, "heap-use-after-free");
2509 #endif
2510 }
2511
2512 TEST(Table, ReservedGrowthUpdatesWhenTableDoesntGrow) {
2513 IntTable t;
2514 for (int i = 0; i < 8; ++i) t.insert(i);
2515 // Want to insert twice without invalidating iterators so reserve.
2516 const size_t cap = t.capacity();
2517 t.reserve(t.size() + 2);
2518 // We want to test the case in which the reserve doesn't grow the table.
2519 ASSERT_EQ(cap, t.capacity());
2520 auto it = t.find(0);
2521 t.insert(100);
2522 t.insert(200);
2523 // `it` shouldn't have been invalidated.
2524 EXPECT_EQ(*it, 0);
2525 }
2526
2527 TEST(Table, EraseBeginEndResetsReservedGrowth) {
2528 bool frozen = false;
2529 BadHashFreezableIntTable t{FreezableAlloc<int64_t>(&frozen)};
2530 t.reserve(100);
2531 const size_t cap = t.capacity();
2532 frozen = true; // no further allocs allowed
2533
2534 for (int i = 0; i < 10; ++i) {
2535 // Create a long run (hash function returns constant).
2536 for (int j = 0; j < 100; ++j) t.insert(j);
2537 // Erase elements from the middle of the long run, which creates tombstones.
2538 for (int j = 30; j < 60; ++j) t.erase(j);
2539 EXPECT_EQ(t.size(), 70);
2540 EXPECT_EQ(t.capacity(), cap);
2541 ASSERT_EQ(RawHashSetTestOnlyAccess::CountTombstones(t), 30);
2542
2543 t.erase(t.begin(), t.end());
2544
2545 EXPECT_EQ(t.size(), 0);
2546 EXPECT_EQ(t.capacity(), cap);
2547 ASSERT_EQ(RawHashSetTestOnlyAccess::CountTombstones(t), 0);
2548 }
2549 }
2550
2551 TEST(Table, GenerationInfoResetsOnClear) {
2552 if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled.";
2553 if (kMsvc) GTEST_SKIP() << "MSVC doesn't support | in regexp.";
2554
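// clear() should reset the reserved growth from the reserve() above, so the
// second insert below is allowed to rehash and must invalidate `it`.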
2555 IntTable t;
2556 for (int i = 0; i < 1000; ++i) t.insert(i);
2557 t.reserve(t.size() + 100);
2558
2559 t.clear();
2560
2561 t.insert(0);
2562 auto it = t.begin();
2563 t.insert(1);
2564 EXPECT_DEATH_IF_SUPPORTED(*it, kInvalidIteratorDeathMessage);
2565 }
2566
2567 TEST(Table, InvalidReferenceUseCrashesWithSanitizers) {
2568 if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled.";
2569 #ifdef ABSL_HAVE_MEMORY_SANITIZER
2570 GTEST_SKIP() << "MSan fails to detect some of these rehashes.";
2571 #endif
2572
2573 IntTable t;
2574 t.insert(0);
2575 // Rehashing is guaranteed on every insertion while capacity is less than
2576 // RehashProbabilityConstant().
2577 int64_t i = 0;
2578 while (t.capacity() <= RehashProbabilityConstant()) {
2579 // ptr will become invalidated on rehash.
2580 const int64_t* ptr = &*t.begin();
2581 t.insert(++i);
2582 EXPECT_DEATH_IF_SUPPORTED(std::cout << *ptr, "use-after-free") << i;
2583 }
2584 }
2585
2586 TEST(Iterator, InvalidComparisonDifferentTables) {
2587 if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled.";
2588
2589 IntTable t1, t2;
2590 IntTable::iterator default_constructed_iter;
2591 // We randomly use one of N empty generations for generations from empty
2592 // hashtables. In general, we won't always detect when iterators from
2593 // different empty hashtables are compared, but in this test case, we
2594 // should deterministically detect the error due to our randomness yielding
2595 // consecutive random generations.
2596 EXPECT_DEATH_IF_SUPPORTED(void(t1.end() == t2.end()),
2597 "Invalid iterator comparison.*empty hashtables");
2598 EXPECT_DEATH_IF_SUPPORTED(void(t1.end() == default_constructed_iter),
2599 "Invalid iterator comparison.*default-constructed");
2600 t1.insert(0);
2601 EXPECT_DEATH_IF_SUPPORTED(void(t1.begin() == t2.end()),
2602 "Invalid iterator comparison.*empty hashtable");
2603 EXPECT_DEATH_IF_SUPPORTED(void(t1.begin() == default_constructed_iter),
2604 "Invalid iterator comparison.*default-constructed");
2605 t2.insert(0);
2606 EXPECT_DEATH_IF_SUPPORTED(void(t1.begin() == t2.end()),
2607 "Invalid iterator comparison.*end.. iterator");
2608 EXPECT_DEATH_IF_SUPPORTED(void(t1.begin() == t2.begin()),
2609 "Invalid iterator comparison.*non-end");
2610 }
2611
2612 template <typename Alloc>
2613 using RawHashSetAlloc = raw_hash_set<IntPolicy, hash_default_hash<int64_t>,
2614 std::equal_to<int64_t>, Alloc>;
2615
2616 TEST(Table, AllocatorPropagation) { TestAllocPropagation<RawHashSetAlloc>(); }
2617
2618 struct CountedHash {
2619 size_t operator()(int value) const {
2620 ++count;
2621 return static_cast<size_t>(value);
2622 }
2623 mutable int count = 0;
2624 };
2625
2626 struct CountedHashIntTable
2627 : raw_hash_set<IntPolicy, CountedHash, std::equal_to<int>,
2628 std::allocator<int>> {
2629 using Base = typename CountedHashIntTable::raw_hash_set;
2630 using Base::Base;
2631 };
2632
2633 TEST(Table, CountedHash) {
2634 // Verify that raw_hash_set does not compute redundant hashes.
2635 #ifdef NDEBUG
2636 constexpr bool kExpectMinimumHashes = true;
2637 #else
2638 constexpr bool kExpectMinimumHashes = false;
2639 #endif
2640 if (!kExpectMinimumHashes) {
2641 GTEST_SKIP() << "Only run under NDEBUG: `assert` statements may cause "
2642 "redundant hashing.";
2643 }
2644
2645 using Table = CountedHashIntTable;
2646 auto HashCount = [](const Table& t) { return t.hash_function().count; };
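// Each block below checks how many times the hash functor runs for a given
// sequence of operations.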
2647 {
2648 Table t;
2649 EXPECT_EQ(HashCount(t), 0);
2650 }
2651 {
2652 Table t;
2653 t.insert(1);
2654 EXPECT_EQ(HashCount(t), 1);
2655 t.erase(1);
2656 EXPECT_EQ(HashCount(t), 2);
2657 }
2658 {
2659 Table t;
2660 t.insert(3);
2661 EXPECT_EQ(HashCount(t), 1);
2662 auto node = t.extract(3);
2663 EXPECT_EQ(HashCount(t), 2);
2664 t.insert(std::move(node));
2665 EXPECT_EQ(HashCount(t), 3);
2666 }
2667 {
2668 Table t;
2669 t.emplace(5);
2670 EXPECT_EQ(HashCount(t), 1);
2671 }
2672 {
2673 Table src;
2674 src.insert(7);
2675 Table dst;
2676 dst.merge(src);
2677 EXPECT_EQ(HashCount(dst), 1);
2678 }
2679 }
2680
2681 } // namespace
2682 } // namespace container_internal
2683 ABSL_NAMESPACE_END
2684 } // namespace absl
2685