• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright 2018 The Abseil Authors.
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 //      https://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14 //
15 // An open-addressing
16 // hashtable with quadratic probing.
17 //
18 // This is a low level hashtable on top of which different interfaces can be
19 // implemented, like flat_hash_set, node_hash_set, string_hash_set, etc.
20 //
21 // The table interface is similar to that of std::unordered_set. Notable
22 // differences are that most member functions support heterogeneous keys when
23 // BOTH the hash and eq functions are marked as transparent. They do so by
24 // providing a typedef called `is_transparent`.
25 //
26 // When heterogeneous lookup is enabled, functions that take key_type act as if
27 // they have an overload set like:
28 //
29 //   iterator find(const key_type& key);
30 //   template <class K>
31 //   iterator find(const K& key);
32 //
33 //   size_type erase(const key_type& key);
34 //   template <class K>
35 //   size_type erase(const K& key);
36 //
37 //   std::pair<iterator, iterator> equal_range(const key_type& key);
38 //   template <class K>
39 //   std::pair<iterator, iterator> equal_range(const K& key);
40 //
41 // When heterogeneous lookup is disabled, only the explicit `key_type` overloads
42 // exist.
43 //
44 // find() also supports passing the hash explicitly:
45 //
46 //   iterator find(const key_type& key, size_t hash);
47 //   template <class U>
48 //   iterator find(const U& key, size_t hash);
49 //
50 // In addition the pointer to element and iterator stability guarantees are
51 // weaker: all iterators and pointers are invalidated after a new element is
52 // inserted.
53 //
54 // IMPLEMENTATION DETAILS
55 //
56 // The table stores elements inline in a slot array. In addition to the slot
57 // array the table maintains some control state per slot. The extra state is one
58 // byte per slot and stores empty or deleted marks, or alternatively 7 bits from
59 // the hash of an occupied slot. The table is split into logical groups of
60 // slots, like so:
61 //
62 //      Group 1         Group 2        Group 3
63 // +---------------+---------------+---------------+
64 // | | | | | | | | | | | | | | | | | | | | | | | | |
65 // +---------------+---------------+---------------+
66 //
67 // On lookup the hash is split into two parts:
68 // - H2: 7 bits (those stored in the control bytes)
69 // - H1: the rest of the bits
70 // The groups are probed using H1. For each group the slots are matched to H2 in
71 // parallel. Because H2 is 7 bits (128 states) and the number of slots per group
72 // is low (8 or 16) in almost all cases a match in H2 is also a lookup hit.
73 //
74 // On insert, once the right group is found (as in lookup), its slots are
75 // filled in order.
76 //
77 // On erase a slot is cleared. In case the group did not have any empty slots
78 // before the erase, the erased slot is marked as deleted.
79 //
80 // Groups without empty slots (but maybe with deleted slots) extend the probe
81 // sequence. The probing algorithm is quadratic. Given N the number of groups,
82 // the probing function for the i'th probe is:
83 //
84 //   P(0) = H1 % N
85 //
86 //   P(i) = (P(i - 1) + i) % N
87 //
88 // This probing function guarantees that after N probes, all the groups of the
89 // table will be probed exactly once.
90 
91 #ifndef ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
92 #define ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
93 
94 #include <algorithm>
95 #include <cmath>
96 #include <cstdint>
97 #include <cstring>
98 #include <iterator>
99 #include <limits>
100 #include <memory>
101 #include <tuple>
102 #include <type_traits>
103 #include <utility>
104 
105 #include "absl/base/internal/bits.h"
106 #include "absl/base/internal/endian.h"
107 #include "absl/base/optimization.h"
108 #include "absl/base/port.h"
109 #include "absl/container/internal/common.h"
110 #include "absl/container/internal/compressed_tuple.h"
111 #include "absl/container/internal/container_memory.h"
112 #include "absl/container/internal/hash_policy_traits.h"
113 #include "absl/container/internal/hashtable_debug_hooks.h"
114 #include "absl/container/internal/hashtablez_sampler.h"
115 #include "absl/container/internal/have_sse.h"
116 #include "absl/container/internal/layout.h"
117 #include "absl/memory/memory.h"
118 #include "absl/meta/type_traits.h"
119 #include "absl/utility/utility.h"
120 
121 namespace absl {
122 ABSL_NAMESPACE_BEGIN
123 namespace container_internal {
124 
// Swaps the two allocators; selected when the allocator type propagates on
// container swap (propagate_on_container_swap is true_type).
template <typename AllocType>
void SwapAlloc(AllocType& a, AllocType& b,
               std::true_type /* propagate_on_container_swap */) {
  using std::swap;  // allow ADL to find a user-provided swap
  swap(a, b);
}
// No-op overload: the allocator does not propagate on container swap, so the
// allocators are intentionally left untouched.
template <typename AllocType>
void SwapAlloc(AllocType& /*lhs*/, AllocType& /*rhs*/,
               std::false_type /* propagate_on_container_swap */) {
}
134 
// Produces the sequence of group offsets visited by a quadratic probe over a
// table whose size is `mask + 1` (which must be a power of two). `Width` is
// the number of slots per group; each call to next() advances by an
// ever-growing stride, which visits every group exactly once per cycle.
template <size_t Width>
class probe_seq {
 public:
  probe_seq(size_t hash, size_t mask) : mask_(mask), offset_(hash & mask) {
    assert(((mask + 1) & mask) == 0 && "not a mask");
  }

  // Offset of the group currently being probed.
  size_t offset() const { return offset_; }
  // Offset of the i-th slot within the current group, wrapped to the table.
  size_t offset(size_t i) const { return (offset_ + i) & mask_; }

  // Advances to the next group in the quadratic sequence.
  void next() {
    index_ += Width;
    offset_ = (offset_ + index_) & mask_;
  }

  // 0-based probe index. The i-th probe in the probe sequence.
  size_t index() const { return index_; }

 private:
  size_t mask_;
  size_t offset_;
  size_t index_ = 0;
};
159 
// SFINAE probe: the call operator is declared (never defined) only when
// `Hash` can be invoked on a `PassedKey` and `Eq` can compare a
// `ContainerKey` with a `PassedKey`. Used by IsDecomposable below to detect
// keys that are usable with the table's functors.
template <class ContainerKey, class Hash, class Eq>
struct RequireUsableKey {
  template <class PassedKey, class... Args>
  std::pair<
      decltype(std::declval<const Hash&>()(std::declval<const PassedKey&>())),
      decltype(std::declval<const Eq&>()(std::declval<const ContainerKey&>(),
                                         std::declval<const PassedKey&>()))>*
  operator()(const PassedKey&, const Args&...) const;
};
169 
// Detects whether `Policy::apply` can decompose the arguments `Ts...` into a
// key usable with `Hash` and `Eq`. Primary template: not decomposable.
template <class E, class Policy, class Hash, class Eq, class... Ts>
struct IsDecomposable : std::false_type {};

// Specialization selected (via void_t) when Policy::apply is well-formed with
// a RequireUsableKey functor, i.e. the decomposed key can be both hashed and
// compared.
template <class Policy, class Hash, class Eq, class... Ts>
struct IsDecomposable<
    absl::void_t<decltype(
        Policy::apply(RequireUsableKey<typename Policy::key_type, Hash, Eq>(),
                      std::declval<Ts>()...))>,
    Policy, Hash, Eq, Ts...> : std::true_type {};
179 
// TODO(alkis): Switch to std::is_nothrow_swappable when gcc/clang supports it.
// Returns whether swapping two `T&` values is declared noexcept. The
// defaulted true_type argument lets callers write `IsNoThrowSwappable<T>()`;
// the false_type overload is chosen explicitly for non-swappable types.
template <class T>
constexpr bool IsNoThrowSwappable(std::true_type = {} /* is_swappable */) {
  using std::swap;  // ADL-aware lookup of a user-provided swap
  return noexcept(swap(std::declval<T&>(), std::declval<T&>()));
}
template <class T>
constexpr bool IsNoThrowSwappable(std::false_type /* is_swappable */) {
  return false;
}
190 
191 template <typename T>
192 int TrailingZeros(T x) {
193   return sizeof(T) == 8 ? base_internal::CountTrailingZerosNonZero64(
194                               static_cast<uint64_t>(x))
195                         : base_internal::CountTrailingZerosNonZero32(
196                               static_cast<uint32_t>(x));
197 }
198 
199 template <typename T>
200 int LeadingZeros(T x) {
201   return sizeof(T) == 8
202              ? base_internal::CountLeadingZeros64(static_cast<uint64_t>(x))
203              : base_internal::CountLeadingZeros32(static_cast<uint32_t>(x));
204 }
205 
206 // An abstraction over a bitmask. It provides an easy way to iterate through the
207 // indexes of the set bits of a bitmask.  When Shift=0 (platforms with SSE),
// this is a true bitmask.  On non-SSE platforms, the arithmetic used to
// emulate the SSE behavior works in bytes (Shift=3) and leaves each byte as
// either 0x00 or 0x80.
211 //
212 // For example:
213 //   for (int i : BitMask<uint32_t, 16>(0x5)) -> yields 0, 2
214 //   for (int i : BitMask<uint64_t, 8, 3>(0x0000000080800000)) -> yields 2, 3
template <class T, int SignificantBits, int Shift = 0>
class BitMask {
  static_assert(std::is_unsigned<T>::value, "");
  static_assert(Shift == 0 || Shift == 3, "");

 public:
  // These are useful for unit tests (gunit).
  using value_type = int;
  using iterator = BitMask;
  using const_iterator = BitMask;

  explicit BitMask(T mask) : mask_(mask) {}
  // Advances to the next set bit by clearing the lowest one.
  BitMask& operator++() {
    mask_ &= (mask_ - 1);
    return *this;
  }
  // True while any bit remains set; iteration ends at end() == BitMask(0).
  explicit operator bool() const { return mask_ != 0; }
  int operator*() const { return LowestBitSet(); }
  // Index of the lowest set bit. The >> Shift converts from bit positions to
  // slot indexes on platforms where each slot occupies a whole byte (Shift=3).
  int LowestBitSet() const {
    return container_internal::TrailingZeros(mask_) >> Shift;
  }
  // Index of the highest set bit, scaled the same way as LowestBitSet().
  int HighestBitSet() const {
    return (sizeof(T) * CHAR_BIT - container_internal::LeadingZeros(mask_) -
            1) >>
           Shift;
  }

  BitMask begin() const { return *this; }
  BitMask end() const { return BitMask(0); }

  int TrailingZeros() const {
    return container_internal::TrailingZeros(mask_) >> Shift;
  }

  int LeadingZeros() const {
    // Shift out the bits of T above the significant region first, so the
    // count is relative to SignificantBits rather than to sizeof(T).
    constexpr int total_significant_bits = SignificantBits << Shift;
    constexpr int extra_bits = sizeof(T) * 8 - total_significant_bits;
    return container_internal::LeadingZeros(mask_ << extra_bits) >> Shift;
  }

 private:
  friend bool operator==(const BitMask& a, const BitMask& b) {
    return a.mask_ == b.mask_;
  }
  friend bool operator!=(const BitMask& a, const BitMask& b) {
    return a.mask_ != b.mask_;
  }

  T mask_;
};
265 
// The state of a control byte: either one of the special markers below (all
// negative) or, when non-negative, the 7-bit H2 of the hash of the element
// stored in the corresponding full slot.
using ctrl_t = signed char;
// The 7-bit second hash (H2) that is stored in a full slot's control byte.
using h2_t = uint8_t;

// The values here are selected for maximum performance. See the static asserts
// below for details.
enum Ctrl : ctrl_t {
  kEmpty = -128,   // 0b10000000
  kDeleted = -2,   // 0b11111110
  kSentinel = -1,  // 0b11111111
};
static_assert(
    kEmpty & kDeleted & kSentinel & 0x80,
    "Special markers need to have the MSB to make checking for them efficient");
static_assert(kEmpty < kSentinel && kDeleted < kSentinel,
              "kEmpty and kDeleted must be smaller than kSentinel to make the "
              "SIMD test of IsEmptyOrDeleted() efficient");
static_assert(kSentinel == -1,
              "kSentinel must be -1 to elide loading it from memory into SIMD "
              "registers (pcmpeqd xmm, xmm)");
static_assert(kEmpty == -128,
              "kEmpty must be -128 to make the SIMD check for its "
              "existence efficient (psignb xmm, xmm)");
static_assert(~kEmpty & ~kDeleted & kSentinel & 0x7F,
              "kEmpty and kDeleted must share an unset bit that is not shared "
              "by kSentinel to make the scalar test for MatchEmptyOrDeleted() "
              "efficient");
static_assert(kDeleted == -2,
              "kDeleted must be -2 to make the implementation of "
              "ConvertSpecialToEmptyAndFullToDeleted efficient");
295 
// A single block of empty control bytes for tables without any slots allocated.
// This enables removing a branch in the hot path of find().
inline ctrl_t* EmptyGroup() {
  // alignas(16) matches the widest group size (16 slots for SSE). The leading
  // kSentinel means a probe of an empty table terminates immediately.
  alignas(16) static constexpr ctrl_t empty_group[] = {
      kSentinel, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty,
      kEmpty,    kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty};
  // NOTE(review): const_cast looks safe only because an empty table is
  // presumably never written through this pointer — confirm at call sites.
  return const_cast<ctrl_t*>(empty_group);
}
304 
// Mixes a randomly generated per-process seed with `hash` and `ctrl` to
// randomize insertion order within groups.
bool ShouldInsertBackwards(size_t hash, ctrl_t* ctrl);

// Returns a hash seed.
//
// The seed consists of the ctrl_ pointer, which adds enough entropy to ensure
// non-determinism of iteration order in most cases.
inline size_t HashSeed(const ctrl_t* ctrl) {
  // The low bits of the pointer have little or no entropy because of
  // alignment. We shift the pointer to try to use higher entropy bits. A
  // good number seems to be 12 bits, because that aligns with page size.
  return reinterpret_cast<uintptr_t>(ctrl) >> 12;
}

// H1: the hash bits above the low 7, mixed with the per-table seed; selects
// the probe sequence.
inline size_t H1(size_t hash, const ctrl_t* ctrl) {
  return (hash >> 7) ^ HashSeed(ctrl);
}
// H2: the low 7 bits of the hash; stored in the control byte of a full slot.
inline ctrl_t H2(size_t hash) { return hash & 0x7F; }

// Control-byte classification. Full slots store a non-negative H2; all
// special markers are negative, and kSentinel (-1) is the largest of them.
inline bool IsEmpty(ctrl_t c) { return c == kEmpty; }
inline bool IsFull(ctrl_t c) { return c >= 0; }
inline bool IsDeleted(ctrl_t c) { return c == kDeleted; }
inline bool IsEmptyOrDeleted(ctrl_t c) { return c < kSentinel; }
329 
330 #if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
331 
// https://github.com/abseil/abseil-cpp/issues/209
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=87853
// _mm_cmpgt_epi8 is broken under GCC with -funsigned-char
// Work around this by using the portable implementation of Group
// when using -funsigned-char under GCC.
inline __m128i _mm_cmpgt_epi8_fixed(__m128i a, __m128i b) {
#if defined(__GNUC__) && !defined(__clang__)
  if (std::is_unsigned<char>::value) {
    // Emulate signed a > b: the saturating signed difference b - a has its
    // MSB set in exactly the lanes where a > b.
    const __m128i mask = _mm_set1_epi8(0x80);
    const __m128i diff = _mm_subs_epi8(b, a);
    return _mm_cmpeq_epi8(_mm_and_si128(diff, mask), mask);
  }
#endif
  return _mm_cmpgt_epi8(a, b);
}
347 
// SSE2 implementation of a group: 16 control bytes processed in parallel in
// one 128-bit register.
struct GroupSse2Impl {
  static constexpr size_t kWidth = 16;  // the number of slots per group

  explicit GroupSse2Impl(const ctrl_t* pos) {
    // Unaligned load: ctrl arrays are not guaranteed 16-byte aligned here.
    ctrl = _mm_loadu_si128(reinterpret_cast<const __m128i*>(pos));
  }

  // Returns a bitmask representing the positions of slots that match hash.
  BitMask<uint32_t, kWidth> Match(h2_t hash) const {
    auto match = _mm_set1_epi8(hash);
    return BitMask<uint32_t, kWidth>(
        _mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl)));
  }

  // Returns a bitmask representing the positions of empty slots.
  BitMask<uint32_t, kWidth> MatchEmpty() const {
#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3
    // This only works because kEmpty is -128.
    return BitMask<uint32_t, kWidth>(
        _mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl)));
#else
    return Match(static_cast<h2_t>(kEmpty));
#endif
  }

  // Returns a bitmask representing the positions of empty or deleted slots.
  // These are exactly the bytes strictly less than kSentinel (see the
  // static_asserts on the Ctrl values).
  BitMask<uint32_t, kWidth> MatchEmptyOrDeleted() const {
    auto special = _mm_set1_epi8(kSentinel);
    return BitMask<uint32_t, kWidth>(
        _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)));
  }

  // Returns the number of trailing empty or deleted elements in the group.
  uint32_t CountLeadingEmptyOrDeleted() const {
    auto special = _mm_set1_epi8(kSentinel);
    // The +1 turns the low run of 1-bits into a carry, so TrailingZeros
    // returns the length of that run.
    return TrailingZeros(
        _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)) + 1);
  }

  // Writes to dst the per-byte mapping: special (MSB set) -> kEmpty (-128),
  // full -> kDeleted (-2).
  void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
    auto msbs = _mm_set1_epi8(static_cast<char>(-128));
    auto x126 = _mm_set1_epi8(126);
#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3
    // pshufb zeroes lanes whose index byte has the MSB set (the special
    // bytes); OR with msbs then yields kEmpty. Full bytes select 126 and
    // become 126 | -128 == -2 == kDeleted.
    auto res = _mm_or_si128(_mm_shuffle_epi8(x126, ctrl), msbs);
#else
    auto zero = _mm_setzero_si128();
    auto special_mask = _mm_cmpgt_epi8_fixed(zero, ctrl);
    auto res = _mm_or_si128(msbs, _mm_andnot_si128(special_mask, x126));
#endif
    _mm_storeu_si128(reinterpret_cast<__m128i*>(dst), res);
  }

  __m128i ctrl;
};
402 #endif  // ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
403 
// Portable fallback: treats a group of 8 control bytes as one little-endian
// uint64_t and emulates the SSE operations with SWAR bit tricks. The result
// BitMasks use Shift=3 because each match is signalled by a byte's MSB.
struct GroupPortableImpl {
  static constexpr size_t kWidth = 8;

  explicit GroupPortableImpl(const ctrl_t* pos)
      : ctrl(little_endian::Load64(pos)) {}

  BitMask<uint64_t, kWidth, 3> Match(h2_t hash) const {
    // For the technique, see:
    // http://graphics.stanford.edu/~seander/bithacks.html#ValueInWord
    // (Determine if a word has a byte equal to n).
    //
    // Caveat: there are false positives but:
    // - they only occur if there is a real match
    // - they never occur on kEmpty, kDeleted, kSentinel
    // - they will be handled gracefully by subsequent checks in code
    //
    // Example:
    //   v = 0x1716151413121110
    //   hash = 0x12
    //   retval = (v - lsbs) & ~v & msbs = 0x0000000080800000
    constexpr uint64_t msbs = 0x8080808080808080ULL;
    constexpr uint64_t lsbs = 0x0101010101010101ULL;
    auto x = ctrl ^ (lsbs * hash);  // bytes equal to `hash` become 0x00
    return BitMask<uint64_t, kWidth, 3>((x - lsbs) & ~x & msbs);
  }

  // A byte matches iff its MSB is set and its bit 1 is clear; among the
  // special markers only kEmpty (0b10000000) has bit 1 clear, and full bytes
  // have the MSB clear.
  BitMask<uint64_t, kWidth, 3> MatchEmpty() const {
    constexpr uint64_t msbs = 0x8080808080808080ULL;
    return BitMask<uint64_t, kWidth, 3>((ctrl & (~ctrl << 6)) & msbs);
  }

  // A byte matches iff its MSB is set and its bit 0 is clear; this holds for
  // kEmpty and kDeleted but not kSentinel (see the static_asserts above).
  BitMask<uint64_t, kWidth, 3> MatchEmptyOrDeleted() const {
    constexpr uint64_t msbs = 0x8080808080808080ULL;
    return BitMask<uint64_t, kWidth, 3>((ctrl & (~ctrl << 7)) & msbs);
  }

  // Counts the run of empty/deleted bytes at the low (first-in-memory) end of
  // the group. `gaps` fills the non-flag bit positions so that the +1 carries
  // through the run; >>3 converts the resulting bit index to a byte count.
  uint32_t CountLeadingEmptyOrDeleted() const {
    constexpr uint64_t gaps = 0x00FEFEFEFEFEFEFEULL;
    return (TrailingZeros(((~ctrl & (ctrl >> 7)) | gaps) + 1) + 7) >> 3;
  }

  // Per byte: special (MSB set) -> 0x80 (kEmpty); full -> 0xFE (kDeleted).
  void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
    constexpr uint64_t msbs = 0x8080808080808080ULL;
    constexpr uint64_t lsbs = 0x0101010101010101ULL;
    auto x = ctrl & msbs;  // isolate each byte's MSB
    // Special byte: ~0x80 + (0x80>>7) = 0x7F + 1 = 0x80; full: ~0 = 0xFF,
    // then & ~lsbs clears bit 0 giving 0xFE. No carries cross byte lanes.
    auto res = (~x + (x >> 7)) & ~lsbs;
    little_endian::Store64(dst, res);
  }

  uint64_t ctrl;
};
455 
// Select the SIMD group implementation when SSE2 is available; otherwise use
// the portable 8-byte SWAR implementation.
#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
using Group = GroupSse2Impl;
#else
using Group = GroupPortableImpl;
#endif

template <class Policy, class Hash, class Eq, class Alloc>
class raw_hash_set;
464 
465 inline bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; }
466 
// PRECONDITION:
//   IsValidCapacity(capacity)
//   ctrl[capacity] == kSentinel
//   ctrl[i] != kSentinel for all i < capacity
// Applies mapping for every byte in ctrl:
//   DELETED -> EMPTY
//   EMPTY -> EMPTY
//   FULL -> DELETED
inline void ConvertDeletedToEmptyAndFullToDeleted(
    ctrl_t* ctrl, size_t capacity) {
  assert(ctrl[capacity] == kSentinel);
  assert(IsValidCapacity(capacity));
  // Process capacity + 1 bytes (including the sentinel) one group at a time;
  // the sentinel gets clobbered here and is restored below.
  for (ctrl_t* pos = ctrl; pos != ctrl + capacity + 1; pos += Group::kWidth) {
    Group{pos}.ConvertSpecialToEmptyAndFullToDeleted(pos);
  }
  // Copy the cloned ctrl bytes.
  std::memcpy(ctrl + capacity + 1, ctrl, Group::kWidth);
  ctrl[capacity] = kSentinel;
}
486 
487 // Rounds up the capacity to the next power of 2 minus 1, with a minimum of 1.
488 inline size_t NormalizeCapacity(size_t n) {
489   return n ? ~size_t{} >> LeadingZeros(n) : 1;
490 }
491 
492 // We use 7/8th as maximum load factor.
493 // For 16-wide groups, that gives an average of two empty slots per group.
494 inline size_t CapacityToGrowth(size_t capacity) {
495   assert(IsValidCapacity(capacity));
496   // `capacity*7/8`
497   if (Group::kWidth == 8 && capacity == 7) {
498     // x-x/8 does not work when x==7.
499     return 6;
500   }
501   return capacity - capacity / 8;
502 }
503 // From desired "growth" to a lowerbound of the necessary capacity.
504 // Might not be a valid one and required NormalizeCapacity().
505 inline size_t GrowthToLowerboundCapacity(size_t growth) {
506   // `growth*8/7`
507   if (Group::kWidth == 8 && growth == 7) {
508     // x+(x-1)/7 does not work when x==7.
509     return 8;
510   }
511   return growth + static_cast<size_t>((static_cast<int64_t>(growth) - 1) / 7);
512 }
513 
// Asserts (when hardening is enabled) that `ctrl` points at a full slot's
// control byte — i.e. the iterator is dereferenceable, not end()/invalidated.
inline void AssertIsFull(ctrl_t* ctrl) {
  ABSL_HARDENING_ASSERT((ctrl != nullptr && IsFull(*ctrl)) &&
                        "Invalid operation on iterator. The element might have "
                        "been erased, or the table might have rehashed.");
}

// Like AssertIsFull(), but additionally accepts end() iterators (null ctrl),
// e.g. for iterator comparisons.
inline void AssertIsValid(ctrl_t* ctrl) {
  ABSL_HARDENING_ASSERT((ctrl == nullptr || IsFull(*ctrl)) &&
                        "Invalid operation on iterator. The element might have "
                        "been erased, or the table might have rehashed.");
}
525 
526 // Policy: a policy defines how to perform different operations on
527 // the slots of the hashtable (see hash_policy_traits.h for the full interface
528 // of policy).
529 //
530 // Hash: a (possibly polymorphic) functor that hashes keys of the hashtable. The
531 // functor should accept a key and return size_t as hash. For best performance
532 // it is important that the hash function provides high entropy across all bits
533 // of the hash.
534 //
535 // Eq: a (possibly polymorphic) functor that compares two keys for equality. It
536 // should accept two (of possibly different type) keys and return a bool: true
537 // if they are equal, false if they are not. If two keys compare equal, then
538 // their hash values as defined by Hash MUST be equal.
539 //
540 // Allocator: an Allocator
541 // [https://en.cppreference.com/w/cpp/named_req/Allocator] with which
542 // the storage of the hashtable will be allocated and the elements will be
543 // constructed and destroyed.
544 template <class Policy, class Hash, class Eq, class Alloc>
545 class raw_hash_set {
546   using PolicyTraits = hash_policy_traits<Policy>;
547   using KeyArgImpl =
548       KeyArg<IsTransparent<Eq>::value && IsTransparent<Hash>::value>;
549 
550  public:
551   using init_type = typename PolicyTraits::init_type;
552   using key_type = typename PolicyTraits::key_type;
553   // TODO(sbenza): Hide slot_type as it is an implementation detail. Needs user
554   // code fixes!
555   using slot_type = typename PolicyTraits::slot_type;
556   using allocator_type = Alloc;
557   using size_type = size_t;
558   using difference_type = ptrdiff_t;
559   using hasher = Hash;
560   using key_equal = Eq;
561   using policy_type = Policy;
562   using value_type = typename PolicyTraits::value_type;
563   using reference = value_type&;
564   using const_reference = const value_type&;
565   using pointer = typename absl::allocator_traits<
566       allocator_type>::template rebind_traits<value_type>::pointer;
567   using const_pointer = typename absl::allocator_traits<
568       allocator_type>::template rebind_traits<value_type>::const_pointer;
569 
570   // Alias used for heterogeneous lookup functions.
571   // `key_arg<K>` evaluates to `K` when the functors are transparent and to
572   // `key_type` otherwise. It permits template argument deduction on `K` for the
573   // transparent case.
574   template <class K>
575   using key_arg = typename KeyArgImpl::template type<K, key_type>;
576 
577  private:
578   // Give an early error when key_type is not hashable/eq.
579   auto KeyTypeCanBeHashed(const Hash& h, const key_type& k) -> decltype(h(k));
580   auto KeyTypeCanBeEq(const Eq& eq, const key_type& k) -> decltype(eq(k, k));
581 
582   using Layout = absl::container_internal::Layout<ctrl_t, slot_type>;
583 
584   static Layout MakeLayout(size_t capacity) {
585     assert(IsValidCapacity(capacity));
586     return Layout(capacity + Group::kWidth + 1, capacity);
587   }
588 
589   using AllocTraits = absl::allocator_traits<allocator_type>;
590   using SlotAlloc = typename absl::allocator_traits<
591       allocator_type>::template rebind_alloc<slot_type>;
592   using SlotAllocTraits = typename absl::allocator_traits<
593       allocator_type>::template rebind_traits<slot_type>;
594 
595   static_assert(std::is_lvalue_reference<reference>::value,
596                 "Policy::element() must return a reference");
597 
598   template <typename T>
599   struct SameAsElementReference
600       : std::is_same<typename std::remove_cv<
601                          typename std::remove_reference<reference>::type>::type,
602                      typename std::remove_cv<
603                          typename std::remove_reference<T>::type>::type> {};
604 
605   // An enabler for insert(T&&): T must be convertible to init_type or be the
606   // same as [cv] value_type [ref].
607   // Note: we separate SameAsElementReference into its own type to avoid using
608   // reference unless we need to. MSVC doesn't seem to like it in some
609   // cases.
610   template <class T>
611   using RequiresInsertable = typename std::enable_if<
612       absl::disjunction<std::is_convertible<T, init_type>,
613                         SameAsElementReference<T>>::value,
614       int>::type;
615 
616   // RequiresNotInit is a workaround for gcc prior to 7.1.
617   // See https://godbolt.org/g/Y4xsUh.
618   template <class T>
619   using RequiresNotInit =
620       typename std::enable_if<!std::is_same<T, init_type>::value, int>::type;
621 
622   template <class... Ts>
623   using IsDecomposable = IsDecomposable<void, PolicyTraits, Hash, Eq, Ts...>;
624 
625  public:
626   static_assert(std::is_same<pointer, value_type*>::value,
627                 "Allocators with custom pointer types are not supported");
628   static_assert(std::is_same<const_pointer, const value_type*>::value,
629                 "Allocators with custom pointer types are not supported");
630 
  // Forward iterator over the full slots of the table. Walks the control
  // array and the slot array in lockstep; end() is represented by a null
  // ctrl_ pointer.
  class iterator {
    friend class raw_hash_set;

   public:
    using iterator_category = std::forward_iterator_tag;
    using value_type = typename raw_hash_set::value_type;
    // When the policy requests constant iterators (e.g. for set-like tables),
    // even the mutable iterator hands out const references.
    using reference =
        absl::conditional_t<PolicyTraits::constant_iterators::value,
                            const value_type&, value_type&>;
    using pointer = absl::remove_reference_t<reference>*;
    using difference_type = typename raw_hash_set::difference_type;

    iterator() {}

    // PRECONDITION: not an end() iterator.
    reference operator*() const {
      AssertIsFull(ctrl_);
      return PolicyTraits::element(slot_);
    }

    // PRECONDITION: not an end() iterator.
    pointer operator->() const { return &operator*(); }

    // PRECONDITION: not an end() iterator.
    iterator& operator++() {
      AssertIsFull(ctrl_);
      ++ctrl_;
      ++slot_;
      // Advance past any empty/deleted slots to the next full one.
      skip_empty_or_deleted();
      return *this;
    }
    // PRECONDITION: not an end() iterator.
    iterator operator++(int) {
      auto tmp = *this;
      ++*this;
      return tmp;
    }

    friend bool operator==(const iterator& a, const iterator& b) {
      AssertIsValid(a.ctrl_);
      AssertIsValid(b.ctrl_);
      return a.ctrl_ == b.ctrl_;
    }
    friend bool operator!=(const iterator& a, const iterator& b) {
      return !(a == b);
    }

   private:
    iterator(ctrl_t* ctrl, slot_type* slot) : ctrl_(ctrl), slot_(slot) {
      // This assumption helps the compiler know that any non-end iterator is
      // not equal to any end iterator.
      ABSL_INTERNAL_ASSUME(ctrl != nullptr);
    }

    // Moves ctrl_/slot_ forward past empty and deleted slots, skipping a
    // whole group at a time where possible. Reaching the sentinel byte turns
    // this into the end() iterator (null ctrl_).
    void skip_empty_or_deleted() {
      while (IsEmptyOrDeleted(*ctrl_)) {
        uint32_t shift = Group{ctrl_}.CountLeadingEmptyOrDeleted();
        ctrl_ += shift;
        slot_ += shift;
      }
      if (ABSL_PREDICT_FALSE(*ctrl_ == kSentinel)) ctrl_ = nullptr;
    }

    ctrl_t* ctrl_ = nullptr;
    // To avoid uninitialized member warnings, put slot_ in an anonymous union.
    // The member is not initialized on singleton and end iterators.
    union {
      slot_type* slot_;
    };
  };
701 
  // Thin wrapper over `iterator` that hands out const references/pointers.
  class const_iterator {
    friend class raw_hash_set;

   public:
    using iterator_category = typename iterator::iterator_category;
    using value_type = typename raw_hash_set::value_type;
    using reference = typename raw_hash_set::const_reference;
    using pointer = typename raw_hash_set::const_pointer;
    using difference_type = typename raw_hash_set::difference_type;

    const_iterator() {}
    // Implicit construction from iterator.
    const_iterator(iterator i) : inner_(std::move(i)) {}

    reference operator*() const { return *inner_; }
    pointer operator->() const { return inner_.operator->(); }

    const_iterator& operator++() {
      ++inner_;
      return *this;
    }
    const_iterator operator++(int) { return inner_++; }

    friend bool operator==(const const_iterator& a, const const_iterator& b) {
      return a.inner_ == b.inner_;
    }
    friend bool operator!=(const const_iterator& a, const const_iterator& b) {
      return !(a == b);
    }

   private:
    // Casts away constness to reuse `iterator`; const-ness is reimposed by
    // the public reference/pointer aliases above.
    const_iterator(const ctrl_t* ctrl, const slot_type* slot)
        : inner_(const_cast<ctrl_t*>(ctrl), const_cast<slot_type*>(slot)) {}

    iterator inner_;
  };
738 
739   using node_type = node_handle<Policy, hash_policy_traits<Policy>, Alloc>;
740   using insert_return_type = InsertReturnType<iterator, node_type>;
741 
742   raw_hash_set() noexcept(
743       std::is_nothrow_default_constructible<hasher>::value&&
744           std::is_nothrow_default_constructible<key_equal>::value&&
745               std::is_nothrow_default_constructible<allocator_type>::value) {}
746 
747   explicit raw_hash_set(size_t bucket_count, const hasher& hash = hasher(),
748                         const key_equal& eq = key_equal(),
749                         const allocator_type& alloc = allocator_type())
750       : ctrl_(EmptyGroup()), settings_(0, hash, eq, alloc) {
751     if (bucket_count) {
752       capacity_ = NormalizeCapacity(bucket_count);
753       reset_growth_left();
754       initialize_slots();
755     }
756   }
757 
758   raw_hash_set(size_t bucket_count, const hasher& hash,
759                const allocator_type& alloc)
760       : raw_hash_set(bucket_count, hash, key_equal(), alloc) {}
761 
762   raw_hash_set(size_t bucket_count, const allocator_type& alloc)
763       : raw_hash_set(bucket_count, hasher(), key_equal(), alloc) {}
764 
765   explicit raw_hash_set(const allocator_type& alloc)
766       : raw_hash_set(0, hasher(), key_equal(), alloc) {}
767 
768   template <class InputIter>
769   raw_hash_set(InputIter first, InputIter last, size_t bucket_count = 0,
770                const hasher& hash = hasher(), const key_equal& eq = key_equal(),
771                const allocator_type& alloc = allocator_type())
772       : raw_hash_set(bucket_count, hash, eq, alloc) {
773     insert(first, last);
774   }
775 
776   template <class InputIter>
777   raw_hash_set(InputIter first, InputIter last, size_t bucket_count,
778                const hasher& hash, const allocator_type& alloc)
779       : raw_hash_set(first, last, bucket_count, hash, key_equal(), alloc) {}
780 
781   template <class InputIter>
782   raw_hash_set(InputIter first, InputIter last, size_t bucket_count,
783                const allocator_type& alloc)
784       : raw_hash_set(first, last, bucket_count, hasher(), key_equal(), alloc) {}
785 
786   template <class InputIter>
787   raw_hash_set(InputIter first, InputIter last, const allocator_type& alloc)
788       : raw_hash_set(first, last, 0, hasher(), key_equal(), alloc) {}
789 
  // Instead of accepting std::initializer_list<value_type> as the first
  // argument like std::unordered_set<value_type> does, we have two overloads
  // that accept std::initializer_list<T> and std::initializer_list<init_type>.
  // This is advantageous for performance.
  //
  //   // Turns {"abc", "def"} into std::initializer_list<std::string>, then
  //   // copies the strings into the set.
  //   std::unordered_set<std::string> s = {"abc", "def"};
  //
  //   // Turns {"abc", "def"} into std::initializer_list<const char*>, then
  //   // copies the strings into the set.
  //   absl::flat_hash_set<std::string> s = {"abc", "def"};
  //
  // The same trick is used in insert().
  //
  // The enabler is necessary to prevent this constructor from triggering where
  // the copy constructor is meant to be called.
  //
  //   absl::flat_hash_set<int> a, b{a};
  //
  // RequiresNotInit<T> is a workaround for gcc prior to 7.1.
  template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
  raw_hash_set(std::initializer_list<T> init, size_t bucket_count = 0,
               const hasher& hash = hasher(), const key_equal& eq = key_equal(),
               const allocator_type& alloc = allocator_type())
      : raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) {}

  raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count = 0,
               const hasher& hash = hasher(), const key_equal& eq = key_equal(),
               const allocator_type& alloc = allocator_type())
      : raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) {}

  // The remaining overloads mirror the allocator-extended constructor set;
  // each pair (templated T / concrete init_type) matches the pair above.
  template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
  raw_hash_set(std::initializer_list<T> init, size_t bucket_count,
               const hasher& hash, const allocator_type& alloc)
      : raw_hash_set(init, bucket_count, hash, key_equal(), alloc) {}

  raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count,
               const hasher& hash, const allocator_type& alloc)
      : raw_hash_set(init, bucket_count, hash, key_equal(), alloc) {}

  template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
  raw_hash_set(std::initializer_list<T> init, size_t bucket_count,
               const allocator_type& alloc)
      : raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {}

  raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count,
               const allocator_type& alloc)
      : raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {}

  template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
  raw_hash_set(std::initializer_list<T> init, const allocator_type& alloc)
      : raw_hash_set(init, 0, hasher(), key_equal(), alloc) {}

  raw_hash_set(std::initializer_list<init_type> init,
               const allocator_type& alloc)
      : raw_hash_set(init, 0, hasher(), key_equal(), alloc) {}
847 
  // Copy constructor. The allocator is obtained via
  // select_on_container_copy_construction, per the allocator-aware container
  // requirements.
  raw_hash_set(const raw_hash_set& that)
      : raw_hash_set(that, AllocTraits::select_on_container_copy_construction(
                               that.alloc_ref())) {}

  // Allocator-extended copy constructor.
  raw_hash_set(const raw_hash_set& that, const allocator_type& a)
      : raw_hash_set(0, that.hash_ref(), that.eq_ref(), a) {
    reserve(that.size());
    // Because the table is guaranteed to be empty, we can do something faster
    // than a full `insert`: every element is known to be absent, so each one
    // is placed directly at the first non-full slot of its probe sequence
    // without an equality check.
    for (const auto& v : that) {
      const size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, v);
      auto target = find_first_non_full(hash);
      set_ctrl(target.offset, H2(hash));
      emplace_at(target.offset, v);
      infoz_.RecordInsert(hash, target.probe_length);
    }
    // Bookkeeping is updated in bulk instead of per element.
    size_ = that.size();
    growth_left() -= that.size();
  }
867 
  // Move constructor: steals `that`'s arrays and leaves it in a valid empty
  // state (EmptyGroup control bytes, null slots).
  raw_hash_set(raw_hash_set&& that) noexcept(
      std::is_nothrow_copy_constructible<hasher>::value&&
          std::is_nothrow_copy_constructible<key_equal>::value&&
              std::is_nothrow_copy_constructible<allocator_type>::value)
      : ctrl_(absl::exchange(that.ctrl_, EmptyGroup())),
        slots_(absl::exchange(that.slots_, nullptr)),
        size_(absl::exchange(that.size_, 0)),
        capacity_(absl::exchange(that.capacity_, 0)),
        infoz_(absl::exchange(that.infoz_, HashtablezInfoHandle())),
        // Hash, equality and allocator are copied instead of moved because
        // `that` must be left valid. If Hash is std::function<Key>, moving it
        // would create a nullptr functor that cannot be called.
        settings_(that.settings_) {
    // growth_left was copied above, reset the one from `that`.
    that.growth_left() = 0;
  }

  // Allocator-extended move constructor. Only when the allocators compare
  // equal can storage be adopted wholesale; otherwise elements must be
  // reinserted into storage obtained from `a`.
  raw_hash_set(raw_hash_set&& that, const allocator_type& a)
      : ctrl_(EmptyGroup()),
        slots_(nullptr),
        size_(0),
        capacity_(0),
        settings_(0, that.hash_ref(), that.eq_ref(), a) {
    if (a == that.alloc_ref()) {
      std::swap(ctrl_, that.ctrl_);
      std::swap(slots_, that.slots_);
      std::swap(size_, that.size_);
      std::swap(capacity_, that.capacity_);
      std::swap(growth_left(), that.growth_left());
      std::swap(infoz_, that.infoz_);
    } else {
      reserve(that.size());
      // Note: this will copy elements of dense_set and unordered_set instead of
      // moving them. This can be fixed if it ever becomes an issue.
      for (auto& elem : that) insert(std::move(elem));
    }
  }
905 
  // Copy assignment via copy-and-swap: builds a temporary with the correct
  // allocator (honoring propagate_on_container_copy_assignment) and swaps it
  // in, which gives the strong exception guarantee.
  raw_hash_set& operator=(const raw_hash_set& that) {
    raw_hash_set tmp(that,
                     AllocTraits::propagate_on_container_copy_assignment::value
                         ? that.alloc_ref()
                         : alloc_ref());
    swap(tmp);
    return *this;
  }

  // Move assignment; dispatches on allocator propagation at compile time.
  raw_hash_set& operator=(raw_hash_set&& that) noexcept(
      absl::allocator_traits<allocator_type>::is_always_equal::value&&
          std::is_nothrow_move_assignable<hasher>::value&&
              std::is_nothrow_move_assignable<key_equal>::value) {
    // TODO(sbenza): We should only use the operations from the noexcept clause
    // to make sure we actually adhere to that contract.
    return move_assign(
        std::move(that),
        typename AllocTraits::propagate_on_container_move_assignment());
  }
925 
  // Destroys all elements and releases the backing arrays.
  ~raw_hash_set() { destroy_slots(); }

  // Returns an iterator to the first full slot (or end() if the table is
  // empty); slot 0 may be empty/deleted, hence the skip.
  iterator begin() {
    auto it = iterator_at(0);
    it.skip_empty_or_deleted();
    return it;
  }
  // A default-constructed iterator serves as the universal end() sentinel.
  iterator end() { return {}; }

  const_iterator begin() const {
    // Safe: begin() does not modify the table; only the iterator type differs.
    return const_cast<raw_hash_set*>(this)->begin();
  }
  const_iterator end() const { return {}; }
  const_iterator cbegin() const { return begin(); }
  const_iterator cend() const { return end(); }
941 
  bool empty() const { return !size(); }
  // Number of elements currently stored.
  size_t size() const { return size_; }
  // Number of slots in the backing array (not the element count).
  size_t capacity() const { return capacity_; }
  size_t max_size() const { return (std::numeric_limits<size_t>::max)(); }
946 
947   ABSL_ATTRIBUTE_REINITIALIZES void clear() {
948     // Iterating over this container is O(bucket_count()). When bucket_count()
949     // is much greater than size(), iteration becomes prohibitively expensive.
950     // For clear() it is more important to reuse the allocated array when the
951     // container is small because allocation takes comparatively long time
952     // compared to destruction of the elements of the container. So we pick the
953     // largest bucket_count() threshold for which iteration is still fast and
954     // past that we simply deallocate the array.
955     if (capacity_ > 127) {
956       destroy_slots();
957     } else if (capacity_) {
958       for (size_t i = 0; i != capacity_; ++i) {
959         if (IsFull(ctrl_[i])) {
960           PolicyTraits::destroy(&alloc_ref(), slots_ + i);
961         }
962       }
963       size_ = 0;
964       reset_ctrl();
965       reset_growth_left();
966     }
967     assert(empty());
968     infoz_.RecordStorageChanged(0, capacity_);
969   }
970 
  // This overload kicks in when the argument is an rvalue of insertable and
  // decomposable type other than init_type.
  //
  //   flat_hash_map<std::string, int> m;
  //   m.insert(std::make_pair("abc", 42));
  // TODO(cheshire): A type alias T2 is introduced as a workaround for the nvcc
  // bug.
  template <class T, RequiresInsertable<T> = 0,
            class T2 = T,
            typename std::enable_if<IsDecomposable<T2>::value, int>::type = 0,
            T* = nullptr>
  std::pair<iterator, bool> insert(T&& value) {
    return emplace(std::forward<T>(value));
  }

  // This overload kicks in when the argument is a bitfield or an lvalue of
  // insertable and decomposable type.
  //
  //   union { int n : 1; };
  //   flat_hash_set<int> s;
  //   s.insert(n);
  //
  //   flat_hash_set<std::string> s;
  //   const char* p = "hello";
  //   s.insert(p);
  //
  // TODO(romanp): Once we stop supporting gcc 5.1 and below, replace
  // RequiresInsertable<T> with RequiresInsertable<const T&>.
  // We are hitting this bug: https://godbolt.org/g/1Vht4f.
  template <
      class T, RequiresInsertable<T> = 0,
      typename std::enable_if<IsDecomposable<const T&>::value, int>::type = 0>
  std::pair<iterator, bool> insert(const T& value) {
    return emplace(value);
  }

  // This overload kicks in when the argument is an rvalue of init_type. Its
  // purpose is to handle brace-init-list arguments.
  //
  //   flat_hash_map<std::string, int> s;
  //   s.insert({"abc", 42});
  std::pair<iterator, bool> insert(init_type&& value) {
    return emplace(std::move(value));
  }

  // Hinted-insert overloads: the const_iterator hint is accepted for API
  // compatibility with std::unordered_set but otherwise ignored.
  // TODO(cheshire): A type alias T2 is introduced as a workaround for the nvcc
  // bug.
  template <class T, RequiresInsertable<T> = 0, class T2 = T,
            typename std::enable_if<IsDecomposable<T2>::value, int>::type = 0,
            T* = nullptr>
  iterator insert(const_iterator, T&& value) {
    return insert(std::forward<T>(value)).first;
  }

  // TODO(romanp): Once we stop supporting gcc 5.1 and below, replace
  // RequiresInsertable<T> with RequiresInsertable<const T&>.
  // We are hitting this bug: https://godbolt.org/g/1Vht4f.
  template <
      class T, RequiresInsertable<T> = 0,
      typename std::enable_if<IsDecomposable<const T&>::value, int>::type = 0>
  iterator insert(const_iterator, const T& value) {
    return insert(value).first;
  }

  iterator insert(const_iterator, init_type&& value) {
    return insert(std::move(value)).first;
  }

  // Inserts the range [first, last); elements whose key is already present
  // are dropped.
  template <class InputIt>
  void insert(InputIt first, InputIt last) {
    for (; first != last; ++first) insert(*first);
  }

  template <class T, RequiresNotInit<T> = 0, RequiresInsertable<const T&> = 0>
  void insert(std::initializer_list<T> ilist) {
    insert(ilist.begin(), ilist.end());
  }

  void insert(std::initializer_list<init_type> ilist) {
    insert(ilist.begin(), ilist.end());
  }
1052 
  // Inserts a node extracted from a compatible table. On success the node is
  // emptied; on failure (key already present) the element stays in the
  // returned node, matching the std::unordered_set node API.
  insert_return_type insert(node_type&& node) {
    if (!node) return {end(), false, node_type()};
    const auto& elem = PolicyTraits::element(CommonAccess::GetSlot(node));
    auto res = PolicyTraits::apply(
        InsertSlot<false>{*this, std::move(*CommonAccess::GetSlot(node))},
        elem);
    if (res.second) {
      // The slot was moved into the table; release the node's ownership.
      CommonAccess::Reset(&node);
      return {res.first, true, node_type()};
    } else {
      return {res.first, false, std::move(node)};
    }
  }

  // Hinted node insert; the hint is ignored.
  iterator insert(const_iterator, node_type&& node) {
    return insert(std::move(node)).first;
  }
1070 
  // This overload kicks in if we can deduce the key from args. This enables us
  // to avoid constructing value_type if an entry with the same key already
  // exists.
  //
  // For example:
  //
  //   flat_hash_map<std::string, std::string> m = {{"abc", "def"}};
  //   // Creates no std::string copies and makes no heap allocations.
  //   m.emplace("abc", "xyz");
  template <class... Args, typename std::enable_if<
                               IsDecomposable<Args...>::value, int>::type = 0>
  std::pair<iterator, bool> emplace(Args&&... args) {
    return PolicyTraits::apply(EmplaceDecomposable{*this},
                               std::forward<Args>(args)...);
  }

  // This overload kicks in if we cannot deduce the key from args. It constructs
  // value_type unconditionally and then either moves it into the table or
  // destroys.
  template <class... Args, typename std::enable_if<
                               !IsDecomposable<Args...>::value, int>::type = 0>
  std::pair<iterator, bool> emplace(Args&&... args) {
    // Construct the element in properly-aligned stack storage first, since we
    // don't yet know whether (or where) it will land in the table.
    alignas(slot_type) unsigned char raw[sizeof(slot_type)];
    slot_type* slot = reinterpret_cast<slot_type*>(&raw);

    PolicyTraits::construct(&alloc_ref(), slot, std::forward<Args>(args)...);
    const auto& elem = PolicyTraits::element(slot);
    // InsertSlot<true> either transfers the temporary slot into the table or
    // destroys it if the key was already present.
    return PolicyTraits::apply(InsertSlot<true>{*this, std::move(*slot)}, elem);
  }

  // Hinted emplace; the hint is ignored.
  template <class... Args>
  iterator emplace_hint(const_iterator, Args&&... args) {
    return emplace(std::forward<Args>(args)...).first;
  }
1105 
  // Extension API: support for lazy emplace.
  //
  // Looks up key in the table. If found, returns the iterator to the element.
  // Otherwise calls `f` with one argument of type `raw_hash_set::constructor`.
  //
  // `f` must abide by several restrictions:
  //  - it MUST call `raw_hash_set::constructor` with arguments as if a
  //    `raw_hash_set::value_type` is constructed,
  //  - it MUST NOT access the container before the call to
  //    `raw_hash_set::constructor`, and
  //  - it MUST NOT erase the lazily emplaced element.
  // Doing any of these is undefined behavior.
  //
  // For example:
  //
  //   std::unordered_set<ArenaString> s;
  //   // Makes ArenaString even if "abc" is in the map.
  //   s.insert(ArenaString(&arena, "abc"));
  //
  //   flat_hash_set<ArenaString> s;
  //   // Makes ArenaString only if "abc" is not in the map.
  //   s.lazy_emplace("abc", [&](const constructor& ctor) {
  //     ctor(&arena, "abc");
  //   });
  //
  // WARNING: This API is currently experimental. If there is a way to implement
  // the same thing with the rest of the API, prefer that.
  class constructor {
    friend class raw_hash_set;

   public:
    // Constructs the pending element in the reserved slot, forwarding `args`
    // to the policy. Must be invoked exactly once: the slot pointer is
    // cleared afterwards, which lazy_emplace asserts on.
    template <class... Args>
    void operator()(Args&&... args) const {
      assert(*slot_);
      PolicyTraits::construct(alloc_, *slot_, std::forward<Args>(args)...);
      *slot_ = nullptr;  // Mark the construction as done.
    }

   private:
    constructor(allocator_type* a, slot_type** slot) : alloc_(a), slot_(slot) {}

    allocator_type* alloc_;  // Allocator used for element construction.
    slot_type** slot_;       // Reserved slot; set to null once constructed.
  };
1150 
  // Returns an iterator to `key`, invoking `f` to construct the element only
  // if the key is not already present. See the `constructor` comment above.
  template <class K = key_type, class F>
  iterator lazy_emplace(const key_arg<K>& key, F&& f) {
    auto res = find_or_prepare_insert(key);
    if (res.second) {
      // A slot was reserved for a new element; `f` must construct it.
      slot_type* slot = slots_ + res.first;
      std::forward<F>(f)(constructor(&alloc_ref(), &slot));
      // `constructor` nulls out `slot` after constructing, so this verifies
      // that `f` really did construct a value.
      assert(!slot);
    }
    return iterator_at(res.first);
  }
1161 
1162   // Extension API: support for heterogeneous keys.
1163   //
1164   //   std::unordered_set<std::string> s;
1165   //   // Turns "abc" into std::string.
1166   //   s.erase("abc");
1167   //
1168   //   flat_hash_set<std::string> s;
1169   //   // Uses "abc" directly without copying it into std::string.
1170   //   s.erase("abc");
1171   template <class K = key_type>
1172   size_type erase(const key_arg<K>& key) {
1173     auto it = find(key);
1174     if (it == end()) return 0;
1175     erase(it);
1176     return 1;
1177   }
1178 
  // Erases the element pointed to by `it`.  Unlike `std::unordered_set::erase`,
  // this method returns void to reduce algorithmic complexity to O(1).  The
  // iterator is invalidated, so any increment should be done before calling
  // erase.  In order to erase while iterating across a map, use the following
  // idiom (which also works for standard containers):
  //
  // for (auto it = m.begin(), end = m.end(); it != end;) {
  //   // `erase()` will invalidate `it`, so advance `it` first.
  //   auto copy_it = it++;
  //   if (<pred>) {
  //     m.erase(copy_it);
  //   }
  // }
  void erase(const_iterator cit) { erase(cit.inner_); }

  // This overload is necessary because otherwise erase<K>(const K&) would be
  // a better match if non-const iterator is passed as an argument.
  void erase(iterator it) {
    AssertIsFull(it.ctrl_);
    // Destroy the element, then update control bytes / size via
    // erase_meta_only.
    PolicyTraits::destroy(&alloc_ref(), it.slot_);
    erase_meta_only(it);
  }
1201 
1202   iterator erase(const_iterator first, const_iterator last) {
1203     while (first != last) {
1204       erase(first++);
1205     }
1206     return last.inner_;
1207   }
1208 
  // Moves elements from `src` into `this`.
  // If the element already exists in `this`, it is left unmodified in `src`.
  template <typename H, typename E>
  void merge(raw_hash_set<Policy, H, E, Alloc>& src) {  // NOLINT
    assert(this != &src);
    for (auto it = src.begin(), e = src.end(); it != e;) {
      // Save the successor before a possible erase invalidates `it`.
      auto next = std::next(it);
      // InsertSlot<false>: on collision the slot is left intact in `src`
      // rather than destroyed.
      if (PolicyTraits::apply(InsertSlot<false>{*this, std::move(*it.slot_)},
                              PolicyTraits::element(it.slot_))
              .second) {
        // Element was transferred; remove only the metadata from `src`.
        src.erase_meta_only(it);
      }
      it = next;
    }
  }

  // Rvalue overload; merging from an rvalue behaves identically.
  template <typename H, typename E>
  void merge(raw_hash_set<Policy, H, E, Alloc>&& src) {
    merge(src);
  }
1229 
  // Removes the element at `position` and returns it as a node handle,
  // without destroying it.
  node_type extract(const_iterator position) {
    AssertIsFull(position.inner_.ctrl_);
    auto node =
        CommonAccess::Transfer<node_type>(alloc_ref(), position.inner_.slot_);
    // Only metadata needs updating; the element now lives in the node.
    erase_meta_only(position);
    return node;
  }

  // Key-based extract. The enable_if keeps this overload from hijacking
  // calls that pass an iterator when K is deduced.
  template <
      class K = key_type,
      typename std::enable_if<!std::is_same<K, iterator>::value, int>::type = 0>
  node_type extract(const key_arg<K>& key) {
    auto it = find(key);
    return it == end() ? node_type() : extract(const_iterator{it});
  }
1245 
  // Swaps all state with `that`, including sampling handles. Allocators are
  // swapped only per propagate_on_container_swap (see SwapAlloc).
  void swap(raw_hash_set& that) noexcept(
      IsNoThrowSwappable<hasher>() && IsNoThrowSwappable<key_equal>() &&
      IsNoThrowSwappable<allocator_type>(
          typename AllocTraits::propagate_on_container_swap{})) {
    using std::swap;  // Enable ADL for user-provided swap overloads.
    swap(ctrl_, that.ctrl_);
    swap(slots_, that.slots_);
    swap(size_, that.size_);
    swap(capacity_, that.capacity_);
    swap(growth_left(), that.growth_left());
    swap(hash_ref(), that.hash_ref());
    swap(eq_ref(), that.eq_ref());
    swap(infoz_, that.infoz_);
    SwapAlloc(alloc_ref(), that.alloc_ref(),
              typename AllocTraits::propagate_on_container_swap{});
  }
1262 
  // Rehashes so that at least `n` slots are available. rehash(0) on an empty
  // table releases all storage; otherwise it unconditionally rehashes, per
  // the standard.
  void rehash(size_t n) {
    if (n == 0 && capacity_ == 0) return;
    if (n == 0 && size_ == 0) {
      destroy_slots();
      infoz_.RecordStorageChanged(0, 0);
      return;
    }
    // bitor is a faster way of doing `max` here. We will round up to the next
    // power-of-2-minus-1, so bitor is good enough.
    auto m = NormalizeCapacity(n | GrowthToLowerboundCapacity(size()));
    // n == 0 unconditionally rehashes as per the standard.
    if (n == 0 || m > capacity_) {
      resize(m);
    }
  }

  // Ensures room for `n` elements without exceeding the max load factor.
  void reserve(size_t n) { rehash(GrowthToLowerboundCapacity(n)); }
1280 
1281   // Extension API: support for heterogeneous keys.
1282   //
1283   //   std::unordered_set<std::string> s;
1284   //   // Turns "abc" into std::string.
1285   //   s.count("abc");
1286   //
1287   //   ch_set<std::string> s;
1288   //   // Uses "abc" directly without copying it into std::string.
1289   //   s.count("abc");
1290   template <class K = key_type>
1291   size_t count(const key_arg<K>& key) const {
1292     return find(key) == end() ? 0 : 1;
1293   }
1294 
  // Issues CPU prefetch instructions for the memory needed to find or insert
  // a key.  Like all lookup functions, this support heterogeneous keys.
  //
  // NOTE: This is a very low level operation and should not be used without
  // specific benchmarks indicating its importance.
  template <class K = key_type>
  void prefetch(const key_arg<K>& key) const {
    (void)key;  // Silence unused-parameter warnings on non-GNU compilers.
#if defined(__GNUC__)
    // Prefetch both the control byte and the slot at the start of the probe
    // sequence for this key's hash.
    auto seq = probe(hash_ref()(key));
    __builtin_prefetch(static_cast<const void*>(ctrl_ + seq.offset()));
    __builtin_prefetch(static_cast<const void*>(slots_ + seq.offset()));
#endif  // __GNUC__
  }
1309 
  // The API of find() has two extensions.
  //
  // 1. The hash can be passed by the user. It must be equal to the hash of the
  // key.
  //
  // 2. The type of the key argument doesn't have to be key_type. This is so
  // called heterogeneous key support.
  template <class K = key_type>
  iterator find(const key_arg<K>& key, size_t hash) {
    auto seq = probe(hash);
    while (true) {
      // Scan one whole control group per probe step; Match(H2(hash)) yields
      // the positions whose fingerprint matches, which are then confirmed
      // with the full equality functor.
      Group g{ctrl_ + seq.offset()};
      for (int i : g.Match(H2(hash))) {
        if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
                EqualElement<K>{key, eq_ref()},
                PolicyTraits::element(slots_ + seq.offset(i)))))
          return iterator_at(seq.offset(i));
      }
      // An empty slot in the group means the key cannot appear further along
      // its probe sequence.
      if (ABSL_PREDICT_TRUE(g.MatchEmpty())) return end();
      seq.next();
      assert(seq.index() < capacity_ && "full table!");
    }
  }
  template <class K = key_type>
  iterator find(const key_arg<K>& key) {
    return find(key, hash_ref()(key));
  }

  template <class K = key_type>
  const_iterator find(const key_arg<K>& key, size_t hash) const {
    // Safe: find() does not modify the table; only the iterator type differs.
    return const_cast<raw_hash_set*>(this)->find(key, hash);
  }
  template <class K = key_type>
  const_iterator find(const key_arg<K>& key) const {
    return find(key, hash_ref()(key));
  }
1346 
1347   template <class K = key_type>
1348   bool contains(const key_arg<K>& key) const {
1349     return find(key) != end();
1350   }
1351 
1352   template <class K = key_type>
1353   std::pair<iterator, iterator> equal_range(const key_arg<K>& key) {
1354     auto it = find(key);
1355     if (it != end()) return {it, std::next(it)};
1356     return {it, it};
1357   }
1358   template <class K = key_type>
1359   std::pair<const_iterator, const_iterator> equal_range(
1360       const key_arg<K>& key) const {
1361     auto it = find(key);
1362     if (it != end()) return {it, std::next(it)};
1363     return {it, it};
1364   }
1365 
  // Number of slots in the backing array, as mandated by the standard
  // container interface (not the number of elements).
  size_t bucket_count() const { return capacity_; }
1367   float load_factor() const {
1368     return capacity_ ? static_cast<double>(size()) / capacity_ : 0.0;
1369   }
  // The max load factor is fixed by the table implementation; these exist
  // only for std::unordered_set API compatibility.
  float max_load_factor() const { return 1.0f; }
  void max_load_factor(float) {
    // Does nothing.
  }

  // Observers returning copies of the table's functors/allocator.
  hasher hash_function() const { return hash_ref(); }
  key_equal key_eq() const { return eq_ref(); }
  allocator_type get_allocator() const { return alloc_ref(); }
1378 
  // Equality: same size and every element of one table present in the other.
  friend bool operator==(const raw_hash_set& a, const raw_hash_set& b) {
    if (a.size() != b.size()) return false;
    // Iterate the table with the smaller capacity (iteration cost is
    // proportional to capacity) and look elements up in the other.
    const raw_hash_set* outer = &a;
    const raw_hash_set* inner = &b;
    if (outer->capacity() > inner->capacity()) std::swap(outer, inner);
    for (const value_type& elem : *outer)
      if (!inner->has_element(elem)) return false;
    return true;
  }

  friend bool operator!=(const raw_hash_set& a, const raw_hash_set& b) {
    return !(a == b);
  }
1392 
  // ADL-found swap, forwarding to the member function.
  friend void swap(raw_hash_set& a,
                   raw_hash_set& b) noexcept(noexcept(a.swap(b))) {
    a.swap(b);
  }

 private:
  // Grants the debug utilities access to the table's internals.
  template <class Container, typename Enabler>
  friend struct absl::container_internal::hashtable_debug_internal::
      HashtableDebugAccess;
1402 
  // Functor for PolicyTraits::apply: looks up the decomposed key in `s`,
  // ignoring the remaining decomposed arguments.
  struct FindElement {
    template <class K, class... Args>
    const_iterator operator()(const K& key, Args&&...) const {
      return s.find(key);
    }
    const raw_hash_set& s;
  };
1410 
  // Functor for PolicyTraits::apply: hashes the decomposed key with `h`,
  // ignoring the remaining decomposed arguments.
  struct HashElement {
    template <class K, class... Args>
    size_t operator()(const K& key, Args&&...) const {
      return h(key);
    }
    const hasher& h;
  };
1418 
  // Functor for PolicyTraits::apply: compares the decomposed key against the
  // stored `rhs` with `eq`, ignoring the remaining decomposed arguments.
  template <class K1>
  struct EqualElement {
    template <class K2, class... Args>
    bool operator()(const K2& lhs, Args&&...) const {
      return eq(lhs, rhs);
    }
    const K1& rhs;
    const key_equal& eq;
  };
1428 
  // Functor for PolicyTraits::apply: finds or reserves the slot for `key`
  // and, only if a new slot was reserved, constructs the element there from
  // the decomposed arguments.
  struct EmplaceDecomposable {
    template <class K, class... Args>
    std::pair<iterator, bool> operator()(const K& key, Args&&... args) const {
      auto res = s.find_or_prepare_insert(key);
      if (res.second) {
        s.emplace_at(res.first, std::forward<Args>(args)...);
      }
      return {s.iterator_at(res.first), res.second};
    }
    raw_hash_set& s;
  };
1440 
  // Functor for PolicyTraits::apply: transfers an already-constructed slot
  // into the table if the key is absent. If the key is present, the pending
  // slot is destroyed when do_destroy is true (used by emplace), or left
  // intact when false (used by merge / node insert).
  template <bool do_destroy>
  struct InsertSlot {
    template <class K, class... Args>
    std::pair<iterator, bool> operator()(const K& key, Args&&...) && {
      auto res = s.find_or_prepare_insert(key);
      if (res.second) {
        PolicyTraits::transfer(&s.alloc_ref(), s.slots_ + res.first, &slot);
      } else if (do_destroy) {
        PolicyTraits::destroy(&s.alloc_ref(), &slot);
      }
      return {s.iterator_at(res.first), res.second};
    }
    raw_hash_set& s;
    // Constructed slot. Either moved into place or destroyed.
    slot_type&& slot;
  };
1457 
  // "erases" the object from the container, except that it doesn't actually
  // destroy the object. It only updates all the metadata of the class.
  // This can be used in conjunction with Policy::transfer to move the object to
  // another place.
  void erase_meta_only(const_iterator it) {
    assert(IsFull(*it.inner_.ctrl_) && "erasing a dangling iterator");
    --size_;
    const size_t index = it.inner_.ctrl_ - ctrl_;
    // capacity_ is a power-of-two-minus-one, so `& capacity_` wraps around.
    const size_t index_before = (index - Group::kWidth) & capacity_;
    const auto empty_after = Group(it.inner_.ctrl_).MatchEmpty();
    const auto empty_before = Group(ctrl_ + index_before).MatchEmpty();

    // We count how many consecutive non empties we have to the right and to the
    // left of `it`. If the sum is >= kWidth then there is at least one probe
    // window that might have seen a full group.
    bool was_never_full =
        empty_before && empty_after &&
        static_cast<size_t>(empty_after.TrailingZeros() +
                            empty_before.LeadingZeros()) < Group::kWidth;

    // kEmpty lets probes terminate here; kDeleted (tombstone) is needed when
    // a probe sequence may have skipped over this slot while it was full.
    set_ctrl(index, was_never_full ? kEmpty : kDeleted);
    // Only an Empty slot gives capacity back to the growth budget.
    growth_left() += was_never_full;
    infoz_.RecordErase();
  }
1482 
  // Allocates and initializes the control and slot arrays for the current
  // capacity_. Precondition: capacity_ is non-zero.
  void initialize_slots() {
    assert(capacity_);
    // Folks with custom allocators often make unwarranted assumptions about the
    // behavior of their classes vis-a-vis trivial destructability and what
    // calls they will or wont make.  Avoid sampling for people with custom
    // allocators to get us out of this mess.  This is not a hard guarantee but
    // a workaround while we plan the exact guarantee we want to provide.
    //
    // People are often sloppy with the exact type of their allocator (sometimes
    // it has an extra const or is missing the pair, but rebinds made it work
    // anyway).  To avoid the ambiguity, we work off SlotAlloc which we have
    // bound more carefully.
    if (std::is_same<SlotAlloc, std::allocator<slot_type>>::value &&
        slots_ == nullptr) {
      infoz_ = Sample();
    }

    // Control bytes and slots live in one allocation, described by `layout`.
    auto layout = MakeLayout(capacity_);
    char* mem = static_cast<char*>(
        Allocate<Layout::Alignment()>(&alloc_ref(), layout.AllocSize()));
    ctrl_ = reinterpret_cast<ctrl_t*>(layout.template Pointer<0>(mem));
    slots_ = layout.template Pointer<1>(mem);
    reset_ctrl();
    reset_growth_left();
    infoz_.RecordStorageChanged(size_, capacity_);
  }
1509 
  // Destroys all elements, frees the backing allocation, and resets the table
  // to the unallocated (EmptyGroup) state. No-op if nothing was allocated.
  void destroy_slots() {
    if (!capacity_) return;
    for (size_t i = 0; i != capacity_; ++i) {
      if (IsFull(ctrl_[i])) {
        PolicyTraits::destroy(&alloc_ref(), slots_ + i);
      }
    }
    auto layout = MakeLayout(capacity_);
    // Unpoison before returning the memory to the allocator.
    SanitizerUnpoisonMemoryRegion(slots_, sizeof(slot_type) * capacity_);
    Deallocate<Layout::Alignment()>(&alloc_ref(), ctrl_, layout.AllocSize());
    ctrl_ = EmptyGroup();
    slots_ = nullptr;
    size_ = 0;
    capacity_ = 0;
    growth_left() = 0;
  }
1527 
  // Rehashes the table into a fresh allocation of `new_capacity` slots,
  // transferring (not copying) every element, then frees the old arrays.
  void resize(size_t new_capacity) {
    assert(IsValidCapacity(new_capacity));
    auto* old_ctrl = ctrl_;
    auto* old_slots = slots_;
    const size_t old_capacity = capacity_;
    capacity_ = new_capacity;
    initialize_slots();

    size_t total_probe_length = 0;
    for (size_t i = 0; i != old_capacity; ++i) {
      if (IsFull(old_ctrl[i])) {
        // Rehash and place each element at the first non-full slot of its
        // probe sequence; no equality checks are needed since keys are
        // already unique.
        size_t hash = PolicyTraits::apply(HashElement{hash_ref()},
                                          PolicyTraits::element(old_slots + i));
        auto target = find_first_non_full(hash);
        size_t new_i = target.offset;
        total_probe_length += target.probe_length;
        set_ctrl(new_i, H2(hash));
        PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, old_slots + i);
      }
    }
    if (old_capacity) {
      // Unpoison before handing memory back to the allocator.
      SanitizerUnpoisonMemoryRegion(old_slots,
                                    sizeof(slot_type) * old_capacity);
      auto layout = MakeLayout(old_capacity);
      Deallocate<Layout::Alignment()>(&alloc_ref(), old_ctrl,
                                      layout.AllocSize());
    }
    infoz_.RecordRehash(total_probe_length);
  }
1557 
  // Rehashes the table in place (no reallocation), turning DELETED tombstones
  // back into EMPTY slots so probe sequences shorten. Called when there is
  // enough capacity but tombstones have accumulated (see
  // rehash_and_grow_if_necessary).
  void drop_deletes_without_resize() ABSL_ATTRIBUTE_NOINLINE {
    assert(IsValidCapacity(capacity_));
    assert(!is_small());
    // Algorithm:
    // - mark all DELETED slots as EMPTY
    // - mark all FULL slots as DELETED
    // - for each slot marked as DELETED
    //     hash = Hash(element)
    //     target = find_first_non_full(hash)
    //     if target is in the same group
    //       mark slot as FULL
    //     else if target is EMPTY
    //       transfer element to target
    //       mark slot as EMPTY
    //       mark target as FULL
    //     else if target is DELETED
    //       swap current element with target element
    //       mark target as FULL
    //       repeat procedure for current slot with moved from element (target)
    ConvertDeletedToEmptyAndFullToDeleted(ctrl_, capacity_);
    // Scratch slot used below to swap two elements without a third live slot.
    alignas(slot_type) unsigned char raw[sizeof(slot_type)];
    size_t total_probe_length = 0;
    slot_type* slot = reinterpret_cast<slot_type*>(&raw);
    for (size_t i = 0; i != capacity_; ++i) {
      if (!IsDeleted(ctrl_[i])) continue;
      size_t hash = PolicyTraits::apply(HashElement{hash_ref()},
                                        PolicyTraits::element(slots_ + i));
      auto target = find_first_non_full(hash);
      size_t new_i = target.offset;
      total_probe_length += target.probe_length;

      // Check whether the old index and the new index fall within the same
      // group wrt the hash. If they do, we don't need to move the object, as
      // it already sits in the best probe position we can offer.
      const auto probe_index = [&](size_t pos) {
        return ((pos - probe(hash).offset()) & capacity_) / Group::kWidth;
      };

      // Element doesn't move.
      if (ABSL_PREDICT_TRUE(probe_index(new_i) == probe_index(i))) {
        set_ctrl(i, H2(hash));
        continue;
      }
      if (IsEmpty(ctrl_[new_i])) {
        // Transfer element to the empty spot.
        // set_ctrl poisons/unpoisons the slots so we have to call it at the
        // right time.
        set_ctrl(new_i, H2(hash));
        PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, slots_ + i);
        set_ctrl(i, kEmpty);
      } else {
        assert(IsDeleted(ctrl_[new_i]));
        set_ctrl(new_i, H2(hash));
        // Until we are done rehashing, DELETED marks previously FULL slots.
        // Swap i and new_i elements.
        PolicyTraits::transfer(&alloc_ref(), slot, slots_ + i);
        PolicyTraits::transfer(&alloc_ref(), slots_ + i, slots_ + new_i);
        PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, slot);
        --i;  // repeat: the element now at `i` still needs to be placed
      }
    }
    reset_growth_left();
    infoz_.RecordRehash(total_probe_length);
  }
1622 
1623   void rehash_and_grow_if_necessary() {
1624     if (capacity_ == 0) {
1625       resize(1);
1626     } else if (size() <= CapacityToGrowth(capacity()) / 2) {
1627       // Squash DELETED without growing if there is enough capacity.
1628       drop_deletes_without_resize();
1629     } else {
1630       // Otherwise grow the container.
1631       resize(capacity_ * 2 + 1);
1632     }
1633   }
1634 
  // Returns true iff an element equal to `elem` (compared with the value
  // type's operator== on the whole element, not just the key) is present.
  bool has_element(const value_type& elem) const {
    size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, elem);
    auto seq = probe(hash);
    while (true) {
      Group g{ctrl_ + seq.offset()};
      for (int i : g.Match(H2(hash))) {
        if (ABSL_PREDICT_TRUE(PolicyTraits::element(slots_ + seq.offset(i)) ==
                              elem))
          return true;
      }
      // An EMPTY slot in the group means the probe sequence is exhausted.
      if (ABSL_PREDICT_TRUE(g.MatchEmpty())) return false;
      seq.next();
      assert(seq.index() < capacity_ && "full table!");
    }
    return false;  // Unreachable: the loop above always returns.
  }
1651 
1652   // Probes the raw_hash_set with the probe sequence for hash and returns the
1653   // pointer to the first empty or deleted slot.
  // NOTE: this function must work with tables having both kEmpty and kDeleted
  // in one group. Such tables appear during drop_deletes_without_resize.
1656   //
1657   // This function is very useful when insertions happen and:
1658   // - the input is already a set
1659   // - there are enough slots
1660   // - the element with the hash is not in the table
  // Result of find_first_non_full(): the chosen slot index and the number of
  // probe steps taken to reach it (reported to infoz_ for statistics).
  struct FindInfo {
    size_t offset;
    size_t probe_length;
  };
  // Walks the probe sequence for `hash` and returns the first slot that is
  // EMPTY or DELETED (see the block comment above FindInfo for when this is
  // the right primitive to use).
  FindInfo find_first_non_full(size_t hash) {
    auto seq = probe(hash);
    while (true) {
      Group g{ctrl_ + seq.offset()};
      auto mask = g.MatchEmptyOrDeleted();
      if (mask) {
#if !defined(NDEBUG)
        // We want to add entropy even when ASLR is not enabled.
        // In debug build we will randomly insert in either the front or back of
        // the group.
        // TODO(kfm,sbenza): revisit after we do unconditional mixing
        if (!is_small() && ShouldInsertBackwards(hash, ctrl_)) {
          return {seq.offset(mask.HighestBitSet()), seq.index()};
        }
#endif
        return {seq.offset(mask.LowestBitSet()), seq.index()};
      }
      seq.next();
      assert(seq.index() < capacity_ && "full table!");
    }
  }
1686 
1687   // TODO(alkis): Optimize this assuming *this and that don't overlap.
1688   raw_hash_set& move_assign(raw_hash_set&& that, std::true_type) {
1689     raw_hash_set tmp(std::move(that));
1690     swap(tmp);
1691     return *this;
1692   }
1693   raw_hash_set& move_assign(raw_hash_set&& that, std::false_type) {
1694     raw_hash_set tmp(std::move(that), alloc_ref());
1695     swap(tmp);
1696     return *this;
1697   }
1698 
1699  protected:
  // Looks up `key`. If present, returns {slot index, false}. Otherwise
  // reserves a slot for it (see prepare_insert) and returns
  // {slot index, true}; the caller must then construct the element at that
  // index with emplace_at().
  template <class K>
  std::pair<size_t, bool> find_or_prepare_insert(const K& key) {
    auto hash = hash_ref()(key);
    auto seq = probe(hash);
    while (true) {
      Group g{ctrl_ + seq.offset()};
      for (int i : g.Match(H2(hash))) {
        if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
                EqualElement<K>{key, eq_ref()},
                PolicyTraits::element(slots_ + seq.offset(i)))))
          return {seq.offset(i), false};
      }
      // An EMPTY slot terminates the probe sequence: the key is absent.
      if (ABSL_PREDICT_TRUE(g.MatchEmpty())) break;
      seq.next();
      assert(seq.index() < capacity_ && "full table!");
    }
    return {prepare_insert(hash), true};
  }
1718 
  // Reserves a slot for an element with `hash`, growing/rehashing first if no
  // growth capacity remains, and updates size/growth/ctrl bookkeeping.
  // Returns the index of the reserved slot.
  size_t prepare_insert(size_t hash) ABSL_ATTRIBUTE_NOINLINE {
    auto target = find_first_non_full(hash);
    // Landing on a DELETED slot reuses a tombstone and does not consume
    // growth_left, so we only rehash when the target is a fresh EMPTY slot.
    if (ABSL_PREDICT_FALSE(growth_left() == 0 &&
                           !IsDeleted(ctrl_[target.offset]))) {
      rehash_and_grow_if_necessary();
      target = find_first_non_full(hash);
    }
    ++size_;
    // IsEmpty() is 0 or 1: only filling an EMPTY slot reduces growth_left.
    growth_left() -= IsEmpty(ctrl_[target.offset]);
    set_ctrl(target.offset, H2(hash));
    infoz_.RecordInsert(hash, target.probe_length);
    return target.offset;
  }
1732 
  // Constructs the value in the space pointed by the iterator. This only works
  // after an unsuccessful find_or_prepare_insert() and before any other
  // modifications happen in the raw_hash_set.
  //
  // PRECONDITION: i is an index returned from find_or_prepare_insert(k), where
  // k is the key decomposed from `forward<Args>(args)...`, and the bool
  // returned by find_or_prepare_insert(k) was true.
  // POSTCONDITION: *m.iterator_at(i) == value_type(forward<Args>(args)...).
  template <class... Args>
  void emplace_at(size_t i, Args&&... args) {
    PolicyTraits::construct(&alloc_ref(), slots_ + i,
                            std::forward<Args>(args)...);

    // Sanity check: looking up the freshly constructed element must land on
    // exactly this slot; otherwise the caller's key did not match the value.
    assert(PolicyTraits::apply(FindElement{*this}, *iterator_at(i)) ==
               iterator_at(i) &&
           "constructed value does not match the lookup key");
  }
1750 
  // Returns an iterator (or const_iterator) addressing the slot at index `i`.
  iterator iterator_at(size_t i) { return {ctrl_ + i, slots_ + i}; }
  const_iterator iterator_at(size_t i) const { return {ctrl_ + i, slots_ + i}; }
1753 
1754  private:
1755   friend struct RawHashSetTestOnlyAccess;
1756 
  // Returns the group-at-a-time probe sequence for `hash` (quadratic probing,
  // per the file header), seeded with H1(hash, ctrl_) and wrapping at
  // capacity_.
  probe_seq<Group::kWidth> probe(size_t hash) const {
    return probe_seq<Group::kWidth>(H1(hash, ctrl_), capacity_);
  }
1760 
  // Reset all ctrl bytes back to kEmpty, except the sentinel.
  void reset_ctrl() {
    // Covers the per-slot bytes plus the cloned group at the end, then
    // restores the sentinel byte that terminates iteration.
    std::memset(ctrl_, kEmpty, capacity_ + Group::kWidth);
    ctrl_[capacity_] = kSentinel;
    // Every slot is now empty, so the whole slot array is unreadable to ASan.
    SanitizerPoisonMemoryRegion(slots_, sizeof(slot_type) * capacity_);
  }
1767 
  // Recomputes growth_left from scratch: max load for the current capacity
  // minus the elements already stored.
  void reset_growth_left() {
    growth_left() = CapacityToGrowth(capacity()) - size_;
  }
1771 
1772   // Sets the control byte, and if `i < Group::kWidth`, set the cloned byte at
1773   // the end too.
1774   void set_ctrl(size_t i, ctrl_t h) {
1775     assert(i < capacity_);
1776 
1777     if (IsFull(h)) {
1778       SanitizerUnpoisonObject(slots_ + i);
1779     } else {
1780       SanitizerPoisonObject(slots_ + i);
1781     }
1782 
1783     ctrl_[i] = h;
1784     ctrl_[((i - Group::kWidth) & capacity_) + 1 +
1785           ((Group::kWidth - 1) & capacity_)] = h;
1786   }
1787 
  // Number of elements that may still be inserted before the table must
  // rehash (stored in slot 0 of the compressed settings tuple).
  size_t& growth_left() { return settings_.template get<0>(); }
1789 
  // The representation of the object has two modes:
  //  - small: For capacities < kWidth-1
  //  - large: For the rest.
  //
  // Differences:
  //  - In small mode we are able to use the whole capacity. The extra control
  //  bytes give us at least one "empty" control byte to stop the iteration.
  //  This is important to make 1 a valid capacity.
  //
  //  - In small mode only the first `capacity()` control bytes after the
  //  sentinel are valid. The rest contain dummy kEmpty values that do not
  //  represent a real slot. This is important to take into account on
  //  find_first_non_full(), where we never try ShouldInsertBackwards() for
  //  small tables.
  // True when the table is in "small" mode, as described above.
  bool is_small() const { return capacity_ < Group::kWidth - 1; }
1805 
  // Accessors for the hasher, key_equal and allocator stored in the
  // compressed settings tuple (slots 1, 2 and 3 respectively).
  hasher& hash_ref() { return settings_.template get<1>(); }
  const hasher& hash_ref() const { return settings_.template get<1>(); }
  key_equal& eq_ref() { return settings_.template get<2>(); }
  const key_equal& eq_ref() const { return settings_.template get<2>(); }
  allocator_type& alloc_ref() { return settings_.template get<3>(); }
  const allocator_type& alloc_ref() const {
    return settings_.template get<3>();
  }
1814 
  // TODO(alkis): Investigate removing some of these fields:
  // - ctrl/slots can be derived from each other
  // - size can be moved into the slot array
  ctrl_t* ctrl_ = EmptyGroup();    // [(capacity + 1) * ctrl_t]
  slot_type* slots_ = nullptr;     // [capacity * slot_type]
  size_t size_ = 0;                // number of full slots
  size_t capacity_ = 0;            // total number of slots
  HashtablezInfoHandle infoz_;     // sampling handle for hashtablez stats
  // CompressedTuple lets the (typically empty) hasher, key_equal and
  // allocator functors share storage with growth_left.
  absl::container_internal::CompressedTuple<size_t /* growth_left */, hasher,
                                            key_equal, allocator_type>
      settings_{0, hasher{}, key_equal{}, allocator_type{}};
1826 };
1827 
1828 // Erases all elements that satisfy the predicate `pred` from the container `c`.
1829 template <typename P, typename H, typename E, typename A, typename Predicate>
1830 void EraseIf(Predicate pred, raw_hash_set<P, H, E, A>* c) {
1831   for (auto it = c->begin(), last = c->end(); it != last;) {
1832     auto copy_it = it++;
1833     if (pred(*copy_it)) {
1834       c->erase(copy_it);
1835     }
1836   }
1837 }
1838 
1839 namespace hashtable_debug_internal {
// Debug/introspection hooks, enabled for any Set exposing a `raw_hash_set`
// typedef. Reaches into the table's private state via friendship.
template <typename Set>
struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
  using Traits = typename Set::PolicyTraits;
  using Slot = typename Traits::slot_type;

  // Returns how many probe steps are needed to find `key`: each unequal
  // H2-match comparison and each group hop counts as one step; 0 means the
  // very first candidate matched.
  static size_t GetNumProbes(const Set& set,
                             const typename Set::key_type& key) {
    size_t num_probes = 0;
    size_t hash = set.hash_ref()(key);
    auto seq = set.probe(hash);
    while (true) {
      container_internal::Group g{set.ctrl_ + seq.offset()};
      for (int i : g.Match(container_internal::H2(hash))) {
        if (Traits::apply(
                typename Set::template EqualElement<typename Set::key_type>{
                    key, set.eq_ref()},
                Traits::element(set.slots_ + seq.offset(i))))
          return num_probes;
        ++num_probes;
      }
      // An EMPTY slot ends the probe sequence: the key is not present.
      if (g.MatchEmpty()) return num_probes;
      seq.next();
      ++num_probes;
    }
  }

  // Returns the bytes allocated by `c`: the ctrl/slot layout plus whatever
  // per-element space the policy reports.
  static size_t AllocatedByteSize(const Set& c) {
    size_t capacity = c.capacity_;
    if (capacity == 0) return 0;
    auto layout = Set::MakeLayout(capacity);
    size_t m = layout.AllocSize();

    // ~size_t{} acts as an "unknown per-slot size" sentinel, in which case
    // each full slot must be queried individually.
    size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
    if (per_slot != ~size_t{}) {
      m += per_slot * c.size();
    } else {
      for (size_t i = 0; i != capacity; ++i) {
        if (container_internal::IsFull(c.ctrl_[i])) {
          m += Traits::space_used(c.slots_ + i);
        }
      }
    }
    return m;
  }

  // Lower bound on AllocatedByteSize() for a table holding `size` elements,
  // computed without constructing one.
  static size_t LowerBoundAllocatedByteSize(size_t size) {
    size_t capacity = GrowthToLowerboundCapacity(size);
    if (capacity == 0) return 0;
    auto layout = Set::MakeLayout(NormalizeCapacity(capacity));
    size_t m = layout.AllocSize();
    size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
    if (per_slot != ~size_t{}) {
      m += per_slot * size;
    }
    return m;
  }
};
1897 
1898 }  // namespace hashtable_debug_internal
1899 }  // namespace container_internal
1900 ABSL_NAMESPACE_END
1901 }  // namespace absl
1902 
1903 #endif  // ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
1904