1 // Copyright 2018 The Abseil Authors.
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 //      https://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14 //
15 // An open-addressing
16 // hashtable with quadratic probing.
17 //
18 // This is a low level hashtable on top of which different interfaces can be
19 // implemented, like flat_hash_set, node_hash_set, string_hash_set, etc.
20 //
21 // The table interface is similar to that of std::unordered_set. Notable
22 // differences are that most member functions support heterogeneous keys when
23 // BOTH the hash and eq functions are marked as transparent. They do so by
24 // providing a typedef called `is_transparent`.
25 //
26 // When heterogeneous lookup is enabled, functions that take key_type act as if
27 // they have an overload set like:
28 //
29 //   iterator find(const key_type& key);
30 //   template <class K>
31 //   iterator find(const K& key);
32 //
33 //   size_type erase(const key_type& key);
34 //   template <class K>
35 //   size_type erase(const K& key);
36 //
37 //   std::pair<iterator, iterator> equal_range(const key_type& key);
38 //   template <class K>
39 //   std::pair<iterator, iterator> equal_range(const K& key);
40 //
41 // When heterogeneous lookup is disabled, only the explicit `key_type` overloads
42 // exist.
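//
// For illustration only (these functor names are hypothetical, not part of
// this file), a transparent hash/eq pair that would enable the heterogeneous
// overloads above for string keys might look like:
//
//   struct StringHash {
//     using is_transparent = void;
//     size_t operator()(absl::string_view v) const {
//       return absl::Hash<absl::string_view>{}(v);
//     }
//   };
//   struct StringEq {
//     using is_transparent = void;
//     bool operator()(absl::string_view a, absl::string_view b) const {
//       return a == b;
//     }
//   };
//
// With such a pair, a table keyed on std::string can be queried with an
// absl::string_view or const char* without materializing a std::string.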
43 //
44 // find() also supports passing the hash explicitly:
45 //
46 //   iterator find(const key_type& key, size_t hash);
47 //   template <class U>
48 //   iterator find(const U& key, size_t hash);
49 //
50 // In addition, the pointer-to-element and iterator stability guarantees are
51 // weaker: all iterators and pointers are invalidated after a new element is
52 // inserted.
53 //
54 // IMPLEMENTATION DETAILS
55 //
56 // # Table Layout
57 //
58 // A raw_hash_set's backing array consists of control bytes followed by slots
59 // that may or may not contain objects.
60 //
61 // The layout of the backing array, for `capacity` slots, is thus, as a
62 // pseudo-struct:
63 //
64 //   struct BackingArray {
65 //     // The number of elements we can insert before growing the capacity.
66 //     size_t growth_left;
67 //     // Control bytes for the "real" slots.
68 //     ctrl_t ctrl[capacity];
69 //     // Always `ctrl_t::kSentinel`. This is used by iterators to find when to
70 //     // stop and serves no other purpose.
71 //     ctrl_t sentinel;
72 //     // A copy of the first `kWidth - 1` elements of `ctrl`. This is used so
73 //     // that if a probe sequence picks a value near the end of `ctrl`,
74 //     // `Group` will have valid control bytes to look at.
75 //     ctrl_t clones[kWidth - 1];
76 //     // The actual slot data.
77 //     slot_type slots[capacity];
78 //   };
79 //
80 // The length of this array is computed by `AllocSize()` below.
81 //
82 // Control bytes (`ctrl_t`) are bytes (collected into groups of a
83 // platform-specific size) that define the state of the corresponding slot in
84 // the slot array. Group manipulation is tightly optimized to be as efficient
85 // as possible: SSE and friends on x86, clever bit operations on other arches.
86 //
87 //      Group 1         Group 2        Group 3
88 // +---------------+---------------+---------------+
89 // | | | | | | | | | | | | | | | | | | | | | | | | |
90 // +---------------+---------------+---------------+
91 //
92 // Each control byte is either a special value marking an empty slot, a deleted
93 // slot (sometimes called a *tombstone*), or the end-of-table sentinel used by
94 // iterators, or, if the slot is occupied, seven bits (H2) from the hash of the
95 // value in the corresponding slot.
96 //
97 // Storing control bytes in a separate array also has beneficial cache effects,
98 // since more logical slots will fit into a cache line.
99 //
100 // # Hashing
101 //
102 // We compute two separate hashes, `H1` and `H2`, from the hash of an object.
103 // `H1(hash(x))` is an index into `slots`, and essentially the starting point
104 // for the probe sequence. `H2(hash(x))` is a 7-bit value used to filter out
105 // objects that cannot possibly be the one we are looking for.
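//
// As a concrete illustration (ignoring the per-table salt that H1 mixes in
// further below): if hash(x) == 0x1AB, then H2(hash(x)) == 0x1AB & 0x7F ==
// 0x2B (the low 7 bits, stored in the control byte), and H1(hash(x)) ==
// 0x1AB >> 7 == 0x3 (used, modulo the capacity, to start the probe sequence).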
106 //
107 // # Table operations.
108 //
109 // The key operations are `insert`, `find`, and `erase`.
110 //
111 // Since `insert` and `erase` are implemented in terms of `find`, we describe
112 // `find` first. To `find` a value `x`, we compute `hash(x)`. From
113 // `H1(hash(x))` and the capacity, we construct a `probe_seq` that visits every
114 // group of slots in some interesting order.
115 //
116 // We now walk through these indices. At each index, we select the entire group
117 // starting with that index and extract potential candidates: occupied slots
118 // with a control byte equal to `H2(hash(x))`. If we find an empty slot in the
119 // group, we stop: the value is not in the table. Each candidate slot `y` is
120 // compared with `x`; if `x == y`, we are done and return `&y`; otherwise we
121 // continue to the next probe index. Tombstones effectively behave like full
122 // slots that never match the value we're looking for.
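//
// Schematically (a sketch of the prose above, not the exact code later in
// this file; `slots` and the return convention are illustrative):
//
//   auto seq = probe(ctrl, capacity, hash(x));
//   while (true) {
//     Group g{ctrl + seq.offset()};
//     for (uint32_t i : g.Match(H2(hash(x)))) {
//       if (x == slots[seq.offset(i)]) return &slots[seq.offset(i)];
//     }
//     if (g.MaskEmpty()) return nullptr;  // hit an empty slot: not present
//     seq.next();
//   }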
123 //
124 // The `H2` bits ensure that when we compare a slot to an object with `==`, we
125 // are likely to have actually found the object.  That is, the chance is low
126 // that `==` is called and returns `false`.  Thus, when we search for an
127 // object, we are unlikely to call `==` many times.  This likelihood can be
128 // analyzed as follows (assuming that H2 is a random enough hash function).
129 //
130 // Let's assume that there are `k` "wrong" objects that must be examined in a
131 // probe sequence.  For example, when doing a `find` on an object that is in the
132 // table, `k` is the number of objects between the start of the probe sequence
133 // and the final found object (not including the final found object).  The
134 // expected number of objects with an H2 match is then `k/128`.  Measurements
135 // and analysis indicate that even at high load factors, `k` is less than 32,
136 // meaning that the expected number of "false positive" comparisons we must
137 // perform per `find` is less than 32/128 = 1/4.
138 //
139 // `insert` is implemented in terms of `unchecked_insert`, which inserts a
140 // value presumed to not be in the table (violating this requirement will cause
141 // the table to behave erratically). Given `x` and its hash `hash(x)`, to insert
142 // it, we construct a `probe_seq` once again, and use it to find the first
143 // group with an unoccupied (empty *or* deleted) slot. We place `x` into the
144 // first such slot in the group and mark it as full with `x`'s H2.
145 //
146 // To `insert`, we compose `unchecked_insert` with `find`. We compute `hash(x)` and
147 // perform a `find` to see if it's already present; if it is, we're done. If
148 // it's not, we may decide the table is getting overcrowded (i.e. the load
149 // factor is greater than 7/8 for big tables; `is_small()` tables use a max load
150 // factor of 1); in this case, we allocate a bigger array, `unchecked_insert`
151 // each element of the table into the new array (we know that no insertion here
152 // will insert an already-present value), and discard the old backing array. At
153 // this point, we may `unchecked_insert` the value `x`.
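//
// In outline (again a sketch of the prose, not the exact code below):
//
//   if (find(x) != end()) return;        // already present, nothing to do
//   if (growth_left() == 0) {
//     resize(NextCapacity(capacity()));  // grow, re-inserting every element
//   }
//   unchecked_insert(x);                 // x is known not to be present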
154 //
155 // Below, `unchecked_insert` is partly implemented by `prepare_insert`, which
156 // presents a viable, initialized slot pointee to the caller.
157 //
158 // `erase` is implemented in terms of `erase_at`, which takes an index to a
159 // slot. Given an offset, we simply create a tombstone and destroy its contents.
160 // If we can prove that the slot would not appear in a probe sequence, we can
161 // mark the slot as empty instead. We can prove this by observing that if a
162 // group has any empty slots, it has never been full (assuming we never create
163 // an empty slot in a group with no empties, which this heuristic guarantees we
164 // never do), so `find` would stop at this group anyway (since it does not
165 // probe beyond groups with empties).
166 //
167 // `erase` is `erase_at` composed with `find`: if we
168 // have a value `x`, we can perform a `find`, and then `erase_at` the resulting
169 // slot.
170 //
171 // To iterate, we simply traverse the array, skipping empty and deleted slots
172 // and stopping when we hit a `kSentinel`.
173 
174 #ifndef ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
175 #define ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
176 
177 #include <algorithm>
178 #include <cmath>
179 #include <cstddef>
180 #include <cstdint>
181 #include <cstring>
182 #include <iterator>
183 #include <limits>
184 #include <memory>
185 #include <string>
186 #include <tuple>
187 #include <type_traits>
188 #include <utility>
189 
190 #include "absl/base/config.h"
191 #include "absl/base/internal/endian.h"
192 #include "absl/base/internal/raw_logging.h"
193 #include "absl/base/optimization.h"
194 #include "absl/base/port.h"
195 #include "absl/base/prefetch.h"
196 #include "absl/container/internal/common.h"
197 #include "absl/container/internal/compressed_tuple.h"
198 #include "absl/container/internal/container_memory.h"
199 #include "absl/container/internal/hash_policy_traits.h"
200 #include "absl/container/internal/hashtable_debug_hooks.h"
201 #include "absl/container/internal/hashtablez_sampler.h"
202 #include "absl/memory/memory.h"
203 #include "absl/meta/type_traits.h"
204 #include "absl/numeric/bits.h"
205 #include "absl/utility/utility.h"
206 
207 #ifdef ABSL_INTERNAL_HAVE_SSE2
208 #include <emmintrin.h>
209 #endif
210 
211 #ifdef ABSL_INTERNAL_HAVE_SSSE3
212 #include <tmmintrin.h>
213 #endif
214 
215 #ifdef _MSC_VER
216 #include <intrin.h>
217 #endif
218 
219 #ifdef ABSL_INTERNAL_HAVE_ARM_NEON
220 #include <arm_neon.h>
221 #endif
222 
223 namespace absl {
224 ABSL_NAMESPACE_BEGIN
225 namespace container_internal {
226 
227 #ifdef ABSL_SWISSTABLE_ENABLE_GENERATIONS
228 #error ABSL_SWISSTABLE_ENABLE_GENERATIONS cannot be directly set
229 #elif defined(ABSL_HAVE_ADDRESS_SANITIZER) || \
230     defined(ABSL_HAVE_MEMORY_SANITIZER)
231 // When compiled in sanitizer mode, we add generation integers to the backing
232 // array and iterators. In the backing array, we store the generation between
233 // the control bytes and the slots. When iterators are dereferenced, we assert
234 // that the container has not been mutated in a way that could cause iterator
235 // invalidation since the iterator was initialized.
236 #define ABSL_SWISSTABLE_ENABLE_GENERATIONS
237 #endif
238 
239 // We use uint8_t so we don't need to worry about padding.
240 using GenerationType = uint8_t;
241 
242 // A sentinel value for empty generations. Using 0 makes it easy to constexpr
243 // initialize an array of this value.
244 constexpr GenerationType SentinelEmptyGeneration() { return 0; }
245 
246 constexpr GenerationType NextGeneration(GenerationType generation) {
247   return ++generation == SentinelEmptyGeneration() ? ++generation : generation;
248 }
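// For example (illustrative only): NextGeneration(41) == 42, while
// NextGeneration(255) wraps to 0, which collides with the empty sentinel, so
// the result is bumped once more to 1.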
249 
250 #ifdef ABSL_SWISSTABLE_ENABLE_GENERATIONS
251 constexpr bool SwisstableGenerationsEnabled() { return true; }
252 constexpr size_t NumGenerationBytes() { return sizeof(GenerationType); }
253 #else
254 constexpr bool SwisstableGenerationsEnabled() { return false; }
255 constexpr size_t NumGenerationBytes() { return 0; }
256 #endif
257 
258 template <typename AllocType>
259 void SwapAlloc(AllocType& lhs, AllocType& rhs,
260                std::true_type /* propagate_on_container_swap */) {
261   using std::swap;
262   swap(lhs, rhs);
263 }
264 template <typename AllocType>
265 void SwapAlloc(AllocType& /*lhs*/, AllocType& /*rhs*/,
266                std::false_type /* propagate_on_container_swap */) {}
267 
268 // The state for a probe sequence.
269 //
270 // Currently, the sequence is a triangular progression of the form
271 //
272 //   p(i) := Width * (i^2 + i)/2 + hash (mod mask + 1)
273 //
274 // The use of `Width` ensures that each probe step does not overlap groups;
275 // the sequence effectively outputs the addresses of *groups* (although not
276 // necessarily aligned to any boundary). The `Group` machinery allows us
277 // to check an entire group with minimal branching.
278 //
279 // Wrapping around at `mask + 1` is important, but not for the obvious reason.
280 // As described above, the first few entries of the control byte array
281 // are mirrored at the end of the array, which `Group` will find and use
282 // for selecting candidates. However, when those candidates' slots are
283 // actually inspected, there are no corresponding slots for the cloned bytes,
284 // so we need to make sure we've treated those offsets as "wrapping around".
285 //
286 // It turns out that this probe sequence visits every group exactly once if the
287 // number of groups is a power of two, since (i^2+i)/2 is a bijection in
288 // Z/(2^m). See https://en.wikipedia.org/wiki/Quadratic_probing
289 template <size_t Width>
290 class probe_seq {
291  public:
292   // Creates a new probe sequence using `hash` as the initial value of the
293   // sequence and `mask` (usually the capacity of the table) as the mask to
294   // apply to each value in the progression.
295   probe_seq(size_t hash, size_t mask) {
296     assert(((mask + 1) & mask) == 0 && "not a mask");
297     mask_ = mask;
298     offset_ = hash & mask_;
299   }
300 
301   // The offset within the table, i.e., the value `p(i)` above.
302   size_t offset() const { return offset_; }
303   size_t offset(size_t i) const { return (offset_ + i) & mask_; }
304 
305   void next() {
306     index_ += Width;
307     offset_ += index_;
308     offset_ &= mask_;
309   }
310   // 0-based probe index, a multiple of `Width`.
311   size_t index() const { return index_; }
312 
313  private:
314   size_t mask_;
315   size_t offset_;
316   size_t index_ = 0;
317 };
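// As a worked example (values chosen for illustration): with Width == 16 and
// mask == 63 (capacity 63, i.e. four 16-slot strides), a hash whose low bits
// are 3 yields the offset sequence
//
//   offset(): 3, 19, 51, 35, ...
//
// i.e. 3 + 16*(i^2+i)/2 (mod 64), which starts a probe window in each of the
// four strides exactly once within the first four probes.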
318 
319 template <class ContainerKey, class Hash, class Eq>
320 struct RequireUsableKey {
321   template <class PassedKey, class... Args>
322   std::pair<
323       decltype(std::declval<const Hash&>()(std::declval<const PassedKey&>())),
324       decltype(std::declval<const Eq&>()(std::declval<const ContainerKey&>(),
325                                          std::declval<const PassedKey&>()))>*
326   operator()(const PassedKey&, const Args&...) const;
327 };
328 
329 template <class E, class Policy, class Hash, class Eq, class... Ts>
330 struct IsDecomposable : std::false_type {};
331 
332 template <class Policy, class Hash, class Eq, class... Ts>
333 struct IsDecomposable<
334     absl::void_t<decltype(Policy::apply(
335         RequireUsableKey<typename Policy::key_type, Hash, Eq>(),
336         std::declval<Ts>()...))>,
337     Policy, Hash, Eq, Ts...> : std::true_type {};
338 
339 // TODO(alkis): Switch to std::is_nothrow_swappable when gcc/clang supports it.
340 template <class T>
341 constexpr bool IsNoThrowSwappable(std::true_type = {} /* is_swappable */) {
342   using std::swap;
343   return noexcept(swap(std::declval<T&>(), std::declval<T&>()));
344 }
345 template <class T>
346 constexpr bool IsNoThrowSwappable(std::false_type /* is_swappable */) {
347   return false;
348 }
349 
350 template <typename T>
351 uint32_t TrailingZeros(T x) {
352   ABSL_ASSUME(x != 0);
353   return static_cast<uint32_t>(countr_zero(x));
354 }
355 
356 // An abstract bitmask, such as that emitted by a SIMD instruction.
357 //
358 // Specifically, this type implements a simple bitset whose representation is
359 // controlled by `SignificantBits` and `Shift`. `SignificantBits` is the number
360 // of abstract bits in the bitset, while `Shift` is the log-base-two of the
361 // width of an abstract bit in the representation.
362 // This mask provides operations for any number of real bits set in an abstract
363 // bit. To add iteration on top of that, the implementation must guarantee that
364 // no more than one real bit is set in each abstract bit.
365 template <class T, int SignificantBits, int Shift = 0>
366 class NonIterableBitMask {
367  public:
368   explicit NonIterableBitMask(T mask) : mask_(mask) {}
369 
370   explicit operator bool() const { return this->mask_ != 0; }
371 
372   // Returns the index of the lowest *abstract* bit set in `self`.
373   uint32_t LowestBitSet() const {
374     return container_internal::TrailingZeros(mask_) >> Shift;
375   }
376 
377   // Returns the index of the highest *abstract* bit set in `self`.
378   uint32_t HighestBitSet() const {
379     return static_cast<uint32_t>((bit_width(mask_) - 1) >> Shift);
380   }
381 
382   // Returns the number of trailing zero *abstract* bits.
383   uint32_t TrailingZeros() const {
384     return container_internal::TrailingZeros(mask_) >> Shift;
385   }
386 
387   // Returns the number of leading zero *abstract* bits.
388   uint32_t LeadingZeros() const {
389     constexpr int total_significant_bits = SignificantBits << Shift;
390     constexpr int extra_bits = sizeof(T) * 8 - total_significant_bits;
391     return static_cast<uint32_t>(countl_zero(mask_ << extra_bits)) >> Shift;
392   }
393 
394   T mask_;
395 };
396 
397 // A mask that can be iterated over.
398 //
399 // For example, when `SignificantBits` is 16 and `Shift` is zero, this is just
400 // an ordinary 16-bit bitset occupying the low 16 bits of `mask`. When
401 // `SignificantBits` is 8 and `Shift` is 3, abstract bits are represented as
402 // the bytes `0x00` and `0x80`, and it occupies all 64 bits of the bitmask.
403 //
404 // For example:
405 //   for (int i : BitMask<uint32_t, 16>(0b101)) -> yields 0, 2
406 //   for (int i : BitMask<uint64_t, 8, 3>(0x0000000080800000)) -> yields 2, 3
407 template <class T, int SignificantBits, int Shift = 0>
408 class BitMask : public NonIterableBitMask<T, SignificantBits, Shift> {
409   using Base = NonIterableBitMask<T, SignificantBits, Shift>;
410   static_assert(std::is_unsigned<T>::value, "");
411   static_assert(Shift == 0 || Shift == 3, "");
412 
413  public:
414   explicit BitMask(T mask) : Base(mask) {}
415   // BitMask is an iterator over the indices of its abstract bits.
416   using value_type = int;
417   using iterator = BitMask;
418   using const_iterator = BitMask;
419 
420   BitMask& operator++() {
421     this->mask_ &= (this->mask_ - 1);
422     return *this;
423   }
424 
425   uint32_t operator*() const { return Base::LowestBitSet(); }
426 
427   BitMask begin() const { return *this; }
428   BitMask end() const { return BitMask(0); }
429 
430  private:
431   friend bool operator==(const BitMask& a, const BitMask& b) {
432     return a.mask_ == b.mask_;
433   }
434   friend bool operator!=(const BitMask& a, const BitMask& b) {
435     return a.mask_ != b.mask_;
436   }
437 };
438 
439 using h2_t = uint8_t;
440 
441 // The values here are selected for maximum performance. See the static asserts
442 // below for details.
443 
444 // A `ctrl_t` is a single control byte, which can have one of four
445 // states: empty, deleted, full (which has an associated seven-bit h2_t value)
446 // and the sentinel. They have the following bit patterns:
447 //
448 //      empty: 1 0 0 0 0 0 0 0
449 //    deleted: 1 1 1 1 1 1 1 0
450 //       full: 0 h h h h h h h  // h represents the hash bits.
451 //   sentinel: 1 1 1 1 1 1 1 1
452 //
453 // These values are specifically tuned for SSE-flavored SIMD.
454 // The static_asserts below detail the source of these choices.
455 //
456 // We use an enum class so that when strict aliasing is enabled, the compiler
457 // knows ctrl_t doesn't alias other types.
458 enum class ctrl_t : int8_t {
459   kEmpty = -128,   // 0b10000000
460   kDeleted = -2,   // 0b11111110
461   kSentinel = -1,  // 0b11111111
462 };
463 static_assert(
464     (static_cast<int8_t>(ctrl_t::kEmpty) &
465      static_cast<int8_t>(ctrl_t::kDeleted) &
466      static_cast<int8_t>(ctrl_t::kSentinel) & 0x80) != 0,
467     "Special markers need to have the MSB to make checking for them efficient");
468 static_assert(
469     ctrl_t::kEmpty < ctrl_t::kSentinel && ctrl_t::kDeleted < ctrl_t::kSentinel,
470     "ctrl_t::kEmpty and ctrl_t::kDeleted must be smaller than "
471     "ctrl_t::kSentinel to make the SIMD test of IsEmptyOrDeleted() efficient");
472 static_assert(
473     ctrl_t::kSentinel == static_cast<ctrl_t>(-1),
474     "ctrl_t::kSentinel must be -1 to elide loading it from memory into SIMD "
475     "registers (pcmpeqd xmm, xmm)");
476 static_assert(ctrl_t::kEmpty == static_cast<ctrl_t>(-128),
477               "ctrl_t::kEmpty must be -128 to make the SIMD check for its "
478               "existence efficient (psignb xmm, xmm)");
479 static_assert(
480     (~static_cast<int8_t>(ctrl_t::kEmpty) &
481      ~static_cast<int8_t>(ctrl_t::kDeleted) &
482      static_cast<int8_t>(ctrl_t::kSentinel) & 0x7F) != 0,
483     "ctrl_t::kEmpty and ctrl_t::kDeleted must share an unset bit that is not "
484     "shared by ctrl_t::kSentinel to make the scalar test for "
485     "MaskEmptyOrDeleted() efficient");
486 static_assert(ctrl_t::kDeleted == static_cast<ctrl_t>(-2),
487               "ctrl_t::kDeleted must be -2 to make the implementation of "
488               "ConvertSpecialToEmptyAndFullToDeleted efficient");
489 
490 // See definition comment for why this is size 32.
491 ABSL_DLL extern const ctrl_t kEmptyGroup[32];
492 
493 // Returns a pointer to a control byte group that can be used by empty tables.
494 inline ctrl_t* EmptyGroup() {
495   // Const must be cast away here; no uses of this function will actually write
496   // to it, because it is only used for empty tables.
497   return const_cast<ctrl_t*>(kEmptyGroup + 16);
498 }
499 
500 // Returns a pointer to a generation to use for an empty hashtable.
501 GenerationType* EmptyGeneration();
502 
503 // Returns whether `generation` is a generation for an empty hashtable that
504 // could be returned by EmptyGeneration().
505 inline bool IsEmptyGeneration(const GenerationType* generation) {
506   return *generation == SentinelEmptyGeneration();
507 }
508 
509 // Mixes a randomly generated per-process seed with `hash` and `ctrl` to
510 // randomize insertion order within groups.
511 bool ShouldInsertBackwards(size_t hash, const ctrl_t* ctrl);
512 
513 // Returns a per-table hash salt, which changes on resize. This gets mixed into
514 // H1 to randomize iteration order per-table.
515 //
516 // The seed consists of the ctrl_ pointer, which adds enough entropy to ensure
517 // non-determinism of iteration order in most cases.
518 inline size_t PerTableSalt(const ctrl_t* ctrl) {
519   // The low bits of the pointer have little or no entropy because of
520   // alignment. We shift the pointer to try to use higher entropy bits. A
521   // good number seems to be 12 bits, because that aligns with page size.
522   return reinterpret_cast<uintptr_t>(ctrl) >> 12;
523 }
524 // Extracts the H1 portion of a hash: 57 bits mixed with a per-table salt.
525 inline size_t H1(size_t hash, const ctrl_t* ctrl) {
526   return (hash >> 7) ^ PerTableSalt(ctrl);
527 }
528 
529 // Extracts the H2 portion of a hash: the 7 bits not used for H1.
530 //
531 // These are used as an occupied control byte.
532 inline h2_t H2(size_t hash) { return hash & 0x7F; }
533 
534 // Helpers for checking the state of a control byte.
535 inline bool IsEmpty(ctrl_t c) { return c == ctrl_t::kEmpty; }
536 inline bool IsFull(ctrl_t c) { return c >= static_cast<ctrl_t>(0); }
537 inline bool IsDeleted(ctrl_t c) { return c == ctrl_t::kDeleted; }
538 inline bool IsEmptyOrDeleted(ctrl_t c) { return c < ctrl_t::kSentinel; }
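// For example, a full slot whose stored H2 is 0x2B has control byte 0x2B (MSB
// clear), so IsFull() is true and the other three predicates are false;
// ctrl_t::kEmpty (-128) and ctrl_t::kDeleted (-2) both satisfy
// IsEmptyOrDeleted() because they compare less than ctrl_t::kSentinel (-1).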
539 
540 #ifdef ABSL_INTERNAL_HAVE_SSE2
541 // Quick reference guide for intrinsics used below:
542 //
543 // * __m128i: An XMM (128-bit) word.
544 //
545 // * _mm_setzero_si128: Returns a zero vector.
546 // * _mm_set1_epi8:     Returns a vector with the same i8 in each lane.
547 //
548 // * _mm_subs_epi8:    Saturating-subtracts two i8 vectors.
549 // * _mm_and_si128:    Ands two i128s together.
550 // * _mm_or_si128:     Ors two i128s together.
551 // * _mm_andnot_si128: And-nots two i128s together.
552 //
553 // * _mm_cmpeq_epi8: Component-wise compares two i8 vectors for equality,
554 //                   filling each lane with 0x00 or 0xff.
555 // * _mm_cmpgt_epi8: Same as above, but using > rather than ==.
556 //
557 // * _mm_loadu_si128:  Performs an unaligned load of an i128.
558 // * _mm_storeu_si128: Performs an unaligned store of an i128.
559 //
560 // * _mm_sign_epi8:     Retains, negates, or zeroes each i8 lane of the first
561 //                      argument if the corresponding lane of the second
562 //                      argument is positive, negative, or zero, respectively.
563 // * _mm_movemask_epi8: Selects the sign bit out of each i8 lane and produces a
564 //                      bitmask consisting of those bits.
565 // * _mm_shuffle_epi8:  Selects i8s from the first argument, using the low
566 //                      four bits of each i8 lane in the second argument as
567 //                      indices.
568 
569 // https://github.com/abseil/abseil-cpp/issues/209
570 // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=87853
571 // _mm_cmpgt_epi8 is broken under GCC with -funsigned-char
572 // Work around this by emulating the signed comparison (see
573 // _mm_cmpgt_epi8_fixed below) when using -funsigned-char under GCC.
574 inline __m128i _mm_cmpgt_epi8_fixed(__m128i a, __m128i b) {
575 #if defined(__GNUC__) && !defined(__clang__)
576   if (std::is_unsigned<char>::value) {
577     const __m128i mask = _mm_set1_epi8(0x80);
578     const __m128i diff = _mm_subs_epi8(b, a);
579     return _mm_cmpeq_epi8(_mm_and_si128(diff, mask), mask);
580   }
581 #endif
582   return _mm_cmpgt_epi8(a, b);
583 }
584 
585 struct GroupSse2Impl {
586   static constexpr size_t kWidth = 16;  // the number of slots per group
587 
588   explicit GroupSse2Impl(const ctrl_t* pos) {
589     ctrl = _mm_loadu_si128(reinterpret_cast<const __m128i*>(pos));
590   }
591 
592   // Returns a bitmask representing the positions of slots that match hash.
593   BitMask<uint32_t, kWidth> Match(h2_t hash) const {
594     auto match = _mm_set1_epi8(static_cast<char>(hash));
595     return BitMask<uint32_t, kWidth>(
596         static_cast<uint32_t>(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl))));
597   }
598 
599   // Returns a bitmask representing the positions of empty slots.
600   NonIterableBitMask<uint32_t, kWidth> MaskEmpty() const {
601 #ifdef ABSL_INTERNAL_HAVE_SSSE3
602     // This only works because ctrl_t::kEmpty is -128.
603     return NonIterableBitMask<uint32_t, kWidth>(
604         static_cast<uint32_t>(_mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl))));
605 #else
606     auto match = _mm_set1_epi8(static_cast<char>(ctrl_t::kEmpty));
607     return NonIterableBitMask<uint32_t, kWidth>(
608         static_cast<uint32_t>(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl))));
609 #endif
610   }
611 
612   // Returns a bitmask representing the positions of empty or deleted slots.
613   NonIterableBitMask<uint32_t, kWidth> MaskEmptyOrDeleted() const {
614     auto special = _mm_set1_epi8(static_cast<char>(ctrl_t::kSentinel));
615     return NonIterableBitMask<uint32_t, kWidth>(static_cast<uint32_t>(
616         _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl))));
617   }
618 
619   // Returns the number of trailing empty or deleted elements in the group.
620   uint32_t CountLeadingEmptyOrDeleted() const {
621     auto special = _mm_set1_epi8(static_cast<char>(ctrl_t::kSentinel));
622     return TrailingZeros(static_cast<uint32_t>(
623         _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)) + 1));
624   }
625 
626   void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
627     auto msbs = _mm_set1_epi8(static_cast<char>(-128));
628     auto x126 = _mm_set1_epi8(126);
629 #ifdef ABSL_INTERNAL_HAVE_SSSE3
630     auto res = _mm_or_si128(_mm_shuffle_epi8(x126, ctrl), msbs);
631 #else
632     auto zero = _mm_setzero_si128();
633     auto special_mask = _mm_cmpgt_epi8_fixed(zero, ctrl);
634     auto res = _mm_or_si128(msbs, _mm_andnot_si128(special_mask, x126));
635 #endif
636     _mm_storeu_si128(reinterpret_cast<__m128i*>(dst), res);
637   }
638 
639   __m128i ctrl;
640 };
641 #endif  // ABSL_INTERNAL_HAVE_SSE2
642 
643 #if defined(ABSL_INTERNAL_HAVE_ARM_NEON) && defined(ABSL_IS_LITTLE_ENDIAN)
644 struct GroupAArch64Impl {
645   static constexpr size_t kWidth = 8;
646 
647   explicit GroupAArch64Impl(const ctrl_t* pos) {
648     ctrl = vld1_u8(reinterpret_cast<const uint8_t*>(pos));
649   }
650 
651   BitMask<uint64_t, kWidth, 3> Match(h2_t hash) const {
652     uint8x8_t dup = vdup_n_u8(hash);
653     auto mask = vceq_u8(ctrl, dup);
654     constexpr uint64_t msbs = 0x8080808080808080ULL;
655     return BitMask<uint64_t, kWidth, 3>(
656         vget_lane_u64(vreinterpret_u64_u8(mask), 0) & msbs);
657   }
658 
659   NonIterableBitMask<uint64_t, kWidth, 3> MaskEmpty() const {
660     uint64_t mask =
661         vget_lane_u64(vreinterpret_u64_u8(vceq_s8(
662                           vdup_n_s8(static_cast<int8_t>(ctrl_t::kEmpty)),
663                           vreinterpret_s8_u8(ctrl))),
664                       0);
665     return NonIterableBitMask<uint64_t, kWidth, 3>(mask);
666   }
667 
668   NonIterableBitMask<uint64_t, kWidth, 3> MaskEmptyOrDeleted() const {
669     uint64_t mask =
670         vget_lane_u64(vreinterpret_u64_u8(vcgt_s8(
671                           vdup_n_s8(static_cast<int8_t>(ctrl_t::kSentinel)),
672                           vreinterpret_s8_u8(ctrl))),
673                       0);
674     return NonIterableBitMask<uint64_t, kWidth, 3>(mask);
675   }
676 
677   uint32_t CountLeadingEmptyOrDeleted() const {
678     uint64_t mask =
679         vget_lane_u64(vreinterpret_u64_u8(vcle_s8(
680                           vdup_n_s8(static_cast<int8_t>(ctrl_t::kSentinel)),
681                           vreinterpret_s8_u8(ctrl))),
682                       0);
683     // Similar to MaskEmptyOrDeleted() but we invert the logic to invert the
684     // produced bitfield. We then count the number of trailing zeros.
685     // Clang and GCC optimize countr_zero to rbit+clz without any check for 0,
686     // so we should be fine.
687     return static_cast<uint32_t>(countr_zero(mask)) >> 3;
688   }
689 
690   void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
691     uint64_t mask = vget_lane_u64(vreinterpret_u64_u8(ctrl), 0);
692     constexpr uint64_t msbs = 0x8080808080808080ULL;
693     constexpr uint64_t slsbs = 0x0202020202020202ULL;
694     constexpr uint64_t midbs = 0x7e7e7e7e7e7e7e7eULL;
695     auto x = slsbs & (mask >> 6);
696     auto res = (x + midbs) | msbs;
697     little_endian::Store64(dst, res);
698   }
699 
700   uint8x8_t ctrl;
701 };
702 #endif  // ABSL_INTERNAL_HAVE_ARM_NEON && ABSL_IS_LITTLE_ENDIAN
703 
704 struct GroupPortableImpl {
705   static constexpr size_t kWidth = 8;
706 
707   explicit GroupPortableImpl(const ctrl_t* pos)
708       : ctrl(little_endian::Load64(pos)) {}
709 
710   BitMask<uint64_t, kWidth, 3> Match(h2_t hash) const {
711     // For the technique, see:
712     // http://graphics.stanford.edu/~seander/bithacks.html##ValueInWord
713     // (Determine if a word has a byte equal to n).
714     //
715     // Caveat: there are false positives but:
716     // - they only occur if there is a real match
717     // - they never occur on ctrl_t::kEmpty, ctrl_t::kDeleted, ctrl_t::kSentinel
718     // - they will be handled gracefully by subsequent checks in code
719     //
720     // Example:
721     //   v = 0x1716151413121110
722     //   hash = 0x12
723     //   retval = (v - lsbs) & ~v & msbs = 0x0000000080800000
724     constexpr uint64_t msbs = 0x8080808080808080ULL;
725     constexpr uint64_t lsbs = 0x0101010101010101ULL;
726     auto x = ctrl ^ (lsbs * hash);
727     return BitMask<uint64_t, kWidth, 3>((x - lsbs) & ~x & msbs);
728   }
729 
730   NonIterableBitMask<uint64_t, kWidth, 3> MaskEmpty() const {
731     constexpr uint64_t msbs = 0x8080808080808080ULL;
732     return NonIterableBitMask<uint64_t, kWidth, 3>((ctrl & (~ctrl << 6)) &
733                                                    msbs);
734   }
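  // Why `ctrl & (~ctrl << 6)` works: a byte contributes its MSB to the result
  // only if bit 7 is set and bit 1 is clear. kEmpty (0b10000000) satisfies
  // this; kDeleted (0b11111110), kSentinel (0b11111111) and full bytes
  // (0b0xxxxxxx) do not.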
735 
736   NonIterableBitMask<uint64_t, kWidth, 3> MaskEmptyOrDeleted() const {
737     constexpr uint64_t msbs = 0x8080808080808080ULL;
738     return NonIterableBitMask<uint64_t, kWidth, 3>((ctrl & (~ctrl << 7)) &
739                                                    msbs);
740   }
741 
742   uint32_t CountLeadingEmptyOrDeleted() const {
743     // ctrl | ~(ctrl >> 7) will have the lowest bit set to zero for kEmpty and
744     // kDeleted. We lower all other bits and count the number of trailing zeros.
745     constexpr uint64_t bits = 0x0101010101010101ULL;
746     return static_cast<uint32_t>(countr_zero((ctrl | ~(ctrl >> 7)) & bits) >>
747                                  3);
748   }
749 
750   void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
751     constexpr uint64_t msbs = 0x8080808080808080ULL;
752     constexpr uint64_t lsbs = 0x0101010101010101ULL;
753     auto x = ctrl & msbs;
754     auto res = (~x + (x >> 7)) & ~lsbs;
755     little_endian::Store64(dst, res);
756   }
757 
758   uint64_t ctrl;
759 };
760 
761 #ifdef ABSL_INTERNAL_HAVE_SSE2
762 using Group = GroupSse2Impl;
763 #elif defined(ABSL_INTERNAL_HAVE_ARM_NEON) && defined(ABSL_IS_LITTLE_ENDIAN)
764 using Group = GroupAArch64Impl;
765 #else
766 using Group = GroupPortableImpl;
767 #endif
768 
769 // When there is an insertion with no reserved growth, we rehash with
770 // probability `min(1, RehashProbabilityConstant() / capacity())`. Using a
771 // constant divided by capacity ensures that inserting N elements is still O(N)
772 // in the average case. Using the constant 16 means that we expect to rehash ~8
773 // times more often than when generations are disabled. We are adding expected
774 // rehash_probability * #insertions_per_capacity_growth
775 // = (16/capacity) * ((7/8 - 7/16) * capacity) = ~7 extra rehashes per growth.
776 inline size_t RehashProbabilityConstant() { return 16; }
777 
778 class CommonFieldsGenerationInfoEnabled {
779   // A sentinel value for reserved_growth_ indicating that we just ran out of
780   // reserved growth on the last insertion. When reserve is called and then
781   // insertions take place, reserved_growth_'s state machine is N, ..., 1,
782   // kReservedGrowthJustRanOut, 0.
783   static constexpr size_t kReservedGrowthJustRanOut =
784       (std::numeric_limits<size_t>::max)();
785 
786  public:
787   CommonFieldsGenerationInfoEnabled() = default;
788   CommonFieldsGenerationInfoEnabled(CommonFieldsGenerationInfoEnabled&& that)
789       : reserved_growth_(that.reserved_growth_),
790         reservation_size_(that.reservation_size_),
791         generation_(that.generation_) {
792     that.reserved_growth_ = 0;
793     that.reservation_size_ = 0;
794     that.generation_ = EmptyGeneration();
795   }
796   CommonFieldsGenerationInfoEnabled& operator=(
797       CommonFieldsGenerationInfoEnabled&&) = default;
798 
799   // Whether we should rehash on insert in order to detect bugs caused by using
800   // invalid references. We rehash on the first insertion after reserved_growth_
801   // reaches 0 following a call to reserve. We also rehash with low probability
802   // whenever reserved_growth_ is zero.
803   bool should_rehash_for_bug_detection_on_insert(const ctrl_t* ctrl,
804                                                  size_t capacity) const;
805   void maybe_increment_generation_on_insert() {
806     if (reserved_growth_ == kReservedGrowthJustRanOut) reserved_growth_ = 0;
807 
808     if (reserved_growth_ > 0) {
809       if (--reserved_growth_ == 0) reserved_growth_ = kReservedGrowthJustRanOut;
810     } else {
811       *generation_ = NextGeneration(*generation_);
812     }
813   }
814   void reset_reserved_growth(size_t reservation, size_t size) {
815     reserved_growth_ = reservation - size;
816   }
817   size_t reserved_growth() const { return reserved_growth_; }
818   void set_reserved_growth(size_t r) { reserved_growth_ = r; }
819   size_t reservation_size() const { return reservation_size_; }
820   void set_reservation_size(size_t r) { reservation_size_ = r; }
821   GenerationType generation() const { return *generation_; }
822   void set_generation(GenerationType g) { *generation_ = g; }
823   GenerationType* generation_ptr() const { return generation_; }
824   void set_generation_ptr(GenerationType* g) { generation_ = g; }
825 
826  private:
827   // The number of insertions remaining that are guaranteed to not rehash due to
828   // a prior call to reserve. Note: we store reserved growth in addition to
829   // reservation size because calls to erase() decrease size_ but don't decrease
830   // reserved growth.
831   size_t reserved_growth_ = 0;
832   // The maximum argument to reserve() since the container was cleared. We need
833   // to keep track of this, in addition to reserved growth, because we reset
834   // reserved growth to this when erase(begin(), end()) is called.
835   size_t reservation_size_ = 0;
836   // Pointer to the generation counter, which is used to validate iterators and
837   // is stored in the backing array between the control bytes and the slots.
838   // Note that we can't store the generation inside the container itself and
839   // keep a pointer to the container in the iterators because iterators must
840   // remain valid when the container is moved.
841   // Note: we could derive this pointer from the control pointer, but it makes
842   // the code more complicated, and there's a benefit in having the sizes of
843   // raw_hash_set in sanitizer mode and non-sanitizer mode a bit more different,
844   // which is that tests are less likely to rely on the size remaining the same.
845   GenerationType* generation_ = EmptyGeneration();
846 };
847 
848 class CommonFieldsGenerationInfoDisabled {
849  public:
850   CommonFieldsGenerationInfoDisabled() = default;
851   CommonFieldsGenerationInfoDisabled(CommonFieldsGenerationInfoDisabled&&) =
852       default;
853   CommonFieldsGenerationInfoDisabled& operator=(
854       CommonFieldsGenerationInfoDisabled&&) = default;
855 
856   bool should_rehash_for_bug_detection_on_insert(const ctrl_t*, size_t) const {
857     return false;
858   }
859   void maybe_increment_generation_on_insert() {}
860   void reset_reserved_growth(size_t, size_t) {}
861   size_t reserved_growth() const { return 0; }
862   void set_reserved_growth(size_t) {}
863   size_t reservation_size() const { return 0; }
864   void set_reservation_size(size_t) {}
865   GenerationType generation() const { return 0; }
866   void set_generation(GenerationType) {}
867   GenerationType* generation_ptr() const { return nullptr; }
868   void set_generation_ptr(GenerationType*) {}
869 };
870 
871 class HashSetIteratorGenerationInfoEnabled {
872  public:
873   HashSetIteratorGenerationInfoEnabled() = default;
874   explicit HashSetIteratorGenerationInfoEnabled(
875       const GenerationType* generation_ptr)
876       : generation_ptr_(generation_ptr), generation_(*generation_ptr) {}
877 
878   GenerationType generation() const { return generation_; }
879   void reset_generation() { generation_ = *generation_ptr_; }
880   const GenerationType* generation_ptr() const { return generation_ptr_; }
881   void set_generation_ptr(const GenerationType* ptr) { generation_ptr_ = ptr; }
882 
883  private:
884   const GenerationType* generation_ptr_ = EmptyGeneration();
885   GenerationType generation_ = *generation_ptr_;
886 };
887 
888 class HashSetIteratorGenerationInfoDisabled {
889  public:
890   HashSetIteratorGenerationInfoDisabled() = default;
891   explicit HashSetIteratorGenerationInfoDisabled(const GenerationType*) {}
892 
893   GenerationType generation() const { return 0; }
894   void reset_generation() {}
895   const GenerationType* generation_ptr() const { return nullptr; }
896   void set_generation_ptr(const GenerationType*) {}
897 };
898 
899 #ifdef ABSL_SWISSTABLE_ENABLE_GENERATIONS
900 using CommonFieldsGenerationInfo = CommonFieldsGenerationInfoEnabled;
901 using HashSetIteratorGenerationInfo = HashSetIteratorGenerationInfoEnabled;
902 #else
903 using CommonFieldsGenerationInfo = CommonFieldsGenerationInfoDisabled;
904 using HashSetIteratorGenerationInfo = HashSetIteratorGenerationInfoDisabled;
905 #endif
906 
907 // Returns whether `n` is a valid capacity (i.e., number of slots).
908 //
909 // A valid capacity is a non-zero integer `2^m - 1`.
910 inline bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; }
911 
912 // Computes the offset of the control bytes from the start of the backing
913 // allocation. growth_left is stored at the beginning of the backing array.
914 inline size_t ControlOffset() { return sizeof(size_t); }
915 
916 // Returns the number of "cloned control bytes".
917 //
918 // This is the number of control bytes that are present both at the beginning
919 // of the control byte array and at the end, such that we can create a
920 // `Group::kWidth`-width probe window starting from any control byte.
921 constexpr size_t NumClonedBytes() { return Group::kWidth - 1; }
922 
923 // Given the capacity of a table, computes the offset (from the start of the
924 // backing allocation) of the generation counter (if it exists).
925 inline size_t GenerationOffset(size_t capacity) {
926   assert(IsValidCapacity(capacity));
927   const size_t num_control_bytes = capacity + 1 + NumClonedBytes();
928   return ControlOffset() + num_control_bytes;
929 }
930 
931 // Given the capacity of a table, computes the offset (from the start of the
932 // backing allocation) at which the slots begin.
933 inline size_t SlotOffset(size_t capacity, size_t slot_align) {
934   assert(IsValidCapacity(capacity));
935   return (GenerationOffset(capacity) + NumGenerationBytes() + slot_align - 1) &
936          (~slot_align + 1);
937 }
938 
939 // Given the capacity of a table, computes the total size of the backing
940 // array.
941 inline size_t AllocSize(size_t capacity, size_t slot_size, size_t slot_align) {
942   return SlotOffset(capacity, slot_align) + capacity * slot_size;
943 }
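// A worked example (assuming 16-wide groups, 8-byte size_t, and generations
// disabled): for capacity 15 and 8-byte slots with 8-byte alignment,
// ControlOffset() is 8, the control region holds 15 + 1 + 15 = 31 bytes
// (ending at offset 39), SlotOffset() rounds that up to 40, and
// AllocSize(15, 8, 8) is 40 + 15 * 8 = 160 bytes.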
944 
945 // CommonFields hold the fields in raw_hash_set that do not depend
946 // on template parameters. This allows us to conveniently pass all
947 // of this state to helper functions as a single argument.
948 class CommonFields : public CommonFieldsGenerationInfo {
949  public:
950   CommonFields() = default;
951 
952   // Not copyable
953   CommonFields(const CommonFields&) = delete;
954   CommonFields& operator=(const CommonFields&) = delete;
955 
956   // Movable
957   CommonFields(CommonFields&& that)
958       : CommonFieldsGenerationInfo(
959             std::move(static_cast<CommonFieldsGenerationInfo&&>(that))),
960         // Explicitly copying fields into "this" and then resetting "that"
961         // fields generates less code than calling absl::exchange per field.
962         control_(that.control()),
963         slots_(that.slot_array()),
964         capacity_(that.capacity()),
965         compressed_tuple_(that.size(), std::move(that.infoz())) {
966     that.set_control(EmptyGroup());
967     that.set_slots(nullptr);
968     that.set_capacity(0);
969     that.set_size(0);
970   }
971   CommonFields& operator=(CommonFields&&) = default;
972 
973   ctrl_t* control() const { return control_; }
974   void set_control(ctrl_t* c) { control_ = c; }
975   void* backing_array_start() const {
976     // growth_left is stored before control bytes.
977     assert(reinterpret_cast<uintptr_t>(control()) % alignof(size_t) == 0);
978     return control() - sizeof(size_t);
979   }
980 
981   // Note: we can't use slots() because Qt defines "slots" as a macro.
982   void* slot_array() const { return slots_; }
983   void set_slots(void* s) { slots_ = s; }
984 
985   // The number of filled slots.
986   size_t size() const { return compressed_tuple_.template get<0>(); }
987   void set_size(size_t s) { compressed_tuple_.template get<0>() = s; }
988 
989   // The total number of available slots.
990   size_t capacity() const { return capacity_; }
991   void set_capacity(size_t c) {
992     assert(c == 0 || IsValidCapacity(c));
993     capacity_ = c;
994   }
995 
996   // The number of slots we can still fill without needing to rehash.
997   // This is stored in the heap allocation before the control bytes.
998   size_t growth_left() const {
999     return *reinterpret_cast<size_t*>(backing_array_start());
1000   }
1001   void set_growth_left(size_t gl) {
1002     *reinterpret_cast<size_t*>(backing_array_start()) = gl;
1003   }
1004 
1005   HashtablezInfoHandle& infoz() { return compressed_tuple_.template get<1>(); }
1006   const HashtablezInfoHandle& infoz() const {
1007     return compressed_tuple_.template get<1>();
1008   }
1009 
1010   bool should_rehash_for_bug_detection_on_insert() const {
1011     return CommonFieldsGenerationInfo::
1012         should_rehash_for_bug_detection_on_insert(control(), capacity());
1013   }
1014   void reset_reserved_growth(size_t reservation) {
1015     CommonFieldsGenerationInfo::reset_reserved_growth(reservation, size());
1016   }
1017 
1018   // The size of the backing array allocation.
1019   size_t alloc_size(size_t slot_size, size_t slot_align) const {
1020     return AllocSize(capacity(), slot_size, slot_align);
1021   }
1022 
1023   // Returns the number of control bytes set to kDeleted. For testing only.
1024   size_t TombstonesCount() const {
1025     return static_cast<size_t>(
1026         std::count(control(), control() + capacity(), ctrl_t::kDeleted));
1027   }
1028 
1029  private:
1030   // TODO(b/259599413): Investigate removing some of these fields:
1031   // - control/slots can be derived from each other
1032   // - we can use 6 bits for capacity since it's always a power of two minus 1
1033 
1034   // The control bytes (and, also, a pointer near to the base of the backing
1035   // array).
1036   //
1037   // This contains `capacity + 1 + NumClonedBytes()` entries, even
1038   // when the table is empty (hence EmptyGroup).
1039   //
1040   // Note that growth_left is stored immediately before this pointer.
1041   ctrl_t* control_ = EmptyGroup();
1042 
1043   // The beginning of the slots, located at `SlotOffset()` bytes after
1044   // `control`. May be null for empty tables.
1045   void* slots_ = nullptr;
1046 
1047   size_t capacity_ = 0;
1048 
1049   // Bundle together size and HashtablezInfoHandle to ensure EBO for
1050   // HashtablezInfoHandle when sampling is turned off.
1051   absl::container_internal::CompressedTuple<size_t, HashtablezInfoHandle>
1052       compressed_tuple_{0u, HashtablezInfoHandle{}};
1053 };
1054 
1055 template <class Policy, class Hash, class Eq, class Alloc>
1056 class raw_hash_set;
1057 
1058 // Returns the next valid capacity after `n`.
1059 inline size_t NextCapacity(size_t n) {
1060   assert(IsValidCapacity(n) || n == 0);
1061   return n * 2 + 1;
1062 }
1063 
1064 // Applies the following mapping to every byte in the control array:
1065 //   * kDeleted -> kEmpty
1066 //   * kEmpty -> kEmpty
1067 //   * _ -> kDeleted
1068 // PRECONDITION:
1069 //   IsValidCapacity(capacity)
1070 //   ctrl[capacity] == ctrl_t::kSentinel
1071 //   ctrl[i] != ctrl_t::kSentinel for all i < capacity
1072 void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity);
1073 
1074 // Converts `n` into the next valid capacity, per `IsValidCapacity`.
1075 inline size_t NormalizeCapacity(size_t n) {
1076   return n ? ~size_t{} >> countl_zero(n) : 1;
1077 }
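// For example, with a 64-bit size_t: NormalizeCapacity(0) == 1,
// NormalizeCapacity(5) == 7, and NormalizeCapacity(8) == 15, i.e. the smallest
// value of the form 2^m - 1 that is >= n.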
1078 
1079 // General notes on capacity/growth methods below:
1080 // - We use 7/8th as maximum load factor. For 16-wide groups, that gives an
1081 //   average of two empty slots per group.
1082 // - For (capacity+1) >= Group::kWidth, growth is 7/8*capacity.
1083 // - For (capacity+1) < Group::kWidth, growth == capacity. In this case, we
1084 //   never need to probe (the whole table fits in one group) so we don't need a
1085 //   load factor less than 1.
1086 
1087 // Given `capacity`, applies the load factor; i.e., it returns the maximum
1088 // number of values we should put into the table before a resizing rehash.
1089 inline size_t CapacityToGrowth(size_t capacity) {
1090   assert(IsValidCapacity(capacity));
1091   // `capacity*7/8`
1092   if (Group::kWidth == 8 && capacity == 7) {
1093     // x-x/8 does not work when x==7.
1094     return 6;
1095   }
1096   return capacity - capacity / 8;
1097 }
1098 
1099 // Given `growth`, "unapplies" the load factor to find how large the capacity
1100 // should be to stay within the load factor.
1101 //
1102 // This might not be a valid capacity and `NormalizeCapacity()` should be
1103 // called on this.
1104 inline size_t GrowthToLowerboundCapacity(size_t growth) {
1105   // `growth*8/7`
1106   if (Group::kWidth == 8 && growth == 7) {
1107     // x+(x-1)/7 does not work when x==7.
1108     return 8;
1109   }
1110   return growth + static_cast<size_t>((static_cast<int64_t>(growth) - 1) / 7);
1111 }
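// These two functions roughly invert each other: for example,
// CapacityToGrowth(63) == 63 - 63 / 8 == 56, and
// GrowthToLowerboundCapacity(56) == 56 + (56 - 1) / 7 == 63.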
1112 
1113 template <class InputIter>
1114 size_t SelectBucketCountForIterRange(InputIter first, InputIter last,
1115                                      size_t bucket_count) {
1116   if (bucket_count != 0) {
1117     return bucket_count;
1118   }
1119   using InputIterCategory =
1120       typename std::iterator_traits<InputIter>::iterator_category;
1121   if (std::is_base_of<std::random_access_iterator_tag,
1122                       InputIterCategory>::value) {
1123     return GrowthToLowerboundCapacity(
1124         static_cast<size_t>(std::distance(first, last)));
1125   }
1126   return 0;
1127 }
1128 
1129 constexpr bool SwisstableDebugEnabled() {
1130 #if defined(ABSL_SWISSTABLE_ENABLE_GENERATIONS) || \
1131     ABSL_OPTION_HARDENED == 1 || !defined(NDEBUG)
1132   return true;
1133 #else
1134   return false;
1135 #endif
1136 }
1137 
1138 inline void AssertIsFull(const ctrl_t* ctrl, GenerationType generation,
1139                          const GenerationType* generation_ptr,
1140                          const char* operation) {
1141   if (!SwisstableDebugEnabled()) return;
1142   if (ctrl == nullptr) {
1143     ABSL_INTERNAL_LOG(FATAL,
1144                       std::string(operation) + " called on end() iterator.");
1145   }
1146   if (ctrl == EmptyGroup()) {
1147     ABSL_INTERNAL_LOG(FATAL, std::string(operation) +
1148                                  " called on default-constructed iterator.");
1149   }
1150   if (SwisstableGenerationsEnabled()) {
1151     if (generation != *generation_ptr) {
1152       ABSL_INTERNAL_LOG(FATAL,
1153                         std::string(operation) +
1154                             " called on invalid iterator. The table could have "
1155                             "rehashed since this iterator was initialized.");
1156     }
1157     if (!IsFull(*ctrl)) {
1158       ABSL_INTERNAL_LOG(
1159           FATAL,
1160           std::string(operation) +
1161               " called on invalid iterator. The element was likely erased.");
1162     }
1163   } else {
1164     if (!IsFull(*ctrl)) {
1165       ABSL_INTERNAL_LOG(
1166           FATAL,
1167           std::string(operation) +
1168               " called on invalid iterator. The element might have been erased "
1169               "or the table might have rehashed. Consider running with "
1170               "--config=asan to diagnose rehashing issues.");
1171     }
1172   }
1173 }
1174 
1175 // Note that for comparisons, null/end iterators are valid.
1176 inline void AssertIsValidForComparison(const ctrl_t* ctrl,
1177                                        GenerationType generation,
1178                                        const GenerationType* generation_ptr) {
1179   if (!SwisstableDebugEnabled()) return;
1180   const bool ctrl_is_valid_for_comparison =
1181       ctrl == nullptr || ctrl == EmptyGroup() || IsFull(*ctrl);
1182   if (SwisstableGenerationsEnabled()) {
1183     if (generation != *generation_ptr) {
1184       ABSL_INTERNAL_LOG(FATAL,
1185                         "Invalid iterator comparison. The table could have "
1186                         "rehashed since this iterator was initialized.");
1187     }
1188     if (!ctrl_is_valid_for_comparison) {
1189       ABSL_INTERNAL_LOG(
1190           FATAL, "Invalid iterator comparison. The element was likely erased.");
1191     }
1192   } else {
1193     ABSL_HARDENING_ASSERT(
1194         ctrl_is_valid_for_comparison &&
1195         "Invalid iterator comparison. The element might have been erased or "
1196         "the table might have rehashed. Consider running with --config=asan to "
1197         "diagnose rehashing issues.");
1198   }
1199 }
1200 
1201 // If the two iterators come from the same container, then their pointers will
1202 // interleave such that ctrl_a <= ctrl_b < slot_a <= slot_b or vice versa.
1203 // Note: we take slots by reference so that it's not UB if they're uninitialized
1204 // as long as we don't read them (when ctrl is null).
1205 inline bool AreItersFromSameContainer(const ctrl_t* ctrl_a,
1206                                       const ctrl_t* ctrl_b,
1207                                       const void* const& slot_a,
1208                                       const void* const& slot_b) {
1209   // If either control byte is null, then we can't tell.
1210   if (ctrl_a == nullptr || ctrl_b == nullptr) return true;
1211   const void* low_slot = slot_a;
1212   const void* hi_slot = slot_b;
1213   if (ctrl_a > ctrl_b) {
1214     std::swap(ctrl_a, ctrl_b);
1215     std::swap(low_slot, hi_slot);
1216   }
1217   return ctrl_b < low_slot && low_slot <= hi_slot;
1218 }
1219 
1220 // Asserts that two iterators come from the same container.
1221 // Note: we take slots by reference so that it's not UB if they're uninitialized
1222 // as long as we don't read them (when ctrl is null).
1223 inline void AssertSameContainer(const ctrl_t* ctrl_a, const ctrl_t* ctrl_b,
1224                                 const void* const& slot_a,
1225                                 const void* const& slot_b,
1226                                 const GenerationType* generation_ptr_a,
1227                                 const GenerationType* generation_ptr_b) {
1228   if (!SwisstableDebugEnabled()) return;
1229   const bool a_is_default = ctrl_a == EmptyGroup();
1230   const bool b_is_default = ctrl_b == EmptyGroup();
1231   if (a_is_default != b_is_default) {
1232     ABSL_INTERNAL_LOG(
1233         FATAL,
1234         "Invalid iterator comparison. Comparing default-constructed iterator "
1235         "with non-default-constructed iterator.");
1236   }
1237   if (a_is_default && b_is_default) return;
1238 
1239   if (SwisstableGenerationsEnabled()) {
1240     if (generation_ptr_a == generation_ptr_b) return;
1241     const bool a_is_empty = IsEmptyGeneration(generation_ptr_a);
1242     const bool b_is_empty = IsEmptyGeneration(generation_ptr_b);
1243     if (a_is_empty != b_is_empty) {
1244       ABSL_INTERNAL_LOG(FATAL,
1245                         "Invalid iterator comparison. Comparing iterator from "
1246                         "a non-empty hashtable with an iterator from an empty "
1247                         "hashtable.");
1248     }
1249     if (a_is_empty && b_is_empty) {
1250       ABSL_INTERNAL_LOG(FATAL,
1251                         "Invalid iterator comparison. Comparing iterators from "
1252                         "different empty hashtables.");
1253     }
1254     const bool a_is_end = ctrl_a == nullptr;
1255     const bool b_is_end = ctrl_b == nullptr;
1256     if (a_is_end || b_is_end) {
1257       ABSL_INTERNAL_LOG(FATAL,
1258                         "Invalid iterator comparison. Comparing iterator with "
1259                         "an end() iterator from a different hashtable.");
1260     }
1261     ABSL_INTERNAL_LOG(FATAL,
1262                       "Invalid iterator comparison. Comparing non-end() "
1263                       "iterators from different hashtables.");
1264   } else {
1265     ABSL_HARDENING_ASSERT(
1266         AreItersFromSameContainer(ctrl_a, ctrl_b, slot_a, slot_b) &&
1267         "Invalid iterator comparison. The iterators may be from different "
1268         "containers or the container might have rehashed. Consider running "
1269         "with --config=asan to diagnose rehashing issues.");
1270   }
1271 }
1272 
1273 struct FindInfo {
1274   size_t offset;
1275   size_t probe_length;
1276 };
1277 
1278 // Whether a table is "small". A small table fits entirely into a probing
1279 // group, i.e., has a capacity less than `Group::kWidth - 1`.
1280 //
1281 // In small mode we are able to use the whole capacity. The extra control
1282 // bytes give us at least one "empty" control byte to stop the iteration.
1283 // This is important to make 1 a valid capacity.
1284 //
1285 // In small mode only the first `capacity` control bytes after the sentinel
1286 // are valid. The rest contain dummy ctrl_t::kEmpty values that do not
1287 // represent a real slot. This is important to take into account in
1288 // `find_first_non_full()`, where we never try
1289 // `ShouldInsertBackwards()` for small tables.
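//
// For example (assuming the SSE2 group width, where `Group::kWidth` == 16):
// the valid capacities 1, 3 and 7 are "small", while 15, 31, ... are not,
// since is_small() below requires capacity < 15 in that configuration.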
1290 inline bool is_small(size_t capacity) { return capacity < Group::kWidth - 1; }
1291 
1292 // Begins a probing operation on `common.control`, using `hash`.
1293 inline probe_seq<Group::kWidth> probe(const ctrl_t* ctrl, const size_t capacity,
1294                                       size_t hash) {
1295   return probe_seq<Group::kWidth>(H1(hash, ctrl), capacity);
1296 }
1297 inline probe_seq<Group::kWidth> probe(const CommonFields& common, size_t hash) {
1298   return probe(common.control(), common.capacity(), hash);
1299 }
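
// A sketch of the probe sequence that results (assuming `Group::kWidth` == 16
// and writing o for `H1(hash) & capacity`): the groups inspected start at
//
//   o, o + 16, o + 48, o + 96, ...        (each taken modulo capacity + 1)
//
// i.e. the increment grows by one group width per step (quadratic probing
// over groups). Because capacity is a power of two minus one, this sequence
// is designed to visit every position before repeating.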
1300 
1301 // Probes an array of control bits using a probe sequence derived from `hash`,
1302 // and returns the offset corresponding to the first deleted or empty slot.
1303 //
1304 // Behavior when the entire table is full is undefined.
1305 //
1306 // NOTE: this function must work with tables having both empty and deleted
1307 // slots in the same group. Such tables appear during `erase()`.
1308 template <typename = void>
1309 inline FindInfo find_first_non_full(const CommonFields& common, size_t hash) {
1310   auto seq = probe(common, hash);
1311   const ctrl_t* ctrl = common.control();
1312   while (true) {
1313     Group g{ctrl + seq.offset()};
1314     auto mask = g.MaskEmptyOrDeleted();
1315     if (mask) {
1316 #if !defined(NDEBUG)
1317       // We want to add entropy even when ASLR is not enabled.
1318       // In debug build we will randomly insert in either the front or back of
1319       // the group.
1320       // TODO(kfm,sbenza): revisit after we do unconditional mixing
1321       if (!is_small(common.capacity()) && ShouldInsertBackwards(hash, ctrl)) {
1322         return {seq.offset(mask.HighestBitSet()), seq.index()};
1323       }
1324 #endif
1325       return {seq.offset(mask.LowestBitSet()), seq.index()};
1326     }
1327     seq.next();
1328     assert(seq.index() <= common.capacity() && "full table!");
1329   }
1330 }
1331 
1332 // An extern template declaration for this inline function preserves the
1333 // possibility of inlining. If the compiler decides not to inline it, no
1334 // symbols will be added to the corresponding translation unit.
1335 extern template FindInfo find_first_non_full(const CommonFields&, size_t);
1336 
1337 // Non-inlined version of find_first_non_full for use in less
1338 // performance critical routines.
1339 FindInfo find_first_non_full_outofline(const CommonFields&, size_t);
1340 
1341 inline void ResetGrowthLeft(CommonFields& common) {
1342   common.set_growth_left(CapacityToGrowth(common.capacity()) - common.size());
1343 }
1344 
1345 // Sets `ctrl` to `{kEmpty, kSentinel, ..., kEmpty}`, marking the entire
1346 // array as empty.
1347 inline void ResetCtrl(CommonFields& common, size_t slot_size) {
1348   const size_t capacity = common.capacity();
1349   ctrl_t* ctrl = common.control();
1350   std::memset(ctrl, static_cast<int8_t>(ctrl_t::kEmpty),
1351               capacity + 1 + NumClonedBytes());
1352   ctrl[capacity] = ctrl_t::kSentinel;
1353   SanitizerPoisonMemoryRegion(common.slot_array(), slot_size * capacity);
1354   ResetGrowthLeft(common);
1355 }
1356 
1357 // Sets `ctrl[i]` to `h`.
1358 //
1359 // Unlike setting it directly, this function will perform bounds checks and
1360 // mirror the value to the cloned tail if necessary.
1361 inline void SetCtrl(const CommonFields& common, size_t i, ctrl_t h,
1362                     size_t slot_size) {
1363   const size_t capacity = common.capacity();
1364   assert(i < capacity);
1365 
1366   auto* slot_i = static_cast<const char*>(common.slot_array()) + i * slot_size;
1367   if (IsFull(h)) {
1368     SanitizerUnpoisonMemoryRegion(slot_i, slot_size);
1369   } else {
1370     SanitizerPoisonMemoryRegion(slot_i, slot_size);
1371   }
1372 
1373   ctrl_t* ctrl = common.control();
1374   ctrl[i] = h;
1375   ctrl[((i - NumClonedBytes()) & capacity) + (NumClonedBytes() & capacity)] = h;
1376 }
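
// A worked example of the mirroring arithmetic above (assuming
// `Group::kWidth` == 16, so NumClonedBytes() == 15, with capacity == 15):
// SetCtrl(common, /*i=*/2, h, slot_size) stores h into ctrl[2] and also into
// ctrl[((2 - 15) & 15) + (15 & 15)] == ctrl[3 + 15] == ctrl[18], the clone of
// slot 2 in the cloned tail. For an index with no clone (i >= NumClonedBytes()
// in a larger table), the expression evaluates to i itself, so the second
// store is a harmless self-assignment.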
1377 
1378 // Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`.
1379 inline void SetCtrl(const CommonFields& common, size_t i, h2_t h,
1380                     size_t slot_size) {
1381   SetCtrl(common, i, static_cast<ctrl_t>(h), slot_size);
1382 }
1383 
1384 // growth_left (which is a size_t) is stored with the backing array.
1385 constexpr size_t BackingArrayAlignment(size_t align_of_slot) {
1386   return (std::max)(align_of_slot, alignof(size_t));
1387 }
1388 
1389 template <typename Alloc, size_t SizeOfSlot, size_t AlignOfSlot>
1390 ABSL_ATTRIBUTE_NOINLINE void InitializeSlots(CommonFields& c, Alloc alloc) {
1391   assert(c.capacity());
1392   // Folks with custom allocators often make unwarranted assumptions about the
1393   // behavior of their classes vis-a-vis trivial destructibility and what
1394   // calls they will or won't make.  Avoid sampling for people with custom
1395   // allocators to get us out of this mess.  This is not a hard guarantee but
1396   // a workaround while we plan the exact guarantee we want to provide.
1397   const size_t sample_size =
1398       (std::is_same<Alloc, std::allocator<char>>::value &&
1399        c.slot_array() == nullptr)
1400           ? SizeOfSlot
1401           : 0;
1402 
1403   const size_t cap = c.capacity();
1404   const size_t alloc_size = AllocSize(cap, SizeOfSlot, AlignOfSlot);
1405   // growth_left (which is a size_t) is stored with the backing array.
1406   char* mem = static_cast<char*>(
1407       Allocate<BackingArrayAlignment(AlignOfSlot)>(&alloc, alloc_size));
1408   const GenerationType old_generation = c.generation();
1409   c.set_generation_ptr(
1410       reinterpret_cast<GenerationType*>(mem + GenerationOffset(cap)));
1411   c.set_generation(NextGeneration(old_generation));
1412   c.set_control(reinterpret_cast<ctrl_t*>(mem + ControlOffset()));
1413   c.set_slots(mem + SlotOffset(cap, AlignOfSlot));
1414   ResetCtrl(c, SizeOfSlot);
1415   if (sample_size) {
1416     c.infoz() = Sample(sample_size);
1417   }
1418   c.infoz().RecordStorageChanged(c.size(), cap);
1419 }
1420 
1421 // PolicyFunctions bundles together some information for a particular
1422 // raw_hash_set<T, ...> instantiation. This information is passed to
1423 // type-erased functions that want to do small amounts of type-specific
1424 // work.
1425 struct PolicyFunctions {
1426   size_t slot_size;
1427 
1428   // Returns the hash of the pointed-to slot.
1429   size_t (*hash_slot)(void* set, void* slot);
1430 
1431   // Transfer the contents of src_slot to dst_slot.
1432   void (*transfer)(void* set, void* dst_slot, void* src_slot);
1433 
1434   // Deallocate the backing store from common.
1435   void (*dealloc)(CommonFields& common, const PolicyFunctions& policy);
1436 };
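
// A hypothetical sketch of how one instantiation might populate this struct
// (the real table comes from raw_hash_set::GetPolicyFunctions(); the hash
// callback name below is made up):
//
//   static constexpr PolicyFunctions kPolicy = {
//       sizeof(slot_type),
//       &HashSlotForThisPolicy,                   // hash of the slot's key
//       &TransferRelocatable<sizeof(slot_type)>,  // for relocatable slots
//       &DeallocateStandard<alignof(slot_type)>,  // for std::allocator
//   };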
1437 
1438 // ClearBackingArray clears the backing array, either resetting it in place
1439 // or deallocating it, depending on the value of "reuse".
1440 // REQUIRES: c.capacity > 0
1441 void ClearBackingArray(CommonFields& c, const PolicyFunctions& policy,
1442                        bool reuse);
1443 
1444 // Type-erased version of raw_hash_set::erase_meta_only.
1445 void EraseMetaOnly(CommonFields& c, ctrl_t* it, size_t slot_size);
1446 
1447 // Function to place in PolicyFunctions::dealloc for raw_hash_sets
1448 // that are using std::allocator. This allows us to share the same
1449 // function body for raw_hash_set instantiations that have the
1450 // same slot alignment.
1451 template <size_t AlignOfSlot>
1452 ABSL_ATTRIBUTE_NOINLINE void DeallocateStandard(CommonFields& common,
1453                                                 const PolicyFunctions& policy) {
1454   // Unpoison before returning the memory to the allocator.
1455   SanitizerUnpoisonMemoryRegion(common.slot_array(),
1456                                 policy.slot_size * common.capacity());
1457 
1458   std::allocator<char> alloc;
1459   Deallocate<BackingArrayAlignment(AlignOfSlot)>(
1460       &alloc, common.backing_array_start(),
1461       common.alloc_size(policy.slot_size, AlignOfSlot));
1462 }
1463 
1464 // For trivially relocatable types we use memcpy directly. This allows us to
1465 // share the same function body for raw_hash_set instantiations that have the
1466 // same slot size as long as they are relocatable.
1467 template <size_t SizeOfSlot>
1468 ABSL_ATTRIBUTE_NOINLINE void TransferRelocatable(void*, void* dst, void* src) {
1469   memcpy(dst, src, SizeOfSlot);
1470 }
1471 
1472 // Type-erased version of raw_hash_set::drop_deletes_without_resize.
1473 void DropDeletesWithoutResize(CommonFields& common,
1474                               const PolicyFunctions& policy, void* tmp_space);
1475 
1476 // A SwissTable.
1477 //
1478 // Policy: a policy defines how to perform different operations on
1479 // the slots of the hashtable (see hash_policy_traits.h for the full interface
1480 // of policy).
1481 //
1482 // Hash: a (possibly polymorphic) functor that hashes keys of the hashtable. The
1483 // functor should accept a key and return a size_t hash. For best performance
1484 // it is important that the hash function provides high entropy across all bits
1485 // of the hash.
1486 //
1487 // Eq: a (possibly polymorphic) functor that compares two keys for equality. It
1488 // should accept two keys (possibly of different types) and return a bool: true
1489 // if they are equal, false if they are not. If two keys compare equal, then
1490 // their hash values as defined by Hash MUST be equal.
1491 //
1492 // Allocator: an Allocator
1493 // [https://en.cppreference.com/w/cpp/named_req/Allocator] with which
1494 // the storage of the hashtable will be allocated and the elements will be
1495 // constructed and destroyed.
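//
// For example (illustrative; see flat_hash_set.h and flat_hash_map.h for the
// real wiring): absl::flat_hash_set<T, Hash, Eq, Alloc> is a thin wrapper
// around
//
//   raw_hash_set<FlatHashSetPolicy<T>, Hash, Eq, Alloc>
//
// where the policy says how a T is stored in, constructed in, and transferred
// between slots.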
1496 template <class Policy, class Hash, class Eq, class Alloc>
1497 class raw_hash_set {
1498   using PolicyTraits = hash_policy_traits<Policy>;
1499   using KeyArgImpl =
1500       KeyArg<IsTransparent<Eq>::value && IsTransparent<Hash>::value>;
1501 
1502  public:
1503   using init_type = typename PolicyTraits::init_type;
1504   using key_type = typename PolicyTraits::key_type;
1505   // TODO(sbenza): Hide slot_type as it is an implementation detail. Needs user
1506   // code fixes!
1507   using slot_type = typename PolicyTraits::slot_type;
1508   using allocator_type = Alloc;
1509   using size_type = size_t;
1510   using difference_type = ptrdiff_t;
1511   using hasher = Hash;
1512   using key_equal = Eq;
1513   using policy_type = Policy;
1514   using value_type = typename PolicyTraits::value_type;
1515   using reference = value_type&;
1516   using const_reference = const value_type&;
1517   using pointer = typename absl::allocator_traits<
1518       allocator_type>::template rebind_traits<value_type>::pointer;
1519   using const_pointer = typename absl::allocator_traits<
1520       allocator_type>::template rebind_traits<value_type>::const_pointer;
1521 
1522   // Alias used for heterogeneous lookup functions.
1523   // `key_arg<K>` evaluates to `K` when the functors are transparent and to
1524   // `key_type` otherwise. It permits template argument deduction on `K` for the
1525   // transparent case.
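  //
  // For example: in a table whose key_type is std::string and whose default
  // hasher and key_equal are transparent (as the string functors used by
  // absl::flat_hash_set<std::string> are), `key_arg<absl::string_view>` is
  // absl::string_view, so lookups can take a string_view without first
  // materializing a std::string. Without transparent functors, `key_arg<K>`
  // is simply key_type.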
1526   template <class K>
1527   using key_arg = typename KeyArgImpl::template type<K, key_type>;
1528 
1529  private:
1530   // Give an early error when key_type is not hashable/eq.
1531   auto KeyTypeCanBeHashed(const Hash& h, const key_type& k) -> decltype(h(k));
1532   auto KeyTypeCanBeEq(const Eq& eq, const key_type& k) -> decltype(eq(k, k));
1533 
1534   using AllocTraits = absl::allocator_traits<allocator_type>;
1535   using SlotAlloc = typename absl::allocator_traits<
1536       allocator_type>::template rebind_alloc<slot_type>;
1537   using SlotAllocTraits = typename absl::allocator_traits<
1538       allocator_type>::template rebind_traits<slot_type>;
1539 
1540   static_assert(std::is_lvalue_reference<reference>::value,
1541                 "Policy::element() must return a reference");
1542 
1543   template <typename T>
1544   struct SameAsElementReference
1545       : std::is_same<typename std::remove_cv<
1546                          typename std::remove_reference<reference>::type>::type,
1547                      typename std::remove_cv<
1548                          typename std::remove_reference<T>::type>::type> {};
1549 
1550   // An enabler for insert(T&&): T must be convertible to init_type or be the
1551   // same as [cv] value_type [ref].
1552   // Note: we separate SameAsElementReference into its own type to avoid using
1553   // reference unless we need to. MSVC doesn't seem to like it in some
1554   // cases.
1555   template <class T>
1556   using RequiresInsertable = typename std::enable_if<
1557       absl::disjunction<std::is_convertible<T, init_type>,
1558                         SameAsElementReference<T>>::value,
1559       int>::type;
1560 
1561   // RequiresNotInit is a workaround for gcc prior to 7.1.
1562   // See https://godbolt.org/g/Y4xsUh.
1563   template <class T>
1564   using RequiresNotInit =
1565       typename std::enable_if<!std::is_same<T, init_type>::value, int>::type;
1566 
1567   template <class... Ts>
1568   using IsDecomposable = IsDecomposable<void, PolicyTraits, Hash, Eq, Ts...>;
1569 
1570  public:
1571   static_assert(std::is_same<pointer, value_type*>::value,
1572                 "Allocators with custom pointer types are not supported");
1573   static_assert(std::is_same<const_pointer, const value_type*>::value,
1574                 "Allocators with custom pointer types are not supported");
1575 
1576   class iterator : private HashSetIteratorGenerationInfo {
1577     friend class raw_hash_set;
1578 
1579    public:
1580     using iterator_category = std::forward_iterator_tag;
1581     using value_type = typename raw_hash_set::value_type;
1582     using reference =
1583         absl::conditional_t<PolicyTraits::constant_iterators::value,
1584                             const value_type&, value_type&>;
1585     using pointer = absl::remove_reference_t<reference>*;
1586     using difference_type = typename raw_hash_set::difference_type;
1587 
1588     iterator() {}
1589 
1590     // PRECONDITION: not an end() iterator.
1591     reference operator*() const {
1592       AssertIsFull(ctrl_, generation(), generation_ptr(), "operator*()");
1593       return PolicyTraits::element(slot_);
1594     }
1595 
1596     // PRECONDITION: not an end() iterator.
1597     pointer operator->() const {
1598       AssertIsFull(ctrl_, generation(), generation_ptr(), "operator->");
1599       return &operator*();
1600     }
1601 
1602     // PRECONDITION: not an end() iterator.
1603     iterator& operator++() {
1604       AssertIsFull(ctrl_, generation(), generation_ptr(), "operator++");
1605       ++ctrl_;
1606       ++slot_;
1607       skip_empty_or_deleted();
1608       return *this;
1609     }
1610     // PRECONDITION: not an end() iterator.
1611     iterator operator++(int) {
1612       auto tmp = *this;
1613       ++*this;
1614       return tmp;
1615     }
1616 
1617     friend bool operator==(const iterator& a, const iterator& b) {
1618       AssertIsValidForComparison(a.ctrl_, a.generation(), a.generation_ptr());
1619       AssertIsValidForComparison(b.ctrl_, b.generation(), b.generation_ptr());
1620       AssertSameContainer(a.ctrl_, b.ctrl_, a.slot_, b.slot_,
1621                           a.generation_ptr(), b.generation_ptr());
1622       return a.ctrl_ == b.ctrl_;
1623     }
1624     friend bool operator!=(const iterator& a, const iterator& b) {
1625       return !(a == b);
1626     }
1627 
1628    private:
1629     iterator(ctrl_t* ctrl, slot_type* slot,
1630              const GenerationType* generation_ptr)
1631         : HashSetIteratorGenerationInfo(generation_ptr),
1632           ctrl_(ctrl),
1633           slot_(slot) {
1634       // This assumption helps the compiler know that any non-end iterator is
1635       // not equal to any end iterator.
1636       ABSL_ASSUME(ctrl != nullptr);
1637     }
1638     // For end() iterators.
1639     explicit iterator(const GenerationType* generation_ptr)
1640         : HashSetIteratorGenerationInfo(generation_ptr), ctrl_(nullptr) {}
1641 
1642     // Fixes up `ctrl_` to point to a full slot by advancing it and `slot_`
1643     // until they reach one.
1644     //
1645     // If a sentinel is reached, we null `ctrl_` out instead.
1646     void skip_empty_or_deleted() {
1647       while (IsEmptyOrDeleted(*ctrl_)) {
1648         uint32_t shift = Group{ctrl_}.CountLeadingEmptyOrDeleted();
1649         ctrl_ += shift;
1650         slot_ += shift;
1651       }
1652       if (ABSL_PREDICT_FALSE(*ctrl_ == ctrl_t::kSentinel)) ctrl_ = nullptr;
1653     }
1654 
1655     // We use EmptyGroup() for default-constructed iterators so that they can
1656     // be distinguished from end iterators, which have nullptr ctrl_.
1657     ctrl_t* ctrl_ = EmptyGroup();
1658     // To avoid uninitialized member warnings, put slot_ in an anonymous union.
1659     // The member is not initialized on singleton and end iterators.
1660     union {
1661       slot_type* slot_;
1662     };
1663   };
1664 
1665   class const_iterator {
1666     friend class raw_hash_set;
1667 
1668    public:
1669     using iterator_category = typename iterator::iterator_category;
1670     using value_type = typename raw_hash_set::value_type;
1671     using reference = typename raw_hash_set::const_reference;
1672     using pointer = typename raw_hash_set::const_pointer;
1673     using difference_type = typename raw_hash_set::difference_type;
1674 
1675     const_iterator() = default;
1676     // Implicit construction from iterator.
1677     const_iterator(iterator i) : inner_(std::move(i)) {}  // NOLINT
1678 
1679     reference operator*() const { return *inner_; }
1680     pointer operator->() const { return inner_.operator->(); }
1681 
1682     const_iterator& operator++() {
1683       ++inner_;
1684       return *this;
1685     }
1686     const_iterator operator++(int) { return inner_++; }
1687 
1688     friend bool operator==(const const_iterator& a, const const_iterator& b) {
1689       return a.inner_ == b.inner_;
1690     }
1691     friend bool operator!=(const const_iterator& a, const const_iterator& b) {
1692       return !(a == b);
1693     }
1694 
1695    private:
1696     const_iterator(const ctrl_t* ctrl, const slot_type* slot,
1697                    const GenerationType* gen)
1698         : inner_(const_cast<ctrl_t*>(ctrl), const_cast<slot_type*>(slot), gen) {
1699     }
1700 
1701     iterator inner_;
1702   };
1703 
1704   using node_type = node_handle<Policy, hash_policy_traits<Policy>, Alloc>;
1705   using insert_return_type = InsertReturnType<iterator, node_type>;
1706 
1707   // Note: can't use `= default` due to non-default noexcept (causes
1708   // problems for some compilers). NOLINTNEXTLINE
1709   raw_hash_set() noexcept(
1710       std::is_nothrow_default_constructible<hasher>::value &&
1711       std::is_nothrow_default_constructible<key_equal>::value &&
1712       std::is_nothrow_default_constructible<allocator_type>::value) {}
1713 
1714   ABSL_ATTRIBUTE_NOINLINE explicit raw_hash_set(
1715       size_t bucket_count, const hasher& hash = hasher(),
1716       const key_equal& eq = key_equal(),
1717       const allocator_type& alloc = allocator_type())
1718       : settings_(CommonFields{}, hash, eq, alloc) {
1719     if (bucket_count) {
1720       common().set_capacity(NormalizeCapacity(bucket_count));
1721       initialize_slots();
1722     }
1723   }
1724 
1725   raw_hash_set(size_t bucket_count, const hasher& hash,
1726                const allocator_type& alloc)
1727       : raw_hash_set(bucket_count, hash, key_equal(), alloc) {}
1728 
1729   raw_hash_set(size_t bucket_count, const allocator_type& alloc)
1730       : raw_hash_set(bucket_count, hasher(), key_equal(), alloc) {}
1731 
1732   explicit raw_hash_set(const allocator_type& alloc)
1733       : raw_hash_set(0, hasher(), key_equal(), alloc) {}
1734 
1735   template <class InputIter>
1736   raw_hash_set(InputIter first, InputIter last, size_t bucket_count = 0,
1737                const hasher& hash = hasher(), const key_equal& eq = key_equal(),
1738                const allocator_type& alloc = allocator_type())
1739       : raw_hash_set(SelectBucketCountForIterRange(first, last, bucket_count),
1740                      hash, eq, alloc) {
1741     insert(first, last);
1742   }
1743 
1744   template <class InputIter>
1745   raw_hash_set(InputIter first, InputIter last, size_t bucket_count,
1746                const hasher& hash, const allocator_type& alloc)
1747       : raw_hash_set(first, last, bucket_count, hash, key_equal(), alloc) {}
1748 
1749   template <class InputIter>
1750   raw_hash_set(InputIter first, InputIter last, size_t bucket_count,
1751                const allocator_type& alloc)
1752       : raw_hash_set(first, last, bucket_count, hasher(), key_equal(), alloc) {}
1753 
1754   template <class InputIter>
1755   raw_hash_set(InputIter first, InputIter last, const allocator_type& alloc)
1756       : raw_hash_set(first, last, 0, hasher(), key_equal(), alloc) {}
1757 
1758   // Instead of accepting std::initializer_list<value_type> as the first
1759   // argument like std::unordered_set<value_type> does, we have two overloads
1760   // that accept std::initializer_list<T> and std::initializer_list<init_type>.
1761   // This is advantageous for performance.
1762   //
1763   //   // Turns {"abc", "def"} into std::initializer_list<std::string>, then
1764   //   // copies the strings into the set.
1765   //   std::unordered_set<std::string> s = {"abc", "def"};
1766   //
1767   //   // Turns {"abc", "def"} into std::initializer_list<const char*>, then
1768   //   // copies the strings into the set.
1769   //   absl::flat_hash_set<std::string> s = {"abc", "def"};
1770   //
1771   // The same trick is used in insert().
1772   //
1773   // The enabler is necessary to prevent this constructor from triggering where
1774   // the copy constructor is meant to be called.
1775   //
1776   //   absl::flat_hash_set<int> a, b{a};
1777   //
1778   // RequiresNotInit<T> is a workaround for gcc prior to 7.1.
1779   template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
1780   raw_hash_set(std::initializer_list<T> init, size_t bucket_count = 0,
1781                const hasher& hash = hasher(), const key_equal& eq = key_equal(),
1782                const allocator_type& alloc = allocator_type())
1783       : raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) {}
1784 
1785   raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count = 0,
1786                const hasher& hash = hasher(), const key_equal& eq = key_equal(),
1787                const allocator_type& alloc = allocator_type())
1788       : raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) {}
1789 
1790   template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
1791   raw_hash_set(std::initializer_list<T> init, size_t bucket_count,
1792                const hasher& hash, const allocator_type& alloc)
1793       : raw_hash_set(init, bucket_count, hash, key_equal(), alloc) {}
1794 
1795   raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count,
1796                const hasher& hash, const allocator_type& alloc)
1797       : raw_hash_set(init, bucket_count, hash, key_equal(), alloc) {}
1798 
1799   template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
1800   raw_hash_set(std::initializer_list<T> init, size_t bucket_count,
1801                const allocator_type& alloc)
1802       : raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {}
1803 
1804   raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count,
1805                const allocator_type& alloc)
1806       : raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {}
1807 
1808   template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
1809   raw_hash_set(std::initializer_list<T> init, const allocator_type& alloc)
1810       : raw_hash_set(init, 0, hasher(), key_equal(), alloc) {}
1811 
1812   raw_hash_set(std::initializer_list<init_type> init,
1813                const allocator_type& alloc)
1814       : raw_hash_set(init, 0, hasher(), key_equal(), alloc) {}
1815 
1816   raw_hash_set(const raw_hash_set& that)
1817       : raw_hash_set(that, AllocTraits::select_on_container_copy_construction(
1818                                that.alloc_ref())) {}
1819 
1820   raw_hash_set(const raw_hash_set& that, const allocator_type& a)
1821       : raw_hash_set(0, that.hash_ref(), that.eq_ref(), a) {
1822     const size_t size = that.size();
1823     if (size == 0) return;
1824     reserve(size);
1825     // Because the table is guaranteed to be empty, we can do something faster
1826     // than a full `insert`.
1827     for (const auto& v : that) {
1828       const size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, v);
1829       auto target = find_first_non_full_outofline(common(), hash);
1830       SetCtrl(common(), target.offset, H2(hash), sizeof(slot_type));
1831       emplace_at(target.offset, v);
1832       common().maybe_increment_generation_on_insert();
1833       infoz().RecordInsert(hash, target.probe_length);
1834     }
1835     common().set_size(size);
1836     set_growth_left(growth_left() - size);
1837   }
1838 
1839   ABSL_ATTRIBUTE_NOINLINE raw_hash_set(raw_hash_set&& that) noexcept(
1840       std::is_nothrow_copy_constructible<hasher>::value &&
1841       std::is_nothrow_copy_constructible<key_equal>::value &&
1842       std::is_nothrow_copy_constructible<allocator_type>::value)
1843       :  // Hash, equality and allocator are copied instead of moved because
1844          // `that` must be left valid. If Hash is std::function<Key>, moving it
1845          // would create a nullptr functor that cannot be called.
1846         settings_(absl::exchange(that.common(), CommonFields{}),
1847                   that.hash_ref(), that.eq_ref(), that.alloc_ref()) {}
1848 
1849   raw_hash_set(raw_hash_set&& that, const allocator_type& a)
1850       : settings_(CommonFields{}, that.hash_ref(), that.eq_ref(), a) {
1851     if (a == that.alloc_ref()) {
1852       std::swap(common(), that.common());
1853     } else {
1854       reserve(that.size());
1855       // Note: this will copy elements of dense_set and unordered_set instead of
1856       // moving them. This can be fixed if it ever becomes an issue.
1857       for (auto& elem : that) insert(std::move(elem));
1858     }
1859   }
1860 
1861   raw_hash_set& operator=(const raw_hash_set& that) {
1862     raw_hash_set tmp(that,
1863                      AllocTraits::propagate_on_container_copy_assignment::value
1864                          ? that.alloc_ref()
1865                          : alloc_ref());
1866     swap(tmp);
1867     return *this;
1868   }
1869 
1870   raw_hash_set& operator=(raw_hash_set&& that) noexcept(
1871       absl::allocator_traits<allocator_type>::is_always_equal::value &&
1872       std::is_nothrow_move_assignable<hasher>::value &&
1873       std::is_nothrow_move_assignable<key_equal>::value) {
1874     // TODO(sbenza): We should only use the operations from the noexcept clause
1875     // to make sure we actually adhere to that contract.
1876     // NOLINTNEXTLINE: not returning *this for performance.
1877     return move_assign(
1878         std::move(that),
1879         typename AllocTraits::propagate_on_container_move_assignment());
1880   }
1881 
1882   ~raw_hash_set() {
1883     const size_t cap = capacity();
1884     if (!cap) return;
1885     destroy_slots();
1886 
1887     // Unpoison before returning the memory to the allocator.
1888     SanitizerUnpoisonMemoryRegion(slot_array(), sizeof(slot_type) * cap);
1889     Deallocate<BackingArrayAlignment(alignof(slot_type))>(
1890         &alloc_ref(), common().backing_array_start(),
1891         AllocSize(cap, sizeof(slot_type), alignof(slot_type)));
1892 
1893     infoz().Unregister();
1894   }
1895 
1896   iterator begin() ABSL_ATTRIBUTE_LIFETIME_BOUND {
1897     auto it = iterator_at(0);
1898     it.skip_empty_or_deleted();
1899     return it;
1900   }
1901   iterator end() ABSL_ATTRIBUTE_LIFETIME_BOUND {
1902     return iterator(common().generation_ptr());
1903   }
1904 
1905   const_iterator begin() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
1906     return const_cast<raw_hash_set*>(this)->begin();
1907   }
1908   const_iterator end() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
1909     return iterator(common().generation_ptr());
1910   }
1911   const_iterator cbegin() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
1912     return begin();
1913   }
1914   const_iterator cend() const ABSL_ATTRIBUTE_LIFETIME_BOUND { return end(); }
1915 
1916   bool empty() const { return !size(); }
1917   size_t size() const { return common().size(); }
1918   size_t capacity() const { return common().capacity(); }
1919   size_t max_size() const { return (std::numeric_limits<size_t>::max)(); }
1920 
1921   ABSL_ATTRIBUTE_REINITIALIZES void clear() {
1922     // Iterating over this container is O(bucket_count()). When bucket_count()
1923     // is much greater than size(), iteration becomes prohibitively expensive.
1924     // For clear() it is more important to reuse the allocated array when the
1925     // container is small, because allocation takes considerably longer than
1926     // destroying the elements of the container. So we pick the
1927     // largest bucket_count() threshold for which iteration is still fast and
1928     // past that we simply deallocate the array.
1929     const size_t cap = capacity();
1930     if (cap == 0) {
1931       // Already guaranteed to be empty; so nothing to do.
1932     } else {
1933       destroy_slots();
1934       ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/cap < 128);
1935     }
1936     common().set_reserved_growth(0);
1937     common().set_reservation_size(0);
1938   }
1939 
1940   inline void destroy_slots() {
1941     const size_t cap = capacity();
1942     const ctrl_t* ctrl = control();
1943     slot_type* slot = slot_array();
1944     for (size_t i = 0; i != cap; ++i) {
1945       if (IsFull(ctrl[i])) {
1946         PolicyTraits::destroy(&alloc_ref(), slot + i);
1947       }
1948     }
1949   }
1950 
1951   // This overload kicks in when the argument is an rvalue of insertable and
1952   // decomposable type other than init_type.
1953   //
1954   //   flat_hash_map<std::string, int> m;
1955   //   m.insert(std::make_pair("abc", 42));
1956   // TODO(cheshire): A type alias T2 is introduced as a workaround for the nvcc
1957   // bug.
1958   template <class T, RequiresInsertable<T> = 0, class T2 = T,
1959             typename std::enable_if<IsDecomposable<T2>::value, int>::type = 0,
1960             T* = nullptr>
1961   std::pair<iterator, bool> insert(T&& value) ABSL_ATTRIBUTE_LIFETIME_BOUND {
1962     return emplace(std::forward<T>(value));
1963   }
1964 
1965   // This overload kicks in when the argument is a bitfield or an lvalue of
1966   // insertable and decomposable type.
1967   //
1968   //   union { int n : 1; };
1969   //   flat_hash_set<int> s;
1970   //   s.insert(n);
1971   //
1972   //   flat_hash_set<std::string> s;
1973   //   const char* p = "hello";
1974   //   s.insert(p);
1975   //
1976   template <
1977       class T, RequiresInsertable<const T&> = 0,
1978       typename std::enable_if<IsDecomposable<const T&>::value, int>::type = 0>
1979   std::pair<iterator, bool> insert(const T& value)
1980       ABSL_ATTRIBUTE_LIFETIME_BOUND {
1981     return emplace(value);
1982   }
1983 
1984   // This overload kicks in when the argument is an rvalue of init_type. Its
1985   // purpose is to handle brace-init-list arguments.
1986   //
1987   //   flat_hash_map<std::string, int> s;
1988   //   s.insert({"abc", 42});
1989   std::pair<iterator, bool> insert(init_type&& value)
1990       ABSL_ATTRIBUTE_LIFETIME_BOUND {
1991     return emplace(std::move(value));
1992   }
1993 
1994   // TODO(cheshire): A type alias T2 is introduced as a workaround for the nvcc
1995   // bug.
1996   template <class T, RequiresInsertable<T> = 0, class T2 = T,
1997             typename std::enable_if<IsDecomposable<T2>::value, int>::type = 0,
1998             T* = nullptr>
1999   iterator insert(const_iterator, T&& value) ABSL_ATTRIBUTE_LIFETIME_BOUND {
2000     return insert(std::forward<T>(value)).first;
2001   }
2002 
2003   template <
2004       class T, RequiresInsertable<const T&> = 0,
2005       typename std::enable_if<IsDecomposable<const T&>::value, int>::type = 0>
2006   iterator insert(const_iterator,
2007                   const T& value) ABSL_ATTRIBUTE_LIFETIME_BOUND {
2008     return insert(value).first;
2009   }
2010 
2011   iterator insert(const_iterator,
2012                   init_type&& value) ABSL_ATTRIBUTE_LIFETIME_BOUND {
2013     return insert(std::move(value)).first;
2014   }
2015 
2016   template <class InputIt>
2017   void insert(InputIt first, InputIt last) {
2018     for (; first != last; ++first) emplace(*first);
2019   }
2020 
2021   template <class T, RequiresNotInit<T> = 0, RequiresInsertable<const T&> = 0>
2022   void insert(std::initializer_list<T> ilist) {
2023     insert(ilist.begin(), ilist.end());
2024   }
2025 
2026   void insert(std::initializer_list<init_type> ilist) {
2027     insert(ilist.begin(), ilist.end());
2028   }
2029 
2030   insert_return_type insert(node_type&& node) ABSL_ATTRIBUTE_LIFETIME_BOUND {
2031     if (!node) return {end(), false, node_type()};
2032     const auto& elem = PolicyTraits::element(CommonAccess::GetSlot(node));
2033     auto res = PolicyTraits::apply(
2034         InsertSlot<false>{*this, std::move(*CommonAccess::GetSlot(node))},
2035         elem);
2036     if (res.second) {
2037       CommonAccess::Reset(&node);
2038       return {res.first, true, node_type()};
2039     } else {
2040       return {res.first, false, std::move(node)};
2041     }
2042   }
2043 
2044   iterator insert(const_iterator,
2045                   node_type&& node) ABSL_ATTRIBUTE_LIFETIME_BOUND {
2046     auto res = insert(std::move(node));
2047     node = std::move(res.node);
2048     return res.position;
2049   }
2050 
2051   // This overload kicks in if we can deduce the key from args. This enables us
2052   // to avoid constructing value_type if an entry with the same key already
2053   // exists.
2054   //
2055   // For example:
2056   //
2057   //   flat_hash_map<std::string, std::string> m = {{"abc", "def"}};
2058   //   // Creates no std::string copies and makes no heap allocations.
2059   //   m.emplace("abc", "xyz");
2060   template <class... Args, typename std::enable_if<
2061                                IsDecomposable<Args...>::value, int>::type = 0>
2062   std::pair<iterator, bool> emplace(Args&&... args)
2063       ABSL_ATTRIBUTE_LIFETIME_BOUND {
2064     return PolicyTraits::apply(EmplaceDecomposable{*this},
2065                                std::forward<Args>(args)...);
2066   }
2067 
2068   // This overload kicks in if we cannot deduce the key from args. It constructs
2069   // value_type unconditionally and then either moves it into the table or
2070   // destroys it.
2071   template <class... Args, typename std::enable_if<
2072                                !IsDecomposable<Args...>::value, int>::type = 0>
2073   std::pair<iterator, bool> emplace(Args&&... args)
2074       ABSL_ATTRIBUTE_LIFETIME_BOUND {
2075     alignas(slot_type) unsigned char raw[sizeof(slot_type)];
2076     slot_type* slot = reinterpret_cast<slot_type*>(&raw);
2077 
2078     PolicyTraits::construct(&alloc_ref(), slot, std::forward<Args>(args)...);
2079     const auto& elem = PolicyTraits::element(slot);
2080     return PolicyTraits::apply(InsertSlot<true>{*this, std::move(*slot)}, elem);
2081   }
2082 
2083   template <class... Args>
2084   iterator emplace_hint(const_iterator,
2085                         Args&&... args) ABSL_ATTRIBUTE_LIFETIME_BOUND {
2086     return emplace(std::forward<Args>(args)...).first;
2087   }
2088 
2089   // Extension API: support for lazy emplace.
2090   //
2091   // Looks up key in the table. If found, returns the iterator to the element.
2092   // Otherwise calls `f` with one argument of type `raw_hash_set::constructor`,
2093   // and returns an iterator to the new element.
2094   //
2095   // `f` must abide by several restrictions:
2096   //  - it MUST call `raw_hash_set::constructor` with arguments as if a
2097   //    `raw_hash_set::value_type` is constructed,
2098   //  - it MUST NOT access the container before the call to
2099   //    `raw_hash_set::constructor`, and
2100   //  - it MUST NOT erase the lazily emplaced element.
2101   // Doing any of these is undefined behavior.
2102   //
2103   // For example:
2104   //
2105   //   std::unordered_set<ArenaString> s;
2106   //   // Makes an ArenaString even if "abc" is already in the set.
2107   //   s.insert(ArenaString(&arena, "abc"));
2108   //
2109   //   flat_hash_set<ArenaString> s;
2110   //   // Makes an ArenaString only if "abc" is not yet in the set.
2111   //   s.lazy_emplace("abc", [&](const constructor& ctor) {
2112   //     ctor(&arena, "abc");
2113   //   });
2114   //
2115   // WARNING: This API is currently experimental. If there is a way to implement
2116   // the same thing with the rest of the API, prefer that.
2117   class constructor {
2118     friend class raw_hash_set;
2119 
2120    public:
2121     template <class... Args>
2122     void operator()(Args&&... args) const {
2123       assert(*slot_);
2124       PolicyTraits::construct(alloc_, *slot_, std::forward<Args>(args)...);
2125       *slot_ = nullptr;
2126     }
2127 
2128    private:
2129     constructor(allocator_type* a, slot_type** slot) : alloc_(a), slot_(slot) {}
2130 
2131     allocator_type* alloc_;
2132     slot_type** slot_;
2133   };
2134 
2135   template <class K = key_type, class F>
2136   iterator lazy_emplace(const key_arg<K>& key,
2137                         F&& f) ABSL_ATTRIBUTE_LIFETIME_BOUND {
2138     auto res = find_or_prepare_insert(key);
2139     if (res.second) {
2140       slot_type* slot = slot_array() + res.first;
2141       std::forward<F>(f)(constructor(&alloc_ref(), &slot));
2142       assert(!slot);
2143     }
2144     return iterator_at(res.first);
2145   }
2146 
2147   // Extension API: support for heterogeneous keys.
2148   //
2149   //   std::unordered_set<std::string> s;
2150   //   // Turns "abc" into std::string.
2151   //   s.erase("abc");
2152   //
2153   //   flat_hash_set<std::string> s;
2154   //   // Uses "abc" directly without copying it into std::string.
2155   //   s.erase("abc");
2156   template <class K = key_type>
2157   size_type erase(const key_arg<K>& key) {
2158     auto it = find(key);
2159     if (it == end()) return 0;
2160     erase(it);
2161     return 1;
2162   }
2163 
2164   // Erases the element pointed to by `it`.  Unlike `std::unordered_set::erase`,
2165   // this method returns void to reduce algorithmic complexity to O(1).  The
2166   // iterator is invalidated, so any increment should be done before calling
2167   // erase.  In order to erase while iterating across a map, use the following
2168   // idiom (which also works for standard containers):
2169   //
2170   // for (auto it = m.begin(), end = m.end(); it != end;) {
2171   //   // `erase()` will invalidate `it`, so advance `it` first.
2172   //   auto copy_it = it++;
2173   //   if (<pred>) {
2174   //     m.erase(copy_it);
2175   //   }
2176   // }
2177   void erase(const_iterator cit) { erase(cit.inner_); }
2178 
2179   // This overload is necessary because otherwise erase<K>(const K&) would be
2180   // a better match if a non-const iterator is passed as an argument.
2181   void erase(iterator it) {
2182     AssertIsFull(it.ctrl_, it.generation(), it.generation_ptr(), "erase()");
2183     PolicyTraits::destroy(&alloc_ref(), it.slot_);
2184     erase_meta_only(it);
2185   }
2186 
2187   iterator erase(const_iterator first,
2188                  const_iterator last) ABSL_ATTRIBUTE_LIFETIME_BOUND {
2189     // We check for empty first because ClearBackingArray requires that
2190     // capacity() > 0 as a precondition.
2191     if (empty()) return end();
2192     if (first == begin() && last == end()) {
2193       // TODO(ezb): we access control bytes in destroy_slots so it could make
2194       // sense to combine destroy_slots and ClearBackingArray to avoid cache
2195       // misses when the table is large. Note that we also do this in clear().
2196       destroy_slots();
2197       ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/true);
2198       common().set_reserved_growth(common().reservation_size());
2199       return end();
2200     }
2201     while (first != last) {
2202       erase(first++);
2203     }
2204     return last.inner_;
2205   }
2206 
2207   // Moves elements from `src` into `this`.
2208   // If the element already exists in `this`, it is left unmodified in `src`.
2209   template <typename H, typename E>
2210   void merge(raw_hash_set<Policy, H, E, Alloc>& src) {  // NOLINT
2211     assert(this != &src);
2212     for (auto it = src.begin(), e = src.end(); it != e;) {
2213       auto next = std::next(it);
2214       if (PolicyTraits::apply(InsertSlot<false>{*this, std::move(*it.slot_)},
2215                               PolicyTraits::element(it.slot_))
2216               .second) {
2217         src.erase_meta_only(it);
2218       }
2219       it = next;
2220     }
2221   }
2222 
2223   template <typename H, typename E>
2224   void merge(raw_hash_set<Policy, H, E, Alloc>&& src) {
2225     merge(src);
2226   }
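
  // For example (a minimal usage sketch of merge()):
  //
  //   absl::flat_hash_set<int> dst = {1, 2};
  //   absl::flat_hash_set<int> src = {2, 3};
  //   dst.merge(src);
  //   // dst now holds {1, 2, 3}; src still holds {2}, because the duplicate
  //   // element is left in src rather than copied or destroyed.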
2227 
2228   node_type extract(const_iterator position) {
2229     AssertIsFull(position.inner_.ctrl_, position.inner_.generation(),
2230                  position.inner_.generation_ptr(), "extract()");
2231     auto node =
2232         CommonAccess::Transfer<node_type>(alloc_ref(), position.inner_.slot_);
2233     erase_meta_only(position);
2234     return node;
2235   }
2236 
2237   template <
2238       class K = key_type,
2239       typename std::enable_if<!std::is_same<K, iterator>::value, int>::type = 0>
2240   node_type extract(const key_arg<K>& key) {
2241     auto it = find(key);
2242     return it == end() ? node_type() : extract(const_iterator{it});
2243   }
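
  // For example (a minimal usage sketch of the node-handle API): an element
  // can be moved between compatible containers without copying it.
  //
  //   absl::flat_hash_set<std::string> a = {"x", "y"};
  //   absl::flat_hash_set<std::string> b;
  //   auto node = a.extract("x");              // "x" now lives in the node
  //   if (!node.empty()) b.insert(std::move(node));
  //   // b contains "x"; a contains only "y".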
2244 
2245   void swap(raw_hash_set& that) noexcept(
2246       IsNoThrowSwappable<hasher>() && IsNoThrowSwappable<key_equal>() &&
2247       IsNoThrowSwappable<allocator_type>(
2248           typename AllocTraits::propagate_on_container_swap{})) {
2249     using std::swap;
2250     swap(common(), that.common());
2251     swap(hash_ref(), that.hash_ref());
2252     swap(eq_ref(), that.eq_ref());
2253     SwapAlloc(alloc_ref(), that.alloc_ref(),
2254               typename AllocTraits::propagate_on_container_swap{});
2255   }
2256 
2257   void rehash(size_t n) {
2258     if (n == 0 && capacity() == 0) return;
2259     if (n == 0 && size() == 0) {
2260       ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/false);
2261       return;
2262     }
2263 
2264     // bitor is a faster way of doing `max` here. We will round up to the next
2265     // power-of-2-minus-1, so bitor is good enough.
2266     auto m = NormalizeCapacity(n | GrowthToLowerboundCapacity(size()));
2267     // n == 0 unconditionally rehashes as per the standard.
2268     if (n == 0 || m > capacity()) {
2269       resize(m);
2270 
2271       // This is after resize, to ensure that we have completed the allocation
2272       // and have potentially sampled the hashtable.
2273       infoz().RecordReservation(n);
2274     }
2275   }
2276 
2277   void reserve(size_t n) {
2278     if (n > size() + growth_left()) {
2279       size_t m = GrowthToLowerboundCapacity(n);
2280       resize(NormalizeCapacity(m));
2281 
2282       // This is after resize, to ensure that we have completed the allocation
2283       // and have potentially sampled the hashtable.
2284       infoz().RecordReservation(n);
2285     }
2286     common().reset_reserved_growth(n);
2287     common().set_reservation_size(n);
2288   }
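
  // For example (a minimal usage sketch): reserving before a bulk insert
  // guarantees that no rehash happens while those elements are added.
  //
  //   absl::flat_hash_set<int> s;
  //   s.reserve(1000);
  //   for (int i = 0; i < 1000; ++i) s.insert(i);  // never rehashes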
2289 
2290   // Extension API: support for heterogeneous keys.
2291   //
2292   //   std::unordered_set<std::string> s;
2293   //   // Turns "abc" into std::string.
2294   //   s.count("abc");
2295   //
2296   //   flat_hash_set<std::string> s;
2297   //   // Uses "abc" directly without copying it into std::string.
2298   //   s.count("abc");
2299   template <class K = key_type>
2300   size_t count(const key_arg<K>& key) const {
2301     return find(key) == end() ? 0 : 1;
2302   }
2303 
2304   // Issues CPU prefetch instructions for the memory needed to find or insert
2305   // a key.  Like all lookup functions, this supports heterogeneous keys.
2306   //
2307   // NOTE: This is a very low level operation and should not be used without
2308   // specific benchmarks indicating its importance.
2309   template <class K = key_type>
2310   void prefetch(const key_arg<K>& key) const {
2311     (void)key;
2312     // Avoid probing if we won't be able to prefetch the addresses received.
2313 #ifdef ABSL_HAVE_PREFETCH
2314     prefetch_heap_block();
2315     auto seq = probe(common(), hash_ref()(key));
2316     PrefetchToLocalCache(control() + seq.offset());
2317     PrefetchToLocalCache(slot_array() + seq.offset());
2318 #endif  // ABSL_HAVE_PREFETCH
2319   }
2320 
2321   // The API of find() has two extensions.
2322   //
2323   // 1. The hash can be passed by the user. It must be equal to the hash of the
2324   // key.
2325   //
2326   // 2. The type of the key argument doesn't have to be key_type. This is
2327   // so-called heterogeneous key support.
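  //
  // For example (a minimal usage sketch of extension 1), the hash can be
  // computed once and reused:
  //
  //   const size_t h = s.hash_function()(key);
  //   auto it = s.find(key, h);  // h must equal s.hash_function()(key)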
2328   template <class K = key_type>
2329   iterator find(const key_arg<K>& key,
2330                 size_t hash) ABSL_ATTRIBUTE_LIFETIME_BOUND {
2331     auto seq = probe(common(), hash);
2332     slot_type* slot_ptr = slot_array();
2333     const ctrl_t* ctrl = control();
2334     while (true) {
2335       Group g{ctrl + seq.offset()};
2336       for (uint32_t i : g.Match(H2(hash))) {
2337         if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
2338                 EqualElement<K>{key, eq_ref()},
2339                 PolicyTraits::element(slot_ptr + seq.offset(i)))))
2340           return iterator_at(seq.offset(i));
2341       }
2342       if (ABSL_PREDICT_TRUE(g.MaskEmpty())) return end();
2343       seq.next();
2344       assert(seq.index() <= capacity() && "full table!");
2345     }
2346   }
2347   template <class K = key_type>
2348   iterator find(const key_arg<K>& key) ABSL_ATTRIBUTE_LIFETIME_BOUND {
2349     prefetch_heap_block();
2350     return find(key, hash_ref()(key));
2351   }
2352 
2353   template <class K = key_type>
2354   const_iterator find(const key_arg<K>& key,
2355                       size_t hash) const ABSL_ATTRIBUTE_LIFETIME_BOUND {
2356     return const_cast<raw_hash_set*>(this)->find(key, hash);
2357   }
2358   template <class K = key_type>
2359   const_iterator find(const key_arg<K>& key) const
2360       ABSL_ATTRIBUTE_LIFETIME_BOUND {
2361     prefetch_heap_block();
2362     return find(key, hash_ref()(key));
2363   }
2364 
2365   template <class K = key_type>
2366   bool contains(const key_arg<K>& key) const {
2367     return find(key) != end();
2368   }
2369 
2370   template <class K = key_type>
2371   std::pair<iterator, iterator> equal_range(const key_arg<K>& key)
2372       ABSL_ATTRIBUTE_LIFETIME_BOUND {
2373     auto it = find(key);
2374     if (it != end()) return {it, std::next(it)};
2375     return {it, it};
2376   }
2377   template <class K = key_type>
2378   std::pair<const_iterator, const_iterator> equal_range(
2379       const key_arg<K>& key) const ABSL_ATTRIBUTE_LIFETIME_BOUND {
2380     auto it = find(key);
2381     if (it != end()) return {it, std::next(it)};
2382     return {it, it};
2383   }
2384 
2385   size_t bucket_count() const { return capacity(); }
2386   float load_factor() const {
2387     return capacity() ? static_cast<double>(size()) / capacity() : 0.0;
2388   }
2389   float max_load_factor() const { return 1.0f; }
2390   void max_load_factor(float) {
2391     // Does nothing.
2392   }
2393 
2394   hasher hash_function() const { return hash_ref(); }
2395   key_equal key_eq() const { return eq_ref(); }
2396   allocator_type get_allocator() const { return alloc_ref(); }
2397 
2398   friend bool operator==(const raw_hash_set& a, const raw_hash_set& b) {
2399     if (a.size() != b.size()) return false;
2400     const raw_hash_set* outer = &a;
2401     const raw_hash_set* inner = &b;
2402     if (outer->capacity() > inner->capacity()) std::swap(outer, inner);
2403     for (const value_type& elem : *outer)
2404       if (!inner->has_element(elem)) return false;
2405     return true;
2406   }
2407 
2408   friend bool operator!=(const raw_hash_set& a, const raw_hash_set& b) {
2409     return !(a == b);
2410   }
2411 
2412   template <typename H>
2413   friend typename std::enable_if<H::template is_hashable<value_type>::value,
2414                                  H>::type
2415   AbslHashValue(H h, const raw_hash_set& s) {
2416     return H::combine(H::combine_unordered(std::move(h), s.begin(), s.end()),
2417                       s.size());
2418   }
2419 
2420   friend void swap(raw_hash_set& a,
2421                    raw_hash_set& b) noexcept(noexcept(a.swap(b))) {
2422     a.swap(b);
2423   }
2424 
2425  private:
2426   template <class Container, typename Enabler>
2427   friend struct absl::container_internal::hashtable_debug_internal::
2428       HashtableDebugAccess;
2429 
2430   struct FindElement {
2431     template <class K, class... Args>
2432     const_iterator operator()(const K& key, Args&&...) const {
2433       return s.find(key);
2434     }
2435     const raw_hash_set& s;
2436   };
2437 
2438   struct HashElement {
2439     template <class K, class... Args>
2440     size_t operator()(const K& key, Args&&...) const {
2441       return h(key);
2442     }
2443     const hasher& h;
2444   };
2445 
2446   template <class K1>
2447   struct EqualElement {
2448     template <class K2, class... Args>
2449     bool operator()(const K2& lhs, Args&&...) const {
2450       return eq(lhs, rhs);
2451     }
2452     const K1& rhs;
2453     const key_equal& eq;
2454   };
2455 
2456   struct EmplaceDecomposable {
2457     template <class K, class... Args>
2458     std::pair<iterator, bool> operator()(const K& key, Args&&... args) const {
2459       auto res = s.find_or_prepare_insert(key);
2460       if (res.second) {
2461         s.emplace_at(res.first, std::forward<Args>(args)...);
2462       }
2463       return {s.iterator_at(res.first), res.second};
2464     }
2465     raw_hash_set& s;
2466   };
2467 
2468   template <bool do_destroy>
2469   struct InsertSlot {
2470     template <class K, class... Args>
2471     std::pair<iterator, bool> operator()(const K& key, Args&&...) && {
2472       auto res = s.find_or_prepare_insert(key);
2473       if (res.second) {
2474         PolicyTraits::transfer(&s.alloc_ref(), s.slot_array() + res.first,
2475                                &slot);
2476       } else if (do_destroy) {
2477         PolicyTraits::destroy(&s.alloc_ref(), &slot);
2478       }
2479       return {s.iterator_at(res.first), res.second};
2480     }
2481     raw_hash_set& s;
2482     // Constructed slot. Either moved into place or destroyed.
2483     slot_type&& slot;
2484   };
2485 
2486   // Erases, but does not destroy, the value pointed to by `it`.
2487   //
2488   // This merely updates the pertinent control byte. This can be used in
2489   // conjunction with Policy::transfer to move the object to another place.
2490   void erase_meta_only(const_iterator it) {
2491     EraseMetaOnly(common(), it.inner_.ctrl_, sizeof(slot_type));
2492   }
2493 
2494   // Allocates a backing array for `self` and initializes its control bytes.
2495   // This reads `capacity` and updates all other fields based on the result of
2496   // the allocation.
2497   //
2498   // This does not free the currently held array; `capacity` must be nonzero.
2499   inline void initialize_slots() {
2500     // People are often sloppy with the exact type of their allocator (sometimes
2501     // it has an extra const or is missing the pair, but rebinds made it work
2502     // anyway).
2503     using CharAlloc =
2504         typename absl::allocator_traits<Alloc>::template rebind_alloc<char>;
2505     InitializeSlots<CharAlloc, sizeof(slot_type), alignof(slot_type)>(
2506         common(), CharAlloc(alloc_ref()));
2507   }
2508 
2509   ABSL_ATTRIBUTE_NOINLINE void resize(size_t new_capacity) {
2510     assert(IsValidCapacity(new_capacity));
2511     auto* old_ctrl = control();
2512     auto* old_slots = slot_array();
2513     const size_t old_capacity = common().capacity();
2514     common().set_capacity(new_capacity);
2515     initialize_slots();
2516 
2517     auto* new_slots = slot_array();
2518     size_t total_probe_length = 0;
2519     for (size_t i = 0; i != old_capacity; ++i) {
2520       if (IsFull(old_ctrl[i])) {
2521         size_t hash = PolicyTraits::apply(HashElement{hash_ref()},
2522                                           PolicyTraits::element(old_slots + i));
2523         auto target = find_first_non_full(common(), hash);
2524         size_t new_i = target.offset;
2525         total_probe_length += target.probe_length;
2526         SetCtrl(common(), new_i, H2(hash), sizeof(slot_type));
2527         PolicyTraits::transfer(&alloc_ref(), new_slots + new_i, old_slots + i);
2528       }
2529     }
2530     if (old_capacity) {
2531       SanitizerUnpoisonMemoryRegion(old_slots,
2532                                     sizeof(slot_type) * old_capacity);
2533       Deallocate<BackingArrayAlignment(alignof(slot_type))>(
2534           &alloc_ref(), old_ctrl - ControlOffset(),
2535           AllocSize(old_capacity, sizeof(slot_type), alignof(slot_type)));
2536     }
2537     infoz().RecordRehash(total_probe_length);
2538   }
2539 
2540   // Prunes control bytes to remove as many tombstones as possible.
2541   //
2542   // See the comment on `rehash_and_grow_if_necessary()`.
2543   inline void drop_deletes_without_resize() {
2544     // Stack-allocate space for swapping elements.
2545     alignas(slot_type) unsigned char tmp[sizeof(slot_type)];
2546     DropDeletesWithoutResize(common(), GetPolicyFunctions(), tmp);
2547   }
2548 
2549   // Called whenever the table *might* need to conditionally grow.
2550   //
2551   // This function is an optimization opportunity to perform a rehash even when
2552   // growth is unnecessary, because vacating tombstones is beneficial for
2553   // performance in the long-run.
2554   void rehash_and_grow_if_necessary() {
2555     const size_t cap = capacity();
2556     if (cap > Group::kWidth &&
2557         // Do these calculations in 64-bit to avoid overflow.
2558         size() * uint64_t{32} <= cap * uint64_t{25}) {
2559       // Squash DELETED without growing if there is enough capacity.
2560       //
2561       // Rehash in place if the current size is <= 25/32 of capacity.
2562       // Rationale for such a high factor: 1) drop_deletes_without_resize() is
2563       // faster than resize, and 2) it takes quite a bit of work to add
2564       // tombstones.  In the worst case, seems to take approximately 4
2565       // insert/erase pairs to create a single tombstone and so if we are
2566       // rehashing because of tombstones, we can afford to rehash-in-place as
2567       // long as we are reclaiming at least 1/8 the capacity without doing more
2568       // than 2X the work.  (Where "work" is defined to be size() for rehashing
2569       // or rehashing in place, and 1 for an insert or erase.)  But rehashing in
2570       // place is faster per operation than inserting or even doubling the size
      // of the table, so we can actually afford to reclaim even less space
      // from a resize-in-place.  The decision is to rehash in place if we can
      // reclaim about 1/8th of the usable capacity (specifically 3/32 of the
      // total capacity), which means that the total cost of rehashing will be
      // a small fraction of the total work.
2576       //
2577       // Here is output of an experiment using the BM_CacheInSteadyState
2578       // benchmark running the old case (where we rehash-in-place only if we can
2579       // reclaim at least 7/16*capacity) vs. this code (which rehashes in place
2580       // if we can recover 3/32*capacity).
2581       //
      // Note that although the worst-case number of rehashes jumped from
      // 15 to 190, the number of operations per second is almost the same.
2584       //
2585       // Abridged output of running BM_CacheInSteadyState benchmark from
2586       // raw_hash_set_benchmark.   N is the number of insert/erase operations.
2587       //
      //      | OLD (recover >= 7/16)       | NEW (recover >= 3/32)
2589       // size |    N/s LoadFactor NRehashes |    N/s LoadFactor NRehashes
2590       //  448 | 145284       0.44        18 | 140118       0.44        19
2591       //  493 | 152546       0.24        11 | 151417       0.48        28
2592       //  538 | 151439       0.26        11 | 151152       0.53        38
2593       //  583 | 151765       0.28        11 | 150572       0.57        50
2594       //  628 | 150241       0.31        11 | 150853       0.61        66
2595       //  672 | 149602       0.33        12 | 150110       0.66        90
2596       //  717 | 149998       0.35        12 | 149531       0.70       129
2597       //  762 | 149836       0.37        13 | 148559       0.74       190
2598       //  807 | 149736       0.39        14 | 151107       0.39        14
2599       //  852 | 150204       0.42        15 | 151019       0.42        15
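      //
      // For illustration (round, assumed numbers): with capacity 1023 this
      // branch is reached once growth_left() hits 0, i.e. once full slots plus
      // tombstones reach the growth limit of roughly 7/8 of the capacity
      // (~896). If size() <= 799 (since 799 * 32 <= 1023 * 25), at least ~97
      // of those occupied slots are tombstones, so we squash them in place
      // below; with a larger size() we instead grow via
      // resize(NextCapacity(1023)) in the else branch.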
2600       drop_deletes_without_resize();
2601     } else {
2602       // Otherwise grow the container.
2603       resize(NextCapacity(cap));
2604     }
2605   }
2606 
2607   bool has_element(const value_type& elem) const {
2608     size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, elem);
2609     auto seq = probe(common(), hash);
2610     const ctrl_t* ctrl = control();
2611     while (true) {
2612       Group g{ctrl + seq.offset()};
2613       for (uint32_t i : g.Match(H2(hash))) {
2614         if (ABSL_PREDICT_TRUE(
2615                 PolicyTraits::element(slot_array() + seq.offset(i)) == elem))
2616           return true;
2617       }
2618       if (ABSL_PREDICT_TRUE(g.MaskEmpty())) return false;
2619       seq.next();
2620       assert(seq.index() <= capacity() && "full table!");
2621     }
2622     return false;
2623   }
2624 
2625   // TODO(alkis): Optimize this assuming *this and that don't overlap.
2626   raw_hash_set& move_assign(raw_hash_set&& that, std::true_type) {
2627     raw_hash_set tmp(std::move(that));
2628     swap(tmp);
2629     return *this;
2630   }
2631   raw_hash_set& move_assign(raw_hash_set&& that, std::false_type) {
2632     raw_hash_set tmp(std::move(that), alloc_ref());
2633     swap(tmp);
2634     return *this;
2635   }
2636 
2637  protected:
  // Attempts to find `key` in the table; if found, returns {index, false}.
  // Otherwise returns {index, true} for a slot that the value can be inserted
  // into, with the control byte already set to `key`'s H2.
2641   template <class K>
2642   std::pair<size_t, bool> find_or_prepare_insert(const K& key) {
2643     prefetch_heap_block();
2644     auto hash = hash_ref()(key);
2645     auto seq = probe(common(), hash);
2646     const ctrl_t* ctrl = control();
2647     while (true) {
2648       Group g{ctrl + seq.offset()};
2649       for (uint32_t i : g.Match(H2(hash))) {
2650         if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
2651                 EqualElement<K>{key, eq_ref()},
2652                 PolicyTraits::element(slot_array() + seq.offset(i)))))
2653           return {seq.offset(i), false};
2654       }
2655       if (ABSL_PREDICT_TRUE(g.MaskEmpty())) break;
2656       seq.next();
2657       assert(seq.index() <= capacity() && "full table!");
2658     }
2659     return {prepare_insert(hash), true};
2660   }
2661 
2662   // Given the hash of a value not currently in the table, finds the next
2663   // viable slot index to insert it at.
2664   //
2665   // REQUIRES: At least one non-full slot available.
2666   size_t prepare_insert(size_t hash) ABSL_ATTRIBUTE_NOINLINE {
2667     const bool rehash_for_bug_detection =
2668         common().should_rehash_for_bug_detection_on_insert();
2669     if (rehash_for_bug_detection) {
2670       // Move to a different heap allocation in order to detect bugs.
2671       const size_t cap = capacity();
2672       resize(growth_left() > 0 ? cap : NextCapacity(cap));
2673     }
2674     auto target = find_first_non_full(common(), hash);
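    // If we are out of growth capacity but the chosen slot is a tombstone, we
    // can reuse it without rehashing; otherwise rehash/grow first and then
    // re-probe for a fresh target.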
2675     if (!rehash_for_bug_detection &&
2676         ABSL_PREDICT_FALSE(growth_left() == 0 &&
2677                            !IsDeleted(control()[target.offset]))) {
2678       rehash_and_grow_if_necessary();
2679       target = find_first_non_full(common(), hash);
2680     }
2681     common().set_size(common().size() + 1);
2682     set_growth_left(growth_left() - IsEmpty(control()[target.offset]));
2683     SetCtrl(common(), target.offset, H2(hash), sizeof(slot_type));
2684     common().maybe_increment_generation_on_insert();
2685     infoz().RecordInsert(hash, target.probe_length);
2686     return target.offset;
2687   }
2688 
  // Constructs the value in the slot at index `i`. This only works after an
  // unsuccessful find_or_prepare_insert() and before any other modifications
  // happen in the raw_hash_set.
2692   //
2693   // PRECONDITION: i is an index returned from find_or_prepare_insert(k), where
2694   // k is the key decomposed from `forward<Args>(args)...`, and the bool
2695   // returned by find_or_prepare_insert(k) was true.
2696   // POSTCONDITION: *m.iterator_at(i) == value_type(forward<Args>(args)...).
2697   template <class... Args>
2698   void emplace_at(size_t i, Args&&... args) {
2699     PolicyTraits::construct(&alloc_ref(), slot_array() + i,
2700                             std::forward<Args>(args)...);
2701 
2702     assert(PolicyTraits::apply(FindElement{*this}, *iterator_at(i)) ==
2703                iterator_at(i) &&
2704            "constructed value does not match the lookup key");
2705   }
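
  // A minimal sketch (the wrapper name below is illustrative, not part of this
  // class) of how find_or_prepare_insert() and emplace_at() are meant to be
  // combined by derived containers:
  //
  //   template <class K, class... Args>
  //   std::pair<iterator, bool> insert_decomposed(const K& key,
  //                                               Args&&... args) {
  //     // `key` must be the key the policy decomposes from a value
  //     // constructed from `args...`.
  //     auto res = find_or_prepare_insert(key);
  //     if (res.second) emplace_at(res.first, std::forward<Args>(args)...);
  //     return {iterator_at(res.first), res.second};
  //   }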
2706 
2707   iterator iterator_at(size_t i) ABSL_ATTRIBUTE_LIFETIME_BOUND {
2708     return {control() + i, slot_array() + i, common().generation_ptr()};
2709   }
2710   const_iterator iterator_at(size_t i) const ABSL_ATTRIBUTE_LIFETIME_BOUND {
2711     return {control() + i, slot_array() + i, common().generation_ptr()};
2712   }
2713 
2714  private:
2715   friend struct RawHashSetTestOnlyAccess;
2716 
2717   // The number of slots we can still fill without needing to rehash.
2718   //
  // This is stored separately due to tombstones: we do not include tombstones
  // in the growth capacity because we'd like to rehash when the table is
  // filled with tombstones; otherwise, probe sequences might get unacceptably
  // long without ever triggering a rehash. Callers can also force a rehash via
  // the standard `rehash(0)`, which will recompute this value as a
  // side-effect.
2725   //
2726   // See `CapacityToGrowth()`.
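  //
  // For example (with illustrative numbers): starting from an empty table with
  // growth_left() == G, inserting 10 elements and then erasing 3 of them
  // leaves growth_left() == G - 10 rather than G - 7 whenever the erased slots
  // become tombstones; the capacity they consumed is only reclaimed by the
  // next rehash.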
2727   size_t growth_left() const { return common().growth_left(); }
2728   void set_growth_left(size_t gl) { return common().set_growth_left(gl); }
2729 
  // Prefetch the heap-allocated memory region to resolve potential TLB and
  // cache misses. This is intended to overlap with the computation of the
  // key's hash.
2733   void prefetch_heap_block() const {
2734 #if ABSL_HAVE_BUILTIN(__builtin_prefetch) || defined(__GNUC__)
2735     __builtin_prefetch(control(), 0, 1);
2736 #endif
2737   }
2738 
2739   CommonFields& common() { return settings_.template get<0>(); }
2740   const CommonFields& common() const { return settings_.template get<0>(); }
2741 
2742   ctrl_t* control() const { return common().control(); }
2743   slot_type* slot_array() const {
2744     return static_cast<slot_type*>(common().slot_array());
2745   }
2746   HashtablezInfoHandle& infoz() { return common().infoz(); }
2747 
2748   hasher& hash_ref() { return settings_.template get<1>(); }
2749   const hasher& hash_ref() const { return settings_.template get<1>(); }
2750   key_equal& eq_ref() { return settings_.template get<2>(); }
2751   const key_equal& eq_ref() const { return settings_.template get<2>(); }
2752   allocator_type& alloc_ref() { return settings_.template get<3>(); }
2753   const allocator_type& alloc_ref() const {
2754     return settings_.template get<3>();
2755   }
2756 
2757   // Make type-specific functions for this type's PolicyFunctions struct.
2758   static size_t hash_slot_fn(void* set, void* slot) {
2759     auto* h = static_cast<raw_hash_set*>(set);
2760     return PolicyTraits::apply(
2761         HashElement{h->hash_ref()},
2762         PolicyTraits::element(static_cast<slot_type*>(slot)));
2763   }
2764   static void transfer_slot_fn(void* set, void* dst, void* src) {
2765     auto* h = static_cast<raw_hash_set*>(set);
2766     PolicyTraits::transfer(&h->alloc_ref(), static_cast<slot_type*>(dst),
2767                            static_cast<slot_type*>(src));
2768   }
2769   // Note: dealloc_fn will only be used if we have a non-standard allocator.
2770   static void dealloc_fn(CommonFields& common, const PolicyFunctions&) {
2771     auto* set = reinterpret_cast<raw_hash_set*>(&common);
2772 
2773     // Unpoison before returning the memory to the allocator.
2774     SanitizerUnpoisonMemoryRegion(common.slot_array(),
2775                                   sizeof(slot_type) * common.capacity());
2776 
2777     Deallocate<BackingArrayAlignment(alignof(slot_type))>(
2778         &set->alloc_ref(), common.backing_array_start(),
2779         common.alloc_size(sizeof(slot_type), alignof(slot_type)));
2780   }
2781 
2782   static const PolicyFunctions& GetPolicyFunctions() {
2783     static constexpr PolicyFunctions value = {
2784         sizeof(slot_type),
2785         &raw_hash_set::hash_slot_fn,
2786         PolicyTraits::transfer_uses_memcpy()
2787             ? TransferRelocatable<sizeof(slot_type)>
2788             : &raw_hash_set::transfer_slot_fn,
2789         (std::is_same<SlotAlloc, std::allocator<slot_type>>::value
2790              ? &DeallocateStandard<alignof(slot_type)>
2791              : &raw_hash_set::dealloc_fn),
2792     };
2793     return value;
2794   }
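
  // Shared, non-templated routines (such as the DropDeletesWithoutResize()
  // call in drop_deletes_without_resize() above) receive this table and
  // dispatch through its function pointers, so one copy of their code serves
  // every raw_hash_set instantiation.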
2795 
2796   // Bundle together CommonFields plus other objects which might be empty.
2797   // CompressedTuple will ensure that sizeof is not affected by any of the empty
2798   // fields that occur after CommonFields.
2799   absl::container_internal::CompressedTuple<CommonFields, hasher, key_equal,
2800                                             allocator_type>
2801       settings_{CommonFields{}, hasher{}, key_equal{}, allocator_type{}};
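
  // For example (illustrative): when `hasher`, `key_equal` and
  // `allocator_type` are all empty, non-final classes, the empty-base
  // optimization inside CompressedTuple makes this bundle no larger than
  // CommonFields alone, i.e. sizeof(settings_) == sizeof(CommonFields).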
2802 };
2803 
// Erases all elements that satisfy the predicate `pred` from the container `c`.
// Returns the number of erased elements.
2805 template <typename P, typename H, typename E, typename A, typename Predicate>
2806 typename raw_hash_set<P, H, E, A>::size_type EraseIf(
2807     Predicate& pred, raw_hash_set<P, H, E, A>* c) {
2808   const auto initial_size = c->size();
2809   for (auto it = c->begin(), last = c->end(); it != last;) {
2810     if (pred(*it)) {
2811       c->erase(it++);
2812     } else {
2813       ++it;
2814     }
2815   }
2816   return initial_size - c->size();
2817 }
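
// Example usage (the container and predicate are illustrative):
//
//   absl::flat_hash_set<int> s = {1, 2, 3, 4};
//   auto is_odd = [](int v) { return v % 2 != 0; };
//   size_t removed = EraseIf(is_odd, &s);  // removed == 2, s now holds {2, 4}
//
// Note that `pred` is taken by non-const reference, so it must name an lvalue;
// the public `absl::erase_if()` overloads for the SwissTable containers
// forward to this helper.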
2818 
2819 namespace hashtable_debug_internal {
2820 template <typename Set>
2821 struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
2822   using Traits = typename Set::PolicyTraits;
2823   using Slot = typename Traits::slot_type;
2824 
2825   static size_t GetNumProbes(const Set& set,
2826                              const typename Set::key_type& key) {
2827     size_t num_probes = 0;
2828     size_t hash = set.hash_ref()(key);
2829     auto seq = probe(set.common(), hash);
2830     const ctrl_t* ctrl = set.control();
2831     while (true) {
2832       container_internal::Group g{ctrl + seq.offset()};
2833       for (uint32_t i : g.Match(container_internal::H2(hash))) {
2834         if (Traits::apply(
2835                 typename Set::template EqualElement<typename Set::key_type>{
2836                     key, set.eq_ref()},
2837                 Traits::element(set.slot_array() + seq.offset(i))))
2838           return num_probes;
2839         ++num_probes;
2840       }
2841       if (g.MaskEmpty()) return num_probes;
2842       seq.next();
2843       ++num_probes;
2844     }
2845   }
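
  // Example (illustrative): for a container `set` built on raw_hash_set and a
  // key `k`,
  //
  //   using Debug = absl::container_internal::hashtable_debug_internal::
  //       HashtableDebugAccess<decltype(set)>;
  //   size_t probes = Debug::GetNumProbes(set, k);
  //
  // gives a rough measure of how much probing a lookup of `k` requires in the
  // table's current layout (0 means the first group inspected resolves it).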
2846 
2847   static size_t AllocatedByteSize(const Set& c) {
2848     size_t capacity = c.capacity();
2849     if (capacity == 0) return 0;
2850     size_t m = AllocSize(capacity, sizeof(Slot), alignof(Slot));
2851 
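    // space_used(nullptr) reports the element-independent per-slot heap usage;
    // the ~size_t{} sentinel means it must instead be computed per element in
    // the loop below.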
2852     size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
2853     if (per_slot != ~size_t{}) {
2854       m += per_slot * c.size();
2855     } else {
2856       const ctrl_t* ctrl = c.control();
2857       for (size_t i = 0; i != capacity; ++i) {
2858         if (container_internal::IsFull(ctrl[i])) {
2859           m += Traits::space_used(c.slot_array() + i);
2860         }
2861       }
2862     }
2863     return m;
2864   }
2865 
2866   static size_t LowerBoundAllocatedByteSize(size_t size) {
2867     size_t capacity = GrowthToLowerboundCapacity(size);
2868     if (capacity == 0) return 0;
2869     size_t m =
2870         AllocSize(NormalizeCapacity(capacity), sizeof(Slot), alignof(Slot));
2871     size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
2872     if (per_slot != ~size_t{}) {
2873       m += per_slot * size;
2874     }
2875     return m;
2876   }
2877 };
2878 
2879 }  // namespace hashtable_debug_internal
2880 }  // namespace container_internal
2881 ABSL_NAMESPACE_END
2882 }  // namespace absl
2883 
2884 #undef ABSL_SWISSTABLE_ENABLE_GENERATIONS
2885 
2886 #endif  // ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
2887