1 // Copyright 2018 The Abseil Authors.
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 //      https://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14 //
15 // An open-addressing
16 // hashtable with quadratic probing.
17 //
18 // This is a low level hashtable on top of which different interfaces can be
19 // implemented, like flat_hash_set, node_hash_set, string_hash_set, etc.
20 //
21 // The table interface is similar to that of std::unordered_set. A notable
22 // difference is that most member functions support heterogeneous keys when
23 // BOTH the hash and eq functions are marked as transparent. They do so by
24 // providing a typedef called `is_transparent`.
25 //
26 // When heterogeneous lookup is enabled, functions that take key_type act as if
27 // they have an overload set like:
28 //
29 //   iterator find(const key_type& key);
30 //   template <class K>
31 //   iterator find(const K& key);
32 //
33 //   size_type erase(const key_type& key);
34 //   template <class K>
35 //   size_type erase(const K& key);
36 //
37 //   std::pair<iterator, iterator> equal_range(const key_type& key);
38 //   template <class K>
39 //   std::pair<iterator, iterator> equal_range(const K& key);
40 //
41 // When heterogeneous lookup is disabled, only the explicit `key_type` overloads
42 // exist.
43 //
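// As a purely illustrative sketch of heterogeneous lookup at the container
// level (the functor names below are hypothetical, not part of this file):
// both functors expose `is_transparent`, so a std::string-keyed set can be
// probed with a string_view without materializing a temporary std::string.
//
//   struct StringHash {
//     using is_transparent = void;
//     size_t operator()(absl::string_view s) const {
//       return absl::Hash<absl::string_view>{}(s);
//     }
//   };
//   struct StringEq {
//     using is_transparent = void;
//     bool operator()(absl::string_view a, absl::string_view b) const {
//       return a == b;
//     }
//   };
//   // absl::flat_hash_set<std::string, StringHash, StringEq> s;
//   // s.find(absl::string_view("key"));  // no temporary std::string built
//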
44 // find() also supports passing the hash explicitly:
45 //
46 //   iterator find(const key_type& key, size_t hash);
47 //   template <class U>
48 //   iterator find(const U& key, size_t hash);
49 //
50 // In addition, the pointer-to-element and iterator stability guarantees are
51 // weaker: all iterators and pointers are invalidated after a new element is
52 // inserted.
53 //
54 // IMPLEMENTATION DETAILS
55 //
56 // # Table Layout
57 //
58 // A raw_hash_set's backing array consists of control bytes followed by slots
59 // that may or may not contain objects.
60 //
61 // The layout of the backing array, for `capacity` slots, is thus, as a
62 // pseudo-struct:
63 //
64 //   struct BackingArray {
65 //     // Sampling handler. This field isn't present when sampling is disabled
66 //     // or this allocation hasn't been selected for sampling.
67 //     HashtablezInfoHandle infoz_;
68 //     // The number of elements we can insert before growing the capacity.
69 //     size_t growth_left;
70 //     // Control bytes for the "real" slots.
71 //     ctrl_t ctrl[capacity];
72 //     // Always `ctrl_t::kSentinel`. This is used by iterators to find when to
73 //     // stop and serves no other purpose.
74 //     ctrl_t sentinel;
75 //     // A copy of the first `kWidth - 1` elements of `ctrl`. This is used so
76 //     // that if a probe sequence picks a value near the end of `ctrl`,
77 //     // `Group` will have valid control bytes to look at.
78 //     ctrl_t clones[kWidth - 1];
79 //     // The actual slot data.
80 //     slot_type slots[capacity];
81 //   };
82 //
83 // The length of this array is computed by `AllocSize()` below.
84 //
85 // Control bytes (`ctrl_t`) are bytes (collected into groups of a
86 // platform-specific size) that define the state of the corresponding slot in
87 // the slot array. Group manipulation is tightly optimized to be as efficient
88 // as possible: SSE and friends on x86, clever bit operations on other arches.
89 //
90 //      Group 1         Group 2        Group 3
91 // +---------------+---------------+---------------+
92 // | | | | | | | | | | | | | | | | | | | | | | | | |
93 // +---------------+---------------+---------------+
94 //
95 // Each control byte is either one of three special values (empty, deleted
96 // (sometimes called a *tombstone*), or the end-of-table marker used by
97 // iterators) or, if the slot is occupied, seven bits (H2) from the hash of the
98 // value in the corresponding slot.
99 //
100 // Storing control bytes in a separate array also has beneficial cache effects,
101 // since more logical slots will fit into a cache line.
102 //
103 // # Hashing
104 //
105 // We compute two separate hashes, `H1` and `H2`, from the hash of an object.
106 // `H1(hash(x))` is an index into `slots`, and essentially the starting point
107 // for the probe sequence. `H2(hash(x))` is a 7-bit value used to filter out
108 // objects that cannot possibly be the one we are looking for.
109 //
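// As a rough sketch matching the H1()/H2() helpers defined later in this file
// (the real H1 additionally mixes in a per-table salt):
//
//   size_t h = hash(x);
//   size_t h1 = h >> 7;      // index bits: start of the probe sequence
//   h2_t h2 = h & 0x7F;      // 7 bits stored in the occupied control byte
//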
110 // # Table operations.
111 //
112 // The key operations are `insert`, `find`, and `erase`.
113 //
114 // Since `insert` and `erase` are implemented in terms of `find`, we describe
115 // `find` first. To `find` a value `x`, we compute `hash(x)`. From
116 // `H1(hash(x))` and the capacity, we construct a `probe_seq` that visits every
117 // group of slots in some interesting order.
118 //
119 // We now walk through these indices. At each index, we select the entire group
120 // starting with that index and extract potential candidates: occupied slots
121 // with a control byte equal to `H2(hash(x))`. If we find an empty slot in the
122 // group, we stop and return an error. Each candidate slot `y` is compared with
123 // `x`; if `x == y`, we are done and return `&y`; otherwise we continue to the
124 // next probe index. Tombstones effectively behave like full slots that never
125 // match the value we're looking for.
126 //
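// In simplified pseudo-C++ (eliding prefetching, assertions, and the
// small-table special cases), the probe loop looks roughly like:
//
//   auto seq = probe_seq<Group::kWidth>(H1(hash, ctrl), capacity);
//   while (true) {
//     Group g{ctrl + seq.offset()};
//     for (uint32_t i : g.Match(H2(hash))) {
//       if (eq(x, slots[seq.offset(i)])) return iterator_at(seq.offset(i));
//     }
//     if (g.MaskEmpty()) return end();  // not present
//     seq.next();
//   }
//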
127 // The `H2` bits ensure when we compare a slot to an object with `==`, we are
128 // likely to have actually found the object.  That is, the chance is low that
129 // `==` is called and returns `false`.  Thus, when we search for an object, we
130 // are unlikely to call `==` many times.  This likelihood can be analyzed as
131 // follows (assuming that H2 is a random enough hash function).
132 //
133 // Let's assume that there are `k` "wrong" objects that must be examined in a
134 // probe sequence.  For example, when doing a `find` on an object that is in the
135 // table, `k` is the number of objects between the start of the probe sequence
136 // and the final found object (not including the final found object).  The
137 // expected number of objects with an H2 match is then `k/128`.  Measurements
138 // and analysis indicate that even at high load factors, `k` is less than 32,
139 // meaning that the number of "false positive" comparisons we must perform is
140 // less than 1/8 per `find`.
141 
142 // `insert` is implemented in terms of `unchecked_insert`, which inserts a
143 // value presumed to not be in the table (violating this requirement will cause
144 // the table to behave erratically). Given `x` and its hash `hash(x)`, to insert
145 // it, we construct a `probe_seq` once again, and use it to find the first
146 // group with an unoccupied (empty *or* deleted) slot. We place `x` into the
147 // first such slot in the group and mark it as full with `x`'s H2.
148 //
149 // To `insert`, we compose `unchecked_insert` with `find`. We compute `h(x)` and
150 // perform a `find` to see if it's already present; if it is, we're done. If
151 // it's not, we may decide the table is getting overcrowded (i.e. the load
152 // factor is greater than 7/8 for big tables; `is_small()` tables use a max load
153 // factor of 1); in this case, we allocate a bigger array, `unchecked_insert`
154 // each element of the table into the new array (we know that no insertion here
155 // will insert an already-present value), and discard the old backing array. At
156 // this point, we may `unchecked_insert` the value `x`.
157 //
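// Condensed into pseudocode (hypothetical helper names; the real code also
// handles sampling, generations, and tombstone cleanup):
//
//   if (find(x) != end()) return;               // already present
//   if (growth_left() == 0) grow_and_rehash();  // hit the 7/8 load factor
//   unchecked_insert(x);                        // set ctrl to H2, fill the slot
//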
158 // Below, `unchecked_insert` is partly implemented by `prepare_insert`, which
159 // presents a viable, initialized slot pointee to the caller.
160 //
161 // `erase` is implemented in terms of `erase_at`, which takes an index to a
162 // slot. Given an offset, we simply create a tombstone and destroy its contents.
163 // If we can prove that the slot would not appear in a probe sequence, we can
164 // mark the slot as empty instead. We can prove this by observing that if a
165 // group has any empty slots, it has never been full (assuming we never create
166 // an empty slot in a group with no empties, which this heuristic guarantees we
167 // never do) and find would stop at this group anyway (since it does not probe
168 // beyond groups with empties).
169 //
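// In oversimplified pseudocode (the real check also examines the neighboring
// probe window, and `set_ctrl` here is a hypothetical helper):
//
//   destroy(slots[i]);
//   if (Group(ctrl + i).MaskEmpty()) {
//     set_ctrl(i, ctrl_t::kEmpty);    // find would stop at this group anyway
//   } else {
//     set_ctrl(i, ctrl_t::kDeleted);  // tombstone
//   }
//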
170 // `erase` is `erase_at` composed with `find`: if we
171 // have a value `x`, we can perform a `find`, and then `erase_at` the resulting
172 // slot.
173 //
174 // To iterate, we simply traverse the array, skipping empty and deleted slots
175 // and stopping when we hit a `kSentinel`.
176 
177 #ifndef ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
178 #define ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
179 
180 #include <algorithm>
181 #include <cassert>
182 #include <cmath>
183 #include <cstddef>
184 #include <cstdint>
185 #include <cstring>
186 #include <initializer_list>
187 #include <iterator>
188 #include <limits>
189 #include <memory>
190 #include <tuple>
191 #include <type_traits>
192 #include <utility>
193 
194 #include "absl/base/attributes.h"
195 #include "absl/base/config.h"
196 #include "absl/base/internal/endian.h"
197 #include "absl/base/internal/raw_logging.h"
198 #include "absl/base/macros.h"
199 #include "absl/base/optimization.h"
200 #include "absl/base/options.h"
201 #include "absl/base/port.h"
202 #include "absl/base/prefetch.h"
203 #include "absl/container/internal/common.h"  // IWYU pragma: export // for node_handle
204 #include "absl/container/internal/compressed_tuple.h"
205 #include "absl/container/internal/container_memory.h"
206 #include "absl/container/internal/hash_policy_traits.h"
207 #include "absl/container/internal/hashtable_debug_hooks.h"
208 #include "absl/container/internal/hashtablez_sampler.h"
209 #include "absl/memory/memory.h"
210 #include "absl/meta/type_traits.h"
211 #include "absl/numeric/bits.h"
212 #include "absl/utility/utility.h"
213 
214 #ifdef ABSL_INTERNAL_HAVE_SSE2
215 #include <emmintrin.h>
216 #endif
217 
218 #ifdef ABSL_INTERNAL_HAVE_SSSE3
219 #include <tmmintrin.h>
220 #endif
221 
222 #ifdef _MSC_VER
223 #include <intrin.h>
224 #endif
225 
226 #ifdef ABSL_INTERNAL_HAVE_ARM_NEON
227 #include <arm_neon.h>
228 #endif
229 
230 namespace absl {
231 ABSL_NAMESPACE_BEGIN
232 namespace container_internal {
233 
234 #ifdef ABSL_SWISSTABLE_ENABLE_GENERATIONS
235 #error ABSL_SWISSTABLE_ENABLE_GENERATIONS cannot be directly set
236 #elif defined(ABSL_HAVE_ADDRESS_SANITIZER) || \
237     defined(ABSL_HAVE_HWADDRESS_SANITIZER) || \
238     defined(ABSL_HAVE_MEMORY_SANITIZER)
239 // When compiled in sanitizer mode, we add generation integers to the backing
240 // array and iterators. In the backing array, we store the generation between
241 // the control bytes and the slots. When iterators are dereferenced, we assert
242 // that the container has not been mutated in a way that could cause iterator
243 // invalidation since the iterator was initialized.
244 #define ABSL_SWISSTABLE_ENABLE_GENERATIONS
245 #endif
246 
247 // We use uint8_t so we don't need to worry about padding.
248 using GenerationType = uint8_t;
249 
250 // A sentinel value for empty generations. Using 0 makes it easy to constexpr
251 // initialize an array of this value.
252 constexpr GenerationType SentinelEmptyGeneration() { return 0; }
253 
254 constexpr GenerationType NextGeneration(GenerationType generation) {
255   return ++generation == SentinelEmptyGeneration() ? ++generation : generation;
256 }
257 
258 #ifdef ABSL_SWISSTABLE_ENABLE_GENERATIONS
259 constexpr bool SwisstableGenerationsEnabled() { return true; }
260 constexpr size_t NumGenerationBytes() { return sizeof(GenerationType); }
261 #else
262 constexpr bool SwisstableGenerationsEnabled() { return false; }
263 constexpr size_t NumGenerationBytes() { return 0; }
264 #endif
265 
266 template <typename AllocType>
267 void SwapAlloc(AllocType& lhs, AllocType& rhs,
268                std::true_type /* propagate_on_container_swap */) {
269   using std::swap;
270   swap(lhs, rhs);
271 }
272 template <typename AllocType>
273 void SwapAlloc(AllocType& lhs, AllocType& rhs,
274                std::false_type /* propagate_on_container_swap */) {
275   (void)lhs;
276   (void)rhs;
277   assert(lhs == rhs &&
278          "It's UB to call swap with unequal non-propagating allocators.");
279 }
280 
281 template <typename AllocType>
282 void CopyAlloc(AllocType& lhs, AllocType& rhs,
283                std::true_type /* propagate_alloc */) {
284   lhs = rhs;
285 }
286 template <typename AllocType>
287 void CopyAlloc(AllocType&, AllocType&, std::false_type /* propagate_alloc */) {}
288 
289 // The state for a probe sequence.
290 //
291 // Currently, the sequence is a triangular progression of the form
292 //
293 //   p(i) := Width * (i^2 + i)/2 + hash (mod mask + 1)
294 //
295 // The use of `Width` ensures that each probe step does not overlap groups;
296 // the sequence effectively outputs the addresses of *groups* (although not
297 // necessarily aligned to any boundary). The `Group` machinery allows us
298 // to check an entire group with minimal branching.
299 //
300 // Wrapping around at `mask + 1` is important, but not for the obvious reason.
301 // As described above, the first few entries of the control byte array
302 // are mirrored at the end of the array, which `Group` will find and use
303 // for selecting candidates. However, when those candidates' slots are
304 // actually inspected, there are no corresponding slots for the cloned bytes,
305 // so we need to make sure we've treated those offsets as "wrapping around".
306 //
307 // It turns out that this probe sequence visits every group exactly once if the
308 // number of groups is a power of two, since (i^2+i)/2 is a bijection in
309 // Z/(2^m). See https://en.wikipedia.org/wiki/Quadratic_probing
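//
// For example, with Width == 16 and mask == 63, the sequence of probe offsets
// is h, h + 16, h + 48, h + 96, ... (all mod 64), where h = hash & 63: the
// triangular numbers scaled by the group width.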
310 template <size_t Width>
311 class probe_seq {
312  public:
313   // Creates a new probe sequence using `hash` as the initial value of the
314   // sequence and `mask` (usually the capacity of the table) as the mask to
315   // apply to each value in the progression.
316   probe_seq(size_t hash, size_t mask) {
317     assert(((mask + 1) & mask) == 0 && "not a mask");
318     mask_ = mask;
319     offset_ = hash & mask_;
320   }
321 
322   // The offset within the table, i.e., the value `p(i)` above.
323   size_t offset() const { return offset_; }
324   size_t offset(size_t i) const { return (offset_ + i) & mask_; }
325 
326   void next() {
327     index_ += Width;
328     offset_ += index_;
329     offset_ &= mask_;
330   }
331   // 0-based probe index, a multiple of `Width`.
332   size_t index() const { return index_; }
333 
334  private:
335   size_t mask_;
336   size_t offset_;
337   size_t index_ = 0;
338 };
339 
340 template <class ContainerKey, class Hash, class Eq>
341 struct RequireUsableKey {
342   template <class PassedKey, class... Args>
343   std::pair<
344       decltype(std::declval<const Hash&>()(std::declval<const PassedKey&>())),
345       decltype(std::declval<const Eq&>()(std::declval<const ContainerKey&>(),
346                                          std::declval<const PassedKey&>()))>*
347   operator()(const PassedKey&, const Args&...) const;
348 };
349 
350 template <class E, class Policy, class Hash, class Eq, class... Ts>
351 struct IsDecomposable : std::false_type {};
352 
353 template <class Policy, class Hash, class Eq, class... Ts>
354 struct IsDecomposable<
355     absl::void_t<decltype(Policy::apply(
356         RequireUsableKey<typename Policy::key_type, Hash, Eq>(),
357         std::declval<Ts>()...))>,
358     Policy, Hash, Eq, Ts...> : std::true_type {};
359 
360 // TODO(alkis): Switch to std::is_nothrow_swappable when gcc/clang supports it.
361 template <class T>
362 constexpr bool IsNoThrowSwappable(std::true_type = {} /* is_swappable */) {
363   using std::swap;
364   return noexcept(swap(std::declval<T&>(), std::declval<T&>()));
365 }
366 template <class T>
367 constexpr bool IsNoThrowSwappable(std::false_type /* is_swappable */) {
368   return false;
369 }
370 
371 template <typename T>
372 uint32_t TrailingZeros(T x) {
373   ABSL_ASSUME(x != 0);
374   return static_cast<uint32_t>(countr_zero(x));
375 }
376 
377 // An abstract bitmask, such as that emitted by a SIMD instruction.
378 //
379 // Specifically, this type implements a simple bitset whose representation is
380 // controlled by `SignificantBits` and `Shift`. `SignificantBits` is the number
381 // of abstract bits in the bitset, while `Shift` is the log-base-two of the
382 // width of an abstract bit in the representation.
383 // This mask provides operations for any number of real bits set in an abstract
384 // bit. To add iteration on top of that, the implementation must guarantee that
385 // no more than the most significant real bit is set in a set abstract bit.
386 template <class T, int SignificantBits, int Shift = 0>
387 class NonIterableBitMask {
388  public:
389   explicit NonIterableBitMask(T mask) : mask_(mask) {}
390 
391   explicit operator bool() const { return this->mask_ != 0; }
392 
393   // Returns the index of the lowest *abstract* bit set in `self`.
394   uint32_t LowestBitSet() const {
395     return container_internal::TrailingZeros(mask_) >> Shift;
396   }
397 
398   // Returns the index of the highest *abstract* bit set in `self`.
399   uint32_t HighestBitSet() const {
400     return static_cast<uint32_t>((bit_width(mask_) - 1) >> Shift);
401   }
402 
403   // Returns the number of trailing zero *abstract* bits.
404   uint32_t TrailingZeros() const {
405     return container_internal::TrailingZeros(mask_) >> Shift;
406   }
407 
408   // Returns the number of leading zero *abstract* bits.
409   uint32_t LeadingZeros() const {
410     constexpr int total_significant_bits = SignificantBits << Shift;
411     constexpr int extra_bits = sizeof(T) * 8 - total_significant_bits;
412     return static_cast<uint32_t>(
413                countl_zero(static_cast<T>(mask_ << extra_bits))) >>
414            Shift;
415   }
416 
417   T mask_;
418 };
419 
420 // A mask that can be iterated over.
421 //
422 // For example, when `SignificantBits` is 16 and `Shift` is zero, this is just
423 // an ordinary 16-bit bitset occupying the low 16 bits of `mask`. When
424 // `SignificantBits` is 8 and `Shift` is 3, abstract bits are represented as
425 // the bytes `0x00` and `0x80`, and it occupies all 64 bits of the bitmask.
426 //
427 // For example:
428 //   for (int i : BitMask<uint32_t, 16>(0b101)) -> yields 0, 2
429 //   for (int i : BitMask<uint64_t, 8, 3>(0x0000000080800000)) -> yields 2, 3
430 template <class T, int SignificantBits, int Shift = 0>
431 class BitMask : public NonIterableBitMask<T, SignificantBits, Shift> {
432   using Base = NonIterableBitMask<T, SignificantBits, Shift>;
433   static_assert(std::is_unsigned<T>::value, "");
434   static_assert(Shift == 0 || Shift == 3, "");
435 
436  public:
437   explicit BitMask(T mask) : Base(mask) {}
438   // BitMask is an iterator over the indices of its abstract bits.
439   using value_type = int;
440   using iterator = BitMask;
441   using const_iterator = BitMask;
442 
443   BitMask& operator++() {
444     if (Shift == 3) {
445       constexpr uint64_t msbs = 0x8080808080808080ULL;
446       this->mask_ &= msbs;
447     }
448     this->mask_ &= (this->mask_ - 1);
449     return *this;
450   }
451 
452   uint32_t operator*() const { return Base::LowestBitSet(); }
453 
454   BitMask begin() const { return *this; }
455   BitMask end() const { return BitMask(0); }
456 
457  private:
458   friend bool operator==(const BitMask& a, const BitMask& b) {
459     return a.mask_ == b.mask_;
460   }
461   friend bool operator!=(const BitMask& a, const BitMask& b) {
462     return a.mask_ != b.mask_;
463   }
464 };
465 
466 using h2_t = uint8_t;
467 
468 // The values here are selected for maximum performance. See the static asserts
469 // below for details.
470 
471 // A `ctrl_t` is a single control byte, which can have one of four
472 // states: empty, deleted, full (which has an associated seven-bit h2_t value)
473 // and the sentinel. They have the following bit patterns:
474 //
475 //      empty: 1 0 0 0 0 0 0 0
476 //    deleted: 1 1 1 1 1 1 1 0
477 //       full: 0 h h h h h h h  // h represents the hash bits.
478 //   sentinel: 1 1 1 1 1 1 1 1
479 //
480 // These values are specifically tuned for SSE-flavored SIMD.
481 // The static_asserts below detail the source of these choices.
482 //
483 // We use an enum class so that when strict aliasing is enabled, the compiler
484 // knows ctrl_t doesn't alias other types.
485 enum class ctrl_t : int8_t {
486   kEmpty = -128,   // 0b10000000
487   kDeleted = -2,   // 0b11111110
488   kSentinel = -1,  // 0b11111111
489 };
490 static_assert(
491     (static_cast<int8_t>(ctrl_t::kEmpty) &
492      static_cast<int8_t>(ctrl_t::kDeleted) &
493      static_cast<int8_t>(ctrl_t::kSentinel) & 0x80) != 0,
494     "Special markers need to have the MSB to make checking for them efficient");
495 static_assert(
496     ctrl_t::kEmpty < ctrl_t::kSentinel && ctrl_t::kDeleted < ctrl_t::kSentinel,
497     "ctrl_t::kEmpty and ctrl_t::kDeleted must be smaller than "
498     "ctrl_t::kSentinel to make the SIMD test of IsEmptyOrDeleted() efficient");
499 static_assert(
500     ctrl_t::kSentinel == static_cast<ctrl_t>(-1),
501     "ctrl_t::kSentinel must be -1 to elide loading it from memory into SIMD "
502     "registers (pcmpeqd xmm, xmm)");
503 static_assert(ctrl_t::kEmpty == static_cast<ctrl_t>(-128),
504               "ctrl_t::kEmpty must be -128 to make the SIMD check for its "
505               "existence efficient (psignb xmm, xmm)");
506 static_assert(
507     (~static_cast<int8_t>(ctrl_t::kEmpty) &
508      ~static_cast<int8_t>(ctrl_t::kDeleted) &
509      static_cast<int8_t>(ctrl_t::kSentinel) & 0x7F) != 0,
510     "ctrl_t::kEmpty and ctrl_t::kDeleted must share an unset bit that is not "
511     "shared by ctrl_t::kSentinel to make the scalar test for "
512     "MaskEmptyOrDeleted() efficient");
513 static_assert(ctrl_t::kDeleted == static_cast<ctrl_t>(-2),
514               "ctrl_t::kDeleted must be -2 to make the implementation of "
515               "ConvertSpecialToEmptyAndFullToDeleted efficient");
516 
517 // See definition comment for why this is size 32.
518 ABSL_DLL extern const ctrl_t kEmptyGroup[32];
519 
520 // Returns a pointer to a control byte group that can be used by empty tables.
521 inline ctrl_t* EmptyGroup() {
522   // Const must be cast away here; no uses of this function will actually write
523   // to it, because it is only used for empty tables.
524   return const_cast<ctrl_t*>(kEmptyGroup + 16);
525 }
526 
527 // Returns a pointer to a generation to use for an empty hashtable.
528 GenerationType* EmptyGeneration();
529 
530 // Returns whether `generation` is a generation for an empty hashtable that
531 // could be returned by EmptyGeneration().
532 inline bool IsEmptyGeneration(const GenerationType* generation) {
533   return *generation == SentinelEmptyGeneration();
534 }
535 
536 // Mixes a randomly generated per-process seed with `hash` and `ctrl` to
537 // randomize insertion order within groups.
538 bool ShouldInsertBackwards(size_t hash, const ctrl_t* ctrl);
539 
540 // Returns a per-table hash salt, which changes on resize. This gets mixed into
541 // H1 to randomize iteration order per-table.
542 //
543 // The seed consists of the ctrl_ pointer, which adds enough entropy to ensure
544 // non-determinism of iteration order in most cases.
545 inline size_t PerTableSalt(const ctrl_t* ctrl) {
546   // The low bits of the pointer have little or no entropy because of
547   // alignment. We shift the pointer to try to use higher entropy bits. A
548   // good number seems to be 12 bits, because that aligns with page size.
549   return reinterpret_cast<uintptr_t>(ctrl) >> 12;
550 }
551 // Extracts the H1 portion of a hash: 57 bits mixed with a per-table salt.
552 inline size_t H1(size_t hash, const ctrl_t* ctrl) {
553   return (hash >> 7) ^ PerTableSalt(ctrl);
554 }
555 
556 // Extracts the H2 portion of a hash: the 7 bits not used for H1.
557 //
558 // These are used as an occupied control byte.
559 inline h2_t H2(size_t hash) { return hash & 0x7F; }
560 
561 // Helpers for checking the state of a control byte.
562 inline bool IsEmpty(ctrl_t c) { return c == ctrl_t::kEmpty; }
563 inline bool IsFull(ctrl_t c) { return c >= static_cast<ctrl_t>(0); }
564 inline bool IsDeleted(ctrl_t c) { return c == ctrl_t::kDeleted; }
565 inline bool IsEmptyOrDeleted(ctrl_t c) { return c < ctrl_t::kSentinel; }
566 
567 #ifdef ABSL_INTERNAL_HAVE_SSE2
568 // Quick reference guide for intrinsics used below:
569 //
570 // * __m128i: An XMM (128-bit) word.
571 //
572 // * _mm_setzero_si128: Returns a zero vector.
573 // * _mm_set1_epi8:     Returns a vector with the same i8 in each lane.
574 //
575 // * _mm_subs_epi8:    Saturating-subtracts two i8 vectors.
576 // * _mm_and_si128:    Ands two i128s together.
577 // * _mm_or_si128:     Ors two i128s together.
578 // * _mm_andnot_si128: And-nots two i128s together.
579 //
580 // * _mm_cmpeq_epi8: Component-wise compares two i8 vectors for equality,
581 //                   filling each lane with 0x00 or 0xff.
582 // * _mm_cmpgt_epi8: Same as above, but using > rather than ==.
583 //
584 // * _mm_loadu_si128:  Performs an unaligned load of an i128.
585 // * _mm_storeu_si128: Performs an unaligned store of an i128.
586 //
587 // * _mm_sign_epi8:     Retains, negates, or zeroes each i8 lane of the first
588 //                      argument if the corresponding lane of the second
589 //                      argument is positive, negative, or zero, respectively.
590 // * _mm_movemask_epi8: Selects the sign bit out of each i8 lane and produces a
591 //                      bitmask consisting of those bits.
592 // * _mm_shuffle_epi8:  Selects i8s from the first argument, using the low
593 //                      four bits of each i8 lane in the second argument as
594 //                      indices.
595 
596 // https://github.com/abseil/abseil-cpp/issues/209
597 // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=87853
598 // _mm_cmpgt_epi8 is broken under GCC with -funsigned-char
599 // Work around this by using the portable implementation of Group
600 // when using -funsigned-char under GCC.
601 inline __m128i _mm_cmpgt_epi8_fixed(__m128i a, __m128i b) {
602 #if defined(__GNUC__) && !defined(__clang__)
603   if (std::is_unsigned<char>::value) {
604     const __m128i mask = _mm_set1_epi8(0x80);
605     const __m128i diff = _mm_subs_epi8(b, a);
606     return _mm_cmpeq_epi8(_mm_and_si128(diff, mask), mask);
607   }
608 #endif
609   return _mm_cmpgt_epi8(a, b);
610 }
611 
612 struct GroupSse2Impl {
613   static constexpr size_t kWidth = 16;  // the number of slots per group
614 
615   explicit GroupSse2Impl(const ctrl_t* pos) {
616     ctrl = _mm_loadu_si128(reinterpret_cast<const __m128i*>(pos));
617   }
618 
619   // Returns a bitmask representing the positions of slots that match hash.
620   BitMask<uint16_t, kWidth> Match(h2_t hash) const {
621     auto match = _mm_set1_epi8(static_cast<char>(hash));
622     BitMask<uint16_t, kWidth> result = BitMask<uint16_t, kWidth>(0);
623     result = BitMask<uint16_t, kWidth>(
624         static_cast<uint16_t>(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl))));
625     return result;
626   }
627 
628   // Returns a bitmask representing the positions of empty slots.
629   NonIterableBitMask<uint16_t, kWidth> MaskEmpty() const {
630 #ifdef ABSL_INTERNAL_HAVE_SSSE3
631     // This only works because ctrl_t::kEmpty is -128.
632     return NonIterableBitMask<uint16_t, kWidth>(
633         static_cast<uint16_t>(_mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl))));
634 #else
635     auto match = _mm_set1_epi8(static_cast<char>(ctrl_t::kEmpty));
636     return NonIterableBitMask<uint16_t, kWidth>(
637         static_cast<uint16_t>(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl))));
638 #endif
639   }
640 
641   // Returns a bitmask representing the positions of full slots.
642   // Note: for `is_small()` tables, the group may contain the "same" slot
643   // twice: original and mirrored.
644   BitMask<uint16_t, kWidth> MaskFull() const {
645     return BitMask<uint16_t, kWidth>(
646         static_cast<uint16_t>(_mm_movemask_epi8(ctrl) ^ 0xffff));
647   }
648 
649   // Returns a bitmask representing the positions of empty or deleted slots.
650   NonIterableBitMask<uint16_t, kWidth> MaskEmptyOrDeleted() const {
651     auto special = _mm_set1_epi8(static_cast<char>(ctrl_t::kSentinel));
652     return NonIterableBitMask<uint16_t, kWidth>(static_cast<uint16_t>(
653         _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl))));
654   }
655 
656   // Returns the number of trailing empty or deleted elements in the group.
657   uint32_t CountLeadingEmptyOrDeleted() const {
658     auto special = _mm_set1_epi8(static_cast<char>(ctrl_t::kSentinel));
659     return TrailingZeros(static_cast<uint32_t>(
660         _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)) + 1));
661   }
662 
663   void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
664     auto msbs = _mm_set1_epi8(static_cast<char>(-128));
665     auto x126 = _mm_set1_epi8(126);
666 #ifdef ABSL_INTERNAL_HAVE_SSSE3
667     auto res = _mm_or_si128(_mm_shuffle_epi8(x126, ctrl), msbs);
668 #else
669     auto zero = _mm_setzero_si128();
670     auto special_mask = _mm_cmpgt_epi8_fixed(zero, ctrl);
671     auto res = _mm_or_si128(msbs, _mm_andnot_si128(special_mask, x126));
672 #endif
673     _mm_storeu_si128(reinterpret_cast<__m128i*>(dst), res);
674   }
675 
676   __m128i ctrl;
677 };
678 #endif  // ABSL_INTERNAL_HAVE_SSE2
679 
680 #if defined(ABSL_INTERNAL_HAVE_ARM_NEON) && defined(ABSL_IS_LITTLE_ENDIAN)
681 struct GroupAArch64Impl {
682   static constexpr size_t kWidth = 8;
683 
684   explicit GroupAArch64Impl(const ctrl_t* pos) {
685     ctrl = vld1_u8(reinterpret_cast<const uint8_t*>(pos));
686   }
687 
688   BitMask<uint64_t, kWidth, 3> Match(h2_t hash) const {
689     uint8x8_t dup = vdup_n_u8(hash);
690     auto mask = vceq_u8(ctrl, dup);
691     return BitMask<uint64_t, kWidth, 3>(
692         vget_lane_u64(vreinterpret_u64_u8(mask), 0));
693   }
694 
695   NonIterableBitMask<uint64_t, kWidth, 3> MaskEmpty() const {
696     uint64_t mask =
697         vget_lane_u64(vreinterpret_u64_u8(vceq_s8(
698                           vdup_n_s8(static_cast<int8_t>(ctrl_t::kEmpty)),
699                           vreinterpret_s8_u8(ctrl))),
700                       0);
701     return NonIterableBitMask<uint64_t, kWidth, 3>(mask);
702   }
703 
704   // Returns a bitmask representing the positions of full slots.
705   // Note: for `is_small()` tables, the group may contain the "same" slot
706   // twice: original and mirrored.
707   BitMask<uint64_t, kWidth, 3> MaskFull() const {
708     uint64_t mask = vget_lane_u64(
709         vreinterpret_u64_u8(vcge_s8(vreinterpret_s8_u8(ctrl),
710                                     vdup_n_s8(static_cast<int8_t>(0)))),
711         0);
712     return BitMask<uint64_t, kWidth, 3>(mask);
713   }
714 
715   NonIterableBitMask<uint64_t, kWidth, 3> MaskEmptyOrDeleted() const {
716     uint64_t mask =
717         vget_lane_u64(vreinterpret_u64_u8(vcgt_s8(
718                           vdup_n_s8(static_cast<int8_t>(ctrl_t::kSentinel)),
719                           vreinterpret_s8_u8(ctrl))),
720                       0);
721     return NonIterableBitMask<uint64_t, kWidth, 3>(mask);
722   }
723 
724   uint32_t CountLeadingEmptyOrDeleted() const {
725     uint64_t mask =
726         vget_lane_u64(vreinterpret_u64_u8(vcle_s8(
727                           vdup_n_s8(static_cast<int8_t>(ctrl_t::kSentinel)),
728                           vreinterpret_s8_u8(ctrl))),
729                       0);
730     // Similar to MaskEmptyOrDeleted() but we invert the logic to invert the
731     // produced bitfield. We then count the number of trailing zeros.
732     // Clang and GCC optimize countr_zero to rbit+clz without any check for 0,
733     // so we should be fine.
734     return static_cast<uint32_t>(countr_zero(mask)) >> 3;
735   }
736 
737   void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
738     uint64_t mask = vget_lane_u64(vreinterpret_u64_u8(ctrl), 0);
739     constexpr uint64_t msbs = 0x8080808080808080ULL;
740     constexpr uint64_t slsbs = 0x0202020202020202ULL;
741     constexpr uint64_t midbs = 0x7e7e7e7e7e7e7e7eULL;
742     auto x = slsbs & (mask >> 6);
743     auto res = (x + midbs) | msbs;
744     little_endian::Store64(dst, res);
745   }
746 
747   uint8x8_t ctrl;
748 };
749 #endif  // ABSL_INTERNAL_HAVE_ARM_NEON && ABSL_IS_LITTLE_ENDIAN
750 
751 struct GroupPortableImpl {
752   static constexpr size_t kWidth = 8;
753 
754   explicit GroupPortableImpl(const ctrl_t* pos)
755       : ctrl(little_endian::Load64(pos)) {}
756 
757   BitMask<uint64_t, kWidth, 3> Match(h2_t hash) const {
758     // For the technique, see:
759     // http://graphics.stanford.edu/~seander/bithacks.html##ValueInWord
760     // (Determine if a word has a byte equal to n).
761     //
762     // Caveat: there are false positives but:
763     // - they only occur if there is a real match
764     // - they never occur on ctrl_t::kEmpty, ctrl_t::kDeleted, ctrl_t::kSentinel
765     // - they will be handled gracefully by subsequent checks in code
766     //
767     // Example:
768     //   v = 0x1716151413121110
769     //   hash = 0x12
770     //   retval = (v - lsbs) & ~v & msbs = 0x0000000080800000
771     constexpr uint64_t msbs = 0x8080808080808080ULL;
772     constexpr uint64_t lsbs = 0x0101010101010101ULL;
773     auto x = ctrl ^ (lsbs * hash);
774     return BitMask<uint64_t, kWidth, 3>((x - lsbs) & ~x & msbs);
775   }
776 
777   NonIterableBitMask<uint64_t, kWidth, 3> MaskEmpty() const {
778     constexpr uint64_t msbs = 0x8080808080808080ULL;
779     return NonIterableBitMask<uint64_t, kWidth, 3>((ctrl & ~(ctrl << 6)) &
780                                                    msbs);
781   }
782 
783   // Returns a bitmask representing the positions of full slots.
784   // Note: for `is_small()` tables, the group may contain the "same" slot
785   // twice: original and mirrored.
786   BitMask<uint64_t, kWidth, 3> MaskFull() const {
787     constexpr uint64_t msbs = 0x8080808080808080ULL;
788     return BitMask<uint64_t, kWidth, 3>((ctrl ^ msbs) & msbs);
789   }
790 
791   NonIterableBitMask<uint64_t, kWidth, 3> MaskEmptyOrDeleted() const {
792     constexpr uint64_t msbs = 0x8080808080808080ULL;
793     return NonIterableBitMask<uint64_t, kWidth, 3>((ctrl & ~(ctrl << 7)) &
794                                                    msbs);
795   }
796 
797   uint32_t CountLeadingEmptyOrDeleted() const {
798     // ctrl | ~(ctrl >> 7) will have the lowest bit set to zero for kEmpty and
799     // kDeleted. We lower all other bits and count the number of trailing zeros.
800     constexpr uint64_t bits = 0x0101010101010101ULL;
801     return static_cast<uint32_t>(countr_zero((ctrl | ~(ctrl >> 7)) & bits) >>
802                                  3);
803   }
804 
805   void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
806     constexpr uint64_t msbs = 0x8080808080808080ULL;
807     constexpr uint64_t lsbs = 0x0101010101010101ULL;
808     auto x = ctrl & msbs;
809     auto res = (~x + (x >> 7)) & ~lsbs;
810     little_endian::Store64(dst, res);
811   }
812 
813   uint64_t ctrl;
814 };
815 
816 #ifdef ABSL_INTERNAL_HAVE_SSE2
817 using Group = GroupSse2Impl;
818 using GroupEmptyOrDeleted = GroupSse2Impl;
819 #elif defined(ABSL_INTERNAL_HAVE_ARM_NEON) && defined(ABSL_IS_LITTLE_ENDIAN)
820 using Group = GroupAArch64Impl;
821 // For AArch64, we use the portable implementation for counting and masking
822 // empty or deleted group elements. This is to avoid the latency of moving
823 // between data GPRs and Neon registers when it does not provide a benefit.
824 // Using Neon is profitable when we call Match(), but is not when we don't,
825 // which is the case when we do *EmptyOrDeleted operations. It is difficult to
826 // make a similar approach beneficial on other architectures such as x86 since
827 // they have much lower GPR <-> vector register transfer latency and 16-wide
828 // Groups.
829 using GroupEmptyOrDeleted = GroupPortableImpl;
830 #else
831 using Group = GroupPortableImpl;
832 using GroupEmptyOrDeleted = GroupPortableImpl;
833 #endif
834 
835 // When there is an insertion with no reserved growth, we rehash with
836 // probability `min(1, RehashProbabilityConstant() / capacity())`. Using a
837 // constant divided by capacity ensures that inserting N elements is still O(N)
838 // in the average case. Using the constant 16 means that we expect to rehash ~8
839 // times more often than when generations are disabled. Per capacity growth,
840 // we add an expected rehash_probability * #insertions =
841 // (16 / capacity) * ((7/8 - 7/16) * capacity) = ~7 extra rehashes.
842 inline size_t RehashProbabilityConstant() { return 16; }
843 
844 class CommonFieldsGenerationInfoEnabled {
845   // A sentinel value for reserved_growth_ indicating that we just ran out of
846   // reserved growth on the last insertion. When reserve is called and then
847   // insertions take place, reserved_growth_'s state machine is N, ..., 1,
848   // kReservedGrowthJustRanOut, 0.
849   static constexpr size_t kReservedGrowthJustRanOut =
850       (std::numeric_limits<size_t>::max)();
851 
852  public:
853   CommonFieldsGenerationInfoEnabled() = default;
854   CommonFieldsGenerationInfoEnabled(CommonFieldsGenerationInfoEnabled&& that)
855       : reserved_growth_(that.reserved_growth_),
856         reservation_size_(that.reservation_size_),
857         generation_(that.generation_) {
858     that.reserved_growth_ = 0;
859     that.reservation_size_ = 0;
860     that.generation_ = EmptyGeneration();
861   }
862   CommonFieldsGenerationInfoEnabled& operator=(
863       CommonFieldsGenerationInfoEnabled&&) = default;
864 
865   // Whether we should rehash on insert in order to detect bugs of using invalid
866   // references. We rehash on the first insertion after reserved_growth_ reaches
867   // 0 after a call to reserve. We also do a rehash with low probability
868   // whenever reserved_growth_ is zero.
869   bool should_rehash_for_bug_detection_on_insert(const ctrl_t* ctrl,
870                                                  size_t capacity) const;
871   // Similar to above, except that we don't depend on reserved_growth_.
872   bool should_rehash_for_bug_detection_on_move(const ctrl_t* ctrl,
873                                                size_t capacity) const;
874   void maybe_increment_generation_on_insert() {
875     if (reserved_growth_ == kReservedGrowthJustRanOut) reserved_growth_ = 0;
876 
877     if (reserved_growth_ > 0) {
878       if (--reserved_growth_ == 0) reserved_growth_ = kReservedGrowthJustRanOut;
879     } else {
880       increment_generation();
881     }
882   }
883   void increment_generation() { *generation_ = NextGeneration(*generation_); }
884   void reset_reserved_growth(size_t reservation, size_t size) {
885     reserved_growth_ = reservation - size;
886   }
887   size_t reserved_growth() const { return reserved_growth_; }
888   void set_reserved_growth(size_t r) { reserved_growth_ = r; }
889   size_t reservation_size() const { return reservation_size_; }
890   void set_reservation_size(size_t r) { reservation_size_ = r; }
891   GenerationType generation() const { return *generation_; }
892   void set_generation(GenerationType g) { *generation_ = g; }
893   GenerationType* generation_ptr() const { return generation_; }
894   void set_generation_ptr(GenerationType* g) { generation_ = g; }
895 
896  private:
897   // The number of insertions remaining that are guaranteed to not rehash due to
898   // a prior call to reserve. Note: we store reserved growth in addition to
899   // reservation size because calls to erase() decrease size_ but don't decrease
900   // reserved growth.
901   size_t reserved_growth_ = 0;
902   // The maximum argument to reserve() since the container was cleared. We need
903   // to keep track of this, in addition to reserved growth, because we reset
904   // reserved growth to this when erase(begin(), end()) is called.
905   size_t reservation_size_ = 0;
906   // Pointer to the generation counter, which is used to validate iterators and
907   // is stored in the backing array between the control bytes and the slots.
908   // Note that we can't store the generation inside the container itself and
909   // keep a pointer to the container in the iterators because iterators must
910   // remain valid when the container is moved.
911   // Note: we could derive this pointer from the control pointer, but it makes
912   // the code more complicated, and there's a benefit in having the sizes of
913   // raw_hash_set in sanitizer mode and non-sanitizer mode a bit more different,
914   // which is that tests are less likely to rely on the size remaining the same.
915   GenerationType* generation_ = EmptyGeneration();
916 };
917 
918 class CommonFieldsGenerationInfoDisabled {
919  public:
920   CommonFieldsGenerationInfoDisabled() = default;
921   CommonFieldsGenerationInfoDisabled(CommonFieldsGenerationInfoDisabled&&) =
922       default;
923   CommonFieldsGenerationInfoDisabled& operator=(
924       CommonFieldsGenerationInfoDisabled&&) = default;
925 
926   bool should_rehash_for_bug_detection_on_insert(const ctrl_t*, size_t) const {
927     return false;
928   }
929   bool should_rehash_for_bug_detection_on_move(const ctrl_t*, size_t) const {
930     return false;
931   }
932   void maybe_increment_generation_on_insert() {}
933   void increment_generation() {}
934   void reset_reserved_growth(size_t, size_t) {}
935   size_t reserved_growth() const { return 0; }
936   void set_reserved_growth(size_t) {}
937   size_t reservation_size() const { return 0; }
938   void set_reservation_size(size_t) {}
939   GenerationType generation() const { return 0; }
940   void set_generation(GenerationType) {}
941   GenerationType* generation_ptr() const { return nullptr; }
942   void set_generation_ptr(GenerationType*) {}
943 };
944 
945 class HashSetIteratorGenerationInfoEnabled {
946  public:
947   HashSetIteratorGenerationInfoEnabled() = default;
948   explicit HashSetIteratorGenerationInfoEnabled(
949       const GenerationType* generation_ptr)
950       : generation_ptr_(generation_ptr), generation_(*generation_ptr) {}
951 
952   GenerationType generation() const { return generation_; }
953   void reset_generation() { generation_ = *generation_ptr_; }
954   const GenerationType* generation_ptr() const { return generation_ptr_; }
955   void set_generation_ptr(const GenerationType* ptr) { generation_ptr_ = ptr; }
956 
957  private:
958   const GenerationType* generation_ptr_ = EmptyGeneration();
959   GenerationType generation_ = *generation_ptr_;
960 };
961 
962 class HashSetIteratorGenerationInfoDisabled {
963  public:
964   HashSetIteratorGenerationInfoDisabled() = default;
965   explicit HashSetIteratorGenerationInfoDisabled(const GenerationType*) {}
966 
967   GenerationType generation() const { return 0; }
968   void reset_generation() {}
969   const GenerationType* generation_ptr() const { return nullptr; }
970   void set_generation_ptr(const GenerationType*) {}
971 };
972 
973 #ifdef ABSL_SWISSTABLE_ENABLE_GENERATIONS
974 using CommonFieldsGenerationInfo = CommonFieldsGenerationInfoEnabled;
975 using HashSetIteratorGenerationInfo = HashSetIteratorGenerationInfoEnabled;
976 #else
977 using CommonFieldsGenerationInfo = CommonFieldsGenerationInfoDisabled;
978 using HashSetIteratorGenerationInfo = HashSetIteratorGenerationInfoDisabled;
979 #endif
980 
981 // Returns whether `n` is a valid capacity (i.e., number of slots).
982 //
983 // A valid capacity is a non-zero integer `2^m - 1`.
984 inline bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; }
985 
986 // Computes the offset of the control bytes from the start of the backing allocation.
987 // infoz and growth_left are stored at the beginning of the backing array.
988 inline size_t ControlOffset(bool has_infoz) {
989   return (has_infoz ? sizeof(HashtablezInfoHandle) : 0) + sizeof(size_t);
990 }
991 
992 // Returns the number of "cloned control bytes".
993 //
994 // This is the number of control bytes that are present both at the beginning
995 // of the control byte array and at the end, such that we can create a
996 // `Group::kWidth`-width probe window starting from any control byte.
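// For example, with Group::kWidth == 16 there are 15 cloned bytes, so a table
// with capacity 15 has 15 + 1 + 15 == 31 control bytes in total.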
997 constexpr size_t NumClonedBytes() { return Group::kWidth - 1; }
998 
999 // Given the capacity of a table, computes the offset (from the start of the
1000 // backing allocation) of the generation counter (if it exists).
1001 inline size_t GenerationOffset(size_t capacity, bool has_infoz) {
1002   assert(IsValidCapacity(capacity));
1003   const size_t num_control_bytes = capacity + 1 + NumClonedBytes();
1004   return ControlOffset(has_infoz) + num_control_bytes;
1005 }
1006 
1007 // Given the capacity of a table, computes the offset (from the start of the
1008 // backing allocation) at which the slots begin.
1009 inline size_t SlotOffset(size_t capacity, size_t slot_align, bool has_infoz) {
1010   assert(IsValidCapacity(capacity));
1011   return (GenerationOffset(capacity, has_infoz) + NumGenerationBytes() +
1012           slot_align - 1) &
1013          (~slot_align + 1);
1014 }
1015 
1016 // Given the capacity of a table, computes the total size of the backing
1017 // array.
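// As a worked example (assuming a 64-bit size_t, Group::kWidth == 16, no
// sampling info, and generations disabled): for capacity 15 with 8-byte,
// 8-aligned slots, ControlOffset() == 8, the 31 control bytes end at offset 39,
// SlotOffset() == 40, and AllocSize() == 40 + 15 * 8 == 160 bytes.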
1018 inline size_t AllocSize(size_t capacity, size_t slot_size, size_t slot_align,
1019                         bool has_infoz) {
1020   return SlotOffset(capacity, slot_align, has_infoz) + capacity * slot_size;
1021 }
1022 
1023 // CommonFields hold the fields in raw_hash_set that do not depend
1024 // on template parameters. This allows us to conveniently pass all
1025 // of this state to helper functions as a single argument.
1026 class CommonFields : public CommonFieldsGenerationInfo {
1027  public:
1028   CommonFields() = default;
1029 
1030   // Not copyable
1031   CommonFields(const CommonFields&) = delete;
1032   CommonFields& operator=(const CommonFields&) = delete;
1033 
1034   // Movable
1035   CommonFields(CommonFields&& that) = default;
1036   CommonFields& operator=(CommonFields&&) = default;
1037 
1038   ctrl_t* control() const { return control_; }
1039   void set_control(ctrl_t* c) { control_ = c; }
1040   void* backing_array_start() const {
1041     // growth_left (and maybe infoz) is stored before control bytes.
1042     assert(reinterpret_cast<uintptr_t>(control()) % alignof(size_t) == 0);
1043     return control() - ControlOffset(has_infoz());
1044   }
1045 
1046   // Note: we can't use slots() because Qt defines "slots" as a macro.
1047   void* slot_array() const { return slots_; }
1048   void set_slots(void* s) { slots_ = s; }
1049 
1050   // The number of filled slots.
1051   size_t size() const { return size_ >> HasInfozShift(); }
1052   void set_size(size_t s) {
1053     size_ = (s << HasInfozShift()) | (size_ & HasInfozMask());
1054   }
1055   void increment_size() {
1056     assert(size() < capacity());
1057     size_ += size_t{1} << HasInfozShift();
1058   }
1059   void decrement_size() {
1060     assert(size() > 0);
1061     size_ -= size_t{1} << HasInfozShift();
1062   }
1063 
1064   // The total number of available slots.
1065   size_t capacity() const { return capacity_; }
1066   void set_capacity(size_t c) {
1067     assert(c == 0 || IsValidCapacity(c));
1068     capacity_ = c;
1069   }
1070 
1071   // The number of slots we can still fill without needing to rehash.
1072   // This is stored in the heap allocation before the control bytes.
1073   size_t growth_left() const {
1074     const size_t* gl_ptr = reinterpret_cast<size_t*>(control()) - 1;
1075     assert(reinterpret_cast<uintptr_t>(gl_ptr) % alignof(size_t) == 0);
1076     return *gl_ptr;
1077   }
1078   void set_growth_left(size_t gl) {
1079     size_t* gl_ptr = reinterpret_cast<size_t*>(control()) - 1;
1080     assert(reinterpret_cast<uintptr_t>(gl_ptr) % alignof(size_t) == 0);
1081     *gl_ptr = gl;
1082   }
1083 
1084   bool has_infoz() const {
1085     return ABSL_PREDICT_FALSE((size_ & HasInfozMask()) != 0);
1086   }
1087   void set_has_infoz(bool has_infoz) {
1088     size_ = (size() << HasInfozShift()) | static_cast<size_t>(has_infoz);
1089   }
1090 
1091   HashtablezInfoHandle infoz() {
1092     return has_infoz()
1093                ? *reinterpret_cast<HashtablezInfoHandle*>(backing_array_start())
1094                : HashtablezInfoHandle();
1095   }
1096   void set_infoz(HashtablezInfoHandle infoz) {
1097     assert(has_infoz());
1098     *reinterpret_cast<HashtablezInfoHandle*>(backing_array_start()) = infoz;
1099   }
1100 
1101   bool should_rehash_for_bug_detection_on_insert() const {
1102     return CommonFieldsGenerationInfo::
1103         should_rehash_for_bug_detection_on_insert(control(), capacity());
1104   }
1105   bool should_rehash_for_bug_detection_on_move() const {
1106     return CommonFieldsGenerationInfo::
1107         should_rehash_for_bug_detection_on_move(control(), capacity());
1108   }
1109   void maybe_increment_generation_on_move() {
1110     if (capacity() == 0) return;
1111     increment_generation();
1112   }
1113   void reset_reserved_growth(size_t reservation) {
1114     CommonFieldsGenerationInfo::reset_reserved_growth(reservation, size());
1115   }
1116 
1117   // The size of the backing array allocation.
1118   size_t alloc_size(size_t slot_size, size_t slot_align) const {
1119     return AllocSize(capacity(), slot_size, slot_align, has_infoz());
1120   }
1121 
1122   // Returns the number of control bytes set to kDeleted. For testing only.
1123   size_t TombstonesCount() const {
1124     return static_cast<size_t>(
1125         std::count(control(), control() + capacity(), ctrl_t::kDeleted));
1126   }
1127 
1128  private:
1129   // We store the has_infoz bit in the lowest bit of size_.
1130   static constexpr size_t HasInfozShift() { return 1; }
1131   static constexpr size_t HasInfozMask() {
1132     return (size_t{1} << HasInfozShift()) - 1;
1133   }
1134 
1135   // TODO(b/182800944): Investigate removing some of these fields:
1136   // - control/slots can be derived from each other
1137 
1138   // The control bytes (and, also, a pointer near the base of the backing
1139   // array).
1140   //
1141   // This contains `capacity + 1 + NumClonedBytes()` entries, even
1142   // when the table is empty (hence EmptyGroup).
1143   //
1144   // Note that growth_left is stored immediately before this pointer.
1145   ctrl_t* control_ = EmptyGroup();
1146 
1147   // The beginning of the slots, located at `SlotOffset()` bytes after
1148   // `control`. May be null for empty tables.
1149   void* slots_ = nullptr;
1150 
1151   // The number of slots in the backing array. This is always 2^N-1 for an
1152   // integer N. NOTE: we tried experimenting with compressing the capacity and
1153   // storing it together with size_: (a) using 6 bits to store the corresponding
1154   // power (N in 2^N-1), and (b) storing 2^N as the most significant bit of
1155   // size_ and storing size in the low bits. Both of these experiments were
1156   // regressions, presumably because we need capacity to do find operations.
1157   size_t capacity_ = 0;
1158 
1159   // The number of filled slots; the lowest bit stores whether we have infoz.
1160   size_t size_ = 0;
1161 };
1162 
1163 template <class Policy, class Hash, class Eq, class Alloc>
1164 class raw_hash_set;
1165 
1166 // Returns the next valid capacity after `n`.
1167 inline size_t NextCapacity(size_t n) {
1168   assert(IsValidCapacity(n) || n == 0);
1169   return n * 2 + 1;
1170 }
1171 
1172 // Applies the following mapping to every byte in the control array:
1173 //   * kDeleted -> kEmpty
1174 //   * kEmpty -> kEmpty
1175 //   * _ -> kDeleted
1176 // PRECONDITION:
1177 //   IsValidCapacity(capacity)
1178 //   ctrl[capacity] == ctrl_t::kSentinel
1179 //   ctrl[i] != ctrl_t::kSentinel for all i < capacity
1180 void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity);
1181 
1182 // Converts `n` into the next valid capacity, per `IsValidCapacity`.
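// For example, with a 64-bit size_t, NormalizeCapacity(7) == 7 and
// NormalizeCapacity(8) == 15: the result is the smallest 2^m - 1 that is >= n
// (or 1 when n == 0).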
1183 inline size_t NormalizeCapacity(size_t n) {
1184   return n ? ~size_t{} >> countl_zero(n) : 1;
1185 }
1186 
1187 // General notes on capacity/growth methods below:
1188 // - We use 7/8th as maximum load factor. For 16-wide groups, that gives an
1189 //   average of two empty slots per group.
1190 // - For (capacity+1) >= Group::kWidth, growth is 7/8*capacity.
1191 // - For (capacity+1) < Group::kWidth, growth == capacity. In this case, we
1192 //   never need to probe (the whole table fits in one group) so we don't need a
1193 //   load factor less than 1.
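//
// For example, with 16-wide groups, CapacityToGrowth(63) == 63 - 63/8 == 56:
// a capacity-63 table rehashes after 56 insertions. Going the other way,
// GrowthToLowerboundCapacity(56) == 56 + 55/7 == 63.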
1194 
1195 // Given `capacity`, applies the load factor; i.e., it returns the maximum
1196 // number of values we should put into the table before a resizing rehash.
1197 inline size_t CapacityToGrowth(size_t capacity) {
1198   assert(IsValidCapacity(capacity));
1199   // `capacity*7/8`
1200   if (Group::kWidth == 8 && capacity == 7) {
1201     // x-x/8 does not work when x==7.
1202     return 6;
1203   }
1204   return capacity - capacity / 8;
1205 }
1206 
1207 // Given `growth`, "unapplies" the load factor to find how large the capacity
1208 // should be to stay within the load factor.
1209 //
1210 // This might not be a valid capacity and `NormalizeCapacity()` should be
1211 // called on this.
1212 inline size_t GrowthToLowerboundCapacity(size_t growth) {
1213   // `growth*8/7`
1214   if (Group::kWidth == 8 && growth == 7) {
1215     // x+(x-1)/7 does not work when x==7.
1216     return 8;
1217   }
1218   return growth + static_cast<size_t>((static_cast<int64_t>(growth) - 1) / 7);
1219 }
1220 
1221 template <class InputIter>
1222 size_t SelectBucketCountForIterRange(InputIter first, InputIter last,
1223                                      size_t bucket_count) {
1224   if (bucket_count != 0) {
1225     return bucket_count;
1226   }
1227   using InputIterCategory =
1228       typename std::iterator_traits<InputIter>::iterator_category;
1229   if (std::is_base_of<std::random_access_iterator_tag,
1230                       InputIterCategory>::value) {
1231     return GrowthToLowerboundCapacity(
1232         static_cast<size_t>(std::distance(first, last)));
1233   }
1234   return 0;
1235 }
1236 
1237 constexpr bool SwisstableDebugEnabled() {
1238 #if defined(ABSL_SWISSTABLE_ENABLE_GENERATIONS) || \
1239     ABSL_OPTION_HARDENED == 1 || !defined(NDEBUG)
1240   return true;
1241 #else
1242   return false;
1243 #endif
1244 }
1245 
1246 inline void AssertIsFull(const ctrl_t* ctrl, GenerationType generation,
1247                          const GenerationType* generation_ptr,
1248                          const char* operation) {
1249   if (!SwisstableDebugEnabled()) return;
1250   // `SwisstableDebugEnabled()` is also true for release builds with hardening
1251   // enabled. To minimize their impact in those builds:
1252   // - use `ABSL_PREDICT_FALSE()` to provide a compiler hint for code layout
1253   // - use `ABSL_RAW_LOG()` with a format string to reduce code size and improve
1254   //   the chances that the hot paths will be inlined.
1255   if (ABSL_PREDICT_FALSE(ctrl == nullptr)) {
1256     ABSL_RAW_LOG(FATAL, "%s called on end() iterator.", operation);
1257   }
1258   if (ABSL_PREDICT_FALSE(ctrl == EmptyGroup())) {
1259     ABSL_RAW_LOG(FATAL, "%s called on default-constructed iterator.",
1260                  operation);
1261   }
1262   if (SwisstableGenerationsEnabled()) {
1263     if (ABSL_PREDICT_FALSE(generation != *generation_ptr)) {
1264       ABSL_RAW_LOG(FATAL,
1265                    "%s called on invalid iterator. The table could have "
1266                    "rehashed or moved since this iterator was initialized.",
1267                    operation);
1268     }
1269     if (ABSL_PREDICT_FALSE(!IsFull(*ctrl))) {
1270       ABSL_RAW_LOG(
1271           FATAL,
1272           "%s called on invalid iterator. The element was likely erased.",
1273           operation);
1274     }
1275   } else {
1276     if (ABSL_PREDICT_FALSE(!IsFull(*ctrl))) {
1277       ABSL_RAW_LOG(
1278           FATAL,
1279           "%s called on invalid iterator. The element might have been erased "
1280           "or the table might have rehashed. Consider running with "
1281           "--config=asan to diagnose rehashing issues.",
1282           operation);
1283     }
1284   }
1285 }
1286 
1287 // Note that for comparisons, null/end iterators are valid.
1288 inline void AssertIsValidForComparison(const ctrl_t* ctrl,
1289                                        GenerationType generation,
1290                                        const GenerationType* generation_ptr) {
1291   if (!SwisstableDebugEnabled()) return;
1292   const bool ctrl_is_valid_for_comparison =
1293       ctrl == nullptr || ctrl == EmptyGroup() || IsFull(*ctrl);
1294   if (SwisstableGenerationsEnabled()) {
1295     if (ABSL_PREDICT_FALSE(generation != *generation_ptr)) {
1296       ABSL_RAW_LOG(FATAL,
1297                    "Invalid iterator comparison. The table could have rehashed "
1298                    "or moved since this iterator was initialized.");
1299     }
1300     if (ABSL_PREDICT_FALSE(!ctrl_is_valid_for_comparison)) {
1301       ABSL_RAW_LOG(
1302           FATAL, "Invalid iterator comparison. The element was likely erased.");
1303     }
1304   } else {
1305     ABSL_HARDENING_ASSERT(
1306         ctrl_is_valid_for_comparison &&
1307         "Invalid iterator comparison. The element might have been erased or "
1308         "the table might have rehashed. Consider running with --config=asan to "
1309         "diagnose rehashing issues.");
1310   }
1311 }
1312 
1313 // If the two iterators come from the same container, then their pointers will
1314 // interleave such that ctrl_a <= ctrl_b < slot_a <= slot_b or vice versa.
1315 // Note: we take slots by reference so that it's not UB if they're uninitialized
1316 // as long as we don't read them (when ctrl is null).
1317 inline bool AreItersFromSameContainer(const ctrl_t* ctrl_a,
1318                                       const ctrl_t* ctrl_b,
1319                                       const void* const& slot_a,
1320                                       const void* const& slot_b) {
1321   // If either control byte is null, then we can't tell.
1322   if (ctrl_a == nullptr || ctrl_b == nullptr) return true;
1323   const void* low_slot = slot_a;
1324   const void* hi_slot = slot_b;
1325   if (ctrl_a > ctrl_b) {
1326     std::swap(ctrl_a, ctrl_b);
1327     std::swap(low_slot, hi_slot);
1328   }
1329   return ctrl_b < low_slot && low_slot <= hi_slot;
1330 }
1331 
1332 // Asserts that two iterators come from the same container.
1333 // Note: we take slots by reference so that it's not UB if they're uninitialized
1334 // as long as we don't read them (when ctrl is null).
1335 inline void AssertSameContainer(const ctrl_t* ctrl_a, const ctrl_t* ctrl_b,
1336                                 const void* const& slot_a,
1337                                 const void* const& slot_b,
1338                                 const GenerationType* generation_ptr_a,
1339                                 const GenerationType* generation_ptr_b) {
1340   if (!SwisstableDebugEnabled()) return;
1341   // `SwisstableDebugEnabled()` is also true for release builds with hardening
1342   // enabled. To minimize their impact in those builds:
1343   // - use `ABSL_PREDICT_FALSE()` to provide a compiler hint for code layout
1344   // - use `ABSL_RAW_LOG()` with a format string to reduce code size and improve
1345   //   the chances that the hot paths will be inlined.
1346   const bool a_is_default = ctrl_a == EmptyGroup();
1347   const bool b_is_default = ctrl_b == EmptyGroup();
1348   if (ABSL_PREDICT_FALSE(a_is_default != b_is_default)) {
1349     ABSL_RAW_LOG(
1350         FATAL,
1351         "Invalid iterator comparison. Comparing default-constructed iterator "
1352         "with non-default-constructed iterator.");
1353   }
1354   if (a_is_default && b_is_default) return;
1355 
1356   if (SwisstableGenerationsEnabled()) {
1357     if (ABSL_PREDICT_TRUE(generation_ptr_a == generation_ptr_b)) return;
1358     const bool a_is_empty = IsEmptyGeneration(generation_ptr_a);
1359     const bool b_is_empty = IsEmptyGeneration(generation_ptr_b);
1360     if (a_is_empty != b_is_empty) {
1361       ABSL_RAW_LOG(FATAL,
1362                    "Invalid iterator comparison. Comparing iterator from a "
1363                    "non-empty hashtable with an iterator from an empty "
1364                    "hashtable.");
1365     }
1366     if (a_is_empty && b_is_empty) {
1367       ABSL_RAW_LOG(FATAL,
1368                    "Invalid iterator comparison. Comparing iterators from "
1369                    "different empty hashtables.");
1370     }
1371     const bool a_is_end = ctrl_a == nullptr;
1372     const bool b_is_end = ctrl_b == nullptr;
1373     if (a_is_end || b_is_end) {
1374       ABSL_RAW_LOG(FATAL,
1375                    "Invalid iterator comparison. Comparing iterator with an "
1376                    "end() iterator from a different hashtable.");
1377     }
1378     ABSL_RAW_LOG(FATAL,
1379                  "Invalid iterator comparison. Comparing non-end() iterators "
1380                  "from different hashtables.");
1381   } else {
1382     ABSL_HARDENING_ASSERT(
1383         AreItersFromSameContainer(ctrl_a, ctrl_b, slot_a, slot_b) &&
1384         "Invalid iterator comparison. The iterators may be from different "
1385         "containers or the container might have rehashed or moved. Consider "
1386         "running with --config=asan to diagnose issues.");
1387   }
1388 }
1389 
1390 struct FindInfo {
1391   size_t offset;
1392   size_t probe_length;
1393 };
1394 
1395 // Whether a table is "small". A small table fits entirely into a probing
1396 // group, i.e., has a capacity < `Group::kWidth`.
1397 //
1398 // In small mode we are able to use the whole capacity. The extra control
1399 // bytes give us at least one "empty" control byte to stop the iteration.
1400 // This is important to make 1 a valid capacity.
1401 //
1402 // In small mode only the first `capacity` control bytes after the sentinel
1403 // are valid. The rest contain dummy ctrl_t::kEmpty values that do not
1404 // represent a real slot. This is important to take into account in
1405 // `find_first_non_full()`, which is why we never try
1406 // `ShouldInsertBackwards()` for small tables.
1407 inline bool is_small(size_t capacity) { return capacity < Group::kWidth - 1; }
1408 
1409 // Whether a table fits entirely into a probing group.
1410 // Arbitrary order of elements in such tables is correct.
1411 inline bool is_single_group(size_t capacity) {
1412   return capacity <= Group::kWidth;
1413 }
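// For concreteness (editor's note, assuming 16-wide groups):
//
//   is_small(1)  == is_small(3) == is_small(7) == true
//   is_small(15) == false             // but is_single_group(15) == true
//   is_single_group(31) == false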
1414 
1415 // Begins a probing operation on `common.control`, using `hash`.
1416 inline probe_seq<Group::kWidth> probe(const ctrl_t* ctrl, const size_t capacity,
1417                                       size_t hash) {
1418   return probe_seq<Group::kWidth>(H1(hash, ctrl), capacity);
1419 }
1420 inline probe_seq<Group::kWidth> probe(const CommonFields& common, size_t hash) {
1421   return probe(common.control(), common.capacity(), hash);
1422 }
1423 
1424 // Probes an array of control bits using a probe sequence derived from `hash`,
1425 // and returns the offset corresponding to the first deleted or empty slot.
1426 //
1427 // Behavior when the entire table is full is undefined.
1428 //
1429 // NOTE: this function must work with tables having both empty and deleted
1430 // slots in the same group. Such tables appear during `erase()`.
1431 template <typename = void>
1432 inline FindInfo find_first_non_full(const CommonFields& common, size_t hash) {
1433   auto seq = probe(common, hash);
1434   const ctrl_t* ctrl = common.control();
1435   while (true) {
1436     GroupEmptyOrDeleted g{ctrl + seq.offset()};
1437     auto mask = g.MaskEmptyOrDeleted();
1438     if (mask) {
1439 #if !defined(NDEBUG)
1440       // We want to add entropy even when ASLR is not enabled.
1441       // In debug build we will randomly insert in either the front or back of
1442       // the group.
1443       // TODO(kfm,sbenza): revisit after we do unconditional mixing
1444       if (!is_small(common.capacity()) && ShouldInsertBackwards(hash, ctrl)) {
1445         return {seq.offset(mask.HighestBitSet()), seq.index()};
1446       }
1447 #endif
1448       return {seq.offset(mask.LowestBitSet()), seq.index()};
1449     }
1450     seq.next();
1451     assert(seq.index() <= common.capacity() && "full table!");
1452   }
1453 }
1454 
1455 // The extern template declaration for this inline function keeps the
1456 // possibility of inlining. When the compiler decides not to inline it, no
1457 // symbols are added to the corresponding translation unit.
1458 extern template FindInfo find_first_non_full(const CommonFields&, size_t);
1459 
1460 // Non-inlined version of find_first_non_full for use in less
1461 // performance-critical routines.
1462 FindInfo find_first_non_full_outofline(const CommonFields&, size_t);
1463 
1464 inline void ResetGrowthLeft(CommonFields& common) {
1465   common.set_growth_left(CapacityToGrowth(common.capacity()) - common.size());
1466 }
1467 
1468 // Sets `ctrl` to `{kEmpty, kSentinel, ..., kEmpty}`, marking the entire
1469 // array as empty.
1470 inline void ResetCtrl(CommonFields& common, size_t slot_size) {
1471   const size_t capacity = common.capacity();
1472   ctrl_t* ctrl = common.control();
1473   std::memset(ctrl, static_cast<int8_t>(ctrl_t::kEmpty),
1474               capacity + 1 + NumClonedBytes());
1475   ctrl[capacity] = ctrl_t::kSentinel;
1476   SanitizerPoisonMemoryRegion(common.slot_array(), slot_size * capacity);
1477 }
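// For example (editor's sketch, using the E/S notation from the shuffle
// examples further below): after ResetCtrl on a table with capacity 7 and
// Group::kWidth == 8, the control array reads
//
//   E E E E E E E S E E E E E E E
//   (7 real bytes)(sentinel)(7 cloned bytes)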
1478 
1479 // Sets `ctrl[i]` to `h`.
1480 //
1481 // Unlike setting it directly, this function will perform bounds checks and
1482 // mirror the value to the cloned tail if necessary.
1483 inline void SetCtrl(const CommonFields& common, size_t i, ctrl_t h,
1484                     size_t slot_size) {
1485   const size_t capacity = common.capacity();
1486   assert(i < capacity);
1487 
1488   auto* slot_i = static_cast<const char*>(common.slot_array()) + i * slot_size;
1489   if (IsFull(h)) {
1490     SanitizerUnpoisonMemoryRegion(slot_i, slot_size);
1491   } else {
1492     SanitizerPoisonMemoryRegion(slot_i, slot_size);
1493   }
1494 
1495   ctrl_t* ctrl = common.control();
1496   ctrl[i] = h;
1497   ctrl[((i - NumClonedBytes()) & capacity) + (NumClonedBytes() & capacity)] = h;
1498 }
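// The mirroring expression above maps an index in the cloned prefix to its
// clone and any other index to itself. A worked example (editor's note): with
// Group::kWidth == 16, NumClonedBytes() == 15 and capacity == 15, setting
// i == 3 also writes
//
//   ((3 - 15) & 15) + (15 & 15) == 4 + 15 == 19 == capacity + 1 + 3,
//
// i.e. the clone of slot 3, while for capacity == 31 and i == 20 the
// expression yields 20 and the second store is a harmless self-assignment.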
1499 
1500 // Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`.
1501 inline void SetCtrl(const CommonFields& common, size_t i, h2_t h,
1502                     size_t slot_size) {
1503   SetCtrl(common, i, static_cast<ctrl_t>(h), slot_size);
1504 }
1505 
1506 // growth_left (which is a size_t) is stored with the backing array.
1507 constexpr size_t BackingArrayAlignment(size_t align_of_slot) {
1508   return (std::max)(align_of_slot, alignof(size_t));
1509 }
1510 
1511 // Returns the address of the ith slot in slots where each slot occupies
1512 // slot_size.
1513 inline void* SlotAddress(void* slot_array, size_t slot, size_t slot_size) {
1514   return reinterpret_cast<void*>(reinterpret_cast<char*>(slot_array) +
1515                                  (slot * slot_size));
1516 }
1517 
1518 // Helper class to perform a resize of the hash set.
1519 //
1520 // It contains special optimizations for small-group resizes.
1521 // See GrowIntoSingleGroupShuffleControlBytes for details.
1522 class HashSetResizeHelper {
1523  public:
1524   explicit HashSetResizeHelper(CommonFields& c)
1525       : old_ctrl_(c.control()),
1526         old_capacity_(c.capacity()),
1527         had_infoz_(c.has_infoz()) {}
1528 
1529   // A version of `find_first_non_full` optimized for small groups, applicable
1530   // only right after calling `raw_hash_set::resize`.
1531   // It implicitly assumes that `resize` will call `GrowSizeIntoSingleGroup*`
1532   // whenever `IsGrowingIntoSingleGroupApplicable` holds.
1533   // Falls back to `find_first_non_full` for big groups, so it is
1534   // safe to use after `rehash_and_grow_if_necessary`.
1535   static FindInfo FindFirstNonFullAfterResize(const CommonFields& c,
1536                                               size_t old_capacity,
1537                                               size_t hash) {
1538     if (!IsGrowingIntoSingleGroupApplicable(old_capacity, c.capacity())) {
1539       return find_first_non_full(c, hash);
1540     }
1541     // Find a location for the new element non-deterministically.
1542     // Note that any position is correct.
1543     // It will be located at `half_old_capacity` or one of the other
1544     // empty slots with approximately 50% probability each.
1545     size_t offset = probe(c, hash).offset();
1546 
1547     // Note that we intentionally use unsigned int underflow.
1548     if (offset - (old_capacity + 1) >= old_capacity) {
1549       // The offset falls on kSentinel or into the mostly occupied first half.
1550       offset = old_capacity / 2;
1551     }
1552     assert(IsEmpty(c.control()[offset]));
1553     return FindInfo{offset, 0};
1554   }
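  // A numeric illustration (editor's note): when growing from old_capacity == 3
  // to capacity 7, the shuffled control bytes look like `2E01EEES...` (see the
  // examples further below), so probe offsets 4..6 are guaranteed empty and are
  // used directly, while any other offset (0..3 or the sentinel position)
  // collapses to offset old_capacity / 2 == 1, which the shuffle left empty.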
1555 
1556   ctrl_t* old_ctrl() const { return old_ctrl_; }
1557   size_t old_capacity() const { return old_capacity_; }
1558 
1559   // Allocates a backing array for the hashtable.
1560   // Reads `capacity` and updates all other fields based on the result of
1561   // the allocation.
1562   //
1563   // It may also do the following actions:
1564   // 1. initialize control bytes
1565   // 2. initialize slots
1566   // 3. deallocate old slots.
1567   //
1568   // We are bundling a lot of functionality
1569   // in one ABSL_ATTRIBUTE_NOINLINE function in order to minimize binary code
1570   // duplication in raw_hash_set<>::resize.
1571   //
1572   // `c.capacity()` must be nonzero.
1573   // POSTCONDITIONS:
1574   //  1. CommonFields is initialized.
1575   //
1576   //  if IsGrowingIntoSingleGroupApplicable && TransferUsesMemcpy
1577   //    Both control bytes and slots are fully initialized.
1578   //    old_slots are deallocated.
1579   //    infoz.RecordRehash is called.
1580   //
1581   //  if IsGrowingIntoSingleGroupApplicable && !TransferUsesMemcpy
1582   //    Control bytes are fully initialized.
1583   //    infoz.RecordRehash is called.
1584   //    GrowSizeIntoSingleGroup must be called to finish slots initialization.
1585   //
1586   //  if !IsGrowingIntoSingleGroupApplicable
1587   //    Control bytes are initialized to empty table via ResetCtrl.
1588   //    raw_hash_set<>::resize must insert elements regularly.
1589   //    infoz.RecordRehash is called if old_capacity == 0.
1590   //
1591   //  Returns IsGrowingIntoSingleGroupApplicable result to avoid recomputation.
1592   template <typename Alloc, size_t SizeOfSlot, bool TransferUsesMemcpy,
1593             size_t AlignOfSlot>
1594   ABSL_ATTRIBUTE_NOINLINE bool InitializeSlots(CommonFields& c, void* old_slots,
1595                                                Alloc alloc) {
1596     assert(c.capacity());
1597     // Folks with custom allocators often make unwarranted assumptions about the
1598     // behavior of their classes vis-a-vis trivial destructibility and what
1599     // calls they will or won't make.  Avoid sampling for people with custom
1600     // allocators to get us out of this mess.  This is not a hard guarantee but
1601     // a workaround while we plan the exact guarantee we want to provide.
1602     const size_t sample_size =
1603         (std::is_same<Alloc, std::allocator<char>>::value &&
1604          c.slot_array() == nullptr)
1605             ? SizeOfSlot
1606             : 0;
1607     HashtablezInfoHandle infoz =
1608         sample_size > 0 ? Sample(sample_size) : c.infoz();
1609 
1610     const bool has_infoz = infoz.IsSampled();
1611     const size_t cap = c.capacity();
1612     const size_t alloc_size =
1613         AllocSize(cap, SizeOfSlot, AlignOfSlot, has_infoz);
1614     char* mem = static_cast<char*>(
1615         Allocate<BackingArrayAlignment(AlignOfSlot)>(&alloc, alloc_size));
1616     const GenerationType old_generation = c.generation();
1617     c.set_generation_ptr(reinterpret_cast<GenerationType*>(
1618         mem + GenerationOffset(cap, has_infoz)));
1619     c.set_generation(NextGeneration(old_generation));
1620     c.set_control(reinterpret_cast<ctrl_t*>(mem + ControlOffset(has_infoz)));
1621     c.set_slots(mem + SlotOffset(cap, AlignOfSlot, has_infoz));
1622     ResetGrowthLeft(c);
1623 
1624     const bool grow_single_group =
1625         IsGrowingIntoSingleGroupApplicable(old_capacity_, c.capacity());
1626     if (old_capacity_ != 0 && grow_single_group) {
1627       if (TransferUsesMemcpy) {
1628         GrowSizeIntoSingleGroupTransferable(c, old_slots, SizeOfSlot);
1629         DeallocateOld<AlignOfSlot>(alloc, SizeOfSlot, old_slots);
1630       } else {
1631         GrowIntoSingleGroupShuffleControlBytes(c.control(), c.capacity());
1632       }
1633     } else {
1634       ResetCtrl(c, SizeOfSlot);
1635     }
1636 
1637     c.set_has_infoz(has_infoz);
1638     if (has_infoz) {
1639       infoz.RecordStorageChanged(c.size(), cap);
1640       if (grow_single_group || old_capacity_ == 0) {
1641         infoz.RecordRehash(0);
1642       }
1643       c.set_infoz(infoz);
1644     }
1645     return grow_single_group;
1646   }
1647 
1648   // Relocates slots into the new single group, consistent with
1649   // GrowIntoSingleGroupShuffleControlBytes.
1650   //
1651   // PRECONDITIONS:
1652   // 1. GrowIntoSingleGroupShuffleControlBytes was already called.
1653   template <class PolicyTraits, class Alloc>
1654   void GrowSizeIntoSingleGroup(CommonFields& c, Alloc& alloc_ref,
1655                                typename PolicyTraits::slot_type* old_slots) {
1656     assert(old_capacity_ < Group::kWidth / 2);
1657     assert(IsGrowingIntoSingleGroupApplicable(old_capacity_, c.capacity()));
1658     using slot_type = typename PolicyTraits::slot_type;
1659     assert(is_single_group(c.capacity()));
1660 
1661     auto* new_slots = reinterpret_cast<slot_type*>(c.slot_array());
1662 
1663     size_t shuffle_bit = old_capacity_ / 2 + 1;
1664     for (size_t i = 0; i < old_capacity_; ++i) {
1665       if (IsFull(old_ctrl_[i])) {
1666         size_t new_i = i ^ shuffle_bit;
1667         SanitizerUnpoisonMemoryRegion(new_slots + new_i, sizeof(slot_type));
1668         PolicyTraits::transfer(&alloc_ref, new_slots + new_i, old_slots + i);
1669       }
1670     }
1671     PoisonSingleGroupEmptySlots(c, sizeof(slot_type));
1672   }
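  // For example (editor's note): with old_capacity_ == 3 the shuffle bit is
  // 3 / 2 + 1 == 2, so full slots move 0 -> 2, 1 -> 3 and 2 -> 0, matching the
  // `old_ctrl = 012S...` / `new_ctrl = 2E01...` control-byte example in the
  // comment on GrowIntoSingleGroupShuffleControlBytes below.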
1673 
1674   // Deallocates old backing array.
1675   template <size_t AlignOfSlot, class CharAlloc>
1676   void DeallocateOld(CharAlloc alloc_ref, size_t slot_size, void* old_slots) {
1677     SanitizerUnpoisonMemoryRegion(old_slots, slot_size * old_capacity_);
1678     Deallocate<BackingArrayAlignment(AlignOfSlot)>(
1679         &alloc_ref, old_ctrl_ - ControlOffset(had_infoz_),
1680         AllocSize(old_capacity_, slot_size, AlignOfSlot, had_infoz_));
1681   }
1682 
1683  private:
1684   // Returns true if `GrowSizeIntoSingleGroup` can be used for resizing.
1685   static bool IsGrowingIntoSingleGroupApplicable(size_t old_capacity,
1686                                                  size_t new_capacity) {
1687     // NOTE: `old_capacity < new_capacity` is required so that
1688     // `old_capacity < Group::kWidth / 2`, which allows faster copies of 8 bytes.
1689     return is_single_group(new_capacity) && old_capacity < new_capacity;
1690   }
1691 
1692   // Relocates control bytes and slots into the new single group for
1693   // transferable objects.
1694   // Must be called only if IsGrowingIntoSingleGroupApplicable returned true.
1695   void GrowSizeIntoSingleGroupTransferable(CommonFields& c, void* old_slots,
1696                                            size_t slot_size);
1697 
1698   // Shuffles control bytes deterministically into the layout required by the
1699   // next (single-group) capacity.
1700   //
1701   // PRECONDITIONS:
1702   // 1. new_ctrl is allocated for new_capacity,
1703   //    but not initialized.
1704   // 2. new_capacity is a single group.
1705   //
1706   // All elements are transferred into the first `old_capacity + 1` positions
1707   // of `new_ctrl`. Elements are rotated by `old_capacity_ / 2 + 1` positions
1708   // in order to change their order and keep it non-deterministic.
1709   // Although the rotation itself is deterministic, the position of the newly
1710   // added element is based on `H1` and is therefore not deterministic.
1711   //
1712   // Examples:
1713   // S = kSentinel, E = kEmpty
1714   //
1715   // old_ctrl = SEEEEEEEE...
1716   // new_ctrl = ESEEEEEEE...
1717   //
1718   // old_ctrl = 0SEEEEEEE...
1719   // new_ctrl = E0ESE0EEE...
1720   //
1721   // old_ctrl = 012S012EEEEEEEEE...
1722   // new_ctrl = 2E01EEES2E01EEE...
1723   //
1724   // old_ctrl = 0123456S0123456EEEEEEEEEEE...
1725   // new_ctrl = 456E0123EEEEEES456E0123EEE...
1726   void GrowIntoSingleGroupShuffleControlBytes(ctrl_t* new_ctrl,
1727                                               size_t new_capacity) const;
1728 
1729   // Shuffles trivially transferable slots in a way consistent with
1730   // GrowIntoSingleGroupShuffleControlBytes.
1731   //
1732   // PRECONDITIONS:
1733   // 1. old_capacity must be non-zero.
1734   // 2. new_ctrl is fully initialized using
1735   //    GrowIntoSingleGroupShuffleControlBytes.
1736   // 3. new_slots is allocated and *not* poisoned.
1737   //
1738   // POSTCONDITIONS:
1739   // 1. new_slots are transferred from old_slots_ consistent with
1740   //    GrowIntoSingleGroupShuffleControlBytes.
1741   // 2. Empty new_slots are *not* poisoned.
1742   void GrowIntoSingleGroupShuffleTransferableSlots(void* old_slots,
1743                                                    void* new_slots,
1744                                                    size_t slot_size) const;
1745 
1746   // Poisons the slots left empty by the deterministic transfer algorithm
1747   // described above.
1748   // PRECONDITIONS:
1749   // 1. new_ctrl is fully initialized using
1750   //    GrowIntoSingleGroupShuffleControlBytes.
1751   // 2. new_slots is fully initialized consistent with
1752   //    GrowIntoSingleGroupShuffleControlBytes.
1753   void PoisonSingleGroupEmptySlots(CommonFields& c, size_t slot_size) const {
1754     // poison non full items
1755     for (size_t i = 0; i < c.capacity(); ++i) {
1756       if (!IsFull(c.control()[i])) {
1757         SanitizerPoisonMemoryRegion(SlotAddress(c.slot_array(), i, slot_size),
1758                                     slot_size);
1759       }
1760     }
1761   }
1762 
1763   ctrl_t* old_ctrl_;
1764   size_t old_capacity_;
1765   bool had_infoz_;
1766 };
1767 
1768 // PolicyFunctions bundles together some information for a particular
1769 // raw_hash_set<T, ...> instantiation. This information is passed to
1770 // type-erased functions that want to do small amounts of type-specific
1771 // work.
1772 struct PolicyFunctions {
1773   size_t slot_size;
1774 
1775   // Returns the hash of the pointed-to slot.
1776   size_t (*hash_slot)(void* set, void* slot);
1777 
1778   // Transfer the contents of src_slot to dst_slot.
1779   void (*transfer)(void* set, void* dst_slot, void* src_slot);
1780 
1781   // Deallocate the backing store from common.
1782   void (*dealloc)(CommonFields& common, const PolicyFunctions& policy);
1783 };
1784 
1785 // ClearBackingArray clears the backing array, either modifying it in place,
1786 // or creating a new one based on the value of "reuse".
1787 // REQUIRES: c.capacity > 0
1788 void ClearBackingArray(CommonFields& c, const PolicyFunctions& policy,
1789                        bool reuse);
1790 
1791 // Type-erased version of raw_hash_set::erase_meta_only.
1792 void EraseMetaOnly(CommonFields& c, size_t index, size_t slot_size);
1793 
1794 // Function to place in PolicyFunctions::dealloc for raw_hash_sets
1795 // that are using std::allocator. This allows us to share the same
1796 // function body for raw_hash_set instantiations that have the
1797 // same slot alignment.
1798 template <size_t AlignOfSlot>
1799 ABSL_ATTRIBUTE_NOINLINE void DeallocateStandard(CommonFields& common,
1800                                                 const PolicyFunctions& policy) {
1801   // Unpoison before returning the memory to the allocator.
1802   SanitizerUnpoisonMemoryRegion(common.slot_array(),
1803                                 policy.slot_size * common.capacity());
1804 
1805   std::allocator<char> alloc;
1806   common.infoz().Unregister();
1807   Deallocate<BackingArrayAlignment(AlignOfSlot)>(
1808       &alloc, common.backing_array_start(),
1809       common.alloc_size(policy.slot_size, AlignOfSlot));
1810 }
1811 
1812 // For trivially relocatable types we use memcpy directly. This allows us to
1813 // share the same function body for raw_hash_set instantiations that have the
1814 // same slot size as long as they are relocatable.
1815 template <size_t SizeOfSlot>
1816 ABSL_ATTRIBUTE_NOINLINE void TransferRelocatable(void*, void* dst, void* src) {
1817   memcpy(dst, src, SizeOfSlot);
1818 }
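// Putting the pieces together (editor's sketch, hypothetical wiring): a
// PolicyFunctions instance for a trivially relocatable 8-byte slot could be
// built from the helpers above. `HashInt64Slot` is a made-up hash callback and
// not part of this header.
//
//   const PolicyFunctions kInt64Policy = {
//       /*slot_size=*/8,
//       /*hash_slot=*/&HashInt64Slot,                      // hypothetical
//       /*transfer=*/&TransferRelocatable<8>,
//       /*dealloc=*/&DeallocateStandard<alignof(int64_t)>,
//   };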
1819 
1820 // Type-erased version of raw_hash_set::drop_deletes_without_resize.
1821 void DropDeletesWithoutResize(CommonFields& common,
1822                               const PolicyFunctions& policy, void* tmp_space);
1823 
1824 // A SwissTable.
1825 //
1826 // Policy: a policy defines how to perform different operations on
1827 // the slots of the hashtable (see hash_policy_traits.h for the full interface
1828 // of policy).
1829 //
1830 // Hash: a (possibly polymorphic) functor that hashes keys of the hashtable. The
1831 // functor should accept a key and return size_t as hash. For best performance
1832 // it is important that the hash function provides high entropy across all bits
1833 // of the hash.
1834 //
1835 // Eq: a (possibly polymorphic) functor that compares two keys for equality. It
1836 // should accept two (of possibly different type) keys and return a bool: true
1837 // if they are equal, false if they are not. If two keys compare equal, then
1838 // their hash values as defined by Hash MUST be equal.
1839 //
1840 // Allocator: an Allocator
1841 // [https://en.cppreference.com/w/cpp/named_req/Allocator] with which
1842 // the storage of the hashtable will be allocated and the elements will be
1843 // constructed and destroyed.
1844 template <class Policy, class Hash, class Eq, class Alloc>
1845 class raw_hash_set {
1846   using PolicyTraits = hash_policy_traits<Policy>;
1847   using KeyArgImpl =
1848       KeyArg<IsTransparent<Eq>::value && IsTransparent<Hash>::value>;
1849 
1850  public:
1851   using init_type = typename PolicyTraits::init_type;
1852   using key_type = typename PolicyTraits::key_type;
1853   // TODO(sbenza): Hide slot_type as it is an implementation detail. Needs user
1854   // code fixes!
1855   using slot_type = typename PolicyTraits::slot_type;
1856   using allocator_type = Alloc;
1857   using size_type = size_t;
1858   using difference_type = ptrdiff_t;
1859   using hasher = Hash;
1860   using key_equal = Eq;
1861   using policy_type = Policy;
1862   using value_type = typename PolicyTraits::value_type;
1863   using reference = value_type&;
1864   using const_reference = const value_type&;
1865   using pointer = typename absl::allocator_traits<
1866       allocator_type>::template rebind_traits<value_type>::pointer;
1867   using const_pointer = typename absl::allocator_traits<
1868       allocator_type>::template rebind_traits<value_type>::const_pointer;
1869 
1870   // Alias used for heterogeneous lookup functions.
1871   // `key_arg<K>` evaluates to `K` when the functors are transparent and to
1872   // `key_type` otherwise. It permits template argument deduction on `K` for the
1873   // transparent case.
1874   template <class K>
1875   using key_arg = typename KeyArgImpl::template type<K, key_type>;
1876 
1877  private:
1878   // Give an early error when key_type is not hashable/eq.
1879   auto KeyTypeCanBeHashed(const Hash& h, const key_type& k) -> decltype(h(k));
1880   auto KeyTypeCanBeEq(const Eq& eq, const key_type& k) -> decltype(eq(k, k));
1881 
1882   using AllocTraits = absl::allocator_traits<allocator_type>;
1883   using SlotAlloc = typename absl::allocator_traits<
1884       allocator_type>::template rebind_alloc<slot_type>;
1885   // People are often sloppy with the exact type of their allocator (sometimes
1886   // it has an extra const or is missing the pair, but rebinds made it work
1887   // anyway).
1888   using CharAlloc =
1889       typename absl::allocator_traits<Alloc>::template rebind_alloc<char>;
1890   using SlotAllocTraits = typename absl::allocator_traits<
1891       allocator_type>::template rebind_traits<slot_type>;
1892 
1893   static_assert(std::is_lvalue_reference<reference>::value,
1894                 "Policy::element() must return a reference");
1895 
1896   template <typename T>
1897   struct SameAsElementReference
1898       : std::is_same<typename std::remove_cv<
1899                          typename std::remove_reference<reference>::type>::type,
1900                      typename std::remove_cv<
1901                          typename std::remove_reference<T>::type>::type> {};
1902 
1903   // An enabler for insert(T&&): T must be convertible to init_type or be the
1904   // same as [cv] value_type [ref].
1905   // Note: we separate SameAsElementReference into its own type to avoid using
1906   // reference unless we need to. MSVC doesn't seem to like it in some
1907   // cases.
1908   template <class T>
1909   using RequiresInsertable = typename std::enable_if<
1910       absl::disjunction<std::is_convertible<T, init_type>,
1911                         SameAsElementReference<T>>::value,
1912       int>::type;
1913 
1914   // RequiresNotInit is a workaround for gcc prior to 7.1.
1915   // See https://godbolt.org/g/Y4xsUh.
1916   template <class T>
1917   using RequiresNotInit =
1918       typename std::enable_if<!std::is_same<T, init_type>::value, int>::type;
1919 
1920   template <class... Ts>
1921   using IsDecomposable = IsDecomposable<void, PolicyTraits, Hash, Eq, Ts...>;
1922 
1923  public:
1924   static_assert(std::is_same<pointer, value_type*>::value,
1925                 "Allocators with custom pointer types are not supported");
1926   static_assert(std::is_same<const_pointer, const value_type*>::value,
1927                 "Allocators with custom pointer types are not supported");
1928 
1929   class iterator : private HashSetIteratorGenerationInfo {
1930     friend class raw_hash_set;
1931 
1932    public:
1933     using iterator_category = std::forward_iterator_tag;
1934     using value_type = typename raw_hash_set::value_type;
1935     using reference =
1936         absl::conditional_t<PolicyTraits::constant_iterators::value,
1937                             const value_type&, value_type&>;
1938     using pointer = absl::remove_reference_t<reference>*;
1939     using difference_type = typename raw_hash_set::difference_type;
1940 
1941     iterator() {}
1942 
1943     // PRECONDITION: not an end() iterator.
1944     reference operator*() const {
1945       AssertIsFull(ctrl_, generation(), generation_ptr(), "operator*()");
1946       return unchecked_deref();
1947     }
1948 
1949     // PRECONDITION: not an end() iterator.
1950     pointer operator->() const {
1951       AssertIsFull(ctrl_, generation(), generation_ptr(), "operator->");
1952       return &operator*();
1953     }
1954 
1955     // PRECONDITION: not an end() iterator.
1956     iterator& operator++() {
1957       AssertIsFull(ctrl_, generation(), generation_ptr(), "operator++");
1958       ++ctrl_;
1959       ++slot_;
1960       skip_empty_or_deleted();
1961       return *this;
1962     }
1963     // PRECONDITION: not an end() iterator.
1964     iterator operator++(int) {
1965       auto tmp = *this;
1966       ++*this;
1967       return tmp;
1968     }
1969 
1970     friend bool operator==(const iterator& a, const iterator& b) {
1971       AssertIsValidForComparison(a.ctrl_, a.generation(), a.generation_ptr());
1972       AssertIsValidForComparison(b.ctrl_, b.generation(), b.generation_ptr());
1973       AssertSameContainer(a.ctrl_, b.ctrl_, a.slot_, b.slot_,
1974                           a.generation_ptr(), b.generation_ptr());
1975       return a.ctrl_ == b.ctrl_;
1976     }
1977     friend bool operator!=(const iterator& a, const iterator& b) {
1978       return !(a == b);
1979     }
1980 
1981    private:
1982     iterator(ctrl_t* ctrl, slot_type* slot,
1983              const GenerationType* generation_ptr)
1984         : HashSetIteratorGenerationInfo(generation_ptr),
1985           ctrl_(ctrl),
1986           slot_(slot) {
1987       // This assumption helps the compiler know that any non-end iterator is
1988       // not equal to any end iterator.
1989       ABSL_ASSUME(ctrl != nullptr);
1990     }
1991     // For end() iterators.
1992     explicit iterator(const GenerationType* generation_ptr)
1993         : HashSetIteratorGenerationInfo(generation_ptr), ctrl_(nullptr) {}
1994 
1995     // Fixes up `ctrl_` to point to a full slot by advancing it and `slot_` until
1996     // they reach one.
1997     //
1998     // If a sentinel is reached, we null `ctrl_` out instead.
1999     void skip_empty_or_deleted() {
2000       while (IsEmptyOrDeleted(*ctrl_)) {
2001         uint32_t shift =
2002             GroupEmptyOrDeleted{ctrl_}.CountLeadingEmptyOrDeleted();
2003         ctrl_ += shift;
2004         slot_ += shift;
2005       }
2006       if (ABSL_PREDICT_FALSE(*ctrl_ == ctrl_t::kSentinel)) ctrl_ = nullptr;
2007     }
2008 
2009     ctrl_t* control() const { return ctrl_; }
2010     slot_type* slot() const { return slot_; }
2011 
2012     // We use EmptyGroup() for default-constructed iterators so that they can
2013     // be distinguished from end iterators, which have nullptr ctrl_.
2014     ctrl_t* ctrl_ = EmptyGroup();
2015     // To avoid uninitialized member warnings, put slot_ in an anonymous union.
2016     // The member is not initialized on singleton and end iterators.
2017     union {
2018       slot_type* slot_;
2019     };
2020 
2021     // An equality check which skips ABSL Hardening iterator invalidation
2022     // checks.
2023     // Should be used when the lifetimes of the iterators are well-enough
2024     // understood to prove that they cannot be invalid.
2025     bool unchecked_equals(const iterator& b) { return ctrl_ == b.control(); }
2026 
2027     // Dereferences the iterator without ABSL Hardening iterator invalidation
2028     // checks.
2029     reference unchecked_deref() const { return PolicyTraits::element(slot_); }
2030   };
2031 
2032   class const_iterator {
2033     friend class raw_hash_set;
2034     template <class Container, typename Enabler>
2035     friend struct absl::container_internal::hashtable_debug_internal::
2036         HashtableDebugAccess;
2037 
2038    public:
2039     using iterator_category = typename iterator::iterator_category;
2040     using value_type = typename raw_hash_set::value_type;
2041     using reference = typename raw_hash_set::const_reference;
2042     using pointer = typename raw_hash_set::const_pointer;
2043     using difference_type = typename raw_hash_set::difference_type;
2044 
2045     const_iterator() = default;
2046     // Implicit construction from iterator.
2047     const_iterator(iterator i) : inner_(std::move(i)) {}  // NOLINT
2048 
2049     reference operator*() const { return *inner_; }
2050     pointer operator->() const { return inner_.operator->(); }
2051 
2052     const_iterator& operator++() {
2053       ++inner_;
2054       return *this;
2055     }
2056     const_iterator operator++(int) { return inner_++; }
2057 
2058     friend bool operator==(const const_iterator& a, const const_iterator& b) {
2059       return a.inner_ == b.inner_;
2060     }
2061     friend bool operator!=(const const_iterator& a, const const_iterator& b) {
2062       return !(a == b);
2063     }
2064 
2065    private:
2066     const_iterator(const ctrl_t* ctrl, const slot_type* slot,
2067                    const GenerationType* gen)
2068         : inner_(const_cast<ctrl_t*>(ctrl), const_cast<slot_type*>(slot), gen) {
2069     }
2070     ctrl_t* control() const { return inner_.control(); }
2071     slot_type* slot() const { return inner_.slot(); }
2072 
2073     iterator inner_;
2074 
2075     bool unchecked_equals(const const_iterator& b) {
2076       return inner_.unchecked_equals(b.inner_);
2077     }
2078   };
2079 
2080   using node_type = node_handle<Policy, hash_policy_traits<Policy>, Alloc>;
2081   using insert_return_type = InsertReturnType<iterator, node_type>;
2082 
2083   // Note: can't use `= default` due to non-default noexcept (causes
2084   // problems for some compilers). NOLINTNEXTLINE
2085   raw_hash_set() noexcept(
2086       std::is_nothrow_default_constructible<hasher>::value &&
2087       std::is_nothrow_default_constructible<key_equal>::value &&
2088       std::is_nothrow_default_constructible<allocator_type>::value) {}
2089 
2090   ABSL_ATTRIBUTE_NOINLINE explicit raw_hash_set(
2091       size_t bucket_count, const hasher& hash = hasher(),
2092       const key_equal& eq = key_equal(),
2093       const allocator_type& alloc = allocator_type())
2094       : settings_(CommonFields{}, hash, eq, alloc) {
2095     if (bucket_count) {
2096       resize(NormalizeCapacity(bucket_count));
2097     }
2098   }
2099 
2100   raw_hash_set(size_t bucket_count, const hasher& hash,
2101                const allocator_type& alloc)
2102       : raw_hash_set(bucket_count, hash, key_equal(), alloc) {}
2103 
2104   raw_hash_set(size_t bucket_count, const allocator_type& alloc)
2105       : raw_hash_set(bucket_count, hasher(), key_equal(), alloc) {}
2106 
2107   explicit raw_hash_set(const allocator_type& alloc)
2108       : raw_hash_set(0, hasher(), key_equal(), alloc) {}
2109 
2110   template <class InputIter>
2111   raw_hash_set(InputIter first, InputIter last, size_t bucket_count = 0,
2112                const hasher& hash = hasher(), const key_equal& eq = key_equal(),
2113                const allocator_type& alloc = allocator_type())
2114       : raw_hash_set(SelectBucketCountForIterRange(first, last, bucket_count),
2115                      hash, eq, alloc) {
2116     insert(first, last);
2117   }
2118 
2119   template <class InputIter>
2120   raw_hash_set(InputIter first, InputIter last, size_t bucket_count,
2121                const hasher& hash, const allocator_type& alloc)
2122       : raw_hash_set(first, last, bucket_count, hash, key_equal(), alloc) {}
2123 
2124   template <class InputIter>
2125   raw_hash_set(InputIter first, InputIter last, size_t bucket_count,
2126                const allocator_type& alloc)
2127       : raw_hash_set(first, last, bucket_count, hasher(), key_equal(), alloc) {}
2128 
2129   template <class InputIter>
2130   raw_hash_set(InputIter first, InputIter last, const allocator_type& alloc)
2131       : raw_hash_set(first, last, 0, hasher(), key_equal(), alloc) {}
2132 
2133   // Instead of accepting std::initializer_list<value_type> as the first
2134   // argument like std::unordered_set<value_type> does, we have two overloads
2135   // that accept std::initializer_list<T> and std::initializer_list<init_type>.
2136   // This is advantageous for performance.
2137   //
2138   //   // Turns {"abc", "def"} into std::initializer_list<std::string>, then
2139   //   // copies the strings into the set.
2140   //   std::unordered_set<std::string> s = {"abc", "def"};
2141   //
2142   //   // Turns {"abc", "def"} into std::initializer_list<const char*>, then
2143   //   // copies the strings into the set.
2144   //   absl::flat_hash_set<std::string> s = {"abc", "def"};
2145   //
2146   // The same trick is used in insert().
2147   //
2148   // The enabler is necessary to prevent this constructor from triggering where
2149   // the copy constructor is meant to be called.
2150   //
2151   //   absl::flat_hash_set<int> a, b{a};
2152   //
2153   // RequiresNotInit<T> is a workaround for gcc prior to 7.1.
2154   template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
2155   raw_hash_set(std::initializer_list<T> init, size_t bucket_count = 0,
2156                const hasher& hash = hasher(), const key_equal& eq = key_equal(),
2157                const allocator_type& alloc = allocator_type())
2158       : raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) {}
2159 
2160   raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count = 0,
2161                const hasher& hash = hasher(), const key_equal& eq = key_equal(),
2162                const allocator_type& alloc = allocator_type())
2163       : raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) {}
2164 
2165   template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
2166   raw_hash_set(std::initializer_list<T> init, size_t bucket_count,
2167                const hasher& hash, const allocator_type& alloc)
2168       : raw_hash_set(init, bucket_count, hash, key_equal(), alloc) {}
2169 
2170   raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count,
2171                const hasher& hash, const allocator_type& alloc)
2172       : raw_hash_set(init, bucket_count, hash, key_equal(), alloc) {}
2173 
2174   template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
2175   raw_hash_set(std::initializer_list<T> init, size_t bucket_count,
2176                const allocator_type& alloc)
2177       : raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {}
2178 
2179   raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count,
2180                const allocator_type& alloc)
2181       : raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {}
2182 
2183   template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
2184   raw_hash_set(std::initializer_list<T> init, const allocator_type& alloc)
2185       : raw_hash_set(init, 0, hasher(), key_equal(), alloc) {}
2186 
2187   raw_hash_set(std::initializer_list<init_type> init,
2188                const allocator_type& alloc)
2189       : raw_hash_set(init, 0, hasher(), key_equal(), alloc) {}
2190 
2191   raw_hash_set(const raw_hash_set& that)
2192       : raw_hash_set(that, AllocTraits::select_on_container_copy_construction(
2193                                that.alloc_ref())) {}
2194 
2195   raw_hash_set(const raw_hash_set& that, const allocator_type& a)
2196       : raw_hash_set(0, that.hash_ref(), that.eq_ref(), a) {
2197     const size_t size = that.size();
2198     if (size == 0) return;
2199     reserve(size);
2200     // Because the table is guaranteed to be empty, we can do something faster
2201     // than a full `insert`.
2202     for (const auto& v : that) {
2203       const size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, v);
2204       auto target = find_first_non_full_outofline(common(), hash);
2205       SetCtrl(common(), target.offset, H2(hash), sizeof(slot_type));
2206       emplace_at(target.offset, v);
2207       common().maybe_increment_generation_on_insert();
2208       infoz().RecordInsert(hash, target.probe_length);
2209     }
2210     common().set_size(size);
2211     set_growth_left(growth_left() - size);
2212   }
2213 
2214   ABSL_ATTRIBUTE_NOINLINE raw_hash_set(raw_hash_set&& that) noexcept(
2215       std::is_nothrow_copy_constructible<hasher>::value &&
2216       std::is_nothrow_copy_constructible<key_equal>::value &&
2217       std::is_nothrow_copy_constructible<allocator_type>::value)
2218       :  // Hash, equality and allocator are copied instead of moved because
2219          // `that` must be left valid. If Hash is std::function<Key>, moving it
2220          // would create a nullptr functor that cannot be called.
2221          // TODO(b/296061262): move instead of copying hash/eq/alloc.
2222          // Note: we avoid using exchange for better generated code.
2223         settings_(std::move(that.common()), that.hash_ref(), that.eq_ref(),
2224                   that.alloc_ref()) {
2225     that.common() = CommonFields{};
2226     maybe_increment_generation_or_rehash_on_move();
2227   }
2228 
2229   raw_hash_set(raw_hash_set&& that, const allocator_type& a)
2230       : settings_(CommonFields{}, that.hash_ref(), that.eq_ref(), a) {
2231     if (a == that.alloc_ref()) {
2232       std::swap(common(), that.common());
2233       maybe_increment_generation_or_rehash_on_move();
2234     } else {
2235       move_elements_allocs_unequal(std::move(that));
2236     }
2237   }
2238 
2239   raw_hash_set& operator=(const raw_hash_set& that) {
2240     if (ABSL_PREDICT_FALSE(this == &that)) return *this;
2241     constexpr bool propagate_alloc =
2242         AllocTraits::propagate_on_container_copy_assignment::value;
2243     // TODO(ezb): maybe avoid allocating a new backing array if this->capacity()
2244     // is an exact match for that.size(). If this->capacity() is too big, then
2245     // it would make iteration very slow to reuse the allocation. Maybe we can
2246     // do the same heuristic as clear() and reuse if it's small enough.
2247     raw_hash_set tmp(that, propagate_alloc ? that.alloc_ref() : alloc_ref());
2248     // NOLINTNEXTLINE: not returning *this for performance.
2249     return assign_impl<propagate_alloc>(std::move(tmp));
2250   }
2251 
2252   raw_hash_set& operator=(raw_hash_set&& that) noexcept(
2253       absl::allocator_traits<allocator_type>::is_always_equal::value &&
2254       std::is_nothrow_move_assignable<hasher>::value &&
2255       std::is_nothrow_move_assignable<key_equal>::value) {
2256     // TODO(sbenza): We should only use the operations from the noexcept clause
2257     // to make sure we actually adhere to that contract.
2258     // NOLINTNEXTLINE: not returning *this for performance.
2259     return move_assign(
2260         std::move(that),
2261         typename AllocTraits::propagate_on_container_move_assignment());
2262   }
2263 
2264   ~raw_hash_set() { destructor_impl(); }
2265 
2266   iterator begin() ABSL_ATTRIBUTE_LIFETIME_BOUND {
2267     auto it = iterator_at(0);
2268     it.skip_empty_or_deleted();
2269     return it;
2270   }
2271   iterator end() ABSL_ATTRIBUTE_LIFETIME_BOUND {
2272     return iterator(common().generation_ptr());
2273   }
2274 
2275   const_iterator begin() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
2276     return const_cast<raw_hash_set*>(this)->begin();
2277   }
2278   const_iterator end() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
2279     return iterator(common().generation_ptr());
2280   }
2281   const_iterator cbegin() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
2282     return begin();
2283   }
2284   const_iterator cend() const ABSL_ATTRIBUTE_LIFETIME_BOUND { return end(); }
2285 
2286   bool empty() const { return !size(); }
2287   size_t size() const { return common().size(); }
2288   size_t capacity() const { return common().capacity(); }
2289   size_t max_size() const { return (std::numeric_limits<size_t>::max)(); }
2290 
2291   ABSL_ATTRIBUTE_REINITIALIZES void clear() {
2292     // Iterating over this container is O(bucket_count()). When bucket_count()
2293     // is much greater than size(), iteration becomes prohibitively expensive.
2294     // For clear() it is more important to reuse the allocated array when the
2295     // container is small, because allocation takes a comparatively long time
2296     // compared to destroying the elements of the container. So we pick the
2297     // largest bucket_count() threshold for which iteration is still fast and
2298     // past that we simply deallocate the array.
2299     const size_t cap = capacity();
2300     if (cap == 0) {
2301       // Already guaranteed to be empty; so nothing to do.
2302     } else {
2303       destroy_slots();
2304       ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/cap < 128);
2305     }
2306     common().set_reserved_growth(0);
2307     common().set_reservation_size(0);
2308   }
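  // For example (editor's sketch): a small backing array survives clear(),
  // which keeps subsequent insertions allocation-free.
  //
  //   absl::flat_hash_set<int> s;
  //   s.reserve(100);   // capacity() becomes 127
  //   s.insert(1);
  //   s.clear();        // capacity() < 128, so the backing array is reused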
2309 
2310   // This overload kicks in when the argument is an rvalue of insertable and
2311   // decomposable type other than init_type.
2312   //
2313   //   flat_hash_map<std::string, int> m;
2314   //   m.insert(std::make_pair("abc", 42));
2315   // TODO(cheshire): A type alias T2 is introduced as a workaround for the nvcc
2316   // bug.
2317   template <class T, RequiresInsertable<T> = 0, class T2 = T,
2318             typename std::enable_if<IsDecomposable<T2>::value, int>::type = 0,
2319             T* = nullptr>
2320   std::pair<iterator, bool> insert(T&& value) ABSL_ATTRIBUTE_LIFETIME_BOUND {
2321     return emplace(std::forward<T>(value));
2322   }
2323 
2324   // This overload kicks in when the argument is a bitfield or an lvalue of
2325   // insertable and decomposable type.
2326   //
2327   //   union { int n : 1; };
2328   //   flat_hash_set<int> s;
2329   //   s.insert(n);
2330   //
2331   //   flat_hash_set<std::string> s;
2332   //   const char* p = "hello";
2333   //   s.insert(p);
2334   //
2335   template <
2336       class T, RequiresInsertable<const T&> = 0,
2337       typename std::enable_if<IsDecomposable<const T&>::value, int>::type = 0>
2338   std::pair<iterator, bool> insert(const T& value)
2339       ABSL_ATTRIBUTE_LIFETIME_BOUND {
2340     return emplace(value);
2341   }
2342 
2343   // This overload kicks in when the argument is an rvalue of init_type. Its
2344   // purpose is to handle brace-init-list arguments.
2345   //
2346   //   flat_hash_map<std::string, int> s;
2347   //   s.insert({"abc", 42});
2348   std::pair<iterator, bool> insert(init_type&& value)
2349       ABSL_ATTRIBUTE_LIFETIME_BOUND {
2350     return emplace(std::move(value));
2351   }
2352 
2353   // TODO(cheshire): A type alias T2 is introduced as a workaround for the nvcc
2354   // bug.
2355   template <class T, RequiresInsertable<T> = 0, class T2 = T,
2356             typename std::enable_if<IsDecomposable<T2>::value, int>::type = 0,
2357             T* = nullptr>
2358   iterator insert(const_iterator, T&& value) ABSL_ATTRIBUTE_LIFETIME_BOUND {
2359     return insert(std::forward<T>(value)).first;
2360   }
2361 
2362   template <
2363       class T, RequiresInsertable<const T&> = 0,
2364       typename std::enable_if<IsDecomposable<const T&>::value, int>::type = 0>
2365   iterator insert(const_iterator,
2366                   const T& value) ABSL_ATTRIBUTE_LIFETIME_BOUND {
2367     return insert(value).first;
2368   }
2369 
2370   iterator insert(const_iterator,
2371                   init_type&& value) ABSL_ATTRIBUTE_LIFETIME_BOUND {
2372     return insert(std::move(value)).first;
2373   }
2374 
2375   template <class InputIt>
2376   void insert(InputIt first, InputIt last) {
2377     for (; first != last; ++first) emplace(*first);
2378   }
2379 
2380   template <class T, RequiresNotInit<T> = 0, RequiresInsertable<const T&> = 0>
2381   void insert(std::initializer_list<T> ilist) {
2382     insert(ilist.begin(), ilist.end());
2383   }
2384 
2385   void insert(std::initializer_list<init_type> ilist) {
2386     insert(ilist.begin(), ilist.end());
2387   }
2388 
2389   insert_return_type insert(node_type&& node) ABSL_ATTRIBUTE_LIFETIME_BOUND {
2390     if (!node) return {end(), false, node_type()};
2391     const auto& elem = PolicyTraits::element(CommonAccess::GetSlot(node));
2392     auto res = PolicyTraits::apply(
2393         InsertSlot<false>{*this, std::move(*CommonAccess::GetSlot(node))},
2394         elem);
2395     if (res.second) {
2396       CommonAccess::Reset(&node);
2397       return {res.first, true, node_type()};
2398     } else {
2399       return {res.first, false, std::move(node)};
2400     }
2401   }
2402 
2403   iterator insert(const_iterator,
2404                   node_type&& node) ABSL_ATTRIBUTE_LIFETIME_BOUND {
2405     auto res = insert(std::move(node));
2406     node = std::move(res.node);
2407     return res.position;
2408   }
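  // Usage sketch (editor's addition): moving an element between two containers
  // without copying it, via extract() and the node overload of insert().
  //
  //   absl::flat_hash_set<std::string> a = {"key"}, b;
  //   auto node = a.extract(a.begin());
  //   auto res = b.insert(std::move(node));  // returns insert_return_type
  //   assert(res.inserted && a.empty() && b.size() == 1);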
2409 
2410   // This overload kicks in if we can deduce the key from args. This enables us
2411   // to avoid constructing value_type if an entry with the same key already
2412   // exists.
2413   //
2414   // For example:
2415   //
2416   //   flat_hash_map<std::string, std::string> m = {{"abc", "def"}};
2417   //   // Creates no std::string copies and makes no heap allocations.
2418   //   m.emplace("abc", "xyz");
2419   template <class... Args, typename std::enable_if<
2420                                IsDecomposable<Args...>::value, int>::type = 0>
2421   std::pair<iterator, bool> emplace(Args&&... args)
2422       ABSL_ATTRIBUTE_LIFETIME_BOUND {
2423     return PolicyTraits::apply(EmplaceDecomposable{*this},
2424                                std::forward<Args>(args)...);
2425   }
2426 
2427   // This overload kicks in if we cannot deduce the key from args. It constructs
2428   // value_type unconditionally and then either moves it into the table or
2429   // destroys it.
2430   template <class... Args, typename std::enable_if<
2431                                !IsDecomposable<Args...>::value, int>::type = 0>
2432   std::pair<iterator, bool> emplace(Args&&... args)
2433       ABSL_ATTRIBUTE_LIFETIME_BOUND {
2434     alignas(slot_type) unsigned char raw[sizeof(slot_type)];
2435     slot_type* slot = reinterpret_cast<slot_type*>(&raw);
2436 
2437     construct(slot, std::forward<Args>(args)...);
2438     const auto& elem = PolicyTraits::element(slot);
2439     return PolicyTraits::apply(InsertSlot<true>{*this, std::move(*slot)}, elem);
2440   }
2441 
2442   template <class... Args>
2443   iterator emplace_hint(const_iterator,
2444                         Args&&... args) ABSL_ATTRIBUTE_LIFETIME_BOUND {
2445     return emplace(std::forward<Args>(args)...).first;
2446   }
2447 
2448   // Extension API: support for lazy emplace.
2449   //
2450   // Looks up key in the table. If found, returns the iterator to the element.
2451   // Otherwise calls `f` with one argument of type `raw_hash_set::constructor`,
2452   // and returns an iterator to the new element.
2453   //
2454   // `f` must abide by several restrictions:
2455   //  - it MUST call `raw_hash_set::constructor` with arguments as if a
2456   //    `raw_hash_set::value_type` is constructed,
2457   //  - it MUST NOT access the container before the call to
2458   //    `raw_hash_set::constructor`, and
2459   //  - it MUST NOT erase the lazily emplaced element.
2460   // Doing any of these is undefined behavior.
2461   //
2462   // For example:
2463   //
2464   //   std::unordered_set<ArenaString> s;
2465   //   // Makes an ArenaString even if "abc" is already in the set.
2466   //   s.insert(ArenaString(&arena, "abc"));
2467   //
2468   //   flat_hash_set<ArenaString> s;
2469   //   // Makes an ArenaString only if "abc" is not yet in the set.
2470   //   s.lazy_emplace("abc", [&](const constructor& ctor) {
2471   //     ctor(&arena, "abc");
2472   //   });
2473   //
2474   // WARNING: This API is currently experimental. If there is a way to implement
2475   // the same thing with the rest of the API, prefer that.
2476   class constructor {
2477     friend class raw_hash_set;
2478 
2479    public:
2480     template <class... Args>
2481     void operator()(Args&&... args) const {
2482       assert(*slot_);
2483       PolicyTraits::construct(alloc_, *slot_, std::forward<Args>(args)...);
2484       *slot_ = nullptr;
2485     }
2486 
2487    private:
2488     constructor(allocator_type* a, slot_type** slot) : alloc_(a), slot_(slot) {}
2489 
2490     allocator_type* alloc_;
2491     slot_type** slot_;
2492   };
2493 
2494   template <class K = key_type, class F>
2495   iterator lazy_emplace(const key_arg<K>& key,
2496                         F&& f) ABSL_ATTRIBUTE_LIFETIME_BOUND {
2497     auto res = find_or_prepare_insert(key);
2498     if (res.second) {
2499       slot_type* slot = slot_array() + res.first;
2500       std::forward<F>(f)(constructor(&alloc_ref(), &slot));
2501       assert(!slot);
2502     }
2503     return iterator_at(res.first);
2504   }
2505 
2506   // Extension API: support for heterogeneous keys.
2507   //
2508   //   std::unordered_set<std::string> s;
2509   //   // Turns "abc" into std::string.
2510   //   s.erase("abc");
2511   //
2512   //   flat_hash_set<std::string> s;
2513   //   // Uses "abc" directly without copying it into std::string.
2514   //   s.erase("abc");
2515   template <class K = key_type>
2516   size_type erase(const key_arg<K>& key) {
2517     auto it = find(key);
2518     if (it == end()) return 0;
2519     erase(it);
2520     return 1;
2521   }
2522 
2523   // Erases the element pointed to by `it`.  Unlike `std::unordered_set::erase`,
2524   // this method returns void (rather than an iterator) so that the operation
2525   // stays O(1).  The iterator is invalidated, so any increment should be done
2526   // before calling erase.  To erase while iterating over the container, use the
2527   // following idiom (which also works for standard containers):
2528   //
2529   // for (auto it = m.begin(), end = m.end(); it != end;) {
2530   //   // `erase()` will invalidate `it`, so advance `it` first.
2531   //   auto copy_it = it++;
2532   //   if (<pred>) {
2533   //     m.erase(copy_it);
2534   //   }
2535   // }
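       //
       // A minimal sketch of that idiom (the predicate is illustrative):
       //
       //   flat_hash_set<int> s = {1, 2, 3, 4};
       //   for (auto it = s.begin(), end = s.end(); it != end;) {
       //     auto copy_it = it++;
       //     if (*copy_it % 2 != 0) s.erase(copy_it);  // drop the odd keys
       //   }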
2536   void erase(const_iterator cit) { erase(cit.inner_); }
2537 
2538   // This overload is necessary because otherwise erase<K>(const K&) would be
2539   // a better match if a non-const iterator is passed as an argument.
2540   void erase(iterator it) {
2541     AssertIsFull(it.control(), it.generation(), it.generation_ptr(), "erase()");
2542     destroy(it.slot());
2543     erase_meta_only(it);
2544   }
2545 
2546   iterator erase(const_iterator first,
2547                  const_iterator last) ABSL_ATTRIBUTE_LIFETIME_BOUND {
2548     // We check for empty first because ClearBackingArray requires that
2549     // capacity() > 0 as a precondition.
2550     if (empty()) return end();
2551     if (first == begin() && last == end()) {
2552       // TODO(ezb): we access control bytes in destroy_slots so it could make
2553       // sense to combine destroy_slots and ClearBackingArray to avoid cache
2554       // misses when the table is large. Note that we also do this in clear().
2555       destroy_slots();
2556       ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/true);
2557       common().set_reserved_growth(common().reservation_size());
2558       return end();
2559     }
2560     while (first != last) {
2561       erase(first++);
2562     }
2563     return last.inner_;
2564   }
2565 
2566   // Moves elements from `src` into `this`.
2567   // If the element already exists in `this`, it is left unmodified in `src`.
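       //
       // For example (a hedged sketch):
       //
       //   flat_hash_set<std::string> dst = {"a"};
       //   flat_hash_set<std::string> src = {"a", "b"};
       //   dst.merge(src);  // "b" is moved into `dst`; the duplicate "a" is
       //                    // left in `src`.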
2568   template <typename H, typename E>
2569   void merge(raw_hash_set<Policy, H, E, Alloc>& src) {  // NOLINT
2570     assert(this != &src);
2571     for (auto it = src.begin(), e = src.end(); it != e;) {
2572       auto next = std::next(it);
2573       if (PolicyTraits::apply(InsertSlot<false>{*this, std::move(*it.slot())},
2574                               PolicyTraits::element(it.slot()))
2575               .second) {
2576         src.erase_meta_only(it);
2577       }
2578       it = next;
2579     }
2580   }
2581 
2582   template <typename H, typename E>
2583   void merge(raw_hash_set<Policy, H, E, Alloc>&& src) {
2584     merge(src);
2585   }
2586 
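       // Extracts the element at `position` (or, in the overload below, the
       // element matching `key`) into a node handle and removes it from the
       // table without destroying it. A hedged usage sketch (`other_set` is an
       // assumed compatible container):
       //
       //   flat_hash_set<std::string> s = {"abc"};
       //   auto node = s.extract(s.begin());
       //   other_set.insert(std::move(node));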
2587   node_type extract(const_iterator position) {
2588     AssertIsFull(position.control(), position.inner_.generation(),
2589                  position.inner_.generation_ptr(), "extract()");
2590     auto node = CommonAccess::Transfer<node_type>(alloc_ref(), position.slot());
2591     erase_meta_only(position);
2592     return node;
2593   }
2594 
2595   template <
2596       class K = key_type,
2597       typename std::enable_if<!std::is_same<K, iterator>::value, int>::type = 0>
2598   node_type extract(const key_arg<K>& key) {
2599     auto it = find(key);
2600     return it == end() ? node_type() : extract(const_iterator{it});
2601   }
2602 
2603   void swap(raw_hash_set& that) noexcept(
2604       IsNoThrowSwappable<hasher>() && IsNoThrowSwappable<key_equal>() &&
2605       IsNoThrowSwappable<allocator_type>(
2606           typename AllocTraits::propagate_on_container_swap{})) {
2607     using std::swap;
2608     swap(common(), that.common());
2609     swap(hash_ref(), that.hash_ref());
2610     swap(eq_ref(), that.eq_ref());
2611     SwapAlloc(alloc_ref(), that.alloc_ref(),
2612               typename AllocTraits::propagate_on_container_swap{});
2613   }
2614 
2615   void rehash(size_t n) {
2616     if (n == 0 && capacity() == 0) return;
2617     if (n == 0 && size() == 0) {
2618       ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/false);
2619       return;
2620     }
2621 
2622     // bitor is a faster way of doing `max` here. We will round up to the next
2623     // power-of-2-minus-1, so bitor is good enough.
2624     auto m = NormalizeCapacity(n | GrowthToLowerboundCapacity(size()));
2625     // n == 0 unconditionally rehashes as per the standard.
2626     if (n == 0 || m > capacity()) {
2627       resize(m);
2628 
2629       // This is after resize, to ensure that we have completed the allocation
2630       // and have potentially sampled the hashtable.
2631       infoz().RecordReservation(n);
2632     }
2633   }
2634 
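       // Ensures the table can hold at least `n` elements without further
       // allocation. A hedged usage sketch: reserving before a bulk insert
       // performs at most one allocation and avoids intermediate rehashes.
       //
       //   flat_hash_set<int> s;
       //   s.reserve(1000);
       //   for (int i = 0; i < 1000; ++i) s.insert(i);  // no rehash on the way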
2635   void reserve(size_t n) {
2636     if (n > size() + growth_left()) {
2637       size_t m = GrowthToLowerboundCapacity(n);
2638       resize(NormalizeCapacity(m));
2639 
2640       // This is after resize, to ensure that we have completed the allocation
2641       // and have potentially sampled the hashtable.
2642       infoz().RecordReservation(n);
2643     }
2644     common().reset_reserved_growth(n);
2645     common().set_reservation_size(n);
2646   }
2647 
2648   // Extension API: support for heterogeneous keys.
2649   //
2650   //   std::unordered_set<std::string> s;
2651   //   // Turns "abc" into std::string.
2652   //   s.count("abc");
2653   //
2654   //   flat_hash_set<std::string> s;
2655   //   // Uses "abc" directly without copying it into std::string.
2656   //   s.count("abc");
2657   template <class K = key_type>
2658   size_t count(const key_arg<K>& key) const {
2659     return find(key) == end() ? 0 : 1;
2660   }
2661 
2662   // Issues CPU prefetch instructions for the memory needed to find or insert
2663   // a key.  Like all lookup functions, this supports heterogeneous keys.
2664   //
2665   // NOTE: This is a very low level operation and should not be used without
2666   // specific benchmarks indicating its importance.
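       //
       // A hedged usage sketch: issue the prefetch, overlap it with unrelated
       // work, then do the lookup (`DoOtherWork()` is illustrative).
       //
       //   s.prefetch(key);
       //   DoOtherWork();  // gives the prefetched cache lines time to arrive
       //   auto it = s.find(key);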
2667   template <class K = key_type>
2668   void prefetch(const key_arg<K>& key) const {
2669     (void)key;
2670     // Avoid probing if we won't be able to prefetch the addresses received.
2671 #ifdef ABSL_HAVE_PREFETCH
2672     prefetch_heap_block();
2673     auto seq = probe(common(), hash_ref()(key));
2674     PrefetchToLocalCache(control() + seq.offset());
2675     PrefetchToLocalCache(slot_array() + seq.offset());
2676 #endif  // ABSL_HAVE_PREFETCH
2677   }
2678 
2679   // The API of find() has two extensions.
2680   //
2681   // 1. The hash can be passed by the user. It must be equal to the hash of the
2682   // key.
2683   //
2684   // 2. The type of the key argument doesn't have to be key_type. This is the
2685   // so-called heterogeneous key support.
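       //
       // For example (a hedged sketch), a caller that has already computed the
       // hash can avoid hashing the key twice:
       //
       //   const size_t hash = s.hash_function()(key);
       //   auto it = s.find(key, hash);  // `hash` must equal hash_function()(key)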
2686   template <class K = key_type>
2687   iterator find(const key_arg<K>& key,
2688                 size_t hash) ABSL_ATTRIBUTE_LIFETIME_BOUND {
2689     auto seq = probe(common(), hash);
2690     slot_type* slot_ptr = slot_array();
2691     const ctrl_t* ctrl = control();
2692     while (true) {
2693       Group g{ctrl + seq.offset()};
2694       for (uint32_t i : g.Match(H2(hash))) {
2695         if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
2696                 EqualElement<K>{key, eq_ref()},
2697                 PolicyTraits::element(slot_ptr + seq.offset(i)))))
2698           return iterator_at(seq.offset(i));
2699       }
2700       if (ABSL_PREDICT_TRUE(g.MaskEmpty())) return end();
2701       seq.next();
2702       assert(seq.index() <= capacity() && "full table!");
2703     }
2704   }
2705   template <class K = key_type>
2706   iterator find(const key_arg<K>& key) ABSL_ATTRIBUTE_LIFETIME_BOUND {
2707     prefetch_heap_block();
2708     return find(key, hash_ref()(key));
2709   }
2710 
2711   template <class K = key_type>
2712   const_iterator find(const key_arg<K>& key,
2713                       size_t hash) const ABSL_ATTRIBUTE_LIFETIME_BOUND {
2714     return const_cast<raw_hash_set*>(this)->find(key, hash);
2715   }
2716   template <class K = key_type>
2717   const_iterator find(const key_arg<K>& key) const
2718       ABSL_ATTRIBUTE_LIFETIME_BOUND {
2719     prefetch_heap_block();
2720     return find(key, hash_ref()(key));
2721   }
2722 
2723   template <class K = key_type>
2724   bool contains(const key_arg<K>& key) const {
2725     // Here neither the iterator returned by `find()` nor `end()` can be invalid
2726     // outside of potential thread-safety issues.
2727     // `find()`'s return value is constructed, used, and then destructed
2728     // all in this context.
2729     return !find(key).unchecked_equals(end());
2730   }
2731 
2732   template <class K = key_type>
2733   std::pair<iterator, iterator> equal_range(const key_arg<K>& key)
2734       ABSL_ATTRIBUTE_LIFETIME_BOUND {
2735     auto it = find(key);
2736     if (it != end()) return {it, std::next(it)};
2737     return {it, it};
2738   }
2739   template <class K = key_type>
2740   std::pair<const_iterator, const_iterator> equal_range(
2741       const key_arg<K>& key) const ABSL_ATTRIBUTE_LIFETIME_BOUND {
2742     auto it = find(key);
2743     if (it != end()) return {it, std::next(it)};
2744     return {it, it};
2745   }
2746 
2747   size_t bucket_count() const { return capacity(); }
2748   float load_factor() const {
2749     return capacity() ? static_cast<double>(size()) / capacity() : 0.0;
2750   }
2751   float max_load_factor() const { return 1.0f; }
2752   void max_load_factor(float) {
2753     // Does nothing.
2754   }
2755 
2756   hasher hash_function() const { return hash_ref(); }
2757   key_equal key_eq() const { return eq_ref(); }
2758   allocator_type get_allocator() const { return alloc_ref(); }
2759 
2760   friend bool operator==(const raw_hash_set& a, const raw_hash_set& b) {
2761     if (a.size() != b.size()) return false;
2762     const raw_hash_set* outer = &a;
2763     const raw_hash_set* inner = &b;
2764     if (outer->capacity() > inner->capacity()) std::swap(outer, inner);
2765     for (const value_type& elem : *outer) {
2766       auto it = PolicyTraits::apply(FindElement{*inner}, elem);
2767       if (it == inner->end() || !(*it == elem)) return false;
2768     }
2769     return true;
2770   }
2771 
2772   friend bool operator!=(const raw_hash_set& a, const raw_hash_set& b) {
2773     return !(a == b);
2774   }
2775 
2776   template <typename H>
2777   friend typename std::enable_if<H::template is_hashable<value_type>::value,
2778                                  H>::type
2779   AbslHashValue(H h, const raw_hash_set& s) {
2780     return H::combine(H::combine_unordered(std::move(h), s.begin(), s.end()),
2781                       s.size());
2782   }
2783 
2784   friend void swap(raw_hash_set& a,
2785                    raw_hash_set& b) noexcept(noexcept(a.swap(b))) {
2786     a.swap(b);
2787   }
2788 
2789  private:
2790   template <class Container, typename Enabler>
2791   friend struct absl::container_internal::hashtable_debug_internal::
2792       HashtableDebugAccess;
2793 
2794   struct FindElement {
2795     template <class K, class... Args>
2796     const_iterator operator()(const K& key, Args&&...) const {
2797       return s.find(key);
2798     }
2799     const raw_hash_set& s;
2800   };
2801 
2802   struct HashElement {
2803     template <class K, class... Args>
2804     size_t operator()(const K& key, Args&&...) const {
2805       return h(key);
2806     }
2807     const hasher& h;
2808   };
2809 
2810   template <class K1>
2811   struct EqualElement {
2812     template <class K2, class... Args>
2813     bool operator()(const K2& lhs, Args&&...) const {
2814       return eq(lhs, rhs);
2815     }
2816     const K1& rhs;
2817     const key_equal& eq;
2818   };
2819 
2820   struct EmplaceDecomposable {
2821     template <class K, class... Args>
2822     std::pair<iterator, bool> operator()(const K& key, Args&&... args) const {
2823       auto res = s.find_or_prepare_insert(key);
2824       if (res.second) {
2825         s.emplace_at(res.first, std::forward<Args>(args)...);
2826       }
2827       return {s.iterator_at(res.first), res.second};
2828     }
2829     raw_hash_set& s;
2830   };
2831 
2832   template <bool do_destroy>
2833   struct InsertSlot {
2834     template <class K, class... Args>
2835     std::pair<iterator, bool> operator()(const K& key, Args&&...) && {
2836       auto res = s.find_or_prepare_insert(key);
2837       if (res.second) {
2838         s.transfer(s.slot_array() + res.first, &slot);
2839       } else if (do_destroy) {
2840         s.destroy(&slot);
2841       }
2842       return {s.iterator_at(res.first), res.second};
2843     }
2844     raw_hash_set& s;
2845     // Constructed slot. Either moved into place or destroyed.
2846     slot_type&& slot;
2847   };
2848 
2849   // TODO(b/303305702): re-enable reentrant validation.
2850   template <typename... Args>
2851   inline void construct(slot_type* slot, Args&&... args) {
2852     PolicyTraits::construct(&alloc_ref(), slot, std::forward<Args>(args)...);
2853   }
2854   inline void destroy(slot_type* slot) {
2855     PolicyTraits::destroy(&alloc_ref(), slot);
2856   }
2857   inline void transfer(slot_type* to, slot_type* from) {
2858     PolicyTraits::transfer(&alloc_ref(), to, from);
2859   }
2860 
2861   inline void destroy_slots() {
2862     const size_t cap = capacity();
2863     const ctrl_t* ctrl = control();
2864     slot_type* slot = slot_array();
2865     for (size_t i = 0; i != cap; ++i) {
2866       if (IsFull(ctrl[i])) {
2867         destroy(slot + i);
2868       }
2869     }
2870   }
2871 
2872   inline void dealloc() {
2873     assert(capacity() != 0);
2874     // Unpoison before returning the memory to the allocator.
2875     SanitizerUnpoisonMemoryRegion(slot_array(), sizeof(slot_type) * capacity());
2876     infoz().Unregister();
2877     Deallocate<BackingArrayAlignment(alignof(slot_type))>(
2878         &alloc_ref(), common().backing_array_start(),
2879         common().alloc_size(sizeof(slot_type), alignof(slot_type)));
2880   }
2881 
2882   inline void destructor_impl() {
2883     if (capacity() == 0) return;
2884     destroy_slots();
2885     dealloc();
2886   }
2887 
2888   // Erases, but does not destroy, the value pointed to by `it`.
2889   //
2890   // This merely updates the pertinent control byte. This can be used in
2891   // conjunction with Policy::transfer to move the object to another place.
2892   void erase_meta_only(const_iterator it) {
2893     EraseMetaOnly(common(), static_cast<size_t>(it.control() - control()),
2894                   sizeof(slot_type));
2895   }
2896 
2897   // Resizes the table to the new capacity and moves all elements to their
2898   // new positions accordingly.
2899   //
2900   // Note that, for better performance,
2901   //   HashSetResizeHelper::FindFirstNonFullAfterResize(
2902   //       common(), old_capacity, hash)
2903   // can be called right after `resize` instead of
2904   //   find_first_non_full(common(), hash).
2905   ABSL_ATTRIBUTE_NOINLINE void resize(size_t new_capacity) {
2906     assert(IsValidCapacity(new_capacity));
2907     HashSetResizeHelper resize_helper(common());
2908     auto* old_slots = slot_array();
2909     common().set_capacity(new_capacity);
2910     // Note that `InitializeSlots` performs a different number of
2911     // initialization steps depending on `transfer_uses_memcpy` and the
2912     // capacities involved. Refer to the comment in `InitializeSlots`.
2913     const bool grow_single_group =
2914         resize_helper.InitializeSlots<CharAlloc, sizeof(slot_type),
2915                                       PolicyTraits::transfer_uses_memcpy(),
2916                                       alignof(slot_type)>(
2917             common(), const_cast<std::remove_const_t<slot_type>*>(old_slots),
2918             CharAlloc(alloc_ref()));
2919 
2920     if (resize_helper.old_capacity() == 0) {
2921       // InitializeSlots did all the work including infoz().RecordRehash().
2922       return;
2923     }
2924 
2925     if (grow_single_group) {
2926       if (PolicyTraits::transfer_uses_memcpy()) {
2927         // InitializeSlots did all the work.
2928         return;
2929       }
2930       // We want GrowSizeIntoSingleGroup to be called here so that
2931       // InitializeSlots does not need to depend on PolicyTraits.
2932       resize_helper.GrowSizeIntoSingleGroup<PolicyTraits>(common(), alloc_ref(),
2933                                                           old_slots);
2934     } else {
2935       // InitializeSlots prepares the control bytes for an empty table.
2936       auto* new_slots = slot_array();
2937       size_t total_probe_length = 0;
2938       for (size_t i = 0; i != resize_helper.old_capacity(); ++i) {
2939         if (IsFull(resize_helper.old_ctrl()[i])) {
2940           size_t hash = PolicyTraits::apply(
2941               HashElement{hash_ref()}, PolicyTraits::element(old_slots + i));
2942           auto target = find_first_non_full(common(), hash);
2943           size_t new_i = target.offset;
2944           total_probe_length += target.probe_length;
2945           SetCtrl(common(), new_i, H2(hash), sizeof(slot_type));
2946           transfer(new_slots + new_i, old_slots + i);
2947         }
2948       }
2949       infoz().RecordRehash(total_probe_length);
2950     }
2951     resize_helper.DeallocateOld<alignof(slot_type)>(
2952         CharAlloc(alloc_ref()), sizeof(slot_type),
2953         const_cast<std::remove_const_t<slot_type>*>(old_slots));
2954   }
2955 
2956   // Prunes control bytes to remove as many tombstones as possible.
2957   //
2958   // See the comment on `rehash_and_grow_if_necessary()`.
2959   inline void drop_deletes_without_resize() {
2960     // Stack-allocate space for swapping elements.
2961     alignas(slot_type) unsigned char tmp[sizeof(slot_type)];
2962     DropDeletesWithoutResize(common(), GetPolicyFunctions(), tmp);
2963   }
2964 
2965   // Called whenever the table *might* need to conditionally grow.
2966   //
2967   // This function is an optimization opportunity to perform a rehash even when
2968   // growth is unnecessary, because vacating tombstones is beneficial for
2969   // performance in the long-run.
2970   void rehash_and_grow_if_necessary() {
2971     const size_t cap = capacity();
2972     if (cap > Group::kWidth &&
2973         // Do these calculations in 64-bit to avoid overflow.
2974         size() * uint64_t{32} <= cap * uint64_t{25}) {
2975       // Squash DELETED without growing if there is enough capacity.
2976       //
2977       // Rehash in place if the current size is <= 25/32 of capacity.
2978       // Rationale for such a high factor: 1) drop_deletes_without_resize() is
2979       // faster than resize, and 2) it takes quite a bit of work to add
2980       // tombstones.  In the worst case, it seems to take approximately 4
2981       // insert/erase pairs to create a single tombstone and so if we are
2982       // rehashing because of tombstones, we can afford to rehash-in-place as
2983       // long as we are reclaiming at least 1/8 the capacity without doing more
2984       // than 2X the work.  (Where "work" is defined to be size() for rehashing
2985       // or rehashing in place, and 1 for an insert or erase.)  But rehashing in
2986       // place is faster per operation than inserting or even doubling the size
2987       // of the table, so we can actually afford to reclaim even less space
2988       // from a resize-in-place.  The decision is to rehash in place if we can
2989       // reclaim about 1/8th of the usable capacity (specifically 3/28 of the
2990       // capacity), which means that the total cost of rehashing will be a
2991       // small fraction of the total work.
2992       //
2993       // Here is output of an experiment using the BM_CacheInSteadyState
2994       // benchmark running the old case (where we rehash-in-place only if we can
2995       // reclaim at least 7/16*capacity) vs. this code (which rehashes in place
2996       // if we can recover 3/32*capacity).
2997       //
2998       // Note that although the worst-case number of rehashes jumped from
2999       // 15 to 190, the number of operations per second is almost the same.
3000       //
3001       // Abridged output of running BM_CacheInSteadyState benchmark from
3002       // raw_hash_set_benchmark.   N is the number of insert/erase operations.
3003       //
3004       //      | OLD (recover >= 7/16)       | NEW (recover >= 3/32)
3005       // size |    N/s LoadFactor NRehashes |    N/s LoadFactor NRehashes
3006       //  448 | 145284       0.44        18 | 140118       0.44        19
3007       //  493 | 152546       0.24        11 | 151417       0.48        28
3008       //  538 | 151439       0.26        11 | 151152       0.53        38
3009       //  583 | 151765       0.28        11 | 150572       0.57        50
3010       //  628 | 150241       0.31        11 | 150853       0.61        66
3011       //  672 | 149602       0.33        12 | 150110       0.66        90
3012       //  717 | 149998       0.35        12 | 149531       0.70       129
3013       //  762 | 149836       0.37        13 | 148559       0.74       190
3014       //  807 | 149736       0.39        14 | 151107       0.39        14
3015       //  852 | 150204       0.42        15 | 151019       0.42        15
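           //
           // As a concrete, illustrative instance of the check above: for
           // capacity() == 1023 we rehash in place whenever
           // size() * 32 <= 1023 * 25, i.e. size() <= 799 (roughly 25/32 of the
           // capacity); larger sizes fall through to the doubling resize below.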
3016       drop_deletes_without_resize();
3017     } else {
3018       // Otherwise grow the container.
3019       resize(NextCapacity(cap));
3020     }
3021   }
3022 
3023   void maybe_increment_generation_or_rehash_on_move() {
3024     common().maybe_increment_generation_on_move();
3025     if (!empty() && common().should_rehash_for_bug_detection_on_move()) {
3026       resize(capacity());
3027     }
3028   }
3029 
3030   template<bool propagate_alloc>
3031   raw_hash_set& assign_impl(raw_hash_set&& that) {
3032     // We don't bother checking for this/that aliasing. We just need to avoid
3033     // breaking the invariants in that case.
3034     destructor_impl();
3035     common() = std::move(that.common());
3036     // TODO(b/296061262): move instead of copying hash/eq/alloc.
3037     hash_ref() = that.hash_ref();
3038     eq_ref() = that.eq_ref();
3039     CopyAlloc(alloc_ref(), that.alloc_ref(),
3040               std::integral_constant<bool, propagate_alloc>());
3041     that.common() = CommonFields{};
3042     maybe_increment_generation_or_rehash_on_move();
3043     return *this;
3044   }
3045 
3046   raw_hash_set& move_elements_allocs_unequal(raw_hash_set&& that) {
3047     const size_t size = that.size();
3048     if (size == 0) return *this;
3049     reserve(size);
3050     for (iterator it = that.begin(); it != that.end(); ++it) {
3051       insert(std::move(PolicyTraits::element(it.slot())));
3052       that.destroy(it.slot());
3053     }
3054     that.dealloc();
3055     that.common() = CommonFields{};
3056     maybe_increment_generation_or_rehash_on_move();
3057     return *this;
3058   }
3059 
3060   raw_hash_set& move_assign(raw_hash_set&& that,
3061                             std::true_type /*propagate_alloc*/) {
3062     return assign_impl<true>(std::move(that));
3063   }
3064   raw_hash_set& move_assign(raw_hash_set&& that,
3065                             std::false_type /*propagate_alloc*/) {
3066     if (alloc_ref() == that.alloc_ref()) {
3067       return assign_impl<false>(std::move(that));
3068     }
3069     // Aliasing can't happen here because allocs would compare equal above.
3070     assert(this != &that);
3071     destructor_impl();
3072     // We can't take over `that`'s memory, so we need to move each element.
3073     // While moving elements, `this` should use `that`'s hash/eq, so copy
3074     // hash/eq before moving the elements.
3075     // TODO(b/296061262): move instead of copying hash/eq.
3076     hash_ref() = that.hash_ref();
3077     eq_ref() = that.eq_ref();
3078     return move_elements_allocs_unequal(std::move(that));
3079   }
3080 
3081  protected:
3082   // Attempts to find `key` in the table; if it isn't found, returns a slot that
3083   // the value can be inserted into, with the control byte already set to
3084   // `key`'s H2.
3085   template <class K>
3086   std::pair<size_t, bool> find_or_prepare_insert(const K& key) {
3087     prefetch_heap_block();
3088     auto hash = hash_ref()(key);
3089     auto seq = probe(common(), hash);
3090     const ctrl_t* ctrl = control();
3091     while (true) {
3092       Group g{ctrl + seq.offset()};
3093       for (uint32_t i : g.Match(H2(hash))) {
3094         if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
3095                 EqualElement<K>{key, eq_ref()},
3096                 PolicyTraits::element(slot_array() + seq.offset(i)))))
3097           return {seq.offset(i), false};
3098       }
3099       if (ABSL_PREDICT_TRUE(g.MaskEmpty())) break;
3100       seq.next();
3101       assert(seq.index() <= capacity() && "full table!");
3102     }
3103     return {prepare_insert(hash), true};
3104   }
3105 
3106   // Given the hash of a value not currently in the table, finds the next
3107   // viable slot index to insert it at.
3108   //
3109   // REQUIRES: At least one non-full slot available.
3110   size_t prepare_insert(size_t hash) ABSL_ATTRIBUTE_NOINLINE {
3111     const bool rehash_for_bug_detection =
3112         common().should_rehash_for_bug_detection_on_insert();
3113     if (rehash_for_bug_detection) {
3114       // Move to a different heap allocation in order to detect bugs.
3115       const size_t cap = capacity();
3116       resize(growth_left() > 0 ? cap : NextCapacity(cap));
3117     }
3118     auto target = find_first_non_full(common(), hash);
3119     if (!rehash_for_bug_detection &&
3120         ABSL_PREDICT_FALSE(growth_left() == 0 &&
3121                            !IsDeleted(control()[target.offset]))) {
3122       size_t old_capacity = capacity();
3123       rehash_and_grow_if_necessary();
3124       // NOTE: It is safe to use `FindFirstNonFullAfterResize` here.
3125       // `FindFirstNonFullAfterResize` must be called right after a resize.
3126       // `rehash_and_grow_if_necessary` may skip `resize` and perform
3127       // `drop_deletes_without_resize` instead, but that can only happen
3128       // for big tables, and for big tables
3129       // `FindFirstNonFullAfterResize` always falls back to the normal
3130       // `find_first_non_full`, so it is safe to use it here as well.
3131       target = HashSetResizeHelper::FindFirstNonFullAfterResize(
3132           common(), old_capacity, hash);
3133     }
3134     common().increment_size();
3135     set_growth_left(growth_left() - IsEmpty(control()[target.offset]));
3136     SetCtrl(common(), target.offset, H2(hash), sizeof(slot_type));
3137     common().maybe_increment_generation_on_insert();
3138     infoz().RecordInsert(hash, target.probe_length);
3139     return target.offset;
3140   }
3141 
3142   // Constructs the value in the space pointed to by the iterator. This only works
3143   // after an unsuccessful find_or_prepare_insert() and before any other
3144   // modifications happen in the raw_hash_set.
3145   //
3146   // PRECONDITION: i is an index returned from find_or_prepare_insert(k), where
3147   // k is the key decomposed from `forward<Args>(args)...`, and the bool
3148   // returned by find_or_prepare_insert(k) was true.
3149   // POSTCONDITION: *m.iterator_at(i) == value_type(forward<Args>(args)...).
3150   template <class... Args>
3151   void emplace_at(size_t i, Args&&... args) {
3152     construct(slot_array() + i, std::forward<Args>(args)...);
3153 
3154     assert(PolicyTraits::apply(FindElement{*this}, *iterator_at(i)) ==
3155                iterator_at(i) &&
3156            "constructed value does not match the lookup key");
3157   }
3158 
3159   iterator iterator_at(size_t i) ABSL_ATTRIBUTE_LIFETIME_BOUND {
3160     return {control() + i, slot_array() + i, common().generation_ptr()};
3161   }
3162   const_iterator iterator_at(size_t i) const ABSL_ATTRIBUTE_LIFETIME_BOUND {
3163     return {control() + i, slot_array() + i, common().generation_ptr()};
3164   }
3165 
3166   reference unchecked_deref(iterator it) { return it.unchecked_deref(); }
3167 
3168  private:
3169   friend struct RawHashSetTestOnlyAccess;
3170 
3171   // The number of slots we can still fill without needing to rehash.
3172   //
3173   // This is stored separately due to tombstones: we do not include tombstones
3174   // in the growth capacity because we'd like to rehash when the table is
3175   // otherwise filled with tombstones; without that, probe sequences might
3176   // get unacceptably long without ever triggering a rehash. Callers can also
3177   // force a rehash via the standard `rehash(0)`, which will recompute this
3178   // value as a side-effect.
3179   //
3180   // See `CapacityToGrowth()`.
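       //
       // As an illustrative data point (per `CapacityToGrowth()` elsewhere in
       // this file), a table with capacity() == 1023 has a growth capacity of
       // 1023 - 1023 / 8 == 896, i.e. roughly a 7/8 maximum load factor.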
3181   size_t growth_left() const { return common().growth_left(); }
3182   void set_growth_left(size_t gl) { return common().set_growth_left(gl); }
3183 
3184   // Prefetch the heap-allocated memory region to resolve potential TLB and
3185   // cache misses. This is intended to overlap with execution of calculating the
3186   // hash for a key.
3187   void prefetch_heap_block() const {
3188 #if ABSL_HAVE_BUILTIN(__builtin_prefetch) || defined(__GNUC__)
3189     __builtin_prefetch(control(), 0, 1);
3190 #endif
3191   }
3192 
3193   CommonFields& common() { return settings_.template get<0>(); }
3194   const CommonFields& common() const { return settings_.template get<0>(); }
3195 
3196   ctrl_t* control() const { return common().control(); }
3197   slot_type* slot_array() const {
3198     return static_cast<slot_type*>(common().slot_array());
3199   }
3200   HashtablezInfoHandle infoz() { return common().infoz(); }
3201 
3202   hasher& hash_ref() { return settings_.template get<1>(); }
3203   const hasher& hash_ref() const { return settings_.template get<1>(); }
3204   key_equal& eq_ref() { return settings_.template get<2>(); }
3205   const key_equal& eq_ref() const { return settings_.template get<2>(); }
3206   allocator_type& alloc_ref() { return settings_.template get<3>(); }
3207   const allocator_type& alloc_ref() const {
3208     return settings_.template get<3>();
3209   }
3210 
3211   // Make type-specific functions for this type's PolicyFunctions struct.
3212   static size_t hash_slot_fn(void* set, void* slot) {
3213     auto* h = static_cast<raw_hash_set*>(set);
3214     return PolicyTraits::apply(
3215         HashElement{h->hash_ref()},
3216         PolicyTraits::element(static_cast<slot_type*>(slot)));
3217   }
3218   static void transfer_slot_fn(void* set, void* dst, void* src) {
3219     auto* h = static_cast<raw_hash_set*>(set);
3220     h->transfer(static_cast<slot_type*>(dst), static_cast<slot_type*>(src));
3221   }
3222   // Note: dealloc_fn will only be used if we have a non-standard allocator.
3223   static void dealloc_fn(CommonFields& common, const PolicyFunctions&) {
3224     auto* set = reinterpret_cast<raw_hash_set*>(&common);
3225 
3226     // Unpoison before returning the memory to the allocator.
3227     SanitizerUnpoisonMemoryRegion(common.slot_array(),
3228                                   sizeof(slot_type) * common.capacity());
3229 
3230     common.infoz().Unregister();
3231     Deallocate<BackingArrayAlignment(alignof(slot_type))>(
3232         &set->alloc_ref(), common.backing_array_start(),
3233         common.alloc_size(sizeof(slot_type), alignof(slot_type)));
3234   }
3235 
3236   static const PolicyFunctions& GetPolicyFunctions() {
3237     static constexpr PolicyFunctions value = {
3238         sizeof(slot_type),
3239         &raw_hash_set::hash_slot_fn,
3240         PolicyTraits::transfer_uses_memcpy()
3241             ? TransferRelocatable<sizeof(slot_type)>
3242             : &raw_hash_set::transfer_slot_fn,
3243         (std::is_same<SlotAlloc, std::allocator<slot_type>>::value
3244              ? &DeallocateStandard<alignof(slot_type)>
3245              : &raw_hash_set::dealloc_fn),
3246     };
3247     return value;
3248   }
3249 
3250   // Bundle together CommonFields plus other objects which might be empty.
3251   // CompressedTuple will ensure that sizeof is not affected by any of the empty
3252   // fields that occur after CommonFields.
3253   absl::container_internal::CompressedTuple<CommonFields, hasher, key_equal,
3254                                             allocator_type>
3255       settings_{CommonFields{}, hasher{}, key_equal{}, allocator_type{}};
3256 };
3257 
3258 // Erases all elements that satisfy the predicate `pred` from the container `c`.
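     //
     // For example (a hedged sketch; `pred` binds to a non-const lvalue
     // reference, so pass a named predicate rather than a temporary):
     //
     //   auto is_negative = [](int v) { return v < 0; };
     //   size_t removed = EraseIf(is_negative, &set);  // `set` is an assumed
     //                                                 // raw_hash_set of ints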
3259 template <typename P, typename H, typename E, typename A, typename Predicate>
3260 typename raw_hash_set<P, H, E, A>::size_type EraseIf(
3261     Predicate& pred, raw_hash_set<P, H, E, A>* c) {
3262   const auto initial_size = c->size();
3263   for (auto it = c->begin(), last = c->end(); it != last;) {
3264     if (pred(*it)) {
3265       c->erase(it++);
3266     } else {
3267       ++it;
3268     }
3269   }
3270   return initial_size - c->size();
3271 }
3272 
3273 namespace hashtable_debug_internal {
3274 template <typename Set>
3275 struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
3276   using Traits = typename Set::PolicyTraits;
3277   using Slot = typename Traits::slot_type;
3278 
3279   static size_t GetNumProbes(const Set& set,
3280                              const typename Set::key_type& key) {
3281     size_t num_probes = 0;
3282     size_t hash = set.hash_ref()(key);
3283     auto seq = probe(set.common(), hash);
3284     const ctrl_t* ctrl = set.control();
3285     while (true) {
3286       container_internal::Group g{ctrl + seq.offset()};
3287       for (uint32_t i : g.Match(container_internal::H2(hash))) {
3288         if (Traits::apply(
3289                 typename Set::template EqualElement<typename Set::key_type>{
3290                     key, set.eq_ref()},
3291                 Traits::element(set.slot_array() + seq.offset(i))))
3292           return num_probes;
3293         ++num_probes;
3294       }
3295       if (g.MaskEmpty()) return num_probes;
3296       seq.next();
3297       ++num_probes;
3298     }
3299   }
3300 
3301   static size_t AllocatedByteSize(const Set& c) {
3302     size_t capacity = c.capacity();
3303     if (capacity == 0) return 0;
3304     size_t m = c.common().alloc_size(sizeof(Slot), alignof(Slot));
3305 
3306     size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
3307     if (per_slot != ~size_t{}) {
3308       m += per_slot * c.size();
3309     } else {
3310       for (auto it = c.begin(); it != c.end(); ++it) {
3311         m += Traits::space_used(it.slot());
3312       }
3313     }
3314     return m;
3315   }
3316 };
3317 
3318 }  // namespace hashtable_debug_internal
3319 }  // namespace container_internal
3320 ABSL_NAMESPACE_END
3321 }  // namespace absl
3322 
3323 #undef ABSL_SWISSTABLE_ENABLE_GENERATIONS
3324 
3325 #endif  // ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
3326