// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// An open-addressing hashtable with quadratic probing.
//
// This is a low level hashtable on top of which different interfaces can be
// implemented, like flat_hash_set, node_hash_set, string_hash_set, etc.
//
// The table interface is similar to that of std::unordered_set. Notable
// differences are that most member functions support heterogeneous keys when
// BOTH the hash and eq functions are marked as transparent. They do so by
// providing a typedef called `is_transparent`.
//
// When heterogeneous lookup is enabled, functions that take key_type act as if
// they have an overload set like:
//
//   iterator find(const key_type& key);
//   template <class K>
//   iterator find(const K& key);
//
//   size_type erase(const key_type& key);
//   template <class K>
//   size_type erase(const K& key);
//
//   std::pair<iterator, iterator> equal_range(const key_type& key);
//   template <class K>
//   std::pair<iterator, iterator> equal_range(const K& key);
//
// When heterogeneous lookup is disabled, only the explicit `key_type` overloads
// exist.
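//
// For example (a sketch using the flat_hash_set wrapper, whose default
// functors are transparent for string types), a set of std::string can be
// queried with a string_view key without materializing a std::string:
//
//   absl::flat_hash_set<std::string> s = {"abc", "def"};
//   auto it = s.find(absl::string_view("abc"));  // no std::string temporary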
//
// find() also supports passing the hash explicitly:
//
//   iterator find(const key_type& key, size_t hash);
//   template <class U>
//   iterator find(const U& key, size_t hash);
//
// In addition the pointer to element and iterator stability guarantees are
// weaker: all iterators and pointers are invalidated after a new element is
// inserted.
//
// IMPLEMENTATION DETAILS
//
// The table stores elements inline in a slot array. In addition to the slot
// array the table maintains some control state per slot. The extra state is one
// byte per slot and stores empty or deleted marks, or alternatively 7 bits from
// the hash of an occupied slot. The table is split into logical groups of
// slots, like so:
//
//      Group 1         Group 2        Group 3
// +---------------+---------------+---------------+
// | | | | | | | | | | | | | | | | | | | | | | | | |
// +---------------+---------------+---------------+
//
// On lookup the hash is split into two parts:
// - H2: 7 bits (those stored in the control bytes)
// - H1: the rest of the bits
// The groups are probed using H1. For each group the slots are matched to H2 in
// parallel. Because H2 is 7 bits (128 states) and the number of slots per group
// is low (8 or 16) in almost all cases a match in H2 is also a lookup hit.
//
// On insert, once the right group is found (as in lookup), its slots are
// filled in order.
//
// On erase a slot is cleared. In case the group did not have any empty slots
// before the erase, the erased slot is marked as deleted.
//
// Groups without empty slots (but maybe with deleted slots) extend the probe
// sequence. The probing algorithm is quadratic. Given N the number of groups,
// the probing function for the i'th probe is:
//
//   P(0) = H1 % N
//
//   P(i) = (P(i - 1) + i) % N
//
// This probing function guarantees that after N probes, all the groups of the
// table will be probed exactly once.
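//
// For example, with N = 4 groups and H1 % N == 2, the groups are visited in
// the order 2, 3, 1, 0:
//
//   P(0) = 2
//   P(1) = (2 + 1) % 4 = 3
//   P(2) = (3 + 2) % 4 = 1
//   P(3) = (1 + 3) % 4 = 0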

#ifndef ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
#define ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstring>
#include <iterator>
#include <limits>
#include <memory>
#include <tuple>
#include <type_traits>
#include <utility>

#include "absl/base/internal/endian.h"
#include "absl/base/optimization.h"
#include "absl/base/port.h"
#include "absl/container/internal/common.h"
#include "absl/container/internal/compressed_tuple.h"
#include "absl/container/internal/container_memory.h"
#include "absl/container/internal/hash_policy_traits.h"
#include "absl/container/internal/hashtable_debug_hooks.h"
#include "absl/container/internal/hashtablez_sampler.h"
#include "absl/container/internal/have_sse.h"
#include "absl/memory/memory.h"
#include "absl/meta/type_traits.h"
#include "absl/numeric/bits.h"
#include "absl/utility/utility.h"

namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {

template <typename AllocType>
void SwapAlloc(AllocType& lhs, AllocType& rhs,
               std::true_type /* propagate_on_container_swap */) {
  using std::swap;
  swap(lhs, rhs);
}
template <typename AllocType>
void SwapAlloc(AllocType& /*lhs*/, AllocType& /*rhs*/,
               std::false_type /* propagate_on_container_swap */) {}

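// probe_seq iterates over a table in the quadratic order described at the top
// of this file. Starting from `hash & mask`, each next() advances the offset
// by Width, 2 * Width, 3 * Width, ..., so after k steps the offset has moved
// by Width * k * (k + 1) / 2 (mod mask + 1), the triangular-number form of
// the probing function P above.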
template <size_t Width>
class probe_seq {
 public:
  probe_seq(size_t hash, size_t mask) {
    assert(((mask + 1) & mask) == 0 && "not a mask");
    mask_ = mask;
    offset_ = hash & mask_;
  }
  size_t offset() const { return offset_; }
  size_t offset(size_t i) const { return (offset_ + i) & mask_; }

  void next() {
    index_ += Width;
    offset_ += index_;
    offset_ &= mask_;
  }
  // 0-based probe index. The i-th probe in the probe sequence.
  size_t index() const { return index_; }

 private:
  size_t mask_;
  size_t offset_;
  size_t index_ = 0;
};

template <class ContainerKey, class Hash, class Eq>
struct RequireUsableKey {
  template <class PassedKey, class... Args>
  std::pair<
      decltype(std::declval<const Hash&>()(std::declval<const PassedKey&>())),
      decltype(std::declval<const Eq&>()(std::declval<const ContainerKey&>(),
                                         std::declval<const PassedKey&>()))>*
  operator()(const PassedKey&, const Args&...) const;
};

template <class E, class Policy, class Hash, class Eq, class... Ts>
struct IsDecomposable : std::false_type {};

template <class Policy, class Hash, class Eq, class... Ts>
struct IsDecomposable<
    absl::void_t<decltype(
        Policy::apply(RequireUsableKey<typename Policy::key_type, Hash, Eq>(),
                      std::declval<Ts>()...))>,
    Policy, Hash, Eq, Ts...> : std::true_type {};

// TODO(alkis): Switch to std::is_nothrow_swappable when gcc/clang supports it.
template <class T>
constexpr bool IsNoThrowSwappable(std::true_type = {} /* is_swappable */) {
  using std::swap;
  return noexcept(swap(std::declval<T&>(), std::declval<T&>()));
}
template <class T>
constexpr bool IsNoThrowSwappable(std::false_type /* is_swappable */) {
  return false;
}

template <typename T>
uint32_t TrailingZeros(T x) {
  ABSL_INTERNAL_ASSUME(x != 0);
  return countr_zero(x);
}

// An abstraction over a bitmask. It provides an easy way to iterate through the
// indexes of the set bits of a bitmask.  When Shift=0 (platforms with SSE),
// this is a true bitmask.  On non-SSE platforms, the arithmetic used to
// emulate the SSE behavior works in bytes (Shift=3) and leaves each byte as
// either 0x00 or 0x80.
//
// For example:
//   for (int i : BitMask<uint32_t, 16>(0x5)) -> yields 0, 2
//   for (int i : BitMask<uint64_t, 8, 3>(0x0000000080800000)) -> yields 2, 3
template <class T, int SignificantBits, int Shift = 0>
class BitMask {
  static_assert(std::is_unsigned<T>::value, "");
  static_assert(Shift == 0 || Shift == 3, "");

 public:
  // These are useful for unit tests (gunit).
  using value_type = int;
  using iterator = BitMask;
  using const_iterator = BitMask;

  explicit BitMask(T mask) : mask_(mask) {}
  BitMask& operator++() {
    mask_ &= (mask_ - 1);
    return *this;
  }
  explicit operator bool() const { return mask_ != 0; }
  int operator*() const { return LowestBitSet(); }
  uint32_t LowestBitSet() const {
    return container_internal::TrailingZeros(mask_) >> Shift;
  }
  uint32_t HighestBitSet() const {
    return static_cast<uint32_t>((bit_width(mask_) - 1) >> Shift);
  }

  BitMask begin() const { return *this; }
  BitMask end() const { return BitMask(0); }

  uint32_t TrailingZeros() const {
    return container_internal::TrailingZeros(mask_) >> Shift;
  }

  uint32_t LeadingZeros() const {
    constexpr int total_significant_bits = SignificantBits << Shift;
    constexpr int extra_bits = sizeof(T) * 8 - total_significant_bits;
    return countl_zero(mask_ << extra_bits) >> Shift;
  }

 private:
  friend bool operator==(const BitMask& a, const BitMask& b) {
    return a.mask_ == b.mask_;
  }
  friend bool operator!=(const BitMask& a, const BitMask& b) {
    return a.mask_ != b.mask_;
  }

  T mask_;
};

using h2_t = uint8_t;

// The values here are selected for maximum performance. See the static asserts
// below for details. We use an enum class so that when strict aliasing is
// enabled, the compiler knows ctrl_t doesn't alias other types.
enum class ctrl_t : int8_t {
  kEmpty = -128,   // 0b10000000
  kDeleted = -2,   // 0b11111110
  kSentinel = -1,  // 0b11111111
};
static_assert(
    (static_cast<int8_t>(ctrl_t::kEmpty) &
     static_cast<int8_t>(ctrl_t::kDeleted) &
     static_cast<int8_t>(ctrl_t::kSentinel) & 0x80) != 0,
    "Special markers need to have the MSB to make checking for them efficient");
static_assert(
    ctrl_t::kEmpty < ctrl_t::kSentinel && ctrl_t::kDeleted < ctrl_t::kSentinel,
    "ctrl_t::kEmpty and ctrl_t::kDeleted must be smaller than "
    "ctrl_t::kSentinel to make the SIMD test of IsEmptyOrDeleted() efficient");
static_assert(
    ctrl_t::kSentinel == static_cast<ctrl_t>(-1),
    "ctrl_t::kSentinel must be -1 to elide loading it from memory into SIMD "
    "registers (pcmpeqd xmm, xmm)");
static_assert(ctrl_t::kEmpty == static_cast<ctrl_t>(-128),
              "ctrl_t::kEmpty must be -128 to make the SIMD check for its "
              "existence efficient (psignb xmm, xmm)");
static_assert(
    (~static_cast<int8_t>(ctrl_t::kEmpty) &
     ~static_cast<int8_t>(ctrl_t::kDeleted) &
     static_cast<int8_t>(ctrl_t::kSentinel) & 0x7F) != 0,
    "ctrl_t::kEmpty and ctrl_t::kDeleted must share an unset bit that is not "
    "shared by ctrl_t::kSentinel to make the scalar test for "
    "MatchEmptyOrDeleted() efficient");
static_assert(ctrl_t::kDeleted == static_cast<ctrl_t>(-2),
              "ctrl_t::kDeleted must be -2 to make the implementation of "
              "ConvertSpecialToEmptyAndFullToDeleted efficient");

// A single block of empty control bytes for tables without any slots allocated.
// This enables removing a branch in the hot path of find().
ABSL_DLL extern const ctrl_t kEmptyGroup[16];
inline ctrl_t* EmptyGroup() {
  return const_cast<ctrl_t*>(kEmptyGroup);
}

// Mixes a randomly generated per-process seed with `hash` and `ctrl` to
// randomize insertion order within groups.
bool ShouldInsertBackwards(size_t hash, const ctrl_t* ctrl);

// Returns a hash seed.
//
// The seed consists of the ctrl_ pointer, which adds enough entropy to ensure
// non-determinism of iteration order in most cases.
inline size_t HashSeed(const ctrl_t* ctrl) {
  // The low bits of the pointer have little or no entropy because of
  // alignment. We shift the pointer to try to use higher entropy bits. A
  // good number seems to be 12 bits, because that aligns with page size.
  return reinterpret_cast<uintptr_t>(ctrl) >> 12;
}

inline size_t H1(size_t hash, const ctrl_t* ctrl) {
  return (hash >> 7) ^ HashSeed(ctrl);
}
inline h2_t H2(size_t hash) { return hash & 0x7F; }
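// For example, for hash == 0x1234 the split is:
//   H2(0x1234)    == 0x1234 & 0x7F == 0x34       (low 7 bits, kept in ctrl)
//   H1(0x1234, c) == (0x1234 >> 7) ^ HashSeed(c) (selects the probe start)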

inline bool IsEmpty(ctrl_t c) { return c == ctrl_t::kEmpty; }
inline bool IsFull(ctrl_t c) { return c >= static_cast<ctrl_t>(0); }
inline bool IsDeleted(ctrl_t c) { return c == ctrl_t::kDeleted; }
inline bool IsEmptyOrDeleted(ctrl_t c) { return c < ctrl_t::kSentinel; }

#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2

// https://github.com/abseil/abseil-cpp/issues/209
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=87853
// _mm_cmpgt_epi8 is broken under GCC with -funsigned-char
// Work around this by using the portable implementation of Group
// when using -funsigned-char under GCC.
inline __m128i _mm_cmpgt_epi8_fixed(__m128i a, __m128i b) {
#if defined(__GNUC__) && !defined(__clang__)
  if (std::is_unsigned<char>::value) {
    const __m128i mask = _mm_set1_epi8(0x80);
    const __m128i diff = _mm_subs_epi8(b, a);
    return _mm_cmpeq_epi8(_mm_and_si128(diff, mask), mask);
  }
#endif
  return _mm_cmpgt_epi8(a, b);
}

struct GroupSse2Impl {
  static constexpr size_t kWidth = 16;  // the number of slots per group

  explicit GroupSse2Impl(const ctrl_t* pos) {
    ctrl = _mm_loadu_si128(reinterpret_cast<const __m128i*>(pos));
  }

  // Returns a bitmask representing the positions of slots that match hash.
  BitMask<uint32_t, kWidth> Match(h2_t hash) const {
    auto match = _mm_set1_epi8(hash);
    return BitMask<uint32_t, kWidth>(
        _mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl)));
  }

  // Returns a bitmask representing the positions of empty slots.
  BitMask<uint32_t, kWidth> MatchEmpty() const {
#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3
    // This only works because ctrl_t::kEmpty is -128.
    return BitMask<uint32_t, kWidth>(
        _mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl)));
#else
    return Match(static_cast<h2_t>(ctrl_t::kEmpty));
#endif
  }

  // Returns a bitmask representing the positions of empty or deleted slots.
  BitMask<uint32_t, kWidth> MatchEmptyOrDeleted() const {
    auto special = _mm_set1_epi8(static_cast<int8_t>(ctrl_t::kSentinel));
    return BitMask<uint32_t, kWidth>(
        _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)));
  }

  // Returns the number of trailing empty or deleted elements in the group.
  uint32_t CountLeadingEmptyOrDeleted() const {
    auto special = _mm_set1_epi8(static_cast<int8_t>(ctrl_t::kSentinel));
    return TrailingZeros(static_cast<uint32_t>(
        _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)) + 1));
  }

  void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
    auto msbs = _mm_set1_epi8(static_cast<char>(-128));
    auto x126 = _mm_set1_epi8(126);
#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3
    auto res = _mm_or_si128(_mm_shuffle_epi8(x126, ctrl), msbs);
#else
    auto zero = _mm_setzero_si128();
    auto special_mask = _mm_cmpgt_epi8_fixed(zero, ctrl);
    auto res = _mm_or_si128(msbs, _mm_andnot_si128(special_mask, x126));
#endif
    _mm_storeu_si128(reinterpret_cast<__m128i*>(dst), res);
  }

  __m128i ctrl;
};
#endif  // ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2

struct GroupPortableImpl {
  static constexpr size_t kWidth = 8;

  explicit GroupPortableImpl(const ctrl_t* pos)
      : ctrl(little_endian::Load64(pos)) {}

  BitMask<uint64_t, kWidth, 3> Match(h2_t hash) const {
    // For the technique, see:
    // http://graphics.stanford.edu/~seander/bithacks.html#ValueInWord
    // (Determine if a word has a byte equal to n).
    //
    // Caveat: there are false positives but:
    // - they only occur if there is a real match
    // - they never occur on ctrl_t::kEmpty, ctrl_t::kDeleted, ctrl_t::kSentinel
    // - they will be handled gracefully by subsequent checks in code
    //
    // Example:
    //   v = 0x1716151413121110
    //   hash = 0x12
    //   retval = (v - lsbs) & ~v & msbs = 0x0000000080800000
    constexpr uint64_t msbs = 0x8080808080808080ULL;
    constexpr uint64_t lsbs = 0x0101010101010101ULL;
    auto x = ctrl ^ (lsbs * hash);
    return BitMask<uint64_t, kWidth, 3>((x - lsbs) & ~x & msbs);
  }

  BitMask<uint64_t, kWidth, 3> MatchEmpty() const {
    constexpr uint64_t msbs = 0x8080808080808080ULL;
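    // A byte matches iff its MSB is set and its bit 1 is clear: shifting the
    // inverted bit 1 up to the MSB keeps only kEmpty (0b10000000), since
    // kDeleted (0b11111110) and kSentinel (0b11111111) both have bit 1 set.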
    return BitMask<uint64_t, kWidth, 3>((ctrl & (~ctrl << 6)) & msbs);
  }

  BitMask<uint64_t, kWidth, 3> MatchEmptyOrDeleted() const {
    constexpr uint64_t msbs = 0x8080808080808080ULL;
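    // Same trick with bit 0: kEmpty and kDeleted have the MSB set and bit 0
    // clear, while kSentinel (0b11111111) has bit 0 set and full bytes have
    // the MSB clear.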
    return BitMask<uint64_t, kWidth, 3>((ctrl & (~ctrl << 7)) & msbs);
  }

  uint32_t CountLeadingEmptyOrDeleted() const {
    constexpr uint64_t gaps = 0x00FEFEFEFEFEFEFEULL;
    return (TrailingZeros(((~ctrl & (ctrl >> 7)) | gaps) + 1) + 7) >> 3;
  }

  void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
    constexpr uint64_t msbs = 0x8080808080808080ULL;
    constexpr uint64_t lsbs = 0x0101010101010101ULL;
    auto x = ctrl & msbs;
    auto res = (~x + (x >> 7)) & ~lsbs;
    little_endian::Store64(dst, res);
  }

  uint64_t ctrl;
};

#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
using Group = GroupSse2Impl;
#else
using Group = GroupPortableImpl;
#endif

// The number of cloned control bytes that we copy from the beginning to the
// end of the control bytes array.
constexpr size_t NumClonedBytes() { return Group::kWidth - 1; }

template <class Policy, class Hash, class Eq, class Alloc>
class raw_hash_set;

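// Valid capacities are powers of two minus one (1, 3, 7, 15, ...), chosen so
// that `capacity` can double as the bit mask for a probe_seq.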
inline bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; }

// PRECONDITION:
//   IsValidCapacity(capacity)
//   ctrl[capacity] == ctrl_t::kSentinel
//   ctrl[i] != ctrl_t::kSentinel for all i < capacity
// Applies mapping for every byte in ctrl:
//   DELETED -> EMPTY
//   EMPTY -> EMPTY
//   FULL -> DELETED
void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity);

// Rounds up the capacity to the next power of 2 minus 1, with a minimum of 1.
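// For example, NormalizeCapacity(12) == 15 and NormalizeCapacity(16) == 31;
// an input that is already of the form 2^k - 1 is returned unchanged.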
inline size_t NormalizeCapacity(size_t n) {
  return n ? ~size_t{} >> countl_zero(n) : 1;
}

// General notes on capacity/growth methods below:
// - We use 7/8th as maximum load factor. For 16-wide groups, that gives an
//   average of two empty slots per group.
// - For (capacity+1) >= Group::kWidth, growth is 7/8*capacity.
// - For (capacity+1) < Group::kWidth, growth == capacity. In this case, we
//   never need to probe (the whole table fits in one group) so we don't need a
//   load factor less than 1.

// Given `capacity` of the table, returns the size (i.e. number of full slots)
// at which we should grow the capacity.
inline size_t CapacityToGrowth(size_t capacity) {
  assert(IsValidCapacity(capacity));
  // `capacity*7/8`
  if (Group::kWidth == 8 && capacity == 7) {
    // x-x/8 does not work when x==7.
    return 6;
  }
  return capacity - capacity / 8;
}
// From a desired "growth" to a lower bound on the necessary capacity. The
// result might not be a valid capacity and may require NormalizeCapacity().
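// For example, with Group::kWidth == 16, GrowthToLowerboundCapacity(14) ==
// 14 + 13 / 7 == 15, which round-trips with CapacityToGrowth(15) ==
// 15 - 15 / 8 == 14.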
inline size_t GrowthToLowerboundCapacity(size_t growth) {
  // `growth*8/7`
  if (Group::kWidth == 8 && growth == 7) {
    // x+(x-1)/7 does not work when x==7.
    return 8;
  }
  return growth + static_cast<size_t>((static_cast<int64_t>(growth) - 1) / 7);
}

template <class InputIter>
size_t SelectBucketCountForIterRange(InputIter first, InputIter last,
                                     size_t bucket_count) {
  if (bucket_count != 0) {
    return bucket_count;
  }
  using InputIterCategory =
      typename std::iterator_traits<InputIter>::iterator_category;
  if (std::is_base_of<std::random_access_iterator_tag,
                      InputIterCategory>::value) {
    return GrowthToLowerboundCapacity(
        static_cast<size_t>(std::distance(first, last)));
  }
  return 0;
}

inline void AssertIsFull(ctrl_t* ctrl) {
  ABSL_HARDENING_ASSERT((ctrl != nullptr && IsFull(*ctrl)) &&
                        "Invalid operation on iterator. The element might have "
                        "been erased, or the table might have rehashed.");
}

inline void AssertIsValid(ctrl_t* ctrl) {
  ABSL_HARDENING_ASSERT((ctrl == nullptr || IsFull(*ctrl)) &&
                        "Invalid operation on iterator. The element might have "
                        "been erased, or the table might have rehashed.");
}

struct FindInfo {
  size_t offset;
  size_t probe_length;
};

// The representation of the object has two modes:
//  - small: For capacities < kWidth-1
//  - large: For the rest.
//
// Differences:
//  - In small mode we are able to use the whole capacity. The extra control
//  bytes give us at least one "empty" control byte to stop the iteration.
//  This is important to make 1 a valid capacity.
//
//  - In small mode only the first `capacity()` control bytes after the
//  sentinel are valid. The rest contain dummy ctrl_t::kEmpty values that do not
//  represent a real slot. This is important to take into account on
//  find_first_non_full(), where we never try ShouldInsertBackwards() for
//  small tables.
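//
// For example, with Group::kWidth == 16 the valid capacities 1, 3 and 7 use
// small mode, while 15 and above use large mode.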
inline bool is_small(size_t capacity) { return capacity < Group::kWidth - 1; }

inline probe_seq<Group::kWidth> probe(const ctrl_t* ctrl, size_t hash,
                                      size_t capacity) {
  return probe_seq<Group::kWidth>(H1(hash, ctrl), capacity);
}

// Probes the raw_hash_set with the probe sequence for hash and returns the
// offset of the first empty or deleted slot.
// NOTE: this function must work with tables having both ctrl_t::kEmpty and
// ctrl_t::kDeleted in one group. Such tables appear during
// drop_deletes_without_resize.
//
// This function is very useful when insertions happen and:
// - the input is already a set
// - there are enough slots
// - the element with the hash is not in the table
template <typename = void>
inline FindInfo find_first_non_full(const ctrl_t* ctrl, size_t hash,
                                    size_t capacity) {
  auto seq = probe(ctrl, hash, capacity);
  while (true) {
    Group g{ctrl + seq.offset()};
    auto mask = g.MatchEmptyOrDeleted();
    if (mask) {
#if !defined(NDEBUG)
      // We want to add entropy even when ASLR is not enabled.
      // In debug build we will randomly insert in either the front or back of
      // the group.
      // TODO(kfm,sbenza): revisit after we do unconditional mixing
      if (!is_small(capacity) && ShouldInsertBackwards(hash, ctrl)) {
        return {seq.offset(mask.HighestBitSet()), seq.index()};
      }
#endif
      return {seq.offset(mask.LowestBitSet()), seq.index()};
    }
    seq.next();
    assert(seq.index() <= capacity && "full table!");
  }
}

// The extern template declaration keeps the possibility of inlining for this
// function. When the compiler decides not to inline it, no symbols will be
// added to the corresponding translation unit.
extern template FindInfo find_first_non_full(const ctrl_t*, size_t, size_t);

// Reset all ctrl bytes back to ctrl_t::kEmpty, except the sentinel.
inline void ResetCtrl(size_t capacity, ctrl_t* ctrl, const void* slot,
                      size_t slot_size) {
  std::memset(ctrl, static_cast<int8_t>(ctrl_t::kEmpty),
              capacity + 1 + NumClonedBytes());
  ctrl[capacity] = ctrl_t::kSentinel;
  SanitizerPoisonMemoryRegion(slot, slot_size * capacity);
}

// Sets the control byte, and if `i < NumClonedBytes()`, sets the cloned byte
// at the end too.
inline void SetCtrl(size_t i, ctrl_t h, size_t capacity, ctrl_t* ctrl,
                    const void* slot, size_t slot_size) {
  assert(i < capacity);

  auto* slot_i = static_cast<const char*>(slot) + i * slot_size;
  if (IsFull(h)) {
    SanitizerUnpoisonMemoryRegion(slot_i, slot_size);
  } else {
    SanitizerPoisonMemoryRegion(slot_i, slot_size);
  }

  ctrl[i] = h;
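  // Mirror the write into the cloned bytes: for i < NumClonedBytes() the
  // index below evaluates to `capacity + 1 + i` (just past the sentinel);
  // otherwise it wraps back to `i` and harmlessly rewrites ctrl[i], which
  // avoids a branch.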
  ctrl[((i - NumClonedBytes()) & capacity) + (NumClonedBytes() & capacity)] = h;
}

inline void SetCtrl(size_t i, h2_t h, size_t capacity, ctrl_t* ctrl,
                    const void* slot, size_t slot_size) {
  SetCtrl(i, static_cast<ctrl_t>(h), capacity, ctrl, slot, slot_size);
}

// The allocated block consists of `capacity + 1 + NumClonedBytes()` control
// bytes followed by `capacity` slots, which must be aligned to `slot_align`.
// SlotOffset returns the offset of the slots into the allocated block.
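// For example, with capacity == 15, Group::kWidth == 16 and slot_align == 8,
// there are 31 control bytes and SlotOffset() rounds up to 32.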
inline size_t SlotOffset(size_t capacity, size_t slot_align) {
  assert(IsValidCapacity(capacity));
  const size_t num_control_bytes = capacity + 1 + NumClonedBytes();
  return (num_control_bytes + slot_align - 1) & (~slot_align + 1);
}

// Returns the size of the allocated block. See also above comment.
inline size_t AllocSize(size_t capacity, size_t slot_size, size_t slot_align) {
  return SlotOffset(capacity, slot_align) + capacity * slot_size;
}

// Policy: a policy defines how to perform different operations on
// the slots of the hashtable (see hash_policy_traits.h for the full interface
// of policy).
//
// Hash: a (possibly polymorphic) functor that hashes keys of the hashtable. The
// functor should accept a key and return size_t as hash. For best performance
// it is important that the hash function provides high entropy across all bits
// of the hash.
//
// Eq: a (possibly polymorphic) functor that compares two keys for equality. It
// should accept two (of possibly different type) keys and return a bool: true
// if they are equal, false if they are not. If two keys compare equal, then
// their hash values as defined by Hash MUST be equal.
//
// Allocator: an Allocator
// [https://en.cppreference.com/w/cpp/named_req/Allocator] with which
// the storage of the hashtable will be allocated and the elements will be
// constructed and destroyed.
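//
// For illustration, a minimal policy for a set of uint64_t might look like
// the following sketch (see hash_policy_traits.h for the authoritative
// interface; `Uint64Policy` is a hypothetical name, not part of this file):
//
//   struct Uint64Policy {
//     using slot_type = uint64_t;
//     using key_type = uint64_t;
//     using init_type = uint64_t;
//
//     template <class Allocator>
//     static void construct(Allocator*, slot_type* slot, uint64_t v) {
//       *slot = v;
//     }
//     template <class Allocator>
//     static void destroy(Allocator*, slot_type*) {}
//
//     static uint64_t& element(slot_type* slot) { return *slot; }
//
//     // Calls `f` with the key followed by the arguments needed to construct
//     // the value. For a set of integers both are the integer itself.
//     template <class F>
//     static auto apply(F&& f, uint64_t v)
//         -> decltype(std::forward<F>(f)(v, v)) {
//       return std::forward<F>(f)(v, v);
//     }
//   };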
template <class Policy, class Hash, class Eq, class Alloc>
class raw_hash_set {
  using PolicyTraits = hash_policy_traits<Policy>;
  using KeyArgImpl =
      KeyArg<IsTransparent<Eq>::value && IsTransparent<Hash>::value>;

 public:
  using init_type = typename PolicyTraits::init_type;
  using key_type = typename PolicyTraits::key_type;
  // TODO(sbenza): Hide slot_type as it is an implementation detail. Needs user
  // code fixes!
  using slot_type = typename PolicyTraits::slot_type;
  using allocator_type = Alloc;
  using size_type = size_t;
  using difference_type = ptrdiff_t;
  using hasher = Hash;
  using key_equal = Eq;
  using policy_type = Policy;
  using value_type = typename PolicyTraits::value_type;
  using reference = value_type&;
  using const_reference = const value_type&;
  using pointer = typename absl::allocator_traits<
      allocator_type>::template rebind_traits<value_type>::pointer;
  using const_pointer = typename absl::allocator_traits<
      allocator_type>::template rebind_traits<value_type>::const_pointer;

  // Alias used for heterogeneous lookup functions.
  // `key_arg<K>` evaluates to `K` when the functors are transparent and to
  // `key_type` otherwise. It permits template argument deduction on `K` for the
  // transparent case.
  template <class K>
  using key_arg = typename KeyArgImpl::template type<K, key_type>;

 private:
  // Give an early error when key_type is not hashable/eq.
  auto KeyTypeCanBeHashed(const Hash& h, const key_type& k) -> decltype(h(k));
  auto KeyTypeCanBeEq(const Eq& eq, const key_type& k) -> decltype(eq(k, k));

  using AllocTraits = absl::allocator_traits<allocator_type>;
  using SlotAlloc = typename absl::allocator_traits<
      allocator_type>::template rebind_alloc<slot_type>;
  using SlotAllocTraits = typename absl::allocator_traits<
      allocator_type>::template rebind_traits<slot_type>;

  static_assert(std::is_lvalue_reference<reference>::value,
                "Policy::element() must return a reference");

  template <typename T>
  struct SameAsElementReference
      : std::is_same<typename std::remove_cv<
                         typename std::remove_reference<reference>::type>::type,
                     typename std::remove_cv<
                         typename std::remove_reference<T>::type>::type> {};

  // An enabler for insert(T&&): T must be convertible to init_type or be the
  // same as [cv] value_type [ref].
  // Note: we separate SameAsElementReference into its own type to avoid using
  // reference unless we need to. MSVC doesn't seem to like it in some
  // cases.
  template <class T>
  using RequiresInsertable = typename std::enable_if<
      absl::disjunction<std::is_convertible<T, init_type>,
                        SameAsElementReference<T>>::value,
      int>::type;

  // RequiresNotInit is a workaround for gcc prior to 7.1.
  // See https://godbolt.org/g/Y4xsUh.
  template <class T>
  using RequiresNotInit =
      typename std::enable_if<!std::is_same<T, init_type>::value, int>::type;

  template <class... Ts>
  using IsDecomposable = IsDecomposable<void, PolicyTraits, Hash, Eq, Ts...>;

 public:
  static_assert(std::is_same<pointer, value_type*>::value,
                "Allocators with custom pointer types are not supported");
  static_assert(std::is_same<const_pointer, const value_type*>::value,
                "Allocators with custom pointer types are not supported");

  class iterator {
    friend class raw_hash_set;

   public:
    using iterator_category = std::forward_iterator_tag;
    using value_type = typename raw_hash_set::value_type;
    using reference =
        absl::conditional_t<PolicyTraits::constant_iterators::value,
                            const value_type&, value_type&>;
    using pointer = absl::remove_reference_t<reference>*;
    using difference_type = typename raw_hash_set::difference_type;

    iterator() {}

    // PRECONDITION: not an end() iterator.
    reference operator*() const {
      AssertIsFull(ctrl_);
      return PolicyTraits::element(slot_);
    }

    // PRECONDITION: not an end() iterator.
    pointer operator->() const { return &operator*(); }

    // PRECONDITION: not an end() iterator.
    iterator& operator++() {
      AssertIsFull(ctrl_);
      ++ctrl_;
      ++slot_;
      skip_empty_or_deleted();
      return *this;
    }
    // PRECONDITION: not an end() iterator.
    iterator operator++(int) {
      auto tmp = *this;
      ++*this;
      return tmp;
    }

    friend bool operator==(const iterator& a, const iterator& b) {
      AssertIsValid(a.ctrl_);
      AssertIsValid(b.ctrl_);
      return a.ctrl_ == b.ctrl_;
    }
    friend bool operator!=(const iterator& a, const iterator& b) {
      return !(a == b);
    }

   private:
    iterator(ctrl_t* ctrl, slot_type* slot) : ctrl_(ctrl), slot_(slot) {
      // This assumption helps the compiler know that any non-end iterator is
      // not equal to any end iterator.
      ABSL_INTERNAL_ASSUME(ctrl != nullptr);
    }

    void skip_empty_or_deleted() {
      while (IsEmptyOrDeleted(*ctrl_)) {
        uint32_t shift = Group{ctrl_}.CountLeadingEmptyOrDeleted();
        ctrl_ += shift;
        slot_ += shift;
      }
      if (ABSL_PREDICT_FALSE(*ctrl_ == ctrl_t::kSentinel)) ctrl_ = nullptr;
    }

    ctrl_t* ctrl_ = nullptr;
    // To avoid uninitialized member warnings, put slot_ in an anonymous union.
    // The member is not initialized on singleton and end iterators.
    union {
      slot_type* slot_;
    };
  };

  class const_iterator {
    friend class raw_hash_set;

   public:
    using iterator_category = typename iterator::iterator_category;
    using value_type = typename raw_hash_set::value_type;
    using reference = typename raw_hash_set::const_reference;
    using pointer = typename raw_hash_set::const_pointer;
    using difference_type = typename raw_hash_set::difference_type;

    const_iterator() {}
    // Implicit construction from iterator.
    const_iterator(iterator i) : inner_(std::move(i)) {}

    reference operator*() const { return *inner_; }
    pointer operator->() const { return inner_.operator->(); }

    const_iterator& operator++() {
      ++inner_;
      return *this;
    }
    const_iterator operator++(int) { return inner_++; }

    friend bool operator==(const const_iterator& a, const const_iterator& b) {
      return a.inner_ == b.inner_;
    }
    friend bool operator!=(const const_iterator& a, const const_iterator& b) {
      return !(a == b);
    }

   private:
    const_iterator(const ctrl_t* ctrl, const slot_type* slot)
        : inner_(const_cast<ctrl_t*>(ctrl), const_cast<slot_type*>(slot)) {}

    iterator inner_;
  };

  using node_type = node_handle<Policy, hash_policy_traits<Policy>, Alloc>;
  using insert_return_type = InsertReturnType<iterator, node_type>;

  raw_hash_set() noexcept(
      std::is_nothrow_default_constructible<hasher>::value&&
          std::is_nothrow_default_constructible<key_equal>::value&&
              std::is_nothrow_default_constructible<allocator_type>::value) {}

  explicit raw_hash_set(size_t bucket_count, const hasher& hash = hasher(),
                        const key_equal& eq = key_equal(),
                        const allocator_type& alloc = allocator_type())
      : ctrl_(EmptyGroup()),
        settings_(0, HashtablezInfoHandle(), hash, eq, alloc) {
    if (bucket_count) {
      capacity_ = NormalizeCapacity(bucket_count);
      initialize_slots();
    }
  }

  raw_hash_set(size_t bucket_count, const hasher& hash,
               const allocator_type& alloc)
      : raw_hash_set(bucket_count, hash, key_equal(), alloc) {}

  raw_hash_set(size_t bucket_count, const allocator_type& alloc)
      : raw_hash_set(bucket_count, hasher(), key_equal(), alloc) {}

  explicit raw_hash_set(const allocator_type& alloc)
      : raw_hash_set(0, hasher(), key_equal(), alloc) {}

  template <class InputIter>
  raw_hash_set(InputIter first, InputIter last, size_t bucket_count = 0,
               const hasher& hash = hasher(), const key_equal& eq = key_equal(),
               const allocator_type& alloc = allocator_type())
      : raw_hash_set(SelectBucketCountForIterRange(first, last, bucket_count),
                     hash, eq, alloc) {
    insert(first, last);
  }

  template <class InputIter>
  raw_hash_set(InputIter first, InputIter last, size_t bucket_count,
               const hasher& hash, const allocator_type& alloc)
      : raw_hash_set(first, last, bucket_count, hash, key_equal(), alloc) {}

  template <class InputIter>
  raw_hash_set(InputIter first, InputIter last, size_t bucket_count,
               const allocator_type& alloc)
      : raw_hash_set(first, last, bucket_count, hasher(), key_equal(), alloc) {}

  template <class InputIter>
  raw_hash_set(InputIter first, InputIter last, const allocator_type& alloc)
      : raw_hash_set(first, last, 0, hasher(), key_equal(), alloc) {}

  // Instead of accepting std::initializer_list<value_type> as the first
  // argument like std::unordered_set<value_type> does, we have two overloads
  // that accept std::initializer_list<T> and std::initializer_list<init_type>.
  // This is advantageous for performance.
  //
  //   // Turns {"abc", "def"} into std::initializer_list<std::string>, then
  //   // copies the strings into the set.
  //   std::unordered_set<std::string> s = {"abc", "def"};
  //
  //   // Turns {"abc", "def"} into std::initializer_list<const char*>, then
  //   // copies the strings into the set.
  //   absl::flat_hash_set<std::string> s = {"abc", "def"};
  //
  // The same trick is used in insert().
  //
  // The enabler is necessary to prevent this constructor from triggering where
  // the copy constructor is meant to be called.
  //
  //   absl::flat_hash_set<int> a, b{a};
  //
  // RequiresNotInit<T> is a workaround for gcc prior to 7.1.
  template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
  raw_hash_set(std::initializer_list<T> init, size_t bucket_count = 0,
               const hasher& hash = hasher(), const key_equal& eq = key_equal(),
               const allocator_type& alloc = allocator_type())
      : raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) {}

  raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count = 0,
               const hasher& hash = hasher(), const key_equal& eq = key_equal(),
               const allocator_type& alloc = allocator_type())
      : raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) {}

  template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
  raw_hash_set(std::initializer_list<T> init, size_t bucket_count,
               const hasher& hash, const allocator_type& alloc)
      : raw_hash_set(init, bucket_count, hash, key_equal(), alloc) {}

  raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count,
               const hasher& hash, const allocator_type& alloc)
      : raw_hash_set(init, bucket_count, hash, key_equal(), alloc) {}

  template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
  raw_hash_set(std::initializer_list<T> init, size_t bucket_count,
               const allocator_type& alloc)
      : raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {}

  raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count,
               const allocator_type& alloc)
      : raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {}

  template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
  raw_hash_set(std::initializer_list<T> init, const allocator_type& alloc)
      : raw_hash_set(init, 0, hasher(), key_equal(), alloc) {}

  raw_hash_set(std::initializer_list<init_type> init,
               const allocator_type& alloc)
      : raw_hash_set(init, 0, hasher(), key_equal(), alloc) {}

  raw_hash_set(const raw_hash_set& that)
      : raw_hash_set(that, AllocTraits::select_on_container_copy_construction(
                               that.alloc_ref())) {}

  raw_hash_set(const raw_hash_set& that, const allocator_type& a)
      : raw_hash_set(0, that.hash_ref(), that.eq_ref(), a) {
    reserve(that.size());
    // Because the table is guaranteed to be empty, we can do something faster
    // than a full `insert`.
    for (const auto& v : that) {
      const size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, v);
      auto target = find_first_non_full(ctrl_, hash, capacity_);
      SetCtrl(target.offset, H2(hash), capacity_, ctrl_, slots_,
              sizeof(slot_type));
      emplace_at(target.offset, v);
      infoz().RecordInsert(hash, target.probe_length);
    }
    size_ = that.size();
    growth_left() -= that.size();
  }

  raw_hash_set(raw_hash_set&& that) noexcept(
      std::is_nothrow_copy_constructible<hasher>::value&&
          std::is_nothrow_copy_constructible<key_equal>::value&&
              std::is_nothrow_copy_constructible<allocator_type>::value)
      : ctrl_(absl::exchange(that.ctrl_, EmptyGroup())),
        slots_(absl::exchange(that.slots_, nullptr)),
        size_(absl::exchange(that.size_, 0)),
        capacity_(absl::exchange(that.capacity_, 0)),
        // Hash, equality and allocator are copied instead of moved because
        // `that` must be left valid. If Hash is std::function<Key>, moving it
        // would create a nullptr functor that cannot be called.
        settings_(absl::exchange(that.growth_left(), 0),
                  absl::exchange(that.infoz(), HashtablezInfoHandle()),
                  that.hash_ref(), that.eq_ref(), that.alloc_ref()) {}

  raw_hash_set(raw_hash_set&& that, const allocator_type& a)
      : ctrl_(EmptyGroup()),
        slots_(nullptr),
        size_(0),
        capacity_(0),
        settings_(0, HashtablezInfoHandle(), that.hash_ref(), that.eq_ref(),
                  a) {
    if (a == that.alloc_ref()) {
      std::swap(ctrl_, that.ctrl_);
      std::swap(slots_, that.slots_);
      std::swap(size_, that.size_);
      std::swap(capacity_, that.capacity_);
      std::swap(growth_left(), that.growth_left());
      std::swap(infoz(), that.infoz());
    } else {
      reserve(that.size());
      // Note: this will copy elements of dense_set and unordered_set instead of
      // moving them. This can be fixed if it ever becomes an issue.
      for (auto& elem : that) insert(std::move(elem));
    }
  }

  raw_hash_set& operator=(const raw_hash_set& that) {
    raw_hash_set tmp(that,
                     AllocTraits::propagate_on_container_copy_assignment::value
                         ? that.alloc_ref()
                         : alloc_ref());
    swap(tmp);
    return *this;
  }

  raw_hash_set& operator=(raw_hash_set&& that) noexcept(
      absl::allocator_traits<allocator_type>::is_always_equal::value&&
          std::is_nothrow_move_assignable<hasher>::value&&
              std::is_nothrow_move_assignable<key_equal>::value) {
    // TODO(sbenza): We should only use the operations from the noexcept clause
    // to make sure we actually adhere to that contract.
    return move_assign(
        std::move(that),
        typename AllocTraits::propagate_on_container_move_assignment());
  }

  ~raw_hash_set() { destroy_slots(); }

  iterator begin() {
    auto it = iterator_at(0);
    it.skip_empty_or_deleted();
    return it;
  }
  iterator end() { return {}; }

  const_iterator begin() const {
    return const_cast<raw_hash_set*>(this)->begin();
  }
  const_iterator end() const { return {}; }
  const_iterator cbegin() const { return begin(); }
  const_iterator cend() const { return end(); }

  bool empty() const { return !size(); }
  size_t size() const { return size_; }
  size_t capacity() const { return capacity_; }
  size_t max_size() const { return (std::numeric_limits<size_t>::max)(); }

  ABSL_ATTRIBUTE_REINITIALIZES void clear() {
    // Iterating over this container is O(bucket_count()). When bucket_count()
    // is much greater than size(), iteration becomes prohibitively expensive.
    // For clear() it is more important to reuse the allocated array when the
    // container is small because allocation takes comparatively long time
    // compared to destruction of the elements of the container. So we pick the
    // largest bucket_count() threshold for which iteration is still fast and
    // past that we simply deallocate the array.
    if (capacity_ > 127) {
      destroy_slots();
    } else if (capacity_) {
      for (size_t i = 0; i != capacity_; ++i) {
        if (IsFull(ctrl_[i])) {
          PolicyTraits::destroy(&alloc_ref(), slots_ + i);
        }
      }
      size_ = 0;
      ResetCtrl(capacity_, ctrl_, slots_, sizeof(slot_type));
      reset_growth_left();
    }
    assert(empty());
    infoz().RecordStorageChanged(0, capacity_);
  }

  // This overload kicks in when the argument is an rvalue of insertable and
  // decomposable type other than init_type.
  //
  //   flat_hash_map<std::string, int> m;
  //   m.insert(std::make_pair("abc", 42));
  // TODO(cheshire): A type alias T2 is introduced as a workaround for the nvcc
  // bug.
  template <class T, RequiresInsertable<T> = 0,
            class T2 = T,
            typename std::enable_if<IsDecomposable<T2>::value, int>::type = 0,
            T* = nullptr>
  std::pair<iterator, bool> insert(T&& value) {
    return emplace(std::forward<T>(value));
  }

  // This overload kicks in when the argument is a bitfield or an lvalue of
  // insertable and decomposable type.
  //
  //   union { int n : 1; };
  //   flat_hash_set<int> s;
  //   s.insert(n);
  //
  //   flat_hash_set<std::string> s;
  //   const char* p = "hello";
  //   s.insert(p);
  //
  // TODO(romanp): Once we stop supporting gcc 5.1 and below, replace
  // RequiresInsertable<T> with RequiresInsertable<const T&>.
  // We are hitting this bug: https://godbolt.org/g/1Vht4f.
  template <
      class T, RequiresInsertable<T> = 0,
      typename std::enable_if<IsDecomposable<const T&>::value, int>::type = 0>
  std::pair<iterator, bool> insert(const T& value) {
    return emplace(value);
  }

  // This overload kicks in when the argument is an rvalue of init_type. Its
  // purpose is to handle brace-init-list arguments.
  //
  //   flat_hash_map<std::string, int> s;
  //   s.insert({"abc", 42});
  std::pair<iterator, bool> insert(init_type&& value) {
    return emplace(std::move(value));
  }

  // TODO(cheshire): A type alias T2 is introduced as a workaround for the nvcc
  // bug.
  template <class T, RequiresInsertable<T> = 0, class T2 = T,
            typename std::enable_if<IsDecomposable<T2>::value, int>::type = 0,
            T* = nullptr>
  iterator insert(const_iterator, T&& value) {
    return insert(std::forward<T>(value)).first;
  }

  // TODO(romanp): Once we stop supporting gcc 5.1 and below, replace
  // RequiresInsertable<T> with RequiresInsertable<const T&>.
  // We are hitting this bug: https://godbolt.org/g/1Vht4f.
  template <
      class T, RequiresInsertable<T> = 0,
      typename std::enable_if<IsDecomposable<const T&>::value, int>::type = 0>
  iterator insert(const_iterator, const T& value) {
    return insert(value).first;
  }

  iterator insert(const_iterator, init_type&& value) {
    return insert(std::move(value)).first;
  }

  template <class InputIt>
  void insert(InputIt first, InputIt last) {
    for (; first != last; ++first) emplace(*first);
  }

  template <class T, RequiresNotInit<T> = 0, RequiresInsertable<const T&> = 0>
  void insert(std::initializer_list<T> ilist) {
    insert(ilist.begin(), ilist.end());
  }

  void insert(std::initializer_list<init_type> ilist) {
    insert(ilist.begin(), ilist.end());
  }

  insert_return_type insert(node_type&& node) {
    if (!node) return {end(), false, node_type()};
    const auto& elem = PolicyTraits::element(CommonAccess::GetSlot(node));
    auto res = PolicyTraits::apply(
        InsertSlot<false>{*this, std::move(*CommonAccess::GetSlot(node))},
        elem);
    if (res.second) {
      CommonAccess::Reset(&node);
      return {res.first, true, node_type()};
    } else {
      return {res.first, false, std::move(node)};
    }
  }

  iterator insert(const_iterator, node_type&& node) {
    auto res = insert(std::move(node));
    node = std::move(res.node);
    return res.position;
  }

  // This overload kicks in if we can deduce the key from args. This enables us
  // to avoid constructing value_type if an entry with the same key already
  // exists.
  //
  // For example:
  //
  //   flat_hash_map<std::string, std::string> m = {{"abc", "def"}};
  //   // Creates no std::string copies and makes no heap allocations.
  //   m.emplace("abc", "xyz");
  template <class... Args, typename std::enable_if<
                               IsDecomposable<Args...>::value, int>::type = 0>
  std::pair<iterator, bool> emplace(Args&&... args) {
    return PolicyTraits::apply(EmplaceDecomposable{*this},
                               std::forward<Args>(args)...);
  }

  // This overload kicks in if we cannot deduce the key from args. It constructs
  // value_type unconditionally and then either moves it into the table or
  // destroys it.
  template <class... Args, typename std::enable_if<
                               !IsDecomposable<Args...>::value, int>::type = 0>
  std::pair<iterator, bool> emplace(Args&&... args) {
    alignas(slot_type) unsigned char raw[sizeof(slot_type)];
    slot_type* slot = reinterpret_cast<slot_type*>(&raw);

    PolicyTraits::construct(&alloc_ref(), slot, std::forward<Args>(args)...);
    const auto& elem = PolicyTraits::element(slot);
    return PolicyTraits::apply(InsertSlot<true>{*this, std::move(*slot)}, elem);
  }

  template <class... Args>
  iterator emplace_hint(const_iterator, Args&&... args) {
    return emplace(std::forward<Args>(args)...).first;
  }

  // Extension API: support for lazy emplace.
  //
  // Looks up key in the table. If found, returns the iterator to the element.
  // Otherwise calls `f` with one argument of type `raw_hash_set::constructor`.
  //
  // `f` must abide by several restrictions:
  //  - it MUST call `raw_hash_set::constructor` with arguments as if a
  //    `raw_hash_set::value_type` is constructed,
  //  - it MUST NOT access the container before the call to
  //    `raw_hash_set::constructor`, and
  //  - it MUST NOT erase the lazily emplaced element.
  // Doing any of these is undefined behavior.
  //
  // For example:
  //
  //   std::unordered_set<ArenaString> s;
  //   // Makes an ArenaString even if "abc" is already in the set.
  //   s.insert(ArenaString(&arena, "abc"));
  //
  //   flat_hash_set<ArenaString> s;
  //   // Makes an ArenaString only if "abc" is not yet in the set.
  //   s.lazy_emplace("abc", [&](const constructor& ctor) {
  //     ctor(&arena, "abc");
  //   });
1247   //
1248   // WARNING: This API is currently experimental. If there is a way to implement
1249   // the same thing with the rest of the API, prefer that.
1250   class constructor {
1251     friend class raw_hash_set;
1252 
1253    public:
1254     template <class... Args>
1255     void operator()(Args&&... args) const {
1256       assert(*slot_);
1257       PolicyTraits::construct(alloc_, *slot_, std::forward<Args>(args)...);
1258       *slot_ = nullptr;
1259     }
1260 
1261    private:
1262     constructor(allocator_type* a, slot_type** slot) : alloc_(a), slot_(slot) {}
1263 
1264     allocator_type* alloc_;
1265     slot_type** slot_;
1266   };
1267 
1268   template <class K = key_type, class F>
1269   iterator lazy_emplace(const key_arg<K>& key, F&& f) {
1270     auto res = find_or_prepare_insert(key);
1271     if (res.second) {
1272       slot_type* slot = slots_ + res.first;
1273       std::forward<F>(f)(constructor(&alloc_ref(), &slot));
1274       assert(!slot);
1275     }
1276     return iterator_at(res.first);
1277   }
1278 
1279   // Extension API: support for heterogeneous keys.
1280   //
1281   //   std::unordered_set<std::string> s;
1282   //   // Turns "abc" into std::string.
1283   //   s.erase("abc");
1284   //
1285   //   flat_hash_set<std::string> s;
1286   //   // Uses "abc" directly without copying it into std::string.
1287   //   s.erase("abc");
1288   template <class K = key_type>
1289   size_type erase(const key_arg<K>& key) {
1290     auto it = find(key);
1291     if (it == end()) return 0;
1292     erase(it);
1293     return 1;
1294   }
1295 
1296   // Erases the element pointed to by `it`.  Unlike `std::unordered_set::erase`,
1297   // this method returns void to reduce algorithmic complexity to O(1).  The
1298   // iterator is invalidated, so any increment should be done before calling
1299   // erase.  In order to erase while iterating across a map, use the following
1300   // idiom (which also works for standard containers):
1301   //
1302   // for (auto it = m.begin(), end = m.end(); it != end;) {
1303   //   // `erase()` will invalidate `it`, so advance `it` first.
1304   //   auto copy_it = it++;
1305   //   if (<pred>) {
1306   //     m.erase(copy_it);
1307   //   }
1308   // }
1309   void erase(const_iterator cit) { erase(cit.inner_); }
1310 
1311   // This overload is necessary because otherwise erase<K>(const K&) would be
1312   // a better match if non-const iterator is passed as an argument.
1313   void erase(iterator it) {
1314     AssertIsFull(it.ctrl_);
1315     PolicyTraits::destroy(&alloc_ref(), it.slot_);
1316     erase_meta_only(it);
1317   }
1318 
1319   iterator erase(const_iterator first, const_iterator last) {
1320     while (first != last) {
1321       erase(first++);
1322     }
1323     return last.inner_;
1324   }
1325 
1326   // Moves elements from `src` into `this`.
1327   // If the element already exists in `this`, it is left unmodified in `src`.
1328   template <typename H, typename E>
1329   void merge(raw_hash_set<Policy, H, E, Alloc>& src) {  // NOLINT
1330     assert(this != &src);
1331     for (auto it = src.begin(), e = src.end(); it != e;) {
1332       auto next = std::next(it);
1333       if (PolicyTraits::apply(InsertSlot<false>{*this, std::move(*it.slot_)},
1334                               PolicyTraits::element(it.slot_))
1335               .second) {
1336         src.erase_meta_only(it);
1337       }
1338       it = next;
1339     }
1340   }
1341 
1342   template <typename H, typename E>
1343   void merge(raw_hash_set<Policy, H, E, Alloc>&& src) {
1344     merge(src);
1345   }

  node_type extract(const_iterator position) {
    AssertIsFull(position.inner_.ctrl_);
    auto node =
        CommonAccess::Transfer<node_type>(alloc_ref(), position.inner_.slot_);
    erase_meta_only(position);
    return node;
  }

  template <
      class K = key_type,
      typename std::enable_if<!std::is_same<K, iterator>::value, int>::type = 0>
  node_type extract(const key_arg<K>& key) {
    auto it = find(key);
    return it == end() ? node_type() : extract(const_iterator{it});
  }
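  // Sketch of node extraction (illustrative; `src` and `dst` are hypothetical
  // sets built on this table):
  //
  //   auto node = src.extract("abc");  // Removes "abc" without destroying it.
  //   if (!node.empty()) dst.insert(std::move(node));  // No element copy.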

  void swap(raw_hash_set& that) noexcept(
      IsNoThrowSwappable<hasher>() && IsNoThrowSwappable<key_equal>() &&
      IsNoThrowSwappable<allocator_type>(
          typename AllocTraits::propagate_on_container_swap{})) {
    using std::swap;
    swap(ctrl_, that.ctrl_);
    swap(slots_, that.slots_);
    swap(size_, that.size_);
    swap(capacity_, that.capacity_);
    swap(growth_left(), that.growth_left());
    swap(hash_ref(), that.hash_ref());
    swap(eq_ref(), that.eq_ref());
    swap(infoz(), that.infoz());
    SwapAlloc(alloc_ref(), that.alloc_ref(),
              typename AllocTraits::propagate_on_container_swap{});
  }

  void rehash(size_t n) {
    if (n == 0 && capacity_ == 0) return;
    if (n == 0 && size_ == 0) {
      destroy_slots();
      infoz().RecordStorageChanged(0, 0);
      return;
    }
    // bitor is a faster way of doing `max` here. We will round up to the next
    // power-of-2-minus-1, so bitor is good enough.
    auto m = NormalizeCapacity(n | GrowthToLowerboundCapacity(size()));
    // n == 0 unconditionally rehashes as per the standard.
    if (n == 0 || m > capacity_) {
      resize(m);
    }
  }

  void reserve(size_t n) {
    if (n > size() + growth_left()) {
      size_t m = GrowthToLowerboundCapacity(n);
      resize(NormalizeCapacity(m));
    }
  }
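  // Illustrative sizing sketch: reserving up front performs at most one
  // resize, so a bulk insert of known size triggers no rehashing.
  //
  //   flat_hash_set<int> s;
  //   s.reserve(1000);
  //   for (int i = 0; i < 1000; ++i) s.insert(i);  // No rehash in this loop.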

  // Extension API: support for heterogeneous keys.
  //
  //   std::unordered_set<std::string> s;
  //   // Turns "abc" into std::string.
  //   s.count("abc");
  //
  //   flat_hash_set<std::string> s;
  //   // Uses "abc" directly without copying it into std::string.
  //   s.count("abc");
  template <class K = key_type>
  size_t count(const key_arg<K>& key) const {
    return find(key) == end() ? 0 : 1;
  }

  // Issues CPU prefetch instructions for the memory needed to find or insert
  // a key.  Like all lookup functions, this supports heterogeneous keys.
  //
  // NOTE: This is a very low level operation and should not be used without
  // specific benchmarks indicating its importance.
  template <class K = key_type>
  void prefetch(const key_arg<K>& key) const {
    (void)key;
#if defined(__GNUC__)
    auto seq = probe(ctrl_, hash_ref()(key), capacity_);
    __builtin_prefetch(static_cast<const void*>(ctrl_ + seq.offset()));
    __builtin_prefetch(static_cast<const void*>(slots_ + seq.offset()));
#endif  // __GNUC__
  }
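  // Hypothetical batching pattern (illustrative only; `Process` is a
  // placeholder): issue the prefetch for the next key so its cache-miss
  // latency overlaps with the current lookup.
  //
  //   for (size_t i = 0; i + 1 < keys.size(); ++i) {
  //     s.prefetch(keys[i + 1]);
  //     Process(s.find(keys[i]));
  //   }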

  // The API of find() has two extensions.
  //
  // 1. The hash can be passed by the user. It must be equal to the hash of the
  // key.
  //
  // 2. The type of the key argument doesn't have to be key_type. This is
  // so-called heterogeneous key support.
  template <class K = key_type>
  iterator find(const key_arg<K>& key, size_t hash) {
    auto seq = probe(ctrl_, hash, capacity_);
    while (true) {
      Group g{ctrl_ + seq.offset()};
      for (int i : g.Match(H2(hash))) {
        if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
                EqualElement<K>{key, eq_ref()},
                PolicyTraits::element(slots_ + seq.offset(i)))))
          return iterator_at(seq.offset(i));
      }
      if (ABSL_PREDICT_TRUE(g.MatchEmpty())) return end();
      seq.next();
      assert(seq.index() <= capacity_ && "full table!");
    }
  }
  template <class K = key_type>
  iterator find(const key_arg<K>& key) {
    return find(key, hash_ref()(key));
  }

  template <class K = key_type>
  const_iterator find(const key_arg<K>& key, size_t hash) const {
    return const_cast<raw_hash_set*>(this)->find(key, hash);
  }
  template <class K = key_type>
  const_iterator find(const key_arg<K>& key) const {
    return find(key, hash_ref()(key));
  }
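  // Sketch of the precomputed-hash extension (illustrative): a hash computed
  // once can be reused across several lookups.
  //
  //   const size_t h = s1.hash_function()(key);
  //   auto it1 = s1.find(key, h);
  //   auto it2 = s2.find(key, h);  // Valid only if s2's hasher agrees with s1's.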

  template <class K = key_type>
  bool contains(const key_arg<K>& key) const {
    return find(key) != end();
  }

  template <class K = key_type>
  std::pair<iterator, iterator> equal_range(const key_arg<K>& key) {
    auto it = find(key);
    if (it != end()) return {it, std::next(it)};
    return {it, it};
  }
  template <class K = key_type>
  std::pair<const_iterator, const_iterator> equal_range(
      const key_arg<K>& key) const {
    auto it = find(key);
    if (it != end()) return {it, std::next(it)};
    return {it, it};
  }

  size_t bucket_count() const { return capacity_; }
  float load_factor() const {
    return capacity_ ? static_cast<float>(size()) / capacity_ : 0.0f;
  }
  float max_load_factor() const { return 1.0f; }
  void max_load_factor(float) {
    // Does nothing.
  }

  hasher hash_function() const { return hash_ref(); }
  key_equal key_eq() const { return eq_ref(); }
  allocator_type get_allocator() const { return alloc_ref(); }

  friend bool operator==(const raw_hash_set& a, const raw_hash_set& b) {
    if (a.size() != b.size()) return false;
    const raw_hash_set* outer = &a;
    const raw_hash_set* inner = &b;
    if (outer->capacity() > inner->capacity()) std::swap(outer, inner);
    for (const value_type& elem : *outer)
      if (!inner->has_element(elem)) return false;
    return true;
  }

  friend bool operator!=(const raw_hash_set& a, const raw_hash_set& b) {
    return !(a == b);
  }

  friend void swap(raw_hash_set& a,
                   raw_hash_set& b) noexcept(noexcept(a.swap(b))) {
    a.swap(b);
  }

 private:
  template <class Container, typename Enabler>
  friend struct absl::container_internal::hashtable_debug_internal::
      HashtableDebugAccess;

  struct FindElement {
    template <class K, class... Args>
    const_iterator operator()(const K& key, Args&&...) const {
      return s.find(key);
    }
    const raw_hash_set& s;
  };

  struct HashElement {
    template <class K, class... Args>
    size_t operator()(const K& key, Args&&...) const {
      return h(key);
    }
    const hasher& h;
  };

  template <class K1>
  struct EqualElement {
    template <class K2, class... Args>
    bool operator()(const K2& lhs, Args&&...) const {
      return eq(lhs, rhs);
    }
    const K1& rhs;
    const key_equal& eq;
  };

  struct EmplaceDecomposable {
    template <class K, class... Args>
    std::pair<iterator, bool> operator()(const K& key, Args&&... args) const {
      auto res = s.find_or_prepare_insert(key);
      if (res.second) {
        s.emplace_at(res.first, std::forward<Args>(args)...);
      }
      return {s.iterator_at(res.first), res.second};
    }
    raw_hash_set& s;
  };

  template <bool do_destroy>
  struct InsertSlot {
    template <class K, class... Args>
    std::pair<iterator, bool> operator()(const K& key, Args&&...) && {
      auto res = s.find_or_prepare_insert(key);
      if (res.second) {
        PolicyTraits::transfer(&s.alloc_ref(), s.slots_ + res.first, &slot);
      } else if (do_destroy) {
        PolicyTraits::destroy(&s.alloc_ref(), &slot);
      }
      return {s.iterator_at(res.first), res.second};
    }
    raw_hash_set& s;
    // Constructed slot. Either moved into place or destroyed.
    slot_type&& slot;
  };

  // "Erases" the element from the container, except that it doesn't actually
  // destroy the object. It only updates all the metadata of the class.
  // This can be used in conjunction with Policy::transfer to move the object
  // to another place.
  void erase_meta_only(const_iterator it) {
    assert(IsFull(*it.inner_.ctrl_) && "erasing a dangling iterator");
    --size_;
    const size_t index = it.inner_.ctrl_ - ctrl_;
    const size_t index_before = (index - Group::kWidth) & capacity_;
    const auto empty_after = Group(it.inner_.ctrl_).MatchEmpty();
    const auto empty_before = Group(ctrl_ + index_before).MatchEmpty();

    // We count how many consecutive non-empty slots we have to the right and
    // to the left of `it`. If the sum is >= kWidth then there is at least one
    // probe window that might have seen a full group.
    bool was_never_full =
        empty_before && empty_after &&
        static_cast<size_t>(empty_after.TrailingZeros() +
                            empty_before.LeadingZeros()) < Group::kWidth;
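    // Worked example (illustrative): with Group::kWidth == 16, if the erased
    // slot begins a run of 4 consecutive non-empty slots (TrailingZeros == 4)
    // and the 7 slots immediately before it are non-empty (LeadingZeros == 7),
    // then 4 + 7 < 16: no 16-slot probe window around `it` was ever entirely
    // full, so the slot can safely become kEmpty rather than kDeleted.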

    SetCtrl(index, was_never_full ? ctrl_t::kEmpty : ctrl_t::kDeleted,
            capacity_, ctrl_, slots_, sizeof(slot_type));
    growth_left() += was_never_full;
    infoz().RecordErase();
  }

  void initialize_slots() {
    assert(capacity_);
    // Folks with custom allocators often make unwarranted assumptions about
    // the behavior of their classes vis-a-vis trivial destructibility and what
    // calls they will or won't make.  Avoid sampling for people with custom
    // allocators to get us out of this mess.  This is not a hard guarantee but
    // a workaround while we plan the exact guarantee we want to provide.
    //
    // People are often sloppy with the exact type of their allocator (sometimes
    // it has an extra const or is missing the pair, but rebinds made it work
    // anyway).  To avoid the ambiguity, we work off SlotAlloc which we have
    // bound more carefully.
    if (std::is_same<SlotAlloc, std::allocator<slot_type>>::value &&
        slots_ == nullptr) {
      infoz() = Sample();
    }

    char* mem = static_cast<char*>(Allocate<alignof(slot_type)>(
        &alloc_ref(),
        AllocSize(capacity_, sizeof(slot_type), alignof(slot_type))));
    ctrl_ = reinterpret_cast<ctrl_t*>(mem);
    slots_ = reinterpret_cast<slot_type*>(
        mem + SlotOffset(capacity_, alignof(slot_type)));
    ResetCtrl(capacity_, ctrl_, slots_, sizeof(slot_type));
    reset_growth_left();
    infoz().RecordStorageChanged(size_, capacity_);
  }

  void destroy_slots() {
    if (!capacity_) return;
    for (size_t i = 0; i != capacity_; ++i) {
      if (IsFull(ctrl_[i])) {
        PolicyTraits::destroy(&alloc_ref(), slots_ + i);
      }
    }
    // Unpoison before returning the memory to the allocator.
    SanitizerUnpoisonMemoryRegion(slots_, sizeof(slot_type) * capacity_);
    Deallocate<alignof(slot_type)>(
        &alloc_ref(), ctrl_,
        AllocSize(capacity_, sizeof(slot_type), alignof(slot_type)));
    ctrl_ = EmptyGroup();
    slots_ = nullptr;
    size_ = 0;
    capacity_ = 0;
    growth_left() = 0;
  }

  void resize(size_t new_capacity) {
    assert(IsValidCapacity(new_capacity));
    auto* old_ctrl = ctrl_;
    auto* old_slots = slots_;
    const size_t old_capacity = capacity_;
    capacity_ = new_capacity;
    initialize_slots();

    size_t total_probe_length = 0;
    for (size_t i = 0; i != old_capacity; ++i) {
      if (IsFull(old_ctrl[i])) {
        size_t hash = PolicyTraits::apply(HashElement{hash_ref()},
                                          PolicyTraits::element(old_slots + i));
        auto target = find_first_non_full(ctrl_, hash, capacity_);
        size_t new_i = target.offset;
        total_probe_length += target.probe_length;
        SetCtrl(new_i, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type));
        PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, old_slots + i);
      }
    }
    if (old_capacity) {
      SanitizerUnpoisonMemoryRegion(old_slots,
                                    sizeof(slot_type) * old_capacity);
      Deallocate<alignof(slot_type)>(
          &alloc_ref(), old_ctrl,
          AllocSize(old_capacity, sizeof(slot_type), alignof(slot_type)));
    }
    infoz().RecordRehash(total_probe_length);
  }

  void drop_deletes_without_resize() ABSL_ATTRIBUTE_NOINLINE {
    assert(IsValidCapacity(capacity_));
    assert(!is_small(capacity_));
    // Algorithm:
    // - mark all DELETED slots as EMPTY
    // - mark all FULL slots as DELETED
    // - for each slot marked as DELETED
    //     hash = Hash(element)
    //     target = find_first_non_full(hash)
    //     if target is in the same group
    //       mark slot as FULL
    //     else if target is EMPTY
    //       transfer element to target
    //       mark slot as EMPTY
    //       mark target as FULL
    //     else if target is DELETED
    //       swap current element with target element
    //       mark target as FULL
    //       repeat procedure for current slot with moved from element (target)
    ConvertDeletedToEmptyAndFullToDeleted(ctrl_, capacity_);
    alignas(slot_type) unsigned char raw[sizeof(slot_type)];
    size_t total_probe_length = 0;
    slot_type* slot = reinterpret_cast<slot_type*>(&raw);
    for (size_t i = 0; i != capacity_; ++i) {
      if (!IsDeleted(ctrl_[i])) continue;
      const size_t hash = PolicyTraits::apply(
          HashElement{hash_ref()}, PolicyTraits::element(slots_ + i));
      const FindInfo target = find_first_non_full(ctrl_, hash, capacity_);
      const size_t new_i = target.offset;
      total_probe_length += target.probe_length;

      // Check whether the old and new positions fall within the same group
      // with respect to the hash. If they do, we don't need to move the
      // object, as it already sits in the best probe group it can reach.
      const size_t probe_offset = probe(ctrl_, hash, capacity_).offset();
      const auto probe_index = [probe_offset, this](size_t pos) {
        return ((pos - probe_offset) & capacity_) / Group::kWidth;
      };

      // Element doesn't move.
      if (ABSL_PREDICT_TRUE(probe_index(new_i) == probe_index(i))) {
        SetCtrl(i, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type));
        continue;
      }
      if (IsEmpty(ctrl_[new_i])) {
        // Transfer element to the empty spot.
        // SetCtrl poisons/unpoisons the slots so we have to call it at the
        // right time.
        SetCtrl(new_i, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type));
        PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, slots_ + i);
        SetCtrl(i, ctrl_t::kEmpty, capacity_, ctrl_, slots_, sizeof(slot_type));
      } else {
        assert(IsDeleted(ctrl_[new_i]));
        SetCtrl(new_i, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type));
        // Until we are done rehashing, DELETED marks previously FULL slots.
        // Swap i and new_i elements.
        PolicyTraits::transfer(&alloc_ref(), slot, slots_ + i);
        PolicyTraits::transfer(&alloc_ref(), slots_ + i, slots_ + new_i);
        PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, slot);
        --i;  // repeat
      }
    }
    reset_growth_left();
    infoz().RecordRehash(total_probe_length);
  }

  void rehash_and_grow_if_necessary() {
    if (capacity_ == 0) {
      resize(1);
    } else if (capacity_ > Group::kWidth &&
               // Do these calculations in 64-bit to avoid overflow.
               size() * uint64_t{32} <= capacity_ * uint64_t{25}) {
      // Squash DELETED without growing if there is enough capacity.
      //
      // Rehash in place if the current size is <= 25/32 of capacity_.
      // Rationale for such a high factor: 1) drop_deletes_without_resize() is
      // faster than resize, and 2) it takes quite a bit of work to add
      // tombstones.  In the worst case, it seems to take approximately 4
      // insert/erase pairs to create a single tombstone, so if we are
      // rehashing because of tombstones, we can afford to rehash-in-place as
      // long as we are reclaiming at least 1/8 the capacity without doing more
      // than 2X the work.  (Where "work" is defined to be size() for rehashing
      // or rehashing in place, and 1 for an insert or erase.)  But rehashing in
      // place is faster per operation than inserting or even doubling the size
      // of the table, so we can actually afford to reclaim even less space
      // from a resize-in-place.  The decision is to rehash in place if we can
      // reclaim about 1/8th of the usable capacity (specifically 3/28 of the
      // usable capacity, i.e. 3/32 of the total capacity), which means that
      // the total cost of rehashing will be a small fraction of the total
      // work.
      //
      // Here is the output of an experiment using the BM_CacheInSteadyState
      // benchmark, running the old case (where we rehash-in-place only if we
      // can reclaim at least 7/16*capacity_) vs. this code (which rehashes in
      // place if we can recover 3/32*capacity_).
      //
      // Note that although the worst-case number of rehashes jumped up from
      // 15 to 190, the number of operations per second is almost the same.
      //
      // Abridged output of running BM_CacheInSteadyState benchmark from
      // raw_hash_set_benchmark.   N is the number of insert/erase operations.
      //
      //      | OLD (recover >= 7/16)       | NEW (recover >= 3/32)
      // size |    N/s LoadFactor NRehashes |    N/s LoadFactor NRehashes
      //  448 | 145284       0.44        18 | 140118       0.44        19
      //  493 | 152546       0.24        11 | 151417       0.48        28
      //  538 | 151439       0.26        11 | 151152       0.53        38
      //  583 | 151765       0.28        11 | 150572       0.57        50
      //  628 | 150241       0.31        11 | 150853       0.61        66
      //  672 | 149602       0.33        12 | 150110       0.66        90
      //  717 | 149998       0.35        12 | 149531       0.70       129
      //  762 | 149836       0.37        13 | 148559       0.74       190
      //  807 | 149736       0.39        14 | 151107       0.39        14
      //  852 | 150204       0.42        15 | 151019       0.42        15
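      //
      // Worked numbers for this branch (illustrative): with capacity_ == 63
      // it triggers when size() * 32 <= 63 * 25, i.e. size() <= 49.  Assuming
      // the usual 7/8 growth bound (CapacityToGrowth(63) == 56), at least
      // about 7 tombstoned slots get reclaimed, which is roughly the 3/32 of
      // capacity quoted above.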
      drop_deletes_without_resize();
    } else {
      // Otherwise grow the container.
      resize(capacity_ * 2 + 1);
    }
  }

  bool has_element(const value_type& elem) const {
    size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, elem);
    auto seq = probe(ctrl_, hash, capacity_);
    while (true) {
      Group g{ctrl_ + seq.offset()};
      for (int i : g.Match(H2(hash))) {
        if (ABSL_PREDICT_TRUE(PolicyTraits::element(slots_ + seq.offset(i)) ==
                              elem))
          return true;
      }
      if (ABSL_PREDICT_TRUE(g.MatchEmpty())) return false;
      seq.next();
      assert(seq.index() <= capacity_ && "full table!");
    }
    return false;
  }

  // TODO(alkis): Optimize this assuming *this and that don't overlap.
  raw_hash_set& move_assign(raw_hash_set&& that, std::true_type) {
    raw_hash_set tmp(std::move(that));
    swap(tmp);
    return *this;
  }
  raw_hash_set& move_assign(raw_hash_set&& that, std::false_type) {
    raw_hash_set tmp(std::move(that), alloc_ref());
    swap(tmp);
    return *this;
  }

 protected:
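  // Attempts to find `key` in the table; if it is absent, reserves a slot for
  // it.  Returns the slot index paired with `true` when an insertion was
  // prepared, or the index of the existing element paired with `false`.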
  template <class K>
  std::pair<size_t, bool> find_or_prepare_insert(const K& key) {
    auto hash = hash_ref()(key);
    auto seq = probe(ctrl_, hash, capacity_);
    while (true) {
      Group g{ctrl_ + seq.offset()};
      for (int i : g.Match(H2(hash))) {
        if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
                EqualElement<K>{key, eq_ref()},
                PolicyTraits::element(slots_ + seq.offset(i)))))
          return {seq.offset(i), false};
      }
      if (ABSL_PREDICT_TRUE(g.MatchEmpty())) break;
      seq.next();
      assert(seq.index() <= capacity_ && "full table!");
    }
    return {prepare_insert(hash), true};
  }

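  // Given the hash of a key that is not yet in the table, finds the next
  // available slot (growing or rehashing in place first when no growth is
  // left), updates the control bytes and bookkeeping, and returns the index
  // of the reserved slot.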
  size_t prepare_insert(size_t hash) ABSL_ATTRIBUTE_NOINLINE {
    auto target = find_first_non_full(ctrl_, hash, capacity_);
    if (ABSL_PREDICT_FALSE(growth_left() == 0 &&
                           !IsDeleted(ctrl_[target.offset]))) {
      rehash_and_grow_if_necessary();
      target = find_first_non_full(ctrl_, hash, capacity_);
    }
    ++size_;
    growth_left() -= IsEmpty(ctrl_[target.offset]);
    SetCtrl(target.offset, H2(hash), capacity_, ctrl_, slots_,
            sizeof(slot_type));
    infoz().RecordInsert(hash, target.probe_length);
    return target.offset;
  }

  // Constructs the value in the space pointed to by the iterator. This only
  // works after an unsuccessful find_or_prepare_insert() and before any other
  // modifications happen in the raw_hash_set.
  //
  // PRECONDITION: i is an index returned from find_or_prepare_insert(k), where
  // k is the key decomposed from `forward<Args>(args)...`, and the bool
  // returned by find_or_prepare_insert(k) was true.
  // POSTCONDITION: *m.iterator_at(i) == value_type(forward<Args>(args)...).
  template <class... Args>
  void emplace_at(size_t i, Args&&... args) {
    PolicyTraits::construct(&alloc_ref(), slots_ + i,
                            std::forward<Args>(args)...);

    assert(PolicyTraits::apply(FindElement{*this}, *iterator_at(i)) ==
               iterator_at(i) &&
           "constructed value does not match the lookup key");
  }

  iterator iterator_at(size_t i) { return {ctrl_ + i, slots_ + i}; }
  const_iterator iterator_at(size_t i) const { return {ctrl_ + i, slots_ + i}; }

 private:
  friend struct RawHashSetTestOnlyAccess;

  void reset_growth_left() {
    growth_left() = CapacityToGrowth(capacity()) - size_;
  }

  size_t& growth_left() { return settings_.template get<0>(); }

  HashtablezInfoHandle& infoz() { return settings_.template get<1>(); }

  hasher& hash_ref() { return settings_.template get<2>(); }
  const hasher& hash_ref() const { return settings_.template get<2>(); }
  key_equal& eq_ref() { return settings_.template get<3>(); }
  const key_equal& eq_ref() const { return settings_.template get<3>(); }
  allocator_type& alloc_ref() { return settings_.template get<4>(); }
  const allocator_type& alloc_ref() const {
    return settings_.template get<4>();
  }

  // TODO(alkis): Investigate removing some of these fields:
  // - ctrl/slots can be derived from each other
  // - size can be moved into the slot array
  ctrl_t* ctrl_ = EmptyGroup();  // [(capacity + 1 + NumClonedBytes()) * ctrl_t]
  slot_type* slots_ = nullptr;   // [capacity * slot_type]
  size_t size_ = 0;              // number of full slots
  size_t capacity_ = 0;          // total number of slots
  absl::container_internal::CompressedTuple<size_t /* growth_left */,
                                            HashtablezInfoHandle, hasher,
                                            key_equal, allocator_type>
      settings_{0, HashtablezInfoHandle{}, hasher{}, key_equal{},
                allocator_type{}};
};

// Erases all elements that satisfy the predicate `pred` from the container `c`.
template <typename P, typename H, typename E, typename A, typename Predicate>
void EraseIf(Predicate& pred, raw_hash_set<P, H, E, A>* c) {
  for (auto it = c->begin(), last = c->end(); it != last;) {
    if (pred(*it)) {
      c->erase(it++);
    } else {
      ++it;
    }
  }
}
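// Illustrative sketch (assumes a set type built on raw_hash_set, e.g.
// flat_hash_set):
//
//   flat_hash_set<int> s = {1, 2, 3, 4};
//   auto is_odd = [](int v) { return v % 2 != 0; };
//   EraseIf(is_odd, &s);  // Afterwards s == {2, 4}.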

namespace hashtable_debug_internal {
template <typename Set>
struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
  using Traits = typename Set::PolicyTraits;
  using Slot = typename Traits::slot_type;

  static size_t GetNumProbes(const Set& set,
                             const typename Set::key_type& key) {
    size_t num_probes = 0;
    size_t hash = set.hash_ref()(key);
    auto seq = probe(set.ctrl_, hash, set.capacity_);
    while (true) {
      container_internal::Group g{set.ctrl_ + seq.offset()};
      for (int i : g.Match(container_internal::H2(hash))) {
        if (Traits::apply(
                typename Set::template EqualElement<typename Set::key_type>{
                    key, set.eq_ref()},
                Traits::element(set.slots_ + seq.offset(i))))
          return num_probes;
        ++num_probes;
      }
      if (g.MatchEmpty()) return num_probes;
      seq.next();
      ++num_probes;
    }
  }

  static size_t AllocatedByteSize(const Set& c) {
    size_t capacity = c.capacity_;
    if (capacity == 0) return 0;
    size_t m = AllocSize(capacity, sizeof(Slot), alignof(Slot));

    size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
    if (per_slot != ~size_t{}) {
      m += per_slot * c.size();
    } else {
      for (size_t i = 0; i != capacity; ++i) {
        if (container_internal::IsFull(c.ctrl_[i])) {
          m += Traits::space_used(c.slots_ + i);
        }
      }
    }
    return m;
  }

  static size_t LowerBoundAllocatedByteSize(size_t size) {
    size_t capacity = GrowthToLowerboundCapacity(size);
    if (capacity == 0) return 0;
    size_t m =
        AllocSize(NormalizeCapacity(capacity), sizeof(Slot), alignof(Slot));
    size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
    if (per_slot != ~size_t{}) {
      m += per_slot * size;
    }
    return m;
  }
};

}  // namespace hashtable_debug_internal
}  // namespace container_internal
ABSL_NAMESPACE_END
}  // namespace absl

#endif  // ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_