// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// An open-addressing hashtable with quadratic probing.
//
// This is a low level hashtable on top of which different interfaces can be
// implemented, like flat_hash_set, node_hash_set, string_hash_set, etc.
//
// The table interface is similar to that of std::unordered_set. Notable
// differences are that most member functions support heterogeneous keys when
// BOTH the hash and eq functions are marked as transparent. They do so by
// providing a typedef called `is_transparent`.
//
// When heterogeneous lookup is enabled, functions that take key_type act as if
// they have an overload set like:
//
//   iterator find(const key_type& key);
//   template <class K>
//   iterator find(const K& key);
//
//   size_type erase(const key_type& key);
//   template <class K>
//   size_type erase(const K& key);
//
//   std::pair<iterator, iterator> equal_range(const key_type& key);
//   template <class K>
//   std::pair<iterator, iterator> equal_range(const K& key);
//
// When heterogeneous lookup is disabled, only the explicit `key_type` overloads
// exist.
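//
// For example, a hypothetical set of `std::string` whose hash and eq functors
// are both transparent could be queried with a `const char*` key directly,
// without materializing a temporary `std::string`:
//
//   s.find("abc");  // heterogeneous lookup; no std::string is constructed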
//
// find() also supports passing the hash explicitly:
//
//   iterator find(const key_type& key, size_t hash);
//   template <class U>
//   iterator find(const U& key, size_t hash);
//
// In addition, the pointer-to-element and iterator stability guarantees are
// weaker: all iterators and pointers are invalidated after a new element is
// inserted.
//
// IMPLEMENTATION DETAILS
//
// # Table Layout
//
// A raw_hash_set's backing array consists of control bytes followed by slots
// that may or may not contain objects.
//
// The layout of the backing array, for `capacity` slots, is thus, as a
// pseudo-struct:
//
//   struct BackingArray {
//     // Control bytes for the "real" slots.
//     ctrl_t ctrl[capacity];
//     // Always `ctrl_t::kSentinel`. This is used by iterators to find when to
//     // stop and serves no other purpose.
//     ctrl_t sentinel;
//     // A copy of the first `kWidth - 1` elements of `ctrl`. This is used so
//     // that if a probe sequence picks a value near the end of `ctrl`,
//     // `Group` will have valid control bytes to look at.
//     ctrl_t clones[kWidth - 1];
//     // The actual slot data.
//     slot_type slots[capacity];
//   };
//
// The length of this array is computed by `AllocSize()` below.
//
// Control bytes (`ctrl_t`) are bytes (collected into groups of a
// platform-specific size) that define the state of the corresponding slot in
// the slot array. Group manipulation is tightly optimized to be as efficient
// as possible: SSE and friends on x86, clever bit operations on other arches.
//
//      Group 1         Group 2         Group 3
// +---------------+---------------+---------------+
// | | | | | | | | | | | | | | | | | | | | | | | | |
// +---------------+---------------+---------------+
//
// Each control byte is either a special value marking an empty slot, a deleted
// slot (sometimes called a *tombstone*), or the end-of-table sentinel used by
// iterators, or, if the slot is occupied, seven bits (H2) from the hash of the
// value in the corresponding slot.
//
// Storing control bytes in a separate array also has beneficial cache effects,
// since more logical slots will fit into a cache line.
//
// # Hashing
//
// We compute two separate hashes, `H1` and `H2`, from the hash of an object.
// `H1(hash(x))` is an index into `slots`, and essentially the starting point
// for the probe sequence. `H2(hash(x))` is a 7-bit value used to filter out
// objects that cannot possibly be the one we are looking for.
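//
// As a sketch (`H1` and `H2` are defined further below in this file):
//
//   size_t h = hash(x);
//   size_t h1 = H1(h, ctrl);  // upper 57 bits, mixed with a per-table salt
//   h2_t h2 = H2(h);          // low 7 bits, stored in the control byte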
//
// # Table operations
//
// The key operations are `insert`, `find`, and `erase`.
//
// Since `insert` and `erase` are implemented in terms of `find`, we describe
// `find` first. To `find` a value `x`, we compute `hash(x)`. From
// `H1(hash(x))` and the capacity, we construct a `probe_seq` that visits every
// group of slots in some interesting order.
//
// We now walk through these indices. At each index, we select the entire group
// starting with that index and extract potential candidates: occupied slots
// with a control byte equal to `H2(hash(x))`. If we find an empty slot in the
// group, we stop: the value is not in the table. Each candidate slot `y` is
// compared with `x`; if `x == y`, we are done and return `&y`; otherwise we
// continue to the next probe index. Tombstones effectively behave like full
// slots that never match the value we're looking for.
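//
// In rough pseudocode (a sketch that elides iterator construction; the real
// logic lives in `find` below):
//
//   auto seq = probe(ctrl, hash, capacity);
//   while (true) {
//     Group g{ctrl + seq.offset()};
//     for (uint32_t i : g.Match(H2(hash))) {
//       if (eq(x, slots[seq.offset(i)])) return &slots[seq.offset(i)];
//     }
//     if (g.MaskEmpty()) return nullptr;  // not present
//     seq.next();
//   }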
//
// The `H2` bits ensure when we compare a slot to an object with `==`, we are
// likely to have actually found the object. That is, the chance is low that
// `==` is called and returns `false`. Thus, when we search for an object, we
// are unlikely to call `==` many times. This likelihood can be analyzed as
// follows (assuming that H2 is a random enough hash function).
//
// Let's assume that there are `k` "wrong" objects that must be examined in a
// probe sequence. For example, when doing a `find` on an object that is in the
// table, `k` is the number of objects between the start of the probe sequence
// and the final found object (not including the final found object). Each of
// these matches a random 7-bit H2 value with probability 1/128, so the
// expected number of objects with an H2 match is `k/128`. Measurements and
// analysis indicate that even at high load factors, `k` is less than 32,
// meaning that the number of "false positive" comparisons we must perform is
// less than 32/128 = 1/4 per `find`.
//
// `insert` is implemented in terms of `unchecked_insert`, which inserts a
// value presumed to not be in the table (violating this requirement will cause
// the table to behave erratically). Given `x` and its hash `hash(x)`, to
// insert it, we construct a `probe_seq` once again, and use it to find the
// first group with an unoccupied (empty *or* deleted) slot. We place `x` into
// the first such slot in the group and mark it as full with `x`'s H2.
//
// To `insert`, we compose `unchecked_insert` with `find`. We compute `hash(x)`
// and perform a `find` to see if it's already present; if it is, we're done.
// If it's not, we may decide the table is getting overcrowded (i.e. the load
// factor is greater than 7/8 for big tables; `is_small()` tables use a max
// load factor of 1); in this case, we allocate a bigger array,
// `unchecked_insert` each element of the table into the new array (we know
// that no insertion here will insert an already-present value), and discard
// the old backing array. At this point, we may `unchecked_insert` the value
// `x`.
//
// Below, `unchecked_insert` is partly implemented by `prepare_insert`, which
// presents a viable, initialized slot pointee to the caller.
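//
// In rough pseudocode (a sketch; the real `prepare_insert` below also updates
// size and sampling state):
//
//   size_t prepare_insert(size_t hash) {
//     auto target = find_first_non_full(ctrl_, hash, capacity_);
//     if (growth_left() == 0 && !IsDeleted(ctrl_[target.offset])) {
//       rehash_and_grow_if_necessary();
//       target = find_first_non_full(ctrl_, hash, capacity_);
//     }
//     SetCtrl(target.offset, H2(hash), capacity_, ctrl_, slots_,
//             sizeof(slot_type));
//     return target.offset;
//   }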
//
// `erase` is implemented in terms of `erase_at`, which takes an index to a
// slot. Given an offset, we simply create a tombstone and destroy its contents.
// If we can prove that the slot would not appear in a probe sequence, we can
// mark the slot as empty, instead. We can prove this by observing that if a
// group has any empty slots, it has never been full (assuming we never create
// an empty slot in a group with no empties, which this heuristic guarantees we
// never do) and find would stop at this group anyway (since it does not probe
// beyond groups with empties).
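//
// A sketch of the heuristic (the condition here is illustrative; the real
// code inspects the groups surrounding the slot):
//
//   destroy(slots_ + index);
//   if (/* the group containing `index` has an empty byte */) {
//     SetCtrl(index, ctrl_t::kEmpty, ...);    // no probe ever passed through
//   } else {
//     SetCtrl(index, ctrl_t::kDeleted, ...);  // leave a tombstone
//   }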
//
// `erase` is `erase_at` composed with `find`: if we have a value `x`, we can
// perform a `find`, and then `erase_at` the resulting slot.
//
// To iterate, we simply traverse the array, skipping empty and deleted slots
// and stopping when we hit a `kSentinel`.

#ifndef ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
#define ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstring>
#include <iterator>
#include <limits>
#include <memory>
#include <tuple>
#include <type_traits>
#include <utility>

#include "absl/base/config.h"
#include "absl/base/internal/endian.h"
#include "absl/base/internal/prefetch.h"
#include "absl/base/optimization.h"
#include "absl/base/port.h"
#include "absl/container/internal/common.h"
#include "absl/container/internal/compressed_tuple.h"
#include "absl/container/internal/container_memory.h"
#include "absl/container/internal/hash_policy_traits.h"
#include "absl/container/internal/hashtable_debug_hooks.h"
#include "absl/container/internal/hashtablez_sampler.h"
#include "absl/memory/memory.h"
#include "absl/meta/type_traits.h"
#include "absl/numeric/bits.h"
#include "absl/utility/utility.h"

#ifdef ABSL_INTERNAL_HAVE_SSE2
#include <emmintrin.h>
#endif

#ifdef ABSL_INTERNAL_HAVE_SSSE3
#include <tmmintrin.h>
#endif

#ifdef _MSC_VER
#include <intrin.h>
#endif

#ifdef ABSL_INTERNAL_HAVE_ARM_NEON
#include <arm_neon.h>
#endif

namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {

template <typename AllocType>
void SwapAlloc(AllocType& lhs, AllocType& rhs,
               std::true_type /* propagate_on_container_swap */) {
  using std::swap;
  swap(lhs, rhs);
}
template <typename AllocType>
void SwapAlloc(AllocType& /*lhs*/, AllocType& /*rhs*/,
               std::false_type /* propagate_on_container_swap */) {}

// The state for a probe sequence.
//
// Currently, the sequence is a triangular progression of the form
//
//   p(i) := Width * (i^2 + i)/2 + hash (mod mask + 1)
//
// The use of `Width` ensures that each probe step does not overlap groups;
// the sequence effectively outputs the addresses of *groups* (although not
// necessarily aligned to any boundary). The `Group` machinery allows us
// to check an entire group with minimal branching.
//
// Wrapping around at `mask + 1` is important, but not for the obvious reason.
// As described above, the first few entries of the control byte array
// are mirrored at the end of the array, which `Group` will find and use
// for selecting candidates. However, when those candidates' slots are
// actually inspected, there are no corresponding slots for the cloned bytes,
// so we need to make sure we've treated those offsets as "wrapping around".
//
// It turns out that this probe sequence visits every group exactly once if the
// number of groups is a power of two, since (i^2 + i)/2 is a bijection in
// Z/(2^m). See https://en.wikipedia.org/wiki/Quadratic_probing
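//
// For example, with `Width == 16` and `mask == 63`, a hash of 3 yields the
// probe offsets 3, 19, 51, 35: one 16-byte probe window in each quarter of
// the table, each visited exactly once before the sequence begins to repeat.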
template <size_t Width>
class probe_seq {
 public:
  // Creates a new probe sequence using `hash` as the initial value of the
  // sequence and `mask` (usually the capacity of the table) as the mask to
  // apply to each value in the progression.
  probe_seq(size_t hash, size_t mask) {
    assert(((mask + 1) & mask) == 0 && "not a mask");
    mask_ = mask;
    offset_ = hash & mask_;
  }

  // The offset within the table, i.e., the value `p(i)` above.
  size_t offset() const { return offset_; }
  size_t offset(size_t i) const { return (offset_ + i) & mask_; }

  void next() {
    index_ += Width;
    offset_ += index_;
    offset_ &= mask_;
  }
  // 0-based probe index, a multiple of `Width`.
  size_t index() const { return index_; }

 private:
  size_t mask_;
  size_t offset_;
  size_t index_ = 0;
};

template <class ContainerKey, class Hash, class Eq>
struct RequireUsableKey {
  template <class PassedKey, class... Args>
  std::pair<
      decltype(std::declval<const Hash&>()(std::declval<const PassedKey&>())),
      decltype(std::declval<const Eq&>()(std::declval<const ContainerKey&>(),
                                         std::declval<const PassedKey&>()))>*
  operator()(const PassedKey&, const Args&...) const;
};

template <class E, class Policy, class Hash, class Eq, class... Ts>
struct IsDecomposable : std::false_type {};

template <class Policy, class Hash, class Eq, class... Ts>
struct IsDecomposable<
    absl::void_t<decltype(Policy::apply(
        RequireUsableKey<typename Policy::key_type, Hash, Eq>(),
        std::declval<Ts>()...))>,
    Policy, Hash, Eq, Ts...> : std::true_type {};

// TODO(alkis): Switch to std::is_nothrow_swappable when gcc/clang supports it.
template <class T>
constexpr bool IsNoThrowSwappable(std::true_type = {} /* is_swappable */) {
  using std::swap;
  return noexcept(swap(std::declval<T&>(), std::declval<T&>()));
}
template <class T>
constexpr bool IsNoThrowSwappable(std::false_type /* is_swappable */) {
  return false;
}

template <typename T>
uint32_t TrailingZeros(T x) {
  ABSL_ASSUME(x != 0);
  return static_cast<uint32_t>(countr_zero(x));
}

// An abstract bitmask, such as that emitted by a SIMD instruction.
//
// Specifically, this type implements a simple bitset whose representation is
// controlled by `SignificantBits` and `Shift`. `SignificantBits` is the number
// of abstract bits in the bitset, while `Shift` is the log-base-two of the
// width of an abstract bit in the representation.
// This mask provides operations for any number of real bits set in an abstract
// bit. To add iteration on top of that, the implementation must guarantee no
// more than one real bit is set in an abstract bit.
template <class T, int SignificantBits, int Shift = 0>
class NonIterableBitMask {
 public:
  explicit NonIterableBitMask(T mask) : mask_(mask) {}

  explicit operator bool() const { return this->mask_ != 0; }

  // Returns the index of the lowest *abstract* bit set in `self`.
  uint32_t LowestBitSet() const {
    return container_internal::TrailingZeros(mask_) >> Shift;
  }

  // Returns the index of the highest *abstract* bit set in `self`.
  uint32_t HighestBitSet() const {
    return static_cast<uint32_t>((bit_width(mask_) - 1) >> Shift);
  }

  // Returns the number of trailing zero *abstract* bits.
  uint32_t TrailingZeros() const {
    return container_internal::TrailingZeros(mask_) >> Shift;
  }

  // Returns the number of leading zero *abstract* bits.
  uint32_t LeadingZeros() const {
    constexpr int total_significant_bits = SignificantBits << Shift;
    constexpr int extra_bits = sizeof(T) * 8 - total_significant_bits;
    return static_cast<uint32_t>(countl_zero(mask_ << extra_bits)) >> Shift;
  }

  T mask_;
};

// An abstract bitmask over which we can iterate.
//
// For example, when `SignificantBits` is 16 and `Shift` is zero, this is just
// an ordinary 16-bit bitset occupying the low 16 bits of `mask`. When
// `SignificantBits` is 8 and `Shift` is 3, abstract bits are represented as
// the bytes `0x00` and `0x80`, and it occupies all 64 bits of the bitmask.
//
// For example:
//   for (int i : BitMask<uint32_t, 16>(0b101)) -> yields 0, 2
//   for (int i : BitMask<uint64_t, 8, 3>(0x0000000080800000)) -> yields 2, 3
template <class T, int SignificantBits, int Shift = 0>
class BitMask : public NonIterableBitMask<T, SignificantBits, Shift> {
  using Base = NonIterableBitMask<T, SignificantBits, Shift>;
  static_assert(std::is_unsigned<T>::value, "");
  static_assert(Shift == 0 || Shift == 3, "");

 public:
  explicit BitMask(T mask) : Base(mask) {}
  // BitMask is an iterator over the indices of its abstract bits.
  using value_type = int;
  using iterator = BitMask;
  using const_iterator = BitMask;

  BitMask& operator++() {
    this->mask_ &= (this->mask_ - 1);
    return *this;
  }

  uint32_t operator*() const { return Base::LowestBitSet(); }

  BitMask begin() const { return *this; }
  BitMask end() const { return BitMask(0); }

 private:
  friend bool operator==(const BitMask& a, const BitMask& b) {
    return a.mask_ == b.mask_;
  }
  friend bool operator!=(const BitMask& a, const BitMask& b) {
    return a.mask_ != b.mask_;
  }
};

using h2_t = uint8_t;

// The values here are selected for maximum performance. See the static asserts
// below for details.

// A `ctrl_t` is a single control byte, which can have one of four
// states: empty, deleted, full (which has an associated seven-bit h2_t value)
// and the sentinel. They have the following bit patterns:
//
//      empty: 1 0 0 0 0 0 0 0
//    deleted: 1 1 1 1 1 1 1 0
//       full: 0 h h h h h h h  // h represents the hash bits.
//   sentinel: 1 1 1 1 1 1 1 1
//
// These values are specifically tuned for SSE-flavored SIMD.
// The static_asserts below detail the source of these choices.
//
// We use an enum class so that when strict aliasing is enabled, the compiler
// knows ctrl_t doesn't alias other types.
enum class ctrl_t : int8_t {
  kEmpty = -128,   // 0b10000000
  kDeleted = -2,   // 0b11111110
  kSentinel = -1,  // 0b11111111
};
static_assert(
    (static_cast<int8_t>(ctrl_t::kEmpty) &
     static_cast<int8_t>(ctrl_t::kDeleted) &
     static_cast<int8_t>(ctrl_t::kSentinel) & 0x80) != 0,
    "Special markers need to have the MSB to make checking for them efficient");
static_assert(
    ctrl_t::kEmpty < ctrl_t::kSentinel && ctrl_t::kDeleted < ctrl_t::kSentinel,
    "ctrl_t::kEmpty and ctrl_t::kDeleted must be smaller than "
    "ctrl_t::kSentinel to make the SIMD test of IsEmptyOrDeleted() efficient");
static_assert(
    ctrl_t::kSentinel == static_cast<ctrl_t>(-1),
    "ctrl_t::kSentinel must be -1 to elide loading it from memory into SIMD "
    "registers (pcmpeqd xmm, xmm)");
static_assert(ctrl_t::kEmpty == static_cast<ctrl_t>(-128),
              "ctrl_t::kEmpty must be -128 to make the SIMD check for its "
              "existence efficient (psignb xmm, xmm)");
static_assert(
    (~static_cast<int8_t>(ctrl_t::kEmpty) &
     ~static_cast<int8_t>(ctrl_t::kDeleted) &
     static_cast<int8_t>(ctrl_t::kSentinel) & 0x7F) != 0,
    "ctrl_t::kEmpty and ctrl_t::kDeleted must share an unset bit that is not "
    "shared by ctrl_t::kSentinel to make the scalar test for "
    "MaskEmptyOrDeleted() efficient");
static_assert(ctrl_t::kDeleted == static_cast<ctrl_t>(-2),
              "ctrl_t::kDeleted must be -2 to make the implementation of "
              "ConvertSpecialToEmptyAndFullToDeleted efficient");

ABSL_DLL extern const ctrl_t kEmptyGroup[16];

// Returns a pointer to a control byte group that can be used by empty tables.
inline ctrl_t* EmptyGroup() {
  // Const must be cast away here; no uses of this function will actually write
  // to it, because it is only used for empty tables.
  return const_cast<ctrl_t*>(kEmptyGroup);
}

// Mixes a randomly generated per-process seed with `hash` and `ctrl` to
// randomize insertion order within groups.
bool ShouldInsertBackwards(size_t hash, const ctrl_t* ctrl);

// Returns a per-table hash salt, which changes on resize. This gets mixed into
// H1 to randomize iteration order per-table.
//
// The salt is derived from the ctrl_ pointer, which adds enough entropy to
// ensure non-determinism of iteration order in most cases.
inline size_t PerTableSalt(const ctrl_t* ctrl) {
  // The low bits of the pointer have little or no entropy because of
  // alignment. We shift the pointer to try to use higher entropy bits. A
  // good number seems to be 12 bits, because that aligns with page size.
  return reinterpret_cast<uintptr_t>(ctrl) >> 12;
}
// Extracts the H1 portion of a hash: 57 bits mixed with a per-table salt.
inline size_t H1(size_t hash, const ctrl_t* ctrl) {
  return (hash >> 7) ^ PerTableSalt(ctrl);
}

// Extracts the H2 portion of a hash: the 7 bits not used for H1.
//
// These are used as an occupied control byte.
inline h2_t H2(size_t hash) { return hash & 0x7F; }

// Helpers for checking the state of a control byte.
inline bool IsEmpty(ctrl_t c) { return c == ctrl_t::kEmpty; }
inline bool IsFull(ctrl_t c) { return c >= static_cast<ctrl_t>(0); }
inline bool IsDeleted(ctrl_t c) { return c == ctrl_t::kDeleted; }
inline bool IsEmptyOrDeleted(ctrl_t c) { return c < ctrl_t::kSentinel; }

#ifdef ABSL_INTERNAL_HAVE_SSE2
// Quick reference guide for intrinsics used below:
//
// * __m128i: An XMM (128-bit) word.
//
// * _mm_setzero_si128: Returns a zero vector.
// * _mm_set1_epi8:     Returns a vector with the same i8 in each lane.
//
// * _mm_subs_epi8:    Saturating-subtracts two i8 vectors.
// * _mm_and_si128:    Ands two i128s together.
// * _mm_or_si128:     Ors two i128s together.
// * _mm_andnot_si128: And-nots two i128s together.
//
// * _mm_cmpeq_epi8: Component-wise compares two i8 vectors for equality,
//                   filling each lane with 0x00 or 0xff.
// * _mm_cmpgt_epi8: Same as above, but using > rather than ==.
//
// * _mm_loadu_si128:  Performs an unaligned load of an i128.
// * _mm_storeu_si128: Performs an unaligned store of an i128.
//
// * _mm_sign_epi8:     Retains, negates, or zeroes each i8 lane of the first
//                      argument if the corresponding lane of the second
//                      argument is positive, negative, or zero, respectively.
// * _mm_movemask_epi8: Selects the sign bit out of each i8 lane and produces a
//                      bitmask consisting of those bits.
// * _mm_shuffle_epi8:  Selects i8s from the first argument, using the low
//                      four bits of each i8 lane in the second argument as
//                      indices.

// https://github.com/abseil/abseil-cpp/issues/209
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=87853
// _mm_cmpgt_epi8 is broken under GCC with -funsigned-char.
// Work around this by using the portable implementation of Group
// when using -funsigned-char under GCC.
inline __m128i _mm_cmpgt_epi8_fixed(__m128i a, __m128i b) {
#if defined(__GNUC__) && !defined(__clang__)
  if (std::is_unsigned<char>::value) {
    const __m128i mask = _mm_set1_epi8(0x80);
    const __m128i diff = _mm_subs_epi8(b, a);
    return _mm_cmpeq_epi8(_mm_and_si128(diff, mask), mask);
  }
#endif
  return _mm_cmpgt_epi8(a, b);
}

struct GroupSse2Impl {
  static constexpr size_t kWidth = 16;  // the number of slots per group

  explicit GroupSse2Impl(const ctrl_t* pos) {
    ctrl = _mm_loadu_si128(reinterpret_cast<const __m128i*>(pos));
  }

  // Returns a bitmask representing the positions of slots that match hash.
  BitMask<uint32_t, kWidth> Match(h2_t hash) const {
    auto match = _mm_set1_epi8(static_cast<char>(hash));
    return BitMask<uint32_t, kWidth>(
        static_cast<uint32_t>(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl))));
  }

  // Returns a bitmask representing the positions of empty slots.
  NonIterableBitMask<uint32_t, kWidth> MaskEmpty() const {
#ifdef ABSL_INTERNAL_HAVE_SSSE3
    // This only works because ctrl_t::kEmpty is -128.
    return NonIterableBitMask<uint32_t, kWidth>(
        static_cast<uint32_t>(_mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl))));
#else
    auto match = _mm_set1_epi8(static_cast<char>(ctrl_t::kEmpty));
    return NonIterableBitMask<uint32_t, kWidth>(
        static_cast<uint32_t>(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl))));
#endif
  }

  // Returns a bitmask representing the positions of empty or deleted slots.
  NonIterableBitMask<uint32_t, kWidth> MaskEmptyOrDeleted() const {
    auto special = _mm_set1_epi8(static_cast<char>(ctrl_t::kSentinel));
    return NonIterableBitMask<uint32_t, kWidth>(static_cast<uint32_t>(
        _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl))));
  }

  // Returns the number of consecutive empty or deleted bytes at the start of
  // the group (the trailing bits of the movemask).
  uint32_t CountLeadingEmptyOrDeleted() const {
    auto special = _mm_set1_epi8(static_cast<char>(ctrl_t::kSentinel));
    return TrailingZeros(static_cast<uint32_t>(
        _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)) + 1));
  }

  void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
    auto msbs = _mm_set1_epi8(static_cast<char>(-128));
    auto x126 = _mm_set1_epi8(126);
#ifdef ABSL_INTERNAL_HAVE_SSSE3
    auto res = _mm_or_si128(_mm_shuffle_epi8(x126, ctrl), msbs);
#else
    auto zero = _mm_setzero_si128();
    auto special_mask = _mm_cmpgt_epi8_fixed(zero, ctrl);
    auto res = _mm_or_si128(msbs, _mm_andnot_si128(special_mask, x126));
#endif
    _mm_storeu_si128(reinterpret_cast<__m128i*>(dst), res);
  }

  __m128i ctrl;
};
#endif  // ABSL_INTERNAL_HAVE_SSE2

#if defined(ABSL_INTERNAL_HAVE_ARM_NEON) && defined(ABSL_IS_LITTLE_ENDIAN)
struct GroupAArch64Impl {
  static constexpr size_t kWidth = 8;

  explicit GroupAArch64Impl(const ctrl_t* pos) {
    ctrl = vld1_u8(reinterpret_cast<const uint8_t*>(pos));
  }

  BitMask<uint64_t, kWidth, 3> Match(h2_t hash) const {
    uint8x8_t dup = vdup_n_u8(hash);
    auto mask = vceq_u8(ctrl, dup);
    constexpr uint64_t msbs = 0x8080808080808080ULL;
    return BitMask<uint64_t, kWidth, 3>(
        vget_lane_u64(vreinterpret_u64_u8(mask), 0) & msbs);
  }

  NonIterableBitMask<uint64_t, kWidth, 3> MaskEmpty() const {
    uint64_t mask =
        vget_lane_u64(vreinterpret_u64_u8(vceq_s8(
                          vdup_n_s8(static_cast<int8_t>(ctrl_t::kEmpty)),
                          vreinterpret_s8_u8(ctrl))),
                      0);
    return NonIterableBitMask<uint64_t, kWidth, 3>(mask);
  }

  NonIterableBitMask<uint64_t, kWidth, 3> MaskEmptyOrDeleted() const {
    uint64_t mask =
        vget_lane_u64(vreinterpret_u64_u8(vcgt_s8(
                          vdup_n_s8(static_cast<int8_t>(ctrl_t::kSentinel)),
                          vreinterpret_s8_u8(ctrl))),
                      0);
    return NonIterableBitMask<uint64_t, kWidth, 3>(mask);
  }

  uint32_t CountLeadingEmptyOrDeleted() const {
    uint64_t mask = vget_lane_u64(vreinterpret_u64_u8(ctrl), 0);
    // ctrl | ~(ctrl >> 7) will have the lowest bit set to zero for kEmpty and
    // kDeleted. We lower all other bits and count number of trailing zeros.
    // Clang and GCC optimize countr_zero to rbit+clz without any check for 0,
    // so we should be fine.
    constexpr uint64_t bits = 0x0101010101010101ULL;
    return static_cast<uint32_t>(countr_zero((mask | ~(mask >> 7)) & bits) >>
                                 3);
  }

  void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
    uint64_t mask = vget_lane_u64(vreinterpret_u64_u8(ctrl), 0);
    constexpr uint64_t msbs = 0x8080808080808080ULL;
    constexpr uint64_t lsbs = 0x0101010101010101ULL;
    auto x = mask & msbs;
    auto res = (~x + (x >> 7)) & ~lsbs;
    little_endian::Store64(dst, res);
  }

  uint8x8_t ctrl;
};
#endif  // ABSL_INTERNAL_HAVE_ARM_NEON && ABSL_IS_LITTLE_ENDIAN

struct GroupPortableImpl {
  static constexpr size_t kWidth = 8;

  explicit GroupPortableImpl(const ctrl_t* pos)
      : ctrl(little_endian::Load64(pos)) {}

  BitMask<uint64_t, kWidth, 3> Match(h2_t hash) const {
    // For the technique, see:
    // http://graphics.stanford.edu/~seander/bithacks.html#ValueInWord
    // (Determine if a word has a byte equal to n).
    //
    // Caveat: there are false positives but:
    // - they only occur if there is a real match
    // - they never occur on ctrl_t::kEmpty, ctrl_t::kDeleted, ctrl_t::kSentinel
    // - they will be handled gracefully by subsequent checks in code
    //
    // Example:
    //   v = 0x1716151413121110
    //   hash = 0x12
    //   retval = (v - lsbs) & ~v & msbs = 0x0000000080800000
    constexpr uint64_t msbs = 0x8080808080808080ULL;
    constexpr uint64_t lsbs = 0x0101010101010101ULL;
    auto x = ctrl ^ (lsbs * hash);
    return BitMask<uint64_t, kWidth, 3>((x - lsbs) & ~x & msbs);
  }

  NonIterableBitMask<uint64_t, kWidth, 3> MaskEmpty() const {
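    // A byte is kEmpty iff its MSB is set and its bit 1 is clear: among the
    // special values only kEmpty (0b10000000) has bit 1 == 0, and full bytes
    // have MSB == 0. `~ctrl << 6` moves the complement of each byte's bit 1
    // into its MSB position.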
    constexpr uint64_t msbs = 0x8080808080808080ULL;
    return NonIterableBitMask<uint64_t, kWidth, 3>((ctrl & (~ctrl << 6)) &
                                                   msbs);
  }

  NonIterableBitMask<uint64_t, kWidth, 3> MaskEmptyOrDeleted() const {
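    // The same trick with bit 0: kEmpty and kDeleted both have bit 0 == 0,
    // while kSentinel (0b11111111) has bit 0 == 1 and full bytes have
    // MSB == 0, so only empty and deleted bytes survive the mask.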
    constexpr uint64_t msbs = 0x8080808080808080ULL;
    return NonIterableBitMask<uint64_t, kWidth, 3>((ctrl & (~ctrl << 7)) &
                                                   msbs);
  }

  uint32_t CountLeadingEmptyOrDeleted() const {
    // ctrl | ~(ctrl >> 7) will have the lowest bit set to zero for kEmpty and
    // kDeleted. We lower all other bits and count number of trailing zeros.
    constexpr uint64_t bits = 0x0101010101010101ULL;
    return static_cast<uint32_t>(countr_zero((ctrl | ~(ctrl >> 7)) & bits) >>
                                 3);
  }

  void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
    constexpr uint64_t msbs = 0x8080808080808080ULL;
    constexpr uint64_t lsbs = 0x0101010101010101ULL;
    auto x = ctrl & msbs;
    auto res = (~x + (x >> 7)) & ~lsbs;
    little_endian::Store64(dst, res);
  }

  uint64_t ctrl;
};

#ifdef ABSL_INTERNAL_HAVE_SSE2
using Group = GroupSse2Impl;
#elif defined(ABSL_INTERNAL_HAVE_ARM_NEON) && defined(ABSL_IS_LITTLE_ENDIAN)
using Group = GroupAArch64Impl;
#else
using Group = GroupPortableImpl;
#endif

// Returns the number of "cloned control bytes".
//
// This is the number of control bytes that are present both at the beginning
// of the control byte array and at the end, such that we can create a
// `Group::kWidth`-width probe window starting from any control byte.
constexpr size_t NumClonedBytes() { return Group::kWidth - 1; }

template <class Policy, class Hash, class Eq, class Alloc>
class raw_hash_set;

// Returns whether `n` is a valid capacity (i.e., number of slots).
//
// A valid capacity is a non-zero integer `2^m - 1`.
inline bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; }

// Applies the following mapping to every byte in the control array:
//   * kDeleted -> kEmpty
//   * kEmpty -> kEmpty
//   * _ -> kDeleted
// PRECONDITION:
//   IsValidCapacity(capacity)
//   ctrl[capacity] == ctrl_t::kSentinel
//   ctrl[i] != ctrl_t::kSentinel for all i < capacity
void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity);

// Converts `n` into the next valid capacity, per `IsValidCapacity`.
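// For example, on a 64-bit platform: NormalizeCapacity(0) == 1,
// NormalizeCapacity(4) == 7, and NormalizeCapacity(8) == 15.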
inline size_t NormalizeCapacity(size_t n) {
  return n ? ~size_t{} >> countl_zero(n) : 1;
}

// General notes on capacity/growth methods below:
// - We use 7/8th as maximum load factor. For 16-wide groups, that gives an
//   average of two empty slots per group.
// - For (capacity + 1) >= Group::kWidth, growth is 7/8 * capacity.
// - For (capacity + 1) < Group::kWidth, growth == capacity. In this case, we
//   never need to probe (the whole table fits in one group) so we don't need
//   a load factor less than 1.

// Given `capacity`, applies the load factor; i.e., it returns the maximum
// number of values we should put into the table before a resizing rehash.
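// For example, CapacityToGrowth(15) == 14 and CapacityToGrowth(63) == 56: a
// capacity-63 table can hold 56 values before it must grow.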
inline size_t CapacityToGrowth(size_t capacity) {
  assert(IsValidCapacity(capacity));
  // `capacity*7/8`
  if (Group::kWidth == 8 && capacity == 7) {
    // x-x/8 does not work when x==7.
    return 6;
  }
  return capacity - capacity / 8;
}

// Given `growth`, "unapplies" the load factor to find how large the capacity
// should be to stay within the load factor.
//
// This might not be a valid capacity and `NormalizeCapacity()` should be
// called on this.
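// For example, GrowthToLowerboundCapacity(56) == 63; since
// NormalizeCapacity(63) == 63, a reserve of 56 values ends up with a
// capacity of 63.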
inline size_t GrowthToLowerboundCapacity(size_t growth) {
  // `growth*8/7`
  if (Group::kWidth == 8 && growth == 7) {
    // x+(x-1)/7 does not work when x==7.
    return 8;
  }
  return growth + static_cast<size_t>((static_cast<int64_t>(growth) - 1) / 7);
}

template <class InputIter>
size_t SelectBucketCountForIterRange(InputIter first, InputIter last,
                                     size_t bucket_count) {
  if (bucket_count != 0) {
    return bucket_count;
  }
  using InputIterCategory =
      typename std::iterator_traits<InputIter>::iterator_category;
  if (std::is_base_of<std::random_access_iterator_tag,
                      InputIterCategory>::value) {
    return GrowthToLowerboundCapacity(
        static_cast<size_t>(std::distance(first, last)));
  }
  return 0;
}

#define ABSL_INTERNAL_ASSERT_IS_FULL(ctrl, operation)                         \
  do {                                                                        \
    ABSL_HARDENING_ASSERT(                                                    \
        (ctrl != nullptr) && operation                                        \
        " called on invalid iterator. The iterator might be an end() "        \
        "iterator or may have been default constructed.");                    \
    ABSL_HARDENING_ASSERT(                                                    \
        (IsFull(*ctrl)) && operation                                          \
        " called on invalid iterator. The element might have been erased or " \
        "the table might have rehashed.");                                    \
  } while (0)

inline void AssertIsValid(ctrl_t* ctrl) {
  ABSL_HARDENING_ASSERT((ctrl == nullptr || IsFull(*ctrl)) &&
                        "Invalid operation on iterator. The element might have "
                        "been erased or the table might have rehashed.");
}

struct FindInfo {
  size_t offset;
  size_t probe_length;
};

// Whether a table is "small". A small table fits entirely into a probing
// group, i.e., has a capacity < `Group::kWidth`.
//
// In small mode we are able to use the whole capacity. The extra control
// bytes give us at least one "empty" control byte to stop the iteration.
// This is important to make 1 a valid capacity.
//
// In small mode only the first `capacity` control bytes after the sentinel
// are valid. The rest contain dummy ctrl_t::kEmpty values that do not
// represent a real slot. This is important to take into account on
// `find_first_non_full()`, where we never try
// `ShouldInsertBackwards()` for small tables.
inline bool is_small(size_t capacity) { return capacity < Group::kWidth - 1; }

// Begins a probing operation on `ctrl`, using `hash`.
inline probe_seq<Group::kWidth> probe(const ctrl_t* ctrl, size_t hash,
                                      size_t capacity) {
  return probe_seq<Group::kWidth>(H1(hash, ctrl), capacity);
}

// Probes an array of control bytes using a probe sequence derived from `hash`,
// and returns the offset corresponding to the first deleted or empty slot.
//
// Behavior when the entire table is full is undefined.
//
// NOTE: this function must work with tables having both empty and deleted
// slots in the same group. Such tables appear during `erase()`.
template <typename = void>
inline FindInfo find_first_non_full(const ctrl_t* ctrl, size_t hash,
                                    size_t capacity) {
  auto seq = probe(ctrl, hash, capacity);
  while (true) {
    Group g{ctrl + seq.offset()};
    auto mask = g.MaskEmptyOrDeleted();
    if (mask) {
#if !defined(NDEBUG)
      // We want to add entropy even when ASLR is not enabled.
      // In debug builds we will randomly insert at either the front or back of
      // the group.
      // TODO(kfm,sbenza): revisit after we do unconditional mixing
      if (!is_small(capacity) && ShouldInsertBackwards(hash, ctrl)) {
        return {seq.offset(mask.HighestBitSet()), seq.index()};
      }
#endif
      return {seq.offset(mask.LowestBitSet()), seq.index()};
    }
    seq.next();
    assert(seq.index() <= capacity && "full table!");
  }
}

// An extern template declaration for this inline function keeps the
// possibility of inlining open. When the compiler decides not to inline it,
// no symbols are added to the calling translation unit.
extern template FindInfo find_first_non_full(const ctrl_t*, size_t, size_t);

// Sets all control bytes to `kEmpty` (with the sentinel placed at
// `ctrl[capacity]`), marking the entire array as empty.
inline void ResetCtrl(size_t capacity, ctrl_t* ctrl, const void* slot,
                      size_t slot_size) {
  std::memset(ctrl, static_cast<int8_t>(ctrl_t::kEmpty),
              capacity + 1 + NumClonedBytes());
  ctrl[capacity] = ctrl_t::kSentinel;
  SanitizerPoisonMemoryRegion(slot, slot_size * capacity);
}

// Sets `ctrl[i]` to `h`.
//
// Unlike setting it directly, this function will perform bounds checks and
// mirror the value to the cloned tail if necessary.
inline void SetCtrl(size_t i, ctrl_t h, size_t capacity, ctrl_t* ctrl,
                    const void* slot, size_t slot_size) {
  assert(i < capacity);

  auto* slot_i = static_cast<const char*>(slot) + i * slot_size;
  if (IsFull(h)) {
    SanitizerUnpoisonMemoryRegion(slot_i, slot_size);
  } else {
    SanitizerPoisonMemoryRegion(slot_i, slot_size);
  }

  ctrl[i] = h;
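  // The store below mirrors `h` into the cloned tail: whenever the clone
  // exists (i < NumClonedBytes()), the index arithmetic resolves to
  // `i + capacity + 1` (e.g. for capacity 15, i == 3 also writes ctrl[19]);
  // otherwise it harmlessly rewrites ctrl[i] itself.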
  ctrl[((i - NumClonedBytes()) & capacity) + (NumClonedBytes() & capacity)] = h;
}

// Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`.
inline void SetCtrl(size_t i, h2_t h, size_t capacity, ctrl_t* ctrl,
                    const void* slot, size_t slot_size) {
  SetCtrl(i, static_cast<ctrl_t>(h), capacity, ctrl, slot, slot_size);
}

// Given the capacity of a table, computes the offset (from the start of the
// backing allocation) at which the slots begin.
inline size_t SlotOffset(size_t capacity, size_t slot_align) {
  assert(IsValidCapacity(capacity));
  const size_t num_control_bytes = capacity + 1 + NumClonedBytes();
  return (num_control_bytes + slot_align - 1) & (~slot_align + 1);
}

// Given the capacity of a table, computes the total size of the backing
// array.
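// For example, with capacity 15, slot_size 8, and slot_align 8: there are
// 15 + 1 + 15 == 31 control bytes, SlotOffset() rounds this up to 32, and the
// total allocation is 32 + 15 * 8 == 152 bytes.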
inline size_t AllocSize(size_t capacity, size_t slot_size, size_t slot_align) {
  return SlotOffset(capacity, slot_align) + capacity * slot_size;
}

// A SwissTable.
//
// Policy: a policy defines how to perform different operations on
// the slots of the hashtable (see hash_policy_traits.h for the full interface
// of policy).
//
// Hash: a (possibly polymorphic) functor that hashes keys of the hashtable. The
// functor should accept a key and return size_t as hash. For best performance
// it is important that the hash function provides high entropy across all bits
// of the hash.
//
// Eq: a (possibly polymorphic) functor that compares two keys for equality. It
// should accept two (of possibly different type) keys and return a bool: true
// if they are equal, false if they are not. If two keys compare equal, then
// their hash values as defined by Hash MUST be equal.
//
// Allocator: an Allocator
// [https://en.cppreference.com/w/cpp/named_req/Allocator] with which
// the storage of the hashtable will be allocated and the elements will be
// constructed and destroyed.
template <class Policy, class Hash, class Eq, class Alloc>
class raw_hash_set {
  using PolicyTraits = hash_policy_traits<Policy>;
  using KeyArgImpl =
      KeyArg<IsTransparent<Eq>::value && IsTransparent<Hash>::value>;

 public:
  using init_type = typename PolicyTraits::init_type;
  using key_type = typename PolicyTraits::key_type;
  // TODO(sbenza): Hide slot_type as it is an implementation detail. Needs user
  // code fixes!
  using slot_type = typename PolicyTraits::slot_type;
  using allocator_type = Alloc;
  using size_type = size_t;
  using difference_type = ptrdiff_t;
  using hasher = Hash;
  using key_equal = Eq;
  using policy_type = Policy;
  using value_type = typename PolicyTraits::value_type;
  using reference = value_type&;
  using const_reference = const value_type&;
  using pointer = typename absl::allocator_traits<
      allocator_type>::template rebind_traits<value_type>::pointer;
  using const_pointer = typename absl::allocator_traits<
      allocator_type>::template rebind_traits<value_type>::const_pointer;

  // Alias used for heterogeneous lookup functions.
  // `key_arg<K>` evaluates to `K` when the functors are transparent and to
  // `key_type` otherwise. It permits template argument deduction on `K` for
  // the transparent case.
  template <class K>
  using key_arg = typename KeyArgImpl::template type<K, key_type>;

 private:
  // Give an early error when key_type is not hashable/eq.
  auto KeyTypeCanBeHashed(const Hash& h, const key_type& k) -> decltype(h(k));
  auto KeyTypeCanBeEq(const Eq& eq, const key_type& k) -> decltype(eq(k, k));

  using AllocTraits = absl::allocator_traits<allocator_type>;
  using SlotAlloc = typename absl::allocator_traits<
      allocator_type>::template rebind_alloc<slot_type>;
  using SlotAllocTraits = typename absl::allocator_traits<
      allocator_type>::template rebind_traits<slot_type>;

  static_assert(std::is_lvalue_reference<reference>::value,
                "Policy::element() must return a reference");

  template <typename T>
  struct SameAsElementReference
      : std::is_same<typename std::remove_cv<
                         typename std::remove_reference<reference>::type>::type,
                     typename std::remove_cv<
                         typename std::remove_reference<T>::type>::type> {};

  // An enabler for insert(T&&): T must be convertible to init_type or be the
  // same as [cv] value_type [ref].
  // Note: we separate SameAsElementReference into its own type to avoid using
  // reference unless we need to. MSVC doesn't seem to like it in some
  // cases.
  template <class T>
  using RequiresInsertable = typename std::enable_if<
      absl::disjunction<std::is_convertible<T, init_type>,
                        SameAsElementReference<T>>::value,
      int>::type;

  // RequiresNotInit is a workaround for gcc prior to 7.1.
  // See https://godbolt.org/g/Y4xsUh.
  template <class T>
  using RequiresNotInit =
      typename std::enable_if<!std::is_same<T, init_type>::value, int>::type;

  template <class... Ts>
  using IsDecomposable = IsDecomposable<void, PolicyTraits, Hash, Eq, Ts...>;

 public:
  static_assert(std::is_same<pointer, value_type*>::value,
                "Allocators with custom pointer types are not supported");
  static_assert(std::is_same<const_pointer, const value_type*>::value,
                "Allocators with custom pointer types are not supported");

  class iterator {
    friend class raw_hash_set;

   public:
    using iterator_category = std::forward_iterator_tag;
    using value_type = typename raw_hash_set::value_type;
    using reference =
        absl::conditional_t<PolicyTraits::constant_iterators::value,
                            const value_type&, value_type&>;
    using pointer = absl::remove_reference_t<reference>*;
    using difference_type = typename raw_hash_set::difference_type;

    iterator() {}

    // PRECONDITION: not an end() iterator.
    reference operator*() const {
      ABSL_INTERNAL_ASSERT_IS_FULL(ctrl_, "operator*()");
      return PolicyTraits::element(slot_);
    }

    // PRECONDITION: not an end() iterator.
    pointer operator->() const {
      ABSL_INTERNAL_ASSERT_IS_FULL(ctrl_, "operator->");
      return &operator*();
    }

    // PRECONDITION: not an end() iterator.
    iterator& operator++() {
      ABSL_INTERNAL_ASSERT_IS_FULL(ctrl_, "operator++");
      ++ctrl_;
      ++slot_;
      skip_empty_or_deleted();
      return *this;
    }
    // PRECONDITION: not an end() iterator.
    iterator operator++(int) {
      auto tmp = *this;
      ++*this;
      return tmp;
    }

    friend bool operator==(const iterator& a, const iterator& b) {
      AssertIsValid(a.ctrl_);
      AssertIsValid(b.ctrl_);
      return a.ctrl_ == b.ctrl_;
    }
    friend bool operator!=(const iterator& a, const iterator& b) {
      return !(a == b);
    }

   private:
    iterator(ctrl_t* ctrl, slot_type* slot) : ctrl_(ctrl), slot_(slot) {
      // This assumption helps the compiler know that any non-end iterator is
      // not equal to any end iterator.
      ABSL_ASSUME(ctrl != nullptr);
    }

    // Fixes up `ctrl_` to point to a full slot by advancing it and `slot_`
    // until they reach one.
    //
    // If a sentinel is reached, we null `ctrl_` out instead.
    void skip_empty_or_deleted() {
      while (IsEmptyOrDeleted(*ctrl_)) {
        uint32_t shift = Group{ctrl_}.CountLeadingEmptyOrDeleted();
        ctrl_ += shift;
        slot_ += shift;
      }
      if (ABSL_PREDICT_FALSE(*ctrl_ == ctrl_t::kSentinel)) ctrl_ = nullptr;
    }

    ctrl_t* ctrl_ = nullptr;
    // To avoid uninitialized member warnings, put slot_ in an anonymous union.
    // The member is not initialized on singleton and end iterators.
    union {
      slot_type* slot_;
    };
  };

  class const_iterator {
    friend class raw_hash_set;

   public:
    using iterator_category = typename iterator::iterator_category;
    using value_type = typename raw_hash_set::value_type;
    using reference = typename raw_hash_set::const_reference;
    using pointer = typename raw_hash_set::const_pointer;
    using difference_type = typename raw_hash_set::difference_type;

    const_iterator() {}
    // Implicit construction from iterator.
    const_iterator(iterator i) : inner_(std::move(i)) {}

    reference operator*() const { return *inner_; }
    pointer operator->() const { return inner_.operator->(); }

    const_iterator& operator++() {
      ++inner_;
      return *this;
    }
    const_iterator operator++(int) { return inner_++; }

    friend bool operator==(const const_iterator& a, const const_iterator& b) {
      return a.inner_ == b.inner_;
    }
    friend bool operator!=(const const_iterator& a, const const_iterator& b) {
      return !(a == b);
    }

   private:
    const_iterator(const ctrl_t* ctrl, const slot_type* slot)
        : inner_(const_cast<ctrl_t*>(ctrl), const_cast<slot_type*>(slot)) {}

    iterator inner_;
  };

  using node_type = node_handle<Policy, hash_policy_traits<Policy>, Alloc>;
  using insert_return_type = InsertReturnType<iterator, node_type>;

  raw_hash_set() noexcept(
      std::is_nothrow_default_constructible<hasher>::value&&
      std::is_nothrow_default_constructible<key_equal>::value&&
      std::is_nothrow_default_constructible<allocator_type>::value) {}

  explicit raw_hash_set(size_t bucket_count,
                        const hasher& hash = hasher(),
                        const key_equal& eq = key_equal(),
                        const allocator_type& alloc = allocator_type())
      : ctrl_(EmptyGroup()),
        settings_(0u, HashtablezInfoHandle(), hash, eq, alloc) {
    if (bucket_count) {
      capacity_ = NormalizeCapacity(bucket_count);
      initialize_slots();
    }
  }

  raw_hash_set(size_t bucket_count, const hasher& hash,
               const allocator_type& alloc)
      : raw_hash_set(bucket_count, hash, key_equal(), alloc) {}

  raw_hash_set(size_t bucket_count, const allocator_type& alloc)
      : raw_hash_set(bucket_count, hasher(), key_equal(), alloc) {}

  explicit raw_hash_set(const allocator_type& alloc)
      : raw_hash_set(0, hasher(), key_equal(), alloc) {}

  template <class InputIter>
  raw_hash_set(InputIter first, InputIter last, size_t bucket_count = 0,
               const hasher& hash = hasher(), const key_equal& eq = key_equal(),
               const allocator_type& alloc = allocator_type())
      : raw_hash_set(SelectBucketCountForIterRange(first, last, bucket_count),
                     hash, eq, alloc) {
    insert(first, last);
  }

  template <class InputIter>
  raw_hash_set(InputIter first, InputIter last, size_t bucket_count,
               const hasher& hash, const allocator_type& alloc)
      : raw_hash_set(first, last, bucket_count, hash, key_equal(), alloc) {}

  template <class InputIter>
  raw_hash_set(InputIter first, InputIter last, size_t bucket_count,
               const allocator_type& alloc)
      : raw_hash_set(first, last, bucket_count, hasher(), key_equal(), alloc) {}

  template <class InputIter>
  raw_hash_set(InputIter first, InputIter last, const allocator_type& alloc)
      : raw_hash_set(first, last, 0, hasher(), key_equal(), alloc) {}

  // Instead of accepting std::initializer_list<value_type> as the first
  // argument like std::unordered_set<value_type> does, we have two overloads
  // that accept std::initializer_list<T> and std::initializer_list<init_type>.
  // This is advantageous for performance.
  //
  //   // Turns {"abc", "def"} into std::initializer_list<std::string>, then
  //   // copies the strings into the set.
  //   std::unordered_set<std::string> s = {"abc", "def"};
  //
  //   // Turns {"abc", "def"} into std::initializer_list<const char*>, then
  //   // copies the strings into the set.
  //   absl::flat_hash_set<std::string> s = {"abc", "def"};
  //
  // The same trick is used in insert().
  //
  // The enabler is necessary to prevent this constructor from triggering where
  // the copy constructor is meant to be called.
  //
  //   absl::flat_hash_set<int> a, b{a};
  //
  // RequiresNotInit<T> is a workaround for gcc prior to 7.1.
  template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
  raw_hash_set(std::initializer_list<T> init, size_t bucket_count = 0,
               const hasher& hash = hasher(), const key_equal& eq = key_equal(),
               const allocator_type& alloc = allocator_type())
      : raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) {}

  raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count = 0,
               const hasher& hash = hasher(), const key_equal& eq = key_equal(),
               const allocator_type& alloc = allocator_type())
      : raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) {}

  template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
  raw_hash_set(std::initializer_list<T> init, size_t bucket_count,
               const hasher& hash, const allocator_type& alloc)
      : raw_hash_set(init, bucket_count, hash, key_equal(), alloc) {}

  raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count,
               const hasher& hash, const allocator_type& alloc)
      : raw_hash_set(init, bucket_count, hash, key_equal(), alloc) {}

  template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
  raw_hash_set(std::initializer_list<T> init, size_t bucket_count,
               const allocator_type& alloc)
      : raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {}

  raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count,
               const allocator_type& alloc)
      : raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {}

  template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
  raw_hash_set(std::initializer_list<T> init, const allocator_type& alloc)
      : raw_hash_set(init, 0, hasher(), key_equal(), alloc) {}

  raw_hash_set(std::initializer_list<init_type> init,
               const allocator_type& alloc)
      : raw_hash_set(init, 0, hasher(), key_equal(), alloc) {}

  raw_hash_set(const raw_hash_set& that)
      : raw_hash_set(that, AllocTraits::select_on_container_copy_construction(
                               that.alloc_ref())) {}

  raw_hash_set(const raw_hash_set& that, const allocator_type& a)
      : raw_hash_set(0, that.hash_ref(), that.eq_ref(), a) {
    reserve(that.size());
    // Because the table is guaranteed to be empty, we can do something faster
    // than a full `insert`.
    for (const auto& v : that) {
      const size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, v);
      auto target = find_first_non_full(ctrl_, hash, capacity_);
      SetCtrl(target.offset, H2(hash), capacity_, ctrl_, slots_,
              sizeof(slot_type));
      emplace_at(target.offset, v);
      infoz().RecordInsert(hash, target.probe_length);
    }
    size_ = that.size();
    growth_left() -= that.size();
  }

  raw_hash_set(raw_hash_set&& that) noexcept(
      std::is_nothrow_copy_constructible<hasher>::value&&
      std::is_nothrow_copy_constructible<key_equal>::value&&
      std::is_nothrow_copy_constructible<allocator_type>::value)
      : ctrl_(absl::exchange(that.ctrl_, EmptyGroup())),
        slots_(absl::exchange(that.slots_, nullptr)),
        size_(absl::exchange(that.size_, size_t{0})),
        capacity_(absl::exchange(that.capacity_, size_t{0})),
        // Hash, equality and allocator are copied instead of moved because
        // `that` must be left valid. If Hash is std::function<Key>, moving it
        // would create a nullptr functor that cannot be called.
        settings_(absl::exchange(that.growth_left(), size_t{0}),
                  absl::exchange(that.infoz(), HashtablezInfoHandle()),
                  that.hash_ref(), that.eq_ref(), that.alloc_ref()) {}

  raw_hash_set(raw_hash_set&& that, const allocator_type& a)
      : ctrl_(EmptyGroup()),
        slots_(nullptr),
        size_(0),
        capacity_(0),
        settings_(0, HashtablezInfoHandle(), that.hash_ref(), that.eq_ref(),
                  a) {
    if (a == that.alloc_ref()) {
      std::swap(ctrl_, that.ctrl_);
      std::swap(slots_, that.slots_);
      std::swap(size_, that.size_);
      std::swap(capacity_, that.capacity_);
      std::swap(growth_left(), that.growth_left());
      std::swap(infoz(), that.infoz());
    } else {
      reserve(that.size());
      // Note: this will copy elements of dense_set and unordered_set instead
      // of moving them. This can be fixed if it ever becomes an issue.
      for (auto& elem : that) insert(std::move(elem));
    }
  }

  raw_hash_set& operator=(const raw_hash_set& that) {
    raw_hash_set tmp(that,
                     AllocTraits::propagate_on_container_copy_assignment::value
                         ? that.alloc_ref()
                         : alloc_ref());
    swap(tmp);
    return *this;
  }

  raw_hash_set& operator=(raw_hash_set&& that) noexcept(
      absl::allocator_traits<allocator_type>::is_always_equal::value&&
      std::is_nothrow_move_assignable<hasher>::value&&
      std::is_nothrow_move_assignable<key_equal>::value) {
    // TODO(sbenza): We should only use the operations from the noexcept clause
    // to make sure we actually adhere to that contract.
    return move_assign(
        std::move(that),
        typename AllocTraits::propagate_on_container_move_assignment());
  }

  ~raw_hash_set() { destroy_slots(); }

  iterator begin() {
    auto it = iterator_at(0);
    it.skip_empty_or_deleted();
    return it;
  }
  iterator end() { return {}; }

  const_iterator begin() const {
    return const_cast<raw_hash_set*>(this)->begin();
  }
  const_iterator end() const { return {}; }
  const_iterator cbegin() const { return begin(); }
  const_iterator cend() const { return end(); }

  bool empty() const { return !size(); }
  size_t size() const { return size_; }
  size_t capacity() const { return capacity_; }
  size_t max_size() const { return (std::numeric_limits<size_t>::max)(); }

  ABSL_ATTRIBUTE_REINITIALIZES void clear() {
    // Iterating over this container is O(bucket_count()). When bucket_count()
    // is much greater than size(), iteration becomes prohibitively expensive.
    // For clear() it is more important to reuse the allocated array when the
    // container is small, because allocation takes a comparatively long time
    // compared to destroying the elements of the container. So we pick the
    // largest bucket_count() threshold for which iteration is still fast and
    // past that we simply deallocate the array.
    if (capacity_ > 127) {
      destroy_slots();

      infoz().RecordClearedReservation();
    } else if (capacity_) {
      for (size_t i = 0; i != capacity_; ++i) {
        if (IsFull(ctrl_[i])) {
          PolicyTraits::destroy(&alloc_ref(), slots_ + i);
        }
      }
      size_ = 0;
      ResetCtrl(capacity_, ctrl_, slots_, sizeof(slot_type));
      reset_growth_left();
    }
    assert(empty());
    infoz().RecordStorageChanged(0, capacity_);
  }

  // This overload kicks in when the argument is an rvalue of insertable and
  // decomposable type other than init_type.
  //
  //   flat_hash_map<std::string, int> m;
  //   m.insert(std::make_pair("abc", 42));
  // TODO(cheshire): A type alias T2 is introduced as a workaround for the nvcc
  // bug.
  template <class T, RequiresInsertable<T> = 0, class T2 = T,
            typename std::enable_if<IsDecomposable<T2>::value, int>::type = 0,
            T* = nullptr>
  std::pair<iterator, bool> insert(T&& value) {
    return emplace(std::forward<T>(value));
  }

  // This overload kicks in when the argument is a bitfield or an lvalue of
  // insertable and decomposable type.
  //
  //   union { int n : 1; };
  //   flat_hash_set<int> s;
  //   s.insert(n);
  //
  //   flat_hash_set<std::string> s;
  //   const char* p = "hello";
  //   s.insert(p);
  //
  // TODO(romanp): Once we stop supporting gcc 5.1 and below, replace
  // RequiresInsertable<T> with RequiresInsertable<const T&>.
  // We are hitting this bug: https://godbolt.org/g/1Vht4f.
  template <
      class T, RequiresInsertable<T> = 0,
      typename std::enable_if<IsDecomposable<const T&>::value, int>::type = 0>
  std::pair<iterator, bool> insert(const T& value) {
    return emplace(value);
  }

  // This overload kicks in when the argument is an rvalue of init_type. Its
  // purpose is to handle brace-init-list arguments.
  //
  //   flat_hash_map<std::string, int> s;
  //   s.insert({"abc", 42});
  std::pair<iterator, bool> insert(init_type&& value) {
    return emplace(std::move(value));
  }

  // TODO(cheshire): A type alias T2 is introduced as a workaround for the nvcc
  // bug.
  template <class T, RequiresInsertable<T> = 0, class T2 = T,
            typename std::enable_if<IsDecomposable<T2>::value, int>::type = 0,
            T* = nullptr>
  iterator insert(const_iterator, T&& value) {
    return insert(std::forward<T>(value)).first;
  }

  // TODO(romanp): Once we stop supporting gcc 5.1 and below, replace
  // RequiresInsertable<T> with RequiresInsertable<const T&>.
  // We are hitting this bug: https://godbolt.org/g/1Vht4f.
  template <
      class T, RequiresInsertable<T> = 0,
      typename std::enable_if<IsDecomposable<const T&>::value, int>::type = 0>
  iterator insert(const_iterator, const T& value) {
    return insert(value).first;
  }

  iterator insert(const_iterator, init_type&& value) {
    return insert(std::move(value)).first;
  }

  template <class InputIt>
  void insert(InputIt first, InputIt last) {
    for (; first != last; ++first) emplace(*first);
  }

  template <class T, RequiresNotInit<T> = 0, RequiresInsertable<const T&> = 0>
  void insert(std::initializer_list<T> ilist) {
    insert(ilist.begin(), ilist.end());
  }

  void insert(std::initializer_list<init_type> ilist) {
    insert(ilist.begin(), ilist.end());
  }
1461
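// Inserts `node` if its key is not already present. On failure, the element
// is handed back unchanged via `insert_return_type::node`. An illustrative
// sketch using the flat_hash_set wrapper:
//
//   absl::flat_hash_set<std::string> a = {"x"}, b;
//   b.insert(a.extract("x"));  // moves the node for "x" from `a` to `b`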
1462 insert_return_type insert(node_type&& node) {
1463 if (!node) return {end(), false, node_type()};
1464 const auto& elem = PolicyTraits::element(CommonAccess::GetSlot(node));
1465 auto res = PolicyTraits::apply(
1466 InsertSlot<false>{*this, std::move(*CommonAccess::GetSlot(node))},
1467 elem);
1468 if (res.second) {
1469 CommonAccess::Reset(&node);
1470 return {res.first, true, node_type()};
1471 } else {
1472 return {res.first, false, std::move(node)};
1473 }
1474 }
1475
1476 iterator insert(const_iterator, node_type&& node) {
1477 auto res = insert(std::move(node));
1478 node = std::move(res.node);
1479 return res.position;
1480 }
1481
1482 // This overload kicks in if we can deduce the key from args. This enables us
1483 // to avoid constructing value_type if an entry with the same key already
1484 // exists.
1485 //
1486 // For example:
1487 //
1488 // flat_hash_map<std::string, std::string> m = {{"abc", "def"}};
1489 // // Creates no std::string copies and makes no heap allocations.
1490 // m.emplace("abc", "xyz");
1491 template <class... Args, typename std::enable_if<
1492 IsDecomposable<Args...>::value, int>::type = 0>
1493 std::pair<iterator, bool> emplace(Args&&... args) {
1494 return PolicyTraits::apply(EmplaceDecomposable{*this},
1495 std::forward<Args>(args)...);
1496 }
1497
// This overload kicks in if we cannot deduce the key from args. It constructs
// value_type unconditionally and then either moves it into the table or
// destroys it.
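//
// An illustrative sketch: for a flat_hash_set<std::string>, the arguments
// below do not expose a key, so the string is constructed first and then
// destroyed if an equal element already exists.
//
//   absl::flat_hash_set<std::string> s;
//   s.emplace(3, 'a');  // builds std::string(3, 'a') == "aaa", then inserts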
1501 template <class... Args, typename std::enable_if<
1502 !IsDecomposable<Args...>::value, int>::type = 0>
1503 std::pair<iterator, bool> emplace(Args&&... args) {
1504 alignas(slot_type) unsigned char raw[sizeof(slot_type)];
1505 slot_type* slot = reinterpret_cast<slot_type*>(&raw);
1506
1507 PolicyTraits::construct(&alloc_ref(), slot, std::forward<Args>(args)...);
1508 const auto& elem = PolicyTraits::element(slot);
1509 return PolicyTraits::apply(InsertSlot<true>{*this, std::move(*slot)}, elem);
1510 }
1511
1512 template <class... Args>
1513 iterator emplace_hint(const_iterator, Args&&... args) {
1514 return emplace(std::forward<Args>(args)...).first;
1515 }
1516
1517 // Extension API: support for lazy emplace.
1518 //
1519 // Looks up key in the table. If found, returns the iterator to the element.
1520 // Otherwise calls `f` with one argument of type `raw_hash_set::constructor`.
1521 //
1522 // `f` must abide by several restrictions:
//  - it MUST call `raw_hash_set::constructor` with arguments as if
//    constructing a `raw_hash_set::value_type`,
1525 // - it MUST NOT access the container before the call to
1526 // `raw_hash_set::constructor`, and
1527 // - it MUST NOT erase the lazily emplaced element.
1528 // Doing any of these is undefined behavior.
1529 //
1530 // For example:
1531 //
// std::unordered_set<ArenaString> s;
// // Makes ArenaString even if "abc" is in the set.
// s.insert(ArenaString(&arena, "abc"));
//
// flat_hash_set<ArenaString> s;
// // Makes ArenaString only if "abc" is not in the set.
1538 // s.lazy_emplace("abc", [&](const constructor& ctor) {
1539 // ctor(&arena, "abc");
1540 // });
1541 //
1542 // WARNING: This API is currently experimental. If there is a way to implement
1543 // the same thing with the rest of the API, prefer that.
1544 class constructor {
1545 friend class raw_hash_set;
1546
1547 public:
1548 template <class... Args>
1549 void operator()(Args&&... args) const {
1550 assert(*slot_);
1551 PolicyTraits::construct(alloc_, *slot_, std::forward<Args>(args)...);
1552 *slot_ = nullptr;
1553 }
1554
1555 private:
1556 constructor(allocator_type* a, slot_type** slot) : alloc_(a), slot_(slot) {}
1557
1558 allocator_type* alloc_;
1559 slot_type** slot_;
1560 };
1561
1562 template <class K = key_type, class F>
1563 iterator lazy_emplace(const key_arg<K>& key, F&& f) {
1564 auto res = find_or_prepare_insert(key);
1565 if (res.second) {
1566 slot_type* slot = slots_ + res.first;
1567 std::forward<F>(f)(constructor(&alloc_ref(), &slot));
1568 assert(!slot);
1569 }
1570 return iterator_at(res.first);
1571 }
1572
1573 // Extension API: support for heterogeneous keys.
1574 //
1575 // std::unordered_set<std::string> s;
1576 // // Turns "abc" into std::string.
1577 // s.erase("abc");
1578 //
1579 // flat_hash_set<std::string> s;
1580 // // Uses "abc" directly without copying it into std::string.
1581 // s.erase("abc");
1582 template <class K = key_type>
1583 size_type erase(const key_arg<K>& key) {
1584 auto it = find(key);
1585 if (it == end()) return 0;
1586 erase(it);
1587 return 1;
1588 }
1589
1590 // Erases the element pointed to by `it`. Unlike `std::unordered_set::erase`,
1591 // this method returns void to reduce algorithmic complexity to O(1). The
1592 // iterator is invalidated, so any increment should be done before calling
1593 // erase. In order to erase while iterating across a map, use the following
1594 // idiom (which also works for standard containers):
1595 //
1596 // for (auto it = m.begin(), end = m.end(); it != end;) {
1597 // // `erase()` will invalidate `it`, so advance `it` first.
1598 // auto copy_it = it++;
1599 // if (<pred>) {
1600 // m.erase(copy_it);
1601 // }
1602 // }
1603 void erase(const_iterator cit) { erase(cit.inner_); }
1604
// This overload is necessary because otherwise erase<K>(const K&) would be
// a better match if a non-const iterator is passed as an argument.
1607 void erase(iterator it) {
1608 ABSL_INTERNAL_ASSERT_IS_FULL(it.ctrl_, "erase()");
1609 PolicyTraits::destroy(&alloc_ref(), it.slot_);
1610 erase_meta_only(it);
1611 }
1612
1613 iterator erase(const_iterator first, const_iterator last) {
1614 while (first != last) {
1615 erase(first++);
1616 }
1617 return last.inner_;
1618 }
1619
1620 // Moves elements from `src` into `this`.
1621 // If the element already exists in `this`, it is left unmodified in `src`.
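//
// An illustrative sketch using the flat_hash_set wrapper:
//
//   absl::flat_hash_set<int> src = {1, 2};
//   absl::flat_hash_set<int> dst = {2};
//   dst.merge(src);  // dst == {1, 2}; the duplicate 2 stays behind in src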
1622 template <typename H, typename E>
1623 void merge(raw_hash_set<Policy, H, E, Alloc>& src) { // NOLINT
1624 assert(this != &src);
1625 for (auto it = src.begin(), e = src.end(); it != e;) {
1626 auto next = std::next(it);
1627 if (PolicyTraits::apply(InsertSlot<false>{*this, std::move(*it.slot_)},
1628 PolicyTraits::element(it.slot_))
1629 .second) {
1630 src.erase_meta_only(it);
1631 }
1632 it = next;
1633 }
1634 }
1635
1636 template <typename H, typename E>
1637 void merge(raw_hash_set<Policy, H, E, Alloc>&& src) {
1638 merge(src);
1639 }
1640
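// Extracts the element at `position` into a node handle; the element is
// moved out of the table rather than copied. An illustrative sketch:
//
//   absl::flat_hash_set<std::string> s = {"abc"};
//   auto node = s.extract(s.begin());  // s is now empty
//   assert(node.value() == "abc");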
1641 node_type extract(const_iterator position) {
1642 ABSL_INTERNAL_ASSERT_IS_FULL(position.inner_.ctrl_, "extract()");
1643 auto node =
1644 CommonAccess::Transfer<node_type>(alloc_ref(), position.inner_.slot_);
1645 erase_meta_only(position);
1646 return node;
1647 }
1648
1649 template <
1650 class K = key_type,
1651 typename std::enable_if<!std::is_same<K, iterator>::value, int>::type = 0>
1652 node_type extract(const key_arg<K>& key) {
1653 auto it = find(key);
1654 return it == end() ? node_type() : extract(const_iterator{it});
1655 }
1656
1657 void swap(raw_hash_set& that) noexcept(
1658 IsNoThrowSwappable<hasher>() && IsNoThrowSwappable<key_equal>() &&
1659 IsNoThrowSwappable<allocator_type>(
1660 typename AllocTraits::propagate_on_container_swap{})) {
1661 using std::swap;
1662 swap(ctrl_, that.ctrl_);
1663 swap(slots_, that.slots_);
1664 swap(size_, that.size_);
1665 swap(capacity_, that.capacity_);
1666 swap(growth_left(), that.growth_left());
1667 swap(hash_ref(), that.hash_ref());
1668 swap(eq_ref(), that.eq_ref());
1669 swap(infoz(), that.infoz());
1670 SwapAlloc(alloc_ref(), that.alloc_ref(),
1671 typename AllocTraits::propagate_on_container_swap{});
1672 }
1673
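// Rehashes the table so that `bucket_count()` is at least `n`. As noted in
// the comment below, `rehash(0)` rehashes unconditionally (per the standard),
// which also squashes accumulated tombstones. An illustrative sketch:
//
//   s.rehash(0);  // forces a rehash, dropping tombstones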
1674 void rehash(size_t n) {
1675 if (n == 0 && capacity_ == 0) return;
1676 if (n == 0 && size_ == 0) {
1677 destroy_slots();
1678 infoz().RecordStorageChanged(0, 0);
1679 infoz().RecordClearedReservation();
1680 return;
1681 }
1682
1683 // bitor is a faster way of doing `max` here. We will round up to the next
1684 // power-of-2-minus-1, so bitor is good enough.
1685 auto m = NormalizeCapacity(n | GrowthToLowerboundCapacity(size()));
1686 // n == 0 unconditionally rehashes as per the standard.
1687 if (n == 0 || m > capacity_) {
1688 resize(m);
1689
1690 // This is after resize, to ensure that we have completed the allocation
1691 // and have potentially sampled the hashtable.
1692 infoz().RecordReservation(n);
1693 }
1694 }
1695
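// Ensures the table can hold at least `n` elements without rehashing. An
// illustrative sketch:
//
//   absl::flat_hash_set<int> s;
//   s.reserve(1000);  // a single up-front allocation
//   for (int i = 0; i < 1000; ++i) s.insert(i);  // no rehashes occur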
1696 void reserve(size_t n) {
1697 if (n > size() + growth_left()) {
1698 size_t m = GrowthToLowerboundCapacity(n);
1699 resize(NormalizeCapacity(m));
1700
1701 // This is after resize, to ensure that we have completed the allocation
1702 // and have potentially sampled the hashtable.
1703 infoz().RecordReservation(n);
1704 }
1705 }
1706
1707 // Extension API: support for heterogeneous keys.
1708 //
1709 // std::unordered_set<std::string> s;
1710 // // Turns "abc" into std::string.
1711 // s.count("abc");
1712 //
// flat_hash_set<std::string> s;
1714 // // Uses "abc" directly without copying it into std::string.
1715 // s.count("abc");
1716 template <class K = key_type>
1717 size_t count(const key_arg<K>& key) const {
1718 return find(key) == end() ? 0 : 1;
1719 }
1720
// Issues CPU prefetch instructions for the memory needed to find or insert
// a key. Like all lookup functions, this supports heterogeneous keys.
1723 //
1724 // NOTE: This is a very low level operation and should not be used without
1725 // specific benchmarks indicating its importance.
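//
// An illustrative sketch, where `DoUnrelatedWork()` is a hypothetical
// stand-in for computation that overlaps the memory latency:
//
//   s.prefetch(key);
//   DoUnrelatedWork();
//   auto it = s.find(key);  // the needed cache lines are likely resident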
1726 template <class K = key_type>
1727 void prefetch(const key_arg<K>& key) const {
1728 (void)key;
1729 // Avoid probing if we won't be able to prefetch the addresses received.
1730 #ifdef ABSL_INTERNAL_HAVE_PREFETCH
1731 prefetch_heap_block();
1732 auto seq = probe(ctrl_, hash_ref()(key), capacity_);
1733 base_internal::PrefetchT0(ctrl_ + seq.offset());
1734 base_internal::PrefetchT0(slots_ + seq.offset());
1735 #endif // ABSL_INTERNAL_HAVE_PREFETCH
1736 }
1737
1738 // The API of find() has two extensions.
1739 //
1740 // 1. The hash can be passed by the user. It must be equal to the hash of the
1741 // key.
1742 //
// 2. The type of the key argument doesn't have to be key_type. This is
// so-called heterogeneous key support.
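//
// An illustrative sketch of extension 1, where the caller computes the hash
// once (here `s` is some table instance) and passes it in:
//
//   const size_t h = s.hash_function()(key);
//   auto it = s.find(key, h);  // `h` must equal s.hash_function()(key)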
1745 template <class K = key_type>
1746 iterator find(const key_arg<K>& key, size_t hash) {
1747 auto seq = probe(ctrl_, hash, capacity_);
1748 while (true) {
1749 Group g{ctrl_ + seq.offset()};
1750 for (uint32_t i : g.Match(H2(hash))) {
1751 if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
1752 EqualElement<K>{key, eq_ref()},
1753 PolicyTraits::element(slots_ + seq.offset(i)))))
1754 return iterator_at(seq.offset(i));
1755 }
1756 if (ABSL_PREDICT_TRUE(g.MaskEmpty())) return end();
1757 seq.next();
1758 assert(seq.index() <= capacity_ && "full table!");
1759 }
1760 }
1761 template <class K = key_type>
1762 iterator find(const key_arg<K>& key) {
1763 prefetch_heap_block();
1764 return find(key, hash_ref()(key));
1765 }
1766
1767 template <class K = key_type>
1768 const_iterator find(const key_arg<K>& key, size_t hash) const {
1769 return const_cast<raw_hash_set*>(this)->find(key, hash);
1770 }
1771 template <class K = key_type>
1772 const_iterator find(const key_arg<K>& key) const {
1773 prefetch_heap_block();
1774 return find(key, hash_ref()(key));
1775 }
1776
1777 template <class K = key_type>
1778 bool contains(const key_arg<K>& key) const {
1779 return find(key) != end();
1780 }
1781
1782 template <class K = key_type>
1783 std::pair<iterator, iterator> equal_range(const key_arg<K>& key) {
1784 auto it = find(key);
1785 if (it != end()) return {it, std::next(it)};
1786 return {it, it};
1787 }
1788 template <class K = key_type>
1789 std::pair<const_iterator, const_iterator> equal_range(
1790 const key_arg<K>& key) const {
1791 auto it = find(key);
1792 if (it != end()) return {it, std::next(it)};
1793 return {it, it};
1794 }
1795
1796 size_t bucket_count() const { return capacity_; }
float load_factor() const {
  return capacity_ ? static_cast<float>(size()) / capacity_ : 0.0f;
}
1800 float max_load_factor() const { return 1.0f; }
1801 void max_load_factor(float) {
1802 // Does nothing.
1803 }
1804
1805 hasher hash_function() const { return hash_ref(); }
1806 key_equal key_eq() const { return eq_ref(); }
1807 allocator_type get_allocator() const { return alloc_ref(); }
1808
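// Two tables compare equal if they contain the same elements, irrespective
// of insertion order or capacity. An illustrative sketch:
//
//   absl::flat_hash_set<int> a = {1, 2, 3};
//   absl::flat_hash_set<int> b = {3, 2, 1};
//   assert(a == b);  // equal despite different insertion orders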
1809 friend bool operator==(const raw_hash_set& a, const raw_hash_set& b) {
1810 if (a.size() != b.size()) return false;
1811 const raw_hash_set* outer = &a;
1812 const raw_hash_set* inner = &b;
1813 if (outer->capacity() > inner->capacity()) std::swap(outer, inner);
1814 for (const value_type& elem : *outer)
1815 if (!inner->has_element(elem)) return false;
1816 return true;
1817 }
1818
1819 friend bool operator!=(const raw_hash_set& a, const raw_hash_set& b) {
1820 return !(a == b);
1821 }
1822
1823 template <typename H>
1824 friend typename std::enable_if<H::template is_hashable<value_type>::value,
1825 H>::type
1826 AbslHashValue(H h, const raw_hash_set& s) {
1827 return H::combine(H::combine_unordered(std::move(h), s.begin(), s.end()),
1828 s.size());
1829 }
1830
1831 friend void swap(raw_hash_set& a,
1832 raw_hash_set& b) noexcept(noexcept(a.swap(b))) {
1833 a.swap(b);
1834 }
1835
1836 private:
1837 template <class Container, typename Enabler>
1838 friend struct absl::container_internal::hashtable_debug_internal::
1839 HashtableDebugAccess;
1840
1841 struct FindElement {
1842 template <class K, class... Args>
1843 const_iterator operator()(const K& key, Args&&...) const {
1844 return s.find(key);
1845 }
1846 const raw_hash_set& s;
1847 };
1848
1849 struct HashElement {
1850 template <class K, class... Args>
1851 size_t operator()(const K& key, Args&&...) const {
1852 return h(key);
1853 }
1854 const hasher& h;
1855 };
1856
1857 template <class K1>
1858 struct EqualElement {
1859 template <class K2, class... Args>
1860 bool operator()(const K2& lhs, Args&&...) const {
1861 return eq(lhs, rhs);
1862 }
1863 const K1& rhs;
1864 const key_equal& eq;
1865 };
1866
1867 struct EmplaceDecomposable {
1868 template <class K, class... Args>
1869 std::pair<iterator, bool> operator()(const K& key, Args&&... args) const {
1870 auto res = s.find_or_prepare_insert(key);
1871 if (res.second) {
1872 s.emplace_at(res.first, std::forward<Args>(args)...);
1873 }
1874 return {s.iterator_at(res.first), res.second};
1875 }
1876 raw_hash_set& s;
1877 };
1878
1879 template <bool do_destroy>
1880 struct InsertSlot {
1881 template <class K, class... Args>
1882 std::pair<iterator, bool> operator()(const K& key, Args&&...) && {
1883 auto res = s.find_or_prepare_insert(key);
1884 if (res.second) {
1885 PolicyTraits::transfer(&s.alloc_ref(), s.slots_ + res.first, &slot);
1886 } else if (do_destroy) {
1887 PolicyTraits::destroy(&s.alloc_ref(), &slot);
1888 }
1889 return {s.iterator_at(res.first), res.second};
1890 }
1891 raw_hash_set& s;
1892 // Constructed slot. Either moved into place or destroyed.
1893 slot_type&& slot;
1894 };
1895
1896 // Erases, but does not destroy, the value pointed to by `it`.
1897 //
1898 // This merely updates the pertinent control byte. This can be used in
1899 // conjunction with Policy::transfer to move the object to another place.
1900 void erase_meta_only(const_iterator it) {
1901 assert(IsFull(*it.inner_.ctrl_) && "erasing a dangling iterator");
1902 --size_;
1903 const size_t index = static_cast<size_t>(it.inner_.ctrl_ - ctrl_);
1904 const size_t index_before = (index - Group::kWidth) & capacity_;
1905 const auto empty_after = Group(it.inner_.ctrl_).MaskEmpty();
1906 const auto empty_before = Group(ctrl_ + index_before).MaskEmpty();
1907
// We count how many consecutive non-empty slots we have to the right and to
// the left of `it`. If the sum is >= kWidth then there is at least one probe
// window that might have seen a full group.
1911 bool was_never_full =
1912 empty_before && empty_after &&
1913 static_cast<size_t>(empty_after.TrailingZeros() +
1914 empty_before.LeadingZeros()) < Group::kWidth;
1915
1916 SetCtrl(index, was_never_full ? ctrl_t::kEmpty : ctrl_t::kDeleted,
1917 capacity_, ctrl_, slots_, sizeof(slot_type));
1918 growth_left() += was_never_full;
1919 infoz().RecordErase();
1920 }
1921
1922 // Allocates a backing array for `self` and initializes its control bytes.
1923 // This reads `capacity_` and updates all other fields based on the result of
1924 // the allocation.
1925 //
1926 // This does not free the currently held array; `capacity_` must be nonzero.
1927 void initialize_slots() {
1928 assert(capacity_);
// Folks with custom allocators often make unwarranted assumptions about the
// behavior of their classes vis-a-vis trivial destructibility and what
// calls they will or won't make. Avoid sampling for people with custom
// allocators to get us out of this mess. This is not a hard guarantee but
// a workaround while we plan the exact guarantee we want to provide.
1934 //
1935 // People are often sloppy with the exact type of their allocator (sometimes
1936 // it has an extra const or is missing the pair, but rebinds made it work
1937 // anyway). To avoid the ambiguity, we work off SlotAlloc which we have
1938 // bound more carefully.
1939 if (std::is_same<SlotAlloc, std::allocator<slot_type>>::value &&
1940 slots_ == nullptr) {
1941 infoz() = Sample(sizeof(slot_type));
1942 }
1943
1944 char* mem = static_cast<char*>(Allocate<alignof(slot_type)>(
1945 &alloc_ref(),
1946 AllocSize(capacity_, sizeof(slot_type), alignof(slot_type))));
1947 ctrl_ = reinterpret_cast<ctrl_t*>(mem);
1948 slots_ = reinterpret_cast<slot_type*>(
1949 mem + SlotOffset(capacity_, alignof(slot_type)));
1950 ResetCtrl(capacity_, ctrl_, slots_, sizeof(slot_type));
1951 reset_growth_left();
1952 infoz().RecordStorageChanged(size_, capacity_);
1953 }
1954
1955 // Destroys all slots in the backing array, frees the backing array, and
1956 // clears all top-level book-keeping data.
1957 //
1958 // This essentially implements `map = raw_hash_set();`.
1959 void destroy_slots() {
1960 if (!capacity_) return;
1961 for (size_t i = 0; i != capacity_; ++i) {
1962 if (IsFull(ctrl_[i])) {
1963 PolicyTraits::destroy(&alloc_ref(), slots_ + i);
1964 }
1965 }
1966
1967 // Unpoison before returning the memory to the allocator.
1968 SanitizerUnpoisonMemoryRegion(slots_, sizeof(slot_type) * capacity_);
1969 Deallocate<alignof(slot_type)>(
1970 &alloc_ref(), ctrl_,
1971 AllocSize(capacity_, sizeof(slot_type), alignof(slot_type)));
1972 ctrl_ = EmptyGroup();
1973 slots_ = nullptr;
1974 size_ = 0;
1975 capacity_ = 0;
1976 growth_left() = 0;
1977 }
1978
1979 void resize(size_t new_capacity) {
1980 assert(IsValidCapacity(new_capacity));
1981 auto* old_ctrl = ctrl_;
1982 auto* old_slots = slots_;
1983 const size_t old_capacity = capacity_;
1984 capacity_ = new_capacity;
1985 initialize_slots();
1986
1987 size_t total_probe_length = 0;
1988 for (size_t i = 0; i != old_capacity; ++i) {
1989 if (IsFull(old_ctrl[i])) {
1990 size_t hash = PolicyTraits::apply(HashElement{hash_ref()},
1991 PolicyTraits::element(old_slots + i));
1992 auto target = find_first_non_full(ctrl_, hash, capacity_);
1993 size_t new_i = target.offset;
1994 total_probe_length += target.probe_length;
1995 SetCtrl(new_i, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type));
1996 PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, old_slots + i);
1997 }
1998 }
1999 if (old_capacity) {
2000 SanitizerUnpoisonMemoryRegion(old_slots,
2001 sizeof(slot_type) * old_capacity);
2002 Deallocate<alignof(slot_type)>(
2003 &alloc_ref(), old_ctrl,
2004 AllocSize(old_capacity, sizeof(slot_type), alignof(slot_type)));
2005 }
2006 infoz().RecordRehash(total_probe_length);
2007 }
2008
2009 // Prunes control bytes to remove as many tombstones as possible.
2010 //
2011 // See the comment on `rehash_and_grow_if_necessary()`.
2012 void drop_deletes_without_resize() ABSL_ATTRIBUTE_NOINLINE {
2013 assert(IsValidCapacity(capacity_));
2014 assert(!is_small(capacity_));
2015 // Algorithm:
2016 // - mark all DELETED slots as EMPTY
2017 // - mark all FULL slots as DELETED
2018 // - for each slot marked as DELETED
2019 // hash = Hash(element)
2020 // target = find_first_non_full(hash)
2021 // if target is in the same group
2022 // mark slot as FULL
2023 // else if target is EMPTY
2024 // transfer element to target
2025 // mark slot as EMPTY
2026 // mark target as FULL
2027 // else if target is DELETED
2028 // swap current element with target element
2029 // mark target as FULL
2030 // repeat procedure for current slot with moved from element (target)
2031 ConvertDeletedToEmptyAndFullToDeleted(ctrl_, capacity_);
2032 alignas(slot_type) unsigned char raw[sizeof(slot_type)];
2033 size_t total_probe_length = 0;
2034 slot_type* slot = reinterpret_cast<slot_type*>(&raw);
2035 for (size_t i = 0; i != capacity_; ++i) {
2036 if (!IsDeleted(ctrl_[i])) continue;
2037 const size_t hash = PolicyTraits::apply(
2038 HashElement{hash_ref()}, PolicyTraits::element(slots_ + i));
2039 const FindInfo target = find_first_non_full(ctrl_, hash, capacity_);
2040 const size_t new_i = target.offset;
2041 total_probe_length += target.probe_length;
2042
// Check whether the old and new positions fall within the same group with
// respect to the hash. If they do, we don't need to move the object, as it
// already sits in the best probe window we can achieve.
2046 const size_t probe_offset = probe(ctrl_, hash, capacity_).offset();
2047 const auto probe_index = [probe_offset, this](size_t pos) {
2048 return ((pos - probe_offset) & capacity_) / Group::kWidth;
2049 };
2050
2051 // Element doesn't move.
2052 if (ABSL_PREDICT_TRUE(probe_index(new_i) == probe_index(i))) {
2053 SetCtrl(i, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type));
2054 continue;
2055 }
2056 if (IsEmpty(ctrl_[new_i])) {
2057 // Transfer element to the empty spot.
2058 // SetCtrl poisons/unpoisons the slots so we have to call it at the
2059 // right time.
2060 SetCtrl(new_i, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type));
2061 PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, slots_ + i);
2062 SetCtrl(i, ctrl_t::kEmpty, capacity_, ctrl_, slots_, sizeof(slot_type));
2063 } else {
2064 assert(IsDeleted(ctrl_[new_i]));
2065 SetCtrl(new_i, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type));
2066 // Until we are done rehashing, DELETED marks previously FULL slots.
2067 // Swap i and new_i elements.
2068 PolicyTraits::transfer(&alloc_ref(), slot, slots_ + i);
2069 PolicyTraits::transfer(&alloc_ref(), slots_ + i, slots_ + new_i);
2070 PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, slot);
2071 --i; // repeat
2072 }
2073 }
2074 reset_growth_left();
2075 infoz().RecordRehash(total_probe_length);
2076 }
2077
2078 // Called whenever the table *might* need to conditionally grow.
2079 //
2080 // This function is an optimization opportunity to perform a rehash even when
2081 // growth is unnecessary, because vacating tombstones is beneficial for
2082 // performance in the long-run.
2083 void rehash_and_grow_if_necessary() {
2084 if (capacity_ == 0) {
2085 resize(1);
2086 } else if (capacity_ > Group::kWidth &&
// Do these calculations in 64-bit to avoid overflow.
2088 size() * uint64_t{32} <= capacity_ * uint64_t{25}) {
2089 // Squash DELETED without growing if there is enough capacity.
2090 //
2091 // Rehash in place if the current size is <= 25/32 of capacity_.
// Rationale for such a high factor: 1) drop_deletes_without_resize() is
// faster than resize, and 2) it takes quite a bit of work to add
// tombstones. In the worst case, it seems to take approximately 4
// insert/erase pairs to create a single tombstone, so if we are
// rehashing because of tombstones, we can afford to rehash-in-place as
// long as we are reclaiming at least 1/8 the capacity without doing more
// than 2X the work. (Where "work" is defined to be size() for rehashing
// or rehashing in place, and 1 for an insert or erase.) But rehashing in
// place is faster per operation than inserting or even doubling the size
// of the table, so we can actually afford to reclaim even less space from
// a resize-in-place. The decision is to rehash in place if we can reclaim
// about 1/8th of the usable capacity (specifically 3/28 of the usable
// capacity, i.e. 3/32 of the total capacity), which means that the total
// cost of rehashing will be a small fraction of the total work.
2106 //
2107 // Here is output of an experiment using the BM_CacheInSteadyState
2108 // benchmark running the old case (where we rehash-in-place only if we can
2109 // reclaim at least 7/16*capacity_) vs. this code (which rehashes in place
2110 // if we can recover 3/32*capacity_).
2111 //
// Note that although the worst-case number of rehashes jumped from 15 to
// 190, the number of operations per second is almost the same.
2114 //
2115 // Abridged output of running BM_CacheInSteadyState benchmark from
2116 // raw_hash_set_benchmark. N is the number of insert/erase operations.
2117 //
//      | OLD (recover >= 7/16)       | NEW (recover >= 3/32)
2119 // size | N/s LoadFactor NRehashes | N/s LoadFactor NRehashes
2120 // 448 | 145284 0.44 18 | 140118 0.44 19
2121 // 493 | 152546 0.24 11 | 151417 0.48 28
2122 // 538 | 151439 0.26 11 | 151152 0.53 38
2123 // 583 | 151765 0.28 11 | 150572 0.57 50
2124 // 628 | 150241 0.31 11 | 150853 0.61 66
2125 // 672 | 149602 0.33 12 | 150110 0.66 90
2126 // 717 | 149998 0.35 12 | 149531 0.70 129
2127 // 762 | 149836 0.37 13 | 148559 0.74 190
2128 // 807 | 149736 0.39 14 | 151107 0.39 14
2129 // 852 | 150204 0.42 15 | 151019 0.42 15
2130 drop_deletes_without_resize();
2131 } else {
2132 // Otherwise grow the container.
2133 resize(capacity_ * 2 + 1);
2134 }
2135 }
2136
2137 bool has_element(const value_type& elem) const {
2138 size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, elem);
2139 auto seq = probe(ctrl_, hash, capacity_);
2140 while (true) {
2141 Group g{ctrl_ + seq.offset()};
2142 for (uint32_t i : g.Match(H2(hash))) {
2143 if (ABSL_PREDICT_TRUE(PolicyTraits::element(slots_ + seq.offset(i)) ==
2144 elem))
2145 return true;
2146 }
2147 if (ABSL_PREDICT_TRUE(g.MaskEmpty())) return false;
2148 seq.next();
2149 assert(seq.index() <= capacity_ && "full table!");
2150 }
2152 }
2153
2154 // TODO(alkis): Optimize this assuming *this and that don't overlap.
2155 raw_hash_set& move_assign(raw_hash_set&& that, std::true_type) {
2156 raw_hash_set tmp(std::move(that));
2157 swap(tmp);
2158 return *this;
2159 }
2160 raw_hash_set& move_assign(raw_hash_set&& that, std::false_type) {
2161 raw_hash_set tmp(std::move(that), alloc_ref());
2162 swap(tmp);
2163 return *this;
2164 }
2165
2166 protected:
2167 // Attempts to find `key` in the table; if it isn't found, returns a slot that
2168 // the value can be inserted into, with the control byte already set to
2169 // `key`'s H2.
2170 template <class K>
2171 std::pair<size_t, bool> find_or_prepare_insert(const K& key) {
2172 prefetch_heap_block();
2173 auto hash = hash_ref()(key);
2174 auto seq = probe(ctrl_, hash, capacity_);
2175 while (true) {
2176 Group g{ctrl_ + seq.offset()};
2177 for (uint32_t i : g.Match(H2(hash))) {
2178 if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
2179 EqualElement<K>{key, eq_ref()},
2180 PolicyTraits::element(slots_ + seq.offset(i)))))
2181 return {seq.offset(i), false};
2182 }
2183 if (ABSL_PREDICT_TRUE(g.MaskEmpty())) break;
2184 seq.next();
2185 assert(seq.index() <= capacity_ && "full table!");
2186 }
2187 return {prepare_insert(hash), true};
2188 }
2189
2190 // Given the hash of a value not currently in the table, finds the next
2191 // viable slot index to insert it at.
2192 //
2193 // REQUIRES: At least one non-full slot available.
2194 size_t prepare_insert(size_t hash) ABSL_ATTRIBUTE_NOINLINE {
2195 auto target = find_first_non_full(ctrl_, hash, capacity_);
2196 if (ABSL_PREDICT_FALSE(growth_left() == 0 &&
2197 !IsDeleted(ctrl_[target.offset]))) {
2198 rehash_and_grow_if_necessary();
2199 target = find_first_non_full(ctrl_, hash, capacity_);
2200 }
2201 ++size_;
2202 growth_left() -= IsEmpty(ctrl_[target.offset]);
2203 SetCtrl(target.offset, H2(hash), capacity_, ctrl_, slots_,
2204 sizeof(slot_type));
2205 infoz().RecordInsert(hash, target.probe_length);
2206 return target.offset;
2207 }
2208
// Constructs the value in the space pointed to by the iterator. This only
// works after an unsuccessful find_or_prepare_insert() and before any other
// modifications happen in the raw_hash_set.
2212 //
2213 // PRECONDITION: i is an index returned from find_or_prepare_insert(k), where
2214 // k is the key decomposed from `forward<Args>(args)...`, and the bool
2215 // returned by find_or_prepare_insert(k) was true.
2216 // POSTCONDITION: *m.iterator_at(i) == value_type(forward<Args>(args)...).
2217 template <class... Args>
2218 void emplace_at(size_t i, Args&&... args) {
2219 PolicyTraits::construct(&alloc_ref(), slots_ + i,
2220 std::forward<Args>(args)...);
2221
2222 assert(PolicyTraits::apply(FindElement{*this}, *iterator_at(i)) ==
2223 iterator_at(i) &&
2224 "constructed value does not match the lookup key");
2225 }
2226
2227 iterator iterator_at(size_t i) { return {ctrl_ + i, slots_ + i}; }
2228 const_iterator iterator_at(size_t i) const { return {ctrl_ + i, slots_ + i}; }
2229
2230 private:
2231 friend struct RawHashSetTestOnlyAccess;
2232
2233 void reset_growth_left() {
2234 growth_left() = CapacityToGrowth(capacity()) - size_;
2235 }
2236
2237 // The number of slots we can still fill without needing to rehash.
2238 //
2239 // This is stored separately due to tombstones: we do not include tombstones
2240 // in the growth capacity, because we'd like to rehash when the table is
2241 // otherwise filled with tombstones: otherwise, probe sequences might get
2242 // unacceptably long without triggering a rehash. Callers can also force a
2243 // rehash via the standard `rehash(0)`, which will recompute this value as a
2244 // side-effect.
2245 //
2246 // See `CapacityToGrowth()`.
2247 size_t& growth_left() { return settings_.template get<0>(); }
2248
2249 // Prefetch the heap-allocated memory region to resolve potential TLB misses.
2250 // This is intended to overlap with execution of calculating the hash for a
2251 // key.
2252 void prefetch_heap_block() const {
2253 base_internal::PrefetchT2(ctrl_);
2254 }
2255
2256 HashtablezInfoHandle& infoz() { return settings_.template get<1>(); }
2257
2258 hasher& hash_ref() { return settings_.template get<2>(); }
2259 const hasher& hash_ref() const { return settings_.template get<2>(); }
2260 key_equal& eq_ref() { return settings_.template get<3>(); }
2261 const key_equal& eq_ref() const { return settings_.template get<3>(); }
2262 allocator_type& alloc_ref() { return settings_.template get<4>(); }
2263 const allocator_type& alloc_ref() const {
2264 return settings_.template get<4>();
2265 }
2266
2267 // TODO(alkis): Investigate removing some of these fields:
2268 // - ctrl/slots can be derived from each other
2269 // - size can be moved into the slot array
2270
2271 // The control bytes (and, also, a pointer to the base of the backing array).
2272 //
2273 // This contains `capacity_ + 1 + NumClonedBytes()` entries, even
2274 // when the table is empty (hence EmptyGroup).
2275 ctrl_t* ctrl_ = EmptyGroup();
2276 // The beginning of the slots, located at `SlotOffset()` bytes after
2277 // `ctrl_`. May be null for empty tables.
2278 slot_type* slots_ = nullptr;
2279
2280 // The number of filled slots.
2281 size_t size_ = 0;
2282
2283 // The total number of available slots.
2284 size_t capacity_ = 0;
2285 absl::container_internal::CompressedTuple<size_t /* growth_left */,
2286 HashtablezInfoHandle, hasher,
2287 key_equal, allocator_type>
2288 settings_{0u, HashtablezInfoHandle{}, hasher{}, key_equal{},
2289 allocator_type{}};
2290 };
2291
2292 // Erases all elements that satisfy the predicate `pred` from the container `c`.
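// Returns the number of erased elements. An illustrative sketch (this relies
// on the fact that the public wrappers derive from raw_hash_set, so template
// deduction accepts them):
//
//   absl::flat_hash_set<int> s = {1, 2, 3, 4};
//   auto is_odd = [](int n) { return n % 2 != 0; };
//   absl::container_internal::EraseIf(is_odd, &s);  // s == {2, 4}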
2293 template <typename P, typename H, typename E, typename A, typename Predicate>
2294 typename raw_hash_set<P, H, E, A>::size_type EraseIf(
2295 Predicate& pred, raw_hash_set<P, H, E, A>* c) {
2296 const auto initial_size = c->size();
2297 for (auto it = c->begin(), last = c->end(); it != last;) {
2298 if (pred(*it)) {
2299 c->erase(it++);
2300 } else {
2301 ++it;
2302 }
2303 }
2304 return initial_size - c->size();
2305 }
2306
2307 namespace hashtable_debug_internal {
2308 template <typename Set>
2309 struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
2310 using Traits = typename Set::PolicyTraits;
2311 using Slot = typename Traits::slot_type;
2312
2313 static size_t GetNumProbes(const Set& set,
2314 const typename Set::key_type& key) {
2315 size_t num_probes = 0;
2316 size_t hash = set.hash_ref()(key);
2317 auto seq = probe(set.ctrl_, hash, set.capacity_);
2318 while (true) {
2319 container_internal::Group g{set.ctrl_ + seq.offset()};
2320 for (uint32_t i : g.Match(container_internal::H2(hash))) {
2321 if (Traits::apply(
2322 typename Set::template EqualElement<typename Set::key_type>{
2323 key, set.eq_ref()},
2324 Traits::element(set.slots_ + seq.offset(i))))
2325 return num_probes;
2326 ++num_probes;
2327 }
2328 if (g.MaskEmpty()) return num_probes;
2329 seq.next();
2330 ++num_probes;
2331 }
2332 }
2333
2334 static size_t AllocatedByteSize(const Set& c) {
2335 size_t capacity = c.capacity_;
2336 if (capacity == 0) return 0;
2337 size_t m = AllocSize(capacity, sizeof(Slot), alignof(Slot));
2338
2339 size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
2340 if (per_slot != ~size_t{}) {
2341 m += per_slot * c.size();
2342 } else {
2343 for (size_t i = 0; i != capacity; ++i) {
2344 if (container_internal::IsFull(c.ctrl_[i])) {
2345 m += Traits::space_used(c.slots_ + i);
2346 }
2347 }
2348 }
2349 return m;
2350 }
2351
2352 static size_t LowerBoundAllocatedByteSize(size_t size) {
2353 size_t capacity = GrowthToLowerboundCapacity(size);
2354 if (capacity == 0) return 0;
2355 size_t m =
2356 AllocSize(NormalizeCapacity(capacity), sizeof(Slot), alignof(Slot));
2357 size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
2358 if (per_slot != ~size_t{}) {
2359 m += per_slot * size;
2360 }
2361 return m;
2362 }
2363 };
2364
2365 } // namespace hashtable_debug_internal
2366 } // namespace container_internal
2367 ABSL_NAMESPACE_END
2368 } // namespace absl
2369
2370 #undef ABSL_INTERNAL_ASSERT_IS_FULL
2371
2372 #endif // ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
2373