1 use crate::alloc::alloc::{handle_alloc_error, Layout};
2 use crate::scopeguard::{guard, ScopeGuard};
3 use crate::TryReserveError;
4 use core::iter::FusedIterator;
5 use core::marker::PhantomData;
6 use core::mem;
7 use core::mem::ManuallyDrop;
8 use core::mem::MaybeUninit;
9 use core::ptr::NonNull;
10 use core::{hint, ptr};
11 
12 cfg_if! {
13     // Use the SSE2 implementation if possible: it allows us to scan 16 buckets
14     // at once instead of 8. We don't bother with AVX since it would require
15     // runtime dispatch and wouldn't gain us much anyways: the probability of
16     // finding a match drops off drastically after the first few buckets.
17     //
18     // I attempted an implementation on ARM using NEON instructions, but it
19     // turns out that most NEON instructions have multi-cycle latency, which in
20     // the end outweighs any gains over the generic implementation.
21     if #[cfg(all(
22         target_feature = "sse2",
23         any(target_arch = "x86", target_arch = "x86_64"),
24         not(miri)
25     ))] {
26         mod sse2;
27         use sse2 as imp;
28     } else {
29         #[path = "generic.rs"]
30         mod generic;
31         use generic as imp;
32     }
33 }
34 
35 mod alloc;
36 pub(crate) use self::alloc::{do_alloc, Allocator, Global};
37 
38 mod bitmask;
39 
40 use self::bitmask::{BitMask, BitMaskIter};
41 use self::imp::Group;
42 
43 // Branch prediction hint. This is currently only available on nightly but it
44 // consistently improves performance by 10-15%.
45 #[cfg(feature = "nightly")]
46 use core::intrinsics::{likely, unlikely};
47 
48 // On stable we can use #[cold] to get an equivalent effect: this attribute
49 // suggests that the function is unlikely to be called.
50 #[cfg(not(feature = "nightly"))]
51 #[inline]
52 #[cold]
53 fn cold() {}
54 
55 #[cfg(not(feature = "nightly"))]
56 #[inline]
57 fn likely(b: bool) -> bool {
58     if !b {
59         cold();
60     }
61     b
62 }
63 #[cfg(not(feature = "nightly"))]
64 #[inline]
65 fn unlikely(b: bool) -> bool {
66     if b {
67         cold();
68     }
69     b
70 }
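// Illustrative sketch (editorial note, not part of the original source): on stable,
// `likely`/`unlikely` simply pass the condition through, calling the #[cold] stub on
// the unexpected path, so call sites read the same as with the nightly intrinsics:
//
//     if unlikely(self.growth_left == 0) {
//         self.reserve(1, hasher); // slow path, kept out of the hot code
//     }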
71 
72 #[inline]
73 unsafe fn offset_from<T>(to: *const T, from: *const T) -> usize {
74     to.offset_from(from) as usize
75 }
76 
77 /// Whether memory allocation errors should return an error or abort.
78 #[derive(Copy, Clone)]
79 enum Fallibility {
80     Fallible,
81     Infallible,
82 }
83 
84 impl Fallibility {
85     /// Error to return on capacity overflow.
86     #[cfg_attr(feature = "inline-more", inline)]
87     fn capacity_overflow(self) -> TryReserveError {
88         match self {
89             Fallibility::Fallible => TryReserveError::CapacityOverflow,
90             Fallibility::Infallible => panic!("Hash table capacity overflow"),
91         }
92     }
93 
94     /// Error to return on allocation error.
95     #[cfg_attr(feature = "inline-more", inline)]
96     fn alloc_err(self, layout: Layout) -> TryReserveError {
97         match self {
98             Fallibility::Fallible => TryReserveError::AllocError { layout },
99             Fallibility::Infallible => handle_alloc_error(layout),
100         }
101     }
102 }
103 
104 /// Control byte value for an empty bucket.
105 const EMPTY: u8 = 0b1111_1111;
106 
107 /// Control byte value for a deleted bucket.
108 const DELETED: u8 = 0b1000_0000;
109 
110 /// Checks whether a control byte represents a full bucket (top bit is clear).
111 #[inline]
112 fn is_full(ctrl: u8) -> bool {
113     ctrl & 0x80 == 0
114 }
115 
116 /// Checks whether a control byte represents a special value (top bit is set).
117 #[inline]
118 fn is_special(ctrl: u8) -> bool {
119     ctrl & 0x80 != 0
120 }
121 
122 /// Checks whether a special control value is EMPTY (just check 1 bit).
123 #[inline]
124 fn special_is_empty(ctrl: u8) -> bool {
125     debug_assert!(is_special(ctrl));
126     ctrl & 0x01 != 0
127 }
128 
129 /// Primary hash function, used to select the initial bucket to probe from.
130 #[inline]
131 #[allow(clippy::cast_possible_truncation)]
132 fn h1(hash: u64) -> usize {
133     // On 32-bit platforms we simply ignore the higher hash bits.
134     hash as usize
135 }
136 
137 /// Secondary hash function, saved in the low 7 bits of the control byte.
138 #[inline]
139 #[allow(clippy::cast_possible_truncation)]
140 fn h2(hash: u64) -> u8 {
141     // Grab the top 7 bits of the hash. While the hash is normally a full 64-bit
142     // value, some hash functions (such as FxHash) produce a usize result
143     // instead, which means that the top 32 bits are 0 on 32-bit platforms.
144     let hash_len = usize::min(mem::size_of::<usize>(), mem::size_of::<u64>());
145     let top7 = hash >> (hash_len * 8 - 7);
146     (top7 & 0x7f) as u8 // truncation
147 }
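// Illustrative sketch (editorial, assuming a 64-bit platform): a 64-bit hash is split
// into h1, which picks the group where probing starts, and h2, the top 7 bits stored
// in the control byte of a full bucket:
//
//     let hash: u64 = 0xDEAD_BEEF_CAFE_F00D;
//     let h1 = hash as usize;      // full hash; masked with bucket_mask later
//     let h2 = (hash >> 57) as u8; // top 7 bits, always <= 0x7f, so the high bit is
//                                  // clear and the byte is recognised as "full"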
148 
149 /// Probe sequence based on triangular numbers, which is guaranteed (since our
150 /// table size is a power of two) to visit every group of elements exactly once.
151 ///
152 /// A triangular probe has us jump by 1 more group every time. So first we
153 /// jump by 1 group (meaning we just continue our linear scan), then 2 groups
154 /// (skipping over 1 group), then 3 groups (skipping over 2 groups), and so on.
155 ///
156 /// Proof that the probe will visit every group in the table:
157 /// <https://fgiesen.wordpress.com/2015/02/22/triangular-numbers-mod-2n/>
158 struct ProbeSeq {
159     pos: usize,
160     stride: usize,
161 }
162 
163 impl ProbeSeq {
164     #[inline]
165     fn move_next(&mut self, bucket_mask: usize) {
166         // We should have found an empty bucket by now and ended the probe.
167         debug_assert!(
168             self.stride <= bucket_mask,
169             "Went past end of probe sequence"
170         );
171 
172         self.stride += Group::WIDTH;
173         self.pos += self.stride;
174         self.pos &= bucket_mask;
175     }
176 }
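// Illustrative sketch (editorial): with Group::WIDTH == 16 and a 64-bucket table
// (bucket_mask == 63), a probe starting at group position p visits
//
//     p, p + 16, p + 48, p + 96, ...   (all taken mod 64)
//
// i.e. the stride grows by one group each step (16, 32, 48, ...), the triangular
// sequence that guarantees every group is visited exactly once.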
177 
178 /// Returns the number of buckets needed to hold the given number of items,
179 /// taking the maximum load factor into account.
180 ///
181 /// Returns `None` if an overflow occurs.
182 // Workaround for emscripten bug emscripten-core/emscripten-fastcomp#258
183 #[cfg_attr(target_os = "emscripten", inline(never))]
184 #[cfg_attr(not(target_os = "emscripten"), inline)]
185 fn capacity_to_buckets(cap: usize) -> Option<usize> {
186     debug_assert_ne!(cap, 0);
187 
188     // For small tables we require at least 1 empty bucket so that lookups are
189     // guaranteed to terminate if an element doesn't exist in the table.
190     if cap < 8 {
191         // We don't bother with a table size of 2 buckets since that can only
192         // hold a single element. Instead we skip directly to a 4 bucket table
193         // which can hold 3 elements.
194         return Some(if cap < 4 { 4 } else { 8 });
195     }
196 
197     // Otherwise require 1/8 buckets to be empty (87.5% load)
198     //
199     // Be careful when modifying this, calculate_layout relies on the
200     // overflow check here.
201     let adjusted_cap = cap.checked_mul(8)? / 7;
202 
203     // Any overflows will have been caught by the checked_mul. Also, any
204     // rounding errors from the division above will be cleaned up by
205     // next_power_of_two (which can't overflow because of the previous division).
206     Some(adjusted_cap.next_power_of_two())
207 }
208 
209 /// Returns the maximum effective capacity for the given bucket mask, taking
210 /// the maximum load factor into account.
211 #[inline]
212 fn bucket_mask_to_capacity(bucket_mask: usize) -> usize {
213     if bucket_mask < 8 {
214         // For tables with 1/2/4/8 buckets, we always reserve one empty slot.
215         // Keep in mind that the bucket mask is one less than the bucket count.
216         bucket_mask
217     } else {
218         // For larger tables we reserve 12.5% of the slots as empty.
219         ((bucket_mask + 1) / 8) * 7
220     }
221 }
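// Illustrative sketch (editorial): the two functions above are approximate inverses
// under the 7/8 maximum load factor. For example:
//
//     capacity_to_buckets(12)         == Some(16)  // 12 * 8 / 7 == 13, rounded up to a power of two
//     bucket_mask_to_capacity(16 - 1) == 14        // 16 / 8 * 7 usable slots
//
// so asking for room for 12 items yields a 16-bucket table that can hold up to 14.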
222 
223 /// Helper which allows the max calculation for ctrl_align to be statically computed for each T
224 /// while keeping the rest of `calculate_layout_for` independent of `T`
225 #[derive(Copy, Clone)]
226 struct TableLayout {
227     size: usize,
228     ctrl_align: usize,
229 }
230 
231 impl TableLayout {
232     #[inline]
233     fn new<T>() -> Self {
234         let layout = Layout::new::<T>();
235         Self {
236             size: layout.size(),
237             ctrl_align: usize::max(layout.align(), Group::WIDTH),
238         }
239     }
240 
241     #[inline]
242     fn calculate_layout_for(self, buckets: usize) -> Option<(Layout, usize)> {
243         debug_assert!(buckets.is_power_of_two());
244 
245         let TableLayout { size, ctrl_align } = self;
246         // Manual layout calculation since Layout methods are not yet stable.
247         let ctrl_offset =
248             size.checked_mul(buckets)?.checked_add(ctrl_align - 1)? & !(ctrl_align - 1);
249         let len = ctrl_offset.checked_add(buckets + Group::WIDTH)?;
250 
251         Some((
252             unsafe { Layout::from_size_align_unchecked(len, ctrl_align) },
253             ctrl_offset,
254         ))
255     }
256 }
257 
258 /// Returns a Layout which describes the allocation required for a hash table,
259 /// and the offset of the control bytes in the allocation.
260 /// (the offset is also one past the last element of the buckets)
261 ///
262 /// Returns `None` if an overflow occurs.
263 #[cfg_attr(feature = "inline-more", inline)]
264 fn calculate_layout<T>(buckets: usize) -> Option<(Layout, usize)> {
265     TableLayout::new::<T>().calculate_layout_for(buckets)
266 }
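// Illustrative sketch (editorial): for a table of u64 values with 16 buckets and
// Group::WIDTH == 16, `calculate_layout_for` places the data first and the control
// bytes after it:
//
//     size = 8, ctrl_align = max(8, 16) = 16
//     ctrl_offset = round_up(8 * 16, 16)  = 128
//     len         = 128 + (16 + 16)       = 160  // buckets + trailing replicated group
//
// matching the "[Padding], T1, ..., Tlast, C1, C2, ..." picture documented on
// `RawTableInner::ctrl` below.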
267 
268 /// A reference to a hash table bucket containing a `T`.
269 ///
270 /// This is usually just a pointer to the element itself. However if the element
271 /// is a ZST, then we instead track the index of the element in the table so
272 /// that `erase` works properly.
273 pub struct Bucket<T> {
274     // Actually it is a pointer to the next element rather than to the element itself;
275     // this is needed to maintain pointer arithmetic invariants, since
276     // keeping a direct pointer to the element introduces difficulty.
277     // We use `NonNull` for variance and niche layout.
278     ptr: NonNull<T>,
279 }
280 
281 // This Send impl is needed for rayon support. This is safe since Bucket is
282 // never exposed in a public API.
283 unsafe impl<T> Send for Bucket<T> {}
284 
285 impl<T> Clone for Bucket<T> {
286     #[inline]
287     fn clone(&self) -> Self {
288         Self { ptr: self.ptr }
289     }
290 }
291 
292 impl<T> Bucket<T> {
293     #[inline]
294     unsafe fn from_base_index(base: NonNull<T>, index: usize) -> Self {
295         let ptr = if mem::size_of::<T>() == 0 {
296             // won't overflow because index must be less than length
297             (index + 1) as *mut T
298         } else {
299             base.as_ptr().sub(index)
300         };
301         Self {
302             ptr: NonNull::new_unchecked(ptr),
303         }
304     }
305     #[inline]
306     unsafe fn to_base_index(&self, base: NonNull<T>) -> usize {
307         if mem::size_of::<T>() == 0 {
308             self.ptr.as_ptr() as usize - 1
309         } else {
310             offset_from(base.as_ptr(), self.ptr.as_ptr())
311         }
312     }
313     #[inline]
314     pub fn as_ptr(&self) -> *mut T {
315         if mem::size_of::<T>() == 0 {
316             // Just return an arbitrary ZST pointer which is properly aligned
317             mem::align_of::<T>() as *mut T
318         } else {
319             unsafe { self.ptr.as_ptr().sub(1) }
320         }
321     }
322     #[inline]
323     unsafe fn next_n(&self, offset: usize) -> Self {
324         let ptr = if mem::size_of::<T>() == 0 {
325             (self.ptr.as_ptr() as usize + offset) as *mut T
326         } else {
327             self.ptr.as_ptr().sub(offset)
328         };
329         Self {
330             ptr: NonNull::new_unchecked(ptr),
331         }
332     }
333     #[cfg_attr(feature = "inline-more", inline)]
334     pub unsafe fn drop(&self) {
335         self.as_ptr().drop_in_place();
336     }
337     #[inline]
338     pub unsafe fn read(&self) -> T {
339         self.as_ptr().read()
340     }
341     #[inline]
342     pub unsafe fn write(&self, val: T) {
343         self.as_ptr().write(val);
344     }
345     #[inline]
346     pub unsafe fn as_ref<'a>(&self) -> &'a T {
347         &*self.as_ptr()
348     }
349     #[inline]
350     pub unsafe fn as_mut<'a>(&self) -> &'a mut T {
351         &mut *self.as_ptr()
352     }
353     #[cfg(feature = "raw")]
354     #[inline]
355     pub unsafe fn copy_from_nonoverlapping(&self, other: &Self) {
356         self.as_ptr().copy_from_nonoverlapping(other.as_ptr(), 1);
357     }
358 }
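// Illustrative sketch (editorial): because the data grows downwards from the control
// bytes, a `Bucket` stores a pointer one past its element, and element `i` lives at
// `data_end - (i + 1)`. For a non-ZST table:
//
//     let end: NonNull<T> = unsafe { table.data_end() };
//     let b = unsafe { Bucket::from_base_index(end, 2) };  // points at end - 2
//     assert_eq!(unsafe { b.to_base_index(end) }, 2);
//     let elem: *mut T = b.as_ptr();                        // end - 3, i.e. element #2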
359 
360 /// A raw hash table with an unsafe API.
361 pub struct RawTable<T, A: Allocator + Clone = Global> {
362     table: RawTableInner<A>,
363     // Tell dropck that we own instances of T.
364     marker: PhantomData<T>,
365 }
366 
367 /// Non-generic part of `RawTable` which allows functions to be instantiated only once regardless
368 /// of how many different key-value types are used.
369 struct RawTableInner<A> {
370     // Mask to get an index from a hash value. The value is one less than the
371     // number of buckets in the table.
372     bucket_mask: usize,
373 
374     // [Padding], T1, T2, ..., Tlast, C1, C2, ...
375     //                                ^ points here
376     ctrl: NonNull<u8>,
377 
378     // Number of elements that can be inserted before we need to grow the table
379     growth_left: usize,
380 
381     // Number of elements in the table, only really used by len()
382     items: usize,
383 
384     alloc: A,
385 }
386 
387 impl<T> RawTable<T, Global> {
388     /// Creates a new empty hash table without allocating any memory.
389     ///
390     /// In effect this returns a table with exactly 1 bucket. However we can
391     /// leave the data pointer dangling since that bucket is never written to
392     /// due to our load factor forcing us to always have at least 1 free bucket.
393     #[inline]
394     pub const fn new() -> Self {
395         Self {
396             table: RawTableInner::new_in(Global),
397             marker: PhantomData,
398         }
399     }
400 
401     /// Attempts to allocate a new hash table with at least enough capacity
402     /// for inserting the given number of elements without reallocating.
403     #[cfg(feature = "raw")]
404     pub fn try_with_capacity(capacity: usize) -> Result<Self, TryReserveError> {
405         Self::try_with_capacity_in(capacity, Global)
406     }
407 
408     /// Allocates a new hash table with at least enough capacity for inserting
409     /// the given number of elements without reallocating.
410     pub fn with_capacity(capacity: usize) -> Self {
411         Self::with_capacity_in(capacity, Global)
412     }
413 }
414 
415 impl<T, A: Allocator + Clone> RawTable<T, A> {
416     /// Creates a new empty hash table without allocating any memory, using the
417     /// given allocator.
418     ///
419     /// In effect this returns a table with exactly 1 bucket. However we can
420     /// leave the data pointer dangling since that bucket is never written to
421     /// due to our load factor forcing us to always have at least 1 free bucket.
422     #[inline]
423     pub fn new_in(alloc: A) -> Self {
424         Self {
425             table: RawTableInner::new_in(alloc),
426             marker: PhantomData,
427         }
428     }
429 
430     /// Allocates a new hash table with the given number of buckets.
431     ///
432     /// The control bytes are left uninitialized.
433     #[cfg_attr(feature = "inline-more", inline)]
434     unsafe fn new_uninitialized(
435         alloc: A,
436         buckets: usize,
437         fallibility: Fallibility,
438     ) -> Result<Self, TryReserveError> {
439         debug_assert!(buckets.is_power_of_two());
440 
441         Ok(Self {
442             table: RawTableInner::new_uninitialized(
443                 alloc,
444                 TableLayout::new::<T>(),
445                 buckets,
446                 fallibility,
447             )?,
448             marker: PhantomData,
449         })
450     }
451 
452     /// Attempts to allocate a new hash table with at least enough capacity
453     /// for inserting the given number of elements without reallocating.
454     fn fallible_with_capacity(
455         alloc: A,
456         capacity: usize,
457         fallibility: Fallibility,
458     ) -> Result<Self, TryReserveError> {
459         Ok(Self {
460             table: RawTableInner::fallible_with_capacity(
461                 alloc,
462                 TableLayout::new::<T>(),
463                 capacity,
464                 fallibility,
465             )?,
466             marker: PhantomData,
467         })
468     }
469 
470     /// Attempts to allocate a new hash table using the given allocator, with at least enough
471     /// capacity for inserting the given number of elements without reallocating.
472     #[cfg(feature = "raw")]
473     pub fn try_with_capacity_in(capacity: usize, alloc: A) -> Result<Self, TryReserveError> {
474         Self::fallible_with_capacity(alloc, capacity, Fallibility::Fallible)
475     }
476 
477     /// Allocates a new hash table using the given allocator, with at least enough capacity for
478     /// inserting the given number of elements without reallocating.
479     pub fn with_capacity_in(capacity: usize, alloc: A) -> Self {
480         // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
481         match Self::fallible_with_capacity(alloc, capacity, Fallibility::Infallible) {
482             Ok(capacity) => capacity,
483             Err(_) => unsafe { hint::unreachable_unchecked() },
484         }
485     }
486 
487     /// Returns a reference to the underlying allocator.
488     #[inline]
489     pub fn allocator(&self) -> &A {
490         &self.table.alloc
491     }
492 
493     /// Deallocates the table without dropping any entries.
494     #[cfg_attr(feature = "inline-more", inline)]
495     unsafe fn free_buckets(&mut self) {
496         self.table.free_buckets(TableLayout::new::<T>());
497     }
498 
499     /// Returns a pointer to one past the last element of the data table.
500     #[inline]
501     pub unsafe fn data_end(&self) -> NonNull<T> {
502         NonNull::new_unchecked(self.table.ctrl.as_ptr().cast())
503     }
504 
505     /// Returns a pointer to the start of the data table.
506     #[inline]
507     #[cfg(feature = "nightly")]
508     pub unsafe fn data_start(&self) -> *mut T {
509         self.data_end().as_ptr().wrapping_sub(self.buckets())
510     }
511 
512     /// Returns the index of a bucket from a `Bucket`.
513     #[inline]
514     pub unsafe fn bucket_index(&self, bucket: &Bucket<T>) -> usize {
515         bucket.to_base_index(self.data_end())
516     }
517 
518     /// Returns a pointer to an element in the table.
519     #[inline]
520     pub unsafe fn bucket(&self, index: usize) -> Bucket<T> {
521         debug_assert_ne!(self.table.bucket_mask, 0);
522         debug_assert!(index < self.buckets());
523         Bucket::from_base_index(self.data_end(), index)
524     }
525 
526     /// Erases an element from the table without dropping it.
527     #[cfg_attr(feature = "inline-more", inline)]
528     #[deprecated(since = "0.8.1", note = "use erase or remove instead")]
529     pub unsafe fn erase_no_drop(&mut self, item: &Bucket<T>) {
530         let index = self.bucket_index(item);
531         self.table.erase(index);
532     }
533 
534     /// Erases an element from the table, dropping it in place.
535     #[cfg_attr(feature = "inline-more", inline)]
536     #[allow(clippy::needless_pass_by_value)]
537     #[allow(deprecated)]
538     pub unsafe fn erase(&mut self, item: Bucket<T>) {
539         // Erase the element from the table first since drop might panic.
540         self.erase_no_drop(&item);
541         item.drop();
542     }
543 
544     /// Finds and erases an element from the table, dropping it in place.
545     /// Returns true if an element was found.
546     #[cfg(feature = "raw")]
547     #[cfg_attr(feature = "inline-more", inline)]
548     pub fn erase_entry(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> bool {
549         // Avoid `Option::map` because it bloats LLVM IR.
550         if let Some(bucket) = self.find(hash, eq) {
551             unsafe {
552                 self.erase(bucket);
553             }
554             true
555         } else {
556             false
557         }
558     }
559 
560     /// Removes an element from the table, returning it.
561     #[cfg_attr(feature = "inline-more", inline)]
562     #[allow(clippy::needless_pass_by_value)]
563     #[allow(deprecated)]
564     pub unsafe fn remove(&mut self, item: Bucket<T>) -> T {
565         self.erase_no_drop(&item);
566         item.read()
567     }
568 
569     /// Finds and removes an element from the table, returning it.
570     #[cfg_attr(feature = "inline-more", inline)]
571     pub fn remove_entry(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<T> {
572         // Avoid `Option::map` because it bloats LLVM IR.
573         match self.find(hash, eq) {
574             Some(bucket) => Some(unsafe { self.remove(bucket) }),
575             None => None,
576         }
577     }
578 
579     /// Marks all table buckets as empty without dropping their contents.
580     #[cfg_attr(feature = "inline-more", inline)]
581     pub fn clear_no_drop(&mut self) {
582         self.table.clear_no_drop();
583     }
584 
585     /// Removes all elements from the table without freeing the backing memory.
586     #[cfg_attr(feature = "inline-more", inline)]
587     pub fn clear(&mut self) {
588         // Ensure that the table is reset even if one of the drops panics
589         let mut self_ = guard(self, |self_| self_.clear_no_drop());
590         unsafe {
591             self_.drop_elements();
592         }
593     }
594 
595     unsafe fn drop_elements(&mut self) {
596         if mem::needs_drop::<T>() && !self.is_empty() {
597             for item in self.iter() {
598                 item.drop();
599             }
600         }
601     }
602 
603     /// Shrinks the table to fit `max(self.len(), min_size)` elements.
604     #[cfg_attr(feature = "inline-more", inline)]
605     pub fn shrink_to(&mut self, min_size: usize, hasher: impl Fn(&T) -> u64) {
606         // Calculate the minimal number of elements that we need to reserve
607         // space for.
608         let min_size = usize::max(self.table.items, min_size);
609         if min_size == 0 {
610             *self = Self::new_in(self.table.alloc.clone());
611             return;
612         }
613 
614         // Calculate the number of buckets that we need for this number of
615         // elements. If the calculation overflows then the requested bucket
616         // count must be larger than what we have right now and nothing needs to be
617         // done.
618         let min_buckets = match capacity_to_buckets(min_size) {
619             Some(buckets) => buckets,
620             None => return,
621         };
622 
623         // If we have more buckets than we need, shrink the table.
624         if min_buckets < self.buckets() {
625             // Fast path if the table is empty
626             if self.table.items == 0 {
627                 *self = Self::with_capacity_in(min_size, self.table.alloc.clone());
628             } else {
629                 // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
630                 if self
631                     .resize(min_size, hasher, Fallibility::Infallible)
632                     .is_err()
633                 {
634                     unsafe { hint::unreachable_unchecked() }
635                 }
636             }
637         }
638     }
639 
640     /// Ensures that at least `additional` items can be inserted into the table
641     /// without reallocation.
642     #[cfg_attr(feature = "inline-more", inline)]
643     pub fn reserve(&mut self, additional: usize, hasher: impl Fn(&T) -> u64) {
644         if additional > self.table.growth_left {
645             // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
646             if self
647                 .reserve_rehash(additional, hasher, Fallibility::Infallible)
648                 .is_err()
649             {
650                 unsafe { hint::unreachable_unchecked() }
651             }
652         }
653     }
654 
655     /// Tries to ensure that at least `additional` items can be inserted into
656     /// the table without reallocation.
657     #[cfg_attr(feature = "inline-more", inline)]
658     pub fn try_reserve(
659         &mut self,
660         additional: usize,
661         hasher: impl Fn(&T) -> u64,
662     ) -> Result<(), TryReserveError> {
663         if additional > self.table.growth_left {
664             self.reserve_rehash(additional, hasher, Fallibility::Fallible)
665         } else {
666             Ok(())
667         }
668     }
669 
670     /// Out-of-line slow path for `reserve` and `try_reserve`.
671     #[cold]
672     #[inline(never)]
673     fn reserve_rehash(
674         &mut self,
675         additional: usize,
676         hasher: impl Fn(&T) -> u64,
677         fallibility: Fallibility,
678     ) -> Result<(), TryReserveError> {
679         unsafe {
680             self.table.reserve_rehash_inner(
681                 additional,
682                 &|table, index| hasher(table.bucket::<T>(index).as_ref()),
683                 fallibility,
684                 TableLayout::new::<T>(),
685                 if mem::needs_drop::<T>() {
686                     Some(mem::transmute(ptr::drop_in_place::<T> as unsafe fn(*mut T)))
687                 } else {
688                     None
689                 },
690             )
691         }
692     }
693 
694     /// Allocates a new table of a different size and moves the contents of the
695     /// current table into it.
696     fn resize(
697         &mut self,
698         capacity: usize,
699         hasher: impl Fn(&T) -> u64,
700         fallibility: Fallibility,
701     ) -> Result<(), TryReserveError> {
702         unsafe {
703             self.table.resize_inner(
704                 capacity,
705                 &|table, index| hasher(table.bucket::<T>(index).as_ref()),
706                 fallibility,
707                 TableLayout::new::<T>(),
708             )
709         }
710     }
711 
712     /// Inserts a new element into the table, and returns its raw bucket.
713     ///
714     /// This does not check if the given element already exists in the table.
715     #[cfg_attr(feature = "inline-more", inline)]
716     pub fn insert(&mut self, hash: u64, value: T, hasher: impl Fn(&T) -> u64) -> Bucket<T> {
717         unsafe {
718             let mut index = self.table.find_insert_slot(hash);
719 
720             // We can avoid growing the table once we have reached our load
721             // factor if we are replacing a tombstone. This works since the
722             // number of EMPTY slots does not change in this case.
723             let old_ctrl = *self.table.ctrl(index);
724             if unlikely(self.table.growth_left == 0 && special_is_empty(old_ctrl)) {
725                 self.reserve(1, hasher);
726                 index = self.table.find_insert_slot(hash);
727             }
728 
729             self.table.record_item_insert_at(index, old_ctrl, hash);
730 
731             let bucket = self.bucket(index);
732             bucket.write(value);
733             bucket
734         }
735     }
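// Illustrative usage sketch (editorial): callers supply both the hash of the new
// value and a hasher closure so the table can rehash existing elements if it has to
// grow. `hash_one` below is a hypothetical caller-provided helper, not part of this API:
//
//     let hash = hash_one(&value);
//     let bucket = table.insert(hash, value, |v| hash_one(v));
//     let stored: &T = unsafe { bucket.as_ref() };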
736 
737     /// Attempts to insert a new element without growing the table and return its raw bucket.
738     ///
739     /// Returns an `Err` containing the given element if inserting it would require growing the
740     /// table.
741     ///
742     /// This does not check if the given element already exists in the table.
743     #[cfg(feature = "raw")]
744     #[cfg_attr(feature = "inline-more", inline)]
745     pub fn try_insert_no_grow(&mut self, hash: u64, value: T) -> Result<Bucket<T>, T> {
746         unsafe {
747             match self.table.prepare_insert_no_grow(hash) {
748                 Ok(index) => {
749                     let bucket = self.bucket(index);
750                     bucket.write(value);
751                     Ok(bucket)
752                 }
753                 Err(()) => Err(value),
754             }
755         }
756     }
757 
758     /// Inserts a new element into the table, and returns a mutable reference to it.
759     ///
760     /// This does not check if the given element already exists in the table.
761     #[cfg_attr(feature = "inline-more", inline)]
762     pub fn insert_entry(&mut self, hash: u64, value: T, hasher: impl Fn(&T) -> u64) -> &mut T {
763         unsafe { self.insert(hash, value, hasher).as_mut() }
764     }
765 
766     /// Inserts a new element into the table, without growing the table.
767     ///
768     /// There must be enough space in the table to insert the new element.
769     ///
770     /// This does not check if the given element already exists in the table.
771     #[cfg_attr(feature = "inline-more", inline)]
772     #[cfg(any(feature = "raw", feature = "rustc-internal-api"))]
773     pub unsafe fn insert_no_grow(&mut self, hash: u64, value: T) -> Bucket<T> {
774         let (index, old_ctrl) = self.table.prepare_insert_slot(hash);
775         let bucket = self.table.bucket(index);
776 
777         // If we are replacing a DELETED entry then we don't need to update
778         // the load counter.
779         self.table.growth_left -= special_is_empty(old_ctrl) as usize;
780 
781         bucket.write(value);
782         self.table.items += 1;
783         bucket
784     }
785 
786     /// Temporarily removes a bucket, applying the given function to the removed
787     /// element and optionally putting the returned value back in the same bucket.
788     ///
789     /// Returns `true` if the bucket still contains an element.
790     ///
791     /// This does not check if the given bucket is actually occupied.
792     #[cfg_attr(feature = "inline-more", inline)]
793     pub unsafe fn replace_bucket_with<F>(&mut self, bucket: Bucket<T>, f: F) -> bool
794     where
795         F: FnOnce(T) -> Option<T>,
796     {
797         let index = self.bucket_index(&bucket);
798         let old_ctrl = *self.table.ctrl(index);
799         debug_assert!(is_full(old_ctrl));
800         let old_growth_left = self.table.growth_left;
801         let item = self.remove(bucket);
802         if let Some(new_item) = f(item) {
803             self.table.growth_left = old_growth_left;
804             self.table.set_ctrl(index, old_ctrl);
805             self.table.items += 1;
806             self.bucket(index).write(new_item);
807             true
808         } else {
809             false
810         }
811     }
812 
813     /// Searches for an element in the table.
814     #[inline]
815     pub fn find(&self, hash: u64, mut eq: impl FnMut(&T) -> bool) -> Option<Bucket<T>> {
816         let result = self.table.find_inner(hash, &mut |index| unsafe {
817             eq(self.bucket(index).as_ref())
818         });
819 
820         // Avoid `Option::map` because it bloats LLVM IR.
821         match result {
822             Some(index) => Some(unsafe { self.bucket(index) }),
823             None => None,
824         }
825     }
826 
827     /// Gets a reference to an element in the table.
828     #[inline]
829     pub fn get(&self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&T> {
830         // Avoid `Option::map` because it bloats LLVM IR.
831         match self.find(hash, eq) {
832             Some(bucket) => Some(unsafe { bucket.as_ref() }),
833             None => None,
834         }
835     }
836 
837     /// Gets a mutable reference to an element in the table.
838     #[inline]
839     pub fn get_mut(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&mut T> {
840         // Avoid `Option::map` because it bloats LLVM IR.
841         match self.find(hash, eq) {
842             Some(bucket) => Some(unsafe { bucket.as_mut() }),
843             None => None,
844         }
845     }
846 
847     /// Attempts to get mutable references to `N` entries in the table at once.
848     ///
849     /// Returns an array of length `N` with the results of each query.
850     ///
851     /// At most one mutable reference will be returned to any entry. `None` will be returned if any
852     /// of the hashes are duplicates. `None` will be returned if the hash is not found.
853     ///
854     /// The `eq` argument should be a closure such that `eq(i, k)` returns true if `k` is equal to
855     /// the `i`th key to be looked up.
856     pub fn get_many_mut<const N: usize>(
857         &mut self,
858         hashes: [u64; N],
859         eq: impl FnMut(usize, &T) -> bool,
860     ) -> Option<[&'_ mut T; N]> {
861         unsafe {
862             let ptrs = self.get_many_mut_pointers(hashes, eq)?;
863 
864             for (i, &cur) in ptrs.iter().enumerate() {
865                 if ptrs[..i].iter().any(|&prev| ptr::eq::<T>(prev, cur)) {
866                     return None;
867                 }
868             }
869             // All buckets are distinct from all previous buckets, so we're clear to return the result
870             // of the lookup.
871 
872             // TODO use `MaybeUninit::array_assume_init` here instead once that's stable.
873             Some(mem::transmute_copy(&ptrs))
874         }
875     }
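// Illustrative usage sketch (editorial): looking up two keys at once; the index passed
// to `eq` says which of the two hashes is currently being probed. This assumes a
// map-like table of (K, V) pairs and a hypothetical caller-provided `hash_one`:
//
//     let hashes = [hash_one(&k1), hash_one(&k2)];
//     let pair = table.get_many_mut(hashes, |i, (k, _)| {
//         if i == 0 { *k == k1 } else { *k == k2 }
//     });
//     // `pair` is `None` if either key is missing or both hashes resolve to the
//     // same entry.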
876 
877     pub unsafe fn get_many_unchecked_mut<const N: usize>(
878         &mut self,
879         hashes: [u64; N],
880         eq: impl FnMut(usize, &T) -> bool,
881     ) -> Option<[&'_ mut T; N]> {
882         let ptrs = self.get_many_mut_pointers(hashes, eq)?;
883         Some(mem::transmute_copy(&ptrs))
884     }
885 
886     unsafe fn get_many_mut_pointers<const N: usize>(
887         &mut self,
888         hashes: [u64; N],
889         mut eq: impl FnMut(usize, &T) -> bool,
890     ) -> Option<[*mut T; N]> {
891         // TODO use `MaybeUninit::uninit_array` here instead once that's stable.
892         let mut outs: MaybeUninit<[*mut T; N]> = MaybeUninit::uninit();
893         let outs_ptr = outs.as_mut_ptr();
894 
895         for (i, &hash) in hashes.iter().enumerate() {
896             let cur = self.find(hash, |k| eq(i, k))?;
897             *(*outs_ptr).get_unchecked_mut(i) = cur.as_mut();
898         }
899 
900         // TODO use `MaybeUninit::array_assume_init` here instead once that's stable.
901         Some(outs.assume_init())
902     }
903 
904     /// Returns the number of elements the map can hold without reallocating.
905     ///
906     /// This number is a lower bound; the table might be able to hold
907     /// more, but is guaranteed to be able to hold at least this many.
908     #[inline]
909     pub fn capacity(&self) -> usize {
910         self.table.items + self.table.growth_left
911     }
912 
913     /// Returns the number of elements in the table.
914     #[inline]
915     pub fn len(&self) -> usize {
916         self.table.items
917     }
918 
919     /// Returns `true` if the table contains no elements.
920     #[inline]
921     pub fn is_empty(&self) -> bool {
922         self.len() == 0
923     }
924 
925     /// Returns the number of buckets in the table.
926     #[inline]
927     pub fn buckets(&self) -> usize {
928         self.table.bucket_mask + 1
929     }
930 
931     /// Returns an iterator over every element in the table. It is up to
932     /// the caller to ensure that the `RawTable` outlives the `RawIter`.
933     /// Because we cannot make the `next` method unsafe on the `RawIter`
934     /// struct, we have to make the `iter` method unsafe.
935     #[inline]
936     pub unsafe fn iter(&self) -> RawIter<T> {
937         let data = Bucket::from_base_index(self.data_end(), 0);
938         RawIter {
939             iter: RawIterRange::new(self.table.ctrl.as_ptr(), data, self.table.buckets()),
940             items: self.table.items,
941         }
942     }
943 
944     /// Returns an iterator over occupied buckets that could match a given hash.
945     ///
946     /// `RawTable` only stores 7 bits of the hash value, so this iterator may
947     /// return items that have a hash value different than the one provided. You
948     /// should always validate the returned values before using them.
949     ///
950     /// It is up to the caller to ensure that the `RawTable` outlives the
951     /// `RawIterHash`. Because we cannot make the `next` method unsafe on the
952     /// `RawIterHash` struct, we have to make the `iter_hash` method unsafe.
953     #[cfg_attr(feature = "inline-more", inline)]
954     #[cfg(feature = "raw")]
955     pub unsafe fn iter_hash(&self, hash: u64) -> RawIterHash<'_, T, A> {
956         RawIterHash::new(self, hash)
957     }
958 
959     /// Returns an iterator which removes all elements from the table without
960     /// freeing the memory.
961     #[cfg_attr(feature = "inline-more", inline)]
962     pub fn drain(&mut self) -> RawDrain<'_, T, A> {
963         unsafe {
964             let iter = self.iter();
965             self.drain_iter_from(iter)
966         }
967     }
968 
969     /// Returns an iterator which removes all elements from the table without
970     /// freeing the memory.
971     ///
972     /// Iteration starts at the provided iterator's current location.
973     ///
974     /// It is up to the caller to ensure that the iterator is valid for this
975     /// `RawTable` and covers all items that remain in the table.
976     #[cfg_attr(feature = "inline-more", inline)]
977     pub unsafe fn drain_iter_from(&mut self, iter: RawIter<T>) -> RawDrain<'_, T, A> {
978         debug_assert_eq!(iter.len(), self.len());
979         RawDrain {
980             iter,
981             table: ManuallyDrop::new(mem::replace(self, Self::new_in(self.table.alloc.clone()))),
982             orig_table: NonNull::from(self),
983             marker: PhantomData,
984         }
985     }
986 
987     /// Returns an iterator which consumes all elements from the table.
988     ///
989     /// Iteration starts at the provided iterator's current location.
990     ///
991     /// It is up to the caller to ensure that the iterator is valid for this
992     /// `RawTable` and covers all items that remain in the table.
993     pub unsafe fn into_iter_from(self, iter: RawIter<T>) -> RawIntoIter<T, A> {
994         debug_assert_eq!(iter.len(), self.len());
995 
996         let alloc = self.table.alloc.clone();
997         let allocation = self.into_allocation();
998         RawIntoIter {
999             iter,
1000             allocation,
1001             marker: PhantomData,
1002             alloc,
1003         }
1004     }
1005 
1006     /// Converts the table into a raw allocation. The contents of the table
1007     /// should be dropped using a `RawIter` before freeing the allocation.
1008     #[cfg_attr(feature = "inline-more", inline)]
1009     pub(crate) fn into_allocation(self) -> Option<(NonNull<u8>, Layout)> {
1010         let alloc = if self.table.is_empty_singleton() {
1011             None
1012         } else {
1013             // Avoid `Option::unwrap_or_else` because it bloats LLVM IR.
1014             let (layout, ctrl_offset) = match calculate_layout::<T>(self.table.buckets()) {
1015                 Some(lco) => lco,
1016                 None => unsafe { hint::unreachable_unchecked() },
1017             };
1018             Some((
1019                 unsafe { NonNull::new_unchecked(self.table.ctrl.as_ptr().sub(ctrl_offset)) },
1020                 layout,
1021             ))
1022         };
1023         mem::forget(self);
1024         alloc
1025     }
1026 }
1027 
1028 unsafe impl<T, A: Allocator + Clone> Send for RawTable<T, A>
1029 where
1030     T: Send,
1031     A: Send,
1032 {
1033 }
1034 unsafe impl<T, A: Allocator + Clone> Sync for RawTable<T, A>
1035 where
1036     T: Sync,
1037     A: Sync,
1038 {
1039 }
1040 
1041 impl<A> RawTableInner<A> {
1042     #[inline]
1043     const fn new_in(alloc: A) -> Self {
1044         Self {
1045             // Be careful to cast the entire slice to a raw pointer.
1046             ctrl: unsafe { NonNull::new_unchecked(Group::static_empty() as *const _ as *mut u8) },
1047             bucket_mask: 0,
1048             items: 0,
1049             growth_left: 0,
1050             alloc,
1051         }
1052     }
1053 }
1054 
1055 impl<A: Allocator + Clone> RawTableInner<A> {
1056     #[cfg_attr(feature = "inline-more", inline)]
1057     unsafe fn new_uninitialized(
1058         alloc: A,
1059         table_layout: TableLayout,
1060         buckets: usize,
1061         fallibility: Fallibility,
1062     ) -> Result<Self, TryReserveError> {
1063         debug_assert!(buckets.is_power_of_two());
1064 
1065         // Avoid `Option::ok_or_else` because it bloats LLVM IR.
1066         let (layout, ctrl_offset) = match table_layout.calculate_layout_for(buckets) {
1067             Some(lco) => lco,
1068             None => return Err(fallibility.capacity_overflow()),
1069         };
1070 
1071         // We need an additional check to ensure that the allocation doesn't
1072         // exceed `isize::MAX`. We can skip this check on 64-bit systems since
1073         // such allocations will never succeed anyways.
1074         //
1075         // This mirrors what Vec does in the standard library.
1076         if mem::size_of::<usize>() < 8 && layout.size() > isize::MAX as usize {
1077             return Err(fallibility.capacity_overflow());
1078         }
1079 
1080         let ptr: NonNull<u8> = match do_alloc(&alloc, layout) {
1081             Ok(block) => block.cast(),
1082             Err(_) => return Err(fallibility.alloc_err(layout)),
1083         };
1084 
1085         let ctrl = NonNull::new_unchecked(ptr.as_ptr().add(ctrl_offset));
1086         Ok(Self {
1087             ctrl,
1088             bucket_mask: buckets - 1,
1089             items: 0,
1090             growth_left: bucket_mask_to_capacity(buckets - 1),
1091             alloc,
1092         })
1093     }
1094 
1095     #[inline]
1096     fn fallible_with_capacity(
1097         alloc: A,
1098         table_layout: TableLayout,
1099         capacity: usize,
1100         fallibility: Fallibility,
1101     ) -> Result<Self, TryReserveError> {
1102         if capacity == 0 {
1103             Ok(Self::new_in(alloc))
1104         } else {
1105             unsafe {
1106                 let buckets =
1107                     capacity_to_buckets(capacity).ok_or_else(|| fallibility.capacity_overflow())?;
1108 
1109                 let result = Self::new_uninitialized(alloc, table_layout, buckets, fallibility)?;
1110                 result.ctrl(0).write_bytes(EMPTY, result.num_ctrl_bytes());
1111 
1112                 Ok(result)
1113             }
1114         }
1115     }
1116 
1117     /// Searches for an empty or deleted bucket which is suitable for inserting
1118     /// a new element and sets the hash for that slot.
1119     ///
1120     /// There must be at least 1 empty bucket in the table.
1121     #[inline]
1122     unsafe fn prepare_insert_slot(&self, hash: u64) -> (usize, u8) {
1123         let index = self.find_insert_slot(hash);
1124         let old_ctrl = *self.ctrl(index);
1125         self.set_ctrl_h2(index, hash);
1126         (index, old_ctrl)
1127     }
1128 
1129     /// Searches for an empty or deleted bucket which is suitable for inserting
1130     /// a new element.
1131     ///
1132     /// There must be at least 1 empty bucket in the table.
1133     #[inline]
1134     fn find_insert_slot(&self, hash: u64) -> usize {
1135         let mut probe_seq = self.probe_seq(hash);
1136         loop {
1137             unsafe {
1138                 let group = Group::load(self.ctrl(probe_seq.pos));
1139                 if let Some(bit) = group.match_empty_or_deleted().lowest_set_bit() {
1140                     let result = (probe_seq.pos + bit) & self.bucket_mask;
1141 
1142                     // In tables smaller than the group width, trailing control
1143                     // bytes outside the range of the table are filled with
1144                     // EMPTY entries. These will unfortunately trigger a
1145                     // match, but once masked may point to a full bucket that
1146                     // is already occupied. We detect this situation here and
1147                     // perform a second scan starting at the beginning of the
1148                     // table. This second scan is guaranteed to find an empty
1149                     // slot (due to the load factor) before hitting the trailing
1150                     // control bytes (containing EMPTY).
1151                     if unlikely(is_full(*self.ctrl(result))) {
1152                         debug_assert!(self.bucket_mask < Group::WIDTH);
1153                         debug_assert_ne!(probe_seq.pos, 0);
1154                         return Group::load_aligned(self.ctrl(0))
1155                             .match_empty_or_deleted()
1156                             .lowest_set_bit_nonzero();
1157                     }
1158 
1159                     return result;
1160                 }
1161             }
1162             probe_seq.move_next(self.bucket_mask);
1163         }
1164     }
1165 
1166     /// Searches for an element in the table. This uses dynamic dispatch to reduce the amount of
1167     /// code generated, but it is eliminated by LLVM optimizations.
1168     #[inline]
1169     fn find_inner(&self, hash: u64, eq: &mut dyn FnMut(usize) -> bool) -> Option<usize> {
1170         let h2_hash = h2(hash);
1171         let mut probe_seq = self.probe_seq(hash);
1172 
1173         loop {
1174             let group = unsafe { Group::load(self.ctrl(probe_seq.pos)) };
1175 
1176             for bit in group.match_byte(h2_hash) {
1177                 let index = (probe_seq.pos + bit) & self.bucket_mask;
1178 
1179                 if likely(eq(index)) {
1180                     return Some(index);
1181                 }
1182             }
1183 
1184             if likely(group.match_empty().any_bit_set()) {
1185                 return None;
1186             }
1187 
1188             probe_seq.move_next(self.bucket_mask);
1189         }
1190     }
1191 
1192     #[allow(clippy::mut_mut)]
1193     #[inline]
1194     unsafe fn prepare_rehash_in_place(&mut self) {
1195         // Bulk convert all full control bytes to DELETED, and all DELETED
1196         // control bytes to EMPTY. This effectively frees up all buckets
1197         // containing a DELETED entry.
1198         for i in (0..self.buckets()).step_by(Group::WIDTH) {
1199             let group = Group::load_aligned(self.ctrl(i));
1200             let group = group.convert_special_to_empty_and_full_to_deleted();
1201             group.store_aligned(self.ctrl(i));
1202         }
1203 
1204         // Fix up the trailing control bytes. See the comments in set_ctrl
1205         // for the handling of tables smaller than the group width.
1206         if self.buckets() < Group::WIDTH {
1207             self.ctrl(0)
1208                 .copy_to(self.ctrl(Group::WIDTH), self.buckets());
1209         } else {
1210             self.ctrl(0)
1211                 .copy_to(self.ctrl(self.buckets()), Group::WIDTH);
1212         }
1213     }
1214 
1215     #[inline]
1216     unsafe fn bucket<T>(&self, index: usize) -> Bucket<T> {
1217         debug_assert_ne!(self.bucket_mask, 0);
1218         debug_assert!(index < self.buckets());
1219         Bucket::from_base_index(self.data_end(), index)
1220     }
1221 
1222     #[inline]
1223     unsafe fn bucket_ptr(&self, index: usize, size_of: usize) -> *mut u8 {
1224         debug_assert_ne!(self.bucket_mask, 0);
1225         debug_assert!(index < self.buckets());
1226         let base: *mut u8 = self.data_end().as_ptr();
1227         base.sub((index + 1) * size_of)
1228     }
1229 
1230     #[inline]
1231     unsafe fn data_end<T>(&self) -> NonNull<T> {
1232         NonNull::new_unchecked(self.ctrl.as_ptr().cast())
1233     }
1234 
1235     /// Returns an iterator-like object for a probe sequence on the table.
1236     ///
1237     /// This iterator never terminates, but is guaranteed to visit each bucket
1238     /// group exactly once. The loop using `probe_seq` must terminate upon
1239     /// reaching a group containing an empty bucket.
1240     #[inline]
1241     fn probe_seq(&self, hash: u64) -> ProbeSeq {
1242         ProbeSeq {
1243             pos: h1(hash) & self.bucket_mask,
1244             stride: 0,
1245         }
1246     }
1247 
1248     /// Returns the index of the bucket into which a value must be inserted if there is enough room
1249     /// in the table; otherwise returns an error.
1250     #[cfg(feature = "raw")]
1251     #[inline]
1252     unsafe fn prepare_insert_no_grow(&mut self, hash: u64) -> Result<usize, ()> {
1253         let index = self.find_insert_slot(hash);
1254         let old_ctrl = *self.ctrl(index);
1255         if unlikely(self.growth_left == 0 && special_is_empty(old_ctrl)) {
1256             Err(())
1257         } else {
1258             self.record_item_insert_at(index, old_ctrl, hash);
1259             Ok(index)
1260         }
1261     }
1262 
1263     #[inline]
1264     unsafe fn record_item_insert_at(&mut self, index: usize, old_ctrl: u8, hash: u64) {
1265         self.growth_left -= usize::from(special_is_empty(old_ctrl));
1266         self.set_ctrl_h2(index, hash);
1267         self.items += 1;
1268     }
1269 
1270     #[inline]
1271     fn is_in_same_group(&self, i: usize, new_i: usize, hash: u64) -> bool {
1272         let probe_seq_pos = self.probe_seq(hash).pos;
1273         let probe_index =
1274             |pos: usize| (pos.wrapping_sub(probe_seq_pos) & self.bucket_mask) / Group::WIDTH;
1275         probe_index(i) == probe_index(new_i)
1276     }
1277 
1278     /// Sets a control byte to the hash, and possibly also the replicated control byte at
1279     /// the end of the array.
1280     #[inline]
1281     unsafe fn set_ctrl_h2(&self, index: usize, hash: u64) {
1282         self.set_ctrl(index, h2(hash));
1283     }
1284 
1285     #[inline]
1286     unsafe fn replace_ctrl_h2(&self, index: usize, hash: u64) -> u8 {
1287         let prev_ctrl = *self.ctrl(index);
1288         self.set_ctrl_h2(index, hash);
1289         prev_ctrl
1290     }
1291 
1292     /// Sets a control byte, and possibly also the replicated control byte at
1293     /// the end of the array.
1294     #[inline]
1295     unsafe fn set_ctrl(&self, index: usize, ctrl: u8) {
1296         // Replicate the first Group::WIDTH control bytes at the end of
1297         // the array without using a branch:
1298         // - If index >= Group::WIDTH then index == index2.
1299         // - Otherwise index2 == self.bucket_mask + 1 + index.
1300         //
1301         // The very last replicated control byte is never actually read because
1302         // we mask the initial index for unaligned loads, but we write it
1303         // anyways because it makes the set_ctrl implementation simpler.
1304         //
1305         // If there are fewer buckets than Group::WIDTH then this code will
1306         // replicate the buckets at the end of the trailing group. For example
1307         // with 2 buckets and a group size of 4, the control bytes will look
1308         // like this:
1309         //
1310         //     Real    |             Replicated
1311         // ---------------------------------------------
1312         // | [A] | [B] | [EMPTY] | [EMPTY] | [A] | [B] |
1313         // ---------------------------------------------
1314         let index2 = ((index.wrapping_sub(Group::WIDTH)) & self.bucket_mask) + Group::WIDTH;
1315 
1316         *self.ctrl(index) = ctrl;
1317         *self.ctrl(index2) = ctrl;
1318     }
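// Illustrative sketch (editorial): with 2 buckets (bucket_mask == 1) and
// Group::WIDTH == 4, the mirror index computed above works out to:
//
//     index == 0  =>  index2 == ((0 - 4) & 1) + 4 == 4
//     index == 1  =>  index2 == ((1 - 4) & 1) + 4 == 5
//
// which writes the replicated copies into the trailing group exactly as drawn in the
// "Real | Replicated" diagram above. For index >= Group::WIDTH the wrapping
// subtraction and mask cancel out, so index2 == index and only one byte changes.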
1319 
1320     /// Returns a pointer to a control byte.
1321     #[inline]
1322     unsafe fn ctrl(&self, index: usize) -> *mut u8 {
1323         debug_assert!(index < self.num_ctrl_bytes());
1324         self.ctrl.as_ptr().add(index)
1325     }
1326 
1327     #[inline]
1328     fn buckets(&self) -> usize {
1329         self.bucket_mask + 1
1330     }
1331 
1332     #[inline]
1333     fn num_ctrl_bytes(&self) -> usize {
1334         self.bucket_mask + 1 + Group::WIDTH
1335     }
1336 
1337     #[inline]
1338     fn is_empty_singleton(&self) -> bool {
1339         self.bucket_mask == 0
1340     }
1341 
1342     #[allow(clippy::mut_mut)]
1343     #[inline]
1344     unsafe fn prepare_resize(
1345         &self,
1346         table_layout: TableLayout,
1347         capacity: usize,
1348         fallibility: Fallibility,
1349     ) -> Result<crate::scopeguard::ScopeGuard<Self, impl FnMut(&mut Self)>, TryReserveError> {
1350         debug_assert!(self.items <= capacity);
1351 
1352         // Allocate and initialize the new table.
1353         let mut new_table = RawTableInner::fallible_with_capacity(
1354             self.alloc.clone(),
1355             table_layout,
1356             capacity,
1357             fallibility,
1358         )?;
1359         new_table.growth_left -= self.items;
1360         new_table.items = self.items;
1361 
1362         // The hash function may panic, in which case we simply free the new
1363         // table without dropping any elements that may have been copied into
1364         // it.
1365         //
1366         // This guard is also used to free the old table on success, see
1367         // the comment at the bottom of this function.
1368         Ok(guard(new_table, move |self_| {
1369             if !self_.is_empty_singleton() {
1370                 self_.free_buckets(table_layout);
1371             }
1372         }))
1373     }
1374 
1375     /// Reserves or rehashes to make room for `additional` more elements.
1376     ///
1377     /// This uses dynamic dispatch to reduce the amount of
1378     /// code generated, but it is eliminated by LLVM optimizations when inlined.
1379     #[allow(clippy::inline_always)]
1380     #[inline(always)]
1381     unsafe fn reserve_rehash_inner(
1382         &mut self,
1383         additional: usize,
1384         hasher: &dyn Fn(&mut Self, usize) -> u64,
1385         fallibility: Fallibility,
1386         layout: TableLayout,
1387         drop: Option<fn(*mut u8)>,
1388     ) -> Result<(), TryReserveError> {
1389         // Avoid `Option::ok_or_else` because it bloats LLVM IR.
1390         let new_items = match self.items.checked_add(additional) {
1391             Some(new_items) => new_items,
1392             None => return Err(fallibility.capacity_overflow()),
1393         };
1394         let full_capacity = bucket_mask_to_capacity(self.bucket_mask);
1395         if new_items <= full_capacity / 2 {
1396             // Rehash in-place without re-allocating if we have plenty of spare
1397             // capacity that is locked up due to DELETED entries.
1398             self.rehash_in_place(hasher, layout.size, drop);
1399             Ok(())
1400         } else {
1401             // Otherwise, conservatively resize to at least the next size up
1402             // to avoid churning deletes into frequent rehashes.
1403             self.resize_inner(
1404                 usize::max(new_items, full_capacity + 1),
1405                 hasher,
1406                 fallibility,
1407                 layout,
1408             )
1409         }
1410     }
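
    // A minimal numeric sketch (not part of the original source) of the branch
    // above, assuming a table whose full capacity is 56 and which currently holds
    // 20 items, with the rest of its occupied slots being DELETED tombstones:
    //
    //     let (items, full_capacity) = (20usize, 56usize);
    //     assert!(items + 5 <= full_capacity / 2); // 25 <= 28 -> rehash in place
    //     assert!(items + 10 > full_capacity / 2); // 30 > 28  -> resize to at least
    //                                              // max(30, full_capacity + 1) = 57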
1411 
1412     /// Allocates a new table of a different size and moves the contents of the
1413     /// current table into it.
1414     ///
1415     /// This uses dynamic dispatch to reduce the amount of
1416     /// code generated, but it is eliminated by LLVM optimizations when inlined.
1417     #[allow(clippy::inline_always)]
1418     #[inline(always)]
1419     unsafe fn resize_inner(
1420         &mut self,
1421         capacity: usize,
1422         hasher: &dyn Fn(&mut Self, usize) -> u64,
1423         fallibility: Fallibility,
1424         layout: TableLayout,
1425     ) -> Result<(), TryReserveError> {
1426         let mut new_table = self.prepare_resize(layout, capacity, fallibility)?;
1427 
1428         // Copy all elements to the new table.
1429         for i in 0..self.buckets() {
1430             if !is_full(*self.ctrl(i)) {
1431                 continue;
1432             }
1433 
1434             // This may panic.
1435             let hash = hasher(self, i);
1436 
1437             // We can use a simpler version of insert() here since:
1438             // - there are no DELETED entries.
1439             // - we know there is enough space in the table.
1440             // - all elements are unique.
1441             let (index, _) = new_table.prepare_insert_slot(hash);
1442 
1443             ptr::copy_nonoverlapping(
1444                 self.bucket_ptr(i, layout.size),
1445                 new_table.bucket_ptr(index, layout.size),
1446                 layout.size,
1447             );
1448         }
1449 
1450         // We successfully copied all elements without panicking. Now replace
1451         // self with the new table. The old table will have its memory freed but
1452         // the items will not be dropped (since they have been moved into the
1453         // new table).
1454         mem::swap(self, &mut new_table);
1455 
1456         Ok(())
1457     }
1458 
1459     /// Rehashes the contents of the table in place (i.e. without changing the
1460     /// allocation).
1461     ///
1462     /// If `hasher` panics then some of the table's contents may be lost.
1463     ///
1464     /// This uses dynamic dispatch to reduce the amount of
1465     /// code generated, but it is eliminated by LLVM optimizations when inlined.
1466     #[allow(clippy::inline_always)]
1467     #[cfg_attr(feature = "inline-more", inline(always))]
1468     #[cfg_attr(not(feature = "inline-more"), inline)]
1469     unsafe fn rehash_in_place(
1470         &mut self,
1471         hasher: &dyn Fn(&mut Self, usize) -> u64,
1472         size_of: usize,
1473         drop: Option<fn(*mut u8)>,
1474     ) {
1475         // If the hash function panics then properly clean up any elements
1476         // that we haven't rehashed yet. We unfortunately can't preserve the
1477         // elements since we lost their hashes and have no way of recovering them
1478         // without risking another panic.
1479         self.prepare_rehash_in_place();
1480 
1481         let mut guard = guard(self, move |self_| {
1482             if let Some(drop) = drop {
1483                 for i in 0..self_.buckets() {
1484                     if *self_.ctrl(i) == DELETED {
1485                         self_.set_ctrl(i, EMPTY);
1486                         drop(self_.bucket_ptr(i, size_of));
1487                         self_.items -= 1;
1488                     }
1489                 }
1490             }
1491             self_.growth_left = bucket_mask_to_capacity(self_.bucket_mask) - self_.items;
1492         });
1493 
1494         // At this point, DELETED elements are elements that we haven't
1495         // rehashed yet. Find them and re-insert them at their ideal
1496         // position.
1497         'outer: for i in 0..guard.buckets() {
1498             if *guard.ctrl(i) != DELETED {
1499                 continue;
1500             }
1501 
1502             let i_p = guard.bucket_ptr(i, size_of);
1503 
1504             'inner: loop {
1505                 // Hash the current item
1506                 let hash = hasher(*guard, i);
1507 
1508                 // Search for a suitable place to put it
1509                 let new_i = guard.find_insert_slot(hash);
1510                 let new_i_p = guard.bucket_ptr(new_i, size_of);
1511 
1512                 // Probing works by scanning through all of the control
1513                 // bytes in groups, which may not be aligned to the group
1514                 // size. If both the new and old position fall within the
1515                 // same unaligned group, then there is no benefit in moving
1516                 // it and we can just continue to the next item.
1517                 if likely(guard.is_in_same_group(i, new_i, hash)) {
1518                     guard.set_ctrl_h2(i, hash);
1519                     continue 'outer;
1520                 }
1521 
1522                 // We are moving the current item to a new position. Write
1523                 // our H2 to the control byte of the new position.
1524                 let prev_ctrl = guard.replace_ctrl_h2(new_i, hash);
1525                 if prev_ctrl == EMPTY {
1526                     guard.set_ctrl(i, EMPTY);
1527                     // If the target slot is empty, simply move the current
1528                     // element into the new slot and clear the old control
1529                     // byte.
1530                     ptr::copy_nonoverlapping(i_p, new_i_p, size_of);
1531                     continue 'outer;
1532                 } else {
1533                     // If the target slot is occupied, swap the two elements
1534                     // and then continue processing the element that we just
1535                     // swapped into the old slot.
1536                     debug_assert_eq!(prev_ctrl, DELETED);
1537                     ptr::swap_nonoverlapping(i_p, new_i_p, size_of);
1538                     continue 'inner;
1539                 }
1540             }
1541         }
1542 
1543         guard.growth_left = bucket_mask_to_capacity(guard.bucket_mask) - guard.items;
1544 
1545         mem::forget(guard);
1546     }
1547 
1548     #[inline]
1549     unsafe fn free_buckets(&mut self, table_layout: TableLayout) {
1550         // Avoid `Option::unwrap_or_else` because it bloats LLVM IR.
1551         let (layout, ctrl_offset) = match table_layout.calculate_layout_for(self.buckets()) {
1552             Some(lco) => lco,
1553             None => hint::unreachable_unchecked(),
1554         };
1555         self.alloc.deallocate(
1556             NonNull::new_unchecked(self.ctrl.as_ptr().sub(ctrl_offset)),
1557             layout,
1558         );
1559     }
1560 
1561     /// Marks all table buckets as empty without dropping their contents.
1562     #[inline]
1563     fn clear_no_drop(&mut self) {
1564         if !self.is_empty_singleton() {
1565             unsafe {
1566                 self.ctrl(0).write_bytes(EMPTY, self.num_ctrl_bytes());
1567             }
1568         }
1569         self.items = 0;
1570         self.growth_left = bucket_mask_to_capacity(self.bucket_mask);
1571     }
1572 
1573     #[inline]
1574     unsafe fn erase(&mut self, index: usize) {
1575         debug_assert!(is_full(*self.ctrl(index)));
1576         let index_before = index.wrapping_sub(Group::WIDTH) & self.bucket_mask;
1577         let empty_before = Group::load(self.ctrl(index_before)).match_empty();
1578         let empty_after = Group::load(self.ctrl(index)).match_empty();
1579 
1580         // If we are inside a continuous block of Group::WIDTH full or deleted
1581         // cells then a probe window may have seen a full block when trying to
1582         // insert. We therefore need to keep that block non-empty so that
1583         // lookups will continue searching to the next probe window.
1584         //
1585         // Note that in this context `leading_zeros` refers to the bytes at the
1586         // end of a group, while `trailing_zeros` refers to the bytes at the
1587         // beginning of a group.
1588         let ctrl = if empty_before.leading_zeros() + empty_after.trailing_zeros() >= Group::WIDTH {
1589             DELETED
1590         } else {
1591             self.growth_left += 1;
1592             EMPTY
1593         };
1594         self.set_ctrl(index, ctrl);
1595         self.items -= 1;
1596     }
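
    // A minimal numeric sketch (illustrative only, not from the original source) of
    // the decision above, assuming a group width of 8. `before` stands for the
    // non-EMPTY cells immediately before `index` (the leading_zeros above) and
    // `after` for the non-EMPTY cells at and after `index` (the trailing_zeros):
    //
    //     let width = 8usize;
    //     let (before, after) = (5usize, 3usize);
    //     assert!(before + after >= width); // a full probe window could have covered
    //                                       // this slot, so it must become DELETED
    //     let (before, after) = (2usize, 3usize);
    //     assert!(before + after < width);  // no probe window was ever full here,
    //                                       // so the slot can safely become EMPTY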
1597 }
1598 
1599 impl<T: Clone, A: Allocator + Clone> Clone for RawTable<T, A> {
1600     fn clone(&self) -> Self {
1601         if self.table.is_empty_singleton() {
1602             Self::new_in(self.table.alloc.clone())
1603         } else {
1604             unsafe {
1605                 // Avoid `Result::ok_or_else` because it bloats LLVM IR.
1606                 let new_table = match Self::new_uninitialized(
1607                     self.table.alloc.clone(),
1608                     self.table.buckets(),
1609                     Fallibility::Infallible,
1610                 ) {
1611                     Ok(table) => table,
1612                     Err(_) => hint::unreachable_unchecked(),
1613                 };
1614 
1615                 // If cloning fails then we need to free the allocation for the
1616                 // new table. However we don't run its drop since its control
1617                 // bytes are not initialized yet.
1618                 let mut guard = guard(ManuallyDrop::new(new_table), |new_table| {
1619                     new_table.free_buckets();
1620                 });
1621 
1622                 guard.clone_from_spec(self);
1623 
1624                 // Disarm the scope guard and return the newly created table.
1625                 ManuallyDrop::into_inner(ScopeGuard::into_inner(guard))
1626             }
1627         }
1628     }
1629 
1630     fn clone_from(&mut self, source: &Self) {
1631         if source.table.is_empty_singleton() {
1632             *self = Self::new_in(self.table.alloc.clone());
1633         } else {
1634             unsafe {
1635                 // Make sure that if any panic occurs, we clear the table and
1636                 // leave it in an empty state.
1637                 let mut self_ = guard(self, |self_| {
1638                     self_.clear_no_drop();
1639                 });
1640 
1641                 // First, drop all our elements without clearing the control
1642                 // bytes. If this panics then the scope guard will clear the
1643                 // table, leaking any elements that were not dropped yet.
1644                 //
1645                 // This leak is unavoidable: we can't try dropping more elements
1646                 // since this could lead to another panic and abort the process.
1647                 self_.drop_elements();
1648 
1649                 // If necessary, resize our table to match the source.
1650                 if self_.buckets() != source.buckets() {
1651                     // Skip our drop by using ptr::write.
1652                     if !self_.table.is_empty_singleton() {
1653                         self_.free_buckets();
1654                     }
1655                     (&mut **self_ as *mut Self).write(
1656                         // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
1657                         match Self::new_uninitialized(
1658                             self_.table.alloc.clone(),
1659                             source.buckets(),
1660                             Fallibility::Infallible,
1661                         ) {
1662                             Ok(table) => table,
1663                             Err(_) => hint::unreachable_unchecked(),
1664                         },
1665                     );
1666                 }
1667 
1668                 self_.clone_from_spec(source);
1669 
1670                 // Disarm the scope guard if cloning was successful.
1671                 ScopeGuard::into_inner(self_);
1672             }
1673         }
1674     }
1675 }
1676 
1677 /// Specialization of `clone_from` for `Copy` types
1678 trait RawTableClone {
1679     unsafe fn clone_from_spec(&mut self, source: &Self);
1680 }
1681 impl<T: Clone, A: Allocator + Clone> RawTableClone for RawTable<T, A> {
1682     default_fn! {
1683         #[cfg_attr(feature = "inline-more", inline)]
1684         unsafe fn clone_from_spec(&mut self, source: &Self) {
1685             self.clone_from_impl(source);
1686         }
1687     }
1688 }
1689 #[cfg(feature = "nightly")]
1690 impl<T: Copy, A: Allocator + Clone> RawTableClone for RawTable<T, A> {
1691     #[cfg_attr(feature = "inline-more", inline)]
1692     unsafe fn clone_from_spec(&mut self, source: &Self) {
1693         source
1694             .table
1695             .ctrl(0)
1696             .copy_to_nonoverlapping(self.table.ctrl(0), self.table.num_ctrl_bytes());
1697         source
1698             .data_start()
1699             .copy_to_nonoverlapping(self.data_start(), self.table.buckets());
1700 
1701         self.table.items = source.table.items;
1702         self.table.growth_left = source.table.growth_left;
1703     }
1704 }
1705 
1706 impl<T: Clone, A: Allocator + Clone> RawTable<T, A> {
1707     /// Common code for clone and clone_from. Assumes:
1708     /// - `self.buckets() == source.buckets()`.
1709     /// - Any existing elements have been dropped.
1710     /// - The control bytes are not initialized yet.
1711     #[cfg_attr(feature = "inline-more", inline)]
1712     unsafe fn clone_from_impl(&mut self, source: &Self) {
1713         // Copy the control bytes unchanged. We do this in a single pass
1714         source
1715             .table
1716             .ctrl(0)
1717             .copy_to_nonoverlapping(self.table.ctrl(0), self.table.num_ctrl_bytes());
1718 
1719         // The cloning of elements may panic, in which case we need
1720         // to make sure we drop only the elements that have been
1721         // cloned so far.
1722         let mut guard = guard((0, &mut *self), |(index, self_)| {
1723             if mem::needs_drop::<T>() && !self_.is_empty() {
1724                 for i in 0..=*index {
1725                     if is_full(*self_.table.ctrl(i)) {
1726                         self_.bucket(i).drop();
1727                     }
1728                 }
1729             }
1730         });
1731 
1732         for from in source.iter() {
1733             let index = source.bucket_index(&from);
1734             let to = guard.1.bucket(index);
1735             to.write(from.as_ref().clone());
1736 
1737             // Update the index in case we need to unwind.
1738             guard.0 = index;
1739         }
1740 
1741         // Successfully cloned all items, no need to clean up.
1742         mem::forget(guard);
1743 
1744         self.table.items = source.table.items;
1745         self.table.growth_left = source.table.growth_left;
1746     }
1747 
1748     /// Variant of `clone_from` to use when a hasher is available.
1749     #[cfg(feature = "raw")]
1750     pub fn clone_from_with_hasher(&mut self, source: &Self, hasher: impl Fn(&T) -> u64) {
1751         // If we have enough capacity in the table, just clear it and insert
1752         // elements one by one. We don't do this if we have the same number of
1753         // buckets as the source since we can just copy the contents directly
1754         // in that case.
1755         if self.table.buckets() != source.table.buckets()
1756             && bucket_mask_to_capacity(self.table.bucket_mask) >= source.len()
1757         {
1758             self.clear();
1759 
1760             let guard_self = guard(&mut *self, |self_| {
1761                 // Clear the partially copied table if a panic occurs, otherwise
1762                 // items and growth_left will be out of sync with the contents
1763                 // of the table.
1764                 self_.clear();
1765             });
1766 
1767             unsafe {
1768                 for item in source.iter() {
1769                     // This may panic.
1770                     let item = item.as_ref().clone();
1771                     let hash = hasher(&item);
1772 
1773                     // We can use a simpler version of insert() here since:
1774                     // - there are no DELETED entries.
1775                     // - we know there is enough space in the table.
1776                     // - all elements are unique.
1777                     let (index, _) = guard_self.table.prepare_insert_slot(hash);
1778                     guard_self.bucket(index).write(item);
1779                 }
1780             }
1781 
1782             // Successfully cloned all items, no need to clean up.
1783             mem::forget(guard_self);
1784 
1785             self.table.items = source.table.items;
1786             self.table.growth_left -= source.table.items;
1787         } else {
1788             self.clone_from(source);
1789         }
1790     }
1791 }
1792 
1793 impl<T, A: Allocator + Clone + Default> Default for RawTable<T, A> {
1794     #[inline]
1795     fn default() -> Self {
1796         Self::new_in(Default::default())
1797     }
1798 }
1799 
1800 #[cfg(feature = "nightly")]
1801 unsafe impl<#[may_dangle] T, A: Allocator + Clone> Drop for RawTable<T, A> {
1802     #[cfg_attr(feature = "inline-more", inline)]
1803     fn drop(&mut self) {
1804         if !self.table.is_empty_singleton() {
1805             unsafe {
1806                 self.drop_elements();
1807                 self.free_buckets();
1808             }
1809         }
1810     }
1811 }
1812 #[cfg(not(feature = "nightly"))]
1813 impl<T, A: Allocator + Clone> Drop for RawTable<T, A> {
1814     #[cfg_attr(feature = "inline-more", inline)]
1815     fn drop(&mut self) {
1816         if !self.table.is_empty_singleton() {
1817             unsafe {
1818                 self.drop_elements();
1819                 self.free_buckets();
1820             }
1821         }
1822     }
1823 }
1824 
1825 impl<T, A: Allocator + Clone> IntoIterator for RawTable<T, A> {
1826     type Item = T;
1827     type IntoIter = RawIntoIter<T, A>;
1828 
1829     #[cfg_attr(feature = "inline-more", inline)]
1830     fn into_iter(self) -> RawIntoIter<T, A> {
1831         unsafe {
1832             let iter = self.iter();
1833             self.into_iter_from(iter)
1834         }
1835     }
1836 }
1837 
1838 /// Iterator over a sub-range of a table. Unlike `RawIter` this iterator does
1839 /// not track an item count.
1840 pub(crate) struct RawIterRange<T> {
1841     // Mask of full buckets in the current group. Bits are cleared from this
1842     // mask as each element is processed.
1843     current_group: BitMask,
1844 
1845     // Pointer to the buckets for the current group.
1846     data: Bucket<T>,
1847 
1848     // Pointer to the next group of control bytes.
1849     // Must be aligned to the group size.
1850     next_ctrl: *const u8,
1851 
1852     // Pointer one past the last control byte of this range.
1853     end: *const u8,
1854 }
1855 
1856 impl<T> RawIterRange<T> {
1857     /// Returns a `RawIterRange` covering a subset of a table.
1858     ///
1859     /// The control byte address must be aligned to the group size.
1860     #[cfg_attr(feature = "inline-more", inline)]
1861     unsafe fn new(ctrl: *const u8, data: Bucket<T>, len: usize) -> Self {
1862         debug_assert_ne!(len, 0);
1863         debug_assert_eq!(ctrl as usize % Group::WIDTH, 0);
1864         let end = ctrl.add(len);
1865 
1866         // Load the first group and advance ctrl to point to the next group
1867         let current_group = Group::load_aligned(ctrl).match_full();
1868         let next_ctrl = ctrl.add(Group::WIDTH);
1869 
1870         Self {
1871             current_group,
1872             data,
1873             next_ctrl,
1874             end,
1875         }
1876     }
1877 
1878     /// Splits a `RawIterRange` into two halves.
1879     ///
1880     /// Returns `None` for the second half if the remaining range is smaller than
1881     /// or equal to the group width.
1882     #[cfg_attr(feature = "inline-more", inline)]
1883     #[cfg(feature = "rayon")]
1884     pub(crate) fn split(mut self) -> (Self, Option<RawIterRange<T>>) {
1885         unsafe {
1886             if self.end <= self.next_ctrl {
1887                 // Nothing to split if the group that we are currently processing
1888                 // is the last one.
1889                 (self, None)
1890             } else {
1891                 // len is the remaining number of elements after the group that
1892                 // we are currently processing. It must be a multiple of the
1893                 // group size (small tables are caught by the check above).
1894                 let len = offset_from(self.end, self.next_ctrl);
1895                 debug_assert_eq!(len % Group::WIDTH, 0);
1896 
1897                 // Split the remaining elements into two halves, but round the
1898                 // midpoint down in case there is an odd number of groups
1899                 // remaining. This ensures that:
1900                 // - The tail is at least 1 group long.
1901                 // - The split is roughly even considering we still have the
1902                 //   current group to process.
1903                 let mid = (len / 2) & !(Group::WIDTH - 1);
1904 
1905                 let tail = Self::new(
1906                     self.next_ctrl.add(mid),
1907                     self.data.next_n(Group::WIDTH).next_n(mid),
1908                     len - mid,
1909                 );
1910                 debug_assert_eq!(
1911                     self.data.next_n(Group::WIDTH).next_n(mid).ptr,
1912                     tail.data.ptr
1913                 );
1914                 debug_assert_eq!(self.end, tail.end);
1915                 self.end = self.next_ctrl.add(mid);
1916                 debug_assert_eq!(self.end.add(Group::WIDTH), tail.next_ctrl);
1917                 (self, Some(tail))
1918             }
1919         }
1920     }
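
    // A minimal numeric sketch (not from the original source) of the midpoint
    // rounding above, assuming the 16-byte SSE2 group width and 48 control bytes
    // remaining after the group currently being processed:
    //
    //     let (width, len) = (16usize, 48usize);
    //     let mid = (len / 2) & !(width - 1); // 24 rounded down to a group boundary
    //     assert_eq!(mid, 16);
    //     assert_eq!(len - mid, 32);          // the tail keeps at least one full group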
1921 
1922     /// # Safety
1923     /// If DO_CHECK_PTR_RANGE is false, caller must ensure that we never try to iterate
1924     /// after yielding all elements.
1925     #[cfg_attr(feature = "inline-more", inline)]
1926     unsafe fn next_impl<const DO_CHECK_PTR_RANGE: bool>(&mut self) -> Option<Bucket<T>> {
1927         loop {
1928             if let Some(index) = self.current_group.lowest_set_bit() {
1929                 self.current_group = self.current_group.remove_lowest_bit();
1930                 return Some(self.data.next_n(index));
1931             }
1932 
1933             if DO_CHECK_PTR_RANGE && self.next_ctrl >= self.end {
1934                 return None;
1935             }
1936 
1937             // We might read past self.end up to the next group boundary,
1938             // but this is fine because it only occurs on tables smaller
1939             // than the group size where the trailing control bytes are all
1940             // EMPTY. On larger tables self.end is guaranteed to be aligned
1941             // to the group size (since tables are power-of-two sized).
1942             self.current_group = Group::load_aligned(self.next_ctrl).match_full();
1943             self.data = self.data.next_n(Group::WIDTH);
1944             self.next_ctrl = self.next_ctrl.add(Group::WIDTH);
1945         }
1946     }
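
    // A minimal sketch (illustrative only) of the bitmask walk above, using a plain
    // u16 as a stand-in for the group's mask of full buckets: the lowest set bit
    // gives the offset of the next full bucket, and clearing it advances the walk,
    // mirroring lowest_set_bit / remove_lowest_bit.
    //
    //     let mut mask: u16 = 0b0010_0100;
    //     assert_eq!(mask.trailing_zeros(), 2); // yield the bucket at offset 2
    //     mask &= mask - 1;                     // clear the lowest set bit
    //     assert_eq!(mask.trailing_zeros(), 5); // then the bucket at offset 5
    //     mask &= mask - 1;
    //     assert_eq!(mask, 0);                  // group exhausted, load the next one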
1947 }
1948 
1949 // We make raw iterators unconditionally Send and Sync, and let the PhantomData
1950 // in the actual iterator implementations determine the real Send/Sync bounds.
1951 unsafe impl<T> Send for RawIterRange<T> {}
1952 unsafe impl<T> Sync for RawIterRange<T> {}
1953 
1954 impl<T> Clone for RawIterRange<T> {
1955     #[cfg_attr(feature = "inline-more", inline)]
1956     fn clone(&self) -> Self {
1957         Self {
1958             data: self.data.clone(),
1959             next_ctrl: self.next_ctrl,
1960             current_group: self.current_group,
1961             end: self.end,
1962         }
1963     }
1964 }
1965 
1966 impl<T> Iterator for RawIterRange<T> {
1967     type Item = Bucket<T>;
1968 
1969     #[cfg_attr(feature = "inline-more", inline)]
1970     fn next(&mut self) -> Option<Bucket<T>> {
1971         unsafe {
1972             // SAFETY: The DO_CHECK_PTR_RANGE parameter is set to true, so the pointer range is checked.
1973             self.next_impl::<true>()
1974         }
1975     }
1976 
1977     #[inline]
1978     fn size_hint(&self) -> (usize, Option<usize>) {
1979         // We don't have an item count, so just guess based on the range size.
1980         let remaining_buckets = if self.end > self.next_ctrl {
1981             unsafe { offset_from(self.end, self.next_ctrl) }
1982         } else {
1983             0
1984         };
1985 
1986         // Add a group width to include the group we are currently processing.
1987         (0, Some(Group::WIDTH + remaining_buckets))
1988     }
1989 }
1990 
1991 impl<T> FusedIterator for RawIterRange<T> {}
1992 
1993 /// Iterator which returns a raw pointer to every full bucket in the table.
1994 ///
1995 /// For maximum flexibility this iterator is not bound by a lifetime, but you
1996 /// must observe several rules when using it:
1997 /// - You must not free the hash table while iterating (including via growing/shrinking).
1998 /// - It is fine to erase a bucket that has been yielded by the iterator.
1999 /// - Erasing a bucket that has not yet been yielded by the iterator may still
2000 ///   result in the iterator yielding that bucket (unless `reflect_remove` is called).
2001 /// - It is unspecified whether an element inserted after the iterator was
2002 ///   created will be yielded by that iterator (unless `reflect_insert` is called).
2003 /// - The order in which the iterator yields buckets is unspecified and may
2004 ///   change in the future.
2005 pub struct RawIter<T> {
2006     pub(crate) iter: RawIterRange<T>,
2007     items: usize,
2008 }
2009 
2010 impl<T> RawIter<T> {
2011     /// Refresh the iterator so that it reflects a removal from the given bucket.
2012     ///
2013     /// For the iterator to remain valid, this method must be called once
2014     /// for each removed bucket before `next` is called again.
2015     ///
2016     /// This method should be called _before_ the removal is made. It is not necessary to call this
2017     /// method if you are removing an item that this iterator yielded in the past.
2018     #[cfg(feature = "raw")]
2019     pub fn reflect_remove(&mut self, b: &Bucket<T>) {
2020         self.reflect_toggle_full(b, false);
2021     }
2022 
2023     /// Refresh the iterator so that it reflects an insertion into the given bucket.
2024     ///
2025     /// For the iterator to remain valid, this method must be called once
2026     /// for each insert before `next` is called again.
2027     ///
2028     /// This method does not guarantee that an insertion of a bucket with a greater
2029     /// index than the last one yielded will be reflected in the iterator.
2030     ///
2031     /// This method should be called _after_ the given insert is made.
2032     #[cfg(feature = "raw")]
2033     pub fn reflect_insert(&mut self, b: &Bucket<T>) {
2034         self.reflect_toggle_full(b, true);
2035     }
2036 
2037     /// Refresh the iterator so that it reflects a change to the state of the given bucket.
2038     #[cfg(feature = "raw")]
2039     fn reflect_toggle_full(&mut self, b: &Bucket<T>, is_insert: bool) {
2040         unsafe {
2041             if b.as_ptr() > self.iter.data.as_ptr() {
2042                 // The iterator has already passed the bucket's group.
2043                 // So the toggle isn't relevant to this iterator.
2044                 return;
2045             }
2046 
2047             if self.iter.next_ctrl < self.iter.end
2048                 && b.as_ptr() <= self.iter.data.next_n(Group::WIDTH).as_ptr()
2049             {
2050                 // The iterator has not yet reached the bucket's group.
2051                 // We don't need to reload anything, but we do need to adjust the item count.
2052 
2053                 if cfg!(debug_assertions) {
2054                     // Double-check that the user isn't lying to us by checking the bucket state.
2055                     // To do that, we need to find its control byte. We know that self.iter.data is
2056                     // at self.iter.next_ctrl - Group::WIDTH, so we work from there:
2057                     let offset = offset_from(self.iter.data.as_ptr(), b.as_ptr());
2058                     let ctrl = self.iter.next_ctrl.sub(Group::WIDTH).add(offset);
2059                     // This method should be called _before_ a removal, or _after_ an insert,
2060                     // so in both cases the ctrl byte should indicate that the bucket is full.
2061                     assert!(is_full(*ctrl));
2062                 }
2063 
2064                 if is_insert {
2065                     self.items += 1;
2066                 } else {
2067                     self.items -= 1;
2068                 }
2069 
2070                 return;
2071             }
2072 
2073             // The iterator is at the bucket group that the toggled bucket is in.
2074             // We need to do two things:
2075             //
2076             //  - Determine if the iterator already yielded the toggled bucket.
2077             //    If it did, we're done.
2078             //  - Otherwise, update the iterator cached group so that it won't
2079             //    yield a to-be-removed bucket, or _will_ yield a to-be-added bucket.
2080             //    We'll also need to update the item count accordingly.
2081             if let Some(index) = self.iter.current_group.lowest_set_bit() {
2082                 let next_bucket = self.iter.data.next_n(index);
2083                 if b.as_ptr() > next_bucket.as_ptr() {
2084                     // The toggled bucket is "before" the bucket the iterator would yield next. We
2085                     // therefore don't need to do anything --- the iterator has already passed the
2086                     // bucket in question.
2087                     //
2088                     // The item count must already be correct, since a removal or insert "prior" to
2089                     // the iterator's position wouldn't affect the item count.
2090                 } else {
2091                     // The removed bucket is an upcoming bucket. We need to make sure it does _not_
2092                     // get yielded, and also that it's no longer included in the item count.
2093                     //
2094                     // NOTE: We can't just reload the group here, both since that might reflect
2095                     // inserts we've already passed, and because that might inadvertently unset the
2096                     // bits for _other_ removals. If we do that, we'd have to also decrement the
2097                     // item count for those other bits that we unset. But the presumably subsequent
2098                     // call to reflect for those buckets might _also_ decrement the item count.
2099                     // Instead, we _just_ flip the bit for the particular bucket the caller asked
2100                     // us to reflect.
2101                     let our_bit = offset_from(self.iter.data.as_ptr(), b.as_ptr());
2102                     let was_full = self.iter.current_group.flip(our_bit);
2103                     debug_assert_ne!(was_full, is_insert);
2104 
2105                     if is_insert {
2106                         self.items += 1;
2107                     } else {
2108                         self.items -= 1;
2109                     }
2110 
2111                     if cfg!(debug_assertions) {
2112                         if b.as_ptr() == next_bucket.as_ptr() {
2113                             // The removed bucket should no longer be next
2114                             debug_assert_ne!(self.iter.current_group.lowest_set_bit(), Some(index));
2115                         } else {
2116                             // We should not have changed what bucket comes next.
2117                             debug_assert_eq!(self.iter.current_group.lowest_set_bit(), Some(index));
2118                         }
2119                     }
2120                 }
2121             } else {
2122                 // We must have already iterated past the removed item.
2123             }
2124         }
2125     }
2126 
2127     unsafe fn drop_elements(&mut self) {
2128         if mem::needs_drop::<T>() && self.len() != 0 {
2129             for item in self {
2130                 item.drop();
2131             }
2132         }
2133     }
2134 }
2135 
2136 impl<T> Clone for RawIter<T> {
2137     #[cfg_attr(feature = "inline-more", inline)]
2138     fn clone(&self) -> Self {
2139         Self {
2140             iter: self.iter.clone(),
2141             items: self.items,
2142         }
2143     }
2144 }
2145 
2146 impl<T> Iterator for RawIter<T> {
2147     type Item = Bucket<T>;
2148 
2149     #[cfg_attr(feature = "inline-more", inline)]
2150     fn next(&mut self) -> Option<Bucket<T>> {
2151         // The inner iterator iterates over buckets, so it could do
2152         // unnecessary work if we have already yielded all the items.
2153         if self.items == 0 {
2154             return None;
2155         }
2156 
2157         let nxt = unsafe {
2158             // SAFETY: We check the number of items left to yield using the `items` field.
2159             self.iter.next_impl::<false>()
2160         };
2161 
2162         if nxt.is_some() {
2163             self.items -= 1;
2164         }
2165 
2166         nxt
2167     }
2168 
2169     #[inline]
2170     fn size_hint(&self) -> (usize, Option<usize>) {
2171         (self.items, Some(self.items))
2172     }
2173 }
2174 
2175 impl<T> ExactSizeIterator for RawIter<T> {}
2176 impl<T> FusedIterator for RawIter<T> {}
2177 
2178 /// Iterator which consumes a table and returns elements.
2179 pub struct RawIntoIter<T, A: Allocator + Clone = Global> {
2180     iter: RawIter<T>,
2181     allocation: Option<(NonNull<u8>, Layout)>,
2182     marker: PhantomData<T>,
2183     alloc: A,
2184 }
2185 
2186 impl<T, A: Allocator + Clone> RawIntoIter<T, A> {
2187     #[cfg_attr(feature = "inline-more", inline)]
2188     pub fn iter(&self) -> RawIter<T> {
2189         self.iter.clone()
2190     }
2191 }
2192 
2193 unsafe impl<T, A: Allocator + Clone> Send for RawIntoIter<T, A>
2194 where
2195     T: Send,
2196     A: Send,
2197 {
2198 }
2199 unsafe impl<T, A: Allocator + Clone> Sync for RawIntoIter<T, A>
2200 where
2201     T: Sync,
2202     A: Sync,
2203 {
2204 }
2205 
2206 #[cfg(feature = "nightly")]
2207 unsafe impl<#[may_dangle] T, A: Allocator + Clone> Drop for RawIntoIter<T, A> {
2208     #[cfg_attr(feature = "inline-more", inline)]
2209     fn drop(&mut self) {
2210         unsafe {
2211             // Drop all remaining elements
2212             self.iter.drop_elements();
2213 
2214             // Free the table
2215             if let Some((ptr, layout)) = self.allocation {
2216                 self.alloc.deallocate(ptr, layout);
2217             }
2218         }
2219     }
2220 }
2221 #[cfg(not(feature = "nightly"))]
2222 impl<T, A: Allocator + Clone> Drop for RawIntoIter<T, A> {
2223     #[cfg_attr(feature = "inline-more", inline)]
2224     fn drop(&mut self) {
2225         unsafe {
2226             // Drop all remaining elements
2227             self.iter.drop_elements();
2228 
2229             // Free the table
2230             if let Some((ptr, layout)) = self.allocation {
2231                 self.alloc.deallocate(ptr, layout);
2232             }
2233         }
2234     }
2235 }
2236 
2237 impl<T, A: Allocator + Clone> Iterator for RawIntoIter<T, A> {
2238     type Item = T;
2239 
2240     #[cfg_attr(feature = "inline-more", inline)]
2241     fn next(&mut self) -> Option<T> {
2242         unsafe { Some(self.iter.next()?.read()) }
2243     }
2244 
2245     #[inline]
2246     fn size_hint(&self) -> (usize, Option<usize>) {
2247         self.iter.size_hint()
2248     }
2249 }
2250 
2251 impl<T, A: Allocator + Clone> ExactSizeIterator for RawIntoIter<T, A> {}
2252 impl<T, A: Allocator + Clone> FusedIterator for RawIntoIter<T, A> {}
2253 
2254 /// Iterator which consumes elements without freeing the table storage.
2255 pub struct RawDrain<'a, T, A: Allocator + Clone = Global> {
2256     iter: RawIter<T>,
2257 
2258     // The table is moved into the iterator for the duration of the drain. This
2259     // ensures that an empty table is left if the drain iterator is leaked
2260     // without dropping.
2261     table: ManuallyDrop<RawTable<T, A>>,
2262     orig_table: NonNull<RawTable<T, A>>,
2263 
2264     // We don't use a &'a mut RawTable<T> because we want RawDrain to be
2265     // covariant over T.
2266     marker: PhantomData<&'a RawTable<T, A>>,
2267 }
2268 
2269 impl<T, A: Allocator + Clone> RawDrain<'_, T, A> {
2270     #[cfg_attr(feature = "inline-more", inline)]
2271     pub fn iter(&self) -> RawIter<T> {
2272         self.iter.clone()
2273     }
2274 }
2275 
2276 unsafe impl<T, A: Allocator + Copy> Send for RawDrain<'_, T, A>
2277 where
2278     T: Send,
2279     A: Send,
2280 {
2281 }
2282 unsafe impl<T, A: Allocator + Copy> Sync for RawDrain<'_, T, A>
2283 where
2284     T: Sync,
2285     A: Sync,
2286 {
2287 }
2288 
2289 impl<T, A: Allocator + Clone> Drop for RawDrain<'_, T, A> {
2290     #[cfg_attr(feature = "inline-more", inline)]
2291     fn drop(&mut self) {
2292         unsafe {
2293             // Drop all remaining elements. Note that this may panic.
2294             self.iter.drop_elements();
2295 
2296             // Reset the contents of the table now that all elements have been
2297             // dropped.
2298             self.table.clear_no_drop();
2299 
2300             // Move the now empty table back to its original location.
2301             self.orig_table
2302                 .as_ptr()
2303                 .copy_from_nonoverlapping(&*self.table, 1);
2304         }
2305     }
2306 }
2307 
2308 impl<T, A: Allocator + Clone> Iterator for RawDrain<'_, T, A> {
2309     type Item = T;
2310 
2311     #[cfg_attr(feature = "inline-more", inline)]
2312     fn next(&mut self) -> Option<T> {
2313         unsafe {
2314             let item = self.iter.next()?;
2315             Some(item.read())
2316         }
2317     }
2318 
2319     #[inline]
2320     fn size_hint(&self) -> (usize, Option<usize>) {
2321         self.iter.size_hint()
2322     }
2323 }
2324 
2325 impl<T, A: Allocator + Clone> ExactSizeIterator for RawDrain<'_, T, A> {}
2326 impl<T, A: Allocator + Clone> FusedIterator for RawDrain<'_, T, A> {}
2327 
2328 /// Iterator over occupied buckets that could match a given hash.
2329 ///
2330 /// `RawTable` only stores 7 bits of the hash value, so this iterator may return
2331 /// items that have a hash value different than the one provided. You should
2332 /// always validate the returned values before using them.
2333 pub struct RawIterHash<'a, T, A: Allocator + Clone = Global> {
2334     inner: RawIterHashInner<'a, A>,
2335     _marker: PhantomData<T>,
2336 }
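
// A minimal sketch (illustrative only) of why callers must re-validate matches:
// only the top 7 bits of the hash are kept, so two different 64-bit hashes that
// share those bits look identical to this iterator. The shift below is a
// hypothetical stand-in for the crate's h2 helper:
//
//     let h2 = |hash: u64| (hash >> 57) as u8; // keep only the top 7 bits
//     assert_eq!(h2(0xFE00_0000_0000_0000), h2(0xFE00_0000_0000_1234));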
2337 
2338 struct RawIterHashInner<'a, A: Allocator + Clone> {
2339     table: &'a RawTableInner<A>,
2340 
2341     // The top 7 bits of the hash.
2342     h2_hash: u8,
2343 
2344     // The sequence of groups to probe in the search.
2345     probe_seq: ProbeSeq,
2346 
2347     group: Group,
2348 
2349     // The elements within the group with a matching h2-hash.
2350     bitmask: BitMaskIter,
2351 }
2352 
2353 impl<'a, T, A: Allocator + Clone> RawIterHash<'a, T, A> {
2354     #[cfg_attr(feature = "inline-more", inline)]
2355     #[cfg(feature = "raw")]
2356     fn new(table: &'a RawTable<T, A>, hash: u64) -> Self {
2357         RawIterHash {
2358             inner: RawIterHashInner::new(&table.table, hash),
2359             _marker: PhantomData,
2360         }
2361     }
2362 }
2363 impl<'a, A: Allocator + Clone> RawIterHashInner<'a, A> {
2364     #[cfg_attr(feature = "inline-more", inline)]
2365     #[cfg(feature = "raw")]
2366     fn new(table: &'a RawTableInner<A>, hash: u64) -> Self {
2367         unsafe {
2368             let h2_hash = h2(hash);
2369             let probe_seq = table.probe_seq(hash);
2370             let group = Group::load(table.ctrl(probe_seq.pos));
2371             let bitmask = group.match_byte(h2_hash).into_iter();
2372 
2373             RawIterHashInner {
2374                 table,
2375                 h2_hash,
2376                 probe_seq,
2377                 group,
2378                 bitmask,
2379             }
2380         }
2381     }
2382 }
2383 
2384 impl<'a, T, A: Allocator + Clone> Iterator for RawIterHash<'a, T, A> {
2385     type Item = Bucket<T>;
2386 
2387     fn next(&mut self) -> Option<Bucket<T>> {
2388         unsafe {
2389             match self.inner.next() {
2390                 Some(index) => Some(self.inner.table.bucket(index)),
2391                 None => None,
2392             }
2393         }
2394     }
2395 }
2396 
2397 impl<'a, A: Allocator + Clone> Iterator for RawIterHashInner<'a, A> {
2398     type Item = usize;
2399 
2400     fn next(&mut self) -> Option<Self::Item> {
2401         unsafe {
2402             loop {
2403                 if let Some(bit) = self.bitmask.next() {
2404                     let index = (self.probe_seq.pos + bit) & self.table.bucket_mask;
2405                     return Some(index);
2406                 }
2407                 if likely(self.group.match_empty().any_bit_set()) {
2408                     return None;
2409                 }
2410                 self.probe_seq.move_next(self.table.bucket_mask);
2411                 self.group = Group::load(self.table.ctrl(self.probe_seq.pos));
2412                 self.bitmask = self.group.match_byte(self.h2_hash).into_iter();
2413             }
2414         }
2415     }
2416 }
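
// A minimal numeric sketch (not from the original source) of the index
// computation in the loop above: the matching bit offset within the probed group
// is added to the group's start position and wrapped with bucket_mask:
//
//     let (bucket_mask, probe_pos, bit) = (15usize, 12usize, 6usize);
//     assert_eq!((probe_pos + bit) & bucket_mask, 2); // wraps past the end of the table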
2417 
2418 #[cfg(test)]
2419 mod test_map {
2420     use super::*;
2421 
2422     fn rehash_in_place<T>(table: &mut RawTable<T>, hasher: impl Fn(&T) -> u64) {
2423         unsafe {
2424             table.table.rehash_in_place(
2425                 &|table, index| hasher(table.bucket::<T>(index).as_ref()),
2426                 mem::size_of::<T>(),
2427                 if mem::needs_drop::<T>() {
2428                     Some(mem::transmute(ptr::drop_in_place::<T> as unsafe fn(*mut T)))
2429                 } else {
2430                     None
2431                 },
2432             );
2433         }
2434     }
2435 
2436     #[test]
2437     fn rehash() {
2438         let mut table = RawTable::new();
2439         let hasher = |i: &u64| *i;
2440         for i in 0..100 {
2441             table.insert(i, i, hasher);
2442         }
2443 
2444         for i in 0..100 {
2445             unsafe {
2446                 assert_eq!(table.find(i, |x| *x == i).map(|b| b.read()), Some(i));
2447             }
2448             assert!(table.find(i + 100, |x| *x == i + 100).is_none());
2449         }
2450 
2451         rehash_in_place(&mut table, hasher);
2452 
2453         for i in 0..100 {
2454             unsafe {
2455                 assert_eq!(table.find(i, |x| *x == i).map(|b| b.read()), Some(i));
2456             }
2457             assert!(table.find(i + 100, |x| *x == i + 100).is_none());
2458         }
2459     }
2460 }
2461