use crate::fx::{FxHashMap, FxHasher};
#[cfg(parallel_compiler)]
use crate::sync::is_dyn_thread_safe;
use crate::sync::{CacheAligned, Lock, LockGuard};
use std::borrow::Borrow;
use std::collections::hash_map::RawEntryMut;
use std::hash::{Hash, Hasher};
use std::mem;

#[cfg(parallel_compiler)]
// 32 shards is sufficient to reduce contention on an 8-core Ryzen 7 1700,
// but this should be tested on higher core count CPUs. How the `Sharded` type gets used
// may also affect the ideal number of shards.
const SHARD_BITS: usize = 5;

#[cfg(not(parallel_compiler))]
const SHARD_BITS: usize = 0;

pub const SHARDS: usize = 1 << SHARD_BITS;

/// An array of cache-line aligned inner locked structures with convenience methods.
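///
/// A minimal usage sketch (illustrative only; it assumes the crate's public
/// module paths, and is not asserting how callers in this crate use the type):
///
/// ```ignore (illustrative)
/// use rustc_data_structures::fx::FxHashMap;
/// use rustc_data_structures::sharded::Sharded;
///
/// // Each shard holds its own map behind its own lock, so threads that touch
/// // keys hashing to different shards do not contend with each other.
/// let sharded: Sharded<FxHashMap<u32, &str>> = Sharded::new(FxHashMap::default);
/// sharded.get_shard_by_value(&1u32).lock().insert(1, "one");
/// assert_eq!(sharded.get_shard_by_value(&1u32).lock().get(&1), Some(&"one"));
/// ```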
pub struct Sharded<T> {
    /// This mask is used to ensure that accesses are in bounds of `shards`.
    /// When dynamic thread safety is off, this field is set to 0, causing only
    /// a single shard to be used for greater cache efficiency.
    #[cfg(parallel_compiler)]
    mask: usize,
    shards: [CacheAligned<Lock<T>>; SHARDS],
}

impl<T: Default> Default for Sharded<T> {
    #[inline]
    fn default() -> Self {
        Self::new(T::default)
    }
}

impl<T> Sharded<T> {
    #[inline]
    pub fn new(mut value: impl FnMut() -> T) -> Self {
        Sharded {
            #[cfg(parallel_compiler)]
            mask: if is_dyn_thread_safe() { SHARDS - 1 } else { 0 },
            shards: [(); SHARDS].map(|()| CacheAligned(Lock::new(value()))),
        }
    }

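    // Example: with `SHARD_BITS = 5`, `SHARDS` is 32 and `self.mask` is either
    // 31 (0b11111), so `i & mask` stays within `shards`, or 0, in which case
    // every access collapses onto shard 0.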
    #[inline(always)]
    fn mask(&self) -> usize {
        #[cfg(parallel_compiler)]
        {
            if SHARDS == 1 { 0 } else { self.mask }
        }
        #[cfg(not(parallel_compiler))]
        {
            0
        }
    }

    #[inline(always)]
    fn count(&self) -> usize {
        // `self.mask()` is always one less than the number of shards in use.
        self.mask() + 1
    }

    /// The shard is selected by hashing `val` with `FxHasher`.
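    ///
    /// Equivalent to `get_shard_by_hash(make_hash(val))`, modulo the
    /// single-shard fast path; a sketch (illustrative):
    ///
    /// ```ignore (illustrative)
    /// let sharded: Sharded<Vec<u32>> = Sharded::new(Vec::new);
    /// let by_value = sharded.get_shard_by_value(&42u32) as *const _;
    /// let by_hash = sharded.get_shard_by_hash(make_hash(&42u32)) as *const _;
    /// assert_eq!(by_value, by_hash); // both select the same shard
    /// ```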
    #[inline]
    pub fn get_shard_by_value<K: Hash + ?Sized>(&self, val: &K) -> &Lock<T> {
        self.get_shard_by_hash(if SHARDS == 1 { 0 } else { make_hash(val) })
    }

    #[inline]
    pub fn get_shard_by_hash(&self, hash: u64) -> &Lock<T> {
        self.get_shard_by_index(get_shard_hash(hash))
    }

    #[inline]
    pub fn get_shard_by_index(&self, i: usize) -> &Lock<T> {
        // SAFETY: The index is ANDed with the mask, ensuring it is always in
        // bounds of `shards`.
        unsafe { &self.shards.get_unchecked(i & self.mask()).0 }
    }

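    /// Locks every shard in index order and returns all the guards, giving
    /// the caller exclusive access to the entire collection.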
    pub fn lock_shards(&self) -> Vec<LockGuard<'_, T>> {
        (0..self.count()).map(|i| self.get_shard_by_index(i).lock()).collect()
    }

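    /// Like `lock_shards`, but returns `None` if any shard is already locked.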
    pub fn try_lock_shards(&self) -> Option<Vec<LockGuard<'_, T>>> {
        (0..self.count()).map(|i| self.get_shard_by_index(i).try_lock()).collect()
    }
}

pub type ShardedHashMap<K, V> = Sharded<FxHashMap<K, V>>;

impl<K: Eq, V> ShardedHashMap<K, V> {
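    /// Counts the entries across all shards. Note that this locks every
    /// shard.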
    pub fn len(&self) -> usize {
        self.lock_shards().iter().map(|shard| shard.len()).sum()
    }
}

impl<K: Eq + Hash + Copy> ShardedHashMap<K, ()> {
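    /// Interns `value`, returning the canonical `K`: if an equal key is
    /// already present, its copy is returned; otherwise `make` builds the key
    /// to insert. A hypothetical sketch, interning strings by leaking them
    /// (the leaking is purely illustrative, not how this crate's callers
    /// intern):
    ///
    /// ```ignore (illustrative)
    /// let interner: ShardedHashMap<&'static str, ()> = ShardedHashMap::default();
    /// let a = interner.intern_ref("hello", || -> &'static str {
    ///     Box::leak(String::from("hello").into_boxed_str())
    /// });
    /// // Already interned, so `make` is not called; the same `&str` is returned.
    /// let b = interner.intern_ref("hello", || unreachable!());
    /// assert!(std::ptr::eq(a, b));
    /// ```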
    #[inline]
    pub fn intern_ref<Q: ?Sized>(&self, value: &Q, make: impl FnOnce() -> K) -> K
    where
        K: Borrow<Q>,
        Q: Hash + Eq,
    {
        let hash = make_hash(value);
        let mut shard = self.get_shard_by_hash(hash).lock();
        let entry = shard.raw_entry_mut().from_key_hashed_nocheck(hash, value);

        match entry {
            RawEntryMut::Occupied(e) => *e.key(),
            RawEntryMut::Vacant(e) => {
                let v = make();
                e.insert_hashed_nocheck(hash, v, ());
                v
            }
        }
    }

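    /// Like `intern_ref`, but takes the query by value so `make` can consume
    /// it when constructing the key to insert.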
    #[inline]
    pub fn intern<Q>(&self, value: Q, make: impl FnOnce(Q) -> K) -> K
    where
        K: Borrow<Q>,
        Q: Hash + Eq,
    {
        let hash = make_hash(&value);
        let mut shard = self.get_shard_by_hash(hash).lock();
        let entry = shard.raw_entry_mut().from_key_hashed_nocheck(hash, &value);

        match entry {
            RawEntryMut::Occupied(e) => *e.key(),
            RawEntryMut::Vacant(e) => {
                let v = make(value);
                e.insert_hashed_nocheck(hash, v, ());
                v
            }
        }
    }
}

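/// Maps a value to the address that identifies it in a `ShardedHashMap`,
/// letting `contains_pointer_to` compare entries by address rather than by
/// value. A hypothetical impl for plain shared references (illustrative
/// only, not part of the original source):
///
/// ```ignore (illustrative)
/// impl<'a, T> IntoPointer for &'a T {
///     fn into_pointer(&self) -> *const () {
///         *self as *const T as *const ()
///     }
/// }
/// ```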
pub trait IntoPointer {
    /// Returns a pointer which outlives `self`.
    fn into_pointer(&self) -> *const ();
}

impl<K: Eq + Hash + Copy + IntoPointer> ShardedHashMap<K, ()> {
    pub fn contains_pointer_to<T: Hash + IntoPointer>(&self, value: &T) -> bool {
        let hash = make_hash(&value);
        let shard = self.get_shard_by_hash(hash).lock();
        let value = value.into_pointer();
        shard.raw_entry().from_hash(hash, |entry| entry.into_pointer() == value).is_some()
    }
}

#[inline]
pub fn make_hash<K: Hash + ?Sized>(val: &K) -> u64 {
    let mut state = FxHasher::default();
    val.hash(&mut state);
    state.finish()
}

/// Get a shard with a pre-computed hash value. If `get_shard_by_value` is
/// ever used in combination with `get_shard_by_hash` on a single `Sharded`
/// instance, then `hash` must be computed with `FxHasher`. Otherwise,
/// `hash` can be computed with any hasher, so long as that hasher is used
/// consistently for each `Sharded` instance.
#[inline]
fn get_shard_hash(hash: u64) -> usize {
    let hash_len = mem::size_of::<usize>();
    // Ignore the top 7 bits, as hashbrown uses these, and take the next
    // SHARD_BITS highest bits. hashbrown also uses the lowest bits, so we
    // can't use those either.
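    // E.g. on a 64-bit target with SHARD_BITS = 5 this is `hash >> 52`: the
    // top 12 bits survive the shift, and `get_shard_by_index` then masks them
    // down to 5 bits, so the shard index is bits 52..=56 of the hash.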
    (hash >> (hash_len * 8 - 7 - SHARD_BITS)) as usize
}