// Vendored from libstd:
// https://github.com/rust-lang/rust/blob/1.57.0/library/core/src/hash/sip.rs
//
// TODO: maybe depend on a hasher from crates.io if this becomes annoying to
// maintain, or change this to a simpler one.

#![cfg(not(feature = "std"))]

use core::cmp;
use core::hash::Hasher;
use core::mem;
use core::ptr;

14 /// An implementation of SipHash 1-3.
15 ///
16 /// This is currently the default hashing function used by standard library
17 /// (e.g., `collections::HashMap` uses it by default).
18 ///
19 /// See: <https://131002.net/siphash>
20 pub struct SipHasher13 {
21     k0: u64,
22     k1: u64,
23     length: usize, // how many bytes we've processed
24     state: State,  // hash State
25     tail: u64,     // unprocessed bytes le
26     ntail: usize,  // how many bytes in tail are valid
27 }
28 
/// The 256-bit internal SipHash state, as four 64-bit words.
///
/// The fields are deliberately laid out as (v0, v2, v1, v3) rather than
/// sequentially: the algorithm always operates on v0/v2 and v1/v3 in pairs,
/// so with this ordering (pinned by `repr(C)`) the compiler can discover a
/// few SIMD optimizations on its own, treating them as vectors of v02 and v13.
#[derive(Clone, Copy)]
#[repr(C)]
struct State {
    v0: u64,
    v2: u64,
    v1: u64,
    v3: u64,
}
41 
/// One SipHash compression round over the four state words.
///
/// The one-argument form forwards to the four-word form using the fields of
/// a `State`. The exact statement order below is the round function itself —
/// do not reorder.
macro_rules! compress {
    ($state:expr) => {
        compress!($state.v0, $state.v1, $state.v2, $state.v3)
    };
    ($v0:expr, $v1:expr, $v2:expr, $v3:expr) => {
        // Mix the v0/v1 pair.
        $v0 = $v0.wrapping_add($v1);
        $v1 = $v1.rotate_left(13);
        $v1 ^= $v0;
        $v0 = $v0.rotate_left(32);
        // Mix the v2/v3 pair.
        $v2 = $v2.wrapping_add($v3);
        $v3 = $v3.rotate_left(16);
        $v3 ^= $v2;
        // Cross-mix the pairs.
        $v0 = $v0.wrapping_add($v3);
        $v3 = $v3.rotate_left(21);
        $v3 ^= $v0;
        $v2 = $v2.wrapping_add($v1);
        $v1 = $v1.rotate_left(17);
        $v1 ^= $v2;
        $v2 = $v2.rotate_left(32);
    };
}
63 
/// Loads an integer of the desired type from a byte stream, in LE order.
///
/// Uses `copy_nonoverlapping` so the compiler can emit the most efficient
/// load for a possibly unaligned address, with a fixed size that avoids a
/// `memcpy` call.
///
/// Unsafe because: unchecked indexing at i..i+size_of(int_ty)
macro_rules! load_int_le {
    ($buf:expr, $i:expr, $int_ty:ident) => {{
        debug_assert!($i + mem::size_of::<$int_ty>() <= $buf.len());
        let mut value = 0 as $int_ty;
        ptr::copy_nonoverlapping(
            $buf.as_ptr().add($i),
            &mut value as *mut _ as *mut u8,
            mem::size_of::<$int_ty>(),
        );
        value.to_le()
    }};
}
81 
82 /// Loads a u64 using up to 7 bytes of a byte slice. It looks clumsy but the
83 /// `copy_nonoverlapping` calls that occur (via `load_int_le!`) all have fixed
84 /// sizes and avoid calling `memcpy`, which is good for speed.
85 ///
86 /// Unsafe because: unchecked indexing at start..start+len
u8to64_le(buf: &[u8], start: usize, len: usize) -> u6487 unsafe fn u8to64_le(buf: &[u8], start: usize, len: usize) -> u64 {
88     debug_assert!(len < 8);
89     let mut i = 0; // current byte index (from LSB) in the output u64
90     let mut out = 0;
91     if i + 3 < len {
92         // SAFETY: `i` cannot be greater than `len`, and the caller must guarantee
93         // that the index start..start+len is in bounds.
94         out = unsafe { load_int_le!(buf, start + i, u32) } as u64;
95         i += 4;
96     }
97     if i + 1 < len {
98         // SAFETY: same as above.
99         out |= (unsafe { load_int_le!(buf, start + i, u16) } as u64) << (i * 8);
100         i += 2
101     }
102     if i < len {
103         // SAFETY: same as above.
104         out |= (unsafe { *buf.get_unchecked(start + i) } as u64) << (i * 8);
105         i += 1;
106     }
107     debug_assert_eq!(i, len);
108     out
109 }
110 
111 impl SipHasher13 {
112     /// Creates a new `SipHasher13` with the two initial keys set to 0.
new() -> Self113     pub fn new() -> Self {
114         Self::new_with_keys(0, 0)
115     }
116 
117     /// Creates a `SipHasher13` that is keyed off the provided keys.
new_with_keys(key0: u64, key1: u64) -> Self118     fn new_with_keys(key0: u64, key1: u64) -> Self {
119         let mut state = SipHasher13 {
120             k0: key0,
121             k1: key1,
122             length: 0,
123             state: State {
124                 v0: 0,
125                 v1: 0,
126                 v2: 0,
127                 v3: 0,
128             },
129             tail: 0,
130             ntail: 0,
131         };
132         state.reset();
133         state
134     }
135 
reset(&mut self)136     fn reset(&mut self) {
137         self.length = 0;
138         self.state.v0 = self.k0 ^ 0x736f6d6570736575;
139         self.state.v1 = self.k1 ^ 0x646f72616e646f6d;
140         self.state.v2 = self.k0 ^ 0x6c7967656e657261;
141         self.state.v3 = self.k1 ^ 0x7465646279746573;
142         self.ntail = 0;
143     }
144 }
145 
146 impl Hasher for SipHasher13 {
147     // Note: no integer hashing methods (`write_u*`, `write_i*`) are defined
148     // for this type. We could add them, copy the `short_write` implementation
149     // in librustc_data_structures/sip128.rs, and add `write_u*`/`write_i*`
150     // methods to `SipHasher`, `SipHasher13`, and `DefaultHasher`. This would
151     // greatly speed up integer hashing by those hashers, at the cost of
152     // slightly slowing down compile speeds on some benchmarks. See #69152 for
153     // details.
write(&mut self, msg: &[u8])154     fn write(&mut self, msg: &[u8]) {
155         let length = msg.len();
156         self.length += length;
157 
158         let mut needed = 0;
159 
160         if self.ntail != 0 {
161             needed = 8 - self.ntail;
162             // SAFETY: `cmp::min(length, needed)` is guaranteed to not be over `length`
163             self.tail |= unsafe { u8to64_le(msg, 0, cmp::min(length, needed)) } << (8 * self.ntail);
164             if length < needed {
165                 self.ntail += length;
166                 return;
167             } else {
168                 self.state.v3 ^= self.tail;
169                 Sip13Rounds::c_rounds(&mut self.state);
170                 self.state.v0 ^= self.tail;
171                 self.ntail = 0;
172             }
173         }
174 
175         // Buffered tail is now flushed, process new input.
176         let len = length - needed;
177         let left = len & 0x7; // len % 8
178 
179         let mut i = needed;
180         while i < len - left {
181             // SAFETY: because `len - left` is the biggest multiple of 8 under
182             // `len`, and because `i` starts at `needed` where `len` is `length - needed`,
183             // `i + 8` is guaranteed to be less than or equal to `length`.
184             let mi = unsafe { load_int_le!(msg, i, u64) };
185 
186             self.state.v3 ^= mi;
187             Sip13Rounds::c_rounds(&mut self.state);
188             self.state.v0 ^= mi;
189 
190             i += 8;
191         }
192 
193         // SAFETY: `i` is now `needed + len.div_euclid(8) * 8`,
194         // so `i + left` = `needed + len` = `length`, which is by
195         // definition equal to `msg.len()`.
196         self.tail = unsafe { u8to64_le(msg, i, left) };
197         self.ntail = left;
198     }
199 
finish(&self) -> u64200     fn finish(&self) -> u64 {
201         let mut state = self.state;
202 
203         let b: u64 = ((self.length as u64 & 0xff) << 56) | self.tail;
204 
205         state.v3 ^= b;
206         Sip13Rounds::c_rounds(&mut state);
207         state.v0 ^= b;
208 
209         state.v2 ^= 0xff;
210         Sip13Rounds::d_rounds(&mut state);
211 
212         state.v0 ^ state.v1 ^ state.v2 ^ state.v3
213     }
214 }
215 
216 struct Sip13Rounds;
217 
218 impl Sip13Rounds {
c_rounds(state: &mut State)219     fn c_rounds(state: &mut State) {
220         compress!(state);
221     }
222 
d_rounds(state: &mut State)223     fn d_rounds(state: &mut State) {
224         compress!(state);
225         compress!(state);
226         compress!(state);
227     }
228 }
229