1 /*
2 * Copyright (C) 2023 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 //! An implementation of SipHash13
18
19 use std::cmp;
20 use std::mem;
21 use std::ptr;
22 use std::slice;
23
24 use std::hash::Hasher;
25
/// An implementation of SipHash 1-3: one compression round per 8-byte
/// message block and three finalization rounds (see `c_rounds` and
/// `d_rounds` below). Keyed with a 128-bit key split across `k0`/`k1`.
#[derive(Debug, Clone, Default)]
pub struct SipHasher13 {
    k0: u64,       // low 64 bits of the 128-bit key
    k1: u64,       // high 64 bits of the 128-bit key
    length: usize, // how many bytes we've processed
    state: State,  // hash State
    tail: u64,     // unprocessed bytes le
    ntail: usize,  // how many bytes in tail are valid
}
37
#[derive(Debug, Clone, Copy, Default)]
// repr(C) pins the field order below; the default Rust layout would be
// free to reorder the fields and defeat the pairing described here.
#[repr(C)]
struct State {
    // v0, v2 and v1, v3 show up in pairs in the algorithm,
    // and simd implementations of SipHash will use vectors
    // of v02 and v13. By placing them in this order in the struct,
    // the compiler can pick up on just a few simd optimizations by itself.
    v0: u64,
    v2: u64,
    v1: u64,
    v3: u64,
}
50
/// One SipRound — the core add-rotate-xor permutation of SipHash.
/// The one-argument form destructures a `State`; the four-argument form
/// operates on the four 64-bit lanes directly. All additions wrap,
/// matching the reference algorithm's mod-2^64 arithmetic.
macro_rules! compress {
    ($state:expr) => {{
        compress!($state.v0, $state.v1, $state.v2, $state.v3)
    }};
    ($v0:expr, $v1:expr, $v2:expr, $v3:expr) => {{
        // Mix the (v0, v1) pair, then the (v2, v3) pair...
        $v0 = $v0.wrapping_add($v1);
        $v1 = $v1.rotate_left(13);
        $v1 ^= $v0;
        $v0 = $v0.rotate_left(32);
        $v2 = $v2.wrapping_add($v3);
        $v3 = $v3.rotate_left(16);
        $v3 ^= $v2;
        // ...then cross-mix (v0, v3) and (v2, v1) so all lanes interact.
        $v0 = $v0.wrapping_add($v3);
        $v3 = $v3.rotate_left(21);
        $v3 ^= $v0;
        $v2 = $v2.wrapping_add($v1);
        $v1 = $v1.rotate_left(17);
        $v1 ^= $v2;
        $v2 = $v2.rotate_left(32);
    }};
}
72
/// Load an integer of the desired type from a byte stream, in LE order. Uses
/// `copy_nonoverlapping` to let the compiler generate the most efficient way
/// to load it from a possibly unaligned address.
///
/// Unsafe because: unchecked indexing at i..i+size_of(int_ty)
macro_rules! load_int_le {
    ($buf:expr, $i:expr, $int_ty:ident) => {{
        // Only checked in debug builds; release builds rely on the caller.
        debug_assert!($i + mem::size_of::<$int_ty>() <= $buf.len());
        let mut data = 0 as $int_ty;
        // Byte-wise copy handles unaligned source addresses.
        ptr::copy_nonoverlapping(
            $buf.get_unchecked($i),
            &mut data as *mut _ as *mut u8,
            mem::size_of::<$int_ty>(),
        );
        // `to_le` makes the result independent of host endianness.
        data.to_le()
    }};
}
90
/// Packs up to 7 bytes of `buf[start..start + len]` into a `u64` in
/// little-endian order (byte `k` of the input lands in bits `8k..8k+8`
/// of the result).
///
/// Unsafe because: unchecked indexing at start..start+len — the caller
/// must guarantee `start + len <= buf.len()`.
#[inline]
unsafe fn u8to64_le(buf: &[u8], start: usize, len: usize) -> u64 {
    debug_assert!(len < 8);
    let mut out: u64 = 0;
    // Accumulate byte-by-byte; each byte is shifted into place so the
    // result matches a little-endian load of the same bytes.
    for k in 0..len {
        out |= u64::from(*buf.get_unchecked(start + k)) << (8 * k);
    }
    out
}
114
impl SipHasher13 {
    /// Creates a new `SipHasher13` with the two initial keys set to 0.
    #[inline]
    pub fn new() -> SipHasher13 {
        SipHasher13::new_with_keys(0, 0)
    }

    /// Creates a `SipHasher13` that is keyed off the provided keys.
    #[inline]
    pub fn new_with_keys(key0: u64, key1: u64) -> SipHasher13 {
        let mut sip_hasher = SipHasher13 {
            k0: key0,
            k1: key1,
            length: 0,
            state: State { v0: 0, v1: 0, v2: 0, v3: 0 },
            tail: 0,
            ntail: 0,
        };
        // `reset` derives the real v0..v3 from the keys; the zeros above
        // are just placeholders.
        sip_hasher.reset();
        sip_hasher
    }

    // One SipRound per message block — the "1" of SipHash 1-3.
    #[inline]
    fn c_rounds(state: &mut State) {
        compress!(state);
    }

    // Three SipRounds at finalization — the "3" of SipHash 1-3.
    #[inline]
    fn d_rounds(state: &mut State) {
        compress!(state);
        compress!(state);
        compress!(state);
    }

    // Re-initializes the state from the keys. The XOR constants are the
    // standard SipHash initialization vector (the ASCII bytes of
    // "somepseudorandomlygeneratedbytes").
    #[inline]
    fn reset(&mut self) {
        self.length = 0;
        self.state.v0 = self.k0 ^ 0x736f6d6570736575;
        self.state.v1 = self.k1 ^ 0x646f72616e646f6d;
        self.state.v2 = self.k0 ^ 0x6c7967656e657261;
        self.state.v3 = self.k1 ^ 0x7465646279746573;
        self.ntail = 0;
    }

    // Specialized write function that is only valid for buffers with len <= 8.
    // It's used to force inlining of write_u8 and write_usize, those would normally be inlined
    // except for composite types (that includes slices and str hashing because of delimiter).
    // Without this extra push the compiler is very reluctant to inline delimiter writes,
    // degrading performance substantially for the most common use cases.
    #[inline]
    fn short_write(&mut self, msg: &[u8]) {
        debug_assert!(msg.len() <= 8);
        let length = msg.len();
        self.length += length;

        // `needed` = free space left in the 8-byte tail buffer;
        // `fill` = how much of `msg` goes into it.
        let needed = 8 - self.ntail;
        let fill = cmp::min(length, needed);
        if fill == 8 {
            // Tail was empty and msg is a full block: load it whole.
            // safe to call since msg hasn't been loaded
            self.tail = unsafe { load_int_le!(msg, 0, u64) };
        } else {
            // safe to call since msg hasn't been loaded, and fill <= msg.len()
            self.tail |= unsafe { u8to64_le(msg, 0, fill) } << (8 * self.ntail);
            if length < needed {
                // Tail still not full — just buffer and wait for more input.
                self.ntail += length;
                return;
            }
        }
        // Tail holds a complete 8-byte block: compress it into the state.
        self.state.v3 ^= self.tail;
        Self::c_rounds(&mut self.state);
        self.state.v0 ^= self.tail;

        // Buffered tail is now flushed, process new input.
        self.ntail = length - needed;
        // safe to call since number of `needed` bytes has been loaded
        // and self.ntail + needed == msg.len()
        self.tail = unsafe { u8to64_le(msg, needed, self.ntail) };
    }
}
194
impl Hasher for SipHasher13 {
    // see short_write comment for explanation
    #[inline]
    fn write_usize(&mut self, i: usize) {
        // Reinterpret the usize as its native-endian byte representation;
        // safe because any initialized usize is valid to view as bytes.
        let bytes = unsafe {
            slice::from_raw_parts(&i as *const usize as *const u8, mem::size_of::<usize>())
        };
        self.short_write(bytes);
    }

    // see short_write comment for explanation
    #[inline]
    fn write_u8(&mut self, i: u8) {
        self.short_write(&[i]);
    }

    // Streams an arbitrary-length buffer: tops up the buffered tail,
    // compresses full 8-byte blocks, then re-buffers the remainder.
    #[inline]
    fn write(&mut self, msg: &[u8]) {
        let length = msg.len();
        self.length += length;

        // Number of msg bytes consumed to complete the previous tail.
        let mut needed = 0;

        // loading unprocessed byte from last write
        if self.ntail != 0 {
            needed = 8 - self.ntail;
            // safe to call, since msg hasn't been processed
            // and cmp::min(length, needed) < 8
            // NB: `<<` binds looser than `*`, so this is `<< (8 * ntail)`.
            self.tail |= unsafe { u8to64_le(msg, 0, cmp::min(length, needed)) } << 8 * self.ntail;
            if length < needed {
                // Still not a full block — buffer and return.
                self.ntail += length;
                return;
            } else {
                // Tail completed: compress it.
                self.state.v3 ^= self.tail;
                Self::c_rounds(&mut self.state);
                self.state.v0 ^= self.tail;
                self.ntail = 0;
            }
        }

        // Buffered tail is now flushed, process new input.
        let len = length - needed;  // bytes of msg not yet consumed
        let left = len & 0x7;       // trailing bytes that won't fill a block

        // Compress each full 8-byte block starting at absolute index `needed`.
        let mut i = needed;
        while i < len - left {
            // safe to call since if i < len - left, it means msg has at least 1 byte to load
            let mi = unsafe { load_int_le!(msg, i, u64) };

            self.state.v3 ^= mi;
            Self::c_rounds(&mut self.state);
            self.state.v0 ^= mi;

            i += 8;
        }

        // safe to call since if left == 0, since this call will load nothing
        // if left > 0, it means there are number of `left` bytes in msg
        self.tail = unsafe { u8to64_le(msg, i, left) };
        self.ntail = left;
    }

    // Finalization: does not consume the hasher, so more writes may follow.
    #[inline]
    fn finish(&self) -> u64 {
        // Work on a copy so `self` keeps its streaming state intact.
        let mut state = self.state;

        // Final block: low byte of the total length in the top byte,
        // remaining tail bytes below it (per the SipHash specification).
        let b: u64 = ((self.length as u64 & 0xff) << 56) | self.tail;

        state.v3 ^= b;
        Self::c_rounds(&mut state);
        state.v0 ^= b;

        // 0xff marks finalization, then the d-rounds scramble the state.
        state.v2 ^= 0xff;
        Self::d_rounds(&mut state);

        state.v0 ^ state.v1 ^ state.v2 ^ state.v3
    }
}
274
#[cfg(test)]
mod tests {
    use super::*;

    use std::hash::{Hash, Hasher};
    use std::string::String;

    // Locks down the value list serialization: these digests must never
    // change across refactors.
    #[test]
    fn test_sip_hash13_string_hash() {
        let mut hasher = SipHasher13::new();

        String::from("com.google.android.test").hash(&mut hasher);
        assert_eq!(17898838669067067585, hasher.finish());

        String::from("adfadfadf adfafadadf 1231241241").hash(&mut hasher);
        assert_eq!(13543518987672889310, hasher.finish());
    }

    #[test]
    fn test_sip_hash13_write() {
        let payload = String::from("com.google.android.test");

        // Raw bytes plus a trailing 0xff reproduce the digest that
        // hashing the string itself produces above.
        let mut with_delimiter = SipHasher13::new();
        with_delimiter.write(payload.as_bytes());
        with_delimiter.write_u8(0xff);
        assert_eq!(17898838669067067585, with_delimiter.finish());

        // finish() is non-destructive: writing afterwards keeps streaming.
        let mut raw = SipHasher13::new();
        raw.write(payload.as_bytes());
        assert_eq!(9685440969685209025, raw.finish());
        raw.write(payload.as_bytes());
        assert_eq!(6719694176662736568, raw.finish());

        // 7-byte string.
        let mut seven = SipHasher13::new();
        String::from("abcdefg").hash(&mut seven);
        assert_eq!(2492161047327640297, seven.finish());

        // 8-byte string.
        let mut eight = SipHasher13::new();
        String::from("abcdefgh").hash(&mut eight);
        assert_eq!(6689927370435554326, eight.finish());
    }

    #[test]
    fn test_sip_hash13_write_short() {
        let mut hasher = SipHasher13::new();
        hasher.write_u8(0x61);
        assert_eq!(4644417185603328019, hasher.finish());
    }
}
328