/**************************************************************************************************
 * MurmurHash3 was written by Austin Appleby, and is placed in the
 * public domain. The author hereby disclaims copyright to this source
 * code.
 *************************************************************************************************/

#include "murmur3.h"
#include <string.h>

#if !defined(__x86_64__) || defined(IW_TESTS)

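// 32-bit left rotation; callers pass rotation counts in (0, 32), which keeps
// both shifts well-defined.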
IW_INLINE uint32_t rotl32(uint32_t x, int8_t r) {
  return (x << r) | (x >> (32 - r));
}

#endif

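// 64-bit left rotation; callers pass rotation counts in (0, 64).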
IW_INLINE uint64_t rotl64(uint64_t x, int8_t r) {
  return (x << r) | (x >> (64 - r));
}

#define ROTL32(x, y) rotl32(x, y)
#define ROTL64(x, y) rotl64(x, y)

// Default seed; can be overridden with murmur3_set_seed().
static uint32_t seed_value = 0x2fa1bca;

// Finalization mix - force all bits of a hash block to avalanche
#if !defined(__x86_64__) || defined(IW_TESTS)

IW_INLINE uint32_t fmix32(uint32_t h) {
  h ^= h >> 16;
  h *= 0x85ebca6b;
  h ^= h >> 13;
  h *= 0xc2b2ae35;
  h ^= h >> 16;
  return h;
}

#endif

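// 64-bit finalization mix using the reference 64-bit avalanche constants.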
IW_INLINE uint64_t fmix64(uint64_t k) {
  k ^= k >> 33;
  k *= 0xff51afd7ed558ccdLLU;
  k ^= k >> 33;
  k *= 0xc4ceb9fe1a85ec53LLU;
  k ^= k >> 33;
  return k;
}

#if !defined(__x86_64__) || defined(IW_TESTS)

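// MurmurHash3 x86 32-bit variant: writes a 4-byte hash of `key` to `out`.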
void murmur3_x86_32(const void *key, size_t len, uint32_t seed, void *out) {
  const uint8_t *data = (const uint8_t*) key;
  const size_t nblocks = len / 4;
  size_t i;
  uint32_t h1 = seed;
  uint32_t c1 = 0xcc9e2d51;
  uint32_t c2 = 0x1b873593;

  // Body: blocks points just past the last full 4-byte block; the negative
  // index walks the blocks from the start of the key.
  const uint32_t *blocks = (const uint32_t*) (data + nblocks * 4);
  for (i = -nblocks; i; i++) {
    uint32_t k1;

    memcpy(&k1, blocks + i, sizeof(k1));

    k1 *= c1;
    k1 = ROTL32(k1, 15);
    k1 *= c2;
    h1 ^= k1;
    h1 = ROTL32(h1, 13);
    h1 = h1 * 5 + 0xe6546b64;
  }

  // Tail: fold in the remaining 1-3 bytes.
  const uint8_t *tail = (const uint8_t*) (data + nblocks * 4);
  uint32_t k1 = 0;
  switch (len & 3) {
    case 3:
      k1 ^= (uint32_t) tail[2] << 16;
      /* fallthrough */
    case 2:
      k1 ^= (uint32_t) tail[1] << 8;
      /* fallthrough */
    case 1:
      k1 ^= (uint32_t) tail[0];
      k1 *= c1;
      k1 = ROTL32(k1, 15);
      k1 *= c2;
      h1 ^= k1;
  }

  // Finalization: incorporate the length and avalanche.
  h1 ^= (uint32_t) len;
  h1 = fmix32(h1);
  *(uint32_t*) out = h1;
}

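// MurmurHash3 x86 128-bit variant: writes a 16-byte hash of `key` to `out`
// as four 32-bit lanes, each mixed with its own constant.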
void murmur3_x86_128(const void *key, const size_t len, uint32_t seed, void *out) {
  size_t i;
  const uint8_t *data = (const uint8_t*) key;
  const size_t nblocks = len / 16;

  uint32_t h1 = seed;
  uint32_t h2 = seed;
  uint32_t h3 = seed;
  uint32_t h4 = seed;

  const uint32_t c1 = 0x239b961b;
  const uint32_t c2 = 0xab0e9789;
  const uint32_t c3 = 0x38b34ae5;
  const uint32_t c4 = 0xa1e38b93;

  // Body: blocks points just past the last full 16-byte block; the negative
  // index walks the blocks from the start of the key.
  const uint32_t *blocks = (const uint32_t*) (data + nblocks * 16);

  for (i = -nblocks; i; i++) {
    uint32_t k1, k2, k3, k4;

    memcpy(&k1, blocks + i * 4 + 0, sizeof(k1));
    memcpy(&k2, blocks + i * 4 + 1, sizeof(k2));
    memcpy(&k3, blocks + i * 4 + 2, sizeof(k3));
    memcpy(&k4, blocks + i * 4 + 3, sizeof(k4));

    k1 *= c1;
    k1 = ROTL32(k1, 15);
    k1 *= c2;
    h1 ^= k1;
    h1 = ROTL32(h1, 19);
    h1 += h2;
    h1 = h1 * 5 + 0x561ccd1b;
    k2 *= c2;
    k2 = ROTL32(k2, 16);
    k2 *= c3;
    h2 ^= k2;
    h2 = ROTL32(h2, 17);
    h2 += h3;
    h2 = h2 * 5 + 0x0bcaa747;
    k3 *= c3;
    k3 = ROTL32(k3, 17);
    k3 *= c4;
    h3 ^= k3;
    h3 = ROTL32(h3, 15);
    h3 += h4;
    h3 = h3 * 5 + 0x96cd1c35;
    k4 *= c4;
    k4 = ROTL32(k4, 18);
    k4 *= c1;
    h4 ^= k4;
    h4 = ROTL32(h4, 13);
    h4 += h1;
    h4 = h4 * 5 + 0x32ac3b17;
  }

  // Tail: fold in the remaining 1-15 bytes.
  const uint8_t *tail = (const uint8_t*) (data + nblocks * 16);

  uint32_t k1 = 0;
  uint32_t k2 = 0;
  uint32_t k3 = 0;
  uint32_t k4 = 0;

  switch (len & 15) {
    case 15:
      k4 ^= (uint32_t) tail[14] << 16;
      /* fallthrough */
    case 14:
      k4 ^= (uint32_t) tail[13] << 8;
      /* fallthrough */
    case 13:
      k4 ^= (uint32_t) tail[12] << 0;
      k4 *= c4;
      k4 = ROTL32(k4, 18);
      k4 *= c1;
      h4 ^= k4;
      /* fallthrough */
    case 12:
      k3 ^= (uint32_t) tail[11] << 24;
      /* fallthrough */
    case 11:
      k3 ^= (uint32_t) tail[10] << 16;
      /* fallthrough */
    case 10:
      k3 ^= (uint32_t) tail[ 9] << 8;
      /* fallthrough */
    case 9:
      k3 ^= (uint32_t) tail[ 8] << 0;
      k3 *= c3;
      k3 = ROTL32(k3, 17);
      k3 *= c4;
      h3 ^= k3;
      /* fallthrough */
    case 8:
      k2 ^= (uint32_t) tail[ 7] << 24;
      /* fallthrough */
    case 7:
      k2 ^= (uint32_t) tail[ 6] << 16;
      /* fallthrough */
    case 6:
      k2 ^= (uint32_t) tail[ 5] << 8;
      /* fallthrough */
    case 5:
      k2 ^= (uint32_t) tail[ 4] << 0;
      k2 *= c2;
      k2 = ROTL32(k2, 16);
      k2 *= c3;
      h2 ^= k2;
      /* fallthrough */
    case 4:
      k1 ^= (uint32_t) tail[ 3] << 24;
      /* fallthrough */
    case 3:
      k1 ^= (uint32_t) tail[ 2] << 16;
      /* fallthrough */
    case 2:
      k1 ^= (uint32_t) tail[ 1] << 8;
      /* fallthrough */
    case 1:
      k1 ^= (uint32_t) tail[ 0] << 0;
      k1 *= c1;
      k1 = ROTL32(k1, 15);
      k1 *= c2;
      h1 ^= k1;
  }

  // Finalization: incorporate the length, mix the lanes, avalanche, mix again.
  h1 ^= (uint32_t) len;
  h2 ^= (uint32_t) len;
  h3 ^= (uint32_t) len;
  h4 ^= (uint32_t) len;

  h1 += h2;
  h1 += h3;
  h1 += h4;
  h2 += h1;
  h3 += h1;
  h4 += h1;

  h1 = fmix32(h1);
  h2 = fmix32(h2);
  h3 = fmix32(h3);
  h4 = fmix32(h4);

  h1 += h2;
  h1 += h3;
  h1 += h4;
  h2 += h1;
  h3 += h1;
  h4 += h1;

  ((uint32_t*) out)[0] = h1;
  ((uint32_t*) out)[1] = h2;
  ((uint32_t*) out)[2] = h3;
  ((uint32_t*) out)[3] = h4;
}

#endif

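// MurmurHash3 x64 128-bit variant: writes a 16-byte hash of `key` to `out`
// as two 64-bit lanes.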
void murmur3_x64_128(const void *key, const size_t len, const uint32_t seed, void *out) {
  const uint8_t *data = (const uint8_t*) key;
  const size_t nblocks = len / 16;
  size_t i;
  uint64_t h1 = seed;
  uint64_t h2 = seed;
  uint64_t c1 = 0x87c37b91114253d5LLU;
  uint64_t c2 = 0x4cf5ad432745937fLLU;

  // Body: consume the key in 16-byte blocks, two 64-bit lanes per block.
  const uint64_t *blocks = (const uint64_t*) data;
  for (i = 0; i < nblocks; i++) {
    uint64_t k1, k2;

    memcpy(&k1, blocks + i * 2 + 0, sizeof(k1));
    memcpy(&k2, blocks + i * 2 + 1, sizeof(k2));

    k1 *= c1;
    k1 = ROTL64(k1, 31);
    k1 *= c2;
    h1 ^= k1;
    h1 = ROTL64(h1, 27);
    h1 += h2;
    h1 = h1 * 5 + 0x52dce729;
    k2 *= c2;
    k2 = ROTL64(k2, 33);
    k2 *= c1;
    h2 ^= k2;
    h2 = ROTL64(h2, 31);
    h2 += h1;
    h2 = h2 * 5 + 0x38495ab5;
  }

  // Tail: fold in the remaining 1-15 bytes.
  const uint8_t *tail = data + nblocks * 16;
  uint64_t k1 = 0;
  uint64_t k2 = 0;
  switch (len & 15) {
    case 15:
      k2 ^= (uint64_t) tail[14] << 48;
      /* fallthrough */
    case 14:
      k2 ^= (uint64_t) tail[13] << 40;
      /* fallthrough */
    case 13:
      k2 ^= (uint64_t) tail[12] << 32;
      /* fallthrough */
    case 12:
      k2 ^= (uint64_t) tail[11] << 24;
      /* fallthrough */
    case 11:
      k2 ^= (uint64_t) tail[10] << 16;
      /* fallthrough */
    case 10:
      k2 ^= (uint64_t) tail[9] << 8;
      /* fallthrough */
    case 9:
      k2 ^= (uint64_t) tail[8] << 0;
      k2 *= c2;
      k2 = ROTL64(k2, 33);
      k2 *= c1;
      h2 ^= k2;
      /* fallthrough */
    case 8:
      k1 ^= (uint64_t) tail[7] << 56;
      /* fallthrough */
    case 7:
      k1 ^= (uint64_t) tail[6] << 48;
      /* fallthrough */
    case 6:
      k1 ^= (uint64_t) tail[5] << 40;
      /* fallthrough */
    case 5:
      k1 ^= (uint64_t) tail[4] << 32;
      /* fallthrough */
    case 4:
      k1 ^= (uint64_t) tail[3] << 24;
      /* fallthrough */
    case 3:
      k1 ^= (uint64_t) tail[2] << 16;
      /* fallthrough */
    case 2:
      k1 ^= (uint64_t) tail[1] << 8;
      /* fallthrough */
    case 1:
      k1 ^= (uint64_t) tail[0] << 0;
      k1 *= c1;
      k1 = ROTL64(k1, 31);
      k1 *= c2;
      h1 ^= k1;
  }

  // Finalization: incorporate the length, mix the lanes, avalanche, mix again.
  h1 ^= (uint64_t) len;
  h2 ^= (uint64_t) len;
  h1 += h2;
  h2 += h1;
  h1 = fmix64(h1);
  h2 = fmix64(h2);
  h1 += h2;
  h2 += h1;
  ((uint64_t*) out)[0] = h1;
  ((uint64_t*) out)[1] = h2;
}

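// Convenience 32-bit hash of `keyptr` using the process-wide seed. Note that
// the result is not portable across build targets: 64-bit builds derive it
// from murmur3_x64_128(), 32-bit builds from the x86 variants.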
uint32_t murmur3(const char *keyptr, size_t len) {
#ifdef __x86_64__
  uint64_t hash[2];
  murmur3_x64_128(keyptr, len, seed_value, hash);
  return (uint32_t) hash[1];
#else
  if (len <= 16) {
    uint32_t hash;
    murmur3_x86_32(keyptr, len, seed_value, &hash);
    return hash;
  }
  uint32_t hash[4];
  murmur3_x86_128(keyptr, len, seed_value, hash);
  return hash[3];
#endif
}

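// Replaces the process-wide default seed. Not synchronized: call it before
// any concurrent use of murmur3().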
void murmur3_set_seed(const uint32_t seed) {
  seed_value = seed;
}

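/*
 * A minimal usage sketch (not part of the library API): hash a string with
 * the one-shot helper and with the 128-bit x64 variant directly. The
 * MURMUR3_EXAMPLE guard is hypothetical, used here only to keep the example
 * out of normal builds; it assumes murmur3.h declares the functions above.
 */
#ifdef MURMUR3_EXAMPLE

#include <stdio.h>
#include <inttypes.h>

int main(void) {
  const char *key = "hello";

  // One-shot 32-bit hash with the default (or previously set) seed.
  uint32_t h32 = murmur3(key, strlen(key));
  printf("murmur3(\"%s\") = 0x%08" PRIx32 "\n", key, h32);

  // Full 128-bit digest with an explicit seed.
  uint64_t h128[2];
  murmur3_x64_128(key, strlen(key), 0x2fa1bca, h128);
  printf("murmur3_x64_128 = 0x%016" PRIx64 "%016" PRIx64 "\n", h128[0], h128[1]);
  return 0;
}

#endif // MURMUR3_EXAMPLE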