/**************************************************************************************************
 * MurmurHash3 was written by Austin Appleby, and is placed in the
 * public domain. The author hereby disclaims copyright to this source
 * code.
 *************************************************************************************************/

#include "murmur3.h"
#include <string.h>

#if !defined(__x86_64__) || defined(IW_TESTS)
IW_INLINE uint32_t rotl32(uint32_t x, int8_t r) {
  return (x << r) | (x >> (32 - r));
}
#endif

IW_INLINE uint64_t rotl64(uint64_t x, int8_t r) {
  return (x << r) | (x >> (64 - r));
}

#define ROTL32(x,y) rotl32(x,y)
#define ROTL64(x,y) rotl64(x,y)

static uint32_t seed_value = 0x2fa1bca;

// Finalization mix - force all bits of a hash block to avalanche
#if !defined(__x86_64__) || defined(IW_TESTS)
IW_INLINE uint32_t fmix32(uint32_t h) {
  h ^= h >> 16;
  h *= 0x85ebca6b;
  h ^= h >> 13;
  h *= 0xc2b2ae35;
  h ^= h >> 16;
  return h;
}
#endif

IW_INLINE uint64_t fmix64(uint64_t k) {
  k ^= k >> 33;
  k *= 0xff51afd7ed558ccdLLU;
  k ^= k >> 33;
  k *= 0xc4ceb9fe1a85ec53LLU;
  k ^= k >> 33;
  return k;
}

#if !defined(__x86_64__) || defined(IW_TESTS)

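/* MurmurHash3 x86 32-bit variant: hashes `len` bytes of `key` with `seed`
 * and writes one 32-bit result to `out`. Compiled only for non-x86_64
 * builds (or for tests), matching the dispatch in murmur3() below. */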
void murmur3_x86_32(const void *key, size_t len, uint32_t seed, void *out) {
  const uint8_t *data = (const uint8_t *)key;
  const size_t nblocks = len / 4;
  size_t i;
  uint32_t h1 = seed;
  uint32_t c1 = 0xcc9e2d51;
  uint32_t c2 = 0x1b873593;

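  // `blocks` points one block past the 4-byte body; iterating i from
  // -nblocks up to 0 walks the body front to back.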
  const uint32_t *blocks = (const uint32_t *)(data + nblocks * 4);
  for (i = -nblocks; i; i++) {
    uint32_t k1;

    memcpy(&k1, blocks + i, sizeof(k1));

    k1 *= c1;
    k1 = ROTL32(k1, 15);
    k1 *= c2;
    h1 ^= k1;
    h1 = ROTL32(h1, 13);
    h1 = h1 * 5 + 0xe6546b64;
  }

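  // Mix in the trailing 1-3 bytes that do not fill a whole 4-byte block.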
  const uint8_t *tail = (const uint8_t *)(data + nblocks * 4);
  uint32_t k1 = 0;
  switch (len & 3) {
    case 3:
      k1 ^= (uint32_t)tail[2] << 16;
    /* fallthrough */
    case 2:
      k1 ^= (uint32_t)tail[1] << 8;
    /* fallthrough */
    case 1:
      k1 ^= (uint32_t)tail[0];
      k1 *= c1;
      k1 = ROTL32(k1, 15);
      k1 *= c2;
      h1 ^= k1;
  }

  h1 ^= (uint32_t)len;
  h1 = fmix32(h1);
  *(uint32_t *) out = h1;
}

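/* MurmurHash3 x86 128-bit variant: hashes `len` bytes of `key` with `seed`
 * and writes four 32-bit lanes (16 bytes) to `out`. */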
void murmur3_x86_128(const void *key, const size_t len, uint32_t seed, void *out) {
  size_t i;
  const uint8_t *data = (const uint8_t *)key;
  const size_t nblocks = len / 16;

  uint32_t h1 = seed;
  uint32_t h2 = seed;
  uint32_t h3 = seed;
  uint32_t h4 = seed;

  const uint32_t c1 = 0x239b961b;
  const uint32_t c2 = 0xab0e9789;
  const uint32_t c3 = 0x38b34ae5;
  const uint32_t c4 = 0xa1e38b93;

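  // As in the 32-bit variant, `blocks` points past the body and is indexed
  // with negative offsets; each iteration mixes one 16-byte block into the
  // four lanes h1..h4, cross-feeding each lane into the next.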
  const uint32_t *blocks = (const uint32_t *)(data + nblocks * 16);

  for (i = -nblocks; i; i++) {
    uint32_t k1, k2, k3, k4;

    memcpy(&k1, blocks + i * 4 + 0, sizeof(k1));
    memcpy(&k2, blocks + i * 4 + 1, sizeof(k2));
    memcpy(&k3, blocks + i * 4 + 2, sizeof(k3));
    memcpy(&k4, blocks + i * 4 + 3, sizeof(k4));

    k1 *= c1;
    k1 = ROTL32(k1, 15);
    k1 *= c2;
    h1 ^= k1;
    h1 = ROTL32(h1, 19);
    h1 += h2;
    h1 = h1 * 5 + 0x561ccd1b;
    k2 *= c2;
    k2 = ROTL32(k2, 16);
    k2 *= c3;
    h2 ^= k2;
    h2 = ROTL32(h2, 17);
    h2 += h3;
    h2 = h2 * 5 + 0x0bcaa747;
    k3 *= c3;
    k3 = ROTL32(k3, 17);
    k3 *= c4;
    h3 ^= k3;
    h3 = ROTL32(h3, 15);
    h3 += h4;
    h3 = h3 * 5 + 0x96cd1c35;
    k4 *= c4;
    k4 = ROTL32(k4, 18);
    k4 *= c1;
    h4 ^= k4;
    h4 = ROTL32(h4, 13);
    h4 += h1;
    h4 = h4 * 5 + 0x32ac3b17;
  }

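  // Mix in the trailing 1-15 bytes that do not fill a whole 16-byte block.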
  const uint8_t *tail = (const uint8_t *)(data + nblocks * 16);

  uint32_t k1 = 0;
  uint32_t k2 = 0;
  uint32_t k3 = 0;
  uint32_t k4 = 0;

  switch (len & 15) {
    case 15:
      k4 ^= (uint32_t)tail[14] << 16;
    /* fallthrough */
    case 14:
      k4 ^= (uint32_t)tail[13] << 8;
    /* fallthrough */
    case 13:
      k4 ^= (uint32_t)tail[12] << 0;
      k4 *= c4;
      k4 = ROTL32(k4, 18);
      k4 *= c1;
      h4 ^= k4;
    /* fallthrough */
    case 12:
      k3 ^= (uint32_t)tail[11] << 24;
    /* fallthrough */
    case 11:
      k3 ^= (uint32_t)tail[10] << 16;
    /* fallthrough */
    case 10:
      k3 ^= (uint32_t)tail[ 9] << 8;
    /* fallthrough */
    case  9:
      k3 ^= (uint32_t)tail[ 8] << 0;
      k3 *= c3;
      k3 = ROTL32(k3, 17);
      k3 *= c4;
      h3 ^= k3;
    /* fallthrough */
    case  8:
      k2 ^= (uint32_t)tail[ 7] << 24;
    /* fallthrough */
    case  7:
      k2 ^= (uint32_t)tail[ 6] << 16;
    /* fallthrough */
    case  6:
      k2 ^= (uint32_t)tail[ 5] << 8;
    /* fallthrough */
    case  5:
      k2 ^= (uint32_t)tail[ 4] << 0;
      k2 *= c2;
      k2 = ROTL32(k2, 16);
      k2 *= c3;
      h2 ^= k2;
    /* fallthrough */
    case  4:
      k1 ^= (uint32_t)tail[ 3] << 24;
    /* fallthrough */
    case  3:
      k1 ^= (uint32_t)tail[ 2] << 16;
    /* fallthrough */
    case  2:
      k1 ^= (uint32_t)tail[ 1] << 8;
    /* fallthrough */
    case  1:
      k1 ^= (uint32_t)tail[ 0] << 0;
      k1 *= c1;
      k1 = ROTL32(k1, 15);
      k1 *= c2;
      h1 ^= k1;
  }

  h1 ^= (uint32_t)len;
  h2 ^= (uint32_t)len;
  h3 ^= (uint32_t)len;
  h4 ^= (uint32_t)len;

  h1 += h2;
  h1 += h3;
  h1 += h4;
  h2 += h1;
  h3 += h1;
  h4 += h1;

  h1 = fmix32(h1);
  h2 = fmix32(h2);
  h3 = fmix32(h3);
  h4 = fmix32(h4);

  h1 += h2;
  h1 += h3;
  h1 += h4;
  h2 += h1;
  h3 += h1;
  h4 += h1;

  ((uint32_t *)out)[0] = h1;
  ((uint32_t *)out)[1] = h2;
  ((uint32_t *)out)[2] = h3;
  ((uint32_t *)out)[3] = h4;
}
#endif

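/* MurmurHash3 x64 128-bit variant: hashes `len` bytes of `key` with `seed`
 * and writes two 64-bit lanes (16 bytes) to `out`. This is the only body
 * variant compiled unconditionally. */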
void murmur3_x64_128(const void *key, const size_t len, const uint32_t seed, void *out) {
  const uint8_t *data = (const uint8_t *)key;
  const size_t nblocks = len / 16;
  size_t i;
  uint64_t h1 = seed;
  uint64_t h2 = seed;
  uint64_t c1 = 0x87c37b91114253d5LLU;
  uint64_t c2 = 0x4cf5ad432745937fLLU;

  const uint64_t *blocks = (const uint64_t *)(data);
  for (i = 0; i < nblocks; i++) {
    uint64_t k1, k2;

    memcpy(&k1, blocks + i * 2 + 0, sizeof(k1));
    memcpy(&k2, blocks + i * 2 + 1, sizeof(k2));

    k1 *= c1;
    k1 = ROTL64(k1, 31);
    k1 *= c2;
    h1 ^= k1;
    h1 = ROTL64(h1, 27);
    h1 += h2;
    h1 = h1 * 5 + 0x52dce729;
    k2 *= c2;
    k2 = ROTL64(k2, 33);
    k2 *= c1;
    h2 ^= k2;
    h2 = ROTL64(h2, 31);
    h2 += h1;
    h2 = h2 * 5 + 0x38495ab5;
  }

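  // Mix in the trailing 1-15 bytes that do not fill a whole 16-byte block.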
  const uint8_t *tail = (data + nblocks * 16);
  uint64_t k1 = 0;
  uint64_t k2 = 0;
  switch (len & 15) {
    case 15:
      k2 ^= (uint64_t)(tail[14]) << 48;
    /* fallthrough */
    case 14:
      k2 ^= (uint64_t)(tail[13]) << 40;
    /* fallthrough */
    case 13:
      k2 ^= (uint64_t)(tail[12]) << 32;
    /* fallthrough */
    case 12:
      k2 ^= (uint64_t)(tail[11]) << 24;
    /* fallthrough */
    case 11:
      k2 ^= (uint64_t)(tail[10]) << 16;
    /* fallthrough */
    case 10:
      k2 ^= (uint64_t)(tail[9]) << 8;
    /* fallthrough */
    case 9:
      k2 ^= (uint64_t)(tail[8]) << 0;
      k2 *= c2;
      k2 = ROTL64(k2, 33);
      k2 *= c1;
      h2 ^= k2;
    /* fallthrough */
    case 8:
      k1 ^= (uint64_t)(tail[7]) << 56;
    /* fallthrough */
    case 7:
      k1 ^= (uint64_t)(tail[6]) << 48;
    /* fallthrough */
    case 6:
      k1 ^= (uint64_t)(tail[5]) << 40;
    /* fallthrough */
    case 5:
      k1 ^= (uint64_t)(tail[4]) << 32;
    /* fallthrough */
    case 4:
      k1 ^= (uint64_t)(tail[3]) << 24;
    /* fallthrough */
    case 3:
      k1 ^= (uint64_t)(tail[2]) << 16;
    /* fallthrough */
    case 2:
      k1 ^= (uint64_t)(tail[1]) << 8;
    /* fallthrough */
    case 1:
      k1 ^= (uint64_t)(tail[0]) << 0;
      k1 *= c1;
      k1 = ROTL64(k1, 31);
      k1 *= c2;
      h1 ^= k1;
  }

  h1 ^= (uint64_t)len;
  h2 ^= (uint64_t)len;
  h1 += h2;
  h2 += h1;
  h1 = fmix64(h1);
  h2 = fmix64(h2);
  h1 += h2;
  h2 += h1;
  ((uint64_t *) out)[0] = h1;
  ((uint64_t *) out)[1] = h2;
}

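/* Convenience entry point: hashes `keyptr` with the process-wide seed and
 * folds the result to 32 bits, choosing the variant suited to the build
 * target. */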
uint32_t murmur3(const char *keyptr, size_t len) {
#ifdef __x86_64__
  uint64_t hash[2];
  murmur3_x64_128(keyptr, len, seed_value, hash);
  return (uint32_t) hash[1];
#else
  if (len <= 16) {
    uint32_t hash;
    murmur3_x86_32(keyptr, len, seed_value, &hash);
    return hash;
  }
  uint32_t hash[4];
  murmur3_x86_128(keyptr, len, seed_value, hash);
  return hash[3];
#endif
}

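/* Replaces the default seed used by murmur3(). `seed_value` is a plain
 * (non-atomic) global, so set it before hashing starts on other threads. */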
void murmur3_set_seed(const uint32_t seed) {
  seed_value = seed;
}
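
/* Usage sketch (illustrative only; assumes murmur3.h declares the functions
 * defined above):
 *
 *   murmur3_set_seed(0x9747b28c);         // optional: override the default seed
 *   uint32_t h = murmur3("example", 7);   // 32-bit hash of a 7-byte key
 *
 *   uint64_t wide[2];                     // full 128-bit digest on x86_64
 *   murmur3_x64_128("example", 7, 0x9747b28c, wide);
 */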