• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  *******************************************************************************
3  * Implementation of (2^1+,2) cuckoo hashing, where 2^1+ indicates that each
4  * hash bucket contains 2^n cells, for n >= 1, and 2 indicates that two hash
5  * functions are employed.  The original cuckoo hashing algorithm was described
6  * in:
7  *
8  *   Pagh, R., F.F. Rodler (2004) Cuckoo Hashing.  Journal of Algorithms
9  *     51(2):122-144.
10  *
11  * Generalization of cuckoo hashing was discussed in:
12  *
13  *   Erlingsson, U., M. Manasse, F. McSherry (2006) A cool and practical
14  *     alternative to traditional hash tables.  In Proceedings of the 7th
15  *     Workshop on Distributed Data and Structures (WDAS'06), Santa Clara, CA,
16  *     January 2006.
17  *
18  * This implementation uses precisely two hash functions because that is the
19  * fewest that can work, and supporting multiple hashes is an implementation
20  * burden.  Here is a reproduction of Figure 1 from Erlingsson et al. (2006)
21  * that shows approximate expected maximum load factors for various
22  * configurations:
23  *
24  *           |         #cells/bucket         |
25  *   #hashes |   1   |   2   |   4   |   8   |
26  *   --------+-------+-------+-------+-------+
27  *         1 | 0.006 | 0.006 | 0.03  | 0.12  |
28  *         2 | 0.49  | 0.86  |>0.93< |>0.96< |
29  *         3 | 0.91  | 0.97  | 0.98  | 0.999 |
30  *         4 | 0.97  | 0.99  | 0.999 |       |
31  *
32  * The number of cells per bucket is chosen such that a bucket fits in one cache
33  * line.  So, on 32- and 64-bit systems, we use (8,2) and (4,2) cuckoo hashing,
34  * respectively.
35  *
36  ******************************************************************************/
37 #define	JEMALLOC_CKH_C_
38 #include "jemalloc/internal/jemalloc_internal.h"
39 
40 /******************************************************************************/
41 /* Function prototypes for non-inline static functions. */
42 
43 static bool	ckh_grow(tsd_t *tsd, ckh_t *ckh);
44 static void	ckh_shrink(tsd_t *tsd, ckh_t *ckh);
45 
46 /******************************************************************************/
47 
48 /*
49  * Search bucket for key and return the cell number if found; SIZE_T_MAX
50  * otherwise.
51  */
52 JEMALLOC_INLINE_C size_t
ckh_bucket_search(ckh_t * ckh,size_t bucket,const void * key)53 ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key)
54 {
55 	ckhc_t *cell;
56 	unsigned i;
57 
58 	for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
59 		cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
60 		if (cell->key != NULL && ckh->keycomp(key, cell->key))
61 			return ((bucket << LG_CKH_BUCKET_CELLS) + i);
62 	}
63 
64 	return (SIZE_T_MAX);
65 }
66 
67 /*
68  * Search table for key and return cell number if found; SIZE_T_MAX otherwise.
69  */
70 JEMALLOC_INLINE_C size_t
ckh_isearch(ckh_t * ckh,const void * key)71 ckh_isearch(ckh_t *ckh, const void *key)
72 {
73 	size_t hashes[2], bucket, cell;
74 
75 	assert(ckh != NULL);
76 
77 	ckh->hash(key, hashes);
78 
79 	/* Search primary bucket. */
80 	bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
81 	cell = ckh_bucket_search(ckh, bucket, key);
82 	if (cell != SIZE_T_MAX)
83 		return (cell);
84 
85 	/* Search secondary bucket. */
86 	bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
87 	cell = ckh_bucket_search(ckh, bucket, key);
88 	return (cell);
89 }
90 
91 JEMALLOC_INLINE_C bool
ckh_try_bucket_insert(ckh_t * ckh,size_t bucket,const void * key,const void * data)92 ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
93     const void *data)
94 {
95 	ckhc_t *cell;
96 	unsigned offset, i;
97 
98 	/*
99 	 * Cycle through the cells in the bucket, starting at a random position.
100 	 * The randomness avoids worst-case search overhead as buckets fill up.
101 	 */
102 	prng32(offset, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C);
103 	for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
104 		cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) +
105 		    ((i + offset) & ((ZU(1) << LG_CKH_BUCKET_CELLS) - 1))];
106 		if (cell->key == NULL) {
107 			cell->key = key;
108 			cell->data = data;
109 			ckh->count++;
110 			return (false);
111 		}
112 	}
113 
114 	return (true);
115 }
116 
/*
 * No space is available in bucket.  Randomly evict an item, then try to find an
 * alternate location for that item.  Iteratively repeat this
 * eviction/relocation procedure until either success or detection of an
 * eviction/relocation bucket cycle.
 *
 * Returns false on success.  On cycle detection, returns true and stores the
 * currently homeless {key, data} pair back through argkey/argdata so the
 * caller can grow the table and retry the insertion.
 */
JEMALLOC_INLINE_C bool
ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
    void const **argdata)
{
	const void *key, *data, *tkey, *tdata;
	ckhc_t *cell;
	size_t hashes[2], bucket, tbucket;
	unsigned i;

	bucket = argbucket;
	key = *argkey;
	data = *argdata;
	while (true) {
		/*
		 * Choose a random item within the bucket to evict.  This is
		 * critical to correct function, because without (eventually)
		 * evicting all items within a bucket during iteration, it
		 * would be possible to get stuck in an infinite loop if there
		 * were an item for which both hashes indicated the same
		 * bucket.
		 */
		prng32(i, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C);
		cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
		assert(cell->key != NULL);

		/* Swap cell->{key,data} and {key,data} (evict). */
		tkey = cell->key; tdata = cell->data;
		cell->key = key; cell->data = data;
		key = tkey; data = tdata;

#ifdef CKH_COUNT
		ckh->nrelocs++;
#endif

		/* Find the alternate bucket for the evicted item. */
		ckh->hash(key, hashes);
		tbucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
		if (tbucket == bucket) {
			/* Secondary hash maps here too; use the primary. */
			tbucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets)
			    - 1);
			/*
			 * It may be that (tbucket == bucket) still, if the
			 * item's hashes both indicate this bucket.  However,
			 * we are guaranteed to eventually escape this bucket
			 * during iteration, assuming pseudo-random item
			 * selection (true randomness would make infinite
			 * looping a remote possibility).  The reason we can
			 * never get trapped forever is that there are two
			 * cases:
			 *
			 * 1) This bucket == argbucket, so we will quickly
			 *    detect an eviction cycle and terminate.
			 * 2) An item was evicted to this bucket from another,
			 *    which means that at least one item in this bucket
			 *    has hashes that indicate distinct buckets.
			 */
		}
		/* Check for a cycle. */
		if (tbucket == argbucket) {
			*argkey = key;
			*argdata = data;
			return (true);
		}

		bucket = tbucket;
		if (!ckh_try_bucket_insert(ckh, bucket, key, data))
			return (false);
	}
}
192 
193 JEMALLOC_INLINE_C bool
ckh_try_insert(ckh_t * ckh,void const ** argkey,void const ** argdata)194 ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata)
195 {
196 	size_t hashes[2], bucket;
197 	const void *key = *argkey;
198 	const void *data = *argdata;
199 
200 	ckh->hash(key, hashes);
201 
202 	/* Try to insert in primary bucket. */
203 	bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
204 	if (!ckh_try_bucket_insert(ckh, bucket, key, data))
205 		return (false);
206 
207 	/* Try to insert in secondary bucket. */
208 	bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
209 	if (!ckh_try_bucket_insert(ckh, bucket, key, data))
210 		return (false);
211 
212 	/*
213 	 * Try to find a place for this item via iterative eviction/relocation.
214 	 */
215 	return (ckh_evict_reloc_insert(ckh, bucket, argkey, argdata));
216 }
217 
218 /*
219  * Try to rebuild the hash table from scratch by inserting all items from the
220  * old table into the new.
221  */
222 JEMALLOC_INLINE_C bool
ckh_rebuild(ckh_t * ckh,ckhc_t * aTab)223 ckh_rebuild(ckh_t *ckh, ckhc_t *aTab)
224 {
225 	size_t count, i, nins;
226 	const void *key, *data;
227 
228 	count = ckh->count;
229 	ckh->count = 0;
230 	for (i = nins = 0; nins < count; i++) {
231 		if (aTab[i].key != NULL) {
232 			key = aTab[i].key;
233 			data = aTab[i].data;
234 			if (ckh_try_insert(ckh, &key, &data)) {
235 				ckh->count = count;
236 				return (true);
237 			}
238 			nins++;
239 		}
240 	}
241 
242 	return (false);
243 }
244 
/*
 * Double the table size (possibly more than once) and rebuild.  Returns false
 * on success; true on size-computation or allocation failure, in which case
 * the original table is left intact.
 */
static bool
ckh_grow(tsd_t *tsd, ckh_t *ckh)
{
	bool ret;
	ckhc_t *tab, *ttab;
	size_t lg_curcells;
	unsigned lg_prevbuckets;

#ifdef CKH_COUNT
	ckh->ngrows++;
#endif

	/*
	 * It is possible (though unlikely, given well behaved hashes) that the
	 * table will have to be doubled more than once in order to create a
	 * usable table.
	 */
	lg_prevbuckets = ckh->lg_curbuckets;
	lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS;
	while (true) {
		size_t usize;

		lg_curcells++;
		usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
		if (usize == 0) {
			/* sa2u() failed (presumably size overflow); abort. */
			ret = true;
			goto label_return;
		}
		tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL,
		    true, NULL);
		if (tab == NULL) {
			/* OOM; the old table is still in place. */
			ret = true;
			goto label_return;
		}
		/* Swap in new table. */
		ttab = ckh->tab;
		ckh->tab = tab;
		tab = ttab;
		ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;

		if (!ckh_rebuild(ckh, tab)) {
			/* Rebuild succeeded; free the old table. */
			idalloctm(tsd, tab, tcache_get(tsd, false), true);
			break;
		}

		/* Rebuilding failed, so back out partially rebuilt table. */
		idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true);
		ckh->tab = tab;
		ckh->lg_curbuckets = lg_prevbuckets;
	}

	ret = false;
label_return:
	return (ret);
}
300 
/*
 * Halve the table size and rebuild.  Failure (OOM or rebuild failure) is not
 * propagated; the table is simply left at its current size, since shrinking
 * is only an optimization.
 */
static void
ckh_shrink(tsd_t *tsd, ckh_t *ckh)
{
	ckhc_t *tab, *ttab;
	size_t lg_curcells, usize;
	unsigned lg_prevbuckets;

	/*
	 * It is possible (though unlikely, given well behaved hashes) that the
	 * table rebuild will fail.
	 */
	lg_prevbuckets = ckh->lg_curbuckets;
	lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1;
	usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
	/* sa2u() failure; leave the table as is. */
	if (usize == 0)
		return;
	tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL, true,
	    NULL);
	if (tab == NULL) {
		/*
		 * An OOM error isn't worth propagating, since it doesn't
		 * prevent this or future operations from proceeding.
		 */
		return;
	}
	/* Swap in new table. */
	ttab = ckh->tab;
	ckh->tab = tab;
	tab = ttab;
	ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;

	if (!ckh_rebuild(ckh, tab)) {
		/* Rebuild succeeded; free the old table. */
		idalloctm(tsd, tab, tcache_get(tsd, false), true);
#ifdef CKH_COUNT
		ckh->nshrinks++;
#endif
		return;
	}

	/* Rebuilding failed, so back out partially rebuilt table. */
	idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true);
	ckh->tab = tab;
	ckh->lg_curbuckets = lg_prevbuckets;
#ifdef CKH_COUNT
	ckh->nshrinkfails++;
#endif
}
348 
349 bool
ckh_new(tsd_t * tsd,ckh_t * ckh,size_t minitems,ckh_hash_t * hash,ckh_keycomp_t * keycomp)350 ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
351     ckh_keycomp_t *keycomp)
352 {
353 	bool ret;
354 	size_t mincells, usize;
355 	unsigned lg_mincells;
356 
357 	assert(minitems > 0);
358 	assert(hash != NULL);
359 	assert(keycomp != NULL);
360 
361 #ifdef CKH_COUNT
362 	ckh->ngrows = 0;
363 	ckh->nshrinks = 0;
364 	ckh->nshrinkfails = 0;
365 	ckh->ninserts = 0;
366 	ckh->nrelocs = 0;
367 #endif
368 	ckh->prng_state = 42; /* Value doesn't really matter. */
369 	ckh->count = 0;
370 
371 	/*
372 	 * Find the minimum power of 2 that is large enough to fit minitems
373 	 * entries.  We are using (2+,2) cuckoo hashing, which has an expected
374 	 * maximum load factor of at least ~0.86, so 0.75 is a conservative load
375 	 * factor that will typically allow mincells items to fit without ever
376 	 * growing the table.
377 	 */
378 	assert(LG_CKH_BUCKET_CELLS > 0);
379 	mincells = ((minitems + (3 - (minitems % 3))) / 3) << 2;
380 	for (lg_mincells = LG_CKH_BUCKET_CELLS;
381 	    (ZU(1) << lg_mincells) < mincells;
382 	    lg_mincells++)
383 		; /* Do nothing. */
384 	ckh->lg_minbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
385 	ckh->lg_curbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
386 	ckh->hash = hash;
387 	ckh->keycomp = keycomp;
388 
389 	usize = sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE);
390 	if (usize == 0) {
391 		ret = true;
392 		goto label_return;
393 	}
394 	ckh->tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL, true,
395 	    NULL);
396 	if (ckh->tab == NULL) {
397 		ret = true;
398 		goto label_return;
399 	}
400 
401 	ret = false;
402 label_return:
403 	return (ret);
404 }
405 
406 void
ckh_delete(tsd_t * tsd,ckh_t * ckh)407 ckh_delete(tsd_t *tsd, ckh_t *ckh)
408 {
409 
410 	assert(ckh != NULL);
411 
412 #ifdef CKH_VERBOSE
413 	malloc_printf(
414 	    "%s(%p): ngrows: %"PRIu64", nshrinks: %"PRIu64","
415 	    " nshrinkfails: %"PRIu64", ninserts: %"PRIu64","
416 	    " nrelocs: %"PRIu64"\n", __func__, ckh,
417 	    (unsigned long long)ckh->ngrows,
418 	    (unsigned long long)ckh->nshrinks,
419 	    (unsigned long long)ckh->nshrinkfails,
420 	    (unsigned long long)ckh->ninserts,
421 	    (unsigned long long)ckh->nrelocs);
422 #endif
423 
424 	idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true);
425 	if (config_debug)
426 		memset(ckh, 0x5a, sizeof(ckh_t));
427 }
428 
/* Return the number of items currently stored in *ckh. */
size_t
ckh_count(ckh_t *ckh)
{

	assert(ckh != NULL);

	return (ckh->count);
}
437 
438 bool
ckh_iter(ckh_t * ckh,size_t * tabind,void ** key,void ** data)439 ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data)
440 {
441 	size_t i, ncells;
442 
443 	for (i = *tabind, ncells = (ZU(1) << (ckh->lg_curbuckets +
444 	    LG_CKH_BUCKET_CELLS)); i < ncells; i++) {
445 		if (ckh->tab[i].key != NULL) {
446 			if (key != NULL)
447 				*key = (void *)ckh->tab[i].key;
448 			if (data != NULL)
449 				*data = (void *)ckh->tab[i].data;
450 			*tabind = i + 1;
451 			return (false);
452 		}
453 	}
454 
455 	return (true);
456 }
457 
458 bool
ckh_insert(tsd_t * tsd,ckh_t * ckh,const void * key,const void * data)459 ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data)
460 {
461 	bool ret;
462 
463 	assert(ckh != NULL);
464 	assert(ckh_search(ckh, key, NULL, NULL));
465 
466 #ifdef CKH_COUNT
467 	ckh->ninserts++;
468 #endif
469 
470 	while (ckh_try_insert(ckh, &key, &data)) {
471 		if (ckh_grow(tsd, ckh)) {
472 			ret = true;
473 			goto label_return;
474 		}
475 	}
476 
477 	ret = false;
478 label_return:
479 	return (ret);
480 }
481 
482 bool
ckh_remove(tsd_t * tsd,ckh_t * ckh,const void * searchkey,void ** key,void ** data)483 ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
484     void **data)
485 {
486 	size_t cell;
487 
488 	assert(ckh != NULL);
489 
490 	cell = ckh_isearch(ckh, searchkey);
491 	if (cell != SIZE_T_MAX) {
492 		if (key != NULL)
493 			*key = (void *)ckh->tab[cell].key;
494 		if (data != NULL)
495 			*data = (void *)ckh->tab[cell].data;
496 		ckh->tab[cell].key = NULL;
497 		ckh->tab[cell].data = NULL; /* Not necessary. */
498 
499 		ckh->count--;
500 		/* Try to halve the table if it is less than 1/4 full. */
501 		if (ckh->count < (ZU(1) << (ckh->lg_curbuckets
502 		    + LG_CKH_BUCKET_CELLS - 2)) && ckh->lg_curbuckets
503 		    > ckh->lg_minbuckets) {
504 			/* Ignore error due to OOM. */
505 			ckh_shrink(tsd, ckh);
506 		}
507 
508 		return (false);
509 	}
510 
511 	return (true);
512 }
513 
514 bool
ckh_search(ckh_t * ckh,const void * searchkey,void ** key,void ** data)515 ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data)
516 {
517 	size_t cell;
518 
519 	assert(ckh != NULL);
520 
521 	cell = ckh_isearch(ckh, searchkey);
522 	if (cell != SIZE_T_MAX) {
523 		if (key != NULL)
524 			*key = (void *)ckh->tab[cell].key;
525 		if (data != NULL)
526 			*data = (void *)ckh->tab[cell].data;
527 		return (false);
528 	}
529 
530 	return (true);
531 }
532 
533 void
ckh_string_hash(const void * key,size_t r_hash[2])534 ckh_string_hash(const void *key, size_t r_hash[2])
535 {
536 
537 	hash(key, strlen((const char *)key), 0x94122f33U, r_hash);
538 }
539 
540 bool
ckh_string_keycomp(const void * k1,const void * k2)541 ckh_string_keycomp(const void *k1, const void *k2)
542 {
543 
544     assert(k1 != NULL);
545     assert(k2 != NULL);
546 
547     return (strcmp((char *)k1, (char *)k2) ? false : true);
548 }
549 
550 void
ckh_pointer_hash(const void * key,size_t r_hash[2])551 ckh_pointer_hash(const void *key, size_t r_hash[2])
552 {
553 	union {
554 		const void	*v;
555 		size_t		i;
556 	} u;
557 
558 	assert(sizeof(u.v) == sizeof(u.i));
559 	u.v = key;
560 	hash(&u.i, sizeof(u.i), 0xd983396eU, r_hash);
561 }
562 
/*
 * Key comparison function for pointer keys: two keys are equal iff they are
 * the same pointer.  (The redundant "(k1 == k2) ? true : false" ternary is
 * replaced with the comparison itself.)
 */
bool
ckh_pointer_keycomp(const void *k1, const void *k2)
{

	return (k1 == k2);
}
569