/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>
#include <linux/export.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4U
#define BUCKET_LOCKS_PER_CPU   128UL

static u32 head_hashfn(struct rhashtable *ht,
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
{
	return rht_head_hashfn(ht, tbl, he, ht->p);
}

#ifdef CONFIG_PROVE_LOCKING
#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	spinlock_t *lock = rht_bucket_lock(tbl, hash);

	return (debug_locks) ? lockdep_is_held(lock) : 1;
}
#else
#define ASSERT_RHT_MUTEX(HT)
#endif


static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
			      gfp_t gfp)
{
	unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
	unsigned int nr_pcpus = 2;
#else
	unsigned int nr_pcpus = num_possible_cpus();
#endif

	nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);

	/* Never allocate more than 0.5 locks per bucket */
	size = min_t(unsigned int, size, tbl->size >> 1);

	if (sizeof(spinlock_t) != 0) {
#ifdef CONFIG_NUMA
		if (size * sizeof(spinlock_t) > PAGE_SIZE &&
		    gfp == GFP_KERNEL)
			tbl->locks = vmalloc(size * sizeof(spinlock_t));
		else
#endif
		tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
					   gfp);
		if (!tbl->locks)
			return -ENOMEM;
		for (i = 0; i < size; i++)
			spin_lock_init(&tbl->locks[i]);
	}
	tbl->locks_mask = size - 1;

	return 0;
}

static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl)
		kvfree(tbl->locks);

	kvfree(tbl);
}

static void bucket_table_free_rcu(struct rcu_head *head)
{
	bucket_table_free(container_of(head, struct bucket_table, rcu));
}

static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets,
					       gfp_t gfp)
{
	struct bucket_table *tbl = NULL;
	size_t size;
	int i;

	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER) ||
	    gfp != GFP_KERNEL)
		tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY);
	if (tbl == NULL && gfp == GFP_KERNEL)
		tbl = vzalloc(size);
	if (tbl == NULL)
		return NULL;

	tbl->size = nbuckets;

	if (alloc_bucket_locks(ht, tbl, gfp) < 0) {
		bucket_table_free(tbl);
		return NULL;
	}

	INIT_LIST_HEAD(&tbl->walkers);

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);

	return tbl;
}

static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
						  struct bucket_table *tbl)
{
	struct bucket_table *new_tbl;

	do {
		new_tbl = tbl;
		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	} while (tbl);

	return new_tbl;
}

static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl = rhashtable_last_table(ht,
		rht_dereference_rcu(old_tbl->future_tbl, ht));
	struct rhash_head __rcu **pprev = &old_tbl->buckets[old_hash];
	int err = -ENOENT;
	struct rhash_head *head, *next, *entry;
	spinlock_t *new_bucket_lock;
	unsigned int new_hash;

	rht_for_each(entry, old_tbl, old_hash) {
		err = 0;
		next = rht_dereference_bucket(entry->next, old_tbl, old_hash);

		if (rht_is_a_nulls(next))
			break;

		pprev = &entry->next;
	}

	if (err)
		goto out;

	new_hash = head_hashfn(ht, new_tbl, entry);

	new_bucket_lock = rht_bucket_lock(new_tbl, new_hash);

	spin_lock_nested(new_bucket_lock, SINGLE_DEPTH_NESTING);
	head = rht_dereference_bucket(new_tbl->buckets[new_hash],
				      new_tbl, new_hash);

	RCU_INIT_POINTER(entry->next, head);

	rcu_assign_pointer(new_tbl->buckets[new_hash], entry);
	spin_unlock(new_bucket_lock);

	rcu_assign_pointer(*pprev, next);

out:
	return err;
}

static void rhashtable_rehash_chain(struct rhashtable *ht,
				    unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	spinlock_t *old_bucket_lock;

	old_bucket_lock = rht_bucket_lock(old_tbl, old_hash);

	spin_lock_bh(old_bucket_lock);
	while (!rhashtable_rehash_one(ht, old_hash))
		;
	old_tbl->rehash++;
	spin_unlock_bh(old_bucket_lock);
}

static int rhashtable_rehash_attach(struct rhashtable *ht,
				    struct bucket_table *old_tbl,
				    struct bucket_table *new_tbl)
{
	/* Protect future_tbl using the first bucket lock. */
	spin_lock_bh(old_tbl->locks);

	/* Did somebody beat us to it? */
	if (rcu_access_pointer(old_tbl->future_tbl)) {
		spin_unlock_bh(old_tbl->locks);
		return -EEXIST;
	}

	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 */
	rcu_assign_pointer(old_tbl->future_tbl, new_tbl);

	/* Ensure the new table is visible to readers. */
	smp_wmb();

	spin_unlock_bh(old_tbl->locks);

	return 0;
}

static int rhashtable_rehash_table(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl;
	struct rhashtable_walker *walker;
	unsigned int old_hash;

	new_tbl = rht_dereference(old_tbl->future_tbl, ht);
	if (!new_tbl)
		return 0;

	for (old_hash = 0; old_hash < old_tbl->size; old_hash++)
		rhashtable_rehash_chain(ht, old_hash);

	/* Publish the new table pointer. */
	rcu_assign_pointer(ht->tbl, new_tbl);

	spin_lock(&ht->lock);
	list_for_each_entry(walker, &old_tbl->walkers, list)
		walker->tbl = NULL;
	spin_unlock(&ht->lock);

	/* Wait for readers. All new readers will see the new
	 * table, and thus no references to the old table will
	 * remain.
	 */
	call_rcu(&old_tbl->rcu, bucket_table_free_rcu);

	return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
}

/**
 * rhashtable_expand - Expand hash table while allowing concurrent lookups
 * @ht:		the hash table to expand
 *
 * A secondary bucket array is allocated and the hash entries are migrated.
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_expand(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	int err;

	ASSERT_RHT_MUTEX(ht);

	old_tbl = rhashtable_last_table(ht, old_tbl);

	new_tbl = bucket_table_alloc(ht, old_tbl->size * 2, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}

/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:		the hash table to shrink
 *
 * This function shrinks the hash table to fit, i.e., to the smallest
 * size that would not immediately trigger an automatic expansion.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * The caller must ensure that no concurrent table mutations take place.
 * It is however valid to have concurrent lookups if they are RCU protected.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	unsigned int size;
	int err;

	ASSERT_RHT_MUTEX(ht);

	size = roundup_pow_of_two(atomic_read(&ht->nelems) * 3 / 2);
	if (size < ht->p.min_size)
		size = ht->p.min_size;

	if (old_tbl->size <= size)
		return 0;

	if (rht_dereference(old_tbl->future_tbl, ht))
		return -EEXIST;

	new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}

static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;
	int err = 0;

	ht = container_of(work, struct rhashtable, run_work);
	mutex_lock(&ht->mutex);

	tbl = rht_dereference(ht->tbl, ht);
	tbl = rhashtable_last_table(ht, tbl);

	if (rht_grow_above_75(ht, tbl))
		rhashtable_expand(ht);
	else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
		rhashtable_shrink(ht);

	err = rhashtable_rehash_table(ht);

	mutex_unlock(&ht->mutex);

	if (err)
		schedule_work(&ht->run_work);
}

static bool rhashtable_check_elasticity(struct rhashtable *ht,
					struct bucket_table *tbl,
					unsigned int hash)
{
	unsigned int elasticity = ht->elasticity;
	struct rhash_head *head;

	rht_for_each(head, tbl, hash)
		if (!--elasticity)
			return true;

	return false;
}

int rhashtable_insert_rehash(struct rhashtable *ht,
			     struct bucket_table *tbl)
{
	struct bucket_table *old_tbl;
	struct bucket_table *new_tbl;
	unsigned int size;
	int err;

	old_tbl = rht_dereference_rcu(ht->tbl, ht);

	size = tbl->size;

	err = -EBUSY;

	if (rht_grow_above_75(ht, tbl))
		size *= 2;
	/* Do not schedule more than one rehash */
	else if (old_tbl != tbl)
		goto fail;

	err = -ENOMEM;

	new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC);
	if (new_tbl == NULL)
		goto fail;

	err = rhashtable_rehash_attach(ht, tbl, new_tbl);
	if (err) {
		bucket_table_free(new_tbl);
		if (err == -EEXIST)
			err = 0;
	} else
		schedule_work(&ht->run_work);

	return err;

fail:
	/* Do not fail the insert if someone else did a rehash. */
	if (likely(rcu_dereference_raw(tbl->future_tbl)))
		return 0;

	/* Schedule async rehash to retry allocation in process context. */
	if (err == -ENOMEM)
		schedule_work(&ht->run_work);

	return err;
}

struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht,
					    const void *key,
					    struct rhash_head *obj,
					    struct bucket_table *tbl)
{
	struct rhash_head *head;
	unsigned int hash;
	int err;

	tbl = rhashtable_last_table(ht, tbl);
	hash = head_hashfn(ht, tbl, obj);
	spin_lock_nested(rht_bucket_lock(tbl, hash), SINGLE_DEPTH_NESTING);

	err = -EEXIST;
	if (key && rhashtable_lookup_fast(ht, key, ht->p))
		goto exit;

	err = -E2BIG;
	if (unlikely(rht_grow_above_max(ht, tbl)))
		goto exit;

	err = -EAGAIN;
	if (rhashtable_check_elasticity(ht, tbl, hash) ||
	    rht_grow_above_100(ht, tbl))
		goto exit;

	err = 0;

	head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);

	RCU_INIT_POINTER(obj->next, head);

	rcu_assign_pointer(tbl->buckets[hash], obj);

	atomic_inc(&ht->nelems);

exit:
	spin_unlock(rht_bucket_lock(tbl, hash));

	if (err == 0)
		return NULL;
	else if (err == -EAGAIN)
		return tbl;
	else
		return ERR_PTR(err);
}

/**
 * rhashtable_walk_init - Initialise an iterator
 * @ht:		Table to walk over
 * @iter:	Hash table Iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice.  Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may sleep so you must not call it from interrupt
 * context or with spin locks held.
 *
 * You must call rhashtable_walk_exit if this function returns
 * successfully.
 */
int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
{
	iter->ht = ht;
	iter->p = NULL;
	iter->slot = 0;
	iter->skip = 0;

	iter->walker = kmalloc(sizeof(*iter->walker), GFP_KERNEL);
	if (!iter->walker)
		return -ENOMEM;

	spin_lock(&ht->lock);
	iter->walker->tbl =
		rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock));
	list_add(&iter->walker->list, &iter->walker->tbl->walkers);
	spin_unlock(&ht->lock);

	return 0;
}

/**
 * rhashtable_walk_exit - Free an iterator
 * @iter:	Hash table Iterator
 *
 * This function frees resources allocated by rhashtable_walk_init.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
	spin_lock(&iter->ht->lock);
	if (iter->walker->tbl)
		list_del(&iter->walker->list);
	spin_unlock(&iter->ht->lock);
	kfree(iter->walker);
}

/**
 * rhashtable_walk_start - Start a hash table walk
 * @iter:	Hash table iterator
 *
 * Start a hash table walk.  Note that we take the RCU lock in all
 * cases including when we return an error.  So you must always call
 * rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 */
int rhashtable_walk_start(struct rhashtable_iter *iter)
	__acquires(RCU)
{
	struct rhashtable *ht = iter->ht;

	rcu_read_lock();

	spin_lock(&ht->lock);
	if (iter->walker->tbl)
		list_del(&iter->walker->list);
	spin_unlock(&ht->lock);

	if (!iter->walker->tbl) {
		iter->walker->tbl = rht_dereference_rcu(ht->tbl, ht);
		return -EAGAIN;
	}

	return 0;
}

/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter:	Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
	struct bucket_table *tbl = iter->walker->tbl;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;

	if (p) {
		p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot);
		goto next;
	}

	for (; iter->slot < tbl->size; iter->slot++) {
		int skip = iter->skip;

		rht_for_each_rcu(p, tbl, iter->slot) {
			if (!skip)
				break;
			skip--;
		}

next:
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			return rht_obj(ht, p);
		}

		iter->skip = 0;
	}

	iter->p = NULL;

	/* Ensure we see any new tables. */
	smp_rmb();

	iter->walker->tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (iter->walker->tbl) {
		iter->slot = 0;
		iter->skip = 0;
		return ERR_PTR(-EAGAIN);
	}

	return NULL;
}

/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter:	Hash table iterator
 *
 * Finish a hash table walk.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
	__releases(RCU)
{
	struct rhashtable *ht;
	struct bucket_table *tbl = iter->walker->tbl;

	if (!tbl)
		goto out;

	ht = iter->ht;

	spin_lock(&ht->lock);
	if (tbl->rehash < tbl->size)
		list_add(&iter->walker->list, &tbl->walkers);
	else
		iter->walker->tbl = NULL;
	spin_unlock(&ht->lock);

	iter->p = NULL;

out:
	rcu_read_unlock();
}
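
/*
 * Typical walker usage (illustrative sketch, not part of the original
 * file): the calling sequence implied by the kernel-doc above.  A resize
 * during the walk is reported as ERR_PTR(-EAGAIN) from
 * rhashtable_walk_next(); the iterator has already been rewound, so the
 * caller can simply keep iterating, tolerating duplicates.  "ht" and
 * "obj" are placeholder names.
 *
 *	struct rhashtable_iter iter;
 *	void *obj;
 *
 *	rhashtable_walk_init(ht, &iter);	(check for -ENOMEM in real code)
 *
 *	rhashtable_walk_start(&iter);		(-EAGAIN here only means the
 *						 table was resized beforehand)
 *
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj)) {
 *			if (PTR_ERR(obj) == -EAGAIN)
 *				continue;
 *			break;
 *		}
 *		(use the object returned by rht_obj())
 *	}
 *
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 */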

static size_t rounded_hashtable_size(const struct rhashtable_params *params)
{
	return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
		   (unsigned long)params->min_size);
}

static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
{
	return jhash2(key, length, seed);
}

/**
 * rhashtable_init - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 *	.nulls_base = (1U << RHT_BASE_SHIFT),
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 len, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht,
		    const struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	size = HASH_DEFAULT_SIZE;

	if ((!params->key_len && !params->obj_hashfn) ||
	    (params->obj_hashfn && !params->obj_cmpfn))
		return -EINVAL;

	if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
		return -EINVAL;

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	spin_lock_init(&ht->lock);
	memcpy(&ht->p, params, sizeof(*params));

	if (params->min_size)
		ht->p.min_size = roundup_pow_of_two(params->min_size);

	if (params->max_size)
		ht->p.max_size = rounddown_pow_of_two(params->max_size);

	if (params->insecure_max_entries)
		ht->p.insecure_max_entries =
			rounddown_pow_of_two(params->insecure_max_entries);
	else
		ht->p.insecure_max_entries = ht->p.max_size * 2;

	ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE);

	if (params->nelem_hint)
		size = rounded_hashtable_size(&ht->p);

	/* The maximum (not average) chain length grows with the
	 * size of the hash table, at a rate of (log N)/(log log N).
	 * The value of 16 is selected so that even if the hash
	 * table grew to 2^32 you would not expect the maximum
	 * chain length to exceed it unless we are under attack
	 * (or extremely unlucky).
	 *
	 * As this limit is only to detect attacks, we don't need
	 * to set it to a lower value as you'd need the chain
	 * length to vastly exceed 16 to have any real effect
	 * on the system.
	 */
	if (!params->insecure_elasticity)
		ht->elasticity = 16;

	if (params->locks_mul)
		ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
	else
		ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

	ht->key_len = ht->p.key_len;
	if (!params->hashfn) {
		ht->p.hashfn = jhash;

		if (!(ht->key_len & (sizeof(u32) - 1))) {
			ht->key_len /= sizeof(u32);
			ht->p.hashfn = rhashtable_jhash2;
		}
	}

	tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (tbl == NULL)
		return -ENOMEM;

	atomic_set(&ht->nelems, 0);

	RCU_INIT_POINTER(ht->tbl, tbl);

	INIT_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
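
/*
 * Usage sketch (illustrative, not part of the original file): creating,
 * populating and querying a table with the fixed-length-key
 * configuration from Example 1 above, via the inline helpers declared
 * in <linux/rhashtable.h>.  "obj", "ht" and "params" are placeholder
 * names.
 *
 *	struct test_obj obj = { .key = 1 };
 *	struct test_obj *found;
 *	struct rhashtable ht;
 *	int err;
 *
 *	err = rhashtable_init(&ht, &params);
 *	if (err)
 *		return err;
 *
 *	err = rhashtable_insert_fast(&ht, &obj.node, params);
 *	if (!err) {
 *		found = rhashtable_lookup_fast(&ht, &obj.key, params);
 *		(found now points back at obj)
 *		rhashtable_remove_fast(&ht, &obj.node, params);
 *	}
 *
 *	rhashtable_destroy(&ht);
 */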

/**
 * rhashtable_free_and_destroy - free elements and destroy hash table
 * @ht:		the hash table to destroy
 * @free_fn:	callback to release resources of element
 * @arg:	pointer passed to free_fn
 *
 * Stops an eventual async resize. If defined, invokes free_fn for each
 * element to release resources. Please note that RCU protected
 * readers may still be accessing the elements. Releasing of resources
 * must occur in a compatible manner. Then frees the bucket array.
 *
 * This function will eventually sleep to wait for an async resize
 * to complete. The caller is responsible for ensuring that no further
 * write operations occur in parallel.
 */
void rhashtable_free_and_destroy(struct rhashtable *ht,
				 void (*free_fn)(void *ptr, void *arg),
				 void *arg)
{
	const struct bucket_table *tbl;
	unsigned int i;

	cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	tbl = rht_dereference(ht->tbl, ht);
	if (free_fn) {
		for (i = 0; i < tbl->size; i++) {
			struct rhash_head *pos, *next;

			for (pos = rht_dereference(tbl->buckets[i], ht),
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL;
			     !rht_is_a_nulls(pos);
			     pos = next,
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL)
				free_fn(rht_obj(ht, pos), arg);
		}
	}

	bucket_table_free(tbl);
	mutex_unlock(&ht->mutex);
}
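
/*
 * Teardown sketch (illustrative, not part of the original file): the
 * free_fn callback is handed each object pointer as produced by
 * rht_obj() together with the opaque arg passed to
 * rhashtable_free_and_destroy().  "example_free_fn" and "ht" are
 * placeholder names.
 *
 *	static void example_free_fn(void *ptr, void *arg)
 *	{
 *		kfree(ptr);
 *	}
 *
 *	rhashtable_free_and_destroy(&ht, example_free_fn, NULL);
 */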

void rhashtable_destroy(struct rhashtable *ht)
{
	return rhashtable_free_and_destroy(ht, NULL, NULL);
}