/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>
#include <linux/export.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4U
#define BUCKET_LOCKS_PER_CPU   128UL

/* Hash the head of an object to its bucket index in the given table. */
static u32 head_hashfn(struct rhashtable *ht,
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
{
	return rht_head_hashfn(ht, tbl, he, ht->p);
}

#ifdef CONFIG_PROVE_LOCKING
#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	spinlock_t *lock = rht_bucket_lock(tbl, hash);

	return (debug_locks) ? lockdep_is_held(lock) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#endif


/* Allocate and initialise the per-bucket lock array for @tbl. */
static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
			      gfp_t gfp)
{
	unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
	unsigned int nr_pcpus = 2;
#else
	unsigned int nr_pcpus = num_possible_cpus();
#endif

	nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);

	/* Never allocate more than 0.5 locks per bucket */
	size = min_t(unsigned int, size, tbl->size >> 1);

	if (sizeof(spinlock_t) != 0) {
#ifdef CONFIG_NUMA
		if (size * sizeof(spinlock_t) > PAGE_SIZE &&
		    gfp == GFP_KERNEL)
			tbl->locks = vmalloc(size * sizeof(spinlock_t));
		else
#endif
		tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
					   gfp);
		if (!tbl->locks)
			return -ENOMEM;
		for (i = 0; i < size; i++)
			spin_lock_init(&tbl->locks[i]);
	}
	tbl->locks_mask = size - 1;

	return 0;
}

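/* Worked example of the sizing above (illustrative, not part of the
 * original source): with the default locks_mul of 128 (BUCKET_LOCKS_PER_CPU)
 * and, say, 4 possible CPUs, roundup_pow_of_two(4 * 128) yields 512 candidate
 * locks. For the initial 64-bucket table the "0.5 locks per bucket" cap
 * (tbl->size >> 1) reduces this to 32 locks, so locks_mask ends up as 31 and
 * a bucket's lock is simply locks[hash & locks_mask].
 */
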
static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl)
		kvfree(tbl->locks);

	kvfree(tbl);
}

static void bucket_table_free_rcu(struct rcu_head *head)
{
	bucket_table_free(container_of(head, struct bucket_table, rcu));
}

static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets,
					       gfp_t gfp)
{
	struct bucket_table *tbl = NULL;
	size_t size;
	int i;

	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER) ||
	    gfp != GFP_KERNEL)
		tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY);
	if (tbl == NULL && gfp == GFP_KERNEL)
		tbl = vzalloc(size);
	if (tbl == NULL)
		return NULL;

	tbl->size = nbuckets;

	if (alloc_bucket_locks(ht, tbl, gfp) < 0) {
		bucket_table_free(tbl);
		return NULL;
	}

	INIT_LIST_HEAD(&tbl->walkers);

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);

	return tbl;
}

/* Follow the future_tbl chain and return the newest (last) table. */
static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
						  struct bucket_table *tbl)
{
	struct bucket_table *new_tbl;

	do {
		new_tbl = tbl;
		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	} while (tbl);

	return new_tbl;
}

/* Unlink the last entry of an old bucket chain and re-link it into the
 * corresponding bucket of the new (future) table.  Returns -ENOENT once
 * the old chain is empty.
 */
static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl = rhashtable_last_table(ht,
		rht_dereference_rcu(old_tbl->future_tbl, ht));
	struct rhash_head __rcu **pprev = &old_tbl->buckets[old_hash];
	int err = -ENOENT;
	struct rhash_head *head, *next, *entry;
	spinlock_t *new_bucket_lock;
	unsigned int new_hash;

	rht_for_each(entry, old_tbl, old_hash) {
		err = 0;
		next = rht_dereference_bucket(entry->next, old_tbl, old_hash);

		if (rht_is_a_nulls(next))
			break;

		pprev = &entry->next;
	}

	if (err)
		goto out;

	new_hash = head_hashfn(ht, new_tbl, entry);

	new_bucket_lock = rht_bucket_lock(new_tbl, new_hash);

	spin_lock_nested(new_bucket_lock, SINGLE_DEPTH_NESTING);
	head = rht_dereference_bucket(new_tbl->buckets[new_hash],
				      new_tbl, new_hash);

	RCU_INIT_POINTER(entry->next, head);

	rcu_assign_pointer(new_tbl->buckets[new_hash], entry);
	spin_unlock(new_bucket_lock);

	rcu_assign_pointer(*pprev, next);

out:
	return err;
}

/* Rehash every entry in one old bucket, then count that bucket as done. */
static void rhashtable_rehash_chain(struct rhashtable *ht,
				    unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	spinlock_t *old_bucket_lock;

	old_bucket_lock = rht_bucket_lock(old_tbl, old_hash);

	spin_lock_bh(old_bucket_lock);
	while (!rhashtable_rehash_one(ht, old_hash))
		;
	old_tbl->rehash++;
	spin_unlock_bh(old_bucket_lock);
}

static int rhashtable_rehash_attach(struct rhashtable *ht,
				    struct bucket_table *old_tbl,
				    struct bucket_table *new_tbl)
{
	/* Protect future_tbl using the first bucket lock. */
	spin_lock_bh(old_tbl->locks);

	/* Did somebody beat us to it? */
	if (rcu_access_pointer(old_tbl->future_tbl)) {
		spin_unlock_bh(old_tbl->locks);
		return -EEXIST;
	}

	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 */
	rcu_assign_pointer(old_tbl->future_tbl, new_tbl);

	/* Ensure the new table is visible to readers. */
	smp_wmb();

	spin_unlock_bh(old_tbl->locks);

	return 0;
}

static int rhashtable_rehash_table(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl;
	struct rhashtable_walker *walker;
	unsigned int old_hash;

	new_tbl = rht_dereference(old_tbl->future_tbl, ht);
	if (!new_tbl)
		return 0;

	for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
		rhashtable_rehash_chain(ht, old_hash);
		cond_resched();
	}

	/* Publish the new table pointer. */
	rcu_assign_pointer(ht->tbl, new_tbl);

	spin_lock(&ht->lock);
	list_for_each_entry(walker, &old_tbl->walkers, list)
		walker->tbl = NULL;
	spin_unlock(&ht->lock);

	/* Wait for readers. All new readers will see the new
	 * table, and thus no references to the old table will
	 * remain.
	 */
	call_rcu(&old_tbl->rcu, bucket_table_free_rcu);

	return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
}

/**
 * rhashtable_expand - Expand hash table while allowing concurrent lookups
 * @ht:		the hash table to expand
 *
 * A secondary bucket array is allocated and the hash entries are migrated.
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within an rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_expand(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	int err;

	ASSERT_RHT_MUTEX(ht);

	old_tbl = rhashtable_last_table(ht, old_tbl);

	new_tbl = bucket_table_alloc(ht, old_tbl->size * 2, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}

/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:		the hash table to shrink
 *
 * This function shrinks the hash table to the smallest size that would not
 * cause it to expand right away automatically.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * The caller must ensure that no concurrent table mutations take place.
 * It is however valid to have concurrent lookups if they are RCU protected.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	unsigned int size;
	int err;

	ASSERT_RHT_MUTEX(ht);

	size = roundup_pow_of_two(atomic_read(&ht->nelems) * 3 / 2);
	if (size < ht->p.min_size)
		size = ht->p.min_size;

	if (old_tbl->size <= size)
		return 0;

	if (rht_dereference(old_tbl->future_tbl, ht))
		return -EEXIST;

	new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}

/* Deferred work: grow or shrink as required, then complete any pending
 * rehash.  Reschedules itself if the rehash could not finish in one pass.
 */
static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;
	int err = 0;

	ht = container_of(work, struct rhashtable, run_work);
	mutex_lock(&ht->mutex);

	tbl = rht_dereference(ht->tbl, ht);
	tbl = rhashtable_last_table(ht, tbl);

	if (rht_grow_above_75(ht, tbl))
		rhashtable_expand(ht);
	else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
		rhashtable_shrink(ht);

	err = rhashtable_rehash_table(ht);

	mutex_unlock(&ht->mutex);

	if (err)
		schedule_work(&ht->run_work);
}

/* Return true if the chain at @hash has reached the elasticity limit. */
static bool rhashtable_check_elasticity(struct rhashtable *ht,
					struct bucket_table *tbl,
					unsigned int hash)
{
	unsigned int elasticity = ht->elasticity;
	struct rhash_head *head;

	rht_for_each(head, tbl, hash)
		if (!--elasticity)
			return true;

	return false;
}

int rhashtable_insert_rehash(struct rhashtable *ht,
			     struct bucket_table *tbl)
{
	struct bucket_table *old_tbl;
	struct bucket_table *new_tbl;
	unsigned int size;
	int err;

	old_tbl = rht_dereference_rcu(ht->tbl, ht);

	size = tbl->size;

	err = -EBUSY;

	if (rht_grow_above_75(ht, tbl))
		size *= 2;
	/* Do not schedule more than one rehash */
	else if (old_tbl != tbl)
		goto fail;

	err = -ENOMEM;

	new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC);
	if (new_tbl == NULL)
		goto fail;

	err = rhashtable_rehash_attach(ht, tbl, new_tbl);
	if (err) {
		bucket_table_free(new_tbl);
		if (err == -EEXIST)
			err = 0;
	} else
		schedule_work(&ht->run_work);

	return err;

fail:
	/* Do not fail the insert if someone else did a rehash. */
	if (likely(rcu_dereference_raw(tbl->future_tbl)))
		return 0;

	/* Schedule async rehash to retry allocation in process context. */
	if (err == -ENOMEM)
		schedule_work(&ht->run_work);

	return err;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_rehash);

struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht,
					    const void *key,
					    struct rhash_head *obj,
					    struct bucket_table *tbl,
					    void **data)
{
	struct rhash_head *head;
	unsigned int hash;
	int err;

	tbl = rhashtable_last_table(ht, tbl);
	hash = head_hashfn(ht, tbl, obj);
	spin_lock_nested(rht_bucket_lock(tbl, hash), SINGLE_DEPTH_NESTING);

	err = -EEXIST;
	if (key) {
		*data = rhashtable_lookup_fast(ht, key, ht->p);
		if (*data)
			goto exit;
	}

	err = -E2BIG;
	if (unlikely(rht_grow_above_max(ht, tbl)))
		goto exit;

	err = -EAGAIN;
	if (rhashtable_check_elasticity(ht, tbl, hash) ||
	    rht_grow_above_100(ht, tbl))
		goto exit;

	err = 0;

	head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);

	RCU_INIT_POINTER(obj->next, head);

	rcu_assign_pointer(tbl->buckets[hash], obj);

	atomic_inc(&ht->nelems);

exit:
	spin_unlock(rht_bucket_lock(tbl, hash));

	if (err == 0)
		return NULL;
	else if (err == -EAGAIN)
		return tbl;
	else
		return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(rhashtable_insert_slow);

/**
 * rhashtable_walk_init - Initialise an iterator
 * @ht:		Table to walk over
 * @iter:	Hash table iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice.  Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may sleep so you must not call it from interrupt
 * context or with spin locks held.
 *
 * You must call rhashtable_walk_exit if this function returns
 * successfully.
 */
int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
{
	iter->ht = ht;
	iter->p = NULL;
	iter->slot = 0;
	iter->skip = 0;

	iter->walker = kmalloc(sizeof(*iter->walker), GFP_KERNEL);
	if (!iter->walker)
		return -ENOMEM;

	spin_lock(&ht->lock);
	iter->walker->tbl =
		rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock));
	list_add(&iter->walker->list, &iter->walker->tbl->walkers);
	spin_unlock(&ht->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_init);

/**
 * rhashtable_walk_exit - Free an iterator
 * @iter:	Hash table iterator
 *
 * This function frees resources allocated by rhashtable_walk_init.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
	spin_lock(&iter->ht->lock);
	if (iter->walker->tbl)
		list_del(&iter->walker->list);
	spin_unlock(&iter->ht->lock);
	kfree(iter->walker);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);

/**
 * rhashtable_walk_start - Start a hash table walk
 * @iter:	Hash table iterator
 *
 * Start a hash table walk.  Note that we take the RCU lock in all
 * cases including when we return an error.  So you must always call
 * rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 */
int rhashtable_walk_start(struct rhashtable_iter *iter)
	__acquires(RCU)
{
	struct rhashtable *ht = iter->ht;

	rcu_read_lock();

	spin_lock(&ht->lock);
	if (iter->walker->tbl)
		list_del(&iter->walker->list);
	spin_unlock(&ht->lock);

	if (!iter->walker->tbl) {
		iter->walker->tbl = rht_dereference_rcu(ht->tbl, ht);
		return -EAGAIN;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start);

/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter:	Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
	struct bucket_table *tbl = iter->walker->tbl;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;

	if (p) {
		p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot);
		goto next;
	}

	for (; iter->slot < tbl->size; iter->slot++) {
		int skip = iter->skip;

		rht_for_each_rcu(p, tbl, iter->slot) {
			if (!skip)
				break;
			skip--;
		}

next:
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			return rht_obj(ht, p);
		}

		iter->skip = 0;
	}

	iter->p = NULL;

	/* Ensure we see any new tables. */
	smp_rmb();

	iter->walker->tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (iter->walker->tbl) {
		iter->slot = 0;
		iter->skip = 0;
		return ERR_PTR(-EAGAIN);
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);

/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter:	Hash table iterator
 *
 * Finish a hash table walk.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
	__releases(RCU)
{
	struct rhashtable *ht;
	struct bucket_table *tbl = iter->walker->tbl;

	if (!tbl)
		goto out;

	ht = iter->ht;

	spin_lock(&ht->lock);
	if (tbl->rehash < tbl->size)
		list_add(&iter->walker->list, &tbl->walkers);
	else
		iter->walker->tbl = NULL;
	spin_unlock(&ht->lock);

	iter->p = NULL;

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
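
/* A minimal walk-loop sketch (illustrative only, not part of the original
 * source). It shows how the init/start/next/stop/exit calls above fit
 * together; it assumes a table of "struct test_obj" entries as in the
 * rhashtable_init() examples further below, and the locals ht, iter, obj
 * and err are hypothetical.
 *
 *	struct rhashtable_iter iter;
 *	struct test_obj *obj;
 *	int err;
 *
 *	err = rhashtable_walk_init(&ht, &iter);
 *	if (err)
 *		return err;
 *
 *	err = rhashtable_walk_start(&iter);
 *	if (err == -EAGAIN)
 *		err = 0;	(a resize happened first; the walk simply restarts)
 *
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj)) {
 *			if (PTR_ERR(obj) == -EAGAIN)
 *				continue;	(table resized; iterator rewound)
 *			break;
 *		}
 *		(... inspect obj; do not sleep here, the walk runs under RCU ...)
 *	}
 *
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 */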

static size_t rounded_hashtable_size(const struct rhashtable_params *params)
{
	size_t retsize;

	if (params->nelem_hint)
		retsize = max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
			      (unsigned long)params->min_size);
	else
		retsize = max(HASH_DEFAULT_SIZE,
			      (unsigned long)params->min_size);

	return retsize;
}

static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
{
	return jhash2(key, length, seed);
}

/**
 * rhashtable_init - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 *	.nulls_base = (1U << RHT_BASE_SHIFT),
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 len, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht,
		    const struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	if ((!params->key_len && !params->obj_hashfn) ||
	    (params->obj_hashfn && !params->obj_cmpfn))
		return -EINVAL;

	if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
		return -EINVAL;

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	spin_lock_init(&ht->lock);
	memcpy(&ht->p, params, sizeof(*params));

	if (params->min_size)
		ht->p.min_size = roundup_pow_of_two(params->min_size);

	if (params->max_size)
		ht->p.max_size = rounddown_pow_of_two(params->max_size);

	if (params->insecure_max_entries)
		ht->p.insecure_max_entries =
			rounddown_pow_of_two(params->insecure_max_entries);
	else
		ht->p.insecure_max_entries = ht->p.max_size * 2;

	ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE);

	size = rounded_hashtable_size(&ht->p);

	/* The maximum (not average) chain length grows with the
	 * size of the hash table, at a rate of (log N)/(log log N).
	 * The value of 16 is selected so that even if the hash
	 * table grew to 2^32 you would not expect the maximum
	 * chain length to exceed it unless we are under attack
	 * (or extremely unlucky).
	 *
	 * As this limit is only to detect attacks, we don't need
	 * to set it to a lower value as you'd need the chain
	 * length to vastly exceed 16 to have any real effect
	 * on the system.
	 */
	if (!params->insecure_elasticity)
		ht->elasticity = 16;

	if (params->locks_mul)
		ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
	else
		ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

	ht->key_len = ht->p.key_len;
	if (!params->hashfn) {
		ht->p.hashfn = jhash;

		if (!(ht->key_len & (sizeof(u32) - 1))) {
			ht->key_len /= sizeof(u32);
			ht->p.hashfn = rhashtable_jhash2;
		}
	}

	tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (tbl == NULL)
		return -ENOMEM;

	atomic_set(&ht->nelems, 0);

	RCU_INIT_POINTER(ht->tbl, tbl);

	INIT_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);
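
/* Usage sketch (illustrative, not part of the original source): a typical
 * caller defines its object and params as in Configuration Example 1 above,
 * then inserts and looks up entries with the fast-path helpers from
 * <linux/rhashtable.h>. The variables ht, obj and key are hypothetical.
 *
 *	struct rhashtable ht;
 *	struct test_obj *obj;
 *	int key = 42, err;
 *
 *	err = rhashtable_init(&ht, &params);
 *	if (err)
 *		return err;
 *
 *	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *	obj->key = key;
 *	err = rhashtable_insert_fast(&ht, &obj->node, params);
 *
 *	rcu_read_lock();
 *	obj = rhashtable_lookup_fast(&ht, &key, params);
 *	rcu_read_unlock();
 */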

/**
 * rhashtable_free_and_destroy - free elements and destroy hash table
 * @ht:		the hash table to destroy
 * @free_fn:	callback to release resources of element
 * @arg:	pointer passed to free_fn
 *
 * Stops an eventual async resize. If defined, invokes free_fn for each
 * element to release its resources. Please note that RCU protected
 * readers may still be accessing the elements. Releasing of resources
 * must occur in a compatible manner. Then frees the bucket array.
 *
 * This function will eventually sleep to wait for an async resize
 * to complete. The caller is responsible for ensuring that no further
 * write operations occur in parallel.
 */
void rhashtable_free_and_destroy(struct rhashtable *ht,
				 void (*free_fn)(void *ptr, void *arg),
				 void *arg)
{
	const struct bucket_table *tbl;
	unsigned int i;

	cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	tbl = rht_dereference(ht->tbl, ht);
	if (free_fn) {
		for (i = 0; i < tbl->size; i++) {
			struct rhash_head *pos, *next;

			cond_resched();
			for (pos = rht_dereference(tbl->buckets[i], ht),
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL;
			     !rht_is_a_nulls(pos);
			     pos = next,
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL)
				free_fn(rht_obj(ht, pos), arg);
		}
	}

	bucket_table_free(tbl);
	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);
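
/* Teardown sketch (illustrative, not part of the original source): a caller
 * that owns kmalloc'ed entries and has quiesced all readers can release them
 * from the free_fn callback. free_obj and ht are hypothetical names; if RCU
 * readers could still hold references, kfree_rcu() on an embedded rcu_head
 * would be needed instead of kfree().
 *
 *	static void free_obj(void *ptr, void *arg)
 *	{
 *		kfree(ptr);
 *	}
 *
 *	rhashtable_free_and_destroy(&ht, free_obj, NULL);
 */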

void rhashtable_destroy(struct rhashtable *ht)
{
	return rhashtable_free_and_destroy(ht, NULL, NULL);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);