1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2011, 2012, Intel Corporation.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * libcfs/libcfs/hash.c
37  *
38  * Implements a hash class used for hashing in the Lustre system.
39  *
40  * Author: YuZhangyong <yzy@clusterfs.com>
41  *
42  * 2008-08-15: Brian Behlendorf <behlendorf1@llnl.gov>
43  * - Simplified API and improved documentation
44  * - Added per-hash feature flags:
45  *   * CFS_HASH_DEBUG additional validation
46  *   * CFS_HASH_REHASH dynamic rehashing
47  * - Added per-hash statistics
48  * - General performance enhancements
49  *
50  * 2009-07-31: Liang Zhen <zhen.liang@sun.com>
51  * - move all stuff to libcfs
52  * - don't allow cur_bits != max_bits without setting of CFS_HASH_REHASH
53  * - ignore hs_rwlock if without CFS_HASH_REHASH setting
54  * - buckets are allocated one by one (instead of as contiguous memory),
55  *   to avoid unnecessary cacheline conflicts
56  *
57  * 2010-03-01: Liang Zhen <zhen.liang@sun.com>
58  * - "bucket" is a group of hlist_head now, user can specify bucket size
59  *   by bkt_bits of cfs_hash_create(), all hlist_heads in a bucket share
60  *   one lock for reducing memory overhead.
61  *
62  * - support lockless hash, caller will take care of locks:
63  *   avoid lock overhead for hash tables that are already protected
64  *   by locking in the caller for another reason
65  *
66  * - support both spin_lock/rwlock for bucket:
67  *   overhead of spinlock contention is lower than read/write
68  *   contention of rwlock, so using spinlock to serialize operations on
69  *   bucket is more reasonable for those frequently changed hash tables
70  *
71  * - support one-single lock mode:
72  *   one lock to protect all hash operations to avoid overhead of
73  *   multiple locks if hash table is always small
74  *
75  * - removed a lot of unnecessary addref & decref on hash element:
76  *   addref & decref are atomic operations in many use-cases which
77  *   are expensive.
78  *
79  * - support non-blocking cfs_hash_add() and cfs_hash_findadd():
80  *   some Lustre use-cases require these functions to be strictly
81  *   non-blocking; we need to schedule the required rehash on a
82  *   different thread in those cases.
83  *
84  * - safer rehash on large hash tables
85  *   In the old implementation, the rehash function would exclusively lock
86  *   the hash table and finish the rehash in one batch; that is dangerous
87  *   on an SMP system because rehashing millions of elements can take a
88  *   long time.  The new rehash implementation can release the lock and
89  *   relax the CPU in the middle of a rehash, so it is safe for another
90  *   thread to search/change the hash table even while it is rehashing.
91  *
92  * - support two different refcount modes
93  *   . hash table has refcount on element
94  *   . hash table doesn't change refcount on adding/removing element
95  *
96  * - support long name hash table (for param-tree)
97  *
98  * - fix a bug in cfs_hash_rehash_key:
99  *   in the old implementation, cfs_hash_rehash_key could corrupt the
100  *   hash table because @key was overwritten without any protection.
101  *   Now the user must define hs_keycpy for rehash-enabled hash tables;
102  *   cfs_hash_rehash_key will overwrite the hash key under the lock
103  *   by calling hs_keycpy.
104  *
105  * - better hash iteration:
106  *   Now we support both locked and lockless iteration of the hash
107  *   table. Also, the user can break the iteration by returning 1 from the callback.
108  */
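/*
 * Quick orientation for the code below (a summary of the notes above, not
 * new behaviour): cfs_hash_lock_setup() picks one of the lock-op tables
 * depending on the creation flags -
 *   - no locking at all (caller serializes)        : cfs_hash_nl_lops
 *   - one table spinlock, no bucket lock           : cfs_hash_nbl_lops
 *   - table rwlock + spin/rw bucket locks (rehash) : cfs_hash_bkt_*_lops
 *   - no table lock + spin/rw bucket locks         : cfs_hash_nr_bkt_*_lops
 * and cfs_hash_hlist_setup() picks the hlist-head flavour (head or tail
 * insertion, with or without depth tracking).
 */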
109 
110 #include "../../include/linux/libcfs/libcfs.h"
111 #include <linux/seq_file.h>
112 
113 #if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
114 static unsigned int warn_on_depth = 8;
115 module_param(warn_on_depth, uint, 0644);
116 MODULE_PARM_DESC(warn_on_depth, "warning when hash depth is high.");
117 #endif
118 
119 struct cfs_wi_sched *cfs_sched_rehash;
120 
121 static inline void
122 cfs_hash_nl_lock(union cfs_hash_lock *lock, int exclusive) {}
123 
124 static inline void
125 cfs_hash_nl_unlock(union cfs_hash_lock *lock, int exclusive) {}
126 
127 static inline void
128 cfs_hash_spin_lock(union cfs_hash_lock *lock, int exclusive)
129 	__acquires(&lock->spin)
130 {
131 	spin_lock(&lock->spin);
132 }
133 
134 static inline void
135 cfs_hash_spin_unlock(union cfs_hash_lock *lock, int exclusive)
136 	__releases(&lock->spin)
137 {
138 	spin_unlock(&lock->spin);
139 }
140 
141 static inline void
142 cfs_hash_rw_lock(union cfs_hash_lock *lock, int exclusive)
143 	__acquires(&lock->rw)
144 {
145 	if (!exclusive)
146 		read_lock(&lock->rw);
147 	else
148 		write_lock(&lock->rw);
149 }
150 
151 static inline void
152 cfs_hash_rw_unlock(union cfs_hash_lock *lock, int exclusive)
153 	__releases(&lock->rw)
154 {
155 	if (!exclusive)
156 		read_unlock(&lock->rw);
157 	else
158 		write_unlock(&lock->rw);
159 }
160 
161 /** No lock hash */
162 static struct cfs_hash_lock_ops cfs_hash_nl_lops = {
163 	.hs_lock	= cfs_hash_nl_lock,
164 	.hs_unlock      = cfs_hash_nl_unlock,
165 	.hs_bkt_lock    = cfs_hash_nl_lock,
166 	.hs_bkt_unlock  = cfs_hash_nl_unlock,
167 };
168 
169 /** no bucket lock, one spinlock to protect everything */
170 static struct cfs_hash_lock_ops cfs_hash_nbl_lops = {
171 	.hs_lock	= cfs_hash_spin_lock,
172 	.hs_unlock      = cfs_hash_spin_unlock,
173 	.hs_bkt_lock    = cfs_hash_nl_lock,
174 	.hs_bkt_unlock  = cfs_hash_nl_unlock,
175 };
176 
177 /** spin bucket lock, rehash is enabled */
178 static struct cfs_hash_lock_ops cfs_hash_bkt_spin_lops = {
179 	.hs_lock	= cfs_hash_rw_lock,
180 	.hs_unlock      = cfs_hash_rw_unlock,
181 	.hs_bkt_lock    = cfs_hash_spin_lock,
182 	.hs_bkt_unlock  = cfs_hash_spin_unlock,
183 };
184 
185 /** rw bucket lock, rehash is enabled */
186 static struct cfs_hash_lock_ops cfs_hash_bkt_rw_lops = {
187 	.hs_lock	= cfs_hash_rw_lock,
188 	.hs_unlock      = cfs_hash_rw_unlock,
189 	.hs_bkt_lock    = cfs_hash_rw_lock,
190 	.hs_bkt_unlock  = cfs_hash_rw_unlock,
191 };
192 
193 /** spin bucket lock, rehash is disabled */
194 static struct cfs_hash_lock_ops cfs_hash_nr_bkt_spin_lops = {
195 	.hs_lock	= cfs_hash_nl_lock,
196 	.hs_unlock      = cfs_hash_nl_unlock,
197 	.hs_bkt_lock    = cfs_hash_spin_lock,
198 	.hs_bkt_unlock  = cfs_hash_spin_unlock,
199 };
200 
201 /** rw bucket lock, rehash is disabled */
202 static struct cfs_hash_lock_ops cfs_hash_nr_bkt_rw_lops = {
203 	.hs_lock	= cfs_hash_nl_lock,
204 	.hs_unlock      = cfs_hash_nl_unlock,
205 	.hs_bkt_lock    = cfs_hash_rw_lock,
206 	.hs_bkt_unlock  = cfs_hash_rw_unlock,
207 };
208 
209 static void
210 cfs_hash_lock_setup(struct cfs_hash *hs)
211 {
212 	if (cfs_hash_with_no_lock(hs)) {
213 		hs->hs_lops = &cfs_hash_nl_lops;
214 
215 	} else if (cfs_hash_with_no_bktlock(hs)) {
216 		hs->hs_lops = &cfs_hash_nbl_lops;
217 		spin_lock_init(&hs->hs_lock.spin);
218 
219 	} else if (cfs_hash_with_rehash(hs)) {
220 		rwlock_init(&hs->hs_lock.rw);
221 
222 		if (cfs_hash_with_rw_bktlock(hs))
223 			hs->hs_lops = &cfs_hash_bkt_rw_lops;
224 		else if (cfs_hash_with_spin_bktlock(hs))
225 			hs->hs_lops = &cfs_hash_bkt_spin_lops;
226 		else
227 			LBUG();
228 	} else {
229 		if (cfs_hash_with_rw_bktlock(hs))
230 			hs->hs_lops = &cfs_hash_nr_bkt_rw_lops;
231 		else if (cfs_hash_with_spin_bktlock(hs))
232 			hs->hs_lops = &cfs_hash_nr_bkt_spin_lops;
233 		else
234 			LBUG();
235 	}
236 }
237 
238 /**
239  * Simple hash head without depth tracking
240  * new element is always added to head of hlist
241  */
242 struct cfs_hash_head {
243 	struct hlist_head	hh_head;	/**< entries list */
244 };
245 
246 static int
247 cfs_hash_hh_hhead_size(struct cfs_hash *hs)
248 {
249 	return sizeof(struct cfs_hash_head);
250 }
251 
252 static struct hlist_head *
253 cfs_hash_hh_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
254 {
255 	struct cfs_hash_head *head;
256 
257 	head = (struct cfs_hash_head *)&bd->bd_bucket->hsb_head[0];
258 	return &head[bd->bd_offset].hh_head;
259 }
260 
261 static int
262 cfs_hash_hh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
263 		      struct hlist_node *hnode)
264 {
265 	hlist_add_head(hnode, cfs_hash_hh_hhead(hs, bd));
266 	return -1; /* unknown depth */
267 }
268 
269 static int
270 cfs_hash_hh_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
271 		      struct hlist_node *hnode)
272 {
273 	hlist_del_init(hnode);
274 	return -1; /* unknown depth */
275 }
276 
277 /**
278  * Simple hash head with depth tracking
279  * new element is always added to head of hlist
280  */
281 struct cfs_hash_head_dep {
282 	struct hlist_head	hd_head;	/**< entries list */
283 	unsigned int		hd_depth;       /**< list length */
284 };
285 
286 static int
287 cfs_hash_hd_hhead_size(struct cfs_hash *hs)
288 {
289 	return sizeof(struct cfs_hash_head_dep);
290 }
291 
292 static struct hlist_head *
293 cfs_hash_hd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
294 {
295 	struct cfs_hash_head_dep   *head;
296 
297 	head = (struct cfs_hash_head_dep *)&bd->bd_bucket->hsb_head[0];
298 	return &head[bd->bd_offset].hd_head;
299 }
300 
301 static int
302 cfs_hash_hd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
303 		      struct hlist_node *hnode)
304 {
305 	struct cfs_hash_head_dep *hh;
306 
307 	hh = container_of(cfs_hash_hd_hhead(hs, bd),
308 			  struct cfs_hash_head_dep, hd_head);
309 	hlist_add_head(hnode, &hh->hd_head);
310 	return ++hh->hd_depth;
311 }
312 
313 static int
314 cfs_hash_hd_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
315 		      struct hlist_node *hnode)
316 {
317 	struct cfs_hash_head_dep *hh;
318 
319 	hh = container_of(cfs_hash_hd_hhead(hs, bd),
320 			  struct cfs_hash_head_dep, hd_head);
321 	hlist_del_init(hnode);
322 	return --hh->hd_depth;
323 }
324 
325 /**
326  * double-link hash head without depth tracking
327  * new element is always added to tail of hlist
328  */
329 struct cfs_hash_dhead {
330 	struct hlist_head	dh_head;	/**< entries list */
331 	struct hlist_node       *dh_tail;	/**< the last entry */
332 };
333 
334 static int
335 cfs_hash_dh_hhead_size(struct cfs_hash *hs)
336 {
337 	return sizeof(struct cfs_hash_dhead);
338 }
339 
340 static struct hlist_head *
341 cfs_hash_dh_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
342 {
343 	struct cfs_hash_dhead *head;
344 
345 	head = (struct cfs_hash_dhead *)&bd->bd_bucket->hsb_head[0];
346 	return &head[bd->bd_offset].dh_head;
347 }
348 
349 static int
350 cfs_hash_dh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
351 		      struct hlist_node *hnode)
352 {
353 	struct cfs_hash_dhead *dh;
354 
355 	dh = container_of(cfs_hash_dh_hhead(hs, bd),
356 			  struct cfs_hash_dhead, dh_head);
357 	if (dh->dh_tail != NULL) /* not empty */
358 		hlist_add_behind(hnode, dh->dh_tail);
359 	else /* empty list */
360 		hlist_add_head(hnode, &dh->dh_head);
361 	dh->dh_tail = hnode;
362 	return -1; /* unknown depth */
363 }
364 
365 static int
366 cfs_hash_dh_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
367 		      struct hlist_node *hnd)
368 {
369 	struct cfs_hash_dhead *dh;
370 
371 	dh = container_of(cfs_hash_dh_hhead(hs, bd),
372 			  struct cfs_hash_dhead, dh_head);
373 	if (hnd->next == NULL) { /* it's the tail */
374 		dh->dh_tail = (hnd->pprev == &dh->dh_head.first) ? NULL :
375 			      container_of(hnd->pprev, struct hlist_node, next);
376 	}
377 	hlist_del_init(hnd);
378 	return -1; /* unknown depth */
379 }
380 
381 /**
382  * double-link hash head with depth tracking
383  * new element is always added to tail of hlist
384  */
385 struct cfs_hash_dhead_dep {
386 	struct hlist_head	dd_head;	/**< entries list */
387 	struct hlist_node       *dd_tail;	/**< the last entry */
388 	unsigned int	    dd_depth;       /**< list length */
389 };
390 
391 static int
392 cfs_hash_dd_hhead_size(struct cfs_hash *hs)
393 {
394 	return sizeof(struct cfs_hash_dhead_dep);
395 }
396 
397 static struct hlist_head *
398 cfs_hash_dd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
399 {
400 	struct cfs_hash_dhead_dep *head;
401 
402 	head = (struct cfs_hash_dhead_dep *)&bd->bd_bucket->hsb_head[0];
403 	return &head[bd->bd_offset].dd_head;
404 }
405 
406 static int
407 cfs_hash_dd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
408 		      struct hlist_node *hnode)
409 {
410 	struct cfs_hash_dhead_dep *dh;
411 
412 	dh = container_of(cfs_hash_dd_hhead(hs, bd),
413 			  struct cfs_hash_dhead_dep, dd_head);
414 	if (dh->dd_tail != NULL) /* not empty */
415 		hlist_add_behind(hnode, dh->dd_tail);
416 	else /* empty list */
417 		hlist_add_head(hnode, &dh->dd_head);
418 	dh->dd_tail = hnode;
419 	return ++dh->dd_depth;
420 }
421 
422 static int
423 cfs_hash_dd_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
424 		      struct hlist_node *hnd)
425 {
426 	struct cfs_hash_dhead_dep *dh;
427 
428 	dh = container_of(cfs_hash_dd_hhead(hs, bd),
429 			  struct cfs_hash_dhead_dep, dd_head);
430 	if (hnd->next == NULL) { /* it's the tail */
431 		dh->dd_tail = (hnd->pprev == &dh->dd_head.first) ? NULL :
432 			      container_of(hnd->pprev, struct hlist_node, next);
433 	}
434 	hlist_del_init(hnd);
435 	return --dh->dd_depth;
436 }
437 
438 static struct cfs_hash_hlist_ops cfs_hash_hh_hops = {
439 	.hop_hhead      = cfs_hash_hh_hhead,
440 	.hop_hhead_size = cfs_hash_hh_hhead_size,
441 	.hop_hnode_add  = cfs_hash_hh_hnode_add,
442 	.hop_hnode_del  = cfs_hash_hh_hnode_del,
443 };
444 
445 static struct cfs_hash_hlist_ops cfs_hash_hd_hops = {
446 	.hop_hhead      = cfs_hash_hd_hhead,
447 	.hop_hhead_size = cfs_hash_hd_hhead_size,
448 	.hop_hnode_add  = cfs_hash_hd_hnode_add,
449 	.hop_hnode_del  = cfs_hash_hd_hnode_del,
450 };
451 
452 static struct cfs_hash_hlist_ops cfs_hash_dh_hops = {
453 	.hop_hhead      = cfs_hash_dh_hhead,
454 	.hop_hhead_size = cfs_hash_dh_hhead_size,
455 	.hop_hnode_add  = cfs_hash_dh_hnode_add,
456 	.hop_hnode_del  = cfs_hash_dh_hnode_del,
457 };
458 
459 static struct cfs_hash_hlist_ops cfs_hash_dd_hops = {
460 	.hop_hhead      = cfs_hash_dd_hhead,
461 	.hop_hhead_size = cfs_hash_dd_hhead_size,
462 	.hop_hnode_add  = cfs_hash_dd_hnode_add,
463 	.hop_hnode_del  = cfs_hash_dd_hnode_del,
464 };
465 
466 static void
467 cfs_hash_hlist_setup(struct cfs_hash *hs)
468 {
469 	if (cfs_hash_with_add_tail(hs)) {
470 		hs->hs_hops = cfs_hash_with_depth(hs) ?
471 			      &cfs_hash_dd_hops : &cfs_hash_dh_hops;
472 	} else {
473 		hs->hs_hops = cfs_hash_with_depth(hs) ?
474 			      &cfs_hash_hd_hops : &cfs_hash_hh_hops;
475 	}
476 }
477 
478 static void
479 cfs_hash_bd_from_key(struct cfs_hash *hs, struct cfs_hash_bucket **bkts,
480 		     unsigned int bits, const void *key, struct cfs_hash_bd *bd)
481 {
482 	unsigned int index = cfs_hash_id(hs, key, (1U << bits) - 1);
483 
484 	LASSERT(bits == hs->hs_cur_bits || bits == hs->hs_rehash_bits);
485 
486 	bd->bd_bucket = bkts[index & ((1U << (bits - hs->hs_bkt_bits)) - 1)];
487 	bd->bd_offset = index >> (bits - hs->hs_bkt_bits);
488 }
489 
490 void
491 cfs_hash_bd_get(struct cfs_hash *hs, const void *key, struct cfs_hash_bd *bd)
492 {
493 	/* NB: caller should hold hs->hs_rwlock if REHASH is set */
494 	if (likely(hs->hs_rehash_buckets == NULL)) {
495 		cfs_hash_bd_from_key(hs, hs->hs_buckets,
496 				     hs->hs_cur_bits, key, bd);
497 	} else {
498 		LASSERT(hs->hs_rehash_bits != 0);
499 		cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
500 				     hs->hs_rehash_bits, key, bd);
501 	}
502 }
503 EXPORT_SYMBOL(cfs_hash_bd_get);
504 
505 static inline void
506 cfs_hash_bd_dep_record(struct cfs_hash *hs, struct cfs_hash_bd *bd, int dep_cur)
507 {
508 	if (likely(dep_cur <= bd->bd_bucket->hsb_depmax))
509 		return;
510 
511 	bd->bd_bucket->hsb_depmax = dep_cur;
512 # if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
513 	if (likely(warn_on_depth == 0 ||
514 		   max(warn_on_depth, hs->hs_dep_max) >= dep_cur))
515 		return;
516 
517 	spin_lock(&hs->hs_dep_lock);
518 	hs->hs_dep_max  = dep_cur;
519 	hs->hs_dep_bkt  = bd->bd_bucket->hsb_index;
520 	hs->hs_dep_off  = bd->bd_offset;
521 	hs->hs_dep_bits = hs->hs_cur_bits;
522 	spin_unlock(&hs->hs_dep_lock);
523 
524 	cfs_wi_schedule(cfs_sched_rehash, &hs->hs_dep_wi);
525 # endif
526 }
527 
528 void
529 cfs_hash_bd_add_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
530 		       struct hlist_node *hnode)
531 {
532 	int		rc;
533 
534 	rc = hs->hs_hops->hop_hnode_add(hs, bd, hnode);
535 	cfs_hash_bd_dep_record(hs, bd, rc);
536 	bd->bd_bucket->hsb_version++;
537 	if (unlikely(bd->bd_bucket->hsb_version == 0))
538 		bd->bd_bucket->hsb_version++;
539 	bd->bd_bucket->hsb_count++;
540 
541 	if (cfs_hash_with_counter(hs))
542 		atomic_inc(&hs->hs_count);
543 	if (!cfs_hash_with_no_itemref(hs))
544 		cfs_hash_get(hs, hnode);
545 }
546 EXPORT_SYMBOL(cfs_hash_bd_add_locked);
547 
548 void
549 cfs_hash_bd_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
550 		       struct hlist_node *hnode)
551 {
552 	hs->hs_hops->hop_hnode_del(hs, bd, hnode);
553 
554 	LASSERT(bd->bd_bucket->hsb_count > 0);
555 	bd->bd_bucket->hsb_count--;
556 	bd->bd_bucket->hsb_version++;
557 	if (unlikely(bd->bd_bucket->hsb_version == 0))
558 		bd->bd_bucket->hsb_version++;
559 
560 	if (cfs_hash_with_counter(hs)) {
561 		LASSERT(atomic_read(&hs->hs_count) > 0);
562 		atomic_dec(&hs->hs_count);
563 	}
564 	if (!cfs_hash_with_no_itemref(hs))
565 		cfs_hash_put_locked(hs, hnode);
566 }
567 EXPORT_SYMBOL(cfs_hash_bd_del_locked);
568 
569 void
570 cfs_hash_bd_move_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd_old,
571 			struct cfs_hash_bd *bd_new, struct hlist_node *hnode)
572 {
573 	struct cfs_hash_bucket *obkt = bd_old->bd_bucket;
574 	struct cfs_hash_bucket *nbkt = bd_new->bd_bucket;
575 	int		rc;
576 
577 	if (cfs_hash_bd_compare(bd_old, bd_new) == 0)
578 		return;
579 
580 	/* use cfs_hash_bd_hnode_add/del, to avoid atomic & refcount ops
581 	 * in cfs_hash_bd_del/add_locked */
582 	hs->hs_hops->hop_hnode_del(hs, bd_old, hnode);
583 	rc = hs->hs_hops->hop_hnode_add(hs, bd_new, hnode);
584 	cfs_hash_bd_dep_record(hs, bd_new, rc);
585 
586 	LASSERT(obkt->hsb_count > 0);
587 	obkt->hsb_count--;
588 	obkt->hsb_version++;
589 	if (unlikely(obkt->hsb_version == 0))
590 		obkt->hsb_version++;
591 	nbkt->hsb_count++;
592 	nbkt->hsb_version++;
593 	if (unlikely(nbkt->hsb_version == 0))
594 		nbkt->hsb_version++;
595 }
596 EXPORT_SYMBOL(cfs_hash_bd_move_locked);
597 
598 enum {
599 	/** always set, for sanity (avoid ZERO intent) */
600 	CFS_HS_LOOKUP_MASK_FIND     = BIT(0),
601 	/** return entry with a ref */
602 	CFS_HS_LOOKUP_MASK_REF      = BIT(1),
603 	/** add entry if not existing */
604 	CFS_HS_LOOKUP_MASK_ADD      = BIT(2),
605 	/** delete entry, ignore other masks */
606 	CFS_HS_LOOKUP_MASK_DEL      = BIT(3),
607 };
608 
609 enum cfs_hash_lookup_intent {
610 	/** return item w/o refcount */
611 	CFS_HS_LOOKUP_IT_PEEK       = CFS_HS_LOOKUP_MASK_FIND,
612 	/** return item with refcount */
613 	CFS_HS_LOOKUP_IT_FIND       = (CFS_HS_LOOKUP_MASK_FIND |
614 				       CFS_HS_LOOKUP_MASK_REF),
615 	/** return item w/o refcount if it exists, otherwise add */
616 	CFS_HS_LOOKUP_IT_ADD	= (CFS_HS_LOOKUP_MASK_FIND |
617 				       CFS_HS_LOOKUP_MASK_ADD),
618 	/** return item with refcount if it exists, otherwise add */
619 	CFS_HS_LOOKUP_IT_FINDADD    = (CFS_HS_LOOKUP_IT_FIND |
620 				       CFS_HS_LOOKUP_MASK_ADD),
621 	/** delete if existed */
622 	CFS_HS_LOOKUP_IT_FINDDEL    = (CFS_HS_LOOKUP_MASK_FIND |
623 				       CFS_HS_LOOKUP_MASK_DEL)
624 };
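/*
 * The intents above are plain combinations of the masks, e.g.
 * CFS_HS_LOOKUP_IT_FINDADD == FIND | REF | ADD.  cfs_hash_bd_lookup_intent()
 * below tests the individual mask bits to decide whether to take a
 * reference on, add, or delete the matching node.
 */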
625 
626 static struct hlist_node *
627 cfs_hash_bd_lookup_intent(struct cfs_hash *hs, struct cfs_hash_bd *bd,
628 			  const void *key, struct hlist_node *hnode,
629 			  enum cfs_hash_lookup_intent intent)
630 
631 {
632 	struct hlist_head  *hhead = cfs_hash_bd_hhead(hs, bd);
633 	struct hlist_node  *ehnode;
634 	struct hlist_node  *match;
635 	int  intent_add = (intent & CFS_HS_LOOKUP_MASK_ADD) != 0;
636 
637 	/* with this function, we can avoid a lot of useless refcount ops,
638 	 * which are expensive atomic operations most of the time. */
639 	match = intent_add ? NULL : hnode;
640 	hlist_for_each(ehnode, hhead) {
641 		if (!cfs_hash_keycmp(hs, key, ehnode))
642 			continue;
643 
644 		if (match != NULL && match != ehnode) /* can't match */
645 			continue;
646 
647 		/* match and ... */
648 		if ((intent & CFS_HS_LOOKUP_MASK_DEL) != 0) {
649 			cfs_hash_bd_del_locked(hs, bd, ehnode);
650 			return ehnode;
651 		}
652 
653 		/* caller wants refcount? */
654 		if ((intent & CFS_HS_LOOKUP_MASK_REF) != 0)
655 			cfs_hash_get(hs, ehnode);
656 		return ehnode;
657 	}
658 	/* no match item */
659 	if (!intent_add)
660 		return NULL;
661 
662 	LASSERT(hnode != NULL);
663 	cfs_hash_bd_add_locked(hs, bd, hnode);
664 	return hnode;
665 }
666 
667 struct hlist_node *
668 cfs_hash_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd, const void *key)
669 {
670 	return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
671 					 CFS_HS_LOOKUP_IT_FIND);
672 }
673 EXPORT_SYMBOL(cfs_hash_bd_lookup_locked);
674 
675 struct hlist_node *
676 cfs_hash_bd_peek_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd, const void *key)
677 {
678 	return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
679 					 CFS_HS_LOOKUP_IT_PEEK);
680 }
681 EXPORT_SYMBOL(cfs_hash_bd_peek_locked);
682 
683 struct hlist_node *
684 cfs_hash_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
685 			   const void *key, struct hlist_node *hnode,
686 			   int noref)
687 {
688 	return cfs_hash_bd_lookup_intent(hs, bd, key, hnode,
689 					 (!noref * CFS_HS_LOOKUP_MASK_REF) |
690 					 CFS_HS_LOOKUP_IT_ADD);
691 }
692 EXPORT_SYMBOL(cfs_hash_bd_findadd_locked);
693 
694 struct hlist_node *
695 cfs_hash_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
696 			   const void *key, struct hlist_node *hnode)
697 {
698 	/* hnode can be NULL, we find the first item with @key */
699 	return cfs_hash_bd_lookup_intent(hs, bd, key, hnode,
700 					 CFS_HS_LOOKUP_IT_FINDDEL);
701 }
702 EXPORT_SYMBOL(cfs_hash_bd_finddel_locked);
703 
704 static void
705 cfs_hash_multi_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
706 		       unsigned n, int excl)
707 {
708 	struct cfs_hash_bucket *prev = NULL;
709 	int		i;
710 
711 	/**
712 	 * bds must be ordered in ascending order of bd->bd_bucket->hsb_index.
713 	 * NB: it's possible that several bds point to the same bucket but
714 	 * have different bd::bd_offset, so we need to take care to avoid deadlock.
715 	 */
716 	cfs_hash_for_each_bd(bds, n, i) {
717 		if (prev == bds[i].bd_bucket)
718 			continue;
719 
720 		LASSERT(prev == NULL ||
721 			prev->hsb_index < bds[i].bd_bucket->hsb_index);
722 		cfs_hash_bd_lock(hs, &bds[i], excl);
723 		prev = bds[i].bd_bucket;
724 	}
725 }
726 
727 static void
728 cfs_hash_multi_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
729 			 unsigned n, int excl)
730 {
731 	struct cfs_hash_bucket *prev = NULL;
732 	int		i;
733 
734 	cfs_hash_for_each_bd(bds, n, i) {
735 		if (prev != bds[i].bd_bucket) {
736 			cfs_hash_bd_unlock(hs, &bds[i], excl);
737 			prev = bds[i].bd_bucket;
738 		}
739 	}
740 }
741 
742 static struct hlist_node *
743 cfs_hash_multi_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
744 				unsigned n, const void *key)
745 {
746 	struct hlist_node  *ehnode;
747 	unsigned	   i;
748 
749 	cfs_hash_for_each_bd(bds, n, i) {
750 		ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, NULL,
751 						   CFS_HS_LOOKUP_IT_FIND);
752 		if (ehnode != NULL)
753 			return ehnode;
754 	}
755 	return NULL;
756 }
757 
758 static struct hlist_node *
759 cfs_hash_multi_bd_findadd_locked(struct cfs_hash *hs,
760 				 struct cfs_hash_bd *bds, unsigned n, const void *key,
761 				 struct hlist_node *hnode, int noref)
762 {
763 	struct hlist_node  *ehnode;
764 	int		intent;
765 	unsigned	   i;
766 
767 	LASSERT(hnode != NULL);
768 	intent = (!noref * CFS_HS_LOOKUP_MASK_REF) | CFS_HS_LOOKUP_IT_PEEK;
769 
770 	cfs_hash_for_each_bd(bds, n, i) {
771 		ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key,
772 						   NULL, intent);
773 		if (ehnode != NULL)
774 			return ehnode;
775 	}
776 
777 	if (i == 1) { /* only one bucket */
778 		cfs_hash_bd_add_locked(hs, &bds[0], hnode);
779 	} else {
780 		struct cfs_hash_bd      mybd;
781 
782 		cfs_hash_bd_get(hs, key, &mybd);
783 		cfs_hash_bd_add_locked(hs, &mybd, hnode);
784 	}
785 
786 	return hnode;
787 }
788 
789 static struct hlist_node *
790 cfs_hash_multi_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
791 				 unsigned n, const void *key,
792 				 struct hlist_node *hnode)
793 {
794 	struct hlist_node  *ehnode;
795 	unsigned	   i;
796 
797 	cfs_hash_for_each_bd(bds, n, i) {
798 		ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, hnode,
799 						   CFS_HS_LOOKUP_IT_FINDDEL);
800 		if (ehnode != NULL)
801 			return ehnode;
802 	}
803 	return NULL;
804 }
805 
806 static void
807 cfs_hash_bd_order(struct cfs_hash_bd *bd1, struct cfs_hash_bd *bd2)
808 {
809 	int     rc;
810 
811 	if (bd2->bd_bucket == NULL)
812 		return;
813 
814 	if (bd1->bd_bucket == NULL) {
815 		*bd1 = *bd2;
816 		bd2->bd_bucket = NULL;
817 		return;
818 	}
819 
820 	rc = cfs_hash_bd_compare(bd1, bd2);
821 	if (rc == 0) {
822 		bd2->bd_bucket = NULL;
823 
824 	} else if (rc > 0) { /* swap bd1 and bd2 */
825 		struct cfs_hash_bd tmp;
826 
827 		tmp = *bd2;
828 		*bd2 = *bd1;
829 		*bd1 = tmp;
830 	}
831 }
832 
833 void
834 cfs_hash_dual_bd_get(struct cfs_hash *hs, const void *key, struct cfs_hash_bd *bds)
835 {
836 	/* NB: caller should hold hs_lock.rw if REHASH is set */
837 	cfs_hash_bd_from_key(hs, hs->hs_buckets,
838 			     hs->hs_cur_bits, key, &bds[0]);
839 	if (likely(hs->hs_rehash_buckets == NULL)) {
840 		/* no rehash or not rehashing */
841 		bds[1].bd_bucket = NULL;
842 		return;
843 	}
844 
845 	LASSERT(hs->hs_rehash_bits != 0);
846 	cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
847 			     hs->hs_rehash_bits, key, &bds[1]);
848 
849 	cfs_hash_bd_order(&bds[0], &bds[1]);
850 }
851 EXPORT_SYMBOL(cfs_hash_dual_bd_get);
852 
853 void
854 cfs_hash_dual_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl)
855 {
856 	cfs_hash_multi_bd_lock(hs, bds, 2, excl);
857 }
858 EXPORT_SYMBOL(cfs_hash_dual_bd_lock);
859 
860 void
861 cfs_hash_dual_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl)
862 {
863 	cfs_hash_multi_bd_unlock(hs, bds, 2, excl);
864 }
865 EXPORT_SYMBOL(cfs_hash_dual_bd_unlock);
866 
867 struct hlist_node *
868 cfs_hash_dual_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
869 			       const void *key)
870 {
871 	return cfs_hash_multi_bd_lookup_locked(hs, bds, 2, key);
872 }
873 EXPORT_SYMBOL(cfs_hash_dual_bd_lookup_locked);
874 
875 struct hlist_node *
876 cfs_hash_dual_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
877 				const void *key, struct hlist_node *hnode,
878 				int noref)
879 {
880 	return cfs_hash_multi_bd_findadd_locked(hs, bds, 2, key,
881 						hnode, noref);
882 }
883 EXPORT_SYMBOL(cfs_hash_dual_bd_findadd_locked);
884 
885 struct hlist_node *
886 cfs_hash_dual_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
887 				const void *key, struct hlist_node *hnode)
888 {
889 	return cfs_hash_multi_bd_finddel_locked(hs, bds, 2, key, hnode);
890 }
891 EXPORT_SYMBOL(cfs_hash_dual_bd_finddel_locked);
892 
893 static void
894 cfs_hash_buckets_free(struct cfs_hash_bucket **buckets,
895 		      int bkt_size, int prev_size, int size)
896 {
897 	int     i;
898 
899 	for (i = prev_size; i < size; i++) {
900 		if (buckets[i] != NULL)
901 			LIBCFS_FREE(buckets[i], bkt_size);
902 	}
903 
904 	LIBCFS_FREE(buckets, sizeof(buckets[0]) * size);
905 }
906 
907 /*
908  * Create or grow bucket memory. Return old_buckets if no allocation was
909  * needed, the newly allocated buckets if allocation was needed and
910  * successful, and NULL on error.
911  */
912 static struct cfs_hash_bucket **
913 cfs_hash_buckets_realloc(struct cfs_hash *hs, struct cfs_hash_bucket **old_bkts,
914 			 unsigned int old_size, unsigned int new_size)
915 {
916 	struct cfs_hash_bucket **new_bkts;
917 	int		 i;
918 
919 	LASSERT(old_size == 0 || old_bkts != NULL);
920 
921 	if (old_bkts != NULL && old_size == new_size)
922 		return old_bkts;
923 
924 	LIBCFS_ALLOC(new_bkts, sizeof(new_bkts[0]) * new_size);
925 	if (new_bkts == NULL)
926 		return NULL;
927 
928 	if (old_bkts != NULL) {
929 		memcpy(new_bkts, old_bkts,
930 		       min(old_size, new_size) * sizeof(*old_bkts));
931 	}
932 
933 	for (i = old_size; i < new_size; i++) {
934 		struct hlist_head *hhead;
935 		struct cfs_hash_bd     bd;
936 
937 		LIBCFS_ALLOC(new_bkts[i], cfs_hash_bkt_size(hs));
938 		if (new_bkts[i] == NULL) {
939 			cfs_hash_buckets_free(new_bkts, cfs_hash_bkt_size(hs),
940 					      old_size, new_size);
941 			return NULL;
942 		}
943 
944 		new_bkts[i]->hsb_index   = i;
945 		new_bkts[i]->hsb_version = 1;  /* shouldn't be zero */
946 		new_bkts[i]->hsb_depmax  = -1; /* unknown */
947 		bd.bd_bucket = new_bkts[i];
948 		cfs_hash_bd_for_each_hlist(hs, &bd, hhead)
949 			INIT_HLIST_HEAD(hhead);
950 
951 		if (cfs_hash_with_no_lock(hs) ||
952 		    cfs_hash_with_no_bktlock(hs))
953 			continue;
954 
955 		if (cfs_hash_with_rw_bktlock(hs))
956 			rwlock_init(&new_bkts[i]->hsb_lock.rw);
957 		else if (cfs_hash_with_spin_bktlock(hs))
958 			spin_lock_init(&new_bkts[i]->hsb_lock.spin);
959 		else
960 			LBUG(); /* invalid use-case */
961 	}
962 	return new_bkts;
963 }
964 
965 /**
966  * Initialize new libcfs hash, where:
967  * @name     - Descriptive hash name
968  * @cur_bits - Initial hash table size, in bits
969  * @max_bits - Maximum allowed hash table resize, in bits
970  * @ops      - Registered hash table operations
971  * @flags    - CFS_HASH_REHASH enable dynamic hash resizing
972  *	   - CFS_HASH_SORT enable chained hash sort
973  */
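/*
 * A minimal usage sketch (illustrative only): "my_obj_hash_ops" is a
 * hypothetical cfs_hash_ops table supplied by the caller, and the bit
 * sizes/theta values are arbitrary examples; the theta constants are
 * assumed to come from the libcfs hash header.
 *
 *	hs = cfs_hash_create("my_objs", 10, 16, 4, 0,
 *			     CFS_HASH_MIN_THETA, CFS_HASH_MAX_THETA,
 *			     &my_obj_hash_ops, CFS_HASH_REHASH);
 *	if (hs == NULL)
 *		return -ENOMEM;
 *	...
 *	cfs_hash_putref(hs);	(drops the initial reference, frees the table)
 */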
974 static int cfs_hash_rehash_worker(cfs_workitem_t *wi);
975 
976 #if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
977 static int cfs_hash_dep_print(cfs_workitem_t *wi)
978 {
979 	struct cfs_hash *hs = container_of(wi, struct cfs_hash, hs_dep_wi);
980 	int	 dep;
981 	int	 bkt;
982 	int	 off;
983 	int	 bits;
984 
985 	spin_lock(&hs->hs_dep_lock);
986 	dep  = hs->hs_dep_max;
987 	bkt  = hs->hs_dep_bkt;
988 	off  = hs->hs_dep_off;
989 	bits = hs->hs_dep_bits;
990 	spin_unlock(&hs->hs_dep_lock);
991 
992 	LCONSOLE_WARN("#### HASH %s (bits: %d): max depth %d at bucket %d/%d\n",
993 		      hs->hs_name, bits, dep, bkt, off);
994 	spin_lock(&hs->hs_dep_lock);
995 	hs->hs_dep_bits = 0; /* mark as workitem done */
996 	spin_unlock(&hs->hs_dep_lock);
997 	return 0;
998 }
999 
1000 static void cfs_hash_depth_wi_init(struct cfs_hash *hs)
1001 {
1002 	spin_lock_init(&hs->hs_dep_lock);
1003 	cfs_wi_init(&hs->hs_dep_wi, hs, cfs_hash_dep_print);
1004 }
1005 
1006 static void cfs_hash_depth_wi_cancel(struct cfs_hash *hs)
1007 {
1008 	if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_dep_wi))
1009 		return;
1010 
1011 	spin_lock(&hs->hs_dep_lock);
1012 	while (hs->hs_dep_bits != 0) {
1013 		spin_unlock(&hs->hs_dep_lock);
1014 		cond_resched();
1015 		spin_lock(&hs->hs_dep_lock);
1016 	}
1017 	spin_unlock(&hs->hs_dep_lock);
1018 }
1019 
1020 #else /* CFS_HASH_DEBUG_LEVEL < CFS_HASH_DEBUG_1 */
1021 
1022 static inline void cfs_hash_depth_wi_init(struct cfs_hash *hs) {}
1023 static inline void cfs_hash_depth_wi_cancel(struct cfs_hash *hs) {}
1024 
1025 #endif /* CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1 */
1026 
1027 struct cfs_hash *
1028 cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
1029 		unsigned bkt_bits, unsigned extra_bytes,
1030 		unsigned min_theta, unsigned max_theta,
1031 		struct cfs_hash_ops *ops, unsigned flags)
1032 {
1033 	struct cfs_hash *hs;
1034 	int	 len;
1035 
1036 	CLASSERT(CFS_HASH_THETA_BITS < 15);
1037 
1038 	LASSERT(name != NULL);
1039 	LASSERT(ops != NULL);
1040 	LASSERT(ops->hs_key);
1041 	LASSERT(ops->hs_hash);
1042 	LASSERT(ops->hs_object);
1043 	LASSERT(ops->hs_keycmp);
1044 	LASSERT(ops->hs_get != NULL);
1045 	LASSERT(ops->hs_put_locked != NULL);
1046 
1047 	if ((flags & CFS_HASH_REHASH) != 0)
1048 		flags |= CFS_HASH_COUNTER; /* must have counter */
1049 
1050 	LASSERT(cur_bits > 0);
1051 	LASSERT(cur_bits >= bkt_bits);
1052 	LASSERT(max_bits >= cur_bits && max_bits < 31);
1053 	LASSERT(ergo((flags & CFS_HASH_REHASH) == 0, cur_bits == max_bits));
1054 	LASSERT(ergo((flags & CFS_HASH_REHASH) != 0,
1055 		     (flags & CFS_HASH_NO_LOCK) == 0));
1056 	LASSERT(ergo((flags & CFS_HASH_REHASH_KEY) != 0,
1057 		      ops->hs_keycpy != NULL));
1058 
1059 	len = (flags & CFS_HASH_BIGNAME) == 0 ?
1060 	      CFS_HASH_NAME_LEN : CFS_HASH_BIGNAME_LEN;
1061 	LIBCFS_ALLOC(hs, offsetof(struct cfs_hash, hs_name[len]));
1062 	if (hs == NULL)
1063 		return NULL;
1064 
1065 	strlcpy(hs->hs_name, name, len);
1066 	hs->hs_flags = flags;
1067 
1068 	atomic_set(&hs->hs_refcount, 1);
1069 	atomic_set(&hs->hs_count, 0);
1070 
1071 	cfs_hash_lock_setup(hs);
1072 	cfs_hash_hlist_setup(hs);
1073 
1074 	hs->hs_cur_bits = (__u8)cur_bits;
1075 	hs->hs_min_bits = (__u8)cur_bits;
1076 	hs->hs_max_bits = (__u8)max_bits;
1077 	hs->hs_bkt_bits = (__u8)bkt_bits;
1078 
1079 	hs->hs_ops	 = ops;
1080 	hs->hs_extra_bytes = extra_bytes;
1081 	hs->hs_rehash_bits = 0;
1082 	cfs_wi_init(&hs->hs_rehash_wi, hs, cfs_hash_rehash_worker);
1083 	cfs_hash_depth_wi_init(hs);
1084 
1085 	if (cfs_hash_with_rehash(hs))
1086 		__cfs_hash_set_theta(hs, min_theta, max_theta);
1087 
1088 	hs->hs_buckets = cfs_hash_buckets_realloc(hs, NULL, 0,
1089 						  CFS_HASH_NBKT(hs));
1090 	if (hs->hs_buckets != NULL)
1091 		return hs;
1092 
1093 	LIBCFS_FREE(hs, offsetof(struct cfs_hash, hs_name[len]));
1094 	return NULL;
1095 }
1096 EXPORT_SYMBOL(cfs_hash_create);
1097 
1098 /**
1099  * Cleanup libcfs hash @hs.
1100  */
1101 static void
1102 cfs_hash_destroy(struct cfs_hash *hs)
1103 {
1104 	struct hlist_node     *hnode;
1105 	struct hlist_node     *pos;
1106 	struct cfs_hash_bd	 bd;
1107 	int		   i;
1108 
1109 	LASSERT(hs != NULL);
1110 	LASSERT(!cfs_hash_is_exiting(hs) &&
1111 		!cfs_hash_is_iterating(hs));
1112 
1113 	/**
1114 	 * prohibit further rehashes, don't need any lock because
1115 	 * prohibit further rehashes; no lock is needed because
1116 	 * I'm the only (last) one who can change it.
1117 	hs->hs_exiting = 1;
1118 	if (cfs_hash_with_rehash(hs))
1119 		cfs_hash_rehash_cancel(hs);
1120 
1121 	cfs_hash_depth_wi_cancel(hs);
1122 	/* rehash should be done/canceled */
1123 	LASSERT(hs->hs_buckets != NULL &&
1124 		hs->hs_rehash_buckets == NULL);
1125 
1126 	cfs_hash_for_each_bucket(hs, &bd, i) {
1127 		struct hlist_head *hhead;
1128 
1129 		LASSERT(bd.bd_bucket != NULL);
1130 		/* no need to take this lock, just for consistent code */
1131 		cfs_hash_bd_lock(hs, &bd, 1);
1132 
1133 		cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
1134 			hlist_for_each_safe(hnode, pos, hhead) {
1135 				LASSERTF(!cfs_hash_with_assert_empty(hs),
1136 					 "hash %s bucket %u(%u) is not empty: %u items left\n",
1137 					 hs->hs_name, bd.bd_bucket->hsb_index,
1138 					 bd.bd_offset, bd.bd_bucket->hsb_count);
1139 				/* can't assert key validity, because we
1140 				 * may have interrupted a rehash */
1141 				cfs_hash_bd_del_locked(hs, &bd, hnode);
1142 				cfs_hash_exit(hs, hnode);
1143 			}
1144 		}
1145 		LASSERT(bd.bd_bucket->hsb_count == 0);
1146 		cfs_hash_bd_unlock(hs, &bd, 1);
1147 		cond_resched();
1148 	}
1149 
1150 	LASSERT(atomic_read(&hs->hs_count) == 0);
1151 
1152 	cfs_hash_buckets_free(hs->hs_buckets, cfs_hash_bkt_size(hs),
1153 			      0, CFS_HASH_NBKT(hs));
1154 	i = cfs_hash_with_bigname(hs) ?
1155 	    CFS_HASH_BIGNAME_LEN : CFS_HASH_NAME_LEN;
1156 	LIBCFS_FREE(hs, offsetof(struct cfs_hash, hs_name[i]));
1157 }
1158 
1159 struct cfs_hash *cfs_hash_getref(struct cfs_hash *hs)
1160 {
1161 	if (atomic_inc_not_zero(&hs->hs_refcount))
1162 		return hs;
1163 	return NULL;
1164 }
1165 EXPORT_SYMBOL(cfs_hash_getref);
1166 
1167 void cfs_hash_putref(struct cfs_hash *hs)
1168 {
1169 	if (atomic_dec_and_test(&hs->hs_refcount))
1170 		cfs_hash_destroy(hs);
1171 }
1172 EXPORT_SYMBOL(cfs_hash_putref);
1173 
1174 static inline int
1175 cfs_hash_rehash_bits(struct cfs_hash *hs)
1176 {
1177 	if (cfs_hash_with_no_lock(hs) ||
1178 	    !cfs_hash_with_rehash(hs))
1179 		return -EOPNOTSUPP;
1180 
1181 	if (unlikely(cfs_hash_is_exiting(hs)))
1182 		return -ESRCH;
1183 
1184 	if (unlikely(cfs_hash_is_rehashing(hs)))
1185 		return -EALREADY;
1186 
1187 	if (unlikely(cfs_hash_is_iterating(hs)))
1188 		return -EAGAIN;
1189 
1190 	/* XXX: need to handle case with max_theta != 2.0
1191 	 *      and the case with min_theta != 0.5 */
1192 	if ((hs->hs_cur_bits < hs->hs_max_bits) &&
1193 	    (__cfs_hash_theta(hs) > hs->hs_max_theta))
1194 		return hs->hs_cur_bits + 1;
1195 
1196 	if (!cfs_hash_with_shrink(hs))
1197 		return 0;
1198 
1199 	if ((hs->hs_cur_bits > hs->hs_min_bits) &&
1200 	    (__cfs_hash_theta(hs) < hs->hs_min_theta))
1201 		return hs->hs_cur_bits - 1;
1202 
1203 	return 0;
1204 }
1205 
1206 /**
1207  * don't allow inline rehash if:
1208  * - user wants non-blocking change (add/del) on hash table
1209  * - too many elements
1210  */
1211 static inline int
1212 cfs_hash_rehash_inline(struct cfs_hash *hs)
1213 {
1214 	return !cfs_hash_with_nblk_change(hs) &&
1215 	       atomic_read(&hs->hs_count) < CFS_HASH_LOOP_HOG;
1216 }
1217 
1218 /**
1219  * Add item @hnode to libcfs hash @hs using @key.  The registered
1220  * ops->hs_get function will be called when the item is added.
1221  */
1222 void
1223 cfs_hash_add(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
1224 {
1225 	struct cfs_hash_bd   bd;
1226 	int	     bits;
1227 
1228 	LASSERT(hlist_unhashed(hnode));
1229 
1230 	cfs_hash_lock(hs, 0);
1231 	cfs_hash_bd_get_and_lock(hs, key, &bd, 1);
1232 
1233 	cfs_hash_key_validate(hs, key, hnode);
1234 	cfs_hash_bd_add_locked(hs, &bd, hnode);
1235 
1236 	cfs_hash_bd_unlock(hs, &bd, 1);
1237 
1238 	bits = cfs_hash_rehash_bits(hs);
1239 	cfs_hash_unlock(hs, 0);
1240 	if (bits > 0)
1241 		cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
1242 }
1243 EXPORT_SYMBOL(cfs_hash_add);
1244 
1245 static struct hlist_node *
1246 cfs_hash_find_or_add(struct cfs_hash *hs, const void *key,
1247 		     struct hlist_node *hnode, int noref)
1248 {
1249 	struct hlist_node *ehnode;
1250 	struct cfs_hash_bd     bds[2];
1251 	int	       bits = 0;
1252 
1253 	LASSERT(hlist_unhashed(hnode));
1254 
1255 	cfs_hash_lock(hs, 0);
1256 	cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);
1257 
1258 	cfs_hash_key_validate(hs, key, hnode);
1259 	ehnode = cfs_hash_dual_bd_findadd_locked(hs, bds, key,
1260 						 hnode, noref);
1261 	cfs_hash_dual_bd_unlock(hs, bds, 1);
1262 
1263 	if (ehnode == hnode) /* new item added */
1264 		bits = cfs_hash_rehash_bits(hs);
1265 	cfs_hash_unlock(hs, 0);
1266 	if (bits > 0)
1267 		cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
1268 
1269 	return ehnode;
1270 }
1271 
1272 /**
1273  * Add item @hnode to libcfs hash @hs using @key.  The registered
1274  * ops->hs_get function will be called if the item was added.
1275  * Returns 0 on success or -EALREADY on key collisions.
1276  */
1277 int
1278 cfs_hash_add_unique(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
1279 {
1280 	return cfs_hash_find_or_add(hs, key, hnode, 1) != hnode ?
1281 	       -EALREADY : 0;
1282 }
1283 EXPORT_SYMBOL(cfs_hash_add_unique);
1284 
1285 /**
1286  * Add item @hnode to libcfs hash @hs using @key.  If this @key
1287  * already exists in the hash then ops->hs_get will be called on the
1288  * conflicting entry and that entry will be returned to the caller.
1289  * Otherwise ops->hs_get is called on the item which was added.
1290  */
1291 void *
1292 cfs_hash_findadd_unique(struct cfs_hash *hs, const void *key,
1293 			struct hlist_node *hnode)
1294 {
1295 	hnode = cfs_hash_find_or_add(hs, key, hnode, 0);
1296 
1297 	return cfs_hash_object(hs, hnode);
1298 }
1299 EXPORT_SYMBOL(cfs_hash_findadd_unique);
1300 
1301 /**
1302  * Delete item @hnode from the libcfs hash @hs using @key.  The @key
1303  * is required to ensure the correct hash bucket is locked since there
1304  * is no direct linkage from the item to the bucket.  The object
1305  * removed from the hash will be returned and obs->hs_put is called
1306  * removed from the hash will be returned and ops->hs_put is called
1307  */
1308 void *
1309 cfs_hash_del(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
1310 {
1311 	void	   *obj  = NULL;
1312 	int	     bits = 0;
1313 	struct cfs_hash_bd   bds[2];
1314 
1315 	cfs_hash_lock(hs, 0);
1316 	cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);
1317 
1318 	/* NB: do nothing if @hnode is not in hash table */
1319 	if (hnode == NULL || !hlist_unhashed(hnode)) {
1320 		if (bds[1].bd_bucket == NULL && hnode != NULL) {
1321 			cfs_hash_bd_del_locked(hs, &bds[0], hnode);
1322 		} else {
1323 			hnode = cfs_hash_dual_bd_finddel_locked(hs, bds,
1324 								key, hnode);
1325 		}
1326 	}
1327 
1328 	if (hnode != NULL) {
1329 		obj  = cfs_hash_object(hs, hnode);
1330 		bits = cfs_hash_rehash_bits(hs);
1331 	}
1332 
1333 	cfs_hash_dual_bd_unlock(hs, bds, 1);
1334 	cfs_hash_unlock(hs, 0);
1335 	if (bits > 0)
1336 		cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
1337 
1338 	return obj;
1339 }
1340 EXPORT_SYMBOL(cfs_hash_del);
1341 
1342 /**
1343  * Delete item given @key in libcfs hash @hs.  The first @key found in
1344  * the hash will be removed; if the key exists multiple times in the hash
1345  * @hs, this function must be called once per key.  The removed object
1346  * will be returned and ops->hs_put is called on the removed object.
1347  */
1348 void *
1349 cfs_hash_del_key(struct cfs_hash *hs, const void *key)
1350 {
1351 	return cfs_hash_del(hs, key, NULL);
1352 }
1353 EXPORT_SYMBOL(cfs_hash_del_key);
1354 
1355 /**
1356  * Lookup an item using @key in the libcfs hash @hs and return it.
1357  * If the @key is found in the hash hs->hs_get() is called and the
1358  * matching object is returned.  It is the caller's responsibility
1359  * to call the counterpart ops->hs_put using the cfs_hash_put() macro
1360  * when finished with the object.  If the @key was not found
1361  * in the hash @hs NULL is returned.
1362  */
1363 void *
1364 cfs_hash_lookup(struct cfs_hash *hs, const void *key)
1365 {
1366 	void		 *obj = NULL;
1367 	struct hlist_node     *hnode;
1368 	struct cfs_hash_bd	 bds[2];
1369 
1370 	cfs_hash_lock(hs, 0);
1371 	cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);
1372 
1373 	hnode = cfs_hash_dual_bd_lookup_locked(hs, bds, key);
1374 	if (hnode != NULL)
1375 		obj = cfs_hash_object(hs, hnode);
1376 
1377 	cfs_hash_dual_bd_unlock(hs, bds, 0);
1378 	cfs_hash_unlock(hs, 0);
1379 
1380 	return obj;
1381 }
1382 EXPORT_SYMBOL(cfs_hash_lookup);
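/*
 * Typical lookup pattern (sketch; "obj" and its embedded "o_hnode" member
 * are hypothetical caller-side names):
 *
 *	obj = cfs_hash_lookup(hs, key);
 *	if (obj != NULL) {
 *		use(obj);
 *		cfs_hash_put(hs, &obj->o_hnode);	(release ref taken by lookup)
 *	}
 */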
1383 
1384 static void
1385 cfs_hash_for_each_enter(struct cfs_hash *hs) {
1386 	LASSERT(!cfs_hash_is_exiting(hs));
1387 
1388 	if (!cfs_hash_with_rehash(hs))
1389 		return;
1390 	/*
1391 	 * NB: it's a race on cfs_hash::hs_iterating, but it doesn't matter
1392 	 * because it's just an unreliable signal to the rehash thread;
1393 	 * the rehash thread will try to finish the rehash ASAP when it sees this.
1394 	 */
1395 	hs->hs_iterating = 1;
1396 
1397 	cfs_hash_lock(hs, 1);
1398 	hs->hs_iterators++;
1399 
1400 	/* NB: iteration is mostly called by a service thread, so
1401 	 * we tend to cancel a pending rehash request instead of
1402 	 * blocking the service thread; we will relaunch the rehash
1403 	 * request after the iteration */
1404 	if (cfs_hash_is_rehashing(hs))
1405 		cfs_hash_rehash_cancel_locked(hs);
1406 	cfs_hash_unlock(hs, 1);
1407 }
1408 
1409 static void
1410 cfs_hash_for_each_exit(struct cfs_hash *hs) {
1411 	int remained;
1412 	int bits;
1413 
1414 	if (!cfs_hash_with_rehash(hs))
1415 		return;
1416 	cfs_hash_lock(hs, 1);
1417 	remained = --hs->hs_iterators;
1418 	bits = cfs_hash_rehash_bits(hs);
1419 	cfs_hash_unlock(hs, 1);
1420 	/* NB: it's a race on cfs_hash::hs_iterating, see above */
1421 	if (remained == 0)
1422 		hs->hs_iterating = 0;
1423 	if (bits > 0) {
1424 		cfs_hash_rehash(hs, atomic_read(&hs->hs_count) <
1425 				    CFS_HASH_LOOP_HOG);
1426 	}
1427 }
1428 
1429 /**
1430  * For each item in the libcfs hash @hs call the passed callback @func
1431  * and pass to it as an argument each hash item and the private @data.
1432  *
1433  * a) the function may sleep!
1434  * b) during the callback:
1435  *    . the bucket lock is held so the callback must never sleep.
1436  *    . if @remove_safe is true, the user can remove the current item with
1437  *      cfs_hash_bd_del_locked
1438  */
1439 static __u64
1440 cfs_hash_for_each_tight(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
1441 			void *data, int remove_safe) {
1442 	struct hlist_node     *hnode;
1443 	struct hlist_node     *pos;
1444 	struct cfs_hash_bd	 bd;
1445 	__u64		 count = 0;
1446 	int		   excl  = !!remove_safe;
1447 	int		   loop  = 0;
1448 	int		   i;
1449 
1450 	cfs_hash_for_each_enter(hs);
1451 
1452 	cfs_hash_lock(hs, 0);
1453 	LASSERT(!cfs_hash_is_rehashing(hs));
1454 
1455 	cfs_hash_for_each_bucket(hs, &bd, i) {
1456 		struct hlist_head *hhead;
1457 
1458 		cfs_hash_bd_lock(hs, &bd, excl);
1459 		if (func == NULL) { /* only glimpse size */
1460 			count += bd.bd_bucket->hsb_count;
1461 			cfs_hash_bd_unlock(hs, &bd, excl);
1462 			continue;
1463 		}
1464 
1465 		cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
1466 			hlist_for_each_safe(hnode, pos, hhead) {
1467 				cfs_hash_bucket_validate(hs, &bd, hnode);
1468 				count++;
1469 				loop++;
1470 				if (func(hs, &bd, hnode, data)) {
1471 					cfs_hash_bd_unlock(hs, &bd, excl);
1472 					goto out;
1473 				}
1474 			}
1475 		}
1476 		cfs_hash_bd_unlock(hs, &bd, excl);
1477 		if (loop < CFS_HASH_LOOP_HOG)
1478 			continue;
1479 		loop = 0;
1480 		cfs_hash_unlock(hs, 0);
1481 		cond_resched();
1482 		cfs_hash_lock(hs, 0);
1483 	}
1484  out:
1485 	cfs_hash_unlock(hs, 0);
1486 
1487 	cfs_hash_for_each_exit(hs);
1488 	return count;
1489 }
1490 
1491 struct cfs_hash_cond_arg {
1492 	cfs_hash_cond_opt_cb_t	func;
1493 	void			*arg;
1494 };
1495 
1496 static int
1497 cfs_hash_cond_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
1498 			 struct hlist_node *hnode, void *data)
1499 {
1500 	struct cfs_hash_cond_arg *cond = data;
1501 
1502 	if (cond->func(cfs_hash_object(hs, hnode), cond->arg))
1503 		cfs_hash_bd_del_locked(hs, bd, hnode);
1504 	return 0;
1505 }
1506 
1507 /**
1508  * Delete items from the libcfs hash @hs when @func returns true.
1509  * The write lock is held while looping over each bucket to prevent
1510  * any object from being referenced.
1511  */
1512 void
1513 cfs_hash_cond_del(struct cfs_hash *hs, cfs_hash_cond_opt_cb_t func, void *data)
1514 {
1515 	struct cfs_hash_cond_arg arg = {
1516 		.func   = func,
1517 		.arg    = data,
1518 	};
1519 
1520 	cfs_hash_for_each_tight(hs, cfs_hash_cond_del_locked, &arg, 1);
1521 }
1522 EXPORT_SYMBOL(cfs_hash_cond_del);
1523 
1524 void
1525 cfs_hash_for_each(struct cfs_hash *hs,
1526 		  cfs_hash_for_each_cb_t func, void *data)
1527 {
1528 	cfs_hash_for_each_tight(hs, func, data, 0);
1529 }
1530 EXPORT_SYMBOL(cfs_hash_for_each);
1531 
1532 void
1533 cfs_hash_for_each_safe(struct cfs_hash *hs,
1534 		       cfs_hash_for_each_cb_t func, void *data) {
1535 	cfs_hash_for_each_tight(hs, func, data, 1);
1536 }
1537 EXPORT_SYMBOL(cfs_hash_for_each_safe);
1538 
1539 static int
1540 cfs_hash_peek(struct cfs_hash *hs, struct cfs_hash_bd *bd,
1541 	      struct hlist_node *hnode, void *data)
1542 {
1543 	*(int *)data = 0;
1544 	return 1; /* return 1 to break the loop */
1545 }
1546 
1547 int
1548 cfs_hash_is_empty(struct cfs_hash *hs)
1549 {
1550 	int empty = 1;
1551 
1552 	cfs_hash_for_each_tight(hs, cfs_hash_peek, &empty, 0);
1553 	return empty;
1554 }
1555 EXPORT_SYMBOL(cfs_hash_is_empty);
1556 
1557 __u64
1558 cfs_hash_size_get(struct cfs_hash *hs)
1559 {
1560 	return cfs_hash_with_counter(hs) ?
1561 	       atomic_read(&hs->hs_count) :
1562 	       cfs_hash_for_each_tight(hs, NULL, NULL, 0);
1563 }
1564 EXPORT_SYMBOL(cfs_hash_size_get);
1565 
1566 /*
1567  * cfs_hash_for_each_relax:
1568  * Iterate the hash table and call @func on each item without
1569  * any lock. This function can't guarantee to finish iteration
1570  * if these features are enabled:
1571  *
1572  *  a. if rehash_key is enabled, an item can be moved from
1573  *     one bucket to another bucket
1574  *  b. the user can remove a non-zero-ref item from the hash table,
1575  *     so the item can be removed from the hash table; even worse,
1576  *     it's possible that the user changed the key and inserted it into
1577  *     another hash bucket.
1578  * there's no way for us to finish the iteration correctly in the previous
1579  * two cases, so the iteration has to be stopped on change.
1580  */
1581 static int
1582 cfs_hash_for_each_relax(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
1583 			void *data) {
1584 	struct hlist_node *hnode;
1585 	struct hlist_node *tmp;
1586 	struct cfs_hash_bd     bd;
1587 	__u32	     version;
1588 	int	       count = 0;
1589 	int	       stop_on_change;
1590 	int	       rc;
1591 	int	       i;
1592 
1593 	stop_on_change = cfs_hash_with_rehash_key(hs) ||
1594 			 !cfs_hash_with_no_itemref(hs) ||
1595 			 hs->hs_ops->hs_put_locked == NULL;
1596 	cfs_hash_lock(hs, 0);
1597 	LASSERT(!cfs_hash_is_rehashing(hs));
1598 
1599 	cfs_hash_for_each_bucket(hs, &bd, i) {
1600 		struct hlist_head *hhead;
1601 
1602 		cfs_hash_bd_lock(hs, &bd, 0);
1603 		version = cfs_hash_bd_version_get(&bd);
1604 
1605 		cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
1606 			for (hnode = hhead->first; hnode != NULL;) {
1607 				cfs_hash_bucket_validate(hs, &bd, hnode);
1608 				cfs_hash_get(hs, hnode);
1609 				cfs_hash_bd_unlock(hs, &bd, 0);
1610 				cfs_hash_unlock(hs, 0);
1611 
1612 				rc = func(hs, &bd, hnode, data);
1613 				if (stop_on_change)
1614 					cfs_hash_put(hs, hnode);
1615 				cond_resched();
1616 				count++;
1617 
1618 				cfs_hash_lock(hs, 0);
1619 				cfs_hash_bd_lock(hs, &bd, 0);
1620 				if (!stop_on_change) {
1621 					tmp = hnode->next;
1622 					cfs_hash_put_locked(hs, hnode);
1623 					hnode = tmp;
1624 				} else { /* bucket changed? */
1625 					if (version !=
1626 					    cfs_hash_bd_version_get(&bd))
1627 						break;
1628 					/* safe to continue because no change */
1629 					hnode = hnode->next;
1630 				}
1631 				if (rc) /* callback wants to break iteration */
1632 					break;
1633 			}
1634 			if (rc) /* callback wants to break iteration */
1635 				break;
1636 		}
1637 		cfs_hash_bd_unlock(hs, &bd, 0);
1638 		if (rc) /* callback wants to break iteration */
1639 			break;
1640 	}
1641 	cfs_hash_unlock(hs, 0);
1642 
1643 	return count;
1644 }
1645 
1646 int
1647 cfs_hash_for_each_nolock(struct cfs_hash *hs,
1648 			 cfs_hash_for_each_cb_t func, void *data) {
1649 	if (cfs_hash_with_no_lock(hs) ||
1650 	    cfs_hash_with_rehash_key(hs) ||
1651 	    !cfs_hash_with_no_itemref(hs))
1652 		return -EOPNOTSUPP;
1653 
1654 	if (hs->hs_ops->hs_get == NULL ||
1655 	    (hs->hs_ops->hs_put == NULL &&
1656 	     hs->hs_ops->hs_put_locked == NULL))
1657 		return -EOPNOTSUPP;
1658 
1659 	cfs_hash_for_each_enter(hs);
1660 	cfs_hash_for_each_relax(hs, func, data);
1661 	cfs_hash_for_each_exit(hs);
1662 
1663 	return 0;
1664 }
1665 EXPORT_SYMBOL(cfs_hash_for_each_nolock);
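
/*
 * Illustrative usage sketch (not compiled): because
 * cfs_hash_for_each_nolock() drops all hash locks around the callback (see
 * cfs_hash_for_each_relax() above), the callback is allowed to block.
 * "struct my_obj" and "my_obj_flush" are hypothetical.
 */
#if 0
static int sync_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
		   struct hlist_node *hnode, void *data)
{
	struct my_obj *o = container_of(hnode, struct my_obj, mo_hnode);

	my_obj_flush(o);	/* may sleep: no hash lock is held here */
	return 0;
}

static int sync_all(struct cfs_hash *hs)
{
	/* returns -EOPNOTSUPP for lockless tables, tables with rehash_key,
	 * or tables without hs_get/hs_put(_locked) operations */
	return cfs_hash_for_each_nolock(hs, sync_cb, NULL);
}
#endif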
1666 
1667 /**
1668  * For each hash bucket in the libcfs hash @hs, call the passed callback
1669  * @func until all the hash buckets are empty.  The passed callback @func
1670  * or the previously registered callback hs->hs_put must remove the item
1671  * from the hash.  You may use either the cfs_hash_del() or hlist_del()
1672  * functions.  No rwlocks will be held during the callback @func, so it
1673  * is safe to sleep if needed.  This function will not terminate until
1674  * the hash is empty.  Note that it is still possible to concurrently add
1675  * new items into the hash.  It is the caller's responsibility to ensure
1676  * the required locking is in place to prevent concurrent insertions.
1677  */
1678 int
1679 cfs_hash_for_each_empty(struct cfs_hash *hs,
1680 			cfs_hash_for_each_cb_t func, void *data) {
1681 	unsigned  i = 0;
1682 
1683 	if (cfs_hash_with_no_lock(hs))
1684 		return -EOPNOTSUPP;
1685 
1686 	if (hs->hs_ops->hs_get == NULL ||
1687 	    (hs->hs_ops->hs_put == NULL &&
1688 	     hs->hs_ops->hs_put_locked == NULL))
1689 		return -EOPNOTSUPP;
1690 
1691 	cfs_hash_for_each_enter(hs);
1692 	while (cfs_hash_for_each_relax(hs, func, data)) {
1693 		CDEBUG(D_INFO, "Try to empty hash: %s, loop: %u\n",
1694 		       hs->hs_name, i++);
1695 	}
1696 	cfs_hash_for_each_exit(hs);
1697 	return 0;
1698 }
1699 EXPORT_SYMBOL(cfs_hash_for_each_empty);
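
/*
 * Illustrative usage sketch (not compiled): as the comment above requires,
 * the callback itself removes each item, here via cfs_hash_del().
 * "struct my_obj", "my_obj_fini" and the key layout are hypothetical.
 */
#if 0
static int cleanup_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
		      struct hlist_node *hnode, void *data)
{
	struct my_obj *o = container_of(hnode, struct my_obj, mo_hnode);

	cfs_hash_del(hs, &o->mo_key, hnode);	/* item must leave the hash */
	my_obj_fini(o);				/* sleeping is allowed here */
	return 0;
}

static void drain_hash(struct cfs_hash *hs)
{
	/* loops over cfs_hash_for_each_relax() until the hash is empty */
	cfs_hash_for_each_empty(hs, cleanup_cb, NULL);
}
#endif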
1700 
1701 void
1702 cfs_hash_hlist_for_each(struct cfs_hash *hs, unsigned hindex,
1703 			cfs_hash_for_each_cb_t func, void *data)
1704 {
1705 	struct hlist_head   *hhead;
1706 	struct hlist_node   *hnode;
1707 	struct cfs_hash_bd       bd;
1708 
1709 	cfs_hash_for_each_enter(hs);
1710 	cfs_hash_lock(hs, 0);
1711 	if (hindex >= CFS_HASH_NHLIST(hs))
1712 		goto out;
1713 
1714 	cfs_hash_bd_index_set(hs, hindex, &bd);
1715 
1716 	cfs_hash_bd_lock(hs, &bd, 0);
1717 	hhead = cfs_hash_bd_hhead(hs, &bd);
1718 	hlist_for_each(hnode, hhead) {
1719 		if (func(hs, &bd, hnode, data))
1720 			break;
1721 	}
1722 	cfs_hash_bd_unlock(hs, &bd, 0);
1723  out:
1724 	cfs_hash_unlock(hs, 0);
1725 	cfs_hash_for_each_exit(hs);
1726 }
1728 EXPORT_SYMBOL(cfs_hash_hlist_for_each);
1729 
1730 /*
1731  * For each item in the libcfs hash @hs that matches @key, call the
1732  * passed callback @func with the matching hash item and the private
1733  * @data as arguments.  The bucket lock is held during the callback,
1734  * so the callback must never sleep.
1735  */
1736 void
1737 cfs_hash_for_each_key(struct cfs_hash *hs, const void *key,
1738 		      cfs_hash_for_each_cb_t func, void *data) {
1739 	struct hlist_node   *hnode;
1740 	struct cfs_hash_bd       bds[2];
1741 	unsigned	    i;
1742 
1743 	cfs_hash_lock(hs, 0);
1744 
1745 	cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);
1746 
1747 	cfs_hash_for_each_bd(bds, 2, i) {
1748 		struct hlist_head *hlist = cfs_hash_bd_hhead(hs, &bds[i]);
1749 
1750 		hlist_for_each(hnode, hlist) {
1751 			cfs_hash_bucket_validate(hs, &bds[i], hnode);
1752 
1753 			if (cfs_hash_keycmp(hs, key, hnode)) {
1754 				if (func(hs, &bds[i], hnode, data))
1755 					break;
1756 			}
1757 		}
1758 	}
1759 
1760 	cfs_hash_dual_bd_unlock(hs, bds, 0);
1761 	cfs_hash_unlock(hs, 0);
1762 }
1763 EXPORT_SYMBOL(cfs_hash_for_each_key);
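
/*
 * Illustrative usage sketch (not compiled): the bucket lock is held while
 * the callback runs, so it must not sleep.  "struct my_obj", its fields
 * and "MY_OBJ_SEEN" are hypothetical.
 */
#if 0
static int match_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
		    struct hlist_node *hnode, void *data)
{
	struct my_obj *o = container_of(hnode, struct my_obj, mo_hnode);

	o->mo_flags |= MY_OBJ_SEEN;	/* atomic context: no sleeping */
	return 0;			/* non-zero stops scanning this bucket */
}

static void mark_key(struct cfs_hash *hs, const void *key)
{
	cfs_hash_for_each_key(hs, key, match_cb, NULL);
}
#endif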
1764 
1765 /**
1766  * Rehash the libcfs hash @hs to the given @bits.  This can be used
1767  * to grow the hash size when excessive chaining is detected, or to
1768  * shrink the hash when it is larger than needed.  When the CFS_HASH_REHASH
1769  * flag is set in @hs, the libcfs hash may be dynamically rehashed
1770  * during addition or removal if the hash's theta value drops below
1771  * hs->hs_min_theta or exceeds hs->hs_max_theta.  By default
1772  * these values are tuned to keep the chained hash depth small, and
1773  * this approach assumes a reasonably uniform hashing function.  The
1774  * theta thresholds for @hs are tunable via cfs_hash_set_theta().
1775  */
1776 void
1777 cfs_hash_rehash_cancel_locked(struct cfs_hash *hs)
1778 {
1779 	int     i;
1780 
1781 	/* caller must hold cfs_hash_lock(hs, 1) */
1782 	LASSERT(cfs_hash_with_rehash(hs) &&
1783 		!cfs_hash_with_no_lock(hs));
1784 
1785 	if (!cfs_hash_is_rehashing(hs))
1786 		return;
1787 
1788 	if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_rehash_wi)) {
1789 		hs->hs_rehash_bits = 0;
1790 		return;
1791 	}
1792 
1793 	for (i = 2; cfs_hash_is_rehashing(hs); i++) {
1794 		cfs_hash_unlock(hs, 1);
1795 		/* raise a console warning if we have been waiting too long */
1796 		CDEBUG(IS_PO2(i >> 3) ? D_WARNING : D_INFO,
1797 		       "hash %s is still rehashing, rescheded %d\n",
1798 		       hs->hs_name, i - 1);
1799 		cond_resched();
1800 		cfs_hash_lock(hs, 1);
1801 	}
1802 }
1803 EXPORT_SYMBOL(cfs_hash_rehash_cancel_locked);
1804 
1805 void
1806 cfs_hash_rehash_cancel(struct cfs_hash *hs)
1807 {
1808 	cfs_hash_lock(hs, 1);
1809 	cfs_hash_rehash_cancel_locked(hs);
1810 	cfs_hash_unlock(hs, 1);
1811 }
1812 EXPORT_SYMBOL(cfs_hash_rehash_cancel);
1813 
1814 int
1815 cfs_hash_rehash(struct cfs_hash *hs, int do_rehash)
1816 {
1817 	int     rc;
1818 
1819 	LASSERT(cfs_hash_with_rehash(hs) && !cfs_hash_with_no_lock(hs));
1820 
1821 	cfs_hash_lock(hs, 1);
1822 
1823 	rc = cfs_hash_rehash_bits(hs);
1824 	if (rc <= 0) {
1825 		cfs_hash_unlock(hs, 1);
1826 		return rc;
1827 	}
1828 
1829 	hs->hs_rehash_bits = rc;
1830 	if (!do_rehash) {
1831 		/* launch and return */
1832 		cfs_wi_schedule(cfs_sched_rehash, &hs->hs_rehash_wi);
1833 		cfs_hash_unlock(hs, 1);
1834 		return 0;
1835 	}
1836 
1837 	/* rehash right now */
1838 	cfs_hash_unlock(hs, 1);
1839 
1840 	return cfs_hash_rehash_worker(&hs->hs_rehash_wi);
1841 }
1842 EXPORT_SYMBOL(cfs_hash_rehash);
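
/*
 * Illustrative usage sketch (not compiled): the two modes of
 * cfs_hash_rehash().  With do_rehash == 0 the rehash work item is only
 * scheduled; with do_rehash != 0 the rehash runs synchronously in the
 * caller's context.  "my_hash_resize" is a hypothetical wrapper.
 */
#if 0
static int my_hash_resize(struct cfs_hash *hs, int sync)
{
	/* a no-op (rc <= 0) unless cfs_hash_rehash_bits() requests a new
	 * size; the hash must have CFS_HASH_REHASH and must not be lockless */
	return cfs_hash_rehash(hs, sync);
}
#endif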
1843 
1844 static int
1845 cfs_hash_rehash_bd(struct cfs_hash *hs, struct cfs_hash_bd *old)
1846 {
1847 	struct cfs_hash_bd      new;
1848 	struct hlist_head  *hhead;
1849 	struct hlist_node  *hnode;
1850 	struct hlist_node  *pos;
1851 	void	      *key;
1852 	int		c = 0;
1853 
1854 	/* caller holds cfs_hash_lock(hs, 1), so no bucket lock is needed */
1855 	cfs_hash_bd_for_each_hlist(hs, old, hhead) {
1856 		hlist_for_each_safe(hnode, pos, hhead) {
1857 			key = cfs_hash_key(hs, hnode);
1858 			LASSERT(key != NULL);
1859 			/* Validate hnode is in the correct bucket. */
1860 			cfs_hash_bucket_validate(hs, old, hnode);
1861 			/*
1862 			 * Delete from old hash bucket; move to new bucket.
1863 			 * ops->hs_key must be defined.
1864 			 */
1865 			cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
1866 					     hs->hs_rehash_bits, key, &new);
1867 			cfs_hash_bd_move_locked(hs, old, &new, hnode);
1868 			c++;
1869 		}
1870 	}
1871 
1872 	return c;
1873 }
1874 
1875 static int
1876 cfs_hash_rehash_worker(cfs_workitem_t *wi)
1877 {
1878 	struct cfs_hash	 *hs = container_of(wi, struct cfs_hash, hs_rehash_wi);
1879 	struct cfs_hash_bucket **bkts;
1880 	struct cfs_hash_bd       bd;
1881 	unsigned int	old_size;
1882 	unsigned int	new_size;
1883 	int		 bsize;
1884 	int		 count = 0;
1885 	int		 rc = 0;
1886 	int		 i;
1887 
1888 	LASSERT(hs != NULL && cfs_hash_with_rehash(hs));
1889 
1890 	cfs_hash_lock(hs, 0);
1891 	LASSERT(cfs_hash_is_rehashing(hs));
1892 
1893 	old_size = CFS_HASH_NBKT(hs);
1894 	new_size = CFS_HASH_RH_NBKT(hs);
1895 
1896 	cfs_hash_unlock(hs, 0);
1897 
1898 	/*
1899 	 * don't need hs::hs_rwlock for hs::hs_buckets,
1900 	 * because nobody can change bkt-table except me.
1901 	 */
1902 	bkts = cfs_hash_buckets_realloc(hs, hs->hs_buckets,
1903 					old_size, new_size);
1904 	cfs_hash_lock(hs, 1);
1905 	if (bkts == NULL) {
1906 		rc = -ENOMEM;
1907 		goto out;
1908 	}
1909 
1910 	if (bkts == hs->hs_buckets) {
1911 		bkts = NULL; /* do nothing */
1912 		goto out;
1913 	}
1914 
1915 	rc = __cfs_hash_theta(hs);
1916 	if ((rc >= hs->hs_min_theta) && (rc <= hs->hs_max_theta)) {
1917 		/* free the new allocated bkt-table */
1918 		old_size = new_size;
1919 		new_size = CFS_HASH_NBKT(hs);
1920 		rc = -EALREADY;
1921 		goto out;
1922 	}
1923 
1924 	LASSERT(hs->hs_rehash_buckets == NULL);
1925 	hs->hs_rehash_buckets = bkts;
1926 
1927 	rc = 0;
1928 	cfs_hash_for_each_bucket(hs, &bd, i) {
1929 		if (cfs_hash_is_exiting(hs)) {
1930 			rc = -ESRCH;
1931 			/* someone wants to destroy the hash, abort now */
1932 			if (old_size < new_size) /* OK to free old bkt-table */
1933 				break;
1934 			/* it's shrinking, need to free the new bkt-table */
1935 			hs->hs_rehash_buckets = NULL;
1936 			old_size = new_size;
1937 			new_size = CFS_HASH_NBKT(hs);
1938 			goto out;
1939 		}
1940 
1941 		count += cfs_hash_rehash_bd(hs, &bd);
1942 		if (count < CFS_HASH_LOOP_HOG ||
1943 		    cfs_hash_is_iterating(hs)) { /* need to finish ASAP */
1944 			continue;
1945 		}
1946 
1947 		count = 0;
1948 		cfs_hash_unlock(hs, 1);
1949 		cond_resched();
1950 		cfs_hash_lock(hs, 1);
1951 	}
1952 
1953 	hs->hs_rehash_count++;
1954 
1955 	bkts = hs->hs_buckets;
1956 	hs->hs_buckets = hs->hs_rehash_buckets;
1957 	hs->hs_rehash_buckets = NULL;
1958 
1959 	hs->hs_cur_bits = hs->hs_rehash_bits;
1960  out:
1961 	hs->hs_rehash_bits = 0;
1962 	if (rc == -ESRCH) /* will never be scheduled again */
1963 		cfs_wi_exit(cfs_sched_rehash, wi);
1964 	bsize = cfs_hash_bkt_size(hs);
1965 	cfs_hash_unlock(hs, 1);
1966 	/* can't refer to @hs anymore because it could be destroyed */
1967 	if (bkts != NULL)
1968 		cfs_hash_buckets_free(bkts, bsize, new_size, old_size);
1969 	if (rc != 0)
1970 		CDEBUG(D_INFO, "early quit of rehashing: %d\n", rc);
1971 	/* return 1 only if cfs_wi_exit is called */
1972 	return rc == -ESRCH;
1973 }
1974 
1975 /**
1976  * Rehash the object referenced by @hnode in the libcfs hash @hs.  The
1977  * @old_key must be provided to locate the object's previous location
1978  * in the hash, and the @new_key will be used to reinsert the object.
1979  * Use this function instead of a cfs_hash_add() + cfs_hash_del()
1980  * combo when it is critical that there is no window in time where the
1981  * object is missing from the hash.  When an object is being rehashed
1982  * the registered cfs_hash_get() and cfs_hash_put() functions will
1983  * not be called.
1984  */
1985 void cfs_hash_rehash_key(struct cfs_hash *hs, const void *old_key,
1986 			 void *new_key, struct hlist_node *hnode)
1987 {
1988 	struct cfs_hash_bd	bds[3];
1989 	struct cfs_hash_bd	old_bds[2];
1990 	struct cfs_hash_bd	new_bd;
1991 
1992 	LASSERT(!hlist_unhashed(hnode));
1993 
1994 	cfs_hash_lock(hs, 0);
1995 
1996 	cfs_hash_dual_bd_get(hs, old_key, old_bds);
1997 	cfs_hash_bd_get(hs, new_key, &new_bd);
1998 
1999 	bds[0] = old_bds[0];
2000 	bds[1] = old_bds[1];
2001 	bds[2] = new_bd;
2002 
2003 	/* NB: bds[0] and bds[1] are ordered already */
2004 	cfs_hash_bd_order(&bds[1], &bds[2]);
2005 	cfs_hash_bd_order(&bds[0], &bds[1]);
2006 
2007 	cfs_hash_multi_bd_lock(hs, bds, 3, 1);
2008 	if (likely(old_bds[1].bd_bucket == NULL)) {
2009 		cfs_hash_bd_move_locked(hs, &old_bds[0], &new_bd, hnode);
2010 	} else {
2011 		cfs_hash_dual_bd_finddel_locked(hs, old_bds, old_key, hnode);
2012 		cfs_hash_bd_add_locked(hs, &new_bd, hnode);
2013 	}
2014 	/* overwrite the key while the locks are held, otherwise this may
2015 	 * race with other operations, e.g. rehash */
2016 	cfs_hash_keycpy(hs, new_key, hnode);
2017 
2018 	cfs_hash_multi_bd_unlock(hs, bds, 3, 1);
2019 	cfs_hash_unlock(hs, 0);
2020 }
2021 EXPORT_SYMBOL(cfs_hash_rehash_key);
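
/*
 * Illustrative usage sketch (not compiled): moving an object to a new key
 * without it ever disappearing from the hash, as described above.
 * "struct my_obj" and its fields are hypothetical.
 */
#if 0
static void my_obj_change_key(struct cfs_hash *hs, struct my_obj *o,
			      void *new_key)
{
	/* hs_get/hs_put are not called on @o during the move */
	cfs_hash_rehash_key(hs, &o->mo_key, new_key, &o->mo_hnode);
}
#endif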
2022 
2023 void cfs_hash_debug_header(struct seq_file *m)
2024 {
2025 	seq_printf(m, "%-*s   cur   min   max theta t-min t-max flags rehash   count  maxdep maxdepb distribution\n",
2026 		   CFS_HASH_BIGNAME_LEN, "name");
2027 }
2028 EXPORT_SYMBOL(cfs_hash_debug_header);
2029 
2030 static struct cfs_hash_bucket **
2031 cfs_hash_full_bkts(struct cfs_hash *hs)
2032 {
2033 	/* NB: caller should hold hs->hs_rwlock if REHASH is set */
2034 	if (hs->hs_rehash_buckets == NULL)
2035 		return hs->hs_buckets;
2036 
2037 	LASSERT(hs->hs_rehash_bits != 0);
2038 	return hs->hs_rehash_bits > hs->hs_cur_bits ?
2039 	       hs->hs_rehash_buckets : hs->hs_buckets;
2040 }
2041 
2042 static unsigned int
2043 cfs_hash_full_nbkt(struct cfs_hash *hs)
2044 {
2045 	/* NB: caller should hold hs->hs_rwlock if REHASH is set */
2046 	if (hs->hs_rehash_buckets == NULL)
2047 		return CFS_HASH_NBKT(hs);
2048 
2049 	LASSERT(hs->hs_rehash_bits != 0);
2050 	return hs->hs_rehash_bits > hs->hs_cur_bits ?
2051 	       CFS_HASH_RH_NBKT(hs) : CFS_HASH_NBKT(hs);
2052 }
2053 
2054 void cfs_hash_debug_str(struct cfs_hash *hs, struct seq_file *m)
2055 {
2056 	int		    dist[8] = { 0, };
2057 	int		    maxdep  = -1;
2058 	int		    maxdepb = -1;
2059 	int		    total   = 0;
2060 	int		    theta;
2061 	int		    i;
2062 
2063 	cfs_hash_lock(hs, 0);
2064 	theta = __cfs_hash_theta(hs);
2065 
2066 	seq_printf(m, "%-*s %5d %5d %5d %d.%03d %d.%03d %d.%03d  0x%02x %6d ",
2067 		      CFS_HASH_BIGNAME_LEN, hs->hs_name,
2068 		      1 << hs->hs_cur_bits, 1 << hs->hs_min_bits,
2069 		      1 << hs->hs_max_bits,
2070 		      __cfs_hash_theta_int(theta), __cfs_hash_theta_frac(theta),
2071 		      __cfs_hash_theta_int(hs->hs_min_theta),
2072 		      __cfs_hash_theta_frac(hs->hs_min_theta),
2073 		      __cfs_hash_theta_int(hs->hs_max_theta),
2074 		      __cfs_hash_theta_frac(hs->hs_max_theta),
2075 		      hs->hs_flags, hs->hs_rehash_count);
2076 
2077 	/*
2078 	 * The distribution is a summary of the chained hash depth in
2079 	 * each of the libcfs hash buckets.  Each bucket's hsb_count is
2080 	 * divided by the hash theta value and used to generate a
2081 	 * histogram of the hash distribution.  A uniform hash will
2082 	 * result in all hash buckets being close to the average, thus
2083 	 * only the first few entries in the histogram will be non-zero.
2084 	 * If your hash function results in a non-uniform hash, this will
2085 	 * be observable as outlier buckets in the distribution histogram.
2086 	 *
2087 	 * Uniform hash distribution:      128/128/0/0/0/0/0/0
2088 	 * Non-Uniform hash distribution:  128/125/0/0/0/0/2/1
2089 	 */
2090 	for (i = 0; i < cfs_hash_full_nbkt(hs); i++) {
2091 		struct cfs_hash_bd  bd;
2092 
2093 		bd.bd_bucket = cfs_hash_full_bkts(hs)[i];
2094 		cfs_hash_bd_lock(hs, &bd, 0);
2095 		if (maxdep < bd.bd_bucket->hsb_depmax) {
2096 			maxdep  = bd.bd_bucket->hsb_depmax;
2097 			maxdepb = ffz(~maxdep);
2098 		}
2099 		total += bd.bd_bucket->hsb_count;
2100 		dist[min(fls(bd.bd_bucket->hsb_count / max(theta, 1)), 7)]++;
2101 		cfs_hash_bd_unlock(hs, &bd, 0);
2102 	}
2103 
2104 	seq_printf(m, "%7d %7d %7d ", total, maxdep, maxdepb);
2105 	for (i = 0; i < 8; i++)
2106 		seq_printf(m, "%d%c",  dist[i], (i == 7) ? '\n' : '/');
2107 
2108 	cfs_hash_unlock(hs, 0);
2109 }
2110 EXPORT_SYMBOL(cfs_hash_debug_str);
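
/*
 * Illustrative usage sketch (not compiled): a seq_file show routine that
 * combines cfs_hash_debug_header() and cfs_hash_debug_str(), e.g. behind a
 * procfs/debugfs file.  "my_hash_seq_show" is a hypothetical name and the
 * hash pointer is assumed to be stashed in m->private by the open routine.
 */
#if 0
static int my_hash_seq_show(struct seq_file *m, void *data)
{
	struct cfs_hash *hs = m->private;

	cfs_hash_debug_header(m);	/* print the column titles */
	cfs_hash_debug_str(hs, m);	/* one summary line for this hash */
	return 0;
}
#endif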
2111