1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2012, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  *
32  * libcfs/libcfs/hash.c
33  *
34  * Implements a hash class used for hashing in the Lustre system.
35  *
36  * Author: YuZhangyong <yzy@clusterfs.com>
37  *
38  * 2008-08-15: Brian Behlendorf <behlendorf1@llnl.gov>
39  * - Simplified API and improved documentation
40  * - Added per-hash feature flags:
41  *   * CFS_HASH_DEBUG additional validation
42  *   * CFS_HASH_REHASH dynamic rehashing
43  * - Added per-hash statistics
44  * - General performance enhancements
45  *
46  * 2009-07-31: Liang Zhen <zhen.liang@sun.com>
47  * - move all stuff to libcfs
48  * - don't allow cur_bits != max_bits without setting of CFS_HASH_REHASH
49  * - ignore hs_rwlock if without CFS_HASH_REHASH setting
50  * - buckets are allocated one by one (instead of as contiguous memory),
51  *   to avoid unnecessary cacheline conflict
52  *
53  * 2010-03-01: Liang Zhen <zhen.liang@sun.com>
54  * - "bucket" is a group of hlist_head now, user can specify bucket size
55  *   by bkt_bits of cfs_hash_create(), all hlist_heads in a bucket share
56  *   one lock for reducing memory overhead.
57  *
58  * - support lockless hash, caller will take care of locks:
59  *   avoid lock overhead for hash tables that are already protected
60  *   by locking in the caller for another reason
61  *
62  * - support both spin_lock/rwlock for bucket:
63  *   overhead of spinlock contention is lower than read/write
64  *   contention of rwlock, so using spinlock to serialize operations on
65  *   bucket is more reasonable for those frequently changed hash tables
66  *
67  * - support one-single lock mode:
68  *   one lock to protect all hash operations to avoid overhead of
69  *   multiple locks if hash table is always small
70  *
71  * - removed a lot of unnecessary addref & decref on hash element:
72  *   addref & decref are atomic operations in many use-cases which
73  *   are expensive.
74  *
75  * - support non-blocking cfs_hash_add() and cfs_hash_findadd():
76  *   some Lustre use-cases require these functions to be strictly
77  *   non-blocking, so any required rehash is scheduled on a different
78  *   thread in those cases.
79  *
80  * - safer rehash on large hash table
81  *   In the old implementation, the rehash function exclusively locked
82  *   the hash table and finished the rehash in one batch; this is dangerous
83  *   on SMP systems because rehashing millions of elements can take a long
84  *   time. The new implementation can release the lock and relax the CPU in
85  *   the middle of a rehash, so it is safe for another thread to
86  *   search/change the hash table even while it is rehashing.
87  *
88  * - support two different refcount modes
89  *   . hash table has refcount on element
90  *   . hash table doesn't change refcount on adding/removing element
91  *
92  * - support long name hash table (for param-tree)
93  *
94  * - fix a bug for cfs_hash_rehash_key:
95  *   in old implementation, cfs_hash_rehash_key could screw up the
96  *   hash-table because @key is overwritten without any protection.
97  *   Now the user must define hs_keycpy for those rehash-enabled
98  *   hash tables; cfs_hash_rehash_key will overwrite the hash key
99  *   inside the lock by calling hs_keycpy.
100  *
101  * - better hash iteration:
102  *   Now we support both locked iteration & lockless iteration of hash
103  *   table. Also, the user can break the iteration by returning 1 from the callback.
104  */
105 #include <linux/seq_file.h>
106 #include <linux/log2.h>
107 
108 #include <linux/libcfs/libcfs.h>
109 
110 #if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
111 static unsigned int warn_on_depth = 8;
112 module_param(warn_on_depth, uint, 0644);
113 MODULE_PARM_DESC(warn_on_depth, "warning when hash depth is high.");
114 #endif
115 
116 struct cfs_wi_sched *cfs_sched_rehash;
117 
118 static inline void
119 cfs_hash_nl_lock(union cfs_hash_lock *lock, int exclusive) {}
120 
121 static inline void
122 cfs_hash_nl_unlock(union cfs_hash_lock *lock, int exclusive) {}
123 
124 static inline void
125 cfs_hash_spin_lock(union cfs_hash_lock *lock, int exclusive)
126 	__acquires(&lock->spin)
127 {
128 	spin_lock(&lock->spin);
129 }
130 
131 static inline void
132 cfs_hash_spin_unlock(union cfs_hash_lock *lock, int exclusive)
133 	__releases(&lock->spin)
134 {
135 	spin_unlock(&lock->spin);
136 }
137 
138 static inline void
139 cfs_hash_rw_lock(union cfs_hash_lock *lock, int exclusive)
140 	__acquires(&lock->rw)
141 {
142 	if (!exclusive)
143 		read_lock(&lock->rw);
144 	else
145 		write_lock(&lock->rw);
146 }
147 
148 static inline void
149 cfs_hash_rw_unlock(union cfs_hash_lock *lock, int exclusive)
150 	__releases(&lock->rw)
151 {
152 	if (!exclusive)
153 		read_unlock(&lock->rw);
154 	else
155 		write_unlock(&lock->rw);
156 }
157 
158 /** No lock hash */
159 static struct cfs_hash_lock_ops cfs_hash_nl_lops = {
160 	.hs_lock	= cfs_hash_nl_lock,
161 	.hs_unlock	= cfs_hash_nl_unlock,
162 	.hs_bkt_lock	= cfs_hash_nl_lock,
163 	.hs_bkt_unlock	= cfs_hash_nl_unlock,
164 };
165 
166 /** no bucket lock, one spinlock to protect everything */
167 static struct cfs_hash_lock_ops cfs_hash_nbl_lops = {
168 	.hs_lock	= cfs_hash_spin_lock,
169 	.hs_unlock	= cfs_hash_spin_unlock,
170 	.hs_bkt_lock	= cfs_hash_nl_lock,
171 	.hs_bkt_unlock	= cfs_hash_nl_unlock,
172 };
173 
174 /** spin bucket lock, rehash is enabled */
175 static struct cfs_hash_lock_ops cfs_hash_bkt_spin_lops = {
176 	.hs_lock	= cfs_hash_rw_lock,
177 	.hs_unlock	= cfs_hash_rw_unlock,
178 	.hs_bkt_lock	= cfs_hash_spin_lock,
179 	.hs_bkt_unlock	= cfs_hash_spin_unlock,
180 };
181 
182 /** rw bucket lock, rehash is enabled */
183 static struct cfs_hash_lock_ops cfs_hash_bkt_rw_lops = {
184 	.hs_lock	= cfs_hash_rw_lock,
185 	.hs_unlock	= cfs_hash_rw_unlock,
186 	.hs_bkt_lock	= cfs_hash_rw_lock,
187 	.hs_bkt_unlock	= cfs_hash_rw_unlock,
188 };
189 
190 /** spin bucket lock, rehash is disabled */
191 static struct cfs_hash_lock_ops cfs_hash_nr_bkt_spin_lops = {
192 	.hs_lock	= cfs_hash_nl_lock,
193 	.hs_unlock	= cfs_hash_nl_unlock,
194 	.hs_bkt_lock	= cfs_hash_spin_lock,
195 	.hs_bkt_unlock	= cfs_hash_spin_unlock,
196 };
197 
198 /** rw bucket lock, rehash is disabled */
199 static struct cfs_hash_lock_ops cfs_hash_nr_bkt_rw_lops = {
200 	.hs_lock	= cfs_hash_nl_lock,
201 	.hs_unlock	= cfs_hash_nl_unlock,
202 	.hs_bkt_lock	= cfs_hash_rw_lock,
203 	.hs_bkt_unlock	= cfs_hash_rw_unlock,
204 };
205 
206 static void
207 cfs_hash_lock_setup(struct cfs_hash *hs)
208 {
209 	if (cfs_hash_with_no_lock(hs)) {
210 		hs->hs_lops = &cfs_hash_nl_lops;
211 
212 	} else if (cfs_hash_with_no_bktlock(hs)) {
213 		hs->hs_lops = &cfs_hash_nbl_lops;
214 		spin_lock_init(&hs->hs_lock.spin);
215 
216 	} else if (cfs_hash_with_rehash(hs)) {
217 		rwlock_init(&hs->hs_lock.rw);
218 
219 		if (cfs_hash_with_rw_bktlock(hs))
220 			hs->hs_lops = &cfs_hash_bkt_rw_lops;
221 		else if (cfs_hash_with_spin_bktlock(hs))
222 			hs->hs_lops = &cfs_hash_bkt_spin_lops;
223 		else
224 			LBUG();
225 	} else {
226 		if (cfs_hash_with_rw_bktlock(hs))
227 			hs->hs_lops = &cfs_hash_nr_bkt_rw_lops;
228 		else if (cfs_hash_with_spin_bktlock(hs))
229 			hs->hs_lops = &cfs_hash_nr_bkt_spin_lops;
230 		else
231 			LBUG();
232 	}
233 }
234 
235 /**
236  * Simple hash head without depth tracking
237  * new element is always added to head of hlist
238  */
239 struct cfs_hash_head {
240 	struct hlist_head	hh_head;	/**< entries list */
241 };
242 
243 static int
244 cfs_hash_hh_hhead_size(struct cfs_hash *hs)
245 {
246 	return sizeof(struct cfs_hash_head);
247 }
248 
249 static struct hlist_head *
250 cfs_hash_hh_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
251 {
252 	struct cfs_hash_head *head;
253 
254 	head = (struct cfs_hash_head *)&bd->bd_bucket->hsb_head[0];
255 	return &head[bd->bd_offset].hh_head;
256 }
257 
258 static int
259 cfs_hash_hh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
260 		      struct hlist_node *hnode)
261 {
262 	hlist_add_head(hnode, cfs_hash_hh_hhead(hs, bd));
263 	return -1; /* unknown depth */
264 }
265 
266 static int
267 cfs_hash_hh_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
268 		      struct hlist_node *hnode)
269 {
270 	hlist_del_init(hnode);
271 	return -1; /* unknown depth */
272 }
273 
274 /**
275  * Simple hash head with depth tracking
276  * new element is always added to head of hlist
277  */
278 struct cfs_hash_head_dep {
279 	struct hlist_head	hd_head;	/**< entries list */
280 	unsigned int		hd_depth;	/**< list length */
281 };
282 
283 static int
284 cfs_hash_hd_hhead_size(struct cfs_hash *hs)
285 {
286 	return sizeof(struct cfs_hash_head_dep);
287 }
288 
289 static struct hlist_head *
290 cfs_hash_hd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
291 {
292 	struct cfs_hash_head_dep *head;
293 
294 	head = (struct cfs_hash_head_dep *)&bd->bd_bucket->hsb_head[0];
295 	return &head[bd->bd_offset].hd_head;
296 }
297 
298 static int
299 cfs_hash_hd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
300 		      struct hlist_node *hnode)
301 {
302 	struct cfs_hash_head_dep *hh;
303 
304 	hh = container_of(cfs_hash_hd_hhead(hs, bd),
305 			  struct cfs_hash_head_dep, hd_head);
306 	hlist_add_head(hnode, &hh->hd_head);
307 	return ++hh->hd_depth;
308 }
309 
310 static int
311 cfs_hash_hd_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
312 		      struct hlist_node *hnode)
313 {
314 	struct cfs_hash_head_dep *hh;
315 
316 	hh = container_of(cfs_hash_hd_hhead(hs, bd),
317 			  struct cfs_hash_head_dep, hd_head);
318 	hlist_del_init(hnode);
319 	return --hh->hd_depth;
320 }
321 
322 /**
323  * double links hash head without depth tracking
324  * new element is always added to tail of hlist
325  */
326 struct cfs_hash_dhead {
327 	struct hlist_head	dh_head;	/**< entries list */
328 	struct hlist_node	*dh_tail;	/**< the last entry */
329 };
330 
331 static int
332 cfs_hash_dh_hhead_size(struct cfs_hash *hs)
333 {
334 	return sizeof(struct cfs_hash_dhead);
335 }
336 
337 static struct hlist_head *
338 cfs_hash_dh_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
339 {
340 	struct cfs_hash_dhead *head;
341 
342 	head = (struct cfs_hash_dhead *)&bd->bd_bucket->hsb_head[0];
343 	return &head[bd->bd_offset].dh_head;
344 }
345 
346 static int
347 cfs_hash_dh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
348 		      struct hlist_node *hnode)
349 {
350 	struct cfs_hash_dhead *dh;
351 
352 	dh = container_of(cfs_hash_dh_hhead(hs, bd),
353 			  struct cfs_hash_dhead, dh_head);
354 	if (dh->dh_tail) /* not empty */
355 		hlist_add_behind(hnode, dh->dh_tail);
356 	else /* empty list */
357 		hlist_add_head(hnode, &dh->dh_head);
358 	dh->dh_tail = hnode;
359 	return -1; /* unknown depth */
360 }
361 
362 static int
363 cfs_hash_dh_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
364 		      struct hlist_node *hnd)
365 {
366 	struct cfs_hash_dhead *dh;
367 
368 	dh = container_of(cfs_hash_dh_hhead(hs, bd),
369 			  struct cfs_hash_dhead, dh_head);
370 	if (!hnd->next) { /* it's the tail */
371 		dh->dh_tail = (hnd->pprev == &dh->dh_head.first) ? NULL :
372 			      container_of(hnd->pprev, struct hlist_node, next);
373 	}
374 	hlist_del_init(hnd);
375 	return -1; /* unknown depth */
376 }
377 
378 /**
379  * double links hash head with depth tracking
380  * new element is always added to tail of hlist
381  */
382 struct cfs_hash_dhead_dep {
383 	struct hlist_head	dd_head;	/**< entries list */
384 	struct hlist_node	*dd_tail;	/**< the last entry */
385 	unsigned int		dd_depth;	/**< list length */
386 };
387 
388 static int
389 cfs_hash_dd_hhead_size(struct cfs_hash *hs)
390 {
391 	return sizeof(struct cfs_hash_dhead_dep);
392 }
393 
394 static struct hlist_head *
395 cfs_hash_dd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
396 {
397 	struct cfs_hash_dhead_dep *head;
398 
399 	head = (struct cfs_hash_dhead_dep *)&bd->bd_bucket->hsb_head[0];
400 	return &head[bd->bd_offset].dd_head;
401 }
402 
403 static int
404 cfs_hash_dd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
405 		      struct hlist_node *hnode)
406 {
407 	struct cfs_hash_dhead_dep *dh;
408 
409 	dh = container_of(cfs_hash_dd_hhead(hs, bd),
410 			  struct cfs_hash_dhead_dep, dd_head);
411 	if (dh->dd_tail) /* not empty */
412 		hlist_add_behind(hnode, dh->dd_tail);
413 	else /* empty list */
414 		hlist_add_head(hnode, &dh->dd_head);
415 	dh->dd_tail = hnode;
416 	return ++dh->dd_depth;
417 }
418 
419 static int
420 cfs_hash_dd_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
421 		      struct hlist_node *hnd)
422 {
423 	struct cfs_hash_dhead_dep *dh;
424 
425 	dh = container_of(cfs_hash_dd_hhead(hs, bd),
426 			  struct cfs_hash_dhead_dep, dd_head);
427 	if (!hnd->next) { /* it's the tail */
428 		dh->dd_tail = (hnd->pprev == &dh->dd_head.first) ? NULL :
429 			      container_of(hnd->pprev, struct hlist_node, next);
430 	}
431 	hlist_del_init(hnd);
432 	return --dh->dd_depth;
433 }
434 
435 static struct cfs_hash_hlist_ops cfs_hash_hh_hops = {
436 	.hop_hhead	= cfs_hash_hh_hhead,
437 	.hop_hhead_size	= cfs_hash_hh_hhead_size,
438 	.hop_hnode_add	= cfs_hash_hh_hnode_add,
439 	.hop_hnode_del	= cfs_hash_hh_hnode_del,
440 };
441 
442 static struct cfs_hash_hlist_ops cfs_hash_hd_hops = {
443 	.hop_hhead	= cfs_hash_hd_hhead,
444 	.hop_hhead_size	= cfs_hash_hd_hhead_size,
445 	.hop_hnode_add	= cfs_hash_hd_hnode_add,
446 	.hop_hnode_del	= cfs_hash_hd_hnode_del,
447 };
448 
449 static struct cfs_hash_hlist_ops cfs_hash_dh_hops = {
450 	.hop_hhead	= cfs_hash_dh_hhead,
451 	.hop_hhead_size	= cfs_hash_dh_hhead_size,
452 	.hop_hnode_add	= cfs_hash_dh_hnode_add,
453 	.hop_hnode_del	= cfs_hash_dh_hnode_del,
454 };
455 
456 static struct cfs_hash_hlist_ops cfs_hash_dd_hops = {
457 	.hop_hhead	= cfs_hash_dd_hhead,
458 	.hop_hhead_size	= cfs_hash_dd_hhead_size,
459 	.hop_hnode_add	= cfs_hash_dd_hnode_add,
460 	.hop_hnode_del	= cfs_hash_dd_hnode_del,
461 };
462 
463 static void
464 cfs_hash_hlist_setup(struct cfs_hash *hs)
465 {
466 	if (cfs_hash_with_add_tail(hs)) {
467 		hs->hs_hops = cfs_hash_with_depth(hs) ?
468 			      &cfs_hash_dd_hops : &cfs_hash_dh_hops;
469 	} else {
470 		hs->hs_hops = cfs_hash_with_depth(hs) ?
471 			      &cfs_hash_hd_hops : &cfs_hash_hh_hops;
472 	}
473 }
474 
475 static void
476 cfs_hash_bd_from_key(struct cfs_hash *hs, struct cfs_hash_bucket **bkts,
477 		     unsigned int bits, const void *key, struct cfs_hash_bd *bd)
478 {
479 	unsigned int index = cfs_hash_id(hs, key, (1U << bits) - 1);
480 
481 	LASSERT(bits == hs->hs_cur_bits || bits == hs->hs_rehash_bits);
482 
483 	bd->bd_bucket = bkts[index & ((1U << (bits - hs->hs_bkt_bits)) - 1)];
484 	bd->bd_offset = index >> (bits - hs->hs_bkt_bits);
485 }
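
/*
 * Worked example (illustrative, not part of the original source): with
 * bits = 7 and hs_bkt_bits = 3 there are 1 << (7 - 3) = 16 buckets of
 * 1 << 3 = 8 hlist heads each.  A hash index of 0x5a (binary 1011010)
 * gives bd_bucket = bkts[0x5a & 0xf] = bkts[10] and bd_offset = 0x5a >> 4
 * = 5, i.e. the low (bits - bkt_bits) bits select the bucket and the high
 * bkt_bits bits select the hlist head inside that bucket.
 */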
486 
487 void
488 cfs_hash_bd_get(struct cfs_hash *hs, const void *key, struct cfs_hash_bd *bd)
489 {
490 	/* NB: caller should hold hs->hs_rwlock if REHASH is set */
491 	if (likely(!hs->hs_rehash_buckets)) {
492 		cfs_hash_bd_from_key(hs, hs->hs_buckets,
493 				     hs->hs_cur_bits, key, bd);
494 	} else {
495 		LASSERT(hs->hs_rehash_bits);
496 		cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
497 				     hs->hs_rehash_bits, key, bd);
498 	}
499 }
500 EXPORT_SYMBOL(cfs_hash_bd_get);
501 
502 static inline void
503 cfs_hash_bd_dep_record(struct cfs_hash *hs, struct cfs_hash_bd *bd, int dep_cur)
504 {
505 	if (likely(dep_cur <= bd->bd_bucket->hsb_depmax))
506 		return;
507 
508 	bd->bd_bucket->hsb_depmax = dep_cur;
509 # if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
510 	if (likely(!warn_on_depth ||
511 		   max(warn_on_depth, hs->hs_dep_max) >= dep_cur))
512 		return;
513 
514 	spin_lock(&hs->hs_dep_lock);
515 	hs->hs_dep_max = dep_cur;
516 	hs->hs_dep_bkt = bd->bd_bucket->hsb_index;
517 	hs->hs_dep_off = bd->bd_offset;
518 	hs->hs_dep_bits = hs->hs_cur_bits;
519 	spin_unlock(&hs->hs_dep_lock);
520 
521 	cfs_wi_schedule(cfs_sched_rehash, &hs->hs_dep_wi);
522 # endif
523 }
524 
525 void
526 cfs_hash_bd_add_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
527 		       struct hlist_node *hnode)
528 {
529 	int rc;
530 
531 	rc = hs->hs_hops->hop_hnode_add(hs, bd, hnode);
532 	cfs_hash_bd_dep_record(hs, bd, rc);
533 	bd->bd_bucket->hsb_version++;
534 	if (unlikely(!bd->bd_bucket->hsb_version))
535 		bd->bd_bucket->hsb_version++;
536 	bd->bd_bucket->hsb_count++;
537 
538 	if (cfs_hash_with_counter(hs))
539 		atomic_inc(&hs->hs_count);
540 	if (!cfs_hash_with_no_itemref(hs))
541 		cfs_hash_get(hs, hnode);
542 }
543 EXPORT_SYMBOL(cfs_hash_bd_add_locked);
544 
545 void
546 cfs_hash_bd_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
547 		       struct hlist_node *hnode)
548 {
549 	hs->hs_hops->hop_hnode_del(hs, bd, hnode);
550 
551 	LASSERT(bd->bd_bucket->hsb_count > 0);
552 	bd->bd_bucket->hsb_count--;
553 	bd->bd_bucket->hsb_version++;
554 	if (unlikely(!bd->bd_bucket->hsb_version))
555 		bd->bd_bucket->hsb_version++;
556 
557 	if (cfs_hash_with_counter(hs)) {
558 		LASSERT(atomic_read(&hs->hs_count) > 0);
559 		atomic_dec(&hs->hs_count);
560 	}
561 	if (!cfs_hash_with_no_itemref(hs))
562 		cfs_hash_put_locked(hs, hnode);
563 }
564 EXPORT_SYMBOL(cfs_hash_bd_del_locked);
565 
566 void
567 cfs_hash_bd_move_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd_old,
568 			struct cfs_hash_bd *bd_new, struct hlist_node *hnode)
569 {
570 	struct cfs_hash_bucket *obkt = bd_old->bd_bucket;
571 	struct cfs_hash_bucket *nbkt = bd_new->bd_bucket;
572 	int rc;
573 
574 	if (!cfs_hash_bd_compare(bd_old, bd_new))
575 		return;
576 
577 	/* use cfs_hash_bd_hnode_add/del, to avoid atomic & refcount ops
578 	 * in cfs_hash_bd_del/add_locked
579 	 */
580 	hs->hs_hops->hop_hnode_del(hs, bd_old, hnode);
581 	rc = hs->hs_hops->hop_hnode_add(hs, bd_new, hnode);
582 	cfs_hash_bd_dep_record(hs, bd_new, rc);
583 
584 	LASSERT(obkt->hsb_count > 0);
585 	obkt->hsb_count--;
586 	obkt->hsb_version++;
587 	if (unlikely(!obkt->hsb_version))
588 		obkt->hsb_version++;
589 	nbkt->hsb_count++;
590 	nbkt->hsb_version++;
591 	if (unlikely(!nbkt->hsb_version))
592 		nbkt->hsb_version++;
593 }
594 
595 enum {
596 	/** always set, for sanity (avoid ZERO intent) */
597 	CFS_HS_LOOKUP_MASK_FIND	= BIT(0),
598 	/** return entry with a ref */
599 	CFS_HS_LOOKUP_MASK_REF	= BIT(1),
600 	/** add entry if not existing */
601 	CFS_HS_LOOKUP_MASK_ADD	= BIT(2),
602 	/** delete entry, ignore other masks */
603 	CFS_HS_LOOKUP_MASK_DEL	= BIT(3),
604 };
605 
606 enum cfs_hash_lookup_intent {
607 	/** return item w/o refcount */
608 	CFS_HS_LOOKUP_IT_PEEK	 = CFS_HS_LOOKUP_MASK_FIND,
609 	/** return item with refcount */
610 	CFS_HS_LOOKUP_IT_FIND	 = (CFS_HS_LOOKUP_MASK_FIND |
611 				    CFS_HS_LOOKUP_MASK_REF),
612 	/** return item w/o refcount if existed, otherwise add */
613 	CFS_HS_LOOKUP_IT_ADD	 = (CFS_HS_LOOKUP_MASK_FIND |
614 				    CFS_HS_LOOKUP_MASK_ADD),
615 	/** return item with refcount if existed, otherwise add */
616 	CFS_HS_LOOKUP_IT_FINDADD = (CFS_HS_LOOKUP_IT_FIND |
617 				    CFS_HS_LOOKUP_MASK_ADD),
618 	/** delete if existed */
619 	CFS_HS_LOOKUP_IT_FINDDEL = (CFS_HS_LOOKUP_MASK_FIND |
620 				    CFS_HS_LOOKUP_MASK_DEL)
621 };
622 
623 static struct hlist_node *
624 cfs_hash_bd_lookup_intent(struct cfs_hash *hs, struct cfs_hash_bd *bd,
625 			  const void *key, struct hlist_node *hnode,
626 			  enum cfs_hash_lookup_intent intent)
627 
628 {
629 	struct hlist_head *hhead = cfs_hash_bd_hhead(hs, bd);
630 	struct hlist_node *ehnode;
631 	struct hlist_node *match;
632 	int intent_add = intent & CFS_HS_LOOKUP_MASK_ADD;
633 
634 	/* with this function, we can avoid a lot of useless refcount ops,
635 	 * which are expensive atomic operations most of the time.
636 	 */
637 	match = intent_add ? NULL : hnode;
638 	hlist_for_each(ehnode, hhead) {
639 		if (!cfs_hash_keycmp(hs, key, ehnode))
640 			continue;
641 
642 		if (match && match != ehnode) /* can't match */
643 			continue;
644 
645 		/* match and ... */
646 		if (intent & CFS_HS_LOOKUP_MASK_DEL) {
647 			cfs_hash_bd_del_locked(hs, bd, ehnode);
648 			return ehnode;
649 		}
650 
651 		/* caller wants refcount? */
652 		if (intent & CFS_HS_LOOKUP_MASK_REF)
653 			cfs_hash_get(hs, ehnode);
654 		return ehnode;
655 	}
656 	/* no match item */
657 	if (!intent_add)
658 		return NULL;
659 
660 	LASSERT(hnode);
661 	cfs_hash_bd_add_locked(hs, bd, hnode);
662 	return hnode;
663 }
664 
665 struct hlist_node *
666 cfs_hash_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
667 			  const void *key)
668 {
669 	return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
670 					 CFS_HS_LOOKUP_IT_FIND);
671 }
672 EXPORT_SYMBOL(cfs_hash_bd_lookup_locked);
673 
674 struct hlist_node *
675 cfs_hash_bd_peek_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
676 			const void *key)
677 {
678 	return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
679 					 CFS_HS_LOOKUP_IT_PEEK);
680 }
681 EXPORT_SYMBOL(cfs_hash_bd_peek_locked);
682 
683 static void
684 cfs_hash_multi_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
685 		       unsigned int n, int excl)
686 {
687 	struct cfs_hash_bucket *prev = NULL;
688 	int i;
689 
690 	/**
691 	 * bds must be sorted in ascending order of bd->bd_bucket->hsb_index.
692 	 * NB: it's possible that several bds point to the same bucket but
693 	 * have different bd::bd_offset, so take care to avoid deadlock.
694 	 */
695 	cfs_hash_for_each_bd(bds, n, i) {
696 		if (prev == bds[i].bd_bucket)
697 			continue;
698 
699 		LASSERT(!prev || prev->hsb_index < bds[i].bd_bucket->hsb_index);
700 		cfs_hash_bd_lock(hs, &bds[i], excl);
701 		prev = bds[i].bd_bucket;
702 	}
703 }
704 
705 static void
706 cfs_hash_multi_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
707 			 unsigned int n, int excl)
708 {
709 	struct cfs_hash_bucket *prev = NULL;
710 	int i;
711 
712 	cfs_hash_for_each_bd(bds, n, i) {
713 		if (prev != bds[i].bd_bucket) {
714 			cfs_hash_bd_unlock(hs, &bds[i], excl);
715 			prev = bds[i].bd_bucket;
716 		}
717 	}
718 }
719 
720 static struct hlist_node *
721 cfs_hash_multi_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
722 				unsigned int n, const void *key)
723 {
724 	struct hlist_node *ehnode;
725 	unsigned int i;
726 
727 	cfs_hash_for_each_bd(bds, n, i) {
728 		ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, NULL,
729 						   CFS_HS_LOOKUP_IT_FIND);
730 		if (ehnode)
731 			return ehnode;
732 	}
733 	return NULL;
734 }
735 
736 static struct hlist_node *
737 cfs_hash_multi_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
738 				 unsigned int n, const void *key,
739 				 struct hlist_node *hnode, int noref)
740 {
741 	struct hlist_node *ehnode;
742 	int intent;
743 	unsigned int i;
744 
745 	LASSERT(hnode);
746 	intent = (!noref * CFS_HS_LOOKUP_MASK_REF) | CFS_HS_LOOKUP_IT_PEEK;
747 
748 	cfs_hash_for_each_bd(bds, n, i) {
749 		ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key,
750 						   NULL, intent);
751 		if (ehnode)
752 			return ehnode;
753 	}
754 
755 	if (i == 1) { /* only one bucket */
756 		cfs_hash_bd_add_locked(hs, &bds[0], hnode);
757 	} else {
758 		struct cfs_hash_bd mybd;
759 
760 		cfs_hash_bd_get(hs, key, &mybd);
761 		cfs_hash_bd_add_locked(hs, &mybd, hnode);
762 	}
763 
764 	return hnode;
765 }
766 
767 static struct hlist_node *
768 cfs_hash_multi_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
769 				 unsigned int n, const void *key,
770 				 struct hlist_node *hnode)
771 {
772 	struct hlist_node *ehnode;
773 	unsigned int i;
774 
775 	cfs_hash_for_each_bd(bds, n, i) {
776 		ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, hnode,
777 						   CFS_HS_LOOKUP_IT_FINDDEL);
778 		if (ehnode)
779 			return ehnode;
780 	}
781 	return NULL;
782 }
783 
784 static void
785 cfs_hash_bd_order(struct cfs_hash_bd *bd1, struct cfs_hash_bd *bd2)
786 {
787 	int rc;
788 
789 	if (!bd2->bd_bucket)
790 		return;
791 
792 	if (!bd1->bd_bucket) {
793 		*bd1 = *bd2;
794 		bd2->bd_bucket = NULL;
795 		return;
796 	}
797 
798 	rc = cfs_hash_bd_compare(bd1, bd2);
799 	if (!rc)
800 		bd2->bd_bucket = NULL;
801 	else if (rc > 0)
802 		swap(*bd1, *bd2); /* swap bd1 and bd2 */
803 }
804 
805 void
806 cfs_hash_dual_bd_get(struct cfs_hash *hs, const void *key,
807 		     struct cfs_hash_bd *bds)
808 {
809 	/* NB: caller should hold hs_lock.rw if REHASH is set */
810 	cfs_hash_bd_from_key(hs, hs->hs_buckets,
811 			     hs->hs_cur_bits, key, &bds[0]);
812 	if (likely(!hs->hs_rehash_buckets)) {
813 		/* no rehash or not rehashing */
814 		bds[1].bd_bucket = NULL;
815 		return;
816 	}
817 
818 	LASSERT(hs->hs_rehash_bits);
819 	cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
820 			     hs->hs_rehash_bits, key, &bds[1]);
821 
822 	cfs_hash_bd_order(&bds[0], &bds[1]);
823 }
824 
825 void
826 cfs_hash_dual_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl)
827 {
828 	cfs_hash_multi_bd_lock(hs, bds, 2, excl);
829 }
830 
831 void
832 cfs_hash_dual_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl)
833 {
834 	cfs_hash_multi_bd_unlock(hs, bds, 2, excl);
835 }
836 
837 struct hlist_node *
838 cfs_hash_dual_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
839 			       const void *key)
840 {
841 	return cfs_hash_multi_bd_lookup_locked(hs, bds, 2, key);
842 }
843 
844 struct hlist_node *
845 cfs_hash_dual_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
846 				const void *key, struct hlist_node *hnode,
847 				int noref)
848 {
849 	return cfs_hash_multi_bd_findadd_locked(hs, bds, 2, key,
850 						hnode, noref);
851 }
852 
853 struct hlist_node *
854 cfs_hash_dual_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
855 				const void *key, struct hlist_node *hnode)
856 {
857 	return cfs_hash_multi_bd_finddel_locked(hs, bds, 2, key, hnode);
858 }
859 
860 static void
861 cfs_hash_buckets_free(struct cfs_hash_bucket **buckets,
862 		      int bkt_size, int prev_size, int size)
863 {
864 	int i;
865 
866 	for (i = prev_size; i < size; i++) {
867 		if (buckets[i])
868 			LIBCFS_FREE(buckets[i], bkt_size);
869 	}
870 
871 	LIBCFS_FREE(buckets, sizeof(buckets[0]) * size);
872 }
873 
874 /*
875  * Create or grow bucket memory. Return old_buckets if no allocation was
876  * needed, the newly allocated buckets if allocation was needed and
877  * successful, and NULL on error.
878  */
879 static struct cfs_hash_bucket **
880 cfs_hash_buckets_realloc(struct cfs_hash *hs, struct cfs_hash_bucket **old_bkts,
881 			 unsigned int old_size, unsigned int new_size)
882 {
883 	struct cfs_hash_bucket **new_bkts;
884 	int i;
885 
886 	LASSERT(!old_size || old_bkts);
887 
888 	if (old_bkts && old_size == new_size)
889 		return old_bkts;
890 
891 	LIBCFS_ALLOC(new_bkts, sizeof(new_bkts[0]) * new_size);
892 	if (!new_bkts)
893 		return NULL;
894 
895 	if (old_bkts) {
896 		memcpy(new_bkts, old_bkts,
897 		       min(old_size, new_size) * sizeof(*old_bkts));
898 	}
899 
900 	for (i = old_size; i < new_size; i++) {
901 		struct hlist_head *hhead;
902 		struct cfs_hash_bd bd;
903 
904 		LIBCFS_ALLOC(new_bkts[i], cfs_hash_bkt_size(hs));
905 		if (!new_bkts[i]) {
906 			cfs_hash_buckets_free(new_bkts, cfs_hash_bkt_size(hs),
907 					      old_size, new_size);
908 			return NULL;
909 		}
910 
911 		new_bkts[i]->hsb_index = i;
912 		new_bkts[i]->hsb_version = 1;	/* shouldn't be zero */
913 		new_bkts[i]->hsb_depmax = -1;	/* unknown */
914 		bd.bd_bucket = new_bkts[i];
915 		cfs_hash_bd_for_each_hlist(hs, &bd, hhead)
916 			INIT_HLIST_HEAD(hhead);
917 
918 		if (cfs_hash_with_no_lock(hs) ||
919 		    cfs_hash_with_no_bktlock(hs))
920 			continue;
921 
922 		if (cfs_hash_with_rw_bktlock(hs))
923 			rwlock_init(&new_bkts[i]->hsb_lock.rw);
924 		else if (cfs_hash_with_spin_bktlock(hs))
925 			spin_lock_init(&new_bkts[i]->hsb_lock.spin);
926 		else
927 			LBUG(); /* invalid use-case */
928 	}
929 	return new_bkts;
930 }
931 
932 /**
933  * Initialize new libcfs hash, where:
934  * @name     - Descriptive hash name
935  * @cur_bits - Initial hash table size, in bits
936  * @max_bits - Maximum allowed hash table resize, in bits
937  * @ops      - Registered hash table operations
938  * @flags    - CFS_HASH_REHASH enable dynamic hash resizing
939  *	     - CFS_HASH_SORT enable chained hash sort
940  */
941 static int cfs_hash_rehash_worker(struct cfs_workitem *wi);
942 
943 #if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
944 static int cfs_hash_dep_print(struct cfs_workitem *wi)
945 {
946 	struct cfs_hash *hs = container_of(wi, struct cfs_hash, hs_dep_wi);
947 	int dep;
948 	int bkt;
949 	int off;
950 	int bits;
951 
952 	spin_lock(&hs->hs_dep_lock);
953 	dep = hs->hs_dep_max;
954 	bkt = hs->hs_dep_bkt;
955 	off = hs->hs_dep_off;
956 	bits = hs->hs_dep_bits;
957 	spin_unlock(&hs->hs_dep_lock);
958 
959 	LCONSOLE_WARN("#### HASH %s (bits: %d): max depth %d at bucket %d/%d\n",
960 		      hs->hs_name, bits, dep, bkt, off);
961 	spin_lock(&hs->hs_dep_lock);
962 	hs->hs_dep_bits = 0; /* mark as workitem done */
963 	spin_unlock(&hs->hs_dep_lock);
964 	return 0;
965 }
966 
967 static void cfs_hash_depth_wi_init(struct cfs_hash *hs)
968 {
969 	spin_lock_init(&hs->hs_dep_lock);
970 	cfs_wi_init(&hs->hs_dep_wi, hs, cfs_hash_dep_print);
971 }
972 
973 static void cfs_hash_depth_wi_cancel(struct cfs_hash *hs)
974 {
975 	if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_dep_wi))
976 		return;
977 
978 	spin_lock(&hs->hs_dep_lock);
979 	while (hs->hs_dep_bits) {
980 		spin_unlock(&hs->hs_dep_lock);
981 		cond_resched();
982 		spin_lock(&hs->hs_dep_lock);
983 	}
984 	spin_unlock(&hs->hs_dep_lock);
985 }
986 
987 #else /* CFS_HASH_DEBUG_LEVEL < CFS_HASH_DEBUG_1 */
988 
989 static inline void cfs_hash_depth_wi_init(struct cfs_hash *hs) {}
990 static inline void cfs_hash_depth_wi_cancel(struct cfs_hash *hs) {}
991 
992 #endif /* CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1 */
993 
994 struct cfs_hash *
995 cfs_hash_create(char *name, unsigned int cur_bits, unsigned int max_bits,
996 		unsigned int bkt_bits, unsigned int extra_bytes,
997 		unsigned int min_theta, unsigned int max_theta,
998 		struct cfs_hash_ops *ops, unsigned int flags)
999 {
1000 	struct cfs_hash *hs;
1001 	int len;
1002 
1003 	BUILD_BUG_ON(CFS_HASH_THETA_BITS >= 15);
1004 
1005 	LASSERT(name);
1006 	LASSERT(ops->hs_key);
1007 	LASSERT(ops->hs_hash);
1008 	LASSERT(ops->hs_object);
1009 	LASSERT(ops->hs_keycmp);
1010 	LASSERT(ops->hs_get);
1011 	LASSERT(ops->hs_put || ops->hs_put_locked);
1012 
1013 	if (flags & CFS_HASH_REHASH)
1014 		flags |= CFS_HASH_COUNTER; /* must have counter */
1015 
1016 	LASSERT(cur_bits > 0);
1017 	LASSERT(cur_bits >= bkt_bits);
1018 	LASSERT(max_bits >= cur_bits && max_bits < 31);
1019 	LASSERT(ergo(!(flags & CFS_HASH_REHASH), cur_bits == max_bits));
1020 	LASSERT(ergo(flags & CFS_HASH_REHASH, !(flags & CFS_HASH_NO_LOCK)));
1021 	LASSERT(ergo(flags & CFS_HASH_REHASH_KEY, ops->hs_keycpy));
1022 
1023 	len = !(flags & CFS_HASH_BIGNAME) ?
1024 	      CFS_HASH_NAME_LEN : CFS_HASH_BIGNAME_LEN;
1025 	LIBCFS_ALLOC(hs, offsetof(struct cfs_hash, hs_name[len]));
1026 	if (!hs)
1027 		return NULL;
1028 
1029 	strlcpy(hs->hs_name, name, len);
1030 	hs->hs_flags = flags;
1031 
1032 	atomic_set(&hs->hs_refcount, 1);
1033 	atomic_set(&hs->hs_count, 0);
1034 
1035 	cfs_hash_lock_setup(hs);
1036 	cfs_hash_hlist_setup(hs);
1037 
1038 	hs->hs_cur_bits = (u8)cur_bits;
1039 	hs->hs_min_bits = (u8)cur_bits;
1040 	hs->hs_max_bits = (u8)max_bits;
1041 	hs->hs_bkt_bits = (u8)bkt_bits;
1042 
1043 	hs->hs_ops = ops;
1044 	hs->hs_extra_bytes = extra_bytes;
1045 	hs->hs_rehash_bits = 0;
1046 	cfs_wi_init(&hs->hs_rehash_wi, hs, cfs_hash_rehash_worker);
1047 	cfs_hash_depth_wi_init(hs);
1048 
1049 	if (cfs_hash_with_rehash(hs))
1050 		__cfs_hash_set_theta(hs, min_theta, max_theta);
1051 
1052 	hs->hs_buckets = cfs_hash_buckets_realloc(hs, NULL, 0,
1053 						  CFS_HASH_NBKT(hs));
1054 	if (hs->hs_buckets)
1055 		return hs;
1056 
1057 	LIBCFS_FREE(hs, offsetof(struct cfs_hash, hs_name[len]));
1058 	return NULL;
1059 }
1060 EXPORT_SYMBOL(cfs_hash_create);
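
/*
 * Illustrative usage sketch (not part of the original file).  A typical
 * caller embeds a struct hlist_node in its own object and fills in a
 * struct cfs_hash_ops (declared in libcfs_hash.h) with hs_hash, hs_key,
 * hs_keycmp, hs_object, hs_get and hs_put callbacks before creating the
 * table.  "example_hash_ops" and the sizing values below are hypothetical:
 *
 *	hash = cfs_hash_create("example", 5, 10, 3, 0,
 *			       CFS_HASH_MIN_THETA, CFS_HASH_MAX_THETA,
 *			       &example_hash_ops,
 *			       CFS_HASH_REHASH | CFS_HASH_COUNTER);
 *	if (!hash)
 *		return -ENOMEM;
 *
 * cur_bits = 5 creates an initial table of 32 hlist heads, max_bits = 10
 * caps growth at 1024 heads, and bkt_bits = 3 groups eight heads under one
 * bucket lock.  CFS_HASH_MIN_THETA and CFS_HASH_MAX_THETA are assumed to
 * be the default load-factor bounds from libcfs_hash.h.
 */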
1061 
1062 /**
1063  * Cleanup libcfs hash @hs.
1064  */
1065 static void
1066 cfs_hash_destroy(struct cfs_hash *hs)
1067 {
1068 	struct hlist_node *hnode;
1069 	struct hlist_node *pos;
1070 	struct cfs_hash_bd bd;
1071 	int i;
1072 
1073 	LASSERT(hs);
1074 	LASSERT(!cfs_hash_is_exiting(hs) &&
1075 		!cfs_hash_is_iterating(hs));
1076 
1077 	/**
1078 	 * prohibit further rehashes, don't need any lock because
1079 	 * I'm the only (last) one who can change it.
1080 	 */
1081 	hs->hs_exiting = 1;
1082 	if (cfs_hash_with_rehash(hs))
1083 		cfs_hash_rehash_cancel(hs);
1084 
1085 	cfs_hash_depth_wi_cancel(hs);
1086 	/* rehash should be done/canceled */
1087 	LASSERT(hs->hs_buckets && !hs->hs_rehash_buckets);
1088 
1089 	cfs_hash_for_each_bucket(hs, &bd, i) {
1090 		struct hlist_head *hhead;
1091 
1092 		LASSERT(bd.bd_bucket);
1093 		/* no need to take this lock, just for consistent code */
1094 		cfs_hash_bd_lock(hs, &bd, 1);
1095 
1096 		cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
1097 			hlist_for_each_safe(hnode, pos, hhead) {
1098 				LASSERTF(!cfs_hash_with_assert_empty(hs),
1099 					 "hash %s bucket %u(%u) is not empty: %u items left\n",
1100 					 hs->hs_name, bd.bd_bucket->hsb_index,
1101 					 bd.bd_offset, bd.bd_bucket->hsb_count);
1102 				/* can't assert key validity, because we
1103 				 * can interrupt rehash
1104 				 */
1105 				cfs_hash_bd_del_locked(hs, &bd, hnode);
1106 				cfs_hash_exit(hs, hnode);
1107 			}
1108 		}
1109 		LASSERT(!bd.bd_bucket->hsb_count);
1110 		cfs_hash_bd_unlock(hs, &bd, 1);
1111 		cond_resched();
1112 	}
1113 
1114 	LASSERT(!atomic_read(&hs->hs_count));
1115 
1116 	cfs_hash_buckets_free(hs->hs_buckets, cfs_hash_bkt_size(hs),
1117 			      0, CFS_HASH_NBKT(hs));
1118 	i = cfs_hash_with_bigname(hs) ?
1119 	    CFS_HASH_BIGNAME_LEN : CFS_HASH_NAME_LEN;
1120 	LIBCFS_FREE(hs, offsetof(struct cfs_hash, hs_name[i]));
1121 }
1122 
1123 struct cfs_hash *cfs_hash_getref(struct cfs_hash *hs)
1124 {
1125 	if (atomic_inc_not_zero(&hs->hs_refcount))
1126 		return hs;
1127 	return NULL;
1128 }
1129 EXPORT_SYMBOL(cfs_hash_getref);
1130 
1131 void cfs_hash_putref(struct cfs_hash *hs)
1132 {
1133 	if (atomic_dec_and_test(&hs->hs_refcount))
1134 		cfs_hash_destroy(hs);
1135 }
1136 EXPORT_SYMBOL(cfs_hash_putref);
1137 
1138 static inline int
1139 cfs_hash_rehash_bits(struct cfs_hash *hs)
1140 {
1141 	if (cfs_hash_with_no_lock(hs) ||
1142 	    !cfs_hash_with_rehash(hs))
1143 		return -EOPNOTSUPP;
1144 
1145 	if (unlikely(cfs_hash_is_exiting(hs)))
1146 		return -ESRCH;
1147 
1148 	if (unlikely(cfs_hash_is_rehashing(hs)))
1149 		return -EALREADY;
1150 
1151 	if (unlikely(cfs_hash_is_iterating(hs)))
1152 		return -EAGAIN;
1153 
1154 	/* XXX: need to handle case with max_theta != 2.0
1155 	 *      and the case with min_theta != 0.5
1156 	 */
1157 	if ((hs->hs_cur_bits < hs->hs_max_bits) &&
1158 	    (__cfs_hash_theta(hs) > hs->hs_max_theta))
1159 		return hs->hs_cur_bits + 1;
1160 
1161 	if (!cfs_hash_with_shrink(hs))
1162 		return 0;
1163 
1164 	if ((hs->hs_cur_bits > hs->hs_min_bits) &&
1165 	    (__cfs_hash_theta(hs) < hs->hs_min_theta))
1166 		return hs->hs_cur_bits - 1;
1167 
1168 	return 0;
1169 }
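
/*
 * Illustrative note (not from the original source): __cfs_hash_theta() is
 * assumed to report the load factor (items per hlist head) in
 * CFS_HASH_THETA_BITS fixed point.  For example, with hs_cur_bits = 10
 * (1024 heads) and about 3000 items the load factor is roughly 2.9; if
 * that exceeds hs_max_theta this function asks for hs_cur_bits + 1, which
 * doubles the table.  Conversely, when the load factor drops below
 * hs_min_theta and shrinking is enabled (cfs_hash_with_shrink()), the
 * table shrinks by one bit.
 */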
1170 
1171 /**
1172  * don't allow inline rehash if:
1173  * - user wants non-blocking change (add/del) on hash table
1174  * - too many elements
1175  */
1176 static inline int
1177 cfs_hash_rehash_inline(struct cfs_hash *hs)
1178 {
1179 	return !cfs_hash_with_nblk_change(hs) &&
1180 	       atomic_read(&hs->hs_count) < CFS_HASH_LOOP_HOG;
1181 }
1182 
1183 /**
1184  * Add item @hnode to libcfs hash @hs using @key.  The registered
1185  * ops->hs_get function will be called when the item is added.
1186  */
1187 void
1188 cfs_hash_add(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
1189 {
1190 	struct cfs_hash_bd bd;
1191 	int bits;
1192 
1193 	LASSERT(hlist_unhashed(hnode));
1194 
1195 	cfs_hash_lock(hs, 0);
1196 	cfs_hash_bd_get_and_lock(hs, key, &bd, 1);
1197 
1198 	cfs_hash_key_validate(hs, key, hnode);
1199 	cfs_hash_bd_add_locked(hs, &bd, hnode);
1200 
1201 	cfs_hash_bd_unlock(hs, &bd, 1);
1202 
1203 	bits = cfs_hash_rehash_bits(hs);
1204 	cfs_hash_unlock(hs, 0);
1205 	if (bits > 0)
1206 		cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
1207 }
1208 EXPORT_SYMBOL(cfs_hash_add);
1209 
1210 static struct hlist_node *
1211 cfs_hash_find_or_add(struct cfs_hash *hs, const void *key,
1212 		     struct hlist_node *hnode, int noref)
1213 {
1214 	struct hlist_node *ehnode;
1215 	struct cfs_hash_bd bds[2];
1216 	int bits = 0;
1217 
1218 	LASSERTF(hlist_unhashed(hnode), "hnode = %p\n", hnode);
1219 
1220 	cfs_hash_lock(hs, 0);
1221 	cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);
1222 
1223 	cfs_hash_key_validate(hs, key, hnode);
1224 	ehnode = cfs_hash_dual_bd_findadd_locked(hs, bds, key,
1225 						 hnode, noref);
1226 	cfs_hash_dual_bd_unlock(hs, bds, 1);
1227 
1228 	if (ehnode == hnode)	/* new item added */
1229 		bits = cfs_hash_rehash_bits(hs);
1230 	cfs_hash_unlock(hs, 0);
1231 	if (bits > 0)
1232 		cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
1233 
1234 	return ehnode;
1235 }
1236 
1237 /**
1238  * Add item @hnode to libcfs hash @hs using @key.  The registered
1239  * ops->hs_get function will be called if the item was added.
1240  * Returns 0 on success or -EALREADY on key collisions.
1241  */
1242 int
1243 cfs_hash_add_unique(struct cfs_hash *hs, const void *key,
1244 		    struct hlist_node *hnode)
1245 {
1246 	return cfs_hash_find_or_add(hs, key, hnode, 1) != hnode ?
1247 	       -EALREADY : 0;
1248 }
1249 EXPORT_SYMBOL(cfs_hash_add_unique);
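
/*
 * Usage sketch (illustrative; "obj", "ol_key" and "ol_hnode" are
 * hypothetical caller-side names).  On a duplicate key the new object is
 * not added and no reference is taken on it, so the caller may simply
 * free it:
 *
 *	rc = cfs_hash_add_unique(hash, &obj->ol_key, &obj->ol_hnode);
 *	if (rc == -EALREADY)
 *		kfree(obj);
 */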
1250 
1251 /**
1252  * Add item @hnode to libcfs hash @hs using @key.  If this @key
1253  * already exists in the hash then ops->hs_get will be called on the
1254  * conflicting entry and that entry will be returned to the caller.
1255  * Otherwise ops->hs_get is called on the item which was added.
1256  */
1257 void *
1258 cfs_hash_findadd_unique(struct cfs_hash *hs, const void *key,
1259 			struct hlist_node *hnode)
1260 {
1261 	hnode = cfs_hash_find_or_add(hs, key, hnode, 0);
1262 
1263 	return cfs_hash_object(hs, hnode);
1264 }
1265 EXPORT_SYMBOL(cfs_hash_findadd_unique);
1266 
1267 /**
1268  * Delete item @hnode from the libcfs hash @hs using @key.  The @key
1269  * is required to ensure the correct hash bucket is locked since there
1270  * is no direct linkage from the item to the bucket.  The object
1271  * removed from the hash will be returned and ops->hs_put is called
1272  * on the removed object.
1273  */
1274 void *
1275 cfs_hash_del(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
1276 {
1277 	void *obj = NULL;
1278 	int bits = 0;
1279 	struct cfs_hash_bd bds[2];
1280 
1281 	cfs_hash_lock(hs, 0);
1282 	cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);
1283 
1284 	/* NB: do nothing if @hnode is not in hash table */
1285 	if (!hnode || !hlist_unhashed(hnode)) {
1286 		if (!bds[1].bd_bucket && hnode) {
1287 			cfs_hash_bd_del_locked(hs, &bds[0], hnode);
1288 		} else {
1289 			hnode = cfs_hash_dual_bd_finddel_locked(hs, bds,
1290 								key, hnode);
1291 		}
1292 	}
1293 
1294 	if (hnode) {
1295 		obj = cfs_hash_object(hs, hnode);
1296 		bits = cfs_hash_rehash_bits(hs);
1297 	}
1298 
1299 	cfs_hash_dual_bd_unlock(hs, bds, 1);
1300 	cfs_hash_unlock(hs, 0);
1301 	if (bits > 0)
1302 		cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
1303 
1304 	return obj;
1305 }
1306 EXPORT_SYMBOL(cfs_hash_del);
1307 
1308 /**
1309  * Delete item given @key in libcfs hash @hs.  The first @key found in
1310  * the hash will be removed; if the key exists multiple times in the hash
1311  * @hs this function must be called once per key.  The removed object
1312  * will be returned and ops->hs_put is called on the removed object.
1313  */
1314 void *
1315 cfs_hash_del_key(struct cfs_hash *hs, const void *key)
1316 {
1317 	return cfs_hash_del(hs, key, NULL);
1318 }
1319 EXPORT_SYMBOL(cfs_hash_del_key);
1320 
1321 /**
1322  * Lookup an item using @key in the libcfs hash @hs and return it.
1323  * If the @key is found in the hash, hs->hs_get() is called and the
1324  * matching object is returned.  It is the caller's responsibility
1325  * to call the counterpart ops->hs_put using the cfs_hash_put() macro
1326  * when finished with the object.  If the @key was not found
1327  * in the hash @hs, NULL is returned.
1328  */
1329 void *
1330 cfs_hash_lookup(struct cfs_hash *hs, const void *key)
1331 {
1332 	void *obj = NULL;
1333 	struct hlist_node *hnode;
1334 	struct cfs_hash_bd bds[2];
1335 
1336 	cfs_hash_lock(hs, 0);
1337 	cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);
1338 
1339 	hnode = cfs_hash_dual_bd_lookup_locked(hs, bds, key);
1340 	if (hnode)
1341 		obj = cfs_hash_object(hs, hnode);
1342 
1343 	cfs_hash_dual_bd_unlock(hs, bds, 0);
1344 	cfs_hash_unlock(hs, 0);
1345 
1346 	return obj;
1347 }
1348 EXPORT_SYMBOL(cfs_hash_lookup);
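
/*
 * Usage sketch (illustrative; "my_obj", "mo_hnode" and "use_object" are
 * hypothetical caller-side names).  A successful lookup takes a reference
 * through ops->hs_get, so it must be balanced with cfs_hash_put() once
 * the caller is done with the object:
 *
 *	struct my_obj *obj = cfs_hash_lookup(hash, &key);
 *
 *	if (obj) {
 *		use_object(obj);
 *		cfs_hash_put(hash, &obj->mo_hnode);
 *	}
 */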
1349 
1350 static void
1351 cfs_hash_for_each_enter(struct cfs_hash *hs)
1352 {
1353 	LASSERT(!cfs_hash_is_exiting(hs));
1354 
1355 	if (!cfs_hash_with_rehash(hs))
1356 		return;
1357 	/*
1358 	 * NB: there is a race on cfs_hash::hs_iterating, but it doesn't matter
1359 	 * because it's just an unreliable signal to rehash-thread,
1360 	 * rehash-thread will try to finish rehash ASAP when seeing this.
1361 	 */
1362 	hs->hs_iterating = 1;
1363 
1364 	cfs_hash_lock(hs, 1);
1365 	hs->hs_iterators++;
1366 
1367 	/* NB: iteration is mostly called by service thread,
1368 	 * we tend to cancel pending rehash-request, instead of
1369 	 * blocking service thread, we will relaunch rehash request
1370 	 * after iteration
1371 	 */
1372 	if (cfs_hash_is_rehashing(hs))
1373 		cfs_hash_rehash_cancel_locked(hs);
1374 	cfs_hash_unlock(hs, 1);
1375 }
1376 
1377 static void
1378 cfs_hash_for_each_exit(struct cfs_hash *hs)
1379 {
1380 	int remained;
1381 	int bits;
1382 
1383 	if (!cfs_hash_with_rehash(hs))
1384 		return;
1385 	cfs_hash_lock(hs, 1);
1386 	remained = --hs->hs_iterators;
1387 	bits = cfs_hash_rehash_bits(hs);
1388 	cfs_hash_unlock(hs, 1);
1389 	/* NB: there is a race on cfs_hash::hs_iterating, see above */
1390 	if (!remained)
1391 		hs->hs_iterating = 0;
1392 	if (bits > 0) {
1393 		cfs_hash_rehash(hs, atomic_read(&hs->hs_count) <
1394 				    CFS_HASH_LOOP_HOG);
1395 	}
1396 }
1397 
1398 /**
1399  * For each item in the libcfs hash @hs call the passed callback @func
1400  * and pass to it as an argument each hash item and the private @data.
1401  *
1402  * a) the function may sleep!
1403  * b) during the callback:
1404  *    . the bucket lock is held so the callback must never sleep.
1405  *    . if @removal_safe is true, the user can remove the current item
1406  *      with cfs_hash_bd_del_locked
1407  */
1408 static u64
1409 cfs_hash_for_each_tight(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
1410 			void *data, int remove_safe)
1411 {
1412 	struct hlist_node *hnode;
1413 	struct hlist_node *pos;
1414 	struct cfs_hash_bd bd;
1415 	u64 count = 0;
1416 	int excl = !!remove_safe;
1417 	int loop = 0;
1418 	int i;
1419 
1420 	cfs_hash_for_each_enter(hs);
1421 
1422 	cfs_hash_lock(hs, 0);
1423 	LASSERT(!cfs_hash_is_rehashing(hs));
1424 
1425 	cfs_hash_for_each_bucket(hs, &bd, i) {
1426 		struct hlist_head *hhead;
1427 
1428 		cfs_hash_bd_lock(hs, &bd, excl);
1429 		if (!func) { /* only glimpse size */
1430 			count += bd.bd_bucket->hsb_count;
1431 			cfs_hash_bd_unlock(hs, &bd, excl);
1432 			continue;
1433 		}
1434 
1435 		cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
1436 			hlist_for_each_safe(hnode, pos, hhead) {
1437 				cfs_hash_bucket_validate(hs, &bd, hnode);
1438 				count++;
1439 				loop++;
1440 				if (func(hs, &bd, hnode, data)) {
1441 					cfs_hash_bd_unlock(hs, &bd, excl);
1442 					goto out;
1443 				}
1444 			}
1445 		}
1446 		cfs_hash_bd_unlock(hs, &bd, excl);
1447 		if (loop < CFS_HASH_LOOP_HOG)
1448 			continue;
1449 		loop = 0;
1450 		cfs_hash_unlock(hs, 0);
1451 		cond_resched();
1452 		cfs_hash_lock(hs, 0);
1453 	}
1454  out:
1455 	cfs_hash_unlock(hs, 0);
1456 
1457 	cfs_hash_for_each_exit(hs);
1458 	return count;
1459 }
1460 
1461 struct cfs_hash_cond_arg {
1462 	cfs_hash_cond_opt_cb_t	func;
1463 	void			*arg;
1464 };
1465 
1466 static int
1467 cfs_hash_cond_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
1468 			 struct hlist_node *hnode, void *data)
1469 {
1470 	struct cfs_hash_cond_arg *cond = data;
1471 
1472 	if (cond->func(cfs_hash_object(hs, hnode), cond->arg))
1473 		cfs_hash_bd_del_locked(hs, bd, hnode);
1474 	return 0;
1475 }
1476 
1477 /**
1478  * Delete items from the libcfs hash @hs when @func returns true.
1479  * The write lock is held while looping over each bucket to prevent
1480  * any object from being referenced.
1481  */
1482 void
1483 cfs_hash_cond_del(struct cfs_hash *hs, cfs_hash_cond_opt_cb_t func, void *data)
1484 {
1485 	struct cfs_hash_cond_arg arg = {
1486 		.func	= func,
1487 		.arg	= data,
1488 	};
1489 
1490 	cfs_hash_for_each_tight(hs, cfs_hash_cond_del_locked, &arg, 1);
1491 }
1492 EXPORT_SYMBOL(cfs_hash_cond_del);
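
/*
 * Illustrative sketch of a cfs_hash_cond_del() callback; "my_obj",
 * "mo_expires" and "obj_is_stale" are hypothetical, and the callback
 * signature is assumed from the call in cfs_hash_cond_del_locked() above
 * (object plus private @data, returning non-zero to delete):
 *
 *	static int obj_is_stale(void *obj, void *data)
 *	{
 *		time64_t *deadline = data;
 *
 *		return ((struct my_obj *)obj)->mo_expires < *deadline;
 *	}
 *
 *	cfs_hash_cond_del(hash, obj_is_stale, &deadline);
 */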
1493 
1494 void
1495 cfs_hash_for_each(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
1496 		  void *data)
1497 {
1498 	cfs_hash_for_each_tight(hs, func, data, 0);
1499 }
1500 EXPORT_SYMBOL(cfs_hash_for_each);
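
/*
 * Illustrative callback sketch ("my_obj", "mo_busy" and "count_busy" are
 * hypothetical).  The callback runs under the bucket lock, so it must not
 * sleep; returning non-zero stops the iteration early, as cfs_hash_peek()
 * below demonstrates:
 *
 *	static int count_busy(struct cfs_hash *hs, struct cfs_hash_bd *bd,
 *			      struct hlist_node *hnode, void *data)
 *	{
 *		struct my_obj *obj = cfs_hash_object(hs, hnode);
 *		unsigned int *busy = data;
 *
 *		if (obj->mo_busy)
 *			(*busy)++;
 *		return 0;
 *	}
 *
 *	cfs_hash_for_each(hash, count_busy, &busy_count);
 */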
1501 
1502 void
1503 cfs_hash_for_each_safe(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
1504 		       void *data)
1505 {
1506 	cfs_hash_for_each_tight(hs, func, data, 1);
1507 }
1508 EXPORT_SYMBOL(cfs_hash_for_each_safe);
1509 
1510 static int
1511 cfs_hash_peek(struct cfs_hash *hs, struct cfs_hash_bd *bd,
1512 	      struct hlist_node *hnode, void *data)
1513 {
1514 	*(int *)data = 0;
1515 	return 1; /* return 1 to break the loop */
1516 }
1517 
1518 int
1519 cfs_hash_is_empty(struct cfs_hash *hs)
1520 {
1521 	int empty = 1;
1522 
1523 	cfs_hash_for_each_tight(hs, cfs_hash_peek, &empty, 0);
1524 	return empty;
1525 }
1526 EXPORT_SYMBOL(cfs_hash_is_empty);
1527 
1528 u64
1529 cfs_hash_size_get(struct cfs_hash *hs)
1530 {
1531 	return cfs_hash_with_counter(hs) ?
1532 	       atomic_read(&hs->hs_count) :
1533 	       cfs_hash_for_each_tight(hs, NULL, NULL, 0);
1534 }
1535 EXPORT_SYMBOL(cfs_hash_size_get);
1536 
1537 /*
1538  * cfs_hash_for_each_relax:
1539  * Iterate the hash table and call @func on each item without
1540  * any lock. This function can't guarantee to finish iteration
1541  * if these features are enabled:
1542  *
1543  *  a. if rehash_key is enabled, an item can be moved from
1544  *     one bucket to another bucket
1545  *  b. the user can remove a non-zero-ref item from the hash-table,
1546  *     so the item can disappear from the hash-table; even worse,
1547  *     the user may change the key and insert the item into another
1548  *     hash bucket.
1549  * there is no way for us to finish iteration correctly in the previous
1550  * two cases, so iteration has to be stopped on change.
1551  */
1552 static int
1553 cfs_hash_for_each_relax(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
1554 			void *data, int start)
1555 {
1556 	struct hlist_node *next = NULL;
1557 	struct hlist_node *hnode;
1558 	struct cfs_hash_bd bd;
1559 	u32 version;
1560 	int count = 0;
1561 	int stop_on_change;
1562 	int has_put_locked;
1563 	int end = -1;
1564 	int rc = 0;
1565 	int i;
1566 
1567 	stop_on_change = cfs_hash_with_rehash_key(hs) ||
1568 			 !cfs_hash_with_no_itemref(hs);
1569 	has_put_locked = hs->hs_ops->hs_put_locked != NULL;
1570 	cfs_hash_lock(hs, 0);
1571 again:
1572 	LASSERT(!cfs_hash_is_rehashing(hs));
1573 
1574 	cfs_hash_for_each_bucket(hs, &bd, i) {
1575 		struct hlist_head *hhead;
1576 
1577 		if (i < start)
1578 			continue;
1579 		else if (end > 0 && i >= end)
1580 			break;
1581 
1582 		cfs_hash_bd_lock(hs, &bd, 0);
1583 		version = cfs_hash_bd_version_get(&bd);
1584 
1585 		cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
1586 			hnode = hhead->first;
1587 			if (!hnode)
1588 				continue;
1589 			cfs_hash_get(hs, hnode);
1590 
1591 			for (; hnode; hnode = next) {
1592 				cfs_hash_bucket_validate(hs, &bd, hnode);
1593 				next = hnode->next;
1594 				if (next)
1595 					cfs_hash_get(hs, next);
1596 				cfs_hash_bd_unlock(hs, &bd, 0);
1597 				cfs_hash_unlock(hs, 0);
1598 
1599 				rc = func(hs, &bd, hnode, data);
1600 				if (stop_on_change || !has_put_locked)
1601 					cfs_hash_put(hs, hnode);
1602 				cond_resched();
1603 				count++;
1604 
1605 				cfs_hash_lock(hs, 0);
1606 				cfs_hash_bd_lock(hs, &bd, 0);
1607 				if (stop_on_change) {
1608 					if (version !=
1609 					    cfs_hash_bd_version_get(&bd))
1610 						rc = -EINTR;
1611 				} else if (has_put_locked) {
1612 					cfs_hash_put_locked(hs, hnode);
1613 				}
1614 				if (rc) /* callback wants to break iteration */
1615 					break;
1616 			}
1617 			if (next) {
1618 				if (has_put_locked) {
1619 					cfs_hash_put_locked(hs, next);
1620 					next = NULL;
1621 				}
1622 				break;
1623 			} else if (rc) {
1624 				break;
1625 			}
1626 		}
1627 		cfs_hash_bd_unlock(hs, &bd, 0);
1628 		if (next && !has_put_locked) {
1629 			cfs_hash_put(hs, next);
1630 			next = NULL;
1631 		}
1632 		if (rc) /* callback wants to break iteration */
1633 			break;
1634 	}
1635 	if (start > 0 && !rc) {
1636 		end = start;
1637 		start = 0;
1638 		goto again;
1639 	}
1640 
1641 	cfs_hash_unlock(hs, 0);
1642 	return count;
1643 }
1644 
1645 int
1646 cfs_hash_for_each_nolock(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
1647 			 void *data, int start)
1648 {
1649 	if (cfs_hash_with_no_lock(hs) ||
1650 	    cfs_hash_with_rehash_key(hs) ||
1651 	    !cfs_hash_with_no_itemref(hs))
1652 		return -EOPNOTSUPP;
1653 
1654 	if (!hs->hs_ops->hs_get ||
1655 	    (!hs->hs_ops->hs_put && !hs->hs_ops->hs_put_locked))
1656 		return -EOPNOTSUPP;
1657 
1658 	cfs_hash_for_each_enter(hs);
1659 	cfs_hash_for_each_relax(hs, func, data, start);
1660 	cfs_hash_for_each_exit(hs);
1661 
1662 	return 0;
1663 }
1664 EXPORT_SYMBOL(cfs_hash_for_each_nolock);
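
/*
 * Editor's note: a minimal, hypothetical usage sketch for
 * cfs_hash_for_each_nolock() above.  Only the iterator and the
 * cfs_hash_for_each_cb_t callback signature come from libcfs; the
 * ex_iter_* names, the container struct and the counting policy are
 * illustrative assumptions, not part of the original source.
 */
struct ex_iter_obj {
	struct hlist_node	eio_hnode;	/* linkage into the cfs_hash */
	u64			eio_key;	/* hypothetical hashed key */
};

/* Invoked once per item; the iterator drops the bucket lock around the
 * callback, so sleeping here is allowed. */
static int
ex_iter_count_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
		 struct hlist_node *hnode, void *data)
{
	unsigned int *nr = data;

	(*nr)++;
	return 0;	/* a non-zero return would stop the iteration early */
}

static unsigned int
ex_iter_count_all(struct cfs_hash *hs)
{
	unsigned int nr = 0;

	/* fails with -EOPNOTSUPP if the table lacks get/put operations or
	 * was created with flags cfs_hash_for_each_relax() cannot handle */
	if (cfs_hash_for_each_nolock(hs, ex_iter_count_cb, &nr, 0))
		return 0;
	return nr;
}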
1665 
1666 /**
1667  * For each hash bucket in the libcfs hash @hs call the passed callback
1668  * @func until all the hash buckets are empty.  The passed callback @func
1669  * or the previously registered callback hs->hs_put must remove the item
1670  * from the hash.  You may either use the cfs_hash_del() or hlist_del()
1671  * functions.  No rwlocks will be held during the callback @func, so it
1672  * is safe to sleep if needed.  This function will not terminate until
1673  * the hash is empty.  Note it is still possible to concurrently add new
1674  * items into the hash.  It is the caller's responsibility to ensure
1675  * the required locking is in place to prevent concurrent insertions.
1676  */
1677 int
1678 cfs_hash_for_each_empty(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
1679 			void *data)
1680 {
1681 	unsigned int i = 0;
1682 
1683 	if (cfs_hash_with_no_lock(hs))
1684 		return -EOPNOTSUPP;
1685 
1686 	if (!hs->hs_ops->hs_get ||
1687 	    (!hs->hs_ops->hs_put && !hs->hs_ops->hs_put_locked))
1688 		return -EOPNOTSUPP;
1689 
1690 	cfs_hash_for_each_enter(hs);
1691 	while (cfs_hash_for_each_relax(hs, func, data, 0)) {
1692 		CDEBUG(D_INFO, "Try to empty hash: %s, loop: %u\n",
1693 		       hs->hs_name, i++);
1694 	}
1695 	cfs_hash_for_each_exit(hs);
1696 	return 0;
1697 }
1698 EXPORT_SYMBOL(cfs_hash_for_each_empty);
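
/*
 * Editor's note: a hedged drain sketch for cfs_hash_for_each_empty()
 * above.  struct ex_drain_obj and its key layout are hypothetical;
 * cfs_hash_del() is one of the removal helpers the comment above refers
 * to, and object lifetime is assumed to be handled by the table's
 * registered get/put operations.
 */
struct ex_drain_obj {
	struct hlist_node	edo_hnode;
	u64			edo_key;
};

/* Runs with no bucket lock held, so it may sleep; it must remove the
 * item from the table or cfs_hash_for_each_empty() will never finish. */
static int
ex_drain_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
	    struct hlist_node *hnode, void *data)
{
	struct ex_drain_obj *obj;

	obj = hlist_entry(hnode, struct ex_drain_obj, edo_hnode);
	cfs_hash_del(hs, &obj->edo_key, hnode);
	/* the final free is expected to happen through the registered
	 * hs_put once the last reference on @obj is dropped */
	return 0;
}

static void
ex_drain_table(struct cfs_hash *hs)
{
	cfs_hash_for_each_empty(hs, ex_drain_cb, NULL);
}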
1699 
1700 void
1701 cfs_hash_hlist_for_each(struct cfs_hash *hs, unsigned int hindex,
1702 			cfs_hash_for_each_cb_t func, void *data)
1703 {
1704 	struct hlist_head *hhead;
1705 	struct hlist_node *hnode;
1706 	struct cfs_hash_bd bd;
1707 
1708 	cfs_hash_for_each_enter(hs);
1709 	cfs_hash_lock(hs, 0);
1710 	if (hindex >= CFS_HASH_NHLIST(hs))
1711 		goto out;
1712 
1713 	cfs_hash_bd_index_set(hs, hindex, &bd);
1714 
1715 	cfs_hash_bd_lock(hs, &bd, 0);
1716 	hhead = cfs_hash_bd_hhead(hs, &bd);
1717 	hlist_for_each(hnode, hhead) {
1718 		if (func(hs, &bd, hnode, data))
1719 			break;
1720 	}
1721 	cfs_hash_bd_unlock(hs, &bd, 0);
1722 out:
1723 	cfs_hash_unlock(hs, 0);
1724 	cfs_hash_for_each_exit(hs);
1725 }
1726 EXPORT_SYMBOL(cfs_hash_hlist_for_each);
1727 
1728 /*
1729  * For each item in the libcfs hash @hs which matches the @key call
1730  * the passed callback @func and pass to it as an argument each hash
1731  * item and the private @data. During the callback the bucket lock
1732  * is held so the callback must never sleep.
1733  */
1734 void
1735 cfs_hash_for_each_key(struct cfs_hash *hs, const void *key,
1736 		      cfs_hash_for_each_cb_t func, void *data)
1737 {
1738 	struct hlist_node *hnode;
1739 	struct cfs_hash_bd bds[2];
1740 	unsigned int i;
1741 
1742 	cfs_hash_lock(hs, 0);
1743 
1744 	cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);
1745 
1746 	cfs_hash_for_each_bd(bds, 2, i) {
1747 		struct hlist_head *hlist = cfs_hash_bd_hhead(hs, &bds[i]);
1748 
1749 		hlist_for_each(hnode, hlist) {
1750 			cfs_hash_bucket_validate(hs, &bds[i], hnode);
1751 
1752 			if (cfs_hash_keycmp(hs, key, hnode)) {
1753 				if (func(hs, &bds[i], hnode, data))
1754 					break;
1755 			}
1756 		}
1757 	}
1758 
1759 	cfs_hash_dual_bd_unlock(hs, bds, 0);
1760 	cfs_hash_unlock(hs, 0);
1761 }
1762 EXPORT_SYMBOL(cfs_hash_for_each_key);
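
/*
 * Editor's note: an illustrative first-match lookup built on
 * cfs_hash_for_each_key() above.  The ex_key_* names are hypothetical;
 * the locking rule (bucket locks held, so no sleeping) comes from the
 * comment above, and a real caller would take its own reference on the
 * returned node inside the callback, before the locks are dropped.
 */
struct ex_key_lookup {
	struct hlist_node *ekl_found;
};

/* Returning non-zero stops the scan at the first item matching the key. */
static int
ex_key_first_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
		struct hlist_node *hnode, void *data)
{
	struct ex_key_lookup *lookup = data;

	lookup->ekl_found = hnode;
	return 1;
}

static struct hlist_node *
ex_find_first(struct cfs_hash *hs, const void *key)
{
	struct ex_key_lookup lookup = { NULL };

	cfs_hash_for_each_key(hs, key, ex_key_first_cb, &lookup);
	return lookup.ekl_found;
}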
1763 
1764 /**
1765  * Rehash the libcfs hash @hs to the given @bits.  This can be used
1766  * to grow the hash size when excessive chaining is detected, or to
1767  * shrink the hash when it is larger than needed.  When the CFS_HASH_REHASH
1768  * flag is set in @hs the libcfs hash may be dynamically rehashed
1769  * during addition or removal if the hash's theta value falls below
1770  * hs->hs_min_theta or exceeds hs->hs_max_theta.  By default
1771  * these values are tuned to keep the chained hash depth small, and
1772  * this approach assumes a reasonably uniform hashing function.  The
1773  * theta thresholds for @hs are tunable via cfs_hash_set_theta().
1774  */
1775 void
1776 cfs_hash_rehash_cancel_locked(struct cfs_hash *hs)
1777 {
1778 	int i;
1779 
1780 	/* caller must hold cfs_hash_lock(hs, 1) */
1781 	LASSERT(cfs_hash_with_rehash(hs) &&
1782 		!cfs_hash_with_no_lock(hs));
1783 
1784 	if (!cfs_hash_is_rehashing(hs))
1785 		return;
1786 
1787 	if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_rehash_wi)) {
1788 		hs->hs_rehash_bits = 0;
1789 		return;
1790 	}
1791 
1792 	for (i = 2; cfs_hash_is_rehashing(hs); i++) {
1793 		cfs_hash_unlock(hs, 1);
1794 		/* raise console warning while waiting too long */
1795 		CDEBUG(is_power_of_2(i >> 3) ? D_WARNING : D_INFO,
1796 		       "hash %s is still rehashing, rescheduled %d\n",
1797 		       hs->hs_name, i - 1);
1798 		cond_resched();
1799 		cfs_hash_lock(hs, 1);
1800 	}
1801 }
1802 
1803 void
1804 cfs_hash_rehash_cancel(struct cfs_hash *hs)
1805 {
1806 	cfs_hash_lock(hs, 1);
1807 	cfs_hash_rehash_cancel_locked(hs);
1808 	cfs_hash_unlock(hs, 1);
1809 }
1810 
1811 int
1812 cfs_hash_rehash(struct cfs_hash *hs, int do_rehash)
1813 {
1814 	int rc;
1815 
1816 	LASSERT(cfs_hash_with_rehash(hs) && !cfs_hash_with_no_lock(hs));
1817 
1818 	cfs_hash_lock(hs, 1);
1819 
1820 	rc = cfs_hash_rehash_bits(hs);
1821 	if (rc <= 0) {
1822 		cfs_hash_unlock(hs, 1);
1823 		return rc;
1824 	}
1825 
1826 	hs->hs_rehash_bits = rc;
1827 	if (!do_rehash) {
1828 		/* launch and return */
1829 		cfs_wi_schedule(cfs_sched_rehash, &hs->hs_rehash_wi);
1830 		cfs_hash_unlock(hs, 1);
1831 		return 0;
1832 	}
1833 
1834 	/* rehash right now */
1835 	cfs_hash_unlock(hs, 1);
1836 
1837 	return cfs_hash_rehash_worker(&hs->hs_rehash_wi);
1838 }
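
/*
 * Editor's note: a hedged sketch of driving cfs_hash_rehash() above by
 * hand.  theta is a fixed-point load factor scaled by
 * 1 << CFS_HASH_THETA_BITS (roughly "average items per hash slot");
 * the helper name, the chosen range and the three-argument form of
 * cfs_hash_set_theta() are assumptions for illustration only.
 */
static void
ex_tune_and_resize(struct cfs_hash *hs)
{
	/* aim for roughly 0.5 .. 2.0 items per hash slot (assumed encoding) */
	int min_theta = 1 << (CFS_HASH_THETA_BITS - 1);
	int max_theta = 2 << CFS_HASH_THETA_BITS;

	cfs_hash_set_theta(hs, min_theta, max_theta);

	/* do_rehash != 0 resizes synchronously in this context; passing 0
	 * would only schedule the rehash work item and return */
	cfs_hash_rehash(hs, 1);
}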
1839 
1840 static int
1841 cfs_hash_rehash_bd(struct cfs_hash *hs, struct cfs_hash_bd *old)
1842 {
1843 	struct cfs_hash_bd new;
1844 	struct hlist_head *hhead;
1845 	struct hlist_node *hnode;
1846 	struct hlist_node *pos;
1847 	void *key;
1848 	int c = 0;
1849 
1850 	/* hold cfs_hash_lock(hs, 1), so don't need any bucket lock */
1851 	cfs_hash_bd_for_each_hlist(hs, old, hhead) {
1852 		hlist_for_each_safe(hnode, pos, hhead) {
1853 			key = cfs_hash_key(hs, hnode);
1854 			LASSERT(key);
1855 			/* Validate hnode is in the correct bucket. */
1856 			cfs_hash_bucket_validate(hs, old, hnode);
1857 			/*
1858 			 * Delete from old hash bucket; move to new bucket.
1859 			 * ops->hs_key must be defined.
1860 			 */
1861 			cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
1862 					     hs->hs_rehash_bits, key, &new);
1863 			cfs_hash_bd_move_locked(hs, old, &new, hnode);
1864 			c++;
1865 		}
1866 	}
1867 
1868 	return c;
1869 }
1870 
1871 static int
1872 cfs_hash_rehash_worker(struct cfs_workitem *wi)
1873 {
1874 	struct cfs_hash *hs = container_of(wi, struct cfs_hash, hs_rehash_wi);
1875 	struct cfs_hash_bucket **bkts;
1876 	struct cfs_hash_bd bd;
1877 	unsigned int old_size;
1878 	unsigned int new_size;
1879 	int bsize;
1880 	int count = 0;
1881 	int rc = 0;
1882 	int i;
1883 
1884 	LASSERT(hs && cfs_hash_with_rehash(hs));
1885 
1886 	cfs_hash_lock(hs, 0);
1887 	LASSERT(cfs_hash_is_rehashing(hs));
1888 
1889 	old_size = CFS_HASH_NBKT(hs);
1890 	new_size = CFS_HASH_RH_NBKT(hs);
1891 
1892 	cfs_hash_unlock(hs, 0);
1893 
1894 	/*
1895 	 * don't need hs::hs_rwlock for hs::hs_buckets,
1896 	 * because nobody can change bkt-table except me.
1897 	 */
1898 	bkts = cfs_hash_buckets_realloc(hs, hs->hs_buckets,
1899 					old_size, new_size);
1900 	cfs_hash_lock(hs, 1);
1901 	if (!bkts) {
1902 		rc = -ENOMEM;
1903 		goto out;
1904 	}
1905 
1906 	if (bkts == hs->hs_buckets) {
1907 		bkts = NULL; /* do nothing */
1908 		goto out;
1909 	}
1910 
1911 	rc = __cfs_hash_theta(hs);
1912 	if ((rc >= hs->hs_min_theta) && (rc <= hs->hs_max_theta)) {
1913 		/* free the newly allocated bkt-table */
1914 		old_size = new_size;
1915 		new_size = CFS_HASH_NBKT(hs);
1916 		rc = -EALREADY;
1917 		goto out;
1918 	}
1919 
1920 	LASSERT(!hs->hs_rehash_buckets);
1921 	hs->hs_rehash_buckets = bkts;
1922 
1923 	rc = 0;
1924 	cfs_hash_for_each_bucket(hs, &bd, i) {
1925 		if (cfs_hash_is_exiting(hs)) {
1926 			rc = -ESRCH;
1927 			/* someone wants to destroy the hash, abort now */
1928 			if (old_size < new_size) /* OK to free old bkt-table */
1929 				break;
1930 			/* it's shrinking, need to free the new bkt-table */
1931 			hs->hs_rehash_buckets = NULL;
1932 			old_size = new_size;
1933 			new_size = CFS_HASH_NBKT(hs);
1934 			goto out;
1935 		}
1936 
1937 		count += cfs_hash_rehash_bd(hs, &bd);
1938 		if (count < CFS_HASH_LOOP_HOG ||
1939 		    cfs_hash_is_iterating(hs)) { /* need to finish ASAP */
1940 			continue;
1941 		}
1942 
1943 		count = 0;
1944 		cfs_hash_unlock(hs, 1);
1945 		cond_resched();
1946 		cfs_hash_lock(hs, 1);
1947 	}
1948 
1949 	hs->hs_rehash_count++;
1950 
1951 	bkts = hs->hs_buckets;
1952 	hs->hs_buckets = hs->hs_rehash_buckets;
1953 	hs->hs_rehash_buckets = NULL;
1954 
1955 	hs->hs_cur_bits = hs->hs_rehash_bits;
1956 out:
1957 	hs->hs_rehash_bits = 0;
1958 	if (rc == -ESRCH) /* will never be scheduled again */
1959 		cfs_wi_exit(cfs_sched_rehash, wi);
1960 	bsize = cfs_hash_bkt_size(hs);
1961 	cfs_hash_unlock(hs, 1);
1962 	/* can't refer to @hs anymore because it could be destroyed */
1963 	if (bkts)
1964 		cfs_hash_buckets_free(bkts, bsize, new_size, old_size);
1965 	if (rc)
1966 		CDEBUG(D_INFO, "early quit of rehashing: %d\n", rc);
1967 	/* return 1 only if cfs_wi_exit is called */
1968 	return rc == -ESRCH;
1969 }
1970 
1971 /**
1972  * Rehash the object referenced by @hnode in the libcfs hash @hs.  The
1973  * @old_key must be provided to locate the object's previous location
1974  * in the hash, and the @new_key will be used to reinsert the object.
1975  * Use this function instead of a cfs_hash_add() + cfs_hash_del()
1976  * combo when it is critical that there is no window in time where the
1977  * object is missing from the hash.  When an object is being rehashed
1978  * the registered cfs_hash_get() and cfs_hash_put() functions will
1979  * not be called.
1980  */
1981 void cfs_hash_rehash_key(struct cfs_hash *hs, const void *old_key,
1982 			 void *new_key, struct hlist_node *hnode)
1983 {
1984 	struct cfs_hash_bd bds[3];
1985 	struct cfs_hash_bd old_bds[2];
1986 	struct cfs_hash_bd new_bd;
1987 
1988 	LASSERT(!hlist_unhashed(hnode));
1989 
1990 	cfs_hash_lock(hs, 0);
1991 
1992 	cfs_hash_dual_bd_get(hs, old_key, old_bds);
1993 	cfs_hash_bd_get(hs, new_key, &new_bd);
1994 
1995 	bds[0] = old_bds[0];
1996 	bds[1] = old_bds[1];
1997 	bds[2] = new_bd;
1998 
1999 	/* NB: bds[0] and bds[1] are ordered already */
2000 	cfs_hash_bd_order(&bds[1], &bds[2]);
2001 	cfs_hash_bd_order(&bds[0], &bds[1]);
2002 
2003 	cfs_hash_multi_bd_lock(hs, bds, 3, 1);
2004 	if (likely(!old_bds[1].bd_bucket)) {
2005 		cfs_hash_bd_move_locked(hs, &old_bds[0], &new_bd, hnode);
2006 	} else {
2007 		cfs_hash_dual_bd_finddel_locked(hs, old_bds, old_key, hnode);
2008 		cfs_hash_bd_add_locked(hs, &new_bd, hnode);
2009 	}
2010 	/* overwrite the key inside the locks, otherwise it may conflict
2011 	 * with other operations, e.g. rehash
2012 	 */
2013 	cfs_hash_keycpy(hs, hnode, new_key);
2014 
2015 	cfs_hash_multi_bd_unlock(hs, bds, 3, 1);
2016 	cfs_hash_unlock(hs, 0);
2017 }
2018 EXPORT_SYMBOL(cfs_hash_rehash_key);
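
/*
 * Editor's note: an illustrative atomic re-keying built on
 * cfs_hash_rehash_key() above.  struct ex_rename_obj and the u64 key
 * are hypothetical; the sketch assumes the table registered an
 * hs_keycpy operation so the new key is copied into the object while
 * the bucket locks are still held.
 */
struct ex_rename_obj {
	struct hlist_node	ero_hnode;
	u64			ero_id;		/* the hashed key */
};

/* Move @obj from its current key to @new_id without any window where a
 * lookup could miss it. */
static void
ex_rename_obj(struct cfs_hash *hs, struct ex_rename_obj *obj, u64 new_id)
{
	u64 old_id = obj->ero_id;

	cfs_hash_rehash_key(hs, &old_id, &new_id, &obj->ero_hnode);
}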
2019 
2020 void cfs_hash_debug_header(struct seq_file *m)
2021 {
2022 	seq_printf(m, "%-*s   cur   min   max theta t-min t-max flags rehash   count  maxdep maxdepb distribution\n",
2023 		   CFS_HASH_BIGNAME_LEN, "name");
2024 }
2025 EXPORT_SYMBOL(cfs_hash_debug_header);
2026 
2027 static struct cfs_hash_bucket **
2028 cfs_hash_full_bkts(struct cfs_hash *hs)
2029 {
2030 	/* NB: caller should hold hs->hs_rwlock if REHASH is set */
2031 	if (!hs->hs_rehash_buckets)
2032 		return hs->hs_buckets;
2033 
2034 	LASSERT(hs->hs_rehash_bits);
2035 	return hs->hs_rehash_bits > hs->hs_cur_bits ?
2036 	       hs->hs_rehash_buckets : hs->hs_buckets;
2037 }
2038 
2039 static unsigned int
2040 cfs_hash_full_nbkt(struct cfs_hash *hs)
2041 {
2042 	/* NB: caller should hold hs->hs_rwlock if REHASH is set */
2043 	if (!hs->hs_rehash_buckets)
2044 		return CFS_HASH_NBKT(hs);
2045 
2046 	LASSERT(hs->hs_rehash_bits);
2047 	return hs->hs_rehash_bits > hs->hs_cur_bits ?
2048 	       CFS_HASH_RH_NBKT(hs) : CFS_HASH_NBKT(hs);
2049 }
2050 
2051 void cfs_hash_debug_str(struct cfs_hash *hs, struct seq_file *m)
2052 {
2053 	int dist[8] = { 0, };
2054 	int maxdep = -1;
2055 	int maxdepb = -1;
2056 	int total = 0;
2057 	int theta;
2058 	int i;
2059 
2060 	cfs_hash_lock(hs, 0);
2061 	theta = __cfs_hash_theta(hs);
2062 
2063 	seq_printf(m, "%-*s %5d %5d %5d %d.%03d %d.%03d %d.%03d  0x%02x %6d ",
2064 		   CFS_HASH_BIGNAME_LEN, hs->hs_name,
2065 		   1 << hs->hs_cur_bits, 1 << hs->hs_min_bits,
2066 		   1 << hs->hs_max_bits,
2067 		   __cfs_hash_theta_int(theta), __cfs_hash_theta_frac(theta),
2068 		   __cfs_hash_theta_int(hs->hs_min_theta),
2069 		   __cfs_hash_theta_frac(hs->hs_min_theta),
2070 		   __cfs_hash_theta_int(hs->hs_max_theta),
2071 		   __cfs_hash_theta_frac(hs->hs_max_theta),
2072 		   hs->hs_flags, hs->hs_rehash_count);
2073 
2074 	/*
2075 	 * The distribution is a summary of the chained hash depth in
2076 	 * each of the libcfs hash buckets.  Each bucket's hsb_count is
2077 	 * divided by the hash theta value and used to generate a
2078 	 * histogram of the hash distribution.  A uniform hash will
2079 	 * result in all hash buckets being close to the average, thus
2080 	 * only the first few entries in the histogram will be non-zero.
2081 	 * If your hash function results in a non-uniform hash, this will
2082 	 * be observable as outlier buckets in the distribution histogram.
2083 	 *
2084 	 * Uniform hash distribution:		128/128/0/0/0/0/0/0
2085 	 * Non-Uniform hash distribution:	128/125/0/0/0/0/2/1
2086 	 */
2087 	for (i = 0; i < cfs_hash_full_nbkt(hs); i++) {
2088 		struct cfs_hash_bd bd;
2089 
2090 		bd.bd_bucket = cfs_hash_full_bkts(hs)[i];
2091 		cfs_hash_bd_lock(hs, &bd, 0);
2092 		if (maxdep < bd.bd_bucket->hsb_depmax) {
2093 			maxdep  = bd.bd_bucket->hsb_depmax;
2094 			maxdepb = ffz(~maxdep);
2095 		}
2096 		total += bd.bd_bucket->hsb_count;
2097 		dist[min(fls(bd.bd_bucket->hsb_count / max(theta, 1)), 7)]++;
2098 		cfs_hash_bd_unlock(hs, &bd, 0);
2099 	}
2100 
2101 	seq_printf(m, "%7d %7d %7d ", total, maxdep, maxdepb);
2102 	for (i = 0; i < 8; i++)
2103 		seq_printf(m, "%d%c",  dist[i], (i == 7) ? '\n' : '/');
2104 
2105 	cfs_hash_unlock(hs, 0);
2106 }
2107 EXPORT_SYMBOL(cfs_hash_debug_str);
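
/*
 * Editor's note: a hedged sketch of wiring the two debug helpers above
 * into a seq_file show callback (e.g. a debugfs or procfs file).  The
 * ex_stats_hash variable and the show function are hypothetical; only
 * cfs_hash_debug_header() and cfs_hash_debug_str() come from this file.
 */
static struct cfs_hash *ex_stats_hash;	/* table being monitored */

static int
ex_hash_stats_show(struct seq_file *m, void *unused)
{
	/* one header line, then one summary line per table of interest */
	cfs_hash_debug_header(m);
	cfs_hash_debug_str(ex_stats_hash, m);
	return 0;
}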
2108