/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Original code taken from 'linux/include/linux/hash{,table}.h'
 */
#ifndef __EROFS_HASHTABLE_H
#define __EROFS_HASHTABLE_H

#ifdef __cplusplus
extern "C"
{
#endif

/*
 * Fast hashing routine for ints, longs and pointers.
 * (C) 2002 Nadia Yvette Chambers, IBM
 */

/*
 * Statically sized hash table implementation
 * (C) 2012  Sasha Levin <levinsasha928@gmail.com>
 */

#include "defs.h"

/*
 * The "GOLDEN_RATIO_PRIME" is used in fs/btrfs/btrfs_inode.h and
 * fs/inode.c.  It's not actually prime any more (the previous primes
 * were actively bad for hashing), but the name remains.
 */
#if BITS_PER_LONG == 32
#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_32
#define hash_long(val, bits) hash_32(val, bits)
#elif BITS_PER_LONG == 64
#define hash_long(val, bits) hash_64(val, bits)
#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_64
#else
#error Wordsize not 32 or 64
#endif

/*
 * This hash multiplies the input by a large odd number and takes the
 * high bits.  Since multiplication propagates changes to the most
 * significant end only, it is essential that the high bits of the
 * product be used for the hash value.
 *
 * Chuck Lever verified the effectiveness of this technique:
 * http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf
 *
 * Although a random odd number will do, it turns out that the golden
 * ratio phi = (sqrt(5)-1)/2, or its negative, has particularly nice
 * properties.  (See Knuth vol 3, section 6.4, exercise 9.)
 *
 * These are the negative, (1 - phi) = phi**2 = (3 - sqrt(5))/2,
 * which is very slightly easier to multiply by and makes no
 * difference to the hash distribution.
 */
#define GOLDEN_RATIO_32 0x61C88647
#define GOLDEN_RATIO_64 0x61C8864680B583EBull
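
/*
 * Editor's note: an illustrative aside, not part of the original header.
 * hash_32() below computes (val * GOLDEN_RATIO_32) >> (32 - bits), so with
 * bits == 6 only the six most significant bits of the 32-bit product
 * survive, i.e. the result is a bucket index in the range [0, 63].
 */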

struct hlist_head {
	struct hlist_node *first;
};

struct hlist_node {
	struct hlist_node *next, **pprev;
};

/*
 * Architectures might want to move the poison pointer offset
 * into some well-recognized area such as 0xdead000000000000,
 * that is also not mappable by user-space exploits:
 */
#ifdef CONFIG_ILLEGAL_POINTER_VALUE
# define POISON_POINTER_DELTA _AC(CONFIG_ILLEGAL_POINTER_VALUE, UL)
#else
# define POISON_POINTER_DELTA 0
#endif

/*
 * These are non-NULL pointers that will result in page faults
 * under normal circumstances, used to verify that nobody uses
 * non-initialized list entries.
 */
#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)

/*
 * Double linked lists with a single pointer list head.
 * Mostly useful for hash tables where the two pointer list head is
 * too wasteful.
 * You lose the ability to access the tail in O(1).
 */
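
/*
 * Editor's note: an illustrative aside, not part of the original header.
 * The head holds only a 'first' pointer; each node's 'pprev' points at
 * whatever location points to that node, i.e. &head->first for the first
 * node and &prev->next otherwise.  That is what lets __hlist_del() unlink
 * a node without ever needing a pointer to the list head.
 */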

#define HLIST_HEAD_INIT { .first = NULL }
#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
static inline void INIT_HLIST_NODE(struct hlist_node *h)
{
	h->next = NULL;
	h->pprev = NULL;
}

static inline int hlist_unhashed(const struct hlist_node *h)
{
	return !h->pprev;
}

static inline int hlist_empty(const struct hlist_head *h)
{
	return !h->first;
}

static inline void __hlist_del(struct hlist_node *n)
{
	struct hlist_node *next = n->next;
	struct hlist_node **pprev = n->pprev;

	*pprev = next;
	if (next)
		next->pprev = pprev;
}

static inline void hlist_del(struct hlist_node *n)
{
	__hlist_del(n);
	n->next = LIST_POISON1;
	n->pprev = LIST_POISON2;
}

static inline void hlist_del_init(struct hlist_node *n)
{
	if (!hlist_unhashed(n)) {
		__hlist_del(n);
		INIT_HLIST_NODE(n);
	}
}

static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
	struct hlist_node *first = h->first;

	n->next = first;
	if (first)
		first->pprev = &n->next;
	h->first = n;
	n->pprev = &h->first;
}

/* next must be != NULL */
static inline void hlist_add_before(struct hlist_node *n,
					struct hlist_node *next)
{
	n->pprev = next->pprev;
	n->next = next;
	next->pprev = &n->next;
	*(n->pprev) = n;
}

static inline void hlist_add_behind(struct hlist_node *n,
				    struct hlist_node *prev)
{
	n->next = prev->next;
	prev->next = n;
	n->pprev = &prev->next;

	if (n->next)
		n->next->pprev = &n->next;
}

/* after that we'll appear to be on some hlist and hlist_del will work */
static inline void hlist_add_fake(struct hlist_node *n)
{
	n->pprev = &n->next;
}

/*
 * Move a list from one list head to another. Fixup the pprev
 * reference of the first entry if it exists.
 */
static inline void hlist_move_list(struct hlist_head *old,
				   struct hlist_head *new)
{
	new->first = old->first;
	if (new->first)
		new->first->pprev = &new->first;
	old->first = NULL;
}

#define hlist_entry(ptr, type, member) container_of(ptr, type, member)

#define hlist_for_each(pos, head) \
	for (pos = (head)->first; pos; pos = pos->next)

#define hlist_for_each_safe(pos, n, head) \
	for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
	     pos = n)

#define hlist_entry_safe(ptr, type, member) \
	({ typeof(ptr) ____ptr = (ptr); \
	   ____ptr ? hlist_entry(____ptr, type, member) : NULL; \
	})

/**
 * hlist_for_each_entry	- iterate over list of given type
 * @pos:the type * to use as a loop cursor.
 * @head:the head for your list.
 * @member:the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry(pos, head, member)				\
	for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member);\
	     pos;							\
	     pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
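
/*
 * Editor's note: a minimal usage sketch, not part of the original header;
 * the struct and variable names below are hypothetical.  Entries embed a
 * struct hlist_node and the iterator hands back the enclosing structure
 * via container_of():
 *
 *	struct item {
 *		int value;
 *		struct hlist_node list;
 *	};
 *
 *	HLIST_HEAD(items);
 *	struct item *pos;
 *
 *	hlist_for_each_entry(pos, &items, list)
 *		do_something(pos->value);	// do_something() is hypothetical
 */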

/**
 * hlist_for_each_entry_continue
 * iterate over a hlist continuing after current point
 * @pos:the type * to use as a loop cursor.
 * @member:the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_continue(pos, member)			\
	for (pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member);\
	     pos;							\
	     pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))

/**
 * hlist_for_each_entry_from
 * iterate over a hlist continuing from current point
 * @pos:	the type * to use as a loop cursor.
 * @member:	the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_from(pos, member)				\
	for (; pos;							\
	     pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))

/**
 * hlist_for_each_entry_safe
 * iterate over list of given type safe against removal of list entry
 * @pos:the type * to use as a loop cursor.
 * @n:another &struct hlist_node to use as temporary storage
 * @head:the head for your list.
 * @member:the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_safe(pos, n, head, member)			\
	for (pos = hlist_entry_safe((head)->first, typeof(*pos), member);\
		pos && ({ n = pos->member.next; 1; });			\
		pos = hlist_entry_safe(n, typeof(*pos), member))

static inline u32 __hash_32(u32 val)
{
	return val * GOLDEN_RATIO_32;
}

static inline u32 hash_32(u32 val, unsigned int bits)
{
	/* High bits are more random, so use them. */
	return __hash_32(val) >> (32 - bits);
}

static inline u32 hash_64(u64 val, unsigned int bits)
{
#if BITS_PER_LONG == 64
	/* 64x64-bit multiply is efficient on all 64-bit processors */
	return val * GOLDEN_RATIO_64 >> (64 - bits);
#else
	/* Hash 64 bits using only 32x32-bit multiply. */
	return hash_32((u32)val ^ __hash_32(val >> 32), bits);
#endif
}

#define DEFINE_HASHTABLE(name, bits)					\
	struct hlist_head name[1 << (bits)] =				\
			{ [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT }

#define DECLARE_HASHTABLE(name, bits)					\
	struct hlist_head name[1 << (bits)]

#define HASH_SIZE(name) (ARRAY_SIZE(name))
#define HASH_BITS(name) ilog2(HASH_SIZE(name))

/* Use hash_32 when possible to allow for fast 32bit hashing in 64bit kernels */
#define hash_min(val, bits)						\
	(sizeof(val) <= 4 ? hash_32(val, bits) : hash_long(val, bits))
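
/*
 * Editor's note: an illustrative aside, not part of the original header.
 * With DEFINE_HASHTABLE(tbl, 4) the table has 1 << 4 == 16 buckets, so
 * HASH_SIZE(tbl) == 16, HASH_BITS(tbl) == 4, and hash_min(key, 4) yields
 * a bucket index in the range [0, 15].
 */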

static inline void __hash_init(struct hlist_head *ht, unsigned int sz)
{
	unsigned int i;

	for (i = 0; i < sz; i++)
		INIT_HLIST_HEAD(&ht[i]);
}

/**
 * hash_init - initialize a hash table
 * @hashtable: hashtable to be initialized
 *
 * Calculates the size of the hashtable from the given parameter, otherwise
 * same as hash_init_size.
 *
 * This has to be a macro since HASH_BITS() will not work on pointers since
 * it calculates the size during preprocessing.
 */
#define hash_init(hashtable) __hash_init(hashtable, HASH_SIZE(hashtable))
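
/*
 * Editor's note: a minimal usage sketch, not part of the original header;
 * 'struct cache' and its field are hypothetical.  hash_init() is for tables
 * that were only declared, e.g. as a structure member, while a table created
 * with DEFINE_HASHTABLE() is already statically initialized:
 *
 *	struct cache {
 *		DECLARE_HASHTABLE(buckets, 4);
 *	};
 *
 *	static void cache_setup(struct cache *c)
 *	{
 *		hash_init(c->buckets);
 *	}
 */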

/**
 * hash_add - add an object to a hashtable
 * @hashtable: hashtable to add to
 * @node: the &struct hlist_node of the object to be added
 * @key: the key of the object to be added
 */
#define hash_add(hashtable, node, key)					\
	hlist_add_head(node, &hashtable[hash_min(key, HASH_BITS(hashtable))])

/**
 * hash_hashed - check whether an object is in any hashtable
 * @node: the &struct hlist_node of the object to be checked
 */
static inline bool hash_hashed(struct hlist_node *node)
{
	return !hlist_unhashed(node);
}

static inline bool __hash_empty(struct hlist_head *ht, unsigned int sz)
{
	unsigned int i;

	for (i = 0; i < sz; i++)
		if (!hlist_empty(&ht[i]))
			return false;

	return true;
}

/**
 * hash_empty - check whether a hashtable is empty
 * @hashtable: hashtable to check
 *
 * This has to be a macro since HASH_BITS() will not work on pointers since
 * it calculates the size during preprocessing.
 */
#define hash_empty(hashtable) __hash_empty(hashtable, HASH_SIZE(hashtable))

/**
 * hash_del - remove an object from a hashtable
 * @node: &struct hlist_node of the object to remove
 */
static inline void hash_del(struct hlist_node *node)
{
	hlist_del_init(node);
}

/**
 * hash_for_each - iterate over a hashtable
 * @name: hashtable to iterate
 * @bkt: integer to use as bucket loop cursor
 * @obj: the type * to use as a loop cursor for each entry
 * @member: the name of the hlist_node within the struct
 */
#define hash_for_each(name, bkt, obj, member)				\
	for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
			(bkt)++)\
		hlist_for_each_entry(obj, &name[bkt], member)

/**
 * hash_for_each_safe - iterate over a hashtable safe against removal of
 * hash entry
 * @name: hashtable to iterate
 * @bkt: integer to use as bucket loop cursor
 * @tmp: a &struct used for temporary storage
 * @obj: the type * to use as a loop cursor for each entry
 * @member: the name of the hlist_node within the struct
 */
#define hash_for_each_safe(name, bkt, tmp, obj, member)			\
	for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
			(bkt)++)\
		hlist_for_each_entry_safe(obj, tmp, &name[bkt], member)

/**
 * hash_for_each_possible - iterate over all possible objects hashing to the
 * same bucket
 * @name: hashtable to iterate
 * @obj: the type * to use as a loop cursor for each entry
 * @member: the name of the hlist_node within the struct
 * @key: the key of the objects to iterate over
 */
#define hash_for_each_possible(name, obj, member, key)			\
	hlist_for_each_entry(obj, &name[hash_min(key, HASH_BITS(name))], member)
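
/*
 * Editor's note: a usage sketch, not part of the original header.  The
 * struct, function and table names are hypothetical; the fragment only
 * illustrates the intended calling pattern of DEFINE_HASHTABLE(),
 * hash_add(), hash_for_each_possible() and hash_for_each_safe():
 *
 *	struct object {
 *		u64 key;
 *		struct hlist_node node;
 *	};
 *
 *	static DEFINE_HASHTABLE(obj_table, 6);	// 1 << 6 == 64 buckets
 *
 *	static void obj_insert(struct object *obj)
 *	{
 *		hash_add(obj_table, &obj->node, obj->key);
 *	}
 *
 *	static struct object *obj_lookup(u64 key)
 *	{
 *		struct object *obj;
 *
 *		// a bucket may hold colliding keys, so recheck the key
 *		hash_for_each_possible(obj_table, obj, node, key)
 *			if (obj->key == key)
 *				return obj;
 *		return NULL;
 *	}
 *
 *	static void obj_table_clear(void)
 *	{
 *		struct object *obj;
 *		struct hlist_node *tmp;
 *		unsigned int bkt;
 *
 *		hash_for_each_safe(obj_table, bkt, tmp, obj, node) {
 *			hash_del(&obj->node);
 *			free(obj);	// assuming heap-allocated objects
 *		}
 *	}
 */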

#ifdef __cplusplus
}
#endif

#endif