References to tbl in include/linux/rhashtable.h, grouped by the function or macro that uses them. The number on each line is the source line in rhashtable.h; each group notes whether tbl is an argument or a local variable there.
rht_bucket_index()  [tbl: argument]
    119  static inline unsigned int rht_bucket_index(const struct bucket_table *tbl,
    122          return hash & (tbl->size - 1);

rht_key_hashfn()  [tbl: argument]
    156          struct rhashtable *ht, const struct bucket_table *tbl,
    159          unsigned int hash = rht_key_get_hash(ht, key, params, tbl->hash_rnd);
    161          return rht_bucket_index(tbl, hash);

rht_head_hashfn()  [tbl: argument]
    165          struct rhashtable *ht, const struct bucket_table *tbl,
    171                 rht_bucket_index(tbl, params.obj_hashfn(ptr, params.key_len ?:
    173                                                         tbl->hash_rnd)) :
    174                 rht_key_hashfn(ht, tbl, ptr + params.key_offset, params);
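Table sizes are powers of two, so the index computed at source line 122 is a mask rather than a modulo. A minimal userspace model of that computation (function and variable names here are illustrative, not the kernel's):

#include <assert.h>
#include <stdio.h>

/* Model of rht_bucket_index(): with a power-of-two table size,
 * `hash & (size - 1)` equals `hash % size` but avoids a division. */
static unsigned int bucket_index(unsigned int size, unsigned int hash)
{
    assert(size && (size & (size - 1)) == 0); /* size must be 2^n */
    return hash & (size - 1);
}

int main(void)
{
    printf("%u\n", bucket_index(64, 0x9e3779b9u)); /* 0x9e3779b9 & 63 == 57 */
    return 0;
}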
rht_grow_above_75()  [tbl: argument]
    183                                     const struct bucket_table *tbl)
    186          return atomic_read(&ht->nelems) > (tbl->size / 4 * 3) &&
    187                 (!ht->p.max_size || tbl->size < ht->p.max_size);

rht_shrink_below_30()  [tbl: argument]
    196                                      const struct bucket_table *tbl)
    199          return atomic_read(&ht->nelems) < (tbl->size * 3 / 10) &&
    200                 tbl->size > ht->p.min_size;

rht_grow_above_100()  [tbl: argument]
    209                                     const struct bucket_table *tbl)
    211          return atomic_read(&ht->nelems) > tbl->size &&
    212                 (!ht->p.max_size || tbl->size < ht->p.max_size);

rht_grow_above_max()  [tbl: argument]
    221                                     const struct bucket_table *tbl)
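The load-factor predicates stay in integer arithmetic: 75% of size is computed as size/4*3 (dividing first keeps size*3 from overflowing), 30% as size*3/10, and max_size == 0 means "no upper limit". A userspace sketch of the same arithmetic; the standalone framing and plain unsigned counters standing in for ht->nelems and ht->p are mine:

#include <stdbool.h>
#include <stdio.h>

/* Models of the rht_grow_above_75()/rht_shrink_below_30() predicates. */
static bool grow_above_75(unsigned int nelems, unsigned int size,
                          unsigned int max_size)
{
    /* size/4*3 is 75% of size; max_size == 0 means "no limit". */
    return nelems > size / 4 * 3 && (!max_size || size < max_size);
}

static bool shrink_below_30(unsigned int nelems, unsigned int size,
                            unsigned int min_size)
{
    return nelems < size * 3 / 10 && size > min_size;
}

int main(void)
{
    printf("%d\n", grow_above_75(49, 64, 0));   /* 49 > 48 -> 1 */
    printf("%d\n", shrink_below_30(18, 64, 4)); /* 18 < 19 -> 1 */
    return 0;
}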
lockdep_rht_bucket_is_held()  [prototype and inline stub; tbl: argument]
    228  int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash);
    235  static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,

Nested-bucket prototypes (rht_bucket_nested(), __rht_bucket_nested(),
rht_bucket_nested_insert()):
    264  struct rhash_lock_head **rht_bucket_nested(const struct bucket_table *tbl,
    266  struct rhash_lock_head **__rht_bucket_nested(const struct bucket_table *tbl,
    269                                               struct bucket_table *tbl,
rht_dereference_bucket  [macro; tbl: argument]
    278  #define rht_dereference_bucket(p, tbl, hash) \
    279          rcu_dereference_protected(p, lockdep_rht_bucket_is_held(tbl, hash))

rht_dereference_bucket_rcu  [macro; tbl: argument]
    281  #define rht_dereference_bucket_rcu(p, tbl, hash) \
    282          rcu_dereference_check(p, lockdep_rht_bucket_is_held(tbl, hash))

rht_bucket()  [tbl: argument]
    288          const struct bucket_table *tbl, unsigned int hash)
    290          return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) :
    291                                       &tbl->buckets[hash];

rht_bucket_var()  [tbl: argument]
    295          struct bucket_table *tbl, unsigned int hash)
    297          return unlikely(tbl->nest) ? __rht_bucket_nested(tbl, hash) :
    298                                       &tbl->buckets[hash];

rht_bucket_insert()  [tbl: argument]
    302          struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash)
    304          return unlikely(tbl->nest) ? rht_bucket_nested_insert(ht, tbl, hash) :
    305                                       &tbl->buckets[hash];
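All three accessors share one shape: the flat buckets[] array is the common case, and the nested lookup is the unlikely fallback taken when tbl->nest is set. A self-contained userspace sketch of that selection; the two-level chunk layout below is a simplified stand-in, not the kernel's actual nested-table encoding:

#include <stdio.h>
#include <stdlib.h>

/* Illustrative model: a table owns either a flat bucket array or, when
 * `nest` is set, an array of second-level chunks, mirroring how
 * rht_bucket() picks rht_bucket_nested() over &tbl->buckets[hash]. */
#define CHUNK 4

struct table {
    unsigned int nest;   /* 0: flat; nonzero: nested */
    unsigned int size;
    void **flat;         /* used when nest == 0 */
    void ***chunks;      /* used when nest != 0 */
};

static void **get_bucket(struct table *tbl, unsigned int hash)
{
    unsigned int idx = hash & (tbl->size - 1);

    if (tbl->nest)  /* mirrors unlikely(tbl->nest) */
        return &tbl->chunks[idx / CHUNK][idx % CHUNK];
    return &tbl->flat[idx];
}

int main(void)
{
    struct table t = { .nest = 0, .size = 8 };

    t.flat = calloc(t.size, sizeof(void *));
    *get_bucket(&t, 0x2a) = "hello";
    printf("%s\n", (char *)*get_bucket(&t, 0x2a));
    free(t.flat);
    return 0;
}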
rht_lock()  [tbl: argument]
    327  static inline void rht_lock(struct bucket_table *tbl,
    332          lock_map_acquire(&tbl->dep_map);

rht_lock_nested()  [tbl: argument]
    335  static inline void rht_lock_nested(struct bucket_table *tbl,
    341          lock_acquire_exclusive(&tbl->dep_map, subclass, 0, NULL, _THIS_IP_);

rht_unlock()  [tbl: argument]
    344  static inline void rht_unlock(struct bucket_table *tbl,
    347          lock_map_release(&tbl->dep_map);

rht_ptr()  [tbl: argument]
    377               struct bucket_table *tbl,
    380          return rht_dereference_bucket(__rht_ptr(bkt), tbl, hash);

rht_assign_unlock()  [tbl: argument]
    399  static inline void rht_assign_unlock(struct bucket_table *tbl,
    407          lock_map_release(&tbl->dep_map);
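rht_lock()/rht_unlock() pair a per-bucket bit spinlock with the lockdep annotations on tbl->dep_map shown above, and rht_assign_unlock() publishes a new bucket head as it releases the lock. In kernels of this vintage the lock lives in bit 0 of the bucket head pointer itself (bit_spin_lock(0, ...) on the bucket slot), which is why rht_ptr() must mask the raw value. A userspace approximation of such a pointer-embedded bit lock using C11 atomics, with busy-waiting and none of the lockdep or bottom-half handling:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Pointer-embedded bit lock: pointers are at least 2-byte aligned,
 * so bit 0 is free to serve as a spinlock. */
static void bucket_lock(_Atomic uintptr_t *bkt)
{
    uintptr_t old = atomic_load(bkt) & ~(uintptr_t)1;

    /* Spin until we flip bit 0 from 0 to 1. */
    while (!atomic_compare_exchange_weak(bkt, &old, old | 1))
        old &= ~(uintptr_t)1;
}

static void bucket_unlock(_Atomic uintptr_t *bkt)
{
    atomic_fetch_and(bkt, ~(uintptr_t)1);
}

int main(void)
{
    static int obj;
    _Atomic uintptr_t bkt = (uintptr_t)&obj;

    bucket_lock(&bkt);
    printf("locked: %d\n", (int)(atomic_load(&bkt) & 1));  /* 1 */
    bucket_unlock(&bkt);
    printf("locked: %d\n", (int)(atomic_load(&bkt) & 1));  /* 0 */
    return 0;
}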
rht_for_each_from  [macro; tbl: argument]
    421  #define rht_for_each_from(pos, head, tbl, hash) \
    424               pos = rht_dereference_bucket((pos)->next, tbl, hash))

rht_for_each  [macro; tbl: argument]
    432  #define rht_for_each(pos, tbl, hash) \
    433          rht_for_each_from(pos, rht_ptr(rht_bucket(tbl, hash), tbl, hash), \
    434                            tbl, hash)

rht_for_each_entry_from  [macro; tbl: argument]
    445  #define rht_for_each_entry_from(tpos, pos, head, tbl, hash, member) \
    448               pos = rht_dereference_bucket((pos)->next, tbl, hash))

rht_for_each_entry  [macro; tbl: argument]
    458  #define rht_for_each_entry(tpos, pos, tbl, hash, member) \
    460                                   rht_ptr(rht_bucket(tbl, hash), tbl, hash), \
    461                                   tbl, hash, member)

rht_for_each_entry_safe  [macro; tbl: argument]
    475  #define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member) \
    476          for (pos = rht_ptr(rht_bucket(tbl, hash), tbl, hash), \
    478                       rht_dereference_bucket(pos->next, tbl, hash) : NULL; \
    482                       rht_dereference_bucket(pos->next, tbl, hash) : NULL)

rht_for_each_rcu_from  [macro; tbl: argument]
    495  #define rht_for_each_rcu_from(pos, head, tbl, hash) \

rht_for_each_rcu  [macro; tbl: argument]
    511  #define rht_for_each_rcu(pos, tbl, hash) \
    513               pos = rht_ptr_rcu(rht_bucket(tbl, hash)); \

rht_for_each_entry_rcu_from  [macro; tbl: argument]
    530  #define rht_for_each_entry_rcu_from(tpos, pos, head, tbl, hash, member) \
    534               pos = rht_dereference_bucket_rcu(pos->next, tbl, hash))

rht_for_each_entry_rcu  [macro; tbl: argument]
    548  #define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member) \
    550                                       rht_ptr_rcu(rht_bucket(tbl, hash)), \
    551                                       tbl, hash, member)
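Every rht_for_each* variant reduces to the same shape: start from a head pointer (the plain forms fetch it via rht_ptr(rht_bucket(...))), then follow ->next under the matching RCU or bucket-lock check; the _entry forms add the container_of conversion and the _safe form caches ->next before the body runs so the current node may be unlinked. A plain C model of the _from and _safe shapes, with all the RCU checking stripped out (names are mine):

#include <stdio.h>

struct node { int v; struct node *next; };

/* Model of rht_for_each_from(): iterate from an explicit head. */
#define for_each_from(pos, head) \
    for ((pos) = (head); (pos); (pos) = (pos)->next)

/* Model of rht_for_each_entry_safe(): cache ->next so the body may
 * free or unlink the current node. */
#define for_each_safe(pos, nxt, head)                          \
    for ((pos) = (head), (nxt) = (pos) ? (pos)->next : NULL;   \
         (pos);                                                \
         (pos) = (nxt), (nxt) = (pos) ? (pos)->next : NULL)

int main(void)
{
    struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
    struct node *pos, *nxt;

    for_each_from(pos, &a)
        printf("%d ", pos->v);  /* 1 2 3 */
    for_each_safe(pos, nxt, &a)
        pos->next = NULL;       /* safe: next was cached first */
    printf("\n");
    return 0;
}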
__rhashtable_lookup()  [tbl: local variable]
    597          struct bucket_table *tbl;
    601          tbl = rht_dereference_rcu(ht->tbl, ht);
    603          hash = rht_key_hashfn(ht, tbl, key, params);
    604          bkt = rht_bucket(tbl, hash);
    606          rht_for_each_rcu_from(he, rht_ptr_rcu(bkt), tbl, hash) {
    621          tbl = rht_dereference_rcu(tbl->future_tbl, ht);
    622          if (unlikely(tbl))
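The lookup walks the bucket in the current table and, on a miss, follows tbl->future_tbl (source lines 621-622): during an incremental resize the entry may already live in the new table. A userspace model of that retry loop, with RCU and the seeded hash function omitted (names are mine):

#include <stdio.h>

struct node  { int key; struct node *next; };
struct table {
    unsigned int size;
    struct table *future_tbl;  /* non-NULL while a resize is in flight */
    struct node **buckets;
};

/* Model of __rhashtable_lookup(): search each table in the chain. */
static struct node *lookup(struct table *tbl, int key, unsigned int hash)
{
    do {
        struct node *he;

        for (he = tbl->buckets[hash & (tbl->size - 1)]; he; he = he->next)
            if (he->key == key)
                return he;
        tbl = tbl->future_tbl;  /* entry may have moved during resize */
    } while (tbl);
    return NULL;
}

int main(void)
{
    struct node n = { 42, NULL };
    struct node *new_buckets[8] = { NULL }, *old_buckets[4] = { NULL };
    struct table new_tbl = { 8, NULL, new_buckets };
    struct table old_tbl = { 4, &new_tbl, old_buckets };

    new_buckets[42 & 7] = &n;  /* already migrated to the new table */
    printf("%d\n", lookup(&old_tbl, 42, 42) ? 1 : 0);  /* 1 */
    return 0;
}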
__rhashtable_insert_fast()  [tbl: local variable]
    714          struct bucket_table *tbl;
    722          tbl = rht_dereference_rcu(ht->tbl, ht);
    723          hash = rht_head_hashfn(ht, tbl, obj, params);
    725          bkt = rht_bucket_insert(ht, tbl, hash);
    730          rht_lock(tbl, bkt);
    732          if (unlikely(rcu_access_pointer(tbl->future_tbl))) {
    734                  rht_unlock(tbl, bkt);
    739          rht_for_each_from(head, rht_ptr(bkt, tbl, hash), tbl, hash) {
    762                  head = rht_dereference_bucket(head->next, tbl, hash);
    766                  rht_unlock(tbl, bkt);
    768                  rht_assign_unlock(tbl, bkt, obj);
    777          if (unlikely(rht_grow_above_max(ht, tbl)))
    780          if (unlikely(rht_grow_above_100(ht, tbl)))
    784          head = rht_ptr(bkt, tbl, hash);
    795          rht_assign_unlock(tbl, bkt, obj);
    797          if (rht_grow_above_75(ht, tbl))
    807          rht_unlock(tbl, bkt);
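Callers reach this path through public wrappers such as rhashtable_insert_fast(). A hedged kernel-context sketch of typical usage; struct test_obj, test_params, and test_insert() are hypothetical names, and the error handling is kept generic because the slow paths above (resize in flight, table at its maximum) surface as plain error codes:

#include <linux/rhashtable.h>
#include <linux/slab.h>

/* Hypothetical hashed object: a key plus the rhash_head link. */
struct test_obj {
    u32 key;
    struct rhash_head node;
};

static const struct rhashtable_params test_params = {
    .key_len     = sizeof(u32),
    .key_offset  = offsetof(struct test_obj, key),
    .head_offset = offsetof(struct test_obj, node),
    .automatic_shrinking = true,
};

static int test_insert(struct rhashtable *ht, u32 key)
{
    struct test_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);
    int err;

    if (!obj)
        return -ENOMEM;
    obj->key = key;
    err = rhashtable_insert_fast(ht, &obj->node, test_params);
    if (err)
        kfree(obj);
    return err;
}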
__rhashtable_remove_fast_one()  [tbl: argument]
    994          struct rhashtable *ht, struct bucket_table *tbl,
   1004          hash = rht_head_hashfn(ht, tbl, obj, params);
   1005          bkt = rht_bucket_var(tbl, hash);
   1009          rht_lock(tbl, bkt);
   1011          rht_for_each_from(he, rht_ptr(bkt, tbl, hash), tbl, hash) {
   1027                                               tbl, hash);
   1033                  list = rht_dereference_bucket(list->next, tbl, hash);
   1039                  obj = rht_dereference_bucket(obj->next, tbl, hash);
   1043                  list = rht_dereference_bucket(list->next, tbl, hash);
   1053                  rht_unlock(tbl, bkt);
   1055                  rht_assign_unlock(tbl, bkt, obj);
   1060          rht_unlock(tbl, bkt);
   1065                       rht_shrink_below_30(ht, tbl)))

__rhashtable_remove_fast()  [tbl: local variable]
   1078          struct bucket_table *tbl;
   1083          tbl = rht_dereference_rcu(ht->tbl, ht);
   1090          while ((err = __rhashtable_remove_fast_one(ht, tbl, obj, params,
   1092                 (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
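Stripped of the bucket locking and the rhlist duplicate handling, removal is a prev-pointer unlink from a singly linked bucket chain, and the wrapper retries against future_tbl just as lookup does. The list surgery itself, modeled in userspace with a pointer-to-pointer walk (names are mine):

#include <stdio.h>

struct node { int key; struct node *next; };

/* Unlink and return the first node with `key` from a singly linked
 * bucket chain, or NULL if it is absent. */
static struct node *bucket_remove(struct node **pprev, int key)
{
    for (; *pprev; pprev = &(*pprev)->next) {
        struct node *he = *pprev;

        if (he->key == key) {
            *pprev = he->next;  /* splice the node out */
            return he;
        }
    }
    return NULL;
}

int main(void)
{
    struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
    struct node *head = &a;

    bucket_remove(&head, 2);
    for (struct node *p = head; p; p = p->next)
        printf("%d ", p->key);  /* 1 3 */
    printf("\n");
    return 0;
}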
__rhashtable_replace_fast()  [tbl: argument]
   1146          struct rhashtable *ht, struct bucket_table *tbl,
   1159          hash = rht_head_hashfn(ht, tbl, obj_old, params);
   1160          if (hash != rht_head_hashfn(ht, tbl, obj_new, params))
   1163          bkt = rht_bucket_var(tbl, hash);
   1168          rht_lock(tbl, bkt);
   1170          rht_for_each_from(he, rht_ptr(bkt, tbl, hash), tbl, hash) {
   1179                  rht_unlock(tbl, bkt);
   1181                  rht_assign_unlock(tbl, bkt, obj_new);
   1187          rht_unlock(tbl, bkt);

rhashtable_replace_fast()  [tbl: local variable]
   1212          struct bucket_table *tbl;
   1217          tbl = rht_dereference_rcu(ht->tbl, ht);
   1224          while ((err = __rhashtable_replace_fast(ht, tbl, obj_old,
   1226                 (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
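rhashtable_replace_fast() swaps one object for another that must land in the same bucket; the check at source line 1160 rejects the call otherwise, so in practice old and new carry the same key. A hedged kernel-context usage sketch, reusing the hypothetical test_obj layout from the insert example above:

#include <linux/rhashtable.h>

/* Hypothetical object layout, as in the insert sketch. */
struct test_obj {
    u32 key;
    struct rhash_head node;
};

/* Swap old_obj for new_obj in place; both must hash to the same
 * bucket, or the check in __rhashtable_replace_fast() fails the call. */
static int test_replace(struct rhashtable *ht,
                        struct test_obj *old_obj, struct test_obj *new_obj,
                        const struct rhashtable_params params)
{
    new_obj->key = old_obj->key;
    return rhashtable_replace_fast(ht, &old_obj->node, &new_obj->node,
                                   params);
}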