
Searched for refs:bucket (results 1 – 25 of 26), sorted by relevance


/fs/nfs/
nfs42xattr.c
87 struct nfs4_xattr_bucket *bucket; member
238 entry->bucket = NULL; in nfs4_xattr_alloc_entry()
389 struct nfs4_xattr_bucket *bucket; in nfs4_xattr_discard_cache() local
395 bucket = &cache->buckets[i]; in nfs4_xattr_discard_cache()
397 spin_lock(&bucket->lock); in nfs4_xattr_discard_cache()
398 bucket->draining = true; in nfs4_xattr_discard_cache()
399 hlist_for_each_entry_safe(entry, n, &bucket->hlist, hnode) { in nfs4_xattr_discard_cache()
404 spin_unlock(&bucket->lock); in nfs4_xattr_discard_cache()
512 nfs4_xattr_get_entry(struct nfs4_xattr_bucket *bucket, const char *name) in nfs4_xattr_get_entry() argument
518 hlist_for_each_entry(entry, &bucket->hlist, hnode) { in nfs4_xattr_get_entry()
[all …]
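
The nfs42xattr.c hits above show a pattern that recurs throughout this listing: each cache bucket carries its own spinlock plus a draining flag, and teardown walks the chain with a deletion-safe iterator. Below is a minimal userspace sketch of that drain step, using a pthread mutex and a hand-rolled singly linked list; every name in it is illustrative, not the kernel's.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdlib.h>

    struct entry {
        struct entry *next;
    };

    struct bucket {
        pthread_mutex_t lock;   /* per-bucket lock, like bucket->lock above */
        struct entry *head;     /* chain of cached entries */
        bool draining;          /* once set, inserters must back off */
    };

    /* Drain one bucket: mark it draining, then free every chained entry,
     * saving the next pointer before each free (the deletion-safe walk
     * that hlist_for_each_entry_safe() provides in the kernel). */
    static void bucket_drain(struct bucket *b)
    {
        pthread_mutex_lock(&b->lock);
        b->draining = true;
        for (struct entry *e = b->head; e != NULL; ) {
            struct entry *next = e->next;
            free(e);
            e = next;
        }
        b->head = NULL;
        pthread_mutex_unlock(&b->lock);
    }

Inserters would test draining under the same lock, so nothing can be added to a bucket that is being torn down.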
pnfs_nfs.c
63 pnfs_free_bucket_lseg(struct pnfs_commit_bucket *bucket) in pnfs_free_bucket_lseg() argument
65 if (list_empty(&bucket->committing) && list_empty(&bucket->written)) { in pnfs_free_bucket_lseg()
66 struct pnfs_layout_segment *freeme = bucket->lseg; in pnfs_free_bucket_lseg()
67 bucket->lseg = NULL; in pnfs_free_bucket_lseg()
81 struct pnfs_commit_bucket *bucket = NULL; in pnfs_generic_clear_request_commit() local
87 bucket = list_first_entry(&req->wb_list, in pnfs_generic_clear_request_commit()
91 if (bucket) in pnfs_generic_clear_request_commit()
92 pnfs_put_lseg(pnfs_free_bucket_lseg(bucket)); in pnfs_generic_clear_request_commit()
241 pnfs_bucket_scan_ds_commit_list(struct pnfs_commit_bucket *bucket, in pnfs_bucket_scan_ds_commit_list() argument
245 struct list_head *src = &bucket->written; in pnfs_bucket_scan_ds_commit_list()
[all …]
/fs/dlm/
debug_fs.c
370 unsigned bucket; member
429 unsigned bucket, entry; in table_seq_start() local
432 bucket = n >> 32; in table_seq_start()
435 if (bucket >= ls->ls_rsbtbl_size) in table_seq_start()
452 tree = toss ? &ls->ls_rsbtbl[bucket].toss : &ls->ls_rsbtbl[bucket].keep; in table_seq_start()
454 spin_lock(&ls->ls_rsbtbl[bucket].lock); in table_seq_start()
461 ri->bucket = bucket; in table_seq_start()
462 spin_unlock(&ls->ls_rsbtbl[bucket].lock); in table_seq_start()
467 spin_unlock(&ls->ls_rsbtbl[bucket].lock); in table_seq_start()
477 bucket++; in table_seq_start()
[all …]
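
table_seq_start() above recovers the bucket index from the high half of the 64-bit seq_file position (bucket = n >> 32), so a single integer cursor names both a hash bucket and an entry within it. The hypothetical pack/unpack helpers below show just that encoding; they are not dlm functions.

    #include <stdint.h>

    /* Pack (bucket, entry) into one 64-bit position: bucket in the high
     * 32 bits, entry index in the low 32 bits. */
    static inline uint64_t pos_pack(uint32_t bucket, uint32_t entry)
    {
        return ((uint64_t)bucket << 32) | entry;
    }

    static inline void pos_unpack(uint64_t n, uint32_t *bucket, uint32_t *entry)
    {
        *bucket = (uint32_t)(n >> 32);   /* bucket = n >> 32, as above */
        *entry  = (uint32_t)n;           /* low half selects the entry */
    }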
dir.c
203 uint32_t hash, bucket; in find_rsb_root() local
207 bucket = hash & (ls->ls_rsbtbl_size - 1); in find_rsb_root()
209 spin_lock(&ls->ls_rsbtbl[bucket].lock); in find_rsb_root()
210 rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[bucket].keep, name, len, &r); in find_rsb_root()
212 rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[bucket].toss, in find_rsb_root()
214 spin_unlock(&ls->ls_rsbtbl[bucket].lock); in find_rsb_root()
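
find_rsb_root() reduces the hash with hash & (ls->ls_rsbtbl_size - 1), which equals hash % size only when the table size is a power of two. A small sketch of that reduction, with the precondition made explicit; the helper name is made up for illustration.

    #include <assert.h>
    #include <stdint.h>

    /* hash % table_size without the division; valid only for
     * power-of-two table sizes, which the assert checks. */
    static uint32_t hash_to_bucket(uint32_t hash, uint32_t table_size)
    {
        assert(table_size != 0 && (table_size & (table_size - 1)) == 0);
        return hash & (table_size - 1);   /* same mask as find_rsb_root() */
    }

The same trick appears in the fscache results further down, with ARRAY_SIZE() of the hash table supplying the power-of-two size.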
lock.c
350 uint32_t bucket = r->res_bucket; in put_rsb() local
352 spin_lock(&ls->ls_rsbtbl[bucket].lock); in put_rsb()
354 spin_unlock(&ls->ls_rsbtbl[bucket].lock); in put_rsb()
5484 static struct dlm_rsb *find_grant_rsb(struct dlm_ls *ls, int bucket) in find_grant_rsb() argument
5489 spin_lock(&ls->ls_rsbtbl[bucket].lock); in find_grant_rsb()
5490 for (n = rb_first(&ls->ls_rsbtbl[bucket].keep); n; n = rb_next(n)) { in find_grant_rsb()
5500 spin_unlock(&ls->ls_rsbtbl[bucket].lock); in find_grant_rsb()
5503 spin_unlock(&ls->ls_rsbtbl[bucket].lock); in find_grant_rsb()
5527 int bucket = 0; in dlm_recover_grant() local
5533 r = find_grant_rsb(ls, bucket); in dlm_recover_grant()
[all …]
/fs/ocfs2/
xattr.c
125 struct ocfs2_xattr_bucket *bucket; member
279 struct ocfs2_xattr_bucket *bucket,
301 struct ocfs2_xattr_bucket *bucket,
322 struct ocfs2_xattr_bucket *bucket; in ocfs2_xattr_bucket_new() local
327 bucket = kzalloc(sizeof(struct ocfs2_xattr_bucket), GFP_NOFS); in ocfs2_xattr_bucket_new()
328 if (bucket) { in ocfs2_xattr_bucket_new()
329 bucket->bu_inode = inode; in ocfs2_xattr_bucket_new()
330 bucket->bu_blocks = blks; in ocfs2_xattr_bucket_new()
333 return bucket; in ocfs2_xattr_bucket_new()
336 static void ocfs2_xattr_bucket_relse(struct ocfs2_xattr_bucket *bucket) in ocfs2_xattr_bucket_relse() argument
[all …]
/fs/btrfs/
compression.c
887 struct bucket_item *bucket; member
902 kfree(workspace->bucket); in free_heuristic_ws()
919 ws->bucket = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket), GFP_KERNEL); in alloc_heuristic_ws()
920 if (!ws->bucket) in alloc_heuristic_ws()
1396 for (i = 0; i < BUCKET_SIZE && ws->bucket[i].count > 0; i++) { in shannon_entropy()
1397 p = ws->bucket[i].count; in shannon_entropy()
1529 struct bucket_item *bucket = ws->bucket; in byte_core_set_size() local
1532 radix_sort(ws->bucket, ws->bucket_b, BUCKET_SIZE); in byte_core_set_size()
1535 coreset_sum += bucket[i].count; in byte_core_set_size()
1540 for (; i < BYTE_CORE_SET_HIGH && bucket[i].count > 0; i++) { in byte_core_set_size()
[all …]
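
In the btrfs compression heuristic, ws->bucket is a byte-frequency histogram, and shannon_entropy() sums -p*log2(p) over the non-empty buckets to estimate how compressible a sample is. Here is a floating-point userspace sketch of the same computation; the kernel code instead uses an integer log2 approximation, and the function signature here is invented for the example.

    #include <math.h>
    #include <stddef.h>
    #include <stdint.h>

    #define BUCKET_SIZE 256   /* one bucket per possible byte value */

    /* Shannon entropy in bits per byte (0..8) of a sample described by
     * a byte-frequency histogram; total is the sum of all counts.
     * Link with -lm for log2(). */
    static double shannon_entropy(const uint32_t bucket[BUCKET_SIZE], size_t total)
    {
        double entropy = 0.0;
        for (size_t i = 0; i < BUCKET_SIZE; i++) {
            if (bucket[i] == 0)
                continue;
            double p = (double)bucket[i] / (double)total;
            entropy -= p * log2(p);
        }
        return entropy;
    }

A value near 8 bits per byte means the sample looks random and is unlikely to compress well.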
raid56.c
340 int bucket = rbio_bucket(rbio); in __remove_rbio_from_cache() local
352 h = table->table + bucket; in __remove_rbio_from_cache()
750 int bucket; in unlock_stripe() local
755 bucket = rbio_bucket(rbio); in unlock_stripe()
756 h = rbio->fs_info->stripe_hash_table->table + bucket; in unlock_stripe()
/fs/xfs/libxfs/
xfs_ag.c
568 int bucket; in xfs_agflblock_init() local
577 for (bucket = 0; bucket < xfs_agfl_size(mp); bucket++) in xfs_agflblock_init()
578 agfl_bno[bucket] = cpu_to_be32(NULLAGBLOCK); in xfs_agflblock_init()
588 int bucket; in xfs_agiblock_init() local
606 for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) in xfs_agiblock_init()
607 agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO); in xfs_agiblock_init()
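
Both xfs_agflblock_init() and xfs_agiblock_init() fill every bucket with an all-ones sentinel (NULLAGBLOCK, NULLAGINO) meaning the slot or chain is empty. A sketch of the AGI case, assuming the usual definitions of the two constants and ignoring the on-disk cpu_to_be32() conversion:

    #include <stdint.h>

    #define AGI_UNLINKED_BUCKETS 64      /* assumed XFS_AGI_UNLINKED_BUCKETS */
    #define NULLAGINO ((uint32_t)-1)     /* sentinel: no inode in this chain */

    /* Start every unlinked-inode bucket empty, as xfs_agiblock_init()
     * does (minus the big-endian conversion for the on-disk format). */
    static void agi_unlinked_init(uint32_t unlinked[AGI_UNLINKED_BUCKETS])
    {
        for (int bucket = 0; bucket < AGI_UNLINKED_BUCKETS; bucket++)
            unlinked[bucket] = NULLAGINO;
    }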
/fs/fuse/
inode.c
680 struct fuse_sync_bucket *bucket; in fuse_sync_bucket_alloc() local
682 bucket = kzalloc(sizeof(*bucket), GFP_KERNEL | __GFP_NOFAIL); in fuse_sync_bucket_alloc()
683 if (bucket) { in fuse_sync_bucket_alloc()
684 init_waitqueue_head(&bucket->waitq); in fuse_sync_bucket_alloc()
686 atomic_set(&bucket->count, 1); in fuse_sync_bucket_alloc()
688 return bucket; in fuse_sync_bucket_alloc()
693 struct fuse_sync_bucket *bucket, *new_bucket; in fuse_sync_fs_writes() local
698 bucket = rcu_dereference_protected(fc->curr_bucket, 1); in fuse_sync_fs_writes()
699 count = atomic_read(&bucket->count); in fuse_sync_fs_writes()
720 atomic_dec(&bucket->count); in fuse_sync_fs_writes()
[all …]
fuse_i.h
1033 static inline void fuse_sync_bucket_dec(struct fuse_sync_bucket *bucket) in fuse_sync_bucket_dec() argument
1037 if (atomic_dec_and_test(&bucket->count)) in fuse_sync_bucket_dec()
1038 wake_up(&bucket->waitq); in fuse_sync_bucket_dec()
file.c
433 struct fuse_sync_bucket *bucket; member
1721 if (wpa->bucket) in fuse_writepage_free()
1722 fuse_sync_bucket_dec(wpa->bucket); in fuse_writepage_free()
2017 wpa->bucket = rcu_dereference(fc->curr_bucket); in fuse_writepage_add_to_bucket()
2018 } while (unlikely(!atomic_inc_not_zero(&wpa->bucket->count))); in fuse_writepage_add_to_bucket()
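
The three fuse hits describe one mechanism: each writeback holds a reference on the current sync bucket (taken with atomic_inc_not_zero() in file.c), and fuse_sync_bucket_dec() wakes the waitqueue once the count reaches zero, letting fuse_sync_fs_writes() know all earlier writes have drained. A userspace sketch of the get and dec-and-wake steps with C11 atomics, a condition variable standing in for the kernel waitqueue, and illustrative names throughout:

    #include <pthread.h>
    #include <stdatomic.h>

    struct sync_bucket {
        atomic_int count;        /* like bucket->count; owner holds 1 ref */
        pthread_mutex_t mu;
        pthread_cond_t waitq;    /* stands in for the kernel waitqueue */
    };

    /* Take a reference only if the bucket is still live (count != 0),
     * mirroring the atomic_inc_not_zero() retry loop in file.c. */
    static int sync_bucket_get(struct sync_bucket *b)
    {
        int c = atomic_load(&b->count);
        while (c != 0)
            if (atomic_compare_exchange_weak(&b->count, &c, c + 1))
                return 1;      /* got a reference */
        return 0;              /* bucket already drained; caller retries */
    }

    /* Drop a reference; whoever drops the last one wakes the waiter,
     * as fuse_sync_bucket_dec() does with wake_up(). */
    static void sync_bucket_dec(struct sync_bucket *b)
    {
        if (atomic_fetch_sub(&b->count, 1) == 1) {
            pthread_mutex_lock(&b->mu);
            pthread_cond_broadcast(&b->waitq);
            pthread_mutex_unlock(&b->mu);
        }
    }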
/fs/notify/fanotify/
fanotify.c
159 unsigned int bucket = fanotify_event_hash_bucket(group, new); in fanotify_merge() local
160 struct hlist_head *hlist = &group->fanotify_data.merge_hash[bucket]; in fanotify_merge()
164 group, event, bucket); in fanotify_merge()
700 unsigned int bucket = fanotify_event_hash_bucket(group, event); in fanotify_insert_event() local
701 struct hlist_head *hlist = &group->fanotify_data.merge_hash[bucket]; in fanotify_insert_event()
706 group, event, bucket); in fanotify_insert_event()
/fs/fscache/
cookie.c
208 unsigned int bucket; in fscache_hash_cookie() local
210 bucket = candidate->key_hash & (ARRAY_SIZE(fscache_cookie_hash) - 1); in fscache_hash_cookie()
211 h = &fscache_cookie_hash[bucket]; in fscache_hash_cookie()
847 unsigned int bucket; in fscache_unhash_cookie() local
849 bucket = cookie->key_hash & (ARRAY_SIZE(fscache_cookie_hash) - 1); in fscache_unhash_cookie()
850 h = &fscache_cookie_hash[bucket]; in fscache_unhash_cookie()
/fs/crypto/
keyring.c
226 struct hlist_head *bucket = &keyring->key_hashtable[i]; in fscrypt_destroy_keyring() local
230 hlist_for_each_entry_safe(mk, tmp, bucket, mk_node) { in fscrypt_destroy_keyring()
276 struct hlist_head *bucket; in fscrypt_find_master_key() local
289 bucket = fscrypt_mk_hash_bucket(keyring, mk_spec); in fscrypt_find_master_key()
293 hlist_for_each_entry_rcu(mk, bucket, mk_node) { in fscrypt_find_master_key()
304 hlist_for_each_entry_rcu(mk, bucket, mk_node) { in fscrypt_find_master_key()
/fs/xfs/
xfs_log_recover.c
2664 int bucket) in xlog_recover_clear_agi_bucket() argument
2681 agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO); in xlog_recover_clear_agi_bucket()
2683 (sizeof(xfs_agino_t) * bucket); in xlog_recover_clear_agi_bucket()
2704 int bucket) in xlog_recover_process_one_iunlink() argument
2748 xlog_recover_clear_agi_bucket(mp, agno, bucket); in xlog_recover_process_one_iunlink()
2785 int bucket; in xlog_recover_process_iunlinks() local
2811 for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) { in xlog_recover_process_iunlinks()
2812 agino = be32_to_cpu(agi->agi_unlinked[bucket]); in xlog_recover_process_iunlinks()
2815 pag->pag_agno, agino, bucket); in xlog_recover_process_iunlinks()
xfs_buf_item_recover.c
52 struct list_head *bucket; in xlog_find_buffer_cancelled() local
58 bucket = XLOG_BUF_CANCEL_BUCKET(log, blkno); in xlog_find_buffer_cancelled()
59 list_for_each_entry(bcp, bucket, bc_list) { in xlog_find_buffer_cancelled()
xfs_trace.h
3582 TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, unsigned int bucket,
3584 TP_ARGS(mp, agno, bucket, old_ptr, new_ptr),
3588 __field(unsigned int, bucket)
3595 __entry->bucket = bucket;
3602 __entry->bucket,
/fs/ocfs2/dlm/
dlmdomain.c
158 struct hlist_head *bucket; in __dlm_insert_lockres() local
162 bucket = dlm_lockres_hash(dlm, res->lockname.hash); in __dlm_insert_lockres()
167 hlist_add_head(&res->hash_node, bucket); in __dlm_insert_lockres()
178 struct hlist_head *bucket; in __dlm_lookup_lockres_full() local
185 bucket = dlm_lockres_hash(dlm, hash); in __dlm_lookup_lockres_full()
187 hlist_for_each_entry(res, bucket, hash_node) { in __dlm_lookup_lockres_full()
408 struct hlist_head *bucket; in dlm_migrate_all_locks() local
418 bucket = dlm_lockres_hash(dlm, i); in dlm_migrate_all_locks()
419 iter = bucket->first; in dlm_migrate_all_locks()
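
The ocfs2 dlm files all use the same chained hash table: __dlm_insert_lockres() prepends to the bucket's hlist, and lookups walk the chain comparing the hash and then the name. A self-contained sketch of insert and lookup over plain singly linked chains; the struct layout and helper names are invented for the example.

    #include <stdint.h>
    #include <string.h>

    #define TABLE_SIZE 128   /* illustrative; must be a power of two here */

    struct lockres {
        struct lockres *next;    /* chain link, like hash_node above */
        uint32_t hash;
        char name[32];
    };

    static struct lockres *table[TABLE_SIZE];

    /* Prepend to the bucket chain, the hlist_add_head() analog. */
    static void insert_lockres(struct lockres *res)
    {
        struct lockres **bucket = &table[res->hash & (TABLE_SIZE - 1)];
        res->next = *bucket;
        *bucket = res;
    }

    /* Walk one bucket chain, doing the cheap hash compare before the
     * name compare, as __dlm_lookup_lockres_full() does. */
    static struct lockres *lookup_lockres(const char *name, uint32_t hash)
    {
        struct lockres *res;
        for (res = table[hash & (TABLE_SIZE - 1)]; res; res = res->next)
            if (res->hash == hash && strcmp(res->name, name) == 0)
                return res;
        return NULL;
    }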
dlmmaster.c
313 struct hlist_head *bucket; in __dlm_insert_mle() local
317 bucket = dlm_master_hash(dlm, mle->mnamehash); in __dlm_insert_mle()
318 hlist_add_head(&mle->master_hash_node, bucket); in __dlm_insert_mle()
327 struct hlist_head *bucket; in dlm_find_mle() local
333 bucket = dlm_master_hash(dlm, hash); in dlm_find_mle()
334 hlist_for_each_entry(tmpmle, bucket, master_hash_node) { in dlm_find_mle()
3331 struct hlist_head *bucket; in dlm_clean_master_list() local
3342 bucket = dlm_master_hash(dlm, i); in dlm_clean_master_list()
3343 hlist_for_each_entry_safe(mle, tmp, bucket, master_hash_node) { in dlm_clean_master_list()
3531 struct hlist_head *bucket; in dlm_force_free_mles() local
[all …]
dlmdebug.c
407 struct hlist_head *bucket; in debug_mle_print() local
416 bucket = dlm_master_hash(dlm, i); in debug_mle_print()
417 hlist_for_each_entry(mle, bucket, master_hash_node) { in debug_mle_print()
dlmrecovery.c
2132 struct hlist_head *bucket; in dlm_finish_local_lockres_recovery() local
2161 bucket = dlm_lockres_hash(dlm, i); in dlm_finish_local_lockres_recovery()
2162 hlist_for_each_entry(res, bucket, hash_node) { in dlm_finish_local_lockres_recovery()
2324 struct hlist_head *bucket; in dlm_do_local_recovery_cleanup() local
2347 bucket = dlm_lockres_hash(dlm, i); in dlm_do_local_recovery_cleanup()
2348 hlist_for_each_entry_safe(res, tmp, bucket, hash_node) { in dlm_do_local_recovery_cleanup()
/fs/jffs2/
gc.c
141 int bucket, want_ino; in jffs2_garbage_collect_pass() local
158 for (bucket = c->check_ino % c->inocache_hashsize ; bucket < c->inocache_hashsize; bucket++) { in jffs2_garbage_collect_pass()
159 for (ic = c->inocache_list[bucket]; ic; ic = ic->next) { in jffs2_garbage_collect_pass()
/fs/omfs/
dir.c
28 int bucket = omfs_hash(name, namelen, nbuckets); in omfs_get_bucket() local
30 *ofs = OMFS_DIR_START + bucket * 8; in omfs_get_bucket()
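
omfs_get_bucket() maps a name to its bucket via omfs_hash() and then to a byte offset inside the directory block: each bucket is one 8-byte block pointer in a flat table. A sketch of that offset computation; the OMFS_DIR_START value below is an assumption about the omfs on-disk layout, not verified here.

    #include <stdint.h>

    #define OMFS_DIR_START 0x1b8   /* assumed table offset in the dir block */

    /* Bucket i lives at OMFS_DIR_START + i * 8, one 8-byte pointer per
     * bucket, matching the *ofs computation in omfs_get_bucket(). */
    static uint64_t bucket_to_offset(unsigned int bucket)
    {
        return OMFS_DIR_START + (uint64_t)bucket * 8;
    }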
/fs/
seq_file.c
1154 struct hlist_head *bucket = per_cpu_ptr(head, *cpu); in seq_hlist_next_percpu() local
1156 if (!hlist_empty(bucket)) in seq_hlist_next_percpu()
1157 return bucket->first; in seq_hlist_next_percpu()
