Occurrences of "refcnt" under fs/, grouped by directory and file (line number, matched source line, enclosing function or context):

/fs/notify/

  group.c
    101  refcount_inc(&group->refcnt);  in fsnotify_get_group()
    109  if (refcount_dec_and_test(&group->refcnt))  in fsnotify_put_group()
    126  refcount_set(&group->refcnt, 1);  in __fsnotify_alloc_group()

  mark.c
    96   WARN_ON_ONCE(!refcount_read(&mark->refcnt));  in fsnotify_get_mark()
    97   refcount_inc(&mark->refcnt);  in fsnotify_get_mark()
    302  if (refcount_dec_and_test(&mark->refcnt))  in fsnotify_put_mark()
    311  if (!refcount_dec_and_lock(&mark->refcnt, &conn->lock))  in fsnotify_put_mark()
    360  if (refcount_inc_not_zero(&mark->refcnt)) {  in fsnotify_get_mark_safe()
    446  refcount_read(&mark->refcnt) < 1 +  in fsnotify_detach_mark()
    877  refcount_set(&mark->refcnt, 1);  in fsnotify_init_mark()

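The hits above follow the standard kernel get/put discipline for refcount_t: refcount_set(..., 1) hands the creator the initial reference, refcount_inc() takes another, refcount_dec_and_test() frees on the last put, and refcount_dec_and_lock() is used when the final put must also hold the lock guarding the object's list (as fsnotify_put_mark() does with conn->lock). A minimal sketch of that shape, with hypothetical names rather than the fsnotify ones:

    #include <linux/list.h>
    #include <linux/refcount.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct obj {
        refcount_t refcnt;
        spinlock_t *list_lock;    /* lock guarding the list this object sits on */
        struct list_head entry;
    };

    static struct obj *obj_alloc(spinlock_t *list_lock)
    {
        struct obj *o = kzalloc(sizeof(*o), GFP_KERNEL);

        if (!o)
            return NULL;
        refcount_set(&o->refcnt, 1);    /* creator owns the first reference */
        o->list_lock = list_lock;
        return o;
    }

    static void obj_get(struct obj *o)
    {
        refcount_inc(&o->refcnt);
    }

    static void obj_put(struct obj *o)
    {
        /* Acquires list_lock only when this put drops the count to zero. */
        if (!refcount_dec_and_lock(&o->refcnt, o->list_lock))
            return;
        list_del(&o->entry);
        spin_unlock(o->list_lock);
        kfree(o);
    }

refcount_inc_not_zero(), seen in fsnotify_get_mark_safe(), is the lookup-side variant: it refuses to resurrect an object whose count has already reached zero.
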
/fs/hfs/

  bnode.c
    265  atomic_set(&node->refcnt, 1);  in __hfs_bnode_create()
    307  node->tree->cnid, node->this, atomic_read(&node->refcnt));  in hfs_bnode_unhash()
    450  atomic_inc(&node->refcnt);  in hfs_bnode_get()
    453  atomic_read(&node->refcnt));  in hfs_bnode_get()
    466  atomic_read(&node->refcnt));  in hfs_bnode_put()
    467  BUG_ON(!atomic_read(&node->refcnt));  in hfs_bnode_put()
    468  if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock))  in hfs_bnode_put()

  btree.c
    150  if (atomic_read(&node->refcnt))  in hfs_btree_close()
    153  atomic_read(&node->refcnt));  in hfs_btree_close()

  btree.h
    68   atomic_t refcnt;  member

  inode.c
    103  else if (atomic_read(&node->refcnt))  in hfs_release_folio()
    118  if (atomic_read(&node->refcnt)) {  in hfs_release_folio()

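hfs counts b-tree node references with a plain atomic_t. The interesting hit is atomic_dec_and_lock() in hfs_bnode_put(): it takes tree->hash_lock only when the count actually drops to zero, so the common-case put never touches the lock, and the zero-count path can unhash and free the node while the lock protects the hash chain. A hedged sketch of that pattern (illustrative names, not the hfs code itself):

    #include <linux/atomic.h>
    #include <linux/bug.h>
    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct node {
        atomic_t refcnt;
        struct hlist_node hash;    /* chained into a lookup hash table */
        spinlock_t *hash_lock;     /* lock guarding that hash table */
    };

    static void node_get(struct node *n)
    {
        atomic_inc(&n->refcnt);
    }

    static void node_put(struct node *n)
    {
        BUG_ON(!atomic_read(&n->refcnt));    /* put on an already-dead node */

        /* Fast path: count stays positive, hash_lock never taken. */
        if (!atomic_dec_and_lock(&n->refcnt, n->hash_lock))
            return;

        /* Count hit zero and we hold hash_lock: safe to unhash and free. */
        hlist_del(&n->hash);
        spin_unlock(n->hash_lock);
        kfree(n);
    }

The bare atomic_read() hits in btree.c and inode.c are the other half of the pattern: hfs_btree_close() and hfs_release_folio() peek at the count to decide whether a node is still in use before tearing its pages down.
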
/fs/fuse/

  dax.c
    59    refcount_t refcnt;  member
    328   WARN_ON(refcount_read(&dmap->refcnt) > 1);  in inode_reclaim_dmap_range()
    420   refcount_inc(&dmap->refcnt);  in fuse_fill_iomap()
    532   if (refcount_dec_and_test(&dmap->refcnt)) {  in fuse_upgrade_dax_mapping()
    602   refcount_inc(&dmap->refcnt);  in fuse_iomap_begin()
    642   if (refcount_dec_and_test(&dmap->refcnt)) {  in fuse_iomap_end()
    932   if (refcount_read(&dmap->refcnt) > 1)  in inode_lookup_first_dmap()
    993   if (refcount_read(&dmap->refcnt) > 1) {  in inode_inline_reclaim_one_dmap()
    1087  if (refcount_read(&dmap->refcnt) > 1)  in lookup_and_reclaim_dmap_locked()
    1158  if (refcount_read(&pos->refcnt) > 1)  in try_to_free_dmap_chunks()
    [all …]

  file.c
    696   kfree(container_of(kref, struct fuse_io_priv, refcnt));  in fuse_io_release()
    757   kref_put(&io->refcnt, fuse_io_release);  in fuse_aio_complete()
    820   kref_get(&io->refcnt);  in fuse_async_req_send()
    3084  kref_init(&io->refcnt);  in fuse_direct_IO()
    3118  kref_get(&io->refcnt);  in fuse_direct_IO()
    3143  kref_put(&io->refcnt, fuse_io_release);  in fuse_direct_IO()

  fuse_i.h
    387   struct kref refcnt;  member
    404   .refcnt = KREF_INIT(1), \

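fuse uses both flavors: a raw refcount_t for DAX mapping chunks (where a count above 1 marks a chunk as busy in the reclaim paths), and struct kref for the direct-I/O context, where kref_put() invokes a release callback that container_of()s back to the outer object. A minimal sketch of the kref lifecycle, with stand-in names rather than the fuse ones:

    #include <linux/kref.h>
    #include <linux/slab.h>

    struct io_ctx {
        struct kref refcnt;
        /* ... per-request state ... */
    };

    static void io_ctx_release(struct kref *kref)
    {
        /* kref_put() calls this only when the count reaches zero. */
        kfree(container_of(kref, struct io_ctx, refcnt));
    }

    static struct io_ctx *io_ctx_alloc(void)
    {
        struct io_ctx *io = kzalloc(sizeof(*io), GFP_KERNEL);

        if (io)
            kref_init(&io->refcnt);    /* count starts at 1 */
        return io;
    }

    /* Each in-flight async request pins the context... */
    static void io_ctx_submit(struct io_ctx *io)
    {
        kref_get(&io->refcnt);
        /* ... queue the request; the completion path will put ... */
    }

    /* ...and the completion path drops that pin. */
    static void io_ctx_complete(struct io_ctx *io)
    {
        kref_put(&io->refcnt, io_ctx_release);
    }

The KREF_INIT(1) hit in fuse_i.h is the static-initializer form of kref_init(), for contexts built with a designated initializer instead of at runtime.
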
/fs/hfsplus/

  bnode.c
    415  atomic_set(&node->refcnt, 1);  in __hfs_bnode_create()
    457  node->tree->cnid, node->this, atomic_read(&node->refcnt));  in hfs_bnode_unhash()
    601  atomic_inc(&node->refcnt);  in hfs_bnode_get()
    604  atomic_read(&node->refcnt));  in hfs_bnode_get()
    617  atomic_read(&node->refcnt));  in hfs_bnode_put()
    618  BUG_ON(!atomic_read(&node->refcnt));  in hfs_bnode_put()
    619  if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock))  in hfs_bnode_put()

  btree.c
    270  if (atomic_read(&node->refcnt))  in hfs_btree_close()
    274  atomic_read(&node->refcnt));  in hfs_btree_close()

  inode.c
    99   else if (atomic_read(&node->refcnt))  in hfsplus_release_folio()
    115  if (atomic_read(&node->refcnt)) {  in hfsplus_release_folio()

  hfsplus_fs.h
    118  atomic_t refcnt;  member

hfsplus duplicates the fs/hfs b-tree node pattern above, hit for hit; only the line numbers and the release_folio name differ.

/fs/coda/

  file.c
    34   refcount_t refcnt;  member
    130  refcount_inc(&cvm_ops->refcnt);  in coda_vm_open()
    145  if (refcount_dec_and_test(&cvm_ops->refcnt)) {  in coda_vm_close()
    221  refcount_set(&cvm_ops->refcnt, 1);  in coda_file_mmap()

/fs/proc/

  internal.h
    36   refcount_t refcnt;  member
    197  refcount_inc(&pde->refcnt);  in pde_get()

  generic.c
    444  refcount_set(&ent->refcnt, 1);  in __proc_create()
    676  if (refcount_dec_and_test(&pde->refcnt)) {  in pde_put()

  root.c
    370  .refcnt = REFCOUNT_INIT(1),

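procfs shows the two initialization forms side by side: refcount_set(&ent->refcnt, 1) for entries allocated at runtime in __proc_create(), and REFCOUNT_INIT(1) in a designated initializer for the statically allocated root entry in root.c. A small illustrative sketch, assuming hypothetical names:

    #include <linux/refcount.h>
    #include <linux/slab.h>

    struct entry {
        refcount_t refcnt;
    };

    /* Statically allocated: the count is set up at build time. */
    static struct entry root_entry = {
        .refcnt = REFCOUNT_INIT(1),
    };

    /* Dynamically allocated: the count is set after kzalloc(). */
    static struct entry *entry_create(void)
    {
        struct entry *e = kzalloc(sizeof(*e), GFP_KERNEL);

        if (e)
            refcount_set(&e->refcnt, 1);
        return e;
    }

    static inline void entry_get(struct entry *e)
    {
        refcount_inc(&e->refcnt);
    }

    static void entry_put(struct entry *e)
    {
        if (refcount_dec_and_test(&e->refcnt))
            kfree(e);    /* never reached for root_entry: its initial ref is never dropped */
    }
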
/fs/

  pnode.c
    407  int propagate_mount_busy(struct mount *mnt, int refcnt)  in propagate_mount_busy()  argument
    413  return do_refcount_check(mnt, refcnt);  in propagate_mount_busy()
    420  if (!list_empty(&mnt->mnt_mounts) || do_refcount_check(mnt, refcnt))  in propagate_mount_busy()

  namei.c
    191  atomic_set(&result->refcnt, 1);  in getname_flags()
    252  atomic_set(&result->refcnt, 1);  in getname_kernel()
    264  if (WARN_ON_ONCE(!atomic_read(&name->refcnt)))  in putname()
    267  if (!atomic_dec_and_test(&name->refcnt))  in putname()

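pnode.c is the odd one out: its refcnt is a plain int argument, the expected-count threshold that do_refcount_check() compares each mount against, not a counter object. namei.c's struct filename, by contrast, carries a real atomic_t, and putname() checks for a double put before dropping it. A minimal sketch of that guarded put, with hypothetical names:

    #include <linux/atomic.h>
    #include <linux/bug.h>
    #include <linux/slab.h>

    struct name_buf {
        atomic_t refcnt;
        const char *name;
    };

    static void put_name_buf(struct name_buf *nb)
    {
        /* A count already at zero means a double put: warn once and bail. */
        if (WARN_ON_ONCE(!atomic_read(&nb->refcnt)))
            return;

        if (!atomic_dec_and_test(&nb->refcnt))
            return;    /* other holders remain */

        kfree(nb);
    }
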
/fs/nfs/blocklayout/

  blocklayout.c
    68   struct kref refcnt;  member
    80   kref_init(&rv->refcnt);  in alloc_parallel()
    87   kref_get(&p->refcnt);  in get_parallel()
    92   struct parallel_io *p = container_of(kref, struct parallel_io, refcnt);  in destroy_parallel()
    101  kref_put(&p->refcnt, destroy_parallel);  in put_parallel()

/fs/jffs2/

  xattr.c
    355   atomic_inc(&xd->refcnt);  in create_xattr_datum()
    375   atomic_set(&xd->refcnt, 1);  in create_xattr_datum()
    407   if (atomic_dec_and_lock(&xd->refcnt, &c->erase_completion_lock)) {  in unrefer_xattr_datum()
    624   if (atomic_dec_and_test(&xd->refcnt)) {  in jffs2_xattr_free_inode()
    857   atomic_inc(&xd->refcnt);  in jffs2_build_xattr_subsystem()
    868   if (!atomic_read(&xd->refcnt)) {  in jffs2_build_xattr_subsystem()
    1332  if (atomic_read(&xd->refcnt) || xd->node != (void *)xd)  in jffs2_release_xattr_datum()

  xattr.h
    32    atomic_t refcnt;  /* # of xattr_ref refers this */  member

/fs/notify/inotify/

  inotify_user.c
    430  BUG_ON(refcount_read(&fsn_mark->refcnt) < 2);  in inotify_idr_find_locked()
    500  if (unlikely(refcount_read(&i_mark->fsn_mark.refcnt) < 2)) {  in inotify_remove_from_idr()

/fs/f2fs/

  compress.c
    1682  refcount_set(&dic->refcnt, 1);  in f2fs_alloc_dic()
    1757  if (refcount_dec_and_test(&dic->refcnt)) {  in f2fs_put_dic()

  data.c
    2349  refcount_inc(&dic->refcnt);  in f2fs_read_multi_pages()

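f2fs applies the same refcount_t discipline to its decompress I/O context: one reference from f2fs_alloc_dic(), an extra reference taken in the read path for work that completes asynchronously, and teardown in f2fs_put_dic() when the last reference drops. A hedged sketch of a refcount used this way, as a completion counter (hypothetical names, not the f2fs code):

    #include <linux/refcount.h>
    #include <linux/slab.h>

    struct dec_ctx {
        refcount_t refcnt;
        /* ... page and buffer state for one compressed cluster ... */
    };

    static struct dec_ctx *dec_ctx_alloc(void)
    {
        struct dec_ctx *dc = kzalloc(sizeof(*dc), GFP_KERNEL);

        if (dc)
            refcount_set(&dc->refcnt, 1);    /* reference held by the submitter */
        return dc;
    }

    /* One extra reference per piece of I/O that will complete later. */
    static void dec_ctx_attach_io(struct dec_ctx *dc)
    {
        refcount_inc(&dc->refcnt);
    }

    static void dec_ctx_put(struct dec_ctx *dc)
    {
        /* Frees only after the submitter and every completion have put. */
        if (refcount_dec_and_test(&dc->refcnt))
            kfree(dc);
    }
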