/kernel/linux/linux-5.10/fs/nfs/ |
D | nfs3proc.c |
  901  struct nfs_lock_context *l_ctx = data;  in nfs3_nlm_alloc_call() local
  902  if (l_ctx && test_bit(NFS_CONTEXT_UNLOCK, &l_ctx->open_context->flags)) {  in nfs3_nlm_alloc_call()
  903  get_nfs_open_context(l_ctx->open_context);  in nfs3_nlm_alloc_call()
  904  nfs_get_lock_context(l_ctx->open_context);  in nfs3_nlm_alloc_call()
  910  struct nfs_lock_context *l_ctx = data;  in nfs3_nlm_unlock_prepare() local
  911  if (l_ctx && test_bit(NFS_CONTEXT_UNLOCK, &l_ctx->open_context->flags))  in nfs3_nlm_unlock_prepare()
  912  return nfs_async_iocounter_wait(task, l_ctx);  in nfs3_nlm_unlock_prepare()
  919  struct nfs_lock_context *l_ctx = data;  in nfs3_nlm_release_call() local
  921  if (l_ctx && test_bit(NFS_CONTEXT_UNLOCK, &l_ctx->open_context->flags)) {  in nfs3_nlm_release_call()
  922  ctx = l_ctx->open_context;  in nfs3_nlm_release_call()
  [all …]
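The nfs3proc.c hits are the NLM client callbacks NFSv3 runs around an asynchronous UNLOCK: when the open context carries NFS_CONTEXT_UNLOCK, the call pins the open and lock contexts, defers the RPC until I/O issued under that lock context has drained, and drops the references on release. Below is a condensed, hedged sketch of that shape; anything not visible in the excerpt (return types, the tail of nfs3_nlm_release_call(), how the callbacks get registered) is an assumption, not taken from the listing.

    /* Sketch only: condenses the gating visible in the nfs3proc.c excerpt.
     * Signatures beyond what the excerpt shows are assumed. */
    static void nfs3_nlm_alloc_call(void *data)
    {
        struct nfs_lock_context *l_ctx = data;

        if (l_ctx && test_bit(NFS_CONTEXT_UNLOCK, &l_ctx->open_context->flags)) {
            get_nfs_open_context(l_ctx->open_context);   /* pin the open context */
            nfs_get_lock_context(l_ctx->open_context);   /* and bump its lock context */
        }
    }

    static bool nfs3_nlm_unlock_prepare(struct rpc_task *task, void *data)
    {
        struct nfs_lock_context *l_ctx = data;

        /* Put the RPC task to sleep until I/O under this lock context has
         * completed (io_count reaches zero), then let the UNLOCK proceed. */
        if (l_ctx && test_bit(NFS_CONTEXT_UNLOCK, &l_ctx->open_context->flags))
            return nfs_async_iocounter_wait(task, l_ctx);
        return false;
    }

    static void nfs3_nlm_release_call(void *data)
    {
        struct nfs_lock_context *l_ctx = data;
        struct nfs_open_context *ctx;

        if (l_ctx && test_bit(NFS_CONTEXT_UNLOCK, &l_ctx->open_context->flags)) {
            ctx = l_ctx->open_context;
            nfs_put_lock_context(l_ctx);   /* assumed: balances alloc_call */
            put_nfs_open_context(ctx);
        }
    }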
|
D | pagelist.c |
  116  nfs_iocounter_wait(struct nfs_lock_context *l_ctx)  in nfs_iocounter_wait() argument
  118  return wait_var_event_killable(&l_ctx->io_count,  in nfs_iocounter_wait()
  119  !atomic_read(&l_ctx->io_count));  in nfs_iocounter_wait()
  132  nfs_async_iocounter_wait(struct rpc_task *task, struct nfs_lock_context *l_ctx)  in nfs_async_iocounter_wait() argument
  134  struct inode *inode = d_inode(l_ctx->open_context->dentry);  in nfs_async_iocounter_wait()
  137  if (atomic_read(&l_ctx->io_count) > 0) {  in nfs_async_iocounter_wait()
  142  if (atomic_read(&l_ctx->io_count) == 0) {  in nfs_async_iocounter_wait()
  435  __nfs_create_request(struct nfs_lock_context *l_ctx, struct page *page,  in __nfs_create_request() argument
  440  struct nfs_open_context *ctx = l_ctx->open_context;  in __nfs_create_request()
  449  req->wb_lock_context = l_ctx;  in __nfs_create_request()
  [all …]
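pagelist.c gates lock teardown on l_ctx->io_count: nfs_iocounter_wait() sleeps killably until the count reaches zero, and nfs_async_iocounter_wait() does the same asynchronously for an rpc_task. The increment/decrement side is not in the excerpt (presumably around where __nfs_create_request() attaches a request to the lock context, and where requests complete). The sketch below isolates just the gate; lockctx_io_wait()/lockctx_io_done() are hypothetical names, and the wake side assumes the standard wait_var_event()/wake_up_var() pairing rather than anything shown above.

    #include <linux/atomic.h>
    #include <linux/wait_bit.h>    /* wait_var_event_killable(), wake_up_var() */
    #include <linux/nfs_fs.h>      /* struct nfs_lock_context */

    /* Hypothetical wrapper: block (killable) until io_count drops to zero. */
    static int lockctx_io_wait(struct nfs_lock_context *l_ctx)
    {
        return wait_var_event_killable(&l_ctx->io_count,
                                       !atomic_read(&l_ctx->io_count));
    }

    /* Hypothetical completion side: the last finishing request wakes waiters. */
    static void lockctx_io_done(struct nfs_lock_context *l_ctx)
    {
        if (atomic_dec_and_test(&l_ctx->io_count))
            wake_up_var(&l_ctx->io_count);
    }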
|
D | direct.c |
  72   struct nfs_lock_context *l_ctx; /* Lock context info */  member
  220  if (dreq->l_ctx != NULL)  in nfs_direct_req_free()
  221  nfs_put_lock_context(dreq->l_ctx);  in nfs_direct_req_free()
  450  struct nfs_lock_context *l_ctx;  in nfs_file_direct_read() local
  473  l_ctx = nfs_get_lock_context(dreq->ctx);  in nfs_file_direct_read()
  474  if (IS_ERR(l_ctx)) {  in nfs_file_direct_read()
  475  result = PTR_ERR(l_ctx);  in nfs_file_direct_read()
  479  dreq->l_ctx = l_ctx;  in nfs_file_direct_read()
  908  struct nfs_lock_context *l_ctx;  in nfs_file_direct_write() local
  938  l_ctx = nfs_get_lock_context(dreq->ctx);  in nfs_file_direct_write()
  [all …]
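direct.c pins a lock context in the direct-I/O request for its whole lifetime: nfs_get_lock_context() at setup (with the usual IS_ERR() check), the result stored in dreq->l_ctx, and nfs_put_lock_context() when the request is freed. A hedged sketch of that ownership pattern follows; struct my_dio_req and the helper names are hypothetical stand-ins for nfs_direct_req and its setup/free paths.

    #include <linux/err.h>
    #include <linux/nfs_fs.h>

    /* Hypothetical stand-in for the nfs_direct_req fields the excerpt shows. */
    struct my_dio_req {
        struct nfs_open_context *ctx;
        struct nfs_lock_context *l_ctx;   /* held for the request's lifetime */
    };

    static int my_dio_attach_lock_context(struct my_dio_req *dreq)
    {
        struct nfs_lock_context *l_ctx = nfs_get_lock_context(dreq->ctx);

        if (IS_ERR(l_ctx))
            return PTR_ERR(l_ctx);
        dreq->l_ctx = l_ctx;
        return 0;
    }

    static void my_dio_release(struct my_dio_req *dreq)
    {
        if (dreq->l_ctx != NULL)
            nfs_put_lock_context(dreq->l_ctx);   /* balances the get above */
    }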
|
D | file.c |
  703  struct nfs_lock_context *l_ctx;  in do_unlk() local
  712  l_ctx = nfs_get_lock_context(nfs_file_open_context(filp));  in do_unlk()
  713  if (!IS_ERR(l_ctx)) {  in do_unlk()
  714  status = nfs_iocounter_wait(l_ctx);  in do_unlk()
  715  nfs_put_lock_context(l_ctx);  in do_unlk()
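do_unlk() is the synchronous counterpart: before releasing a lock, take a reference on the file's lock context, wait killably for its io_count to drain, then drop the reference. A hedged sketch of just those lines; example_drain_lock_io() is a hypothetical wrapper name, and nfs_iocounter_wait() is declared in fs/nfs/internal.h (line 294 below), so a real caller would live inside fs/nfs/.

    #include <linux/fs.h>
    #include <linux/nfs_fs.h>
    #include "internal.h"    /* nfs_iocounter_wait() */

    /* Hypothetical wrapper mirroring the do_unlk() excerpt above. */
    static int example_drain_lock_io(struct file *filp)
    {
        struct nfs_lock_context *l_ctx;
        int status = 0;

        l_ctx = nfs_get_lock_context(nfs_file_open_context(filp));
        if (!IS_ERR(l_ctx)) {
            status = nfs_iocounter_wait(l_ctx);   /* 0, or -ERESTARTSYS on a fatal signal */
            nfs_put_lock_context(l_ctx);
        }
        return status;
    }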
|
D | inode.c |
  883  static void nfs_init_lock_context(struct nfs_lock_context *l_ctx)  in nfs_init_lock_context() argument
  885  refcount_set(&l_ctx->count, 1);  in nfs_init_lock_context()
  886  l_ctx->lockowner = current->files;  in nfs_init_lock_context()
  887  INIT_LIST_HEAD(&l_ctx->list);  in nfs_init_lock_context()
  888  atomic_set(&l_ctx->io_count, 0);  in nfs_init_lock_context()
  936  void nfs_put_lock_context(struct nfs_lock_context *l_ctx)  in nfs_put_lock_context() argument
  938  struct nfs_open_context *ctx = l_ctx->open_context;  in nfs_put_lock_context()
  941  if (!refcount_dec_and_lock(&l_ctx->count, &inode->i_lock))  in nfs_put_lock_context()
  943  list_del_rcu(&l_ctx->list);  in nfs_put_lock_context()
  946  kfree_rcu(l_ctx, rcu_head);  in nfs_put_lock_context()
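inode.c defines the lifecycle: nfs_init_lock_context() starts the refcount at 1, records current->files as the lock owner and zeroes io_count; nfs_put_lock_context() drops the last reference under inode->i_lock, unlinks the entry RCU-safely and frees it with kfree_rcu(). The caller-visible contract is simply that every successful nfs_get_lock_context() is balanced by nfs_put_lock_context(); a hedged sketch of that contract, with with_lock_context() as a hypothetical helper:

    #include <linux/err.h>
    #include <linux/nfs_fs.h>

    /* Hypothetical helper expressing the get/put contract around an operation. */
    static int with_lock_context(struct nfs_open_context *ctx,
                                 int (*op)(struct nfs_lock_context *l_ctx, void *arg),
                                 void *arg)
    {
        struct nfs_lock_context *l_ctx = nfs_get_lock_context(ctx);
        int status;

        if (IS_ERR(l_ctx))
            return PTR_ERR(l_ctx);
        status = op(l_ctx, arg);        /* l_ctx->lockowner names the files_struct it was created for */
        nfs_put_lock_context(l_ctx);    /* last put unlinks and kfree_rcu()s it */
        return status;
    }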
|
D | nfs42proc.c |
  566  struct nfs_lock_context *l_ctx;  in _nfs42_proc_copy_notify() local
  569  l_ctx = nfs_get_lock_context(ctx);  in _nfs42_proc_copy_notify()
  570  if (IS_ERR(l_ctx)) {  in _nfs42_proc_copy_notify()
  571  status = PTR_ERR(l_ctx);  in _nfs42_proc_copy_notify()
  575  status = nfs4_set_rw_stateid(&args->cna_src_stateid, ctx, l_ctx,  in _nfs42_proc_copy_notify()
  577  nfs_put_lock_context(l_ctx);  in _nfs42_proc_copy_notify()
|
D | nfs4state.c |
  990   const struct nfs_lock_context *l_ctx)  in nfs4_copy_lock_stateid() argument
  996   if (l_ctx == NULL)  in nfs4_copy_lock_stateid()
  1002  fl_owner = l_ctx->lockowner;  in nfs4_copy_lock_stateid()
  1003  fl_flock_owner = l_ctx->open_context->flock_owner;  in nfs4_copy_lock_stateid()
  1043  fmode_t fmode, const struct nfs_lock_context *l_ctx,  in nfs4_select_rw_stateid() argument
  1052  ret = nfs4_copy_lock_stateid(dst, state, l_ctx);  in nfs4_select_rw_stateid()
|
D | write.c |
  1175  struct nfs_lock_context *l_ctx;  in nfs_flush_incompatible() local
  1191  l_ctx = req->wb_lock_context;  in nfs_flush_incompatible()
  1194  if (l_ctx && flctx &&  in nfs_flush_incompatible()
  1197  do_flush |= l_ctx->lockowner != current->files;  in nfs_flush_incompatible()
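write.c consults the lock owner recorded in a cached request's lock context to decide whether a page must be flushed before reuse: with file locks present, a request dirtied under a different files_struct than current->files is treated as incompatible. A hedged sketch of just that test; example_req_needs_flush() is a hypothetical name and the other conditions nfs_flush_incompatible() checks (line 1194 is truncated above) are elided.

    #include <linux/fs.h>          /* struct file_lock_context */
    #include <linux/sched.h>       /* current */
    #include <linux/nfs_fs.h>
    #include <linux/nfs_page.h>    /* struct nfs_page, wb_lock_context */

    /* Hypothetical reduction of the lockowner check in nfs_flush_incompatible(). */
    static bool example_req_needs_flush(struct nfs_page *req,
                                        struct file_lock_context *flctx)
    {
        struct nfs_lock_context *l_ctx = req->wb_lock_context;
        bool do_flush = false;

        /* When the file is locked, a page written under another lock owner
         * (a different files_struct) must be flushed before it is reused. */
        if (l_ctx && flctx)
            do_flush |= l_ctx->lockowner != current->files;
        return do_flush;
    }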
|
D | nfs4proc.c |
  3325  struct nfs_lock_context *l_ctx;  in _nfs4_do_setattr() local
  3328  l_ctx = nfs_get_lock_context(ctx);  in _nfs4_do_setattr()
  3329  if (IS_ERR(l_ctx))  in _nfs4_do_setattr()
  3330  return PTR_ERR(l_ctx);  in _nfs4_do_setattr()
  3331  status = nfs4_select_rw_stateid(ctx->state, FMODE_WRITE, l_ctx,  in _nfs4_do_setattr()
  3333  nfs_put_lock_context(l_ctx);  in _nfs4_do_setattr()
  5241  const struct nfs_lock_context *l_ctx,  in nfs4_set_rw_stateid() argument
  5244  return nfs4_select_rw_stateid(ctx->state, fmode, l_ctx, stateid, NULL);  in nfs4_set_rw_stateid()
  5250  const struct nfs_lock_context *l_ctx,  in nfs4_stateid_is_current() argument
  5256  if (nfs4_set_rw_stateid(&_current_stateid, ctx, l_ctx, fmode) == -EIO)  in nfs4_stateid_is_current()
  [all …]
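In _nfs4_do_setattr() the lock context is borrowed only long enough to select the stateid for a WRITE-mode operation, then released; nfs4_set_rw_stateid() (line 5244) is the thin wrapper over nfs4_select_rw_stateid() that, for example, the COPY_NOTIFY path in nfs42proc.c above uses. A hedged sketch of that borrow pattern; example_choose_setattr_stateid() is a hypothetical name, and the trailing cred argument passed as NULL follows the wrapper at line 5244 rather than the truncated call at line 3331.

    #include <linux/err.h>
    #include <linux/nfs_fs.h>
    #include "nfs4_fs.h"    /* nfs4_select_rw_stateid() */

    /* Hypothetical reduction of the stateid selection in _nfs4_do_setattr(). */
    static int example_choose_setattr_stateid(struct nfs_open_context *ctx,
                                              nfs4_stateid *stateid)
    {
        struct nfs_lock_context *l_ctx;
        int status;

        l_ctx = nfs_get_lock_context(ctx);
        if (IS_ERR(l_ctx))
            return PTR_ERR(l_ctx);
        /* Pick the stateid that matches this open/lock owner for FMODE_WRITE. */
        status = nfs4_select_rw_stateid(ctx->state, FMODE_WRITE, l_ctx,
                                        stateid, NULL);
        nfs_put_lock_context(l_ctx);
        return status;
    }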
|
D | nfs4_fs.h |
  318  const struct nfs_lock_context *l_ctx,
|
D | internal.h |
  294  int nfs_iocounter_wait(struct nfs_lock_context *l_ctx);
|
/kernel/linux/linux-5.10/include/linux/ |
D | nfs_fs.h |
  401  extern void nfs_put_lock_context(struct nfs_lock_context *l_ctx);
|