
Searched refs: req (Results 1 – 25 of 71), sorted by relevance


/fs/ncpfs/
sock.c
58 struct list_head req; member
75 struct ncp_request_reply *req; in ncp_alloc_req() local
77 req = kmalloc(sizeof(struct ncp_request_reply), GFP_KERNEL); in ncp_alloc_req()
78 if (!req) in ncp_alloc_req()
81 init_waitqueue_head(&req->wq); in ncp_alloc_req()
82 atomic_set(&req->refs, (1)); in ncp_alloc_req()
83 req->status = RQ_IDLE; in ncp_alloc_req()
85 return req; in ncp_alloc_req()
88 static void ncp_req_get(struct ncp_request_reply *req) in ncp_req_get() argument
90 atomic_inc(&req->refs); in ncp_req_get()
[all …]
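
The sock.c hits above show the basic ncpfs request lifecycle: kmalloc the request, initialise its wait queue, start the reference count at 1, and bump it in ncp_req_get(). A minimal userspace sketch of the same allocate-and-refcount idea, using C11 atomics in place of the kernel's atomic_t (all names below are illustrative, not the kernel's):

    #include <stdatomic.h>
    #include <stdlib.h>

    enum req_status { RQ_IDLE, RQ_QUEUED, RQ_DONE };

    struct request_reply {
        atomic_int refs;              /* starts at 1, owned by the caller */
        enum req_status status;
    };

    static struct request_reply *req_alloc(void)
    {
        struct request_reply *req = malloc(sizeof(*req));

        if (!req)
            return NULL;
        atomic_init(&req->refs, 1);
        req->status = RQ_IDLE;
        return req;
    }

    static void req_get(struct request_reply *req)
    {
        atomic_fetch_add(&req->refs, 1);      /* like atomic_inc(&req->refs) */
    }

    static void req_put(struct request_reply *req)
    {
        if (atomic_fetch_sub(&req->refs, 1) == 1)
            free(req);                        /* last reference frees the object */
    }
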
/fs/nfs/
pagelist.c
61 struct nfs_page *req; in nfs_create_request() local
64 req = nfs_page_alloc(); in nfs_create_request()
65 if (req == NULL) in nfs_create_request()
69 req->wb_lock_context = nfs_get_lock_context(ctx); in nfs_create_request()
70 if (req->wb_lock_context == NULL) { in nfs_create_request()
71 nfs_page_free(req); in nfs_create_request()
78 req->wb_page = page; in nfs_create_request()
79 atomic_set(&req->wb_complete, 0); in nfs_create_request()
80 req->wb_index = page->index; in nfs_create_request()
85 req->wb_offset = offset; in nfs_create_request()
[all …]
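
The pagelist.c excerpt is the usual build-or-roll-back pattern: nfs_create_request() allocates the nfs_page, and if attaching the lock context fails it frees the half-built request before returning. A rough userspace sketch of that error handling, with hypothetical names standing in for the NFS helpers:

    #include <stdlib.h>

    struct lock_context { int placeholder; };

    /* Stand-in for nfs_get_lock_context(); may fail and return NULL. */
    static struct lock_context *get_lock_context(void)
    {
        return calloc(1, sizeof(struct lock_context));
    }

    struct page_request {
        struct lock_context *lock_ctx;
        unsigned long index;
        unsigned int offset, bytes;
    };

    static struct page_request *create_request(unsigned long index,
                                               unsigned int offset,
                                               unsigned int bytes)
    {
        struct page_request *req = calloc(1, sizeof(*req));

        if (!req)
            return NULL;
        req->lock_ctx = get_lock_context();
        if (!req->lock_ctx) {
            free(req);        /* roll back the partially built request */
            return NULL;
        }
        req->index = index;
        req->offset = offset;
        req->bytes = bytes;
        return req;
    }
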
write.c
44 static void nfs_redirty_request(struct nfs_page *req);
116 struct nfs_page *req = NULL; in nfs_page_find_request_locked() local
119 req = (struct nfs_page *)page_private(page); in nfs_page_find_request_locked()
120 if (req != NULL) in nfs_page_find_request_locked()
121 kref_get(&req->wb_kref); in nfs_page_find_request_locked()
123 return req; in nfs_page_find_request_locked()
129 struct nfs_page *req = NULL; in nfs_page_find_request() local
132 req = nfs_page_find_request_locked(page); in nfs_page_find_request()
134 return req; in nfs_page_find_request()
230 struct nfs_page *req; in nfs_find_and_lock_request() local
[all …]
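
nfs_page_find_request_locked() reads the request stashed in page_private(page) and takes a kref while the lock is still held, so the request cannot disappear once the lock is dropped. A simplified sketch of that lock-then-take-reference pattern, assuming a pthread mutex and a plain atomic counter in place of the inode spinlock and kref:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stddef.h>

    struct page_req {
        atomic_int refs;
    };

    struct tracked_page {
        pthread_mutex_t lock;
        struct page_req *private;         /* like page_private(page) */
    };

    static struct page_req *page_find_request(struct tracked_page *page)
    {
        struct page_req *req = NULL;

        pthread_mutex_lock(&page->lock);
        req = page->private;
        if (req != NULL)
            atomic_fetch_add(&req->refs, 1);  /* like kref_get(&req->wb_kref) */
        pthread_mutex_unlock(&page->lock);
        return req;
    }
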
read.c
155 static void nfs_readpage_release(struct nfs_page *req) in nfs_readpage_release() argument
157 struct inode *d_inode = req->wb_context->dentry->d_inode; in nfs_readpage_release()
159 if (PageUptodate(req->wb_page)) in nfs_readpage_release()
160 nfs_readpage_to_fscache(d_inode, req->wb_page, 0); in nfs_readpage_release()
162 unlock_page(req->wb_page); in nfs_readpage_release()
165 req->wb_context->dentry->d_inode->i_sb->s_id, in nfs_readpage_release()
166 (long long)NFS_FILEID(req->wb_context->dentry->d_inode), in nfs_readpage_release()
167 req->wb_bytes, in nfs_readpage_release()
168 (long long)req_offset(req)); in nfs_readpage_release()
169 nfs_release_request(req); in nfs_readpage_release()
[all …]
/fs/fuse/
file.c
25 struct fuse_req *req; in fuse_send_open() local
28 req = fuse_get_req(fc); in fuse_send_open()
29 if (IS_ERR(req)) in fuse_send_open()
30 return PTR_ERR(req); in fuse_send_open()
36 req->in.h.opcode = opcode; in fuse_send_open()
37 req->in.h.nodeid = nodeid; in fuse_send_open()
38 req->in.numargs = 1; in fuse_send_open()
39 req->in.args[0].size = sizeof(inarg); in fuse_send_open()
40 req->in.args[0].value = &inarg; in fuse_send_open()
41 req->out.numargs = 1; in fuse_send_open()
[all …]
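
fuse_send_open() fills the request with exactly one input argument (the open flags) and one output argument for the reply before handing it to the connection. A stripped-down sketch of that argument-descriptor layout; struct simple_req and prepare_open() are invented for illustration and are not FUSE APIs:

    #include <stdint.h>
    #include <stddef.h>
    #include <string.h>

    struct arg { size_t size; void *value; };

    struct simple_req {
        uint32_t opcode;
        uint64_t nodeid;
        unsigned int num_in, num_out;
        struct arg in[3], out[2];
    };

    struct open_in { uint32_t flags; };

    static void prepare_open(struct simple_req *req, uint64_t nodeid,
                             uint32_t opcode, struct open_in *inarg,
                             void *outarg, size_t outsize)
    {
        memset(req, 0, sizeof(*req));
        req->opcode = opcode;               /* e.g. an OPEN opcode */
        req->nodeid = nodeid;
        req->num_in = 1;
        req->in[0].size = sizeof(*inarg);
        req->in[0].value = inarg;
        req->num_out = 1;
        req->out[0].size = outsize;
        req->out[0].value = outarg;
    }
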
dir.c
115 static void fuse_lookup_init(struct fuse_conn *fc, struct fuse_req *req, in fuse_lookup_init() argument
120 req->in.h.opcode = FUSE_LOOKUP; in fuse_lookup_init()
121 req->in.h.nodeid = nodeid; in fuse_lookup_init()
122 req->in.numargs = 1; in fuse_lookup_init()
123 req->in.args[0].size = name->len + 1; in fuse_lookup_init()
124 req->in.args[0].value = name->name; in fuse_lookup_init()
125 req->out.numargs = 1; in fuse_lookup_init()
127 req->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE; in fuse_lookup_init()
129 req->out.args[0].size = sizeof(struct fuse_entry_out); in fuse_lookup_init()
130 req->out.args[0].value = outarg; in fuse_lookup_init()
[all …]
dev.c
38 static void fuse_request_init(struct fuse_req *req) in fuse_request_init() argument
40 memset(req, 0, sizeof(*req)); in fuse_request_init()
41 INIT_LIST_HEAD(&req->list); in fuse_request_init()
42 INIT_LIST_HEAD(&req->intr_entry); in fuse_request_init()
43 init_waitqueue_head(&req->waitq); in fuse_request_init()
44 atomic_set(&req->count, 1); in fuse_request_init()
49 struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_KERNEL); in fuse_request_alloc() local
50 if (req) in fuse_request_alloc()
51 fuse_request_init(req); in fuse_request_alloc()
52 return req; in fuse_request_alloc()
[all …]
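
fuse_request_init() zeroes the whole object and re-initialises its list heads, wait queue and reference count, so a request recycled from fuse_req_cachep never carries stale state. A userspace approximation of that init-on-every-allocation rule, with malloc standing in for kmem_cache_alloc():

    #include <stdatomic.h>
    #include <stdlib.h>
    #include <string.h>

    struct list_node { struct list_node *next, *prev; };

    static void list_init(struct list_node *n) { n->next = n->prev = n; }

    struct simple_fuse_req {
        struct list_node list;         /* pending/processing list linkage */
        struct list_node intr_entry;   /* interrupt list linkage */
        atomic_int count;              /* reference count */
    };

    static void request_init(struct simple_fuse_req *req)
    {
        memset(req, 0, sizeof(*req));
        list_init(&req->list);
        list_init(&req->intr_entry);
        atomic_init(&req->count, 1);
    }

    static struct simple_fuse_req *request_alloc(void)
    {
        struct simple_fuse_req *req = malloc(sizeof(*req));

        if (req)
            request_init(req);
        return req;
    }
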
cuse.c
306 static void cuse_process_init_reply(struct fuse_conn *fc, struct fuse_req *req) in cuse_process_init_reply() argument
309 struct cuse_init_out *arg = req->out.args[0].value; in cuse_process_init_reply()
310 struct page *page = req->pages[0]; in cuse_process_init_reply()
317 if (req->out.h.error || in cuse_process_init_reply()
329 rc = cuse_parse_devinfo(page_address(page), req->out.args[1].size, in cuse_process_init_reply()
406 struct fuse_req *req; in cuse_send_init() local
414 req = fuse_get_req(fc); in cuse_send_init()
415 if (IS_ERR(req)) { in cuse_send_init()
416 rc = PTR_ERR(req); in cuse_send_init()
429 arg = &req->misc.cuse_init_in; in cuse_send_init()
[all …]
/fs/ecryptfs/
kthread.c
55 struct ecryptfs_open_req *req; in ecryptfs_threadfn() local
67 req = list_first_entry(&ecryptfs_kthread_ctl.req_list, in ecryptfs_threadfn()
70 mutex_lock(&req->mux); in ecryptfs_threadfn()
71 list_del(&req->kthread_ctl_list); in ecryptfs_threadfn()
72 if (!(req->flags & ECRYPTFS_REQ_ZOMBIE)) { in ecryptfs_threadfn()
73 dget(req->lower_dentry); in ecryptfs_threadfn()
74 mntget(req->lower_mnt); in ecryptfs_threadfn()
75 (*req->lower_file) = dentry_open( in ecryptfs_threadfn()
76 req->lower_dentry, req->lower_mnt, in ecryptfs_threadfn()
78 req->flags |= ECRYPTFS_REQ_PROCESSED; in ecryptfs_threadfn()
[all …]
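
The kthread.c hits show the eCryptfs helper thread taking ecryptfs_open_req entries off a shared list, skipping requests already marked ECRYPTFS_REQ_ZOMBIE, doing the privileged open on the submitter's behalf, and flagging the request as processed. A condensed pthread sketch of that worker loop (the queue layout and names here are assumptions, not the eCryptfs structures):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct open_req {
        pthread_mutex_t mux;
        pthread_cond_t  done;
        bool zombie;               /* submitter gave up; skip the work */
        bool processed;
        struct open_req *next;     /* simple singly linked queue */
    };

    static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct open_req *queue_head;

    /* One pass of the worker: pop a request, do the work, wake the submitter. */
    static void service_one(void)
    {
        struct open_req *req;

        pthread_mutex_lock(&queue_lock);
        req = queue_head;
        if (req)
            queue_head = req->next;
        pthread_mutex_unlock(&queue_lock);
        if (!req)
            return;

        pthread_mutex_lock(&req->mux);
        if (!req->zombie) {
            /* ... perform the privileged open here ... */
            req->processed = true;
        }
        pthread_cond_signal(&req->done);
        pthread_mutex_unlock(&req->mux);
    }
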
/fs/ubifs/
budget.c
376 const struct ubifs_budget_req *req) in calc_idx_growth() argument
380 znodes = req->new_ino + (req->new_page << UBIFS_BLOCKS_PER_PAGE_SHIFT) + in calc_idx_growth()
381 req->new_dent; in calc_idx_growth()
392 const struct ubifs_budget_req *req) in calc_data_growth() argument
396 data_growth = req->new_ino ? c->bi.inode_budget : 0; in calc_data_growth()
397 if (req->new_page) in calc_data_growth()
399 if (req->new_dent) in calc_data_growth()
401 data_growth += req->new_ino_d; in calc_data_growth()
412 const struct ubifs_budget_req *req) in calc_dd_growth() argument
416 dd_growth = req->dirtied_page ? c->bi.page_budget : 0; in calc_dd_growth()
[all …]
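
calc_data_growth() is straightforward budgeting arithmetic: each object the request asks for adds its fixed per-object budget, plus the inline inode data carried in new_ino_d. A plain C restatement of that formula with made-up field and budget names:

    struct budget_req {
        unsigned int new_ino, new_page, new_dent;
        unsigned int new_ino_d;            /* bytes of inline inode data */
    };

    struct budget_info {
        long long inode_budget, page_budget, dent_budget;
    };

    static long long calc_data_growth(const struct budget_info *c,
                                      const struct budget_req *req)
    {
        long long growth = req->new_ino ? c->inode_budget : 0;

        if (req->new_page)
            growth += c->page_budget;
        if (req->new_dent)
            growth += c->dent_budget;
        growth += req->new_ino_d;          /* inline data is charged as-is */
        return growth;
    }
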
/fs/nilfs2/
dat.c
48 struct nilfs_palloc_req *req, int create) in nilfs_dat_prepare_entry() argument
50 return nilfs_palloc_get_entry_block(dat, req->pr_entry_nr, in nilfs_dat_prepare_entry()
51 create, &req->pr_entry_bh); in nilfs_dat_prepare_entry()
55 struct nilfs_palloc_req *req) in nilfs_dat_commit_entry() argument
57 mark_buffer_dirty(req->pr_entry_bh); in nilfs_dat_commit_entry()
59 brelse(req->pr_entry_bh); in nilfs_dat_commit_entry()
63 struct nilfs_palloc_req *req) in nilfs_dat_abort_entry() argument
65 brelse(req->pr_entry_bh); in nilfs_dat_abort_entry()
68 int nilfs_dat_prepare_alloc(struct inode *dat, struct nilfs_palloc_req *req) in nilfs_dat_prepare_alloc() argument
72 ret = nilfs_palloc_prepare_alloc_entry(dat, req); in nilfs_dat_prepare_alloc()
[all …]
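
dat.c follows nilfs2's prepare/commit/abort convention: prepare pins the entry block into the request, commit marks it dirty and releases it, and abort releases it without publishing anything. A toy sketch of that three-phase lifecycle; the buffer type and helpers below are stand-ins, not the nilfs2 API:

    #include <stdbool.h>
    #include <stdlib.h>

    struct buffer { bool dirty; int refcount; };

    struct palloc_req { struct buffer *entry_bh; };

    static struct buffer *get_entry_block(void)      /* hypothetical stand-in */
    {
        struct buffer *bh = calloc(1, sizeof(*bh));

        if (bh)
            bh->refcount = 1;
        return bh;
    }

    static void release_buffer(struct buffer *bh)    /* like brelse() */
    {
        if (bh && --bh->refcount == 0)
            free(bh);
    }

    /* prepare: pin the resources the operation will need */
    static int prepare_entry(struct palloc_req *req)
    {
        req->entry_bh = get_entry_block();
        return req->entry_bh ? 0 : -1;
    }

    /* commit: publish the change, then drop the pin */
    static void commit_entry(struct palloc_req *req)
    {
        req->entry_bh->dirty = true;                 /* like mark_buffer_dirty() */
        release_buffer(req->entry_bh);
    }

    /* abort: drop the pin without publishing anything */
    static void abort_entry(struct palloc_req *req)
    {
        release_buffer(req->entry_bh);
    }
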
ifile.c
64 struct nilfs_palloc_req req; in nilfs_ifile_create_inode() local
67 req.pr_entry_nr = 0; /* 0 says find free inode from beginning of in nilfs_ifile_create_inode()
69 req.pr_entry_bh = NULL; in nilfs_ifile_create_inode()
71 ret = nilfs_palloc_prepare_alloc_entry(ifile, &req); in nilfs_ifile_create_inode()
73 ret = nilfs_palloc_get_entry_block(ifile, req.pr_entry_nr, 1, in nilfs_ifile_create_inode()
74 &req.pr_entry_bh); in nilfs_ifile_create_inode()
76 nilfs_palloc_abort_alloc_entry(ifile, &req); in nilfs_ifile_create_inode()
79 brelse(req.pr_entry_bh); in nilfs_ifile_create_inode()
82 nilfs_palloc_commit_alloc_entry(ifile, &req); in nilfs_ifile_create_inode()
83 mark_buffer_dirty(req.pr_entry_bh); in nilfs_ifile_create_inode()
[all …]
alloc.c
406 struct nilfs_palloc_req *req) in nilfs_palloc_prepare_alloc_entry() argument
420 group = nilfs_palloc_group(inode, req->pr_entry_nr, &group_offset); in nilfs_palloc_prepare_alloc_entry()
428 maxgroup = nilfs_palloc_group(inode, req->pr_entry_nr, in nilfs_palloc_prepare_alloc_entry()
455 req->pr_entry_nr = in nilfs_palloc_prepare_alloc_entry()
460 req->pr_desc_bh = desc_bh; in nilfs_palloc_prepare_alloc_entry()
461 req->pr_bitmap_bh = bitmap_bh; in nilfs_palloc_prepare_alloc_entry()
490 struct nilfs_palloc_req *req) in nilfs_palloc_commit_alloc_entry() argument
492 mark_buffer_dirty(req->pr_bitmap_bh); in nilfs_palloc_commit_alloc_entry()
493 mark_buffer_dirty(req->pr_desc_bh); in nilfs_palloc_commit_alloc_entry()
496 brelse(req->pr_bitmap_bh); in nilfs_palloc_commit_alloc_entry()
[all …]
/fs/ceph/
mds_client.c
467 static void put_request_session(struct ceph_mds_request *req) in put_request_session() argument
469 if (req->r_session) { in put_request_session()
470 ceph_put_mds_session(req->r_session); in put_request_session()
471 req->r_session = NULL; in put_request_session()
477 struct ceph_mds_request *req = container_of(kref, in ceph_mdsc_release_request() local
480 if (req->r_request) in ceph_mdsc_release_request()
481 ceph_msg_put(req->r_request); in ceph_mdsc_release_request()
482 if (req->r_reply) { in ceph_mdsc_release_request()
483 ceph_msg_put(req->r_reply); in ceph_mdsc_release_request()
484 destroy_reply_info(&req->r_reply_info); in ceph_mdsc_release_request()
[all …]
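
ceph_mdsc_release_request() is the standard kref release callback: recover the containing request from the embedded kref with container_of(), then drop every message and sub-object the request still owns. A self-contained sketch of that shape (the struct layout is invented for illustration):

    #include <stddef.h>
    #include <stdlib.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct kref_like { int refcount; };

    struct mds_req {
        struct kref_like r_kref;
        void *r_request;               /* owned request message, if any */
        void *r_reply;                 /* owned reply message, if any */
    };

    /* Called when the last reference is dropped. */
    static void release_request(struct kref_like *kref)
    {
        struct mds_req *req = container_of(kref, struct mds_req, r_kref);

        free(req->r_request);
        free(req->r_reply);
        free(req);
    }
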
dir.c
311 struct ceph_mds_request *req; in ceph_readdir() local
326 req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS); in ceph_readdir()
327 if (IS_ERR(req)) in ceph_readdir()
328 return PTR_ERR(req); in ceph_readdir()
329 req->r_inode = inode; in ceph_readdir()
331 req->r_dentry = dget(filp->f_dentry); in ceph_readdir()
333 req->r_direct_mode = USE_AUTH_MDS; in ceph_readdir()
334 req->r_direct_hash = ceph_frag_value(frag); in ceph_readdir()
335 req->r_direct_is_hash = true; in ceph_readdir()
336 req->r_path2 = kstrdup(fi->last_name, GFP_NOFS); in ceph_readdir()
[all …]
debugfs.c
53 struct ceph_mds_request *req; in mdsc_show() local
61 req = rb_entry(rp, struct ceph_mds_request, r_node); in mdsc_show()
63 if (req->r_request && req->r_session) in mdsc_show()
64 seq_printf(s, "%lld\tmds%d\t", req->r_tid, in mdsc_show()
65 req->r_session->s_mds); in mdsc_show()
66 else if (!req->r_request) in mdsc_show()
67 seq_printf(s, "%lld\t(no request)\t", req->r_tid); in mdsc_show()
69 seq_printf(s, "%lld\t(no session)\t", req->r_tid); in mdsc_show()
71 seq_printf(s, "%s", ceph_mds_op_name(req->r_op)); in mdsc_show()
73 if (req->r_got_unsafe) in mdsc_show()
[all …]
export.c
108 struct ceph_mds_request *req; in __fh_to_dentry() local
110 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LOOKUPINO, in __fh_to_dentry()
112 if (IS_ERR(req)) in __fh_to_dentry()
113 return ERR_CAST(req); in __fh_to_dentry()
115 req->r_ino1 = vino; in __fh_to_dentry()
116 req->r_num_caps = 1; in __fh_to_dentry()
117 err = ceph_mdsc_do_request(mdsc, NULL, req); in __fh_to_dentry()
118 inode = req->r_target_inode; in __fh_to_dentry()
121 ceph_mdsc_put_request(req); in __fh_to_dentry()
164 struct ceph_mds_request *req; in __cfh_to_dentry() local
[all …]
file.c
44 struct ceph_mds_request *req; in prepare_open_request() local
51 req = ceph_mdsc_create_request(mdsc, op, want_auth); in prepare_open_request()
52 if (IS_ERR(req)) in prepare_open_request()
54 req->r_fmode = ceph_flags_to_mode(flags); in prepare_open_request()
55 req->r_args.open.flags = cpu_to_le32(flags); in prepare_open_request()
56 req->r_args.open.mode = cpu_to_le32(create_mode); in prepare_open_request()
57 req->r_args.open.preferred = cpu_to_le32(-1); in prepare_open_request()
59 return req; in prepare_open_request()
123 struct ceph_mds_request *req; in ceph_open() local
189 req = prepare_open_request(inode->i_sb, flags, 0); in ceph_open()
[all …]
ioctl.c
43 struct ceph_mds_request *req; in ceph_ioctl_set_layout() local
94 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETLAYOUT, in ceph_ioctl_set_layout()
96 if (IS_ERR(req)) in ceph_ioctl_set_layout()
97 return PTR_ERR(req); in ceph_ioctl_set_layout()
98 req->r_inode = inode; in ceph_ioctl_set_layout()
100 req->r_inode_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_EXCL; in ceph_ioctl_set_layout()
102 req->r_args.setlayout.layout.fl_stripe_unit = in ceph_ioctl_set_layout()
104 req->r_args.setlayout.layout.fl_stripe_count = in ceph_ioctl_set_layout()
106 req->r_args.setlayout.layout.fl_object_size = in ceph_ioctl_set_layout()
108 req->r_args.setlayout.layout.fl_pg_pool = cpu_to_le32(l.data_pool); in ceph_ioctl_set_layout()
[all …]
locks.c
19 struct ceph_mds_request *req; in ceph_lock_message() local
23 req = ceph_mdsc_create_request(mdsc, operation, USE_AUTH_MDS); in ceph_lock_message()
24 if (IS_ERR(req)) in ceph_lock_message()
25 return PTR_ERR(req); in ceph_lock_message()
26 req->r_inode = inode; in ceph_lock_message()
40 req->r_args.filelock_change.rule = lock_type; in ceph_lock_message()
41 req->r_args.filelock_change.type = cmd; in ceph_lock_message()
42 req->r_args.filelock_change.pid = cpu_to_le64((u64)fl->fl_pid); in ceph_lock_message()
45 req->r_args.filelock_change.pid_namespace = in ceph_lock_message()
47 req->r_args.filelock_change.start = cpu_to_le64(fl->fl_start); in ceph_lock_message()
[all …]
addr.c
236 static void finish_read(struct ceph_osd_request *req, struct ceph_msg *msg) in finish_read() argument
238 struct inode *inode = req->r_inode; in finish_read()
249 dout("finish_read %p req %p rc %d bytes %d\n", inode, req, rc, bytes); in finish_read()
252 for (i = 0; i < req->r_num_pages; i++, bytes -= PAGE_CACHE_SIZE) { in finish_read()
253 struct page *page = req->r_pages[i]; in finish_read()
267 kfree(req->r_pages); in finish_read()
288 struct ceph_osd_request *req; in start_read() local
313 req = ceph_osdc_new_request(osdc, &ci->i_layout, ceph_vino(inode), in start_read()
319 if (IS_ERR(req)) in start_read()
320 return PTR_ERR(req); in start_read()
[all …]
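
finish_read() walks req->r_pages, charging one page worth of the reply to each page and zero-filling whatever the reply did not cover. A small sketch of that scatter loop, assuming 4 KiB pages and plain buffers in place of struct page:

    #include <string.h>

    #define SKETCH_PAGE_SIZE 4096

    static void scatter_reply(unsigned char **pages, int num_pages, long bytes)
    {
        for (int i = 0; i < num_pages; i++, bytes -= SKETCH_PAGE_SIZE) {
            if (bytes < SKETCH_PAGE_SIZE) {
                long valid = bytes > 0 ? bytes : 0;

                /* the tail of a short reply is zeroed, as finish_read() does */
                memset(pages[i] + valid, 0, SKETCH_PAGE_SIZE - valid);
            }
        }
    }
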
/fs/lockd/
clntproc.c
124 static void nlmclnt_setlockargs(struct nlm_rqst *req, struct file_lock *fl) in nlmclnt_setlockargs() argument
126 struct nlm_args *argp = &req->a_args; in nlmclnt_setlockargs()
132 lock->oh.data = req->a_owner; in nlmclnt_setlockargs()
133 lock->oh.len = snprintf(req->a_owner, sizeof(req->a_owner), "%u@%s", in nlmclnt_setlockargs()
142 static void nlmclnt_release_lockargs(struct nlm_rqst *req) in nlmclnt_release_lockargs() argument
144 BUG_ON(req->a_args.lock.fl.fl_ops != NULL); in nlmclnt_release_lockargs()
248 nlmclnt_call(struct rpc_cred *cred, struct nlm_rqst *req, u32 proc) in nlmclnt_call() argument
250 struct nlm_host *host = req->a_host; in nlmclnt_call()
252 struct nlm_args *argp = &req->a_args; in nlmclnt_call()
253 struct nlm_res *resp = &req->a_res; in nlmclnt_call()
[all …]
/fs/coda/
psdev.c
100 struct upc_req *req = NULL; in coda_psdev_write() local
151 req = tmp; in coda_psdev_write()
152 list_del(&req->uc_chain); in coda_psdev_write()
158 if (!req) { in coda_psdev_write()
166 if (req->uc_outSize < nbytes) { in coda_psdev_write()
168 req->uc_outSize, (long)nbytes, hdr.opcode, hdr.unique); in coda_psdev_write()
169 nbytes = req->uc_outSize; /* don't have more space! */ in coda_psdev_write()
171 if (copy_from_user(req->uc_data, buf, nbytes)) { in coda_psdev_write()
172 req->uc_flags |= CODA_REQ_ABORT; in coda_psdev_write()
173 wake_up(&req->uc_sleep); in coda_psdev_write()
[all …]
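
coda_psdev_write() clamps the userspace reply to the uc_outSize the request reserved before copying, so an oversized answer can never overrun the waiting caller's buffer. A userspace sketch of that clamp-then-copy step (names are illustrative):

    #include <stddef.h>
    #include <string.h>

    struct upcall_req {
        char  *out_data;     /* reply buffer the caller is waiting on */
        size_t out_size;     /* capacity of out_data */
    };

    static size_t deliver_reply(struct upcall_req *req,
                                const char *buf, size_t nbytes)
    {
        if (nbytes > req->out_size)
            nbytes = req->out_size;      /* don't have more space! */
        memcpy(req->out_data, buf, nbytes);
        return nbytes;
    }
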
/fs/ocfs2/
ioctl.c
42 struct ocfs2_info_request __user *req) in o2info_set_request_error() argument
45 (void)put_user(kreq->ir_flags, (__u32 __user *)&(req->ir_flags)); in o2info_set_request_error()
48 static inline void o2info_set_request_filled(struct ocfs2_info_request *req) in o2info_set_request_filled() argument
50 req->ir_flags |= OCFS2_INFO_FL_FILLED; in o2info_set_request_filled()
53 static inline void o2info_clear_request_filled(struct ocfs2_info_request *req) in o2info_clear_request_filled() argument
55 req->ir_flags &= ~OCFS2_INFO_FL_FILLED; in o2info_clear_request_filled()
58 static inline int o2info_coherent(struct ocfs2_info_request *req) in o2info_coherent() argument
60 return (!(req->ir_flags & OCFS2_INFO_FL_NON_COHERENT)); in o2info_coherent()
146 struct ocfs2_info_request __user *req) in ocfs2_info_handle_blocksize() argument
151 if (o2info_from_user(oib, req)) in ocfs2_info_handle_blocksize()
[all …]
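
The o2info helpers are ordinary flag manipulation on ir_flags: set or clear the FILLED bit and test the NON_COHERENT bit. A minimal restatement with placeholder bit values (the real assignments live in the ocfs2 headers and are not reproduced here):

    #define INFO_FL_FILLED        (1u << 0)   /* placeholder bit values */
    #define INFO_FL_NON_COHERENT  (1u << 1)

    struct info_request { unsigned int ir_flags; };

    static inline void set_request_filled(struct info_request *req)
    {
        req->ir_flags |= INFO_FL_FILLED;
    }

    static inline void clear_request_filled(struct info_request *req)
    {
        req->ir_flags &= ~INFO_FL_FILLED;
    }

    static inline int request_is_coherent(const struct info_request *req)
    {
        return !(req->ir_flags & INFO_FL_NON_COHERENT);
    }
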
/fs/
aio.c
418 struct kiocb *req = NULL; in __aio_get_req() local
420 req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL); in __aio_get_req()
421 if (unlikely(!req)) in __aio_get_req()
424 req->ki_flags = 0; in __aio_get_req()
425 req->ki_users = 2; in __aio_get_req()
426 req->ki_key = 0; in __aio_get_req()
427 req->ki_ctx = ctx; in __aio_get_req()
428 req->ki_cancel = NULL; in __aio_get_req()
429 req->ki_retry = NULL; in __aio_get_req()
430 req->ki_dtor = NULL; in __aio_get_req()
[all …]
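
__aio_get_req() starts every kiocb with ki_users = 2: one reference belongs to the submission path and one to the completion path, and the object is only freed after both are dropped. A tiny sketch of that two-owner convention with invented field names:

    #include <stdlib.h>

    struct iocb_like {
        int users;                          /* submitter + completion path */
        void *ctx;
        void (*dtor)(struct iocb_like *);   /* optional destructor */
    };

    static struct iocb_like *get_req(void *ctx)
    {
        struct iocb_like *req = calloc(1, sizeof(*req));

        if (!req)
            return NULL;
        req->users = 2;      /* like ki_users = 2 in the excerpt */
        req->ctx = ctx;
        return req;
    }
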
