/fs/squashfs/

  block.c
     79  static int squashfs_bio_submit(struct squashfs_read_request *req);
     93  static void free_read_request(struct squashfs_read_request *req, int error)  [in free_read_request(), argument]
     95          if (!req->synchronous)  [in free_read_request()]
     96                  squashfs_page_actor_free(req->output, error);  [in free_read_request()]
     97          if (req->res)  [in free_read_request()]
     98                  *(req->res) = error;  [in free_read_request()]
     99          kfree(req->bh);  [in free_read_request()]
    100          kfree(req);  [in free_read_request()]
    103  static void squashfs_process_blocks(struct squashfs_read_request *req)  [in squashfs_process_blocks(), argument]
    107          struct squashfs_sb_info *msblk = req->sb->s_fs_info;  [in squashfs_process_blocks()]
    [all …]

/fs/ncpfs/

  sock.c
     59          struct list_head req;  [member]
     76          struct ncp_request_reply *req;  [in ncp_alloc_req(), local]
     78          req = kmalloc(sizeof(struct ncp_request_reply), GFP_KERNEL);  [in ncp_alloc_req()]
     79          if (!req)  [in ncp_alloc_req()]
     82          init_waitqueue_head(&req->wq);  [in ncp_alloc_req()]
     83          atomic_set(&req->refs, (1));  [in ncp_alloc_req()]
     84          req->status = RQ_IDLE;  [in ncp_alloc_req()]
     86          return req;  [in ncp_alloc_req()]
     89  static void ncp_req_get(struct ncp_request_reply *req)  [in ncp_req_get(), argument]
     91          atomic_inc(&req->refs);  [in ncp_req_get()]
    [all …]

/fs/fuse/

  file.c
     27          struct fuse_req *req;  [in fuse_send_open(), local]
     30          req = fuse_get_req_nopages(fc);  [in fuse_send_open()]
     31          if (IS_ERR(req))  [in fuse_send_open()]
     32                  return PTR_ERR(req);  [in fuse_send_open()]
     38          req->in.h.opcode = opcode;  [in fuse_send_open()]
     39          req->in.h.nodeid = nodeid;  [in fuse_send_open()]
     40          req->in.numargs = 1;  [in fuse_send_open()]
     41          req->in.args[0].size = sizeof(inarg);  [in fuse_send_open()]
     42          req->in.args[0].value = &inarg;  [in fuse_send_open()]
     43          req->out.numargs = 1;  [in fuse_send_open()]
    [all …]

  dir.c
    148  static void fuse_lookup_init(struct fuse_conn *fc, struct fuse_req *req,  [in fuse_lookup_init(), argument]
    153          req->in.h.opcode = FUSE_LOOKUP;  [in fuse_lookup_init()]
    154          req->in.h.nodeid = nodeid;  [in fuse_lookup_init()]
    155          req->in.numargs = 1;  [in fuse_lookup_init()]
    156          req->in.args[0].size = name->len + 1;  [in fuse_lookup_init()]
    157          req->in.args[0].value = name->name;  [in fuse_lookup_init()]
    158          req->out.numargs = 1;  [in fuse_lookup_init()]
    160          req->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;  [in fuse_lookup_init()]
    162          req->out.args[0].size = sizeof(struct fuse_entry_out);  [in fuse_lookup_init()]
    163          req->out.args[0].value = outarg;  [in fuse_lookup_init()]
    [all …]

  dev.c
     40  static void fuse_request_init(struct fuse_req *req, struct page **pages,  [in fuse_request_init(), argument]
     44          memset(req, 0, sizeof(*req));  [in fuse_request_init()]
     47          INIT_LIST_HEAD(&req->list);  [in fuse_request_init()]
     48          INIT_LIST_HEAD(&req->intr_entry);  [in fuse_request_init()]
     49          init_waitqueue_head(&req->waitq);  [in fuse_request_init()]
     50          atomic_set(&req->count, 1);  [in fuse_request_init()]
     51          req->pages = pages;  [in fuse_request_init()]
     52          req->page_descs = page_descs;  [in fuse_request_init()]
     53          req->max_pages = npages;  [in fuse_request_init()]
     58          struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, flags);  [in __fuse_request_alloc(), local]
    [all …]

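The fuse and ncpfs excerpts above share one request-lifecycle pattern: allocate the request, zero it, initialize its list heads and wait queue, set a reference count to 1, and bump or drop that count through get/put helpers. The following is only a minimal user-space sketch of that pattern; all names (my_req, my_req_alloc, my_req_get, my_req_put) are hypothetical and do not exist in the kernel.

#include <stdatomic.h>
#include <stdlib.h>

/* Hypothetical request object; fields only mirror the general shape
 * of the kernel structures shown above, not their real layout. */
struct my_req {
        atomic_int refs;      /* starts at 1, like atomic_set(&req->count, 1) */
        int        status;
};

static struct my_req *my_req_alloc(void)
{
        struct my_req *req = calloc(1, sizeof(*req));   /* zeroed, like memset() */

        if (!req)
                return NULL;
        atomic_init(&req->refs, 1);
        return req;
}

static void my_req_get(struct my_req *req)
{
        atomic_fetch_add(&req->refs, 1);                /* like atomic_inc(&req->refs) */
}

static void my_req_put(struct my_req *req)
{
        if (atomic_fetch_sub(&req->refs, 1) == 1)       /* previous value was 1: last ref */
                free(req);
}

int main(void)
{
        struct my_req *req = my_req_alloc();

        if (!req)
                return 1;
        my_req_get(req);        /* a second user takes a reference */
        my_req_put(req);
        my_req_put(req);        /* final put frees the object */
        return 0;
}
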
/fs/nfs/

  pagelist.c
     49          hdr->req = nfs_list_entry(desc->pg_list.next);  [in nfs_pgheader_init()]
     51          hdr->cred = hdr->req->wb_context->cred;  [in nfs_pgheader_init()]
     52          hdr->io_start = req_offset(hdr->req);  [in nfs_pgheader_init()]
    154  nfs_page_group_lock(struct nfs_page *req, bool nonblock)  [in nfs_page_group_lock(), argument]
    156          struct nfs_page *head = req->wb_head;  [in nfs_page_group_lock()]
    177  nfs_page_group_lock_wait(struct nfs_page *req)  [in nfs_page_group_lock_wait(), argument]
    179          struct nfs_page *head = req->wb_head;  [in nfs_page_group_lock_wait()]
    192  nfs_page_group_unlock(struct nfs_page *req)  [in nfs_page_group_unlock(), argument]
    194          struct nfs_page *head = req->wb_head;  [in nfs_page_group_unlock()]
    210  nfs_page_group_sync_on_bit_locked(struct nfs_page *req, unsigned int bit)  [in nfs_page_group_sync_on_bit_locked(), argument]
    [all …]

  write.c
     44  static void nfs_redirty_request(struct nfs_page *req);
     49  static void nfs_clear_request_commit(struct nfs_page *req);
    110          struct nfs_page *req = NULL;  [in nfs_page_find_head_request_locked(), local]
    113                  req = (struct nfs_page *)page_private(page);  [in nfs_page_find_head_request_locked()]
    115                  req = nfs_page_search_commits_for_head_request_locked(nfsi,  [in nfs_page_find_head_request_locked()]
    118          if (req) {  [in nfs_page_find_head_request_locked()]
    119                  WARN_ON_ONCE(req->wb_head != req);  [in nfs_page_find_head_request_locked()]
    120                  kref_get(&req->wb_kref);  [in nfs_page_find_head_request_locked()]
    123          return req;  [in nfs_page_find_head_request_locked()]
    134          struct nfs_page *req = NULL;  [in nfs_page_find_head_request(), local]
    [all …]

  direct.c
    350  static void nfs_direct_readpage_release(struct nfs_page *req)  [in nfs_direct_readpage_release(), argument]
    353                  req->wb_context->dentry->d_inode->i_sb->s_id,  [in nfs_direct_readpage_release()]
    354                  (unsigned long long)NFS_FILEID(req->wb_context->dentry->d_inode),  [in nfs_direct_readpage_release()]
    355                  req->wb_bytes,  [in nfs_direct_readpage_release()]
    356                  (long long)req_offset(req));  [in nfs_direct_readpage_release()]
    357          nfs_release_request(req);  [in nfs_direct_readpage_release()]
    376          struct nfs_page *req = nfs_list_entry(hdr->pages.next);  [in nfs_direct_read_completion(), local]
    377          struct page *page = req->wb_page;  [in nfs_direct_read_completion()]
    381                  bytes += req->wb_bytes;  [in nfs_direct_read_completion()]
    382                  nfs_list_remove_request(req);  [in nfs_direct_read_completion()]
    [all …]

  read.c
    104  static void nfs_readpage_release(struct nfs_page *req)  [in nfs_readpage_release(), argument]
    106          struct inode *d_inode = req->wb_context->dentry->d_inode;  [in nfs_readpage_release()]
    109                  (unsigned long long)NFS_FILEID(d_inode), req->wb_bytes,  [in nfs_readpage_release()]
    110                  (long long)req_offset(req));  [in nfs_readpage_release()]
    112          if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE)) {  [in nfs_readpage_release()]
    113                  if (PageUptodate(req->wb_page))  [in nfs_readpage_release()]
    114                          nfs_readpage_to_fscache(d_inode, req->wb_page, 0);  [in nfs_readpage_release()]
    116                  unlock_page(req->wb_page);  [in nfs_readpage_release()]
    118          nfs_release_request(req);  [in nfs_readpage_release()]
    121  static void nfs_page_group_set_uptodate(struct nfs_page *req)  [in nfs_page_group_set_uptodate(), argument]
    [all …]

/fs/ubifs/

  budget.c
    375                          const struct ubifs_budget_req *req)  [in calc_idx_growth(), argument]
    379          znodes = req->new_ino + (req->new_page << UBIFS_BLOCKS_PER_PAGE_SHIFT) +  [in calc_idx_growth()]
    380                   req->new_dent;  [in calc_idx_growth()]
    391                          const struct ubifs_budget_req *req)  [in calc_data_growth(), argument]
    395          data_growth = req->new_ino ? c->bi.inode_budget : 0;  [in calc_data_growth()]
    396          if (req->new_page)  [in calc_data_growth()]
    398          if (req->new_dent)  [in calc_data_growth()]
    400          data_growth += req->new_ino_d;  [in calc_data_growth()]
    411                          const struct ubifs_budget_req *req)  [in calc_dd_growth(), argument]
    415          dd_growth = req->dirtied_page ? c->bi.page_budget : 0;  [in calc_dd_growth()]
    [all …]

/fs/nilfs2/

  dat.c
     54                          struct nilfs_palloc_req *req, int create)  [in nilfs_dat_prepare_entry(), argument]
     56          return nilfs_palloc_get_entry_block(dat, req->pr_entry_nr,  [in nilfs_dat_prepare_entry()]
     57                                              create, &req->pr_entry_bh);  [in nilfs_dat_prepare_entry()]
     61                          struct nilfs_palloc_req *req)  [in nilfs_dat_commit_entry(), argument]
     63          mark_buffer_dirty(req->pr_entry_bh);  [in nilfs_dat_commit_entry()]
     65          brelse(req->pr_entry_bh);  [in nilfs_dat_commit_entry()]
     69                          struct nilfs_palloc_req *req)  [in nilfs_dat_abort_entry(), argument]
     71          brelse(req->pr_entry_bh);  [in nilfs_dat_abort_entry()]
     74  int nilfs_dat_prepare_alloc(struct inode *dat, struct nilfs_palloc_req *req)  [in nilfs_dat_prepare_alloc(), argument]
     78          ret = nilfs_palloc_prepare_alloc_entry(dat, req);  [in nilfs_dat_prepare_alloc()]
    [all …]

  ifile.c
     68          struct nilfs_palloc_req req;  [in nilfs_ifile_create_inode(), local]
     71          req.pr_entry_nr = 0;  /* 0 says find free inode from beginning of  [in nilfs_ifile_create_inode()]
     73          req.pr_entry_bh = NULL;  [in nilfs_ifile_create_inode()]
     75          ret = nilfs_palloc_prepare_alloc_entry(ifile, &req);  [in nilfs_ifile_create_inode()]
     77                  ret = nilfs_palloc_get_entry_block(ifile, req.pr_entry_nr, 1,  [in nilfs_ifile_create_inode()]
     78                                                     &req.pr_entry_bh);  [in nilfs_ifile_create_inode()]
     80                          nilfs_palloc_abort_alloc_entry(ifile, &req);  [in nilfs_ifile_create_inode()]
     83                  brelse(req.pr_entry_bh);  [in nilfs_ifile_create_inode()]
     86          nilfs_palloc_commit_alloc_entry(ifile, &req);  [in nilfs_ifile_create_inode()]
     87          mark_buffer_dirty(req.pr_entry_bh);  [in nilfs_ifile_create_inode()]
    [all …]

  alloc.c
    469                          struct nilfs_palloc_req *req)  [in nilfs_palloc_prepare_alloc_entry(), argument]
    483          group = nilfs_palloc_group(inode, req->pr_entry_nr, &group_offset);  [in nilfs_palloc_prepare_alloc_entry()]
    491          maxgroup = nilfs_palloc_group(inode, req->pr_entry_nr,  [in nilfs_palloc_prepare_alloc_entry()]
    518                          req->pr_entry_nr =  [in nilfs_palloc_prepare_alloc_entry()]
    523          req->pr_desc_bh = desc_bh;  [in nilfs_palloc_prepare_alloc_entry()]
    524          req->pr_bitmap_bh = bitmap_bh;  [in nilfs_palloc_prepare_alloc_entry()]
    553                          struct nilfs_palloc_req *req)  [in nilfs_palloc_commit_alloc_entry(), argument]
    555          mark_buffer_dirty(req->pr_bitmap_bh);  [in nilfs_palloc_commit_alloc_entry()]
    556          mark_buffer_dirty(req->pr_desc_bh);  [in nilfs_palloc_commit_alloc_entry()]
    559          brelse(req->pr_bitmap_bh);  [in nilfs_palloc_commit_alloc_entry()]
    [all …]

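The nilfs2 excerpts above follow a prepare/commit/abort convention: the request struct carries whatever buffers the prepare step pinned, commit marks them dirty and releases them, and abort only releases them. The sketch below shows that control flow in isolation; it is a hypothetical user-space illustration (my_palloc_req, my_prepare_alloc, my_commit_alloc, my_abort_alloc are invented names, not the nilfs_palloc API).

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for the buffer a prepare step would pin (pr_entry_bh). */
struct my_palloc_req {
        long  entry_nr;
        void *entry_buf;
};

static int my_prepare_alloc(struct my_palloc_req *req)
{
        req->entry_buf = malloc(64);    /* "pin" a buffer for the new entry */
        if (!req->entry_buf)
                return -1;
        req->entry_nr = 42;             /* pretend a free slot was found */
        return 0;
}

static void my_commit_alloc(struct my_palloc_req *req)
{
        /* The kernel code would mark_buffer_dirty() here; we just log and release. */
        printf("commit entry %ld\n", req->entry_nr);
        free(req->entry_buf);
}

static void my_abort_alloc(struct my_palloc_req *req)
{
        free(req->entry_buf);           /* release without dirtying anything */
}

int main(void)
{
        struct my_palloc_req req = { 0 };

        if (my_prepare_alloc(&req))
                return 1;               /* nothing pinned, nothing to undo */
        if (req.entry_nr < 0) {
                my_abort_alloc(&req);   /* validation failed: roll back */
                return 1;
        }
        my_commit_alloc(&req);          /* make the allocation durable */
        return 0;
}
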
/fs/cifs/

  smb2pdu.c
    326          struct smb2_negotiate_req *req;  [in SMB2_negotiate(), local]
    343          rc = small_smb2_init(SMB2_NEGOTIATE, NULL, (void **) &req);  [in SMB2_negotiate()]
    347          req->hdr.SessionId = 0;  [in SMB2_negotiate()]
    349          req->Dialects[0] = cpu_to_le16(ses->server->vals->protocol_id);  [in SMB2_negotiate()]
    351          req->DialectCount = cpu_to_le16(1); /* One vers= at a time for now */  [in SMB2_negotiate()]
    352          inc_rfc1001_len(req, 2);  [in SMB2_negotiate()]
    356                  req->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED);  [in SMB2_negotiate()]
    358                  req->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED);  [in SMB2_negotiate()]
    360                  req->SecurityMode = 0;  [in SMB2_negotiate()]
    362          req->Capabilities = cpu_to_le32(ses->server->vals->req_capabilities);  [in SMB2_negotiate()]
    [all …]

/fs/ceph/

  mds_client.c
    503  static void put_request_session(struct ceph_mds_request *req)  [in put_request_session(), argument]
    505          if (req->r_session) {  [in put_request_session()]
    506                  ceph_put_mds_session(req->r_session);  [in put_request_session()]
    507                  req->r_session = NULL;  [in put_request_session()]
    513          struct ceph_mds_request *req = container_of(kref,  [in ceph_mdsc_release_request(), local]
    516          destroy_reply_info(&req->r_reply_info);  [in ceph_mdsc_release_request()]
    517          if (req->r_request)  [in ceph_mdsc_release_request()]
    518                  ceph_msg_put(req->r_request);  [in ceph_mdsc_release_request()]
    519          if (req->r_reply)  [in ceph_mdsc_release_request()]
    520                  ceph_msg_put(req->r_reply);  [in ceph_mdsc_release_request()]
    [all …]

  dir.c
    318          struct ceph_mds_request *req;  [in ceph_readdir(), local]
    330          req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);  [in ceph_readdir()]
    331          if (IS_ERR(req))  [in ceph_readdir()]
    332                  return PTR_ERR(req);  [in ceph_readdir()]
    333          err = ceph_alloc_readdir_reply_buffer(req, inode);  [in ceph_readdir()]
    335                  ceph_mdsc_put_request(req);  [in ceph_readdir()]
    338          req->r_inode = inode;  [in ceph_readdir()]
    340          req->r_dentry = dget(file->f_dentry);  [in ceph_readdir()]
    342          req->r_direct_mode = USE_AUTH_MDS;  [in ceph_readdir()]
    343          req->r_direct_hash = ceph_frag_value(frag);  [in ceph_readdir()]
    [all …]

  debugfs.c
     53          struct ceph_mds_request *req;  [in mdsc_show(), local]
     61                  req = rb_entry(rp, struct ceph_mds_request, r_node);  [in mdsc_show()]
     63                  if (req->r_request && req->r_session)  [in mdsc_show()]
     64                          seq_printf(s, "%lld\tmds%d\t", req->r_tid,  [in mdsc_show()]
     65                                     req->r_session->s_mds);  [in mdsc_show()]
     66                  else if (!req->r_request)  [in mdsc_show()]
     67                          seq_printf(s, "%lld\t(no request)\t", req->r_tid);  [in mdsc_show()]
     69                          seq_printf(s, "%lld\t(no session)\t", req->r_tid);  [in mdsc_show()]
     71                  seq_printf(s, "%s", ceph_mds_op_name(req->r_op));  [in mdsc_show()]
     73                  if (req->r_got_unsafe)  [in mdsc_show()]
    [all …]

  export.c
     73          struct ceph_mds_request *req;  [in __fh_to_dentry(), local]
     75          req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LOOKUPINO,  [in __fh_to_dentry()]
     77          if (IS_ERR(req))  [in __fh_to_dentry()]
     78                  return ERR_CAST(req);  [in __fh_to_dentry()]
     80          req->r_ino1 = vino;  [in __fh_to_dentry()]
     81          req->r_num_caps = 1;  [in __fh_to_dentry()]
     82          err = ceph_mdsc_do_request(mdsc, NULL, req);  [in __fh_to_dentry()]
     83          inode = req->r_target_inode;  [in __fh_to_dentry()]
     86          ceph_mdsc_put_request(req);  [in __fh_to_dentry()]
    128          struct ceph_mds_request *req;  [in __get_parent(), local]
    [all …]

  file.c
     48          struct ceph_mds_request *req;  [in prepare_open_request(), local]
     55          req = ceph_mdsc_create_request(mdsc, op, want_auth);  [in prepare_open_request()]
     56          if (IS_ERR(req))  [in prepare_open_request()]
     58          req->r_fmode = ceph_flags_to_mode(flags);  [in prepare_open_request()]
     59          req->r_args.open.flags = cpu_to_le32(flags);  [in prepare_open_request()]
     60          req->r_args.open.mode = cpu_to_le32(create_mode);  [in prepare_open_request()]
     62          return req;  [in prepare_open_request()]
    137          struct ceph_mds_request *req;  [in ceph_open(), local]
    204          req = prepare_open_request(inode->i_sb, flags, 0);  [in ceph_open()]
    205          if (IS_ERR(req)) {  [in ceph_open()]
    [all …]

  ioctl.c
     67          struct ceph_mds_request *req;  [in ceph_ioctl_set_layout(), local]
    106          req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETLAYOUT,  [in ceph_ioctl_set_layout()]
    108          if (IS_ERR(req))  [in ceph_ioctl_set_layout()]
    109                  return PTR_ERR(req);  [in ceph_ioctl_set_layout()]
    110          req->r_inode = inode;  [in ceph_ioctl_set_layout()]
    112          req->r_num_caps = 1;  [in ceph_ioctl_set_layout()]
    114          req->r_inode_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_EXCL;  [in ceph_ioctl_set_layout()]
    116          req->r_args.setlayout.layout.fl_stripe_unit =  [in ceph_ioctl_set_layout()]
    118          req->r_args.setlayout.layout.fl_stripe_count =  [in ceph_ioctl_set_layout()]
    120          req->r_args.setlayout.layout.fl_object_size =  [in ceph_ioctl_set_layout()]
    [all …]

  locks.c
     38          struct ceph_mds_request *req;  [in ceph_lock_message(), local]
     43          req = ceph_mdsc_create_request(mdsc, operation, USE_AUTH_MDS);  [in ceph_lock_message()]
     44          if (IS_ERR(req))  [in ceph_lock_message()]
     45                  return PTR_ERR(req);  [in ceph_lock_message()]
     46          req->r_inode = inode;  [in ceph_lock_message()]
     48          req->r_num_caps = 1;  [in ceph_lock_message()]
     63          req->r_args.filelock_change.rule = lock_type;  [in ceph_lock_message()]
     64          req->r_args.filelock_change.type = cmd;  [in ceph_lock_message()]
     65          req->r_args.filelock_change.owner = cpu_to_le64(owner);  [in ceph_lock_message()]
     66          req->r_args.filelock_change.pid = cpu_to_le64((u64)fl->fl_pid);  [in ceph_lock_message()]
    [all …]

/fs/lockd/

  clntproc.c
    124  static void nlmclnt_setlockargs(struct nlm_rqst *req, struct file_lock *fl)  [in nlmclnt_setlockargs(), argument]
    126          struct nlm_args *argp = &req->a_args;  [in nlmclnt_setlockargs()]
    128          char *nodename = req->a_host->h_rpcclnt->cl_nodename;  [in nlmclnt_setlockargs()]
    133          lock->oh.data = req->a_owner;  [in nlmclnt_setlockargs()]
    134          lock->oh.len = snprintf(req->a_owner, sizeof(req->a_owner), "%u@%s",  [in nlmclnt_setlockargs()]
    143  static void nlmclnt_release_lockargs(struct nlm_rqst *req)  [in nlmclnt_release_lockargs(), argument]
    145          WARN_ON_ONCE(req->a_args.lock.fl.fl_ops != NULL);  [in nlmclnt_release_lockargs()]
    249  nlmclnt_call(struct rpc_cred *cred, struct nlm_rqst *req, u32 proc)  [in nlmclnt_call(), argument]
    251          struct nlm_host *host = req->a_host;  [in nlmclnt_call()]
    253          struct nlm_args *argp = &req->a_args;  [in nlmclnt_call()]
    [all …]

/fs/coda/

  psdev.c
    101          struct upc_req *req = NULL;  [in coda_psdev_write(), local]
    153                  req = tmp;  [in coda_psdev_write()]
    154                  list_del(&req->uc_chain);  [in coda_psdev_write()]
    160          if (!req) {  [in coda_psdev_write()]
    168          if (req->uc_outSize < nbytes) {  [in coda_psdev_write()]
    170                          __func__, req->uc_outSize, (long)nbytes,  [in coda_psdev_write()]
    172                  nbytes = req->uc_outSize; /* don't have more space! */  [in coda_psdev_write()]
    174          if (copy_from_user(req->uc_data, buf, nbytes)) {  [in coda_psdev_write()]
    175                  req->uc_flags |= CODA_REQ_ABORT;  [in coda_psdev_write()]
    176                  wake_up(&req->uc_sleep);  [in coda_psdev_write()]
    [all …]

/fs/ecryptfs/

  kthread.c
     60          struct ecryptfs_open_req *req;  [in ecryptfs_threadfn(), local]
     72                  req = list_first_entry(&ecryptfs_kthread_ctl.req_list,  [in ecryptfs_threadfn()]
     75                  list_del(&req->kthread_ctl_list);  [in ecryptfs_threadfn()]
     76                  *req->lower_file = dentry_open(&req->path,  [in ecryptfs_threadfn()]
     78                  complete(&req->done);  [in ecryptfs_threadfn()]
    105          struct ecryptfs_open_req *req, *tmp;  [in ecryptfs_destroy_kthread(), local]
    109          list_for_each_entry_safe(req, tmp, &ecryptfs_kthread_ctl.req_list,  [in ecryptfs_destroy_kthread()]
    111                  list_del(&req->kthread_ctl_list);  [in ecryptfs_destroy_kthread()]
    112                  *req->lower_file = ERR_PTR(-EIO);  [in ecryptfs_destroy_kthread()]
    113                  complete(&req->done);  [in ecryptfs_destroy_kthread()]
    [all …]

/fs/ocfs2/

  ioctl.c
     42                          struct ocfs2_info_request __user *req)  [in o2info_set_request_error(), argument]
     45          (void)put_user(kreq->ir_flags, (__u32 __user *)&(req->ir_flags));  [in o2info_set_request_error()]
     48  static inline void o2info_set_request_filled(struct ocfs2_info_request *req)  [in o2info_set_request_filled(), argument]
     50          req->ir_flags |= OCFS2_INFO_FL_FILLED;  [in o2info_set_request_filled()]
     53  static inline void o2info_clear_request_filled(struct ocfs2_info_request *req)  [in o2info_clear_request_filled(), argument]
     55          req->ir_flags &= ~OCFS2_INFO_FL_FILLED;  [in o2info_clear_request_filled()]
     58  static inline int o2info_coherent(struct ocfs2_info_request *req)  [in o2info_coherent(), argument]
     60          return (!(req->ir_flags & OCFS2_INFO_FL_NON_COHERENT));  [in o2info_coherent()]
    146                          struct ocfs2_info_request __user *req)  [in ocfs2_info_handle_blocksize(), argument]
    150          if (o2info_from_user(oib, req))  [in ocfs2_info_handle_blocksize()]
    [all …]