/drivers/staging/lustre/lustre/ptlrpc/ |
D | client.c |
   46  static int ptlrpc_send_new_req(struct ptlrpc_request *req);
   47  static int ptlrpcd_check_work(struct ptlrpc_request *req);
  135  struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req,  in ptlrpc_prep_bulk_imp() argument
  139  struct obd_import *imp = req->rq_import;  in ptlrpc_prep_bulk_imp()
  147  desc->bd_import_generation = req->rq_import_generation;  in ptlrpc_prep_bulk_imp()
  149  desc->bd_req = req;  in ptlrpc_prep_bulk_imp()
  155  req->rq_bulk = desc;  in ptlrpc_prep_bulk_imp()
  216  void ptlrpc_at_set_req_timeout(struct ptlrpc_request *req)  in ptlrpc_at_set_req_timeout() argument
  222  LASSERT(req->rq_import);  in ptlrpc_at_set_req_timeout()
  234  req->rq_timeout = req->rq_import->imp_server_timeout ?  in ptlrpc_at_set_req_timeout()
[all …]
|
D | sec_null.c |
   76  int null_ctx_sign(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)  in null_ctx_sign() argument
   78  req->rq_reqbuf->lm_secflvr = SPTLRPC_FLVR_NULL;  in null_ctx_sign()
   80  if (!req->rq_import->imp_dlm_fake) {  in null_ctx_sign()
   81  struct obd_device *obd = req->rq_import->imp_obd;  in null_ctx_sign()
   83  null_encode_sec_part(req->rq_reqbuf,  in null_ctx_sign()
   86  req->rq_reqdata_len = req->rq_reqlen;  in null_ctx_sign()
   91  int null_ctx_verify(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)  in null_ctx_verify() argument
   95  LASSERT(req->rq_repdata);  in null_ctx_verify()
   97  req->rq_repmsg = req->rq_repdata;  in null_ctx_verify()
   98  req->rq_replen = req->rq_repdata_len;  in null_ctx_verify()
[all …]
|
D | sec.c |
  363  int sptlrpc_req_get_ctx(struct ptlrpc_request *req)  in sptlrpc_req_get_ctx() argument
  365  struct obd_import *imp = req->rq_import;  in sptlrpc_req_get_ctx()
  369  LASSERT(!req->rq_cli_ctx);  in sptlrpc_req_get_ctx()
  376  req->rq_cli_ctx = get_my_ctx(sec);  in sptlrpc_req_get_ctx()
  380  if (!req->rq_cli_ctx) {  in sptlrpc_req_get_ctx()
  381  CERROR("req %p: fail to get context\n", req);  in sptlrpc_req_get_ctx()
  397  void sptlrpc_req_put_ctx(struct ptlrpc_request *req, int sync)  in sptlrpc_req_put_ctx() argument
  399  LASSERT(req);  in sptlrpc_req_put_ctx()
  400  LASSERT(req->rq_cli_ctx);  in sptlrpc_req_put_ctx()
  405  if (!list_empty(&req->rq_ctx_chain)) {  in sptlrpc_req_put_ctx()
[all …]
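The sec.c hits show the two halves of the client-security-context lifetime: sptlrpc_req_get_ctx() asserts the request has no context yet, takes one from the import's security descriptor, and fails the request if none can be obtained (the CERROR path), while sptlrpc_req_put_ctx() asserts one is present before releasing it. A minimal userspace sketch of that get/put discipline; the struct and function names here are illustrative, not the Lustre API:

    #include <assert.h>
    #include <stdatomic.h>
    #include <stdlib.h>

    struct cli_ctx {
        atomic_int refcount;
    };

    struct request {
        struct cli_ctx *ctx;
    };

    /* Take a reference for the request; fail the request if no context
     * is available (the CERROR path in the excerpt). */
    static int req_get_ctx(struct request *req, struct cli_ctx *ctx)
    {
        assert(req->ctx == NULL);          /* mirrors LASSERT(!req->rq_cli_ctx) */
        if (!ctx)
            return -1;
        atomic_fetch_add(&ctx->refcount, 1);
        req->ctx = ctx;
        return 0;
    }

    /* Drop the request's reference; the last put frees the context. */
    static void req_put_ctx(struct request *req)
    {
        assert(req->ctx);                  /* mirrors LASSERT(req->rq_cli_ctx) */
        if (atomic_fetch_sub(&req->ctx->refcount, 1) == 1)
            free(req->ctx);
        req->ctx = NULL;
    }

    int main(void)
    {
        struct cli_ctx *ctx = calloc(1, sizeof(*ctx));
        struct request req = { .ctx = NULL };

        atomic_store(&ctx->refcount, 1);   /* creator's reference */
        (void)req_get_ctx(&req, ctx);      /* request takes its own */
        req_put_ctx(&req);                 /* ...and drops it */
        if (atomic_fetch_sub(&ctx->refcount, 1) == 1)
            free(ctx);                     /* creator's put frees it */
        return 0;
    }

The asserts at both ends make leaked contexts and double-puts fail loudly instead of silently corrupting the refcount.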
|
D | sec_plain.c |
  187  int plain_ctx_sign(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)  in plain_ctx_sign() argument
  189  struct lustre_msg *msg = req->rq_reqbuf;  in plain_ctx_sign()
  192  msg->lm_secflvr = req->rq_flvr.sf_rpc;  in plain_ctx_sign()
  198  phdr->ph_bulk_hash_alg = req->rq_flvr.u_bulk.hash.hash_alg;  in plain_ctx_sign()
  200  if (req->rq_pack_udesc)  in plain_ctx_sign()
  202  if (req->rq_pack_bulk)  in plain_ctx_sign()
  205  req->rq_reqdata_len = lustre_msg_size_v2(msg->lm_bufcount,  in plain_ctx_sign()
  211  int plain_ctx_verify(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)  in plain_ctx_verify() argument
  213  struct lustre_msg *msg = req->rq_repdata;  in plain_ctx_verify()
  223  swabbed = ptlrpc_rep_need_swab(req);  in plain_ctx_verify()
[all …]
|
D | niobuf.c |
  109  static int ptlrpc_register_bulk(struct ptlrpc_request *req)  in ptlrpc_register_bulk() argument
  111  struct ptlrpc_bulk_desc *desc = req->rq_bulk;  in ptlrpc_register_bulk()
  134  if (req->rq_resend || req->rq_send_state == LUSTRE_IMP_REPLAY)  in ptlrpc_register_bulk()
  154  xid = req->rq_xid & ~((__u64)desc->bd_md_max_brw - 1);  in ptlrpc_register_bulk()
  156  req->rq_send_state != LUSTRE_IMP_REPLAY) ||  in ptlrpc_register_bulk()
  204  req->rq_status = -ENOMEM;  in ptlrpc_register_bulk()
  211  req->rq_xid = --xid;  in ptlrpc_register_bulk()
  212  LASSERTF(desc->bd_last_xid == (req->rq_xid & PTLRPC_BULK_OPS_MASK),  in ptlrpc_register_bulk()
  214  desc->bd_last_xid, req->rq_xid);  in ptlrpc_register_bulk()
  228  desc->bd_last_xid, req->rq_xid, desc->bd_portal);  in ptlrpc_register_bulk()
[all …]
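The interesting hit is line 154: a bulk transfer may span several LNet memory descriptors, so the request's XID is masked down to a multiple of bd_md_max_brw (a power of two), and the individual MDs then claim consecutive XIDs from that base. A standalone sketch of the round-down trick, assuming max_brw is a power of two:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Round an XID down to the nearest multiple of max_brw, assuming
     * max_brw is a power of two -- the same bit trick as line 154. */
    static uint64_t bulk_base_xid(uint64_t xid, uint64_t max_brw)
    {
        return xid & ~(max_brw - 1);
    }

    int main(void)
    {
        /* with max_brw = 8, XIDs 0x1234..0x1237 all share base 0x1230 */
        for (uint64_t xid = 0x1234; xid < 0x1238; xid++)
            printf("xid %#" PRIx64 " -> base %#" PRIx64 "\n",
                   xid, bulk_base_xid(xid, 8));
        return 0;
    }

The LASSERTF at line 212 checks the same relationship from the other side: the last XID handed out must still agree with the request XID modulo the bulk-operations window.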
|
D | events.c |
   53  struct ptlrpc_request *req = cbid->cbid_arg;  in request_out_callback() local
   59  DEBUG_REQ(D_NET, req, "type %d, status %d", ev->type, ev->status);  in request_out_callback()
   61  sptlrpc_request_out_callback(req);  in request_out_callback()
   63  spin_lock(&req->rq_lock);  in request_out_callback()
   64  req->rq_real_sent = ktime_get_real_seconds();  in request_out_callback()
   65  req->rq_req_unlinked = 1;  in request_out_callback()
   67  if (req->rq_reply_unlinked)  in request_out_callback()
   74  req->rq_net_err = 1;  in request_out_callback()
   79  ptlrpc_client_wake_req(req);  in request_out_callback()
   81  spin_unlock(&req->rq_lock);  in request_out_callback()
[all …]
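request_out_callback() follows the usual completion-callback pattern: take the per-request lock, record the send time and the unlinked state, flag a network error if the event reported failure, wake any sleeper, and unlock. A rough userspace analogue using pthreads; the exact wake condition is truncated in the excerpt, so this is a sketch of the pattern, not a transcription:

    #include <pthread.h>
    #include <stdbool.h>
    #include <time.h>

    struct request {
        pthread_mutex_t lock;
        pthread_cond_t  wake;      /* stands in for ptlrpc_client_wake_req() */
        time_t          real_sent;
        bool            req_unlinked;
        bool            reply_unlinked;
        bool            net_err;
    };

    /* Send-completion callback: update state under the request lock,
     * flag an error on a failed send, wake any waiter, then unlock. */
    static void request_out_callback(struct request *req, int ev_status)
    {
        pthread_mutex_lock(&req->lock);
        req->real_sent = time(NULL);
        req->req_unlinked = true;
        if (ev_status != 0)
            req->net_err = true;
        if (req->reply_unlinked)   /* both halves unlinked: wake the sleeper */
            pthread_cond_broadcast(&req->wake);
        pthread_mutex_unlock(&req->lock);
    }

    int main(void)
    {
        struct request req = {
            .lock = PTHREAD_MUTEX_INITIALIZER,
            .wake = PTHREAD_COND_INITIALIZER,
            .reply_unlinked = true,
        };
        request_out_callback(&req, 0);
        return req.net_err ? 1 : 0;
    }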
|
/drivers/macintosh/ |
D | via-pmu68k.c |
  106  static int pmu_send_request(struct adb_request *req, int sync);
  114  static void pmu_done(struct adb_request *req);
  192  volatile struct adb_request req;  in pmu_init() local
  197  pmu_request((struct adb_request *) &req, NULL, 2, PMU_SET_INTR_MASK, PMU_INT_ADB);  in pmu_init()
  199  while (!req.complete) {  in pmu_init()
  224  pmu_request((struct adb_request *) &req, NULL, 2, PMU_SET_INTR_MASK,  in pmu_init()
  227  while (!req.complete) {  in pmu_init()
  272  pmu_send_request(struct adb_request *req, int sync)  in pmu_send_request() argument
  278  req->complete = 1;  in pmu_send_request()
  284  switch (req->data[0]) {  in pmu_send_request()
[all …]
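pmu_init() runs before interrupt delivery is set up, so it busy-waits on req.complete while servicing the interface by hand. A self-contained toy version of that synchronous wait; bus_poll() here fakes the hardware finishing after a few polls:

    #include <stdbool.h>
    #include <stdio.h>

    struct adb_request {
        volatile bool complete;   /* set by the completion path */
    };

    /* Stand-in for pmu_poll(): pretend the PMU finishes after 3 polls. */
    static void bus_poll(struct adb_request *req)
    {
        static int ticks;
        if (++ticks == 3)
            req->complete = true;
    }

    /* Busy-wait until the request completes, as pmu_init() does with
     * "while (!req.complete)" before interrupts are available. */
    static void wait_sync(struct adb_request *req)
    {
        while (!req->complete)
            bus_poll(req);
    }

    int main(void)
    {
        struct adb_request req = { .complete = false };
        wait_sync(&req);
        puts("request complete");
        return 0;
    }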
|
D | adb-iop.c |
   66  static void adb_iop_end_req(struct adb_request *req, int state)  in adb_iop_end_req() argument
   68  req->complete = 1;  in adb_iop_end_req()
   69  current_req = req->next;  in adb_iop_end_req()
   70  if (req->done) (*req->done)(req);  in adb_iop_end_req()
   82  struct adb_request *req;  in adb_iop_complete() local
   87  req = current_req;  in adb_iop_complete()
   88  if ((adb_iop_state == sending) && req && req->reply_expected) {  in adb_iop_complete()
  105  struct adb_request *req;  in adb_iop_listen() local
  113  req = current_req;  in adb_iop_listen()
  116  printk("adb_iop_listen %p: rcvd packet, %d bytes: %02X %02X", req,  in adb_iop_listen()
[all …]
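adb_iop_end_req() is a compact example of a singly linked request queue: mark the request complete, advance the queue head to req->next, and invoke the optional done callback. A runnable sketch of the same shape (a toy, not the driver's code):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct adb_request {
        struct adb_request *next;
        bool complete;
        void (*done)(struct adb_request *);
    };

    static struct adb_request *current_req;

    /* Same shape as adb_iop_end_req(): mark the request done, advance
     * the queue head, then run the completion callback if supplied. */
    static void end_req(struct adb_request *req)
    {
        req->complete = true;
        current_req = req->next;
        if (req->done)
            req->done(req);
    }

    static void my_done(struct adb_request *req)
    {
        printf("request %p finished\n", (void *)req);
    }

    int main(void)
    {
        struct adb_request b = { .next = NULL, .done = my_done };
        struct adb_request a = { .next = &b,  .done = my_done };
        current_req = &a;
        while (current_req)
            end_req(current_req);
        return 0;
    }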
|
D | via-pmu.c |
  184  static int pmu_send_request(struct adb_request *req, int sync);
  216  int pmu_polled_request(struct adb_request *req);
  532  struct adb_request req;  in init_pmu() local
  537  pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, pmu_intr_mask);  in init_pmu()
  539  while (!req.complete) {  in init_pmu()
  564  pmu_request(&req, NULL, 2, PMU_SYSTEM_READY, 2);  in init_pmu()
  565  while (!req.complete)  in init_pmu()
  570  pmu_request(&req, NULL, 1, PMU_GET_VERSION);  in init_pmu()
  571  pmu_wait_complete(&req);  in init_pmu()
  572  if (req.reply_len > 0)  in init_pmu()
[all …]
|
D | via-macii.c |
   86  static int macii_send_request(struct adb_request *req, int sync);
   87  static int macii_write(struct adb_request *req);
  123  static int request_is_queued(struct adb_request *req) {  in request_is_queued() argument
  129  if (cur == req) {  in request_is_queued()
  205  static struct adb_request req;  in macii_queue_poll() local
  215  BUG_ON(request_is_queued(&req));  in macii_queue_poll()
  217  adb_request(&req, NULL, ADBREQ_NOSEND, 1,  in macii_queue_poll()
  220  req.sent = 0;  in macii_queue_poll()
  221  req.complete = 0;  in macii_queue_poll()
  222  req.reply_len = 0;  in macii_queue_poll()
[all …]
|
/drivers/s390/scsi/ |
D | zfcp_fsf.c |
   66  static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req)  in zfcp_fsf_class_not_supp() argument
   68  dev_err(&req->adapter->ccw_device->dev, "FCP device not "  in zfcp_fsf_class_not_supp()
   70  zfcp_erp_adapter_shutdown(req->adapter, 0, "fscns_1");  in zfcp_fsf_class_not_supp()
   71  req->status |= ZFCP_STATUS_FSFREQ_ERROR;  in zfcp_fsf_class_not_supp()
   78  void zfcp_fsf_req_free(struct zfcp_fsf_req *req)  in zfcp_fsf_req_free() argument
   80  if (likely(req->pool)) {  in zfcp_fsf_req_free()
   81  if (likely(req->qtcb))  in zfcp_fsf_req_free()
   82  mempool_free(req->qtcb, req->adapter->pool.qtcb_pool);  in zfcp_fsf_req_free()
   83  mempool_free(req, req->pool);  in zfcp_fsf_req_free()
   87  if (likely(req->qtcb))  in zfcp_fsf_req_free()
[all …]
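zfcp_fsf_req_free() shows the pool-aware free path: a request carved from a mempool (and its QTCB) must go back to its pool, while ad-hoc allocations are simply freed. A userspace sketch with a toy mempool standing in for the kernel's; the real mempool API maintains a preallocated reserve, and zfcp keeps the QTCB pool on the adapter rather than on the request as simplified here:

    #include <stdio.h>
    #include <stdlib.h>

    /* Toy mempool stand-in: just counts returns and frees. */
    struct mempool { int returned; };

    static void mempool_free(void *obj, struct mempool *pool)
    {
        pool->returned++;
        free(obj);
    }

    struct fsf_req {
        struct mempool *pool;       /* non-NULL if carved from a mempool */
        struct mempool *qtcb_pool;  /* simplified: really adapter->pool.qtcb_pool */
        void *qtcb;
    };

    /* Mirrors zfcp_fsf_req_free(): pooled objects go back to their
     * pool, ad-hoc ones are simply freed. */
    static void fsf_req_free(struct fsf_req *req)
    {
        if (req->pool) {
            if (req->qtcb)
                mempool_free(req->qtcb, req->qtcb_pool);
            mempool_free(req, req->pool);
            return;
        }
        free(req->qtcb);
        free(req);
    }

    int main(void)
    {
        struct mempool pool = { 0 };

        struct fsf_req *a = malloc(sizeof(*a));  /* pooled request */
        a->pool = &pool;
        a->qtcb_pool = &pool;
        a->qtcb = malloc(16);
        fsf_req_free(a);

        struct fsf_req *b = calloc(1, sizeof(*b));  /* ad-hoc request */
        fsf_req_free(b);

        printf("%d objects returned to pool\n", pool.returned);
        return 0;
    }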
|
/drivers/block/drbd/ |
D | drbd_req.c |
   37  static void _drbd_start_io_acct(struct drbd_device *device, struct drbd_request *req)  in _drbd_start_io_acct() argument
   39  generic_start_io_acct(bio_data_dir(req->master_bio), req->i.size >> 9,  in _drbd_start_io_acct()
   44  static void _drbd_end_io_acct(struct drbd_device *device, struct drbd_request *req)  in _drbd_end_io_acct() argument
   46  generic_end_io_acct(bio_data_dir(req->master_bio),  in _drbd_end_io_acct()
   47  &device->vdisk->part0, req->start_jif);  in _drbd_end_io_acct()
   52  struct drbd_request *req;  in drbd_req_new() local
   54  req = mempool_alloc(drbd_request_mempool, GFP_NOIO);  in drbd_req_new()
   55  if (!req)  in drbd_req_new()
   57  memset(req, 0, sizeof(*req));  in drbd_req_new()
   59  drbd_req_make_private_bio(req, bio_src);  in drbd_req_new()
[all …]
|
/drivers/staging/greybus/ |
D | audio_apbridgea.c |
   18  struct audio_apbridgea_set_config_request req;  in gb_audio_apbridgea_set_config() local
   20  req.hdr.type = AUDIO_APBRIDGEA_TYPE_SET_CONFIG;  in gb_audio_apbridgea_set_config()
   21  req.hdr.i2s_port = cpu_to_le16(i2s_port);  in gb_audio_apbridgea_set_config()
   22  req.format = cpu_to_le32(format);  in gb_audio_apbridgea_set_config()
   23  req.rate = cpu_to_le32(rate);  in gb_audio_apbridgea_set_config()
   24  req.mclk_freq = cpu_to_le32(mclk_freq);  in gb_audio_apbridgea_set_config()
   26  return gb_hd_output(connection->hd, &req, sizeof(req),  in gb_audio_apbridgea_set_config()
   35  struct audio_apbridgea_register_cport_request req;  in gb_audio_apbridgea_register_cport() local
   38  req.hdr.type = AUDIO_APBRIDGEA_TYPE_REGISTER_CPORT;  in gb_audio_apbridgea_register_cport()
   39  req.hdr.i2s_port = cpu_to_le16(i2s_port);  in gb_audio_apbridgea_register_cport()
[all …]
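Each of these greybus audio calls fills a wire-format request in which every multi-byte field is converted to little-endian with cpu_to_le16()/cpu_to_le32() before being handed to gb_hd_output(). A userspace equivalent using glibc's htole16()/htole32(); the struct layout and the opcode value below are assumptions for illustration, not the real greybus definitions:

    #include <endian.h>   /* htole16()/htole32(), userspace cpu_to_le*() */
    #include <stdint.h>
    #include <stdio.h>

    /* Assumed wire layout, for illustration only -- not the real
     * audio_apbridgea_set_config_request definition. */
    struct set_config_req {
        uint8_t  type;
        uint16_t i2s_port;
        uint32_t format;
        uint32_t rate;
        uint32_t mclk_freq;
    } __attribute__((packed));

    static void fill_set_config(struct set_config_req *req, uint16_t port,
                                uint32_t format, uint32_t rate, uint32_t mclk)
    {
        req->type      = 0x01;            /* hypothetical SET_CONFIG opcode */
        req->i2s_port  = htole16(port);   /* cpu_to_le16() equivalent */
        req->format    = htole32(format);
        req->rate      = htole32(rate);
        req->mclk_freq = htole32(mclk);
    }

    int main(void)
    {
        struct set_config_req req;

        fill_set_config(&req, 1, 0x2, 48000, 12288000);
        printf("%zu-byte request, port on the wire: %02x %02x\n",
               sizeof(req), ((unsigned char *)&req)[1],
               ((unsigned char *)&req)[2]);
        return 0;
    }

Fixing the wire byte order this way keeps the protocol identical whether the host CPU is little- or big-endian.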
|
D | audio_gb.c |
   53  struct gb_audio_get_control_request req;  in gb_audio_gb_get_control() local
   57  req.control_id = control_id;  in gb_audio_gb_get_control()
   58  req.index = index;  in gb_audio_gb_get_control()
   61  &req, sizeof(req), &resp, sizeof(resp));  in gb_audio_gb_get_control()
   75  struct gb_audio_set_control_request req;  in gb_audio_gb_set_control() local
   77  req.control_id = control_id;  in gb_audio_gb_set_control()
   78  req.index = index;  in gb_audio_gb_set_control()
   79  memcpy(&req.value, value, sizeof(req.value));  in gb_audio_gb_set_control()
   82  &req, sizeof(req), NULL, 0);  in gb_audio_gb_set_control()
   89  struct gb_audio_enable_widget_request req;  in gb_audio_gb_enable_widget() local
[all …]
|
/drivers/infiniband/hw/hfi1/ |
D | user_sdma.c |
  270  struct user_sdma_request *req;  member
  276  #define SDMA_DBG(req, fmt, ...) \  argument
  277  hfi1_cdbg(SDMA, "[%u:%u:%u:%u] " fmt, (req)->pq->dd->unit, \
  278  (req)->pq->ctxt, (req)->pq->subctxt, (req)->info.comp_idx, \
  547  struct user_sdma_request *req;  in hfi1_user_sdma_process_request() local
  553  if (iovec[idx].iov_len < sizeof(info) + sizeof(req->hdr)) {  in hfi1_user_sdma_process_request()
  558  iovec[idx].iov_len, sizeof(info) + sizeof(req->hdr));  in hfi1_user_sdma_process_request()
  609  req = pq->reqs + info.comp_idx;  in hfi1_user_sdma_process_request()
  610  memset(req, 0, sizeof(*req));  in hfi1_user_sdma_process_request()
  611  req->data_iovs = req_iovcnt(info.ctrl) - 1; /* subtract header vector */  in hfi1_user_sdma_process_request()
[all …]
|
/drivers/clk/sunxi/ |
D | clk-sunxi.c |
   41  static void sun4i_get_pll1_factors(struct factors_request *req)  in sun4i_get_pll1_factors() argument
   46  div = req->rate / 6000000;  in sun4i_get_pll1_factors()
   47  req->rate = 6000000 * div;  in sun4i_get_pll1_factors()
   50  req->m = 0;  in sun4i_get_pll1_factors()
   53  if (req->rate >= 768000000 || req->rate == 42000000 ||  in sun4i_get_pll1_factors()
   54  req->rate == 54000000)  in sun4i_get_pll1_factors()
   55  req->k = 1;  in sun4i_get_pll1_factors()
   57  req->k = 0;  in sun4i_get_pll1_factors()
   61  req->p = 3;  in sun4i_get_pll1_factors()
   65  req->p = 2;  in sun4i_get_pll1_factors()
[all …]
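sun4i_get_pll1_factors() first quantizes the requested rate to the 6 MHz grid the PLL can hit, then picks the k multiplier, with the high-rate threshold and two special-case frequencies visible at lines 53-55. The rounding and k selection extracted into a runnable sketch; the p/m selection that follows in the driver is omitted:

    #include <stdint.h>
    #include <stdio.h>

    struct factors_request {
        uint32_t rate;
        uint8_t k, m, p;
    };

    /* Follows the excerpt: round the requested rate down to a multiple
     * of 6 MHz, then choose k for the high-rate and special cases. */
    static void get_pll1_factors(struct factors_request *req)
    {
        uint32_t div = req->rate / 6000000;

        req->rate = 6000000 * div;
        req->m = 0;
        if (req->rate >= 768000000 || req->rate == 42000000 ||
            req->rate == 54000000)
            req->k = 1;
        else
            req->k = 0;
        /* the real driver continues with p (post-divider) selection */
    }

    int main(void)
    {
        struct factors_request r = { .rate = 1008000000 };

        get_pll1_factors(&r);
        printf("rate=%u k=%u m=%u\n", r.rate, r.k, r.m);
        return 0;
    }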
|
D | clk-sun9i-core.c |
   36  static void sun9i_a80_get_pll4_factors(struct factors_request *req)  in sun9i_a80_get_pll4_factors() argument
   43  n = DIV_ROUND_UP(req->rate, 6000000);  in sun9i_a80_get_pll4_factors()
   63  req->rate = ((24000000 * n) >> p) / (m + 1);  in sun9i_a80_get_pll4_factors()
   64  req->n = n;  in sun9i_a80_get_pll4_factors()
   65  req->m = m;  in sun9i_a80_get_pll4_factors()
   66  req->p = p;  in sun9i_a80_get_pll4_factors()
  109  static void sun9i_a80_get_gt_factors(struct factors_request *req)  in sun9i_a80_get_gt_factors() argument
  113  if (req->parent_rate < req->rate)  in sun9i_a80_get_gt_factors()
  114  req->rate = req->parent_rate;  in sun9i_a80_get_gt_factors()
  116  div = DIV_ROUND_UP(req->parent_rate, req->rate);  in sun9i_a80_get_gt_factors()
[all …]
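Line 63 is the back-computation: once n, p, and m are chosen, the rate actually produced from the fixed 24 MHz oscillator is ((24 MHz * n) >> p) / (m + 1), and that value is written back into the request so callers see the achievable rate rather than the one they asked for. As a standalone helper (the factor values in main are illustrative only):

    #include <stdint.h>
    #include <stdio.h>

    /* PLL4 output from factors (n, p, m) and the fixed 24 MHz
     * oscillator, per line 63 of the excerpt. */
    static uint32_t pll4_rate(uint32_t n, uint32_t p, uint32_t m)
    {
        return (uint32_t)(((24000000ULL * n) >> p) / (m + 1));
    }

    int main(void)
    {
        /* e.g. n = 25, p = 0, m = 0 gives 600 MHz */
        printf("%u\n", pll4_rate(25, 0, 0));
        return 0;
    }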
|
/drivers/nvme/target/ |
D | io-cmd.c |
   21  struct nvmet_req *req = bio->bi_private;  in nvmet_bio_done() local
   23  nvmet_req_complete(req,  in nvmet_bio_done()
   26  if (bio != &req->inline_bio)  in nvmet_bio_done()
   30  static inline u32 nvmet_rw_len(struct nvmet_req *req)  in nvmet_rw_len() argument
   32  return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) <<  in nvmet_rw_len()
   33  req->ns->blksize_shift;  in nvmet_rw_len()
   36  static void nvmet_inline_bio_init(struct nvmet_req *req)  in nvmet_inline_bio_init() argument
   38  struct bio *bio = &req->inline_bio;  in nvmet_inline_bio_init()
   42  bio->bi_io_vec = req->inline_bvec;  in nvmet_inline_bio_init()
   45  static void nvmet_execute_rw(struct nvmet_req *req)  in nvmet_execute_rw() argument
[all …]
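nvmet_rw_len() decodes NVMe's zero-based block count: the rw.length field stores N for N + 1 logical blocks, and the byte length is that count shifted by the namespace's block-size shift. A standalone version, with the le16_to_cpu() conversion dropped and the input assumed host-endian:

    #include <stdint.h>
    #include <stdio.h>

    /* NVMe rw.length is 0's based: a stored value of N means N + 1
     * logical blocks. Mirrors nvmet_rw_len(). */
    static uint32_t rw_len_bytes(uint16_t nlb, uint8_t blksize_shift)
    {
        return ((uint32_t)nlb + 1) << blksize_shift;
    }

    int main(void)
    {
        /* field 7 with 512-byte blocks (shift 9): 8 blocks = 4096 bytes */
        printf("%u\n", rw_len_bytes(7, 9));
        return 0;
    }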
|
D | fabrics-cmd.c |
   18  static void nvmet_execute_prop_set(struct nvmet_req *req)  in nvmet_execute_prop_set() argument
   22  if (!(req->cmd->prop_set.attrib & 1)) {  in nvmet_execute_prop_set()
   23  u64 val = le64_to_cpu(req->cmd->prop_set.value);  in nvmet_execute_prop_set()
   25  switch (le32_to_cpu(req->cmd->prop_set.offset)) {  in nvmet_execute_prop_set()
   27  nvmet_update_cc(req->sq->ctrl, val);  in nvmet_execute_prop_set()
   37  nvmet_req_complete(req, status);  in nvmet_execute_prop_set()
   40  static void nvmet_execute_prop_get(struct nvmet_req *req)  in nvmet_execute_prop_get() argument
   42  struct nvmet_ctrl *ctrl = req->sq->ctrl;  in nvmet_execute_prop_get()
   46  if (req->cmd->prop_get.attrib & 1) {  in nvmet_execute_prop_get()
   47  switch (le32_to_cpu(req->cmd->prop_get.offset)) {  in nvmet_execute_prop_get()
[all …]
|
D | admin-cmd.c |
   33  static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,  in nvmet_get_smart_log_nsid() argument
   41  ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->get_log_page.nsid);  in nvmet_get_smart_log_nsid()
   45  le32_to_cpu(req->cmd->get_log_page.nsid));  in nvmet_get_smart_log_nsid()
   63  static u16 nvmet_get_smart_log_all(struct nvmet_req *req,  in nvmet_get_smart_log_all() argument
   73  ctrl = req->sq->ctrl;  in nvmet_get_smart_log_all()
   95  static u16 nvmet_get_smart_log(struct nvmet_req *req,  in nvmet_get_smart_log() argument
  100  WARN_ON(req == NULL || slog == NULL);  in nvmet_get_smart_log()
  101  if (req->cmd->get_log_page.nsid == 0xFFFFFFFF)  in nvmet_get_smart_log()
  102  status = nvmet_get_smart_log_all(req, slog);  in nvmet_get_smart_log()
  104  status = nvmet_get_smart_log_nsid(req, slog);  in nvmet_get_smart_log()
[all …]
|
/drivers/s390/cio/ |
D | ccwreq.c |
   42  struct ccw_request *req = &cdev->private->req;  in ccwreq_next_path() local
   44  if (!req->singlepath) {  in ccwreq_next_path()
   45  req->mask = 0;  in ccwreq_next_path()
   48  req->retries = req->maxretries;  in ccwreq_next_path()
   49  req->mask = lpm_adjust(req->mask >> 1, req->lpm);  in ccwreq_next_path()
   51  return req->mask;  in ccwreq_next_path()
   59  struct ccw_request *req = &cdev->private->req;  in ccwreq_stop() local
   61  if (req->done)  in ccwreq_stop()
   63  req->done = 1;  in ccwreq_stop()
   66  if (rc && rc != -ENODEV && req->drc)  in ccwreq_stop()
[all …]
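ccwreq_next_path() walks the channel-path mask: on each path change it refills the retry budget and derives the next single-bit path mask from lpm_adjust(req->mask >> 1, req->lpm). The excerpt doesn't include lpm_adjust() itself, so the version below is a guess at its semantics (shift the candidate bit right until it lands on a path enabled in lpm), offered only to make the iteration concrete:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical reconstruction: mask is a single-bit path selector;
     * shift it right until it hits a path enabled in lpm, or return 0
     * when the paths are exhausted. */
    static uint8_t lpm_adjust(uint8_t mask, uint8_t lpm)
    {
        while (mask && !(mask & lpm))
            mask >>= 1;
        return mask & lpm;
    }

    /* As in ccwreq_next_path(): start from the bit below the current one. */
    static uint8_t next_path(uint8_t cur_mask, uint8_t lpm)
    {
        return lpm_adjust(cur_mask >> 1, lpm);
    }

    int main(void)
    {
        uint8_t lpm = 0x85;   /* paths 7, 2 and 0 available */

        for (uint8_t m = 0x80; m; m = next_path(m, lpm))
            printf("trying path mask 0x%02x\n", m);
        return 0;
    }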
|
/drivers/staging/emxx_udc/ |
D | emxx_udc.c |
  179  udc->ep0_req.req.buf = p_buf;  in _nbu2ss_create_ep0_packet()
  180  udc->ep0_req.req.length = length;  in _nbu2ss_create_ep0_packet()
  181  udc->ep0_req.req.dma = 0;  in _nbu2ss_create_ep0_packet()
  182  udc->ep0_req.req.zero = TRUE;  in _nbu2ss_create_ep0_packet()
  183  udc->ep0_req.req.complete = _nbu2ss_ep0_complete;  in _nbu2ss_create_ep0_packet()
  184  udc->ep0_req.req.status = -EINPROGRESS;  in _nbu2ss_create_ep0_packet()
  185  udc->ep0_req.req.context = udc;  in _nbu2ss_create_ep0_packet()
  186  udc->ep0_req.req.actual = 0;  in _nbu2ss_create_ep0_packet()
  475  struct nbu2ss_req *req,  in _nbu2ss_dma_map_single() argument
  479  if (req->req.dma == DMA_ADDR_INVALID) {  in _nbu2ss_dma_map_single()
[all …]
|
/drivers/staging/lustre/lustre/mdc/ |
D | mdc_reint.c |
  106  struct ptlrpc_request *req;  in mdc_setattr() local
  119  req = ptlrpc_request_alloc(class_exp2cliimp(exp),  in mdc_setattr()
  121  if (!req) {  in mdc_setattr()
  126  req_capsule_set_size(&req->rq_pill, &RMF_MDT_EPOCH, RCL_CLIENT,  in mdc_setattr()
  128  req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_CLIENT, ealen);  in mdc_setattr()
  129  req_capsule_set_size(&req->rq_pill, &RMF_LOGCOOKIES, RCL_CLIENT,  in mdc_setattr()
  132  rc = mdc_prep_elc_req(exp, req, MDS_REINT, &cancels, count);  in mdc_setattr()
  134  ptlrpc_request_free(req);  in mdc_setattr()
  144  mdc_setattr_pack(req, op_data, ea, ealen, ea2, ea2len);  in mdc_setattr()
  146  ptlrpc_request_set_replen(req);  in mdc_setattr()
[all …]
|
/drivers/crypto/marvell/ |
D | cipher.c |
   43  struct ablkcipher_request *req)  in mv_cesa_ablkcipher_req_iter_init() argument
   45  mv_cesa_req_dma_iter_init(&iter->base, req->nbytes);  in mv_cesa_ablkcipher_req_iter_init()
   46  mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);  in mv_cesa_ablkcipher_req_iter_init()
   47  mv_cesa_sg_dma_iter_init(&iter->dst, req->dst, DMA_FROM_DEVICE);  in mv_cesa_ablkcipher_req_iter_init()
   60  mv_cesa_ablkcipher_dma_cleanup(struct ablkcipher_request *req)  in mv_cesa_ablkcipher_dma_cleanup() argument
   62  struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);  in mv_cesa_ablkcipher_dma_cleanup()
   64  if (req->dst != req->src) {  in mv_cesa_ablkcipher_dma_cleanup()
   65  dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,  in mv_cesa_ablkcipher_dma_cleanup()
   67  dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,  in mv_cesa_ablkcipher_dma_cleanup()
   70  dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,  in mv_cesa_ablkcipher_dma_cleanup()
[all …]
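mv_cesa_ablkcipher_dma_cleanup() distinguishes out-of-place requests, whose src and dst scatterlists were mapped separately (to-device and from-device), from in-place ones (dst == src) that were mapped once and must be unmapped once. The else-branch direction is truncated in the excerpt and presumably bidirectional; the sketch below stubs out dma_unmap_sg() to show just the branching:

    #include <stdio.h>

    struct scatterlist;   /* opaque here */

    /* Stub for dma_unmap_sg(): just log which list is being unmapped. */
    static void unmap_sg(struct scatterlist *sg, int nents, const char *dir)
    {
        printf("unmap %p (%d entries, %s)\n", (void *)sg, nents, dir);
    }

    struct cipher_req {
        struct scatterlist *src, *dst;
        int src_nents, dst_nents;
    };

    /* Same branching as mv_cesa_ablkcipher_dma_cleanup(): out-of-place
     * requests unmap src and dst separately; in-place requests were
     * mapped once and are unmapped once. */
    static void dma_cleanup(struct cipher_req *req)
    {
        if (req->dst != req->src) {
            unmap_sg(req->dst, req->dst_nents, "from device");
            unmap_sg(req->src, req->src_nents, "to device");
        } else {
            unmap_sg(req->src, req->src_nents, "bidirectional (assumed)");
        }
    }

    int main(void)
    {
        struct scatterlist *src = (struct scatterlist *)0x1000;
        struct scatterlist *dst = (struct scatterlist *)0x2000;
        struct cipher_req out_of_place = { src, dst, 2, 3 };
        struct cipher_req in_place     = { src, src, 2, 2 };

        dma_cleanup(&out_of_place);
        dma_cleanup(&in_place);
        return 0;
    }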
|
D | hash.c |
   27  struct ahash_request *req)  in mv_cesa_ahash_req_iter_init() argument
   29  struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);  in mv_cesa_ahash_req_iter_init()
   30  unsigned int len = req->nbytes + creq->cache_ptr;  in mv_cesa_ahash_req_iter_init()
   36  mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);  in mv_cesa_ahash_req_iter_init()
   49  mv_cesa_ahash_dma_alloc_cache(struct mv_cesa_ahash_dma_req *req, gfp_t flags)  in mv_cesa_ahash_dma_alloc_cache() argument
   51  req->cache = dma_pool_alloc(cesa_dev->dma->cache_pool, flags,  in mv_cesa_ahash_dma_alloc_cache()
   52  &req->cache_dma);  in mv_cesa_ahash_dma_alloc_cache()
   53  if (!req->cache)  in mv_cesa_ahash_dma_alloc_cache()
   60  mv_cesa_ahash_dma_free_cache(struct mv_cesa_ahash_dma_req *req)  in mv_cesa_ahash_dma_free_cache() argument
   62  if (!req->cache)  in mv_cesa_ahash_dma_free_cache()
[all …]
|