Searched refs: io_req (Results 1 – 20 of 20) sorted by relevance

/drivers/scsi/bnx2fc/
bnx2fc_io.c
18 static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len,
20 static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req);
21 static int bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req);
22 static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req);
23 static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req);
24 static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
28 void bnx2fc_cmd_timer_set(struct bnx2fc_cmd *io_req, in bnx2fc_cmd_timer_set() argument
31 struct bnx2fc_interface *interface = io_req->port->priv; in bnx2fc_cmd_timer_set()
34 &io_req->timeout_work, in bnx2fc_cmd_timer_set()
36 kref_get(&io_req->refcount); in bnx2fc_cmd_timer_set()
[all …]
bnx2fc_tgt.c
168 struct bnx2fc_cmd *io_req; in bnx2fc_flush_active_ios() local
178 list_for_each_entry_safe(io_req, tmp, &tgt->active_cmd_queue, link) { in bnx2fc_flush_active_ios()
180 list_del_init(&io_req->link); in bnx2fc_flush_active_ios()
181 io_req->on_active_queue = 0; in bnx2fc_flush_active_ios()
182 BNX2FC_IO_DBG(io_req, "cmd_queue cleanup\n"); in bnx2fc_flush_active_ios()
184 if (cancel_delayed_work(&io_req->timeout_work)) { in bnx2fc_flush_active_ios()
186 &io_req->req_flags)) { in bnx2fc_flush_active_ios()
188 BNX2FC_IO_DBG(io_req, "eh_abort for IO " in bnx2fc_flush_active_ios()
190 complete(&io_req->tm_done); in bnx2fc_flush_active_ios()
192 kref_put(&io_req->refcount, in bnx2fc_flush_active_ios()
[all …]
bnx2fc_debug.c
16 void BNX2FC_IO_DBG(const struct bnx2fc_cmd *io_req, const char *fmt, ...) in BNX2FC_IO_DBG() argument
29 if (io_req && io_req->port && io_req->port->lport && in BNX2FC_IO_DBG()
30 io_req->port->lport->host) in BNX2FC_IO_DBG()
31 shost_printk(KERN_INFO, io_req->port->lport->host, in BNX2FC_IO_DBG()
33 io_req->xid, &vaf); in BNX2FC_IO_DBG()
bnx2fc_hwi.c
634 struct bnx2fc_cmd *io_req = NULL; in bnx2fc_process_unsol_compl() local
717 io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid]; in bnx2fc_process_unsol_compl()
718 if (!io_req) in bnx2fc_process_unsol_compl()
721 if (io_req->cmd_type != BNX2FC_SCSI_CMD) { in bnx2fc_process_unsol_compl()
727 &io_req->req_flags)) { in bnx2fc_process_unsol_compl()
728 BNX2FC_IO_DBG(io_req, "unsol_err: cleanup in " in bnx2fc_process_unsol_compl()
750 if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) { in bnx2fc_process_unsol_compl()
767 memcpy(&io_req->err_entry, err_entry, in bnx2fc_process_unsol_compl()
770 &io_req->req_flags)) { in bnx2fc_process_unsol_compl()
772 rc = bnx2fc_send_rec(io_req); in bnx2fc_process_unsol_compl()
[all …]
bnx2fc.h
406 struct bnx2fc_cmd *io_req; member
470 struct bnx2fc_cmd *io_req; member
521 int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req);
522 int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req);
523 void bnx2fc_cmd_timer_set(struct bnx2fc_cmd *io_req,
525 int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req);
526 void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req,
533 void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
535 void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
546 void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
[all …]
bnx2fc_debug.h
40 void BNX2FC_IO_DBG(const struct bnx2fc_cmd *io_req, const char *fmt, ...);
bnx2fc_els.c
34 rrq_req = cb_arg->io_req; in bnx2fc_rrq_compl()
130 els_req = cb_arg->io_req; in bnx2fc_l2_els_compl()
271 srr_req = cb_arg->io_req; in bnx2fc_srr_compl()
391 rec_req = cb_arg->io_req; in bnx2fc_rec_compl()
707 cb_arg->io_req = els_req; in bnx2fc_initiate_els()
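
The bnx2fc hits above revolve around one lifetime pattern: bnx2fc_cmd_timer_set() takes an extra kref on the command when it arms timeout_work, and bnx2fc_flush_active_ios() drops that reference only when cancel_delayed_work() confirms the timer was still pending. Below is a minimal sketch of that pattern, not the driver's code; the demo_* names are hypothetical stand-ins for struct bnx2fc_cmd.

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/slab.h>

/* Hypothetical stand-in for struct bnx2fc_cmd. */
struct demo_io_req {
	struct kref refcount;
	struct delayed_work timeout_work;
};

static void demo_io_req_release(struct kref *kref)
{
	kfree(container_of(kref, struct demo_io_req, refcount));
}

/* Arm the command timeout; the queued work item holds its own reference,
 * as bnx2fc_cmd_timer_set() does with kref_get(). */
static void demo_cmd_timer_set(struct demo_io_req *io_req, unsigned int timeout_ms)
{
	if (queue_delayed_work(system_wq, &io_req->timeout_work,
			       msecs_to_jiffies(timeout_ms)))
		kref_get(&io_req->refcount);
}

/* Flush path: drop the timer's reference only if cancel_delayed_work()
 * reports the work was still pending, as bnx2fc_flush_active_ios() does. */
static void demo_flush_one(struct demo_io_req *io_req)
{
	if (cancel_delayed_work(&io_req->timeout_work))
		kref_put(&io_req->refcount, demo_io_req_release);
}

Tying the extra reference to the pending work keeps the command alive until either the timeout handler or the cancel path has definitely finished with it.
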
/drivers/scsi/fnic/
fnic_scsi.c
125 struct fnic_io_req *io_req, in fnic_release_ioreq_buf() argument
128 if (io_req->sgl_list_pa) in fnic_release_ioreq_buf()
129 pci_unmap_single(fnic->pdev, io_req->sgl_list_pa, in fnic_release_ioreq_buf()
130 sizeof(io_req->sgl_list[0]) * io_req->sgl_cnt, in fnic_release_ioreq_buf()
134 if (io_req->sgl_cnt) in fnic_release_ioreq_buf()
135 mempool_free(io_req->sgl_list_alloc, in fnic_release_ioreq_buf()
136 fnic->io_sgl_pool[io_req->sgl_type]); in fnic_release_ioreq_buf()
137 if (io_req->sense_buf_pa) in fnic_release_ioreq_buf()
138 pci_unmap_single(fnic->pdev, io_req->sense_buf_pa, in fnic_release_ioreq_buf()
319 struct fnic_io_req *io_req, in fnic_queue_wq_copy_desc() argument
[all …]
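
fnic_release_ioreq_buf() in the hits above follows the usual teardown order for a DMA-mapped request: undo the streaming PCI DMA mappings first, then return pool-allocated memory. A rough sketch of that order, using hypothetical demo_* types and the same old-style pci_unmap_single() API the snippet uses:

#include <linux/pci.h>
#include <linux/mempool.h>
#include <linux/types.h>

/* Hypothetical, simplified stand-in for struct fnic_io_req. */
struct demo_io_req {
	dma_addr_t sgl_list_pa;
	size_t sgl_list_len;
	void *sgl_list_alloc;
	mempool_t *sgl_pool;
	dma_addr_t sense_buf_pa;
	size_t sense_len;
};

static void demo_release_ioreq_buf(struct pci_dev *pdev, struct demo_io_req *io_req)
{
	/* Undo the streaming DMA mappings before freeing the memory,
	 * in the same order as fnic_release_ioreq_buf(). */
	if (io_req->sgl_list_pa)
		pci_unmap_single(pdev, io_req->sgl_list_pa,
				 io_req->sgl_list_len, PCI_DMA_TODEVICE);
	if (io_req->sgl_list_alloc)
		mempool_free(io_req->sgl_list_alloc, io_req->sgl_pool);
	if (io_req->sense_buf_pa)
		pci_unmap_single(pdev, io_req->sense_buf_pa,
				 io_req->sense_len, PCI_DMA_FROMDEVICE);
}
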
/drivers/md/
dm-io.c
465 static int dp_init(struct dm_io_request *io_req, struct dpages *dp, in dp_init() argument
473 switch (io_req->mem.type) { in dp_init()
475 list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset); in dp_init()
479 bio_dp_init(dp, io_req->mem.ptr.bio); in dp_init()
483 flush_kernel_vmap_range(io_req->mem.ptr.vma, size); in dp_init()
484 if ((io_req->bi_rw & RW_MASK) == READ) { in dp_init()
485 dp->vma_invalidate_address = io_req->mem.ptr.vma; in dp_init()
488 vm_dp_init(dp, io_req->mem.ptr.vma); in dp_init()
492 km_dp_init(dp, io_req->mem.ptr.addr); in dp_init()
510 int dm_io(struct dm_io_request *io_req, unsigned num_regions, in dm_io() argument
[all …]
dm-log.c
240 struct dm_io_request io_req; member
296 lc->io_req.bi_rw = rw; in rw_header()
298 return dm_io(&lc->io_req, 1, &lc->header_location, NULL); in rw_header()
309 lc->io_req.bi_rw = WRITE_FLUSH; in flush_header()
311 return dm_io(&lc->io_req, 1, &null_location, NULL); in flush_header()
457 lc->io_req.mem.type = DM_IO_VMA; in create_log_context()
458 lc->io_req.notify.fn = NULL; in create_log_context()
459 lc->io_req.client = dm_io_client_create(); in create_log_context()
460 if (IS_ERR(lc->io_req.client)) { in create_log_context()
461 r = PTR_ERR(lc->io_req.client); in create_log_context()
[all …]
dm-snap-persistent.c
214 struct dm_io_request *io_req; member
223 req->result = dm_io(req->io_req, 1, req->where, NULL); in do_metadata()
237 struct dm_io_request io_req = { in chunk_io() local
247 return dm_io(&io_req, 1, &where, NULL); in chunk_io()
250 req.io_req = &io_req; in chunk_io()
dm-raid1.c
262 struct dm_io_request io_req = { in mirror_flush() local
276 dm_io(&io_req, ms->nr_mirrors, io, &error_bits); in mirror_flush()
543 struct dm_io_request io_req = { in read_async_bio() local
554 BUG_ON(dm_io(&io_req, 1, &io, NULL)); in read_async_bio()
656 struct dm_io_request io_req = { in do_write() local
666 io_req.bi_rw |= REQ_DISCARD; in do_write()
667 io_req.mem.type = DM_IO_KMEM; in do_write()
668 io_req.mem.ptr.addr = NULL; in do_write()
680 BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL)); in do_write()
dm-bufio.c
556 struct dm_io_request io_req = { in use_dmio() local
569 io_req.mem.type = DM_IO_KMEM; in use_dmio()
570 io_req.mem.ptr.addr = b->data; in use_dmio()
572 io_req.mem.type = DM_IO_VMA; in use_dmio()
573 io_req.mem.ptr.vma = b->data; in use_dmio()
578 r = dm_io(&io_req, 1, &region, NULL); in use_dmio()
1317 struct dm_io_request io_req = { in dm_bufio_issue_flush() local
1331 return dm_io(&io_req, 1, &io_reg, NULL); in dm_bufio_issue_flush()
dm-kcopyd.c
502 struct dm_io_request io_req = { in run_io_job() local
515 r = dm_io(&io_req, 1, &job->source, NULL); in run_io_job()
517 r = dm_io(&io_req, job->num_dests, job->dests, NULL); in run_io_job()
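
All of the /drivers/md/ hits use the same dm-io calling convention: a struct dm_io_request describing the memory and the I/O direction, one or more struct dm_io_region describing where on disk, and a dm_io() call that completes synchronously when notify.fn is NULL. A minimal synchronous-read sketch of that convention; demo_read_block() and its arguments are hypothetical, and field names match the bi_rw-era API the snippets above are from.

#include <linux/dm-io.h>
#include <linux/blkdev.h>
#include <linux/fs.h>

/* Hypothetical helper; the client would come from dm_io_client_create(),
 * as in create_log_context() above. */
static int demo_read_block(struct dm_io_client *client,
			   struct block_device *bdev,
			   sector_t sector, unsigned int nr_sectors, void *buf)
{
	struct dm_io_request io_req = {
		.bi_rw = READ,
		.mem.type = DM_IO_KMEM,   /* kernel buffer, as in use_dmio() */
		.mem.ptr.addr = buf,
		.notify.fn = NULL,        /* NULL notify.fn => dm_io() waits */
		.client = client,
	};
	struct dm_io_region region = {
		.bdev = bdev,
		.sector = sector,
		.count = nr_sectors,
	};

	return dm_io(&io_req, 1, &region, NULL);
}

dm-bufio's use_dmio() picks DM_IO_KMEM or DM_IO_VMA depending on how the buffer was allocated; this sketch simply assumes a kmalloc'd buffer.
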
/drivers/scsi/csiostor/
csio_lnode.c
1428 struct csio_ioreq *io_req = NULL; in csio_ln_mgmt_wr_handler() local
1441 io_req = (struct csio_ioreq *) ((uintptr_t) wr_cmd->cookie); in csio_ln_mgmt_wr_handler()
1442 io_req->wr_status = csio_wr_status(wr_cmd); in csio_ln_mgmt_wr_handler()
1446 if (csio_mgmt_req_lookup(mgmtm, io_req) != 0) { in csio_ln_mgmt_wr_handler()
1449 io_req); in csio_ln_mgmt_wr_handler()
1458 list_del_init(&io_req->sm.sm_list); in csio_ln_mgmt_wr_handler()
1463 if (io_req->io_cbfn) in csio_ln_mgmt_wr_handler()
1464 io_req->io_cbfn(hw, io_req); in csio_ln_mgmt_wr_handler()
1673 csio_ln_prep_ecwr(struct csio_ioreq *io_req, uint32_t wr_len, in csio_ln_prep_ecwr() argument
1690 wr->cookie = io_req->fw_handle; in csio_ln_prep_ecwr()
[all …]
csio_hw.c
3625 csio_mgmt_req_lookup(struct csio_mgmtm *mgmtm, struct csio_ioreq *io_req) in csio_mgmt_req_lookup() argument
3631 if (io_req == (struct csio_ioreq *)tmp) in csio_mgmt_req_lookup()
3650 struct csio_ioreq *io_req; in csio_mgmt_tmo_handler() local
3657 io_req = (struct csio_ioreq *) tmp; in csio_mgmt_tmo_handler()
3658 io_req->tmo -= min_t(uint32_t, io_req->tmo, ECM_MIN_TMO); in csio_mgmt_tmo_handler()
3660 if (!io_req->tmo) { in csio_mgmt_tmo_handler()
3663 list_del_init(&io_req->sm.sm_list); in csio_mgmt_tmo_handler()
3664 if (io_req->io_cbfn) { in csio_mgmt_tmo_handler()
3666 io_req->wr_status = -ETIMEDOUT; in csio_mgmt_tmo_handler()
3667 io_req->io_cbfn(mgmtm->hw, io_req); in csio_mgmt_tmo_handler()
[all …]
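
The csiostor hits show the management-request bookkeeping: csio_mgmt_req_lookup() checks that a request is still on the pending list, and csio_mgmt_tmo_handler() walks that list, ages each request, and completes expired ones through io_cbfn with -ETIMEDOUT. A stripped-down sketch of that timeout sweep, with hypothetical demo_* names rather than the driver's csio_ioreq:

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/errno.h>

/* Hypothetical stand-in for struct csio_ioreq. */
struct demo_req {
	struct list_head list;
	u32 tmo;		/* remaining timeout, in tick units */
	int wr_status;
	void (*cbfn)(struct demo_req *req);
};

static void demo_sweep_timeouts(struct list_head *pending, u32 elapsed)
{
	struct demo_req *req, *tmp;

	list_for_each_entry_safe(req, tmp, pending, list) {
		req->tmo -= min_t(u32, req->tmo, elapsed);
		if (req->tmo)
			continue;
		/* Expired: unlink and complete with a timeout status,
		 * mirroring csio_mgmt_tmo_handler(). */
		list_del_init(&req->list);
		if (req->cbfn) {
			req->wr_status = -ETIMEDOUT;
			req->cbfn(req);
		}
	}
}
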
/drivers/staging/rtl8712/
rtl871x_io.c
113 struct io_req *pio_req; in r8712_alloc_io_queue()
123 (sizeof(struct io_req)) + 4, in r8712_alloc_io_queue()
128 (NUM_IOREQ * (sizeof(struct io_req)) + 4)); in r8712_alloc_io_queue()
132 pio_req = (struct io_req *)(pio_queue->free_ioreqs_buf); in r8712_alloc_io_queue()
rtl871x_io.h
113 struct io_req { struct
121 struct io_req *pio_req, u8 *cnxt); argument
/drivers/staging/rtl8723au/include/
rtw_io.h
101 struct io_req { struct
110 void (*_async_io_callback)(struct rtw_adapter *padater, struct io_req *pio_req, u8 *cnxt); argument
/drivers/scsi/megaraid/
megaraid_sas_fusion.c
2330 struct MPI2_RAID_SCSI_IO_REQUEST *io_req; in build_mpt_mfi_pass_thru() local
2352 io_req = cmd->io_request; in build_mpt_mfi_pass_thru()
2356 (struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL; in build_mpt_mfi_pass_thru()
2362 (struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL.IeeeChain; in build_mpt_mfi_pass_thru()
2364 io_req->Function = MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST; in build_mpt_mfi_pass_thru()
2365 io_req->SGLOffset0 = offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, in build_mpt_mfi_pass_thru()
2367 io_req->ChainOffset = fusion->chain_offset_mfi_pthru; in build_mpt_mfi_pass_thru()