/drivers/infiniband/sw/siw/ |
D | siw_qp.c |
    278  wqe->sqe.flags = 0;  in siw_qp_mpa_rts()
    279  wqe->sqe.num_sge = 1;  in siw_qp_mpa_rts()
    280  wqe->sqe.sge[0].length = 0;  in siw_qp_mpa_rts()
    281  wqe->sqe.sge[0].laddr = 0;  in siw_qp_mpa_rts()
    282  wqe->sqe.sge[0].lkey = 0;  in siw_qp_mpa_rts()
    287  wqe->sqe.rkey = 1;  in siw_qp_mpa_rts()
    288  wqe->sqe.raddr = 0;  in siw_qp_mpa_rts()
    292  wqe->sqe.opcode = SIW_OP_WRITE;  in siw_qp_mpa_rts()
    296  wqe->sqe.opcode = SIW_OP_READ;  in siw_qp_mpa_rts()
    303  siw_read_to_orq(rreq, &wqe->sqe);  in siw_qp_mpa_rts()
    [all …]
|
D | siw_qp_tx.c |
    43   struct siw_sge *sge = &wqe->sqe.sge[0];  in siw_try_1seg()
    46   if (bytes > MAX_HDR_INLINE || wqe->sqe.num_sge != 1)  in siw_try_1seg()
    53   memcpy(paddr, &wqe->sqe.sge[1], bytes);  in siw_try_1seg()
    136  c_tx->pkt.rreq.sink_stag = htonl(wqe->sqe.sge[0].lkey);  in siw_qp_prepare_tx()
    138  cpu_to_be64(wqe->sqe.sge[0].laddr);  in siw_qp_prepare_tx()
    139  c_tx->pkt.rreq.source_stag = htonl(wqe->sqe.rkey);  in siw_qp_prepare_tx()
    140  c_tx->pkt.rreq.source_to = cpu_to_be64(wqe->sqe.raddr);  in siw_qp_prepare_tx()
    141  c_tx->pkt.rreq.read_size = htonl(wqe->sqe.sge[0].length);  in siw_qp_prepare_tx()
    184  c_tx->pkt.send_inv.inval_stag = cpu_to_be32(wqe->sqe.rkey);  in siw_qp_prepare_tx()
    196  c_tx->pkt.rwrite.sink_stag = htonl(wqe->sqe.rkey);  in siw_qp_prepare_tx()
    [all …]
|
D | siw_verbs.c |
    644  struct siw_sqe *sqe)  in siw_copy_inline_sgl() argument
    647  void *kbuf = &sqe->sge[1];  in siw_copy_inline_sgl()
    650  sqe->sge[0].laddr = (uintptr_t)kbuf;  in siw_copy_inline_sgl()
    651  sqe->sge[0].lkey = 0;  in siw_copy_inline_sgl()
    669  sqe->sge[0].length = max(bytes, 0);  in siw_copy_inline_sgl()
    670  sqe->num_sge = bytes > 0 ? 1 : 0;  in siw_copy_inline_sgl()
    682  struct siw_sqe sqe = {};  in siw_sq_flush_wr() local
    686  sqe.opcode = SIW_OP_WRITE;  in siw_sq_flush_wr()
    689  sqe.opcode = SIW_OP_READ;  in siw_sq_flush_wr()
    692  sqe.opcode = SIW_OP_READ_LOCAL_INV;  in siw_sq_flush_wr()
    [all …]
|
D | siw.h |
    193  struct siw_sqe sqe;  member
    478  #define tx_type(wqe) ((wqe)->sqe.opcode)
    480  #define tx_flags(wqe) ((wqe)->sqe.flags)
    525  void siw_read_to_orq(struct siw_sqe *rreq, struct siw_sqe *sqe);
    526  int siw_sqe_complete(struct siw_qp *qp, struct siw_sqe *sqe, u32 bytes,
    630  struct siw_sqe *sqe = &qp->sendq[qp->sq_get % qp->attrs.sq_size];  in siw_sq_empty() local
    632  return READ_ONCE(sqe->flags) == 0;  in siw_sq_empty()
    637  struct siw_sqe *sqe = &qp->sendq[qp->sq_get % qp->attrs.sq_size];  in sq_get_next() local
    639  if (READ_ONCE(sqe->flags) & SIW_WQE_VALID)  in sq_get_next()
    640  return sqe;  in sq_get_next()
|
D | siw_qp_rx.c |
    176  srx->ddp_stag = wqe->sqe.sge[0].lkey;  in siw_rresp_check_ntoh()
    177  srx->ddp_to = wqe->sqe.sge[0].laddr;  in siw_rresp_check_ntoh()
    695  resp = &tx_work->sqe;  in siw_init_rresp()
    758  wqe->sqe.id = orqe->id;  in siw_orqe_start_rx()
    759  wqe->sqe.opcode = orqe->opcode;  in siw_orqe_start_rx()
    760  wqe->sqe.sge[0].laddr = orqe->sge[0].laddr;  in siw_orqe_start_rx()
    761  wqe->sqe.sge[0].lkey = orqe->sge[0].lkey;  in siw_orqe_start_rx()
    762  wqe->sqe.sge[0].length = orqe->sge[0].length;  in siw_orqe_start_rx()
    763  wqe->sqe.flags = orqe->flags;  in siw_orqe_start_rx()
    764  wqe->sqe.num_sge = 1;  in siw_orqe_start_rx()
    [all …]
|
/drivers/crypto/hisilicon/zip/ |
D | zip_crypto.c |
    101  void (*fill_addr)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req);
    102  void (*fill_buf_size)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req);
    103  void (*fill_buf_type)(struct hisi_zip_sqe *sqe, u8 buf_type);
    104  void (*fill_req_type)(struct hisi_zip_sqe *sqe, u8 req_type);
    105  void (*fill_tag)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req);
    106  void (*fill_sqe_type)(struct hisi_zip_sqe *sqe, u8 sqe_type);
    107  u32 (*get_tag)(struct hisi_zip_sqe *sqe);
    108  u32 (*get_status)(struct hisi_zip_sqe *sqe);
    109  u32 (*get_dstlen)(struct hisi_zip_sqe *sqe);
    263  static void hisi_zip_fill_addr(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req)  in hisi_zip_fill_addr() argument
    [all …]
|
/drivers/net/ethernet/qlogic/qed/ |
D | qed_nvmetcp_fw_funcs.c |
    68   if (!task_params->sqe)  in init_sqe()
    71   memset(task_params->sqe, 0, sizeof(*task_params->sqe));  in init_sqe()
    72   task_params->sqe->task_id = cpu_to_le16(task_params->itid);  in init_sqe()
    79   SET_FIELD(task_params->sqe->contlen_cdbsize,  in init_sqe()
    81   SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_WQE_TYPE,  in init_sqe()
    94   SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_NUM_SGES, num_sges);  in init_sqe()
    95   SET_FIELD(task_params->sqe->contlen_cdbsize, NVMETCP_WQE_CONT_LEN, buf_size);  in init_sqe()
    99   SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_WQE_TYPE,  in init_sqe()
    101  SET_FIELD(task_params->sqe->contlen_cdbsize,  in init_sqe()
    106  SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_WQE_TYPE,  in init_sqe()
    [all …]
|
/drivers/scsi/qedf/ |
D | drv_fcoe_fw_funcs.c |
    13   memset(task_params->sqe, 0, sizeof(*(task_params->sqe)));  in init_common_sqe()
    14   SET_FIELD(task_params->sqe->flags, FCOE_WQE_REQ_TYPE,  in init_common_sqe()
    16   task_params->sqe->task_id = task_params->itid;  in init_common_sqe()
    167  task_params->sqe->additional_info_union.burst_length =  in init_initiator_midpath_unsolicited_fcoe_task()
    169  SET_FIELD(task_params->sqe->flags,  in init_initiator_midpath_unsolicited_fcoe_task()
    171  SET_FIELD(task_params->sqe->flags, FCOE_WQE_SGL_MODE,  in init_initiator_midpath_unsolicited_fcoe_task()
    193  task_params->sqe->additional_info_union.seq_rec_updated_offset =  in init_initiator_sequence_recovery_fcoe_task()
|
D | qedf_io.c |
    588   struct fcoe_wqe *sqe)  in qedf_init_task() argument
    624   io_req->task_params->sqe = sqe;  in qedf_init_task()
    677   struct fcoe_task_context *task_ctx, struct fcoe_wqe *sqe)  in qedf_init_mp_task() argument
    703   io_req->task_params->sqe = sqe;  in qedf_init_mp_task()
    854   struct fcoe_wqe *sqe;  in qedf_post_io_req() local
    901   sqe = &fcport->sq[sqe_idx];  in qedf_post_io_req()
    902   memset(sqe, 0, sizeof(struct fcoe_wqe));  in qedf_post_io_req()
    915   qedf_init_task(fcport, lport, io_req, task_ctx, sqe);  in qedf_post_io_req()
    1860  struct fcoe_wqe *sqe;  in qedf_initiate_abts() local
    1942  sqe = &fcport->sq[sqe_idx];  in qedf_initiate_abts()
    [all …]
|
D | qedf_els.c |
    23   struct fcoe_wqe *sqe;  in qedf_initiate_els() local
    120  sqe = &fcport->sq[sqe_idx];  in qedf_initiate_els()
    121  memset(sqe, 0, sizeof(struct fcoe_wqe));  in qedf_initiate_els()
    125  qedf_init_mp_task(els_req, task, sqe);  in qedf_initiate_els()
    702  struct fcoe_wqe *sqe;  in qedf_initiate_seq_cleanup() local
    732  sqe = &fcport->sq[sqe_idx];  in qedf_initiate_seq_cleanup()
    733  memset(sqe, 0, sizeof(struct fcoe_wqe));  in qedf_initiate_seq_cleanup()
    734  orig_io_req->task_params->sqe = sqe;  in qedf_initiate_seq_cleanup()
|
D | drv_fcoe_fw_funcs.h |
    16   struct fcoe_wqe *sqe;  member
|
/drivers/scsi/qedi/ |
D | qedi_fw_api.c |
    98   if (!task_params->sqe)  in init_sqe()
    101  memset(task_params->sqe, 0, sizeof(*task_params->sqe));  in init_sqe()
    102  task_params->sqe->task_id = cpu_to_le16(task_params->itid);  in init_sqe()
    104  SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,  in init_sqe()
    115  init_dif_context_flags(&task_params->sqe->prot_flags,  in init_sqe()
    118  SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,  in init_sqe()
    134  SET_FIELD(task_params->sqe->flags, ISCSI_WQE_NUM_SGES,  in init_sqe()
    136  SET_FIELD(task_params->sqe->contlen_cdbsize, ISCSI_WQE_CONT_LEN,  in init_sqe()
    141  SET_FIELD(task_params->sqe->contlen_cdbsize,  in init_sqe()
    147  SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,  in init_sqe()
    [all …]
|
D | qedi_fw.c |
    1042  task_params.sqe = &ep->sq[sq_idx];  in qedi_send_iscsi_login()
    1044  memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));  in qedi_send_iscsi_login()
    1116  task_params.sqe = &ep->sq[sq_idx];  in qedi_send_iscsi_logout()
    1117  memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));  in qedi_send_iscsi_logout()
    1490  task_params.sqe = &ep->sq[sq_idx];  in send_iscsi_tmf()
    1492  memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));  in send_iscsi_tmf()
    1614  task_params.sqe = &ep->sq[sq_idx];  in qedi_send_iscsi_text()
    1616  memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));  in qedi_send_iscsi_text()
    1731  task_params.sqe = &ep->sq[sq_idx];  in qedi_send_iscsi_nopout()
    1733  memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));  in qedi_send_iscsi_nopout()
    [all …]
|
/drivers/infiniband/hw/bnxt_re/ |
D | qplib_fp.c |
    1857  struct sq_send_raweth_qp1_hdr *sqe = base_hdr;  in bnxt_qplib_post_send() local
    1861  sqe->wqe_type = wqe->type;  in bnxt_qplib_post_send()
    1862  sqe->flags = wqe->flags;  in bnxt_qplib_post_send()
    1863  sqe->wqe_size = wqe_sz;  in bnxt_qplib_post_send()
    1864  sqe->cfa_action = cpu_to_le16(wqe->rawqp1.cfa_action);  in bnxt_qplib_post_send()
    1865  sqe->lflags = cpu_to_le16(wqe->rawqp1.lflags);  in bnxt_qplib_post_send()
    1866  sqe->length = cpu_to_le32(data_len);  in bnxt_qplib_post_send()
    1878  struct sq_send_hdr *sqe = base_hdr;  in bnxt_qplib_post_send() local
    1880  sqe->wqe_type = wqe->type;  in bnxt_qplib_post_send()
    1881  sqe->flags = wqe->flags;  in bnxt_qplib_post_send()
    [all …]
|
/drivers/infiniband/hw/cxgb4/ |
D | restrack.c |
    96   struct t4_swsqe *sqe)  in fill_swsqe() argument
    100  if (rdma_nl_put_driver_u32(msg, "opcode", sqe->opcode))  in fill_swsqe()
    102  if (rdma_nl_put_driver_u32(msg, "complete", sqe->complete))  in fill_swsqe()
    104  if (sqe->complete &&  in fill_swsqe()
    105  rdma_nl_put_driver_u32(msg, "cqe_status", CQE_STATUS(&sqe->cqe)))  in fill_swsqe()
    107  if (rdma_nl_put_driver_u32(msg, "signaled", sqe->signaled))  in fill_swsqe()
    109  if (rdma_nl_put_driver_u32(msg, "flushed", sqe->flushed))  in fill_swsqe()
|
/drivers/scsi/bnx2i/ |
D | bnx2i.h |
    498  struct sqe {  struct
    634  struct sqe *sq_virt;
    638  struct sqe *sq_prod_qe;
    639  struct sqe *sq_cons_qe;
    640  struct sqe *sq_first_qe;
    641  struct sqe *sq_last_qe;
|
/drivers/nvme/host/ |
D | rdma.c |
    65    struct nvme_rdma_qe sqe;  member
    290   kfree(req->sqe.data);  in nvme_rdma_exit_request()
    303   req->sqe.data = kzalloc(sizeof(struct nvme_command), GFP_KERNEL);  in nvme_rdma_init_request()
    304   if (!req->sqe.data)  in nvme_rdma_init_request()
    314   nvme_req(rq)->cmd = req->sqe.data;  in nvme_rdma_init_request()
    1575  container_of(qe, struct nvme_rdma_request, sqe);  in nvme_rdma_send_done()
    1660  struct nvme_rdma_qe *sqe = &ctrl->async_event_sqe;  in nvme_rdma_submit_async_event() local
    1661  struct nvme_command *cmd = sqe->data;  in nvme_rdma_submit_async_event()
    1665  ib_dma_sync_single_for_cpu(dev, sqe->dma, sizeof(*cmd), DMA_TO_DEVICE);  in nvme_rdma_submit_async_event()
    1673  sqe->cqe.done = nvme_rdma_async_done;  in nvme_rdma_submit_async_event()
    [all …]
|
D | fc.c |
    1935  struct nvme_command *sqe = &op->cmd_iu.sqe;  in nvme_fc_fcpio_done() local
    2039  sqe->common.command_id != cqe->command_id)) {  in nvme_fc_fcpio_done()
    2049  sqe->common.command_id,  in nvme_fc_fcpio_done()
    2156  nvme_req(rq)->cmd = &op->op.cmd_iu.sqe;  in nvme_fc_init_request()
    2165  struct nvme_command *sqe;  in nvme_fc_init_aen_ops() local
    2179  sqe = &cmdiu->sqe;  in nvme_fc_init_aen_ops()
    2191  memset(sqe, 0, sizeof(*sqe));  in nvme_fc_init_aen_ops()
    2192  sqe->common.opcode = nvme_admin_async_event;  in nvme_fc_init_aen_ops()
    2194  sqe->common.command_id = NVME_AQ_BLK_MQ_DEPTH + i;  in nvme_fc_init_aen_ops()
    2571  struct nvme_command *sqe = &cmdiu->sqe;  in nvme_fc_timeout() local
    [all …]
|
/drivers/dma/ |
D | hisi_dma.c |
    141  struct hisi_dma_sqe sqe;  member
    492  desc->sqe.length = cpu_to_le32(len);  in hisi_dma_prep_dma_memcpy()
    493  desc->sqe.src_addr = cpu_to_le64(src);  in hisi_dma_prep_dma_memcpy()
    494  desc->sqe.dst_addr = cpu_to_le64(dst);  in hisi_dma_prep_dma_memcpy()
    508  struct hisi_dma_sqe *sqe = chan->sq + chan->sq_tail;  in hisi_dma_start_transfer() local
    522  memcpy(sqe, &desc->sqe, sizeof(struct hisi_dma_sqe));  in hisi_dma_start_transfer()
    525  sqe->dw0 = cpu_to_le32(FIELD_PREP(OPCODE_MASK, OPCODE_M2M));  in hisi_dma_start_transfer()
    526  sqe->dw0 |= cpu_to_le32(LOCAL_IRQ_EN);  in hisi_dma_start_transfer()
|
/drivers/crypto/hisilicon/hpre/ |
D | hpre_crypto.c |
    60   typedef void (*hpre_cb)(struct hpre_ctx *ctx, void *sqe);
    318  struct hpre_sqe *sqe = &req->req;  in hpre_hw_data_clr_all() local
    321  tmp = le64_to_cpu(sqe->in);  in hpre_hw_data_clr_all()
    332  tmp = le64_to_cpu(sqe->out);  in hpre_hw_data_clr_all()
    346  static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe,  in hpre_alg_res_post_hf() argument
    358  id = (int)le16_to_cpu(sqe->tag);  in hpre_alg_res_post_hf()
    363  err = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_ALG_BITS) &  in hpre_alg_res_post_hf()
    366  done = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_DONE_SHIFT) &  in hpre_alg_res_post_hf()
    372  alg = le32_to_cpu(sqe->dw0) & HREE_ALG_TYPE_MASK;  in hpre_alg_res_post_hf()
    478  struct hpre_sqe *sqe = resp;  in hpre_alg_cb() local
    [all …]
|
/drivers/crypto/hisilicon/ |
D | debugfs.c |
    299  void *sqe, *sqe_curr;  in qm_sq_dump() local
    308  sqe = kzalloc(qm->sqe_size * sq_depth, GFP_KERNEL);  in qm_sq_dump()
    309  if (!sqe)  in qm_sq_dump()
    313  memcpy(sqe, qp->sqe, qm->sqe_size * sq_depth);  in qm_sq_dump()
    314  sqe_curr = sqe + (u32)(sqe_id * qm->sqe_size);  in qm_sq_dump()
    320  kfree(sqe);  in qm_sq_dump()
|
/drivers/scsi/lpfc/ |
D | lpfc_nvme.c |
    1016  cid = cp->sqe.common.command_id;  in lpfc_nvme_io_cmd_cmpl()
    1085  cp->sqe.common.opcode,  in lpfc_nvme_io_cmd_cmpl()
    1086  cp->sqe.common.command_id,  in lpfc_nvme_io_cmd_cmpl()
    1098  cp->sqe.common.opcode,  in lpfc_nvme_io_cmd_cmpl()
    1099  cp->sqe.common.command_id,  in lpfc_nvme_io_cmd_cmpl()
    1212  struct nvme_common_command *sqe;  in lpfc_nvme_prep_io_cmd() local
    1272  sqe = &((struct nvme_fc_cmd_iu *)  in lpfc_nvme_prep_io_cmd()
    1273  nCmd->cmdaddr)->sqe.common;  in lpfc_nvme_prep_io_cmd()
    1274  if (sqe->opcode == nvme_admin_async_event)  in lpfc_nvme_prep_io_cmd()
    1539  struct nvme_common_command *sqe;  in lpfc_nvme_fcp_io_submit() local
    [all …]
|
/drivers/net/ethernet/marvell/octeontx2/nic/ |
D | otx2_txrx.c |
    800   struct sk_buff *skb, int sqe, int hdr_len)  in otx2_dma_map_tso_skb() argument
    803   struct sg_list *sg = &sq->sg[sqe];  in otx2_dma_map_tso_skb()
    833   u64 seg_addr, int hdr_len, int sqe)  in otx2_tso_frag_dma_addr() argument
    835   struct sg_list *sg = &sq->sg[sqe];  in otx2_tso_frag_dma_addr()
    1325  int sq_idx, sqe;  in otx2_free_pending_sqe() local
    1329  for (sqe = 0; sqe < sq->sqe_cnt; sqe++) {  in otx2_free_pending_sqe()
    1330  sg = &sq->sg[sqe];  in otx2_free_pending_sqe()
|
/drivers/block/ |
D | ublk_drv.c |
    1827  const struct ublksrv_io_cmd *ub_src = io_uring_sqe_cmd(cmd->sqe);  in ublk_ch_uring_cmd()
    2148  const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);  in ublk_ctrl_start_dev()
    2219  const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);  in ublk_ctrl_get_queue_affinity()
    2270  const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);  in ublk_ctrl_add_dev()
    2456  const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);  in ublk_ctrl_cmd_dump()
    2475  const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);  in ublk_ctrl_get_dev_info()
    2506  const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);  in ublk_ctrl_get_params()
    2537  const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);  in ublk_ctrl_set_params()
    2599  const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);  in ublk_ctrl_start_recovery()
    2641  const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);  in ublk_ctrl_end_recovery()
    [all …]
|
/drivers/nvme/target/ |
D | fcloop.c |
    585  struct nvme_command *sqe = &cmdiu->sqe;  in check_for_drop() local
    592  __func__, sqe->common.opcode, sqe->fabrics.fctype,  in check_for_drop()
    597  (sqe->common.opcode != nvme_fabrics_command ||  in check_for_drop()
    598  sqe->fabrics.fctype != drop_opcode)) ||  in check_for_drop()
    599  (!drop_fabric_opcode && sqe->common.opcode != drop_opcode))  in check_for_drop()
|