/kernel/linux/linux-5.10/io_uring/
D | io-wq.c
      47  struct io_wqe *wqe;  member
     138  static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index);
     140  static bool io_acct_cancel_pending_work(struct io_wqe *wqe,
     157  static inline struct io_wqe_acct *io_get_acct(struct io_wqe *wqe, bool bound)  in io_get_acct() argument
     159  return &wqe->acct[bound ? IO_WQ_ACCT_BOUND : IO_WQ_ACCT_UNBOUND];  in io_get_acct()
     162  static inline struct io_wqe_acct *io_work_get_acct(struct io_wqe *wqe,  in io_work_get_acct() argument
     165  return io_get_acct(wqe, !(work->flags & IO_WQ_WORK_UNBOUND));  in io_work_get_acct()
     170  return io_get_acct(worker->wqe, worker->flags & IO_WORKER_F_BOUND);  in io_wqe_get_acct()
     182  struct io_wqe *wqe = worker->wqe;  in io_worker_cancel_cb() local
     183  struct io_wq *wq = wqe->wq;  in io_worker_cancel_cb()
          [all …]

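The io_get_acct() helper above is the bound/unbound accounting scheme in miniature: each io_wqe keeps one accounting slot per worker class, and a flag picks the slot. A minimal userspace sketch of that pattern; the type and field names here are simplified stand-ins, not the kernel's definitions:

/* Sketch of the two-slot accounting lookup shown in io_get_acct(). */
#include <stdbool.h>
#include <stdio.h>

enum { ACCT_BOUND = 0, ACCT_UNBOUND = 1, ACCT_NR = 2 };

struct acct { int nr_workers; };
struct wqe  { struct acct acct[ACCT_NR]; };

/* Select the bookkeeping slot by whether the work is bound to a
 * fixed worker pool or may run on unbounded workers. */
static struct acct *get_acct(struct wqe *wqe, bool bound)
{
        return &wqe->acct[bound ? ACCT_BOUND : ACCT_UNBOUND];
}

int main(void)
{
        struct wqe w = { .acct = { { .nr_workers = 4 }, { .nr_workers = 16 } } };

        printf("bound workers:   %d\n", get_acct(&w, true)->nr_workers);
        printf("unbound workers: %d\n", get_acct(&w, false)->nr_workers);
        return 0;
}
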
/kernel/linux/linux-5.10/drivers/infiniband/sw/rxe/
D | rxe_req.c
      14  static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
      18  struct rxe_send_wqe *wqe,  in retry_first_write_send() argument
      24  int to_send = (wqe->dma.resid > qp->mtu) ?  in retry_first_write_send()
      25  qp->mtu : wqe->dma.resid;  in retry_first_write_send()
      27  qp->req.opcode = next_opcode(qp, wqe,  in retry_first_write_send()
      28  wqe->wr.opcode);  in retry_first_write_send()
      30  if (wqe->wr.send_flags & IB_SEND_INLINE) {  in retry_first_write_send()
      31  wqe->dma.resid -= to_send;  in retry_first_write_send()
      32  wqe->dma.sge_offset += to_send;  in retry_first_write_send()
      34  advance_dma_data(&wqe->dma, to_send);  in retry_first_write_send()
          [all …]

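retry_first_write_send() above re-segments the remaining payload one MTU at a time and advances the DMA cursor by the amount sent. A self-contained sketch of that arithmetic, with invented types and a 4 KiB MTU assumed:

/* One-MTU segmentation of a residual DMA payload. */
#include <stdint.h>
#include <stdio.h>

struct dma_state {
        uint32_t resid;       /* bytes still to transfer */
        uint32_t sge_offset;  /* offset into the current SGE */
};

static void send_one_segment(struct dma_state *dma, uint32_t mtu)
{
        uint32_t to_send = (dma->resid > mtu) ? mtu : dma->resid;

        /* ... build and post one packet carrying 'to_send' bytes ... */
        dma->resid -= to_send;
        dma->sge_offset += to_send;
}

int main(void)
{
        struct dma_state dma = { .resid = 10000, .sge_offset = 0 };

        while (dma.resid)
                send_one_segment(&dma, 4096);
        printf("done, offset=%u\n", dma.sge_offset);
        return 0;
}
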
D | rxe_comp.c
     139  struct rxe_send_wqe *wqe;  in get_wqe() local
     144  wqe = queue_head(qp->sq.queue);  in get_wqe()
     145  *wqe_p = wqe;  in get_wqe()
     148  if (!wqe || wqe->state == wqe_state_posted)  in get_wqe()
     152  if (wqe->state == wqe_state_done)  in get_wqe()
     156  if (wqe->state == wqe_state_error)  in get_wqe()
     172  struct rxe_send_wqe *wqe)  in check_psn() argument
     179  diff = psn_compare(pkt->psn, wqe->last_psn);  in check_psn()
     181  if (wqe->state == wqe_state_pending) {  in check_psn()
     182  if (wqe->mask & WR_ATOMIC_OR_READ_MASK)  in check_psn()
          [all …]

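check_psn() above orders packet sequence numbers with psn_compare(). PSNs are 24-bit serial numbers, so a wraparound-safe comparison is needed; a common way to get one, and hedging, what rxe's helper amounts to, is to shift the unsigned difference into the top of a signed 32-bit word:

/* Wraparound-safe ordering of 24-bit PSNs. */
#include <stdint.h>
#include <stdio.h>

static int32_t psn_compare(uint32_t a, uint32_t b)
{
        /* <0: a precedes b, 0: equal, >0: a follows b (mod 2^24) */
        return (int32_t)((a - b) << 8);
}

int main(void)
{
        printf("%d\n", psn_compare(5, 3) > 0);          /* 1: 5 is later    */
        printf("%d\n", psn_compare(2, 0xFFFFFE) > 0);   /* 1: wrapped, later */
        return 0;
}
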
/kernel/linux/linux-5.10/drivers/infiniband/sw/siw/
D | siw_qp_tx.c
      42  struct siw_wqe *wqe = &c_tx->wqe_active;  in siw_try_1seg() local
      43  struct siw_sge *sge = &wqe->sqe.sge[0];  in siw_try_1seg()
      46  if (bytes > MAX_HDR_INLINE || wqe->sqe.num_sge != 1)  in siw_try_1seg()
      52  if (tx_flags(wqe) & SIW_WQE_INLINE) {  in siw_try_1seg()
      53  memcpy(paddr, &wqe->sqe.sge[1], bytes);  in siw_try_1seg()
      55  struct siw_mem *mem = wqe->mem[0];  in siw_try_1seg()
     121  struct siw_wqe *wqe = &c_tx->wqe_active;  in siw_qp_prepare_tx() local
     125  switch (tx_type(wqe)) {  in siw_qp_prepare_tx()
     137  c_tx->pkt.rreq.sink_stag = htonl(wqe->sqe.sge[0].lkey);  in siw_qp_prepare_tx()
     139  cpu_to_be64(wqe->sqe.sge[0].laddr);  in siw_qp_prepare_tx()
          [all …]

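siw_try_1seg() above takes a fast path when a send uses exactly one SGE small enough to ride along in the header buffer, copying the payload instead of mapping it. A sketch of that decision; the size limit here is illustrative, not siw's actual MAX_HDR_INLINE:

/* Single-segment inline fast path: copy small one-SGE payloads. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_HDR_INLINE 216   /* illustrative limit */

struct sge { const void *addr; uint32_t length; };

/* Returns bytes copied inline, or -1 if the scatter/gather path
 * must be used instead. */
static int try_1seg(const struct sge *sge, unsigned num_sge, void *hdr_paddr)
{
        if (num_sge != 1 || sge[0].length > MAX_HDR_INLINE)
                return -1;
        memcpy(hdr_paddr, sge[0].addr, sge[0].length);
        return (int)sge[0].length;
}

int main(void)
{
        char payload[] = "hello";
        char hdr[MAX_HDR_INLINE];
        struct sge s = { payload, sizeof(payload) };

        printf("inlined %d bytes\n", try_1seg(&s, 1, hdr));
        return 0;
}
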
D | siw_qp.c
     262  struct siw_wqe *wqe = tx_wqe(qp);  in siw_qp_mpa_rts() local
     268  if (unlikely(wqe->wr_status != SIW_WR_IDLE)) {  in siw_qp_mpa_rts()
     272  memset(wqe->mem, 0, sizeof(*wqe->mem) * SIW_MAX_SGE);  in siw_qp_mpa_rts()
     274  wqe->wr_status = SIW_WR_QUEUED;  in siw_qp_mpa_rts()
     275  wqe->sqe.flags = 0;  in siw_qp_mpa_rts()
     276  wqe->sqe.num_sge = 1;  in siw_qp_mpa_rts()
     277  wqe->sqe.sge[0].length = 0;  in siw_qp_mpa_rts()
     278  wqe->sqe.sge[0].laddr = 0;  in siw_qp_mpa_rts()
     279  wqe->sqe.sge[0].lkey = 0;  in siw_qp_mpa_rts()
     284  wqe->sqe.rkey = 1;  in siw_qp_mpa_rts()
          [all …]

D | siw_qp_rx.c
     169  struct siw_wqe *wqe = &frx->wqe_active;  in siw_rresp_check_ntoh() local
     176  srx->ddp_stag = wqe->sqe.sge[0].lkey;  in siw_rresp_check_ntoh()
     177  srx->ddp_to = wqe->sqe.sge[0].laddr;  in siw_rresp_check_ntoh()
     204  (wqe->processed + srx->fpdu_part_rem != wqe->bytes))) {  in siw_rresp_check_ntoh()
     207  wqe->processed + srx->fpdu_part_rem, wqe->bytes);  in siw_rresp_check_ntoh()
     281  struct siw_wqe *wqe = &frx->wqe_active;  in siw_send_check_ntoh() local
     301  if (unlikely(ddp_mo != wqe->processed)) {  in siw_send_check_ntoh()
     303  qp_id(rx_qp(srx)), ddp_mo, wqe->processed);  in siw_send_check_ntoh()
     316  if (unlikely(wqe->bytes < wqe->processed + srx->fpdu_part_rem)) {  in siw_send_check_ntoh()
     318  wqe->bytes, wqe->processed, srx->fpdu_part_rem);  in siw_send_check_ntoh()
          [all …]

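siw_send_check_ntoh() above validates each arriving fragment twice: the DDP message offset must equal the bytes already placed, and the new fragment must fit within the WQE's total byte budget. The same two checks in a standalone sketch with simplified types:

/* Receive-side fragment sanity checks. */
#include <stdint.h>
#include <stdio.h>

struct rx_wqe { uint32_t processed, bytes; };

static int check_fragment(const struct rx_wqe *wqe, uint32_t ddp_mo,
                          uint32_t frag_len)
{
        if (ddp_mo != wqe->processed)
                return -1;                       /* out-of-order placement */
        if (wqe->bytes < wqe->processed + frag_len)
                return -2;                       /* would overrun the buffer */
        return 0;
}

int main(void)
{
        struct rx_wqe w = { .processed = 4096, .bytes = 8192 };

        printf("%d\n", check_fragment(&w, 4096, 4096));  /* 0: accepted */
        printf("%d\n", check_fragment(&w, 4096, 8192));  /* -2: overrun */
        return 0;
}
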
/kernel/linux/linux-5.10/drivers/infiniband/sw/rdmavt/
D | trace_tx.h
      91  TP_PROTO(struct rvt_qp *qp, struct rvt_swqe *wqe, int wr_num_sge),
      92  TP_ARGS(qp, wqe, wr_num_sge),
      96  __field(struct rvt_swqe *, wqe)
     115  __entry->wqe = wqe;
     116  __entry->wr_id = wqe->wr.wr_id;
     119  __entry->psn = wqe->psn;
     120  __entry->lpsn = wqe->lpsn;
     121  __entry->length = wqe->length;
     122  __entry->opcode = wqe->wr.opcode;
     128  __entry->ssn = wqe->ssn;
          [all …]

D | qp.c
     633  struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last);  in rvt_clear_mr_refs() local
     635  rvt_put_qp_swqe(qp, wqe);  in rvt_clear_mr_refs()
     663  static bool rvt_swqe_has_lkey(struct rvt_swqe *wqe, u32 lkey)  in rvt_swqe_has_lkey() argument
     667  for (i = 0; i < wqe->wr.num_sge; i++) {  in rvt_swqe_has_lkey()
     668  struct rvt_sge *sge = &wqe->sg_list[i];  in rvt_swqe_has_lkey()
     686  struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, s_last);  in rvt_qp_sends_has_lkey() local
     688  if (rvt_swqe_has_lkey(wqe, lkey))  in rvt_qp_sends_has_lkey()
    1018  struct rvt_swqe *wqe;  in free_ud_wq_attr() local
    1022  wqe = rvt_get_swqe_ptr(qp, i);  in free_ud_wq_attr()
    1023  kfree(wqe->ud_wr.attr);  in free_ud_wq_attr()
          [all …]

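rvt_swqe_has_lkey() above is a straight linear scan of a WQE's scatter/gather list for a memory key being invalidated. A simplified stand-alone version; the kernel compares via the SGE's MR object, flattened here to a bare lkey field:

/* Scan a WQE's SGE list for a given lkey. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct sge  { uint32_t lkey; };
struct swqe { unsigned num_sge; struct sge sg_list[4]; };

static bool swqe_has_lkey(const struct swqe *wqe, uint32_t lkey)
{
        for (unsigned i = 0; i < wqe->num_sge; i++)
                if (wqe->sg_list[i].lkey == lkey)
                        return true;
        return false;
}

int main(void)
{
        struct swqe w = { .num_sge = 2, .sg_list = { { 0x10 }, { 0x20 } } };

        printf("%d %d\n", swqe_has_lkey(&w, 0x20), swqe_has_lkey(&w, 0x30));
        return 0;
}
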
/kernel/linux/linux-5.10/drivers/infiniband/hw/hfi1/
D | rc.c
     435  struct rvt_swqe *wqe;  in hfi1_make_rc_req() local
     490  wqe = rvt_get_swqe_ptr(qp, qp->s_last);  in hfi1_make_rc_req()
     491  hfi1_trdma_send_complete(qp, wqe, qp->s_last != qp->s_acked ?  in hfi1_make_rc_req()
     510  wqe = rvt_get_swqe_ptr(qp, qp->s_cur);  in hfi1_make_rc_req()
     536  if ((wqe->wr.send_flags & IB_SEND_FENCE) &&  in hfi1_make_rc_req()
     538  (wqe->wr.opcode != IB_WR_TID_RDMA_READ ||  in hfi1_make_rc_req()
     547  if (wqe->wr.opcode == IB_WR_REG_MR ||  in hfi1_make_rc_req()
     548  wqe->wr.opcode == IB_WR_LOCAL_INV) {  in hfi1_make_rc_req()
     558  if (!(wqe->wr.send_flags &  in hfi1_make_rc_req()
     562  wqe->wr.ex.invalidate_rkey);  in hfi1_make_rc_req()
          [all …]

D | uc.c
      67  struct rvt_swqe *wqe;  in hfi1_make_uc_req() local
      90  wqe = rvt_get_swqe_ptr(qp, qp->s_last);  in hfi1_make_uc_req()
      91  rvt_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);  in hfi1_make_uc_req()
     113  wqe = rvt_get_swqe_ptr(qp, qp->s_cur);  in hfi1_make_uc_req()
     129  if (wqe->wr.opcode == IB_WR_REG_MR ||  in hfi1_make_uc_req()
     130  wqe->wr.opcode == IB_WR_LOCAL_INV) {  in hfi1_make_uc_req()
     138  if (!(wqe->wr.send_flags & RVT_SEND_COMPLETION_ONLY)) {  in hfi1_make_uc_req()
     140  qp, wqe->wr.ex.invalidate_rkey);  in hfi1_make_uc_req()
     143  rvt_send_complete(qp, wqe, err ? IB_WC_LOC_PROT_ERR  in hfi1_make_uc_req()
     152  qp->s_psn = wqe->psn;  in hfi1_make_uc_req()
          [all …]

D | tid_rdma.h
     214  void __trdma_clean_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe);
     221  static inline void trdma_clean_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe)  in trdma_clean_swqe() argument
     223  if (!wqe->priv)  in trdma_clean_swqe()
     225  __trdma_clean_swqe(qp, wqe);  in trdma_clean_swqe()
     244  u32 hfi1_build_tid_rdma_read_packet(struct rvt_swqe *wqe,
     247  u32 hfi1_build_tid_rdma_read_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
     258  void hfi1_tid_rdma_restart_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
     261  bool hfi1_tid_rdma_wqe_interlock(struct rvt_qp *qp, struct rvt_swqe *wqe);
     263  void setup_tid_rdma_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe);
     265  struct rvt_swqe *wqe)  in hfi1_setup_tid_rdma_wqe() argument
          [all …]

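trdma_clean_swqe() above is the classic inline-guard idiom: the header-inline wrapper performs only the cheap NULL test, so the common "nothing to clean" case costs no function call, and the rare cleanup stays out of line in __trdma_clean_swqe(). The shape of it, with illustrative names:

/* Inline guard in front of an out-of-line slow path. */
#include <stdio.h>
#include <stdlib.h>

struct swqe { void *priv; };

static void __clean_swqe(struct swqe *wqe)   /* out-of-line slow path */
{
        free(wqe->priv);
        wqe->priv = NULL;
}

static inline void clean_swqe(struct swqe *wqe)
{
        if (!wqe->priv)
                return;          /* fast path: nothing allocated */
        __clean_swqe(wqe);
}

int main(void)
{
        struct swqe a = { .priv = malloc(32) }, b = { .priv = NULL };

        clean_swqe(&a);
        clean_swqe(&b);          /* harmless no-op */
        printf("ok\n");
        return 0;
}
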
D | tid_rdma.c
     377  struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, i);  in hfi1_qp_priv_init() local
     385  priv->tid_req.e.swqe = wqe;  in hfi1_qp_priv_init()
     386  wqe->priv = priv;  in hfi1_qp_priv_init()
     415  struct rvt_swqe *wqe;  in hfi1_qp_priv_tid_free() local
     420  wqe = rvt_get_swqe_ptr(qp, i);  in hfi1_qp_priv_tid_free()
     421  kfree(wqe->priv);  in hfi1_qp_priv_tid_free()
     422  wqe->priv = NULL;  in hfi1_qp_priv_tid_free()
    1614  void __trdma_clean_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe)  in __trdma_clean_swqe() argument
    1616  struct hfi1_swqe_priv *p = wqe->priv;  in __trdma_clean_swqe()
    1696  u32 hfi1_build_tid_rdma_read_packet(struct rvt_swqe *wqe,  in hfi1_build_tid_rdma_read_packet() argument
          [all …]

D | ud.c
     266  static void hfi1_make_bth_deth(struct rvt_qp *qp, struct rvt_swqe *wqe,  in hfi1_make_bth_deth() argument
     274  if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {  in hfi1_make_bth_deth()
     275  ohdr->u.ud.imm_data = wqe->wr.ex.imm_data;  in hfi1_make_bth_deth()
     281  if (wqe->wr.send_flags & IB_SEND_SOLICITED)  in hfi1_make_bth_deth()
     285  *pkey = hfi1_get_pkey(ibp, rvt_get_swqe_pkey_index(wqe));  in hfi1_make_bth_deth()
     291  ohdr->bth[1] = cpu_to_be32(rvt_get_swqe_remote_qpn(wqe));  in hfi1_make_bth_deth()
     292  ohdr->bth[2] = cpu_to_be32(mask_psn(wqe->psn));  in hfi1_make_bth_deth()
     298  cpu_to_be32((int)rvt_get_swqe_remote_qkey(wqe) < 0 ? qp->qkey :  in hfi1_make_bth_deth()
     299  rvt_get_swqe_remote_qkey(wqe));  in hfi1_make_bth_deth()
     304  struct rvt_swqe *wqe)  in hfi1_make_ud_req_9B() argument
          [all …]

/kernel/linux/linux-5.10/drivers/infiniband/hw/i40iw/
D | i40iw_ctrl.c
      51  void i40iw_insert_wqe_hdr(u64 *wqe, u64 header)  in i40iw_insert_wqe_hdr() argument
      54  set_64bit_val(wqe, 24, header);  in i40iw_insert_wqe_hdr()
     607  u64 *wqe = NULL;  in i40iw_sc_cqp_get_next_send_wqe_idx() local
     627  wqe = cqp->sq_base[*wqe_idx].elem;  in i40iw_sc_cqp_get_next_send_wqe_idx()
     629  I40IW_CQP_INIT_WQE(wqe);  in i40iw_sc_cqp_get_next_send_wqe_idx()
     631  return wqe;  in i40iw_sc_cqp_get_next_send_wqe_idx()
     835  u64 *wqe;  in i40iw_sc_manage_push_page() local
     841  wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);  in i40iw_sc_manage_push_page()
     842  if (!wqe)  in i40iw_sc_manage_push_page()
     845  set_64bit_val(wqe, 16, info->qs_handle);  in i40iw_sc_manage_push_page()
          [all …]

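i40iw_insert_wqe_hdr() above writes the header quad-word (offset 24, which carries the valid/polarity bit) as the final step, after the driver has issued a write barrier, so hardware never observes a header for a half-built WQE. A userspace sketch of the same publish-last pattern, using a C11 release store in place of the kernel's wmb(); the layout and valid bit are illustrative:

/* Publish a WQE by writing its header word last, with release order. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct wqe_words { uint64_t body[3]; _Atomic uint64_t header; };

static void insert_wqe_hdr(struct wqe_words *wqe, uint64_t header)
{
        /* release: all earlier body stores become visible before header */
        atomic_store_explicit(&wqe->header, header, memory_order_release);
}

int main(void)
{
        struct wqe_words w = { { 0 } };

        w.body[0] = 0x1111; w.body[1] = 0x2222; w.body[2] = 0x3333;
        insert_wqe_hdr(&w, 1ULL << 63);   /* illustrative valid bit */
        printf("hdr=%llx\n", (unsigned long long)atomic_load(&w.header));
        return 0;
}
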
D | i40iw_uk.c
      49  u64 header, *wqe;  in i40iw_nop_1() local
      58  wqe = qp->sq_base[wqe_idx].elem;  in i40iw_nop_1()
      69  set_64bit_val(wqe, 0, 0);  in i40iw_nop_1()
      70  set_64bit_val(wqe, 8, 0);  in i40iw_nop_1()
      71  set_64bit_val(wqe, 16, 0);  in i40iw_nop_1()
      79  set_64bit_val(wqe, 24, header);  in i40iw_nop_1()
     141  u64 *wqe = NULL;  in i40iw_qp_get_next_send_wqe() local
     183  wqe = qp->sq_base[*wqe_idx].elem;  in i40iw_qp_get_next_send_wqe()
     196  return wqe;  in i40iw_qp_get_next_send_wqe()
     205  static void i40iw_set_fragment(u64 *wqe, u32 offset, struct i40iw_sge *sge)  in i40iw_set_fragment() argument
          [all …]

D | i40iw_vf.c
      56  u64 *wqe;  in i40iw_manage_vf_pble_bp() local
      59  wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);  in i40iw_manage_vf_pble_bp()
      60  if (!wqe)  in i40iw_manage_vf_pble_bp()
      66  set_64bit_val(wqe, 16, temp);  in i40iw_manage_vf_pble_bp()
      71  set_64bit_val(wqe, 24, header);  in i40iw_manage_vf_pble_bp()
      74  set_64bit_val(wqe, 32, pd_pl_pba);  in i40iw_manage_vf_pble_bp()
      76  i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE VF_PBLE_BP WQE", wqe, I40IW_CQP_WQE_SIZE * 8);  in i40iw_manage_vf_pble_bp()

/kernel/linux/linux-5.10/drivers/scsi/lpfc/
D | lpfc_nvme.c
      73  union lpfc_wqe128 *wqe;  in lpfc_nvme_cmd_template() local
      76  wqe = &lpfc_iread_cmd_template;  in lpfc_nvme_cmd_template()
      77  memset(wqe, 0, sizeof(union lpfc_wqe128));  in lpfc_nvme_cmd_template()
      90  bf_set(wqe_cmnd, &wqe->fcp_iread.wqe_com, CMD_FCP_IREAD64_WQE);  in lpfc_nvme_cmd_template()
      91  bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, PARM_READ_CHECK);  in lpfc_nvme_cmd_template()
      92  bf_set(wqe_class, &wqe->fcp_iread.wqe_com, CLASS3);  in lpfc_nvme_cmd_template()
      93  bf_set(wqe_ct, &wqe->fcp_iread.wqe_com, SLI4_CT_RPI);  in lpfc_nvme_cmd_template()
     100  bf_set(wqe_qosd, &wqe->fcp_iread.wqe_com, 0);  in lpfc_nvme_cmd_template()
     101  bf_set(wqe_nvme, &wqe->fcp_iread.wqe_com, 1);  in lpfc_nvme_cmd_template()
     102  bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);  in lpfc_nvme_cmd_template()
          [all …]

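The bf_set() calls above poke individual bit-fields of a zeroed 128-byte WQE to build a reusable command template once at init time. A sketch of what such a set-field operation does, over an invented (shift, width) layout; lpfc's real field positions differ:

/* Shift/mask bit-field update, as used to build WQE templates. */
#include <stdint.h>
#include <stdio.h>

#define FIELD_SET(word, shift, width, val)                          \
        ((word) = ((word) & ~(((1u << (width)) - 1) << (shift))) |  \
                  (((uint32_t)(val) & ((1u << (width)) - 1)) << (shift)))

int main(void)
{
        uint32_t wqe_com = 0;

        FIELD_SET(wqe_com, 0, 8, 0x9A);   /* e.g. a command code */
        FIELD_SET(wqe_com, 8, 2, 0x3);    /* e.g. class bits     */
        FIELD_SET(wqe_com, 10, 1, 1);     /* e.g. a flag bit     */
        printf("wqe_com = 0x%08x\n", wqe_com);
        return 0;
}
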
D | lpfc_nvmet.c
      80  union lpfc_wqe128 *wqe;  in lpfc_nvmet_cmd_template() local
      83  wqe = &lpfc_tsend_cmd_template;  in lpfc_nvmet_cmd_template()
      84  memset(wqe, 0, sizeof(union lpfc_wqe128));  in lpfc_nvmet_cmd_template()
      97  bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE);  in lpfc_nvmet_cmd_template()
      98  bf_set(wqe_pu, &wqe->fcp_tsend.wqe_com, PARM_REL_OFF);  in lpfc_nvmet_cmd_template()
      99  bf_set(wqe_class, &wqe->fcp_tsend.wqe_com, CLASS3);  in lpfc_nvmet_cmd_template()
     100  bf_set(wqe_ct, &wqe->fcp_tsend.wqe_com, SLI4_CT_RPI);  in lpfc_nvmet_cmd_template()
     101  bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 1);  in lpfc_nvmet_cmd_template()
     108  bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);  in lpfc_nvmet_cmd_template()
     109  bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1);  in lpfc_nvmet_cmd_template()
          [all …]

/kernel/linux/linux-5.10/drivers/infiniband/hw/qib/
D | qib_rc.c
      42  static u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe,  in restart_sge() argument
      47  len = ((psn - wqe->psn) & QIB_PSN_MASK) * pmtu;  in restart_sge()
      48  return rvt_restart_sge(ss, wqe, len);  in restart_sge()
     221  struct rvt_swqe *wqe;  in qib_make_rc_req() local
     251  wqe = rvt_get_swqe_ptr(qp, qp->s_last);  in qib_make_rc_req()
     252  rvt_send_complete(qp, wqe, qp->s_last != qp->s_acked ?  in qib_make_rc_req()
     275  wqe = rvt_get_swqe_ptr(qp, qp->s_cur);  in qib_make_rc_req()
     296  if ((wqe->wr.send_flags & IB_SEND_FENCE) &&  in qib_make_rc_req()
     302  qp->s_psn = wqe->psn;  in qib_make_rc_req()
     309  len = wqe->length;  in qib_make_rc_req()
          [all …]

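restart_sge() above shows the retry arithmetic in one line: the byte offset at which to resume a request is the 24-bit PSN distance between the retried packet and the WQE's first packet, multiplied by the path MTU. A standalone version of the same computation:

/* Byte offset to restart a retried request from, given PSN distance. */
#include <stdint.h>
#include <stdio.h>

#define PSN_MASK 0xFFFFFF   /* PSNs are 24 bits wide */

static uint32_t restart_offset(uint32_t resend_psn, uint32_t wqe_first_psn,
                               uint32_t pmtu)
{
        return ((resend_psn - wqe_first_psn) & PSN_MASK) * pmtu;
}

int main(void)
{
        /* retry from the 3rd packet of a request that started at PSN 100 */
        printf("%u\n", restart_offset(102, 100, 4096));    /* 8192 */
        /* PSN space wrapped between the first packet and the retry */
        printf("%u\n", restart_offset(1, 0xFFFFFF, 2048)); /* 4096 */
        return 0;
}
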
D | qib_uc.c
      52  struct rvt_swqe *wqe;  in qib_make_uc_req() local
      70  wqe = rvt_get_swqe_ptr(qp, qp->s_last);  in qib_make_uc_req()
      71  rvt_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);  in qib_make_uc_req()
      84  wqe = rvt_get_swqe_ptr(qp, qp->s_cur);  in qib_make_uc_req()
      97  qp->s_psn = wqe->psn;  in qib_make_uc_req()
      98  qp->s_sge.sge = wqe->sg_list[0];  in qib_make_uc_req()
      99  qp->s_sge.sg_list = wqe->sg_list + 1;  in qib_make_uc_req()
     100  qp->s_sge.num_sge = wqe->wr.num_sge;  in qib_make_uc_req()
     101  qp->s_sge.total_len = wqe->length;  in qib_make_uc_req()
     102  len = wqe->length;  in qib_make_uc_req()
          [all …]

D | qib_ud.c
     237  struct rvt_swqe *wqe;  in qib_make_ud_req() local
     257  wqe = rvt_get_swqe_ptr(qp, qp->s_last);  in qib_make_ud_req()
     258  rvt_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);  in qib_make_ud_req()
     266  wqe = rvt_get_swqe_ptr(qp, qp->s_cur);  in qib_make_ud_req()
     274  ah_attr = rvt_get_swqe_ah_attr(wqe);  in qib_make_ud_req()
     299  qib_ud_loopback(qp, wqe);  in qib_make_ud_req()
     302  rvt_send_complete(qp, wqe, IB_WC_SUCCESS);  in qib_make_ud_req()
     308  extra_bytes = -wqe->length & 3;  in qib_make_ud_req()
     309  nwords = (wqe->length + extra_bytes) >> 2;  in qib_make_ud_req()
     313  qp->s_cur_size = wqe->length;  in qib_make_ud_req()
          [all …]

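The extra_bytes/nwords computation above uses a neat trick: for an unsigned length, -length & 3 is exactly the padding needed to reach the next 4-byte boundary, and a right shift then gives the padded length in 32-bit words. Verified in a few lines:

/* Pad-to-4-bytes arithmetic used when sizing UD packets. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        for (uint32_t len = 5; len <= 8; len++) {
                uint32_t extra_bytes = -len & 3;            /* 3, 2, 1, 0 */
                uint32_t nwords = (len + extra_bytes) >> 2; /* always 2   */

                printf("len=%u pad=%u words=%u\n", len, extra_bytes, nwords);
        }
        return 0;
}
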
/kernel/linux/linux-5.10/drivers/infiniband/hw/cxgb4/
D | qp.c
     489  static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,  in build_rdma_send() argument
     501  wqe->send.sendop_pkd = cpu_to_be32(  in build_rdma_send()
     504  wqe->send.sendop_pkd = cpu_to_be32(  in build_rdma_send()
     506  wqe->send.stag_inv = 0;  in build_rdma_send()
     510  wqe->send.sendop_pkd = cpu_to_be32(  in build_rdma_send()
     513  wqe->send.sendop_pkd = cpu_to_be32(  in build_rdma_send()
     515  wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);  in build_rdma_send()
     521  wqe->send.r3 = 0;  in build_rdma_send()
     522  wqe->send.r4 = 0;  in build_rdma_send()
     527  ret = build_immd(sq, wqe->send.u.immd_src, wr,  in build_rdma_send()
          [all …]

/kernel/linux/linux-5.10/drivers/net/ethernet/mellanox/mlx5/core/en_accel/
D | ktls_txrx.c
      55  mlx5e_ktls_build_static_params(struct mlx5e_set_tls_static_params_wqe *wqe,  in mlx5e_ktls_build_static_params() argument
      61  struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;  in mlx5e_ktls_build_static_params()
      62  struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;  in mlx5e_ktls_build_static_params()
      67  #define STATIC_PARAMS_DS_CNT DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS)  in mlx5e_ktls_build_static_params()
      78  fill_static_params(&wqe->params, info, key_id, resync_tcp_sn);  in mlx5e_ktls_build_static_params()
      98  mlx5e_ktls_build_progress_params(struct mlx5e_set_tls_progress_params_wqe *wqe,  in mlx5e_ktls_build_progress_params() argument
     104  struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;  in mlx5e_ktls_build_progress_params()
     109  #define PROGRESS_PARAMS_DS_CNT DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS)  in mlx5e_ktls_build_progress_params()
     117  fill_progress_params(&wqe->params, tis_tir_num, next_record_tcp_sn);  in mlx5e_ktls_build_progress_params()

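The two DS_CNT macros above size each WQE in 16-byte data segments (MLX5_SEND_WQE_DS is 16), rounding the struct size up to whole segments. The same computation standalone; the struct here is a stand-in with plausible segment sizes, not mlx5's real layout:

/* WQE size expressed as a count of 16-byte data segments. */
#include <stdio.h>

#define SEND_WQE_DS 16
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

struct set_params_wqe { char ctrl[16], uctrl[48], params[64]; };

int main(void)
{
        printf("ds_cnt = %zu\n",
               (size_t)DIV_ROUND_UP(sizeof(struct set_params_wqe),
                                    SEND_WQE_DS));   /* 128 / 16 = 8 */
        return 0;
}
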
/kernel/linux/linux-5.10/drivers/infiniband/hw/bnxt_re/
D | ib_verbs.c
     388  struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;  in bnxt_re_create_fence_wqe() local
     390  memset(wqe, 0, sizeof(*wqe));  in bnxt_re_create_fence_wqe()
     391  wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;  in bnxt_re_create_fence_wqe()
     392  wqe->wr_id = BNXT_QPLIB_FENCE_WRID;  in bnxt_re_create_fence_wqe()
     393  wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;  in bnxt_re_create_fence_wqe()
     394  wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;  in bnxt_re_create_fence_wqe()
     395  wqe->bind.zero_based = false;  in bnxt_re_create_fence_wqe()
     396  wqe->bind.parent_l_key = ib_mr->lkey;  in bnxt_re_create_fence_wqe()
     397  wqe->bind.va = (u64)(unsigned long)fence->va;  in bnxt_re_create_fence_wqe()
     398  wqe->bind.length = fence->size;  in bnxt_re_create_fence_wqe()
          [all …]

/kernel/linux/linux-5.10/drivers/infiniband/hw/mthca/
D | mthca_srq.c
      92  static inline int *wqe_to_link(void *wqe)  in wqe_to_link() argument
      94  return (int *) (wqe + offsetof(struct mthca_next_seg, imm));  in wqe_to_link()
     158  void *wqe;  in mthca_alloc_srq_buf() local
     185  next = wqe = get_wqe(srq, i);  in mthca_alloc_srq_buf()
     188  *wqe_to_link(wqe) = i + 1;  in mthca_alloc_srq_buf()
     191  *wqe_to_link(wqe) = -1;  in mthca_alloc_srq_buf()
     195  for (scatter = wqe + sizeof (struct mthca_next_seg);  in mthca_alloc_srq_buf()
     196  (void *) scatter < wqe + (1 << srq->wqe_shift);  in mthca_alloc_srq_buf()
     495  void *wqe;  in mthca_tavor_post_srq_recv() local
     504  wqe = get_wqe(srq, ind);  in mthca_tavor_post_srq_recv()
          [all …]

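wqe_to_link() above enables a free-list trick: while a receive WQE sits unused, the driver reuses a field inside it (the immediate-data slot of its mthca_next_seg) to store the index of the next free entry, terminated by -1, so the free list needs no side storage. A sketch of the trick with an invented layout:

/* Thread a free list through idle WQEs via an otherwise-unused field. */
#include <stdio.h>

#define NUM_WQES 4

struct next_seg { unsigned flags; int imm; };   /* imm doubles as "next" */
struct wqe_buf  { struct next_seg next; char payload[56]; };

static int *wqe_to_link(struct wqe_buf *wqe)
{
        return &wqe->next.imm;
}

int main(void)
{
        struct wqe_buf ring[NUM_WQES];

        /* link every entry to its successor; terminate the last with -1 */
        for (int i = 0; i < NUM_WQES; i++)
                *wqe_to_link(&ring[i]) = (i < NUM_WQES - 1) ? i + 1 : -1;

        for (int i = 0; i != -1; i = *wqe_to_link(&ring[i]))
                printf("free entry %d\n", i);
        return 0;
}
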