Searched refs:wr (Results 1 – 25 of 145) sorted by relevance

/drivers/media/dvb-frontends/
dib3000mb.c
148 wr(DIB3000MB_REG_LOCK1_MASK, DIB3000MB_LOCK1_SEARCH_4); in dib3000mb_set_frontend()
153 wr(DIB3000MB_REG_FFT, DIB3000_TRANSMISSION_MODE_2K); in dib3000mb_set_frontend()
157 wr(DIB3000MB_REG_FFT, DIB3000_TRANSMISSION_MODE_8K); in dib3000mb_set_frontend()
169 wr(DIB3000MB_REG_GUARD_TIME, DIB3000_GUARD_TIME_1_32); in dib3000mb_set_frontend()
173 wr(DIB3000MB_REG_GUARD_TIME, DIB3000_GUARD_TIME_1_16); in dib3000mb_set_frontend()
177 wr(DIB3000MB_REG_GUARD_TIME, DIB3000_GUARD_TIME_1_8); in dib3000mb_set_frontend()
181 wr(DIB3000MB_REG_GUARD_TIME, DIB3000_GUARD_TIME_1_4); in dib3000mb_set_frontend()
193 wr(DIB3000MB_REG_DDS_INV, DIB3000_DDS_INVERSION_OFF); in dib3000mb_set_frontend()
200 wr(DIB3000MB_REG_DDS_INV, DIB3000_DDS_INVERSION_ON); in dib3000mb_set_frontend()
209 wr(DIB3000MB_REG_QAM, DIB3000_CONSTELLATION_QPSK); in dib3000mb_set_frontend()
[all …]
/drivers/infiniband/hw/mlx5/
wr.c
104 static void set_eth_seg(const struct ib_send_wr *wr, struct mlx5_ib_qp *qp, in set_eth_seg() argument
111 if (wr->send_flags & IB_SEND_IP_CSUM) in set_eth_seg()
115 if (wr->opcode == IB_WR_LSO) { in set_eth_seg()
116 struct ib_ud_wr *ud_wr = container_of(wr, struct ib_ud_wr, wr); in set_eth_seg()
153 const struct ib_send_wr *wr) in set_datagram_seg() argument
155 memcpy(&dseg->av, &to_mah(ud_wr(wr)->ah)->av, sizeof(struct mlx5_av)); in set_datagram_seg()
157 cpu_to_be32(ud_wr(wr)->remote_qpn | MLX5_EXTENDED_UD_AV); in set_datagram_seg()
158 dseg->av.key.qkey.qkey = cpu_to_be32(ud_wr(wr)->remote_qkey); in set_datagram_seg()
321 const struct ib_send_wr *wr) in set_reg_umr_segment() argument
323 const struct mlx5_umr_wr *umrwr = umr_wr(wr); in set_reg_umr_segment()
[all …]
wr.h
44 int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
46 int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
50 const struct ib_send_wr *wr, in mlx5_ib_post_send_nodrain() argument
53 return mlx5_ib_post_send(ibqp, wr, bad_wr, false); in mlx5_ib_post_send_nodrain()
57 const struct ib_send_wr *wr, in mlx5_ib_post_send_drain() argument
60 return mlx5_ib_post_send(ibqp, wr, bad_wr, true); in mlx5_ib_post_send_drain()
64 const struct ib_recv_wr *wr, in mlx5_ib_post_recv_nodrain() argument
67 return mlx5_ib_post_recv(ibqp, wr, bad_wr, false); in mlx5_ib_post_recv_nodrain()
71 const struct ib_recv_wr *wr, in mlx5_ib_post_recv_drain() argument
74 return mlx5_ib_post_recv(ibqp, wr, bad_wr, true); in mlx5_ib_post_recv_drain()
gsi.c
51 struct mlx5_ib_gsi_wr *wr; in generate_completions() local
56 wr = &gsi->outstanding_wrs[index % gsi->cap.max_send_wr]; in generate_completions()
58 if (!wr->completed) in generate_completions()
61 WARN_ON_ONCE(mlx5_ib_generate_wc(gsi_cq, &wr->wc)); in generate_completions()
62 wr->completed = false; in generate_completions()
71 struct mlx5_ib_gsi_wr *wr = in handle_single_completion() local
78 wr->completed = true; in handle_single_completion()
79 wr_id = wr->wc.wr_id; in handle_single_completion()
80 wr->wc = *wc; in handle_single_completion()
81 wr->wc.wr_id = wr_id; in handle_single_completion()
[all …]
/drivers/infiniband/core/
rw.c
80 reg->inv_wr.next = &reg->reg_wr.wr; in rdma_rw_inv_key()
111 reg->reg_wr.wr.opcode = IB_WR_REG_MR; in rdma_rw_init_one_mr()
151 prev->wr.wr.next = &reg->inv_wr; in rdma_rw_init_mr_wrs()
153 prev->wr.wr.next = &reg->reg_wr.wr; in rdma_rw_init_mr_wrs()
156 reg->reg_wr.wr.next = &reg->wr.wr; in rdma_rw_init_mr_wrs()
158 reg->wr.wr.sg_list = &reg->sge; in rdma_rw_init_mr_wrs()
159 reg->wr.wr.num_sge = 1; in rdma_rw_init_mr_wrs()
160 reg->wr.remote_addr = remote_addr; in rdma_rw_init_mr_wrs()
161 reg->wr.rkey = rkey; in rdma_rw_init_mr_wrs()
163 reg->wr.wr.opcode = IB_WR_RDMA_WRITE; in rdma_rw_init_mr_wrs()
[all …]
/drivers/infiniband/hw/vmw_pvrdma/
pvrdma_qp.c
655 const struct ib_reg_wr *wr) in set_reg_seg() argument
657 struct pvrdma_user_mr *mr = to_vmr(wr->mr); in set_reg_seg()
659 wqe_hdr->wr.fast_reg.iova_start = mr->ibmr.iova; in set_reg_seg()
660 wqe_hdr->wr.fast_reg.pl_pdir_dma = mr->pdir.dir_dma; in set_reg_seg()
661 wqe_hdr->wr.fast_reg.page_shift = mr->page_shift; in set_reg_seg()
662 wqe_hdr->wr.fast_reg.page_list_len = mr->npages; in set_reg_seg()
663 wqe_hdr->wr.fast_reg.length = mr->ibmr.length; in set_reg_seg()
664 wqe_hdr->wr.fast_reg.access_flags = wr->access; in set_reg_seg()
665 wqe_hdr->wr.fast_reg.rkey = wr->key; in set_reg_seg()
679 int pvrdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, in pvrdma_post_send() argument
[all …]
/drivers/infiniband/ulp/iser/
iser_memory.c
243 struct ib_reg_wr *wr = &tx_desc->reg_wr; in iser_reg_sig_mr() local
254 iser_inv_rkey(&tx_desc->inv_wr, mr, cqe, &wr->wr); in iser_reg_sig_mr()
266 memset(wr, 0, sizeof(*wr)); in iser_reg_sig_mr()
267 wr->wr.next = &tx_desc->send_wr; in iser_reg_sig_mr()
268 wr->wr.opcode = IB_WR_REG_MR_INTEGRITY; in iser_reg_sig_mr()
269 wr->wr.wr_cqe = cqe; in iser_reg_sig_mr()
270 wr->wr.num_sge = 0; in iser_reg_sig_mr()
271 wr->wr.send_flags = 0; in iser_reg_sig_mr()
272 wr->mr = mr; in iser_reg_sig_mr()
273 wr->key = mr->rkey; in iser_reg_sig_mr()
[all …]
iser_verbs.c
830 struct ib_recv_wr wr; in iser_post_recvl() local
838 wr.wr_cqe = &desc->cqe; in iser_post_recvl()
839 wr.sg_list = &desc->sge; in iser_post_recvl()
840 wr.num_sge = 1; in iser_post_recvl()
841 wr.next = NULL; in iser_post_recvl()
844 ib_ret = ib_post_recv(ib_conn->qp, &wr, NULL); in iser_post_recvl()
858 struct ib_recv_wr *wr; in iser_post_recvm() local
861 for (wr = ib_conn->rx_wr, i = 0; i < count; i++, wr++) { in iser_post_recvm()
864 wr->wr_cqe = &rx_desc->cqe; in iser_post_recvm()
865 wr->sg_list = &rx_desc->rx_sg; in iser_post_recvm()
[all …]
/drivers/infiniband/sw/rxe/
rxe_verbs.c
355 static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr, in rxe_post_srq_recv() argument
364 while (wr) { in rxe_post_srq_recv()
365 err = post_one_recv(&srq->rq, wr); in rxe_post_srq_recv()
368 wr = wr->next; in rxe_post_srq_recv()
374 *bad_wr = wr; in rxe_post_srq_recv()
498 static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr, in init_send_wr() argument
501 wr->wr_id = ibwr->wr_id; in init_send_wr()
502 wr->num_sge = ibwr->num_sge; in init_send_wr()
503 wr->opcode = ibwr->opcode; in init_send_wr()
504 wr->send_flags = ibwr->send_flags; in init_send_wr()
[all …]
rxe_req.c
28 wqe->wr.opcode); in retry_first_write_send()
30 if (wqe->wr.send_flags & IB_SEND_INLINE) { in retry_first_write_send()
57 mask = wr_opcode_mask(wqe->wr.opcode, qp); in req_retry()
66 wqe->wr.wr.atomic.remote_addr : in req_retry()
68 wqe->wr.wr.rdma.remote_addr : in req_retry()
157 if (unlikely((wqe->wr.send_flags & IB_SEND_FENCE) && in req_next_wqe()
163 wqe->mask = wr_opcode_mask(wqe->wr.opcode, qp); in req_next_wqe()
358 struct rxe_send_wr *ibwr = &wqe->wr; in init_req_packet()
396 qp_num = (pkt->mask & RXE_DETH_MASK) ? ibwr->wr.ud.remote_qpn : in init_req_packet()
409 reth_set_rkey(pkt, ibwr->wr.rdma.rkey); in init_req_packet()
[all …]
/drivers/infiniband/sw/siw/
siw_verbs.c
672 static int siw_sq_flush_wr(struct siw_qp *qp, const struct ib_send_wr *wr, in siw_sq_flush_wr() argument
677 while (wr) { in siw_sq_flush_wr()
680 switch (wr->opcode) { in siw_sq_flush_wr()
710 sqe.id = wr->wr_id; in siw_sq_flush_wr()
716 *bad_wr = wr; in siw_sq_flush_wr()
719 wr = wr->next; in siw_sq_flush_wr()
725 static int siw_rq_flush_wr(struct siw_qp *qp, const struct ib_recv_wr *wr, in siw_rq_flush_wr() argument
731 while (wr) { in siw_rq_flush_wr()
732 rqe.id = wr->wr_id; in siw_rq_flush_wr()
736 *bad_wr = wr; in siw_rq_flush_wr()
[all …]
/drivers/infiniband/hw/mthca/
mthca_qp.c
1498 const struct ib_ud_wr *wr, in build_mlx_header() argument
1508 mthca_ah_grh_present(to_mah(wr->ah)), 0, 0, 0, in build_mlx_header()
1511 err = mthca_read_ah(dev, to_mah(wr->ah), &sqp->ud_header); in build_mlx_header()
1522 switch (wr->wr.opcode) { in build_mlx_header()
1530 sqp->ud_header.immediate_data = wr->wr.ex.imm_data; in build_mlx_header()
1539 sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED); in build_mlx_header()
1544 ib_get_cached_pkey(&dev->ib_dev, qp->port, wr->pkey_index, in build_mlx_header()
1547 sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn); in build_mlx_header()
1549 sqp->ud_header.deth.qkey = cpu_to_be32(wr->remote_qkey & 0x80000000 ? in build_mlx_header()
1550 sqp->qkey : wr->remote_qkey); in build_mlx_header()
[all …]
mthca_srq.c
483 int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr, in mthca_tavor_post_srq_recv() argument
502 for (nreq = 0; wr; wr = wr->next) { in mthca_tavor_post_srq_recv()
510 *bad_wr = wr; in mthca_tavor_post_srq_recv()
522 if (unlikely(wr->num_sge > srq->max_gs)) { in mthca_tavor_post_srq_recv()
524 *bad_wr = wr; in mthca_tavor_post_srq_recv()
529 for (i = 0; i < wr->num_sge; ++i) { in mthca_tavor_post_srq_recv()
530 mthca_set_data_seg(wqe, wr->sg_list + i); in mthca_tavor_post_srq_recv()
540 srq->wrid[ind] = wr->wr_id; in mthca_tavor_post_srq_recv()
577 int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr, in mthca_arbel_post_srq_recv() argument
592 for (nreq = 0; wr; ++nreq, wr = wr->next) { in mthca_arbel_post_srq_recv()
[all …]
/drivers/infiniband/hw/cxgb4/
qp.c
415 const struct ib_send_wr *wr, int max, u32 *plenp) in build_immd() argument
423 for (i = 0; i < wr->num_sge; i++) { in build_immd()
424 if ((plen + wr->sg_list[i].length) > max) in build_immd()
426 srcp = (u8 *)(unsigned long)wr->sg_list[i].addr; in build_immd()
427 plen += wr->sg_list[i].length; in build_immd()
428 rem = wr->sg_list[i].length; in build_immd()
490 const struct ib_send_wr *wr, u8 *len16) in build_rdma_send() argument
496 if (wr->num_sge > T4_MAX_SEND_SGE) in build_rdma_send()
498 switch (wr->opcode) { in build_rdma_send()
500 if (wr->send_flags & IB_SEND_SOLICITED) in build_rdma_send()
[all …]
/drivers/infiniband/ulp/rtrs/
rtrs.c
75 struct ib_recv_wr wr; in rtrs_iu_post_recv() local
87 wr = (struct ib_recv_wr) { in rtrs_iu_post_recv()
93 return ib_post_recv(con->qp, &wr, NULL); in rtrs_iu_post_recv()
99 struct ib_recv_wr wr; in rtrs_post_recv_empty() local
101 wr = (struct ib_recv_wr) { in rtrs_post_recv_empty()
105 return ib_post_recv(con->qp, &wr, NULL); in rtrs_post_recv_empty()
110 struct ib_send_wr *wr) in rtrs_post_send() argument
117 tail->next = wr; in rtrs_post_send()
119 head = wr; in rtrs_post_send()
129 struct ib_send_wr wr; in rtrs_iu_post_send() local
[all …]
rtrs-srv.c
237 struct ib_rdma_wr *wr = NULL; in rdma_write_sg() local
254 wr = &id->tx_wr; in rdma_write_sg()
270 wr->wr.sg_list = plist; in rdma_write_sg()
271 wr->wr.num_sge = 1; in rdma_write_sg()
272 wr->remote_addr = le64_to_cpu(id->rd_msg->desc[0].addr); in rdma_write_sg()
273 wr->rkey = le32_to_cpu(id->rd_msg->desc[0].key); in rdma_write_sg()
275 rkey = wr->rkey; in rdma_write_sg()
278 WARN_ON_ONCE(rkey != wr->rkey); in rdma_write_sg()
280 wr->wr.opcode = IB_WR_RDMA_WRITE; in rdma_write_sg()
281 wr->wr.wr_cqe = &io_comp_cqe; in rdma_write_sg()
[all …]
/drivers/infiniband/sw/rdmavt/
qp.c
665 for (i = 0; i < wqe->wr.num_sge; i++) { in rvt_swqe_has_lkey()
1833 int rvt_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, in rvt_post_recv() argument
1844 *bad_wr = wr; in rvt_post_recv()
1848 for (; wr; wr = wr->next) { in rvt_post_recv()
1853 if ((unsigned)wr->num_sge > qp->r_rq.max_sge) { in rvt_post_recv()
1854 *bad_wr = wr; in rvt_post_recv()
1864 *bad_wr = wr; in rvt_post_recv()
1873 wc.wr_id = wr->wr_id; in rvt_post_recv()
1878 wqe->wr_id = wr->wr_id; in rvt_post_recv()
1879 wqe->num_sge = wr->num_sge; in rvt_post_recv()
[all …]
/drivers/infiniband/hw/mlx4/
qp.c
2825 const struct ib_ud_wr *wr, in build_sriov_qp0_header() argument
2833 struct mlx4_ib_ah *ah = to_mah(wr->ah); in build_sriov_qp0_header()
2842 if (wr->wr.opcode != IB_WR_SEND) in build_sriov_qp0_header()
2847 for (i = 0; i < wr->wr.num_sge; ++i) in build_sriov_qp0_header()
2848 send_size += wr->wr.sg_list[i].length; in build_sriov_qp0_header()
2873 sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED); in build_sriov_qp0_header()
2879 sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn); in build_sriov_qp0_header()
2977 static int build_mlx_header(struct mlx4_ib_qp *qp, const struct ib_ud_wr *wr, in build_mlx_header() argument
2986 struct mlx4_ib_ah *ah = to_mah(wr->ah); in build_mlx_header()
3002 for (i = 0; i < wr->wr.num_sge; ++i) in build_mlx_header()
[all …]
srq.c
303 int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr, in mlx4_ib_post_srq_recv() argument
318 *bad_wr = wr; in mlx4_ib_post_srq_recv()
323 for (nreq = 0; wr; ++nreq, wr = wr->next) { in mlx4_ib_post_srq_recv()
324 if (unlikely(wr->num_sge > srq->msrq.max_gs)) { in mlx4_ib_post_srq_recv()
326 *bad_wr = wr; in mlx4_ib_post_srq_recv()
332 *bad_wr = wr; in mlx4_ib_post_srq_recv()
336 srq->wrid[srq->head] = wr->wr_id; in mlx4_ib_post_srq_recv()
342 for (i = 0; i < wr->num_sge; ++i) { in mlx4_ib_post_srq_recv()
343 scat[i].byte_count = cpu_to_be32(wr->sg_list[i].length); in mlx4_ib_post_srq_recv()
344 scat[i].lkey = cpu_to_be32(wr->sg_list[i].lkey); in mlx4_ib_post_srq_recv()
[all …]
/drivers/scsi/csiostor/
csio_scsi.c
205 struct fw_scsi_cmd_wr *wr = (struct fw_scsi_cmd_wr *)addr; in csio_scsi_init_cmd_wr() local
209 wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_SCSI_CMD_WR) | in csio_scsi_init_cmd_wr()
211 wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) | in csio_scsi_init_cmd_wr()
215 wr->cookie = (uintptr_t) req; in csio_scsi_init_cmd_wr()
216 wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx)); in csio_scsi_init_cmd_wr()
217 wr->tmo_val = (uint8_t) req->tmo; in csio_scsi_init_cmd_wr()
218 wr->r3 = 0; in csio_scsi_init_cmd_wr()
219 memset(&wr->r5, 0, 8); in csio_scsi_init_cmd_wr()
225 wr->rsp_dmalen = cpu_to_be32(dma_buf->len); in csio_scsi_init_cmd_wr()
226 wr->rsp_dmaaddr = cpu_to_be64(dma_buf->paddr); in csio_scsi_init_cmd_wr()
[all …]
/drivers/infiniband/hw/qedr/
qedr_roce_cm.c
543 int qedr_gsi_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, in qedr_gsi_post_send() argument
553 *bad_wr = wr; in qedr_gsi_post_send()
560 if (wr->num_sge > RDMA_MAX_SGE_PER_SQ_WQE) { in qedr_gsi_post_send()
562 wr->num_sge, RDMA_MAX_SGE_PER_SQ_WQE); in qedr_gsi_post_send()
567 if (wr->opcode != IB_WR_SEND) { in qedr_gsi_post_send()
570 wr->opcode); in qedr_gsi_post_send()
577 rc = qedr_gsi_build_packet(dev, qp, wr, &pkt); in qedr_gsi_post_send()
586 qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id; in qedr_gsi_post_send()
590 wr->opcode, in_irq(), irqs_disabled(), wr->wr_id); in qedr_gsi_post_send()
594 *bad_wr = wr; in qedr_gsi_post_send()
[all …]
/drivers/infiniband/hw/bnxt_re/
ib_verbs.c
1759 int bnxt_re_post_srq_recv(struct ib_srq *ib_srq, const struct ib_recv_wr *wr, in bnxt_re_post_srq_recv() argument
1769 while (wr) { in bnxt_re_post_srq_recv()
1771 wqe.num_sge = wr->num_sge; in bnxt_re_post_srq_recv()
1772 bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge); in bnxt_re_post_srq_recv()
1773 wqe.wr_id = wr->wr_id; in bnxt_re_post_srq_recv()
1778 *bad_wr = wr; in bnxt_re_post_srq_recv()
1781 wr = wr->next; in bnxt_re_post_srq_recv()
2120 const struct ib_send_wr *wr, in bnxt_re_build_qp1_send_v2() argument
2124 struct bnxt_re_ah *ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah, in bnxt_re_build_qp1_send_v2()
2221 if (wr->opcode == IB_WR_SEND_WITH_IMM) { in bnxt_re_build_qp1_send_v2()
[all …]
/drivers/infiniband/hw/ocrdma/
ocrdma_verbs.c
1874 const struct ib_send_wr *wr) in ocrdma_build_ud_hdr() argument
1878 struct ocrdma_ah *ah = get_ocrdma_ah(ud_wr(wr)->ah); in ocrdma_build_ud_hdr()
1880 ud_hdr->rsvd_dest_qpn = ud_wr(wr)->remote_qpn; in ocrdma_build_ud_hdr()
1884 ud_hdr->qkey = ud_wr(wr)->remote_qkey; in ocrdma_build_ud_hdr()
1921 const struct ib_send_wr *wr, u32 wqe_size) in ocrdma_build_inline_sges() argument
1926 if (wr->send_flags & IB_SEND_INLINE && qp->qp_type != IB_QPT_UD) { in ocrdma_build_inline_sges()
1927 hdr->total_len = ocrdma_sglist_len(wr->sg_list, wr->num_sge); in ocrdma_build_inline_sges()
1935 for (i = 0; i < wr->num_sge; i++) { in ocrdma_build_inline_sges()
1937 (void *)(unsigned long)wr->sg_list[i].addr, in ocrdma_build_inline_sges()
1938 wr->sg_list[i].length); in ocrdma_build_inline_sges()
[all …]
/drivers/infiniband/hw/hfi1/
uc.c
129 if (wqe->wr.opcode == IB_WR_REG_MR || in hfi1_make_uc_req()
130 wqe->wr.opcode == IB_WR_LOCAL_INV) { in hfi1_make_uc_req()
138 if (!(wqe->wr.send_flags & RVT_SEND_COMPLETION_ONLY)) { in hfi1_make_uc_req()
140 qp, wqe->wr.ex.invalidate_rkey); in hfi1_make_uc_req()
155 qp->s_sge.num_sge = wqe->wr.num_sge; in hfi1_make_uc_req()
159 switch (wqe->wr.opcode) { in hfi1_make_uc_req()
167 if (wqe->wr.opcode == IB_WR_SEND) { in hfi1_make_uc_req()
173 ohdr->u.imm_data = wqe->wr.ex.imm_data; in hfi1_make_uc_req()
176 if (wqe->wr.send_flags & IB_SEND_SOLICITED) in hfi1_make_uc_req()
196 if (wqe->wr.opcode == IB_WR_RDMA_WRITE) { in hfi1_make_uc_req()
[all …]
rc.c
536 if ((wqe->wr.send_flags & IB_SEND_FENCE) && in hfi1_make_rc_req()
538 (wqe->wr.opcode != IB_WR_TID_RDMA_READ || in hfi1_make_rc_req()
547 if (wqe->wr.opcode == IB_WR_REG_MR || in hfi1_make_rc_req()
548 wqe->wr.opcode == IB_WR_LOCAL_INV) { in hfi1_make_rc_req()
558 if (!(wqe->wr.send_flags & in hfi1_make_rc_req()
562 wqe->wr.ex.invalidate_rkey); in hfi1_make_rc_req()
593 switch (wqe->wr.opcode) { in hfi1_make_rc_req()
605 if (wqe->wr.opcode == IB_WR_SEND) { in hfi1_make_rc_req()
607 } else if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) { in hfi1_make_rc_req()
610 ohdr->u.imm_data = wqe->wr.ex.imm_data; in hfi1_make_rc_req()
[all …]
