
Searched refs:wr (Results 1 – 25 of 149) sorted by relevance


/drivers/media/dvb-frontends/
dib3000mb.c
148 wr(DIB3000MB_REG_LOCK1_MASK, DIB3000_LOCK1_SEARCH_4); in dib3000mb_set_frontend()
153 wr(DIB3000MB_REG_FFT, DIB3000_TRANSMISSION_MODE_2K); in dib3000mb_set_frontend()
157 wr(DIB3000MB_REG_FFT, DIB3000_TRANSMISSION_MODE_8K); in dib3000mb_set_frontend()
169 wr(DIB3000MB_REG_GUARD_TIME, DIB3000_GUARD_TIME_1_32); in dib3000mb_set_frontend()
173 wr(DIB3000MB_REG_GUARD_TIME, DIB3000_GUARD_TIME_1_16); in dib3000mb_set_frontend()
177 wr(DIB3000MB_REG_GUARD_TIME, DIB3000_GUARD_TIME_1_8); in dib3000mb_set_frontend()
181 wr(DIB3000MB_REG_GUARD_TIME, DIB3000_GUARD_TIME_1_4); in dib3000mb_set_frontend()
193 wr(DIB3000MB_REG_DDS_INV, DIB3000_DDS_INVERSION_OFF); in dib3000mb_set_frontend()
200 wr(DIB3000MB_REG_DDS_INV, DIB3000_DDS_INVERSION_ON); in dib3000mb_set_frontend()
209 wr(DIB3000MB_REG_QAM, DIB3000_CONSTELLATION_QPSK); in dib3000mb_set_frontend()
[all …]
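
In this DVB-T demodulator driver, `wr` is not an InfiniBand work request but a local shorthand for an I2C register write. A minimal sketch of the pattern; the macro body below is an assumption for illustration, built on the driver's dib3000_write_reg() helper:

/* Sketch: local register-write shorthand, as used in the hits above.
 * The exact macro definition is assumed, not copied from the driver. */
#define wr(reg, val) dib3000_write_reg(state, (reg), (val))

/* e.g. program an 8K FFT transmission mode: */
wr(DIB3000MB_REG_FFT, DIB3000_TRANSMISSION_MODE_8K);
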
/drivers/infiniband/hw/mlx5/
wr.c
104 static void set_eth_seg(const struct ib_send_wr *wr, struct mlx5_ib_qp *qp, in set_eth_seg() argument
111 if (wr->send_flags & IB_SEND_IP_CSUM) in set_eth_seg()
115 if (wr->opcode == IB_WR_LSO) { in set_eth_seg()
116 struct ib_ud_wr *ud_wr = container_of(wr, struct ib_ud_wr, wr); in set_eth_seg()
153 const struct ib_send_wr *wr) in set_datagram_seg() argument
155 memcpy(&dseg->av, &to_mah(ud_wr(wr)->ah)->av, sizeof(struct mlx5_av)); in set_datagram_seg()
157 cpu_to_be32(ud_wr(wr)->remote_qpn | MLX5_EXTENDED_UD_AV); in set_datagram_seg()
158 dseg->av.key.qkey.qkey = cpu_to_be32(ud_wr(wr)->remote_qkey); in set_datagram_seg()
321 const struct ib_send_wr *wr) in set_reg_umr_segment() argument
323 const struct mlx5_umr_wr *umrwr = umr_wr(wr); in set_reg_umr_segment()
[all …]
gsi.c
51 struct mlx5_ib_gsi_wr *wr; in generate_completions() local
56 wr = &gsi->outstanding_wrs[index % gsi->cap.max_send_wr]; in generate_completions()
58 if (!wr->completed) in generate_completions()
61 WARN_ON_ONCE(mlx5_ib_generate_wc(gsi_cq, &wr->wc)); in generate_completions()
62 wr->completed = false; in generate_completions()
71 struct mlx5_ib_gsi_wr *wr = in handle_single_completion() local
78 wr->completed = true; in handle_single_completion()
79 wr_id = wr->wc.wr_id; in handle_single_completion()
80 wr->wc = *wc; in handle_single_completion()
81 wr->wc.wr_id = wr_id; in handle_single_completion()
[all …]
wr.h
44 int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
46 int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
50 const struct ib_send_wr *wr, in mlx5_ib_post_send_nodrain() argument
53 return mlx5_ib_post_send(ibqp, wr, bad_wr, false); in mlx5_ib_post_send_nodrain()
57 const struct ib_send_wr *wr, in mlx5_ib_post_send_drain() argument
60 return mlx5_ib_post_send(ibqp, wr, bad_wr, true); in mlx5_ib_post_send_drain()
64 const struct ib_recv_wr *wr, in mlx5_ib_post_recv_nodrain() argument
67 return mlx5_ib_post_recv(ibqp, wr, bad_wr, false); in mlx5_ib_post_recv_nodrain()
71 const struct ib_recv_wr *wr, in mlx5_ib_post_recv_drain() argument
74 return mlx5_ib_post_recv(ibqp, wr, bad_wr, true); in mlx5_ib_post_recv_drain()
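
These mlx5 wrappers only pin the drain flag on the provider's post verbs; consumers reach them through the core ib_post_send()/ib_post_recv() entry points. A minimal sketch of posting one signaled SEND, assuming a connected QP and a buffer already mapped for DMA (dma_addr, len, and lkey are assumptions):

#include <rdma/ib_verbs.h>

/* Sketch: post a single signaled SEND work request. */
static int post_one_send(struct ib_qp *qp, u64 dma_addr, u32 len, u32 lkey)
{
	struct ib_sge sge = {
		.addr   = dma_addr,
		.length = len,
		.lkey   = lkey,
	};
	struct ib_send_wr wr = {
		.wr_id      = 1,	/* returned in the completion */
		.sg_list    = &sge,
		.num_sge    = 1,
		.opcode     = IB_WR_SEND,
		.send_flags = IB_SEND_SIGNALED,
	};
	const struct ib_send_wr *bad_wr;

	/* On error, bad_wr points at the first WR that failed to post. */
	return ib_post_send(qp, &wr, &bad_wr);
}
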
/drivers/infiniband/core/
rw.c
80 reg->inv_wr.next = &reg->reg_wr.wr; in rdma_rw_inv_key()
111 reg->reg_wr.wr.opcode = IB_WR_REG_MR; in rdma_rw_init_one_mr()
151 prev->wr.wr.next = &reg->inv_wr; in rdma_rw_init_mr_wrs()
153 prev->wr.wr.next = &reg->reg_wr.wr; in rdma_rw_init_mr_wrs()
156 reg->reg_wr.wr.next = &reg->wr.wr; in rdma_rw_init_mr_wrs()
158 reg->wr.wr.sg_list = &reg->sge; in rdma_rw_init_mr_wrs()
159 reg->wr.wr.num_sge = 1; in rdma_rw_init_mr_wrs()
160 reg->wr.remote_addr = remote_addr; in rdma_rw_init_mr_wrs()
161 reg->wr.rkey = rkey; in rdma_rw_init_mr_wrs()
163 reg->wr.wr.opcode = IB_WR_RDMA_WRITE; in rdma_rw_init_mr_wrs()
[all …]
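
rdma_rw_init_mr_wrs() builds a short chain (invalidate, re-register, then the RDMA op) linked through the WRs' ->next pointers, so one post submits them in order. A hedged sketch of that linking, assuming already-populated ib_reg_wr and ib_rdma_wr structures:

#include <rdma/ib_verbs.h>

/* Sketch: chain a memory-registration WR ahead of an RDMA WRITE,
 * the same ->next linking rdma_rw uses above. */
static void chain_reg_then_write(struct ib_reg_wr *reg,
				 struct ib_rdma_wr *write)
{
	reg->wr.opcode = IB_WR_REG_MR;
	reg->wr.next   = &write->wr;	/* registration executes first */

	write->wr.opcode = IB_WR_RDMA_WRITE;
	write->wr.next   = NULL;	/* end of chain */
}
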
/drivers/infiniband/hw/vmw_pvrdma/
pvrdma_qp.c
647 const struct ib_reg_wr *wr) in set_reg_seg() argument
649 struct pvrdma_user_mr *mr = to_vmr(wr->mr); in set_reg_seg()
651 wqe_hdr->wr.fast_reg.iova_start = mr->ibmr.iova; in set_reg_seg()
652 wqe_hdr->wr.fast_reg.pl_pdir_dma = mr->pdir.dir_dma; in set_reg_seg()
653 wqe_hdr->wr.fast_reg.page_shift = mr->page_shift; in set_reg_seg()
654 wqe_hdr->wr.fast_reg.page_list_len = mr->npages; in set_reg_seg()
655 wqe_hdr->wr.fast_reg.length = mr->ibmr.length; in set_reg_seg()
656 wqe_hdr->wr.fast_reg.access_flags = wr->access; in set_reg_seg()
657 wqe_hdr->wr.fast_reg.rkey = wr->key; in set_reg_seg()
671 int pvrdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, in pvrdma_post_send() argument
[all …]
/drivers/infiniband/ulp/iser/
iser_memory.c
243 struct ib_reg_wr *wr = &tx_desc->reg_wr; in iser_reg_sig_mr() local
254 iser_inv_rkey(&tx_desc->inv_wr, mr, cqe, &wr->wr); in iser_reg_sig_mr()
266 memset(wr, 0, sizeof(*wr)); in iser_reg_sig_mr()
267 wr->wr.next = &tx_desc->send_wr; in iser_reg_sig_mr()
268 wr->wr.opcode = IB_WR_REG_MR_INTEGRITY; in iser_reg_sig_mr()
269 wr->wr.wr_cqe = cqe; in iser_reg_sig_mr()
270 wr->wr.num_sge = 0; in iser_reg_sig_mr()
271 wr->wr.send_flags = 0; in iser_reg_sig_mr()
272 wr->mr = mr; in iser_reg_sig_mr()
273 wr->key = mr->rkey; in iser_reg_sig_mr()
[all …]
iser_verbs.c
830 struct ib_recv_wr wr; in iser_post_recvl() local
838 wr.wr_cqe = &desc->cqe; in iser_post_recvl()
839 wr.sg_list = &desc->sge; in iser_post_recvl()
840 wr.num_sge = 1; in iser_post_recvl()
841 wr.next = NULL; in iser_post_recvl()
844 ib_ret = ib_post_recv(ib_conn->qp, &wr, NULL); in iser_post_recvl()
858 struct ib_recv_wr *wr; in iser_post_recvm() local
861 for (wr = ib_conn->rx_wr, i = 0; i < count; i++, wr++) { in iser_post_recvm()
864 wr->wr_cqe = &rx_desc->cqe; in iser_post_recvm()
865 wr->sg_list = &rx_desc->rx_sg; in iser_post_recvm()
[all …]
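
iser_post_recvm() links an array of receive WRs so a single ib_post_recv() call replenishes several buffers. A minimal sketch of the same convention; wrs[] and sges[] are caller-provided assumptions, one SGE per WR:

#include <rdma/ib_verbs.h>

/* Sketch: chain `count` receive WRs and post them in a single call. */
static int post_recv_batch(struct ib_qp *qp, struct ib_recv_wr *wrs,
			   struct ib_sge *sges, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		wrs[i].wr_id   = i;
		wrs[i].sg_list = &sges[i];
		wrs[i].num_sge = 1;
		wrs[i].next    = (i + 1 < count) ? &wrs[i + 1] : NULL;
	}
	/* NULL bad_wr is allowed, as in iser_post_recvl() above. */
	return ib_post_recv(qp, wrs, NULL);
}
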
/drivers/infiniband/sw/rxe/
rxe_mw.c
104 if (unlikely(!mr || wqe->wr.wr.mw.length == 0)) { in rxe_check_bind_mw()
138 if (unlikely(wqe->wr.wr.mw.length > mr->length)) { in rxe_check_bind_mw()
144 if (unlikely((wqe->wr.wr.mw.addr < mr->iova) || in rxe_check_bind_mw()
145 ((wqe->wr.wr.mw.addr + wqe->wr.wr.mw.length) > in rxe_check_bind_mw()
159 u32 key = wqe->wr.wr.mw.rkey & 0xff; in rxe_do_bind_mw()
162 mw->access = wqe->wr.wr.mw.access; in rxe_do_bind_mw()
164 mw->addr = wqe->wr.wr.mw.addr; in rxe_do_bind_mw()
165 mw->length = wqe->wr.wr.mw.length; in rxe_do_bind_mw()
191 u32 mw_rkey = wqe->wr.wr.mw.mw_rkey; in rxe_bind_mw()
192 u32 mr_lkey = wqe->wr.wr.mw.mr_lkey; in rxe_bind_mw()
[all …]
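
rxe_check_bind_mw() rejects a bind whose window falls outside the parent MR. The range test from the hits above, restated as a hedged standalone predicate:

#include <linux/types.h>

/* Sketch: a memory-window bind [addr, addr + length) must be non-empty
 * and lie entirely inside the parent MR's [iova, iova + length) range. */
static bool mw_range_ok(u64 addr, u64 length, u64 mr_iova, u64 mr_length)
{
	return length != 0 && length <= mr_length &&
	       addr >= mr_iova &&
	       addr + length <= mr_iova + mr_length;
}
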
rxe_verbs.c
356 static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr, in rxe_post_srq_recv() argument
365 while (wr) { in rxe_post_srq_recv()
366 err = post_one_recv(&srq->rq, wr); in rxe_post_srq_recv()
369 wr = wr->next; in rxe_post_srq_recv()
375 *bad_wr = wr; in rxe_post_srq_recv()
500 static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr, in init_send_wr() argument
503 wr->wr_id = ibwr->wr_id; in init_send_wr()
504 wr->num_sge = ibwr->num_sge; in init_send_wr()
505 wr->opcode = ibwr->opcode; in init_send_wr()
506 wr->send_flags = ibwr->send_flags; in init_send_wr()
[all …]
/drivers/infiniband/sw/siw/
siw_verbs.c
674 static int siw_sq_flush_wr(struct siw_qp *qp, const struct ib_send_wr *wr, in siw_sq_flush_wr() argument
679 while (wr) { in siw_sq_flush_wr()
682 switch (wr->opcode) { in siw_sq_flush_wr()
712 sqe.id = wr->wr_id; in siw_sq_flush_wr()
718 *bad_wr = wr; in siw_sq_flush_wr()
721 wr = wr->next; in siw_sq_flush_wr()
727 static int siw_rq_flush_wr(struct siw_qp *qp, const struct ib_recv_wr *wr, in siw_rq_flush_wr() argument
733 while (wr) { in siw_rq_flush_wr()
734 rqe.id = wr->wr_id; in siw_rq_flush_wr()
738 *bad_wr = wr; in siw_rq_flush_wr()
[all …]
/drivers/infiniband/hw/mthca/
mthca_qp.c
1501 const struct ib_ud_wr *wr, in build_mlx_header() argument
1511 mthca_ah_grh_present(to_mah(wr->ah)), 0, 0, 0, in build_mlx_header()
1514 err = mthca_read_ah(dev, to_mah(wr->ah), &sqp->ud_header); in build_mlx_header()
1525 switch (wr->wr.opcode) { in build_mlx_header()
1533 sqp->ud_header.immediate_data = wr->wr.ex.imm_data; in build_mlx_header()
1542 sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED); in build_mlx_header()
1547 ib_get_cached_pkey(&dev->ib_dev, qp->port, wr->pkey_index, in build_mlx_header()
1550 sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn); in build_mlx_header()
1552 sqp->ud_header.deth.qkey = cpu_to_be32(wr->remote_qkey & 0x80000000 ? in build_mlx_header()
1553 sqp->qkey : wr->remote_qkey); in build_mlx_header()
[all …]
mthca_srq.c
483 int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr, in mthca_tavor_post_srq_recv() argument
502 for (nreq = 0; wr; wr = wr->next) { in mthca_tavor_post_srq_recv()
510 *bad_wr = wr; in mthca_tavor_post_srq_recv()
522 if (unlikely(wr->num_sge > srq->max_gs)) { in mthca_tavor_post_srq_recv()
524 *bad_wr = wr; in mthca_tavor_post_srq_recv()
529 for (i = 0; i < wr->num_sge; ++i) { in mthca_tavor_post_srq_recv()
530 mthca_set_data_seg(wqe, wr->sg_list + i); in mthca_tavor_post_srq_recv()
540 srq->wrid[ind] = wr->wr_id; in mthca_tavor_post_srq_recv()
577 int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr, in mthca_arbel_post_srq_recv() argument
592 for (nreq = 0; wr; ++nreq, wr = wr->next) { in mthca_arbel_post_srq_recv()
[all …]
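
Both mthca variants walk the caller's singly linked WR list and, on failure, report the offending entry through *bad_wr. From the consumer side, a shared-receive-queue post looks like this minimal sketch (one buffer, one SGE):

#include <rdma/ib_verbs.h>

/* Sketch: replenish a single buffer on a shared receive queue. */
static int srq_post_one(struct ib_srq *srq, struct ib_sge *sge, u64 wr_id)
{
	struct ib_recv_wr wr = {
		.wr_id   = wr_id,
		.sg_list = sge,
		.num_sge = 1,
		.next    = NULL,
	};

	return ib_post_srq_recv(srq, &wr, NULL);
}
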
/drivers/infiniband/hw/cxgb4/
qp.c
415 const struct ib_send_wr *wr, int max, u32 *plenp) in build_immd() argument
423 for (i = 0; i < wr->num_sge; i++) { in build_immd()
424 if ((plen + wr->sg_list[i].length) > max) in build_immd()
426 srcp = (u8 *)(unsigned long)wr->sg_list[i].addr; in build_immd()
427 plen += wr->sg_list[i].length; in build_immd()
428 rem = wr->sg_list[i].length; in build_immd()
490 const struct ib_send_wr *wr, u8 *len16) in build_rdma_send() argument
496 if (wr->num_sge > T4_MAX_SEND_SGE) in build_rdma_send()
498 switch (wr->opcode) { in build_rdma_send()
500 if (wr->send_flags & IB_SEND_SOLICITED) in build_rdma_send()
[all …]
/drivers/infiniband/ulp/rtrs/
rtrs.c
75 struct ib_recv_wr wr; in rtrs_iu_post_recv() local
87 wr = (struct ib_recv_wr) { in rtrs_iu_post_recv()
93 return ib_post_recv(con->qp, &wr, NULL); in rtrs_iu_post_recv()
99 struct ib_recv_wr wr; in rtrs_post_recv_empty() local
101 wr = (struct ib_recv_wr) { in rtrs_post_recv_empty()
105 return ib_post_recv(con->qp, &wr, NULL); in rtrs_post_recv_empty()
110 struct ib_send_wr *wr, struct ib_send_wr *tail) in rtrs_post_send() argument
117 next->next = wr; in rtrs_post_send()
119 head = wr; in rtrs_post_send()
123 wr->next = tail; in rtrs_post_send()
[all …]
rtrs-srv.c
210 struct ib_rdma_wr *wr = NULL; in rdma_write_sg() local
227 wr = &id->tx_wr; in rdma_write_sg()
243 wr->wr.sg_list = plist; in rdma_write_sg()
244 wr->wr.num_sge = 1; in rdma_write_sg()
245 wr->remote_addr = le64_to_cpu(id->rd_msg->desc[0].addr); in rdma_write_sg()
246 wr->rkey = le32_to_cpu(id->rd_msg->desc[0].key); in rdma_write_sg()
248 rkey = wr->rkey; in rdma_write_sg()
251 WARN_ON_ONCE(rkey != wr->rkey); in rdma_write_sg()
253 wr->wr.opcode = IB_WR_RDMA_WRITE; in rdma_write_sg()
254 wr->wr.wr_cqe = &io_comp_cqe; in rdma_write_sg()
[all …]
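
rdma_write_sg() fills an ib_rdma_wr: the embedded send WR carries the local scatter list, while remote_addr/rkey name the peer buffer taken from the request descriptor. A hedged sketch of the same one-SGE RDMA WRITE:

#include <rdma/ib_verbs.h>

/* Sketch: post a one-SGE RDMA WRITE to a peer buffer. */
static int post_rdma_write(struct ib_qp *qp, struct ib_sge *sge,
			   u64 remote_addr, u32 rkey, struct ib_cqe *cqe)
{
	struct ib_rdma_wr wr = {
		.wr = {
			.wr_cqe     = cqe,
			.sg_list    = sge,
			.num_sge    = 1,
			.opcode     = IB_WR_RDMA_WRITE,
			.send_flags = IB_SEND_SIGNALED,
		},
		.remote_addr = remote_addr,
		.rkey        = rkey,
	};

	return ib_post_send(qp, &wr.wr, NULL);
}
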
/drivers/infiniband/sw/rdmavt/
qp.c
625 for (i = 0; i < wqe->wr.num_sge; i++) { in rvt_swqe_has_lkey()
1781 int rvt_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, in rvt_post_recv() argument
1792 *bad_wr = wr; in rvt_post_recv()
1796 for (; wr; wr = wr->next) { in rvt_post_recv()
1801 if ((unsigned)wr->num_sge > qp->r_rq.max_sge) { in rvt_post_recv()
1802 *bad_wr = wr; in rvt_post_recv()
1812 *bad_wr = wr; in rvt_post_recv()
1821 wc.wr_id = wr->wr_id; in rvt_post_recv()
1826 wqe->wr_id = wr->wr_id; in rvt_post_recv()
1827 wqe->num_sge = wr->num_sge; in rvt_post_recv()
[all …]
/drivers/infiniband/hw/mlx4/
qp.c
2826 const struct ib_ud_wr *wr, in build_sriov_qp0_header() argument
2834 struct mlx4_ib_ah *ah = to_mah(wr->ah); in build_sriov_qp0_header()
2843 if (wr->wr.opcode != IB_WR_SEND) in build_sriov_qp0_header()
2848 for (i = 0; i < wr->wr.num_sge; ++i) in build_sriov_qp0_header()
2849 send_size += wr->wr.sg_list[i].length; in build_sriov_qp0_header()
2874 sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED); in build_sriov_qp0_header()
2880 sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn); in build_sriov_qp0_header()
2978 static int build_mlx_header(struct mlx4_ib_qp *qp, const struct ib_ud_wr *wr, in build_mlx_header() argument
2987 struct mlx4_ib_ah *ah = to_mah(wr->ah); in build_mlx_header()
3003 for (i = 0; i < wr->wr.num_sge; ++i) in build_mlx_header()
[all …]
/drivers/scsi/csiostor/
csio_scsi.c
205 struct fw_scsi_cmd_wr *wr = (struct fw_scsi_cmd_wr *)addr; in csio_scsi_init_cmd_wr() local
209 wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_SCSI_CMD_WR) | in csio_scsi_init_cmd_wr()
211 wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) | in csio_scsi_init_cmd_wr()
215 wr->cookie = (uintptr_t) req; in csio_scsi_init_cmd_wr()
216 wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx)); in csio_scsi_init_cmd_wr()
217 wr->tmo_val = (uint8_t) req->tmo; in csio_scsi_init_cmd_wr()
218 wr->r3 = 0; in csio_scsi_init_cmd_wr()
219 memset(&wr->r5, 0, 8); in csio_scsi_init_cmd_wr()
225 wr->rsp_dmalen = cpu_to_be32(dma_buf->len); in csio_scsi_init_cmd_wr()
226 wr->rsp_dmaaddr = cpu_to_be64(dma_buf->paddr); in csio_scsi_init_cmd_wr()
[all …]
/drivers/infiniband/hw/qedr/
qedr_roce_cm.c
542 int qedr_gsi_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, in qedr_gsi_post_send() argument
552 *bad_wr = wr; in qedr_gsi_post_send()
559 if (wr->num_sge > RDMA_MAX_SGE_PER_SQ_WQE) { in qedr_gsi_post_send()
561 wr->num_sge, RDMA_MAX_SGE_PER_SQ_WQE); in qedr_gsi_post_send()
566 if (wr->opcode != IB_WR_SEND) { in qedr_gsi_post_send()
569 wr->opcode); in qedr_gsi_post_send()
576 rc = qedr_gsi_build_packet(dev, qp, wr, &pkt); in qedr_gsi_post_send()
585 qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id; in qedr_gsi_post_send()
588 "gsi post send: opcode=%d, wr_id=%llx\n", wr->opcode, in qedr_gsi_post_send()
589 wr->wr_id); in qedr_gsi_post_send()
[all …]
/drivers/infiniband/hw/bnxt_re/
ib_verbs.c
1754 int bnxt_re_post_srq_recv(struct ib_srq *ib_srq, const struct ib_recv_wr *wr, in bnxt_re_post_srq_recv() argument
1764 while (wr) { in bnxt_re_post_srq_recv()
1766 wqe.num_sge = wr->num_sge; in bnxt_re_post_srq_recv()
1767 bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge); in bnxt_re_post_srq_recv()
1768 wqe.wr_id = wr->wr_id; in bnxt_re_post_srq_recv()
1773 *bad_wr = wr; in bnxt_re_post_srq_recv()
1776 wr = wr->next; in bnxt_re_post_srq_recv()
2118 const struct ib_send_wr *wr, in bnxt_re_build_qp1_send_v2() argument
2122 struct bnxt_re_ah *ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah, in bnxt_re_build_qp1_send_v2()
2219 if (wr->opcode == IB_WR_SEND_WITH_IMM) { in bnxt_re_build_qp1_send_v2()
[all …]
/drivers/misc/bcm-vk/
bcm_vk_tty.c
27 u32 wr; member
35 #define VK_BAR_CHAN_WR(v, DIR) VK_BAR_CHAN(v, DIR, wr)
69 int wr; in bcm_vk_tty_wq_handler() local
88 wr = vkread32(vk, BAR_1, VK_BAR_CHAN_WR(vktty, from)); in bcm_vk_tty_wq_handler()
94 if (wr >= vktty->from_size) { in bcm_vk_tty_wq_handler()
97 i, wr, vktty->from_size); in bcm_vk_tty_wq_handler()
106 while (vk->tty[i].rd != wr) { in bcm_vk_tty_wq_handler()
158 vktty->wr = vkread32(vk, BAR_1, VK_BAR_CHAN_WR(vktty, to)); in bcm_vk_tty_open()
205 VK_BAR_CHAN_DATA(vktty, to, vktty->wr)); in bcm_vk_tty_write()
206 vktty->wr++; in bcm_vk_tty_write()
[all …]
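
Here `wr` is a TTY ring-buffer write index mirrored over BAR memory, not a work request: the producer advances wr, and the consumer drains bytes until rd catches up (the `while (vk->tty[i].rd != wr)` loop above). A generic sketch of that convention, assuming a power-of-two ring size and a hypothetical consume_byte() hook; the real driver's wrap handling differs:

#include <linux/types.h>

/* Sketch: drain a ring with read/write indices; wraps by masking. */
static int ring_drain(const u8 *buf, u32 size, u32 *rd, u32 wr)
{
	int n = 0;

	while (*rd != wr) {
		consume_byte(buf[*rd]);		/* hypothetical hook */
		*rd = (*rd + 1) & (size - 1);	/* size: power of two */
		n++;
	}
	return n;
}
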
/drivers/infiniband/hw/ocrdma/
ocrdma_verbs.c
1876 const struct ib_send_wr *wr) in ocrdma_build_ud_hdr() argument
1880 struct ocrdma_ah *ah = get_ocrdma_ah(ud_wr(wr)->ah); in ocrdma_build_ud_hdr()
1882 ud_hdr->rsvd_dest_qpn = ud_wr(wr)->remote_qpn; in ocrdma_build_ud_hdr()
1886 ud_hdr->qkey = ud_wr(wr)->remote_qkey; in ocrdma_build_ud_hdr()
1923 const struct ib_send_wr *wr, u32 wqe_size) in ocrdma_build_inline_sges() argument
1928 if (wr->send_flags & IB_SEND_INLINE && qp->qp_type != IB_QPT_UD) { in ocrdma_build_inline_sges()
1929 hdr->total_len = ocrdma_sglist_len(wr->sg_list, wr->num_sge); in ocrdma_build_inline_sges()
1937 for (i = 0; i < wr->num_sge; i++) { in ocrdma_build_inline_sges()
1939 (void *)(unsigned long)wr->sg_list[i].addr, in ocrdma_build_inline_sges()
1940 wr->sg_list[i].length); in ocrdma_build_inline_sges()
[all …]
/drivers/infiniband/hw/hfi1/
uc.c
88 if (wqe->wr.opcode == IB_WR_REG_MR || in hfi1_make_uc_req()
89 wqe->wr.opcode == IB_WR_LOCAL_INV) { in hfi1_make_uc_req()
97 if (!(wqe->wr.send_flags & RVT_SEND_COMPLETION_ONLY)) { in hfi1_make_uc_req()
99 qp, wqe->wr.ex.invalidate_rkey); in hfi1_make_uc_req()
114 qp->s_sge.num_sge = wqe->wr.num_sge; in hfi1_make_uc_req()
118 switch (wqe->wr.opcode) { in hfi1_make_uc_req()
126 if (wqe->wr.opcode == IB_WR_SEND) { in hfi1_make_uc_req()
132 ohdr->u.imm_data = wqe->wr.ex.imm_data; in hfi1_make_uc_req()
135 if (wqe->wr.send_flags & IB_SEND_SOLICITED) in hfi1_make_uc_req()
155 if (wqe->wr.opcode == IB_WR_RDMA_WRITE) { in hfi1_make_uc_req()
[all …]
rc.c
495 if ((wqe->wr.send_flags & IB_SEND_FENCE) && in hfi1_make_rc_req()
497 (wqe->wr.opcode != IB_WR_TID_RDMA_READ || in hfi1_make_rc_req()
506 if (wqe->wr.opcode == IB_WR_REG_MR || in hfi1_make_rc_req()
507 wqe->wr.opcode == IB_WR_LOCAL_INV) { in hfi1_make_rc_req()
517 if (!(wqe->wr.send_flags & in hfi1_make_rc_req()
521 wqe->wr.ex.invalidate_rkey); in hfi1_make_rc_req()
552 switch (wqe->wr.opcode) { in hfi1_make_rc_req()
564 if (wqe->wr.opcode == IB_WR_SEND) { in hfi1_make_rc_req()
566 } else if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) { in hfi1_make_rc_req()
569 ohdr->u.imm_data = wqe->wr.ex.imm_data; in hfi1_make_rc_req()
[all …]
