
Searched refs:wr (Results 1 – 25 of 119) sorted by relevance


/drivers/media/dvb-frontends/
dib3000mb.c
153 wr(DIB3000MB_REG_LOCK1_MASK, DIB3000MB_LOCK1_SEARCH_4); in dib3000mb_set_frontend()
159 wr(DIB3000MB_REG_FFT, DIB3000_TRANSMISSION_MODE_2K); in dib3000mb_set_frontend()
163 wr(DIB3000MB_REG_FFT, DIB3000_TRANSMISSION_MODE_8K); in dib3000mb_set_frontend()
176 wr(DIB3000MB_REG_GUARD_TIME, DIB3000_GUARD_TIME_1_32); in dib3000mb_set_frontend()
180 wr(DIB3000MB_REG_GUARD_TIME, DIB3000_GUARD_TIME_1_16); in dib3000mb_set_frontend()
184 wr(DIB3000MB_REG_GUARD_TIME, DIB3000_GUARD_TIME_1_8); in dib3000mb_set_frontend()
188 wr(DIB3000MB_REG_GUARD_TIME, DIB3000_GUARD_TIME_1_4); in dib3000mb_set_frontend()
201 wr(DIB3000MB_REG_DDS_INV, DIB3000_DDS_INVERSION_OFF); in dib3000mb_set_frontend()
208 wr(DIB3000MB_REG_DDS_INV, DIB3000_DDS_INVERSION_ON); in dib3000mb_set_frontend()
218 wr(DIB3000MB_REG_QAM, DIB3000_CONSTELLATION_QPSK); in dib3000mb_set_frontend()
[all …]
/drivers/infiniband/core/
rw.c
83 reg->inv_wr.next = &reg->reg_wr.wr; in rdma_rw_init_one_mr()
95 reg->reg_wr.wr.opcode = IB_WR_REG_MR; in rdma_rw_init_one_mr()
134 prev->wr.wr.next = &reg->inv_wr; in rdma_rw_init_mr_wrs()
136 prev->wr.wr.next = &reg->reg_wr.wr; in rdma_rw_init_mr_wrs()
139 reg->reg_wr.wr.next = &reg->wr.wr; in rdma_rw_init_mr_wrs()
141 reg->wr.wr.sg_list = &reg->sge; in rdma_rw_init_mr_wrs()
142 reg->wr.wr.num_sge = 1; in rdma_rw_init_mr_wrs()
143 reg->wr.remote_addr = remote_addr; in rdma_rw_init_mr_wrs()
144 reg->wr.rkey = rkey; in rdma_rw_init_mr_wrs()
146 reg->wr.wr.opcode = IB_WR_RDMA_WRITE; in rdma_rw_init_mr_wrs()
[all …]
/drivers/infiniband/hw/cxgb3/
iwch_qp.c
42 static int build_rdma_send(union t3_wr *wqe, struct ib_send_wr *wr, in build_rdma_send() argument
48 switch (wr->opcode) { in build_rdma_send()
50 if (wr->send_flags & IB_SEND_SOLICITED) in build_rdma_send()
57 if (wr->send_flags & IB_SEND_SOLICITED) in build_rdma_send()
61 wqe->send.rem_stag = cpu_to_be32(wr->ex.invalidate_rkey); in build_rdma_send()
66 if (wr->num_sge > T3_MAX_SGE) in build_rdma_send()
72 for (i = 0; i < wr->num_sge; i++) { in build_rdma_send()
73 if ((plen + wr->sg_list[i].length) < plen) in build_rdma_send()
76 plen += wr->sg_list[i].length; in build_rdma_send()
77 wqe->send.sgl[i].stag = cpu_to_be32(wr->sg_list[i].lkey); in build_rdma_send()
[all …]
/drivers/infiniband/hw/mlx5/
gsi.c
76 struct mlx5_ib_gsi_wr *wr; in generate_completions() local
81 wr = &gsi->outstanding_wrs[index % gsi->cap.max_send_wr]; in generate_completions()
83 if (!wr->completed) in generate_completions()
87 wr->send_flags & IB_SEND_SIGNALED) in generate_completions()
88 WARN_ON_ONCE(mlx5_ib_generate_wc(gsi_cq, &wr->wc)); in generate_completions()
90 wr->completed = false; in generate_completions()
99 struct mlx5_ib_gsi_wr *wr = in handle_single_completion() local
105 wr->completed = true; in handle_single_completion()
106 wr_id = wr->wc.wr_id; in handle_single_completion()
107 wr->wc = *wc; in handle_single_completion()
[all …]
qp.c
2982 struct ib_send_wr *wr, void *qend, in set_eth_seg() argument
2989 if (wr->send_flags & IB_SEND_IP_CSUM) in set_eth_seg()
2996 if (wr->opcode == IB_WR_LSO) { in set_eth_seg()
2997 struct ib_ud_wr *ud_wr = container_of(wr, struct ib_ud_wr, wr); in set_eth_seg()
3035 struct ib_send_wr *wr) in set_datagram_seg() argument
3037 memcpy(&dseg->av, &to_mah(ud_wr(wr)->ah)->av, sizeof(struct mlx5_av)); in set_datagram_seg()
3038 dseg->av.dqp_dct = cpu_to_be32(ud_wr(wr)->remote_qpn | MLX5_EXTENDED_UD_AV); in set_datagram_seg()
3039 dseg->av.key.qkey.qkey = cpu_to_be32(ud_wr(wr)->remote_qkey); in set_datagram_seg()
3194 struct ib_send_wr *wr) in set_reg_umr_segment() argument
3196 struct mlx5_umr_wr *umrwr = umr_wr(wr); in set_reg_umr_segment()
[all …]
mr.c
796 static void prep_umr_wqe_common(struct ib_pd *pd, struct ib_send_wr *wr, in prep_umr_wqe_common() argument
801 struct mlx5_umr_wr *umrwr = umr_wr(wr); in prep_umr_wqe_common()
807 wr->next = NULL; in prep_umr_wqe_common()
808 wr->sg_list = sg; in prep_umr_wqe_common()
810 wr->num_sge = 1; in prep_umr_wqe_common()
812 wr->num_sge = 0; in prep_umr_wqe_common()
814 wr->opcode = MLX5_IB_WR_UMR; in prep_umr_wqe_common()
821 static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr, in prep_umr_reg_wqe() argument
826 struct mlx5_umr_wr *umrwr = umr_wr(wr); in prep_umr_reg_wqe()
828 prep_umr_wqe_common(pd, wr, sg, dma, n, key, page_shift); in prep_umr_reg_wqe()
[all …]
srq.c
430 int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr, in mlx5_ib_post_srq_recv() argument
447 *bad_wr = wr; in mlx5_ib_post_srq_recv()
451 for (nreq = 0; wr; nreq++, wr = wr->next) { in mlx5_ib_post_srq_recv()
452 if (unlikely(wr->num_sge > srq->msrq.max_gs)) { in mlx5_ib_post_srq_recv()
454 *bad_wr = wr; in mlx5_ib_post_srq_recv()
460 *bad_wr = wr; in mlx5_ib_post_srq_recv()
464 srq->wrid[srq->head] = wr->wr_id; in mlx5_ib_post_srq_recv()
470 for (i = 0; i < wr->num_sge; i++) { in mlx5_ib_post_srq_recv()
471 scat[i].byte_count = cpu_to_be32(wr->sg_list[i].length); in mlx5_ib_post_srq_recv()
472 scat[i].lkey = cpu_to_be32(wr->sg_list[i].lkey); in mlx5_ib_post_srq_recv()
[all …]
/drivers/infiniband/hw/mthca/
mthca_qp.c
1479 int ind, struct ib_ud_wr *wr, in build_mlx_header() argument
1488 mthca_ah_grh_present(to_mah(wr->ah)), 0, 0, 0, in build_mlx_header()
1491 err = mthca_read_ah(dev, to_mah(wr->ah), &sqp->ud_header); in build_mlx_header()
1502 switch (wr->wr.opcode) { in build_mlx_header()
1510 sqp->ud_header.immediate_data = wr->wr.ex.imm_data; in build_mlx_header()
1519 sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED); in build_mlx_header()
1525 wr->pkey_index, &pkey); in build_mlx_header()
1527 sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn); in build_mlx_header()
1529 sqp->ud_header.deth.qkey = cpu_to_be32(wr->remote_qkey & 0x80000000 ? in build_mlx_header()
1530 sqp->qkey : wr->remote_qkey); in build_mlx_header()
[all …]
mthca_srq.c
475 int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr, in mthca_tavor_post_srq_recv() argument
494 for (nreq = 0; wr; wr = wr->next) { in mthca_tavor_post_srq_recv()
502 *bad_wr = wr; in mthca_tavor_post_srq_recv()
514 if (unlikely(wr->num_sge > srq->max_gs)) { in mthca_tavor_post_srq_recv()
516 *bad_wr = wr; in mthca_tavor_post_srq_recv()
521 for (i = 0; i < wr->num_sge; ++i) { in mthca_tavor_post_srq_recv()
522 mthca_set_data_seg(wqe, wr->sg_list + i); in mthca_tavor_post_srq_recv()
532 srq->wrid[ind] = wr->wr_id; in mthca_tavor_post_srq_recv()
575 int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr, in mthca_arbel_post_srq_recv() argument
590 for (nreq = 0; wr; ++nreq, wr = wr->next) { in mthca_arbel_post_srq_recv()
[all …]
/drivers/infiniband/sw/rxe/
rxe_verbs.c
521 static int rxe_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr, in rxe_post_srq_recv() argument
530 while (wr) { in rxe_post_srq_recv()
531 err = post_one_recv(&srq->rq, wr); in rxe_post_srq_recv()
534 wr = wr->next; in rxe_post_srq_recv()
540 *bad_wr = wr; in rxe_post_srq_recv()
656 static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr, in init_send_wr() argument
659 wr->wr_id = ibwr->wr_id; in init_send_wr()
660 wr->num_sge = ibwr->num_sge; in init_send_wr()
661 wr->opcode = ibwr->opcode; in init_send_wr()
662 wr->send_flags = ibwr->send_flags; in init_send_wr()
[all …]
rxe_req.c
54 wqe->wr.opcode); in retry_first_write_send()
56 if (wqe->wr.send_flags & IB_SEND_INLINE) { in retry_first_write_send()
86 mask = wr_opcode_mask(wqe->wr.opcode, qp); in req_retry()
95 wqe->wr.wr.atomic.remote_addr : in req_retry()
97 wqe->wr.wr.rdma.remote_addr : in req_retry()
180 if (unlikely((wqe->wr.send_flags & IB_SEND_FENCE) && in req_next_wqe()
186 wqe->mask = wr_opcode_mask(wqe->wr.opcode, qp); in req_next_wqe()
387 struct rxe_send_wr *ibwr = &wqe->wr; in init_req_packet()
424 port->pkey_tbl[ibwr->wr.ud.pkey_index] : in init_req_packet()
427 qp_num = (pkt->mask & RXE_DETH_MASK) ? ibwr->wr.ud.remote_qpn : in init_req_packet()
[all …]
/drivers/infiniband/ulp/iser/
iser_memory.c
392 struct ib_sig_handover_wr *wr; in iser_reg_sig_mr() local
408 wr = sig_handover_wr(iser_tx_next_wr(tx_desc)); in iser_reg_sig_mr()
409 wr->wr.opcode = IB_WR_REG_SIG_MR; in iser_reg_sig_mr()
410 wr->wr.wr_cqe = cqe; in iser_reg_sig_mr()
411 wr->wr.sg_list = &data_reg->sge; in iser_reg_sig_mr()
412 wr->wr.num_sge = 1; in iser_reg_sig_mr()
413 wr->wr.send_flags = 0; in iser_reg_sig_mr()
414 wr->sig_attrs = sig_attrs; in iser_reg_sig_mr()
415 wr->sig_mr = mr; in iser_reg_sig_mr()
417 wr->prot = &prot_reg->sge; in iser_reg_sig_mr()
[all …]
iser_verbs.c
1012 struct ib_recv_wr wr, *wr_failed; in iser_post_recvl() local
1020 wr.wr_cqe = &desc->cqe; in iser_post_recvl()
1021 wr.sg_list = &desc->sge; in iser_post_recvl()
1022 wr.num_sge = 1; in iser_post_recvl()
1023 wr.next = NULL; in iser_post_recvl()
1026 ib_ret = ib_post_recv(ib_conn->qp, &wr, &wr_failed); in iser_post_recvl()
1040 struct ib_recv_wr *wr, *wr_failed; in iser_post_recvm() local
1043 for (wr = ib_conn->rx_wr, i = 0; i < count; i++, wr++) { in iser_post_recvm()
1046 wr->wr_cqe = &rx_desc->cqe; in iser_post_recvm()
1047 wr->sg_list = &rx_desc->rx_sg; in iser_post_recvm()
[all …]
/drivers/infiniband/sw/rdmavt/
qp.c
412 for (i = 0; i < wqe->wr.num_sge; i++) { in rvt_clear_mr_refs()
1406 int rvt_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, in rvt_post_recv() argument
1417 *bad_wr = wr; in rvt_post_recv()
1421 for (; wr; wr = wr->next) { in rvt_post_recv()
1426 if ((unsigned)wr->num_sge > qp->r_rq.max_sge) { in rvt_post_recv()
1427 *bad_wr = wr; in rvt_post_recv()
1437 *bad_wr = wr; in rvt_post_recv()
1446 wc.wr_id = wr->wr_id; in rvt_post_recv()
1451 wqe->wr_id = wr->wr_id; in rvt_post_recv()
1452 wqe->num_sge = wr->num_sge; in rvt_post_recv()
[all …]
/drivers/infiniband/hw/cxgb4/
qp.c
390 struct ib_send_wr *wr, int max, u32 *plenp) in build_immd() argument
398 for (i = 0; i < wr->num_sge; i++) { in build_immd()
399 if ((plen + wr->sg_list[i].length) > max) in build_immd()
401 srcp = (u8 *)(unsigned long)wr->sg_list[i].addr; in build_immd()
402 plen += wr->sg_list[i].length; in build_immd()
403 rem = wr->sg_list[i].length; in build_immd()
460 struct ib_send_wr *wr, u8 *len16) in build_rdma_send() argument
466 if (wr->num_sge > T4_MAX_SEND_SGE) in build_rdma_send()
468 switch (wr->opcode) { in build_rdma_send()
470 if (wr->send_flags & IB_SEND_SOLICITED) in build_rdma_send()
[all …]
/drivers/infiniband/hw/qedr/
qedr_cm.c
425 int qedr_gsi_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, in qedr_gsi_post_send() argument
436 *bad_wr = wr; in qedr_gsi_post_send()
443 if (wr->num_sge > RDMA_MAX_SGE_PER_SQ_WQE) { in qedr_gsi_post_send()
445 wr->num_sge, RDMA_MAX_SGE_PER_SQ_WQE); in qedr_gsi_post_send()
450 if (wr->opcode != IB_WR_SEND) { in qedr_gsi_post_send()
453 wr->opcode); in qedr_gsi_post_send()
462 rc = qedr_gsi_build_packet(dev, qp, wr, &pkt); in qedr_gsi_post_send()
470 qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id; in qedr_gsi_post_send()
474 wr->opcode, in_irq(), irqs_disabled(), wr->wr_id); in qedr_gsi_post_send()
489 *bad_wr = wr; in qedr_gsi_post_send()
[all …]
verbs.c
2526 struct ib_send_wr *wr, in qedr_prepare_sq_inline_data() argument
2530 u32 data_size = sge_data_len(wr->sg_list, wr->num_sge); in qedr_prepare_sq_inline_data()
2536 *bad_wr = wr; in qedr_prepare_sq_inline_data()
2550 for (i = 0; i < wr->num_sge; i++) { in qedr_prepare_sq_inline_data()
2551 u32 len = wr->sg_list[i].length; in qedr_prepare_sq_inline_data()
2552 void *src = (void *)(uintptr_t)wr->sg_list[i].addr; in qedr_prepare_sq_inline_data()
2611 struct ib_send_wr *wr) in qedr_prepare_sq_sges() argument
2616 for (i = 0; i < wr->num_sge; i++) { in qedr_prepare_sq_sges()
2619 DMA_REGPAIR_LE(sge->addr, wr->sg_list[i].addr); in qedr_prepare_sq_sges()
2620 sge->l_key = cpu_to_le32(wr->sg_list[i].lkey); in qedr_prepare_sq_sges()
[all …]
/drivers/infiniband/hw/mlx4/
qp.c
2297 struct ib_ud_wr *wr, in build_sriov_qp0_header() argument
2304 struct mlx4_ib_ah *ah = to_mah(wr->ah); in build_sriov_qp0_header()
2312 if (wr->wr.opcode != IB_WR_SEND) in build_sriov_qp0_header()
2317 for (i = 0; i < wr->wr.num_sge; ++i) in build_sriov_qp0_header()
2318 send_size += wr->wr.sg_list[i].length; in build_sriov_qp0_header()
2343 sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED); in build_sriov_qp0_header()
2347 sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn); in build_sriov_qp0_header()
2426 static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_ud_wr *wr, in build_mlx_header() argument
2433 struct mlx4_ib_ah *ah = to_mah(wr->ah); in build_mlx_header()
2449 for (i = 0; i < wr->wr.num_sge; ++i) in build_mlx_header()
[all …]
srq.c
315 int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr, in mlx4_ib_post_srq_recv() argument
330 *bad_wr = wr; in mlx4_ib_post_srq_recv()
335 for (nreq = 0; wr; ++nreq, wr = wr->next) { in mlx4_ib_post_srq_recv()
336 if (unlikely(wr->num_sge > srq->msrq.max_gs)) { in mlx4_ib_post_srq_recv()
338 *bad_wr = wr; in mlx4_ib_post_srq_recv()
344 *bad_wr = wr; in mlx4_ib_post_srq_recv()
348 srq->wrid[srq->head] = wr->wr_id; in mlx4_ib_post_srq_recv()
354 for (i = 0; i < wr->num_sge; ++i) { in mlx4_ib_post_srq_recv()
355 scat[i].byte_count = cpu_to_be32(wr->sg_list[i].length); in mlx4_ib_post_srq_recv()
356 scat[i].lkey = cpu_to_be32(wr->sg_list[i].lkey); in mlx4_ib_post_srq_recv()
[all …]
/drivers/scsi/csiostor/
csio_scsi.c
205 struct fw_scsi_cmd_wr *wr = (struct fw_scsi_cmd_wr *)addr; in csio_scsi_init_cmd_wr() local
209 wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_SCSI_CMD_WR) | in csio_scsi_init_cmd_wr()
211 wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) | in csio_scsi_init_cmd_wr()
215 wr->cookie = (uintptr_t) req; in csio_scsi_init_cmd_wr()
216 wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx)); in csio_scsi_init_cmd_wr()
217 wr->tmo_val = (uint8_t) req->tmo; in csio_scsi_init_cmd_wr()
218 wr->r3 = 0; in csio_scsi_init_cmd_wr()
219 memset(&wr->r5, 0, 8); in csio_scsi_init_cmd_wr()
225 wr->rsp_dmalen = cpu_to_be32(dma_buf->len); in csio_scsi_init_cmd_wr()
226 wr->rsp_dmaaddr = cpu_to_be64(dma_buf->paddr); in csio_scsi_init_cmd_wr()
[all …]
/drivers/infiniband/hw/hfi1/
uc.c
118 if (wqe->wr.opcode == IB_WR_REG_MR || in hfi1_make_uc_req()
119 wqe->wr.opcode == IB_WR_LOCAL_INV) { in hfi1_make_uc_req()
127 if (!(wqe->wr.send_flags & RVT_SEND_COMPLETION_ONLY)) { in hfi1_make_uc_req()
129 qp, wqe->wr.ex.invalidate_rkey); in hfi1_make_uc_req()
145 qp->s_sge.num_sge = wqe->wr.num_sge; in hfi1_make_uc_req()
149 switch (wqe->wr.opcode) { in hfi1_make_uc_req()
157 if (wqe->wr.opcode == IB_WR_SEND) { in hfi1_make_uc_req()
163 ohdr->u.imm_data = wqe->wr.ex.imm_data; in hfi1_make_uc_req()
166 if (wqe->wr.send_flags & IB_SEND_SOLICITED) in hfi1_make_uc_req()
186 if (wqe->wr.opcode == IB_WR_RDMA_WRITE) { in hfi1_make_uc_req()
[all …]
rc.c
195 ss->num_sge = wqe->wr.num_sge; in restart_sge()
468 if ((wqe->wr.send_flags & IB_SEND_FENCE) && in hfi1_make_rc_req()
477 if (wqe->wr.opcode == IB_WR_REG_MR || in hfi1_make_rc_req()
478 wqe->wr.opcode == IB_WR_LOCAL_INV) { in hfi1_make_rc_req()
488 if (!(wqe->wr.send_flags & in hfi1_make_rc_req()
492 wqe->wr.ex.invalidate_rkey); in hfi1_make_rc_req()
515 switch (wqe->wr.opcode) { in hfi1_make_rc_req()
530 if (wqe->wr.opcode == IB_WR_SEND) { in hfi1_make_rc_req()
532 } else if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) { in hfi1_make_rc_req()
535 ohdr->u.imm_data = wqe->wr.ex.imm_data; in hfi1_make_rc_req()
[all …]
/drivers/infiniband/hw/ocrdma/
ocrdma_verbs.c
1994 struct ib_send_wr *wr) in ocrdma_build_ud_hdr() argument
1998 struct ocrdma_ah *ah = get_ocrdma_ah(ud_wr(wr)->ah); in ocrdma_build_ud_hdr()
2000 ud_hdr->rsvd_dest_qpn = ud_wr(wr)->remote_qpn; in ocrdma_build_ud_hdr()
2004 ud_hdr->qkey = ud_wr(wr)->remote_qkey; in ocrdma_build_ud_hdr()
2041 struct ib_send_wr *wr, u32 wqe_size) in ocrdma_build_inline_sges() argument
2046 if (wr->send_flags & IB_SEND_INLINE && qp->qp_type != IB_QPT_UD) { in ocrdma_build_inline_sges()
2047 hdr->total_len = ocrdma_sglist_len(wr->sg_list, wr->num_sge); in ocrdma_build_inline_sges()
2055 for (i = 0; i < wr->num_sge; i++) { in ocrdma_build_inline_sges()
2057 (void *)(unsigned long)wr->sg_list[i].addr, in ocrdma_build_inline_sges()
2058 wr->sg_list[i].length); in ocrdma_build_inline_sges()
[all …]
/drivers/infiniband/hw/qib/
qib_uc.c
102 qp->s_sge.num_sge = wqe->wr.num_sge; in qib_make_uc_req()
106 switch (wqe->wr.opcode) { in qib_make_uc_req()
114 if (wqe->wr.opcode == IB_WR_SEND) in qib_make_uc_req()
120 ohdr->u.imm_data = wqe->wr.ex.imm_data; in qib_make_uc_req()
123 if (wqe->wr.send_flags & IB_SEND_SOLICITED) in qib_make_uc_req()
143 if (wqe->wr.opcode == IB_WR_RDMA_WRITE) in qib_make_uc_req()
149 ohdr->u.rc.imm_data = wqe->wr.ex.imm_data; in qib_make_uc_req()
151 if (wqe->wr.send_flags & IB_SEND_SOLICITED) in qib_make_uc_req()
173 if (wqe->wr.opcode == IB_WR_SEND) in qib_make_uc_req()
178 ohdr->u.imm_data = wqe->wr.ex.imm_data; in qib_make_uc_req()
[all …]
qib_rc.c
51 ss->num_sge = wqe->wr.num_sge; in restart_sge()
312 if ((wqe->wr.send_flags & IB_SEND_FENCE) && in qib_make_rc_req()
328 switch (wqe->wr.opcode) { in qib_make_rc_req()
342 if (wqe->wr.opcode == IB_WR_SEND) in qib_make_rc_req()
347 ohdr->u.imm_data = wqe->wr.ex.imm_data; in qib_make_rc_req()
350 if (wqe->wr.send_flags & IB_SEND_SOLICITED) in qib_make_rc_req()
380 if (wqe->rdma_wr.wr.opcode == IB_WR_RDMA_WRITE) in qib_make_rc_req()
386 wqe->rdma_wr.wr.ex.imm_data; in qib_make_rc_req()
388 if (wqe->rdma_wr.wr.send_flags & IB_SEND_SOLICITED) in qib_make_rc_req()
442 if (wqe->atomic_wr.wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) { in qib_make_rc_req()
[all …]
