
Lines matching refs:wr in drivers/infiniband/hw/mlx4/qp.c (each match is prefixed with its line number in that file and tagged with the enclosing function)

2825 				  const struct ib_ud_wr *wr,  in build_sriov_qp0_header()  argument
2833 struct mlx4_ib_ah *ah = to_mah(wr->ah); in build_sriov_qp0_header()
2842 if (wr->wr.opcode != IB_WR_SEND) in build_sriov_qp0_header()
2847 for (i = 0; i < wr->wr.num_sge; ++i) in build_sriov_qp0_header()
2848 send_size += wr->wr.sg_list[i].length; in build_sriov_qp0_header()
2873 sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED); in build_sriov_qp0_header()
2879 sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn); in build_sriov_qp0_header()
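The wr->wr.* accesses above are the RDMA core's typed work-request pattern: each extended WR (UD, RDMA, atomic, ...) embeds a generic struct ib_send_wr as a member named wr, and a container_of() helper recovers the typed WR from a generic pointer. A minimal sketch of that shape, matching the ud_wr() helper this file calls (defined in include/rdma/ib_verbs.h):

    struct ib_ud_wr {
    	struct ib_send_wr wr;	/* generic part: opcode, sg_list, send_flags, ... */
    	struct ib_ah *ah;	/* address handle describing the destination */
    	u32 remote_qpn;
    	u32 remote_qkey;
    	u16 pkey_index;
    	/* ... */
    };

    static inline const struct ib_ud_wr *ud_wr(const struct ib_send_wr *wr)
    {
    	return container_of(wr, struct ib_ud_wr, wr);
    }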
2977 static int build_mlx_header(struct mlx4_ib_qp *qp, const struct ib_ud_wr *wr, in build_mlx_header() argument
2986 struct mlx4_ib_ah *ah = to_mah(wr->ah); in build_mlx_header()
3002 for (i = 0; i < wr->wr.num_sge; ++i) in build_mlx_header()
3003 send_size += wr->wr.sg_list[i].length; in build_mlx_header()
3121 switch (wr->wr.opcode) { in build_mlx_header()
3129 sqp->ud_header.immediate_data = wr->wr.ex.imm_data; in build_mlx_header()
3172 sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED); in build_mlx_header()
3177 err = ib_get_cached_pkey(ib_dev, qp->port, wr->pkey_index, in build_mlx_header()
3183 sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn); in build_mlx_header()
3185 sqp->ud_header.deth.qkey = cpu_to_be32(wr->remote_qkey & 0x80000000 ? in build_mlx_header()
3186 sqp->qkey : wr->remote_qkey); in build_mlx_header()
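Lines 3185-3186 implement the IBTA well-known Q_Key convention: a Q_Key with the most significant bit set is a controlled Q_Key that a caller may not supply directly, so the QP's own Q_Key is substituted. Restated as a hedged sketch (resolve_qkey is a hypothetical name, not in this file):

    /* Hypothetical helper restating the selection at lines 3185-3186. */
    static u32 resolve_qkey(u32 wr_qkey, u32 qp_qkey)
    {
    	/* MSB set: controlled Q_Key, use the one from the QP context. */
    	return (wr_qkey & 0x80000000) ? qp_qkey : wr_qkey;
    }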
3275 const struct ib_reg_wr *wr) in set_reg_seg() argument
3277 struct mlx4_ib_mr *mr = to_mmr(wr->mr); in set_reg_seg()
3279 fseg->flags = convert_access(wr->access); in set_reg_seg()
3280 fseg->mem_key = cpu_to_be32(wr->key); in set_reg_seg()
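set_reg_seg() consumes a struct ib_reg_wr, the fast-registration work request. A hedged sketch of the caller side, assuming an MR already mapped with ib_map_mr_sg() (post_reg_mr and the access mask are illustrative, not from this file):

    static int post_reg_mr(struct ib_qp *qp, struct ib_mr *mr)
    {
    	const struct ib_send_wr *bad_wr;
    	struct ib_reg_wr wr = {
    		.wr = {
    			.opcode	    = IB_WR_REG_MR,
    			.send_flags = IB_SEND_SIGNALED,
    		},
    		.mr	= mr,
    		.key	= mr->rkey,		/* becomes fseg->mem_key (3280) */
    		.access	= IB_ACCESS_LOCAL_WRITE |
    			  IB_ACCESS_REMOTE_READ, /* convert_access() input (3279) */
    	};

    	return ib_post_send(qp, &wr.wr, &bad_wr);
    }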
3305 const struct ib_atomic_wr *wr) in set_atomic_seg() argument
3307 if (wr->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) { in set_atomic_seg()
3308 aseg->swap_add = cpu_to_be64(wr->swap); in set_atomic_seg()
3309 aseg->compare = cpu_to_be64(wr->compare_add); in set_atomic_seg()
3310 } else if (wr->wr.opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) { in set_atomic_seg()
3311 aseg->swap_add = cpu_to_be64(wr->compare_add); in set_atomic_seg()
3312 aseg->compare = cpu_to_be64(wr->compare_add_mask); in set_atomic_seg()
3314 aseg->swap_add = cpu_to_be64(wr->compare_add); in set_atomic_seg()
3321 const struct ib_atomic_wr *wr) in set_masked_atomic_seg() argument
3323 aseg->swap_add = cpu_to_be64(wr->swap); in set_masked_atomic_seg()
3324 aseg->swap_add_mask = cpu_to_be64(wr->swap_mask); in set_masked_atomic_seg()
3325 aseg->compare = cpu_to_be64(wr->compare_add); in set_masked_atomic_seg()
3326 aseg->compare_mask = cpu_to_be64(wr->compare_add_mask); in set_masked_atomic_seg()
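set_atomic_seg() and set_masked_atomic_seg() map one set of ib_atomic_wr fields onto the hardware segment per opcode: for CMP_AND_SWP the compare operand rides in compare_add and the new value in swap; for FETCH_AND_ADD compare_add carries the addend; the masked variants add the *_mask fields. A hedged compare-and-swap caller (all names local to the sketch):

    static int post_cmp_swap(struct ib_qp *qp, struct ib_sge *sge,
    			 u64 raddr, u32 rkey, u64 expect, u64 newval)
    {
    	const struct ib_send_wr *bad_wr;
    	struct ib_atomic_wr wr = {
    		.wr = {
    			.opcode	    = IB_WR_ATOMIC_CMP_AND_SWP,
    			.sg_list    = sge,	/* 8-byte buffer for the old value */
    			.num_sge    = 1,
    			.send_flags = IB_SEND_SIGNALED,
    		},
    		.remote_addr = raddr,
    		.rkey	     = rkey,
    		.compare_add = expect,	/* aseg->compare at 3309 */
    		.swap	     = newval,	/* aseg->swap_add at 3308 */
    	};

    	return ib_post_send(qp, &wr.wr, &bad_wr);
    }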
3330 const struct ib_ud_wr *wr) in set_datagram_seg() argument
3332 memcpy(dseg->av, &to_mah(wr->ah)->av, sizeof (struct mlx4_av)); in set_datagram_seg()
3333 dseg->dqpn = cpu_to_be32(wr->remote_qpn); in set_datagram_seg()
3334 dseg->qkey = cpu_to_be32(wr->remote_qkey); in set_datagram_seg()
3335 dseg->vlan = to_mah(wr->ah)->av.eth.vlan; in set_datagram_seg()
3336 memcpy(dseg->mac, to_mah(wr->ah)->av.eth.mac, 6); in set_datagram_seg()
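set_datagram_seg() takes the destination entirely from the address handle and the WR, so a UD sender only supplies an AH plus remote QPN and Q_Key. Hedged sketch (post_ud_send is illustrative; the AH would come from rdma_create_ah() or ib_create_ah_from_wc()):

    static int post_ud_send(struct ib_qp *qp, struct ib_ah *ah,
    			struct ib_sge *sge, u32 dest_qpn)
    {
    	const struct ib_send_wr *bad_wr;
    	struct ib_ud_wr ud = {
    		.wr = {
    			.opcode	    = IB_WR_SEND,
    			.sg_list    = sge,
    			.num_sge    = 1,
    			.send_flags = IB_SEND_SIGNALED,
    		},
    		.ah	     = ah,		/* av copied at 3332 */
    		.remote_qpn  = dest_qpn,	/* dseg->dqpn at 3333 */
    		.remote_qkey = 0x80000000,	/* MSB set: QP's own Q_Key */
    	};

    	return ib_post_send(qp, &ud.wr, &bad_wr);
    }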
3341 const struct ib_ud_wr *wr, in set_tunnel_datagram_seg() argument
3344 union mlx4_ext_av *av = &to_mah(wr->ah)->av; in set_tunnel_datagram_seg()
3363 static void build_tunnel_header(const struct ib_ud_wr *wr, void *wqe, in build_tunnel_header() argument
3368 struct mlx4_ib_ah *ah = to_mah(wr->ah); in build_tunnel_header()
3373 hdr.remote_qpn = cpu_to_be32(wr->remote_qpn); in build_tunnel_header()
3374 hdr.pkey_index = cpu_to_be16(wr->pkey_index); in build_tunnel_header()
3375 hdr.qkey = cpu_to_be32(wr->remote_qkey); in build_tunnel_header()
3448 const struct ib_ud_wr *wr, struct mlx4_ib_qp *qp, in build_lso_seg() argument
3451 unsigned halign = ALIGN(sizeof *wqe + wr->hlen, 16); in build_lso_seg()
3457 wr->wr.num_sge > qp->sq.max_gs - (halign >> 4))) in build_lso_seg()
3460 memcpy(wqe->header, wr->header, wr->hlen); in build_lso_seg()
3462 *lso_hdr_sz = cpu_to_be32(wr->mss << 16 | wr->hlen); in build_lso_seg()
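build_lso_seg() copies the caller's packet headers inline into the WQE (3460) and packs mss/hlen into one dword for the hardware (3462); the check at 3457 rejects a WR whose inline header would leave too few slots for its scatter entries. This is the IB_WR_LSO shape an IPoIB-style user fills in; a hedged sketch (the skb-derived values are assumptions):

    static void fill_lso_wr(struct ib_ud_wr *lso, struct ib_ah *ah,
    			struct ib_sge *sgl, int nsge, u32 dqpn, u32 qkey,
    			struct sk_buff *skb, int hdr_len)
    {
    	memset(lso, 0, sizeof(*lso));
    	lso->wr.opcode	 = IB_WR_LSO;
    	lso->wr.sg_list	 = sgl;		/* payload only; headers ride inline */
    	lso->wr.num_sge	 = nsge;
    	lso->ah		 = ah;
    	lso->remote_qpn	 = dqpn;
    	lso->remote_qkey = qkey;
    	lso->header	 = skb->data;	/* copied into the WQE at 3460 */
    	lso->hlen	 = hdr_len;
    	lso->mss	 = skb_shinfo(skb)->gso_size;
    }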
3467 static __be32 send_ieth(const struct ib_send_wr *wr) in send_ieth() argument
3469 switch (wr->opcode) { in send_ieth()
3472 return wr->ex.imm_data; in send_ieth()
3475 return cpu_to_be32(wr->ex.invalidate_rkey); in send_ieth()
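send_ieth() fills the immediate/invalidate dword of the control segment: imm_data is already big-endian per the verbs convention, so it passes through unchanged (3472), while invalidate_rkey is CPU-order and is converted (3475). Hedged sketch of the two flavors of the ex union:

    struct ib_send_wr imm_wr = {
    	.opcode	     = IB_WR_SEND_WITH_IMM,
    	.ex.imm_data = cpu_to_be32(0x12345678),	/* network order, passed through */
    };
    struct ib_send_wr inv_wr = {
    	.opcode		    = IB_WR_SEND_WITH_INV,
    	.ex.invalidate_rkey = 0x1234,		/* CPU order; converted at 3475 */
    };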
3489 static int _mlx4_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, in _mlx4_ib_post_send() argument
3513 struct mlx4_ib_ah *ah = to_mah(ud_wr(wr)->ah); in _mlx4_ib_post_send()
3532 *bad_wr = wr; in _mlx4_ib_post_send()
3539 for (nreq = 0; wr; ++nreq, wr = wr->next) { in _mlx4_ib_post_send()
3545 *bad_wr = wr; in _mlx4_ib_post_send()
3549 if (unlikely(wr->num_sge > qp->sq.max_gs)) { in _mlx4_ib_post_send()
3551 *bad_wr = wr; in _mlx4_ib_post_send()
3556 qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] = wr->wr_id; in _mlx4_ib_post_send()
3559 (wr->send_flags & IB_SEND_SIGNALED ? in _mlx4_ib_post_send()
3561 (wr->send_flags & IB_SEND_SOLICITED ? in _mlx4_ib_post_send()
3563 ((wr->send_flags & IB_SEND_IP_CSUM) ? in _mlx4_ib_post_send()
3568 ctrl->imm = send_ieth(wr); in _mlx4_ib_post_send()
3576 switch (wr->opcode) { in _mlx4_ib_post_send()
3580 set_raddr_seg(wqe, atomic_wr(wr)->remote_addr, in _mlx4_ib_post_send()
3581 atomic_wr(wr)->rkey); in _mlx4_ib_post_send()
3584 set_atomic_seg(wqe, atomic_wr(wr)); in _mlx4_ib_post_send()
3593 set_raddr_seg(wqe, atomic_wr(wr)->remote_addr, in _mlx4_ib_post_send()
3594 atomic_wr(wr)->rkey); in _mlx4_ib_post_send()
3597 set_masked_atomic_seg(wqe, atomic_wr(wr)); in _mlx4_ib_post_send()
3608 set_raddr_seg(wqe, rdma_wr(wr)->remote_addr, in _mlx4_ib_post_send()
3609 rdma_wr(wr)->rkey); in _mlx4_ib_post_send()
3617 set_local_inv_seg(wqe, wr->ex.invalidate_rkey); in _mlx4_ib_post_send()
3625 set_reg_seg(wqe, reg_wr(wr)); in _mlx4_ib_post_send()
3637 err = build_sriov_qp0_header(qp, ud_wr(wr), ctrl, in _mlx4_ib_post_send()
3640 *bad_wr = wr; in _mlx4_ib_post_send()
3649 set_datagram_seg(wqe, ud_wr(wr)); in _mlx4_ib_post_send()
3656 set_datagram_seg(wqe, ud_wr(wr)); in _mlx4_ib_post_send()
3660 if (wr->opcode == IB_WR_LSO) { in _mlx4_ib_post_send()
3661 err = build_lso_seg(wqe, ud_wr(wr), qp, &seglen, in _mlx4_ib_post_send()
3664 *bad_wr = wr; in _mlx4_ib_post_send()
3674 err = build_sriov_qp0_header(qp, ud_wr(wr), ctrl, in _mlx4_ib_post_send()
3677 *bad_wr = wr; in _mlx4_ib_post_send()
3686 build_tunnel_header(ud_wr(wr), wqe, &seglen); in _mlx4_ib_post_send()
3697 ud_wr(wr), in _mlx4_ib_post_send()
3701 build_tunnel_header(ud_wr(wr), wqe, &seglen); in _mlx4_ib_post_send()
3708 err = build_mlx_header(qp, ud_wr(wr), ctrl, &seglen); in _mlx4_ib_post_send()
3710 *bad_wr = wr; in _mlx4_ib_post_send()
3729 dseg += wr->num_sge - 1; in _mlx4_ib_post_send()
3730 size += wr->num_sge * (sizeof (struct mlx4_wqe_data_seg) / 16); in _mlx4_ib_post_send()
3741 for (i = wr->num_sge - 1; i >= 0; --i, --dseg) in _mlx4_ib_post_send()
3742 set_data_seg(dseg, wr->sg_list + i); in _mlx4_ib_post_send()
3752 ctrl->qpn_vlan.fence_size = (wr->send_flags & IB_SEND_FENCE ? in _mlx4_ib_post_send()
3762 if (wr->opcode < 0 || wr->opcode >= ARRAY_SIZE(mlx4_ib_opcode)) { in _mlx4_ib_post_send()
3763 *bad_wr = wr; in _mlx4_ib_post_send()
3768 ctrl->owner_opcode = mlx4_ib_opcode[wr->opcode] | in _mlx4_ib_post_send()
3776 if (wr->next) in _mlx4_ib_post_send()
3804 int mlx4_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, in mlx4_ib_post_send() argument
3807 return _mlx4_ib_post_send(ibqp, wr, bad_wr, false); in mlx4_ib_post_send()
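The posting loop at 3539 stops at the first WR it cannot accept and reports it through *bad_wr (3545, 3551, ...), so the caller must treat everything ahead of bad_wr as posted. A hedged sketch of that contract:

    static int post_chain(struct ib_qp *qp, const struct ib_send_wr *first_wr)
    {
    	const struct ib_send_wr *bad_wr;
    	int err = ib_post_send(qp, first_wr, &bad_wr);

    	if (err) {
    		/*
    		 * WRs before bad_wr were queued and will still complete;
    		 * bad_wr and its successors were never posted and must
    		 * be reposted or unwound by the caller.
    		 */
    	}
    	return err;
    }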
3810 static int _mlx4_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, in _mlx4_ib_post_recv() argument
3829 *bad_wr = wr; in _mlx4_ib_post_recv()
3836 for (nreq = 0; wr; ++nreq, wr = wr->next) { in _mlx4_ib_post_recv()
3839 *bad_wr = wr; in _mlx4_ib_post_recv()
3843 if (unlikely(wr->num_sge > qp->rq.max_gs)) { in _mlx4_ib_post_recv()
3845 *bad_wr = wr; in _mlx4_ib_post_recv()
3860 scat->lkey = cpu_to_be32(wr->sg_list->lkey); in _mlx4_ib_post_recv()
3866 for (i = 0; i < wr->num_sge; ++i) in _mlx4_ib_post_recv()
3867 __set_data_seg(scat + i, wr->sg_list + i); in _mlx4_ib_post_recv()
3875 qp->rq.wrid[ind] = wr->wr_id; in _mlx4_ib_post_recv()
3898 int mlx4_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, in mlx4_ib_post_recv() argument
3901 return _mlx4_ib_post_recv(ibqp, wr, bad_wr, false); in mlx4_ib_post_recv()
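The receive side mirrors the send side: _mlx4_ib_post_recv() walks the chain (3836), bounds-checks num_sge against rq.max_gs (3843), scatters the SGEs (3866-3867), and records wr_id for completion matching (3875). Hedged caller sketch (dma_addr, len, and ctx are assumptions):

    static int post_one_recv(struct ib_qp *qp, struct ib_pd *pd,
    			 u64 dma_addr, u32 len, void *ctx)
    {
    	const struct ib_recv_wr *bad_wr;
    	struct ib_sge sge = {
    		.addr	= dma_addr,		/* DMA-mapped buffer */
    		.length	= len,
    		.lkey	= pd->local_dma_lkey,
    	};
    	struct ib_recv_wr wr = {
    		.wr_id	 = (uintptr_t)ctx,	/* echoed back in the CQE */
    		.sg_list = &sge,
    		.num_sge = 1,
    	};

    	return ib_post_recv(qp, &wr, &bad_wr);
    }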
4418 .wr = { in mlx4_ib_drain_sq()
4437 ret = _mlx4_ib_post_send(qp, &swr.wr, &bad_swr, true); in mlx4_ib_drain_sq()
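mlx4_ib_drain_sq() reuses the posting path directly (the drain=true argument at 4437 bypasses the usual QP-state check): with the QP moved to the error state it posts a zero-length marker WR and waits for its completion, at which point every earlier SQ WQE has been flushed. A hedged sketch of the pattern (the completion wiring is an assumption):

    static void drain_sq_sketch(struct ib_qp *qp, struct completion *done)
    {
    	const struct ib_send_wr *bad_wr;
    	struct ib_rdma_wr swr = {
    		.wr = {
    			.opcode	    = IB_WR_RDMA_WRITE,	/* zero-length marker */
    			.send_flags = IB_SEND_SIGNALED,
    		},
    	};

    	/* QP is already in the error state, so the marker simply flushes. */
    	if (!ib_post_send(qp, &swr.wr, &bad_wr))
    		wait_for_completion(done);	/* fired from the CQ handler */
    }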