
Lines Matching full:seg

31  * @seg: Current WQE position (16B aligned).
35 static inline void handle_post_send_edge(struct mlx5_ib_wq *sq, void **seg, in handle_post_send_edge() argument
40 if (likely(*seg != *cur_edge)) in handle_post_send_edge()
46 *seg = mlx5_frag_buf_get_wqe(&sq->fbc, idx); in handle_post_send_edge()
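The two matches above (lines 40 and 46) are the heart of the edge handling: the cursor advances in 16-byte steps, and only when it lands exactly on the current fragment's edge does it hop to the fragment holding the next WQE index. A minimal userspace model of that hop; the names (frags, handle_edge) and sizes are toy stand-ins for the driver's fragmented SQ buffer and mlx5_frag_buf_get_wqe():

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define NFRAGS	4
#define FRAG_SZ	256			/* toy fragment size */

static uint8_t frags[NFRAGS][FRAG_SZ];	/* toy fragmented SQ buffer */

/* Toy edge check: hop to the next fragment only when the cursor lands
 * exactly on the current fragment's edge. */
static void handle_edge(void **seg, void **cur_edge, int *frag)
{
	if (*seg != *cur_edge)		/* still inside this fragment */
		return;

	*frag = (*frag + 1) % NFRAGS;
	*seg = frags[*frag];
	*cur_edge = frags[*frag] + FRAG_SZ;
}

int main(void)
{
	int i, frag = 0;
	void *seg = frags[0];
	void *cur_edge = frags[0] + FRAG_SZ;

	for (i = 0; i < 40; i++) {	/* post 40 16-byte segments */
		memset(seg, 0, 16);
		seg = (uint8_t *)seg + 16;
		handle_edge(&seg, &cur_edge, &frag);
	}
	printf("ended in fragment %d, offset %td\n",
	       frag, (uint8_t *)seg - frags[frag]);	/* fragment 2, 128 */
	return 0;
}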
50  * pointers. At the end @seg is aligned to 16B regardless of the copied size.
53 * @seg: Current WQE position (16B aligned).
59 void **seg, u32 *wqe_sz, const void *src, in memcpy_send_wqe() argument
63 size_t leftlen = *cur_edge - *seg; in memcpy_send_wqe()
67 memcpy(*seg, src, copysz); in memcpy_send_wqe()
72 *seg += stride; in memcpy_send_wqe()
74 handle_post_send_edge(sq, seg, *wqe_sz, cur_edge); in memcpy_send_wqe()
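The loop these matches come from copies min(edge headroom, remaining) bytes per fragment and rounds only the final stride up to 16 bytes, which is how the comment's alignment promise is kept. A small sketch of just that stride arithmetic; copy_strides and its parameters are invented for the demo:

#include <stdio.h>
#include <stddef.h>

#define ALIGN16(x)	(((x) + 15) & ~(size_t)15)

/* Sum the cursor strides for copying n bytes: per-fragment chunks,
 * with only the final chunk rounded up to 16 bytes. */
static size_t copy_strides(size_t edge_room, size_t frag_sz, size_t n)
{
	size_t total = 0, leftlen = edge_room;

	while (n) {
		size_t copysz = n < leftlen ? n : leftlen;
		size_t stride = (copysz == n) ? ALIGN16(copysz) : copysz;

		n -= copysz;
		total += stride;
		leftlen = frag_sz;	/* next chunk starts a fresh fragment */
	}
	return total;
}

int main(void)
{
	/* 70 bytes with 32 left before the edge: 32 + ALIGN16(38) = 80 */
	printf("cursor advances %zu bytes\n", copy_strides(32, 4096, 70));
	return 0;
}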
105 void **seg, int *size, void **cur_edge) in set_eth_seg() argument
107 struct mlx5_wqe_eth_seg *eseg = *seg; in set_eth_seg()
135 *seg += stride; in set_eth_seg()
138 handle_post_send_edge(&qp->sq, seg, *size, cur_edge); in set_eth_seg()
141 memcpy_send_wqe(&qp->sq, cur_edge, seg, size, pdata, in set_eth_seg()
148 *seg += sizeof(struct mlx5_wqe_eth_seg); in set_eth_seg()
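For LSO sends, the match at line 141 spills whatever part of the inline packet header does not fit before the edge into memcpy_send_wqe(). Assuming the upstream layout where inline_hdr.start occupies the last 2 bytes of the 16-byte Ethernet segment, the cursor stride works out as below (a worked example, not driver code):

#include <stdio.h>

#define ALIGN16(x)	(((x) + 15u) & ~15u)

int main(void)
{
	unsigned int copysz = 58;	/* Ethernet + IPv4 + TCP headers */
	unsigned int stride = ALIGN16(16 - 2 + copysz);

	printf("stride = %u bytes (size += %u x16B)\n", stride, stride / 16);
	return 0;
}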
373 static void set_reg_mkey_seg(struct mlx5_mkey_seg *seg, in set_reg_mkey_seg() argument
379 memset(seg, 0, sizeof(*seg)); in set_reg_mkey_seg()
382 seg->log2_page_size = ilog2(mr->ibmr.page_size); in set_reg_mkey_seg()
387 seg->flags = get_umr_flags(access) | mr->access_mode; in set_reg_mkey_seg()
388 seg->qpn_mkey7_0 = cpu_to_be32((key & 0xff) | 0xffffff00); in set_reg_mkey_seg()
389 seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL); in set_reg_mkey_seg()
390 seg->start_addr = cpu_to_be64(mr->ibmr.iova); in set_reg_mkey_seg()
391 seg->len = cpu_to_be64(mr->ibmr.length); in set_reg_mkey_seg()
392 seg->xlt_oct_size = cpu_to_be32(ndescs); in set_reg_mkey_seg()
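Line 388 packs the memory key's low byte (its variant) together with an all-ones wildcard QPN into one 32-bit field; on the wire the field is byte-swapped by cpu_to_be32(). A quick round-trip of that packing, assuming this field layout:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t key = 0x00abcd42;	/* example memory key */
	uint32_t qpn_mkey7_0 = (key & 0xff) | 0xffffff00;

	printf("packed    = 0x%08x\n", (unsigned)qpn_mkey7_0);	/* 0xffffff42 */
	printf("mkey 7..0 = 0x%02x\n", (unsigned)(qpn_mkey7_0 & 0xff));
	printf("qpn (any) = 0x%06x\n", (unsigned)(qpn_mkey7_0 >> 8));
	return 0;
}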
395 static void set_linv_mkey_seg(struct mlx5_mkey_seg *seg) in set_linv_mkey_seg() argument
397 memset(seg, 0, sizeof(*seg)); in set_linv_mkey_seg()
398 seg->status = MLX5_MKEY_STATUS_FREE; in set_linv_mkey_seg()
402 struct mlx5_mkey_seg *seg, in set_reg_mkey_segment() argument
407 memset(seg, 0, sizeof(*seg)); in set_reg_mkey_segment()
409 MLX5_SET(mkc, seg, free, 1); in set_reg_mkey_segment()
411 MLX5_SET(mkc, seg, a, in set_reg_mkey_segment()
413 MLX5_SET(mkc, seg, rw, in set_reg_mkey_segment()
415 MLX5_SET(mkc, seg, rr, !!(umrwr->access_flags & IB_ACCESS_REMOTE_READ)); in set_reg_mkey_segment()
416 MLX5_SET(mkc, seg, lw, !!(umrwr->access_flags & IB_ACCESS_LOCAL_WRITE)); in set_reg_mkey_segment()
417 MLX5_SET(mkc, seg, lr, 1); in set_reg_mkey_segment()
419 MLX5_SET(mkc, seg, relaxed_ordering_write, in set_reg_mkey_segment()
422 MLX5_SET(mkc, seg, relaxed_ordering_read, in set_reg_mkey_segment()
426 MLX5_SET(mkc, seg, pd, to_mpd(umrwr->pd)->pdn); in set_reg_mkey_segment()
429 MLX5_SET(mkc, seg, length64, 1); in set_reg_mkey_segment()
431 MLX5_SET64(mkc, seg, start_addr, umrwr->virt_addr); in set_reg_mkey_segment()
432 MLX5_SET64(mkc, seg, len, umrwr->length); in set_reg_mkey_segment()
433 MLX5_SET(mkc, seg, log_page_size, umrwr->page_shift); in set_reg_mkey_segment()
434 MLX5_SET(mkc, seg, qpn, 0xffffff); in set_reg_mkey_segment()
435 MLX5_SET(mkc, seg, mkey_7_0, mlx5_mkey_variant(umrwr->mkey)); in set_reg_mkey_segment()
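set_reg_mkey_segment() builds the same mkey context through MLX5_SET(), which writes spec-defined bitfields into big-endian dwords (the real field offsets come from the generated descriptions in mlx5_ifc.h). A simplified setter sketching the mechanism; set_field and the offsets used below are invented:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>	/* htonl/ntohl */

/* Write val into a (dword, bit, width) field of a big-endian buffer.
 * An assumption about the mechanism, not the kernel macro itself. */
static void set_field(uint32_t *buf, int dw, int bit, int width, uint32_t val)
{
	uint32_t mask = (width == 32) ? 0xffffffffu : ((1u << width) - 1);
	uint32_t host = ntohl(buf[dw]);

	host &= ~(mask << bit);
	host |= (val & mask) << bit;
	buf[dw] = htonl(host);
}

int main(void)
{
	uint32_t mkc[16] = { 0 };

	set_field(mkc, 0, 9, 1, 1);		/* a 1-bit flag (invented offset) */
	set_field(mkc, 1, 0, 24, 0xffffff);	/* a 24-bit qpn-like field */
	printf("dw0=0x%08x dw1=0x%08x\n",
	       (unsigned)ntohl(mkc[0]), (unsigned)ntohl(mkc[1]));
	return 0;
}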
484 struct mlx5_wqe_inline_seg *seg; in set_data_inl_seg() local
489 seg = *wqe; in set_data_inl_seg()
490 *wqe += sizeof(*seg); in set_data_inl_seg()
491 offset = sizeof(*seg); in set_data_inl_seg()
521 seg->byte_count = cpu_to_be32(inl | MLX5_INLINE_SEG); in set_data_inl_seg()
523 *wqe_sz += ALIGN(inl + sizeof(seg->byte_count), 16) / 16; in set_data_inl_seg()
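Lines 521 and 523 carry the inline-data bookkeeping: the 4-byte byte_count header stores the length with the MLX5_INLINE_SEG flag OR'd into the top bit, and the WQE size grows by the number of 16-byte quanta covering header plus payload. Worked numbers for a 100-byte inline payload:

#include <stdio.h>
#include <stdint.h>

#define MLX5_INLINE_SEG	0x80000000u	/* top bit marks inline data */
#define ALIGN16(x)	(((x) + 15u) & ~15u)

int main(void)
{
	uint32_t inl = 100;			/* inline payload bytes */
	uint32_t byte_count = inl | MLX5_INLINE_SEG;
	uint32_t quanta = ALIGN16(inl + 4) / 16;	/* 4 = sizeof(byte_count) */

	printf("byte_count = 0x%08x, wqe_sz += %u x16B\n",
	       (unsigned)byte_count, (unsigned)quanta);	/* 0x80000064, 7 */
	return 0;
}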
640 struct mlx5_ib_qp *qp, void **seg, int *size, in set_sig_data_segment() argument
678 struct mlx5_klm *data_klm = *seg; in set_sig_data_segment()
704 sblock_ctrl = *seg; in set_sig_data_segment()
733 *seg += wqe_size; in set_sig_data_segment()
735 handle_post_send_edge(&qp->sq, seg, *size, cur_edge); in set_sig_data_segment()
737 bsf = *seg; in set_sig_data_segment()
742 *seg += sizeof(*bsf); in set_sig_data_segment()
744 handle_post_send_edge(&qp->sq, seg, *size, cur_edge); in set_sig_data_segment()
749 static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg, in set_sig_mkey_segment() argument
756 memset(seg, 0, sizeof(*seg)); in set_sig_mkey_segment()
758 seg->flags = get_umr_flags(access_flags) | MLX5_MKC_ACCESS_MODE_KLMS; in set_sig_mkey_segment()
759 seg->qpn_mkey7_0 = cpu_to_be32((sig_key & 0xff) | 0xffffff00); in set_sig_mkey_segment()
760 seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL | sigerr << 26 | in set_sig_mkey_segment()
762 seg->len = cpu_to_be64(length); in set_sig_mkey_segment()
763 seg->xlt_oct_size = cpu_to_be32(get_xlt_octo(size)); in set_sig_mkey_segment()
764 seg->bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE); in set_sig_mkey_segment()
779 struct mlx5_ib_qp *qp, void **seg, int *size, in set_pi_umr_wr() argument
809 set_sig_umr_segment(*seg, xlt_size); in set_pi_umr_wr()
810 *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg); in set_pi_umr_wr()
812 handle_post_send_edge(&qp->sq, seg, *size, cur_edge); in set_pi_umr_wr()
814 set_sig_mkey_segment(*seg, wr->mr, wr->access, xlt_size, region_len, in set_pi_umr_wr()
816 *seg += sizeof(struct mlx5_mkey_seg); in set_pi_umr_wr()
818 handle_post_send_edge(&qp->sq, seg, *size, cur_edge); in set_pi_umr_wr()
820 ret = set_sig_data_segment(send_wr, wr->mr, sig_attrs, qp, seg, size, in set_pi_umr_wr()
830 u32 psv_idx, void **seg, int *size) in set_psv_wr() argument
832 struct mlx5_seg_set_psv *psv_seg = *seg; in set_psv_wr()
850 *seg += sizeof(*psv_seg); in set_psv_wr()
858 void **seg, int *size, void **cur_edge, in set_reg_wr() argument
888 set_reg_umr_seg(*seg, mr, flags, atomic); in set_reg_wr()
889 *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg); in set_reg_wr()
891 handle_post_send_edge(&qp->sq, seg, *size, cur_edge); in set_reg_wr()
893 set_reg_mkey_seg(*seg, mr, wr->key, wr->access); in set_reg_wr()
894 *seg += sizeof(struct mlx5_mkey_seg); in set_reg_wr()
896 handle_post_send_edge(&qp->sq, seg, *size, cur_edge); in set_reg_wr()
899 memcpy_send_wqe(&qp->sq, cur_edge, seg, size, mr->descs, in set_reg_wr()
903 set_reg_data_seg(*seg, mr, pd); in set_reg_wr()
904 *seg += sizeof(struct mlx5_wqe_data_seg); in set_reg_wr()
910 static void set_linv_wr(struct mlx5_ib_qp *qp, void **seg, int *size, in set_linv_wr() argument
913 set_linv_umr_seg(*seg); in set_linv_wr()
914 *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg); in set_linv_wr()
916 handle_post_send_edge(&qp->sq, seg, *size, cur_edge); in set_linv_wr()
917 set_linv_mkey_seg(*seg); in set_linv_wr()
918 *seg += sizeof(struct mlx5_mkey_seg); in set_linv_wr()
920 handle_post_send_edge(&qp->sq, seg, *size, cur_edge); in set_linv_wr()
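set_linv_wr() is the purest example of the building pattern repeated throughout this listing (also in set_reg_wr() and set_pi_umr_wr()): write a segment at the cursor, advance by its size, grow the size counter in 16-byte units, re-check the edge. A compilable sketch of that pattern, with toy segment sizes matching the driver's 48-byte UMR control and 64-byte mkey segments and the edge check stubbed out:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct umr_ctrl_seg { uint8_t b[48]; };	/* toy wire-format sizes */
struct mkey_seg     { uint8_t b[64]; };

static void check_edge(void **seg, int size) { (void)seg; (void)size; }

static void build_linv_wqe(void **seg, int *size)
{
	struct umr_ctrl_seg *umr = *seg;
	struct mkey_seg *mkc;

	memset(umr, 0, sizeof(*umr));		/* set_linv_umr_seg() */
	*seg = (uint8_t *)*seg + sizeof(*umr);
	*size += sizeof(*umr) / 16;
	check_edge(seg, *size);

	mkc = *seg;
	memset(mkc, 0, sizeof(*mkc));		/* set_linv_mkey_seg() */
	mkc->b[0] = 1 << 6;			/* e.g. MLX5_MKEY_STATUS_FREE */
	*seg = (uint8_t *)*seg + sizeof(*mkc);
	*size += sizeof(*mkc) / 16;
	check_edge(seg, *size);
}

int main(void)
{
	uint8_t wqe[256];
	void *seg = wqe;
	int size = 0;

	build_linv_wqe(&seg, &size);
	printf("cursor advanced %td bytes, size %d x16B\n",
	       (uint8_t *)seg - wqe, size);	/* 112 bytes, 7 quanta */
	return 0;
}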
942 static int __begin_wqe(struct mlx5_ib_qp *qp, void **seg, in __begin_wqe() argument
952 *seg = mlx5_frag_buf_get_wqe(&qp->sq.fbc, *idx); in __begin_wqe()
953 *ctrl = *seg; in __begin_wqe()
954 *(uint32_t *)(*seg + 8) = 0; in __begin_wqe()
960 *seg += sizeof(**ctrl); in __begin_wqe()
967 static int begin_wqe(struct mlx5_ib_qp *qp, void **seg, in begin_wqe() argument
972 return __begin_wqe(qp, seg, ctrl, wr, idx, size, cur_edge, nreq, in begin_wqe()
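The slot selection between lines 942 and 952 is not among the matches, but in the upstream driver the index fed to mlx5_frag_buf_get_wqe() at line 952 is cur_post masked by (wqe_cnt - 1), the usual power-of-two ring trick: the post counter increments forever and the mask wraps it into the queue. A quick demonstration (the wqe_cnt value is arbitrary):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t wqe_cnt = 256;		/* always a power of two */
	uint32_t cur_post;

	for (cur_post = 254; cur_post < 259; cur_post++)
		printf("cur_post %3u -> idx %3u\n",
		       cur_post, cur_post & (wqe_cnt - 1));
	return 0;
}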
979 void *seg, u8 size, void *cur_edge, in finish_wqe() argument
1001 seg = PTR_ALIGN(seg, MLX5_SEND_WQE_BB); in finish_wqe()
1002 qp->sq.cur_edge = (unlikely(seg == cur_edge)) ? in finish_wqe()
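Lines 1001-1002 round the cursor up to the 64-byte basic block (MLX5_SEND_WQE_BB) and refresh the cached edge if the rounded cursor landed exactly on it. Worked offsets, assuming a 4096-byte fragment:

#include <stdio.h>

#define WQE_BB	64	/* MLX5_SEND_WQE_BB: the 64-byte basic block */

/* Offset form of PTR_ALIGN(seg, MLX5_SEND_WQE_BB). */
static unsigned long align_bb(unsigned long off)
{
	return (off + WQE_BB - 1) & ~(unsigned long)(WQE_BB - 1);
}

int main(void)
{
	unsigned long offs[] = { 112, 2088, 4064 };	/* cursor offsets */
	unsigned int i;

	for (i = 0; i < 3; i++)
		printf("cursor %4lu -> %4lu%s\n", offs[i], align_bb(offs[i]),
		       align_bb(offs[i]) == 4096 ? "  (edge: recompute)" : "");
	return 0;
}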
1008 static void handle_rdma_op(const struct ib_send_wr *wr, void **seg, int *size) in handle_rdma_op() argument
1010 set_raddr_seg(*seg, rdma_wr(wr)->remote_addr, rdma_wr(wr)->rkey); in handle_rdma_op()
1011 *seg += sizeof(struct mlx5_wqe_raddr_seg); in handle_rdma_op()
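The 16-byte advance at line 1011 matches the wire format of the RDMA address segment: a 64-bit remote VA, a 32-bit rkey, and 32 reserved bits. A mirror of that layout as a sanity check (field names follow struct mlx5_wqe_raddr_seg; on the wire the fields are big-endian):

#include <stdio.h>
#include <stdint.h>

struct raddr_seg {		/* mirrors struct mlx5_wqe_raddr_seg */
	uint64_t raddr;		/* remote VA, big-endian on the wire */
	uint32_t rkey;
	uint32_t reserved;
};

int main(void)
{
	printf("raddr segment: %zu bytes = %zu x16B\n",
	       sizeof(struct raddr_seg), sizeof(struct raddr_seg) / 16);
	return 0;
}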
1016 struct mlx5_wqe_ctrl_seg **ctrl, void **seg, in handle_local_inv() argument
1021 set_linv_wr(qp, seg, size, cur_edge); in handle_local_inv()
1025 struct mlx5_wqe_ctrl_seg **ctrl, void **seg, int *size, in handle_reg_mr() argument
1030 return set_reg_wr(qp, reg_wr(wr), seg, size, cur_edge, true); in handle_reg_mr()
1035 struct mlx5_wqe_ctrl_seg **ctrl, void **seg, int *size, in handle_psv() argument
1045 err = __begin_wqe(qp, seg, ctrl, wr, idx, size, cur_edge, nreq, in handle_psv()
1052 err = set_psv_wr(domain, psv_index, seg, size); in handle_psv()
1057 finish_wqe(qp, *ctrl, *seg, *size, *cur_edge, *idx, wr->wr_id, nreq, in handle_psv()
1067 struct mlx5_wqe_ctrl_seg **ctrl, void **seg, in handle_reg_mr_integrity() argument
1094 err = set_reg_wr(qp, &reg_pi_wr, seg, size, cur_edge, false); in handle_reg_mr_integrity()
1098 finish_wqe(qp, *ctrl, *seg, *size, *cur_edge, *idx, wr->wr_id, in handle_reg_mr_integrity()
1101 err = begin_wqe(qp, seg, ctrl, wr, idx, size, cur_edge, nreq); in handle_reg_mr_integrity()
1125 err = set_pi_umr_wr(wr, qp, seg, size, cur_edge); in handle_reg_mr_integrity()
1130 finish_wqe(qp, *ctrl, *seg, *size, *cur_edge, *idx, wr->wr_id, nreq, in handle_reg_mr_integrity()
1134 err = handle_psv(dev, qp, wr, ctrl, seg, size, cur_edge, idx, nreq, in handle_reg_mr_integrity()
1140 err = handle_psv(dev, qp, wr, ctrl, seg, size, cur_edge, idx, nreq, in handle_reg_mr_integrity()
1154 struct mlx5_wqe_ctrl_seg **ctrl, void **seg, int *size, in handle_qpt_rc() argument
1164 handle_rdma_op(wr, seg, size); in handle_qpt_rc()
1175 handle_local_inv(qp, wr, ctrl, seg, size, cur_edge, *idx); in handle_qpt_rc()
1180 err = handle_reg_mr(qp, wr, ctrl, seg, size, cur_edge, *idx); in handle_qpt_rc()
1187 err = handle_reg_mr_integrity(dev, qp, wr, ctrl, seg, size, in handle_qpt_rc()
1203 static void handle_qpt_uc(const struct ib_send_wr *wr, void **seg, int *size) in handle_qpt_uc() argument
1208 handle_rdma_op(wr, seg, size); in handle_qpt_uc()
1216 const struct ib_send_wr *wr, void **seg, in handle_qpt_hw_gsi() argument
1219 set_datagram_seg(*seg, wr); in handle_qpt_hw_gsi()
1220 *seg += sizeof(struct mlx5_wqe_datagram_seg); in handle_qpt_hw_gsi()
1222 handle_post_send_edge(&qp->sq, seg, *size, cur_edge); in handle_qpt_hw_gsi()
1226 void **seg, int *size, void **cur_edge) in handle_qpt_ud() argument
1228 set_datagram_seg(*seg, wr); in handle_qpt_ud()
1229 *seg += sizeof(struct mlx5_wqe_datagram_seg); in handle_qpt_ud()
1231 handle_post_send_edge(&qp->sq, seg, *size, cur_edge); in handle_qpt_ud()
1237 pad = *seg; in handle_qpt_ud()
1239 *seg += sizeof(struct mlx5_wqe_eth_pad); in handle_qpt_ud()
1241 set_eth_seg(wr, qp, seg, size, cur_edge); in handle_qpt_ud()
1242 handle_post_send_edge(&qp->sq, seg, *size, cur_edge); in handle_qpt_ud()
1248 struct mlx5_wqe_ctrl_seg **ctrl, void **seg, in handle_qpt_reg_umr() argument
1261 err = set_reg_umr_segment(dev, *seg, wr); in handle_qpt_reg_umr()
1264 *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg); in handle_qpt_reg_umr()
1266 handle_post_send_edge(&qp->sq, seg, *size, cur_edge); in handle_qpt_reg_umr()
1267 set_reg_mkey_segment(dev, *seg, wr); in handle_qpt_reg_umr()
1268 *seg += sizeof(struct mlx5_mkey_seg); in handle_qpt_reg_umr()
1270 handle_post_send_edge(&qp->sq, seg, *size, cur_edge); in handle_qpt_reg_umr()
1290 void *seg; in mlx5_ib_post_send() local
1326 err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, &cur_edge, in mlx5_ib_post_send()
1352 xrc = seg; in mlx5_ib_post_send()
1353 seg += sizeof(*xrc); in mlx5_ib_post_send()
1357 err = handle_qpt_rc(dev, qp, wr, &ctrl, &seg, &size, in mlx5_ib_post_send()
1369 handle_qpt_uc(wr, &seg, &size); in mlx5_ib_post_send()
1380 handle_qpt_hw_gsi(qp, wr, &seg, &size, &cur_edge); in mlx5_ib_post_send()
1383 handle_qpt_ud(qp, wr, &seg, &size, &cur_edge); in mlx5_ib_post_send()
1386 err = handle_qpt_reg_umr(dev, qp, wr, &ctrl, &seg, in mlx5_ib_post_send()
1397 err = set_data_inl_seg(qp, wr, &seg, &size, &cur_edge); in mlx5_ib_post_send()
1405 handle_post_send_edge(&qp->sq, &seg, size, in mlx5_ib_post_send()
1411 (struct mlx5_wqe_data_seg *)seg, in mlx5_ib_post_send()
1414 seg += sizeof(struct mlx5_wqe_data_seg); in mlx5_ib_post_send()
1419 finish_wqe(qp, ctrl, seg, size, cur_edge, idx, wr->wr_id, nreq, in mlx5_ib_post_send()
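Taken together, the matches in mlx5_ib_post_send() trace one loop per work request: begin_wqe() claims a slot, the QP-type handlers emit their segments, payload goes inline or as data segments, and finish_wqe() seals the WQE and caches the new edge. A stubbed, compilable skeleton of just that control flow; only the loop shape is taken from the listing, every helper body is a placeholder:

#include <stdio.h>

enum op { OP_RDMA_WRITE, OP_SEND };
struct wr { enum op opcode; struct wr *next; };

static int  begin_wqe(void)      { return 0; }	/* claim idx, seg -> ctrl */
static void handle_rdma_op(void) { }		/* raddr seg for RDMA ops */
static void add_payload(void)    { }		/* inline or data segments */
static void finish_wqe(void)     { }		/* stamp ctrl, cache edge  */

static int post_send(struct wr *head)
{
	struct wr *w;

	for (w = head; w; w = w->next) {
		if (begin_wqe())
			return -1;		/* SQ overflow */
		if (w->opcode == OP_RDMA_WRITE)
			handle_rdma_op();
		add_payload();
		finish_wqe();
	}
	return 0;				/* then ring the doorbell */
}

int main(void)
{
	struct wr w2 = { OP_SEND, NULL };
	struct wr w1 = { OP_RDMA_WRITE, &w2 };

	printf("post_send -> %d\n", post_send(&w1));
	return 0;
}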