Lines Matching refs:qp
75 struct mlx4_ib_qp qp; member
112 return container_of(mqp, struct mlx4_ib_sqp, qp); in to_msqp()
115 static int is_tunnel_qp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) in is_tunnel_qp() argument
120 return qp->mqp.qpn >= dev->dev->phys_caps.base_tunnel_sqpn && in is_tunnel_qp()
121 qp->mqp.qpn < dev->dev->phys_caps.base_tunnel_sqpn + in is_tunnel_qp()
125 static int is_sqp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) in is_sqp() argument
132 qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn && in is_sqp()
133 qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 3); in is_sqp()
139 if (qp->mqp.qpn == dev->dev->caps.qp0_proxy[i] || in is_sqp()
140 qp->mqp.qpn == dev->dev->caps.qp1_proxy[i]) { in is_sqp()
150 static int is_qp0(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) in is_qp0() argument
157 qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn && in is_qp0()
158 qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 1); in is_qp0()
164 if (qp->mqp.qpn == dev->dev->caps.qp0_proxy[i]) { in is_qp0()
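Note: the range checks above rely on the firmware placing the special QPs (per-port QP0/QP1) in a contiguous block starting at base_sqpn. A minimal standalone sketch of the same test, using a made-up base value rather than the driver's phys_caps fields:

    /* Illustration only: qpn_is_sqp()/qpn_is_qp0() and the base value are
     * stand-ins for the is_sqp()/is_qp0() checks listed above. */
    #include <stdbool.h>
    #include <stdio.h>

    static bool qpn_is_sqp(unsigned base_sqpn, unsigned qpn)
    {
        /* QP0 and QP1 for two ports occupy base_sqpn .. base_sqpn + 3 */
        return qpn >= base_sqpn && qpn <= base_sqpn + 3;
    }

    static bool qpn_is_qp0(unsigned base_sqpn, unsigned qpn)
    {
        /* the first two entries of the block are the per-port QP0s */
        return qpn >= base_sqpn && qpn <= base_sqpn + 1;
    }

    int main(void)
    {
        unsigned base = 0x40;   /* hypothetical base_sqpn */

        printf("0x41: sqp=%d qp0=%d\n", qpn_is_sqp(base, 0x41), qpn_is_qp0(base, 0x41));
        printf("0x43: sqp=%d qp0=%d\n", qpn_is_sqp(base, 0x43), qpn_is_qp0(base, 0x43));
        return 0;
    }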
173 static void *get_wqe(struct mlx4_ib_qp *qp, int offset) in get_wqe() argument
175 return mlx4_buf_offset(&qp->buf, offset); in get_wqe()
178 static void *get_recv_wqe(struct mlx4_ib_qp *qp, int n) in get_recv_wqe() argument
180 return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift)); in get_recv_wqe()
183 static void *get_send_wqe(struct mlx4_ib_qp *qp, int n) in get_send_wqe() argument
185 return get_wqe(qp, qp->sq.offset + (n << qp->sq.wqe_shift)); in get_send_wqe()
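Note: get_wqe(), get_recv_wqe() and get_send_wqe() above compute a WQE's address as the queue's byte offset plus (index << wqe_shift) inside one contiguous QP buffer. A minimal standalone sketch of that addressing, with invented offsets and strides (not the driver's mlx4_buf_offset()):

    #include <stdio.h>
    #include <stdlib.h>

    struct wq {
        unsigned offset;     /* byte offset of this queue in the QP buffer */
        unsigned wqe_shift;  /* log2 of the WQE stride in bytes */
    };

    static void *wqe_addr(void *buf, const struct wq *wq, int n)
    {
        return (char *)buf + wq->offset + ((size_t)n << wq->wqe_shift);
    }

    int main(void)
    {
        char buf[4096];
        struct wq rq = { .offset = 0,    .wqe_shift = 5 };  /* 32-byte RQ WQEs */
        struct wq sq = { .offset = 1024, .wqe_shift = 6 };  /* 64-byte SQ WQEs */

        printf("recv wqe 3 at offset %td\n", (char *)wqe_addr(buf, &rq, 3) - buf);
        printf("send wqe 3 at offset %td\n", (char *)wqe_addr(buf, &sq, 3) - buf);
        return 0;
    }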
197 static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n, int size) in stamp_send_wqe() argument
207 if (qp->sq_max_wqes_per_wr > 1) { in stamp_send_wqe()
208 s = roundup(size, 1U << qp->sq.wqe_shift); in stamp_send_wqe()
210 ind = (i >> qp->sq.wqe_shift) + n; in stamp_send_wqe()
211 stamp = ind & qp->sq.wqe_cnt ? cpu_to_be32(0x7fffffff) : in stamp_send_wqe()
213 buf = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1)); in stamp_send_wqe()
214 wqe = buf + (i & ((1 << qp->sq.wqe_shift) - 1)); in stamp_send_wqe()
218 ctrl = buf = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1)); in stamp_send_wqe()
227 static void post_nop_wqe(struct mlx4_ib_qp *qp, int n, int size) in post_nop_wqe() argument
234 ctrl = wqe = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1)); in post_nop_wqe()
237 if (qp->ibqp.qp_type == IB_QPT_UD) { in post_nop_wqe()
241 av->port_pd = cpu_to_be32((qp->port << 24) | to_mpd(qp->ibqp.pd)->pdn); in post_nop_wqe()
259 (n & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0); in post_nop_wqe()
261 stamp_send_wqe(qp, n + qp->sq_spare_wqes, size); in post_nop_wqe()
265 static inline unsigned pad_wraparound(struct mlx4_ib_qp *qp, int ind) in pad_wraparound() argument
267 unsigned s = qp->sq.wqe_cnt - (ind & (qp->sq.wqe_cnt - 1)); in pad_wraparound()
268 if (unlikely(s < qp->sq_max_wqes_per_wr)) { in pad_wraparound()
269 post_nop_wqe(qp, ind, s << qp->sq.wqe_shift); in pad_wraparound()
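Note: pad_wraparound() keeps a multi-slot WR from straddling the end of the send ring: when fewer slots remain before the wrap point than the largest WR can span, the leftover slots are consumed (the driver posts a NOP WQE over them). A small sketch of just the index arithmetic, with invented ring parameters:

    #include <stdio.h>

    static unsigned pad_wraparound(unsigned wqe_cnt, unsigned max_wqes_per_wr,
                                   unsigned ind)
    {
        unsigned left = wqe_cnt - (ind & (wqe_cnt - 1));  /* slots to ring end */

        if (left < max_wqes_per_wr)
            ind += left;        /* the driver fills these slots with a NOP WQE */
        return ind;
    }

    int main(void)
    {
        /* ring of 16 slots, WRs may span up to 3 slots */
        printf("ind 14 -> %u\n", pad_wraparound(16, 3, 14)); /* padded to 16 */
        printf("ind 10 -> %u\n", pad_wraparound(16, 3, 10)); /* unchanged    */
        return 0;
    }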
275 static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type) in mlx4_ib_qp_event() argument
278 struct ib_qp *ibqp = &to_mibqp(qp)->ibqp; in mlx4_ib_qp_event()
281 to_mibqp(qp)->port = to_mibqp(qp)->alt_port; in mlx4_ib_qp_event()
285 event.element.qp = ibqp; in mlx4_ib_qp_event()
313 "on QP %06x\n", type, qp->qpn); in mlx4_ib_qp_event()
368 int is_user, int has_rq, struct mlx4_ib_qp *qp) in set_rq_size() argument
379 qp->rq.wqe_cnt = qp->rq.max_gs = 0; in set_rq_size()
385 qp->rq.wqe_cnt = roundup_pow_of_two(max(1U, cap->max_recv_wr)); in set_rq_size()
386 qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge)); in set_rq_size()
387 qp->rq.wqe_shift = ilog2(qp->rq.max_gs * sizeof (struct mlx4_wqe_data_seg)); in set_rq_size()
392 cap->max_recv_wr = qp->rq.max_post = qp->rq.wqe_cnt; in set_rq_size()
393 cap->max_recv_sge = qp->rq.max_gs; in set_rq_size()
395 cap->max_recv_wr = qp->rq.max_post = in set_rq_size()
396 min(dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE, qp->rq.wqe_cnt); in set_rq_size()
397 cap->max_recv_sge = min(qp->rq.max_gs, in set_rq_size()
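Note: set_rq_size() rounds both the requested WR count and SGE count up to powers of two and derives the RQ WQE stride from the SGE count. A standalone sketch of that arithmetic; the 16-byte scatter-entry size and the helper functions are assumptions for the example, not the kernel's roundup_pow_of_two()/ilog2():

    #include <stdio.h>

    /* round v up to the next power of two (v > 0) */
    static unsigned roundup_pow2(unsigned v)
    {
        unsigned r = 1;
        while (r < v)
            r <<= 1;
        return r;
    }

    static unsigned ilog2u(unsigned v)
    {
        unsigned l = 0;
        while (v >>= 1)
            l++;
        return l;
    }

    int main(void)
    {
        unsigned max_recv_wr = 100, max_recv_sge = 3;
        unsigned data_seg_size = 16;   /* assumed sizeof a hardware data segment */

        unsigned wqe_cnt   = roundup_pow2(max_recv_wr  ? max_recv_wr  : 1);
        unsigned max_gs    = roundup_pow2(max_recv_sge ? max_recv_sge : 1);
        unsigned wqe_shift = ilog2u(max_gs * data_seg_size);

        printf("wqe_cnt=%u max_gs=%u wqe_shift=%u (stride %u bytes)\n",
               wqe_cnt, max_gs, wqe_shift, 1u << wqe_shift);
        return 0;
    }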
406 enum mlx4_ib_qp_type type, struct mlx4_ib_qp *qp) in set_kernel_sq_size() argument
413 cap->max_inline_data + send_wqe_overhead(type, qp->flags) + in set_kernel_sq_size()
428 send_wqe_overhead(type, qp->flags); in set_kernel_sq_size()
465 qp->sq_signal_bits && BITS_PER_LONG == 64 && in set_kernel_sq_size()
469 qp->sq.wqe_shift = ilog2(64); in set_kernel_sq_size()
471 qp->sq.wqe_shift = ilog2(roundup_pow_of_two(s)); in set_kernel_sq_size()
474 qp->sq_max_wqes_per_wr = DIV_ROUND_UP(s, 1U << qp->sq.wqe_shift); in set_kernel_sq_size()
480 qp->sq_spare_wqes = (2048 >> qp->sq.wqe_shift) + qp->sq_max_wqes_per_wr; in set_kernel_sq_size()
481 qp->sq.wqe_cnt = roundup_pow_of_two(cap->max_send_wr * in set_kernel_sq_size()
482 qp->sq_max_wqes_per_wr + in set_kernel_sq_size()
483 qp->sq_spare_wqes); in set_kernel_sq_size()
485 if (qp->sq.wqe_cnt <= dev->dev->caps.max_wqes) in set_kernel_sq_size()
488 if (qp->sq_max_wqes_per_wr <= 1) in set_kernel_sq_size()
491 ++qp->sq.wqe_shift; in set_kernel_sq_size()
494 qp->sq.max_gs = (min(dev->dev->caps.max_sq_desc_sz, in set_kernel_sq_size()
495 (qp->sq_max_wqes_per_wr << qp->sq.wqe_shift)) - in set_kernel_sq_size()
496 send_wqe_overhead(type, qp->flags)) / in set_kernel_sq_size()
499 qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) + in set_kernel_sq_size()
500 (qp->sq.wqe_cnt << qp->sq.wqe_shift); in set_kernel_sq_size()
501 if (qp->rq.wqe_shift > qp->sq.wqe_shift) { in set_kernel_sq_size()
502 qp->rq.offset = 0; in set_kernel_sq_size()
503 qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift; in set_kernel_sq_size()
505 qp->rq.offset = qp->sq.wqe_cnt << qp->sq.wqe_shift; in set_kernel_sq_size()
506 qp->sq.offset = 0; in set_kernel_sq_size()
509 cap->max_send_wr = qp->sq.max_post = in set_kernel_sq_size()
510 (qp->sq.wqe_cnt - qp->sq_spare_wqes) / qp->sq_max_wqes_per_wr; in set_kernel_sq_size()
511 cap->max_send_sge = min(qp->sq.max_gs, in set_kernel_sq_size()
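Note: once both rings are sized, the QP buffer holds them back to back; the ring with the larger WQE stride is placed at offset 0, which leaves the second ring naturally aligned to its own (smaller) stride. A sketch of the buf_size/offset computation with invented counts:

    #include <stdio.h>

    struct ring { unsigned wqe_cnt, wqe_shift, offset; };

    int main(void)
    {
        struct ring rq = { .wqe_cnt = 128, .wqe_shift = 5 };  /* 32 B entries */
        struct ring sq = { .wqe_cnt = 256, .wqe_shift = 6 };  /* 64 B entries */

        unsigned buf_size = (rq.wqe_cnt << rq.wqe_shift) +
                            (sq.wqe_cnt << sq.wqe_shift);

        if (rq.wqe_shift > sq.wqe_shift) {
            rq.offset = 0;
            sq.offset = rq.wqe_cnt << rq.wqe_shift;
        } else {
            rq.offset = sq.wqe_cnt << sq.wqe_shift;
            sq.offset = 0;
        }

        printf("buf_size=%u rq.offset=%u sq.offset=%u\n",
               buf_size, rq.offset, sq.offset);
        return 0;
    }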
521 struct mlx4_ib_qp *qp, in set_user_sq_size() argument
531 qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count; in set_user_sq_size()
532 qp->sq.wqe_shift = ucmd->log_sq_stride; in set_user_sq_size()
534 qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) + in set_user_sq_size()
535 (qp->sq.wqe_cnt << qp->sq.wqe_shift); in set_user_sq_size()
540 static int alloc_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp) in alloc_proxy_bufs() argument
544 qp->sqp_proxy_rcv = in alloc_proxy_bufs()
545 kmalloc(sizeof (struct mlx4_ib_buf) * qp->rq.wqe_cnt, in alloc_proxy_bufs()
547 if (!qp->sqp_proxy_rcv) in alloc_proxy_bufs()
549 for (i = 0; i < qp->rq.wqe_cnt; i++) { in alloc_proxy_bufs()
550 qp->sqp_proxy_rcv[i].addr = in alloc_proxy_bufs()
553 if (!qp->sqp_proxy_rcv[i].addr) in alloc_proxy_bufs()
555 qp->sqp_proxy_rcv[i].map = in alloc_proxy_bufs()
556 ib_dma_map_single(dev, qp->sqp_proxy_rcv[i].addr, in alloc_proxy_bufs()
565 ib_dma_unmap_single(dev, qp->sqp_proxy_rcv[i].map, in alloc_proxy_bufs()
568 kfree(qp->sqp_proxy_rcv[i].addr); in alloc_proxy_bufs()
570 kfree(qp->sqp_proxy_rcv); in alloc_proxy_bufs()
571 qp->sqp_proxy_rcv = NULL; in alloc_proxy_bufs()
575 static void free_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp) in free_proxy_bufs() argument
579 for (i = 0; i < qp->rq.wqe_cnt; i++) { in free_proxy_bufs()
580 ib_dma_unmap_single(dev, qp->sqp_proxy_rcv[i].map, in free_proxy_bufs()
583 kfree(qp->sqp_proxy_rcv[i].addr); in free_proxy_bufs()
585 kfree(qp->sqp_proxy_rcv); in free_proxy_bufs()
603 struct mlx4_ib_qp *qp; in create_qp_common() local
649 qp = &sqp->qp; in create_qp_common()
651 qp = kzalloc(sizeof (struct mlx4_ib_qp), GFP_KERNEL); in create_qp_common()
652 if (!qp) in create_qp_common()
656 qp = *caller_qp; in create_qp_common()
658 qp->mlx4_ib_qp_type = qp_type; in create_qp_common()
660 mutex_init(&qp->mutex); in create_qp_common()
661 spin_lock_init(&qp->sq.lock); in create_qp_common()
662 spin_lock_init(&qp->rq.lock); in create_qp_common()
663 INIT_LIST_HEAD(&qp->gid_list); in create_qp_common()
664 INIT_LIST_HEAD(&qp->steering_rules); in create_qp_common()
666 qp->state = IB_QPS_RESET; in create_qp_common()
668 qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE); in create_qp_common()
670 err = set_rq_size(dev, &init_attr->cap, !!pd->uobject, qp_has_rq(init_attr), qp); in create_qp_common()
682 qp->sq_no_prefetch = ucmd.sq_no_prefetch; in create_qp_common()
684 err = set_user_sq_size(dev, qp, &ucmd); in create_qp_common()
688 qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, in create_qp_common()
689 qp->buf_size, 0, 0); in create_qp_common()
690 if (IS_ERR(qp->umem)) { in create_qp_common()
691 err = PTR_ERR(qp->umem); in create_qp_common()
695 err = mlx4_mtt_init(dev->dev, ib_umem_page_count(qp->umem), in create_qp_common()
696 ilog2(qp->umem->page_size), &qp->mtt); in create_qp_common()
700 err = mlx4_ib_umem_write_mtt(dev, &qp->mtt, qp->umem); in create_qp_common()
706 ucmd.db_addr, &qp->db); in create_qp_common()
711 qp->sq_no_prefetch = 0; in create_qp_common()
714 qp->flags |= MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK; in create_qp_common()
717 qp->flags |= MLX4_IB_QP_LSO; in create_qp_common()
719 err = set_kernel_sq_size(dev, &init_attr->cap, qp_type, qp); in create_qp_common()
724 err = mlx4_db_alloc(dev->dev, &qp->db, 0); in create_qp_common()
728 *qp->db.db = 0; in create_qp_common()
731 if (mlx4_buf_alloc(dev->dev, qp->buf_size, PAGE_SIZE * 2, &qp->buf)) { in create_qp_common()
736 err = mlx4_mtt_init(dev->dev, qp->buf.npages, qp->buf.page_shift, in create_qp_common()
737 &qp->mtt); in create_qp_common()
741 err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf); in create_qp_common()
745 qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof (u64), GFP_KERNEL); in create_qp_common()
746 qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof (u64), GFP_KERNEL); in create_qp_common()
748 if (!qp->sq.wrid || !qp->rq.wrid) { in create_qp_common()
755 if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER | in create_qp_common()
757 if (alloc_proxy_bufs(pd->device, qp)) { in create_qp_common()
773 err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp); in create_qp_common()
778 qp->mqp.qpn |= (1 << 23); in create_qp_common()
785 qp->doorbell_qpn = swab32(qp->mqp.qpn << 8); in create_qp_common()
787 qp->mqp.event = mlx4_ib_qp_event; in create_qp_common()
789 *caller_qp = qp; in create_qp_common()
796 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI) in create_qp_common()
797 free_proxy_bufs(pd->device, qp); in create_qp_common()
801 mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &qp->db); in create_qp_common()
803 kfree(qp->sq.wrid); in create_qp_common()
804 kfree(qp->rq.wrid); in create_qp_common()
808 mlx4_mtt_cleanup(dev->dev, &qp->mtt); in create_qp_common()
812 ib_umem_release(qp->umem); in create_qp_common()
814 mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf); in create_qp_common()
818 mlx4_db_free(dev->dev, &qp->db); in create_qp_common()
822 kfree(qp); in create_qp_common()
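Note (little-endian hosts): the doorbell_qpn assignment in create_qp_common() above pre-byte-swaps (qpn << 8) once at creation time, so the send doorbell can later be written with a plain 32-bit store. A standalone illustration with a hypothetical QPN and a local byte-swap helper (not the kernel's swab32()):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t byteswap32(uint32_t v)
    {
        return (v >> 24) | ((v >> 8) & 0xff00) |
               ((v << 8) & 0xff0000) | (v << 24);
    }

    int main(void)
    {
        uint32_t qpn = 0x00abcdef;               /* hypothetical QP number */
        uint32_t doorbell = byteswap32(qpn << 8);

        /* On a little-endian CPU, storing 'doorbell' lays the bytes out in
         * memory as the big-endian encoding of (qpn << 8): ab cd ef 00. */
        unsigned char bytes[4];
        for (int i = 0; i < 4; i++)
            bytes[i] = (doorbell >> (8 * i)) & 0xff;
        printf("%02x %02x %02x %02x\n", bytes[0], bytes[1], bytes[2], bytes[3]);
        return 0;
    }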
870 static void del_gid_entries(struct mlx4_ib_qp *qp) in del_gid_entries() argument
874 list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) { in del_gid_entries()
880 static struct mlx4_ib_pd *get_pd(struct mlx4_ib_qp *qp) in get_pd() argument
882 if (qp->ibqp.qp_type == IB_QPT_XRC_TGT) in get_pd()
883 return to_mpd(to_mxrcd(qp->ibqp.xrcd)->pd); in get_pd()
885 return to_mpd(qp->ibqp.pd); in get_pd()
888 static void get_cqs(struct mlx4_ib_qp *qp, in get_cqs() argument
891 switch (qp->ibqp.qp_type) { in get_cqs()
893 *send_cq = to_mcq(to_mxrcd(qp->ibqp.xrcd)->cq); in get_cqs()
897 *send_cq = to_mcq(qp->ibqp.send_cq); in get_cqs()
901 *send_cq = to_mcq(qp->ibqp.send_cq); in get_cqs()
902 *recv_cq = to_mcq(qp->ibqp.recv_cq); in get_cqs()
907 static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, in destroy_qp_common() argument
912 if (qp->state != IB_QPS_RESET) in destroy_qp_common()
913 if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state), in destroy_qp_common()
914 MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp)) in destroy_qp_common()
916 qp->mqp.qpn); in destroy_qp_common()
918 get_cqs(qp, &send_cq, &recv_cq); in destroy_qp_common()
923 __mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn, in destroy_qp_common()
924 qp->ibqp.srq ? to_msrq(qp->ibqp.srq): NULL); in destroy_qp_common()
926 __mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL); in destroy_qp_common()
929 mlx4_qp_remove(dev->dev, &qp->mqp); in destroy_qp_common()
933 mlx4_qp_free(dev->dev, &qp->mqp); in destroy_qp_common()
935 if (!is_sqp(dev, qp) && !is_tunnel_qp(dev, qp)) in destroy_qp_common()
936 mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1); in destroy_qp_common()
938 mlx4_mtt_cleanup(dev->dev, &qp->mtt); in destroy_qp_common()
941 if (qp->rq.wqe_cnt) in destroy_qp_common()
942 mlx4_ib_db_unmap_user(to_mucontext(qp->ibqp.uobject->context), in destroy_qp_common()
943 &qp->db); in destroy_qp_common()
944 ib_umem_release(qp->umem); in destroy_qp_common()
946 kfree(qp->sq.wrid); in destroy_qp_common()
947 kfree(qp->rq.wrid); in destroy_qp_common()
948 if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER | in destroy_qp_common()
950 free_proxy_bufs(&dev->ib_dev, qp); in destroy_qp_common()
951 mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf); in destroy_qp_common()
952 if (qp->rq.wqe_cnt) in destroy_qp_common()
953 mlx4_db_free(dev->dev, &qp->db); in destroy_qp_common()
956 del_gid_entries(qp); in destroy_qp_common()
980 struct mlx4_ib_qp *qp = NULL; in mlx4_ib_create_qp() local
1015 qp = kzalloc(sizeof *qp, GFP_KERNEL); in mlx4_ib_create_qp()
1016 if (!qp) in mlx4_ib_create_qp()
1022 udata, 0, &qp); in mlx4_ib_create_qp()
1026 qp->ibqp.qp_num = qp->mqp.qpn; in mlx4_ib_create_qp()
1027 qp->xrcdn = xrcdn; in mlx4_ib_create_qp()
1040 &qp); in mlx4_ib_create_qp()
1044 qp->port = init_attr->port_num; in mlx4_ib_create_qp()
1045 qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : 1; in mlx4_ib_create_qp()
1054 return &qp->ibqp; in mlx4_ib_create_qp()
1057 int mlx4_ib_destroy_qp(struct ib_qp *qp) in mlx4_ib_destroy_qp() argument
1059 struct mlx4_ib_dev *dev = to_mdev(qp->device); in mlx4_ib_destroy_qp()
1060 struct mlx4_ib_qp *mqp = to_mqp(qp); in mlx4_ib_destroy_qp()
1101 static __be32 to_mlx4_access_flags(struct mlx4_ib_qp *qp, const struct ib_qp_attr *attr, in to_mlx4_access_flags() argument
1111 dest_rd_atomic = qp->resp_depth; in to_mlx4_access_flags()
1116 access_flags = qp->atomic_rd_en; in to_mlx4_access_flags()
1215 static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) in update_mcg_macs() argument
1219 list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) { in update_mcg_macs()
1220 if (!ge->added && mlx4_ib_add_mc(dev, qp, &ge->gid)) { in update_mcg_macs()
1222 ge->port = qp->port; in update_mcg_macs()
1232 struct mlx4_ib_qp *qp = to_mqp(ibqp); in __mlx4_ib_modify_qp() local
1245 (to_mlx4_st(dev, qp->mlx4_ib_qp_type) << 16)); in __mlx4_ib_modify_qp()
1269 if (qp->flags & MLX4_IB_QP_LSO) in __mlx4_ib_modify_qp()
1284 if (qp->rq.wqe_cnt) in __mlx4_ib_modify_qp()
1285 context->rq_size_stride = ilog2(qp->rq.wqe_cnt) << 3; in __mlx4_ib_modify_qp()
1286 context->rq_size_stride |= qp->rq.wqe_shift - 4; in __mlx4_ib_modify_qp()
1288 if (qp->sq.wqe_cnt) in __mlx4_ib_modify_qp()
1289 context->sq_size_stride = ilog2(qp->sq.wqe_cnt) << 3; in __mlx4_ib_modify_qp()
1290 context->sq_size_stride |= qp->sq.wqe_shift - 4; in __mlx4_ib_modify_qp()
1293 context->sq_size_stride |= !!qp->sq_no_prefetch << 7; in __mlx4_ib_modify_qp()
1294 context->xrcd = cpu_to_be32((u32) qp->xrcdn); in __mlx4_ib_modify_qp()
1299 if (qp->ibqp.uobject) in __mlx4_ib_modify_qp()
1316 if (dev->counters[qp->port - 1] != -1) { in __mlx4_ib_modify_qp()
1318 dev->counters[qp->port - 1]; in __mlx4_ib_modify_qp()
1325 if (qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV) in __mlx4_ib_modify_qp()
1334 attr->port_num : qp->port)) in __mlx4_ib_modify_qp()
1364 pd = get_pd(qp); in __mlx4_ib_modify_qp()
1365 get_cqs(qp, &send_cq, &recv_cq); in __mlx4_ib_modify_qp()
1372 if (!qp->ibqp.uobject) in __mlx4_ib_modify_qp()
1403 context->params2 |= to_mlx4_access_flags(qp, attr, attr_mask); in __mlx4_ib_modify_qp()
1419 if (qp->mlx4_ib_qp_type & in __mlx4_ib_modify_qp()
1424 !(qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV) && in __mlx4_ib_modify_qp()
1441 if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) in __mlx4_ib_modify_qp()
1442 context->db_rec_addr = cpu_to_be64(qp->db.dma); in __mlx4_ib_modify_qp()
1449 context->pri_path.sched_queue = (qp->port - 1) << 6; in __mlx4_ib_modify_qp()
1450 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_SMI || in __mlx4_ib_modify_qp()
1451 qp->mlx4_ib_qp_type & in __mlx4_ib_modify_qp()
1454 if (qp->mlx4_ib_qp_type != MLX4_IB_QPT_SMI) in __mlx4_ib_modify_qp()
1457 if (qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV) in __mlx4_ib_modify_qp()
1463 if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) in __mlx4_ib_modify_qp()
1486 for (i = 0; i < qp->sq.wqe_cnt; ++i) { in __mlx4_ib_modify_qp()
1487 ctrl = get_send_wqe(qp, i); in __mlx4_ib_modify_qp()
1489 if (qp->sq_max_wqes_per_wr == 1) in __mlx4_ib_modify_qp()
1490 ctrl->fence_size = 1 << (qp->sq.wqe_shift - 4); in __mlx4_ib_modify_qp()
1492 stamp_send_wqe(qp, i, 1 << qp->sq.wqe_shift); in __mlx4_ib_modify_qp()
1496 err = mlx4_qp_modify(dev->dev, &qp->mtt, to_mlx4_state(cur_state), in __mlx4_ib_modify_qp()
1498 sqd_event, &qp->mqp); in __mlx4_ib_modify_qp()
1502 qp->state = new_state; in __mlx4_ib_modify_qp()
1505 qp->atomic_rd_en = attr->qp_access_flags; in __mlx4_ib_modify_qp()
1507 qp->resp_depth = attr->max_dest_rd_atomic; in __mlx4_ib_modify_qp()
1509 qp->port = attr->port_num; in __mlx4_ib_modify_qp()
1510 update_mcg_macs(dev, qp); in __mlx4_ib_modify_qp()
1513 qp->alt_port = attr->alt_port_num; in __mlx4_ib_modify_qp()
1515 if (is_sqp(dev, qp)) in __mlx4_ib_modify_qp()
1516 store_sqp_attrs(to_msqp(qp), attr, attr_mask); in __mlx4_ib_modify_qp()
1522 if (is_qp0(dev, qp)) { in __mlx4_ib_modify_qp()
1524 if (mlx4_INIT_PORT(dev->dev, qp->port)) in __mlx4_ib_modify_qp()
1526 qp->port); in __mlx4_ib_modify_qp()
1530 mlx4_CLOSE_PORT(dev->dev, qp->port); in __mlx4_ib_modify_qp()
1538 mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn, in __mlx4_ib_modify_qp()
1541 mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL); in __mlx4_ib_modify_qp()
1543 qp->rq.head = 0; in __mlx4_ib_modify_qp()
1544 qp->rq.tail = 0; in __mlx4_ib_modify_qp()
1545 qp->sq.head = 0; in __mlx4_ib_modify_qp()
1546 qp->sq.tail = 0; in __mlx4_ib_modify_qp()
1547 qp->sq_next_wqe = 0; in __mlx4_ib_modify_qp()
1548 if (qp->rq.wqe_cnt) in __mlx4_ib_modify_qp()
1549 *qp->db.db = 0; in __mlx4_ib_modify_qp()
1561 struct mlx4_ib_qp *qp = to_mqp(ibqp); in mlx4_ib_modify_qp() local
1565 mutex_lock(&qp->mutex); in mlx4_ib_modify_qp()
1567 cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state; in mlx4_ib_modify_qp()
1594 int p = attr_mask & IB_QP_PORT ? attr->port_num : qp->port; in mlx4_ib_modify_qp()
1630 mutex_unlock(&qp->mutex); in mlx4_ib_modify_qp()
1638 struct mlx4_ib_dev *mdev = to_mdev(sqp->qp.ibqp.device); in build_sriov_qp0_header()
1660 if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER) in build_sriov_qp0_header()
1665 if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER) { in build_sriov_qp0_header()
1682 ib_get_cached_pkey(ib_dev, sqp->qp.port, 0, &pkey); in build_sriov_qp0_header()
1684 if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_TUN_SMI_OWNER) in build_sriov_qp0_header()
1688 cpu_to_be32(mdev->dev->caps.qp0_tunnel[sqp->qp.port - 1]); in build_sriov_qp0_header()
1691 if (mlx4_get_parav_qkey(mdev->dev, sqp->qp.mqp.qpn, &qkey)) in build_sriov_qp0_header()
1694 sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.mqp.qpn); in build_sriov_qp0_header()
1745 struct ib_device *ib_dev = sqp->qp.ibqp.device; in build_mlx_header()
1766 is_eth = rdma_port_get_link_layer(sqp->qp.ibqp.device, sqp->qp.port) == IB_LINK_LAYER_ETHERNET; in build_mlx_header()
1774 to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1]. in build_mlx_header()
1777 to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1]. in build_mlx_header()
1810 to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1]. in build_mlx_header()
1813 to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1]. in build_mlx_header()
1827 mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MLX4_WQE_MLX_VL15 : 0) | in build_mlx_header()
1858 ndev = to_mdev(sqp->qp.ibqp.device)->iboe.netdevs[sqp->qp.port - 1]; in build_mlx_header()
1872 sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0; in build_mlx_header()
1877 if (!sqp->qp.ibqp.qp_num) in build_mlx_header()
1878 ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index, &pkey); in build_mlx_header()
1880 ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->wr.ud.pkey_index, &pkey); in build_mlx_header()
1886 sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num); in build_mlx_header()
2163 struct mlx4_ib_qp *qp, unsigned *lso_seg_len, in build_lso_seg() argument
2171 if (unlikely(!(qp->flags & MLX4_IB_QP_LSO) && in build_lso_seg()
2172 wr->num_sge > qp->sq.max_gs - (halign >> 4))) in build_lso_seg()
2208 struct mlx4_ib_qp *qp = to_mqp(ibqp); in mlx4_ib_post_send() local
2225 spin_lock_irqsave(&qp->sq.lock, flags); in mlx4_ib_post_send()
2227 ind = qp->sq_next_wqe; in mlx4_ib_post_send()
2233 if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { in mlx4_ib_post_send()
2239 if (unlikely(wr->num_sge > qp->sq.max_gs)) { in mlx4_ib_post_send()
2245 ctrl = wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1)); in mlx4_ib_post_send()
2246 qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] = wr->wr_id; in mlx4_ib_post_send()
2256 qp->sq_signal_bits; in mlx4_ib_post_send()
2263 switch (qp->mlx4_ib_qp_type) { in mlx4_ib_post_send()
2334 err = build_sriov_qp0_header(to_msqp(qp), wr, ctrl, &seglen); in mlx4_ib_post_send()
2357 err = build_lso_seg(wqe, wr, qp, &seglen, &lso_hdr_sz, &blh); in mlx4_ib_post_send()
2374 err = build_sriov_qp0_header(to_msqp(qp), wr, ctrl, &seglen); in mlx4_ib_post_send()
2409 err = build_mlx_header(to_msqp(qp), wr, ctrl, &seglen); in mlx4_ib_post_send()
2434 if (unlikely(qp->mlx4_ib_qp_type == MLX4_IB_QPT_SMI || in mlx4_ib_post_send()
2435 qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI || in mlx4_ib_post_send()
2436 qp->mlx4_ib_qp_type & in mlx4_ib_post_send()
2470 (ind & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0) | blh; in mlx4_ib_post_send()
2472 stamp = ind + qp->sq_spare_wqes; in mlx4_ib_post_send()
2473 ind += DIV_ROUND_UP(size * 16, 1U << qp->sq.wqe_shift); in mlx4_ib_post_send()
2485 stamp_send_wqe(qp, stamp, size * 16); in mlx4_ib_post_send()
2486 ind = pad_wraparound(qp, ind); in mlx4_ib_post_send()
2492 qp->sq.head += nreq; in mlx4_ib_post_send()
2500 writel(qp->doorbell_qpn, in mlx4_ib_post_send()
2509 stamp_send_wqe(qp, stamp, size * 16); in mlx4_ib_post_send()
2511 ind = pad_wraparound(qp, ind); in mlx4_ib_post_send()
2512 qp->sq_next_wqe = ind; in mlx4_ib_post_send()
2515 spin_unlock_irqrestore(&qp->sq.lock, flags); in mlx4_ib_post_send()
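Note: in mlx4_ib_post_send() a WQE's size is counted in 16-byte units and may span several basic ring slots, so the producer index advances by DIV_ROUND_UP(size * 16, 1 << wqe_shift) per work request. A standalone sketch of that bookkeeping with invented values:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned wqe_shift = 6;     /* 64-byte basic slots (assumed) */
        unsigned ind = 5;           /* current producer index        */
        unsigned size = 7;          /* WQE size in 16-byte units     */

        unsigned slots = DIV_ROUND_UP(size * 16, 1u << wqe_shift);
        ind += slots;               /* next WR starts here           */

        printf("WQE of %u*16 bytes uses %u slot(s); next ind = %u\n",
               size, slots, ind);
        return 0;
    }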
2523 struct mlx4_ib_qp *qp = to_mqp(ibqp); in mlx4_ib_post_recv() local
2532 max_gs = qp->rq.max_gs; in mlx4_ib_post_recv()
2533 spin_lock_irqsave(&qp->rq.lock, flags); in mlx4_ib_post_recv()
2535 ind = qp->rq.head & (qp->rq.wqe_cnt - 1); in mlx4_ib_post_recv()
2538 if (mlx4_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { in mlx4_ib_post_recv()
2544 if (unlikely(wr->num_sge > qp->rq.max_gs)) { in mlx4_ib_post_recv()
2550 scat = get_recv_wqe(qp, ind); in mlx4_ib_post_recv()
2552 if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER | in mlx4_ib_post_recv()
2555 qp->sqp_proxy_rcv[ind].map, in mlx4_ib_post_recv()
2562 scat->addr = cpu_to_be64(qp->sqp_proxy_rcv[ind].map); in mlx4_ib_post_recv()
2576 qp->rq.wrid[ind] = wr->wr_id; in mlx4_ib_post_recv()
2578 ind = (ind + 1) & (qp->rq.wqe_cnt - 1); in mlx4_ib_post_recv()
2583 qp->rq.head += nreq; in mlx4_ib_post_recv()
2591 *qp->db.db = cpu_to_be32(qp->rq.head & 0xffff); in mlx4_ib_post_recv()
2594 spin_unlock_irqrestore(&qp->rq.lock, flags); in mlx4_ib_post_recv()
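Note: mlx4_ib_post_recv() wraps the WQE index with a power-of-two mask and, after posting, publishes the low 16 bits of the new head through the doorbell record (stored big-endian by the driver). A sketch of the index and doorbell-record arithmetic with invented counts:

    #include <stdio.h>

    int main(void)
    {
        unsigned wqe_cnt = 8;   /* power-of-two ring size (assumed) */
        unsigned head = 6;      /* WRs posted so far                */
        unsigned nreq = 3;      /* WRs posted by this call          */

        unsigned ind = head & (wqe_cnt - 1);
        for (unsigned i = 0; i < nreq; i++) {
            printf("WR %u lands in slot %u\n", i, ind);
            ind = (ind + 1) & (wqe_cnt - 1);
        }

        head += nreq;
        unsigned db = head & 0xffff;   /* value placed in the doorbell record */
        printf("new head = %u, doorbell record = %u\n", head, db);
        return 0;
    }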
2678 struct mlx4_ib_qp *qp = to_mqp(ibqp); in mlx4_ib_query_qp() local
2683 mutex_lock(&qp->mutex); in mlx4_ib_query_qp()
2685 if (qp->state == IB_QPS_RESET) { in mlx4_ib_query_qp()
2690 err = mlx4_qp_query(dev->dev, &qp->mqp, &context); in mlx4_ib_query_qp()
2698 qp->state = to_ib_qp_state(mlx4_state); in mlx4_ib_query_qp()
2699 qp_attr->qp_state = qp->state; in mlx4_ib_query_qp()
2710 if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) { in mlx4_ib_query_qp()
2719 qp_attr->port_num = qp->port; in mlx4_ib_query_qp()
2739 qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt; in mlx4_ib_query_qp()
2740 qp_attr->cap.max_recv_sge = qp->rq.max_gs; in mlx4_ib_query_qp()
2743 qp_attr->cap.max_send_wr = qp->sq.wqe_cnt; in mlx4_ib_query_qp()
2744 qp_attr->cap.max_send_sge = qp->sq.max_gs; in mlx4_ib_query_qp()
2759 if (qp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK) in mlx4_ib_query_qp()
2762 if (qp->flags & MLX4_IB_QP_LSO) in mlx4_ib_query_qp()
2766 qp->sq_signal_bits == cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE) ? in mlx4_ib_query_qp()
2770 mutex_unlock(&qp->mutex); in mlx4_ib_query_qp()