Lines Matching refs:qp
61 struct mlx4_ib_qp qp; member
89 return container_of(mqp, struct mlx4_ib_sqp, qp); in to_msqp()
92 static int is_sqp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) in is_sqp() argument
94 return qp->mqp.qpn >= dev->dev->caps.sqp_start && in is_sqp()
95 qp->mqp.qpn <= dev->dev->caps.sqp_start + 3; in is_sqp()
98 static int is_qp0(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) in is_qp0() argument
100 return qp->mqp.qpn >= dev->dev->caps.sqp_start && in is_qp0()
101 qp->mqp.qpn <= dev->dev->caps.sqp_start + 1; in is_qp0()
104 static void *get_wqe(struct mlx4_ib_qp *qp, int offset) in get_wqe() argument
106 return mlx4_buf_offset(&qp->buf, offset); in get_wqe()
109 static void *get_recv_wqe(struct mlx4_ib_qp *qp, int n) in get_recv_wqe() argument
111 return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift)); in get_recv_wqe()
114 static void *get_send_wqe(struct mlx4_ib_qp *qp, int n) in get_send_wqe() argument
116 return get_wqe(qp, qp->sq.offset + (n << qp->sq.wqe_shift)); in get_send_wqe()
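The three helpers above (get_wqe(), get_recv_wqe(), get_send_wqe()) resolve a work-queue entry to an address inside the QP's single buffer: each queue records a byte offset into qp->buf and a power-of-two stride, so entry n lives at offset + (n << wqe_shift). A minimal standalone model of that arithmetic, using hypothetical names rather than the driver's types:

#include <stddef.h>
#include <stdint.h>

/* Hypothetical mirror of the per-queue fields consulted by
 * get_recv_wqe() and get_send_wqe(): a byte offset into the shared QP
 * buffer and the log2 of the WQE stride. */
struct wq_layout {
	uint32_t offset;
	uint32_t wqe_shift;
};

/* Entry n of a queue starts at its region offset plus n strides. */
static inline void *wqe_addr(void *qp_buf, const struct wq_layout *wq, int n)
{
	return (char *)qp_buf + wq->offset + ((size_t)n << wq->wqe_shift);
}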
128 static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n, int size) in stamp_send_wqe() argument
138 if (qp->sq_max_wqes_per_wr > 1) { in stamp_send_wqe()
139 s = roundup(size, 1U << qp->sq.wqe_shift); in stamp_send_wqe()
141 ind = (i >> qp->sq.wqe_shift) + n; in stamp_send_wqe()
142 stamp = ind & qp->sq.wqe_cnt ? cpu_to_be32(0x7fffffff) : in stamp_send_wqe()
144 buf = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1)); in stamp_send_wqe()
145 wqe = buf + (i & ((1 << qp->sq.wqe_shift) - 1)); in stamp_send_wqe()
149 ctrl = buf = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1)); in stamp_send_wqe()
158 static void post_nop_wqe(struct mlx4_ib_qp *qp, int n, int size) in post_nop_wqe() argument
165 ctrl = wqe = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1)); in post_nop_wqe()
168 if (qp->ibqp.qp_type == IB_QPT_UD) { in post_nop_wqe()
172 av->port_pd = cpu_to_be32((qp->port << 24) | to_mpd(qp->ibqp.pd)->pdn); in post_nop_wqe()
190 (n & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0); in post_nop_wqe()
192 stamp_send_wqe(qp, n + qp->sq_spare_wqes, size); in post_nop_wqe()
196 static inline unsigned pad_wraparound(struct mlx4_ib_qp *qp, int ind) in pad_wraparound() argument
198 unsigned s = qp->sq.wqe_cnt - (ind & (qp->sq.wqe_cnt - 1)); in pad_wraparound()
199 if (unlikely(s < qp->sq_max_wqes_per_wr)) { in pad_wraparound()
200 post_nop_wqe(qp, ind, s << qp->sq.wqe_shift); in pad_wraparound()
206 static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type) in mlx4_ib_qp_event() argument
209 struct ib_qp *ibqp = &to_mibqp(qp)->ibqp; in mlx4_ib_qp_event()
212 to_mibqp(qp)->port = to_mibqp(qp)->alt_port; in mlx4_ib_qp_event()
216 event.element.qp = ibqp; in mlx4_ib_qp_event()
244 "on QP %06x\n", type, qp->qpn); in mlx4_ib_qp_event()
289 int is_user, int has_srq, struct mlx4_ib_qp *qp) in set_rq_size() argument
301 qp->rq.wqe_cnt = qp->rq.max_gs = 0; in set_rq_size()
307 qp->rq.wqe_cnt = roundup_pow_of_two(max(1U, cap->max_recv_wr)); in set_rq_size()
308 qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge)); in set_rq_size()
309 qp->rq.wqe_shift = ilog2(qp->rq.max_gs * sizeof (struct mlx4_wqe_data_seg)); in set_rq_size()
312 cap->max_recv_wr = qp->rq.max_post = qp->rq.wqe_cnt; in set_rq_size()
313 cap->max_recv_sge = qp->rq.max_gs; in set_rq_size()
319 enum ib_qp_type type, struct mlx4_ib_qp *qp) in set_kernel_sq_size() argument
326 cap->max_inline_data + send_wqe_overhead(type, qp->flags) + in set_kernel_sq_size()
340 send_wqe_overhead(type, qp->flags); in set_kernel_sq_size()
377 qp->sq_signal_bits && BITS_PER_LONG == 64 && in set_kernel_sq_size()
379 qp->sq.wqe_shift = ilog2(64); in set_kernel_sq_size()
381 qp->sq.wqe_shift = ilog2(roundup_pow_of_two(s)); in set_kernel_sq_size()
384 qp->sq_max_wqes_per_wr = DIV_ROUND_UP(s, 1U << qp->sq.wqe_shift); in set_kernel_sq_size()
390 qp->sq_spare_wqes = (2048 >> qp->sq.wqe_shift) + qp->sq_max_wqes_per_wr; in set_kernel_sq_size()
391 qp->sq.wqe_cnt = roundup_pow_of_two(cap->max_send_wr * in set_kernel_sq_size()
392 qp->sq_max_wqes_per_wr + in set_kernel_sq_size()
393 qp->sq_spare_wqes); in set_kernel_sq_size()
395 if (qp->sq.wqe_cnt <= dev->dev->caps.max_wqes) in set_kernel_sq_size()
398 if (qp->sq_max_wqes_per_wr <= 1) in set_kernel_sq_size()
401 ++qp->sq.wqe_shift; in set_kernel_sq_size()
404 qp->sq.max_gs = (min(dev->dev->caps.max_sq_desc_sz, in set_kernel_sq_size()
405 (qp->sq_max_wqes_per_wr << qp->sq.wqe_shift)) - in set_kernel_sq_size()
406 send_wqe_overhead(type, qp->flags)) / in set_kernel_sq_size()
409 qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) + in set_kernel_sq_size()
410 (qp->sq.wqe_cnt << qp->sq.wqe_shift); in set_kernel_sq_size()
411 if (qp->rq.wqe_shift > qp->sq.wqe_shift) { in set_kernel_sq_size()
412 qp->rq.offset = 0; in set_kernel_sq_size()
413 qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift; in set_kernel_sq_size()
415 qp->rq.offset = qp->sq.wqe_cnt << qp->sq.wqe_shift; in set_kernel_sq_size()
416 qp->sq.offset = 0; in set_kernel_sq_size()
419 cap->max_send_wr = qp->sq.max_post = in set_kernel_sq_size()
420 (qp->sq.wqe_cnt - qp->sq_spare_wqes) / qp->sq_max_wqes_per_wr; in set_kernel_sq_size()
421 cap->max_send_sge = min(qp->sq.max_gs, in set_kernel_sq_size()
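The sizing code above (the source lines 411-416 shown in this listing) packs both rings into one buffer and places whichever queue has the larger stride at offset 0, presumably so each ring stays naturally aligned to its own WQE size; with power-of-two counts, the first region's size is then a multiple of the second ring's stride. A standalone sketch of that layout decision, with hypothetical names rather than the driver's structures:

#include <stdint.h>

struct ring { uint32_t wqe_cnt, wqe_shift, offset; };

/* Hypothetical sketch of the qp->buf layout chosen in
 * set_kernel_sq_size(): the ring with the larger stride goes first so
 * that both rings start at a multiple of their own WQE size. */
static uint32_t layout_qp_buf(struct ring *rq, struct ring *sq)
{
	if (rq->wqe_shift > sq->wqe_shift) {
		rq->offset = 0;
		sq->offset = rq->wqe_cnt << rq->wqe_shift;
	} else {
		rq->offset = sq->wqe_cnt << sq->wqe_shift;
		sq->offset = 0;
	}
	/* Total buffer size covering both rings. */
	return (rq->wqe_cnt << rq->wqe_shift) + (sq->wqe_cnt << sq->wqe_shift);
}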
431 struct mlx4_ib_qp *qp, in set_user_sq_size() argument
441 qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count; in set_user_sq_size()
442 qp->sq.wqe_shift = ucmd->log_sq_stride; in set_user_sq_size()
444 qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) + in set_user_sq_size()
445 (qp->sq.wqe_cnt << qp->sq.wqe_shift); in set_user_sq_size()
452 struct ib_udata *udata, int sqpn, struct mlx4_ib_qp *qp) in create_qp_common() argument
457 mutex_init(&qp->mutex); in create_qp_common()
458 spin_lock_init(&qp->sq.lock); in create_qp_common()
459 spin_lock_init(&qp->rq.lock); in create_qp_common()
461 qp->state = IB_QPS_RESET; in create_qp_common()
463 qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE); in create_qp_common()
465 err = set_rq_size(dev, &init_attr->cap, !!pd->uobject, !!init_attr->srq, qp); in create_qp_common()
477 qp->sq_no_prefetch = ucmd.sq_no_prefetch; in create_qp_common()
479 err = set_user_sq_size(dev, qp, &ucmd); in create_qp_common()
483 qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, in create_qp_common()
484 qp->buf_size, 0, 0); in create_qp_common()
485 if (IS_ERR(qp->umem)) { in create_qp_common()
486 err = PTR_ERR(qp->umem); in create_qp_common()
490 err = mlx4_mtt_init(dev->dev, ib_umem_page_count(qp->umem), in create_qp_common()
491 ilog2(qp->umem->page_size), &qp->mtt); in create_qp_common()
495 err = mlx4_ib_umem_write_mtt(dev, &qp->mtt, qp->umem); in create_qp_common()
501 ucmd.db_addr, &qp->db); in create_qp_common()
506 qp->sq_no_prefetch = 0; in create_qp_common()
509 qp->flags |= MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK; in create_qp_common()
512 qp->flags |= MLX4_IB_QP_LSO; in create_qp_common()
514 err = set_kernel_sq_size(dev, &init_attr->cap, init_attr->qp_type, qp); in create_qp_common()
519 err = mlx4_db_alloc(dev->dev, &qp->db, 0); in create_qp_common()
523 *qp->db.db = 0; in create_qp_common()
526 if (mlx4_buf_alloc(dev->dev, qp->buf_size, PAGE_SIZE * 2, &qp->buf)) { in create_qp_common()
531 err = mlx4_mtt_init(dev->dev, qp->buf.npages, qp->buf.page_shift, in create_qp_common()
532 &qp->mtt); in create_qp_common()
536 err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf); in create_qp_common()
540 qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof (u64), GFP_KERNEL); in create_qp_common()
541 qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof (u64), GFP_KERNEL); in create_qp_common()
543 if (!qp->sq.wrid || !qp->rq.wrid) { in create_qp_common()
557 err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp); in create_qp_common()
566 qp->doorbell_qpn = swab32(qp->mqp.qpn << 8); in create_qp_common()
568 qp->mqp.event = mlx4_ib_qp_event; in create_qp_common()
580 &qp->db); in create_qp_common()
582 kfree(qp->sq.wrid); in create_qp_common()
583 kfree(qp->rq.wrid); in create_qp_common()
587 mlx4_mtt_cleanup(dev->dev, &qp->mtt); in create_qp_common()
591 ib_umem_release(qp->umem); in create_qp_common()
593 mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf); in create_qp_common()
597 mlx4_db_free(dev->dev, &qp->db); in create_qp_common()
643 static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, in destroy_qp_common() argument
648 if (qp->state != IB_QPS_RESET) in destroy_qp_common()
649 if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state), in destroy_qp_common()
650 MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp)) in destroy_qp_common()
652 qp->mqp.qpn); in destroy_qp_common()
654 send_cq = to_mcq(qp->ibqp.send_cq); in destroy_qp_common()
655 recv_cq = to_mcq(qp->ibqp.recv_cq); in destroy_qp_common()
660 __mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn, in destroy_qp_common()
661 qp->ibqp.srq ? to_msrq(qp->ibqp.srq): NULL); in destroy_qp_common()
663 __mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL); in destroy_qp_common()
666 mlx4_qp_remove(dev->dev, &qp->mqp); in destroy_qp_common()
670 mlx4_qp_free(dev->dev, &qp->mqp); in destroy_qp_common()
672 if (!is_sqp(dev, qp)) in destroy_qp_common()
673 mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1); in destroy_qp_common()
675 mlx4_mtt_cleanup(dev->dev, &qp->mtt); in destroy_qp_common()
678 if (!qp->ibqp.srq) in destroy_qp_common()
679 mlx4_ib_db_unmap_user(to_mucontext(qp->ibqp.uobject->context), in destroy_qp_common()
680 &qp->db); in destroy_qp_common()
681 ib_umem_release(qp->umem); in destroy_qp_common()
683 kfree(qp->sq.wrid); in destroy_qp_common()
684 kfree(qp->rq.wrid); in destroy_qp_common()
685 mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf); in destroy_qp_common()
686 if (!qp->ibqp.srq) in destroy_qp_common()
687 mlx4_db_free(dev->dev, &qp->db); in destroy_qp_common()
697 struct mlx4_ib_qp *qp; in mlx4_ib_create_qp() local
717 qp = kzalloc(sizeof *qp, GFP_KERNEL); in mlx4_ib_create_qp()
718 if (!qp) in mlx4_ib_create_qp()
721 err = create_qp_common(dev, pd, init_attr, udata, 0, qp); in mlx4_ib_create_qp()
723 kfree(qp); in mlx4_ib_create_qp()
727 qp->ibqp.qp_num = qp->mqp.qpn; in mlx4_ib_create_qp()
742 qp = &sqp->qp; in mlx4_ib_create_qp()
748 qp); in mlx4_ib_create_qp()
754 qp->port = init_attr->port_num; in mlx4_ib_create_qp()
755 qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : 1; in mlx4_ib_create_qp()
764 return &qp->ibqp; in mlx4_ib_create_qp()
767 int mlx4_ib_destroy_qp(struct ib_qp *qp) in mlx4_ib_destroy_qp() argument
769 struct mlx4_ib_dev *dev = to_mdev(qp->device); in mlx4_ib_destroy_qp()
770 struct mlx4_ib_qp *mqp = to_mqp(qp); in mlx4_ib_destroy_qp()
775 destroy_qp_common(dev, mqp, !!qp->pd->uobject); in mlx4_ib_destroy_qp()
797 static __be32 to_mlx4_access_flags(struct mlx4_ib_qp *qp, const struct ib_qp_attr *attr, in to_mlx4_access_flags() argument
807 dest_rd_atomic = qp->resp_depth; in to_mlx4_access_flags()
812 access_flags = qp->atomic_rd_en; in to_mlx4_access_flags()
884 struct mlx4_ib_qp *qp = to_mqp(ibqp); in __mlx4_ib_modify_qp() local
918 if (qp->flags & MLX4_IB_QP_LSO) in __mlx4_ib_modify_qp()
933 if (qp->rq.wqe_cnt) in __mlx4_ib_modify_qp()
934 context->rq_size_stride = ilog2(qp->rq.wqe_cnt) << 3; in __mlx4_ib_modify_qp()
935 context->rq_size_stride |= qp->rq.wqe_shift - 4; in __mlx4_ib_modify_qp()
937 if (qp->sq.wqe_cnt) in __mlx4_ib_modify_qp()
938 context->sq_size_stride = ilog2(qp->sq.wqe_cnt) << 3; in __mlx4_ib_modify_qp()
939 context->sq_size_stride |= qp->sq.wqe_shift - 4; in __mlx4_ib_modify_qp()
942 context->sq_size_stride |= !!qp->sq_no_prefetch << 7; in __mlx4_ib_modify_qp()
944 if (qp->ibqp.uobject) in __mlx4_ib_modify_qp()
967 attr_mask & IB_QP_PORT ? attr->port_num : qp->port)) in __mlx4_ib_modify_qp()
1001 if (!qp->ibqp.uobject) in __mlx4_ib_modify_qp()
1034 context->params2 |= to_mlx4_access_flags(qp, attr, attr_mask); in __mlx4_ib_modify_qp()
1059 context->db_rec_addr = cpu_to_be64(qp->db.dma); in __mlx4_ib_modify_qp()
1065 context->pri_path.sched_queue = (qp->port - 1) << 6; in __mlx4_ib_modify_qp()
1066 if (is_qp0(dev, qp)) in __mlx4_ib_modify_qp()
1091 for (i = 0; i < qp->sq.wqe_cnt; ++i) { in __mlx4_ib_modify_qp()
1092 ctrl = get_send_wqe(qp, i); in __mlx4_ib_modify_qp()
1094 if (qp->sq_max_wqes_per_wr == 1) in __mlx4_ib_modify_qp()
1095 ctrl->fence_size = 1 << (qp->sq.wqe_shift - 4); in __mlx4_ib_modify_qp()
1097 stamp_send_wqe(qp, i, 1 << qp->sq.wqe_shift); in __mlx4_ib_modify_qp()
1101 err = mlx4_qp_modify(dev->dev, &qp->mtt, to_mlx4_state(cur_state), in __mlx4_ib_modify_qp()
1103 sqd_event, &qp->mqp); in __mlx4_ib_modify_qp()
1107 qp->state = new_state; in __mlx4_ib_modify_qp()
1110 qp->atomic_rd_en = attr->qp_access_flags; in __mlx4_ib_modify_qp()
1112 qp->resp_depth = attr->max_dest_rd_atomic; in __mlx4_ib_modify_qp()
1114 qp->port = attr->port_num; in __mlx4_ib_modify_qp()
1116 qp->alt_port = attr->alt_port_num; in __mlx4_ib_modify_qp()
1118 if (is_sqp(dev, qp)) in __mlx4_ib_modify_qp()
1119 store_sqp_attrs(to_msqp(qp), attr, attr_mask); in __mlx4_ib_modify_qp()
1125 if (is_qp0(dev, qp)) { in __mlx4_ib_modify_qp()
1127 if (mlx4_INIT_PORT(dev->dev, qp->port)) in __mlx4_ib_modify_qp()
1129 qp->port); in __mlx4_ib_modify_qp()
1133 mlx4_CLOSE_PORT(dev->dev, qp->port); in __mlx4_ib_modify_qp()
1141 mlx4_ib_cq_clean(to_mcq(ibqp->recv_cq), qp->mqp.qpn, in __mlx4_ib_modify_qp()
1144 mlx4_ib_cq_clean(to_mcq(ibqp->send_cq), qp->mqp.qpn, NULL); in __mlx4_ib_modify_qp()
1146 qp->rq.head = 0; in __mlx4_ib_modify_qp()
1147 qp->rq.tail = 0; in __mlx4_ib_modify_qp()
1148 qp->sq.head = 0; in __mlx4_ib_modify_qp()
1149 qp->sq.tail = 0; in __mlx4_ib_modify_qp()
1150 qp->sq_next_wqe = 0; in __mlx4_ib_modify_qp()
1152 *qp->db.db = 0; in __mlx4_ib_modify_qp()
1164 struct mlx4_ib_qp *qp = to_mqp(ibqp); in mlx4_ib_modify_qp() local
1168 mutex_lock(&qp->mutex); in mlx4_ib_modify_qp()
1170 cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state; in mlx4_ib_modify_qp()
1182 int p = attr_mask & IB_QP_PORT ? attr->port_num : qp->port; in mlx4_ib_modify_qp()
1205 mutex_unlock(&qp->mutex); in mlx4_ib_modify_qp()
1212 struct ib_device *ib_dev = &to_mdev(sqp->qp.ibqp.device)->ib_dev; in build_mlx_header()
1245 mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MLX4_WQE_MLX_VL15 : 0) | in build_mlx_header()
1265 sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0; in build_mlx_header()
1269 if (!sqp->qp.ibqp.qp_num) in build_mlx_header()
1270 ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index, &pkey); in build_mlx_header()
1272 ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->wr.ud.pkey_index, &pkey); in build_mlx_header()
1278 sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num); in build_mlx_header()
1465 struct mlx4_ib_qp *qp, unsigned *lso_seg_len, in build_lso_seg() argument
1477 if (unlikely(!(qp->flags & MLX4_IB_QP_LSO) && in build_lso_seg()
1478 wr->num_sge > qp->sq.max_gs - (halign >> 4))) in build_lso_seg()
1507 struct mlx4_ib_qp *qp = to_mqp(ibqp); in mlx4_ib_post_send() local
1523 spin_lock_irqsave(&qp->sq.lock, flags); in mlx4_ib_post_send()
1525 ind = qp->sq_next_wqe; in mlx4_ib_post_send()
1530 if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { in mlx4_ib_post_send()
1536 if (unlikely(wr->num_sge > qp->sq.max_gs)) { in mlx4_ib_post_send()
1542 ctrl = wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1)); in mlx4_ib_post_send()
1543 qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] = wr->wr_id; in mlx4_ib_post_send()
1553 qp->sq_signal_bits; in mlx4_ib_post_send()
1611 err = build_lso_seg(wqe, wr, qp, &seglen, &lso_hdr_sz); in mlx4_ib_post_send()
1624 err = build_mlx_header(to_msqp(qp), wr, ctrl, &seglen); in mlx4_ib_post_send()
1649 if (unlikely(qp->ibqp.qp_type == IB_QPT_SMI || in mlx4_ib_post_send()
1650 qp->ibqp.qp_type == IB_QPT_GSI)) { in mlx4_ib_post_send()
1682 (ind & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0); in mlx4_ib_post_send()
1684 stamp = ind + qp->sq_spare_wqes; in mlx4_ib_post_send()
1685 ind += DIV_ROUND_UP(size * 16, 1U << qp->sq.wqe_shift); in mlx4_ib_post_send()
1697 stamp_send_wqe(qp, stamp, size * 16); in mlx4_ib_post_send()
1698 ind = pad_wraparound(qp, ind); in mlx4_ib_post_send()
1704 qp->sq.head += nreq; in mlx4_ib_post_send()
1712 writel(qp->doorbell_qpn, in mlx4_ib_post_send()
1721 stamp_send_wqe(qp, stamp, size * 16); in mlx4_ib_post_send()
1723 ind = pad_wraparound(qp, ind); in mlx4_ib_post_send()
1724 qp->sq_next_wqe = ind; in mlx4_ib_post_send()
1727 spin_unlock_irqrestore(&qp->sq.lock, flags); in mlx4_ib_post_send()
1735 struct mlx4_ib_qp *qp = to_mqp(ibqp); in mlx4_ib_post_recv() local
1743 spin_lock_irqsave(&qp->rq.lock, flags); in mlx4_ib_post_recv()
1745 ind = qp->rq.head & (qp->rq.wqe_cnt - 1); in mlx4_ib_post_recv()
1748 if (mlx4_wq_overflow(&qp->rq, nreq, qp->ibqp.send_cq)) { in mlx4_ib_post_recv()
1754 if (unlikely(wr->num_sge > qp->rq.max_gs)) { in mlx4_ib_post_recv()
1760 scat = get_recv_wqe(qp, ind); in mlx4_ib_post_recv()
1765 if (i < qp->rq.max_gs) { in mlx4_ib_post_recv()
1771 qp->rq.wrid[ind] = wr->wr_id; in mlx4_ib_post_recv()
1773 ind = (ind + 1) & (qp->rq.wqe_cnt - 1); in mlx4_ib_post_recv()
1778 qp->rq.head += nreq; in mlx4_ib_post_recv()
1786 *qp->db.db = cpu_to_be32(qp->rq.head & 0xffff); in mlx4_ib_post_recv()
1789 spin_unlock_irqrestore(&qp->rq.lock, flags); in mlx4_ib_post_recv()
1863 struct mlx4_ib_qp *qp = to_mqp(ibqp); in mlx4_ib_query_qp() local
1868 mutex_lock(&qp->mutex); in mlx4_ib_query_qp()
1870 if (qp->state == IB_QPS_RESET) { in mlx4_ib_query_qp()
1875 err = mlx4_qp_query(dev->dev, &qp->mqp, &context); in mlx4_ib_query_qp()
1883 qp->state = to_ib_qp_state(mlx4_state); in mlx4_ib_query_qp()
1884 qp_attr->qp_state = qp->state; in mlx4_ib_query_qp()
1895 if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) { in mlx4_ib_query_qp()
1904 qp_attr->port_num = qp->port; in mlx4_ib_query_qp()
1924 qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt; in mlx4_ib_query_qp()
1925 qp_attr->cap.max_recv_sge = qp->rq.max_gs; in mlx4_ib_query_qp()
1928 qp_attr->cap.max_send_wr = qp->sq.wqe_cnt; in mlx4_ib_query_qp()
1929 qp_attr->cap.max_send_sge = qp->sq.max_gs; in mlx4_ib_query_qp()
1944 if (qp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK) in mlx4_ib_query_qp()
1947 if (qp->flags & MLX4_IB_QP_LSO) in mlx4_ib_query_qp()
1951 mutex_unlock(&qp->mutex); in mlx4_ib_query_qp()
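Throughout the post paths listed above, the send index is reduced with ind & (qp->sq.wqe_cnt - 1) and tested with ind & qp->sq.wqe_cnt: because the queue depth is a power of two (roundup_pow_of_two() in the sizing code), the low bits select the slot and the next bit flips on every wraparound, which feeds the owner bit written into the WQE control segment (the cpu_to_be32(1 << 31) term). A small self-contained program showing just that arithmetic, with hypothetical names:

#include <stdio.h>

/* Hypothetical demonstration of the power-of-two ring arithmetic seen
 * in mlx4_ib_post_send(): wqe_cnt must be a power of two, so the low
 * bits of a monotonically increasing index give the slot, and the next
 * bit toggles once per wraparound (the "owner" bit). */
int main(void)
{
	unsigned wqe_cnt = 8;	/* stands in for qp->sq.wqe_cnt */

	for (unsigned ind = 0; ind < 2 * wqe_cnt; ++ind)
		printf("ind=%2u slot=%u owner=%u\n",
		       ind, ind & (wqe_cnt - 1), !!(ind & wqe_cnt));
	return 0;
}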