Lines Matching refs:qp

119 void c2_set_qp_state(struct c2_qp *qp, int c2_state)  in c2_set_qp_state()  argument
125 qp, in c2_set_qp_state()
126 to_ib_state_str(qp->state), in c2_set_qp_state()
128 qp->state = new_state; in c2_set_qp_state()
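
The matches above come from c2_set_qp_state(), which logs a state transition through to_ib_state_str() and then records the new generic IB state in qp->state (the real function first translates the adapter's own "c2" state code, a step the listing elides). Below is a minimal kernel-flavoured sketch of that log-then-store pattern; the stand-in struct and the string table are illustrative, not copied from the driver.

/* Sketch of the pattern around c2_set_qp_state(): log the transition with
 * a to_ib_state_str()-style helper, then store the new IB state in
 * qp->state.  The struct is a trimmed stand-in with only the field the
 * sketch touches. */
#include <linux/kernel.h>
#include <rdma/ib_verbs.h>

struct qp_state_model {
        enum ib_qp_state state;
};

static const char *to_ib_state_str_model(enum ib_qp_state s)
{
        switch (s) {
        case IB_QPS_RESET:      return "IB_QPS_RESET";
        case IB_QPS_INIT:       return "IB_QPS_INIT";
        case IB_QPS_RTR:        return "IB_QPS_RTR";
        case IB_QPS_RTS:        return "IB_QPS_RTS";
        case IB_QPS_SQD:        return "IB_QPS_SQD";
        case IB_QPS_SQE:        return "IB_QPS_SQE";
        default:                return "IB_QPS_ERR";
        }
}

static void set_qp_state_model(struct qp_state_model *qp,
                               enum ib_qp_state new_state)
{
        pr_debug("qp=%p, %s --> %s\n", qp,
                 to_ib_state_str_model(qp->state),
                 to_ib_state_str_model(new_state));
        qp->state = new_state;
}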
133 int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp, in c2_qp_modify() argument
145 qp, in c2_qp_modify()
146 to_ib_state_str(qp->state), in c2_qp_modify()
156 wr.qp_handle = qp->adapter_handle; in c2_qp_modify()
172 spin_lock_irqsave(&qp->lock, flags); in c2_qp_modify()
173 if (qp->cm_id && qp->state == IB_QPS_RTS) { in c2_qp_modify()
175 "qp=%p, cm_id=%p\n",qp,qp->cm_id); in c2_qp_modify()
177 vq_req->cm_id = qp->cm_id; in c2_qp_modify()
180 spin_unlock_irqrestore(&qp->lock, flags); in c2_qp_modify()
224 qp->state = next_state; in c2_qp_modify()
234 spin_lock_irqsave(&qp->lock, flags); in c2_qp_modify()
235 if (vq_req->event==IW_CM_EVENT_CLOSE && qp->cm_id) { in c2_qp_modify()
236 qp->cm_id->rem_ref(qp->cm_id); in c2_qp_modify()
237 qp->cm_id = NULL; in c2_qp_modify()
239 spin_unlock_irqrestore(&qp->lock, flags); in c2_qp_modify()
247 qp, in c2_qp_modify()
248 to_ib_state_str(qp->state)); in c2_qp_modify()
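
The c2_qp_modify() fragments show a reference hand-off around the state-change work request: qp->adapter_handle goes into the WR, the iw_cm_id is attached to the verbs request only while the QP is connected (cm_id set and state IB_QPS_RTS), the new state is stored after the reply, and an IW_CM_EVENT_CLOSE reply makes the QP drop its cm_id reference under qp->lock. A sketch of that hand-off follows, using trimmed stand-ins for the QP and request structures and eliding the driver's vq_req send/wait machinery.

/* Sketch of the cm_id hand-off visible in c2_qp_modify().  Only the
 * spinlock, ib_qp_state and iw_cm_id uses are real kernel APIs; the
 * model structs and the elided send/wait step are stand-ins. */
#include <linux/spinlock.h>
#include <rdma/ib_verbs.h>
#include <rdma/iw_cm.h>

struct qp_modify_model {
        spinlock_t lock;
        struct iw_cm_id *cm_id;
        enum ib_qp_state state;
};

struct modify_req_model {
        struct iw_cm_id *cm_id;
        enum iw_cm_event_type event;
};

static void qp_modify_sketch(struct qp_modify_model *qp,
                             struct modify_req_model *req,
                             enum ib_qp_state next_state)
{
        unsigned long flags;

        /* Attach the connection only if the QP is actually connected. */
        spin_lock_irqsave(&qp->lock, flags);
        if (qp->cm_id && qp->state == IB_QPS_RTS)
                req->cm_id = qp->cm_id;
        spin_unlock_irqrestore(&qp->lock, flags);

        /* ... build the WR with qp->adapter_handle, send it, wait ... */

        qp->state = next_state;

        /* A CLOSE generated by the transition releases the cm_id. */
        spin_lock_irqsave(&qp->lock, flags);
        if (req->event == IW_CM_EVENT_CLOSE && qp->cm_id) {
                qp->cm_id->rem_ref(qp->cm_id);
                qp->cm_id = NULL;
        }
        spin_unlock_irqrestore(&qp->lock, flags);
}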
252 int c2_qp_set_read_limits(struct c2_dev *c2dev, struct c2_qp *qp, in c2_qp_set_read_limits() argument
267 wr.qp_handle = qp->adapter_handle; in c2_qp_set_read_limits()
301 static int destroy_qp(struct c2_dev *c2dev, struct c2_qp *qp) in destroy_qp() argument
323 wr.qp_handle = qp->adapter_handle; in destroy_qp()
330 spin_lock_irqsave(&qp->lock, flags); in destroy_qp()
331 if (qp->cm_id && qp->state == IB_QPS_RTS) { in destroy_qp()
333 "qp=%p, cm_id=%p\n",qp,qp->cm_id); in destroy_qp()
335 vq_req->qp = qp; in destroy_qp()
336 vq_req->cm_id = qp->cm_id; in destroy_qp()
339 spin_unlock_irqrestore(&qp->lock, flags); in destroy_qp()
367 spin_lock_irqsave(&qp->lock, flags); in destroy_qp()
368 if (qp->cm_id) { in destroy_qp()
369 qp->cm_id->rem_ref(qp->cm_id); in destroy_qp()
370 qp->cm_id = NULL; in destroy_qp()
372 spin_unlock_irqrestore(&qp->lock, flags); in destroy_qp()
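
destroy_qp() repeats the same hand-off before sending the destroy work request, then unconditionally releases any iw_cm_id still attached to the QP. A short sketch of that final release, reusing the stand-in types from the c2_qp_modify sketch above:

/* Tail of destroy_qp(): whatever the adapter reported, any remaining
 * iw_cm_id reference is dropped under qp->lock before the QP goes away. */
static void destroy_qp_teardown_sketch(struct qp_modify_model *qp)
{
        unsigned long flags;

        spin_lock_irqsave(&qp->lock, flags);
        if (qp->cm_id) {
                qp->cm_id->rem_ref(qp->cm_id);  /* drop the CM reference */
                qp->cm_id = NULL;               /* QP no longer owns it */
        }
        spin_unlock_irqrestore(&qp->lock, flags);
}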
380 static int c2_alloc_qpn(struct c2_dev *c2dev, struct c2_qp *qp) in c2_alloc_qpn() argument
386 ret = idr_get_new_above(&c2dev->qp_table.idr, qp, in c2_alloc_qpn()
387 c2dev->qp_table.last++, &qp->qpn); in c2_alloc_qpn()
404 struct c2_qp *qp; in c2_find_qpn() local
407 qp = idr_find(&c2dev->qp_table.idr, qpn); in c2_find_qpn()
409 return qp; in c2_find_qpn()
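
c2_alloc_qpn() and c2_find_qpn() manage the QP-number table with a kernel IDR: allocation asks for a new id above qp_table.last and stores it in qp->qpn, and lookup maps a QPN back to its struct c2_qp with idr_find(). Below is a sketch of that pattern using the pre-3.9 IDR API (idr_pre_get() plus idr_get_new_above(), retried on -EAGAIN); the table layout and the locking shown here are assumptions for the sketch.

/* Sketch of a QPN allocator on the old IDR API, matching the usage in
 * the c2_alloc_qpn()/c2_find_qpn() fragments above. */
#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <linux/errno.h>

struct qp_table_model {
        struct idr idr;
        spinlock_t lock;
        int last;               /* hint: allocate QPNs above this value */
};

static int alloc_qpn_model(struct qp_table_model *tbl, void *qp, int *qpn)
{
        int ret;

        do {
                /* Preload so the insertion below cannot need to sleep. */
                if (!idr_pre_get(&tbl->idr, GFP_KERNEL))
                        return -ENOMEM;

                spin_lock_irq(&tbl->lock);
                ret = idr_get_new_above(&tbl->idr, qp, tbl->last++, qpn);
                spin_unlock_irq(&tbl->lock);
        } while (ret == -EAGAIN);       /* another caller used the preload */

        return ret;
}

static void *find_qpn_model(struct qp_table_model *tbl, int qpn)
{
        void *qp;

        spin_lock_irq(&tbl->lock);
        qp = idr_find(&tbl->idr, qpn);  /* NULL if the QPN is not in use */
        spin_unlock_irq(&tbl->lock);
        return qp;
}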
414 struct ib_qp_init_attr *qp_attrs, struct c2_qp *qp) in c2_alloc_qp() argument
426 err = c2_alloc_qpn(c2dev, qp); in c2_alloc_qp()
429 qp->ibqp.qp_num = qp->qpn; in c2_alloc_qp()
430 qp->ibqp.qp_type = IB_QPT_RC; in c2_alloc_qp()
433 qp->sq_mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool, in c2_alloc_qp()
434 &qp->sq_mq.shared_dma, GFP_KERNEL); in c2_alloc_qp()
435 if (!qp->sq_mq.shared) { in c2_alloc_qp()
440 qp->rq_mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool, in c2_alloc_qp()
441 &qp->rq_mq.shared_dma, GFP_KERNEL); in c2_alloc_qp()
442 if (!qp->rq_mq.shared) { in c2_alloc_qp()
469 wr.shared_sq_ht = cpu_to_be64(qp->sq_mq.shared_dma); in c2_alloc_qp()
470 wr.shared_rq_ht = cpu_to_be64(qp->rq_mq.shared_dma); in c2_alloc_qp()
474 wr.user_context = (unsigned long) qp; in c2_alloc_qp()
503 atomic_set(&qp->refcount, 1); in c2_alloc_qp()
504 qp->adapter_handle = reply->qp_handle; in c2_alloc_qp()
505 qp->state = IB_QPS_RESET; in c2_alloc_qp()
506 qp->send_sgl_depth = qp_attrs->cap.max_send_sge; in c2_alloc_qp()
507 qp->rdma_write_sgl_depth = qp_attrs->cap.max_send_sge; in c2_alloc_qp()
508 qp->recv_sgl_depth = qp_attrs->cap.max_recv_sge; in c2_alloc_qp()
509 init_waitqueue_head(&qp->wait); in c2_alloc_qp()
522 c2_mq_req_init(&qp->sq_mq, in c2_alloc_qp()
541 c2_mq_req_init(&qp->rq_mq, in c2_alloc_qp()
555 iounmap(qp->sq_mq.peer); in c2_alloc_qp()
557 destroy_qp(c2dev, qp); in c2_alloc_qp()
563 c2_free_mqsp(qp->rq_mq.shared); in c2_alloc_qp()
565 c2_free_mqsp(qp->sq_mq.shared); in c2_alloc_qp()
567 c2_free_qpn(c2dev, qp->qpn); in c2_alloc_qp()
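
The c2_alloc_qp() fragments show the resource order (QP number, shared send- and receive-queue pointers from the kernel MQ shared pool, the adapter-side QP, then the mapped message queues) and a bail-out path that unwinds them in reverse. A sketch of that goto-based unwinding follows; the per-step helpers are hypothetical placeholders, and only the ordering follows the bail-out sequence in the listing.

/* Hypothetical per-step helpers, declared only so the sketch is
 * self-contained; the comments name the driver calls they stand in for. */
static int alloc_qpn_step(void);
static int alloc_sq_shared_step(void);
static int alloc_rq_shared_step(void);
static int create_adapter_qp_step(void);
static int map_queue_pair_step(void);
static void destroy_adapter_qp_step(void);
static void free_rq_shared_step(void);
static void free_sq_shared_step(void);
static void free_qpn_step(void);

static int alloc_qp_sketch(void)
{
        int err;

        err = alloc_qpn_step();         /* c2_alloc_qpn() */
        if (err)
                return err;

        err = alloc_sq_shared_step();   /* c2_alloc_mqsp() for the SQ */
        if (err)
                goto bail_qpn;

        err = alloc_rq_shared_step();   /* c2_alloc_mqsp() for the RQ */
        if (err)
                goto bail_sq_shared;

        err = create_adapter_qp_step(); /* QP create WR to the adapter */
        if (err)
                goto bail_rq_shared;

        err = map_queue_pair_step();    /* c2_mq_req_init() for SQ and RQ */
        if (err)
                goto bail_adapter_qp;

        return 0;

bail_adapter_qp:
        destroy_adapter_qp_step();      /* destroy_qp() */
bail_rq_shared:
        free_rq_shared_step();          /* c2_free_mqsp(qp->rq_mq.shared) */
bail_sq_shared:
        free_sq_shared_step();          /* c2_free_mqsp(qp->sq_mq.shared) */
bail_qpn:
        free_qpn_step();                /* c2_free_qpn(c2dev, qp->qpn) */
        return err;
}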
597 void c2_free_qp(struct c2_dev *c2dev, struct c2_qp *qp) in c2_free_qp() argument
602 send_cq = to_c2cq(qp->ibqp.send_cq); in c2_free_qp()
603 recv_cq = to_c2cq(qp->ibqp.recv_cq); in c2_free_qp()
610 c2_free_qpn(c2dev, qp->qpn); in c2_free_qp()
616 destroy_qp(c2dev, qp); in c2_free_qp()
621 c2_cq_clean(c2dev, qp, send_cq->cqn); in c2_free_qp()
623 c2_cq_clean(c2dev, qp, recv_cq->cqn); in c2_free_qp()
628 iounmap(qp->sq_mq.peer); in c2_free_qp()
629 iounmap(qp->rq_mq.peer); in c2_free_qp()
630 c2_free_mqsp(qp->sq_mq.shared); in c2_free_qp()
631 c2_free_mqsp(qp->rq_mq.shared); in c2_free_qp()
633 atomic_dec(&qp->refcount); in c2_free_qp()
634 wait_event(qp->wait, !atomic_read(&qp->refcount)); in c2_free_qp()
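
c2_free_qp() ends with a reference-count handshake: the QP was created with refcount 1 (atomic_set() in c2_alloc_qp()), the destroy path drops that initial reference, and wait_event() then blocks until every transient user (for example, an event handler that looked the QP up by QPN) has dropped theirs. A sketch of that idiom with a trimmed stand-in struct; the get/put helpers are assumptions about how the other side of the handshake pairs up.

/* Sketch of the "wait for the last user" idiom at the end of c2_free_qp(). */
#include <linux/atomic.h>
#include <linux/wait.h>

struct qp_ref_model {
        atomic_t refcount;              /* set to 1 at allocation time */
        wait_queue_head_t wait;         /* init_waitqueue_head() at alloc */
};

/* Transient users of the QP would pair these two helpers. */
static void qp_get_model(struct qp_ref_model *qp)
{
        atomic_inc(&qp->refcount);
}

static void qp_put_model(struct qp_ref_model *qp)
{
        if (atomic_dec_and_test(&qp->refcount))
                wake_up(&qp->wait);
}

static void free_qp_tail_model(struct qp_ref_model *qp)
{
        /* ... destroy_qp(), c2_cq_clean(), iounmap(), c2_free_mqsp() ... */

        atomic_dec(&qp->refcount);      /* drop the initial reference */
        wait_event(qp->wait, !atomic_read(&qp->refcount));
        /* No one can be touching the QP now; it is safe to free it. */
}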
759 static int qp_wr_post(struct c2_mq *q, union c2wr * wr, struct c2_qp *qp, u32 size) in qp_wr_post() argument
791 struct c2_qp *qp = to_c2qp(ibqp); in c2_post_send() local
801 if (qp->state > IB_QPS_RTS) in c2_post_send()
832 if (ib_wr->num_sge > qp->send_sgl_depth) { in c2_post_send()
850 if (ib_wr->num_sge > qp->rdma_write_sgl_depth) { in c2_post_send()
917 spin_lock_irqsave(&qp->lock, lock_flags); in c2_post_send()
918 err = qp_wr_post(&qp->sq_mq, &wr, qp, msg_size); in c2_post_send()
920 spin_unlock_irqrestore(&qp->lock, lock_flags); in c2_post_send()
927 c2_activity(c2dev, qp->sq_mq.index, qp->sq_mq.hint_count); in c2_post_send()
928 spin_unlock_irqrestore(&qp->lock, lock_flags); in c2_post_send()
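
c2_post_send() gates each post on the QP state and on the scatter/gather depth negotiated at creation: posting is refused once qp->state has moved past IB_QPS_RTS, and a work request's num_sge is checked against send_sgl_depth (or rdma_write_sgl_depth for RDMA writes). A sketch of those admission checks; the stand-in struct and the error codes are assumptions.

/* Sketch of the admission checks at the top of c2_post_send(). */
#include <linux/errno.h>
#include <rdma/ib_verbs.h>

struct send_check_model {
        enum ib_qp_state state;
        int send_sgl_depth;             /* from qp_attrs->cap.max_send_sge */
        int rdma_write_sgl_depth;
};

static int post_send_check_sketch(struct send_check_model *qp,
                                  const struct ib_send_wr *wr)
{
        if (qp->state > IB_QPS_RTS)     /* QP drained, flushed or in error */
                return -EINVAL;

        if (wr->opcode == IB_WR_RDMA_WRITE) {
                if (wr->num_sge > qp->rdma_write_sgl_depth)
                        return -EINVAL;
        } else {
                if (wr->num_sge > qp->send_sgl_depth)
                        return -EINVAL;
        }
        return 0;
}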
942 struct c2_qp *qp = to_c2qp(ibqp); in c2_post_receive() local
947 if (qp->state > IB_QPS_RTS) in c2_post_receive()
957 if (ib_wr->num_sge > qp->recv_sgl_depth) { in c2_post_receive()
985 spin_lock_irqsave(&qp->lock, lock_flags); in c2_post_receive()
986 err = qp_wr_post(&qp->rq_mq, &wr, qp, qp->rq_mq.msg_size); in c2_post_receive()
988 spin_unlock_irqrestore(&qp->lock, lock_flags); in c2_post_receive()
995 c2_activity(c2dev, qp->rq_mq.index, qp->rq_mq.hint_count); in c2_post_receive()
996 spin_unlock_irqrestore(&qp->lock, lock_flags); in c2_post_receive()
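
Both c2_post_send() and c2_post_receive() then follow the same post-and-doorbell pattern: copy the work request into the shared message queue (sq_mq or rq_mq) under qp->lock via qp_wr_post(), and only on success notify the adapter with c2_activity() using the queue's index and hint count before dropping the lock. A sketch of that pattern with hypothetical stand-ins for the MQ post and doorbell helpers:

/* Sketch of the post-and-doorbell pattern shared by the two post paths.
 * mq_post_model() and doorbell_model() stand in for qp_wr_post() and
 * c2_activity(); they are not the driver's API. */
#include <linux/spinlock.h>

struct mq_model {
        unsigned int index;             /* models c2_mq.index */
        unsigned int hint_count;        /* models c2_mq.hint_count */
};

static int mq_post_model(struct mq_model *mq, const void *wr, unsigned int size);
static void doorbell_model(unsigned int mq_index, unsigned int hint_count);

static int post_wr_sketch(spinlock_t *qp_lock, struct mq_model *mq,
                          const void *wr, unsigned int size)
{
        unsigned long flags;
        int err;

        spin_lock_irqsave(qp_lock, flags);
        err = mq_post_model(mq, wr, size);      /* copy WR into the shared MQ */
        if (err) {
                spin_unlock_irqrestore(qp_lock, flags);
                return err;
        }
        doorbell_model(mq->index, mq->hint_count);  /* tell the adapter */
        spin_unlock_irqrestore(qp_lock, flags);
        return 0;
}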