Lines Matching refs:qp

120 void c2_set_qp_state(struct c2_qp *qp, int c2_state)  in c2_set_qp_state()  argument
126 qp, in c2_set_qp_state()
127 to_ib_state_str(qp->state), in c2_set_qp_state()
129 qp->state = new_state; in c2_set_qp_state()
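
The fragments above come from the QP state bookkeeping helper. A minimal sketch of what c2_set_qp_state() appears to do, assuming a to_ib_state() mapping helper and the exact pr_debug() wording (both assumptions; only lines 120-129 are excerpted above):

#include <linux/printk.h>

void c2_set_qp_state(struct c2_qp *qp, int c2_state)
{
	int new_state = to_ib_state(c2_state);	/* assumed c2 -> IB state mapping */

	pr_debug("%s: qp[%p]: state transition %s --> %s\n",
		 __func__, qp,
		 to_ib_state_str(qp->state),
		 to_ib_state_str(new_state));
	qp->state = new_state;			/* record the new IB-visible state */
}
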
134 int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp, in c2_qp_modify() argument
146 qp, in c2_qp_modify()
147 to_ib_state_str(qp->state), in c2_qp_modify()
157 wr.qp_handle = qp->adapter_handle; in c2_qp_modify()
173 spin_lock_irqsave(&qp->lock, flags); in c2_qp_modify()
174 if (qp->cm_id && qp->state == IB_QPS_RTS) { in c2_qp_modify()
176 "qp=%p, cm_id=%p\n",qp,qp->cm_id); in c2_qp_modify()
178 vq_req->cm_id = qp->cm_id; in c2_qp_modify()
181 spin_unlock_irqrestore(&qp->lock, flags); in c2_qp_modify()
225 qp->state = next_state; in c2_qp_modify()
235 spin_lock_irqsave(&qp->lock, flags); in c2_qp_modify()
236 if (vq_req->event==IW_CM_EVENT_CLOSE && qp->cm_id) { in c2_qp_modify()
237 qp->cm_id->rem_ref(qp->cm_id); in c2_qp_modify()
238 qp->cm_id = NULL; in c2_qp_modify()
240 spin_unlock_irqrestore(&qp->lock, flags); in c2_qp_modify()
248 qp, in c2_qp_modify()
249 to_ib_state_str(qp->state)); in c2_qp_modify()
253 int c2_qp_set_read_limits(struct c2_dev *c2dev, struct c2_qp *qp, in c2_qp_set_read_limits() argument
268 wr.qp_handle = qp->adapter_handle; in c2_qp_set_read_limits()
302 static int destroy_qp(struct c2_dev *c2dev, struct c2_qp *qp) in destroy_qp() argument
324 wr.qp_handle = qp->adapter_handle; in destroy_qp()
331 spin_lock_irqsave(&qp->lock, flags); in destroy_qp()
332 if (qp->cm_id && qp->state == IB_QPS_RTS) { in destroy_qp()
334 "qp=%p, cm_id=%p\n",qp,qp->cm_id); in destroy_qp()
336 vq_req->qp = qp; in destroy_qp()
337 vq_req->cm_id = qp->cm_id; in destroy_qp()
340 spin_unlock_irqrestore(&qp->lock, flags); in destroy_qp()
368 spin_lock_irqsave(&qp->lock, flags); in destroy_qp()
369 if (qp->cm_id) { in destroy_qp()
370 qp->cm_id->rem_ref(qp->cm_id); in destroy_qp()
371 qp->cm_id = NULL; in destroy_qp()
373 spin_unlock_irqrestore(&qp->lock, flags); in destroy_qp()
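
Both c2_qp_modify() (lines 235-240) and destroy_qp() (lines 368-373) drop the iWARP cm_id reference held by the QP once the connection is gone. A minimal sketch of that pattern, factored into a helper whose name is an assumption:

#include <linux/spinlock.h>
#include <rdma/iw_cm.h>

/* Hypothetical helper: drop the QP's iw_cm_id reference exactly once,
 * under qp->lock, so a racing close/destroy cannot drop it twice. */
static void c2_qp_drop_cm_ref(struct c2_qp *qp)
{
	unsigned long flags;

	spin_lock_irqsave(&qp->lock, flags);
	if (qp->cm_id) {
		qp->cm_id->rem_ref(qp->cm_id);	/* release the CM's connection ref */
		qp->cm_id = NULL;
	}
	spin_unlock_irqrestore(&qp->lock, flags);
}
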
381 static int c2_alloc_qpn(struct c2_dev *c2dev, struct c2_qp *qp) in c2_alloc_qpn() argument
388 ret = idr_alloc_cyclic(&c2dev->qp_table.idr, qp, 0, 0, GFP_NOWAIT); in c2_alloc_qpn()
390 qp->qpn = ret; in c2_alloc_qpn()
407 struct c2_qp *qp; in c2_find_qpn() local
410 qp = idr_find(&c2dev->qp_table.idr, qpn); in c2_find_qpn()
412 return qp; in c2_find_qpn()
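
c2_alloc_qpn() and c2_find_qpn() (lines 381-412) manage queue pair numbers through a per-device IDR. A minimal sketch of that allocation/lookup pattern, assuming the qp_table also carries a spinlock protecting the IDR (only the idr field is visible above; the helper names are assumptions):

#include <linux/idr.h>
#include <linux/spinlock.h>

static int qpn_alloc(struct c2_dev *c2dev, struct c2_qp *qp)
{
	int ret;

	idr_preload(GFP_KERNEL);		/* preallocate IDR nodes outside the lock */
	spin_lock_irq(&c2dev->qp_table.lock);
	ret = idr_alloc_cyclic(&c2dev->qp_table.idr, qp, 0, 0, GFP_NOWAIT);
	if (ret >= 0)
		qp->qpn = ret;			/* QPNs are handed out cyclically */
	spin_unlock_irq(&c2dev->qp_table.lock);
	idr_preload_end();

	return ret < 0 ? ret : 0;
}

static struct c2_qp *qpn_find(struct c2_dev *c2dev, int qpn)
{
	struct c2_qp *qp;

	spin_lock_irq(&c2dev->qp_table.lock);
	qp = idr_find(&c2dev->qp_table.idr, qpn);
	spin_unlock_irq(&c2dev->qp_table.lock);

	return qp;
}
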
417 struct ib_qp_init_attr *qp_attrs, struct c2_qp *qp) in c2_alloc_qp() argument
429 err = c2_alloc_qpn(c2dev, qp); in c2_alloc_qp()
432 qp->ibqp.qp_num = qp->qpn; in c2_alloc_qp()
433 qp->ibqp.qp_type = IB_QPT_RC; in c2_alloc_qp()
436 qp->sq_mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool, in c2_alloc_qp()
437 &qp->sq_mq.shared_dma, GFP_KERNEL); in c2_alloc_qp()
438 if (!qp->sq_mq.shared) { in c2_alloc_qp()
443 qp->rq_mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool, in c2_alloc_qp()
444 &qp->rq_mq.shared_dma, GFP_KERNEL); in c2_alloc_qp()
445 if (!qp->rq_mq.shared) { in c2_alloc_qp()
472 wr.shared_sq_ht = cpu_to_be64(qp->sq_mq.shared_dma); in c2_alloc_qp()
473 wr.shared_rq_ht = cpu_to_be64(qp->rq_mq.shared_dma); in c2_alloc_qp()
477 wr.user_context = (unsigned long) qp; in c2_alloc_qp()
506 atomic_set(&qp->refcount, 1); in c2_alloc_qp()
507 qp->adapter_handle = reply->qp_handle; in c2_alloc_qp()
508 qp->state = IB_QPS_RESET; in c2_alloc_qp()
509 qp->send_sgl_depth = qp_attrs->cap.max_send_sge; in c2_alloc_qp()
510 qp->rdma_write_sgl_depth = qp_attrs->cap.max_send_sge; in c2_alloc_qp()
511 qp->recv_sgl_depth = qp_attrs->cap.max_recv_sge; in c2_alloc_qp()
512 init_waitqueue_head(&qp->wait); in c2_alloc_qp()
525 c2_mq_req_init(&qp->sq_mq, in c2_alloc_qp()
544 c2_mq_req_init(&qp->rq_mq, in c2_alloc_qp()
558 iounmap(qp->sq_mq.peer); in c2_alloc_qp()
560 destroy_qp(c2dev, qp); in c2_alloc_qp()
566 c2_free_mqsp(qp->rq_mq.shared); in c2_alloc_qp()
568 c2_free_mqsp(qp->sq_mq.shared); in c2_alloc_qp()
570 c2_free_qpn(c2dev, qp->qpn); in c2_alloc_qp()
600 void c2_free_qp(struct c2_dev *c2dev, struct c2_qp *qp) in c2_free_qp() argument
605 send_cq = to_c2cq(qp->ibqp.send_cq); in c2_free_qp()
606 recv_cq = to_c2cq(qp->ibqp.recv_cq); in c2_free_qp()
613 c2_free_qpn(c2dev, qp->qpn); in c2_free_qp()
619 destroy_qp(c2dev, qp); in c2_free_qp()
624 c2_cq_clean(c2dev, qp, send_cq->cqn); in c2_free_qp()
626 c2_cq_clean(c2dev, qp, recv_cq->cqn); in c2_free_qp()
631 iounmap(qp->sq_mq.peer); in c2_free_qp()
632 iounmap(qp->rq_mq.peer); in c2_free_qp()
633 c2_free_mqsp(qp->sq_mq.shared); in c2_free_qp()
634 c2_free_mqsp(qp->rq_mq.shared); in c2_free_qp()
636 atomic_dec(&qp->refcount); in c2_free_qp()
637 wait_event(qp->wait, !atomic_read(&qp->refcount)); in c2_free_qp()
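
c2_alloc_qp() takes the initial reference on the QP (line 506) and c2_free_qp() drops it and sleeps until every other user is done (lines 636-637). A minimal sketch of that teardown pattern; the put helper and its name are assumptions:

#include <linux/atomic.h>
#include <linux/wait.h>

/* Hypothetical put helper used by event/completion paths that took an
 * extra reference on the QP while touching it. */
static void c2_qp_put(struct c2_qp *qp)
{
	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);		/* last reference: release the waiter */
}

/* Teardown side: drop the reference taken at allocation time, then
 * block until every other holder has called the put helper. */
static void c2_qp_wait_idle(struct c2_qp *qp)
{
	atomic_dec(&qp->refcount);
	wait_event(qp->wait, !atomic_read(&qp->refcount));
}
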
762 static int qp_wr_post(struct c2_mq *q, union c2wr * wr, struct c2_qp *qp, u32 size) in qp_wr_post() argument
794 struct c2_qp *qp = to_c2qp(ibqp); in c2_post_send() local
804 if (qp->state > IB_QPS_RTS) { in c2_post_send()
837 if (ib_wr->num_sge > qp->send_sgl_depth) { in c2_post_send()
855 if (ib_wr->num_sge > qp->rdma_write_sgl_depth) { in c2_post_send()
922 spin_lock_irqsave(&qp->lock, lock_flags); in c2_post_send()
923 err = qp_wr_post(&qp->sq_mq, &wr, qp, msg_size); in c2_post_send()
925 spin_unlock_irqrestore(&qp->lock, lock_flags); in c2_post_send()
932 c2_activity(c2dev, qp->sq_mq.index, qp->sq_mq.hint_count); in c2_post_send()
933 spin_unlock_irqrestore(&qp->lock, lock_flags); in c2_post_send()
948 struct c2_qp *qp = to_c2qp(ibqp); in c2_post_receive() local
953 if (qp->state > IB_QPS_RTS) { in c2_post_receive()
965 if (ib_wr->num_sge > qp->recv_sgl_depth) { in c2_post_receive()
993 spin_lock_irqsave(&qp->lock, lock_flags); in c2_post_receive()
994 err = qp_wr_post(&qp->rq_mq, &wr, qp, qp->rq_mq.msg_size); in c2_post_receive()
996 spin_unlock_irqrestore(&qp->lock, lock_flags); in c2_post_receive()
1003 c2_activity(c2dev, qp->rq_mq.index, qp->rq_mq.hint_count); in c2_post_receive()
1004 spin_unlock_irqrestore(&qp->lock, lock_flags); in c2_post_receive()
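
c2_post_send() (lines 922-933) and c2_post_receive() (lines 993-1004) both queue each work request on the shared message queue under qp->lock and then kick the adapter through the MQ activity doorbell. A minimal sketch of one iteration of that loop for the send queue, assuming the union c2wr and its message size have already been built; the helper name is an assumption:

static int c2_post_one_send(struct c2_dev *c2dev, struct c2_qp *qp,
			    union c2wr *wr, u32 msg_size)
{
	unsigned long lock_flags;
	int err;

	spin_lock_irqsave(&qp->lock, lock_flags);
	err = qp_wr_post(&qp->sq_mq, wr, qp, msg_size);
	if (err) {
		spin_unlock_irqrestore(&qp->lock, lock_flags);
		return err;			/* ring is not kicked on failure */
	}
	c2_activity(c2dev, qp->sq_mq.index, qp->sq_mq.hint_count);
	spin_unlock_irqrestore(&qp->lock, lock_flags);

	return 0;
}
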