Lines matching references to qp in drivers/infiniband/sw/rxe/rxe_comp.c
114 struct rxe_qp *qp = from_timer(qp, t, retrans_timer); in retransmit_timer() local
116 if (qp->valid) { in retransmit_timer()
117 qp->comp.timeout = 1; in retransmit_timer()
118 rxe_run_task(&qp->comp.task, 1); in retransmit_timer()
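The retransmit_timer() hits above show the usual kernel timer idiom: from_timer() is a container_of() wrapper that recovers the rxe_qp from the embedded timer, and the callback only marks qp->comp.timeout and kicks the completer task instead of doing real work in timer context. A minimal userspace sketch of that container_of recovery, with invented names (my_qp, my_timer), not the rxe code itself:

    /* Recover the enclosing struct from a pointer to an embedded member,
     * the same trick from_timer() uses. Hypothetical types for illustration.
     */
    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct my_timer { int armed; };

    struct my_qp {
        int valid;
        int comp_timeout;              /* analogue of qp->comp.timeout */
        struct my_timer retrans_timer;
    };

    /* Callback receives only the timer pointer, like retransmit_timer(). */
    static void retransmit_cb(struct my_timer *t)
    {
        struct my_qp *qp = container_of(t, struct my_qp, retrans_timer);

        if (qp->valid)
            qp->comp_timeout = 1;  /* defer the real work to the completer task */
    }

    int main(void)
    {
        struct my_qp qp = { .valid = 1 };

        retransmit_cb(&qp.retrans_timer);
        printf("comp_timeout = %d\n", qp.comp_timeout);
        return 0;
    }

Deferring the actual retry handling keeps the timer callback short; the completer picks up the flag on its next run.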
122 void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb) in rxe_comp_queue_pkt() argument
126 skb_queue_tail(&qp->resp_pkts, skb); in rxe_comp_queue_pkt()
128 must_sched = skb_queue_len(&qp->resp_pkts) > 1; in rxe_comp_queue_pkt()
132 rxe_run_task(&qp->comp.task, must_sched); in rxe_comp_queue_pkt()
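rxe_comp_queue_pkt() queues an incoming response and passes must_sched to rxe_run_task(); in this generation of the driver a zero value runs the completer inline in the caller's context and a nonzero value defers it to the tasklet, so an existing backlog (queue length above one) forces deferral. A toy sketch of that inline-versus-deferred decision, with hypothetical names:

    #include <stdbool.h>
    #include <stdio.h>

    static int queue_len;

    static void queue_pkt(void)
    {
        bool must_sched;

        queue_len++;                  /* analogue of skb_queue_tail() */
        must_sched = queue_len > 1;   /* backlog => schedule, else run inline */

        printf("queued pkt, %s\n",
               must_sched ? "deferring to task" : "processing inline");
    }

    int main(void)
    {
        queue_pkt();   /* inline */
        queue_pkt();   /* deferred */
        return 0;
    }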
135 static inline enum comp_state get_wqe(struct rxe_qp *qp, in get_wqe() argument
144 wqe = queue_head(qp->sq.queue); in get_wqe()
163 static inline void reset_retry_counters(struct rxe_qp *qp) in reset_retry_counters() argument
165 qp->comp.retry_cnt = qp->attr.retry_cnt; in reset_retry_counters()
166 qp->comp.rnr_retry = qp->attr.rnr_retry; in reset_retry_counters()
167 qp->comp.started_retry = 0; in reset_retry_counters()
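reset_retry_counters() reloads the transport and RNR retry budgets from the QP attributes whenever the completer sees forward progress. Later in the listing (lines 684 and 721) the counters are only decremented when they are not 7: for the RNR retry count the IB spec encodes 7 as "retry indefinitely", and the same guard is applied to the transport retry counter here. A small sketch of that decrement rule, with an invented helper name:

    #include <stdbool.h>
    #include <stdio.h>

    #define RETRY_INFINITE 7

    /* Returns true if another retry attempt is allowed. */
    static bool consume_retry(unsigned int *cnt)
    {
        if (*cnt == 0)
            return false;              /* budget exhausted */
        if (*cnt != RETRY_INFINITE)
            (*cnt)--;                  /* 7 is never decremented: retry forever */
        return true;
    }

    int main(void)
    {
        unsigned int cnt = 2;

        while (consume_retry(&cnt))
            printf("retrying, remaining budget %u\n", cnt);
        printf("giving up\n");
        return 0;
    }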
170 static inline enum comp_state check_psn(struct rxe_qp *qp, in check_psn() argument
185 reset_retry_counters(qp); in check_psn()
193 diff = psn_compare(pkt->psn, qp->comp.psn); in check_psn()
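check_psn() and most of the later decisions hinge on psn_compare(), which orders two 24-bit PSNs modulo wraparound: the difference is shifted into the top byte so its sign reflects modular order. A self-contained userspace rewrite of that comparison (the kernel helper lives in rxe_hdr.h and does essentially the same thing):

    #include <stdint.h>
    #include <stdio.h>

    #define BTH_PSN_MASK 0x00ffffff   /* PSNs occupy the low 24 bits */

    static int psn_compare(uint32_t psn_a, uint32_t psn_b)
    {
        /* Shift the difference into the top 8 bits so the sign of the
         * result reflects 24-bit modular ordering.
         */
        return (int32_t)((psn_a - psn_b) << 8);
    }

    int main(void)
    {
        /* 0x000001 is "after" 0xffffff once the 24-bit counter wraps. */
        printf("%d\n", psn_compare(0x000001, 0xffffff) > 0); /* prints 1 */
        printf("%d\n", psn_compare(0xfffffe, 0x000002) < 0); /* prints 1 */
        return 0;
    }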
209 static inline enum comp_state check_ack(struct rxe_qp *qp, in check_ack() argument
215 struct rxe_dev *rxe = to_rdev(qp->ibqp.device); in check_ack()
218 switch (qp->comp.opcode) { in check_ack()
267 reset_retry_counters(qp); in check_ack()
279 reset_retry_counters(qp); in check_ack()
286 reset_retry_counters(qp); in check_ack()
299 if (psn_compare(pkt->psn, qp->comp.psn) > 0) { in check_ack()
302 qp->comp.psn = pkt->psn; in check_ack()
303 if (qp->req.wait_psn) { in check_ack()
304 qp->req.wait_psn = 0; in check_ack()
305 rxe_run_task(&qp->req.task, 0); in check_ack()
340 static inline enum comp_state do_read(struct rxe_qp *qp, in do_read() argument
346 ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE, in do_read()
358 static inline enum comp_state do_atomic(struct rxe_qp *qp, in do_atomic() argument
366 ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE, in do_atomic()
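do_read() and do_atomic() both call copy_data() to land the response payload (read data, or the 8-byte atomic result) in the SGE list of the original work request, under the IB_ACCESS_LOCAL_WRITE permission shown above. A simplified sketch of scattering a contiguous payload across an SGE list, with invented types and no lkey or permission checking:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct sge { void *addr; size_t length; };

    /* Copy len bytes from src into the sge list; returns -1 if it does not fit. */
    static int scatter_payload(struct sge *sg, int num_sge,
                               const void *src, size_t len)
    {
        const uint8_t *p = src;

        for (int i = 0; i < num_sge && len; i++) {
            size_t n = len < sg[i].length ? len : sg[i].length;

            memcpy(sg[i].addr, p, n);
            p += n;
            len -= n;
        }
        return len ? -1 : 0;
    }

    int main(void)
    {
        uint8_t a[4], b[8];
        struct sge sg[] = { { a, sizeof(a) }, { b, sizeof(b) } };
        uint8_t payload[10] = "123456789";

        printf("%d\n", scatter_payload(sg, 2, payload, sizeof(payload)));
        return 0;
    }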
375 static void make_send_cqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe, in make_send_cqe() argument
380 if (!qp->is_user) { in make_send_cqe()
390 wc->qp = &qp->ibqp; in make_send_cqe()
401 uwc->qp_num = qp->ibqp.qp_num; in make_send_cqe()
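make_send_cqe() fills one of two completion layouts: kernel consumers get a struct holding an object pointer (wc->qp), while userspace consumers cannot follow kernel pointers and get the QP number instead (uwc->qp_num). A toy sketch of that split, with invented types rather than ib_wc/ib_uverbs_wc:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct qp { uint32_t qp_num; };

    struct kernel_wc { struct qp *qp;     int status; };
    struct user_wc   { uint32_t   qp_num; int status; };

    union cqe {
        struct kernel_wc wc;
        struct user_wc   uwc;
    };

    static void make_cqe(union cqe *cqe, struct qp *qp, bool is_user, int status)
    {
        if (!is_user) {
            cqe->wc.qp = qp;              /* kernel ULP can use the pointer */
            cqe->wc.status = status;
        } else {
            cqe->uwc.qp_num = qp->qp_num; /* userspace only gets the number */
            cqe->uwc.status = status;
        }
    }

    int main(void)
    {
        struct qp qp = { .qp_num = 17 };
        union cqe cqe;

        make_cqe(&cqe, &qp, true, 0);
        printf("user cqe for qp %u\n", cqe.uwc.qp_num);
        return 0;
    }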
413 static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe) in do_complete() argument
415 struct rxe_dev *rxe = to_rdev(qp->ibqp.device); in do_complete()
418 if ((qp->sq_sig_type == IB_SIGNAL_ALL_WR) || in do_complete()
421 make_send_cqe(qp, wqe, &cqe); in do_complete()
422 advance_consumer(qp->sq.queue); in do_complete()
423 rxe_cq_post(qp->scq, &cqe, 0); in do_complete()
425 advance_consumer(qp->sq.queue); in do_complete()
437 if (qp->req.wait_fence) { in do_complete()
438 qp->req.wait_fence = 0; in do_complete()
439 rxe_run_task(&qp->req.task, 0); in do_complete()
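do_complete() always retires the WQE from the send queue (advance_consumer()), but only posts a CQE when the QP signals every work request (IB_SIGNAL_ALL_WR) or, in the elided part of the condition, when the individual WQE asked for a completion or finished in error; afterwards a requester blocked on a fence is rescheduled. A reduced sketch of the signaling decision, with hypothetical types:

    #include <stdbool.h>
    #include <stdio.h>

    enum sig_type { SIGNAL_ALL_WR, SIGNAL_REQ_WR };

    struct wqe { bool signaled; };

    static int cqes_posted;
    static int wqes_retired;

    static void complete_wqe(enum sig_type qp_sig, const struct wqe *wqe)
    {
        if (qp_sig == SIGNAL_ALL_WR || wqe->signaled)
            cqes_posted++;   /* analogue of make_send_cqe() + rxe_cq_post() */
        wqes_retired++;      /* analogue of advance_consumer(qp->sq.queue) */
    }

    int main(void)
    {
        struct wqe unsignaled = { .signaled = false };
        struct wqe signaled   = { .signaled = true };

        complete_wqe(SIGNAL_REQ_WR, &unsignaled);  /* retired, no CQE */
        complete_wqe(SIGNAL_REQ_WR, &signaled);    /* retired + CQE */

        printf("retired %d wqes, posted %d cqes\n", wqes_retired, cqes_posted);
        return 0;
    }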
443 static inline enum comp_state complete_ack(struct rxe_qp *qp, in complete_ack() argument
451 atomic_inc(&qp->req.rd_atomic); in complete_ack()
452 if (qp->req.need_rd_atomic) { in complete_ack()
453 qp->comp.timeout_retry = 0; in complete_ack()
454 qp->req.need_rd_atomic = 0; in complete_ack()
455 rxe_run_task(&qp->req.task, 0); in complete_ack()
459 if (unlikely(qp->req.state == QP_STATE_DRAIN)) { in complete_ack()
461 spin_lock_irqsave(&qp->state_lock, flags); in complete_ack()
462 if ((qp->req.state == QP_STATE_DRAIN) && in complete_ack()
463 (qp->comp.psn == qp->req.psn)) { in complete_ack()
464 qp->req.state = QP_STATE_DRAINED; in complete_ack()
465 spin_unlock_irqrestore(&qp->state_lock, flags); in complete_ack()
467 if (qp->ibqp.event_handler) { in complete_ack()
470 ev.device = qp->ibqp.device; in complete_ack()
471 ev.element.qp = &qp->ibqp; in complete_ack()
473 qp->ibqp.event_handler(&ev, in complete_ack()
474 qp->ibqp.qp_context); in complete_ack()
477 spin_unlock_irqrestore(&qp->state_lock, flags); in complete_ack()
481 do_complete(qp, wqe); in complete_ack()
483 if (psn_compare(pkt->psn, qp->comp.psn) >= 0) in complete_ack()
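complete_ack() returns an RDMA read/atomic credit (the atomic_inc of qp->req.rd_atomic) and, if the requester had stalled for lack of one, clears need_rd_atomic and reschedules it; the remaining lines handle the SQ drain transition, moving the QP to DRAINED and delivering an event once comp.psn has caught up with req.psn. A single-threaded sketch of the credit scheme, with invented names, not the rxe code:

    #include <stdbool.h>
    #include <stdio.h>

    struct qp_credits {
        int  rd_atomic;        /* remaining read/atomic credits */
        bool need_rd_atomic;   /* requester is stalled waiting for a credit */
    };

    static bool consume_credit(struct qp_credits *c)
    {
        if (c->rd_atomic > 0) {
            c->rd_atomic--;
            return true;
        }
        c->need_rd_atomic = true;   /* remember to wake the requester later */
        return false;
    }

    static void return_credit(struct qp_credits *c)
    {
        c->rd_atomic++;             /* analogue of atomic_inc(&qp->req.rd_atomic) */
        if (c->need_rd_atomic) {
            c->need_rd_atomic = false;
            printf("rescheduling requester\n"); /* rxe_run_task(&qp->req.task, 0) */
        }
    }

    int main(void)
    {
        struct qp_credits c = { .rd_atomic = 1 };

        consume_credit(&c);          /* first read goes out */
        if (!consume_credit(&c))     /* second read stalls */
            printf("requester stalled\n");
        return_credit(&c);           /* ack arrives, credit returned */
        return 0;
    }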
489 static inline enum comp_state complete_wqe(struct rxe_qp *qp, in complete_wqe() argument
494 if (psn_compare(wqe->last_psn, qp->comp.psn) >= 0) { in complete_wqe()
495 qp->comp.psn = (wqe->last_psn + 1) & BTH_PSN_MASK; in complete_wqe()
496 qp->comp.opcode = -1; in complete_wqe()
499 if (qp->req.wait_psn) { in complete_wqe()
500 qp->req.wait_psn = 0; in complete_wqe()
501 rxe_run_task(&qp->req.task, 1); in complete_wqe()
505 do_complete(qp, wqe); in complete_wqe()
510 static void rxe_drain_resp_pkts(struct rxe_qp *qp, bool notify) in rxe_drain_resp_pkts() argument
515 while ((skb = skb_dequeue(&qp->resp_pkts))) { in rxe_drain_resp_pkts()
516 rxe_drop_ref(qp); in rxe_drain_resp_pkts()
520 while ((wqe = queue_head(qp->sq.queue))) { in rxe_drain_resp_pkts()
523 do_complete(qp, wqe); in rxe_drain_resp_pkts()
525 advance_consumer(qp->sq.queue); in rxe_drain_resp_pkts()
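rxe_drain_resp_pkts() frees every queued response and then flushes the send queue, optionally notifying the consumer with error completions. The rxe_drop_ref(qp) per packet matters because each skb sitting on resp_pkts pins the QP with a reference taken when it was queued. A toy refcount sketch of that "one reference per queued packet" rule, not the rxe_add_ref()/rxe_drop_ref() machinery:

    #include <stdio.h>

    struct qp_obj { int refcnt; };

    static void qp_get(struct qp_obj *qp) { qp->refcnt++; }

    static void qp_put(struct qp_obj *qp)
    {
        if (--qp->refcnt == 0)
            printf("qp freed\n");
    }

    int main(void)
    {
        struct qp_obj qp = { .refcnt = 1 };   /* creation reference */
        int queued = 3;

        for (int i = 0; i < queued; i++)
            qp_get(&qp);                      /* one ref per queued response */

        while (queued--)                      /* drain: drop one ref per packet */
            qp_put(&qp);

        qp_put(&qp);                          /* destroy: drop creation ref */
        return 0;
    }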
532 struct rxe_qp *qp = (struct rxe_qp *)arg; in rxe_completer() local
533 struct rxe_dev *rxe = to_rdev(qp->ibqp.device); in rxe_completer()
539 rxe_add_ref(qp); in rxe_completer()
541 if (!qp->valid || qp->req.state == QP_STATE_ERROR || in rxe_completer()
542 qp->req.state == QP_STATE_RESET) { in rxe_completer()
543 rxe_drain_resp_pkts(qp, qp->valid && in rxe_completer()
544 qp->req.state == QP_STATE_ERROR); in rxe_completer()
548 if (qp->comp.timeout) { in rxe_completer()
549 qp->comp.timeout_retry = 1; in rxe_completer()
550 qp->comp.timeout = 0; in rxe_completer()
552 qp->comp.timeout_retry = 0; in rxe_completer()
555 if (qp->req.need_retry) in rxe_completer()
561 pr_debug("qp#%d state = %s\n", qp_num(qp), in rxe_completer()
565 skb = skb_dequeue(&qp->resp_pkts); in rxe_completer()
568 qp->comp.timeout_retry = 0; in rxe_completer()
574 state = get_wqe(qp, pkt, &wqe); in rxe_completer()
578 state = check_psn(qp, pkt, wqe); in rxe_completer()
582 state = check_ack(qp, pkt, wqe); in rxe_completer()
586 state = do_read(qp, pkt, wqe); in rxe_completer()
590 state = do_atomic(qp, pkt, wqe); in rxe_completer()
602 state = complete_ack(qp, pkt, wqe); in rxe_completer()
606 state = complete_wqe(qp, pkt, wqe); in rxe_completer()
611 qp->comp.opcode = -1; in rxe_completer()
613 qp->comp.opcode = pkt->opcode; in rxe_completer()
615 if (psn_compare(pkt->psn, qp->comp.psn) >= 0) in rxe_completer()
616 qp->comp.psn = (pkt->psn + 1) & BTH_PSN_MASK; in rxe_completer()
618 if (qp->req.wait_psn) { in rxe_completer()
619 qp->req.wait_psn = 0; in rxe_completer()
620 rxe_run_task(&qp->req.task, 1); in rxe_completer()
628 rxe_drop_ref(pkt->qp); in rxe_completer()
635 if (qp->comp.timeout_retry && wqe) { in rxe_completer()
648 if ((qp_type(qp) == IB_QPT_RC) && in rxe_completer()
649 (qp->req.state == QP_STATE_READY) && in rxe_completer()
650 (psn_compare(qp->req.psn, qp->comp.psn) > 0) && in rxe_completer()
651 qp->qp_timeout_jiffies) in rxe_completer()
652 mod_timer(&qp->retrans_timer, in rxe_completer()
653 jiffies + qp->qp_timeout_jiffies); in rxe_completer()
672 if (qp->comp.started_retry && in rxe_completer()
673 !qp->comp.timeout_retry) { in rxe_completer()
675 rxe_drop_ref(pkt->qp); in rxe_completer()
683 if (qp->comp.retry_cnt > 0) { in rxe_completer()
684 if (qp->comp.retry_cnt != 7) in rxe_completer()
685 qp->comp.retry_cnt--; in rxe_completer()
691 if (psn_compare(qp->req.psn, in rxe_completer()
692 qp->comp.psn) > 0) { in rxe_completer()
698 qp->req.need_retry = 1; in rxe_completer()
699 qp->comp.started_retry = 1; in rxe_completer()
700 rxe_run_task(&qp->req.task, 0); in rxe_completer()
704 rxe_drop_ref(pkt->qp); in rxe_completer()
719 if (qp->comp.rnr_retry > 0) { in rxe_completer()
720 if (qp->comp.rnr_retry != 7) in rxe_completer()
721 qp->comp.rnr_retry--; in rxe_completer()
723 qp->req.need_retry = 1; in rxe_completer()
725 qp_num(qp)); in rxe_completer()
726 mod_timer(&qp->rnr_nak_timer, in rxe_completer()
729 rxe_drop_ref(pkt->qp); in rxe_completer()
743 do_complete(qp, wqe); in rxe_completer()
744 rxe_qp_error(qp); in rxe_completer()
747 rxe_drop_ref(pkt->qp); in rxe_completer()
761 rxe_drop_ref(qp); in rxe_completer()
769 rxe_drop_ref(qp); in rxe_completer()
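Taken together, the rxe_completer() hits show a state machine: each helper returns the next comp_state, the loop switches on it until the packet (or timeout) is fully handled, references on the packet and the QP are dropped on the way out, and the retransmit timer is re-armed while acks are still outstanding on an RC QP. A skeleton of that shape, using a reduced, hypothetical subset of the real comp_state enum in rxe_comp.c:

    #include <stdio.h>

    enum comp_state {
        COMPST_GET_WQE,
        COMPST_CHECK_PSN,
        COMPST_CHECK_ACK,
        COMPST_COMP_ACK,
        COMPST_DONE,
    };

    /* Each stage decides the next state, like get_wqe()/check_psn()/... above. */
    static enum comp_state get_wqe(void)   { return COMPST_CHECK_PSN; }
    static enum comp_state check_psn(void) { return COMPST_CHECK_ACK; }
    static enum comp_state check_ack(void) { return COMPST_COMP_ACK; }
    static enum comp_state comp_ack(void)  { return COMPST_DONE; }

    int main(void)
    {
        enum comp_state state = COMPST_GET_WQE;

        while (state != COMPST_DONE) {
            switch (state) {
            case COMPST_GET_WQE:   state = get_wqe();   break;
            case COMPST_CHECK_PSN: state = check_psn(); break;
            case COMPST_CHECK_ACK: state = check_ack(); break;
            case COMPST_COMP_ACK:  state = comp_ack();  break;
            case COMPST_DONE:      break;
            }
            printf("state = %d\n", state);
        }
        return 0;
    }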