/*
 * Copyright(c) 2015 - 2018 Intel Corporation.
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * - Neither the name of Intel Corporation nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 */
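/*
 * Note: what follows is a non-contiguous excerpt of the RC (Reliable
 * Connected) protocol engine of the Linux hfi1 InfiniBand driver
 * (drivers/infiniband/hw/hfi1/rc.c). Elided code is marked with a
 * bare "..." comment; reconstructed glue (labels, else branches) is a
 * best effort that follows the upstream structure but has not been
 * re-verified against a checkout.
 */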
/* In find_prev_entry(), called with the send lock held: */
    __must_hold(&qp->s_lock)
{
    /* ... */
    for (i = qp->r_head_ack_queue; ; i = p) {
        if (i == qp->s_tail_ack_queue)
            /* ... */
        if (i)
            p = i - 1;
        else
            p = rvt_size_atomic(ib_to_rvt(qp->ibqp.device));
        if (p == qp->r_head_ack_queue) {
            /* ... */
        }
        e = &qp->s_ack_queue[p];
        if (!e->opcode) {
            /* ... */
        }
        if (cmp_psn(psn, e->psn) >= 0) {
            if (p == qp->s_tail_ack_queue &&
                cmp_psn(psn, e->lpsn) <= 0)
                /* ... */
        }
    }
    /* ... */
}
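/*
 * Annotation (not in the source): the loop above walks the ACK queue
 * backwards from r_head_ack_queue, stopping at the first entry whose
 * [psn, lpsn] range can contain the PSN being searched for. cmp_psn()
 * compares 24-bit packet sequence numbers with wraparound, returning
 * a negative, zero, or positive value like memcmp(); mask_psn()
 * reduces a counter to the 24 PSN bits carried in BTH[2].
 */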
/**
 * make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
 * ...
 */
/* In make_rc_ack(): */
    u32 bth1 = qp->remote_qpn | (HFI1_CAP_IS_KSET(OPFN) << IB_BTHE_E_SHIFT);
    u32 pmtu = qp->pmtu;
    struct hfi1_qp_priv *qpriv = qp->priv;
    u8 next = qp->s_tail_ack_queue;
    /* ... */

    lockdep_assert_held(&qp->s_lock);
    /* ... */
    if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
        goto bail;

    if (qpriv->hdr_type == HFI1_PKT_TYPE_9B)
        /* header size in 32-bit words LRH+BTH = (8+12)/4. */
        hwords = 5;
    else
        /* header size in 32-bit words 16B LRH+BTH = (16+12)/4. */
        hwords = 7;

    switch (qp->s_ack_state) {
    /* ... */
        e = &qp->s_ack_queue[qp->s_tail_ack_queue];
        /* ... */
        if (++next > rvt_size_atomic(&dev->rdi))
            next = 0;
        /* ... */
        e = &qp->s_ack_queue[qp->s_tail_ack_queue];
        if (e->opcode != TID_OP(WRITE_REQ) &&
            qp->s_acked_ack_queue == qp->s_tail_ack_queue)
            qp->s_acked_ack_queue = next;
        qp->s_tail_ack_queue = next;
        trace_hfi1_rsp_make_rc_ack(qp, e->psn);
        /* ... */
        /* Check for no next entry in the queue. */
        if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
            if (qp->s_flags & RVT_S_ACK_PENDING)
                goto normal;
            goto bail;
        }

        e = &qp->s_ack_queue[qp->s_tail_ack_queue];
        /* ... */
        if ((qpriv->s_flags & HFI1_R_TID_WAIT_INTERLCK) ||
            /* ... */) {
            iowait_set_flag(&qpriv->s_iowait, IOWAIT_PENDING_IB);
            goto bail;
        }
        if (e->opcode == OP(RDMA_READ_REQUEST)) {
            /*
             * ... the
             * responder has seen until the requester re-sends it.
             */
            len = e->rdma_sge.sge_length;
            if (len && !e->rdma_sge.mr) {
                if (qp->s_acked_ack_queue ==
                    qp->s_tail_ack_queue)
                    qp->s_acked_ack_queue =
                        qp->r_head_ack_queue;
                qp->s_tail_ack_queue = qp->r_head_ack_queue;
                goto bail;
            }
            /* Copy SGE state in case we need to resend */
            ps->s_txreq->mr = e->rdma_sge.mr;
            if (ps->s_txreq->mr)
                rvt_get_mr(ps->s_txreq->mr);
            qp->s_ack_rdma_sge.sge = e->rdma_sge;
            qp->s_ack_rdma_sge.num_sge = 1;
            ps->s_txreq->ss = &qp->s_ack_rdma_sge;
            if (len > pmtu) {
                len = pmtu;
                qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
            } else {
                qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
                e->sent = 1;
            }
            ohdr->u.aeth = rvt_compute_aeth(qp);
            /* ... */
            qp->s_ack_rdma_psn = e->psn;
            bth2 = mask_psn(qp->s_ack_rdma_psn++);
        } else if (e->opcode == TID_OP(WRITE_REQ)) {
            /* ... */
            if (req->state == TID_REQUEST_RESEND ||
                req->state == TID_REQUEST_INIT_RESEND)
                /* ... */
            qp->s_ack_state = TID_OP(WRITE_RESP);
            qp->s_ack_rdma_psn = mask_psn(e->psn + req->cur_seg);
            /* ... */
        } else if (e->opcode == TID_OP(READ_REQ)) {
            /*
             * ... the
             * responder has seen until the requester re-sends it.
             */
            len = e->rdma_sge.sge_length;
            if (len && !e->rdma_sge.mr) {
                if (qp->s_acked_ack_queue ==
                    qp->s_tail_ack_queue)
                    qp->s_acked_ack_queue =
                        qp->r_head_ack_queue;
                qp->s_tail_ack_queue = qp->r_head_ack_queue;
                goto bail;
            }
            /* Copy SGE state in case we need to resend */
            ps->s_txreq->mr = e->rdma_sge.mr;
            if (ps->s_txreq->mr)
                rvt_get_mr(ps->s_txreq->mr);
            qp->s_ack_rdma_sge.sge = e->rdma_sge;
            qp->s_ack_rdma_sge.num_sge = 1;
            qp->s_ack_state = TID_OP(READ_RESP);
            /* ... */
        } else {
            /* COMPARE_SWAP or FETCH_ADD */
            ps->s_txreq->ss = NULL;
            /* ... */
            qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
            ohdr->u.at.aeth = rvt_compute_aeth(qp);
            ib_u64_put(e->atomic_data, &ohdr->u.at.atomic_ack_eth);
            hwords += sizeof(ohdr->u.at) / sizeof(u32);
            bth2 = mask_psn(e->psn);
            e->sent = 1;
        }
        bth0 = qp->s_ack_state << 24;
        break;

    case OP(RDMA_READ_RESPONSE_FIRST):
        qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
        /* FALLTHROUGH */
    case OP(RDMA_READ_RESPONSE_MIDDLE):
        ps->s_txreq->ss = &qp->s_ack_rdma_sge;
        ps->s_txreq->mr = qp->s_ack_rdma_sge.sge.mr;
        if (ps->s_txreq->mr)
            rvt_get_mr(ps->s_txreq->mr);
        len = qp->s_ack_rdma_sge.sge.sge_length;
        if (len > pmtu) {
            len = pmtu;
            /* ... */
        } else {
            ohdr->u.aeth = rvt_compute_aeth(qp);
            /* ... */
            qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
            e = &qp->s_ack_queue[qp->s_tail_ack_queue];
            e->sent = 1;
        }
        bth0 = qp->s_ack_state << 24;
        bth2 = mask_psn(qp->s_ack_rdma_psn++);
        break;

    case TID_OP(WRITE_RESP):
        /*
         * ...
         * 5.3 If more resources needed, do 2.1 - 2.3.
         * ...
         */
        e = &qp->s_ack_queue[qp->s_tail_ack_queue];
        /* ... */
        if (qpriv->rnr_nak_state == TID_RNR_NAK_SEND &&
            qp->s_tail_ack_queue == qpriv->r_tid_alloc &&
            req->cur_seg == req->alloc_seg) {
            qpriv->rnr_nak_state = TID_RNR_NAK_SENT;
            /* ... */
        }
        /* ... */
        bth2 = mask_psn(qp->s_ack_rdma_psn);
        /* ... */
            &ps->s_txreq->ss);
        /* ... */
        bth0 = qp->s_ack_state << 24;
        qp->s_ack_rdma_psn++;
        trace_hfi1_tid_req_make_rc_ack_write(qp, 0, e->opcode, e->psn,
                                             e->lpsn, req);
        if (req->cur_seg != req->total_segs)
            break;

        e->sent = 1;
        /* Do not free e->rdma_sge until all data are received */
        qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
        break;

    case TID_OP(READ_RESP):
        e = &qp->s_ack_queue[qp->s_tail_ack_queue];
        ps->s_txreq->ss = &qp->s_ack_rdma_sge;
        /* ... */
            e->sent = 1;
            /*
             * Increment qp->s_tail_ack_queue through s_ack_state
             * interlock.
             */
            qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
        /* ... */
        break;

    default:
normal:
        /* Send a regular ACK. ... */
        qp->s_ack_state = OP(SEND_ONLY);
        /* ... */
        if (qp->s_nak_state)
            ohdr->u.aeth =
                cpu_to_be32((qp->r_msn & IB_MSN_MASK) |
                            (qp->s_nak_state <<
                             IB_AETH_CREDIT_SHIFT));
        else
            ohdr->u.aeth = rvt_compute_aeth(qp);
        /* ... */
        bth2 = mask_psn(qp->s_ack_psn);
        qp->s_flags &= ~RVT_S_ACK_PENDING;
        ps->s_txreq->txreq.flags |= SDMA_TXREQ_F_VIP;
        ps->s_txreq->ss = NULL;
    }
    qp->s_rdma_ack_cnt++;
    ps->s_txreq->sde = qpriv->s_sde;
    ps->s_txreq->s_cur_size = len;
    ps->s_txreq->hdr_dwords = hwords;
    /* ... */

/* ... */
    spin_unlock_irqrestore(&qp->s_lock, ps->flags);
    spin_lock_irqsave(&qp->r_lock, ps->flags);
    spin_lock(&qp->s_lock);
    /* ... */
    spin_unlock(&qp->s_lock);
    spin_unlock_irqrestore(&qp->r_lock, ps->flags);
    spin_lock_irqsave(&qp->s_lock, ps->flags);
bail:
    qp->s_ack_state = OP(ACKNOWLEDGE);
    /* ... */
    qp->s_flags &= ~(RVT_S_RESP_PENDING
                     | RVT_S_ACK_PENDING
                     | HFI1_S_AHG_VALID);
    /* ... */
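/*
 * Annotation (not in the source): make_rc_ack() is the responder-side
 * state machine. qp->s_ack_state records the opcode of the response
 * packet built most recently, and the switch above picks the next
 * one: multi-packet RDMA READ responses walk FIRST -> MIDDLE -> LAST
 * one pmtu at a time (bth2 carries mask_psn(qp->s_ack_rdma_psn++) per
 * packet), while bare ACKs and NAKs take the "normal" path and encode
 * either a NAK code or a credit count into the AETH word.
 */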
/**
 * hfi1_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
 * ...
 */
/* In hfi1_make_rc_req(): */
    struct hfi1_qp_priv *priv = qp->priv;
    struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
    /* ... */
    /* header size in 32-bit words LRH+BTH = (8+12)/4. */
    u32 hwords = 5;
    /* ... */
    u32 bth1 = qp->remote_qpn | (HFI1_CAP_IS_KSET(OPFN) << IB_BTHE_E_SHIFT);
    u32 pmtu = qp->pmtu;
    /* ... */

    lockdep_assert_held(&qp->s_lock);
    ps->s_txreq = get_txreq(ps->dev, qp);
    if (!ps->s_txreq)
        goto bail_no_tx;

    if (priv->hdr_type == HFI1_PKT_TYPE_9B) {
        /* header size in 32-bit words LRH+BTH = (8+12)/4. */
        hwords = 5;
        if (rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)
            ohdr = &ps->s_txreq->phdr.hdr.ibh.u.l.oth;
        else
            ohdr = &ps->s_txreq->phdr.hdr.ibh.u.oth;
    } else {
        /* header size in 32-bit words 16B LRH+BTH = (16+12)/4. */
        hwords = 7;
        if ((rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH) &&
            (hfi1_check_mcast(rdma_ah_get_dlid(&qp->remote_ah_attr))))
            ohdr = &ps->s_txreq->phdr.hdr.opah.u.l.oth;
        else
            ohdr = &ps->s_txreq->phdr.hdr.opah.u.oth;
    }

    /* Sending responses has higher priority over sending requests. */
    if ((qp->s_flags & RVT_S_RESP_PENDING) &&
        make_rc_ack(dev, qp, ohdr, ps))
        return 1;

    if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
        if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
            goto bail;
        /* We are in the error state, flush the work request. */
        if (qp->s_last == READ_ONCE(qp->s_head))
            goto bail;
        /* If DMAs are in progress, we can't flush immediately. */
        if (iowait_sdma_pending(&priv->s_iowait)) {
            qp->s_flags |= RVT_S_WAIT_DMA;
            goto bail;
        }
        /* ... */
        wqe = rvt_get_swqe_ptr(qp, qp->s_last);
        hfi1_trdma_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
                                 IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR);
        /* will get called again */
        goto done_free_tx;
    }

    if (qp->s_flags & (RVT_S_WAIT_RNR | RVT_S_WAIT_ACK | HFI1_S_WAIT_HALT))
        goto bail;

    if (cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) {
        if (cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) {
            qp->s_flags |= RVT_S_WAIT_PSN;
            goto bail;
        }
        qp->s_sending_psn = qp->s_psn;
        qp->s_sending_hpsn = qp->s_psn - 1;
    }
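/*
 * Annotation (not in the source): qp->s_sending_psn..qp->s_sending_hpsn
 * is the inclusive range of PSNs currently being transmitted. If the
 * next PSN to build, qp->s_psn, still falls inside that window, the QP
 * must wait (RVT_S_WAIT_PSN) rather than reuse a PSN that is in
 * flight; setting hpsn to s_psn - 1 makes the window empty once the
 * earlier packets have drained.
 */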
    /* Send a request. */
    wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
check_s_state:
    switch (qp->s_state) {
    default:
        if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK))
            goto bail;
        /*
         * Resend an old request or start a new one.
         * ...
         */
        if (qp->s_cur == qp->s_tail) {
            /* Check if send work queue is empty. */
            if (qp->s_tail == READ_ONCE(qp->s_head)) {
                /* ... */
                goto bail;
            }
            /*
             * If a fence is requested, wait for previous
             * RDMA read and atomic operations to finish.
             * ...
             */
            if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
                qp->s_num_rd_atomic &&
                (wqe->wr.opcode != IB_WR_TID_RDMA_READ ||
                 priv->pending_tid_r_segs < qp->s_num_rd_atomic)) {
                qp->s_flags |= RVT_S_WAIT_FENCE;
                goto bail;
            }
            /*
             * Local operations are processed immediately
             * after all prior requests have completed.
             */
            if (wqe->wr.opcode == IB_WR_REG_MR ||
                wqe->wr.opcode == IB_WR_LOCAL_INV) {
                /* ... */
                if (qp->s_last != qp->s_cur)
                    goto bail;
                if (++qp->s_cur == qp->s_size)
                    qp->s_cur = 0;
                if (++qp->s_tail == qp->s_size)
                    qp->s_tail = 0;
                if (!(wqe->wr.send_flags &
                      RVT_SEND_COMPLETION_ONLY)) {
                    /* ... */
                        wqe->wr.ex.invalidate_rkey);
                    /* ... */
                }
                /* ... */
                    atomic_dec(&qp->local_ops_pending);
                /* ... */
                goto done_free_tx;
            }
            /* ... */
            qp->s_psn = wqe->psn;
        }
        /* ... */
        len = wqe->length;
        ss = &qp->s_sge;
        bth2 = mask_psn(qp->s_psn);
        /* ... */
        if ((priv->s_flags & HFI1_S_TID_WAIT_INTERLCK) ||
            /* ... */)
            goto bail;
        switch (wqe->wr.opcode) {
        case IB_WR_SEND:
        case IB_WR_SEND_WITH_IMM:
        case IB_WR_SEND_WITH_INV:
            /* ... */
            if (len > pmtu) {
                qp->s_state = OP(SEND_FIRST);
                len = pmtu;
                break;
            }
            if (wqe->wr.opcode == IB_WR_SEND) {
                qp->s_state = OP(SEND_ONLY);
            } else if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
                qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
                /* Immediate data comes after the BTH */
                ohdr->u.imm_data = wqe->wr.ex.imm_data;
                /* ... */
            } else {
                qp->s_state = OP(SEND_ONLY_WITH_INVALIDATE);
                /* Invalidate rkey comes after the BTH */
                ohdr->u.ieth = cpu_to_be32(
                    wqe->wr.ex.invalidate_rkey);
                /* ... */
            }
            if (wqe->wr.send_flags & IB_SEND_SOLICITED)
                bth0 |= IB_BTH_SOLICITED;
            /* ... */
            if (++qp->s_cur == qp->s_size)
                qp->s_cur = 0;
            break;

        case IB_WR_RDMA_WRITE:
            if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
                qp->s_lsn++;
            /* ... */
            put_ib_reth_vaddr(
                wqe->rdma_wr.remote_addr,
                &ohdr->u.rc.reth);
            ohdr->u.rc.reth.rkey =
                cpu_to_be32(wqe->rdma_wr.rkey);
            ohdr->u.rc.reth.length = cpu_to_be32(len);
            /* ... */
            if (len > pmtu) {
                qp->s_state = OP(RDMA_WRITE_FIRST);
                len = pmtu;
                break;
            }
            if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
                qp->s_state = OP(RDMA_WRITE_ONLY);
            } else {
                qp->s_state =
                    OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
                /* Immediate data comes after RETH */
                ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
                /* ... */
                if (wqe->wr.send_flags & IB_SEND_SOLICITED)
                    bth0 |= IB_BTH_SOLICITED;
            }
            /* ... */
            if (++qp->s_cur == qp->s_size)
                qp->s_cur = 0;
            break;
        case IB_WR_TID_RDMA_WRITE:
            if (newreq) {
                /* ... */
                if (atomic_read(&priv->n_tid_requests) >=
                    /* ... */
                    goto bail;
                /* ... */
                if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
                    qp->s_lsn++;
            }
            /* ... */
            if (priv->s_tid_cur == HFI1_QP_WQE_INVALID) {
                priv->s_tid_cur = qp->s_cur;
                if (priv->s_tid_tail == HFI1_QP_WQE_INVALID) {
                    priv->s_tid_tail = qp->s_cur;
                    priv->s_state = TID_OP(WRITE_RESP);
                }
            } else if (priv->s_tid_cur == priv->s_tid_head) {
                struct rvt_swqe *__w;
                struct tid_rdma_request *__r;

                __w = rvt_get_swqe_ptr(qp, priv->s_tid_cur);
                __r = wqe_to_tid_req(__w);
                /* ... */
                if (__w->wr.opcode != IB_WR_TID_RDMA_WRITE ||
                    __r->state == TID_REQUEST_INACTIVE ||
                    __r->state == TID_REQUEST_COMPLETE ||
                    ((__r->state == TID_REQUEST_ACTIVE ||
                      __r->state == TID_REQUEST_SYNC) &&
                     __r->comp_seg == __r->total_segs)) {
                    if (priv->s_tid_tail ==
                        priv->s_tid_cur &&
                        priv->s_state ==
                        TID_OP(WRITE_DATA_LAST)) {
                        priv->s_tid_tail = qp->s_cur;
                        priv->s_state =
                            TID_OP(WRITE_RESP);
                    }
                    priv->s_tid_cur = qp->s_cur;
                }
                /*
                 * ... none of the indices need to be
                 * updated. However, the priv->s_state should.
                 */
                if (priv->s_tid_tail == qp->s_cur &&
                    priv->s_state == TID_OP(WRITE_DATA_LAST))
                    priv->s_state = TID_OP(WRITE_RESP);
            }
            /* ... */
            if (newreq) {
                priv->s_tid_head = qp->s_cur;
                priv->pending_tid_w_resp += req->total_segs;
                atomic_inc(&priv->n_tid_requests);
                atomic_dec(&priv->n_requests);
            } else {
                req->state = TID_REQUEST_RESEND;
                req->comp_seg = delta_psn(bth2, wqe->psn);
                /*
                 * ... since we are going
                 * to re-receive them.
                 */
                req->setup_head = req->clear_tail;
                priv->pending_tid_w_resp +=
                    delta_psn(wqe->lpsn, bth2) + 1;
            }
            /* ... */
            trace_hfi1_tid_req_make_req_write(qp, /* ... */,
                                              wqe->wr.opcode,
                                              wqe->psn, wqe->lpsn,
                                              req);
            if (++qp->s_cur == qp->s_size)
                qp->s_cur = 0;
            break;
        case IB_WR_RDMA_READ:
            /* ... */
            if (qp->s_num_rd_atomic >=
                qp->s_max_rd_atomic) {
                qp->s_flags |= RVT_S_WAIT_RDMAR;
                goto bail;
            }
            qp->s_num_rd_atomic++;
            if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
                qp->s_lsn++;
            put_ib_reth_vaddr(
                wqe->rdma_wr.remote_addr,
                &ohdr->u.rc.reth);
            ohdr->u.rc.reth.rkey =
                cpu_to_be32(wqe->rdma_wr.rkey);
            ohdr->u.rc.reth.length = cpu_to_be32(len);
            qp->s_state = OP(RDMA_READ_REQUEST);
            hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
            /* ... */
            if (++qp->s_cur == qp->s_size)
                qp->s_cur = 0;
            break;
        case IB_WR_TID_RDMA_READ:
            wpriv = wqe->priv;
            /* ... */
            trace_hfi1_tid_req_make_req_read(qp, /* ... */,
                                             wqe->wr.opcode,
                                             wqe->psn, wqe->lpsn,
                                             req);
            delta = cmp_psn(qp->s_psn, wqe->psn);
            /*
             * ... (2) The request is a resend,
             * but the qp->s_state is set to OP(RDMA_READ_REQUEST)
             * ... received just before this; (3) We are re-sending a
             * request ...
             */
            if (qp->s_num_rd_atomic >= qp->s_max_rd_atomic) {
                qp->s_flags |= RVT_S_WAIT_RDMAR;
                goto bail;
            }
            /* ... */
            flow =
                &req->flows[req->setup_head];
            /* ... */
            if (!flow->npagesets) {
                qp->s_sge.sge = wqe->sg_list[0];
                qp->s_sge.sg_list = wqe->sg_list + 1;
                qp->s_sge.num_sge = wqe->wr.num_sge;
                qp->s_sge.total_len = wqe->length;
                qp->s_len = wqe->length;
                req->isge = 0;
                req->clear_tail = req->setup_head;
                req->flow_idx = req->setup_head;
                req->state = TID_REQUEST_ACTIVE;
            } else {
                /* Re-send a request */
                req->cur_seg = 0;
                req->comp_seg = 0;
                req->ack_pending = 0;
                req->flow_idx = req->clear_tail;
                req->state = TID_REQUEST_RESEND;
            }
            req->s_next_psn = qp->s_psn;
            /* ... */
            len = min_t(u32, req->seg_len,
                        wqe->length - req->seg_len * req->cur_seg);
            /* ... */
            if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
                qp->s_lsn++;
            /* ... */
            ss = &wpriv->ss;
            /* ... */
            if (req->cur_seg >= req->total_segs &&
                ++qp->s_cur == qp->s_size)
                qp->s_cur = 0;
            break;
        case IB_WR_ATOMIC_CMP_AND_SWP:
        case IB_WR_ATOMIC_FETCH_AND_ADD:
        case IB_WR_OPFN:
            /* ... */
            if (qp->s_num_rd_atomic >=
                qp->s_max_rd_atomic) {
                qp->s_flags |= RVT_S_WAIT_RDMAR;
                goto bail;
            }
            qp->s_num_rd_atomic++;
            /* ... */
            if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
                qp->s_lsn++;
            if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
                wqe->wr.opcode == IB_WR_OPFN) {
                qp->s_state = OP(COMPARE_SWAP);
                put_ib_ateth_swap(wqe->atomic_wr.swap,
                                  &ohdr->u.atomic_eth);
                put_ib_ateth_compare(wqe->atomic_wr.compare_add,
                                     &ohdr->u.atomic_eth);
            } else {
                qp->s_state = OP(FETCH_ADD);
                put_ib_ateth_swap(wqe->atomic_wr.compare_add,
                                  &ohdr->u.atomic_eth);
                put_ib_ateth_compare(0, &ohdr->u.atomic_eth);
            }
            put_ib_ateth_vaddr(wqe->atomic_wr.remote_addr,
                               &ohdr->u.atomic_eth);
            ohdr->u.atomic_eth.rkey = cpu_to_be32(
                wqe->atomic_wr.rkey);
            /* ... */
            if (++qp->s_cur == qp->s_size)
                qp->s_cur = 0;
            break;

        /* ... */
        }
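        /*
         * Annotation (not in the source): COMPARE_SWAP and FETCH_ADD
         * share one AtomicETH layout, so FETCH_ADD reuses the "swap"
         * field for the add operand and zeroes the compare field;
         * OPFN piggybacks on COMPARE_SWAP to exchange feature
         * parameters. All of these consume an rd_atomic slot, which
         * is why the same s_max_rd_atomic throttle guards them and
         * RDMA READ alike.
         */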
        if (wqe->wr.opcode != IB_WR_TID_RDMA_READ) {
            qp->s_sge.sge = wqe->sg_list[0];
            qp->s_sge.sg_list = wqe->sg_list + 1;
            qp->s_sge.num_sge = wqe->wr.num_sge;
            qp->s_sge.total_len = wqe->length;
            qp->s_len = wqe->length;
        }
        if (newreq) {
            qp->s_tail++;
            if (qp->s_tail >= qp->s_size)
                qp->s_tail = 0;
        }
        if (wqe->wr.opcode == IB_WR_RDMA_READ ||
            wqe->wr.opcode == IB_WR_TID_RDMA_WRITE)
            qp->s_psn = wqe->lpsn + 1;
        else if (wqe->wr.opcode == IB_WR_TID_RDMA_READ)
            qp->s_psn = req->s_next_psn;
        else
            qp->s_psn++;
        break;
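    /*
     * Annotation (not in the source): each request consumes a range
     * of PSNs [wqe->psn, wqe->lpsn]. RDMA READ and TID RDMA WRITE
     * advance qp->s_psn past the whole range at once because the peer
     * generates the packets covering it, while ordinary sends and
     * writes advance one PSN per pmtu-sized packet built here.
     */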
    case OP(RDMA_READ_RESPONSE_FIRST):
        /*
         * qp->s_state is normally set to the opcode of the
         * last packet constructed for new requests and therefore
         * is never set to RDMA read response.
         * ...
         */
        qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
        /* FALLTHROUGH */
    case OP(SEND_FIRST):
        qp->s_state = OP(SEND_MIDDLE);
        /* FALLTHROUGH */
    case OP(SEND_MIDDLE):
        bth2 = mask_psn(qp->s_psn++);
        ss = &qp->s_sge;
        len = qp->s_len;
        if (len > pmtu) {
            len = pmtu;
            /* ... */
            break;
        }
        if (wqe->wr.opcode == IB_WR_SEND) {
            qp->s_state = OP(SEND_LAST);
        } else if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
            qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
            /* Immediate data comes after the BTH */
            ohdr->u.imm_data = wqe->wr.ex.imm_data;
            /* ... */
        } else {
            qp->s_state = OP(SEND_LAST_WITH_INVALIDATE);
            /* Invalidate rkey comes after the BTH */
            ohdr->u.ieth = cpu_to_be32(wqe->wr.ex.invalidate_rkey);
            /* ... */
        }
        if (wqe->wr.send_flags & IB_SEND_SOLICITED)
            bth0 |= IB_BTH_SOLICITED;
        /* ... */
        qp->s_cur++;
        if (qp->s_cur >= qp->s_size)
            qp->s_cur = 0;
        break;
    case OP(RDMA_READ_RESPONSE_LAST):
        /*
         * qp->s_state is normally set to the opcode of the
         * last packet constructed for new requests and therefore
         * is never set to RDMA read response.
         * ...
         */
        qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
        /* FALLTHROUGH */
    case OP(RDMA_WRITE_FIRST):
        qp->s_state = OP(RDMA_WRITE_MIDDLE);
        /* FALLTHROUGH */
    case OP(RDMA_WRITE_MIDDLE):
        bth2 = mask_psn(qp->s_psn++);
        ss = &qp->s_sge;
        len = qp->s_len;
        if (len > pmtu) {
            len = pmtu;
            /* ... */
            break;
        }
        if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
            qp->s_state = OP(RDMA_WRITE_LAST);
        } else {
            qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
            /* Immediate data comes after the BTH */
            ohdr->u.imm_data = wqe->wr.ex.imm_data;
            /* ... */
            if (wqe->wr.send_flags & IB_SEND_SOLICITED)
                bth0 |= IB_BTH_SOLICITED;
        }
        /* ... */
        qp->s_cur++;
        if (qp->s_cur >= qp->s_size)
            qp->s_cur = 0;
        break;

    case OP(RDMA_READ_RESPONSE_MIDDLE):
        /*
         * qp->s_state is normally set to the opcode of the
         * last packet constructed for new requests and therefore
         * is never set to RDMA read response.
         * ...
         */
        len = (delta_psn(qp->s_psn, wqe->psn)) * pmtu;
        put_ib_reth_vaddr(
            wqe->rdma_wr.remote_addr + len,
            &ohdr->u.rc.reth);
        ohdr->u.rc.reth.rkey =
            cpu_to_be32(wqe->rdma_wr.rkey);
        ohdr->u.rc.reth.length = cpu_to_be32(wqe->length - len);
        qp->s_state = OP(RDMA_READ_REQUEST);
        hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
        bth2 = mask_psn(qp->s_psn) | IB_BTH_REQ_ACK;
        qp->s_psn = wqe->lpsn + 1;
        /* ... */
        qp->s_cur++;
        if (qp->s_cur == qp->s_size)
            qp->s_cur = 0;
        break;
    case TID_OP(WRITE_RESP):
        /* Restart a TID RDMA WRITE request. */
        req->state = TID_REQUEST_RESEND;
        /* ... */
        remote = rcu_dereference(priv->tid_rdma.remote);
        req->comp_seg = delta_psn(qp->s_psn, wqe->psn);
        len = wqe->length - (req->comp_seg * remote->max_len);
        /* ... */
        bth2 = mask_psn(qp->s_psn);
        /* ... */
        qp->s_psn = wqe->lpsn + 1;
        /* ... */
        qp->s_state = TID_OP(WRITE_REQ);
        priv->pending_tid_w_resp += delta_psn(wqe->lpsn, bth2) + 1;
        priv->s_tid_cur = qp->s_cur;
        if (++qp->s_cur == qp->s_size)
            qp->s_cur = 0;
        trace_hfi1_tid_req_make_req_write(qp, 0, wqe->wr.opcode,
                                          wqe->psn, wqe->lpsn, req);
        break;

    case TID_OP(READ_RESP):
        if (wqe->wr.opcode != IB_WR_TID_RDMA_READ)
            goto bail;
        /* This is used to restart a TID read request */
        wpriv = wqe->priv;
        /*
         * Back down. The field qp->s_psn has been set to the psn with
         * which the request should be restarted. ...
         */
        req->cur_seg = delta_psn(qp->s_psn, wqe->psn) / priv->pkts_ps;
        /* ... */
        /*
         * ... Since the TID flows are set up at the same
         * time, we can use the req->state change to check if the
         * request is a resend.
         */
        req->state = TID_REQUEST_RESEND;
        /* ... */
        if (req->state != TID_REQUEST_ACTIVE) {
            /* ... */
            hfi1_kern_clear_hw_flow(priv->rcd, qp);
            /* ... */
            req->state = TID_REQUEST_RESEND;
        }
        len = min_t(u32, req->seg_len,
                    wqe->length - req->seg_len * req->cur_seg);
        flow = &req->flows[req->flow_idx];
        len -= flow->sent;
        req->s_next_psn = flow->flow_state.ib_lpsn + 1;
        /* ... */
        ss = &wpriv->ss;
        /* ... */
        if (req->cur_seg >= req->total_segs &&
            ++qp->s_cur == qp->s_size)
            qp->s_cur = 0;
        qp->s_psn = req->s_next_psn;
        trace_hfi1_tid_req_make_req_read(qp, 0, wqe->wr.opcode,
                                         wqe->psn, wqe->lpsn, req);
        break;
    case TID_OP(READ_REQ):
        delta = cmp_psn(qp->s_psn, wqe->psn);
        /*
         * ... If this
         * happens at the start
         * of a new request, we need to change the qp->s_state so that
         * the request can be set up properly.
         */
        if (wqe->wr.opcode != IB_WR_TID_RDMA_READ || delta == 0 ||
            qp->s_cur == qp->s_tail) {
            qp->s_state = OP(RDMA_READ_REQUEST);
            if (delta == 0 || qp->s_cur == qp->s_tail)
                goto check_s_state;
            else
                goto bail;
        }
        /* ... */
        if (qp->s_num_rd_atomic >= qp->s_max_rd_atomic) {
            qp->s_flags |= RVT_S_WAIT_RDMAR;
            goto bail;
        }

        wpriv = wqe->priv;
        /* ... */
        len = min_t(u32, req->seg_len,
                    wqe->length - req->seg_len * req->cur_seg);
        /* ... */
        ss = &wpriv->ss;
        /* ... */
        if (req->cur_seg >= req->total_segs &&
            ++qp->s_cur == qp->s_size)
            qp->s_cur = 0;
        qp->s_psn = req->s_next_psn;
        trace_hfi1_tid_req_make_req_read(qp, 0, wqe->wr.opcode,
                                         wqe->psn, wqe->lpsn, req);
        break;
    }
    qp->s_sending_hpsn = bth2;
    delta = delta_psn(bth2, wqe->psn);
    if (/* ... */ &&
        wqe->wr.opcode != IB_WR_TID_RDMA_WRITE)
        bth2 |= IB_BTH_REQ_ACK;
    if (qp->s_flags & RVT_S_SEND_ONE) {
        qp->s_flags &= ~RVT_S_SEND_ONE;
        qp->s_flags |= RVT_S_WAIT_ACK;
        bth2 |= IB_BTH_REQ_ACK;
    }
    qp->s_len -= len;
    ps->s_txreq->hdr_dwords = hwords;
    ps->s_txreq->sde = priv->s_sde;
    ps->s_txreq->ss = ss;
    ps->s_txreq->s_cur_size = len;
    hfi1_make_ruc_header(
        qp,
        ohdr,
        bth0 | (qp->s_state << 24),
        /* ... */
        ps);
    return 1;

done_free_tx:
    hfi1_put_txreq(ps->s_txreq);
    ps->s_txreq = NULL;
    return 1;

bail:
    hfi1_put_txreq(ps->s_txreq);
bail_no_tx:
    ps->s_txreq = NULL;
    qp->s_flags &= ~RVT_S_BUSY;
    /*
     * If we didn't get a txreq, the QP will be woken up later to try
     * again. Set the flags to indicate which work item to wake up.
     */
    iowait_set_flag(&priv->s_iowait, IOWAIT_PENDING_IB);
    return 0;
}
/* In hfi1_make_bth_aeth(): */
    if (qp->r_nak_state)
        ohdr->u.aeth = cpu_to_be32((qp->r_msn & IB_MSN_MASK) |
                                   (qp->r_nak_state <<
                                    IB_AETH_CREDIT_SHIFT));
    else
        ohdr->u.aeth = rvt_compute_aeth(qp);

    ohdr->bth[0] = cpu_to_be32(bth0);
    ohdr->bth[1] = cpu_to_be32(bth1 | qp->remote_qpn);
    ohdr->bth[2] = cpu_to_be32(mask_psn(qp->r_ack_psn));
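/*
 * Annotation (not in the source): the AETH word packs the 24-bit
 * message sequence number (MSN) into its low bits and the syndrome
 * into the bits above IB_AETH_CREDIT_SHIFT. For an ACK the syndrome
 * carries a credit count computed by rvt_compute_aeth(); for a NAK it
 * carries the NAK code (e.g. IB_NAK_PSN_ERROR), which is why the two
 * branches above are mutually exclusive.
 */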
/* In hfi1_queue_rc_ack(): */
    struct rvt_qp *qp = packet->qp;
    /* ... */

    spin_lock_irqsave(&qp->s_lock, flags);
    if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
        goto unlock;
    ibp = rcd_to_iport(packet->rcd);
    this_cpu_inc(*ibp->rvp.rc_qacks);
    qp->s_flags |= RVT_S_ACK_PENDING | RVT_S_RESP_PENDING;
    qp->s_nak_state = qp->r_nak_state;
    qp->s_ack_psn = qp->r_ack_psn;
    if (is_fecn)
        qp->s_flags |= RVT_S_ECN;
    /* ... */
unlock:
    spin_unlock_irqrestore(&qp->s_lock, flags);
/* In hfi1_make_rc_ack_9B(): */
    struct rvt_qp *qp = packet->qp;
    struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd);
    /* ... */
    struct ib_header *hdr = &opa_hdr->ibh;
    /* ... */

    opa_hdr->hdr_type = HFI1_PKT_TYPE_9B;
    ohdr = &hdr->u.oth;
    /* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4 */
    *hwords = 6;

    if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)) {
        *hwords += hfi1_make_grh(ibp, &hdr->u.l.grh,
                                 rdma_ah_read_grh(&qp->remote_ah_attr),
                                 *hwords - 2, SIZE_OF_CRC);
        ohdr = &hdr->u.l.oth;
        /* ... */
    }
    /* ... */
    pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);
    /* ... */
        (rdma_ah_get_sl(&qp->remote_ah_attr) & IB_SL_MASK) <<
    /* ... */
    hfi1_make_ib_hdr(hdr, lrh0, /* ... */
        opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr), 9B),
        ppd->lid | rdma_ah_get_path_bits(&qp->remote_ah_attr));
    /* ... */
    if (qp->s_mig_state == IB_MIG_MIGRATED)
        bth0 |= IB_BTH_MIG_REQ;
    /* ... */
/* In hfi1_make_rc_ack_16B(): */
    struct rvt_qp *qp = packet->qp;
    struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd);
    /* ... */
    struct hfi1_16b_header *hdr = &opa_hdr->opah;
    /* ... */

    opa_hdr->hdr_type = HFI1_PKT_TYPE_16B;
    ohdr = &hdr->u.oth;
    /* header size in 32-bit words 16B LRH+BTH+AETH = (16+12+4)/4 */
    *hwords = 8;
    /* ... */

    if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH) &&
        hfi1_check_mcast(rdma_ah_get_dlid(&qp->remote_ah_attr))) {
        *hwords += hfi1_make_grh(ibp, &hdr->u.l.grh,
                                 rdma_ah_read_grh(&qp->remote_ah_attr),
                                 *hwords - 4, *nwords);
        ohdr = &hdr->u.l.oth;
        /* ... */
    }
    /* ... */
    pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);
    /* ... */
    hfi1_make_16b_hdr(hdr, ppd->lid |
                      (rdma_ah_get_path_bits(&qp->remote_ah_attr) &
                       ((1 << ppd->lmc) - 1)),
                      opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr),
                                  16B),
                      /* ... */);
    /* ... */
    if (qp->s_mig_state == IB_MIG_MIGRATED)
        bth1 |= OPA_BTH_MIG_REQ;
    /* ... */
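/*
 * Annotation (not in the source): the two builders differ only in
 * framing. A 9B (IB-compatible) LRH is 8 bytes, so LRH+BTH+AETH =
 * (8+12+4)/4 = 6 words; the 16B OPA LRH is 16 bytes, giving
 * (16+12+4)/4 = 8 words. That is also why the GRH helper is handed
 * *hwords - 2 in the 9B case but *hwords - 4 in the 16B case.
 */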
/* We support only two types - 9B and 16B for now */
static const /* ... */ hfi1_make_rc_ack_tbl[2] = {
    [HFI1_PKT_TYPE_9B] = &hfi1_make_rc_ack_9B,
    [HFI1_PKT_TYPE_16B] = &hfi1_make_rc_ack_16B
};

/**
 * hfi1_send_rc_ack - Construct an ACK packet and send it
 * ...
 */
/* In hfi1_send_rc_ack(): */
    struct hfi1_ctxtdata *rcd = packet->rcd;
    struct rvt_qp *qp = packet->qp;
    /* ... */
    struct hfi1_qp_priv *priv = qp->priv;
    /* ... */
    u8 sc5 = ibp->sl_to_sc[rdma_ah_get_sl(&qp->remote_ah_attr)];
    /* ... */

    qp->r_adefered = 0;

    /* Don't send ACK or NAK if a RDMA read or atomic is pending. */
    if (qp->s_flags & RVT_S_RESP_PENDING) {
        hfi1_queue_rc_ack(packet, is_fecn);
        return;
    }

    /* Ensure s_rdma_ack_cnt changes are committed */
    if (qp->s_rdma_ack_cnt) {
        hfi1_queue_rc_ack(packet, is_fecn);
        return;
    }
    /* ... */

    hfi1_make_rc_ack_tbl[priv->hdr_type](packet, &opa_hdr, sc5, is_fecn,
                                         /* ... */);
    /* ... */
    pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps,
                     sc_to_vlt(ppd->dd, sc5), plen);
    pbuf = sc_buffer_alloc(rcd->sc, plen, NULL, NULL);
    /* ... */

    trace_ack_output_ibhdr(dd_from_ibdev(qp->ibqp.device),
                           /* ... */);

    /* write the pbc and data */
    ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc,
                             (priv->hdr_type == HFI1_PKT_TYPE_9B ?
                              /* ... */
/**
 * update_num_rd_atomic - update the qp->s_num_rd_atomic
 * ...
 * This is called from reset_psn() to update qp->s_num_rd_atomic
 * when the QP is restarted from an earlier PSN.
 */
/* In update_num_rd_atomic(): */
    u32 opcode = wqe->wr.opcode;

    if (opcode == IB_WR_RDMA_READ ||
        opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
        opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
        qp->s_num_rd_atomic++;
    } else if (opcode == IB_WR_TID_RDMA_READ) {
        struct tid_rdma_request *req = wqe_to_tid_req(wqe);
        struct hfi1_qp_priv *priv = qp->priv;

        if (cmp_psn(psn, wqe->lpsn) <= 0) {
            u32 cur_seg;

            cur_seg = (psn - wqe->psn) / priv->pkts_ps;
            req->ack_pending = cur_seg - req->comp_seg;
            priv->pending_tid_r_segs += req->ack_pending;
            qp->s_num_rd_atomic += req->ack_pending;
            /* ... */
                wqe->wr.opcode,
                wqe->psn,
                wqe->lpsn,
            /* ... */
        } else {
            priv->pending_tid_r_segs += req->total_segs;
            qp->s_num_rd_atomic += req->total_segs;
        }
    }
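/*
 * Annotation (not in the source): for TID RDMA READ, one "rd_atomic"
 * slot is consumed per outstanding segment rather than per request.
 * A request whose segments are priv->pkts_ps packets long, restarted
 * at PSN p, has (p - wqe->psn) / priv->pkts_ps segments already
 * built, so ack_pending is that count minus the segments the peer has
 * already completed (req->comp_seg).
 */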
/**
 * reset_psn - reset the QP state to send starting from PSN
 * ...
 */
/* In reset_psn(): */
    u32 n = qp->s_acked;
    struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, n);
    /* ... */
    struct hfi1_qp_priv *priv = qp->priv;

    lockdep_assert_held(&qp->s_lock);
    qp->s_cur = n;
    priv->pending_tid_r_segs = 0;
    priv->pending_tid_w_resp = 0;
    qp->s_num_rd_atomic = 0;

    /*
     * If we are starting the request from the beginning,
     * let the normal send code handle initialization.
     */
    if (cmp_psn(psn, wqe->psn) <= 0) {
        qp->s_state = OP(SEND_LAST);
        goto done;
    }
    /* ... */

    /* Find the work request opcode corresponding to the given PSN. */
    for (;;) {
        int diff;

        if (++n == qp->s_size)
            n = 0;
        if (n == qp->s_tail)
            break;
        wqe = rvt_get_swqe_ptr(qp, n);
        diff = cmp_psn(psn, wqe->psn);
        if (diff < 0) {
            /* Point wqe back to the previous one */
            wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
            break;
        }
        qp->s_cur = n;
        /*
         * If we are starting the request from the beginning,
         * let the normal send code handle initialization.
         */
        if (diff == 0) {
            qp->s_state = OP(SEND_LAST);
            goto done;
        }
        /* ... */
    }
    opcode = wqe->wr.opcode;

    /*
     * Set the state to restart in the middle of a request.
     * ...
     */
    switch (opcode) {
    case IB_WR_SEND:
    case IB_WR_SEND_WITH_IMM:
        qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
        break;

    case IB_WR_RDMA_WRITE:
    case IB_WR_RDMA_WRITE_WITH_IMM:
        qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
        break;

    case IB_WR_TID_RDMA_WRITE:
        qp->s_state = TID_OP(WRITE_RESP);
        break;

    case IB_WR_RDMA_READ:
        qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
        break;

    case IB_WR_TID_RDMA_READ:
        qp->s_state = TID_OP(READ_RESP);
        break;

    default:
        /* ... */
        qp->s_state = OP(SEND_LAST);
    }
done:
    priv->s_flags &= ~HFI1_S_TID_WAIT_INTERLCK;
    qp->s_psn = psn;
    /*
     * Set RVT_S_WAIT_PSN as rc_complete() may start the timer
     * ...
     */
    if ((cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) &&
        (cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0))
        qp->s_flags |= RVT_S_WAIT_PSN;
    qp->s_flags &= ~HFI1_S_AHG_VALID;
    /* ... */
/*
 * Back up requester to resend the last un-ACKed request.
 * ...
 */
/* In hfi1_restart_rc(): */
    struct hfi1_qp_priv *priv = qp->priv;
    struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
    /* ... */

    lockdep_assert_held(&qp->r_lock);
    lockdep_assert_held(&qp->s_lock);
    /* ... */
    if (qp->s_retry == 0) {
        if (qp->s_mig_state == IB_MIG_ARMED) {
            /* ... */
            qp->s_retry = qp->s_retry_cnt;
        } else if (qp->s_last == qp->s_acked) {
            /* ... */
            if (wqe->wr.opcode == IB_WR_OPFN) {
                struct hfi1_ibport *ibp =
                    to_iport(qp->ibqp.device, qp->port_num);
                /* ... */
                opfn_conn_reply(qp, priv->opfn.curr);
                /* ... */
                qp->s_flags &= ~RVT_S_WAIT_ACK;
                /* ... */
            }
            /* ... */
            if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) {
                /* ... */
                hfi1_kern_clear_hw_flow(priv->rcd, qp);
            }
            /* ... */
        }
        /* ... */
    } else {
        qp->s_retry--;
    }

    ibp = to_iport(qp->ibqp.device, qp->port_num);
    if (wqe->wr.opcode == IB_WR_RDMA_READ ||
        wqe->wr.opcode == IB_WR_TID_RDMA_READ)
        ibp->rvp.n_rc_resends++;
    else
        ibp->rvp.n_rc_resends += delta_psn(qp->s_psn, psn);

    qp->s_flags &= ~(RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR |
                     /* ... */);
    if (wait)
        qp->s_flags |= RVT_S_SEND_ONE;
    reset_psn(qp, psn);
/*
 * Set qp->s_sending_psn to the next PSN after the given one.
 * ...
 */
/* In reset_sending_psn(): */
    u32 n = qp->s_last;
    /* ... */

    lockdep_assert_held(&qp->s_lock);
    /* Find the work request corresponding to the given PSN. */
    for (;;) {
        wqe = rvt_get_swqe_ptr(qp, n);
        if (cmp_psn(psn, wqe->lpsn) <= 0) {
            if (wqe->wr.opcode == IB_WR_RDMA_READ ||
                wqe->wr.opcode == IB_WR_TID_RDMA_READ ||
                wqe->wr.opcode == IB_WR_TID_RDMA_WRITE)
                qp->s_sending_psn = wqe->lpsn + 1;
            else
                qp->s_sending_psn = psn + 1;
            break;
        }
        if (++n == qp->s_size)
            n = 0;
        if (n == qp->s_tail)
            break;
    }
/**
 * hfi1_rc_verbs_aborted - handle abort status
 * ...
 */
/* In hfi1_rc_verbs_aborted(): */
    /* ... */
    ohdr->bth[2] = cpu_to_be32(psn);
    qp->s_flags |= RVT_S_SEND_ONE;
/* In hfi1_rc_send_complete(): */
    struct hfi1_qp_priv *priv = qp->priv;
    /* ... */

    lockdep_assert_held(&qp->s_lock);
    if (!(ib_rvt_state_ops[qp->state] & RVT_SEND_OR_FLUSH_OR_RECV_OK))
        return;
    /* ... */
        WARN_ON(!qp->s_rdma_ack_cnt);
        qp->s_rdma_ack_cnt--;
        return;
    /* ... */
    if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE) {
        head = priv->s_tid_head;
        tail = priv->s_tid_cur;
        /* ... */
        if (head == tail && req->comp_seg < req->total_segs) {
            if (tail == 0)
                tail = qp->s_size - 1;
            else
                tail -= 1;
        }
    } else {
        head = qp->s_tail;
        tail = qp->s_acked;
    }
    /* ... */
    if (/* ... */ &&
        !(qp->s_flags &
          /* ... */) &&
        (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
        /* ... */
        rvt_add_retry_timer_ext(qp, priv->timeout_shift);
        /* ... */
    }
    if (/* ... */ &&
        !(priv->s_flags & HFI1_S_TID_RETRY_TIMER) &&
        (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
        /* ... */
        wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
        /* ... */
        if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE &&
            req->ack_seg < req->cur_seg)
            /* ... */
    }

    while (qp->s_last != qp->s_acked) {
        wqe = rvt_get_swqe_ptr(qp, qp->s_last);
        if (cmp_psn(wqe->lpsn, qp->s_sending_psn) >= 0 &&
            cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
            break;
        /* ... */
        trace_hfi1_qp_send_completion(qp, wqe, qp->s_last);
        /* ... */
            ib_hfi1_wc_opcode[wqe->wr.opcode],
            /* ... */
    }
    /*
     * If we were waiting for sends to complete before re-sending,
     * and they are now complete, restart sending.
     */
    if (qp->s_flags & RVT_S_WAIT_PSN &&
        cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
        qp->s_flags &= ~RVT_S_WAIT_PSN;
        qp->s_sending_psn = qp->s_psn;
        qp->s_sending_hpsn = qp->s_psn - 1;
        /* ... */
    }
/* In update_last_psn(): */
    qp->s_last_psn = psn;
/* In do_rc_completion(): */
    struct hfi1_qp_priv *priv = qp->priv;

    lockdep_assert_held(&qp->s_lock);
    /*
     * Don't decrement refcount and don't generate a
     * completion if the SWQE is being resent until the send
     * is finished.
     */
    trace_hfi1_rc_completion(qp, wqe->lpsn);
    if (cmp_psn(wqe->lpsn, qp->s_sending_psn) < 0 ||
        cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
        /* ... */
        trace_hfi1_qp_send_completion(qp, wqe, qp->s_last);
        /* ... */
            ib_hfi1_wc_opcode[wqe->wr.opcode],
            /* ... */
    } else {
        /* ... */
        this_cpu_inc(*ibp->rvp.rc_delayed_comp);
        /* ... */
        if (ppd->dd->flags & HFI1_HAS_SEND_DMA) {
            /* ... */
            u8 sl = rdma_ah_get_sl(&qp->remote_ah_attr);
            /* ... */
            sc5 = ibp->sl_to_sc[sl];
            /* ... */
        }
    }

    qp->s_retry = qp->s_retry_cnt;
    /* ... */
    if (wqe->wr.opcode != IB_WR_TID_RDMA_WRITE)
        update_last_psn(qp, wqe->lpsn);

    /*
     * ... may have been the last one
     * being resent, we can stop re-sending it since we know the
     * responder has already seen it.
     */
    if (qp->s_acked == qp->s_cur) {
        if (++qp->s_cur >= qp->s_size)
            qp->s_cur = 0;
        qp->s_acked = qp->s_cur;
        wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
        if (qp->s_acked != qp->s_tail) {
            qp->s_state = OP(SEND_LAST);
            qp->s_psn = wqe->psn;
        }
    } else {
        if (++qp->s_acked >= qp->s_size)
            qp->s_acked = 0;
        if (qp->state == IB_QPS_SQD && qp->s_acked == qp->s_cur)
            qp->s_draining = 0;
        wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
    }
    if (priv->s_flags & HFI1_S_TID_WAIT_INTERLCK) {
        priv->s_flags &= ~HFI1_S_TID_WAIT_INTERLCK;
        /* ... */
    }
    /* ... */
/* In set_restart_qp(): */
    if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) {
        qp->r_flags |= RVT_R_RDMAR_SEQ;
        hfi1_restart_rc(qp, qp->s_last_psn + 1, 0);
        if (list_empty(&qp->rspwait)) {
            qp->r_flags |= RVT_R_RSP_SEND;
            rvt_get_qp(qp);
            list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
        }
    }
/**
 * update_qp_retry_state - Update qp retry state.
 * ...
 */
/* In update_qp_retry_state(): */
    struct hfi1_qp_priv *qpriv = qp->priv;

    qp->s_psn = psn + 1;
    /* ... */
    if (cmp_psn(psn, lpsn) >= 0) {
        qp->s_cur = qpriv->s_tid_cur + 1;
        if (qp->s_cur >= qp->s_size)
            qp->s_cur = 0;
        qp->s_state = TID_OP(WRITE_REQ);
    } else if (cmp_psn(psn, spsn) >= 0) {
        qp->s_cur = qpriv->s_tid_cur;
        qp->s_state = TID_OP(WRITE_RESP);
    }
/**
 * do_rc_ack - process an incoming RC ACK
 * ...
 */
/* In do_rc_ack(): */
    struct hfi1_qp_priv *qpriv = qp->priv;
    /* ... */

    lockdep_assert_held(&qp->s_lock);
    /* ... */
    /*
     * Note that NAKs implicitly ACK outstanding SEND and RDMA write
     * requests and implicitly NAK RDMA read and atomic requests issued
     * before the NAK'ed request.
     */
    if (aeth >> IB_AETH_NAK_SHIFT)
        ack_psn--;
    wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
    /* ... */

    while ((diff = delta_psn(ack_psn, wqe->lpsn)) >= 0) {
        /* ... */
        if (wqe->wr.opcode == IB_WR_RDMA_READ &&
            /* ... */
        /* ... */
        if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
             /* ... */) ||
            (wqe->wr.opcode == IB_WR_TID_RDMA_READ &&
             /* ... */) ||
            ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
              wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
             /* ... */) ||
            (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE &&
             (delta_psn(psn, qp->s_last_psn) != 1))) {
            /* ... */
        }
        if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
            wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
            u64 *vaddr = wqe->sg_list[0].vaddr;
            *vaddr = val;
        }
        if (wqe->wr.opcode == IB_WR_OPFN)
            /* ... */
        if (qp->s_num_rd_atomic &&
            (wqe->wr.opcode == IB_WR_RDMA_READ ||
             wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
             wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) {
            qp->s_num_rd_atomic--;
            /* Restart sending task if fence is complete */
            if ((qp->s_flags & RVT_S_WAIT_FENCE) &&
                !qp->s_num_rd_atomic) {
                qp->s_flags &= ~(RVT_S_WAIT_FENCE |
                                 RVT_S_WAIT_ACK);
                hfi1_schedule_send(qp);
            } else if (qp->s_flags & RVT_S_WAIT_RDMAR) {
                qp->s_flags &= ~(RVT_S_WAIT_RDMAR |
                                 RVT_S_WAIT_ACK);
                hfi1_schedule_send(qp);
            }
        }
        /* ... */
        if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE)
            /* ... */
        wqe = do_rc_completion(qp, wqe, ibp);
        if (qp->s_acked == qp->s_tail)
            break;
    }

    switch (aeth >> IB_AETH_NAK_SHIFT) {
    case 0:         /* ACK */
        this_cpu_inc(*ibp->rvp.rc_acks);
        if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) {
            if (wqe_to_tid_req(wqe)->ack_pending)
                rvt_mod_retry_timer_ext(qp,
                                        qpriv->timeout_shift);
            else
                rvt_stop_rc_timers(qp);
        } else if (qp->s_acked != qp->s_tail) {
            struct rvt_swqe *__w = NULL;

            if (qpriv->s_tid_cur != HFI1_QP_WQE_INVALID)
                __w = rvt_get_swqe_ptr(qp, qpriv->s_tid_cur);

            /* ... */
            if (__w && __w->wr.opcode == IB_WR_TID_RDMA_WRITE &&
                /* ... */) {
                /* ... */
                if (cmp_psn(psn, qp->s_last_psn + 1)) {
                    /* ... */
                }
                /* ... */
                if (qp->s_cur != qp->s_tail &&
                    cmp_psn(qp->s_psn, psn) <= 0)
                    update_qp_retry_state(qp, psn,
                                          __w->psn,
                                          __w->lpsn);
                else if (--qpriv->pending_tid_w_resp)
                    /* ... */
                else
                    /* ... */
            } else {
                /*
                 * ...
                 * We can stop re-sending the earlier packets
                 * and continue with the next packet the
                 * receiver wants.
                 */
                if (cmp_psn(qp->s_psn, psn) <= 0)
                    reset_psn(qp, psn + 1);
            }
        } else {
            /* No more acks - kill all timers */
            rvt_stop_rc_timers(qp);
            if (cmp_psn(qp->s_psn, psn) <= 0) {
                qp->s_state = OP(SEND_LAST);
                qp->s_psn = psn + 1;
            }
        }
        if (qp->s_flags & RVT_S_WAIT_ACK) {
            qp->s_flags &= ~RVT_S_WAIT_ACK;
            hfi1_schedule_send(qp);
        }
        /* ... */
        qp->s_rnr_retry = qp->s_rnr_retry_cnt;
        qp->s_retry = qp->s_retry_cnt;
        /* ... */
        if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE &&
            /* ... */ &&
            cmp_psn(psn, wqe->psn) >= 0)
            /* ... */
        update_last_psn(qp, psn);
        return 1;

    case 1:         /* RNR NAK */
        ibp->rvp.n_rnr_naks++;
        if (qp->s_acked == qp->s_tail)
            goto bail_stop;
        if (qp->s_flags & RVT_S_WAIT_RNR)
            goto bail_stop;
        rdi = ib_to_rvt(qp->ibqp.device);
        if (!(rdi->post_parms[wqe->wr.opcode].flags &
              /* ... */
            if (qp->s_rnr_retry == 0) {
                /* ... */
            }
            if (qp->s_rnr_retry_cnt < 7 && qp->s_rnr_retry_cnt > 0)
                qp->s_rnr_retry--;
        /* ... */
        if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE) {
            reset_psn(qp, qp->s_last_psn + 1);
        } else {
            update_last_psn(qp, psn - 1);
            reset_psn(qp, psn);
        }

        ibp->rvp.n_rc_resends += delta_psn(qp->s_psn, psn);
        qp->s_flags &= ~(RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_ACK);
        /* ... */

    case 3:         /* NAK */
        if (qp->s_acked == qp->s_tail)
            goto bail_stop;
        /* The last valid PSN is the previous PSN. */
        update_last_psn(qp, psn - 1);
        switch (/* ... */) {
        case 0: /* PSN sequence error */
            ibp->rvp.n_seq_naks++;
            /* ... */
            break;

        case 1: /* Invalid Request */
            ibp->rvp.n_other_naks++;
            goto class_b;

        case 2: /* Remote Access Error */
            ibp->rvp.n_other_naks++;
            goto class_b;

        case 3: /* Remote Operation Error */
            ibp->rvp.n_other_naks++;
class_b:
            if (qp->s_last == qp->s_acked) {
                if (wqe->wr.opcode == IB_WR_TID_RDMA_READ)
                    /* ... */
                /* ... */
            }
            break;
        /* ... */
        }
        qp->s_retry = qp->s_retry_cnt;
        qp->s_rnr_retry = qp->s_rnr_retry_cnt;
        /* ... */
    }
bail_stop:
    /* ... */
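/*
 * Annotation (not in the source): the outer switch above dispatches
 * on the AETH NAK field (aeth >> IB_AETH_NAK_SHIFT). Value 0 is a
 * positive ACK, value 1 is an RNR NAK (back off via the RNR timer,
 * bounded by s_rnr_retry), and value 3 is a NAK whose code separates
 * a PSN sequence error (restart from the responder's expected PSN)
 * from the fatal "class B" errors (invalid request, remote access,
 * remote operation), which complete the WQE in error and flush the QP.
 */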
/* In rdma_seq_err(): */
    lockdep_assert_held(&qp->s_lock);
    /* ... */
    wqe = rvt_get_swqe_ptr(qp, qp->s_acked);

    while (cmp_psn(psn, wqe->lpsn) > 0) {
        if (wqe->wr.opcode == IB_WR_RDMA_READ ||
            wqe->wr.opcode == IB_WR_TID_RDMA_READ ||
            wqe->wr.opcode == IB_WR_TID_RDMA_WRITE ||
            wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
            wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
            break;
        wqe = do_rc_completion(qp, wqe, ibp);
    }

    ibp->rvp.n_rdma_seq++;
    qp->r_flags |= RVT_R_RDMAR_SEQ;
    hfi1_restart_rc(qp, qp->s_last_psn + 1, 0);
    if (list_empty(&qp->rspwait)) {
        qp->r_flags |= RVT_R_RSP_SEND;
        rvt_get_qp(qp);
        list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
    }
/**
 * rc_rcv_resp - process an incoming RC response packet
 * ...
 */
/* In rc_rcv_resp(): */
    struct hfi1_ctxtdata *rcd = packet->rcd;
    void *data = packet->payload;
    u32 tlen = packet->tlen;
    struct rvt_qp *qp = packet->qp;
    /* ... */
    struct ib_other_headers *ohdr = packet->ohdr;
    /* ... */
    u32 psn = ib_bth_get_psn(packet->ohdr);
    u32 pmtu = qp->pmtu;
    u16 hdrsize = packet->hlen;
    u8 opcode = packet->opcode;
    u8 pad = packet->pad;
    u8 extra_bytes = pad + packet->extra_byte + (SIZE_OF_CRC << 2);

    spin_lock_irqsave(&qp->s_lock, flags);
    /* ... */

    /* Ignore invalid responses. */
    if (cmp_psn(psn, READ_ONCE(qp->s_next_psn)) >= 0)
        goto ack_done;

    /* Ignore duplicate responses. */
    diff = cmp_psn(psn, qp->s_last_psn);
    if (unlikely(diff <= 0)) {
        /* ... */
        aeth = be32_to_cpu(ohdr->u.aeth);
        /* ... */
        goto ack_done;
    }

    /*
     * Skip everything other than the PSN we expect, if we are waiting
     * for a reply to a restarted RDMA read or atomic op.
     */
    if (qp->r_flags & RVT_R_RDMAR_SEQ) {
        if (cmp_psn(psn, qp->s_last_psn + 1) != 0)
            goto ack_done;
        qp->r_flags &= ~RVT_R_RDMAR_SEQ;
    }

    if (unlikely(qp->s_acked == qp->s_tail))
        goto ack_done;
    wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
    /* ... */

    switch (opcode) {
    case OP(ACKNOWLEDGE):
    case OP(ATOMIC_ACKNOWLEDGE):
    case OP(RDMA_READ_RESPONSE_FIRST):
        aeth = be32_to_cpu(ohdr->u.aeth);
        if (opcode == OP(ATOMIC_ACKNOWLEDGE))
            val = ib_u64_get(&ohdr->u.at.atomic_ack_eth);
        else
            val = 0;
        /* ... */
        wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
        if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
            goto ack_op_err;
        /*
         * If this is a response to a resent RDMA read, we
         * have to be careful to copy the data to the right
         * location.
         */
        qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
                                          wqe, psn, pmtu);
        goto read_middle;

    case OP(RDMA_READ_RESPONSE_MIDDLE):
        /* ... */
        if (unlikely(cmp_psn(psn, qp->s_last_psn + 1)))
            goto ack_seq_err;
        if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
            goto ack_op_err;
read_middle:
        /* ... */
        if (unlikely(pmtu >= qp->s_rdma_read_len))
            goto ack_len_err;

        /*
         * We got a response so update the timeout.
         * 4.096 usec. * (1 << qp->timeout)
         */
        rvt_mod_retry_timer(qp);
        if (qp->s_flags & RVT_S_WAIT_ACK) {
            qp->s_flags &= ~RVT_S_WAIT_ACK;
            hfi1_schedule_send(qp);
        }
        /* ... */
        qp->s_retry = qp->s_retry_cnt;

        /*
         * Update the RDMA receive state but do the copy w/o
         * holding the locks and blocking interrupts.
         */
        qp->s_rdma_read_len -= pmtu;
        update_last_psn(qp, psn);
        spin_unlock_irqrestore(&qp->s_lock, flags);
        rvt_copy_sge(qp, &qp->s_rdma_read_sge,
                     data, pmtu, false, false);
        goto bail;

    case OP(RDMA_READ_RESPONSE_ONLY):
        aeth = be32_to_cpu(ohdr->u.aeth);
        /* ... */
        /*
         * If this is a response to a resent RDMA read, we
         * have to be careful to copy the data to the right
         * location.
         */
        wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
        qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
                                          wqe, psn, pmtu);
        goto read_last;

    case OP(RDMA_READ_RESPONSE_LAST):
        /* ... */
        if (unlikely(cmp_psn(psn, qp->s_last_psn + 1)))
            goto ack_seq_err;
        if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
            goto ack_op_err;
read_last:
        /* ... */
        tlen -= hdrsize + extra_bytes;
        if (unlikely(tlen != qp->s_rdma_read_len))
            goto ack_len_err;
        aeth = be32_to_cpu(ohdr->u.aeth);
        rvt_copy_sge(qp, &qp->s_rdma_read_sge,
                     data, tlen, false, false);
        WARN_ON(qp->s_rdma_read_sge.num_sge);
        /* ... */
        break;
    }
    /* ... */
ack_op_err:
    /* ... */
ack_seq_err:
    /* ... */
ack_len_err:
    /* ... */
    if (qp->s_last == qp->s_acked) {
        /* ... */
    }
ack_done:
    spin_unlock_irqrestore(&qp->s_lock, flags);
bail:
    /* ... */
/* In rc_cancel_ack(): */
    qp->r_adefered = 0;
    if (list_empty(&qp->rspwait))
        return;
    list_del_init(&qp->rspwait);
    qp->r_flags &= ~RVT_R_RSP_NAK;
    rvt_put_qp(qp);
/**
 * rc_rcv_error - process an incoming duplicate or error RC packet
 * ...
 */
/* In rc_rcv_error(): */
    /* ... */
        if (!qp->r_nak_state) {
            ibp->rvp.n_rc_seqnak++;
            qp->r_nak_state = IB_NAK_PSN_ERROR;
            /* Use the expected PSN. */
            qp->r_ack_psn = qp->r_psn;
            /* ... */
        }
    /* ... */

    /*
     * Handle a duplicate request. Don't re-execute SEND, RDMA
     * read or atomic op. ...
     */
    ibp->rvp.n_rc_dupreq++;

    spin_lock_irqsave(&qp->s_lock, flags);

    switch (opcode) {
    case OP(RDMA_READ_REQUEST): {
        /* ... */
        if (!e || e->opcode != OP(RDMA_READ_REQUEST))
            goto unlock_done;
        /* ... */
        reth = &ohdr->u.rc.reth;
        /*
         * Address range must be a subset of the original
         * request and start on pmtu boundaries.
         * ...
         */
        offset = delta_psn(psn, e->psn) * qp->pmtu;
        len = be32_to_cpu(reth->length);
        if (unlikely(offset + len != e->rdma_sge.sge_length))
            goto unlock_done;
        /* ... */
        if (len != 0) {
            u32 rkey = be32_to_cpu(reth->rkey);
            u64 vaddr = get_ib_reth_vaddr(reth);
            int ok;

            ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
                             IB_ACCESS_REMOTE_READ);
            if (unlikely(!ok))
                goto unlock_done;
        } else {
            e->rdma_sge.vaddr = NULL;
            e->rdma_sge.length = 0;
            e->rdma_sge.sge_length = 0;
        }
        e->psn = psn;
        /* ... */
        if (qp->s_acked_ack_queue == qp->s_tail_ack_queue)
            qp->s_acked_ack_queue = prev;
        qp->s_tail_ack_queue = prev;
        break;
    }

    case OP(COMPARE_SWAP):
    case OP(FETCH_ADD): {
        /* ... */
        if (!e || e->opcode != (u8)opcode || old_req)
            goto unlock_done;
        if (qp->s_tail_ack_queue == qp->s_acked_ack_queue)
            qp->s_acked_ack_queue = prev;
        qp->s_tail_ack_queue = prev;
        break;
    }

    default:
        /*
         * ... Resend the most recent ACK if this request is
         * after all the previous RDMA reads and atomics.
         */
        if (mra == qp->r_head_ack_queue) {
            spin_unlock_irqrestore(&qp->s_lock, flags);
            qp->r_nak_state = 0;
            qp->r_ack_psn = qp->r_psn - 1;
            goto send_ack;
        }
        /*
         * Resend the RDMA read or atomic op which
         * ACKs this duplicate request.
         */
        if (qp->s_tail_ack_queue == qp->s_acked_ack_queue)
            qp->s_acked_ack_queue = mra;
        qp->s_tail_ack_queue = mra;
        break;
    }
    qp->s_ack_state = OP(ACKNOWLEDGE);
    qp->s_flags |= RVT_S_RESP_PENDING;
    qp->r_nak_state = 0;
    hfi1_schedule_send(qp);

unlock_done:
    spin_unlock_irqrestore(&qp->s_lock, flags);
    /* ... */
/* In log_cca_event(): */
    spin_lock_irqsave(&ppd->cc_log_lock, flags);
    /* ... */
    ppd->threshold_cong_event_map[sl / 8] |= 1 << (sl % 8);
    ppd->threshold_event_counter++;
    /* ... */
    cc_event = &ppd->cc_events[ppd->cc_log_idx++];
    if (ppd->cc_log_idx == OPA_CONG_LOG_ELEMS)
        ppd->cc_log_idx = 0;
    cc_event->lqpn = lqpn & RVT_QPN_MASK;
    cc_event->rqpn = rqpn & RVT_QPN_MASK;
    cc_event->sl = sl;
    cc_event->svc_type = svc_type;
    cc_event->rlid = rlid;
    /* ... */
    cc_event->timestamp = ktime_get_ns() / 1024;
    /* ... */
    spin_unlock_irqrestore(&ppd->cc_log_lock, flags);
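/*
 * Annotation (not in the source): dividing ktime_get_ns() by 1024
 * yields ticks of 1.024 us (2^10 ns), matching the 1.024 us timestamp
 * granularity used by OPA congestion log entries while costing only a
 * cheap shift instead of a division by 1000.
 */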
/* In process_becn(): */
    /*
     * ...
     * 1) increase CCTI (for this SL)
     * ...
     */
    ccti_limit = cc_state->cct.ccti_limit;
    ccti_incr = cc_state->cong_setting.entries[sl].ccti_increase;
    ccti_timer = cc_state->cong_setting.entries[sl].ccti_timer;
    trigger_threshold =
        cc_state->cong_setting.entries[sl].trigger_threshold;

    spin_lock_irqsave(&ppd->cca_timer_lock, flags);

    cca_timer = &ppd->cca_timer[sl];
    if (cca_timer->ccti < ccti_limit) {
        if (cca_timer->ccti + ccti_incr <= ccti_limit)
            cca_timer->ccti += ccti_incr;
        else
            cca_timer->ccti = ccti_limit;
        /* ... */
    }

    ccti = cca_timer->ccti;
    /* ... */
    if (!hrtimer_active(&cca_timer->hrtimer)) {
        /* ... */
        hrtimer_start(&cca_timer->hrtimer, ns_to_ktime(nsec),
                      /* ... */);
    }

    spin_unlock_irqrestore(&ppd->cca_timer_lock, flags);
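/*
 * Annotation (not in the source): this is the BECN half of OPA
 * congestion control. The per-SL CCTI is raised by the configured
 * ccti_increase but saturates at cct.ccti_limit; for example, with
 * ccti = 120, ccti_increase = 4 and ccti_limit = 122 the new value is
 * clamped to 122. The hrtimer armed here later walks the CCTI back
 * down at the ccti_timer cadence once BECNs stop arriving.
 */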
/**
 * hfi1_rc_rcv - process an incoming RC packet
 * ...
 */
/* In hfi1_rc_rcv(): */
    struct hfi1_ctxtdata *rcd = packet->rcd;
    void *data = packet->payload;
    u32 tlen = packet->tlen;
    struct rvt_qp *qp = packet->qp;
    struct hfi1_qp_priv *qpriv = qp->priv;
    /* ... */
    struct ib_other_headers *ohdr = packet->ohdr;
    u32 opcode = packet->opcode;
    u32 hdrsize = packet->hlen;
    u32 psn = ib_bth_get_psn(packet->ohdr);
    u32 pad = packet->pad;
    /* ... */
    u32 pmtu = qp->pmtu;
    /* ... */
    u8 extra_bytes = pad + packet->extra_byte + (SIZE_OF_CRC << 2);

    lockdep_assert_held(&qp->r_lock);
    /* ... */
    opfn_trigger_conn_request(qp, be32_to_cpu(ohdr->bth[1]));
    /* ... */

    /* Compute 24 bits worth of difference. */
    diff = delta_psn(psn, qp->r_psn);
    if (unlikely(diff)) {
        if (rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd))
            return;
        goto send_ack;
    }

    /* Check for opcode sequence errors. */
    switch (qp->r_state) {
    /* ... */
    }

    if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
        rvt_comm_est(qp);

    /* OK, process the packet. */
    switch (opcode) {
    case OP(SEND_FIRST):
        /* ... */
        qp->r_rcv_len = 0;
        /* FALLTHROUGH */
    case OP(SEND_MIDDLE):
    case OP(RDMA_WRITE_MIDDLE):
send_middle:
        /* ... */
        qp->r_rcv_len += pmtu;
        if (unlikely(qp->r_rcv_len > qp->r_len))
            goto nack_inv;
        rvt_copy_sge(qp, &qp->r_sge, data, pmtu, true, false);
        break;

    /* ... */

    case OP(SEND_ONLY):
    case OP(SEND_ONLY_WITH_IMMEDIATE):
    case OP(SEND_ONLY_WITH_INVALIDATE):
        /* ... */
        qp->r_rcv_len = 0;
        /* ... */
    case OP(SEND_LAST_WITH_IMMEDIATE):
send_last_imm:
        wc.ex.imm_data = ohdr->u.imm_data;
        wc.wc_flags = IB_WC_WITH_IMM;
        goto send_last;
    case OP(SEND_LAST_WITH_INVALIDATE):
send_last_inv:
        rkey = be32_to_cpu(ohdr->u.ieth);
        /* ... */
        goto send_last;
    case OP(SEND_LAST):
    case OP(RDMA_WRITE_LAST):
no_immediate_data:
        wc.wc_flags = 0;
        wc.ex.imm_data = 0;
send_last:
        /* ... */
        /* Check for invalid length. */
        tlen -= (hdrsize + extra_bytes);
        wc.byte_len = tlen + qp->r_rcv_len;
        if (unlikely(wc.byte_len > qp->r_len))
            goto nack_inv;
        rvt_copy_sge(qp, &qp->r_sge, data, tlen, true, copy_last);
        rvt_put_ss(&qp->r_sge);
        qp->r_msn++;
        if (!__test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
            break;
        wc.wr_id = qp->r_wr_id;
        /* ... */
        wc.qp = &qp->ibqp;
        wc.src_qp = qp->remote_qpn;
        wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr) & U16_MAX;
        /*
         * ...
         * See also OPA Vol. 1, section 9.7.6, and table 9-17.
         */
        wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
        /* ... */
        break;

    case OP(RDMA_WRITE_ONLY):
        /* ... */
    case OP(RDMA_WRITE_FIRST):
    case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
        if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
            goto nack_inv;
        /* consume RWQE */
        reth = &ohdr->u.rc.reth;
        qp->r_len = be32_to_cpu(reth->length);
        qp->r_rcv_len = 0;
        qp->r_sge.sg_list = NULL;
        if (qp->r_len != 0) {
            u32 rkey = be32_to_cpu(reth->rkey);
            u64 vaddr = get_ib_reth_vaddr(reth);
            int ok;

            /* Check rkey & NAK */
            ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr,
                             rkey, IB_ACCESS_REMOTE_WRITE);
            if (unlikely(!ok))
                goto nack_acc;
            qp->r_sge.num_sge = 1;
        } else {
            qp->r_sge.num_sge = 0;
            qp->r_sge.sge.mr = NULL;
            qp->r_sge.sge.vaddr = NULL;
            qp->r_sge.sge.length = 0;
            qp->r_sge.sge.sge_length = 0;
        }
        if (opcode == OP(RDMA_WRITE_FIRST))
            goto send_middle;
        else if (opcode == OP(RDMA_WRITE_ONLY))
            goto no_immediate_data;
        /* ... */
        rvt_put_ss(&qp->r_sge);
        /* ... */
        wc.ex.imm_data = ohdr->u.rc.imm_data;
        wc.wc_flags = IB_WC_WITH_IMM;
        goto send_last;

    case OP(RDMA_READ_REQUEST): {
        /* ... */
        if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
            goto nack_inv;
        next = qp->r_head_ack_queue + 1;
        /* s_ack_queue is size rvt_size_atomic()+1 so use > not >= */
        if (next > rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
            next = 0;
        spin_lock_irqsave(&qp->s_lock, flags);
        if (unlikely(next == qp->s_acked_ack_queue)) {
            if (!qp->s_ack_queue[next].sent)
                goto nack_inv_unlck;
            /* ... */
        }
        e = &qp->s_ack_queue[qp->r_head_ack_queue];
        /* ... */
        reth = &ohdr->u.rc.reth;
        len = be32_to_cpu(reth->length);
        if (len) {
            u32 rkey = be32_to_cpu(reth->rkey);
            u64 vaddr = get_ib_reth_vaddr(reth);
            int ok;

            /* Check rkey & NAK */
            ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr,
                             rkey, IB_ACCESS_REMOTE_READ);
            if (unlikely(!ok))
                goto nack_acc_unlck;
            /*
             * Update the next expected PSN. We add 1 later
             * below, so only add the remainder here.
             */
            qp->r_psn += rvt_div_mtu(qp, len - 1);
        } else {
            e->rdma_sge.mr = NULL;
            e->rdma_sge.vaddr = NULL;
            e->rdma_sge.length = 0;
            e->rdma_sge.sge_length = 0;
        }
        e->opcode = opcode;
        e->sent = 0;
        e->psn = psn;
        e->lpsn = qp->r_psn;
        /*
         * We need to increment the MSN here instead of when we
         * finish sending the result since a duplicate request would
         * increment it more than once.
         */
        qp->r_msn++;
        qp->r_psn++;
        qp->r_state = opcode;
        qp->r_nak_state = 0;
        qp->r_head_ack_queue = next;
        qpriv->r_tid_alloc = qp->r_head_ack_queue;
        /* ... */
        qp->s_flags |= RVT_S_RESP_PENDING;
        if (fecn)
            qp->s_flags |= RVT_S_ECN;
        hfi1_schedule_send(qp);

        spin_unlock_irqrestore(&qp->s_lock, flags);
        return;
    }

    case OP(COMPARE_SWAP):
    case OP(FETCH_ADD): {
        struct ib_atomic_eth *ateth = &ohdr->u.atomic_eth;
        u64 vaddr = get_ib_ateth_vaddr(ateth);
        /* ... */

        if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
                     /* ... */))
            goto nack_inv;
        next = qp->r_head_ack_queue + 1;
        if (next > rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
            next = 0;
        spin_lock_irqsave(&qp->s_lock, flags);
        if (unlikely(next == qp->s_acked_ack_queue)) {
            if (!qp->s_ack_queue[next].sent)
                goto nack_inv_unlck;
            /* ... */
        }
        e = &qp->s_ack_queue[qp->r_head_ack_queue];
        /* ... */
        if (unlikely(vaddr & (sizeof(u64) - 1)))
            goto nack_inv_unlck;
        rkey = be32_to_cpu(ateth->rkey);
        /* Check rkey & NAK */
        if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
                                  vaddr, rkey,
                                  IB_ACCESS_REMOTE_ATOMIC)))
            goto nack_acc_unlck;
        /* Perform atomic OP and save result. */
        maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
        sdata = get_ib_ateth_swap(ateth);
        e->atomic_data = (opcode == OP(FETCH_ADD)) ?
            (u64)atomic64_add_return(sdata, maddr) - sdata :
            (u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
                         get_ib_ateth_compare(ateth),
                         sdata);
        rvt_put_mr(qp->r_sge.sge.mr);
        qp->r_sge.num_sge = 0;
        /* ... */
        e->opcode = opcode;
        e->sent = 0;
        e->psn = psn;
        e->lpsn = psn;
        qp->r_msn++;
        qp->r_psn++;
        qp->r_state = opcode;
        qp->r_nak_state = 0;
        qp->r_head_ack_queue = next;
        qpriv->r_tid_alloc = qp->r_head_ack_queue;
        /* ... */
        qp->s_flags |= RVT_S_RESP_PENDING;
        if (fecn)
            qp->s_flags |= RVT_S_ECN;
        hfi1_schedule_send(qp);

        spin_unlock_irqrestore(&qp->s_lock, flags);
        return;
    }

    /* ... */
    }
    qp->r_psn++;
    qp->r_state = opcode;
    qp->r_ack_psn = psn;
    qp->r_nak_state = 0;
    /* Send an ACK if requested or required. */
    if (packet->numpkt == 0 || fecn ||
        qp->r_adefered >= HFI1_PSN_CREDIT) {
        rc_cancel_ack(qp);
        goto send_ack;
    }
    qp->r_adefered++;
    rc_defered_ack(rcd, qp);
    return;

rnr_nak:
    qp->r_nak_state = qp->r_min_rnr_timer | IB_RNR_NAK;
    qp->r_ack_psn = qp->r_psn;
    /* Queue RNR NAK for later */
    rc_defered_ack(rcd, qp);
    return;

nack_op_err:
    /* ... */
    qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR;
    qp->r_ack_psn = qp->r_psn;
    /* Queue NAK for later */
    rc_defered_ack(rcd, qp);
    return;

nack_inv_unlck:
    spin_unlock_irqrestore(&qp->s_lock, flags);
nack_inv:
    /* ... */
    qp->r_nak_state = IB_NAK_INVALID_REQUEST;
    qp->r_ack_psn = qp->r_psn;
    /* Queue NAK for later */
    rc_defered_ack(rcd, qp);
    return;

nack_acc_unlck:
    spin_unlock_irqrestore(&qp->s_lock, flags);
nack_acc:
    /* ... */
    qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
    qp->r_ack_psn = qp->r_psn;
send_ack:
    hfi1_send_rc_ack(packet, fecn);
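/*
 * Annotation (not in the source): hfi1_rc_rcv() is the receive-side
 * dispatcher. In-sequence requests fall through the opcode switch and
 * advance qp->r_psn; RDMA READ and atomic requests are parked in
 * qp->s_ack_queue for the send engine to answer (RVT_S_RESP_PENDING),
 * while the nack_* exits encode the matching NAK code and defer the
 * ACK so that several packets can be coalesced into one response.
 */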
/* In hfi1_rc_hdrerr(): */
    psn = ib_bth_get_psn(packet->ohdr);
    opcode = ib_bth_get_opcode(packet->ohdr);
    /* ... */
    diff = delta_psn(psn, qp->r_psn);
    if (!qp->r_nak_state && diff >= 0) {
        ibp->rvp.n_rc_seqnak++;
        qp->r_nak_state = IB_NAK_PSN_ERROR;
        /* Use the expected PSN. */
        qp->r_ack_psn = qp->r_psn;
        /* ... */
    }