Lines matching refs: wqe — uses of struct rxe_send_wqe in the requester path (the function names match drivers/infiniband/sw/rxe/rxe_req.c)
14 static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
18 struct rxe_send_wqe *wqe, in retry_first_write_send() argument
24 int to_send = (wqe->dma.resid > qp->mtu) ? in retry_first_write_send()
25 qp->mtu : wqe->dma.resid; in retry_first_write_send()
27 qp->req.opcode = next_opcode(qp, wqe, in retry_first_write_send()
28 wqe->wr.opcode); in retry_first_write_send()
30 if (wqe->wr.send_flags & IB_SEND_INLINE) { in retry_first_write_send()
31 wqe->dma.resid -= to_send; in retry_first_write_send()
32 wqe->dma.sge_offset += to_send; in retry_first_write_send()
34 advance_dma_data(&wqe->dma, to_send); in retry_first_write_send()
37 wqe->iova += qp->mtu; in retry_first_write_send()
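Taken together, the retry_first_write_send() fragments above (lines 18-37) fast-forward the first, partially acknowledged send/write WQE past the npsn packets the responder has already seen. A sketch of how they plausibly fit together; the enclosing loop and the write-only guard on the iova bump do not reference wqe and are inferred, not copied:

    static void retry_first_write_send(struct rxe_qp *qp,
                                       struct rxe_send_wqe *wqe,
                                       unsigned int mask, int npsn)
    {
        int i;

        for (i = 0; i < npsn; i++) {
            /* one MTU per already-acked packet, capped by the residue */
            int to_send = (wqe->dma.resid > qp->mtu) ?
                              qp->mtu : wqe->dma.resid;

            qp->req.opcode = next_opcode(qp, wqe, wqe->wr.opcode);

            if (wqe->wr.send_flags & IB_SEND_INLINE) {
                /* inline payloads keep their own cursor */
                wqe->dma.resid -= to_send;
                wqe->dma.sge_offset += to_send;
            } else {
                advance_dma_data(&wqe->dma, to_send);
            }

            if (mask & WR_WRITE_MASK)  /* assumed: only writes move iova */
                wqe->iova += qp->mtu;
        }
    }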
43 struct rxe_send_wqe *wqe; in req_retry() local
56 wqe = addr_from_index(qp->sq.queue, wqe_index); in req_retry()
57 mask = wr_opcode_mask(wqe->wr.opcode, qp); in req_retry()
59 if (wqe->state == wqe_state_posted) in req_retry()
62 if (wqe->state == wqe_state_done) in req_retry()
65 wqe->iova = (mask & WR_ATOMIC_MASK) ? in req_retry()
66 wqe->wr.wr.atomic.remote_addr : in req_retry()
68 wqe->wr.wr.rdma.remote_addr : in req_retry()
72 wqe->dma.resid = wqe->dma.length; in req_retry()
73 wqe->dma.cur_sge = 0; in req_retry()
74 wqe->dma.sge_offset = 0; in req_retry()
81 npsn = (qp->comp.psn - wqe->first_psn) & in req_retry()
83 retry_first_write_send(qp, wqe, mask, npsn); in req_retry()
87 npsn = (wqe->dma.length - wqe->dma.resid) / in req_retry()
89 wqe->iova += npsn * qp->mtu; in req_retry()
93 wqe->state = wqe_state_posted; in req_retry()
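In req_retry() the nested conditional at lines 65-68 re-derives wqe->iova from the original work request; its middle arm (line 67) does not reference wqe and is therefore absent from the listing, but the two visible arms imply a read/write mask test. A hedged reconstruction:

    /* reset the remote address; the middle condition is inferred */
    wqe->iova = (mask & WR_ATOMIC_MASK) ?
                 wqe->wr.wr.atomic.remote_addr :
                 (mask & WR_READ_OR_WRITE_MASK) ?
                 wqe->wr.wr.rdma.remote_addr :
                 0;

For the first unfinished WQE, line 81 counts the packets already acknowledged as (qp->comp.psn - wqe->first_psn) masked to the 24-bit PSN space and replays them via retry_first_write_send(); for RDMA reads, lines 87-89 instead derive the count from bytes already pulled, (length - resid) / mtu, and advance iova by that many MTUs.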
107 struct rxe_send_wqe *wqe = queue_head(qp->sq.queue); in req_next_wqe() local
123 if (wqe && ((qp->req.wqe_index != in req_next_wqe()
125 (wqe->state != wqe_state_posted))) { in req_next_wqe()
150 wqe = addr_from_index(qp->sq.queue, qp->req.wqe_index); in req_next_wqe()
154 (wqe->state != wqe_state_processing))) in req_next_wqe()
157 if (unlikely((wqe->wr.send_flags & IB_SEND_FENCE) && in req_next_wqe()
163 wqe->mask = wr_opcode_mask(wqe->wr.opcode, qp); in req_next_wqe()
164 return wqe; in req_next_wqe()
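req_next_wqe() (lines 107-164) is the gatekeeper: it returns NULL when the send queue is empty, when a draining QP still has a non-processing WQE at the head, or when a fenced WQE has predecessors outstanding. A sketch of the fence gate at line 157; the second operand is an assumption (rxe appears to apply the fence conservatively, holding the WQE until every earlier WQE has completed, not only outstanding reads/atomics):

    if (unlikely((wqe->wr.send_flags & IB_SEND_FENCE) &&
                 (qp->req.wqe_index != consumer_index(qp->sq.queue))))
        return NULL;   /* retry once the completer catches up */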
290 static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe, in next_opcode() argument
293 int fits = (wqe->dma.resid <= qp->mtu); in next_opcode()
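Line 293's fits flag records whether the remaining payload fits in one MTU; together with the opcode of the previously emitted packet it selects the FIRST/MIDDLE/LAST/ONLY wire opcode. A minimal sketch for RC sends (the opcode constants are the standard ones from rdma/ib_pack.h; the exact switch shape inside next_opcode() is an assumption):

    /* qp->req.opcode holds the previous packet's opcode, -1 if none */
    static int next_opcode_send_rc(struct rxe_qp *qp, int fits)
    {
        switch (qp->req.opcode) {
        case IB_OPCODE_RC_SEND_FIRST:
        case IB_OPCODE_RC_SEND_MIDDLE:
            /* continuing a multi-packet message */
            return fits ? IB_OPCODE_RC_SEND_LAST :
                          IB_OPCODE_RC_SEND_MIDDLE;
        default:
            /* starting a new message */
            return fits ? IB_OPCODE_RC_SEND_ONLY :
                          IB_OPCODE_RC_SEND_FIRST;
        }
    }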
321 static inline int check_init_depth(struct rxe_qp *qp, struct rxe_send_wqe *wqe) in check_init_depth() argument
325 if (wqe->has_rd_atomic) in check_init_depth()
333 wqe->has_rd_atomic = 1; in check_init_depth()
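check_init_depth() (lines 321-333) charges a read/atomic WQE against the QP's initiator depth; wqe->has_rd_atomic marks that this WQE already holds a slot, so a retry does not consume a second one. The fragments match the following shape (the atomic counter and the -EAGAIN back-off are taken from the surrounding rxe sources and should be read as a reconstruction):

    static inline int check_init_depth(struct rxe_qp *qp,
                                       struct rxe_send_wqe *wqe)
    {
        int depth;

        if (wqe->has_rd_atomic)          /* line 325: slot already held */
            return 0;

        qp->req.need_rd_atomic = 1;
        depth = atomic_dec_return(&qp->req.rd_atomic);
        if (depth >= 0) {
            qp->req.need_rd_atomic = 0;
            wqe->has_rd_atomic = 1;      /* line 333 */
            return 0;
        }

        atomic_inc(&qp->req.rd_atomic);  /* no slot free: undo and defer */
        return -EAGAIN;
    }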
352 struct rxe_send_wqe *wqe, in init_req_packet() argument
358 struct rxe_send_wr *ibwr = &wqe->wr; in init_req_packet()
379 pkt->wqe = wqe; in init_req_packet()
410 reth_set_va(pkt, wqe->iova); in init_req_packet()
411 reth_set_len(pkt, wqe->dma.resid); in init_req_packet()
421 atmeth_set_va(pkt, wqe->iova); in init_req_packet()
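In init_req_packet() (lines 352-421) the transport headers are filled straight from the WQE: the RETH virtual address is wqe->iova and its length is wqe->dma.resid, so a retried RDMA request describes only the unsent remainder; atomics put the target address in the AtomicETH. A sketch of those two branches (the mask tests and the rkey/swap/compare setters follow the IBA RETH/AtomicETH layouts and are assumptions):

    if (pkt->mask & RXE_RETH_MASK) {
        reth_set_rkey(pkt, ibwr->wr.rdma.rkey);
        reth_set_va(pkt, wqe->iova);        /* line 410 */
        reth_set_len(pkt, wqe->dma.resid);  /* line 411 */
    }

    if (pkt->mask & RXE_ATMETH_MASK) {
        atmeth_set_va(pkt, wqe->iova);      /* line 421 */
        atmeth_set_rkey(pkt, ibwr->wr.atomic.rkey);
        atmeth_set_swap_add(pkt, ibwr->wr.atomic.swap);
        atmeth_set_comp(pkt, ibwr->wr.atomic.compare_add);
    }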
443 static int fill_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe, in fill_packet() argument
457 if (wqe->wr.send_flags & IB_SEND_INLINE) { in fill_packet()
458 u8 *tmp = &wqe->dma.inline_data[wqe->dma.sge_offset]; in fill_packet()
463 wqe->dma.resid -= paylen; in fill_packet()
464 wqe->dma.sge_offset += paylen; in fill_packet()
466 err = copy_data(qp->pd, 0, &wqe->dma, in fill_packet()
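fill_packet() (lines 443-466) has two payload paths: inline data is copied directly out of the WQE's inline buffer with the cursor advanced by hand (mirroring the retry path at lines 31-32), while gather-list data goes through copy_data() against the WQE's dma state. A sketch, with the memcpy destination and the trailing copy_data() arguments assumed:

    if (wqe->wr.send_flags & IB_SEND_INLINE) {
        u8 *tmp = &wqe->dma.inline_data[wqe->dma.sge_offset];

        memcpy(payload_addr(pkt), tmp, paylen);
        wqe->dma.resid -= paylen;
        wqe->dma.sge_offset += paylen;
    } else {
        err = copy_data(qp->pd, 0, &wqe->dma,
                        payload_addr(pkt), paylen, from_mem_obj, &crc);
        if (err)
            return err;
    }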
488 struct rxe_send_wqe *wqe, in update_wqe_state() argument
493 wqe->state = wqe_state_pending; in update_wqe_state()
495 wqe->state = wqe_state_processing; in update_wqe_state()
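update_wqe_state() leaves the WQE in wqe_state_processing while packets of the message remain, and parks an RC WQE in wqe_state_pending once its last packet has gone out, where it waits for the responder's ACK. The condition itself is not visible in the listing; a sketch assuming it keys off the packet's end-of-message mask:

    if (pkt->mask & RXE_END_MASK) {
        if (qp_type(qp) == IB_QPT_RC)
            wqe->state = wqe_state_pending;   /* line 493 */
    } else {
        wqe->state = wqe_state_processing;    /* line 495 */
    }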
500 struct rxe_send_wqe *wqe, in update_wqe_psn() argument
505 int num_pkt = (wqe->dma.resid + payload + qp->mtu - 1) / qp->mtu; in update_wqe_psn()
512 wqe->first_psn = qp->req.psn; in update_wqe_psn()
513 wqe->last_psn = (qp->req.psn + num_pkt - 1) & BTH_PSN_MASK; in update_wqe_psn()
517 qp->req.psn = (wqe->first_psn + num_pkt) & BTH_PSN_MASK; in update_wqe_psn()
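update_wqe_psn() sizes the PSN window: line 505 counts the packets still owed (including the current one, hence the +payload and the ceiling division), the start packet pins first_psn/last_psn, and a read jumps qp->req.psn past the whole expected response (line 517). The arithmetic wraps at 24 bits. A standalone, compilable illustration; the input values are made up:

    /* PSN math from update_wqe_psn(), lines 505-517 */
    #include <stdio.h>

    #define BTH_PSN_MASK 0xffffff   /* 24-bit PSN space */

    int main(void)
    {
        unsigned int psn = 0xfffffe; /* qp->req.psn, near the wrap point */
        int mtu = 1024;
        int payload = 1024;          /* bytes in the current packet */
        int resid = 2000;            /* bytes left after this packet */

        /* packets left to send, counting this one (line 505) */
        int num_pkt = (resid + payload + mtu - 1) / mtu;

        unsigned int first_psn = psn;                               /* 512 */
        unsigned int last_psn = (psn + num_pkt - 1) & BTH_PSN_MASK; /* 513 */

        /* prints: num_pkt=3 first=0xfffffe last=0 (wrapped) */
        printf("num_pkt=%d first=%#x last=%#x\n",
               num_pkt, first_psn, last_psn);
        return 0;
    }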
522 static void save_state(struct rxe_send_wqe *wqe, in save_state() argument
527 rollback_wqe->state = wqe->state; in save_state()
528 rollback_wqe->first_psn = wqe->first_psn; in save_state()
529 rollback_wqe->last_psn = wqe->last_psn; in save_state()
533 static void rollback_state(struct rxe_send_wqe *wqe, in rollback_state() argument
538 wqe->state = rollback_wqe->state; in rollback_state()
539 wqe->first_psn = rollback_wqe->first_psn; in rollback_state()
540 wqe->last_psn = rollback_wqe->last_psn; in rollback_state()
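save_state() and rollback_state() mirror each other field for field (lines 527-529 vs 538-540): the requester snapshots the WQE before update_wqe_state()/update_wqe_psn() mutate it, because the transmit can still fail afterwards (see lines 706-713). The PSN is saved by value alongside; that line does not reference wqe, so its exact form is an assumption:

    static void save_state(struct rxe_send_wqe *wqe, struct rxe_qp *qp,
                           struct rxe_send_wqe *rollback_wqe,
                           u32 *rollback_psn)
    {
        rollback_wqe->state     = wqe->state;      /* line 527 */
        rollback_wqe->first_psn = wqe->first_psn;  /* line 528 */
        rollback_wqe->last_psn  = wqe->last_psn;   /* line 529 */
        *rollback_psn           = qp->req.psn;     /* assumed */
    }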
544 static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe, in update_state() argument
564 struct rxe_send_wqe *wqe; in rxe_requester() local
593 wqe = req_next_wqe(qp); in rxe_requester()
594 if (unlikely(!wqe)) in rxe_requester()
597 if (wqe->mask & WR_REG_MASK) { in rxe_requester()
598 if (wqe->wr.opcode == IB_WR_LOCAL_INV) { in rxe_requester()
603 wqe->wr.ex.invalidate_rkey >> 8); in rxe_requester()
606 wqe->wr.ex.invalidate_rkey); in rxe_requester()
607 wqe->state = wqe_state_error; in rxe_requester()
608 wqe->status = IB_WC_MW_BIND_ERR; in rxe_requester()
613 wqe->state = wqe_state_done; in rxe_requester()
614 wqe->status = IB_WC_SUCCESS; in rxe_requester()
615 } else if (wqe->wr.opcode == IB_WR_REG_MR) { in rxe_requester()
616 struct rxe_mem *rmr = to_rmr(wqe->wr.wr.reg.mr); in rxe_requester()
619 rmr->access = wqe->wr.wr.reg.access; in rxe_requester()
620 rmr->ibmr.lkey = wqe->wr.wr.reg.key; in rxe_requester()
621 rmr->ibmr.rkey = wqe->wr.wr.reg.key; in rxe_requester()
622 rmr->iova = wqe->wr.wr.reg.mr->iova; in rxe_requester()
623 wqe->state = wqe_state_done; in rxe_requester()
624 wqe->status = IB_WC_SUCCESS; in rxe_requester()
628 if ((wqe->wr.send_flags & IB_SEND_SIGNALED) || in rxe_requester()
650 opcode = next_opcode(qp, wqe, wqe->wr.opcode); in rxe_requester()
652 wqe->status = IB_WC_LOC_QP_OP_ERR; in rxe_requester()
658 if (check_init_depth(qp, wqe)) in rxe_requester()
663 payload = (mask & RXE_WRITE_OR_SEND) ? wqe->dma.resid : 0; in rxe_requester()
673 wqe->first_psn = qp->req.psn; in rxe_requester()
674 wqe->last_psn = qp->req.psn; in rxe_requester()
679 wqe->state = wqe_state_done; in rxe_requester()
680 wqe->status = IB_WC_SUCCESS; in rxe_requester()
688 skb = init_req_packet(qp, wqe, opcode, payload, &pkt); in rxe_requester()
694 if (fill_packet(qp, wqe, &pkt, skb, payload)) { in rxe_requester()
706 save_state(wqe, qp, &rollback_wqe, &rollback_psn); in rxe_requester()
707 update_wqe_state(qp, wqe, &pkt); in rxe_requester()
708 update_wqe_psn(qp, wqe, &pkt, payload); in rxe_requester()
713 rollback_state(wqe, qp, &rollback_wqe, rollback_psn); in rxe_requester()
723 update_state(qp, wqe, &pkt, payload); in rxe_requester()
728 wqe->status = IB_WC_LOC_PROT_ERR; in rxe_requester()
729 wqe->state = wqe_state_error; in rxe_requester()
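rxe_requester() ties the pieces together: fetch the next WQE, complete local operations (LOCAL_INV and REG_MR at lines 597-624) without emitting a packet, pick the wire opcode, reserve initiator depth for reads/atomics, then build, fill, and transmit the packet inside the save/rollback bracket at lines 706-723. Lines 673-680 are the one oddity: an oversized UD send must, per the IBA rule for UD messages larger than the port MTU, emit nothing yet complete successfully, so the requester fakes a one-PSN done WQE. A sketch of that branch, with the surrounding payload/MTU test and QP-type check assumed (they do not reference wqe); the real path also has to advance the send-queue index past this WQE, elided here:

    if (payload > mtu) {
        if (qp_type(qp) == IB_QPT_UD) {
            /* oversized UD message: consume one PSN and report
             * success without putting anything on the wire
             */
            wqe->first_psn = qp->req.psn;                   /* line 673 */
            wqe->last_psn = qp->req.psn;                    /* line 674 */
            qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
            wqe->state = wqe_state_done;                    /* line 679 */
            wqe->status = IB_WC_SUCCESS;                    /* line 680 */
            return 0;
        }
        payload = mtu;   /* otherwise clamp this packet to one MTU */
    }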