Lines Matching refs:wqe

140 struct rxe_send_wqe *wqe; in get_wqe() local
145 wqe = queue_head(qp->sq.queue, QUEUE_TYPE_FROM_CLIENT); in get_wqe()
146 *wqe_p = wqe; in get_wqe()
149 if (!wqe || wqe->state == wqe_state_posted) in get_wqe()
153 if (wqe->state == wqe_state_done) in get_wqe()
157 if (wqe->state == wqe_state_error) in get_wqe()
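
The get_wqe() matches above (lines 140-157) show the completer taking the WQE at the head of the send queue and branching on its state before it touches the packet at all. A minimal userspace sketch of that classification; the COMPST_* action names are assumptions for illustration (the matches show only the state checks, not the return values):

/* wqe_state values seen in the matches at lines 149-157 */
enum wqe_state { wqe_state_posted, wqe_state_pending, wqe_state_done, wqe_state_error };

/* COMPST_* names are assumed, not shown in the listing */
enum comp_action { COMPST_EXIT, COMPST_COMP_WQE, COMPST_ERROR, COMPST_CHECK_PSN };

struct send_wqe { enum wqe_state state; };

static enum comp_action classify_head_wqe(const struct send_wqe *wqe)
{
	if (!wqe || wqe->state == wqe_state_posted)
		return COMPST_EXIT;      /* nothing in flight: stop (line 149) */
	if (wqe->state == wqe_state_done)
		return COMPST_COMP_WQE;  /* finished: retire the WQE (line 153) */
	if (wqe->state == wqe_state_error)
		return COMPST_ERROR;     /* flush with an error CQE (line 157) */
	return COMPST_CHECK_PSN;         /* in flight: go validate the PSN */
}
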
173 struct rxe_send_wqe *wqe) in check_psn() argument
180 diff = psn_compare(pkt->psn, wqe->last_psn); in check_psn()
182 if (wqe->state == wqe_state_pending) { in check_psn()
183 if (wqe->mask & WR_ATOMIC_OR_READ_MASK) in check_psn()
199 if (pkt->psn == wqe->last_psn) in check_psn()
203 } else if ((diff > 0) && (wqe->mask & WR_ATOMIC_OR_READ_MASK)) { in check_psn()
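
check_psn() orders the incoming packet against wqe->last_psn with psn_compare() (line 180). PSNs are 24-bit values that wrap (see the BTH_PSN_MASK arithmetic at line 504), so ordering is serial-number arithmetic. A sketch consistent with the helper these matches call:

#include <stdint.h>

#define BTH_PSN_MASK 0x00ffffff	/* PSNs occupy the BTH's low 24 bits (cf. line 504) */

/*
 * Wrap-aware ordering of two 24-bit PSNs: shift the difference so bit 23
 * becomes the sign bit. Negative: a is behind b; zero: equal; positive:
 * a is ahead of b. Valid while the two PSNs are within 2^23 of each other.
 */
static int32_t psn_compare(uint32_t psn_a, uint32_t psn_b)
{
	return (int32_t)((psn_a - psn_b) << 8);
}

Read this way, the diff > 0 branch at line 203 means the acknowledged PSN has already passed this WQE's last PSN while a read or atomic is still outstanding.
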
212 struct rxe_send_wqe *wqe) in check_ack() argument
234 if ((pkt->psn == wqe->first_psn && in check_ack()
237 (wqe->first_psn == wqe->last_psn && in check_ack()
263 if (wqe->wr.opcode != IB_WR_RDMA_READ && in check_ack()
264 wqe->wr.opcode != IB_WR_RDMA_READ_WITH_INV) { in check_ack()
265 wqe->status = IB_WC_FATAL_ERR; in check_ack()
277 if (wqe->wr.opcode != IB_WR_ATOMIC_CMP_AND_SWP && in check_ack()
278 wqe->wr.opcode != IB_WR_ATOMIC_FETCH_AND_ADD) in check_ack()
312 wqe->status = IB_WC_REM_INV_REQ_ERR; in check_ack()
316 wqe->status = IB_WC_REM_ACCESS_ERR; in check_ack()
320 wqe->status = IB_WC_REM_OP_ERR; in check_ack()
325 wqe->status = IB_WC_REM_OP_ERR; in check_ack()
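
The status assignments at lines 312-325 are check_ack()'s NAK handling: each remote NAK syndrome maps to the completion status the consumer will see, and line 325 shows an unrecognized syndrome being treated like a remote operational error. A compilable reconstruction of that mapping; the AETH_NAK_* syndrome names are assumptions (the matches show only the resulting statuses), and both enums are local stand-ins for the kernel's definitions:

/* Local stand-ins; the real definitions live in the IB core and rxe headers. */
enum aeth_nak { AETH_NAK_INVALID_REQ, AETH_NAK_REM_ACC_ERR, AETH_NAK_REM_OP_ERR, AETH_NAK_UNKNOWN };
enum ib_wc_status { IB_WC_REM_INV_REQ_ERR, IB_WC_REM_ACCESS_ERR, IB_WC_REM_OP_ERR };

static enum ib_wc_status nak_to_wc_status(enum aeth_nak syn)
{
	switch (syn) {
	case AETH_NAK_INVALID_REQ: return IB_WC_REM_INV_REQ_ERR; /* line 312 */
	case AETH_NAK_REM_ACC_ERR: return IB_WC_REM_ACCESS_ERR;  /* line 316 */
	case AETH_NAK_REM_OP_ERR:  return IB_WC_REM_OP_ERR;      /* line 320 */
	default:                   return IB_WC_REM_OP_ERR;      /* line 325 */
	}
}
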
343 struct rxe_send_wqe *wqe) in do_read() argument
348 &wqe->dma, payload_addr(pkt), in do_read()
351 wqe->status = IB_WC_LOC_PROT_ERR; in do_read()
355 if (wqe->dma.resid == 0 && (pkt->mask & RXE_END_MASK)) in do_read()
363 struct rxe_send_wqe *wqe) in do_atomic() argument
370 &wqe->dma, &atomic_orig, in do_atomic()
373 wqe->status = IB_WC_LOC_PROT_ERR; in do_atomic()
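
do_read() and do_atomic() both funnel the response payload into the WQE's DMA state (&wqe->dma, lines 348 and 370) and set IB_WC_LOC_PROT_ERR when the copy fails. Line 355 gives the read-completion condition: all expected bytes consumed (resid == 0) and the packet carries the END flag. A toy model of that residual accounting (the struct and function names here are made up for the sketch):

#include <stdbool.h>
#include <stddef.h>

struct dma_state { size_t resid; };	/* bytes of the read still expected */

/* Returns true when this payload finishes the RDMA read (cf. line 355). */
static bool consume_read_payload(struct dma_state *dma, size_t copied, bool end_pkt)
{
	dma->resid -= copied;	/* in the driver, copy_data() does this bookkeeping */
	return dma->resid == 0 && end_pkt;
}
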
380 static void make_send_cqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe, in make_send_cqe() argument
388 wc->wr_id = wqe->wr.wr_id; in make_send_cqe()
389 wc->status = wqe->status; in make_send_cqe()
390 wc->opcode = wr_to_wc_opcode(wqe->wr.opcode); in make_send_cqe()
391 if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM || in make_send_cqe()
392 wqe->wr.opcode == IB_WR_SEND_WITH_IMM) in make_send_cqe()
394 wc->byte_len = wqe->dma.length; in make_send_cqe()
399 uwc->wr_id = wqe->wr.wr_id; in make_send_cqe()
400 uwc->status = wqe->status; in make_send_cqe()
401 uwc->opcode = wr_to_wc_opcode(wqe->wr.opcode); in make_send_cqe()
402 if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM || in make_send_cqe()
403 wqe->wr.opcode == IB_WR_SEND_WITH_IMM) in make_send_cqe()
405 uwc->byte_len = wqe->dma.length; in make_send_cqe()
418 static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe) in do_complete() argument
426 (wqe->wr.send_flags & IB_SEND_SIGNALED) || in do_complete()
427 wqe->status != IB_WC_SUCCESS); in do_complete()
430 make_send_cqe(qp, wqe, &cqe); in do_complete()
437 if (wqe->wr.opcode == IB_WR_SEND || in do_complete()
438 wqe->wr.opcode == IB_WR_SEND_WITH_IMM || in do_complete()
439 wqe->wr.opcode == IB_WR_SEND_WITH_INV) in do_complete()
454 struct rxe_send_wqe *wqe) in complete_ack() argument
458 if (wqe->has_rd_atomic) { in complete_ack()
459 wqe->has_rd_atomic = 0; in complete_ack()
490 do_complete(qp, wqe); in complete_ack()
500 struct rxe_send_wqe *wqe) in complete_wqe() argument
502 if (pkt && wqe->state == wqe_state_pending) { in complete_wqe()
503 if (psn_compare(wqe->last_psn, qp->comp.psn) >= 0) { in complete_wqe()
504 qp->comp.psn = (wqe->last_psn + 1) & BTH_PSN_MASK; in complete_wqe()
514 do_complete(qp, wqe); in complete_wqe()
522 struct rxe_send_wqe *wqe; in rxe_drain_resp_pkts() local
531 while ((wqe = queue_head(q, q->type))) { in rxe_drain_resp_pkts()
533 wqe->status = IB_WC_WR_FLUSH_ERR; in rxe_drain_resp_pkts()
534 do_complete(qp, wqe); in rxe_drain_resp_pkts()
556 struct rxe_send_wqe *wqe = NULL; in rxe_completer() local
600 state = get_wqe(qp, pkt, &wqe); in rxe_completer()
604 state = check_psn(qp, pkt, wqe); in rxe_completer()
608 state = check_ack(qp, pkt, wqe); in rxe_completer()
612 state = do_read(qp, pkt, wqe); in rxe_completer()
616 state = do_atomic(qp, pkt, wqe); in rxe_completer()
620 if (wqe->state == wqe_state_pending && in rxe_completer()
621 wqe->last_psn == pkt->psn) in rxe_completer()
628 state = complete_ack(qp, pkt, wqe); in rxe_completer()
632 state = complete_wqe(qp, pkt, wqe); in rxe_completer()
656 if (qp->comp.timeout_retry && wqe) { in rxe_completer()
688 if (!wqe || (wqe->state == wqe_state_posted)) { in rxe_completer()
723 wqe->status = IB_WC_RETRY_EXC_ERR; in rxe_completer()
744 wqe->status = IB_WC_RNR_RETRY_EXC_ERR; in rxe_completer()
750 WARN_ON_ONCE(wqe->status == IB_WC_SUCCESS); in rxe_completer()
751 do_complete(qp, wqe); in rxe_completer()
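
Taken together, the call sites at lines 600-632 are the arms of the completer's state machine: rxe_completer() loops over a switch, and every helper returns the next state. A compilable toy of that shape; the state names and the stubbed transitions are assumptions (the listing shows only the dispatch calls), wired up along the happy path get_wqe -> check_psn -> check_ack -> complete_ack -> complete_wqe:

#include <stdio.h>

/* State names are assumptions; the matches show only the helper calls. */
enum comp_state { GET_WQE, CHECK_PSN, CHECK_ACK, COMP_ACK, COMP_WQE, EXIT };

/* Stubs standing in for the helpers matched above; each hands back the
 * next state, which is all the dispatch loop needs to keep going. */
static enum comp_state stub_get_wqe(void)      { return CHECK_PSN; } /* line 600 */
static enum comp_state stub_check_psn(void)    { return CHECK_ACK; } /* line 604 */
static enum comp_state stub_check_ack(void)    { return COMP_ACK;  } /* line 608 */
static enum comp_state stub_complete_ack(void) { return COMP_WQE;  } /* line 628 */
static enum comp_state stub_complete_wqe(void) { return EXIT;      } /* line 632 */

int main(void)
{
	enum comp_state state = GET_WQE;

	while (state != EXIT) {
		switch (state) {
		case GET_WQE:   state = stub_get_wqe();      break;
		case CHECK_PSN: state = stub_check_psn();    break;
		case CHECK_ACK: state = stub_check_ack();    break;
		case COMP_ACK:  state = stub_complete_ack(); break;
		case COMP_WQE:  state = stub_complete_wqe(); break;
		case EXIT:      break;
		}
	}
	puts("completer reached EXIT");
	return 0;
}

In the real driver, do_read() and do_atomic() (lines 612 and 616) are additional states entered from check_ack() for read and atomic responses, and the retry-exhaustion exits at lines 723-751 feed do_complete() with a failure status instead of reaching EXIT cleanly.
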