Lines matching full:qp
50 void rxe_resp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb) in rxe_resp_queue_pkt() argument
55 skb_queue_tail(&qp->req_pkts, skb); in rxe_resp_queue_pkt()
58 (skb_queue_len(&qp->req_pkts) > 1); in rxe_resp_queue_pkt()
61 rxe_sched_task(&qp->resp.task); in rxe_resp_queue_pkt()
63 rxe_run_task(&qp->resp.task); in rxe_resp_queue_pkt()
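The matched lines in rxe_resp_queue_pkt() show a dispatch heuristic: the incoming skb is appended to qp->req_pkts, and the responder task is deferred (rxe_sched_task) when more than one packet is already queued, otherwise run directly (rxe_run_task). A minimal userspace sketch of that "run inline unless a backlog is building" pattern follows; the queue and task names are hypothetical stand-ins, not the driver's API.

/* Sketch (not kernel code): process inline when the queue was empty,
 * defer to a worker once a backlog exists.  All names are stand-ins
 * for skb_queue_tail()/skb_queue_len()/rxe_run_task()/rxe_sched_task().
 */
#include <stdio.h>
#include <stddef.h>

struct pkt {
	struct pkt *next;
	int id;
};

struct pkt_queue {
	struct pkt *head, *tail;
	size_t len;
};

static void queue_tail(struct pkt_queue *q, struct pkt *p)
{
	p->next = NULL;
	if (q->tail)
		q->tail->next = p;
	else
		q->head = p;
	q->tail = p;
	q->len++;
}

static void run_task(const char *how)
{
	printf("responder task: %s\n", how);	/* stand-in for the task call */
}

static void resp_queue_pkt(struct pkt_queue *q, struct pkt *p)
{
	queue_tail(q, p);

	/* mirrors the matched lines: defer only when a backlog exists */
	if (q->len > 1)
		run_task("scheduled (deferred)");
	else
		run_task("run inline");
}

int main(void)
{
	struct pkt_queue q = { 0 };
	struct pkt a = { .id = 1 }, b = { .id = 2 };

	resp_queue_pkt(&q, &a);	/* queue was empty -> inline */
	resp_queue_pkt(&q, &b);	/* backlog -> deferred */
	return 0;
}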
66 static inline enum resp_states get_req(struct rxe_qp *qp, in get_req() argument
71 skb = skb_peek(&qp->req_pkts); in get_req()
77 return (qp->resp.res) ? RESPST_READ_REPLY : RESPST_CHK_PSN; in get_req()
80 static enum resp_states check_psn(struct rxe_qp *qp, in check_psn() argument
83 int diff = psn_compare(pkt->psn, qp->resp.psn); in check_psn()
84 struct rxe_dev *rxe = to_rdev(qp->ibqp.device); in check_psn()
86 switch (qp_type(qp)) { in check_psn()
89 if (qp->resp.sent_psn_nak) in check_psn()
92 qp->resp.sent_psn_nak = 1; in check_psn()
101 if (qp->resp.sent_psn_nak) in check_psn()
102 qp->resp.sent_psn_nak = 0; in check_psn()
107 if (qp->resp.drop_msg || diff != 0) { in check_psn()
109 qp->resp.drop_msg = 0; in check_psn()
113 qp->resp.drop_msg = 1; in check_psn()
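check_psn() classifies the packet by psn_compare(pkt->psn, qp->resp.psn). PSNs are 24-bit values masked with BTH_PSN_MASK, so both the compare and the "+1" update seen later in the listing must tolerate wraparound. Below is a small self-contained sketch of a wraparound-safe 24-bit compare and increment; psn_compare and BTH_PSN_MASK come from the source, the rest is illustrative.

/* Sketch: wraparound-safe comparison of 24-bit PSNs.  Shifting the
 * difference into the top 24 bits of a signed 32-bit value makes the
 * sign reflect the circular distance, the same idea psn_compare()
 * relies on.
 */
#include <stdio.h>
#include <stdint.h>

#define BTH_PSN_MASK 0xffffffu	/* PSN is 24 bits wide */

static int psn_cmp(uint32_t a, uint32_t b)
{
	return (int32_t)((a - b) << 8);	/* <0, 0, >0 like strcmp */
}

static uint32_t psn_next(uint32_t psn)
{
	return (psn + 1) & BTH_PSN_MASK;	/* as in (pkt->psn + 1) & BTH_PSN_MASK */
}

int main(void)
{
	/* 0x000001 is "after" 0xffffff once wraparound is accounted for */
	printf("%d\n", psn_cmp(0x000001, 0xffffff) > 0);	/* prints 1 */
	printf("%06x\n", psn_next(0xffffff));			/* prints 000000 */
	return 0;
}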
124 static enum resp_states check_op_seq(struct rxe_qp *qp, in check_op_seq() argument
127 switch (qp_type(qp)) { in check_op_seq()
129 switch (qp->resp.opcode) { in check_op_seq()
170 switch (qp->resp.opcode) { in check_op_seq()
201 qp->resp.drop_msg = 1; in check_op_seq()
214 static bool check_qp_attr_access(struct rxe_qp *qp, in check_qp_attr_access() argument
218 !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_READ)) || in check_qp_attr_access()
220 !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) || in check_qp_attr_access()
222 !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_ATOMIC))) in check_qp_attr_access()
229 !(qp->attr.qp_access_flags & IB_ACCESS_FLUSH_GLOBAL)) || in check_qp_attr_access()
231 !(qp->attr.qp_access_flags & IB_ACCESS_FLUSH_PERSISTENT))) in check_qp_attr_access()
238 static enum resp_states check_op_valid(struct rxe_qp *qp, in check_op_valid() argument
241 switch (qp_type(qp)) { in check_op_valid()
243 if (!check_qp_attr_access(qp, pkt)) in check_op_valid()
250 !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) { in check_op_valid()
251 qp->resp.drop_msg = 1; in check_op_valid()
269 static enum resp_states get_srq_wqe(struct rxe_qp *qp) in get_srq_wqe() argument
271 struct rxe_srq *srq = qp->srq; in get_srq_wqe()
293 rxe_dbg_qp(qp, "invalid num_sge in SRQ entry\n"); in get_srq_wqe()
297 memcpy(&qp->resp.srq_wqe, wqe, size); in get_srq_wqe()
299 qp->resp.wqe = &qp->resp.srq_wqe.wqe; in get_srq_wqe()
313 ev.device = qp->ibqp.device; in get_srq_wqe()
314 ev.element.srq = qp->ibqp.srq; in get_srq_wqe()
320 static enum resp_states check_resource(struct rxe_qp *qp, in check_resource() argument
323 struct rxe_srq *srq = qp->srq; in check_resource()
330 if (likely(qp->attr.max_dest_rd_atomic > 0)) in check_resource()
338 return get_srq_wqe(qp); in check_resource()
340 qp->resp.wqe = queue_head(qp->rq.queue, in check_resource()
342 return (qp->resp.wqe) ? RESPST_CHK_LENGTH : RESPST_ERR_RNR; in check_resource()
348 static enum resp_states rxe_resp_check_length(struct rxe_qp *qp, in rxe_resp_check_length() argument
357 if (pkt->mask & RXE_PAYLOAD_MASK && ((qp_type(qp) == IB_QPT_RC) || in rxe_resp_check_length()
358 (qp_type(qp) == IB_QPT_UC))) { in rxe_resp_check_length()
359 unsigned int mtu = qp->mtu; in rxe_resp_check_length()
365 rxe_dbg_qp(qp, "only packet too long"); in rxe_resp_check_length()
371 rxe_dbg_qp(qp, "first or middle packet not mtu"); in rxe_resp_check_length()
376 rxe_dbg_qp(qp, "last packet zero or too long"); in rxe_resp_check_length()
385 rxe_dbg_qp(qp, "dma length too long"); in rxe_resp_check_length()
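The debug strings in rxe_resp_check_length() spell out the per-packet payload rules for RC/UC: an "only" packet may carry up to one MTU, a first or middle packet must carry exactly one MTU, and a last packet must carry between 1 and MTU bytes. A minimal sketch of those rules is below; the enum names are illustrative, not the driver's RXE_*_MASK flags.

/* Sketch of the length rules the matched debug messages describe. */
#include <stdbool.h>
#include <stdio.h>

enum pkt_place { PKT_ONLY, PKT_FIRST, PKT_MIDDLE, PKT_LAST };

static bool payload_len_ok(enum pkt_place place, unsigned payload, unsigned mtu)
{
	switch (place) {
	case PKT_ONLY:
		return payload <= mtu;			/* "only packet too long" otherwise */
	case PKT_FIRST:
	case PKT_MIDDLE:
		return payload == mtu;			/* "first or middle packet not mtu" */
	case PKT_LAST:
		return payload >= 1 && payload <= mtu;	/* "last packet zero or too long" */
	}
	return false;
}

int main(void)
{
	unsigned mtu = 1024;

	printf("%d %d %d\n",
	       payload_len_ok(PKT_ONLY, 512, mtu),	/* 1 */
	       payload_len_ok(PKT_MIDDLE, 512, mtu),	/* 0 */
	       payload_len_ok(PKT_LAST, 0, mtu));	/* 0 */
	return 0;
}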
398 * Instead set qp->resp.rkey to 0 which is an invalid rkey
401 static void qp_resp_from_reth(struct rxe_qp *qp, struct rxe_pkt_info *pkt) in qp_resp_from_reth() argument
405 qp->resp.va = reth_va(pkt); in qp_resp_from_reth()
406 qp->resp.offset = 0; in qp_resp_from_reth()
407 qp->resp.resid = length; in qp_resp_from_reth()
408 qp->resp.length = length; in qp_resp_from_reth()
410 qp->resp.rkey = 0; in qp_resp_from_reth()
412 qp->resp.rkey = reth_rkey(pkt); in qp_resp_from_reth()
415 static void qp_resp_from_atmeth(struct rxe_qp *qp, struct rxe_pkt_info *pkt) in qp_resp_from_atmeth() argument
417 qp->resp.va = atmeth_va(pkt); in qp_resp_from_atmeth()
418 qp->resp.offset = 0; in qp_resp_from_atmeth()
419 qp->resp.rkey = atmeth_rkey(pkt); in qp_resp_from_atmeth()
420 qp->resp.resid = sizeof(u64); in qp_resp_from_atmeth()
423 /* resolve the packet rkey to qp->resp.mr or set qp->resp.mr to NULL
427 static enum resp_states check_rkey(struct rxe_qp *qp, in check_rkey() argument
436 int mtu = qp->mtu; in check_rkey()
446 qp_resp_from_reth(qp, pkt); in check_rkey()
454 qp_resp_from_reth(qp, pkt); in check_rkey()
461 qp_resp_from_atmeth(qp, pkt); in check_rkey()
473 qp->resp.mr = NULL; in check_rkey()
477 va = qp->resp.va; in check_rkey()
478 rkey = qp->resp.rkey; in check_rkey()
479 resid = qp->resp.resid; in check_rkey()
483 mw = rxe_lookup_mw(qp, access, rkey); in check_rkey()
485 rxe_dbg_qp(qp, "no MW matches rkey %#x\n", rkey); in check_rkey()
492 rxe_dbg_qp(qp, "MW doesn't have an MR\n"); in check_rkey()
498 qp->resp.offset = mw->addr; in check_rkey()
504 mr = lookup_mr(qp->pd, access, rkey, RXE_LOOKUP_REMOTE); in check_rkey()
506 rxe_dbg_qp(qp, "no MR matches rkey %#x\n", rkey); in check_rkey()
520 if (mr_check_range(mr, va + qp->resp.offset, resid)) { in check_rkey()
547 WARN_ON_ONCE(qp->resp.mr); in check_rkey()
549 qp->resp.mr = mr; in check_rkey()
553 qp->resp.mr = NULL; in check_rkey()
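check_rkey() resolves the packet's rkey to an MR (directly, or through an MW) and then validates that the requested window starting at qp->resp.va + qp->resp.offset fits inside the region; mr_check_range() in the listing performs that check. The sketch below shows one overflow-aware way to express such a "does [va, va + len) fit" test; the struct and field names (iova, length) are assumptions for illustration, not the driver's layout.

/* Sketch of an overflow-aware range check of the kind mr_check_range()
 * performs.  Region fields are assumed names, not the kernel struct.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct region {
	uint64_t iova;		/* start of the registered range */
	uint64_t length;	/* size of the registered range */
};

static bool range_ok(const struct region *r, uint64_t va, uint64_t len)
{
	if (va < r->iova)
		return false;
	if (len > r->length)
		return false;
	/* written so that va + len cannot overflow before the compare */
	return va - r->iova <= r->length - len;
}

int main(void)
{
	struct region r = { .iova = 0x1000, .length = 0x2000 };

	printf("%d %d\n",
	       range_ok(&r, 0x1800, 0x800),	/* 1: inside */
	       range_ok(&r, 0x2c00, 0x800));	/* 0: runs past the end */
	return 0;
}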
562 static enum resp_states send_data_in(struct rxe_qp *qp, void *data_addr, in send_data_in() argument
567 err = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE, &qp->resp.wqe->dma, in send_data_in()
576 static enum resp_states write_data_in(struct rxe_qp *qp, in write_data_in() argument
583 err = rxe_mr_copy(qp->resp.mr, qp->resp.va + qp->resp.offset, in write_data_in()
590 qp->resp.va += data_len; in write_data_in()
591 qp->resp.resid -= data_len; in write_data_in()
597 static struct resp_res *rxe_prepare_res(struct rxe_qp *qp, in rxe_prepare_res() argument
604 res = &qp->resp.resources[qp->resp.res_head]; in rxe_prepare_res()
605 rxe_advance_resp_resource(qp); in rxe_prepare_res()
613 res->read.va = qp->resp.va + qp->resp.offset; in rxe_prepare_res()
614 res->read.va_org = qp->resp.va + qp->resp.offset; in rxe_prepare_res()
615 res->read.resid = qp->resp.resid; in rxe_prepare_res()
616 res->read.length = qp->resp.resid; in rxe_prepare_res()
617 res->read.rkey = qp->resp.rkey; in rxe_prepare_res()
619 pkts = max_t(u32, (reth_len(pkt) + qp->mtu - 1)/qp->mtu, 1); in rxe_prepare_res()
633 res->flush.va = qp->resp.va + qp->resp.offset; in rxe_prepare_res()
634 res->flush.length = qp->resp.length; in rxe_prepare_res()
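rxe_prepare_res() claims the slot at qp->resp.resources[qp->resp.res_head] and then advances the head (rxe_advance_resp_resource in the source), so the resources array behaves as a small ring whose depth is set by max_dest_rd_atomic. A minimal sketch of that ring-allocation pattern follows; the struct layout and depth are illustrative assumptions.

/* Sketch of a fixed-size ring of responder resources indexed by a head
 * counter that wraps, analogous to qp->resp.resources[]/res_head.
 */
#include <stdio.h>

#define MAX_DEST_RD_ATOMIC 4	/* assumed depth for the example */

struct res {
	unsigned type;		/* 0 = free slot */
	unsigned first_psn;
	unsigned last_psn;
};

struct responder {
	struct res resources[MAX_DEST_RD_ATOMIC];
	unsigned res_head;
};

static struct res *prepare_res(struct responder *r, unsigned type,
			       unsigned first_psn, unsigned last_psn)
{
	struct res *res = &r->resources[r->res_head];

	/* advance the head, wrapping at the configured depth */
	r->res_head = (r->res_head + 1) % MAX_DEST_RD_ATOMIC;

	res->type = type;
	res->first_psn = first_psn;
	res->last_psn = last_psn;
	return res;
}

int main(void)
{
	struct responder r = { 0 };

	for (unsigned psn = 0; psn < 6; psn++)
		prepare_res(&r, 1, psn, psn);

	printf("head now at slot %u\n", r.res_head);	/* 2: wrapped once */
	return 0;
}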
642 static enum resp_states process_flush(struct rxe_qp *qp, in process_flush() argument
646 struct rxe_mr *mr = qp->resp.mr; in process_flush()
647 struct resp_res *res = qp->resp.res; in process_flush()
653 res = rxe_prepare_res(qp, pkt, RXE_FLUSH_MASK); in process_flush()
654 qp->resp.res = res; in process_flush()
675 qp->resp.msn++; in process_flush()
678 qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK; in process_flush()
679 qp->resp.ack_psn = qp->resp.psn; in process_flush()
681 qp->resp.opcode = pkt->opcode; in process_flush()
682 qp->resp.status = IB_WC_SUCCESS; in process_flush()
687 static enum resp_states atomic_reply(struct rxe_qp *qp, in atomic_reply() argument
690 struct rxe_mr *mr = qp->resp.mr; in atomic_reply()
691 struct resp_res *res = qp->resp.res; in atomic_reply()
695 res = rxe_prepare_res(qp, pkt, RXE_ATOMIC_MASK); in atomic_reply()
696 qp->resp.res = res; in atomic_reply()
700 u64 iova = qp->resp.va + qp->resp.offset; in atomic_reply()
709 qp->resp.msn++; in atomic_reply()
712 qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK; in atomic_reply()
713 qp->resp.ack_psn = qp->resp.psn; in atomic_reply()
715 qp->resp.opcode = pkt->opcode; in atomic_reply()
716 qp->resp.status = IB_WC_SUCCESS; in atomic_reply()
722 static enum resp_states atomic_write_reply(struct rxe_qp *qp, in atomic_write_reply() argument
725 struct resp_res *res = qp->resp.res; in atomic_write_reply()
732 res = rxe_prepare_res(qp, pkt, RXE_ATOMIC_WRITE_MASK); in atomic_write_reply()
733 qp->resp.res = res; in atomic_write_reply()
739 mr = qp->resp.mr; in atomic_write_reply()
741 iova = qp->resp.va + qp->resp.offset; in atomic_write_reply()
747 qp->resp.resid = 0; in atomic_write_reply()
748 qp->resp.msn++; in atomic_write_reply()
751 qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK; in atomic_write_reply()
752 qp->resp.ack_psn = qp->resp.psn; in atomic_write_reply()
754 qp->resp.opcode = pkt->opcode; in atomic_write_reply()
755 qp->resp.status = IB_WC_SUCCESS; in atomic_write_reply()
760 static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp, in prepare_ack_packet() argument
767 struct rxe_dev *rxe = to_rdev(qp->ibqp.device); in prepare_ack_packet()
779 skb = rxe_init_packet(rxe, &qp->pri_av, paylen, ack); in prepare_ack_packet()
783 ack->qp = qp; in prepare_ack_packet()
790 qp->attr.dest_qp_num, 0, psn); in prepare_ack_packet()
794 aeth_set_msn(ack, qp->resp.msn); in prepare_ack_packet()
798 atmack_set_orig(ack, qp->resp.res->atomic.orig_val); in prepare_ack_packet()
800 err = rxe_prepare(&qp->pri_av, ack, skb); in prepare_ack_packet()
811 * @qp: the qp
826 static struct rxe_mr *rxe_recheck_mr(struct rxe_qp *qp, u32 rkey) in rxe_recheck_mr() argument
828 struct rxe_dev *rxe = to_rdev(qp->ibqp.device); in rxe_recheck_mr()
865 static enum resp_states read_reply(struct rxe_qp *qp, in read_reply() argument
870 int mtu = qp->mtu; in read_reply()
875 struct resp_res *res = qp->resp.res; in read_reply()
879 res = rxe_prepare_res(qp, req_pkt, RXE_READ_MASK); in read_reply()
880 qp->resp.res = res; in read_reply()
884 if (!res->replay || qp->resp.length == 0) { in read_reply()
886 * otherwise qp->resp.mr holds a ref on mr in read_reply()
889 mr = qp->resp.mr; in read_reply()
890 qp->resp.mr = NULL; in read_reply()
892 mr = rxe_recheck_mr(qp, res->read.rkey); in read_reply()
906 mr = rxe_recheck_mr(qp, res->read.rkey); in read_reply()
920 skb = prepare_ack_packet(qp, &ack_pkt, opcode, payload, in read_reply()
942 err = rxe_xmit_packet(qp, &ack_pkt, skb); in read_reply()
955 qp->resp.res = NULL; in read_reply()
957 qp->resp.opcode = -1; in read_reply()
958 if (psn_compare(res->cur_psn, qp->resp.psn) >= 0) in read_reply()
959 qp->resp.psn = res->cur_psn; in read_reply()
969 static int invalidate_rkey(struct rxe_qp *qp, u32 rkey) in invalidate_rkey() argument
972 return rxe_invalidate_mw(qp, rkey); in invalidate_rkey()
974 return rxe_invalidate_mr(qp, rkey); in invalidate_rkey()
980 static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt) in execute() argument
987 if (qp_type(qp) == IB_QPT_UD || in execute()
988 qp_type(qp) == IB_QPT_GSI) { in execute()
994 err = send_data_in(qp, &hdr, sizeof(hdr)); in execute()
996 err = send_data_in(qp, ipv6_hdr(skb), in execute()
1002 err = send_data_in(qp, payload_addr(pkt), payload_size(pkt)); in execute()
1006 err = write_data_in(qp, pkt); in execute()
1011 qp->resp.msn++; in execute()
1027 err = invalidate_rkey(qp, rkey); in execute()
1034 qp->resp.msn++; in execute()
1037 qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK; in execute()
1038 qp->resp.ack_psn = qp->resp.psn; in execute()
1040 qp->resp.opcode = pkt->opcode; in execute()
1041 qp->resp.status = IB_WC_SUCCESS; in execute()
1045 else if (qp_type(qp) == IB_QPT_RC) in execute()
1051 static enum resp_states do_complete(struct rxe_qp *qp, in do_complete() argument
1057 struct rxe_recv_wqe *wqe = qp->resp.wqe; in do_complete()
1058 struct rxe_dev *rxe = to_rdev(qp->ibqp.device); in do_complete()
1066 if (qp->rcq->is_user) { in do_complete()
1067 uwc->status = qp->resp.status; in do_complete()
1068 uwc->qp_num = qp->ibqp.qp_num; in do_complete()
1071 wc->status = qp->resp.status; in do_complete()
1072 wc->qp = &qp->ibqp; in do_complete()
1083 qp->resp.length : wqe->dma.length - wqe->dma.resid; in do_complete()
1088 if (qp->rcq->is_user) { in do_complete()
1104 uwc->port_num = qp->attr.port_num; in do_complete()
1132 wc->port_num = qp->attr.port_num; in do_complete()
1136 rxe_err_qp(qp, "non-flush error status = %d", in do_complete()
1141 if (!qp->srq) in do_complete()
1142 queue_advance_consumer(qp->rq.queue, QUEUE_TYPE_FROM_CLIENT); in do_complete()
1144 qp->resp.wqe = NULL; in do_complete()
1146 if (rxe_cq_post(qp->rcq, &cqe, pkt ? bth_se(pkt) : 1)) in do_complete()
1150 spin_lock_irqsave(&qp->state_lock, flags); in do_complete()
1151 if (unlikely(qp_state(qp) == IB_QPS_ERR)) { in do_complete()
1152 spin_unlock_irqrestore(&qp->state_lock, flags); in do_complete()
1155 spin_unlock_irqrestore(&qp->state_lock, flags); in do_complete()
1159 if (qp_type(qp) == IB_QPT_RC) in do_complete()
1166 static int send_common_ack(struct rxe_qp *qp, u8 syndrome, u32 psn, in send_common_ack() argument
1173 skb = prepare_ack_packet(qp, &ack_pkt, opcode, 0, psn, syndrome); in send_common_ack()
1177 err = rxe_xmit_packet(qp, &ack_pkt, skb); in send_common_ack()
1179 rxe_dbg_qp(qp, "Failed sending %s\n", msg); in send_common_ack()
1184 static int send_ack(struct rxe_qp *qp, u8 syndrome, u32 psn) in send_ack() argument
1186 return send_common_ack(qp, syndrome, psn, in send_ack()
1190 static int send_atomic_ack(struct rxe_qp *qp, u8 syndrome, u32 psn) in send_atomic_ack() argument
1192 int ret = send_common_ack(qp, syndrome, psn, in send_atomic_ack()
1198 qp->resp.res = NULL; in send_atomic_ack()
1202 static int send_read_response_ack(struct rxe_qp *qp, u8 syndrome, u32 psn) in send_read_response_ack() argument
1204 int ret = send_common_ack(qp, syndrome, psn, in send_read_response_ack()
1211 qp->resp.res = NULL; in send_read_response_ack()
1215 static enum resp_states acknowledge(struct rxe_qp *qp, in acknowledge() argument
1218 if (qp_type(qp) != IB_QPT_RC) in acknowledge()
1221 if (qp->resp.aeth_syndrome != AETH_ACK_UNLIMITED) in acknowledge()
1222 send_ack(qp, qp->resp.aeth_syndrome, pkt->psn); in acknowledge()
1224 send_atomic_ack(qp, AETH_ACK_UNLIMITED, pkt->psn); in acknowledge()
1226 send_read_response_ack(qp, AETH_ACK_UNLIMITED, pkt->psn); in acknowledge()
1228 send_ack(qp, AETH_ACK_UNLIMITED, pkt->psn); in acknowledge()
1233 static enum resp_states cleanup(struct rxe_qp *qp, in cleanup() argument
1239 skb = skb_dequeue(&qp->req_pkts); in cleanup()
1240 rxe_put(qp); in cleanup()
1242 ib_device_put(qp->ibqp.device); in cleanup()
1245 if (qp->resp.mr) { in cleanup()
1246 rxe_put(qp->resp.mr); in cleanup()
1247 qp->resp.mr = NULL; in cleanup()
1253 static struct resp_res *find_resource(struct rxe_qp *qp, u32 psn) in find_resource() argument
1257 for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) { in find_resource()
1258 struct resp_res *res = &qp->resp.resources[i]; in find_resource()
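find_resource() walks the max_dest_rd_atomic slots looking for a saved read/atomic response that covers the PSN of a retried request, so duplicate_request() can replay it instead of re-executing. The sketch below shows that lookup under assumed field names (in_use, first_psn, last_psn), reusing the wraparound-safe compare sketched earlier.

/* Sketch of looking up a saved response resource by PSN, the way
 * find_resource() matches a retried read/atomic against saved slots.
 * Field names are illustrative assumptions.
 */
#include <stdint.h>
#include <stdio.h>

struct res {
	int in_use;
	uint32_t first_psn;
	uint32_t last_psn;
};

static int psn_cmp(uint32_t a, uint32_t b)
{
	return (int32_t)((a - b) << 8);	/* wraparound-safe 24-bit compare */
}

static struct res *find_res(struct res *tbl, unsigned n, uint32_t psn)
{
	for (unsigned i = 0; i < n; i++) {
		struct res *res = &tbl[i];

		if (!res->in_use)
			continue;
		if (psn_cmp(psn, res->first_psn) >= 0 &&
		    psn_cmp(psn, res->last_psn) <= 0)
			return res;
	}
	return NULL;
}

int main(void)
{
	struct res tbl[2] = {
		{ .in_use = 1, .first_psn = 10, .last_psn = 14 },
		{ .in_use = 1, .first_psn = 20, .last_psn = 20 },
	};

	printf("%d\n", find_res(tbl, 2, 12) == &tbl[0]);	/* 1 */
	printf("%d\n", find_res(tbl, 2, 17) == NULL);		/* 1 */
	return 0;
}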
1272 static enum resp_states duplicate_request(struct rxe_qp *qp, in duplicate_request() argument
1276 u32 prev_psn = (qp->resp.ack_psn - 1) & BTH_PSN_MASK; in duplicate_request()
1281 send_ack(qp, AETH_ACK_UNLIMITED, prev_psn); in duplicate_request()
1287 res = find_resource(qp, pkt->psn); in duplicate_request()
1291 qp->resp.res = res; in duplicate_request()
1302 res = find_resource(qp, pkt->psn); in duplicate_request()
1341 qp->resp.res = res; in duplicate_request()
1349 res = find_resource(qp, pkt->psn); in duplicate_request()
1353 qp->resp.res = res; in duplicate_request()
1369 static void do_class_ac_error(struct rxe_qp *qp, u8 syndrome, in do_class_ac_error() argument
1372 qp->resp.aeth_syndrome = syndrome; in do_class_ac_error()
1373 qp->resp.status = status; in do_class_ac_error()
1376 qp->resp.goto_error = 1; in do_class_ac_error()
1379 static enum resp_states do_class_d1e_error(struct rxe_qp *qp) in do_class_d1e_error() argument
1382 if (qp->srq) { in do_class_d1e_error()
1384 qp->resp.drop_msg = 1; in do_class_d1e_error()
1385 if (qp->resp.wqe) { in do_class_d1e_error()
1386 qp->resp.status = IB_WC_REM_INV_REQ_ERR; in do_class_d1e_error()
1397 if (qp->resp.wqe) { in do_class_d1e_error()
1398 qp->resp.wqe->dma.resid = qp->resp.wqe->dma.length; in do_class_d1e_error()
1399 qp->resp.wqe->dma.cur_sge = 0; in do_class_d1e_error()
1400 qp->resp.wqe->dma.sge_offset = 0; in do_class_d1e_error()
1401 qp->resp.opcode = -1; in do_class_d1e_error()
1404 if (qp->resp.mr) { in do_class_d1e_error()
1405 rxe_put(qp->resp.mr); in do_class_d1e_error()
1406 qp->resp.mr = NULL; in do_class_d1e_error()
1414 static void drain_req_pkts(struct rxe_qp *qp) in drain_req_pkts() argument
1418 while ((skb = skb_dequeue(&qp->req_pkts))) { in drain_req_pkts()
1419 rxe_put(qp); in drain_req_pkts()
1421 ib_device_put(qp->ibqp.device); in drain_req_pkts()
1426 static int flush_recv_wqe(struct rxe_qp *qp, struct rxe_recv_wqe *wqe) in flush_recv_wqe() argument
1433 if (qp->rcq->is_user) { in flush_recv_wqe()
1436 uwc->qp_num = qp_num(qp); in flush_recv_wqe()
1440 wc->qp = &qp->ibqp; in flush_recv_wqe()
1443 err = rxe_cq_post(qp->rcq, &cqe, 0); in flush_recv_wqe()
1445 rxe_dbg_cq(qp->rcq, "post cq failed err = %d", err); in flush_recv_wqe()
1454 static void flush_recv_queue(struct rxe_qp *qp, bool notify) in flush_recv_queue() argument
1456 struct rxe_queue *q = qp->rq.queue; in flush_recv_queue()
1460 if (qp->srq) { in flush_recv_queue()
1461 if (notify && qp->ibqp.event_handler) { in flush_recv_queue()
1464 ev.device = qp->ibqp.device; in flush_recv_queue()
1465 ev.element.qp = &qp->ibqp; in flush_recv_queue()
1467 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); in flush_recv_queue()
1473 if (!qp->rq.queue) in flush_recv_queue()
1478 err = flush_recv_wqe(qp, wqe); in flush_recv_queue()
1485 qp->resp.wqe = NULL; in flush_recv_queue()
1488 int rxe_responder(struct rxe_qp *qp) in rxe_responder() argument
1490 struct rxe_dev *rxe = to_rdev(qp->ibqp.device); in rxe_responder()
1496 spin_lock_irqsave(&qp->state_lock, flags); in rxe_responder()
1497 if (!qp->valid || qp_state(qp) == IB_QPS_ERR || in rxe_responder()
1498 qp_state(qp) == IB_QPS_RESET) { in rxe_responder()
1499 bool notify = qp->valid && (qp_state(qp) == IB_QPS_ERR); in rxe_responder()
1501 drain_req_pkts(qp); in rxe_responder()
1502 flush_recv_queue(qp, notify); in rxe_responder()
1503 spin_unlock_irqrestore(&qp->state_lock, flags); in rxe_responder()
1506 spin_unlock_irqrestore(&qp->state_lock, flags); in rxe_responder()
1508 qp->resp.aeth_syndrome = AETH_ACK_UNLIMITED; in rxe_responder()
1513 rxe_dbg_qp(qp, "state = %s\n", resp_state_name[state]); in rxe_responder()
1516 state = get_req(qp, &pkt); in rxe_responder()
1519 state = check_psn(qp, pkt); in rxe_responder()
1522 state = check_op_seq(qp, pkt); in rxe_responder()
1525 state = check_op_valid(qp, pkt); in rxe_responder()
1528 state = check_resource(qp, pkt); in rxe_responder()
1531 state = rxe_resp_check_length(qp, pkt); in rxe_responder()
1534 state = check_rkey(qp, pkt); in rxe_responder()
1537 state = execute(qp, pkt); in rxe_responder()
1540 state = do_complete(qp, pkt); in rxe_responder()
1543 state = read_reply(qp, pkt); in rxe_responder()
1546 state = atomic_reply(qp, pkt); in rxe_responder()
1549 state = atomic_write_reply(qp, pkt); in rxe_responder()
1552 state = process_flush(qp, pkt); in rxe_responder()
1555 state = acknowledge(qp, pkt); in rxe_responder()
1558 state = cleanup(qp, pkt); in rxe_responder()
1561 state = duplicate_request(qp, pkt); in rxe_responder()
1565 send_ack(qp, AETH_NAK_PSN_SEQ_ERROR, qp->resp.psn); in rxe_responder()
1575 do_class_ac_error(qp, AETH_NAK_INVALID_REQ, in rxe_responder()
1581 state = do_class_d1e_error(qp); in rxe_responder()
1584 if (qp_type(qp) == IB_QPT_RC) { in rxe_responder()
1587 send_ack(qp, AETH_RNR_NAK | in rxe_responder()
1589 qp->attr.min_rnr_timer), in rxe_responder()
1593 qp->resp.drop_msg = 1; in rxe_responder()
1599 if (qp_type(qp) == IB_QPT_RC) { in rxe_responder()
1601 do_class_ac_error(qp, AETH_NAK_REM_ACC_ERR, in rxe_responder()
1605 qp->resp.drop_msg = 1; in rxe_responder()
1606 if (qp->srq) { in rxe_responder()
1608 qp->resp.status = IB_WC_REM_ACCESS_ERR; in rxe_responder()
1619 qp->resp.goto_error = 1; in rxe_responder()
1620 qp->resp.status = IB_WC_REM_INV_REQ_ERR; in rxe_responder()
1625 if (qp_type(qp) == IB_QPT_RC) { in rxe_responder()
1627 do_class_ac_error(qp, AETH_NAK_INVALID_REQ, in rxe_responder()
1630 } else if (qp->srq) { in rxe_responder()
1632 qp->resp.status = IB_WC_REM_INV_REQ_ERR; in rxe_responder()
1636 qp->resp.drop_msg = 1; in rxe_responder()
1643 do_class_ac_error(qp, AETH_NAK_REM_OP_ERR, in rxe_responder()
1654 if (qp->resp.goto_error) { in rxe_responder()
1662 if (qp->resp.goto_error) { in rxe_responder()
1670 qp->resp.goto_error = 0; in rxe_responder()
1671 rxe_dbg_qp(qp, "moved to error state\n"); in rxe_responder()
1672 rxe_qp_error(qp); in rxe_responder()
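The tail of the listing is rxe_responder() itself: after draining or flushing when the QP is invalid, in error, or in reset, it runs a dispatch loop in which each handler (get_req, check_psn, check_op_seq, ..., cleanup) consumes the current packet and returns the next resp_state, and the loop switches on that state until the work or error handling is finished. The sketch below illustrates that "handler returns the next state" loop in plain C; the states and handlers are a trimmed-down illustration, not the driver's RESPST_* set.

/* Sketch of a state-machine dispatch loop where each handler returns
 * the next state, the pattern rxe_responder() follows.
 */
#include <stdio.h>

enum state { ST_GET_REQ, ST_CHECK, ST_EXECUTE, ST_COMPLETE, ST_DONE };

static const char *state_name[] = {
	"GET_REQ", "CHECK", "EXECUTE", "COMPLETE", "DONE",
};

static enum state get_req(void)  { return ST_CHECK; }
static enum state check(void)    { return ST_EXECUTE; }
static enum state execute(void)  { return ST_COMPLETE; }
static enum state complete(void) { return ST_DONE; }

int main(void)
{
	enum state state = ST_GET_REQ;

	while (state != ST_DONE) {
		printf("state = %s\n", state_name[state]);
		switch (state) {
		case ST_GET_REQ:  state = get_req();  break;
		case ST_CHECK:    state = check();    break;
		case ST_EXECUTE:  state = execute();  break;
		case ST_COMPLETE: state = complete(); break;
		case ST_DONE:     break;
		}
	}
	return 0;
}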