Lines matching refs: qp (drivers/infiniband/sw/rxe/rxe_resp.c)
80 void rxe_resp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb) in rxe_resp_queue_pkt() argument
85 skb_queue_tail(&qp->req_pkts, skb); in rxe_resp_queue_pkt()
88 (skb_queue_len(&qp->req_pkts) > 1); in rxe_resp_queue_pkt()
90 rxe_run_task(&qp->resp.task, must_sched); in rxe_resp_queue_pkt()
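rxe_resp_queue_pkt() runs the responder inline only while the request queue was empty; once a backlog builds it defers to the scheduled task. A toy userspace model of that dispatch decision (names here are illustrative, not the kernel API):

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy model of rxe_resp_queue_pkt(): process inline on an empty
     * queue, defer to a scheduled task once a backlog exists. */
    static int queue_len;

    static void run_task(bool must_sched)
    {
        if (must_sched)
            printf("schedule responder task (backlog=%d)\n", queue_len);
        else
            printf("run responder inline (backlog=%d)\n", queue_len);
    }

    static void queue_pkt(void)
    {
        bool must_sched;

        queue_len++;                    /* skb_queue_tail() */
        must_sched = queue_len > 1;     /* more than one packet pending */
        run_task(must_sched);
    }

    int main(void)
    {
        queue_pkt();   /* empty queue: runs inline */
        queue_pkt();   /* backlog: deferred */
        return 0;
    }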
93 static inline enum resp_states get_req(struct rxe_qp *qp, in get_req() argument
98 if (qp->resp.state == QP_STATE_ERROR) { in get_req()
99 while ((skb = skb_dequeue(&qp->req_pkts))) { in get_req()
100 rxe_drop_ref(qp); in get_req()
108 skb = skb_peek(&qp->req_pkts); in get_req()
114 return (qp->resp.res) ? RESPST_READ_REPLY : RESPST_CHK_PSN; in get_req()
117 static enum resp_states check_psn(struct rxe_qp *qp, in check_psn() argument
120 int diff = psn_compare(pkt->psn, qp->resp.psn); in check_psn()
121 struct rxe_dev *rxe = to_rdev(qp->ibqp.device); in check_psn()
123 switch (qp_type(qp)) { in check_psn()
126 if (qp->resp.sent_psn_nak) in check_psn()
129 qp->resp.sent_psn_nak = 1; in check_psn()
138 if (qp->resp.sent_psn_nak) in check_psn()
139 qp->resp.sent_psn_nak = 0; in check_psn()
144 if (qp->resp.drop_msg || diff != 0) { in check_psn()
146 qp->resp.drop_msg = 0; in check_psn()
150 qp->resp.drop_msg = 1; in check_psn()
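check_psn() orders packets with psn_compare(), which treats the 24-bit PSN as a circular sequence space; widening the difference into the top bits of a signed 32-bit value makes comparisons come out right across the wrap point. A standalone sketch of the same comparison:

    #include <assert.h>
    #include <stdint.h>

    #define BTH_PSN_MASK 0xffffff  /* PSNs are 24 bits wide */

    /* Circular comparison of two 24-bit PSNs: <0 if a is behind b,
     * 0 if equal, >0 if a is ahead; shifting the difference left by
     * 8 lets signed 32-bit arithmetic absorb the wraparound. */
    static int psn_compare(uint32_t psn_a, uint32_t psn_b)
    {
        return (int32_t)((psn_a - psn_b) << 8);
    }

    int main(void)
    {
        assert(psn_compare(5, 5) == 0);
        assert(psn_compare(6, 5) > 0);
        /* 0 is "ahead of" 0xffffff across the wrap point */
        assert(psn_compare(0, BTH_PSN_MASK) > 0);
        return 0;
    }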
161 static enum resp_states check_op_seq(struct rxe_qp *qp, in check_op_seq() argument
164 switch (qp_type(qp)) { in check_op_seq()
166 switch (qp->resp.opcode) { in check_op_seq()
207 switch (qp->resp.opcode) { in check_op_seq()
238 qp->resp.drop_msg = 1; in check_op_seq()
251 static enum resp_states check_op_valid(struct rxe_qp *qp, in check_op_valid() argument
254 switch (qp_type(qp)) { in check_op_valid()
257 !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_READ)) || in check_op_valid()
259 !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) || in check_op_valid()
261 !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_ATOMIC))) { in check_op_valid()
269 !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) { in check_op_valid()
270 qp->resp.drop_msg = 1; in check_op_valid()
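check_op_valid() NAKs (RC) or silently drops (UC) any operation whose required qp_access_flags bit was not granted. The test reduces to a bitmask check; the values below are illustrative, not the kernel's IB_ACCESS_* constants:

    #include <stdbool.h>
    #include <stdio.h>

    /* Access bits, mirroring the IB_ACCESS_REMOTE_* idea. */
    enum {
        ACCESS_REMOTE_READ   = 1 << 0,
        ACCESS_REMOTE_WRITE  = 1 << 1,
        ACCESS_REMOTE_ATOMIC = 1 << 2,
    };

    /* An op is valid only if every access bit it needs is granted. */
    static bool op_valid(unsigned granted, unsigned needed)
    {
        return (granted & needed) == needed;
    }

    int main(void)
    {
        unsigned qp_access = ACCESS_REMOTE_READ | ACCESS_REMOTE_WRITE;

        printf("read:   %s\n", op_valid(qp_access, ACCESS_REMOTE_READ) ? "ok" : "nak");
        printf("atomic: %s\n", op_valid(qp_access, ACCESS_REMOTE_ATOMIC) ? "ok" : "nak");
        return 0;
    }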
289 static enum resp_states get_srq_wqe(struct rxe_qp *qp) in get_srq_wqe() argument
291 struct rxe_srq *srq = qp->srq; in get_srq_wqe()
308 memcpy(&qp->resp.srq_wqe, wqe, sizeof(qp->resp.srq_wqe)); in get_srq_wqe()
310 qp->resp.wqe = &qp->resp.srq_wqe.wqe; in get_srq_wqe()
324 ev.device = qp->ibqp.device; in get_srq_wqe()
325 ev.element.srq = qp->ibqp.srq; in get_srq_wqe()
331 static enum resp_states check_resource(struct rxe_qp *qp, in check_resource() argument
334 struct rxe_srq *srq = qp->srq; in check_resource()
336 if (qp->resp.state == QP_STATE_ERROR) { in check_resource()
337 if (qp->resp.wqe) { in check_resource()
338 qp->resp.status = IB_WC_WR_FLUSH_ERR; in check_resource()
341 qp->resp.wqe = queue_head(qp->rq.queue); in check_resource()
342 if (qp->resp.wqe) { in check_resource()
343 qp->resp.status = IB_WC_WR_FLUSH_ERR; in check_resource()
358 if (likely(qp->attr.max_dest_rd_atomic > 0)) in check_resource()
366 return get_srq_wqe(qp); in check_resource()
368 qp->resp.wqe = queue_head(qp->rq.queue); in check_resource()
369 return (qp->resp.wqe) ? RESPST_CHK_LENGTH : RESPST_ERR_RNR; in check_resource()
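check_resource() either flushes, takes the next receive WQE with queue_head(), or returns RNR when the queue is empty; do_complete() and rxe_drain_req_pkts() later retire entries with advance_consumer(). The toy ring below models only that head/advance pattern; the real rxe queue lives in a buffer shared with userspace:

    #include <stdio.h>

    /* Toy single-producer/single-consumer ring, standing in for the
     * rxe receive queue that queue_head()/advance_consumer() walk. */
    #define RQ_DEPTH 4  /* power of two */

    struct ring {
        unsigned prod, cons;
        int wqe[RQ_DEPTH];
    };

    /* NULL when empty, otherwise the WQE at the consumer index. */
    static int *queue_head(struct ring *q)
    {
        return (q->prod == q->cons) ? NULL : &q->wqe[q->cons % RQ_DEPTH];
    }

    static void advance_consumer(struct ring *q)
    {
        q->cons++;
    }

    int main(void)
    {
        struct ring rq = { .prod = 2, .wqe = { 11, 22 } };
        int *wqe;

        /* check_resource(): take a WQE if present, else RNR NAK. */
        while ((wqe = queue_head(&rq))) {
            printf("consume wqe %d\n", *wqe);
            advance_consumer(&rq);
        }
        printf("queue empty: would return RESPST_ERR_RNR\n");
        return 0;
    }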
375 static enum resp_states check_length(struct rxe_qp *qp, in check_length() argument
378 switch (qp_type(qp)) { in check_length()
390 static enum resp_states check_rkey(struct rxe_qp *qp, in check_rkey() argument
398 int mtu = qp->mtu; in check_rkey()
404 qp->resp.va = reth_va(pkt); in check_rkey()
405 qp->resp.rkey = reth_rkey(pkt); in check_rkey()
406 qp->resp.resid = reth_len(pkt); in check_rkey()
407 qp->resp.length = reth_len(pkt); in check_rkey()
412 qp->resp.va = atmeth_va(pkt); in check_rkey()
413 qp->resp.rkey = atmeth_rkey(pkt); in check_rkey()
414 qp->resp.resid = sizeof(u64); in check_rkey()
427 va = qp->resp.va; in check_rkey()
428 rkey = qp->resp.rkey; in check_rkey()
429 resid = qp->resp.resid; in check_rkey()
432 mem = lookup_mem(qp->pd, access, rkey, lookup_remote); in check_rkey()
469 WARN_ON_ONCE(qp->resp.mr); in check_rkey()
471 qp->resp.mr = mem; in check_rkey()
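check_rkey() resolves the rkey to a memory region and verifies that the whole request fits inside it. A reduced sketch with a hypothetical trimmed-down MR (the driver's rxe_mem also carries page maps, state, and access bits):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical reduced MR descriptor. */
    struct mr {
        uint32_t rkey;
        uint64_t iova;    /* start of the registered range */
        uint64_t length;  /* bytes registered */
    };

    /* The essence of check_rkey(): the key must match and the whole
     * request [va, va+resid) must sit inside the registered range. */
    static bool rkey_ok(const struct mr *mr, uint32_t rkey,
                        uint64_t va, uint64_t resid)
    {
        if (mr->rkey != rkey)
            return false;
        if (va < mr->iova || resid > mr->length ||
            va > mr->iova + mr->length - resid)
            return false;
        return true;
    }

    int main(void)
    {
        struct mr mr = { .rkey = 0x1234, .iova = 0x1000, .length = 0x2000 };

        printf("%d\n", rkey_ok(&mr, 0x1234, 0x1800, 0x100)); /* 1: in range */
        printf("%d\n", rkey_ok(&mr, 0x1234, 0x2f01, 0x100)); /* 0: runs past end */
        return 0;
    }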
480 static enum resp_states send_data_in(struct rxe_qp *qp, void *data_addr, in send_data_in() argument
485 err = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE, &qp->resp.wqe->dma, in send_data_in()
494 static enum resp_states write_data_in(struct rxe_qp *qp, in write_data_in() argument
501 err = rxe_mem_copy(qp->resp.mr, qp->resp.va, payload_addr(pkt), in write_data_in()
508 qp->resp.va += data_len; in write_data_in()
509 qp->resp.resid -= data_len; in write_data_in()
518 static enum resp_states process_atomic(struct rxe_qp *qp, in process_atomic() argument
524 struct rxe_mem *mr = qp->resp.mr; in process_atomic()
541 qp->resp.atomic_orig = *vaddr; in process_atomic()
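process_atomic() saves the original 64-bit value for the ATOMIC ACK, then applies compare&swap or fetch&add in place; the kernel does this under a lock on a naturally aligned u64 in the target MR. A userspace model:

    #include <stdint.h>
    #include <stdio.h>

    /* Both IB atomics return the original value ("atomic_orig"). */
    static uint64_t atomic_orig;

    static void do_atomic(uint64_t *vaddr, int is_cmp_swp,
                          uint64_t compare, uint64_t swap_add)
    {
        atomic_orig = *vaddr;  /* value returned in the ATOMIC ACK */

        if (is_cmp_swp) {
            if (*vaddr == compare)
                *vaddr = swap_add;
        } else {
            *vaddr += swap_add;  /* fetch & add */
        }
    }

    int main(void)
    {
        uint64_t target = 100;

        do_atomic(&target, 0, 0, 5);    /* fetch & add */
        printf("orig=%llu now=%llu\n",
               (unsigned long long)atomic_orig, (unsigned long long)target);

        do_atomic(&target, 1, 105, 7);  /* cmp & swp, compare hits */
        printf("orig=%llu now=%llu\n",
               (unsigned long long)atomic_orig, (unsigned long long)target);
        return 0;
    }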
558 static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp, in prepare_ack_packet() argument
567 struct rxe_dev *rxe = to_rdev(qp->ibqp.device); in prepare_ack_packet()
581 skb = rxe_init_packet(rxe, &qp->pri_av, paylen, ack); in prepare_ack_packet()
585 ack->qp = qp; in prepare_ack_packet()
595 bth_set_qpn(ack, qp->attr.dest_qp_num); in prepare_ack_packet()
604 aeth_set_msn(ack, qp->resp.msn); in prepare_ack_packet()
608 atmack_set_orig(ack, qp->resp.atomic_orig); in prepare_ack_packet()
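prepare_ack_packet() fills the AETH, which carries an 8-bit syndrome above a 24-bit MSN. A sketch of that packing, assuming the 0x1f "unlimited credits" syndrome and omitting the driver's big-endian conversion:

    #include <stdint.h>
    #include <stdio.h>

    #define AETH_MSN_MASK 0xffffff  /* MSN occupies the low 24 bits */

    /* aeth_set_syn()/aeth_set_msn() in the driver fill these same
     * fields; this only shows the bit layout. */
    static uint32_t aeth_pack(uint8_t syndrome, uint32_t msn)
    {
        return ((uint32_t)syndrome << 24) | (msn & AETH_MSN_MASK);
    }

    int main(void)
    {
        /* e.g. an unlimited-credits ACK at MSN 42 */
        printf("aeth=0x%08x\n", aeth_pack(0x1f, 42));
        return 0;
    }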
630 static enum resp_states read_reply(struct rxe_qp *qp, in read_reply() argument
635 int mtu = qp->mtu; in read_reply()
640 struct resp_res *res = qp->resp.res; in read_reply()
648 res = &qp->resp.resources[qp->resp.res_head]; in read_reply()
650 free_rd_atomic_resource(qp, res); in read_reply()
651 rxe_advance_resp_resource(qp); in read_reply()
656 res->read.va = qp->resp.va; in read_reply()
657 res->read.va_org = qp->resp.va; in read_reply()
670 res->read.resid = qp->resp.resid; in read_reply()
671 res->read.length = qp->resp.resid; in read_reply()
672 res->read.rkey = qp->resp.rkey; in read_reply()
675 res->read.mr = qp->resp.mr; in read_reply()
676 qp->resp.mr = NULL; in read_reply()
678 qp->resp.res = res; in read_reply()
698 skb = prepare_ack_packet(qp, req_pkt, &ack_pkt, opcode, payload, in read_reply()
709 struct rxe_dev *rxe = to_rdev(qp->ibqp.device); in read_reply()
718 err = rxe_xmit_packet(qp, &ack_pkt, skb); in read_reply()
731 qp->resp.res = NULL; in read_reply()
733 qp->resp.opcode = -1; in read_reply()
734 if (psn_compare(res->cur_psn, qp->resp.psn) >= 0) in read_reply()
735 qp->resp.psn = res->cur_psn; in read_reply()
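read_reply() streams the READ response one MTU at a time, choosing a FIRST/MIDDLE/LAST/ONLY opcode by position in the message. A sketch of the fragmentation loop:

    #include <stdio.h>

    /* Each pass sends min(mtu, resid) bytes; the opcode depends on
     * whether this is the first and/or final chunk. */
    int main(void)
    {
        int mtu = 1024, resid = 2500, first = 1;

        while (resid > 0) {
            int payload = resid > mtu ? mtu : resid;
            const char *op;

            if (first && payload == resid)
                op = "READ_RESPONSE_ONLY";
            else if (first)
                op = "READ_RESPONSE_FIRST";
            else if (payload == resid)
                op = "READ_RESPONSE_LAST";
            else
                op = "READ_RESPONSE_MIDDLE";

            printf("%s: %d bytes\n", op, payload);
            resid -= payload;
            first = 0;
        }
        return 0;
    }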
757 static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt) in execute() argument
762 if (qp_type(qp) == IB_QPT_UD || in execute()
763 qp_type(qp) == IB_QPT_SMI || in execute()
764 qp_type(qp) == IB_QPT_GSI) { in execute()
769 err = send_data_in(qp, &hdr, sizeof(hdr)); in execute()
773 err = send_data_in(qp, payload_addr(pkt), payload_size(pkt)); in execute()
777 err = write_data_in(qp, pkt); in execute()
782 qp->resp.msn++; in execute()
785 err = process_atomic(qp, pkt); in execute()
794 qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK; in execute()
795 qp->resp.ack_psn = qp->resp.psn; in execute()
797 qp->resp.opcode = pkt->opcode; in execute()
798 qp->resp.status = IB_WC_SUCCESS; in execute()
802 qp->resp.msn++; in execute()
804 } else if (qp_type(qp) == IB_QPT_RC) in execute()
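execute() advances the expected PSN per packet, masked to 24 bits so it wraps, while the MSN is bumped once per completed message. A minimal illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define BTH_PSN_MASK 0xffffff

    int main(void)
    {
        uint32_t psn = BTH_PSN_MASK;  /* about to wrap */
        uint32_t msn = 7;

        psn = (psn + 1) & BTH_PSN_MASK;  /* per packet: wraps to 0 */
        msn++;                           /* only on a last/only packet */

        printf("psn=%u msn=%u\n", psn, msn);
        return 0;
    }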
810 static enum resp_states do_complete(struct rxe_qp *qp, in do_complete() argument
816 struct rxe_recv_wqe *wqe = qp->resp.wqe; in do_complete()
817 struct rxe_dev *rxe = to_rdev(qp->ibqp.device); in do_complete()
824 if (qp->rcq->is_user) { in do_complete()
825 uwc->status = qp->resp.status; in do_complete()
826 uwc->qp_num = qp->ibqp.qp_num; in do_complete()
829 wc->status = qp->resp.status; in do_complete()
830 wc->qp = &qp->ibqp; in do_complete()
842 qp->resp.length : wqe->dma.length - wqe->dma.resid; in do_complete()
847 if (qp->rcq->is_user) { in do_complete()
860 uwc->qp_num = qp->ibqp.qp_num; in do_complete()
865 uwc->port_num = qp->attr.port_num; in do_complete()
902 wc->qp = &qp->ibqp; in do_complete()
907 wc->port_num = qp->attr.port_num; in do_complete()
912 if (!qp->srq) in do_complete()
913 advance_consumer(qp->rq.queue); in do_complete()
915 qp->resp.wqe = NULL; in do_complete()
917 if (rxe_cq_post(qp->rcq, &cqe, pkt ? bth_se(pkt) : 1)) in do_complete()
920 if (qp->resp.state == QP_STATE_ERROR) in do_complete()
925 else if (qp_type(qp) == IB_QPT_RC) in do_complete()
931 static int send_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt, in send_ack() argument
938 skb = prepare_ack_packet(qp, pkt, &ack_pkt, IB_OPCODE_RC_ACKNOWLEDGE, in send_ack()
945 err = rxe_xmit_packet(qp, &ack_pkt, skb); in send_ack()
953 static int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt, in send_atomic_ack() argument
961 skb = prepare_ack_packet(qp, pkt, &ack_pkt, in send_atomic_ack()
969 res = &qp->resp.resources[qp->resp.res_head]; in send_atomic_ack()
970 free_rd_atomic_resource(qp, res); in send_atomic_ack()
971 rxe_advance_resp_resource(qp); in send_atomic_ack()
984 rc = rxe_xmit_packet(qp, &ack_pkt, skb); in send_atomic_ack()
987 rxe_drop_ref(qp); in send_atomic_ack()
993 static enum resp_states acknowledge(struct rxe_qp *qp, in acknowledge() argument
996 if (qp_type(qp) != IB_QPT_RC) in acknowledge()
999 if (qp->resp.aeth_syndrome != AETH_ACK_UNLIMITED) in acknowledge()
1000 send_ack(qp, pkt, qp->resp.aeth_syndrome, pkt->psn); in acknowledge()
1002 send_atomic_ack(qp, pkt, AETH_ACK_UNLIMITED); in acknowledge()
1004 send_ack(qp, pkt, AETH_ACK_UNLIMITED, pkt->psn); in acknowledge()
1009 static enum resp_states cleanup(struct rxe_qp *qp, in cleanup() argument
1015 skb = skb_dequeue(&qp->req_pkts); in cleanup()
1016 rxe_drop_ref(qp); in cleanup()
1020 if (qp->resp.mr) { in cleanup()
1021 rxe_drop_ref(qp->resp.mr); in cleanup()
1022 qp->resp.mr = NULL; in cleanup()
1028 static struct resp_res *find_resource(struct rxe_qp *qp, u32 psn) in find_resource() argument
1032 for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) { in find_resource()
1033 struct resp_res *res = &qp->resp.resources[i]; in find_resource()
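find_resource() linearly scans the rd_atomic resource table for the entry whose PSN range covers a duplicate packet. The sketch below uses plain comparisons for brevity; the driver uses psn_compare() so ranges match correctly across the 24-bit wrap:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical slimmed-down responder resource. */
    struct res {
        int in_use;
        uint32_t first_psn, last_psn;
    };

    static struct res *find_resource(struct res *tbl, int n, uint32_t psn)
    {
        for (int i = 0; i < n; i++) {
            struct res *res = &tbl[i];

            if (res->in_use &&
                psn >= res->first_psn && psn <= res->last_psn)
                return res;
        }
        return NULL;  /* not cached: error path for the duplicate */
    }

    int main(void)
    {
        struct res tbl[2] = { { 1, 10, 14 }, { 1, 15, 20 } };
        struct res *res = find_resource(tbl, 2, 16);

        printf("%s\n", res ? "replay from resource" : "no match");
        return 0;
    }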
1047 static enum resp_states duplicate_request(struct rxe_qp *qp, in duplicate_request() argument
1051 u32 prev_psn = (qp->resp.ack_psn - 1) & BTH_PSN_MASK; in duplicate_request()
1057 send_ack(qp, pkt, AETH_ACK_UNLIMITED, prev_psn); in duplicate_request()
1063 res = find_resource(qp, pkt->psn); in duplicate_request()
1102 qp->resp.res = res; in duplicate_request()
1110 res = find_resource(qp, pkt->psn); in duplicate_request()
1114 rc = rxe_xmit_packet(qp, pkt, res->atomic.skb); in duplicate_request()
1131 static void do_class_ac_error(struct rxe_qp *qp, u8 syndrome, in do_class_ac_error() argument
1134 qp->resp.aeth_syndrome = syndrome; in do_class_ac_error()
1135 qp->resp.status = status; in do_class_ac_error()
1138 qp->resp.goto_error = 1; in do_class_ac_error()
1141 static enum resp_states do_class_d1e_error(struct rxe_qp *qp) in do_class_d1e_error() argument
1144 if (qp->srq) { in do_class_d1e_error()
1146 qp->resp.drop_msg = 1; in do_class_d1e_error()
1147 if (qp->resp.wqe) { in do_class_d1e_error()
1148 qp->resp.status = IB_WC_REM_INV_REQ_ERR; in do_class_d1e_error()
1159 if (qp->resp.wqe) { in do_class_d1e_error()
1160 qp->resp.wqe->dma.resid = qp->resp.wqe->dma.length; in do_class_d1e_error()
1161 qp->resp.wqe->dma.cur_sge = 0; in do_class_d1e_error()
1162 qp->resp.wqe->dma.sge_offset = 0; in do_class_d1e_error()
1163 qp->resp.opcode = -1; in do_class_d1e_error()
1166 if (qp->resp.mr) { in do_class_d1e_error()
1167 rxe_drop_ref(qp->resp.mr); in do_class_d1e_error()
1168 qp->resp.mr = NULL; in do_class_d1e_error()
1175 static void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify) in rxe_drain_req_pkts() argument
1179 while ((skb = skb_dequeue(&qp->req_pkts))) { in rxe_drain_req_pkts()
1180 rxe_drop_ref(qp); in rxe_drain_req_pkts()
1187 while (!qp->srq && qp->rq.queue && queue_head(qp->rq.queue)) in rxe_drain_req_pkts()
1188 advance_consumer(qp->rq.queue); in rxe_drain_req_pkts()
1193 struct rxe_qp *qp = (struct rxe_qp *)arg; in rxe_responder() local
1194 struct rxe_dev *rxe = to_rdev(qp->ibqp.device); in rxe_responder()
1199 rxe_add_ref(qp); in rxe_responder()
1201 qp->resp.aeth_syndrome = AETH_ACK_UNLIMITED; in rxe_responder()
1203 if (!qp->valid) { in rxe_responder()
1208 switch (qp->resp.state) { in rxe_responder()
1219 pr_debug("qp#%d state = %s\n", qp_num(qp), in rxe_responder()
1223 state = get_req(qp, &pkt); in rxe_responder()
1226 state = check_psn(qp, pkt); in rxe_responder()
1229 state = check_op_seq(qp, pkt); in rxe_responder()
1232 state = check_op_valid(qp, pkt); in rxe_responder()
1235 state = check_resource(qp, pkt); in rxe_responder()
1238 state = check_length(qp, pkt); in rxe_responder()
1241 state = check_rkey(qp, pkt); in rxe_responder()
1244 state = execute(qp, pkt); in rxe_responder()
1247 state = do_complete(qp, pkt); in rxe_responder()
1250 state = read_reply(qp, pkt); in rxe_responder()
1253 state = acknowledge(qp, pkt); in rxe_responder()
1256 state = cleanup(qp, pkt); in rxe_responder()
1259 state = duplicate_request(qp, pkt); in rxe_responder()
1263 send_ack(qp, pkt, AETH_NAK_PSN_SEQ_ERROR, qp->resp.psn); in rxe_responder()
1273 do_class_ac_error(qp, AETH_NAK_INVALID_REQ, in rxe_responder()
1279 state = do_class_d1e_error(qp); in rxe_responder()
1282 if (qp_type(qp) == IB_QPT_RC) { in rxe_responder()
1285 send_ack(qp, pkt, AETH_RNR_NAK | in rxe_responder()
1287 qp->attr.min_rnr_timer), in rxe_responder()
1291 qp->resp.drop_msg = 1; in rxe_responder()
1297 if (qp_type(qp) == IB_QPT_RC) { in rxe_responder()
1299 do_class_ac_error(qp, AETH_NAK_REM_ACC_ERR, in rxe_responder()
1303 qp->resp.drop_msg = 1; in rxe_responder()
1304 if (qp->srq) { in rxe_responder()
1306 qp->resp.status = IB_WC_REM_ACCESS_ERR; in rxe_responder()
1316 if (qp_type(qp) == IB_QPT_RC) { in rxe_responder()
1318 do_class_ac_error(qp, AETH_NAK_INVALID_REQ, in rxe_responder()
1321 } else if (qp->srq) { in rxe_responder()
1323 qp->resp.status = IB_WC_REM_INV_REQ_ERR; in rxe_responder()
1327 qp->resp.drop_msg = 1; in rxe_responder()
1334 do_class_ac_error(qp, AETH_NAK_REM_OP_ERR, in rxe_responder()
1345 if (qp->resp.goto_error) { in rxe_responder()
1353 if (qp->resp.goto_error) { in rxe_responder()
1361 rxe_drain_req_pkts(qp, false); in rxe_responder()
1362 qp->resp.wqe = NULL; in rxe_responder()
1366 qp->resp.goto_error = 0; in rxe_responder()
1367 pr_warn("qp#%d moved to error state\n", qp_num(qp)); in rxe_responder()
1368 rxe_qp_error(qp); in rxe_responder()
1379 rxe_drop_ref(qp); in rxe_responder()
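rxe_responder() itself is a flat state machine: each RESPST_* handler returns the next state, and the loop dispatches until the request queue drains or an error path completes. A skeleton with only three of the states modeled:

    #include <stdio.h>

    enum resp_states { RESPST_GET_REQ, RESPST_CHK_PSN, RESPST_DONE };

    int main(void)
    {
        enum resp_states state = RESPST_GET_REQ;
        int pkts = 2;

        while (state != RESPST_DONE) {
            switch (state) {
            case RESPST_GET_REQ:
                /* exit when no packet is queued, like get_req() */
                state = pkts ? RESPST_CHK_PSN : RESPST_DONE;
                break;
            case RESPST_CHK_PSN:
                printf("process packet %d\n", pkts--);
                state = RESPST_GET_REQ;  /* loop for the next packet */
                break;
            default:
                state = RESPST_DONE;
            }
        }
        return 0;
    }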