Lines matching refs: pkt
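Judging by the function names (rxe_responder, check_psn, duplicate_request, and so on), these matches appear to come from the SoftRoCE responder, drivers/infiniband/sw/rxe/rxe_resp.c in the Linux kernel; the leading number on each match is that file's own line number, distinct from this listing's position. The short C sketches interleaved below reconstruct the logic the matches hint at; they are illustrative models under stated assumptions, not verbatim kernel code.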
83 struct rxe_pkt_info *pkt = SKB_TO_PKT(skb); in rxe_resp_queue_pkt() local
87 must_sched = (pkt->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST) || in rxe_resp_queue_pkt()
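The must_sched assignment above decides whether the responder runs inline or is deferred to its task: an incoming RC RDMA READ request always forces scheduling (the condition continuing past the || is cut off in the match; the full source appears to also consider how many packets are already queued). A minimal standalone model of that decision, with the queue-depth half included as an assumption:

    #include <stdbool.h>

    enum { IB_OPCODE_RC_RDMA_READ_REQUEST = 0x0c };  /* value per the IB spec */

    /* Model of the scheduling decision: READ requests are heavyweight
     * (they generate a multi-packet reply), so they are deferred to the
     * responder task. The queue-depth test is an assumption here, since
     * the match shows only the opcode half of the condition. */
    static bool must_schedule(int opcode, int queued_pkts)
    {
        return opcode == IB_OPCODE_RC_RDMA_READ_REQUEST || queued_pkts > 1;
    }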
118 struct rxe_pkt_info *pkt) in check_psn() argument
120 int diff = psn_compare(pkt->psn, qp->resp.psn); in check_psn()
145 if (pkt->mask & RXE_START_MASK) { in check_psn()
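psn_compare() at line 120 is a modular comparison of 24-bit packet sequence numbers, so its result has to stay meaningful across wraparound. A common way to implement it (and, from the rxe headers, apparently the one used here) shifts the 24-bit difference into the sign bit of a 32-bit integer:

    #include <stdint.h>

    /* 24-bit serial-number comparison: > 0 if psn_a is ahead of psn_b,
     * < 0 if it lags, 0 if equal. Shifting the wrapped difference left
     * by 8 moves the 24-bit sign into bit 31 of a signed 32-bit value. */
    static int32_t psn_compare_sketch(uint32_t psn_a, uint32_t psn_b)
    {
        return (int32_t)((psn_a - psn_b) << 8);
    }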
162 struct rxe_pkt_info *pkt) in check_op_seq() argument
169 switch (pkt->opcode) { in check_op_seq()
181 switch (pkt->opcode) { in check_op_seq()
191 switch (pkt->opcode) { in check_op_seq()
210 switch (pkt->opcode) { in check_op_seq()
221 switch (pkt->opcode) { in check_op_seq()
231 switch (pkt->opcode) { in check_op_seq()
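The stack of switch (pkt->opcode) statements above is check_op_seq() enforcing the IB opcode grammar: a multi-packet message must arrive as FIRST, zero or more MIDDLEs, then LAST (or stand alone as ONLY), and which opcodes are legal next depends on the previous one. Reduced to fragment classes, the rule looks like this sketch:

    #include <stdbool.h>

    enum frag { FRAG_ONLY, FRAG_FIRST, FRAG_MIDDLE, FRAG_LAST };

    /* Hypothetical reduction of check_op_seq(): given the fragment class
     * of the previous packet on this QP, may this class follow it? */
    static bool op_seq_ok(enum frag prev, enum frag cur)
    {
        switch (prev) {
        case FRAG_ONLY:
        case FRAG_LAST:       /* previous message is complete */
            return cur == FRAG_ONLY || cur == FRAG_FIRST;
        case FRAG_FIRST:
        case FRAG_MIDDLE:     /* a message is still in flight */
            return cur == FRAG_MIDDLE || cur == FRAG_LAST;
        }
        return false;
    }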
252 struct rxe_pkt_info *pkt) in check_op_valid() argument
256 if (((pkt->mask & RXE_READ_MASK) && in check_op_valid()
258 ((pkt->mask & RXE_WRITE_MASK) && in check_op_valid()
260 ((pkt->mask & RXE_ATOMIC_MASK) && in check_op_valid()
268 if ((pkt->mask & RXE_WRITE_MASK) && in check_op_valid()
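check_op_valid() pairs each operation class with the access flag that must be set on the QP: reads need remote-read permission, writes remote-write, atomics remote-atomic. A sketch of the mask tests, with illustrative bit values standing in for the real RXE_*_MASK and IB_ACCESS_* constants:

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative bits; the real values live in the rxe and ib_verbs
     * headers. */
    #define OP_READ    (1u << 0)
    #define OP_WRITE   (1u << 1)
    #define OP_ATOMIC  (1u << 2)

    #define ACC_REMOTE_READ    (1u << 0)
    #define ACC_REMOTE_WRITE   (1u << 1)
    #define ACC_REMOTE_ATOMIC  (1u << 2)

    /* An operation is rejected when its class bit is set but the
     * matching access bit on the QP is clear. */
    static bool op_permitted(uint32_t op_mask, uint32_t qp_access)
    {
        if ((op_mask & OP_READ) && !(qp_access & ACC_REMOTE_READ))
            return false;
        if ((op_mask & OP_WRITE) && !(qp_access & ACC_REMOTE_WRITE))
            return false;
        if ((op_mask & OP_ATOMIC) && !(qp_access & ACC_REMOTE_ATOMIC))
            return false;
        return true;
    }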
332 struct rxe_pkt_info *pkt) in check_resource() argument
353 if (pkt->mask & RXE_READ_OR_ATOMIC) { in check_resource()
364 if (pkt->mask & RXE_RWR_MASK) { in check_resource()
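check_resource() reserves what the operation will consume: a read or atomic takes a slot in the responder-resource array (the RXE_READ_OR_ATOMIC test at line 353), while anything flagged as requiring a receive WQE (the RXE_RWR_MASK test at line 364) needs an entry on the receive queue. A schematic version, with the failure consequences noted only as assumptions:

    #include <stdbool.h>
    #include <stdint.h>

    #define NEEDS_RES_SLOT (1u << 3)  /* illustrative stand-in bits */
    #define NEEDS_RECV_WQE (1u << 4)

    /* Hypothetical resource check; both tests can apply to one packet. */
    static bool resources_available(uint32_t mask, int free_res_slots,
                                    int recv_queue_entries)
    {
        if ((mask & NEEDS_RES_SLOT) && free_res_slots == 0)
            return false;   /* presumably NAKed as an invalid request */
        if ((mask & NEEDS_RECV_WQE) && recv_queue_entries == 0)
            return false;   /* presumably answered with an RNR NAK */
        return true;
    }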
376 struct rxe_pkt_info *pkt) in check_length() argument
391 struct rxe_pkt_info *pkt) in check_rkey() argument
402 if (pkt->mask & (RXE_READ_MASK | RXE_WRITE_MASK)) { in check_rkey()
403 if (pkt->mask & RXE_RETH_MASK) { in check_rkey()
404 qp->resp.va = reth_va(pkt); in check_rkey()
405 qp->resp.rkey = reth_rkey(pkt); in check_rkey()
406 qp->resp.resid = reth_len(pkt); in check_rkey()
407 qp->resp.length = reth_len(pkt); in check_rkey()
409 access = (pkt->mask & RXE_READ_MASK) ? IB_ACCESS_REMOTE_READ in check_rkey()
411 } else if (pkt->mask & RXE_ATOMIC_MASK) { in check_rkey()
412 qp->resp.va = atmeth_va(pkt); in check_rkey()
413 qp->resp.rkey = atmeth_rkey(pkt); in check_rkey()
421 if ((pkt->mask & (RXE_READ_MASK | RXE_WRITE_OR_SEND)) && in check_rkey()
422 (pkt->mask & RXE_RETH_MASK) && in check_rkey()
423 reth_len(pkt) == 0) { in check_rkey()
430 pktlen = payload_size(pkt); in check_rkey()
448 if (pkt->mask & RXE_WRITE_MASK) { in check_rkey()
450 if (pktlen != mtu || bth_pad(pkt)) { in check_rkey()
459 if ((bth_pad(pkt) != (0x3 & (-resid)))) { in check_rkey()
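check_rkey() pulls the remote address, rkey, and length out of the RETH (lines 404 to 407) or the atomic header (lines 412 to 413), picks the access right to verify, then validates lengths: a zero-length RETH short-circuits the rkey check (lines 421 to 423), a non-last write packet must be exactly one MTU with no padding (line 450), and the BTH pad count must equal the bytes needed to round the residual length up to a 4-byte boundary (line 459). That last expression is the subtle one; in isolation:

    #include <stdbool.h>
    #include <stdint.h>

    /* The BTH pad count covers rounding the residual length up to a
     * 4-byte boundary, and (0 - resid) & 3 is exactly that gap.
     * Example: resid == 5 gives (0 - 5) & 3 == 3 pad bytes, 5 + 3 = 8. */
    static bool pad_count_ok(uint8_t bth_pad, uint32_t resid)
    {
        return bth_pad == ((0u - resid) & 0x3);
    }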
495 struct rxe_pkt_info *pkt) in write_data_in() argument
499 int data_len = payload_size(pkt); in write_data_in()
501 err = rxe_mem_copy(qp->resp.mr, qp->resp.va, payload_addr(pkt), in write_data_in()
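write_data_in() is where an RDMA WRITE payload lands: the packet body is copied into the target memory region at the current virtual address, after which the responder's va and residual-length bookkeeping advance. A userspace stand-in with memcpy in place of rxe_mem_copy(), and va treated as a plain offset for simplicity:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* Sketch of write_data_in(): copy the payload, then advance the
     * cursor the way qp->resp.va and qp->resp.resid advance in the
     * kernel. The bounds check is defensive for this standalone sketch;
     * the kernel validates lengths earlier, in check_rkey(). */
    static int write_data_in_sketch(uint8_t *mr_base, uint64_t *va,
                                    uint32_t *resid, const void *payload,
                                    size_t data_len)
    {
        if (data_len > *resid)
            return -1;
        memcpy(mr_base + (size_t)*va, payload, data_len);
        *va    += data_len;
        *resid -= (uint32_t)data_len;
        return 0;
    }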
519 struct rxe_pkt_info *pkt) in process_atomic() argument
521 u64 iova = atmeth_va(pkt); in process_atomic()
543 if (pkt->opcode == IB_OPCODE_RC_COMPARE_SWAP || in process_atomic()
544 pkt->opcode == IB_OPCODE_RD_COMPARE_SWAP) { in process_atomic()
545 if (*vaddr == atmeth_comp(pkt)) in process_atomic()
546 *vaddr = atmeth_swap_add(pkt); in process_atomic()
548 *vaddr += atmeth_swap_add(pkt); in process_atomic()
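process_atomic() handles both RC atomics in one place: COMPARE_SWAP stores the swap value only when the target word equals the compare value, and everything else (FETCH_ADD) adds the operand. The original value also has to be preserved, since the ATOMIC ACKNOWLEDGE carries it back to the requester. A standalone model:

    #include <stdbool.h>
    #include <stdint.h>

    /* The two IB atomics on an aligned 64-bit target. The pre-operation
     * value is returned because the ATOMIC ACKNOWLEDGE must carry it
     * back; in the kernel this runs under a lock to stay atomic. */
    static uint64_t process_atomic_sketch(uint64_t *vaddr, bool is_cmp_swap,
                                          uint64_t compare, uint64_t swap_add)
    {
        uint64_t orig = *vaddr;

        if (is_cmp_swap) {
            if (orig == compare)
                *vaddr = swap_add;
        } else {
            *vaddr += swap_add;   /* FETCH_ADD */
        }
        return orig;
    }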
559 struct rxe_pkt_info *pkt, in prepare_ack_packet() argument
588 ack->offset = pkt->offset; in prepare_ack_packet()
592 memcpy(ack->hdr, pkt->hdr, pkt->offset + RXE_BTH_BYTES); in prepare_ack_packet()
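The memcpy at line 592 is the heart of prepare_ack_packet(): instead of building response headers from scratch, it clones the request's headers up through the BTH (pkt->offset bytes of encapsulation plus RXE_BTH_BYTES) and then patches the fields that differ, such as opcode and PSN. The pattern in isolation:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    #define BTH_BYTES 12   /* Base Transport Header size in the IB spec */

    /* Clone-and-patch header construction: copy the inbound header as a
     * template, then overwrite the direction-sensitive fields (the
     * patching itself is elided in this sketch). */
    static void clone_hdr_template(uint8_t *ack_hdr, const uint8_t *req_hdr,
                                   size_t offset_to_bth)
    {
        memcpy(ack_hdr, req_hdr, offset_to_bth + BTH_BYTES);
        /* ... then set the ACK opcode, the acked PSN, and so on ... */
    }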
743 struct rxe_pkt_info *pkt) in build_rdma_network_hdr() argument
745 struct sk_buff *skb = PKT_TO_SKB(pkt); in build_rdma_network_hdr()
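PKT_TO_SKB() here, and SKB_TO_PKT() back at line 83, convert with pointer arithmetic rather than a lookup: rxe keeps its per-packet metadata inside the sk_buff's control buffer (skb->cb), so each structure can be recovered from the other. A userspace analogue of the same embedding trick, assuming that layout:

    #include <stddef.h>
    #include <stdint.h>

    struct buf {
        uint8_t cb[48];      /* private scratch area, like skb->cb */
        /* ... buffer bookkeeping ... */
    };

    struct pkt_meta {
        uint32_t psn;        /* must fit inside cb[] */
    };

    /* SKB_TO_PKT analogue: the metadata lives inside the buffer. */
    static struct pkt_meta *buf_to_meta(struct buf *b)
    {
        return (struct pkt_meta *)b->cb;
    }

    /* PKT_TO_SKB analogue: recover the container from the embedded
     * field's offset, the container_of() idea. */
    static struct buf *meta_to_buf(struct pkt_meta *m)
    {
        return (struct buf *)((char *)m - offsetof(struct buf, cb));
    }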
757 static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt) in execute() argument
761 if (pkt->mask & RXE_SEND_MASK) { in execute()
767 build_rdma_network_hdr(&hdr, pkt); in execute()
773 err = send_data_in(qp, payload_addr(pkt), payload_size(pkt)); in execute()
776 } else if (pkt->mask & RXE_WRITE_MASK) { in execute()
777 err = write_data_in(qp, pkt); in execute()
780 } else if (pkt->mask & RXE_READ_MASK) { in execute()
784 } else if (pkt->mask & RXE_ATOMIC_MASK) { in execute()
785 err = process_atomic(qp, pkt); in execute()
794 qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK; in execute()
797 qp->resp.opcode = pkt->opcode; in execute()
800 if (pkt->mask & RXE_COMP_MASK) { in execute()
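execute() is the dispatch hub visible in the branches above: sends get their payload (plus, for some QP types, a synthesized network header via build_rdma_network_hdr()) delivered into the receive buffer, writes go through write_data_in(), reads hand off to the read-reply machinery, and atomics to process_atomic(). The common epilogue then advances the expected PSN modulo 2^24 (line 794) and records the opcode for the next sequence check (line 797). The PSN advance on its own:

    #include <stdint.h>

    #define BTH_PSN_MASK 0x00ffffffu   /* PSNs are 24 bits wide */

    /* Advance the expected PSN past the packet just executed, wrapping
     * at 2^24 exactly as the requester's counter does. */
    static uint32_t next_expected_psn(uint32_t pkt_psn)
    {
        return (pkt_psn + 1) & BTH_PSN_MASK;
    }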
811 struct rxe_pkt_info *pkt) in do_complete() argument
836 wc->opcode = (pkt->mask & RXE_IMMDT_MASK && in do_complete()
837 pkt->mask & RXE_WRITE_MASK) ? in do_complete()
840 wc->byte_len = (pkt->mask & RXE_IMMDT_MASK && in do_complete()
841 pkt->mask & RXE_WRITE_MASK) ? in do_complete()
850 if (pkt->mask & RXE_IMMDT_MASK) { in do_complete()
852 uwc->ex.imm_data = immdt_imm(pkt); in do_complete()
855 if (pkt->mask & RXE_IETH_MASK) { in do_complete()
857 uwc->ex.invalidate_rkey = ieth_rkey(pkt); in do_complete()
862 if (pkt->mask & RXE_DETH_MASK) in do_complete()
863 uwc->src_qp = deth_sqp(pkt); in do_complete()
867 struct sk_buff *skb = PKT_TO_SKB(pkt); in do_complete()
880 if (pkt->mask & RXE_IMMDT_MASK) { in do_complete()
882 wc->ex.imm_data = immdt_imm(pkt); in do_complete()
885 if (pkt->mask & RXE_IETH_MASK) { in do_complete()
889 wc->ex.invalidate_rkey = ieth_rkey(pkt); in do_complete()
904 if (pkt->mask & RXE_DETH_MASK) in do_complete()
905 wc->src_qp = deth_sqp(pkt); in do_complete()
917 if (rxe_cq_post(qp->rcq, &cqe, pkt ? bth_se(pkt) : 1)) in do_complete()
923 if (!pkt) in do_complete()
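do_complete() assembles the receive-side work completion, and the matches show it done twice: once for the userspace-visible layout (the uwc lines around 850 to 863) and once for the kernel ib_wc (lines 880 onward), each conditionally copying in immediate data, an invalidate rkey, or the datagram source QP. The opcode choice at lines 836 to 841 hinges on one rule, sketched here with illustrative bit values:

    #include <stdint.h>

    enum wc_opcode { WC_RECV, WC_RECV_RDMA_WITH_IMM };

    #define HAS_IMMDT (1u << 5)   /* illustrative stand-in bits */
    #define IS_WRITE  (1u << 1)

    /* An RDMA WRITE carrying immediate data consumes a receive WQE and
     * completes as RECV_RDMA_WITH_IMM; every other completion reaching
     * do_complete() is a plain RECV. */
    static enum wc_opcode recv_wc_opcode(uint32_t mask)
    {
        return ((mask & HAS_IMMDT) && (mask & IS_WRITE))
               ? WC_RECV_RDMA_WITH_IMM : WC_RECV;
    }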
931 static int send_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt, in send_ack() argument
938 skb = prepare_ack_packet(qp, pkt, &ack_pkt, IB_OPCODE_RC_ACKNOWLEDGE, in send_ack()
953 static int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt, in send_atomic_ack() argument
961 skb = prepare_ack_packet(qp, pkt, &ack_pkt, in send_atomic_ack()
962 IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE, 0, pkt->psn, in send_atomic_ack()
994 struct rxe_pkt_info *pkt) in acknowledge() argument
1000 send_ack(qp, pkt, qp->resp.aeth_syndrome, pkt->psn); in acknowledge()
1001 else if (pkt->mask & RXE_ATOMIC_MASK) in acknowledge()
1002 send_atomic_ack(qp, pkt, AETH_ACK_UNLIMITED); in acknowledge()
1003 else if (bth_ack(pkt)) in acknowledge()
1004 send_ack(qp, pkt, AETH_ACK_UNLIMITED, pkt->psn); in acknowledge()
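acknowledge() picks among the three cases shown: a pending NAK syndrome is reported first, an atomic always gets the dedicated ATOMIC ACKNOWLEDGE (which carries the original memory value), and an ordinary packet is acked only if the requester set the BTH ack-request bit; otherwise nothing is sent and the ack is coalesced into a later one. Schematically:

    #include <stdbool.h>

    enum ack_kind { ACK_NONE, ACK_NAK, ACK_ATOMIC, ACK_NORMAL };

    /* Decision order mirrored from the matches above. nak_pending stands
     * for qp->resp.aeth_syndrome holding something other than a plain
     * ack code. */
    static enum ack_kind choose_ack(bool nak_pending, bool is_atomic,
                                    bool bth_ack_requested)
    {
        if (nak_pending)
            return ACK_NAK;
        if (is_atomic)
            return ACK_ATOMIC;
        if (bth_ack_requested)
            return ACK_NORMAL;
        return ACK_NONE;   /* silent: RC acks may be coalesced */
    }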
1010 struct rxe_pkt_info *pkt) in cleanup() argument
1014 if (pkt) { in cleanup()
1048 struct rxe_pkt_info *pkt) in duplicate_request() argument
1053 if (pkt->mask & RXE_SEND_MASK || in duplicate_request()
1054 pkt->mask & RXE_WRITE_MASK) { in duplicate_request()
1056 if (bth_ack(pkt)) in duplicate_request()
1057 send_ack(qp, pkt, AETH_ACK_UNLIMITED, prev_psn); in duplicate_request()
1060 } else if (pkt->mask & RXE_READ_MASK) { in duplicate_request()
1063 res = find_resource(qp, pkt->psn); in duplicate_request()
1074 u64 iova = reth_va(pkt); in duplicate_request()
1075 u32 resid = reth_len(pkt); in duplicate_request()
1085 if (reth_rkey(pkt) != res->read.rkey) { in duplicate_request()
1090 res->cur_psn = pkt->psn; in duplicate_request()
1091 res->state = (pkt->psn == res->first_psn) ? in duplicate_request()
1110 res = find_resource(qp, pkt->psn); in duplicate_request()
1114 rc = rxe_xmit_packet(qp, pkt, res->atomic.skb); in duplicate_request()
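duplicate_request() is the retransmission path, and the branches above show the three very different treatments a duplicate gets: a duplicate send or write simply has its ack repeated against the previous PSN (lines 1056 to 1057); a duplicate read is replayed from the saved responder resource, after re-checking the rkey and narrowing the range to the retried portion (lines 1074 to 1091); and a duplicate atomic must never be re-executed, so the ack skb cached from the first execution is retransmitted verbatim (line 1114). The dispatch shape:

    #include <stdbool.h>

    enum dup_action { DUP_REACK, DUP_REPLAY_READ, DUP_RESEND_CACHED_ACK };

    /* Classifying a duplicate. Atomics are the delicate case: running
     * one twice would change memory twice, hence the cached-ack resend. */
    static enum dup_action classify_duplicate(bool is_send_or_write,
                                              bool is_read)
    {
        if (is_send_or_write)
            return DUP_REACK;
        if (is_read)
            return DUP_REPLAY_READ;
        return DUP_RESEND_CACHED_ACK;
    }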
1196 struct rxe_pkt_info *pkt = NULL; in rxe_responder() local
1223 state = get_req(qp, &pkt); in rxe_responder()
1226 state = check_psn(qp, pkt); in rxe_responder()
1229 state = check_op_seq(qp, pkt); in rxe_responder()
1232 state = check_op_valid(qp, pkt); in rxe_responder()
1235 state = check_resource(qp, pkt); in rxe_responder()
1238 state = check_length(qp, pkt); in rxe_responder()
1241 state = check_rkey(qp, pkt); in rxe_responder()
1244 state = execute(qp, pkt); in rxe_responder()
1247 state = do_complete(qp, pkt); in rxe_responder()
1250 state = read_reply(qp, pkt); in rxe_responder()
1253 state = acknowledge(qp, pkt); in rxe_responder()
1256 state = cleanup(qp, pkt); in rxe_responder()
1259 state = duplicate_request(qp, pkt); in rxe_responder()
1263 send_ack(qp, pkt, AETH_NAK_PSN_SEQ_ERROR, qp->resp.psn); in rxe_responder()
1285 send_ack(qp, pkt, AETH_RNR_NAK | in rxe_responder()
1288 pkt->psn); in rxe_responder()
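rxe_responder() ties the file together: it is a state machine in which every handler returns the next state, and the matches trace the happy path get_req, check_psn, check_op_seq, check_op_valid, check_resource, check_length, check_rkey, execute, do_complete, then read_reply or acknowledge and cleanup, with duplicate_request and the NAK sends (PSN sequence error at line 1263, RNR NAK at lines 1285 to 1288) as side exits. Stripped of the handlers, the driving loop reduces to:

    /* Skeleton of the responder state-machine loop; states are named
     * after the matches above. The intermediate check_* states are
     * collapsed into the CHK_PSN -> EXECUTE edge, and error paths
     * (NAKs, duplicate_request) are elided. */
    enum resp_st { ST_GET_REQ, ST_CHK_PSN, ST_EXECUTE, ST_COMPLETE,
                   ST_ACK, ST_CLEANUP, ST_DONE };

    static enum resp_st run_state(enum resp_st s)
    {
        switch (s) {
        case ST_GET_REQ:  return ST_CHK_PSN;
        case ST_CHK_PSN:  return ST_EXECUTE;
        case ST_EXECUTE:  return ST_COMPLETE;
        case ST_COMPLETE: return ST_ACK;
        case ST_ACK:      return ST_CLEANUP;
        default:          return ST_DONE;
        }
    }

    static void responder_sketch(void)
    {
        enum resp_st s = ST_GET_REQ;

        while (s != ST_DONE)
            s = run_state(s);
    }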