// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/skbuff.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

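/*
 * States of the responder state machine. rxe_responder() at the bottom of
 * this file steps each inbound request packet through the CHK_* validation
 * states before executing it, posting a completion and acknowledging.
 */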
enum resp_states {
	RESPST_NONE,
	RESPST_GET_REQ,
	RESPST_CHK_PSN,
	RESPST_CHK_OP_SEQ,
	RESPST_CHK_OP_VALID,
	RESPST_CHK_RESOURCE,
	RESPST_CHK_LENGTH,
	RESPST_CHK_RKEY,
	RESPST_EXECUTE,
	RESPST_READ_REPLY,
	RESPST_COMPLETE,
	RESPST_ACKNOWLEDGE,
	RESPST_CLEANUP,
	RESPST_DUPLICATE_REQUEST,
	RESPST_ERR_MALFORMED_WQE,
	RESPST_ERR_UNSUPPORTED_OPCODE,
	RESPST_ERR_MISALIGNED_ATOMIC,
	RESPST_ERR_PSN_OUT_OF_SEQ,
	RESPST_ERR_MISSING_OPCODE_FIRST,
	RESPST_ERR_MISSING_OPCODE_LAST_C,
	RESPST_ERR_MISSING_OPCODE_LAST_D1E,
	RESPST_ERR_TOO_MANY_RDMA_ATM_REQ,
	RESPST_ERR_RNR,
	RESPST_ERR_RKEY_VIOLATION,
	RESPST_ERR_INVALIDATE_RKEY,
	RESPST_ERR_LENGTH,
	RESPST_ERR_CQ_OVERFLOW,
	RESPST_ERROR,
	RESPST_RESET,
	RESPST_DONE,
	RESPST_EXIT,
};

static char *resp_state_name[] = {
	[RESPST_NONE] = "NONE",
	[RESPST_GET_REQ] = "GET_REQ",
	[RESPST_CHK_PSN] = "CHK_PSN",
	[RESPST_CHK_OP_SEQ] = "CHK_OP_SEQ",
	[RESPST_CHK_OP_VALID] = "CHK_OP_VALID",
	[RESPST_CHK_RESOURCE] = "CHK_RESOURCE",
	[RESPST_CHK_LENGTH] = "CHK_LENGTH",
	[RESPST_CHK_RKEY] = "CHK_RKEY",
	[RESPST_EXECUTE] = "EXECUTE",
	[RESPST_READ_REPLY] = "READ_REPLY",
	[RESPST_COMPLETE] = "COMPLETE",
	[RESPST_ACKNOWLEDGE] = "ACKNOWLEDGE",
	[RESPST_CLEANUP] = "CLEANUP",
	[RESPST_DUPLICATE_REQUEST] = "DUPLICATE_REQUEST",
	[RESPST_ERR_MALFORMED_WQE] = "ERR_MALFORMED_WQE",
	[RESPST_ERR_UNSUPPORTED_OPCODE] = "ERR_UNSUPPORTED_OPCODE",
	[RESPST_ERR_MISALIGNED_ATOMIC] = "ERR_MISALIGNED_ATOMIC",
	[RESPST_ERR_PSN_OUT_OF_SEQ] = "ERR_PSN_OUT_OF_SEQ",
	[RESPST_ERR_MISSING_OPCODE_FIRST] = "ERR_MISSING_OPCODE_FIRST",
	[RESPST_ERR_MISSING_OPCODE_LAST_C] = "ERR_MISSING_OPCODE_LAST_C",
	[RESPST_ERR_MISSING_OPCODE_LAST_D1E] = "ERR_MISSING_OPCODE_LAST_D1E",
	[RESPST_ERR_TOO_MANY_RDMA_ATM_REQ] = "ERR_TOO_MANY_RDMA_ATM_REQ",
	[RESPST_ERR_RNR] = "ERR_RNR",
	[RESPST_ERR_RKEY_VIOLATION] = "ERR_RKEY_VIOLATION",
	[RESPST_ERR_INVALIDATE_RKEY] = "ERR_INVALIDATE_RKEY",
	[RESPST_ERR_LENGTH] = "ERR_LENGTH",
	[RESPST_ERR_CQ_OVERFLOW] = "ERR_CQ_OVERFLOW",
	[RESPST_ERROR] = "ERROR",
	[RESPST_RESET] = "RESET",
	[RESPST_DONE] = "DONE",
	[RESPST_EXIT] = "EXIT",
};

/* rxe_recv calls here to add a request packet to the input queue */
void rxe_resp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
{
	int must_sched;
	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);

	skb_queue_tail(&qp->req_pkts, skb);

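	/* run the responder as a tasklet (rather than inline) for read
	 * requests, or whenever packets are already backed up in the queue
	 */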
	must_sched = (pkt->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST) ||
		     (skb_queue_len(&qp->req_pkts) > 1);

	rxe_run_task(&qp->resp.task, must_sched);
}

static inline enum resp_states get_req(struct rxe_qp *qp,
				       struct rxe_pkt_info **pkt_p)
{
	struct sk_buff *skb;

	if (qp->resp.state == QP_STATE_ERROR) {
		while ((skb = skb_dequeue(&qp->req_pkts))) {
			rxe_drop_ref(qp);
			kfree_skb(skb);
			ib_device_put(qp->ibqp.device);
		}

		/* go drain recv wr queue */
		return RESPST_CHK_RESOURCE;
	}

	skb = skb_peek(&qp->req_pkts);
	if (!skb)
		return RESPST_EXIT;

	*pkt_p = SKB_TO_PKT(skb);

	return (qp->resp.res) ? RESPST_READ_REPLY : RESPST_CHK_PSN;
}

static enum resp_states check_psn(struct rxe_qp *qp,
				  struct rxe_pkt_info *pkt)
{
	int diff = psn_compare(pkt->psn, qp->resp.psn);
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

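	/* diff > 0: the packet is ahead of the expected PSN (a gap in the
	 * sequence); diff < 0: it is behind the expected PSN (a duplicate)
	 */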
	switch (qp_type(qp)) {
	case IB_QPT_RC:
		if (diff > 0) {
			if (qp->resp.sent_psn_nak)
				return RESPST_CLEANUP;

			qp->resp.sent_psn_nak = 1;
			rxe_counter_inc(rxe, RXE_CNT_OUT_OF_SEQ_REQ);
			return RESPST_ERR_PSN_OUT_OF_SEQ;

		} else if (diff < 0) {
			rxe_counter_inc(rxe, RXE_CNT_DUP_REQ);
			return RESPST_DUPLICATE_REQUEST;
		}

		if (qp->resp.sent_psn_nak)
			qp->resp.sent_psn_nak = 0;

		break;

	case IB_QPT_UC:
		if (qp->resp.drop_msg || diff != 0) {
			if (pkt->mask & RXE_START_MASK) {
				qp->resp.drop_msg = 0;
				return RESPST_CHK_OP_SEQ;
			}

			qp->resp.drop_msg = 1;
			return RESPST_CLEANUP;
		}
		break;
	default:
		break;
	}

	return RESPST_CHK_OP_SEQ;
}

static enum resp_states check_op_seq(struct rxe_qp *qp,
				     struct rxe_pkt_info *pkt)
{
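	/* enforce a legal FIRST/MIDDLE/LAST opcode order within a
	 * multi-packet message, given the previous opcode received
	 */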
	switch (qp_type(qp)) {
	case IB_QPT_RC:
		switch (qp->resp.opcode) {
		case IB_OPCODE_RC_SEND_FIRST:
		case IB_OPCODE_RC_SEND_MIDDLE:
			switch (pkt->opcode) {
			case IB_OPCODE_RC_SEND_MIDDLE:
			case IB_OPCODE_RC_SEND_LAST:
			case IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE:
			case IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE:
				return RESPST_CHK_OP_VALID;
			default:
				return RESPST_ERR_MISSING_OPCODE_LAST_C;
			}

		case IB_OPCODE_RC_RDMA_WRITE_FIRST:
		case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
			switch (pkt->opcode) {
			case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
			case IB_OPCODE_RC_RDMA_WRITE_LAST:
			case IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
				return RESPST_CHK_OP_VALID;
			default:
				return RESPST_ERR_MISSING_OPCODE_LAST_C;
			}

		default:
			switch (pkt->opcode) {
			case IB_OPCODE_RC_SEND_MIDDLE:
			case IB_OPCODE_RC_SEND_LAST:
			case IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE:
			case IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE:
			case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
			case IB_OPCODE_RC_RDMA_WRITE_LAST:
			case IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
				return RESPST_ERR_MISSING_OPCODE_FIRST;
			default:
				return RESPST_CHK_OP_VALID;
			}
		}
		break;

	case IB_QPT_UC:
		switch (qp->resp.opcode) {
		case IB_OPCODE_UC_SEND_FIRST:
		case IB_OPCODE_UC_SEND_MIDDLE:
			switch (pkt->opcode) {
			case IB_OPCODE_UC_SEND_MIDDLE:
			case IB_OPCODE_UC_SEND_LAST:
			case IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE:
				return RESPST_CHK_OP_VALID;
			default:
				return RESPST_ERR_MISSING_OPCODE_LAST_D1E;
			}

		case IB_OPCODE_UC_RDMA_WRITE_FIRST:
		case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
			switch (pkt->opcode) {
			case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
			case IB_OPCODE_UC_RDMA_WRITE_LAST:
			case IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
				return RESPST_CHK_OP_VALID;
			default:
				return RESPST_ERR_MISSING_OPCODE_LAST_D1E;
			}

		default:
			switch (pkt->opcode) {
			case IB_OPCODE_UC_SEND_MIDDLE:
			case IB_OPCODE_UC_SEND_LAST:
			case IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE:
			case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
			case IB_OPCODE_UC_RDMA_WRITE_LAST:
			case IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
				qp->resp.drop_msg = 1;
				return RESPST_CLEANUP;
			default:
				return RESPST_CHK_OP_VALID;
			}
		}
		break;

	default:
		return RESPST_CHK_OP_VALID;
	}
}

static enum resp_states check_op_valid(struct rxe_qp *qp,
				       struct rxe_pkt_info *pkt)
{
	switch (qp_type(qp)) {
	case IB_QPT_RC:
		if (((pkt->mask & RXE_READ_MASK) &&
		     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_READ)) ||
		    ((pkt->mask & RXE_WRITE_MASK) &&
		     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) ||
		    ((pkt->mask & RXE_ATOMIC_MASK) &&
		     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_ATOMIC))) {
			return RESPST_ERR_UNSUPPORTED_OPCODE;
		}

		break;

	case IB_QPT_UC:
		if ((pkt->mask & RXE_WRITE_MASK) &&
		    !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) {
			qp->resp.drop_msg = 1;
			return RESPST_CLEANUP;
		}

		break;

	case IB_QPT_UD:
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		break;

	default:
		WARN_ON_ONCE(1);
		break;
	}

	return RESPST_CHK_RESOURCE;
}

static enum resp_states get_srq_wqe(struct rxe_qp *qp)
{
	struct rxe_srq *srq = qp->srq;
	struct rxe_queue *q = srq->rq.queue;
	struct rxe_recv_wqe *wqe;
	struct ib_event ev;
	unsigned int count;
	size_t size;

	if (srq->error)
		return RESPST_ERR_RNR;

	spin_lock_bh(&srq->rq.consumer_lock);

	wqe = queue_head(q, QUEUE_TYPE_FROM_CLIENT);
	if (!wqe) {
		spin_unlock_bh(&srq->rq.consumer_lock);
		return RESPST_ERR_RNR;
	}

	/* don't trust user space data */
	if (unlikely(wqe->dma.num_sge > srq->rq.max_sge)) {
		spin_unlock_bh(&srq->rq.consumer_lock);
		pr_warn("%s: invalid num_sge in SRQ entry\n", __func__);
		return RESPST_ERR_MALFORMED_WQE;
	}
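	/* copy the wqe out of the shared queue before advancing the
	 * consumer index, so the slot can safely be reused by user space
	 */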
	size = sizeof(*wqe) + wqe->dma.num_sge * sizeof(struct rxe_sge);
	memcpy(&qp->resp.srq_wqe, wqe, size);

	qp->resp.wqe = &qp->resp.srq_wqe.wqe;
	queue_advance_consumer(q, QUEUE_TYPE_FROM_CLIENT);
	count = queue_count(q, QUEUE_TYPE_FROM_CLIENT);

	if (srq->limit && srq->ibsrq.event_handler && (count < srq->limit)) {
		srq->limit = 0;
		goto event;
	}

	spin_unlock_bh(&srq->rq.consumer_lock);
	return RESPST_CHK_LENGTH;

event:
	spin_unlock_bh(&srq->rq.consumer_lock);
	ev.device = qp->ibqp.device;
	ev.element.srq = qp->ibqp.srq;
	ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
	srq->ibsrq.event_handler(&ev, srq->ibsrq.srq_context);
	return RESPST_CHK_LENGTH;
}

static enum resp_states check_resource(struct rxe_qp *qp,
				       struct rxe_pkt_info *pkt)
{
	struct rxe_srq *srq = qp->srq;

	if (qp->resp.state == QP_STATE_ERROR) {
		if (qp->resp.wqe) {
			qp->resp.status = IB_WC_WR_FLUSH_ERR;
			return RESPST_COMPLETE;
		} else if (!srq) {
			qp->resp.wqe = queue_head(qp->rq.queue,
						  QUEUE_TYPE_FROM_CLIENT);
			if (qp->resp.wqe) {
				qp->resp.status = IB_WC_WR_FLUSH_ERR;
				return RESPST_COMPLETE;
			} else {
				return RESPST_EXIT;
			}
		} else {
			return RESPST_EXIT;
		}
	}

	if (pkt->mask & RXE_READ_OR_ATOMIC) {
		/* it is the requester's job not to send too many
		 * read/atomic ops; we just recycle the responder
		 * resource queue
		 */
		if (likely(qp->attr.max_dest_rd_atomic > 0))
			return RESPST_CHK_LENGTH;
		else
			return RESPST_ERR_TOO_MANY_RDMA_ATM_REQ;
	}

	if (pkt->mask & RXE_RWR_MASK) {
		if (srq)
			return get_srq_wqe(qp);

		qp->resp.wqe = queue_head(qp->rq.queue,
					  QUEUE_TYPE_FROM_CLIENT);
		return (qp->resp.wqe) ? RESPST_CHK_LENGTH : RESPST_ERR_RNR;
	}

	return RESPST_CHK_LENGTH;
}

static enum resp_states check_length(struct rxe_qp *qp,
				     struct rxe_pkt_info *pkt)
{
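	/* no payload length checks are performed here yet; every QP type
	 * falls straight through to the rkey check
	 */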
	switch (qp_type(qp)) {
	case IB_QPT_RC:
		return RESPST_CHK_RKEY;

	case IB_QPT_UC:
		return RESPST_CHK_RKEY;

	default:
		return RESPST_CHK_RKEY;
	}
}

static enum resp_states check_rkey(struct rxe_qp *qp,
				   struct rxe_pkt_info *pkt)
{
	struct rxe_mr *mr = NULL;
	struct rxe_mw *mw = NULL;
	u64 va;
	u32 rkey;
	u32 resid;
	u32 pktlen;
	int mtu = qp->mtu;
	enum resp_states state;
	int access;

	if (pkt->mask & (RXE_READ_MASK | RXE_WRITE_MASK)) {
		if (pkt->mask & RXE_RETH_MASK) {
			qp->resp.va = reth_va(pkt);
			qp->resp.offset = 0;
			qp->resp.rkey = reth_rkey(pkt);
			qp->resp.resid = reth_len(pkt);
			qp->resp.length = reth_len(pkt);
		}
		access = (pkt->mask & RXE_READ_MASK) ? IB_ACCESS_REMOTE_READ
						     : IB_ACCESS_REMOTE_WRITE;
	} else if (pkt->mask & RXE_ATOMIC_MASK) {
		qp->resp.va = atmeth_va(pkt);
		qp->resp.offset = 0;
		qp->resp.rkey = atmeth_rkey(pkt);
		qp->resp.resid = sizeof(u64);
		access = IB_ACCESS_REMOTE_ATOMIC;
	} else {
		return RESPST_EXECUTE;
	}

	/* A zero-byte op is not required to set an addr or rkey. */
	if ((pkt->mask & (RXE_READ_MASK | RXE_WRITE_OR_SEND)) &&
	    (pkt->mask & RXE_RETH_MASK) &&
	    reth_len(pkt) == 0) {
		return RESPST_EXECUTE;
	}

	va = qp->resp.va;
	rkey = qp->resp.rkey;
	resid = qp->resp.resid;
	pktlen = payload_size(pkt);

	if (rkey_is_mw(rkey)) {
		mw = rxe_lookup_mw(qp, access, rkey);
		if (!mw) {
			pr_err("%s: no MW matches rkey %#x\n", __func__, rkey);
			state = RESPST_ERR_RKEY_VIOLATION;
			goto err;
		}

		mr = mw->mr;
		if (!mr) {
			pr_err("%s: MW doesn't have an MR\n", __func__);
			state = RESPST_ERR_RKEY_VIOLATION;
			goto err;
		}

		if (mw->access & IB_ZERO_BASED)
			qp->resp.offset = mw->addr;

		rxe_drop_ref(mw);
		rxe_add_ref(mr);
	} else {
		mr = lookup_mr(qp->pd, access, rkey, RXE_LOOKUP_REMOTE);
		if (!mr) {
			pr_err("%s: no MR matches rkey %#x\n", __func__, rkey);
			state = RESPST_ERR_RKEY_VIOLATION;
			goto err;
		}
	}

	if (mr_check_range(mr, va + qp->resp.offset, resid)) {
		state = RESPST_ERR_RKEY_VIOLATION;
		goto err;
	}

	if (pkt->mask & RXE_WRITE_MASK) {
		if (resid > mtu) {
			if (pktlen != mtu || bth_pad(pkt)) {
				state = RESPST_ERR_LENGTH;
				goto err;
			}
		} else {
			if (pktlen != resid) {
				state = RESPST_ERR_LENGTH;
				goto err;
			}
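			/* bth_pad must round resid up to a four byte
			 * boundary, i.e. it must equal (-resid) & 0x3
			 */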
			if ((bth_pad(pkt) != (0x3 & (-resid)))) {
				/* The spec does not describe this error
				 * case exactly, but nothing else fits.
				 */
				state = RESPST_ERR_LENGTH;
				goto err;
			}
		}
	}

	WARN_ON_ONCE(qp->resp.mr);

	qp->resp.mr = mr;
	return RESPST_EXECUTE;

err:
	if (mr)
		rxe_drop_ref(mr);
	if (mw)
		rxe_drop_ref(mw);

	return state;
}

static enum resp_states send_data_in(struct rxe_qp *qp, void *data_addr,
				     int data_len)
{
	int err;

	err = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE, &qp->resp.wqe->dma,
			data_addr, data_len, RXE_TO_MR_OBJ);
	if (unlikely(err))
		return (err == -ENOSPC) ? RESPST_ERR_LENGTH
					: RESPST_ERR_MALFORMED_WQE;

	return RESPST_NONE;
}

static enum resp_states write_data_in(struct rxe_qp *qp,
				      struct rxe_pkt_info *pkt)
{
	enum resp_states rc = RESPST_NONE;
	int err;
	int data_len = payload_size(pkt);

	err = rxe_mr_copy(qp->resp.mr, qp->resp.va + qp->resp.offset,
			  payload_addr(pkt), data_len, RXE_TO_MR_OBJ);
	if (err) {
		rc = RESPST_ERR_RKEY_VIOLATION;
		goto out;
	}

	qp->resp.va += data_len;
	qp->resp.resid -= data_len;

out:
	return rc;
}

/* Guarantee atomicity of atomic operations at the machine level. */
static DEFINE_SPINLOCK(atomic_ops_lock);

static enum resp_states process_atomic(struct rxe_qp *qp,
				       struct rxe_pkt_info *pkt)
{
	u64 *vaddr;
	enum resp_states ret;
	struct rxe_mr *mr = qp->resp.mr;

	if (mr->state != RXE_MR_STATE_VALID) {
		ret = RESPST_ERR_RKEY_VIOLATION;
		goto out;
	}

	vaddr = iova_to_vaddr(mr, qp->resp.va + qp->resp.offset, sizeof(u64));

	/* check that vaddr is 8-byte aligned */
	if (!vaddr || (uintptr_t)vaddr & 7) {
		ret = RESPST_ERR_MISALIGNED_ATOMIC;
		goto out;
	}

	spin_lock_bh(&atomic_ops_lock);

	qp->resp.atomic_orig = *vaddr;

	if (pkt->opcode == IB_OPCODE_RC_COMPARE_SWAP ||
	    pkt->opcode == IB_OPCODE_RD_COMPARE_SWAP) {
		if (*vaddr == atmeth_comp(pkt))
			*vaddr = atmeth_swap_add(pkt);
	} else {
		*vaddr += atmeth_swap_add(pkt);
	}

	spin_unlock_bh(&atomic_ops_lock);

	ret = RESPST_NONE;
out:
	return ret;
}

static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
					  struct rxe_pkt_info *pkt,
					  struct rxe_pkt_info *ack,
					  int opcode,
					  int payload,
					  u32 psn,
					  u8 syndrome)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct sk_buff *skb;
	int paylen;
	int pad;
	int err;

	/*
	 * allocate packet
	 */
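	/* the BTH pad brings the payload to a four byte boundary; the
	 * ICRC trailer follows the padded payload
	 */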
	pad = (-payload) & 0x3;
	paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE;

	skb = rxe_init_packet(rxe, &qp->pri_av, paylen, ack);
	if (!skb)
		return NULL;

	ack->qp = qp;
	ack->opcode = opcode;
	ack->mask = rxe_opcode[opcode].mask;
	ack->paylen = paylen;
	ack->psn = psn;

	bth_init(ack, opcode, 0, 0, pad, IB_DEFAULT_PKEY_FULL,
		 qp->attr.dest_qp_num, 0, psn);

	if (ack->mask & RXE_AETH_MASK) {
		aeth_set_syn(ack, syndrome);
		aeth_set_msn(ack, qp->resp.msn);
	}

	if (ack->mask & RXE_ATMACK_MASK)
		atmack_set_orig(ack, qp->resp.atomic_orig);

	err = rxe_prepare(ack, skb);
	if (err) {
		kfree_skb(skb);
		return NULL;
	}

	return skb;
}

/* RDMA read response. If res is not NULL, then we have a current RDMA request
 * being processed or replayed.
 */
static enum resp_states read_reply(struct rxe_qp *qp,
				   struct rxe_pkt_info *req_pkt)
{
	struct rxe_pkt_info ack_pkt;
	struct sk_buff *skb;
	int mtu = qp->mtu;
	enum resp_states state;
	int payload;
	int opcode;
	int err;
	struct resp_res *res = qp->resp.res;

	if (!res) {
		/* This is the first time we process this request. Get a
		 * resource
		 */
		res = &qp->resp.resources[qp->resp.res_head];

		free_rd_atomic_resource(qp, res);
		rxe_advance_resp_resource(qp);

		res->type = RXE_READ_MASK;
		res->replay = 0;

		res->read.va = qp->resp.va + qp->resp.offset;
		res->read.va_org = qp->resp.va + qp->resp.offset;

		res->first_psn = req_pkt->psn;

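		/* a read of len bytes spans ceil(len / mtu) response
		 * packets; last_psn is first_psn plus that count minus one
		 */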
		if (reth_len(req_pkt)) {
			res->last_psn = (req_pkt->psn +
					 (reth_len(req_pkt) + mtu - 1) /
					 mtu - 1) & BTH_PSN_MASK;
		} else {
			res->last_psn = res->first_psn;
		}
		res->cur_psn = req_pkt->psn;

		res->read.resid = qp->resp.resid;
		res->read.length = qp->resp.resid;
		res->read.rkey = qp->resp.rkey;

		/* note res inherits the reference to mr from qp */
		res->read.mr = qp->resp.mr;
		qp->resp.mr = NULL;

		qp->resp.res = res;
		res->state = rdatm_res_state_new;
	}

	if (res->state == rdatm_res_state_new) {
		if (res->read.resid <= mtu)
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY;
		else
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST;
	} else {
		if (res->read.resid > mtu)
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE;
		else
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST;
	}

	res->state = rdatm_res_state_next;

	payload = min_t(int, res->read.resid, mtu);

	skb = prepare_ack_packet(qp, req_pkt, &ack_pkt, opcode, payload,
				 res->cur_psn, AETH_ACK_UNLIMITED);
	if (!skb)
		return RESPST_ERR_RNR;

	err = rxe_mr_copy(res->read.mr, res->read.va, payload_addr(&ack_pkt),
			  payload, RXE_FROM_MR_OBJ);
	if (err)
		pr_err("Failed copying memory\n");

	if (bth_pad(&ack_pkt)) {
		u8 *pad = payload_addr(&ack_pkt) + payload;

		memset(pad, 0, bth_pad(&ack_pkt));
	}

	err = rxe_xmit_packet(qp, &ack_pkt, skb);
	if (err) {
		pr_err("Failed sending RDMA reply.\n");
		return RESPST_ERR_RNR;
	}

	res->read.va += payload;
	res->read.resid -= payload;
	res->cur_psn = (res->cur_psn + 1) & BTH_PSN_MASK;

	if (res->read.resid > 0) {
		state = RESPST_DONE;
	} else {
		qp->resp.res = NULL;
		if (!res->replay)
			qp->resp.opcode = -1;
		if (psn_compare(res->cur_psn, qp->resp.psn) >= 0)
			qp->resp.psn = res->cur_psn;
		state = RESPST_CLEANUP;
	}

	return state;
}

static int invalidate_rkey(struct rxe_qp *qp, u32 rkey)
{
	if (rkey_is_mw(rkey))
		return rxe_invalidate_mw(qp, rkey);
	else
		return rxe_invalidate_mr(qp, rkey);
}

/* Executes a new request. A retried request never reaches this function
 * (sends and writes are discarded, and reads and atomics are retried
 * elsewhere).
 */
static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
{
	enum resp_states err;
	struct sk_buff *skb = PKT_TO_SKB(pkt);
	union rdma_network_hdr hdr;

	if (pkt->mask & RXE_SEND_MASK) {
		if (qp_type(qp) == IB_QPT_UD ||
		    qp_type(qp) == IB_QPT_SMI ||
		    qp_type(qp) == IB_QPT_GSI) {
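			/* UD consumers expect a GRH at the start of the
			 * receive buffer; build one from the IP header
			 */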
			if (skb->protocol == htons(ETH_P_IP)) {
				memset(&hdr.reserved, 0,
				       sizeof(hdr.reserved));
				memcpy(&hdr.roce4grh, ip_hdr(skb),
				       sizeof(hdr.roce4grh));
				err = send_data_in(qp, &hdr, sizeof(hdr));
			} else {
				err = send_data_in(qp, ipv6_hdr(skb),
						   sizeof(hdr));
			}
			if (err)
				return err;
		}
		err = send_data_in(qp, payload_addr(pkt), payload_size(pkt));
		if (err)
			return err;
	} else if (pkt->mask & RXE_WRITE_MASK) {
		err = write_data_in(qp, pkt);
		if (err)
			return err;
	} else if (pkt->mask & RXE_READ_MASK) {
		/* For RDMA Read we can increment the msn now. See C9-148. */
		qp->resp.msn++;
		return RESPST_READ_REPLY;
	} else if (pkt->mask & RXE_ATOMIC_MASK) {
		err = process_atomic(qp, pkt);
		if (err)
			return err;
	} else {
		/* Unreachable */
		WARN_ON_ONCE(1);
	}

	if (pkt->mask & RXE_IETH_MASK) {
		u32 rkey = ieth_rkey(pkt);

		err = invalidate_rkey(qp, rkey);
		if (err)
			return RESPST_ERR_INVALIDATE_RKEY;
	}

	if (pkt->mask & RXE_END_MASK)
		/* We successfully processed this new request. */
		qp->resp.msn++;

	/* next expected psn, read handles this separately */
	qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
	qp->resp.ack_psn = qp->resp.psn;

	qp->resp.opcode = pkt->opcode;
	qp->resp.status = IB_WC_SUCCESS;

	if (pkt->mask & RXE_COMP_MASK)
		return RESPST_COMPLETE;
	else if (qp_type(qp) == IB_QPT_RC)
		return RESPST_ACKNOWLEDGE;
	else
		return RESPST_CLEANUP;
}

static enum resp_states do_complete(struct rxe_qp *qp,
				    struct rxe_pkt_info *pkt)
{
	struct rxe_cqe cqe;
	struct ib_wc *wc = &cqe.ibwc;
	struct ib_uverbs_wc *uwc = &cqe.uibwc;
	struct rxe_recv_wqe *wqe = qp->resp.wqe;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	if (!wqe)
		goto finish;

	memset(&cqe, 0, sizeof(cqe));

	if (qp->rcq->is_user) {
		uwc->status = qp->resp.status;
		uwc->qp_num = qp->ibqp.qp_num;
		uwc->wr_id = wqe->wr_id;
	} else {
		wc->status = qp->resp.status;
		wc->qp = &qp->ibqp;
		wc->wr_id = wqe->wr_id;
	}

	if (wc->status == IB_WC_SUCCESS) {
		rxe_counter_inc(rxe, RXE_CNT_RDMA_RECV);
		wc->opcode = (pkt->mask & RXE_IMMDT_MASK &&
			      pkt->mask & RXE_WRITE_MASK) ?
					IB_WC_RECV_RDMA_WITH_IMM : IB_WC_RECV;
		wc->vendor_err = 0;
		wc->byte_len = (pkt->mask & RXE_IMMDT_MASK &&
				pkt->mask & RXE_WRITE_MASK) ?
					qp->resp.length :
					wqe->dma.length - wqe->dma.resid;

		/* fields after byte_len are different between kernel and user
		 * space
		 */
		if (qp->rcq->is_user) {
			uwc->wc_flags = IB_WC_GRH;

			if (pkt->mask & RXE_IMMDT_MASK) {
				uwc->wc_flags |= IB_WC_WITH_IMM;
				uwc->ex.imm_data = immdt_imm(pkt);
			}

			if (pkt->mask & RXE_IETH_MASK) {
				uwc->wc_flags |= IB_WC_WITH_INVALIDATE;
				uwc->ex.invalidate_rkey = ieth_rkey(pkt);
			}

			uwc->qp_num = qp->ibqp.qp_num;

			if (pkt->mask & RXE_DETH_MASK)
				uwc->src_qp = deth_sqp(pkt);

			uwc->port_num = qp->attr.port_num;
		} else {
			struct sk_buff *skb = PKT_TO_SKB(pkt);

			wc->wc_flags = IB_WC_GRH | IB_WC_WITH_NETWORK_HDR_TYPE;
			if (skb->protocol == htons(ETH_P_IP))
				wc->network_hdr_type = RDMA_NETWORK_IPV4;
			else
				wc->network_hdr_type = RDMA_NETWORK_IPV6;

			if (is_vlan_dev(skb->dev)) {
				wc->wc_flags |= IB_WC_WITH_VLAN;
				wc->vlan_id = vlan_dev_vlan_id(skb->dev);
			}

			if (pkt->mask & RXE_IMMDT_MASK) {
				wc->wc_flags |= IB_WC_WITH_IMM;
				wc->ex.imm_data = immdt_imm(pkt);
			}

			if (pkt->mask & RXE_IETH_MASK) {
				wc->wc_flags |= IB_WC_WITH_INVALIDATE;
				wc->ex.invalidate_rkey = ieth_rkey(pkt);
			}

			if (pkt->mask & RXE_DETH_MASK)
				wc->src_qp = deth_sqp(pkt);

			wc->qp = &qp->ibqp;
			wc->port_num = qp->attr.port_num;
		}
	}

	/* for srq we hold a copy of the wqe; for !srq it is still in the
	 * queue, so advance the consumer index past it
	 */
	if (!qp->srq)
		queue_advance_consumer(qp->rq.queue, QUEUE_TYPE_FROM_CLIENT);

	qp->resp.wqe = NULL;

	if (rxe_cq_post(qp->rcq, &cqe, pkt ? bth_se(pkt) : 1))
		return RESPST_ERR_CQ_OVERFLOW;

finish:
	if (unlikely(qp->resp.state == QP_STATE_ERROR))
		return RESPST_CHK_RESOURCE;
	if (unlikely(!pkt))
		return RESPST_DONE;
	if (qp_type(qp) == IB_QPT_RC)
		return RESPST_ACKNOWLEDGE;
	else
		return RESPST_CLEANUP;
}

static int send_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
		    u8 syndrome, u32 psn)
{
	int err = 0;
	struct rxe_pkt_info ack_pkt;
	struct sk_buff *skb;

	skb = prepare_ack_packet(qp, pkt, &ack_pkt, IB_OPCODE_RC_ACKNOWLEDGE,
				 0, psn, syndrome);
	if (!skb) {
		err = -ENOMEM;
		goto err1;
	}

	err = rxe_xmit_packet(qp, &ack_pkt, skb);
	if (err)
		pr_err_ratelimited("Failed sending ack\n");

err1:
	return err;
}

static int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
			   u8 syndrome)
{
	int rc = 0;
	struct rxe_pkt_info ack_pkt;
	struct sk_buff *skb;
	struct resp_res *res;

	skb = prepare_ack_packet(qp, pkt, &ack_pkt,
				 IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE, 0, pkt->psn,
				 syndrome);
	if (!skb) {
		rc = -ENOMEM;
		goto out;
	}

	res = &qp->resp.resources[qp->resp.res_head];
	free_rd_atomic_resource(qp, res);
	rxe_advance_resp_resource(qp);

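	/* hold an extra reference on the skb so the ack can be resent from
	 * the responder resource if the atomic request is duplicated
	 */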
	skb_get(skb);
	res->type = RXE_ATOMIC_MASK;
	res->atomic.skb = skb;
	res->first_psn = ack_pkt.psn;
	res->last_psn = ack_pkt.psn;
	res->cur_psn = ack_pkt.psn;

	rc = rxe_xmit_packet(qp, &ack_pkt, skb);
	if (rc) {
		pr_err_ratelimited("Failed sending ack\n");
		rxe_drop_ref(qp);
	}
out:
	return rc;
}

static enum resp_states acknowledge(struct rxe_qp *qp,
				    struct rxe_pkt_info *pkt)
{
	if (qp_type(qp) != IB_QPT_RC)
		return RESPST_CLEANUP;

	if (qp->resp.aeth_syndrome != AETH_ACK_UNLIMITED)
		send_ack(qp, pkt, qp->resp.aeth_syndrome, pkt->psn);
	else if (pkt->mask & RXE_ATOMIC_MASK)
		send_atomic_ack(qp, pkt, AETH_ACK_UNLIMITED);
	else if (bth_ack(pkt))
		send_ack(qp, pkt, AETH_ACK_UNLIMITED, pkt->psn);

	return RESPST_CLEANUP;
}

static enum resp_states cleanup(struct rxe_qp *qp,
				struct rxe_pkt_info *pkt)
{
	struct sk_buff *skb;

	if (pkt) {
		skb = skb_dequeue(&qp->req_pkts);
		rxe_drop_ref(qp);
		kfree_skb(skb);
		ib_device_put(qp->ibqp.device);
	}

	if (qp->resp.mr) {
		rxe_drop_ref(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	return RESPST_DONE;
}

static struct resp_res *find_resource(struct rxe_qp *qp, u32 psn)
{
	int i;

	for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
		struct resp_res *res = &qp->resp.resources[i];

		if (res->type == 0)
			continue;

		if (psn_compare(psn, res->first_psn) >= 0 &&
		    psn_compare(psn, res->last_psn) <= 0) {
			return res;
		}
	}

	return NULL;
}

static enum resp_states duplicate_request(struct rxe_qp *qp,
					  struct rxe_pkt_info *pkt)
{
	enum resp_states rc;
	u32 prev_psn = (qp->resp.ack_psn - 1) & BTH_PSN_MASK;

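	/* duplicate sends and writes are simply re-acked with the last PSN
	 * already acknowledged; duplicate reads and atomics are replayed
	 * from the saved responder resources
	 */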
	if (pkt->mask & RXE_SEND_MASK ||
	    pkt->mask & RXE_WRITE_MASK) {
		/* Send or write. Ack again and clean up. C9-105. */
		send_ack(qp, pkt, AETH_ACK_UNLIMITED, prev_psn);
		return RESPST_CLEANUP;
	} else if (pkt->mask & RXE_READ_MASK) {
		struct resp_res *res;

		res = find_resource(qp, pkt->psn);
		if (!res) {
			/* Resource not found. Class D error. Drop the
			 * request.
			 */
			rc = RESPST_CLEANUP;
			goto out;
		} else {
			/* Ensure this new request is the same as the previous
			 * one or a subset of it.
			 */
			u64 iova = reth_va(pkt);
			u32 resid = reth_len(pkt);

			if (iova < res->read.va_org ||
			    resid > res->read.length ||
			    (iova + resid) > (res->read.va_org +
					      res->read.length)) {
				rc = RESPST_CLEANUP;
				goto out;
			}

			if (reth_rkey(pkt) != res->read.rkey) {
				rc = RESPST_CLEANUP;
				goto out;
			}

			res->cur_psn = pkt->psn;
			res->state = (pkt->psn == res->first_psn) ?
					rdatm_res_state_new :
					rdatm_res_state_replay;
			res->replay = 1;

			/* Reset the resource, except length. */
			res->read.va_org = iova;
			res->read.va = iova;
			res->read.resid = resid;

			/* Replay the RDMA read reply. */
			qp->resp.res = res;
			rc = RESPST_READ_REPLY;
			goto out;
		}
	} else {
		struct resp_res *res;

		/* Find the operation in our list of responder resources. */
		res = find_resource(qp, pkt->psn);
		if (res) {
			skb_get(res->atomic.skb);
			/* Resend the result. */
			rc = rxe_xmit_packet(qp, pkt, res->atomic.skb);
			if (rc) {
				pr_err("Failed resending result. This flow is not handled - skb ignored\n");
				rc = RESPST_CLEANUP;
				goto out;
			}
		}

		/* Resource not found. Class D error. Drop the request. */
		rc = RESPST_CLEANUP;
		goto out;
	}
out:
	return rc;
}

/* Process a class A or C error. Both are treated the same in this
 * implementation.
 */
static void do_class_ac_error(struct rxe_qp *qp, u8 syndrome,
			      enum ib_wc_status status)
{
	qp->resp.aeth_syndrome = syndrome;
	qp->resp.status = status;

	/* indicate that we should go through the ERROR state */
	qp->resp.goto_error = 1;
}

static enum resp_states do_class_d1e_error(struct rxe_qp *qp)
{
	/* UC */
	if (qp->srq) {
		/* Class E */
		qp->resp.drop_msg = 1;
		if (qp->resp.wqe) {
			qp->resp.status = IB_WC_REM_INV_REQ_ERR;
			return RESPST_COMPLETE;
		} else {
			return RESPST_CLEANUP;
		}
	} else {
		/* Class D1. This packet may be the start of a new message
		 * and could be valid. The previous message is invalid and
		 * ignored: reset the recv wr to its original state.
		 */
		if (qp->resp.wqe) {
			qp->resp.wqe->dma.resid = qp->resp.wqe->dma.length;
			qp->resp.wqe->dma.cur_sge = 0;
			qp->resp.wqe->dma.sge_offset = 0;
			qp->resp.opcode = -1;
		}

		if (qp->resp.mr) {
			rxe_drop_ref(qp->resp.mr);
			qp->resp.mr = NULL;
		}

		return RESPST_CLEANUP;
	}
}

static void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify)
{
	struct sk_buff *skb;
	struct rxe_queue *q = qp->rq.queue;

	while ((skb = skb_dequeue(&qp->req_pkts))) {
		rxe_drop_ref(qp);
		kfree_skb(skb);
		ib_device_put(qp->ibqp.device);
	}

	if (notify)
		return;

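	/* if notify is not set, silently consume any receive WQEs still
	 * posted to the queue without reporting them
	 */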
	while (!qp->srq && q && queue_head(q, q->type))
		queue_advance_consumer(q, q->type);
}

int rxe_responder(void *arg)
{
	struct rxe_qp *qp = (struct rxe_qp *)arg;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	enum resp_states state;
	struct rxe_pkt_info *pkt = NULL;
	int ret = 0;

	rxe_add_ref(qp);

	qp->resp.aeth_syndrome = AETH_ACK_UNLIMITED;

	if (!qp->valid) {
		ret = -EINVAL;
		goto done;
	}

	switch (qp->resp.state) {
	case QP_STATE_RESET:
		state = RESPST_RESET;
		break;

	default:
		state = RESPST_GET_REQ;
		break;
	}

	while (1) {
		pr_debug("qp#%d state = %s\n", qp_num(qp),
			 resp_state_name[state]);
		switch (state) {
		case RESPST_GET_REQ:
			state = get_req(qp, &pkt);
			break;
		case RESPST_CHK_PSN:
			state = check_psn(qp, pkt);
			break;
		case RESPST_CHK_OP_SEQ:
			state = check_op_seq(qp, pkt);
			break;
		case RESPST_CHK_OP_VALID:
			state = check_op_valid(qp, pkt);
			break;
		case RESPST_CHK_RESOURCE:
			state = check_resource(qp, pkt);
			break;
		case RESPST_CHK_LENGTH:
			state = check_length(qp, pkt);
			break;
		case RESPST_CHK_RKEY:
			state = check_rkey(qp, pkt);
			break;
		case RESPST_EXECUTE:
			state = execute(qp, pkt);
			break;
		case RESPST_COMPLETE:
			state = do_complete(qp, pkt);
			break;
		case RESPST_READ_REPLY:
			state = read_reply(qp, pkt);
			break;
		case RESPST_ACKNOWLEDGE:
			state = acknowledge(qp, pkt);
			break;
		case RESPST_CLEANUP:
			state = cleanup(qp, pkt);
			break;
		case RESPST_DUPLICATE_REQUEST:
			state = duplicate_request(qp, pkt);
			break;
		case RESPST_ERR_PSN_OUT_OF_SEQ:
			/* RC only - Class B. Drop packet. */
			send_ack(qp, pkt, AETH_NAK_PSN_SEQ_ERROR,
				 qp->resp.psn);
			state = RESPST_CLEANUP;
			break;

		case RESPST_ERR_TOO_MANY_RDMA_ATM_REQ:
		case RESPST_ERR_MISSING_OPCODE_FIRST:
		case RESPST_ERR_MISSING_OPCODE_LAST_C:
		case RESPST_ERR_UNSUPPORTED_OPCODE:
		case RESPST_ERR_MISALIGNED_ATOMIC:
			/* RC Only - Class C. */
			do_class_ac_error(qp, AETH_NAK_INVALID_REQ,
					  IB_WC_REM_INV_REQ_ERR);
			state = RESPST_COMPLETE;
			break;

		case RESPST_ERR_MISSING_OPCODE_LAST_D1E:
			state = do_class_d1e_error(qp);
			break;
		case RESPST_ERR_RNR:
			if (qp_type(qp) == IB_QPT_RC) {
				rxe_counter_inc(rxe, RXE_CNT_SND_RNR);
				/* RC - class B */
				send_ack(qp, pkt, AETH_RNR_NAK |
					 (~AETH_TYPE_MASK &
					 qp->attr.min_rnr_timer),
					 pkt->psn);
			} else {
				/* UD/UC - class D */
				qp->resp.drop_msg = 1;
			}
			state = RESPST_CLEANUP;
			break;

		case RESPST_ERR_RKEY_VIOLATION:
			if (qp_type(qp) == IB_QPT_RC) {
				/* Class C */
				do_class_ac_error(qp, AETH_NAK_REM_ACC_ERR,
						  IB_WC_REM_ACCESS_ERR);
				state = RESPST_COMPLETE;
			} else {
				qp->resp.drop_msg = 1;
				if (qp->srq) {
					/* UC/SRQ Class D */
					qp->resp.status = IB_WC_REM_ACCESS_ERR;
					state = RESPST_COMPLETE;
				} else {
					/* UC/non-SRQ Class E. */
					state = RESPST_CLEANUP;
				}
			}
			break;

		case RESPST_ERR_INVALIDATE_RKEY:
			/* RC - Class J. */
			qp->resp.goto_error = 1;
			qp->resp.status = IB_WC_REM_INV_REQ_ERR;
			state = RESPST_COMPLETE;
			break;

		case RESPST_ERR_LENGTH:
			if (qp_type(qp) == IB_QPT_RC) {
				/* Class C */
				do_class_ac_error(qp, AETH_NAK_INVALID_REQ,
						  IB_WC_REM_INV_REQ_ERR);
				state = RESPST_COMPLETE;
			} else if (qp->srq) {
				/* UC/UD - class E */
				qp->resp.status = IB_WC_REM_INV_REQ_ERR;
				state = RESPST_COMPLETE;
			} else {
				/* UC/UD - class D */
				qp->resp.drop_msg = 1;
				state = RESPST_CLEANUP;
			}
			break;

		case RESPST_ERR_MALFORMED_WQE:
			/* All, Class A. */
			do_class_ac_error(qp, AETH_NAK_REM_OP_ERR,
					  IB_WC_LOC_QP_OP_ERR);
			state = RESPST_COMPLETE;
			break;

		case RESPST_ERR_CQ_OVERFLOW:
			/* All - Class G */
			state = RESPST_ERROR;
			break;

		case RESPST_DONE:
			if (qp->resp.goto_error) {
				state = RESPST_ERROR;
				break;
			}

			goto done;

		case RESPST_EXIT:
			if (qp->resp.goto_error) {
				state = RESPST_ERROR;
				break;
			}

			goto exit;

		case RESPST_RESET:
			rxe_drain_req_pkts(qp, false);
			qp->resp.wqe = NULL;
			goto exit;

		case RESPST_ERROR:
			qp->resp.goto_error = 0;
			pr_warn("qp#%d moved to error state\n", qp_num(qp));
			rxe_qp_error(qp);
			goto exit;

		default:
			WARN_ON_ONCE(1);
		}
	}

exit:
	ret = -EAGAIN;
done:
	rxe_drop_ref(qp);
	return ret;
}