// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/skbuff.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_task.h"

enum comp_state {
	COMPST_GET_ACK,
	COMPST_GET_WQE,
	COMPST_COMP_WQE,
	COMPST_COMP_ACK,
	COMPST_CHECK_PSN,
	COMPST_CHECK_ACK,
	COMPST_READ,
	COMPST_ATOMIC,
	COMPST_WRITE_SEND,
	COMPST_UPDATE_COMP,
	COMPST_ERROR_RETRY,
	COMPST_RNR_RETRY,
	COMPST_ERROR,
	COMPST_EXIT, /* We have an issue, and we want to rerun the completer */
	COMPST_DONE, /* The completer finished successfully */
};

static char *comp_state_name[] = {
	[COMPST_GET_ACK] = "GET ACK",
	[COMPST_GET_WQE] = "GET WQE",
	[COMPST_COMP_WQE] = "COMP WQE",
	[COMPST_COMP_ACK] = "COMP ACK",
	[COMPST_CHECK_PSN] = "CHECK PSN",
	[COMPST_CHECK_ACK] = "CHECK ACK",
	[COMPST_READ] = "READ",
	[COMPST_ATOMIC] = "ATOMIC",
	[COMPST_WRITE_SEND] = "WRITE/SEND",
	[COMPST_UPDATE_COMP] = "UPDATE COMP",
	[COMPST_ERROR_RETRY] = "ERROR RETRY",
	[COMPST_RNR_RETRY] = "RNR RETRY",
	[COMPST_ERROR] = "ERROR",
	[COMPST_EXIT] = "EXIT",
	[COMPST_DONE] = "DONE",
};

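/* Delay in microseconds for each 5-bit RNR NAK timer encoding. Per the
 * IBTA encoding the values are not monotonic in the index: code 0
 * (IB_RNR_TIMER_655_36) is the largest delay, 655.36 ms.
 */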
static unsigned long rnrnak_usec[32] = {
	[IB_RNR_TIMER_655_36] = 655360,
	[IB_RNR_TIMER_000_01] = 10,
	[IB_RNR_TIMER_000_02] = 20,
	[IB_RNR_TIMER_000_03] = 30,
	[IB_RNR_TIMER_000_04] = 40,
	[IB_RNR_TIMER_000_06] = 60,
	[IB_RNR_TIMER_000_08] = 80,
	[IB_RNR_TIMER_000_12] = 120,
	[IB_RNR_TIMER_000_16] = 160,
	[IB_RNR_TIMER_000_24] = 240,
	[IB_RNR_TIMER_000_32] = 320,
	[IB_RNR_TIMER_000_48] = 480,
	[IB_RNR_TIMER_000_64] = 640,
	[IB_RNR_TIMER_000_96] = 960,
	[IB_RNR_TIMER_001_28] = 1280,
	[IB_RNR_TIMER_001_92] = 1920,
	[IB_RNR_TIMER_002_56] = 2560,
	[IB_RNR_TIMER_003_84] = 3840,
	[IB_RNR_TIMER_005_12] = 5120,
	[IB_RNR_TIMER_007_68] = 7680,
	[IB_RNR_TIMER_010_24] = 10240,
	[IB_RNR_TIMER_015_36] = 15360,
	[IB_RNR_TIMER_020_48] = 20480,
	[IB_RNR_TIMER_030_72] = 30720,
	[IB_RNR_TIMER_040_96] = 40960,
	[IB_RNR_TIMER_061_44] = 61440,
	[IB_RNR_TIMER_081_92] = 81920,
	[IB_RNR_TIMER_122_88] = 122880,
	[IB_RNR_TIMER_163_84] = 163840,
	[IB_RNR_TIMER_245_76] = 245760,
	[IB_RNR_TIMER_327_68] = 327680,
	[IB_RNR_TIMER_491_52] = 491520,
};

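/* Convert an RNR NAK timer encoding to jiffies, clamped to at least one
 * jiffy so the RNR NAK timer never waits a zero interval.
 */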
static inline unsigned long rnrnak_jiffies(u8 timeout)
{
	return max_t(unsigned long,
		usecs_to_jiffies(rnrnak_usec[timeout]), 1);
}

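/* Map a send work request opcode to the corresponding work completion
 * opcode; returns 0xff for opcodes that have no mapping.
 */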
static enum ib_wc_opcode wr_to_wc_opcode(enum ib_wr_opcode opcode)
{
	switch (opcode) {
	case IB_WR_RDMA_WRITE: return IB_WC_RDMA_WRITE;
	case IB_WR_RDMA_WRITE_WITH_IMM: return IB_WC_RDMA_WRITE;
	case IB_WR_SEND: return IB_WC_SEND;
	case IB_WR_SEND_WITH_IMM: return IB_WC_SEND;
	case IB_WR_RDMA_READ: return IB_WC_RDMA_READ;
	case IB_WR_ATOMIC_CMP_AND_SWP: return IB_WC_COMP_SWAP;
	case IB_WR_ATOMIC_FETCH_AND_ADD: return IB_WC_FETCH_ADD;
	case IB_WR_LSO: return IB_WC_LSO;
	case IB_WR_SEND_WITH_INV: return IB_WC_SEND;
	case IB_WR_RDMA_READ_WITH_INV: return IB_WC_RDMA_READ;
	case IB_WR_LOCAL_INV: return IB_WC_LOCAL_INV;
	case IB_WR_REG_MR: return IB_WC_REG_MR;
	case IB_WR_BIND_MW: return IB_WC_BIND_MW;

	default:
		return 0xff;
	}
}

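/* Retransmit timer callback: note that a transport timeout has occurred
 * and schedule the completer task to act on it.
 */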
void retransmit_timer(struct timer_list *t)
{
	struct rxe_qp *qp = from_timer(qp, t, retrans_timer);

	if (qp->valid) {
		qp->comp.timeout = 1;
		rxe_run_task(&qp->comp.task, 1);
	}
}

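/* Queue a response packet for the completer to consume. If packets are
 * already backed up, count the event and ask for the completer task to
 * be scheduled rather than run in the caller's context.
 */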
void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
{
	int must_sched;

	skb_queue_tail(&qp->resp_pkts, skb);

	must_sched = skb_queue_len(&qp->resp_pkts) > 1;
	if (must_sched != 0)
		rxe_counter_inc(SKB_TO_PKT(skb)->rxe, RXE_CNT_COMPLETER_SCHED);

	rxe_run_task(&qp->comp.task, must_sched);
}

static inline enum comp_state get_wqe(struct rxe_qp *qp,
				      struct rxe_pkt_info *pkt,
				      struct rxe_send_wqe **wqe_p)
{
	struct rxe_send_wqe *wqe;

	/* we come here whether or not we found a response packet to see if
	 * there are any posted WQEs
	 */
	wqe = queue_head(qp->sq.queue, QUEUE_TYPE_FROM_CLIENT);
	*wqe_p = wqe;

	/* no WQE or requester has not started it yet */
	if (!wqe || wqe->state == wqe_state_posted)
		return pkt ? COMPST_DONE : COMPST_EXIT;

	/* WQE does not require an ack */
	if (wqe->state == wqe_state_done)
		return COMPST_COMP_WQE;

	/* WQE caused an error */
	if (wqe->state == wqe_state_error)
		return COMPST_ERROR;

	/* we have a WQE, if we also have an ack check its PSN */
	return pkt ? COMPST_CHECK_PSN : COMPST_EXIT;
}

static inline void reset_retry_counters(struct rxe_qp *qp)
{
	qp->comp.retry_cnt = qp->attr.retry_cnt;
	qp->comp.rnr_retry = qp->attr.rnr_retry;
	qp->comp.started_retry = 0;
}

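/* Compare the response PSN against the oldest WQE and against the PSN
 * the completer expects next, to decide whether the packet completes
 * the WQE, duplicates an already-seen ack, or needs the full ack check.
 */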
static inline enum comp_state check_psn(struct rxe_qp *qp,
					struct rxe_pkt_info *pkt,
					struct rxe_send_wqe *wqe)
{
	s32 diff;

	/* check to see if response is past the oldest WQE. if it is, complete
	 * send/write or error read/atomic
	 */
	diff = psn_compare(pkt->psn, wqe->last_psn);
	if (diff > 0) {
		if (wqe->state == wqe_state_pending) {
			if (wqe->mask & WR_ATOMIC_OR_READ_MASK)
				return COMPST_ERROR_RETRY;

			reset_retry_counters(qp);
			return COMPST_COMP_WQE;
		} else {
			return COMPST_DONE;
		}
	}

	/* compare response packet to expected response */
	diff = psn_compare(pkt->psn, qp->comp.psn);
	if (diff < 0) {
		/* the response is most likely a retried packet; if it
		 * matches an uncompleted WQE, complete it, otherwise
		 * ignore it
		 */
		if (pkt->psn == wqe->last_psn)
			return COMPST_COMP_ACK;
		else
			return COMPST_DONE;
	} else if ((diff > 0) && (wqe->mask & WR_ATOMIC_OR_READ_MASK)) {
		return COMPST_DONE;
	} else {
		return COMPST_CHECK_ACK;
	}
}

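/* Validate a response packet against the current WQE: first verify that
 * the opcode legally follows the previous response (e.g. a read
 * response middle must follow a first or middle), then branch on the
 * opcode and AETH syndrome to a read/atomic/write-send completion, an
 * RNR retry, an error retry, or a fatal error.
 */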
static inline enum comp_state check_ack(struct rxe_qp *qp,
					struct rxe_pkt_info *pkt,
					struct rxe_send_wqe *wqe)
{
	unsigned int mask = pkt->mask;
	u8 syn;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	/* Check the sequence only */
	switch (qp->comp.opcode) {
	case -1:
		/* Will catch all *_ONLY cases. */
		if (!(mask & RXE_START_MASK))
			return COMPST_ERROR;

		break;

	case IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST:
	case IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE:
		if (pkt->opcode != IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE &&
		    pkt->opcode != IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST) {
			/* read retries of partial data may restart from
			 * read response first or response only.
			 */
			if ((pkt->psn == wqe->first_psn &&
			     pkt->opcode ==
			     IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) ||
			    (wqe->first_psn == wqe->last_psn &&
			     pkt->opcode ==
			     IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY))
				break;

			return COMPST_ERROR;
		}
		break;
	default:
		WARN_ON_ONCE(1);
	}

	/* Check operation validity. */
	switch (pkt->opcode) {
	case IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST:
	case IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST:
	case IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY:
		syn = aeth_syn(pkt);

		if ((syn & AETH_TYPE_MASK) != AETH_ACK)
			return COMPST_ERROR;

		fallthrough;
		/* (IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE doesn't have an AETH)
		 */
	case IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE:
		if (wqe->wr.opcode != IB_WR_RDMA_READ &&
		    wqe->wr.opcode != IB_WR_RDMA_READ_WITH_INV) {
			wqe->status = IB_WC_FATAL_ERR;
			return COMPST_ERROR;
		}
		reset_retry_counters(qp);
		return COMPST_READ;

	case IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE:
		syn = aeth_syn(pkt);

		if ((syn & AETH_TYPE_MASK) != AETH_ACK)
			return COMPST_ERROR;

		if (wqe->wr.opcode != IB_WR_ATOMIC_CMP_AND_SWP &&
		    wqe->wr.opcode != IB_WR_ATOMIC_FETCH_AND_ADD)
			return COMPST_ERROR;
		reset_retry_counters(qp);
		return COMPST_ATOMIC;

	case IB_OPCODE_RC_ACKNOWLEDGE:
		syn = aeth_syn(pkt);
		switch (syn & AETH_TYPE_MASK) {
		case AETH_ACK:
			reset_retry_counters(qp);
			return COMPST_WRITE_SEND;

		case AETH_RNR_NAK:
			rxe_counter_inc(rxe, RXE_CNT_RCV_RNR);
			return COMPST_RNR_RETRY;

		case AETH_NAK:
			switch (syn) {
			case AETH_NAK_PSN_SEQ_ERROR:
				/* a nak implicitly acks all packets with psns
				 * before
				 */
				if (psn_compare(pkt->psn, qp->comp.psn) > 0) {
					rxe_counter_inc(rxe,
							RXE_CNT_RCV_SEQ_ERR);
					qp->comp.psn = pkt->psn;
					if (qp->req.wait_psn) {
						qp->req.wait_psn = 0;
						rxe_run_task(&qp->req.task, 0);
					}
				}
				return COMPST_ERROR_RETRY;

			case AETH_NAK_INVALID_REQ:
				wqe->status = IB_WC_REM_INV_REQ_ERR;
				return COMPST_ERROR;

			case AETH_NAK_REM_ACC_ERR:
				wqe->status = IB_WC_REM_ACCESS_ERR;
				return COMPST_ERROR;

			case AETH_NAK_REM_OP_ERR:
				wqe->status = IB_WC_REM_OP_ERR;
				return COMPST_ERROR;

			default:
				pr_warn("unexpected nak %x\n", syn);
				wqe->status = IB_WC_REM_OP_ERR;
				return COMPST_ERROR;
			}

		default:
			return COMPST_ERROR;
		}
		break;

	default:
		pr_warn("unexpected opcode\n");
	}

	return COMPST_ERROR;
}

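/* Copy the payload of a read response packet into the local buffers
 * described by the WQE. The WQE is acked once no residual data remains
 * and the last response packet has been seen.
 */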
static inline enum comp_state do_read(struct rxe_qp *qp,
				      struct rxe_pkt_info *pkt,
				      struct rxe_send_wqe *wqe)
{
	int ret;

	ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
			&wqe->dma, payload_addr(pkt),
			payload_size(pkt), RXE_TO_MR_OBJ);
	if (ret) {
		wqe->status = IB_WC_LOC_PROT_ERR;
		return COMPST_ERROR;
	}

	if (wqe->dma.resid == 0 && (pkt->mask & RXE_END_MASK))
		return COMPST_COMP_ACK;

	return COMPST_UPDATE_COMP;
}

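/* Copy the 8-byte original value carried in an atomic ack into the
 * local buffer described by the WQE.
 */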
static inline enum comp_state do_atomic(struct rxe_qp *qp,
					struct rxe_pkt_info *pkt,
					struct rxe_send_wqe *wqe)
{
	int ret;

	u64 atomic_orig = atmack_orig(pkt);

	ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
			&wqe->dma, &atomic_orig,
			sizeof(u64), RXE_TO_MR_OBJ);
	if (ret) {
		wqe->status = IB_WC_LOC_PROT_ERR;
		return COMPST_ERROR;
	}

	return COMPST_COMP_ACK;
}

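/* Build a completion queue entry for a finished send WQE, in the kernel
 * ib_wc format for kernel consumers or the ib_uverbs_wc format for user
 * space consumers.
 */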
static void make_send_cqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
			  struct rxe_cqe *cqe)
{
	memset(cqe, 0, sizeof(*cqe));

	if (!qp->is_user) {
		struct ib_wc *wc = &cqe->ibwc;

		wc->wr_id = wqe->wr.wr_id;
		wc->status = wqe->status;
		wc->opcode = wr_to_wc_opcode(wqe->wr.opcode);
		if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
		    wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
			wc->wc_flags = IB_WC_WITH_IMM;
		wc->byte_len = wqe->dma.length;
		wc->qp = &qp->ibqp;
	} else {
		struct ib_uverbs_wc *uwc = &cqe->uibwc;

		uwc->wr_id = wqe->wr.wr_id;
		uwc->status = wqe->status;
		uwc->opcode = wr_to_wc_opcode(wqe->wr.opcode);
		if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
		    wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
			uwc->wc_flags = IB_WC_WITH_IMM;
		uwc->byte_len = wqe->dma.length;
		uwc->qp_num = qp->ibqp.qp_num;
	}
}

/*
 * IBA Spec. Section 10.7.3.1 SIGNALED COMPLETIONS
 * ---------8<---------8<-------------
 * ...Note that if a completion error occurs, a Work Completion
 * will always be generated, even if the signaling
 * indicator requests an Unsignaled Completion.
 * ---------8<---------8<-------------
 */
static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct rxe_cqe cqe;
	bool post;

	/* do we need to post a completion */
	post = ((qp->sq_sig_type == IB_SIGNAL_ALL_WR) ||
		(wqe->wr.send_flags & IB_SEND_SIGNALED) ||
		wqe->status != IB_WC_SUCCESS);

	if (post)
		make_send_cqe(qp, wqe, &cqe);

	queue_advance_consumer(qp->sq.queue, QUEUE_TYPE_FROM_CLIENT);

	if (post)
		rxe_cq_post(qp->scq, &cqe, 0);

	if (wqe->wr.opcode == IB_WR_SEND ||
	    wqe->wr.opcode == IB_WR_SEND_WITH_IMM ||
	    wqe->wr.opcode == IB_WR_SEND_WITH_INV)
		rxe_counter_inc(rxe, RXE_CNT_RDMA_SEND);

	/*
	 * we completed something so let req run again
	 * if it is trying to fence
	 */
	if (qp->req.wait_fence) {
		qp->req.wait_fence = 0;
		rxe_run_task(&qp->req.task, 0);
	}
}

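/* Complete a fully acked WQE: return its read/atomic credit to the
 * requester, finish a pending SQ drain transition (raising
 * IB_EVENT_SQ_DRAINED) once all posted PSNs are acked, and post the
 * completion.
 */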
static inline enum comp_state complete_ack(struct rxe_qp *qp,
					   struct rxe_pkt_info *pkt,
					   struct rxe_send_wqe *wqe)
{
	unsigned long flags;

	if (wqe->has_rd_atomic) {
		wqe->has_rd_atomic = 0;
		atomic_inc(&qp->req.rd_atomic);
		if (qp->req.need_rd_atomic) {
			qp->comp.timeout_retry = 0;
			qp->req.need_rd_atomic = 0;
			rxe_run_task(&qp->req.task, 0);
		}
	}

	if (unlikely(qp->req.state == QP_STATE_DRAIN)) {
		/* state_lock used by requester & completer */
		spin_lock_irqsave(&qp->state_lock, flags);
		if ((qp->req.state == QP_STATE_DRAIN) &&
		    (qp->comp.psn == qp->req.psn)) {
			qp->req.state = QP_STATE_DRAINED;
			spin_unlock_irqrestore(&qp->state_lock, flags);

			if (qp->ibqp.event_handler) {
				struct ib_event ev;

				ev.device = qp->ibqp.device;
				ev.element.qp = &qp->ibqp;
				ev.event = IB_EVENT_SQ_DRAINED;
				qp->ibqp.event_handler(&ev,
					qp->ibqp.qp_context);
			}
		} else {
			spin_unlock_irqrestore(&qp->state_lock, flags);
		}
	}

	do_complete(qp, wqe);

	if (psn_compare(pkt->psn, qp->comp.psn) >= 0)
		return COMPST_UPDATE_COMP;
	else
		return COMPST_DONE;
}

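/* Complete a WQE that does not require an ack, advancing the completer
 * PSN past its last PSN and waking the requester if it is waiting on
 * that PSN.
 */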
static inline enum comp_state complete_wqe(struct rxe_qp *qp,
					   struct rxe_pkt_info *pkt,
					   struct rxe_send_wqe *wqe)
{
	if (pkt && wqe->state == wqe_state_pending) {
		if (psn_compare(wqe->last_psn, qp->comp.psn) >= 0) {
			qp->comp.psn = (wqe->last_psn + 1) & BTH_PSN_MASK;
			qp->comp.opcode = -1;
		}

		if (qp->req.wait_psn) {
			qp->req.wait_psn = 0;
			rxe_run_task(&qp->req.task, 1);
		}
	}

	do_complete(qp, wqe);

	return COMPST_GET_WQE;
}

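/* Drop all queued response packets and flush outstanding send WQEs,
 * used when the QP is reset or enters the error state. When notify is
 * true each WQE is completed with IB_WC_WR_FLUSH_ERR; otherwise the
 * WQEs are retired silently.
 */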
static void rxe_drain_resp_pkts(struct rxe_qp *qp, bool notify)
{
	struct sk_buff *skb;
	struct rxe_send_wqe *wqe;
	struct rxe_queue *q = qp->sq.queue;

	while ((skb = skb_dequeue(&qp->resp_pkts))) {
		rxe_drop_ref(qp);
		kfree_skb(skb);
		ib_device_put(qp->ibqp.device);
	}

	while ((wqe = queue_head(q, q->type))) {
		if (notify) {
			wqe->status = IB_WC_WR_FLUSH_ERR;
			do_complete(qp, wqe);
		} else {
			queue_advance_consumer(q, q->type);
		}
	}
}

static void free_pkt(struct rxe_pkt_info *pkt)
{
	struct sk_buff *skb = PKT_TO_SKB(pkt);
	struct rxe_qp *qp = pkt->qp;
	struct ib_device *dev = qp->ibqp.device;

	kfree_skb(skb);
	rxe_drop_ref(qp);
	ib_device_put(dev);
}

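/* The completer task: a state machine that consumes response packets
 * and posted send WQEs, generating completions, retries and RNR backoff
 * as needed. Returns 0 when more work may be pending and -EAGAIN when
 * the completer should go idle; the exact rerun semantics are defined
 * by the rxe task framework (see rxe_task.c).
 */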
int rxe_completer(void *arg)
{
	struct rxe_qp *qp = (struct rxe_qp *)arg;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct rxe_send_wqe *wqe = NULL;
	struct sk_buff *skb = NULL;
	struct rxe_pkt_info *pkt = NULL;
	enum comp_state state;
	int ret = 0;

	rxe_add_ref(qp);

	if (!qp->valid || qp->req.state == QP_STATE_ERROR ||
	    qp->req.state == QP_STATE_RESET) {
		rxe_drain_resp_pkts(qp, qp->valid &&
				    qp->req.state == QP_STATE_ERROR);
		ret = -EAGAIN;
		goto done;
	}

	if (qp->comp.timeout) {
		qp->comp.timeout_retry = 1;
		qp->comp.timeout = 0;
	} else {
		qp->comp.timeout_retry = 0;
	}

	if (qp->req.need_retry) {
		ret = -EAGAIN;
		goto done;
	}

	state = COMPST_GET_ACK;

	while (1) {
		pr_debug("qp#%d state = %s\n", qp_num(qp),
			 comp_state_name[state]);
		switch (state) {
		case COMPST_GET_ACK:
			skb = skb_dequeue(&qp->resp_pkts);
			if (skb) {
				pkt = SKB_TO_PKT(skb);
				qp->comp.timeout_retry = 0;
			}
			state = COMPST_GET_WQE;
			break;

		case COMPST_GET_WQE:
			state = get_wqe(qp, pkt, &wqe);
			break;

		case COMPST_CHECK_PSN:
			state = check_psn(qp, pkt, wqe);
			break;

		case COMPST_CHECK_ACK:
			state = check_ack(qp, pkt, wqe);
			break;

		case COMPST_READ:
			state = do_read(qp, pkt, wqe);
			break;

		case COMPST_ATOMIC:
			state = do_atomic(qp, pkt, wqe);
			break;

		case COMPST_WRITE_SEND:
			if (wqe->state == wqe_state_pending &&
			    wqe->last_psn == pkt->psn)
				state = COMPST_COMP_ACK;
			else
				state = COMPST_UPDATE_COMP;
			break;

		case COMPST_COMP_ACK:
			state = complete_ack(qp, pkt, wqe);
			break;

		case COMPST_COMP_WQE:
			state = complete_wqe(qp, pkt, wqe);
			break;

		case COMPST_UPDATE_COMP:
			if (pkt->mask & RXE_END_MASK)
				qp->comp.opcode = -1;
			else
				qp->comp.opcode = pkt->opcode;

			if (psn_compare(pkt->psn, qp->comp.psn) >= 0)
				qp->comp.psn = (pkt->psn + 1) & BTH_PSN_MASK;

			if (qp->req.wait_psn) {
				qp->req.wait_psn = 0;
				rxe_run_task(&qp->req.task, 1);
			}

			state = COMPST_DONE;
			break;

		case COMPST_DONE:
			goto done;

		case COMPST_EXIT:
			if (qp->comp.timeout_retry && wqe) {
				state = COMPST_ERROR_RETRY;
				break;
			}

			/* reset the retransmit timer if
			 * (1) QP is type RC
			 * (2) the QP is alive
			 * (3) there is a packet sent by the requester that
			 *     might be acked (we still might get spurious
			 *     timeouts but try to keep them as few as possible)
			 * (4) the timeout parameter is set
			 */
			if ((qp_type(qp) == IB_QPT_RC) &&
			    (qp->req.state == QP_STATE_READY) &&
			    (psn_compare(qp->req.psn, qp->comp.psn) > 0) &&
			    qp->qp_timeout_jiffies)
				mod_timer(&qp->retrans_timer,
					  jiffies + qp->qp_timeout_jiffies);
			ret = -EAGAIN;
			goto done;

		case COMPST_ERROR_RETRY:
			/* we come here if the retry timer fired and we did
			 * not receive a response packet. try to retry the send
			 * queue if that makes sense and the limits have not
			 * been exceeded. remember that some timeouts are
			 * spurious since we do not reset the timer but kick
			 * it down the road or let it expire
			 */

			/* there is nothing to retry in this case */
			if (!wqe || (wqe->state == wqe_state_posted)) {
				ret = -EAGAIN;
				goto done;
			}

			/* if we've started a retry, don't start another
			 * retry sequence, unless this is a timeout.
			 */
			if (qp->comp.started_retry &&
			    !qp->comp.timeout_retry)
				goto done;

			if (qp->comp.retry_cnt > 0) {
				if (qp->comp.retry_cnt != 7)
					qp->comp.retry_cnt--;

				/* no point in retrying if we have already
				 * seen the last ack that the requester could
				 * have caused
				 */
				if (psn_compare(qp->req.psn,
						qp->comp.psn) > 0) {
					/* tell the requester to retry the
					 * send queue next time around
					 */
					rxe_counter_inc(rxe,
							RXE_CNT_COMP_RETRY);
					qp->req.need_retry = 1;
					qp->comp.started_retry = 1;
					rxe_run_task(&qp->req.task, 0);
				}
				goto done;

			} else {
				rxe_counter_inc(rxe, RXE_CNT_RETRY_EXCEEDED);
				wqe->status = IB_WC_RETRY_EXC_ERR;
				state = COMPST_ERROR;
			}
			break;

		case COMPST_RNR_RETRY:
			if (qp->comp.rnr_retry > 0) {
				if (qp->comp.rnr_retry != 7)
					qp->comp.rnr_retry--;

				qp->req.need_retry = 1;
				pr_debug("qp#%d set rnr nak timer\n",
					 qp_num(qp));
				mod_timer(&qp->rnr_nak_timer,
					  jiffies + rnrnak_jiffies(aeth_syn(pkt)
						& ~AETH_TYPE_MASK));
				ret = -EAGAIN;
				goto done;
			} else {
				rxe_counter_inc(rxe,
						RXE_CNT_RNR_RETRY_EXCEEDED);
				wqe->status = IB_WC_RNR_RETRY_EXC_ERR;
				state = COMPST_ERROR;
			}
			break;

		case COMPST_ERROR:
			WARN_ON_ONCE(wqe->status == IB_WC_SUCCESS);
			do_complete(qp, wqe);
			rxe_qp_error(qp);
			ret = -EAGAIN;
			goto done;
		}
	}

done:
	if (pkt)
		free_pkt(pkt);
	rxe_drop_ref(qp);

	return ret;
}