// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/skbuff.h>
#include <crypto/hash.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
		       u32 opcode);

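/* Resend the start of a partially acknowledged send/write wqe: advance the
 * dma state (and, for RDMA writes, the iova) past the npsn packets the
 * responder has already acknowledged so only the remainder is resent.
 */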
static inline void retry_first_write_send(struct rxe_qp *qp,
					  struct rxe_send_wqe *wqe,
					  unsigned int mask, int npsn)
{
	int i;

	for (i = 0; i < npsn; i++) {
		int to_send = (wqe->dma.resid > qp->mtu) ?
				qp->mtu : wqe->dma.resid;

		qp->req.opcode = next_opcode(qp, wqe,
					     wqe->wr.opcode);

		if (wqe->wr.send_flags & IB_SEND_INLINE) {
			wqe->dma.resid -= to_send;
			wqe->dma.sge_offset += to_send;
		} else {
			advance_dma_data(&wqe->dma, to_send);
		}
		if (mask & WR_WRITE_MASK)
			wqe->iova += qp->mtu;
	}
}

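/* Rewind the requester to the oldest unacknowledged wqe and reset the psn
 * and dma state of every wqe from there on so those packets can be resent.
 */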
static void req_retry(struct rxe_qp *qp)
{
	struct rxe_send_wqe *wqe;
	unsigned int wqe_index;
	unsigned int mask;
	int npsn;
	int first = 1;
	struct rxe_queue *q = qp->sq.queue;
	unsigned int cons;
	unsigned int prod;

	cons = queue_get_consumer(q, QUEUE_TYPE_FROM_CLIENT);
	prod = queue_get_producer(q, QUEUE_TYPE_FROM_CLIENT);

	qp->req.wqe_index = cons;
	qp->req.psn = qp->comp.psn;
	qp->req.opcode = -1;

	for (wqe_index = cons; wqe_index != prod;
			wqe_index = queue_next_index(q, wqe_index)) {
		wqe = queue_addr_from_index(qp->sq.queue, wqe_index);
		mask = wr_opcode_mask(wqe->wr.opcode, qp);

		if (wqe->state == wqe_state_posted)
			break;

		if (wqe->state == wqe_state_done)
			continue;

		wqe->iova = (mask & WR_ATOMIC_MASK) ?
			     wqe->wr.wr.atomic.remote_addr :
			     (mask & WR_READ_OR_WRITE_MASK) ?
			     wqe->wr.wr.rdma.remote_addr :
			     0;

		if (!first || (mask & WR_READ_MASK) == 0) {
			wqe->dma.resid = wqe->dma.length;
			wqe->dma.cur_sge = 0;
			wqe->dma.sge_offset = 0;
		}

		if (first) {
			first = 0;

			if (mask & WR_WRITE_OR_SEND_MASK) {
				npsn = (qp->comp.psn - wqe->first_psn) &
					BTH_PSN_MASK;
				retry_first_write_send(qp, wqe, mask, npsn);
			}

			if (mask & WR_READ_MASK) {
				npsn = (wqe->dma.length - wqe->dma.resid) /
					qp->mtu;
				wqe->iova += npsn * qp->mtu;
			}
		}

		wqe->state = wqe_state_posted;
	}
}

void rnr_nak_timer(struct timer_list *t)
{
	struct rxe_qp *qp = from_timer(qp, t, rnr_nak_timer);

	pr_debug("qp#%d rnr nak timer fired\n", qp_num(qp));
	rxe_run_task(&qp->req.task, 1);
}

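/* Return the next wqe that is ready for the requester to process, or NULL
 * if there is nothing to do. Also completes the SQ drain transition and
 * honors the fence flag.
 */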
static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
{
	struct rxe_send_wqe *wqe;
	unsigned long flags;
	struct rxe_queue *q = qp->sq.queue;
	unsigned int index = qp->req.wqe_index;
	unsigned int cons;
	unsigned int prod;

	wqe = queue_head(q, QUEUE_TYPE_FROM_CLIENT);
	cons = queue_get_consumer(q, QUEUE_TYPE_FROM_CLIENT);
	prod = queue_get_producer(q, QUEUE_TYPE_FROM_CLIENT);

	if (unlikely(qp->req.state == QP_STATE_DRAIN)) {
		/* check to see if we are drained;
		 * state_lock used by requester and completer
		 */
		spin_lock_irqsave(&qp->state_lock, flags);
		do {
			if (qp->req.state != QP_STATE_DRAIN) {
				/* comp just finished */
				spin_unlock_irqrestore(&qp->state_lock,
						       flags);
				break;
			}

			if (wqe && ((index != cons) ||
				    (wqe->state != wqe_state_posted))) {
				/* comp not done yet */
				spin_unlock_irqrestore(&qp->state_lock,
						       flags);
				break;
			}

			qp->req.state = QP_STATE_DRAINED;
			spin_unlock_irqrestore(&qp->state_lock, flags);

			if (qp->ibqp.event_handler) {
				struct ib_event ev;

				ev.device = qp->ibqp.device;
				ev.element.qp = &qp->ibqp;
				ev.event = IB_EVENT_SQ_DRAINED;
				qp->ibqp.event_handler(&ev,
					qp->ibqp.qp_context);
			}
		} while (0);
	}

	if (index == prod)
		return NULL;

	wqe = queue_addr_from_index(q, index);

	if (unlikely((qp->req.state == QP_STATE_DRAIN ||
		      qp->req.state == QP_STATE_DRAINED) &&
		     (wqe->state != wqe_state_processing)))
		return NULL;

	if (unlikely((wqe->wr.send_flags & IB_SEND_FENCE) &&
		     (index != cons))) {
		qp->req.wait_fence = 1;
		return NULL;
	}

	wqe->mask = wr_opcode_mask(wqe->wr.opcode, qp);
	return wqe;
}

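/* Pick the RC opcode for the next packet of a work request based on the
 * opcode of the previous packet sent (first/middle) and on whether the
 * remaining payload fits into a single MTU (fits).
 */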
static int next_opcode_rc(struct rxe_qp *qp, u32 opcode, int fits)
{
	switch (opcode) {
	case IB_WR_RDMA_WRITE:
		if (qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_LAST :
				IB_OPCODE_RC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_ONLY :
				IB_OPCODE_RC_RDMA_WRITE_FIRST;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE :
				IB_OPCODE_RC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_RC_RDMA_WRITE_FIRST;

	case IB_WR_SEND:
		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_RC_SEND_LAST :
				IB_OPCODE_RC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_SEND_ONLY :
				IB_OPCODE_RC_SEND_FIRST;

	case IB_WR_SEND_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE :
				IB_OPCODE_RC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_RC_SEND_FIRST;

	case IB_WR_RDMA_READ:
		return IB_OPCODE_RC_RDMA_READ_REQUEST;

	case IB_WR_ATOMIC_CMP_AND_SWP:
		return IB_OPCODE_RC_COMPARE_SWAP;

	case IB_WR_ATOMIC_FETCH_AND_ADD:
		return IB_OPCODE_RC_FETCH_ADD;

	case IB_WR_SEND_WITH_INV:
		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
			return fits ? IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE :
				IB_OPCODE_RC_SEND_MIDDLE;
		else
			return fits ? IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE :
				IB_OPCODE_RC_SEND_FIRST;
	case IB_WR_REG_MR:
	case IB_WR_LOCAL_INV:
		return opcode;
	}

	return -EINVAL;
}

static int next_opcode_uc(struct rxe_qp *qp, u32 opcode, int fits)
{
	switch (opcode) {
	case IB_WR_RDMA_WRITE:
		if (qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_LAST :
				IB_OPCODE_UC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_ONLY :
				IB_OPCODE_UC_RDMA_WRITE_FIRST;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE :
				IB_OPCODE_UC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_UC_RDMA_WRITE_FIRST;

	case IB_WR_SEND:
		if (qp->req.opcode == IB_OPCODE_UC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_UC_SEND_LAST :
				IB_OPCODE_UC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_SEND_ONLY :
				IB_OPCODE_UC_SEND_FIRST;

	case IB_WR_SEND_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_UC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE :
				IB_OPCODE_UC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_UC_SEND_FIRST;
	}

	return -EINVAL;
}

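/* Dispatch opcode selection by QP type. UD/SMI/GSI QPs only ever emit
 * single packet sends.
 */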
static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
		       u32 opcode)
{
	int fits = (wqe->dma.resid <= qp->mtu);

	switch (qp_type(qp)) {
	case IB_QPT_RC:
		return next_opcode_rc(qp, opcode, fits);

	case IB_QPT_UC:
		return next_opcode_uc(qp, opcode, fits);

	case IB_QPT_SMI:
	case IB_QPT_UD:
	case IB_QPT_GSI:
		switch (opcode) {
		case IB_WR_SEND:
			return IB_OPCODE_UD_SEND_ONLY;

		case IB_WR_SEND_WITH_IMM:
			return IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
		}
		break;

	default:
		break;
	}

	return -EINVAL;
}

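/* Claim one slot of the outstanding RDMA read/atomic budget for this wqe.
 * Returns -EAGAIN when the initiator depth is exhausted so the requester
 * backs off until a response releases a slot.
 */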
static inline int check_init_depth(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	int depth;

	if (wqe->has_rd_atomic)
		return 0;

	qp->req.need_rd_atomic = 1;
	depth = atomic_dec_return(&qp->req.rd_atomic);

	if (depth >= 0) {
		qp->req.need_rd_atomic = 0;
		wqe->has_rd_atomic = 1;
		return 0;
	}

	atomic_inc(&qp->req.rd_atomic);
	return -EAGAIN;
}

static inline int get_mtu(struct rxe_qp *qp)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	if ((qp_type(qp) == IB_QPT_RC) || (qp_type(qp) == IB_QPT_UC))
		return qp->mtu;

	return rxe->port.mtu_cap;
}

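/* Allocate the skb for the next request packet and build the BTH plus any
 * optional headers (RETH, IMMDT, IETH, ATMETH, DETH) the opcode requires.
 */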
static struct sk_buff *init_req_packet(struct rxe_qp *qp,
				       struct rxe_send_wqe *wqe,
				       int opcode, int payload,
				       struct rxe_pkt_info *pkt)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct sk_buff *skb;
	struct rxe_send_wr *ibwr = &wqe->wr;
	struct rxe_av *av;
	int pad = (-payload) & 0x3;
	int paylen;
	int solicited;
	u16 pkey;
	u32 qp_num;
	int ack_req;

	/* length from start of bth to end of icrc */
	paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE;

	/* pkt->hdr, rxe, port_num and mask are initialized in ifc
	 * layer
	 */
	pkt->opcode = opcode;
	pkt->qp = qp;
	pkt->psn = qp->req.psn;
	pkt->mask = rxe_opcode[opcode].mask;
	pkt->paylen = paylen;
	pkt->wqe = wqe;

	/* init skb */
	av = rxe_get_av(pkt);
	skb = rxe_init_packet(rxe, av, paylen, pkt);
	if (unlikely(!skb))
		return NULL;

	/* init bth */
	solicited = (ibwr->send_flags & IB_SEND_SOLICITED) &&
			(pkt->mask & RXE_END_MASK) &&
			((pkt->mask & (RXE_SEND_MASK)) ||
			(pkt->mask & (RXE_WRITE_MASK | RXE_IMMDT_MASK)) ==
			(RXE_WRITE_MASK | RXE_IMMDT_MASK));

	pkey = IB_DEFAULT_PKEY_FULL;

	qp_num = (pkt->mask & RXE_DETH_MASK) ? ibwr->wr.ud.remote_qpn :
					 qp->attr.dest_qp_num;

	ack_req = ((pkt->mask & RXE_END_MASK) ||
		(qp->req.noack_pkts++ > RXE_MAX_PKT_PER_ACK));
	if (ack_req)
		qp->req.noack_pkts = 0;

	bth_init(pkt, pkt->opcode, solicited, 0, pad, pkey, qp_num,
		 ack_req, pkt->psn);

	/* init optional headers */
	if (pkt->mask & RXE_RETH_MASK) {
		reth_set_rkey(pkt, ibwr->wr.rdma.rkey);
		reth_set_va(pkt, wqe->iova);
		reth_set_len(pkt, wqe->dma.resid);
	}

	if (pkt->mask & RXE_IMMDT_MASK)
		immdt_set_imm(pkt, ibwr->ex.imm_data);

	if (pkt->mask & RXE_IETH_MASK)
		ieth_set_rkey(pkt, ibwr->ex.invalidate_rkey);

	if (pkt->mask & RXE_ATMETH_MASK) {
		atmeth_set_va(pkt, wqe->iova);
		if (opcode == IB_OPCODE_RC_COMPARE_SWAP ||
		    opcode == IB_OPCODE_RD_COMPARE_SWAP) {
			atmeth_set_swap_add(pkt, ibwr->wr.atomic.swap);
			atmeth_set_comp(pkt, ibwr->wr.atomic.compare_add);
		} else {
			atmeth_set_swap_add(pkt, ibwr->wr.atomic.compare_add);
		}
		atmeth_set_rkey(pkt, ibwr->wr.atomic.rkey);
	}

	if (pkt->mask & RXE_DETH_MASK) {
		if (qp->ibqp.qp_num == 1)
			deth_set_qkey(pkt, GSI_QKEY);
		else
			deth_set_qkey(pkt, ibwr->wr.ud.remote_qkey);
		deth_set_sqp(pkt, qp->ibqp.qp_num);
	}

	return skb;
}

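/* Second stage of building the packet: prepare the network headers and copy
 * the payload, either from the inline data in the wqe or from the memory
 * described by the sge list, then zero the pad bytes.
 */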
static int finish_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
			 struct rxe_pkt_info *pkt, struct sk_buff *skb,
			 int paylen)
{
	int err;

	err = rxe_prepare(pkt, skb);
	if (err)
		return err;

	if (pkt->mask & RXE_WRITE_OR_SEND) {
		if (wqe->wr.send_flags & IB_SEND_INLINE) {
			u8 *tmp = &wqe->dma.inline_data[wqe->dma.sge_offset];

			memcpy(payload_addr(pkt), tmp, paylen);

			wqe->dma.resid -= paylen;
			wqe->dma.sge_offset += paylen;
		} else {
			err = copy_data(qp->pd, 0, &wqe->dma,
					payload_addr(pkt), paylen,
					RXE_FROM_MR_OBJ);
			if (err)
				return err;
		}
		if (bth_pad(pkt)) {
			u8 *pad = payload_addr(pkt) + paylen;

			memset(pad, 0, bth_pad(pkt));
		}
	}

	return 0;
}

static void update_wqe_state(struct rxe_qp *qp,
			     struct rxe_send_wqe *wqe,
			     struct rxe_pkt_info *pkt)
{
	if (pkt->mask & RXE_END_MASK) {
		if (qp_type(qp) == IB_QPT_RC)
			wqe->state = wqe_state_pending;
	} else {
		wqe->state = wqe_state_processing;
	}
}

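/* Advance the requester psn. On the first packet of a wqe record the psn
 * range the wqe will occupy; RDMA reads skip ahead by one psn per expected
 * response packet.
 */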
static void update_wqe_psn(struct rxe_qp *qp,
			   struct rxe_send_wqe *wqe,
			   struct rxe_pkt_info *pkt,
			   int payload)
{
	/* number of packets left to send including current one */
	int num_pkt = (wqe->dma.resid + payload + qp->mtu - 1) / qp->mtu;

	/* handle zero length packet case */
	if (num_pkt == 0)
		num_pkt = 1;

	if (pkt->mask & RXE_START_MASK) {
		wqe->first_psn = qp->req.psn;
		wqe->last_psn = (qp->req.psn + num_pkt - 1) & BTH_PSN_MASK;
	}

	if (pkt->mask & RXE_READ_MASK)
		qp->req.psn = (wqe->first_psn + num_pkt) & BTH_PSN_MASK;
	else
		qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
}

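/* save_state()/rollback_state() snapshot and restore the wqe state and the
 * requester psn around rxe_xmit_packet() so a failed transmit can be undone.
 */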
static void save_state(struct rxe_send_wqe *wqe,
		       struct rxe_qp *qp,
		       struct rxe_send_wqe *rollback_wqe,
		       u32 *rollback_psn)
{
	rollback_wqe->state = wqe->state;
	rollback_wqe->first_psn = wqe->first_psn;
	rollback_wqe->last_psn = wqe->last_psn;
	*rollback_psn = qp->req.psn;
}

static void rollback_state(struct rxe_send_wqe *wqe,
			   struct rxe_qp *qp,
			   struct rxe_send_wqe *rollback_wqe,
			   u32 rollback_psn)
{
	wqe->state = rollback_wqe->state;
	wqe->first_psn = rollback_wqe->first_psn;
	wqe->last_psn = rollback_wqe->last_psn;
	qp->req.psn = rollback_psn;
}

static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
			 struct rxe_pkt_info *pkt, int payload)
{
	qp->req.opcode = pkt->opcode;

	if (pkt->mask & RXE_END_MASK)
		qp->req.wqe_index = queue_next_index(qp->sq.queue,
						     qp->req.wqe_index);

	qp->need_req_skb = 0;

	if (qp->qp_timeout_jiffies && !timer_pending(&qp->retrans_timer))
		mod_timer(&qp->retrans_timer,
			  jiffies + qp->qp_timeout_jiffies);
}

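/* Execute work requests that never go on the wire (local invalidate,
 * fast-register MR, bind MW) and complete them immediately.
 */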
static int rxe_do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	u8 opcode = wqe->wr.opcode;
	u32 rkey;
	int ret;

	switch (opcode) {
	case IB_WR_LOCAL_INV:
		rkey = wqe->wr.ex.invalidate_rkey;
		if (rkey_is_mw(rkey))
			ret = rxe_invalidate_mw(qp, rkey);
		else
			ret = rxe_invalidate_mr(qp, rkey);

		if (unlikely(ret)) {
			wqe->status = IB_WC_LOC_QP_OP_ERR;
			return ret;
		}
		break;
	case IB_WR_REG_MR:
		ret = rxe_reg_fast_mr(qp, wqe);
		if (unlikely(ret)) {
			wqe->status = IB_WC_LOC_QP_OP_ERR;
			return ret;
		}
		break;
	case IB_WR_BIND_MW:
		ret = rxe_bind_mw(qp, wqe);
		if (unlikely(ret)) {
			wqe->status = IB_WC_MW_BIND_ERR;
			return ret;
		}
		break;
	default:
		pr_err("Unexpected send wqe opcode %d\n", opcode);
		wqe->status = IB_WC_LOC_QP_OP_ERR;
		return -EINVAL;
	}

	wqe->state = wqe_state_done;
	wqe->status = IB_WC_SUCCESS;
	qp->req.wqe_index = queue_next_index(qp->sq.queue, qp->req.wqe_index);

	/* There is no ack coming for local work requests
	 * which can lead to a deadlock. So go ahead and complete
	 * it now.
	 */
	rxe_run_task(&qp->comp.task, 1);

	return 0;
}

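/* Main requester routine, run from the requester task: take wqes off the
 * send queue one at a time, build one packet per MTU-sized chunk and hand
 * it to the network layer, honoring retry, fence, read/atomic depth and
 * inflight skb limits.
 */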
int rxe_requester(void *arg)
{
	struct rxe_qp *qp = (struct rxe_qp *)arg;
	struct rxe_pkt_info pkt;
	struct sk_buff *skb;
	struct rxe_send_wqe *wqe;
	enum rxe_hdr_mask mask;
	int payload;
	int mtu;
	int opcode;
	int ret;
	struct rxe_send_wqe rollback_wqe;
	u32 rollback_psn;
	struct rxe_queue *q = qp->sq.queue;

	rxe_add_ref(qp);

next_wqe:
	if (unlikely(!qp->valid || qp->req.state == QP_STATE_ERROR))
		goto exit;

	if (unlikely(qp->req.state == QP_STATE_RESET)) {
		qp->req.wqe_index = queue_get_consumer(q,
						QUEUE_TYPE_FROM_CLIENT);
		qp->req.opcode = -1;
		qp->req.need_rd_atomic = 0;
		qp->req.wait_psn = 0;
		qp->req.need_retry = 0;
		goto exit;
	}

	if (unlikely(qp->req.need_retry)) {
		req_retry(qp);
		qp->req.need_retry = 0;
	}

	wqe = req_next_wqe(qp);
	if (unlikely(!wqe))
		goto exit;

	if (wqe->mask & WR_LOCAL_OP_MASK) {
		ret = rxe_do_local_ops(qp, wqe);
		if (unlikely(ret))
			goto err;
		else
			goto next_wqe;
	}

	if (unlikely(qp_type(qp) == IB_QPT_RC &&
		psn_compare(qp->req.psn, (qp->comp.psn +
				RXE_MAX_UNACKED_PSNS)) > 0)) {
		qp->req.wait_psn = 1;
		goto exit;
	}

	/* Limit the number of inflight SKBs per QP */
	if (unlikely(atomic_read(&qp->skb_out) >
		     RXE_INFLIGHT_SKBS_PER_QP_HIGH)) {
		qp->need_req_skb = 1;
		goto exit;
	}

	opcode = next_opcode(qp, wqe, wqe->wr.opcode);
	if (unlikely(opcode < 0)) {
		wqe->status = IB_WC_LOC_QP_OP_ERR;
		goto err;
	}

	mask = rxe_opcode[opcode].mask;
	if (unlikely(mask & RXE_READ_OR_ATOMIC)) {
		if (check_init_depth(qp, wqe))
			goto exit;
	}

	mtu = get_mtu(qp);
	payload = (mask & RXE_WRITE_OR_SEND) ? wqe->dma.resid : 0;
	if (payload > mtu) {
		if (qp_type(qp) == IB_QPT_UD) {
			/* C10-93.1.1: If the total sum of all the buffer lengths specified for a
			 * UD message exceeds the MTU of the port as returned by QueryHCA, the CI
			 * shall not emit any packets for this message. Further, the CI shall not
			 * generate an error due to this condition.
			 */

			/* fake a successful UD send */
			wqe->first_psn = qp->req.psn;
			wqe->last_psn = qp->req.psn;
			qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
			qp->req.opcode = IB_OPCODE_UD_SEND_ONLY;
			qp->req.wqe_index = queue_next_index(qp->sq.queue,
						       qp->req.wqe_index);
			wqe->state = wqe_state_done;
			wqe->status = IB_WC_SUCCESS;
			__rxe_do_task(&qp->comp.task);
			rxe_drop_ref(qp);
			return 0;
		}
		payload = mtu;
	}

	skb = init_req_packet(qp, wqe, opcode, payload, &pkt);
	if (unlikely(!skb)) {
		pr_err("qp#%d Failed allocating skb\n", qp_num(qp));
		wqe->status = IB_WC_LOC_QP_OP_ERR;
		goto err;
	}

	ret = finish_packet(qp, wqe, &pkt, skb, payload);
	if (unlikely(ret)) {
		pr_debug("qp#%d Error during finish packet\n", qp_num(qp));
		if (ret == -EFAULT)
			wqe->status = IB_WC_LOC_PROT_ERR;
		else
			wqe->status = IB_WC_LOC_QP_OP_ERR;
		kfree_skb(skb);
		goto err;
	}

	/*
	 * To prevent a race on wqe access between requester and completer,
	 * wqe members state and psn need to be set before calling
	 * rxe_xmit_packet().
	 * Otherwise, completer might initiate an unjustified retry flow.
	 */
	save_state(wqe, qp, &rollback_wqe, &rollback_psn);
	update_wqe_state(qp, wqe, &pkt);
	update_wqe_psn(qp, wqe, &pkt, payload);
	ret = rxe_xmit_packet(qp, &pkt, skb);
	if (ret) {
		qp->need_req_skb = 1;

		rollback_state(wqe, qp, &rollback_wqe, rollback_psn);

		if (ret == -EAGAIN) {
			rxe_run_task(&qp->req.task, 1);
			goto exit;
		}

		wqe->status = IB_WC_LOC_QP_OP_ERR;
		goto err;
	}

	update_state(qp, wqe, &pkt, payload);

	goto next_wqe;

err:
	wqe->state = wqe_state_error;
	__rxe_do_task(&qp->comp.task);

exit:
	rxe_drop_ref(qp);
	return -EAGAIN;
}