// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <rdma/uverbs_ioctl.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_task.h"

static int rxe_qp_chk_cap(struct rxe_dev *rxe, struct ib_qp_cap *cap,
			  int has_srq)
{
	if (cap->max_send_wr > rxe->attr.max_qp_wr) {
		pr_debug("invalid send wr = %u > %d\n",
			 cap->max_send_wr, rxe->attr.max_qp_wr);
		goto err1;
	}

	if (cap->max_send_sge > rxe->attr.max_send_sge) {
		pr_debug("invalid send sge = %u > %d\n",
			 cap->max_send_sge, rxe->attr.max_send_sge);
		goto err1;
	}

	if (!has_srq) {
		if (cap->max_recv_wr > rxe->attr.max_qp_wr) {
			pr_debug("invalid recv wr = %u > %d\n",
				 cap->max_recv_wr, rxe->attr.max_qp_wr);
			goto err1;
		}

		if (cap->max_recv_sge > rxe->attr.max_recv_sge) {
			pr_debug("invalid recv sge = %u > %d\n",
				 cap->max_recv_sge, rxe->attr.max_recv_sge);
			goto err1;
		}
	}

	if (cap->max_inline_data > rxe->max_inline_data) {
		pr_debug("invalid max inline data = %u > %d\n",
			 cap->max_inline_data, rxe->max_inline_data);
		goto err1;
	}

	return 0;

err1:
	return -EINVAL;
}

int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init)
{
	struct ib_qp_cap *cap = &init->cap;
	struct rxe_port *port;
	int port_num = init->port_num;

	switch (init->qp_type) {
	case IB_QPT_GSI:
	case IB_QPT_RC:
	case IB_QPT_UC:
	case IB_QPT_UD:
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (!init->recv_cq || !init->send_cq) {
		pr_debug("missing cq\n");
		goto err1;
	}

	if (rxe_qp_chk_cap(rxe, cap, !!init->srq))
		goto err1;

	if (init->qp_type == IB_QPT_GSI) {
		if (!rdma_is_port_valid(&rxe->ib_dev, port_num)) {
			pr_debug("invalid port = %d\n", port_num);
			goto err1;
		}

		port = &rxe->port;

		/* only one GSI QP is allowed per port */
		if (port->qp_gsi_index) {
			pr_debug("GSI QP exists for port %d\n", port_num);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}

static int alloc_rd_atomic_resources(struct rxe_qp *qp, unsigned int n)
{
	qp->resp.res_head = 0;
	qp->resp.res_tail = 0;
	qp->resp.resources = kcalloc(n, sizeof(struct resp_res), GFP_KERNEL);

	if (!qp->resp.resources)
		return -ENOMEM;

	return 0;
}

static void free_rd_atomic_resources(struct rxe_qp *qp)
{
	if (qp->resp.resources) {
		int i;

		for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
			struct resp_res *res = &qp->resp.resources[i];

			free_rd_atomic_resource(res);
		}
		kfree(qp->resp.resources);
		qp->resp.resources = NULL;
	}
}

/* mark the resource slot as unused */
void free_rd_atomic_resource(struct resp_res *res)
{
	res->type = 0;
}

static void cleanup_rd_atomic_resources(struct rxe_qp *qp)
{
	int i;
	struct resp_res *res;

	if (qp->resp.resources) {
		for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
			res = &qp->resp.resources[i];
			free_rd_atomic_resource(res);
		}
	}
}

static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
			     struct ib_qp_init_attr *init)
{
	struct rxe_port *port;
	u32 qpn;

	qp->sq_sig_type = init->sq_sig_type;
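	/* start at the smallest IBTA path MTU (enum value 1 == IB_MTU_256);
	 * modify_qp may raise it later, up to the port's max_mtu.
	 */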
	qp->attr.path_mtu = 1;
	qp->mtu = ib_mtu_enum_to_int(qp->attr.path_mtu);

	qpn = qp->elem.index;
	port = &rxe->port;

	switch (init->qp_type) {
	case IB_QPT_GSI:
		qp->ibqp.qp_num = 1;
		port->qp_gsi_index = qpn;
		qp->attr.port_num = init->port_num;
		break;

	default:
		qp->ibqp.qp_num = qpn;
		break;
	}

	spin_lock_init(&qp->state_lock);

	spin_lock_init(&qp->req.task.state_lock);
	spin_lock_init(&qp->resp.task.state_lock);
	spin_lock_init(&qp->comp.task.state_lock);

	spin_lock_init(&qp->sq.sq_lock);
	spin_lock_init(&qp->rq.producer_lock);
	spin_lock_init(&qp->rq.consumer_lock);

	skb_queue_head_init(&qp->req_pkts);
	skb_queue_head_init(&qp->resp_pkts);

	atomic_set(&qp->ssn, 0);
	atomic_set(&qp->skb_out, 0);
}

static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
			   struct ib_qp_init_attr *init, struct ib_udata *udata,
			   struct rxe_create_qp_resp __user *uresp)
{
	int err;
	int wqe_size;
	enum queue_type type;

	err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
	if (err < 0)
		return err;
	qp->sk->sk->sk_user_data = qp;

	/* pick a source UDP port number for this QP based on
	 * the source QPN. this spreads traffic for different QPs
	 * across different NIC RX queues (while using a single
	 * flow for a given QP to maintain packet order).
	 * the port number must be in the Dynamic Ports range
	 * (0xc000 - 0xffff).
	 */
	qp->src_port = RXE_ROCE_V2_SPORT + (hash_32(qp_num(qp), 14) & 0x3fff);
	qp->sq.max_wr = init->cap.max_send_wr;

	/* These caps are limited by rxe_qp_chk_cap() done by the caller */
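	/* the SGE array and the inline data buffer share the space at the end
	 * of the send WQE, so size it for whichever of the two is larger and
	 * report the (possibly enlarged) caps back to the caller.
	 */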
	wqe_size = max_t(int, init->cap.max_send_sge * sizeof(struct ib_sge),
			 init->cap.max_inline_data);
	qp->sq.max_sge = init->cap.max_send_sge =
		wqe_size / sizeof(struct ib_sge);
	qp->sq.max_inline = init->cap.max_inline_data = wqe_size;
	wqe_size += sizeof(struct rxe_send_wqe);

	type = QUEUE_TYPE_FROM_CLIENT;
	qp->sq.queue = rxe_queue_init(rxe, &qp->sq.max_wr,
				      wqe_size, type);
	if (!qp->sq.queue)
		return -ENOMEM;

	err = do_mmap_info(rxe, uresp ? &uresp->sq_mi : NULL, udata,
			   qp->sq.queue->buf, qp->sq.queue->buf_size,
			   &qp->sq.queue->ip);

	if (err) {
		vfree(qp->sq.queue->buf);
		kfree(qp->sq.queue);
		qp->sq.queue = NULL;
		return err;
	}

	qp->req.wqe_index = queue_get_producer(qp->sq.queue,
					       QUEUE_TYPE_FROM_CLIENT);

	qp->req.state = QP_STATE_RESET;
	qp->comp.state = QP_STATE_RESET;
	qp->req.opcode = -1;
	qp->comp.opcode = -1;

	rxe_init_task(&qp->req.task, qp, rxe_requester);
	rxe_init_task(&qp->comp.task, qp, rxe_completer);

	qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */
	if (init->qp_type == IB_QPT_RC) {
		timer_setup(&qp->rnr_nak_timer, rnr_nak_timer, 0);
		timer_setup(&qp->retrans_timer, retransmit_timer, 0);
	}
	return 0;
}

static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
			    struct ib_qp_init_attr *init,
			    struct ib_udata *udata,
			    struct rxe_create_qp_resp __user *uresp)
{
	int err;
	int wqe_size;
	enum queue_type type;

	if (!qp->srq) {
		qp->rq.max_wr = init->cap.max_recv_wr;
		qp->rq.max_sge = init->cap.max_recv_sge;

		wqe_size = rcv_wqe_size(qp->rq.max_sge);

		pr_debug("qp#%d max_wr = %d, max_sge = %d, wqe_size = %d\n",
			 qp_num(qp), qp->rq.max_wr, qp->rq.max_sge, wqe_size);

		type = QUEUE_TYPE_FROM_CLIENT;
		qp->rq.queue = rxe_queue_init(rxe, &qp->rq.max_wr,
					      wqe_size, type);
		if (!qp->rq.queue)
			return -ENOMEM;

		err = do_mmap_info(rxe, uresp ? &uresp->rq_mi : NULL, udata,
				   qp->rq.queue->buf, qp->rq.queue->buf_size,
				   &qp->rq.queue->ip);
		if (err) {
			vfree(qp->rq.queue->buf);
			kfree(qp->rq.queue);
			qp->rq.queue = NULL;
			return err;
		}
	}

	rxe_init_task(&qp->resp.task, qp, rxe_responder);

	qp->resp.opcode = OPCODE_NONE;
	qp->resp.msn = 0;
	qp->resp.state = QP_STATE_RESET;

	return 0;
}

/* called by the create qp verb */
int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
		     struct ib_qp_init_attr *init,
		     struct rxe_create_qp_resp __user *uresp,
		     struct ib_pd *ibpd,
		     struct ib_udata *udata)
{
	int err;
	struct rxe_cq *rcq = to_rcq(init->recv_cq);
	struct rxe_cq *scq = to_rcq(init->send_cq);
	struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL;

	rxe_get(pd);
	rxe_get(rcq);
	rxe_get(scq);
	if (srq)
		rxe_get(srq);

	qp->pd = pd;
	qp->rcq = rcq;
	qp->scq = scq;
	qp->srq = srq;

	atomic_inc(&rcq->num_wq);
	atomic_inc(&scq->num_wq);

	rxe_qp_init_misc(rxe, qp, init);

	err = rxe_qp_init_req(rxe, qp, init, udata, uresp);
	if (err)
		goto err1;

	err = rxe_qp_init_resp(rxe, qp, init, udata, uresp);
	if (err)
		goto err2;

	qp->attr.qp_state = IB_QPS_RESET;
	qp->valid = 1;

	return 0;

err2:
	rxe_queue_cleanup(qp->sq.queue);
	qp->sq.queue = NULL;
err1:
	atomic_dec(&rcq->num_wq);
	atomic_dec(&scq->num_wq);

	qp->pd = NULL;
	qp->rcq = NULL;
	qp->scq = NULL;
	qp->srq = NULL;

	if (srq)
		rxe_put(srq);
	rxe_put(scq);
	rxe_put(rcq);
	rxe_put(pd);

	return err;
}

/* called by the query qp verb */
int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init)
{
	init->event_handler = qp->ibqp.event_handler;
	init->qp_context = qp->ibqp.qp_context;
	init->send_cq = qp->ibqp.send_cq;
	init->recv_cq = qp->ibqp.recv_cq;
	init->srq = qp->ibqp.srq;

	init->cap.max_send_wr = qp->sq.max_wr;
	init->cap.max_send_sge = qp->sq.max_sge;
	init->cap.max_inline_data = qp->sq.max_inline;

	if (!qp->srq) {
		init->cap.max_recv_wr = qp->rq.max_wr;
		init->cap.max_recv_sge = qp->rq.max_sge;
	}

	init->sq_sig_type = qp->sq_sig_type;

	init->qp_type = qp->ibqp.qp_type;
	init->port_num = 1;

	return 0;
}

/* called by the modify qp verb, this routine checks all the parameters before
 * making any changes
 */
int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
		    struct ib_qp_attr *attr, int mask)
{
	enum ib_qp_state cur_state = (mask & IB_QP_CUR_STATE) ?
				     attr->cur_qp_state : qp->attr.qp_state;
	enum ib_qp_state new_state = (mask & IB_QP_STATE) ?
				     attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask)) {
		pr_debug("invalid mask or state for qp\n");
		goto err1;
	}

	if (mask & IB_QP_STATE) {
		if (cur_state == IB_QPS_SQD) {
			if (qp->req.state == QP_STATE_DRAIN &&
			    new_state != IB_QPS_ERR)
				goto err1;
		}
	}

	if (mask & IB_QP_PORT) {
		if (!rdma_is_port_valid(&rxe->ib_dev, attr->port_num)) {
			pr_debug("invalid port %d\n", attr->port_num);
			goto err1;
		}
	}

	if (mask & IB_QP_CAP && rxe_qp_chk_cap(rxe, &attr->cap, !!qp->srq))
		goto err1;

	if (mask & IB_QP_AV && rxe_av_chk_attr(rxe, &attr->ah_attr))
		goto err1;

	if (mask & IB_QP_ALT_PATH) {
		if (rxe_av_chk_attr(rxe, &attr->alt_ah_attr))
			goto err1;
		if (!rdma_is_port_valid(&rxe->ib_dev, attr->alt_port_num)) {
			pr_debug("invalid alt port %d\n", attr->alt_port_num);
			goto err1;
		}
		if (attr->alt_timeout > 31) {
			pr_debug("invalid QP alt timeout %d > 31\n",
				 attr->alt_timeout);
			goto err1;
		}
	}

	if (mask & IB_QP_PATH_MTU) {
		struct rxe_port *port = &rxe->port;

		enum ib_mtu max_mtu = port->attr.max_mtu;
		enum ib_mtu mtu = attr->path_mtu;

		if (mtu > max_mtu) {
			pr_debug("invalid mtu (%d) > (%d)\n",
				 ib_mtu_enum_to_int(mtu),
				 ib_mtu_enum_to_int(max_mtu));
			goto err1;
		}
	}

	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic > rxe->attr.max_qp_rd_atom) {
			pr_debug("invalid max_rd_atomic %d > %d\n",
				 attr->max_rd_atomic,
				 rxe->attr.max_qp_rd_atom);
			goto err1;
		}
	}

	if (mask & IB_QP_TIMEOUT) {
		if (attr->timeout > 31) {
			pr_debug("invalid QP timeout %d > 31\n", attr->timeout);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}

/* move the qp to the reset state */
static void rxe_qp_reset(struct rxe_qp *qp)
{
	/* stop tasks from running */
	rxe_disable_task(&qp->resp.task);

	/* stop request/comp */
	if (qp->sq.queue) {
		if (qp_type(qp) == IB_QPT_RC)
			rxe_disable_task(&qp->comp.task);
		rxe_disable_task(&qp->req.task);
	}

	/* move qp to the reset state */
	qp->req.state = QP_STATE_RESET;
	qp->comp.state = QP_STATE_RESET;
	qp->resp.state = QP_STATE_RESET;

	/* let state machines reset themselves, drain work and packet queues,
	 * etc.
	 */
	__rxe_do_task(&qp->resp.task);

	if (qp->sq.queue) {
		__rxe_do_task(&qp->comp.task);
		__rxe_do_task(&qp->req.task);
		rxe_queue_reset(qp->sq.queue);
	}

	/* cleanup attributes */
	atomic_set(&qp->ssn, 0);
	qp->req.opcode = -1;
	qp->req.need_retry = 0;
	qp->req.wait_for_rnr_timer = 0;
	qp->req.noack_pkts = 0;
	qp->resp.msn = 0;
	qp->resp.opcode = -1;
	qp->resp.drop_msg = 0;
	qp->resp.goto_error = 0;
	qp->resp.sent_psn_nak = 0;

	if (qp->resp.mr) {
		rxe_put(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	cleanup_rd_atomic_resources(qp);

	/* reenable tasks */
	rxe_enable_task(&qp->resp.task);

	if (qp->sq.queue) {
		if (qp_type(qp) == IB_QPT_RC)
			rxe_enable_task(&qp->comp.task);

		rxe_enable_task(&qp->req.task);
	}
}

/* drain the send queue */
static void rxe_qp_drain(struct rxe_qp *qp)
{
	if (qp->sq.queue) {
		if (qp->req.state != QP_STATE_DRAINED) {
			qp->req.state = QP_STATE_DRAIN;
			if (qp_type(qp) == IB_QPT_RC)
				rxe_sched_task(&qp->comp.task);
			else
				__rxe_do_task(&qp->comp.task);
			rxe_sched_task(&qp->req.task);
		}
	}
}

/* move the qp to the error state */
void rxe_qp_error(struct rxe_qp *qp)
{
	qp->req.state = QP_STATE_ERROR;
	qp->resp.state = QP_STATE_ERROR;
	qp->comp.state = QP_STATE_ERROR;
	qp->attr.qp_state = IB_QPS_ERR;

	/* drain work and packet queues */
	rxe_sched_task(&qp->resp.task);

	if (qp_type(qp) == IB_QPT_RC)
		rxe_sched_task(&qp->comp.task);
	else
		__rxe_do_task(&qp->comp.task);
	rxe_sched_task(&qp->req.task);
}

/* called by the modify qp verb */
int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
		     struct ib_udata *udata)
{
	int err;

	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
		int max_rd_atomic = attr->max_rd_atomic ?
			roundup_pow_of_two(attr->max_rd_atomic) : 0;

		qp->attr.max_rd_atomic = max_rd_atomic;
		atomic_set(&qp->req.rd_atomic, max_rd_atomic);
	}

	if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		int max_dest_rd_atomic = attr->max_dest_rd_atomic ?
			roundup_pow_of_two(attr->max_dest_rd_atomic) : 0;

		qp->attr.max_dest_rd_atomic = max_dest_rd_atomic;

		free_rd_atomic_resources(qp);

		err = alloc_rd_atomic_resources(qp, max_dest_rd_atomic);
		if (err)
			return err;
	}

	if (mask & IB_QP_CUR_STATE)
		qp->attr.cur_qp_state = attr->qp_state;

	if (mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
		qp->attr.en_sqd_async_notify = attr->en_sqd_async_notify;

	if (mask & IB_QP_ACCESS_FLAGS)
		qp->attr.qp_access_flags = attr->qp_access_flags;

	if (mask & IB_QP_PKEY_INDEX)
		qp->attr.pkey_index = attr->pkey_index;

	if (mask & IB_QP_PORT)
		qp->attr.port_num = attr->port_num;

	if (mask & IB_QP_QKEY)
		qp->attr.qkey = attr->qkey;

	if (mask & IB_QP_AV)
		rxe_init_av(&attr->ah_attr, &qp->pri_av);

	if (mask & IB_QP_ALT_PATH) {
		rxe_init_av(&attr->alt_ah_attr, &qp->alt_av);
		qp->attr.alt_port_num = attr->alt_port_num;
		qp->attr.alt_pkey_index = attr->alt_pkey_index;
		qp->attr.alt_timeout = attr->alt_timeout;
	}

	if (mask & IB_QP_PATH_MTU) {
		qp->attr.path_mtu = attr->path_mtu;
		qp->mtu = ib_mtu_enum_to_int(attr->path_mtu);
	}

	if (mask & IB_QP_TIMEOUT) {
		qp->attr.timeout = attr->timeout;
		if (attr->timeout == 0) {
			qp->qp_timeout_jiffies = 0;
		} else {
			/* According to the spec, timeout = 4.096 * 2 ^ attr->timeout [us] */
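			/* 4.096 us == 4096 ns, so shift 4096 ns left by the
			 * timeout exponent; e.g. attr->timeout = 14 gives
			 * 4096 ns << 14 ~= 67 ms.
			 */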
			int j = nsecs_to_jiffies(4096ULL << attr->timeout);

			qp->qp_timeout_jiffies = j ? j : 1;
		}
	}

	if (mask & IB_QP_RETRY_CNT) {
		qp->attr.retry_cnt = attr->retry_cnt;
		qp->comp.retry_cnt = attr->retry_cnt;
		pr_debug("qp#%d set retry count = %d\n", qp_num(qp),
			 attr->retry_cnt);
	}

	if (mask & IB_QP_RNR_RETRY) {
		qp->attr.rnr_retry = attr->rnr_retry;
		qp->comp.rnr_retry = attr->rnr_retry;
		pr_debug("qp#%d set rnr retry count = %d\n", qp_num(qp),
			 attr->rnr_retry);
	}

	if (mask & IB_QP_RQ_PSN) {
		qp->attr.rq_psn = (attr->rq_psn & BTH_PSN_MASK);
		qp->resp.psn = qp->attr.rq_psn;
		pr_debug("qp#%d set resp psn = 0x%x\n", qp_num(qp),
			 qp->resp.psn);
	}

	if (mask & IB_QP_MIN_RNR_TIMER) {
		qp->attr.min_rnr_timer = attr->min_rnr_timer;
		pr_debug("qp#%d set min rnr timer = 0x%x\n", qp_num(qp),
			 attr->min_rnr_timer);
	}

	if (mask & IB_QP_SQ_PSN) {
		qp->attr.sq_psn = (attr->sq_psn & BTH_PSN_MASK);
		qp->req.psn = qp->attr.sq_psn;
		qp->comp.psn = qp->attr.sq_psn;
		pr_debug("qp#%d set req psn = 0x%x\n", qp_num(qp), qp->req.psn);
	}

	if (mask & IB_QP_PATH_MIG_STATE)
		qp->attr.path_mig_state = attr->path_mig_state;

	if (mask & IB_QP_DEST_QPN)
		qp->attr.dest_qp_num = attr->dest_qp_num;

	if (mask & IB_QP_STATE) {
		qp->attr.qp_state = attr->qp_state;

		switch (attr->qp_state) {
		case IB_QPS_RESET:
			pr_debug("qp#%d state -> RESET\n", qp_num(qp));
			rxe_qp_reset(qp);
			break;

		case IB_QPS_INIT:
			pr_debug("qp#%d state -> INIT\n", qp_num(qp));
			qp->req.state = QP_STATE_INIT;
			qp->resp.state = QP_STATE_INIT;
			qp->comp.state = QP_STATE_INIT;
			break;

		case IB_QPS_RTR:
			pr_debug("qp#%d state -> RTR\n", qp_num(qp));
			qp->resp.state = QP_STATE_READY;
			break;

		case IB_QPS_RTS:
			pr_debug("qp#%d state -> RTS\n", qp_num(qp));
			qp->req.state = QP_STATE_READY;
			qp->comp.state = QP_STATE_READY;
			break;

		case IB_QPS_SQD:
			pr_debug("qp#%d state -> SQD\n", qp_num(qp));
			rxe_qp_drain(qp);
			break;

		case IB_QPS_SQE:
			pr_warn("qp#%d state -> SQE !!?\n", qp_num(qp));
			/* Not possible from modify_qp. */
			break;

		case IB_QPS_ERR:
			pr_debug("qp#%d state -> ERR\n", qp_num(qp));
			rxe_qp_error(qp);
			break;
		}
	}

	return 0;
}

/* called by the query qp verb */
int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask)
{
	*attr = qp->attr;

	attr->rq_psn = qp->resp.psn;
	attr->sq_psn = qp->req.psn;

	attr->cap.max_send_wr = qp->sq.max_wr;
	attr->cap.max_send_sge = qp->sq.max_sge;
	attr->cap.max_inline_data = qp->sq.max_inline;

	if (!qp->srq) {
		attr->cap.max_recv_wr = qp->rq.max_wr;
		attr->cap.max_recv_sge = qp->rq.max_sge;
	}

	rxe_av_to_attr(&qp->pri_av, &attr->ah_attr);
	rxe_av_to_attr(&qp->alt_av, &attr->alt_ah_attr);

	if (qp->req.state == QP_STATE_DRAIN) {
		attr->sq_draining = 1;
		/* applications that get this state
		 * typically spin on it. yield the
		 * processor
		 */
		cond_resched();
	} else {
		attr->sq_draining = 0;
	}

	pr_debug("attr->sq_draining = %d\n", attr->sq_draining);

	return 0;
}

int rxe_qp_chk_destroy(struct rxe_qp *qp)
{
	/* See IBA o10-2.2.3
	 * An attempt to destroy a QP while attached to a mcast group
	 * will fail immediately.
	 */
	if (atomic_read(&qp->mcg_num)) {
		pr_debug("Attempt to destroy QP while attached to multicast group\n");
		return -EBUSY;
	}

	return 0;
}

/* called when the last reference to the qp is dropped */
static void rxe_qp_do_cleanup(struct work_struct *work)
{
	struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work);

	qp->valid = 0;
	qp->qp_timeout_jiffies = 0;
	rxe_cleanup_task(&qp->resp.task);

	if (qp_type(qp) == IB_QPT_RC) {
		del_timer_sync(&qp->retrans_timer);
		del_timer_sync(&qp->rnr_nak_timer);
	}

	if (qp->req.task.func)
		rxe_cleanup_task(&qp->req.task);

	if (qp->comp.task.func)
		rxe_cleanup_task(&qp->comp.task);

	/* flush out any receive wr's or pending requests */
	if (qp->req.task.func)
		__rxe_do_task(&qp->req.task);

	if (qp->sq.queue) {
		__rxe_do_task(&qp->comp.task);
		__rxe_do_task(&qp->req.task);
	}

	if (qp->sq.queue)
		rxe_queue_cleanup(qp->sq.queue);

	if (qp->srq)
		rxe_put(qp->srq);

	if (qp->rq.queue)
		rxe_queue_cleanup(qp->rq.queue);

	if (qp->scq) {
		atomic_dec(&qp->scq->num_wq);
		rxe_put(qp->scq);
	}

	if (qp->rcq) {
		atomic_dec(&qp->rcq->num_wq);
		rxe_put(qp->rcq);
	}

	if (qp->pd)
		rxe_put(qp->pd);

	if (qp->resp.mr)
		rxe_put(qp->resp.mr);

	free_rd_atomic_resources(qp);

	if (qp->sk) {
		if (qp_type(qp) == IB_QPT_RC)
			sk_dst_reset(qp->sk->sk);

		kernel_sock_shutdown(qp->sk, SHUT_RDWR);
		sock_release(qp->sk);
	}
}

/* called when the last reference to the qp is dropped */
void rxe_qp_cleanup(struct rxe_pool_elem *elem)
{
	struct rxe_qp *qp = container_of(elem, typeof(*qp), elem);

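	/* rxe_qp_do_cleanup() can sleep (del_timer_sync(), sock_release()),
	 * so if the final reference is dropped from interrupt context the
	 * cleanup is deferred to a work item; otherwise it runs inline.
	 */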
	execute_in_process_context(rxe_qp_do_cleanup, &qp->cleanup_work);
}