// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <rdma/uverbs_ioctl.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_task.h"

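/* check that the requested queue pair capabilities do not exceed the
 * device limits; the receive side checks are skipped when the qp is
 * attached to an SRQ
 */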
static int rxe_qp_chk_cap(struct rxe_dev *rxe, struct ib_qp_cap *cap,
			  int has_srq)
{
	if (cap->max_send_wr > rxe->attr.max_qp_wr) {
		pr_warn("invalid send wr = %d > %d\n",
			cap->max_send_wr, rxe->attr.max_qp_wr);
		goto err1;
	}

	if (cap->max_send_sge > rxe->attr.max_send_sge) {
		pr_warn("invalid send sge = %d > %d\n",
			cap->max_send_sge, rxe->attr.max_send_sge);
		goto err1;
	}

	if (!has_srq) {
		if (cap->max_recv_wr > rxe->attr.max_qp_wr) {
			pr_warn("invalid recv wr = %d > %d\n",
				cap->max_recv_wr, rxe->attr.max_qp_wr);
			goto err1;
		}

		if (cap->max_recv_sge > rxe->attr.max_recv_sge) {
			pr_warn("invalid recv sge = %d > %d\n",
				cap->max_recv_sge, rxe->attr.max_recv_sge);
			goto err1;
		}
	}

	if (cap->max_inline_data > rxe->max_inline_data) {
		pr_warn("invalid max inline data = %d > %d\n",
			cap->max_inline_data, rxe->max_inline_data);
		goto err1;
	}

	return 0;

err1:
	return -EINVAL;
}

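/* called by the create qp verb to validate the init attributes: both
 * CQs must be present, the caps must fit the device limits, and only
 * one SMI and one GSI qp may exist per port
 */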
int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init)
{
	struct ib_qp_cap *cap = &init->cap;
	struct rxe_port *port;
	int port_num = init->port_num;

	if (!init->recv_cq || !init->send_cq) {
		pr_warn("missing cq\n");
		goto err1;
	}

	if (rxe_qp_chk_cap(rxe, cap, !!init->srq))
		goto err1;

	if (init->qp_type == IB_QPT_SMI || init->qp_type == IB_QPT_GSI) {
		if (!rdma_is_port_valid(&rxe->ib_dev, port_num)) {
			pr_warn("invalid port = %d\n", port_num);
			goto err1;
		}

		port = &rxe->port;

		if (init->qp_type == IB_QPT_SMI && port->qp_smi_index) {
			pr_warn("SMI QP exists for port %d\n", port_num);
			goto err1;
		}

		if (init->qp_type == IB_QPT_GSI && port->qp_gsi_index) {
			pr_warn("GSI QP exists for port %d\n", port_num);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}

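/* allocate the responder resources used to replay RDMA READ and atomic
 * responses, one struct resp_res per outstanding request
 */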
static int alloc_rd_atomic_resources(struct rxe_qp *qp, unsigned int n)
{
	qp->resp.res_head = 0;
	qp->resp.res_tail = 0;
	qp->resp.resources = kcalloc(n, sizeof(struct resp_res), GFP_KERNEL);

	if (!qp->resp.resources)
		return -ENOMEM;

	return 0;
}

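/* release each responder resource and then the resource array itself */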
static void free_rd_atomic_resources(struct rxe_qp *qp)
{
	if (qp->resp.resources) {
		int i;

		for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
			struct resp_res *res = &qp->resp.resources[i];

			free_rd_atomic_resource(qp, res);
		}
		kfree(qp->resp.resources);
		qp->resp.resources = NULL;
	}
}

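/* drop whatever a responder resource still holds: the saved skb for an
 * atomic response or the MR reference for an RDMA READ response
 */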
void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res)
{
	if (res->type == RXE_ATOMIC_MASK) {
		kfree_skb(res->atomic.skb);
	} else if (res->type == RXE_READ_MASK) {
		if (res->read.mr)
			rxe_drop_ref(res->read.mr);
	}
	res->type = 0;
}

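/* free the contents of the responder resources without freeing the
 * array itself, e.g. when the qp is moved back to the reset state
 */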
static void cleanup_rd_atomic_resources(struct rxe_qp *qp)
{
	int i;
	struct resp_res *res;

	if (qp->resp.resources) {
		for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
			res = &qp->resp.resources[i];
			free_rd_atomic_resource(qp, res);
		}
	}
}

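/* set up the state common to all qp types: qp number selection for
 * SMI/GSI vs. regular qps, list heads, packet queues, locks and
 * counters
 */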
static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
			     struct ib_qp_init_attr *init)
{
	struct rxe_port *port;
	u32 qpn;

	qp->sq_sig_type = init->sq_sig_type;
	qp->attr.path_mtu = 1;
	qp->mtu = ib_mtu_enum_to_int(qp->attr.path_mtu);

	qpn = qp->pelem.index;
	port = &rxe->port;

	switch (init->qp_type) {
	case IB_QPT_SMI:
		qp->ibqp.qp_num = 0;
		port->qp_smi_index = qpn;
		qp->attr.port_num = init->port_num;
		break;

	case IB_QPT_GSI:
		qp->ibqp.qp_num = 1;
		port->qp_gsi_index = qpn;
		qp->attr.port_num = init->port_num;
		break;

	default:
		qp->ibqp.qp_num = qpn;
		break;
	}

	INIT_LIST_HEAD(&qp->grp_list);

	skb_queue_head_init(&qp->send_pkts);

	spin_lock_init(&qp->grp_lock);
	spin_lock_init(&qp->state_lock);

	spin_lock_init(&qp->req.task.state_lock);
	spin_lock_init(&qp->resp.task.state_lock);
	spin_lock_init(&qp->comp.task.state_lock);

	spin_lock_init(&qp->sq.sq_lock);
	spin_lock_init(&qp->rq.producer_lock);
	spin_lock_init(&qp->rq.consumer_lock);

	atomic_set(&qp->ssn, 0);
	atomic_set(&qp->skb_out, 0);
}

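/* initialize the requester side of the qp: the per-qp kernel UDP
 * socket used to send packets, the send queue and its mmap info for
 * user space, the requester and completer tasks, and the RC timers
 */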
static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
			   struct ib_qp_init_attr *init, struct ib_udata *udata,
			   struct rxe_create_qp_resp __user *uresp)
{
	int err;
	int wqe_size;

	err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
	if (err < 0)
		return err;
	qp->sk->sk->sk_user_data = qp;

	/* pick a source UDP port number for this QP based on
	 * the source QPN. this spreads traffic for different QPs
	 * across different NIC RX queues (while using a single
	 * flow for a given QP to maintain packet order).
	 * the port number must be in the Dynamic Ports range
	 * (0xc000 - 0xffff).
	 */
	qp->src_port = RXE_ROCE_V2_SPORT +
		(hash_32_generic(qp_num(qp), 14) & 0x3fff);
	qp->sq.max_wr = init->cap.max_send_wr;

	/* These caps are limited by rxe_qp_chk_cap() done by the caller */
	wqe_size = max_t(int, init->cap.max_send_sge * sizeof(struct ib_sge),
			 init->cap.max_inline_data);
	qp->sq.max_sge = init->cap.max_send_sge =
		wqe_size / sizeof(struct ib_sge);
	qp->sq.max_inline = init->cap.max_inline_data = wqe_size;
	wqe_size += sizeof(struct rxe_send_wqe);

	qp->sq.queue = rxe_queue_init(rxe, &qp->sq.max_wr, wqe_size);
	if (!qp->sq.queue)
		return -ENOMEM;

	err = do_mmap_info(rxe, uresp ? &uresp->sq_mi : NULL, udata,
			   qp->sq.queue->buf, qp->sq.queue->buf_size,
			   &qp->sq.queue->ip);

	if (err) {
		vfree(qp->sq.queue->buf);
		kfree(qp->sq.queue);
		qp->sq.queue = NULL;
		return err;
	}

	qp->req.wqe_index = producer_index(qp->sq.queue);
	qp->req.state = QP_STATE_RESET;
	qp->req.opcode = -1;
	qp->comp.opcode = -1;

	skb_queue_head_init(&qp->req_pkts);

	rxe_init_task(rxe, &qp->req.task, qp,
		      rxe_requester, "req");
	rxe_init_task(rxe, &qp->comp.task, qp,
		      rxe_completer, "comp");

	qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */
	if (init->qp_type == IB_QPT_RC) {
		timer_setup(&qp->rnr_nak_timer, rnr_nak_timer, 0);
		timer_setup(&qp->retrans_timer, retransmit_timer, 0);
	}
	return 0;
}

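/* initialize the responder side of the qp: the receive queue and its
 * mmap info (unless an SRQ is used) and the responder task
 */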
static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
			    struct ib_qp_init_attr *init,
			    struct ib_udata *udata,
			    struct rxe_create_qp_resp __user *uresp)
{
	int err;
	int wqe_size;

	if (!qp->srq) {
		qp->rq.max_wr = init->cap.max_recv_wr;
		qp->rq.max_sge = init->cap.max_recv_sge;

		wqe_size = rcv_wqe_size(qp->rq.max_sge);

		pr_debug("qp#%d max_wr = %d, max_sge = %d, wqe_size = %d\n",
			 qp_num(qp), qp->rq.max_wr, qp->rq.max_sge, wqe_size);

		qp->rq.queue = rxe_queue_init(rxe,
					      &qp->rq.max_wr,
					      wqe_size);
		if (!qp->rq.queue)
			return -ENOMEM;

		err = do_mmap_info(rxe, uresp ? &uresp->rq_mi : NULL, udata,
				   qp->rq.queue->buf, qp->rq.queue->buf_size,
				   &qp->rq.queue->ip);
		if (err) {
			vfree(qp->rq.queue->buf);
			kfree(qp->rq.queue);
			qp->rq.queue = NULL;
			return err;
		}
	}

	skb_queue_head_init(&qp->resp_pkts);

	rxe_init_task(rxe, &qp->resp.task, qp,
		      rxe_responder, "resp");

	qp->resp.opcode = OPCODE_NONE;
	qp->resp.msn = 0;
	qp->resp.state = QP_STATE_RESET;

	return 0;
}

/* called by the create qp verb */
int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
		     struct ib_qp_init_attr *init,
		     struct rxe_create_qp_resp __user *uresp,
		     struct ib_pd *ibpd,
		     struct ib_udata *udata)
{
	int err;
	struct rxe_cq *rcq = to_rcq(init->recv_cq);
	struct rxe_cq *scq = to_rcq(init->send_cq);
	struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL;

	rxe_add_ref(pd);
	rxe_add_ref(rcq);
	rxe_add_ref(scq);
	if (srq)
		rxe_add_ref(srq);

	qp->pd = pd;
	qp->rcq = rcq;
	qp->scq = scq;
	qp->srq = srq;

	rxe_qp_init_misc(rxe, qp, init);

	err = rxe_qp_init_req(rxe, qp, init, udata, uresp);
	if (err)
		goto err1;

	err = rxe_qp_init_resp(rxe, qp, init, udata, uresp);
	if (err)
		goto err2;

	qp->attr.qp_state = IB_QPS_RESET;
	qp->valid = 1;

	return 0;

err2:
	rxe_queue_cleanup(qp->sq.queue);
err1:
	qp->pd = NULL;
	qp->rcq = NULL;
	qp->scq = NULL;
	qp->srq = NULL;

	if (srq)
		rxe_drop_ref(srq);
	rxe_drop_ref(scq);
	rxe_drop_ref(rcq);
	rxe_drop_ref(pd);

	return err;
}

/* called by the query qp verb */
int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init)
{
	init->event_handler = qp->ibqp.event_handler;
	init->qp_context = qp->ibqp.qp_context;
	init->send_cq = qp->ibqp.send_cq;
	init->recv_cq = qp->ibqp.recv_cq;
	init->srq = qp->ibqp.srq;

	init->cap.max_send_wr = qp->sq.max_wr;
	init->cap.max_send_sge = qp->sq.max_sge;
	init->cap.max_inline_data = qp->sq.max_inline;

	if (!qp->srq) {
		init->cap.max_recv_wr = qp->rq.max_wr;
		init->cap.max_recv_sge = qp->rq.max_sge;
	}

	init->sq_sig_type = qp->sq_sig_type;

	init->qp_type = qp->ibqp.qp_type;
	init->port_num = 1;

	return 0;
}

/* called by the modify qp verb, this routine checks all the parameters before
 * making any changes
 */
int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
		    struct ib_qp_attr *attr, int mask)
{
	enum ib_qp_state cur_state = (mask & IB_QP_CUR_STATE) ?
				     attr->cur_qp_state : qp->attr.qp_state;
	enum ib_qp_state new_state = (mask & IB_QP_STATE) ?
				     attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask)) {
		pr_warn("invalid mask or state for qp\n");
		goto err1;
	}

	if (mask & IB_QP_STATE) {
		if (cur_state == IB_QPS_SQD) {
			if (qp->req.state == QP_STATE_DRAIN &&
			    new_state != IB_QPS_ERR)
				goto err1;
		}
	}

	if (mask & IB_QP_PORT) {
		if (!rdma_is_port_valid(&rxe->ib_dev, attr->port_num)) {
			pr_warn("invalid port %d\n", attr->port_num);
			goto err1;
		}
	}

	if (mask & IB_QP_CAP && rxe_qp_chk_cap(rxe, &attr->cap, !!qp->srq))
		goto err1;

	if (mask & IB_QP_AV && rxe_av_chk_attr(rxe, &attr->ah_attr))
		goto err1;

	if (mask & IB_QP_ALT_PATH) {
		if (rxe_av_chk_attr(rxe, &attr->alt_ah_attr))
			goto err1;
		if (!rdma_is_port_valid(&rxe->ib_dev, attr->alt_port_num)) {
			pr_warn("invalid alt port %d\n", attr->alt_port_num);
			goto err1;
		}
		if (attr->alt_timeout > 31) {
			pr_warn("invalid QP alt timeout %d > 31\n",
				attr->alt_timeout);
			goto err1;
		}
	}

	if (mask & IB_QP_PATH_MTU) {
		struct rxe_port *port = &rxe->port;

		enum ib_mtu max_mtu = port->attr.max_mtu;
		enum ib_mtu mtu = attr->path_mtu;

		if (mtu > max_mtu) {
			pr_debug("invalid mtu (%d) > (%d)\n",
				 ib_mtu_enum_to_int(mtu),
				 ib_mtu_enum_to_int(max_mtu));
			goto err1;
		}
	}

	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic > rxe->attr.max_qp_rd_atom) {
			pr_warn("invalid max_rd_atomic %d > %d\n",
				attr->max_rd_atomic,
				rxe->attr.max_qp_rd_atom);
			goto err1;
		}
	}

	if (mask & IB_QP_TIMEOUT) {
		if (attr->timeout > 31) {
			pr_warn("invalid QP timeout %d > 31\n",
				attr->timeout);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}

/* move the qp to the reset state */
static void rxe_qp_reset(struct rxe_qp *qp)
{
	/* stop tasks from running */
	rxe_disable_task(&qp->resp.task);

	/* stop request/comp */
	if (qp->sq.queue) {
		if (qp_type(qp) == IB_QPT_RC)
			rxe_disable_task(&qp->comp.task);
		rxe_disable_task(&qp->req.task);
	}

	/* move qp to the reset state */
	qp->req.state = QP_STATE_RESET;
	qp->resp.state = QP_STATE_RESET;

	/* let state machines reset themselves, drain work and packet
	 * queues, etc.
	 */
	__rxe_do_task(&qp->resp.task);

	if (qp->sq.queue) {
		__rxe_do_task(&qp->comp.task);
		__rxe_do_task(&qp->req.task);
		rxe_queue_reset(qp->sq.queue);
	}

	/* cleanup attributes */
	atomic_set(&qp->ssn, 0);
	qp->req.opcode = -1;
	qp->req.need_retry = 0;
	qp->req.noack_pkts = 0;
	qp->resp.msn = 0;
	qp->resp.opcode = -1;
	qp->resp.drop_msg = 0;
	qp->resp.goto_error = 0;
	qp->resp.sent_psn_nak = 0;

	if (qp->resp.mr) {
		rxe_drop_ref(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	cleanup_rd_atomic_resources(qp);

	/* reenable tasks */
	rxe_enable_task(&qp->resp.task);

	if (qp->sq.queue) {
		if (qp_type(qp) == IB_QPT_RC)
			rxe_enable_task(&qp->comp.task);

		rxe_enable_task(&qp->req.task);
	}
}

/* drain the send queue */
static void rxe_qp_drain(struct rxe_qp *qp)
{
	if (qp->sq.queue) {
		if (qp->req.state != QP_STATE_DRAINED) {
			qp->req.state = QP_STATE_DRAIN;
			if (qp_type(qp) == IB_QPT_RC)
				rxe_run_task(&qp->comp.task, 1);
			else
				__rxe_do_task(&qp->comp.task);
			rxe_run_task(&qp->req.task, 1);
		}
	}
}

/* move the qp to the error state */
void rxe_qp_error(struct rxe_qp *qp)
{
	qp->req.state = QP_STATE_ERROR;
	qp->resp.state = QP_STATE_ERROR;
	qp->attr.qp_state = IB_QPS_ERR;

	/* drain work and packet queues */
	rxe_run_task(&qp->resp.task, 1);

	if (qp_type(qp) == IB_QPT_RC)
		rxe_run_task(&qp->comp.task, 1);
	else
		__rxe_do_task(&qp->comp.task);
	rxe_run_task(&qp->req.task, 1);
}

/* called by the modify qp verb */
int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
		     struct ib_udata *udata)
{
	int err;

	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
		int max_rd_atomic = attr->max_rd_atomic ?
			roundup_pow_of_two(attr->max_rd_atomic) : 0;

		qp->attr.max_rd_atomic = max_rd_atomic;
		atomic_set(&qp->req.rd_atomic, max_rd_atomic);
	}

	if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		int max_dest_rd_atomic = attr->max_dest_rd_atomic ?
			roundup_pow_of_two(attr->max_dest_rd_atomic) : 0;

		qp->attr.max_dest_rd_atomic = max_dest_rd_atomic;

		free_rd_atomic_resources(qp);

		err = alloc_rd_atomic_resources(qp, max_dest_rd_atomic);
		if (err)
			return err;
	}

	if (mask & IB_QP_CUR_STATE)
		qp->attr.cur_qp_state = attr->qp_state;

	if (mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
		qp->attr.en_sqd_async_notify = attr->en_sqd_async_notify;

	if (mask & IB_QP_ACCESS_FLAGS)
		qp->attr.qp_access_flags = attr->qp_access_flags;

	if (mask & IB_QP_PKEY_INDEX)
		qp->attr.pkey_index = attr->pkey_index;

	if (mask & IB_QP_PORT)
		qp->attr.port_num = attr->port_num;

	if (mask & IB_QP_QKEY)
		qp->attr.qkey = attr->qkey;

	if (mask & IB_QP_AV)
		rxe_init_av(&attr->ah_attr, &qp->pri_av);

	if (mask & IB_QP_ALT_PATH) {
		rxe_init_av(&attr->alt_ah_attr, &qp->alt_av);
		qp->attr.alt_port_num = attr->alt_port_num;
		qp->attr.alt_pkey_index = attr->alt_pkey_index;
		qp->attr.alt_timeout = attr->alt_timeout;
	}

	if (mask & IB_QP_PATH_MTU) {
		qp->attr.path_mtu = attr->path_mtu;
		qp->mtu = ib_mtu_enum_to_int(attr->path_mtu);
	}

	if (mask & IB_QP_TIMEOUT) {
		qp->attr.timeout = attr->timeout;
		if (attr->timeout == 0) {
			qp->qp_timeout_jiffies = 0;
		} else {
			/* According to the spec, timeout = 4.096 * 2 ^ attr->timeout [us] */
			int j = nsecs_to_jiffies(4096ULL << attr->timeout);

			qp->qp_timeout_jiffies = j ? j : 1;
		}
	}

	if (mask & IB_QP_RETRY_CNT) {
		qp->attr.retry_cnt = attr->retry_cnt;
		qp->comp.retry_cnt = attr->retry_cnt;
		pr_debug("qp#%d set retry count = %d\n", qp_num(qp),
			 attr->retry_cnt);
	}

	if (mask & IB_QP_RNR_RETRY) {
		qp->attr.rnr_retry = attr->rnr_retry;
		qp->comp.rnr_retry = attr->rnr_retry;
		pr_debug("qp#%d set rnr retry count = %d\n", qp_num(qp),
			 attr->rnr_retry);
	}

	if (mask & IB_QP_RQ_PSN) {
		qp->attr.rq_psn = (attr->rq_psn & BTH_PSN_MASK);
		qp->resp.psn = qp->attr.rq_psn;
		pr_debug("qp#%d set resp psn = 0x%x\n", qp_num(qp),
			 qp->resp.psn);
	}

	if (mask & IB_QP_MIN_RNR_TIMER) {
		qp->attr.min_rnr_timer = attr->min_rnr_timer;
		pr_debug("qp#%d set min rnr timer = 0x%x\n", qp_num(qp),
			 attr->min_rnr_timer);
	}

	if (mask & IB_QP_SQ_PSN) {
		qp->attr.sq_psn = (attr->sq_psn & BTH_PSN_MASK);
		qp->req.psn = qp->attr.sq_psn;
		qp->comp.psn = qp->attr.sq_psn;
		pr_debug("qp#%d set req psn = 0x%x\n", qp_num(qp), qp->req.psn);
	}

	if (mask & IB_QP_PATH_MIG_STATE)
		qp->attr.path_mig_state = attr->path_mig_state;

	if (mask & IB_QP_DEST_QPN)
		qp->attr.dest_qp_num = attr->dest_qp_num;

	if (mask & IB_QP_STATE) {
		qp->attr.qp_state = attr->qp_state;

		switch (attr->qp_state) {
		case IB_QPS_RESET:
			pr_debug("qp#%d state -> RESET\n", qp_num(qp));
			rxe_qp_reset(qp);
			break;

		case IB_QPS_INIT:
			pr_debug("qp#%d state -> INIT\n", qp_num(qp));
			qp->req.state = QP_STATE_INIT;
			qp->resp.state = QP_STATE_INIT;
			break;

		case IB_QPS_RTR:
			pr_debug("qp#%d state -> RTR\n", qp_num(qp));
			qp->resp.state = QP_STATE_READY;
			break;

		case IB_QPS_RTS:
			pr_debug("qp#%d state -> RTS\n", qp_num(qp));
			qp->req.state = QP_STATE_READY;
			break;

		case IB_QPS_SQD:
			pr_debug("qp#%d state -> SQD\n", qp_num(qp));
			rxe_qp_drain(qp);
			break;

		case IB_QPS_SQE:
			pr_warn("qp#%d state -> SQE !!?\n", qp_num(qp));
			/* Not possible from modify_qp. */
			break;

		case IB_QPS_ERR:
			pr_debug("qp#%d state -> ERR\n", qp_num(qp));
			rxe_qp_error(qp);
			break;
		}
	}

	return 0;
}

/* called by the query qp verb */
int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask)
{
	*attr = qp->attr;

	attr->rq_psn = qp->resp.psn;
	attr->sq_psn = qp->req.psn;

	attr->cap.max_send_wr = qp->sq.max_wr;
	attr->cap.max_send_sge = qp->sq.max_sge;
	attr->cap.max_inline_data = qp->sq.max_inline;

	if (!qp->srq) {
		attr->cap.max_recv_wr = qp->rq.max_wr;
		attr->cap.max_recv_sge = qp->rq.max_sge;
	}

	rxe_av_to_attr(&qp->pri_av, &attr->ah_attr);
	rxe_av_to_attr(&qp->alt_av, &attr->alt_ah_attr);

	if (qp->req.state == QP_STATE_DRAIN) {
		attr->sq_draining = 1;
		/* applications that get this state
		 * typically spin on it; yield the
		 * processor
		 */
		cond_resched();
	} else {
		attr->sq_draining = 0;
	}

	pr_debug("attr->sq_draining = %d\n", attr->sq_draining);

	return 0;
}

/* called by the destroy qp verb */
void rxe_qp_destroy(struct rxe_qp *qp)
{
	qp->valid = 0;
	qp->qp_timeout_jiffies = 0;
	rxe_cleanup_task(&qp->resp.task);

	if (qp_type(qp) == IB_QPT_RC) {
		del_timer_sync(&qp->retrans_timer);
		del_timer_sync(&qp->rnr_nak_timer);
	}

	rxe_cleanup_task(&qp->req.task);
	rxe_cleanup_task(&qp->comp.task);

	/* flush out any receive wr's or pending requests */
	if (qp->req.task.func)
		__rxe_do_task(&qp->req.task);

	if (qp->sq.queue) {
		__rxe_do_task(&qp->comp.task);
		__rxe_do_task(&qp->req.task);
	}
}

/* called when the last reference to the qp is dropped */
static void rxe_qp_do_cleanup(struct work_struct *work)
{
	struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work);

	rxe_drop_all_mcast_groups(qp);

	if (qp->sq.queue)
		rxe_queue_cleanup(qp->sq.queue);

	if (qp->srq)
		rxe_drop_ref(qp->srq);

	if (qp->rq.queue)
		rxe_queue_cleanup(qp->rq.queue);

	if (qp->scq)
		rxe_drop_ref(qp->scq);
	if (qp->rcq)
		rxe_drop_ref(qp->rcq);
	if (qp->pd)
		rxe_drop_ref(qp->pd);

	if (qp->resp.mr) {
		rxe_drop_ref(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	free_rd_atomic_resources(qp);

	if (qp->sk) {
		if (qp_type(qp) == IB_QPT_RC)
			sk_dst_reset(qp->sk->sk);

		kernel_sock_shutdown(qp->sk, SHUT_RDWR);
		sock_release(qp->sk);
	}
}

/* called when the last reference to the qp is dropped */
void rxe_qp_cleanup(struct rxe_pool_entry *arg)
{
	struct rxe_qp *qp = container_of(arg, typeof(*qp), pelem);

	execute_in_process_context(rxe_qp_do_cleanup, &qp->cleanup_work);
}