// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <rdma/uverbs_ioctl.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_task.h"

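/* check the consumer's requested send/recv WR, SGE and inline data limits
 * against the rxe device limits; returns 0 if they fit, -EINVAL otherwise
 */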
static int rxe_qp_chk_cap(struct rxe_dev *rxe, struct ib_qp_cap *cap,
			  int has_srq)
{
	if (cap->max_send_wr > rxe->attr.max_qp_wr) {
		pr_warn("invalid send wr = %d > %d\n",
			cap->max_send_wr, rxe->attr.max_qp_wr);
		goto err1;
	}

	if (cap->max_send_sge > rxe->attr.max_send_sge) {
		pr_warn("invalid send sge = %d > %d\n",
			cap->max_send_sge, rxe->attr.max_send_sge);
		goto err1;
	}

	if (!has_srq) {
		if (cap->max_recv_wr > rxe->attr.max_qp_wr) {
			pr_warn("invalid recv wr = %d > %d\n",
				cap->max_recv_wr, rxe->attr.max_qp_wr);
			goto err1;
		}

		if (cap->max_recv_sge > rxe->attr.max_recv_sge) {
			pr_warn("invalid recv sge = %d > %d\n",
				cap->max_recv_sge, rxe->attr.max_recv_sge);
			goto err1;
		}
	}

	if (cap->max_inline_data > rxe->max_inline_data) {
		pr_warn("invalid max inline data = %d > %d\n",
			cap->max_inline_data, rxe->max_inline_data);
		goto err1;
	}

	return 0;

err1:
	return -EINVAL;
}

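/* validate the QP type, CQs, capacities and (for SMI/GSI) port number
 * requested by the create qp verb before any resources are allocated
 */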
int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init)
{
	struct ib_qp_cap *cap = &init->cap;
	struct rxe_port *port;
	int port_num = init->port_num;

	switch (init->qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	case IB_QPT_RC:
	case IB_QPT_UC:
	case IB_QPT_UD:
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (!init->recv_cq || !init->send_cq) {
		pr_warn("missing cq\n");
		goto err1;
	}

	if (rxe_qp_chk_cap(rxe, cap, !!init->srq))
		goto err1;

	if (init->qp_type == IB_QPT_SMI || init->qp_type == IB_QPT_GSI) {
		if (!rdma_is_port_valid(&rxe->ib_dev, port_num)) {
			pr_warn("invalid port = %d\n", port_num);
			goto err1;
		}

		port = &rxe->port;

		if (init->qp_type == IB_QPT_SMI && port->qp_smi_index) {
			pr_warn("SMI QP exists for port %d\n", port_num);
			goto err1;
		}

		if (init->qp_type == IB_QPT_GSI && port->qp_gsi_index) {
			pr_warn("GSI QP exists for port %d\n", port_num);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}

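/* allocate the array of responder resources used to hold replayed RDMA
 * read and atomic replies; n is the maximum number of outstanding inbound
 * read/atomic operations
 */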
static int alloc_rd_atomic_resources(struct rxe_qp *qp, unsigned int n)
{
	qp->resp.res_head = 0;
	qp->resp.res_tail = 0;
	qp->resp.resources = kcalloc(n, sizeof(struct resp_res), GFP_KERNEL);

	if (!qp->resp.resources)
		return -ENOMEM;

	return 0;
}

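/* release each responder resource and then free the array itself */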
static void free_rd_atomic_resources(struct rxe_qp *qp)
{
	if (qp->resp.resources) {
		int i;

		for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
			struct resp_res *res = &qp->resp.resources[i];

			free_rd_atomic_resource(qp, res);
		}
		kfree(qp->resp.resources);
		qp->resp.resources = NULL;
	}
}

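/* release whatever a single responder resource still holds: the saved
 * atomic reply skb or the reference to the MR used for a read reply
 */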
void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res)
{
	if (res->type == RXE_ATOMIC_MASK) {
		kfree_skb(res->atomic.skb);
	} else if (res->type == RXE_READ_MASK) {
		if (res->read.mr)
			rxe_drop_ref(res->read.mr);
	}
	res->type = 0;
}

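/* like free_rd_atomic_resources() but leaves the array allocated for
 * reuse; called when the qp is reset
 */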
static void cleanup_rd_atomic_resources(struct rxe_qp *qp)
{
	int i;
	struct resp_res *res;

	if (qp->resp.resources) {
		for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
			res = &qp->resp.resources[i];
			free_rd_atomic_resource(qp, res);
		}
	}
}

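/* initialization common to all qp types: qp number, default mtu, locks,
 * packet queues and counters
 */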
static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
			     struct ib_qp_init_attr *init)
{
	struct rxe_port *port;
	u32 qpn;

	qp->sq_sig_type = init->sq_sig_type;
	qp->attr.path_mtu = 1;
	qp->mtu = ib_mtu_enum_to_int(qp->attr.path_mtu);

	qpn = qp->pelem.index;
	port = &rxe->port;

	switch (init->qp_type) {
	case IB_QPT_SMI:
		qp->ibqp.qp_num = 0;
		port->qp_smi_index = qpn;
		qp->attr.port_num = init->port_num;
		break;

	case IB_QPT_GSI:
		qp->ibqp.qp_num = 1;
		port->qp_gsi_index = qpn;
		qp->attr.port_num = init->port_num;
		break;

	default:
		qp->ibqp.qp_num = qpn;
		break;
	}

	INIT_LIST_HEAD(&qp->grp_list);

	skb_queue_head_init(&qp->send_pkts);

	spin_lock_init(&qp->grp_lock);
	spin_lock_init(&qp->state_lock);

	spin_lock_init(&qp->req.task.state_lock);
	spin_lock_init(&qp->resp.task.state_lock);
	spin_lock_init(&qp->comp.task.state_lock);

	spin_lock_init(&qp->sq.sq_lock);
	spin_lock_init(&qp->rq.producer_lock);
	spin_lock_init(&qp->rq.consumer_lock);

	skb_queue_head_init(&qp->req_pkts);
	skb_queue_head_init(&qp->resp_pkts);

	atomic_set(&qp->ssn, 0);
	atomic_set(&qp->skb_out, 0);
}

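/* set up the requester side of the qp: the kernel UDP socket used to send
 * packets, the send queue (and its mmap info shared with user space), and
 * the requester/completer tasks and RC timers
 */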
static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
			   struct ib_qp_init_attr *init, struct ib_udata *udata,
			   struct rxe_create_qp_resp __user *uresp)
{
	int err;
	int wqe_size;
	enum queue_type type;

	err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
	if (err < 0)
		return err;
	qp->sk->sk->sk_user_data = qp;

	/* pick a source UDP port number for this QP based on
	 * the source QPN. this spreads traffic for different QPs
	 * across different NIC RX queues (while using a single
	 * flow for a given QP to maintain packet order).
	 * the port number must be in the Dynamic Ports range
	 * (0xc000 - 0xffff).
	 */
	qp->src_port = RXE_ROCE_V2_SPORT +
		(hash_32_generic(qp_num(qp), 14) & 0x3fff);
	qp->sq.max_wr = init->cap.max_send_wr;

	/* These caps are limited by rxe_qp_chk_cap() done by the caller */
	wqe_size = max_t(int, init->cap.max_send_sge * sizeof(struct ib_sge),
			 init->cap.max_inline_data);
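	/* size the WQE for whichever is larger, since inline data is carried
	 * in the same per-WQE space as the SGE list, and report the resulting
	 * limits back to the consumer through init->cap
	 */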
	qp->sq.max_sge = init->cap.max_send_sge =
		wqe_size / sizeof(struct ib_sge);
	qp->sq.max_inline = init->cap.max_inline_data = wqe_size;
	wqe_size += sizeof(struct rxe_send_wqe);

	type = QUEUE_TYPE_FROM_CLIENT;
	qp->sq.queue = rxe_queue_init(rxe, &qp->sq.max_wr,
				      wqe_size, type);
	if (!qp->sq.queue)
		return -ENOMEM;

	err = do_mmap_info(rxe, uresp ? &uresp->sq_mi : NULL, udata,
			   qp->sq.queue->buf, qp->sq.queue->buf_size,
			   &qp->sq.queue->ip);

	if (err) {
		vfree(qp->sq.queue->buf);
		kfree(qp->sq.queue);
		qp->sq.queue = NULL;
		return err;
	}

	qp->req.wqe_index = queue_get_producer(qp->sq.queue,
					       QUEUE_TYPE_FROM_CLIENT);

	qp->req.state = QP_STATE_RESET;
	qp->req.opcode = -1;
	qp->comp.opcode = -1;

	rxe_init_task(&qp->req.task, qp, rxe_requester);
	rxe_init_task(&qp->comp.task, qp, rxe_completer);

	qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */
	if (init->qp_type == IB_QPT_RC) {
		timer_setup(&qp->rnr_nak_timer, rnr_nak_timer, 0);
		timer_setup(&qp->retrans_timer, retransmit_timer, 0);
	}
	return 0;
}

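/* set up the responder side of the qp: the receive queue and its mmap
 * info (unless an SRQ is used) and the responder task
 */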
static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
			    struct ib_qp_init_attr *init,
			    struct ib_udata *udata,
			    struct rxe_create_qp_resp __user *uresp)
{
	int err;
	int wqe_size;
	enum queue_type type;

	if (!qp->srq) {
		qp->rq.max_wr = init->cap.max_recv_wr;
		qp->rq.max_sge = init->cap.max_recv_sge;

		wqe_size = rcv_wqe_size(qp->rq.max_sge);

		pr_debug("qp#%d max_wr = %d, max_sge = %d, wqe_size = %d\n",
			 qp_num(qp), qp->rq.max_wr, qp->rq.max_sge, wqe_size);

		type = QUEUE_TYPE_FROM_CLIENT;
		qp->rq.queue = rxe_queue_init(rxe, &qp->rq.max_wr,
					      wqe_size, type);
		if (!qp->rq.queue)
			return -ENOMEM;

		err = do_mmap_info(rxe, uresp ? &uresp->rq_mi : NULL, udata,
				   qp->rq.queue->buf, qp->rq.queue->buf_size,
				   &qp->rq.queue->ip);
		if (err) {
			vfree(qp->rq.queue->buf);
			kfree(qp->rq.queue);
			qp->rq.queue = NULL;
			return err;
		}
	}

	rxe_init_task(&qp->resp.task, qp, rxe_responder);

	qp->resp.opcode = OPCODE_NONE;
	qp->resp.msn = 0;
	qp->resp.state = QP_STATE_RESET;

	return 0;
}

/* called by the create qp verb */
int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
		     struct ib_qp_init_attr *init,
		     struct rxe_create_qp_resp __user *uresp,
		     struct ib_pd *ibpd,
		     struct ib_udata *udata)
{
	int err;
	struct rxe_cq *rcq = to_rcq(init->recv_cq);
	struct rxe_cq *scq = to_rcq(init->send_cq);
	struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL;

	rxe_add_ref(pd);
	rxe_add_ref(rcq);
	rxe_add_ref(scq);
	if (srq)
		rxe_add_ref(srq);

	qp->pd = pd;
	qp->rcq = rcq;
	qp->scq = scq;
	qp->srq = srq;

	rxe_qp_init_misc(rxe, qp, init);

	err = rxe_qp_init_req(rxe, qp, init, udata, uresp);
	if (err)
		goto err1;

	err = rxe_qp_init_resp(rxe, qp, init, udata, uresp);
	if (err)
		goto err2;

	qp->attr.qp_state = IB_QPS_RESET;
	qp->valid = 1;

	return 0;

err2:
	rxe_queue_cleanup(qp->sq.queue);
	qp->sq.queue = NULL;
err1:
	qp->pd = NULL;
	qp->rcq = NULL;
	qp->scq = NULL;
	qp->srq = NULL;

	if (srq)
		rxe_drop_ref(srq);
	rxe_drop_ref(scq);
	rxe_drop_ref(rcq);
	rxe_drop_ref(pd);

	return err;
}

/* called by the query qp verb */
int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init)
{
	init->event_handler = qp->ibqp.event_handler;
	init->qp_context = qp->ibqp.qp_context;
	init->send_cq = qp->ibqp.send_cq;
	init->recv_cq = qp->ibqp.recv_cq;
	init->srq = qp->ibqp.srq;

	init->cap.max_send_wr = qp->sq.max_wr;
	init->cap.max_send_sge = qp->sq.max_sge;
	init->cap.max_inline_data = qp->sq.max_inline;

	if (!qp->srq) {
		init->cap.max_recv_wr = qp->rq.max_wr;
		init->cap.max_recv_sge = qp->rq.max_sge;
	}

	init->sq_sig_type = qp->sq_sig_type;

	init->qp_type = qp->ibqp.qp_type;
	init->port_num = 1;

	return 0;
}

/* called by the modify qp verb, this routine checks all the parameters before
 * making any changes
 */
int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
		    struct ib_qp_attr *attr, int mask)
{
	enum ib_qp_state cur_state = (mask & IB_QP_CUR_STATE) ?
				     attr->cur_qp_state : qp->attr.qp_state;
	enum ib_qp_state new_state = (mask & IB_QP_STATE) ?
				     attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask)) {
		pr_warn("invalid mask or state for qp\n");
		goto err1;
	}

	if (mask & IB_QP_STATE) {
		if (cur_state == IB_QPS_SQD) {
			if (qp->req.state == QP_STATE_DRAIN &&
			    new_state != IB_QPS_ERR)
				goto err1;
		}
	}

	if (mask & IB_QP_PORT) {
		if (!rdma_is_port_valid(&rxe->ib_dev, attr->port_num)) {
			pr_warn("invalid port %d\n", attr->port_num);
			goto err1;
		}
	}

	if (mask & IB_QP_CAP && rxe_qp_chk_cap(rxe, &attr->cap, !!qp->srq))
		goto err1;

	if (mask & IB_QP_AV && rxe_av_chk_attr(rxe, &attr->ah_attr))
		goto err1;

	if (mask & IB_QP_ALT_PATH) {
		if (rxe_av_chk_attr(rxe, &attr->alt_ah_attr))
			goto err1;
		if (!rdma_is_port_valid(&rxe->ib_dev, attr->alt_port_num)) {
			pr_warn("invalid alt port %d\n", attr->alt_port_num);
			goto err1;
		}
		if (attr->alt_timeout > 31) {
			pr_warn("invalid QP alt timeout %d > 31\n",
				attr->alt_timeout);
			goto err1;
		}
	}

	if (mask & IB_QP_PATH_MTU) {
		struct rxe_port *port = &rxe->port;

		enum ib_mtu max_mtu = port->attr.max_mtu;
		enum ib_mtu mtu = attr->path_mtu;

		if (mtu > max_mtu) {
			pr_debug("invalid mtu (%d) > (%d)\n",
				 ib_mtu_enum_to_int(mtu),
				 ib_mtu_enum_to_int(max_mtu));
			goto err1;
		}
	}

	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic > rxe->attr.max_qp_rd_atom) {
			pr_warn("invalid max_rd_atomic %d > %d\n",
				attr->max_rd_atomic,
				rxe->attr.max_qp_rd_atom);
			goto err1;
		}
	}

	if (mask & IB_QP_TIMEOUT) {
		if (attr->timeout > 31) {
			pr_warn("invalid QP timeout %d > 31\n",
				attr->timeout);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}

/* move the qp to the reset state */
static void rxe_qp_reset(struct rxe_qp *qp)
{
	/* stop tasks from running */
	rxe_disable_task(&qp->resp.task);

	/* stop request/comp */
	if (qp->sq.queue) {
		if (qp_type(qp) == IB_QPT_RC)
			rxe_disable_task(&qp->comp.task);
		rxe_disable_task(&qp->req.task);
	}

	/* move qp to the reset state */
	qp->req.state = QP_STATE_RESET;
	qp->resp.state = QP_STATE_RESET;

	/* let the state machines reset themselves, draining the work and
	 * packet queues etc.
	 */
	__rxe_do_task(&qp->resp.task);

	if (qp->sq.queue) {
		__rxe_do_task(&qp->comp.task);
		__rxe_do_task(&qp->req.task);
		rxe_queue_reset(qp->sq.queue);
	}

	/* cleanup attributes */
	atomic_set(&qp->ssn, 0);
	qp->req.opcode = -1;
	qp->req.need_retry = 0;
	qp->req.noack_pkts = 0;
	qp->resp.msn = 0;
	qp->resp.opcode = -1;
	qp->resp.drop_msg = 0;
	qp->resp.goto_error = 0;
	qp->resp.sent_psn_nak = 0;

	if (qp->resp.mr) {
		rxe_drop_ref(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	cleanup_rd_atomic_resources(qp);

	/* reenable tasks */
	rxe_enable_task(&qp->resp.task);

	if (qp->sq.queue) {
		if (qp_type(qp) == IB_QPT_RC)
			rxe_enable_task(&qp->comp.task);

		rxe_enable_task(&qp->req.task);
	}
}

/* drain the send queue */
static void rxe_qp_drain(struct rxe_qp *qp)
{
	if (qp->sq.queue) {
		if (qp->req.state != QP_STATE_DRAINED) {
			qp->req.state = QP_STATE_DRAIN;
			if (qp_type(qp) == IB_QPT_RC)
				rxe_run_task(&qp->comp.task, 1);
			else
				__rxe_do_task(&qp->comp.task);
			rxe_run_task(&qp->req.task, 1);
		}
	}
}

/* move the qp to the error state */
void rxe_qp_error(struct rxe_qp *qp)
{
	qp->req.state = QP_STATE_ERROR;
	qp->resp.state = QP_STATE_ERROR;
	qp->attr.qp_state = IB_QPS_ERR;

	/* drain work and packet queues */
	rxe_run_task(&qp->resp.task, 1);

	if (qp_type(qp) == IB_QPT_RC)
		rxe_run_task(&qp->comp.task, 1);
	else
		__rxe_do_task(&qp->comp.task);
	rxe_run_task(&qp->req.task, 1);
}

/* called by the modify qp verb */
int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
		     struct ib_udata *udata)
{
	int err;

	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
		int max_rd_atomic = attr->max_rd_atomic ?
			roundup_pow_of_two(attr->max_rd_atomic) : 0;

		qp->attr.max_rd_atomic = max_rd_atomic;
		atomic_set(&qp->req.rd_atomic, max_rd_atomic);
	}

	if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		int max_dest_rd_atomic = attr->max_dest_rd_atomic ?
			roundup_pow_of_two(attr->max_dest_rd_atomic) : 0;

		qp->attr.max_dest_rd_atomic = max_dest_rd_atomic;

		free_rd_atomic_resources(qp);

		err = alloc_rd_atomic_resources(qp, max_dest_rd_atomic);
		if (err)
			return err;
	}

	if (mask & IB_QP_CUR_STATE)
		qp->attr.cur_qp_state = attr->qp_state;

	if (mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
		qp->attr.en_sqd_async_notify = attr->en_sqd_async_notify;

	if (mask & IB_QP_ACCESS_FLAGS)
		qp->attr.qp_access_flags = attr->qp_access_flags;

	if (mask & IB_QP_PKEY_INDEX)
		qp->attr.pkey_index = attr->pkey_index;

	if (mask & IB_QP_PORT)
		qp->attr.port_num = attr->port_num;

	if (mask & IB_QP_QKEY)
		qp->attr.qkey = attr->qkey;

	if (mask & IB_QP_AV)
		rxe_init_av(&attr->ah_attr, &qp->pri_av);

	if (mask & IB_QP_ALT_PATH) {
		rxe_init_av(&attr->alt_ah_attr, &qp->alt_av);
		qp->attr.alt_port_num = attr->alt_port_num;
		qp->attr.alt_pkey_index = attr->alt_pkey_index;
		qp->attr.alt_timeout = attr->alt_timeout;
	}

	if (mask & IB_QP_PATH_MTU) {
		qp->attr.path_mtu = attr->path_mtu;
		qp->mtu = ib_mtu_enum_to_int(attr->path_mtu);
	}

	if (mask & IB_QP_TIMEOUT) {
		qp->attr.timeout = attr->timeout;
		if (attr->timeout == 0) {
			qp->qp_timeout_jiffies = 0;
		} else {
			/* According to the spec, timeout = 4.096 * 2 ^ attr->timeout [us] */
			int j = nsecs_to_jiffies(4096ULL << attr->timeout);

			qp->qp_timeout_jiffies = j ? j : 1;
		}
	}

	if (mask & IB_QP_RETRY_CNT) {
		qp->attr.retry_cnt = attr->retry_cnt;
		qp->comp.retry_cnt = attr->retry_cnt;
		pr_debug("qp#%d set retry count = %d\n", qp_num(qp),
			 attr->retry_cnt);
	}

	if (mask & IB_QP_RNR_RETRY) {
		qp->attr.rnr_retry = attr->rnr_retry;
		qp->comp.rnr_retry = attr->rnr_retry;
		pr_debug("qp#%d set rnr retry count = %d\n", qp_num(qp),
			 attr->rnr_retry);
	}

	if (mask & IB_QP_RQ_PSN) {
		qp->attr.rq_psn = (attr->rq_psn & BTH_PSN_MASK);
		qp->resp.psn = qp->attr.rq_psn;
		pr_debug("qp#%d set resp psn = 0x%x\n", qp_num(qp),
			 qp->resp.psn);
	}

	if (mask & IB_QP_MIN_RNR_TIMER) {
		qp->attr.min_rnr_timer = attr->min_rnr_timer;
		pr_debug("qp#%d set min rnr timer = 0x%x\n", qp_num(qp),
			 attr->min_rnr_timer);
	}

	if (mask & IB_QP_SQ_PSN) {
		qp->attr.sq_psn = (attr->sq_psn & BTH_PSN_MASK);
		qp->req.psn = qp->attr.sq_psn;
		qp->comp.psn = qp->attr.sq_psn;
		pr_debug("qp#%d set req psn = 0x%x\n", qp_num(qp), qp->req.psn);
	}

	if (mask & IB_QP_PATH_MIG_STATE)
		qp->attr.path_mig_state = attr->path_mig_state;

	if (mask & IB_QP_DEST_QPN)
		qp->attr.dest_qp_num = attr->dest_qp_num;

	if (mask & IB_QP_STATE) {
		qp->attr.qp_state = attr->qp_state;

		switch (attr->qp_state) {
		case IB_QPS_RESET:
			pr_debug("qp#%d state -> RESET\n", qp_num(qp));
			rxe_qp_reset(qp);
			break;

		case IB_QPS_INIT:
			pr_debug("qp#%d state -> INIT\n", qp_num(qp));
			qp->req.state = QP_STATE_INIT;
			qp->resp.state = QP_STATE_INIT;
			break;

		case IB_QPS_RTR:
			pr_debug("qp#%d state -> RTR\n", qp_num(qp));
			qp->resp.state = QP_STATE_READY;
			break;

		case IB_QPS_RTS:
			pr_debug("qp#%d state -> RTS\n", qp_num(qp));
			qp->req.state = QP_STATE_READY;
			break;

		case IB_QPS_SQD:
			pr_debug("qp#%d state -> SQD\n", qp_num(qp));
			rxe_qp_drain(qp);
			break;

		case IB_QPS_SQE:
			pr_warn("qp#%d state -> SQE !!?\n", qp_num(qp));
			/* Not possible from modify_qp. */
			break;

		case IB_QPS_ERR:
			pr_debug("qp#%d state -> ERR\n", qp_num(qp));
			rxe_qp_error(qp);
			break;
		}
	}

	return 0;
}

/* called by the query qp verb */
int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask)
{
	*attr = qp->attr;

	attr->rq_psn = qp->resp.psn;
	attr->sq_psn = qp->req.psn;

	attr->cap.max_send_wr = qp->sq.max_wr;
	attr->cap.max_send_sge = qp->sq.max_sge;
	attr->cap.max_inline_data = qp->sq.max_inline;

	if (!qp->srq) {
		attr->cap.max_recv_wr = qp->rq.max_wr;
		attr->cap.max_recv_sge = qp->rq.max_sge;
	}

	rxe_av_to_attr(&qp->pri_av, &attr->ah_attr);
	rxe_av_to_attr(&qp->alt_av, &attr->alt_ah_attr);

	if (qp->req.state == QP_STATE_DRAIN) {
		attr->sq_draining = 1;
		/* applications that get this state
		 * typically spin on it. yield the
		 * processor
		 */
		cond_resched();
	} else {
		attr->sq_draining = 0;
	}

	pr_debug("attr->sq_draining = %d\n", attr->sq_draining);

	return 0;
}

/* called by the destroy qp verb */
void rxe_qp_destroy(struct rxe_qp *qp)
{
	qp->valid = 0;
	qp->qp_timeout_jiffies = 0;
	rxe_cleanup_task(&qp->resp.task);

	if (qp_type(qp) == IB_QPT_RC) {
		del_timer_sync(&qp->retrans_timer);
		del_timer_sync(&qp->rnr_nak_timer);
	}

	rxe_cleanup_task(&qp->req.task);
	rxe_cleanup_task(&qp->comp.task);

	/* flush out any receive wr's or pending requests */
	if (qp->req.task.func)
		__rxe_do_task(&qp->req.task);

	if (qp->sq.queue) {
		__rxe_do_task(&qp->comp.task);
		__rxe_do_task(&qp->req.task);
	}
}

/* called when the last reference to the qp is dropped */
static void rxe_qp_do_cleanup(struct work_struct *work)
{
	struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work);

	rxe_drop_all_mcast_groups(qp);

	if (qp->sq.queue)
		rxe_queue_cleanup(qp->sq.queue);

	if (qp->srq)
		rxe_drop_ref(qp->srq);

	if (qp->rq.queue)
		rxe_queue_cleanup(qp->rq.queue);

	if (qp->scq)
		rxe_drop_ref(qp->scq);
	if (qp->rcq)
		rxe_drop_ref(qp->rcq);
	if (qp->pd)
		rxe_drop_ref(qp->pd);

	if (qp->resp.mr) {
		rxe_drop_ref(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	free_rd_atomic_resources(qp);

	if (qp->sk) {
		if (qp_type(qp) == IB_QPT_RC)
			sk_dst_reset(qp->sk->sk);

		kernel_sock_shutdown(qp->sk, SHUT_RDWR);
		sock_release(qp->sk);
	}
}

/* called when the last reference to the qp is dropped */
void rxe_qp_cleanup(struct rxe_pool_entry *arg)
{
	struct rxe_qp *qp = container_of(arg, typeof(*qp), pelem);

	execute_in_process_context(rxe_qp_do_cleanup, &qp->cleanup_work);
}