/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *	   Redistribution and use in source and binary forms, with or
 *	   without modification, are permitted provided that the following
 *	   conditions are met:
 *
 *		- Redistributions of source code must retain the above
 *		  copyright notice, this list of conditions and the following
 *		  disclaimer.
 *
 *		- Redistributions in binary form must reproduce the above
 *		  copyright notice, this list of conditions and the following
 *		  disclaimer in the documentation and/or other materials
 *		  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_task.h"

char *rxe_qp_state_name[] = {
	[QP_STATE_RESET]	= "RESET",
	[QP_STATE_INIT]		= "INIT",
	[QP_STATE_READY]	= "READY",
	[QP_STATE_DRAIN]	= "DRAIN",
	[QP_STATE_DRAINED]	= "DRAINED",
	[QP_STATE_ERROR]	= "ERROR",
};

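/* check the requested queue pair capacities against the device limits */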
static int rxe_qp_chk_cap(struct rxe_dev *rxe, struct ib_qp_cap *cap,
			  int has_srq)
{
	if (cap->max_send_wr > rxe->attr.max_qp_wr) {
		pr_warn("invalid send wr = %d > %d\n",
			cap->max_send_wr, rxe->attr.max_qp_wr);
		goto err1;
	}

	if (cap->max_send_sge > rxe->attr.max_sge) {
		pr_warn("invalid send sge = %d > %d\n",
			cap->max_send_sge, rxe->attr.max_sge);
		goto err1;
	}

	if (!has_srq) {
		if (cap->max_recv_wr > rxe->attr.max_qp_wr) {
			pr_warn("invalid recv wr = %d > %d\n",
				cap->max_recv_wr, rxe->attr.max_qp_wr);
			goto err1;
		}

		if (cap->max_recv_sge > rxe->attr.max_sge) {
			pr_warn("invalid recv sge = %d > %d\n",
				cap->max_recv_sge, rxe->attr.max_sge);
			goto err1;
		}
	}

	if (cap->max_inline_data > rxe->max_inline_data) {
		pr_warn("invalid max inline data = %d > %d\n",
			cap->max_inline_data, rxe->max_inline_data);
		goto err1;
	}

	return 0;

err1:
	return -EINVAL;
}

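/* check the attributes passed to the create qp verb; receive queue
 * limits are skipped when the qp uses a shared receive queue
 */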
int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init)
{
	struct ib_qp_cap *cap = &init->cap;
	struct rxe_port *port;
	int port_num = init->port_num;

	if (!init->recv_cq || !init->send_cq) {
		pr_warn("missing cq\n");
		goto err1;
	}

	if (rxe_qp_chk_cap(rxe, cap, !!init->srq))
		goto err1;

	if (init->qp_type == IB_QPT_SMI || init->qp_type == IB_QPT_GSI) {
		if (port_num != 1) {
			pr_warn("invalid port = %d\n", port_num);
			goto err1;
		}

		port = &rxe->port;

		if (init->qp_type == IB_QPT_SMI && port->qp_smi_index) {
			pr_warn("SMI QP exists for port %d\n", port_num);
			goto err1;
		}

		if (init->qp_type == IB_QPT_GSI && port->qp_gsi_index) {
			pr_warn("GSI QP exists for port %d\n", port_num);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}

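/* allocate the responder-side resources used to reply to inbound
 * RDMA read and atomic requests
 */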
static int alloc_rd_atomic_resources(struct rxe_qp *qp, unsigned int n)
{
	qp->resp.res_head = 0;
	qp->resp.res_tail = 0;
	qp->resp.resources = kcalloc(n, sizeof(struct resp_res), GFP_KERNEL);

	if (!qp->resp.resources)
		return -ENOMEM;

	return 0;
}

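/* release each resource, then free the array itself */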
static void free_rd_atomic_resources(struct rxe_qp *qp)
{
	if (qp->resp.resources) {
		int i;

		for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
			struct resp_res *res = &qp->resp.resources[i];

			free_rd_atomic_resource(qp, res);
		}
		kfree(qp->resp.resources);
		qp->resp.resources = NULL;
	}
}

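/* drop whatever a single resource still holds: the saved atomic ack skb
 * and its qp reference, or the MR referenced by a read reply
 */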
void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res)
{
	if (res->type == RXE_ATOMIC_MASK) {
		rxe_drop_ref(qp);
		kfree_skb(res->atomic.skb);
	} else if (res->type == RXE_READ_MASK) {
		if (res->read.mr)
			rxe_drop_ref(res->read.mr);
	}
	res->type = 0;
}

static void cleanup_rd_atomic_resources(struct rxe_qp *qp)
{
	int i;
	struct resp_res *res;

	if (qp->resp.resources) {
		for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
			res = &qp->resp.resources[i];
			free_rd_atomic_resource(qp, res);
		}
	}
}

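/* set up the parts of the qp that do not depend on the queue type */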
static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
			     struct ib_qp_init_attr *init)
{
	struct rxe_port *port;
	u32 qpn;

	qp->sq_sig_type		= init->sq_sig_type;
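	/* start from the smallest MTU (enum value 1 == IB_MTU_256);
	 * modify_qp raises it once the path MTU is known
	 */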
	qp->attr.path_mtu	= 1;
	qp->mtu			= ib_mtu_enum_to_int(qp->attr.path_mtu);

	qpn			= qp->pelem.index;
	port			= &rxe->port;

	switch (init->qp_type) {
	case IB_QPT_SMI:
		qp->ibqp.qp_num		= 0;
		port->qp_smi_index	= qpn;
		qp->attr.port_num	= init->port_num;
		break;

	case IB_QPT_GSI:
		qp->ibqp.qp_num		= 1;
		port->qp_gsi_index	= qpn;
		qp->attr.port_num	= init->port_num;
		break;

	default:
		qp->ibqp.qp_num		= qpn;
		break;
	}

	INIT_LIST_HEAD(&qp->grp_list);

	skb_queue_head_init(&qp->send_pkts);

	spin_lock_init(&qp->grp_lock);
	spin_lock_init(&qp->state_lock);

	atomic_set(&qp->ssn, 0);
	atomic_set(&qp->skb_out, 0);
}

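/* set up the requester side of the qp: the send queue and the tasks
 * that drive the requester and completer state machines
 */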
static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
			   struct ib_qp_init_attr *init,
			   struct ib_ucontext *context, struct ib_udata *udata)
{
	int err;
	int wqe_size;

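	/* each qp gets its own kernel UDP socket for the transmit path */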
	err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
	if (err < 0)
		return err;
	qp->sk->sk->sk_user_data = qp;

	qp->sq.max_wr		= init->cap.max_send_wr;
	qp->sq.max_sge		= init->cap.max_send_sge;
	qp->sq.max_inline	= init->cap.max_inline_data;

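	/* a send wqe must be able to hold either the full scatter/gather
	 * list or the maximum amount of inline data, whichever is larger
	 */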
	wqe_size = max_t(int, sizeof(struct rxe_send_wqe) +
			 qp->sq.max_sge * sizeof(struct ib_sge),
			 sizeof(struct rxe_send_wqe) +
			 qp->sq.max_inline);

	qp->sq.queue = rxe_queue_init(rxe,
				      &qp->sq.max_wr,
				      wqe_size);
	if (!qp->sq.queue)
		return -ENOMEM;

	err = do_mmap_info(rxe, udata, true,
			   context, qp->sq.queue->buf,
			   qp->sq.queue->buf_size, &qp->sq.queue->ip);

	if (err) {
		vfree(qp->sq.queue->buf);
		kfree(qp->sq.queue);
		return err;
	}

	qp->req.wqe_index	= producer_index(qp->sq.queue);
	qp->req.state		= QP_STATE_RESET;
	qp->req.opcode		= -1;
	qp->comp.opcode		= -1;

	spin_lock_init(&qp->sq.sq_lock);
	skb_queue_head_init(&qp->req_pkts);

	rxe_init_task(rxe, &qp->req.task, qp,
		      rxe_requester, "req");
	rxe_init_task(rxe, &qp->comp.task, qp,
		      rxe_completer, "comp");

	qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */
	if (init->qp_type == IB_QPT_RC) {
		setup_timer(&qp->rnr_nak_timer, rnr_nak_timer, (unsigned long)qp);
		setup_timer(&qp->retrans_timer, retransmit_timer, (unsigned long)qp);
	}
	return 0;
}

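/* set up the responder side of the qp; the receive queue is only
 * allocated when the qp does not use a shared receive queue
 */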
static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
			    struct ib_qp_init_attr *init,
			    struct ib_ucontext *context, struct ib_udata *udata)
{
	int err;
	int wqe_size;

	if (!qp->srq) {
		qp->rq.max_wr		= init->cap.max_recv_wr;
		qp->rq.max_sge		= init->cap.max_recv_sge;

		wqe_size = rcv_wqe_size(qp->rq.max_sge);

		pr_debug("qp#%d max_wr = %d, max_sge = %d, wqe_size = %d\n",
			 qp_num(qp), qp->rq.max_wr, qp->rq.max_sge, wqe_size);

		qp->rq.queue = rxe_queue_init(rxe,
					      &qp->rq.max_wr,
					      wqe_size);
		if (!qp->rq.queue)
			return -ENOMEM;

		err = do_mmap_info(rxe, udata, false, context,
				   qp->rq.queue->buf,
				   qp->rq.queue->buf_size,
				   &qp->rq.queue->ip);
		if (err) {
			vfree(qp->rq.queue->buf);
			kfree(qp->rq.queue);
			return err;
		}
	}

	spin_lock_init(&qp->rq.producer_lock);
	spin_lock_init(&qp->rq.consumer_lock);

	skb_queue_head_init(&qp->resp_pkts);

	rxe_init_task(rxe, &qp->resp.task, qp,
		      rxe_responder, "resp");

	qp->resp.opcode		= OPCODE_NONE;
	qp->resp.msn		= 0;
	qp->resp.state		= QP_STATE_RESET;

	return 0;
}

/* called by the create qp verb */
int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
		     struct ib_qp_init_attr *init, struct ib_udata *udata,
		     struct ib_pd *ibpd)
{
	int err;
	struct rxe_cq *rcq = to_rcq(init->recv_cq);
	struct rxe_cq *scq = to_rcq(init->send_cq);
	struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL;
	struct ib_ucontext *context = udata ? ibpd->uobject->context : NULL;

	rxe_add_ref(pd);
	rxe_add_ref(rcq);
	rxe_add_ref(scq);
	if (srq)
		rxe_add_ref(srq);

	qp->pd			= pd;
	qp->rcq			= rcq;
	qp->scq			= scq;
	qp->srq			= srq;

	rxe_qp_init_misc(rxe, qp, init);

	err = rxe_qp_init_req(rxe, qp, init, context, udata);
	if (err)
		goto err1;

	err = rxe_qp_init_resp(rxe, qp, init, context, udata);
	if (err)
		goto err2;

	qp->attr.qp_state = IB_QPS_RESET;
	qp->valid = 1;

	return 0;

err2:
	rxe_queue_cleanup(qp->sq.queue);
err1:
	if (srq)
		rxe_drop_ref(srq);
	rxe_drop_ref(scq);
	rxe_drop_ref(rcq);
	rxe_drop_ref(pd);

	return err;
}

/* called by the query qp verb */
int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init)
{
	init->event_handler		= qp->ibqp.event_handler;
	init->qp_context		= qp->ibqp.qp_context;
	init->send_cq			= qp->ibqp.send_cq;
	init->recv_cq			= qp->ibqp.recv_cq;
	init->srq			= qp->ibqp.srq;

	init->cap.max_send_wr		= qp->sq.max_wr;
	init->cap.max_send_sge		= qp->sq.max_sge;
	init->cap.max_inline_data	= qp->sq.max_inline;

	if (!qp->srq) {
		init->cap.max_recv_wr		= qp->rq.max_wr;
		init->cap.max_recv_sge		= qp->rq.max_sge;
	}

	init->sq_sig_type		= qp->sq_sig_type;

	init->qp_type			= qp->ibqp.qp_type;
	init->port_num			= 1;

	return 0;
}

/* called by the modify qp verb, this routine checks all the parameters before
 * making any changes
 */
int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
		    struct ib_qp_attr *attr, int mask)
{
	enum ib_qp_state cur_state = (mask & IB_QP_CUR_STATE) ?
					attr->cur_qp_state : qp->attr.qp_state;
	enum ib_qp_state new_state = (mask & IB_QP_STATE) ?
					attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask,
				IB_LINK_LAYER_ETHERNET)) {
		pr_warn("invalid mask or state for qp\n");
		goto err1;
	}

	if (mask & IB_QP_STATE) {
		if (cur_state == IB_QPS_SQD) {
			if (qp->req.state == QP_STATE_DRAIN &&
			    new_state != IB_QPS_ERR)
				goto err1;
		}
	}

	if (mask & IB_QP_PORT) {
		if (attr->port_num != 1) {
			pr_warn("invalid port %d\n", attr->port_num);
			goto err1;
		}
	}

	if (mask & IB_QP_CAP && rxe_qp_chk_cap(rxe, &attr->cap, !!qp->srq))
		goto err1;

	if (mask & IB_QP_AV && rxe_av_chk_attr(rxe, &attr->ah_attr))
		goto err1;

	if (mask & IB_QP_ALT_PATH) {
		if (rxe_av_chk_attr(rxe, &attr->alt_ah_attr))
			goto err1;
		if (attr->alt_port_num != 1) {
			pr_warn("invalid alt port %d\n", attr->alt_port_num);
			goto err1;
		}
		if (attr->alt_timeout > 31) {
			pr_warn("invalid QP alt timeout %d > 31\n",
				attr->alt_timeout);
			goto err1;
		}
	}

	if (mask & IB_QP_PATH_MTU) {
		struct rxe_port *port = &rxe->port;

		enum ib_mtu max_mtu = port->attr.max_mtu;
		enum ib_mtu mtu = attr->path_mtu;

		if (mtu > max_mtu) {
			pr_debug("invalid mtu (%d) > (%d)\n",
				 ib_mtu_enum_to_int(mtu),
				 ib_mtu_enum_to_int(max_mtu));
			goto err1;
		}
	}

	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic > rxe->attr.max_qp_rd_atom) {
			pr_warn("invalid max_rd_atomic %d > %d\n",
				attr->max_rd_atomic,
				rxe->attr.max_qp_rd_atom);
			goto err1;
		}
	}

	if (mask & IB_QP_TIMEOUT) {
		if (attr->timeout > 31) {
			pr_warn("invalid QP timeout %d > 31\n",
				attr->timeout);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}

/* move the qp to the reset state */
static void rxe_qp_reset(struct rxe_qp *qp)
{
	/* stop tasks from running */
	rxe_disable_task(&qp->resp.task);

	/* stop request/comp */
	if (qp->sq.queue) {
		if (qp_type(qp) == IB_QPT_RC)
			rxe_disable_task(&qp->comp.task);
		rxe_disable_task(&qp->req.task);
	}

	/* move qp to the reset state */
	qp->req.state = QP_STATE_RESET;
	qp->resp.state = QP_STATE_RESET;

	/* let the state machines reset themselves and drain their work
	 * and packet queues
	 */
	__rxe_do_task(&qp->resp.task);

	if (qp->sq.queue) {
		__rxe_do_task(&qp->comp.task);
		__rxe_do_task(&qp->req.task);
		rxe_queue_reset(qp->sq.queue);
	}

	/* cleanup attributes */
	atomic_set(&qp->ssn, 0);
	qp->req.opcode = -1;
	qp->req.need_retry = 0;
	qp->req.noack_pkts = 0;
	qp->resp.msn = 0;
	qp->resp.opcode = -1;
	qp->resp.drop_msg = 0;
	qp->resp.goto_error = 0;
	qp->resp.sent_psn_nak = 0;

	if (qp->resp.mr) {
		rxe_drop_ref(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	cleanup_rd_atomic_resources(qp);

	/* reenable tasks */
	rxe_enable_task(&qp->resp.task);

	if (qp->sq.queue) {
		if (qp_type(qp) == IB_QPT_RC)
			rxe_enable_task(&qp->comp.task);

		rxe_enable_task(&qp->req.task);
	}
}

/* drain the send queue */
static void rxe_qp_drain(struct rxe_qp *qp)
{
	if (qp->sq.queue) {
		if (qp->req.state != QP_STATE_DRAINED) {
			qp->req.state = QP_STATE_DRAIN;
			if (qp_type(qp) == IB_QPT_RC)
				rxe_run_task(&qp->comp.task, 1);
			else
				__rxe_do_task(&qp->comp.task);
			rxe_run_task(&qp->req.task, 1);
		}
	}
}

/* move the qp to the error state */
void rxe_qp_error(struct rxe_qp *qp)
{
	qp->req.state = QP_STATE_ERROR;
	qp->resp.state = QP_STATE_ERROR;
	qp->attr.qp_state = IB_QPS_ERR;

	/* drain work and packet queues */
	rxe_run_task(&qp->resp.task, 1);

	if (qp_type(qp) == IB_QPT_RC)
		rxe_run_task(&qp->comp.task, 1);
	else
		__rxe_do_task(&qp->comp.task);
	rxe_run_task(&qp->req.task, 1);
}

/* called by the modify qp verb */
int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
		     struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	union ib_gid sgid;
	struct ib_gid_attr sgid_attr;

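	/* max_rd_atomic limits how many RDMA read/atomic operations this
	 * qp may have outstanding as the initiator; the counter below
	 * tracks the remaining credit
	 */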
	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
		int max_rd_atomic = __roundup_pow_of_two(attr->max_rd_atomic);

		qp->attr.max_rd_atomic = max_rd_atomic;
		atomic_set(&qp->req.rd_atomic, max_rd_atomic);
	}

	if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		int max_dest_rd_atomic =
			__roundup_pow_of_two(attr->max_dest_rd_atomic);

		qp->attr.max_dest_rd_atomic = max_dest_rd_atomic;

		free_rd_atomic_resources(qp);

		err = alloc_rd_atomic_resources(qp, max_dest_rd_atomic);
		if (err)
			return err;
	}

	if (mask & IB_QP_CUR_STATE)
		qp->attr.cur_qp_state = attr->qp_state;

	if (mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
		qp->attr.en_sqd_async_notify = attr->en_sqd_async_notify;

	if (mask & IB_QP_ACCESS_FLAGS)
		qp->attr.qp_access_flags = attr->qp_access_flags;

	if (mask & IB_QP_PKEY_INDEX)
		qp->attr.pkey_index = attr->pkey_index;

	if (mask & IB_QP_PORT)
		qp->attr.port_num = attr->port_num;

	if (mask & IB_QP_QKEY)
		qp->attr.qkey = attr->qkey;

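	/* look up the source GID for the new address vector and fill in
	 * the IP addressing info derived from it
	 */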
	if (mask & IB_QP_AV) {
		ib_get_cached_gid(&rxe->ib_dev, 1,
				  rdma_ah_read_grh(&attr->ah_attr)->sgid_index,
				  &sgid, &sgid_attr);
		rxe_av_from_attr(rxe, attr->port_num, &qp->pri_av,
				 &attr->ah_attr);
		rxe_av_fill_ip_info(rxe, &qp->pri_av, &attr->ah_attr,
				    &sgid_attr, &sgid);
		if (sgid_attr.ndev)
			dev_put(sgid_attr.ndev);
	}

	if (mask & IB_QP_ALT_PATH) {
		u8 sgid_index =
			rdma_ah_read_grh(&attr->alt_ah_attr)->sgid_index;

		ib_get_cached_gid(&rxe->ib_dev, 1, sgid_index,
				  &sgid, &sgid_attr);

		rxe_av_from_attr(rxe, attr->alt_port_num, &qp->alt_av,
				 &attr->alt_ah_attr);
		rxe_av_fill_ip_info(rxe, &qp->alt_av, &attr->alt_ah_attr,
				    &sgid_attr, &sgid);
		if (sgid_attr.ndev)
			dev_put(sgid_attr.ndev);

		qp->attr.alt_port_num = attr->alt_port_num;
		qp->attr.alt_pkey_index = attr->alt_pkey_index;
		qp->attr.alt_timeout = attr->alt_timeout;
	}

	if (mask & IB_QP_PATH_MTU) {
		qp->attr.path_mtu = attr->path_mtu;
		qp->mtu = ib_mtu_enum_to_int(attr->path_mtu);
	}

	if (mask & IB_QP_TIMEOUT) {
		qp->attr.timeout = attr->timeout;
		if (attr->timeout == 0) {
			qp->qp_timeout_jiffies = 0;
		} else {
			/* According to the spec, timeout = 4.096 * 2 ^ attr->timeout [us] */
			int j = nsecs_to_jiffies(4096ULL << attr->timeout);

			qp->qp_timeout_jiffies = j ? j : 1;
		}
	}

	if (mask & IB_QP_RETRY_CNT) {
		qp->attr.retry_cnt = attr->retry_cnt;
		qp->comp.retry_cnt = attr->retry_cnt;
		pr_debug("qp#%d set retry count = %d\n", qp_num(qp),
			 attr->retry_cnt);
	}

	if (mask & IB_QP_RNR_RETRY) {
		qp->attr.rnr_retry = attr->rnr_retry;
		qp->comp.rnr_retry = attr->rnr_retry;
		pr_debug("qp#%d set rnr retry count = %d\n", qp_num(qp),
			 attr->rnr_retry);
	}

	if (mask & IB_QP_RQ_PSN) {
		qp->attr.rq_psn = (attr->rq_psn & BTH_PSN_MASK);
		qp->resp.psn = qp->attr.rq_psn;
		pr_debug("qp#%d set resp psn = 0x%x\n", qp_num(qp),
			 qp->resp.psn);
	}

	if (mask & IB_QP_MIN_RNR_TIMER) {
		qp->attr.min_rnr_timer = attr->min_rnr_timer;
		pr_debug("qp#%d set min rnr timer = 0x%x\n", qp_num(qp),
			 attr->min_rnr_timer);
	}

	if (mask & IB_QP_SQ_PSN) {
		qp->attr.sq_psn = (attr->sq_psn & BTH_PSN_MASK);
		qp->req.psn = qp->attr.sq_psn;
		qp->comp.psn = qp->attr.sq_psn;
		pr_debug("qp#%d set req psn = 0x%x\n", qp_num(qp), qp->req.psn);
	}

	if (mask & IB_QP_PATH_MIG_STATE)
		qp->attr.path_mig_state = attr->path_mig_state;

	if (mask & IB_QP_DEST_QPN)
		qp->attr.dest_qp_num = attr->dest_qp_num;

	if (mask & IB_QP_STATE) {
		qp->attr.qp_state = attr->qp_state;

		switch (attr->qp_state) {
		case IB_QPS_RESET:
			pr_debug("qp#%d state -> RESET\n", qp_num(qp));
			rxe_qp_reset(qp);
			break;

		case IB_QPS_INIT:
			pr_debug("qp#%d state -> INIT\n", qp_num(qp));
			qp->req.state = QP_STATE_INIT;
			qp->resp.state = QP_STATE_INIT;
			break;

		case IB_QPS_RTR:
			pr_debug("qp#%d state -> RTR\n", qp_num(qp));
			qp->resp.state = QP_STATE_READY;
			break;

		case IB_QPS_RTS:
			pr_debug("qp#%d state -> RTS\n", qp_num(qp));
			qp->req.state = QP_STATE_READY;
			break;

		case IB_QPS_SQD:
			pr_debug("qp#%d state -> SQD\n", qp_num(qp));
			rxe_qp_drain(qp);
			break;

		case IB_QPS_SQE:
			pr_warn("qp#%d state -> SQE !!?\n", qp_num(qp));
			/* Not possible from modify_qp. */
			break;

		case IB_QPS_ERR:
			pr_debug("qp#%d state -> ERR\n", qp_num(qp));
			rxe_qp_error(qp);
			break;
		}
	}

	return 0;
}

/* called by the query qp verb */
int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	*attr = qp->attr;

	attr->rq_psn				= qp->resp.psn;
	attr->sq_psn				= qp->req.psn;

	attr->cap.max_send_wr			= qp->sq.max_wr;
	attr->cap.max_send_sge			= qp->sq.max_sge;
	attr->cap.max_inline_data		= qp->sq.max_inline;

	if (!qp->srq) {
		attr->cap.max_recv_wr		= qp->rq.max_wr;
		attr->cap.max_recv_sge		= qp->rq.max_sge;
	}

	rxe_av_to_attr(rxe, &qp->pri_av, &attr->ah_attr);
	rxe_av_to_attr(rxe, &qp->alt_av, &attr->alt_ah_attr);

	if (qp->req.state == QP_STATE_DRAIN) {
		attr->sq_draining = 1;
		/* applications that get this state typically spin on it;
		 * yield the processor
		 */
		cond_resched();
	} else {
		attr->sq_draining = 0;
	}

	pr_debug("attr->sq_draining = %d\n", attr->sq_draining);

	return 0;
}

/* called by the destroy qp verb */
void rxe_qp_destroy(struct rxe_qp *qp)
{
	qp->valid = 0;
	qp->qp_timeout_jiffies = 0;
	rxe_cleanup_task(&qp->resp.task);

	if (qp_type(qp) == IB_QPT_RC) {
		del_timer_sync(&qp->retrans_timer);
		del_timer_sync(&qp->rnr_nak_timer);
	}

	rxe_cleanup_task(&qp->req.task);
	rxe_cleanup_task(&qp->comp.task);

	/* flush out any receive wr's or pending requests */
	__rxe_do_task(&qp->req.task);
	if (qp->sq.queue) {
		__rxe_do_task(&qp->comp.task);
		__rxe_do_task(&qp->req.task);
	}
}

/* runs in process context to do the actual cleanup once the last
 * reference to the qp is dropped
 */
static void rxe_qp_do_cleanup(struct work_struct *work)
{
	struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work);

	rxe_drop_all_mcast_groups(qp);

	if (qp->sq.queue)
		rxe_queue_cleanup(qp->sq.queue);

	if (qp->srq)
		rxe_drop_ref(qp->srq);

	if (qp->rq.queue)
		rxe_queue_cleanup(qp->rq.queue);

	if (qp->scq)
		rxe_drop_ref(qp->scq);
	if (qp->rcq)
		rxe_drop_ref(qp->rcq);
	if (qp->pd)
		rxe_drop_ref(qp->pd);

	if (qp->resp.mr) {
		rxe_drop_ref(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	if (qp_type(qp) == IB_QPT_RC)
		sk_dst_reset(qp->sk->sk);

	free_rd_atomic_resources(qp);

	kernel_sock_shutdown(qp->sk, SHUT_RDWR);
	sock_release(qp->sk);
}

/* called when the last reference to the qp is dropped */
void rxe_qp_cleanup(struct rxe_pool_entry *arg)
{
	struct rxe_qp *qp = container_of(arg, typeof(*qp), pelem);

	execute_in_process_context(rxe_qp_do_cleanup, &qp->cleanup_work);
}