/*
 * Copyright (c) 2012-2016 VMware, Inc.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of EITHER the GNU General Public License
 * version 2 as published by the Free Software Foundation or the BSD
 * 2-Clause License. This program is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
 * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License version 2 for more details at
 * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program available in the file COPYING in the main
 * directory of this source tree.
 *
 * The BSD 2-Clause License
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <asm/page.h>
#include <linux/io.h>
#include <linux/wait.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>

#include "pvrdma.h"

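/* Look up the send and receive CQs backing a queue pair. */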
static inline void get_cqs(struct pvrdma_qp *qp, struct pvrdma_cq **send_cq,
			   struct pvrdma_cq **recv_cq)
{
	*send_cq = to_vcq(qp->ibqp.send_cq);
	*recv_cq = to_vcq(qp->ibqp.recv_cq);
}

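/*
 * Lock both of a QP's completion queues. When the CQs differ, the one
 * with the lower cq_handle is always taken first, so concurrent callers
 * agree on the ordering and cannot deadlock against each other.
 */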
static void pvrdma_lock_cqs(struct pvrdma_cq *scq, struct pvrdma_cq *rcq,
			    unsigned long *scq_flags,
			    unsigned long *rcq_flags)
	__acquires(scq->cq_lock) __acquires(rcq->cq_lock)
{
	if (scq == rcq) {
		spin_lock_irqsave(&scq->cq_lock, *scq_flags);
		__acquire(rcq->cq_lock);
	} else if (scq->cq_handle < rcq->cq_handle) {
		spin_lock_irqsave(&scq->cq_lock, *scq_flags);
		spin_lock_irqsave_nested(&rcq->cq_lock, *rcq_flags,
					 SINGLE_DEPTH_NESTING);
	} else {
		spin_lock_irqsave(&rcq->cq_lock, *rcq_flags);
		spin_lock_irqsave_nested(&scq->cq_lock, *scq_flags,
					 SINGLE_DEPTH_NESTING);
	}
}

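/*
 * Unlock both of a QP's completion queues, releasing the locks in the
 * reverse of the order in which pvrdma_lock_cqs() acquired them.
 */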
static void pvrdma_unlock_cqs(struct pvrdma_cq *scq, struct pvrdma_cq *rcq,
			      unsigned long *scq_flags,
			      unsigned long *rcq_flags)
	__releases(scq->cq_lock) __releases(rcq->cq_lock)
{
	if (scq == rcq) {
		__release(rcq->cq_lock);
		spin_unlock_irqrestore(&scq->cq_lock, *scq_flags);
	} else if (scq->cq_handle < rcq->cq_handle) {
		spin_unlock_irqrestore(&rcq->cq_lock, *rcq_flags);
		spin_unlock_irqrestore(&scq->cq_lock, *scq_flags);
	} else {
		spin_unlock_irqrestore(&scq->cq_lock, *scq_flags);
		spin_unlock_irqrestore(&rcq->cq_lock, *rcq_flags);
	}
}

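/*
 * Return a queue pair to the RESET state: flush its pending completions
 * and, for kernel queue pairs, rewind the ring state so both work queues
 * appear empty.
 */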
static void pvrdma_reset_qp(struct pvrdma_qp *qp)
{
	struct pvrdma_cq *scq, *rcq;
	unsigned long scq_flags, rcq_flags;

	/* Clean up cqes */
	get_cqs(qp, &scq, &rcq);
	pvrdma_lock_cqs(scq, rcq, &scq_flags, &rcq_flags);

	_pvrdma_flush_cqe(qp, scq);
	if (scq != rcq)
		_pvrdma_flush_cqe(qp, rcq);

	pvrdma_unlock_cqs(scq, rcq, &scq_flags, &rcq_flags);

	/*
	 * Reset queuepair. The checks are because usermode queuepairs won't
	 * have kernel ringstates.
	 */
	if (qp->rq.ring) {
		atomic_set(&qp->rq.ring->cons_head, 0);
		atomic_set(&qp->rq.ring->prod_tail, 0);
	}
	if (qp->sq.ring) {
		atomic_set(&qp->sq.ring->cons_head, 0);
		atomic_set(&qp->sq.ring->prod_tail, 0);
	}
}

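/*
 * Size the receive queue from the requested capabilities: check them
 * against the device limits, round the WR and SGE counts up to powers
 * of two, write the granted values back to req_cap and compute the
 * number of pages the queue needs.
 */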
static int pvrdma_set_rq_size(struct pvrdma_dev *dev,
			      struct ib_qp_cap *req_cap,
			      struct pvrdma_qp *qp)
{
	if (req_cap->max_recv_wr > dev->dsr->caps.max_qp_wr ||
	    req_cap->max_recv_sge > dev->dsr->caps.max_sge) {
		dev_warn(&dev->pdev->dev, "recv queue size invalid\n");
		return -EINVAL;
	}

	qp->rq.wqe_cnt = roundup_pow_of_two(max(1U, req_cap->max_recv_wr));
	qp->rq.max_sg = roundup_pow_of_two(max(1U, req_cap->max_recv_sge));

	/* Write back */
	req_cap->max_recv_wr = qp->rq.wqe_cnt;
	req_cap->max_recv_sge = qp->rq.max_sg;

	qp->rq.wqe_size = roundup_pow_of_two(sizeof(struct pvrdma_rq_wqe_hdr) +
					     sizeof(struct pvrdma_sge) *
					     qp->rq.max_sg);
	qp->npages_recv = (qp->rq.wqe_cnt * qp->rq.wqe_size + PAGE_SIZE - 1) /
			  PAGE_SIZE;

	return 0;
}

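/*
 * Size the send queue, mirroring pvrdma_set_rq_size() and additionally
 * reserving PVRDMA_QP_NUM_HEADER_PAGES pages at the front of the send
 * buffer.
 */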
static int pvrdma_set_sq_size(struct pvrdma_dev *dev, struct ib_qp_cap *req_cap,
			      struct pvrdma_qp *qp)
{
	if (req_cap->max_send_wr > dev->dsr->caps.max_qp_wr ||
	    req_cap->max_send_sge > dev->dsr->caps.max_sge) {
		dev_warn(&dev->pdev->dev, "send queue size invalid\n");
		return -EINVAL;
	}

	qp->sq.wqe_cnt = roundup_pow_of_two(max(1U, req_cap->max_send_wr));
	qp->sq.max_sg = roundup_pow_of_two(max(1U, req_cap->max_send_sge));

	/* Write back */
	req_cap->max_send_wr = qp->sq.wqe_cnt;
	req_cap->max_send_sge = qp->sq.max_sg;

	qp->sq.wqe_size = roundup_pow_of_two(sizeof(struct pvrdma_sq_wqe_hdr) +
					     sizeof(struct pvrdma_sge) *
					     qp->sq.max_sg);
	/* Note: one extra page for the header. */
	qp->npages_send = PVRDMA_QP_NUM_HEADER_PAGES +
			  (qp->sq.wqe_cnt * qp->sq.wqe_size + PAGE_SIZE - 1) /
								PAGE_SIZE;

	return 0;
}

/**
 * pvrdma_create_qp - create queue pair
 * @pd: protection domain
 * @init_attr: queue pair attributes
 * @udata: user data
 *
 * @return: the ib_qp pointer on success, otherwise returns an errno.
 */
struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
			       struct ib_qp_init_attr *init_attr,
			       struct ib_udata *udata)
{
	struct pvrdma_qp *qp = NULL;
	struct pvrdma_dev *dev = to_vdev(pd->device);
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_create_qp *cmd = &req.create_qp;
	struct pvrdma_cmd_create_qp_resp *resp = &rsp.create_qp_resp;
	struct pvrdma_create_qp ucmd;
	unsigned long flags;
	int ret;

	if (init_attr->create_flags) {
		dev_warn(&dev->pdev->dev,
			 "invalid create queuepair flags %#x\n",
			 init_attr->create_flags);
		return ERR_PTR(-EINVAL);
	}

	if (init_attr->qp_type != IB_QPT_RC &&
	    init_attr->qp_type != IB_QPT_UD &&
	    init_attr->qp_type != IB_QPT_GSI) {
		dev_warn(&dev->pdev->dev, "queuepair type %d not supported\n",
			 init_attr->qp_type);
		return ERR_PTR(-EINVAL);
	}

	if (!atomic_add_unless(&dev->num_qps, 1, dev->dsr->caps.max_qp))
		return ERR_PTR(-ENOMEM);

	switch (init_attr->qp_type) {
	case IB_QPT_GSI:
		if (init_attr->port_num == 0 ||
		    init_attr->port_num > pd->device->phys_port_cnt ||
		    udata) {
			dev_warn(&dev->pdev->dev, "invalid queuepair attrs\n");
			ret = -EINVAL;
			goto err_qp;
		}
		/* fall through */
	case IB_QPT_RC:
	case IB_QPT_UD:
		qp = kzalloc(sizeof(*qp), GFP_KERNEL);
		if (!qp) {
			ret = -ENOMEM;
			goto err_qp;
		}

		spin_lock_init(&qp->sq.lock);
		spin_lock_init(&qp->rq.lock);
		mutex_init(&qp->mutex);
		atomic_set(&qp->refcnt, 1);
		init_waitqueue_head(&qp->wait);

		qp->state = IB_QPS_RESET;

		if (pd->uobject && udata) {
			dev_dbg(&dev->pdev->dev,
				"create queuepair from user space\n");

			if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
				ret = -EFAULT;
				goto err_qp;
			}

			/* set qp->sq.wqe_cnt, shift, buf_size.. */
			qp->rumem = ib_umem_get(pd->uobject->context,
						ucmd.rbuf_addr,
						ucmd.rbuf_size, 0, 0);
			if (IS_ERR(qp->rumem)) {
				ret = PTR_ERR(qp->rumem);
				goto err_qp;
			}

			qp->sumem = ib_umem_get(pd->uobject->context,
						ucmd.sbuf_addr,
						ucmd.sbuf_size, 0, 0);
			if (IS_ERR(qp->sumem)) {
				ib_umem_release(qp->rumem);
				ret = PTR_ERR(qp->sumem);
				goto err_qp;
			}

			qp->npages_send = ib_umem_page_count(qp->sumem);
			qp->npages_recv = ib_umem_page_count(qp->rumem);
			qp->npages = qp->npages_send + qp->npages_recv;
		} else {
			qp->is_kernel = true;

			ret = pvrdma_set_sq_size(to_vdev(pd->device),
						 &init_attr->cap, qp);
			if (ret)
				goto err_qp;

			ret = pvrdma_set_rq_size(to_vdev(pd->device),
						 &init_attr->cap, qp);
			if (ret)
				goto err_qp;

			qp->npages = qp->npages_send + qp->npages_recv;

			/* Skip header page. */
			qp->sq.offset = PVRDMA_QP_NUM_HEADER_PAGES * PAGE_SIZE;

			/* Recv queue pages are after send pages. */
			qp->rq.offset = qp->npages_send * PAGE_SIZE;
		}

		if (qp->npages < 0 || qp->npages > PVRDMA_PAGE_DIR_MAX_PAGES) {
			dev_warn(&dev->pdev->dev,
				 "overflow pages in queuepair\n");
			ret = -EINVAL;
			goto err_umem;
		}

		ret = pvrdma_page_dir_init(dev, &qp->pdir, qp->npages,
					   qp->is_kernel);
		if (ret) {
			dev_warn(&dev->pdev->dev,
				 "could not allocate page directory\n");
			goto err_umem;
		}

		if (!qp->is_kernel) {
			pvrdma_page_dir_insert_umem(&qp->pdir, qp->sumem, 0);
			pvrdma_page_dir_insert_umem(&qp->pdir, qp->rumem,
						    qp->npages_send);
		} else {
			/* Ring state is always the first page. */
			qp->sq.ring = qp->pdir.pages[0];
			qp->rq.ring = &qp->sq.ring[1];
		}
		break;
	default:
		ret = -EINVAL;
		goto err_qp;
	}

	/* Not supported */
	init_attr->cap.max_inline_data = 0;

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_CREATE_QP;
	cmd->pd_handle = to_vpd(pd)->pd_handle;
	cmd->send_cq_handle = to_vcq(init_attr->send_cq)->cq_handle;
	cmd->recv_cq_handle = to_vcq(init_attr->recv_cq)->cq_handle;
	cmd->max_send_wr = init_attr->cap.max_send_wr;
	cmd->max_recv_wr = init_attr->cap.max_recv_wr;
	cmd->max_send_sge = init_attr->cap.max_send_sge;
	cmd->max_recv_sge = init_attr->cap.max_recv_sge;
	cmd->max_inline_data = init_attr->cap.max_inline_data;
	cmd->sq_sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;
	cmd->qp_type = ib_qp_type_to_pvrdma(init_attr->qp_type);
	cmd->access_flags = IB_ACCESS_LOCAL_WRITE;
	cmd->total_chunks = qp->npages;
	cmd->send_chunks = qp->npages_send - PVRDMA_QP_NUM_HEADER_PAGES;
	cmd->pdir_dma = qp->pdir.dir_dma;

	dev_dbg(&dev->pdev->dev, "create queuepair with %d, %d, %d, %d\n",
		cmd->max_send_wr, cmd->max_recv_wr, cmd->max_send_sge,
		cmd->max_recv_sge);

	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_QP_RESP);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not create queuepair, error: %d\n", ret);
		goto err_pdir;
	}

	/* max_send_wr/_recv_wr/_send_sge/_recv_sge/_inline_data */
	qp->qp_handle = resp->qpn;
	qp->port = init_attr->port_num;
	qp->ibqp.qp_num = resp->qpn;
	spin_lock_irqsave(&dev->qp_tbl_lock, flags);
	dev->qp_tbl[qp->qp_handle % dev->dsr->caps.max_qp] = qp;
	spin_unlock_irqrestore(&dev->qp_tbl_lock, flags);

	return &qp->ibqp;

err_pdir:
	pvrdma_page_dir_cleanup(dev, &qp->pdir);
err_umem:
	if (pd->uobject && udata) {
		if (qp->rumem)
			ib_umem_release(qp->rumem);
		if (qp->sumem)
			ib_umem_release(qp->sumem);
	}
err_qp:
	kfree(qp);
	atomic_dec(&dev->num_qps);

	return ERR_PTR(ret);
}

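/*
 * Tear down a queue pair: flush its CQEs, remove it from the device's
 * QP table, wait for all references to drop, then release its user
 * memory (if any), its page directory and the structure itself.
 */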
static void pvrdma_free_qp(struct pvrdma_qp *qp)
{
	struct pvrdma_dev *dev = to_vdev(qp->ibqp.device);
	struct pvrdma_cq *scq;
	struct pvrdma_cq *rcq;
	unsigned long flags, scq_flags, rcq_flags;

	/* In case cq is polling */
	get_cqs(qp, &scq, &rcq);
	pvrdma_lock_cqs(scq, rcq, &scq_flags, &rcq_flags);

	_pvrdma_flush_cqe(qp, scq);
	if (scq != rcq)
		_pvrdma_flush_cqe(qp, rcq);

	spin_lock_irqsave(&dev->qp_tbl_lock, flags);
	dev->qp_tbl[qp->qp_handle] = NULL;
	spin_unlock_irqrestore(&dev->qp_tbl_lock, flags);

	pvrdma_unlock_cqs(scq, rcq, &scq_flags, &rcq_flags);

	atomic_dec(&qp->refcnt);
	wait_event(qp->wait, !atomic_read(&qp->refcnt));

	if (!qp->is_kernel) {
		if (qp->rumem)
			ib_umem_release(qp->rumem);
		if (qp->sumem)
			ib_umem_release(qp->sumem);
	}

	pvrdma_page_dir_cleanup(dev, &qp->pdir);

	kfree(qp);

	atomic_dec(&dev->num_qps);
}

/**
 * pvrdma_destroy_qp - destroy a queue pair
 * @qp: the queue pair to destroy
 *
 * @return: 0 on success.
 */
int pvrdma_destroy_qp(struct ib_qp *qp)
{
	struct pvrdma_qp *vqp = to_vqp(qp);
	union pvrdma_cmd_req req;
	struct pvrdma_cmd_destroy_qp *cmd = &req.destroy_qp;
	int ret;

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_DESTROY_QP;
	cmd->qp_handle = vqp->qp_handle;

	ret = pvrdma_cmd_post(to_vdev(qp->device), &req, NULL, 0);
	if (ret < 0)
		dev_warn(&to_vdev(qp->device)->pdev->dev,
			 "destroy queuepair failed, error: %d\n", ret);

	pvrdma_free_qp(vqp);

	return 0;
}

/**
 * pvrdma_modify_qp - modify queue pair attributes
 * @ibqp: the queue pair
 * @attr: the new queue pair's attributes
 * @attr_mask: attributes mask
 * @udata: user data
 *
 * @return: 0 on success, otherwise returns an errno.
 */
int pvrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		     int attr_mask, struct ib_udata *udata)
{
	struct pvrdma_dev *dev = to_vdev(ibqp->device);
	struct pvrdma_qp *qp = to_vqp(ibqp);
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_modify_qp *cmd = &req.modify_qp;
	int cur_state, next_state;
	int ret;

	/* The sanity checks below must be done with the QP mutex held. */
	mutex_lock(&qp->mutex);
	cur_state = (attr_mask & IB_QP_CUR_STATE) ? attr->cur_qp_state :
		qp->state;
	next_state = (attr_mask & IB_QP_STATE) ? attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, next_state, ibqp->qp_type,
				attr_mask, IB_LINK_LAYER_ETHERNET)) {
		ret = -EINVAL;
		goto out;
	}

	if (attr_mask & IB_QP_PORT) {
		if (attr->port_num == 0 ||
		    attr->port_num > ibqp->device->phys_port_cnt) {
			ret = -EINVAL;
			goto out;
		}
	}

	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
		if (attr->min_rnr_timer > 31) {
			ret = -EINVAL;
			goto out;
		}
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		if (attr->pkey_index >= dev->dsr->caps.max_pkeys) {
			ret = -EINVAL;
			goto out;
		}
	}

	if (attr_mask & IB_QP_QKEY)
		qp->qkey = attr->qkey;

	if (cur_state == next_state && cur_state == IB_QPS_RESET) {
		ret = 0;
		goto out;
	}

	qp->state = next_state;
	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_MODIFY_QP;
	cmd->qp_handle = qp->qp_handle;
	cmd->attr_mask = ib_qp_attr_mask_to_pvrdma(attr_mask);
	cmd->attrs.qp_state = ib_qp_state_to_pvrdma(attr->qp_state);
	cmd->attrs.cur_qp_state =
		ib_qp_state_to_pvrdma(attr->cur_qp_state);
	cmd->attrs.path_mtu = ib_mtu_to_pvrdma(attr->path_mtu);
	cmd->attrs.path_mig_state =
		ib_mig_state_to_pvrdma(attr->path_mig_state);
	cmd->attrs.qkey = attr->qkey;
	cmd->attrs.rq_psn = attr->rq_psn;
	cmd->attrs.sq_psn = attr->sq_psn;
	cmd->attrs.dest_qp_num = attr->dest_qp_num;
	cmd->attrs.qp_access_flags =
		ib_access_flags_to_pvrdma(attr->qp_access_flags);
	cmd->attrs.pkey_index = attr->pkey_index;
	cmd->attrs.alt_pkey_index = attr->alt_pkey_index;
	cmd->attrs.en_sqd_async_notify = attr->en_sqd_async_notify;
	cmd->attrs.sq_draining = attr->sq_draining;
	cmd->attrs.max_rd_atomic = attr->max_rd_atomic;
	cmd->attrs.max_dest_rd_atomic = attr->max_dest_rd_atomic;
	cmd->attrs.min_rnr_timer = attr->min_rnr_timer;
	cmd->attrs.port_num = attr->port_num;
	cmd->attrs.timeout = attr->timeout;
	cmd->attrs.retry_cnt = attr->retry_cnt;
	cmd->attrs.rnr_retry = attr->rnr_retry;
	cmd->attrs.alt_port_num = attr->alt_port_num;
	cmd->attrs.alt_timeout = attr->alt_timeout;
	ib_qp_cap_to_pvrdma(&cmd->attrs.cap, &attr->cap);
	rdma_ah_attr_to_pvrdma(&cmd->attrs.ah_attr, &attr->ah_attr);
	rdma_ah_attr_to_pvrdma(&cmd->attrs.alt_ah_attr, &attr->alt_ah_attr);

	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_MODIFY_QP_RESP);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not modify queuepair, error: %d\n", ret);
	} else if (rsp.hdr.err > 0) {
		dev_warn(&dev->pdev->dev,
			 "cannot modify queuepair, error: %d\n", rsp.hdr.err);
		ret = -EINVAL;
	}

	if (ret == 0 && next_state == IB_QPS_RESET)
		pvrdma_reset_qp(qp);

out:
	mutex_unlock(&qp->mutex);

	return ret;
}

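/* Return a pointer to the n'th WQE slot in the send or receive queue. */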
static inline void *get_sq_wqe(struct pvrdma_qp *qp, unsigned int n)
{
	return pvrdma_page_dir_get_ptr(&qp->pdir,
				       qp->sq.offset + n * qp->sq.wqe_size);
}

static inline void *get_rq_wqe(struct pvrdma_qp *qp, unsigned int n)
{
	return pvrdma_page_dir_get_ptr(&qp->pdir,
				       qp->rq.offset + n * qp->rq.wqe_size);
}

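/*
 * Fill in the fast-register (REG_MR) segment of a send WQE header from
 * the memory region referenced by the work request, and hand the MR's
 * page list to the device.
 */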
static int set_reg_seg(struct pvrdma_sq_wqe_hdr *wqe_hdr, struct ib_reg_wr *wr)
{
	struct pvrdma_user_mr *mr = to_vmr(wr->mr);

	wqe_hdr->wr.fast_reg.iova_start = mr->ibmr.iova;
	wqe_hdr->wr.fast_reg.pl_pdir_dma = mr->pdir.dir_dma;
	wqe_hdr->wr.fast_reg.page_shift = mr->page_shift;
	wqe_hdr->wr.fast_reg.page_list_len = mr->npages;
	wqe_hdr->wr.fast_reg.length = mr->ibmr.length;
	wqe_hdr->wr.fast_reg.access_flags = wr->access;
	wqe_hdr->wr.fast_reg.rkey = wr->key;

	return pvrdma_page_dir_insert_page_list(&mr->pdir, mr->pages,
						mr->npages);
}

/**
 * pvrdma_post_send - post send work request entries on a QP
 * @ibqp: the QP
 * @wr: work request list to post
 * @bad_wr: the first bad WR returned
 *
 * @return: 0 on success, otherwise errno returned.
 */
int pvrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		     struct ib_send_wr **bad_wr)
{
	struct pvrdma_qp *qp = to_vqp(ibqp);
	struct pvrdma_dev *dev = to_vdev(ibqp->device);
	unsigned long flags;
	struct pvrdma_sq_wqe_hdr *wqe_hdr;
	struct pvrdma_sge *sge;
	int i, ret;

	/*
	 * In states lower than RTS, we can fail immediately. In other states,
	 * just post and let the device figure it out.
	 */
	if (qp->state < IB_QPS_RTS) {
		*bad_wr = wr;
		return -EINVAL;
	}

	spin_lock_irqsave(&qp->sq.lock, flags);

	while (wr) {
		unsigned int tail = 0;

		if (unlikely(!pvrdma_idx_ring_has_space(
				qp->sq.ring, qp->sq.wqe_cnt, &tail))) {
			dev_warn_ratelimited(&dev->pdev->dev,
					     "send queue is full\n");
			*bad_wr = wr;
			ret = -ENOMEM;
			goto out;
		}

		if (unlikely(wr->num_sge > qp->sq.max_sg || wr->num_sge < 0)) {
			dev_warn_ratelimited(&dev->pdev->dev,
					     "send SGE overflow\n");
			*bad_wr = wr;
			ret = -EINVAL;
			goto out;
		}

		if (unlikely(wr->opcode < 0)) {
			dev_warn_ratelimited(&dev->pdev->dev,
					     "invalid send opcode\n");
			*bad_wr = wr;
			ret = -EINVAL;
			goto out;
		}

		/*
		 * Only support UD, RC.
		 * Need to check opcode table for thorough checking.
		 * opcode		_UD	_UC	_RC
		 * _SEND		x	x	x
		 * _SEND_WITH_IMM	x	x	x
		 * _RDMA_WRITE			x	x
		 * _RDMA_WRITE_WITH_IMM		x	x
		 * _LOCAL_INV			x	x
		 * _SEND_WITH_INV		x	x
		 * _RDMA_READ				x
		 * _ATOMIC_CMP_AND_SWP			x
		 * _ATOMIC_FETCH_AND_ADD		x
		 * _MASK_ATOMIC_CMP_AND_SWP		x
		 * _MASK_ATOMIC_FETCH_AND_ADD		x
		 * _REG_MR				x
		 *
		 */
		if (qp->ibqp.qp_type != IB_QPT_UD &&
		    qp->ibqp.qp_type != IB_QPT_RC &&
		    wr->opcode != IB_WR_SEND) {
			dev_warn_ratelimited(&dev->pdev->dev,
					     "unsupported queuepair type\n");
			*bad_wr = wr;
			ret = -EINVAL;
			goto out;
		} else if (qp->ibqp.qp_type == IB_QPT_UD ||
			   qp->ibqp.qp_type == IB_QPT_GSI) {
			if (wr->opcode != IB_WR_SEND &&
			    wr->opcode != IB_WR_SEND_WITH_IMM) {
				dev_warn_ratelimited(&dev->pdev->dev,
						     "invalid send opcode\n");
				*bad_wr = wr;
				ret = -EINVAL;
				goto out;
			}
		}

		wqe_hdr = (struct pvrdma_sq_wqe_hdr *)get_sq_wqe(qp, tail);
		memset(wqe_hdr, 0, sizeof(*wqe_hdr));
		wqe_hdr->wr_id = wr->wr_id;
		wqe_hdr->num_sge = wr->num_sge;
		wqe_hdr->opcode = ib_wr_opcode_to_pvrdma(wr->opcode);
		wqe_hdr->send_flags = ib_send_flags_to_pvrdma(wr->send_flags);
		if (wr->opcode == IB_WR_SEND_WITH_IMM ||
		    wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
			wqe_hdr->ex.imm_data = wr->ex.imm_data;

		if (unlikely(wqe_hdr->opcode == PVRDMA_WR_ERROR)) {
			*bad_wr = wr;
			ret = -EINVAL;
			goto out;
		}

		switch (qp->ibqp.qp_type) {
		case IB_QPT_GSI:
		case IB_QPT_UD:
			if (unlikely(!ud_wr(wr)->ah)) {
				dev_warn_ratelimited(&dev->pdev->dev,
						     "invalid address handle\n");
				*bad_wr = wr;
				ret = -EINVAL;
				goto out;
			}

			/*
			 * Use qkey from qp context if high order bit set,
			 * otherwise from work request.
			 */
			wqe_hdr->wr.ud.remote_qpn = ud_wr(wr)->remote_qpn;
			wqe_hdr->wr.ud.remote_qkey =
				ud_wr(wr)->remote_qkey & 0x80000000 ?
				qp->qkey : ud_wr(wr)->remote_qkey;
			wqe_hdr->wr.ud.av = to_vah(ud_wr(wr)->ah)->av;

			break;
		case IB_QPT_RC:
			switch (wr->opcode) {
			case IB_WR_RDMA_READ:
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				wqe_hdr->wr.rdma.remote_addr =
					rdma_wr(wr)->remote_addr;
				wqe_hdr->wr.rdma.rkey = rdma_wr(wr)->rkey;
				break;
			case IB_WR_LOCAL_INV:
			case IB_WR_SEND_WITH_INV:
				wqe_hdr->ex.invalidate_rkey =
					wr->ex.invalidate_rkey;
				break;
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				wqe_hdr->wr.atomic.remote_addr =
					atomic_wr(wr)->remote_addr;
				wqe_hdr->wr.atomic.rkey = atomic_wr(wr)->rkey;
				wqe_hdr->wr.atomic.compare_add =
					atomic_wr(wr)->compare_add;
				if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP)
					wqe_hdr->wr.atomic.swap =
						atomic_wr(wr)->swap;
				break;
			case IB_WR_REG_MR:
				ret = set_reg_seg(wqe_hdr, reg_wr(wr));
				if (ret < 0) {
					dev_warn_ratelimited(&dev->pdev->dev,
							     "Failed to set fast register work request\n");
					*bad_wr = wr;
					goto out;
				}
				break;
			default:
				break;
			}

			break;
		default:
			dev_warn_ratelimited(&dev->pdev->dev,
					     "invalid queuepair type\n");
			ret = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		sge = (struct pvrdma_sge *)(wqe_hdr + 1);
		for (i = 0; i < wr->num_sge; i++) {
			/* Need to check wqe_size 0 or max size */
			sge->addr = wr->sg_list[i].addr;
			sge->length = wr->sg_list[i].length;
			sge->lkey = wr->sg_list[i].lkey;
			sge++;
		}

		/* Make sure wqe is written before index update */
		smp_wmb();

		/* Update shared sq ring */
		pvrdma_idx_ring_inc(&qp->sq.ring->prod_tail,
				    qp->sq.wqe_cnt);

		wr = wr->next;
	}

	ret = 0;

out:
	spin_unlock_irqrestore(&qp->sq.lock, flags);

	if (!ret)
		pvrdma_write_uar_qp(dev, PVRDMA_UAR_QP_SEND | qp->qp_handle);

	return ret;
}

/**
 * pvrdma_post_recv - post receive work request entries on a QP
 * @ibqp: the QP
 * @wr: the work request list to post
 * @bad_wr: the first bad WR returned
 *
 * @return: 0 on success, otherwise errno returned.
 */
int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		     struct ib_recv_wr **bad_wr)
{
	struct pvrdma_dev *dev = to_vdev(ibqp->device);
	unsigned long flags;
	struct pvrdma_qp *qp = to_vqp(ibqp);
	struct pvrdma_rq_wqe_hdr *wqe_hdr;
	struct pvrdma_sge *sge;
	int ret = 0;
	int i;

	/*
	 * In the RESET state, we can fail immediately. For other states,
	 * just post and let the device figure it out.
	 */
	if (qp->state == IB_QPS_RESET) {
		*bad_wr = wr;
		return -EINVAL;
	}

	spin_lock_irqsave(&qp->rq.lock, flags);

	while (wr) {
		unsigned int tail = 0;

		if (unlikely(wr->num_sge > qp->rq.max_sg ||
			     wr->num_sge < 0)) {
			ret = -EINVAL;
			*bad_wr = wr;
			dev_warn_ratelimited(&dev->pdev->dev,
					     "recv SGE overflow\n");
			goto out;
		}

		if (unlikely(!pvrdma_idx_ring_has_space(
				qp->rq.ring, qp->rq.wqe_cnt, &tail))) {
			ret = -ENOMEM;
			*bad_wr = wr;
			dev_warn_ratelimited(&dev->pdev->dev,
					     "recv queue full\n");
			goto out;
		}

		wqe_hdr = (struct pvrdma_rq_wqe_hdr *)get_rq_wqe(qp, tail);
		wqe_hdr->wr_id = wr->wr_id;
		wqe_hdr->num_sge = wr->num_sge;
		wqe_hdr->total_len = 0;

		sge = (struct pvrdma_sge *)(wqe_hdr + 1);
		for (i = 0; i < wr->num_sge; i++) {
			sge->addr = wr->sg_list[i].addr;
			sge->length = wr->sg_list[i].length;
			sge->lkey = wr->sg_list[i].lkey;
			sge++;
		}

		/* Make sure wqe is written before index update */
		smp_wmb();

		/* Update shared rq ring */
		pvrdma_idx_ring_inc(&qp->rq.ring->prod_tail,
				    qp->rq.wqe_cnt);

		wr = wr->next;
	}

	spin_unlock_irqrestore(&qp->rq.lock, flags);

	pvrdma_write_uar_qp(dev, PVRDMA_UAR_QP_RECV | qp->qp_handle);

	return ret;

out:
	spin_unlock_irqrestore(&qp->rq.lock, flags);

	return ret;
}

/**
 * pvrdma_query_qp - query a queue pair's attributes
 * @ibqp: the queue pair to query
 * @attr: the queue pair's attributes
 * @attr_mask: attributes mask
 * @init_attr: initial queue pair attributes
 *
 * @return: 0 on success, otherwise returns an errno.
 */
int pvrdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		    int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct pvrdma_dev *dev = to_vdev(ibqp->device);
	struct pvrdma_qp *qp = to_vqp(ibqp);
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_query_qp *cmd = &req.query_qp;
	struct pvrdma_cmd_query_qp_resp *resp = &rsp.query_qp_resp;
	int ret = 0;

	mutex_lock(&qp->mutex);

	if (qp->state == IB_QPS_RESET) {
		attr->qp_state = IB_QPS_RESET;
		goto out;
	}

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_QUERY_QP;
	cmd->qp_handle = qp->qp_handle;
	cmd->attr_mask = ib_qp_attr_mask_to_pvrdma(attr_mask);

	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_QUERY_QP_RESP);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not query queuepair, error: %d\n", ret);
		goto out;
	}

	attr->qp_state = pvrdma_qp_state_to_ib(resp->attrs.qp_state);
	attr->cur_qp_state =
		pvrdma_qp_state_to_ib(resp->attrs.cur_qp_state);
	attr->path_mtu = pvrdma_mtu_to_ib(resp->attrs.path_mtu);
	attr->path_mig_state =
		pvrdma_mig_state_to_ib(resp->attrs.path_mig_state);
	attr->qkey = resp->attrs.qkey;
	attr->rq_psn = resp->attrs.rq_psn;
	attr->sq_psn = resp->attrs.sq_psn;
	attr->dest_qp_num = resp->attrs.dest_qp_num;
	attr->qp_access_flags =
		pvrdma_access_flags_to_ib(resp->attrs.qp_access_flags);
	attr->pkey_index = resp->attrs.pkey_index;
	attr->alt_pkey_index = resp->attrs.alt_pkey_index;
	attr->en_sqd_async_notify = resp->attrs.en_sqd_async_notify;
	attr->sq_draining = resp->attrs.sq_draining;
	attr->max_rd_atomic = resp->attrs.max_rd_atomic;
	attr->max_dest_rd_atomic = resp->attrs.max_dest_rd_atomic;
	attr->min_rnr_timer = resp->attrs.min_rnr_timer;
	attr->port_num = resp->attrs.port_num;
	attr->timeout = resp->attrs.timeout;
	attr->retry_cnt = resp->attrs.retry_cnt;
	attr->rnr_retry = resp->attrs.rnr_retry;
	attr->alt_port_num = resp->attrs.alt_port_num;
	attr->alt_timeout = resp->attrs.alt_timeout;
	pvrdma_qp_cap_to_ib(&attr->cap, &resp->attrs.cap);
	pvrdma_ah_attr_to_rdma(&attr->ah_attr, &resp->attrs.ah_attr);
	pvrdma_ah_attr_to_rdma(&attr->alt_ah_attr, &resp->attrs.alt_ah_attr);

	qp->state = attr->qp_state;

	ret = 0;

out:
	attr->cur_qp_state = attr->qp_state;

	init_attr->event_handler = qp->ibqp.event_handler;
	init_attr->qp_context = qp->ibqp.qp_context;
	init_attr->send_cq = qp->ibqp.send_cq;
	init_attr->recv_cq = qp->ibqp.recv_cq;
	init_attr->srq = qp->ibqp.srq;
	init_attr->xrcd = NULL;
	init_attr->cap = attr->cap;
	init_attr->sq_sig_type = 0;
	init_attr->qp_type = qp->ibqp.qp_type;
	init_attr->create_flags = 0;
	init_attr->port_num = qp->port;

	mutex_unlock(&qp->mutex);
	return ret;
}