1 /* QLogic qedr NIC Driver
2  * Copyright (c) 2015-2016  QLogic Corporation
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 #include <linux/dma-mapping.h>
33 #include <linux/crc32.h>
34 #include <net/ip.h>
35 #include <net/ipv6.h>
36 #include <net/udp.h>
37 #include <linux/iommu.h>
38 
39 #include <rdma/ib_verbs.h>
40 #include <rdma/ib_user_verbs.h>
41 #include <rdma/iw_cm.h>
42 #include <rdma/ib_umem.h>
43 #include <rdma/ib_addr.h>
44 #include <rdma/ib_cache.h>
45 #include <rdma/uverbs_ioctl.h>
46 
47 #include <linux/qed/common_hsi.h>
48 #include "qedr_hsi_rdma.h"
49 #include <linux/qed/qed_if.h>
50 #include "qedr.h"
51 #include "verbs.h"
52 #include <rdma/qedr-abi.h>
53 #include "qedr_roce_cm.h"
54 #include "qedr_iw_cm.h"
55 
56 #define QEDR_SRQ_WQE_ELEM_SIZE	sizeof(union rdma_srq_elm)
57 #define	RDMA_MAX_SGE_PER_SRQ	(4)
58 #define RDMA_MAX_SRQ_WQE_SIZE	(RDMA_MAX_SGE_PER_SRQ + 1)
59 
60 #define DB_ADDR_SHIFT(addr)		((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
61 
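/* Flag values for user mmap entries: a write-combined mapping of the
 * doorbell (DPI) BAR, or a normal kernel page used for doorbell recovery
 * data.
 */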
62 enum {
63 	QEDR_USER_MMAP_IO_WC = 0,
64 	QEDR_USER_MMAP_PHYS_PAGE,
65 };
66 
67 static inline int qedr_ib_copy_to_udata(struct ib_udata *udata, void *src,
68 					size_t len)
69 {
70 	size_t min_len = min_t(size_t, len, udata->outlen);
71 
72 	return ib_copy_to_udata(udata, src, min_len);
73 }
74 
75 int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
76 {
77 	if (index >= QEDR_ROCE_PKEY_TABLE_LEN)
78 		return -EINVAL;
79 
80 	*pkey = QEDR_ROCE_PKEY_DEFAULT;
81 	return 0;
82 }
83 
84 int qedr_iw_query_gid(struct ib_device *ibdev, u8 port,
85 		      int index, union ib_gid *sgid)
86 {
87 	struct qedr_dev *dev = get_qedr_dev(ibdev);
88 
89 	memset(sgid->raw, 0, sizeof(sgid->raw));
90 	ether_addr_copy(sgid->raw, dev->ndev->dev_addr);
91 
92 	DP_DEBUG(dev, QEDR_MSG_INIT, "QUERY sgid[%d]=%llx:%llx\n", index,
93 		 sgid->global.interface_id, sgid->global.subnet_prefix);
94 
95 	return 0;
96 }
97 
98 int qedr_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
99 {
100 	struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
101 	struct qedr_device_attr *qattr = &dev->attr;
102 	struct qedr_srq *srq = get_qedr_srq(ibsrq);
103 
104 	srq_attr->srq_limit = srq->srq_limit;
105 	srq_attr->max_wr = qattr->max_srq_wr;
106 	srq_attr->max_sge = qattr->max_sge;
107 
108 	return 0;
109 }
110 
111 int qedr_query_device(struct ib_device *ibdev,
112 		      struct ib_device_attr *attr, struct ib_udata *udata)
113 {
114 	struct qedr_dev *dev = get_qedr_dev(ibdev);
115 	struct qedr_device_attr *qattr = &dev->attr;
116 
117 	if (!dev->rdma_ctx) {
118 		DP_ERR(dev,
119 		       "qedr_query_device called with invalid params rdma_ctx=%p\n",
120 		       dev->rdma_ctx);
121 		return -EINVAL;
122 	}
123 
124 	memset(attr, 0, sizeof(*attr));
125 
126 	attr->fw_ver = qattr->fw_ver;
127 	attr->sys_image_guid = qattr->sys_image_guid;
128 	attr->max_mr_size = qattr->max_mr_size;
129 	attr->page_size_cap = qattr->page_size_caps;
130 	attr->vendor_id = qattr->vendor_id;
131 	attr->vendor_part_id = qattr->vendor_part_id;
132 	attr->hw_ver = qattr->hw_ver;
133 	attr->max_qp = qattr->max_qp;
134 	attr->max_qp_wr = max_t(u32, qattr->max_sqe, qattr->max_rqe);
135 	attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
136 	    IB_DEVICE_RC_RNR_NAK_GEN |
137 	    IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_MGT_EXTENSIONS;
138 
139 	if (!rdma_protocol_iwarp(&dev->ibdev, 1))
140 		attr->device_cap_flags |= IB_DEVICE_XRC;
141 	attr->max_send_sge = qattr->max_sge;
142 	attr->max_recv_sge = qattr->max_sge;
143 	attr->max_sge_rd = qattr->max_sge;
144 	attr->max_cq = qattr->max_cq;
145 	attr->max_cqe = qattr->max_cqe;
146 	attr->max_mr = qattr->max_mr;
147 	attr->max_mw = qattr->max_mw;
148 	attr->max_pd = qattr->max_pd;
149 	attr->atomic_cap = dev->atomic_cap;
150 	attr->max_qp_init_rd_atom =
151 	    1 << (fls(qattr->max_qp_req_rd_atomic_resc) - 1);
152 	attr->max_qp_rd_atom =
153 	    min(1 << (fls(qattr->max_qp_resp_rd_atomic_resc) - 1),
154 		attr->max_qp_init_rd_atom);
155 
156 	attr->max_srq = qattr->max_srq;
157 	attr->max_srq_sge = qattr->max_srq_sge;
158 	attr->max_srq_wr = qattr->max_srq_wr;
159 
160 	attr->local_ca_ack_delay = qattr->dev_ack_delay;
161 	attr->max_fast_reg_page_list_len = qattr->max_mr / 8;
162 	attr->max_pkeys = qattr->max_pkey;
163 	attr->max_ah = qattr->max_ah;
164 
165 	return 0;
166 }
167 
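/* Map the Ethernet link speed (in Mb/s) to the closest IB speed/width pair
 * reported through ib_port_attr.
 */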
168 static inline void get_link_speed_and_width(int speed, u16 *ib_speed,
169 					    u8 *ib_width)
170 {
171 	switch (speed) {
172 	case 1000:
173 		*ib_speed = IB_SPEED_SDR;
174 		*ib_width = IB_WIDTH_1X;
175 		break;
176 	case 10000:
177 		*ib_speed = IB_SPEED_QDR;
178 		*ib_width = IB_WIDTH_1X;
179 		break;
180 
181 	case 20000:
182 		*ib_speed = IB_SPEED_DDR;
183 		*ib_width = IB_WIDTH_4X;
184 		break;
185 
186 	case 25000:
187 		*ib_speed = IB_SPEED_EDR;
188 		*ib_width = IB_WIDTH_1X;
189 		break;
190 
191 	case 40000:
192 		*ib_speed = IB_SPEED_QDR;
193 		*ib_width = IB_WIDTH_4X;
194 		break;
195 
196 	case 50000:
197 		*ib_speed = IB_SPEED_HDR;
198 		*ib_width = IB_WIDTH_1X;
199 		break;
200 
201 	case 100000:
202 		*ib_speed = IB_SPEED_EDR;
203 		*ib_width = IB_WIDTH_4X;
204 		break;
205 
206 	default:
207 		/* Unsupported */
208 		*ib_speed = IB_SPEED_SDR;
209 		*ib_width = IB_WIDTH_1X;
210 	}
211 }
212 
213 int qedr_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *attr)
214 {
215 	struct qedr_dev *dev;
216 	struct qed_rdma_port *rdma_port;
217 
218 	dev = get_qedr_dev(ibdev);
219 
220 	if (!dev->rdma_ctx) {
221 		DP_ERR(dev, "rdma_ctx is NULL\n");
222 		return -EINVAL;
223 	}
224 
225 	rdma_port = dev->ops->rdma_query_port(dev->rdma_ctx);
226 
227 	/* *attr is zeroed by the caller, avoid zeroing it here */
228 	if (rdma_port->port_state == QED_RDMA_PORT_UP) {
229 		attr->state = IB_PORT_ACTIVE;
230 		attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
231 	} else {
232 		attr->state = IB_PORT_DOWN;
233 		attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
234 	}
235 	attr->max_mtu = IB_MTU_4096;
236 	attr->lid = 0;
237 	attr->lmc = 0;
238 	attr->sm_lid = 0;
239 	attr->sm_sl = 0;
240 	attr->ip_gids = true;
241 	if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
242 		attr->active_mtu = iboe_get_mtu(dev->iwarp_max_mtu);
243 		attr->gid_tbl_len = 1;
244 	} else {
245 		attr->active_mtu = iboe_get_mtu(dev->ndev->mtu);
246 		attr->gid_tbl_len = QEDR_MAX_SGID;
247 		attr->pkey_tbl_len = QEDR_ROCE_PKEY_TABLE_LEN;
248 	}
249 	attr->bad_pkey_cntr = rdma_port->pkey_bad_counter;
250 	attr->qkey_viol_cntr = 0;
251 	get_link_speed_and_width(rdma_port->link_speed,
252 				 &attr->active_speed, &attr->active_width);
253 	attr->max_msg_sz = rdma_port->max_msg_size;
254 	attr->max_vl_num = 4;
255 
256 	return 0;
257 }
258 
259 int qedr_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
260 {
261 	struct ib_device *ibdev = uctx->device;
262 	int rc;
263 	struct qedr_ucontext *ctx = get_qedr_ucontext(uctx);
264 	struct qedr_alloc_ucontext_resp uresp = {};
265 	struct qedr_alloc_ucontext_req ureq = {};
266 	struct qedr_dev *dev = get_qedr_dev(ibdev);
267 	struct qed_rdma_add_user_out_params oparams;
268 	struct qedr_user_mmap_entry *entry;
269 
270 	if (!udata)
271 		return -EFAULT;
272 
273 	if (udata->inlen) {
274 		rc = ib_copy_from_udata(&ureq, udata,
275 					min(sizeof(ureq), udata->inlen));
276 		if (rc) {
277 			DP_ERR(dev, "Problem copying data from user space\n");
278 			return -EFAULT;
279 		}
280 		ctx->edpm_mode = !!(ureq.context_flags &
281 				    QEDR_ALLOC_UCTX_EDPM_MODE);
282 		ctx->db_rec = !!(ureq.context_flags & QEDR_ALLOC_UCTX_DB_REC);
283 	}
284 
285 	rc = dev->ops->rdma_add_user(dev->rdma_ctx, &oparams);
286 	if (rc) {
287 		DP_ERR(dev,
288 		       "failed to allocate a DPI for a new RoCE application, rc=%d. To overcome this consider to increase the number of DPIs, increase the doorbell BAR size or just close unnecessary RoCE applications. In order to increase the number of DPIs consult the qedr readme\n",
289 		       rc);
290 		return rc;
291 	}
292 
293 	ctx->dpi = oparams.dpi;
294 	ctx->dpi_addr = oparams.dpi_addr;
295 	ctx->dpi_phys_addr = oparams.dpi_phys_addr;
296 	ctx->dpi_size = oparams.dpi_size;
297 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
298 	if (!entry) {
299 		rc = -ENOMEM;
300 		goto err;
301 	}
302 
303 	entry->io_address = ctx->dpi_phys_addr;
304 	entry->length = ctx->dpi_size;
305 	entry->mmap_flag = QEDR_USER_MMAP_IO_WC;
306 	entry->dpi = ctx->dpi;
307 	entry->dev = dev;
308 	rc = rdma_user_mmap_entry_insert(uctx, &entry->rdma_entry,
309 					 ctx->dpi_size);
310 	if (rc) {
311 		kfree(entry);
312 		goto err;
313 	}
314 	ctx->db_mmap_entry = &entry->rdma_entry;
315 
316 	if (!dev->user_dpm_enabled)
317 		uresp.dpm_flags = 0;
318 	else if (rdma_protocol_iwarp(&dev->ibdev, 1))
319 		uresp.dpm_flags = QEDR_DPM_TYPE_IWARP_LEGACY;
320 	else
321 		uresp.dpm_flags = QEDR_DPM_TYPE_ROCE_ENHANCED |
322 				  QEDR_DPM_TYPE_ROCE_LEGACY |
323 				  QEDR_DPM_TYPE_ROCE_EDPM_MODE;
324 
325 	if (ureq.context_flags & QEDR_SUPPORT_DPM_SIZES) {
326 		uresp.dpm_flags |= QEDR_DPM_SIZES_SET;
327 		uresp.ldpm_limit_size = QEDR_LDPM_MAX_SIZE;
328 		uresp.edpm_trans_size = QEDR_EDPM_TRANS_SIZE;
329 		uresp.edpm_limit_size = QEDR_EDPM_MAX_SIZE;
330 	}
331 
332 	uresp.wids_enabled = 1;
333 	uresp.wid_count = oparams.wid_count;
334 	uresp.db_pa = rdma_user_mmap_get_offset(ctx->db_mmap_entry);
335 	uresp.db_size = ctx->dpi_size;
336 	uresp.max_send_wr = dev->attr.max_sqe;
337 	uresp.max_recv_wr = dev->attr.max_rqe;
338 	uresp.max_srq_wr = dev->attr.max_srq_wr;
339 	uresp.sges_per_send_wr = QEDR_MAX_SQE_ELEMENTS_PER_SQE;
340 	uresp.sges_per_recv_wr = QEDR_MAX_RQE_ELEMENTS_PER_RQE;
341 	uresp.sges_per_srq_wr = dev->attr.max_srq_sge;
342 	uresp.max_cqes = QEDR_MAX_CQES;
343 
344 	rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
345 	if (rc)
346 		goto err;
347 
348 	ctx->dev = dev;
349 
350 	DP_DEBUG(dev, QEDR_MSG_INIT, "Allocating user context %p\n",
351 		 &ctx->ibucontext);
352 	return 0;
353 
354 err:
355 	if (!ctx->db_mmap_entry)
356 		dev->ops->rdma_remove_user(dev->rdma_ctx, ctx->dpi);
357 	else
358 		rdma_user_mmap_entry_remove(ctx->db_mmap_entry);
359 
360 	return rc;
361 }
362 
363 void qedr_dealloc_ucontext(struct ib_ucontext *ibctx)
364 {
365 	struct qedr_ucontext *uctx = get_qedr_ucontext(ibctx);
366 
367 	DP_DEBUG(uctx->dev, QEDR_MSG_INIT, "Deallocating user context %p\n",
368 		 uctx);
369 
370 	rdma_user_mmap_entry_remove(uctx->db_mmap_entry);
371 }
372 
373 void qedr_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
374 {
375 	struct qedr_user_mmap_entry *entry = get_qedr_mmap_entry(rdma_entry);
376 	struct qedr_dev *dev = entry->dev;
377 
378 	if (entry->mmap_flag == QEDR_USER_MMAP_PHYS_PAGE)
379 		free_page((unsigned long)entry->address);
380 	else if (entry->mmap_flag == QEDR_USER_MMAP_IO_WC)
381 		dev->ops->rdma_remove_user(dev->rdma_ctx, entry->dpi);
382 
383 	kfree(entry);
384 }
385 
386 int qedr_mmap(struct ib_ucontext *ucontext, struct vm_area_struct *vma)
387 {
388 	struct ib_device *dev = ucontext->device;
389 	size_t length = vma->vm_end - vma->vm_start;
390 	struct rdma_user_mmap_entry *rdma_entry;
391 	struct qedr_user_mmap_entry *entry;
392 	int rc = 0;
393 	u64 pfn;
394 
395 	ibdev_dbg(dev,
396 		  "start %#lx, end %#lx, length = %#zx, pgoff = %#lx\n",
397 		  vma->vm_start, vma->vm_end, length, vma->vm_pgoff);
398 
399 	rdma_entry = rdma_user_mmap_entry_get(ucontext, vma);
400 	if (!rdma_entry) {
401 		ibdev_dbg(dev, "pgoff[%#lx] does not have valid entry\n",
402 			  vma->vm_pgoff);
403 		return -EINVAL;
404 	}
405 	entry = get_qedr_mmap_entry(rdma_entry);
406 	ibdev_dbg(dev,
407 		  "Mapping address[%#llx], length[%#zx], mmap_flag[%d]\n",
408 		  entry->io_address, length, entry->mmap_flag);
409 
410 	switch (entry->mmap_flag) {
411 	case QEDR_USER_MMAP_IO_WC:
412 		pfn = entry->io_address >> PAGE_SHIFT;
413 		rc = rdma_user_mmap_io(ucontext, vma, pfn, length,
414 				       pgprot_writecombine(vma->vm_page_prot),
415 				       rdma_entry);
416 		break;
417 	case QEDR_USER_MMAP_PHYS_PAGE:
418 		rc = vm_insert_page(vma, vma->vm_start,
419 				    virt_to_page(entry->address));
420 		break;
421 	default:
422 		rc = -EINVAL;
423 	}
424 
425 	if (rc)
426 		ibdev_dbg(dev,
427 			  "Couldn't mmap address[%#llx] length[%#zx] mmap_flag[%d] err[%d]\n",
428 			  entry->io_address, length, entry->mmap_flag, rc);
429 
430 	rdma_user_mmap_entry_put(rdma_entry);
431 	return rc;
432 }
433 
434 int qedr_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
435 {
436 	struct ib_device *ibdev = ibpd->device;
437 	struct qedr_dev *dev = get_qedr_dev(ibdev);
438 	struct qedr_pd *pd = get_qedr_pd(ibpd);
439 	u16 pd_id;
440 	int rc;
441 
442 	DP_DEBUG(dev, QEDR_MSG_INIT, "Function called from: %s\n",
443 		 udata ? "User Lib" : "Kernel");
444 
445 	if (!dev->rdma_ctx) {
446 		DP_ERR(dev, "invalid RDMA context\n");
447 		return -EINVAL;
448 	}
449 
450 	rc = dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
451 	if (rc)
452 		return rc;
453 
454 	pd->pd_id = pd_id;
455 
456 	if (udata) {
457 		struct qedr_alloc_pd_uresp uresp = {
458 			.pd_id = pd_id,
459 		};
460 		struct qedr_ucontext *context = rdma_udata_to_drv_context(
461 			udata, struct qedr_ucontext, ibucontext);
462 
463 		rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
464 		if (rc) {
465 			DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
466 			dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id);
467 			return rc;
468 		}
469 
470 		pd->uctx = context;
471 		pd->uctx->pd = pd;
472 	}
473 
474 	return 0;
475 }
476 
477 int qedr_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
478 {
479 	struct qedr_dev *dev = get_qedr_dev(ibpd->device);
480 	struct qedr_pd *pd = get_qedr_pd(ibpd);
481 
482 	DP_DEBUG(dev, QEDR_MSG_INIT, "Deallocating PD %d\n", pd->pd_id);
483 	dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd->pd_id);
484 	return 0;
485 }
486 
487 
488 int qedr_alloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata)
489 {
490 	struct qedr_dev *dev = get_qedr_dev(ibxrcd->device);
491 	struct qedr_xrcd *xrcd = get_qedr_xrcd(ibxrcd);
492 
493 	return dev->ops->rdma_alloc_xrcd(dev->rdma_ctx, &xrcd->xrcd_id);
494 }
495 
496 int qedr_dealloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata)
497 {
498 	struct qedr_dev *dev = get_qedr_dev(ibxrcd->device);
499 	u16 xrcd_id = get_qedr_xrcd(ibxrcd)->xrcd_id;
500 
501 	dev->ops->rdma_dealloc_xrcd(dev->rdma_ctx, xrcd_id);
502 	return 0;
503 }
504 static void qedr_free_pbl(struct qedr_dev *dev,
505 			  struct qedr_pbl_info *pbl_info, struct qedr_pbl *pbl)
506 {
507 	struct pci_dev *pdev = dev->pdev;
508 	int i;
509 
510 	for (i = 0; i < pbl_info->num_pbls; i++) {
511 		if (!pbl[i].va)
512 			continue;
513 		dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
514 				  pbl[i].va, pbl[i].pa);
515 	}
516 
517 	kfree(pbl);
518 }
519 
520 #define MIN_FW_PBL_PAGE_SIZE (4 * 1024)
521 #define MAX_FW_PBL_PAGE_SIZE (64 * 1024)
522 
523 #define NUM_PBES_ON_PAGE(_page_size) (_page_size / sizeof(u64))
524 #define MAX_PBES_ON_PAGE NUM_PBES_ON_PAGE(MAX_FW_PBL_PAGE_SIZE)
525 #define MAX_PBES_TWO_LAYER (MAX_PBES_ON_PAGE * MAX_PBES_ON_PAGE)
526 
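/* Allocate the DMA-coherent pages of a PBL table. For a two-layer table the
 * first page is filled with the physical addresses of the remaining pages.
 */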
527 static struct qedr_pbl *qedr_alloc_pbl_tbl(struct qedr_dev *dev,
528 					   struct qedr_pbl_info *pbl_info,
529 					   gfp_t flags)
530 {
531 	struct pci_dev *pdev = dev->pdev;
532 	struct qedr_pbl *pbl_table;
533 	dma_addr_t *pbl_main_tbl;
534 	dma_addr_t pa;
535 	void *va;
536 	int i;
537 
538 	pbl_table = kcalloc(pbl_info->num_pbls, sizeof(*pbl_table), flags);
539 	if (!pbl_table)
540 		return ERR_PTR(-ENOMEM);
541 
542 	for (i = 0; i < pbl_info->num_pbls; i++) {
543 		va = dma_alloc_coherent(&pdev->dev, pbl_info->pbl_size, &pa,
544 					flags);
545 		if (!va)
546 			goto err;
547 
548 		pbl_table[i].va = va;
549 		pbl_table[i].pa = pa;
550 	}
551 
552 	/* Two-layer PBLs: if we have more than one PBL, we need to initialize
553 	 * the first one with physical pointers to all of the rest.
554 	 */
555 	pbl_main_tbl = (dma_addr_t *)pbl_table[0].va;
556 	for (i = 0; i < pbl_info->num_pbls - 1; i++)
557 		pbl_main_tbl[i] = pbl_table[i + 1].pa;
558 
559 	return pbl_table;
560 
561 err:
562 	for (i--; i >= 0; i--)
563 		dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
564 				  pbl_table[i].va, pbl_table[i].pa);
565 
566 	qedr_free_pbl(dev, pbl_info, pbl_table);
567 
568 	return ERR_PTR(-ENOMEM);
569 }
570 
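/* Decide between a one-layer and a two-layer PBL, and compute the page size
 * and page count needed to hold 'num_pbes' page-buffer entries.
 */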
571 static int qedr_prepare_pbl_tbl(struct qedr_dev *dev,
572 				struct qedr_pbl_info *pbl_info,
573 				u32 num_pbes, int two_layer_capable)
574 {
575 	u32 pbl_capacity;
576 	u32 pbl_size;
577 	u32 num_pbls;
578 
579 	if ((num_pbes > MAX_PBES_ON_PAGE) && two_layer_capable) {
580 		if (num_pbes > MAX_PBES_TWO_LAYER) {
581 			DP_ERR(dev, "prepare pbl table: too many pages %d\n",
582 			       num_pbes);
583 			return -EINVAL;
584 		}
585 
586 		/* calculate required pbl page size */
587 		pbl_size = MIN_FW_PBL_PAGE_SIZE;
588 		pbl_capacity = NUM_PBES_ON_PAGE(pbl_size) *
589 			       NUM_PBES_ON_PAGE(pbl_size);
590 
591 		while (pbl_capacity < num_pbes) {
592 			pbl_size *= 2;
593 			pbl_capacity = pbl_size / sizeof(u64);
594 			pbl_capacity = pbl_capacity * pbl_capacity;
595 		}
596 
597 		num_pbls = DIV_ROUND_UP(num_pbes, NUM_PBES_ON_PAGE(pbl_size));
598 		num_pbls++;	/* One for layer 0 (points to the PBLs) */
599 		pbl_info->two_layered = true;
600 	} else {
601 		/* One layered PBL */
602 		num_pbls = 1;
603 		pbl_size = max_t(u32, MIN_FW_PBL_PAGE_SIZE,
604 				 roundup_pow_of_two((num_pbes * sizeof(u64))));
605 		pbl_info->two_layered = false;
606 	}
607 
608 	pbl_info->num_pbls = num_pbls;
609 	pbl_info->pbl_size = pbl_size;
610 	pbl_info->num_pbes = num_pbes;
611 
612 	DP_DEBUG(dev, QEDR_MSG_MR,
613 		 "prepare pbl table: num_pbes=%d, num_pbls=%d, pbl_size=%d\n",
614 		 pbl_info->num_pbes, pbl_info->num_pbls, pbl_info->pbl_size);
615 
616 	return 0;
617 }
618 
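/* Walk the umem in pg_shift-sized DMA blocks and write each block address
 * into the PBL as a PBE, moving to the next PBL page whenever one fills up.
 */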
619 static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
620 			       struct qedr_pbl *pbl,
621 			       struct qedr_pbl_info *pbl_info, u32 pg_shift)
622 {
623 	int pbe_cnt, total_num_pbes = 0;
624 	struct qedr_pbl *pbl_tbl;
625 	struct ib_block_iter biter;
626 	struct regpair *pbe;
627 
628 	if (!pbl_info->num_pbes)
629 		return;
630 
631 	/* If we have a two-layered PBL, the first PBL points to the rest of
632 	 * the PBLs and the first entry lies on the second PBL in the table.
633 	 */
634 	if (pbl_info->two_layered)
635 		pbl_tbl = &pbl[1];
636 	else
637 		pbl_tbl = pbl;
638 
639 	pbe = (struct regpair *)pbl_tbl->va;
640 	if (!pbe) {
641 		DP_ERR(dev, "cannot populate PBL due to a NULL PBE\n");
642 		return;
643 	}
644 
645 	pbe_cnt = 0;
646 
647 	rdma_umem_for_each_dma_block (umem, &biter, BIT(pg_shift)) {
648 		u64 pg_addr = rdma_block_iter_dma_address(&biter);
649 
650 		pbe->lo = cpu_to_le32(pg_addr);
651 		pbe->hi = cpu_to_le32(upper_32_bits(pg_addr));
652 
653 		pbe_cnt++;
654 		total_num_pbes++;
655 		pbe++;
656 
657 		if (total_num_pbes == pbl_info->num_pbes)
658 			return;
659 
660 		/* If the given PBL is full of PBEs, move to the next PBL.
661 		 */
662 		if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) {
663 			pbl_tbl++;
664 			pbe = (struct regpair *)pbl_tbl->va;
665 			pbe_cnt = 0;
666 		}
667 	}
668 }
669 
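/* Register a doorbell address/data pair with the qed doorbell-recovery
 * mechanism. Old user libraries that don't supply db_data are skipped.
 */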
670 static int qedr_db_recovery_add(struct qedr_dev *dev,
671 				void __iomem *db_addr,
672 				void *db_data,
673 				enum qed_db_rec_width db_width,
674 				enum qed_db_rec_space db_space)
675 {
676 	if (!db_data) {
677 		DP_DEBUG(dev, QEDR_MSG_INIT, "avoiding db rec since old lib\n");
678 		return 0;
679 	}
680 
681 	return dev->ops->common->db_recovery_add(dev->cdev, db_addr, db_data,
682 						 db_width, db_space);
683 }
684 
685 static void qedr_db_recovery_del(struct qedr_dev *dev,
686 				 void __iomem *db_addr,
687 				 void *db_data)
688 {
689 	if (!db_data) {
690 		DP_DEBUG(dev, QEDR_MSG_INIT, "avoiding db rec since old lib\n");
691 		return;
692 	}
693 
694 	/* Ignore the return code as there is not much we can do about it. An
695 	 * error log will be printed inside.
696 	 */
697 	dev->ops->common->db_recovery_del(dev->cdev, db_addr, db_data);
698 }
699 
700 static int qedr_copy_cq_uresp(struct qedr_dev *dev,
701 			      struct qedr_cq *cq, struct ib_udata *udata,
702 			      u32 db_offset)
703 {
704 	struct qedr_create_cq_uresp uresp;
705 	int rc;
706 
707 	memset(&uresp, 0, sizeof(uresp));
708 
709 	uresp.db_offset = db_offset;
710 	uresp.icid = cq->icid;
711 	if (cq->q.db_mmap_entry)
712 		uresp.db_rec_addr =
713 			rdma_user_mmap_get_offset(cq->q.db_mmap_entry);
714 
715 	rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
716 	if (rc)
717 		DP_ERR(dev, "copy error cqid=0x%x.\n", cq->icid);
718 
719 	return rc;
720 }
721 
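/* Advance the CQ's latest CQE pointer; flip the expected toggle bit when
 * passing the last element of the chain.
 */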
722 static void consume_cqe(struct qedr_cq *cq)
723 {
724 	if (cq->latest_cqe == cq->toggle_cqe)
725 		cq->pbl_toggle ^= RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
726 
727 	cq->latest_cqe = qed_chain_consume(&cq->pbl);
728 }
729 
730 static inline int qedr_align_cq_entries(int entries)
731 {
732 	u64 size, aligned_size;
733 
734 	/* We allocate an extra entry that we don't report to the FW. */
735 	size = (entries + 1) * QEDR_CQE_SIZE;
736 	aligned_size = ALIGN(size, PAGE_SIZE);
737 
738 	return aligned_size / QEDR_CQE_SIZE;
739 }
740 
741 static int qedr_init_user_db_rec(struct ib_udata *udata,
742 				 struct qedr_dev *dev, struct qedr_userq *q,
743 				 bool requires_db_rec)
744 {
745 	struct qedr_ucontext *uctx =
746 		rdma_udata_to_drv_context(udata, struct qedr_ucontext,
747 					  ibucontext);
748 	struct qedr_user_mmap_entry *entry;
749 	int rc;
750 
751 	/* Abort for a non-doorbell user queue (SRQ) or a non-supporting lib */
752 	if (requires_db_rec == 0 || !uctx->db_rec)
753 		return 0;
754 
755 	/* Allocate a page for doorbell recovery, add to mmap */
756 	q->db_rec_data = (void *)get_zeroed_page(GFP_USER);
757 	if (!q->db_rec_data) {
758 		DP_ERR(dev, "get_zeroed_page failed\n");
759 		return -ENOMEM;
760 	}
761 
762 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
763 	if (!entry)
764 		goto err_free_db_data;
765 
766 	entry->address = q->db_rec_data;
767 	entry->length = PAGE_SIZE;
768 	entry->mmap_flag = QEDR_USER_MMAP_PHYS_PAGE;
769 	rc = rdma_user_mmap_entry_insert(&uctx->ibucontext,
770 					 &entry->rdma_entry,
771 					 PAGE_SIZE);
772 	if (rc)
773 		goto err_free_entry;
774 
775 	q->db_mmap_entry = &entry->rdma_entry;
776 
777 	return 0;
778 
779 err_free_entry:
780 	kfree(entry);
781 
782 err_free_db_data:
783 	free_page((unsigned long)q->db_rec_data);
784 	q->db_rec_data = NULL;
785 	return -ENOMEM;
786 }
787 
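/* Pin the user-space queue buffer, build its PBL (optionally populating it
 * right away) and set up the doorbell-recovery page when required.
 */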
788 static inline int qedr_init_user_queue(struct ib_udata *udata,
789 				       struct qedr_dev *dev,
790 				       struct qedr_userq *q, u64 buf_addr,
791 				       size_t buf_len, bool requires_db_rec,
792 				       int access,
793 				       int alloc_and_init)
794 {
795 	u32 fw_pages;
796 	int rc;
797 
798 	q->buf_addr = buf_addr;
799 	q->buf_len = buf_len;
800 	q->umem = ib_umem_get(&dev->ibdev, q->buf_addr, q->buf_len, access);
801 	if (IS_ERR(q->umem)) {
802 		DP_ERR(dev, "create user queue: failed ib_umem_get, got %ld\n",
803 		       PTR_ERR(q->umem));
804 		return PTR_ERR(q->umem);
805 	}
806 
807 	fw_pages = ib_umem_num_dma_blocks(q->umem, 1 << FW_PAGE_SHIFT);
808 	rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, fw_pages, 0);
809 	if (rc)
810 		goto err0;
811 
812 	if (alloc_and_init) {
813 		q->pbl_tbl = qedr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL);
814 		if (IS_ERR(q->pbl_tbl)) {
815 			rc = PTR_ERR(q->pbl_tbl);
816 			goto err0;
817 		}
818 		qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info,
819 				   FW_PAGE_SHIFT);
820 	} else {
821 		q->pbl_tbl = kzalloc(sizeof(*q->pbl_tbl), GFP_KERNEL);
822 		if (!q->pbl_tbl) {
823 			rc = -ENOMEM;
824 			goto err0;
825 		}
826 	}
827 
828 	/* mmap the user address used to store doorbell data for recovery */
829 	return qedr_init_user_db_rec(udata, dev, q, requires_db_rec);
830 
831 err0:
832 	ib_umem_release(q->umem);
833 	q->umem = NULL;
834 
835 	return rc;
836 }
837 
838 static inline void qedr_init_cq_params(struct qedr_cq *cq,
839 				       struct qedr_ucontext *ctx,
840 				       struct qedr_dev *dev, int vector,
841 				       int chain_entries, int page_cnt,
842 				       u64 pbl_ptr,
843 				       struct qed_rdma_create_cq_in_params
844 				       *params)
845 {
846 	memset(params, 0, sizeof(*params));
847 	params->cq_handle_hi = upper_32_bits((uintptr_t)cq);
848 	params->cq_handle_lo = lower_32_bits((uintptr_t)cq);
849 	params->cnq_id = vector;
850 	params->cq_size = chain_entries - 1;
851 	params->dpi = (ctx) ? ctx->dpi : dev->dpi;
852 	params->pbl_num_pages = page_cnt;
853 	params->pbl_ptr = pbl_ptr;
854 	params->pbl_two_level = 0;
855 }
856 
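/* Ring the CQ doorbell with the given consumer index and arm flags. */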
857 static void doorbell_cq(struct qedr_cq *cq, u32 cons, u8 flags)
858 {
859 	cq->db.data.agg_flags = flags;
860 	cq->db.data.value = cpu_to_le32(cons);
861 	writeq(cq->db.raw, cq->db_addr);
862 }
863 
864 int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
865 {
866 	struct qedr_cq *cq = get_qedr_cq(ibcq);
867 	unsigned long sflags;
868 	struct qedr_dev *dev;
869 
870 	dev = get_qedr_dev(ibcq->device);
871 
872 	if (cq->destroyed) {
873 		DP_ERR(dev,
874 		       "warning: arm was invoked after destroy for cq %p (icid=%d)\n",
875 		       cq, cq->icid);
876 		return -EINVAL;
877 	}
878 
879 
880 	if (cq->cq_type == QEDR_CQ_TYPE_GSI)
881 		return 0;
882 
883 	spin_lock_irqsave(&cq->cq_lock, sflags);
884 
885 	cq->arm_flags = 0;
886 
887 	if (flags & IB_CQ_SOLICITED)
888 		cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD;
889 
890 	if (flags & IB_CQ_NEXT_COMP)
891 		cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_CF_CMD;
892 
893 	doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
894 
895 	spin_unlock_irqrestore(&cq->cq_lock, sflags);
896 
897 	return 0;
898 }
899 
900 int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
901 		   struct ib_udata *udata)
902 {
903 	struct ib_device *ibdev = ibcq->device;
904 	struct qedr_ucontext *ctx = rdma_udata_to_drv_context(
905 		udata, struct qedr_ucontext, ibucontext);
906 	struct qed_rdma_destroy_cq_out_params destroy_oparams;
907 	struct qed_rdma_destroy_cq_in_params destroy_iparams;
908 	struct qed_chain_init_params chain_params = {
909 		.mode		= QED_CHAIN_MODE_PBL,
910 		.intended_use	= QED_CHAIN_USE_TO_CONSUME,
911 		.cnt_type	= QED_CHAIN_CNT_TYPE_U32,
912 		.elem_size	= sizeof(union rdma_cqe),
913 	};
914 	struct qedr_dev *dev = get_qedr_dev(ibdev);
915 	struct qed_rdma_create_cq_in_params params;
916 	struct qedr_create_cq_ureq ureq = {};
917 	int vector = attr->comp_vector;
918 	int entries = attr->cqe;
919 	struct qedr_cq *cq = get_qedr_cq(ibcq);
920 	int chain_entries;
921 	u32 db_offset;
922 	int page_cnt;
923 	u64 pbl_ptr;
924 	u16 icid;
925 	int rc;
926 
927 	DP_DEBUG(dev, QEDR_MSG_INIT,
928 		 "create_cq: called from %s. entries=%d, vector=%d\n",
929 		 udata ? "User Lib" : "Kernel", entries, vector);
930 
931 	if (entries > QEDR_MAX_CQES) {
932 		DP_ERR(dev,
933 		       "create cq: the number of entries %d is too high. Must be equal or below %d.\n",
934 		       entries, QEDR_MAX_CQES);
935 		return -EINVAL;
936 	}
937 
938 	chain_entries = qedr_align_cq_entries(entries);
939 	chain_entries = min_t(int, chain_entries, QEDR_MAX_CQES);
940 	chain_params.num_elems = chain_entries;
941 
942 	/* calc db offset. user will add DPI base, kernel will add db addr */
943 	db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
944 
945 	if (udata) {
946 		if (ib_copy_from_udata(&ureq, udata, min(sizeof(ureq),
947 							 udata->inlen))) {
948 			DP_ERR(dev,
949 			       "create cq: problem copying data from user space\n");
950 			goto err0;
951 		}
952 
953 		if (!ureq.len) {
954 			DP_ERR(dev,
955 			       "create cq: cannot create a cq with 0 entries\n");
956 			goto err0;
957 		}
958 
959 		cq->cq_type = QEDR_CQ_TYPE_USER;
960 
961 		rc = qedr_init_user_queue(udata, dev, &cq->q, ureq.addr,
962 					  ureq.len, true, IB_ACCESS_LOCAL_WRITE,
963 					  1);
964 		if (rc)
965 			goto err0;
966 
967 		pbl_ptr = cq->q.pbl_tbl->pa;
968 		page_cnt = cq->q.pbl_info.num_pbes;
969 
970 		cq->ibcq.cqe = chain_entries;
971 		cq->q.db_addr = ctx->dpi_addr + db_offset;
972 	} else {
973 		cq->cq_type = QEDR_CQ_TYPE_KERNEL;
974 
975 		rc = dev->ops->common->chain_alloc(dev->cdev, &cq->pbl,
976 						   &chain_params);
977 		if (rc)
978 			goto err0;
979 
980 		page_cnt = qed_chain_get_page_cnt(&cq->pbl);
981 		pbl_ptr = qed_chain_get_pbl_phys(&cq->pbl);
982 		cq->ibcq.cqe = cq->pbl.capacity;
983 	}
984 
985 	qedr_init_cq_params(cq, ctx, dev, vector, chain_entries, page_cnt,
986 			    pbl_ptr, &params);
987 
988 	rc = dev->ops->rdma_create_cq(dev->rdma_ctx, &params, &icid);
989 	if (rc)
990 		goto err1;
991 
992 	cq->icid = icid;
993 	cq->sig = QEDR_CQ_MAGIC_NUMBER;
994 	spin_lock_init(&cq->cq_lock);
995 
996 	if (udata) {
997 		rc = qedr_copy_cq_uresp(dev, cq, udata, db_offset);
998 		if (rc)
999 			goto err2;
1000 
1001 		rc = qedr_db_recovery_add(dev, cq->q.db_addr,
1002 					  &cq->q.db_rec_data->db_data,
1003 					  DB_REC_WIDTH_64B,
1004 					  DB_REC_USER);
1005 		if (rc)
1006 			goto err2;
1007 
1008 	} else {
1009 		/* Generate doorbell address. */
1010 		cq->db.data.icid = cq->icid;
1011 		cq->db_addr = dev->db_addr + db_offset;
1012 		cq->db.data.params = DB_AGG_CMD_MAX <<
1013 		    RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT;
1014 
1015 		/* Point to the very last element; once we pass it we will toggle */
1016 		cq->toggle_cqe = qed_chain_get_last_elem(&cq->pbl);
1017 		cq->pbl_toggle = RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
1018 		cq->latest_cqe = NULL;
1019 		consume_cqe(cq);
1020 		cq->cq_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
1021 
1022 		rc = qedr_db_recovery_add(dev, cq->db_addr, &cq->db.data,
1023 					  DB_REC_WIDTH_64B, DB_REC_KERNEL);
1024 		if (rc)
1025 			goto err2;
1026 	}
1027 
1028 	DP_DEBUG(dev, QEDR_MSG_CQ,
1029 		 "create cq: icid=0x%0x, addr=%p, size(entries)=0x%0x\n",
1030 		 cq->icid, cq, params.cq_size);
1031 
1032 	return 0;
1033 
1034 err2:
1035 	destroy_iparams.icid = cq->icid;
1036 	dev->ops->rdma_destroy_cq(dev->rdma_ctx, &destroy_iparams,
1037 				  &destroy_oparams);
1038 err1:
1039 	if (udata) {
1040 		qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
1041 		ib_umem_release(cq->q.umem);
1042 		if (cq->q.db_mmap_entry)
1043 			rdma_user_mmap_entry_remove(cq->q.db_mmap_entry);
1044 	} else {
1045 		dev->ops->common->chain_free(dev->cdev, &cq->pbl);
1046 	}
1047 err0:
1048 	return -EINVAL;
1049 }
1050 
1051 int qedr_resize_cq(struct ib_cq *ibcq, int new_cnt, struct ib_udata *udata)
1052 {
1053 	struct qedr_dev *dev = get_qedr_dev(ibcq->device);
1054 	struct qedr_cq *cq = get_qedr_cq(ibcq);
1055 
1056 	DP_ERR(dev, "cq %p RESIZE NOT SUPPORTED\n", cq);
1057 
1058 	return 0;
1059 }
1060 
1061 #define QEDR_DESTROY_CQ_MAX_ITERATIONS		(10)
1062 #define QEDR_DESTROY_CQ_ITER_DURATION		(10)
1063 
1064 int qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
1065 {
1066 	struct qedr_dev *dev = get_qedr_dev(ibcq->device);
1067 	struct qed_rdma_destroy_cq_out_params oparams;
1068 	struct qed_rdma_destroy_cq_in_params iparams;
1069 	struct qedr_cq *cq = get_qedr_cq(ibcq);
1070 	int iter;
1071 
1072 	DP_DEBUG(dev, QEDR_MSG_CQ, "destroy cq %p (icid=%d)\n", cq, cq->icid);
1073 
1074 	cq->destroyed = 1;
1075 
1076 	/* GSI CQs are handled by the driver, so they don't exist in the FW */
1077 	if (cq->cq_type == QEDR_CQ_TYPE_GSI) {
1078 		qedr_db_recovery_del(dev, cq->db_addr, &cq->db.data);
1079 		return 0;
1080 	}
1081 
1082 	iparams.icid = cq->icid;
1083 	dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
1084 	dev->ops->common->chain_free(dev->cdev, &cq->pbl);
1085 
1086 	if (udata) {
1087 		qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
1088 		ib_umem_release(cq->q.umem);
1089 
1090 		if (cq->q.db_rec_data) {
1091 			qedr_db_recovery_del(dev, cq->q.db_addr,
1092 					     &cq->q.db_rec_data->db_data);
1093 			rdma_user_mmap_entry_remove(cq->q.db_mmap_entry);
1094 		}
1095 	} else {
1096 		qedr_db_recovery_del(dev, cq->db_addr, &cq->db.data);
1097 	}
1098 
1099 	/* We don't want the IRQ handler to handle a non-existing CQ so we
1100 	 * wait until all CNQ interrupts, if any, are received. This will always
1101 	 * happen and will always happen very fast. If not, then a serious error
1102 	 * has occurred. That is why we can use a long delay.
1103 	 * We spin for a short time so we don't lose time on context switching
1104 	 * in case all the completions are handled in that span. Otherwise
1105 	 * we sleep for a while and check again. Since the CNQ may be
1106 	 * associated with (only) the current CPU we use msleep to allow the
1107 	 * current CPU to be freed.
1108 	 * The CNQ notification is increased in qedr_irq_handler().
1109 	 */
1110 	iter = QEDR_DESTROY_CQ_MAX_ITERATIONS;
1111 	while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) {
1112 		udelay(QEDR_DESTROY_CQ_ITER_DURATION);
1113 		iter--;
1114 	}
1115 
1116 	iter = QEDR_DESTROY_CQ_MAX_ITERATIONS;
1117 	while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) {
1118 		msleep(QEDR_DESTROY_CQ_ITER_DURATION);
1119 		iter--;
1120 	}
1121 
1122 	/* Note that we don't need to have explicit code to wait for the
1123 	 * completion of the event handler because it is invoked from the EQ.
1124 	 * Since the destroy CQ ramrod has also been received on the EQ we can
1125 	 * be certain that there's no event handler in process.
1126 	 */
1127 	return 0;
1128 }
1129 
1130 static inline int get_gid_info_from_table(struct ib_qp *ibqp,
1131 					  struct ib_qp_attr *attr,
1132 					  int attr_mask,
1133 					  struct qed_rdma_modify_qp_in_params
1134 					  *qp_params)
1135 {
1136 	const struct ib_gid_attr *gid_attr;
1137 	enum rdma_network_type nw_type;
1138 	const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
1139 	u32 ipv4_addr;
1140 	int ret;
1141 	int i;
1142 
1143 	gid_attr = grh->sgid_attr;
1144 	ret = rdma_read_gid_l2_fields(gid_attr, &qp_params->vlan_id, NULL);
1145 	if (ret)
1146 		return ret;
1147 
1148 	nw_type = rdma_gid_attr_network_type(gid_attr);
1149 	switch (nw_type) {
1150 	case RDMA_NETWORK_IPV6:
1151 		memcpy(&qp_params->sgid.bytes[0], &gid_attr->gid.raw[0],
1152 		       sizeof(qp_params->sgid));
1153 		memcpy(&qp_params->dgid.bytes[0],
1154 		       &grh->dgid,
1155 		       sizeof(qp_params->dgid));
1156 		qp_params->roce_mode = ROCE_V2_IPV6;
1157 		SET_FIELD(qp_params->modify_flags,
1158 			  QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
1159 		break;
1160 	case RDMA_NETWORK_ROCE_V1:
1161 		memcpy(&qp_params->sgid.bytes[0], &gid_attr->gid.raw[0],
1162 		       sizeof(qp_params->sgid));
1163 		memcpy(&qp_params->dgid.bytes[0],
1164 		       &grh->dgid,
1165 		       sizeof(qp_params->dgid));
1166 		qp_params->roce_mode = ROCE_V1;
1167 		break;
1168 	case RDMA_NETWORK_IPV4:
1169 		memset(&qp_params->sgid, 0, sizeof(qp_params->sgid));
1170 		memset(&qp_params->dgid, 0, sizeof(qp_params->dgid));
1171 		ipv4_addr = qedr_get_ipv4_from_gid(gid_attr->gid.raw);
1172 		qp_params->sgid.ipv4_addr = ipv4_addr;
1173 		ipv4_addr =
1174 		    qedr_get_ipv4_from_gid(grh->dgid.raw);
1175 		qp_params->dgid.ipv4_addr = ipv4_addr;
1176 		SET_FIELD(qp_params->modify_flags,
1177 			  QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
1178 		qp_params->roce_mode = ROCE_V2_IPV4;
1179 		break;
1180 	default:
1181 		return -EINVAL;
1182 	}
1183 
1184 	for (i = 0; i < 4; i++) {
1185 		qp_params->sgid.dwords[i] = ntohl(qp_params->sgid.dwords[i]);
1186 		qp_params->dgid.dwords[i] = ntohl(qp_params->dgid.dwords[i]);
1187 	}
1188 
1189 	if (qp_params->vlan_id >= VLAN_CFI_MASK)
1190 		qp_params->vlan_id = 0;
1191 
1192 	return 0;
1193 }
1194 
1195 static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
1196 			       struct ib_qp_init_attr *attrs,
1197 			       struct ib_udata *udata)
1198 {
1199 	struct qedr_device_attr *qattr = &dev->attr;
1200 
1201 	/* QP0... attrs->qp_type == IB_QPT_GSI */
1202 	if (attrs->qp_type != IB_QPT_RC &&
1203 	    attrs->qp_type != IB_QPT_GSI &&
1204 	    attrs->qp_type != IB_QPT_XRC_INI &&
1205 	    attrs->qp_type != IB_QPT_XRC_TGT) {
1206 		DP_DEBUG(dev, QEDR_MSG_QP,
1207 			 "create qp: unsupported qp type=0x%x requested\n",
1208 			 attrs->qp_type);
1209 		return -EOPNOTSUPP;
1210 	}
1211 
1212 	if (attrs->cap.max_send_wr > qattr->max_sqe) {
1213 		DP_ERR(dev,
1214 		       "create qp: cannot create a SQ with %d elements (max_send_wr=0x%x)\n",
1215 		       attrs->cap.max_send_wr, qattr->max_sqe);
1216 		return -EINVAL;
1217 	}
1218 
1219 	if (attrs->cap.max_inline_data > qattr->max_inline) {
1220 		DP_ERR(dev,
1221 		       "create qp: unsupported inline data size=0x%x requested (max_inline=0x%x)\n",
1222 		       attrs->cap.max_inline_data, qattr->max_inline);
1223 		return -EINVAL;
1224 	}
1225 
1226 	if (attrs->cap.max_send_sge > qattr->max_sge) {
1227 		DP_ERR(dev,
1228 		       "create qp: unsupported send_sge=0x%x requested (max_send_sge=0x%x)\n",
1229 		       attrs->cap.max_send_sge, qattr->max_sge);
1230 		return -EINVAL;
1231 	}
1232 
1233 	if (attrs->cap.max_recv_sge > qattr->max_sge) {
1234 		DP_ERR(dev,
1235 		       "create qp: unsupported recv_sge=0x%x requested (max_recv_sge=0x%x)\n",
1236 		       attrs->cap.max_recv_sge, qattr->max_sge);
1237 		return -EINVAL;
1238 	}
1239 
1240 	/* verify consumer QPs are not trying to use GSI QP's CQ.
1241 	 * TGT QP isn't associated with RQ/SQ
1242 	 */
1243 	if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created) &&
1244 	    (attrs->qp_type != IB_QPT_XRC_TGT) &&
1245 	    (attrs->qp_type != IB_QPT_XRC_INI)) {
1246 		struct qedr_cq *send_cq = get_qedr_cq(attrs->send_cq);
1247 		struct qedr_cq *recv_cq = get_qedr_cq(attrs->recv_cq);
1248 
1249 		if ((send_cq->cq_type == QEDR_CQ_TYPE_GSI) ||
1250 		    (recv_cq->cq_type == QEDR_CQ_TYPE_GSI)) {
1251 			DP_ERR(dev,
1252 			       "create qp: consumer QP cannot use GSI CQs.\n");
1253 			return -EINVAL;
1254 		}
1255 	}
1256 
1257 	return 0;
1258 }
1259 
1260 static int qedr_copy_srq_uresp(struct qedr_dev *dev,
1261 			       struct qedr_srq *srq, struct ib_udata *udata)
1262 {
1263 	struct qedr_create_srq_uresp uresp = {};
1264 	int rc;
1265 
1266 	uresp.srq_id = srq->srq_id;
1267 
1268 	rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1269 	if (rc)
1270 		DP_ERR(dev, "create srq: problem copying data to user space\n");
1271 
1272 	return rc;
1273 }
1274 
1275 static void qedr_copy_rq_uresp(struct qedr_dev *dev,
1276 			       struct qedr_create_qp_uresp *uresp,
1277 			       struct qedr_qp *qp)
1278 {
1279 	/* iWARP requires two doorbells per RQ. */
1280 	if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
1281 		uresp->rq_db_offset =
1282 		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
1283 		uresp->rq_db2_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
1284 	} else {
1285 		uresp->rq_db_offset =
1286 		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
1287 	}
1288 
1289 	uresp->rq_icid = qp->icid;
1290 	if (qp->urq.db_mmap_entry)
1291 		uresp->rq_db_rec_addr =
1292 			rdma_user_mmap_get_offset(qp->urq.db_mmap_entry);
1293 }
1294 
1295 static void qedr_copy_sq_uresp(struct qedr_dev *dev,
1296 			       struct qedr_create_qp_uresp *uresp,
1297 			       struct qedr_qp *qp)
1298 {
1299 	uresp->sq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
1300 
1301 	/* iWARP uses the same cid for rq and sq */
1302 	if (rdma_protocol_iwarp(&dev->ibdev, 1))
1303 		uresp->sq_icid = qp->icid;
1304 	else
1305 		uresp->sq_icid = qp->icid + 1;
1306 
1307 	if (qp->usq.db_mmap_entry)
1308 		uresp->sq_db_rec_addr =
1309 			rdma_user_mmap_get_offset(qp->usq.db_mmap_entry);
1310 }
1311 
1312 static int qedr_copy_qp_uresp(struct qedr_dev *dev,
1313 			      struct qedr_qp *qp, struct ib_udata *udata,
1314 			      struct qedr_create_qp_uresp *uresp)
1315 {
1316 	int rc;
1317 
1318 	memset(uresp, 0, sizeof(*uresp));
1319 
1320 	if (qedr_qp_has_sq(qp))
1321 		qedr_copy_sq_uresp(dev, uresp, qp);
1322 
1323 	if (qedr_qp_has_rq(qp))
1324 		qedr_copy_rq_uresp(dev, uresp, qp);
1325 
1326 	uresp->atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
1327 	uresp->qp_id = qp->qp_id;
1328 
1329 	rc = qedr_ib_copy_to_udata(udata, uresp, sizeof(*uresp));
1330 	if (rc)
1331 		DP_ERR(dev,
1332 		       "create qp: failed a copy to user space with qp icid=0x%x.\n",
1333 		       qp->icid);
1334 
1335 	return rc;
1336 }
1337 
1338 static void qedr_set_common_qp_params(struct qedr_dev *dev,
1339 				      struct qedr_qp *qp,
1340 				      struct qedr_pd *pd,
1341 				      struct ib_qp_init_attr *attrs)
1342 {
1343 	spin_lock_init(&qp->q_lock);
1344 	if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
1345 		kref_init(&qp->refcnt);
1346 		init_completion(&qp->iwarp_cm_comp);
1347 	}
1348 
1349 	qp->pd = pd;
1350 	qp->qp_type = attrs->qp_type;
1351 	qp->max_inline_data = attrs->cap.max_inline_data;
1352 	qp->state = QED_ROCE_QP_STATE_RESET;
1353 	qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
1354 	qp->dev = dev;
1355 	if (qedr_qp_has_sq(qp)) {
1356 		qp->sq.max_sges = attrs->cap.max_send_sge;
1357 		qp->sq_cq = get_qedr_cq(attrs->send_cq);
1358 		DP_DEBUG(dev, QEDR_MSG_QP,
1359 			 "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n",
1360 			 qp->sq.max_sges, qp->sq_cq->icid);
1361 	}
1362 
1363 	if (attrs->srq)
1364 		qp->srq = get_qedr_srq(attrs->srq);
1365 
1366 	if (qedr_qp_has_rq(qp)) {
1367 		qp->rq_cq = get_qedr_cq(attrs->recv_cq);
1368 		qp->rq.max_sges = attrs->cap.max_recv_sge;
1369 		DP_DEBUG(dev, QEDR_MSG_QP,
1370 			 "RQ params:\trq_max_sges = %d, rq_cq_id = %d\n",
1371 			 qp->rq.max_sges, qp->rq_cq->icid);
1372 	}
1373 
1374 	DP_DEBUG(dev, QEDR_MSG_QP,
1375 		 "QP params:\tpd = %d, qp_type = %d, max_inline_data = %d, state = %d, signaled = %d, use_srq=%d\n",
1376 		 pd->pd_id, qp->qp_type, qp->max_inline_data,
1377 		 qp->state, qp->signaled, (attrs->srq) ? 1 : 0);
1378 	DP_DEBUG(dev, QEDR_MSG_QP,
1379 		 "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n",
1380 		 qp->sq.max_sges, qp->sq_cq->icid);
1381 }
1382 
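/* Set the kernel SQ/RQ doorbell addresses and register them for doorbell
 * recovery; the SQ registration is rolled back if the RQ one fails.
 */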
1383 static int qedr_set_roce_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
1384 {
1385 	int rc = 0;
1386 
1387 	if (qedr_qp_has_sq(qp)) {
1388 		qp->sq.db = dev->db_addr +
1389 			    DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
1390 		qp->sq.db_data.data.icid = qp->icid + 1;
1391 		rc = qedr_db_recovery_add(dev, qp->sq.db, &qp->sq.db_data,
1392 					  DB_REC_WIDTH_32B, DB_REC_KERNEL);
1393 		if (rc)
1394 			return rc;
1395 	}
1396 
1397 	if (qedr_qp_has_rq(qp)) {
1398 		qp->rq.db = dev->db_addr +
1399 			    DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
1400 		qp->rq.db_data.data.icid = qp->icid;
1401 		rc = qedr_db_recovery_add(dev, qp->rq.db, &qp->rq.db_data,
1402 					  DB_REC_WIDTH_32B, DB_REC_KERNEL);
1403 		if (rc && qedr_qp_has_sq(qp))
1404 			qedr_db_recovery_del(dev, qp->sq.db, &qp->sq.db_data);
1405 	}
1406 
1407 	return rc;
1408 }
1409 
1410 static int qedr_check_srq_params(struct qedr_dev *dev,
1411 				 struct ib_srq_init_attr *attrs,
1412 				 struct ib_udata *udata)
1413 {
1414 	struct qedr_device_attr *qattr = &dev->attr;
1415 
1416 	if (attrs->attr.max_wr > qattr->max_srq_wr) {
1417 		DP_ERR(dev,
1418 		       "create srq: unsupported srq_wr=0x%x requested (max_srq_wr=0x%x)\n",
1419 		       attrs->attr.max_wr, qattr->max_srq_wr);
1420 		return -EINVAL;
1421 	}
1422 
1423 	if (attrs->attr.max_sge > qattr->max_sge) {
1424 		DP_ERR(dev,
1425 		       "create srq: unsupported sge=0x%x requested (max_srq_sge=0x%x)\n",
1426 		       attrs->attr.max_sge, qattr->max_sge);
1427 	}
1428 
1429 	if (!udata && attrs->srq_type == IB_SRQT_XRC) {
1430 		DP_ERR(dev, "XRC SRQs are not supported in kernel-space\n");
1431 		return -EINVAL;
1432 	}
1433 
1434 	return 0;
1435 }
1436 
1437 static void qedr_free_srq_user_params(struct qedr_srq *srq)
1438 {
1439 	qedr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl);
1440 	ib_umem_release(srq->usrq.umem);
1441 	ib_umem_release(srq->prod_umem);
1442 }
1443 
1444 static void qedr_free_srq_kernel_params(struct qedr_srq *srq)
1445 {
1446 	struct qedr_srq_hwq_info *hw_srq = &srq->hw_srq;
1447 	struct qedr_dev *dev = srq->dev;
1448 
1449 	dev->ops->common->chain_free(dev->cdev, &hw_srq->pbl);
1450 
1451 	dma_free_coherent(&dev->pdev->dev, sizeof(struct rdma_srq_producers),
1452 			  hw_srq->virt_prod_pair_addr,
1453 			  hw_srq->phy_prod_pair_addr);
1454 }
1455 
1456 static int qedr_init_srq_user_params(struct ib_udata *udata,
1457 				     struct qedr_srq *srq,
1458 				     struct qedr_create_srq_ureq *ureq,
1459 				     int access)
1460 {
1461 	struct scatterlist *sg;
1462 	int rc;
1463 
1464 	rc = qedr_init_user_queue(udata, srq->dev, &srq->usrq, ureq->srq_addr,
1465 				  ureq->srq_len, false, access, 1);
1466 	if (rc)
1467 		return rc;
1468 
1469 	srq->prod_umem = ib_umem_get(srq->ibsrq.device, ureq->prod_pair_addr,
1470 				     sizeof(struct rdma_srq_producers), access);
1471 	if (IS_ERR(srq->prod_umem)) {
1472 		qedr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl);
1473 		ib_umem_release(srq->usrq.umem);
1474 		DP_ERR(srq->dev,
1475 		       "create srq: failed ib_umem_get for producer, got %ld\n",
1476 		       PTR_ERR(srq->prod_umem));
1477 		return PTR_ERR(srq->prod_umem);
1478 	}
1479 
1480 	sg = srq->prod_umem->sg_head.sgl;
1481 	srq->hw_srq.phy_prod_pair_addr = sg_dma_address(sg);
1482 
1483 	return 0;
1484 }
1485 
1486 static int qedr_alloc_srq_kernel_params(struct qedr_srq *srq,
1487 					struct qedr_dev *dev,
1488 					struct ib_srq_init_attr *init_attr)
1489 {
1490 	struct qedr_srq_hwq_info *hw_srq = &srq->hw_srq;
1491 	struct qed_chain_init_params params = {
1492 		.mode		= QED_CHAIN_MODE_PBL,
1493 		.intended_use	= QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1494 		.cnt_type	= QED_CHAIN_CNT_TYPE_U32,
1495 		.elem_size	= QEDR_SRQ_WQE_ELEM_SIZE,
1496 	};
1497 	dma_addr_t phy_prod_pair_addr;
1498 	u32 num_elems;
1499 	void *va;
1500 	int rc;
1501 
1502 	va = dma_alloc_coherent(&dev->pdev->dev,
1503 				sizeof(struct rdma_srq_producers),
1504 				&phy_prod_pair_addr, GFP_KERNEL);
1505 	if (!va) {
1506 		DP_ERR(dev,
1507 		       "create srq: failed to allocate dma memory for producer\n");
1508 		return -ENOMEM;
1509 	}
1510 
1511 	hw_srq->phy_prod_pair_addr = phy_prod_pair_addr;
1512 	hw_srq->virt_prod_pair_addr = va;
1513 
1514 	num_elems = init_attr->attr.max_wr * RDMA_MAX_SRQ_WQE_SIZE;
1515 	params.num_elems = num_elems;
1516 
1517 	rc = dev->ops->common->chain_alloc(dev->cdev, &hw_srq->pbl, &params);
1518 	if (rc)
1519 		goto err0;
1520 
1521 	hw_srq->num_elems = num_elems;
1522 
1523 	return 0;
1524 
1525 err0:
1526 	dma_free_coherent(&dev->pdev->dev, sizeof(struct rdma_srq_producers),
1527 			  va, phy_prod_pair_addr);
1528 	return rc;
1529 }
1530 
1531 int qedr_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
1532 		    struct ib_udata *udata)
1533 {
1534 	struct qed_rdma_destroy_srq_in_params destroy_in_params;
1535 	struct qed_rdma_create_srq_in_params in_params = {};
1536 	struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
1537 	struct qed_rdma_create_srq_out_params out_params;
1538 	struct qedr_pd *pd = get_qedr_pd(ibsrq->pd);
1539 	struct qedr_create_srq_ureq ureq = {};
1540 	u64 pbl_base_addr, phy_prod_pair_addr;
1541 	struct qedr_srq_hwq_info *hw_srq;
1542 	u32 page_cnt, page_size;
1543 	struct qedr_srq *srq = get_qedr_srq(ibsrq);
1544 	int rc = 0;
1545 
1546 	DP_DEBUG(dev, QEDR_MSG_QP,
1547 		 "create SRQ called from %s (pd %p)\n",
1548 		 (udata) ? "User lib" : "kernel", pd);
1549 
1550 	rc = qedr_check_srq_params(dev, init_attr, udata);
1551 	if (rc)
1552 		return -EINVAL;
1553 
1554 	srq->dev = dev;
1555 	srq->is_xrc = (init_attr->srq_type == IB_SRQT_XRC);
1556 	hw_srq = &srq->hw_srq;
1557 	spin_lock_init(&srq->lock);
1558 
1559 	hw_srq->max_wr = init_attr->attr.max_wr;
1560 	hw_srq->max_sges = init_attr->attr.max_sge;
1561 
1562 	if (udata) {
1563 		if (ib_copy_from_udata(&ureq, udata, min(sizeof(ureq),
1564 							 udata->inlen))) {
1565 			DP_ERR(dev,
1566 			       "create srq: problem copying data from user space\n");
1567 			goto err0;
1568 		}
1569 
1570 		rc = qedr_init_srq_user_params(udata, srq, &ureq, 0);
1571 		if (rc)
1572 			goto err0;
1573 
1574 		page_cnt = srq->usrq.pbl_info.num_pbes;
1575 		pbl_base_addr = srq->usrq.pbl_tbl->pa;
1576 		phy_prod_pair_addr = hw_srq->phy_prod_pair_addr;
1577 		page_size = PAGE_SIZE;
1578 	} else {
1579 		struct qed_chain *pbl;
1580 
1581 		rc = qedr_alloc_srq_kernel_params(srq, dev, init_attr);
1582 		if (rc)
1583 			goto err0;
1584 
1585 		pbl = &hw_srq->pbl;
1586 		page_cnt = qed_chain_get_page_cnt(pbl);
1587 		pbl_base_addr = qed_chain_get_pbl_phys(pbl);
1588 		phy_prod_pair_addr = hw_srq->phy_prod_pair_addr;
1589 		page_size = QED_CHAIN_PAGE_SIZE;
1590 	}
1591 
1592 	in_params.pd_id = pd->pd_id;
1593 	in_params.pbl_base_addr = pbl_base_addr;
1594 	in_params.prod_pair_addr = phy_prod_pair_addr;
1595 	in_params.num_pages = page_cnt;
1596 	in_params.page_size = page_size;
1597 	if (srq->is_xrc) {
1598 		struct qedr_xrcd *xrcd = get_qedr_xrcd(init_attr->ext.xrc.xrcd);
1599 		struct qedr_cq *cq = get_qedr_cq(init_attr->ext.cq);
1600 
1601 		in_params.is_xrc = 1;
1602 		in_params.xrcd_id = xrcd->xrcd_id;
1603 		in_params.cq_cid = cq->icid;
1604 	}
1605 
1606 	rc = dev->ops->rdma_create_srq(dev->rdma_ctx, &in_params, &out_params);
1607 	if (rc)
1608 		goto err1;
1609 
1610 	srq->srq_id = out_params.srq_id;
1611 
1612 	if (udata) {
1613 		rc = qedr_copy_srq_uresp(dev, srq, udata);
1614 		if (rc)
1615 			goto err2;
1616 	}
1617 
1618 	rc = xa_insert_irq(&dev->srqs, srq->srq_id, srq, GFP_KERNEL);
1619 	if (rc)
1620 		goto err2;
1621 
1622 	DP_DEBUG(dev, QEDR_MSG_SRQ,
1623 		 "create srq: created srq with srq_id=0x%0x\n", srq->srq_id);
1624 	return 0;
1625 
1626 err2:
1627 	destroy_in_params.srq_id = srq->srq_id;
1628 
1629 	dev->ops->rdma_destroy_srq(dev->rdma_ctx, &destroy_in_params);
1630 err1:
1631 	if (udata)
1632 		qedr_free_srq_user_params(srq);
1633 	else
1634 		qedr_free_srq_kernel_params(srq);
1635 err0:
1636 	return -EFAULT;
1637 }
1638 
1639 int qedr_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
1640 {
1641 	struct qed_rdma_destroy_srq_in_params in_params = {};
1642 	struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
1643 	struct qedr_srq *srq = get_qedr_srq(ibsrq);
1644 
1645 	xa_erase_irq(&dev->srqs, srq->srq_id);
1646 	in_params.srq_id = srq->srq_id;
1647 	in_params.is_xrc = srq->is_xrc;
1648 	dev->ops->rdma_destroy_srq(dev->rdma_ctx, &in_params);
1649 
1650 	if (ibsrq->uobject)
1651 		qedr_free_srq_user_params(srq);
1652 	else
1653 		qedr_free_srq_kernel_params(srq);
1654 
1655 	DP_DEBUG(dev, QEDR_MSG_SRQ,
1656 		 "destroy srq: destroyed srq with srq_id=0x%0x\n",
1657 		 srq->srq_id);
1658 	return 0;
1659 }
1660 
1661 int qedr_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
1662 		    enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
1663 {
1664 	struct qed_rdma_modify_srq_in_params in_params = {};
1665 	struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
1666 	struct qedr_srq *srq = get_qedr_srq(ibsrq);
1667 	int rc;
1668 
1669 	if (attr_mask & IB_SRQ_MAX_WR) {
1670 		DP_ERR(dev,
1671 		       "modify srq: invalid attribute mask=0x%x specified for %p\n",
1672 		       attr_mask, srq);
1673 		return -EINVAL;
1674 	}
1675 
1676 	if (attr_mask & IB_SRQ_LIMIT) {
1677 		if (attr->srq_limit >= srq->hw_srq.max_wr) {
1678 			DP_ERR(dev,
1679 			       "modify srq: invalid srq_limit=0x%x (max_srq_limit=0x%x)\n",
1680 			       attr->srq_limit, srq->hw_srq.max_wr);
1681 			return -EINVAL;
1682 		}
1683 
1684 		in_params.srq_id = srq->srq_id;
1685 		in_params.wqe_limit = attr->srq_limit;
1686 		rc = dev->ops->rdma_modify_srq(dev->rdma_ctx, &in_params);
1687 		if (rc)
1688 			return rc;
1689 	}
1690 
1691 	srq->srq_limit = attr->srq_limit;
1692 
1693 	DP_DEBUG(dev, QEDR_MSG_SRQ,
1694 		 "modify srq: modified srq with srq_id=0x%0x\n", srq->srq_id);
1695 
1696 	return 0;
1697 }
1698 
1699 static enum qed_rdma_qp_type qedr_ib_to_qed_qp_type(enum ib_qp_type ib_qp_type)
1700 {
1701 	switch (ib_qp_type) {
1702 	case IB_QPT_RC:
1703 		return QED_RDMA_QP_TYPE_RC;
1704 	case IB_QPT_XRC_INI:
1705 		return QED_RDMA_QP_TYPE_XRC_INI;
1706 	case IB_QPT_XRC_TGT:
1707 		return QED_RDMA_QP_TYPE_XRC_TGT;
1708 	default:
1709 		return QED_RDMA_QP_TYPE_INVAL;
1710 	}
1711 }
1712 
1713 static inline void
1714 qedr_init_common_qp_in_params(struct qedr_dev *dev,
1715 			      struct qedr_pd *pd,
1716 			      struct qedr_qp *qp,
1717 			      struct ib_qp_init_attr *attrs,
1718 			      bool fmr_and_reserved_lkey,
1719 			      struct qed_rdma_create_qp_in_params *params)
1720 {
1721 	/* QP handle to be written in an async event */
1722 	params->qp_handle_async_lo = lower_32_bits((uintptr_t) qp);
1723 	params->qp_handle_async_hi = upper_32_bits((uintptr_t) qp);
1724 
1725 	params->signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR);
1726 	params->fmr_and_reserved_lkey = fmr_and_reserved_lkey;
1727 	params->qp_type = qedr_ib_to_qed_qp_type(attrs->qp_type);
1728 	params->stats_queue = 0;
1729 
1730 	if (pd) {
1731 		params->pd = pd->pd_id;
1732 		params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
1733 	}
1734 
1735 	if (qedr_qp_has_sq(qp))
1736 		params->sq_cq_id = get_qedr_cq(attrs->send_cq)->icid;
1737 
1738 	if (qedr_qp_has_rq(qp))
1739 		params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
1740 
1741 	if (qedr_qp_has_srq(qp)) {
1742 		params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
1743 		params->srq_id = qp->srq->srq_id;
1744 		params->use_srq = true;
1745 	} else {
1746 		params->srq_id = 0;
1747 		params->use_srq = false;
1748 	}
1749 }
1750 
1751 static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp)
1752 {
1753 	DP_DEBUG(dev, QEDR_MSG_QP, "create qp: successfully created user QP. "
1754 		 "qp=%p. "
1755 		 "sq_addr=0x%llx, "
1756 		 "sq_len=%zd, "
1757 		 "rq_addr=0x%llx, "
1758 		 "rq_len=%zd"
1759 		 "\n",
1760 		 qp,
1761 		 qedr_qp_has_sq(qp) ? qp->usq.buf_addr : 0x0,
1762 		 qedr_qp_has_sq(qp) ? qp->usq.buf_len : 0,
1763 		 qedr_qp_has_rq(qp) ? qp->urq.buf_addr : 0x0,
1764 		 qedr_qp_has_rq(qp) ? qp->urq.buf_len : 0);
1765 }
1766 
1767 static inline void
1768 qedr_iwarp_populate_user_qp(struct qedr_dev *dev,
1769 			    struct qedr_qp *qp,
1770 			    struct qed_rdma_create_qp_out_params *out_params)
1771 {
1772 	qp->usq.pbl_tbl->va = out_params->sq_pbl_virt;
1773 	qp->usq.pbl_tbl->pa = out_params->sq_pbl_phys;
1774 
1775 	qedr_populate_pbls(dev, qp->usq.umem, qp->usq.pbl_tbl,
1776 			   &qp->usq.pbl_info, FW_PAGE_SHIFT);
1777 	if (!qp->srq) {
1778 		qp->urq.pbl_tbl->va = out_params->rq_pbl_virt;
1779 		qp->urq.pbl_tbl->pa = out_params->rq_pbl_phys;
1780 	}
1781 
1782 	qedr_populate_pbls(dev, qp->urq.umem, qp->urq.pbl_tbl,
1783 			   &qp->urq.pbl_info, FW_PAGE_SHIFT);
1784 }
1785 
1786 static void qedr_cleanup_user(struct qedr_dev *dev,
1787 			      struct qedr_ucontext *ctx,
1788 			      struct qedr_qp *qp)
1789 {
1790 	if (qedr_qp_has_sq(qp)) {
1791 		ib_umem_release(qp->usq.umem);
1792 		qp->usq.umem = NULL;
1793 	}
1794 
1795 	if (qedr_qp_has_rq(qp)) {
1796 		ib_umem_release(qp->urq.umem);
1797 		qp->urq.umem = NULL;
1798 	}
1799 
1800 	if (rdma_protocol_roce(&dev->ibdev, 1)) {
1801 		qedr_free_pbl(dev, &qp->usq.pbl_info, qp->usq.pbl_tbl);
1802 		qedr_free_pbl(dev, &qp->urq.pbl_info, qp->urq.pbl_tbl);
1803 	} else {
1804 		kfree(qp->usq.pbl_tbl);
1805 		kfree(qp->urq.pbl_tbl);
1806 	}
1807 
1808 	if (qp->usq.db_rec_data) {
1809 		qedr_db_recovery_del(dev, qp->usq.db_addr,
1810 				     &qp->usq.db_rec_data->db_data);
1811 		rdma_user_mmap_entry_remove(qp->usq.db_mmap_entry);
1812 	}
1813 
1814 	if (qp->urq.db_rec_data) {
1815 		qedr_db_recovery_del(dev, qp->urq.db_addr,
1816 				     &qp->urq.db_rec_data->db_data);
1817 		rdma_user_mmap_entry_remove(qp->urq.db_mmap_entry);
1818 	}
1819 
1820 	if (rdma_protocol_iwarp(&dev->ibdev, 1))
1821 		qedr_db_recovery_del(dev, qp->urq.db_rec_db2_addr,
1822 				     &qp->urq.db_rec_db2_data);
1823 }
1824 
1825 static int qedr_create_user_qp(struct qedr_dev *dev,
1826 			       struct qedr_qp *qp,
1827 			       struct ib_pd *ibpd,
1828 			       struct ib_udata *udata,
1829 			       struct ib_qp_init_attr *attrs)
1830 {
1831 	struct qed_rdma_create_qp_in_params in_params;
1832 	struct qed_rdma_create_qp_out_params out_params;
1833 	struct qedr_create_qp_uresp uresp = {};
1834 	struct qedr_create_qp_ureq ureq = {};
1835 	int alloc_and_init = rdma_protocol_roce(&dev->ibdev, 1);
1836 	struct qedr_ucontext *ctx = NULL;
1837 	struct qedr_pd *pd = NULL;
1838 	int rc = 0;
1839 
1840 	qp->create_type = QEDR_QP_CREATE_USER;
1841 
1842 	if (ibpd) {
1843 		pd = get_qedr_pd(ibpd);
1844 		ctx = pd->uctx;
1845 	}
1846 
1847 	if (udata) {
1848 		rc = ib_copy_from_udata(&ureq, udata, min(sizeof(ureq),
1849 					udata->inlen));
1850 		if (rc) {
1851 			DP_ERR(dev, "Problem copying data from user space\n");
1852 			return rc;
1853 		}
1854 	}
1855 
1856 	if (qedr_qp_has_sq(qp)) {
1857 		/* SQ - read access only (0) */
1858 		rc = qedr_init_user_queue(udata, dev, &qp->usq, ureq.sq_addr,
1859 					  ureq.sq_len, true, 0, alloc_and_init);
1860 		if (rc)
1861 			return rc;
1862 	}
1863 
1864 	if (qedr_qp_has_rq(qp)) {
1865 		/* RQ - read access only (0) */
1866 		rc = qedr_init_user_queue(udata, dev, &qp->urq, ureq.rq_addr,
1867 					  ureq.rq_len, true, 0, alloc_and_init);
1868 		if (rc)
1869 			return rc;
1870 	}
1871 
1872 	memset(&in_params, 0, sizeof(in_params));
1873 	qedr_init_common_qp_in_params(dev, pd, qp, attrs, false, &in_params);
1874 	in_params.qp_handle_lo = ureq.qp_handle_lo;
1875 	in_params.qp_handle_hi = ureq.qp_handle_hi;
1876 
1877 	if (qp->qp_type == IB_QPT_XRC_TGT) {
1878 		struct qedr_xrcd *xrcd = get_qedr_xrcd(attrs->xrcd);
1879 
1880 		in_params.xrcd_id = xrcd->xrcd_id;
1881 		in_params.qp_handle_lo = qp->qp_id;
1882 		in_params.use_srq = 1;
1883 	}
1884 
1885 	if (qedr_qp_has_sq(qp)) {
1886 		in_params.sq_num_pages = qp->usq.pbl_info.num_pbes;
1887 		in_params.sq_pbl_ptr = qp->usq.pbl_tbl->pa;
1888 	}
1889 
1890 	if (qedr_qp_has_rq(qp)) {
1891 		in_params.rq_num_pages = qp->urq.pbl_info.num_pbes;
1892 		in_params.rq_pbl_ptr = qp->urq.pbl_tbl->pa;
1893 	}
1894 
1895 	if (ctx)
1896 		SET_FIELD(in_params.flags, QED_ROCE_EDPM_MODE, ctx->edpm_mode);
1897 
1898 	qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
1899 					      &in_params, &out_params);
1900 
1901 	if (!qp->qed_qp) {
1902 		rc = -ENOMEM;
1903 		goto err1;
1904 	}
1905 
1906 	if (rdma_protocol_iwarp(&dev->ibdev, 1))
1907 		qedr_iwarp_populate_user_qp(dev, qp, &out_params);
1908 
1909 	qp->qp_id = out_params.qp_id;
1910 	qp->icid = out_params.icid;
1911 
1912 	if (udata) {
1913 		rc = qedr_copy_qp_uresp(dev, qp, udata, &uresp);
1914 		if (rc)
1915 			goto err;
1916 	}
1917 
1918 	/* The db offset was calculated in copy_qp_uresp(); now set it in the user queues */
1919 	if (qedr_qp_has_sq(qp)) {
1920 		qp->usq.db_addr = ctx->dpi_addr + uresp.sq_db_offset;
1921 		qp->sq.max_wr = attrs->cap.max_send_wr;
1922 		rc = qedr_db_recovery_add(dev, qp->usq.db_addr,
1923 					  &qp->usq.db_rec_data->db_data,
1924 					  DB_REC_WIDTH_32B,
1925 					  DB_REC_USER);
1926 		if (rc)
1927 			goto err;
1928 	}
1929 
1930 	if (qedr_qp_has_rq(qp)) {
1931 		qp->urq.db_addr = ctx->dpi_addr + uresp.rq_db_offset;
1932 		qp->rq.max_wr = attrs->cap.max_recv_wr;
1933 		rc = qedr_db_recovery_add(dev, qp->urq.db_addr,
1934 					  &qp->urq.db_rec_data->db_data,
1935 					  DB_REC_WIDTH_32B,
1936 					  DB_REC_USER);
1937 		if (rc)
1938 			goto err;
1939 	}
1940 
1941 	if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
1942 		qp->urq.db_rec_db2_addr = ctx->dpi_addr + uresp.rq_db2_offset;
1943 
1944 		/* Calculate the db_rec_db2 data here since it is constant, so
1945 		 * there is no need to reflect it from user space
1946 		 */
1947 		qp->urq.db_rec_db2_data.data.icid = cpu_to_le16(qp->icid);
1948 		qp->urq.db_rec_db2_data.data.value =
1949 			cpu_to_le16(DQ_TCM_IWARP_POST_RQ_CF_CMD);
1950 
1951 		rc = qedr_db_recovery_add(dev, qp->urq.db_rec_db2_addr,
1952 					  &qp->urq.db_rec_db2_data,
1953 					  DB_REC_WIDTH_32B,
1954 					  DB_REC_USER);
1955 		if (rc)
1956 			goto err;
1957 	}
1958 	qedr_qp_user_print(dev, qp);
1959 	return rc;
1960 err:
1961 	rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
1962 	if (rc)
1963 		DP_ERR(dev, "create qp: fatal fault. rc=%d", rc);
1964 
1965 err1:
1966 	qedr_cleanup_user(dev, ctx, qp);
1967 	return rc;
1968 }
1969 
1970 static int qedr_set_iwarp_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
1971 {
1972 	int rc;
1973 
1974 	qp->sq.db = dev->db_addr +
1975 	    DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
1976 	qp->sq.db_data.data.icid = qp->icid;
1977 
1978 	rc = qedr_db_recovery_add(dev, qp->sq.db,
1979 				  &qp->sq.db_data,
1980 				  DB_REC_WIDTH_32B,
1981 				  DB_REC_KERNEL);
1982 	if (rc)
1983 		return rc;
1984 
1985 	qp->rq.db = dev->db_addr +
1986 		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
1987 	qp->rq.db_data.data.icid = qp->icid;
1988 	qp->rq.iwarp_db2 = dev->db_addr +
1989 			   DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
1990 	qp->rq.iwarp_db2_data.data.icid = qp->icid;
1991 	qp->rq.iwarp_db2_data.data.value = DQ_TCM_IWARP_POST_RQ_CF_CMD;
1992 
1993 	rc = qedr_db_recovery_add(dev, qp->rq.db,
1994 				  &qp->rq.db_data,
1995 				  DB_REC_WIDTH_32B,
1996 				  DB_REC_KERNEL);
1997 	if (rc)
1998 		return rc;
1999 
2000 	rc = qedr_db_recovery_add(dev, qp->rq.iwarp_db2,
2001 				  &qp->rq.iwarp_db2_data,
2002 				  DB_REC_WIDTH_32B,
2003 				  DB_REC_KERNEL);
2004 	return rc;
2005 }
2006 
2007 static int
2008 qedr_roce_create_kernel_qp(struct qedr_dev *dev,
2009 			   struct qedr_qp *qp,
2010 			   struct qed_rdma_create_qp_in_params *in_params,
2011 			   u32 n_sq_elems, u32 n_rq_elems)
2012 {
2013 	struct qed_rdma_create_qp_out_params out_params;
2014 	struct qed_chain_init_params params = {
2015 		.mode		= QED_CHAIN_MODE_PBL,
2016 		.cnt_type	= QED_CHAIN_CNT_TYPE_U32,
2017 	};
2018 	int rc;
2019 
2020 	params.intended_use = QED_CHAIN_USE_TO_PRODUCE;
2021 	params.num_elems = n_sq_elems;
2022 	params.elem_size = QEDR_SQE_ELEMENT_SIZE;
2023 
2024 	rc = dev->ops->common->chain_alloc(dev->cdev, &qp->sq.pbl, &params);
2025 	if (rc)
2026 		return rc;
2027 
2028 	in_params->sq_num_pages = qed_chain_get_page_cnt(&qp->sq.pbl);
2029 	in_params->sq_pbl_ptr = qed_chain_get_pbl_phys(&qp->sq.pbl);
2030 
2031 	params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE;
2032 	params.num_elems = n_rq_elems;
2033 	params.elem_size = QEDR_RQE_ELEMENT_SIZE;
2034 
2035 	rc = dev->ops->common->chain_alloc(dev->cdev, &qp->rq.pbl, &params);
2036 	if (rc)
2037 		return rc;
2038 
2039 	in_params->rq_num_pages = qed_chain_get_page_cnt(&qp->rq.pbl);
2040 	in_params->rq_pbl_ptr = qed_chain_get_pbl_phys(&qp->rq.pbl);
2041 
2042 	qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
2043 					      in_params, &out_params);
2044 
2045 	if (!qp->qed_qp)
2046 		return -EINVAL;
2047 
2048 	qp->qp_id = out_params.qp_id;
2049 	qp->icid = out_params.icid;
2050 
2051 	return qedr_set_roce_db_info(dev, qp);
2052 }
2053 
2054 static int
2055 qedr_iwarp_create_kernel_qp(struct qedr_dev *dev,
2056 			    struct qedr_qp *qp,
2057 			    struct qed_rdma_create_qp_in_params *in_params,
2058 			    u32 n_sq_elems, u32 n_rq_elems)
2059 {
2060 	struct qed_rdma_create_qp_out_params out_params;
2061 	struct qed_chain_init_params params = {
2062 		.mode		= QED_CHAIN_MODE_PBL,
2063 		.cnt_type	= QED_CHAIN_CNT_TYPE_U32,
2064 	};
2065 	int rc;
2066 
2067 	in_params->sq_num_pages = QED_CHAIN_PAGE_CNT(n_sq_elems,
2068 						     QEDR_SQE_ELEMENT_SIZE,
2069 						     QED_CHAIN_PAGE_SIZE,
2070 						     QED_CHAIN_MODE_PBL);
2071 	in_params->rq_num_pages = QED_CHAIN_PAGE_CNT(n_rq_elems,
2072 						     QEDR_RQE_ELEMENT_SIZE,
2073 						     QED_CHAIN_PAGE_SIZE,
2074 						     QED_CHAIN_MODE_PBL);
2075 
2076 	qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
2077 					      in_params, &out_params);
2078 
2079 	if (!qp->qed_qp)
2080 		return -EINVAL;
2081 
2082 	/* Now we allocate the chain */
2083 
2084 	params.intended_use = QED_CHAIN_USE_TO_PRODUCE;
2085 	params.num_elems = n_sq_elems;
2086 	params.elem_size = QEDR_SQE_ELEMENT_SIZE;
2087 	params.ext_pbl_virt = out_params.sq_pbl_virt;
2088 	params.ext_pbl_phys = out_params.sq_pbl_phys;
2089 
2090 	rc = dev->ops->common->chain_alloc(dev->cdev, &qp->sq.pbl, &params);
2091 	if (rc)
2092 		goto err;
2093 
2094 	params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE;
2095 	params.num_elems = n_rq_elems;
2096 	params.elem_size = QEDR_RQE_ELEMENT_SIZE;
2097 	params.ext_pbl_virt = out_params.rq_pbl_virt;
2098 	params.ext_pbl_phys = out_params.rq_pbl_phys;
2099 
2100 	rc = dev->ops->common->chain_alloc(dev->cdev, &qp->rq.pbl, &params);
2101 	if (rc)
2102 		goto err;
2103 
2104 	qp->qp_id = out_params.qp_id;
2105 	qp->icid = out_params.icid;
2106 
2107 	return qedr_set_iwarp_db_info(dev, qp);
2108 
2109 err:
2110 	dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
2111 
2112 	return rc;
2113 }
2114 
2115 static void qedr_cleanup_kernel(struct qedr_dev *dev, struct qedr_qp *qp)
2116 {
2117 	dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl);
2118 	kfree(qp->wqe_wr_id);
2119 
2120 	dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
2121 	kfree(qp->rqe_wr_id);
2122 
2123 	/* GSI QPs aren't registered with the db recovery mechanism, so there is nothing to delete */
2124 	if (qp->qp_type == IB_QPT_GSI)
2125 		return;
2126 
2127 	qedr_db_recovery_del(dev, qp->sq.db, &qp->sq.db_data);
2128 
2129 	if (!qp->srq) {
2130 		qedr_db_recovery_del(dev, qp->rq.db, &qp->rq.db_data);
2131 
2132 		if (rdma_protocol_iwarp(&dev->ibdev, 1))
2133 			qedr_db_recovery_del(dev, qp->rq.iwarp_db2,
2134 					     &qp->rq.iwarp_db2_data);
2135 	}
2136 }
2137 
2138 static int qedr_create_kernel_qp(struct qedr_dev *dev,
2139 				 struct qedr_qp *qp,
2140 				 struct ib_pd *ibpd,
2141 				 struct ib_qp_init_attr *attrs)
2142 {
2143 	struct qed_rdma_create_qp_in_params in_params;
2144 	struct qedr_pd *pd = get_qedr_pd(ibpd);
2145 	int rc = -EINVAL;
2146 	u32 n_rq_elems;
2147 	u32 n_sq_elems;
2148 	u32 n_sq_entries;
2149 
2150 	memset(&in_params, 0, sizeof(in_params));
2151 	qp->create_type = QEDR_QP_CREATE_KERNEL;
2152 
2153 	/* A single work request may take up to QEDR_MAX_SQ_WQE_SIZE elements in
2154 	 * the ring. The ring should allow at least a single WR, even if the
2155 	 * user requested none, due to allocation issues.
2156 	 * We should add an extra WR since the prod and cons indices of
2157 	 * wqe_wr_id are managed in such a way that the WQ is considered full
2158 	 * when (prod+1)%max_wr==cons. We currently don't do that because we
2159 	 * double the number of entries due to an iSER issue that pushes far
2160 	 * more WRs than indicated. If we declined its ib_post_send() we would
2161 	 * get error prints in dmesg that we'd like to avoid.
2162 	 */
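	/* Illustrative example of the full check: with max_wr = 4, prod = 3
	 * and cons = 0, (prod + 1) % max_wr == cons, so the WQ is reported
	 * full while one slot is still unused, which is why the extra WR
	 * mentioned above would be needed.
	 */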
2163 	qp->sq.max_wr = min_t(u32, attrs->cap.max_send_wr * dev->wq_multiplier,
2164 			      dev->attr.max_sqe);
2165 
2166 	qp->wqe_wr_id = kcalloc(qp->sq.max_wr, sizeof(*qp->wqe_wr_id),
2167 				GFP_KERNEL);
2168 	if (!qp->wqe_wr_id) {
2169 		DP_ERR(dev, "create qp: failed SQ shadow memory allocation\n");
2170 		return -ENOMEM;
2171 	}
2172 
2173 	/* QP handle to be written in CQE */
2174 	in_params.qp_handle_lo = lower_32_bits((uintptr_t) qp);
2175 	in_params.qp_handle_hi = upper_32_bits((uintptr_t) qp);
2176 
2177 	/* A single work request may take up to QEDR_MAX_RQ_WQE_SIZE elements in
2178 	 * the ring. The ring should allow at least a single WR, even if the
2179 	 * user requested none, due to allocation issues.
2180 	 */
2181 	qp->rq.max_wr = (u16) max_t(u32, attrs->cap.max_recv_wr, 1);
2182 
2183 	/* Allocate driver internal RQ array */
2184 	qp->rqe_wr_id = kcalloc(qp->rq.max_wr, sizeof(*qp->rqe_wr_id),
2185 				GFP_KERNEL);
2186 	if (!qp->rqe_wr_id) {
2187 		DP_ERR(dev,
2188 		       "create qp: failed RQ shadow memory allocation\n");
2189 		kfree(qp->wqe_wr_id);
2190 		return -ENOMEM;
2191 	}
2192 
2193 	qedr_init_common_qp_in_params(dev, pd, qp, attrs, true, &in_params);
2194 
2195 	n_sq_entries = attrs->cap.max_send_wr;
2196 	n_sq_entries = min_t(u32, n_sq_entries, dev->attr.max_sqe);
2197 	n_sq_entries = max_t(u32, n_sq_entries, 1);
2198 	n_sq_elems = n_sq_entries * QEDR_MAX_SQE_ELEMENTS_PER_SQE;
2199 
2200 	n_rq_elems = qp->rq.max_wr * QEDR_MAX_RQE_ELEMENTS_PER_RQE;
2201 
2202 	if (rdma_protocol_iwarp(&dev->ibdev, 1))
2203 		rc = qedr_iwarp_create_kernel_qp(dev, qp, &in_params,
2204 						 n_sq_elems, n_rq_elems);
2205 	else
2206 		rc = qedr_roce_create_kernel_qp(dev, qp, &in_params,
2207 						n_sq_elems, n_rq_elems);
2208 	if (rc)
2209 		qedr_cleanup_kernel(dev, qp);
2210 
2211 	return rc;
2212 }
2213 
2214 static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp,
2215 				  struct ib_udata *udata)
2216 {
2217 	struct qedr_ucontext *ctx =
2218 		rdma_udata_to_drv_context(udata, struct qedr_ucontext,
2219 					  ibucontext);
2220 	int rc;
2221 
2222 	if (qp->qp_type != IB_QPT_GSI) {
2223 		rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
2224 		if (rc)
2225 			return rc;
2226 	}
2227 
2228 	if (qp->create_type == QEDR_QP_CREATE_USER)
2229 		qedr_cleanup_user(dev, ctx, qp);
2230 	else
2231 		qedr_cleanup_kernel(dev, qp);
2232 
2233 	return 0;
2234 }
2235 
2236 struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
2237 			     struct ib_qp_init_attr *attrs,
2238 			     struct ib_udata *udata)
2239 {
2240 	struct qedr_xrcd *xrcd = NULL;
2241 	struct qedr_pd *pd = NULL;
2242 	struct qedr_dev *dev;
2243 	struct qedr_qp *qp;
2244 	struct ib_qp *ibqp;
2245 	int rc = 0;
2246 
2247 	if (attrs->qp_type == IB_QPT_XRC_TGT) {
2248 		xrcd = get_qedr_xrcd(attrs->xrcd);
2249 		dev = get_qedr_dev(xrcd->ibxrcd.device);
2250 	} else {
2251 		pd = get_qedr_pd(ibpd);
2252 		dev = get_qedr_dev(ibpd->device);
2253 	}
2254 
2255 	DP_DEBUG(dev, QEDR_MSG_QP, "create qp: called from %s, pd=%p\n",
2256 		 udata ? "user library" : "kernel", pd);
2257 
2258 	rc = qedr_check_qp_attrs(ibpd, dev, attrs, udata);
2259 	if (rc)
2260 		return ERR_PTR(rc);
2261 
2262 	DP_DEBUG(dev, QEDR_MSG_QP,
2263 		 "create qp: called from %s, event_handler=%p, pd=%p, sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n",
2264 		 udata ? "user library" : "kernel", attrs->event_handler, pd,
2265 		 get_qedr_cq(attrs->send_cq),
2266 		 get_qedr_cq(attrs->send_cq)->icid,
2267 		 get_qedr_cq(attrs->recv_cq),
2268 		 attrs->recv_cq ? get_qedr_cq(attrs->recv_cq)->icid : 0);
2269 
2270 	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
2271 	if (!qp) {
2272 		DP_ERR(dev, "create qp: failed allocating memory\n");
2273 		return ERR_PTR(-ENOMEM);
2274 	}
2275 
2276 	qedr_set_common_qp_params(dev, qp, pd, attrs);
2277 
2278 	if (attrs->qp_type == IB_QPT_GSI) {
2279 		ibqp = qedr_create_gsi_qp(dev, attrs, qp);
2280 		if (IS_ERR(ibqp))
2281 			kfree(qp);
2282 		return ibqp;
2283 	}
2284 
2285 	if (udata || xrcd)
2286 		rc = qedr_create_user_qp(dev, qp, ibpd, udata, attrs);
2287 	else
2288 		rc = qedr_create_kernel_qp(dev, qp, ibpd, attrs);
2289 
2290 	if (rc)
2291 		goto out_free_qp;
2292 
2293 	qp->ibqp.qp_num = qp->qp_id;
2294 
2295 	if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
2296 		rc = xa_insert(&dev->qps, qp->qp_id, qp, GFP_KERNEL);
2297 		if (rc)
2298 			goto out_free_qp_resources;
2299 	}
2300 
2301 	return &qp->ibqp;
2302 
2303 out_free_qp_resources:
2304 	qedr_free_qp_resources(dev, qp, udata);
2305 out_free_qp:
2306 	kfree(qp);
2307 
2308 	return ERR_PTR(-EFAULT);
2309 }
2310 
2311 static enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
2312 {
2313 	switch (qp_state) {
2314 	case QED_ROCE_QP_STATE_RESET:
2315 		return IB_QPS_RESET;
2316 	case QED_ROCE_QP_STATE_INIT:
2317 		return IB_QPS_INIT;
2318 	case QED_ROCE_QP_STATE_RTR:
2319 		return IB_QPS_RTR;
2320 	case QED_ROCE_QP_STATE_RTS:
2321 		return IB_QPS_RTS;
2322 	case QED_ROCE_QP_STATE_SQD:
2323 		return IB_QPS_SQD;
2324 	case QED_ROCE_QP_STATE_ERR:
2325 		return IB_QPS_ERR;
2326 	case QED_ROCE_QP_STATE_SQE:
2327 		return IB_QPS_SQE;
2328 	}
2329 	return IB_QPS_ERR;
2330 }
2331 
2332 static enum qed_roce_qp_state qedr_get_state_from_ibqp(
2333 					enum ib_qp_state qp_state)
2334 {
2335 	switch (qp_state) {
2336 	case IB_QPS_RESET:
2337 		return QED_ROCE_QP_STATE_RESET;
2338 	case IB_QPS_INIT:
2339 		return QED_ROCE_QP_STATE_INIT;
2340 	case IB_QPS_RTR:
2341 		return QED_ROCE_QP_STATE_RTR;
2342 	case IB_QPS_RTS:
2343 		return QED_ROCE_QP_STATE_RTS;
2344 	case IB_QPS_SQD:
2345 		return QED_ROCE_QP_STATE_SQD;
2346 	case IB_QPS_ERR:
2347 		return QED_ROCE_QP_STATE_ERR;
2348 	default:
2349 		return QED_ROCE_QP_STATE_ERR;
2350 	}
2351 }
2352 
2353 static void qedr_reset_qp_hwq_info(struct qedr_qp_hwq_info *qph)
2354 {
2355 	qed_chain_reset(&qph->pbl);
2356 	qph->prod = 0;
2357 	qph->cons = 0;
2358 	qph->wqe_cons = 0;
2359 	qph->db_data.data.value = cpu_to_le16(0);
2360 }
2361 
2362 static int qedr_update_qp_state(struct qedr_dev *dev,
2363 				struct qedr_qp *qp,
2364 				enum qed_roce_qp_state cur_state,
2365 				enum qed_roce_qp_state new_state)
2366 {
2367 	int status = 0;
2368 
2369 	if (new_state == cur_state)
2370 		return 0;
2371 
2372 	switch (cur_state) {
2373 	case QED_ROCE_QP_STATE_RESET:
2374 		switch (new_state) {
2375 		case QED_ROCE_QP_STATE_INIT:
2376 			qp->prev_wqe_size = 0;
2377 			qedr_reset_qp_hwq_info(&qp->sq);
2378 			qedr_reset_qp_hwq_info(&qp->rq);
2379 			break;
2380 		default:
2381 			status = -EINVAL;
2382 			break;
2383 		}
2384 		break;
2385 	case QED_ROCE_QP_STATE_INIT:
2386 		switch (new_state) {
2387 		case QED_ROCE_QP_STATE_RTR:
2388 			/* Update doorbell (in case post_recv was
2389 			 * done before move to RTR)
2390 			 */
2391 
2392 			if (rdma_protocol_roce(&dev->ibdev, 1)) {
2393 				writel(qp->rq.db_data.raw, qp->rq.db);
2394 			}
2395 			break;
2396 		case QED_ROCE_QP_STATE_ERR:
2397 			break;
2398 		default:
2399 			/* Invalid state change. */
2400 			status = -EINVAL;
2401 			break;
2402 		}
2403 		break;
2404 	case QED_ROCE_QP_STATE_RTR:
2405 		/* RTR->XXX */
2406 		switch (new_state) {
2407 		case QED_ROCE_QP_STATE_RTS:
2408 			break;
2409 		case QED_ROCE_QP_STATE_ERR:
2410 			break;
2411 		default:
2412 			/* Invalid state change. */
2413 			status = -EINVAL;
2414 			break;
2415 		}
2416 		break;
2417 	case QED_ROCE_QP_STATE_RTS:
2418 		/* RTS->XXX */
2419 		switch (new_state) {
2420 		case QED_ROCE_QP_STATE_SQD:
2421 			break;
2422 		case QED_ROCE_QP_STATE_ERR:
2423 			break;
2424 		default:
2425 			/* Invalid state change. */
2426 			status = -EINVAL;
2427 			break;
2428 		}
2429 		break;
2430 	case QED_ROCE_QP_STATE_SQD:
2431 		/* SQD->XXX */
2432 		switch (new_state) {
2433 		case QED_ROCE_QP_STATE_RTS:
2434 		case QED_ROCE_QP_STATE_ERR:
2435 			break;
2436 		default:
2437 			/* Invalid state change. */
2438 			status = -EINVAL;
2439 			break;
2440 		}
2441 		break;
2442 	case QED_ROCE_QP_STATE_ERR:
2443 		/* ERR->XXX */
2444 		switch (new_state) {
2445 		case QED_ROCE_QP_STATE_RESET:
2446 			if ((qp->rq.prod != qp->rq.cons) ||
2447 			    (qp->sq.prod != qp->sq.cons)) {
2448 				DP_NOTICE(dev,
2449 					  "Error->Reset with rq/sq not empty rq.prod=%x rq.cons=%x sq.prod=%x sq.cons=%x\n",
2450 					  qp->rq.prod, qp->rq.cons, qp->sq.prod,
2451 					  qp->sq.cons);
2452 				status = -EINVAL;
2453 			}
2454 			break;
2455 		default:
2456 			status = -EINVAL;
2457 			break;
2458 		}
2459 		break;
2460 	default:
2461 		status = -EINVAL;
2462 		break;
2463 	}
2464 
2465 	return status;
2466 }
2467 
2468 int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
2469 		   int attr_mask, struct ib_udata *udata)
2470 {
2471 	struct qedr_qp *qp = get_qedr_qp(ibqp);
2472 	struct qed_rdma_modify_qp_in_params qp_params = { 0 };
2473 	struct qedr_dev *dev = get_qedr_dev(&qp->dev->ibdev);
2474 	const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
2475 	enum ib_qp_state old_qp_state, new_qp_state;
2476 	enum qed_roce_qp_state cur_state;
2477 	int rc = 0;
2478 
2479 	DP_DEBUG(dev, QEDR_MSG_QP,
2480 		 "modify qp: qp %p attr_mask=0x%x, state=%d", qp, attr_mask,
2481 		 attr->qp_state);
2482 
2483 	old_qp_state = qedr_get_ibqp_state(qp->state);
2484 	if (attr_mask & IB_QP_STATE)
2485 		new_qp_state = attr->qp_state;
2486 	else
2487 		new_qp_state = old_qp_state;
2488 
2489 	if (rdma_protocol_roce(&dev->ibdev, 1)) {
2490 		if (!ib_modify_qp_is_ok(old_qp_state, new_qp_state,
2491 					ibqp->qp_type, attr_mask)) {
2492 			DP_ERR(dev,
2493 			       "modify qp: invalid attribute mask=0x%x specified for\n"
2494 			       "qpn=0x%x of type=0x%x old_qp_state=0x%x, new_qp_state=0x%x\n",
2495 			       attr_mask, qp->qp_id, ibqp->qp_type,
2496 			       old_qp_state, new_qp_state);
2497 			rc = -EINVAL;
2498 			goto err;
2499 		}
2500 	}
2501 
2502 	/* Translate the masks... */
2503 	if (attr_mask & IB_QP_STATE) {
2504 		SET_FIELD(qp_params.modify_flags,
2505 			  QED_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);
2506 		qp_params.new_state = qedr_get_state_from_ibqp(attr->qp_state);
2507 	}
2508 
2509 	if (attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
2510 		qp_params.sqd_async = true;
2511 
2512 	if (attr_mask & IB_QP_PKEY_INDEX) {
2513 		SET_FIELD(qp_params.modify_flags,
2514 			  QED_ROCE_MODIFY_QP_VALID_PKEY, 1);
2515 		if (attr->pkey_index >= QEDR_ROCE_PKEY_TABLE_LEN) {
2516 			rc = -EINVAL;
2517 			goto err;
2518 		}
2519 
2520 		qp_params.pkey = QEDR_ROCE_PKEY_DEFAULT;
2521 	}
2522 
2523 	if (attr_mask & IB_QP_QKEY)
2524 		qp->qkey = attr->qkey;
2525 
2526 	if (attr_mask & IB_QP_ACCESS_FLAGS) {
2527 		SET_FIELD(qp_params.modify_flags,
2528 			  QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN, 1);
2529 		qp_params.incoming_rdma_read_en = attr->qp_access_flags &
2530 						  IB_ACCESS_REMOTE_READ;
2531 		qp_params.incoming_rdma_write_en = attr->qp_access_flags &
2532 						   IB_ACCESS_REMOTE_WRITE;
2533 		qp_params.incoming_atomic_en = attr->qp_access_flags &
2534 					       IB_ACCESS_REMOTE_ATOMIC;
2535 	}
2536 
2537 	if (attr_mask & (IB_QP_AV | IB_QP_PATH_MTU)) {
2538 		if (rdma_protocol_iwarp(&dev->ibdev, 1))
2539 			return -EINVAL;
2540 
2541 		if (attr_mask & IB_QP_PATH_MTU) {
2542 			if (attr->path_mtu < IB_MTU_256 ||
2543 			    attr->path_mtu > IB_MTU_4096) {
2544 				pr_err("error: Only MTU sizes of 256, 512, 1024, 2048 and 4096 are supported by RoCE\n");
2545 				rc = -EINVAL;
2546 				goto err;
2547 			}
2548 			qp->mtu = min(ib_mtu_enum_to_int(attr->path_mtu),
2549 				      ib_mtu_enum_to_int(iboe_get_mtu
2550 							 (dev->ndev->mtu)));
2551 		}
2552 
2553 		if (!qp->mtu) {
2554 			qp->mtu =
2555 			ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
2556 			pr_err("Fixing zeroed MTU to qp->mtu = %d\n", qp->mtu);
2557 		}
2558 
2559 		SET_FIELD(qp_params.modify_flags,
2560 			  QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR, 1);
2561 
2562 		qp_params.traffic_class_tos = grh->traffic_class;
2563 		qp_params.flow_label = grh->flow_label;
2564 		qp_params.hop_limit_ttl = grh->hop_limit;
2565 
2566 		qp->sgid_idx = grh->sgid_index;
2567 
2568 		rc = get_gid_info_from_table(ibqp, attr, attr_mask, &qp_params);
2569 		if (rc) {
2570 			DP_ERR(dev,
2571 			       "modify qp: problems with GID index %d (rc=%d)\n",
2572 			       grh->sgid_index, rc);
2573 			return rc;
2574 		}
2575 
2576 		rc = qedr_get_dmac(dev, &attr->ah_attr,
2577 				   qp_params.remote_mac_addr);
2578 		if (rc)
2579 			return rc;
2580 
2581 		qp_params.use_local_mac = true;
2582 		ether_addr_copy(qp_params.local_mac_addr, dev->ndev->dev_addr);
2583 
2584 		DP_DEBUG(dev, QEDR_MSG_QP, "dgid=%x:%x:%x:%x\n",
2585 			 qp_params.dgid.dwords[0], qp_params.dgid.dwords[1],
2586 			 qp_params.dgid.dwords[2], qp_params.dgid.dwords[3]);
2587 		DP_DEBUG(dev, QEDR_MSG_QP, "sgid=%x:%x:%x:%x\n",
2588 			 qp_params.sgid.dwords[0], qp_params.sgid.dwords[1],
2589 			 qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]);
2590 		DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n",
2591 			 qp_params.remote_mac_addr);
2592 
2593 		qp_params.mtu = qp->mtu;
2594 		qp_params.lb_indication = false;
2595 	}
2596 
2597 	if (!qp_params.mtu) {
2598 		/* Stay with current MTU */
2599 		if (qp->mtu)
2600 			qp_params.mtu = qp->mtu;
2601 		else
2602 			qp_params.mtu =
2603 			    ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
2604 	}
2605 
2606 	if (attr_mask & IB_QP_TIMEOUT) {
2607 		SET_FIELD(qp_params.modify_flags,
2608 			  QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1);
2609 
2610 		/* The received timeout value is an exponent used like this:
2611 		 *    "12.7.34 LOCAL ACK TIMEOUT
2612 		 *    Value representing the transport (ACK) timeout for use by
2613 		 *    the remote, expressed as: 4.096 * 2^timeout [usec]"
2614 		 * The FW expects timeout in msec so we need to divide the usec
2615 		 * result by 1000. We'll approximate 1000~2^10, and 4.096 ~ 2^2,
2616 		 * so we get: 2^2 * 2^timeout / 2^10 = 2^(timeout - 8).
2617 		 * The value of zero means infinite so we use a 'max_t' to make
2618 		 * sure that sub 1 msec values will be configured as 1 msec.
2619 		 */
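		/* Worked example: attr->timeout = 14 means 4.096 * 2^14 usec
		 * (~67 msec), and the approximation below configures
		 * 1 << (14 - 8) = 64 msec.
		 */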
2620 		if (attr->timeout)
2621 			qp_params.ack_timeout =
2622 					1 << max_t(int, attr->timeout - 8, 0);
2623 		else
2624 			qp_params.ack_timeout = 0;
2625 
2626 		qp->timeout = attr->timeout;
2627 	}
2628 
2629 	if (attr_mask & IB_QP_RETRY_CNT) {
2630 		SET_FIELD(qp_params.modify_flags,
2631 			  QED_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1);
2632 		qp_params.retry_cnt = attr->retry_cnt;
2633 	}
2634 
2635 	if (attr_mask & IB_QP_RNR_RETRY) {
2636 		SET_FIELD(qp_params.modify_flags,
2637 			  QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT, 1);
2638 		qp_params.rnr_retry_cnt = attr->rnr_retry;
2639 	}
2640 
2641 	if (attr_mask & IB_QP_RQ_PSN) {
2642 		SET_FIELD(qp_params.modify_flags,
2643 			  QED_ROCE_MODIFY_QP_VALID_RQ_PSN, 1);
2644 		qp_params.rq_psn = attr->rq_psn;
2645 		qp->rq_psn = attr->rq_psn;
2646 	}
2647 
2648 	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
2649 		if (attr->max_rd_atomic > dev->attr.max_qp_req_rd_atomic_resc) {
2650 			rc = -EINVAL;
2651 			DP_ERR(dev,
2652 			       "unsupported max_rd_atomic=%d, supported=%d\n",
2653 			       attr->max_rd_atomic,
2654 			       dev->attr.max_qp_req_rd_atomic_resc);
2655 			goto err;
2656 		}
2657 
2658 		SET_FIELD(qp_params.modify_flags,
2659 			  QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ, 1);
2660 		qp_params.max_rd_atomic_req = attr->max_rd_atomic;
2661 	}
2662 
2663 	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
2664 		SET_FIELD(qp_params.modify_flags,
2665 			  QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER, 1);
2666 		qp_params.min_rnr_nak_timer = attr->min_rnr_timer;
2667 	}
2668 
2669 	if (attr_mask & IB_QP_SQ_PSN) {
2670 		SET_FIELD(qp_params.modify_flags,
2671 			  QED_ROCE_MODIFY_QP_VALID_SQ_PSN, 1);
2672 		qp_params.sq_psn = attr->sq_psn;
2673 		qp->sq_psn = attr->sq_psn;
2674 	}
2675 
2676 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
2677 		if (attr->max_dest_rd_atomic >
2678 		    dev->attr.max_qp_resp_rd_atomic_resc) {
2679 			DP_ERR(dev,
2680 			       "unsupported max_dest_rd_atomic=%d, supported=%d\n",
2681 			       attr->max_dest_rd_atomic,
2682 			       dev->attr.max_qp_resp_rd_atomic_resc);
2683 
2684 			rc = -EINVAL;
2685 			goto err;
2686 		}
2687 
2688 		SET_FIELD(qp_params.modify_flags,
2689 			  QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP, 1);
2690 		qp_params.max_rd_atomic_resp = attr->max_dest_rd_atomic;
2691 	}
2692 
2693 	if (attr_mask & IB_QP_DEST_QPN) {
2694 		SET_FIELD(qp_params.modify_flags,
2695 			  QED_ROCE_MODIFY_QP_VALID_DEST_QP, 1);
2696 
2697 		qp_params.dest_qp = attr->dest_qp_num;
2698 		qp->dest_qp_num = attr->dest_qp_num;
2699 	}
2700 
2701 	cur_state = qp->state;
2702 
2703 	/* Update the QP state before the actual ramrod to prevent a race with
2704 	 * fast path. Modifying the QP state to error will cause the device to
2705 	 * flush the CQEs, and polling those flushed CQEs would be considered
2706 	 * a potential issue if the QP isn't already in the error state.
2707 	 */
2708 	if ((attr_mask & IB_QP_STATE) && qp->qp_type != IB_QPT_GSI &&
2709 	    !udata && qp_params.new_state == QED_ROCE_QP_STATE_ERR)
2710 		qp->state = QED_ROCE_QP_STATE_ERR;
2711 
2712 	if (qp->qp_type != IB_QPT_GSI)
2713 		rc = dev->ops->rdma_modify_qp(dev->rdma_ctx,
2714 					      qp->qed_qp, &qp_params);
2715 
2716 	if (attr_mask & IB_QP_STATE) {
2717 		if ((qp->qp_type != IB_QPT_GSI) && (!udata))
2718 			rc = qedr_update_qp_state(dev, qp, cur_state,
2719 						  qp_params.new_state);
2720 		qp->state = qp_params.new_state;
2721 	}
2722 
2723 err:
2724 	return rc;
2725 }
2726 
2727 static int qedr_to_ib_qp_acc_flags(struct qed_rdma_query_qp_out_params *params)
2728 {
2729 	int ib_qp_acc_flags = 0;
2730 
2731 	if (params->incoming_rdma_write_en)
2732 		ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
2733 	if (params->incoming_rdma_read_en)
2734 		ib_qp_acc_flags |= IB_ACCESS_REMOTE_READ;
2735 	if (params->incoming_atomic_en)
2736 		ib_qp_acc_flags |= IB_ACCESS_REMOTE_ATOMIC;
2737 	ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
2738 	return ib_qp_acc_flags;
2739 }
2740 
2741 int qedr_query_qp(struct ib_qp *ibqp,
2742 		  struct ib_qp_attr *qp_attr,
2743 		  int attr_mask, struct ib_qp_init_attr *qp_init_attr)
2744 {
2745 	struct qed_rdma_query_qp_out_params params;
2746 	struct qedr_qp *qp = get_qedr_qp(ibqp);
2747 	struct qedr_dev *dev = qp->dev;
2748 	int rc = 0;
2749 
2750 	memset(&params, 0, sizeof(params));
2751 	memset(qp_attr, 0, sizeof(*qp_attr));
2752 	memset(qp_init_attr, 0, sizeof(*qp_init_attr));
2753 
2754 	if (qp->qp_type != IB_QPT_GSI) {
2755 		rc = dev->ops->rdma_query_qp(dev->rdma_ctx, qp->qed_qp, &params);
2756 		if (rc)
2757 			goto err;
2758 		qp_attr->qp_state = qedr_get_ibqp_state(params.state);
2759 	} else {
2760 		qp_attr->qp_state = qedr_get_ibqp_state(QED_ROCE_QP_STATE_RTS);
2761 	}
2762 
2763 	qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state);
2764 	qp_attr->path_mtu = ib_mtu_int_to_enum(params.mtu);
2765 	qp_attr->path_mig_state = IB_MIG_MIGRATED;
2766 	qp_attr->rq_psn = params.rq_psn;
2767 	qp_attr->sq_psn = params.sq_psn;
2768 	qp_attr->dest_qp_num = params.dest_qp;
2769 
2770 	qp_attr->qp_access_flags = qedr_to_ib_qp_acc_flags(&params);
2771 
2772 	qp_attr->cap.max_send_wr = qp->sq.max_wr;
2773 	qp_attr->cap.max_recv_wr = qp->rq.max_wr;
2774 	qp_attr->cap.max_send_sge = qp->sq.max_sges;
2775 	qp_attr->cap.max_recv_sge = qp->rq.max_sges;
2776 	qp_attr->cap.max_inline_data = dev->attr.max_inline;
2777 	qp_init_attr->cap = qp_attr->cap;
2778 
2779 	qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
2780 	rdma_ah_set_grh(&qp_attr->ah_attr, NULL,
2781 			params.flow_label, qp->sgid_idx,
2782 			params.hop_limit_ttl, params.traffic_class_tos);
2783 	rdma_ah_set_dgid_raw(&qp_attr->ah_attr, &params.dgid.bytes[0]);
2784 	rdma_ah_set_port_num(&qp_attr->ah_attr, 1);
2785 	rdma_ah_set_sl(&qp_attr->ah_attr, 0);
2786 	qp_attr->timeout = qp->timeout;
2787 	qp_attr->rnr_retry = params.rnr_retry;
2788 	qp_attr->retry_cnt = params.retry_cnt;
2789 	qp_attr->min_rnr_timer = params.min_rnr_nak_timer;
2790 	qp_attr->pkey_index = params.pkey_index;
2791 	qp_attr->port_num = 1;
2792 	rdma_ah_set_path_bits(&qp_attr->ah_attr, 0);
2793 	rdma_ah_set_static_rate(&qp_attr->ah_attr, 0);
2794 	qp_attr->alt_pkey_index = 0;
2795 	qp_attr->alt_port_num = 0;
2796 	qp_attr->alt_timeout = 0;
2797 	memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
2798 
2799 	qp_attr->sq_draining = (params.state == QED_ROCE_QP_STATE_SQD) ? 1 : 0;
2800 	qp_attr->max_dest_rd_atomic = params.max_dest_rd_atomic;
2801 	qp_attr->max_rd_atomic = params.max_rd_atomic;
2802 	qp_attr->en_sqd_async_notify = (params.sqd_async) ? 1 : 0;
2803 
2804 	DP_DEBUG(dev, QEDR_MSG_QP, "QEDR_QUERY_QP: max_inline_data=%d\n",
2805 		 qp_attr->cap.max_inline_data);
2806 
2807 err:
2808 	return rc;
2809 }
2810 
2811 int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
2812 {
2813 	struct qedr_qp *qp = get_qedr_qp(ibqp);
2814 	struct qedr_dev *dev = qp->dev;
2815 	struct ib_qp_attr attr;
2816 	int attr_mask = 0;
2817 
2818 	DP_DEBUG(dev, QEDR_MSG_QP, "destroy qp: destroying %p, qp type=%d\n",
2819 		 qp, qp->qp_type);
2820 
2821 	if (rdma_protocol_roce(&dev->ibdev, 1)) {
2822 		if ((qp->state != QED_ROCE_QP_STATE_RESET) &&
2823 		    (qp->state != QED_ROCE_QP_STATE_ERR) &&
2824 		    (qp->state != QED_ROCE_QP_STATE_INIT)) {
2825 
2826 			attr.qp_state = IB_QPS_ERR;
2827 			attr_mask |= IB_QP_STATE;
2828 
2829 			/* Change the QP state to ERROR */
2830 			qedr_modify_qp(ibqp, &attr, attr_mask, NULL);
2831 		}
2832 	} else {
2833 		/* If connection establishment started the WAIT_FOR_CONNECT
2834 		 * bit will be on, and we need to wait for the establishment
2835 		 * to complete before destroying the qp.
2836 		 */
2837 		if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT,
2838 				     &qp->iwarp_cm_flags))
2839 			wait_for_completion(&qp->iwarp_cm_comp);
2840 
2841 		/* If graceful disconnect started, the WAIT_FOR_DISCONNECT
2842 		 * bit will be on, and we need to wait for the disconnect to
2843 		 * complete before continuing. We can use the same completion,
2844 		 * iwarp_cm_comp, since this is the only place that waits for
2845 		 * this completion and it is sequential. In addition,
2846 		 * disconnect can't occur before the connection is fully
2847 		 * established, therefore if WAIT_FOR_DISCONNECT is on it
2848 		 * means WAIT_FOR_CONNECT is also on and the completion for
2849 		 * CONNECT already occurred.
2850 		 */
2851 		if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_DISCONNECT,
2852 				     &qp->iwarp_cm_flags))
2853 			wait_for_completion(&qp->iwarp_cm_comp);
2854 	}
2855 
2856 	if (qp->qp_type == IB_QPT_GSI)
2857 		qedr_destroy_gsi_qp(dev);
2858 
2859 	/* We need to remove the entry from the xarray before we release the
2860 	 * qp_id, to avoid a race where the qp_id is reallocated and a
2861 	 * subsequent xa_insert fails
2862 	 */
2863 	if (rdma_protocol_iwarp(&dev->ibdev, 1))
2864 		xa_erase(&dev->qps, qp->qp_id);
2865 
2866 	qedr_free_qp_resources(dev, qp, udata);
2867 
2868 	if (rdma_protocol_iwarp(&dev->ibdev, 1))
2869 		qedr_iw_qp_rem_ref(&qp->ibqp);
2870 	else
2871 		kfree(qp);
2872 
2873 	return 0;
2874 }
2875 
2876 int qedr_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
2877 		   struct ib_udata *udata)
2878 {
2879 	struct qedr_ah *ah = get_qedr_ah(ibah);
2880 
2881 	rdma_copy_ah_attr(&ah->attr, init_attr->ah_attr);
2882 
2883 	return 0;
2884 }
2885 
2886 int qedr_destroy_ah(struct ib_ah *ibah, u32 flags)
2887 {
2888 	struct qedr_ah *ah = get_qedr_ah(ibah);
2889 
2890 	rdma_destroy_ah_attr(&ah->attr);
2891 	return 0;
2892 }
2893 
2894 static void free_mr_info(struct qedr_dev *dev, struct mr_info *info)
2895 {
2896 	struct qedr_pbl *pbl, *tmp;
2897 
2898 	if (info->pbl_table)
2899 		list_add_tail(&info->pbl_table->list_entry,
2900 			      &info->free_pbl_list);
2901 
2902 	if (!list_empty(&info->inuse_pbl_list))
2903 		list_splice(&info->inuse_pbl_list, &info->free_pbl_list);
2904 
2905 	list_for_each_entry_safe(pbl, tmp, &info->free_pbl_list, list_entry) {
2906 		list_del(&pbl->list_entry);
2907 		qedr_free_pbl(dev, &info->pbl_info, pbl);
2908 	}
2909 }
2910 
2911 static int init_mr_info(struct qedr_dev *dev, struct mr_info *info,
2912 			size_t page_list_len, bool two_layered)
2913 {
2914 	struct qedr_pbl *tmp;
2915 	int rc;
2916 
2917 	INIT_LIST_HEAD(&info->free_pbl_list);
2918 	INIT_LIST_HEAD(&info->inuse_pbl_list);
2919 
2920 	rc = qedr_prepare_pbl_tbl(dev, &info->pbl_info,
2921 				  page_list_len, two_layered);
2922 	if (rc)
2923 		goto done;
2924 
2925 	info->pbl_table = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
2926 	if (IS_ERR(info->pbl_table)) {
2927 		rc = PTR_ERR(info->pbl_table);
2928 		goto done;
2929 	}
2930 
2931 	DP_DEBUG(dev, QEDR_MSG_MR, "pbl_table_pa = %pa\n",
2932 		 &info->pbl_table->pa);
2933 
2934 	/* In the usual case we use 2 PBLs, so we add one to the free
2935 	 * list and allocate another one
2936 	 */
2937 	tmp = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
2938 	if (IS_ERR(tmp)) {
2939 		DP_DEBUG(dev, QEDR_MSG_MR, "Extra PBL is not allocated\n");
2940 		goto done;
2941 	}
2942 
2943 	list_add_tail(&tmp->list_entry, &info->free_pbl_list);
2944 
2945 	DP_DEBUG(dev, QEDR_MSG_MR, "extra pbl_table_pa = %pa\n", &tmp->pa);
2946 
2947 done:
2948 	if (rc)
2949 		free_mr_info(dev, info);
2950 
2951 	return rc;
2952 }
2953 
2954 struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
2955 			       u64 usr_addr, int acc, struct ib_udata *udata)
2956 {
2957 	struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2958 	struct qedr_mr *mr;
2959 	struct qedr_pd *pd;
2960 	int rc = -ENOMEM;
2961 
2962 	pd = get_qedr_pd(ibpd);
2963 	DP_DEBUG(dev, QEDR_MSG_MR,
2964 		 "qedr_register user mr pd = %d start = %lld, len = %lld, usr_addr = %lld, acc = %d\n",
2965 		 pd->pd_id, start, len, usr_addr, acc);
2966 
2967 	if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
2968 		return ERR_PTR(-EINVAL);
2969 
2970 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2971 	if (!mr)
2972 		return ERR_PTR(rc);
2973 
2974 	mr->type = QEDR_MR_USER;
2975 
2976 	mr->umem = ib_umem_get(ibpd->device, start, len, acc);
2977 	if (IS_ERR(mr->umem)) {
2978 		rc = -EFAULT;
2979 		goto err0;
2980 	}
2981 
2982 	rc = init_mr_info(dev, &mr->info,
2983 			  ib_umem_num_dma_blocks(mr->umem, PAGE_SIZE), 1);
2984 	if (rc)
2985 		goto err1;
2986 
2987 	qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table,
2988 			   &mr->info.pbl_info, PAGE_SHIFT);
2989 
2990 	rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2991 	if (rc) {
2992 		if (rc == -EINVAL)
2993 			DP_ERR(dev, "Out of MR resources\n");
2994 		else
2995 			DP_ERR(dev, "roce alloc tid returned error %d\n", rc);
2996 
2997 		goto err1;
2998 	}
2999 
3000 	/* Index only, 18 bit long, lkey = itid << 8 | key */
3001 	mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
3002 	mr->hw_mr.key = 0;
3003 	mr->hw_mr.pd = pd->pd_id;
3004 	mr->hw_mr.local_read = 1;
3005 	mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
3006 	mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
3007 	mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
3008 	mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
3009 	mr->hw_mr.mw_bind = false;
3010 	mr->hw_mr.pbl_ptr = mr->info.pbl_table[0].pa;
3011 	mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
3012 	mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
3013 	mr->hw_mr.page_size_log = PAGE_SHIFT;
3014 	mr->hw_mr.length = len;
3015 	mr->hw_mr.vaddr = usr_addr;
3016 	mr->hw_mr.phy_mr = false;
3017 	mr->hw_mr.dma_mr = false;
3018 
3019 	rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
3020 	if (rc) {
3021 		DP_ERR(dev, "roce register tid returned an error %d\n", rc);
3022 		goto err2;
3023 	}
3024 
3025 	mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
3026 	if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
3027 	    mr->hw_mr.remote_atomic)
3028 		mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
3029 
3030 	DP_DEBUG(dev, QEDR_MSG_MR, "register user mr lkey: %x\n",
3031 		 mr->ibmr.lkey);
3032 	return &mr->ibmr;
3033 
3034 err2:
3035 	dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
3036 err1:
3037 	qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
3038 err0:
3039 	kfree(mr);
3040 	return ERR_PTR(rc);
3041 }
3042 
3043 int qedr_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
3044 {
3045 	struct qedr_mr *mr = get_qedr_mr(ib_mr);
3046 	struct qedr_dev *dev = get_qedr_dev(ib_mr->device);
3047 	int rc = 0;
3048 
3049 	rc = dev->ops->rdma_deregister_tid(dev->rdma_ctx, mr->hw_mr.itid);
3050 	if (rc)
3051 		return rc;
3052 
3053 	dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
3054 
3055 	if (mr->type != QEDR_MR_DMA)
3056 		free_mr_info(dev, &mr->info);
3057 
3058 	/* it could be user registered memory. */
3059 	ib_umem_release(mr->umem);
3060 
3061 	kfree(mr);
3062 
3063 	return rc;
3064 }
3065 
3066 static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd,
3067 				       int max_page_list_len)
3068 {
3069 	struct qedr_pd *pd = get_qedr_pd(ibpd);
3070 	struct qedr_dev *dev = get_qedr_dev(ibpd->device);
3071 	struct qedr_mr *mr;
3072 	int rc = -ENOMEM;
3073 
3074 	DP_DEBUG(dev, QEDR_MSG_MR,
3075 		 "qedr_alloc_frmr pd = %d max_page_list_len= %d\n", pd->pd_id,
3076 		 max_page_list_len);
3077 
3078 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3079 	if (!mr)
3080 		return ERR_PTR(rc);
3081 
3082 	mr->dev = dev;
3083 	mr->type = QEDR_MR_FRMR;
3084 
3085 	rc = init_mr_info(dev, &mr->info, max_page_list_len, 1);
3086 	if (rc)
3087 		goto err0;
3088 
3089 	rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
3090 	if (rc) {
3091 		if (rc == -EINVAL)
3092 			DP_ERR(dev, "Out of MR resources\n");
3093 		else
3094 			DP_ERR(dev, "roce alloc tid returned error %d\n", rc);
3095 
3096 		goto err1;
3097 	}
3098 
3099 	/* Index only, 18 bit long, lkey = itid << 8 | key */
3100 	mr->hw_mr.tid_type = QED_RDMA_TID_FMR;
3101 	mr->hw_mr.key = 0;
3102 	mr->hw_mr.pd = pd->pd_id;
3103 	mr->hw_mr.local_read = 1;
3104 	mr->hw_mr.local_write = 0;
3105 	mr->hw_mr.remote_read = 0;
3106 	mr->hw_mr.remote_write = 0;
3107 	mr->hw_mr.remote_atomic = 0;
3108 	mr->hw_mr.mw_bind = false;
3109 	mr->hw_mr.pbl_ptr = 0;
3110 	mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
3111 	mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
3112 	mr->hw_mr.length = 0;
3113 	mr->hw_mr.vaddr = 0;
3114 	mr->hw_mr.phy_mr = true;
3115 	mr->hw_mr.dma_mr = false;
3116 
3117 	rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
3118 	if (rc) {
3119 		DP_ERR(dev, "roce register tid returned an error %d\n", rc);
3120 		goto err2;
3121 	}
3122 
3123 	mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
3124 	mr->ibmr.rkey = mr->ibmr.lkey;
3125 
3126 	DP_DEBUG(dev, QEDR_MSG_MR, "alloc frmr: %x\n", mr->ibmr.lkey);
3127 	return mr;
3128 
3129 err2:
3130 	dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
3131 err1:
3132 	qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
3133 err0:
3134 	kfree(mr);
3135 	return ERR_PTR(rc);
3136 }
3137 
3138 struct ib_mr *qedr_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
3139 			    u32 max_num_sg)
3140 {
3141 	struct qedr_mr *mr;
3142 
3143 	if (mr_type != IB_MR_TYPE_MEM_REG)
3144 		return ERR_PTR(-EINVAL);
3145 
3146 	mr = __qedr_alloc_mr(ibpd, max_num_sg);
3147 
3148 	if (IS_ERR(mr))
3149 		return ERR_PTR(-EINVAL);
3150 
3151 	return &mr->ibmr;
3152 }
3153 
3154 static int qedr_set_page(struct ib_mr *ibmr, u64 addr)
3155 {
3156 	struct qedr_mr *mr = get_qedr_mr(ibmr);
3157 	struct qedr_pbl *pbl_table;
3158 	struct regpair *pbe;
3159 	u32 pbes_in_page;
3160 
3161 	if (unlikely(mr->npages == mr->info.pbl_info.num_pbes)) {
3162 		DP_ERR(mr->dev, "qedr_set_page failed, npages=%d\n", mr->npages);
3163 		return -ENOMEM;
3164 	}
3165 
3166 	DP_DEBUG(mr->dev, QEDR_MSG_MR, "qedr_set_page pages[%d] = 0x%llx\n",
3167 		 mr->npages, addr);
3168 
3169 	pbes_in_page = mr->info.pbl_info.pbl_size / sizeof(u64);
3170 	pbl_table = mr->info.pbl_table + (mr->npages / pbes_in_page);
3171 	pbe = (struct regpair *)pbl_table->va;
3172 	pbe +=  mr->npages % pbes_in_page;
3173 	pbe->lo = cpu_to_le32((u32)addr);
3174 	pbe->hi = cpu_to_le32((u32)upper_32_bits(addr));
3175 
3176 	mr->npages++;
3177 
3178 	return 0;
3179 }
3180 
3181 static void handle_completed_mrs(struct qedr_dev *dev, struct mr_info *info)
3182 {
3183 	int work = info->completed - info->completed_handled - 1;
3184 
3185 	DP_DEBUG(dev, QEDR_MSG_MR, "Special FMR work = %d\n", work);
3186 	while (work-- > 0 && !list_empty(&info->inuse_pbl_list)) {
3187 		struct qedr_pbl *pbl;
3188 
3189 		/* Free all the page lists that can be freed (all the ones that
3190 		 * were invalidated), under the assumption that a successfully
3191 		 * completed FMR implies that any invalidate operation posted
3192 		 * before it has also completed.
3193 		 */
3194 		pbl = list_first_entry(&info->inuse_pbl_list,
3195 				       struct qedr_pbl, list_entry);
3196 		list_move_tail(&pbl->list_entry, &info->free_pbl_list);
3197 		info->completed_handled++;
3198 	}
3199 }
3200 
3201 int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
3202 		   int sg_nents, unsigned int *sg_offset)
3203 {
3204 	struct qedr_mr *mr = get_qedr_mr(ibmr);
3205 
3206 	mr->npages = 0;
3207 
3208 	handle_completed_mrs(mr->dev, &mr->info);
3209 	return ib_sg_to_pages(ibmr, sg, sg_nents, NULL, qedr_set_page);
3210 }
3211 
3212 struct ib_mr *qedr_get_dma_mr(struct ib_pd *ibpd, int acc)
3213 {
3214 	struct qedr_dev *dev = get_qedr_dev(ibpd->device);
3215 	struct qedr_pd *pd = get_qedr_pd(ibpd);
3216 	struct qedr_mr *mr;
3217 	int rc;
3218 
3219 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3220 	if (!mr)
3221 		return ERR_PTR(-ENOMEM);
3222 
3223 	mr->type = QEDR_MR_DMA;
3224 
3225 	rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
3226 	if (rc) {
3227 		if (rc == -EINVAL)
3228 			DP_ERR(dev, "Out of MR resources\n");
3229 		else
3230 			DP_ERR(dev, "roce alloc tid returned error %d\n", rc);
3231 
3232 		goto err1;
3233 	}
3234 
3235 	/* index only, 18 bit long, lkey = itid << 8 | key */
3236 	mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
3237 	mr->hw_mr.pd = pd->pd_id;
3238 	mr->hw_mr.local_read = 1;
3239 	mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
3240 	mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
3241 	mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
3242 	mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
3243 	mr->hw_mr.dma_mr = true;
3244 
3245 	rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
3246 	if (rc) {
3247 		DP_ERR(dev, "roce register tid returned an error %d\n", rc);
3248 		goto err2;
3249 	}
3250 
3251 	mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
3252 	if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
3253 	    mr->hw_mr.remote_atomic)
3254 		mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
3255 
3256 	DP_DEBUG(dev, QEDR_MSG_MR, "get dma mr: lkey = %x\n", mr->ibmr.lkey);
3257 	return &mr->ibmr;
3258 
3259 err2:
3260 	dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
3261 err1:
3262 	kfree(mr);
3263 	return ERR_PTR(rc);
3264 }
3265 
3266 static inline int qedr_wq_is_full(struct qedr_qp_hwq_info *wq)
3267 {
3268 	return (((wq->prod + 1) % wq->max_wr) == wq->cons);
3269 }
3270 
3271 static int sge_data_len(struct ib_sge *sg_list, int num_sge)
3272 {
3273 	int i, len = 0;
3274 
3275 	for (i = 0; i < num_sge; i++)
3276 		len += sg_list[i].length;
3277 
3278 	return len;
3279 }
3280 
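/* swap_wqe_data64() byte-swaps each 64-bit word in place: on a little-endian
 * CPU cpu_to_le64() is a no-op and cpu_to_be64() does the swap, while on
 * big-endian the roles are reversed, so the net effect is an unconditional
 * byte swap of every u64 (presumably matching how the device reads inline
 * WQE payload).
 */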
3281 static void swap_wqe_data64(u64 *p)
3282 {
3283 	int i;
3284 
3285 	for (i = 0; i < QEDR_SQE_ELEMENT_SIZE / sizeof(u64); i++, p++)
3286 		*p = cpu_to_be64(cpu_to_le64(*p));
3287 }
3288 
3289 static u32 qedr_prepare_sq_inline_data(struct qedr_dev *dev,
3290 				       struct qedr_qp *qp, u8 *wqe_size,
3291 				       const struct ib_send_wr *wr,
3292 				       const struct ib_send_wr **bad_wr,
3293 				       u8 *bits, u8 bit)
3294 {
3295 	u32 data_size = sge_data_len(wr->sg_list, wr->num_sge);
3296 	char *seg_prt, *wqe;
3297 	int i, seg_siz;
3298 
3299 	if (data_size > ROCE_REQ_MAX_INLINE_DATA_SIZE) {
3300 		DP_ERR(dev, "Too much inline data in WR: %d\n", data_size);
3301 		*bad_wr = wr;
3302 		return 0;
3303 	}
3304 
3305 	if (!data_size)
3306 		return data_size;
3307 
3308 	*bits |= bit;
3309 
3310 	seg_prt = NULL;
3311 	wqe = NULL;
3312 	seg_siz = 0;
3313 
3314 	/* Copy data inline */
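	/* Packing scheme: each ring element produced below provides
	 * sizeof(struct rdma_sq_common_wqe) bytes of inline space; once an
	 * element is completely filled it is byte-swapped via
	 * swap_wqe_data64() before the next element is started.
	 */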
3315 	for (i = 0; i < wr->num_sge; i++) {
3316 		u32 len = wr->sg_list[i].length;
3317 		void *src = (void *)(uintptr_t)wr->sg_list[i].addr;
3318 
3319 		while (len > 0) {
3320 			u32 cur;
3321 
3322 			/* New segment required */
3323 			if (!seg_siz) {
3324 				wqe = (char *)qed_chain_produce(&qp->sq.pbl);
3325 				seg_prt = wqe;
3326 				seg_siz = sizeof(struct rdma_sq_common_wqe);
3327 				(*wqe_size)++;
3328 			}
3329 
3330 			/* Calculate currently allowed length */
3331 			cur = min_t(u32, len, seg_siz);
3332 			memcpy(seg_prt, src, cur);
3333 
3334 			/* Update segment variables */
3335 			seg_prt += cur;
3336 			seg_siz -= cur;
3337 
3338 			/* Update sge variables */
3339 			src += cur;
3340 			len -= cur;
3341 
3342 			/* Swap fully-completed segments */
3343 			if (!seg_siz)
3344 				swap_wqe_data64((u64 *)wqe);
3345 		}
3346 	}
3347 
3348 	/* swap last not completed segment */
3349 	if (seg_siz)
3350 		swap_wqe_data64((u64 *)wqe);
3351 
3352 	return data_size;
3353 }
3354 
3355 #define RQ_SGE_SET(sge, vaddr, vlength, vflags)			\
3356 	do {							\
3357 		DMA_REGPAIR_LE(sge->addr, vaddr);		\
3358 		(sge)->length = cpu_to_le32(vlength);		\
3359 		(sge)->flags = cpu_to_le32(vflags);		\
3360 	} while (0)
3361 
3362 #define SRQ_HDR_SET(hdr, vwr_id, num_sge)			\
3363 	do {							\
3364 		DMA_REGPAIR_LE(hdr->wr_id, vwr_id);		\
3365 		(hdr)->num_sges = num_sge;			\
3366 	} while (0)
3367 
3368 #define SRQ_SGE_SET(sge, vaddr, vlength, vlkey)			\
3369 	do {							\
3370 		DMA_REGPAIR_LE(sge->addr, vaddr);		\
3371 		(sge)->length = cpu_to_le32(vlength);		\
3372 		(sge)->l_key = cpu_to_le32(vlkey);		\
3373 	} while (0)
3374 
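/* Non-inline path: produce one struct rdma_sq_sge chain element per SGE,
 * filling address, lkey and length in little-endian, and return the total
 * number of bytes described. wqe_size may be NULL when the caller uses a
 * fixed WQE size (e.g. atomics).
 */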
3375 static u32 qedr_prepare_sq_sges(struct qedr_qp *qp, u8 *wqe_size,
3376 				const struct ib_send_wr *wr)
3377 {
3378 	u32 data_size = 0;
3379 	int i;
3380 
3381 	for (i = 0; i < wr->num_sge; i++) {
3382 		struct rdma_sq_sge *sge = qed_chain_produce(&qp->sq.pbl);
3383 
3384 		DMA_REGPAIR_LE(sge->addr, wr->sg_list[i].addr);
3385 		sge->l_key = cpu_to_le32(wr->sg_list[i].lkey);
3386 		sge->length = cpu_to_le32(wr->sg_list[i].length);
3387 		data_size += wr->sg_list[i].length;
3388 	}
3389 
3390 	if (wqe_size)
3391 		*wqe_size += wr->num_sge;
3392 
3393 	return data_size;
3394 }
3395 
3396 static u32 qedr_prepare_sq_rdma_data(struct qedr_dev *dev,
3397 				     struct qedr_qp *qp,
3398 				     struct rdma_sq_rdma_wqe_1st *rwqe,
3399 				     struct rdma_sq_rdma_wqe_2nd *rwqe2,
3400 				     const struct ib_send_wr *wr,
3401 				     const struct ib_send_wr **bad_wr)
3402 {
3403 	rwqe2->r_key = cpu_to_le32(rdma_wr(wr)->rkey);
3404 	DMA_REGPAIR_LE(rwqe2->remote_va, rdma_wr(wr)->remote_addr);
3405 
3406 	if (wr->send_flags & IB_SEND_INLINE &&
3407 	    (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
3408 	     wr->opcode == IB_WR_RDMA_WRITE)) {
3409 		u8 flags = 0;
3410 
3411 		SET_FIELD2(flags, RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG, 1);
3412 		return qedr_prepare_sq_inline_data(dev, qp, &rwqe->wqe_size, wr,
3413 						   bad_wr, &rwqe->flags, flags);
3414 	}
3415 
3416 	return qedr_prepare_sq_sges(qp, &rwqe->wqe_size, wr);
3417 }
3418 
3419 static u32 qedr_prepare_sq_send_data(struct qedr_dev *dev,
3420 				     struct qedr_qp *qp,
3421 				     struct rdma_sq_send_wqe_1st *swqe,
3422 				     struct rdma_sq_send_wqe_2st *swqe2,
3423 				     const struct ib_send_wr *wr,
3424 				     const struct ib_send_wr **bad_wr)
3425 {
3426 	memset(swqe2, 0, sizeof(*swqe2));
3427 	if (wr->send_flags & IB_SEND_INLINE) {
3428 		u8 flags = 0;
3429 
3430 		SET_FIELD2(flags, RDMA_SQ_SEND_WQE_INLINE_FLG, 1);
3431 		return qedr_prepare_sq_inline_data(dev, qp, &swqe->wqe_size, wr,
3432 						   bad_wr, &swqe->flags, flags);
3433 	}
3434 
3435 	return qedr_prepare_sq_sges(qp, &swqe->wqe_size, wr);
3436 }
3437 
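/* Build a fast-register (IB_WR_REG_MR) WQE. The caller has already produced
 * the first element (fwqe1); the second element is produced here and carries
 * the access-control bits, the page size as log2 relative to 4K
 * (ilog2(page_size) - 12), the MR length and the DMA address of the PBL
 * table. The MR pointer is stashed in wqe_wr_id so that completion
 * processing can update mr->info.completed.
 */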
3438 static int qedr_prepare_reg(struct qedr_qp *qp,
3439 			    struct rdma_sq_fmr_wqe_1st *fwqe1,
3440 			    const struct ib_reg_wr *wr)
3441 {
3442 	struct qedr_mr *mr = get_qedr_mr(wr->mr);
3443 	struct rdma_sq_fmr_wqe_2nd *fwqe2;
3444 
3445 	fwqe2 = (struct rdma_sq_fmr_wqe_2nd *)qed_chain_produce(&qp->sq.pbl);
3446 	fwqe1->addr.hi = upper_32_bits(mr->ibmr.iova);
3447 	fwqe1->addr.lo = lower_32_bits(mr->ibmr.iova);
3448 	fwqe1->l_key = wr->key;
3449 
3450 	fwqe2->access_ctrl = 0;
3451 
3452 	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_READ,
3453 		   !!(wr->access & IB_ACCESS_REMOTE_READ));
3454 	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE,
3455 		   !!(wr->access & IB_ACCESS_REMOTE_WRITE));
3456 	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC,
3457 		   !!(wr->access & IB_ACCESS_REMOTE_ATOMIC));
3458 	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_READ, 1);
3459 	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE,
3460 		   !!(wr->access & IB_ACCESS_LOCAL_WRITE));
3461 	fwqe2->fmr_ctrl = 0;
3462 
3463 	SET_FIELD2(fwqe2->fmr_ctrl, RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG,
3464 		   ilog2(mr->ibmr.page_size) - 12);
3465 
3466 	fwqe2->length_hi = 0;
3467 	fwqe2->length_lo = mr->ibmr.length;
3468 	fwqe2->pbl_addr.hi = upper_32_bits(mr->info.pbl_table->pa);
3469 	fwqe2->pbl_addr.lo = lower_32_bits(mr->info.pbl_table->pa);
3470 
3471 	qp->wqe_wr_id[qp->sq.prod].mr = mr;
3472 
3473 	return 0;
3474 }
3475 
3476 static enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
3477 {
3478 	switch (opcode) {
3479 	case IB_WR_RDMA_WRITE:
3480 	case IB_WR_RDMA_WRITE_WITH_IMM:
3481 		return IB_WC_RDMA_WRITE;
3482 	case IB_WR_SEND_WITH_IMM:
3483 	case IB_WR_SEND:
3484 	case IB_WR_SEND_WITH_INV:
3485 		return IB_WC_SEND;
3486 	case IB_WR_RDMA_READ:
3487 	case IB_WR_RDMA_READ_WITH_INV:
3488 		return IB_WC_RDMA_READ;
3489 	case IB_WR_ATOMIC_CMP_AND_SWP:
3490 		return IB_WC_COMP_SWAP;
3491 	case IB_WR_ATOMIC_FETCH_AND_ADD:
3492 		return IB_WC_FETCH_ADD;
3493 	case IB_WR_REG_MR:
3494 		return IB_WC_REG_MR;
3495 	case IB_WR_LOCAL_INV:
3496 		return IB_WC_LOCAL_INV;
3497 	default:
3498 		return IB_WC_SEND;
3499 	}
3500 }
3501 
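/* Three conditions block a post: the WR carries more SGEs than the SQ was
 * created with, the SQ ring is full, or the SQ PBL does not have room for a
 * maximum-sized SQE. Each condition is reported only once per QP, tracked
 * via qp->err_bitmap.
 */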
3502 static inline bool qedr_can_post_send(struct qedr_qp *qp,
3503 				      const struct ib_send_wr *wr)
3504 {
3505 	int wq_is_full, err_wr, pbl_is_full;
3506 	struct qedr_dev *dev = qp->dev;
3507 
3508 	/* prevent SQ overflow and/or processing of a bad WR */
3509 	err_wr = wr->num_sge > qp->sq.max_sges;
3510 	wq_is_full = qedr_wq_is_full(&qp->sq);
3511 	pbl_is_full = qed_chain_get_elem_left_u32(&qp->sq.pbl) <
3512 		      QEDR_MAX_SQE_ELEMENTS_PER_SQE;
3513 	if (wq_is_full || err_wr || pbl_is_full) {
3514 		if (wq_is_full && !(qp->err_bitmap & QEDR_QP_ERR_SQ_FULL)) {
3515 			DP_ERR(dev,
3516 			       "error: WQ is full. Post send on QP %p failed (this error appears only once)\n",
3517 			       qp);
3518 			qp->err_bitmap |= QEDR_QP_ERR_SQ_FULL;
3519 		}
3520 
3521 		if (err_wr && !(qp->err_bitmap & QEDR_QP_ERR_BAD_SR)) {
3522 			DP_ERR(dev,
3523 			       "error: WR is bad. Post send on QP %p failed (this error appears only once)\n",
3524 			       qp);
3525 			qp->err_bitmap |= QEDR_QP_ERR_BAD_SR;
3526 		}
3527 
3528 		if (pbl_is_full &&
3529 		    !(qp->err_bitmap & QEDR_QP_ERR_SQ_PBL_FULL)) {
3530 			DP_ERR(dev,
3531 			       "error: WQ PBL is full. Post send on QP %p failed (this error appears only once)\n",
3532 			       qp);
3533 			qp->err_bitmap |= QEDR_QP_ERR_SQ_PBL_FULL;
3534 		}
3535 		return false;
3536 	}
3537 	return true;
3538 }
3539 
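/* Build and post a single send WR. The common WQE header is produced first
 * and the SE/COMP/RD_FENCE flags are set from send_flags; the opcode then
 * selects how the remaining 1st/2nd (and, for atomics, 3rd) WQE elements are
 * filled. If the WR turns out to be bad, the chain producer is restored
 * from the doorbell value and prev_wqe_size is rolled back, so the SQ is
 * left as if the WR had never been processed.
 */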
3540 static int __qedr_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
3541 			    const struct ib_send_wr **bad_wr)
3542 {
3543 	struct qedr_dev *dev = get_qedr_dev(ibqp->device);
3544 	struct qedr_qp *qp = get_qedr_qp(ibqp);
3545 	struct rdma_sq_atomic_wqe_1st *awqe1;
3546 	struct rdma_sq_atomic_wqe_2nd *awqe2;
3547 	struct rdma_sq_atomic_wqe_3rd *awqe3;
3548 	struct rdma_sq_send_wqe_2st *swqe2;
3549 	struct rdma_sq_local_inv_wqe *iwqe;
3550 	struct rdma_sq_rdma_wqe_2nd *rwqe2;
3551 	struct rdma_sq_send_wqe_1st *swqe;
3552 	struct rdma_sq_rdma_wqe_1st *rwqe;
3553 	struct rdma_sq_fmr_wqe_1st *fwqe1;
3554 	struct rdma_sq_common_wqe *wqe;
3555 	u32 length;
3556 	int rc = 0;
3557 	bool comp;
3558 
3559 	if (!qedr_can_post_send(qp, wr)) {
3560 		*bad_wr = wr;
3561 		return -ENOMEM;
3562 	}
3563 
3564 	wqe = qed_chain_produce(&qp->sq.pbl);
3565 	qp->wqe_wr_id[qp->sq.prod].signaled =
3566 		!!(wr->send_flags & IB_SEND_SIGNALED) || qp->signaled;
3567 
3568 	wqe->flags = 0;
3569 	SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_SE_FLG,
3570 		   !!(wr->send_flags & IB_SEND_SOLICITED));
3571 	comp = (!!(wr->send_flags & IB_SEND_SIGNALED)) || qp->signaled;
3572 	SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_COMP_FLG, comp);
3573 	SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_RD_FENCE_FLG,
3574 		   !!(wr->send_flags & IB_SEND_FENCE));
3575 	wqe->prev_wqe_size = qp->prev_wqe_size;
3576 
3577 	qp->wqe_wr_id[qp->sq.prod].opcode = qedr_ib_to_wc_opcode(wr->opcode);
3578 
3579 	switch (wr->opcode) {
3580 	case IB_WR_SEND_WITH_IMM:
3581 		if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
3582 			rc = -EINVAL;
3583 			*bad_wr = wr;
3584 			break;
3585 		}
3586 		wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;
3587 		swqe = (struct rdma_sq_send_wqe_1st *)wqe;
3588 		swqe->wqe_size = 2;
3589 		swqe2 = qed_chain_produce(&qp->sq.pbl);
3590 
3591 		swqe->inv_key_or_imm_data = cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
3592 		length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
3593 						   wr, bad_wr);
3594 		swqe->length = cpu_to_le32(length);
3595 		qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
3596 		qp->prev_wqe_size = swqe->wqe_size;
3597 		qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
3598 		break;
3599 	case IB_WR_SEND:
3600 		wqe->req_type = RDMA_SQ_REQ_TYPE_SEND;
3601 		swqe = (struct rdma_sq_send_wqe_1st *)wqe;
3602 
3603 		swqe->wqe_size = 2;
3604 		swqe2 = qed_chain_produce(&qp->sq.pbl);
3605 		length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
3606 						   wr, bad_wr);
3607 		swqe->length = cpu_to_le32(length);
3608 		qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
3609 		qp->prev_wqe_size = swqe->wqe_size;
3610 		qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
3611 		break;
3612 	case IB_WR_SEND_WITH_INV:
3613 		wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE;
3614 		swqe = (struct rdma_sq_send_wqe_1st *)wqe;
3615 		swqe2 = qed_chain_produce(&qp->sq.pbl);
3616 		swqe->wqe_size = 2;
3617 		swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.invalidate_rkey);
3618 		length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
3619 						   wr, bad_wr);
3620 		swqe->length = cpu_to_le32(length);
3621 		qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
3622 		qp->prev_wqe_size = swqe->wqe_size;
3623 		qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
3624 		break;
3625 
3626 	case IB_WR_RDMA_WRITE_WITH_IMM:
3627 		if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
3628 			rc = -EINVAL;
3629 			*bad_wr = wr;
3630 			break;
3631 		}
3632 		wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM;
3633 		rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
3634 
3635 		rwqe->wqe_size = 2;
3636 		rwqe->imm_data = htonl(cpu_to_le32(wr->ex.imm_data));
3637 		rwqe2 = qed_chain_produce(&qp->sq.pbl);
3638 		length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
3639 						   wr, bad_wr);
3640 		rwqe->length = cpu_to_le32(length);
3641 		qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3642 		qp->prev_wqe_size = rwqe->wqe_size;
3643 		qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3644 		break;
3645 	case IB_WR_RDMA_WRITE:
3646 		wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR;
3647 		rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
3648 
3649 		rwqe->wqe_size = 2;
3650 		rwqe2 = qed_chain_produce(&qp->sq.pbl);
3651 		length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
3652 						   wr, bad_wr);
3653 		rwqe->length = cpu_to_le32(length);
3654 		qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3655 		qp->prev_wqe_size = rwqe->wqe_size;
3656 		qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3657 		break;
3658 	case IB_WR_RDMA_READ_WITH_INV:
3659 		SET_FIELD2(wqe->flags, RDMA_SQ_RDMA_WQE_1ST_READ_INV_FLG, 1);
3660 		fallthrough;	/* handled the same as RDMA READ */
3661 
3662 	case IB_WR_RDMA_READ:
3663 		wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_RD;
3664 		rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
3665 
3666 		rwqe->wqe_size = 2;
3667 		rwqe2 = qed_chain_produce(&qp->sq.pbl);
3668 		length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
3669 						   wr, bad_wr);
3670 		rwqe->length = cpu_to_le32(length);
3671 		qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3672 		qp->prev_wqe_size = rwqe->wqe_size;
3673 		qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3674 		break;
3675 
3676 	case IB_WR_ATOMIC_CMP_AND_SWP:
3677 	case IB_WR_ATOMIC_FETCH_AND_ADD:
3678 		awqe1 = (struct rdma_sq_atomic_wqe_1st *)wqe;
3679 		awqe1->wqe_size = 4;
3680 
3681 		awqe2 = qed_chain_produce(&qp->sq.pbl);
3682 		DMA_REGPAIR_LE(awqe2->remote_va, atomic_wr(wr)->remote_addr);
3683 		awqe2->r_key = cpu_to_le32(atomic_wr(wr)->rkey);
3684 
3685 		awqe3 = qed_chain_produce(&qp->sq.pbl);
3686 
3687 		if (wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
3688 			wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_ADD;
3689 			DMA_REGPAIR_LE(awqe3->swap_data,
3690 				       atomic_wr(wr)->compare_add);
3691 		} else {
3692 			wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP;
3693 			DMA_REGPAIR_LE(awqe3->swap_data,
3694 				       atomic_wr(wr)->swap);
3695 			DMA_REGPAIR_LE(awqe3->cmp_data,
3696 				       atomic_wr(wr)->compare_add);
3697 		}
3698 
3699 		qedr_prepare_sq_sges(qp, NULL, wr);
3700 
3701 		qp->wqe_wr_id[qp->sq.prod].wqe_size = awqe1->wqe_size;
3702 		qp->prev_wqe_size = awqe1->wqe_size;
3703 		break;
3704 
3705 	case IB_WR_LOCAL_INV:
3706 		iwqe = (struct rdma_sq_local_inv_wqe *)wqe;
3707 		iwqe->wqe_size = 1;
3708 
3709 		iwqe->req_type = RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE;
3710 		iwqe->inv_l_key = wr->ex.invalidate_rkey;
3711 		qp->wqe_wr_id[qp->sq.prod].wqe_size = iwqe->wqe_size;
3712 		qp->prev_wqe_size = iwqe->wqe_size;
3713 		break;
3714 	case IB_WR_REG_MR:
3715 		DP_DEBUG(dev, QEDR_MSG_CQ, "REG_MR\n");
3716 		wqe->req_type = RDMA_SQ_REQ_TYPE_FAST_MR;
3717 		fwqe1 = (struct rdma_sq_fmr_wqe_1st *)wqe;
3718 		fwqe1->wqe_size = 2;
3719 
3720 		rc = qedr_prepare_reg(qp, fwqe1, reg_wr(wr));
3721 		if (rc) {
3722 			DP_ERR(dev, "IB_REG_MR failed rc=%d\n", rc);
3723 			*bad_wr = wr;
3724 			break;
3725 		}
3726 
3727 		qp->wqe_wr_id[qp->sq.prod].wqe_size = fwqe1->wqe_size;
3728 		qp->prev_wqe_size = fwqe1->wqe_size;
3729 		break;
3730 	default:
3731 		DP_ERR(dev, "invalid opcode 0x%x!\n", wr->opcode);
3732 		rc = -EINVAL;
3733 		*bad_wr = wr;
3734 		break;
3735 	}
3736 
3737 	if (*bad_wr) {
3738 		u16 value;
3739 
3740 		/* Restore prod to its position before
3741 		 * this WR was processed
3742 		 */
3743 		value = le16_to_cpu(qp->sq.db_data.data.value);
3744 		qed_chain_set_prod(&qp->sq.pbl, value, wqe);
3745 
3746 		/* Restore prev_wqe_size */
3747 		qp->prev_wqe_size = wqe->prev_wqe_size;
3748 		rc = -EINVAL;
3749 		DP_ERR(dev, "POST SEND FAILED\n");
3750 	}
3751 
3752 	return rc;
3753 }
3754 
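/* Post a chain of send WRs. GSI QPs are handed to qedr_gsi_post_send(); for
 * RoCE QPs posting is only allowed in the RTS, SQD or ERR states. The WRs
 * are processed under q_lock, the software producer and doorbell value are
 * advanced per WR, and a single doorbell write (after smp_wmb()) publishes
 * the whole batch.
 *
 * Typical caller flow through the verbs layer (illustrative sketch only;
 * dma_addr, len and mr are assumed to exist in the caller):
 *
 *	struct ib_sge sge = { .addr = dma_addr, .length = len, .lkey = mr->lkey };
 *	struct ib_send_wr wr = {
 *		.opcode     = IB_WR_SEND,
 *		.sg_list    = &sge,
 *		.num_sge    = 1,
 *		.send_flags = IB_SEND_SIGNALED,
 *	};
 *	const struct ib_send_wr *bad_wr;
 *	int rc = ib_post_send(qp, &wr, &bad_wr);
 */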
3755 int qedr_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
3756 		   const struct ib_send_wr **bad_wr)
3757 {
3758 	struct qedr_dev *dev = get_qedr_dev(ibqp->device);
3759 	struct qedr_qp *qp = get_qedr_qp(ibqp);
3760 	unsigned long flags;
3761 	int rc = 0;
3762 
3763 	*bad_wr = NULL;
3764 
3765 	if (qp->qp_type == IB_QPT_GSI)
3766 		return qedr_gsi_post_send(ibqp, wr, bad_wr);
3767 
3768 	spin_lock_irqsave(&qp->q_lock, flags);
3769 
3770 	if (rdma_protocol_roce(&dev->ibdev, 1)) {
3771 		if ((qp->state != QED_ROCE_QP_STATE_RTS) &&
3772 		    (qp->state != QED_ROCE_QP_STATE_ERR) &&
3773 		    (qp->state != QED_ROCE_QP_STATE_SQD)) {
3774 			spin_unlock_irqrestore(&qp->q_lock, flags);
3775 			*bad_wr = wr;
3776 			DP_DEBUG(dev, QEDR_MSG_CQ,
3777 				 "QP in wrong state! QP icid=0x%x state %d\n",
3778 				 qp->icid, qp->state);
3779 			return -EINVAL;
3780 		}
3781 	}
3782 
3783 	while (wr) {
3784 		rc = __qedr_post_send(ibqp, wr, bad_wr);
3785 		if (rc)
3786 			break;
3787 
3788 		qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
3789 
3790 		qedr_inc_sw_prod(&qp->sq);
3791 
3792 		qp->sq.db_data.data.value++;
3793 
3794 		wr = wr->next;
3795 	}
3796 
3797 	/* Trigger doorbell
3798 	 * If there was a failure in the first WR then it will be triggered in
3799 	 * vain. However, this is not harmful (as long as the producer value is
3800 	 * unchanged). For performance reasons we avoid checking for this
3801 	 * redundant doorbell.
3802 	 *
3803 	 * qp->wqe_wr_id is accessed during qedr_poll_cq, as
3804 	 * soon as we give the doorbell, we could get a completion
3805 	 * for this wr, therefore we need to make sure that the
3806 	 * memory is updated before giving the doorbell.
3807 	 * During qedr_poll_cq, rmb is called before accessing the
3808 	 * cqe. This covers for the smp_rmb as well.
3809 	 */
3810 	smp_wmb();
3811 	writel(qp->sq.db_data.raw, qp->sq.db);
3812 
3813 	spin_unlock_irqrestore(&qp->q_lock, flags);
3814 
3815 	return rc;
3816 }
3817 
3818 static u32 qedr_srq_elem_left(struct qedr_srq_hwq_info *hw_srq)
3819 {
3820 	u32 used;
3821 
3822 	/* Calculate the number of elements used from the producer
3823 	 * and consumer counts, and subtract it from the maximum
3824 	 * number of work requests supported to get the elements left.
3825 	 */
3826 	used = hw_srq->wr_prod_cnt - (u32)atomic_read(&hw_srq->wr_cons_cnt);
3827 
3828 	return hw_srq->max_wr - used;
3829 }
3830 
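/* Post receive WRs to an SRQ. Each WR consumes one header element plus one
 * element per SGE from the SRQ PBL. The producer pair that the hardware
 * reads lives in host memory: the SGE producer is written first and the WQE
 * producer second, with dma_wmb() barriers in between, so the device never
 * sees a WQE producer that points at SGEs it cannot yet read.
 */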
3831 int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
3832 		       const struct ib_recv_wr **bad_wr)
3833 {
3834 	struct qedr_srq *srq = get_qedr_srq(ibsrq);
3835 	struct qedr_srq_hwq_info *hw_srq;
3836 	struct qedr_dev *dev = srq->dev;
3837 	struct qed_chain *pbl;
3838 	unsigned long flags;
3839 	int status = 0;
3840 	u32 num_sge;
3841 
3842 	spin_lock_irqsave(&srq->lock, flags);
3843 
3844 	hw_srq = &srq->hw_srq;
3845 	pbl = &srq->hw_srq.pbl;
3846 	while (wr) {
3847 		struct rdma_srq_wqe_header *hdr;
3848 		int i;
3849 
3850 		if (!qedr_srq_elem_left(hw_srq) ||
3851 		    wr->num_sge > srq->hw_srq.max_sges) {
3852 			DP_ERR(dev, "Can't post WR  (%d,%d) || (%d > %d)\n",
3853 			       hw_srq->wr_prod_cnt,
3854 			       atomic_read(&hw_srq->wr_cons_cnt),
3855 			       wr->num_sge, srq->hw_srq.max_sges);
3856 			status = -ENOMEM;
3857 			*bad_wr = wr;
3858 			break;
3859 		}
3860 
3861 		hdr = qed_chain_produce(pbl);
3862 		num_sge = wr->num_sge;
3863 		/* Set number of sge and work request id in header */
3864 		SRQ_HDR_SET(hdr, wr->wr_id, num_sge);
3865 
3866 		srq->hw_srq.wr_prod_cnt++;
3867 		hw_srq->wqe_prod++;
3868 		hw_srq->sge_prod++;
3869 
3870 		DP_DEBUG(dev, QEDR_MSG_SRQ,
3871 			 "SRQ WR: SGEs: %d with wr_id[%d] = %llx\n",
3872 			 wr->num_sge, hw_srq->wqe_prod, wr->wr_id);
3873 
3874 		for (i = 0; i < wr->num_sge; i++) {
3875 			struct rdma_srq_sge *srq_sge = qed_chain_produce(pbl);
3876 
3877 			/* Set SGE length, lkey and address */
3878 			SRQ_SGE_SET(srq_sge, wr->sg_list[i].addr,
3879 				    wr->sg_list[i].length, wr->sg_list[i].lkey);
3880 
3881 			DP_DEBUG(dev, QEDR_MSG_SRQ,
3882 				 "[%d]: len %d key %x addr %x:%x\n",
3883 				 i, srq_sge->length, srq_sge->l_key,
3884 				 srq_sge->addr.hi, srq_sge->addr.lo);
3885 			hw_srq->sge_prod++;
3886 		}
3887 
3888 		/* Update WQE and SGE information before
3889 		 * updating producer.
3890 		 */
3891 		dma_wmb();
3892 
3893 		/* The SRQ producer is 8 bytes: the SGE producer index is updated
3894 		 * in the first 4 bytes and the WQE producer in the
3895 		 * next 4 bytes.
3896 		 */
3897 		srq->hw_srq.virt_prod_pair_addr->sge_prod = cpu_to_le32(hw_srq->sge_prod);
3898 		/* Make sure sge producer is updated first */
3899 		dma_wmb();
3900 		srq->hw_srq.virt_prod_pair_addr->wqe_prod = cpu_to_le32(hw_srq->wqe_prod);
3901 
3902 		wr = wr->next;
3903 	}
3904 
3905 	DP_DEBUG(dev, QEDR_MSG_SRQ, "POST: Elements in S-RQ: %d\n",
3906 		 qed_chain_get_elem_left(pbl));
3907 	spin_unlock_irqrestore(&srq->lock, flags);
3908 
3909 	return status;
3910 }
3911 
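/* Post receive WRs to a regular RQ. Each SGE becomes one rdma_rq_sge chain
 * element; the first element of a WR also carries the total SGE count in
 * its flags. The RQ doorbell is rung once per WR (plus a second doorbell
 * for iWARP), after an smp_wmb() so that rqe_wr_id is visible before a
 * completion for this WR can be polled.
 */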
3912 int qedr_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
3913 		   const struct ib_recv_wr **bad_wr)
3914 {
3915 	struct qedr_qp *qp = get_qedr_qp(ibqp);
3916 	struct qedr_dev *dev = qp->dev;
3917 	unsigned long flags;
3918 	int status = 0;
3919 
3920 	if (qp->qp_type == IB_QPT_GSI)
3921 		return qedr_gsi_post_recv(ibqp, wr, bad_wr);
3922 
3923 	spin_lock_irqsave(&qp->q_lock, flags);
3924 
3925 	if (qp->state == QED_ROCE_QP_STATE_RESET) {
3926 		spin_unlock_irqrestore(&qp->q_lock, flags);
3927 		*bad_wr = wr;
3928 		return -EINVAL;
3929 	}
3930 
3931 	while (wr) {
3932 		int i;
3933 
3934 		if (qed_chain_get_elem_left_u32(&qp->rq.pbl) <
3935 		    QEDR_MAX_RQE_ELEMENTS_PER_RQE ||
3936 		    wr->num_sge > qp->rq.max_sges) {
3937 			DP_ERR(dev, "Can't post WR  (%d < %d) || (%d > %d)\n",
3938 			       qed_chain_get_elem_left_u32(&qp->rq.pbl),
3939 			       QEDR_MAX_RQE_ELEMENTS_PER_RQE, wr->num_sge,
3940 			       qp->rq.max_sges);
3941 			status = -ENOMEM;
3942 			*bad_wr = wr;
3943 			break;
3944 		}
3945 		for (i = 0; i < wr->num_sge; i++) {
3946 			u32 flags = 0;
3947 			struct rdma_rq_sge *rqe =
3948 			    qed_chain_produce(&qp->rq.pbl);
3949 
3950 			/* First one must include the number
3951 			 * of SGE in the list
3952 			 */
3953 			if (!i)
3954 				SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES,
3955 					  wr->num_sge);
3956 
3957 			SET_FIELD(flags, RDMA_RQ_SGE_L_KEY_LO,
3958 				  wr->sg_list[i].lkey);
3959 
3960 			RQ_SGE_SET(rqe, wr->sg_list[i].addr,
3961 				   wr->sg_list[i].length, flags);
3962 		}
3963 
3964 		/* Special case of no SGEs. FW requires between 1-4 SGEs,
3965 		 * so in this case we need to post one SGE with length zero. This is
3966 		 * because an RDMA write with immediate consumes an RQ entry.
3967 		 */
3968 		if (!wr->num_sge) {
3969 			u32 flags = 0;
3970 			struct rdma_rq_sge *rqe =
3971 			    qed_chain_produce(&qp->rq.pbl);
3972 
3973 			/* First one must include the number
3974 			 * of SGE in the list
3975 			 */
3976 			SET_FIELD(flags, RDMA_RQ_SGE_L_KEY_LO, 0);
3977 			SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, 1);
3978 
3979 			RQ_SGE_SET(rqe, 0, 0, flags);
3980 			i = 1;
3981 		}
3982 
3983 		qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;
3984 		qp->rqe_wr_id[qp->rq.prod].wqe_size = i;
3985 
3986 		qedr_inc_sw_prod(&qp->rq);
3987 
3988 		/* qp->rqe_wr_id is accessed during qedr_poll_cq, as
3989 		 * soon as we give the doorbell, we could get a completion
3990 		 * for this wr, therefore we need to make sure that the
3991 		 * memory is updated before giving the doorbell.
3992 		 * During qedr_poll_cq, rmb is called before accessing the
3993 		 * cqe. This covers for the smp_rmb as well.
3994 		 */
3995 		smp_wmb();
3996 
3997 		qp->rq.db_data.data.value++;
3998 
3999 		writel(qp->rq.db_data.raw, qp->rq.db);
4000 
4001 		if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
4002 			writel(qp->rq.iwarp_db2_data.raw, qp->rq.iwarp_db2);
4003 		}
4004 
4005 		wr = wr->next;
4006 	}
4007 
4008 	spin_unlock_irqrestore(&qp->q_lock, flags);
4009 
4010 	return status;
4011 }
4012 
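/* CQE ownership is tracked with a toggle bit in the CQE flags. A CQE is
 * valid (written by hardware and not yet processed) when its toggle bit
 * matches cq->pbl_toggle, which the driver flips each time the CQE chain
 * wraps around.
 */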
4013 static int is_valid_cqe(struct qedr_cq *cq, union rdma_cqe *cqe)
4014 {
4015 	struct rdma_cqe_requester *resp_cqe = &cqe->req;
4016 
4017 	return (resp_cqe->flags & RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK) ==
4018 		cq->pbl_toggle;
4019 }
4020 
4021 static struct qedr_qp *cqe_get_qp(union rdma_cqe *cqe)
4022 {
4023 	struct rdma_cqe_requester *resp_cqe = &cqe->req;
4024 	struct qedr_qp *qp;
4025 
4026 	qp = (struct qedr_qp *)(uintptr_t)HILO_GEN(resp_cqe->qp_handle.hi,
4027 						   resp_cqe->qp_handle.lo,
4028 						   u64);
4029 	return qp;
4030 }
4031 
4032 static enum rdma_cqe_type cqe_get_type(union rdma_cqe *cqe)
4033 {
4034 	struct rdma_cqe_requester *resp_cqe = &cqe->req;
4035 
4036 	return GET_FIELD(resp_cqe->flags, RDMA_CQE_REQUESTER_TYPE);
4037 }
4038 
4039 /* Return latest CQE (needs processing) */
4040 static union rdma_cqe *get_cqe(struct qedr_cq *cq)
4041 {
4042 	return cq->latest_cqe;
4043 }
4044 
4045 /* For FMR we need to increase the FMR completed counter, used by the FMR
4046  * algorithm to determine whether a PBL can be freed or not.
4047  * We need to perform this whether the work request was signaled or not. For
4048  * this purpose we call this function from the condition that checks if a WR
4049  * should be skipped, to make sure we don't miss it (possibly this FMR
4050  * operation was not signaled).
4051  */
4052 static inline void qedr_chk_if_fmr(struct qedr_qp *qp)
4053 {
4054 	if (qp->wqe_wr_id[qp->sq.cons].opcode == IB_WC_REG_MR)
4055 		qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
4056 }
4057 
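/* Drain requester completions: walk the SQ from the software consumer up to
 * the hardware consumer (hw_cons), filling at most num_entries WCs.
 * Unsignaled WRs are skipped (apart from the FMR bookkeeping above) unless
 * force is set, which is used when flushing. For every WR, wqe_size chain
 * elements are consumed and the software consumer is advanced.
 */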
4058 static int process_req(struct qedr_dev *dev, struct qedr_qp *qp,
4059 		       struct qedr_cq *cq, int num_entries,
4060 		       struct ib_wc *wc, u16 hw_cons, enum ib_wc_status status,
4061 		       int force)
4062 {
4063 	u16 cnt = 0;
4064 
4065 	while (num_entries && qp->sq.wqe_cons != hw_cons) {
4066 		if (!qp->wqe_wr_id[qp->sq.cons].signaled && !force) {
4067 			qedr_chk_if_fmr(qp);
4068 			/* skip WC */
4069 			goto next_cqe;
4070 		}
4071 
4072 		/* fill WC */
4073 		wc->status = status;
4074 		wc->vendor_err = 0;
4075 		wc->wc_flags = 0;
4076 		wc->src_qp = qp->id;
4077 		wc->qp = &qp->ibqp;
4078 
4079 		wc->wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
4080 		wc->opcode = qp->wqe_wr_id[qp->sq.cons].opcode;
4081 
4082 		switch (wc->opcode) {
4083 		case IB_WC_RDMA_WRITE:
4084 			wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
4085 			break;
4086 		case IB_WC_COMP_SWAP:
4087 		case IB_WC_FETCH_ADD:
4088 			wc->byte_len = 8;
4089 			break;
4090 		case IB_WC_REG_MR:
4091 			qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
4092 			break;
4093 		case IB_WC_RDMA_READ:
4094 		case IB_WC_SEND:
4095 			wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
4096 			break;
4097 		default:
4098 			break;
4099 		}
4100 
4101 		num_entries--;
4102 		wc++;
4103 		cnt++;
4104 next_cqe:
4105 		while (qp->wqe_wr_id[qp->sq.cons].wqe_size--)
4106 			qed_chain_consume(&qp->sq.pbl);
4107 		qedr_inc_sw_cons(&qp->sq);
4108 	}
4109 
4110 	return cnt;
4111 }
4112 
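/* Handle a requester CQE according to its status: on success, complete all
 * WRs up to sq_cons; on a flush, complete them with IB_WC_WR_FLUSH_ERR; for
 * any other error, move the QP to the ERR state, complete the WRs before
 * the failing one as successful, and (if there is room) emit one extra WC
 * carrying the error status mapped to an ib_wc_status value.
 */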
4113 static int qedr_poll_cq_req(struct qedr_dev *dev,
4114 			    struct qedr_qp *qp, struct qedr_cq *cq,
4115 			    int num_entries, struct ib_wc *wc,
4116 			    struct rdma_cqe_requester *req)
4117 {
4118 	int cnt = 0;
4119 
4120 	switch (req->status) {
4121 	case RDMA_CQE_REQ_STS_OK:
4122 		cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
4123 				  IB_WC_SUCCESS, 0);
4124 		break;
4125 	case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
4126 		if (qp->state != QED_ROCE_QP_STATE_ERR)
4127 			DP_DEBUG(dev, QEDR_MSG_CQ,
4128 				 "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4129 				 cq->icid, qp->icid);
4130 		cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
4131 				  IB_WC_WR_FLUSH_ERR, 1);
4132 		break;
4133 	default:
4134 		/* process all WQEs before the consumer */
4135 		qp->state = QED_ROCE_QP_STATE_ERR;
4136 		cnt = process_req(dev, qp, cq, num_entries, wc,
4137 				  req->sq_cons - 1, IB_WC_SUCCESS, 0);
4138 		wc += cnt;
4139 		/* if we have extra WC fill it with actual error info */
4140 		if (cnt < num_entries) {
4141 			enum ib_wc_status wc_status;
4142 
4143 			switch (req->status) {
4144 			case RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR:
4145 				DP_ERR(dev,
4146 				       "Error: POLL CQ with RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4147 				       cq->icid, qp->icid);
4148 				wc_status = IB_WC_BAD_RESP_ERR;
4149 				break;
4150 			case RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR:
4151 				DP_ERR(dev,
4152 				       "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4153 				       cq->icid, qp->icid);
4154 				wc_status = IB_WC_LOC_LEN_ERR;
4155 				break;
4156 			case RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR:
4157 				DP_ERR(dev,
4158 				       "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4159 				       cq->icid, qp->icid);
4160 				wc_status = IB_WC_LOC_QP_OP_ERR;
4161 				break;
4162 			case RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR:
4163 				DP_ERR(dev,
4164 				       "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4165 				       cq->icid, qp->icid);
4166 				wc_status = IB_WC_LOC_PROT_ERR;
4167 				break;
4168 			case RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR:
4169 				DP_ERR(dev,
4170 				       "Error: POLL CQ with RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4171 				       cq->icid, qp->icid);
4172 				wc_status = IB_WC_MW_BIND_ERR;
4173 				break;
4174 			case RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR:
4175 				DP_ERR(dev,
4176 				       "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4177 				       cq->icid, qp->icid);
4178 				wc_status = IB_WC_REM_INV_REQ_ERR;
4179 				break;
4180 			case RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR:
4181 				DP_ERR(dev,
4182 				       "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4183 				       cq->icid, qp->icid);
4184 				wc_status = IB_WC_REM_ACCESS_ERR;
4185 				break;
4186 			case RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR:
4187 				DP_ERR(dev,
4188 				       "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4189 				       cq->icid, qp->icid);
4190 				wc_status = IB_WC_REM_OP_ERR;
4191 				break;
4192 			case RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR:
4193 				DP_ERR(dev,
4194 				       "Error: POLL CQ with RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4195 				       cq->icid, qp->icid);
4196 				wc_status = IB_WC_RNR_RETRY_EXC_ERR;
4197 				break;
4198 			case RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR:
4199 				DP_ERR(dev,
4200 				       "Error: POLL CQ with ROCE_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4201 				       cq->icid, qp->icid);
4202 				wc_status = IB_WC_RETRY_EXC_ERR;
4203 				break;
4204 			default:
4205 				DP_ERR(dev,
4206 				       "Error: POLL CQ with IB_WC_GENERAL_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4207 				       cq->icid, qp->icid);
4208 				wc_status = IB_WC_GENERAL_ERR;
4209 			}
4210 			cnt += process_req(dev, qp, cq, 1, wc, req->sq_cons,
4211 					   wc_status, 1);
4212 		}
4213 	}
4214 
4215 	return cnt;
4216 }
4217 
4218 static inline int qedr_cqe_resp_status_to_ib(u8 status)
4219 {
4220 	switch (status) {
4221 	case RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR:
4222 		return IB_WC_LOC_ACCESS_ERR;
4223 	case RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR:
4224 		return IB_WC_LOC_LEN_ERR;
4225 	case RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR:
4226 		return IB_WC_LOC_QP_OP_ERR;
4227 	case RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR:
4228 		return IB_WC_LOC_PROT_ERR;
4229 	case RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR:
4230 		return IB_WC_MW_BIND_ERR;
4231 	case RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR:
4232 		return IB_WC_REM_INV_RD_REQ_ERR;
4233 	case RDMA_CQE_RESP_STS_OK:
4234 		return IB_WC_SUCCESS;
4235 	default:
4236 		return IB_WC_GENERAL_ERR;
4237 	}
4238 }
4239 
4240 static inline int qedr_set_ok_cqe_resp_wc(struct rdma_cqe_responder *resp,
4241 					  struct ib_wc *wc)
4242 {
4243 	wc->status = IB_WC_SUCCESS;
4244 	wc->byte_len = le32_to_cpu(resp->length);
4245 
4246 	if (resp->flags & QEDR_RESP_IMM) {
4247 		wc->ex.imm_data = cpu_to_be32(le32_to_cpu(resp->imm_data_or_inv_r_Key));
4248 		wc->wc_flags |= IB_WC_WITH_IMM;
4249 
4250 		if (resp->flags & QEDR_RESP_RDMA)
4251 			wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
4252 
4253 		if (resp->flags & QEDR_RESP_INV)
4254 			return -EINVAL;
4255 
4256 	} else if (resp->flags & QEDR_RESP_INV) {
4257 		wc->ex.imm_data = le32_to_cpu(resp->imm_data_or_inv_r_Key);
4258 		wc->wc_flags |= IB_WC_WITH_INVALIDATE;
4259 
4260 		if (resp->flags & QEDR_RESP_RDMA)
4261 			return -EINVAL;
4262 
4263 	} else if (resp->flags & QEDR_RESP_RDMA) {
4264 		return -EINVAL;
4265 	}
4266 
4267 	return 0;
4268 }
4269 
4270 static void __process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
4271 			       struct qedr_cq *cq, struct ib_wc *wc,
4272 			       struct rdma_cqe_responder *resp, u64 wr_id)
4273 {
4274 	/* Must fill fields before qedr_set_ok_cqe_resp_wc() */
4275 	wc->opcode = IB_WC_RECV;
4276 	wc->wc_flags = 0;
4277 
4278 	if (likely(resp->status == RDMA_CQE_RESP_STS_OK)) {
4279 		if (qedr_set_ok_cqe_resp_wc(resp, wc))
4280 			DP_ERR(dev,
4281 			       "CQ %p (icid=%d) has invalid CQE responder flags=0x%x\n",
4282 			       cq, cq->icid, resp->flags);
4283 
4284 	} else {
4285 		wc->status = qedr_cqe_resp_status_to_ib(resp->status);
4286 		if (wc->status == IB_WC_GENERAL_ERR)
4287 			DP_ERR(dev,
4288 			       "CQ %p (icid=%d) contains an invalid CQE status %d\n",
4289 			       cq, cq->icid, resp->status);
4290 	}
4291 
4292 	/* Fill the rest of the WC */
4293 	wc->vendor_err = 0;
4294 	wc->src_qp = qp->id;
4295 	wc->qp = &qp->ibqp;
4296 	wc->wr_id = wr_id;
4297 }
4298 
4299 static int process_resp_one_srq(struct qedr_dev *dev, struct qedr_qp *qp,
4300 				struct qedr_cq *cq, struct ib_wc *wc,
4301 				struct rdma_cqe_responder *resp)
4302 {
4303 	struct qedr_srq *srq = qp->srq;
4304 	u64 wr_id;
4305 
4306 	wr_id = HILO_GEN(le32_to_cpu(resp->srq_wr_id.hi),
4307 			 le32_to_cpu(resp->srq_wr_id.lo), u64);
4308 
4309 	if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
4310 		wc->status = IB_WC_WR_FLUSH_ERR;
4311 		wc->vendor_err = 0;
4312 		wc->wr_id = wr_id;
4313 		wc->byte_len = 0;
4314 		wc->src_qp = qp->id;
4315 		wc->qp = &qp->ibqp;
4316 		wc->wr_id = wr_id;
4317 	} else {
4318 		__process_resp_one(dev, qp, cq, wc, resp, wr_id);
4319 	}
4320 	atomic_inc(&srq->hw_srq.wr_cons_cnt);
4321 
4322 	return 1;
4323 }
4324 static int process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
4325 			    struct qedr_cq *cq, struct ib_wc *wc,
4326 			    struct rdma_cqe_responder *resp)
4327 {
4328 	u64 wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
4329 
4330 	__process_resp_one(dev, qp, cq, wc, resp, wr_id);
4331 
4332 	while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
4333 		qed_chain_consume(&qp->rq.pbl);
4334 	qedr_inc_sw_cons(&qp->rq);
4335 
4336 	return 1;
4337 }
4338 
4339 static int process_resp_flush(struct qedr_qp *qp, struct qedr_cq *cq,
4340 			      int num_entries, struct ib_wc *wc, u16 hw_cons)
4341 {
4342 	u16 cnt = 0;
4343 
4344 	while (num_entries && qp->rq.wqe_cons != hw_cons) {
4345 		/* fill WC */
4346 		wc->status = IB_WC_WR_FLUSH_ERR;
4347 		wc->vendor_err = 0;
4348 		wc->wc_flags = 0;
4349 		wc->src_qp = qp->id;
4350 		wc->byte_len = 0;
4351 		wc->wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
4352 		wc->qp = &qp->ibqp;
4353 		num_entries--;
4354 		wc++;
4355 		cnt++;
4356 		while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
4357 			qed_chain_consume(&qp->rq.pbl);
4358 		qedr_inc_sw_cons(&qp->rq);
4359 	}
4360 
4361 	return cnt;
4362 }
4363 
4364 static void try_consume_resp_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
4365 				 struct rdma_cqe_responder *resp, int *update)
4366 {
4367 	if (le16_to_cpu(resp->rq_cons_or_srq_id) == qp->rq.wqe_cons) {
4368 		consume_cqe(cq);
4369 		*update |= 1;
4370 	}
4371 }
4372 
4373 static int qedr_poll_cq_resp_srq(struct qedr_dev *dev, struct qedr_qp *qp,
4374 				 struct qedr_cq *cq, int num_entries,
4375 				 struct ib_wc *wc,
4376 				 struct rdma_cqe_responder *resp)
4377 {
4378 	int cnt;
4379 
4380 	cnt = process_resp_one_srq(dev, qp, cq, wc, resp);
4381 	consume_cqe(cq);
4382 
4383 	return cnt;
4384 }
4385 
4386 static int qedr_poll_cq_resp(struct qedr_dev *dev, struct qedr_qp *qp,
4387 			     struct qedr_cq *cq, int num_entries,
4388 			     struct ib_wc *wc, struct rdma_cqe_responder *resp,
4389 			     int *update)
4390 {
4391 	int cnt;
4392 
4393 	if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
4394 		cnt = process_resp_flush(qp, cq, num_entries, wc,
4395 					 resp->rq_cons_or_srq_id);
4396 		try_consume_resp_cqe(cq, qp, resp, update);
4397 	} else {
4398 		cnt = process_resp_one(dev, qp, cq, wc, resp);
4399 		consume_cqe(cq);
4400 		*update |= 1;
4401 	}
4402 
4403 	return cnt;
4404 }
4405 
4406 static void try_consume_req_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
4407 				struct rdma_cqe_requester *req, int *update)
4408 {
4409 	if (le16_to_cpu(req->sq_cons) == qp->sq.wqe_cons) {
4410 		consume_cqe(cq);
4411 		*update |= 1;
4412 	}
4413 }
4414 
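/* Poll the CQ: GSI CQs are handled by qedr_gsi_poll_cq(), destroyed CQs
 * return 0. Otherwise the loop processes every valid CQE (with an rmb()
 * before reading it), dispatches on the CQE type, advances cq_cons by the
 * chain consumer delta and, if anything was consumed, rings the CQ doorbell
 * with cq_cons - 1.
 *
 * Typical caller flow through the verbs layer (illustrative sketch only;
 * handle_wc() is a hypothetical consumer):
 *
 *	struct ib_wc wc[16];
 *	int i, n = ib_poll_cq(cq, ARRAY_SIZE(wc), wc);
 *
 *	for (i = 0; i < n; i++)
 *		handle_wc(&wc[i]);
 */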
4415 int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
4416 {
4417 	struct qedr_dev *dev = get_qedr_dev(ibcq->device);
4418 	struct qedr_cq *cq = get_qedr_cq(ibcq);
4419 	union rdma_cqe *cqe;
4420 	u32 old_cons, new_cons;
4421 	unsigned long flags;
4422 	int update = 0;
4423 	int done = 0;
4424 
4425 	if (cq->destroyed) {
4426 		DP_ERR(dev,
4427 		       "warning: poll was invoked after destroy for cq %p (icid=%d)\n",
4428 		       cq, cq->icid);
4429 		return 0;
4430 	}
4431 
4432 	if (cq->cq_type == QEDR_CQ_TYPE_GSI)
4433 		return qedr_gsi_poll_cq(ibcq, num_entries, wc);
4434 
4435 	spin_lock_irqsave(&cq->cq_lock, flags);
4436 	cqe = cq->latest_cqe;
4437 	old_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
4438 	while (num_entries && is_valid_cqe(cq, cqe)) {
4439 		struct qedr_qp *qp;
4440 		int cnt = 0;
4441 
4442 		/* prevent speculative reads of any field of CQE */
4443 		rmb();
4444 
4445 		qp = cqe_get_qp(cqe);
4446 		if (!qp) {
4447 			WARN(1, "Error: CQE QP pointer is NULL. CQE=%p\n", cqe);
4448 			break;
4449 		}
4450 
4451 		wc->qp = &qp->ibqp;
4452 
4453 		switch (cqe_get_type(cqe)) {
4454 		case RDMA_CQE_TYPE_REQUESTER:
4455 			cnt = qedr_poll_cq_req(dev, qp, cq, num_entries, wc,
4456 					       &cqe->req);
4457 			try_consume_req_cqe(cq, qp, &cqe->req, &update);
4458 			break;
4459 		case RDMA_CQE_TYPE_RESPONDER_RQ:
4460 			cnt = qedr_poll_cq_resp(dev, qp, cq, num_entries, wc,
4461 						&cqe->resp, &update);
4462 			break;
4463 		case RDMA_CQE_TYPE_RESPONDER_SRQ:
4464 			cnt = qedr_poll_cq_resp_srq(dev, qp, cq, num_entries,
4465 						    wc, &cqe->resp);
4466 			update = 1;
4467 			break;
4468 		case RDMA_CQE_TYPE_INVALID:
4469 		default:
4470 			DP_ERR(dev, "Error: invalid CQE type = %d\n",
4471 			       cqe_get_type(cqe));
4472 		}
4473 		num_entries -= cnt;
4474 		wc += cnt;
4475 		done += cnt;
4476 
4477 		cqe = get_cqe(cq);
4478 	}
4479 	new_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
4480 
4481 	cq->cq_cons += new_cons - old_cons;
4482 
4483 	if (update)
4484 		/* the doorbell notifies about the latest VALID entry,
4485 		 * but the chain already points to the next INVALID one
4486 		 */
4487 		doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
4488 
4489 	spin_unlock_irqrestore(&cq->cq_lock, flags);
4490 	return done;
4491 }
4492 
4493 int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
4494 		     u8 port_num, const struct ib_wc *in_wc,
4495 		     const struct ib_grh *in_grh, const struct ib_mad *in,
4496 		     struct ib_mad *out_mad, size_t *out_mad_size,
4497 		     u16 *out_mad_pkey_index)
4498 {
4499 	return IB_MAD_RESULT_SUCCESS;
4500 }
4501