1 /*
2  * Broadcom NetXtreme-E RoCE driver.
3  *
4  * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
5  * Broadcom refers to Broadcom Limited and/or its subsidiaries.
6  *
7  * This software is available to you under a choice of one of two
8  * licenses.  You may choose to be licensed under the terms of the GNU
9  * General Public License (GPL) Version 2, available from the file
10  * COPYING in the main directory of this source tree, or the
11  * BSD license below:
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  *
17  * 1. Redistributions of source code must retain the above copyright
18  *    notice, this list of conditions and the following disclaimer.
19  * 2. Redistributions in binary form must reproduce the above copyright
20  *    notice, this list of conditions and the following disclaimer in
21  *    the documentation and/or other materials provided with the
22  *    distribution.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
25  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
26  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
28  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
31  * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
32  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
33  * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
34  * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35  *
36  * Description: IB Verbs interpreter
37  */
38 
39 #include <linux/interrupt.h>
40 #include <linux/types.h>
41 #include <linux/pci.h>
42 #include <linux/netdevice.h>
43 #include <linux/if_ether.h>
44 #include <net/addrconf.h>
45 
46 #include <rdma/ib_verbs.h>
47 #include <rdma/ib_user_verbs.h>
48 #include <rdma/ib_umem.h>
49 #include <rdma/ib_addr.h>
50 #include <rdma/ib_mad.h>
51 #include <rdma/ib_cache.h>
52 #include <rdma/uverbs_ioctl.h>
53 #include <linux/hashtable.h>
54 
55 #include "bnxt_ulp.h"
56 
57 #include "roce_hsi.h"
58 #include "qplib_res.h"
59 #include "qplib_sp.h"
60 #include "qplib_fp.h"
61 #include "qplib_rcfw.h"
62 
63 #include "bnxt_re.h"
64 #include "ib_verbs.h"
65 
66 #include <rdma/uverbs_types.h>
67 #include <rdma/uverbs_std_types.h>
68 
69 #include <rdma/ib_user_ioctl_cmds.h>
70 
71 #define UVERBS_MODULE_NAME bnxt_re
72 #include <rdma/uverbs_named_ioctl.h>
73 
74 #include <rdma/bnxt_re-abi.h>
75 
76 static int __from_ib_access_flags(int iflags)
77 {
78 	int qflags = 0;
79 
80 	if (iflags & IB_ACCESS_LOCAL_WRITE)
81 		qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
82 	if (iflags & IB_ACCESS_REMOTE_READ)
83 		qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
84 	if (iflags & IB_ACCESS_REMOTE_WRITE)
85 		qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
86 	if (iflags & IB_ACCESS_REMOTE_ATOMIC)
87 		qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
88 	if (iflags & IB_ACCESS_MW_BIND)
89 		qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
90 	if (iflags & IB_ZERO_BASED)
91 		qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
92 	if (iflags & IB_ACCESS_ON_DEMAND)
93 		qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
94 	return qflags;
95 };
96 
97 static enum ib_access_flags __to_ib_access_flags(int qflags)
98 {
99 	enum ib_access_flags iflags = 0;
100 
101 	if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
102 		iflags |= IB_ACCESS_LOCAL_WRITE;
103 	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
104 		iflags |= IB_ACCESS_REMOTE_WRITE;
105 	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
106 		iflags |= IB_ACCESS_REMOTE_READ;
107 	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
108 		iflags |= IB_ACCESS_REMOTE_ATOMIC;
109 	if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
110 		iflags |= IB_ACCESS_MW_BIND;
111 	if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
112 		iflags |= IB_ZERO_BASED;
113 	if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
114 		iflags |= IB_ACCESS_ON_DEMAND;
115 	return iflags;
116 };
117 
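/* Enable relaxed ordering on the MR only when both the device capability
 * and PCIe relaxed ordering are enabled.
 */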
118 static void bnxt_re_check_and_set_relaxed_ordering(struct bnxt_re_dev *rdev,
119 						   struct bnxt_qplib_mrw *qplib_mr)
120 {
121 	if (_is_relaxed_ordering_supported(rdev->dev_attr->dev_cap_flags2) &&
122 	    pcie_relaxed_ordering_enabled(rdev->en_dev->pdev))
123 		qplib_mr->flags |= CMDQ_REGISTER_MR_FLAGS_ENABLE_RO;
124 }
125 
126 static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list,
127 			     struct bnxt_qplib_sge *sg_list, int num)
128 {
129 	int i, total = 0;
130 
131 	for (i = 0; i < num; i++) {
132 		sg_list[i].addr = ib_sg_list[i].addr;
133 		sg_list[i].lkey = ib_sg_list[i].lkey;
134 		sg_list[i].size = ib_sg_list[i].length;
135 		total += sg_list[i].size;
136 	}
137 	return total;
138 }
139 
140 /* Device */
141 int bnxt_re_query_device(struct ib_device *ibdev,
142 			 struct ib_device_attr *ib_attr,
143 			 struct ib_udata *udata)
144 {
145 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
146 	struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
147 
148 	memset(ib_attr, 0, sizeof(*ib_attr));
149 	memcpy(&ib_attr->fw_ver, dev_attr->fw_ver,
150 	       min(sizeof(dev_attr->fw_ver),
151 		   sizeof(ib_attr->fw_ver)));
152 	addrconf_addr_eui48((u8 *)&ib_attr->sys_image_guid,
153 			    rdev->netdev->dev_addr);
154 	ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE;
155 	ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_SUPPORTED;
156 
157 	ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
158 	ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
159 	ib_attr->hw_ver = rdev->en_dev->pdev->revision;
160 	ib_attr->max_qp = dev_attr->max_qp;
161 	ib_attr->max_qp_wr = dev_attr->max_qp_wqes;
162 	ib_attr->device_cap_flags =
163 				    IB_DEVICE_CURR_QP_STATE_MOD
164 				    | IB_DEVICE_RC_RNR_NAK_GEN
165 				    | IB_DEVICE_SHUTDOWN_PORT
166 				    | IB_DEVICE_SYS_IMAGE_GUID
167 				    | IB_DEVICE_RESIZE_MAX_WR
168 				    | IB_DEVICE_PORT_ACTIVE_EVENT
169 				    | IB_DEVICE_N_NOTIFY_CQ
170 				    | IB_DEVICE_MEM_WINDOW
171 				    | IB_DEVICE_MEM_WINDOW_TYPE_2B
172 				    | IB_DEVICE_MEM_MGT_EXTENSIONS;
173 	ib_attr->kernel_cap_flags = IBK_LOCAL_DMA_LKEY;
174 	ib_attr->max_send_sge = dev_attr->max_qp_sges;
175 	ib_attr->max_recv_sge = dev_attr->max_qp_sges;
176 	ib_attr->max_sge_rd = dev_attr->max_qp_sges;
177 	ib_attr->max_cq = dev_attr->max_cq;
178 	ib_attr->max_cqe = dev_attr->max_cq_wqes;
179 	ib_attr->max_mr = dev_attr->max_mr;
180 	ib_attr->max_pd = dev_attr->max_pd;
181 	ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
182 	ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom;
183 	ib_attr->atomic_cap = IB_ATOMIC_NONE;
184 	ib_attr->masked_atomic_cap = IB_ATOMIC_NONE;
185 	if (dev_attr->is_atomic) {
186 		ib_attr->atomic_cap = IB_ATOMIC_GLOB;
187 		ib_attr->masked_atomic_cap = IB_ATOMIC_GLOB;
188 	}
189 
190 	ib_attr->max_ee_rd_atom = 0;
191 	ib_attr->max_res_rd_atom = 0;
192 	ib_attr->max_ee_init_rd_atom = 0;
193 	ib_attr->max_ee = 0;
194 	ib_attr->max_rdd = 0;
195 	ib_attr->max_mw = dev_attr->max_mw;
196 	ib_attr->max_raw_ipv6_qp = 0;
197 	ib_attr->max_raw_ethy_qp = dev_attr->max_raw_ethy_qp;
198 	ib_attr->max_mcast_grp = 0;
199 	ib_attr->max_mcast_qp_attach = 0;
200 	ib_attr->max_total_mcast_qp_attach = 0;
201 	ib_attr->max_ah = dev_attr->max_ah;
202 
203 	ib_attr->max_srq = dev_attr->max_srq;
204 	ib_attr->max_srq_wr = dev_attr->max_srq_wqes;
205 	ib_attr->max_srq_sge = dev_attr->max_srq_sges;
206 
207 	ib_attr->max_fast_reg_page_list_len = MAX_PBL_LVL_1_PGS;
208 
209 	ib_attr->max_pkeys = 1;
210 	ib_attr->local_ca_ack_delay = BNXT_RE_DEFAULT_ACK_DELAY;
211 	return 0;
212 }
213 
214 /* Port */
215 int bnxt_re_query_port(struct ib_device *ibdev, u32 port_num,
216 		       struct ib_port_attr *port_attr)
217 {
218 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
219 	struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
220 	int rc;
221 
222 	memset(port_attr, 0, sizeof(*port_attr));
223 
224 	if (netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev)) {
225 		port_attr->state = IB_PORT_ACTIVE;
226 		port_attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
227 	} else {
228 		port_attr->state = IB_PORT_DOWN;
229 		port_attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
230 	}
231 	port_attr->max_mtu = IB_MTU_4096;
232 	port_attr->active_mtu = iboe_get_mtu(rdev->netdev->mtu);
233 	port_attr->gid_tbl_len = dev_attr->max_sgid;
234 	port_attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
235 				    IB_PORT_DEVICE_MGMT_SUP |
236 				    IB_PORT_VENDOR_CLASS_SUP;
237 	port_attr->ip_gids = true;
238 
239 	port_attr->max_msg_sz = (u32)BNXT_RE_MAX_MR_SIZE_LOW;
240 	port_attr->bad_pkey_cntr = 0;
241 	port_attr->qkey_viol_cntr = 0;
242 	port_attr->pkey_tbl_len = dev_attr->max_pkey;
243 	port_attr->lid = 0;
244 	port_attr->sm_lid = 0;
245 	port_attr->lmc = 0;
246 	port_attr->max_vl_num = 4;
247 	port_attr->sm_sl = 0;
248 	port_attr->subnet_timeout = 0;
249 	port_attr->init_type_reply = 0;
250 	rc = ib_get_eth_speed(&rdev->ibdev, port_num, &port_attr->active_speed,
251 			      &port_attr->active_width);
252 
253 	return rc;
254 }
255 
256 int bnxt_re_get_port_immutable(struct ib_device *ibdev, u32 port_num,
257 			       struct ib_port_immutable *immutable)
258 {
259 	struct ib_port_attr port_attr;
260 
261 	if (bnxt_re_query_port(ibdev, port_num, &port_attr))
262 		return -EINVAL;
263 
264 	immutable->pkey_tbl_len = port_attr.pkey_tbl_len;
265 	immutable->gid_tbl_len = port_attr.gid_tbl_len;
266 	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
267 	immutable->core_cap_flags |= RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
268 	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
269 	return 0;
270 }
271 
272 void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str)
273 {
274 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
275 
276 	snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d",
277 		 rdev->dev_attr->fw_ver[0], rdev->dev_attr->fw_ver[1],
278 		 rdev->dev_attr->fw_ver[2], rdev->dev_attr->fw_ver[3]);
279 }
280 
281 int bnxt_re_query_pkey(struct ib_device *ibdev, u32 port_num,
282 		       u16 index, u16 *pkey)
283 {
284 	if (index > 0)
285 		return -EINVAL;
286 
287 	*pkey = IB_DEFAULT_PKEY_FULL;
288 
289 	return 0;
290 }
291 
292 int bnxt_re_query_gid(struct ib_device *ibdev, u32 port_num,
293 		      int index, union ib_gid *gid)
294 {
295 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
296 	int rc;
297 
298 	/* Ignore port_num */
299 	memset(gid, 0, sizeof(*gid));
300 	rc = bnxt_qplib_get_sgid(&rdev->qplib_res,
301 				 &rdev->qplib_res.sgid_tbl, index,
302 				 (struct bnxt_qplib_gid *)gid);
303 	return rc;
304 }
305 
306 int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
307 {
308 	int rc = 0;
309 	struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
310 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
311 	struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
312 	struct bnxt_qplib_gid *gid_to_del;
313 	u16 vlan_id = 0xFFFF;
314 
315 	/* Delete the entry from the hardware */
316 	ctx = *context;
317 	if (!ctx)
318 		return -EINVAL;
319 
320 	if (sgid_tbl && sgid_tbl->active) {
321 		if (ctx->idx >= sgid_tbl->max)
322 			return -EINVAL;
323 		gid_to_del = &sgid_tbl->tbl[ctx->idx].gid;
324 		vlan_id = sgid_tbl->tbl[ctx->idx].vlan_id;
325 		/* DEL_GID is called in WQ context (netdevice_event_work_handler)
326 		 * or via the ib_unregister_device path. In the former case QP1
327 		 * may not be destroyed yet, in which case just return as FW
328 		 * needs that entry to be present and will fail its deletion.
329 		 * We could get invoked again after QP1 is destroyed OR get an
330 		 * ADD_GID call with a different GID value for the same index
331 		 * where we issue MODIFY_GID cmd to update the GID entry -- TBD
332 		 */
333 		if (ctx->idx == 0 &&
334 		    rdma_link_local_addr((struct in6_addr *)gid_to_del) &&
335 		    ctx->refcnt == 1 && rdev->gsi_ctx.gsi_sqp) {
336 			ibdev_dbg(&rdev->ibdev,
337 				  "Trying to delete GID0 while QP1 is alive\n");
338 			return -EFAULT;
339 		}
340 		ctx->refcnt--;
341 		if (!ctx->refcnt) {
342 			rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del,
343 						 vlan_id,  true);
344 			if (rc) {
345 				ibdev_err(&rdev->ibdev,
346 					  "Failed to remove GID: %#x", rc);
347 			} else {
348 				ctx_tbl = sgid_tbl->ctx;
349 				ctx_tbl[ctx->idx] = NULL;
350 				kfree(ctx);
351 			}
352 		}
353 	} else {
354 		return -EINVAL;
355 	}
356 	return rc;
357 }
358 
359 int bnxt_re_add_gid(const struct ib_gid_attr *attr, void **context)
360 {
361 	int rc;
362 	u32 tbl_idx = 0;
363 	u16 vlan_id = 0xFFFF;
364 	struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
365 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
366 	struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
367 
368 	rc = rdma_read_gid_l2_fields(attr, &vlan_id, NULL);
369 	if (rc)
370 		return rc;
371 
372 	rc = bnxt_qplib_add_sgid(sgid_tbl, (struct bnxt_qplib_gid *)&attr->gid,
373 				 rdev->qplib_res.netdev->dev_addr,
374 				 vlan_id, true, &tbl_idx);
375 	if (rc == -EALREADY) {
376 		ctx_tbl = sgid_tbl->ctx;
377 		ctx_tbl[tbl_idx]->refcnt++;
378 		*context = ctx_tbl[tbl_idx];
379 		return 0;
380 	}
381 
382 	if (rc < 0) {
383 		ibdev_err(&rdev->ibdev, "Failed to add GID: %#x", rc);
384 		return rc;
385 	}
386 
387 	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
388 	if (!ctx)
389 		return -ENOMEM;
390 	ctx_tbl = sgid_tbl->ctx;
391 	ctx->idx = tbl_idx;
392 	ctx->refcnt = 1;
393 	ctx_tbl[tbl_idx] = ctx;
394 	*context = ctx;
395 
396 	return rc;
397 }
398 
399 enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
400 					    u32 port_num)
401 {
402 	return IB_LINK_LAYER_ETHERNET;
403 }
404 
405 #define	BNXT_RE_FENCE_PBL_SIZE	DIV_ROUND_UP(BNXT_RE_FENCE_BYTES, PAGE_SIZE)
406 
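/* Pre-build the BIND_MW work request used for fencing. Only needed on
 * chips that are not Gen P5/P7, hence the early return; the rkey is
 * bumped again at every (re)bind in bnxt_re_bind_fence_mw().
 */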
407 static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd)
408 {
409 	struct bnxt_re_fence_data *fence = &pd->fence;
410 	struct ib_mr *ib_mr = &fence->mr->ib_mr;
411 	struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;
412 	struct bnxt_re_dev *rdev = pd->rdev;
413 
414 	if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
415 		return;
416 
417 	memset(wqe, 0, sizeof(*wqe));
418 	wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
419 	wqe->wr_id = BNXT_QPLIB_FENCE_WRID;
420 	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
421 	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
422 	wqe->bind.zero_based = false;
423 	wqe->bind.parent_l_key = ib_mr->lkey;
424 	wqe->bind.va = (u64)(unsigned long)fence->va;
425 	wqe->bind.length = fence->size;
426 	wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ);
427 	wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1;
428 
429 	/* Save the initial rkey in fence structure for now;
430 	 * wqe->bind.r_key will be set at (re)bind time.
431 	 */
432 	fence->bind_rkey = ib_inc_rkey(fence->mw->rkey);
433 }
434 
435 static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp)
436 {
437 	struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp,
438 					     qplib_qp);
439 	struct ib_pd *ib_pd = qp->ib_qp.pd;
440 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
441 	struct bnxt_re_fence_data *fence = &pd->fence;
442 	struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe;
443 	struct bnxt_qplib_swqe wqe;
444 	int rc;
445 
446 	memcpy(&wqe, fence_wqe, sizeof(wqe));
447 	wqe.bind.r_key = fence->bind_rkey;
448 	fence->bind_rkey = ib_inc_rkey(fence->bind_rkey);
449 
450 	ibdev_dbg(&qp->rdev->ibdev,
451 		  "Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n",
452 		wqe.bind.r_key, qp->qplib_qp.id, pd);
453 	rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
454 	if (rc) {
455 		ibdev_err(&qp->rdev->ibdev, "Failed to bind fence-WQE\n");
456 		return rc;
457 	}
458 	bnxt_qplib_post_send_db(&qp->qplib_qp);
459 
460 	return rc;
461 }
462 
463 static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd)
464 {
465 	struct bnxt_re_fence_data *fence = &pd->fence;
466 	struct bnxt_re_dev *rdev = pd->rdev;
467 	struct device *dev = &rdev->en_dev->pdev->dev;
468 	struct bnxt_re_mr *mr = fence->mr;
469 
470 	if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
471 		return;
472 
473 	if (fence->mw) {
474 		bnxt_re_dealloc_mw(fence->mw);
475 		fence->mw = NULL;
476 	}
477 	if (mr) {
478 		if (mr->ib_mr.rkey)
479 			bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr,
480 					     true);
481 		if (mr->ib_mr.lkey)
482 			bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
483 		kfree(mr);
484 		fence->mr = NULL;
485 	}
486 	if (fence->dma_addr) {
487 		dma_unmap_single(dev, fence->dma_addr, BNXT_RE_FENCE_BYTES,
488 				 DMA_BIDIRECTIONAL);
489 		fence->dma_addr = 0;
490 	}
491 }
492 
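/* Set up the per-PD fence resources: DMA-map the fence buffer, allocate
 * and register the fence MR, create a type-1 MW, and pre-build the bind
 * WQE. Any failure unwinds through bnxt_re_destroy_fence_mr().
 */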
493 static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
494 {
495 	int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND;
496 	struct bnxt_re_fence_data *fence = &pd->fence;
497 	struct bnxt_re_dev *rdev = pd->rdev;
498 	struct device *dev = &rdev->en_dev->pdev->dev;
499 	struct bnxt_re_mr *mr = NULL;
500 	dma_addr_t dma_addr = 0;
501 	struct ib_mw *mw;
502 	int rc;
503 
504 	if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
505 		return 0;
506 
507 	dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES,
508 				  DMA_BIDIRECTIONAL);
509 	rc = dma_mapping_error(dev, dma_addr);
510 	if (rc) {
511 		ibdev_err(&rdev->ibdev, "Failed to dma-map fence-MR-mem\n");
512 		rc = -EIO;
513 		fence->dma_addr = 0;
514 		goto fail;
515 	}
516 	fence->dma_addr = dma_addr;
517 
518 	/* Allocate an MR */
519 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
520 	if (!mr) {
521 		rc = -ENOMEM;
522 		goto fail;
523 	}
524 	fence->mr = mr;
525 	mr->rdev = rdev;
526 	mr->qplib_mr.pd = &pd->qplib_pd;
527 	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
528 	mr->qplib_mr.access_flags = __from_ib_access_flags(mr_access_flags);
529 	if (!_is_alloc_mr_unified(rdev->dev_attr->dev_cap_flags)) {
530 		rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
531 		if (rc) {
532 			ibdev_err(&rdev->ibdev, "Failed to alloc fence-HW-MR\n");
533 			goto fail;
534 		}
535 
536 		/* Register MR */
537 		mr->ib_mr.lkey = mr->qplib_mr.lkey;
538 	} else {
539 		mr->qplib_mr.flags = CMDQ_REGISTER_MR_FLAGS_ALLOC_MR;
540 	}
541 	mr->qplib_mr.va = (u64)(unsigned long)fence->va;
542 	mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
543 	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, NULL,
544 			       BNXT_RE_FENCE_PBL_SIZE, PAGE_SIZE);
545 	if (rc) {
546 		ibdev_err(&rdev->ibdev, "Failed to register fence-MR\n");
547 		goto fail;
548 	}
549 	mr->ib_mr.rkey = mr->qplib_mr.rkey;
550 
551 	/* Create a fence MW only for kernel consumers */
552 	mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL);
553 	if (IS_ERR(mw)) {
554 		ibdev_err(&rdev->ibdev,
555 			  "Failed to create fence-MW for PD: %p\n", pd);
556 		rc = PTR_ERR(mw);
557 		goto fail;
558 	}
559 	fence->mw = mw;
560 
561 	bnxt_re_create_fence_wqe(pd);
562 	return 0;
563 
564 fail:
565 	bnxt_re_destroy_fence_mr(pd);
566 	return rc;
567 }
568 
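/* Allocate and register an rdma_user_mmap entry for the given mapping
 * type. The shared page is pinned at offset 0; all other types use a
 * page-sized entry at a driver-chosen offset.
 */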
569 static struct bnxt_re_user_mmap_entry*
570 bnxt_re_mmap_entry_insert(struct bnxt_re_ucontext *uctx, u64 mem_offset,
571 			  enum bnxt_re_mmap_flag mmap_flag, u64 *offset)
572 {
573 	struct bnxt_re_user_mmap_entry *entry;
574 	int ret;
575 
576 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
577 	if (!entry)
578 		return NULL;
579 
580 	entry->mem_offset = mem_offset;
581 	entry->mmap_flag = mmap_flag;
582 	entry->uctx = uctx;
583 
584 	switch (mmap_flag) {
585 	case BNXT_RE_MMAP_SH_PAGE:
586 		ret = rdma_user_mmap_entry_insert_exact(&uctx->ib_uctx,
587 							&entry->rdma_entry, PAGE_SIZE, 0);
588 		break;
589 	case BNXT_RE_MMAP_UC_DB:
590 	case BNXT_RE_MMAP_WC_DB:
591 	case BNXT_RE_MMAP_DBR_BAR:
592 	case BNXT_RE_MMAP_DBR_PAGE:
593 	case BNXT_RE_MMAP_TOGGLE_PAGE:
594 		ret = rdma_user_mmap_entry_insert(&uctx->ib_uctx,
595 						  &entry->rdma_entry, PAGE_SIZE);
596 		break;
597 	default:
598 		ret = -EINVAL;
599 		break;
600 	}
601 
602 	if (ret) {
603 		kfree(entry);
604 		return NULL;
605 	}
606 	if (offset)
607 		*offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
608 
609 	return entry;
610 }
611 
612 /* Protection Domains */
613 int bnxt_re_dealloc_pd(struct ib_pd *ib_pd, struct ib_udata *udata)
614 {
615 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
616 	struct bnxt_re_dev *rdev = pd->rdev;
617 
618 	if (udata) {
619 		rdma_user_mmap_entry_remove(pd->pd_db_mmap);
620 		pd->pd_db_mmap = NULL;
621 	}
622 
623 	bnxt_re_destroy_fence_mr(pd);
624 
625 	if (pd->qplib_pd.id) {
626 		if (!bnxt_qplib_dealloc_pd(&rdev->qplib_res,
627 					   &rdev->qplib_res.pd_tbl,
628 					   &pd->qplib_pd))
629 			atomic_dec(&rdev->stats.res.pd_count);
630 	}
631 	return 0;
632 }
633 
634 int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
635 {
636 	struct ib_device *ibdev = ibpd->device;
637 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
638 	struct bnxt_re_ucontext *ucntx = rdma_udata_to_drv_context(
639 		udata, struct bnxt_re_ucontext, ib_uctx);
640 	struct bnxt_re_pd *pd = container_of(ibpd, struct bnxt_re_pd, ib_pd);
641 	struct bnxt_re_user_mmap_entry *entry = NULL;
642 	u32 active_pds;
643 	int rc = 0;
644 
645 	pd->rdev = rdev;
646 	if (bnxt_qplib_alloc_pd(&rdev->qplib_res, &pd->qplib_pd)) {
647 		ibdev_err(&rdev->ibdev, "Failed to allocate HW PD");
648 		rc = -ENOMEM;
649 		goto fail;
650 	}
651 
652 	if (udata) {
653 		struct bnxt_re_pd_resp resp = {};
654 
655 		if (!ucntx->dpi.dbr) {
656 			/* Allocate the DPI in alloc_pd to avoid failures in
657 			 * ibv_devinfo and related applications when DPIs
658 			 * are depleted.
659 			 */
660 			if (bnxt_qplib_alloc_dpi(&rdev->qplib_res,
661 						 &ucntx->dpi, ucntx, BNXT_QPLIB_DPI_TYPE_UC)) {
662 				rc = -ENOMEM;
663 				goto dbfail;
664 			}
665 		}
666 
667 		resp.pdid = pd->qplib_pd.id;
668 		/* Still allow mapping this DBR to the new user PD. */
669 		resp.dpi = ucntx->dpi.dpi;
670 
671 		entry = bnxt_re_mmap_entry_insert(ucntx, (u64)ucntx->dpi.umdbr,
672 						  BNXT_RE_MMAP_UC_DB, &resp.dbr);
673 
674 		if (!entry) {
675 			rc = -ENOMEM;
676 			goto dbfail;
677 		}
678 
679 		pd->pd_db_mmap = &entry->rdma_entry;
680 
681 		rc = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen));
682 		if (rc) {
683 			rdma_user_mmap_entry_remove(pd->pd_db_mmap);
684 			rc = -EFAULT;
685 			goto dbfail;
686 		}
687 	}
688 
689 	if (!udata)
690 		if (bnxt_re_create_fence_mr(pd))
691 			ibdev_warn(&rdev->ibdev,
692 				   "Failed to create Fence-MR\n");
693 	active_pds = atomic_inc_return(&rdev->stats.res.pd_count);
694 	if (active_pds > rdev->stats.res.pd_watermark)
695 		rdev->stats.res.pd_watermark = active_pds;
696 
697 	return 0;
698 dbfail:
699 	bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
700 			      &pd->qplib_pd);
701 fail:
702 	return rc;
703 }
704 
705 /* Address Handles */
706 int bnxt_re_destroy_ah(struct ib_ah *ib_ah, u32 flags)
707 {
708 	struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
709 	struct bnxt_re_dev *rdev = ah->rdev;
710 	bool block = true;
711 	int rc;
712 
713 	block = !(flags & RDMA_DESTROY_AH_SLEEPABLE);
714 	rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah, block);
715 	if (BNXT_RE_CHECK_RC(rc)) {
716 		if (rc == -ETIMEDOUT)
717 			rc = 0;
718 		else
719 			goto fail;
720 	}
721 	atomic_dec(&rdev->stats.res.ah_count);
722 fail:
723 	return rc;
724 }
725 
726 static u8 bnxt_re_stack_to_dev_nw_type(enum rdma_network_type ntype)
727 {
728 	u8 nw_type;
729 
730 	switch (ntype) {
731 	case RDMA_NETWORK_IPV4:
732 		nw_type = CMDQ_CREATE_AH_TYPE_V2IPV4;
733 		break;
734 	case RDMA_NETWORK_IPV6:
735 		nw_type = CMDQ_CREATE_AH_TYPE_V2IPV6;
736 		break;
737 	default:
738 		nw_type = CMDQ_CREATE_AH_TYPE_V1;
739 		break;
740 	}
741 	return nw_type;
742 }
743 
744 int bnxt_re_create_ah(struct ib_ah *ib_ah, struct rdma_ah_init_attr *init_attr,
745 		      struct ib_udata *udata)
746 {
747 	struct ib_pd *ib_pd = ib_ah->pd;
748 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
749 	struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
750 	const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
751 	struct bnxt_re_dev *rdev = pd->rdev;
752 	const struct ib_gid_attr *sgid_attr;
753 	struct bnxt_re_gid_ctx *ctx;
754 	struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
755 	u32 active_ahs;
756 	u8 nw_type;
757 	int rc;
758 
759 	if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) {
760 		ibdev_err(&rdev->ibdev, "Failed to alloc AH: GRH not set");
761 		return -EINVAL;
762 	}
763 
764 	ah->rdev = rdev;
765 	ah->qplib_ah.pd = &pd->qplib_pd;
766 
767 	/* Supply the configuration for the HW */
768 	memcpy(ah->qplib_ah.dgid.data, grh->dgid.raw,
769 	       sizeof(union ib_gid));
770 	sgid_attr = grh->sgid_attr;
771 	/* Get the HW context of the GID. The reference
772 	 * to the GID table entry is already taken by the caller.
773 	 */
774 	ctx = rdma_read_gid_hw_context(sgid_attr);
775 	ah->qplib_ah.sgid_index = ctx->idx;
776 	ah->qplib_ah.host_sgid_index = grh->sgid_index;
777 	ah->qplib_ah.traffic_class = grh->traffic_class;
778 	ah->qplib_ah.flow_label = grh->flow_label;
779 	ah->qplib_ah.hop_limit = grh->hop_limit;
780 	ah->qplib_ah.sl = rdma_ah_get_sl(ah_attr);
781 
782 	/* Get network header type for this GID */
783 	nw_type = rdma_gid_attr_network_type(sgid_attr);
784 	ah->qplib_ah.nw_type = bnxt_re_stack_to_dev_nw_type(nw_type);
785 
786 	memcpy(ah->qplib_ah.dmac, ah_attr->roce.dmac, ETH_ALEN);
787 	rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah,
788 				  !(init_attr->flags &
789 				    RDMA_CREATE_AH_SLEEPABLE));
790 	if (rc) {
791 		ibdev_err(&rdev->ibdev, "Failed to allocate HW AH");
792 		return rc;
793 	}
794 
795 	/* Write AVID to shared page. */
796 	if (udata) {
797 		struct bnxt_re_ucontext *uctx = rdma_udata_to_drv_context(
798 			udata, struct bnxt_re_ucontext, ib_uctx);
799 		unsigned long flag;
800 		u32 *wrptr;
801 
802 		spin_lock_irqsave(&uctx->sh_lock, flag);
803 		wrptr = (u32 *)(uctx->shpg + BNXT_RE_AVID_OFFT);
804 		*wrptr = ah->qplib_ah.id;
805 		wmb(); /* make sure cache is updated. */
806 		spin_unlock_irqrestore(&uctx->sh_lock, flag);
807 	}
808 	active_ahs = atomic_inc_return(&rdev->stats.res.ah_count);
809 	if (active_ahs > rdev->stats.res.ah_watermark)
810 		rdev->stats.res.ah_watermark = active_ahs;
811 
812 	return 0;
813 }
814 
815 int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
816 {
817 	struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
818 
819 	ah_attr->type = ib_ah->type;
820 	rdma_ah_set_sl(ah_attr, ah->qplib_ah.sl);
821 	memcpy(ah_attr->roce.dmac, ah->qplib_ah.dmac, ETH_ALEN);
822 	rdma_ah_set_grh(ah_attr, NULL, 0,
823 			ah->qplib_ah.host_sgid_index,
824 			0, ah->qplib_ah.traffic_class);
825 	rdma_ah_set_dgid_raw(ah_attr, ah->qplib_ah.dgid.data);
826 	rdma_ah_set_port_num(ah_attr, 1);
827 	rdma_ah_set_static_rate(ah_attr, 0);
828 	return 0;
829 }
830 
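/* Take both CQ locks of a QP. The send CQ lock is always taken first;
 * when the send and receive CQs are the same, only the sparse annotation
 * is acquired for the second lock.
 */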
831 unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp)
832 	__acquires(&qp->scq->cq_lock) __acquires(&qp->rcq->cq_lock)
833 {
834 	unsigned long flags;
835 
836 	spin_lock_irqsave(&qp->scq->cq_lock, flags);
837 	if (qp->rcq != qp->scq)
838 		spin_lock(&qp->rcq->cq_lock);
839 	else
840 		__acquire(&qp->rcq->cq_lock);
841 
842 	return flags;
843 }
844 
845 void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
846 			unsigned long flags)
847 	__releases(&qp->scq->cq_lock) __releases(&qp->rcq->cq_lock)
848 {
849 	if (qp->rcq != qp->scq)
850 		spin_unlock(&qp->rcq->cq_lock);
851 	else
852 		__release(&qp->rcq->cq_lock);
853 	spin_unlock_irqrestore(&qp->scq->cq_lock, flags);
854 }
855 
856 static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
857 {
858 	struct bnxt_re_qp *gsi_sqp;
859 	struct bnxt_re_ah *gsi_sah;
860 	struct bnxt_re_dev *rdev;
861 	int rc;
862 
863 	rdev = qp->rdev;
864 	gsi_sqp = rdev->gsi_ctx.gsi_sqp;
865 	gsi_sah = rdev->gsi_ctx.gsi_sah;
866 
867 	ibdev_dbg(&rdev->ibdev, "Destroy the shadow AH\n");
868 	bnxt_qplib_destroy_ah(&rdev->qplib_res,
869 			      &gsi_sah->qplib_ah,
870 			      true);
871 	atomic_dec(&rdev->stats.res.ah_count);
872 	bnxt_qplib_clean_qp(&qp->qplib_qp);
873 
874 	ibdev_dbg(&rdev->ibdev, "Destroy the shadow QP\n");
875 	rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &gsi_sqp->qplib_qp);
876 	if (rc) {
877 		ibdev_err(&rdev->ibdev, "Destroy Shadow QP failed");
878 		goto fail;
879 	}
880 	bnxt_qplib_free_qp_res(&rdev->qplib_res, &gsi_sqp->qplib_qp);
881 
882 	/* remove from active qp list */
883 	mutex_lock(&rdev->qp_lock);
884 	list_del(&gsi_sqp->list);
885 	mutex_unlock(&rdev->qp_lock);
886 	atomic_dec(&rdev->stats.res.qp_count);
887 
888 	kfree(rdev->gsi_ctx.sqp_tbl);
889 	kfree(gsi_sah);
890 	kfree(gsi_sqp);
891 	rdev->gsi_ctx.gsi_sqp = NULL;
892 	rdev->gsi_ctx.gsi_sah = NULL;
893 	rdev->gsi_ctx.sqp_tbl = NULL;
894 
895 	return 0;
896 fail:
897 	return rc;
898 }
899 
900 /* Queue Pairs */
901 int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
902 {
903 	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
904 	struct bnxt_qplib_qp *qplib_qp = &qp->qplib_qp;
905 	struct bnxt_re_dev *rdev = qp->rdev;
906 	struct bnxt_qplib_nq *scq_nq = NULL;
907 	struct bnxt_qplib_nq *rcq_nq = NULL;
908 	unsigned int flags;
909 	int rc;
910 
911 	bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);
912 
913 	rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
914 	if (rc) {
915 		ibdev_err(&rdev->ibdev, "Failed to destroy HW QP");
916 		return rc;
917 	}
918 
919 	if (rdma_is_kernel_res(&qp->ib_qp.res)) {
920 		flags = bnxt_re_lock_cqs(qp);
921 		bnxt_qplib_clean_qp(&qp->qplib_qp);
922 		bnxt_re_unlock_cqs(qp, flags);
923 	}
924 
925 	bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp);
926 
927 	if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp) {
928 		rc = bnxt_re_destroy_gsi_sqp(qp);
929 		if (rc)
930 			return rc;
931 	}
932 
933 	mutex_lock(&rdev->qp_lock);
934 	list_del(&qp->list);
935 	mutex_unlock(&rdev->qp_lock);
936 	atomic_dec(&rdev->stats.res.qp_count);
937 	if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_RC)
938 		atomic_dec(&rdev->stats.res.rc_qp_count);
939 	else if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD)
940 		atomic_dec(&rdev->stats.res.ud_qp_count);
941 
942 	ib_umem_release(qp->rumem);
943 	ib_umem_release(qp->sumem);
944 
945 	/* Flush all the entries of the notification queue associated
946 	 * with the given QP.
947 	 */
948 	scq_nq = qplib_qp->scq->nq;
949 	rcq_nq = qplib_qp->rcq->nq;
950 	bnxt_re_synchronize_nq(scq_nq);
951 	if (scq_nq != rcq_nq)
952 		bnxt_re_synchronize_nq(rcq_nq);
953 
954 	return 0;
955 }
956 
957 static u8 __from_ib_qp_type(enum ib_qp_type type)
958 {
959 	switch (type) {
960 	case IB_QPT_GSI:
961 		return CMDQ_CREATE_QP1_TYPE_GSI;
962 	case IB_QPT_RC:
963 		return CMDQ_CREATE_QP_TYPE_RC;
964 	case IB_QPT_UD:
965 		return CMDQ_CREATE_QP_TYPE_UD;
966 	default:
967 		return IB_QPT_MAX;
968 	}
969 }
970 
971 static u16 bnxt_re_setup_rwqe_size(struct bnxt_qplib_qp *qplqp,
972 				   int rsge, int max)
973 {
974 	if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
975 		rsge = max;
976 	return bnxt_re_get_rwqe_size(rsge);
977 }
978 
979 static u16 bnxt_re_get_wqe_size(int ilsize, int nsge)
980 {
981 	u16 wqe_size, calc_ils;
982 
983 	wqe_size = bnxt_re_get_swqe_size(nsge);
984 	if (ilsize) {
985 		calc_ils = sizeof(struct sq_send_hdr) + ilsize;
986 		wqe_size = max_t(u16, calc_ils, wqe_size);
987 		wqe_size = ALIGN(wqe_size, sizeof(struct sq_send_hdr));
988 	}
989 	return wqe_size;
990 }
991 
992 static int bnxt_re_setup_swqe_size(struct bnxt_re_qp *qp,
993 				   struct ib_qp_init_attr *init_attr)
994 {
995 	struct bnxt_qplib_dev_attr *dev_attr;
996 	struct bnxt_qplib_qp *qplqp;
997 	struct bnxt_re_dev *rdev;
998 	struct bnxt_qplib_q *sq;
999 	int align, ilsize;
1000 
1001 	rdev = qp->rdev;
1002 	qplqp = &qp->qplib_qp;
1003 	sq = &qplqp->sq;
1004 	dev_attr = rdev->dev_attr;
1005 
1006 	align = sizeof(struct sq_send_hdr);
1007 	ilsize = ALIGN(init_attr->cap.max_inline_data, align);
1008 
1009 	/* For Gen P4 and Gen P5 fixed-WQE compatibility mode,
1010 	 * the WQE size is fixed at 128 bytes, i.e. 6 SGEs.
1011 	 */
1012 	if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) {
1013 		sq->wqe_size = bnxt_re_get_swqe_size(BNXT_STATIC_MAX_SGE);
1014 		sq->max_sge = BNXT_STATIC_MAX_SGE;
1015 	} else {
1016 		sq->wqe_size = bnxt_re_get_wqe_size(ilsize, sq->max_sge);
1017 		if (sq->wqe_size > bnxt_re_get_swqe_size(dev_attr->max_qp_sges))
1018 			return -EINVAL;
1019 	}
1020 
1021 	if (init_attr->cap.max_inline_data) {
1022 		qplqp->max_inline_data = sq->wqe_size -
1023 			sizeof(struct sq_send_hdr);
1024 		init_attr->cap.max_inline_data = qplqp->max_inline_data;
1025 	}
1026 
1027 	return 0;
1028 }
1029 
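/* Map the user-space SQ/RQ buffers for a user QP. For RC QPs, PSN search
 * memory is appended to the SQ umem; the RQ is only mapped when no SRQ
 * is attached.
 */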
1030 static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
1031 				struct bnxt_re_qp *qp, struct bnxt_re_ucontext *cntx,
1032 				struct bnxt_re_qp_req *ureq)
1033 {
1034 	struct bnxt_qplib_qp *qplib_qp;
1035 	int bytes = 0, psn_sz;
1036 	struct ib_umem *umem;
1037 	int psn_nume;
1038 
1039 	qplib_qp = &qp->qplib_qp;
1040 
1041 	bytes = (qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size);
1042 	/* Consider mapping PSN search memory only for RC QPs. */
1043 	if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC) {
1044 		psn_sz = bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx) ?
1045 						   sizeof(struct sq_psn_search_ext) :
1046 						   sizeof(struct sq_psn_search);
1047 		if (cntx && bnxt_re_is_var_size_supported(rdev, cntx)) {
1048 			psn_nume = ureq->sq_slots;
1049 		} else {
1050 			psn_nume = (qplib_qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
1051 			qplib_qp->sq.max_wqe : ((qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size) /
1052 				 sizeof(struct bnxt_qplib_sge));
1053 		}
1054 		if (_is_host_msn_table(rdev->qplib_res.dattr->dev_cap_flags2))
1055 			psn_nume = roundup_pow_of_two(psn_nume);
1056 		bytes += (psn_nume * psn_sz);
1057 	}
1058 
1059 	bytes = PAGE_ALIGN(bytes);
1060 	umem = ib_umem_get(&rdev->ibdev, ureq->qpsva, bytes,
1061 			   IB_ACCESS_LOCAL_WRITE);
1062 	if (IS_ERR(umem))
1063 		return PTR_ERR(umem);
1064 
1065 	qp->sumem = umem;
1066 	qplib_qp->sq.sg_info.umem = umem;
1067 	qplib_qp->sq.sg_info.pgsize = PAGE_SIZE;
1068 	qplib_qp->sq.sg_info.pgshft = PAGE_SHIFT;
1069 	qplib_qp->qp_handle = ureq->qp_handle;
1070 
1071 	if (!qp->qplib_qp.srq) {
1072 		bytes = (qplib_qp->rq.max_wqe * qplib_qp->rq.wqe_size);
1073 		bytes = PAGE_ALIGN(bytes);
1074 		umem = ib_umem_get(&rdev->ibdev, ureq->qprva, bytes,
1075 				   IB_ACCESS_LOCAL_WRITE);
1076 		if (IS_ERR(umem))
1077 			goto rqfail;
1078 		qp->rumem = umem;
1079 		qplib_qp->rq.sg_info.umem = umem;
1080 		qplib_qp->rq.sg_info.pgsize = PAGE_SIZE;
1081 		qplib_qp->rq.sg_info.pgshft = PAGE_SHIFT;
1082 	}
1083 
1084 	qplib_qp->dpi = &cntx->dpi;
1085 	return 0;
1086 rqfail:
1087 	ib_umem_release(qp->sumem);
1088 	qp->sumem = NULL;
1089 	memset(&qplib_qp->sq.sg_info, 0, sizeof(qplib_qp->sq.sg_info));
1090 
1091 	return PTR_ERR(umem);
1092 }
1093 
1094 static struct bnxt_re_ah *bnxt_re_create_shadow_qp_ah
1095 				(struct bnxt_re_pd *pd,
1096 				 struct bnxt_qplib_res *qp1_res,
1097 				 struct bnxt_qplib_qp *qp1_qp)
1098 {
1099 	struct bnxt_re_dev *rdev = pd->rdev;
1100 	struct bnxt_re_ah *ah;
1101 	union ib_gid sgid;
1102 	int rc;
1103 
1104 	ah = kzalloc(sizeof(*ah), GFP_KERNEL);
1105 	if (!ah)
1106 		return NULL;
1107 
1108 	ah->rdev = rdev;
1109 	ah->qplib_ah.pd = &pd->qplib_pd;
1110 
1111 	rc = bnxt_re_query_gid(&rdev->ibdev, 1, 0, &sgid);
1112 	if (rc)
1113 		goto fail;
1114 
1115 	/* Supply the same data for dgid as for sgid */
1116 	memcpy(ah->qplib_ah.dgid.data, &sgid.raw,
1117 	       sizeof(union ib_gid));
1118 	ah->qplib_ah.sgid_index = 0;
1119 
1120 	ah->qplib_ah.traffic_class = 0;
1121 	ah->qplib_ah.flow_label = 0;
1122 	ah->qplib_ah.hop_limit = 1;
1123 	ah->qplib_ah.sl = 0;
1124 	/* Have DMAC same as SMAC */
1125 	ether_addr_copy(ah->qplib_ah.dmac, rdev->netdev->dev_addr);
1126 
1127 	rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah, false);
1128 	if (rc) {
1129 		ibdev_err(&rdev->ibdev,
1130 			  "Failed to allocate HW AH for Shadow QP");
1131 		goto fail;
1132 	}
1133 	atomic_inc(&rdev->stats.res.ah_count);
1134 
1135 	return ah;
1136 
1137 fail:
1138 	kfree(ah);
1139 	return NULL;
1140 }
1141 
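/* Create the kernel UD "shadow" QP used to relay QP1 traffic. Its queue
 * depths mirror the QP1 RQ depth and it shares the QP1 completion queues.
 */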
1142 static struct bnxt_re_qp *bnxt_re_create_shadow_qp
1143 				(struct bnxt_re_pd *pd,
1144 				 struct bnxt_qplib_res *qp1_res,
1145 				 struct bnxt_qplib_qp *qp1_qp)
1146 {
1147 	struct bnxt_re_dev *rdev = pd->rdev;
1148 	struct bnxt_re_qp *qp;
1149 	int rc;
1150 
1151 	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1152 	if (!qp)
1153 		return NULL;
1154 
1155 	qp->rdev = rdev;
1156 
1157 	/* Initialize the shadow QP structure from the QP1 values */
1158 	ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);
1159 
1160 	qp->qplib_qp.pd = &pd->qplib_pd;
1161 	qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
1162 	qp->qplib_qp.type = IB_QPT_UD;
1163 
1164 	qp->qplib_qp.max_inline_data = 0;
1165 	qp->qplib_qp.sig_type = true;
1166 
1167 	/* Shadow QP SQ depth should be the same as the QP1 RQ depth */
1168 	qp->qplib_qp.sq.wqe_size = bnxt_re_get_wqe_size(0, 6);
1169 	qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
1170 	qp->qplib_qp.sq.max_sw_wqe = qp1_qp->rq.max_wqe;
1171 	qp->qplib_qp.sq.max_sge = 2;
1172 	/* Q full delta can be 1 since it is an internal QP */
1173 	qp->qplib_qp.sq.q_full_delta = 1;
1174 	qp->qplib_qp.sq.sg_info.pgsize = PAGE_SIZE;
1175 	qp->qplib_qp.sq.sg_info.pgshft = PAGE_SHIFT;
1176 
1177 	qp->qplib_qp.scq = qp1_qp->scq;
1178 	qp->qplib_qp.rcq = qp1_qp->rcq;
1179 
1180 	qp->qplib_qp.rq.wqe_size = bnxt_re_get_rwqe_size(6);
1181 	qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
1182 	qp->qplib_qp.rq.max_sw_wqe = qp1_qp->rq.max_wqe;
1183 	qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
1184 	/* Q full delta can be 1 since it is an internal QP */
1185 	qp->qplib_qp.rq.q_full_delta = 1;
1186 	qp->qplib_qp.rq.sg_info.pgsize = PAGE_SIZE;
1187 	qp->qplib_qp.rq.sg_info.pgshft = PAGE_SHIFT;
1188 
1189 	qp->qplib_qp.mtu = qp1_qp->mtu;
1190 
1191 	qp->qplib_qp.sq_hdr_buf_size = 0;
1192 	qp->qplib_qp.rq_hdr_buf_size = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
1193 	qp->qplib_qp.dpi = &rdev->dpi_privileged;
1194 
1195 	rc = bnxt_qplib_create_qp(qp1_res, &qp->qplib_qp);
1196 	if (rc)
1197 		goto fail;
1198 
1199 	spin_lock_init(&qp->sq_lock);
1200 	INIT_LIST_HEAD(&qp->list);
1201 	mutex_lock(&rdev->qp_lock);
1202 	list_add_tail(&qp->list, &rdev->qp_list);
1203 	atomic_inc(&rdev->stats.res.qp_count);
1204 	mutex_unlock(&rdev->qp_lock);
1205 	return qp;
1206 fail:
1207 	kfree(qp);
1208 	return NULL;
1209 }
1210 
1211 static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp,
1212 				struct ib_qp_init_attr *init_attr,
1213 				struct bnxt_re_ucontext *uctx)
1214 {
1215 	struct bnxt_qplib_dev_attr *dev_attr;
1216 	struct bnxt_qplib_qp *qplqp;
1217 	struct bnxt_re_dev *rdev;
1218 	struct bnxt_qplib_q *rq;
1219 	int entries;
1220 
1221 	rdev = qp->rdev;
1222 	qplqp = &qp->qplib_qp;
1223 	rq = &qplqp->rq;
1224 	dev_attr = rdev->dev_attr;
1225 
1226 	if (init_attr->srq) {
1227 		struct bnxt_re_srq *srq;
1228 
1229 		srq = container_of(init_attr->srq, struct bnxt_re_srq, ib_srq);
1230 		qplqp->srq = &srq->qplib_srq;
1231 		rq->max_wqe = 0;
1232 	} else {
1233 		rq->max_sge = init_attr->cap.max_recv_sge;
1234 		if (rq->max_sge > dev_attr->max_qp_sges)
1235 			rq->max_sge = dev_attr->max_qp_sges;
1236 		init_attr->cap.max_recv_sge = rq->max_sge;
1237 		rq->wqe_size = bnxt_re_setup_rwqe_size(qplqp, rq->max_sge,
1238 						       dev_attr->max_qp_sges);
1239 		/* Allocate 1 more than what's provided so posting max doesn't
1240 		 * mean empty.
1241 		 */
1242 		entries = bnxt_re_init_depth(init_attr->cap.max_recv_wr + 1, uctx);
1243 		rq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + 1);
1244 		rq->max_sw_wqe = rq->max_wqe;
1245 		rq->q_full_delta = 0;
1246 		rq->sg_info.pgsize = PAGE_SIZE;
1247 		rq->sg_info.pgshft = PAGE_SHIFT;
1248 	}
1249 
1250 	return 0;
1251 }
1252 
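/* On chips that are not Gen P5/P7, the GSI QP RQ SGE count is forced to 6. */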
1253 static void bnxt_re_adjust_gsi_rq_attr(struct bnxt_re_qp *qp)
1254 {
1255 	struct bnxt_qplib_dev_attr *dev_attr;
1256 	struct bnxt_qplib_qp *qplqp;
1257 	struct bnxt_re_dev *rdev;
1258 
1259 	rdev = qp->rdev;
1260 	qplqp = &qp->qplib_qp;
1261 	dev_attr = rdev->dev_attr;
1262 
1263 	if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) {
1264 		qplqp->rq.max_sge = dev_attr->max_qp_sges;
1265 		if (qplqp->rq.max_sge > dev_attr->max_qp_sges)
1266 			qplqp->rq.max_sge = dev_attr->max_qp_sges;
1267 		qplqp->rq.max_sge = 6;
1268 	}
1269 }
1270 
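/* Size the SQ. In variable-WQE mode for user QPs the slot count comes from
 * the user request; otherwise the WQE size is derived from the SGE count
 * and inline data, and reserved slots are added in fixed-WQE mode.
 */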
1271 static int bnxt_re_init_sq_attr(struct bnxt_re_qp *qp,
1272 				struct ib_qp_init_attr *init_attr,
1273 				struct bnxt_re_ucontext *uctx,
1274 				struct bnxt_re_qp_req *ureq)
1275 {
1276 	struct bnxt_qplib_dev_attr *dev_attr;
1277 	struct bnxt_qplib_qp *qplqp;
1278 	struct bnxt_re_dev *rdev;
1279 	struct bnxt_qplib_q *sq;
1280 	int diff = 0;
1281 	int entries;
1282 	int rc;
1283 
1284 	rdev = qp->rdev;
1285 	qplqp = &qp->qplib_qp;
1286 	sq = &qplqp->sq;
1287 	dev_attr = rdev->dev_attr;
1288 
1289 	sq->max_sge = init_attr->cap.max_send_sge;
1290 	entries = init_attr->cap.max_send_wr;
1291 	if (uctx && qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE) {
1292 		sq->max_wqe = ureq->sq_slots;
1293 		sq->max_sw_wqe = ureq->sq_slots;
1294 		sq->wqe_size = sizeof(struct sq_sge);
1295 	} else {
1296 		if (sq->max_sge > dev_attr->max_qp_sges) {
1297 			sq->max_sge = dev_attr->max_qp_sges;
1298 			init_attr->cap.max_send_sge = sq->max_sge;
1299 		}
1300 
1301 		rc = bnxt_re_setup_swqe_size(qp, init_attr);
1302 		if (rc)
1303 			return rc;
1304 
1305 		/* Allocate 128 + 1 more than what's provided */
1306 		diff = (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE) ?
1307 			0 : BNXT_QPLIB_RESERVED_QP_WRS;
1308 		entries = bnxt_re_init_depth(entries + diff + 1, uctx);
1309 		sq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + diff + 1);
1310 		if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
1311 			sq->max_sw_wqe = bnxt_qplib_get_depth(sq, qplqp->wqe_mode, true);
1312 		else
1313 			sq->max_sw_wqe = sq->max_wqe;
1314 
1315 	}
1316 	sq->q_full_delta = diff + 1;
1317 	/*
1318 	 * Reserve one slot for the phantom WQE. The application can
1319 	 * post one extra entry in this case, but this is allowed to
1320 	 * avoid an unexpected queue-full condition.
1321 	 */
1322 	qplqp->sq.q_full_delta -= 1;
1323 	qplqp->sq.sg_info.pgsize = PAGE_SIZE;
1324 	qplqp->sq.sg_info.pgshft = PAGE_SHIFT;
1325 
1326 	return 0;
1327 }
1328 
1329 static void bnxt_re_adjust_gsi_sq_attr(struct bnxt_re_qp *qp,
1330 				       struct ib_qp_init_attr *init_attr,
1331 				       struct bnxt_re_ucontext *uctx)
1332 {
1333 	struct bnxt_qplib_dev_attr *dev_attr;
1334 	struct bnxt_qplib_qp *qplqp;
1335 	struct bnxt_re_dev *rdev;
1336 	int entries;
1337 
1338 	rdev = qp->rdev;
1339 	qplqp = &qp->qplib_qp;
1340 	dev_attr = rdev->dev_attr;
1341 
1342 	if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) {
1343 		entries = bnxt_re_init_depth(init_attr->cap.max_send_wr + 1, uctx);
1344 		qplqp->sq.max_wqe = min_t(u32, entries,
1345 					  dev_attr->max_qp_wqes + 1);
1346 		qplqp->sq.q_full_delta = qplqp->sq.max_wqe -
1347 			init_attr->cap.max_send_wr;
1348 		qplqp->sq.max_sge++; /* Need one extra sge to put UD header */
1349 		if (qplqp->sq.max_sge > dev_attr->max_qp_sges)
1350 			qplqp->sq.max_sge = dev_attr->max_qp_sges;
1351 	}
1352 }
1353 
1354 static int bnxt_re_init_qp_type(struct bnxt_re_dev *rdev,
1355 				struct ib_qp_init_attr *init_attr)
1356 {
1357 	struct bnxt_qplib_chip_ctx *chip_ctx;
1358 	int qptype;
1359 
1360 	chip_ctx = rdev->chip_ctx;
1361 
1362 	qptype = __from_ib_qp_type(init_attr->qp_type);
1363 	if (qptype == IB_QPT_MAX) {
1364 		ibdev_err(&rdev->ibdev, "QP type 0x%x not supported", qptype);
1365 		qptype = -EOPNOTSUPP;
1366 		goto out;
1367 	}
1368 
1369 	if (bnxt_qplib_is_chip_gen_p5_p7(chip_ctx) &&
1370 	    init_attr->qp_type == IB_QPT_GSI)
1371 		qptype = CMDQ_CREATE_QP_TYPE_GSI;
1372 out:
1373 	return qptype;
1374 }
1375 
1376 static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
1377 				struct ib_qp_init_attr *init_attr,
1378 				struct bnxt_re_ucontext *uctx,
1379 				struct bnxt_re_qp_req *ureq)
1380 {
1381 	struct bnxt_qplib_dev_attr *dev_attr;
1382 	struct bnxt_qplib_qp *qplqp;
1383 	struct bnxt_re_dev *rdev;
1384 	struct bnxt_re_cq *cq;
1385 	int rc = 0, qptype;
1386 
1387 	rdev = qp->rdev;
1388 	qplqp = &qp->qplib_qp;
1389 	dev_attr = rdev->dev_attr;
1390 
1391 	/* Setup misc params */
1392 	ether_addr_copy(qplqp->smac, rdev->netdev->dev_addr);
1393 	qplqp->pd = &pd->qplib_pd;
1394 	qplqp->qp_handle = (u64)qplqp;
1395 	qplqp->max_inline_data = init_attr->cap.max_inline_data;
1396 	qplqp->sig_type = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;
1397 	qptype = bnxt_re_init_qp_type(rdev, init_attr);
1398 	if (qptype < 0) {
1399 		rc = qptype;
1400 		goto out;
1401 	}
1402 	qplqp->type = (u8)qptype;
1403 	qplqp->wqe_mode = bnxt_re_is_var_size_supported(rdev, uctx);
1404 	if (init_attr->qp_type == IB_QPT_RC) {
1405 		qplqp->max_rd_atomic = dev_attr->max_qp_rd_atom;
1406 		qplqp->max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
1407 	}
1408 	qplqp->mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
1409 	qplqp->dpi = &rdev->dpi_privileged; /* Doorbell page */
1410 	if (init_attr->create_flags) {
1411 		ibdev_dbg(&rdev->ibdev,
1412 			  "QP create flags 0x%x not supported",
1413 			  init_attr->create_flags);
1414 		return -EOPNOTSUPP;
1415 	}
1416 
1417 	/* Setup CQs */
1418 	if (init_attr->send_cq) {
1419 		cq = container_of(init_attr->send_cq, struct bnxt_re_cq, ib_cq);
1420 		qplqp->scq = &cq->qplib_cq;
1421 		qp->scq = cq;
1422 	}
1423 
1424 	if (init_attr->recv_cq) {
1425 		cq = container_of(init_attr->recv_cq, struct bnxt_re_cq, ib_cq);
1426 		qplqp->rcq = &cq->qplib_cq;
1427 		qp->rcq = cq;
1428 	}
1429 
1430 	/* Setup RQ/SRQ */
1431 	rc = bnxt_re_init_rq_attr(qp, init_attr, uctx);
1432 	if (rc)
1433 		goto out;
1434 	if (init_attr->qp_type == IB_QPT_GSI)
1435 		bnxt_re_adjust_gsi_rq_attr(qp);
1436 
1437 	/* Setup SQ */
1438 	rc = bnxt_re_init_sq_attr(qp, init_attr, uctx, ureq);
1439 	if (rc)
1440 		goto out;
1441 	if (init_attr->qp_type == IB_QPT_GSI)
1442 		bnxt_re_adjust_gsi_sq_attr(qp, init_attr, uctx);
1443 
1444 	if (uctx) /* This will update DPI and qp_handle */
1445 		rc = bnxt_re_init_user_qp(rdev, pd, qp, uctx, ureq);
1446 out:
1447 	return rc;
1448 }
1449 
1450 static int bnxt_re_create_shadow_gsi(struct bnxt_re_qp *qp,
1451 				     struct bnxt_re_pd *pd)
1452 {
1453 	struct bnxt_re_sqp_entries *sqp_tbl;
1454 	struct bnxt_re_dev *rdev;
1455 	struct bnxt_re_qp *sqp;
1456 	struct bnxt_re_ah *sah;
1457 	int rc = 0;
1458 
1459 	rdev = qp->rdev;
1460 	/* Create a shadow QP to handle the QP1 traffic */
1461 	sqp_tbl = kcalloc(BNXT_RE_MAX_GSI_SQP_ENTRIES, sizeof(*sqp_tbl),
1462 			  GFP_KERNEL);
1463 	if (!sqp_tbl)
1464 		return -ENOMEM;
1465 	rdev->gsi_ctx.sqp_tbl = sqp_tbl;
1466 
1467 	sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res, &qp->qplib_qp);
1468 	if (!sqp) {
1469 		rc = -ENODEV;
1470 		ibdev_err(&rdev->ibdev, "Failed to create Shadow QP for QP1");
1471 		goto out;
1472 	}
1473 	rdev->gsi_ctx.gsi_sqp = sqp;
1474 
1475 	sqp->rcq = qp->rcq;
1476 	sqp->scq = qp->scq;
1477 	sah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res,
1478 					  &qp->qplib_qp);
1479 	if (!sah) {
1480 		bnxt_qplib_destroy_qp(&rdev->qplib_res,
1481 				      &sqp->qplib_qp);
1482 		rc = -ENODEV;
1483 		ibdev_err(&rdev->ibdev,
1484 			  "Failed to create AH entry for ShadowQP");
1485 		goto out;
1486 	}
1487 	rdev->gsi_ctx.gsi_sah = sah;
1488 
1489 	return 0;
1490 out:
1491 	kfree(sqp_tbl);
1492 	return rc;
1493 }
1494 
1495 static int bnxt_re_create_gsi_qp(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
1496 				 struct ib_qp_init_attr *init_attr)
1497 {
1498 	struct bnxt_re_dev *rdev;
1499 	struct bnxt_qplib_qp *qplqp;
1500 	int rc;
1501 
1502 	rdev = qp->rdev;
1503 	qplqp = &qp->qplib_qp;
1504 
1505 	qplqp->rq_hdr_buf_size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
1506 	qplqp->sq_hdr_buf_size = BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2;
1507 
1508 	rc = bnxt_qplib_create_qp1(&rdev->qplib_res, qplqp);
1509 	if (rc) {
1510 		ibdev_err(&rdev->ibdev, "create HW QP1 failed!");
1511 		goto out;
1512 	}
1513 
1514 	rc = bnxt_re_create_shadow_gsi(qp, pd);
1515 out:
1516 	return rc;
1517 }
1518 
1519 static bool bnxt_re_test_qp_limits(struct bnxt_re_dev *rdev,
1520 				   struct ib_qp_init_attr *init_attr,
1521 				   struct bnxt_qplib_dev_attr *dev_attr)
1522 {
1523 	bool rc = true;
1524 
1525 	if (init_attr->cap.max_send_wr > dev_attr->max_qp_wqes ||
1526 	    init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes ||
1527 	    init_attr->cap.max_send_sge > dev_attr->max_qp_sges ||
1528 	    init_attr->cap.max_recv_sge > dev_attr->max_qp_sges ||
1529 	    init_attr->cap.max_inline_data > dev_attr->max_inline_data) {
1530 		ibdev_err(&rdev->ibdev,
1531 			  "Create QP failed - max exceeded! 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x",
1532 			  init_attr->cap.max_send_wr, dev_attr->max_qp_wqes,
1533 			  init_attr->cap.max_recv_wr, dev_attr->max_qp_wqes,
1534 			  init_attr->cap.max_send_sge, dev_attr->max_qp_sges,
1535 			  init_attr->cap.max_recv_sge, dev_attr->max_qp_sges,
1536 			  init_attr->cap.max_inline_data,
1537 			  dev_attr->max_inline_data);
1538 		rc = false;
1539 	}
1540 	return rc;
1541 }
1542 
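/* Main QP create entry point: validate the requested limits, initialize the
 * qplib QP attributes (including user memory for user QPs), create either a
 * GSI QP (with shadow QP on chips that are not Gen P5/P7) or a regular HW
 * QP, and update the per-type QP counters and watermarks.
 */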
1543 int bnxt_re_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *qp_init_attr,
1544 		      struct ib_udata *udata)
1545 {
1546 	struct bnxt_qplib_dev_attr *dev_attr;
1547 	struct bnxt_re_ucontext *uctx;
1548 	struct bnxt_re_qp_req ureq;
1549 	struct bnxt_re_dev *rdev;
1550 	struct bnxt_re_pd *pd;
1551 	struct bnxt_re_qp *qp;
1552 	struct ib_pd *ib_pd;
1553 	u32 active_qps;
1554 	int rc;
1555 
1556 	ib_pd = ib_qp->pd;
1557 	pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
1558 	rdev = pd->rdev;
1559 	dev_attr = rdev->dev_attr;
1560 	qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
1561 
1562 	uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
1563 	if (udata)
1564 		if (ib_copy_from_udata(&ureq, udata,  min(udata->inlen, sizeof(ureq))))
1565 			return -EFAULT;
1566 
1567 	rc = bnxt_re_test_qp_limits(rdev, qp_init_attr, dev_attr);
1568 	if (!rc) {
1569 		rc = -EINVAL;
1570 		goto fail;
1571 	}
1572 
1573 	qp->rdev = rdev;
1574 	rc = bnxt_re_init_qp_attr(qp, pd, qp_init_attr, uctx, &ureq);
1575 	if (rc)
1576 		goto fail;
1577 
1578 	if (qp_init_attr->qp_type == IB_QPT_GSI &&
1579 	    !(bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))) {
1580 		rc = bnxt_re_create_gsi_qp(qp, pd, qp_init_attr);
1581 		if (rc == -ENODEV)
1582 			goto qp_destroy;
1583 		if (rc)
1584 			goto fail;
1585 	} else {
1586 		rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp);
1587 		if (rc) {
1588 			ibdev_err(&rdev->ibdev, "Failed to create HW QP");
1589 			goto free_umem;
1590 		}
1591 		if (udata) {
1592 			struct bnxt_re_qp_resp resp;
1593 
1594 			resp.qpid = qp->qplib_qp.id;
1595 			resp.rsvd = 0;
1596 			rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
1597 			if (rc) {
1598 				ibdev_err(&rdev->ibdev, "Failed to copy QP udata");
1599 				goto qp_destroy;
1600 			}
1601 		}
1602 	}
1603 
1604 	qp->ib_qp.qp_num = qp->qplib_qp.id;
1605 	if (qp_init_attr->qp_type == IB_QPT_GSI)
1606 		rdev->gsi_ctx.gsi_qp = qp;
1607 	spin_lock_init(&qp->sq_lock);
1608 	spin_lock_init(&qp->rq_lock);
1609 	INIT_LIST_HEAD(&qp->list);
1610 	mutex_lock(&rdev->qp_lock);
1611 	list_add_tail(&qp->list, &rdev->qp_list);
1612 	mutex_unlock(&rdev->qp_lock);
1613 	active_qps = atomic_inc_return(&rdev->stats.res.qp_count);
1614 	if (active_qps > rdev->stats.res.qp_watermark)
1615 		rdev->stats.res.qp_watermark = active_qps;
1616 	if (qp_init_attr->qp_type == IB_QPT_RC) {
1617 		active_qps = atomic_inc_return(&rdev->stats.res.rc_qp_count);
1618 		if (active_qps > rdev->stats.res.rc_qp_watermark)
1619 			rdev->stats.res.rc_qp_watermark = active_qps;
1620 	} else if (qp_init_attr->qp_type == IB_QPT_UD) {
1621 		active_qps = atomic_inc_return(&rdev->stats.res.ud_qp_count);
1622 		if (active_qps > rdev->stats.res.ud_qp_watermark)
1623 			rdev->stats.res.ud_qp_watermark = active_qps;
1624 	}
1625 
1626 	return 0;
1627 qp_destroy:
1628 	bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
1629 free_umem:
1630 	ib_umem_release(qp->rumem);
1631 	ib_umem_release(qp->sumem);
1632 fail:
1633 	return rc;
1634 }
1635 
1636 static u8 __from_ib_qp_state(enum ib_qp_state state)
1637 {
1638 	switch (state) {
1639 	case IB_QPS_RESET:
1640 		return CMDQ_MODIFY_QP_NEW_STATE_RESET;
1641 	case IB_QPS_INIT:
1642 		return CMDQ_MODIFY_QP_NEW_STATE_INIT;
1643 	case IB_QPS_RTR:
1644 		return CMDQ_MODIFY_QP_NEW_STATE_RTR;
1645 	case IB_QPS_RTS:
1646 		return CMDQ_MODIFY_QP_NEW_STATE_RTS;
1647 	case IB_QPS_SQD:
1648 		return CMDQ_MODIFY_QP_NEW_STATE_SQD;
1649 	case IB_QPS_SQE:
1650 		return CMDQ_MODIFY_QP_NEW_STATE_SQE;
1651 	case IB_QPS_ERR:
1652 	default:
1653 		return CMDQ_MODIFY_QP_NEW_STATE_ERR;
1654 	}
1655 }
1656 
1657 static enum ib_qp_state __to_ib_qp_state(u8 state)
1658 {
1659 	switch (state) {
1660 	case CMDQ_MODIFY_QP_NEW_STATE_RESET:
1661 		return IB_QPS_RESET;
1662 	case CMDQ_MODIFY_QP_NEW_STATE_INIT:
1663 		return IB_QPS_INIT;
1664 	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
1665 		return IB_QPS_RTR;
1666 	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
1667 		return IB_QPS_RTS;
1668 	case CMDQ_MODIFY_QP_NEW_STATE_SQD:
1669 		return IB_QPS_SQD;
1670 	case CMDQ_MODIFY_QP_NEW_STATE_SQE:
1671 		return IB_QPS_SQE;
1672 	case CMDQ_MODIFY_QP_NEW_STATE_ERR:
1673 	default:
1674 		return IB_QPS_ERR;
1675 	}
1676 }
1677 
1678 static u32 __from_ib_mtu(enum ib_mtu mtu)
1679 {
1680 	switch (mtu) {
1681 	case IB_MTU_256:
1682 		return CMDQ_MODIFY_QP_PATH_MTU_MTU_256;
1683 	case IB_MTU_512:
1684 		return CMDQ_MODIFY_QP_PATH_MTU_MTU_512;
1685 	case IB_MTU_1024:
1686 		return CMDQ_MODIFY_QP_PATH_MTU_MTU_1024;
1687 	case IB_MTU_2048:
1688 		return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1689 	case IB_MTU_4096:
1690 		return CMDQ_MODIFY_QP_PATH_MTU_MTU_4096;
1691 	default:
1692 		return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1693 	}
1694 }
1695 
1696 static enum ib_mtu __to_ib_mtu(u32 mtu)
1697 {
1698 	switch (mtu & CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) {
1699 	case CMDQ_MODIFY_QP_PATH_MTU_MTU_256:
1700 		return IB_MTU_256;
1701 	case CMDQ_MODIFY_QP_PATH_MTU_MTU_512:
1702 		return IB_MTU_512;
1703 	case CMDQ_MODIFY_QP_PATH_MTU_MTU_1024:
1704 		return IB_MTU_1024;
1705 	case CMDQ_MODIFY_QP_PATH_MTU_MTU_2048:
1706 		return IB_MTU_2048;
1707 	case CMDQ_MODIFY_QP_PATH_MTU_MTU_4096:
1708 		return IB_MTU_4096;
1709 	default:
1710 		return IB_MTU_2048;
1711 	}
1712 }
1713 
1714 /* Shared Receive Queues */
1715 int bnxt_re_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata)
1716 {
1717 	struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1718 					       ib_srq);
1719 	struct bnxt_re_dev *rdev = srq->rdev;
1720 	struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
1721 	struct bnxt_qplib_nq *nq = NULL;
1722 
1723 	if (qplib_srq->cq)
1724 		nq = qplib_srq->cq->nq;
1725 	if (rdev->chip_ctx->modes.toggle_bits & BNXT_QPLIB_SRQ_TOGGLE_BIT) {
1726 		free_page((unsigned long)srq->uctx_srq_page);
1727 		hash_del(&srq->hash_entry);
1728 	}
1729 	bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq);
1730 	ib_umem_release(srq->umem);
1731 	atomic_dec(&rdev->stats.res.srq_count);
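	/* nq->budget appears to track how many queues this notification
	 * queue services; drop it now that the SRQ is gone.
	 */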
1732 	if (nq)
1733 		nq->budget--;
1734 	return 0;
1735 }
1736 
1737 static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
1738 				 struct bnxt_re_pd *pd,
1739 				 struct bnxt_re_srq *srq,
1740 				 struct ib_udata *udata)
1741 {
1742 	struct bnxt_re_srq_req ureq;
1743 	struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
1744 	struct ib_umem *umem;
1745 	int bytes = 0;
1746 	struct bnxt_re_ucontext *cntx = rdma_udata_to_drv_context(
1747 		udata, struct bnxt_re_ucontext, ib_uctx);
1748 
1749 	if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
1750 		return -EFAULT;
1751 
1752 	bytes = (qplib_srq->max_wqe * qplib_srq->wqe_size);
1753 	bytes = PAGE_ALIGN(bytes);
1754 	umem = ib_umem_get(&rdev->ibdev, ureq.srqva, bytes,
1755 			   IB_ACCESS_LOCAL_WRITE);
1756 	if (IS_ERR(umem))
1757 		return PTR_ERR(umem);
1758 
1759 	srq->umem = umem;
1760 	qplib_srq->sg_info.umem = umem;
1761 	qplib_srq->sg_info.pgsize = PAGE_SIZE;
1762 	qplib_srq->sg_info.pgshft = PAGE_SHIFT;
1763 	qplib_srq->srq_handle = ureq.srq_handle;
1764 	qplib_srq->dpi = &cntx->dpi;
1765 
1766 	return 0;
1767 }
1768 
1769 int bnxt_re_create_srq(struct ib_srq *ib_srq,
1770 		       struct ib_srq_init_attr *srq_init_attr,
1771 		       struct ib_udata *udata)
1772 {
1773 	struct bnxt_qplib_dev_attr *dev_attr;
1774 	struct bnxt_qplib_nq *nq = NULL;
1775 	struct bnxt_re_ucontext *uctx;
1776 	struct bnxt_re_dev *rdev;
1777 	struct bnxt_re_srq *srq;
1778 	struct bnxt_re_pd *pd;
1779 	struct ib_pd *ib_pd;
1780 	u32 active_srqs;
1781 	int rc, entries;
1782 
1783 	ib_pd = ib_srq->pd;
1784 	pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
1785 	rdev = pd->rdev;
1786 	dev_attr = rdev->dev_attr;
1787 	srq = container_of(ib_srq, struct bnxt_re_srq, ib_srq);
1788 
1789 	if (srq_init_attr->attr.max_wr >= dev_attr->max_srq_wqes) {
1790 		ibdev_err(&rdev->ibdev, "Create CQ failed - max exceeded");
1791 		rc = -EINVAL;
1792 		goto exit;
1793 	}
1794 
1795 	if (srq_init_attr->srq_type != IB_SRQT_BASIC) {
1796 		rc = -EOPNOTSUPP;
1797 		goto exit;
1798 	}
1799 
1800 	uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
1801 	srq->rdev = rdev;
1802 	srq->qplib_srq.pd = &pd->qplib_pd;
1803 	srq->qplib_srq.dpi = &rdev->dpi_privileged;
1804 	/* Allocate 1 more than what's provided so posting max doesn't
1805 	 * mean empty
1806 	 */
1807 	entries = bnxt_re_init_depth(srq_init_attr->attr.max_wr + 1, uctx);
1808 	if (entries > dev_attr->max_srq_wqes + 1)
1809 		entries = dev_attr->max_srq_wqes + 1;
1810 	srq->qplib_srq.max_wqe = entries;
1811 
1812 	srq->qplib_srq.max_sge = srq_init_attr->attr.max_sge;
1813 	/* 128 byte WQE size for SRQ, so use the max SGEs */
1814 	srq->qplib_srq.wqe_size = bnxt_re_get_rwqe_size(dev_attr->max_srq_sges);
1815 	srq->qplib_srq.threshold = srq_init_attr->attr.srq_limit;
1816 	srq->srq_limit = srq_init_attr->attr.srq_limit;
1817 	srq->qplib_srq.eventq_hw_ring_id = rdev->nqr->nq[0].ring_id;
1818 	srq->qplib_srq.sg_info.pgsize = PAGE_SIZE;
1819 	srq->qplib_srq.sg_info.pgshft = PAGE_SHIFT;
1820 	nq = &rdev->nqr->nq[0];
1821 
1822 	if (udata) {
1823 		rc = bnxt_re_init_user_srq(rdev, pd, srq, udata);
1824 		if (rc)
1825 			goto fail;
1826 	}
1827 
1828 	rc = bnxt_qplib_create_srq(&rdev->qplib_res, &srq->qplib_srq);
1829 	if (rc) {
1830 		ibdev_err(&rdev->ibdev, "Create HW SRQ failed!");
1831 		goto fail;
1832 	}
1833 
1834 	if (udata) {
1835 		struct bnxt_re_srq_resp resp = {};
1836 
1837 		resp.srqid = srq->qplib_srq.id;
1838 		if (rdev->chip_ctx->modes.toggle_bits & BNXT_QPLIB_SRQ_TOGGLE_BIT) {
1839 			hash_add(rdev->srq_hash, &srq->hash_entry, srq->qplib_srq.id);
1840 			srq->uctx_srq_page = (void *)get_zeroed_page(GFP_KERNEL);
1841 			if (!srq->uctx_srq_page) {
1842 				rc = -ENOMEM;
1843 				goto fail;
1844 			}
1845 			resp.comp_mask |= BNXT_RE_SRQ_TOGGLE_PAGE_SUPPORT;
1846 		}
1847 		rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
1848 		if (rc) {
1849 			ibdev_err(&rdev->ibdev, "SRQ copy to udata failed!");
1850 			bnxt_qplib_destroy_srq(&rdev->qplib_res,
1851 					       &srq->qplib_srq);
1852 			goto fail;
1853 		}
1854 	}
1855 	if (nq)
1856 		nq->budget++;
1857 	active_srqs = atomic_inc_return(&rdev->stats.res.srq_count);
1858 	if (active_srqs > rdev->stats.res.srq_watermark)
1859 		rdev->stats.res.srq_watermark = active_srqs;
1860 	spin_lock_init(&srq->lock);
1861 
1862 	return 0;
1863 
1864 fail:
1865 	ib_umem_release(srq->umem);
1866 exit:
1867 	return rc;
1868 }
1869 
1870 int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
1871 		       enum ib_srq_attr_mask srq_attr_mask,
1872 		       struct ib_udata *udata)
1873 {
1874 	struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1875 					       ib_srq);
1876 	struct bnxt_re_dev *rdev = srq->rdev;
1877 
1878 	switch (srq_attr_mask) {
1879 	case IB_SRQ_MAX_WR:
1880 		/* SRQ resize is not supported */
1881 		return -EINVAL;
1882 	case IB_SRQ_LIMIT:
1883 		/* Change the SRQ threshold */
1884 		if (srq_attr->srq_limit > srq->qplib_srq.max_wqe)
1885 			return -EINVAL;
1886 
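		/* Program the new threshold and re-arm the SRQ doorbell so
		 * the HW can raise a limit event once the number of posted
		 * RQEs drops to this watermark (standard SRQ limit semantics).
		 */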
1887 		srq->qplib_srq.threshold = srq_attr->srq_limit;
1888 		bnxt_qplib_srq_arm_db(&srq->qplib_srq.dbinfo, srq->qplib_srq.threshold);
1889 
1890 		/* On success, update the shadow */
1891 		srq->srq_limit = srq_attr->srq_limit;
1892 		/* No need to build and send a response back to udata */
1893 		return 0;
1894 	default:
1895 		ibdev_err(&rdev->ibdev,
1896 			  "Unsupported srq_attr_mask 0x%x", srq_attr_mask);
1897 		return -EINVAL;
1898 	}
1899 }
1900 
1901 int bnxt_re_query_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr)
1902 {
1903 	struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1904 					       ib_srq);
1905 	struct bnxt_re_srq tsrq;
1906 	struct bnxt_re_dev *rdev = srq->rdev;
1907 	int rc;
1908 
1909 	/* Get live SRQ attr */
1910 	tsrq.qplib_srq.id = srq->qplib_srq.id;
1911 	rc = bnxt_qplib_query_srq(&rdev->qplib_res, &tsrq.qplib_srq);
1912 	if (rc) {
1913 		ibdev_err(&rdev->ibdev, "Query HW SRQ failed!");
1914 		return rc;
1915 	}
1916 	srq_attr->max_wr = srq->qplib_srq.max_wqe;
1917 	srq_attr->max_sge = srq->qplib_srq.max_sge;
1918 	srq_attr->srq_limit = tsrq.qplib_srq.threshold;
1919 
1920 	return 0;
1921 }
1922 
1923 int bnxt_re_post_srq_recv(struct ib_srq *ib_srq, const struct ib_recv_wr *wr,
1924 			  const struct ib_recv_wr **bad_wr)
1925 {
1926 	struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1927 					       ib_srq);
1928 	struct bnxt_qplib_swqe wqe;
1929 	unsigned long flags;
1930 	int rc = 0;
1931 
1932 	spin_lock_irqsave(&srq->lock, flags);
1933 	while (wr) {
1934 		/* Transcribe each ib_recv_wr to qplib_swqe */
1935 		wqe.num_sge = wr->num_sge;
1936 		bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
1937 		wqe.wr_id = wr->wr_id;
1938 		wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
1939 
1940 		rc = bnxt_qplib_post_srq_recv(&srq->qplib_srq, &wqe);
1941 		if (rc) {
1942 			*bad_wr = wr;
1943 			break;
1944 		}
1945 		wr = wr->next;
1946 	}
1947 	spin_unlock_irqrestore(&srq->lock, flags);
1948 
1949 	return rc;
1950 }
1951 static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
1952 				    struct bnxt_re_qp *qp1_qp,
1953 				    int qp_attr_mask)
1954 {
1955 	struct bnxt_re_qp *qp = rdev->gsi_ctx.gsi_sqp;
1956 	int rc;
1957 
1958 	if (qp_attr_mask & IB_QP_STATE) {
1959 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
1960 		qp->qplib_qp.state = qp1_qp->qplib_qp.state;
1961 	}
1962 	if (qp_attr_mask & IB_QP_PKEY_INDEX) {
1963 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
1964 		qp->qplib_qp.pkey_index = qp1_qp->qplib_qp.pkey_index;
1965 	}
1966 
1967 	if (qp_attr_mask & IB_QP_QKEY) {
1968 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
1969 		/* Use a random QKEY */
1970 		qp->qplib_qp.qkey = 0x81818181;
1971 	}
1972 	if (qp_attr_mask & IB_QP_SQ_PSN) {
1973 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
1974 		qp->qplib_qp.sq.psn = qp1_qp->qplib_qp.sq.psn;
1975 	}
1976 
1977 	rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
1978 	if (rc)
1979 		ibdev_err(&rdev->ibdev, "Failed to modify Shadow QP for QP1");
1980 	return rc;
1981 }
1982 
1983 int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
1984 		      int qp_attr_mask, struct ib_udata *udata)
1985 {
1986 	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
1987 	struct bnxt_re_dev *rdev = qp->rdev;
1988 	struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
1989 	enum ib_qp_state curr_qp_state, new_qp_state;
1990 	int rc, entries;
1991 	unsigned int flags;
1992 	u8 nw_type;
1993 
1994 	if (qp_attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
1995 		return -EOPNOTSUPP;
1996 
1997 	qp->qplib_qp.modify_flags = 0;
1998 	if (qp_attr_mask & IB_QP_STATE) {
1999 		curr_qp_state = __to_ib_qp_state(qp->qplib_qp.cur_qp_state);
2000 		new_qp_state = qp_attr->qp_state;
2001 		if (!ib_modify_qp_is_ok(curr_qp_state, new_qp_state,
2002 					ib_qp->qp_type, qp_attr_mask)) {
2003 			ibdev_err(&rdev->ibdev,
2004 				  "Invalid attribute mask: %#x specified ",
2005 				  qp_attr_mask);
2006 			ibdev_err(&rdev->ibdev,
2007 				  "for qpn: %#x type: %#x",
2008 				  ib_qp->qp_num, ib_qp->qp_type);
2009 			ibdev_err(&rdev->ibdev,
2010 				  "curr_qp_state=0x%x, new_qp_state=0x%x\n",
2011 				  curr_qp_state, new_qp_state);
2012 			return -EINVAL;
2013 		}
2014 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
2015 		qp->qplib_qp.state = __from_ib_qp_state(qp_attr->qp_state);
2016 
2017 		if (!qp->sumem &&
2018 		    qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
2019 			ibdev_dbg(&rdev->ibdev,
2020 				  "Move QP = %p to flush list\n", qp);
2021 			flags = bnxt_re_lock_cqs(qp);
2022 			bnxt_qplib_add_flush_qp(&qp->qplib_qp);
2023 			bnxt_re_unlock_cqs(qp, flags);
2024 		}
2025 		if (!qp->sumem &&
2026 		    qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
2027 			ibdev_dbg(&rdev->ibdev,
2028 				  "Move QP = %p out of flush list\n", qp);
2029 			flags = bnxt_re_lock_cqs(qp);
2030 			bnxt_qplib_clean_qp(&qp->qplib_qp);
2031 			bnxt_re_unlock_cqs(qp, flags);
2032 		}
2033 	}
2034 	if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
2035 		qp->qplib_qp.modify_flags |=
2036 				CMDQ_MODIFY_QP_MODIFY_MASK_EN_SQD_ASYNC_NOTIFY;
2037 		qp->qplib_qp.en_sqd_async_notify = true;
2038 	}
2039 	if (qp_attr_mask & IB_QP_ACCESS_FLAGS) {
2040 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS;
2041 		qp->qplib_qp.access =
2042 			__from_ib_access_flags(qp_attr->qp_access_flags);
2043 		/* LOCAL_WRITE access must be set to allow RC receive */
2044 		qp->qplib_qp.access |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
2045 		/* Temp: Set all params on QP as of now */
2046 		qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_REMOTE_WRITE;
2047 		qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_REMOTE_READ;
2048 	}
2049 	if (qp_attr_mask & IB_QP_PKEY_INDEX) {
2050 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
2051 		qp->qplib_qp.pkey_index = qp_attr->pkey_index;
2052 	}
2053 	if (qp_attr_mask & IB_QP_QKEY) {
2054 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
2055 		qp->qplib_qp.qkey = qp_attr->qkey;
2056 	}
2057 	if (qp_attr_mask & IB_QP_AV) {
2058 		const struct ib_global_route *grh =
2059 			rdma_ah_read_grh(&qp_attr->ah_attr);
2060 		const struct ib_gid_attr *sgid_attr;
2061 		struct bnxt_re_gid_ctx *ctx;
2062 
2063 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
2064 				     CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
2065 				     CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
2066 				     CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
2067 				     CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
2068 				     CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
2069 				     CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
2070 		memcpy(qp->qplib_qp.ah.dgid.data, grh->dgid.raw,
2071 		       sizeof(qp->qplib_qp.ah.dgid.data));
2072 		qp->qplib_qp.ah.flow_label = grh->flow_label;
2073 		sgid_attr = grh->sgid_attr;
2074 		/* Get the HW context of the GID. The reference
2075 		 * of GID table entry is already taken by the caller.
2076 		 */
2077 		ctx = rdma_read_gid_hw_context(sgid_attr);
2078 		qp->qplib_qp.ah.sgid_index = ctx->idx;
2079 		qp->qplib_qp.ah.host_sgid_index = grh->sgid_index;
2080 		qp->qplib_qp.ah.hop_limit = grh->hop_limit;
2081 		qp->qplib_qp.ah.traffic_class = grh->traffic_class;
2082 		qp->qplib_qp.ah.sl = rdma_ah_get_sl(&qp_attr->ah_attr);
2083 		ether_addr_copy(qp->qplib_qp.ah.dmac,
2084 				qp_attr->ah_attr.roce.dmac);
2085 
2086 		rc = rdma_read_gid_l2_fields(sgid_attr, NULL,
2087 					     &qp->qplib_qp.smac[0]);
2088 		if (rc)
2089 			return rc;
2090 
2091 		nw_type = rdma_gid_attr_network_type(sgid_attr);
2092 		switch (nw_type) {
2093 		case RDMA_NETWORK_IPV4:
2094 			qp->qplib_qp.nw_type =
2095 				CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4;
2096 			break;
2097 		case RDMA_NETWORK_IPV6:
2098 			qp->qplib_qp.nw_type =
2099 				CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6;
2100 			break;
2101 		default:
2102 			qp->qplib_qp.nw_type =
2103 				CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV1;
2104 			break;
2105 		}
2106 	}
2107 
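	/* On the transition to RTR the path MTU is bounded by the netdev MTU
	 * (via iboe_get_mtu()); a caller-supplied IB_QP_PATH_MTU larger than
	 * that is rejected, otherwise the netdev-derived value is used.
	 */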
2108 	if (qp_attr->qp_state == IB_QPS_RTR) {
2109 		enum ib_mtu qpmtu;
2110 
2111 		qpmtu = iboe_get_mtu(rdev->netdev->mtu);
2112 		if (qp_attr_mask & IB_QP_PATH_MTU) {
2113 			if (ib_mtu_enum_to_int(qp_attr->path_mtu) >
2114 			    ib_mtu_enum_to_int(qpmtu))
2115 				return -EINVAL;
2116 			qpmtu = qp_attr->path_mtu;
2117 		}
2118 
2119 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
2120 		qp->qplib_qp.path_mtu = __from_ib_mtu(qpmtu);
2121 		qp->qplib_qp.mtu = ib_mtu_enum_to_int(qpmtu);
2122 	}
2123 
2124 	if (qp_attr_mask & IB_QP_TIMEOUT) {
2125 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT;
2126 		qp->qplib_qp.timeout = qp_attr->timeout;
2127 	}
2128 	if (qp_attr_mask & IB_QP_RETRY_CNT) {
2129 		qp->qplib_qp.modify_flags |=
2130 				CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT;
2131 		qp->qplib_qp.retry_cnt = qp_attr->retry_cnt;
2132 	}
2133 	if (qp_attr_mask & IB_QP_RNR_RETRY) {
2134 		qp->qplib_qp.modify_flags |=
2135 				CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY;
2136 		qp->qplib_qp.rnr_retry = qp_attr->rnr_retry;
2137 	}
2138 	if (qp_attr_mask & IB_QP_MIN_RNR_TIMER) {
2139 		qp->qplib_qp.modify_flags |=
2140 				CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER;
2141 		qp->qplib_qp.min_rnr_timer = qp_attr->min_rnr_timer;
2142 	}
2143 	if (qp_attr_mask & IB_QP_RQ_PSN) {
2144 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN;
2145 		qp->qplib_qp.rq.psn = qp_attr->rq_psn;
2146 	}
2147 	if (qp_attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
2148 		qp->qplib_qp.modify_flags |=
2149 				CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC;
2150 		/* Cap the max_rd_atomic to device max */
2151 		qp->qplib_qp.max_rd_atomic = min_t(u32, qp_attr->max_rd_atomic,
2152 						   dev_attr->max_qp_rd_atom);
2153 	}
2154 	if (qp_attr_mask & IB_QP_SQ_PSN) {
2155 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
2156 		qp->qplib_qp.sq.psn = qp_attr->sq_psn;
2157 	}
2158 	if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
2159 		if (qp_attr->max_dest_rd_atomic >
2160 		    dev_attr->max_qp_init_rd_atom) {
2161 			ibdev_err(&rdev->ibdev,
2162 				  "max_dest_rd_atomic requested%d is > dev_max%d",
2163 				  qp_attr->max_dest_rd_atomic,
2164 				  dev_attr->max_qp_init_rd_atom);
2165 			return -EINVAL;
2166 		}
2167 
2168 		qp->qplib_qp.modify_flags |=
2169 				CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC;
2170 		qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic;
2171 	}
2172 	if (qp_attr_mask & IB_QP_CAP) {
2173 		struct bnxt_re_ucontext *uctx =
2174 			rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
2175 
2176 		qp->qplib_qp.modify_flags |=
2177 				CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SIZE |
2178 				CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SIZE |
2179 				CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SGE |
2180 				CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SGE |
2181 				CMDQ_MODIFY_QP_MODIFY_MASK_MAX_INLINE_DATA;
2182 		if ((qp_attr->cap.max_send_wr >= dev_attr->max_qp_wqes) ||
2183 		    (qp_attr->cap.max_recv_wr >= dev_attr->max_qp_wqes) ||
2184 		    (qp_attr->cap.max_send_sge >= dev_attr->max_qp_sges) ||
2185 		    (qp_attr->cap.max_recv_sge >= dev_attr->max_qp_sges) ||
2186 		    (qp_attr->cap.max_inline_data >=
2187 						dev_attr->max_inline_data)) {
2188 			ibdev_err(&rdev->ibdev,
2189 				  "Create QP failed - max exceeded");
2190 			return -EINVAL;
2191 		}
2192 		entries = bnxt_re_init_depth(qp_attr->cap.max_send_wr, uctx);
2193 		qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
2194 						dev_attr->max_qp_wqes + 1);
2195 		qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
2196 						qp_attr->cap.max_send_wr;
2197 		/*
2198 		 * Reserve one slot for the phantom WQE. Some applications can
2199 		 * post one extra entry in this case; allow it to avoid an
2200 		 * unexpected queue-full condition.
2201 		 */
2202 		qp->qplib_qp.sq.q_full_delta -= 1;
2203 		qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge;
2204 		if (qp->qplib_qp.rq.max_wqe) {
2205 			entries = bnxt_re_init_depth(qp_attr->cap.max_recv_wr, uctx);
2206 			qp->qplib_qp.rq.max_wqe =
2207 				min_t(u32, entries, dev_attr->max_qp_wqes + 1);
2208 			qp->qplib_qp.rq.max_sw_wqe = qp->qplib_qp.rq.max_wqe;
2209 			qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
2210 						       qp_attr->cap.max_recv_wr;
2211 			qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge;
2212 		} else {
2213 			/* SRQ was used prior, just ignore the RQ caps */
2214 		}
2215 	}
2216 	if (qp_attr_mask & IB_QP_DEST_QPN) {
2217 		qp->qplib_qp.modify_flags |=
2218 				CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID;
2219 		qp->qplib_qp.dest_qpn = qp_attr->dest_qp_num;
2220 	}
2221 	rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
2222 	if (rc) {
2223 		ibdev_err(&rdev->ibdev, "Failed to modify HW QP");
2224 		return rc;
2225 	}
2226 	if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp)
2227 		rc = bnxt_re_modify_shadow_qp(rdev, qp, qp_attr_mask);
2228 	return rc;
2229 }
2230 
2231 int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
2232 		     int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
2233 {
2234 	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2235 	struct bnxt_re_dev *rdev = qp->rdev;
2236 	struct bnxt_qplib_qp *qplib_qp;
2237 	int rc;
2238 
2239 	qplib_qp = kzalloc(sizeof(*qplib_qp), GFP_KERNEL);
2240 	if (!qplib_qp)
2241 		return -ENOMEM;
2242 
2243 	qplib_qp->id = qp->qplib_qp.id;
2244 	qplib_qp->ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index;
2245 
2246 	rc = bnxt_qplib_query_qp(&rdev->qplib_res, qplib_qp);
2247 	if (rc) {
2248 		ibdev_err(&rdev->ibdev, "Failed to query HW QP");
2249 		goto out;
2250 	}
2251 	qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state);
2252 	qp_attr->cur_qp_state = __to_ib_qp_state(qplib_qp->cur_qp_state);
2253 	qp_attr->en_sqd_async_notify = qplib_qp->en_sqd_async_notify ? 1 : 0;
2254 	qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp->access);
2255 	qp_attr->pkey_index = qplib_qp->pkey_index;
2256 	qp_attr->qkey = qplib_qp->qkey;
2257 	qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
2258 	rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp->ah.flow_label,
2259 			qplib_qp->ah.host_sgid_index,
2260 			qplib_qp->ah.hop_limit,
2261 			qplib_qp->ah.traffic_class);
2262 	rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp->ah.dgid.data);
2263 	rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp->ah.sl);
2264 	ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp->ah.dmac);
2265 	qp_attr->path_mtu = __to_ib_mtu(qplib_qp->path_mtu);
2266 	qp_attr->timeout = qplib_qp->timeout;
2267 	qp_attr->retry_cnt = qplib_qp->retry_cnt;
2268 	qp_attr->rnr_retry = qplib_qp->rnr_retry;
2269 	qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer;
2270 	qp_attr->port_num = __to_ib_port_num(qplib_qp->port_id);
2271 	qp_attr->rq_psn = qplib_qp->rq.psn;
2272 	qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic;
2273 	qp_attr->sq_psn = qplib_qp->sq.psn;
2274 	qp_attr->max_dest_rd_atomic = qplib_qp->max_dest_rd_atomic;
2275 	qp_init_attr->sq_sig_type = qplib_qp->sig_type ? IB_SIGNAL_ALL_WR :
2276 							 IB_SIGNAL_REQ_WR;
2277 	qp_attr->dest_qp_num = qplib_qp->dest_qpn;
2278 
2279 	qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe;
2280 	qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge;
2281 	qp_attr->cap.max_recv_wr = qp->qplib_qp.rq.max_wqe;
2282 	qp_attr->cap.max_recv_sge = qp->qplib_qp.rq.max_sge;
2283 	qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data;
2284 	qp_init_attr->cap = qp_attr->cap;
2285 
2286 out:
2287 	kfree(qplib_qp);
2288 	return rc;
2289 }
2290 
2291 /* Routine for sending QP1 packets for RoCE V1 and V2
2292  */
2293 static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
2294 				     const struct ib_send_wr *wr,
2295 				     struct bnxt_qplib_swqe *wqe,
2296 				     int payload_size)
2297 {
2298 	struct bnxt_re_ah *ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah,
2299 					     ib_ah);
2300 	struct bnxt_qplib_ah *qplib_ah = &ah->qplib_ah;
2301 	const struct ib_gid_attr *sgid_attr = ah->ib_ah.sgid_attr;
2302 	struct bnxt_qplib_sge sge;
2303 	u8 nw_type;
2304 	u16 ether_type;
2305 	union ib_gid dgid;
2306 	bool is_eth = false;
2307 	bool is_vlan = false;
2308 	bool is_grh = false;
2309 	bool is_udp = false;
2310 	u8 ip_version = 0;
2311 	u16 vlan_id = 0xFFFF;
2312 	void *buf;
2313 	int i, rc;
2314 
2315 	memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr));
2316 
2317 	rc = rdma_read_gid_l2_fields(sgid_attr, &vlan_id, NULL);
2318 	if (rc)
2319 		return rc;
2320 
2321 	/* Get network header type for this GID */
2322 	nw_type = rdma_gid_attr_network_type(sgid_attr);
2323 	switch (nw_type) {
2324 	case RDMA_NETWORK_IPV4:
2325 		nw_type = BNXT_RE_ROCEV2_IPV4_PACKET;
2326 		break;
2327 	case RDMA_NETWORK_IPV6:
2328 		nw_type = BNXT_RE_ROCEV2_IPV6_PACKET;
2329 		break;
2330 	default:
2331 		nw_type = BNXT_RE_ROCE_V1_PACKET;
2332 		break;
2333 	}
2334 	memcpy(&dgid.raw, &qplib_ah->dgid, 16);
2335 	is_udp = sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
2336 	if (is_udp) {
2337 		if (ipv6_addr_v4mapped((struct in6_addr *)&sgid_attr->gid)) {
2338 			ip_version = 4;
2339 			ether_type = ETH_P_IP;
2340 		} else {
2341 			ip_version = 6;
2342 			ether_type = ETH_P_IPV6;
2343 		}
2344 		is_grh = false;
2345 	} else {
2346 		ether_type = ETH_P_IBOE;
2347 		is_grh = true;
2348 	}
2349 
2350 	is_eth = true;
2351 	is_vlan = vlan_id && (vlan_id < 0x1000);
2352 
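	/* is_eth is always true at this point, so no LRH is built; a GRH is
	 * included only for RoCE v1, while RoCE v2 packets carry an
	 * IPv4/IPv6 + UDP header instead.
	 */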
2353 	ib_ud_header_init(payload_size, !is_eth, is_eth, is_vlan, is_grh,
2354 			  ip_version, is_udp, 0, &qp->qp1_hdr);
2355 
2356 	/* ETH */
2357 	ether_addr_copy(qp->qp1_hdr.eth.dmac_h, ah->qplib_ah.dmac);
2358 	ether_addr_copy(qp->qp1_hdr.eth.smac_h, qp->qplib_qp.smac);
2359 
2360 	/* For vlan, check the sgid for vlan existence */
2361 
2362 	if (!is_vlan) {
2363 		qp->qp1_hdr.eth.type = cpu_to_be16(ether_type);
2364 	} else {
2365 		qp->qp1_hdr.vlan.type = cpu_to_be16(ether_type);
2366 		qp->qp1_hdr.vlan.tag = cpu_to_be16(vlan_id);
2367 	}
2368 
2369 	if (is_grh || (ip_version == 6)) {
2370 		memcpy(qp->qp1_hdr.grh.source_gid.raw, sgid_attr->gid.raw,
2371 		       sizeof(sgid_attr->gid));
2372 		memcpy(qp->qp1_hdr.grh.destination_gid.raw, qplib_ah->dgid.data,
2373 		       sizeof(sgid_attr->gid));
2374 		qp->qp1_hdr.grh.hop_limit     = qplib_ah->hop_limit;
2375 	}
2376 
2377 	if (ip_version == 4) {
2378 		qp->qp1_hdr.ip4.tos = 0;
2379 		qp->qp1_hdr.ip4.id = 0;
2380 		qp->qp1_hdr.ip4.frag_off = htons(IP_DF);
2381 		qp->qp1_hdr.ip4.ttl = qplib_ah->hop_limit;
2382 
2383 		memcpy(&qp->qp1_hdr.ip4.saddr, sgid_attr->gid.raw + 12, 4);
2384 		memcpy(&qp->qp1_hdr.ip4.daddr, qplib_ah->dgid.data + 12, 4);
2385 		qp->qp1_hdr.ip4.check = ib_ud_ip4_csum(&qp->qp1_hdr);
2386 	}
2387 
2388 	if (is_udp) {
2389 		qp->qp1_hdr.udp.dport = htons(ROCE_V2_UDP_DPORT);
2390 		qp->qp1_hdr.udp.sport = htons(0x8CD1);
2391 		qp->qp1_hdr.udp.csum = 0;
2392 	}
2393 
2394 	/* BTH */
2395 	if (wr->opcode == IB_WR_SEND_WITH_IMM) {
2396 		qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
2397 		qp->qp1_hdr.immediate_present = 1;
2398 	} else {
2399 		qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
2400 	}
2401 	if (wr->send_flags & IB_SEND_SOLICITED)
2402 		qp->qp1_hdr.bth.solicited_event = 1;
2403 	/* pad_count */
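	/* e.g. a 17-byte payload gives pad_count = (4 - 17) & 3 = 3,
	 * rounding the payload up to a 4-byte multiple
	 */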
2404 	qp->qp1_hdr.bth.pad_count = (4 - payload_size) & 3;
2405 
2406 	/* P_key for QP1 is for all members */
2407 	qp->qp1_hdr.bth.pkey = cpu_to_be16(0xFFFF);
2408 	qp->qp1_hdr.bth.destination_qpn = IB_QP1;
2409 	qp->qp1_hdr.bth.ack_req = 0;
2410 	qp->send_psn++;
2411 	qp->send_psn &= BTH_PSN_MASK;
2412 	qp->qp1_hdr.bth.psn = cpu_to_be32(qp->send_psn);
2413 	/* DETH */
2414 	/* Use the privileged Q_Key for QP1 */
2415 	qp->qp1_hdr.deth.qkey = cpu_to_be32(IB_QP1_QKEY);
2416 	qp->qp1_hdr.deth.source_qpn = IB_QP1;
2417 
2418 	/* Pack the QP1 to the transmit buffer */
2419 	buf = bnxt_qplib_get_qp1_sq_buf(&qp->qplib_qp, &sge);
2420 	if (buf) {
2421 		ib_ud_header_pack(&qp->qp1_hdr, buf);
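		/* Shift the caller's SGEs up by one slot so that sg_list[0]
		 * can carry the freshly packed QP1 header buffer below.
		 */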
2422 		for (i = wqe->num_sge; i; i--) {
2423 			wqe->sg_list[i].addr = wqe->sg_list[i - 1].addr;
2424 			wqe->sg_list[i].lkey = wqe->sg_list[i - 1].lkey;
2425 			wqe->sg_list[i].size = wqe->sg_list[i - 1].size;
2426 		}
2427 
2428 		/*
2429 		 * The max header buf size for IPv6 RoCE V2 is 86 bytes,
2430 		 * which is the same as the QP1 SQ header buffer.
2431 		 * The header buf size for IPv4 RoCE V2 can be 66 bytes:
2432 		 * ETH(14) + VLAN(4) + IP(20) + UDP(8) + BTH(20).
2433 		 * Subtract 20 bytes from the QP1 SQ header buf size.
2434 		 */
2435 		if (is_udp && ip_version == 4)
2436 			sge.size -= 20;
2437 		/*
2438 		 * Max Header buf size for RoCE V1 is 78.
2439 		 * ETH(14) + VLAN(4) + GRH(40) + BTH(20).
2440 		 * Subtract 8 bytes from QP1 SQ header buf size
2441 		 */
2442 		if (!is_udp)
2443 			sge.size -= 8;
2444 
2445 		/* Subtract 4 bytes for non-VLAN packets */
2446 		if (!is_vlan)
2447 			sge.size -= 4;
2448 
2449 		wqe->sg_list[0].addr = sge.addr;
2450 		wqe->sg_list[0].lkey = sge.lkey;
2451 		wqe->sg_list[0].size = sge.size;
2452 		wqe->num_sge++;
2453 
2454 	} else {
2455 		ibdev_err(&qp->rdev->ibdev, "QP1 buffer is empty!");
2456 		rc = -ENOMEM;
2457 	}
2458 	return rc;
2459 }
2460 
2461 /* The MAD layer only provides a recv SGE the size of
2462  * ib_grh + MAD datagram.  No Ethernet headers, Ethertype, BTH, DETH,
2463  * nor RoCE iCRC.  The Cu+ solution must provide a buffer for the entire
2464  * receive packet (334 bytes) with no VLAN and then copy the GRH
2465  * and the MAD datagram out to the provided SGE.
2466  */
2467 static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp,
2468 					    const struct ib_recv_wr *wr,
2469 					    struct bnxt_qplib_swqe *wqe,
2470 					    int payload_size)
2471 {
2472 	struct bnxt_re_sqp_entries *sqp_entry;
2473 	struct bnxt_qplib_sge ref, sge;
2474 	struct bnxt_re_dev *rdev;
2475 	u32 rq_prod_index;
2476 
2477 	rdev = qp->rdev;
2478 
2479 	rq_prod_index = bnxt_qplib_get_rq_prod_index(&qp->qplib_qp);
2480 
2481 	if (!bnxt_qplib_get_qp1_rq_buf(&qp->qplib_qp, &sge))
2482 		return -ENOMEM;
2483 
2484 	/* Create 1 SGE to receive the entire
2485 	 * ethernet packet
2486 	 */
2487 	/* Save the reference from ULP */
2488 	ref.addr = wqe->sg_list[0].addr;
2489 	ref.lkey = wqe->sg_list[0].lkey;
2490 	ref.size = wqe->sg_list[0].size;
2491 
2492 	sqp_entry = &rdev->gsi_ctx.sqp_tbl[rq_prod_index];
2493 
2494 	/* SGE 1 */
2495 	wqe->sg_list[0].addr = sge.addr;
2496 	wqe->sg_list[0].lkey = sge.lkey;
2497 	wqe->sg_list[0].size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
2498 	sge.size -= wqe->sg_list[0].size;
2499 
2500 	sqp_entry->sge.addr = ref.addr;
2501 	sqp_entry->sge.lkey = ref.lkey;
2502 	sqp_entry->sge.size = ref.size;
2503 	/* Store the wrid for reporting completion */
2504 	sqp_entry->wrid = wqe->wr_id;
2505 	/* change the wqe->wrid to table index */
2506 	wqe->wr_id = rq_prod_index;
2507 	return 0;
2508 }
2509 
2510 static int is_ud_qp(struct bnxt_re_qp *qp)
2511 {
2512 	return (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD ||
2513 		qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_GSI);
2514 }
2515 
2516 static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp,
2517 				  const struct ib_send_wr *wr,
2518 				  struct bnxt_qplib_swqe *wqe)
2519 {
2520 	struct bnxt_re_ah *ah = NULL;
2521 
2522 	if (is_ud_qp(qp)) {
2523 		ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah, ib_ah);
2524 		wqe->send.q_key = ud_wr(wr)->remote_qkey;
2525 		wqe->send.dst_qp = ud_wr(wr)->remote_qpn;
2526 		wqe->send.avid = ah->qplib_ah.id;
2527 	}
2528 	switch (wr->opcode) {
2529 	case IB_WR_SEND:
2530 		wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND;
2531 		break;
2532 	case IB_WR_SEND_WITH_IMM:
2533 		wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM;
2534 		wqe->send.imm_data = be32_to_cpu(wr->ex.imm_data);
2535 		break;
2536 	case IB_WR_SEND_WITH_INV:
2537 		wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV;
2538 		wqe->send.inv_key = wr->ex.invalidate_rkey;
2539 		break;
2540 	default:
2541 		return -EINVAL;
2542 	}
2543 	if (wr->send_flags & IB_SEND_SIGNALED)
2544 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2545 	if (wr->send_flags & IB_SEND_FENCE)
2546 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2547 	if (wr->send_flags & IB_SEND_SOLICITED)
2548 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2549 	if (wr->send_flags & IB_SEND_INLINE)
2550 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
2551 
2552 	return 0;
2553 }
2554 
2555 static int bnxt_re_build_rdma_wqe(const struct ib_send_wr *wr,
2556 				  struct bnxt_qplib_swqe *wqe)
2557 {
2558 	switch (wr->opcode) {
2559 	case IB_WR_RDMA_WRITE:
2560 		wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE;
2561 		break;
2562 	case IB_WR_RDMA_WRITE_WITH_IMM:
2563 		wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM;
2564 		wqe->rdma.imm_data = be32_to_cpu(wr->ex.imm_data);
2565 		break;
2566 	case IB_WR_RDMA_READ:
2567 		wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_READ;
2568 		wqe->rdma.inv_key = wr->ex.invalidate_rkey;
2569 		break;
2570 	default:
2571 		return -EINVAL;
2572 	}
2573 	wqe->rdma.remote_va = rdma_wr(wr)->remote_addr;
2574 	wqe->rdma.r_key = rdma_wr(wr)->rkey;
2575 	if (wr->send_flags & IB_SEND_SIGNALED)
2576 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2577 	if (wr->send_flags & IB_SEND_FENCE)
2578 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2579 	if (wr->send_flags & IB_SEND_SOLICITED)
2580 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2581 	if (wr->send_flags & IB_SEND_INLINE)
2582 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
2583 
2584 	return 0;
2585 }
2586 
2587 static int bnxt_re_build_atomic_wqe(const struct ib_send_wr *wr,
2588 				    struct bnxt_qplib_swqe *wqe)
2589 {
2590 	switch (wr->opcode) {
2591 	case IB_WR_ATOMIC_CMP_AND_SWP:
2592 		wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP;
2593 		wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
2594 		wqe->atomic.swap_data = atomic_wr(wr)->swap;
2595 		break;
2596 	case IB_WR_ATOMIC_FETCH_AND_ADD:
2597 		wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD;
2598 		wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
2599 		break;
2600 	default:
2601 		return -EINVAL;
2602 	}
2603 	wqe->atomic.remote_va = atomic_wr(wr)->remote_addr;
2604 	wqe->atomic.r_key = atomic_wr(wr)->rkey;
2605 	if (wr->send_flags & IB_SEND_SIGNALED)
2606 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2607 	if (wr->send_flags & IB_SEND_FENCE)
2608 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2609 	if (wr->send_flags & IB_SEND_SOLICITED)
2610 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2611 	return 0;
2612 }
2613 
2614 static int bnxt_re_build_inv_wqe(const struct ib_send_wr *wr,
2615 				 struct bnxt_qplib_swqe *wqe)
2616 {
2617 	wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
2618 	wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;
2619 
2620 	if (wr->send_flags & IB_SEND_SIGNALED)
2621 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2622 	if (wr->send_flags & IB_SEND_SOLICITED)
2623 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2624 
2625 	return 0;
2626 }
2627 
2628 static int bnxt_re_build_reg_wqe(const struct ib_reg_wr *wr,
2629 				 struct bnxt_qplib_swqe *wqe)
2630 {
2631 	struct bnxt_re_mr *mr = container_of(wr->mr, struct bnxt_re_mr, ib_mr);
2632 	struct bnxt_qplib_frpl *qplib_frpl = &mr->qplib_frpl;
2633 	int access = wr->access;
2634 
2635 	wqe->frmr.pbl_ptr = (__le64 *)qplib_frpl->hwq.pbl_ptr[0];
2636 	wqe->frmr.pbl_dma_ptr = qplib_frpl->hwq.pbl_dma_ptr[0];
2637 	wqe->frmr.page_list = mr->pages;
2638 	wqe->frmr.page_list_len = mr->npages;
2639 	wqe->frmr.levels = qplib_frpl->hwq.level;
2640 	wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;
2641 
2642 	if (wr->wr.send_flags & IB_SEND_SIGNALED)
2643 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2644 
2645 	if (access & IB_ACCESS_LOCAL_WRITE)
2646 		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
2647 	if (access & IB_ACCESS_REMOTE_READ)
2648 		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_READ;
2649 	if (access & IB_ACCESS_REMOTE_WRITE)
2650 		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_WRITE;
2651 	if (access & IB_ACCESS_REMOTE_ATOMIC)
2652 		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_ATOMIC;
2653 	if (access & IB_ACCESS_MW_BIND)
2654 		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_WINDOW_BIND;
2655 
2656 	wqe->frmr.l_key = wr->key;
2657 	wqe->frmr.length = wr->mr->length;
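	/* Page-size fields are encoded as log2 relative to 4K; assuming
	 * PAGE_SHIFT_4K is 12, a 2MB MR page size would encode as
	 * ilog2(0x200000 >> 12) = 9.
	 */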
2658 	wqe->frmr.pbl_pg_sz_log = ilog2(PAGE_SIZE >> PAGE_SHIFT_4K);
2659 	wqe->frmr.pg_sz_log = ilog2(wr->mr->page_size >> PAGE_SHIFT_4K);
2660 	wqe->frmr.va = wr->mr->iova;
2661 	return 0;
2662 }
2663 
2664 static int bnxt_re_copy_inline_data(struct bnxt_re_dev *rdev,
2665 				    const struct ib_send_wr *wr,
2666 				    struct bnxt_qplib_swqe *wqe)
2667 {
2668 	/* Copy the inline data into the WQE's inline data field */
2669 	u8 *in_data;
2670 	u32 i, sge_len;
2671 	void *sge_addr;
2672 
2673 	in_data = wqe->inline_data;
2674 	for (i = 0; i < wr->num_sge; i++) {
2675 		sge_addr = (void *)(unsigned long)
2676 				wr->sg_list[i].addr;
2677 		sge_len = wr->sg_list[i].length;
2678 
2679 		if ((sge_len + wqe->inline_len) >
2680 		    BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
2681 			ibdev_err(&rdev->ibdev,
2682 				  "Inline data size requested > supported value");
2683 			return -EINVAL;
2684 		}
2685 		sge_len = wr->sg_list[i].length;
2686 
2687 		memcpy(in_data, sge_addr, sge_len);
2688 		in_data += wr->sg_list[i].length;
2689 		wqe->inline_len += wr->sg_list[i].length;
2690 	}
2691 	return wqe->inline_len;
2692 }
2693 
2694 static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev,
2695 				   const struct ib_send_wr *wr,
2696 				   struct bnxt_qplib_swqe *wqe)
2697 {
2698 	int payload_sz = 0;
2699 
2700 	if (wr->send_flags & IB_SEND_INLINE)
2701 		payload_sz = bnxt_re_copy_inline_data(rdev, wr, wqe);
2702 	else
2703 		payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe->sg_list,
2704 					       wqe->num_sge);
2705 
2706 	return payload_sz;
2707 }
2708 
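/* Workaround for a UD/GSI/raw-Ethertype QP hardware stall on older chips:
 * once BNXT_RE_UD_QP_HW_STALL WQEs have been posted, the QP is nudged by
 * re-issuing a modify-to-RTS and the software WQE counter is reset.
 */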
2709 static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp)
2710 {
2711 	if ((qp->ib_qp.qp_type == IB_QPT_UD ||
2712 	     qp->ib_qp.qp_type == IB_QPT_GSI ||
2713 	     qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) &&
2714 	     qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) {
2715 		int qp_attr_mask;
2716 		struct ib_qp_attr qp_attr;
2717 
2718 		qp_attr_mask = IB_QP_STATE;
2719 		qp_attr.qp_state = IB_QPS_RTS;
2720 		bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL);
2721 		qp->qplib_qp.wqe_cnt = 0;
2722 	}
2723 }
2724 
2725 static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
2726 				       struct bnxt_re_qp *qp,
2727 				       const struct ib_send_wr *wr)
2728 {
2729 	int rc = 0, payload_sz = 0;
2730 	unsigned long flags;
2731 
2732 	spin_lock_irqsave(&qp->sq_lock, flags);
2733 	while (wr) {
2734 		struct bnxt_qplib_swqe wqe = {};
2735 
2736 		/* Common */
2737 		wqe.num_sge = wr->num_sge;
2738 		if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2739 			ibdev_err(&rdev->ibdev,
2740 				  "Limit exceeded for Send SGEs");
2741 			rc = -EINVAL;
2742 			goto bad;
2743 		}
2744 
2745 		payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2746 		if (payload_sz < 0) {
2747 			rc = -EINVAL;
2748 			goto bad;
2749 		}
2750 		wqe.wr_id = wr->wr_id;
2751 
2752 		wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND;
2753 
2754 		rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2755 		if (!rc)
2756 			rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2757 bad:
2758 		if (rc) {
2759 			ibdev_err(&rdev->ibdev,
2760 				  "Post send failed opcode = %#x rc = %d",
2761 				  wr->opcode, rc);
2762 			break;
2763 		}
2764 		wr = wr->next;
2765 	}
2766 	bnxt_qplib_post_send_db(&qp->qplib_qp);
2767 	if (!bnxt_qplib_is_chip_gen_p5_p7(qp->rdev->chip_ctx))
2768 		bnxt_ud_qp_hw_stall_workaround(qp);
2769 	spin_unlock_irqrestore(&qp->sq_lock, flags);
2770 	return rc;
2771 }
2772 
2773 static void bnxt_re_legacy_set_uc_fence(struct bnxt_qplib_swqe *wqe)
2774 {
2775 	/* Need unconditional fence for non-wire memory opcode
2776 	 * to work as expected.
2777 	 */
2778 	if (wqe->type == BNXT_QPLIB_SWQE_TYPE_LOCAL_INV ||
2779 	    wqe->type == BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR ||
2780 	    wqe->type == BNXT_QPLIB_SWQE_TYPE_REG_MR ||
2781 	    wqe->type == BNXT_QPLIB_SWQE_TYPE_BIND_MW)
2782 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2783 }
2784 
2785 int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
2786 		      const struct ib_send_wr **bad_wr)
2787 {
2788 	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2789 	struct bnxt_qplib_swqe wqe;
2790 	int rc = 0, payload_sz = 0;
2791 	unsigned long flags;
2792 
2793 	spin_lock_irqsave(&qp->sq_lock, flags);
2794 	while (wr) {
2795 		/* Housekeeping */
2796 		memset(&wqe, 0, sizeof(wqe));
2797 
2798 		/* Common */
2799 		wqe.num_sge = wr->num_sge;
2800 		if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2801 			ibdev_err(&qp->rdev->ibdev,
2802 				  "Limit exceeded for Send SGEs");
2803 			rc = -EINVAL;
2804 			goto bad;
2805 		}
2806 
2807 		payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2808 		if (payload_sz < 0) {
2809 			rc = -EINVAL;
2810 			goto bad;
2811 		}
2812 		wqe.wr_id = wr->wr_id;
2813 
2814 		switch (wr->opcode) {
2815 		case IB_WR_SEND:
2816 		case IB_WR_SEND_WITH_IMM:
2817 			if (qp->qplib_qp.type == CMDQ_CREATE_QP1_TYPE_GSI) {
2818 				rc = bnxt_re_build_qp1_send_v2(qp, wr, &wqe,
2819 							       payload_sz);
2820 				if (rc)
2821 					goto bad;
2822 				wqe.rawqp1.lflags |=
2823 					SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC;
2824 			}
2825 			switch (wr->send_flags) {
2826 			case IB_SEND_IP_CSUM:
2827 				wqe.rawqp1.lflags |=
2828 					SQ_SEND_RAWETH_QP1_LFLAGS_IP_CHKSUM;
2829 				break;
2830 			default:
2831 				break;
2832 			}
2833 			fallthrough;
2834 		case IB_WR_SEND_WITH_INV:
2835 			rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2836 			break;
2837 		case IB_WR_RDMA_WRITE:
2838 		case IB_WR_RDMA_WRITE_WITH_IMM:
2839 		case IB_WR_RDMA_READ:
2840 			rc = bnxt_re_build_rdma_wqe(wr, &wqe);
2841 			break;
2842 		case IB_WR_ATOMIC_CMP_AND_SWP:
2843 		case IB_WR_ATOMIC_FETCH_AND_ADD:
2844 			rc = bnxt_re_build_atomic_wqe(wr, &wqe);
2845 			break;
2846 		case IB_WR_RDMA_READ_WITH_INV:
2847 			ibdev_err(&qp->rdev->ibdev,
2848 				  "RDMA Read with Invalidate is not supported");
2849 			rc = -EINVAL;
2850 			goto bad;
2851 		case IB_WR_LOCAL_INV:
2852 			rc = bnxt_re_build_inv_wqe(wr, &wqe);
2853 			break;
2854 		case IB_WR_REG_MR:
2855 			rc = bnxt_re_build_reg_wqe(reg_wr(wr), &wqe);
2856 			break;
2857 		default:
2858 			/* Unsupported WRs */
2859 			ibdev_err(&qp->rdev->ibdev,
2860 				  "WR (%#x) is not supported", wr->opcode);
2861 			rc = -EINVAL;
2862 			goto bad;
2863 		}
2864 		if (!rc) {
2865 			if (!bnxt_qplib_is_chip_gen_p5_p7(qp->rdev->chip_ctx))
2866 				bnxt_re_legacy_set_uc_fence(&wqe);
2867 			rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2868 		}
2869 bad:
2870 		if (rc) {
2871 			ibdev_err(&qp->rdev->ibdev,
2872 				  "post_send failed op:%#x qps = %#x rc = %d\n",
2873 				  wr->opcode, qp->qplib_qp.state, rc);
2874 			*bad_wr = wr;
2875 			break;
2876 		}
2877 		wr = wr->next;
2878 	}
2879 	bnxt_qplib_post_send_db(&qp->qplib_qp);
2880 	if (!bnxt_qplib_is_chip_gen_p5_p7(qp->rdev->chip_ctx))
2881 		bnxt_ud_qp_hw_stall_workaround(qp);
2882 	spin_unlock_irqrestore(&qp->sq_lock, flags);
2883 
2884 	return rc;
2885 }
2886 
2887 static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
2888 				       struct bnxt_re_qp *qp,
2889 				       const struct ib_recv_wr *wr)
2890 {
2891 	struct bnxt_qplib_swqe wqe;
2892 	int rc = 0;
2893 
2894 	while (wr) {
2895 		/* Housekeeping */
2896 		memset(&wqe, 0, sizeof(wqe));
2897 
2898 		/* Common */
2899 		wqe.num_sge = wr->num_sge;
2900 		if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2901 			ibdev_err(&rdev->ibdev,
2902 				  "Limit exceeded for Receive SGEs");
2903 			rc = -EINVAL;
2904 			break;
2905 		}
2906 		bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
2907 		wqe.wr_id = wr->wr_id;
2908 		wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2909 
2910 		rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2911 		if (rc)
2912 			break;
2913 
2914 		wr = wr->next;
2915 	}
2916 	if (!rc)
2917 		bnxt_qplib_post_recv_db(&qp->qplib_qp);
2918 	return rc;
2919 }
2920 
2921 int bnxt_re_post_recv(struct ib_qp *ib_qp, const struct ib_recv_wr *wr,
2922 		      const struct ib_recv_wr **bad_wr)
2923 {
2924 	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2925 	struct bnxt_qplib_swqe wqe;
2926 	int rc = 0, payload_sz = 0;
2927 	unsigned long flags;
2928 	u32 count = 0;
2929 
2930 	spin_lock_irqsave(&qp->rq_lock, flags);
2931 	while (wr) {
2932 		/* Housekeeping */
2933 		memset(&wqe, 0, sizeof(wqe));
2934 
2935 		/* Common */
2936 		wqe.num_sge = wr->num_sge;
2937 		if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2938 			ibdev_err(&qp->rdev->ibdev,
2939 				  "Limit exceeded for Receive SGEs");
2940 			rc = -EINVAL;
2941 			*bad_wr = wr;
2942 			break;
2943 		}
2944 
2945 		payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
2946 					       wr->num_sge);
2947 		wqe.wr_id = wr->wr_id;
2948 		wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2949 
2950 		if (ib_qp->qp_type == IB_QPT_GSI &&
2951 		    qp->qplib_qp.type != CMDQ_CREATE_QP_TYPE_GSI)
2952 			rc = bnxt_re_build_qp1_shadow_qp_recv(qp, wr, &wqe,
2953 							      payload_sz);
2954 		if (!rc)
2955 			rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2956 		if (rc) {
2957 			*bad_wr = wr;
2958 			break;
2959 		}
2960 
2961 		/* Ring DB if the RQEs posted reaches a threshold value */
2962 		if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) {
2963 			bnxt_qplib_post_recv_db(&qp->qplib_qp);
2964 			count = 0;
2965 		}
2966 
2967 		wr = wr->next;
2968 	}
2969 
2970 	if (count)
2971 		bnxt_qplib_post_recv_db(&qp->qplib_qp);
2972 
2973 	spin_unlock_irqrestore(&qp->rq_lock, flags);
2974 
2975 	return rc;
2976 }
2977 
2978 /* Completion Queues */
2979 int bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
2980 {
2981 	struct bnxt_qplib_chip_ctx *cctx;
2982 	struct bnxt_qplib_nq *nq;
2983 	struct bnxt_re_dev *rdev;
2984 	struct bnxt_re_cq *cq;
2985 
2986 	cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
2987 	rdev = cq->rdev;
2988 	nq = cq->qplib_cq.nq;
2989 	cctx = rdev->chip_ctx;
2990 
2991 	if (cctx->modes.toggle_bits & BNXT_QPLIB_CQ_TOGGLE_BIT) {
2992 		free_page((unsigned long)cq->uctx_cq_page);
2993 		hash_del(&cq->hash_entry);
2994 	}
2995 	bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
2996 	ib_umem_release(cq->umem);
2997 
2998 	atomic_dec(&rdev->stats.res.cq_count);
2999 	nq->budget--;
3000 	kfree(cq->cql);
3001 	return 0;
3002 }
3003 
3004 int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
3005 		      struct uverbs_attr_bundle *attrs)
3006 {
3007 	struct bnxt_re_cq *cq = container_of(ibcq, struct bnxt_re_cq, ib_cq);
3008 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibcq->device, ibdev);
3009 	struct ib_udata *udata = &attrs->driver_udata;
3010 	struct bnxt_re_ucontext *uctx =
3011 		rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
3012 	struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
3013 	struct bnxt_qplib_chip_ctx *cctx;
3014 	struct bnxt_qplib_nq *nq = NULL;
3015 	unsigned int nq_alloc_cnt;
3016 	int cqe = attr->cqe;
3017 	int rc, entries;
3018 	u32 active_cqs;
3019 
3020 	if (attr->flags)
3021 		return -EOPNOTSUPP;
3022 
3023 	/* Validate CQ fields */
3024 	if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
3025 		ibdev_err(&rdev->ibdev, "Failed to create CQ -max exceeded");
3026 		return -EINVAL;
3027 	}
3028 
3029 	cq->rdev = rdev;
3030 	cctx = rdev->chip_ctx;
3031 	cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq);
3032 
3033 	entries = bnxt_re_init_depth(cqe + 1, uctx);
3034 	if (entries > dev_attr->max_cq_wqes + 1)
3035 		entries = dev_attr->max_cq_wqes + 1;
3036 
3037 	cq->qplib_cq.sg_info.pgsize = PAGE_SIZE;
3038 	cq->qplib_cq.sg_info.pgshft = PAGE_SHIFT;
3039 	if (udata) {
3040 		struct bnxt_re_cq_req req;
3041 		if (ib_copy_from_udata(&req, udata, sizeof(req))) {
3042 			rc = -EFAULT;
3043 			goto fail;
3044 		}
3045 
3046 		cq->umem = ib_umem_get(&rdev->ibdev, req.cq_va,
3047 				       entries * sizeof(struct cq_base),
3048 				       IB_ACCESS_LOCAL_WRITE);
3049 		if (IS_ERR(cq->umem)) {
3050 			rc = PTR_ERR(cq->umem);
3051 			goto fail;
3052 		}
3053 		cq->qplib_cq.sg_info.umem = cq->umem;
3054 		cq->qplib_cq.dpi = &uctx->dpi;
3055 	} else {
3056 		cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
3057 		cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe),
3058 				  GFP_KERNEL);
3059 		if (!cq->cql) {
3060 			rc = -ENOMEM;
3061 			goto fail;
3062 		}
3063 
3064 		cq->qplib_cq.dpi = &rdev->dpi_privileged;
3065 	}
3066 	/*
3067 	 * Allocate the NQ in a round-robin fashion. nq_alloc_cnt is
3068 	 * used for getting the NQ index.
3069 	 */
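	/* For example, with num_msix = 9 there would be 8 usable NQs and the
	 * CQ binds to nq[nq_alloc_cnt % 8] (one vector is presumably reserved
	 * for the control path, hence num_msix - 1).
	 */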
3070 	nq_alloc_cnt = atomic_inc_return(&rdev->nq_alloc_cnt);
3071 	nq = &rdev->nqr->nq[nq_alloc_cnt % (rdev->nqr->num_msix - 1)];
3072 	cq->qplib_cq.max_wqe = entries;
3073 	cq->qplib_cq.cnq_hw_ring_id = nq->ring_id;
3074 	cq->qplib_cq.nq	= nq;
3075 
3076 	rc = bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq);
3077 	if (rc) {
3078 		ibdev_err(&rdev->ibdev, "Failed to create HW CQ");
3079 		goto fail;
3080 	}
3081 
3082 	cq->ib_cq.cqe = entries;
3083 	cq->cq_period = cq->qplib_cq.period;
3084 	nq->budget++;
3085 
3086 	active_cqs = atomic_inc_return(&rdev->stats.res.cq_count);
3087 	if (active_cqs > rdev->stats.res.cq_watermark)
3088 		rdev->stats.res.cq_watermark = active_cqs;
3089 	spin_lock_init(&cq->cq_lock);
3090 
3091 	if (udata) {
3092 		struct bnxt_re_cq_resp resp = {};
3093 
3094 		if (cctx->modes.toggle_bits & BNXT_QPLIB_CQ_TOGGLE_BIT) {
3095 			hash_add(rdev->cq_hash, &cq->hash_entry, cq->qplib_cq.id);
3096 			/* Allocate a page */
3097 			cq->uctx_cq_page = (void *)get_zeroed_page(GFP_KERNEL);
3098 			if (!cq->uctx_cq_page) {
3099 				rc = -ENOMEM;
3100 				goto c2fail;
3101 			}
3102 			resp.comp_mask |= BNXT_RE_CQ_TOGGLE_PAGE_SUPPORT;
3103 		}
3104 		resp.cqid = cq->qplib_cq.id;
3105 		resp.tail = cq->qplib_cq.hwq.cons;
3106 		resp.phase = cq->qplib_cq.period;
3107 		resp.rsvd = 0;
3108 		rc = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen));
3109 		if (rc) {
3110 			ibdev_err(&rdev->ibdev, "Failed to copy CQ udata");
3111 			bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
3112 			goto free_mem;
3113 		}
3114 	}
3115 
3116 	return 0;
3117 
3118 free_mem:
3119 	free_page((unsigned long)cq->uctx_cq_page);
3120 c2fail:
3121 	ib_umem_release(cq->umem);
3122 fail:
3123 	kfree(cq->cql);
3124 	return rc;
3125 }
3126 
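/* CQ resize is two-phased: bnxt_re_resize_cq() pins the new user buffer and
 * issues the firmware resize command, and this completion handler then swaps
 * in the new umem and clears the resize bookkeeping.
 */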
3127 static void bnxt_re_resize_cq_complete(struct bnxt_re_cq *cq)
3128 {
3129 	struct bnxt_re_dev *rdev = cq->rdev;
3130 
3131 	bnxt_qplib_resize_cq_complete(&rdev->qplib_res, &cq->qplib_cq);
3132 
3133 	cq->qplib_cq.max_wqe = cq->resize_cqe;
3134 	if (cq->resize_umem) {
3135 		ib_umem_release(cq->umem);
3136 		cq->umem = cq->resize_umem;
3137 		cq->resize_umem = NULL;
3138 		cq->resize_cqe = 0;
3139 	}
3140 }
3141 
3142 int bnxt_re_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
3143 {
3144 	struct bnxt_qplib_sg_info sg_info = {};
3145 	struct bnxt_qplib_dpi *orig_dpi = NULL;
3146 	struct bnxt_qplib_dev_attr *dev_attr;
3147 	struct bnxt_re_ucontext *uctx = NULL;
3148 	struct bnxt_re_resize_cq_req req;
3149 	struct bnxt_re_dev *rdev;
3150 	struct bnxt_re_cq *cq;
3151 	int rc, entries;
3152 
3153 	cq =  container_of(ibcq, struct bnxt_re_cq, ib_cq);
3154 	rdev = cq->rdev;
3155 	dev_attr = rdev->dev_attr;
3156 	if (!ibcq->uobject) {
3157 		ibdev_err(&rdev->ibdev, "Kernel CQ Resize not supported");
3158 		return -EOPNOTSUPP;
3159 	}
3160 
3161 	if (cq->resize_umem) {
3162 		ibdev_err(&rdev->ibdev, "Resize CQ %#x failed - Busy",
3163 			  cq->qplib_cq.id);
3164 		return -EBUSY;
3165 	}
3166 
3167 	/* Check the requested cq depth out of supported depth */
3168 	if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
3169 		ibdev_err(&rdev->ibdev, "Resize CQ %#x failed - out of range cqe %d",
3170 			  cq->qplib_cq.id, cqe);
3171 		return -EINVAL;
3172 	}
3173 
3174 	uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
3175 	entries = bnxt_re_init_depth(cqe + 1, uctx);
3176 	if (entries > dev_attr->max_cq_wqes + 1)
3177 		entries = dev_attr->max_cq_wqes + 1;
3178 
3179 	/* uverbs consumer */
3180 	if (ib_copy_from_udata(&req, udata, sizeof(req))) {
3181 		rc = -EFAULT;
3182 		goto fail;
3183 	}
3184 
3185 	cq->resize_umem = ib_umem_get(&rdev->ibdev, req.cq_va,
3186 				      entries * sizeof(struct cq_base),
3187 				      IB_ACCESS_LOCAL_WRITE);
3188 	if (IS_ERR(cq->resize_umem)) {
3189 		rc = PTR_ERR(cq->resize_umem);
3190 		cq->resize_umem = NULL;
3191 		ibdev_err(&rdev->ibdev, "%s: ib_umem_get failed! rc = %d\n",
3192 			  __func__, rc);
3193 		goto fail;
3194 	}
3195 	cq->resize_cqe = entries;
3196 	memcpy(&sg_info, &cq->qplib_cq.sg_info, sizeof(sg_info));
3197 	orig_dpi = cq->qplib_cq.dpi;
3198 
3199 	cq->qplib_cq.sg_info.umem = cq->resize_umem;
3200 	cq->qplib_cq.sg_info.pgsize = PAGE_SIZE;
3201 	cq->qplib_cq.sg_info.pgshft = PAGE_SHIFT;
3202 	cq->qplib_cq.dpi = &uctx->dpi;
3203 
3204 	rc = bnxt_qplib_resize_cq(&rdev->qplib_res, &cq->qplib_cq, entries);
3205 	if (rc) {
3206 		ibdev_err(&rdev->ibdev, "Resize HW CQ %#x failed!",
3207 			  cq->qplib_cq.id);
3208 		goto fail;
3209 	}
3210 
3211 	cq->ib_cq.cqe = cq->resize_cqe;
3212 	atomic_inc(&rdev->stats.res.resize_count);
3213 
3214 	return 0;
3215 
3216 fail:
3217 	if (cq->resize_umem) {
3218 		ib_umem_release(cq->resize_umem);
3219 		cq->resize_umem = NULL;
3220 		cq->resize_cqe = 0;
3221 		memcpy(&cq->qplib_cq.sg_info, &sg_info, sizeof(sg_info));
3222 		cq->qplib_cq.dpi = orig_dpi;
3223 	}
3224 	return rc;
3225 }
3226 
3227 static u8 __req_to_ib_wc_status(u8 qstatus)
3228 {
3229 	switch (qstatus) {
3230 	case CQ_REQ_STATUS_OK:
3231 		return IB_WC_SUCCESS;
3232 	case CQ_REQ_STATUS_BAD_RESPONSE_ERR:
3233 		return IB_WC_BAD_RESP_ERR;
3234 	case CQ_REQ_STATUS_LOCAL_LENGTH_ERR:
3235 		return IB_WC_LOC_LEN_ERR;
3236 	case CQ_REQ_STATUS_LOCAL_QP_OPERATION_ERR:
3237 		return IB_WC_LOC_QP_OP_ERR;
3238 	case CQ_REQ_STATUS_LOCAL_PROTECTION_ERR:
3239 		return IB_WC_LOC_PROT_ERR;
3240 	case CQ_REQ_STATUS_MEMORY_MGT_OPERATION_ERR:
3241 		return IB_WC_GENERAL_ERR;
3242 	case CQ_REQ_STATUS_REMOTE_INVALID_REQUEST_ERR:
3243 		return IB_WC_REM_INV_REQ_ERR;
3244 	case CQ_REQ_STATUS_REMOTE_ACCESS_ERR:
3245 		return IB_WC_REM_ACCESS_ERR;
3246 	case CQ_REQ_STATUS_REMOTE_OPERATION_ERR:
3247 		return IB_WC_REM_OP_ERR;
3248 	case CQ_REQ_STATUS_RNR_NAK_RETRY_CNT_ERR:
3249 		return IB_WC_RNR_RETRY_EXC_ERR;
3250 	case CQ_REQ_STATUS_TRANSPORT_RETRY_CNT_ERR:
3251 		return IB_WC_RETRY_EXC_ERR;
3252 	case CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR:
3253 		return IB_WC_WR_FLUSH_ERR;
3254 	default:
3255 		return IB_WC_GENERAL_ERR;
3256 	}
3257 	return 0;
3258 }
3259 
3260 static u8 __rawqp1_to_ib_wc_status(u8 qstatus)
3261 {
3262 	switch (qstatus) {
3263 	case CQ_RES_RAWETH_QP1_STATUS_OK:
3264 		return IB_WC_SUCCESS;
3265 	case CQ_RES_RAWETH_QP1_STATUS_LOCAL_ACCESS_ERROR:
3266 		return IB_WC_LOC_ACCESS_ERR;
3267 	case CQ_RES_RAWETH_QP1_STATUS_HW_LOCAL_LENGTH_ERR:
3268 		return IB_WC_LOC_LEN_ERR;
3269 	case CQ_RES_RAWETH_QP1_STATUS_LOCAL_PROTECTION_ERR:
3270 		return IB_WC_LOC_PROT_ERR;
3271 	case CQ_RES_RAWETH_QP1_STATUS_LOCAL_QP_OPERATION_ERR:
3272 		return IB_WC_LOC_QP_OP_ERR;
3273 	case CQ_RES_RAWETH_QP1_STATUS_MEMORY_MGT_OPERATION_ERR:
3274 		return IB_WC_GENERAL_ERR;
3275 	case CQ_RES_RAWETH_QP1_STATUS_WORK_REQUEST_FLUSHED_ERR:
3276 		return IB_WC_WR_FLUSH_ERR;
3277 	case CQ_RES_RAWETH_QP1_STATUS_HW_FLUSH_ERR:
3278 		return IB_WC_WR_FLUSH_ERR;
3279 	default:
3280 		return IB_WC_GENERAL_ERR;
3281 	}
3282 }
3283 
3284 static u8 __rc_to_ib_wc_status(u8 qstatus)
3285 {
3286 	switch (qstatus) {
3287 	case CQ_RES_RC_STATUS_OK:
3288 		return IB_WC_SUCCESS;
3289 	case CQ_RES_RC_STATUS_LOCAL_ACCESS_ERROR:
3290 		return IB_WC_LOC_ACCESS_ERR;
3291 	case CQ_RES_RC_STATUS_LOCAL_LENGTH_ERR:
3292 		return IB_WC_LOC_LEN_ERR;
3293 	case CQ_RES_RC_STATUS_LOCAL_PROTECTION_ERR:
3294 		return IB_WC_LOC_PROT_ERR;
3295 	case CQ_RES_RC_STATUS_LOCAL_QP_OPERATION_ERR:
3296 		return IB_WC_LOC_QP_OP_ERR;
3297 	case CQ_RES_RC_STATUS_MEMORY_MGT_OPERATION_ERR:
3298 		return IB_WC_GENERAL_ERR;
3299 	case CQ_RES_RC_STATUS_REMOTE_INVALID_REQUEST_ERR:
3300 		return IB_WC_REM_INV_REQ_ERR;
3301 	case CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR:
3302 		return IB_WC_WR_FLUSH_ERR;
3303 	case CQ_RES_RC_STATUS_HW_FLUSH_ERR:
3304 		return IB_WC_WR_FLUSH_ERR;
3305 	default:
3306 		return IB_WC_GENERAL_ERR;
3307 	}
3308 }
3309 
3310 static void bnxt_re_process_req_wc(struct ib_wc *wc, struct bnxt_qplib_cqe *cqe)
3311 {
3312 	switch (cqe->type) {
3313 	case BNXT_QPLIB_SWQE_TYPE_SEND:
3314 		wc->opcode = IB_WC_SEND;
3315 		break;
3316 	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
3317 		wc->opcode = IB_WC_SEND;
3318 		wc->wc_flags |= IB_WC_WITH_IMM;
3319 		break;
3320 	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
3321 		wc->opcode = IB_WC_SEND;
3322 		wc->wc_flags |= IB_WC_WITH_INVALIDATE;
3323 		break;
3324 	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
3325 		wc->opcode = IB_WC_RDMA_WRITE;
3326 		break;
3327 	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
3328 		wc->opcode = IB_WC_RDMA_WRITE;
3329 		wc->wc_flags |= IB_WC_WITH_IMM;
3330 		break;
3331 	case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
3332 		wc->opcode = IB_WC_RDMA_READ;
3333 		break;
3334 	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
3335 		wc->opcode = IB_WC_COMP_SWAP;
3336 		break;
3337 	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
3338 		wc->opcode = IB_WC_FETCH_ADD;
3339 		break;
3340 	case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
3341 		wc->opcode = IB_WC_LOCAL_INV;
3342 		break;
3343 	case BNXT_QPLIB_SWQE_TYPE_REG_MR:
3344 		wc->opcode = IB_WC_REG_MR;
3345 		break;
3346 	default:
3347 		wc->opcode = IB_WC_SEND;
3348 		break;
3349 	}
3350 
3351 	wc->status = __req_to_ib_wc_status(cqe->status);
3352 }
3353 
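/* Classify a packet received on the raw-ethernet QP1: returns RoCE v1,
 * RoCE v2 IPv4 or RoCE v2 IPv6, or -1 if it is not a RoCE packet.
 */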
3354 static int bnxt_re_check_packet_type(u16 raweth_qp1_flags,
3355 				     u16 raweth_qp1_flags2)
3356 {
3357 	bool is_ipv6 = false, is_ipv4 = false;
3358 
3359 	/* raweth_qp1_flags Bit 9-6 indicates itype */
3360 	if ((raweth_qp1_flags & CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
3361 	    != CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
3362 		return -1;
3363 
3364 	if (raweth_qp1_flags2 &
3365 	    CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_CS_CALC &&
3366 	    raweth_qp1_flags2 &
3367 	    CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_L4_CS_CALC) {
3368 		/* raweth_qp1_flags2 Bit 8 indicates ip_type: 0 - v4, 1 - v6 */
3369 		(raweth_qp1_flags2 &
3370 		 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_TYPE) ?
3371 			(is_ipv6 = true) : (is_ipv4 = true);
3372 		return ((is_ipv6) ?
3373 			 BNXT_RE_ROCEV2_IPV6_PACKET :
3374 			 BNXT_RE_ROCEV2_IPV4_PACKET);
3375 	} else {
3376 		return BNXT_RE_ROCE_V1_PACKET;
3377 	}
3378 }
3379 
3380 static int bnxt_re_to_ib_nw_type(int nw_type)
3381 {
3382 	u8 nw_hdr_type = 0xFF;
3383 
3384 	switch (nw_type) {
3385 	case BNXT_RE_ROCE_V1_PACKET:
3386 		nw_hdr_type = RDMA_NETWORK_ROCE_V1;
3387 		break;
3388 	case BNXT_RE_ROCEV2_IPV4_PACKET:
3389 		nw_hdr_type = RDMA_NETWORK_IPV4;
3390 		break;
3391 	case BNXT_RE_ROCEV2_IPV6_PACKET:
3392 		nw_hdr_type = RDMA_NETWORK_IPV6;
3393 		break;
3394 	}
3395 	return nw_hdr_type;
3396 }
3397 
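/* Inspect the received QP1 header buffer to decide whether this is a
 * loopback RoCE packet: the destination MAC differs from the interface MAC
 * and the ethertype is IBoE, or the UDP destination port is
 * ROCE_V2_UDP_DPORT.
 */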
3398 static bool bnxt_re_is_loopback_packet(struct bnxt_re_dev *rdev,
3399 				       void *rq_hdr_buf)
3400 {
3401 	u8 *tmp_buf = NULL;
3402 	struct ethhdr *eth_hdr;
3403 	u16 eth_type;
3404 	bool rc = false;
3405 
3406 	tmp_buf = (u8 *)rq_hdr_buf;
3407 	/*
3408 	 * If the destination MAC is not the same as the interface MAC, this
3409 	 * could be a loopback or multicast address; check whether it is a
3410 	 * loopback packet.
3411 	 */
3412 	if (!ether_addr_equal(tmp_buf, rdev->netdev->dev_addr)) {
3413 		tmp_buf += 4;
3414 		/* Check the ether type */
3415 		eth_hdr = (struct ethhdr *)tmp_buf;
3416 		eth_type = ntohs(eth_hdr->h_proto);
3417 		switch (eth_type) {
3418 		case ETH_P_IBOE:
3419 			rc = true;
3420 			break;
3421 		case ETH_P_IP:
3422 		case ETH_P_IPV6: {
3423 			u32 len;
3424 			struct udphdr *udp_hdr;
3425 
3426 			len = (eth_type == ETH_P_IP ? sizeof(struct iphdr) :
3427 						      sizeof(struct ipv6hdr));
3428 			tmp_buf += sizeof(struct ethhdr) + len;
3429 			udp_hdr = (struct udphdr *)tmp_buf;
3430 			if (ntohs(udp_hdr->dest) ==
3431 				    ROCE_V2_UDP_DPORT)
3432 				rc = true;
3433 			break;
3434 			}
3435 		default:
3436 			break;
3437 		}
3438 	}
3439 
3440 	return rc;
3441 }
3442 
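/* Forward a packet received on the raw QP1 to the shadow GSI QP: stash the
 * original CQE, post a receive buffer on the shadow QP and then send the
 * QP1 header/payload to it so it can be completed on the GSI path.
 */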
3443 static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *gsi_qp,
3444 					 struct bnxt_qplib_cqe *cqe)
3445 {
3446 	struct bnxt_re_dev *rdev = gsi_qp->rdev;
3447 	struct bnxt_re_sqp_entries *sqp_entry = NULL;
3448 	struct bnxt_re_qp *gsi_sqp = rdev->gsi_ctx.gsi_sqp;
3449 	dma_addr_t shrq_hdr_buf_map;
3450 	struct ib_sge s_sge[2] = {};
3451 	struct ib_sge r_sge[2] = {};
3452 	struct bnxt_re_ah *gsi_sah;
3453 	struct ib_recv_wr rwr = {};
3454 	dma_addr_t rq_hdr_buf_map;
3455 	struct ib_ud_wr udwr = {};
3456 	struct ib_send_wr *swr;
3457 	u32 skip_bytes = 0;
3458 	int pkt_type = 0;
3459 	void *rq_hdr_buf;
3460 	u32 offset = 0;
3461 	u32 tbl_idx;
3462 	int rc;
3463 
3464 	swr = &udwr.wr;
3465 	tbl_idx = cqe->wr_id;
3466 
3467 	rq_hdr_buf = gsi_qp->qplib_qp.rq_hdr_buf +
3468 			(tbl_idx * gsi_qp->qplib_qp.rq_hdr_buf_size);
3469 	rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&gsi_qp->qplib_qp,
3470 							  tbl_idx);
3471 
3472 	/* Shadow QP header buffer */
3473 	shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&gsi_sqp->qplib_qp,
3474 							    tbl_idx);
3475 	sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx];
3476 
3477 	/* Store this cqe */
3478 	memcpy(&sqp_entry->cqe, cqe, sizeof(struct bnxt_qplib_cqe));
3479 	sqp_entry->qp1_qp = gsi_qp;
3480 
3481 	/* Find packet type from the cqe */
3482 
3483 	pkt_type = bnxt_re_check_packet_type(cqe->raweth_qp1_flags,
3484 					     cqe->raweth_qp1_flags2);
3485 	if (pkt_type < 0) {
3486 		ibdev_err(&rdev->ibdev, "Invalid packet\n");
3487 		return -EINVAL;
3488 	}
3489 
3490 	/* Adjust the offset for the user buffer and post in the rq */
3491 
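	/* An IPv4 header is 20 bytes shorter than the 40-byte GRH/IPv6
	 * header, so use a 20-byte offset for RoCE v2 IPv4 packets.
	 */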
3492 	if (pkt_type == BNXT_RE_ROCEV2_IPV4_PACKET)
3493 		offset = 20;
3494 
3495 	/*
3496 	 * QP1 loopback packet has 4 bytes of internal header before
3497 	 * ether header. Skip these four bytes.
3498 	 */
3499 	if (bnxt_re_is_loopback_packet(rdev, rq_hdr_buf))
3500 		skip_bytes = 4;
3501 
3502 	/* First send SGE. Skip the ether header */
3503 	s_sge[0].addr = rq_hdr_buf_map + BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE
3504 			+ skip_bytes;
3505 	s_sge[0].lkey = 0xFFFFFFFF;
3506 	s_sge[0].length = offset ? BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4 :
3507 				BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
3508 
3509 	/* Second Send SGE */
3510 	s_sge[1].addr = s_sge[0].addr + s_sge[0].length +
3511 			BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE;
3512 	if (pkt_type != BNXT_RE_ROCE_V1_PACKET)
3513 		s_sge[1].addr += 8;
3514 	s_sge[1].lkey = 0xFFFFFFFF;
3515 	s_sge[1].length = 256;
3516 
3517 	/* First recv SGE */
3518 
3519 	r_sge[0].addr = shrq_hdr_buf_map;
3520 	r_sge[0].lkey = 0xFFFFFFFF;
3521 	r_sge[0].length = 40;
3522 
3523 	r_sge[1].addr = sqp_entry->sge.addr + offset;
3524 	r_sge[1].lkey = sqp_entry->sge.lkey;
3525 	r_sge[1].length = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6 + 256 - offset;
3526 
3527 	/* Create receive work request */
3528 	rwr.num_sge = 2;
3529 	rwr.sg_list = r_sge;
3530 	rwr.wr_id = tbl_idx;
3531 	rwr.next = NULL;
3532 
3533 	rc = bnxt_re_post_recv_shadow_qp(rdev, gsi_sqp, &rwr);
3534 	if (rc) {
3535 		ibdev_err(&rdev->ibdev,
3536 			  "Failed to post Rx buffers to shadow QP");
3537 		return -ENOMEM;
3538 	}
3539 
3540 	swr->num_sge = 2;
3541 	swr->sg_list = s_sge;
3542 	swr->wr_id = tbl_idx;
3543 	swr->opcode = IB_WR_SEND;
3544 	swr->next = NULL;
3545 	gsi_sah = rdev->gsi_ctx.gsi_sah;
3546 	udwr.ah = &gsi_sah->ib_ah;
3547 	udwr.remote_qpn = gsi_sqp->qplib_qp.id;
3548 	udwr.remote_qkey = gsi_sqp->qplib_qp.qkey;
3549 
3550 	/* Post the received data on the shadow QP's send queue */
3551 	return bnxt_re_post_send_shadow_qp(rdev, gsi_sqp, swr);
3552 }
3553 
3554 static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc,
3555 					  struct bnxt_qplib_cqe *cqe)
3556 {
3557 	wc->opcode = IB_WC_RECV;
3558 	wc->status = __rawqp1_to_ib_wc_status(cqe->status);
3559 	wc->wc_flags |= IB_WC_GRH;
3560 }
3561 
3562 static bool bnxt_re_check_if_vlan_valid(struct bnxt_re_dev *rdev,
3563 					u16 vlan_id)
3564 {
3565 	/*
3566 	 * Check if the VLAN is configured in the host. If not configured, it
3567 	 * can be a transparent VLAN, so don't report the VLAN id.
3568 	 */
3569 	if (!__vlan_find_dev_deep_rcu(rdev->netdev,
3570 				      htons(ETH_P_8021Q), vlan_id))
3571 		return false;
3572 	return true;
3573 }
3574 
3575 static bool bnxt_re_is_vlan_pkt(struct bnxt_qplib_cqe *orig_cqe,
3576 				u16 *vid, u8 *sl)
3577 {
3578 	bool ret = false;
3579 	u32 metadata;
3580 	u16 tpid;
3581 
3582 	metadata = orig_cqe->raweth_qp1_metadata;
3583 	if (orig_cqe->raweth_qp1_flags2 &
3584 		CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_VLAN) {
3585 		tpid = ((metadata &
3586 			 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_MASK) >>
3587 			 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_SFT);
3588 		if (tpid == ETH_P_8021Q) {
3589 			*vid = metadata &
3590 			       CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_VID_MASK;
3591 			*sl = (metadata &
3592 			       CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_MASK) >>
3593 			       CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_SFT;
3594 			ret = true;
3595 		}
3596 	}
3597 
3598 	return ret;
3599 }
3600 
3601 static void bnxt_re_process_res_rc_wc(struct ib_wc *wc,
3602 				      struct bnxt_qplib_cqe *cqe)
3603 {
3604 	wc->opcode = IB_WC_RECV;
3605 	wc->status = __rc_to_ib_wc_status(cqe->status);
3606 
3607 	if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
3608 		wc->wc_flags |= IB_WC_WITH_IMM;
3609 	if (cqe->flags & CQ_RES_RC_FLAGS_INV)
3610 		wc->wc_flags |= IB_WC_WITH_INVALIDATE;
3611 	if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
3612 	    (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
3613 		wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
3614 }
3615 
3616 static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *gsi_sqp,
3617 					     struct ib_wc *wc,
3618 					     struct bnxt_qplib_cqe *cqe)
3619 {
3620 	struct bnxt_re_dev *rdev = gsi_sqp->rdev;
3621 	struct bnxt_re_qp *gsi_qp = NULL;
3622 	struct bnxt_qplib_cqe *orig_cqe = NULL;
3623 	struct bnxt_re_sqp_entries *sqp_entry = NULL;
3624 	int nw_type;
3625 	u32 tbl_idx;
3626 	u16 vlan_id;
3627 	u8 sl;
3628 
3629 	tbl_idx = cqe->wr_id;
3630 
3631 	sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx];
3632 	gsi_qp = sqp_entry->qp1_qp;
3633 	orig_cqe = &sqp_entry->cqe;
3634 
3635 	wc->wr_id = sqp_entry->wrid;
3636 	wc->byte_len = orig_cqe->length;
3637 	wc->qp = &gsi_qp->ib_qp;
3638 
3639 	wc->ex.imm_data = cpu_to_be32(orig_cqe->immdata);
3640 	wc->src_qp = orig_cqe->src_qp;
3641 	memcpy(wc->smac, orig_cqe->smac, ETH_ALEN);
3642 	if (bnxt_re_is_vlan_pkt(orig_cqe, &vlan_id, &sl)) {
3643 		if (bnxt_re_check_if_vlan_valid(rdev, vlan_id)) {
3644 			wc->vlan_id = vlan_id;
3645 			wc->sl = sl;
3646 			wc->wc_flags |= IB_WC_WITH_VLAN;
3647 		}
3648 	}
3649 	wc->port_num = 1;
3650 	wc->vendor_err = orig_cqe->status;
3651 
3652 	wc->opcode = IB_WC_RECV;
3653 	wc->status = __rawqp1_to_ib_wc_status(orig_cqe->status);
3654 	wc->wc_flags |= IB_WC_GRH;
3655 
3656 	nw_type = bnxt_re_check_packet_type(orig_cqe->raweth_qp1_flags,
3657 					    orig_cqe->raweth_qp1_flags2);
3658 	if (nw_type >= 0) {
3659 		wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
3660 		wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
3661 	}
3662 }
3663 
3664 static void bnxt_re_process_res_ud_wc(struct bnxt_re_qp *qp,
3665 				      struct ib_wc *wc,
3666 				      struct bnxt_qplib_cqe *cqe)
3667 {
3668 	struct bnxt_re_dev *rdev;
3669 	u16 vlan_id = 0;
3670 	u8 nw_type;
3671 
3672 	rdev = qp->rdev;
3673 	wc->opcode = IB_WC_RECV;
3674 	wc->status = __rc_to_ib_wc_status(cqe->status);
3675 
3676 	if (cqe->flags & CQ_RES_UD_FLAGS_IMM)
3677 		wc->wc_flags |= IB_WC_WITH_IMM;
3678 	/* report only on GSI QP for Thor */
3679 	if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_GSI) {
3680 		wc->wc_flags |= IB_WC_GRH;
3681 		memcpy(wc->smac, cqe->smac, ETH_ALEN);
3682 		wc->wc_flags |= IB_WC_WITH_SMAC;
3683 		if (cqe->flags & CQ_RES_UD_FLAGS_META_FORMAT_VLAN) {
3684 			vlan_id = (cqe->cfa_meta & 0xFFF);
3685 		}
3686 		/* Mark only if vlan_id is non zero */
3687 		if (vlan_id && bnxt_re_check_if_vlan_valid(rdev, vlan_id)) {
3688 			wc->vlan_id = vlan_id;
3689 			wc->wc_flags |= IB_WC_WITH_VLAN;
3690 		}
3691 		nw_type = (cqe->flags & CQ_RES_UD_FLAGS_ROCE_IP_VER_MASK) >>
3692 			   CQ_RES_UD_FLAGS_ROCE_IP_VER_SFT;
3693 		wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
3694 		wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
3695 	}
3696 
3697 }
3698 
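/* Post a fence memory-window bind as a "phantom" WQE to consume one
 * send-queue slot; called from the poll path when the qplib layer flags
 * that a phantom send is required.
 */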
3699 static int send_phantom_wqe(struct bnxt_re_qp *qp)
3700 {
3701 	struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp;
3702 	unsigned long flags;
3703 	int rc;
3704 
3705 	spin_lock_irqsave(&qp->sq_lock, flags);
3706 
3707 	rc = bnxt_re_bind_fence_mw(lib_qp);
3708 	if (!rc) {
3709 		lib_qp->sq.phantom_wqe_cnt++;
3710 		ibdev_dbg(&qp->rdev->ibdev,
3711 			  "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n",
3712 			  lib_qp->id, lib_qp->sq.hwq.prod,
3713 			  HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq),
3714 			  lib_qp->sq.phantom_wqe_cnt);
3715 	}
3716 
3717 	spin_unlock_irqrestore(&qp->sq_lock, flags);
3718 	return rc;
3719 }
3720 
3721 int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
3722 {
3723 	struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
3724 	struct bnxt_re_qp *qp, *sh_qp;
3725 	struct bnxt_qplib_cqe *cqe;
3726 	int i, ncqe, budget;
3727 	struct bnxt_qplib_q *sq;
3728 	struct bnxt_qplib_qp *lib_qp;
3729 	u32 tbl_idx;
3730 	struct bnxt_re_sqp_entries *sqp_entry = NULL;
3731 	unsigned long flags;
3732 
3733 	/* User CQ; the only processing we do is to
3734 	 * complete any pending CQ resize operation.
3735 	 */
3736 	if (cq->umem) {
3737 		if (cq->resize_umem)
3738 			bnxt_re_resize_cq_complete(cq);
3739 		return 0;
3740 	}
3741 
3742 	spin_lock_irqsave(&cq->cq_lock, flags);
3743 	budget = min_t(u32, num_entries, cq->max_cql);
3744 	num_entries = budget;
3745 	if (!cq->cql) {
3746 		ibdev_err(&cq->rdev->ibdev, "POLL CQ : no CQL to use");
3747 		goto exit;
3748 	}
3749 	cqe = &cq->cql[0];
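	/* Drain up to 'budget' CQEs from the hardware CQ, topping up from the
	 * flush list if needed, and translate each qplib CQE into an ib_wc.
	 */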
3750 	while (budget) {
3751 		lib_qp = NULL;
3752 		ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp);
3753 		if (lib_qp) {
3754 			sq = &lib_qp->sq;
3755 			if (sq->send_phantom) {
3756 				qp = container_of(lib_qp,
3757 						  struct bnxt_re_qp, qplib_qp);
3758 				if (send_phantom_wqe(qp) == -ENOMEM)
3759 					ibdev_err(&cq->rdev->ibdev,
3760 						  "Phantom failed! Scheduled to send again\n");
3761 				else
3762 					sq->send_phantom = false;
3763 			}
3764 		}
3765 		if (ncqe < budget)
3766 			ncqe += bnxt_qplib_process_flush_list(&cq->qplib_cq,
3767 							      cqe + ncqe,
3768 							      budget - ncqe);
3769 
3770 		if (!ncqe)
3771 			break;
3772 
3773 		for (i = 0; i < ncqe; i++, cqe++) {
3774 			/* Transcribe each qplib_wqe back to ib_wc */
3775 			memset(wc, 0, sizeof(*wc));
3776 
3777 			wc->wr_id = cqe->wr_id;
3778 			wc->byte_len = cqe->length;
3779 			qp = container_of
3780 				((struct bnxt_qplib_qp *)
3781 				 (unsigned long)(cqe->qp_handle),
3782 				 struct bnxt_re_qp, qplib_qp);
3783 			wc->qp = &qp->ib_qp;
3784 			if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
3785 				wc->ex.imm_data = cpu_to_be32(cqe->immdata);
3786 			else
3787 				wc->ex.invalidate_rkey = cqe->invrkey;
3788 			wc->src_qp = cqe->src_qp;
3789 			memcpy(wc->smac, cqe->smac, ETH_ALEN);
3790 			wc->port_num = 1;
3791 			wc->vendor_err = cqe->status;
3792 
3793 			switch (cqe->opcode) {
3794 			case CQ_BASE_CQE_TYPE_REQ:
3795 				sh_qp = qp->rdev->gsi_ctx.gsi_sqp;
3796 				if (sh_qp &&
3797 				    qp->qplib_qp.id == sh_qp->qplib_qp.id) {
3798 					/* Handle this completion with
3799 					 * the stored completion
3800 					 */
3801 					memset(wc, 0, sizeof(*wc));
3802 					continue;
3803 				}
3804 				bnxt_re_process_req_wc(wc, cqe);
3805 				break;
3806 			case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
3807 				if (!cqe->status) {
3808 					int rc = 0;
3809 
3810 					rc = bnxt_re_process_raw_qp_pkt_rx
3811 								(qp, cqe);
3812 					if (!rc) {
3813 						memset(wc, 0, sizeof(*wc));
3814 						continue;
3815 					}
3816 					cqe->status = -1;
3817 				}
3818 				/* Errors need not be looped back.
3819 				 * But change the wr_id to the one
3820 				 * stored in the table
3821 				 */
3822 				tbl_idx = cqe->wr_id;
3823 				sqp_entry = &cq->rdev->gsi_ctx.sqp_tbl[tbl_idx];
3824 				wc->wr_id = sqp_entry->wrid;
3825 				bnxt_re_process_res_rawqp1_wc(wc, cqe);
3826 				break;
3827 			case CQ_BASE_CQE_TYPE_RES_RC:
3828 				bnxt_re_process_res_rc_wc(wc, cqe);
3829 				break;
3830 			case CQ_BASE_CQE_TYPE_RES_UD:
3831 				sh_qp = qp->rdev->gsi_ctx.gsi_sqp;
3832 				if (sh_qp &&
3833 				    qp->qplib_qp.id == sh_qp->qplib_qp.id) {
3834 					/* Handle this completion with
3835 					 * the stored completion
3836 					 */
3837 					if (cqe->status) {
3838 						continue;
3839 					} else {
3840 						bnxt_re_process_res_shadow_qp_wc
3841 								(qp, wc, cqe);
3842 						break;
3843 					}
3844 				}
3845 				bnxt_re_process_res_ud_wc(qp, wc, cqe);
3846 				break;
3847 			default:
3848 				ibdev_err(&cq->rdev->ibdev,
3849 					  "POLL CQ : type 0x%x not handled",
3850 					  cqe->opcode);
3851 				continue;
3852 			}
3853 			wc++;
3854 			budget--;
3855 		}
3856 	}
3857 exit:
3858 	spin_unlock_irqrestore(&cq->cq_lock, flags);
3859 	return num_entries - budget;
3860 }
3861 
3862 int bnxt_re_req_notify_cq(struct ib_cq *ib_cq,
3863 			  enum ib_cq_notify_flags ib_cqn_flags)
3864 {
3865 	struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
3866 	int type = 0, rc = 0;
3867 	unsigned long flags;
3868 
3869 	spin_lock_irqsave(&cq->cq_lock, flags);
3870 	/* Trigger on the very next completion */
3871 	if (ib_cqn_flags & IB_CQ_NEXT_COMP)
3872 		type = DBC_DBC_TYPE_CQ_ARMALL;
3873 	/* Trigger on the next solicited completion */
3874 	else if (ib_cqn_flags & IB_CQ_SOLICITED)
3875 		type = DBC_DBC_TYPE_CQ_ARMSE;
3876 
3877 	/* Poll to see if there are missed events */
3878 	if ((ib_cqn_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
3879 	    !(bnxt_qplib_is_cq_empty(&cq->qplib_cq))) {
3880 		rc = 1;
3881 		goto exit;
3882 	}
3883 	bnxt_qplib_req_notify_cq(&cq->qplib_cq, type);
3884 
3885 exit:
3886 	spin_unlock_irqrestore(&cq->cq_lock, flags);
3887 	return rc;
3888 }
3889 
3890 /* Memory Regions */
3891 struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
3892 {
3893 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3894 	struct bnxt_re_dev *rdev = pd->rdev;
3895 	struct bnxt_re_mr *mr;
3896 	u32 active_mrs;
3897 	int rc;
3898 
3899 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3900 	if (!mr)
3901 		return ERR_PTR(-ENOMEM);
3902 
3903 	mr->rdev = rdev;
3904 	mr->qplib_mr.pd = &pd->qplib_pd;
3905 	mr->qplib_mr.access_flags = __from_ib_access_flags(mr_access_flags);
3906 	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
3907 
3908 	if (mr_access_flags & IB_ACCESS_RELAXED_ORDERING)
3909 		bnxt_re_check_and_set_relaxed_ordering(rdev, &mr->qplib_mr);
3910 
3911 	/* Allocate and register 0 as the address */
3912 	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3913 	if (rc)
3914 		goto fail;
3915 
3916 	mr->qplib_mr.hwq.level = PBL_LVL_MAX;
3917 	mr->qplib_mr.total_size = -1; /* Infinite length */
3918 	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, NULL, 0,
3919 			       PAGE_SIZE);
3920 	if (rc)
3921 		goto fail_mr;
3922 
3923 	mr->ib_mr.lkey = mr->qplib_mr.lkey;
3924 	if (mr_access_flags & (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |
3925 			       IB_ACCESS_REMOTE_ATOMIC))
3926 		mr->ib_mr.rkey = mr->ib_mr.lkey;
3927 	active_mrs = atomic_inc_return(&rdev->stats.res.mr_count);
3928 	if (active_mrs > rdev->stats.res.mr_watermark)
3929 		rdev->stats.res.mr_watermark = active_mrs;
3930 
3931 	return &mr->ib_mr;
3932 
3933 fail_mr:
3934 	bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3935 fail:
3936 	kfree(mr);
3937 	return ERR_PTR(rc);
3938 }
3939 
3940 int bnxt_re_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
3941 {
3942 	struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3943 	struct bnxt_re_dev *rdev = mr->rdev;
3944 	int rc;
3945 
3946 	rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3947 	if (rc) {
3948 		ibdev_err(&rdev->ibdev, "Dereg MR failed: %#x\n", rc);
3949 		return rc;
3950 	}
3951 
3952 	if (mr->pages) {
3953 		rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
3954 							&mr->qplib_frpl);
3955 		kfree(mr->pages);
3956 		mr->npages = 0;
3957 		mr->pages = NULL;
3958 	}
3959 	ib_umem_release(mr->ib_umem);
3960 
3961 	kfree(mr);
3962 	atomic_dec(&rdev->stats.res.mr_count);
3963 	return rc;
3964 }
3965 
3966 static int bnxt_re_set_page(struct ib_mr *ib_mr, u64 addr)
3967 {
3968 	struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3969 
3970 	if (unlikely(mr->npages == mr->qplib_frpl.max_pg_ptrs))
3971 		return -ENOMEM;
3972 
3973 	mr->pages[mr->npages++] = addr;
3974 	return 0;
3975 }
3976 
3977 int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
3978 		      unsigned int *sg_offset)
3979 {
3980 	struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3981 
3982 	mr->npages = 0;
3983 	return ib_sg_to_pages(ib_mr, sg, sg_nents, sg_offset, bnxt_re_set_page);
3984 }
3985 
3986 struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
3987 			       u32 max_num_sg)
3988 {
3989 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3990 	struct bnxt_re_dev *rdev = pd->rdev;
3991 	struct bnxt_re_mr *mr = NULL;
3992 	u32 active_mrs;
3993 	int rc;
3994 
3995 	if (type != IB_MR_TYPE_MEM_REG) {
3996 		ibdev_dbg(&rdev->ibdev, "MR type 0x%x not supported", type);
3997 		return ERR_PTR(-EINVAL);
3998 	}
3999 	if (max_num_sg > MAX_PBL_LVL_1_PGS)
4000 		return ERR_PTR(-EINVAL);
4001 
4002 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
4003 	if (!mr)
4004 		return ERR_PTR(-ENOMEM);
4005 
4006 	mr->rdev = rdev;
4007 	mr->qplib_mr.pd = &pd->qplib_pd;
4008 	mr->qplib_mr.access_flags = BNXT_QPLIB_FR_PMR;
4009 	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
4010 
4011 	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
4012 	if (rc)
4013 		goto bail;
4014 
4015 	mr->ib_mr.lkey = mr->qplib_mr.lkey;
4016 	mr->ib_mr.rkey = mr->ib_mr.lkey;
4017 
4018 	mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
4019 	if (!mr->pages) {
4020 		rc = -ENOMEM;
4021 		goto fail;
4022 	}
4023 	rc = bnxt_qplib_alloc_fast_reg_page_list(&rdev->qplib_res,
4024 						 &mr->qplib_frpl, max_num_sg);
4025 	if (rc) {
4026 		ibdev_err(&rdev->ibdev,
4027 			  "Failed to allocate HW FR page list");
4028 		goto fail_mr;
4029 	}
4030 
4031 	active_mrs = atomic_inc_return(&rdev->stats.res.mr_count);
4032 	if (active_mrs > rdev->stats.res.mr_watermark)
4033 		rdev->stats.res.mr_watermark = active_mrs;
4034 	return &mr->ib_mr;
4035 
4036 fail_mr:
4037 	kfree(mr->pages);
4038 fail:
4039 	bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
4040 bail:
4041 	kfree(mr);
4042 	return ERR_PTR(rc);
4043 }
4044 
4045 struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
4046 			       struct ib_udata *udata)
4047 {
4048 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
4049 	struct bnxt_re_dev *rdev = pd->rdev;
4050 	struct bnxt_re_mw *mw;
4051 	u32 active_mws;
4052 	int rc;
4053 
4054 	mw = kzalloc(sizeof(*mw), GFP_KERNEL);
4055 	if (!mw)
4056 		return ERR_PTR(-ENOMEM);
4057 	mw->rdev = rdev;
4058 	mw->qplib_mw.pd = &pd->qplib_pd;
4059 
4060 	mw->qplib_mw.type = (type == IB_MW_TYPE_1 ?
4061 			       CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 :
4062 			       CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B);
4063 	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw);
4064 	if (rc) {
4065 		ibdev_err(&rdev->ibdev, "Allocate MW failed!");
4066 		goto fail;
4067 	}
4068 	mw->ib_mw.rkey = mw->qplib_mw.rkey;
4069 
4070 	active_mws = atomic_inc_return(&rdev->stats.res.mw_count);
4071 	if (active_mws > rdev->stats.res.mw_watermark)
4072 		rdev->stats.res.mw_watermark = active_mws;
4073 	return &mw->ib_mw;
4074 
4075 fail:
4076 	kfree(mw);
4077 	return ERR_PTR(rc);
4078 }
4079 
4080 int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
4081 {
4082 	struct bnxt_re_mw *mw = container_of(ib_mw, struct bnxt_re_mw, ib_mw);
4083 	struct bnxt_re_dev *rdev = mw->rdev;
4084 	int rc;
4085 
4086 	rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw);
4087 	if (rc) {
4088 		ibdev_err(&rdev->ibdev, "Free MW failed: %#x\n", rc);
4089 		return rc;
4090 	}
4091 
4092 	kfree(mw);
4093 	atomic_dec(&rdev->stats.res.mw_count);
4094 	return rc;
4095 }
4096 
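/* Common user-MR registration path shared by bnxt_re_reg_user_mr() and the
 * dmabuf variant: validate the length and page size, allocate a HW MRW when
 * required and register the umem with the firmware.
 */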
4097 static struct ib_mr *__bnxt_re_user_reg_mr(struct ib_pd *ib_pd, u64 length, u64 virt_addr,
4098 					   int mr_access_flags, struct ib_umem *umem)
4099 {
4100 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
4101 	struct bnxt_re_dev *rdev = pd->rdev;
4102 	unsigned long page_size;
4103 	struct bnxt_re_mr *mr;
4104 	int umem_pgs, rc;
4105 	u32 active_mrs;
4106 
4107 	if (length > BNXT_RE_MAX_MR_SIZE) {
4108 		ibdev_err(&rdev->ibdev, "MR Size: %lld > Max supported:%lld\n",
4109 			  length, BNXT_RE_MAX_MR_SIZE);
4110 		return ERR_PTR(-ENOMEM);
4111 	}
4112 
4113 	page_size = ib_umem_find_best_pgsz(umem, BNXT_RE_PAGE_SIZE_SUPPORTED, virt_addr);
4114 	if (!page_size) {
4115 		ibdev_err(&rdev->ibdev, "umem page size unsupported!");
4116 		return ERR_PTR(-EINVAL);
4117 	}
4118 
4119 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
4120 	if (!mr)
4121 		return ERR_PTR(-ENOMEM);
4122 
4123 	mr->rdev = rdev;
4124 	mr->qplib_mr.pd = &pd->qplib_pd;
4125 	mr->qplib_mr.access_flags = __from_ib_access_flags(mr_access_flags);
4126 	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR;
4127 
4128 	if (!_is_alloc_mr_unified(rdev->dev_attr->dev_cap_flags)) {
4129 		rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
4130 		if (rc) {
4131 			ibdev_err(&rdev->ibdev, "Failed to allocate MR rc = %d", rc);
4132 			rc = -EIO;
4133 			goto free_mr;
4134 		}
4135 		/* The fixed portion of the rkey is the same as the lkey */
4136 		mr->ib_mr.rkey = mr->qplib_mr.rkey;
4137 	} else {
4138 		mr->qplib_mr.flags = CMDQ_REGISTER_MR_FLAGS_ALLOC_MR;
4139 	}
4140 	mr->ib_umem = umem;
4141 	mr->qplib_mr.va = virt_addr;
4142 	mr->qplib_mr.total_size = length;
4143 
4144 	if (mr_access_flags & IB_ACCESS_RELAXED_ORDERING)
4145 		bnxt_re_check_and_set_relaxed_ordering(rdev, &mr->qplib_mr);
4146 
4147 	umem_pgs = ib_umem_num_dma_blocks(umem, page_size);
4148 	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, umem,
4149 			       umem_pgs, page_size);
4150 	if (rc) {
4151 		ibdev_err(&rdev->ibdev, "Failed to register user MR - rc = %d\n", rc);
4152 		rc = -EIO;
4153 		goto free_mrw;
4154 	}
4155 
4156 	mr->ib_mr.lkey = mr->qplib_mr.lkey;
4157 	mr->ib_mr.rkey = mr->qplib_mr.lkey;
4158 	active_mrs = atomic_inc_return(&rdev->stats.res.mr_count);
4159 	if (active_mrs > rdev->stats.res.mr_watermark)
4160 		rdev->stats.res.mr_watermark = active_mrs;
4161 
4162 	return &mr->ib_mr;
4163 
4164 free_mrw:
4165 	bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
4166 free_mr:
4167 	kfree(mr);
4168 	return ERR_PTR(rc);
4169 }
4170 
4171 struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
4172 				  u64 virt_addr, int mr_access_flags,
4173 				  struct ib_udata *udata)
4174 {
4175 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
4176 	struct bnxt_re_dev *rdev = pd->rdev;
4177 	struct ib_umem *umem;
4178 	struct ib_mr *ib_mr;
4179 
4180 	umem = ib_umem_get(&rdev->ibdev, start, length, mr_access_flags);
4181 	if (IS_ERR(umem))
4182 		return ERR_CAST(umem);
4183 
4184 	ib_mr = __bnxt_re_user_reg_mr(ib_pd, length, virt_addr, mr_access_flags, umem);
4185 	if (IS_ERR(ib_mr))
4186 		ib_umem_release(umem);
4187 	return ib_mr;
4188 }
4189 
4190 struct ib_mr *bnxt_re_reg_user_mr_dmabuf(struct ib_pd *ib_pd, u64 start,
4191 					 u64 length, u64 virt_addr, int fd,
4192 					 int mr_access_flags,
4193 					 struct uverbs_attr_bundle *attrs)
4194 {
4195 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
4196 	struct bnxt_re_dev *rdev = pd->rdev;
4197 	struct ib_umem_dmabuf *umem_dmabuf;
4198 	struct ib_umem *umem;
4199 	struct ib_mr *ib_mr;
4200 
4201 	umem_dmabuf = ib_umem_dmabuf_get_pinned(&rdev->ibdev, start, length,
4202 						fd, mr_access_flags);
4203 	if (IS_ERR(umem_dmabuf))
4204 		return ERR_CAST(umem_dmabuf);
4205 
4206 	umem = &umem_dmabuf->umem;
4207 
4208 	ib_mr = __bnxt_re_user_reg_mr(ib_pd, length, virt_addr, mr_access_flags, umem);
4209 	if (IS_ERR(ib_mr))
4210 		ib_umem_release(umem);
4211 	return ib_mr;
4212 }
4213 
4214 int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
4215 {
4216 	struct ib_device *ibdev = ctx->device;
4217 	struct bnxt_re_ucontext *uctx =
4218 		container_of(ctx, struct bnxt_re_ucontext, ib_uctx);
4219 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
4220 	struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
4221 	struct bnxt_re_user_mmap_entry *entry;
4222 	struct bnxt_re_uctx_resp resp = {};
4223 	struct bnxt_re_uctx_req ureq = {};
4224 	u32 chip_met_rev_num = 0;
4225 	int rc;
4226 
4227 	ibdev_dbg(ibdev, "ABI version requested %u", ibdev->ops.uverbs_abi_ver);
4228 
4229 	if (ibdev->ops.uverbs_abi_ver != BNXT_RE_ABI_VERSION) {
4230 		ibdev_dbg(ibdev, "Requested ABI version is different from the supported version %d",
4231 			  BNXT_RE_ABI_VERSION);
4232 		return -EPERM;
4233 	}
4234 
4235 	uctx->rdev = rdev;
4236 
4237 	uctx->shpg = (void *)__get_free_page(GFP_KERNEL);
4238 	if (!uctx->shpg) {
4239 		rc = -ENOMEM;
4240 		goto fail;
4241 	}
4242 	spin_lock_init(&uctx->sh_lock);
4243 
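	/* Pack the chip number, revision and metal into chip_id0 so user
	 * space can identify the device generation.
	 */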
4244 	resp.comp_mask = BNXT_RE_UCNTX_CMASK_HAVE_CCTX;
4245 	chip_met_rev_num = rdev->chip_ctx->chip_num;
4246 	chip_met_rev_num |= ((u32)rdev->chip_ctx->chip_rev & 0xFF) <<
4247 			     BNXT_RE_CHIP_ID0_CHIP_REV_SFT;
4248 	chip_met_rev_num |= ((u32)rdev->chip_ctx->chip_metal & 0xFF) <<
4249 			     BNXT_RE_CHIP_ID0_CHIP_MET_SFT;
4250 	resp.chip_id0 = chip_met_rev_num;
4251 	/* Temp, use xa_alloc instead */
4252 	resp.dev_id = rdev->en_dev->pdev->devfn;
4253 	resp.max_qp = rdev->qplib_ctx.qpc_count;
4254 	resp.pg_size = PAGE_SIZE;
4255 	resp.cqe_sz = sizeof(struct cq_base);
4256 	resp.max_cqd = dev_attr->max_cq_wqes;
4257 
4258 	if (rdev->chip_ctx->modes.db_push)
4259 		resp.comp_mask |= BNXT_RE_UCNTX_CMASK_WC_DPI_ENABLED;
4260 
4261 	entry = bnxt_re_mmap_entry_insert(uctx, 0, BNXT_RE_MMAP_SH_PAGE, NULL);
4262 	if (!entry) {
4263 		rc = -ENOMEM;
4264 		goto cfail;
4265 	}
4266 	uctx->shpage_mmap = &entry->rdma_entry;
4267 	if (rdev->pacing.dbr_pacing)
4268 		resp.comp_mask |= BNXT_RE_UCNTX_CMASK_DBR_PACING_ENABLED;
4269 
4270 	if (_is_host_msn_table(rdev->qplib_res.dattr->dev_cap_flags2))
4271 		resp.comp_mask |= BNXT_RE_UCNTX_CMASK_MSN_TABLE_ENABLED;
4272 
4273 	if (udata->inlen >= sizeof(ureq)) {
4274 		rc = ib_copy_from_udata(&ureq, udata, min(udata->inlen, sizeof(ureq)));
4275 		if (rc)
4276 			goto cfail;
4277 		if (ureq.comp_mask & BNXT_RE_COMP_MASK_REQ_UCNTX_POW2_SUPPORT) {
4278 			resp.comp_mask |= BNXT_RE_UCNTX_CMASK_POW2_DISABLED;
4279 			uctx->cmask |= BNXT_RE_UCNTX_CAP_POW2_DISABLED;
4280 		}
4281 		if (ureq.comp_mask & BNXT_RE_COMP_MASK_REQ_UCNTX_VAR_WQE_SUPPORT) {
4282 			resp.comp_mask |= BNXT_RE_UCNTX_CMASK_HAVE_MODE;
4283 			resp.mode = rdev->chip_ctx->modes.wqe_mode;
4284 			if (resp.mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
4285 				uctx->cmask |= BNXT_RE_UCNTX_CAP_VAR_WQE_ENABLED;
4286 		}
4287 	}
4288 
4289 	rc = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
4290 	if (rc) {
4291 		ibdev_err(ibdev, "Failed to copy user context");
4292 		rc = -EFAULT;
4293 		goto cfail;
4294 	}
4295 
4296 	return 0;
4297 cfail:
4298 	free_page((unsigned long)uctx->shpg);
4299 	uctx->shpg = NULL;
4300 fail:
4301 	return rc;
4302 }
4303 
4304 void bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
4305 {
4306 	struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
4307 						   struct bnxt_re_ucontext,
4308 						   ib_uctx);
4309 
4310 	struct bnxt_re_dev *rdev = uctx->rdev;
4311 
4312 	rdma_user_mmap_entry_remove(uctx->shpage_mmap);
4313 	uctx->shpage_mmap = NULL;
4314 	if (uctx->shpg)
4315 		free_page((unsigned long)uctx->shpg);
4316 
4317 	if (uctx->dpi.dbr) {
4318 		/* Free the DPI that was allocated when the application created
4319 		 * its first PD, and mark the context dpi as NULL
4320 		 */
4321 		bnxt_qplib_dealloc_dpi(&rdev->qplib_res, &uctx->dpi);
4322 		uctx->dpi.dbr = NULL;
4323 	}
4324 }
4325 
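/* Look up a CQ by its hardware CQ id in the per-device hash table; used by
 * the toggle-memory uverbs handler below.
 */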
4326 static struct bnxt_re_cq *bnxt_re_search_for_cq(struct bnxt_re_dev *rdev, u32 cq_id)
4327 {
4328 	struct bnxt_re_cq *cq = NULL, *tmp_cq;
4329 
4330 	hash_for_each_possible(rdev->cq_hash, tmp_cq, hash_entry, cq_id) {
4331 		if (tmp_cq->qplib_cq.id == cq_id) {
4332 			cq = tmp_cq;
4333 			break;
4334 		}
4335 	}
4336 	return cq;
4337 }
4338 
4339 static struct bnxt_re_srq *bnxt_re_search_for_srq(struct bnxt_re_dev *rdev, u32 srq_id)
4340 {
4341 	struct bnxt_re_srq *srq = NULL, *tmp_srq;
4342 
4343 	hash_for_each_possible(rdev->srq_hash, tmp_srq, hash_entry, srq_id) {
4344 		if (tmp_srq->qplib_srq.id == srq_id) {
4345 			srq = tmp_srq;
4346 			break;
4347 		}
4348 	}
4349 	return srq;
4350 }
4351 
4352 /* Helper function to mmap the virtual memory from user app */
4353 int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
4354 {
4355 	struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
4356 						   struct bnxt_re_ucontext,
4357 						   ib_uctx);
4358 	struct bnxt_re_user_mmap_entry *bnxt_entry;
4359 	struct rdma_user_mmap_entry *rdma_entry;
4360 	int ret = 0;
4361 	u64 pfn;
4362 
4363 	rdma_entry = rdma_user_mmap_entry_get(&uctx->ib_uctx, vma);
4364 	if (!rdma_entry)
4365 		return -EINVAL;
4366 
4367 	bnxt_entry = container_of(rdma_entry, struct bnxt_re_user_mmap_entry,
4368 				  rdma_entry);
4369 
4370 	switch (bnxt_entry->mmap_flag) {
4371 	case BNXT_RE_MMAP_WC_DB:
4372 		pfn = bnxt_entry->mem_offset >> PAGE_SHIFT;
4373 		ret = rdma_user_mmap_io(ib_uctx, vma, pfn, PAGE_SIZE,
4374 					pgprot_writecombine(vma->vm_page_prot),
4375 					rdma_entry);
4376 		break;
4377 	case BNXT_RE_MMAP_UC_DB:
4378 		pfn = bnxt_entry->mem_offset >> PAGE_SHIFT;
4379 		ret = rdma_user_mmap_io(ib_uctx, vma, pfn, PAGE_SIZE,
4380 					pgprot_noncached(vma->vm_page_prot),
4381 				rdma_entry);
4382 		break;
4383 	case BNXT_RE_MMAP_SH_PAGE:
4384 		ret = vm_insert_page(vma, vma->vm_start, virt_to_page(uctx->shpg));
4385 		break;
4386 	case BNXT_RE_MMAP_DBR_BAR:
4387 		pfn = bnxt_entry->mem_offset >> PAGE_SHIFT;
4388 		ret = rdma_user_mmap_io(ib_uctx, vma, pfn, PAGE_SIZE,
4389 					pgprot_noncached(vma->vm_page_prot),
4390 					rdma_entry);
4391 		break;
4392 	case BNXT_RE_MMAP_DBR_PAGE:
4393 	case BNXT_RE_MMAP_TOGGLE_PAGE:
4394 		/* The driver does not allow user space to map these pages with write access */
4395 		if (vma->vm_flags & VM_WRITE)
4396 			ret = -EFAULT;
4397 		else
4398 			ret = vm_insert_page(vma, vma->vm_start,
4399 					     virt_to_page((void *)bnxt_entry->mem_offset));
4400 		break;
4401 	default:
4402 		ret = -EINVAL;
4403 		break;
4404 	}
4405 
4406 	rdma_user_mmap_entry_put(rdma_entry);
4407 	return ret;
4408 }
4409 
4410 void bnxt_re_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
4411 {
4412 	struct bnxt_re_user_mmap_entry *bnxt_entry;
4413 
4414 	bnxt_entry = container_of(rdma_entry, struct bnxt_re_user_mmap_entry,
4415 				  rdma_entry);
4416 
4417 	kfree(bnxt_entry);
4418 }
4419 
4420 static int UVERBS_HANDLER(BNXT_RE_METHOD_NOTIFY_DRV)(struct uverbs_attr_bundle *attrs)
4421 {
4422 	struct bnxt_re_ucontext *uctx;
4423 
4424 	uctx = container_of(ib_uverbs_get_ucontext(attrs), struct bnxt_re_ucontext, ib_uctx);
4425 	bnxt_re_pacing_alert(uctx->rdev);
4426 	return 0;
4427 }
4428 
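/* Allocate a driver page for user space (write-combine doorbell, DBR
 * pacing BAR or DBR pacing page), create an mmap entry for it and return
 * the mmap offset, length and DPI to the caller.
 */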
4429 static int UVERBS_HANDLER(BNXT_RE_METHOD_ALLOC_PAGE)(struct uverbs_attr_bundle *attrs)
4430 {
4431 	struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs, BNXT_RE_ALLOC_PAGE_HANDLE);
4432 	enum bnxt_re_alloc_page_type alloc_type;
4433 	struct bnxt_re_user_mmap_entry *entry;
4434 	enum bnxt_re_mmap_flag mmap_flag;
4435 	struct bnxt_qplib_chip_ctx *cctx;
4436 	struct bnxt_re_ucontext *uctx;
4437 	struct bnxt_re_dev *rdev;
4438 	u64 mmap_offset;
4439 	u32 length;
4440 	u32 dpi;
4441 	u64 addr;
4442 	int err;
4443 
4444 	uctx = container_of(ib_uverbs_get_ucontext(attrs), struct bnxt_re_ucontext, ib_uctx);
4445 	if (IS_ERR(uctx))
4446 		return PTR_ERR(uctx);
4447 
4448 	err = uverbs_get_const(&alloc_type, attrs, BNXT_RE_ALLOC_PAGE_TYPE);
4449 	if (err)
4450 		return err;
4451 
4452 	rdev = uctx->rdev;
4453 	cctx = rdev->chip_ctx;
4454 
4455 	switch (alloc_type) {
4456 	case BNXT_RE_ALLOC_WC_PAGE:
4457 		if (cctx->modes.db_push)  {
4458 			if (bnxt_qplib_alloc_dpi(&rdev->qplib_res, &uctx->wcdpi,
4459 						 uctx, BNXT_QPLIB_DPI_TYPE_WC))
4460 				return -ENOMEM;
4461 			length = PAGE_SIZE;
4462 			dpi = uctx->wcdpi.dpi;
4463 			addr = (u64)uctx->wcdpi.umdbr;
4464 			mmap_flag = BNXT_RE_MMAP_WC_DB;
4465 		} else {
4466 			return -EINVAL;
4467 		}
4468 
4469 		break;
4470 	case BNXT_RE_ALLOC_DBR_BAR_PAGE:
4471 		length = PAGE_SIZE;
4472 		addr = (u64)rdev->pacing.dbr_bar_addr;
4473 		mmap_flag = BNXT_RE_MMAP_DBR_BAR;
4474 		break;
4475 
4476 	case BNXT_RE_ALLOC_DBR_PAGE:
4477 		length = PAGE_SIZE;
4478 		addr = (u64)rdev->pacing.dbr_page;
4479 		mmap_flag = BNXT_RE_MMAP_DBR_PAGE;
4480 		break;
4481 
4482 	default:
4483 		return -EOPNOTSUPP;
4484 	}
4485 
4486 	entry = bnxt_re_mmap_entry_insert(uctx, addr, mmap_flag, &mmap_offset);
4487 	if (!entry)
4488 		return -ENOMEM;
4489 
4490 	uobj->object = entry;
4491 	uverbs_finalize_uobj_create(attrs, BNXT_RE_ALLOC_PAGE_HANDLE);
4492 	err = uverbs_copy_to(attrs, BNXT_RE_ALLOC_PAGE_MMAP_OFFSET,
4493 			     &mmap_offset, sizeof(mmap_offset));
4494 	if (err)
4495 		return err;
4496 
4497 	err = uverbs_copy_to(attrs, BNXT_RE_ALLOC_PAGE_MMAP_LENGTH,
4498 			     &length, sizeof(length));
4499 	if (err)
4500 		return err;
4501 
4502 	err = uverbs_copy_to(attrs, BNXT_RE_ALLOC_PAGE_DPI,
4503 			     &dpi, sizeof(dpi));
4504 	if (err)
4505 		return err;
4506 
4507 	return 0;
4508 }
4509 
4510 static int alloc_page_obj_cleanup(struct ib_uobject *uobject,
4511 				  enum rdma_remove_reason why,
4512 			    struct uverbs_attr_bundle *attrs)
4513 {
4514 	struct  bnxt_re_user_mmap_entry *entry = uobject->object;
4515 	struct bnxt_re_ucontext *uctx = entry->uctx;
4516 
4517 	switch (entry->mmap_flag) {
4518 	case BNXT_RE_MMAP_WC_DB:
4519 		if (uctx && uctx->wcdpi.dbr) {
4520 			struct bnxt_re_dev *rdev = uctx->rdev;
4521 
4522 			bnxt_qplib_dealloc_dpi(&rdev->qplib_res, &uctx->wcdpi);
4523 			uctx->wcdpi.dbr = NULL;
4524 		}
4525 		break;
4526 	case BNXT_RE_MMAP_DBR_BAR:
4527 	case BNXT_RE_MMAP_DBR_PAGE:
4528 		break;
4529 	default:
4530 		goto exit;
4531 	}
4532 	rdma_user_mmap_entry_remove(&entry->rdma_entry);
4533 exit:
4534 	return 0;
4535 }
4536 
4537 DECLARE_UVERBS_NAMED_METHOD(BNXT_RE_METHOD_ALLOC_PAGE,
4538 			    UVERBS_ATTR_IDR(BNXT_RE_ALLOC_PAGE_HANDLE,
4539 					    BNXT_RE_OBJECT_ALLOC_PAGE,
4540 					    UVERBS_ACCESS_NEW,
4541 					    UA_MANDATORY),
4542 			    UVERBS_ATTR_CONST_IN(BNXT_RE_ALLOC_PAGE_TYPE,
4543 						 enum bnxt_re_alloc_page_type,
4544 						 UA_MANDATORY),
4545 			    UVERBS_ATTR_PTR_OUT(BNXT_RE_ALLOC_PAGE_MMAP_OFFSET,
4546 						UVERBS_ATTR_TYPE(u64),
4547 						UA_MANDATORY),
4548 			    UVERBS_ATTR_PTR_OUT(BNXT_RE_ALLOC_PAGE_MMAP_LENGTH,
4549 						UVERBS_ATTR_TYPE(u32),
4550 						UA_MANDATORY),
4551 			    UVERBS_ATTR_PTR_OUT(BNXT_RE_ALLOC_PAGE_DPI,
4552 						UVERBS_ATTR_TYPE(u32),
4553 						UA_MANDATORY));
4554 
4555 DECLARE_UVERBS_NAMED_METHOD_DESTROY(BNXT_RE_METHOD_DESTROY_PAGE,
4556 				    UVERBS_ATTR_IDR(BNXT_RE_DESTROY_PAGE_HANDLE,
4557 						    BNXT_RE_OBJECT_ALLOC_PAGE,
4558 						    UVERBS_ACCESS_DESTROY,
4559 						    UA_MANDATORY));
4560 
4561 DECLARE_UVERBS_NAMED_OBJECT(BNXT_RE_OBJECT_ALLOC_PAGE,
4562 			    UVERBS_TYPE_ALLOC_IDR(alloc_page_obj_cleanup),
4563 			    &UVERBS_METHOD(BNXT_RE_METHOD_ALLOC_PAGE),
4564 			    &UVERBS_METHOD(BNXT_RE_METHOD_DESTROY_PAGE));
4565 
4566 DECLARE_UVERBS_NAMED_METHOD(BNXT_RE_METHOD_NOTIFY_DRV);
4567 
4568 DECLARE_UVERBS_GLOBAL_METHODS(BNXT_RE_OBJECT_NOTIFY_DRV,
4569 			      &UVERBS_METHOD(BNXT_RE_METHOD_NOTIFY_DRV));
4570 
4571 /* Toggle MEM */
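/* Hand a CQ or SRQ toggle page to user space: look up the resource by id,
 * create an mmap entry for its toggle page and return the mmap details.
 */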
4572 static int UVERBS_HANDLER(BNXT_RE_METHOD_GET_TOGGLE_MEM)(struct uverbs_attr_bundle *attrs)
4573 {
4574 	struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs, BNXT_RE_TOGGLE_MEM_HANDLE);
4575 	enum bnxt_re_mmap_flag mmap_flag = BNXT_RE_MMAP_TOGGLE_PAGE;
4576 	enum bnxt_re_get_toggle_mem_type res_type;
4577 	struct bnxt_re_user_mmap_entry *entry;
4578 	struct bnxt_re_ucontext *uctx;
4579 	struct ib_ucontext *ib_uctx;
4580 	struct bnxt_re_dev *rdev;
4581 	struct bnxt_re_srq *srq;
4582 	u32 length = PAGE_SIZE;
4583 	struct bnxt_re_cq *cq;
4584 	u64 mem_offset;
4585 	u32 offset = 0;
4586 	u64 addr = 0;
4587 	u32 res_id;
4588 	int err;
4589 
4590 	ib_uctx = ib_uverbs_get_ucontext(attrs);
4591 	if (IS_ERR(ib_uctx))
4592 		return PTR_ERR(ib_uctx);
4593 
4594 	err = uverbs_get_const(&res_type, attrs, BNXT_RE_TOGGLE_MEM_TYPE);
4595 	if (err)
4596 		return err;
4597 
4598 	uctx = container_of(ib_uctx, struct bnxt_re_ucontext, ib_uctx);
4599 	rdev = uctx->rdev;
4600 	err = uverbs_copy_from(&res_id, attrs, BNXT_RE_TOGGLE_MEM_RES_ID);
4601 	if (err)
4602 		return err;
4603 
4604 	switch (res_type) {
4605 	case BNXT_RE_CQ_TOGGLE_MEM:
4606 		cq = bnxt_re_search_for_cq(rdev, res_id);
4607 		if (!cq)
4608 			return -EINVAL;
4609 
4610 		addr = (u64)cq->uctx_cq_page;
4611 		break;
4612 	case BNXT_RE_SRQ_TOGGLE_MEM:
4613 		srq = bnxt_re_search_for_srq(rdev, res_id);
4614 		if (!srq)
4615 			return -EINVAL;
4616 
4617 		addr = (u64)srq->uctx_srq_page;
4618 		break;
4619 
4620 	default:
4621 		return -EOPNOTSUPP;
4622 	}
4623 
4624 	entry = bnxt_re_mmap_entry_insert(uctx, addr, mmap_flag, &mem_offset);
4625 	if (!entry)
4626 		return -ENOMEM;
4627 
4628 	uobj->object = entry;
4629 	uverbs_finalize_uobj_create(attrs, BNXT_RE_TOGGLE_MEM_HANDLE);
4630 	err = uverbs_copy_to(attrs, BNXT_RE_TOGGLE_MEM_MMAP_PAGE,
4631 			     &mem_offset, sizeof(mem_offset));
4632 	if (err)
4633 		return err;
4634 
4635 	err = uverbs_copy_to(attrs, BNXT_RE_TOGGLE_MEM_MMAP_LENGTH,
4636 			     &length, sizeof(length));
4637 	if (err)
4638 		return err;
4639 
4640 	err = uverbs_copy_to(attrs, BNXT_RE_TOGGLE_MEM_MMAP_OFFSET,
4641 			     &offset, sizeof(offset));
4642 	if (err)
4643 		return err;
4644 
4645 	return 0;
4646 }
4647 
4648 static int get_toggle_mem_obj_cleanup(struct ib_uobject *uobject,
4649 				      enum rdma_remove_reason why,
4650 				      struct uverbs_attr_bundle *attrs)
4651 {
4652 	struct  bnxt_re_user_mmap_entry *entry = uobject->object;
4653 
4654 	rdma_user_mmap_entry_remove(&entry->rdma_entry);
4655 	return 0;
4656 }
4657 
4658 DECLARE_UVERBS_NAMED_METHOD(BNXT_RE_METHOD_GET_TOGGLE_MEM,
4659 			    UVERBS_ATTR_IDR(BNXT_RE_TOGGLE_MEM_HANDLE,
4660 					    BNXT_RE_OBJECT_GET_TOGGLE_MEM,
4661 					    UVERBS_ACCESS_NEW,
4662 					    UA_MANDATORY),
4663 			    UVERBS_ATTR_CONST_IN(BNXT_RE_TOGGLE_MEM_TYPE,
4664 						 enum bnxt_re_get_toggle_mem_type,
4665 						 UA_MANDATORY),
4666 			    UVERBS_ATTR_PTR_IN(BNXT_RE_TOGGLE_MEM_RES_ID,
4667 					       UVERBS_ATTR_TYPE(u32),
4668 					       UA_MANDATORY),
4669 			    UVERBS_ATTR_PTR_OUT(BNXT_RE_TOGGLE_MEM_MMAP_PAGE,
4670 						UVERBS_ATTR_TYPE(u64),
4671 						UA_MANDATORY),
4672 			    UVERBS_ATTR_PTR_OUT(BNXT_RE_TOGGLE_MEM_MMAP_OFFSET,
4673 						UVERBS_ATTR_TYPE(u32),
4674 						UA_MANDATORY),
4675 			    UVERBS_ATTR_PTR_OUT(BNXT_RE_TOGGLE_MEM_MMAP_LENGTH,
4676 						UVERBS_ATTR_TYPE(u32),
4677 						UA_MANDATORY));
4678 
4679 DECLARE_UVERBS_NAMED_METHOD_DESTROY(BNXT_RE_METHOD_RELEASE_TOGGLE_MEM,
4680 				    UVERBS_ATTR_IDR(BNXT_RE_RELEASE_TOGGLE_MEM_HANDLE,
4681 						    BNXT_RE_OBJECT_GET_TOGGLE_MEM,
4682 						    UVERBS_ACCESS_DESTROY,
4683 						    UA_MANDATORY));
4684 
4685 DECLARE_UVERBS_NAMED_OBJECT(BNXT_RE_OBJECT_GET_TOGGLE_MEM,
4686 			    UVERBS_TYPE_ALLOC_IDR(get_toggle_mem_obj_cleanup),
4687 			    &UVERBS_METHOD(BNXT_RE_METHOD_GET_TOGGLE_MEM),
4688 			    &UVERBS_METHOD(BNXT_RE_METHOD_RELEASE_TOGGLE_MEM));
4689 
4690 const struct uapi_definition bnxt_re_uapi_defs[] = {
4691 	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(BNXT_RE_OBJECT_ALLOC_PAGE),
4692 	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(BNXT_RE_OBJECT_NOTIFY_DRV),
4693 	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(BNXT_RE_OBJECT_GET_TOGGLE_MEM),
4694 	{}
4695 };
4696