1 /*
2 * Broadcom NetXtreme-E RoCE driver.
3 *
4 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
5 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 *
17 * 1. Redistributions of source code must retain the above copyright
18 * notice, this list of conditions and the following disclaimer.
19 * 2. Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the
22 * distribution.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
26 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
31 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
32 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
33 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
34 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 *
36 * Description: IB Verbs interpreter
37 */
38
39 #include <linux/interrupt.h>
40 #include <linux/types.h>
41 #include <linux/pci.h>
42 #include <linux/netdevice.h>
43 #include <linux/if_ether.h>
44
45 #include <rdma/ib_verbs.h>
46 #include <rdma/ib_user_verbs.h>
47 #include <rdma/ib_umem.h>
48 #include <rdma/ib_addr.h>
49 #include <rdma/ib_mad.h>
50 #include <rdma/ib_cache.h>
51 #include <rdma/uverbs_ioctl.h>
52
53 #include "bnxt_ulp.h"
54
55 #include "roce_hsi.h"
56 #include "qplib_res.h"
57 #include "qplib_sp.h"
58 #include "qplib_fp.h"
59 #include "qplib_rcfw.h"
60
61 #include "bnxt_re.h"
62 #include "ib_verbs.h"
63 #include <rdma/bnxt_re-abi.h>
64
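/* Translate IB verbs access flags into the qplib/firmware bit encoding;
 * __to_ib_access_flags() below performs the reverse mapping.
 */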
65 static int __from_ib_access_flags(int iflags)
66 {
67 int qflags = 0;
68
69 if (iflags & IB_ACCESS_LOCAL_WRITE)
70 qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
71 if (iflags & IB_ACCESS_REMOTE_READ)
72 qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
73 if (iflags & IB_ACCESS_REMOTE_WRITE)
74 qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
75 if (iflags & IB_ACCESS_REMOTE_ATOMIC)
76 qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
77 if (iflags & IB_ACCESS_MW_BIND)
78 qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
79 if (iflags & IB_ZERO_BASED)
80 qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
81 if (iflags & IB_ACCESS_ON_DEMAND)
82 qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
83 return qflags;
84 };
85
86 static enum ib_access_flags __to_ib_access_flags(int qflags)
87 {
88 enum ib_access_flags iflags = 0;
89
90 if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
91 iflags |= IB_ACCESS_LOCAL_WRITE;
92 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
93 iflags |= IB_ACCESS_REMOTE_WRITE;
94 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
95 iflags |= IB_ACCESS_REMOTE_READ;
96 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
97 iflags |= IB_ACCESS_REMOTE_ATOMIC;
98 if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
99 iflags |= IB_ACCESS_MW_BIND;
100 if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
101 iflags |= IB_ZERO_BASED;
102 if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
103 iflags |= IB_ACCESS_ON_DEMAND;
104 return iflags;
105 };
106
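/* Copy an ib_sge array into the qplib SGE layout used when posting work
 * requests; returns the total length of the list in bytes.
 */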
107 static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list,
108 struct bnxt_qplib_sge *sg_list, int num)
109 {
110 int i, total = 0;
111
112 for (i = 0; i < num; i++) {
113 sg_list[i].addr = ib_sg_list[i].addr;
114 sg_list[i].lkey = ib_sg_list[i].lkey;
115 sg_list[i].size = ib_sg_list[i].length;
116 total += sg_list[i].size;
117 }
118 return total;
119 }
120
121 /* Device */
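/* Report device capabilities from the attributes cached in rdev->dev_attr;
 * no firmware command is issued on this path.
 */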
122 int bnxt_re_query_device(struct ib_device *ibdev,
123 struct ib_device_attr *ib_attr,
124 struct ib_udata *udata)
125 {
126 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
127 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
128
129 memset(ib_attr, 0, sizeof(*ib_attr));
130 memcpy(&ib_attr->fw_ver, dev_attr->fw_ver,
131 min(sizeof(dev_attr->fw_ver),
132 sizeof(ib_attr->fw_ver)));
133 bnxt_qplib_get_guid(rdev->netdev->dev_addr,
134 (u8 *)&ib_attr->sys_image_guid);
135 ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE;
136 ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_2M;
137
138 ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
139 ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
140 ib_attr->hw_ver = rdev->en_dev->pdev->subsystem_device;
141 ib_attr->max_qp = dev_attr->max_qp;
142 ib_attr->max_qp_wr = dev_attr->max_qp_wqes;
143 ib_attr->device_cap_flags =
144 IB_DEVICE_CURR_QP_STATE_MOD
145 | IB_DEVICE_RC_RNR_NAK_GEN
146 | IB_DEVICE_SHUTDOWN_PORT
147 | IB_DEVICE_SYS_IMAGE_GUID
148 | IB_DEVICE_LOCAL_DMA_LKEY
149 | IB_DEVICE_RESIZE_MAX_WR
150 | IB_DEVICE_PORT_ACTIVE_EVENT
151 | IB_DEVICE_N_NOTIFY_CQ
152 | IB_DEVICE_MEM_WINDOW
153 | IB_DEVICE_MEM_WINDOW_TYPE_2B
154 | IB_DEVICE_MEM_MGT_EXTENSIONS;
155 ib_attr->max_send_sge = dev_attr->max_qp_sges;
156 ib_attr->max_recv_sge = dev_attr->max_qp_sges;
157 ib_attr->max_sge_rd = dev_attr->max_qp_sges;
158 ib_attr->max_cq = dev_attr->max_cq;
159 ib_attr->max_cqe = dev_attr->max_cq_wqes;
160 ib_attr->max_mr = dev_attr->max_mr;
161 ib_attr->max_pd = dev_attr->max_pd;
162 ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
163 ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom;
164 ib_attr->atomic_cap = IB_ATOMIC_NONE;
165 ib_attr->masked_atomic_cap = IB_ATOMIC_NONE;
166 if (dev_attr->is_atomic) {
167 ib_attr->atomic_cap = IB_ATOMIC_GLOB;
168 ib_attr->masked_atomic_cap = IB_ATOMIC_GLOB;
169 }
170
171 ib_attr->max_ee_rd_atom = 0;
172 ib_attr->max_res_rd_atom = 0;
173 ib_attr->max_ee_init_rd_atom = 0;
174 ib_attr->max_ee = 0;
175 ib_attr->max_rdd = 0;
176 ib_attr->max_mw = dev_attr->max_mw;
177 ib_attr->max_raw_ipv6_qp = 0;
178 ib_attr->max_raw_ethy_qp = dev_attr->max_raw_ethy_qp;
179 ib_attr->max_mcast_grp = 0;
180 ib_attr->max_mcast_qp_attach = 0;
181 ib_attr->max_total_mcast_qp_attach = 0;
182 ib_attr->max_ah = dev_attr->max_ah;
183
184 ib_attr->max_srq = dev_attr->max_srq;
185 ib_attr->max_srq_wr = dev_attr->max_srq_wqes;
186 ib_attr->max_srq_sge = dev_attr->max_srq_sges;
187
188 ib_attr->max_fast_reg_page_list_len = MAX_PBL_LVL_1_PGS;
189
190 ib_attr->max_pkeys = 1;
191 ib_attr->local_ca_ack_delay = BNXT_RE_DEFAULT_ACK_DELAY;
192 return 0;
193 }
194
195 /* Port */
196 int bnxt_re_query_port(struct ib_device *ibdev, u32 port_num,
197 struct ib_port_attr *port_attr)
198 {
199 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
200 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
201
202 memset(port_attr, 0, sizeof(*port_attr));
203
204 if (netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev)) {
205 port_attr->state = IB_PORT_ACTIVE;
206 port_attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
207 } else {
208 port_attr->state = IB_PORT_DOWN;
209 port_attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
210 }
211 port_attr->max_mtu = IB_MTU_4096;
212 port_attr->active_mtu = iboe_get_mtu(rdev->netdev->mtu);
213 port_attr->gid_tbl_len = dev_attr->max_sgid;
214 port_attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
215 IB_PORT_DEVICE_MGMT_SUP |
216 IB_PORT_VENDOR_CLASS_SUP;
217 port_attr->ip_gids = true;
218
219 port_attr->max_msg_sz = (u32)BNXT_RE_MAX_MR_SIZE_LOW;
220 port_attr->bad_pkey_cntr = 0;
221 port_attr->qkey_viol_cntr = 0;
222 port_attr->pkey_tbl_len = dev_attr->max_pkey;
223 port_attr->lid = 0;
224 port_attr->sm_lid = 0;
225 port_attr->lmc = 0;
226 port_attr->max_vl_num = 4;
227 port_attr->sm_sl = 0;
228 port_attr->subnet_timeout = 0;
229 port_attr->init_type_reply = 0;
230 port_attr->active_speed = rdev->active_speed;
231 port_attr->active_width = rdev->active_width;
232
233 return 0;
234 }
235
236 int bnxt_re_get_port_immutable(struct ib_device *ibdev, u32 port_num,
237 struct ib_port_immutable *immutable)
238 {
239 struct ib_port_attr port_attr;
240
241 if (bnxt_re_query_port(ibdev, port_num, &port_attr))
242 return -EINVAL;
243
244 immutable->pkey_tbl_len = port_attr.pkey_tbl_len;
245 immutable->gid_tbl_len = port_attr.gid_tbl_len;
246 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
247 immutable->core_cap_flags |= RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
248 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
249 return 0;
250 }
251
252 void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str)
253 {
254 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
255
256 snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d",
257 rdev->dev_attr.fw_ver[0], rdev->dev_attr.fw_ver[1],
258 rdev->dev_attr.fw_ver[2], rdev->dev_attr.fw_ver[3]);
259 }
260
261 int bnxt_re_query_pkey(struct ib_device *ibdev, u32 port_num,
262 u16 index, u16 *pkey)
263 {
264 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
265
266 /* Ignore port_num */
267
268 memset(pkey, 0, sizeof(*pkey));
269 return bnxt_qplib_get_pkey(&rdev->qplib_res,
270 &rdev->qplib_res.pkey_tbl, index, pkey);
271 }
272
273 int bnxt_re_query_gid(struct ib_device *ibdev, u32 port_num,
274 int index, union ib_gid *gid)
275 {
276 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
277 int rc = 0;
278
279 /* Ignore port_num */
280 memset(gid, 0, sizeof(*gid));
281 rc = bnxt_qplib_get_sgid(&rdev->qplib_res,
282 &rdev->qplib_res.sgid_tbl, index,
283 (struct bnxt_qplib_gid *)gid);
284 return rc;
285 }
286
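/* Drop a reference on the per-index GID context and, once the last
 * reference is gone, remove the entry from the hardware SGID table.
 * GID0 is kept while the shadow QP1 is still alive (see below).
 */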
287 int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
288 {
289 int rc = 0;
290 struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
291 struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
292 struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
293 struct bnxt_qplib_gid *gid_to_del;
294 u16 vlan_id = 0xFFFF;
295
296 /* Delete the entry from the hardware */
297 ctx = *context;
298 if (!ctx)
299 return -EINVAL;
300
301 if (sgid_tbl && sgid_tbl->active) {
302 if (ctx->idx >= sgid_tbl->max)
303 return -EINVAL;
304 gid_to_del = &sgid_tbl->tbl[ctx->idx].gid;
305 vlan_id = sgid_tbl->tbl[ctx->idx].vlan_id;
306 /* DEL_GID is called in WQ context(netdevice_event_work_handler)
307 * or via the ib_unregister_device path. In the former case QP1
308 * may not be destroyed yet, in which case just return as FW
309 * needs that entry to be present and will fail its deletion.
310 * We could get invoked again after QP1 is destroyed OR get an
311 * ADD_GID call with a different GID value for the same index
312 * where we issue MODIFY_GID cmd to update the GID entry -- TBD
313 */
314 if (ctx->idx == 0 &&
315 rdma_link_local_addr((struct in6_addr *)gid_to_del) &&
316 ctx->refcnt == 1 && rdev->gsi_ctx.gsi_sqp) {
317 ibdev_dbg(&rdev->ibdev,
318 "Trying to delete GID0 while QP1 is alive\n");
319 return -EFAULT;
320 }
321 ctx->refcnt--;
322 if (!ctx->refcnt) {
323 rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del,
324 vlan_id, true);
325 if (rc) {
326 ibdev_err(&rdev->ibdev,
327 "Failed to remove GID: %#x", rc);
328 } else {
329 ctx_tbl = sgid_tbl->ctx;
330 ctx_tbl[ctx->idx] = NULL;
331 kfree(ctx);
332 }
333 }
334 } else {
335 return -EINVAL;
336 }
337 return rc;
338 }
339
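/* Install a GID in the hardware SGID table. If firmware reports that the
 * entry already exists (-EALREADY), only the per-index refcount is bumped.
 */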
340 int bnxt_re_add_gid(const struct ib_gid_attr *attr, void **context)
341 {
342 int rc;
343 u32 tbl_idx = 0;
344 u16 vlan_id = 0xFFFF;
345 struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
346 struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
347 struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
348
349 rc = rdma_read_gid_l2_fields(attr, &vlan_id, NULL);
350 if (rc)
351 return rc;
352
353 rc = bnxt_qplib_add_sgid(sgid_tbl, (struct bnxt_qplib_gid *)&attr->gid,
354 rdev->qplib_res.netdev->dev_addr,
355 vlan_id, true, &tbl_idx);
356 if (rc == -EALREADY) {
357 ctx_tbl = sgid_tbl->ctx;
358 ctx_tbl[tbl_idx]->refcnt++;
359 *context = ctx_tbl[tbl_idx];
360 return 0;
361 }
362
363 if (rc < 0) {
364 ibdev_err(&rdev->ibdev, "Failed to add GID: %#x", rc);
365 return rc;
366 }
367
368 ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
369 if (!ctx)
370 return -ENOMEM;
371 ctx_tbl = sgid_tbl->ctx;
372 ctx->idx = tbl_idx;
373 ctx->refcnt = 1;
374 ctx_tbl[tbl_idx] = ctx;
375 *context = ctx;
376
377 return rc;
378 }
379
380 enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
381 u32 port_num)
382 {
383 return IB_LINK_LAYER_ETHERNET;
384 }
385
386 #define BNXT_RE_FENCE_PBL_SIZE DIV_ROUND_UP(BNXT_RE_FENCE_BYTES, PAGE_SIZE)
387
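/* Pre-build the type-1 BIND_MW work request used for fencing on this PD;
 * only the rkey is refreshed when bnxt_re_bind_fence_mw() posts it.
 */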
388 static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd)
389 {
390 struct bnxt_re_fence_data *fence = &pd->fence;
391 struct ib_mr *ib_mr = &fence->mr->ib_mr;
392 struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;
393
394 memset(wqe, 0, sizeof(*wqe));
395 wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
396 wqe->wr_id = BNXT_QPLIB_FENCE_WRID;
397 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
398 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
399 wqe->bind.zero_based = false;
400 wqe->bind.parent_l_key = ib_mr->lkey;
401 wqe->bind.va = (u64)(unsigned long)fence->va;
402 wqe->bind.length = fence->size;
403 wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ);
404 wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1;
405
406 /* Save the initial rkey in fence structure for now;
407 * wqe->bind.r_key will be set at (re)bind time.
408 */
409 fence->bind_rkey = ib_inc_rkey(fence->mw->rkey);
410 }
411
412 static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp)
413 {
414 struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp,
415 qplib_qp);
416 struct ib_pd *ib_pd = qp->ib_qp.pd;
417 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
418 struct bnxt_re_fence_data *fence = &pd->fence;
419 struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe;
420 struct bnxt_qplib_swqe wqe;
421 int rc;
422
423 memcpy(&wqe, fence_wqe, sizeof(wqe));
424 wqe.bind.r_key = fence->bind_rkey;
425 fence->bind_rkey = ib_inc_rkey(fence->bind_rkey);
426
427 ibdev_dbg(&qp->rdev->ibdev,
428 "Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n",
429 wqe.bind.r_key, qp->qplib_qp.id, pd);
430 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
431 if (rc) {
432 ibdev_err(&qp->rdev->ibdev, "Failed to bind fence-WQE\n");
433 return rc;
434 }
435 bnxt_qplib_post_send_db(&qp->qplib_qp);
436
437 return rc;
438 }
439
440 static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd)
441 {
442 struct bnxt_re_fence_data *fence = &pd->fence;
443 struct bnxt_re_dev *rdev = pd->rdev;
444 struct device *dev = &rdev->en_dev->pdev->dev;
445 struct bnxt_re_mr *mr = fence->mr;
446
447 if (fence->mw) {
448 bnxt_re_dealloc_mw(fence->mw);
449 fence->mw = NULL;
450 }
451 if (mr) {
452 if (mr->ib_mr.rkey)
453 bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr,
454 true);
455 if (mr->ib_mr.lkey)
456 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
457 kfree(mr);
458 fence->mr = NULL;
459 }
460 if (fence->dma_addr) {
461 dma_unmap_single(dev, fence->dma_addr, BNXT_RE_FENCE_BYTES,
462 DMA_BIDIRECTIONAL);
463 fence->dma_addr = 0;
464 }
465 }
466
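/* Set up fence resources for a kernel PD: DMA-map a small buffer,
 * register it as an MR, allocate a type-1 MW over it and pre-build the
 * bind WQE. Any failure unwinds through bnxt_re_destroy_fence_mr().
 */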
467 static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
468 {
469 int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND;
470 struct bnxt_re_fence_data *fence = &pd->fence;
471 struct bnxt_re_dev *rdev = pd->rdev;
472 struct device *dev = &rdev->en_dev->pdev->dev;
473 struct bnxt_re_mr *mr = NULL;
474 dma_addr_t dma_addr = 0;
475 struct ib_mw *mw;
476 int rc;
477
478 dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES,
479 DMA_BIDIRECTIONAL);
480 rc = dma_mapping_error(dev, dma_addr);
481 if (rc) {
482 ibdev_err(&rdev->ibdev, "Failed to dma-map fence-MR-mem\n");
483 rc = -EIO;
484 fence->dma_addr = 0;
485 goto fail;
486 }
487 fence->dma_addr = dma_addr;
488
489 /* Allocate a MR */
490 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
491 if (!mr) {
492 rc = -ENOMEM;
493 goto fail;
494 }
495 fence->mr = mr;
496 mr->rdev = rdev;
497 mr->qplib_mr.pd = &pd->qplib_pd;
498 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
499 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
500 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
501 if (rc) {
502 ibdev_err(&rdev->ibdev, "Failed to alloc fence-HW-MR\n");
503 goto fail;
504 }
505
506 /* Register MR */
507 mr->ib_mr.lkey = mr->qplib_mr.lkey;
508 mr->qplib_mr.va = (u64)(unsigned long)fence->va;
509 mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
510 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, NULL,
511 BNXT_RE_FENCE_PBL_SIZE, PAGE_SIZE);
512 if (rc) {
513 ibdev_err(&rdev->ibdev, "Failed to register fence-MR\n");
514 goto fail;
515 }
516 mr->ib_mr.rkey = mr->qplib_mr.rkey;
517
518 /* Create a fence MW only for kernel consumers */
519 mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL);
520 if (IS_ERR(mw)) {
521 ibdev_err(&rdev->ibdev,
522 "Failed to create fence-MW for PD: %p\n", pd);
523 rc = PTR_ERR(mw);
524 goto fail;
525 }
526 fence->mw = mw;
527
528 bnxt_re_create_fence_wqe(pd);
529 return 0;
530
531 fail:
532 bnxt_re_destroy_fence_mr(pd);
533 return rc;
534 }
535
536 /* Protection Domains */
537 int bnxt_re_dealloc_pd(struct ib_pd *ib_pd, struct ib_udata *udata)
538 {
539 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
540 struct bnxt_re_dev *rdev = pd->rdev;
541
542 bnxt_re_destroy_fence_mr(pd);
543
544 if (pd->qplib_pd.id)
545 bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
546 &pd->qplib_pd);
547 return 0;
548 }
549
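/* Allocate a hardware PD. For user contexts a DPI (doorbell page) is
 * allocated on first use and pdid/dpi/dbr are returned through udata;
 * kernel PDs additionally get the fence MR/MW created for them.
 */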
550 int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
551 {
552 struct ib_device *ibdev = ibpd->device;
553 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
554 struct bnxt_re_ucontext *ucntx = rdma_udata_to_drv_context(
555 udata, struct bnxt_re_ucontext, ib_uctx);
556 struct bnxt_re_pd *pd = container_of(ibpd, struct bnxt_re_pd, ib_pd);
557 int rc;
558
559 pd->rdev = rdev;
560 if (bnxt_qplib_alloc_pd(&rdev->qplib_res.pd_tbl, &pd->qplib_pd)) {
561 ibdev_err(&rdev->ibdev, "Failed to allocate HW PD");
562 rc = -ENOMEM;
563 goto fail;
564 }
565
566 if (udata) {
567 struct bnxt_re_pd_resp resp;
568
569 if (!ucntx->dpi.dbr) {
570 /* Allocate the DPI in alloc_pd so that ibv_devinfo and
571 * related applications do not fail when DPIs
572 * are depleted.
573 */
574 if (bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl,
575 &ucntx->dpi, ucntx)) {
576 rc = -ENOMEM;
577 goto dbfail;
578 }
579 }
580
581 resp.pdid = pd->qplib_pd.id;
582 /* Still allow mapping this DBR to the new user PD. */
583 resp.dpi = ucntx->dpi.dpi;
584 resp.dbr = (u64)ucntx->dpi.umdbr;
585
586 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
587 if (rc) {
588 ibdev_err(&rdev->ibdev,
589 "Failed to copy user response\n");
590 goto dbfail;
591 }
592 }
593
594 if (!udata)
595 if (bnxt_re_create_fence_mr(pd))
596 ibdev_warn(&rdev->ibdev,
597 "Failed to create Fence-MR\n");
598 return 0;
599 dbfail:
600 bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
601 &pd->qplib_pd);
602 fail:
603 return rc;
604 }
605
606 /* Address Handles */
607 int bnxt_re_destroy_ah(struct ib_ah *ib_ah, u32 flags)
608 {
609 struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
610 struct bnxt_re_dev *rdev = ah->rdev;
611
612 bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah,
613 !(flags & RDMA_DESTROY_AH_SLEEPABLE));
614 return 0;
615 }
616
617 static u8 bnxt_re_stack_to_dev_nw_type(enum rdma_network_type ntype)
618 {
619 u8 nw_type;
620
621 switch (ntype) {
622 case RDMA_NETWORK_IPV4:
623 nw_type = CMDQ_CREATE_AH_TYPE_V2IPV4;
624 break;
625 case RDMA_NETWORK_IPV6:
626 nw_type = CMDQ_CREATE_AH_TYPE_V2IPV6;
627 break;
628 default:
629 nw_type = CMDQ_CREATE_AH_TYPE_V1;
630 break;
631 }
632 return nw_type;
633 }
634
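/* Create an address handle from the RoCE/GRH attributes. The hardware
 * SGID index is taken from the driver's GID context, and for user
 * consumers the AH id is also written to the ucontext shared page.
 */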
635 int bnxt_re_create_ah(struct ib_ah *ib_ah, struct rdma_ah_init_attr *init_attr,
636 struct ib_udata *udata)
637 {
638 struct ib_pd *ib_pd = ib_ah->pd;
639 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
640 struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
641 const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
642 struct bnxt_re_dev *rdev = pd->rdev;
643 const struct ib_gid_attr *sgid_attr;
644 struct bnxt_re_gid_ctx *ctx;
645 struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
646 u8 nw_type;
647 int rc;
648
649 if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) {
650 ibdev_err(&rdev->ibdev, "Failed to alloc AH: GRH not set");
651 return -EINVAL;
652 }
653
654 ah->rdev = rdev;
655 ah->qplib_ah.pd = &pd->qplib_pd;
656
657 /* Supply the configuration for the HW */
658 memcpy(ah->qplib_ah.dgid.data, grh->dgid.raw,
659 sizeof(union ib_gid));
660 sgid_attr = grh->sgid_attr;
661 /* Get the HW context of the GID. The reference
662 * of GID table entry is already taken by the caller.
663 */
664 ctx = rdma_read_gid_hw_context(sgid_attr);
665 ah->qplib_ah.sgid_index = ctx->idx;
666 ah->qplib_ah.host_sgid_index = grh->sgid_index;
667 ah->qplib_ah.traffic_class = grh->traffic_class;
668 ah->qplib_ah.flow_label = grh->flow_label;
669 ah->qplib_ah.hop_limit = grh->hop_limit;
670 ah->qplib_ah.sl = rdma_ah_get_sl(ah_attr);
671
672 /* Get network header type for this GID */
673 nw_type = rdma_gid_attr_network_type(sgid_attr);
674 ah->qplib_ah.nw_type = bnxt_re_stack_to_dev_nw_type(nw_type);
675
676 memcpy(ah->qplib_ah.dmac, ah_attr->roce.dmac, ETH_ALEN);
677 rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah,
678 !(init_attr->flags &
679 RDMA_CREATE_AH_SLEEPABLE));
680 if (rc) {
681 ibdev_err(&rdev->ibdev, "Failed to allocate HW AH");
682 return rc;
683 }
684
685 /* Write AVID to shared page. */
686 if (udata) {
687 struct bnxt_re_ucontext *uctx = rdma_udata_to_drv_context(
688 udata, struct bnxt_re_ucontext, ib_uctx);
689 unsigned long flag;
690 u32 *wrptr;
691
692 spin_lock_irqsave(&uctx->sh_lock, flag);
693 wrptr = (u32 *)(uctx->shpg + BNXT_RE_AVID_OFFT);
694 *wrptr = ah->qplib_ah.id;
695 wmb(); /* make sure cache is updated. */
696 spin_unlock_irqrestore(&uctx->sh_lock, flag);
697 }
698
699 return 0;
700 }
701
702 int bnxt_re_modify_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
703 {
704 return 0;
705 }
706
707 int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
708 {
709 struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
710
711 ah_attr->type = ib_ah->type;
712 rdma_ah_set_sl(ah_attr, ah->qplib_ah.sl);
713 memcpy(ah_attr->roce.dmac, ah->qplib_ah.dmac, ETH_ALEN);
714 rdma_ah_set_grh(ah_attr, NULL, 0,
715 ah->qplib_ah.host_sgid_index,
716 0, ah->qplib_ah.traffic_class);
717 rdma_ah_set_dgid_raw(ah_attr, ah->qplib_ah.dgid.data);
718 rdma_ah_set_port_num(ah_attr, 1);
719 rdma_ah_set_static_rate(ah_attr, 0);
720 return 0;
721 }
722
723 unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp)
724 __acquires(&qp->scq->cq_lock) __acquires(&qp->rcq->cq_lock)
725 {
726 unsigned long flags;
727
728 spin_lock_irqsave(&qp->scq->cq_lock, flags);
729 if (qp->rcq != qp->scq)
730 spin_lock(&qp->rcq->cq_lock);
731 else
732 __acquire(&qp->rcq->cq_lock);
733
734 return flags;
735 }
736
737 void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
738 unsigned long flags)
739 __releases(&qp->scq->cq_lock) __releases(&qp->rcq->cq_lock)
740 {
741 if (qp->rcq != qp->scq)
742 spin_unlock(&qp->rcq->cq_lock);
743 else
744 __release(&qp->rcq->cq_lock);
745 spin_unlock_irqrestore(&qp->scq->cq_lock, flags);
746 }
747
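/* Tear down the shadow QP1 resources (shadow AH, shadow QP and the SQP
 * table) that were created to relay GSI traffic on non-gen-P5 chips.
 */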
748 static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
749 {
750 struct bnxt_re_qp *gsi_sqp;
751 struct bnxt_re_ah *gsi_sah;
752 struct bnxt_re_dev *rdev;
753 int rc = 0;
754
755 rdev = qp->rdev;
756 gsi_sqp = rdev->gsi_ctx.gsi_sqp;
757 gsi_sah = rdev->gsi_ctx.gsi_sah;
758
759 ibdev_dbg(&rdev->ibdev, "Destroy the shadow AH\n");
760 bnxt_qplib_destroy_ah(&rdev->qplib_res,
761 &gsi_sah->qplib_ah,
762 true);
763 bnxt_qplib_clean_qp(&qp->qplib_qp);
764
765 ibdev_dbg(&rdev->ibdev, "Destroy the shadow QP\n");
766 rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &gsi_sqp->qplib_qp);
767 if (rc) {
768 ibdev_err(&rdev->ibdev, "Destroy Shadow QP failed");
769 goto fail;
770 }
771 bnxt_qplib_free_qp_res(&rdev->qplib_res, &gsi_sqp->qplib_qp);
772
773 /* remove from active qp list */
774 mutex_lock(&rdev->qp_lock);
775 list_del(&gsi_sqp->list);
776 mutex_unlock(&rdev->qp_lock);
777 atomic_dec(&rdev->qp_count);
778
779 kfree(rdev->gsi_ctx.sqp_tbl);
780 kfree(gsi_sah);
781 kfree(gsi_sqp);
782 rdev->gsi_ctx.gsi_sqp = NULL;
783 rdev->gsi_ctx.gsi_sah = NULL;
784 rdev->gsi_ctx.sqp_tbl = NULL;
785
786 return 0;
787 fail:
788 return rc;
789 }
790
791 /* Queue Pairs */
792 int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
793 {
794 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
795 struct bnxt_qplib_qp *qplib_qp = &qp->qplib_qp;
796 struct bnxt_re_dev *rdev = qp->rdev;
797 struct bnxt_qplib_nq *scq_nq = NULL;
798 struct bnxt_qplib_nq *rcq_nq = NULL;
799 unsigned int flags;
800 int rc;
801
802 bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);
803
804 rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
805 if (rc) {
806 ibdev_err(&rdev->ibdev, "Failed to destroy HW QP");
807 return rc;
808 }
809
810 if (rdma_is_kernel_res(&qp->ib_qp.res)) {
811 flags = bnxt_re_lock_cqs(qp);
812 bnxt_qplib_clean_qp(&qp->qplib_qp);
813 bnxt_re_unlock_cqs(qp, flags);
814 }
815
816 bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp);
817
818 if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp) {
819 rc = bnxt_re_destroy_gsi_sqp(qp);
820 if (rc)
821 return rc;
822 }
823
824 mutex_lock(&rdev->qp_lock);
825 list_del(&qp->list);
826 mutex_unlock(&rdev->qp_lock);
827 atomic_dec(&rdev->qp_count);
828
829 ib_umem_release(qp->rumem);
830 ib_umem_release(qp->sumem);
831
832 /* Flush all entries of the notification queues associated
833 * with the given QP.
834 */
835 scq_nq = qplib_qp->scq->nq;
836 rcq_nq = qplib_qp->rcq->nq;
837 bnxt_re_synchronize_nq(scq_nq);
838 if (scq_nq != rcq_nq)
839 bnxt_re_synchronize_nq(rcq_nq);
840
841 return 0;
842 }
843
844 static u8 __from_ib_qp_type(enum ib_qp_type type)
845 {
846 switch (type) {
847 case IB_QPT_GSI:
848 return CMDQ_CREATE_QP1_TYPE_GSI;
849 case IB_QPT_RC:
850 return CMDQ_CREATE_QP_TYPE_RC;
851 case IB_QPT_UD:
852 return CMDQ_CREATE_QP_TYPE_UD;
853 default:
854 return IB_QPT_MAX;
855 }
856 }
857
858 static u16 bnxt_re_setup_rwqe_size(struct bnxt_qplib_qp *qplqp,
859 int rsge, int max)
860 {
861 if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
862 rsge = max;
863 return bnxt_re_get_rwqe_size(rsge);
864 }
865
866 static u16 bnxt_re_get_wqe_size(int ilsize, int nsge)
867 {
868 u16 wqe_size, calc_ils;
869
870 wqe_size = bnxt_re_get_swqe_size(nsge);
871 if (ilsize) {
872 calc_ils = sizeof(struct sq_send_hdr) + ilsize;
873 wqe_size = max_t(u16, calc_ils, wqe_size);
874 wqe_size = ALIGN(wqe_size, sizeof(struct sq_send_hdr));
875 }
876 return wqe_size;
877 }
878
879 static int bnxt_re_setup_swqe_size(struct bnxt_re_qp *qp,
880 struct ib_qp_init_attr *init_attr)
881 {
882 struct bnxt_qplib_dev_attr *dev_attr;
883 struct bnxt_qplib_qp *qplqp;
884 struct bnxt_re_dev *rdev;
885 struct bnxt_qplib_q *sq;
886 int align, ilsize;
887
888 rdev = qp->rdev;
889 qplqp = &qp->qplib_qp;
890 sq = &qplqp->sq;
891 dev_attr = &rdev->dev_attr;
892
893 align = sizeof(struct sq_send_hdr);
894 ilsize = ALIGN(init_attr->cap.max_inline_data, align);
895
896 sq->wqe_size = bnxt_re_get_wqe_size(ilsize, sq->max_sge);
897 if (sq->wqe_size > bnxt_re_get_swqe_size(dev_attr->max_qp_sges))
898 return -EINVAL;
899 /* For gen P4 and gen P5 backward-compatibility mode the
900 * WQE size is fixed at 128 bytes.
901 */
902 if (sq->wqe_size < bnxt_re_get_swqe_size(dev_attr->max_qp_sges) &&
903 qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
904 sq->wqe_size = bnxt_re_get_swqe_size(dev_attr->max_qp_sges);
905
906 if (init_attr->cap.max_inline_data) {
907 qplqp->max_inline_data = sq->wqe_size -
908 sizeof(struct sq_send_hdr);
909 init_attr->cap.max_inline_data = qplqp->max_inline_data;
910 if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
911 sq->max_sge = qplqp->max_inline_data /
912 sizeof(struct sq_sge);
913 }
914
915 return 0;
916 }
917
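/* Map the user-space SQ/RQ buffers described in the ABI request into the
 * qplib queues. For RC QPs extra room is reserved after the SQ for the
 * PSN search area used by the hardware.
 */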
918 static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
919 struct bnxt_re_qp *qp, struct ib_udata *udata)
920 {
921 struct bnxt_qplib_qp *qplib_qp;
922 struct bnxt_re_ucontext *cntx;
923 struct bnxt_re_qp_req ureq;
924 int bytes = 0, psn_sz;
925 struct ib_umem *umem;
926 int psn_nume;
927
928 qplib_qp = &qp->qplib_qp;
929 cntx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext,
930 ib_uctx);
931 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
932 return -EFAULT;
933
934 bytes = (qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size);
935 /* Consider mapping PSN search memory only for RC QPs. */
936 if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC) {
937 psn_sz = bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
938 sizeof(struct sq_psn_search_ext) :
939 sizeof(struct sq_psn_search);
940 psn_nume = (qplib_qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
941 qplib_qp->sq.max_wqe :
942 ((qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size) /
943 sizeof(struct bnxt_qplib_sge));
944 bytes += (psn_nume * psn_sz);
945 }
946
947 bytes = PAGE_ALIGN(bytes);
948 umem = ib_umem_get(&rdev->ibdev, ureq.qpsva, bytes,
949 IB_ACCESS_LOCAL_WRITE);
950 if (IS_ERR(umem))
951 return PTR_ERR(umem);
952
953 qp->sumem = umem;
954 qplib_qp->sq.sg_info.umem = umem;
955 qplib_qp->sq.sg_info.pgsize = PAGE_SIZE;
956 qplib_qp->sq.sg_info.pgshft = PAGE_SHIFT;
957 qplib_qp->qp_handle = ureq.qp_handle;
958
959 if (!qp->qplib_qp.srq) {
960 bytes = (qplib_qp->rq.max_wqe * qplib_qp->rq.wqe_size);
961 bytes = PAGE_ALIGN(bytes);
962 umem = ib_umem_get(&rdev->ibdev, ureq.qprva, bytes,
963 IB_ACCESS_LOCAL_WRITE);
964 if (IS_ERR(umem))
965 goto rqfail;
966 qp->rumem = umem;
967 qplib_qp->rq.sg_info.umem = umem;
968 qplib_qp->rq.sg_info.pgsize = PAGE_SIZE;
969 qplib_qp->rq.sg_info.pgshft = PAGE_SHIFT;
970 }
971
972 qplib_qp->dpi = &cntx->dpi;
973 return 0;
974 rqfail:
975 ib_umem_release(qp->sumem);
976 qp->sumem = NULL;
977 memset(&qplib_qp->sq.sg_info, 0, sizeof(qplib_qp->sq.sg_info));
978
979 return PTR_ERR(umem);
980 }
981
982 static struct bnxt_re_ah *bnxt_re_create_shadow_qp_ah
983 (struct bnxt_re_pd *pd,
984 struct bnxt_qplib_res *qp1_res,
985 struct bnxt_qplib_qp *qp1_qp)
986 {
987 struct bnxt_re_dev *rdev = pd->rdev;
988 struct bnxt_re_ah *ah;
989 union ib_gid sgid;
990 int rc;
991
992 ah = kzalloc(sizeof(*ah), GFP_KERNEL);
993 if (!ah)
994 return NULL;
995
996 ah->rdev = rdev;
997 ah->qplib_ah.pd = &pd->qplib_pd;
998
999 rc = bnxt_re_query_gid(&rdev->ibdev, 1, 0, &sgid);
1000 if (rc)
1001 goto fail;
1002
1003 /* supply the dgid data same as sgid */
1004 memcpy(ah->qplib_ah.dgid.data, &sgid.raw,
1005 sizeof(union ib_gid));
1006 ah->qplib_ah.sgid_index = 0;
1007
1008 ah->qplib_ah.traffic_class = 0;
1009 ah->qplib_ah.flow_label = 0;
1010 ah->qplib_ah.hop_limit = 1;
1011 ah->qplib_ah.sl = 0;
1012 /* Have DMAC same as SMAC */
1013 ether_addr_copy(ah->qplib_ah.dmac, rdev->netdev->dev_addr);
1014
1015 rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah, false);
1016 if (rc) {
1017 ibdev_err(&rdev->ibdev,
1018 "Failed to allocate HW AH for Shadow QP");
1019 goto fail;
1020 }
1021
1022 return ah;
1023
1024 fail:
1025 kfree(ah);
1026 return NULL;
1027 }
1028
1029 static struct bnxt_re_qp *bnxt_re_create_shadow_qp
1030 (struct bnxt_re_pd *pd,
1031 struct bnxt_qplib_res *qp1_res,
1032 struct bnxt_qplib_qp *qp1_qp)
1033 {
1034 struct bnxt_re_dev *rdev = pd->rdev;
1035 struct bnxt_re_qp *qp;
1036 int rc;
1037
1038 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1039 if (!qp)
1040 return NULL;
1041
1042 qp->rdev = rdev;
1043
1044 /* Initialize the shadow QP structure from the QP1 values */
1045 ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);
1046
1047 qp->qplib_qp.pd = &pd->qplib_pd;
1048 qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
1049 qp->qplib_qp.type = IB_QPT_UD;
1050
1051 qp->qplib_qp.max_inline_data = 0;
1052 qp->qplib_qp.sig_type = true;
1053
1054 /* Shadow QP SQ depth should be same as QP1 RQ depth */
1055 qp->qplib_qp.sq.wqe_size = bnxt_re_get_wqe_size(0, 6);
1056 qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
1057 qp->qplib_qp.sq.max_sge = 2;
1058 /* Q full delta can be 1 since it is internal QP */
1059 qp->qplib_qp.sq.q_full_delta = 1;
1060 qp->qplib_qp.sq.sg_info.pgsize = PAGE_SIZE;
1061 qp->qplib_qp.sq.sg_info.pgshft = PAGE_SHIFT;
1062
1063 qp->qplib_qp.scq = qp1_qp->scq;
1064 qp->qplib_qp.rcq = qp1_qp->rcq;
1065
1066 qp->qplib_qp.rq.wqe_size = bnxt_re_get_rwqe_size(6);
1067 qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
1068 qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
1069 /* Q full delta can be 1 since it is internal QP */
1070 qp->qplib_qp.rq.q_full_delta = 1;
1071 qp->qplib_qp.rq.sg_info.pgsize = PAGE_SIZE;
1072 qp->qplib_qp.rq.sg_info.pgshft = PAGE_SHIFT;
1073
1074 qp->qplib_qp.mtu = qp1_qp->mtu;
1075
1076 qp->qplib_qp.sq_hdr_buf_size = 0;
1077 qp->qplib_qp.rq_hdr_buf_size = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
1078 qp->qplib_qp.dpi = &rdev->dpi_privileged;
1079
1080 rc = bnxt_qplib_create_qp(qp1_res, &qp->qplib_qp);
1081 if (rc)
1082 goto fail;
1083
1084 spin_lock_init(&qp->sq_lock);
1085 INIT_LIST_HEAD(&qp->list);
1086 mutex_lock(&rdev->qp_lock);
1087 list_add_tail(&qp->list, &rdev->qp_list);
1088 atomic_inc(&rdev->qp_count);
1089 mutex_unlock(&rdev->qp_lock);
1090 return qp;
1091 fail:
1092 kfree(qp);
1093 return NULL;
1094 }
1095
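/* Initialize the receive side: either attach the SRQ or size the RQ from
 * the requested WR/SGE counts, clamped to the device limits.
 */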
1096 static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp,
1097 struct ib_qp_init_attr *init_attr)
1098 {
1099 struct bnxt_qplib_dev_attr *dev_attr;
1100 struct bnxt_qplib_qp *qplqp;
1101 struct bnxt_re_dev *rdev;
1102 struct bnxt_qplib_q *rq;
1103 int entries;
1104
1105 rdev = qp->rdev;
1106 qplqp = &qp->qplib_qp;
1107 rq = &qplqp->rq;
1108 dev_attr = &rdev->dev_attr;
1109
1110 if (init_attr->srq) {
1111 struct bnxt_re_srq *srq;
1112
1113 srq = container_of(init_attr->srq, struct bnxt_re_srq, ib_srq);
1114 qplqp->srq = &srq->qplib_srq;
1115 rq->max_wqe = 0;
1116 } else {
1117 rq->max_sge = init_attr->cap.max_recv_sge;
1118 if (rq->max_sge > dev_attr->max_qp_sges)
1119 rq->max_sge = dev_attr->max_qp_sges;
1120 init_attr->cap.max_recv_sge = rq->max_sge;
1121 rq->wqe_size = bnxt_re_setup_rwqe_size(qplqp, rq->max_sge,
1122 dev_attr->max_qp_sges);
1123 /* Allocate 1 more than what's provided so posting max doesn't
1124 * mean empty.
1125 */
1126 entries = roundup_pow_of_two(init_attr->cap.max_recv_wr + 1);
1127 rq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + 1);
1128 rq->q_full_delta = 0;
1129 rq->sg_info.pgsize = PAGE_SIZE;
1130 rq->sg_info.pgshft = PAGE_SHIFT;
1131 }
1132
1133 return 0;
1134 }
1135
1136 static void bnxt_re_adjust_gsi_rq_attr(struct bnxt_re_qp *qp)
1137 {
1138 struct bnxt_qplib_dev_attr *dev_attr;
1139 struct bnxt_qplib_qp *qplqp;
1140 struct bnxt_re_dev *rdev;
1141
1142 rdev = qp->rdev;
1143 qplqp = &qp->qplib_qp;
1144 dev_attr = &rdev->dev_attr;
1145
1146 if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) {
1147 qplqp->rq.max_sge = dev_attr->max_qp_sges;
1148 if (qplqp->rq.max_sge > dev_attr->max_qp_sges)
1149 qplqp->rq.max_sge = dev_attr->max_qp_sges;
1150 qplqp->rq.max_sge = 6;
1151 }
1152 }
1153
1154 static int bnxt_re_init_sq_attr(struct bnxt_re_qp *qp,
1155 struct ib_qp_init_attr *init_attr,
1156 struct ib_udata *udata)
1157 {
1158 struct bnxt_qplib_dev_attr *dev_attr;
1159 struct bnxt_qplib_qp *qplqp;
1160 struct bnxt_re_dev *rdev;
1161 struct bnxt_qplib_q *sq;
1162 int entries;
1163 int diff;
1164 int rc;
1165
1166 rdev = qp->rdev;
1167 qplqp = &qp->qplib_qp;
1168 sq = &qplqp->sq;
1169 dev_attr = &rdev->dev_attr;
1170
1171 sq->max_sge = init_attr->cap.max_send_sge;
1172 if (sq->max_sge > dev_attr->max_qp_sges) {
1173 sq->max_sge = dev_attr->max_qp_sges;
1174 init_attr->cap.max_send_sge = sq->max_sge;
1175 }
1176
1177 rc = bnxt_re_setup_swqe_size(qp, init_attr);
1178 if (rc)
1179 return rc;
1180
1181 entries = init_attr->cap.max_send_wr;
1182 /* Allocate 128 + 1 more than what's provided */
1183 diff = (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE) ?
1184 0 : BNXT_QPLIB_RESERVED_QP_WRS;
1185 entries = roundup_pow_of_two(entries + diff + 1);
1186 sq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + diff + 1);
1187 sq->q_full_delta = diff + 1;
1188 /*
1189 * Reserve one slot for the Phantom WQE. The application can
1190 * post one extra entry in this case, but this is allowed to
1191 * avoid an unexpected queue-full condition.
1192 */
1193 qplqp->sq.q_full_delta -= 1;
1194 qplqp->sq.sg_info.pgsize = PAGE_SIZE;
1195 qplqp->sq.sg_info.pgshft = PAGE_SHIFT;
1196
1197 return 0;
1198 }
1199
1200 static void bnxt_re_adjust_gsi_sq_attr(struct bnxt_re_qp *qp,
1201 struct ib_qp_init_attr *init_attr)
1202 {
1203 struct bnxt_qplib_dev_attr *dev_attr;
1204 struct bnxt_qplib_qp *qplqp;
1205 struct bnxt_re_dev *rdev;
1206 int entries;
1207
1208 rdev = qp->rdev;
1209 qplqp = &qp->qplib_qp;
1210 dev_attr = &rdev->dev_attr;
1211
1212 if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) {
1213 entries = roundup_pow_of_two(init_attr->cap.max_send_wr + 1);
1214 qplqp->sq.max_wqe = min_t(u32, entries,
1215 dev_attr->max_qp_wqes + 1);
1216 qplqp->sq.q_full_delta = qplqp->sq.max_wqe -
1217 init_attr->cap.max_send_wr;
1218 qplqp->sq.max_sge++; /* Need one extra sge to put UD header */
1219 if (qplqp->sq.max_sge > dev_attr->max_qp_sges)
1220 qplqp->sq.max_sge = dev_attr->max_qp_sges;
1221 }
1222 }
1223
1224 static int bnxt_re_init_qp_type(struct bnxt_re_dev *rdev,
1225 struct ib_qp_init_attr *init_attr)
1226 {
1227 struct bnxt_qplib_chip_ctx *chip_ctx;
1228 int qptype;
1229
1230 chip_ctx = rdev->chip_ctx;
1231
1232 qptype = __from_ib_qp_type(init_attr->qp_type);
1233 if (qptype == IB_QPT_MAX) {
1234 ibdev_err(&rdev->ibdev, "QP type 0x%x not supported", qptype);
1235 qptype = -EOPNOTSUPP;
1236 goto out;
1237 }
1238
1239 if (bnxt_qplib_is_chip_gen_p5(chip_ctx) &&
1240 init_attr->qp_type == IB_QPT_GSI)
1241 qptype = CMDQ_CREATE_QP_TYPE_GSI;
1242 out:
1243 return qptype;
1244 }
1245
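/* Translate ib_qp_init_attr into the qplib QP structure: CQs, RQ/SRQ and
 * SQ sizing, and, for user QPs, the udata-described buffers and DPI.
 */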
1246 static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
1247 struct ib_qp_init_attr *init_attr,
1248 struct ib_udata *udata)
1249 {
1250 struct bnxt_qplib_dev_attr *dev_attr;
1251 struct bnxt_qplib_qp *qplqp;
1252 struct bnxt_re_dev *rdev;
1253 struct bnxt_re_cq *cq;
1254 int rc = 0, qptype;
1255
1256 rdev = qp->rdev;
1257 qplqp = &qp->qplib_qp;
1258 dev_attr = &rdev->dev_attr;
1259
1260 /* Setup misc params */
1261 ether_addr_copy(qplqp->smac, rdev->netdev->dev_addr);
1262 qplqp->pd = &pd->qplib_pd;
1263 qplqp->qp_handle = (u64)qplqp;
1264 qplqp->max_inline_data = init_attr->cap.max_inline_data;
1265 qplqp->sig_type = ((init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ?
1266 true : false);
1267 qptype = bnxt_re_init_qp_type(rdev, init_attr);
1268 if (qptype < 0) {
1269 rc = qptype;
1270 goto out;
1271 }
1272 qplqp->type = (u8)qptype;
1273 qplqp->wqe_mode = rdev->chip_ctx->modes.wqe_mode;
1274
1275 if (init_attr->qp_type == IB_QPT_RC) {
1276 qplqp->max_rd_atomic = dev_attr->max_qp_rd_atom;
1277 qplqp->max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
1278 }
1279 qplqp->mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
1280 qplqp->dpi = &rdev->dpi_privileged; /* Doorbell page */
1281 if (init_attr->create_flags) {
1282 ibdev_dbg(&rdev->ibdev,
1283 "QP create flags 0x%x not supported",
1284 init_attr->create_flags);
1285 return -EOPNOTSUPP;
1286 }
1287
1288 /* Setup CQs */
1289 if (init_attr->send_cq) {
1290 cq = container_of(init_attr->send_cq, struct bnxt_re_cq, ib_cq);
1291 qplqp->scq = &cq->qplib_cq;
1292 qp->scq = cq;
1293 }
1294
1295 if (init_attr->recv_cq) {
1296 cq = container_of(init_attr->recv_cq, struct bnxt_re_cq, ib_cq);
1297 qplqp->rcq = &cq->qplib_cq;
1298 qp->rcq = cq;
1299 }
1300
1301 /* Setup RQ/SRQ */
1302 rc = bnxt_re_init_rq_attr(qp, init_attr);
1303 if (rc)
1304 goto out;
1305 if (init_attr->qp_type == IB_QPT_GSI)
1306 bnxt_re_adjust_gsi_rq_attr(qp);
1307
1308 /* Setup SQ */
1309 rc = bnxt_re_init_sq_attr(qp, init_attr, udata);
1310 if (rc)
1311 goto out;
1312 if (init_attr->qp_type == IB_QPT_GSI)
1313 bnxt_re_adjust_gsi_sq_attr(qp, init_attr);
1314
1315 if (udata) /* This will update DPI and qp_handle */
1316 rc = bnxt_re_init_user_qp(rdev, pd, qp, udata);
1317 out:
1318 return rc;
1319 }
1320
1321 static int bnxt_re_create_shadow_gsi(struct bnxt_re_qp *qp,
1322 struct bnxt_re_pd *pd)
1323 {
1324 struct bnxt_re_sqp_entries *sqp_tbl;
1325 struct bnxt_re_dev *rdev;
1326 struct bnxt_re_qp *sqp;
1327 struct bnxt_re_ah *sah;
1328 int rc = 0;
1329
1330 rdev = qp->rdev;
1331 /* Create a shadow QP to handle the QP1 traffic */
1332 sqp_tbl = kcalloc(BNXT_RE_MAX_GSI_SQP_ENTRIES, sizeof(*sqp_tbl),
1333 GFP_KERNEL);
1334 if (!sqp_tbl)
1335 return -ENOMEM;
1336 rdev->gsi_ctx.sqp_tbl = sqp_tbl;
1337
1338 sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res, &qp->qplib_qp);
1339 if (!sqp) {
1340 rc = -ENODEV;
1341 ibdev_err(&rdev->ibdev, "Failed to create Shadow QP for QP1");
1342 goto out;
1343 }
1344 rdev->gsi_ctx.gsi_sqp = sqp;
1345
1346 sqp->rcq = qp->rcq;
1347 sqp->scq = qp->scq;
1348 sah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res,
1349 &qp->qplib_qp);
1350 if (!sah) {
1351 bnxt_qplib_destroy_qp(&rdev->qplib_res,
1352 &sqp->qplib_qp);
1353 rc = -ENODEV;
1354 ibdev_err(&rdev->ibdev,
1355 "Failed to create AH entry for ShadowQP");
1356 goto out;
1357 }
1358 rdev->gsi_ctx.gsi_sah = sah;
1359
1360 return 0;
1361 out:
1362 kfree(sqp_tbl);
1363 return rc;
1364 }
1365
1366 static int bnxt_re_create_gsi_qp(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
1367 struct ib_qp_init_attr *init_attr)
1368 {
1369 struct bnxt_re_dev *rdev;
1370 struct bnxt_qplib_qp *qplqp;
1371 int rc = 0;
1372
1373 rdev = qp->rdev;
1374 qplqp = &qp->qplib_qp;
1375
1376 qplqp->rq_hdr_buf_size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
1377 qplqp->sq_hdr_buf_size = BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2;
1378
1379 rc = bnxt_qplib_create_qp1(&rdev->qplib_res, qplqp);
1380 if (rc) {
1381 ibdev_err(&rdev->ibdev, "create HW QP1 failed!");
1382 goto out;
1383 }
1384
1385 rc = bnxt_re_create_shadow_gsi(qp, pd);
1386 out:
1387 return rc;
1388 }
1389
1390 static bool bnxt_re_test_qp_limits(struct bnxt_re_dev *rdev,
1391 struct ib_qp_init_attr *init_attr,
1392 struct bnxt_qplib_dev_attr *dev_attr)
1393 {
1394 bool rc = true;
1395
1396 if (init_attr->cap.max_send_wr > dev_attr->max_qp_wqes ||
1397 init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes ||
1398 init_attr->cap.max_send_sge > dev_attr->max_qp_sges ||
1399 init_attr->cap.max_recv_sge > dev_attr->max_qp_sges ||
1400 init_attr->cap.max_inline_data > dev_attr->max_inline_data) {
1401 ibdev_err(&rdev->ibdev,
1402 "Create QP failed - max exceeded! 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x",
1403 init_attr->cap.max_send_wr, dev_attr->max_qp_wqes,
1404 init_attr->cap.max_recv_wr, dev_attr->max_qp_wqes,
1405 init_attr->cap.max_send_sge, dev_attr->max_qp_sges,
1406 init_attr->cap.max_recv_sge, dev_attr->max_qp_sges,
1407 init_attr->cap.max_inline_data,
1408 dev_attr->max_inline_data);
1409 rc = false;
1410 }
1411 return rc;
1412 }
1413
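/* Verbs create_qp entry point. Validates the requested capabilities
 * against device limits, initializes the qplib QP, and for GSI QPs on
 * non-gen-P5 chips also creates the shadow QP/AH used to relay QP1
 * traffic.
 */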
1414 int bnxt_re_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *qp_init_attr,
1415 struct ib_udata *udata)
1416 {
1417 struct ib_pd *ib_pd = ib_qp->pd;
1418 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
1419 struct bnxt_re_dev *rdev = pd->rdev;
1420 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
1421 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
1422 int rc;
1423
1424 rc = bnxt_re_test_qp_limits(rdev, qp_init_attr, dev_attr);
1425 if (!rc) {
1426 rc = -EINVAL;
1427 goto fail;
1428 }
1429
1430 qp->rdev = rdev;
1431 rc = bnxt_re_init_qp_attr(qp, pd, qp_init_attr, udata);
1432 if (rc)
1433 goto fail;
1434
1435 if (qp_init_attr->qp_type == IB_QPT_GSI &&
1436 !(bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx))) {
1437 rc = bnxt_re_create_gsi_qp(qp, pd, qp_init_attr);
1438 if (rc == -ENODEV)
1439 goto qp_destroy;
1440 if (rc)
1441 goto fail;
1442 } else {
1443 rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp);
1444 if (rc) {
1445 ibdev_err(&rdev->ibdev, "Failed to create HW QP");
1446 goto free_umem;
1447 }
1448 if (udata) {
1449 struct bnxt_re_qp_resp resp;
1450
1451 resp.qpid = qp->qplib_qp.id;
1452 resp.rsvd = 0;
1453 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
1454 if (rc) {
1455 ibdev_err(&rdev->ibdev, "Failed to copy QP udata");
1456 goto qp_destroy;
1457 }
1458 }
1459 }
1460
1461 qp->ib_qp.qp_num = qp->qplib_qp.id;
1462 if (qp_init_attr->qp_type == IB_QPT_GSI)
1463 rdev->gsi_ctx.gsi_qp = qp;
1464 spin_lock_init(&qp->sq_lock);
1465 spin_lock_init(&qp->rq_lock);
1466 INIT_LIST_HEAD(&qp->list);
1467 mutex_lock(&rdev->qp_lock);
1468 list_add_tail(&qp->list, &rdev->qp_list);
1469 mutex_unlock(&rdev->qp_lock);
1470 atomic_inc(&rdev->qp_count);
1471
1472 return 0;
1473 qp_destroy:
1474 bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
1475 free_umem:
1476 ib_umem_release(qp->rumem);
1477 ib_umem_release(qp->sumem);
1478 fail:
1479 return rc;
1480 }
1481
1482 static u8 __from_ib_qp_state(enum ib_qp_state state)
1483 {
1484 switch (state) {
1485 case IB_QPS_RESET:
1486 return CMDQ_MODIFY_QP_NEW_STATE_RESET;
1487 case IB_QPS_INIT:
1488 return CMDQ_MODIFY_QP_NEW_STATE_INIT;
1489 case IB_QPS_RTR:
1490 return CMDQ_MODIFY_QP_NEW_STATE_RTR;
1491 case IB_QPS_RTS:
1492 return CMDQ_MODIFY_QP_NEW_STATE_RTS;
1493 case IB_QPS_SQD:
1494 return CMDQ_MODIFY_QP_NEW_STATE_SQD;
1495 case IB_QPS_SQE:
1496 return CMDQ_MODIFY_QP_NEW_STATE_SQE;
1497 case IB_QPS_ERR:
1498 default:
1499 return CMDQ_MODIFY_QP_NEW_STATE_ERR;
1500 }
1501 }
1502
1503 static enum ib_qp_state __to_ib_qp_state(u8 state)
1504 {
1505 switch (state) {
1506 case CMDQ_MODIFY_QP_NEW_STATE_RESET:
1507 return IB_QPS_RESET;
1508 case CMDQ_MODIFY_QP_NEW_STATE_INIT:
1509 return IB_QPS_INIT;
1510 case CMDQ_MODIFY_QP_NEW_STATE_RTR:
1511 return IB_QPS_RTR;
1512 case CMDQ_MODIFY_QP_NEW_STATE_RTS:
1513 return IB_QPS_RTS;
1514 case CMDQ_MODIFY_QP_NEW_STATE_SQD:
1515 return IB_QPS_SQD;
1516 case CMDQ_MODIFY_QP_NEW_STATE_SQE:
1517 return IB_QPS_SQE;
1518 case CMDQ_MODIFY_QP_NEW_STATE_ERR:
1519 default:
1520 return IB_QPS_ERR;
1521 }
1522 }
1523
1524 static u32 __from_ib_mtu(enum ib_mtu mtu)
1525 {
1526 switch (mtu) {
1527 case IB_MTU_256:
1528 return CMDQ_MODIFY_QP_PATH_MTU_MTU_256;
1529 case IB_MTU_512:
1530 return CMDQ_MODIFY_QP_PATH_MTU_MTU_512;
1531 case IB_MTU_1024:
1532 return CMDQ_MODIFY_QP_PATH_MTU_MTU_1024;
1533 case IB_MTU_2048:
1534 return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1535 case IB_MTU_4096:
1536 return CMDQ_MODIFY_QP_PATH_MTU_MTU_4096;
1537 default:
1538 return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1539 }
1540 }
1541
1542 static enum ib_mtu __to_ib_mtu(u32 mtu)
1543 {
1544 switch (mtu & CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) {
1545 case CMDQ_MODIFY_QP_PATH_MTU_MTU_256:
1546 return IB_MTU_256;
1547 case CMDQ_MODIFY_QP_PATH_MTU_MTU_512:
1548 return IB_MTU_512;
1549 case CMDQ_MODIFY_QP_PATH_MTU_MTU_1024:
1550 return IB_MTU_1024;
1551 case CMDQ_MODIFY_QP_PATH_MTU_MTU_2048:
1552 return IB_MTU_2048;
1553 case CMDQ_MODIFY_QP_PATH_MTU_MTU_4096:
1554 return IB_MTU_4096;
1555 default:
1556 return IB_MTU_2048;
1557 }
1558 }
1559
1560 /* Shared Receive Queues */
1561 int bnxt_re_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata)
1562 {
1563 struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1564 ib_srq);
1565 struct bnxt_re_dev *rdev = srq->rdev;
1566 struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
1567 struct bnxt_qplib_nq *nq = NULL;
1568
1569 if (qplib_srq->cq)
1570 nq = qplib_srq->cq->nq;
1571 bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq);
1572 ib_umem_release(srq->umem);
1573 atomic_dec(&rdev->srq_count);
1574 if (nq)
1575 nq->budget--;
1576 return 0;
1577 }
1578
1579 static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
1580 struct bnxt_re_pd *pd,
1581 struct bnxt_re_srq *srq,
1582 struct ib_udata *udata)
1583 {
1584 struct bnxt_re_srq_req ureq;
1585 struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
1586 struct ib_umem *umem;
1587 int bytes = 0;
1588 struct bnxt_re_ucontext *cntx = rdma_udata_to_drv_context(
1589 udata, struct bnxt_re_ucontext, ib_uctx);
1590
1591 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
1592 return -EFAULT;
1593
1594 bytes = (qplib_srq->max_wqe * qplib_srq->wqe_size);
1595 bytes = PAGE_ALIGN(bytes);
1596 umem = ib_umem_get(&rdev->ibdev, ureq.srqva, bytes,
1597 IB_ACCESS_LOCAL_WRITE);
1598 if (IS_ERR(umem))
1599 return PTR_ERR(umem);
1600
1601 srq->umem = umem;
1602 qplib_srq->sg_info.umem = umem;
1603 qplib_srq->sg_info.pgsize = PAGE_SIZE;
1604 qplib_srq->sg_info.pgshft = PAGE_SHIFT;
1605 qplib_srq->srq_handle = ureq.srq_handle;
1606 qplib_srq->dpi = &cntx->dpi;
1607
1608 return 0;
1609 }
1610
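/* Create an SRQ serviced by NQ0. One extra WQE is allocated so that
 * posting the advertised maximum does not make the queue look empty.
 */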
1611 int bnxt_re_create_srq(struct ib_srq *ib_srq,
1612 struct ib_srq_init_attr *srq_init_attr,
1613 struct ib_udata *udata)
1614 {
1615 struct bnxt_qplib_dev_attr *dev_attr;
1616 struct bnxt_qplib_nq *nq = NULL;
1617 struct bnxt_re_dev *rdev;
1618 struct bnxt_re_srq *srq;
1619 struct bnxt_re_pd *pd;
1620 struct ib_pd *ib_pd;
1621 int rc, entries;
1622
1623 ib_pd = ib_srq->pd;
1624 pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
1625 rdev = pd->rdev;
1626 dev_attr = &rdev->dev_attr;
1627 srq = container_of(ib_srq, struct bnxt_re_srq, ib_srq);
1628
1629 if (srq_init_attr->attr.max_wr >= dev_attr->max_srq_wqes) {
1630 ibdev_err(&rdev->ibdev, "Create SRQ failed - max exceeded");
1631 rc = -EINVAL;
1632 goto exit;
1633 }
1634
1635 if (srq_init_attr->srq_type != IB_SRQT_BASIC) {
1636 rc = -EOPNOTSUPP;
1637 goto exit;
1638 }
1639
1640 srq->rdev = rdev;
1641 srq->qplib_srq.pd = &pd->qplib_pd;
1642 srq->qplib_srq.dpi = &rdev->dpi_privileged;
1643 /* Allocate 1 more than what's provided so posting max doesn't
1644 * mean empty
1645 */
1646 entries = roundup_pow_of_two(srq_init_attr->attr.max_wr + 1);
1647 if (entries > dev_attr->max_srq_wqes + 1)
1648 entries = dev_attr->max_srq_wqes + 1;
1649 srq->qplib_srq.max_wqe = entries;
1650
1651 srq->qplib_srq.max_sge = srq_init_attr->attr.max_sge;
1652 /* 128 byte wqe size for SRQ, so use max sges */
1653 srq->qplib_srq.wqe_size = bnxt_re_get_rwqe_size(dev_attr->max_srq_sges);
1654 srq->qplib_srq.threshold = srq_init_attr->attr.srq_limit;
1655 srq->srq_limit = srq_init_attr->attr.srq_limit;
1656 srq->qplib_srq.eventq_hw_ring_id = rdev->nq[0].ring_id;
1657 nq = &rdev->nq[0];
1658
1659 if (udata) {
1660 rc = bnxt_re_init_user_srq(rdev, pd, srq, udata);
1661 if (rc)
1662 goto fail;
1663 }
1664
1665 rc = bnxt_qplib_create_srq(&rdev->qplib_res, &srq->qplib_srq);
1666 if (rc) {
1667 ibdev_err(&rdev->ibdev, "Create HW SRQ failed!");
1668 goto fail;
1669 }
1670
1671 if (udata) {
1672 struct bnxt_re_srq_resp resp;
1673
1674 resp.srqid = srq->qplib_srq.id;
1675 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
1676 if (rc) {
1677 ibdev_err(&rdev->ibdev, "SRQ copy to udata failed!");
1678 bnxt_qplib_destroy_srq(&rdev->qplib_res,
1679 &srq->qplib_srq);
1680 goto fail;
1681 }
1682 }
1683 if (nq)
1684 nq->budget++;
1685 atomic_inc(&rdev->srq_count);
1686 spin_lock_init(&srq->lock);
1687
1688 return 0;
1689
1690 fail:
1691 ib_umem_release(srq->umem);
1692 exit:
1693 return rc;
1694 }
1695
1696 int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
1697 enum ib_srq_attr_mask srq_attr_mask,
1698 struct ib_udata *udata)
1699 {
1700 struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1701 ib_srq);
1702 struct bnxt_re_dev *rdev = srq->rdev;
1703 int rc;
1704
1705 switch (srq_attr_mask) {
1706 case IB_SRQ_MAX_WR:
1707 /* SRQ resize is not supported */
1708 break;
1709 case IB_SRQ_LIMIT:
1710 /* Change the SRQ threshold */
1711 if (srq_attr->srq_limit > srq->qplib_srq.max_wqe)
1712 return -EINVAL;
1713
1714 srq->qplib_srq.threshold = srq_attr->srq_limit;
1715 rc = bnxt_qplib_modify_srq(&rdev->qplib_res, &srq->qplib_srq);
1716 if (rc) {
1717 ibdev_err(&rdev->ibdev, "Modify HW SRQ failed!");
1718 return rc;
1719 }
1720 /* On success, update the shadow */
1721 srq->srq_limit = srq_attr->srq_limit;
1722 /* No need to build and send a response back to udata */
1723 break;
1724 default:
1725 ibdev_err(&rdev->ibdev,
1726 "Unsupported srq_attr_mask 0x%x", srq_attr_mask);
1727 return -EINVAL;
1728 }
1729 return 0;
1730 }
1731
1732 int bnxt_re_query_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr)
1733 {
1734 struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1735 ib_srq);
1736 struct bnxt_re_srq tsrq;
1737 struct bnxt_re_dev *rdev = srq->rdev;
1738 int rc;
1739
1740 /* Get live SRQ attr */
1741 tsrq.qplib_srq.id = srq->qplib_srq.id;
1742 rc = bnxt_qplib_query_srq(&rdev->qplib_res, &tsrq.qplib_srq);
1743 if (rc) {
1744 ibdev_err(&rdev->ibdev, "Query HW SRQ failed!");
1745 return rc;
1746 }
1747 srq_attr->max_wr = srq->qplib_srq.max_wqe;
1748 srq_attr->max_sge = srq->qplib_srq.max_sge;
1749 srq_attr->srq_limit = tsrq.qplib_srq.threshold;
1750
1751 return 0;
1752 }
1753
1754 int bnxt_re_post_srq_recv(struct ib_srq *ib_srq, const struct ib_recv_wr *wr,
1755 const struct ib_recv_wr **bad_wr)
1756 {
1757 struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1758 ib_srq);
1759 struct bnxt_qplib_swqe wqe;
1760 unsigned long flags;
1761 int rc = 0;
1762
1763 spin_lock_irqsave(&srq->lock, flags);
1764 while (wr) {
1765 /* Transcribe each ib_recv_wr to qplib_swqe */
1766 wqe.num_sge = wr->num_sge;
1767 bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
1768 wqe.wr_id = wr->wr_id;
1769 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
1770
1771 rc = bnxt_qplib_post_srq_recv(&srq->qplib_srq, &wqe);
1772 if (rc) {
1773 *bad_wr = wr;
1774 break;
1775 }
1776 wr = wr->next;
1777 }
1778 spin_unlock_irqrestore(&srq->lock, flags);
1779
1780 return rc;
1781 }
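/* Propagate a subset of the QP1 attributes (state, pkey index, qkey and
 * SQ PSN) to the shadow GSI QP so that it tracks the real QP1. The qkey
 * programmed here is the fixed value used for the shadow QP.
 */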
1782 static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
1783 struct bnxt_re_qp *qp1_qp,
1784 int qp_attr_mask)
1785 {
1786 struct bnxt_re_qp *qp = rdev->gsi_ctx.gsi_sqp;
1787 int rc = 0;
1788
1789 if (qp_attr_mask & IB_QP_STATE) {
1790 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
1791 qp->qplib_qp.state = qp1_qp->qplib_qp.state;
1792 }
1793 if (qp_attr_mask & IB_QP_PKEY_INDEX) {
1794 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
1795 qp->qplib_qp.pkey_index = qp1_qp->qplib_qp.pkey_index;
1796 }
1797
1798 if (qp_attr_mask & IB_QP_QKEY) {
1799 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
1800 /* Using a Random QKEY */
1801 qp->qplib_qp.qkey = 0x81818181;
1802 }
1803 if (qp_attr_mask & IB_QP_SQ_PSN) {
1804 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
1805 qp->qplib_qp.sq.psn = qp1_qp->qplib_qp.sq.psn;
1806 }
1807
1808 rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
1809 if (rc)
1810 ibdev_err(&rdev->ibdev, "Failed to modify Shadow QP for QP1");
1811 return rc;
1812 }
1813
1814 int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
1815 int qp_attr_mask, struct ib_udata *udata)
1816 {
1817 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
1818 struct bnxt_re_dev *rdev = qp->rdev;
1819 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
1820 enum ib_qp_state curr_qp_state, new_qp_state;
1821 int rc, entries;
1822 unsigned int flags;
1823 u8 nw_type;
1824
1825 if (qp_attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
1826 return -EOPNOTSUPP;
1827
1828 qp->qplib_qp.modify_flags = 0;
1829 if (qp_attr_mask & IB_QP_STATE) {
1830 curr_qp_state = __to_ib_qp_state(qp->qplib_qp.cur_qp_state);
1831 new_qp_state = qp_attr->qp_state;
1832 if (!ib_modify_qp_is_ok(curr_qp_state, new_qp_state,
1833 ib_qp->qp_type, qp_attr_mask)) {
1834 ibdev_err(&rdev->ibdev,
1835 "Invalid attribute mask: %#x specified ",
1836 qp_attr_mask);
1837 ibdev_err(&rdev->ibdev,
1838 "for qpn: %#x type: %#x",
1839 ib_qp->qp_num, ib_qp->qp_type);
1840 ibdev_err(&rdev->ibdev,
1841 "curr_qp_state=0x%x, new_qp_state=0x%x\n",
1842 curr_qp_state, new_qp_state);
1843 return -EINVAL;
1844 }
1845 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
1846 qp->qplib_qp.state = __from_ib_qp_state(qp_attr->qp_state);
1847
1848 if (!qp->sumem &&
1849 qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1850 ibdev_dbg(&rdev->ibdev,
1851 "Move QP = %p to flush list\n", qp);
1852 flags = bnxt_re_lock_cqs(qp);
1853 bnxt_qplib_add_flush_qp(&qp->qplib_qp);
1854 bnxt_re_unlock_cqs(qp, flags);
1855 }
1856 if (!qp->sumem &&
1857 qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
1858 ibdev_dbg(&rdev->ibdev,
1859 "Move QP = %p out of flush list\n", qp);
1860 flags = bnxt_re_lock_cqs(qp);
1861 bnxt_qplib_clean_qp(&qp->qplib_qp);
1862 bnxt_re_unlock_cqs(qp, flags);
1863 }
1864 }
1865 if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
1866 qp->qplib_qp.modify_flags |=
1867 CMDQ_MODIFY_QP_MODIFY_MASK_EN_SQD_ASYNC_NOTIFY;
1868 qp->qplib_qp.en_sqd_async_notify = true;
1869 }
1870 if (qp_attr_mask & IB_QP_ACCESS_FLAGS) {
1871 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS;
1872 qp->qplib_qp.access =
1873 __from_ib_access_flags(qp_attr->qp_access_flags);
1874 /* LOCAL_WRITE access must be set to allow RC receive */
1875 qp->qplib_qp.access |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
1876 /* Temp: Set all params on QP as of now */
1877 qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_REMOTE_WRITE;
1878 qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_REMOTE_READ;
1879 }
1880 if (qp_attr_mask & IB_QP_PKEY_INDEX) {
1881 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
1882 qp->qplib_qp.pkey_index = qp_attr->pkey_index;
1883 }
1884 if (qp_attr_mask & IB_QP_QKEY) {
1885 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
1886 qp->qplib_qp.qkey = qp_attr->qkey;
1887 }
1888 if (qp_attr_mask & IB_QP_AV) {
1889 const struct ib_global_route *grh =
1890 rdma_ah_read_grh(&qp_attr->ah_attr);
1891 const struct ib_gid_attr *sgid_attr;
1892 struct bnxt_re_gid_ctx *ctx;
1893
1894 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
1895 CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
1896 CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
1897 CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
1898 CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
1899 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
1900 CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
1901 memcpy(qp->qplib_qp.ah.dgid.data, grh->dgid.raw,
1902 sizeof(qp->qplib_qp.ah.dgid.data));
1903 qp->qplib_qp.ah.flow_label = grh->flow_label;
1904 sgid_attr = grh->sgid_attr;
1905 /* Get the HW context of the GID. The reference
1906 * of GID table entry is already taken by the caller.
1907 */
1908 ctx = rdma_read_gid_hw_context(sgid_attr);
1909 qp->qplib_qp.ah.sgid_index = ctx->idx;
1910 qp->qplib_qp.ah.host_sgid_index = grh->sgid_index;
1911 qp->qplib_qp.ah.hop_limit = grh->hop_limit;
1912 qp->qplib_qp.ah.traffic_class = grh->traffic_class;
1913 qp->qplib_qp.ah.sl = rdma_ah_get_sl(&qp_attr->ah_attr);
1914 ether_addr_copy(qp->qplib_qp.ah.dmac,
1915 qp_attr->ah_attr.roce.dmac);
1916
1917 rc = rdma_read_gid_l2_fields(sgid_attr, NULL,
1918 &qp->qplib_qp.smac[0]);
1919 if (rc)
1920 return rc;
1921
1922 nw_type = rdma_gid_attr_network_type(sgid_attr);
1923 switch (nw_type) {
1924 case RDMA_NETWORK_IPV4:
1925 qp->qplib_qp.nw_type =
1926 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4;
1927 break;
1928 case RDMA_NETWORK_IPV6:
1929 qp->qplib_qp.nw_type =
1930 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6;
1931 break;
1932 default:
1933 qp->qplib_qp.nw_type =
1934 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV1;
1935 break;
1936 }
1937 }
1938
1939 if (qp_attr_mask & IB_QP_PATH_MTU) {
1940 qp->qplib_qp.modify_flags |=
1941 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
1942 qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu);
1943 qp->qplib_qp.mtu = ib_mtu_enum_to_int(qp_attr->path_mtu);
1944 } else if (qp_attr->qp_state == IB_QPS_RTR) {
1945 qp->qplib_qp.modify_flags |=
1946 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
1947 qp->qplib_qp.path_mtu =
1948 __from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu));
1949 qp->qplib_qp.mtu =
1950 ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
1951 }
1952
1953 if (qp_attr_mask & IB_QP_TIMEOUT) {
1954 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT;
1955 qp->qplib_qp.timeout = qp_attr->timeout;
1956 }
1957 if (qp_attr_mask & IB_QP_RETRY_CNT) {
1958 qp->qplib_qp.modify_flags |=
1959 CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT;
1960 qp->qplib_qp.retry_cnt = qp_attr->retry_cnt;
1961 }
1962 if (qp_attr_mask & IB_QP_RNR_RETRY) {
1963 qp->qplib_qp.modify_flags |=
1964 CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY;
1965 qp->qplib_qp.rnr_retry = qp_attr->rnr_retry;
1966 }
1967 if (qp_attr_mask & IB_QP_MIN_RNR_TIMER) {
1968 qp->qplib_qp.modify_flags |=
1969 CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER;
1970 qp->qplib_qp.min_rnr_timer = qp_attr->min_rnr_timer;
1971 }
1972 if (qp_attr_mask & IB_QP_RQ_PSN) {
1973 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN;
1974 qp->qplib_qp.rq.psn = qp_attr->rq_psn;
1975 }
1976 if (qp_attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
1977 qp->qplib_qp.modify_flags |=
1978 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC;
1979 /* Cap the max_rd_atomic to device max */
1980 qp->qplib_qp.max_rd_atomic = min_t(u32, qp_attr->max_rd_atomic,
1981 dev_attr->max_qp_rd_atom);
1982 }
1983 if (qp_attr_mask & IB_QP_SQ_PSN) {
1984 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
1985 qp->qplib_qp.sq.psn = qp_attr->sq_psn;
1986 }
1987 if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
1988 if (qp_attr->max_dest_rd_atomic >
1989 dev_attr->max_qp_init_rd_atom) {
1990 ibdev_err(&rdev->ibdev,
1991 "max_dest_rd_atomic requested%d is > dev_max%d",
1992 qp_attr->max_dest_rd_atomic,
1993 dev_attr->max_qp_init_rd_atom);
1994 return -EINVAL;
1995 }
1996
1997 qp->qplib_qp.modify_flags |=
1998 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC;
1999 qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic;
2000 }
2001 if (qp_attr_mask & IB_QP_CAP) {
2002 qp->qplib_qp.modify_flags |=
2003 CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SIZE |
2004 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SIZE |
2005 CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SGE |
2006 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SGE |
2007 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_INLINE_DATA;
2008 if ((qp_attr->cap.max_send_wr >= dev_attr->max_qp_wqes) ||
2009 (qp_attr->cap.max_recv_wr >= dev_attr->max_qp_wqes) ||
2010 (qp_attr->cap.max_send_sge >= dev_attr->max_qp_sges) ||
2011 (qp_attr->cap.max_recv_sge >= dev_attr->max_qp_sges) ||
2012 (qp_attr->cap.max_inline_data >=
2013 dev_attr->max_inline_data)) {
2014 ibdev_err(&rdev->ibdev,
2015 "Create QP failed - max exceeded");
2016 return -EINVAL;
2017 }
2018 entries = roundup_pow_of_two(qp_attr->cap.max_send_wr);
2019 qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
2020 dev_attr->max_qp_wqes + 1);
2021 qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
2022 qp_attr->cap.max_send_wr;
2023 /*
2024 		 * Reserve one slot for the phantom WQE. Some applications can
2025 		 * post one extra entry in this case; allow it to avoid an
2026 		 * unexpected queue-full condition.
2027 */
2028 qp->qplib_qp.sq.q_full_delta -= 1;
2029 qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge;
2030 if (qp->qplib_qp.rq.max_wqe) {
2031 entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr);
2032 qp->qplib_qp.rq.max_wqe =
2033 min_t(u32, entries, dev_attr->max_qp_wqes + 1);
2034 qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
2035 qp_attr->cap.max_recv_wr;
2036 qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge;
2037 } else {
2038 /* SRQ was used prior, just ignore the RQ caps */
2039 }
2040 }
2041 if (qp_attr_mask & IB_QP_DEST_QPN) {
2042 qp->qplib_qp.modify_flags |=
2043 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID;
2044 qp->qplib_qp.dest_qpn = qp_attr->dest_qp_num;
2045 }
2046 rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
2047 if (rc) {
2048 ibdev_err(&rdev->ibdev, "Failed to modify HW QP");
2049 return rc;
2050 }
2051 if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp)
2052 rc = bnxt_re_modify_shadow_qp(rdev, qp, qp_attr_mask);
2053 return rc;
2054 }
2055
2056 int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
2057 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
2058 {
2059 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2060 struct bnxt_re_dev *rdev = qp->rdev;
2061 struct bnxt_qplib_qp *qplib_qp;
2062 int rc;
2063
2064 qplib_qp = kzalloc(sizeof(*qplib_qp), GFP_KERNEL);
2065 if (!qplib_qp)
2066 return -ENOMEM;
2067
2068 qplib_qp->id = qp->qplib_qp.id;
2069 qplib_qp->ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index;
2070
2071 rc = bnxt_qplib_query_qp(&rdev->qplib_res, qplib_qp);
2072 if (rc) {
2073 ibdev_err(&rdev->ibdev, "Failed to query HW QP");
2074 goto out;
2075 }
2076 qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state);
2077 qp_attr->cur_qp_state = __to_ib_qp_state(qplib_qp->cur_qp_state);
2078 qp_attr->en_sqd_async_notify = qplib_qp->en_sqd_async_notify ? 1 : 0;
2079 qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp->access);
2080 qp_attr->pkey_index = qplib_qp->pkey_index;
2081 qp_attr->qkey = qplib_qp->qkey;
2082 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
2083 rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp->ah.flow_label,
2084 qplib_qp->ah.host_sgid_index,
2085 qplib_qp->ah.hop_limit,
2086 qplib_qp->ah.traffic_class);
2087 rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp->ah.dgid.data);
2088 rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp->ah.sl);
2089 ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp->ah.dmac);
2090 qp_attr->path_mtu = __to_ib_mtu(qplib_qp->path_mtu);
2091 qp_attr->timeout = qplib_qp->timeout;
2092 qp_attr->retry_cnt = qplib_qp->retry_cnt;
2093 qp_attr->rnr_retry = qplib_qp->rnr_retry;
2094 qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer;
2095 qp_attr->rq_psn = qplib_qp->rq.psn;
2096 qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic;
2097 qp_attr->sq_psn = qplib_qp->sq.psn;
2098 qp_attr->max_dest_rd_atomic = qplib_qp->max_dest_rd_atomic;
2099 qp_init_attr->sq_sig_type = qplib_qp->sig_type ? IB_SIGNAL_ALL_WR :
2100 IB_SIGNAL_REQ_WR;
2101 qp_attr->dest_qp_num = qplib_qp->dest_qpn;
2102
2103 qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe;
2104 qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge;
2105 qp_attr->cap.max_recv_wr = qp->qplib_qp.rq.max_wqe;
2106 qp_attr->cap.max_recv_sge = qp->qplib_qp.rq.max_sge;
2107 qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data;
2108 qp_init_attr->cap = qp_attr->cap;
2109
2110 out:
2111 kfree(qplib_qp);
2112 return rc;
2113 }
2114
2115 /* Routine for sending QP1 packets for RoCE V1 and V2
2116 */
2117 static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
2118 const struct ib_send_wr *wr,
2119 struct bnxt_qplib_swqe *wqe,
2120 int payload_size)
2121 {
2122 struct bnxt_re_ah *ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah,
2123 ib_ah);
2124 struct bnxt_qplib_ah *qplib_ah = &ah->qplib_ah;
2125 const struct ib_gid_attr *sgid_attr = ah->ib_ah.sgid_attr;
2126 struct bnxt_qplib_sge sge;
2127 u8 nw_type;
2128 u16 ether_type;
2129 union ib_gid dgid;
2130 bool is_eth = false;
2131 bool is_vlan = false;
2132 bool is_grh = false;
2133 bool is_udp = false;
2134 u8 ip_version = 0;
2135 u16 vlan_id = 0xFFFF;
2136 void *buf;
2137 int i, rc = 0;
2138
2139 memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr));
2140
2141 rc = rdma_read_gid_l2_fields(sgid_attr, &vlan_id, NULL);
2142 if (rc)
2143 return rc;
2144
2145 /* Get network header type for this GID */
2146 nw_type = rdma_gid_attr_network_type(sgid_attr);
2147 switch (nw_type) {
2148 case RDMA_NETWORK_IPV4:
2149 nw_type = BNXT_RE_ROCEV2_IPV4_PACKET;
2150 break;
2151 case RDMA_NETWORK_IPV6:
2152 nw_type = BNXT_RE_ROCEV2_IPV6_PACKET;
2153 break;
2154 default:
2155 nw_type = BNXT_RE_ROCE_V1_PACKET;
2156 break;
2157 }
2158 memcpy(&dgid.raw, &qplib_ah->dgid, 16);
2159 is_udp = sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
2160 if (is_udp) {
2161 if (ipv6_addr_v4mapped((struct in6_addr *)&sgid_attr->gid)) {
2162 ip_version = 4;
2163 ether_type = ETH_P_IP;
2164 } else {
2165 ip_version = 6;
2166 ether_type = ETH_P_IPV6;
2167 }
2168 is_grh = false;
2169 } else {
2170 ether_type = ETH_P_IBOE;
2171 is_grh = true;
2172 }
2173
2174 is_eth = true;
2175 is_vlan = (vlan_id && (vlan_id < 0x1000)) ? true : false;
2176
2177 ib_ud_header_init(payload_size, !is_eth, is_eth, is_vlan, is_grh,
2178 ip_version, is_udp, 0, &qp->qp1_hdr);
2179
2180 /* ETH */
2181 ether_addr_copy(qp->qp1_hdr.eth.dmac_h, ah->qplib_ah.dmac);
2182 ether_addr_copy(qp->qp1_hdr.eth.smac_h, qp->qplib_qp.smac);
2183
2184 /* For vlan, check the sgid for vlan existence */
2185
2186 if (!is_vlan) {
2187 qp->qp1_hdr.eth.type = cpu_to_be16(ether_type);
2188 } else {
2189 qp->qp1_hdr.vlan.type = cpu_to_be16(ether_type);
2190 qp->qp1_hdr.vlan.tag = cpu_to_be16(vlan_id);
2191 }
2192
2193 if (is_grh || (ip_version == 6)) {
2194 memcpy(qp->qp1_hdr.grh.source_gid.raw, sgid_attr->gid.raw,
2195 sizeof(sgid_attr->gid));
2196 memcpy(qp->qp1_hdr.grh.destination_gid.raw, qplib_ah->dgid.data,
2197 sizeof(sgid_attr->gid));
2198 qp->qp1_hdr.grh.hop_limit = qplib_ah->hop_limit;
2199 }
2200
2201 if (ip_version == 4) {
2202 qp->qp1_hdr.ip4.tos = 0;
2203 qp->qp1_hdr.ip4.id = 0;
2204 qp->qp1_hdr.ip4.frag_off = htons(IP_DF);
2205 qp->qp1_hdr.ip4.ttl = qplib_ah->hop_limit;
2206
2207 memcpy(&qp->qp1_hdr.ip4.saddr, sgid_attr->gid.raw + 12, 4);
2208 memcpy(&qp->qp1_hdr.ip4.daddr, qplib_ah->dgid.data + 12, 4);
2209 qp->qp1_hdr.ip4.check = ib_ud_ip4_csum(&qp->qp1_hdr);
2210 }
2211
2212 if (is_udp) {
2213 qp->qp1_hdr.udp.dport = htons(ROCE_V2_UDP_DPORT);
2214 qp->qp1_hdr.udp.sport = htons(0x8CD1);
2215 qp->qp1_hdr.udp.csum = 0;
2216 }
2217
2218 /* BTH */
2219 if (wr->opcode == IB_WR_SEND_WITH_IMM) {
2220 qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
2221 qp->qp1_hdr.immediate_present = 1;
2222 } else {
2223 qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
2224 }
2225 if (wr->send_flags & IB_SEND_SOLICITED)
2226 qp->qp1_hdr.bth.solicited_event = 1;
2227 /* pad_count */
2228 qp->qp1_hdr.bth.pad_count = (4 - payload_size) & 3;
2229
2230 /* P_key for QP1 is for all members */
2231 qp->qp1_hdr.bth.pkey = cpu_to_be16(0xFFFF);
2232 qp->qp1_hdr.bth.destination_qpn = IB_QP1;
2233 qp->qp1_hdr.bth.ack_req = 0;
2234 qp->send_psn++;
2235 qp->send_psn &= BTH_PSN_MASK;
2236 qp->qp1_hdr.bth.psn = cpu_to_be32(qp->send_psn);
2237 /* DETH */
2238 	/* Use the privileged Q_Key for QP1 */
2239 qp->qp1_hdr.deth.qkey = cpu_to_be32(IB_QP1_QKEY);
2240 qp->qp1_hdr.deth.source_qpn = IB_QP1;
2241
2242 /* Pack the QP1 to the transmit buffer */
2243 buf = bnxt_qplib_get_qp1_sq_buf(&qp->qplib_qp, &sge);
2244 if (buf) {
2245 ib_ud_header_pack(&qp->qp1_hdr, buf);
2246 for (i = wqe->num_sge; i; i--) {
2247 wqe->sg_list[i].addr = wqe->sg_list[i - 1].addr;
2248 wqe->sg_list[i].lkey = wqe->sg_list[i - 1].lkey;
2249 wqe->sg_list[i].size = wqe->sg_list[i - 1].size;
2250 }
2251
2252 /*
2253 		 * Max header buf size for IPv6 RoCE V2 is 86,
2254 		 * which is the same as the QP1 SQ header buffer.
2255 		 * Header buf size for IPv4 RoCE V2 can be 66:
2256 		 * ETH(14) + VLAN(4) + IP(20) + UDP(8) + BTH(20).
2257 		 * Subtract 20 bytes from the QP1 SQ header buf size.
2258 */
2259 if (is_udp && ip_version == 4)
2260 sge.size -= 20;
2261 /*
2262 * Max Header buf size for RoCE V1 is 78.
2263 * ETH(14) + VLAN(4) + GRH(40) + BTH(20).
2264 * Subtract 8 bytes from QP1 SQ header buf size
2265 */
2266 if (!is_udp)
2267 sge.size -= 8;
2268
2269 /* Subtract 4 bytes for non vlan packets */
2270 if (!is_vlan)
2271 sge.size -= 4;
2272
2273 wqe->sg_list[0].addr = sge.addr;
2274 wqe->sg_list[0].lkey = sge.lkey;
2275 wqe->sg_list[0].size = sge.size;
2276 wqe->num_sge++;
2277
2278 } else {
2279 ibdev_err(&qp->rdev->ibdev, "QP1 buffer is empty!");
2280 rc = -ENOMEM;
2281 }
2282 return rc;
2283 }
2284
2285 /* For the MAD layer, it only provides the recv SGE the size of
2286 * ib_grh + MAD datagram. No Ethernet headers, Ethertype, BTH, DETH,
2287 * nor RoCE iCRC. The Cu+ solution must provide buffer for the entire
2288 * receive packet (334 bytes) with no VLAN and then copy the GRH
2289 * and the MAD datagram out to the provided SGE.
2290 */
2291 static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp,
2292 const struct ib_recv_wr *wr,
2293 struct bnxt_qplib_swqe *wqe,
2294 int payload_size)
2295 {
2296 struct bnxt_re_sqp_entries *sqp_entry;
2297 struct bnxt_qplib_sge ref, sge;
2298 struct bnxt_re_dev *rdev;
2299 u32 rq_prod_index;
2300
2301 rdev = qp->rdev;
2302
2303 rq_prod_index = bnxt_qplib_get_rq_prod_index(&qp->qplib_qp);
2304
2305 if (!bnxt_qplib_get_qp1_rq_buf(&qp->qplib_qp, &sge))
2306 return -ENOMEM;
2307
2308 /* Create 1 SGE to receive the entire
2309 * ethernet packet
2310 */
2311 /* Save the reference from ULP */
2312 ref.addr = wqe->sg_list[0].addr;
2313 ref.lkey = wqe->sg_list[0].lkey;
2314 ref.size = wqe->sg_list[0].size;
2315
2316 sqp_entry = &rdev->gsi_ctx.sqp_tbl[rq_prod_index];
2317
2318 /* SGE 1 */
2319 wqe->sg_list[0].addr = sge.addr;
2320 wqe->sg_list[0].lkey = sge.lkey;
2321 wqe->sg_list[0].size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
2322 sge.size -= wqe->sg_list[0].size;
2323
2324 sqp_entry->sge.addr = ref.addr;
2325 sqp_entry->sge.lkey = ref.lkey;
2326 sqp_entry->sge.size = ref.size;
2327 /* Store the wrid for reporting completion */
2328 sqp_entry->wrid = wqe->wr_id;
2329 /* change the wqe->wrid to table index */
2330 wqe->wr_id = rq_prod_index;
2331 return 0;
2332 }
2333
2334 static int is_ud_qp(struct bnxt_re_qp *qp)
2335 {
2336 return (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD ||
2337 qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_GSI);
2338 }
2339
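/* Translate the common send work-request fields (opcode, immediate or
 * invalidate data, and send flags) into a qplib SWQE. For UD QPs the
 * destination QPN, remote qkey and AH id are taken from the ud_wr.
 */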
2340 static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp,
2341 const struct ib_send_wr *wr,
2342 struct bnxt_qplib_swqe *wqe)
2343 {
2344 struct bnxt_re_ah *ah = NULL;
2345
2346 if (is_ud_qp(qp)) {
2347 ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah, ib_ah);
2348 wqe->send.q_key = ud_wr(wr)->remote_qkey;
2349 wqe->send.dst_qp = ud_wr(wr)->remote_qpn;
2350 wqe->send.avid = ah->qplib_ah.id;
2351 }
2352 switch (wr->opcode) {
2353 case IB_WR_SEND:
2354 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND;
2355 break;
2356 case IB_WR_SEND_WITH_IMM:
2357 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM;
2358 wqe->send.imm_data = wr->ex.imm_data;
2359 break;
2360 case IB_WR_SEND_WITH_INV:
2361 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV;
2362 wqe->send.inv_key = wr->ex.invalidate_rkey;
2363 break;
2364 default:
2365 return -EINVAL;
2366 }
2367 if (wr->send_flags & IB_SEND_SIGNALED)
2368 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2369 if (wr->send_flags & IB_SEND_FENCE)
2370 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2371 if (wr->send_flags & IB_SEND_SOLICITED)
2372 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2373 if (wr->send_flags & IB_SEND_INLINE)
2374 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
2375
2376 return 0;
2377 }
2378
2379 static int bnxt_re_build_rdma_wqe(const struct ib_send_wr *wr,
2380 struct bnxt_qplib_swqe *wqe)
2381 {
2382 switch (wr->opcode) {
2383 case IB_WR_RDMA_WRITE:
2384 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE;
2385 break;
2386 case IB_WR_RDMA_WRITE_WITH_IMM:
2387 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM;
2388 wqe->rdma.imm_data = wr->ex.imm_data;
2389 break;
2390 case IB_WR_RDMA_READ:
2391 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_READ;
2392 wqe->rdma.inv_key = wr->ex.invalidate_rkey;
2393 break;
2394 default:
2395 return -EINVAL;
2396 }
2397 wqe->rdma.remote_va = rdma_wr(wr)->remote_addr;
2398 wqe->rdma.r_key = rdma_wr(wr)->rkey;
2399 if (wr->send_flags & IB_SEND_SIGNALED)
2400 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2401 if (wr->send_flags & IB_SEND_FENCE)
2402 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2403 if (wr->send_flags & IB_SEND_SOLICITED)
2404 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2405 if (wr->send_flags & IB_SEND_INLINE)
2406 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
2407
2408 return 0;
2409 }
2410
2411 static int bnxt_re_build_atomic_wqe(const struct ib_send_wr *wr,
2412 struct bnxt_qplib_swqe *wqe)
2413 {
2414 switch (wr->opcode) {
2415 case IB_WR_ATOMIC_CMP_AND_SWP:
2416 wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP;
2417 wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
2418 wqe->atomic.swap_data = atomic_wr(wr)->swap;
2419 break;
2420 case IB_WR_ATOMIC_FETCH_AND_ADD:
2421 wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD;
2422 wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
2423 break;
2424 default:
2425 return -EINVAL;
2426 }
2427 wqe->atomic.remote_va = atomic_wr(wr)->remote_addr;
2428 wqe->atomic.r_key = atomic_wr(wr)->rkey;
2429 if (wr->send_flags & IB_SEND_SIGNALED)
2430 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2431 if (wr->send_flags & IB_SEND_FENCE)
2432 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2433 if (wr->send_flags & IB_SEND_SOLICITED)
2434 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2435 return 0;
2436 }
2437
2438 static int bnxt_re_build_inv_wqe(const struct ib_send_wr *wr,
2439 struct bnxt_qplib_swqe *wqe)
2440 {
2441 wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
2442 wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;
2443
2444 /* Need unconditional fence for local invalidate
2445 * opcode to work as expected.
2446 */
2447 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2448
2449 if (wr->send_flags & IB_SEND_SIGNALED)
2450 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2451 if (wr->send_flags & IB_SEND_SOLICITED)
2452 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2453
2454 return 0;
2455 }
2456
2457 static int bnxt_re_build_reg_wqe(const struct ib_reg_wr *wr,
2458 struct bnxt_qplib_swqe *wqe)
2459 {
2460 struct bnxt_re_mr *mr = container_of(wr->mr, struct bnxt_re_mr, ib_mr);
2461 struct bnxt_qplib_frpl *qplib_frpl = &mr->qplib_frpl;
2462 int access = wr->access;
2463
2464 wqe->frmr.pbl_ptr = (__le64 *)qplib_frpl->hwq.pbl_ptr[0];
2465 wqe->frmr.pbl_dma_ptr = qplib_frpl->hwq.pbl_dma_ptr[0];
2466 wqe->frmr.page_list = mr->pages;
2467 wqe->frmr.page_list_len = mr->npages;
2468 wqe->frmr.levels = qplib_frpl->hwq.level;
2469 wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;
2470
2471 /* Need unconditional fence for reg_mr
2472 * opcode to function as expected.
2473 */
2474
2475 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2476
2477 if (wr->wr.send_flags & IB_SEND_SIGNALED)
2478 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2479
2480 if (access & IB_ACCESS_LOCAL_WRITE)
2481 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
2482 if (access & IB_ACCESS_REMOTE_READ)
2483 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_READ;
2484 if (access & IB_ACCESS_REMOTE_WRITE)
2485 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_WRITE;
2486 if (access & IB_ACCESS_REMOTE_ATOMIC)
2487 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_ATOMIC;
2488 if (access & IB_ACCESS_MW_BIND)
2489 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_WINDOW_BIND;
2490
2491 wqe->frmr.l_key = wr->key;
2492 wqe->frmr.length = wr->mr->length;
2493 wqe->frmr.pbl_pg_sz_log = (wr->mr->page_size >> PAGE_SHIFT_4K) - 1;
2494 wqe->frmr.va = wr->mr->iova;
2495 return 0;
2496 }
2497
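/* Copy the payload of an IB_SEND_INLINE request from the SGEs into the
 * SWQE inline buffer. Returns the accumulated inline length, or -EINVAL
 * if it would exceed BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH.
 */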
2498 static int bnxt_re_copy_inline_data(struct bnxt_re_dev *rdev,
2499 const struct ib_send_wr *wr,
2500 struct bnxt_qplib_swqe *wqe)
2501 {
2502 /* Copy the inline data to the data field */
2503 u8 *in_data;
2504 u32 i, sge_len;
2505 void *sge_addr;
2506
2507 in_data = wqe->inline_data;
2508 for (i = 0; i < wr->num_sge; i++) {
2509 sge_addr = (void *)(unsigned long)
2510 wr->sg_list[i].addr;
2511 sge_len = wr->sg_list[i].length;
2512
2513 if ((sge_len + wqe->inline_len) >
2514 BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
2515 ibdev_err(&rdev->ibdev,
2516 "Inline data size requested > supported value");
2517 return -EINVAL;
2518 }
2519 sge_len = wr->sg_list[i].length;
2520
2521 memcpy(in_data, sge_addr, sge_len);
2522 in_data += wr->sg_list[i].length;
2523 wqe->inline_len += wr->sg_list[i].length;
2524 }
2525 return wqe->inline_len;
2526 }
2527
2528 static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev,
2529 const struct ib_send_wr *wr,
2530 struct bnxt_qplib_swqe *wqe)
2531 {
2532 int payload_sz = 0;
2533
2534 if (wr->send_flags & IB_SEND_INLINE)
2535 payload_sz = bnxt_re_copy_inline_data(rdev, wr, wqe);
2536 else
2537 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe->sg_list,
2538 wqe->num_sge);
2539
2540 return payload_sz;
2541 }
2542
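/* Workaround for a hardware stall on UD/GSI/raw-Ethertype QPs: once
 * BNXT_RE_UD_QP_HW_STALL work requests have been posted, re-issue a
 * modify-QP to RTS and reset the WQE counter.
 */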
2543 static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp)
2544 {
2545 if ((qp->ib_qp.qp_type == IB_QPT_UD ||
2546 qp->ib_qp.qp_type == IB_QPT_GSI ||
2547 qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) &&
2548 qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) {
2549 int qp_attr_mask;
2550 struct ib_qp_attr qp_attr;
2551
2552 qp_attr_mask = IB_QP_STATE;
2553 qp_attr.qp_state = IB_QPS_RTS;
2554 bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL);
2555 qp->qplib_qp.wqe_cnt = 0;
2556 }
2557 }
2558
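/* Post send WRs on the driver-internal shadow GSI QP. This is used by the
 * raw QP1 relay path; there is no bad_wr reporting, the first failure stops
 * the loop and its error code is returned.
 */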
2559 static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
2560 struct bnxt_re_qp *qp,
2561 const struct ib_send_wr *wr)
2562 {
2563 int rc = 0, payload_sz = 0;
2564 unsigned long flags;
2565
2566 spin_lock_irqsave(&qp->sq_lock, flags);
2567 while (wr) {
2568 struct bnxt_qplib_swqe wqe = {};
2569
2570 /* Common */
2571 wqe.num_sge = wr->num_sge;
2572 if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2573 ibdev_err(&rdev->ibdev,
2574 "Limit exceeded for Send SGEs");
2575 rc = -EINVAL;
2576 goto bad;
2577 }
2578
2579 payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2580 if (payload_sz < 0) {
2581 rc = -EINVAL;
2582 goto bad;
2583 }
2584 wqe.wr_id = wr->wr_id;
2585
2586 wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND;
2587
2588 rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2589 if (!rc)
2590 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2591 bad:
2592 if (rc) {
2593 ibdev_err(&rdev->ibdev,
2594 "Post send failed opcode = %#x rc = %d",
2595 wr->opcode, rc);
2596 break;
2597 }
2598 wr = wr->next;
2599 }
2600 bnxt_qplib_post_send_db(&qp->qplib_qp);
2601 bnxt_ud_qp_hw_stall_workaround(qp);
2602 spin_unlock_irqrestore(&qp->sq_lock, flags);
2603 return rc;
2604 }
2605
2606 int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
2607 const struct ib_send_wr **bad_wr)
2608 {
2609 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2610 struct bnxt_qplib_swqe wqe;
2611 int rc = 0, payload_sz = 0;
2612 unsigned long flags;
2613
2614 spin_lock_irqsave(&qp->sq_lock, flags);
2615 while (wr) {
2616 /* House keeping */
2617 memset(&wqe, 0, sizeof(wqe));
2618
2619 /* Common */
2620 wqe.num_sge = wr->num_sge;
2621 if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2622 ibdev_err(&qp->rdev->ibdev,
2623 "Limit exceeded for Send SGEs");
2624 rc = -EINVAL;
2625 goto bad;
2626 }
2627
2628 payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2629 if (payload_sz < 0) {
2630 rc = -EINVAL;
2631 goto bad;
2632 }
2633 wqe.wr_id = wr->wr_id;
2634
2635 switch (wr->opcode) {
2636 case IB_WR_SEND:
2637 case IB_WR_SEND_WITH_IMM:
2638 if (qp->qplib_qp.type == CMDQ_CREATE_QP1_TYPE_GSI) {
2639 rc = bnxt_re_build_qp1_send_v2(qp, wr, &wqe,
2640 payload_sz);
2641 if (rc)
2642 goto bad;
2643 wqe.rawqp1.lflags |=
2644 SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC;
2645 }
2646 switch (wr->send_flags) {
2647 case IB_SEND_IP_CSUM:
2648 wqe.rawqp1.lflags |=
2649 SQ_SEND_RAWETH_QP1_LFLAGS_IP_CHKSUM;
2650 break;
2651 default:
2652 break;
2653 }
2654 fallthrough;
2655 case IB_WR_SEND_WITH_INV:
2656 rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2657 break;
2658 case IB_WR_RDMA_WRITE:
2659 case IB_WR_RDMA_WRITE_WITH_IMM:
2660 case IB_WR_RDMA_READ:
2661 rc = bnxt_re_build_rdma_wqe(wr, &wqe);
2662 break;
2663 case IB_WR_ATOMIC_CMP_AND_SWP:
2664 case IB_WR_ATOMIC_FETCH_AND_ADD:
2665 rc = bnxt_re_build_atomic_wqe(wr, &wqe);
2666 break;
2667 case IB_WR_RDMA_READ_WITH_INV:
2668 ibdev_err(&qp->rdev->ibdev,
2669 "RDMA Read with Invalidate is not supported");
2670 rc = -EINVAL;
2671 goto bad;
2672 case IB_WR_LOCAL_INV:
2673 rc = bnxt_re_build_inv_wqe(wr, &wqe);
2674 break;
2675 case IB_WR_REG_MR:
2676 rc = bnxt_re_build_reg_wqe(reg_wr(wr), &wqe);
2677 break;
2678 default:
2679 /* Unsupported WRs */
2680 ibdev_err(&qp->rdev->ibdev,
2681 "WR (%#x) is not supported", wr->opcode);
2682 rc = -EINVAL;
2683 goto bad;
2684 }
2685 if (!rc)
2686 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2687 bad:
2688 if (rc) {
2689 ibdev_err(&qp->rdev->ibdev,
2690 "post_send failed op:%#x qps = %#x rc = %d\n",
2691 wr->opcode, qp->qplib_qp.state, rc);
2692 *bad_wr = wr;
2693 break;
2694 }
2695 wr = wr->next;
2696 }
2697 bnxt_qplib_post_send_db(&qp->qplib_qp);
2698 bnxt_ud_qp_hw_stall_workaround(qp);
2699 spin_unlock_irqrestore(&qp->sq_lock, flags);
2700
2701 return rc;
2702 }
2703
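/* Post receive WRs on the driver-internal shadow GSI QP; the receive
 * doorbell is rung once after the whole chain has been queued.
 */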
2704 static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
2705 struct bnxt_re_qp *qp,
2706 const struct ib_recv_wr *wr)
2707 {
2708 struct bnxt_qplib_swqe wqe;
2709 int rc = 0;
2710
2711 memset(&wqe, 0, sizeof(wqe));
2712 while (wr) {
2713 /* House keeping */
2714 memset(&wqe, 0, sizeof(wqe));
2715
2716 /* Common */
2717 wqe.num_sge = wr->num_sge;
2718 if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2719 ibdev_err(&rdev->ibdev,
2720 "Limit exceeded for Receive SGEs");
2721 rc = -EINVAL;
2722 break;
2723 }
2724 bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
2725 wqe.wr_id = wr->wr_id;
2726 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2727
2728 rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2729 if (rc)
2730 break;
2731
2732 wr = wr->next;
2733 }
2734 if (!rc)
2735 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2736 return rc;
2737 }
2738
2739 int bnxt_re_post_recv(struct ib_qp *ib_qp, const struct ib_recv_wr *wr,
2740 const struct ib_recv_wr **bad_wr)
2741 {
2742 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2743 struct bnxt_qplib_swqe wqe;
2744 int rc = 0, payload_sz = 0;
2745 unsigned long flags;
2746 u32 count = 0;
2747
2748 spin_lock_irqsave(&qp->rq_lock, flags);
2749 while (wr) {
2750 /* House keeping */
2751 memset(&wqe, 0, sizeof(wqe));
2752
2753 /* Common */
2754 wqe.num_sge = wr->num_sge;
2755 if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2756 ibdev_err(&qp->rdev->ibdev,
2757 "Limit exceeded for Receive SGEs");
2758 rc = -EINVAL;
2759 *bad_wr = wr;
2760 break;
2761 }
2762
2763 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
2764 wr->num_sge);
2765 wqe.wr_id = wr->wr_id;
2766 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2767
2768 if (ib_qp->qp_type == IB_QPT_GSI &&
2769 qp->qplib_qp.type != CMDQ_CREATE_QP_TYPE_GSI)
2770 rc = bnxt_re_build_qp1_shadow_qp_recv(qp, wr, &wqe,
2771 payload_sz);
2772 if (!rc)
2773 rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2774 if (rc) {
2775 *bad_wr = wr;
2776 break;
2777 }
2778
2779 		/* Ring the DB if the number of posted RQEs reaches the threshold */
2780 if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) {
2781 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2782 count = 0;
2783 }
2784
2785 wr = wr->next;
2786 }
2787
2788 if (count)
2789 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2790
2791 spin_unlock_irqrestore(&qp->rq_lock, flags);
2792
2793 return rc;
2794 }
2795
2796 /* Completion Queues */
2797 int bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
2798 {
2799 struct bnxt_re_cq *cq;
2800 struct bnxt_qplib_nq *nq;
2801 struct bnxt_re_dev *rdev;
2802
2803 cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
2804 rdev = cq->rdev;
2805 nq = cq->qplib_cq.nq;
2806
2807 bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
2808 ib_umem_release(cq->umem);
2809
2810 atomic_dec(&rdev->cq_count);
2811 nq->budget--;
2812 kfree(cq->cql);
2813 return 0;
2814 }
2815
2816 int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
2817 struct ib_udata *udata)
2818 {
2819 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibcq->device, ibdev);
2820 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
2821 struct bnxt_re_cq *cq = container_of(ibcq, struct bnxt_re_cq, ib_cq);
2822 int rc, entries;
2823 int cqe = attr->cqe;
2824 struct bnxt_qplib_nq *nq = NULL;
2825 unsigned int nq_alloc_cnt;
2826
2827 if (attr->flags)
2828 return -EOPNOTSUPP;
2829
2830 /* Validate CQ fields */
2831 if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
2832 		ibdev_err(&rdev->ibdev, "Failed to create CQ - max exceeded");
2833 return -EINVAL;
2834 }
2835
2836 cq->rdev = rdev;
2837 cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq);
2838
2839 entries = roundup_pow_of_two(cqe + 1);
2840 if (entries > dev_attr->max_cq_wqes + 1)
2841 entries = dev_attr->max_cq_wqes + 1;
2842
2843 cq->qplib_cq.sg_info.pgsize = PAGE_SIZE;
2844 cq->qplib_cq.sg_info.pgshft = PAGE_SHIFT;
2845 if (udata) {
2846 struct bnxt_re_cq_req req;
2847 struct bnxt_re_ucontext *uctx = rdma_udata_to_drv_context(
2848 udata, struct bnxt_re_ucontext, ib_uctx);
2849 if (ib_copy_from_udata(&req, udata, sizeof(req))) {
2850 rc = -EFAULT;
2851 goto fail;
2852 }
2853
2854 cq->umem = ib_umem_get(&rdev->ibdev, req.cq_va,
2855 entries * sizeof(struct cq_base),
2856 IB_ACCESS_LOCAL_WRITE);
2857 if (IS_ERR(cq->umem)) {
2858 rc = PTR_ERR(cq->umem);
2859 goto fail;
2860 }
2861 cq->qplib_cq.sg_info.umem = cq->umem;
2862 cq->qplib_cq.dpi = &uctx->dpi;
2863 } else {
2864 cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
2865 cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe),
2866 GFP_KERNEL);
2867 if (!cq->cql) {
2868 rc = -ENOMEM;
2869 goto fail;
2870 }
2871
2872 cq->qplib_cq.dpi = &rdev->dpi_privileged;
2873 }
2874 /*
2875 	 * Allocate the NQ in a round-robin fashion. nq_alloc_cnt is
2876 	 * used to derive the NQ index.
2877 */
2878 nq_alloc_cnt = atomic_inc_return(&rdev->nq_alloc_cnt);
2879 nq = &rdev->nq[nq_alloc_cnt % (rdev->num_msix - 1)];
2880 cq->qplib_cq.max_wqe = entries;
2881 cq->qplib_cq.cnq_hw_ring_id = nq->ring_id;
2882 cq->qplib_cq.nq = nq;
2883
2884 rc = bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq);
2885 if (rc) {
2886 ibdev_err(&rdev->ibdev, "Failed to create HW CQ");
2887 goto fail;
2888 }
2889
2890 cq->ib_cq.cqe = entries;
2891 cq->cq_period = cq->qplib_cq.period;
2892 nq->budget++;
2893
2894 atomic_inc(&rdev->cq_count);
2895 spin_lock_init(&cq->cq_lock);
2896
2897 if (udata) {
2898 struct bnxt_re_cq_resp resp;
2899
2900 resp.cqid = cq->qplib_cq.id;
2901 resp.tail = cq->qplib_cq.hwq.cons;
2902 resp.phase = cq->qplib_cq.period;
2903 resp.rsvd = 0;
2904 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
2905 if (rc) {
2906 ibdev_err(&rdev->ibdev, "Failed to copy CQ udata");
2907 bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
2908 goto c2fail;
2909 }
2910 }
2911
2912 return 0;
2913
2914 c2fail:
2915 ib_umem_release(cq->umem);
2916 fail:
2917 kfree(cq->cql);
2918 return rc;
2919 }
2920
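/* Map requester (SQ) CQE status codes reported by the firmware to the
 * generic ib_wc_status values.
 */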
2921 static u8 __req_to_ib_wc_status(u8 qstatus)
2922 {
2923 switch (qstatus) {
2924 case CQ_REQ_STATUS_OK:
2925 return IB_WC_SUCCESS;
2926 case CQ_REQ_STATUS_BAD_RESPONSE_ERR:
2927 return IB_WC_BAD_RESP_ERR;
2928 case CQ_REQ_STATUS_LOCAL_LENGTH_ERR:
2929 return IB_WC_LOC_LEN_ERR;
2930 case CQ_REQ_STATUS_LOCAL_QP_OPERATION_ERR:
2931 return IB_WC_LOC_QP_OP_ERR;
2932 case CQ_REQ_STATUS_LOCAL_PROTECTION_ERR:
2933 return IB_WC_LOC_PROT_ERR;
2934 case CQ_REQ_STATUS_MEMORY_MGT_OPERATION_ERR:
2935 return IB_WC_GENERAL_ERR;
2936 case CQ_REQ_STATUS_REMOTE_INVALID_REQUEST_ERR:
2937 return IB_WC_REM_INV_REQ_ERR;
2938 case CQ_REQ_STATUS_REMOTE_ACCESS_ERR:
2939 return IB_WC_REM_ACCESS_ERR;
2940 case CQ_REQ_STATUS_REMOTE_OPERATION_ERR:
2941 return IB_WC_REM_OP_ERR;
2942 case CQ_REQ_STATUS_RNR_NAK_RETRY_CNT_ERR:
2943 return IB_WC_RNR_RETRY_EXC_ERR;
2944 case CQ_REQ_STATUS_TRANSPORT_RETRY_CNT_ERR:
2945 return IB_WC_RETRY_EXC_ERR;
2946 case CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR:
2947 return IB_WC_WR_FLUSH_ERR;
2948 default:
2949 return IB_WC_GENERAL_ERR;
2950 }
2951 return 0;
2952 }
2953
2954 static u8 __rawqp1_to_ib_wc_status(u8 qstatus)
2955 {
2956 switch (qstatus) {
2957 case CQ_RES_RAWETH_QP1_STATUS_OK:
2958 return IB_WC_SUCCESS;
2959 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_ACCESS_ERROR:
2960 return IB_WC_LOC_ACCESS_ERR;
2961 case CQ_RES_RAWETH_QP1_STATUS_HW_LOCAL_LENGTH_ERR:
2962 return IB_WC_LOC_LEN_ERR;
2963 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_PROTECTION_ERR:
2964 return IB_WC_LOC_PROT_ERR;
2965 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_QP_OPERATION_ERR:
2966 return IB_WC_LOC_QP_OP_ERR;
2967 case CQ_RES_RAWETH_QP1_STATUS_MEMORY_MGT_OPERATION_ERR:
2968 return IB_WC_GENERAL_ERR;
2969 case CQ_RES_RAWETH_QP1_STATUS_WORK_REQUEST_FLUSHED_ERR:
2970 return IB_WC_WR_FLUSH_ERR;
2971 case CQ_RES_RAWETH_QP1_STATUS_HW_FLUSH_ERR:
2972 return IB_WC_WR_FLUSH_ERR;
2973 default:
2974 return IB_WC_GENERAL_ERR;
2975 }
2976 }
2977
2978 static u8 __rc_to_ib_wc_status(u8 qstatus)
2979 {
2980 switch (qstatus) {
2981 case CQ_RES_RC_STATUS_OK:
2982 return IB_WC_SUCCESS;
2983 case CQ_RES_RC_STATUS_LOCAL_ACCESS_ERROR:
2984 return IB_WC_LOC_ACCESS_ERR;
2985 case CQ_RES_RC_STATUS_LOCAL_LENGTH_ERR:
2986 return IB_WC_LOC_LEN_ERR;
2987 case CQ_RES_RC_STATUS_LOCAL_PROTECTION_ERR:
2988 return IB_WC_LOC_PROT_ERR;
2989 case CQ_RES_RC_STATUS_LOCAL_QP_OPERATION_ERR:
2990 return IB_WC_LOC_QP_OP_ERR;
2991 case CQ_RES_RC_STATUS_MEMORY_MGT_OPERATION_ERR:
2992 return IB_WC_GENERAL_ERR;
2993 case CQ_RES_RC_STATUS_REMOTE_INVALID_REQUEST_ERR:
2994 return IB_WC_REM_INV_REQ_ERR;
2995 case CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR:
2996 return IB_WC_WR_FLUSH_ERR;
2997 case CQ_RES_RC_STATUS_HW_FLUSH_ERR:
2998 return IB_WC_WR_FLUSH_ERR;
2999 default:
3000 return IB_WC_GENERAL_ERR;
3001 }
3002 }
3003
3004 static void bnxt_re_process_req_wc(struct ib_wc *wc, struct bnxt_qplib_cqe *cqe)
3005 {
3006 switch (cqe->type) {
3007 case BNXT_QPLIB_SWQE_TYPE_SEND:
3008 wc->opcode = IB_WC_SEND;
3009 break;
3010 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
3011 wc->opcode = IB_WC_SEND;
3012 wc->wc_flags |= IB_WC_WITH_IMM;
3013 break;
3014 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
3015 wc->opcode = IB_WC_SEND;
3016 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
3017 break;
3018 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
3019 wc->opcode = IB_WC_RDMA_WRITE;
3020 break;
3021 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
3022 wc->opcode = IB_WC_RDMA_WRITE;
3023 wc->wc_flags |= IB_WC_WITH_IMM;
3024 break;
3025 case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
3026 wc->opcode = IB_WC_RDMA_READ;
3027 break;
3028 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
3029 wc->opcode = IB_WC_COMP_SWAP;
3030 break;
3031 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
3032 wc->opcode = IB_WC_FETCH_ADD;
3033 break;
3034 case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
3035 wc->opcode = IB_WC_LOCAL_INV;
3036 break;
3037 case BNXT_QPLIB_SWQE_TYPE_REG_MR:
3038 wc->opcode = IB_WC_REG_MR;
3039 break;
3040 default:
3041 wc->opcode = IB_WC_SEND;
3042 break;
3043 }
3044
3045 wc->status = __req_to_ib_wc_status(cqe->status);
3046 }
3047
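/* Classify a raw-Ethertype QP1 completion: returns BNXT_RE_ROCE_V1_PACKET,
 * BNXT_RE_ROCEV2_IPV4_PACKET or BNXT_RE_ROCEV2_IPV6_PACKET, or -1 if the
 * itype bits do not indicate a RoCE packet.
 */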
3048 static int bnxt_re_check_packet_type(u16 raweth_qp1_flags,
3049 u16 raweth_qp1_flags2)
3050 {
3051 bool is_ipv6 = false, is_ipv4 = false;
3052
3053 /* raweth_qp1_flags Bit 9-6 indicates itype */
3054 if ((raweth_qp1_flags & CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
3055 != CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
3056 return -1;
3057
3058 if (raweth_qp1_flags2 &
3059 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_CS_CALC &&
3060 raweth_qp1_flags2 &
3061 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_L4_CS_CALC) {
3062 /* raweth_qp1_flags2 Bit 8 indicates ip_type. 0-v4 1 - v6 */
3063 (raweth_qp1_flags2 &
3064 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_TYPE) ?
3065 (is_ipv6 = true) : (is_ipv4 = true);
3066 return ((is_ipv6) ?
3067 BNXT_RE_ROCEV2_IPV6_PACKET :
3068 BNXT_RE_ROCEV2_IPV4_PACKET);
3069 } else {
3070 return BNXT_RE_ROCE_V1_PACKET;
3071 }
3072 }
3073
3074 static int bnxt_re_to_ib_nw_type(int nw_type)
3075 {
3076 u8 nw_hdr_type = 0xFF;
3077
3078 switch (nw_type) {
3079 case BNXT_RE_ROCE_V1_PACKET:
3080 nw_hdr_type = RDMA_NETWORK_ROCE_V1;
3081 break;
3082 case BNXT_RE_ROCEV2_IPV4_PACKET:
3083 nw_hdr_type = RDMA_NETWORK_IPV4;
3084 break;
3085 case BNXT_RE_ROCEV2_IPV6_PACKET:
3086 nw_hdr_type = RDMA_NETWORK_IPV6;
3087 break;
3088 }
3089 return nw_hdr_type;
3090 }
3091
3092 static bool bnxt_re_is_loopback_packet(struct bnxt_re_dev *rdev,
3093 void *rq_hdr_buf)
3094 {
3095 u8 *tmp_buf = NULL;
3096 struct ethhdr *eth_hdr;
3097 u16 eth_type;
3098 bool rc = false;
3099
3100 tmp_buf = (u8 *)rq_hdr_buf;
3101 /*
3102 	 * If the destination MAC is not the same as the interface MAC,
3103 	 * this could be a loopback or multicast address; check whether
3104 	 * it is a loopback packet.
3105 */
3106 if (!ether_addr_equal(tmp_buf, rdev->netdev->dev_addr)) {
3107 tmp_buf += 4;
3108 /* Check the ether type */
3109 eth_hdr = (struct ethhdr *)tmp_buf;
3110 eth_type = ntohs(eth_hdr->h_proto);
3111 switch (eth_type) {
3112 case ETH_P_IBOE:
3113 rc = true;
3114 break;
3115 case ETH_P_IP:
3116 case ETH_P_IPV6: {
3117 u32 len;
3118 struct udphdr *udp_hdr;
3119
3120 len = (eth_type == ETH_P_IP ? sizeof(struct iphdr) :
3121 sizeof(struct ipv6hdr));
3122 tmp_buf += sizeof(struct ethhdr) + len;
3123 udp_hdr = (struct udphdr *)tmp_buf;
3124 if (ntohs(udp_hdr->dest) ==
3125 ROCE_V2_UDP_DPORT)
3126 rc = true;
3127 break;
3128 }
3129 default:
3130 break;
3131 }
3132 }
3133
3134 return rc;
3135 }
3136
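/* Relay a packet received on the raw QP1 to the shadow GSI QP: post a
 * receive buffer on the shadow QP for the GRH plus payload, then loop the
 * packet back to it with a UD send so the consumer sees a normal GSI
 * receive completion. The original CQE is stashed in the sqp_tbl slot
 * given by the CQE wr_id.
 */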
3137 static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *gsi_qp,
3138 struct bnxt_qplib_cqe *cqe)
3139 {
3140 struct bnxt_re_dev *rdev = gsi_qp->rdev;
3141 struct bnxt_re_sqp_entries *sqp_entry = NULL;
3142 struct bnxt_re_qp *gsi_sqp = rdev->gsi_ctx.gsi_sqp;
3143 struct bnxt_re_ah *gsi_sah;
3144 struct ib_send_wr *swr;
3145 struct ib_ud_wr udwr;
3146 struct ib_recv_wr rwr;
3147 int pkt_type = 0;
3148 u32 tbl_idx;
3149 void *rq_hdr_buf;
3150 dma_addr_t rq_hdr_buf_map;
3151 dma_addr_t shrq_hdr_buf_map;
3152 u32 offset = 0;
3153 u32 skip_bytes = 0;
3154 struct ib_sge s_sge[2];
3155 struct ib_sge r_sge[2];
3156 int rc;
3157
3158 memset(&udwr, 0, sizeof(udwr));
3159 memset(&rwr, 0, sizeof(rwr));
3160 memset(&s_sge, 0, sizeof(s_sge));
3161 memset(&r_sge, 0, sizeof(r_sge));
3162
3163 swr = &udwr.wr;
3164 tbl_idx = cqe->wr_id;
3165
3166 rq_hdr_buf = gsi_qp->qplib_qp.rq_hdr_buf +
3167 (tbl_idx * gsi_qp->qplib_qp.rq_hdr_buf_size);
3168 rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&gsi_qp->qplib_qp,
3169 tbl_idx);
3170
3171 /* Shadow QP header buffer */
3172 shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&gsi_qp->qplib_qp,
3173 tbl_idx);
3174 sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx];
3175
3176 /* Store this cqe */
3177 memcpy(&sqp_entry->cqe, cqe, sizeof(struct bnxt_qplib_cqe));
3178 sqp_entry->qp1_qp = gsi_qp;
3179
3180 /* Find packet type from the cqe */
3181
3182 pkt_type = bnxt_re_check_packet_type(cqe->raweth_qp1_flags,
3183 cqe->raweth_qp1_flags2);
3184 if (pkt_type < 0) {
3185 ibdev_err(&rdev->ibdev, "Invalid packet\n");
3186 return -EINVAL;
3187 }
3188
3189 /* Adjust the offset for the user buffer and post in the rq */
3190
3191 if (pkt_type == BNXT_RE_ROCEV2_IPV4_PACKET)
3192 offset = 20;
3193
3194 /*
3195 * QP1 loopback packet has 4 bytes of internal header before
3196 * ether header. Skip these four bytes.
3197 */
3198 if (bnxt_re_is_loopback_packet(rdev, rq_hdr_buf))
3199 skip_bytes = 4;
3200
3201 	/* First send SGE. Skip the ether header */
3202 s_sge[0].addr = rq_hdr_buf_map + BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE
3203 + skip_bytes;
3204 s_sge[0].lkey = 0xFFFFFFFF;
3205 s_sge[0].length = offset ? BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4 :
3206 BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
3207
3208 /* Second Send SGE */
3209 s_sge[1].addr = s_sge[0].addr + s_sge[0].length +
3210 BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE;
3211 if (pkt_type != BNXT_RE_ROCE_V1_PACKET)
3212 s_sge[1].addr += 8;
3213 s_sge[1].lkey = 0xFFFFFFFF;
3214 s_sge[1].length = 256;
3215
3216 /* First recv SGE */
3217
3218 r_sge[0].addr = shrq_hdr_buf_map;
3219 r_sge[0].lkey = 0xFFFFFFFF;
3220 r_sge[0].length = 40;
3221
3222 r_sge[1].addr = sqp_entry->sge.addr + offset;
3223 r_sge[1].lkey = sqp_entry->sge.lkey;
3224 r_sge[1].length = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6 + 256 - offset;
3225
3226 /* Create receive work request */
3227 rwr.num_sge = 2;
3228 rwr.sg_list = r_sge;
3229 rwr.wr_id = tbl_idx;
3230 rwr.next = NULL;
3231
3232 rc = bnxt_re_post_recv_shadow_qp(rdev, gsi_sqp, &rwr);
3233 if (rc) {
3234 ibdev_err(&rdev->ibdev,
3235 "Failed to post Rx buffers to shadow QP");
3236 return -ENOMEM;
3237 }
3238
3239 swr->num_sge = 2;
3240 swr->sg_list = s_sge;
3241 swr->wr_id = tbl_idx;
3242 swr->opcode = IB_WR_SEND;
3243 swr->next = NULL;
3244 gsi_sah = rdev->gsi_ctx.gsi_sah;
3245 udwr.ah = &gsi_sah->ib_ah;
3246 udwr.remote_qpn = gsi_sqp->qplib_qp.id;
3247 udwr.remote_qkey = gsi_sqp->qplib_qp.qkey;
3248
3249 /* post data received in the send queue */
3250 return bnxt_re_post_send_shadow_qp(rdev, gsi_sqp, swr);
3251 }
3252
3253 static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc,
3254 struct bnxt_qplib_cqe *cqe)
3255 {
3256 wc->opcode = IB_WC_RECV;
3257 wc->status = __rawqp1_to_ib_wc_status(cqe->status);
3258 wc->wc_flags |= IB_WC_GRH;
3259 }
3260
3261 static bool bnxt_re_check_if_vlan_valid(struct bnxt_re_dev *rdev,
3262 u16 vlan_id)
3263 {
3264 /*
3265 	 * Check if the VLAN is configured in the host. If it is not, this
3266 	 * can be a transparent VLAN, so don't report the VLAN ID.
3267 */
3268 if (!__vlan_find_dev_deep_rcu(rdev->netdev,
3269 htons(ETH_P_8021Q), vlan_id))
3270 return false;
3271 return true;
3272 }
3273
3274 static bool bnxt_re_is_vlan_pkt(struct bnxt_qplib_cqe *orig_cqe,
3275 u16 *vid, u8 *sl)
3276 {
3277 bool ret = false;
3278 u32 metadata;
3279 u16 tpid;
3280
3281 metadata = orig_cqe->raweth_qp1_metadata;
3282 if (orig_cqe->raweth_qp1_flags2 &
3283 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_VLAN) {
3284 tpid = ((metadata &
3285 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_MASK) >>
3286 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_SFT);
3287 if (tpid == ETH_P_8021Q) {
3288 *vid = metadata &
3289 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_VID_MASK;
3290 *sl = (metadata &
3291 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_MASK) >>
3292 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_SFT;
3293 ret = true;
3294 }
3295 }
3296
3297 return ret;
3298 }
3299
3300 static void bnxt_re_process_res_rc_wc(struct ib_wc *wc,
3301 struct bnxt_qplib_cqe *cqe)
3302 {
3303 wc->opcode = IB_WC_RECV;
3304 wc->status = __rc_to_ib_wc_status(cqe->status);
3305
3306 if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
3307 wc->wc_flags |= IB_WC_WITH_IMM;
3308 if (cqe->flags & CQ_RES_RC_FLAGS_INV)
3309 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
3310 if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
3311 (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
3312 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
3313 }
3314
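/* Build the work completion reported to the GSI consumer from a shadow-QP
 * receive, using the original raw QP1 CQE saved in the sqp_tbl entry for
 * the wr_id, length, VLAN and network-header information.
 */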
3315 static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *gsi_sqp,
3316 struct ib_wc *wc,
3317 struct bnxt_qplib_cqe *cqe)
3318 {
3319 struct bnxt_re_dev *rdev = gsi_sqp->rdev;
3320 struct bnxt_re_qp *gsi_qp = NULL;
3321 struct bnxt_qplib_cqe *orig_cqe = NULL;
3322 struct bnxt_re_sqp_entries *sqp_entry = NULL;
3323 int nw_type;
3324 u32 tbl_idx;
3325 u16 vlan_id;
3326 u8 sl;
3327
3328 tbl_idx = cqe->wr_id;
3329
3330 sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx];
3331 gsi_qp = sqp_entry->qp1_qp;
3332 orig_cqe = &sqp_entry->cqe;
3333
3334 wc->wr_id = sqp_entry->wrid;
3335 wc->byte_len = orig_cqe->length;
3336 wc->qp = &gsi_qp->ib_qp;
3337
3338 wc->ex.imm_data = orig_cqe->immdata;
3339 wc->src_qp = orig_cqe->src_qp;
3340 memcpy(wc->smac, orig_cqe->smac, ETH_ALEN);
3341 if (bnxt_re_is_vlan_pkt(orig_cqe, &vlan_id, &sl)) {
3342 if (bnxt_re_check_if_vlan_valid(rdev, vlan_id)) {
3343 wc->vlan_id = vlan_id;
3344 wc->sl = sl;
3345 wc->wc_flags |= IB_WC_WITH_VLAN;
3346 }
3347 }
3348 wc->port_num = 1;
3349 wc->vendor_err = orig_cqe->status;
3350
3351 wc->opcode = IB_WC_RECV;
3352 wc->status = __rawqp1_to_ib_wc_status(orig_cqe->status);
3353 wc->wc_flags |= IB_WC_GRH;
3354
3355 nw_type = bnxt_re_check_packet_type(orig_cqe->raweth_qp1_flags,
3356 orig_cqe->raweth_qp1_flags2);
3357 if (nw_type >= 0) {
3358 wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
3359 wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
3360 }
3361 }
3362
3363 static void bnxt_re_process_res_ud_wc(struct bnxt_re_qp *qp,
3364 struct ib_wc *wc,
3365 struct bnxt_qplib_cqe *cqe)
3366 {
3367 struct bnxt_re_dev *rdev;
3368 u16 vlan_id = 0;
3369 u8 nw_type;
3370
3371 rdev = qp->rdev;
3372 wc->opcode = IB_WC_RECV;
3373 wc->status = __rc_to_ib_wc_status(cqe->status);
3374
3375 if (cqe->flags & CQ_RES_UD_FLAGS_IMM)
3376 wc->wc_flags |= IB_WC_WITH_IMM;
3377 /* report only on GSI QP for Thor */
3378 if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_GSI) {
3379 wc->wc_flags |= IB_WC_GRH;
3380 memcpy(wc->smac, cqe->smac, ETH_ALEN);
3381 wc->wc_flags |= IB_WC_WITH_SMAC;
3382 if (cqe->flags & CQ_RES_UD_FLAGS_META_FORMAT_VLAN) {
3383 vlan_id = (cqe->cfa_meta & 0xFFF);
3384 }
3385 /* Mark only if vlan_id is non zero */
3386 if (vlan_id && bnxt_re_check_if_vlan_valid(rdev, vlan_id)) {
3387 wc->vlan_id = vlan_id;
3388 wc->wc_flags |= IB_WC_WITH_VLAN;
3389 }
3390 nw_type = (cqe->flags & CQ_RES_UD_FLAGS_ROCE_IP_VER_MASK) >>
3391 CQ_RES_UD_FLAGS_ROCE_IP_VER_SFT;
3392 wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
3393 wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
3394 }
3395
3396 }
3397
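/* Bind the fence MW as a "phantom" WQE on the SQ. The CQ poll path
 * requests this via sq->send_phantom when the qplib layer flags that a
 * phantom entry is required.
 */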
3398 static int send_phantom_wqe(struct bnxt_re_qp *qp)
3399 {
3400 struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp;
3401 unsigned long flags;
3402 int rc = 0;
3403
3404 spin_lock_irqsave(&qp->sq_lock, flags);
3405
3406 rc = bnxt_re_bind_fence_mw(lib_qp);
3407 if (!rc) {
3408 lib_qp->sq.phantom_wqe_cnt++;
3409 ibdev_dbg(&qp->rdev->ibdev,
3410 "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n",
3411 lib_qp->id, lib_qp->sq.hwq.prod,
3412 HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq),
3413 lib_qp->sq.phantom_wqe_cnt);
3414 }
3415
3416 spin_unlock_irqrestore(&qp->sq_lock, flags);
3417 return rc;
3418 }
3419
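/* Poll the CQ: drain up to cq->max_cql qplib CQEs per iteration (plus any
 * entries from the flush list), service pending phantom-WQE requests, and
 * transcribe each CQE into an ib_wc. Completions belonging to the shadow
 * GSI QP are either skipped here or rebuilt from the stored raw QP1 CQE.
 */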
3420 int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
3421 {
3422 struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
3423 struct bnxt_re_qp *qp, *sh_qp;
3424 struct bnxt_qplib_cqe *cqe;
3425 int i, ncqe, budget;
3426 struct bnxt_qplib_q *sq;
3427 struct bnxt_qplib_qp *lib_qp;
3428 u32 tbl_idx;
3429 struct bnxt_re_sqp_entries *sqp_entry = NULL;
3430 unsigned long flags;
3431
3432 spin_lock_irqsave(&cq->cq_lock, flags);
3433 budget = min_t(u32, num_entries, cq->max_cql);
3434 num_entries = budget;
3435 if (!cq->cql) {
3436 ibdev_err(&cq->rdev->ibdev, "POLL CQ : no CQL to use");
3437 goto exit;
3438 }
3439 cqe = &cq->cql[0];
3440 while (budget) {
3441 lib_qp = NULL;
3442 ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp);
3443 if (lib_qp) {
3444 sq = &lib_qp->sq;
3445 if (sq->send_phantom) {
3446 qp = container_of(lib_qp,
3447 struct bnxt_re_qp, qplib_qp);
3448 if (send_phantom_wqe(qp) == -ENOMEM)
3449 ibdev_err(&cq->rdev->ibdev,
3450 "Phantom failed! Scheduled to send again\n");
3451 else
3452 sq->send_phantom = false;
3453 }
3454 }
3455 if (ncqe < budget)
3456 ncqe += bnxt_qplib_process_flush_list(&cq->qplib_cq,
3457 cqe + ncqe,
3458 budget - ncqe);
3459
3460 if (!ncqe)
3461 break;
3462
3463 for (i = 0; i < ncqe; i++, cqe++) {
3464 /* Transcribe each qplib_wqe back to ib_wc */
3465 memset(wc, 0, sizeof(*wc));
3466
3467 wc->wr_id = cqe->wr_id;
3468 wc->byte_len = cqe->length;
3469 qp = container_of
3470 ((struct bnxt_qplib_qp *)
3471 (unsigned long)(cqe->qp_handle),
3472 struct bnxt_re_qp, qplib_qp);
3473 wc->qp = &qp->ib_qp;
3474 wc->ex.imm_data = cqe->immdata;
3475 wc->src_qp = cqe->src_qp;
3476 memcpy(wc->smac, cqe->smac, ETH_ALEN);
3477 wc->port_num = 1;
3478 wc->vendor_err = cqe->status;
3479
3480 switch (cqe->opcode) {
3481 case CQ_BASE_CQE_TYPE_REQ:
3482 sh_qp = qp->rdev->gsi_ctx.gsi_sqp;
3483 if (sh_qp &&
3484 qp->qplib_qp.id == sh_qp->qplib_qp.id) {
3485 /* Handle this completion with
3486 * the stored completion
3487 */
3488 memset(wc, 0, sizeof(*wc));
3489 continue;
3490 }
3491 bnxt_re_process_req_wc(wc, cqe);
3492 break;
3493 case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
3494 if (!cqe->status) {
3495 int rc = 0;
3496
3497 rc = bnxt_re_process_raw_qp_pkt_rx
3498 (qp, cqe);
3499 if (!rc) {
3500 memset(wc, 0, sizeof(*wc));
3501 continue;
3502 }
3503 cqe->status = -1;
3504 }
3505 /* Errors need not be looped back.
3506 * But change the wr_id to the one
3507 * stored in the table
3508 */
3509 tbl_idx = cqe->wr_id;
3510 sqp_entry = &cq->rdev->gsi_ctx.sqp_tbl[tbl_idx];
3511 wc->wr_id = sqp_entry->wrid;
3512 bnxt_re_process_res_rawqp1_wc(wc, cqe);
3513 break;
3514 case CQ_BASE_CQE_TYPE_RES_RC:
3515 bnxt_re_process_res_rc_wc(wc, cqe);
3516 break;
3517 case CQ_BASE_CQE_TYPE_RES_UD:
3518 sh_qp = qp->rdev->gsi_ctx.gsi_sqp;
3519 if (sh_qp &&
3520 qp->qplib_qp.id == sh_qp->qplib_qp.id) {
3521 /* Handle this completion with
3522 * the stored completion
3523 */
3524 if (cqe->status) {
3525 continue;
3526 } else {
3527 bnxt_re_process_res_shadow_qp_wc
3528 (qp, wc, cqe);
3529 break;
3530 }
3531 }
3532 bnxt_re_process_res_ud_wc(qp, wc, cqe);
3533 break;
3534 default:
3535 ibdev_err(&cq->rdev->ibdev,
3536 "POLL CQ : type 0x%x not handled",
3537 cqe->opcode);
3538 continue;
3539 }
3540 wc++;
3541 budget--;
3542 }
3543 }
3544 exit:
3545 spin_unlock_irqrestore(&cq->cq_lock, flags);
3546 return num_entries - budget;
3547 }
3548
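/* Arm the CQ for notification: IB_CQ_NEXT_COMP arms for any completion,
 * IB_CQ_SOLICITED for solicited completions only.  If
 * IB_CQ_REPORT_MISSED_EVENTS is set and the CQ is not empty, return 1
 * without re-arming so the caller knows to poll again.
 */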
int bnxt_re_req_notify_cq(struct ib_cq *ib_cq,
			  enum ib_cq_notify_flags ib_cqn_flags)
{
	struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
	int type = 0, rc = 0;
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	/* Trigger on the very next completion */
	if (ib_cqn_flags & IB_CQ_NEXT_COMP)
		type = DBC_DBC_TYPE_CQ_ARMALL;
	/* Trigger on the next solicited completion */
	else if (ib_cqn_flags & IB_CQ_SOLICITED)
		type = DBC_DBC_TYPE_CQ_ARMSE;

	/* Poll to see if there are missed events */
	if ((ib_cqn_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
	    !(bnxt_qplib_is_cq_empty(&cq->qplib_cq))) {
		rc = 1;
		goto exit;
	}
	bnxt_qplib_req_notify_cq(&cq->qplib_cq, type);

exit:
	spin_unlock_irqrestore(&cq->cq_lock, flags);
	return rc;
}

/* Memory Regions */
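/* Allocate a DMA MR that spans the whole local address space: no page
 * list is supplied and total_size is set to -1 (unbounded).
 */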
struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
{
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_re_mr *mr;
	int rc;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->rdev = rdev;
	mr->qplib_mr.pd = &pd->qplib_pd;
	mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;

	/* Allocate and register 0 as the address */
	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
	if (rc)
		goto fail;

	mr->qplib_mr.hwq.level = PBL_LVL_MAX;
	mr->qplib_mr.total_size = -1; /* Infinite length */
	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, NULL, 0,
			       PAGE_SIZE);
	if (rc)
		goto fail_mr;

	mr->ib_mr.lkey = mr->qplib_mr.lkey;
	if (mr_access_flags & (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |
			       IB_ACCESS_REMOTE_ATOMIC))
		mr->ib_mr.rkey = mr->ib_mr.lkey;
	atomic_inc(&rdev->mr_count);

	return &mr->ib_mr;

fail_mr:
	bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
fail:
	kfree(mr);
	return ERR_PTR(rc);
}

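/* Destroy an MR: release the HW MRW, any fast-register page list and
 * the backing umem.
 */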
int bnxt_re_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
{
	struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
	struct bnxt_re_dev *rdev = mr->rdev;
	int rc;

	rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Dereg MR failed: %#x\n", rc);
		return rc;
	}

	if (mr->pages) {
		rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
							&mr->qplib_frpl);
		kfree(mr->pages);
		mr->npages = 0;
		mr->pages = NULL;
	}
	ib_umem_release(mr->ib_umem);

	kfree(mr);
	atomic_dec(&rdev->mr_count);
	return rc;
}

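/* ib_sg_to_pages() callback: record one DMA page address in mr->pages,
 * bounded by the size of the fast-register page list.
 */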
static int bnxt_re_set_page(struct ib_mr *ib_mr, u64 addr)
{
	struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);

	if (unlikely(mr->npages == mr->qplib_frpl.max_pg_ptrs))
		return -ENOMEM;

	mr->pages[mr->npages++] = addr;
	return 0;
}

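/* Map a scatterlist into the MR's page array for a fast-register work
 * request.  ULPs reach this through ib_map_mr_sg(), e.g. (illustrative):
 *
 *	n = ib_map_mr_sg(mr, sg, sg_nents, NULL, PAGE_SIZE);
 */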
int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset)
{
	struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);

	mr->npages = 0;
	return ib_sg_to_pages(ib_mr, sg, sg_nents, sg_offset, bnxt_re_set_page);
}

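/* Allocate a fast-register MR (only IB_MR_TYPE_MEM_REG is supported),
 * together with the driver-side page array and the HW fast-register
 * page list.
 */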
struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
			       u32 max_num_sg)
{
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_re_mr *mr = NULL;
	int rc;

	if (type != IB_MR_TYPE_MEM_REG) {
		ibdev_dbg(&rdev->ibdev, "MR type 0x%x not supported", type);
		return ERR_PTR(-EINVAL);
	}
	if (max_num_sg > MAX_PBL_LVL_1_PGS)
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->rdev = rdev;
	mr->qplib_mr.pd = &pd->qplib_pd;
	mr->qplib_mr.flags = BNXT_QPLIB_FR_PMR;
	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;

	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
	if (rc)
		goto bail;

	mr->ib_mr.lkey = mr->qplib_mr.lkey;
	mr->ib_mr.rkey = mr->ib_mr.lkey;

	mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
	if (!mr->pages) {
		rc = -ENOMEM;
		goto fail;
	}
	rc = bnxt_qplib_alloc_fast_reg_page_list(&rdev->qplib_res,
						 &mr->qplib_frpl, max_num_sg);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to allocate HW FR page list");
		goto fail_mr;
	}

	atomic_inc(&rdev->mr_count);
	return &mr->ib_mr;

fail_mr:
	kfree(mr->pages);
fail:
	bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
bail:
	kfree(mr);
	return ERR_PTR(rc);
}

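/* Allocate a type 1 or type 2B memory window backed by a HW MRW. */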
struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
			       struct ib_udata *udata)
{
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_re_mw *mw;
	int rc;

	mw = kzalloc(sizeof(*mw), GFP_KERNEL);
	if (!mw)
		return ERR_PTR(-ENOMEM);
	mw->rdev = rdev;
	mw->qplib_mw.pd = &pd->qplib_pd;

	mw->qplib_mw.type = (type == IB_MW_TYPE_1 ?
			     CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 :
			     CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B);
	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Allocate MW failed!");
		goto fail;
	}
	mw->ib_mw.rkey = mw->qplib_mw.rkey;

	atomic_inc(&rdev->mw_count);
	return &mw->ib_mw;

fail:
	kfree(mw);
	return ERR_PTR(rc);
}

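/* Free the HW MRW backing a memory window. */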
int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
{
	struct bnxt_re_mw *mw = container_of(ib_mw, struct bnxt_re_mw, ib_mw);
	struct bnxt_re_dev *rdev = mw->rdev;
	int rc;

	rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Free MW failed: %#x\n", rc);
		return rc;
	}

	kfree(mw);
	atomic_dec(&rdev->mw_count);
	return rc;
}

/* uverbs */
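/* Register a user MR: pin the user buffer with ib_umem_get(), pick the
 * best supported page size (4K or 2M) and program the HW MR with the
 * resulting DMA blocks.
 */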
struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
				  u64 virt_addr, int mr_access_flags,
				  struct ib_udata *udata)
{
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_re_mr *mr;
	struct ib_umem *umem;
	unsigned long page_size;
	int umem_pgs, rc;

	if (length > BNXT_RE_MAX_MR_SIZE) {
		ibdev_err(&rdev->ibdev, "MR Size: %lld > Max supported:%lld\n",
			  length, BNXT_RE_MAX_MR_SIZE);
		return ERR_PTR(-ENOMEM);
	}

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->rdev = rdev;
	mr->qplib_mr.pd = &pd->qplib_pd;
	mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR;

	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to allocate MR");
		goto free_mr;
	}
	/* The fixed portion of the rkey is the same as the lkey */
	mr->ib_mr.rkey = mr->qplib_mr.rkey;

	umem = ib_umem_get(&rdev->ibdev, start, length, mr_access_flags);
	if (IS_ERR(umem)) {
		ibdev_err(&rdev->ibdev, "Failed to get umem");
		rc = -EFAULT;
		goto free_mrw;
	}
	mr->ib_umem = umem;

	mr->qplib_mr.va = virt_addr;
	page_size = ib_umem_find_best_pgsz(
		umem, BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_2M, virt_addr);
	if (!page_size) {
		ibdev_err(&rdev->ibdev, "umem page size unsupported!");
		rc = -EFAULT;
		goto free_umem;
	}
	mr->qplib_mr.total_size = length;

	umem_pgs = ib_umem_num_dma_blocks(umem, page_size);
	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, umem,
			       umem_pgs, page_size);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to register user MR");
		goto free_umem;
	}

	mr->ib_mr.lkey = mr->qplib_mr.lkey;
	mr->ib_mr.rkey = mr->qplib_mr.lkey;
	atomic_inc(&rdev->mr_count);

	return &mr->ib_mr;
free_umem:
	ib_umem_release(umem);
free_mrw:
	bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
free_mr:
	kfree(mr);
	return ERR_PTR(rc);
}

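/* Set up a user context: verify the uverbs ABI version, allocate the
 * shared page and report device capabilities (chip id, queue limits,
 * WQE mode) back to user space through the udata response.
 */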
int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
{
	struct ib_device *ibdev = ctx->device;
	struct bnxt_re_ucontext *uctx =
		container_of(ctx, struct bnxt_re_ucontext, ib_uctx);
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
	struct bnxt_re_uctx_resp resp = {};
	u32 chip_met_rev_num = 0;
	int rc;

	ibdev_dbg(ibdev, "ABI version requested %u", ibdev->ops.uverbs_abi_ver);

	if (ibdev->ops.uverbs_abi_ver != BNXT_RE_ABI_VERSION) {
		ibdev_dbg(ibdev, "Requested ABI version is different from the device's %d",
			  BNXT_RE_ABI_VERSION);
		return -EPERM;
	}

	uctx->rdev = rdev;

	uctx->shpg = (void *)__get_free_page(GFP_KERNEL);
	if (!uctx->shpg) {
		rc = -ENOMEM;
		goto fail;
	}
	spin_lock_init(&uctx->sh_lock);

	resp.comp_mask = BNXT_RE_UCNTX_CMASK_HAVE_CCTX;
	chip_met_rev_num = rdev->chip_ctx->chip_num;
	chip_met_rev_num |= ((u32)rdev->chip_ctx->chip_rev & 0xFF) <<
			     BNXT_RE_CHIP_ID0_CHIP_REV_SFT;
	chip_met_rev_num |= ((u32)rdev->chip_ctx->chip_metal & 0xFF) <<
			     BNXT_RE_CHIP_ID0_CHIP_MET_SFT;
	resp.chip_id0 = chip_met_rev_num;
	/* Temporary: use xa_alloc instead */
	resp.dev_id = rdev->en_dev->pdev->devfn;
	resp.max_qp = rdev->qplib_ctx.qpc_count;
	resp.pg_size = PAGE_SIZE;
	resp.cqe_sz = sizeof(struct cq_base);
	resp.max_cqd = dev_attr->max_cq_wqes;

	resp.comp_mask |= BNXT_RE_UCNTX_CMASK_HAVE_MODE;
	resp.mode = rdev->chip_ctx->modes.wqe_mode;

	rc = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
	if (rc) {
		ibdev_err(ibdev, "Failed to copy user context");
		rc = -EFAULT;
		goto cfail;
	}

	return 0;
cfail:
	free_page((unsigned long)uctx->shpg);
	uctx->shpg = NULL;
fail:
	return rc;
}

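/* Tear down a user context: free the shared page and, if one was
 * allocated, the context's DPI.
 */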
void bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
{
	struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
						     struct bnxt_re_ucontext,
						     ib_uctx);
	struct bnxt_re_dev *rdev = uctx->rdev;

	if (uctx->shpg)
		free_page((unsigned long)uctx->shpg);

	if (uctx->dpi.dbr) {
		/* Free DPI only if this is the first PD allocated by the
		 * application and mark the context dpi as NULL
		 */
		bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
				       &rdev->qplib_res.dpi_tbl, &uctx->dpi);
		uctx->dpi.dbr = NULL;
	}
}

/* Helper function to mmap the DPI doorbell page or the shared page into
 * the user application's address space
 */
int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
{
	struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
						     struct bnxt_re_ucontext,
						     ib_uctx);
	struct bnxt_re_dev *rdev = uctx->rdev;
	u64 pfn;

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;

	if (vma->vm_pgoff) {
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
				       PAGE_SIZE, vma->vm_page_prot)) {
			ibdev_err(&rdev->ibdev, "Failed to map DPI");
			return -EAGAIN;
		}
	} else {
		pfn = virt_to_phys(uctx->shpg) >> PAGE_SHIFT;
		if (remap_pfn_range(vma, vma->vm_start,
				    pfn, PAGE_SIZE, vma->vm_page_prot)) {
			ibdev_err(&rdev->ibdev, "Failed to map shared page");
			return -EAGAIN;
		}
	}

	return 0;
}
