/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: Main component of the bnxt_re driver
 */

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <net/dcbnl.h>
#include <net/ipv6.h>
#include <net/addrconf.h>
#include <linux/if_ether.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>

#include "bnxt_ulp.h"
#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
#include "qplib_fp.h"
#include "qplib_rcfw.h"
#include "bnxt_re.h"
#include "ib_verbs.h"
#include <rdma/bnxt_re-abi.h>
#include "bnxt.h"
#include "hw_counters.h"

static char version[] =
		BNXT_RE_DESC "\n";

MODULE_AUTHOR("Eddie Wai <eddie.wai@broadcom.com>");
MODULE_DESCRIPTION(BNXT_RE_DESC " Driver");
MODULE_LICENSE("Dual BSD/GPL");

/* globals */
static struct list_head bnxt_re_dev_list = LIST_HEAD_INIT(bnxt_re_dev_list);
/* Mutex to protect the list of bnxt_re devices added */
static DEFINE_MUTEX(bnxt_re_dev_lock);
static struct workqueue_struct *bnxt_re_wq;
static void bnxt_re_remove_device(struct bnxt_re_dev *rdev);
static void bnxt_re_dealloc_driver(struct ib_device *ib_dev);
static void bnxt_re_stop_irq(void *handle);

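/* Variable-sized WQE support is honoured only on Gen P5 chips; all
 * older chips fall back to fixed-size (static) WQEs.
 */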
static void bnxt_re_set_drv_mode(struct bnxt_re_dev *rdev, u8 mode)
{
	struct bnxt_qplib_chip_ctx *cctx;

	cctx = rdev->chip_ctx;
	cctx->modes.wqe_mode = bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
			       mode : BNXT_QPLIB_WQE_MODE_STATIC;
}

static void bnxt_re_destroy_chip_ctx(struct bnxt_re_dev *rdev)
{
	struct bnxt_qplib_chip_ctx *chip_ctx;

	if (!rdev->chip_ctx)
		return;
	chip_ctx = rdev->chip_ctx;
	rdev->chip_ctx = NULL;
	rdev->rcfw.res = NULL;
	rdev->qplib_res.cctx = NULL;
	rdev->qplib_res.pdev = NULL;
	rdev->qplib_res.netdev = NULL;
	kfree(chip_ctx);
}

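/* Cache the chip identity and hardware stats size from the L2 driver's
 * (bnxt_en) private data and wire the chip context into the qplib
 * resource structures.
 */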
static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev, u8 wqe_mode)
{
	struct bnxt_qplib_chip_ctx *chip_ctx;
	struct bnxt_en_dev *en_dev;
	struct bnxt *bp;

	en_dev = rdev->en_dev;
	bp = netdev_priv(en_dev->net);

	chip_ctx = kzalloc(sizeof(*chip_ctx), GFP_KERNEL);
	if (!chip_ctx)
		return -ENOMEM;
	chip_ctx->chip_num = bp->chip_num;
	chip_ctx->hw_stats_size = bp->hw_ring_stats_size;

	rdev->chip_ctx = chip_ctx;
	/* The rest of the members will be populated later */

	rdev->qplib_res.cctx = rdev->chip_ctx;
	rdev->rcfw.res = &rdev->qplib_res;

	bnxt_re_set_drv_mode(rdev, wqe_mode);
	return 0;
}

/* SR-IOV helper functions */

static void bnxt_re_get_sriov_func_type(struct bnxt_re_dev *rdev)
{
	struct bnxt *bp;

	bp = netdev_priv(rdev->en_dev->net);
	if (BNXT_VF(bp))
		rdev->is_virtfn = 1;
}

/* Set the maximum number of each resource that the driver actually wants
 * to allocate. This may be up to the maximum number the firmware has
 * reserved for the function. The driver may choose to allocate fewer
 * resources than the firmware maximum.
 */
static void bnxt_re_limit_pf_res(struct bnxt_re_dev *rdev)
{
	struct bnxt_qplib_dev_attr *attr;
	struct bnxt_qplib_ctx *ctx;
	int i;

	attr = &rdev->dev_attr;
	ctx = &rdev->qplib_ctx;

	ctx->qpc_count = min_t(u32, BNXT_RE_MAX_QPC_COUNT,
			       attr->max_qp);
	ctx->mrw_count = BNXT_RE_MAX_MRW_COUNT_256K;
	/* Use max_mr from fw since max_mrw does not get set */
	ctx->mrw_count = min_t(u32, ctx->mrw_count, attr->max_mr);
	ctx->srqc_count = min_t(u32, BNXT_RE_MAX_SRQC_COUNT,
				attr->max_srq);
	ctx->cq_count = min_t(u32, BNXT_RE_MAX_CQ_COUNT, attr->max_cq);
	if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx))
		for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
			rdev->qplib_ctx.tqm_ctx.qcount[i] =
			rdev->dev_attr.tqm_alloc_reqs[i];
}

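/* Each per-VF limit below works out to (PF count * vf_pct) / (100 * nvfs).
 * As an illustration only (assuming BNXT_RE_PCT_RSVD_FOR_PF is 10): with
 * qpc_count == 65536 and 4 VFs, each VF may create
 * (65536 * 90) / (100 * 4) = 14745 QPs.
 */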
static void bnxt_re_limit_vf_res(struct bnxt_qplib_ctx *qplib_ctx, u32 num_vf)
{
	struct bnxt_qplib_vf_res *vf_res;
	u32 mrws = 0;
	u32 vf_pct;
	u32 nvfs;

	vf_res = &qplib_ctx->vf_res;
	/*
	 * Reserve a set of resources for the PF. Divide the remaining
	 * resources among the VFs
	 */
	vf_pct = 100 - BNXT_RE_PCT_RSVD_FOR_PF;
	nvfs = num_vf;
	num_vf = 100 * num_vf;
	vf_res->max_qp_per_vf = (qplib_ctx->qpc_count * vf_pct) / num_vf;
	vf_res->max_srq_per_vf = (qplib_ctx->srqc_count * vf_pct) / num_vf;
	vf_res->max_cq_per_vf = (qplib_ctx->cq_count * vf_pct) / num_vf;
	/*
	 * The driver allows many more MRs than other resources. If the
	 * firmware does also, then reserve a fixed amount for the PF and
	 * divide the rest among VFs. VFs may use many MRs for NFS
	 * mounts, ISER, NVME applications, etc. If the firmware severely
	 * restricts the number of MRs, then let PF have half and divide
	 * the rest among VFs, as for the other resource types.
	 */
	if (qplib_ctx->mrw_count < BNXT_RE_MAX_MRW_COUNT_64K) {
		mrws = qplib_ctx->mrw_count * vf_pct;
		nvfs = num_vf;
	} else {
		mrws = qplib_ctx->mrw_count - BNXT_RE_RESVD_MR_FOR_PF;
	}
	vf_res->max_mrw_per_vf = (mrws / nvfs);
	vf_res->max_gid_per_vf = BNXT_RE_MAX_GID_PER_VF;
}

static void bnxt_re_set_resource_limits(struct bnxt_re_dev *rdev)
{
	u32 num_vfs;

	memset(&rdev->qplib_ctx.vf_res, 0, sizeof(struct bnxt_qplib_vf_res));
	bnxt_re_limit_pf_res(rdev);

	num_vfs = bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
			BNXT_RE_GEN_P5_MAX_VF : rdev->num_vfs;
	if (num_vfs)
		bnxt_re_limit_vf_res(&rdev->qplib_ctx, num_vfs);
}

/* for handling bnxt_en callbacks later */
static void bnxt_re_stop(void *p)
{
}

static void bnxt_re_start(void *p)
{
}

static void bnxt_re_sriov_config(void *p, int num_vfs)
{
	struct bnxt_re_dev *rdev = p;

	if (!rdev)
		return;

	rdev->num_vfs = num_vfs;
	if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) {
		bnxt_re_set_resource_limits(rdev);
		bnxt_qplib_set_func_resources(&rdev->qplib_res, &rdev->rcfw,
					      &rdev->qplib_ctx);
	}
}

static void bnxt_re_shutdown(void *p)
{
	struct bnxt_re_dev *rdev = p;

	if (!rdev)
		return;
	ASSERT_RTNL();
	/* Release the MSIx vectors before queuing unregister */
	bnxt_re_stop_irq(rdev);
	ib_unregister_device_queued(&rdev->ibdev);
}

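/* Quiesce interrupts before the L2 driver reclaims the MSI-X vectors:
 * the BNXT_RE_AEQ_IDX entry services the CREQ, while the entries from
 * BNXT_RE_NQ_IDX onwards service the NQs.
 */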
static void bnxt_re_stop_irq(void *handle)
{
	struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
	struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw;
	struct bnxt_qplib_nq *nq;
	int indx;

	for (indx = BNXT_RE_NQ_IDX; indx < rdev->num_msix; indx++) {
		nq = &rdev->nq[indx - 1];
		bnxt_qplib_nq_stop_irq(nq, false);
	}

	bnxt_qplib_rcfw_stop_irq(rcfw, false);
}

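/* Re-arm the CREQ and NQ interrupts with the (possibly changed) MSI-X
 * vectors handed back by the L2 driver after an interrupt restart.
 */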
static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
{
	struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
	struct bnxt_msix_entry *msix_ent = rdev->msix_entries;
	struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw;
	struct bnxt_qplib_nq *nq;
	int indx, rc;

	if (!ent) {
		/* Not setting the f/w timeout bit in rcfw.
		 * During the driver unload the first command
		 * to f/w will timeout and that will set the
		 * timeout bit.
		 */
		ibdev_err(&rdev->ibdev, "Failed to re-start IRQs\n");
		return;
	}

	/* Vectors may change after restart, so update with new vectors
	 * in the device structure.
	 */
	for (indx = 0; indx < rdev->num_msix; indx++)
		rdev->msix_entries[indx].vector = ent[indx].vector;

	bnxt_qplib_rcfw_start_irq(rcfw, msix_ent[BNXT_RE_AEQ_IDX].vector,
				  false);
	for (indx = BNXT_RE_NQ_IDX; indx < rdev->num_msix; indx++) {
		nq = &rdev->nq[indx - 1];
		rc = bnxt_qplib_nq_start_irq(nq, indx - 1,
					     msix_ent[indx].vector, false);
		if (rc)
			ibdev_warn(&rdev->ibdev, "Failed to reinit NQ index %d\n",
				   indx - 1);
	}
}

static struct bnxt_ulp_ops bnxt_re_ulp_ops = {
	.ulp_async_notifier = NULL,
	.ulp_stop = bnxt_re_stop,
	.ulp_start = bnxt_re_start,
	.ulp_sriov_config = bnxt_re_sriov_config,
	.ulp_shutdown = bnxt_re_shutdown,
	.ulp_irq_stop = bnxt_re_stop_irq,
	.ulp_irq_restart = bnxt_re_start_irq
};

/* RoCE -> Net driver */

/* Driver registration routines used to let the networking driver (bnxt_en)
 * know that the RoCE driver is now installed
 */
static int bnxt_re_unregister_netdev(struct bnxt_re_dev *rdev)
{
	struct bnxt_en_dev *en_dev;
	int rc;

	if (!rdev)
		return -EINVAL;

	en_dev = rdev->en_dev;

	rc = en_dev->en_ops->bnxt_unregister_device(rdev->en_dev,
						    BNXT_ROCE_ULP);
	return rc;
}

static int bnxt_re_register_netdev(struct bnxt_re_dev *rdev)
{
	struct bnxt_en_dev *en_dev;
	int rc = 0;

	if (!rdev)
		return -EINVAL;

	en_dev = rdev->en_dev;

	rc = en_dev->en_ops->bnxt_register_device(en_dev, BNXT_ROCE_ULP,
						  &bnxt_re_ulp_ops, rdev);
	rdev->qplib_res.pdev = rdev->en_dev->pdev;
	return rc;
}

static int bnxt_re_free_msix(struct bnxt_re_dev *rdev)
{
	struct bnxt_en_dev *en_dev;
	int rc;

	if (!rdev)
		return -EINVAL;

	en_dev = rdev->en_dev;

	rc = en_dev->en_ops->bnxt_free_msix(rdev->en_dev, BNXT_ROCE_ULP);

	return rc;
}

static int bnxt_re_request_msix(struct bnxt_re_dev *rdev)
{
	int rc = 0, num_msix_want = BNXT_RE_MAX_MSIX, num_msix_got;
	struct bnxt_en_dev *en_dev;

	if (!rdev)
		return -EINVAL;

	en_dev = rdev->en_dev;

	num_msix_want = min_t(u32, BNXT_RE_MAX_MSIX, num_online_cpus());

	num_msix_got = en_dev->en_ops->bnxt_request_msix(en_dev, BNXT_ROCE_ULP,
							 rdev->msix_entries,
							 num_msix_want);
	if (num_msix_got < BNXT_RE_MIN_MSIX) {
		rc = -EINVAL;
		goto done;
	}
	if (num_msix_got != num_msix_want) {
		ibdev_warn(&rdev->ibdev,
			   "Requested %d MSI-X vectors, got %d\n",
			   num_msix_want, num_msix_got);
	}
	rdev->num_msix = num_msix_got;
done:
	return rc;
}

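/* Helpers to build HWRM firmware messages; the messages are relayed to
 * the firmware through the bnxt_en (L2) driver's message channel.
 */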
static void bnxt_re_init_hwrm_hdr(struct bnxt_re_dev *rdev, struct input *hdr,
				  u16 opcd, u16 crid, u16 trid)
{
	hdr->req_type = cpu_to_le16(opcd);
	hdr->cmpl_ring = cpu_to_le16(crid);
	hdr->target_id = cpu_to_le16(trid);
}

static void bnxt_re_fill_fw_msg(struct bnxt_fw_msg *fw_msg, void *msg,
				int msg_len, void *resp, int resp_max_len,
				int timeout)
{
	fw_msg->msg = msg;
	fw_msg->msg_len = msg_len;
	fw_msg->resp = resp;
	fw_msg->resp_max_len = resp_max_len;
	fw_msg->timeout = timeout;
}

static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev,
				 u16 fw_ring_id, int type)
{
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct hwrm_ring_free_input req = {0};
	struct hwrm_ring_free_output resp;
	struct bnxt_fw_msg fw_msg;
	int rc = -EINVAL;

	if (!en_dev)
		return rc;

	memset(&fw_msg, 0, sizeof(fw_msg));

	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_FREE, -1, -1);
	req.ring_type = type;
	req.ring_id = cpu_to_le16(fw_ring_id);
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (rc)
		ibdev_err(&rdev->ibdev, "Failed to free HW ring:%d :%#x",
			  req.ring_id, rc);
	return rc;
}

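/* Request an NQ/CREQ completion ring from the firmware via
 * HWRM_RING_ALLOC; on success the firmware-assigned id is returned
 * through *fw_ring_id.
 */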
static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev,
				  struct bnxt_re_ring_attr *ring_attr,
				  u16 *fw_ring_id)
{
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct hwrm_ring_alloc_input req = {0};
	struct hwrm_ring_alloc_output resp;
	struct bnxt_fw_msg fw_msg;
	int rc = -EINVAL;

	if (!en_dev)
		return rc;

	memset(&fw_msg, 0, sizeof(fw_msg));
	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_ALLOC, -1, -1);
	req.enables = 0;
	req.page_tbl_addr = cpu_to_le64(ring_attr->dma_arr[0]);
	if (ring_attr->pages > 1) {
		/* Page size is in log2 units */
		req.page_size = BNXT_PAGE_SHIFT;
		req.page_tbl_depth = 1;
	}
	req.fbo = 0;
	/* Association of ring index with doorbell index and MSIX number */
	req.logical_id = cpu_to_le16(ring_attr->lrid);
	req.length = cpu_to_le32(ring_attr->depth + 1);
	req.ring_type = ring_attr->type;
	req.int_mode = ring_attr->mode;
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (!rc)
		*fw_ring_id = le16_to_cpu(resp.ring_id);

	return rc;
}

static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev,
				      u32 fw_stats_ctx_id)
{
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct hwrm_stat_ctx_free_input req = {0};
	struct hwrm_stat_ctx_free_output resp = {0};
	struct bnxt_fw_msg fw_msg;
	int rc = -EINVAL;

	if (!en_dev)
		return rc;

	memset(&fw_msg, 0, sizeof(fw_msg));

	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_FREE, -1, -1);
	req.stat_ctx_id = cpu_to_le32(fw_stats_ctx_id);
	/* Use a dedicated response buffer; do not let the firmware reply
	 * overwrite the request structure.
	 */
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (rc)
		ibdev_err(&rdev->ibdev, "Failed to free HW stats context %#x",
			  rc);

	return rc;
}

static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
				       dma_addr_t dma_map,
				       u32 *fw_stats_ctx_id)
{
	struct bnxt_qplib_chip_ctx *chip_ctx = rdev->chip_ctx;
	struct hwrm_stat_ctx_alloc_output resp = {0};
	struct hwrm_stat_ctx_alloc_input req = {0};
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct bnxt_fw_msg fw_msg;
	int rc = -EINVAL;

	*fw_stats_ctx_id = INVALID_STATS_CTX_ID;

	if (!en_dev)
		return rc;

	memset(&fw_msg, 0, sizeof(fw_msg));

	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_ALLOC, -1, -1);
	req.update_period_ms = cpu_to_le32(1000);
	req.stats_dma_addr = cpu_to_le64(dma_map);
	req.stats_dma_length = cpu_to_le16(chip_ctx->hw_stats_size);
	req.stat_ctx_flags = STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE;
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (!rc)
		*fw_stats_ctx_id = le32_to_cpu(resp.stat_ctx_id);

	return rc;
}

/* Device */

static bool is_bnxt_re_dev(struct net_device *netdev)
{
	struct ethtool_drvinfo drvinfo;

	if (netdev->ethtool_ops && netdev->ethtool_ops->get_drvinfo) {
		memset(&drvinfo, 0, sizeof(drvinfo));
		netdev->ethtool_ops->get_drvinfo(netdev, &drvinfo);

		if (strcmp(drvinfo.driver, "bnxt_en"))
			return false;
		return true;
	}
	return false;
}

static struct bnxt_re_dev *bnxt_re_from_netdev(struct net_device *netdev)
{
	struct ib_device *ibdev =
		ib_device_get_by_netdev(netdev, RDMA_DRIVER_BNXT_RE);
	if (!ibdev)
		return NULL;

	return container_of(ibdev, struct bnxt_re_dev, ibdev);
}

static void bnxt_re_dev_unprobe(struct net_device *netdev,
				struct bnxt_en_dev *en_dev)
{
	dev_put(netdev);
	module_put(en_dev->pdev->driver->driver.owner);
}

static struct bnxt_en_dev *bnxt_re_dev_probe(struct net_device *netdev)
{
	struct bnxt *bp = netdev_priv(netdev);
	struct bnxt_en_dev *en_dev;
	struct pci_dev *pdev;

	/* Call bnxt_en's RoCE probe via indirect API */
	if (!bp->ulp_probe)
		return ERR_PTR(-EINVAL);

	en_dev = bp->ulp_probe(netdev);
	if (IS_ERR(en_dev))
		return en_dev;

	pdev = en_dev->pdev;
	if (!pdev)
		return ERR_PTR(-EINVAL);

	if (!(en_dev->flags & BNXT_EN_FLAG_ROCE_CAP)) {
		dev_info(&pdev->dev,
			"%s: probe error: RoCE is not supported on this device",
			ROCE_DRV_MODULE_NAME);
		return ERR_PTR(-ENODEV);
	}

	/* Bump net device reference count */
	if (!try_module_get(pdev->driver->driver.owner))
		return ERR_PTR(-ENODEV);

	dev_hold(netdev);

	return en_dev;
}

static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
			   char *buf)
{
	struct bnxt_re_dev *rdev =
		rdma_device_to_drv_device(device, struct bnxt_re_dev, ibdev);

	return scnprintf(buf, PAGE_SIZE, "0x%x\n", rdev->en_dev->pdev->vendor);
}
static DEVICE_ATTR_RO(hw_rev);

static ssize_t hca_type_show(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	struct bnxt_re_dev *rdev =
		rdma_device_to_drv_device(device, struct bnxt_re_dev, ibdev);

	return scnprintf(buf, PAGE_SIZE, "%s\n", rdev->ibdev.node_desc);
}
static DEVICE_ATTR_RO(hca_type);

static struct attribute *bnxt_re_attributes[] = {
	&dev_attr_hw_rev.attr,
	&dev_attr_hca_type.attr,
	NULL
};

static const struct attribute_group bnxt_re_dev_attr_group = {
	.attrs = bnxt_re_attributes,
};

static const struct ib_device_ops bnxt_re_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_BNXT_RE,
	.uverbs_abi_ver = BNXT_RE_ABI_VERSION,

	.add_gid = bnxt_re_add_gid,
	.alloc_hw_stats = bnxt_re_ib_alloc_hw_stats,
	.alloc_mr = bnxt_re_alloc_mr,
	.alloc_pd = bnxt_re_alloc_pd,
	.alloc_ucontext = bnxt_re_alloc_ucontext,
	.create_ah = bnxt_re_create_ah,
	.create_cq = bnxt_re_create_cq,
	.create_qp = bnxt_re_create_qp,
	.create_srq = bnxt_re_create_srq,
	.dealloc_driver = bnxt_re_dealloc_driver,
	.dealloc_pd = bnxt_re_dealloc_pd,
	.dealloc_ucontext = bnxt_re_dealloc_ucontext,
	.del_gid = bnxt_re_del_gid,
	.dereg_mr = bnxt_re_dereg_mr,
	.destroy_ah = bnxt_re_destroy_ah,
	.destroy_cq = bnxt_re_destroy_cq,
	.destroy_qp = bnxt_re_destroy_qp,
	.destroy_srq = bnxt_re_destroy_srq,
	.get_dev_fw_str = bnxt_re_query_fw_str,
	.get_dma_mr = bnxt_re_get_dma_mr,
	.get_hw_stats = bnxt_re_ib_get_hw_stats,
	.get_link_layer = bnxt_re_get_link_layer,
	.get_port_immutable = bnxt_re_get_port_immutable,
	.map_mr_sg = bnxt_re_map_mr_sg,
	.mmap = bnxt_re_mmap,
	.modify_ah = bnxt_re_modify_ah,
	.modify_qp = bnxt_re_modify_qp,
	.modify_srq = bnxt_re_modify_srq,
	.poll_cq = bnxt_re_poll_cq,
	.post_recv = bnxt_re_post_recv,
	.post_send = bnxt_re_post_send,
	.post_srq_recv = bnxt_re_post_srq_recv,
	.query_ah = bnxt_re_query_ah,
	.query_device = bnxt_re_query_device,
	.query_pkey = bnxt_re_query_pkey,
	.query_port = bnxt_re_query_port,
	.query_qp = bnxt_re_query_qp,
	.query_srq = bnxt_re_query_srq,
	.reg_user_mr = bnxt_re_reg_user_mr,
	.req_notify_cq = bnxt_re_req_notify_cq,
	INIT_RDMA_OBJ_SIZE(ib_ah, bnxt_re_ah, ib_ah),
	INIT_RDMA_OBJ_SIZE(ib_cq, bnxt_re_cq, ib_cq),
	INIT_RDMA_OBJ_SIZE(ib_pd, bnxt_re_pd, ib_pd),
	INIT_RDMA_OBJ_SIZE(ib_srq, bnxt_re_srq, ib_srq),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, bnxt_re_ucontext, ib_uctx),
};

static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
{
	struct ib_device *ibdev = &rdev->ibdev;
	int ret;

	/* ib device init */
	ibdev->node_type = RDMA_NODE_IB_CA;
	strlcpy(ibdev->node_desc, BNXT_RE_DESC " HCA",
		strlen(BNXT_RE_DESC) + 5);
	ibdev->phys_port_cnt = 1;

	bnxt_qplib_get_guid(rdev->netdev->dev_addr, (u8 *)&ibdev->node_guid);

	ibdev->num_comp_vectors = rdev->num_msix - 1;
	ibdev->dev.parent = &rdev->en_dev->pdev->dev;
	ibdev->local_dma_lkey = BNXT_QPLIB_RSVD_LKEY;

	/* User space */
	ibdev->uverbs_cmd_mask =
			(1ull << IB_USER_VERBS_CMD_GET_CONTEXT)		|
			(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)	|
			(1ull << IB_USER_VERBS_CMD_QUERY_PORT)		|
			(1ull << IB_USER_VERBS_CMD_ALLOC_PD)		|
			(1ull << IB_USER_VERBS_CMD_DEALLOC_PD)		|
			(1ull << IB_USER_VERBS_CMD_REG_MR)		|
			(1ull << IB_USER_VERBS_CMD_REREG_MR)		|
			(1ull << IB_USER_VERBS_CMD_DEREG_MR)		|
			(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
			(1ull << IB_USER_VERBS_CMD_CREATE_CQ)		|
			(1ull << IB_USER_VERBS_CMD_RESIZE_CQ)		|
			(1ull << IB_USER_VERBS_CMD_DESTROY_CQ)		|
			(1ull << IB_USER_VERBS_CMD_CREATE_QP)		|
			(1ull << IB_USER_VERBS_CMD_MODIFY_QP)		|
			(1ull << IB_USER_VERBS_CMD_QUERY_QP)		|
			(1ull << IB_USER_VERBS_CMD_DESTROY_QP)		|
			(1ull << IB_USER_VERBS_CMD_CREATE_SRQ)		|
			(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)		|
			(1ull << IB_USER_VERBS_CMD_QUERY_SRQ)		|
			(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)		|
			(1ull << IB_USER_VERBS_CMD_CREATE_AH)		|
			(1ull << IB_USER_VERBS_CMD_MODIFY_AH)		|
			(1ull << IB_USER_VERBS_CMD_QUERY_AH)		|
			(1ull << IB_USER_VERBS_CMD_DESTROY_AH);
	/* POLL_CQ and REQ_NOTIFY_CQ are handled directly in libbnxt_re */

	rdma_set_device_sysfs_group(ibdev, &bnxt_re_dev_attr_group);
	ib_set_device_ops(ibdev, &bnxt_re_dev_ops);
	ret = ib_device_set_netdev(&rdev->ibdev, rdev->netdev, 1);
	if (ret)
		return ret;

	dma_set_max_seg_size(&rdev->en_dev->pdev->dev, UINT_MAX);
	return ib_register_device(ibdev, "bnxt_re%d", &rdev->en_dev->pdev->dev);
}

static void bnxt_re_dev_remove(struct bnxt_re_dev *rdev)
{
	dev_put(rdev->netdev);
	rdev->netdev = NULL;
	mutex_lock(&bnxt_re_dev_lock);
	list_del_rcu(&rdev->list);
	mutex_unlock(&bnxt_re_dev_lock);

	synchronize_rcu();
}

static struct bnxt_re_dev *bnxt_re_dev_add(struct net_device *netdev,
					   struct bnxt_en_dev *en_dev)
{
	struct bnxt_re_dev *rdev;

	/* Allocate bnxt_re_dev instance here */
	rdev = ib_alloc_device(bnxt_re_dev, ibdev);
	if (!rdev) {
		ibdev_err(NULL, "%s: bnxt_re_dev allocation failure!",
			  ROCE_DRV_MODULE_NAME);
		return NULL;
	}
	/* Default values */
	rdev->netdev = netdev;
	dev_hold(rdev->netdev);
	rdev->en_dev = en_dev;
	rdev->id = rdev->en_dev->pdev->devfn;
	INIT_LIST_HEAD(&rdev->qp_list);
	mutex_init(&rdev->qp_lock);
	atomic_set(&rdev->qp_count, 0);
	atomic_set(&rdev->cq_count, 0);
	atomic_set(&rdev->srq_count, 0);
	atomic_set(&rdev->mr_count, 0);
	atomic_set(&rdev->mw_count, 0);
	rdev->cosq[0] = 0xFFFF;
	rdev->cosq[1] = 0xFFFF;

	mutex_lock(&bnxt_re_dev_lock);
	list_add_tail_rcu(&rdev->list, &bnxt_re_dev_list);
	mutex_unlock(&bnxt_re_dev_lock);
	return rdev;
}

static int bnxt_re_handle_unaffi_async_event(struct creq_func_event
					     *unaffi_async)
{
	switch (unaffi_async->event) {
	case CREQ_FUNC_EVENT_EVENT_TX_WQE_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TX_DATA_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_RX_WQE_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_RX_DATA_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CQ_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TQM_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCQ_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCS_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCC_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCM_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TIM_ERROR:
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static int bnxt_re_handle_qp_async_event(struct creq_qp_event *qp_event,
					 struct bnxt_re_qp *qp)
{
	struct ib_event event;
	unsigned int flags;

	if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR &&
	    rdma_is_kernel_res(&qp->ib_qp.res)) {
		flags = bnxt_re_lock_cqs(qp);
		bnxt_qplib_add_flush_qp(&qp->qplib_qp);
		bnxt_re_unlock_cqs(qp, flags);
	}

	memset(&event, 0, sizeof(event));
	if (qp->qplib_qp.srq) {
		event.device = &qp->rdev->ibdev;
		event.element.qp = &qp->ib_qp;
		event.event = IB_EVENT_QP_LAST_WQE_REACHED;
	}

	if (event.device && qp->ib_qp.event_handler)
		qp->ib_qp.event_handler(&event, qp->ib_qp.qp_context);

	return 0;
}

static int bnxt_re_handle_affi_async_event(struct creq_qp_event *affi_async,
					   void *obj)
{
	int rc = 0;
	u8 event;

	if (!obj)
		return rc; /* QP was already dead, still return success */

	event = affi_async->event;
	if (event == CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION) {
		struct bnxt_qplib_qp *lib_qp = obj;
		struct bnxt_re_qp *qp = container_of(lib_qp, struct bnxt_re_qp,
						     qplib_qp);
		rc = bnxt_re_handle_qp_async_event(affi_async, qp);
	}
	return rc;
}

static int bnxt_re_aeq_handler(struct bnxt_qplib_rcfw *rcfw,
			       void *aeqe, void *obj)
{
	struct creq_qp_event *affi_async;
	struct creq_func_event *unaffi_async;
	u8 type;
	int rc;

	type = ((struct creq_base *)aeqe)->type;
	if (type == CREQ_BASE_TYPE_FUNC_EVENT) {
		unaffi_async = aeqe;
		rc = bnxt_re_handle_unaffi_async_event(unaffi_async);
	} else {
		affi_async = aeqe;
		rc = bnxt_re_handle_affi_async_event(affi_async, obj);
	}

	return rc;
}

static int bnxt_re_srqn_handler(struct bnxt_qplib_nq *nq,
				struct bnxt_qplib_srq *handle, u8 event)
{
	struct bnxt_re_srq *srq = container_of(handle, struct bnxt_re_srq,
					       qplib_srq);
	struct ib_event ib_event;
	int rc = 0;

	if (!srq) {
		ibdev_err(NULL, "%s: SRQ is NULL, SRQN not handled",
			  ROCE_DRV_MODULE_NAME);
		rc = -EINVAL;
		goto done;
	}
	ib_event.device = &srq->rdev->ibdev;
	ib_event.element.srq = &srq->ib_srq;
	if (event == NQ_SRQ_EVENT_EVENT_SRQ_THRESHOLD_EVENT)
		ib_event.event = IB_EVENT_SRQ_LIMIT_REACHED;
	else
		ib_event.event = IB_EVENT_SRQ_ERR;

	if (srq->ib_srq.event_handler) {
		/* Lock event_handler? */
		(*srq->ib_srq.event_handler)(&ib_event,
					     srq->ib_srq.srq_context);
	}
done:
	return rc;
}

static int bnxt_re_cqn_handler(struct bnxt_qplib_nq *nq,
			       struct bnxt_qplib_cq *handle)
{
	struct bnxt_re_cq *cq = container_of(handle, struct bnxt_re_cq,
					     qplib_cq);

	if (!cq) {
		ibdev_err(NULL, "%s: CQ is NULL, CQN not handled",
			  ROCE_DRV_MODULE_NAME);
		return -EINVAL;
	}
	if (cq->ib_cq.comp_handler) {
		/* Lock comp_handler? */
		(*cq->ib_cq.comp_handler)(&cq->ib_cq, cq->ib_cq.cq_context);
	}

	return 0;
}

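/* Gen P5 chips use a fixed NQ doorbell offset (different for PF and VF);
 * older chips take the per-vector doorbell offset reported by bnxt_en.
 */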
#define BNXT_RE_GEN_P5_PF_NQ_DB		0x10000
#define BNXT_RE_GEN_P5_VF_NQ_DB		0x4000
static u32 bnxt_re_get_nqdb_offset(struct bnxt_re_dev *rdev, u16 indx)
{
	return bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
		(rdev->is_virtfn ? BNXT_RE_GEN_P5_VF_NQ_DB :
				   BNXT_RE_GEN_P5_PF_NQ_DB) :
				   rdev->msix_entries[indx].db_offset;
}

static void bnxt_re_cleanup_res(struct bnxt_re_dev *rdev)
{
	int i;

	for (i = 1; i < rdev->num_msix; i++)
		bnxt_qplib_disable_nq(&rdev->nq[i - 1]);

	if (rdev->qplib_res.rcfw)
		bnxt_qplib_cleanup_res(&rdev->qplib_res);
}

static int bnxt_re_init_res(struct bnxt_re_dev *rdev)
{
	int num_vec_enabled = 0;
	int rc = 0, i;
	u32 db_offt;

	bnxt_qplib_init_res(&rdev->qplib_res);

	for (i = 1; i < rdev->num_msix; i++) {
		db_offt = bnxt_re_get_nqdb_offset(rdev, i);
		rc = bnxt_qplib_enable_nq(rdev->en_dev->pdev, &rdev->nq[i - 1],
					  i - 1, rdev->msix_entries[i].vector,
					  db_offt, &bnxt_re_cqn_handler,
					  &bnxt_re_srqn_handler);
		if (rc) {
			ibdev_err(&rdev->ibdev,
				  "Failed to enable NQ with rc = 0x%x", rc);
			goto fail;
		}
		num_vec_enabled++;
	}
	return 0;
fail:
	/* Only disable the NQs that were actually enabled */
	for (i = num_vec_enabled - 1; i >= 0; i--)
		bnxt_qplib_disable_nq(&rdev->nq[i]);
	return rc;
}

static void bnxt_re_free_nq_res(struct bnxt_re_dev *rdev)
{
	u8 type;
	int i;

	for (i = 0; i < rdev->num_msix - 1; i++) {
		type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
		bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id, type);
		bnxt_qplib_free_nq(&rdev->nq[i]);
		rdev->nq[i].res = NULL;
	}
}

static void bnxt_re_free_res(struct bnxt_re_dev *rdev)
{
	bnxt_re_free_nq_res(rdev);

	if (rdev->qplib_res.dpi_tbl.max) {
		bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
				       &rdev->qplib_res.dpi_tbl,
				       &rdev->dpi_privileged);
	}
	if (rdev->qplib_res.rcfw) {
		bnxt_qplib_free_res(&rdev->qplib_res);
		rdev->qplib_res.rcfw = NULL;
	}
}

static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
{
	struct bnxt_re_ring_attr rattr = {};
	int num_vec_created = 0;
	int rc = 0, i;
	u8 type;

	/* Configure and allocate resources for qplib */
	rdev->qplib_res.rcfw = &rdev->rcfw;
	rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr,
				     rdev->is_virtfn);
	if (rc)
		goto fail;

	rc = bnxt_qplib_alloc_res(&rdev->qplib_res, rdev->en_dev->pdev,
				  rdev->netdev, &rdev->dev_attr);
	if (rc)
		goto fail;

	rc = bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl,
				  &rdev->dpi_privileged,
				  rdev);
	if (rc)
		goto dealloc_res;

	for (i = 0; i < rdev->num_msix - 1; i++) {
		struct bnxt_qplib_nq *nq;

		nq = &rdev->nq[i];
		nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;
		rc = bnxt_qplib_alloc_nq(&rdev->qplib_res, &rdev->nq[i]);
		if (rc) {
			ibdev_err(&rdev->ibdev, "Alloc Failed NQ%d rc:%#x",
				  i, rc);
			goto free_nq;
		}
		type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
		rattr.dma_arr = nq->hwq.pbl[PBL_LVL_0].pg_map_arr;
		rattr.pages = nq->hwq.pbl[rdev->nq[i].hwq.level].pg_count;
		rattr.type = type;
		rattr.mode = RING_ALLOC_REQ_INT_MODE_MSIX;
		rattr.depth = BNXT_QPLIB_NQE_MAX_CNT - 1;
		rattr.lrid = rdev->msix_entries[i + 1].ring_idx;
		rc = bnxt_re_net_ring_alloc(rdev, &rattr, &nq->ring_id);
		if (rc) {
			ibdev_err(&rdev->ibdev,
				  "Failed to allocate NQ fw id with rc = 0x%x",
				  rc);
			bnxt_qplib_free_nq(&rdev->nq[i]);
			goto free_nq;
		}
		num_vec_created++;
	}
	return 0;
free_nq:
	for (i = num_vec_created - 1; i >= 0; i--) {
		type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
		bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id, type);
		bnxt_qplib_free_nq(&rdev->nq[i]);
	}
	bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
			       &rdev->qplib_res.dpi_tbl,
			       &rdev->dpi_privileged);
dealloc_res:
	bnxt_qplib_free_res(&rdev->qplib_res);

fail:
	rdev->qplib_res.rcfw = NULL;
	return rc;
}

static void bnxt_re_dispatch_event(struct ib_device *ibdev, struct ib_qp *qp,
				   u8 port_num, enum ib_event_type event)
{
	struct ib_event ib_event;

	ib_event.device = ibdev;
	if (qp) {
		ib_event.element.qp = qp;
		ib_event.event = event;
		if (qp->event_handler)
			qp->event_handler(&ib_event, qp->qp_context);
	} else {
		ib_event.element.port_num = port_num;
		ib_event.event = event;
		ib_dispatch_event(&ib_event);
	}
}

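/* Query the firmware (HWRM_QUEUE_PRI2COS_QCFG) for the port's current
 * priority to CoS queue id mapping.
 */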
#define HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_IVLAN      0x02
static int bnxt_re_query_hwrm_pri2cos(struct bnxt_re_dev *rdev, u8 dir,
				      u64 *cid_map)
{
	struct hwrm_queue_pri2cos_qcfg_input req = {0};
	struct bnxt *bp = netdev_priv(rdev->netdev);
	struct hwrm_queue_pri2cos_qcfg_output resp;
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct bnxt_fw_msg fw_msg;
	u32 flags = 0;
	u8 *qcfgmap, *tmp_map;
	int rc = 0, i;

	if (!cid_map)
		return -EINVAL;

	memset(&fw_msg, 0, sizeof(fw_msg));
	bnxt_re_init_hwrm_hdr(rdev, (void *)&req,
			      HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
	flags |= (dir & 0x01);
	flags |= HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_IVLAN;
	req.flags = cpu_to_le32(flags);
	req.port_id = bp->pf.port_id;

	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (rc)
		return rc;

	if (resp.queue_cfg_info)
		ibdev_warn(&rdev->ibdev,
			   "Asymmetric cos queue configuration detected on device, QoS may not be fully functional\n");
	qcfgmap = &resp.pri0_cos_queue_id;
	tmp_map = (u8 *)cid_map;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
		tmp_map[i] = qcfgmap[i];

	return rc;
}

static bool bnxt_re_is_qp1_or_shadow_qp(struct bnxt_re_dev *rdev,
					struct bnxt_re_qp *qp)
{
	return (qp->ib_qp.qp_type == IB_QPT_GSI) ||
	       (qp == rdev->gsi_ctx.gsi_sqp);
}

static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev)
{
	int mask = IB_QP_STATE;
	struct ib_qp_attr qp_attr;
	struct bnxt_re_qp *qp;

	qp_attr.qp_state = IB_QPS_ERR;
	mutex_lock(&rdev->qp_lock);
	list_for_each_entry(qp, &rdev->qp_list, list) {
		/* Modify the state of all QPs except QP1/Shadow QP */
		if (!bnxt_re_is_qp1_or_shadow_qp(rdev, qp)) {
			if (qp->qplib_qp.state !=
			    CMDQ_MODIFY_QP_NEW_STATE_RESET &&
			    qp->qplib_qp.state !=
			    CMDQ_MODIFY_QP_NEW_STATE_ERR) {
				bnxt_re_dispatch_event(&rdev->ibdev, &qp->ib_qp,
						       1, IB_EVENT_QP_FATAL);
				bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, mask,
						  NULL);
			}
		}
	}
	mutex_unlock(&rdev->qp_lock);
}

static int bnxt_re_update_gid(struct bnxt_re_dev *rdev)
{
	struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
	struct bnxt_qplib_gid gid;
	u16 gid_idx, index;
	int rc = 0;

	if (!ib_device_try_get(&rdev->ibdev))
		return 0;

	if (!sgid_tbl) {
		ibdev_err(&rdev->ibdev, "QPLIB: SGID table not allocated");
		rc = -EINVAL;
		goto out;
	}

	for (index = 0; index < sgid_tbl->active; index++) {
		gid_idx = sgid_tbl->hw_id[index];

		if (!memcmp(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero,
			    sizeof(bnxt_qplib_gid_zero)))
			continue;
		/* Only the VLAN enable setting of non-VLAN GIDs needs to be
		 * modified here; for VLAN GIDs it is set while adding the GID
		 */
		if (sgid_tbl->vlan[index])
			continue;

		memcpy(&gid, &sgid_tbl->tbl[index], sizeof(gid));

		rc = bnxt_qplib_update_sgid(sgid_tbl, &gid, gid_idx,
					    rdev->qplib_res.netdev->dev_addr);
	}
out:
	ib_device_put(&rdev->ibdev);
	return rc;
}

static u32 bnxt_re_get_priority_mask(struct bnxt_re_dev *rdev)
{
	u32 prio_map = 0, tmp_map = 0;
	struct net_device *netdev;
	struct dcb_app app;

	netdev = rdev->netdev;

	memset(&app, 0, sizeof(app));
	app.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE;
	app.protocol = ETH_P_IBOE;
	tmp_map = dcb_ieee_getapp_mask(netdev, &app);
	prio_map = tmp_map;

	app.selector = IEEE_8021QAZ_APP_SEL_DGRAM;
	app.protocol = ROCE_V2_UDP_DPORT;
	tmp_map = dcb_ieee_getapp_mask(netdev, &app);
	prio_map |= tmp_map;

	return prio_map;
}

static void bnxt_re_parse_cid_map(u8 prio_map, u8 *cid_map, u16 *cosq)
{
	u16 prio;
	u8 id;

	for (prio = 0, id = 0; prio < 8; prio++) {
		if (prio_map & (1 << prio)) {
			cosq[id] = cid_map[prio];
			id++;
			if (id == 2) /* Max 2 tcs supported */
				break;
		}
	}
}

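/* Derive the RoCE priority mask from the DCB application table, map the
 * active priorities to CoS queue ids through firmware, program them and,
 * when priority vlan tagging toggles, refresh the GID entries.
 */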
static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev)
{
	u8 prio_map = 0;
	u64 cid_map;
	int rc;

	/* Get priority for roce */
	prio_map = bnxt_re_get_priority_mask(rdev);

	if (prio_map == rdev->cur_prio_map)
		return 0;
	rdev->cur_prio_map = prio_map;
	/* Get cosq id for this priority */
	rc = bnxt_re_query_hwrm_pri2cos(rdev, 0, &cid_map);
	if (rc) {
		ibdev_warn(&rdev->ibdev, "no cos for p_mask %x\n", prio_map);
		return rc;
	}
	/* Parse CoS IDs for app priority */
	bnxt_re_parse_cid_map(prio_map, (u8 *)&cid_map, rdev->cosq);

	/* Config BONO. */
	rc = bnxt_qplib_map_tc2cos(&rdev->qplib_res, rdev->cosq);
	if (rc) {
		ibdev_warn(&rdev->ibdev, "no tc for cos{%x, %x}\n",
			   rdev->cosq[0], rdev->cosq[1]);
		return rc;
	}

	/* Actual priorities are not programmed as they are already
	 * done by L2 driver; just enable or disable priority vlan tagging
	 */
	if ((prio_map == 0 && rdev->qplib_res.prio) ||
	    (prio_map != 0 && !rdev->qplib_res.prio)) {
		rdev->qplib_res.prio = prio_map ? true : false;

		bnxt_re_update_gid(rdev);
	}

	return 0;
}

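/* Query the firmware interface version via HWRM_VER_GET and cache it as
 * a single 64-bit value packing the four 16-bit fields
 * major.minor.build.patch; e.g. interface version 1.10.2.95 is stored
 * as 0x0001000a0002005f.
 */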
static void bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev)
{
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct hwrm_ver_get_output resp = {0};
	struct hwrm_ver_get_input req = {0};
	struct bnxt_fw_msg fw_msg;
	int rc = 0;

	memset(&fw_msg, 0, sizeof(fw_msg));
	bnxt_re_init_hwrm_hdr(rdev, (void *)&req,
			      HWRM_VER_GET, -1, -1);
	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req.hwrm_intf_min = HWRM_VERSION_MINOR;
	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to query HW version, rc = 0x%x",
			  rc);
		return;
	}
	rdev->qplib_ctx.hwrm_intf_ver =
		(u64)le16_to_cpu(resp.hwrm_intf_major) << 48 |
		(u64)le16_to_cpu(resp.hwrm_intf_minor) << 32 |
		(u64)le16_to_cpu(resp.hwrm_intf_build) << 16 |
		le16_to_cpu(resp.hwrm_intf_patch);
}

static int bnxt_re_ib_init(struct bnxt_re_dev *rdev)
{
	int rc = 0;
	u32 event;

	/* Register ib dev */
	rc = bnxt_re_register_ib(rdev);
	if (rc) {
		pr_err("Failed to register with IB: %#x\n", rc);
		return rc;
	}
	dev_info(rdev_to_dev(rdev), "Device registered successfully");
	ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed,
			 &rdev->active_width);
	set_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags);

	event = netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev) ?
		IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;

	bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, event);

	return rc;
}

static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev)
{
	u8 type;
	int rc;

	if (test_and_clear_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags))
		cancel_delayed_work_sync(&rdev->worker);

	if (test_and_clear_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED,
			       &rdev->flags))
		bnxt_re_cleanup_res(rdev);
	if (test_and_clear_bit(BNXT_RE_FLAG_RESOURCES_ALLOCATED, &rdev->flags))
		bnxt_re_free_res(rdev);

	if (test_and_clear_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags)) {
		rc = bnxt_qplib_deinit_rcfw(&rdev->rcfw);
		if (rc)
			ibdev_warn(&rdev->ibdev,
				   "Failed to deinitialize RCFW: %#x", rc);
		bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
		bnxt_qplib_free_ctx(&rdev->qplib_res, &rdev->qplib_ctx);
		bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
		type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
		bnxt_re_net_ring_free(rdev, rdev->rcfw.creq.ring_id, type);
		bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
	}
	if (test_and_clear_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags)) {
		rc = bnxt_re_free_msix(rdev);
		if (rc)
			ibdev_warn(&rdev->ibdev,
				   "Failed to free MSI-X vectors: %#x", rc);
	}

	bnxt_re_destroy_chip_ctx(rdev);
	if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags)) {
		rc = bnxt_re_unregister_netdev(rdev);
		if (rc)
			ibdev_warn(&rdev->ibdev,
				   "Failed to unregister with netdev: %#x", rc);
	}
}

/* Worker thread for polling periodic events. Now used for QoS programming */
static void bnxt_re_worker(struct work_struct *work)
{
	struct bnxt_re_dev *rdev = container_of(work, struct bnxt_re_dev,
						worker.work);

	bnxt_re_setup_qos(rdev);
	schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
}

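/* Bring-up sequence for one RoCE device instance: register with the L2
 * driver, set up the chip context, request MSI-X vectors, create the
 * RCFW (CREQ) channel and ring, allocate context and stats memory in
 * firmware, then allocate and initialize the NQ resources and kick off
 * the periodic QoS worker (PF only). The error labels unwind in the
 * reverse order.
 */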
static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
{
	struct bnxt_qplib_creq_ctx *creq;
	struct bnxt_re_ring_attr rattr;
	u32 db_offt;
	int vid;
	u8 type;
	int rc;

	/* Register the new RoCE device instance with the netdev */
	memset(&rattr, 0, sizeof(rattr));
	rc = bnxt_re_register_netdev(rdev);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to register with netdev: %#x\n", rc);
		return -EINVAL;
	}
	set_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);

	rc = bnxt_re_setup_chip_ctx(rdev, wqe_mode);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to get chip context\n");
		return -EINVAL;
	}

	/* Check whether VF or PF */
	bnxt_re_get_sriov_func_type(rdev);

	rc = bnxt_re_request_msix(rdev);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to get MSI-X vectors: %#x\n", rc);
		rc = -EINVAL;
		goto fail;
	}
	set_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags);

	bnxt_re_query_hwrm_intf_version(rdev);

	/* Establish RCFW Communication Channel to initialize the context
	 * memory for the function and all child VFs
	 */
	rc = bnxt_qplib_alloc_rcfw_channel(&rdev->qplib_res, &rdev->rcfw,
					   &rdev->qplib_ctx,
					   BNXT_RE_MAX_QPC_COUNT);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to allocate RCFW Channel: %#x\n", rc);
		goto fail;
	}

	type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
	creq = &rdev->rcfw.creq;
	rattr.dma_arr = creq->hwq.pbl[PBL_LVL_0].pg_map_arr;
	rattr.pages = creq->hwq.pbl[creq->hwq.level].pg_count;
	rattr.type = type;
	rattr.mode = RING_ALLOC_REQ_INT_MODE_MSIX;
	rattr.depth = BNXT_QPLIB_CREQE_MAX_CNT - 1;
	rattr.lrid = rdev->msix_entries[BNXT_RE_AEQ_IDX].ring_idx;
	rc = bnxt_re_net_ring_alloc(rdev, &rattr, &creq->ring_id);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to allocate CREQ: %#x\n", rc);
		goto free_rcfw;
	}
	db_offt = bnxt_re_get_nqdb_offset(rdev, BNXT_RE_AEQ_IDX);
	vid = rdev->msix_entries[BNXT_RE_AEQ_IDX].vector;
	rc = bnxt_qplib_enable_rcfw_channel(&rdev->rcfw,
					    vid, db_offt, rdev->is_virtfn,
					    &bnxt_re_aeq_handler);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to enable RCFW channel: %#x\n",
			  rc);
		goto free_ring;
	}

	rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr,
				     rdev->is_virtfn);
	if (rc)
		goto disable_rcfw;

	bnxt_re_set_resource_limits(rdev);

	rc = bnxt_qplib_alloc_ctx(&rdev->qplib_res, &rdev->qplib_ctx, 0,
				  bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx));
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to allocate QPLIB context: %#x\n", rc);
		goto disable_rcfw;
	}
	rc = bnxt_re_net_stats_ctx_alloc(rdev,
					 rdev->qplib_ctx.stats.dma_map,
					 &rdev->qplib_ctx.stats.fw_id);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to allocate stats context: %#x\n", rc);
		goto free_ctx;
	}

	rc = bnxt_qplib_init_rcfw(&rdev->rcfw, &rdev->qplib_ctx,
				  rdev->is_virtfn);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to initialize RCFW: %#x\n", rc);
		goto free_sctx;
	}
	set_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags);

	/* Resources based on the 'new' device caps */
	rc = bnxt_re_alloc_res(rdev);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to allocate resources: %#x\n", rc);
		goto fail;
	}
	set_bit(BNXT_RE_FLAG_RESOURCES_ALLOCATED, &rdev->flags);
	rc = bnxt_re_init_res(rdev);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to initialize resources: %#x\n", rc);
		goto fail;
	}

	set_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED, &rdev->flags);

	if (!rdev->is_virtfn) {
		rc = bnxt_re_setup_qos(rdev);
		if (rc)
			ibdev_info(&rdev->ibdev,
				   "RoCE priority not yet configured\n");

		INIT_DELAYED_WORK(&rdev->worker, bnxt_re_worker);
		set_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags);
		schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
	}

	return 0;
free_sctx:
	bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
free_ctx:
	bnxt_qplib_free_ctx(&rdev->qplib_res, &rdev->qplib_ctx);
disable_rcfw:
	bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
free_ring:
	type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
	bnxt_re_net_ring_free(rdev, rdev->rcfw.creq.ring_id, type);
free_rcfw:
	bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
fail:
	bnxt_re_dev_uninit(rdev);

	return rc;
}

static void bnxt_re_dev_unreg(struct bnxt_re_dev *rdev)
{
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct net_device *netdev = rdev->netdev;

	bnxt_re_dev_remove(rdev);

	if (netdev)
		bnxt_re_dev_unprobe(netdev, en_dev);
}

static int bnxt_re_dev_reg(struct bnxt_re_dev **rdev, struct net_device *netdev)
{
	struct bnxt_en_dev *en_dev;
	int rc = 0;

	if (!is_bnxt_re_dev(netdev))
		return -ENODEV;

	en_dev = bnxt_re_dev_probe(netdev);
	if (IS_ERR(en_dev)) {
		if (en_dev != ERR_PTR(-ENODEV))
			ibdev_err(&(*rdev)->ibdev, "%s: Failed to probe\n",
				  ROCE_DRV_MODULE_NAME);
		rc = PTR_ERR(en_dev);
		goto exit;
	}
	*rdev = bnxt_re_dev_add(netdev, en_dev);
	if (!*rdev) {
		rc = -ENOMEM;
		bnxt_re_dev_unprobe(netdev, en_dev);
		goto exit;
	}
exit:
	return rc;
}

static void bnxt_re_remove_device(struct bnxt_re_dev *rdev)
{
	bnxt_re_dev_uninit(rdev);
	pci_dev_put(rdev->en_dev->pdev);
	bnxt_re_dev_unreg(rdev);
}

static int bnxt_re_add_device(struct bnxt_re_dev **rdev,
			      struct net_device *netdev, u8 wqe_mode)
{
	int rc;

	rc = bnxt_re_dev_reg(rdev, netdev);
	if (rc == -ENODEV)
		return rc;
	if (rc) {
		pr_err("Failed to register with the device %s: %#x\n",
		       netdev->name, rc);
		return rc;
	}

	pci_dev_get((*rdev)->en_dev->pdev);
	rc = bnxt_re_dev_init(*rdev, wqe_mode);
	if (rc) {
		pci_dev_put((*rdev)->en_dev->pdev);
		bnxt_re_dev_unreg(*rdev);
	}

	return rc;
}

static void bnxt_re_dealloc_driver(struct ib_device *ib_dev)
{
	struct bnxt_re_dev *rdev =
		container_of(ib_dev, struct bnxt_re_dev, ibdev);

	dev_info(rdev_to_dev(rdev), "Unregistering Device");

	rtnl_lock();
	bnxt_re_remove_device(rdev);
	rtnl_unlock();
}

/* Handle all deferred netevents tasks */
static void bnxt_re_task(struct work_struct *work)
{
	struct bnxt_re_work *re_work;
	struct bnxt_re_dev *rdev;
	int rc = 0;

	re_work = container_of(work, struct bnxt_re_work, work);
	rdev = re_work->rdev;

	if (re_work->event == NETDEV_REGISTER) {
		rc = bnxt_re_ib_init(rdev);
		if (rc) {
			ibdev_err(&rdev->ibdev,
				  "Failed to register with IB: %#x", rc);
			rtnl_lock();
			bnxt_re_remove_device(rdev);
			rtnl_unlock();
			goto exit;
		}
		goto exit;
	}

	if (!ib_device_try_get(&rdev->ibdev))
		goto exit;

	switch (re_work->event) {
	case NETDEV_UP:
		bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
				       IB_EVENT_PORT_ACTIVE);
		break;
	case NETDEV_DOWN:
		bnxt_re_dev_stop(rdev);
		break;
	case NETDEV_CHANGE:
		if (!netif_carrier_ok(rdev->netdev))
			bnxt_re_dev_stop(rdev);
		else if (netif_carrier_ok(rdev->netdev))
			bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
					       IB_EVENT_PORT_ACTIVE);
		ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed,
				 &rdev->active_width);
		break;
	default:
		break;
	}
	ib_device_put(&rdev->ibdev);
exit:
	put_device(&rdev->ibdev.dev);
	kfree(re_work);
}

/*
 * "Notifier chain callback can be invoked for the same chain from
 * different CPUs at the same time".
 *
 * For cases when the netdev is already present, our call to the
 * register_netdevice_notifier() will actually get the rtnl_lock()
 * before sending NETDEV_REGISTER and (if up) NETDEV_UP
 * events.
 *
 * But for cases when the netdev is not already present, the notifier
 * chain is subject to being invoked from different CPUs simultaneously.
 *
 * This is protected by the netdev_mutex.
 */
static int bnxt_re_netdev_event(struct notifier_block *notifier,
				unsigned long event, void *ptr)
{
	struct net_device *real_dev, *netdev = netdev_notifier_info_to_dev(ptr);
	struct bnxt_re_work *re_work;
	struct bnxt_re_dev *rdev;
	int rc = 0;
	bool sch_work = false;
	bool release = true;

	real_dev = rdma_vlan_dev_real_dev(netdev);
	if (!real_dev)
		real_dev = netdev;

	rdev = bnxt_re_from_netdev(real_dev);
	if (!rdev && event != NETDEV_REGISTER)
		return NOTIFY_OK;

	if (real_dev != netdev)
		goto exit;

	switch (event) {
	case NETDEV_REGISTER:
		if (rdev)
			break;
		rc = bnxt_re_add_device(&rdev, real_dev,
					BNXT_QPLIB_WQE_MODE_STATIC);
		if (!rc)
			sch_work = true;
		release = false;
		break;

	case NETDEV_UNREGISTER:
		ib_unregister_device_queued(&rdev->ibdev);
		break;

	default:
		sch_work = true;
		break;
	}
	if (sch_work) {
		/* Allocate for the deferred task */
		re_work = kzalloc(sizeof(*re_work), GFP_ATOMIC);
		if (re_work) {
			get_device(&rdev->ibdev.dev);
			re_work->rdev = rdev;
			re_work->event = event;
			re_work->vlan_dev = (real_dev == netdev ?
					     NULL : netdev);
			INIT_WORK(&re_work->work, bnxt_re_task);
			queue_work(bnxt_re_wq, &re_work->work);
		}
	}

exit:
	if (rdev && release)
		ib_device_put(&rdev->ibdev);
	return NOTIFY_DONE;
}

static struct notifier_block bnxt_re_netdev_notifier = {
	.notifier_call = bnxt_re_netdev_event
};

static int __init bnxt_re_mod_init(void)
{
	int rc = 0;

	pr_info("%s: %s", ROCE_DRV_MODULE_NAME, version);

	bnxt_re_wq = create_singlethread_workqueue("bnxt_re");
	if (!bnxt_re_wq)
		return -ENOMEM;

	INIT_LIST_HEAD(&bnxt_re_dev_list);

	rc = register_netdevice_notifier(&bnxt_re_netdev_notifier);
	if (rc) {
		pr_err("%s: Cannot register to netdevice_notifier",
		       ROCE_DRV_MODULE_NAME);
		goto err_netdev;
	}
	return 0;

err_netdev:
	destroy_workqueue(bnxt_re_wq);

	return rc;
}

static void __exit bnxt_re_mod_exit(void)
{
	struct bnxt_re_dev *rdev;

	unregister_netdevice_notifier(&bnxt_re_netdev_notifier);
	if (bnxt_re_wq)
		destroy_workqueue(bnxt_re_wq);
	list_for_each_entry(rdev, &bnxt_re_dev_list, list) {
		/* VF device removal should happen before removal of the
		 * PF device. Queue the VF unregistrations first, so that
		 * the VFs are removed before the PF when
		 * ib_unregister_driver() runs.
		 */
		if (rdev->is_virtfn)
			ib_unregister_device(&rdev->ibdev);
	}
	ib_unregister_driver(RDMA_DRIVER_BNXT_RE);
}

module_init(bnxt_re_mod_init);
module_exit(bnxt_re_mod_exit);