/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: RDMA Controller HW interface
 */

#define dev_fmt(fmt) "QPLIB: " fmt

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/prefetch.h>
#include <linux/delay.h>

#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_rcfw.h"
#include "qplib_sp.h"
#include "qplib_fp.h"

static void bnxt_qplib_service_creq(struct tasklet_struct *t);

/* Hardware communication channel */
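/* __wait_for_resp - sleep on cmdq->waitq until the CREQ handler clears the
 * completion bit for @cookie in cmdq_bitmap, or RCFW_CMD_WAIT_TIME_MS
 * elapses. Returns 0 on completion, -ETIMEDOUT on timeout.
 */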
static int __wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
{
	struct bnxt_qplib_cmdq_ctx *cmdq;
	u16 cbit;
	int rc;

	cmdq = &rcfw->cmdq;
	cbit = cookie % rcfw->cmdq_depth;
	rc = wait_event_timeout(cmdq->waitq,
				!test_bit(cbit, cmdq->cmdq_bitmap),
				msecs_to_jiffies(RCFW_CMD_WAIT_TIME_MS));
	return rc ? 0 : -ETIMEDOUT;
};

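/* __block_for_resp - polling variant of __wait_for_resp: repeatedly run the
 * CREQ service routine until the completion bit for @cookie is cleared or
 * RCFW_BLOCKED_CMD_WAIT_COUNT iterations expire. Returns 0 or -ETIMEDOUT.
 */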
static int __block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
{
	u32 count = RCFW_BLOCKED_CMD_WAIT_COUNT;
	struct bnxt_qplib_cmdq_ctx *cmdq;
	u16 cbit;

	cmdq = &rcfw->cmdq;
	cbit = cookie % rcfw->cmdq_depth;
	if (!test_bit(cbit, cmdq->cmdq_bitmap))
		goto done;
	do {
		udelay(1);
		bnxt_qplib_service_creq(&rcfw->creq.creq_tasklet);
	} while (test_bit(cbit, cmdq->cmdq_bitmap) && --count);
done:
	return count ? 0 : -ETIMEDOUT;
};

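/* __send_message - post a single RCFW command to the CMDQ: allocate a cookie,
 * reserve its completion slot in crsqe_tbl, copy the request into 16-byte
 * cmdqe units and ring the CMDQ doorbell. The caller waits for the matching
 * CREQ completion separately.
 */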
static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
			  struct creq_base *resp, void *sb, u8 is_block)
{
	struct bnxt_qplib_cmdq_ctx *cmdq = &rcfw->cmdq;
	struct bnxt_qplib_hwq *hwq = &cmdq->hwq;
	struct bnxt_qplib_crsqe *crsqe;
	struct bnxt_qplib_cmdqe *cmdqe;
	u32 sw_prod, cmdq_prod;
	struct pci_dev *pdev;
	unsigned long flags;
	u32 size, opcode;
	u16 cookie, cbit;
	u8 *preq;

	pdev = rcfw->pdev;

	opcode = req->opcode;
	if (!test_bit(FIRMWARE_INITIALIZED_FLAG, &cmdq->flags) &&
	    (opcode != CMDQ_BASE_OPCODE_QUERY_FUNC &&
	     opcode != CMDQ_BASE_OPCODE_INITIALIZE_FW &&
	     opcode != CMDQ_BASE_OPCODE_QUERY_VERSION)) {
		dev_err(&pdev->dev,
			"RCFW not initialized, reject opcode 0x%x\n", opcode);
		return -EINVAL;
	}

	if (test_bit(FIRMWARE_INITIALIZED_FLAG, &cmdq->flags) &&
	    opcode == CMDQ_BASE_OPCODE_INITIALIZE_FW) {
		dev_err(&pdev->dev, "RCFW already initialized!\n");
		return -EINVAL;
	}

	if (test_bit(FIRMWARE_TIMED_OUT, &cmdq->flags))
		return -ETIMEDOUT;

	/* Cmdq entries are 16 bytes each; a request can consume one or
	 * more cmdqe.
	 */
	spin_lock_irqsave(&hwq->lock, flags);
	if (req->cmd_size >= HWQ_FREE_SLOTS(hwq)) {
		dev_err(&pdev->dev, "RCFW: CMDQ is full!\n");
		spin_unlock_irqrestore(&hwq->lock, flags);
		return -EAGAIN;
	}

	cookie = cmdq->seq_num & RCFW_MAX_COOKIE_VALUE;
	cbit = cookie % rcfw->cmdq_depth;
	if (is_block)
		cookie |= RCFW_CMD_IS_BLOCKING;

	set_bit(cbit, cmdq->cmdq_bitmap);
	req->cookie = cpu_to_le16(cookie);
	crsqe = &rcfw->crsqe_tbl[cbit];
	if (crsqe->resp) {
		spin_unlock_irqrestore(&hwq->lock, flags);
		return -EBUSY;
	}

	size = req->cmd_size;
	/* Change cmd_size to the number of 16-byte cmdq units;
	 * req->cmd_size is modified here.
	 */
	bnxt_qplib_set_cmd_slots(req);

	memset(resp, 0, sizeof(*resp));
	crsqe->resp = (struct creq_qp_event *)resp;
	crsqe->resp->cookie = req->cookie;
	crsqe->req_size = req->cmd_size;
	if (req->resp_size && sb) {
		struct bnxt_qplib_rcfw_sbuf *sbuf = sb;

		req->resp_addr = cpu_to_le64(sbuf->dma_addr);
		req->resp_size = (sbuf->size + BNXT_QPLIB_CMDQE_UNITS - 1) /
				  BNXT_QPLIB_CMDQE_UNITS;
	}

	preq = (u8 *)req;
	do {
		/* Locate the next cmdq slot */
		sw_prod = HWQ_CMP(hwq->prod, hwq);
		cmdqe = bnxt_qplib_get_qe(hwq, sw_prod, NULL);
		if (!cmdqe) {
			dev_err(&pdev->dev,
				"RCFW request failed with no cmdqe!\n");
			goto done;
		}
		/* Copy a segment of the req cmd to the cmdq */
		memset(cmdqe, 0, sizeof(*cmdqe));
		memcpy(cmdqe, preq, min_t(u32, size, sizeof(*cmdqe)));
		preq += min_t(u32, size, sizeof(*cmdqe));
		size -= min_t(u32, size, sizeof(*cmdqe));
		hwq->prod++;
	} while (size > 0);
	cmdq->seq_num++;

	cmdq_prod = hwq->prod & 0xFFFF;
	if (test_bit(FIRMWARE_FIRST_FLAG, &cmdq->flags)) {
		/* The very first doorbell write
		 * is required to set this flag
		 * which prompts the FW to reset
		 * its internal pointers
		 */
		cmdq_prod |= BIT(FIRMWARE_FIRST_FLAG);
		clear_bit(FIRMWARE_FIRST_FLAG, &cmdq->flags);
	}

	/* ring CMDQ DB */
	wmb();
	writel(cmdq_prod, cmdq->cmdq_mbox.prod);
	writel(RCFW_CMDQ_TRIG_VAL, cmdq->cmdq_mbox.db);
done:
	spin_unlock_irqrestore(&hwq->lock, flags);
	/* Return the CREQ response pointer */
	return 0;
}

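/* bnxt_qplib_rcfw_send_message - post an RCFW command and wait for its CREQ
 * completion. Retries __send_message() while the queue is full or the slot
 * is busy, then polls or sleeps (per @is_block) for the response, and
 * converts a non-zero completion status into -EFAULT.
 */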
int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
				 struct cmdq_base *req,
				 struct creq_base *resp,
				 void *sb, u8 is_block)
{
	struct creq_qp_event *evnt = (struct creq_qp_event *)resp;
	u16 cookie;
	u8 opcode, retry_cnt = 0xFF;
	int rc = 0;

	/* Prevent posting if f/w is not in a state to process */
	if (test_bit(ERR_DEVICE_DETACHED, &rcfw->cmdq.flags))
		return 0;

	do {
		opcode = req->opcode;
		rc = __send_message(rcfw, req, resp, sb, is_block);
		cookie = le16_to_cpu(req->cookie) & RCFW_MAX_COOKIE_VALUE;
		if (!rc)
			break;

		if (!retry_cnt || (rc != -EAGAIN && rc != -EBUSY)) {
			/* send failed */
			dev_err(&rcfw->pdev->dev, "cmdq[%#x]=%#x send failed\n",
				cookie, opcode);
			return rc;
		}
		is_block ? mdelay(1) : usleep_range(500, 1000);

	} while (retry_cnt--);

	if (is_block)
		rc = __block_for_resp(rcfw, cookie);
	else
		rc = __wait_for_resp(rcfw, cookie);
	if (rc) {
		/* timed out */
		dev_err(&rcfw->pdev->dev, "cmdq[%#x]=%#x timedout (%d)msec\n",
			cookie, opcode, RCFW_CMD_WAIT_TIME_MS);
		set_bit(FIRMWARE_TIMED_OUT, &rcfw->cmdq.flags);
		return rc;
	}

	if (evnt->status) {
		/* failed with status */
		dev_err(&rcfw->pdev->dev, "cmdq[%#x]=%#x status %#x\n",
			cookie, opcode, evnt->status);
		rc = -EFAULT;
	}

	return rc;
}
/* Completions */
static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw *rcfw,
					 struct creq_func_event *func_event)
{
	int rc;

	switch (func_event->event) {
	case CREQ_FUNC_EVENT_EVENT_TX_WQE_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TX_DATA_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_RX_WQE_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_RX_DATA_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CQ_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TQM_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCQ_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCS_ERROR:
		/* SRQ ctx error, call srq_handler??
		 * But there's no SRQ handle!
		 */
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCC_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCM_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TIM_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_VF_COMM_REQUEST:
		break;
	case CREQ_FUNC_EVENT_EVENT_RESOURCE_EXHAUSTED:
		break;
	default:
		return -EINVAL;
	}

	rc = rcfw->creq.aeq_handler(rcfw, (void *)func_event, NULL);
	return rc;
}

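/* bnxt_qplib_process_qp_event - handle a CREQ QP event: forward QP error
 * notifications to the async event handler, or complete the pending command
 * whose cookie matches by copying the event into the caller's response
 * buffer and counting how many sleeping waiters to wake.
 */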
static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
				       struct creq_qp_event *qp_event,
				       u32 *num_wait)
{
	struct creq_qp_error_notification *err_event;
	struct bnxt_qplib_hwq *hwq = &rcfw->cmdq.hwq;
	struct bnxt_qplib_crsqe *crsqe;
	struct bnxt_qplib_qp *qp;
	u16 cbit, blocked = 0;
	struct pci_dev *pdev;
	unsigned long flags;
	u32 wait_cmds = 0;
	__le16  mcookie;
	u16 cookie;
	int rc = 0;
	u32 qp_id, tbl_indx;

	pdev = rcfw->pdev;
	switch (qp_event->event) {
	case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION:
		err_event = (struct creq_qp_error_notification *)qp_event;
		qp_id = le32_to_cpu(err_event->xid);
		tbl_indx = map_qp_id_to_tbl_indx(qp_id, rcfw);
		qp = rcfw->qp_tbl[tbl_indx].qp_handle;
		dev_dbg(&pdev->dev, "Received QP error notification\n");
		dev_dbg(&pdev->dev,
			"qpid 0x%x, req_err=0x%x, resp_err=0x%x\n",
			qp_id, err_event->req_err_state_reason,
			err_event->res_err_state_reason);
		if (!qp)
			break;
		bnxt_qplib_mark_qp_error(qp);
		rc = rcfw->creq.aeq_handler(rcfw, qp_event, qp);
		break;
	default:
		/*
		 * Command Response
		 * cmdq->lock needs to be acquired to synchronize
		 * the command send and completion reaping. This function
		 * is always called with creq->lock held. Using
		 * the nested variant of spin_lock.
		 */

		spin_lock_irqsave_nested(&hwq->lock, flags,
					 SINGLE_DEPTH_NESTING);
		cookie = le16_to_cpu(qp_event->cookie);
		mcookie = qp_event->cookie;
		blocked = cookie & RCFW_CMD_IS_BLOCKING;
		cookie &= RCFW_MAX_COOKIE_VALUE;
		cbit = cookie % rcfw->cmdq_depth;
		crsqe = &rcfw->crsqe_tbl[cbit];
		if (crsqe->resp &&
		    crsqe->resp->cookie == mcookie) {
			memcpy(crsqe->resp, qp_event, sizeof(*qp_event));
			crsqe->resp = NULL;
		} else {
			if (crsqe->resp && crsqe->resp->cookie)
				dev_err(&pdev->dev,
					"CMD %s cookie sent=%#x, recd=%#x\n",
					crsqe->resp ? "mismatch" : "collision",
					crsqe->resp ? crsqe->resp->cookie : 0,
					mcookie);
		}
		if (!test_and_clear_bit(cbit, rcfw->cmdq.cmdq_bitmap))
			dev_warn(&pdev->dev,
				 "CMD bit %d was not requested\n", cbit);
		hwq->cons += crsqe->req_size;
		crsqe->req_size = 0;

		if (!blocked)
			wait_cmds++;
		spin_unlock_irqrestore(&hwq->lock, flags);
	}
	*num_wait += wait_cmds;
	return rc;
}

/* SP - CREQ Completion handlers */
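/* bnxt_qplib_service_creq - tasklet body: drain up to CREQ_ENTRY_POLL_BUDGET
 * CREQ entries, dispatch QP and function events, re-arm the CREQ doorbell
 * and wake any command waiters.
 */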
static void bnxt_qplib_service_creq(struct tasklet_struct *t)
{
	struct bnxt_qplib_rcfw *rcfw = from_tasklet(rcfw, t, creq.creq_tasklet);
	struct bnxt_qplib_creq_ctx *creq = &rcfw->creq;
	u32 type, budget = CREQ_ENTRY_POLL_BUDGET;
	struct bnxt_qplib_hwq *hwq = &creq->hwq;
	struct creq_base *creqe;
	u32 sw_cons, raw_cons;
	unsigned long flags;
	u32 num_wakeup = 0;

	/* Service the CREQ until budget is over */
	spin_lock_irqsave(&hwq->lock, flags);
	raw_cons = hwq->cons;
	while (budget > 0) {
		sw_cons = HWQ_CMP(raw_cons, hwq);
		creqe = bnxt_qplib_get_qe(hwq, sw_cons, NULL);
		if (!CREQ_CMP_VALID(creqe, raw_cons, hwq->max_elements))
			break;
		/* The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();

		type = creqe->type & CREQ_BASE_TYPE_MASK;
		switch (type) {
		case CREQ_BASE_TYPE_QP_EVENT:
			bnxt_qplib_process_qp_event
				(rcfw, (struct creq_qp_event *)creqe,
				 &num_wakeup);
			creq->stats.creq_qp_event_processed++;
			break;
		case CREQ_BASE_TYPE_FUNC_EVENT:
			if (!bnxt_qplib_process_func_event
			    (rcfw, (struct creq_func_event *)creqe))
				creq->stats.creq_func_event_processed++;
			else
				dev_warn(&rcfw->pdev->dev,
					 "aeqe:%#x Not handled\n", type);
			break;
		default:
			if (type != ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT)
				dev_warn(&rcfw->pdev->dev,
					 "creqe with event 0x%x not handled\n",
					 type);
			break;
		}
		raw_cons++;
		budget--;
	}

	if (hwq->cons != raw_cons) {
		hwq->cons = raw_cons;
		bnxt_qplib_ring_nq_db(&creq->creq_db.dbinfo,
				      rcfw->res->cctx, true);
	}
	spin_unlock_irqrestore(&hwq->lock, flags);
	if (num_wakeup)
		wake_up_nr(&rcfw->cmdq.waitq, num_wakeup);
}

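/* bnxt_qplib_creq_irq - MSI-X handler for the CREQ: prefetch the next entry
 * and defer processing to the CREQ tasklet.
 */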
static irqreturn_t bnxt_qplib_creq_irq(int irq, void *dev_instance)
{
	struct bnxt_qplib_rcfw *rcfw = dev_instance;
	struct bnxt_qplib_creq_ctx *creq;
	struct bnxt_qplib_hwq *hwq;
	u32 sw_cons;

	creq = &rcfw->creq;
	hwq = &creq->hwq;
	/* Prefetch the CREQ element */
	sw_cons = HWQ_CMP(hwq->cons, hwq);
	prefetch(bnxt_qplib_get_qe(hwq, sw_cons, NULL));

	tasklet_schedule(&creq->creq_tasklet);

	return IRQ_HANDLED;
}

/* RCFW */
int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw)
{
	struct cmdq_deinitialize_fw req;
	struct creq_deinitialize_fw_resp resp;
	u16 cmd_flags = 0;
	int rc;

	RCFW_CMD_PREP(req, DEINITIALIZE_FW, cmd_flags);
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
					  NULL, 0);
	if (rc)
		return rc;

	clear_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->cmdq.flags);
	return 0;
}

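/* bnxt_qplib_init_rcfw - send CMDQ_INITIALIZE_FW with the host context table
 * addresses and sizes and mark the firmware interface initialized. VFs skip
 * the context setup entirely; Gen P5 devices skip the context tables and
 * pass only the per-VF resource limits.
 */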
int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
			 struct bnxt_qplib_ctx *ctx, int is_virtfn)
{
	struct creq_initialize_fw_resp resp;
	struct cmdq_initialize_fw req;
	u16 cmd_flags = 0;
	u8 pgsz, lvl;
	int rc;

	RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags);
	/* Supply (log-base-2-of-host-page-size - base-page-shift)
	 * to bono to adjust the doorbell page sizes.
	 */
	req.log2_dbr_pg_size = cpu_to_le16(PAGE_SHIFT -
					   RCFW_DBR_BASE_PAGE_SHIFT);
	/*
	 * Gen P5 devices don't require this allocation,
	 * as the L2 driver does the same for RoCE as well.
	 * Also, VFs need not set up the HW context area; the PF
	 * sets up this area for the VF. Skip the HW programming
	 * in those cases.
	 */
	if (is_virtfn)
		goto skip_ctx_setup;
	if (bnxt_qplib_is_chip_gen_p5(rcfw->res->cctx))
		goto config_vf_res;

	lvl = ctx->qpc_tbl.level;
	pgsz = bnxt_qplib_base_pg_size(&ctx->qpc_tbl);
	req.qpc_pg_size_qpc_lvl = (pgsz << CMDQ_INITIALIZE_FW_QPC_PG_SIZE_SFT) |
				   lvl;
	lvl = ctx->mrw_tbl.level;
	pgsz = bnxt_qplib_base_pg_size(&ctx->mrw_tbl);
	req.mrw_pg_size_mrw_lvl = (pgsz << CMDQ_INITIALIZE_FW_QPC_PG_SIZE_SFT) |
				   lvl;
	lvl = ctx->srqc_tbl.level;
	pgsz = bnxt_qplib_base_pg_size(&ctx->srqc_tbl);
	req.srq_pg_size_srq_lvl = (pgsz << CMDQ_INITIALIZE_FW_QPC_PG_SIZE_SFT) |
				   lvl;
	lvl = ctx->cq_tbl.level;
	pgsz = bnxt_qplib_base_pg_size(&ctx->cq_tbl);
	req.cq_pg_size_cq_lvl = (pgsz << CMDQ_INITIALIZE_FW_QPC_PG_SIZE_SFT) |
				 lvl;
	lvl = ctx->tim_tbl.level;
	pgsz = bnxt_qplib_base_pg_size(&ctx->tim_tbl);
	req.tim_pg_size_tim_lvl = (pgsz << CMDQ_INITIALIZE_FW_QPC_PG_SIZE_SFT) |
				   lvl;
	lvl = ctx->tqm_ctx.pde.level;
	pgsz = bnxt_qplib_base_pg_size(&ctx->tqm_ctx.pde);
	req.tqm_pg_size_tqm_lvl = (pgsz << CMDQ_INITIALIZE_FW_QPC_PG_SIZE_SFT) |
				   lvl;
	req.qpc_page_dir =
		cpu_to_le64(ctx->qpc_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
	req.mrw_page_dir =
		cpu_to_le64(ctx->mrw_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
	req.srq_page_dir =
		cpu_to_le64(ctx->srqc_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
	req.cq_page_dir =
		cpu_to_le64(ctx->cq_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
	req.tim_page_dir =
		cpu_to_le64(ctx->tim_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
	req.tqm_page_dir =
		cpu_to_le64(ctx->tqm_ctx.pde.pbl[PBL_LVL_0].pg_map_arr[0]);

	req.number_of_qp = cpu_to_le32(ctx->qpc_tbl.max_elements);
	req.number_of_mrw = cpu_to_le32(ctx->mrw_tbl.max_elements);
	req.number_of_srq = cpu_to_le32(ctx->srqc_tbl.max_elements);
	req.number_of_cq = cpu_to_le32(ctx->cq_tbl.max_elements);

config_vf_res:
	req.max_qp_per_vf = cpu_to_le32(ctx->vf_res.max_qp_per_vf);
	req.max_mrw_per_vf = cpu_to_le32(ctx->vf_res.max_mrw_per_vf);
	req.max_srq_per_vf = cpu_to_le32(ctx->vf_res.max_srq_per_vf);
	req.max_cq_per_vf = cpu_to_le32(ctx->vf_res.max_cq_per_vf);
	req.max_gid_per_vf = cpu_to_le32(ctx->vf_res.max_gid_per_vf);

skip_ctx_setup:
	req.stat_ctx_id = cpu_to_le32(ctx->stats.fw_id);
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
					  NULL, 0);
	if (rc)
		return rc;
	set_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->cmdq.flags);
	return 0;
}

void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
{
	bitmap_free(rcfw->cmdq.cmdq_bitmap);
	kfree(rcfw->qp_tbl);
	kfree(rcfw->crsqe_tbl);
	bnxt_qplib_free_hwq(rcfw->res, &rcfw->cmdq.hwq);
	bnxt_qplib_free_hwq(rcfw->res, &rcfw->creq.hwq);
	rcfw->pdev = NULL;
}

int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res,
				  struct bnxt_qplib_rcfw *rcfw,
				  struct bnxt_qplib_ctx *ctx,
				  int qp_tbl_sz)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};
	struct bnxt_qplib_cmdq_ctx *cmdq;
	struct bnxt_qplib_creq_ctx *creq;

	rcfw->pdev = res->pdev;
	cmdq = &rcfw->cmdq;
	creq = &rcfw->creq;
	rcfw->res = res;

	sginfo.pgsize = PAGE_SIZE;
	sginfo.pgshft = PAGE_SHIFT;

	hwq_attr.sginfo = &sginfo;
	hwq_attr.res = rcfw->res;
	hwq_attr.depth = BNXT_QPLIB_CREQE_MAX_CNT;
	hwq_attr.stride = BNXT_QPLIB_CREQE_UNITS;
	hwq_attr.type = bnxt_qplib_get_hwq_type(res);

	if (bnxt_qplib_alloc_init_hwq(&creq->hwq, &hwq_attr)) {
		dev_err(&rcfw->pdev->dev,
			"HW channel CREQ allocation failed\n");
		goto fail;
	}
	if (ctx->hwrm_intf_ver < HWRM_VERSION_RCFW_CMDQ_DEPTH_CHECK)
		rcfw->cmdq_depth = BNXT_QPLIB_CMDQE_MAX_CNT_256;
	else
		rcfw->cmdq_depth = BNXT_QPLIB_CMDQE_MAX_CNT_8192;

	sginfo.pgsize = bnxt_qplib_cmdqe_page_size(rcfw->cmdq_depth);
	hwq_attr.depth = rcfw->cmdq_depth & 0x7FFFFFFF;
	hwq_attr.stride = BNXT_QPLIB_CMDQE_UNITS;
	hwq_attr.type = HWQ_TYPE_CTX;
	if (bnxt_qplib_alloc_init_hwq(&cmdq->hwq, &hwq_attr)) {
		dev_err(&rcfw->pdev->dev,
			"HW channel CMDQ allocation failed\n");
		goto fail;
	}

	rcfw->crsqe_tbl = kcalloc(cmdq->hwq.max_elements,
				  sizeof(*rcfw->crsqe_tbl), GFP_KERNEL);
	if (!rcfw->crsqe_tbl)
		goto fail;

	cmdq->cmdq_bitmap = bitmap_zalloc(rcfw->cmdq_depth, GFP_KERNEL);
	if (!cmdq->cmdq_bitmap)
		goto fail;

	/* Allocate one extra to hold the QP1 entries */
	rcfw->qp_tbl_size = qp_tbl_sz + 1;
	rcfw->qp_tbl = kcalloc(rcfw->qp_tbl_size, sizeof(struct bnxt_qplib_qp_node),
			       GFP_KERNEL);
	if (!rcfw->qp_tbl)
		goto fail;

	return 0;

fail:
	bnxt_qplib_free_rcfw_channel(rcfw);
	return -ENOMEM;
}

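/* bnxt_qplib_rcfw_stop_irq - quiesce CREQ processing: disable the tasklet,
 * mask the CREQ doorbell, synchronize with any in-flight handler and free
 * the MSI-X vector. @kill additionally kills the tasklet for a permanent
 * teardown.
 */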
void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw, bool kill)
{
	struct bnxt_qplib_creq_ctx *creq;

	creq = &rcfw->creq;

	if (!creq->requested)
		return;

	tasklet_disable(&creq->creq_tasklet);
	/* Mask h/w interrupts */
	bnxt_qplib_ring_nq_db(&creq->creq_db.dbinfo, rcfw->res->cctx, false);
	/* Sync with last running IRQ-handler */
	synchronize_irq(creq->msix_vec);
	if (kill)
		tasklet_kill(&creq->creq_tasklet);

	free_irq(creq->msix_vec, rcfw);
	kfree(creq->irq_name);
	creq->irq_name = NULL;
	creq->requested = false;
}

void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
{
	struct bnxt_qplib_creq_ctx *creq;
	struct bnxt_qplib_cmdq_ctx *cmdq;
	unsigned long indx;

	creq = &rcfw->creq;
	cmdq = &rcfw->cmdq;
	/* Make sure the HW channel is stopped! */
	bnxt_qplib_rcfw_stop_irq(rcfw, true);

	iounmap(cmdq->cmdq_mbox.reg.bar_reg);
	iounmap(creq->creq_db.reg.bar_reg);

	indx = find_first_bit(cmdq->cmdq_bitmap, rcfw->cmdq_depth);
	if (indx != rcfw->cmdq_depth)
		dev_err(&rcfw->pdev->dev,
			"disabling RCFW with pending cmd-bit %lx\n", indx);

	cmdq->cmdq_mbox.reg.bar_reg = NULL;
	creq->creq_db.reg.bar_reg = NULL;
	creq->aeq_handler = NULL;
	creq->msix_vec = 0;
}

int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector,
			      bool need_init)
{
	struct bnxt_qplib_creq_ctx *creq;
	struct bnxt_qplib_res *res;
	int rc;

	creq = &rcfw->creq;
	res = rcfw->res;

	if (creq->requested)
		return -EFAULT;

	creq->msix_vec = msix_vector;
	if (need_init)
		tasklet_setup(&creq->creq_tasklet, bnxt_qplib_service_creq);
	else
		tasklet_enable(&creq->creq_tasklet);

	creq->irq_name = kasprintf(GFP_KERNEL, "bnxt_re-creq@pci:%s",
				   pci_name(res->pdev));
	if (!creq->irq_name)
		return -ENOMEM;
	rc = request_irq(creq->msix_vec, bnxt_qplib_creq_irq, 0,
			 creq->irq_name, rcfw);
	if (rc) {
		kfree(creq->irq_name);
		creq->irq_name = NULL;
		tasklet_disable(&creq->creq_tasklet);
		return rc;
	}
	creq->requested = true;

	bnxt_qplib_ring_nq_db(&creq->creq_db.dbinfo, res->cctx, true);

	return 0;
}

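/* bnxt_qplib_map_cmdq_mbox - ioremap the CMDQ mailbox region of the
 * communication channel BAR and record the producer-index and doorbell
 * trigger registers (the producer offset differs between PF and VF).
 */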
static int bnxt_qplib_map_cmdq_mbox(struct bnxt_qplib_rcfw *rcfw, bool is_vf)
{
	struct bnxt_qplib_cmdq_mbox *mbox;
	resource_size_t bar_reg;
	struct pci_dev *pdev;
	u16 prod_offt;
	int rc = 0;

	pdev = rcfw->pdev;
	mbox = &rcfw->cmdq.cmdq_mbox;

	mbox->reg.bar_id = RCFW_COMM_PCI_BAR_REGION;
	mbox->reg.len = RCFW_COMM_SIZE;
	mbox->reg.bar_base = pci_resource_start(pdev, mbox->reg.bar_id);
	if (!mbox->reg.bar_base) {
		dev_err(&pdev->dev,
			"QPLIB: CMDQ BAR region %d resc start is 0!\n",
			mbox->reg.bar_id);
		return -ENOMEM;
	}

	bar_reg = mbox->reg.bar_base + RCFW_COMM_BASE_OFFSET;
	mbox->reg.len = RCFW_COMM_SIZE;
	mbox->reg.bar_reg = ioremap(bar_reg, mbox->reg.len);
	if (!mbox->reg.bar_reg) {
		dev_err(&pdev->dev,
			"QPLIB: CMDQ BAR region %d mapping failed\n",
			mbox->reg.bar_id);
		return -ENOMEM;
	}

	prod_offt = is_vf ? RCFW_VF_COMM_PROD_OFFSET :
			    RCFW_PF_COMM_PROD_OFFSET;
	mbox->prod = (void  __iomem *)(mbox->reg.bar_reg + prod_offt);
	mbox->db = (void __iomem *)(mbox->reg.bar_reg + RCFW_COMM_TRIG_OFFSET);
	return rc;
}

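/* bnxt_qplib_map_creq_db - ioremap the CREQ consumer doorbell register and
 * populate the doorbell info used to re-arm the CREQ.
 */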
static int bnxt_qplib_map_creq_db(struct bnxt_qplib_rcfw *rcfw, u32 reg_offt)
{
	struct bnxt_qplib_creq_db *creq_db;
	resource_size_t bar_reg;
	struct pci_dev *pdev;

	pdev = rcfw->pdev;
	creq_db = &rcfw->creq.creq_db;

	creq_db->reg.bar_id = RCFW_COMM_CONS_PCI_BAR_REGION;
	creq_db->reg.bar_base = pci_resource_start(pdev, creq_db->reg.bar_id);
	if (!creq_db->reg.bar_base)
		dev_err(&pdev->dev,
			"QPLIB: CREQ BAR region %d resc start is 0!",
			creq_db->reg.bar_id);

	bar_reg = creq_db->reg.bar_base + reg_offt;
	/* Unconditionally map 8 bytes to support 57500 series */
	creq_db->reg.len = 8;
	creq_db->reg.bar_reg = ioremap(bar_reg, creq_db->reg.len);
	if (!creq_db->reg.bar_reg) {
		dev_err(&pdev->dev,
			"QPLIB: CREQ BAR region %d mapping failed",
			creq_db->reg.bar_id);
		return -ENOMEM;
	}
	creq_db->dbinfo.db = creq_db->reg.bar_reg;
	creq_db->dbinfo.hwq = &rcfw->creq.hwq;
	creq_db->dbinfo.xid = rcfw->creq.ring_id;
	return 0;
}

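/* bnxt_qplib_start_rcfw - program the CMDQ PBL address, size/level and the
 * associated CREQ ring id into the firmware mailbox to start the channel.
 */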
static void bnxt_qplib_start_rcfw(struct bnxt_qplib_rcfw *rcfw)
{
	struct bnxt_qplib_cmdq_ctx *cmdq;
	struct bnxt_qplib_creq_ctx *creq;
	struct bnxt_qplib_cmdq_mbox *mbox;
	struct cmdq_init init = {0};

	cmdq = &rcfw->cmdq;
	creq = &rcfw->creq;
	mbox = &cmdq->cmdq_mbox;

	init.cmdq_pbl = cpu_to_le64(cmdq->hwq.pbl[PBL_LVL_0].pg_map_arr[0]);
	init.cmdq_size_cmdq_lvl =
			cpu_to_le16(((rcfw->cmdq_depth <<
				      CMDQ_INIT_CMDQ_SIZE_SFT) &
				    CMDQ_INIT_CMDQ_SIZE_MASK) |
				    ((cmdq->hwq.level <<
				      CMDQ_INIT_CMDQ_LVL_SFT) &
				    CMDQ_INIT_CMDQ_LVL_MASK));
	init.creq_ring_id = cpu_to_le16(creq->ring_id);
	/* Write to the Bono mailbox register */
	__iowrite32_copy(mbox->reg.bar_reg, &init, sizeof(init) / 4);
}

int bnxt_qplib_enable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw,
				   int msix_vector,
				   int cp_bar_reg_off, int virt_fn,
				   aeq_handler_t aeq_handler)
{
	struct bnxt_qplib_cmdq_ctx *cmdq;
	struct bnxt_qplib_creq_ctx *creq;
	int rc;

	cmdq = &rcfw->cmdq;
	creq = &rcfw->creq;

	/* Clear to defaults */

	cmdq->seq_num = 0;
	set_bit(FIRMWARE_FIRST_FLAG, &cmdq->flags);
	init_waitqueue_head(&cmdq->waitq);

	creq->stats.creq_qp_event_processed = 0;
	creq->stats.creq_func_event_processed = 0;
	creq->aeq_handler = aeq_handler;

	rc = bnxt_qplib_map_cmdq_mbox(rcfw, virt_fn);
	if (rc)
		return rc;

	rc = bnxt_qplib_map_creq_db(rcfw, cp_bar_reg_off);
	if (rc)
		return rc;

	rc = bnxt_qplib_rcfw_start_irq(rcfw, msix_vector, true);
	if (rc) {
		dev_err(&rcfw->pdev->dev,
			"Failed to request IRQ for CREQ rc = 0x%x\n", rc);
		bnxt_qplib_disable_rcfw_channel(rcfw);
		return rc;
	}

	bnxt_qplib_start_rcfw(rcfw);

	return 0;
}

struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf(
		struct bnxt_qplib_rcfw *rcfw,
		u32 size)
{
	struct bnxt_qplib_rcfw_sbuf *sbuf;

	sbuf = kzalloc(sizeof(*sbuf), GFP_KERNEL);
	if (!sbuf)
		return NULL;

	sbuf->size = size;
	sbuf->sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf->size,
				      &sbuf->dma_addr, GFP_KERNEL);
	if (!sbuf->sb)
		goto bail;

	return sbuf;
bail:
	kfree(sbuf);
	return NULL;
}

void bnxt_qplib_rcfw_free_sbuf(struct bnxt_qplib_rcfw *rcfw,
			       struct bnxt_qplib_rcfw_sbuf *sbuf)
{
	if (sbuf->sb)
		dma_free_coherent(&rcfw->pdev->dev, sbuf->size,
				  sbuf->sb, sbuf->dma_addr);
	kfree(sbuf);
}