/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: Fast Path Operators
 */

#define dev_fmt(fmt) "QPLIB: " fmt

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/prefetch.h>
#include <linux/if_ether.h>
#include <rdma/ib_mad.h>

#include "roce_hsi.h"

#include "qplib_res.h"
#include "qplib_rcfw.h"
#include "qplib_sp.h"
#include "qplib_fp.h"

static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);

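/* Reset the SQ's phantom-CQE search state so that any in-progress
 * phantom WQE detection is abandoned when the QP is being flushed.
 */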
static void bnxt_qplib_cancel_phantom_processing(struct bnxt_qplib_qp *qp)
{
	qp->sq.condition = false;
	qp->sq.send_phantom = false;
	qp->sq.single = false;
}

/* Flush list */
static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_cq *scq, *rcq;

	scq = qp->scq;
	rcq = qp->rcq;

	if (!qp->sq.flushed) {
		dev_dbg(&scq->hwq.pdev->dev,
			"FP: Adding to SQ Flush list = %p\n", qp);
		bnxt_qplib_cancel_phantom_processing(qp);
		list_add_tail(&qp->sq_flush, &scq->sqf_head);
		qp->sq.flushed = true;
	}
	if (!qp->srq) {
		if (!qp->rq.flushed) {
			dev_dbg(&rcq->hwq.pdev->dev,
				"FP: Adding to RQ Flush list = %p\n", qp);
			list_add_tail(&qp->rq_flush, &rcq->rqf_head);
			qp->rq.flushed = true;
		}
	}
}

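/* Take both CQs' flush locks; the send CQ lock is always acquired first.
 * When the SQ and RQ share one CQ, only that lock is taken and the sparse
 * __acquire()/__release() annotations keep the annotation count balanced.
 */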
static void bnxt_qplib_acquire_cq_flush_locks(struct bnxt_qplib_qp *qp,
				       unsigned long *flags)
	__acquires(&qp->scq->flush_lock) __acquires(&qp->rcq->flush_lock)
{
	spin_lock_irqsave(&qp->scq->flush_lock, *flags);
	if (qp->scq == qp->rcq)
		__acquire(&qp->rcq->flush_lock);
	else
		spin_lock(&qp->rcq->flush_lock);
}

static void bnxt_qplib_release_cq_flush_locks(struct bnxt_qplib_qp *qp,
				       unsigned long *flags)
	__releases(&qp->scq->flush_lock) __releases(&qp->rcq->flush_lock)
{
	if (qp->scq == qp->rcq)
		__release(&qp->rcq->flush_lock);
	else
		spin_unlock(&qp->rcq->flush_lock);
	spin_unlock_irqrestore(&qp->scq->flush_lock, *flags);
}

void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
{
	unsigned long flags;

	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
	__bnxt_qplib_add_flush_qp(qp);
	bnxt_qplib_release_cq_flush_locks(qp, &flags);
}

static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
{
	if (qp->sq.flushed) {
		qp->sq.flushed = false;
		list_del(&qp->sq_flush);
	}
	if (!qp->srq) {
		if (qp->rq.flushed) {
			qp->rq.flushed = false;
			list_del(&qp->rq_flush);
		}
	}
}

void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp)
{
	unsigned long flags;

	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
	__clean_cq(qp->scq, (u64)(unsigned long)qp);
	qp->sq.hwq.prod = 0;
	qp->sq.hwq.cons = 0;
	__clean_cq(qp->rcq, (u64)(unsigned long)qp);
	qp->rq.hwq.prod = 0;
	qp->rq.hwq.cons = 0;

	__bnxt_qplib_del_flush_qp(qp);
	bnxt_qplib_release_cq_flush_locks(qp, &flags);
}

static void bnxt_qpn_cqn_sched_task(struct work_struct *work)
{
	struct bnxt_qplib_nq_work *nq_work =
			container_of(work, struct bnxt_qplib_nq_work, work);

	struct bnxt_qplib_cq *cq = nq_work->cq;
	struct bnxt_qplib_nq *nq = nq_work->nq;

	if (cq && nq) {
		spin_lock_bh(&cq->compl_lock);
		if (atomic_read(&cq->arm_state) && nq->cqn_handler) {
			dev_dbg(&nq->pdev->dev,
				"%s:Trigger cq  = %p event nq = %p\n",
				__func__, cq, nq);
			nq->cqn_handler(nq, cq);
		}
		spin_unlock_bh(&cq->compl_lock);
	}
	kfree(nq_work);
}

static void bnxt_qplib_free_qp_hdr_buf(struct bnxt_qplib_res *res,
				       struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	struct bnxt_qplib_q *sq = &qp->sq;

	if (qp->rq_hdr_buf)
		dma_free_coherent(&res->pdev->dev,
				  rq->max_wqe * qp->rq_hdr_buf_size,
				  qp->rq_hdr_buf, qp->rq_hdr_buf_map);
	if (qp->sq_hdr_buf)
		dma_free_coherent(&res->pdev->dev,
				  sq->max_wqe * qp->sq_hdr_buf_size,
				  qp->sq_hdr_buf, qp->sq_hdr_buf_map);
	qp->rq_hdr_buf = NULL;
	qp->sq_hdr_buf = NULL;
	qp->rq_hdr_buf_map = 0;
	qp->sq_hdr_buf_map = 0;
	qp->sq_hdr_buf_size = 0;
	qp->rq_hdr_buf_size = 0;
}

static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res,
				       struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	struct bnxt_qplib_q *sq = &qp->sq;
	int rc = 0;

	if (qp->sq_hdr_buf_size && sq->max_wqe) {
		qp->sq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
					sq->max_wqe * qp->sq_hdr_buf_size,
					&qp->sq_hdr_buf_map, GFP_KERNEL);
		if (!qp->sq_hdr_buf) {
			rc = -ENOMEM;
			dev_err(&res->pdev->dev,
				"Failed to create sq_hdr_buf\n");
			goto fail;
		}
	}

	if (qp->rq_hdr_buf_size && rq->max_wqe) {
		qp->rq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
						    rq->max_wqe *
						    qp->rq_hdr_buf_size,
						    &qp->rq_hdr_buf_map,
						    GFP_KERNEL);
		if (!qp->rq_hdr_buf) {
			rc = -ENOMEM;
			dev_err(&res->pdev->dev,
				"Failed to create rq_hdr_buf\n");
			goto fail;
		}
	}
	return 0;

fail:
	bnxt_qplib_free_qp_hdr_buf(res, qp);
	return rc;
}

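/* Walk the NQ ring and scrub any CQ notification entries that still point
 * at the given CQ (zeroing their handle) so a destroyed CQ is never
 * dereferenced later; each scrubbed entry is counted in cq->cnq_events.
 * The consumer index itself is not advanced here.
 */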
static void clean_nq(struct bnxt_qplib_nq *nq, struct bnxt_qplib_cq *cq)
{
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	struct nq_base *nqe, **nq_ptr;
	int budget = nq->budget;
	u32 sw_cons, raw_cons;
	uintptr_t q_handle;
	u16 type;

	spin_lock_bh(&hwq->lock);
	/* Service the NQ until empty */
	raw_cons = hwq->cons;
	while (budget--) {
		sw_cons = HWQ_CMP(raw_cons, hwq);
		nq_ptr = (struct nq_base **)hwq->pbl_ptr;
		nqe = &nq_ptr[NQE_PG(sw_cons)][NQE_IDX(sw_cons)];
		if (!NQE_CMP_VALID(nqe, raw_cons, hwq->max_elements))
			break;

		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();

		type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
		switch (type) {
		case NQ_BASE_TYPE_CQ_NOTIFICATION:
		{
			struct nq_cn *nqcne = (struct nq_cn *)nqe;

			q_handle = le32_to_cpu(nqcne->cq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
						     << 32;
			if ((unsigned long)cq == q_handle) {
				nqcne->cq_handle_low = 0;
				nqcne->cq_handle_high = 0;
				cq->cnq_events++;
			}
			break;
		}
		default:
			break;
		}
		raw_cons++;
	}
	spin_unlock_bh(&hwq->lock);
}

/* Wait for receiving all NQEs for this CQ and clean the NQEs associated with
 * this CQ.
 */
static void __wait_for_all_nqes(struct bnxt_qplib_cq *cq, u16 cnq_events)
{
	u32 retry_cnt = 100;

	while (retry_cnt--) {
		if (cnq_events == cq->cnq_events)
			return;
		usleep_range(50, 100);
		clean_nq(cq->nq, cq);
	}
}

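/* NQ tasklet handler: drain up to nq->budget notification entries,
 * dispatching CQ and SRQ notifications to the registered handlers, then
 * ring the NQ doorbell to advance the consumer index and re-arm the NQ.
 */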
static void bnxt_qplib_service_nq(struct tasklet_struct *t)
{
	struct bnxt_qplib_nq *nq = from_tasklet(nq, t, nq_tasklet);
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	int num_srqne_processed = 0;
	int num_cqne_processed = 0;
	struct bnxt_qplib_cq *cq;
	int budget = nq->budget;
	u32 sw_cons, raw_cons;
	struct nq_base *nqe;
	uintptr_t q_handle;
	u16 type;

	spin_lock_bh(&hwq->lock);
	/* Service the NQ until empty */
	raw_cons = hwq->cons;
	while (budget--) {
		sw_cons = HWQ_CMP(raw_cons, hwq);
		nqe = bnxt_qplib_get_qe(hwq, sw_cons, NULL);
		if (!NQE_CMP_VALID(nqe, raw_cons, hwq->max_elements))
			break;

		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();

		type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
		switch (type) {
		case NQ_BASE_TYPE_CQ_NOTIFICATION:
		{
			struct nq_cn *nqcne = (struct nq_cn *)nqe;

			q_handle = le32_to_cpu(nqcne->cq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
						     << 32;
			cq = (struct bnxt_qplib_cq *)(unsigned long)q_handle;
			if (!cq)
				break;
			bnxt_qplib_armen_db(&cq->dbinfo,
					    DBC_DBC_TYPE_CQ_ARMENA);
			spin_lock_bh(&cq->compl_lock);
			atomic_set(&cq->arm_state, 0);
			if (!nq->cqn_handler(nq, (cq)))
				num_cqne_processed++;
			else
				dev_warn(&nq->pdev->dev,
					 "cqn - type 0x%x not handled\n", type);
			cq->cnq_events++;
			spin_unlock_bh(&cq->compl_lock);
			break;
		}
		case NQ_BASE_TYPE_SRQ_EVENT:
		{
			struct bnxt_qplib_srq *srq;
			struct nq_srq_event *nqsrqe =
						(struct nq_srq_event *)nqe;

			q_handle = le32_to_cpu(nqsrqe->srq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqsrqe->srq_handle_high)
				     << 32;
			srq = (struct bnxt_qplib_srq *)q_handle;
			bnxt_qplib_armen_db(&srq->dbinfo,
					    DBC_DBC_TYPE_SRQ_ARMENA);
			if (!nq->srqn_handler(nq,
					      (struct bnxt_qplib_srq *)q_handle,
					      nqsrqe->event))
				num_srqne_processed++;
			else
				dev_warn(&nq->pdev->dev,
					 "SRQ event 0x%x not handled\n",
					 nqsrqe->event);
			break;
		}
		case NQ_BASE_TYPE_DBQ_EVENT:
			break;
		default:
			dev_warn(&nq->pdev->dev,
				 "nqe with type = 0x%x not handled\n", type);
			break;
		}
		raw_cons++;
	}
	if (hwq->cons != raw_cons) {
		hwq->cons = raw_cons;
		bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, true);
	}
	spin_unlock_bh(&hwq->lock);
}

/* bnxt_re_synchronize_nq - self polling notification queue.
 * @nq - notification queue pointer
 *
 * This function polls all pending entries of the given notification
 * queue. It is useful for synchronizing notification entries while
 * resources are going away.
 */
void bnxt_re_synchronize_nq(struct bnxt_qplib_nq *nq)
{
	int budget = nq->budget;

	nq->budget = nq->hwq.max_elements;
	bnxt_qplib_service_nq(&nq->nq_tasklet);
	nq->budget = budget;
}

static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
{
	struct bnxt_qplib_nq *nq = dev_instance;
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	u32 sw_cons;

	/* Prefetch the NQ element */
	sw_cons = HWQ_CMP(hwq->cons, hwq);
	prefetch(bnxt_qplib_get_qe(hwq, sw_cons, NULL));

	/* Fan out to CPU affinitized kthreads? */
	tasklet_schedule(&nq->nq_tasklet);

	return IRQ_HANDLED;
}

void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill)
{
	if (!nq->requested)
		return;

	tasklet_disable(&nq->nq_tasklet);
	/* Mask h/w interrupt */
	bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, false);
	/* Sync with last running IRQ handler */
	synchronize_irq(nq->msix_vec);
	if (kill)
		tasklet_kill(&nq->nq_tasklet);

	irq_set_affinity_hint(nq->msix_vec, NULL);
	free_irq(nq->msix_vec, nq);
	kfree(nq->name);
	nq->name = NULL;
	nq->requested = false;
}

void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
{
	if (nq->cqn_wq) {
		destroy_workqueue(nq->cqn_wq);
		nq->cqn_wq = NULL;
	}

	/* Make sure the HW is stopped! */
	bnxt_qplib_nq_stop_irq(nq, true);

	if (nq->nq_db.reg.bar_reg) {
		iounmap(nq->nq_db.reg.bar_reg);
		nq->nq_db.reg.bar_reg = NULL;
	}

	nq->cqn_handler = NULL;
	nq->srqn_handler = NULL;
	nq->msix_vec = 0;
}

int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
			    int msix_vector, bool need_init)
{
	struct bnxt_qplib_res *res = nq->res;
	int rc;

	if (nq->requested)
		return -EFAULT;

	nq->msix_vec = msix_vector;
	if (need_init)
		tasklet_setup(&nq->nq_tasklet, bnxt_qplib_service_nq);
	else
		tasklet_enable(&nq->nq_tasklet);

	nq->name = kasprintf(GFP_KERNEL, "bnxt_re-nq-%d@pci:%s",
			     nq_indx, pci_name(res->pdev));
	if (!nq->name)
		return -ENOMEM;
	rc = request_irq(nq->msix_vec, bnxt_qplib_nq_irq, 0, nq->name, nq);
	if (rc) {
		kfree(nq->name);
		nq->name = NULL;
		tasklet_disable(&nq->nq_tasklet);
		return rc;
	}

	cpumask_clear(&nq->mask);
	cpumask_set_cpu(nq_indx, &nq->mask);
	rc = irq_set_affinity_hint(nq->msix_vec, &nq->mask);
	if (rc) {
		dev_warn(&nq->pdev->dev,
			 "set affinity failed; vector: %d nq_idx: %d\n",
			 nq->msix_vec, nq_indx);
	}
	nq->requested = true;
	bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, res->cctx, true);

	return rc;
}

static int bnxt_qplib_map_nq_db(struct bnxt_qplib_nq *nq, u32 reg_offt)
{
	resource_size_t reg_base;
	struct bnxt_qplib_nq_db *nq_db;
	struct pci_dev *pdev;
	int rc = 0;

	pdev = nq->pdev;
	nq_db = &nq->nq_db;

	nq_db->reg.bar_id = NQ_CONS_PCI_BAR_REGION;
	nq_db->reg.bar_base = pci_resource_start(pdev, nq_db->reg.bar_id);
	if (!nq_db->reg.bar_base) {
		dev_err(&pdev->dev, "QPLIB: NQ BAR region %d resc start is 0!",
			nq_db->reg.bar_id);
		rc = -ENOMEM;
		goto fail;
	}

	reg_base = nq_db->reg.bar_base + reg_offt;
	/* Unconditionally map 8 bytes to support 57500 series */
	nq_db->reg.len = 8;
	nq_db->reg.bar_reg = ioremap(reg_base, nq_db->reg.len);
	if (!nq_db->reg.bar_reg) {
		dev_err(&pdev->dev, "QPLIB: NQ BAR region %d mapping failed",
			nq_db->reg.bar_id);
		rc = -ENOMEM;
		goto fail;
	}

	nq_db->dbinfo.db = nq_db->reg.bar_reg;
	nq_db->dbinfo.hwq = &nq->hwq;
	nq_db->dbinfo.xid = nq->ring_id;
fail:
	return rc;
}

int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
			 int nq_idx, int msix_vector, int bar_reg_offset,
			 cqn_handler_t cqn_handler,
			 srqn_handler_t srqn_handler)
{
	int rc = -1;

	nq->pdev = pdev;
	nq->cqn_handler = cqn_handler;
	nq->srqn_handler = srqn_handler;

	/* Have a task to schedule CQ notifiers in post send case */
	nq->cqn_wq = create_singlethread_workqueue("bnxt_qplib_nq");
	if (!nq->cqn_wq)
		return -ENOMEM;

	rc = bnxt_qplib_map_nq_db(nq, bar_reg_offset);
	if (rc)
		goto fail;

	rc = bnxt_qplib_nq_start_irq(nq, nq_idx, msix_vector, true);
	if (rc) {
		dev_err(&nq->pdev->dev,
			"Failed to request irq for nq-idx %d\n", nq_idx);
		goto fail;
	}

	return 0;
fail:
	bnxt_qplib_disable_nq(nq);
	return rc;
}

void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq)
{
	if (nq->hwq.max_elements) {
		bnxt_qplib_free_hwq(nq->res, &nq->hwq);
		nq->hwq.max_elements = 0;
	}
}

int bnxt_qplib_alloc_nq(struct bnxt_qplib_res *res, struct bnxt_qplib_nq *nq)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};

	nq->pdev = res->pdev;
	nq->res = res;
	if (!nq->hwq.max_elements ||
	    nq->hwq.max_elements > BNXT_QPLIB_NQE_MAX_CNT)
		nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;

	sginfo.pgsize = PAGE_SIZE;
	sginfo.pgshft = PAGE_SHIFT;
	hwq_attr.res = res;
	hwq_attr.sginfo = &sginfo;
	hwq_attr.depth = nq->hwq.max_elements;
	hwq_attr.stride = sizeof(struct nq_base);
	hwq_attr.type = bnxt_qplib_get_hwq_type(nq->res);
	if (bnxt_qplib_alloc_init_hwq(&nq->hwq, &hwq_attr)) {
		dev_err(&nq->pdev->dev, "FP NQ allocation failed");
		return -ENOMEM;
	}
	nq->budget = 8;
	return 0;
}

/* SRQ */
void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
			   struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_destroy_srq req;
	struct creq_destroy_srq_resp resp;
	u16 cmd_flags = 0;
	int rc;

	RCFW_CMD_PREP(req, DESTROY_SRQ, cmd_flags);

	/* Configure the request */
	req.srq_cid = cpu_to_le32(srq->id);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (struct cmdq_base *)&req,
					  (struct creq_base *)&resp, NULL, 0);
	kfree(srq->swq);
	if (rc)
		return;
	bnxt_qplib_free_hwq(res, &srq->hwq);
}

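/* Create an SRQ: allocate the HW ring and the shadow queue, issue
 * CREATE_SRQ to firmware, link the shadow entries into a free list and
 * set up the doorbell info; ARMENA is rung when a threshold is in use.
 */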
int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct creq_create_srq_resp resp;
	struct cmdq_create_srq req;
	struct bnxt_qplib_pbl *pbl;
	u16 cmd_flags = 0;
	u16 pg_sz_lvl;
	int rc, idx;

	hwq_attr.res = res;
	hwq_attr.sginfo = &srq->sg_info;
	hwq_attr.depth = srq->max_wqe;
	hwq_attr.stride = srq->wqe_size;
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&srq->hwq, &hwq_attr);
	if (rc)
		goto exit;

	srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
			   GFP_KERNEL);
	if (!srq->swq) {
		rc = -ENOMEM;
		goto fail;
	}

	RCFW_CMD_PREP(req, CREATE_SRQ, cmd_flags);

	/* Configure the request */
	req.dpi = cpu_to_le32(srq->dpi->dpi);
	req.srq_handle = cpu_to_le64((uintptr_t)srq);

	req.srq_size = cpu_to_le16((u16)srq->hwq.max_elements);
	pbl = &srq->hwq.pbl[PBL_LVL_0];
	pg_sz_lvl = ((u16)bnxt_qplib_base_pg_size(&srq->hwq) <<
		     CMDQ_CREATE_SRQ_PG_SIZE_SFT);
	pg_sz_lvl |= (srq->hwq.level & CMDQ_CREATE_SRQ_LVL_MASK) <<
		      CMDQ_CREATE_SRQ_LVL_SFT;
	req.pg_size_lvl = cpu_to_le16(pg_sz_lvl);
	req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	req.pd_id = cpu_to_le32(srq->pd->id);
	req.eventq_id = cpu_to_le16(srq->eventq_hw_ring_id);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc)
		goto fail;

	spin_lock_init(&srq->lock);
	srq->start_idx = 0;
	srq->last_idx = srq->hwq.max_elements - 1;
	for (idx = 0; idx < srq->hwq.max_elements; idx++)
		srq->swq[idx].next_idx = idx + 1;
	srq->swq[srq->last_idx].next_idx = -1;

	srq->id = le32_to_cpu(resp.xid);
	srq->dbinfo.hwq = &srq->hwq;
	srq->dbinfo.xid = srq->id;
	srq->dbinfo.db = srq->dpi->dbr;
	srq->dbinfo.max_slot = 1;
	srq->dbinfo.priv_db = res->dpi_tbl.dbr_bar_reg_iomem;
	if (srq->threshold)
		bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA);
	srq->arm_req = false;

	return 0;
fail:
	bnxt_qplib_free_hwq(res, &srq->hwq);
	kfree(srq->swq);
exit:
	return rc;
}

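/* Modify here only (re)arms the SRQ: if more than 'threshold' entries are
 * currently posted, the arm doorbell is rung immediately; otherwise arming
 * is deferred until enough receives have been posted (see post_srq_recv).
 */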
int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
	u32 sw_prod, sw_cons, count = 0;

	sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
	sw_cons = HWQ_CMP(srq_hwq->cons, srq_hwq);

	count = sw_prod > sw_cons ? sw_prod - sw_cons :
				    srq_hwq->max_elements - sw_cons + sw_prod;
	if (count > srq->threshold) {
		srq->arm_req = false;
		bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
	} else {
		/* Deferred arming */
		srq->arm_req = true;
	}

	return 0;
}

int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_query_srq req;
	struct creq_query_srq_resp resp;
	struct bnxt_qplib_rcfw_sbuf *sbuf;
	struct creq_query_srq_resp_sb *sb;
	u16 cmd_flags = 0;
	int rc = 0;

	RCFW_CMD_PREP(req, QUERY_SRQ, cmd_flags);

	/* Configure the request */
	sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
	if (!sbuf)
		return -ENOMEM;
	req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
	req.srq_cid = cpu_to_le32(srq->id);
	sb = sbuf->sb;
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
					  (void *)sbuf, 0);
	srq->threshold = le16_to_cpu(sb->srq_limit);
	bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);

	return rc;
}

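/* Post a receive WQE to the SRQ: take a free shadow entry, build the RQE
 * in place, advance the producer and ring the doorbell; if a deferred arm
 * was requested and the queue has refilled past the threshold, arm now.
 */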
int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
			     struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
	struct rq_wqe *srqe;
	struct sq_sge *hw_sge;
	u32 sw_prod, sw_cons, count = 0;
	int i, rc = 0, next;

	spin_lock(&srq_hwq->lock);
	if (srq->start_idx == srq->last_idx) {
		dev_err(&srq_hwq->pdev->dev,
			"FP: SRQ (0x%x) is full!\n", srq->id);
		rc = -EINVAL;
		spin_unlock(&srq_hwq->lock);
		goto done;
	}
	next = srq->start_idx;
	srq->start_idx = srq->swq[next].next_idx;
	spin_unlock(&srq_hwq->lock);

	sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
	srqe = bnxt_qplib_get_qe(srq_hwq, sw_prod, NULL);
	memset(srqe, 0, srq->wqe_size);
	/* Calculate wqe_size16 and data_len */
	for (i = 0, hw_sge = (struct sq_sge *)srqe->data;
	     i < wqe->num_sge; i++, hw_sge++) {
		hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
		hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
		hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
	}
	srqe->wqe_type = wqe->type;
	srqe->flags = wqe->flags;
	srqe->wqe_size = wqe->num_sge +
			((offsetof(typeof(*srqe), data) + 15) >> 4);
	srqe->wr_id[0] = cpu_to_le32((u32)next);
	srq->swq[next].wr_id = wqe->wr_id;

	srq_hwq->prod++;

	spin_lock(&srq_hwq->lock);
	sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
	/* The lock is held here only to get a consistent read of
	 * srq_hwq->cons for the occupancy calculation below.
	 */
	sw_cons = HWQ_CMP(srq_hwq->cons, srq_hwq);
	count = sw_prod > sw_cons ? sw_prod - sw_cons :
				    srq_hwq->max_elements - sw_cons + sw_prod;
	spin_unlock(&srq_hwq->lock);
	/* Ring DB */
	bnxt_qplib_ring_prod_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ);
	if (srq->arm_req == true && count > srq->threshold) {
		srq->arm_req = false;
		bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
	}
done:
	return rc;
}

/* QP */

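/* Allocate the shadow WQE array for a queue and chain the entries into a
 * circular free list via next_idx; swq_start/swq_last track the window of
 * entries between the next slot to use and the last one completed.
 */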
static int bnxt_qplib_alloc_init_swq(struct bnxt_qplib_q *que)
{
	int rc = 0;
	int indx;

	que->swq = kcalloc(que->max_wqe, sizeof(*que->swq), GFP_KERNEL);
	if (!que->swq) {
		rc = -ENOMEM;
		goto out;
	}

	que->swq_start = 0;
	que->swq_last = que->max_wqe - 1;
	for (indx = 0; indx < que->max_wqe; indx++)
		que->swq[indx].next_idx = indx + 1;
	que->swq[que->swq_last].next_idx = 0; /* Make it circular */
	que->swq_last = 0;
out:
	return rc;
}

int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_q *rq = &qp->rq;
	struct creq_create_qp1_resp resp;
	struct cmdq_create_qp1 req;
	struct bnxt_qplib_pbl *pbl;
	u16 cmd_flags = 0;
	u32 qp_flags = 0;
	u8 pg_sz_lvl;
	u32 tbl_indx;
	int rc;

	RCFW_CMD_PREP(req, CREATE_QP1, cmd_flags);

	/* General */
	req.type = qp->type;
	req.dpi = cpu_to_le32(qp->dpi->dpi);
	req.qp_handle = cpu_to_le64(qp->qp_handle);

	/* SQ */
	hwq_attr.res = res;
	hwq_attr.sginfo = &sq->sg_info;
	hwq_attr.stride = sizeof(struct sq_sge);
	hwq_attr.depth = bnxt_qplib_get_depth(sq);
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
	if (rc)
		goto exit;

	rc = bnxt_qplib_alloc_init_swq(sq);
	if (rc)
		goto fail_sq;

	req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
	pbl = &sq->hwq.pbl[PBL_LVL_0];
	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
		     CMDQ_CREATE_QP1_SQ_PG_SIZE_SFT);
	pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP1_SQ_LVL_MASK);
	req.sq_pg_size_sq_lvl = pg_sz_lvl;
	req.sq_fwo_sq_sge =
		cpu_to_le16((sq->max_sge & CMDQ_CREATE_QP1_SQ_SGE_MASK) <<
			     CMDQ_CREATE_QP1_SQ_SGE_SFT);
	req.scq_cid = cpu_to_le32(qp->scq->id);

	/* RQ */
	if (rq->max_wqe) {
		hwq_attr.res = res;
		hwq_attr.sginfo = &rq->sg_info;
		hwq_attr.stride = sizeof(struct sq_sge);
		hwq_attr.depth = bnxt_qplib_get_depth(rq);
		hwq_attr.type = HWQ_TYPE_QUEUE;
		rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
		if (rc)
			goto sq_swq;
		rc = bnxt_qplib_alloc_init_swq(rq);
		if (rc)
			goto fail_rq;
		req.rq_size = cpu_to_le32(rq->max_wqe);
		pbl = &rq->hwq.pbl[PBL_LVL_0];
		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
		pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
			     CMDQ_CREATE_QP1_RQ_PG_SIZE_SFT);
		pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP1_RQ_LVL_MASK);
		req.rq_pg_size_rq_lvl = pg_sz_lvl;
		req.rq_fwo_rq_sge =
			cpu_to_le16((rq->max_sge &
				     CMDQ_CREATE_QP1_RQ_SGE_MASK) <<
				    CMDQ_CREATE_QP1_RQ_SGE_SFT);
	}
	req.rcq_cid = cpu_to_le32(qp->rcq->id);
	/* Header buffer - allow hdr_buf pass in */
	rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
	if (rc) {
		rc = -ENOMEM;
		goto rq_rwq;
	}
	qp_flags |= CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE;
	req.qp_flags = cpu_to_le32(qp_flags);
	req.pd_id = cpu_to_le32(qp->pd->id);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc)
		goto fail;

	qp->id = le32_to_cpu(resp.xid);
	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
	qp->cctx = res->cctx;
	sq->dbinfo.hwq = &sq->hwq;
	sq->dbinfo.xid = qp->id;
	sq->dbinfo.db = qp->dpi->dbr;
	sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);
	if (rq->max_wqe) {
		rq->dbinfo.hwq = &rq->hwq;
		rq->dbinfo.xid = qp->id;
		rq->dbinfo.db = qp->dpi->dbr;
		rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
	}
	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
	rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
	rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;

	return 0;

fail:
	bnxt_qplib_free_qp_hdr_buf(res, qp);
rq_rwq:
	kfree(rq->swq);
fail_rq:
	bnxt_qplib_free_hwq(res, &rq->hwq);
sq_swq:
	kfree(sq->swq);
fail_sq:
	bnxt_qplib_free_hwq(res, &sq->hwq);
exit:
	return rc;
}

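/* The PSN search area is carved out of the pages that follow the SQ
 * proper (the "aux" storage); record its page table, stride, and the
 * index padding needed when the first entry is not page aligned.
 */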
static void bnxt_qplib_init_psn_ptr(struct bnxt_qplib_qp *qp, int size)
{
	struct bnxt_qplib_hwq *hwq;
	struct bnxt_qplib_q *sq;
	u64 fpsne, psn_pg;
	u16 indx_pad = 0;

	sq = &qp->sq;
	hwq = &sq->hwq;
	/* First psn entry */
	fpsne = (u64)bnxt_qplib_get_qe(hwq, hwq->depth, &psn_pg);
	if (!IS_ALIGNED(fpsne, PAGE_SIZE))
		indx_pad = (fpsne & ~PAGE_MASK) / size;
	hwq->pad_pgofft = indx_pad;
	hwq->pad_pg = (u64 *)psn_pg;
	hwq->pad_stride = size;
}

int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_q *rq = &qp->rq;
	struct creq_create_qp_resp resp;
	int rc, req_size, psn_sz = 0;
	struct bnxt_qplib_hwq *xrrq;
	struct bnxt_qplib_pbl *pbl;
	struct cmdq_create_qp req;
	u16 cmd_flags = 0;
	u32 qp_flags = 0;
	u8 pg_sz_lvl;
	u32 tbl_indx;
	u16 nsge;

	RCFW_CMD_PREP(req, CREATE_QP, cmd_flags);

	/* General */
	req.type = qp->type;
	req.dpi = cpu_to_le32(qp->dpi->dpi);
	req.qp_handle = cpu_to_le64(qp->qp_handle);

	/* SQ */
	if (qp->type == CMDQ_CREATE_QP_TYPE_RC) {
		psn_sz = bnxt_qplib_is_chip_gen_p5(res->cctx) ?
			 sizeof(struct sq_psn_search_ext) :
			 sizeof(struct sq_psn_search);
	}

	hwq_attr.res = res;
	hwq_attr.sginfo = &sq->sg_info;
	hwq_attr.stride = sizeof(struct sq_sge);
	hwq_attr.depth = bnxt_qplib_get_depth(sq);
	hwq_attr.aux_stride = psn_sz;
	hwq_attr.aux_depth = bnxt_qplib_set_sq_size(sq, qp->wqe_mode);
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
	if (rc)
		goto exit;

	rc = bnxt_qplib_alloc_init_swq(sq);
	if (rc)
		goto fail_sq;

	if (psn_sz)
		bnxt_qplib_init_psn_ptr(qp, psn_sz);

	req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
	pbl = &sq->hwq.pbl[PBL_LVL_0];
	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
		     CMDQ_CREATE_QP_SQ_PG_SIZE_SFT);
	pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP_SQ_LVL_MASK);
	req.sq_pg_size_sq_lvl = pg_sz_lvl;
	req.sq_fwo_sq_sge =
		cpu_to_le16(((sq->max_sge & CMDQ_CREATE_QP_SQ_SGE_MASK) <<
			     CMDQ_CREATE_QP_SQ_SGE_SFT) | 0);
	req.scq_cid = cpu_to_le32(qp->scq->id);

	/* RQ */
	if (!qp->srq) {
		hwq_attr.res = res;
		hwq_attr.sginfo = &rq->sg_info;
		hwq_attr.stride = sizeof(struct sq_sge);
		hwq_attr.depth = bnxt_qplib_get_depth(rq);
		hwq_attr.aux_stride = 0;
		hwq_attr.aux_depth = 0;
		hwq_attr.type = HWQ_TYPE_QUEUE;
		rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
		if (rc)
			goto sq_swq;
		rc = bnxt_qplib_alloc_init_swq(rq);
		if (rc)
			goto fail_rq;

		req.rq_size = cpu_to_le32(rq->max_wqe);
		pbl = &rq->hwq.pbl[PBL_LVL_0];
		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
		pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
			     CMDQ_CREATE_QP_RQ_PG_SIZE_SFT);
		pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP_RQ_LVL_MASK);
		req.rq_pg_size_rq_lvl = pg_sz_lvl;
		nsge = (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
			6 : rq->max_sge;
		req.rq_fwo_rq_sge =
			cpu_to_le16(((nsge &
				      CMDQ_CREATE_QP_RQ_SGE_MASK) <<
				     CMDQ_CREATE_QP_RQ_SGE_SFT) | 0);
	} else {
		/* SRQ */
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_SRQ_USED;
		req.srq_cid = cpu_to_le32(qp->srq->id);
	}
	req.rcq_cid = cpu_to_le32(qp->rcq->id);

	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE;
	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED;
	if (qp->sig_type)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION;
	if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_VARIABLE_SIZED_WQE_ENABLED;
	if (_is_ext_stats_supported(res->dattr->dev_cap_flags) && !res->is_vf)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_ENABLED;

	req.qp_flags = cpu_to_le32(qp_flags);

	/* ORRQ and IRRQ */
	if (psn_sz) {
		xrrq = &qp->orrq;
		xrrq->max_elements =
			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
		req_size = xrrq->max_elements *
			   BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE + PAGE_SIZE - 1;
		req_size &= ~(PAGE_SIZE - 1);
		sginfo.pgsize = req_size;
		sginfo.pgshft = PAGE_SHIFT;

		hwq_attr.res = res;
		hwq_attr.sginfo = &sginfo;
		hwq_attr.depth = xrrq->max_elements;
		hwq_attr.stride = BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE;
		hwq_attr.aux_stride = 0;
		hwq_attr.aux_depth = 0;
		hwq_attr.type = HWQ_TYPE_CTX;
		rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
		if (rc)
			goto rq_swq;
		pbl = &xrrq->pbl[PBL_LVL_0];
		req.orrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);

		xrrq = &qp->irrq;
		xrrq->max_elements = IRD_LIMIT_TO_IRRQ_SLOTS(
						qp->max_dest_rd_atomic);
		req_size = xrrq->max_elements *
			   BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE + PAGE_SIZE - 1;
		req_size &= ~(PAGE_SIZE - 1);
		sginfo.pgsize = req_size;
		hwq_attr.depth = xrrq->max_elements;
		hwq_attr.stride = BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE;
		rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
		if (rc)
			goto fail_orrq;

		pbl = &xrrq->pbl[PBL_LVL_0];
		req.irrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
	}
	req.pd_id = cpu_to_le32(qp->pd->id);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc)
		goto fail;

	qp->id = le32_to_cpu(resp.xid);
	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
	INIT_LIST_HEAD(&qp->sq_flush);
	INIT_LIST_HEAD(&qp->rq_flush);
	qp->cctx = res->cctx;
	sq->dbinfo.hwq = &sq->hwq;
	sq->dbinfo.xid = qp->id;
	sq->dbinfo.db = qp->dpi->dbr;
	sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);
	if (rq->max_wqe) {
		rq->dbinfo.hwq = &rq->hwq;
		rq->dbinfo.xid = qp->id;
		rq->dbinfo.db = qp->dpi->dbr;
		rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
	}
	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
	rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
	rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;

	return 0;
fail:
	bnxt_qplib_free_hwq(res, &qp->irrq);
fail_orrq:
	bnxt_qplib_free_hwq(res, &qp->orrq);
rq_swq:
	kfree(rq->swq);
fail_rq:
	bnxt_qplib_free_hwq(res, &rq->hwq);
sq_swq:
	kfree(sq->swq);
fail_sq:
	bnxt_qplib_free_hwq(res, &sq->hwq);
exit:
	return rc;
}

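/* The __modify_flags_from_*_state() helpers trim the user-supplied modify
 * mask down to what the firmware accepts for each state transition and
 * apply mandatory defaults (e.g. a minimum path MTU going INIT->RTR).
 */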
static void __modify_flags_from_init_state(struct bnxt_qplib_qp *qp)
{
	switch (qp->state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		/* INIT->RTR, configure the path_mtu to the default
		 * 2048 if not being requested
		 */
		if (!(qp->modify_flags &
		    CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)) {
			qp->modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
			qp->path_mtu =
				CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
		}
		qp->modify_flags &=
			~CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
		/* Bono FW requires the max_dest_rd_atomic to be >= 1 */
		if (qp->max_dest_rd_atomic < 1)
			qp->max_dest_rd_atomic = 1;
		qp->modify_flags &= ~CMDQ_MODIFY_QP_MODIFY_MASK_SRC_MAC;
		/* Bono FW 20.6.5 requires SGID_INDEX configuration */
		if (!(qp->modify_flags &
		    CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)) {
			qp->modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX;
			qp->ah.sgid_index = 0;
		}
		break;
	default:
		break;
	}
}

static void __modify_flags_from_rtr_state(struct bnxt_qplib_qp *qp)
{
	switch (qp->state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		/* Bono FW requires the max_rd_atomic to be >= 1 */
		if (qp->max_rd_atomic < 1)
			qp->max_rd_atomic = 1;
		/* Bono FW does not allow PKEY_INDEX,
		 * DGID, FLOW_LABEL, SGID_INDEX, HOP_LIMIT,
		 * TRAFFIC_CLASS, DEST_MAC, PATH_MTU, RQ_PSN,
		 * MIN_RNR_TIMER, MAX_DEST_RD_ATOMIC, DEST_QP_ID
		 * modification
		 */
		qp->modify_flags &=
			~(CMDQ_MODIFY_QP_MODIFY_MASK_PKEY |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
			  CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
			  CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
			  CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
			  CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
			  CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU |
			  CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN |
			  CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER |
			  CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID);
		break;
	default:
		break;
	}
}

static void __filter_modify_flags(struct bnxt_qplib_qp *qp)
{
	switch (qp->cur_qp_state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RESET:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_INIT:
		__modify_flags_from_init_state(qp);
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		__modify_flags_from_rtr_state(qp);
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_SQD:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_SQE:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_ERR:
		break;
	default:
		break;
	}
}

int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_modify_qp req;
	struct creq_modify_qp_resp resp;
	u16 cmd_flags = 0;
	u32 temp32[4];
	u32 bmask;
	int rc;

	RCFW_CMD_PREP(req, MODIFY_QP, cmd_flags);

	/* Filter out the qp_attr_mask based on the state->new transition */
	__filter_modify_flags(qp);
	bmask = qp->modify_flags;
	req.modify_mask = cpu_to_le32(qp->modify_flags);
	req.qp_cid = cpu_to_le32(qp->id);
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_STATE) {
		req.network_type_en_sqd_async_notify_new_state =
				(qp->state & CMDQ_MODIFY_QP_NEW_STATE_MASK) |
				(qp->en_sqd_async_notify ?
					CMDQ_MODIFY_QP_EN_SQD_ASYNC_NOTIFY : 0);
	}
	req.network_type_en_sqd_async_notify_new_state |= qp->nw_type;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS)
		req.access = qp->access;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY)
		req.pkey = cpu_to_le16(IB_DEFAULT_PKEY_FULL);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_QKEY)
		req.qkey = cpu_to_le32(qp->qkey);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DGID) {
		memcpy(temp32, qp->ah.dgid.data, sizeof(struct bnxt_qplib_gid));
		req.dgid[0] = cpu_to_le32(temp32[0]);
		req.dgid[1] = cpu_to_le32(temp32[1]);
		req.dgid[2] = cpu_to_le32(temp32[2]);
		req.dgid[3] = cpu_to_le32(temp32[3]);
	}
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL)
		req.flow_label = cpu_to_le32(qp->ah.flow_label);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)
		req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id
					     [qp->ah.sgid_index]);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT)
		req.hop_limit = qp->ah.hop_limit;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS)
		req.traffic_class = qp->ah.traffic_class;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC)
		memcpy(req.dest_mac, qp->ah.dmac, 6);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)
		req.path_mtu = qp->path_mtu;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT)
		req.timeout = qp->timeout;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT)
		req.retry_cnt = qp->retry_cnt;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY)
		req.rnr_retry = qp->rnr_retry;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER)
		req.min_rnr_timer = qp->min_rnr_timer;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN)
		req.rq_psn = cpu_to_le32(qp->rq.psn);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN)
		req.sq_psn = cpu_to_le32(qp->sq.psn);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC)
		req.max_rd_atomic =
			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC)
		req.max_dest_rd_atomic =
			IRD_LIMIT_TO_IRRQ_SLOTS(qp->max_dest_rd_atomic);

	req.sq_size = cpu_to_le32(qp->sq.hwq.max_elements);
	req.rq_size = cpu_to_le32(qp->rq.hwq.max_elements);
	req.sq_sge = cpu_to_le16(qp->sq.max_sge);
	req.rq_sge = cpu_to_le16(qp->rq.max_sge);
	req.max_inline_data = cpu_to_le32(qp->max_inline_data);
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID)
		req.dest_qp_id = cpu_to_le32(qp->dest_qpn);

	req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc)
		return rc;
	qp->cur_qp_state = qp->state;
	return 0;
}

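/* Query the QP context from firmware through a side buffer and unpack it
 * into the qplib QP; the hardware SGID id returned by firmware is mapped
 * back to an index into the driver's SGID table.
 */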
int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_query_qp req;
	struct creq_query_qp_resp resp;
	struct bnxt_qplib_rcfw_sbuf *sbuf;
	struct creq_query_qp_resp_sb *sb;
	u16 cmd_flags = 0;
	u32 temp32[4];
	int i, rc = 0;

	RCFW_CMD_PREP(req, QUERY_QP, cmd_flags);

	sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
	if (!sbuf)
		return -ENOMEM;
	sb = sbuf->sb;

	req.qp_cid = cpu_to_le32(qp->id);
	req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
					  (void *)sbuf, 0);
	if (rc)
		goto bail;
	/* Extract the context from the side buffer */
	qp->state = sb->en_sqd_async_notify_state &
			CREQ_QUERY_QP_RESP_SB_STATE_MASK;
	qp->en_sqd_async_notify = sb->en_sqd_async_notify_state &
				  CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY ?
				  true : false;
	qp->access = sb->access;
	qp->pkey_index = le16_to_cpu(sb->pkey);
	qp->qkey = le32_to_cpu(sb->qkey);

	temp32[0] = le32_to_cpu(sb->dgid[0]);
	temp32[1] = le32_to_cpu(sb->dgid[1]);
	temp32[2] = le32_to_cpu(sb->dgid[2]);
	temp32[3] = le32_to_cpu(sb->dgid[3]);
	memcpy(qp->ah.dgid.data, temp32, sizeof(qp->ah.dgid.data));

	qp->ah.flow_label = le32_to_cpu(sb->flow_label);

	qp->ah.sgid_index = 0;
	for (i = 0; i < res->sgid_tbl.max; i++) {
		if (res->sgid_tbl.hw_id[i] == le16_to_cpu(sb->sgid_index)) {
			qp->ah.sgid_index = i;
			break;
		}
	}
	if (i == res->sgid_tbl.max)
		dev_warn(&res->pdev->dev, "SGID not found??\n");

	qp->ah.hop_limit = sb->hop_limit;
	qp->ah.traffic_class = sb->traffic_class;
	memcpy(qp->ah.dmac, sb->dest_mac, 6);
	qp->ah.vlan_id = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
				CREQ_QUERY_QP_RESP_SB_VLAN_ID_MASK) >>
				CREQ_QUERY_QP_RESP_SB_VLAN_ID_SFT;
	qp->path_mtu = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
				    CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) >>
				    CREQ_QUERY_QP_RESP_SB_PATH_MTU_SFT;
	qp->timeout = sb->timeout;
	qp->retry_cnt = sb->retry_cnt;
	qp->rnr_retry = sb->rnr_retry;
	qp->min_rnr_timer = sb->min_rnr_timer;
	qp->rq.psn = le32_to_cpu(sb->rq_psn);
	qp->max_rd_atomic = ORRQ_SLOTS_TO_ORD_LIMIT(sb->max_rd_atomic);
	qp->sq.psn = le32_to_cpu(sb->sq_psn);
	qp->max_dest_rd_atomic =
			IRRQ_SLOTS_TO_IRD_LIMIT(sb->max_dest_rd_atomic);
	qp->sq.max_wqe = qp->sq.hwq.max_elements;
	qp->rq.max_wqe = qp->rq.hwq.max_elements;
	qp->sq.max_sge = le16_to_cpu(sb->sq_sge);
	qp->rq.max_sge = le16_to_cpu(sb->rq_sge);
	qp->max_inline_data = le32_to_cpu(sb->max_inline_data);
	qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
	memcpy(qp->smac, sb->src_mac, 6);
	qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
bail:
	bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
	return rc;
}

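/* Scrub every valid CQE that still carries the given QP handle so later
 * polling of the CQ cannot touch a QP that is being destroyed.
 */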
static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
{
	struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
	struct cq_base *hw_cqe;
	int i;

	for (i = 0; i < cq_hwq->max_elements; i++) {
		hw_cqe = bnxt_qplib_get_qe(cq_hwq, i, NULL);
		if (!CQE_CMP_VALID(hw_cqe, i, cq_hwq->max_elements))
			continue;
		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();
		switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
		case CQ_BASE_CQE_TYPE_REQ:
		case CQ_BASE_CQE_TYPE_TERMINAL:
		{
			struct cq_req *cqe = (struct cq_req *)hw_cqe;

			if (qp == le64_to_cpu(cqe->qp_handle))
				cqe->qp_handle = 0;
			break;
		}
		case CQ_BASE_CQE_TYPE_RES_RC:
		case CQ_BASE_CQE_TYPE_RES_UD:
		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
		{
			struct cq_res_rc *cqe = (struct cq_res_rc *)hw_cqe;

			if (qp == le64_to_cpu(cqe->qp_handle))
				cqe->qp_handle = 0;
			break;
		}
		default:
			break;
		}
	}
}

int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_destroy_qp req;
	struct creq_destroy_qp_resp resp;
	u16 cmd_flags = 0;
	u32 tbl_indx;
	int rc;

	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
	rcfw->qp_tbl[tbl_indx].qp_id = BNXT_QPLIB_QP_ID_INVALID;
	rcfw->qp_tbl[tbl_indx].qp_handle = NULL;

	RCFW_CMD_PREP(req, DESTROY_QP, cmd_flags);

	req.qp_cid = cpu_to_le32(qp->id);
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc) {
		rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
		rcfw->qp_tbl[tbl_indx].qp_handle = qp;
		return rc;
	}

	return 0;
}

void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res,
			    struct bnxt_qplib_qp *qp)
{
	bnxt_qplib_free_qp_hdr_buf(res, qp);
	bnxt_qplib_free_hwq(res, &qp->sq.hwq);
	kfree(qp->sq.swq);

	bnxt_qplib_free_hwq(res, &qp->rq.hwq);
	kfree(qp->rq.swq);

	if (qp->irrq.max_elements)
		bnxt_qplib_free_hwq(res, &qp->irrq);
	if (qp->orrq.max_elements)
		bnxt_qplib_free_hwq(res, &qp->orrq);
}

void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge)
{
	struct bnxt_qplib_q *sq = &qp->sq;
	u32 sw_prod;

	memset(sge, 0, sizeof(*sge));

	if (qp->sq_hdr_buf) {
		sw_prod = sq->swq_start;
		sge->addr = (dma_addr_t)(qp->sq_hdr_buf_map +
					 sw_prod * qp->sq_hdr_buf_size);
		sge->lkey = 0xFFFFFFFF;
		sge->size = qp->sq_hdr_buf_size;
		return qp->sq_hdr_buf + sw_prod * sge->size;
	}
	return NULL;
}

u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;

	return rq->swq_start;
}

dma_addr_t bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp, u32 index)
{
	return (qp->rq_hdr_buf_map + index * qp->rq_hdr_buf_size);
}

void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	u32 sw_prod;

	memset(sge, 0, sizeof(*sge));

	if (qp->rq_hdr_buf) {
		sw_prod = rq->swq_start;
		sge->addr = (dma_addr_t)(qp->rq_hdr_buf_map +
					 sw_prod * qp->rq_hdr_buf_size);
		sge->lkey = 0xFFFFFFFF;
		sge->size = qp->rq_hdr_buf_size;
		return qp->rq_hdr_buf + sw_prod * sge->size;
	}
	return NULL;
}

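/* Fill the PSN search entry for a send WQE so the hardware can locate the
 * WQE by PSN on retransmit; gen P5 chips use the extended format, which
 * additionally records the WQE's starting slot index.
 */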
static void bnxt_qplib_fill_psn_search(struct bnxt_qplib_qp *qp,
				       struct bnxt_qplib_swqe *wqe,
				       struct bnxt_qplib_swq *swq)
{
	struct sq_psn_search_ext *psns_ext;
	struct sq_psn_search *psns;
	u32 flg_npsn;
	u32 op_spsn;

	if (!swq->psn_search)
		return;
	psns = swq->psn_search;
	psns_ext = swq->psn_ext;

	op_spsn = ((swq->start_psn << SQ_PSN_SEARCH_START_PSN_SFT) &
		    SQ_PSN_SEARCH_START_PSN_MASK);
	op_spsn |= ((wqe->type << SQ_PSN_SEARCH_OPCODE_SFT) &
		     SQ_PSN_SEARCH_OPCODE_MASK);
	flg_npsn = ((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
		     SQ_PSN_SEARCH_NEXT_PSN_MASK);

	if (bnxt_qplib_is_chip_gen_p5(qp->cctx)) {
		psns_ext->opcode_start_psn = cpu_to_le32(op_spsn);
		psns_ext->flags_next_psn = cpu_to_le32(flg_npsn);
		psns_ext->start_slot_idx = cpu_to_le16(swq->slot_idx);
	} else {
		psns->opcode_start_psn = cpu_to_le32(op_spsn);
		psns->flags_next_psn = cpu_to_le32(flg_npsn);
	}
}

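/* Copy inline payload from the SG list straight into SQE slots, filling
 * 16 bytes (one sq_sge) at a time; returns the total number of bytes
 * copied, or -ENOMEM if the payload exceeds qp->max_inline_data.
 */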
static int bnxt_qplib_put_inline(struct bnxt_qplib_qp *qp,
				 struct bnxt_qplib_swqe *wqe,
				 u16 *idx)
{
	struct bnxt_qplib_hwq *hwq;
	int len, t_len, offt;
	bool pull_dst = true;
	void *il_dst = NULL;
	void *il_src = NULL;
	int t_cplen, cplen;
	int indx;

	hwq = &qp->sq.hwq;
	t_len = 0;
	for (indx = 0; indx < wqe->num_sge; indx++) {
		len = wqe->sg_list[indx].size;
		il_src = (void *)wqe->sg_list[indx].addr;
		t_len += len;
		if (t_len > qp->max_inline_data)
			return -ENOMEM;
		while (len) {
			if (pull_dst) {
				pull_dst = false;
				il_dst = bnxt_qplib_get_prod_qe(hwq, *idx);
				(*idx)++;
				t_cplen = 0;
				offt = 0;
			}
			cplen = min_t(int, len, sizeof(struct sq_sge));
			cplen = min_t(int, cplen,
					(sizeof(struct sq_sge) - offt));
			memcpy(il_dst, il_src, cplen);
			t_cplen += cplen;
			il_src += cplen;
			il_dst += cplen;
			offt += cplen;
			len -= cplen;
			if (t_cplen == sizeof(struct sq_sge))
				pull_dst = true;
		}
	}

	return t_len;
}

static u32 bnxt_qplib_put_sges(struct bnxt_qplib_hwq *hwq,
			       struct bnxt_qplib_sge *ssge,
			       u16 nsge, u16 *idx)
{
	struct sq_sge *dsge;
	int indx, len = 0;

	for (indx = 0; indx < nsge; indx++, (*idx)++) {
		dsge = bnxt_qplib_get_prod_qe(hwq, *idx);
		dsge->va_or_pa = cpu_to_le64(ssge[indx].addr);
		dsge->l_key = cpu_to_le32(ssge[indx].lkey);
		dsge->size = cpu_to_le32(ssge[indx].size);
		len += ssge[indx].size;
	}

	return len;
}

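/* Compute how many 16-byte slots a WQE occupies: the send header plus
 * either the SGE array or the aligned inline payload. In static WQE mode
 * every WQE consumes the full fixed stride of 8 slots (128 bytes).
 */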
static u16 bnxt_qplib_required_slots(struct bnxt_qplib_qp *qp,
				     struct bnxt_qplib_swqe *wqe,
				     u16 *wqe_sz, u16 *qdf, u8 mode)
{
	u32 ilsize, bytes;
	u16 nsge;
	u16 slot;

	nsge = wqe->num_sge;
	/* Using sq_send_hdr here is a slight misnomer; the RQ header has
	 * the same size.
	 */
	bytes = sizeof(struct sq_send_hdr) + nsge * sizeof(struct sq_sge);
	if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE) {
		ilsize = bnxt_qplib_calc_ilsize(wqe, qp->max_inline_data);
		bytes = ALIGN(ilsize, sizeof(struct sq_sge));
		bytes += sizeof(struct sq_send_hdr);
	}

	*qdf =  __xlate_qfd(qp->sq.q_full_delta, bytes);
	slot = bytes >> 4;
	*wqe_sz = slot;
	if (mode == BNXT_QPLIB_WQE_MODE_STATIC)
		slot = 8;
	return slot;
}

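/* Attach the PSN search buffer that corresponds to this WQE's position:
 * derive the WQE index from the slot index, then translate it into a
 * page and offset within the pad (PSN search) area set up at create time.
 */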
static void bnxt_qplib_pull_psn_buff(struct bnxt_qplib_q *sq,
				     struct bnxt_qplib_swq *swq)
{
	struct bnxt_qplib_hwq *hwq;
	u32 pg_num, pg_indx;
	void *buff;
	u32 tail;

	hwq = &sq->hwq;
	if (!hwq->pad_pg)
		return;
	tail = swq->slot_idx / sq->dbinfo.max_slot;
	pg_num = (tail + hwq->pad_pgofft) / (PAGE_SIZE / hwq->pad_stride);
	pg_indx = (tail + hwq->pad_pgofft) % (PAGE_SIZE / hwq->pad_stride);
	buff = (void *)(hwq->pad_pg[pg_num] + pg_indx * hwq->pad_stride);
	swq->psn_ext = buff;
	swq->psn_search = buff;
}

void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *sq = &qp->sq;

	bnxt_qplib_ring_prod_db(&sq->dbinfo, DBC_DBC_TYPE_SQ);
}

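/* bnxt_qplib_post_send() - post one send WQE to a QP's SQ.
 *
 * Validates the QP state and queue space, writes the two header slots
 * followed by either inline data or SGEs, fills the opcode-specific
 * fields, and advances sq->psn by the number of MTU-sized packets the
 * request will generate.  A QP already in the error state gets its
 * WQE queued as-is, with completion deferred to poll_cq via the NQ
 * worker.  The doorbell is not rung here; a caller sketch (for
 * illustration only, the real callers live in the ib_verbs glue):
 *
 *	rc = bnxt_qplib_post_send(qp, &wqe);
 *	if (!rc)
 *		bnxt_qplib_post_send_db(qp);
 */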
int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
			 struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_qplib_nq_work *nq_work = NULL;
	int i, rc = 0, data_len = 0, pkt_num = 0;
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_hwq *hwq;
	struct bnxt_qplib_swq *swq;
	bool sch_handler = false;
	u16 wqe_sz, qdf = 0;
	void *base_hdr;
	void *ext_hdr;
	__le32 temp32;
	u32 wqe_idx;
	u32 slots;
	u16 idx;

	hwq = &sq->hwq;
	if (qp->state != CMDQ_MODIFY_QP_NEW_STATE_RTS &&
	    qp->state != CMDQ_MODIFY_QP_NEW_STATE_ERR) {
		dev_err(&hwq->pdev->dev,
			"QPLIB: FP: QP (0x%x) is in the 0x%x state",
			qp->id, qp->state);
		rc = -EINVAL;
		goto done;
	}

	slots = bnxt_qplib_required_slots(qp, wqe, &wqe_sz, &qdf, qp->wqe_mode);
	if (bnxt_qplib_queue_full(sq, slots + qdf)) {
		dev_err(&hwq->pdev->dev,
			"prod = %#x cons = %#x qdepth = %#x delta = %#x\n",
			hwq->prod, hwq->cons, hwq->depth, sq->q_full_delta);
		rc = -ENOMEM;
		goto done;
	}

	swq = bnxt_qplib_get_swqe(sq, &wqe_idx);
	bnxt_qplib_pull_psn_buff(sq, swq);

	idx = 0;
	swq->slot_idx = hwq->prod;
	swq->slots = slots;
	swq->wr_id = wqe->wr_id;
	swq->type = wqe->type;
	swq->flags = wqe->flags;
	swq->start_psn = sq->psn & BTH_PSN_MASK;
	if (qp->sig_type)
		swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;

	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
		sch_handler = true;
		dev_dbg(&hwq->pdev->dev,
			"%s Error QP. Scheduling for poll_cq\n", __func__);
		goto queue_err;
	}

	base_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
	ext_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
	memset(base_hdr, 0, sizeof(struct sq_sge));
	memset(ext_hdr, 0, sizeof(struct sq_sge));

	if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE)
		/* Copy the inline data */
		data_len = bnxt_qplib_put_inline(qp, wqe, &idx);
	else
		data_len = bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge,
					       &idx);
	if (data_len < 0)
		goto queue_err;
	/* Specifics */
	switch (wqe->type) {
	case BNXT_QPLIB_SWQE_TYPE_SEND:
		if (qp->type == CMDQ_CREATE_QP1_TYPE_GSI) {
			struct sq_send_raweth_qp1_hdr *sqe = base_hdr;
			struct sq_raw_ext_hdr *ext_sqe = ext_hdr;
			/* Assemble info for Raw Ethertype QPs */

			sqe->wqe_type = wqe->type;
			sqe->flags = wqe->flags;
			sqe->wqe_size = wqe_sz;
			sqe->cfa_action = cpu_to_le16(wqe->rawqp1.cfa_action);
			sqe->lflags = cpu_to_le16(wqe->rawqp1.lflags);
			sqe->length = cpu_to_le32(data_len);
			ext_sqe->cfa_meta = cpu_to_le32((wqe->rawqp1.cfa_meta &
				SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_MASK) <<
				SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_SFT);

			break;
		}
		fallthrough;
	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
	{
		struct sq_ud_ext_hdr *ext_sqe = ext_hdr;
		struct sq_send_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->wqe_size = wqe_sz;
		sqe->inv_key_or_imm_data = cpu_to_le32(wqe->send.inv_key);
		if (qp->type == CMDQ_CREATE_QP_TYPE_UD ||
		    qp->type == CMDQ_CREATE_QP_TYPE_GSI) {
			sqe->q_key = cpu_to_le32(wqe->send.q_key);
			sqe->length = cpu_to_le32(data_len);
			sq->psn = (sq->psn + 1) & BTH_PSN_MASK;
			ext_sqe->dst_qp = cpu_to_le32(wqe->send.dst_qp &
						      SQ_SEND_DST_QP_MASK);
			ext_sqe->avid = cpu_to_le32(wqe->send.avid &
						    SQ_SEND_AVID_MASK);
		} else {
			sqe->length = cpu_to_le32(data_len);
			if (qp->mtu)
				pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
			if (!pkt_num)
				pkt_num = 1;
			sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
		}
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
	case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
	{
		struct sq_rdma_ext_hdr *ext_sqe = ext_hdr;
		struct sq_rdma_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->wqe_size = wqe_sz;
		sqe->imm_data = cpu_to_le32(wqe->rdma.inv_key);
		sqe->length = cpu_to_le32((u32)data_len);
		ext_sqe->remote_va = cpu_to_le64(wqe->rdma.remote_va);
		ext_sqe->remote_key = cpu_to_le32(wqe->rdma.r_key);
		if (qp->mtu)
			pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
		if (!pkt_num)
			pkt_num = 1;
		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
	{
		struct sq_atomic_ext_hdr *ext_sqe = ext_hdr;
		struct sq_atomic_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->remote_key = cpu_to_le32(wqe->atomic.r_key);
		sqe->remote_va = cpu_to_le64(wqe->atomic.remote_va);
		ext_sqe->swap_data = cpu_to_le64(wqe->atomic.swap_data);
		ext_sqe->cmp_data = cpu_to_le64(wqe->atomic.cmp_data);
		if (qp->mtu)
			pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
		if (!pkt_num)
			pkt_num = 1;
		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
	{
		struct sq_localinvalidate *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->inv_l_key = cpu_to_le32(wqe->local_inv.inv_l_key);

		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR:
	{
		struct sq_fr_pmr_ext_hdr *ext_sqe = ext_hdr;
		struct sq_fr_pmr_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->access_cntl = wqe->frmr.access_cntl |
				   SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
		sqe->zero_based_page_size_log =
			(wqe->frmr.pg_sz_log & SQ_FR_PMR_PAGE_SIZE_LOG_MASK) <<
			SQ_FR_PMR_PAGE_SIZE_LOG_SFT |
			(wqe->frmr.zero_based ? SQ_FR_PMR_ZERO_BASED : 0);
		sqe->l_key = cpu_to_le32(wqe->frmr.l_key);
		temp32 = cpu_to_le32(wqe->frmr.length);
		memcpy(sqe->length, &temp32, sizeof(wqe->frmr.length));
		sqe->numlevels_pbl_page_size_log =
			((wqe->frmr.pbl_pg_sz_log <<
					SQ_FR_PMR_PBL_PAGE_SIZE_LOG_SFT) &
					SQ_FR_PMR_PBL_PAGE_SIZE_LOG_MASK) |
			((wqe->frmr.levels << SQ_FR_PMR_NUMLEVELS_SFT) &
					SQ_FR_PMR_NUMLEVELS_MASK);

		for (i = 0; i < wqe->frmr.page_list_len; i++)
			wqe->frmr.pbl_ptr[i] = cpu_to_le64(
						wqe->frmr.page_list[i] |
						PTU_PTE_VALID);
		ext_sqe->pblptr = cpu_to_le64(wqe->frmr.pbl_dma_ptr);
		ext_sqe->va = cpu_to_le64(wqe->frmr.va);

		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_BIND_MW:
	{
		struct sq_bind_ext_hdr *ext_sqe = ext_hdr;
		struct sq_bind_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->access_cntl = wqe->bind.access_cntl;
		sqe->mw_type_zero_based = wqe->bind.mw_type |
			(wqe->bind.zero_based ? SQ_BIND_ZERO_BASED : 0);
		sqe->parent_l_key = cpu_to_le32(wqe->bind.parent_l_key);
		sqe->l_key = cpu_to_le32(wqe->bind.r_key);
		ext_sqe->va = cpu_to_le64(wqe->bind.va);
		ext_sqe->length_lo = cpu_to_le32(wqe->bind.length);
		break;
	}
	default:
		/* Bad wqe, return error */
		rc = -EINVAL;
		goto done;
	}
	swq->next_psn = sq->psn & BTH_PSN_MASK;
	bnxt_qplib_fill_psn_search(qp, wqe, swq);
queue_err:
	bnxt_qplib_swq_mod_start(sq, wqe_idx);
	bnxt_qplib_hwq_incr_prod(hwq, swq->slots);
	qp->wqe_cnt++;
done:
	if (sch_handler) {
		nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
		if (nq_work) {
			nq_work->cq = qp->scq;
			nq_work->nq = qp->scq->nq;
			INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
			queue_work(qp->scq->nq->cqn_wq, &nq_work->work);
		} else {
			dev_err(&hwq->pdev->dev,
				"FP: Failed to allocate SQ nq_work!\n");
			rc = -ENOMEM;
		}
	}
	return rc;
}

void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;

	bnxt_qplib_ring_prod_db(&rq->dbinfo, DBC_DBC_TYPE_RQ);
}

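/* bnxt_qplib_post_recv() - post one receive WQE to a QP's RQ.
 *
 * Mirrors the SQ path: checks the QP state and RQ space, writes the
 * fixed-size RQ WQE header and SGEs (adding a single zero-length SGE
 * when none are supplied), and records the WQE index in wr_id[0] so
 * completions can be matched back to the SWQE.  Error-state QPs are
 * deferred to poll_cq through the NQ worker, as on the send side.
 */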
int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
			 struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_qplib_nq_work *nq_work = NULL;
	struct bnxt_qplib_q *rq = &qp->rq;
	struct rq_wqe_hdr *base_hdr;
	struct rq_ext_hdr *ext_hdr;
	struct bnxt_qplib_hwq *hwq;
	struct bnxt_qplib_swq *swq;
	bool sch_handler = false;
	u16 wqe_sz, idx;
	u32 wqe_idx;
	int rc = 0;

	hwq = &rq->hwq;
	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
		dev_err(&hwq->pdev->dev,
			"QPLIB: FP: QP (0x%x) is in the 0x%x state",
			qp->id, qp->state);
		rc = -EINVAL;
		goto done;
	}

	if (bnxt_qplib_queue_full(rq, rq->dbinfo.max_slot)) {
		dev_err(&hwq->pdev->dev,
			"FP: QP (0x%x) RQ is full!\n", qp->id);
		rc = -EINVAL;
		goto done;
	}

	swq = bnxt_qplib_get_swqe(rq, &wqe_idx);
	swq->wr_id = wqe->wr_id;
	swq->slots = rq->dbinfo.max_slot;

	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
		sch_handler = true;
		dev_dbg(&hwq->pdev->dev,
			"%s: Error QP. Scheduling for poll_cq\n", __func__);
		goto queue_err;
	}

	idx = 0;
	base_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
	ext_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
	memset(base_hdr, 0, sizeof(struct sq_sge));
	memset(ext_hdr, 0, sizeof(struct sq_sge));
	wqe_sz = (sizeof(struct rq_wqe_hdr) +
		  wqe->num_sge * sizeof(struct sq_sge)) >> 4;
	bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge, &idx);
	if (!wqe->num_sge) {
		struct sq_sge *sge;

		sge = bnxt_qplib_get_prod_qe(hwq, idx++);
		sge->size = 0;
		wqe_sz++;
	}
	base_hdr->wqe_type = wqe->type;
	base_hdr->flags = wqe->flags;
	base_hdr->wqe_size = wqe_sz;
	base_hdr->wr_id[0] = cpu_to_le32(wqe_idx);
queue_err:
	bnxt_qplib_swq_mod_start(rq, wqe_idx);
	bnxt_qplib_hwq_incr_prod(hwq, swq->slots);
done:
	if (sch_handler) {
		nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
		if (nq_work) {
			nq_work->cq = qp->rcq;
			nq_work->nq = qp->rcq->nq;
			INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
			queue_work(qp->rcq->nq->cqn_wq, &nq_work->work);
		} else {
			dev_err(&hwq->pdev->dev,
				"FP: Failed to allocate RQ nq_work!\n");
			rc = -ENOMEM;
		}
	}

	return rc;
}

/* CQ */
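/* bnxt_qplib_create_cq() - create a completion queue in hardware.
 *
 * Allocates the CQ's hardware queue pages, then issues a CREATE_CQ
 * firmware command carrying the DPI, handle, depth, page size/level
 * and PBL base, plus the CNQ this CQ reports into.  On success the
 * flush lists, locks and doorbell info are initialized and the CQ is
 * armed for completion notifications (ARMENA).
 */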
int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct creq_create_cq_resp resp;
	struct bnxt_qplib_pbl *pbl;
	struct cmdq_create_cq req;
	u16 cmd_flags = 0;
	u32 pg_sz_lvl;
	int rc;

	if (!cq->dpi) {
		dev_err(&rcfw->pdev->dev,
			"FP: CREATE_CQ failed due to NULL DPI\n");
		return -EINVAL;
	}

	hwq_attr.res = res;
	hwq_attr.depth = cq->max_wqe;
	hwq_attr.stride = sizeof(struct cq_base);
	hwq_attr.type = HWQ_TYPE_QUEUE;
	hwq_attr.sginfo = &cq->sg_info;
	rc = bnxt_qplib_alloc_init_hwq(&cq->hwq, &hwq_attr);
	if (rc)
		return rc;

	RCFW_CMD_PREP(req, CREATE_CQ, cmd_flags);

	req.dpi = cpu_to_le32(cq->dpi->dpi);
	req.cq_handle = cpu_to_le64(cq->cq_handle);
	req.cq_size = cpu_to_le32(cq->hwq.max_elements);
	pbl = &cq->hwq.pbl[PBL_LVL_0];
	pg_sz_lvl = (bnxt_qplib_base_pg_size(&cq->hwq) <<
		     CMDQ_CREATE_CQ_PG_SIZE_SFT);
	pg_sz_lvl |= (cq->hwq.level & CMDQ_CREATE_CQ_LVL_MASK);
	req.pg_size_lvl = cpu_to_le32(pg_sz_lvl);
	req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	req.cq_fco_cnq_id = cpu_to_le32(
			(cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) <<
			 CMDQ_CREATE_CQ_CNQ_ID_SFT);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc)
		goto fail;

	cq->id = le32_to_cpu(resp.xid);
	cq->period = BNXT_QPLIB_QUEUE_START_PERIOD;
	init_waitqueue_head(&cq->waitq);
	INIT_LIST_HEAD(&cq->sqf_head);
	INIT_LIST_HEAD(&cq->rqf_head);
	spin_lock_init(&cq->compl_lock);
	spin_lock_init(&cq->flush_lock);

	cq->dbinfo.hwq = &cq->hwq;
	cq->dbinfo.xid = cq->id;
	cq->dbinfo.db = cq->dpi->dbr;
	cq->dbinfo.priv_db = res->dpi_tbl.dbr_bar_reg_iomem;

	bnxt_qplib_armen_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMENA);

	return 0;

fail:
	bnxt_qplib_free_hwq(res, &cq->hwq);
	return rc;
}

int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_destroy_cq req;
	struct creq_destroy_cq_resp resp;
	u16 total_cnq_events;
	u16 cmd_flags = 0;
	int rc;

	RCFW_CMD_PREP(req, DESTROY_CQ, cmd_flags);

	req.cq_cid = cpu_to_le32(cq->id);
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc)
		return rc;
	total_cnq_events = le16_to_cpu(resp.total_cnq_events);
	__wait_for_all_nqes(cq, total_cnq_events);
	bnxt_qplib_free_hwq(res, &cq->hwq);
	return 0;
}

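/* __flush_sq() - generate flush completions for the SQ.
 *
 * Walks the SWQ from swq_start to swq_last and fabricates a
 * WORK_REQUEST_FLUSHED_ERR CQE for every outstanding WQE, skipping
 * fence WQEs (BNXT_QPLIB_FENCE_WRID), which only cancel any phantom
 * processing in progress.  Returns -EAGAIN when the caller's CQE
 * budget runs out before the queue is drained.
 */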
static int __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp,
		      struct bnxt_qplib_cqe **pcqe, int *budget)
{
	struct bnxt_qplib_cqe *cqe;
	u32 start, last;
	int rc = 0;

	/* Now complete all outstanding SQEs with FLUSHED_ERR */
	start = sq->swq_start;
	cqe = *pcqe;
	while (*budget) {
		last = sq->swq_last;
		if (start == last)
			break;
		/* Skip the FENCE WQE completions */
		if (sq->swq[last].wr_id == BNXT_QPLIB_FENCE_WRID) {
			bnxt_qplib_cancel_phantom_processing(qp);
			goto skip_compl;
		}
		memset(cqe, 0, sizeof(*cqe));
		cqe->status = CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR;
		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
		cqe->qp_handle = (u64)(unsigned long)qp;
		cqe->wr_id = sq->swq[last].wr_id;
		cqe->src_qp = qp->id;
		cqe->type = sq->swq[last].type;
		cqe++;
		(*budget)--;
skip_compl:
		bnxt_qplib_hwq_incr_cons(&sq->hwq, sq->swq[last].slots);
		sq->swq_last = sq->swq[last].next_idx;
	}
	*pcqe = cqe;
	if (!(*budget) && sq->swq_last != start)
		/* Out of budget */
		rc = -EAGAIN;

	return rc;
}

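/* __flush_rq() - generate flush completions for the RQ.
 *
 * The RQ counterpart of __flush_sq(): picks the CQE opcode matching
 * the QP type, then completes every outstanding RQ WQE with
 * WORK_REQUEST_FLUSHED_ERR until the queue or the budget is exhausted.
 */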
static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
		      struct bnxt_qplib_cqe **pcqe, int *budget)
{
	struct bnxt_qplib_cqe *cqe;
	u32 start, last;
	int opcode = 0;
	int rc = 0;

	switch (qp->type) {
	case CMDQ_CREATE_QP1_TYPE_GSI:
		opcode = CQ_BASE_CQE_TYPE_RES_RAWETH_QP1;
		break;
	case CMDQ_CREATE_QP_TYPE_RC:
		opcode = CQ_BASE_CQE_TYPE_RES_RC;
		break;
	case CMDQ_CREATE_QP_TYPE_UD:
	case CMDQ_CREATE_QP_TYPE_GSI:
		opcode = CQ_BASE_CQE_TYPE_RES_UD;
		break;
	}

	/* Flush the rest of the RQ */
	start = rq->swq_start;
	cqe = *pcqe;
	while (*budget) {
		last = rq->swq_last;
		if (last == start)
			break;
		memset(cqe, 0, sizeof(*cqe));
		cqe->status =
		    CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR;
		cqe->opcode = opcode;
		cqe->qp_handle = (unsigned long)qp;
		cqe->wr_id = rq->swq[last].wr_id;
		cqe++;
		(*budget)--;
		bnxt_qplib_hwq_incr_cons(&rq->hwq, rq->swq[last].slots);
		rq->swq_last = rq->swq[last].next_idx;
	}
	*pcqe = cqe;
	if (!*budget && rq->swq_last != start)
		/* Out of budget */
		rc = -EAGAIN;

	return rc;
}

void bnxt_qplib_mark_qp_error(void *qp_handle)
{
	struct bnxt_qplib_qp *qp = qp_handle;

	if (!qp)
		return;

	/* Must block new posting of SQ and RQ */
	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
	bnxt_qplib_cancel_phantom_processing(qp);
}

/* Note: SQEs are valid from sw_sq_cons up to cqe_sq_cons (exclusive).
 *       CQEs are tracked from sw_cq_cons to max_element, but are valid
 *       only if VALID = 1.
 */
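/* do_wa9060() - phantom CQE workaround (HW issue 9060).
 *
 * If the PSN search entry of the WQE being completed carries the
 * phantom marker (bit 31 of flags_next_psn), its real CQE may still be
 * in flight: unmark the entry, enter "condition" mode, re-arm the CQ
 * and return -EAGAIN.  While in condition mode, peek ahead in the CQ
 * ring until the phantom fence CQE (wr_id == BNXT_QPLIB_FENCE_WRID)
 * shows up, then resume completion in single-step mode.  A non-zero
 * return means the caller must back off and wait.
 */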
static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
		     u32 cq_cons, u32 swq_last, u32 cqe_sq_cons)
{
	u32 peek_sw_cq_cons, peek_raw_cq_cons, peek_sq_cons_idx;
	struct bnxt_qplib_q *sq = &qp->sq;
	struct cq_req *peek_req_hwcqe;
	struct bnxt_qplib_qp *peek_qp;
	struct bnxt_qplib_q *peek_sq;
	struct bnxt_qplib_swq *swq;
	struct cq_base *peek_hwcqe;
	int i, rc = 0;

	/* Normal mode */
	/* Check for the psn_search marking before completing */
	swq = &sq->swq[swq_last];
	if (swq->psn_search &&
	    le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) {
		/* Unmark */
		swq->psn_search->flags_next_psn = cpu_to_le32
			(le32_to_cpu(swq->psn_search->flags_next_psn)
				     & ~0x80000000);
		dev_dbg(&cq->hwq.pdev->dev,
			"FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n",
			cq_cons, qp->id, swq_last, cqe_sq_cons);
		sq->condition = true;
		sq->send_phantom = true;

		/* TODO: Only ARM if the previous SQE is ARMALL */
		bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMALL);
		rc = -EAGAIN;
		goto out;
	}
	if (sq->condition) {
		/* Peek at the completions */
		peek_raw_cq_cons = cq->hwq.cons;
		peek_sw_cq_cons = cq_cons;
		i = cq->hwq.max_elements;
		while (i--) {
			peek_sw_cq_cons = HWQ_CMP((peek_sw_cq_cons), &cq->hwq);
			peek_hwcqe = bnxt_qplib_get_qe(&cq->hwq,
						       peek_sw_cq_cons, NULL);
			/* If the next hwcqe is VALID */
			if (CQE_CMP_VALID(peek_hwcqe, peek_raw_cq_cons,
					  cq->hwq.max_elements)) {
			/*
			 * The valid test of the entry must be done first before
			 * reading any further.
			 */
				dma_rmb();
				/* If the next hwcqe is a REQ */
				if ((peek_hwcqe->cqe_type_toggle &
				    CQ_BASE_CQE_TYPE_MASK) ==
				    CQ_BASE_CQE_TYPE_REQ) {
					peek_req_hwcqe = (struct cq_req *)
							 peek_hwcqe;
					peek_qp = (struct bnxt_qplib_qp *)
						((unsigned long)
						 le64_to_cpu
						 (peek_req_hwcqe->qp_handle));
					peek_sq = &peek_qp->sq;
					peek_sq_cons_idx =
						((le16_to_cpu(
						  peek_req_hwcqe->sq_cons_idx)
						  - 1) % sq->max_wqe);
					/* If the hwcqe's sq's wr_id matches */
					if (peek_sq == sq &&
					    sq->swq[peek_sq_cons_idx].wr_id ==
					    BNXT_QPLIB_FENCE_WRID) {
						/*
						 *  Unbreak only if the phantom
						 *  comes back
						 */
						dev_dbg(&cq->hwq.pdev->dev,
							"FP: Got Phantom CQE\n");
						sq->condition = false;
						sq->single = true;
						rc = 0;
						goto out;
					}
				}
				/* Valid but not the phantom, so keep looping */
			} else {
				/* Not valid yet, just exit and wait */
				rc = -EINVAL;
				goto out;
			}
			peek_sw_cq_cons++;
			peek_raw_cq_cons++;
		}
		dev_err(&cq->hwq.pdev->dev,
			"Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x\n",
			cq_cons, qp->id, swq_last, cqe_sq_cons);
		rc = -EINVAL;
	}
out:
	return rc;
}

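/* bnxt_qplib_cq_process_req() - process a REQ (send) completion.
 *
 * Hardware may aggregate several send completions into one CQE, so
 * walk the SWQ from the current consumer up to the CQE's sq_cons_idx
 * and fabricate a CQE for each signaled WQE along the way.  The last
 * entry carries the hardware status; on error the QP is marked and
 * added to the CQ flush list.  do_wa9060() is consulted before each
 * successful completion to handle phantom fence CQEs.
 */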
static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
				     struct cq_req *hwcqe,
				     struct bnxt_qplib_cqe **pcqe, int *budget,
				     u32 cq_cons, struct bnxt_qplib_qp **lib_qp)
{
	struct bnxt_qplib_swq *swq;
	struct bnxt_qplib_cqe *cqe;
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *sq;
	u32 cqe_sq_cons;
	int rc = 0;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev,
			"FP: Process Req qp is NULL\n");
		return -EINVAL;
	}
	sq = &qp->sq;

	cqe_sq_cons = le16_to_cpu(hwcqe->sq_cons_idx) % sq->max_wqe;
	if (qp->sq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		goto done;
	}
	/* Require to walk the sq's swq to fabricate CQEs for all previously
	 * signaled SWQEs due to CQE aggregation from the current sq cons
	 * to the cqe_sq_cons
	 */
	cqe = *pcqe;
	while (*budget) {
		if (sq->swq_last == cqe_sq_cons)
			/* Done */
			break;

		swq = &sq->swq[sq->swq_last];
		memset(cqe, 0, sizeof(*cqe));
		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
		cqe->qp_handle = (u64)(unsigned long)qp;
		cqe->src_qp = qp->id;
		cqe->wr_id = swq->wr_id;
		if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID)
			goto skip;
		cqe->type = swq->type;

		/* For the last CQE, check for status.  For errors, regardless
		 * of the request being signaled or not, it must complete with
		 * the hwcqe error status
		 */
		if (swq->next_idx == cqe_sq_cons &&
		    hwcqe->status != CQ_REQ_STATUS_OK) {
			cqe->status = hwcqe->status;
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Processed Req wr_id[%d] = 0x%llx with status 0x%x\n",
				sq->swq_last, cqe->wr_id, cqe->status);
			cqe++;
			(*budget)--;
			bnxt_qplib_mark_qp_error(qp);
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		} else {
			/* Before we complete, do WA 9060 */
			if (do_wa9060(qp, cq, cq_cons, sq->swq_last,
				      cqe_sq_cons)) {
				*lib_qp = qp;
				goto out;
			}
			if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
				cqe->status = CQ_REQ_STATUS_OK;
				cqe++;
				(*budget)--;
			}
		}
skip:
		bnxt_qplib_hwq_incr_cons(&sq->hwq, swq->slots);
		sq->swq_last = swq->next_idx;
		if (sq->single)
			break;
	}
out:
	*pcqe = cqe;
	if (sq->swq_last != cqe_sq_cons) {
		/* Out of budget */
		rc = -EAGAIN;
		goto done;
	}
	/*
	 * Back to normal completion mode only after it has completed all of
	 * the WC for this CQE
	 */
	sq->single = false;
done:
	return rc;
}

static void bnxt_qplib_release_srqe(struct bnxt_qplib_srq *srq, u32 tag)
{
	spin_lock(&srq->hwq.lock);
	srq->swq[srq->last_idx].next_idx = (int)tag;
	srq->last_idx = (int)tag;
	srq->swq[srq->last_idx].next_idx = -1;
	srq->hwq.cons++; /* Support for SRQE counter */
	spin_unlock(&srq->hwq.lock);
}

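/* bnxt_qplib_cq_process_res_rc() - process an RC receive completion.
 *
 * Translates the hardware CQE into a bnxt_qplib_cqe, resolving the
 * wr_id either through the SRQ (releasing the SRQE back to the free
 * list) or through the RQ SWQ, which must complete in order.  A bad
 * hardware status moves the QP to the error state and onto the CQ
 * flush list.
 */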
static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
					struct cq_res_rc *hwcqe,
					struct bnxt_qplib_cqe **pcqe,
					int *budget)
{
	struct bnxt_qplib_srq *srq;
	struct bnxt_qplib_cqe *cqe;
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *rq;
	u32 wr_id_idx;
	int rc = 0;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev, "process_cq RC qp is NULL\n");
		return -EINVAL;
	}
	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		goto done;
	}

	cqe = *pcqe;
	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
	cqe->length = le32_to_cpu(hwcqe->length);
	cqe->invrkey = le32_to_cpu(hwcqe->imm_data_or_inv_r_key);
	cqe->mr_handle = le64_to_cpu(hwcqe->mr_handle);
	cqe->flags = le16_to_cpu(hwcqe->flags);
	cqe->status = hwcqe->status;
	cqe->qp_handle = (u64)(unsigned long)qp;

	wr_id_idx = le32_to_cpu(hwcqe->srq_or_rq_wr_id) &
				CQ_RES_RC_SRQ_OR_RQ_WR_ID_MASK;
	if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
		srq = qp->srq;
		if (!srq)
			return -EINVAL;
		if (wr_id_idx >= srq->hwq.max_elements) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process RC wr_id idx 0x%x exceeded SRQ max 0x%x\n",
				wr_id_idx, srq->hwq.max_elements);
			return -EINVAL;
		}
		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
		bnxt_qplib_release_srqe(srq, wr_id_idx);
		cqe++;
		(*budget)--;
		*pcqe = cqe;
	} else {
		struct bnxt_qplib_swq *swq;

		rq = &qp->rq;
		if (wr_id_idx > (rq->max_wqe - 1)) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process RC wr_id idx 0x%x exceeded RQ max 0x%x\n",
				wr_id_idx, rq->max_wqe);
			return -EINVAL;
		}
		if (wr_id_idx != rq->swq_last)
			return -EINVAL;
		swq = &rq->swq[rq->swq_last];
		cqe->wr_id = swq->wr_id;
		cqe++;
		(*budget)--;
		bnxt_qplib_hwq_incr_cons(&rq->hwq, swq->slots);
		rq->swq_last = swq->next_idx;
		*pcqe = cqe;

		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		}
	}

done:
	return rc;
}

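/* bnxt_qplib_cq_process_res_ud() - process a UD receive completion.
 *
 * Same flow as the RC variant, but additionally recovers the UD
 * metadata: payload length, CFA metadata, immediate data, source MAC,
 * and the source QP number reassembled from its split low/high fields.
 */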
static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
					struct cq_res_ud *hwcqe,
					struct bnxt_qplib_cqe **pcqe,
					int *budget)
{
	struct bnxt_qplib_srq *srq;
	struct bnxt_qplib_cqe *cqe;
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *rq;
	u32 wr_id_idx;
	int rc = 0;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev, "process_cq UD qp is NULL\n");
		return -EINVAL;
	}
	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		goto done;
	}
	cqe = *pcqe;
	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
	cqe->length = le16_to_cpu(hwcqe->length) & CQ_RES_UD_LENGTH_MASK;
	cqe->cfa_meta = le16_to_cpu(hwcqe->cfa_metadata);
	cqe->invrkey = le32_to_cpu(hwcqe->imm_data);
	cqe->flags = le16_to_cpu(hwcqe->flags);
	cqe->status = hwcqe->status;
	cqe->qp_handle = (u64)(unsigned long)qp;
	/* FIXME: Endianness fix needed for smac */
	memcpy(cqe->smac, hwcqe->src_mac, ETH_ALEN);
	wr_id_idx = le32_to_cpu(hwcqe->src_qp_high_srq_or_rq_wr_id)
				& CQ_RES_UD_SRQ_OR_RQ_WR_ID_MASK;
	cqe->src_qp = le16_to_cpu(hwcqe->src_qp_low) |
				  ((le32_to_cpu(
				  hwcqe->src_qp_high_srq_or_rq_wr_id) &
				 CQ_RES_UD_SRC_QP_HIGH_MASK) >> 8);

	if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
		srq = qp->srq;
		if (!srq)
			return -EINVAL;

		if (wr_id_idx >= srq->hwq.max_elements) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process UD wr_id idx 0x%x exceeded SRQ max 0x%x\n",
				wr_id_idx, srq->hwq.max_elements);
			return -EINVAL;
		}
		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
		bnxt_qplib_release_srqe(srq, wr_id_idx);
		cqe++;
		(*budget)--;
		*pcqe = cqe;
	} else {
		struct bnxt_qplib_swq *swq;

		rq = &qp->rq;
		if (wr_id_idx > (rq->max_wqe - 1)) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process UD wr_id idx 0x%x exceeded RQ max 0x%x\n",
				wr_id_idx, rq->max_wqe);
			return -EINVAL;
		}

		if (rq->swq_last != wr_id_idx)
			return -EINVAL;
		swq = &rq->swq[rq->swq_last];
		cqe->wr_id = swq->wr_id;
		cqe++;
		(*budget)--;
		bnxt_qplib_hwq_incr_cons(&rq->hwq, swq->slots);
		rq->swq_last = swq->next_idx;
		*pcqe = cqe;

		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		}
	}
done:
	return rc;
}

bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
{
	struct cq_base *hw_cqe;
	u32 sw_cons, raw_cons;
	bool rc = true;

	raw_cons = cq->hwq.cons;
	sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
	hw_cqe = bnxt_qplib_get_qe(&cq->hwq, sw_cons, NULL);
	/* Check for Valid bit. If the CQE is valid, return false */
	rc = !CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements);
	return rc;
}

static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
						struct cq_res_raweth_qp1 *hwcqe,
						struct bnxt_qplib_cqe **pcqe,
						int *budget)
{
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *rq;
	struct bnxt_qplib_srq *srq;
	struct bnxt_qplib_cqe *cqe;
	u32 wr_id_idx;
	int rc = 0;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev, "process_cq Raw/QP1 qp is NULL\n");
		return -EINVAL;
	}
	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		goto done;
	}
	cqe = *pcqe;
	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
	cqe->flags = le16_to_cpu(hwcqe->flags);
	cqe->qp_handle = (u64)(unsigned long)qp;

	wr_id_idx =
		le32_to_cpu(hwcqe->raweth_qp1_payload_offset_srq_or_rq_wr_id)
				& CQ_RES_RAWETH_QP1_SRQ_OR_RQ_WR_ID_MASK;
	cqe->src_qp = qp->id;
	if (qp->id == 1 && !cqe->length) {
		/* Add workaround for the length misdetection */
		cqe->length = 296;
	} else {
		cqe->length = le16_to_cpu(hwcqe->length);
	}
	cqe->pkey_index = qp->pkey_index;
	memcpy(cqe->smac, qp->smac, 6);

	cqe->raweth_qp1_flags = le16_to_cpu(hwcqe->raweth_qp1_flags);
	cqe->raweth_qp1_flags2 = le32_to_cpu(hwcqe->raweth_qp1_flags2);
	cqe->raweth_qp1_metadata = le32_to_cpu(hwcqe->raweth_qp1_metadata);

	if (cqe->flags & CQ_RES_RAWETH_QP1_FLAGS_SRQ_SRQ) {
		srq = qp->srq;
		if (!srq) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: SRQ used but not defined??\n");
			return -EINVAL;
		}
		if (wr_id_idx >= srq->hwq.max_elements) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process Raw/QP1 wr_id idx 0x%x exceeded SRQ max 0x%x\n",
				wr_id_idx, srq->hwq.max_elements);
			return -EINVAL;
		}
		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
		bnxt_qplib_release_srqe(srq, wr_id_idx);
		cqe++;
		(*budget)--;
		*pcqe = cqe;
	} else {
		struct bnxt_qplib_swq *swq;

		rq = &qp->rq;
		if (wr_id_idx > (rq->max_wqe - 1)) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process Raw/QP1 RQ wr_id idx 0x%x exceeded RQ max 0x%x\n",
				wr_id_idx, rq->max_wqe);
			return -EINVAL;
		}
		if (rq->swq_last != wr_id_idx)
			return -EINVAL;
		swq = &rq->swq[rq->swq_last];
		cqe->wr_id = swq->wr_id;
		cqe++;
		(*budget)--;
		bnxt_qplib_hwq_incr_cons(&rq->hwq, swq->slots);
		rq->swq_last = swq->next_idx;
		*pcqe = cqe;

		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		}
	}

done:
	return rc;
}

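/* bnxt_qplib_cq_process_terminal() - process a TERMINAL CQE.
 *
 * A terminal CQE moves the QP to the error state.  It may also
 * aggregate successful send completions, so first complete signaled
 * SQ WQEs up to the reported sq_cons_idx with status OK; 0xFFFF means
 * the SQ index is not valid.  The RQ is then handed to the CQ flush
 * list so all posted RQEs complete with FLUSHED_ERR.
 */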
static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
					  struct cq_terminal *hwcqe,
					  struct bnxt_qplib_cqe **pcqe,
					  int *budget)
{
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *sq, *rq;
	struct bnxt_qplib_cqe *cqe;
	u32 swq_last = 0, cqe_cons;
	int rc = 0;

	/* Check the Status */
	if (hwcqe->status != CQ_TERMINAL_STATUS_OK)
		dev_warn(&cq->hwq.pdev->dev,
			 "FP: CQ Process Terminal Error status = 0x%x\n",
			 hwcqe->status);

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp)
		return -EINVAL;

	/* Must block new posting of SQ and RQ */
	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;

	sq = &qp->sq;
	rq = &qp->rq;

	cqe_cons = le16_to_cpu(hwcqe->sq_cons_idx);
	if (cqe_cons == 0xFFFF)
		goto do_rq;
	cqe_cons %= sq->max_wqe;

	if (qp->sq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		goto sq_done;
	}

	/* Terminal CQE can also include aggregated successful CQEs prior.
	 * So we must complete all CQEs from the current sq's cons to the
	 * cq_cons with status OK
	 */
	cqe = *pcqe;
	while (*budget) {
		swq_last = sq->swq_last;
		if (swq_last == cqe_cons)
			break;
		if (sq->swq[swq_last].flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
			memset(cqe, 0, sizeof(*cqe));
			cqe->status = CQ_REQ_STATUS_OK;
			cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
			cqe->qp_handle = (u64)(unsigned long)qp;
			cqe->src_qp = qp->id;
			cqe->wr_id = sq->swq[swq_last].wr_id;
			cqe->type = sq->swq[swq_last].type;
			cqe++;
			(*budget)--;
		}
		bnxt_qplib_hwq_incr_cons(&sq->hwq, sq->swq[swq_last].slots);
		sq->swq_last = sq->swq[swq_last].next_idx;
	}
	*pcqe = cqe;
	if (!(*budget) && swq_last != cqe_cons) {
		/* Out of budget */
		rc = -EAGAIN;
		goto sq_done;
	}
sq_done:
	if (rc)
		return rc;
do_rq:
	cqe_cons = le16_to_cpu(hwcqe->rq_cons_idx);
	if (cqe_cons == 0xFFFF) {
		goto done;
	} else if (cqe_cons > rq->max_wqe - 1) {
		dev_err(&cq->hwq.pdev->dev,
			"FP: CQ Processed terminal reported rq_cons_idx 0x%x exceeds max 0x%x\n",
			cqe_cons, rq->max_wqe);
		rc = -EINVAL;
		goto done;
	}

	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		rc = 0;
		goto done;
	}

	/* A terminal CQE requires all posted RQEs to complete with
	 * FLUSHED_ERR, from the current rq->cons to rq->prod, regardless
	 * of the rq_cons_idx the terminal CQE reports.
	 */

	/* Add qp to flush list of the CQ */
	bnxt_qplib_add_flush_qp(qp);
done:
	return rc;
}

static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq,
					struct cq_cutoff *hwcqe)
{
	/* Check the Status */
	if (hwcqe->status != CQ_CUTOFF_STATUS_OK) {
		dev_err(&cq->hwq.pdev->dev,
			"FP: CQ Process Cutoff Error status = 0x%x\n",
			hwcqe->status);
		return -EINVAL;
	}
	clear_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags);
	wake_up_interruptible(&cq->waitq);

	return 0;
}

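/* bnxt_qplib_process_flush_list() - drain flush-list QPs into CQEs.
 *
 * Under the CQ flush lock, walk every QP queued on this CQ's SQ and
 * RQ flush lists and emit flush completions for their outstanding
 * WQEs, up to num_cqes entries.  Returns how many CQEs were produced.
 */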
int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
				  struct bnxt_qplib_cqe *cqe,
				  int num_cqes)
{
	struct bnxt_qplib_qp *qp = NULL;
	u32 budget = num_cqes;
	unsigned long flags;

	spin_lock_irqsave(&cq->flush_lock, flags);
	list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
		dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing SQ QP= %p\n", qp);
		__flush_sq(&qp->sq, qp, &cqe, &budget);
	}

	list_for_each_entry(qp, &cq->rqf_head, rq_flush) {
		dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing RQ QP= %p\n", qp);
		__flush_rq(&qp->rq, qp, &cqe, &budget);
	}
	spin_unlock_irqrestore(&cq->flush_lock, flags);

	return num_cqes - budget;
}

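/* bnxt_qplib_poll_cq() - poll up to num_cqes completions from a CQ.
 *
 * Consumes valid hardware CQEs in ring order, dispatching each type
 * (REQ, RES_RC, RES_UD, RES_RAWETH_QP1, TERMINAL, CUT_OFF) to its
 * handler; the dma_rmb() orders the validity check before any other
 * field is read.  -EAGAIN from a handler (budget exhausted, or a
 * WA9060 wait, in which case *lib_qp is set) stops the loop with the
 * consumer left at the unprocessed entry; other errors just skip that
 * CQE.  The consumer index and CQ doorbell are updated only if
 * progress was made.  A caller sketch (for illustration only):
 *
 *	struct bnxt_qplib_cqe cqes[16];
 *	struct bnxt_qplib_qp *lib_qp = NULL;
 *	int n = bnxt_qplib_poll_cq(cq, cqes, 16, &lib_qp);
 *
 * Returns the number of CQEs written to the caller's array.
 */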
int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
		       int num_cqes, struct bnxt_qplib_qp **lib_qp)
{
	struct cq_base *hw_cqe;
	u32 sw_cons, raw_cons;
	int budget, rc = 0;
	u8 type;

	raw_cons = cq->hwq.cons;
	budget = num_cqes;

	while (budget) {
		sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
		hw_cqe = bnxt_qplib_get_qe(&cq->hwq, sw_cons, NULL);

		/* Check for Valid bit */
		if (!CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements))
			break;

		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();
		/* From the device's respective CQE format to qplib_wc */
		type = hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
		switch (type) {
		case CQ_BASE_CQE_TYPE_REQ:
			rc = bnxt_qplib_cq_process_req(cq,
						       (struct cq_req *)hw_cqe,
						       &cqe, &budget,
						       sw_cons, lib_qp);
			break;
		case CQ_BASE_CQE_TYPE_RES_RC:
			rc = bnxt_qplib_cq_process_res_rc(cq,
							  (struct cq_res_rc *)
							  hw_cqe, &cqe,
							  &budget);
			break;
		case CQ_BASE_CQE_TYPE_RES_UD:
			rc = bnxt_qplib_cq_process_res_ud
					(cq, (struct cq_res_ud *)hw_cqe, &cqe,
					 &budget);
			break;
		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
			rc = bnxt_qplib_cq_process_res_raweth_qp1
					(cq, (struct cq_res_raweth_qp1 *)
					 hw_cqe, &cqe, &budget);
			break;
		case CQ_BASE_CQE_TYPE_TERMINAL:
			rc = bnxt_qplib_cq_process_terminal
					(cq, (struct cq_terminal *)hw_cqe,
					 &cqe, &budget);
			break;
		case CQ_BASE_CQE_TYPE_CUT_OFF:
			bnxt_qplib_cq_process_cutoff
					(cq, (struct cq_cutoff *)hw_cqe);
			/* Done processing this CQ */
			goto exit;
		default:
			dev_err(&cq->hwq.pdev->dev,
				"process_cq unknown type 0x%lx\n",
				hw_cqe->cqe_type_toggle &
				CQ_BASE_CQE_TYPE_MASK);
			rc = -EINVAL;
			break;
		}
		if (rc < 0) {
			if (rc == -EAGAIN)
				break;
			/* Error while processing the CQE, just skip to the
			 * next one
			 */
			if (type != CQ_BASE_CQE_TYPE_TERMINAL)
				dev_err(&cq->hwq.pdev->dev,
					"process_cqe error rc = 0x%x\n", rc);
		}
		raw_cons++;
	}
	if (cq->hwq.cons != raw_cons) {
		cq->hwq.cons = raw_cons;
		bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ);
	}
exit:
	return num_cqes - budget;
}

void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
{
	if (arm_type)
		bnxt_qplib_ring_db(&cq->dbinfo, arm_type);
	/* Using cq->arm_state variable to track whether to issue cq handler */
	atomic_set(&cq->arm_state, 1);
}

void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp)
{
	flush_workqueue(qp->scq->nq->cqn_wq);
	if (qp->scq != qp->rcq)
		flush_workqueue(qp->rcq->nq->cqn_wq);
}