/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: Fast Path Operators
 */

#define dev_fmt(fmt) "QPLIB: " fmt

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/prefetch.h>
#include <linux/if_ether.h>
#include <rdma/ib_mad.h>

#include "roce_hsi.h"

#include "qplib_res.h"
#include "qplib_rcfw.h"
#include "qplib_sp.h"
#include "qplib_fp.h"
#include <rdma/ib_addr.h>
#include "bnxt_ulp.h"
#include "bnxt_re.h"
#include "ib_verbs.h"

static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);

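/* Reset the SQ's phantom-CQE search state (condition, send_phantom and
 * single) so that any in-progress phantom completion processing is
 * abandoned once the QP is queued for flushing.
 */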
static void bnxt_qplib_cancel_phantom_processing(struct bnxt_qplib_qp *qp)
{
	qp->sq.condition = false;
	qp->sq.send_phantom = false;
	qp->sq.single = false;
}

/* Flush list */
static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_cq *scq, *rcq;

	scq = qp->scq;
	rcq = qp->rcq;

	if (!qp->sq.flushed) {
		dev_dbg(&scq->hwq.pdev->dev,
			"FP: Adding to SQ Flush list = %p\n", qp);
		bnxt_qplib_cancel_phantom_processing(qp);
		list_add_tail(&qp->sq_flush, &scq->sqf_head);
		qp->sq.flushed = true;
	}
	if (!qp->srq) {
		if (!qp->rq.flushed) {
			dev_dbg(&rcq->hwq.pdev->dev,
				"FP: Adding to RQ Flush list = %p\n", qp);
			list_add_tail(&qp->rq_flush, &rcq->rqf_head);
			qp->rq.flushed = true;
		}
	}
}

static void bnxt_qplib_acquire_cq_flush_locks(struct bnxt_qplib_qp *qp,
					      unsigned long *flags)
	__acquires(&qp->scq->flush_lock) __acquires(&qp->rcq->flush_lock)
{
	spin_lock_irqsave(&qp->scq->flush_lock, *flags);
	if (qp->scq == qp->rcq)
		__acquire(&qp->rcq->flush_lock);
	else
		spin_lock(&qp->rcq->flush_lock);
}

static void bnxt_qplib_release_cq_flush_locks(struct bnxt_qplib_qp *qp,
					      unsigned long *flags)
	__releases(&qp->scq->flush_lock) __releases(&qp->rcq->flush_lock)
{
	if (qp->scq == qp->rcq)
		__release(&qp->rcq->flush_lock);
	else
		spin_unlock(&qp->rcq->flush_lock);
	spin_unlock_irqrestore(&qp->scq->flush_lock, *flags);
}

void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
{
	unsigned long flags;

	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
	__bnxt_qplib_add_flush_qp(qp);
	bnxt_qplib_release_cq_flush_locks(qp, &flags);
}

static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
{
	if (qp->sq.flushed) {
		qp->sq.flushed = false;
		list_del(&qp->sq_flush);
	}
	if (!qp->srq) {
		if (qp->rq.flushed) {
			qp->rq.flushed = false;
			list_del(&qp->rq_flush);
		}
	}
}

void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp)
{
	unsigned long flags;

	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
	__clean_cq(qp->scq, (u64)(unsigned long)qp);
	qp->sq.hwq.prod = 0;
	qp->sq.hwq.cons = 0;
	__clean_cq(qp->rcq, (u64)(unsigned long)qp);
	qp->rq.hwq.prod = 0;
	qp->rq.hwq.cons = 0;

	__bnxt_qplib_del_flush_qp(qp);
	bnxt_qplib_release_cq_flush_locks(qp, &flags);
}

static void bnxt_qpn_cqn_sched_task(struct work_struct *work)
{
	struct bnxt_qplib_nq_work *nq_work =
			container_of(work, struct bnxt_qplib_nq_work, work);

	struct bnxt_qplib_cq *cq = nq_work->cq;
	struct bnxt_qplib_nq *nq = nq_work->nq;

	if (cq && nq) {
		spin_lock_bh(&cq->compl_lock);
		if (atomic_read(&cq->arm_state) && nq->cqn_handler) {
			dev_dbg(&nq->pdev->dev,
				"%s:Trigger cq = %p event nq = %p\n",
				__func__, cq, nq);
			nq->cqn_handler(nq, cq);
		}
		spin_unlock_bh(&cq->compl_lock);
	}
	kfree(nq_work);
}

static void bnxt_qplib_free_qp_hdr_buf(struct bnxt_qplib_res *res,
				       struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	struct bnxt_qplib_q *sq = &qp->sq;

	if (qp->rq_hdr_buf)
		dma_free_coherent(&res->pdev->dev,
				  rq->max_wqe * qp->rq_hdr_buf_size,
				  qp->rq_hdr_buf, qp->rq_hdr_buf_map);
	if (qp->sq_hdr_buf)
		dma_free_coherent(&res->pdev->dev,
				  sq->max_wqe * qp->sq_hdr_buf_size,
				  qp->sq_hdr_buf, qp->sq_hdr_buf_map);
	qp->rq_hdr_buf = NULL;
	qp->sq_hdr_buf = NULL;
	qp->rq_hdr_buf_map = 0;
	qp->sq_hdr_buf_map = 0;
	qp->sq_hdr_buf_size = 0;
	qp->rq_hdr_buf_size = 0;
}

static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res,
				       struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	struct bnxt_qplib_q *sq = &qp->sq;
	int rc = 0;

	if (qp->sq_hdr_buf_size && sq->max_wqe) {
		qp->sq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
					sq->max_wqe * qp->sq_hdr_buf_size,
					&qp->sq_hdr_buf_map, GFP_KERNEL);
		if (!qp->sq_hdr_buf) {
			rc = -ENOMEM;
			dev_err(&res->pdev->dev,
				"Failed to create sq_hdr_buf\n");
			goto fail;
		}
	}

	if (qp->rq_hdr_buf_size && rq->max_wqe) {
		qp->rq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
						    rq->max_wqe *
						    qp->rq_hdr_buf_size,
						    &qp->rq_hdr_buf_map,
						    GFP_KERNEL);
		if (!qp->rq_hdr_buf) {
			rc = -ENOMEM;
			dev_err(&res->pdev->dev,
				"Failed to create rq_hdr_buf\n");
			goto fail;
		}
	}
	return 0;

fail:
	bnxt_qplib_free_qp_hdr_buf(res, qp);
	return rc;
}

static void clean_nq(struct bnxt_qplib_nq *nq, struct bnxt_qplib_cq *cq)
{
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	struct nq_base *nqe, **nq_ptr;
	int budget = nq->budget;
	uintptr_t q_handle;
	u16 type;

	spin_lock_bh(&hwq->lock);
	/* Service the NQ until empty */
	while (budget--) {
		nq_ptr = (struct nq_base **)hwq->pbl_ptr;
		nqe = &nq_ptr[NQE_PG(hwq->cons)][NQE_IDX(hwq->cons)];
		if (!NQE_CMP_VALID(nqe, nq->nq_db.dbinfo.flags))
			break;

		/*
		 * The valid test of the entry must be done before reading
		 * any further.
		 */
		dma_rmb();

		type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
		switch (type) {
		case NQ_BASE_TYPE_CQ_NOTIFICATION:
		{
			struct nq_cn *nqcne = (struct nq_cn *)nqe;

			q_handle = le32_to_cpu(nqcne->cq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
						     << 32;
			if ((unsigned long)cq == q_handle) {
				nqcne->cq_handle_low = 0;
				nqcne->cq_handle_high = 0;
				cq->cnq_events++;
			}
			break;
		}
		default:
			break;
		}
		bnxt_qplib_hwq_incr_cons(hwq->max_elements, &hwq->cons,
					 1, &nq->nq_db.dbinfo.flags);
	}
	spin_unlock_bh(&hwq->lock);
}

/* Wait for receiving all NQEs for this CQ and clean the NQEs associated with
 * this CQ.
 */
static void __wait_for_all_nqes(struct bnxt_qplib_cq *cq, u16 cnq_events)
{
	u32 retry_cnt = 100;

	while (retry_cnt--) {
		if (cnq_events == cq->cnq_events)
			return;
		usleep_range(50, 100);
		clean_nq(cq->nq, cq);
	}
}

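/* NQ servicing tasklet: drain up to nq->budget valid entries from the
 * notification queue, dispatch each to the registered CQ or SRQ handler,
 * and ring the NQ doorbell to re-arm the queue.
 */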
static void bnxt_qplib_service_nq(struct tasklet_struct *t)
{
	struct bnxt_qplib_nq *nq = from_tasklet(nq, t, nq_tasklet);
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	struct bnxt_qplib_cq *cq;
	int budget = nq->budget;
	struct nq_base *nqe;
	uintptr_t q_handle;
	u32 hw_polled = 0;
	u16 type;

	spin_lock_bh(&hwq->lock);
	/* Service the NQ until empty */
	while (budget--) {
		nqe = bnxt_qplib_get_qe(hwq, hwq->cons, NULL);
		if (!NQE_CMP_VALID(nqe, nq->nq_db.dbinfo.flags))
			break;

		/*
		 * The valid test of the entry must be done before reading
		 * any further.
		 */
		dma_rmb();

		type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
		switch (type) {
		case NQ_BASE_TYPE_CQ_NOTIFICATION:
		{
			struct nq_cn *nqcne = (struct nq_cn *)nqe;
			struct bnxt_re_cq *cq_p;

			q_handle = le32_to_cpu(nqcne->cq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
						     << 32;
			cq = (struct bnxt_qplib_cq *)(unsigned long)q_handle;
			if (!cq)
				break;
			cq->toggle = (le16_to_cpu(nqe->info10_type) &
					NQ_CN_TOGGLE_MASK) >> NQ_CN_TOGGLE_SFT;
			cq->dbinfo.toggle = cq->toggle;
			cq_p = container_of(cq, struct bnxt_re_cq, qplib_cq);
			if (cq_p->uctx_cq_page)
				*((u32 *)cq_p->uctx_cq_page) = cq->toggle;

			bnxt_qplib_armen_db(&cq->dbinfo,
					    DBC_DBC_TYPE_CQ_ARMENA);
			spin_lock_bh(&cq->compl_lock);
			atomic_set(&cq->arm_state, 0);
			if (nq->cqn_handler(nq, (cq)))
				dev_warn(&nq->pdev->dev,
					 "cqn - type 0x%x not handled\n", type);
			cq->cnq_events++;
			spin_unlock_bh(&cq->compl_lock);
			break;
		}
		case NQ_BASE_TYPE_SRQ_EVENT:
		{
			struct bnxt_qplib_srq *srq;
			struct bnxt_re_srq *srq_p;
			struct nq_srq_event *nqsrqe =
						(struct nq_srq_event *)nqe;

			q_handle = le32_to_cpu(nqsrqe->srq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqsrqe->srq_handle_high)
				     << 32;
			srq = (struct bnxt_qplib_srq *)q_handle;
			srq->toggle = (le16_to_cpu(nqe->info10_type) & NQ_CN_TOGGLE_MASK)
				      >> NQ_CN_TOGGLE_SFT;
			srq->dbinfo.toggle = srq->toggle;
			srq_p = container_of(srq, struct bnxt_re_srq, qplib_srq);
			if (srq_p->uctx_srq_page)
				*((u32 *)srq_p->uctx_srq_page) = srq->toggle;
			bnxt_qplib_armen_db(&srq->dbinfo,
					    DBC_DBC_TYPE_SRQ_ARMENA);
			if (nq->srqn_handler(nq,
					     (struct bnxt_qplib_srq *)q_handle,
					     nqsrqe->event))
				dev_warn(&nq->pdev->dev,
					 "SRQ event 0x%x not handled\n",
					 nqsrqe->event);
			break;
		}
		case NQ_BASE_TYPE_DBQ_EVENT:
			break;
		default:
			dev_warn(&nq->pdev->dev,
				 "nqe with type = 0x%x not handled\n", type);
			break;
		}
		hw_polled++;
		bnxt_qplib_hwq_incr_cons(hwq->max_elements, &hwq->cons,
					 1, &nq->nq_db.dbinfo.flags);
	}
	if (hw_polled)
		bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, true);
	spin_unlock_bh(&hwq->lock);
}

/* bnxt_re_synchronize_nq - self polling notification queue.
 * @nq - notification queue pointer
 *
 * This function will start polling entries of a given notification queue
 * for all pending entries.
 * This function is useful to synchronize notification entries while resources
 * are going away.
 */

void bnxt_re_synchronize_nq(struct bnxt_qplib_nq *nq)
{
	int budget = nq->budget;

	nq->budget = nq->hwq.max_elements;
	bnxt_qplib_service_nq(&nq->nq_tasklet);
	nq->budget = budget;
}

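/* MSI-X handler for the NQ: prefetch the next NQ element to warm the
 * cache and defer the actual processing to the NQ tasklet.
 */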
static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
{
	struct bnxt_qplib_nq *nq = dev_instance;
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	u32 sw_cons;

	/* Prefetch the NQ element */
	sw_cons = HWQ_CMP(hwq->cons, hwq);
	prefetch(bnxt_qplib_get_qe(hwq, sw_cons, NULL));

	/* Fan out to CPU affinitized kthreads? */
	tasklet_schedule(&nq->nq_tasklet);

	return IRQ_HANDLED;
}

void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill)
{
	if (!nq->requested)
		return;

	nq->requested = false;
	/* Mask h/w interrupt */
	bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, false);
	/* Sync with last running IRQ handler */
	synchronize_irq(nq->msix_vec);
	irq_set_affinity_hint(nq->msix_vec, NULL);
	free_irq(nq->msix_vec, nq);
	kfree(nq->name);
	nq->name = NULL;

	if (kill)
		tasklet_kill(&nq->nq_tasklet);
	tasklet_disable(&nq->nq_tasklet);
}

void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
{
	if (nq->cqn_wq) {
		destroy_workqueue(nq->cqn_wq);
		nq->cqn_wq = NULL;
	}

	/* Make sure the HW is stopped! */
	bnxt_qplib_nq_stop_irq(nq, true);

	if (nq->nq_db.reg.bar_reg) {
		iounmap(nq->nq_db.reg.bar_reg);
		nq->nq_db.reg.bar_reg = NULL;
	}

	nq->cqn_handler = NULL;
	nq->srqn_handler = NULL;
	nq->msix_vec = 0;
}

int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
			    int msix_vector, bool need_init)
{
	struct bnxt_qplib_res *res = nq->res;
	int rc;

	if (nq->requested)
		return -EFAULT;

	nq->msix_vec = msix_vector;
	if (need_init)
		tasklet_setup(&nq->nq_tasklet, bnxt_qplib_service_nq);
	else
		tasklet_enable(&nq->nq_tasklet);

	nq->name = kasprintf(GFP_KERNEL, "bnxt_re-nq-%d@pci:%s",
			     nq_indx, pci_name(res->pdev));
	if (!nq->name)
		return -ENOMEM;
	rc = request_irq(nq->msix_vec, bnxt_qplib_nq_irq, 0, nq->name, nq);
	if (rc) {
		kfree(nq->name);
		nq->name = NULL;
		tasklet_disable(&nq->nq_tasklet);
		return rc;
	}

	cpumask_clear(&nq->mask);
	cpumask_set_cpu(nq_indx, &nq->mask);
	rc = irq_set_affinity_hint(nq->msix_vec, &nq->mask);
	if (rc) {
		dev_warn(&nq->pdev->dev,
			 "set affinity failed; vector: %d nq_idx: %d\n",
			 nq->msix_vec, nq_indx);
	}
	nq->requested = true;
	bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, res->cctx, true);

	return rc;
}

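/* Map the NQ consumer doorbell from the PCI BAR so the driver can ring
 * and arm the NQ; 8 bytes are always mapped so the 57500 series'
 * larger doorbell also fits.
 */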
static int bnxt_qplib_map_nq_db(struct bnxt_qplib_nq *nq, u32 reg_offt)
{
	resource_size_t reg_base;
	struct bnxt_qplib_nq_db *nq_db;
	struct pci_dev *pdev;

	pdev = nq->pdev;
	nq_db = &nq->nq_db;

	nq_db->dbinfo.flags = 0;
	nq_db->reg.bar_id = NQ_CONS_PCI_BAR_REGION;
	nq_db->reg.bar_base = pci_resource_start(pdev, nq_db->reg.bar_id);
	if (!nq_db->reg.bar_base) {
		dev_err(&pdev->dev, "QPLIB: NQ BAR region %d resc start is 0!",
			nq_db->reg.bar_id);
		return -ENOMEM;
	}

	reg_base = nq_db->reg.bar_base + reg_offt;
	/* Unconditionally map 8 bytes to support 57500 series */
	nq_db->reg.len = 8;
	nq_db->reg.bar_reg = ioremap(reg_base, nq_db->reg.len);
	if (!nq_db->reg.bar_reg) {
		dev_err(&pdev->dev, "QPLIB: NQ BAR region %d mapping failed",
			nq_db->reg.bar_id);
		return -ENOMEM;
	}

	nq_db->dbinfo.db = nq_db->reg.bar_reg;
	nq_db->dbinfo.hwq = &nq->hwq;
	nq_db->dbinfo.xid = nq->ring_id;

	return 0;
}

int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
			 int nq_idx, int msix_vector, int bar_reg_offset,
			 cqn_handler_t cqn_handler,
			 srqn_handler_t srqn_handler)
{
	int rc;

	nq->pdev = pdev;
	nq->cqn_handler = cqn_handler;
	nq->srqn_handler = srqn_handler;

	/* Have a task to schedule CQ notifiers in post send case */
	nq->cqn_wq = create_singlethread_workqueue("bnxt_qplib_nq");
	if (!nq->cqn_wq)
		return -ENOMEM;

	rc = bnxt_qplib_map_nq_db(nq, bar_reg_offset);
	if (rc)
		goto fail;

	rc = bnxt_qplib_nq_start_irq(nq, nq_idx, msix_vector, true);
	if (rc) {
		dev_err(&nq->pdev->dev,
			"Failed to request irq for nq-idx %d\n", nq_idx);
		goto fail;
	}

	return 0;
fail:
	bnxt_qplib_disable_nq(nq);
	return rc;
}

void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq)
{
	if (nq->hwq.max_elements) {
		bnxt_qplib_free_hwq(nq->res, &nq->hwq);
		nq->hwq.max_elements = 0;
	}
}

int bnxt_qplib_alloc_nq(struct bnxt_qplib_res *res, struct bnxt_qplib_nq *nq)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};

	nq->pdev = res->pdev;
	nq->res = res;
	if (!nq->hwq.max_elements ||
	    nq->hwq.max_elements > BNXT_QPLIB_NQE_MAX_CNT)
		nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;

	sginfo.pgsize = PAGE_SIZE;
	sginfo.pgshft = PAGE_SHIFT;
	hwq_attr.res = res;
	hwq_attr.sginfo = &sginfo;
	hwq_attr.depth = nq->hwq.max_elements;
	hwq_attr.stride = sizeof(struct nq_base);
	hwq_attr.type = bnxt_qplib_get_hwq_type(nq->res);
	if (bnxt_qplib_alloc_init_hwq(&nq->hwq, &hwq_attr)) {
		dev_err(&nq->pdev->dev, "FP NQ allocation failed");
		return -ENOMEM;
	}
	nq->budget = 8;
	return 0;
}

/* SRQ */
void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
			    struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_destroy_srq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_destroy_srq req = {};
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_DESTROY_SRQ,
				 sizeof(req));

	/* Configure the request */
	req.srq_cid = cpu_to_le32(srq->id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	kfree(srq->swq);
	if (rc)
		return;
	bnxt_qplib_free_hwq(res, &srq->hwq);
}

int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct creq_create_srq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_create_srq req = {};
	struct bnxt_qplib_pbl *pbl;
	u16 pg_sz_lvl;
	int rc, idx;

	hwq_attr.res = res;
	hwq_attr.sginfo = &srq->sg_info;
	hwq_attr.depth = srq->max_wqe;
	hwq_attr.stride = srq->wqe_size;
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&srq->hwq, &hwq_attr);
	if (rc)
		return rc;
	srq->dbinfo.flags = 0;
	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_CREATE_SRQ,
				 sizeof(req));

	/* Configure the request */
	req.dpi = cpu_to_le32(srq->dpi->dpi);
	req.srq_handle = cpu_to_le64((uintptr_t)srq);

	req.srq_size = cpu_to_le16((u16)srq->hwq.max_elements);
	pbl = &srq->hwq.pbl[PBL_LVL_0];
	pg_sz_lvl = ((u16)bnxt_qplib_base_pg_size(&srq->hwq) <<
		     CMDQ_CREATE_SRQ_PG_SIZE_SFT);
	pg_sz_lvl |= (srq->hwq.level & CMDQ_CREATE_SRQ_LVL_MASK) <<
		      CMDQ_CREATE_SRQ_LVL_SFT;
	req.pg_size_lvl = cpu_to_le16(pg_sz_lvl);
	req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	req.pd_id = cpu_to_le32(srq->pd->id);
	req.eventq_id = cpu_to_le16(srq->eventq_hw_ring_id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto fail;

	spin_lock_init(&srq->lock);
	srq->start_idx = 0;
	srq->last_idx = srq->hwq.max_elements - 1;
	if (!srq->hwq.is_user) {
		srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
				   GFP_KERNEL);
		if (!srq->swq) {
			rc = -ENOMEM;
			goto fail;
		}
		for (idx = 0; idx < srq->hwq.max_elements; idx++)
			srq->swq[idx].next_idx = idx + 1;
		srq->swq[srq->last_idx].next_idx = -1;
	}

	srq->id = le32_to_cpu(resp.xid);
	srq->dbinfo.hwq = &srq->hwq;
	srq->dbinfo.xid = srq->id;
	srq->dbinfo.db = srq->dpi->dbr;
	srq->dbinfo.max_slot = 1;
	srq->dbinfo.priv_db = res->dpi_tbl.priv_db;
	bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA);

	return 0;
fail:
	bnxt_qplib_free_hwq(res, &srq->hwq);
	kfree(srq->swq);

	return rc;
}

int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_query_srq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_rcfw_sbuf sbuf;
	struct creq_query_srq_resp_sb *sb;
	struct cmdq_query_srq req = {};
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_QUERY_SRQ,
				 sizeof(req));

	/* Configure the request */
	sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
	sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
				     &sbuf.dma_addr, GFP_KERNEL);
	if (!sbuf.sb)
		return -ENOMEM;
	req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
	req.srq_cid = cpu_to_le32(srq->id);
	sb = sbuf.sb;
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (!rc)
		srq->threshold = le16_to_cpu(sb->srq_limit);
	dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
			  sbuf.sb, sbuf.dma_addr);

	return rc;
}

int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
			     struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
	struct rq_wqe *srqe;
	struct sq_sge *hw_sge;
	int i, next;

	spin_lock(&srq_hwq->lock);
	if (srq->start_idx == srq->last_idx) {
		dev_err(&srq_hwq->pdev->dev,
			"FP: SRQ (0x%x) is full!\n", srq->id);
		spin_unlock(&srq_hwq->lock);
		return -EINVAL;
	}
	next = srq->start_idx;
	srq->start_idx = srq->swq[next].next_idx;
	spin_unlock(&srq_hwq->lock);

	srqe = bnxt_qplib_get_qe(srq_hwq, srq_hwq->prod, NULL);
	memset(srqe, 0, srq->wqe_size);
	/* Calculate wqe_size16 and data_len */
	for (i = 0, hw_sge = (struct sq_sge *)srqe->data;
	     i < wqe->num_sge; i++, hw_sge++) {
		hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
		hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
		hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
	}
	srqe->wqe_type = wqe->type;
	srqe->flags = wqe->flags;
	srqe->wqe_size = wqe->num_sge +
			((offsetof(typeof(*srqe), data) + 15) >> 4);
	srqe->wr_id[0] = cpu_to_le32((u32)next);
	srq->swq[next].wr_id = wqe->wr_id;

	bnxt_qplib_hwq_incr_prod(&srq->dbinfo, srq_hwq, srq->dbinfo.max_slot);

	/* Ring DB */
	bnxt_qplib_ring_prod_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ);

	return 0;
}

/* QP */

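/* Allocate the software queue that shadows a hardware queue and chain
 * its entries through next_idx so WQE slots are recycled in order.
 */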
static int bnxt_qplib_alloc_init_swq(struct bnxt_qplib_q *que)
{
	int indx;

	que->swq = kcalloc(que->max_sw_wqe, sizeof(*que->swq), GFP_KERNEL);
	if (!que->swq)
		return -ENOMEM;

	que->swq_start = 0;
	que->swq_last = que->max_sw_wqe - 1;
	for (indx = 0; indx < que->max_sw_wqe; indx++)
		que->swq[indx].next_idx = indx + 1;
	que->swq[que->swq_last].next_idx = 0; /* Make it circular */
	que->swq_last = 0;

	return 0;
}

int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_create_qp1_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_q *rq = &qp->rq;
	struct cmdq_create_qp1 req = {};
	struct bnxt_qplib_pbl *pbl;
	u32 qp_flags = 0;
	u8 pg_sz_lvl;
	u32 tbl_indx;
	int rc;

	sq->dbinfo.flags = 0;
	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_CREATE_QP1,
				 sizeof(req));
	/* General */
	req.type = qp->type;
	req.dpi = cpu_to_le32(qp->dpi->dpi);
	req.qp_handle = cpu_to_le64(qp->qp_handle);

	/* SQ */
	hwq_attr.res = res;
	hwq_attr.sginfo = &sq->sg_info;
	hwq_attr.stride = sizeof(struct sq_sge);
	hwq_attr.depth = bnxt_qplib_get_depth(sq, qp->wqe_mode, false);
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
	if (rc)
		return rc;

	rc = bnxt_qplib_alloc_init_swq(sq);
	if (rc)
		goto fail_sq;

	req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
	pbl = &sq->hwq.pbl[PBL_LVL_0];
	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
		     CMDQ_CREATE_QP1_SQ_PG_SIZE_SFT);
	pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP1_SQ_LVL_MASK);
	req.sq_pg_size_sq_lvl = pg_sz_lvl;
	req.sq_fwo_sq_sge =
		cpu_to_le16((sq->max_sge & CMDQ_CREATE_QP1_SQ_SGE_MASK) <<
			    CMDQ_CREATE_QP1_SQ_SGE_SFT);
	req.scq_cid = cpu_to_le32(qp->scq->id);

	/* RQ */
	if (rq->max_wqe) {
		rq->dbinfo.flags = 0;
		hwq_attr.res = res;
		hwq_attr.sginfo = &rq->sg_info;
		hwq_attr.stride = sizeof(struct sq_sge);
		hwq_attr.depth = bnxt_qplib_get_depth(rq, qp->wqe_mode, false);
		hwq_attr.type = HWQ_TYPE_QUEUE;
		rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
		if (rc)
			goto sq_swq;
		rc = bnxt_qplib_alloc_init_swq(rq);
		if (rc)
			goto fail_rq;
		req.rq_size = cpu_to_le32(rq->max_wqe);
		pbl = &rq->hwq.pbl[PBL_LVL_0];
		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
		pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
			     CMDQ_CREATE_QP1_RQ_PG_SIZE_SFT);
		pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP1_RQ_LVL_MASK);
		req.rq_pg_size_rq_lvl = pg_sz_lvl;
		req.rq_fwo_rq_sge =
			cpu_to_le16((rq->max_sge &
				     CMDQ_CREATE_QP1_RQ_SGE_MASK) <<
				    CMDQ_CREATE_QP1_RQ_SGE_SFT);
	}
	req.rcq_cid = cpu_to_le32(qp->rcq->id);
	/* Header buffer - allow hdr_buf pass in */
	rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
	if (rc) {
		rc = -ENOMEM;
		goto rq_rwq;
	}
	qp_flags |= CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE;
	req.qp_flags = cpu_to_le32(qp_flags);
	req.pd_id = cpu_to_le32(qp->pd->id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto fail;

	qp->id = le32_to_cpu(resp.xid);
	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
	qp->cctx = res->cctx;
	sq->dbinfo.hwq = &sq->hwq;
	sq->dbinfo.xid = qp->id;
	sq->dbinfo.db = qp->dpi->dbr;
	sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);
	if (rq->max_wqe) {
		rq->dbinfo.hwq = &rq->hwq;
		rq->dbinfo.xid = qp->id;
		rq->dbinfo.db = qp->dpi->dbr;
		rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
	}
	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
	rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
	rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;

	return 0;

fail:
	bnxt_qplib_free_qp_hdr_buf(res, qp);
rq_rwq:
	kfree(rq->swq);
fail_rq:
	bnxt_qplib_free_hwq(res, &rq->hwq);
sq_swq:
	kfree(sq->swq);
fail_sq:
	bnxt_qplib_free_hwq(res, &sq->hwq);
	return rc;
}

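/* Initialize the bookkeeping for the PSN/MSN search area that lives
 * after the SQ WQEs in the same HWQ: remember the page list, the entry
 * stride, and the index offset of the first entry within its page.
 */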
static void bnxt_qplib_init_psn_ptr(struct bnxt_qplib_qp *qp, int size)
{
	struct bnxt_qplib_hwq *hwq;
	struct bnxt_qplib_q *sq;
	u64 fpsne, psn_pg;
	u16 indx_pad = 0;

	sq = &qp->sq;
	hwq = &sq->hwq;
	/* First psn entry */
	fpsne = (u64)bnxt_qplib_get_qe(hwq, hwq->depth, &psn_pg);
	if (!IS_ALIGNED(fpsne, PAGE_SIZE))
		indx_pad = (fpsne & ~PAGE_MASK) / size;
	hwq->pad_pgofft = indx_pad;
	hwq->pad_pg = (u64 *)psn_pg;
	hwq->pad_stride = size;
}

int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};
	struct creq_create_qp_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_q *rq = &qp->rq;
	struct cmdq_create_qp req = {};
	int rc, req_size, psn_sz = 0;
	struct bnxt_qplib_hwq *xrrq;
	struct bnxt_qplib_pbl *pbl;
	u32 qp_flags = 0;
	u8 pg_sz_lvl;
	u32 tbl_indx;
	u16 nsge;

	qp->is_host_msn_tbl = _is_host_msn_table(res->dattr->dev_cap_flags2);
	sq->dbinfo.flags = 0;
	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_CREATE_QP,
				 sizeof(req));

	/* General */
	req.type = qp->type;
	req.dpi = cpu_to_le32(qp->dpi->dpi);
	req.qp_handle = cpu_to_le64(qp->qp_handle);

	/* SQ */
	if (qp->type == CMDQ_CREATE_QP_TYPE_RC) {
		psn_sz = bnxt_qplib_is_chip_gen_p5_p7(res->cctx) ?
			 sizeof(struct sq_psn_search_ext) :
			 sizeof(struct sq_psn_search);

		if (qp->is_host_msn_tbl) {
			psn_sz = sizeof(struct sq_msn_search);
			qp->msn = 0;
		}
	}

	hwq_attr.res = res;
	hwq_attr.sginfo = &sq->sg_info;
	hwq_attr.stride = sizeof(struct sq_sge);
	hwq_attr.depth = bnxt_qplib_get_depth(sq, qp->wqe_mode, true);
	hwq_attr.aux_stride = psn_sz;
	hwq_attr.aux_depth = psn_sz ? bnxt_qplib_set_sq_size(sq, qp->wqe_mode)
				    : 0;
	/* Update msn tbl size */
	if (qp->is_host_msn_tbl && psn_sz) {
		if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
			hwq_attr.aux_depth =
				roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
		else
			hwq_attr.aux_depth =
				roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode)) / 2;
		qp->msn_tbl_sz = hwq_attr.aux_depth;
		qp->msn = 0;
	}

	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
	if (rc)
		return rc;

	if (!sq->hwq.is_user) {
		rc = bnxt_qplib_alloc_init_swq(sq);
		if (rc)
			goto fail_sq;

		if (psn_sz)
			bnxt_qplib_init_psn_ptr(qp, psn_sz);
	}
	req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
	pbl = &sq->hwq.pbl[PBL_LVL_0];
	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
		     CMDQ_CREATE_QP_SQ_PG_SIZE_SFT);
	pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP_SQ_LVL_MASK);
	req.sq_pg_size_sq_lvl = pg_sz_lvl;
	req.sq_fwo_sq_sge =
		cpu_to_le16(((sq->max_sge & CMDQ_CREATE_QP_SQ_SGE_MASK) <<
			     CMDQ_CREATE_QP_SQ_SGE_SFT) | 0);
	req.scq_cid = cpu_to_le32(qp->scq->id);

	/* RQ */
	if (!qp->srq) {
		rq->dbinfo.flags = 0;
		hwq_attr.res = res;
		hwq_attr.sginfo = &rq->sg_info;
		hwq_attr.stride = sizeof(struct sq_sge);
		hwq_attr.depth = bnxt_qplib_get_depth(rq, qp->wqe_mode, false);
		hwq_attr.aux_stride = 0;
		hwq_attr.aux_depth = 0;
		hwq_attr.type = HWQ_TYPE_QUEUE;
		rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
		if (rc)
			goto sq_swq;
		if (!rq->hwq.is_user) {
			rc = bnxt_qplib_alloc_init_swq(rq);
			if (rc)
				goto fail_rq;
		}

		req.rq_size = cpu_to_le32(rq->max_wqe);
		pbl = &rq->hwq.pbl[PBL_LVL_0];
		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
		pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
			     CMDQ_CREATE_QP_RQ_PG_SIZE_SFT);
		pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP_RQ_LVL_MASK);
		req.rq_pg_size_rq_lvl = pg_sz_lvl;
		nsge = (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
			6 : rq->max_sge;
		req.rq_fwo_rq_sge =
			cpu_to_le16(((nsge &
				      CMDQ_CREATE_QP_RQ_SGE_MASK) <<
				     CMDQ_CREATE_QP_RQ_SGE_SFT) | 0);
	} else {
		/* SRQ */
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_SRQ_USED;
		req.srq_cid = cpu_to_le32(qp->srq->id);
	}
	req.rcq_cid = cpu_to_le32(qp->rcq->id);

	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE;
	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED;
	if (qp->sig_type)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION;
	if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_VARIABLE_SIZED_WQE_ENABLED;
	if (_is_ext_stats_supported(res->dattr->dev_cap_flags) && !res->is_vf)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_ENABLED;

	req.qp_flags = cpu_to_le32(qp_flags);

	/* ORRQ and IRRQ */
	if (psn_sz) {
		xrrq = &qp->orrq;
		xrrq->max_elements =
			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
		req_size = xrrq->max_elements *
			   BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE + PAGE_SIZE - 1;
		req_size &= ~(PAGE_SIZE - 1);
		sginfo.pgsize = req_size;
		sginfo.pgshft = PAGE_SHIFT;

		hwq_attr.res = res;
		hwq_attr.sginfo = &sginfo;
		hwq_attr.depth = xrrq->max_elements;
		hwq_attr.stride = BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE;
		hwq_attr.aux_stride = 0;
		hwq_attr.aux_depth = 0;
		hwq_attr.type = HWQ_TYPE_CTX;
		rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
		if (rc)
			goto rq_swq;
		pbl = &xrrq->pbl[PBL_LVL_0];
		req.orrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);

		xrrq = &qp->irrq;
		xrrq->max_elements = IRD_LIMIT_TO_IRRQ_SLOTS(
						qp->max_dest_rd_atomic);
		req_size = xrrq->max_elements *
			   BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE + PAGE_SIZE - 1;
		req_size &= ~(PAGE_SIZE - 1);
		sginfo.pgsize = req_size;
		hwq_attr.depth = xrrq->max_elements;
		hwq_attr.stride = BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE;
		rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
		if (rc)
			goto fail_orrq;

		pbl = &xrrq->pbl[PBL_LVL_0];
		req.irrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
	}
	req.pd_id = cpu_to_le32(qp->pd->id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto fail;

	qp->id = le32_to_cpu(resp.xid);
	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
	INIT_LIST_HEAD(&qp->sq_flush);
	INIT_LIST_HEAD(&qp->rq_flush);
	qp->cctx = res->cctx;
	sq->dbinfo.hwq = &sq->hwq;
	sq->dbinfo.xid = qp->id;
	sq->dbinfo.db = qp->dpi->dbr;
	sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);
	if (rq->max_wqe) {
		rq->dbinfo.hwq = &rq->hwq;
		rq->dbinfo.xid = qp->id;
		rq->dbinfo.db = qp->dpi->dbr;
		rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
	}
	spin_lock_bh(&rcfw->tbl_lock);
	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
	rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
	rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;
	spin_unlock_bh(&rcfw->tbl_lock);

	return 0;
fail:
	bnxt_qplib_free_hwq(res, &qp->irrq);
fail_orrq:
	bnxt_qplib_free_hwq(res, &qp->orrq);
rq_swq:
	kfree(rq->swq);
fail_rq:
	bnxt_qplib_free_hwq(res, &rq->hwq);
sq_swq:
	kfree(sq->swq);
fail_sq:
	bnxt_qplib_free_hwq(res, &sq->hwq);
	return rc;
}

static void __modify_flags_from_init_state(struct bnxt_qplib_qp *qp)
{
	switch (qp->state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		/* INIT->RTR, configure the path_mtu to the default
		 * 2048 if not being requested
		 */
		if (!(qp->modify_flags &
		    CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)) {
			qp->modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
			qp->path_mtu =
				CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
		}
		/* Bono FW requires the max_dest_rd_atomic to be >= 1 */
		if (qp->max_dest_rd_atomic < 1)
			qp->max_dest_rd_atomic = 1;
		qp->modify_flags &= ~CMDQ_MODIFY_QP_MODIFY_MASK_SRC_MAC;
		/* Bono FW 20.6.5 requires SGID_INDEX configuration */
		if (!(qp->modify_flags &
		    CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)) {
			qp->modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX;
			qp->ah.sgid_index = 0;
		}
		break;
	default:
		break;
	}
}

static void __modify_flags_from_rtr_state(struct bnxt_qplib_qp *qp)
{
	switch (qp->state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		/* Bono FW requires the max_rd_atomic to be >= 1 */
		if (qp->max_rd_atomic < 1)
			qp->max_rd_atomic = 1;
		/* Bono FW does not allow PKEY_INDEX,
		 * DGID, FLOW_LABEL, SGID_INDEX, HOP_LIMIT,
		 * TRAFFIC_CLASS, DEST_MAC, PATH_MTU, RQ_PSN,
		 * MIN_RNR_TIMER, MAX_DEST_RD_ATOMIC, DEST_QP_ID
		 * modification
		 */
		qp->modify_flags &=
			~(CMDQ_MODIFY_QP_MODIFY_MASK_PKEY |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
			  CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
			  CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
			  CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
			  CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
			  CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU |
			  CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN |
			  CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER |
			  CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID);
		break;
	default:
		break;
	}
}

static void __filter_modify_flags(struct bnxt_qplib_qp *qp)
{
	switch (qp->cur_qp_state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RESET:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_INIT:
		__modify_flags_from_init_state(qp);
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		__modify_flags_from_rtr_state(qp);
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_SQD:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_SQE:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_ERR:
		break;
	default:
		break;
	}
}

int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_modify_qp_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_modify_qp req = {};
	u32 temp32[4];
	u32 bmask;
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_MODIFY_QP,
				 sizeof(req));

	/* Filter out the qp_attr_mask based on the state->new transition */
	__filter_modify_flags(qp);
	bmask = qp->modify_flags;
	req.modify_mask = cpu_to_le32(qp->modify_flags);
	req.qp_cid = cpu_to_le32(qp->id);
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_STATE) {
		req.network_type_en_sqd_async_notify_new_state =
				(qp->state & CMDQ_MODIFY_QP_NEW_STATE_MASK) |
				(qp->en_sqd_async_notify ?
					CMDQ_MODIFY_QP_EN_SQD_ASYNC_NOTIFY : 0);
	}
	req.network_type_en_sqd_async_notify_new_state |= qp->nw_type;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS)
		req.access = qp->access;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY)
		req.pkey = cpu_to_le16(IB_DEFAULT_PKEY_FULL);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_QKEY)
		req.qkey = cpu_to_le32(qp->qkey);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DGID) {
		memcpy(temp32, qp->ah.dgid.data, sizeof(struct bnxt_qplib_gid));
		req.dgid[0] = cpu_to_le32(temp32[0]);
		req.dgid[1] = cpu_to_le32(temp32[1]);
		req.dgid[2] = cpu_to_le32(temp32[2]);
		req.dgid[3] = cpu_to_le32(temp32[3]);
	}
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL)
		req.flow_label = cpu_to_le32(qp->ah.flow_label);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)
		req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id
					     [qp->ah.sgid_index]);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT)
		req.hop_limit = qp->ah.hop_limit;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS)
		req.traffic_class = qp->ah.traffic_class;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC)
		memcpy(req.dest_mac, qp->ah.dmac, 6);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)
		req.path_mtu_pingpong_push_enable |= qp->path_mtu;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT)
		req.timeout = qp->timeout;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT)
		req.retry_cnt = qp->retry_cnt;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY)
		req.rnr_retry = qp->rnr_retry;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER)
		req.min_rnr_timer = qp->min_rnr_timer;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN)
		req.rq_psn = cpu_to_le32(qp->rq.psn);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN)
		req.sq_psn = cpu_to_le32(qp->sq.psn);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC)
		req.max_rd_atomic =
			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC)
		req.max_dest_rd_atomic =
			IRD_LIMIT_TO_IRRQ_SLOTS(qp->max_dest_rd_atomic);

	req.sq_size = cpu_to_le32(qp->sq.hwq.max_elements);
	req.rq_size = cpu_to_le32(qp->rq.hwq.max_elements);
	req.sq_sge = cpu_to_le16(qp->sq.max_sge);
	req.rq_sge = cpu_to_le16(qp->rq.max_sge);
	req.max_inline_data = cpu_to_le32(qp->max_inline_data);
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID)
		req.dest_qp_id = cpu_to_le32(qp->dest_qpn);

	req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		return rc;
	qp->cur_qp_state = qp->state;
	return 0;
}

int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_query_qp_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_rcfw_sbuf sbuf;
	struct creq_query_qp_resp_sb *sb;
	struct cmdq_query_qp req = {};
	u32 temp32[4];
	int i, rc;

	sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
	sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
				     &sbuf.dma_addr, GFP_KERNEL);
	if (!sbuf.sb)
		return -ENOMEM;
	sb = sbuf.sb;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_QUERY_QP,
				 sizeof(req));

	req.qp_cid = cpu_to_le32(qp->id);
	req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto bail;
	/* Extract the context from the side buffer */
	qp->state = sb->en_sqd_async_notify_state &
			CREQ_QUERY_QP_RESP_SB_STATE_MASK;
	qp->en_sqd_async_notify = sb->en_sqd_async_notify_state &
				  CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY;
	qp->access = sb->access;
	qp->pkey_index = le16_to_cpu(sb->pkey);
	qp->qkey = le32_to_cpu(sb->qkey);

	temp32[0] = le32_to_cpu(sb->dgid[0]);
	temp32[1] = le32_to_cpu(sb->dgid[1]);
	temp32[2] = le32_to_cpu(sb->dgid[2]);
	temp32[3] = le32_to_cpu(sb->dgid[3]);
	memcpy(qp->ah.dgid.data, temp32, sizeof(qp->ah.dgid.data));

	qp->ah.flow_label = le32_to_cpu(sb->flow_label);

	qp->ah.sgid_index = 0;
	for (i = 0; i < res->sgid_tbl.max; i++) {
		if (res->sgid_tbl.hw_id[i] == le16_to_cpu(sb->sgid_index)) {
			qp->ah.sgid_index = i;
			break;
		}
	}
	if (i == res->sgid_tbl.max)
		dev_warn(&res->pdev->dev, "SGID not found??\n");

	qp->ah.hop_limit = sb->hop_limit;
	qp->ah.traffic_class = sb->traffic_class;
	memcpy(qp->ah.dmac, sb->dest_mac, 6);
	qp->ah.vlan_id = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
				CREQ_QUERY_QP_RESP_SB_VLAN_ID_MASK) >>
				CREQ_QUERY_QP_RESP_SB_VLAN_ID_SFT;
	qp->path_mtu = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
			    CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) >>
			    CREQ_QUERY_QP_RESP_SB_PATH_MTU_SFT;
	qp->timeout = sb->timeout;
	qp->retry_cnt = sb->retry_cnt;
	qp->rnr_retry = sb->rnr_retry;
	qp->min_rnr_timer = sb->min_rnr_timer;
	qp->rq.psn = le32_to_cpu(sb->rq_psn);
	qp->max_rd_atomic = ORRQ_SLOTS_TO_ORD_LIMIT(sb->max_rd_atomic);
	qp->sq.psn = le32_to_cpu(sb->sq_psn);
	qp->max_dest_rd_atomic =
			IRRQ_SLOTS_TO_IRD_LIMIT(sb->max_dest_rd_atomic);
	qp->sq.max_wqe = qp->sq.hwq.max_elements;
	qp->rq.max_wqe = qp->rq.hwq.max_elements;
	qp->sq.max_sge = le16_to_cpu(sb->sq_sge);
	qp->rq.max_sge = le16_to_cpu(sb->rq_sge);
	qp->max_inline_data = le32_to_cpu(sb->max_inline_data);
	qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
	memcpy(qp->smac, sb->src_mac, 6);
	qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
	qp->port_id = le16_to_cpu(sb->port_id);
bail:
	dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
			  sbuf.sb, sbuf.dma_addr);
	return rc;
}

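/* Walk the CQ ring and zero the qp_handle of any valid CQE that still
 * points at the given QP, so stale completions are not reported after
 * the QP is destroyed.
 */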
static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
{
	struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
	u32 peek_flags, peek_cons;
	struct cq_base *hw_cqe;
	int i;

	peek_flags = cq->dbinfo.flags;
	peek_cons = cq_hwq->cons;
	for (i = 0; i < cq_hwq->max_elements; i++) {
		hw_cqe = bnxt_qplib_get_qe(cq_hwq, peek_cons, NULL);
		if (!CQE_CMP_VALID(hw_cqe, peek_flags))
			continue;
		/*
		 * The valid test of the entry must be done before reading
		 * any further.
		 */
		dma_rmb();
		switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
		case CQ_BASE_CQE_TYPE_REQ:
		case CQ_BASE_CQE_TYPE_TERMINAL:
		{
			struct cq_req *cqe = (struct cq_req *)hw_cqe;

			if (qp == le64_to_cpu(cqe->qp_handle))
				cqe->qp_handle = 0;
			break;
		}
		case CQ_BASE_CQE_TYPE_RES_RC:
		case CQ_BASE_CQE_TYPE_RES_UD:
		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
		{
			struct cq_res_rc *cqe = (struct cq_res_rc *)hw_cqe;

			if (qp == le64_to_cpu(cqe->qp_handle))
				cqe->qp_handle = 0;
			break;
		}
		default:
			break;
		}
		bnxt_qplib_hwq_incr_cons(cq_hwq->max_elements, &peek_cons,
					 1, &peek_flags);
	}
}

int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_destroy_qp_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_destroy_qp req = {};
	u32 tbl_indx;
	int rc;

	spin_lock_bh(&rcfw->tbl_lock);
	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
	rcfw->qp_tbl[tbl_indx].qp_id = BNXT_QPLIB_QP_ID_INVALID;
	rcfw->qp_tbl[tbl_indx].qp_handle = NULL;
	spin_unlock_bh(&rcfw->tbl_lock);

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_DESTROY_QP,
				 sizeof(req));

	req.qp_cid = cpu_to_le32(qp->id);
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc) {
		spin_lock_bh(&rcfw->tbl_lock);
		rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
		rcfw->qp_tbl[tbl_indx].qp_handle = qp;
		spin_unlock_bh(&rcfw->tbl_lock);
		return rc;
	}

	return 0;
}

void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res,
			    struct bnxt_qplib_qp *qp)
{
	bnxt_qplib_free_qp_hdr_buf(res, qp);
	bnxt_qplib_free_hwq(res, &qp->sq.hwq);
	kfree(qp->sq.swq);

	bnxt_qplib_free_hwq(res, &qp->rq.hwq);
	kfree(qp->rq.swq);

	if (qp->irrq.max_elements)
		bnxt_qplib_free_hwq(res, &qp->irrq);
	if (qp->orrq.max_elements)
		bnxt_qplib_free_hwq(res, &qp->orrq);
}

void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge)
{
	struct bnxt_qplib_q *sq = &qp->sq;
	u32 sw_prod;

	memset(sge, 0, sizeof(*sge));

	if (qp->sq_hdr_buf) {
		sw_prod = sq->swq_start;
		sge->addr = (dma_addr_t)(qp->sq_hdr_buf_map +
					 sw_prod * qp->sq_hdr_buf_size);
		sge->lkey = 0xFFFFFFFF;
		sge->size = qp->sq_hdr_buf_size;
		return qp->sq_hdr_buf + sw_prod * sge->size;
	}
	return NULL;
}

u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;

	return rq->swq_start;
}

dma_addr_t bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp, u32 index)
{
	return (qp->rq_hdr_buf_map + index * qp->rq_hdr_buf_size);
}

void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	u32 sw_prod;

	memset(sge, 0, sizeof(*sge));

	if (qp->rq_hdr_buf) {
		sw_prod = rq->swq_start;
		sge->addr = (dma_addr_t)(qp->rq_hdr_buf_map +
					 sw_prod * qp->rq_hdr_buf_size);
		sge->lkey = 0xFFFFFFFF;
		sge->size = qp->rq_hdr_buf_size;
		return qp->rq_hdr_buf + sw_prod * sge->size;
	}
	return NULL;
}

/* Fill the MSN table into the next psn row */
static void bnxt_qplib_fill_msn_search(struct bnxt_qplib_qp *qp,
				       struct bnxt_qplib_swqe *wqe,
				       struct bnxt_qplib_swq *swq)
{
	struct sq_msn_search *msns;
	u32 start_psn, next_psn;
	u16 start_idx;

	msns = (struct sq_msn_search *)swq->psn_search;
	msns->start_idx_next_psn_start_psn = 0;

	start_psn = swq->start_psn;
	next_psn = swq->next_psn;
	start_idx = swq->slot_idx;
	msns->start_idx_next_psn_start_psn |=
		bnxt_re_update_msn_tbl(start_idx, next_psn, start_psn);
	qp->msn++;
	qp->msn %= qp->msn_tbl_sz;
}

static void bnxt_qplib_fill_psn_search(struct bnxt_qplib_qp *qp,
				       struct bnxt_qplib_swqe *wqe,
				       struct bnxt_qplib_swq *swq)
{
	struct sq_psn_search_ext *psns_ext;
	struct sq_psn_search *psns;
	u32 flg_npsn;
	u32 op_spsn;

	if (!swq->psn_search)
		return;
	/* Handle MSN differently on cap flags */
	if (qp->is_host_msn_tbl) {
		bnxt_qplib_fill_msn_search(qp, wqe, swq);
		return;
	}
	psns = swq->psn_search;
	psns_ext = swq->psn_ext;

	op_spsn = ((swq->start_psn << SQ_PSN_SEARCH_START_PSN_SFT) &
		    SQ_PSN_SEARCH_START_PSN_MASK);
	op_spsn |= ((wqe->type << SQ_PSN_SEARCH_OPCODE_SFT) &
		     SQ_PSN_SEARCH_OPCODE_MASK);
	flg_npsn = ((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
		     SQ_PSN_SEARCH_NEXT_PSN_MASK);

	if (bnxt_qplib_is_chip_gen_p5_p7(qp->cctx)) {
		psns_ext->opcode_start_psn = cpu_to_le32(op_spsn);
		psns_ext->flags_next_psn = cpu_to_le32(flg_npsn);
		psns_ext->start_slot_idx = cpu_to_le16(swq->slot_idx);
	} else {
		psns->opcode_start_psn = cpu_to_le32(op_spsn);
		psns->flags_next_psn = cpu_to_le32(flg_npsn);
	}
}

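/* Copy inline payload from the wqe's SG list directly into SQ slots,
 * 16 bytes (one sq_sge) at a time. Returns the total number of bytes
 * copied, or -ENOMEM if it would exceed qp->max_inline_data.
 */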
static int bnxt_qplib_put_inline(struct bnxt_qplib_qp *qp,
				 struct bnxt_qplib_swqe *wqe,
				 u16 *idx)
{
	struct bnxt_qplib_hwq *hwq;
	int len, t_len, offt;
	bool pull_dst = true;
	void *il_dst = NULL;
	void *il_src = NULL;
	int t_cplen, cplen;
	int indx;

	hwq = &qp->sq.hwq;
	t_len = 0;
	for (indx = 0; indx < wqe->num_sge; indx++) {
		len = wqe->sg_list[indx].size;
		il_src = (void *)wqe->sg_list[indx].addr;
		t_len += len;
		if (t_len > qp->max_inline_data)
			return -ENOMEM;
		while (len) {
			if (pull_dst) {
				pull_dst = false;
				il_dst = bnxt_qplib_get_prod_qe(hwq, *idx);
				(*idx)++;
				t_cplen = 0;
				offt = 0;
			}
			cplen = min_t(int, len, sizeof(struct sq_sge));
			cplen = min_t(int, cplen,
					(sizeof(struct sq_sge) - offt));
			memcpy(il_dst, il_src, cplen);
			t_cplen += cplen;
			il_src += cplen;
			il_dst += cplen;
			offt += cplen;
			len -= cplen;
			if (t_cplen == sizeof(struct sq_sge))
				pull_dst = true;
		}
	}

	return t_len;
}

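/* Translate the caller's SG list into hardware sq_sge entries at
 * successive producer slots starting at *idx, returning the total
 * data length.
 */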
static u32 bnxt_qplib_put_sges(struct bnxt_qplib_hwq *hwq,
			       struct bnxt_qplib_sge *ssge,
			       u16 nsge, u16 *idx)
{
	struct sq_sge *dsge;
	int indx, len = 0;

	for (indx = 0; indx < nsge; indx++, (*idx)++) {
		dsge = bnxt_qplib_get_prod_qe(hwq, *idx);
		dsge->va_or_pa = cpu_to_le64(ssge[indx].addr);
		dsge->l_key = cpu_to_le32(ssge[indx].lkey);
		dsge->size = cpu_to_le32(ssge[indx].size);
		len += ssge[indx].size;
	}

	return len;
}

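/* Compute how many 16B slots a WQE needs (header plus SGEs, or aligned
 * inline data), report the wqe_size and the queue-full delta in slots;
 * static WQE mode always consumes a fixed eight slots.
 */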
static u16 bnxt_qplib_required_slots(struct bnxt_qplib_qp *qp,
				     struct bnxt_qplib_swqe *wqe,
				     u16 *wqe_sz, u16 *qdf, u8 mode)
{
	u32 ilsize, bytes;
	u16 nsge;
	u16 slot;

	nsge = wqe->num_sge;
	/* Using sq_send_hdr here is a slight misnomer; the rq header is the
	 * same size.
	 */
	bytes = sizeof(struct sq_send_hdr) + nsge * sizeof(struct sq_sge);
	if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE) {
		ilsize = bnxt_qplib_calc_ilsize(wqe, qp->max_inline_data);
		bytes = ALIGN(ilsize, sizeof(struct sq_sge));
		bytes += sizeof(struct sq_send_hdr);
	}

	*qdf = __xlate_qfd(qp->sq.q_full_delta, bytes);
	slot = bytes >> 4;
	*wqe_sz = slot;
	if (mode == BNXT_QPLIB_WQE_MODE_STATIC)
		slot = 8;
	return slot;
}

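/* Point swq->psn_search/psn_ext at the PSN (or MSN) entry in the pad
 * area that corresponds to this WQE. With HW retransmission the entry
 * is indexed by qp->msn instead of the WQE's slot position.
 */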
bnxt_qplib_pull_psn_buff(struct bnxt_qplib_qp * qp,struct bnxt_qplib_q * sq,struct bnxt_qplib_swq * swq,bool hw_retx)1753 static void bnxt_qplib_pull_psn_buff(struct bnxt_qplib_qp *qp, struct bnxt_qplib_q *sq,
1754 struct bnxt_qplib_swq *swq, bool hw_retx)
1755 {
1756 struct bnxt_qplib_hwq *hwq;
1757 u32 pg_num, pg_indx;
1758 void *buff;
1759 u32 tail;
1760
1761 hwq = &sq->hwq;
1762 if (!hwq->pad_pg)
1763 return;
1764 tail = swq->slot_idx / sq->dbinfo.max_slot;
1765 if (hw_retx) {
1766 /* For HW retx use qp msn index */
1767 tail = qp->msn;
1768 tail %= qp->msn_tbl_sz;
1769 }
1770 pg_num = (tail + hwq->pad_pgofft) / (PAGE_SIZE / hwq->pad_stride);
1771 pg_indx = (tail + hwq->pad_pgofft) % (PAGE_SIZE / hwq->pad_stride);
1772 buff = (void *)(hwq->pad_pg[pg_num] + pg_indx * hwq->pad_stride);
1773 swq->psn_ext = buff;
1774 swq->psn_search = buff;
1775 }
1776
1777 void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp)
1778 {
1779 struct bnxt_qplib_q *sq = &qp->sq;
1780
1781 bnxt_qplib_ring_prod_db(&sq->dbinfo, DBC_DBC_TYPE_SQ);
1782 }
1783
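/* Build and enqueue one send WQE: validate the QP state, reserve ring
 * slots, fill the 32B base+extended header, copy inline data or SGEs,
 * then apply the opcode-specific fields and advance the SQ PSN. The
 * doorbell is NOT rung here; callers batch posts and then ring it, as
 * in this caller-side sketch (next_wr() is a hypothetical helper, not
 * code from this file):
 *
 *	while ((wr = next_wr()))
 *		rc = bnxt_qplib_post_send(qp, &wqe);
 *	bnxt_qplib_post_send_db(qp);
 */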
1784 int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
1785 struct bnxt_qplib_swqe *wqe)
1786 {
1787 struct bnxt_qplib_nq_work *nq_work = NULL;
1788 int i, rc = 0, data_len = 0, pkt_num = 0;
1789 struct bnxt_qplib_q *sq = &qp->sq;
1790 struct bnxt_qplib_hwq *hwq;
1791 struct bnxt_qplib_swq *swq;
1792 bool sch_handler = false;
1793 u16 wqe_sz, qdf = 0;
1794 bool msn_update;
1795 void *base_hdr;
1796 void *ext_hdr;
1797 __le32 temp32;
1798 u32 wqe_idx;
1799 u32 slots;
1800 u16 idx;
1801
1802 hwq = &sq->hwq;
1803 if (qp->state != CMDQ_MODIFY_QP_NEW_STATE_RTS &&
1804 qp->state != CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1805 dev_err(&hwq->pdev->dev,
1806 "QPLIB: FP: QP (0x%x) is in the 0x%x state",
1807 qp->id, qp->state);
1808 rc = -EINVAL;
1809 goto done;
1810 }
1811
1812 slots = bnxt_qplib_required_slots(qp, wqe, &wqe_sz, &qdf, qp->wqe_mode);
1813 if (bnxt_qplib_queue_full(sq, slots + qdf)) {
1814 dev_err(&hwq->pdev->dev,
1815 "prod = %#x cons = %#x qdepth = %#x delta = %#x\n",
1816 hwq->prod, hwq->cons, hwq->depth, sq->q_full_delta);
1817 rc = -ENOMEM;
1818 goto done;
1819 }
1820
1821 swq = bnxt_qplib_get_swqe(sq, &wqe_idx);
1822 bnxt_qplib_pull_psn_buff(qp, sq, swq, qp->is_host_msn_tbl);
1823
1824 idx = 0;
1825 swq->slot_idx = hwq->prod;
1826 swq->slots = slots;
1827 swq->wr_id = wqe->wr_id;
1828 swq->type = wqe->type;
1829 swq->flags = wqe->flags;
1830 swq->start_psn = sq->psn & BTH_PSN_MASK;
1831 if (qp->sig_type)
1832 swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;
1833
1834 if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1835 sch_handler = true;
1836 dev_dbg(&hwq->pdev->dev,
1837 "%s Error QP. Scheduling for poll_cq\n", __func__);
1838 goto queue_err;
1839 }
1840
1841 base_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
1842 ext_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
1843 memset(base_hdr, 0, sizeof(struct sq_sge));
1844 memset(ext_hdr, 0, sizeof(struct sq_sge));
1845
1846 if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE)
1847 /* Copy the inline data */
1848 data_len = bnxt_qplib_put_inline(qp, wqe, &idx);
1849 else
1850 data_len = bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge,
1851 &idx);
1852 if (data_len < 0)
1853 goto queue_err;
1854 /* Make sure we update the MSN table only for WQEs that go on the wire */
1855 msn_update = true;
1856 /* Specifics */
1857 switch (wqe->type) {
1858 case BNXT_QPLIB_SWQE_TYPE_SEND:
1859 if (qp->type == CMDQ_CREATE_QP1_TYPE_GSI) {
1860 struct sq_send_raweth_qp1_hdr *sqe = base_hdr;
1861 struct sq_raw_ext_hdr *ext_sqe = ext_hdr;
1862 /* Assemble info for Raw Ethertype QPs */
1863
1864 sqe->wqe_type = wqe->type;
1865 sqe->flags = wqe->flags;
1866 sqe->wqe_size = wqe_sz;
1867 sqe->cfa_action = cpu_to_le16(wqe->rawqp1.cfa_action);
1868 sqe->lflags = cpu_to_le16(wqe->rawqp1.lflags);
1869 sqe->length = cpu_to_le32(data_len);
1870 ext_sqe->cfa_meta = cpu_to_le32((wqe->rawqp1.cfa_meta &
1871 SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_MASK) <<
1872 SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_SFT);
1873
1874 break;
1875 }
1876 fallthrough;
1877 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
1878 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
1879 {
1880 struct sq_ud_ext_hdr *ext_sqe = ext_hdr;
1881 struct sq_send_hdr *sqe = base_hdr;
1882
1883 sqe->wqe_type = wqe->type;
1884 sqe->flags = wqe->flags;
1885 sqe->wqe_size = wqe_sz;
1886 sqe->inv_key_or_imm_data = cpu_to_le32(wqe->send.inv_key);
1887 if (qp->type == CMDQ_CREATE_QP_TYPE_UD ||
1888 qp->type == CMDQ_CREATE_QP_TYPE_GSI) {
1889 sqe->q_key = cpu_to_le32(wqe->send.q_key);
1890 sqe->length = cpu_to_le32(data_len);
1891 sq->psn = (sq->psn + 1) & BTH_PSN_MASK;
1892 ext_sqe->dst_qp = cpu_to_le32(wqe->send.dst_qp &
1893 SQ_SEND_DST_QP_MASK);
1894 ext_sqe->avid = cpu_to_le32(wqe->send.avid &
1895 SQ_SEND_AVID_MASK);
1896 msn_update = false;
1897 } else {
1898 sqe->length = cpu_to_le32(data_len);
1899 if (qp->mtu)
1900 pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1901 if (!pkt_num)
1902 pkt_num = 1;
1903 sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1904 }
1905 break;
1906 }
1907 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
1908 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
1909 case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
1910 {
1911 struct sq_rdma_ext_hdr *ext_sqe = ext_hdr;
1912 struct sq_rdma_hdr *sqe = base_hdr;
1913
1914 sqe->wqe_type = wqe->type;
1915 sqe->flags = wqe->flags;
1916 sqe->wqe_size = wqe_sz;
1917 sqe->imm_data = cpu_to_le32(wqe->rdma.inv_key);
1918 sqe->length = cpu_to_le32((u32)data_len);
1919 ext_sqe->remote_va = cpu_to_le64(wqe->rdma.remote_va);
1920 ext_sqe->remote_key = cpu_to_le32(wqe->rdma.r_key);
1921 if (qp->mtu)
1922 pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1923 if (!pkt_num)
1924 pkt_num = 1;
1925 sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1926 break;
1927 }
1928 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
1929 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
1930 {
1931 struct sq_atomic_ext_hdr *ext_sqe = ext_hdr;
1932 struct sq_atomic_hdr *sqe = base_hdr;
1933
1934 sqe->wqe_type = wqe->type;
1935 sqe->flags = wqe->flags;
1936 sqe->remote_key = cpu_to_le32(wqe->atomic.r_key);
1937 sqe->remote_va = cpu_to_le64(wqe->atomic.remote_va);
1938 ext_sqe->swap_data = cpu_to_le64(wqe->atomic.swap_data);
1939 ext_sqe->cmp_data = cpu_to_le64(wqe->atomic.cmp_data);
1940 if (qp->mtu)
1941 pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1942 if (!pkt_num)
1943 pkt_num = 1;
1944 sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1945 break;
1946 }
1947 case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
1948 {
1949 struct sq_localinvalidate *sqe = base_hdr;
1950
1951 sqe->wqe_type = wqe->type;
1952 sqe->flags = wqe->flags;
1953 sqe->inv_l_key = cpu_to_le32(wqe->local_inv.inv_l_key);
1954 msn_update = false;
1955 break;
1956 }
1957 case BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR:
1958 {
1959 struct sq_fr_pmr_ext_hdr *ext_sqe = ext_hdr;
1960 struct sq_fr_pmr_hdr *sqe = base_hdr;
1961
1962 sqe->wqe_type = wqe->type;
1963 sqe->flags = wqe->flags;
1964 sqe->access_cntl = wqe->frmr.access_cntl |
1965 SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
1966 sqe->zero_based_page_size_log =
1967 (wqe->frmr.pg_sz_log & SQ_FR_PMR_PAGE_SIZE_LOG_MASK) <<
1968 SQ_FR_PMR_PAGE_SIZE_LOG_SFT |
1969 (wqe->frmr.zero_based ? SQ_FR_PMR_ZERO_BASED : 0);
1970 sqe->l_key = cpu_to_le32(wqe->frmr.l_key);
1971 temp32 = cpu_to_le32(wqe->frmr.length);
1972 memcpy(sqe->length, &temp32, sizeof(wqe->frmr.length));
1973 sqe->numlevels_pbl_page_size_log =
1974 ((wqe->frmr.pbl_pg_sz_log <<
1975 SQ_FR_PMR_PBL_PAGE_SIZE_LOG_SFT) &
1976 SQ_FR_PMR_PBL_PAGE_SIZE_LOG_MASK) |
1977 ((wqe->frmr.levels << SQ_FR_PMR_NUMLEVELS_SFT) &
1978 SQ_FR_PMR_NUMLEVELS_MASK);
1979
1980 for (i = 0; i < wqe->frmr.page_list_len; i++)
1981 wqe->frmr.pbl_ptr[i] = cpu_to_le64(
1982 wqe->frmr.page_list[i] |
1983 PTU_PTE_VALID);
1984 ext_sqe->pblptr = cpu_to_le64(wqe->frmr.pbl_dma_ptr);
1985 ext_sqe->va = cpu_to_le64(wqe->frmr.va);
1986 msn_update = false;
1987
1988 break;
1989 }
1990 case BNXT_QPLIB_SWQE_TYPE_BIND_MW:
1991 {
1992 struct sq_bind_ext_hdr *ext_sqe = ext_hdr;
1993 struct sq_bind_hdr *sqe = base_hdr;
1994
1995 sqe->wqe_type = wqe->type;
1996 sqe->flags = wqe->flags;
1997 sqe->access_cntl = wqe->bind.access_cntl;
1998 sqe->mw_type_zero_based = wqe->bind.mw_type |
1999 (wqe->bind.zero_based ? SQ_BIND_ZERO_BASED : 0);
2000 sqe->parent_l_key = cpu_to_le32(wqe->bind.parent_l_key);
2001 sqe->l_key = cpu_to_le32(wqe->bind.r_key);
2002 ext_sqe->va = cpu_to_le64(wqe->bind.va);
2003 ext_sqe->length_lo = cpu_to_le32(wqe->bind.length);
2004 msn_update = false;
2005 break;
2006 }
2007 default:
2008 /* Bad wqe, return error */
2009 rc = -EINVAL;
2010 goto done;
2011 }
2012 if (!qp->is_host_msn_tbl || msn_update) {
2013 swq->next_psn = sq->psn & BTH_PSN_MASK;
2014 bnxt_qplib_fill_psn_search(qp, wqe, swq);
2015 }
2016 queue_err:
2017 bnxt_qplib_swq_mod_start(sq, wqe_idx);
2018 bnxt_qplib_hwq_incr_prod(&sq->dbinfo, hwq, swq->slots);
2019 qp->wqe_cnt++;
2020 done:
2021 if (sch_handler) {
2022 nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
2023 if (nq_work) {
2024 nq_work->cq = qp->scq;
2025 nq_work->nq = qp->scq->nq;
2026 INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
2027 queue_work(qp->scq->nq->cqn_wq, &nq_work->work);
2028 } else {
2029 dev_err(&hwq->pdev->dev,
2030 "FP: Failed to allocate SQ nq_work!\n");
2031 rc = -ENOMEM;
2032 }
2033 }
2034 return rc;
2035 }
2036
2037 void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp)
2038 {
2039 struct bnxt_qplib_q *rq = &qp->rq;
2040
2041 bnxt_qplib_ring_prod_db(&rq->dbinfo, DBC_DBC_TYPE_RQ);
2042 }
2043
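/* Post one receive WQE. The flow mirrors the send side: reserve a
 * fixed number of slots (rq->dbinfo.max_slot), fill the header and
 * SGEs, and stash wr_id in the swq. A QP in the error state skips the
 * ring write and instead schedules poll_cq work so the WQE can be
 * flushed back to the consumer.
 */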
2044 int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
2045 struct bnxt_qplib_swqe *wqe)
2046 {
2047 struct bnxt_qplib_nq_work *nq_work = NULL;
2048 struct bnxt_qplib_q *rq = &qp->rq;
2049 struct rq_wqe_hdr *base_hdr;
2050 struct rq_ext_hdr *ext_hdr;
2051 struct bnxt_qplib_hwq *hwq;
2052 struct bnxt_qplib_swq *swq;
2053 bool sch_handler = false;
2054 u16 wqe_sz, idx;
2055 u32 wqe_idx;
2056 int rc = 0;
2057
2058 hwq = &rq->hwq;
2059 if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
2060 dev_err(&hwq->pdev->dev,
2061 "QPLIB: FP: QP (0x%x) is in the 0x%x state",
2062 qp->id, qp->state);
2063 rc = -EINVAL;
2064 goto done;
2065 }
2066
2067 if (bnxt_qplib_queue_full(rq, rq->dbinfo.max_slot)) {
2068 dev_err(&hwq->pdev->dev,
2069 "FP: QP (0x%x) RQ is full!\n", qp->id);
2070 rc = -EINVAL;
2071 goto done;
2072 }
2073
2074 swq = bnxt_qplib_get_swqe(rq, &wqe_idx);
2075 swq->wr_id = wqe->wr_id;
2076 swq->slots = rq->dbinfo.max_slot;
2077
2078 if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
2079 sch_handler = true;
2080 dev_dbg(&hwq->pdev->dev,
2081 "%s: Error QP. Scheduling for poll_cq\n", __func__);
2082 goto queue_err;
2083 }
2084
2085 idx = 0;
2086 base_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
2087 ext_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
2088 memset(base_hdr, 0, sizeof(struct sq_sge));
2089 memset(ext_hdr, 0, sizeof(struct sq_sge));
2090 wqe_sz = (sizeof(struct rq_wqe_hdr) +
2091 wqe->num_sge * sizeof(struct sq_sge)) >> 4;
2092 bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge, &idx);
2093 if (!wqe->num_sge) {
2094 struct sq_sge *sge;
2095
2096 sge = bnxt_qplib_get_prod_qe(hwq, idx++);
2097 sge->size = 0;
2098 wqe_sz++;
2099 }
2100 base_hdr->wqe_type = wqe->type;
2101 base_hdr->flags = wqe->flags;
2102 base_hdr->wqe_size = wqe_sz;
2103 base_hdr->wr_id[0] = cpu_to_le32(wqe_idx);
2104 queue_err:
2105 bnxt_qplib_swq_mod_start(rq, wqe_idx);
2106 bnxt_qplib_hwq_incr_prod(&rq->dbinfo, hwq, swq->slots);
2107 done:
2108 if (sch_handler) {
2109 nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
2110 if (nq_work) {
2111 nq_work->cq = qp->rcq;
2112 nq_work->nq = qp->rcq->nq;
2113 INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
2114 queue_work(qp->rcq->nq->cqn_wq, &nq_work->work);
2115 } else {
2116 dev_err(&hwq->pdev->dev,
2117 "FP: Failed to allocate RQ nq_work!\n");
2118 rc = -ENOMEM;
2119 }
2120 }
2121
2122 return rc;
2123 }
2124
2125 /* CQ */
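/* Create a completion queue: allocate the CQ ring (hwq), issue
 * CMDQ_BASE_OPCODE_CREATE_CQ to firmware with the ring's PBL, page
 * size, and CNQ binding, then set up the doorbell info and arm the
 * ARMENA doorbell so the CQ can start reporting events.
 */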
2126 int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
2127 {
2128 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
2129 struct bnxt_qplib_hwq_attr hwq_attr = {};
2130 struct creq_create_cq_resp resp = {};
2131 struct bnxt_qplib_cmdqmsg msg = {};
2132 struct cmdq_create_cq req = {};
2133 struct bnxt_qplib_pbl *pbl;
2134 u32 pg_sz_lvl;
2135 int rc;
2136
2137 if (!cq->dpi) {
2138 dev_err(&rcfw->pdev->dev,
2139 "FP: CREATE_CQ failed due to NULL DPI\n");
2140 return -EINVAL;
2141 }
2142
2143 cq->dbinfo.flags = 0;
2144 hwq_attr.res = res;
2145 hwq_attr.depth = cq->max_wqe;
2146 hwq_attr.stride = sizeof(struct cq_base);
2147 hwq_attr.type = HWQ_TYPE_QUEUE;
2148 hwq_attr.sginfo = &cq->sg_info;
2149 rc = bnxt_qplib_alloc_init_hwq(&cq->hwq, &hwq_attr);
2150 if (rc)
2151 return rc;
2152
2153 bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
2154 CMDQ_BASE_OPCODE_CREATE_CQ,
2155 sizeof(req));
2156
2157 req.dpi = cpu_to_le32(cq->dpi->dpi);
2158 req.cq_handle = cpu_to_le64(cq->cq_handle);
2159 req.cq_size = cpu_to_le32(cq->max_wqe);
2160 pbl = &cq->hwq.pbl[PBL_LVL_0];
2161 pg_sz_lvl = (bnxt_qplib_base_pg_size(&cq->hwq) <<
2162 CMDQ_CREATE_CQ_PG_SIZE_SFT);
2163 pg_sz_lvl |= (cq->hwq.level & CMDQ_CREATE_CQ_LVL_MASK);
2164 req.pg_size_lvl = cpu_to_le32(pg_sz_lvl);
2165 req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
2166 req.cq_fco_cnq_id = cpu_to_le32(
2167 (cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) <<
2168 CMDQ_CREATE_CQ_CNQ_ID_SFT);
2169 bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
2170 sizeof(resp), 0);
2171 rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
2172 if (rc)
2173 goto fail;
2174
2175 cq->id = le32_to_cpu(resp.xid);
2176 cq->period = BNXT_QPLIB_QUEUE_START_PERIOD;
2177 init_waitqueue_head(&cq->waitq);
2178 INIT_LIST_HEAD(&cq->sqf_head);
2179 INIT_LIST_HEAD(&cq->rqf_head);
2180 spin_lock_init(&cq->compl_lock);
2181 spin_lock_init(&cq->flush_lock);
2182
2183 cq->dbinfo.hwq = &cq->hwq;
2184 cq->dbinfo.xid = cq->id;
2185 cq->dbinfo.db = cq->dpi->dbr;
2186 cq->dbinfo.priv_db = res->dpi_tbl.priv_db;
2187 cq->dbinfo.flags = 0;
2188 cq->dbinfo.toggle = 0;
2189
2190 bnxt_qplib_armen_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMENA);
2191
2192 return 0;
2193
2194 fail:
2195 bnxt_qplib_free_hwq(res, &cq->hwq);
2196 return rc;
2197 }
2198
2199 void bnxt_qplib_resize_cq_complete(struct bnxt_qplib_res *res,
2200 struct bnxt_qplib_cq *cq)
2201 {
2202 bnxt_qplib_free_hwq(res, &cq->hwq);
2203 memcpy(&cq->hwq, &cq->resize_hwq, sizeof(cq->hwq));
2204 /* Reset only the cons bit in the flags */
2205 cq->dbinfo.flags &= ~(1UL << BNXT_QPLIB_FLAG_EPOCH_CONS_SHIFT);
2206 }
2207
2208 int bnxt_qplib_resize_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq,
2209 int new_cqes)
2210 {
2211 struct bnxt_qplib_hwq_attr hwq_attr = {};
2212 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
2213 struct creq_resize_cq_resp resp = {};
2214 struct bnxt_qplib_cmdqmsg msg = {};
2215 struct cmdq_resize_cq req = {};
2216 struct bnxt_qplib_pbl *pbl;
2217 u32 pg_sz, lvl, new_sz;
2218 int rc;
2219
2220 bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
2221 CMDQ_BASE_OPCODE_RESIZE_CQ,
2222 sizeof(req));
2223 hwq_attr.sginfo = &cq->sg_info;
2224 hwq_attr.res = res;
2225 hwq_attr.depth = new_cqes;
2226 hwq_attr.stride = sizeof(struct cq_base);
2227 hwq_attr.type = HWQ_TYPE_QUEUE;
2228 rc = bnxt_qplib_alloc_init_hwq(&cq->resize_hwq, &hwq_attr);
2229 if (rc)
2230 return rc;
2231
2232 req.cq_cid = cpu_to_le32(cq->id);
2233 pbl = &cq->resize_hwq.pbl[PBL_LVL_0];
2234 pg_sz = bnxt_qplib_base_pg_size(&cq->resize_hwq);
2235 lvl = (cq->resize_hwq.level << CMDQ_RESIZE_CQ_LVL_SFT) &
2236 CMDQ_RESIZE_CQ_LVL_MASK;
2237 new_sz = (new_cqes << CMDQ_RESIZE_CQ_NEW_CQ_SIZE_SFT) &
2238 CMDQ_RESIZE_CQ_NEW_CQ_SIZE_MASK;
2239 req.new_cq_size_pg_size_lvl = cpu_to_le32(new_sz | pg_sz | lvl);
2240 req.new_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
2241
2242 bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
2243 sizeof(resp), 0);
2244 rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
2245 return rc;
2246 }
2247
2248 int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
2249 {
2250 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
2251 struct creq_destroy_cq_resp resp = {};
2252 struct bnxt_qplib_cmdqmsg msg = {};
2253 struct cmdq_destroy_cq req = {};
2254 u16 total_cnq_events;
2255 int rc;
2256
2257 bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
2258 CMDQ_BASE_OPCODE_DESTROY_CQ,
2259 sizeof(req));
2260
2261 req.cq_cid = cpu_to_le32(cq->id);
2262 bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
2263 sizeof(resp), 0);
2264 rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
2265 if (rc)
2266 return rc;
2267 total_cnq_events = le16_to_cpu(resp.total_cnq_events);
2268 __wait_for_all_nqes(cq, total_cnq_events);
2269 bnxt_qplib_free_hwq(res, &cq->hwq);
2270 return 0;
2271 }
2272
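/* Fabricate FLUSHED_ERR completions for every outstanding SQ WQE from
 * swq_start to swq_last, consuming *budget as CQEs are generated.
 * Fence WQEs are consumed without producing a completion. Returns
 * -EAGAIN if the budget runs out before the queue is drained.
 */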
2273 static int __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp,
2274 struct bnxt_qplib_cqe **pcqe, int *budget)
2275 {
2276 struct bnxt_qplib_cqe *cqe;
2277 u32 start, last;
2278 int rc = 0;
2279
2280 /* Now complete all outstanding SQEs with FLUSHED_ERR */
2281 start = sq->swq_start;
2282 cqe = *pcqe;
2283 while (*budget) {
2284 last = sq->swq_last;
2285 if (start == last)
2286 break;
2287 /* Skip the FENCE WQE completions */
2288 if (sq->swq[last].wr_id == BNXT_QPLIB_FENCE_WRID) {
2289 bnxt_qplib_cancel_phantom_processing(qp);
2290 goto skip_compl;
2291 }
2292 memset(cqe, 0, sizeof(*cqe));
2293 cqe->status = CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR;
2294 cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2295 cqe->qp_handle = (u64)(unsigned long)qp;
2296 cqe->wr_id = sq->swq[last].wr_id;
2297 cqe->src_qp = qp->id;
2298 cqe->type = sq->swq[last].type;
2299 cqe++;
2300 (*budget)--;
2301 skip_compl:
2302 bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
2303 sq->swq[last].slots, &sq->dbinfo.flags);
2304 sq->swq_last = sq->swq[last].next_idx;
2305 }
2306 *pcqe = cqe;
2307 if (!(*budget) && sq->swq_last != start)
2308 /* Out of budget */
2309 rc = -EAGAIN;
2310
2311 return rc;
2312 }
2313
2314 static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
2315 struct bnxt_qplib_cqe **pcqe, int *budget)
2316 {
2317 struct bnxt_qplib_cqe *cqe;
2318 u32 start, last;
2319 int opcode = 0;
2320 int rc = 0;
2321
2322 switch (qp->type) {
2323 case CMDQ_CREATE_QP1_TYPE_GSI:
2324 opcode = CQ_BASE_CQE_TYPE_RES_RAWETH_QP1;
2325 break;
2326 case CMDQ_CREATE_QP_TYPE_RC:
2327 opcode = CQ_BASE_CQE_TYPE_RES_RC;
2328 break;
2329 case CMDQ_CREATE_QP_TYPE_UD:
2330 case CMDQ_CREATE_QP_TYPE_GSI:
2331 opcode = CQ_BASE_CQE_TYPE_RES_UD;
2332 break;
2333 }
2334
2335 /* Flush the rest of the RQ */
2336 start = rq->swq_start;
2337 cqe = *pcqe;
2338 while (*budget) {
2339 last = rq->swq_last;
2340 if (last == start)
2341 break;
2342 memset(cqe, 0, sizeof(*cqe));
2343 cqe->status =
2344 CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR;
2345 cqe->opcode = opcode;
2346 cqe->qp_handle = (unsigned long)qp;
2347 cqe->wr_id = rq->swq[last].wr_id;
2348 cqe++;
2349 (*budget)--;
2350 bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
2351 rq->swq[last].slots, &rq->dbinfo.flags);
2352 rq->swq_last = rq->swq[last].next_idx;
2353 }
2354 *pcqe = cqe;
2355 if (!*budget && rq->swq_last != start)
2356 /* Out of budget */
2357 rc = -EAGAIN;
2358
2359 return rc;
2360 }
2361
2362 void bnxt_qplib_mark_qp_error(void *qp_handle)
2363 {
2364 struct bnxt_qplib_qp *qp = qp_handle;
2365
2366 if (!qp)
2367 return;
2368
2369 /* Must block new posting of SQ and RQ */
2370 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2371 bnxt_qplib_cancel_phantom_processing(qp);
2372 }
2373
2374 /* Note: SQEs are valid from sw_sq_cons up to cqe_sq_cons (exclusive).
2375  * CQEs are tracked from sw_cq_cons to max_elements but are valid only if VALID=1.
2376  */
2377 static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
2378 u32 cq_cons, u32 swq_last, u32 cqe_sq_cons)
2379 {
2380 u32 peek_sw_cq_cons, peek_sq_cons_idx, peek_flags;
2381 struct bnxt_qplib_q *sq = &qp->sq;
2382 struct cq_req *peek_req_hwcqe;
2383 struct bnxt_qplib_qp *peek_qp;
2384 struct bnxt_qplib_q *peek_sq;
2385 struct bnxt_qplib_swq *swq;
2386 struct cq_base *peek_hwcqe;
2387 int i, rc = 0;
2388
2389 /* Normal mode */
2390 /* Check for the psn_search marking before completing */
2391 swq = &sq->swq[swq_last];
2392 if (swq->psn_search &&
2393 le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) {
2394 /* Unmark */
2395 swq->psn_search->flags_next_psn = cpu_to_le32
2396 (le32_to_cpu(swq->psn_search->flags_next_psn)
2397 & ~0x80000000);
2398 dev_dbg(&cq->hwq.pdev->dev,
2399 "FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n",
2400 cq_cons, qp->id, swq_last, cqe_sq_cons);
2401 sq->condition = true;
2402 sq->send_phantom = true;
2403
2404 /* TODO: Only ARM if the previous SQE is ARMALL */
2405 bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMALL);
2406 rc = -EAGAIN;
2407 goto out;
2408 }
2409 if (sq->condition) {
2410 /* Peek at the completions */
2411 peek_flags = cq->dbinfo.flags;
2412 peek_sw_cq_cons = cq_cons;
2413 i = cq->hwq.max_elements;
2414 while (i--) {
2415 peek_hwcqe = bnxt_qplib_get_qe(&cq->hwq,
2416 peek_sw_cq_cons, NULL);
2417 /* If the next hwcqe is VALID */
2418 if (CQE_CMP_VALID(peek_hwcqe, peek_flags)) {
2419 /*
2420 * The valid test of the entry must be done first before
2421 * reading any further.
2422 */
2423 dma_rmb();
2424 /* If the next hwcqe is a REQ */
2425 if ((peek_hwcqe->cqe_type_toggle &
2426 CQ_BASE_CQE_TYPE_MASK) ==
2427 CQ_BASE_CQE_TYPE_REQ) {
2428 peek_req_hwcqe = (struct cq_req *)
2429 peek_hwcqe;
2430 peek_qp = (struct bnxt_qplib_qp *)
2431 ((unsigned long)
2432 le64_to_cpu
2433 (peek_req_hwcqe->qp_handle));
2434 peek_sq = &peek_qp->sq;
2435 peek_sq_cons_idx =
2436 ((le16_to_cpu(
2437 peek_req_hwcqe->sq_cons_idx)
2438 - 1) % sq->max_wqe);
2439 /* If the hwcqe's sq's wr_id matches */
2440 if (peek_sq == sq &&
2441 sq->swq[peek_sq_cons_idx].wr_id ==
2442 BNXT_QPLIB_FENCE_WRID) {
2443 /*
2444 * Unbreak only if the phantom
2445 * comes back
2446 */
2447 dev_dbg(&cq->hwq.pdev->dev,
2448 "FP: Got Phantom CQE\n");
2449 sq->condition = false;
2450 sq->single = true;
2451 rc = 0;
2452 goto out;
2453 }
2454 }
2455 /* Valid but not the phantom, so keep looping */
2456 } else {
2457 /* Not valid yet, just exit and wait */
2458 rc = -EINVAL;
2459 goto out;
2460 }
2461 bnxt_qplib_hwq_incr_cons(cq->hwq.max_elements,
2462 &peek_sw_cq_cons,
2463 1, &peek_flags);
2464 }
2465 dev_err(&cq->hwq.pdev->dev,
2466 "Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x\n",
2467 cq_cons, qp->id, swq_last, cqe_sq_cons);
2468 rc = -EINVAL;
2469 }
2470 out:
2471 return rc;
2472 }
2473
2474 static int bnxt_qplib_get_cqe_sq_cons(struct bnxt_qplib_q *sq, u32 cqe_slot)
2475 {
2476 struct bnxt_qplib_hwq *sq_hwq;
2477 struct bnxt_qplib_swq *swq;
2478 int cqe_sq_cons = -1;
2479 u32 start, last;
2480
2481 sq_hwq = &sq->hwq;
2482
2483 start = sq->swq_start;
2484 last = sq->swq_last;
2485
2486 while (last != start) {
2487 swq = &sq->swq[last];
2488 if (swq->slot_idx == cqe_slot) {
2489 cqe_sq_cons = swq->next_idx;
2490 dev_err(&sq_hwq->pdev->dev, "%s: Found cons wqe = %d slot = %d\n",
2491 __func__, cqe_sq_cons, cqe_slot);
2492 break;
2493 }
2494
2495 last = swq->next_idx;
2496 }
2497 return cqe_sq_cons;
2498 }
2499
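/* Process a REQ (send-completion) CQE. Because the hardware aggregates
 * completions, a single hwcqe acknowledges every SWQE from the current
 * SQ consumer up to sq_cons_idx; a CQE is emitted only for signaled
 * WQEs, except that an error status always produces a CQE and pushes
 * the QP onto the flush list.
 */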
2500 static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
2501 struct cq_req *hwcqe,
2502 struct bnxt_qplib_cqe **pcqe, int *budget,
2503 u32 cq_cons, struct bnxt_qplib_qp **lib_qp)
2504 {
2505 struct bnxt_qplib_swq *swq;
2506 struct bnxt_qplib_cqe *cqe;
2507 u32 cqe_sq_cons, slot_num;
2508 struct bnxt_qplib_qp *qp;
2509 struct bnxt_qplib_q *sq;
2510 int cqe_cons;
2511 int rc = 0;
2512
2513 qp = (struct bnxt_qplib_qp *)((unsigned long)
2514 le64_to_cpu(hwcqe->qp_handle));
2515 if (!qp) {
2516 dev_err(&cq->hwq.pdev->dev,
2517 "FP: Process Req qp is NULL\n");
2518 return -EINVAL;
2519 }
2520 sq = &qp->sq;
2521
2522 cqe_sq_cons = le16_to_cpu(hwcqe->sq_cons_idx) % sq->max_sw_wqe;
2523 if (qp->sq.flushed) {
2524 dev_dbg(&cq->hwq.pdev->dev,
2525 "%s: QP in Flush QP = %p\n", __func__, qp);
2526 goto done;
2527 }
2528
2529 if (__is_err_cqe_for_var_wqe(qp, hwcqe->status)) {
2530 slot_num = le16_to_cpu(hwcqe->sq_cons_idx);
2531 cqe_cons = bnxt_qplib_get_cqe_sq_cons(sq, slot_num);
2532 if (cqe_cons < 0) {
2533 dev_err(&cq->hwq.pdev->dev, "%s: Wrong SQ cons cqe_slot_indx = %d\n",
2534 __func__, slot_num);
2535 goto done;
2536 }
2537 cqe_sq_cons = cqe_cons;
2538 dev_err(&cq->hwq.pdev->dev, "%s: cqe_sq_cons = %d swq_last = %d swq_start = %d\n",
2539 __func__, cqe_sq_cons, sq->swq_last, sq->swq_start);
2540 }
2541
2542 /* We must walk the SQ's swq to fabricate CQEs for all previously
2543  * signaled SWQEs, which CQE aggregation coalesced, from the current
2544  * sq cons up to cqe_sq_cons.
2545  */
2546 cqe = *pcqe;
2547 while (*budget) {
2548 if (sq->swq_last == cqe_sq_cons)
2549 /* Done */
2550 break;
2551
2552 swq = &sq->swq[sq->swq_last];
2553 memset(cqe, 0, sizeof(*cqe));
2554 cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2555 cqe->qp_handle = (u64)(unsigned long)qp;
2556 cqe->src_qp = qp->id;
2557 cqe->wr_id = swq->wr_id;
2558 if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID)
2559 goto skip;
2560 cqe->type = swq->type;
2561
2562 /* For the last CQE, check the status. On error, the request must
2563  * complete with the hwcqe's error status regardless of whether it
2564  * was signaled.
2565  */
2566 if (swq->next_idx == cqe_sq_cons &&
2567 hwcqe->status != CQ_REQ_STATUS_OK) {
2568 cqe->status = hwcqe->status;
2569 dev_err(&cq->hwq.pdev->dev,
2570 "FP: CQ Processed Req wr_id[%d] = 0x%llx with status 0x%x\n",
2571 sq->swq_last, cqe->wr_id, cqe->status);
2572 cqe++;
2573 (*budget)--;
2574 bnxt_qplib_mark_qp_error(qp);
2575 /* Add qp to flush list of the CQ */
2576 bnxt_qplib_add_flush_qp(qp);
2577 } else {
2578 /* Before we complete, do WA 9060 */
2579 if (!bnxt_qplib_is_chip_gen_p5_p7(qp->cctx)) {
2580 if (do_wa9060(qp, cq, cq_cons, sq->swq_last,
2581 cqe_sq_cons)) {
2582 *lib_qp = qp;
2583 goto out;
2584 }
2585 }
2586 if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
2587 cqe->status = CQ_REQ_STATUS_OK;
2588 cqe++;
2589 (*budget)--;
2590 }
2591 }
2592 skip:
2593 bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
2594 swq->slots, &sq->dbinfo.flags);
2595 sq->swq_last = swq->next_idx;
2596 if (sq->single)
2597 break;
2598 }
2599 out:
2600 *pcqe = cqe;
2601 if (sq->swq_last != cqe_sq_cons) {
2602 /* Out of budget */
2603 rc = -EAGAIN;
2604 goto done;
2605 }
2606 /*
2607  * Return to normal completion mode only after all of the WCs for
2608  * this CQE have been completed
2609  */
2610 sq->single = false;
2611 done:
2612 return rc;
2613 }
2614
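/* Return an SRQ element to the free list: link the released tag at the
 * tail, mark it as the new last entry, and advance the SRQ consumer
 * index under the hwq lock.
 */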
2615 static void bnxt_qplib_release_srqe(struct bnxt_qplib_srq *srq, u32 tag)
2616 {
2617 spin_lock(&srq->hwq.lock);
2618 srq->swq[srq->last_idx].next_idx = (int)tag;
2619 srq->last_idx = (int)tag;
2620 srq->swq[srq->last_idx].next_idx = -1;
2621 bnxt_qplib_hwq_incr_cons(srq->hwq.max_elements, &srq->hwq.cons,
2622 srq->dbinfo.max_slot, &srq->dbinfo.flags);
2623 spin_unlock(&srq->hwq.lock);
2624 }
2625
2626 static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
2627 struct cq_res_rc *hwcqe,
2628 struct bnxt_qplib_cqe **pcqe,
2629 int *budget)
2630 {
2631 struct bnxt_qplib_srq *srq;
2632 struct bnxt_qplib_cqe *cqe;
2633 struct bnxt_qplib_qp *qp;
2634 struct bnxt_qplib_q *rq;
2635 u32 wr_id_idx;
2636
2637 qp = (struct bnxt_qplib_qp *)((unsigned long)
2638 le64_to_cpu(hwcqe->qp_handle));
2639 if (!qp) {
2640 dev_err(&cq->hwq.pdev->dev, "process_cq RC qp is NULL\n");
2641 return -EINVAL;
2642 }
2643 if (qp->rq.flushed) {
2644 dev_dbg(&cq->hwq.pdev->dev,
2645 "%s: QP in Flush QP = %p\n", __func__, qp);
2646 return 0;
2647 }
2648
2649 cqe = *pcqe;
2650 cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2651 cqe->length = le32_to_cpu(hwcqe->length);
2652 cqe->invrkey = le32_to_cpu(hwcqe->imm_data_or_inv_r_key);
2653 cqe->mr_handle = le64_to_cpu(hwcqe->mr_handle);
2654 cqe->flags = le16_to_cpu(hwcqe->flags);
2655 cqe->status = hwcqe->status;
2656 cqe->qp_handle = (u64)(unsigned long)qp;
2657
2658 wr_id_idx = le32_to_cpu(hwcqe->srq_or_rq_wr_id) &
2659 CQ_RES_RC_SRQ_OR_RQ_WR_ID_MASK;
2660 if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
2661 srq = qp->srq;
2662 if (!srq)
2663 return -EINVAL;
2664 if (wr_id_idx >= srq->hwq.max_elements) {
2665 dev_err(&cq->hwq.pdev->dev,
2666 "FP: CQ Process RC wr_id idx 0x%x exceeded SRQ max 0x%x\n",
2667 wr_id_idx, srq->hwq.max_elements);
2668 return -EINVAL;
2669 }
2670 cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2671 bnxt_qplib_release_srqe(srq, wr_id_idx);
2672 cqe++;
2673 (*budget)--;
2674 *pcqe = cqe;
2675 } else {
2676 struct bnxt_qplib_swq *swq;
2677
2678 rq = &qp->rq;
2679 if (wr_id_idx > (rq->max_wqe - 1)) {
2680 dev_err(&cq->hwq.pdev->dev,
2681 "FP: CQ Process RC wr_id idx 0x%x exceeded RQ max 0x%x\n",
2682 wr_id_idx, rq->max_wqe);
2683 return -EINVAL;
2684 }
2685 if (wr_id_idx != rq->swq_last)
2686 return -EINVAL;
2687 swq = &rq->swq[rq->swq_last];
2688 cqe->wr_id = swq->wr_id;
2689 cqe++;
2690 (*budget)--;
2691 bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
2692 swq->slots, &rq->dbinfo.flags);
2693 rq->swq_last = swq->next_idx;
2694 *pcqe = cqe;
2695
2696 if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2697 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2698 /* Add qp to flush list of the CQ */
2699 bnxt_qplib_add_flush_qp(qp);
2700 }
2701 }
2702
2703 return 0;
2704 }
2705
2706 static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
2707 struct cq_res_ud *hwcqe,
2708 struct bnxt_qplib_cqe **pcqe,
2709 int *budget)
2710 {
2711 struct bnxt_qplib_srq *srq;
2712 struct bnxt_qplib_cqe *cqe;
2713 struct bnxt_qplib_qp *qp;
2714 struct bnxt_qplib_q *rq;
2715 u32 wr_id_idx;
2716
2717 qp = (struct bnxt_qplib_qp *)((unsigned long)
2718 le64_to_cpu(hwcqe->qp_handle));
2719 if (!qp) {
2720 dev_err(&cq->hwq.pdev->dev, "process_cq UD qp is NULL\n");
2721 return -EINVAL;
2722 }
2723 if (qp->rq.flushed) {
2724 dev_dbg(&cq->hwq.pdev->dev,
2725 "%s: QP in Flush QP = %p\n", __func__, qp);
2726 return 0;
2727 }
2728 cqe = *pcqe;
2729 cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2730 cqe->length = le16_to_cpu(hwcqe->length) & CQ_RES_UD_LENGTH_MASK;
2731 cqe->cfa_meta = le16_to_cpu(hwcqe->cfa_metadata);
2732 cqe->invrkey = le32_to_cpu(hwcqe->imm_data);
2733 cqe->flags = le16_to_cpu(hwcqe->flags);
2734 cqe->status = hwcqe->status;
2735 cqe->qp_handle = (u64)(unsigned long)qp;
2736 /* FIXME: endianness fix needed for smac */
2737 memcpy(cqe->smac, hwcqe->src_mac, ETH_ALEN);
2738 wr_id_idx = le32_to_cpu(hwcqe->src_qp_high_srq_or_rq_wr_id)
2739 & CQ_RES_UD_SRQ_OR_RQ_WR_ID_MASK;
2740 cqe->src_qp = le16_to_cpu(hwcqe->src_qp_low) |
2741 ((le32_to_cpu(
2742 hwcqe->src_qp_high_srq_or_rq_wr_id) &
2743 CQ_RES_UD_SRC_QP_HIGH_MASK) >> 8);
2744
2745 if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
2746 srq = qp->srq;
2747 if (!srq)
2748 return -EINVAL;
2749
2750 if (wr_id_idx >= srq->hwq.max_elements) {
2751 dev_err(&cq->hwq.pdev->dev,
2752 "FP: CQ Process UD wr_id idx 0x%x exceeded SRQ max 0x%x\n",
2753 wr_id_idx, srq->hwq.max_elements);
2754 return -EINVAL;
2755 }
2756 cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2757 bnxt_qplib_release_srqe(srq, wr_id_idx);
2758 cqe++;
2759 (*budget)--;
2760 *pcqe = cqe;
2761 } else {
2762 struct bnxt_qplib_swq *swq;
2763
2764 rq = &qp->rq;
2765 if (wr_id_idx > (rq->max_wqe - 1)) {
2766 dev_err(&cq->hwq.pdev->dev,
2767 "FP: CQ Process UD wr_id idx 0x%x exceeded RQ max 0x%x\n",
2768 wr_id_idx, rq->max_wqe);
2769 return -EINVAL;
2770 }
2771
2772 if (rq->swq_last != wr_id_idx)
2773 return -EINVAL;
2774 swq = &rq->swq[rq->swq_last];
2775 cqe->wr_id = swq->wr_id;
2776 cqe++;
2777 (*budget)--;
2778 bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
2779 swq->slots, &rq->dbinfo.flags);
2780 rq->swq_last = swq->next_idx;
2781 *pcqe = cqe;
2782
2783 if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2784 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2785 /* Add qp to flush list of the CQ */
2786 bnxt_qplib_add_flush_qp(qp);
2787 }
2788 }
2789
2790 return 0;
2791 }
2792
2793 bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
2794 {
2795 struct cq_base *hw_cqe;
2796 bool rc = true;
2797
2798 hw_cqe = bnxt_qplib_get_qe(&cq->hwq, cq->hwq.cons, NULL);
2799 /* Check for Valid bit. If the CQE is valid, return false */
2800 rc = !CQE_CMP_VALID(hw_cqe, cq->dbinfo.flags);
2801 return rc;
2802 }
2803
2804 static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
2805 struct cq_res_raweth_qp1 *hwcqe,
2806 struct bnxt_qplib_cqe **pcqe,
2807 int *budget)
2808 {
2809 struct bnxt_qplib_qp *qp;
2810 struct bnxt_qplib_q *rq;
2811 struct bnxt_qplib_srq *srq;
2812 struct bnxt_qplib_cqe *cqe;
2813 u32 wr_id_idx;
2814
2815 qp = (struct bnxt_qplib_qp *)((unsigned long)
2816 le64_to_cpu(hwcqe->qp_handle));
2817 if (!qp) {
2818 dev_err(&cq->hwq.pdev->dev, "process_cq Raw/QP1 qp is NULL\n");
2819 return -EINVAL;
2820 }
2821 if (qp->rq.flushed) {
2822 dev_dbg(&cq->hwq.pdev->dev,
2823 "%s: QP in Flush QP = %p\n", __func__, qp);
2824 return 0;
2825 }
2826 cqe = *pcqe;
2827 cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2828 cqe->flags = le16_to_cpu(hwcqe->flags);
2829 cqe->qp_handle = (u64)(unsigned long)qp;
2830
2831 wr_id_idx =
2832 le32_to_cpu(hwcqe->raweth_qp1_payload_offset_srq_or_rq_wr_id)
2833 & CQ_RES_RAWETH_QP1_SRQ_OR_RQ_WR_ID_MASK;
2834 cqe->src_qp = qp->id;
2835 if (qp->id == 1 && !cqe->length) {
2836 /* Workaround for length misdetection on QP1 */
2837 cqe->length = 296;
2838 } else {
2839 cqe->length = le16_to_cpu(hwcqe->length);
2840 }
2841 cqe->pkey_index = qp->pkey_index;
2842 memcpy(cqe->smac, qp->smac, 6);
2843
2844 cqe->raweth_qp1_flags = le16_to_cpu(hwcqe->raweth_qp1_flags);
2845 cqe->raweth_qp1_flags2 = le32_to_cpu(hwcqe->raweth_qp1_flags2);
2846 cqe->raweth_qp1_metadata = le32_to_cpu(hwcqe->raweth_qp1_metadata);
2847
2848 if (cqe->flags & CQ_RES_RAWETH_QP1_FLAGS_SRQ_SRQ) {
2849 srq = qp->srq;
2850 if (!srq) {
2851 dev_err(&cq->hwq.pdev->dev,
2852 "FP: SRQ used but not defined??\n");
2853 return -EINVAL;
2854 }
2855 if (wr_id_idx >= srq->hwq.max_elements) {
2856 dev_err(&cq->hwq.pdev->dev,
2857 "FP: CQ Process Raw/QP1 wr_id idx 0x%x exceeded SRQ max 0x%x\n",
2858 wr_id_idx, srq->hwq.max_elements);
2859 return -EINVAL;
2860 }
2861 cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2862 bnxt_qplib_release_srqe(srq, wr_id_idx);
2863 cqe++;
2864 (*budget)--;
2865 *pcqe = cqe;
2866 } else {
2867 struct bnxt_qplib_swq *swq;
2868
2869 rq = &qp->rq;
2870 if (wr_id_idx > (rq->max_wqe - 1)) {
2871 dev_err(&cq->hwq.pdev->dev,
2872 "FP: CQ Process Raw/QP1 RQ wr_id idx 0x%x exceeded RQ max 0x%x\n",
2873 wr_id_idx, rq->max_wqe);
2874 return -EINVAL;
2875 }
2876 if (rq->swq_last != wr_id_idx)
2877 return -EINVAL;
2878 swq = &rq->swq[rq->swq_last];
2879 cqe->wr_id = swq->wr_id;
2880 cqe++;
2881 (*budget)--;
2882 bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
2883 swq->slots, &rq->dbinfo.flags);
2884 rq->swq_last = swq->next_idx;
2885 *pcqe = cqe;
2886
2887 if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2888 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2889 /* Add qp to flush list of the CQ */
2890 bnxt_qplib_add_flush_qp(qp);
2891 }
2892 }
2893
2894 return 0;
2895 }
2896
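/* Handle a TERMINAL CQE: move the QP to the error state, complete any
 * aggregated successful SQ WQEs up to the reported sq_cons_idx, and
 * queue the QP for RQ flushing. A consumer index of 0xFFFF means the
 * corresponding queue has nothing to report.
 */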
2897 static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
2898 struct cq_terminal *hwcqe,
2899 struct bnxt_qplib_cqe **pcqe,
2900 int *budget)
2901 {
2902 struct bnxt_qplib_qp *qp;
2903 struct bnxt_qplib_q *sq, *rq;
2904 struct bnxt_qplib_cqe *cqe;
2905 u32 swq_last = 0, cqe_cons;
2906 int rc = 0;
2907
2908 /* Check the Status */
2909 if (hwcqe->status != CQ_TERMINAL_STATUS_OK)
2910 dev_warn(&cq->hwq.pdev->dev,
2911 "FP: CQ Process Terminal Error status = 0x%x\n",
2912 hwcqe->status);
2913
2914 qp = (struct bnxt_qplib_qp *)((unsigned long)
2915 le64_to_cpu(hwcqe->qp_handle));
2916 if (!qp)
2917 return -EINVAL;
2918
2919 /* Must block new posting of SQ and RQ */
2920 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2921
2922 sq = &qp->sq;
2923 rq = &qp->rq;
2924
2925 cqe_cons = le16_to_cpu(hwcqe->sq_cons_idx);
2926 if (cqe_cons == 0xFFFF)
2927 goto do_rq;
2928 cqe_cons %= sq->max_sw_wqe;
2929
2930 if (qp->sq.flushed) {
2931 dev_dbg(&cq->hwq.pdev->dev,
2932 "%s: QP in Flush QP = %p\n", __func__, qp);
2933 goto sq_done;
2934 }
2935
2936 /* A terminal CQE can also imply aggregated successful CQEs that
2937  * preceded it, so we must complete all CQEs from the current sq
2938  * cons up to cqe_cons with status OK.
2939  */
2940 cqe = *pcqe;
2941 while (*budget) {
2942 swq_last = sq->swq_last;
2943 if (swq_last == cqe_cons)
2944 break;
2945 if (sq->swq[swq_last].flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
2946 memset(cqe, 0, sizeof(*cqe));
2947 cqe->status = CQ_REQ_STATUS_OK;
2948 cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2949 cqe->qp_handle = (u64)(unsigned long)qp;
2950 cqe->src_qp = qp->id;
2951 cqe->wr_id = sq->swq[swq_last].wr_id;
2952 cqe->type = sq->swq[swq_last].type;
2953 cqe++;
2954 (*budget)--;
2955 }
2956 bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
2957 sq->swq[swq_last].slots, &sq->dbinfo.flags);
2958 sq->swq_last = sq->swq[swq_last].next_idx;
2959 }
2960 *pcqe = cqe;
2961 if (!(*budget) && swq_last != cqe_cons) {
2962 /* Out of budget */
2963 rc = -EAGAIN;
2964 goto sq_done;
2965 }
2966 sq_done:
2967 if (rc)
2968 return rc;
2969 do_rq:
2970 cqe_cons = le16_to_cpu(hwcqe->rq_cons_idx);
2971 if (cqe_cons == 0xFFFF) {
2972 goto done;
2973 } else if (cqe_cons > rq->max_wqe - 1) {
2974 dev_err(&cq->hwq.pdev->dev,
2975 "FP: CQ Processed terminal reported rq_cons_idx 0x%x exceeds max 0x%x\n",
2976 cqe_cons, rq->max_wqe);
2977 rc = -EINVAL;
2978 goto done;
2979 }
2980
2981 if (qp->rq.flushed) {
2982 dev_dbg(&cq->hwq.pdev->dev,
2983 "%s: QP in Flush QP = %p\n", __func__, qp);
2984 rc = 0;
2985 goto done;
2986 }
2987
2988 /* A terminal CQE requires all posted RQEs to be completed with
2989  * FLUSHED_ERR, from the current rq->cons to rq->prod, regardless of
2990  * the rq_cons_idx the terminal CQE indicates.
2991  */
2992
2993 /* Add qp to flush list of the CQ */
2994 bnxt_qplib_add_flush_qp(qp);
2995 done:
2996 return rc;
2997 }
2998
2999 static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq,
3000 struct cq_cutoff *hwcqe)
3001 {
3002 /* Check the Status */
3003 if (hwcqe->status != CQ_CUTOFF_STATUS_OK) {
3004 dev_err(&cq->hwq.pdev->dev,
3005 "FP: CQ Process Cutoff Error status = 0x%x\n",
3006 hwcqe->status);
3007 return -EINVAL;
3008 }
3009 clear_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags);
3010 wake_up_interruptible(&cq->waitq);
3011
3012 return 0;
3013 }
3014
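/* Generate flush completions for every QP parked on this CQ's SQ and
 * RQ flush lists, bounded by num_cqes. Returns the number of CQEs
 * actually written into the caller's array.
 */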
3015 int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
3016 struct bnxt_qplib_cqe *cqe,
3017 int num_cqes)
3018 {
3019 struct bnxt_qplib_qp *qp = NULL;
3020 u32 budget = num_cqes;
3021 unsigned long flags;
3022
3023 spin_lock_irqsave(&cq->flush_lock, flags);
3024 list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
3025 dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing SQ QP= %p\n", qp);
3026 __flush_sq(&qp->sq, qp, &cqe, &budget);
3027 }
3028
3029 list_for_each_entry(qp, &cq->rqf_head, rq_flush) {
3030 dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing RQ QP= %p\n", qp);
3031 __flush_rq(&qp->rq, qp, &cqe, &budget);
3032 }
3033 spin_unlock_irqrestore(&cq->flush_lock, flags);
3034
3035 return num_cqes - budget;
3036 }
3037
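/* Main CQ polling loop: peek at the next ring entry, confirm the
 * valid/toggle bit, then dma_rmb() before reading the rest of the CQE.
 * Each CQE type is dispatched to its handler until the budget is spent
 * or an invalid entry is reached, after which the CQ doorbell is rung
 * to release the consumed entries. Returns the number of CQEs filled.
 */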
3038 int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
3039 int num_cqes, struct bnxt_qplib_qp **lib_qp)
3040 {
3041 struct cq_base *hw_cqe;
3042 int budget, rc = 0;
3043 u32 hw_polled = 0;
3044 u8 type;
3045
3046 budget = num_cqes;
3047
3048 while (budget) {
3049 hw_cqe = bnxt_qplib_get_qe(&cq->hwq, cq->hwq.cons, NULL);
3050
3051 /* Check for Valid bit */
3052 if (!CQE_CMP_VALID(hw_cqe, cq->dbinfo.flags))
3053 break;
3054
3055 /*
3056 * The valid test of the entry must be done first before
3057 * reading any further.
3058 */
3059 dma_rmb();
3060 /* Translate from the device's CQE format to the qplib_wc format */
3061 type = hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
3062 switch (type) {
3063 case CQ_BASE_CQE_TYPE_REQ:
3064 rc = bnxt_qplib_cq_process_req(cq,
3065 (struct cq_req *)hw_cqe,
3066 &cqe, &budget,
3067 cq->hwq.cons, lib_qp);
3068 break;
3069 case CQ_BASE_CQE_TYPE_RES_RC:
3070 rc = bnxt_qplib_cq_process_res_rc(cq,
3071 (struct cq_res_rc *)
3072 hw_cqe, &cqe,
3073 &budget);
3074 break;
3075 case CQ_BASE_CQE_TYPE_RES_UD:
3076 rc = bnxt_qplib_cq_process_res_ud
3077 (cq, (struct cq_res_ud *)hw_cqe, &cqe,
3078 &budget);
3079 break;
3080 case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
3081 rc = bnxt_qplib_cq_process_res_raweth_qp1
3082 (cq, (struct cq_res_raweth_qp1 *)
3083 hw_cqe, &cqe, &budget);
3084 break;
3085 case CQ_BASE_CQE_TYPE_TERMINAL:
3086 rc = bnxt_qplib_cq_process_terminal
3087 (cq, (struct cq_terminal *)hw_cqe,
3088 &cqe, &budget);
3089 break;
3090 case CQ_BASE_CQE_TYPE_CUT_OFF:
3091 bnxt_qplib_cq_process_cutoff
3092 (cq, (struct cq_cutoff *)hw_cqe);
3093 /* Done processing this CQ */
3094 goto exit;
3095 default:
3096 dev_err(&cq->hwq.pdev->dev,
3097 "process_cq unknown type 0x%lx\n",
3098 hw_cqe->cqe_type_toggle &
3099 CQ_BASE_CQE_TYPE_MASK);
3100 rc = -EINVAL;
3101 break;
3102 }
3103 if (rc < 0) {
3104 if (rc == -EAGAIN)
3105 break;
3106 /* Error while processing the CQE, just skip to the
3107 * next one
3108 */
3109 if (type != CQ_BASE_CQE_TYPE_TERMINAL)
3110 dev_err(&cq->hwq.pdev->dev,
3111 "process_cqe error rc = 0x%x\n", rc);
3112 }
3113 hw_polled++;
3114 bnxt_qplib_hwq_incr_cons(cq->hwq.max_elements, &cq->hwq.cons,
3115 1, &cq->dbinfo.flags);
3116
3117 }
3118 if (hw_polled)
3119 bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ);
3120 exit:
3121 return num_cqes - budget;
3122 }
3123
3124 void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
3125 {
3126 cq->dbinfo.toggle = cq->toggle;
3127 if (arm_type)
3128 bnxt_qplib_ring_db(&cq->dbinfo, arm_type);
3129 /* Use cq->arm_state to track whether the cq handler should be issued */
3130 atomic_set(&cq->arm_state, 1);
3131 }
3132
3133 void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp)
3134 {
3135 flush_workqueue(qp->scq->nq->cqn_wq);
3136 if (qp->scq != qp->rcq)
3137 flush_workqueue(qp->rcq->nq->cqn_wq);
3138 }
3139