/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: Fast Path Operators
 */

#define dev_fmt(fmt) "QPLIB: " fmt

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/prefetch.h>
#include <linux/if_ether.h>
#include <rdma/ib_mad.h>

#include "roce_hsi.h"

#include "qplib_res.h"
#include "qplib_rcfw.h"
#include "qplib_sp.h"
#include "qplib_fp.h"

static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);

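/* Drop any in-progress phantom-CQE tracking state on the SQ. Called
 * before the QP is put on the flush list so that stale phantom
 * bookkeeping is not carried into flush processing.
 */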
static void bnxt_qplib_cancel_phantom_processing(struct bnxt_qplib_qp *qp)
{
	qp->sq.condition = false;
	qp->sq.send_phantom = false;
	qp->sq.single = false;
}

/* Flush list */
static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_cq *scq, *rcq;

	scq = qp->scq;
	rcq = qp->rcq;

	if (!qp->sq.flushed) {
		dev_dbg(&scq->hwq.pdev->dev,
			"FP: Adding to SQ Flush list = %p\n", qp);
		bnxt_qplib_cancel_phantom_processing(qp);
		list_add_tail(&qp->sq_flush, &scq->sqf_head);
		qp->sq.flushed = true;
	}
	if (!qp->srq) {
		if (!qp->rq.flushed) {
			dev_dbg(&rcq->hwq.pdev->dev,
				"FP: Adding to RQ Flush list = %p\n", qp);
			list_add_tail(&qp->rq_flush, &rcq->rqf_head);
			qp->rq.flushed = true;
		}
	}
}

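/* Take both CQ flush locks with IRQs disabled, always in SCQ-then-RCQ
 * order so that concurrent flush-list updates cannot deadlock. When the
 * SQ and RQ share one CQ only the single lock is really taken; the
 * __acquire() keeps sparse's lock balance checking happy.
 */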
static void bnxt_qplib_acquire_cq_flush_locks(struct bnxt_qplib_qp *qp,
					      unsigned long *flags)
	__acquires(&qp->scq->flush_lock) __acquires(&qp->rcq->flush_lock)
{
	spin_lock_irqsave(&qp->scq->flush_lock, *flags);
	if (qp->scq == qp->rcq)
		__acquire(&qp->rcq->flush_lock);
	else
		spin_lock(&qp->rcq->flush_lock);
}

static void bnxt_qplib_release_cq_flush_locks(struct bnxt_qplib_qp *qp,
					      unsigned long *flags)
	__releases(&qp->scq->flush_lock) __releases(&qp->rcq->flush_lock)
{
	if (qp->scq == qp->rcq)
		__release(&qp->rcq->flush_lock);
	else
		spin_unlock(&qp->rcq->flush_lock);
	spin_unlock_irqrestore(&qp->scq->flush_lock, *flags);
}

void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
{
	unsigned long flags;

	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
	__bnxt_qplib_add_flush_qp(qp);
	bnxt_qplib_release_cq_flush_locks(qp, &flags);
}

static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
{
	if (qp->sq.flushed) {
		qp->sq.flushed = false;
		list_del(&qp->sq_flush);
	}
	if (!qp->srq) {
		if (qp->rq.flushed) {
			qp->rq.flushed = false;
			list_del(&qp->rq_flush);
		}
	}
}

void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp)
{
	unsigned long flags;

	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
	__clean_cq(qp->scq, (u64)(unsigned long)qp);
	qp->sq.hwq.prod = 0;
	qp->sq.hwq.cons = 0;
	__clean_cq(qp->rcq, (u64)(unsigned long)qp);
	qp->rq.hwq.prod = 0;
	qp->rq.hwq.cons = 0;

	__bnxt_qplib_del_flush_qp(qp);
	bnxt_qplib_release_cq_flush_locks(qp, &flags);
}

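/* Deferred CQ notification: runs from the NQ workqueue and invokes the
 * consumer's CQN handler for an armed CQ under its completion lock.
 */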
static void bnxt_qpn_cqn_sched_task(struct work_struct *work)
{
	struct bnxt_qplib_nq_work *nq_work =
			container_of(work, struct bnxt_qplib_nq_work, work);

	struct bnxt_qplib_cq *cq = nq_work->cq;
	struct bnxt_qplib_nq *nq = nq_work->nq;

	if (cq && nq) {
		spin_lock_bh(&cq->compl_lock);
		if (atomic_read(&cq->arm_state) && nq->cqn_handler) {
			dev_dbg(&nq->pdev->dev,
				"%s:Trigger cq = %p event nq = %p\n",
				__func__, cq, nq);
			nq->cqn_handler(nq, cq);
		}
		spin_unlock_bh(&cq->compl_lock);
	}
	kfree(nq_work);
}

static void bnxt_qplib_free_qp_hdr_buf(struct bnxt_qplib_res *res,
				       struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	struct bnxt_qplib_q *sq = &qp->sq;

	if (qp->rq_hdr_buf)
		dma_free_coherent(&res->pdev->dev,
				  rq->max_wqe * qp->rq_hdr_buf_size,
				  qp->rq_hdr_buf, qp->rq_hdr_buf_map);
	if (qp->sq_hdr_buf)
		dma_free_coherent(&res->pdev->dev,
				  sq->max_wqe * qp->sq_hdr_buf_size,
				  qp->sq_hdr_buf, qp->sq_hdr_buf_map);
	qp->rq_hdr_buf = NULL;
	qp->sq_hdr_buf = NULL;
	qp->rq_hdr_buf_map = 0;
	qp->sq_hdr_buf_map = 0;
	qp->sq_hdr_buf_size = 0;
	qp->rq_hdr_buf_size = 0;
}

static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res,
				       struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	struct bnxt_qplib_q *sq = &qp->sq;
	int rc = 0;

	if (qp->sq_hdr_buf_size && sq->max_wqe) {
		qp->sq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
					sq->max_wqe * qp->sq_hdr_buf_size,
					&qp->sq_hdr_buf_map, GFP_KERNEL);
		if (!qp->sq_hdr_buf) {
			rc = -ENOMEM;
			dev_err(&res->pdev->dev,
				"Failed to create sq_hdr_buf\n");
			goto fail;
		}
	}

	if (qp->rq_hdr_buf_size && rq->max_wqe) {
		qp->rq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
						    rq->max_wqe *
						    qp->rq_hdr_buf_size,
						    &qp->rq_hdr_buf_map,
						    GFP_KERNEL);
		if (!qp->rq_hdr_buf) {
			rc = -ENOMEM;
			dev_err(&res->pdev->dev,
				"Failed to create rq_hdr_buf\n");
			goto fail;
		}
	}
	return 0;

fail:
	bnxt_qplib_free_qp_hdr_buf(res, qp);
	return rc;
}

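/* Scan the NQ and invalidate any pending CQ notification entries that
 * still point at @cq, so a CQ being destroyed is not referenced by
 * stale NQEs. Each match is counted in cq->cnq_events.
 */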
static void clean_nq(struct bnxt_qplib_nq *nq, struct bnxt_qplib_cq *cq)
{
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	struct nq_base *nqe, **nq_ptr;
	int budget = nq->budget;
	u32 sw_cons, raw_cons;
	uintptr_t q_handle;
	u16 type;

	spin_lock_bh(&hwq->lock);
	/* Service the NQ until empty */
	raw_cons = hwq->cons;
	while (budget--) {
		sw_cons = HWQ_CMP(raw_cons, hwq);
		nq_ptr = (struct nq_base **)hwq->pbl_ptr;
		nqe = &nq_ptr[NQE_PG(sw_cons)][NQE_IDX(sw_cons)];
		if (!NQE_CMP_VALID(nqe, raw_cons, hwq->max_elements))
			break;

		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();

		type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
		switch (type) {
		case NQ_BASE_TYPE_CQ_NOTIFICATION:
		{
			struct nq_cn *nqcne = (struct nq_cn *)nqe;

			q_handle = le32_to_cpu(nqcne->cq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
						     << 32;
			if ((unsigned long)cq == q_handle) {
				nqcne->cq_handle_low = 0;
				nqcne->cq_handle_high = 0;
				cq->cnq_events++;
			}
			break;
		}
		default:
			break;
		}
		raw_cons++;
	}
	spin_unlock_bh(&hwq->lock);
}

/* Wait until all NQEs destined for this CQ have been received, cleaning
 * up any NQEs that are still associated with the CQ along the way.
 */
static void __wait_for_all_nqes(struct bnxt_qplib_cq *cq, u16 cnq_events)
{
	u32 retry_cnt = 100;

	while (retry_cnt--) {
		if (cnq_events == cq->cnq_events)
			return;
		usleep_range(50, 100);
		clean_nq(cq->nq, cq);
	}
}

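/* Tasklet body: drain up to nq->budget notification entries, dispatch
 * CQ and SRQ events to the registered handlers, and re-arm the NQ
 * doorbell once the consumer index has advanced.
 */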
static void bnxt_qplib_service_nq(struct tasklet_struct *t)
{
	struct bnxt_qplib_nq *nq = from_tasklet(nq, t, nq_tasklet);
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	struct bnxt_qplib_cq *cq;
	int budget = nq->budget;
	u32 sw_cons, raw_cons;
	struct nq_base *nqe;
	uintptr_t q_handle;
	u16 type;

	spin_lock_bh(&hwq->lock);
	/* Service the NQ until empty */
	raw_cons = hwq->cons;
	while (budget--) {
		sw_cons = HWQ_CMP(raw_cons, hwq);
		nqe = bnxt_qplib_get_qe(hwq, sw_cons, NULL);
		if (!NQE_CMP_VALID(nqe, raw_cons, hwq->max_elements))
			break;

		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();

		type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
		switch (type) {
		case NQ_BASE_TYPE_CQ_NOTIFICATION:
		{
			struct nq_cn *nqcne = (struct nq_cn *)nqe;

			q_handle = le32_to_cpu(nqcne->cq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
						     << 32;
			cq = (struct bnxt_qplib_cq *)(unsigned long)q_handle;
			if (!cq)
				break;
			bnxt_qplib_armen_db(&cq->dbinfo,
					    DBC_DBC_TYPE_CQ_ARMENA);
			spin_lock_bh(&cq->compl_lock);
			atomic_set(&cq->arm_state, 0);
			if (nq->cqn_handler(nq, (cq)))
				dev_warn(&nq->pdev->dev,
					 "cqn - type 0x%x not handled\n", type);
			cq->cnq_events++;
			spin_unlock_bh(&cq->compl_lock);
			break;
		}
		case NQ_BASE_TYPE_SRQ_EVENT:
		{
			struct bnxt_qplib_srq *srq;
			struct nq_srq_event *nqsrqe =
						(struct nq_srq_event *)nqe;

			q_handle = le32_to_cpu(nqsrqe->srq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqsrqe->srq_handle_high)
				     << 32;
			srq = (struct bnxt_qplib_srq *)q_handle;
			bnxt_qplib_armen_db(&srq->dbinfo,
					    DBC_DBC_TYPE_SRQ_ARMENA);
			if (nq->srqn_handler(nq,
					     (struct bnxt_qplib_srq *)q_handle,
					     nqsrqe->event))
				dev_warn(&nq->pdev->dev,
					 "SRQ event 0x%x not handled\n",
					 nqsrqe->event);
			break;
		}
		case NQ_BASE_TYPE_DBQ_EVENT:
			break;
		default:
			dev_warn(&nq->pdev->dev,
				 "nqe with type = 0x%x not handled\n", type);
			break;
		}
		raw_cons++;
	}
	if (hwq->cons != raw_cons) {
		hwq->cons = raw_cons;
		bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, true);
	}
	spin_unlock_bh(&hwq->lock);
}

/**
 * bnxt_re_synchronize_nq - self-poll the notification queue
 * @nq: notification queue pointer
 *
 * Poll all pending entries of the given notification queue in one pass.
 * Useful for synchronizing against outstanding notification entries
 * while the underlying resources are going away.
 */
void bnxt_re_synchronize_nq(struct bnxt_qplib_nq *nq)
{
	int budget = nq->budget;

	nq->budget = nq->hwq.max_elements;
	bnxt_qplib_service_nq(&nq->nq_tasklet);
	nq->budget = budget;
}

static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
{
	struct bnxt_qplib_nq *nq = dev_instance;
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	u32 sw_cons;

	/* Prefetch the NQ element */
	sw_cons = HWQ_CMP(hwq->cons, hwq);
	prefetch(bnxt_qplib_get_qe(hwq, sw_cons, NULL));

	/* Fan out to CPU affinitized kthreads? */
	tasklet_schedule(&nq->nq_tasklet);

	return IRQ_HANDLED;
}

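/* Quiesce the NQ interrupt: mask the doorbell, wait out any handler
 * still running, release the vector, and disable (optionally kill) the
 * service tasklet. @kill is set when the NQ is going away for good.
 */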
void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill)
{
	if (!nq->requested)
		return;

	nq->requested = false;
	/* Mask h/w interrupt */
	bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, false);
	/* Sync with last running IRQ handler */
	synchronize_irq(nq->msix_vec);
	irq_set_affinity_hint(nq->msix_vec, NULL);
	free_irq(nq->msix_vec, nq);
	kfree(nq->name);
	nq->name = NULL;

	if (kill)
		tasklet_kill(&nq->nq_tasklet);
	tasklet_disable(&nq->nq_tasklet);
}

void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
{
	if (nq->cqn_wq) {
		destroy_workqueue(nq->cqn_wq);
		nq->cqn_wq = NULL;
	}

	/* Make sure the HW is stopped! */
	bnxt_qplib_nq_stop_irq(nq, true);

	if (nq->nq_db.reg.bar_reg) {
		iounmap(nq->nq_db.reg.bar_reg);
		nq->nq_db.reg.bar_reg = NULL;
	}

	nq->cqn_handler = NULL;
	nq->srqn_handler = NULL;
	nq->msix_vec = 0;
}

int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
			    int msix_vector, bool need_init)
{
	struct bnxt_qplib_res *res = nq->res;
	int rc;

	if (nq->requested)
		return -EFAULT;

	nq->msix_vec = msix_vector;
	if (need_init)
		tasklet_setup(&nq->nq_tasklet, bnxt_qplib_service_nq);
	else
		tasklet_enable(&nq->nq_tasklet);

	nq->name = kasprintf(GFP_KERNEL, "bnxt_re-nq-%d@pci:%s",
			     nq_indx, pci_name(res->pdev));
	if (!nq->name)
		return -ENOMEM;
	rc = request_irq(nq->msix_vec, bnxt_qplib_nq_irq, 0, nq->name, nq);
	if (rc) {
		kfree(nq->name);
		nq->name = NULL;
		tasklet_disable(&nq->nq_tasklet);
		return rc;
	}

	cpumask_clear(&nq->mask);
	cpumask_set_cpu(nq_indx, &nq->mask);
	rc = irq_set_affinity_hint(nq->msix_vec, &nq->mask);
	if (rc) {
		dev_warn(&nq->pdev->dev,
			 "set affinity failed; vector: %d nq_idx: %d\n",
			 nq->msix_vec, nq_indx);
	}
	nq->requested = true;
	bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, res->cctx, true);

	return rc;
}

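/* Map the NQ consumer doorbell from the PCI BAR at @reg_offt and wire
 * it into the NQ's doorbell info.
 */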
static int bnxt_qplib_map_nq_db(struct bnxt_qplib_nq *nq, u32 reg_offt)
{
	resource_size_t reg_base;
	struct bnxt_qplib_nq_db *nq_db;
	struct pci_dev *pdev;

	pdev = nq->pdev;
	nq_db = &nq->nq_db;

	nq_db->reg.bar_id = NQ_CONS_PCI_BAR_REGION;
	nq_db->reg.bar_base = pci_resource_start(pdev, nq_db->reg.bar_id);
	if (!nq_db->reg.bar_base) {
		dev_err(&pdev->dev, "QPLIB: NQ BAR region %d resc start is 0!",
			nq_db->reg.bar_id);
		return -ENOMEM;
	}

	reg_base = nq_db->reg.bar_base + reg_offt;
	/* Unconditionally map 8 bytes to support 57500 series */
	nq_db->reg.len = 8;
	nq_db->reg.bar_reg = ioremap(reg_base, nq_db->reg.len);
	if (!nq_db->reg.bar_reg) {
		dev_err(&pdev->dev, "QPLIB: NQ BAR region %d mapping failed",
			nq_db->reg.bar_id);
		return -ENOMEM;
	}

	nq_db->dbinfo.db = nq_db->reg.bar_reg;
	nq_db->dbinfo.hwq = &nq->hwq;
	nq_db->dbinfo.xid = nq->ring_id;

	return 0;
}

int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
			 int nq_idx, int msix_vector, int bar_reg_offset,
			 cqn_handler_t cqn_handler,
			 srqn_handler_t srqn_handler)
{
	int rc;

	nq->pdev = pdev;
	nq->cqn_handler = cqn_handler;
	nq->srqn_handler = srqn_handler;

	/* Have a task to schedule CQ notifiers in post send case */
	nq->cqn_wq = create_singlethread_workqueue("bnxt_qplib_nq");
	if (!nq->cqn_wq)
		return -ENOMEM;

	rc = bnxt_qplib_map_nq_db(nq, bar_reg_offset);
	if (rc)
		goto fail;

	rc = bnxt_qplib_nq_start_irq(nq, nq_idx, msix_vector, true);
	if (rc) {
		dev_err(&nq->pdev->dev,
			"Failed to request irq for nq-idx %d\n", nq_idx);
		goto fail;
	}

	return 0;
fail:
	bnxt_qplib_disable_nq(nq);
	return rc;
}

void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq)
{
	if (nq->hwq.max_elements) {
		bnxt_qplib_free_hwq(nq->res, &nq->hwq);
		nq->hwq.max_elements = 0;
	}
}

int bnxt_qplib_alloc_nq(struct bnxt_qplib_res *res, struct bnxt_qplib_nq *nq)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};

	nq->pdev = res->pdev;
	nq->res = res;
	if (!nq->hwq.max_elements ||
	    nq->hwq.max_elements > BNXT_QPLIB_NQE_MAX_CNT)
		nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;

	sginfo.pgsize = PAGE_SIZE;
	sginfo.pgshft = PAGE_SHIFT;
	hwq_attr.res = res;
	hwq_attr.sginfo = &sginfo;
	hwq_attr.depth = nq->hwq.max_elements;
	hwq_attr.stride = sizeof(struct nq_base);
	hwq_attr.type = bnxt_qplib_get_hwq_type(nq->res);
	if (bnxt_qplib_alloc_init_hwq(&nq->hwq, &hwq_attr)) {
		dev_err(&nq->pdev->dev, "FP NQ allocation failed");
		return -ENOMEM;
	}
	nq->budget = 8;
	return 0;
}

/* SRQ */
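/* Tear down an SRQ: issue DESTROY_SRQ to firmware, then release the
 * software queue and, on success, the hardware queue backing it.
 */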
void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
			    struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_destroy_srq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_destroy_srq req = {};
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_DESTROY_SRQ,
				 sizeof(req));

	/* Configure the request */
	req.srq_cid = cpu_to_le32(srq->id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	kfree(srq->swq);
	if (rc)
		return;
	bnxt_qplib_free_hwq(res, &srq->hwq);
}

int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct creq_create_srq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_create_srq req = {};
	struct bnxt_qplib_pbl *pbl;
	u16 pg_sz_lvl;
	int rc, idx;

	hwq_attr.res = res;
	hwq_attr.sginfo = &srq->sg_info;
	hwq_attr.depth = srq->max_wqe;
	hwq_attr.stride = srq->wqe_size;
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&srq->hwq, &hwq_attr);
	if (rc)
		return rc;

	srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
			   GFP_KERNEL);
	if (!srq->swq) {
		rc = -ENOMEM;
		goto fail;
	}

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_CREATE_SRQ,
				 sizeof(req));

	/* Configure the request */
	req.dpi = cpu_to_le32(srq->dpi->dpi);
	req.srq_handle = cpu_to_le64((uintptr_t)srq);

	req.srq_size = cpu_to_le16((u16)srq->hwq.max_elements);
	pbl = &srq->hwq.pbl[PBL_LVL_0];
	pg_sz_lvl = ((u16)bnxt_qplib_base_pg_size(&srq->hwq) <<
		     CMDQ_CREATE_SRQ_PG_SIZE_SFT);
	pg_sz_lvl |= (srq->hwq.level & CMDQ_CREATE_SRQ_LVL_MASK) <<
		      CMDQ_CREATE_SRQ_LVL_SFT;
	req.pg_size_lvl = cpu_to_le16(pg_sz_lvl);
	req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	req.pd_id = cpu_to_le32(srq->pd->id);
	req.eventq_id = cpu_to_le16(srq->eventq_hw_ring_id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto fail;

	spin_lock_init(&srq->lock);
	srq->start_idx = 0;
	srq->last_idx = srq->hwq.max_elements - 1;
	for (idx = 0; idx < srq->hwq.max_elements; idx++)
		srq->swq[idx].next_idx = idx + 1;
	srq->swq[srq->last_idx].next_idx = -1;

	srq->id = le32_to_cpu(resp.xid);
	srq->dbinfo.hwq = &srq->hwq;
	srq->dbinfo.xid = srq->id;
	srq->dbinfo.db = srq->dpi->dbr;
	srq->dbinfo.max_slot = 1;
	srq->dbinfo.priv_db = res->dpi_tbl.priv_db;
	if (srq->threshold)
		bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA);
	srq->arm_req = false;

	return 0;
fail:
	bnxt_qplib_free_hwq(res, &srq->hwq);
	kfree(srq->swq);

	return rc;
}

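/* "Modify" for an SRQ only re-evaluates limit (threshold) arming: if
 * enough buffers are already posted, arm the doorbell now; otherwise
 * defer arming until a later post brings the fill level above the
 * threshold.
 */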
int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
	u32 sw_prod, sw_cons, count = 0;

	sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
	sw_cons = HWQ_CMP(srq_hwq->cons, srq_hwq);

	count = sw_prod > sw_cons ? sw_prod - sw_cons :
				    srq_hwq->max_elements - sw_cons + sw_prod;
	if (count > srq->threshold) {
		srq->arm_req = false;
		bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
	} else {
		/* Deferred arming */
		srq->arm_req = true;
	}

	return 0;
}

int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_query_srq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_rcfw_sbuf sbuf;
	struct creq_query_srq_resp_sb *sb;
	struct cmdq_query_srq req = {};
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_QUERY_SRQ,
				 sizeof(req));

	/* Configure the request */
	sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
	sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
				     &sbuf.dma_addr, GFP_KERNEL);
	if (!sbuf.sb)
		return -ENOMEM;
	req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
	req.srq_cid = cpu_to_le32(srq->id);
	sb = sbuf.sb;
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (!rc)
		srq->threshold = le16_to_cpu(sb->srq_limit);
	dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
			  sbuf.sb, sbuf.dma_addr);

	return rc;
}

int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
			     struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
	struct rq_wqe *srqe;
	struct sq_sge *hw_sge;
	u32 sw_prod, sw_cons, count = 0;
	int i, next;

	spin_lock(&srq_hwq->lock);
	if (srq->start_idx == srq->last_idx) {
		dev_err(&srq_hwq->pdev->dev,
			"FP: SRQ (0x%x) is full!\n", srq->id);
		spin_unlock(&srq_hwq->lock);
		return -EINVAL;
	}
	next = srq->start_idx;
	srq->start_idx = srq->swq[next].next_idx;
	spin_unlock(&srq_hwq->lock);

	sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
	srqe = bnxt_qplib_get_qe(srq_hwq, sw_prod, NULL);
	memset(srqe, 0, srq->wqe_size);
	/* Calculate wqe_size16 and data_len */
	for (i = 0, hw_sge = (struct sq_sge *)srqe->data;
	     i < wqe->num_sge; i++, hw_sge++) {
		hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
		hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
		hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
	}
	srqe->wqe_type = wqe->type;
	srqe->flags = wqe->flags;
	srqe->wqe_size = wqe->num_sge +
			((offsetof(typeof(*srqe), data) + 15) >> 4);
	srqe->wr_id[0] = cpu_to_le32((u32)next);
	srq->swq[next].wr_id = wqe->wr_id;

	srq_hwq->prod++;

	spin_lock(&srq_hwq->lock);
	sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
	/* The lock is taken only to get a stable read of srq_hwq->cons;
	 * the producer index is owned by this posting context.
	 */
	sw_cons = HWQ_CMP(srq_hwq->cons, srq_hwq);
	count = sw_prod > sw_cons ? sw_prod - sw_cons :
				    srq_hwq->max_elements - sw_cons + sw_prod;
	spin_unlock(&srq_hwq->lock);
	/* Ring DB */
	bnxt_qplib_ring_prod_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ);
	if (srq->arm_req && count > srq->threshold) {
		srq->arm_req = false;
		bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
	}

	return 0;
}

/* QP */

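/* Allocate the software shadow queue for a work queue and link its
 * entries into a circular free list via next_idx.
 */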
static int bnxt_qplib_alloc_init_swq(struct bnxt_qplib_q *que)
{
	int indx;

	que->swq = kcalloc(que->max_wqe, sizeof(*que->swq), GFP_KERNEL);
	if (!que->swq)
		return -ENOMEM;

	que->swq_start = 0;
	que->swq_last = que->max_wqe - 1;
	for (indx = 0; indx < que->max_wqe; indx++)
		que->swq[indx].next_idx = indx + 1;
	que->swq[que->swq_last].next_idx = 0; /* Make it circular */
	que->swq_last = 0;

	return 0;
}

int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_create_qp1_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_q *rq = &qp->rq;
	struct cmdq_create_qp1 req = {};
	struct bnxt_qplib_pbl *pbl;
	u32 qp_flags = 0;
	u8 pg_sz_lvl;
	u32 tbl_indx;
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_CREATE_QP1,
				 sizeof(req));
	/* General */
	req.type = qp->type;
	req.dpi = cpu_to_le32(qp->dpi->dpi);
	req.qp_handle = cpu_to_le64(qp->qp_handle);

	/* SQ */
	hwq_attr.res = res;
	hwq_attr.sginfo = &sq->sg_info;
	hwq_attr.stride = sizeof(struct sq_sge);
	hwq_attr.depth = bnxt_qplib_get_depth(sq);
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
	if (rc)
		return rc;

	rc = bnxt_qplib_alloc_init_swq(sq);
	if (rc)
		goto fail_sq;

	req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
	pbl = &sq->hwq.pbl[PBL_LVL_0];
	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
		     CMDQ_CREATE_QP1_SQ_PG_SIZE_SFT);
	pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP1_SQ_LVL_MASK);
	req.sq_pg_size_sq_lvl = pg_sz_lvl;
	req.sq_fwo_sq_sge =
		cpu_to_le16((sq->max_sge & CMDQ_CREATE_QP1_SQ_SGE_MASK) <<
			    CMDQ_CREATE_QP1_SQ_SGE_SFT);
	req.scq_cid = cpu_to_le32(qp->scq->id);

	/* RQ */
	if (rq->max_wqe) {
		hwq_attr.res = res;
		hwq_attr.sginfo = &rq->sg_info;
		hwq_attr.stride = sizeof(struct sq_sge);
		hwq_attr.depth = bnxt_qplib_get_depth(rq);
		hwq_attr.type = HWQ_TYPE_QUEUE;
		rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
		if (rc)
			goto sq_swq;
		rc = bnxt_qplib_alloc_init_swq(rq);
		if (rc)
			goto fail_rq;
		req.rq_size = cpu_to_le32(rq->max_wqe);
		pbl = &rq->hwq.pbl[PBL_LVL_0];
		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
		pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
			     CMDQ_CREATE_QP1_RQ_PG_SIZE_SFT);
		pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP1_RQ_LVL_MASK);
		req.rq_pg_size_rq_lvl = pg_sz_lvl;
		req.rq_fwo_rq_sge =
			cpu_to_le16((rq->max_sge &
				     CMDQ_CREATE_QP1_RQ_SGE_MASK) <<
				    CMDQ_CREATE_QP1_RQ_SGE_SFT);
	}
	req.rcq_cid = cpu_to_le32(qp->rcq->id);
	/* Header buffer - allow hdr_buf pass in */
	rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
	if (rc) {
		rc = -ENOMEM;
		goto rq_rwq;
	}
	qp_flags |= CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE;
	req.qp_flags = cpu_to_le32(qp_flags);
	req.pd_id = cpu_to_le32(qp->pd->id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto fail;

	qp->id = le32_to_cpu(resp.xid);
	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
	qp->cctx = res->cctx;
	sq->dbinfo.hwq = &sq->hwq;
	sq->dbinfo.xid = qp->id;
	sq->dbinfo.db = qp->dpi->dbr;
	sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);
	if (rq->max_wqe) {
		rq->dbinfo.hwq = &rq->hwq;
		rq->dbinfo.xid = qp->id;
		rq->dbinfo.db = qp->dpi->dbr;
		rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
	}
	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
	rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
	rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;

	return 0;

fail:
	bnxt_qplib_free_qp_hdr_buf(res, qp);
rq_rwq:
	kfree(rq->swq);
fail_rq:
	bnxt_qplib_free_hwq(res, &rq->hwq);
sq_swq:
	kfree(sq->swq);
fail_sq:
	bnxt_qplib_free_hwq(res, &sq->hwq);
	return rc;
}

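/* Locate the PSN search area that trails the SQ in the hardware queue
 * and record its page table, stride and first-entry offset, so PSN
 * entries can be addressed per WQE later.
 */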
static void bnxt_qplib_init_psn_ptr(struct bnxt_qplib_qp *qp, int size)
{
	struct bnxt_qplib_hwq *hwq;
	struct bnxt_qplib_q *sq;
	u64 fpsne, psn_pg;
	u16 indx_pad = 0;

	sq = &qp->sq;
	hwq = &sq->hwq;
	/* First psn entry */
	fpsne = (u64)bnxt_qplib_get_qe(hwq, hwq->depth, &psn_pg);
	if (!IS_ALIGNED(fpsne, PAGE_SIZE))
		indx_pad = (fpsne & ~PAGE_MASK) / size;
	hwq->pad_pgofft = indx_pad;
	hwq->pad_pg = (u64 *)psn_pg;
	hwq->pad_stride = size;
}

int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};
	struct creq_create_qp_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_q *rq = &qp->rq;
	struct cmdq_create_qp req = {};
	int rc, req_size, psn_sz = 0;
	struct bnxt_qplib_hwq *xrrq;
	struct bnxt_qplib_pbl *pbl;
	u32 qp_flags = 0;
	u8 pg_sz_lvl;
	u32 tbl_indx;
	u16 nsge;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_CREATE_QP,
				 sizeof(req));

	/* General */
	req.type = qp->type;
	req.dpi = cpu_to_le32(qp->dpi->dpi);
	req.qp_handle = cpu_to_le64(qp->qp_handle);

	/* SQ */
	if (qp->type == CMDQ_CREATE_QP_TYPE_RC) {
		psn_sz = bnxt_qplib_is_chip_gen_p5(res->cctx) ?
			 sizeof(struct sq_psn_search_ext) :
			 sizeof(struct sq_psn_search);
	}

	hwq_attr.res = res;
	hwq_attr.sginfo = &sq->sg_info;
	hwq_attr.stride = sizeof(struct sq_sge);
	hwq_attr.depth = bnxt_qplib_get_depth(sq);
	hwq_attr.aux_stride = psn_sz;
	hwq_attr.aux_depth = bnxt_qplib_set_sq_size(sq, qp->wqe_mode);
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
	if (rc)
		return rc;

	rc = bnxt_qplib_alloc_init_swq(sq);
	if (rc)
		goto fail_sq;

	if (psn_sz)
		bnxt_qplib_init_psn_ptr(qp, psn_sz);

	req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
	pbl = &sq->hwq.pbl[PBL_LVL_0];
	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
		     CMDQ_CREATE_QP_SQ_PG_SIZE_SFT);
	pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP_SQ_LVL_MASK);
	req.sq_pg_size_sq_lvl = pg_sz_lvl;
	req.sq_fwo_sq_sge =
		cpu_to_le16(((sq->max_sge & CMDQ_CREATE_QP_SQ_SGE_MASK) <<
			     CMDQ_CREATE_QP_SQ_SGE_SFT) | 0);
	req.scq_cid = cpu_to_le32(qp->scq->id);

	/* RQ */
	if (!qp->srq) {
		hwq_attr.res = res;
		hwq_attr.sginfo = &rq->sg_info;
		hwq_attr.stride = sizeof(struct sq_sge);
		hwq_attr.depth = bnxt_qplib_get_depth(rq);
		hwq_attr.aux_stride = 0;
		hwq_attr.aux_depth = 0;
		hwq_attr.type = HWQ_TYPE_QUEUE;
		rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
		if (rc)
			goto sq_swq;
		rc = bnxt_qplib_alloc_init_swq(rq);
		if (rc)
			goto fail_rq;

		req.rq_size = cpu_to_le32(rq->max_wqe);
		pbl = &rq->hwq.pbl[PBL_LVL_0];
		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
		pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
			     CMDQ_CREATE_QP_RQ_PG_SIZE_SFT);
		pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP_RQ_LVL_MASK);
		req.rq_pg_size_rq_lvl = pg_sz_lvl;
		nsge = (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
			6 : rq->max_sge;
		req.rq_fwo_rq_sge =
			cpu_to_le16(((nsge &
				      CMDQ_CREATE_QP_RQ_SGE_MASK) <<
				     CMDQ_CREATE_QP_RQ_SGE_SFT) | 0);
	} else {
		/* SRQ */
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_SRQ_USED;
		req.srq_cid = cpu_to_le32(qp->srq->id);
	}
	req.rcq_cid = cpu_to_le32(qp->rcq->id);

	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE;
	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED;
	if (qp->sig_type)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION;
	if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_VARIABLE_SIZED_WQE_ENABLED;
	if (_is_ext_stats_supported(res->dattr->dev_cap_flags) && !res->is_vf)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_ENABLED;

	req.qp_flags = cpu_to_le32(qp_flags);

	/* ORRQ and IRRQ */
	if (psn_sz) {
		xrrq = &qp->orrq;
		xrrq->max_elements =
			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
		req_size = xrrq->max_elements *
			   BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE + PAGE_SIZE - 1;
		req_size &= ~(PAGE_SIZE - 1);
		sginfo.pgsize = req_size;
		sginfo.pgshft = PAGE_SHIFT;

		hwq_attr.res = res;
		hwq_attr.sginfo = &sginfo;
		hwq_attr.depth = xrrq->max_elements;
		hwq_attr.stride = BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE;
		hwq_attr.aux_stride = 0;
		hwq_attr.aux_depth = 0;
		hwq_attr.type = HWQ_TYPE_CTX;
		rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
		if (rc)
			goto rq_swq;
		pbl = &xrrq->pbl[PBL_LVL_0];
		req.orrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);

		xrrq = &qp->irrq;
		xrrq->max_elements = IRD_LIMIT_TO_IRRQ_SLOTS(
						qp->max_dest_rd_atomic);
		req_size = xrrq->max_elements *
			   BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE + PAGE_SIZE - 1;
		req_size &= ~(PAGE_SIZE - 1);
		sginfo.pgsize = req_size;
		hwq_attr.depth = xrrq->max_elements;
		hwq_attr.stride = BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE;
		rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
		if (rc)
			goto fail_orrq;

		pbl = &xrrq->pbl[PBL_LVL_0];
		req.irrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
	}
	req.pd_id = cpu_to_le32(qp->pd->id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto fail;

	qp->id = le32_to_cpu(resp.xid);
	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
	INIT_LIST_HEAD(&qp->sq_flush);
	INIT_LIST_HEAD(&qp->rq_flush);
	qp->cctx = res->cctx;
	sq->dbinfo.hwq = &sq->hwq;
	sq->dbinfo.xid = qp->id;
	sq->dbinfo.db = qp->dpi->dbr;
	sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);
	if (rq->max_wqe) {
		rq->dbinfo.hwq = &rq->hwq;
		rq->dbinfo.xid = qp->id;
		rq->dbinfo.db = qp->dpi->dbr;
		rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
	}
	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
	rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
	rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;

	return 0;
fail:
	bnxt_qplib_free_hwq(res, &qp->irrq);
fail_orrq:
	bnxt_qplib_free_hwq(res, &qp->orrq);
rq_swq:
	kfree(rq->swq);
fail_rq:
	bnxt_qplib_free_hwq(res, &rq->hwq);
sq_swq:
	kfree(sq->swq);
fail_sq:
	bnxt_qplib_free_hwq(res, &sq->hwq);
	return rc;
}

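/* INIT->RTR fixups: apply firmware-mandated defaults (2048 path MTU,
 * minimum IRD of 1, SGID index 0) when the caller did not request those
 * attributes explicitly, and mask off fields this transition must not
 * modify.
 */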
static void __modify_flags_from_init_state(struct bnxt_qplib_qp *qp)
{
	switch (qp->state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		/* INIT->RTR, configure the path_mtu to the default
		 * 2048 if not being requested
		 */
		if (!(qp->modify_flags &
		    CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)) {
			qp->modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
			qp->path_mtu =
				CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
		}
		qp->modify_flags &=
			~CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
		/* Bono FW requires the max_dest_rd_atomic to be >= 1 */
		if (qp->max_dest_rd_atomic < 1)
			qp->max_dest_rd_atomic = 1;
		qp->modify_flags &= ~CMDQ_MODIFY_QP_MODIFY_MASK_SRC_MAC;
		/* Bono FW 20.6.5 requires SGID_INDEX configuration */
		if (!(qp->modify_flags &
		    CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)) {
			qp->modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX;
			qp->ah.sgid_index = 0;
		}
		break;
	default:
		break;
	}
}

static void __modify_flags_from_rtr_state(struct bnxt_qplib_qp *qp)
{
	switch (qp->state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		/* Bono FW requires the max_rd_atomic to be >= 1 */
		if (qp->max_rd_atomic < 1)
			qp->max_rd_atomic = 1;
		/* Bono FW does not allow PKEY_INDEX,
		 * DGID, FLOW_LABEL, SGID_INDEX, HOP_LIMIT,
		 * TRAFFIC_CLASS, DEST_MAC, PATH_MTU, RQ_PSN,
		 * MIN_RNR_TIMER, MAX_DEST_RD_ATOMIC, DEST_QP_ID
		 * modification
		 */
		qp->modify_flags &=
			~(CMDQ_MODIFY_QP_MODIFY_MASK_PKEY |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
			  CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
			  CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
			  CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
			  CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
			  CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU |
			  CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN |
			  CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER |
			  CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID);
		break;
	default:
		break;
	}
}

static void __filter_modify_flags(struct bnxt_qplib_qp *qp)
{
	switch (qp->cur_qp_state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RESET:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_INIT:
		__modify_flags_from_init_state(qp);
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		__modify_flags_from_rtr_state(qp);
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_SQD:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_SQE:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_ERR:
		break;
	default:
		break;
	}
}

int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_modify_qp_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_modify_qp req = {};
	u32 temp32[4];
	u32 bmask;
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_MODIFY_QP,
				 sizeof(req));

	/* Filter out the qp_attr_mask based on the state->new transition */
	__filter_modify_flags(qp);
	bmask = qp->modify_flags;
	req.modify_mask = cpu_to_le32(qp->modify_flags);
	req.qp_cid = cpu_to_le32(qp->id);
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_STATE) {
		req.network_type_en_sqd_async_notify_new_state =
				(qp->state & CMDQ_MODIFY_QP_NEW_STATE_MASK) |
				(qp->en_sqd_async_notify ?
					CMDQ_MODIFY_QP_EN_SQD_ASYNC_NOTIFY : 0);
	}
	req.network_type_en_sqd_async_notify_new_state |= qp->nw_type;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS)
		req.access = qp->access;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY)
		req.pkey = cpu_to_le16(IB_DEFAULT_PKEY_FULL);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_QKEY)
		req.qkey = cpu_to_le32(qp->qkey);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DGID) {
		memcpy(temp32, qp->ah.dgid.data, sizeof(struct bnxt_qplib_gid));
		req.dgid[0] = cpu_to_le32(temp32[0]);
		req.dgid[1] = cpu_to_le32(temp32[1]);
		req.dgid[2] = cpu_to_le32(temp32[2]);
		req.dgid[3] = cpu_to_le32(temp32[3]);
	}
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL)
		req.flow_label = cpu_to_le32(qp->ah.flow_label);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)
		req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id
					     [qp->ah.sgid_index]);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT)
		req.hop_limit = qp->ah.hop_limit;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS)
		req.traffic_class = qp->ah.traffic_class;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC)
		memcpy(req.dest_mac, qp->ah.dmac, 6);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)
		req.path_mtu_pingpong_push_enable |= qp->path_mtu;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT)
		req.timeout = qp->timeout;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT)
		req.retry_cnt = qp->retry_cnt;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY)
		req.rnr_retry = qp->rnr_retry;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER)
		req.min_rnr_timer = qp->min_rnr_timer;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN)
		req.rq_psn = cpu_to_le32(qp->rq.psn);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN)
		req.sq_psn = cpu_to_le32(qp->sq.psn);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC)
		req.max_rd_atomic =
			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC)
		req.max_dest_rd_atomic =
			IRD_LIMIT_TO_IRRQ_SLOTS(qp->max_dest_rd_atomic);

	req.sq_size = cpu_to_le32(qp->sq.hwq.max_elements);
	req.rq_size = cpu_to_le32(qp->rq.hwq.max_elements);
	req.sq_sge = cpu_to_le16(qp->sq.max_sge);
	req.rq_sge = cpu_to_le16(qp->rq.max_sge);
	req.max_inline_data = cpu_to_le32(qp->max_inline_data);
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID)
		req.dest_qp_id = cpu_to_le32(qp->dest_qpn);

	req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		return rc;
	qp->cur_qp_state = qp->state;
	return 0;
}

int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_query_qp_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_rcfw_sbuf sbuf;
	struct creq_query_qp_resp_sb *sb;
	struct cmdq_query_qp req = {};
	u32 temp32[4];
	int i, rc;

	sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
	sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
				     &sbuf.dma_addr, GFP_KERNEL);
	if (!sbuf.sb)
		return -ENOMEM;
	sb = sbuf.sb;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_QUERY_QP,
				 sizeof(req));

	req.qp_cid = cpu_to_le32(qp->id);
	req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto bail;
	/* Extract the context from the side buffer */
	qp->state = sb->en_sqd_async_notify_state &
			CREQ_QUERY_QP_RESP_SB_STATE_MASK;
	qp->en_sqd_async_notify = sb->en_sqd_async_notify_state &
				  CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY;
	qp->access = sb->access;
	qp->pkey_index = le16_to_cpu(sb->pkey);
	qp->qkey = le32_to_cpu(sb->qkey);

	temp32[0] = le32_to_cpu(sb->dgid[0]);
	temp32[1] = le32_to_cpu(sb->dgid[1]);
	temp32[2] = le32_to_cpu(sb->dgid[2]);
	temp32[3] = le32_to_cpu(sb->dgid[3]);
	memcpy(qp->ah.dgid.data, temp32, sizeof(qp->ah.dgid.data));

	qp->ah.flow_label = le32_to_cpu(sb->flow_label);

	qp->ah.sgid_index = 0;
	for (i = 0; i < res->sgid_tbl.max; i++) {
		if (res->sgid_tbl.hw_id[i] == le16_to_cpu(sb->sgid_index)) {
			qp->ah.sgid_index = i;
			break;
		}
	}
	if (i == res->sgid_tbl.max)
		dev_warn(&res->pdev->dev, "SGID not found??\n");

	qp->ah.hop_limit = sb->hop_limit;
	qp->ah.traffic_class = sb->traffic_class;
	memcpy(qp->ah.dmac, sb->dest_mac, 6);
	qp->ah.vlan_id = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
				CREQ_QUERY_QP_RESP_SB_VLAN_ID_MASK) >>
				CREQ_QUERY_QP_RESP_SB_VLAN_ID_SFT;
	qp->path_mtu = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
			    CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) >>
			    CREQ_QUERY_QP_RESP_SB_PATH_MTU_SFT;
	qp->timeout = sb->timeout;
	qp->retry_cnt = sb->retry_cnt;
	qp->rnr_retry = sb->rnr_retry;
	qp->min_rnr_timer = sb->min_rnr_timer;
	qp->rq.psn = le32_to_cpu(sb->rq_psn);
	qp->max_rd_atomic = ORRQ_SLOTS_TO_ORD_LIMIT(sb->max_rd_atomic);
	qp->sq.psn = le32_to_cpu(sb->sq_psn);
	qp->max_dest_rd_atomic =
			IRRQ_SLOTS_TO_IRD_LIMIT(sb->max_dest_rd_atomic);
	qp->sq.max_wqe = qp->sq.hwq.max_elements;
	qp->rq.max_wqe = qp->rq.hwq.max_elements;
	qp->sq.max_sge = le16_to_cpu(sb->sq_sge);
	qp->rq.max_sge = le16_to_cpu(sb->rq_sge);
	qp->max_inline_data = le32_to_cpu(sb->max_inline_data);
	qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
	memcpy(qp->smac, sb->src_mac, 6);
	qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
bail:
	dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
			  sbuf.sb, sbuf.dma_addr);
	return rc;
}

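/* Walk every valid CQE in the CQ and zero the qp_handle of entries
 * belonging to @qp, so later polling cannot hand back completions for a
 * QP that is being cleaned up.
 */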
static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
{
	struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
	struct cq_base *hw_cqe;
	int i;

	for (i = 0; i < cq_hwq->max_elements; i++) {
		hw_cqe = bnxt_qplib_get_qe(cq_hwq, i, NULL);
		if (!CQE_CMP_VALID(hw_cqe, i, cq_hwq->max_elements))
			continue;
		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();
		switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
		case CQ_BASE_CQE_TYPE_REQ:
		case CQ_BASE_CQE_TYPE_TERMINAL:
		{
			struct cq_req *cqe = (struct cq_req *)hw_cqe;

			if (qp == le64_to_cpu(cqe->qp_handle))
				cqe->qp_handle = 0;
			break;
		}
		case CQ_BASE_CQE_TYPE_RES_RC:
		case CQ_BASE_CQE_TYPE_RES_UD:
		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
		{
			struct cq_res_rc *cqe = (struct cq_res_rc *)hw_cqe;

			if (qp == le64_to_cpu(cqe->qp_handle))
				cqe->qp_handle = 0;
			break;
		}
		default:
			break;
		}
	}
}

int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_destroy_qp_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_destroy_qp req = {};
	u32 tbl_indx;
	int rc;

	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
	rcfw->qp_tbl[tbl_indx].qp_id = BNXT_QPLIB_QP_ID_INVALID;
	rcfw->qp_tbl[tbl_indx].qp_handle = NULL;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_DESTROY_QP,
				 sizeof(req));

	req.qp_cid = cpu_to_le32(qp->id);
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc) {
		rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
		rcfw->qp_tbl[tbl_indx].qp_handle = qp;
		return rc;
	}

	return 0;
}

void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res,
			    struct bnxt_qplib_qp *qp)
{
	bnxt_qplib_free_qp_hdr_buf(res, qp);
	bnxt_qplib_free_hwq(res, &qp->sq.hwq);
	kfree(qp->sq.swq);

	bnxt_qplib_free_hwq(res, &qp->rq.hwq);
	kfree(qp->rq.swq);

	if (qp->irrq.max_elements)
		bnxt_qplib_free_hwq(res, &qp->irrq);
	if (qp->orrq.max_elements)
		bnxt_qplib_free_hwq(res, &qp->orrq);
}

void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge)
{
	struct bnxt_qplib_q *sq = &qp->sq;
	u32 sw_prod;

	memset(sge, 0, sizeof(*sge));

	if (qp->sq_hdr_buf) {
		sw_prod = sq->swq_start;
		sge->addr = (dma_addr_t)(qp->sq_hdr_buf_map +
					 sw_prod * qp->sq_hdr_buf_size);
		sge->lkey = 0xFFFFFFFF;
		sge->size = qp->sq_hdr_buf_size;
		return qp->sq_hdr_buf + sw_prod * sge->size;
	}
	return NULL;
}

u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;

	return rq->swq_start;
}

dma_addr_t bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp, u32 index)
{
	return (qp->rq_hdr_buf_map + index * qp->rq_hdr_buf_size);
}

void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	u32 sw_prod;

	memset(sge, 0, sizeof(*sge));

	if (qp->rq_hdr_buf) {
		sw_prod = rq->swq_start;
		sge->addr = (dma_addr_t)(qp->rq_hdr_buf_map +
					 sw_prod * qp->rq_hdr_buf_size);
		sge->lkey = 0xFFFFFFFF;
		sge->size = qp->rq_hdr_buf_size;
		return qp->rq_hdr_buf + sw_prod * sge->size;
	}
	return NULL;
}

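/* Record the start/next PSN and opcode for a posted SQ WQE in its PSN
 * search entry; the extended layout is used on gen-P5 chips.
 */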
static void bnxt_qplib_fill_psn_search(struct bnxt_qplib_qp *qp,
				       struct bnxt_qplib_swqe *wqe,
				       struct bnxt_qplib_swq *swq)
{
	struct sq_psn_search_ext *psns_ext;
	struct sq_psn_search *psns;
	u32 flg_npsn;
	u32 op_spsn;

	if (!swq->psn_search)
		return;
	psns = swq->psn_search;
	psns_ext = swq->psn_ext;

	op_spsn = ((swq->start_psn << SQ_PSN_SEARCH_START_PSN_SFT) &
		    SQ_PSN_SEARCH_START_PSN_MASK);
	op_spsn |= ((wqe->type << SQ_PSN_SEARCH_OPCODE_SFT) &
		     SQ_PSN_SEARCH_OPCODE_MASK);
	flg_npsn = ((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
		     SQ_PSN_SEARCH_NEXT_PSN_MASK);

	if (bnxt_qplib_is_chip_gen_p5(qp->cctx)) {
		psns_ext->opcode_start_psn = cpu_to_le32(op_spsn);
		psns_ext->flags_next_psn = cpu_to_le32(flg_npsn);
		psns_ext->start_slot_idx = cpu_to_le16(swq->slot_idx);
	} else {
		psns->opcode_start_psn = cpu_to_le32(op_spsn);
		psns->flags_next_psn = cpu_to_le32(flg_npsn);
	}
}

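/* Copy inline payload from the SGE list directly into SQ slots, packing
 * sq_sge-sized chunks and advancing *idx per slot consumed. Returns the
 * total inline length, or -ENOMEM if it exceeds qp->max_inline_data.
 */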
static int bnxt_qplib_put_inline(struct bnxt_qplib_qp *qp,
				 struct bnxt_qplib_swqe *wqe,
				 u16 *idx)
{
	struct bnxt_qplib_hwq *hwq;
	int len, t_len, offt;
	bool pull_dst = true;
	void *il_dst = NULL;
	void *il_src = NULL;
	int t_cplen, cplen;
	int indx;

	hwq = &qp->sq.hwq;
	t_len = 0;
	for (indx = 0; indx < wqe->num_sge; indx++) {
		len = wqe->sg_list[indx].size;
		il_src = (void *)wqe->sg_list[indx].addr;
		t_len += len;
		if (t_len > qp->max_inline_data)
			return -ENOMEM;
		while (len) {
			if (pull_dst) {
				pull_dst = false;
				il_dst = bnxt_qplib_get_prod_qe(hwq, *idx);
				(*idx)++;
				t_cplen = 0;
				offt = 0;
			}
			cplen = min_t(int, len, sizeof(struct sq_sge));
			cplen = min_t(int, cplen,
				      (sizeof(struct sq_sge) - offt));
			memcpy(il_dst, il_src, cplen);
			t_cplen += cplen;
			il_src += cplen;
			il_dst += cplen;
			offt += cplen;
			len -= cplen;
			if (t_cplen == sizeof(struct sq_sge))
				pull_dst = true;
		}
	}

	return t_len;
}

static u32 bnxt_qplib_put_sges(struct bnxt_qplib_hwq *hwq,
			       struct bnxt_qplib_sge *ssge,
			       u16 nsge, u16 *idx)
{
	struct sq_sge *dsge;
	int indx, len = 0;

	for (indx = 0; indx < nsge; indx++, (*idx)++) {
		dsge = bnxt_qplib_get_prod_qe(hwq, *idx);
		dsge->va_or_pa = cpu_to_le64(ssge[indx].addr);
		dsge->l_key = cpu_to_le32(ssge[indx].lkey);
		dsge->size = cpu_to_le32(ssge[indx].size);
		len += ssge[indx].size;
	}

	return len;
}

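/* Compute the number of 16B slots a WQE will occupy, returning it and
 * filling in the WQE size in slots (*wqe_sz) and the queue-full delta
 * (*qdf). Static WQE mode always charges a fixed 8 slots per WQE.
 */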
static u16 bnxt_qplib_required_slots(struct bnxt_qplib_qp *qp,
				     struct bnxt_qplib_swqe *wqe,
				     u16 *wqe_sz, u16 *qdf, u8 mode)
{
	u32 ilsize, bytes;
	u16 nsge;
	u16 slot;

	nsge = wqe->num_sge;
	/* Although sq_send_hdr is SQ-specific, the RQ header is the same
	 * size, so it serves for sizing both queues.
	 */
	bytes = sizeof(struct sq_send_hdr) + nsge * sizeof(struct sq_sge);
	if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE) {
		ilsize = bnxt_qplib_calc_ilsize(wqe, qp->max_inline_data);
		bytes = ALIGN(ilsize, sizeof(struct sq_sge));
		bytes += sizeof(struct sq_send_hdr);
	}

	*qdf = __xlate_qfd(qp->sq.q_full_delta, bytes);
	slot = bytes >> 4;
	*wqe_sz = slot;
	if (mode == BNXT_QPLIB_WQE_MODE_STATIC)
		slot = 8;
	return slot;
}

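/* Resolve the PSN search entry for @swq from the padded area set up by
 * bnxt_qplib_init_psn_ptr() and cache it on the software WQE.
 */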
static void bnxt_qplib_pull_psn_buff(struct bnxt_qplib_q *sq,
				     struct bnxt_qplib_swq *swq)
{
	struct bnxt_qplib_hwq *hwq;
	u32 pg_num, pg_indx;
	void *buff;
	u32 tail;

	hwq = &sq->hwq;
	if (!hwq->pad_pg)
		return;
	tail = swq->slot_idx / sq->dbinfo.max_slot;
	pg_num = (tail + hwq->pad_pgofft) / (PAGE_SIZE / hwq->pad_stride);
	pg_indx = (tail + hwq->pad_pgofft) % (PAGE_SIZE / hwq->pad_stride);
	buff = (void *)(hwq->pad_pg[pg_num] + pg_indx * hwq->pad_stride);
	swq->psn_ext = buff;
	swq->psn_search = buff;
}

void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *sq = &qp->sq;

	bnxt_qplib_ring_prod_db(&sq->dbinfo, DBC_DBC_TYPE_SQ);
}

int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
			 struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_qplib_nq_work *nq_work = NULL;
	int i, rc = 0, data_len = 0, pkt_num = 0;
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_hwq *hwq;
	struct bnxt_qplib_swq *swq;
	bool sch_handler = false;
	u16 wqe_sz, qdf = 0;
	void *base_hdr;
	void *ext_hdr;
	__le32 temp32;
	u32 wqe_idx;
	u32 slots;
	u16 idx;

	hwq = &sq->hwq;
	if (qp->state != CMDQ_MODIFY_QP_NEW_STATE_RTS &&
	    qp->state != CMDQ_MODIFY_QP_NEW_STATE_ERR) {
		dev_err(&hwq->pdev->dev,
			"QPLIB: FP: QP (0x%x) is in the 0x%x state",
			qp->id, qp->state);
		rc = -EINVAL;
		goto done;
	}

	slots = bnxt_qplib_required_slots(qp, wqe, &wqe_sz, &qdf, qp->wqe_mode);
	if (bnxt_qplib_queue_full(sq, slots + qdf)) {
		dev_err(&hwq->pdev->dev,
			"prod = %#x cons = %#x qdepth = %#x delta = %#x\n",
			hwq->prod, hwq->cons, hwq->depth, sq->q_full_delta);
		rc = -ENOMEM;
		goto done;
	}

	swq = bnxt_qplib_get_swqe(sq, &wqe_idx);
	bnxt_qplib_pull_psn_buff(sq, swq);

	idx = 0;
	swq->slot_idx = hwq->prod;
	swq->slots = slots;
	swq->wr_id = wqe->wr_id;
	swq->type = wqe->type;
	swq->flags = wqe->flags;
	swq->start_psn = sq->psn & BTH_PSN_MASK;
	if (qp->sig_type)
		swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;

	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
		sch_handler = true;
		dev_dbg(&hwq->pdev->dev,
			"%s Error QP. Scheduling for poll_cq\n", __func__);
		goto queue_err;
	}

	base_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
	ext_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
	memset(base_hdr, 0, sizeof(struct sq_sge));
	memset(ext_hdr, 0, sizeof(struct sq_sge));

	if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE)
		/* Copy the inline data */
		data_len = bnxt_qplib_put_inline(qp, wqe, &idx);
	else
		data_len = bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge,
					       &idx);
	if (data_len < 0)
		goto queue_err;
	/* Specifics */
	switch (wqe->type) {
	case BNXT_QPLIB_SWQE_TYPE_SEND:
		if (qp->type == CMDQ_CREATE_QP1_TYPE_GSI) {
			struct sq_send_raweth_qp1_hdr *sqe = base_hdr;
			struct sq_raw_ext_hdr *ext_sqe = ext_hdr;
			/* Assemble info for Raw Ethertype QPs */

			sqe->wqe_type = wqe->type;
			sqe->flags = wqe->flags;
			sqe->wqe_size = wqe_sz;
			sqe->cfa_action = cpu_to_le16(wqe->rawqp1.cfa_action);
			sqe->lflags = cpu_to_le16(wqe->rawqp1.lflags);
			sqe->length = cpu_to_le32(data_len);
			ext_sqe->cfa_meta = cpu_to_le32((wqe->rawqp1.cfa_meta &
				SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_MASK) <<
				SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_SFT);

			break;
		}
		fallthrough;
	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
	{
		struct sq_ud_ext_hdr *ext_sqe = ext_hdr;
		struct sq_send_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->wqe_size = wqe_sz;
		sqe->inv_key_or_imm_data = cpu_to_le32(wqe->send.inv_key);
		if (qp->type == CMDQ_CREATE_QP_TYPE_UD ||
		    qp->type == CMDQ_CREATE_QP_TYPE_GSI) {
			sqe->q_key = cpu_to_le32(wqe->send.q_key);
			sqe->length = cpu_to_le32(data_len);
			sq->psn = (sq->psn + 1) & BTH_PSN_MASK;
			ext_sqe->dst_qp = cpu_to_le32(wqe->send.dst_qp &
						      SQ_SEND_DST_QP_MASK);
			ext_sqe->avid = cpu_to_le32(wqe->send.avid &
						    SQ_SEND_AVID_MASK);
		} else {
			sqe->length = cpu_to_le32(data_len);
			if (qp->mtu)
				pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
			if (!pkt_num)
				pkt_num = 1;
			sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
		}
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
	case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
	{
		struct sq_rdma_ext_hdr *ext_sqe = ext_hdr;
		struct sq_rdma_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->wqe_size = wqe_sz;
		sqe->imm_data = cpu_to_le32(wqe->rdma.inv_key);
		sqe->length = cpu_to_le32((u32)data_len);
		ext_sqe->remote_va = cpu_to_le64(wqe->rdma.remote_va);
		ext_sqe->remote_key = cpu_to_le32(wqe->rdma.r_key);
		if (qp->mtu)
			pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
		if (!pkt_num)
			pkt_num = 1;
		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
		break;
1878 }
1879 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
1880 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
1881 {
1882 struct sq_atomic_ext_hdr *ext_sqe = ext_hdr;
1883 struct sq_atomic_hdr *sqe = base_hdr;
1884
1885 sqe->wqe_type = wqe->type;
1886 sqe->flags = wqe->flags;
1887 sqe->remote_key = cpu_to_le32(wqe->atomic.r_key);
1888 sqe->remote_va = cpu_to_le64(wqe->atomic.remote_va);
1889 ext_sqe->swap_data = cpu_to_le64(wqe->atomic.swap_data);
1890 ext_sqe->cmp_data = cpu_to_le64(wqe->atomic.cmp_data);
1891 if (qp->mtu)
1892 pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1893 if (!pkt_num)
1894 pkt_num = 1;
1895 sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1896 break;
1897 }
1898 case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
1899 {
1900 struct sq_localinvalidate *sqe = base_hdr;
1901
1902 sqe->wqe_type = wqe->type;
1903 sqe->flags = wqe->flags;
1904 sqe->inv_l_key = cpu_to_le32(wqe->local_inv.inv_l_key);
1905
1906 break;
1907 }
1908 case BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR:
1909 {
1910 struct sq_fr_pmr_ext_hdr *ext_sqe = ext_hdr;
1911 struct sq_fr_pmr_hdr *sqe = base_hdr;
1912
1913 sqe->wqe_type = wqe->type;
1914 sqe->flags = wqe->flags;
1915 sqe->access_cntl = wqe->frmr.access_cntl |
1916 SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
1917 sqe->zero_based_page_size_log =
1918 (wqe->frmr.pg_sz_log & SQ_FR_PMR_PAGE_SIZE_LOG_MASK) <<
1919 SQ_FR_PMR_PAGE_SIZE_LOG_SFT |
1920 (wqe->frmr.zero_based ? SQ_FR_PMR_ZERO_BASED : 0);
1921 sqe->l_key = cpu_to_le32(wqe->frmr.l_key);
1922 temp32 = cpu_to_le32(wqe->frmr.length);
1923 memcpy(sqe->length, &temp32, sizeof(wqe->frmr.length));
1924 sqe->numlevels_pbl_page_size_log =
1925 ((wqe->frmr.pbl_pg_sz_log <<
1926 SQ_FR_PMR_PBL_PAGE_SIZE_LOG_SFT) &
1927 SQ_FR_PMR_PBL_PAGE_SIZE_LOG_MASK) |
1928 ((wqe->frmr.levels << SQ_FR_PMR_NUMLEVELS_SFT) &
1929 SQ_FR_PMR_NUMLEVELS_MASK);
1930
1931 for (i = 0; i < wqe->frmr.page_list_len; i++)
1932 wqe->frmr.pbl_ptr[i] = cpu_to_le64(
1933 wqe->frmr.page_list[i] |
1934 PTU_PTE_VALID);
1935 ext_sqe->pblptr = cpu_to_le64(wqe->frmr.pbl_dma_ptr);
1936 ext_sqe->va = cpu_to_le64(wqe->frmr.va);
1937
1938 break;
1939 }
1940 case BNXT_QPLIB_SWQE_TYPE_BIND_MW:
1941 {
1942 struct sq_bind_ext_hdr *ext_sqe = ext_hdr;
1943 struct sq_bind_hdr *sqe = base_hdr;
1944
1945 sqe->wqe_type = wqe->type;
1946 sqe->flags = wqe->flags;
1947 sqe->access_cntl = wqe->bind.access_cntl;
1948 sqe->mw_type_zero_based = wqe->bind.mw_type |
1949 (wqe->bind.zero_based ? SQ_BIND_ZERO_BASED : 0);
1950 sqe->parent_l_key = cpu_to_le32(wqe->bind.parent_l_key);
1951 sqe->l_key = cpu_to_le32(wqe->bind.r_key);
1952 ext_sqe->va = cpu_to_le64(wqe->bind.va);
1953 ext_sqe->length_lo = cpu_to_le32(wqe->bind.length);
1954 break;
1955 }
1956 default:
1957 /* Bad wqe, return error */
1958 rc = -EINVAL;
1959 goto done;
1960 }
1961 swq->next_psn = sq->psn & BTH_PSN_MASK;
1962 bnxt_qplib_fill_psn_search(qp, wqe, swq);
1963 queue_err:
1964 bnxt_qplib_swq_mod_start(sq, wqe_idx);
1965 bnxt_qplib_hwq_incr_prod(hwq, swq->slots);
1966 qp->wqe_cnt++;
1967 done:
1968 if (sch_handler) {
1969 nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
1970 if (nq_work) {
1971 nq_work->cq = qp->scq;
1972 nq_work->nq = qp->scq->nq;
1973 INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
1974 queue_work(qp->scq->nq->cqn_wq, &nq_work->work);
1975 } else {
1976 dev_err(&hwq->pdev->dev,
1977 "FP: Failed to allocate SQ nq_work!\n");
1978 rc = -ENOMEM;
1979 }
1980 }
1981 return rc;
1982 }
1983
1984 void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp)
1985 {
1986 struct bnxt_qplib_q *rq = &qp->rq;
1987
1988 bnxt_qplib_ring_prod_db(&rq->dbinfo, DBC_DBC_TYPE_RQ);
1989 }
1990
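/*
 * Build and enqueue one receive WQE. Every RQE consumes the fixed
 * rq->dbinfo.max_slot slots, and a zero-length SGE is written when
 * the caller posts no SGEs so the WQE is never empty. As with the
 * SQ, the caller rings the RQ doorbell via bnxt_qplib_post_recv_db().
 */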
1991 int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
1992 struct bnxt_qplib_swqe *wqe)
1993 {
1994 struct bnxt_qplib_nq_work *nq_work = NULL;
1995 struct bnxt_qplib_q *rq = &qp->rq;
1996 struct rq_wqe_hdr *base_hdr;
1997 struct rq_ext_hdr *ext_hdr;
1998 struct bnxt_qplib_hwq *hwq;
1999 struct bnxt_qplib_swq *swq;
2000 bool sch_handler = false;
2001 u16 wqe_sz, idx;
2002 u32 wqe_idx;
2003 int rc = 0;
2004
2005 hwq = &rq->hwq;
2006 if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
2007 dev_err(&hwq->pdev->dev,
2008 "QPLIB: FP: QP (0x%x) is in the 0x%x state",
2009 qp->id, qp->state);
2010 rc = -EINVAL;
2011 goto done;
2012 }
2013
2014 if (bnxt_qplib_queue_full(rq, rq->dbinfo.max_slot)) {
2015 dev_err(&hwq->pdev->dev,
2016 "FP: QP (0x%x) RQ is full!\n", qp->id);
2017 rc = -EINVAL;
2018 goto done;
2019 }
2020
2021 swq = bnxt_qplib_get_swqe(rq, &wqe_idx);
2022 swq->wr_id = wqe->wr_id;
2023 swq->slots = rq->dbinfo.max_slot;
2024
2025 if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
2026 sch_handler = true;
2027 dev_dbg(&hwq->pdev->dev,
2028 "%s: Error QP. Scheduling for poll_cq\n", __func__);
2029 goto queue_err;
2030 }
2031
2032 idx = 0;
2033 base_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
2034 ext_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
2035 memset(base_hdr, 0, sizeof(struct sq_sge));
2036 memset(ext_hdr, 0, sizeof(struct sq_sge));
2037 wqe_sz = (sizeof(struct rq_wqe_hdr) +
2038 wqe->num_sge * sizeof(struct sq_sge)) >> 4;
2039 bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge, &idx);
2040 if (!wqe->num_sge) {
2041 struct sq_sge *sge;
2042
2043 sge = bnxt_qplib_get_prod_qe(hwq, idx++);
2044 sge->size = 0;
2045 wqe_sz++;
2046 }
2047 base_hdr->wqe_type = wqe->type;
2048 base_hdr->flags = wqe->flags;
2049 base_hdr->wqe_size = wqe_sz;
2050 base_hdr->wr_id[0] = cpu_to_le32(wqe_idx);
2051 queue_err:
2052 bnxt_qplib_swq_mod_start(rq, wqe_idx);
2053 bnxt_qplib_hwq_incr_prod(hwq, swq->slots);
2054 done:
2055 if (sch_handler) {
2056 nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
2057 if (nq_work) {
2058 nq_work->cq = qp->rcq;
2059 nq_work->nq = qp->rcq->nq;
2060 INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
2061 queue_work(qp->rcq->nq->cqn_wq, &nq_work->work);
2062 } else {
2063 dev_err(&hwq->pdev->dev,
2064 "FP: Failed to allocate RQ nq_work!\n");
2065 rc = -ENOMEM;
2066 }
2067 }
2068
2069 return rc;
2070 }
2071
2072 /* CQ */
2073 int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
2074 {
2075 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
2076 struct bnxt_qplib_hwq_attr hwq_attr = {};
2077 struct creq_create_cq_resp resp = {};
2078 struct bnxt_qplib_cmdqmsg msg = {};
2079 struct cmdq_create_cq req = {};
2080 struct bnxt_qplib_pbl *pbl;
2081 u32 pg_sz_lvl;
2082 int rc;
2083
2084 if (!cq->dpi) {
2085 dev_err(&rcfw->pdev->dev,
2086 "FP: CREATE_CQ failed due to NULL DPI\n");
2087 return -EINVAL;
2088 }
2089
2090 hwq_attr.res = res;
2091 hwq_attr.depth = cq->max_wqe;
2092 hwq_attr.stride = sizeof(struct cq_base);
2093 hwq_attr.type = HWQ_TYPE_QUEUE;
2094 hwq_attr.sginfo = &cq->sg_info;
2095 rc = bnxt_qplib_alloc_init_hwq(&cq->hwq, &hwq_attr);
2096 if (rc)
2097 return rc;
2098
2099 bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
2100 CMDQ_BASE_OPCODE_CREATE_CQ,
2101 sizeof(req));
2102
2103 req.dpi = cpu_to_le32(cq->dpi->dpi);
2104 req.cq_handle = cpu_to_le64(cq->cq_handle);
2105 req.cq_size = cpu_to_le32(cq->hwq.max_elements);
2106 pbl = &cq->hwq.pbl[PBL_LVL_0];
2107 pg_sz_lvl = (bnxt_qplib_base_pg_size(&cq->hwq) <<
2108 CMDQ_CREATE_CQ_PG_SIZE_SFT);
2109 pg_sz_lvl |= (cq->hwq.level & CMDQ_CREATE_CQ_LVL_MASK);
2110 req.pg_size_lvl = cpu_to_le32(pg_sz_lvl);
2111 req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
2112 req.cq_fco_cnq_id = cpu_to_le32(
2113 (cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) <<
2114 CMDQ_CREATE_CQ_CNQ_ID_SFT);
2115 bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
2116 sizeof(resp), 0);
2117 rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
2118 if (rc)
2119 goto fail;
2120
2121 cq->id = le32_to_cpu(resp.xid);
2122 cq->period = BNXT_QPLIB_QUEUE_START_PERIOD;
2123 init_waitqueue_head(&cq->waitq);
2124 INIT_LIST_HEAD(&cq->sqf_head);
2125 INIT_LIST_HEAD(&cq->rqf_head);
2126 spin_lock_init(&cq->compl_lock);
2127 spin_lock_init(&cq->flush_lock);
2128
2129 cq->dbinfo.hwq = &cq->hwq;
2130 cq->dbinfo.xid = cq->id;
2131 cq->dbinfo.db = cq->dpi->dbr;
2132 cq->dbinfo.priv_db = res->dpi_tbl.priv_db;
2133
2134 bnxt_qplib_armen_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMENA);
2135
2136 return 0;
2137
2138 fail:
2139 bnxt_qplib_free_hwq(res, &cq->hwq);
2140 return rc;
2141 }
2142
2143 void bnxt_qplib_resize_cq_complete(struct bnxt_qplib_res *res,
2144 struct bnxt_qplib_cq *cq)
2145 {
2146 bnxt_qplib_free_hwq(res, &cq->hwq);
2147 memcpy(&cq->hwq, &cq->resize_hwq, sizeof(cq->hwq));
2148 }
2149
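/*
 * Ask the firmware to resize the CQ into a freshly allocated ring
 * (cq->resize_hwq). The switch-over happens later: when the hardware
 * emits a cut-off CQE, bnxt_qplib_cq_process_cutoff() clears
 * CQ_FLAGS_RESIZE_IN_PROG, and the waiter then calls
 * bnxt_qplib_resize_cq_complete() to adopt the new ring.
 */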
2150 int bnxt_qplib_resize_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq,
2151 int new_cqes)
2152 {
2153 struct bnxt_qplib_hwq_attr hwq_attr = {};
2154 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
2155 struct creq_resize_cq_resp resp = {};
2156 struct bnxt_qplib_cmdqmsg msg = {};
2157 struct cmdq_resize_cq req = {};
2158 struct bnxt_qplib_pbl *pbl;
2159 u32 pg_sz, lvl, new_sz;
2160 int rc;
2161
2162 bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
2163 CMDQ_BASE_OPCODE_RESIZE_CQ,
2164 sizeof(req));
2165 hwq_attr.sginfo = &cq->sg_info;
2166 hwq_attr.res = res;
2167 hwq_attr.depth = new_cqes;
2168 hwq_attr.stride = sizeof(struct cq_base);
2169 hwq_attr.type = HWQ_TYPE_QUEUE;
2170 rc = bnxt_qplib_alloc_init_hwq(&cq->resize_hwq, &hwq_attr);
2171 if (rc)
2172 return rc;
2173
2174 req.cq_cid = cpu_to_le32(cq->id);
2175 pbl = &cq->resize_hwq.pbl[PBL_LVL_0];
2176 pg_sz = bnxt_qplib_base_pg_size(&cq->resize_hwq);
2177 lvl = (cq->resize_hwq.level << CMDQ_RESIZE_CQ_LVL_SFT) &
2178 CMDQ_RESIZE_CQ_LVL_MASK;
2179 new_sz = (new_cqes << CMDQ_RESIZE_CQ_NEW_CQ_SIZE_SFT) &
2180 CMDQ_RESIZE_CQ_NEW_CQ_SIZE_MASK;
2181 req.new_cq_size_pg_size_lvl = cpu_to_le32(new_sz | pg_sz | lvl);
2182 req.new_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
2183
2184 bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
2185 sizeof(resp), 0);
2186 rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
2187 return rc;
2188 }
2189
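/*
 * Destroy the CQ in firmware, then wait until all CNQ events the
 * firmware reported as outstanding (resp.total_cnq_events) have been
 * processed before freeing the ring memory.
 */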
2190 int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
2191 {
2192 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
2193 struct creq_destroy_cq_resp resp = {};
2194 struct bnxt_qplib_cmdqmsg msg = {};
2195 struct cmdq_destroy_cq req = {};
2196 u16 total_cnq_events;
2197 int rc;
2198
2199 bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
2200 CMDQ_BASE_OPCODE_DESTROY_CQ,
2201 sizeof(req));
2202
2203 req.cq_cid = cpu_to_le32(cq->id);
2204 bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
2205 sizeof(resp), 0);
2206 rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
2207 if (rc)
2208 return rc;
2209 total_cnq_events = le16_to_cpu(resp.total_cnq_events);
2210 __wait_for_all_nqes(cq, total_cnq_events);
2211 bnxt_qplib_free_hwq(res, &cq->hwq);
2212 return 0;
2213 }
2214
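/*
 * Fabricate FLUSHED_ERR completions for all outstanding SQEs, bounded
 * by the polling budget. Fence WQEs never produce user completions
 * and are skipped; consuming one also cancels phantom processing.
 * Returns -EAGAIN if the budget runs out before the queue is drained.
 */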
2215 static int __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp,
2216 struct bnxt_qplib_cqe **pcqe, int *budget)
2217 {
2218 struct bnxt_qplib_cqe *cqe;
2219 u32 start, last;
2220 int rc = 0;
2221
2222 /* Now complete all outstanding SQEs with FLUSHED_ERR */
2223 start = sq->swq_start;
2224 cqe = *pcqe;
2225 while (*budget) {
2226 last = sq->swq_last;
2227 if (start == last)
2228 break;
2229 /* Skip the FENCE WQE completions */
2230 if (sq->swq[last].wr_id == BNXT_QPLIB_FENCE_WRID) {
2231 bnxt_qplib_cancel_phantom_processing(qp);
2232 goto skip_compl;
2233 }
2234 memset(cqe, 0, sizeof(*cqe));
2235 cqe->status = CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR;
2236 cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2237 cqe->qp_handle = (u64)(unsigned long)qp;
2238 cqe->wr_id = sq->swq[last].wr_id;
2239 cqe->src_qp = qp->id;
2240 cqe->type = sq->swq[last].type;
2241 cqe++;
2242 (*budget)--;
2243 skip_compl:
2244 bnxt_qplib_hwq_incr_cons(&sq->hwq, sq->swq[last].slots);
2245 sq->swq_last = sq->swq[last].next_idx;
2246 }
2247 *pcqe = cqe;
2248 if (!(*budget) && sq->swq_last != start)
2249 /* Out of budget */
2250 rc = -EAGAIN;
2251
2252 return rc;
2253 }
2254
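/*
 * The RQ counterpart of __flush_sq(): complete all outstanding RQEs
 * with FLUSHED_ERR, using the responder CQE opcode that matches the
 * QP type, again bounded by the polling budget.
 */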
2255 static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
2256 struct bnxt_qplib_cqe **pcqe, int *budget)
2257 {
2258 struct bnxt_qplib_cqe *cqe;
2259 u32 start, last;
2260 int opcode = 0;
2261 int rc = 0;
2262
2263 switch (qp->type) {
2264 case CMDQ_CREATE_QP1_TYPE_GSI:
2265 opcode = CQ_BASE_CQE_TYPE_RES_RAWETH_QP1;
2266 break;
2267 case CMDQ_CREATE_QP_TYPE_RC:
2268 opcode = CQ_BASE_CQE_TYPE_RES_RC;
2269 break;
2270 case CMDQ_CREATE_QP_TYPE_UD:
2271 case CMDQ_CREATE_QP_TYPE_GSI:
2272 opcode = CQ_BASE_CQE_TYPE_RES_UD;
2273 break;
2274 }
2275
2276 /* Flush the rest of the RQ */
2277 start = rq->swq_start;
2278 cqe = *pcqe;
2279 while (*budget) {
2280 last = rq->swq_last;
2281 if (last == start)
2282 break;
2283 memset(cqe, 0, sizeof(*cqe));
2284 cqe->status =
2285 CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR;
2286 cqe->opcode = opcode;
2287 cqe->qp_handle = (unsigned long)qp;
2288 cqe->wr_id = rq->swq[last].wr_id;
2289 cqe++;
2290 (*budget)--;
2291 bnxt_qplib_hwq_incr_cons(&rq->hwq, rq->swq[last].slots);
2292 rq->swq_last = rq->swq[last].next_idx;
2293 }
2294 *pcqe = cqe;
2295 if (!*budget && rq->swq_last != start)
2296 /* Out of budget */
2297 rc = -EAGAIN;
2298
2299 return rc;
2300 }
2301
2302 void bnxt_qplib_mark_qp_error(void *qp_handle)
2303 {
2304 struct bnxt_qplib_qp *qp = qp_handle;
2305
2306 if (!qp)
2307 return;
2308
2309 /* Must block new posting of SQ and RQ */
2310 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2311 bnxt_qplib_cancel_phantom_processing(qp);
2312 }
2313
2314 /* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive)
2315 * CQEs are tracked from sw_cq_cons to max_element but are valid only if VALID=1
2316 */
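/*
 * HW workaround 9060. If the PSN search entry of the WQE being
 * completed is still marked (bit 31 of flags_next_psn), hold the
 * completion back (-EAGAIN), re-arm the CQ, and on subsequent passes
 * peek ahead in the CQ ring until the phantom REQ CQE for the fence
 * wr_id shows up; only then resume normal completion processing.
 */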
2317 static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
2318 u32 cq_cons, u32 swq_last, u32 cqe_sq_cons)
2319 {
2320 u32 peek_sw_cq_cons, peek_raw_cq_cons, peek_sq_cons_idx;
2321 struct bnxt_qplib_q *sq = &qp->sq;
2322 struct cq_req *peek_req_hwcqe;
2323 struct bnxt_qplib_qp *peek_qp;
2324 struct bnxt_qplib_q *peek_sq;
2325 struct bnxt_qplib_swq *swq;
2326 struct cq_base *peek_hwcqe;
2327 int i, rc = 0;
2328
2329 /* Normal mode */
2330 /* Check for the psn_search marking before completing */
2331 swq = &sq->swq[swq_last];
2332 if (swq->psn_search &&
2333 le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) {
2334 /* Unmark */
2335 swq->psn_search->flags_next_psn = cpu_to_le32
2336 (le32_to_cpu(swq->psn_search->flags_next_psn)
2337 & ~0x80000000);
2338 dev_dbg(&cq->hwq.pdev->dev,
2339 "FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n",
2340 cq_cons, qp->id, swq_last, cqe_sq_cons);
2341 sq->condition = true;
2342 sq->send_phantom = true;
2343
2344 /* TODO: Only ARM if the previous SQE is ARMALL */
2345 bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMALL);
2346 rc = -EAGAIN;
2347 goto out;
2348 }
2349 if (sq->condition) {
2350 /* Peek at the completions */
2351 peek_raw_cq_cons = cq->hwq.cons;
2352 peek_sw_cq_cons = cq_cons;
2353 i = cq->hwq.max_elements;
2354 while (i--) {
2355 peek_sw_cq_cons = HWQ_CMP((peek_sw_cq_cons), &cq->hwq);
2356 peek_hwcqe = bnxt_qplib_get_qe(&cq->hwq,
2357 peek_sw_cq_cons, NULL);
2358 /* If the next hwcqe is VALID */
2359 if (CQE_CMP_VALID(peek_hwcqe, peek_raw_cq_cons,
2360 cq->hwq.max_elements)) {
2361 /*
2362 * The valid test of the entry must be done first before
2363 * reading any further.
2364 */
2365 dma_rmb();
2366 /* If the next hwcqe is a REQ */
2367 if ((peek_hwcqe->cqe_type_toggle &
2368 CQ_BASE_CQE_TYPE_MASK) ==
2369 CQ_BASE_CQE_TYPE_REQ) {
2370 peek_req_hwcqe = (struct cq_req *)
2371 peek_hwcqe;
2372 peek_qp = (struct bnxt_qplib_qp *)
2373 ((unsigned long)
2374 le64_to_cpu
2375 (peek_req_hwcqe->qp_handle));
2376 peek_sq = &peek_qp->sq;
2377 peek_sq_cons_idx =
2378 ((le16_to_cpu(
2379 peek_req_hwcqe->sq_cons_idx)
2380 - 1) % sq->max_wqe);
2381 /* If the hwcqe's sq's wr_id matches */
2382 if (peek_sq == sq &&
2383 sq->swq[peek_sq_cons_idx].wr_id ==
2384 BNXT_QPLIB_FENCE_WRID) {
2385 /*
2386 * Unbreak only if the phantom
2387 * comes back
2388 */
2389 dev_dbg(&cq->hwq.pdev->dev,
2390 "FP: Got Phantom CQE\n");
2391 sq->condition = false;
2392 sq->single = true;
2393 rc = 0;
2394 goto out;
2395 }
2396 }
2397 /* Valid but not the phantom, so keep looping */
2398 } else {
2399 /* Not valid yet, just exit and wait */
2400 rc = -EINVAL;
2401 goto out;
2402 }
2403 peek_sw_cq_cons++;
2404 peek_raw_cq_cons++;
2405 }
2406 dev_err(&cq->hwq.pdev->dev,
2407 "Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x\n",
2408 cq_cons, qp->id, swq_last, cqe_sq_cons);
2409 rc = -EINVAL;
2410 }
2411 out:
2412 return rc;
2413 }
2414
2415 static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
2416 struct cq_req *hwcqe,
2417 struct bnxt_qplib_cqe **pcqe, int *budget,
2418 u32 cq_cons, struct bnxt_qplib_qp **lib_qp)
2419 {
2420 struct bnxt_qplib_swq *swq;
2421 struct bnxt_qplib_cqe *cqe;
2422 struct bnxt_qplib_qp *qp;
2423 struct bnxt_qplib_q *sq;
2424 u32 cqe_sq_cons;
2425 int rc = 0;
2426
2427 qp = (struct bnxt_qplib_qp *)((unsigned long)
2428 le64_to_cpu(hwcqe->qp_handle));
2429 if (!qp) {
2430 dev_err(&cq->hwq.pdev->dev,
2431 "FP: Process Req qp is NULL\n");
2432 return -EINVAL;
2433 }
2434 sq = &qp->sq;
2435
2436 cqe_sq_cons = le16_to_cpu(hwcqe->sq_cons_idx) % sq->max_wqe;
2437 if (qp->sq.flushed) {
2438 dev_dbg(&cq->hwq.pdev->dev,
2439 "%s: QP in Flush QP = %p\n", __func__, qp);
2440 goto done;
2441 }
2442 /* We must walk the sq's swq and fabricate CQEs for all previously
2443 * signaled SWQEs (CQE aggregation), from the current sq cons up
2444 * to the cqe_sq_cons
2445 */
2446 cqe = *pcqe;
2447 while (*budget) {
2448 if (sq->swq_last == cqe_sq_cons)
2449 /* Done */
2450 break;
2451
2452 swq = &sq->swq[sq->swq_last];
2453 memset(cqe, 0, sizeof(*cqe));
2454 cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2455 cqe->qp_handle = (u64)(unsigned long)qp;
2456 cqe->src_qp = qp->id;
2457 cqe->wr_id = swq->wr_id;
2458 if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID)
2459 goto skip;
2460 cqe->type = swq->type;
2461
2462 /* For the last CQE, check for status. For errors, regardless
2463 * of the request being signaled or not, it must complete with
2464 * the hwcqe error status
2465 */
2466 if (swq->next_idx == cqe_sq_cons &&
2467 hwcqe->status != CQ_REQ_STATUS_OK) {
2468 cqe->status = hwcqe->status;
2469 dev_err(&cq->hwq.pdev->dev,
2470 "FP: CQ Processed Req wr_id[%d] = 0x%llx with status 0x%x\n",
2471 sq->swq_last, cqe->wr_id, cqe->status);
2472 cqe++;
2473 (*budget)--;
2474 bnxt_qplib_mark_qp_error(qp);
2475 /* Add qp to flush list of the CQ */
2476 bnxt_qplib_add_flush_qp(qp);
2477 } else {
2478 /* Before we complete, do WA 9060 */
2479 if (do_wa9060(qp, cq, cq_cons, sq->swq_last,
2480 cqe_sq_cons)) {
2481 *lib_qp = qp;
2482 goto out;
2483 }
2484 if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
2485 cqe->status = CQ_REQ_STATUS_OK;
2486 cqe++;
2487 (*budget)--;
2488 }
2489 }
2490 skip:
2491 bnxt_qplib_hwq_incr_cons(&sq->hwq, swq->slots);
2492 sq->swq_last = swq->next_idx;
2493 if (sq->single)
2494 break;
2495 }
2496 out:
2497 *pcqe = cqe;
2498 if (sq->swq_last != cqe_sq_cons) {
2499 /* Out of budget */
2500 rc = -EAGAIN;
2501 goto done;
2502 }
2503 /*
2504 * Back to normal completion mode only after it has completed all of
2505 * the WC for this CQE
2506 */
2507 sq->single = false;
2508 done:
2509 return rc;
2510 }
2511
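/* Return a consumed SRQE tag to the tail of the SRQ free list. */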
2512 static void bnxt_qplib_release_srqe(struct bnxt_qplib_srq *srq, u32 tag)
2513 {
2514 spin_lock(&srq->hwq.lock);
2515 srq->swq[srq->last_idx].next_idx = (int)tag;
2516 srq->last_idx = (int)tag;
2517 srq->swq[srq->last_idx].next_idx = -1;
2518 srq->hwq.cons++; /* Support for SRQE counter */
2519 spin_unlock(&srq->hwq.lock);
2520 }
2521
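/*
 * Responder-side RC completion. The wr_id index in the hwcqe selects
 * either an SRQ entry (recycled immediately) or the oldest
 * outstanding RQ swq entry. An error status moves the QP to ERR and
 * onto the CQ flush list. The UD and raw-eth/QP1 handlers below
 * follow the same pattern.
 */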
2522 static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
2523 struct cq_res_rc *hwcqe,
2524 struct bnxt_qplib_cqe **pcqe,
2525 int *budget)
2526 {
2527 struct bnxt_qplib_srq *srq;
2528 struct bnxt_qplib_cqe *cqe;
2529 struct bnxt_qplib_qp *qp;
2530 struct bnxt_qplib_q *rq;
2531 u32 wr_id_idx;
2532
2533 qp = (struct bnxt_qplib_qp *)((unsigned long)
2534 le64_to_cpu(hwcqe->qp_handle));
2535 if (!qp) {
2536 dev_err(&cq->hwq.pdev->dev, "process_cq RC qp is NULL\n");
2537 return -EINVAL;
2538 }
2539 if (qp->rq.flushed) {
2540 dev_dbg(&cq->hwq.pdev->dev,
2541 "%s: QP in Flush QP = %p\n", __func__, qp);
2542 return 0;
2543 }
2544
2545 cqe = *pcqe;
2546 cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2547 cqe->length = le32_to_cpu(hwcqe->length);
2548 cqe->invrkey = le32_to_cpu(hwcqe->imm_data_or_inv_r_key);
2549 cqe->mr_handle = le64_to_cpu(hwcqe->mr_handle);
2550 cqe->flags = le16_to_cpu(hwcqe->flags);
2551 cqe->status = hwcqe->status;
2552 cqe->qp_handle = (u64)(unsigned long)qp;
2553
2554 wr_id_idx = le32_to_cpu(hwcqe->srq_or_rq_wr_id) &
2555 CQ_RES_RC_SRQ_OR_RQ_WR_ID_MASK;
2556 if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
2557 srq = qp->srq;
2558 if (!srq)
2559 return -EINVAL;
2560 if (wr_id_idx >= srq->hwq.max_elements) {
2561 dev_err(&cq->hwq.pdev->dev,
2562 "FP: CQ Process RC wr_id idx 0x%x exceeded SRQ max 0x%x\n",
2563 wr_id_idx, srq->hwq.max_elements);
2564 return -EINVAL;
2565 }
2566 cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2567 bnxt_qplib_release_srqe(srq, wr_id_idx);
2568 cqe++;
2569 (*budget)--;
2570 *pcqe = cqe;
2571 } else {
2572 struct bnxt_qplib_swq *swq;
2573
2574 rq = &qp->rq;
2575 if (wr_id_idx > (rq->max_wqe - 1)) {
2576 dev_err(&cq->hwq.pdev->dev,
2577 "FP: CQ Process RC wr_id idx 0x%x exceeded RQ max 0x%x\n",
2578 wr_id_idx, rq->max_wqe);
2579 return -EINVAL;
2580 }
2581 if (wr_id_idx != rq->swq_last)
2582 return -EINVAL;
2583 swq = &rq->swq[rq->swq_last];
2584 cqe->wr_id = swq->wr_id;
2585 cqe++;
2586 (*budget)--;
2587 bnxt_qplib_hwq_incr_cons(&rq->hwq, swq->slots);
2588 rq->swq_last = swq->next_idx;
2589 *pcqe = cqe;
2590
2591 if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2592 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2593 /* Add qp to flush list of the CQ */
2594 bnxt_qplib_add_flush_qp(qp);
2595 }
2596 }
2597
2598 return 0;
2599 }
2600
2601 static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
2602 struct cq_res_ud *hwcqe,
2603 struct bnxt_qplib_cqe **pcqe,
2604 int *budget)
2605 {
2606 struct bnxt_qplib_srq *srq;
2607 struct bnxt_qplib_cqe *cqe;
2608 struct bnxt_qplib_qp *qp;
2609 struct bnxt_qplib_q *rq;
2610 u32 wr_id_idx;
2611
2612 qp = (struct bnxt_qplib_qp *)((unsigned long)
2613 le64_to_cpu(hwcqe->qp_handle));
2614 if (!qp) {
2615 dev_err(&cq->hwq.pdev->dev, "process_cq UD qp is NULL\n");
2616 return -EINVAL;
2617 }
2618 if (qp->rq.flushed) {
2619 dev_dbg(&cq->hwq.pdev->dev,
2620 "%s: QP in Flush QP = %p\n", __func__, qp);
2621 return 0;
2622 }
2623 cqe = *pcqe;
2624 cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2625 cqe->length = le16_to_cpu(hwcqe->length) & CQ_RES_UD_LENGTH_MASK;
2626 cqe->cfa_meta = le16_to_cpu(hwcqe->cfa_metadata);
2627 cqe->invrkey = le32_to_cpu(hwcqe->imm_data);
2628 cqe->flags = le16_to_cpu(hwcqe->flags);
2629 cqe->status = hwcqe->status;
2630 cqe->qp_handle = (u64)(unsigned long)qp;
2631 /* FIXME: Endianness fix needed for smac */
2632 memcpy(cqe->smac, hwcqe->src_mac, ETH_ALEN);
2633 wr_id_idx = le32_to_cpu(hwcqe->src_qp_high_srq_or_rq_wr_id)
2634 & CQ_RES_UD_SRQ_OR_RQ_WR_ID_MASK;
2635 cqe->src_qp = le16_to_cpu(hwcqe->src_qp_low) |
2636 ((le32_to_cpu(
2637 hwcqe->src_qp_high_srq_or_rq_wr_id) &
2638 CQ_RES_UD_SRC_QP_HIGH_MASK) >> 8);
2639
2640 if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
2641 srq = qp->srq;
2642 if (!srq)
2643 return -EINVAL;
2644
2645 if (wr_id_idx >= srq->hwq.max_elements) {
2646 dev_err(&cq->hwq.pdev->dev,
2647 "FP: CQ Process UD wr_id idx 0x%x exceeded SRQ max 0x%x\n",
2648 wr_id_idx, srq->hwq.max_elements);
2649 return -EINVAL;
2650 }
2651 cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2652 bnxt_qplib_release_srqe(srq, wr_id_idx);
2653 cqe++;
2654 (*budget)--;
2655 *pcqe = cqe;
2656 } else {
2657 struct bnxt_qplib_swq *swq;
2658
2659 rq = &qp->rq;
2660 if (wr_id_idx > (rq->max_wqe - 1)) {
2661 dev_err(&cq->hwq.pdev->dev,
2662 "FP: CQ Process UD wr_id idx 0x%x exceeded RQ max 0x%x\n",
2663 wr_id_idx, rq->max_wqe);
2664 return -EINVAL;
2665 }
2666
2667 if (rq->swq_last != wr_id_idx)
2668 return -EINVAL;
2669 swq = &rq->swq[rq->swq_last];
2670 cqe->wr_id = swq->wr_id;
2671 cqe++;
2672 (*budget)--;
2673 bnxt_qplib_hwq_incr_cons(&rq->hwq, swq->slots);
2674 rq->swq_last = swq->next_idx;
2675 *pcqe = cqe;
2676
2677 if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2678 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2679 /* Add qp to flush list of the CQ */
2680 bnxt_qplib_add_flush_qp(qp);
2681 }
2682 }
2683
2684 return 0;
2685 }
2686
2687 bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
2688 {
2689 struct cq_base *hw_cqe;
2690 u32 sw_cons, raw_cons;
2691 bool rc = true;
2692
2693 raw_cons = cq->hwq.cons;
2694 sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
2695 hw_cqe = bnxt_qplib_get_qe(&cq->hwq, sw_cons, NULL);
2696 /* Check for Valid bit. If the CQE is valid, return false */
2697 rc = !CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements);
2698 return rc;
2699 }
2700
2701 static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
2702 struct cq_res_raweth_qp1 *hwcqe,
2703 struct bnxt_qplib_cqe **pcqe,
2704 int *budget)
2705 {
2706 struct bnxt_qplib_qp *qp;
2707 struct bnxt_qplib_q *rq;
2708 struct bnxt_qplib_srq *srq;
2709 struct bnxt_qplib_cqe *cqe;
2710 u32 wr_id_idx;
2711
2712 qp = (struct bnxt_qplib_qp *)((unsigned long)
2713 le64_to_cpu(hwcqe->qp_handle));
2714 if (!qp) {
2715 dev_err(&cq->hwq.pdev->dev, "process_cq Raw/QP1 qp is NULL\n");
2716 return -EINVAL;
2717 }
2718 if (qp->rq.flushed) {
2719 dev_dbg(&cq->hwq.pdev->dev,
2720 "%s: QP in Flush QP = %p\n", __func__, qp);
2721 return 0;
2722 }
2723 cqe = *pcqe;
2724 cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2725 cqe->flags = le16_to_cpu(hwcqe->flags);
2726 cqe->qp_handle = (u64)(unsigned long)qp;
2727
2728 wr_id_idx =
2729 le32_to_cpu(hwcqe->raweth_qp1_payload_offset_srq_or_rq_wr_id)
2730 & CQ_RES_RAWETH_QP1_SRQ_OR_RQ_WR_ID_MASK;
2731 cqe->src_qp = qp->id;
2732 if (qp->id == 1 && !cqe->length) {
2733 /* Add workaround for the length misdetection */
2734 cqe->length = 296;
2735 } else {
2736 cqe->length = le16_to_cpu(hwcqe->length);
2737 }
2738 cqe->pkey_index = qp->pkey_index;
2739 memcpy(cqe->smac, qp->smac, 6);
2740
2741 cqe->raweth_qp1_flags = le16_to_cpu(hwcqe->raweth_qp1_flags);
2742 cqe->raweth_qp1_flags2 = le32_to_cpu(hwcqe->raweth_qp1_flags2);
2743 cqe->raweth_qp1_metadata = le32_to_cpu(hwcqe->raweth_qp1_metadata);
2744
2745 if (cqe->flags & CQ_RES_RAWETH_QP1_FLAGS_SRQ_SRQ) {
2746 srq = qp->srq;
2747 if (!srq) {
2748 dev_err(&cq->hwq.pdev->dev,
2749 "FP: SRQ used but not defined??\n");
2750 return -EINVAL;
2751 }
2752 if (wr_id_idx >= srq->hwq.max_elements) {
2753 dev_err(&cq->hwq.pdev->dev,
2754 "FP: CQ Process Raw/QP1 wr_id idx 0x%x exceeded SRQ max 0x%x\n",
2755 wr_id_idx, srq->hwq.max_elements);
2756 return -EINVAL;
2757 }
2758 cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2759 bnxt_qplib_release_srqe(srq, wr_id_idx);
2760 cqe++;
2761 (*budget)--;
2762 *pcqe = cqe;
2763 } else {
2764 struct bnxt_qplib_swq *swq;
2765
2766 rq = &qp->rq;
2767 if (wr_id_idx > (rq->max_wqe - 1)) {
2768 dev_err(&cq->hwq.pdev->dev,
2769 "FP: CQ Process Raw/QP1 RQ wr_id idx 0x%x exceeded RQ max 0x%x\n",
2770 wr_id_idx, rq->max_wqe);
2771 return -EINVAL;
2772 }
2773 if (rq->swq_last != wr_id_idx)
2774 return -EINVAL;
2775 swq = &rq->swq[rq->swq_last];
2776 cqe->wr_id = swq->wr_id;
2777 cqe++;
2778 (*budget)--;
2779 bnxt_qplib_hwq_incr_cons(&rq->hwq, swq->slots);
2780 rq->swq_last = swq->next_idx;
2781 *pcqe = cqe;
2782
2783 if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2784 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2785 /* Add qp to flush list of the CQ */
2786 bnxt_qplib_add_flush_qp(qp);
2787 }
2788 }
2789
2790 return 0;
2791 }
2792
2793 static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
2794 struct cq_terminal *hwcqe,
2795 struct bnxt_qplib_cqe **pcqe,
2796 int *budget)
2797 {
2798 struct bnxt_qplib_qp *qp;
2799 struct bnxt_qplib_q *sq, *rq;
2800 struct bnxt_qplib_cqe *cqe;
2801 u32 swq_last = 0, cqe_cons;
2802 int rc = 0;
2803
2804 /* Check the Status */
2805 if (hwcqe->status != CQ_TERMINAL_STATUS_OK)
2806 dev_warn(&cq->hwq.pdev->dev,
2807 "FP: CQ Process Terminal Error status = 0x%x\n",
2808 hwcqe->status);
2809
2810 qp = (struct bnxt_qplib_qp *)((unsigned long)
2811 le64_to_cpu(hwcqe->qp_handle));
2812 if (!qp)
2813 return -EINVAL;
2814
2815 /* Must block new posting of SQ and RQ */
2816 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2817
2818 sq = &qp->sq;
2819 rq = &qp->rq;
2820
2821 cqe_cons = le16_to_cpu(hwcqe->sq_cons_idx);
2822 if (cqe_cons == 0xFFFF)
2823 goto do_rq;
2824 cqe_cons %= sq->max_wqe;
2825
2826 if (qp->sq.flushed) {
2827 dev_dbg(&cq->hwq.pdev->dev,
2828 "%s: QP in Flush QP = %p\n", __func__, qp);
2829 goto sq_done;
2830 }
2831
2832 /* A terminal CQE can also aggregate successful CQEs that preceded it,
2833 * so all CQEs from the current sq cons up to the cqe_cons must be
2834 * completed with status OK
2835 */
2836 cqe = *pcqe;
2837 while (*budget) {
2838 swq_last = sq->swq_last;
2839 if (swq_last == cqe_cons)
2840 break;
2841 if (sq->swq[swq_last].flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
2842 memset(cqe, 0, sizeof(*cqe));
2843 cqe->status = CQ_REQ_STATUS_OK;
2844 cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2845 cqe->qp_handle = (u64)(unsigned long)qp;
2846 cqe->src_qp = qp->id;
2847 cqe->wr_id = sq->swq[swq_last].wr_id;
2848 cqe->type = sq->swq[swq_last].type;
2849 cqe++;
2850 (*budget)--;
2851 }
2852 bnxt_qplib_hwq_incr_cons(&sq->hwq, sq->swq[swq_last].slots);
2853 sq->swq_last = sq->swq[swq_last].next_idx;
2854 }
2855 *pcqe = cqe;
2856 if (!(*budget) && swq_last != cqe_cons) {
2857 /* Out of budget */
2858 rc = -EAGAIN;
2859 goto sq_done;
2860 }
2861 sq_done:
2862 if (rc)
2863 return rc;
2864 do_rq:
2865 cqe_cons = le16_to_cpu(hwcqe->rq_cons_idx);
2866 if (cqe_cons == 0xFFFF) {
2867 goto done;
2868 } else if (cqe_cons > rq->max_wqe - 1) {
2869 dev_err(&cq->hwq.pdev->dev,
2870 "FP: CQ Processed terminal reported rq_cons_idx 0x%x exceeds max 0x%x\n",
2871 cqe_cons, rq->max_wqe);
2872 rc = -EINVAL;
2873 goto done;
2874 }
2875
2876 if (qp->rq.flushed) {
2877 dev_dbg(&cq->hwq.pdev->dev,
2878 "%s: QP in Flush QP = %p\n", __func__, qp);
2879 rc = 0;
2880 goto done;
2881 }
2882
2883 /* A terminal CQE requires all posted RQEs to be completed with
2884 * FLUSHED_ERR, from the current rq->cons to rq->prod, regardless of
2885 * the rq_cons_idx the terminal CQE indicates
2886 */
2887
2888 /* Add qp to flush list of the CQ */
2889 bnxt_qplib_add_flush_qp(qp);
2890 done:
2891 return rc;
2892 }
2893
2894 static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq,
2895 struct cq_cutoff *hwcqe)
2896 {
2897 /* Check the Status */
2898 if (hwcqe->status != CQ_CUTOFF_STATUS_OK) {
2899 dev_err(&cq->hwq.pdev->dev,
2900 "FP: CQ Process Cutoff Error status = 0x%x\n",
2901 hwcqe->status);
2902 return -EINVAL;
2903 }
2904 clear_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags);
2905 wake_up_interruptible(&cq->waitq);
2906
2907 return 0;
2908 }
2909
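/*
 * Generate flush completions for every QP on this CQ's SQ and RQ
 * flush lists, under flush_lock. Returns the number of CQEs written
 * into the caller's array.
 */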
2910 int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
2911 struct bnxt_qplib_cqe *cqe,
2912 int num_cqes)
2913 {
2914 struct bnxt_qplib_qp *qp = NULL;
2915 u32 budget = num_cqes;
2916 unsigned long flags;
2917
2918 spin_lock_irqsave(&cq->flush_lock, flags);
2919 list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
2920 dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing SQ QP= %p\n", qp);
2921 __flush_sq(&qp->sq, qp, &cqe, &budget);
2922 }
2923
2924 list_for_each_entry(qp, &cq->rqf_head, rq_flush) {
2925 dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing RQ QP= %p\n", qp);
2926 __flush_rq(&qp->rq, qp, &cqe, &budget);
2927 }
2928 spin_unlock_irqrestore(&cq->flush_lock, flags);
2929
2930 return num_cqes - budget;
2931 }
2932
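/*
 * Poll up to num_cqes completions, dispatching each valid hardware
 * CQE to its type-specific handler and ringing the CQ doorbell once
 * the consumer index has advanced. Returns the number of CQEs
 * written. A minimal caller sketch (the batch size is illustrative):
 *
 *	struct bnxt_qplib_cqe cqes[16];
 *	struct bnxt_qplib_qp *lib_qp = NULL;
 *	int polled;
 *
 *	polled = bnxt_qplib_poll_cq(cq, cqes, 16, &lib_qp);
 */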
2933 int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
2934 int num_cqes, struct bnxt_qplib_qp **lib_qp)
2935 {
2936 struct cq_base *hw_cqe;
2937 u32 sw_cons, raw_cons;
2938 int budget, rc = 0;
2939 u8 type;
2940
2941 raw_cons = cq->hwq.cons;
2942 budget = num_cqes;
2943
2944 while (budget) {
2945 sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
2946 hw_cqe = bnxt_qplib_get_qe(&cq->hwq, sw_cons, NULL);
2947
2948 /* Check for Valid bit */
2949 if (!CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements))
2950 break;
2951
2952 /*
2953 * The valid test of the entry must be done first before
2954 * reading any further.
2955 */
2956 dma_rmb();
2957 /* Translate from the device's CQE format to qplib_wc */
2958 type = hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2959 switch (type) {
2960 case CQ_BASE_CQE_TYPE_REQ:
2961 rc = bnxt_qplib_cq_process_req(cq,
2962 (struct cq_req *)hw_cqe,
2963 &cqe, &budget,
2964 sw_cons, lib_qp);
2965 break;
2966 case CQ_BASE_CQE_TYPE_RES_RC:
2967 rc = bnxt_qplib_cq_process_res_rc(cq,
2968 (struct cq_res_rc *)
2969 hw_cqe, &cqe,
2970 &budget);
2971 break;
2972 case CQ_BASE_CQE_TYPE_RES_UD:
2973 rc = bnxt_qplib_cq_process_res_ud
2974 (cq, (struct cq_res_ud *)hw_cqe, &cqe,
2975 &budget);
2976 break;
2977 case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
2978 rc = bnxt_qplib_cq_process_res_raweth_qp1
2979 (cq, (struct cq_res_raweth_qp1 *)
2980 hw_cqe, &cqe, &budget);
2981 break;
2982 case CQ_BASE_CQE_TYPE_TERMINAL:
2983 rc = bnxt_qplib_cq_process_terminal
2984 (cq, (struct cq_terminal *)hw_cqe,
2985 &cqe, &budget);
2986 break;
2987 case CQ_BASE_CQE_TYPE_CUT_OFF:
2988 bnxt_qplib_cq_process_cutoff
2989 (cq, (struct cq_cutoff *)hw_cqe);
2990 /* Done processing this CQ */
2991 goto exit;
2992 default:
2993 dev_err(&cq->hwq.pdev->dev,
2994 "process_cq unknown type 0x%lx\n",
2995 hw_cqe->cqe_type_toggle &
2996 CQ_BASE_CQE_TYPE_MASK);
2997 rc = -EINVAL;
2998 break;
2999 }
3000 if (rc < 0) {
3001 if (rc == -EAGAIN)
3002 break;
3003 /* Error while processing the CQE, just skip to the
3004 * next one
3005 */
3006 if (type != CQ_BASE_CQE_TYPE_TERMINAL)
3007 dev_err(&cq->hwq.pdev->dev,
3008 "process_cqe error rc = 0x%x\n", rc);
3009 }
3010 raw_cons++;
3011 }
3012 if (cq->hwq.cons != raw_cons) {
3013 cq->hwq.cons = raw_cons;
3014 bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ);
3015 }
3016 exit:
3017 return num_cqes - budget;
3018 }
3019
3020 void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
3021 {
3022 if (arm_type)
3023 bnxt_qplib_ring_db(&cq->dbinfo, arm_type);
3024 /* cq->arm_state tracks whether the CQ handler should be invoked */
3025 atomic_set(&cq->arm_state, 1);
3026 }
3027
3028 void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp)
3029 {
3030 flush_workqueue(qp->scq->nq->cqn_wq);
3031 if (qp->scq != qp->rcq)
3032 flush_workqueue(qp->rcq->nq->cqn_wq);
3033 }
3034