/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: QPLib resource manager
 */

#define dev_fmt(fmt) "QPLIB: " fmt

#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/inetdevice.h>
#include <linux/dma-mapping.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>

#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
#include "qplib_rcfw.h"

static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
                                      struct bnxt_qplib_stats *stats);
static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
                                      struct bnxt_qplib_chip_ctx *cctx,
                                      struct bnxt_qplib_stats *stats);

/* PBL */
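/*
 * Free the pages backing one PBL level. Kernel-allocated levels own their
 * pages and release them via dma_free_coherent(); user (umem) backed levels
 * only drop the pointer/DMA-address arrays, since the pages themselves
 * belong to the umem. The pg_arr/pg_map_arr arrays are always freed and the
 * PBL bookkeeping is reset.
 */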
static void __free_pbl(struct bnxt_qplib_res *res, struct bnxt_qplib_pbl *pbl,
                       bool is_umem)
{
        struct pci_dev *pdev = res->pdev;
        int i;

        if (!is_umem) {
                for (i = 0; i < pbl->pg_count; i++) {
                        if (pbl->pg_arr[i])
                                dma_free_coherent(&pdev->dev, pbl->pg_size,
                                                  (void *)((unsigned long)
                                                           pbl->pg_arr[i] &
                                                           PAGE_MASK),
                                                  pbl->pg_map_arr[i]);
                        else
                                dev_warn(&pdev->dev,
                                         "PBL free pg_arr[%d] empty?!\n", i);
                        pbl->pg_arr[i] = NULL;
                }
        }
        vfree(pbl->pg_arr);
        pbl->pg_arr = NULL;
        vfree(pbl->pg_map_arr);
        pbl->pg_map_arr = NULL;
        pbl->pg_count = 0;
        pbl->pg_size = 0;
}

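/*
 * Populate a PBL from user memory: walk the umem in sginfo->pgsize sized
 * DMA blocks and record each block's DMA address. No kernel virtual mapping
 * exists for these pages, so the pg_arr entries stay NULL.
 */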
static void bnxt_qplib_fill_user_dma_pages(struct bnxt_qplib_pbl *pbl,
                                           struct bnxt_qplib_sg_info *sginfo)
{
        struct ib_block_iter biter;
        int i = 0;

        rdma_umem_for_each_dma_block(sginfo->umem, &biter, sginfo->pgsize) {
                pbl->pg_map_arr[i] = rdma_block_iter_dma_address(&biter);
                pbl->pg_arr[i] = NULL;
                pbl->pg_count++;
                i++;
        }
}

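/*
 * Allocate one PBL level. The page pointer and DMA address arrays are sized
 * from either the umem DMA block count or sginfo->npages. Pages are then
 * either allocated DMA-coherent (kernel queues) or filled in from the user
 * umem. Returns 0 on success or -ENOMEM, undoing partial allocations on
 * failure.
 */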
static int __alloc_pbl(struct bnxt_qplib_res *res,
                       struct bnxt_qplib_pbl *pbl,
                       struct bnxt_qplib_sg_info *sginfo)
{
        struct pci_dev *pdev = res->pdev;
        bool is_umem = false;
        u32 pages;
        int i;

        if (sginfo->nopte)
                return 0;
        if (sginfo->umem)
                pages = ib_umem_num_dma_blocks(sginfo->umem, sginfo->pgsize);
        else
                pages = sginfo->npages;
        /* page ptr arrays */
        pbl->pg_arr = vmalloc(pages * sizeof(void *));
        if (!pbl->pg_arr)
                return -ENOMEM;

        pbl->pg_map_arr = vmalloc(pages * sizeof(dma_addr_t));
        if (!pbl->pg_map_arr) {
                vfree(pbl->pg_arr);
                pbl->pg_arr = NULL;
                return -ENOMEM;
        }
        pbl->pg_count = 0;
        pbl->pg_size = sginfo->pgsize;

        if (!sginfo->umem) {
                for (i = 0; i < pages; i++) {
                        pbl->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
                                                            pbl->pg_size,
                                                            &pbl->pg_map_arr[i],
                                                            GFP_KERNEL);
                        if (!pbl->pg_arr[i])
                                goto fail;
                        pbl->pg_count++;
                }
        } else {
                is_umem = true;
                bnxt_qplib_fill_user_dma_pages(pbl, sginfo);
        }

        return 0;
fail:
        __free_pbl(res, pbl, is_umem);
        return -ENOMEM;
}

/* HWQ */
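/*
 * Tear down a hardware queue: free every PBL level that was set up. Only
 * the leaf level may be backed by user memory, so is_user is honoured for
 * that level while the indirection levels are always kernel-owned.
 */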
void bnxt_qplib_free_hwq(struct bnxt_qplib_res *res,
                         struct bnxt_qplib_hwq *hwq)
{
        int i;

        if (!hwq->max_elements)
                return;
        if (hwq->level >= PBL_LVL_MAX)
                return;

        for (i = 0; i < hwq->level + 1; i++) {
                if (i == hwq->level)
                        __free_pbl(res, &hwq->pbl[i], hwq->is_user);
                else
                        __free_pbl(res, &hwq->pbl[i], false);
        }

        hwq->level = PBL_LVL_MAX;
        hwq->max_elements = 0;
        hwq->element_size = 0;
        hwq->prod = 0;
        hwq->cons = 0;
        hwq->cp_bit = 0;
}

/* All HWQs are power of 2 in size */

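/*
 * Allocate and initialize a hardware queue described by hwq_attr. Depth and
 * stride are rounded up to powers of two, optional aux-area pages are added,
 * and depending on the resulting page count the queue is built with zero
 * (PBL_LVL_0), one (PBL_LVL_1) or two (PBL_LVL_2) levels of indirection,
 * chaining PDE -> PBL -> PTE pages as needed.
 */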
int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
                              struct bnxt_qplib_hwq_attr *hwq_attr)
{
        u32 npages, aux_slots, pg_size, aux_pages = 0, aux_size = 0;
        struct bnxt_qplib_sg_info sginfo = {};
        u32 depth, stride, npbl, npde;
        dma_addr_t *src_phys_ptr, **dst_virt_ptr;
        struct bnxt_qplib_res *res;
        struct pci_dev *pdev;
        int i, rc, lvl;

        res = hwq_attr->res;
        pdev = res->pdev;
        pg_size = hwq_attr->sginfo->pgsize;
        hwq->level = PBL_LVL_MAX;

        depth = roundup_pow_of_two(hwq_attr->depth);
        stride = roundup_pow_of_two(hwq_attr->stride);
        if (hwq_attr->aux_depth) {
                aux_slots = hwq_attr->aux_depth;
                aux_size = roundup_pow_of_two(hwq_attr->aux_stride);
                aux_pages = (aux_slots * aux_size) / pg_size;
                if ((aux_slots * aux_size) % pg_size)
                        aux_pages++;
        }

        if (!hwq_attr->sginfo->umem) {
                hwq->is_user = false;
                npages = (depth * stride) / pg_size + aux_pages;
                if ((depth * stride) % pg_size)
                        npages++;
                if (!npages)
                        return -EINVAL;
                hwq_attr->sginfo->npages = npages;
        } else {
                npages = ib_umem_num_dma_blocks(hwq_attr->sginfo->umem,
                                                hwq_attr->sginfo->pgsize);
                hwq->is_user = true;
        }

        if (npages == MAX_PBL_LVL_0_PGS && !hwq_attr->sginfo->nopte) {
                /* This request is Level 0, map PTE */
                rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], hwq_attr->sginfo);
                if (rc)
                        goto fail;
                hwq->level = PBL_LVL_0;
                goto done;
        }

        if (npages >= MAX_PBL_LVL_0_PGS) {
                if (npages > MAX_PBL_LVL_1_PGS) {
                        u32 flag = (hwq_attr->type == HWQ_TYPE_L2_CMPL) ?
                                    0 : PTU_PTE_VALID;
                        /* 2 levels of indirection */
                        npbl = npages >> MAX_PBL_LVL_1_PGS_SHIFT;
                        if (npages % BIT(MAX_PBL_LVL_1_PGS_SHIFT))
                                npbl++;
                        npde = npbl >> MAX_PDL_LVL_SHIFT;
                        if (npbl % BIT(MAX_PDL_LVL_SHIFT))
                                npde++;
                        /* Alloc PDE pages */
                        sginfo.pgsize = npde * pg_size;
                        sginfo.npages = 1;
                        rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], &sginfo);
                        if (rc)
                                goto fail;

                        /* Alloc PBL pages */
                        sginfo.npages = npbl;
                        sginfo.pgsize = PAGE_SIZE;
                        rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_1], &sginfo);
                        if (rc)
                                goto fail;
                        /* Fill PDL with PBL page pointers */
                        dst_virt_ptr =
                                (dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
                        src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
                        if (hwq_attr->type == HWQ_TYPE_MR) {
                                /* For MR it is expected that we supply only 1
                                 * contiguous page, i.e. only 1 entry in the
                                 * PDL that will contain all the PBLs for the
                                 * user-supplied memory region.
                                 */
                                for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count;
                                     i++)
                                        dst_virt_ptr[0][i] = src_phys_ptr[i] |
                                                             flag;
                        } else {
                                for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count;
                                     i++)
                                        dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
                                                src_phys_ptr[i] |
                                                PTU_PDE_VALID;
                        }
                        /* Alloc or init PTEs */
                        rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_2],
                                         hwq_attr->sginfo);
                        if (rc)
                                goto fail;
                        hwq->level = PBL_LVL_2;
                        if (hwq_attr->sginfo->nopte)
                                goto done;
                        /* Fill PBLs with PTE pointers */
                        dst_virt_ptr =
                                (dma_addr_t **)hwq->pbl[PBL_LVL_1].pg_arr;
                        src_phys_ptr = hwq->pbl[PBL_LVL_2].pg_map_arr;
                        for (i = 0; i < hwq->pbl[PBL_LVL_2].pg_count; i++) {
                                dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
                                        src_phys_ptr[i] | PTU_PTE_VALID;
                        }
                        if (hwq_attr->type == HWQ_TYPE_QUEUE) {
                                /* Find the last pg of the size */
                                i = hwq->pbl[PBL_LVL_2].pg_count;
                                dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |=
                                        PTU_PTE_LAST;
                                if (i > 1)
                                        dst_virt_ptr[PTR_PG(i - 2)]
                                                    [PTR_IDX(i - 2)] |=
                                                PTU_PTE_NEXT_TO_LAST;
                        }
                } else { /* pages < 512 npbl = 1, npde = 0 */
                        u32 flag = (hwq_attr->type == HWQ_TYPE_L2_CMPL) ?
                                    0 : PTU_PTE_VALID;

                        /* 1 level of indirection */
                        npbl = npages >> MAX_PBL_LVL_1_PGS_SHIFT;
                        if (npages % BIT(MAX_PBL_LVL_1_PGS_SHIFT))
                                npbl++;
                        sginfo.npages = npbl;
                        sginfo.pgsize = PAGE_SIZE;
                        /* Alloc PBL page */
                        rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], &sginfo);
                        if (rc)
                                goto fail;
                        /* Alloc or init PTEs */
                        rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_1],
                                         hwq_attr->sginfo);
                        if (rc)
                                goto fail;
                        hwq->level = PBL_LVL_1;
                        if (hwq_attr->sginfo->nopte)
                                goto done;
                        /* Fill PBL with PTE pointers */
                        dst_virt_ptr =
                                (dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
                        src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
                        for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++)
                                dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
                                        src_phys_ptr[i] | flag;
                        if (hwq_attr->type == HWQ_TYPE_QUEUE) {
                                /* Find the last pg of the size */
                                i = hwq->pbl[PBL_LVL_1].pg_count;
                                dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |=
                                        PTU_PTE_LAST;
                                if (i > 1)
                                        dst_virt_ptr[PTR_PG(i - 2)]
                                                    [PTR_IDX(i - 2)] |=
                                                PTU_PTE_NEXT_TO_LAST;
                        }
                }
        }
done:
        hwq->prod = 0;
        hwq->cons = 0;
        hwq->pdev = pdev;
        hwq->depth = hwq_attr->depth;
        hwq->max_elements = depth;
        hwq->element_size = stride;
        hwq->qe_ppg = pg_size / stride;
        /* For direct access to the elements */
        lvl = hwq->level;
        if (hwq_attr->sginfo->nopte && hwq->level)
                lvl = hwq->level - 1;
        hwq->pbl_ptr = hwq->pbl[lvl].pg_arr;
        hwq->pbl_dma_ptr = hwq->pbl[lvl].pg_map_arr;
        spin_lock_init(&hwq->lock);

        return 0;
fail:
        bnxt_qplib_free_hwq(res, hwq);
        return -ENOMEM;
}

/* Context Tables */
void bnxt_qplib_free_ctx(struct bnxt_qplib_res *res,
                         struct bnxt_qplib_ctx *ctx)
{
        int i;

        bnxt_qplib_free_hwq(res, &ctx->qpc_tbl);
        bnxt_qplib_free_hwq(res, &ctx->mrw_tbl);
        bnxt_qplib_free_hwq(res, &ctx->srqc_tbl);
        bnxt_qplib_free_hwq(res, &ctx->cq_tbl);
        bnxt_qplib_free_hwq(res, &ctx->tim_tbl);
        for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
                bnxt_qplib_free_hwq(res, &ctx->tqm_ctx.qtbl[i]);
        /* restore original pde level before destroy */
        ctx->tqm_ctx.pde.level = ctx->tqm_ctx.pde_level;
        bnxt_qplib_free_hwq(res, &ctx->tqm_ctx.pde);
        bnxt_qplib_free_stats_ctx(res->pdev, &ctx->stats);
}

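/*
 * Allocate the TQM rings: a PDE table of 512 64-bit entries plus one
 * context table per non-zero TQM allocation request, each sized as
 * qpc_count * qcount[i] bytes. The PDE's original PBL level is saved so it
 * can be restored before the table is freed.
 */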
static int bnxt_qplib_alloc_tqm_rings(struct bnxt_qplib_res *res,
                                      struct bnxt_qplib_ctx *ctx)
{
        struct bnxt_qplib_hwq_attr hwq_attr = {};
        struct bnxt_qplib_sg_info sginfo = {};
        struct bnxt_qplib_tqm_ctx *tqmctx;
        int rc = 0;
        int i;

        tqmctx = &ctx->tqm_ctx;

        sginfo.pgsize = PAGE_SIZE;
        sginfo.pgshft = PAGE_SHIFT;
        hwq_attr.sginfo = &sginfo;
        hwq_attr.res = res;
        hwq_attr.type = HWQ_TYPE_CTX;
        hwq_attr.depth = 512;
        hwq_attr.stride = sizeof(u64);
        /* Alloc pdl buffer */
        rc = bnxt_qplib_alloc_init_hwq(&tqmctx->pde, &hwq_attr);
        if (rc)
                goto out;
        /* Save original pdl level */
        tqmctx->pde_level = tqmctx->pde.level;

        hwq_attr.stride = 1;
        for (i = 0; i < MAX_TQM_ALLOC_REQ; i++) {
                if (!tqmctx->qcount[i])
                        continue;
                hwq_attr.depth = ctx->qpc_count * tqmctx->qcount[i];
                rc = bnxt_qplib_alloc_init_hwq(&tqmctx->qtbl[i], &hwq_attr);
                if (rc)
                        goto out;
        }
out:
        return rc;
}

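/*
 * Link the per-request TQM tables into the PDE page. For a 2-level table
 * the PBL page addresses are written; otherwise the single PBL_LVL_0 page
 * address is written, each entry marked PTU_PTE_VALID. The PDE level is
 * then updated from the first populated table so the firmware walks the
 * correct number of indirection levels.
 */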
static void bnxt_qplib_map_tqm_pgtbl(struct bnxt_qplib_tqm_ctx *ctx)
{
        struct bnxt_qplib_hwq *tbl;
        dma_addr_t *dma_ptr;
        __le64 **pbl_ptr, *ptr;
        int i, j, k;
        int fnz_idx = -1;
        int pg_count;

        pbl_ptr = (__le64 **)ctx->pde.pbl_ptr;

        for (i = 0, j = 0; i < MAX_TQM_ALLOC_REQ;
             i++, j += MAX_TQM_ALLOC_BLK_SIZE) {
                tbl = &ctx->qtbl[i];
                if (!tbl->max_elements)
                        continue;
                if (fnz_idx == -1)
                        fnz_idx = i; /* first non-zero index */
                switch (tbl->level) {
                case PBL_LVL_2:
                        pg_count = tbl->pbl[PBL_LVL_1].pg_count;
                        for (k = 0; k < pg_count; k++) {
                                ptr = &pbl_ptr[PTR_PG(j + k)][PTR_IDX(j + k)];
                                dma_ptr = &tbl->pbl[PBL_LVL_1].pg_map_arr[k];
                                *ptr = cpu_to_le64(*dma_ptr | PTU_PTE_VALID);
                        }
                        break;
                case PBL_LVL_1:
                case PBL_LVL_0:
                default:
                        ptr = &pbl_ptr[PTR_PG(j)][PTR_IDX(j)];
                        *ptr = cpu_to_le64(tbl->pbl[PBL_LVL_0].pg_map_arr[0] |
                                           PTU_PTE_VALID);
                        break;
                }
        }
        if (fnz_idx == -1)
                fnz_idx = 0;
        /* update pde level as per page table programming */
        ctx->pde.level = (ctx->qtbl[fnz_idx].level == PBL_LVL_2) ? PBL_LVL_2 :
                          ctx->qtbl[fnz_idx].level + 1;
}

static int bnxt_qplib_setup_tqm_rings(struct bnxt_qplib_res *res,
                                      struct bnxt_qplib_ctx *ctx)
{
        int rc = 0;

        rc = bnxt_qplib_alloc_tqm_rings(res, ctx);
        if (rc)
                goto fail;

        bnxt_qplib_map_tqm_pgtbl(&ctx->tqm_ctx);
fail:
        return rc;
}

/*
 * Routine: bnxt_qplib_alloc_ctx
 * Description:
 *     Context tables are memory regions used by the chip firmware.
 *     The 6 tables defined are:
 *             QPC ctx - holds QP states
 *             MRW ctx - holds memory region and window
 *             SRQ ctx - holds shared RQ states
 *             CQ ctx - holds completion queue states
 *             TQM ctx - holds Tx Queue Manager context
 *             TIM ctx - holds timer context
 *     Depending on the size of the table requested, either a single-page
 *     Buffer List or a 1-to-2-stage indirection Page Directory List plus
 *     1 PBL is used.
 *     The tables are sized as follows:
 *             For 0 < ctx size <= 1 PAGE, 0 levels of indirection are used
 *             For 1 PAGE < ctx size <= 512 entries, 1 level of indirection is used
 *             For 512 < ctx size <= MAX, 2 levels of indirection are used
 * Returns:
 *     0 on success, else -ERRORS
 */
int bnxt_qplib_alloc_ctx(struct bnxt_qplib_res *res,
                         struct bnxt_qplib_ctx *ctx,
                         bool virt_fn, bool is_p5)
{
        struct bnxt_qplib_hwq_attr hwq_attr = {};
        struct bnxt_qplib_sg_info sginfo = {};
        int rc = 0;

        if (virt_fn || is_p5)
                goto stats_alloc;

        /* QPC Tables */
        sginfo.pgsize = PAGE_SIZE;
        sginfo.pgshft = PAGE_SHIFT;
        hwq_attr.sginfo = &sginfo;

        hwq_attr.res = res;
        hwq_attr.depth = ctx->qpc_count;
        hwq_attr.stride = BNXT_QPLIB_MAX_QP_CTX_ENTRY_SIZE;
        hwq_attr.type = HWQ_TYPE_CTX;
        rc = bnxt_qplib_alloc_init_hwq(&ctx->qpc_tbl, &hwq_attr);
        if (rc)
                goto fail;

        /* MRW Tables */
        hwq_attr.depth = ctx->mrw_count;
        hwq_attr.stride = BNXT_QPLIB_MAX_MRW_CTX_ENTRY_SIZE;
        rc = bnxt_qplib_alloc_init_hwq(&ctx->mrw_tbl, &hwq_attr);
        if (rc)
                goto fail;

        /* SRQ Tables */
        hwq_attr.depth = ctx->srqc_count;
        hwq_attr.stride = BNXT_QPLIB_MAX_SRQ_CTX_ENTRY_SIZE;
        rc = bnxt_qplib_alloc_init_hwq(&ctx->srqc_tbl, &hwq_attr);
        if (rc)
                goto fail;

        /* CQ Tables */
        hwq_attr.depth = ctx->cq_count;
        hwq_attr.stride = BNXT_QPLIB_MAX_CQ_CTX_ENTRY_SIZE;
        rc = bnxt_qplib_alloc_init_hwq(&ctx->cq_tbl, &hwq_attr);
        if (rc)
                goto fail;

        /* TQM Buffer */
        rc = bnxt_qplib_setup_tqm_rings(res, ctx);
        if (rc)
                goto fail;
        /* TIM Buffer */
        ctx->tim_tbl.max_elements = ctx->qpc_count * 16;
        hwq_attr.depth = ctx->qpc_count * 16;
        hwq_attr.stride = 1;
        rc = bnxt_qplib_alloc_init_hwq(&ctx->tim_tbl, &hwq_attr);
        if (rc)
                goto fail;
stats_alloc:
        /* Stats */
        rc = bnxt_qplib_alloc_stats_ctx(res->pdev, res->cctx, &ctx->stats);
        if (rc)
                goto fail;

        return 0;

fail:
        bnxt_qplib_free_ctx(res, ctx);
        return rc;
}

static void bnxt_qplib_free_sgid_tbl(struct bnxt_qplib_res *res,
                                     struct bnxt_qplib_sgid_tbl *sgid_tbl)
{
        kfree(sgid_tbl->tbl);
        kfree(sgid_tbl->hw_id);
        kfree(sgid_tbl->ctx);
        kfree(sgid_tbl->vlan);
        sgid_tbl->tbl = NULL;
        sgid_tbl->hw_id = NULL;
        sgid_tbl->ctx = NULL;
        sgid_tbl->vlan = NULL;
        sgid_tbl->max = 0;
        sgid_tbl->active = 0;
}

static int bnxt_qplib_alloc_sgid_tbl(struct bnxt_qplib_res *res,
                                     struct bnxt_qplib_sgid_tbl *sgid_tbl,
                                     u16 max)
{
        sgid_tbl->tbl = kcalloc(max, sizeof(*sgid_tbl->tbl), GFP_KERNEL);
        if (!sgid_tbl->tbl)
                return -ENOMEM;

        sgid_tbl->hw_id = kcalloc(max, sizeof(u16), GFP_KERNEL);
        if (!sgid_tbl->hw_id)
                goto out_free1;

        sgid_tbl->ctx = kcalloc(max, sizeof(void *), GFP_KERNEL);
        if (!sgid_tbl->ctx)
                goto out_free2;

        sgid_tbl->vlan = kcalloc(max, sizeof(u8), GFP_KERNEL);
        if (!sgid_tbl->vlan)
                goto out_free3;

        sgid_tbl->max = max;
        return 0;
out_free3:
        kfree(sgid_tbl->ctx);
        sgid_tbl->ctx = NULL;
out_free2:
        kfree(sgid_tbl->hw_id);
        sgid_tbl->hw_id = NULL;
out_free1:
        kfree(sgid_tbl->tbl);
        sgid_tbl->tbl = NULL;
        return -ENOMEM;
}

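/*
 * Remove every GID that is still programmed: any entry that differs from
 * the all-zero GID is deleted from the hardware via bnxt_qplib_del_sgid(),
 * then the shadow table, hw_id and vlan arrays are reset.
 */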
static void bnxt_qplib_cleanup_sgid_tbl(struct bnxt_qplib_res *res,
                                        struct bnxt_qplib_sgid_tbl *sgid_tbl)
{
        int i;

        for (i = 0; i < sgid_tbl->max; i++) {
                if (memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero,
                           sizeof(bnxt_qplib_gid_zero)))
                        bnxt_qplib_del_sgid(sgid_tbl, &sgid_tbl->tbl[i].gid,
                                            sgid_tbl->tbl[i].vlan_id, true);
        }
        memset(sgid_tbl->tbl, 0, sizeof(*sgid_tbl->tbl) * sgid_tbl->max);
        memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
        memset(sgid_tbl->vlan, 0, sizeof(u8) * sgid_tbl->max);
        sgid_tbl->active = 0;
}

static void bnxt_qplib_init_sgid_tbl(struct bnxt_qplib_sgid_tbl *sgid_tbl,
                                     struct net_device *netdev)
{
        u32 i;

        for (i = 0; i < sgid_tbl->max; i++)
                sgid_tbl->tbl[i].vlan_id = 0xffff;

        memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
}

/* PDs */
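/*
 * PD ids are managed as a bitmap in which a set bit means "free".
 * Allocation finds the first set bit, clears it and returns its position as
 * the PD id; freeing sets the bit again.
 */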
int bnxt_qplib_alloc_pd(struct bnxt_qplib_pd_tbl *pdt, struct bnxt_qplib_pd *pd)
{
        u32 bit_num;

        bit_num = find_first_bit(pdt->tbl, pdt->max);
        if (bit_num == pdt->max)
                return -ENOMEM;

        /* Found unused PD */
        clear_bit(bit_num, pdt->tbl);
        pd->id = bit_num;
        return 0;
}

int bnxt_qplib_dealloc_pd(struct bnxt_qplib_res *res,
                          struct bnxt_qplib_pd_tbl *pdt,
                          struct bnxt_qplib_pd *pd)
{
        if (test_and_set_bit(pd->id, pdt->tbl)) {
                dev_warn(&res->pdev->dev, "Freeing an unused PD? pdn = %d\n",
                         pd->id);
                return -EINVAL;
        }
        pd->id = 0;
        return 0;
}

static void bnxt_qplib_free_pd_tbl(struct bnxt_qplib_pd_tbl *pdt)
{
        kfree(pdt->tbl);
        pdt->tbl = NULL;
        pdt->max = 0;
}

static int bnxt_qplib_alloc_pd_tbl(struct bnxt_qplib_res *res,
                                   struct bnxt_qplib_pd_tbl *pdt,
                                   u32 max)
{
        u32 bytes;

        bytes = max >> 3;
        if (!bytes)
                bytes = 1;
        pdt->tbl = kmalloc(bytes, GFP_KERNEL);
        if (!pdt->tbl)
                return -ENOMEM;

        pdt->max = max;
        memset((u8 *)pdt->tbl, 0xFF, bytes);

        return 0;
}

/* DPIs */
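/*
 * DPIs use the same "set bit means free" bitmap scheme as PDs. On success
 * the caller's dpi is filled with the id, the kernel-mapped doorbell
 * address (dbr) and the unmapped bus address (umdbr), each at a PAGE_SIZE
 * offset per DPI within the doorbell BAR.
 */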
int bnxt_qplib_alloc_dpi(struct bnxt_qplib_dpi_tbl *dpit,
                         struct bnxt_qplib_dpi *dpi,
                         void *app)
{
        u32 bit_num;

        bit_num = find_first_bit(dpit->tbl, dpit->max);
        if (bit_num == dpit->max)
                return -ENOMEM;

        /* Found unused DPI */
        clear_bit(bit_num, dpit->tbl);
        dpit->app_tbl[bit_num] = app;

        dpi->dpi = bit_num;
        dpi->dbr = dpit->dbr_bar_reg_iomem + (bit_num * PAGE_SIZE);
        dpi->umdbr = dpit->unmapped_dbr + (bit_num * PAGE_SIZE);

        return 0;
}

int bnxt_qplib_dealloc_dpi(struct bnxt_qplib_res *res,
                           struct bnxt_qplib_dpi_tbl *dpit,
                           struct bnxt_qplib_dpi *dpi)
{
        if (dpi->dpi >= dpit->max) {
                dev_warn(&res->pdev->dev, "Invalid DPI? dpi = %d\n", dpi->dpi);
                return -EINVAL;
        }
        if (test_and_set_bit(dpi->dpi, dpit->tbl)) {
                dev_warn(&res->pdev->dev, "Freeing an unused DPI? dpi = %d\n",
                         dpi->dpi);
                return -EINVAL;
        }
        if (dpit->app_tbl)
                dpit->app_tbl[dpi->dpi] = NULL;
        memset(dpi, 0, sizeof(*dpi));

        return 0;
}

static void bnxt_qplib_free_dpi_tbl(struct bnxt_qplib_res *res,
                                    struct bnxt_qplib_dpi_tbl *dpit)
{
        kfree(dpit->tbl);
        kfree(dpit->app_tbl);
        if (dpit->dbr_bar_reg_iomem)
                pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem);
        memset(dpit, 0, sizeof(*dpit));
}

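/*
 * Set up the DPI table: ioremap the doorbell BAR past the portion used by
 * the L2 driver (dbr_offset), require the remaining length to be page
 * aligned, and size the table as one DPI per page. A free bitmap (all bits
 * set) and a per-DPI app pointer table are allocated alongside the mapping.
 */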
static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res *res,
                                    struct bnxt_qplib_dpi_tbl *dpit,
                                    u32 dbr_offset)
{
        u32 dbr_bar_reg = RCFW_DBR_PCI_BAR_REGION;
        resource_size_t bar_reg_base;
        u32 dbr_len, bytes;

        if (dpit->dbr_bar_reg_iomem) {
                dev_err(&res->pdev->dev, "DBR BAR region %d already mapped\n",
                        dbr_bar_reg);
                return -EALREADY;
        }

        bar_reg_base = pci_resource_start(res->pdev, dbr_bar_reg);
        if (!bar_reg_base) {
                dev_err(&res->pdev->dev, "BAR region %d resc start failed\n",
                        dbr_bar_reg);
                return -ENOMEM;
        }

        dbr_len = pci_resource_len(res->pdev, dbr_bar_reg) - dbr_offset;
        if (!dbr_len || ((dbr_len & (PAGE_SIZE - 1)) != 0)) {
                dev_err(&res->pdev->dev, "Invalid DBR length %d\n", dbr_len);
                return -ENOMEM;
        }

        dpit->dbr_bar_reg_iomem = ioremap(bar_reg_base + dbr_offset,
                                          dbr_len);
        if (!dpit->dbr_bar_reg_iomem) {
                dev_err(&res->pdev->dev,
                        "FP: DBR BAR region %d mapping failed\n", dbr_bar_reg);
                return -ENOMEM;
        }

        dpit->unmapped_dbr = bar_reg_base + dbr_offset;
        dpit->max = dbr_len / PAGE_SIZE;

        dpit->app_tbl = kcalloc(dpit->max, sizeof(void *), GFP_KERNEL);
        if (!dpit->app_tbl)
                goto unmap_io;

        bytes = dpit->max >> 3;
        if (!bytes)
                bytes = 1;

        dpit->tbl = kmalloc(bytes, GFP_KERNEL);
        if (!dpit->tbl) {
                kfree(dpit->app_tbl);
                dpit->app_tbl = NULL;
                goto unmap_io;
        }

        memset((u8 *)dpit->tbl, 0xFF, bytes);

        return 0;

unmap_io:
        pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem);
        dpit->dbr_bar_reg_iomem = NULL;
        return -ENOMEM;
}

/* Stats */
static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
                                      struct bnxt_qplib_stats *stats)
{
        if (stats->dma) {
                dma_free_coherent(&pdev->dev, stats->size,
                                  stats->dma, stats->dma_map);
        }
        memset(stats, 0, sizeof(*stats));
        stats->fw_id = -1;
}

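/*
 * Allocate the DMA-coherent buffer used for the hardware statistics
 * context; its size comes from the chip context (hw_stats_size). The
 * firmware id is initialized to -1.
 */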
static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
                                      struct bnxt_qplib_chip_ctx *cctx,
                                      struct bnxt_qplib_stats *stats)
{
        memset(stats, 0, sizeof(*stats));
        stats->fw_id = -1;
        stats->size = cctx->hw_stats_size;
        stats->dma = dma_alloc_coherent(&pdev->dev, stats->size,
                                        &stats->dma_map, GFP_KERNEL);
        if (!stats->dma) {
                dev_err(&pdev->dev, "Stats DMA allocation failed\n");
                return -ENOMEM;
        }
        return 0;
}

void bnxt_qplib_cleanup_res(struct bnxt_qplib_res *res)
{
        bnxt_qplib_cleanup_sgid_tbl(res, &res->sgid_tbl);
}

int bnxt_qplib_init_res(struct bnxt_qplib_res *res)
{
        bnxt_qplib_init_sgid_tbl(&res->sgid_tbl, res->netdev);

        return 0;
}

void bnxt_qplib_free_res(struct bnxt_qplib_res *res)
{
        bnxt_qplib_free_sgid_tbl(res, &res->sgid_tbl);
        bnxt_qplib_free_pd_tbl(&res->pd_tbl);
        bnxt_qplib_free_dpi_tbl(res, &res->dpi_tbl);
}

int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct pci_dev *pdev,
                         struct net_device *netdev,
                         struct bnxt_qplib_dev_attr *dev_attr)
{
        int rc = 0;

        res->pdev = pdev;
        res->netdev = netdev;

        rc = bnxt_qplib_alloc_sgid_tbl(res, &res->sgid_tbl, dev_attr->max_sgid);
        if (rc)
                goto fail;

        rc = bnxt_qplib_alloc_pd_tbl(res, &res->pd_tbl, dev_attr->max_pd);
        if (rc)
                goto fail;

        rc = bnxt_qplib_alloc_dpi_tbl(res, &res->dpi_tbl, dev_attr->l2_db_size);
        if (rc)
                goto fail;

        return 0;
fail:
        bnxt_qplib_free_res(res);
        return rc;
}

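/*
 * Enable PCIe AtomicOp requests toward the root port for 32-bit and 64-bit
 * completers; if either capability is missing, atomics are reported as
 * unsupported. Finally confirm that the AtomicOp requester enable bit is
 * actually set in Device Control 2.
 */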
int bnxt_qplib_determine_atomics(struct pci_dev *dev)
{
        int comp;
        u16 ctl2;

        comp = pci_enable_atomic_ops_to_root(dev,
                                             PCI_EXP_DEVCAP2_ATOMIC_COMP32);
        if (comp)
                return -EOPNOTSUPP;
        comp = pci_enable_atomic_ops_to_root(dev,
                                             PCI_EXP_DEVCAP2_ATOMIC_COMP64);
        if (comp)
                return -EOPNOTSUPP;
        pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &ctl2);
        return !(ctl2 & PCI_EXP_DEVCTL2_ATOMIC_REQ);
}