/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: QPLib resource manager
 */

#define dev_fmt(fmt) "QPLIB: " fmt

#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/inetdevice.h>
#include <linux/dma-mapping.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>

#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
#include "qplib_rcfw.h"

static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
				      struct bnxt_qplib_stats *stats);
static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
				      struct bnxt_qplib_chip_ctx *cctx,
				      struct bnxt_qplib_stats *stats);

/* PBL */
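/*
 * A Page Buffer List (PBL) describes one level of a HWQ page table:
 * pg_arr[] holds the kernel virtual address of each backing page (NULL
 * for user-supplied memory, which has no kernel mapping) and
 * pg_map_arr[] holds the matching DMA address that is programmed into
 * the hardware.
 */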
static void __free_pbl(struct bnxt_qplib_res *res, struct bnxt_qplib_pbl *pbl,
		       bool is_umem)
{
	struct pci_dev *pdev = res->pdev;
	int i;

	if (!is_umem) {
		for (i = 0; i < pbl->pg_count; i++) {
			if (pbl->pg_arr[i])
				dma_free_coherent(&pdev->dev, pbl->pg_size,
						  (void *)((unsigned long)
						   pbl->pg_arr[i] &
						  PAGE_MASK),
						  pbl->pg_map_arr[i]);
			else
				dev_warn(&pdev->dev,
					 "PBL free pg_arr[%d] empty?!\n", i);
			pbl->pg_arr[i] = NULL;
		}
	}
	vfree(pbl->pg_arr);
	pbl->pg_arr = NULL;
	vfree(pbl->pg_map_arr);
	pbl->pg_map_arr = NULL;
	pbl->pg_count = 0;
	pbl->pg_size = 0;
}

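/*
 * For user memory only DMA addresses are recorded: the umem is walked
 * in pgsize-sized DMA blocks and pg_arr[] is left NULL, since the
 * driver never touches user pages through a kernel mapping.
 */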
static void bnxt_qplib_fill_user_dma_pages(struct bnxt_qplib_pbl *pbl,
					   struct bnxt_qplib_sg_info *sginfo)
{
	struct ib_block_iter biter;
	int i = 0;

	rdma_umem_for_each_dma_block(sginfo->umem, &biter, sginfo->pgsize) {
		pbl->pg_map_arr[i] = rdma_block_iter_dma_address(&biter);
		pbl->pg_arr[i] = NULL;
		pbl->pg_count++;
		i++;
	}
}

static int __alloc_pbl(struct bnxt_qplib_res *res,
		       struct bnxt_qplib_pbl *pbl,
		       struct bnxt_qplib_sg_info *sginfo)
{
	struct pci_dev *pdev = res->pdev;
	bool is_umem = false;
	u32 pages;
	int i;

	if (sginfo->nopte)
		return 0;
	if (sginfo->umem)
		pages = ib_umem_num_dma_blocks(sginfo->umem, sginfo->pgsize);
	else
		pages = sginfo->npages;
	/* page ptr arrays */
	pbl->pg_arr = vmalloc(pages * sizeof(void *));
	if (!pbl->pg_arr)
		return -ENOMEM;

	pbl->pg_map_arr = vmalloc(pages * sizeof(dma_addr_t));
	if (!pbl->pg_map_arr) {
		vfree(pbl->pg_arr);
		pbl->pg_arr = NULL;
		return -ENOMEM;
	}
	pbl->pg_count = 0;
	pbl->pg_size = sginfo->pgsize;

	if (!sginfo->umem) {
		for (i = 0; i < pages; i++) {
			pbl->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
							    pbl->pg_size,
							    &pbl->pg_map_arr[i],
							    GFP_KERNEL);
			if (!pbl->pg_arr[i])
				goto fail;
			pbl->pg_count++;
		}
	} else {
		is_umem = true;
		bnxt_qplib_fill_user_dma_pages(pbl, sginfo);
	}

	return 0;
fail:
	__free_pbl(res, pbl, is_umem);
	return -ENOMEM;
}

/* HWQ */
void bnxt_qplib_free_hwq(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_hwq *hwq)
{
	int i;

	if (!hwq->max_elements)
		return;
	if (hwq->level >= PBL_LVL_MAX)
		return;

	for (i = 0; i < hwq->level + 1; i++) {
		if (i == hwq->level)
			__free_pbl(res, &hwq->pbl[i], hwq->is_user);
		else
			__free_pbl(res, &hwq->pbl[i], false);
	}

	hwq->level = PBL_LVL_MAX;
	hwq->max_elements = 0;
	hwq->element_size = 0;
	hwq->prod = 0;
	hwq->cons = 0;
	hwq->cp_bit = 0;
}

/* All HWQs are power of 2 in size */

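/*
 * Page table layout by page count (a sketch; the break-even points
 * assume 4 KiB pages and 8-byte PBL entries, i.e. 512 entries per
 * PBL page):
 *
 *	1 page		-> PBL_LVL_0: the page is mapped directly
 *	2..512 pages	-> PBL_LVL_1: one level of PBL pages holds
 *			   the PTEs
 *	> 512 pages	-> PBL_LVL_2: a PDE page points to PBL pages,
 *			   which in turn point to the PTE pages
 */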
int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
			      struct bnxt_qplib_hwq_attr *hwq_attr)
{
	u32 npages, aux_slots, pg_size, aux_pages = 0, aux_size = 0;
	struct bnxt_qplib_sg_info sginfo = {};
	u32 depth, stride, npbl, npde;
	dma_addr_t *src_phys_ptr, **dst_virt_ptr;
	struct bnxt_qplib_res *res;
	struct pci_dev *pdev;
	int i, rc, lvl;

	res = hwq_attr->res;
	pdev = res->pdev;
	pg_size = hwq_attr->sginfo->pgsize;
	hwq->level = PBL_LVL_MAX;

	depth = roundup_pow_of_two(hwq_attr->depth);
	stride = roundup_pow_of_two(hwq_attr->stride);
	if (hwq_attr->aux_depth) {
		aux_slots = hwq_attr->aux_depth;
		aux_size = roundup_pow_of_two(hwq_attr->aux_stride);
		aux_pages = (aux_slots * aux_size) / pg_size;
		if ((aux_slots * aux_size) % pg_size)
			aux_pages++;
	}

	if (!hwq_attr->sginfo->umem) {
		hwq->is_user = false;
		npages = (depth * stride) / pg_size + aux_pages;
		if ((depth * stride) % pg_size)
			npages++;
		if (!npages)
			return -EINVAL;
		hwq_attr->sginfo->npages = npages;
	} else {
		npages = ib_umem_num_dma_blocks(hwq_attr->sginfo->umem,
						hwq_attr->sginfo->pgsize);
		hwq->is_user = true;
	}

	if (npages == MAX_PBL_LVL_0_PGS) {
		/* This request is Level 0, map PTE */
		rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], hwq_attr->sginfo);
		if (rc)
			goto fail;
		hwq->level = PBL_LVL_0;
	}

	if (npages > MAX_PBL_LVL_0_PGS) {
		if (npages > MAX_PBL_LVL_1_PGS) {
			u32 flag = (hwq_attr->type == HWQ_TYPE_L2_CMPL) ?
				    0 : PTU_PTE_VALID;
			/* 2 levels of indirection */
			npbl = npages >> MAX_PBL_LVL_1_PGS_SHIFT;
			if (npages % BIT(MAX_PBL_LVL_1_PGS_SHIFT))
				npbl++;
			npde = npbl >> MAX_PDL_LVL_SHIFT;
			if (npbl % BIT(MAX_PDL_LVL_SHIFT))
				npde++;
			/* Alloc PDE pages */
			sginfo.pgsize = npde * pg_size;
			sginfo.npages = 1;
			rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], &sginfo);
			if (rc)
				goto fail;
			/* Alloc PBL pages */
			sginfo.npages = npbl;
			sginfo.pgsize = PAGE_SIZE;
			rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_1], &sginfo);
			if (rc)
				goto fail;
			/* Fill PDL with PBL page pointers */
			dst_virt_ptr =
				(dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
			src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
			if (hwq_attr->type == HWQ_TYPE_MR) {
			/* For an MR it is expected that we supply only 1
			 * contiguous page, i.e. only 1 entry in the PDL that
			 * will contain all the PBLs for the user-supplied
			 * memory region
			 */
				for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count;
				     i++)
					dst_virt_ptr[0][i] = src_phys_ptr[i] |
						flag;
			} else {
				for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count;
				     i++)
					dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
						src_phys_ptr[i] |
						PTU_PDE_VALID;
			}
			/* Alloc or init PTEs */
			rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_2],
					 hwq_attr->sginfo);
			if (rc)
				goto fail;
			hwq->level = PBL_LVL_2;
			if (hwq_attr->sginfo->nopte)
				goto done;
			/* Fill PBLs with PTE pointers */
			dst_virt_ptr =
				(dma_addr_t **)hwq->pbl[PBL_LVL_1].pg_arr;
			src_phys_ptr = hwq->pbl[PBL_LVL_2].pg_map_arr;
			for (i = 0; i < hwq->pbl[PBL_LVL_2].pg_count; i++) {
				dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
					src_phys_ptr[i] | PTU_PTE_VALID;
			}
			if (hwq_attr->type == HWQ_TYPE_QUEUE) {
				/* Find the last pg of the size */
				i = hwq->pbl[PBL_LVL_2].pg_count;
				dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |=
								  PTU_PTE_LAST;
				if (i > 1)
					dst_virt_ptr[PTR_PG(i - 2)]
						    [PTR_IDX(i - 2)] |=
						    PTU_PTE_NEXT_TO_LAST;
			}
		} else { /* pages < 512 npbl = 1, npde = 0 */
			u32 flag = (hwq_attr->type == HWQ_TYPE_L2_CMPL) ?
				    0 : PTU_PTE_VALID;

			/* 1 level of indirection */
			npbl = npages >> MAX_PBL_LVL_1_PGS_SHIFT;
			if (npages % BIT(MAX_PBL_LVL_1_PGS_SHIFT))
				npbl++;
			sginfo.npages = npbl;
			sginfo.pgsize = PAGE_SIZE;
			/* Alloc PBL page */
			rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], &sginfo);
			if (rc)
				goto fail;
			/* Alloc or init PTEs */
			rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_1],
					 hwq_attr->sginfo);
			if (rc)
				goto fail;
			hwq->level = PBL_LVL_1;
			if (hwq_attr->sginfo->nopte)
				goto done;
			/* Fill PBL with PTE pointers */
			dst_virt_ptr =
				(dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
			src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
			for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++)
				dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
					src_phys_ptr[i] | flag;
			if (hwq_attr->type == HWQ_TYPE_QUEUE) {
				/* Find the last pg of the size */
				i = hwq->pbl[PBL_LVL_1].pg_count;
				dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |=
								  PTU_PTE_LAST;
				if (i > 1)
					dst_virt_ptr[PTR_PG(i - 2)]
						    [PTR_IDX(i - 2)] |=
						    PTU_PTE_NEXT_TO_LAST;
			}
		}
	}
done:
	hwq->prod = 0;
	hwq->cons = 0;
	hwq->pdev = pdev;
	hwq->depth = hwq_attr->depth;
	hwq->max_elements = depth;
	hwq->element_size = stride;
	hwq->qe_ppg = pg_size / stride;
	/* For direct access to the elements */
	lvl = hwq->level;
	if (hwq_attr->sginfo->nopte && hwq->level)
		lvl = hwq->level - 1;
	hwq->pbl_ptr = hwq->pbl[lvl].pg_arr;
	hwq->pbl_dma_ptr = hwq->pbl[lvl].pg_map_arr;
	spin_lock_init(&hwq->lock);

	return 0;
fail:
	bnxt_qplib_free_hwq(res, hwq);
	return -ENOMEM;
}
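
/*
 * Minimal usage sketch, mirroring bnxt_qplib_alloc_tqm_rings() below
 * (field values are illustrative only):
 *
 *	struct bnxt_qplib_hwq_attr hwq_attr = {};
 *	struct bnxt_qplib_sg_info sginfo = {};
 *
 *	sginfo.pgsize = PAGE_SIZE;
 *	sginfo.pgshft = PAGE_SHIFT;
 *	hwq_attr.res = res;
 *	hwq_attr.sginfo = &sginfo;
 *	hwq_attr.type = HWQ_TYPE_CTX;
 *	hwq_attr.depth = 512;
 *	hwq_attr.stride = sizeof(u64);
 *	rc = bnxt_qplib_alloc_init_hwq(&hwq, &hwq_attr);
 */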

/* Context Tables */
void bnxt_qplib_free_ctx(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_ctx *ctx)
{
	int i;

	bnxt_qplib_free_hwq(res, &ctx->qpc_tbl);
	bnxt_qplib_free_hwq(res, &ctx->mrw_tbl);
	bnxt_qplib_free_hwq(res, &ctx->srqc_tbl);
	bnxt_qplib_free_hwq(res, &ctx->cq_tbl);
	bnxt_qplib_free_hwq(res, &ctx->tim_tbl);
	for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
		bnxt_qplib_free_hwq(res, &ctx->tqm_ctx.qtbl[i]);
	/* restore original pde level before destroy */
	ctx->tqm_ctx.pde.level = ctx->tqm_ctx.pde_level;
	bnxt_qplib_free_hwq(res, &ctx->tqm_ctx.pde);
	bnxt_qplib_free_stats_ctx(res->pdev, &ctx->stats);
}

static int bnxt_qplib_alloc_tqm_rings(struct bnxt_qplib_res *res,
				      struct bnxt_qplib_ctx *ctx)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};
	struct bnxt_qplib_tqm_ctx *tqmctx;
	int rc = 0;
	int i;

	tqmctx = &ctx->tqm_ctx;

	sginfo.pgsize = PAGE_SIZE;
	sginfo.pgshft = PAGE_SHIFT;
	hwq_attr.sginfo = &sginfo;
	hwq_attr.res = res;
	hwq_attr.type = HWQ_TYPE_CTX;
	hwq_attr.depth = 512;
	hwq_attr.stride = sizeof(u64);
	/* Alloc pdl buffer */
	rc = bnxt_qplib_alloc_init_hwq(&tqmctx->pde, &hwq_attr);
	if (rc)
		goto out;
	/* Save original pdl level */
	tqmctx->pde_level = tqmctx->pde.level;

	hwq_attr.stride = 1;
	for (i = 0; i < MAX_TQM_ALLOC_REQ; i++) {
		if (!tqmctx->qcount[i])
			continue;
		hwq_attr.depth = ctx->qpc_count * tqmctx->qcount[i];
		rc = bnxt_qplib_alloc_init_hwq(&tqmctx->qtbl[i], &hwq_attr);
		if (rc)
			goto out;
	}
out:
	return rc;
}

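/*
 * Each TQM ring i owns a fixed window of MAX_TQM_ALLOC_BLK_SIZE slots
 * in the PDE buffer, starting at i * MAX_TQM_ALLOC_BLK_SIZE. A 2-level
 * ring has its PBL page addresses written into that window; smaller
 * rings get a single entry pointing at their level-0 page.
 */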
static void bnxt_qplib_map_tqm_pgtbl(struct bnxt_qplib_tqm_ctx *ctx)
{
	struct bnxt_qplib_hwq *tbl;
	dma_addr_t *dma_ptr;
	__le64 **pbl_ptr, *ptr;
	int i, j, k;
	int fnz_idx = -1;
	int pg_count;

	pbl_ptr = (__le64 **)ctx->pde.pbl_ptr;

	for (i = 0, j = 0; i < MAX_TQM_ALLOC_REQ;
	     i++, j += MAX_TQM_ALLOC_BLK_SIZE) {
		tbl = &ctx->qtbl[i];
		if (!tbl->max_elements)
			continue;
		if (fnz_idx == -1)
			fnz_idx = i; /* first non-zero index */
		switch (tbl->level) {
		case PBL_LVL_2:
			pg_count = tbl->pbl[PBL_LVL_1].pg_count;
			for (k = 0; k < pg_count; k++) {
				ptr = &pbl_ptr[PTR_PG(j + k)][PTR_IDX(j + k)];
				dma_ptr = &tbl->pbl[PBL_LVL_1].pg_map_arr[k];
				*ptr = cpu_to_le64(*dma_ptr | PTU_PTE_VALID);
			}
			break;
		case PBL_LVL_1:
		case PBL_LVL_0:
		default:
			ptr = &pbl_ptr[PTR_PG(j)][PTR_IDX(j)];
			*ptr = cpu_to_le64(tbl->pbl[PBL_LVL_0].pg_map_arr[0] |
					   PTU_PTE_VALID);
			break;
		}
	}
	if (fnz_idx == -1)
		fnz_idx = 0;
	/* update pde level as per page table programming */
	ctx->pde.level = (ctx->qtbl[fnz_idx].level == PBL_LVL_2) ? PBL_LVL_2 :
			  ctx->qtbl[fnz_idx].level + 1;
}

static int bnxt_qplib_setup_tqm_rings(struct bnxt_qplib_res *res,
				      struct bnxt_qplib_ctx *ctx)
{
	int rc = 0;

	rc = bnxt_qplib_alloc_tqm_rings(res, ctx);
	if (rc)
		goto fail;

	bnxt_qplib_map_tqm_pgtbl(&ctx->tqm_ctx);
fail:
	return rc;
}

/*
 * Routine: bnxt_qplib_alloc_ctx
 * Description:
 *     Context tables are memories which are used by the chip fw.
 *     The 6 tables defined are:
 *             QPC ctx - holds QP states
 *             MRW ctx - holds memory region and window
 *             SRQ ctx - holds shared RQ states
 *             CQ ctx - holds completion queue states
 *             TQM ctx - holds Tx Queue Manager context
 *             TIM ctx - holds timer context
 *     Depending on the size of the table requested, either a single-page
 *     Page Buffer List or a one- or two-level indirection Page Directory
 *     List plus PBL is used.
 *     The tables are employed as follows:
 *             For 0      < ctx size <= 1 page, 0 levels of indirection
 *             For 1 page < ctx size <= 512 pages, 1 level of indirection
 *             For 512    < ctx size <= MAX, 2 levels of indirection
 * Returns:
 *     0 if success, else -ERRORS
 */
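/*
 * For example (hypothetical numbers, assuming 4 KiB pages): a QPC
 * table for 64K QPs with a 256 B context entry spans 16 MiB, i.e.
 * 4096 pages, and therefore needs 2 levels of indirection, while a
 * 128 KiB table (32 pages) needs only 1.
 */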
int bnxt_qplib_alloc_ctx(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_ctx *ctx,
			 bool virt_fn, bool is_p5)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};
	int rc = 0;

	if (virt_fn || is_p5)
		goto stats_alloc;

	/* QPC Tables */
	sginfo.pgsize = PAGE_SIZE;
	sginfo.pgshft = PAGE_SHIFT;
	hwq_attr.sginfo = &sginfo;

	hwq_attr.res = res;
	hwq_attr.depth = ctx->qpc_count;
	hwq_attr.stride = BNXT_QPLIB_MAX_QP_CTX_ENTRY_SIZE;
	hwq_attr.type = HWQ_TYPE_CTX;
	rc = bnxt_qplib_alloc_init_hwq(&ctx->qpc_tbl, &hwq_attr);
	if (rc)
		goto fail;

	/* MRW Tables */
	hwq_attr.depth = ctx->mrw_count;
	hwq_attr.stride = BNXT_QPLIB_MAX_MRW_CTX_ENTRY_SIZE;
	rc = bnxt_qplib_alloc_init_hwq(&ctx->mrw_tbl, &hwq_attr);
	if (rc)
		goto fail;

	/* SRQ Tables */
	hwq_attr.depth = ctx->srqc_count;
	hwq_attr.stride = BNXT_QPLIB_MAX_SRQ_CTX_ENTRY_SIZE;
	rc = bnxt_qplib_alloc_init_hwq(&ctx->srqc_tbl, &hwq_attr);
	if (rc)
		goto fail;

	/* CQ Tables */
	hwq_attr.depth = ctx->cq_count;
	hwq_attr.stride = BNXT_QPLIB_MAX_CQ_CTX_ENTRY_SIZE;
	rc = bnxt_qplib_alloc_init_hwq(&ctx->cq_tbl, &hwq_attr);
	if (rc)
		goto fail;

	/* TQM Buffer */
	rc = bnxt_qplib_setup_tqm_rings(res, ctx);
	if (rc)
		goto fail;
	/* TIM Buffer */
	ctx->tim_tbl.max_elements = ctx->qpc_count * 16;
	hwq_attr.depth = ctx->qpc_count * 16;
	hwq_attr.stride = 1;
	rc = bnxt_qplib_alloc_init_hwq(&ctx->tim_tbl, &hwq_attr);
	if (rc)
		goto fail;
stats_alloc:
	/* Stats */
	rc = bnxt_qplib_alloc_stats_ctx(res->pdev, res->cctx, &ctx->stats);
	if (rc)
		goto fail;

	return 0;

fail:
	bnxt_qplib_free_ctx(res, ctx);
	return rc;
}

/* GUID */
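/*
 * Standard MAC-48 to EUI-64 expansion: invert the universal/local bit
 * of the first octet and splice 0xff, 0xfe into the middle, e.g. MAC
 * 00:0a:f7:12:34:56 becomes GUID 02:0a:f7:ff:fe:12:34:56.
 */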
void bnxt_qplib_get_guid(u8 *dev_addr, u8 *guid)
{
	u8 mac[ETH_ALEN];

	/* MAC-48 to EUI-64 mapping */
	memcpy(mac, dev_addr, ETH_ALEN);
	guid[0] = mac[0] ^ 2;
	guid[1] = mac[1];
	guid[2] = mac[2];
	guid[3] = 0xff;
	guid[4] = 0xfe;
	guid[5] = mac[3];
	guid[6] = mac[4];
	guid[7] = mac[5];
}

static void bnxt_qplib_free_sgid_tbl(struct bnxt_qplib_res *res,
				     struct bnxt_qplib_sgid_tbl *sgid_tbl)
{
	kfree(sgid_tbl->tbl);
	kfree(sgid_tbl->hw_id);
	kfree(sgid_tbl->ctx);
	kfree(sgid_tbl->vlan);
	sgid_tbl->tbl = NULL;
	sgid_tbl->hw_id = NULL;
	sgid_tbl->ctx = NULL;
	sgid_tbl->vlan = NULL;
	sgid_tbl->max = 0;
	sgid_tbl->active = 0;
}

static int bnxt_qplib_alloc_sgid_tbl(struct bnxt_qplib_res *res,
				     struct bnxt_qplib_sgid_tbl *sgid_tbl,
				     u16 max)
{
	sgid_tbl->tbl = kcalloc(max, sizeof(*sgid_tbl->tbl), GFP_KERNEL);
	if (!sgid_tbl->tbl)
		return -ENOMEM;

	sgid_tbl->hw_id = kcalloc(max, sizeof(u16), GFP_KERNEL);
	if (!sgid_tbl->hw_id)
		goto out_free1;

	sgid_tbl->ctx = kcalloc(max, sizeof(void *), GFP_KERNEL);
	if (!sgid_tbl->ctx)
		goto out_free2;

	sgid_tbl->vlan = kcalloc(max, sizeof(u8), GFP_KERNEL);
	if (!sgid_tbl->vlan)
		goto out_free3;

	sgid_tbl->max = max;
	return 0;
out_free3:
	kfree(sgid_tbl->ctx);
	sgid_tbl->ctx = NULL;
out_free2:
	kfree(sgid_tbl->hw_id);
	sgid_tbl->hw_id = NULL;
out_free1:
	kfree(sgid_tbl->tbl);
	sgid_tbl->tbl = NULL;
	return -ENOMEM;
}

static void bnxt_qplib_cleanup_sgid_tbl(struct bnxt_qplib_res *res,
					struct bnxt_qplib_sgid_tbl *sgid_tbl)
{
	int i;

	for (i = 0; i < sgid_tbl->max; i++) {
		if (memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero,
			   sizeof(bnxt_qplib_gid_zero)))
			bnxt_qplib_del_sgid(sgid_tbl, &sgid_tbl->tbl[i].gid,
					    sgid_tbl->tbl[i].vlan_id, true);
	}
	memset(sgid_tbl->tbl, 0, sizeof(*sgid_tbl->tbl) * sgid_tbl->max);
	memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
	memset(sgid_tbl->vlan, 0, sizeof(u8) * sgid_tbl->max);
	sgid_tbl->active = 0;
}

static void bnxt_qplib_init_sgid_tbl(struct bnxt_qplib_sgid_tbl *sgid_tbl,
				     struct net_device *netdev)
{
	u32 i;

	for (i = 0; i < sgid_tbl->max; i++)
		sgid_tbl->tbl[i].vlan_id = 0xffff;

	memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
}

static void bnxt_qplib_free_pkey_tbl(struct bnxt_qplib_res *res,
				     struct bnxt_qplib_pkey_tbl *pkey_tbl)
{
	if (!pkey_tbl->tbl)
		dev_dbg(&res->pdev->dev, "PKEY tbl not present\n");
	else
		kfree(pkey_tbl->tbl);

	pkey_tbl->tbl = NULL;
	pkey_tbl->max = 0;
	pkey_tbl->active = 0;
}

static int bnxt_qplib_alloc_pkey_tbl(struct bnxt_qplib_res *res,
				     struct bnxt_qplib_pkey_tbl *pkey_tbl,
				     u16 max)
{
	pkey_tbl->tbl = kcalloc(max, sizeof(u16), GFP_KERNEL);
	if (!pkey_tbl->tbl)
		return -ENOMEM;

	pkey_tbl->max = max;
	return 0;
}

/* PDs */
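/*
 * The PD table is a bitmap in which a set bit means "free" (the
 * inverse of the usual kernel convention): allocation clears the
 * first set bit, deallocation sets it again. The bitmap is sized at
 * one bit per PD (max / 8 bytes, at least one byte) and initialized
 * to all ones, i.e. all PDs free.
 */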
int bnxt_qplib_alloc_pd(struct bnxt_qplib_pd_tbl *pdt, struct bnxt_qplib_pd *pd)
{
	u32 bit_num;

	bit_num = find_first_bit(pdt->tbl, pdt->max);
	if (bit_num == pdt->max)
		return -ENOMEM;

	/* Found unused PD */
	clear_bit(bit_num, pdt->tbl);
	pd->id = bit_num;
	return 0;
}

int bnxt_qplib_dealloc_pd(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_pd_tbl *pdt,
			  struct bnxt_qplib_pd *pd)
{
	if (test_and_set_bit(pd->id, pdt->tbl)) {
		dev_warn(&res->pdev->dev, "Freeing an unused PD? pdn = %d\n",
			 pd->id);
		return -EINVAL;
	}
	pd->id = 0;
	return 0;
}

static void bnxt_qplib_free_pd_tbl(struct bnxt_qplib_pd_tbl *pdt)
{
	kfree(pdt->tbl);
	pdt->tbl = NULL;
	pdt->max = 0;
}

static int bnxt_qplib_alloc_pd_tbl(struct bnxt_qplib_res *res,
				   struct bnxt_qplib_pd_tbl *pdt,
				   u32 max)
{
	u32 bytes;

	bytes = max >> 3;
	if (!bytes)
		bytes = 1;
	pdt->tbl = kmalloc(bytes, GFP_KERNEL);
	if (!pdt->tbl)
		return -ENOMEM;

	pdt->max = max;
	memset((u8 *)pdt->tbl, 0xFF, bytes);

	return 0;
}

/* DPIs */
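/*
 * DPIs (doorbell page indices) come from the same kind of inverted
 * bitmap as PDs. Each DPI selects one PAGE_SIZE doorbell page in the
 * DB BAR: dbr is the ioremapped kernel address of that page, while
 * umdbr keeps the untranslated bus address, which appears intended
 * for user-space mappings.
 */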
int bnxt_qplib_alloc_dpi(struct bnxt_qplib_dpi_tbl *dpit,
			 struct bnxt_qplib_dpi     *dpi,
			 void                      *app)
{
	u32 bit_num;

	bit_num = find_first_bit(dpit->tbl, dpit->max);
	if (bit_num == dpit->max)
		return -ENOMEM;

	/* Found unused DPI */
	clear_bit(bit_num, dpit->tbl);
	dpit->app_tbl[bit_num] = app;

	dpi->dpi = bit_num;
	dpi->dbr = dpit->dbr_bar_reg_iomem + (bit_num * PAGE_SIZE);
	dpi->umdbr = dpit->unmapped_dbr + (bit_num * PAGE_SIZE);

	return 0;
}

int bnxt_qplib_dealloc_dpi(struct bnxt_qplib_res *res,
			   struct bnxt_qplib_dpi_tbl *dpit,
			   struct bnxt_qplib_dpi     *dpi)
{
	if (dpi->dpi >= dpit->max) {
		dev_warn(&res->pdev->dev, "Invalid DPI? dpi = %d\n", dpi->dpi);
		return -EINVAL;
	}
	if (test_and_set_bit(dpi->dpi, dpit->tbl)) {
		dev_warn(&res->pdev->dev, "Freeing an unused DPI? dpi = %d\n",
			 dpi->dpi);
		return -EINVAL;
	}
	if (dpit->app_tbl)
		dpit->app_tbl[dpi->dpi] = NULL;
	memset(dpi, 0, sizeof(*dpi));

	return 0;
}

static void bnxt_qplib_free_dpi_tbl(struct bnxt_qplib_res     *res,
				    struct bnxt_qplib_dpi_tbl *dpit)
{
	kfree(dpit->tbl);
	kfree(dpit->app_tbl);
	if (dpit->dbr_bar_reg_iomem)
		pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem);
	memset(dpit, 0, sizeof(*dpit));
}

static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res     *res,
				    struct bnxt_qplib_dpi_tbl *dpit,
				    u32                       dbr_offset)
{
	u32 dbr_bar_reg = RCFW_DBR_PCI_BAR_REGION;
	resource_size_t bar_reg_base;
	u32 dbr_len, bytes;

	if (dpit->dbr_bar_reg_iomem) {
		dev_err(&res->pdev->dev, "DBR BAR region %d already mapped\n",
			dbr_bar_reg);
		return -EALREADY;
	}

	bar_reg_base = pci_resource_start(res->pdev, dbr_bar_reg);
	if (!bar_reg_base) {
		dev_err(&res->pdev->dev, "BAR region %d resc start failed\n",
			dbr_bar_reg);
		return -ENOMEM;
	}

	dbr_len = pci_resource_len(res->pdev, dbr_bar_reg) - dbr_offset;
	if (!dbr_len || ((dbr_len & (PAGE_SIZE - 1)) != 0)) {
		dev_err(&res->pdev->dev, "Invalid DBR length %d\n", dbr_len);
		return -ENOMEM;
	}

	dpit->dbr_bar_reg_iomem = ioremap(bar_reg_base + dbr_offset, dbr_len);
	if (!dpit->dbr_bar_reg_iomem) {
		dev_err(&res->pdev->dev,
			"FP: DBR BAR region %d mapping failed\n", dbr_bar_reg);
		return -ENOMEM;
	}

	dpit->unmapped_dbr = bar_reg_base + dbr_offset;
	dpit->max = dbr_len / PAGE_SIZE;

	dpit->app_tbl = kcalloc(dpit->max, sizeof(void *), GFP_KERNEL);
	if (!dpit->app_tbl)
		goto unmap_io;

	bytes = dpit->max >> 3;
	if (!bytes)
		bytes = 1;

	dpit->tbl = kmalloc(bytes, GFP_KERNEL);
	if (!dpit->tbl) {
		kfree(dpit->app_tbl);
		dpit->app_tbl = NULL;
		goto unmap_io;
	}

	memset((u8 *)dpit->tbl, 0xFF, bytes);

	return 0;

unmap_io:
	pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem);
	dpit->dbr_bar_reg_iomem = NULL;
	return -ENOMEM;
}

/* PKEYs */
static void bnxt_qplib_cleanup_pkey_tbl(struct bnxt_qplib_pkey_tbl *pkey_tbl)
{
	memset(pkey_tbl->tbl, 0, sizeof(u16) * pkey_tbl->max);
	pkey_tbl->active = 0;
}

static void bnxt_qplib_init_pkey_tbl(struct bnxt_qplib_res *res,
				     struct bnxt_qplib_pkey_tbl *pkey_tbl)
{
	u16 pkey = 0xFFFF;

	memset(pkey_tbl->tbl, 0, sizeof(u16) * pkey_tbl->max);

	/* pkey default = 0xFFFF */
	bnxt_qplib_add_pkey(res, pkey_tbl, &pkey, false);
}

/* Stats */
static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
				      struct bnxt_qplib_stats *stats)
{
	if (stats->dma) {
		dma_free_coherent(&pdev->dev, stats->size,
				  stats->dma, stats->dma_map);
	}
	memset(stats, 0, sizeof(*stats));
	stats->fw_id = -1;
}

static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
				      struct bnxt_qplib_chip_ctx *cctx,
				      struct bnxt_qplib_stats *stats)
{
	memset(stats, 0, sizeof(*stats));
	stats->fw_id = -1;
	stats->size = cctx->hw_stats_size;
	stats->dma = dma_alloc_coherent(&pdev->dev, stats->size,
					&stats->dma_map, GFP_KERNEL);
	if (!stats->dma) {
		dev_err(&pdev->dev, "Stats DMA allocation failed\n");
		return -ENOMEM;
	}
	return 0;
}

void bnxt_qplib_cleanup_res(struct bnxt_qplib_res *res)
{
	bnxt_qplib_cleanup_pkey_tbl(&res->pkey_tbl);
	bnxt_qplib_cleanup_sgid_tbl(res, &res->sgid_tbl);
}

int bnxt_qplib_init_res(struct bnxt_qplib_res *res)
{
	bnxt_qplib_init_sgid_tbl(&res->sgid_tbl, res->netdev);
	bnxt_qplib_init_pkey_tbl(res, &res->pkey_tbl);

	return 0;
}

void bnxt_qplib_free_res(struct bnxt_qplib_res *res)
{
	bnxt_qplib_free_pkey_tbl(res, &res->pkey_tbl);
	bnxt_qplib_free_sgid_tbl(res, &res->sgid_tbl);
	bnxt_qplib_free_pd_tbl(&res->pd_tbl);
	bnxt_qplib_free_dpi_tbl(res, &res->dpi_tbl);
}

int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct pci_dev *pdev,
			 struct net_device *netdev,
			 struct bnxt_qplib_dev_attr *dev_attr)
{
	int rc = 0;

	res->pdev = pdev;
	res->netdev = netdev;

	rc = bnxt_qplib_alloc_sgid_tbl(res, &res->sgid_tbl, dev_attr->max_sgid);
	if (rc)
		goto fail;

	rc = bnxt_qplib_alloc_pkey_tbl(res, &res->pkey_tbl, dev_attr->max_pkey);
	if (rc)
		goto fail;

	rc = bnxt_qplib_alloc_pd_tbl(res, &res->pd_tbl, dev_attr->max_pd);
	if (rc)
		goto fail;

	rc = bnxt_qplib_alloc_dpi_tbl(res, &res->dpi_tbl, dev_attr->l2_db_size);
	if (rc)
		goto fail;

	return 0;
fail:
	bnxt_qplib_free_res(res);
	return rc;
}

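/*
 * Enable 32- and 64-bit PCIe AtomicOps toward the root complex and
 * verify that the requester enable took effect in Device Control 2.
 * Returns 0 when PCIe atomics are usable, nonzero otherwise.
 */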
int bnxt_qplib_determine_atomics(struct pci_dev *dev)
{
	int comp;
	u16 ctl2;

	comp = pci_enable_atomic_ops_to_root(dev,
					     PCI_EXP_DEVCAP2_ATOMIC_COMP32);
	if (comp)
		return -EOPNOTSUPP;
	comp = pci_enable_atomic_ops_to_root(dev,
					     PCI_EXP_DEVCAP2_ATOMIC_COMP64);
	if (comp)
		return -EOPNOTSUPP;
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &ctl2);
	return !(ctl2 & PCI_EXP_DEVCTL2_ATOMIC_REQ);
}