/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: Slow Path Operators
 */

#define dev_fmt(fmt) "QPLIB: " fmt

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/pci.h>

#include "roce_hsi.h"

#include "qplib_res.h"
#include "qplib_rcfw.h"
#include "qplib_sp.h"

const struct bnxt_qplib_gid bnxt_qplib_gid_zero = {{ 0, 0, 0, 0, 0, 0, 0, 0,
                                                     0, 0, 0, 0, 0, 0, 0, 0 } };

/* Device */

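/* Report atomic capability only when the device is a gen P5 chip and the
 * PCIe function has the AtomicOp requester enable bit set in Device Control 2.
 */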
static bool bnxt_qplib_is_atomic_cap(struct bnxt_qplib_rcfw *rcfw)
{
        u16 pcie_ctl2 = 0;

        if (!bnxt_qplib_is_chip_gen_p5(rcfw->res->cctx))
                return false;

        pcie_capability_read_word(rcfw->pdev, PCI_EXP_DEVCTL2, &pcie_ctl2);
        return (pcie_ctl2 & PCI_EXP_DEVCTL2_ATOMIC_REQ);
}

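/* Query the firmware version over the RCFW channel and cache the
 * major/minor/build/reserved bytes in fw_ver[0..3].
 */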
static void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw,
                                     char *fw_ver)
{
        struct cmdq_query_version req;
        struct creq_query_version_resp resp;
        u16 cmd_flags = 0;
        int rc = 0;

        RCFW_CMD_PREP(req, QUERY_VERSION, cmd_flags);

        rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
                                          (void *)&resp, NULL, 0);
        if (rc)
                return;
        fw_ver[0] = resp.fw_maj;
        fw_ver[1] = resp.fw_minor;
        fw_ver[2] = resp.fw_bld;
        fw_ver[3] = resp.fw_rsvd;
}

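/* Issue QUERY_FUNC and translate the side-buffer response into the driver's
 * bnxt_qplib_dev_attr limits (QPs, CQs, MRs/MWs, SRQs, SGEs, TQM slots, etc.).
 */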
int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
                            struct bnxt_qplib_dev_attr *attr, bool vf)
{
        struct cmdq_query_func req;
        struct creq_query_func_resp resp;
        struct bnxt_qplib_rcfw_sbuf *sbuf;
        struct creq_query_func_resp_sb *sb;
        u16 cmd_flags = 0;
        u32 temp;
        u8 *tqm_alloc;
        int i, rc = 0;

        RCFW_CMD_PREP(req, QUERY_FUNC, cmd_flags);

        sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
        if (!sbuf) {
                dev_err(&rcfw->pdev->dev,
                        "SP: QUERY_FUNC alloc side buffer failed\n");
                return -ENOMEM;
        }

        sb = sbuf->sb;
        req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
        rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
                                          (void *)sbuf, 0);
        if (rc)
                goto bail;

        /* Extract the context from the side buffer */
        attr->max_qp = le32_to_cpu(sb->max_qp);
        /* max_qp value reported by FW for PF doesn't include the QP1 for PF */
        if (!vf)
                attr->max_qp += 1;
        attr->max_qp_rd_atom =
                sb->max_qp_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ?
                BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_rd_atom;
        attr->max_qp_init_rd_atom =
                sb->max_qp_init_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ?
                BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_init_rd_atom;
        attr->max_qp_wqes = le16_to_cpu(sb->max_qp_wr);
        /*
         * 128 WQEs need to be reserved for the HW (8916), so don't
         * report the full maximum.
         */
        attr->max_qp_wqes -= BNXT_QPLIB_RESERVED_QP_WRS + 1;
        attr->max_qp_sges = bnxt_qplib_is_chip_gen_p5(rcfw->res->cctx) ?
                            6 : sb->max_sge;
        attr->max_cq = le32_to_cpu(sb->max_cq);
        attr->max_cq_wqes = le32_to_cpu(sb->max_cqe);
        attr->max_cq_sges = attr->max_qp_sges;
        attr->max_mr = le32_to_cpu(sb->max_mr);
        attr->max_mw = le32_to_cpu(sb->max_mw);

        attr->max_mr_size = le64_to_cpu(sb->max_mr_size);
        attr->max_pd = 64 * 1024;
        attr->max_raw_ethy_qp = le32_to_cpu(sb->max_raw_eth_qp);
        attr->max_ah = le32_to_cpu(sb->max_ah);

        attr->max_srq = le16_to_cpu(sb->max_srq);
        attr->max_srq_wqes = le32_to_cpu(sb->max_srq_wr) - 1;
        attr->max_srq_sges = sb->max_srq_sge;
        attr->max_pkey = 1;
        attr->max_inline_data = le32_to_cpu(sb->max_inline_data);
        attr->l2_db_size = (sb->l2_db_space_size + 1) *
                           (0x01 << RCFW_DBR_BASE_PAGE_SHIFT);
        attr->max_sgid = BNXT_QPLIB_NUM_GIDS_SUPPORTED;
        attr->dev_cap_flags = le16_to_cpu(sb->dev_cap_flags);

        bnxt_qplib_query_version(rcfw, attr->fw_ver);

        for (i = 0; i < MAX_TQM_ALLOC_REQ / 4; i++) {
                temp = le32_to_cpu(sb->tqm_alloc_reqs[i]);
                tqm_alloc = (u8 *)&temp;
                attr->tqm_alloc_reqs[i * 4] = *tqm_alloc;
                attr->tqm_alloc_reqs[i * 4 + 1] = *(++tqm_alloc);
                attr->tqm_alloc_reqs[i * 4 + 2] = *(++tqm_alloc);
                attr->tqm_alloc_reqs[i * 4 + 3] = *(++tqm_alloc);
        }

        attr->is_atomic = bnxt_qplib_is_atomic_cap(rcfw);
bail:
        bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
        return rc;
}

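/* Program the per-function and per-VF resource limits (QPs, MRWs, SRQs, CQs,
 * GIDs) into the firmware via SET_FUNC_RESOURCES.
 */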
int bnxt_qplib_set_func_resources(struct bnxt_qplib_res *res,
                                  struct bnxt_qplib_rcfw *rcfw,
                                  struct bnxt_qplib_ctx *ctx)
{
        struct cmdq_set_func_resources req;
        struct creq_set_func_resources_resp resp;
        u16 cmd_flags = 0;
        int rc = 0;

        RCFW_CMD_PREP(req, SET_FUNC_RESOURCES, cmd_flags);

        req.number_of_qp = cpu_to_le32(ctx->qpc_count);
        req.number_of_mrw = cpu_to_le32(ctx->mrw_count);
        req.number_of_srq = cpu_to_le32(ctx->srqc_count);
        req.number_of_cq = cpu_to_le32(ctx->cq_count);

        req.max_qp_per_vf = cpu_to_le32(ctx->vf_res.max_qp_per_vf);
        req.max_mrw_per_vf = cpu_to_le32(ctx->vf_res.max_mrw_per_vf);
        req.max_srq_per_vf = cpu_to_le32(ctx->vf_res.max_srq_per_vf);
        req.max_cq_per_vf = cpu_to_le32(ctx->vf_res.max_cq_per_vf);
        req.max_gid_per_vf = cpu_to_le32(ctx->vf_res.max_gid_per_vf);

        rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
                                          (void *)&resp,
                                          NULL, 0);
        if (rc)
                dev_err(&res->pdev->dev, "Failed to set function resources\n");

        return rc;
}

/* SGID */
int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res,
                        struct bnxt_qplib_sgid_tbl *sgid_tbl, int index,
                        struct bnxt_qplib_gid *gid)
{
        if (index >= sgid_tbl->max) {
                dev_err(&res->pdev->dev,
                        "Index %d exceeded SGID table max (%d)\n",
                        index, sgid_tbl->max);
                return -EINVAL;
        }
        memcpy(gid, &sgid_tbl->tbl[index].gid, sizeof(*gid));
        return 0;
}

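/* Remove a GID (matched by GID value and VLAN id) from the SGID table and,
 * when 'update' is set, issue DELETE_GID to the firmware as well.
 */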
int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
                        struct bnxt_qplib_gid *gid, u16 vlan_id, bool update)
{
        struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl,
                                                   struct bnxt_qplib_res,
                                                   sgid_tbl);
        struct bnxt_qplib_rcfw *rcfw = res->rcfw;
        int index;

        if (!sgid_tbl) {
                dev_err(&res->pdev->dev, "SGID table not allocated\n");
                return -EINVAL;
        }
        /* Do we need a sgid_lock here? */
        if (!sgid_tbl->active) {
                dev_err(&res->pdev->dev, "SGID table has no active entries\n");
                return -ENOMEM;
        }
        for (index = 0; index < sgid_tbl->max; index++) {
                if (!memcmp(&sgid_tbl->tbl[index].gid, gid, sizeof(*gid)) &&
                    vlan_id == sgid_tbl->tbl[index].vlan_id)
                        break;
        }
        if (index == sgid_tbl->max) {
                dev_warn(&res->pdev->dev, "GID not found in the SGID table\n");
                return 0;
        }
        /* Remove GID from the SGID table */
        if (update) {
                struct cmdq_delete_gid req;
                struct creq_delete_gid_resp resp;
                u16 cmd_flags = 0;
                int rc;

                RCFW_CMD_PREP(req, DELETE_GID, cmd_flags);
                if (sgid_tbl->hw_id[index] == 0xFFFF) {
                        dev_err(&res->pdev->dev,
                                "GID entry contains an invalid HW id\n");
                        return -EINVAL;
                }
                req.gid_index = cpu_to_le16(sgid_tbl->hw_id[index]);
                rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
                                                  (void *)&resp, NULL, 0);
                if (rc)
                        return rc;
        }
        memcpy(&sgid_tbl->tbl[index].gid, &bnxt_qplib_gid_zero,
               sizeof(bnxt_qplib_gid_zero));
        sgid_tbl->tbl[index].vlan_id = 0xFFFF;
        sgid_tbl->vlan[index] = 0;
        sgid_tbl->active--;
        dev_dbg(&res->pdev->dev,
                "SGID deleted hw_id[0x%x] = 0x%x active = 0x%x\n",
                index, sgid_tbl->hw_id[index], sgid_tbl->active);
        sgid_tbl->hw_id[index] = (u16)-1;

        /* unlock */
        return 0;
}

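/* Add a GID to the first free SGID table slot and, when 'update' is set,
 * issue ADD_GID to the firmware; the chosen slot is returned via *index.
 */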
int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
                        struct bnxt_qplib_gid *gid, const u8 *smac,
                        u16 vlan_id, bool update, u32 *index)
{
        struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl,
                                                   struct bnxt_qplib_res,
                                                   sgid_tbl);
        struct bnxt_qplib_rcfw *rcfw = res->rcfw;
        int i, free_idx;

        if (!sgid_tbl) {
                dev_err(&res->pdev->dev, "SGID table not allocated\n");
                return -EINVAL;
        }
        /* Do we need a sgid_lock here? */
        if (sgid_tbl->active == sgid_tbl->max) {
                dev_err(&res->pdev->dev, "SGID table is full\n");
                return -ENOMEM;
        }
        free_idx = sgid_tbl->max;
        for (i = 0; i < sgid_tbl->max; i++) {
                if (!memcmp(&sgid_tbl->tbl[i], gid, sizeof(*gid)) &&
                    sgid_tbl->tbl[i].vlan_id == vlan_id) {
                        dev_dbg(&res->pdev->dev,
                                "SGID entry already exist in entry %d!\n", i);
                        *index = i;
                        return -EALREADY;
                } else if (!memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero,
                                   sizeof(bnxt_qplib_gid_zero)) &&
                           free_idx == sgid_tbl->max) {
                        free_idx = i;
                }
        }
        if (free_idx == sgid_tbl->max) {
                dev_err(&res->pdev->dev,
                        "SGID table is FULL but count is not MAX??\n");
                return -ENOMEM;
        }
        if (update) {
                struct cmdq_add_gid req;
                struct creq_add_gid_resp resp;
                u16 cmd_flags = 0;
                int rc;

                RCFW_CMD_PREP(req, ADD_GID, cmd_flags);

                req.gid[0] = cpu_to_be32(((u32 *)gid->data)[3]);
                req.gid[1] = cpu_to_be32(((u32 *)gid->data)[2]);
                req.gid[2] = cpu_to_be32(((u32 *)gid->data)[1]);
                req.gid[3] = cpu_to_be32(((u32 *)gid->data)[0]);
                /*
                 * The driver should ensure that all RoCE traffic is always
                 * VLAN tagged if it runs on a non-zero VLAN ID or with a
                 * non-zero priority.
                 */
                if ((vlan_id != 0xFFFF) || res->prio) {
                        if (vlan_id != 0xFFFF)
                                req.vlan = cpu_to_le16
                                (vlan_id & CMDQ_ADD_GID_VLAN_VLAN_ID_MASK);
                        req.vlan |= cpu_to_le16
                                        (CMDQ_ADD_GID_VLAN_TPID_TPID_8100 |
                                         CMDQ_ADD_GID_VLAN_VLAN_EN);
                }

                /* MAC in network format */
                req.src_mac[0] = cpu_to_be16(((u16 *)smac)[0]);
                req.src_mac[1] = cpu_to_be16(((u16 *)smac)[1]);
                req.src_mac[2] = cpu_to_be16(((u16 *)smac)[2]);

                rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
                                                  (void *)&resp, NULL, 0);
                if (rc)
                        return rc;
                sgid_tbl->hw_id[free_idx] = le32_to_cpu(resp.xid);
        }
        /* Add GID to the sgid_tbl */
        memcpy(&sgid_tbl->tbl[free_idx], gid, sizeof(*gid));
        sgid_tbl->tbl[free_idx].vlan_id = vlan_id;
        sgid_tbl->active++;
        if (vlan_id != 0xFFFF)
                sgid_tbl->vlan[free_idx] = 1;

        dev_dbg(&res->pdev->dev,
                "SGID added hw_id[0x%x] = 0x%x active = 0x%x\n",
                free_idx, sgid_tbl->hw_id[free_idx], sgid_tbl->active);

        *index = free_idx;
        /* unlock */
        return 0;
}

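/* Issue MODIFY_GID to refresh the source MAC (and VLAN/priority tagging)
 * associated with an existing GID index.
 */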
int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
                           struct bnxt_qplib_gid *gid, u16 gid_idx,
                           const u8 *smac)
{
        struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl,
                                                   struct bnxt_qplib_res,
                                                   sgid_tbl);
        struct bnxt_qplib_rcfw *rcfw = res->rcfw;
        struct creq_modify_gid_resp resp;
        struct cmdq_modify_gid req;
        int rc;
        u16 cmd_flags = 0;

        RCFW_CMD_PREP(req, MODIFY_GID, cmd_flags);

        req.gid[0] = cpu_to_be32(((u32 *)gid->data)[3]);
        req.gid[1] = cpu_to_be32(((u32 *)gid->data)[2]);
        req.gid[2] = cpu_to_be32(((u32 *)gid->data)[1]);
        req.gid[3] = cpu_to_be32(((u32 *)gid->data)[0]);
        if (res->prio) {
                req.vlan |= cpu_to_le16
                        (CMDQ_ADD_GID_VLAN_TPID_TPID_8100 |
                         CMDQ_ADD_GID_VLAN_VLAN_EN);
        }

        /* MAC in network format */
        req.src_mac[0] = cpu_to_be16(((u16 *)smac)[0]);
        req.src_mac[1] = cpu_to_be16(((u16 *)smac)[1]);
        req.src_mac[2] = cpu_to_be16(((u16 *)smac)[2]);

        req.gid_index = cpu_to_le16(gid_idx);

        rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
                                          (void *)&resp, NULL, 0);
        return rc;
}

/* AH */
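/* Create an address handle in the device via CREATE_AH and store the
 * returned XID as the AH id.
 */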
int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah,
                         bool block)
{
        struct bnxt_qplib_rcfw *rcfw = res->rcfw;
        struct cmdq_create_ah req;
        struct creq_create_ah_resp resp;
        u16 cmd_flags = 0;
        u32 temp32[4];
        u16 temp16[3];
        int rc;

        RCFW_CMD_PREP(req, CREATE_AH, cmd_flags);

        memcpy(temp32, ah->dgid.data, sizeof(struct bnxt_qplib_gid));
        req.dgid[0] = cpu_to_le32(temp32[0]);
        req.dgid[1] = cpu_to_le32(temp32[1]);
        req.dgid[2] = cpu_to_le32(temp32[2]);
        req.dgid[3] = cpu_to_le32(temp32[3]);

        req.type = ah->nw_type;
        req.hop_limit = ah->hop_limit;
        req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id[ah->sgid_index]);
        req.dest_vlan_id_flow_label = cpu_to_le32((ah->flow_label &
                                        CMDQ_CREATE_AH_FLOW_LABEL_MASK) |
                                        CMDQ_CREATE_AH_DEST_VLAN_ID_MASK);
        req.pd_id = cpu_to_le32(ah->pd->id);
        req.traffic_class = ah->traffic_class;

        /* MAC in network format */
        memcpy(temp16, ah->dmac, 6);
        req.dest_mac[0] = cpu_to_le16(temp16[0]);
        req.dest_mac[1] = cpu_to_le16(temp16[1]);
        req.dest_mac[2] = cpu_to_le16(temp16[2]);

        rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
                                          NULL, block);
        if (rc)
                return rc;

        ah->id = le32_to_cpu(resp.xid);
        return 0;
}

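/* Tear down the AH entry in the device via DESTROY_AH; the command status
 * is not checked.
 */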
void bnxt_qplib_destroy_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah,
                           bool block)
{
        struct bnxt_qplib_rcfw *rcfw = res->rcfw;
        struct cmdq_destroy_ah req;
        struct creq_destroy_ah_resp resp;
        u16 cmd_flags = 0;

        /* Clean up the AH table in the device */
        RCFW_CMD_PREP(req, DESTROY_AH, cmd_flags);

        req.ah_cid = cpu_to_le32(ah->id);

        bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, NULL,
                                     block);
}

/* MRW */
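/* Release an MR/MW key via DEALLOCATE_KEY and free any page-list (HWQ)
 * memory that was allocated for it.
 */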
int bnxt_qplib_free_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
{
        struct bnxt_qplib_rcfw *rcfw = res->rcfw;
        struct cmdq_deallocate_key req;
        struct creq_deallocate_key_resp resp;
        u16 cmd_flags = 0;
        int rc;

        if (mrw->lkey == 0xFFFFFFFF) {
                dev_info(&res->pdev->dev, "SP: Free a reserved lkey MRW\n");
                return 0;
        }

        RCFW_CMD_PREP(req, DEALLOCATE_KEY, cmd_flags);

        req.mrw_flags = mrw->type;

        if ((mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1) ||
            (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A) ||
            (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B))
                req.key = cpu_to_le32(mrw->rkey);
        else
                req.key = cpu_to_le32(mrw->lkey);

        rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
                                          NULL, 0);
        if (rc)
                return rc;

        /* Free the qplib's MRW memory */
        if (mrw->hwq.max_elements)
                bnxt_qplib_free_hwq(res, &mrw->hwq);

        return 0;
}

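/* Allocate an MR or MW key via ALLOCATE_MRW; the returned XID becomes the
 * rkey for memory windows and the lkey otherwise.
 */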
int bnxt_qplib_alloc_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
{
        struct bnxt_qplib_rcfw *rcfw = res->rcfw;
        struct cmdq_allocate_mrw req;
        struct creq_allocate_mrw_resp resp;
        u16 cmd_flags = 0;
        unsigned long tmp;
        int rc;

        RCFW_CMD_PREP(req, ALLOCATE_MRW, cmd_flags);

        req.pd_id = cpu_to_le32(mrw->pd->id);
        req.mrw_flags = mrw->type;
        if ((mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR &&
             mrw->flags & BNXT_QPLIB_FR_PMR) ||
            mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A ||
            mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B)
                req.access = CMDQ_ALLOCATE_MRW_ACCESS_CONSUMER_OWNED_KEY;
        tmp = (unsigned long)mrw;
        req.mrw_handle = cpu_to_le64(tmp);

        rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
                                          (void *)&resp, NULL, 0);
        if (rc)
                return rc;

        if ((mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1) ||
            (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A) ||
            (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B))
                mrw->rkey = le32_to_cpu(resp.xid);
        else
                mrw->lkey = le32_to_cpu(resp.xid);
        return 0;
}

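/* Deregister an MR via DEREGISTER_MR and free its page-table (HWQ) memory. */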
int bnxt_qplib_dereg_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw,
                         bool block)
{
        struct bnxt_qplib_rcfw *rcfw = res->rcfw;
        struct cmdq_deregister_mr req;
        struct creq_deregister_mr_resp resp;
        u16 cmd_flags = 0;
        int rc;

        RCFW_CMD_PREP(req, DEREGISTER_MR, cmd_flags);

        req.lkey = cpu_to_le32(mrw->lkey);
        rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
                                          (void *)&resp, NULL, block);
        if (rc)
                return rc;

        /* Free the qplib's MR memory */
        if (mrw->hwq.max_elements) {
                mrw->va = 0;
                mrw->total_size = 0;
                bnxt_qplib_free_hwq(res, &mrw->hwq);
        }

        return 0;
}

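/* Register an MR with the device: build the PBL HWQ from the umem when page
 * lists are supplied, then issue REGISTER_MR with the page size, access
 * flags, VA, key and length.
 */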
int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
                      struct ib_umem *umem, int num_pbls, u32 buf_pg_size)
{
        struct bnxt_qplib_rcfw *rcfw = res->rcfw;
        struct bnxt_qplib_hwq_attr hwq_attr = {};
        struct bnxt_qplib_sg_info sginfo = {};
        struct creq_register_mr_resp resp;
        struct cmdq_register_mr req;
        u16 cmd_flags = 0, level;
        int pages, rc;
        u32 pg_size;

        if (num_pbls) {
                pages = roundup_pow_of_two(num_pbls);
                /* Allocate memory for the non-leaf pages to store buf ptrs.
                 * Non-leaf pages always use the system PAGE_SIZE.
                 */
                /* Free the hwq if it already exists; this must be a re-registration */
                if (mr->hwq.max_elements)
                        bnxt_qplib_free_hwq(res, &mr->hwq);
                hwq_attr.res = res;
                hwq_attr.depth = pages;
                hwq_attr.stride = sizeof(dma_addr_t);
                hwq_attr.type = HWQ_TYPE_MR;
                hwq_attr.sginfo = &sginfo;
                hwq_attr.sginfo->umem = umem;
                hwq_attr.sginfo->npages = pages;
                hwq_attr.sginfo->pgsize = buf_pg_size;
                hwq_attr.sginfo->pgshft = ilog2(buf_pg_size);
                rc = bnxt_qplib_alloc_init_hwq(&mr->hwq, &hwq_attr);
                if (rc) {
                        dev_err(&res->pdev->dev,
                                "SP: Reg MR memory allocation failed\n");
                        return -ENOMEM;
                }
        }

        RCFW_CMD_PREP(req, REGISTER_MR, cmd_flags);

        /* Configure the request */
        if (mr->hwq.level == PBL_LVL_MAX) {
                /* No PBL provided, just use system PAGE_SIZE */
                level = 0;
                req.pbl = 0;
                pg_size = PAGE_SIZE;
        } else {
                level = mr->hwq.level;
                req.pbl = cpu_to_le64(mr->hwq.pbl[PBL_LVL_0].pg_map_arr[0]);
        }
        pg_size = buf_pg_size ? buf_pg_size : PAGE_SIZE;
        req.log2_pg_size_lvl = (level << CMDQ_REGISTER_MR_LVL_SFT) |
                               ((ilog2(pg_size) <<
                                 CMDQ_REGISTER_MR_LOG2_PG_SIZE_SFT) &
                                CMDQ_REGISTER_MR_LOG2_PG_SIZE_MASK);
        req.log2_pbl_pg_size = cpu_to_le16(((ilog2(PAGE_SIZE) <<
                                 CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_SFT) &
                                CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_MASK));
        req.access = (mr->flags & 0xFFFF);
        req.va = cpu_to_le64(mr->va);
        req.key = cpu_to_le32(mr->lkey);
        req.mr_size = cpu_to_le64(mr->total_size);

        rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
                                          (void *)&resp, NULL, false);
        if (rc)
                goto fail;

        return 0;

fail:
        if (mr->hwq.max_elements)
                bnxt_qplib_free_hwq(res, &mr->hwq);
        return rc;
}

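/* Size and allocate the HWQ backing a fast-register page list; returns
 * -ENOMEM if the rounded-up pointer count needs more pages than a single
 * level-1 PBL can hold.
 */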
int bnxt_qplib_alloc_fast_reg_page_list(struct bnxt_qplib_res *res,
                                        struct bnxt_qplib_frpl *frpl,
                                        int max_pg_ptrs)
{
        struct bnxt_qplib_hwq_attr hwq_attr = {};
        struct bnxt_qplib_sg_info sginfo = {};
        int pg_ptrs, pages, rc;

        /* Re-calculate the max to fit the HWQ allocation model */
        pg_ptrs = roundup_pow_of_two(max_pg_ptrs);
        pages = pg_ptrs >> MAX_PBL_LVL_1_PGS_SHIFT;
        if (!pages)
                pages++;

        if (pages > MAX_PBL_LVL_1_PGS)
                return -ENOMEM;

        sginfo.pgsize = PAGE_SIZE;
        sginfo.nopte = true;

        hwq_attr.res = res;
        hwq_attr.depth = pg_ptrs;
        hwq_attr.stride = PAGE_SIZE;
        hwq_attr.sginfo = &sginfo;
        hwq_attr.type = HWQ_TYPE_CTX;
        rc = bnxt_qplib_alloc_init_hwq(&frpl->hwq, &hwq_attr);
        if (!rc)
                frpl->max_pg_ptrs = pg_ptrs;

        return rc;
}

int bnxt_qplib_free_fast_reg_page_list(struct bnxt_qplib_res *res,
                                       struct bnxt_qplib_frpl *frpl)
{
        bnxt_qplib_free_hwq(res, &frpl->hwq);
        return 0;
}

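/* Map the traffic classes to the two CoS queue ids via MAP_TC_TO_COS. */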
int bnxt_qplib_map_tc2cos(struct bnxt_qplib_res *res, u16 *cids)
{
        struct bnxt_qplib_rcfw *rcfw = res->rcfw;
        struct cmdq_map_tc_to_cos req;
        struct creq_map_tc_to_cos_resp resp;
        u16 cmd_flags = 0;

        RCFW_CMD_PREP(req, MAP_TC_TO_COS, cmd_flags);
        req.cos0 = cpu_to_le16(cids[0]);
        req.cos1 = cpu_to_le16(cids[1]);

        return bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
                                            NULL, 0);
}

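/* Query the per-function RoCE error/statistics counters via QUERY_ROCE_STATS
 * and copy them from the side buffer; the out-of-sequence drop count is
 * accumulated as a delta against the previously sampled value.
 */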
int bnxt_qplib_get_roce_stats(struct bnxt_qplib_rcfw *rcfw,
                              struct bnxt_qplib_roce_stats *stats)
{
        struct cmdq_query_roce_stats req;
        struct creq_query_roce_stats_resp resp;
        struct bnxt_qplib_rcfw_sbuf *sbuf;
        struct creq_query_roce_stats_resp_sb *sb;
        u16 cmd_flags = 0;
        int rc = 0;

        RCFW_CMD_PREP(req, QUERY_ROCE_STATS, cmd_flags);

        sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
        if (!sbuf) {
                dev_err(&rcfw->pdev->dev,
                        "SP: QUERY_ROCE_STATS alloc side buffer failed\n");
                return -ENOMEM;
        }

        sb = sbuf->sb;
        req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
        rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
                                          (void *)sbuf, 0);
        if (rc)
                goto bail;
        /* Extract the context from the side buffer */
        stats->to_retransmits = le64_to_cpu(sb->to_retransmits);
        stats->seq_err_naks_rcvd = le64_to_cpu(sb->seq_err_naks_rcvd);
        stats->max_retry_exceeded = le64_to_cpu(sb->max_retry_exceeded);
        stats->rnr_naks_rcvd = le64_to_cpu(sb->rnr_naks_rcvd);
        stats->missing_resp = le64_to_cpu(sb->missing_resp);
        stats->unrecoverable_err = le64_to_cpu(sb->unrecoverable_err);
        stats->bad_resp_err = le64_to_cpu(sb->bad_resp_err);
        stats->local_qp_op_err = le64_to_cpu(sb->local_qp_op_err);
        stats->local_protection_err = le64_to_cpu(sb->local_protection_err);
        stats->mem_mgmt_op_err = le64_to_cpu(sb->mem_mgmt_op_err);
        stats->remote_invalid_req_err = le64_to_cpu(sb->remote_invalid_req_err);
        stats->remote_access_err = le64_to_cpu(sb->remote_access_err);
        stats->remote_op_err = le64_to_cpu(sb->remote_op_err);
        stats->dup_req = le64_to_cpu(sb->dup_req);
        stats->res_exceed_max = le64_to_cpu(sb->res_exceed_max);
        stats->res_length_mismatch = le64_to_cpu(sb->res_length_mismatch);
        stats->res_exceeds_wqe = le64_to_cpu(sb->res_exceeds_wqe);
        stats->res_opcode_err = le64_to_cpu(sb->res_opcode_err);
        stats->res_rx_invalid_rkey = le64_to_cpu(sb->res_rx_invalid_rkey);
        stats->res_rx_domain_err = le64_to_cpu(sb->res_rx_domain_err);
        stats->res_rx_no_perm = le64_to_cpu(sb->res_rx_no_perm);
        stats->res_rx_range_err = le64_to_cpu(sb->res_rx_range_err);
        stats->res_tx_invalid_rkey = le64_to_cpu(sb->res_tx_invalid_rkey);
        stats->res_tx_domain_err = le64_to_cpu(sb->res_tx_domain_err);
        stats->res_tx_no_perm = le64_to_cpu(sb->res_tx_no_perm);
        stats->res_tx_range_err = le64_to_cpu(sb->res_tx_range_err);
        stats->res_irrq_oflow = le64_to_cpu(sb->res_irrq_oflow);
        stats->res_unsup_opcode = le64_to_cpu(sb->res_unsup_opcode);
        stats->res_unaligned_atomic = le64_to_cpu(sb->res_unaligned_atomic);
        stats->res_rem_inv_err = le64_to_cpu(sb->res_rem_inv_err);
        stats->res_mem_error = le64_to_cpu(sb->res_mem_error);
        stats->res_srq_err = le64_to_cpu(sb->res_srq_err);
        stats->res_cmp_err = le64_to_cpu(sb->res_cmp_err);
        stats->res_invalid_dup_rkey = le64_to_cpu(sb->res_invalid_dup_rkey);
        stats->res_wqe_format_err = le64_to_cpu(sb->res_wqe_format_err);
        stats->res_cq_load_err = le64_to_cpu(sb->res_cq_load_err);
        stats->res_srq_load_err = le64_to_cpu(sb->res_srq_load_err);
        stats->res_tx_pci_err = le64_to_cpu(sb->res_tx_pci_err);
        stats->res_rx_pci_err = le64_to_cpu(sb->res_rx_pci_err);
        if (!rcfw->init_oos_stats) {
                rcfw->oos_prev = le64_to_cpu(sb->res_oos_drop_count);
                rcfw->init_oos_stats = 1;
        } else {
                stats->res_oos_drop_count +=
                                (le64_to_cpu(sb->res_oos_drop_count) -
                                 rcfw->oos_prev) & BNXT_QPLIB_OOS_COUNT_MASK;
                rcfw->oos_prev = le64_to_cpu(sb->res_oos_drop_count);
        }

bail:
        bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
        return rc;
}

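/* Query the extended RoCE statistics (per-opcode Tx/Rx packet counters) for
 * the given function id via QUERY_ROCE_STATS_EXT.
 */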
int bnxt_qplib_qext_stat(struct bnxt_qplib_rcfw *rcfw, u32 fid,
                         struct bnxt_qplib_ext_stat *estat)
{
        struct creq_query_roce_stats_ext_resp resp = {};
        struct creq_query_roce_stats_ext_resp_sb *sb;
        struct cmdq_query_roce_stats_ext req = {};
        struct bnxt_qplib_rcfw_sbuf *sbuf;
        u16 cmd_flags = 0;
        int rc;

        sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
        if (!sbuf) {
                dev_err(&rcfw->pdev->dev,
                        "SP: QUERY_ROCE_STATS_EXT alloc sb failed");
                return -ENOMEM;
        }

        RCFW_CMD_PREP(req, QUERY_ROCE_STATS_EXT, cmd_flags);

        req.resp_size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
        req.resp_addr = cpu_to_le64(sbuf->dma_addr);
        req.function_id = cpu_to_le32(fid);
        req.flags = cpu_to_le16(CMDQ_QUERY_ROCE_STATS_EXT_FLAGS_FUNCTION_ID);

        rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
                                          (void *)&resp, (void *)sbuf, 0);
        if (rc)
                goto bail;

        sb = sbuf->sb;
        estat->tx_atomic_req = le64_to_cpu(sb->tx_atomic_req_pkts);
        estat->tx_read_req = le64_to_cpu(sb->tx_read_req_pkts);
        estat->tx_read_res = le64_to_cpu(sb->tx_read_res_pkts);
        estat->tx_write_req = le64_to_cpu(sb->tx_write_req_pkts);
        estat->tx_send_req = le64_to_cpu(sb->tx_send_req_pkts);
        estat->rx_atomic_req = le64_to_cpu(sb->rx_atomic_req_pkts);
        estat->rx_read_req = le64_to_cpu(sb->rx_read_req_pkts);
        estat->rx_read_res = le64_to_cpu(sb->rx_read_res_pkts);
        estat->rx_write_req = le64_to_cpu(sb->rx_write_req_pkts);
        estat->rx_send_req = le64_to_cpu(sb->rx_send_req_pkts);
        estat->rx_roce_good_pkts = le64_to_cpu(sb->rx_roce_good_pkts);
        estat->rx_roce_good_bytes = le64_to_cpu(sb->rx_roce_good_bytes);
        estat->rx_out_of_buffer = le64_to_cpu(sb->rx_out_of_buffer_pkts);
        estat->rx_out_of_sequence = le64_to_cpu(sb->rx_out_of_sequence_pkts);

bail:
        bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
        return rc;
}