Lines Matching refs:rdev
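
What follows is a cross-reference listing for the bnxt_re RoCE driver: each entry gives the line number in the source file, the matching line itself, and the enclosing function, with a note on whether rdev is an argument or a local variable there.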

81 static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev);
83 static void bnxt_re_destroy_chip_ctx(struct bnxt_re_dev *rdev) in bnxt_re_destroy_chip_ctx() argument
85 rdev->rcfw.res = NULL; in bnxt_re_destroy_chip_ctx()
86 rdev->qplib_res.cctx = NULL; in bnxt_re_destroy_chip_ctx()
89 static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev) in bnxt_re_setup_chip_ctx() argument
94 en_dev = rdev->en_dev; in bnxt_re_setup_chip_ctx()
97 rdev->chip_ctx.chip_num = bp->chip_num; in bnxt_re_setup_chip_ctx()
100 rdev->qplib_res.cctx = &rdev->chip_ctx; in bnxt_re_setup_chip_ctx()
101 rdev->rcfw.res = &rdev->qplib_res; in bnxt_re_setup_chip_ctx()
108 static void bnxt_re_get_sriov_func_type(struct bnxt_re_dev *rdev) in bnxt_re_get_sriov_func_type() argument
112 bp = netdev_priv(rdev->en_dev->net); in bnxt_re_get_sriov_func_type()
114 rdev->is_virtfn = 1; in bnxt_re_get_sriov_func_type()
122 static void bnxt_re_set_resource_limits(struct bnxt_re_dev *rdev) in bnxt_re_set_resource_limits() argument
128 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr; in bnxt_re_set_resource_limits()
130 rdev->qplib_ctx.qpc_count = min_t(u32, BNXT_RE_MAX_QPC_COUNT, in bnxt_re_set_resource_limits()
133 rdev->qplib_ctx.mrw_count = BNXT_RE_MAX_MRW_COUNT_256K; in bnxt_re_set_resource_limits()
135 rdev->qplib_ctx.mrw_count = min_t(u32, rdev->qplib_ctx.mrw_count, in bnxt_re_set_resource_limits()
137 rdev->qplib_ctx.srqc_count = min_t(u32, BNXT_RE_MAX_SRQC_COUNT, in bnxt_re_set_resource_limits()
139 rdev->qplib_ctx.cq_count = min_t(u32, BNXT_RE_MAX_CQ_COUNT, in bnxt_re_set_resource_limits()
143 rdev->qplib_ctx.tqm_count[i] = in bnxt_re_set_resource_limits()
144 rdev->dev_attr.tqm_alloc_reqs[i]; in bnxt_re_set_resource_limits()
146 if (rdev->num_vfs) { in bnxt_re_set_resource_limits()
152 num_vfs = 100 * rdev->num_vfs; in bnxt_re_set_resource_limits()
153 vf_qps = (rdev->qplib_ctx.qpc_count * vf_pct) / num_vfs; in bnxt_re_set_resource_limits()
154 vf_srqs = (rdev->qplib_ctx.srqc_count * vf_pct) / num_vfs; in bnxt_re_set_resource_limits()
155 vf_cqs = (rdev->qplib_ctx.cq_count * vf_pct) / num_vfs; in bnxt_re_set_resource_limits()
165 if (rdev->qplib_ctx.mrw_count < BNXT_RE_MAX_MRW_COUNT_64K) in bnxt_re_set_resource_limits()
166 vf_mrws = rdev->qplib_ctx.mrw_count * vf_pct / num_vfs; in bnxt_re_set_resource_limits()
168 vf_mrws = (rdev->qplib_ctx.mrw_count - in bnxt_re_set_resource_limits()
169 BNXT_RE_RESVD_MR_FOR_PF) / rdev->num_vfs; in bnxt_re_set_resource_limits()
172 rdev->qplib_ctx.vf_res.max_mrw_per_vf = vf_mrws; in bnxt_re_set_resource_limits()
173 rdev->qplib_ctx.vf_res.max_gid_per_vf = vf_gids; in bnxt_re_set_resource_limits()
174 rdev->qplib_ctx.vf_res.max_qp_per_vf = vf_qps; in bnxt_re_set_resource_limits()
175 rdev->qplib_ctx.vf_res.max_srq_per_vf = vf_srqs; in bnxt_re_set_resource_limits()
176 rdev->qplib_ctx.vf_res.max_cq_per_vf = vf_cqs; in bnxt_re_set_resource_limits()
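
The arithmetic in bnxt_re_set_resource_limits() divides a fixed percentage of each PF resource pool evenly among the VFs. Note that num_vfs is pre-scaled by 100 on line 152, so each per-VF quota works out to count * vf_pct / (100 * num_vfs), with integer division leaving the remainder to the PF. A minimal stand-alone sketch of that math (the 65% and the other values are illustrative only, not taken from the driver):

    #include <stdio.h>

    /* Per-VF share of a pool: (total * vf_pct) / (100 * num_vfs),
     * mirroring lines 152-155 above. Integer division rounds down,
     * so the PF implicitly keeps the remainder. */
    static unsigned int vf_share(unsigned int total, unsigned int vf_pct,
                                 unsigned int num_vfs)
    {
            return (total * vf_pct) / (100 * num_vfs);
    }

    int main(void)
    {
            /* e.g. 65536 QPs, 65% of the pool left to VFs, 8 VFs */
            printf("%u QPs per VF\n", vf_share(65536, 65, 8));
            return 0;
    }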
190 struct bnxt_re_dev *rdev = p; in bnxt_re_sriov_config() local
192 if (!rdev) in bnxt_re_sriov_config()
195 rdev->num_vfs = num_vfs; in bnxt_re_sriov_config()
196 bnxt_re_set_resource_limits(rdev); in bnxt_re_sriov_config()
197 bnxt_qplib_set_func_resources(&rdev->qplib_res, &rdev->rcfw, in bnxt_re_sriov_config()
198 &rdev->qplib_ctx); in bnxt_re_sriov_config()
203 struct bnxt_re_dev *rdev = p; in bnxt_re_shutdown() local
205 if (!rdev) in bnxt_re_shutdown()
208 bnxt_re_ib_unreg(rdev); in bnxt_re_shutdown()
213 struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle; in bnxt_re_stop_irq() local
214 struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw; in bnxt_re_stop_irq()
218 for (indx = BNXT_RE_NQ_IDX; indx < rdev->num_msix; indx++) { in bnxt_re_stop_irq()
219 nq = &rdev->nq[indx - 1]; in bnxt_re_stop_irq()
228 struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle; in bnxt_re_start_irq() local
229 struct bnxt_msix_entry *msix_ent = rdev->msix_entries; in bnxt_re_start_irq()
230 struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw; in bnxt_re_start_irq()
240 dev_err(rdev_to_dev(rdev), "Failed to re-start IRQs\n"); in bnxt_re_start_irq()
247 for (indx = 0; indx < rdev->num_msix; indx++) in bnxt_re_start_irq()
248 rdev->msix_entries[indx].vector = ent[indx].vector; in bnxt_re_start_irq()
252 for (indx = BNXT_RE_NQ_IDX ; indx < rdev->num_msix; indx++) { in bnxt_re_start_irq()
253 nq = &rdev->nq[indx - 1]; in bnxt_re_start_irq()
257 dev_warn(rdev_to_dev(rdev), in bnxt_re_start_irq()
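
bnxt_re_stop_irq() and bnxt_re_start_irq() are the L2 driver's IRQ-migration callbacks: when bnxt_en re-trims its MSI-X table, every RoCE consumer (the CREQ plus one NQ per remaining vector) is quiesced first, the saved vectors in rdev->msix_entries are all rewritten from the new table (lines 247-248), and only then is everything re-armed. A hypothetical, reduced model of that two-phase handshake (the struct and helpers below are illustrative, not driver API):

    #include <stdbool.h>

    struct irq_consumer {
            int vector;
            bool armed;
    };

    /* Phase 1: mask and release every consumer's old vector. */
    static void stop_all(struct irq_consumer *c, int n)
    {
            for (int i = 0; i < n; i++)
                    c[i].armed = false;
    }

    /* Phase 2: adopt the re-trimmed vectors, then re-request them.
     * Nothing may be re-armed before all vectors are rewritten. */
    static void start_all(struct irq_consumer *c, const int *new_vec, int n)
    {
            for (int i = 0; i < n; i++)
                    c[i].vector = new_vec[i];
            for (int i = 0; i < n; i++)
                    c[i].armed = true;
    }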
277 static int bnxt_re_unregister_netdev(struct bnxt_re_dev *rdev) in bnxt_re_unregister_netdev() argument
282 if (!rdev) in bnxt_re_unregister_netdev()
285 en_dev = rdev->en_dev; in bnxt_re_unregister_netdev()
287 rc = en_dev->en_ops->bnxt_unregister_device(rdev->en_dev, in bnxt_re_unregister_netdev()
292 static int bnxt_re_register_netdev(struct bnxt_re_dev *rdev) in bnxt_re_register_netdev() argument
297 if (!rdev) in bnxt_re_register_netdev()
300 en_dev = rdev->en_dev; in bnxt_re_register_netdev()
303 &bnxt_re_ulp_ops, rdev); in bnxt_re_register_netdev()
304 rdev->qplib_res.pdev = rdev->en_dev->pdev; in bnxt_re_register_netdev()
308 static int bnxt_re_free_msix(struct bnxt_re_dev *rdev) in bnxt_re_free_msix() argument
313 if (!rdev) in bnxt_re_free_msix()
316 en_dev = rdev->en_dev; in bnxt_re_free_msix()
319 rc = en_dev->en_ops->bnxt_free_msix(rdev->en_dev, BNXT_ROCE_ULP); in bnxt_re_free_msix()
324 static int bnxt_re_request_msix(struct bnxt_re_dev *rdev) in bnxt_re_request_msix() argument
329 if (!rdev) in bnxt_re_request_msix()
332 en_dev = rdev->en_dev; in bnxt_re_request_msix()
337 rdev->msix_entries, in bnxt_re_request_msix()
344 dev_warn(rdev_to_dev(rdev), in bnxt_re_request_msix()
348 rdev->num_msix = num_msix_got; in bnxt_re_request_msix()
353 static void bnxt_re_init_hwrm_hdr(struct bnxt_re_dev *rdev, struct input *hdr, in bnxt_re_init_hwrm_hdr() argument
372 static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev, in bnxt_re_net_ring_free() argument
375 struct bnxt_en_dev *en_dev = rdev->en_dev; in bnxt_re_net_ring_free()
386 bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_FREE, -1, -1); in bnxt_re_net_ring_free()
393 dev_err(rdev_to_dev(rdev), in bnxt_re_net_ring_free()
398 static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev, dma_addr_t *dma_arr, in bnxt_re_net_ring_alloc() argument
402 struct bnxt_en_dev *en_dev = rdev->en_dev; in bnxt_re_net_ring_alloc()
412 bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_ALLOC, -1, -1); in bnxt_re_net_ring_alloc()
435 static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev, in bnxt_re_net_stats_ctx_free() argument
438 struct bnxt_en_dev *en_dev = rdev->en_dev; in bnxt_re_net_stats_ctx_free()
448 bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_FREE, -1, -1); in bnxt_re_net_stats_ctx_free()
454 dev_err(rdev_to_dev(rdev), in bnxt_re_net_stats_ctx_free()
460 static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev, in bnxt_re_net_stats_ctx_alloc() argument
466 struct bnxt_en_dev *en_dev = rdev->en_dev; in bnxt_re_net_stats_ctx_alloc()
477 bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_ALLOC, -1, -1); in bnxt_re_net_stats_ctx_alloc()
510 struct bnxt_re_dev *rdev; in bnxt_re_from_netdev() local
513 list_for_each_entry_rcu(rdev, &bnxt_re_dev_list, list) { in bnxt_re_from_netdev()
514 if (rdev->netdev == netdev) { in bnxt_re_from_netdev()
516 return rdev; in bnxt_re_from_netdev()
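
bnxt_re_from_netdev() resolves a netdev back to its RoCE device with a lockless read-side walk of the global bnxt_re_dev_list. A kernel-style sketch of the whole function, reconstructed around the matching lines above (the unlock and miss paths are assumptions, since only the rdev-bearing lines appear in this listing):

    static struct bnxt_re_dev *bnxt_re_from_netdev(struct net_device *netdev)
    {
            struct bnxt_re_dev *rdev;

            rcu_read_lock();
            list_for_each_entry_rcu(rdev, &bnxt_re_dev_list, list) {
                    if (rdev->netdev == netdev) {
                            rcu_read_unlock();
                            return rdev;
                    }
            }
            rcu_read_unlock();
            return NULL;
    }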
567 struct bnxt_re_dev *rdev = in hw_rev_show() local
570 return scnprintf(buf, PAGE_SIZE, "0x%x\n", rdev->en_dev->pdev->vendor); in hw_rev_show()
577 struct bnxt_re_dev *rdev = in hca_type_show() local
580 return scnprintf(buf, PAGE_SIZE, "%s\n", rdev->ibdev.node_desc); in hca_type_show()
594 static void bnxt_re_unregister_ib(struct bnxt_re_dev *rdev) in bnxt_re_unregister_ib() argument
596 ib_unregister_device(&rdev->ibdev); in bnxt_re_unregister_ib()
651 static int bnxt_re_register_ib(struct bnxt_re_dev *rdev) in bnxt_re_register_ib() argument
653 struct ib_device *ibdev = &rdev->ibdev; in bnxt_re_register_ib()
662 bnxt_qplib_get_guid(rdev->netdev->dev_addr, (u8 *)&ibdev->node_guid); in bnxt_re_register_ib()
665 ibdev->dev.parent = &rdev->en_dev->pdev->dev; in bnxt_re_register_ib()
699 ret = ib_device_set_netdev(&rdev->ibdev, rdev->netdev, 1); in bnxt_re_register_ib()
706 static void bnxt_re_dev_remove(struct bnxt_re_dev *rdev) in bnxt_re_dev_remove() argument
708 dev_put(rdev->netdev); in bnxt_re_dev_remove()
709 rdev->netdev = NULL; in bnxt_re_dev_remove()
712 list_del_rcu(&rdev->list); in bnxt_re_dev_remove()
717 ib_dealloc_device(&rdev->ibdev); in bnxt_re_dev_remove()
724 struct bnxt_re_dev *rdev; in bnxt_re_dev_add() local
727 rdev = ib_alloc_device(bnxt_re_dev, ibdev); in bnxt_re_dev_add()
728 if (!rdev) { in bnxt_re_dev_add()
734 rdev->netdev = netdev; in bnxt_re_dev_add()
735 dev_hold(rdev->netdev); in bnxt_re_dev_add()
736 rdev->en_dev = en_dev; in bnxt_re_dev_add()
737 rdev->id = rdev->en_dev->pdev->devfn; in bnxt_re_dev_add()
738 INIT_LIST_HEAD(&rdev->qp_list); in bnxt_re_dev_add()
739 mutex_init(&rdev->qp_lock); in bnxt_re_dev_add()
740 atomic_set(&rdev->qp_count, 0); in bnxt_re_dev_add()
741 atomic_set(&rdev->cq_count, 0); in bnxt_re_dev_add()
742 atomic_set(&rdev->srq_count, 0); in bnxt_re_dev_add()
743 atomic_set(&rdev->mr_count, 0); in bnxt_re_dev_add()
744 atomic_set(&rdev->mw_count, 0); in bnxt_re_dev_add()
745 rdev->cosq[0] = 0xFFFF; in bnxt_re_dev_add()
746 rdev->cosq[1] = 0xFFFF; in bnxt_re_dev_add()
749 list_add_tail_rcu(&rdev->list, &bnxt_re_dev_list); in bnxt_re_dev_add()
751 return rdev; in bnxt_re_dev_add()
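
bnxt_re_dev_add() and bnxt_re_dev_remove() bracket the device's lifetime: ib_alloc_device() allocates the driver structure with the struct ib_device embedded at the named member, dev_hold() pins the underlying netdev for as long as rdev->netdev points at it, and list insertion and removal use the _rcu variants so bnxt_re_from_netdev() can keep walking the list without a lock. The pairing, reduced to its essentials (a sketch, not the verbatim driver):

    /* add */
    rdev = ib_alloc_device(bnxt_re_dev, ibdev);  /* container + ib_device */
    rdev->netdev = netdev;
    dev_hold(rdev->netdev);                      /* pin the netdev */
    list_add_tail_rcu(&rdev->list, &bnxt_re_dev_list);

    /* remove */
    dev_put(rdev->netdev);                       /* unpin */
    rdev->netdev = NULL;
    list_del_rcu(&rdev->list);                   /* readers may be in flight */
    ib_dealloc_device(&rdev->ibdev);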
800 event.device = &qp->rdev->ibdev; in bnxt_re_handle_qp_async_event()
864 ib_event.device = &srq->rdev->ibdev; in bnxt_re_srqn_handler()
899 static u32 bnxt_re_get_nqdb_offset(struct bnxt_re_dev *rdev, u16 indx) in bnxt_re_get_nqdb_offset() argument
901 return bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx) ? in bnxt_re_get_nqdb_offset()
902 0x10000 : rdev->msix_entries[indx].db_offset; in bnxt_re_get_nqdb_offset()
905 static void bnxt_re_cleanup_res(struct bnxt_re_dev *rdev) in bnxt_re_cleanup_res() argument
909 for (i = 1; i < rdev->num_msix; i++) in bnxt_re_cleanup_res()
910 bnxt_qplib_disable_nq(&rdev->nq[i - 1]); in bnxt_re_cleanup_res()
912 if (rdev->qplib_res.rcfw) in bnxt_re_cleanup_res()
913 bnxt_qplib_cleanup_res(&rdev->qplib_res); in bnxt_re_cleanup_res()
916 static int bnxt_re_init_res(struct bnxt_re_dev *rdev) in bnxt_re_init_res() argument
922 bnxt_qplib_init_res(&rdev->qplib_res); in bnxt_re_init_res()
924 for (i = 1; i < rdev->num_msix ; i++) { in bnxt_re_init_res()
925 db_offt = bnxt_re_get_nqdb_offset(rdev, i); in bnxt_re_init_res()
926 rc = bnxt_qplib_enable_nq(rdev->en_dev->pdev, &rdev->nq[i - 1], in bnxt_re_init_res()
927 i - 1, rdev->msix_entries[i].vector, in bnxt_re_init_res()
931 dev_err(rdev_to_dev(rdev), in bnxt_re_init_res()
940 bnxt_qplib_disable_nq(&rdev->nq[i]); in bnxt_re_init_res()
944 static void bnxt_re_free_nq_res(struct bnxt_re_dev *rdev) in bnxt_re_free_nq_res() argument
949 for (i = 0; i < rdev->num_msix - 1; i++) { in bnxt_re_free_nq_res()
950 type = bnxt_qplib_get_ring_type(&rdev->chip_ctx); in bnxt_re_free_nq_res()
951 bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id, type); in bnxt_re_free_nq_res()
952 rdev->nq[i].res = NULL; in bnxt_re_free_nq_res()
953 bnxt_qplib_free_nq(&rdev->nq[i]); in bnxt_re_free_nq_res()
957 static void bnxt_re_free_res(struct bnxt_re_dev *rdev) in bnxt_re_free_res() argument
959 bnxt_re_free_nq_res(rdev); in bnxt_re_free_res()
961 if (rdev->qplib_res.dpi_tbl.max) { in bnxt_re_free_res()
962 bnxt_qplib_dealloc_dpi(&rdev->qplib_res, in bnxt_re_free_res()
963 &rdev->qplib_res.dpi_tbl, in bnxt_re_free_res()
964 &rdev->dpi_privileged); in bnxt_re_free_res()
966 if (rdev->qplib_res.rcfw) { in bnxt_re_free_res()
967 bnxt_qplib_free_res(&rdev->qplib_res); in bnxt_re_free_res()
968 rdev->qplib_res.rcfw = NULL; in bnxt_re_free_res()
972 static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev) in bnxt_re_alloc_res() argument
981 rdev->qplib_res.rcfw = &rdev->rcfw; in bnxt_re_alloc_res()
982 rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr, in bnxt_re_alloc_res()
983 rdev->is_virtfn); in bnxt_re_alloc_res()
987 rc = bnxt_qplib_alloc_res(&rdev->qplib_res, rdev->en_dev->pdev, in bnxt_re_alloc_res()
988 rdev->netdev, &rdev->dev_attr); in bnxt_re_alloc_res()
992 rc = bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl, in bnxt_re_alloc_res()
993 &rdev->dpi_privileged, in bnxt_re_alloc_res()
994 rdev); in bnxt_re_alloc_res()
998 for (i = 0; i < rdev->num_msix - 1; i++) { in bnxt_re_alloc_res()
999 rdev->nq[i].res = &rdev->qplib_res; in bnxt_re_alloc_res()
1000 rdev->nq[i].hwq.max_elements = BNXT_RE_MAX_CQ_COUNT + in bnxt_re_alloc_res()
1002 rc = bnxt_qplib_alloc_nq(rdev->en_dev->pdev, &rdev->nq[i]); in bnxt_re_alloc_res()
1004 dev_err(rdev_to_dev(rdev), "Alloc Failed NQ%d rc:%#x", in bnxt_re_alloc_res()
1008 type = bnxt_qplib_get_ring_type(&rdev->chip_ctx); in bnxt_re_alloc_res()
1009 pg_map = rdev->nq[i].hwq.pbl[PBL_LVL_0].pg_map_arr; in bnxt_re_alloc_res()
1010 pages = rdev->nq[i].hwq.pbl[rdev->nq[i].hwq.level].pg_count; in bnxt_re_alloc_res()
1011 rc = bnxt_re_net_ring_alloc(rdev, pg_map, pages, type, in bnxt_re_alloc_res()
1013 rdev->msix_entries[i + 1].ring_idx, in bnxt_re_alloc_res()
1014 &rdev->nq[i].ring_id); in bnxt_re_alloc_res()
1016 dev_err(rdev_to_dev(rdev), in bnxt_re_alloc_res()
1019 bnxt_qplib_free_nq(&rdev->nq[i]); in bnxt_re_alloc_res()
1027 type = bnxt_qplib_get_ring_type(&rdev->chip_ctx); in bnxt_re_alloc_res()
1028 bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id, type); in bnxt_re_alloc_res()
1029 bnxt_qplib_free_nq(&rdev->nq[i]); in bnxt_re_alloc_res()
1031 bnxt_qplib_dealloc_dpi(&rdev->qplib_res, in bnxt_re_alloc_res()
1032 &rdev->qplib_res.dpi_tbl, in bnxt_re_alloc_res()
1033 &rdev->dpi_privileged); in bnxt_re_alloc_res()
1035 bnxt_qplib_free_res(&rdev->qplib_res); in bnxt_re_alloc_res()
1038 rdev->qplib_res.rcfw = NULL; in bnxt_re_alloc_res()
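
bnxt_re_alloc_res() builds the notification queues one at a time, and its error path (lines 1027-1038) unwinds exactly the entries that succeeded, then the privileged DPI and the resource table, in reverse order of setup. The control flow, reduced to the classic goto ladder (the helpers here are hypothetical stand-ins; in the driver the failing entry cleans itself up before jumping, as on line 1019):

    static int alloc_all_nqs(struct bnxt_re_dev *rdev, int nqs)
    {
            int i, rc;

            for (i = 0; i < nqs; i++) {
                    rc = alloc_one_nq(rdev, i);     /* hypothetical helper */
                    if (rc)
                            goto unwind;            /* nq[i] already cleaned */
            }
            return 0;

    unwind:
            while (--i >= 0) {                      /* free nq[i-1] .. nq[0] */
                    free_one_ring(rdev, i);         /* hypothetical helpers */
                    free_one_nq(rdev, i);
            }
            return rc;
    }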
1062 static int bnxt_re_query_hwrm_pri2cos(struct bnxt_re_dev *rdev, u8 dir, in bnxt_re_query_hwrm_pri2cos() argument
1066 struct bnxt *bp = netdev_priv(rdev->netdev); in bnxt_re_query_hwrm_pri2cos()
1068 struct bnxt_en_dev *en_dev = rdev->en_dev; in bnxt_re_query_hwrm_pri2cos()
1078 bnxt_re_init_hwrm_hdr(rdev, (void *)&req, in bnxt_re_query_hwrm_pri2cos()
1092 dev_warn(rdev_to_dev(rdev), in bnxt_re_query_hwrm_pri2cos()
1094 dev_warn(rdev_to_dev(rdev), in bnxt_re_query_hwrm_pri2cos()
1105 static bool bnxt_re_is_qp1_or_shadow_qp(struct bnxt_re_dev *rdev, in bnxt_re_is_qp1_or_shadow_qp() argument
1108 return (qp->ib_qp.qp_type == IB_QPT_GSI) || (qp == rdev->qp1_sqp); in bnxt_re_is_qp1_or_shadow_qp()
1111 static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev) in bnxt_re_dev_stop() argument
1118 mutex_lock(&rdev->qp_lock); in bnxt_re_dev_stop()
1119 list_for_each_entry(qp, &rdev->qp_list, list) { in bnxt_re_dev_stop()
1121 if (!bnxt_re_is_qp1_or_shadow_qp(rdev, qp)) { in bnxt_re_dev_stop()
1126 bnxt_re_dispatch_event(&rdev->ibdev, &qp->ib_qp, in bnxt_re_dev_stop()
1133 mutex_unlock(&rdev->qp_lock); in bnxt_re_dev_stop()
1136 static int bnxt_re_update_gid(struct bnxt_re_dev *rdev) in bnxt_re_update_gid() argument
1138 struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl; in bnxt_re_update_gid()
1143 if (!test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags)) in bnxt_re_update_gid()
1147 dev_err(rdev_to_dev(rdev), "QPLIB: SGID table not allocated"); in bnxt_re_update_gid()
1166 rdev->qplib_res.netdev->dev_addr); in bnxt_re_update_gid()
1172 static u32 bnxt_re_get_priority_mask(struct bnxt_re_dev *rdev) in bnxt_re_get_priority_mask() argument
1178 netdev = rdev->netdev; in bnxt_re_get_priority_mask()
1209 static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev) in bnxt_re_setup_qos() argument
1216 prio_map = bnxt_re_get_priority_mask(rdev); in bnxt_re_setup_qos()
1218 if (prio_map == rdev->cur_prio_map) in bnxt_re_setup_qos()
1220 rdev->cur_prio_map = prio_map; in bnxt_re_setup_qos()
1222 rc = bnxt_re_query_hwrm_pri2cos(rdev, 0, &cid_map); in bnxt_re_setup_qos()
1224 dev_warn(rdev_to_dev(rdev), "no cos for p_mask %x\n", prio_map); in bnxt_re_setup_qos()
1228 bnxt_re_parse_cid_map(prio_map, (u8 *)&cid_map, rdev->cosq); in bnxt_re_setup_qos()
1231 rc = bnxt_qplib_map_tc2cos(&rdev->qplib_res, rdev->cosq); in bnxt_re_setup_qos()
1233 dev_warn(rdev_to_dev(rdev), "no tc for cos{%x, %x}\n", in bnxt_re_setup_qos()
1234 rdev->cosq[0], rdev->cosq[1]); in bnxt_re_setup_qos()
1241 if ((prio_map == 0 && rdev->qplib_res.prio) || in bnxt_re_setup_qos()
1242 (prio_map != 0 && !rdev->qplib_res.prio)) { in bnxt_re_setup_qos()
1243 rdev->qplib_res.prio = prio_map ? true : false; in bnxt_re_setup_qos()
1245 bnxt_re_update_gid(rdev); in bnxt_re_setup_qos()
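
Note the transition test on lines 1241-1243: only when the priority mask flips between zero and nonzero does rdev->qplib_res.prio change, and bnxt_re_update_gid() is then called to re-program the SGID table, presumably so the hardware GID entries pick up the changed priority/VLAN treatment.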
1251 static void bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev) in bnxt_re_query_hwrm_intf_version() argument
1253 struct bnxt_en_dev *en_dev = rdev->en_dev; in bnxt_re_query_hwrm_intf_version()
1260 bnxt_re_init_hwrm_hdr(rdev, (void *)&req, in bnxt_re_query_hwrm_intf_version()
1269 dev_err(rdev_to_dev(rdev), in bnxt_re_query_hwrm_intf_version()
1273 rdev->qplib_ctx.hwrm_intf_ver = in bnxt_re_query_hwrm_intf_version()
1280 static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev) in bnxt_re_ib_unreg() argument
1285 if (test_and_clear_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags)) { in bnxt_re_ib_unreg()
1287 bnxt_re_unregister_ib(rdev); in bnxt_re_ib_unreg()
1289 if (test_and_clear_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags)) in bnxt_re_ib_unreg()
1290 cancel_delayed_work_sync(&rdev->worker); in bnxt_re_ib_unreg()
1293 &rdev->flags)) in bnxt_re_ib_unreg()
1294 bnxt_re_cleanup_res(rdev); in bnxt_re_ib_unreg()
1295 if (test_and_clear_bit(BNXT_RE_FLAG_RESOURCES_ALLOCATED, &rdev->flags)) in bnxt_re_ib_unreg()
1296 bnxt_re_free_res(rdev); in bnxt_re_ib_unreg()
1298 if (test_and_clear_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags)) { in bnxt_re_ib_unreg()
1299 rc = bnxt_qplib_deinit_rcfw(&rdev->rcfw); in bnxt_re_ib_unreg()
1301 dev_warn(rdev_to_dev(rdev), in bnxt_re_ib_unreg()
1303 bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id); in bnxt_re_ib_unreg()
1304 bnxt_qplib_free_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx); in bnxt_re_ib_unreg()
1305 bnxt_qplib_disable_rcfw_channel(&rdev->rcfw); in bnxt_re_ib_unreg()
1306 type = bnxt_qplib_get_ring_type(&rdev->chip_ctx); in bnxt_re_ib_unreg()
1307 bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id, type); in bnxt_re_ib_unreg()
1308 bnxt_qplib_free_rcfw_channel(&rdev->rcfw); in bnxt_re_ib_unreg()
1310 if (test_and_clear_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags)) { in bnxt_re_ib_unreg()
1311 rc = bnxt_re_free_msix(rdev); in bnxt_re_ib_unreg()
1313 dev_warn(rdev_to_dev(rdev), in bnxt_re_ib_unreg()
1317 bnxt_re_destroy_chip_ctx(rdev); in bnxt_re_ib_unreg()
1318 if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags)) { in bnxt_re_ib_unreg()
1319 rc = bnxt_re_unregister_netdev(rdev); in bnxt_re_ib_unreg()
1321 dev_warn(rdev_to_dev(rdev), in bnxt_re_ib_unreg()
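
Every step of the teardown in bnxt_re_ib_unreg() is gated on a BNXT_RE_FLAG_* bit that the matching bring-up step set, and test_and_clear_bit() consumes the bit atomically, so the function can be reached from several unwind paths (module exit, netdev unregister, the failure ladder in bnxt_re_ib_reg()) without double-freeing anything. The idiom in isolation, reduced to one flag (the real block under this flag also frees the stats context, rings, and firmware context):

    /* Runs the undo at most once, however many times the teardown
     * path is entered; the flag was set by the matching init step. */
    if (test_and_clear_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags))
            bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);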
1329 struct bnxt_re_dev *rdev = container_of(work, struct bnxt_re_dev, in bnxt_re_worker() local
1332 bnxt_re_setup_qos(rdev); in bnxt_re_worker()
1333 schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000)); in bnxt_re_worker()
1336 static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev) in bnxt_re_ib_reg() argument
1350 rc = bnxt_re_register_netdev(rdev); in bnxt_re_ib_reg()
1356 set_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags); in bnxt_re_ib_reg()
1358 rc = bnxt_re_setup_chip_ctx(rdev); in bnxt_re_ib_reg()
1360 dev_err(rdev_to_dev(rdev), "Failed to get chip context\n"); in bnxt_re_ib_reg()
1365 bnxt_re_get_sriov_func_type(rdev); in bnxt_re_ib_reg()
1367 rc = bnxt_re_request_msix(rdev); in bnxt_re_ib_reg()
1373 set_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags); in bnxt_re_ib_reg()
1375 bnxt_re_query_hwrm_intf_version(rdev); in bnxt_re_ib_reg()
1380 rc = bnxt_qplib_alloc_rcfw_channel(rdev->en_dev->pdev, &rdev->rcfw, in bnxt_re_ib_reg()
1381 &rdev->qplib_ctx, in bnxt_re_ib_reg()
1387 type = bnxt_qplib_get_ring_type(&rdev->chip_ctx); in bnxt_re_ib_reg()
1388 pg_map = rdev->rcfw.creq.pbl[PBL_LVL_0].pg_map_arr; in bnxt_re_ib_reg()
1389 pages = rdev->rcfw.creq.pbl[rdev->rcfw.creq.level].pg_count; in bnxt_re_ib_reg()
1390 ridx = rdev->msix_entries[BNXT_RE_AEQ_IDX].ring_idx; in bnxt_re_ib_reg()
1391 rc = bnxt_re_net_ring_alloc(rdev, pg_map, pages, type, in bnxt_re_ib_reg()
1393 ridx, &rdev->rcfw.creq_ring_id); in bnxt_re_ib_reg()
1398 db_offt = bnxt_re_get_nqdb_offset(rdev, BNXT_RE_AEQ_IDX); in bnxt_re_ib_reg()
1399 vid = rdev->msix_entries[BNXT_RE_AEQ_IDX].vector; in bnxt_re_ib_reg()
1400 rc = bnxt_qplib_enable_rcfw_channel(rdev->en_dev->pdev, &rdev->rcfw, in bnxt_re_ib_reg()
1401 vid, db_offt, rdev->is_virtfn, in bnxt_re_ib_reg()
1408 rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr, in bnxt_re_ib_reg()
1409 rdev->is_virtfn); in bnxt_re_ib_reg()
1412 if (!rdev->is_virtfn) in bnxt_re_ib_reg()
1413 bnxt_re_set_resource_limits(rdev); in bnxt_re_ib_reg()
1415 rc = bnxt_qplib_alloc_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx, 0, in bnxt_re_ib_reg()
1416 bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx)); in bnxt_re_ib_reg()
1421 rc = bnxt_re_net_stats_ctx_alloc(rdev, in bnxt_re_ib_reg()
1422 rdev->qplib_ctx.stats.dma_map, in bnxt_re_ib_reg()
1423 &rdev->qplib_ctx.stats.fw_id); in bnxt_re_ib_reg()
1429 rc = bnxt_qplib_init_rcfw(&rdev->rcfw, &rdev->qplib_ctx, in bnxt_re_ib_reg()
1430 rdev->is_virtfn); in bnxt_re_ib_reg()
1435 set_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags); in bnxt_re_ib_reg()
1438 rc = bnxt_re_alloc_res(rdev); in bnxt_re_ib_reg()
1443 set_bit(BNXT_RE_FLAG_RESOURCES_ALLOCATED, &rdev->flags); in bnxt_re_ib_reg()
1444 rc = bnxt_re_init_res(rdev); in bnxt_re_ib_reg()
1450 set_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED, &rdev->flags); in bnxt_re_ib_reg()
1452 if (!rdev->is_virtfn) { in bnxt_re_ib_reg()
1453 rc = bnxt_re_setup_qos(rdev); in bnxt_re_ib_reg()
1457 INIT_DELAYED_WORK(&rdev->worker, bnxt_re_worker); in bnxt_re_ib_reg()
1458 set_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags); in bnxt_re_ib_reg()
1459 schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000)); in bnxt_re_ib_reg()
1466 rc = bnxt_re_register_ib(rdev); in bnxt_re_ib_reg()
1471 set_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags); in bnxt_re_ib_reg()
1472 dev_info(rdev_to_dev(rdev), "Device registered successfully"); in bnxt_re_ib_reg()
1473 ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed, in bnxt_re_ib_reg()
1474 &rdev->active_width); in bnxt_re_ib_reg()
1475 set_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags); in bnxt_re_ib_reg()
1476 bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_PORT_ACTIVE); in bnxt_re_ib_reg()
1480 bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id); in bnxt_re_ib_reg()
1482 bnxt_qplib_free_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx); in bnxt_re_ib_reg()
1484 bnxt_qplib_disable_rcfw_channel(&rdev->rcfw); in bnxt_re_ib_reg()
1486 type = bnxt_qplib_get_ring_type(&rdev->chip_ctx); in bnxt_re_ib_reg()
1487 bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id, type); in bnxt_re_ib_reg()
1489 bnxt_qplib_free_rcfw_channel(&rdev->rcfw); in bnxt_re_ib_reg()
1493 bnxt_re_ib_unreg(rdev); in bnxt_re_ib_reg()
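
bnxt_re_ib_reg() is the mirror image of that teardown: register with bnxt_en, set up the chip context, request MSI-X, bring up the RCFW command channel (CREQ ring allocation, channel enable, device-attribute query), allocate the firmware and statistics contexts, initialize RCFW, then allocate and initialize the driver resources before finally registering the IB device. Each step that succeeds sets the flag its counterpart in bnxt_re_ib_unreg() tests, and the failure labels at the bottom (lines 1480-1489) fall through the same teardown calls in reverse order, with the latest failures funneling into bnxt_re_ib_unreg() itself on line 1493.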
1499 static void bnxt_re_dev_unreg(struct bnxt_re_dev *rdev) in bnxt_re_dev_unreg() argument
1501 struct bnxt_en_dev *en_dev = rdev->en_dev; in bnxt_re_dev_unreg()
1502 struct net_device *netdev = rdev->netdev; in bnxt_re_dev_unreg()
1504 bnxt_re_dev_remove(rdev); in bnxt_re_dev_unreg()
1510 static int bnxt_re_dev_reg(struct bnxt_re_dev **rdev, struct net_device *netdev) in bnxt_re_dev_reg() argument
1525 *rdev = bnxt_re_dev_add(netdev, en_dev); in bnxt_re_dev_reg()
1526 if (!*rdev) { in bnxt_re_dev_reg()
1535 static void bnxt_re_remove_one(struct bnxt_re_dev *rdev) in bnxt_re_remove_one() argument
1537 pci_dev_put(rdev->en_dev->pdev); in bnxt_re_remove_one()
1544 struct bnxt_re_dev *rdev; in bnxt_re_task() local
1548 rdev = re_work->rdev; in bnxt_re_task()
1551 !test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags)) in bnxt_re_task()
1556 rc = bnxt_re_ib_reg(rdev); in bnxt_re_task()
1558 dev_err(rdev_to_dev(rdev), in bnxt_re_task()
1560 bnxt_re_remove_one(rdev); in bnxt_re_task()
1561 bnxt_re_dev_unreg(rdev); in bnxt_re_task()
1566 bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, in bnxt_re_task()
1570 bnxt_re_dev_stop(rdev); in bnxt_re_task()
1573 if (!netif_carrier_ok(rdev->netdev)) in bnxt_re_task()
1574 bnxt_re_dev_stop(rdev); in bnxt_re_task()
1575 else if (netif_carrier_ok(rdev->netdev)) in bnxt_re_task()
1576 bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, in bnxt_re_task()
1578 ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed, in bnxt_re_task()
1579 &rdev->active_width); in bnxt_re_task()
1585 atomic_dec(&rdev->sched_count); in bnxt_re_task()
1590 static void bnxt_re_init_one(struct bnxt_re_dev *rdev) in bnxt_re_init_one() argument
1592 pci_dev_get(rdev->en_dev->pdev); in bnxt_re_init_one()
1614 struct bnxt_re_dev *rdev; in bnxt_re_netdev_event() local
1622 rdev = bnxt_re_from_netdev(real_dev); in bnxt_re_netdev_event()
1623 if (!rdev && event != NETDEV_REGISTER) in bnxt_re_netdev_event()
1630 if (rdev) in bnxt_re_netdev_event()
1632 rc = bnxt_re_dev_reg(&rdev, real_dev); in bnxt_re_netdev_event()
1640 bnxt_re_init_one(rdev); in bnxt_re_netdev_event()
1648 if (atomic_read(&rdev->sched_count) > 0) in bnxt_re_netdev_event()
1650 bnxt_re_ib_unreg(rdev); in bnxt_re_netdev_event()
1651 bnxt_re_remove_one(rdev); in bnxt_re_netdev_event()
1652 bnxt_re_dev_unreg(rdev); in bnxt_re_netdev_event()
1663 re_work->rdev = rdev; in bnxt_re_netdev_event()
1668 atomic_inc(&rdev->sched_count); in bnxt_re_netdev_event()
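
bnxt_re_netdev_event() runs in notifier context, so anything beyond bookkeeping is pushed to a work item handled by bnxt_re_task(); rdev->sched_count counts in-flight work so the NETDEV_UNREGISTER path (line 1648) can detect a device whose teardown a pending worker will perform, and the worker drops the count again on line 1585. A reduced, hypothetical sketch of the scheduling half (the work struct follows the fields visible above; bnxt_re_wq is assumed here as the driver's private workqueue):

    struct bnxt_re_work {
            struct work_struct      work;
            unsigned long           event;
            struct bnxt_re_dev      *rdev;
    };

    static void bnxt_re_schedule(struct bnxt_re_dev *rdev, unsigned long event)
    {
            struct bnxt_re_work *re_work;

            /* notifier context: no sleeping allocations */
            re_work = kzalloc(sizeof(*re_work), GFP_ATOMIC);
            if (!re_work)
                    return;
            re_work->rdev = rdev;
            re_work->event = event;
            INIT_WORK(&re_work->work, bnxt_re_task);
            /* let the unregister path see the pending task */
            atomic_inc(&rdev->sched_count);
            queue_work(bnxt_re_wq, &re_work->work);
    }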
1709 struct bnxt_re_dev *rdev, *next; in bnxt_re_mod_exit() local
1721 list_for_each_entry_safe_reverse(rdev, next, &to_be_deleted, list) { in bnxt_re_mod_exit()
1722 dev_info(rdev_to_dev(rdev), "Unregistering Device"); in bnxt_re_mod_exit()
1728 bnxt_re_dev_stop(rdev); in bnxt_re_mod_exit()
1731 bnxt_re_ib_unreg(rdev); in bnxt_re_mod_exit()
1733 bnxt_re_remove_one(rdev); in bnxt_re_mod_exit()
1734 bnxt_re_dev_unreg(rdev); in bnxt_re_mod_exit()