Lines matching refs: ibdev
82 static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init);
134 struct mlx4_ib_dev *ibdev = to_mdev(device); in mlx4_ib_get_netdev() local
138 dev = mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port_num); in mlx4_ib_get_netdev()
141 if (mlx4_is_bonded(ibdev->dev)) { in mlx4_ib_get_netdev()
162 struct mlx4_ib_dev *ibdev, in mlx4_ib_update_gids_v1() argument
167 struct mlx4_dev *dev = ibdev->dev; in mlx4_ib_update_gids_v1()
195 struct mlx4_ib_dev *ibdev, in mlx4_ib_update_gids_v1_v2() argument
200 struct mlx4_dev *dev = ibdev->dev; in mlx4_ib_update_gids_v1_v2()
240 struct mlx4_ib_dev *ibdev, in mlx4_ib_update_gids() argument
243 if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) in mlx4_ib_update_gids()
244 return mlx4_ib_update_gids_v1_v2(gids, ibdev, port_num); in mlx4_ib_update_gids()
246 return mlx4_ib_update_gids_v1(gids, ibdev, port_num); in mlx4_ib_update_gids()
251 struct mlx4_ib_dev *ibdev = to_mdev(attr->device); in mlx4_ib_add_gid() local
252 struct mlx4_ib_iboe *iboe = &ibdev->iboe; in mlx4_ib_add_gid()
319 ret = mlx4_ib_update_gids(gids, ibdev, attr->port_num); in mlx4_ib_add_gid()
329 struct mlx4_ib_dev *ibdev = to_mdev(attr->device); in mlx4_ib_del_gid() local
330 struct mlx4_ib_iboe *iboe = &ibdev->iboe; in mlx4_ib_del_gid()
376 ret = mlx4_ib_update_gids(gids, ibdev, attr->port_num); in mlx4_ib_del_gid()
382 int mlx4_ib_gid_index_to_real_index(struct mlx4_ib_dev *ibdev, in mlx4_ib_gid_index_to_real_index() argument
385 struct mlx4_ib_iboe *iboe = &ibdev->iboe; in mlx4_ib_gid_index_to_real_index()
396 if (mlx4_is_bonded(ibdev->dev)) in mlx4_ib_gid_index_to_real_index()
399 if (!rdma_cap_roce_gid_table(&ibdev->ib_dev, port_num)) in mlx4_ib_gid_index_to_real_index()
421 static int mlx4_ib_query_device(struct ib_device *ibdev, in mlx4_ib_query_device() argument
425 struct mlx4_ib_dev *dev = to_mdev(ibdev); in mlx4_ib_query_device()
460 err = mlx4_MAD_IFC(to_mdev(ibdev), MLX4_MAD_IFC_IGNORE_KEYS, in mlx4_ib_query_device()
550 if (mlx4_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET || in mlx4_ib_query_device()
551 mlx4_ib_port_link_layer(ibdev, 2) == IB_LINK_LAYER_ETHERNET) { in mlx4_ib_query_device()
612 ((mlx4_ib_port_link_layer(ibdev, 1) == in mlx4_ib_query_device()
614 (mlx4_ib_port_link_layer(ibdev, 2) == in mlx4_ib_query_device()
645 static int ib_link_query_port(struct ib_device *ibdev, u8 port, in ib_link_query_port() argument
663 if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view) in ib_link_query_port()
666 err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL, in ib_link_query_port()
682 props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port]; in ib_link_query_port()
683 props->max_msg_sz = to_mdev(ibdev)->dev->caps.max_msg_sz; in ib_link_query_port()
684 props->pkey_tbl_len = to_mdev(ibdev)->dev->caps.pkey_table_len[port]; in ib_link_query_port()
715 err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, in ib_link_query_port()
741 static int eth_link_query_port(struct ib_device *ibdev, u8 port, in eth_link_query_port() argument
745 struct mlx4_ib_dev *mdev = to_mdev(ibdev); in eth_link_query_port()
801 int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port, in __mlx4_ib_query_port() argument
808 err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ? in __mlx4_ib_query_port()
809 ib_link_query_port(ibdev, port, props, netw_view) : in __mlx4_ib_query_port()
810 eth_link_query_port(ibdev, port, props); in __mlx4_ib_query_port()
815 static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port, in mlx4_ib_query_port() argument
819 return __mlx4_ib_query_port(ibdev, port, props, 0); in mlx4_ib_query_port()
822 int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index, in __mlx4_ib_query_gid() argument
828 struct mlx4_ib_dev *dev = to_mdev(ibdev); in __mlx4_ib_query_gid()
878 static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index, in mlx4_ib_query_gid() argument
881 if (rdma_protocol_ib(ibdev, port)) in mlx4_ib_query_gid()
882 return __mlx4_ib_query_gid(ibdev, port, index, gid, 0); in mlx4_ib_query_gid()
886 static int mlx4_ib_query_sl2vl(struct ib_device *ibdev, u8 port, u64 *sl2vl_tbl) in mlx4_ib_query_sl2vl() argument
895 if (mlx4_is_slave(to_mdev(ibdev)->dev)) { in mlx4_ib_query_sl2vl()
909 if (mlx4_is_mfunc(to_mdev(ibdev)->dev)) in mlx4_ib_query_sl2vl()
912 err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL, in mlx4_ib_query_sl2vl()
946 int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, in __mlx4_ib_query_pkey() argument
963 if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view) in __mlx4_ib_query_pkey()
966 err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL, in __mlx4_ib_query_pkey()
979 static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey) in mlx4_ib_query_pkey() argument
981 return __mlx4_ib_query_pkey(ibdev, port, index, pkey, 0); in mlx4_ib_query_pkey()
984 static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask, in mlx4_ib_modify_device() argument
996 if (mlx4_is_slave(to_mdev(ibdev)->dev)) in mlx4_ib_modify_device()
999 spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags); in mlx4_ib_modify_device()
1000 memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX); in mlx4_ib_modify_device()
1001 spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags); in mlx4_ib_modify_device()
1007 mailbox = mlx4_alloc_cmd_mailbox(to_mdev(ibdev)->dev); in mlx4_ib_modify_device()
1012 mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0, in mlx4_ib_modify_device()
1015 mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox); in mlx4_ib_modify_device()
1046 static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask, in mlx4_ib_modify_port() argument
1049 struct mlx4_ib_dev *mdev = to_mdev(ibdev); in mlx4_ib_modify_port()
1064 err = ib_query_port(ibdev, port, &attr); in mlx4_ib_modify_port()
1076 mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex); in mlx4_ib_modify_port()
1083 struct ib_device *ibdev = uctx->device; in mlx4_ib_alloc_ucontext() local
1084 struct mlx4_ib_dev *dev = to_mdev(ibdev); in mlx4_ib_alloc_ucontext()
1093 if (ibdev->ops.uverbs_abi_ver == in mlx4_ib_alloc_ucontext()
1106 err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar); in mlx4_ib_alloc_ucontext()
1116 if (ibdev->ops.uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION) in mlx4_ib_alloc_ucontext()
1122 mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar); in mlx4_ib_alloc_ucontext()
1185 struct ib_device *ibdev = ibpd->device; in mlx4_ib_alloc_pd() local
1188 err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn); in mlx4_ib_alloc_pd()
1193 mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn); in mlx4_ib_alloc_pd()
1204 static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev, in mlx4_ib_alloc_xrcd() argument
1211 if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)) in mlx4_ib_alloc_xrcd()
1218 err = mlx4_xrcd_alloc(to_mdev(ibdev)->dev, &xrcd->xrcdn); in mlx4_ib_alloc_xrcd()
1222 xrcd->pd = ib_alloc_pd(ibdev, 0); in mlx4_ib_alloc_xrcd()
1229 xrcd->cq = ib_create_cq(ibdev, NULL, NULL, xrcd, &cq_attr); in mlx4_ib_alloc_xrcd()
1240 mlx4_xrcd_free(to_mdev(ibdev)->dev, xrcd->xrcdn); in mlx4_ib_alloc_xrcd()
1279 static void mlx4_ib_delete_counters_table(struct mlx4_ib_dev *ibdev, in mlx4_ib_delete_counters_table() argument
1288 mlx4_counter_free(ibdev->dev, counter->index); in mlx4_ib_delete_counters_table()
2104 static struct rdma_hw_stats *mlx4_ib_alloc_hw_stats(struct ib_device *ibdev, in mlx4_ib_alloc_hw_stats() argument
2107 struct mlx4_ib_dev *dev = to_mdev(ibdev); in mlx4_ib_alloc_hw_stats()
2118 static int mlx4_ib_get_hw_stats(struct ib_device *ibdev, in mlx4_ib_get_hw_stats() argument
2122 struct mlx4_ib_dev *dev = to_mdev(ibdev); in mlx4_ib_get_hw_stats()
2143 static int __mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev, in __mlx4_ib_alloc_diag_counters() argument
2153 if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT) in __mlx4_ib_alloc_diag_counters()
2176 static void mlx4_ib_fill_diag_counters(struct mlx4_ib_dev *ibdev, in mlx4_ib_fill_diag_counters() argument
2189 if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT) { in mlx4_ib_fill_diag_counters()
2209 static int mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev) in mlx4_ib_alloc_diag_counters() argument
2211 struct mlx4_ib_diag_counters *diag = ibdev->diag_counters; in mlx4_ib_alloc_diag_counters()
2214 bool per_port = !!(ibdev->dev->caps.flags2 & in mlx4_ib_alloc_diag_counters()
2217 if (mlx4_is_slave(ibdev->dev)) in mlx4_ib_alloc_diag_counters()
2225 ret = __mlx4_ib_alloc_diag_counters(ibdev, &diag[i].name, in mlx4_ib_alloc_diag_counters()
2231 mlx4_ib_fill_diag_counters(ibdev, diag[i].name, in mlx4_ib_alloc_diag_counters()
2235 ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_hw_stats_ops); in mlx4_ib_alloc_diag_counters()
2248 static void mlx4_ib_diag_cleanup(struct mlx4_ib_dev *ibdev) in mlx4_ib_diag_cleanup() argument
2253 kfree(ibdev->diag_counters[i].offset); in mlx4_ib_diag_cleanup()
2254 kfree(ibdev->diag_counters[i].name); in mlx4_ib_diag_cleanup()
2259 static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev, in mlx4_ib_update_qps() argument
2271 atomic64_set(&ibdev->iboe.mac[port - 1], new_smac); in mlx4_ib_update_qps()
2274 if (!mlx4_is_mfunc(ibdev->dev)) in mlx4_ib_update_qps()
2277 mutex_lock(&ibdev->qp1_proxy_lock[port - 1]); in mlx4_ib_update_qps()
2278 qp = ibdev->qp1_proxy[port - 1]; in mlx4_ib_update_qps()
2289 new_smac_index = mlx4_register_mac(ibdev->dev, port, new_smac); in mlx4_ib_update_qps()
2295 if (mlx4_update_qp(ibdev->dev, qp->mqp.qpn, MLX4_UPDATE_QP_SMAC, in mlx4_ib_update_qps()
2310 mlx4_unregister_mac(ibdev->dev, port, release_mac); in mlx4_ib_update_qps()
2313 mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]); in mlx4_ib_update_qps()
2316 static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev, in mlx4_ib_scan_netdevs() argument
2327 iboe = &ibdev->iboe; in mlx4_ib_scan_netdevs()
2330 mlx4_foreach_ib_transport_port(port, ibdev->dev) { in mlx4_ib_scan_netdevs()
2333 mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port); in mlx4_ib_scan_netdevs()
2345 if (ib_get_cached_port_state(&ibdev->ib_dev, port, in mlx4_ib_scan_netdevs()
2359 ibev.device = &ibdev->ib_dev; in mlx4_ib_scan_netdevs()
2370 mlx4_ib_update_qps(ibdev, dev, update_qps_port); in mlx4_ib_scan_netdevs()
2377 struct mlx4_ib_dev *ibdev; in mlx4_ib_netdev_event() local
2382 ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb); in mlx4_ib_netdev_event()
2383 mlx4_ib_scan_netdevs(ibdev, dev, event); in mlx4_ib_netdev_event()
2388 static void init_pkeys(struct mlx4_ib_dev *ibdev) in init_pkeys() argument
2394 if (mlx4_is_master(ibdev->dev)) { in init_pkeys()
2395 for (slave = 0; slave <= ibdev->dev->persist->num_vfs; in init_pkeys()
2397 for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) { in init_pkeys()
2399 i < ibdev->dev->phys_caps.pkey_phys_table_len[port]; in init_pkeys()
2401 ibdev->pkeys.virt2phys_pkey[slave][port - 1][i] = in init_pkeys()
2403 (slave == mlx4_master_func_num(ibdev->dev) || !i) ? i : in init_pkeys()
2404 ibdev->dev->phys_caps.pkey_phys_table_len[port] - 1; in init_pkeys()
2405 mlx4_sync_pkey_table(ibdev->dev, slave, port, i, in init_pkeys()
2406 ibdev->pkeys.virt2phys_pkey[slave][port - 1][i]); in init_pkeys()
2411 for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) { in init_pkeys()
2413 i < ibdev->dev->phys_caps.pkey_phys_table_len[port]; in init_pkeys()
2415 ibdev->pkeys.phys_pkey_cache[port-1][i] = in init_pkeys()
2421 static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev) in mlx4_ib_alloc_eqs() argument
2425 ibdev->eq_table = kcalloc(dev->caps.num_comp_vectors, in mlx4_ib_alloc_eqs()
2426 sizeof(ibdev->eq_table[0]), GFP_KERNEL); in mlx4_ib_alloc_eqs()
2427 if (!ibdev->eq_table) in mlx4_ib_alloc_eqs()
2435 ibdev->eq_table[eq] = total_eqs; in mlx4_ib_alloc_eqs()
2437 &ibdev->eq_table[eq])) in mlx4_ib_alloc_eqs()
2440 ibdev->eq_table[eq] = -1; in mlx4_ib_alloc_eqs()
2445 ibdev->eq_table[i++] = -1) in mlx4_ib_alloc_eqs()
2449 ibdev->ib_dev.num_comp_vectors = eq; in mlx4_ib_alloc_eqs()
2452 static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev) in mlx4_ib_free_eqs() argument
2455 int total_eqs = ibdev->ib_dev.num_comp_vectors; in mlx4_ib_free_eqs()
2458 if (!ibdev->eq_table) in mlx4_ib_free_eqs()
2462 ibdev->ib_dev.num_comp_vectors = 0; in mlx4_ib_free_eqs()
2465 mlx4_release_eq(dev, ibdev->eq_table[i]); in mlx4_ib_free_eqs()
2467 kfree(ibdev->eq_table); in mlx4_ib_free_eqs()
2468 ibdev->eq_table = NULL; in mlx4_ib_free_eqs()
2471 static int mlx4_port_immutable(struct ib_device *ibdev, u8 port_num, in mlx4_port_immutable() argument
2475 struct mlx4_ib_dev *mdev = to_mdev(ibdev); in mlx4_port_immutable()
2478 if (mlx4_ib_port_link_layer(ibdev, port_num) == IB_LINK_LAYER_INFINIBAND) { in mlx4_port_immutable()
2493 err = ib_query_port(ibdev, port_num, &attr); in mlx4_port_immutable()
2607 struct mlx4_ib_dev *ibdev; in mlx4_ib_add() local
2628 ibdev = ib_alloc_device(mlx4_ib_dev, ib_dev); in mlx4_ib_add()
2629 if (!ibdev) { in mlx4_ib_add()
2635 iboe = &ibdev->iboe; in mlx4_ib_add()
2637 if (mlx4_pd_alloc(dev, &ibdev->priv_pdn)) in mlx4_ib_add()
2640 if (mlx4_uar_alloc(dev, &ibdev->priv_uar)) in mlx4_ib_add()
2643 ibdev->uar_map = ioremap((phys_addr_t) ibdev->priv_uar.pfn << PAGE_SHIFT, in mlx4_ib_add()
2645 if (!ibdev->uar_map) in mlx4_ib_add()
2647 MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock); in mlx4_ib_add()
2649 ibdev->dev = dev; in mlx4_ib_add()
2650 ibdev->bond_next_port = 0; in mlx4_ib_add()
2652 ibdev->ib_dev.node_type = RDMA_NODE_IB_CA; in mlx4_ib_add()
2653 ibdev->ib_dev.local_dma_lkey = dev->caps.reserved_lkey; in mlx4_ib_add()
2654 ibdev->num_ports = num_ports; in mlx4_ib_add()
2655 ibdev->ib_dev.phys_port_cnt = mlx4_is_bonded(dev) ? in mlx4_ib_add()
2656 1 : ibdev->num_ports; in mlx4_ib_add()
2657 ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors; in mlx4_ib_add()
2658 ibdev->ib_dev.dev.parent = &dev->persist->pdev->dev; in mlx4_ib_add()
2660 ibdev->ib_dev.uverbs_cmd_mask = in mlx4_ib_add()
2686 ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_ops); in mlx4_ib_add()
2687 ibdev->ib_dev.uverbs_ex_cmd_mask |= in mlx4_ib_add()
2694 ((mlx4_ib_port_link_layer(&ibdev->ib_dev, 1) == in mlx4_ib_add()
2696 (mlx4_ib_port_link_layer(&ibdev->ib_dev, 2) == in mlx4_ib_add()
2698 ibdev->ib_dev.uverbs_ex_cmd_mask |= in mlx4_ib_add()
2704 ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_wq_ops); in mlx4_ib_add()
2707 if (!mlx4_is_slave(ibdev->dev)) in mlx4_ib_add()
2708 ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_fmr_ops); in mlx4_ib_add()
2712 ibdev->ib_dev.uverbs_cmd_mask |= in mlx4_ib_add()
2715 ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_mw_ops); in mlx4_ib_add()
2719 ibdev->ib_dev.uverbs_cmd_mask |= in mlx4_ib_add()
2722 ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_xrc_ops); in mlx4_ib_add()
2726 ibdev->steering_support = MLX4_STEERING_MODE_DEVICE_MANAGED; in mlx4_ib_add()
2727 ibdev->ib_dev.uverbs_ex_cmd_mask |= in mlx4_ib_add()
2730 ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_fs_ops); in mlx4_ib_add()
2734 ibdev->ib_dev.ops.uverbs_abi_ver = in mlx4_ib_add()
2737 mlx4_ib_alloc_eqs(dev, ibdev); in mlx4_ib_add()
2741 if (init_node_data(ibdev)) in mlx4_ib_add()
2743 mlx4_init_sl2vl_tbl(ibdev); in mlx4_ib_add()
2745 for (i = 0; i < ibdev->num_ports; ++i) { in mlx4_ib_add()
2746 mutex_init(&ibdev->counters_table[i].mutex); in mlx4_ib_add()
2747 INIT_LIST_HEAD(&ibdev->counters_table[i].counters_list); in mlx4_ib_add()
2751 num_req_counters = mlx4_is_bonded(dev) ? 1 : ibdev->num_ports; in mlx4_ib_add()
2753 mutex_init(&ibdev->qp1_proxy_lock[i]); in mlx4_ib_add()
2755 if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) == in mlx4_ib_add()
2757 err = mlx4_counter_alloc(ibdev->dev, &counter_index, in mlx4_ib_add()
2774 mlx4_counter_free(ibdev->dev, counter_index); in mlx4_ib_add()
2780 &ibdev->counters_table[i].counters_list); in mlx4_ib_add()
2781 ibdev->counters_table[i].default_counter = counter_index; in mlx4_ib_add()
2786 for (i = 1; i < ibdev->num_ports ; ++i) { in mlx4_ib_add()
2795 &ibdev->counters_table[i].counters_list); in mlx4_ib_add()
2796 ibdev->counters_table[i].default_counter = in mlx4_ib_add()
2803 spin_lock_init(&ibdev->sm_lock); in mlx4_ib_add()
2804 mutex_init(&ibdev->cap_mask_mutex); in mlx4_ib_add()
2805 INIT_LIST_HEAD(&ibdev->qp_list); in mlx4_ib_add()
2806 spin_lock_init(&ibdev->reset_flow_resource_lock); in mlx4_ib_add()
2808 if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED && in mlx4_ib_add()
2810 ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS; in mlx4_ib_add()
2811 err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count, in mlx4_ib_add()
2813 &ibdev->steer_qpn_base, 0, in mlx4_ib_add()
2818 ibdev->ib_uc_qpns_bitmap = in mlx4_ib_add()
2819 kmalloc_array(BITS_TO_LONGS(ibdev->steer_qpn_count), in mlx4_ib_add()
2822 if (!ibdev->ib_uc_qpns_bitmap) in mlx4_ib_add()
2826 bitmap_zero(ibdev->ib_uc_qpns_bitmap, in mlx4_ib_add()
2827 ibdev->steer_qpn_count); in mlx4_ib_add()
2829 dev, ibdev->steer_qpn_base, in mlx4_ib_add()
2830 ibdev->steer_qpn_base + in mlx4_ib_add()
2831 ibdev->steer_qpn_count - 1); in mlx4_ib_add()
2835 bitmap_fill(ibdev->ib_uc_qpns_bitmap, in mlx4_ib_add()
2836 ibdev->steer_qpn_count); in mlx4_ib_add()
2840 for (j = 1; j <= ibdev->dev->caps.num_ports; j++) in mlx4_ib_add()
2841 atomic64_set(&iboe->mac[j - 1], ibdev->dev->caps.def_mac[j]); in mlx4_ib_add()
2843 if (mlx4_ib_alloc_diag_counters(ibdev)) in mlx4_ib_add()
2846 rdma_set_device_sysfs_group(&ibdev->ib_dev, &mlx4_attr_group); in mlx4_ib_add()
2847 if (ib_register_device(&ibdev->ib_dev, "mlx4_%d")) in mlx4_ib_add()
2850 if (mlx4_ib_mad_init(ibdev)) in mlx4_ib_add()
2853 if (mlx4_ib_init_sriov(ibdev)) in mlx4_ib_add()
2870 ibdev->ib_active = true; in mlx4_ib_add()
2873 &ibdev->ib_dev); in mlx4_ib_add()
2875 if (mlx4_is_mfunc(ibdev->dev)) in mlx4_ib_add()
2876 init_pkeys(ibdev); in mlx4_ib_add()
2879 if (mlx4_is_master(ibdev->dev)) { in mlx4_ib_add()
2881 if (j == mlx4_master_func_num(ibdev->dev)) in mlx4_ib_add()
2883 if (mlx4_is_slave_active(ibdev->dev, j)) in mlx4_ib_add()
2884 do_slave_init(ibdev, j, 1); in mlx4_ib_add()
2887 return ibdev; in mlx4_ib_add()
2890 if (ibdev->iboe.nb.notifier_call) { in mlx4_ib_add()
2891 if (unregister_netdevice_notifier(&ibdev->iboe.nb)) in mlx4_ib_add()
2893 ibdev->iboe.nb.notifier_call = NULL; in mlx4_ib_add()
2897 mlx4_ib_close_sriov(ibdev); in mlx4_ib_add()
2900 mlx4_ib_mad_cleanup(ibdev); in mlx4_ib_add()
2903 ib_unregister_device(&ibdev->ib_dev); in mlx4_ib_add()
2906 mlx4_ib_diag_cleanup(ibdev); in mlx4_ib_add()
2909 kfree(ibdev->ib_uc_qpns_bitmap); in mlx4_ib_add()
2912 mlx4_qp_release_range(dev, ibdev->steer_qpn_base, in mlx4_ib_add()
2913 ibdev->steer_qpn_count); in mlx4_ib_add()
2915 for (i = 0; i < ibdev->num_ports; ++i) in mlx4_ib_add()
2916 mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[i]); in mlx4_ib_add()
2919 mlx4_ib_free_eqs(dev, ibdev); in mlx4_ib_add()
2920 iounmap(ibdev->uar_map); in mlx4_ib_add()
2923 mlx4_uar_free(dev, &ibdev->priv_uar); in mlx4_ib_add()
2926 mlx4_pd_free(dev, ibdev->priv_pdn); in mlx4_ib_add()
2929 ib_dealloc_device(&ibdev->ib_dev); in mlx4_ib_add()
3002 struct mlx4_ib_dev *ibdev = ibdev_ptr; in mlx4_ib_remove() local
3008 ibdev->ib_active = false; in mlx4_ib_remove()
3011 if (ibdev->iboe.nb.notifier_call) { in mlx4_ib_remove()
3012 if (unregister_netdevice_notifier(&ibdev->iboe.nb)) in mlx4_ib_remove()
3014 ibdev->iboe.nb.notifier_call = NULL; in mlx4_ib_remove()
3017 mlx4_ib_close_sriov(ibdev); in mlx4_ib_remove()
3018 mlx4_ib_mad_cleanup(ibdev); in mlx4_ib_remove()
3019 ib_unregister_device(&ibdev->ib_dev); in mlx4_ib_remove()
3020 mlx4_ib_diag_cleanup(ibdev); in mlx4_ib_remove()
3022 mlx4_qp_release_range(dev, ibdev->steer_qpn_base, in mlx4_ib_remove()
3023 ibdev->steer_qpn_count); in mlx4_ib_remove()
3024 kfree(ibdev->ib_uc_qpns_bitmap); in mlx4_ib_remove()
3026 iounmap(ibdev->uar_map); in mlx4_ib_remove()
3027 for (p = 0; p < ibdev->num_ports; ++p) in mlx4_ib_remove()
3028 mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[p]); in mlx4_ib_remove()
3033 mlx4_ib_free_eqs(dev, ibdev); in mlx4_ib_remove()
3035 mlx4_uar_free(dev, &ibdev->priv_uar); in mlx4_ib_remove()
3036 mlx4_pd_free(dev, ibdev->priv_pdn); in mlx4_ib_remove()
3037 ib_dealloc_device(&ibdev->ib_dev); in mlx4_ib_remove()
3040 static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init) in do_slave_init() argument
3043 struct mlx4_dev *dev = ibdev->dev; in do_slave_init()
3072 dm[i]->dev = ibdev; in do_slave_init()
3075 spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags); in do_slave_init()
3076 if (!ibdev->sriov.is_going_down) { in do_slave_init()
3078 queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work); in do_slave_init()
3079 spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags); in do_slave_init()
3081 spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags); in do_slave_init()
3090 static void mlx4_ib_handle_catas_error(struct mlx4_ib_dev *ibdev) in mlx4_ib_handle_catas_error() argument
3104 spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags); in mlx4_ib_handle_catas_error()
3106 list_for_each_entry(mqp, &ibdev->qp_list, qps_list) { in mlx4_ib_handle_catas_error()
3147 spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags); in mlx4_ib_handle_catas_error()
3155 struct mlx4_ib_dev *ibdev = ew->ib_dev; in handle_bonded_port_state_event() local
3161 spin_lock_bh(&ibdev->iboe.lock); in handle_bonded_port_state_event()
3163 struct net_device *curr_netdev = ibdev->iboe.netdevs[i]; in handle_bonded_port_state_event()
3177 spin_unlock_bh(&ibdev->iboe.lock); in handle_bonded_port_state_event()
3179 ibev.device = &ibdev->ib_dev; in handle_bonded_port_state_event()
3212 void mlx4_sched_ib_sl2vl_update_work(struct mlx4_ib_dev *ibdev, in mlx4_sched_ib_sl2vl_update_work() argument
3221 ew->ib_dev = ibdev; in mlx4_sched_ib_sl2vl_update_work()
3230 struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr); in mlx4_ib_event() local
3242 ew->ib_dev = ibdev; in mlx4_ib_event()
3254 if (p > ibdev->num_ports) in mlx4_ib_event()
3257 rdma_port_get_link_layer(&ibdev->ib_dev, p) == in mlx4_ib_event()
3260 mlx4_ib_invalidate_all_guid_record(ibdev, p); in mlx4_ib_event()
3261 if (ibdev->dev->flags & MLX4_FLAG_SECURE_HOST && in mlx4_ib_event()
3262 !(ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT)) in mlx4_ib_event()
3263 mlx4_sched_ib_sl2vl_update_work(ibdev, p); in mlx4_ib_event()
3269 if (p > ibdev->num_ports) in mlx4_ib_event()
3275 ibdev->ib_active = false; in mlx4_ib_event()
3277 mlx4_ib_handle_catas_error(ibdev); in mlx4_ib_event()
3287 ew->ib_dev = ibdev; in mlx4_ib_event()
3297 do_slave_init(ibdev, p, 1); in mlx4_ib_event()
3301 for (i = 1; i <= ibdev->num_ports; i++) { in mlx4_ib_event()
3302 if (rdma_port_get_link_layer(&ibdev->ib_dev, i) in mlx4_ib_event()
3304 mlx4_ib_slave_alias_guid_event(ibdev, in mlx4_ib_event()
3315 for (i = 1; i <= ibdev->num_ports; i++) { in mlx4_ib_event()
3316 if (rdma_port_get_link_layer(&ibdev->ib_dev, i) in mlx4_ib_event()
3318 mlx4_ib_slave_alias_guid_event(ibdev, in mlx4_ib_event()
3324 do_slave_init(ibdev, p, 0); in mlx4_ib_event()
3332 ibev.element.port_num = mlx4_is_bonded(ibdev->dev) ? 1 : (u8)p; in mlx4_ib_event()