/drivers/infiniband/hw/mlx5/
  ib_rep.c
      16: ibdev = mlx5_ib_get_uplink_ibdev(dev->priv.eswitch);  in mlx5_ib_set_vport_rep()
      23: mlx5_ib_get_rep_netdev(dev->priv.eswitch, rep->vport);  in mlx5_ib_set_vport_rep()
      57: mlx5_ib_get_rep_netdev(dev->priv.eswitch, rep->vport);  in mlx5_ib_vport_rep_load()
      99: struct mlx5_eswitch *esw = mdev->priv.eswitch;  in mlx5_ib_register_vport_reps()
      106: struct mlx5_eswitch *esw = mdev->priv.eswitch;  in mlx5_ib_unregister_vport_reps()
      143: struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;  in create_flow_rule_vport_sq()
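The ib_rep.c hits show the mlx5 RDMA driver reaching the switchdev representor machinery only through the core device's priv.eswitch pointer. Below is a rough sketch of what the register/unregister pair at lines 99/106 presumably does with that pointer; the mlx5_eswitch_register_vport_reps()/mlx5_eswitch_unregister_vport_reps() calls, the REP_IB rep type and the empty example_rep_ops table are assumptions inferred from the function names, not taken from the listing itself.

#include <linux/mlx5/driver.h>   /* struct mlx5_core_dev, dev->priv.eswitch */
#include <linux/mlx5/eswitch.h>  /* struct mlx5_eswitch_rep_ops, REP_IB */

/* Hypothetical ops table; a real driver fills in .load/.unload/.get_proto_dev. */
static const struct mlx5_eswitch_rep_ops example_rep_ops = {
};

static void example_register_vport_reps(struct mlx5_core_dev *mdev)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;

	/* Hand the IB representor callbacks to the eswitch core. */
	mlx5_eswitch_register_vport_reps(esw, &example_rep_ops, REP_IB);
}

static void example_unregister_vport_reps(struct mlx5_core_dev *mdev)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;

	mlx5_eswitch_unregister_vport_reps(esw, REP_IB);
}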
  main.c
      164: struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;  in mlx5_get_rep_roce()
      3490: struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;  in mlx5_ib_set_rule_source_port()
      5338: mlx5_ib_eswitch_mode(mdev->priv.eswitch) ==  in is_mdev_switchdev_mode()
      6908: mlx5_ib_eswitch_mode(mdev->priv.eswitch) == MLX5_ESWITCH_OFFLOADS) {  in mlx5_ib_add()
/drivers/net/ethernet/mellanox/mlx5/core/
  sriov.c
      80: mlx5_eswitch_update_num_of_vfs(dev->priv.eswitch, num_vfs);  in mlx5_device_enable_sriov()
      81: err = mlx5_eswitch_enable(dev->priv.eswitch, MLX5_ESWITCH_LEGACY);  in mlx5_device_enable_sriov()
      130: mlx5_eswitch_disable(dev->priv.eswitch);  in mlx5_device_disable_sriov()
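Read together, the three sriov.c hits describe a simple pairing: enabling SR-IOV sizes the eswitch for the new VF count and brings it up in legacy mode, and disabling SR-IOV tears it back down. A minimal sketch of that pattern follows; the surrounding SR-IOV plumbing is omitted and the mlx5_core_warn() message is illustrative, not quoted from the driver.

#include <linux/mlx5/driver.h>
#include <linux/mlx5/eswitch.h>  /* MLX5_ESWITCH_LEGACY mode constant */
#include "eswitch.h"             /* driver-local: mlx5_eswitch_enable()/_disable() */
#include "mlx5_core.h"           /* driver-local: mlx5_core_warn() */

static int example_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
{
	int err;

	/* Size the eswitch for the VF vports that are about to appear... */
	mlx5_eswitch_update_num_of_vfs(dev->priv.eswitch, num_vfs);

	/* ...then start it in legacy (non-offloads) mode. */
	err = mlx5_eswitch_enable(dev->priv.eswitch, MLX5_ESWITCH_LEGACY);
	if (err)
		mlx5_core_warn(dev, "eswitch enable failed, err %d\n", err);

	return err;
}

static void example_disable_sriov(struct mlx5_core_dev *dev)
{
	/* Mirror of the enable path. */
	mlx5_eswitch_disable(dev->priv.eswitch);
}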
  en_rep.c
      137: struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;  in mlx5e_rep_update_hw_counters()
      240: struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;  in mlx5e_replace_rep_vport_rx_rule()
      467: struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;  in mlx5e_add_sqs_fwd_rules()
      496: struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;  in mlx5e_remove_sqs_fwd_rules()
      603: struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;  in mlx5e_rep_update_flows()
      1539: struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;  in mlx5e_create_rep_vport_rx_rule()
      1801: mlx5_eswitch_is_vf_vport(dev->priv.eswitch, rpriv->rep->vport);  in is_devlink_port_supported()
      1834: } else if (mlx5_eswitch_is_vf_vport(dev->priv.eswitch,  in register_devlink_port()
      1979: struct mlx5_eswitch *esw = mdev->priv.eswitch;  in mlx5e_rep_register_vport_reps()
      1986: struct mlx5_eswitch *esw = mdev->priv.eswitch;  in mlx5e_rep_unregister_vport_reps()
  eswitch_offloads.c
      105: mlx5_eswitch_get_vport_metadata_for_match(attr->in_mdev->priv.eswitch,  in mlx5_eswitch_set_rule_source_port()
      655: dest->vport.num = peer_dev->priv.eswitch->manager_vport;  in peer_miss_rules_setup()
      710: esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,  in esw_add_fdb_peer_miss_rules()
      735: peer_dev->priv.eswitch,  in esw_add_fdb_peer_miss_rules()
      2309: if (dev->priv.eswitch->mode == MLX5_ESWITCH_NONE &&  in mlx5_devlink_eswitch_check()
      2327: cur_mlx5_mode = dev->priv.eswitch->mode;  in mlx5_devlink_eswitch_mode_set()
      2336: return esw_offloads_start(dev->priv.eswitch, extack);  in mlx5_devlink_eswitch_mode_set()
      2338: return esw_offloads_stop(dev->priv.eswitch, extack);  in mlx5_devlink_eswitch_mode_set()
      2352: return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);  in mlx5_devlink_eswitch_mode_get()
      2359: struct mlx5_eswitch *esw = dev->priv.eswitch;  in mlx5_devlink_eswitch_inline_mode_set()
      [all …]
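The last block of eswitch_offloads.c hits (lines 2309-2359) is the devlink "eswitch mode" plumbing: check that an eswitch exists, read the current mode through dev->priv.eswitch, and call esw_offloads_start()/esw_offloads_stop() to move between legacy and switchdev. A condensed sketch of that dispatch is below; the esw_mode_from_devlink() helper and the DEVLINK_ESWITCH_MODE_* constants are assumed from the rest of this file and the devlink uAPI, and locking plus several sanity checks are dropped.

#include <net/devlink.h>        /* devlink_priv(), DEVLINK_ESWITCH_MODE_* */
#include <linux/mlx5/driver.h>
#include "eswitch.h"            /* driver-local eswitch definitions */

static int example_eswitch_mode_set(struct devlink *devlink, u16 mode,
				    struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	u16 cur_mlx5_mode, mlx5_mode = 0;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);	/* rejects MLX5_ESWITCH_NONE */
	if (err)
		return err;

	cur_mlx5_mode = dev->priv.eswitch->mode;
	if (esw_mode_from_devlink(mode, &mlx5_mode))
		return -EINVAL;
	if (cur_mlx5_mode == mlx5_mode)
		return 0;	/* already in the requested mode */

	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return esw_offloads_start(dev->priv.eswitch, extack);
	else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
		return esw_offloads_stop(dev->priv.eswitch, extack);

	return -EINVAL;
}

From user space this path is exercised by something like `devlink dev eswitch set pci/0000:03:00.0 mode switchdev` (the PCI address is an example).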
  en_tc.c
      302: struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;  in get_mod_hdr_table()
      1124: esw = flow->priv->mdev->priv.eswitch;  in add_unready_flow()
      1139: esw = flow->priv->mdev->priv.eswitch;  in remove_unready_flow()
      1153: struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;  in mlx5e_tc_add_fdb_flow()
      1257: struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;  in mlx5e_tc_del_fdb_flow()
      1298: struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;  in mlx5e_tc_encap_flows_add()
      1363: struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;  in mlx5e_tc_encap_flows_del()
      1508: esw = priv->mdev->priv.eswitch;  in mlx5e_tc_update_neigh_used_value()
      1571: struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;  in mlx5e_encap_put()
      1585: struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;  in mlx5e_detach_encap()
      [all …]
  eswitch.c
      915: struct mlx5_eswitch *esw = dev->priv.eswitch;  in esw_vport_change_handle_locked()
      949: struct mlx5_eswitch *esw = vport->dev->priv.eswitch;  in esw_vport_change_handler()
      2021: dev->priv.eswitch = esw;  in mlx5_eswitch_init()
      2039: esw->dev->priv.eswitch = NULL;  in mlx5_eswitch_cleanup()
      2469: struct mlx5_eswitch *esw = dev->priv.eswitch;  in mlx5_eswitch_query_vport_drop_stats()
      2592: esw = dev->priv.eswitch;  in mlx5_eswitch_get_encap_mode()
      2600: if ((dev0->priv.eswitch->mode == MLX5_ESWITCH_NONE &&  in mlx5_esw_lag_prereq()
      2601: dev1->priv.eswitch->mode == MLX5_ESWITCH_NONE) ||  in mlx5_esw_lag_prereq()
      2602: (dev0->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS &&  in mlx5_esw_lag_prereq()
      2603: dev1->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS))  in mlx5_esw_lag_prereq()
      [all …]
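Two patterns stand out in the eswitch.c hits: mlx5_eswitch_init()/mlx5_eswitch_cleanup() own the dev->priv.eswitch back-pointer (set at line 2021, cleared at 2039), and mlx5_esw_lag_prereq() compares the modes of two devices through that pointer before allowing LAG. A stripped-down sketch of both, using only the fields that appear in the hits (esw->dev, esw->mode and the MLX5_ESWITCH_* mode constants); allocation, FDB setup and teardown are left out.

#include <linux/mlx5/driver.h>
#include <linux/mlx5/eswitch.h>  /* MLX5_ESWITCH_NONE / MLX5_ESWITCH_OFFLOADS */
#include "eswitch.h"             /* driver-local struct mlx5_eswitch (->dev, ->mode) */

static void example_attach_eswitch(struct mlx5_core_dev *dev,
				   struct mlx5_eswitch *esw)
{
	dev->priv.eswitch = esw;	/* done at mlx5_eswitch_init() time */
}

static void example_detach_eswitch(struct mlx5_eswitch *esw)
{
	esw->dev->priv.eswitch = NULL;	/* cleared in mlx5_eswitch_cleanup() */
}

static bool example_lag_modes_compatible(struct mlx5_core_dev *dev0,
					 struct mlx5_core_dev *dev1)
{
	/* As in mlx5_esw_lag_prereq(): both ports must run the same
	 * eswitch mode (both none or both offloads) before bonding. */
	return (dev0->priv.eswitch->mode == MLX5_ESWITCH_NONE &&
		dev1->priv.eswitch->mode == MLX5_ESWITCH_NONE) ||
	       (dev0->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS &&
		dev1->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS);
}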
  rdma.c
      84: dev->priv.eswitch->manager_vport);  in mlx5_rdma_enable_roce_steering()
  Makefile
      44: mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o eswitch_offloads_termtbl.o \
  devlink.c
      125: eswitch_mode = mlx5_eswitch_mode(dev->priv.eswitch);  in mlx5_devlink_fs_mode_validate()
  lag.c
      308: roce_lag &= dev0->priv.eswitch->mode == MLX5_ESWITCH_NONE &&  in mlx5_do_bond()
      309: dev1->priv.eswitch->mode == MLX5_ESWITCH_NONE;  in mlx5_do_bond()
  en_main.c
      4044: return mlx5_eswitch_set_vport_mac(mdev->priv.eswitch, vf + 1, mac);  in mlx5e_set_vf_mac()
      4056: return mlx5_eswitch_set_vport_vlan(mdev->priv.eswitch, vf + 1,  in mlx5e_set_vf_vlan()
      4065: return mlx5_eswitch_set_vport_spoofchk(mdev->priv.eswitch, vf + 1, setting);  in mlx5e_set_vf_spoofchk()
      4073: return mlx5_eswitch_set_vport_trust(mdev->priv.eswitch, vf + 1, setting);  in mlx5e_set_vf_trust()
      4082: return mlx5_eswitch_set_vport_rate(mdev->priv.eswitch, vf + 1,  in mlx5e_set_vf_rate()
      4114: return mlx5_eswitch_set_vport_state(mdev->priv.eswitch, vf + 1,  in mlx5e_set_vf_link_state()
      4125: err = mlx5_eswitch_get_vport_config(mdev->priv.eswitch, vf + 1, ivi);  in mlx5e_get_vf_config()
      4138: return mlx5_eswitch_get_vport_stats(mdev->priv.eswitch, vf + 1,  in mlx5e_get_vf_stats()
      4503: err = mlx5_eswitch_get_vepa(mdev->priv.eswitch, &setting);  in mlx5e_bridge_getlink()
      4544: return mlx5_eswitch_set_vepa(mdev->priv.eswitch, setting);  in mlx5e_bridge_setlink()
      [all …]
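Every en_main.c hit has the same shape: an ndo VF-management callback that forwards straight to the matching mlx5_eswitch_* helper, translating the VF index into an eswitch vport number (VF n is vport n + 1; vport 0 is the PF itself). A sketch of one such callback, modelled on the mlx5e_set_vf_mac() hit; struct mlx5e_priv and the helper prototype come from the driver-local headers noted below.

#include <linux/netdevice.h>
#include <linux/mlx5/driver.h>
#include "en.h"       /* driver-local: struct mlx5e_priv (->mdev) */
#include "eswitch.h"  /* driver-local: mlx5_eswitch_set_vport_mac() */

static int example_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	/* VF index -> vport number: vport 0 is the PF, VFs start at 1. */
	return mlx5_eswitch_set_vport_mac(mdev->priv.eswitch, vf + 1, mac);
}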
  Kconfig
      79: Switchdev mode (eswitch offloads).
  main.c
      889: mlx5_eswitch_cleanup(dev->priv.eswitch);  in mlx5_init_once()
      919: mlx5_eswitch_cleanup(dev->priv.eswitch);  in mlx5_cleanup_once()
/drivers/net/ethernet/broadcom/
  Kconfig
      225: offload for eswitch. This option enables SR-IOV switchdev eswitch
/drivers/net/ethernet/mellanox/mlx5/core/en/
  tc_tun.c
      29: struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;  in get_route_and_out_devs()
      89: struct mlx5_eswitch *esw = mdev->priv.eswitch;  in mlx5e_route_lookup_ipv4()
/drivers/net/ethernet/qlogic/qlcnic/
  qlcnic_83xx_vnic.c
      282: adapter->eswitch[*port_id].flags |= QLCNIC_SWITCH_ENABLE;  in qlcnic_83xx_set_port_eswitch_status()
  qlcnic_ctx.c
      1032: !(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE)) {  in qlcnic_config_port_mirroring()
      1185: int qlcnic_get_eswitch_stats(struct qlcnic_adapter *adapter, const u8 eswitch,  in qlcnic_get_eswitch_stats() (argument)
      1207: esw_stats->context_id = eswitch;  in qlcnic_get_eswitch_stats()
      1210: if (adapter->npars[i].phy_port != eswitch)  in qlcnic_get_eswitch_stats()
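The qlcnic_ctx.c hits outline how per-eswitch statistics are gathered: the stats context is tagged with the eswitch id and NPAR functions whose phy_port does not match are skipped. The sketch below shows that aggregation loop only; the num_npars parameter, the example_esw_stats structure and the example_add_npar_stats() helper are hypothetical stand-ins, not the driver's real API.

#include <linux/types.h>
#include "qlcnic.h"	/* driver-local: struct qlcnic_adapter (->npars[].phy_port) */

/* Placeholder stats structure; only the field visible in the hits is shown. */
struct example_esw_stats {
	u8 context_id;
	/* ... counters elided ... */
};

/* Hypothetical helper standing in for the per-function stats query the real
 * qlcnic_get_eswitch_stats() performs for each matching NPAR; stubbed here. */
static int example_add_npar_stats(struct qlcnic_adapter *adapter, int npar,
				  struct example_esw_stats *esw_stats)
{
	/* Real code would issue a firmware stats query for this NPAR and
	 * accumulate the counters into esw_stats. */
	return 0;
}

static int example_get_eswitch_stats(struct qlcnic_adapter *adapter,
				     const u8 eswitch, int num_npars,
				     struct example_esw_stats *esw_stats)
{
	int i, err;

	/* Tag the aggregate with the eswitch these stats describe. */
	esw_stats->context_id = eswitch;

	for (i = 0; i < num_npars; i++) {
		/* Only NPAR functions attached to this eswitch are counted. */
		if (adapter->npars[i].phy_port != eswitch)
			continue;

		err = example_add_npar_stats(adapter, i, esw_stats);
		if (err)
			return err;
	}

	return 0;
}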
  qlcnic_main.c
      1032: adapter->eswitch = kcalloc(QLCNIC_NIU_MAX_XG_PORTS,  in qlcnic_init_pci_info()
      1035: if (!adapter->eswitch) {  in qlcnic_init_pci_info()
      1080: adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;  in qlcnic_init_pci_info()
      1087: kfree(adapter->eswitch);  in qlcnic_init_pci_info()
      1088: adapter->eswitch = NULL;  in qlcnic_init_pci_info()
      2773: kfree(adapter->eswitch);  in qlcnic_remove()
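The qlcnic hits show a per-adapter array of eswitch descriptors rather than a single eswitch object: one entry per XG port, allocated in qlcnic_init_pci_info(), flagged with QLCNIC_SWITCH_ENABLE, and freed with the pointer cleared on the error path and in qlcnic_remove(). A simplified sketch of that lifecycle follows; the real driver only flags the ports it discovers in the PCI function info, which is skipped here.

#include <linux/slab.h>
#include "qlcnic.h"	/* driver-local types and the QLCNIC_* constants used below */

static int example_alloc_eswitches(struct qlcnic_adapter *adapter)
{
	int i;

	adapter->eswitch = kcalloc(QLCNIC_NIU_MAX_XG_PORTS,
				   sizeof(struct qlcnic_eswitch),
				   GFP_KERNEL);
	if (!adapter->eswitch)
		return -ENOMEM;

	/* Mark each port's eswitch usable (the real code does this per
	 * discovered function rather than unconditionally). */
	for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
		adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;

	return 0;
}

static void example_free_eswitches(struct qlcnic_adapter *adapter)
{
	kfree(adapter->eswitch);
	adapter->eswitch = NULL;	/* avoid a dangling pointer on reuse */
}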
  qlcnic.h
      1161: struct qlcnic_eswitch *eswitch;  (member)
/drivers/net/ethernet/qlogic/qed/
  qed_mcp.h
      1050: enum qed_ov_eswitch eswitch);
  qed_mcp.c
      2725: enum qed_ov_eswitch eswitch)  in qed_mcp_ov_update_eswitch() (argument)
      2731: switch (eswitch) {  in qed_mcp_ov_update_eswitch()
      2742: DP_ERR(p_hwfn, "Invalid eswitch mode %d\n", eswitch);  in qed_mcp_ov_update_eswitch()
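The qed_mcp.c hits show the validation half of qed_mcp_ov_update_eswitch(): map the requested eswitch mode and reject anything unknown with DP_ERR(). A sketch of that switch statement follows; the QED_OV_ESWITCH_{NONE,VEB,VEPA} enumerators are assumed from enum qed_ov_eswitch in qed_mcp.h, the drv_mb_param encodings are placeholders, and the actual management-firmware mailbox command is omitted.

#include <linux/errno.h>
#include "qed.h"      /* driver-local: struct qed_hwfn, DP_ERR() */
#include "qed_mcp.h"  /* driver-local: enum qed_ov_eswitch */

static int example_ov_update_eswitch(struct qed_hwfn *p_hwfn,
				     enum qed_ov_eswitch eswitch)
{
	u32 drv_mb_param;

	switch (eswitch) {
	case QED_OV_ESWITCH_NONE:
		drv_mb_param = 0;	/* placeholder encoding */
		break;
	case QED_OV_ESWITCH_VEB:
		drv_mb_param = 1;	/* placeholder encoding */
		break;
	case QED_OV_ESWITCH_VEPA:
		drv_mb_param = 2;	/* placeholder encoding */
		break;
	default:
		DP_ERR(p_hwfn, "Invalid eswitch mode %d\n", eswitch);
		return -EINVAL;
	}

	/* The real function sends drv_mb_param to the management FW via a
	 * mailbox command here; that call is omitted from this sketch. */
	(void)drv_mb_param;
	return 0;
}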