
Lines matching refs:sriov (cross-reference hits in the mlx4 InfiniBand driver's MAD handling code, drivers/infiniband/hw/mlx4/mad.c; source line numbers depend on the kernel version browsed)

284 if (!dev->sriov.is_going_down) in smp_snoop()
299 !dev->sriov.is_going_down) { in smp_snoop()
438 if (dev->sriov.demux[port - 1].guid_cache[i] == guid) in mlx4_ib_find_real_gid()
535 tun_ctx = dev->sriov.demux[port-1].tun[slave]; in mlx4_ib_send_to_slave()
757 atomic64_read(&dev->sriov.demux[port - 1].subnet_prefix))) { in mlx4_ib_demux_mad()
1080 if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down) in handle_lid_change_event()
1091 if (!dev->sriov.is_going_down) { in handle_client_rereg_event()
1092 mlx4_ib_mcg_port_cleanup(&dev->sriov.demux[port_num - 1], 0); in handle_client_rereg_event()
1218 (u64)atomic64_read(&dev->sriov.demux[port - 1].subnet_prefix), in handle_port_mgmt_change_event()
1220 atomic64_set(&dev->sriov.demux[port - 1].subnet_prefix, in handle_port_mgmt_change_event()
1237 if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down) in handle_port_mgmt_change_event()
1245 else if (!dev->sriov.is_going_down) { in handle_port_mgmt_change_event()
1294 spin_lock_irqsave(&dev->sriov.going_down_lock, flags); in mlx4_ib_tunnel_comp_handler()
1295 if (!dev->sriov.is_going_down && ctx->state == DEMUX_PV_STATE_ACTIVE) in mlx4_ib_tunnel_comp_handler()
1297 spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags); in mlx4_ib_tunnel_comp_handler()
1306 spin_lock_irqsave(&dev->sriov.going_down_lock, flags); in mlx4_ib_wire_comp_handler()
1307 if (!dev->sriov.is_going_down && ctx->state == DEMUX_PV_STATE_ACTIVE) in mlx4_ib_wire_comp_handler()
1309 spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags); in mlx4_ib_wire_comp_handler()
1372 sqp_ctx = dev->sriov.sqps[port-1]; in mlx4_ib_send_to_wire()
1971 if (dev->sriov.demux[port - 1].tun[slave]) { in free_pv_object()
1972 kfree(dev->sriov.demux[port - 1].tun[slave]); in free_pv_object()
1973 dev->sriov.demux[port - 1].tun[slave] = NULL; in free_pv_object()
2049 ctx->wq = to_mdev(ibdev)->sriov.demux[port - 1].wq; in create_pv_resources()
2050 ctx->wi_wq = to_mdev(ibdev)->sriov.demux[port - 1].wi_wq; in create_pv_resources()
2121 clean_vf_mcast(&dev->sriov.demux[port - 1], slave); in mlx4_ib_tunnels_update()
2125 dev->sriov.sqps[port - 1], 1); in mlx4_ib_tunnels_update()
2128 dev->sriov.demux[port - 1].tun[slave], 1); in mlx4_ib_tunnels_update()
2134 dev->sriov.demux[port - 1].tun[slave]); in mlx4_ib_tunnels_update()
2139 dev->sriov.sqps[port - 1]); in mlx4_ib_tunnels_update()
2304 dev->sriov.is_going_down = 0; in mlx4_ib_init_sriov()
2305 spin_lock_init(&dev->sriov.going_down_lock); in mlx4_ib_init_sriov()
2340 dev->sriov.demux[i].guid_cache[0] = gid.global.interface_id; in mlx4_ib_init_sriov()
2341 atomic64_set(&dev->sriov.demux[i].subnet_prefix, in mlx4_ib_init_sriov()
2344 &dev->sriov.sqps[i]); in mlx4_ib_init_sriov()
2347 err = mlx4_ib_alloc_demux_ctx(dev, &dev->sriov.demux[i], i + 1); in mlx4_ib_init_sriov()
2359 mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]); in mlx4_ib_init_sriov()
2380 spin_lock_irqsave(&dev->sriov.going_down_lock, flags); in mlx4_ib_close_sriov()
2381 dev->sriov.is_going_down = 1; in mlx4_ib_close_sriov()
2382 spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags); in mlx4_ib_close_sriov()
2385 flush_workqueue(dev->sriov.demux[i].ud_wq); in mlx4_ib_close_sriov()
2386 mlx4_ib_free_sqp_ctx(dev->sriov.sqps[i]); in mlx4_ib_close_sriov()
2387 kfree(dev->sriov.sqps[i]); in mlx4_ib_close_sriov()
2388 dev->sriov.sqps[i] = NULL; in mlx4_ib_close_sriov()
2389 mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]); in mlx4_ib_close_sriov()
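
The entries above trace a single synchronization pattern: completion and event handlers only schedule demux/tunnel work while dev->sriov.is_going_down is clear, checked under dev->sriov.going_down_lock, and mlx4_ib_close_sriov() first sets the flag under that same lock, then flushes the per-port workqueues before freeing the sqp and demux contexts. Below is a minimal sketch of that ordering; only the field names mirror the dev->sriov members seen in the listing, while the struct, function names, and surrounding setup are illustrative assumptions, not the driver's actual definitions.

#include <linux/spinlock.h>
#include <linux/workqueue.h>

/* Illustrative stand-in for the SR-IOV state embedded in the device
 * (hypothetical type; only the member names follow the listing above). */
struct demo_sriov {
	spinlock_t going_down_lock;	/* protects is_going_down */
	int is_going_down;		/* set once teardown has begun */
	struct workqueue_struct *wq;	/* per-port demux workqueue */
};

/* Handler side: queue demux work only while teardown has not started,
 * mirroring the checks seen in mlx4_ib_tunnel_comp_handler() and
 * mlx4_ib_wire_comp_handler(). */
static void demo_comp_handler(struct demo_sriov *s, struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&s->going_down_lock, flags);
	if (!s->is_going_down)
		queue_work(s->wq, work);
	spin_unlock_irqrestore(&s->going_down_lock, flags);
}

/* Teardown side: flip the flag under the lock, then flush the workqueue so
 * no queued demux work is still running when the per-port sqp and demux
 * contexts are freed, mirroring mlx4_ib_close_sriov(). */
static void demo_close(struct demo_sriov *s)
{
	unsigned long flags;

	spin_lock_irqsave(&s->going_down_lock, flags);
	s->is_going_down = 1;
	spin_unlock_irqrestore(&s->going_down_lock, flags);

	flush_workqueue(s->wq);
	/* per-port contexts can now be freed safely */
}

The lock is only needed around the flag itself: once is_going_down is observed as set, no new work can be queued, so the subsequent flush_workqueue() call drains everything that was queued before the flip.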