Lines matching refs:sriov (each hit: source line number, matching code, enclosing function)

284 if (!dev->sriov.is_going_down) in smp_snoop()
299 !dev->sriov.is_going_down) { in smp_snoop()
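
Both smp_snoop() hits are teardown guards: state snooped from SMPs (LID, GID, PKey changes) is propagated to slaves only while SR-IOV is still up. A minimal sketch of the pattern in kernel style; mlx4_ib_dispatch_event() is the driver's event fan-out helper, but the exact call made at lines 284 and 299 is not shown in this listing and may differ:

    /* Sketch: act on snooped SMP state only if teardown has not begun.
     * The dispatched event type here is illustrative. */
    if (!dev->sriov.is_going_down)
        mlx4_ib_dispatch_event(dev, port_num, IB_EVENT_LID_CHANGE);
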
439 if (dev->sriov.demux[port - 1].guid_cache[i] == guid) in mlx4_ib_find_real_gid()
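
This hit is the body of a linear scan over the per-port GUID cache, mapping a destination GUID back to the slave that owns it. A sketch of the whole function, assuming the loop bound dev->dev->caps.sqp_demux used elsewhere in the driver:

    static int mlx4_ib_find_real_gid(struct ib_device *ibdev, u8 port, __be64 guid)
    {
        struct mlx4_ib_dev *dev = to_mdev(ibdev);
        int i;

        /* guid_cache[] is indexed by slave; a match yields the slave number */
        for (i = 0; i < dev->dev->caps.sqp_demux; i++)
            if (dev->sriov.demux[port - 1].guid_cache[i] == guid)
                return i;
        return -1; /* no slave owns this GUID */
    }
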
536 tun_ctx = dev->sriov.demux[port-1].tun[slave]; in mlx4_ib_send_to_slave()
758 atomic64_read(&dev->sriov.demux[port - 1].subnet_prefix))) { in mlx4_ib_demux_mad()
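
In mlx4_ib_demux_mad(), the cached subnet prefix is read with atomic64_read() to decide whether a GRH-addressed MAD belongs to the master. A sketch of the surrounding demux decision, assuming mainline's IB_SA_WELL_KNOWN_GUID handling:

    /* Sketch: the SA well-known GID on the cached subnet prefix means
     * slave 0 (the master); anything else is resolved via the GUID cache. */
    if (grh->dgid.global.interface_id == cpu_to_be64(IB_SA_WELL_KNOWN_GUID) &&
        grh->dgid.global.subnet_prefix == cpu_to_be64(
            atomic64_read(&dev->sriov.demux[port - 1].subnet_prefix))) {
        slave = 0;
    } else {
        slave = mlx4_ib_find_real_gid(ibdev, port,
                                      grh->dgid.global.interface_id);
    }
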
1082 if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down) in handle_lid_change_event()
1093 if (!dev->sriov.is_going_down) { in handle_client_rereg_event()
1094 mlx4_ib_mcg_port_cleanup(&dev->sriov.demux[port_num - 1], 0); in handle_client_rereg_event()
1220 (u64)atomic64_read(&dev->sriov.demux[port - 1].subnet_prefix), in handle_port_mgmt_change_event()
1222 atomic64_set(&dev->sriov.demux[port - 1].subnet_prefix, in handle_port_mgmt_change_event()
1239 if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down) in handle_port_mgmt_change_event()
1247 else if (!dev->sriov.is_going_down) { in handle_port_mgmt_change_event()
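
The handle_port_mgmt_change_event() hits at lines 1220-1222 update the cached prefix with atomic64_set(), so readers on the demux path (line 758 above) never see a torn 64-bit value and need no lock. A sketch of that update; the warning text is paraphrased:

    /* Sketch: publish the new subnet prefix atomically for lockless readers. */
    mlx4_ib_warn(ibdev, "changing subnet prefix for slaves: old=0x%llx, new=0x%llx\n",
                 (u64)atomic64_read(&dev->sriov.demux[port - 1].subnet_prefix),
                 be64_to_cpu(gid.global.subnet_prefix));
    atomic64_set(&dev->sriov.demux[port - 1].subnet_prefix,
                 be64_to_cpu(gid.global.subnet_prefix));
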
1296 spin_lock_irqsave(&dev->sriov.going_down_lock, flags); in mlx4_ib_tunnel_comp_handler()
1297 if (!dev->sriov.is_going_down && ctx->state == DEMUX_PV_STATE_ACTIVE) in mlx4_ib_tunnel_comp_handler()
1299 spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags); in mlx4_ib_tunnel_comp_handler()
1308 spin_lock_irqsave(&dev->sriov.going_down_lock, flags); in mlx4_ib_wire_comp_handler()
1309 if (!dev->sriov.is_going_down && ctx->state == DEMUX_PV_STATE_ACTIVE) in mlx4_ib_wire_comp_handler()
1311 spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags); in mlx4_ib_wire_comp_handler()
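
The two completion handlers share one pattern: take going_down_lock, then queue work only if teardown has not started and the per-slave context is still active. This is what makes the is_going_down write in mlx4_ib_close_sriov() (lines 2383-2385 below) an effective barrier against new work. A reconstruction of the tunnel-side handler from the fields visible in these hits:

    static void mlx4_ib_tunnel_comp_handler(struct ib_cq *cq, void *arg)
    {
        struct mlx4_ib_demux_pv_ctx *ctx = cq->cq_context;
        struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
        unsigned long flags;

        /* queue tunnel work only while SR-IOV is live and this
         * slave's paravirtualization context is still active */
        spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
        if (!dev->sriov.is_going_down && ctx->state == DEMUX_PV_STATE_ACTIVE)
            queue_work(ctx->wq, &ctx->work);
        spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
    }
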
1374 sqp_ctx = dev->sriov.sqps[port-1]; in mlx4_ib_send_to_wire()
1974 if (dev->sriov.demux[port - 1].tun[slave]) { in free_pv_object()
1975 kfree(dev->sriov.demux[port - 1].tun[slave]); in free_pv_object()
1976 dev->sriov.demux[port - 1].tun[slave] = NULL; in free_pv_object()
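
Lines 1974-1976 are essentially the whole helper: free the slave's tunnel context and NULL the pointer so a repeated call is a no-op. Sketched in full:

    static void free_pv_object(struct mlx4_ib_dev *dev, int slave, int port)
    {
        if (dev->sriov.demux[port - 1].tun[slave]) {
            kfree(dev->sriov.demux[port - 1].tun[slave]);
            dev->sriov.demux[port - 1].tun[slave] = NULL; /* idempotent */
        }
    }
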
2052 ctx->wq = to_mdev(ibdev)->sriov.demux[port - 1].wq; in create_pv_resources()
2053 ctx->wi_wq = to_mdev(ibdev)->sriov.demux[port - 1].wi_wq; in create_pv_resources()
2124 clean_vf_mcast(&dev->sriov.demux[port - 1], slave); in mlx4_ib_tunnels_update()
2128 dev->sriov.sqps[port - 1], 1); in mlx4_ib_tunnels_update()
2131 dev->sriov.demux[port - 1].tun[slave], 1); in mlx4_ib_tunnels_update()
2137 dev->sriov.demux[port - 1].tun[slave]); in mlx4_ib_tunnels_update()
2142 dev->sriov.sqps[port - 1]); in mlx4_ib_tunnels_update()
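
mlx4_ib_tunnels_update() is symmetric around a do_init flag: teardown cleans the slave's multicast state, then destroys the real special-QP resources (master only) and the tunnel QP resources; bring-up creates the tunnel resources first and, for the master, the real SQPs. A sketch of that ordering, assuming the do_init parameter and the create/destroy helper signatures implied by the hits:

    static void mlx4_ib_tunnels_update(struct mlx4_ib_dev *dev, int slave,
                                       int port, int do_init)
    {
        int ret = 0;

        if (!do_init) {
            clean_vf_mcast(&dev->sriov.demux[port - 1], slave);
            /* only the master owns real special-QP resources */
            if (slave == mlx4_master_func_num(dev->dev))
                destroy_pv_resources(dev, slave, port,
                                     dev->sriov.sqps[port - 1], 1);
            destroy_pv_resources(dev, slave, port,
                                 dev->sriov.demux[port - 1].tun[slave], 1);
            return;
        }

        /* bring-up: tunnel QP resources first, then the master's SQPs */
        ret = create_pv_resources(&dev->ib_dev, slave, port, 1,
                                  dev->sriov.demux[port - 1].tun[slave]);
        if (!ret && slave == mlx4_master_func_num(dev->dev))
            ret = create_pv_resources(&dev->ib_dev, slave, port, 0,
                                      dev->sriov.sqps[port - 1]);
    }
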
2307 dev->sriov.is_going_down = 0; in mlx4_ib_init_sriov()
2308 spin_lock_init(&dev->sriov.going_down_lock); in mlx4_ib_init_sriov()
2343 dev->sriov.demux[i].guid_cache[0] = gid.global.interface_id; in mlx4_ib_init_sriov()
2344 atomic64_set(&dev->sriov.demux[i].subnet_prefix, in mlx4_ib_init_sriov()
2347 &dev->sriov.sqps[i]); in mlx4_ib_init_sriov()
2350 err = mlx4_ib_alloc_demux_ctx(dev, &dev->sriov.demux[i], i + 1); in mlx4_ib_init_sriov()
2362 mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]); in mlx4_ib_init_sriov()
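
The mlx4_ib_init_sriov() hits show the bring-up order: clear is_going_down and initialize its lock before anything can observe them, then per port seed guid_cache[0] and the cached subnet prefix from GID 0, allocate the special-QP context, and allocate the demux context. A condensed sketch; the helpers __mlx4_ib_query_gid() and alloc_pv_object() come from the surrounding driver, and the error unwind (the line-2362 hit) is abbreviated:

    dev->sriov.is_going_down = 0;
    spin_lock_init(&dev->sriov.going_down_lock);

    for (i = 0; i < dev->num_ports; i++) {
        union ib_gid gid;

        /* GID 0 of each port seeds the master's GUID-cache slot and
         * the subnet prefix read locklessly on the demux path */
        err = __mlx4_ib_query_gid(&dev->ib_dev, i + 1, 0, &gid, 1);
        if (err)
            goto demux_err;
        dev->sriov.demux[i].guid_cache[0] = gid.global.interface_id;
        atomic64_set(&dev->sriov.demux[i].subnet_prefix,
                     be64_to_cpu(gid.global.subnet_prefix));

        err = alloc_pv_object(dev, mlx4_master_func_num(dev->dev),
                              i + 1, &dev->sriov.sqps[i]);
        if (err)
            goto demux_err;
        err = mlx4_ib_alloc_demux_ctx(dev, &dev->sriov.demux[i], i + 1);
        if (err)
            goto demux_err;
    }
    return 0;

demux_err:
    /* unwind demux contexts already set up (the line-2362 hit) */
    while (--i >= 0)
        mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]);
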
2383 spin_lock_irqsave(&dev->sriov.going_down_lock, flags); in mlx4_ib_close_sriov()
2384 dev->sriov.is_going_down = 1; in mlx4_ib_close_sriov()
2385 spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags); in mlx4_ib_close_sriov()
2388 flush_workqueue(dev->sriov.demux[i].ud_wq); in mlx4_ib_close_sriov()
2389 mlx4_ib_free_sqp_ctx(dev->sriov.sqps[i]); in mlx4_ib_close_sriov()
2390 kfree(dev->sriov.sqps[i]); in mlx4_ib_close_sriov()
2391 dev->sriov.sqps[i] = NULL; in mlx4_ib_close_sriov()
2392 mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]); in mlx4_ib_close_sriov()
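
Teardown mirrors init and explains the guards above: first set is_going_down under going_down_lock, so no completion handler can queue new work afterwards; then, per port, flush the UD workqueue to drain work that was already queued, and free the special-QP and demux contexts. A sketch of the sequence assembled from these hits:

    /* stop new work first: the completion handlers test this
     * flag under the same lock before calling queue_work() */
    spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
    dev->sriov.is_going_down = 1;
    spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);

    for (i = 0; i < dev->num_ports; i++) {
        flush_workqueue(dev->sriov.demux[i].ud_wq); /* drain queued work */
        mlx4_ib_free_sqp_ctx(dev->sriov.sqps[i]);
        kfree(dev->sriov.sqps[i]);
        dev->sriov.sqps[i] = NULL;
        mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]);
    }
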