/drivers/net/ethernet/mellanox/mlx5/core/
sriov.c
      42  struct mlx5_core_sriov *sriov = &dev->priv.sriov;   in sriov_restore_guids() local
      74  struct mlx5_core_sriov *sriov = &dev->priv.sriov;   in mlx5_device_enable_sriov() local
     128  struct mlx5_core_sriov *sriov = &dev->priv.sriov;   in mlx5_device_disable_sriov() local
     216  struct mlx5_core_sriov *sriov = &dev->priv.sriov;   in mlx5_core_sriov_configure() local
     247  sriov = &dev->priv.sriov;   in mlx5_core_sriov_set_msix_vec_count()
     297  struct mlx5_core_sriov *sriov = &dev->priv.sriov;   in mlx5_sriov_init() local
     308  sriov->vfs_ctx = kcalloc(total_vfs, sizeof(*sriov->vfs_ctx), GFP_KERNEL);   in mlx5_sriov_init()
     309  if (!sriov->vfs_ctx)   in mlx5_sriov_init()
     320  struct mlx5_core_sriov *sriov = &dev->priv.sriov;   in mlx5_sriov_cleanup() local
     343  sriov = &mdev->priv.sriov;   in mlx5_sriov_blocking_notifier_unregister()
    [all …]
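The mlx5_sriov_init() hits show the core driver sizing a per-VF context array once at init. A minimal sketch of that allocation pattern, reusing the mlx5 types visible above (the function name is hypothetical, and the origin of total_vfs is assumed):

    /* Minimal sketch of the mlx5_sriov_init() pattern above: allocate one
     * mlx5_vf_context slot per possible VF; mlx5_sriov_cleanup() frees it.
     * Sketch only; the real function also derives total_vfs from the PCI
     * SR-IOV capability. */
    static int sriov_init_sketch(struct mlx5_core_dev *dev, int total_vfs)
    {
            struct mlx5_core_sriov *sriov = &dev->priv.sriov;

            sriov->vfs_ctx = kcalloc(total_vfs, sizeof(*sriov->vfs_ctx),
                                     GFP_KERNEL);
            if (!sriov->vfs_ctx)
                    return -ENOMEM;

            return 0;
    }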
|
/drivers/gpu/drm/xe/tests/
xe_sriov_pf_service_kunit.c
      36  xe->sriov.pf.service.version.latest.major);   in pf_service_test_init()
      37  if (xe->sriov.pf.service.version.base.major == xe->sriov.pf.service.version.latest.major)   in pf_service_test_init()
      39  xe->sriov.pf.service.version.latest.minor);   in pf_service_test_init()
      63  xe->sriov.pf.service.version.base.major,   in pf_negotiate_base_match()
      64  xe->sriov.pf.service.version.base.minor,   in pf_negotiate_base_match()
      77  xe->sriov.pf.service.version.base.major,   in pf_negotiate_base_newer()
      82  if (xe->sriov.pf.service.version.base.major == xe->sriov.pf.service.version.latest.major)   in pf_negotiate_base_newer()
     110  if (!xe->sriov.pf.service.version.base.minor)   in pf_negotiate_base_older()
     115  xe->sriov.pf.service.version.base.major,   in pf_negotiate_base_older()
     177  if (!xe->sriov.pf.service.version.latest.minor)   in pf_negotiate_latest_older()
    [all …]
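The case names above (pf_negotiate_base_match, pf_negotiate_base_newer, pf_negotiate_base_older, pf_negotiate_latest_older) exercise the PF's version negotiation against its base and latest supported ABI. A sketch of what one such KUnit case can look like; the pf_negotiate_version() helper and its int-returning signature are assumptions, not confirmed by this listing:

    /* Hypothetical KUnit case in the spirit of pf_negotiate_base_match():
     * requesting exactly the PF's base version should succeed and yield
     * that same version back. pf_negotiate_version() is an assumed helper. */
    static void pf_negotiate_base_match_sketch(struct kunit *test)
    {
            struct xe_device *xe = test->priv;
            u32 major, minor;

            KUNIT_ASSERT_EQ(test, 0,
                            pf_negotiate_version(xe,
                                                 xe->sriov.pf.service.version.base.major,
                                                 xe->sriov.pf.service.version.base.minor,
                                                 &major, &minor));
            KUNIT_EXPECT_EQ(test, major, xe->sriov.pf.service.version.base.major);
            KUNIT_EXPECT_EQ(test, minor, xe->sriov.pf.service.version.base.minor);
    }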
|
/drivers/infiniband/hw/mlx4/
alias_GUID.c
     140  curr_guid = *(__be64 *)&dev->sriov.   in mlx4_ib_slave_alias_guid_event()
     247  spin_unlock_irqrestore(&dev->sriov.   in mlx4_ib_notify_slaves_on_guid_change()
     435  if (!dev->sriov.is_going_down) {   in aliasguid_query_handler()
     569  if (!dev->sriov.is_going_down) {   in set_guid_rec()
     782  if (!dev->sriov.is_going_down) {   in mlx4_ib_init_alias_guid_work()
     799  struct mlx4_ib_sriov *sriov = &dev->sriov;   in mlx4_ib_destroy_alias_guid_service() local
     827  kfree(dev->sriov.alias_guid.sa_client);   in mlx4_ib_destroy_alias_guid_service()
     838  dev->sriov.alias_guid.sa_client =   in mlx4_ib_init_alias_guid_service()
     840  if (!dev->sriov.alias_guid.sa_client)   in mlx4_ib_init_alias_guid_service()
     874  dev->sriov.alias_guid.ports_guid[i].parent = &dev->sriov.alias_guid;   in mlx4_ib_init_alias_guid_service()
    [all …]
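The repeated !dev->sriov.is_going_down checks guard against queuing new alias-GUID work while SR-IOV support is being torn down. A sketch of that guard; the going_down_lock, wq, and alias_guid_work member names are assumptions following the ports_guid[] pattern shown above:

    /* Sketch of the shutdown guard above: requeue GUID work only while
     * SR-IOV is not going down, checked under a lock so teardown (which
     * sets is_going_down) cannot race with the requeue. */
    static void queue_alias_guid_work_sketch(struct mlx4_ib_dev *dev, int port)
    {
            unsigned long flags;

            spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
            if (!dev->sriov.is_going_down)
                    queue_delayed_work(dev->sriov.alias_guid.ports_guid[port].wq,
                                       &dev->sriov.alias_guid.ports_guid[port].alias_guid_work,
                                       0);
            spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
    }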
|
cm.c
     183  struct mlx4_ib_sriov *sriov = &dev->sriov;   in id_map_ent_timeout() local
     186  spin_lock(&sriov->id_map_lock);   in id_map_ent_timeout()
     236  struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;   in id_map_alloc() local
     268  struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;   in id_map_get() local
     270  spin_lock(&sriov->id_map_lock);   in id_map_get()
     284  struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;   in schedule_delayed() local
     366  xa_lock(&sriov->xa_rej_tmout);   in alloc_rej_tmout()
     411  xa_lock(&sriov->xa_rej_tmout);   in lookup_rej_tmout_slave()
     429  struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;   in mlx4_ib_demux_cm_handler() local
     501  xa_lock(&sriov->xa_rej_tmout);   in rej_tmout_xa_cleanup()
    [all …]
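alloc_rej_tmout(), lookup_rej_tmout_slave(), and rej_tmout_xa_cleanup() all take xa_lock on the same xa_rej_tmout xarray. A sketch of the lookup side, holding the lock across both the load and the field read so a concurrent erase cannot free the entry mid-read; the rej_tmout_entry layout here is assumed for illustration:

    struct rej_tmout_entry {                /* assumed layout */
            int slave;
            u32 rem_pv_cm_id;
    };

    /* Sketch of the xa_lock pattern above: resolve a remote CM id to its
     * slave while the xarray lock pins the entry. */
    static int lookup_rej_tmout_slave_sketch(struct mlx4_ib_sriov *sriov,
                                             u32 rem_pv_cm_id)
    {
            struct rej_tmout_entry *item;
            int slave;

            xa_lock(&sriov->xa_rej_tmout);
            item = xa_load(&sriov->xa_rej_tmout, rem_pv_cm_id);
            slave = item ? item->slave : -ENOENT;
            xa_unlock(&sriov->xa_rej_tmout);

            return slave;
    }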
|
mad.c
     284  if (!dev->sriov.is_going_down)   in smp_snoop()
     299  !dev->sriov.is_going_down) {   in smp_snoop()
    1093  if (!dev->sriov.is_going_down) {   in handle_client_rereg_event()
    1247  else if (!dev->sriov.is_going_down) {   in handle_port_mgmt_change_event()
    1374  sqp_ctx = dev->sriov.sqps[port-1];   in mlx4_ib_send_to_wire()
    2142  dev->sriov.sqps[port - 1]);   in mlx4_ib_tunnels_update()
    2303  dev->sriov.is_going_down = 0;   in mlx4_ib_init_sriov()
    2343  &dev->sriov.sqps[i]);   in mlx4_ib_init_sriov()
    2380  dev->sriov.is_going_down = 1;   in mlx4_ib_close_sriov()
    2386  kfree(dev->sriov.sqps[i]);   in mlx4_ib_close_sriov()
    [all …]
|
/drivers/net/ethernet/qlogic/qlcnic/
qlcnic_sriov_common.c
     153  if (!sriov)   in qlcnic_sriov_init()
     156  adapter->ahw->sriov = sriov;   in qlcnic_sriov_init()
     263  struct qlcnic_sriov *sriov = adapter->ahw->sriov;   in __qlcnic_sriov_cleanup() local
     436  struct qlcnic_sriov *sriov = adapter->ahw->sriov;   in qlcnic_sriov_set_guest_vlan_mode() local
     471  struct qlcnic_sriov *sriov = adapter->ahw->sriov;   in qlcnic_sriov_get_vf_acl() local
    1306  sriov = adapter->ahw->sriov;   in qlcnic_sriov_handle_bc_event()
    1506  struct qlcnic_sriov *sriov = adapter->ahw->sriov;   in qlcnic_vf_add_mc_list() local
    1992  struct qlcnic_sriov *sriov = adapter->ahw->sriov;   in qlcnic_sriov_validate_vlan_cfg() local
    2033  sriov = adapter->ahw->sriov;   in qlcnic_sriov_vlan_operation()
    2058  struct qlcnic_sriov *sriov = adapter->ahw->sriov;   in qlcnic_sriov_cfg_vf_guest_vlan() local
    [all …]
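The qlcnic_sriov_init() hits show the allocate-then-attach pattern: the SR-IOV state hangs off adapter->ahw->sriov, which every later function (see the locals above) reads back. A minimal sketch, assuming kzalloc and ignoring the per-VF info arrays the real init also sizes:

    /* Sketch of the attach pattern above: allocate the SR-IOV state and
     * park it on the adapter's hardware context so later paths can fetch
     * it via adapter->ahw->sriov. */
    static int qlcnic_sriov_init_sketch(struct qlcnic_adapter *adapter)
    {
            struct qlcnic_sriov *sriov;

            sriov = kzalloc(sizeof(*sriov), GFP_KERNEL);
            if (!sriov)
                    return -ENOMEM;

            adapter->ahw->sriov = sriov;
            return 0;
    }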
|
qlcnic_sriov_pf.c
      70  struct qlcnic_sriov *sriov = adapter->ahw->sriov;   in qlcnic_sriov_pf_cal_res_limit() local
     158  struct qlcnic_sriov *sriov = adapter->ahw->sriov;   in qlcnic_sriov_set_vf_max_vlan() local
     235  struct qlcnic_sriov *sriov = adapter->ahw->sriov;   in qlcnic_sriov_pf_reset_vport_handle() local
     253  struct qlcnic_sriov *sriov = adapter->ahw->sriov;   in qlcnic_sriov_pf_set_vport_handle() local
     271  struct qlcnic_sriov *sriov = adapter->ahw->sriov;   in qlcnic_sriov_pf_get_vport_handle() local
     408  struct qlcnic_sriov *sriov = adapter->ahw->sriov;   in qlcnic_sriov_pf_del_flr_queue() local
     752  sriov = adapter->ahw->sriov;   in qlcnic_sriov_pf_channel_cfg_cmd()
     863  sriov = adapter->ahw->sriov;   in qlcnic_83xx_cfg_default_mac_vlan()
    1345  sriov = adapter->ahw->sriov;   in qlcnic_sriov_pf_get_acl_cmd()
    1372  struct qlcnic_sriov *sriov = adapter->ahw->sriov;   in qlcnic_sriov_pf_del_guest_vlan() local
    [all …]
|
/drivers/gpu/drm/xe/
xe_tile_sriov_vf.c
      24  tile->sriov.vf.ggtt_balloon[0] = xe_ggtt_node_init(ggtt);   in vf_init_ggtt_balloons()
      25  if (IS_ERR(tile->sriov.vf.ggtt_balloon[0]))   in vf_init_ggtt_balloons()
      26  return PTR_ERR(tile->sriov.vf.ggtt_balloon[0]);   in vf_init_ggtt_balloons()
      28  tile->sriov.vf.ggtt_balloon[1] = xe_ggtt_node_init(ggtt);   in vf_init_ggtt_balloons()
      29  if (IS_ERR(tile->sriov.vf.ggtt_balloon[1])) {   in vf_init_ggtt_balloons()
      30  xe_ggtt_node_fini(tile->sriov.vf.ggtt_balloon[0]);   in vf_init_ggtt_balloons()
      31  return PTR_ERR(tile->sriov.vf.ggtt_balloon[1]);   in vf_init_ggtt_balloons()
     121  xe_ggtt_node_remove_balloon_locked(tile->sriov.vf.ggtt_balloon[1]);   in xe_tile_sriov_vf_deballoon_ggtt_locked()
     122  xe_ggtt_node_remove_balloon_locked(tile->sriov.vf.ggtt_balloon[0]);   in xe_tile_sriov_vf_deballoon_ggtt_locked()
     136  xe_ggtt_node_fini(tile->sriov.vf.ggtt_balloon[1]);   in vf_fini_ggtt_balloons()
    [all …]
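vf_init_ggtt_balloons() shows the classic two-step unwind: if the second balloon node fails to allocate, the first is released before the error is propagated, and teardown releases them in reverse order. A sketch of the init side, assuming tile->mem.ggtt as the GGTT handle:

    /* Sketch of the unwind pattern above: two GGTT balloon nodes, with the
     * first freed if the second cannot be allocated. */
    static int vf_init_ggtt_balloons_sketch(struct xe_tile *tile)
    {
            struct xe_ggtt *ggtt = tile->mem.ggtt;    /* assumed handle */

            tile->sriov.vf.ggtt_balloon[0] = xe_ggtt_node_init(ggtt);
            if (IS_ERR(tile->sriov.vf.ggtt_balloon[0]))
                    return PTR_ERR(tile->sriov.vf.ggtt_balloon[0]);

            tile->sriov.vf.ggtt_balloon[1] = xe_ggtt_node_init(ggtt);
            if (IS_ERR(tile->sriov.vf.ggtt_balloon[1])) {
                    xe_ggtt_node_fini(tile->sriov.vf.ggtt_balloon[0]);
                    return PTR_ERR(tile->sriov.vf.ggtt_balloon[1]);
            }

            return 0;
    }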
|
xe_gt_sriov_vf.c
     178  gt->sriov.vf.wanted_guc_version = wanted;   in vf_handshake_with_guc()
     291  *found = gt->sriov.vf.guc_version;   in xe_gt_sriov_vf_guc_versions()
     420  xe_gt_assert(gt, gt->sriov.vf.guc_version.major > 1 || gt->sriov.vf.guc_version.minor >= 2);   in xe_gt_sriov_vf_gmdid()
     693  xe->sriov.vf.pf_version.major = major;   in vf_connect_pf()
     694  xe->sriov.vf.pf_version.minor = minor;   in vf_connect_pf()
     803  gt->sriov.vf.runtime.regs = NULL;   in vf_prepare_runtime_info()
     804  gt->sriov.vf.runtime.num_regs = 0;   in vf_prepare_runtime_info()
     805  gt->sriov.vf.runtime.regs_size = 0;   in vf_prepare_runtime_info()
     812  gt->sriov.vf.runtime.regs = regs;   in vf_prepare_runtime_info()
     976  addr, gt->sriov.vf.runtime.gmdid);   in xe_gt_sriov_vf_read32()
    [all …]
|
xe_sriov_pf_service.c
      32  xe->sriov.pf.service.version.base.major = GUC_RELAY_VERSION_BASE_MAJOR;   in xe_sriov_pf_service_init()
      33  xe->sriov.pf.service.version.base.minor = GUC_RELAY_VERSION_BASE_MINOR;   in xe_sriov_pf_service_init()
      92  xe->sriov.pf.vfs[vfid].version.major = major;   in pf_connect()
      93  xe->sriov.pf.vfs[vfid].version.minor = minor;   in pf_connect()
     100  xe->sriov.pf.vfs[vfid].version.major = 0;   in pf_disconnect()
     101  xe->sriov.pf.vfs[vfid].version.minor = 0;   in pf_disconnect()
     121  return major == xe->sriov.pf.vfs[vfid].version.major &&   in xe_sriov_pf_service_is_negotiated()
     122  minor <= xe->sriov.pf.vfs[vfid].version.minor;   in xe_sriov_pf_service_is_negotiated()
     202  print_pf_version(p, "base", &xe->sriov.pf.service.version.base);   in xe_sriov_pf_service_print_versions()
     203  print_pf_version(p, "latest", &xe->sriov.pf.service.version.latest);   in xe_sriov_pf_service_print_versions()
    [all …]
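Lines 121-122 spell out the compatibility rule the PF applies per VF: a request is covered by the negotiated ABI only if the major version matches exactly and the requested minor does not exceed the negotiated one. The same rule as a standalone predicate, mirroring those two lines:

    /* The rule from xe_sriov_pf_service_is_negotiated() above: equal
     * major, and requested minor no newer than what was negotiated. */
    static bool is_negotiated_sketch(struct xe_device *xe, unsigned int vfid,
                                     u32 major, u32 minor)
    {
            return major == xe->sriov.pf.vfs[vfid].version.major &&
                   minor <= xe->sriov.pf.vfs[vfid].version.minor;
    }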
|
xe_sriov.c
      80  xe_assert(xe, !xe->sriov.__mode);   in xe_sriov_probe_early()
      81  xe->sriov.__mode = mode;   in xe_sriov_probe_early()
      82  xe_assert(xe, xe->sriov.__mode);   in xe_sriov_probe_early()
      93  destroy_workqueue(xe->sriov.wq);   in fini_sriov()
      94  xe->sriov.wq = NULL;   in fini_sriov()
     121  xe_assert(xe, !xe->sriov.wq);   in xe_sriov_init()
     122  xe->sriov.wq = alloc_workqueue("xe-sriov-wq", 0, 0);   in xe_sriov_init()
     123  if (!xe->sriov.wq)   in xe_sriov_init()
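xe_sriov_init() and fini_sriov() bracket the lifetime of the shared SR-IOV workqueue that both the PF control worker and the VF migration worker later queue onto. A sketch of the pair, mirroring the hits above:

    /* Sketch of the workqueue lifetime above: allocate once at init, and
     * NULL the pointer on teardown so a stale queue is never reused. */
    static int init_sriov_wq_sketch(struct xe_device *xe)
    {
            xe->sriov.wq = alloc_workqueue("xe-sriov-wq", 0, 0);
            if (!xe->sriov.wq)
                    return -ENOMEM;

            return 0;
    }

    static void fini_sriov_wq_sketch(struct xe_device *xe)
    {
            destroy_workqueue(xe->sriov.wq);
            xe->sriov.wq = NULL;
    }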
|
xe_gt_sriov_pf.c
      41  gt->sriov.pf.vfs = drmm_kcalloc(&gt_to_xe(gt)->drm, 1 + num_vfs,   in pf_alloc_metadata()
      42  sizeof(*gt->sriov.pf.vfs), GFP_KERNEL);   in pf_alloc_metadata()
      43  if (!gt->sriov.pf.vfs)   in pf_alloc_metadata()
      52  INIT_WORK(&gt->sriov.pf.workers.restart, pf_worker_restart_func);   in pf_init_workers()
      58  disable_work_sync(&gt->sriov.pf.workers.restart);   in pf_fini_workers()
     210  if (cancel_work_sync(&gt->sriov.pf.workers.restart))   in pf_cancel_restart()
     239  struct xe_gt *gt = container_of(w, typeof(*gt), sriov.pf.workers.restart);   in pf_worker_restart_func()
     250  if (!queue_work(xe->sriov.wq, &gt->sriov.pf.workers.restart))   in pf_queue_restart()
     268  flush_work(&gt->sriov.pf.workers.restart);   in pf_flush_restart()
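pf_alloc_metadata() sizes the metadata array as 1 + num_vfs: index 0 is commonly reserved for the PF itself so VF identifiers (1..num_vfs) index the array directly (a convention assumed here, not stated by the listing). A sketch using the drm-managed allocation from the hits:

    /* Sketch of pf_alloc_metadata() above: one drm-managed metadata slot
     * for the PF (index 0) plus one per VF. */
    static int pf_alloc_metadata_sketch(struct xe_gt *gt, unsigned int num_vfs)
    {
            gt->sriov.pf.vfs = drmm_kcalloc(&gt_to_xe(gt)->drm, 1 + num_vfs,
                                            sizeof(*gt->sriov.pf.vfs),
                                            GFP_KERNEL);
            if (!gt->sriov.pf.vfs)
                    return -ENOMEM;

            return 0;
    }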
|
xe_gt_sriov_pf_service.c
     155  gt->sriov.pf.service.runtime.size = size;   in pf_alloc_runtime_info()
     156  gt->sriov.pf.service.runtime.regs = regs;   in pf_alloc_runtime_info()
     157  gt->sriov.pf.service.runtime.values = values;   in pf_alloc_runtime_info()
     175  if (!gt->sriov.pf.service.runtime.size)   in pf_prepare_runtime_info()
     178  size = gt->sriov.pf.service.runtime.size;   in pf_prepare_runtime_info()
     179  regs = gt->sriov.pf.service.runtime.regs;   in pf_prepare_runtime_info()
     180  values = gt->sriov.pf.service.runtime.values;   in pf_prepare_runtime_info()
     280  runtime = &gt->sriov.pf.service.runtime;   in pf_service_runtime_query()
     396  size = gt->sriov.pf.service.runtime.size;   in xe_gt_sriov_pf_service_print_runtime()
     397  regs = gt->sriov.pf.service.runtime.regs;   in xe_gt_sriov_pf_service_print_runtime()
    [all …]
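pf_alloc_runtime_info() keeps two arrays sized together, one of register descriptors and one of captured values, so a VF runtime query can be answered by index. A sketch of that layout, with plain kcalloc and u32 offsets standing in for the driver's real register type:

    /* Sketch of the parallel-array layout above: regs[i] describes the
     * register whose snapshotted value lives in values[i]. Simplified:
     * u32 offsets instead of the driver's register type, plain kcalloc. */
    static int pf_alloc_runtime_info_sketch(struct xe_gt *gt, unsigned int size)
    {
            u32 *regs = kcalloc(size, sizeof(*regs), GFP_KERNEL);
            u32 *values = kcalloc(size, sizeof(*values), GFP_KERNEL);

            if (!regs || !values) {
                    kfree(regs);
                    kfree(values);
                    return -ENOMEM;
            }

            gt->sriov.pf.service.runtime.size = size;
            gt->sriov.pf.service.runtime.regs = regs;
            gt->sriov.pf.service.runtime.values = values;
            return 0;
    }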
|
xe_gt_sriov_pf_policy.c
     145  gt->sriov.pf.vfs[n].config.sched_priority = priority;   in pf_bulk_reset_sched_priority()
     156  &gt->sriov.pf.policy.guc.sched_if_idle,   in pf_provision_sched_if_idle()
     178  gt->sriov.pf.policy.guc.sched_if_idle = false;   in pf_sanitize_sched_if_idle()
     216  enable = gt->sriov.pf.policy.guc.sched_if_idle;   in xe_gt_sriov_pf_policy_get_sched_if_idle()
     228  &gt->sriov.pf.policy.guc.reset_engine, enable);   in pf_provision_reset_engine()
     244  gt->sriov.pf.policy.guc.reset_engine = false;   in pf_sanitize_reset_engine()
     282  enable = gt->sriov.pf.policy.guc.reset_engine;   in xe_gt_sriov_pf_policy_get_reset_engine()
     294  &gt->sriov.pf.policy.guc.sample_period, value);   in pf_provision_sample_period()
     310  gt->sriov.pf.policy.guc.sample_period = 0;   in pf_sanitize_sample_period()
     348  value = gt->sriov.pf.policy.guc.sample_period;   in xe_gt_sriov_pf_policy_get_sample_period()
    [all …]
|
xe_sriov_pf.c
      74  xe->sriov.pf.device_total_vfs = totalvfs;   in xe_sriov_pf_readiness()
      75  xe->sriov.pf.driver_max_vfs = newlimit;   in xe_sriov_pf_readiness()
      92  xe->sriov.pf.vfs = drmm_kcalloc(&xe->drm, 1 + xe_sriov_pf_get_totalvfs(xe),   in xe_sriov_pf_init_early()
      93  sizeof(*xe->sriov.pf.vfs), GFP_KERNEL);   in xe_sriov_pf_init_early()
      94  if (!xe->sriov.pf.vfs)   in xe_sriov_pf_init_early()
      97  err = drmm_mutex_init(&xe->drm, &xe->sriov.pf.master_lock);   in xe_sriov_pf_init_early()
     145  drm_printf(p, "total: %u\n", xe->sriov.pf.device_total_vfs);   in xe_sriov_pf_print_vfs_summary()
     146  drm_printf(p, "supported: %u\n", xe->sriov.pf.driver_max_vfs);   in xe_sriov_pf_print_vfs_summary()
|
xe_sriov_vf.c
     144  INIT_WORK(&xe->sriov.vf.migration.worker, migration_worker_func);   in xe_sriov_vf_init_early()
     152  return test_bit(gt->info.id, &gt_to_xe(gt)->sriov.vf.migration.gt_flags);   in gt_vf_post_migration_needed()
     189  if (test_and_clear_bit(id, &xe->sriov.vf.migration.gt_flags))   in vf_get_next_migrated_gt_id()
     266  sriov.vf.migration.worker);   in migration_worker_func()
     284  if (test_bit(id, &xe->sriov.vf.migration.gt_flags))   in vf_ready_to_recovery_on_any_gts()
     305  started = queue_work(xe->sriov.wq, &xe->sriov.vf.migration.worker);   in xe_sriov_vf_start_migration_recovery()
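Migration recovery is tracked per GT through a bitmask: each GT that needs post-migration fixups has its bit set in sriov.vf.migration.gt_flags, and the worker consumes the bits one at a time with test_and_clear_bit(). A sketch of the consumer side, assuming xe's for_each_gt() iterator:

    /* Sketch of the bit-consuming loop above: atomically claim the next
     * GT whose recovery bit is set, so each GT is processed exactly once
     * even if recovery is re-triggered concurrently. */
    static struct xe_gt *vf_take_next_migrated_gt_sketch(struct xe_device *xe)
    {
            struct xe_gt *gt;
            u8 id;

            for_each_gt(gt, xe, id)
                    if (test_and_clear_bit(id, &xe->sriov.vf.migration.gt_flags))
                            return gt;

            return NULL;
    }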
|
xe_gt_sriov_pf_migration.c
     119  return gt->sriov.pf.migration.supported;   in pf_migration_supported()
     125  return &gt->sriov.pf.migration.snapshot_lock;   in pf_migration_mutex()
     135  return &gt->sriov.pf.vfs[vfid].snapshot;   in pf_pick_vf_snapshot()
     140  return container_of(snapshot, struct xe_gt_sriov_metadata, snapshot) - gt->sriov.pf.vfs;   in pf_snapshot_index()
     409  gt->sriov.pf.migration.supported = pf_check_migration_support(gt);   in xe_gt_sriov_pf_migration_init()
     414  err = drmm_mutex_init(&xe->drm, &gt->sriov.pf.migration.snapshot_lock);   in xe_gt_sriov_pf_migration_init()
|
xe_sriov_pf_helpers.h
      37  return xe->sriov.pf.driver_max_vfs;   in xe_sriov_pf_get_totalvfs()
      43  return &xe->sriov.pf.master_lock;   in xe_sriov_pf_master_mutex()
|
xe_gt_sriov_pf_monitor.c
      32  gt->sriov.pf.vfs[vfid].monitor.guc.events[e] = 0;   in xe_gt_sriov_pf_monitor_flr()
      41  gt->sriov.pf.vfs[vfid].monitor.guc.events[e]++;   in pf_update_event_counter()
     127  data = &gt->sriov.pf.vfs[n].monitor;   in xe_gt_sriov_pf_monitor_print_events()
|
xe_gt_sriov_pf_control.c
     219  return &gt->sriov.pf.vfs[vfid].control;   in pf_pick_vf_control()
     336  queue_work(xe->sriov.wq, &gt->sriov.pf.control.worker);   in pf_queue_control_worker()
     341  struct xe_gt_sriov_pf_control *pfc = &gt->sriov.pf.control;   in pf_queue_vf()
     346  list_move_tail(&gt->sriov.pf.vfs[vfid].control.link, &pfc->list);   in pf_queue_vf()
    1393  return container_of(cs, struct xe_gt_sriov_metadata, control) - gt->sriov.pf.vfs;   in pf_control_state_index()
    1398  struct xe_gt_sriov_pf_control *pfc = &gt->sriov.pf.control;   in pf_worker_find_work()
    1427  struct xe_gt *gt = container_of(w, struct xe_gt, sriov.pf.control.worker);   in control_worker_func()
    1436  cancel_work_sync(&gt->sriov.pf.control.worker);   in pf_stop_worker()
    1469  spin_lock_init(&gt->sriov.pf.control.lock);   in xe_gt_sriov_pf_control_init()
    1470  INIT_LIST_HEAD(&gt->sriov.pf.control.list);   in xe_gt_sriov_pf_control_init()
    [all …]
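pf_queue_vf() shows the producer side of the control worker: the VF's control state is moved onto the PF list under the spinlock initialized at line 1469, then the worker is kicked on the shared xe->sriov.wq. A simplified sketch (the real code also manages per-VF state bits before queuing):

    /* Sketch of the producer pattern above: publish the VF's control
     * state on the PF list under pfc->lock, then schedule the worker on
     * the shared SR-IOV workqueue. */
    static void pf_queue_vf_sketch(struct xe_gt *gt, unsigned int vfid)
    {
            struct xe_gt_sriov_pf_control *pfc = &gt->sriov.pf.control;

            spin_lock(&pfc->lock);
            list_move_tail(&gt->sriov.pf.vfs[vfid].control.link, &pfc->list);
            spin_unlock(&pfc->lock);

            queue_work(gt_to_xe(gt)->sriov.wq, &pfc->worker);
    }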
|
xe_sriov.h
      24  xe_assert(xe, xe->sriov.__mode);   in xe_device_sriov_mode()
      25  return xe->sriov.__mode;   in xe_device_sriov_mode()
|
xe_sriov_printk.h
      15  ((xe)->sriov.__mode == XE_SRIOV_MODE_PF ? "PF: " : \
      16  (xe)->sriov.__mode == XE_SRIOV_MODE_VF ? "VF: " : "")
|
xe_gt_sriov_pf_config.c
     237  return &gt->sriov.pf.vfs[vfid].config;   in pf_pick_vf_config()
     403  spare = gt->sriov.pf.spare.ggtt_size;   in pf_get_spare_ggtt()
     419  gt->sriov.pf.spare.ggtt_size = size;   in pf_set_spare_ggtt()
     742  spare = gt->sriov.pf.spare.num_ctxs;   in pf_get_spare_ctxs()
     759  gt->sriov.pf.spare.num_ctxs = spare;   in pf_set_spare_ctxs()
    1044  spare = gt->sriov.pf.spare.num_dbs;   in pf_get_spare_dbs()
    1061  gt->sriov.pf.spare.num_dbs = spare;   in pf_set_spare_dbs()
    1358  lmtt = &tile->sriov.pf.lmtt;   in pf_force_lmtt_invalidate()
    1373  lmtt = &tile->sriov.pf.lmtt;   in pf_reset_vf_lmtt()
    1398  lmtt = &tile->sriov.pf.lmtt;   in pf_update_vf_lmtt()
    [all …]
|
/drivers/pci/
iov.c
      37  dev->sriov->stride * vf_id) & 0xff;   in pci_iov_virtfn_devfn()
      50  pf->sriov->stride;   in pci_iov_vf_id()
     196  &physfn->sriov->class);   in pci_read_vf_config_common()
     503  if (pdev->sriov->num_VFs) {   in sriov_numvfs_store()
     895  dev->sriov = iov;   in sriov_init()
     904  dev->sriov = NULL;   in sriov_init()
     918  BUG_ON(dev->sriov->num_VFs);   in sriov_release()
     920  if (dev != dev->sriov->dev)   in sriov_release()
     923  kfree(dev->sriov);   in sriov_release()
     924  dev->sriov = NULL;   in sriov_release()
    [all …]
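pci_iov_virtfn_devfn() computes a VF's devfn from the PF's devfn plus the First VF Offset and VF Stride fields of the SR-IOV capability; the & 0xff keeps only the devfn byte, while the VF's bus number is derived separately. A sketch of just that arithmetic (the offset member is assumed alongside the stride shown in the hit):

    /* Sketch of the routing-ID arithmetic above: VF n sits at
     * PF devfn + offset + n * stride, truncated to the devfn byte. */
    static u8 vf_devfn_sketch(const struct pci_dev *pf, int vf_id)
    {
            return (pf->devfn + pf->sriov->offset +
                    pf->sriov->stride * vf_id) & 0xff;
    }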
|
/drivers/infiniband/hw/mlx5/
ib_virt.c
      98  struct mlx5_vf_context *vfs_ctx = mdev->priv.sriov.vfs_ctx;   in mlx5_ib_set_vf_link_state()
     157  struct mlx5_vf_context *vfs_ctx = mdev->priv.sriov.vfs_ctx;   in set_vf_node_guid()
     181  struct mlx5_vf_context *vfs_ctx = mdev->priv.sriov.vfs_ctx;   in set_vf_port_guid()
     216  struct mlx5_vf_context *vfs_ctx = mdev->priv.sriov.vfs_ctx;   in mlx5_ib_get_vf_guid()
|