Lines Matching refs:hive

3190 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev); in amdgpu_device_ip_init() local
3192 if (WARN_ON(!hive)) { in amdgpu_device_ip_init()
3197 if (!hive->reset_domain || in amdgpu_device_ip_init()
3198 !amdgpu_reset_get_reset_domain(hive->reset_domain)) { in amdgpu_device_ip_init()
3200 amdgpu_put_xgmi_hive(hive); in amdgpu_device_ip_init()
3206 adev->reset_domain = hive->reset_domain; in amdgpu_device_ip_init()
3207 amdgpu_put_xgmi_hive(hive); in amdgpu_device_ip_init()
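
Pieced together from the references above (3190-3207), the shape of the hive usage in amdgpu_device_ip_init() is roughly the following sketch: an XGMI node looks up its hive, adopts the hive-wide reset_domain in place of a per-device one, and then drops the hive reference. The error codes and the goto label are assumptions for illustration, not taken from the listing.

	hive = amdgpu_get_xgmi_hive(adev);
	if (WARN_ON(!hive)) {
		r = -ENOENT;			/* assumed error code */
		goto init_failed;		/* assumed cleanup label */
	}
	if (!hive->reset_domain ||
	    !amdgpu_reset_get_reset_domain(hive->reset_domain)) {
		r = -ENOENT;			/* assumed error code */
		amdgpu_put_xgmi_hive(hive);
		goto init_failed;		/* assumed cleanup label */
	}
	/* share the hive-wide reset domain across all nodes of the hive */
	adev->reset_domain = hive->reset_domain;
	amdgpu_put_xgmi_hive(hive);
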
4173 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev); in amdgpu_device_xgmi_reset_func() local
4176 if (WARN_ON(!hive)) in amdgpu_device_xgmi_reset_func()
4187 task_barrier_enter(&hive->tb); in amdgpu_device_xgmi_reset_func()
4193 task_barrier_exit(&hive->tb); in amdgpu_device_xgmi_reset_func()
4202 task_barrier_full(&hive->tb); in amdgpu_device_xgmi_reset_func()
4211 amdgpu_put_xgmi_hive(hive); in amdgpu_device_xgmi_reset_func()
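
The references at 4173-4211 show amdgpu_device_xgmi_reset_func() using the hive's task barrier so that every node in the hive runs its reset steps in lockstep. A rough sketch of that shape, with the per-node reset work elided and the branch condition assumed rather than taken from the listing:

	struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);

	if (WARN_ON(!hive))
		return;

	if (use_baco) {				/* assumed condition name */
		task_barrier_enter(&hive->tb);	/* rendezvous before the enter step */
		/* ... per-node enter step elided ... */
		task_barrier_exit(&hive->tb);	/* rendezvous before the exit step */
		/* ... per-node exit step elided ... */
	} else {
		task_barrier_full(&hive->tb);	/* single full rendezvous */
		/* ... per-node reset elided ... */
	}

	amdgpu_put_xgmi_hive(hive);
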
5492 struct amdgpu_hive_info *hive = NULL; in amdgpu_device_reset_sriov() local
5528 hive = amdgpu_get_xgmi_hive(adev); in amdgpu_device_reset_sriov()
5530 if (hive && adev->gmc.xgmi.num_physical_nodes > 1) in amdgpu_device_reset_sriov()
5531 r = amdgpu_xgmi_update_topology(hive, adev); in amdgpu_device_reset_sriov()
5532 if (hive) in amdgpu_device_reset_sriov()
5533 amdgpu_put_xgmi_hive(hive); in amdgpu_device_reset_sriov()
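
At 5492-5533, amdgpu_device_reset_sriov() only touches the hive to refresh the XGMI topology after a virtual-function reset, and only when the device spans more than one physical node. Reassembling the listed fragments gives the following condensed tail:

	hive = amdgpu_get_xgmi_hive(adev);
	if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
		r = amdgpu_xgmi_update_topology(hive, adev);
	if (hive)
		amdgpu_put_xgmi_hive(hive);
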
5885 if (!reset_context->hive && in amdgpu_device_reinit_after_reset()
5914 if (reset_context->hive && in amdgpu_device_reinit_after_reset()
5917 reset_context->hive, tmp_adev); in amdgpu_device_reinit_after_reset()
6141 struct amdgpu_hive_info *hive) in amdgpu_device_recovery_prepare() argument
6151 if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1) && hive) { in amdgpu_device_recovery_prepare()
6152 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { in amdgpu_device_recovery_prepare()
6202 struct amdgpu_hive_info *hive, in amdgpu_device_halt_activities() argument
6418 struct amdgpu_hive_info *hive = NULL; in amdgpu_device_gpu_recover() local
6456 hive = amdgpu_get_xgmi_hive(adev); in amdgpu_device_gpu_recover()
6457 if (hive) in amdgpu_device_gpu_recover()
6458 mutex_lock(&hive->hive_lock); in amdgpu_device_gpu_recover()
6461 reset_context->hive = hive; in amdgpu_device_gpu_recover()
6464 if (amdgpu_device_recovery_prepare(adev, &device_list, hive)) in amdgpu_device_gpu_recover()
6471 hive, need_emergency_restart); in amdgpu_device_gpu_recover()
6498 if (hive) { in amdgpu_device_gpu_recover()
6499 mutex_unlock(&hive->hive_lock); in amdgpu_device_gpu_recover()
6500 amdgpu_put_xgmi_hive(hive); in amdgpu_device_gpu_recover()
6887 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev); in amdgpu_pci_error_detected() local
6908 if (hive) in amdgpu_pci_error_detected()
6909 mutex_lock(&hive->hive_lock); in amdgpu_pci_error_detected()
6914 amdgpu_device_recovery_prepare(adev, &device_list, hive); in amdgpu_pci_error_detected()
6917 hive, false); in amdgpu_pci_error_detected()
6918 if (hive) { in amdgpu_pci_error_detected()
6919 mutex_unlock(&hive->hive_lock); in amdgpu_pci_error_detected()
6920 amdgpu_put_xgmi_hive(hive); in amdgpu_pci_error_detected()
6967 struct amdgpu_hive_info *hive; in amdgpu_pci_slot_reset() local
7007 hive = amdgpu_get_xgmi_hive(adev); in amdgpu_pci_slot_reset()
7008 if (hive) { in amdgpu_pci_slot_reset()
7009 mutex_lock(&hive->hive_lock); in amdgpu_pci_slot_reset()
7010 reset_context.hive = hive; in amdgpu_pci_slot_reset()
7011 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { in amdgpu_pci_slot_reset()
7028 if (hive) { in amdgpu_pci_slot_reset()
7035 if (hive) { in amdgpu_pci_slot_reset()
7036 mutex_unlock(&hive->hive_lock); in amdgpu_pci_slot_reset()
7037 amdgpu_put_xgmi_hive(hive); in amdgpu_pci_slot_reset()
7055 struct amdgpu_hive_info *hive = NULL; in amdgpu_pci_resume() local
7066 hive = amdgpu_get_xgmi_hive(adev); in amdgpu_pci_resume()
7067 if (hive) { in amdgpu_pci_resume()
7068 mutex_lock(&hive->hive_lock); in amdgpu_pci_resume()
7069 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { in amdgpu_pci_resume()
7081 if (hive) { in amdgpu_pci_resume()
7082 mutex_unlock(&hive->hive_lock); in amdgpu_pci_resume()
7083 amdgpu_put_xgmi_hive(hive); in amdgpu_pci_resume()
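
Taken together, amdgpu_device_gpu_recover() and the three PCI error handlers above follow the same reference-counted discipline: look the hive up, hold hive->hive_lock while walking or handing out hive->device_list, then unlock and drop the reference. A minimal sketch of that pattern, assuming the amdgpu driver context; the function name and the per-device work are placeholders, not code from the driver:

	#include "amdgpu.h"
	#include "amdgpu_xgmi.h"

	/* hypothetical helper, shown only to illustrate the recurring pattern */
	static void amdgpu_hive_walk_sketch(struct amdgpu_device *adev)
	{
		struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
		struct amdgpu_device *tmp_adev;

		if (!hive)		/* not part of an XGMI hive: nothing to do */
			return;

		mutex_lock(&hive->hive_lock);
		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
			/* per-device work goes here */
		}
		mutex_unlock(&hive->hive_lock);

		amdgpu_put_xgmi_hive(hive);	/* drop the reference taken by the lookup */
	}
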