Lines matching refs:xgmi

324 return sysfs_emit(buf, "%llu\n", adev->gmc.xgmi.node_id); in amdgpu_xgmi_show_device_id()
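The match at 324 is the read side of a sysfs attribute: it prints the device's XGMI node id into the caller's page buffer. A minimal, hedged sketch of that handler; the attribute registration shown here is an assumption for illustration, not copied from the driver.

static ssize_t amdgpu_xgmi_show_device_id(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	/* node_id identifies this GPU within its XGMI hive */
	return sysfs_emit(buf, "%llu\n", adev->gmc.xgmi.node_id);
}

/* hypothetical attribute wiring for the sketch above */
static DEVICE_ATTR(xgmi_device_id, 0444, amdgpu_xgmi_show_device_id, NULL);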
439 if (!adev->gmc.xgmi.hive_id) in amdgpu_get_xgmi_hive()
450 if (hive->hive_id == adev->gmc.xgmi.hive_id) in amdgpu_get_xgmi_hive()
501 hive->hive_id = adev->gmc.xgmi.hive_id; in amdgpu_get_xgmi_hive()
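Taken together, 439/450/501 trace a get-or-create lookup: bail out when the device reports no hive id, scan the known hives for a matching id, and only then allocate and stamp a fresh hive. A hedged sketch of that flow; locking, refcounting, and the global xgmi_hive_list / hive list-node names are assumptions, not quotes:

struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
{
	struct amdgpu_hive_info *hive;

	if (!adev->gmc.xgmi.hive_id)	/* device is not part of any hive */
		return NULL;

	/* reuse an existing hive when the ids match */
	list_for_each_entry(hive, &xgmi_hive_list, node)
		if (hive->hive_id == adev->gmc.xgmi.hive_id)
			return hive;

	hive = kzalloc(sizeof(*hive), GFP_KERNEL);
	if (!hive)
		return NULL;

	hive->hive_id = adev->gmc.xgmi.hive_id;	/* stamp the new hive */
	INIT_LIST_HEAD(&hive->device_list);
	list_add_tail(&hive->node, &xgmi_hive_list);
	return hive;
}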
572 request_adev->gmc.xgmi.node_id, in amdgpu_xgmi_set_pstate()
573 request_adev->gmc.xgmi.hive_id, ret); in amdgpu_xgmi_set_pstate()
604 adev->gmc.xgmi.node_id, in amdgpu_xgmi_update_topology()
605 adev->gmc.xgmi.hive_id, ret); in amdgpu_xgmi_update_topology()
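Lines 572-573 and 604-605 show the same failure-reporting idiom: when a PSP topology call fails, the log line carries both the node id and the hive id so the message stays attributable in a multi-GPU system. A hedged sketch of amdgpu_xgmi_update_topology() built around that idiom; the exact psp_xgmi_set_topology_info() arguments are assumptions:

int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive,
				struct amdgpu_device *adev)
{
	int ret;

	ret = psp_xgmi_set_topology_info(&adev->psp,
					 atomic_read(&hive->number_devices),
					 &adev->psp.xgmi_context.top_info);
	if (ret)
		dev_err(adev->dev,
			"XGMI: Set topology failure on device %llx, hive %llx, ret %d",
			adev->gmc.xgmi.node_id,
			adev->gmc.xgmi.hive_id, ret);

	return ret;
}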
625 if (top->nodes[i].node_id == peer_adev->gmc.xgmi.node_id) in amdgpu_xgmi_get_hops_count()
637 if (top->nodes[i].node_id == peer_adev->gmc.xgmi.node_id) in amdgpu_xgmi_get_num_links()
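Both 625 and 637 perform the same scan: walk the firmware topology table until an entry's node_id matches the peer device, then read a per-link field (hop count in one case, link count in the other). A self-contained model of that scan; the struct layout and field widths here are assumptions:

#include <stdint.h>
#include <stddef.h>

struct topology_node {
	uint64_t node_id;	/* fabric id of the peer */
	uint8_t num_hops;	/* hops from this GPU to the peer */
	uint8_t num_links;	/* xGMI links wired to the peer */
};

struct topology_info {
	size_t num_nodes;
	struct topology_node nodes[64];
};

static int xgmi_get_hops_count(const struct topology_info *top,
			       uint64_t peer_node_id)
{
	for (size_t i = 0; i < top->num_nodes; i++)
		if (top->nodes[i].node_id == peer_node_id)
			return top->nodes[i].num_hops;
	return -1;	/* peer is not part of this hive's topology */
}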
654 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { in amdgpu_xgmi_initialize_hive_get_data_partition()
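The iteration at 654 (and again at 729, 743, and 765) uses the kernel's intrusive-list idiom: each amdgpu_device embeds a list node at gmc.xgmi.head, and list_for_each_entry() recovers the containing device from that node via container_of(). A hedged fragment showing the shape, assuming <linux/list.h>:

struct amdgpu_device *tmp_adev;

/* the third argument names the embedded list_head inside the entry type */
list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
	/* per-member work runs once for every device in the hive */
}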
677 if (!adev->gmc.xgmi.supported) in amdgpu_xgmi_add_device()
680 if (!adev->gmc.xgmi.pending_reset && in amdgpu_xgmi_add_device()
689 ret = psp_xgmi_get_hive_id(&adev->psp, &adev->gmc.xgmi.hive_id); in amdgpu_xgmi_add_device()
696 ret = psp_xgmi_get_node_id(&adev->psp, &adev->gmc.xgmi.node_id); in amdgpu_xgmi_add_device()
703 adev->gmc.xgmi.hive_id = 16; in amdgpu_xgmi_add_device()
704 adev->gmc.xgmi.node_id = adev->gmc.xgmi.physical_node_id + 16; in amdgpu_xgmi_add_device()
712 adev->gmc.xgmi.node_id, adev->gmc.xgmi.hive_id); in amdgpu_xgmi_add_device()
719 list_add_tail(&adev->gmc.xgmi.head, &hive->device_list); in amdgpu_xgmi_add_device()
727 if (!adev->gmc.xgmi.pending_reset && in amdgpu_xgmi_add_device()
729 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { in amdgpu_xgmi_add_device()
734 adev->gmc.xgmi.node_id; in amdgpu_xgmi_add_device()
743 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { in amdgpu_xgmi_add_device()
749 tmp_adev->gmc.xgmi.node_id, in amdgpu_xgmi_add_device()
750 tmp_adev->gmc.xgmi.hive_id, ret); in amdgpu_xgmi_add_device()
765 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { in amdgpu_xgmi_add_device()
771 tmp_adev->gmc.xgmi.node_id, in amdgpu_xgmi_add_device()
772 tmp_adev->gmc.xgmi.hive_id, ret); in amdgpu_xgmi_add_device()
785 if (!ret && !adev->gmc.xgmi.pending_reset) in amdgpu_xgmi_add_device()
794 adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id); in amdgpu_xgmi_add_device()
798 adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id, in amdgpu_xgmi_add_device()
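The cluster of matches from 677 through 798 outlines amdgpu_xgmi_add_device(): refuse non-XGMI parts, obtain hive and node ids (from PSP firmware when available, otherwise the synthesized fallback of 703-704), join the hive's device list, and push topology to every member. A condensed, hedged sketch of that flow; error unwinding, locking, the pending-reset handling, and the success/failure logging of 794/798 are simplified away:

int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
{
	struct amdgpu_hive_info *hive;
	struct amdgpu_device *tmp_adev;
	int ret = 0;

	if (!adev->gmc.xgmi.supported)
		return 0;

	if (!adev->gmc.xgmi.pending_reset) {
		/* ids come from the PSP firmware when it is usable... */
		ret = psp_xgmi_get_hive_id(&adev->psp, &adev->gmc.xgmi.hive_id);
		if (ret)
			return ret;
		ret = psp_xgmi_get_node_id(&adev->psp, &adev->gmc.xgmi.node_id);
		if (ret)
			return ret;
	} else {
		/* ...otherwise fall back to the synthesized ids of 703-704 */
		adev->gmc.xgmi.hive_id = 16;
		adev->gmc.xgmi.node_id = adev->gmc.xgmi.physical_node_id + 16;
	}

	hive = amdgpu_get_xgmi_hive(adev);
	if (!hive)
		return -EINVAL;

	/* join the hive, then refresh topology on every member */
	list_add_tail(&adev->gmc.xgmi.head, &hive->device_list);

	list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
		ret = amdgpu_xgmi_update_topology(hive, tmp_adev);
		if (ret)
			break;
	}

	return ret;
}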
809 if (!adev->gmc.xgmi.supported) in amdgpu_xgmi_remove_device()
820 list_del(&adev->gmc.xgmi.head); in amdgpu_xgmi_remove_device()
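809 and 820 are the teardown side: bail for non-XGMI parts, then unlink the device from its hive. A hedged sketch; the hive refcount/kobject handling and sysfs cleanup that the real function performs are elided:

int amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
{
	struct amdgpu_hive_info *hive;

	if (!adev->gmc.xgmi.supported)
		return -EINVAL;

	hive = amdgpu_get_xgmi_hive(adev);
	if (!hive)
		return -EINVAL;

	list_del(&adev->gmc.xgmi.head);	/* leave the hive membership list */
	return 0;
}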
840 if (!adev->gmc.xgmi.supported || in amdgpu_xgmi_ras_late_init()
841 adev->gmc.xgmi.num_physical_nodes == 0) in amdgpu_xgmi_ras_late_init()
844 adev->gmc.xgmi.ras->ras_block.hw_ops->reset_ras_error_count(adev); in amdgpu_xgmi_ras_late_init()
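840-844 gate RAS late init on two conditions and then clear stale counters through the block's hw_ops table; line 1010 reaches the same reset_ras_error_count hook from the query path. A hedged sketch of the guard (the function's real signature may carry additional parameters):

static int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev)
{
	/* nothing to do on non-XGMI parts or single-node configurations */
	if (!adev->gmc.xgmi.supported ||
	    adev->gmc.xgmi.num_physical_nodes == 0)
		return 0;

	/* start from a clean slate: clear any stale XGMI error counters */
	adev->gmc.xgmi.ras->ras_block.hw_ops->reset_ras_error_count(adev);

	return 0;
}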
852 struct amdgpu_xgmi *xgmi = &adev->gmc.xgmi; in amdgpu_xgmi_get_relative_phy_addr() local
853 return (addr + xgmi->physical_node_id * xgmi->node_segment_size); in amdgpu_xgmi_get_relative_phy_addr()
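The computation at 852-853 offsets an address by this node's slot in the hive-wide segmented address space: each physical node owns one node_segment_size-sized window. A self-contained model of that arithmetic; the field widths are assumptions:

#include <stdint.h>

struct xgmi_info {
	uint64_t physical_node_id;	/* this GPU's slot in the hive */
	uint64_t node_segment_size;	/* bytes of address space per node */
};

static uint64_t xgmi_relative_phy_addr(const struct xgmi_info *xgmi,
				       uint64_t addr)
{
	/* shift addr into this node's segment of the shared space */
	return addr + xgmi->physical_node_id * xgmi->node_segment_size;
}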
1010 adev->gmc.xgmi.ras->ras_block.hw_ops->reset_ras_error_count(adev); in amdgpu_xgmi_query_ras_error_count()