Lines matching refs: adev
409 static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev, in gmc_v9_0_ecc_interrupt_state() argument
418 if (adev->asic_type >= CHIP_VEGA20) in gmc_v9_0_ecc_interrupt_state()
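Note: the single hit above is an early return that gates the whole callback by ASIC generation. amd_asic_type is an ordered enum, so the comparison reads as "Vega20 and everything after it". A minimal sketch, with the rationale hedged as an assumption:

    /* Assumption: on CHIP_VEGA20 and later parts the ECC interrupt
     * programming is handled outside this callback (e.g. by firmware),
     * so the driver-side state change becomes a no-op. */
    if (adev->asic_type >= CHIP_VEGA20)
            return 0;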
459 static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev, in gmc_v9_0_vm_fault_interrupt_state() argument
477 for (j = 0; j < adev->num_vmhubs; j++) { in gmc_v9_0_vm_fault_interrupt_state()
478 hub = &adev->vmhub[j]; in gmc_v9_0_vm_fault_interrupt_state()
488 for (j = 0; j < adev->num_vmhubs; j++) { in gmc_v9_0_vm_fault_interrupt_state()
489 hub = &adev->vmhub[j]; in gmc_v9_0_vm_fault_interrupt_state()
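Note: the two identical loop pairs above (477/478 and 488/489) are the DISABLE and ENABLE arms of the same switch on the requested interrupt state. A sketch of the read-modify-write they apply to each hub's sixteen per-context control registers; the register layout and the bits mask built earlier in the function are assumptions here:

    for (j = 0; j < adev->num_vmhubs; j++) {
            hub = &adev->vmhub[j];
            for (i = 0; i < 16; i++) {
                    reg = hub->vm_context0_cntl + i;
                    tmp = RREG32(reg);
                    tmp &= ~bits;           /* ENABLE arm: tmp |= bits */
                    WREG32(reg, tmp);
            }
    }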
505 static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, in gmc_v9_0_process_interrupt() argument
525 if (entry->ih != &adev->irq.ih_soft && in gmc_v9_0_process_interrupt()
526 amdgpu_gmc_filter_faults(adev, addr, entry->pasid, in gmc_v9_0_process_interrupt()
533 if (entry->ih == &adev->irq.ih) { in gmc_v9_0_process_interrupt()
534 amdgpu_irq_delegate(adev, entry, 8); in gmc_v9_0_process_interrupt()
541 if (amdgpu_vm_handle_fault(adev, entry->pasid, addr, write_fault)) in gmc_v9_0_process_interrupt()
550 hub = &adev->vmhub[AMDGPU_MMHUB_0]; in gmc_v9_0_process_interrupt()
553 hub = &adev->vmhub[AMDGPU_MMHUB_1]; in gmc_v9_0_process_interrupt()
556 hub = &adev->vmhub[AMDGPU_GFXHUB_0]; in gmc_v9_0_process_interrupt()
560 amdgpu_vm_get_task_info(adev, entry->pasid, &task_info); in gmc_v9_0_process_interrupt()
562 dev_err(adev->dev, in gmc_v9_0_process_interrupt()
569 dev_err(adev->dev, " in page starting at address 0x%016llx from IH client 0x%x (%s)\n", in gmc_v9_0_process_interrupt()
573 if (amdgpu_sriov_vf(adev)) in gmc_v9_0_process_interrupt()
582 (adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 4, 2))) in gmc_v9_0_process_interrupt()
591 dev_err(adev->dev, in gmc_v9_0_process_interrupt()
594 if (hub == &adev->vmhub[AMDGPU_GFXHUB_0]) { in gmc_v9_0_process_interrupt()
595 dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n", in gmc_v9_0_process_interrupt()
600 switch (adev->ip_versions[MMHUB_HWIP][0]) { in gmc_v9_0_process_interrupt()
628 dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n", in gmc_v9_0_process_interrupt()
631 dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n", in gmc_v9_0_process_interrupt()
634 dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n", in gmc_v9_0_process_interrupt()
637 dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n", in gmc_v9_0_process_interrupt()
640 dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n", in gmc_v9_0_process_interrupt()
643 dev_err(adev->dev, "\t RW: 0x%x\n", rw); in gmc_v9_0_process_interrupt()
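Note: the dev_err() run from 628 to 643 is the L2 protection-fault decode. The printed labels name the exact bitfields, so the extraction can be reconstructed; status is read from the hub's VM_L2_PROTECTION_FAULT_STATUS register earlier in the function (assumed here):

    dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
            REG_GET_FIELD(status,
                          VM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
    dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
            REG_GET_FIELD(status,
                          VM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
    /* PERMISSION_FAULTS, MAPPING_ERROR and RW follow the same
     * REG_GET_FIELD(status, ...) pattern. */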
658 static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev) in gmc_v9_0_set_irq_funcs() argument
660 adev->gmc.vm_fault.num_types = 1; in gmc_v9_0_set_irq_funcs()
661 adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs; in gmc_v9_0_set_irq_funcs()
663 if (!amdgpu_sriov_vf(adev) && in gmc_v9_0_set_irq_funcs()
664 !adev->gmc.xgmi.connected_to_cpu) { in gmc_v9_0_set_irq_funcs()
665 adev->gmc.ecc_irq.num_types = 1; in gmc_v9_0_set_irq_funcs()
666 adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs; in gmc_v9_0_set_irq_funcs()
696 static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev, in gmc_v9_0_use_invalidate_semaphore() argument
699 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) in gmc_v9_0_use_invalidate_semaphore()
704 (!amdgpu_sriov_vf(adev)) && in gmc_v9_0_use_invalidate_semaphore()
705 (!(!(adev->apu_flags & AMD_APU_IS_RAVEN2) && in gmc_v9_0_use_invalidate_semaphore()
706 (adev->apu_flags & AMD_APU_IS_PICASSO)))); in gmc_v9_0_use_invalidate_semaphore()
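Note: the triple negation in the hits at 705/706 is easy to misread. Below is an equivalent flattened form; the vmhub check sits on source lines that do not mention adev and so is missing from the hits, and is restored here as an assumption:

    /* Picasso presents as an APU with AMD_APU_IS_PICASSO set while
     * AMD_APU_IS_RAVEN2 is clear; it must not use the semaphore. */
    bool is_picasso = !(adev->apu_flags & AMD_APU_IS_RAVEN2) &&
                      (adev->apu_flags & AMD_APU_IS_PICASSO);

    return (vmhub == AMDGPU_MMHUB_0 || vmhub == AMDGPU_MMHUB_1) &&
           !amdgpu_sriov_vf(adev) && !is_picasso;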
709 static bool gmc_v9_0_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev, in gmc_v9_0_get_atc_vmid_pasid_mapping_info() argument
738 static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, in gmc_v9_0_flush_gpu_tlb() argument
741 bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub); in gmc_v9_0_flush_gpu_tlb()
746 BUG_ON(vmhub >= adev->num_vmhubs); in gmc_v9_0_flush_gpu_tlb()
748 hub = &adev->vmhub[vmhub]; in gmc_v9_0_flush_gpu_tlb()
749 if (adev->gmc.xgmi.num_physical_nodes && in gmc_v9_0_flush_gpu_tlb()
750 adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0)) { in gmc_v9_0_flush_gpu_tlb()
767 if (adev->gfx.kiq.ring.sched.ready && in gmc_v9_0_flush_gpu_tlb()
768 (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) && in gmc_v9_0_flush_gpu_tlb()
769 down_read_trylock(&adev->reset_sem)) { in gmc_v9_0_flush_gpu_tlb()
773 amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req, in gmc_v9_0_flush_gpu_tlb()
775 up_read(&adev->reset_sem); in gmc_v9_0_flush_gpu_tlb()
779 spin_lock(&adev->gmc.invalidate_lock); in gmc_v9_0_flush_gpu_tlb()
790 for (j = 0; j < adev->usec_timeout; j++) { in gmc_v9_0_flush_gpu_tlb()
799 if (j >= adev->usec_timeout) in gmc_v9_0_flush_gpu_tlb()
813 (adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 4, 2))) in gmc_v9_0_flush_gpu_tlb()
817 for (j = 0; j < adev->usec_timeout; j++) { in gmc_v9_0_flush_gpu_tlb()
838 spin_unlock(&adev->gmc.invalidate_lock); in gmc_v9_0_flush_gpu_tlb()
840 if (j < adev->usec_timeout) in gmc_v9_0_flush_gpu_tlb()
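Note: the two usec_timeout loops (790 and 817) are the semaphore-acquire poll and the invalidation-ack poll, and the checks at 799/840 are their timeout tests. A sketch of the ack side as the hits outline it; eng (the hub's invalidation engine index) and the exact error message are assumptions:

    WREG32_NO_KIQ(hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req);

    /* busy-wait for the per-VMID ack bit, at most usec_timeout usecs */
    for (j = 0; j < adev->usec_timeout; j++) {
            tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack +
                                hub->eng_distance * eng);
            if (tmp & (1 << vmid))
                    break;
            udelay(1);
    }
    if (j >= adev->usec_timeout)
            DRM_ERROR("Timeout waiting for VM flush ACK!\n");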
856 static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, in gmc_v9_0_flush_gpu_tlb_pasid() argument
865 struct amdgpu_ring *ring = &adev->gfx.kiq.ring; in gmc_v9_0_flush_gpu_tlb_pasid()
866 struct amdgpu_kiq *kiq = &adev->gfx.kiq; in gmc_v9_0_flush_gpu_tlb_pasid()
868 if (amdgpu_in_reset(adev)) in gmc_v9_0_flush_gpu_tlb_pasid()
871 if (ring->sched.ready && down_read_trylock(&adev->reset_sem)) { in gmc_v9_0_flush_gpu_tlb_pasid()
878 bool vega20_xgmi_wa = (adev->gmc.xgmi.num_physical_nodes && in gmc_v9_0_flush_gpu_tlb_pasid()
879 adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0)); in gmc_v9_0_flush_gpu_tlb_pasid()
886 spin_lock(&adev->gfx.kiq.ring_lock); in gmc_v9_0_flush_gpu_tlb_pasid()
897 spin_unlock(&adev->gfx.kiq.ring_lock); in gmc_v9_0_flush_gpu_tlb_pasid()
898 up_read(&adev->reset_sem); in gmc_v9_0_flush_gpu_tlb_pasid()
903 spin_unlock(&adev->gfx.kiq.ring_lock); in gmc_v9_0_flush_gpu_tlb_pasid()
904 r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout); in gmc_v9_0_flush_gpu_tlb_pasid()
906 dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r); in gmc_v9_0_flush_gpu_tlb_pasid()
907 up_read(&adev->reset_sem); in gmc_v9_0_flush_gpu_tlb_pasid()
910 up_read(&adev->reset_sem); in gmc_v9_0_flush_gpu_tlb_pasid()
916 ret = gmc_v9_0_get_atc_vmid_pasid_mapping_info(adev, vmid, in gmc_v9_0_flush_gpu_tlb_pasid()
920 for (i = 0; i < adev->num_vmhubs; i++) in gmc_v9_0_flush_gpu_tlb_pasid()
921 gmc_v9_0_flush_gpu_tlb(adev, vmid, in gmc_v9_0_flush_gpu_tlb_pasid()
924 gmc_v9_0_flush_gpu_tlb(adev, vmid, in gmc_v9_0_flush_gpu_tlb_pasid()
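Note: the hits from 886 to 910 trace the KIQ submission used when the GPU is not in reset: take the ring lock, emit the invalidation packet, fence it, then poll the fence outside the lock. A sketch under those hits; the packet emission through kiq->pmf and the ring-space size are assumptions:

    spin_lock(&adev->gfx.kiq.ring_lock);
    amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8);
    kiq->pmf->kiq_invalidate_tlbs(ring, pasid, flush_type, all_hub);
    r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
    if (r) {
            amdgpu_ring_undo(ring);
            spin_unlock(&adev->gfx.kiq.ring_lock);
            up_read(&adev->reset_sem);
            return -ETIME;
    }
    amdgpu_ring_commit(ring);
    spin_unlock(&adev->gfx.kiq.ring_lock);

    r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
    if (r < 1) {
            dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
            up_read(&adev->reset_sem);
            return -ETIME;
    }
    up_read(&adev->reset_sem);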
938 bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub); in gmc_v9_0_emit_flush_gpu_tlb()
939 struct amdgpu_device *adev = ring->adev; in gmc_v9_0_emit_flush_gpu_tlb() local
940 struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub]; in gmc_v9_0_emit_flush_gpu_tlb()
987 struct amdgpu_device *adev = ring->adev; in gmc_v9_0_emit_pasid_mapping() local
1034 static uint64_t gmc_v9_0_map_mtype(struct amdgpu_device *adev, uint32_t flags) in gmc_v9_0_map_mtype() argument
1055 static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level, in gmc_v9_0_get_vm_pde() argument
1059 *addr = amdgpu_gmc_vram_mc2pa(adev, *addr); in gmc_v9_0_get_vm_pde()
1062 if (!adev->gmc.translate_further) in gmc_v9_0_get_vm_pde()
1078 static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev, in gmc_v9_0_get_vm_pte() argument
1093 if ((adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1) || in gmc_v9_0_get_vm_pte()
1094 adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) && in gmc_v9_0_get_vm_pte()
1099 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) in gmc_v9_0_get_vm_pte()
1103 static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev) in gmc_v9_0_get_vbios_fb_size() argument
1113 switch (adev->ip_versions[DCE_HWIP][0]) { in gmc_v9_0_get_vbios_fb_size()
1147 static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev) in gmc_v9_0_set_gmc_funcs() argument
1149 adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs; in gmc_v9_0_set_gmc_funcs()
1152 static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev) in gmc_v9_0_set_umc_funcs() argument
1154 switch (adev->ip_versions[UMC_HWIP][0]) { in gmc_v9_0_set_umc_funcs()
1156 adev->umc.funcs = &umc_v6_0_funcs; in gmc_v9_0_set_umc_funcs()
1159 adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM; in gmc_v9_0_set_umc_funcs()
1160 adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM; in gmc_v9_0_set_umc_funcs()
1161 adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM; in gmc_v9_0_set_umc_funcs()
1162 adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_VG20; in gmc_v9_0_set_umc_funcs()
1163 adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0]; in gmc_v9_0_set_umc_funcs()
1164 adev->umc.ras_funcs = &umc_v6_1_ras_funcs; in gmc_v9_0_set_umc_funcs()
1167 adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM; in gmc_v9_0_set_umc_funcs()
1168 adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM; in gmc_v9_0_set_umc_funcs()
1169 adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM; in gmc_v9_0_set_umc_funcs()
1170 adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_ARCT; in gmc_v9_0_set_umc_funcs()
1171 adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0]; in gmc_v9_0_set_umc_funcs()
1172 adev->umc.ras_funcs = &umc_v6_1_ras_funcs; in gmc_v9_0_set_umc_funcs()
1175 adev->umc.max_ras_err_cnt_per_query = UMC_V6_7_TOTAL_CHANNEL_NUM; in gmc_v9_0_set_umc_funcs()
1176 adev->umc.channel_inst_num = UMC_V6_7_CHANNEL_INSTANCE_NUM; in gmc_v9_0_set_umc_funcs()
1177 adev->umc.umc_inst_num = UMC_V6_7_UMC_INSTANCE_NUM; in gmc_v9_0_set_umc_funcs()
1178 adev->umc.channel_offs = UMC_V6_7_PER_CHANNEL_OFFSET; in gmc_v9_0_set_umc_funcs()
1179 if (!adev->gmc.xgmi.connected_to_cpu) in gmc_v9_0_set_umc_funcs()
1180 adev->umc.ras_funcs = &umc_v6_7_ras_funcs; in gmc_v9_0_set_umc_funcs()
1181 if (1 & adev->smuio.funcs->get_die_id(adev)) in gmc_v9_0_set_umc_funcs()
1182 adev->umc.channel_idx_tbl = &umc_v6_7_channel_idx_tbl_first[0][0]; in gmc_v9_0_set_umc_funcs()
1184 adev->umc.channel_idx_tbl = &umc_v6_7_channel_idx_tbl_second[0][0]; in gmc_v9_0_set_umc_funcs()
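Note: the three parameter blocks above are arms of one switch on adev->ip_versions[UMC_HWIP][0]; the case labels sit on lines without adev and are dropped by the search. The offset constants identify them anyway (VG20 for Vega20, ARCT for Arcturus, UMC 6.7 for the CPU-connectable part). Skeleton with the labels reattached; the exact IP_VERSION values are inferred, not in the hits:

    switch (adev->ip_versions[UMC_HWIP][0]) {
    case IP_VERSION(6, 0, 0):       /* no RAS counters on this UMC */
            adev->umc.funcs = &umc_v6_0_funcs;
            break;
    case IP_VERSION(6, 1, 1):       /* inferred: Vega20 */
            adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_VG20;
            adev->umc.ras_funcs = &umc_v6_1_ras_funcs;
            break;
    case IP_VERSION(6, 1, 2):       /* inferred: Arcturus */
            adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_ARCT;
            adev->umc.ras_funcs = &umc_v6_1_ras_funcs;
            break;
    case IP_VERSION(6, 7, 0):       /* inferred: Aldebaran-class */
            adev->umc.channel_offs = UMC_V6_7_PER_CHANNEL_OFFSET;
            break;
    default:
            break;
    }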
1191 static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev) in gmc_v9_0_set_mmhub_funcs() argument
1193 switch (adev->ip_versions[MMHUB_HWIP][0]) { in gmc_v9_0_set_mmhub_funcs()
1195 adev->mmhub.funcs = &mmhub_v9_4_funcs; in gmc_v9_0_set_mmhub_funcs()
1198 adev->mmhub.funcs = &mmhub_v1_7_funcs; in gmc_v9_0_set_mmhub_funcs()
1201 adev->mmhub.funcs = &mmhub_v1_0_funcs; in gmc_v9_0_set_mmhub_funcs()
1206 static void gmc_v9_0_set_mmhub_ras_funcs(struct amdgpu_device *adev) in gmc_v9_0_set_mmhub_ras_funcs() argument
1208 switch (adev->ip_versions[MMHUB_HWIP][0]) { in gmc_v9_0_set_mmhub_ras_funcs()
1210 adev->mmhub.ras_funcs = &mmhub_v1_0_ras_funcs; in gmc_v9_0_set_mmhub_ras_funcs()
1213 adev->mmhub.ras_funcs = &mmhub_v9_4_ras_funcs; in gmc_v9_0_set_mmhub_ras_funcs()
1216 adev->mmhub.ras_funcs = &mmhub_v1_7_ras_funcs; in gmc_v9_0_set_mmhub_ras_funcs()
1224 static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev) in gmc_v9_0_set_gfxhub_funcs() argument
1226 adev->gfxhub.funcs = &gfxhub_v1_0_funcs; in gmc_v9_0_set_gfxhub_funcs()
1229 static void gmc_v9_0_set_hdp_ras_funcs(struct amdgpu_device *adev) in gmc_v9_0_set_hdp_ras_funcs() argument
1231 adev->hdp.ras_funcs = &hdp_v4_0_ras_funcs; in gmc_v9_0_set_hdp_ras_funcs()
1234 static void gmc_v9_0_set_mca_funcs(struct amdgpu_device *adev) in gmc_v9_0_set_mca_funcs() argument
1237 switch (adev->ip_versions[UMC_HWIP][0]) { in gmc_v9_0_set_mca_funcs()
1239 if (!adev->gmc.xgmi.connected_to_cpu) in gmc_v9_0_set_mca_funcs()
1240 adev->mca.funcs = &mca_v3_0_funcs; in gmc_v9_0_set_mca_funcs()
1249 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v9_0_early_init() local
1252 if (adev->asic_type == CHIP_VEGA20 || in gmc_v9_0_early_init()
1253 adev->asic_type == CHIP_ARCTURUS) in gmc_v9_0_early_init()
1254 adev->gmc.xgmi.supported = true; in gmc_v9_0_early_init()
1256 if (adev->ip_versions[XGMI_HWIP][0] == IP_VERSION(6, 1, 0)) { in gmc_v9_0_early_init()
1257 adev->gmc.xgmi.supported = true; in gmc_v9_0_early_init()
1258 adev->gmc.xgmi.connected_to_cpu = in gmc_v9_0_early_init()
1259 adev->smuio.funcs->is_host_gpu_xgmi_supported(adev); in gmc_v9_0_early_init()
1262 gmc_v9_0_set_gmc_funcs(adev); in gmc_v9_0_early_init()
1263 gmc_v9_0_set_irq_funcs(adev); in gmc_v9_0_early_init()
1264 gmc_v9_0_set_umc_funcs(adev); in gmc_v9_0_early_init()
1265 gmc_v9_0_set_mmhub_funcs(adev); in gmc_v9_0_early_init()
1266 gmc_v9_0_set_mmhub_ras_funcs(adev); in gmc_v9_0_early_init()
1267 gmc_v9_0_set_gfxhub_funcs(adev); in gmc_v9_0_early_init()
1268 gmc_v9_0_set_hdp_ras_funcs(adev); in gmc_v9_0_early_init()
1269 gmc_v9_0_set_mca_funcs(adev); in gmc_v9_0_early_init()
1271 adev->gmc.shared_aperture_start = 0x2000000000000000ULL; in gmc_v9_0_early_init()
1272 adev->gmc.shared_aperture_end = in gmc_v9_0_early_init()
1273 adev->gmc.shared_aperture_start + (4ULL << 30) - 1; in gmc_v9_0_early_init()
1274 adev->gmc.private_aperture_start = 0x1000000000000000ULL; in gmc_v9_0_early_init()
1275 adev->gmc.private_aperture_end = in gmc_v9_0_early_init()
1276 adev->gmc.private_aperture_start + (4ULL << 30) - 1; in gmc_v9_0_early_init()
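Note: the aperture setup above carves two fixed 4 GiB windows out of the canonical 64-bit hole; (4ULL << 30) is 0x1_0000_0000 bytes. Expanded, with the resulting end addresses spelled out:

    /* shared aperture: 0x2000000000000000 .. 0x20000000ffffffff */
    adev->gmc.shared_aperture_start  = 0x2000000000000000ULL;
    adev->gmc.shared_aperture_end    = adev->gmc.shared_aperture_start +
                                       (4ULL << 30) - 1;
    /* private aperture: 0x1000000000000000 .. 0x10000000ffffffff */
    adev->gmc.private_aperture_start = 0x1000000000000000ULL;
    adev->gmc.private_aperture_end   = adev->gmc.private_aperture_start +
                                       (4ULL << 30) - 1;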
1283 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v9_0_late_init() local
1286 r = amdgpu_gmc_allocate_vm_inv_eng(adev); in gmc_v9_0_late_init()
1294 if (!amdgpu_sriov_vf(adev) && in gmc_v9_0_late_init()
1295 (adev->ip_versions[UMC_HWIP][0] == IP_VERSION(6, 0, 0))) { in gmc_v9_0_late_init()
1296 if (!(adev->ras_enabled & (1 << AMDGPU_RAS_BLOCK__UMC))) { in gmc_v9_0_late_init()
1297 if (adev->df.funcs->enable_ecc_force_par_wr_rmw) in gmc_v9_0_late_init()
1298 adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false); in gmc_v9_0_late_init()
1302 if (!amdgpu_persistent_edc_harvesting_supported(adev)) { in gmc_v9_0_late_init()
1303 if (adev->mmhub.ras_funcs && in gmc_v9_0_late_init()
1304 adev->mmhub.ras_funcs->reset_ras_error_count) in gmc_v9_0_late_init()
1305 adev->mmhub.ras_funcs->reset_ras_error_count(adev); in gmc_v9_0_late_init()
1307 if (adev->hdp.ras_funcs && in gmc_v9_0_late_init()
1308 adev->hdp.ras_funcs->reset_ras_error_count) in gmc_v9_0_late_init()
1309 adev->hdp.ras_funcs->reset_ras_error_count(adev); in gmc_v9_0_late_init()
1312 r = amdgpu_gmc_ras_late_init(adev); in gmc_v9_0_late_init()
1316 return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0); in gmc_v9_0_late_init()
1319 static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev, in gmc_v9_0_vram_gtt_location() argument
1322 u64 base = adev->mmhub.funcs->get_fb_location(adev); in gmc_v9_0_vram_gtt_location()
1325 base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size; in gmc_v9_0_vram_gtt_location()
1326 if (adev->gmc.xgmi.connected_to_cpu) { in gmc_v9_0_vram_gtt_location()
1327 amdgpu_gmc_sysvm_location(adev, mc); in gmc_v9_0_vram_gtt_location()
1329 amdgpu_gmc_vram_location(adev, mc, base); in gmc_v9_0_vram_gtt_location()
1330 amdgpu_gmc_gart_location(adev, mc); in gmc_v9_0_vram_gtt_location()
1331 amdgpu_gmc_agp_location(adev, mc); in gmc_v9_0_vram_gtt_location()
1334 adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev); in gmc_v9_0_vram_gtt_location()
1337 adev->vm_manager.vram_base_offset += in gmc_v9_0_vram_gtt_location()
1338 adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size; in gmc_v9_0_vram_gtt_location()
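Note: the physical_node_id multiplications at 1325 and 1337/1338 are the same XGMI adjustment applied twice, once to the framebuffer base and once to vram_base_offset. On a hive all nodes share one flat MC address space, so node N's local VRAM starts N segments in:

    u64 base = adev->mmhub.funcs->get_fb_location(adev);

    /* node N's framebuffer begins after N equally sized segments */
    base += adev->gmc.xgmi.physical_node_id *
            adev->gmc.xgmi.node_segment_size;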
1350 static int gmc_v9_0_mc_init(struct amdgpu_device *adev) in gmc_v9_0_mc_init() argument
1355 adev->gmc.mc_vram_size = in gmc_v9_0_mc_init()
1356 adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL; in gmc_v9_0_mc_init()
1357 adev->gmc.real_vram_size = adev->gmc.mc_vram_size; in gmc_v9_0_mc_init()
1359 if (!(adev->flags & AMD_IS_APU) && in gmc_v9_0_mc_init()
1360 !adev->gmc.xgmi.connected_to_cpu) { in gmc_v9_0_mc_init()
1361 r = amdgpu_device_resize_fb_bar(adev); in gmc_v9_0_mc_init()
1365 adev->gmc.aper_base = pci_resource_start(adev->pdev, 0); in gmc_v9_0_mc_init()
1366 adev->gmc.aper_size = pci_resource_len(adev->pdev, 0); in gmc_v9_0_mc_init()
1381 if ((adev->flags & AMD_IS_APU) || in gmc_v9_0_mc_init()
1382 (adev->gmc.xgmi.supported && in gmc_v9_0_mc_init()
1383 adev->gmc.xgmi.connected_to_cpu)) { in gmc_v9_0_mc_init()
1384 adev->gmc.aper_base = in gmc_v9_0_mc_init()
1385 adev->gfxhub.funcs->get_mc_fb_offset(adev) + in gmc_v9_0_mc_init()
1386 adev->gmc.xgmi.physical_node_id * in gmc_v9_0_mc_init()
1387 adev->gmc.xgmi.node_segment_size; in gmc_v9_0_mc_init()
1388 adev->gmc.aper_size = adev->gmc.real_vram_size; in gmc_v9_0_mc_init()
1393 adev->gmc.visible_vram_size = adev->gmc.aper_size; in gmc_v9_0_mc_init()
1394 if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size) in gmc_v9_0_mc_init()
1395 adev->gmc.visible_vram_size = adev->gmc.real_vram_size; in gmc_v9_0_mc_init()
1399 switch (adev->ip_versions[GC_HWIP][0]) { in gmc_v9_0_mc_init()
1406 adev->gmc.gart_size = 512ULL << 20; in gmc_v9_0_mc_init()
1411 adev->gmc.gart_size = 1024ULL << 20; in gmc_v9_0_mc_init()
1415 adev->gmc.gart_size = (u64)amdgpu_gart_size << 20; in gmc_v9_0_mc_init()
1418 adev->gmc.gart_size += adev->pm.smu_prv_buffer_size; in gmc_v9_0_mc_init()
1420 gmc_v9_0_vram_gtt_location(adev, &adev->gmc); in gmc_v9_0_mc_init()
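Note: the gart_size assignments at 1406/1411/1415 are the per-ASIC defaults plus the module-parameter override; amdgpu_gart_size is given in MiB, hence the << 20. A sketch of the shape, assuming the usual -1 sentinel for "auto":

    if (amdgpu_gart_size == -1) {
            /* per-ASIC default: 512 MiB or 1 GiB in the hits above */
            adev->gmc.gart_size = 512ULL << 20;
    } else {
            adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
    }
    /* the SMU's private buffer is carved out of GART space on top */
    adev->gmc.gart_size += adev->pm.smu_prv_buffer_size;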
1425 static int gmc_v9_0_gart_init(struct amdgpu_device *adev) in gmc_v9_0_gart_init() argument
1429 if (adev->gart.bo) { in gmc_v9_0_gart_init()
1434 if (adev->gmc.xgmi.connected_to_cpu) { in gmc_v9_0_gart_init()
1435 adev->gmc.vmid0_page_table_depth = 1; in gmc_v9_0_gart_init()
1436 adev->gmc.vmid0_page_table_block_size = 12; in gmc_v9_0_gart_init()
1438 adev->gmc.vmid0_page_table_depth = 0; in gmc_v9_0_gart_init()
1439 adev->gmc.vmid0_page_table_block_size = 0; in gmc_v9_0_gart_init()
1443 r = amdgpu_gart_init(adev); in gmc_v9_0_gart_init()
1446 adev->gart.table_size = adev->gart.num_gpu_pages * 8; in gmc_v9_0_gart_init()
1447 adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_VG10(MTYPE_UC) | in gmc_v9_0_gart_init()
1450 r = amdgpu_gart_table_vram_alloc(adev); in gmc_v9_0_gart_init()
1454 if (adev->gmc.xgmi.connected_to_cpu) { in gmc_v9_0_gart_init()
1455 r = amdgpu_gmc_pdb0_alloc(adev); in gmc_v9_0_gart_init()
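Note: the "* 8" at 1446 is the whole GART sizing story: one 64-bit PTE per 4 KiB GPU page. The connected_to_cpu branch at 1434 additionally switches VMID0 to a two-level layout, which is why amdgpu_gmc_pdb0_alloc() runs at 1455. Worked example:

    /* 1 GiB of GART = (1 << 30) / (4 << 10) = 262144 pages,
     * at 8 bytes per PTE -> a 2 MiB page table */
    adev->gart.table_size = adev->gart.num_gpu_pages * 8;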
1469 static void gmc_v9_0_save_registers(struct amdgpu_device *adev) in gmc_v9_0_save_registers() argument
1471 if ((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) || in gmc_v9_0_save_registers()
1472 (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) in gmc_v9_0_save_registers()
1473 adev->gmc.sdpif_register = RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0); in gmc_v9_0_save_registers()
1479 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v9_0_sw_init() local
1481 adev->gfxhub.funcs->init(adev); in gmc_v9_0_sw_init()
1483 adev->mmhub.funcs->init(adev); in gmc_v9_0_sw_init()
1484 if (adev->mca.funcs) in gmc_v9_0_sw_init()
1485 adev->mca.funcs->init(adev); in gmc_v9_0_sw_init()
1487 spin_lock_init(&adev->gmc.invalidate_lock); in gmc_v9_0_sw_init()
1489 r = amdgpu_atomfirmware_get_vram_info(adev, in gmc_v9_0_sw_init()
1491 if (amdgpu_sriov_vf(adev)) in gmc_v9_0_sw_init()
1496 adev->gmc.vram_width = 2048; in gmc_v9_0_sw_init()
1498 adev->gmc.vram_width = vram_width; in gmc_v9_0_sw_init()
1500 if (!adev->gmc.vram_width) { in gmc_v9_0_sw_init()
1504 if (adev->flags & AMD_IS_APU) in gmc_v9_0_sw_init()
1509 numchan = adev->df.funcs->get_hbm_channel_number(adev); in gmc_v9_0_sw_init()
1510 adev->gmc.vram_width = numchan * chansize; in gmc_v9_0_sw_init()
1513 adev->gmc.vram_type = vram_type; in gmc_v9_0_sw_init()
1514 adev->gmc.vram_vendor = vram_vendor; in gmc_v9_0_sw_init()
1515 switch (adev->ip_versions[GC_HWIP][0]) { in gmc_v9_0_sw_init()
1518 adev->num_vmhubs = 2; in gmc_v9_0_sw_init()
1520 if (adev->rev_id == 0x0 || adev->rev_id == 0x1) { in gmc_v9_0_sw_init()
1521 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48); in gmc_v9_0_sw_init()
1524 amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48); in gmc_v9_0_sw_init()
1525 adev->gmc.translate_further = in gmc_v9_0_sw_init()
1526 adev->vm_manager.num_level > 1; in gmc_v9_0_sw_init()
1534 adev->num_vmhubs = 2; in gmc_v9_0_sw_init()
1543 if (amdgpu_sriov_vf(adev)) in gmc_v9_0_sw_init()
1544 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 47); in gmc_v9_0_sw_init()
1546 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48); in gmc_v9_0_sw_init()
1549 adev->num_vmhubs = 3; in gmc_v9_0_sw_init()
1552 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48); in gmc_v9_0_sw_init()
1559 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT, in gmc_v9_0_sw_init()
1560 &adev->gmc.vm_fault); in gmc_v9_0_sw_init()
1564 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1)) { in gmc_v9_0_sw_init()
1565 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC1, VMC_1_0__SRCID__VM_FAULT, in gmc_v9_0_sw_init()
1566 &adev->gmc.vm_fault); in gmc_v9_0_sw_init()
1571 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT, in gmc_v9_0_sw_init()
1572 &adev->gmc.vm_fault); in gmc_v9_0_sw_init()
1577 if (!amdgpu_sriov_vf(adev) && in gmc_v9_0_sw_init()
1578 !adev->gmc.xgmi.connected_to_cpu) { in gmc_v9_0_sw_init()
1580 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0, in gmc_v9_0_sw_init()
1581 &adev->gmc.ecc_irq); in gmc_v9_0_sw_init()
1590 adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */ in gmc_v9_0_sw_init()
1592 r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44)); in gmc_v9_0_sw_init()
1597 adev->need_swiotlb = drm_need_swiotlb(44); in gmc_v9_0_sw_init()
1599 if (adev->gmc.xgmi.supported) { in gmc_v9_0_sw_init()
1600 r = adev->gfxhub.funcs->get_xgmi_info(adev); in gmc_v9_0_sw_init()
1605 r = gmc_v9_0_mc_init(adev); in gmc_v9_0_sw_init()
1609 amdgpu_gmc_get_vbios_allocations(adev); in gmc_v9_0_sw_init()
1612 r = amdgpu_bo_init(adev); in gmc_v9_0_sw_init()
1616 r = gmc_v9_0_gart_init(adev); in gmc_v9_0_sw_init()
1630 adev->vm_manager.first_kfd_vmid = in gmc_v9_0_sw_init()
1631 (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1) || in gmc_v9_0_sw_init()
1632 adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) ? 3 : 8; in gmc_v9_0_sw_init()
1634 amdgpu_vm_manager_init(adev); in gmc_v9_0_sw_init()
1636 gmc_v9_0_save_registers(adev); in gmc_v9_0_sw_init()
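Note: every amdgpu_vm_adjust_size() hit above passes (adev, size-in-GB, 9, 3, 47-or-48); the parameter order (minimum VM size in GB, fragment size, page-table levels, virtual address bits) is an assumption about the VM manager here. 256 * 1024 GB is 256 TiB, i.e. the full 48-bit space, and SR-IOV VFs at 1544 get one bit less; the smaller Raven sizes at 1521/1524 are what drive translate_further at 1525:

    if (amdgpu_sriov_vf(adev))
            /* VF: 47-bit VA, per the hit at 1544 */
            amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 47);
    else
            /* bare metal: the whole 48-bit, 256 TiB space */
            amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);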
1643 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v9_0_sw_fini() local
1645 amdgpu_gmc_ras_fini(adev); in gmc_v9_0_sw_fini()
1646 amdgpu_gem_force_release(adev); in gmc_v9_0_sw_fini()
1647 amdgpu_vm_manager_fini(adev); in gmc_v9_0_sw_fini()
1648 amdgpu_gart_table_vram_free(adev); in gmc_v9_0_sw_fini()
1649 amdgpu_bo_unref(&adev->gmc.pdb0_bo); in gmc_v9_0_sw_fini()
1650 amdgpu_bo_fini(adev); in gmc_v9_0_sw_fini()
1655 static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev) in gmc_v9_0_init_golden_registers() argument
1658 switch (adev->ip_versions[MMHUB_HWIP][0]) { in gmc_v9_0_init_golden_registers()
1660 if (amdgpu_sriov_vf(adev)) in gmc_v9_0_init_golden_registers()
1664 soc15_program_register_sequence(adev, in gmc_v9_0_init_golden_registers()
1667 soc15_program_register_sequence(adev, in gmc_v9_0_init_golden_registers()
1674 soc15_program_register_sequence(adev, in gmc_v9_0_init_golden_registers()
1690 void gmc_v9_0_restore_registers(struct amdgpu_device *adev) in gmc_v9_0_restore_registers() argument
1692 if ((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) || in gmc_v9_0_restore_registers()
1693 (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) { in gmc_v9_0_restore_registers()
1694 WREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0, adev->gmc.sdpif_register); in gmc_v9_0_restore_registers()
1695 WARN_ON(adev->gmc.sdpif_register != in gmc_v9_0_restore_registers()
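Note: the save at 1473 and the restore pair above bracket suspend/resume: DCHUBBUB_SDPIF_MMIO_CNTRL_0 is captured once and written back later, with a read-back check. The WARN_ON hit at 1695 is truncated mid-expression; the RREG32 it compares against is an assumption:

    WREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0,
                 adev->gmc.sdpif_register);
    /* verify the write actually landed */
    WARN_ON(adev->gmc.sdpif_register !=
            RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0));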
1705 static int gmc_v9_0_gart_enable(struct amdgpu_device *adev) in gmc_v9_0_gart_enable() argument
1709 if (adev->gmc.xgmi.connected_to_cpu) in gmc_v9_0_gart_enable()
1710 amdgpu_gmc_init_pdb0(adev); in gmc_v9_0_gart_enable()
1712 if (adev->gart.bo == NULL) { in gmc_v9_0_gart_enable()
1713 dev_err(adev->dev, "No VRAM object for PCIE GART.\n"); in gmc_v9_0_gart_enable()
1717 r = amdgpu_gart_table_vram_pin(adev); in gmc_v9_0_gart_enable()
1721 r = adev->gfxhub.funcs->gart_enable(adev); in gmc_v9_0_gart_enable()
1725 r = adev->mmhub.funcs->gart_enable(adev); in gmc_v9_0_gart_enable()
1730 (unsigned)(adev->gmc.gart_size >> 20)); in gmc_v9_0_gart_enable()
1731 if (adev->gmc.pdb0_bo) in gmc_v9_0_gart_enable()
1733 (unsigned long long)amdgpu_bo_gpu_offset(adev->gmc.pdb0_bo)); in gmc_v9_0_gart_enable()
1735 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo)); in gmc_v9_0_gart_enable()
1737 adev->gart.ready = true; in gmc_v9_0_gart_enable()
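Note: the hits above give the bring-up order: fill PDB0 when CPU-coherent (1709/1710), pin the table BO (1717), enable the GFXHUB before the MMHUB (1721/1725), and only then mark adev->gart.ready. The >> 20 at 1730 converts bytes to MiB for the log line; the exact message text is an assumption:

    DRM_INFO("PCIE GART of %uM enabled.\n",
             (unsigned)(adev->gmc.gart_size >> 20));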
1743 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v9_0_hw_init() local
1748 gmc_v9_0_init_golden_registers(adev); in gmc_v9_0_hw_init()
1750 if (adev->mode_info.num_crtc) { in gmc_v9_0_hw_init()
1757 if (adev->mmhub.funcs->update_power_gating) in gmc_v9_0_hw_init()
1758 adev->mmhub.funcs->update_power_gating(adev, true); in gmc_v9_0_hw_init()
1760 adev->hdp.funcs->init_registers(adev); in gmc_v9_0_hw_init()
1763 adev->hdp.funcs->flush_hdp(adev, NULL); in gmc_v9_0_hw_init()
1770 if (!amdgpu_sriov_vf(adev)) { in gmc_v9_0_hw_init()
1771 adev->gfxhub.funcs->set_fault_enable_default(adev, value); in gmc_v9_0_hw_init()
1772 adev->mmhub.funcs->set_fault_enable_default(adev, value); in gmc_v9_0_hw_init()
1774 for (i = 0; i < adev->num_vmhubs; ++i) in gmc_v9_0_hw_init()
1775 gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0); in gmc_v9_0_hw_init()
1777 if (adev->umc.funcs && adev->umc.funcs->init_registers) in gmc_v9_0_hw_init()
1778 adev->umc.funcs->init_registers(adev); in gmc_v9_0_hw_init()
1780 r = gmc_v9_0_gart_enable(adev); in gmc_v9_0_hw_init()
1792 static void gmc_v9_0_gart_disable(struct amdgpu_device *adev) in gmc_v9_0_gart_disable() argument
1794 adev->gfxhub.funcs->gart_disable(adev); in gmc_v9_0_gart_disable()
1795 adev->mmhub.funcs->gart_disable(adev); in gmc_v9_0_gart_disable()
1796 amdgpu_gart_table_vram_unpin(adev); in gmc_v9_0_gart_disable()
1801 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v9_0_hw_fini() local
1803 gmc_v9_0_gart_disable(adev); in gmc_v9_0_hw_fini()
1805 if (amdgpu_sriov_vf(adev)) { in gmc_v9_0_hw_fini()
1816 if (adev->mmhub.funcs->update_power_gating) in gmc_v9_0_hw_fini()
1817 adev->mmhub.funcs->update_power_gating(adev, false); in gmc_v9_0_hw_fini()
1819 amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0); in gmc_v9_0_hw_fini()
1820 amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0); in gmc_v9_0_hw_fini()
1827 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v9_0_suspend() local
1829 return gmc_v9_0_hw_fini(adev); in gmc_v9_0_suspend()
1835 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v9_0_resume() local
1837 r = gmc_v9_0_hw_init(adev); in gmc_v9_0_resume()
1841 amdgpu_vmid_reset_all(adev); in gmc_v9_0_resume()
1867 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v9_0_set_clockgating_state() local
1869 adev->mmhub.funcs->set_clockgating(adev, state); in gmc_v9_0_set_clockgating_state()
1871 athub_v1_0_set_clockgating(adev, state); in gmc_v9_0_set_clockgating_state()
1878 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v9_0_get_clockgating_state() local
1880 adev->mmhub.funcs->get_clockgating(adev, flags); in gmc_v9_0_get_clockgating_state()
1882 athub_v1_0_get_clockgating(adev, flags); in gmc_v9_0_get_clockgating_state()