Lines Matching refs:adev
(cross-reference listing for the identifier adev, a struct amdgpu_device pointer: each entry gives the source line number, the matching code, the enclosing function, and whether adev appears there as an argument or a local; the functions are from gmc_v9_0.c, the GMC v9 memory-controller code in the Linux amdgpu driver)

412 static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,  in gmc_v9_0_ecc_interrupt_state()  argument
422 if (adev->asic_type >= CHIP_VEGA20) in gmc_v9_0_ecc_interrupt_state()
463 static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev, in gmc_v9_0_vm_fault_interrupt_state() argument
481 for_each_set_bit(j, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) { in gmc_v9_0_vm_fault_interrupt_state()
482 hub = &adev->vmhub[j]; in gmc_v9_0_vm_fault_interrupt_state()
491 if (adev->in_s0ix && (j == AMDGPU_GFXHUB(0))) in gmc_v9_0_vm_fault_interrupt_state()
509 for_each_set_bit(j, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) { in gmc_v9_0_vm_fault_interrupt_state()
510 hub = &adev->vmhub[j]; in gmc_v9_0_vm_fault_interrupt_state()
519 if (adev->in_s0ix && (j == AMDGPU_GFXHUB(0))) in gmc_v9_0_vm_fault_interrupt_state()
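
Note on the pattern at source lines 481-491 and 509-519: the handler visits every populated VM hub via for_each_set_bit() over adev->vmhubs_mask, but skips the graphics hub while the system is in S0ix, since the GFX block stays powered down there. A minimal user-space sketch of that walk, with the iterator open-coded and all names and mask values illustrative, not the kernel code:

    #include <stdio.h>
    #include <stdbool.h>

    #define MAX_VMHUBS 32        /* stand-in for AMDGPU_MAX_VMHUBS */
    #define GFXHUB_0   0         /* stand-in for AMDGPU_GFXHUB(0) */

    static void set_fault_irq(int hub, bool enable)
    {
        printf("hub %d: vm-fault interrupts %s\n", hub, enable ? "on" : "off");
    }

    int main(void)
    {
        unsigned long vmhubs_mask = 0x7;    /* pretend hubs 0..2 are populated */
        bool in_s0ix = true;
        int j;

        for (j = 0; j < MAX_VMHUBS; j++) {
            if (!(vmhubs_mask & (1UL << j)))
                continue;                   /* open-coded for_each_set_bit() */
            if (in_s0ix && j == GFXHUB_0)
                continue;                   /* GFX hub is powered down: skip */
            set_fault_irq(j, true);
        }
        return 0;
    }
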
543 static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, in gmc_v9_0_process_interrupt() argument
573 if (adev->gfx.funcs->ih_node_to_logical_xcc) { in gmc_v9_0_process_interrupt()
574 xcc_id = adev->gfx.funcs->ih_node_to_logical_xcc(adev, in gmc_v9_0_process_interrupt()
581 hub = &adev->vmhub[vmhub]; in gmc_v9_0_process_interrupt()
584 if (adev->irq.retry_cam_enabled) { in gmc_v9_0_process_interrupt()
588 if (entry->ih == &adev->irq.ih) { in gmc_v9_0_process_interrupt()
589 amdgpu_irq_delegate(adev, entry, 8); in gmc_v9_0_process_interrupt()
595 ret = amdgpu_vm_handle_fault(adev, entry->pasid, entry->vmid, node_id, in gmc_v9_0_process_interrupt()
597 WDOORBELL32(adev->irq.retry_cam_doorbell_index, cam_index); in gmc_v9_0_process_interrupt()
602 if (entry->ih != &adev->irq.ih_soft && in gmc_v9_0_process_interrupt()
603 amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid, in gmc_v9_0_process_interrupt()
610 if (entry->ih == &adev->irq.ih) { in gmc_v9_0_process_interrupt()
611 amdgpu_irq_delegate(adev, entry, 8); in gmc_v9_0_process_interrupt()
618 if (amdgpu_vm_handle_fault(adev, entry->pasid, entry->vmid, node_id, in gmc_v9_0_process_interrupt()
624 if (kgd2kfd_vmfault_fast_path(adev, entry, retry_fault)) in gmc_v9_0_process_interrupt()
630 dev_err(adev->dev, in gmc_v9_0_process_interrupt()
635 task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid); in gmc_v9_0_process_interrupt()
637 amdgpu_vm_print_task_info(adev, task_info); in gmc_v9_0_process_interrupt()
641 dev_err(adev->dev, " in page starting at address 0x%016llx from IH client 0x%x (%s)\n", in gmc_v9_0_process_interrupt()
645 if (amdgpu_is_multi_aid(adev)) in gmc_v9_0_process_interrupt()
646 dev_err(adev->dev, " cookie node_id %d fault from die %s%d%s\n", in gmc_v9_0_process_interrupt()
650 if (amdgpu_sriov_vf(adev)) in gmc_v9_0_process_interrupt()
659 (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 4, 2))) in gmc_v9_0_process_interrupt()
668 if (fed && amdgpu_ras_is_poison_mode_supported(adev) && in gmc_v9_0_process_interrupt()
669 (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 4, 2))) in gmc_v9_0_process_interrupt()
678 if (!amdgpu_sriov_vf(adev)) in gmc_v9_0_process_interrupt()
681 amdgpu_vm_update_fault_cache(adev, entry->pasid, addr, status, vmhub); in gmc_v9_0_process_interrupt()
683 dev_err(adev->dev, in gmc_v9_0_process_interrupt()
687 dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n", in gmc_v9_0_process_interrupt()
692 switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) { in gmc_v9_0_process_interrupt()
721 dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n", in gmc_v9_0_process_interrupt()
724 dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n", in gmc_v9_0_process_interrupt()
727 dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n", in gmc_v9_0_process_interrupt()
730 dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n", in gmc_v9_0_process_interrupt()
733 dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n", in gmc_v9_0_process_interrupt()
736 dev_err(adev->dev, "\t RW: 0x%x\n", rw); in gmc_v9_0_process_interrupt()
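
The fault handler at source lines 543-736 follows a two-pass shape: a retry fault arriving on the hardware IH ring is not resolved in the hard-IRQ path but re-queued to a software ring via amdgpu_irq_delegate() (lines 589/611), and only entries already running from the soft ring go through amdgpu_vm_handle_fault(); unrecoverable faults fall through to the dev_err() dump. A rough user-space model of just that control flow (the retry CAM, fault filtering, and KFD fast path are omitted; all names here are hypothetical):

    #include <stdio.h>
    #include <stdbool.h>

    enum ring { HW_RING, SOFT_RING };

    struct fault { enum ring src; unsigned int pasid; bool retry; };

    /* Stand-in for amdgpu_irq_delegate(): re-queue for the soft ring. */
    static void delegate(struct fault *f)
    {
        f->src = SOFT_RING;
        printf("pasid %u: delegated to soft ring\n", f->pasid);
    }

    static bool handle_fault(struct fault *f)
    {
        printf("pasid %u: page fault handled\n", f->pasid);
        return true;    /* pretend the fault was resolved */
    }

    static void process_interrupt(struct fault *f)
    {
        if (f->retry) {
            if (f->src == HW_RING) {    /* lines 588/610: bounce once */
                delegate(f);
                return;
            }
            if (handle_fault(f))        /* line 618 */
                return;
        }
        fprintf(stderr, "pasid %u: unrecoverable VM fault\n", f->pasid);
    }

    int main(void)
    {
        struct fault f = { HW_RING, 0x8001, true };
        process_interrupt(&f);   /* first pass: delegated */
        process_interrupt(&f);   /* second pass: handled in soft context */
        return 0;
    }
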
751 static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev) in gmc_v9_0_set_irq_funcs() argument
753 adev->gmc.vm_fault.num_types = 1; in gmc_v9_0_set_irq_funcs()
754 adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs; in gmc_v9_0_set_irq_funcs()
756 if (!amdgpu_sriov_vf(adev) && in gmc_v9_0_set_irq_funcs()
757 !adev->gmc.xgmi.connected_to_cpu && in gmc_v9_0_set_irq_funcs()
758 !adev->gmc.is_app_apu) { in gmc_v9_0_set_irq_funcs()
759 adev->gmc.ecc_irq.num_types = 1; in gmc_v9_0_set_irq_funcs()
760 adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs; in gmc_v9_0_set_irq_funcs()
790 static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev, in gmc_v9_0_use_invalidate_semaphore() argument
793 if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) || in gmc_v9_0_use_invalidate_semaphore()
794 amdgpu_is_multi_aid(adev)) in gmc_v9_0_use_invalidate_semaphore()
799 (!amdgpu_sriov_vf(adev)) && in gmc_v9_0_use_invalidate_semaphore()
800 (!(!(adev->apu_flags & AMD_APU_IS_RAVEN2) && in gmc_v9_0_use_invalidate_semaphore()
801 (adev->apu_flags & AMD_APU_IS_PICASSO)))); in gmc_v9_0_use_invalidate_semaphore()
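
The predicate ending at source line 801 hides a triple negation: !(!(apu_flags & AMD_APU_IS_RAVEN2) && (apu_flags & AMD_APU_IS_PICASSO)) is simply "Raven2, or not Picasso"; among the other conditions at lines 799-800 (right vmhub, not an SRIOV VF), the APU check only excludes plain Picasso parts. A small self-checking demonstration of the De Morgan rewrite (the bit values are illustrative, not the real AMD_APU_IS_* definitions):

    #include <assert.h>
    #include <stdbool.h>

    #define AMD_APU_IS_RAVEN2  (1u << 0)   /* illustrative bit values */
    #define AMD_APU_IS_PICASSO (1u << 1)

    /* The nested negation from lines 800-801, verbatim in shape. */
    static bool orig(unsigned int apu_flags)
    {
        return !(!(apu_flags & AMD_APU_IS_RAVEN2) &&
                 (apu_flags & AMD_APU_IS_PICASSO));
    }

    /* De Morgan: true unless the part is Picasso without the Raven2 flag. */
    static bool simplified(unsigned int apu_flags)
    {
        return (apu_flags & AMD_APU_IS_RAVEN2) ||
               !(apu_flags & AMD_APU_IS_PICASSO);
    }

    int main(void)
    {
        /* Exhaustively check all four flag combinations agree. */
        for (unsigned int f = 0; f < 4; f++)
            assert(orig(f) == simplified(f));
        return 0;
    }
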
804 static bool gmc_v9_0_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev, in gmc_v9_0_get_atc_vmid_pasid_mapping_info() argument
833 static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, in gmc_v9_0_flush_gpu_tlb() argument
836 bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub); in gmc_v9_0_flush_gpu_tlb()
843 hub = &adev->vmhub[vmhub]; in gmc_v9_0_flush_gpu_tlb()
857 if (adev->gfx.kiq[inst].ring.sched.ready && in gmc_v9_0_flush_gpu_tlb()
858 (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) { in gmc_v9_0_flush_gpu_tlb()
862 amdgpu_gmc_fw_reg_write_reg_wait(adev, req, ack, inv_req, in gmc_v9_0_flush_gpu_tlb()
868 spin_lock(&adev->gmc.invalidate_lock); in gmc_v9_0_flush_gpu_tlb()
879 for (j = 0; j < adev->usec_timeout; j++) { in gmc_v9_0_flush_gpu_tlb()
890 if (j >= adev->usec_timeout) in gmc_v9_0_flush_gpu_tlb()
905 (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 4, 2))) in gmc_v9_0_flush_gpu_tlb()
908 for (j = 0; j < adev->usec_timeout; j++) { in gmc_v9_0_flush_gpu_tlb()
930 spin_unlock(&adev->gmc.invalidate_lock); in gmc_v9_0_flush_gpu_tlb()
932 if (j < adev->usec_timeout) in gmc_v9_0_flush_gpu_tlb()
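
The flush path (source lines 833-932) serializes invalidations under adev->gmc.invalidate_lock and then busy-polls the ACK register for at most adev->usec_timeout iterations; the post-loop tests at lines 890 and 932 distinguish timeout from completion by comparing the loop counter against the bound. A toy user-space version of that bounded-poll idiom, with a simulated register:

    #include <stdio.h>

    #define USEC_TIMEOUT 100000   /* stand-in for adev->usec_timeout */

    static unsigned int fake_ack;

    /* Stand-in for the RREG32 of the invalidation ACK register. */
    static unsigned int read_ack(void)
    {
        static int polls;
        if (++polls == 5)         /* pretend the engine acks on poll 5 */
            fake_ack = 1;
        return fake_ack;
    }

    int main(void)
    {
        int j;

        /* Same shape as lines 879-890: bounded busy-wait, then decide
         * afterwards whether we completed or ran out the clock. */
        for (j = 0; j < USEC_TIMEOUT; j++) {
            if (read_ack() & 1)
                break;
            /* the driver does udelay(1) here */
        }
        if (j >= USEC_TIMEOUT)
            fprintf(stderr, "TLB flush timed out\n");
        else
            printf("TLB flushed after %d polls\n", j + 1);
        return 0;
    }
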
949 static void gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, in gmc_v9_0_flush_gpu_tlb_pasid() argument
959 valid = gmc_v9_0_get_atc_vmid_pasid_mapping_info(adev, vmid, in gmc_v9_0_flush_gpu_tlb_pasid()
965 for_each_set_bit(i, adev->vmhubs_mask, in gmc_v9_0_flush_gpu_tlb_pasid()
967 gmc_v9_0_flush_gpu_tlb(adev, vmid, i, in gmc_v9_0_flush_gpu_tlb_pasid()
970 gmc_v9_0_flush_gpu_tlb(adev, vmid, in gmc_v9_0_flush_gpu_tlb_pasid()
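
The PASID variant (source lines 949-970) first resolves which VMID currently carries the PASID, via gmc_v9_0_get_atc_vmid_pasid_mapping_info(), then flushes that VMID on every populated hub. A simplified user-space model, with a plain array standing in for the hardware VMID-to-PASID mapping and the hub mask invented for the example:

    #include <stdio.h>

    #define NUM_VMIDS 16

    /* Hypothetical VMID->PASID table; the index is the VMID. */
    static unsigned int vmid_pasid[NUM_VMIDS] = { [3] = 0x8001, [7] = 0x8002 };

    static void flush_gpu_tlb(unsigned int vmid, int hub)
    {
        printf("flush vmid %u on hub %d\n", vmid, hub);
    }

    /* Same shape as lines 959-970: find the VMID mapped to the PASID,
     * then flush it on every populated hub. */
    static void flush_gpu_tlb_pasid(unsigned int pasid, unsigned long hub_mask)
    {
        for (unsigned int vmid = 0; vmid < NUM_VMIDS; vmid++) {
            if (vmid_pasid[vmid] != pasid)
                continue;
            for (int i = 0; i < 32; i++)
                if (hub_mask & (1UL << i))
                    flush_gpu_tlb(vmid, i);
        }
    }

    int main(void)
    {
        flush_gpu_tlb_pasid(0x8001, 0x3);   /* flush on hubs 0 and 1 */
        return 0;
    }
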
980 bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(ring->adev, ring->vm_hub); in gmc_v9_0_emit_flush_gpu_tlb()
981 struct amdgpu_device *adev = ring->adev; in gmc_v9_0_emit_flush_gpu_tlb() local
982 struct amdgpu_vmhub *hub = &adev->vmhub[ring->vm_hub]; in gmc_v9_0_emit_flush_gpu_tlb()
1029 struct amdgpu_device *adev = ring->adev; in gmc_v9_0_emit_pasid_mapping() local
1076 static uint64_t gmc_v9_0_map_mtype(struct amdgpu_device *adev, uint32_t flags) in gmc_v9_0_map_mtype() argument
1097 static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level, in gmc_v9_0_get_vm_pde() argument
1101 *addr = amdgpu_gmc_vram_mc2pa(adev, *addr); in gmc_v9_0_get_vm_pde()
1104 if (!adev->gmc.translate_further) in gmc_v9_0_get_vm_pde()
1123 static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev, in gmc_v9_0_get_coherence_flags() argument
1136 uint32_t gc_ip_version = amdgpu_ip_version(adev, GC_HWIP, 0); in gmc_v9_0_get_coherence_flags()
1146 if (bo_adev == adev) { in gmc_v9_0_get_coherence_flags()
1157 adev->gmc.xgmi.connected_to_cpu) in gmc_v9_0_get_coherence_flags()
1164 if (amdgpu_xgmi_same_hive(adev, bo_adev)) in gmc_v9_0_get_coherence_flags()
1197 is_local = (!is_vram && (adev->flags & AMD_IS_APU) && in gmc_v9_0_get_coherence_flags()
1199 (is_vram && adev == bo_adev && in gmc_v9_0_get_coherence_flags()
1200 KFD_XCP_MEM_ID(adev, bo->xcp_id) == vm->mem_id); in gmc_v9_0_get_coherence_flags()
1206 } else if (adev->flags & AMD_IS_APU) { in gmc_v9_0_get_coherence_flags()
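
gmc_v9_0_get_coherence_flags() (source lines 1123-1206) picks a PTE memory type from where the BO lives relative to the device doing the access: same device, same XGMI hive, APU system memory, or remote. A loose sketch of the shape of that decision only, not the driver's actual MTYPE table (the real code also weighs NUMA partitions, coherent/uncached BO flags, and the GC IP version):

    #include <stdio.h>
    #include <stdbool.h>

    enum mtype { MTYPE_RW, MTYPE_NC, MTYPE_UC };

    /* Rough model of the locality ladder visible at lines 1146-1206:
     * local memory gets the cacheable type, anything reached over the
     * fabric is mapped non-coherent or uncached. */
    static enum mtype pick_mtype(bool same_device, bool same_xgmi_hive,
                                 bool is_apu)
    {
        if (same_device)
            return MTYPE_RW;    /* local VRAM/GTT: full caching */
        if (same_xgmi_hive)
            return MTYPE_NC;    /* peer in the XGMI hive */
        if (is_apu)
            return MTYPE_NC;    /* APU system memory */
        return MTYPE_UC;        /* remote over PCIe: uncached */
    }

    int main(void)
    {
        printf("local=%d hive=%d remote=%d\n",
               pick_mtype(true, false, false),
               pick_mtype(false, true, false),
               pick_mtype(false, false, false));
        return 0;
    }
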
1238 static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev, in gmc_v9_0_get_vm_pte() argument
1256 gmc_v9_0_get_coherence_flags(adev, mapping->bo_va->base.vm, bo, in gmc_v9_0_get_vm_pte()
1260 static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev, in gmc_v9_0_override_vm_pte_flags() argument
1269 if (!(adev->flags & AMD_IS_APU) || in gmc_v9_0_override_vm_pte_flags()
1270 amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 3)) in gmc_v9_0_override_vm_pte_flags()
1276 if (!adev->ram_is_direct_mapped) { in gmc_v9_0_override_vm_pte_flags()
1277 dev_dbg_ratelimited(adev->dev, "RAM is not direct mapped\n"); in gmc_v9_0_override_vm_pte_flags()
1289 dev_dbg_ratelimited(adev->dev, "MTYPE is not NC or UC\n"); in gmc_v9_0_override_vm_pte_flags()
1297 if (adev->gmc.is_app_apu && vm->mem_id >= 0) { in gmc_v9_0_override_vm_pte_flags()
1298 local_node = adev->gmc.mem_partitions[vm->mem_id].numa.node; in gmc_v9_0_override_vm_pte_flags()
1300 dev_dbg_ratelimited(adev->dev, "Only native mode APU is supported.\n"); in gmc_v9_0_override_vm_pte_flags()
1308 dev_dbg_ratelimited(adev->dev, "Page is not RAM.\n"); in gmc_v9_0_override_vm_pte_flags()
1312 dev_dbg_ratelimited(adev->dev, "vm->mem_id=%d, local_node=%d, nid=%d\n", in gmc_v9_0_override_vm_pte_flags()
1331 dev_dbg_ratelimited(adev->dev, "flags updated from %llx to %llx\n", in gmc_v9_0_override_vm_pte_flags()
1336 static unsigned int gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev) in gmc_v9_0_get_vbios_fb_size() argument
1348 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { in gmc_v9_0_get_vbios_fb_size()
1378 static bool gmc_v9_0_need_reset_on_init(struct amdgpu_device *adev) in gmc_v9_0_need_reset_on_init() argument
1380 if (adev->nbio.funcs && adev->nbio.funcs->is_nps_switch_requested && in gmc_v9_0_need_reset_on_init()
1381 adev->nbio.funcs->is_nps_switch_requested(adev)) { in gmc_v9_0_need_reset_on_init()
1382 adev->gmc.reset_flags |= AMDGPU_GMC_INIT_RESET_NPS; in gmc_v9_0_need_reset_on_init()
1404 static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev) in gmc_v9_0_set_gmc_funcs() argument
1406 adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs; in gmc_v9_0_set_gmc_funcs()
1409 static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev) in gmc_v9_0_set_umc_funcs() argument
1411 switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) { in gmc_v9_0_set_umc_funcs()
1413 adev->umc.funcs = &umc_v6_0_funcs; in gmc_v9_0_set_umc_funcs()
1416 adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM; in gmc_v9_0_set_umc_funcs()
1417 adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM; in gmc_v9_0_set_umc_funcs()
1418 adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM; in gmc_v9_0_set_umc_funcs()
1419 adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_VG20; in gmc_v9_0_set_umc_funcs()
1420 adev->umc.retire_unit = 1; in gmc_v9_0_set_umc_funcs()
1421 adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0]; in gmc_v9_0_set_umc_funcs()
1422 adev->umc.ras = &umc_v6_1_ras; in gmc_v9_0_set_umc_funcs()
1425 adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM; in gmc_v9_0_set_umc_funcs()
1426 adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM; in gmc_v9_0_set_umc_funcs()
1427 adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM; in gmc_v9_0_set_umc_funcs()
1428 adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_ARCT; in gmc_v9_0_set_umc_funcs()
1429 adev->umc.retire_unit = 1; in gmc_v9_0_set_umc_funcs()
1430 adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0]; in gmc_v9_0_set_umc_funcs()
1431 adev->umc.ras = &umc_v6_1_ras; in gmc_v9_0_set_umc_funcs()
1434 adev->umc.max_ras_err_cnt_per_query = in gmc_v9_0_set_umc_funcs()
1436 adev->umc.channel_inst_num = UMC_V6_7_CHANNEL_INSTANCE_NUM; in gmc_v9_0_set_umc_funcs()
1437 adev->umc.umc_inst_num = UMC_V6_7_UMC_INSTANCE_NUM; in gmc_v9_0_set_umc_funcs()
1438 adev->umc.channel_offs = UMC_V6_7_PER_CHANNEL_OFFSET; in gmc_v9_0_set_umc_funcs()
1439 adev->umc.retire_unit = (UMC_V6_7_NA_MAP_PA_NUM * 2); in gmc_v9_0_set_umc_funcs()
1440 if (!adev->gmc.xgmi.connected_to_cpu) in gmc_v9_0_set_umc_funcs()
1441 adev->umc.ras = &umc_v6_7_ras; in gmc_v9_0_set_umc_funcs()
1442 if (1 & adev->smuio.funcs->get_die_id(adev)) in gmc_v9_0_set_umc_funcs()
1443 adev->umc.channel_idx_tbl = &umc_v6_7_channel_idx_tbl_first[0][0]; in gmc_v9_0_set_umc_funcs()
1445 adev->umc.channel_idx_tbl = &umc_v6_7_channel_idx_tbl_second[0][0]; in gmc_v9_0_set_umc_funcs()
1449 adev->umc.max_ras_err_cnt_per_query = in gmc_v9_0_set_umc_funcs()
1450 UMC_V12_0_TOTAL_CHANNEL_NUM(adev) * UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL; in gmc_v9_0_set_umc_funcs()
1451 adev->umc.channel_inst_num = UMC_V12_0_CHANNEL_INSTANCE_NUM; in gmc_v9_0_set_umc_funcs()
1452 adev->umc.umc_inst_num = UMC_V12_0_UMC_INSTANCE_NUM; in gmc_v9_0_set_umc_funcs()
1453 adev->umc.node_inst_num /= UMC_V12_0_UMC_INSTANCE_NUM; in gmc_v9_0_set_umc_funcs()
1454 adev->umc.channel_offs = UMC_V12_0_PER_CHANNEL_OFFSET; in gmc_v9_0_set_umc_funcs()
1455 if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) in gmc_v9_0_set_umc_funcs()
1456 adev->umc.ras = &umc_v12_0_ras; in gmc_v9_0_set_umc_funcs()
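
gmc_v9_0_set_umc_funcs() (source lines 1409-1456) is a per-IP-version dispatch: one switch on amdgpu_ip_version(adev, UMC_HWIP, 0), each case filling in that generation's channel/instance counts, register stride, retire unit, and RAS hooks. A compact model of the pattern (version codes and values below are illustrative, not the real UMC_V* constants):

    #include <stdio.h>

    struct umc_params {
        int channel_inst_num;
        int umc_inst_num;
        int retire_unit;
        const char *ras;
    };

    /* One switch per UMC generation, each case filling the constants. */
    static void set_umc_funcs(struct umc_params *p, int ip_version)
    {
        switch (ip_version) {
        case 610:                       /* e.g. UMC 6.1 */
            p->channel_inst_num = 4;
            p->umc_inst_num = 8;
            p->retire_unit = 1;
            p->ras = "umc_v6_1_ras";
            break;
        default:                        /* e.g. UMC 6.0: no RAS hooks */
            p->ras = "none";
            break;
        }
    }

    int main(void)
    {
        struct umc_params p = { 0 };
        set_umc_funcs(&p, 610);
        printf("%d channels/inst, %d insts, ras=%s\n",
               p.channel_inst_num, p.umc_inst_num, p.ras);
        return 0;
    }
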
1463 static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev) in gmc_v9_0_set_mmhub_funcs() argument
1465 switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) { in gmc_v9_0_set_mmhub_funcs()
1467 adev->mmhub.funcs = &mmhub_v9_4_funcs; in gmc_v9_0_set_mmhub_funcs()
1470 adev->mmhub.funcs = &mmhub_v1_7_funcs; in gmc_v9_0_set_mmhub_funcs()
1474 adev->mmhub.funcs = &mmhub_v1_8_funcs; in gmc_v9_0_set_mmhub_funcs()
1477 adev->mmhub.funcs = &mmhub_v1_0_funcs; in gmc_v9_0_set_mmhub_funcs()
1482 static void gmc_v9_0_set_mmhub_ras_funcs(struct amdgpu_device *adev) in gmc_v9_0_set_mmhub_ras_funcs() argument
1484 switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) { in gmc_v9_0_set_mmhub_ras_funcs()
1486 adev->mmhub.ras = &mmhub_v1_0_ras; in gmc_v9_0_set_mmhub_ras_funcs()
1489 adev->mmhub.ras = &mmhub_v9_4_ras; in gmc_v9_0_set_mmhub_ras_funcs()
1492 adev->mmhub.ras = &mmhub_v1_7_ras; in gmc_v9_0_set_mmhub_ras_funcs()
1496 adev->mmhub.ras = &mmhub_v1_8_ras; in gmc_v9_0_set_mmhub_ras_funcs()
1504 static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev) in gmc_v9_0_set_gfxhub_funcs() argument
1506 if (amdgpu_is_multi_aid(adev)) in gmc_v9_0_set_gfxhub_funcs()
1507 adev->gfxhub.funcs = &gfxhub_v1_2_funcs; in gmc_v9_0_set_gfxhub_funcs()
1509 adev->gfxhub.funcs = &gfxhub_v1_0_funcs; in gmc_v9_0_set_gfxhub_funcs()
1512 static void gmc_v9_0_set_hdp_ras_funcs(struct amdgpu_device *adev) in gmc_v9_0_set_hdp_ras_funcs() argument
1514 adev->hdp.ras = &hdp_v4_0_ras; in gmc_v9_0_set_hdp_ras_funcs()
1517 static void gmc_v9_0_set_mca_ras_funcs(struct amdgpu_device *adev) in gmc_v9_0_set_mca_ras_funcs() argument
1519 struct amdgpu_mca *mca = &adev->mca; in gmc_v9_0_set_mca_ras_funcs()
1522 switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) { in gmc_v9_0_set_mca_ras_funcs()
1524 if (!adev->gmc.xgmi.connected_to_cpu) { in gmc_v9_0_set_mca_ras_funcs()
1535 static void gmc_v9_0_set_xgmi_ras_funcs(struct amdgpu_device *adev) in gmc_v9_0_set_xgmi_ras_funcs() argument
1537 if (!adev->gmc.xgmi.connected_to_cpu) in gmc_v9_0_set_xgmi_ras_funcs()
1538 adev->gmc.xgmi.ras = &xgmi_ras; in gmc_v9_0_set_xgmi_ras_funcs()
1541 static void gmc_v9_0_init_nps_details(struct amdgpu_device *adev) in gmc_v9_0_init_nps_details() argument
1547 adev->gmc.supported_nps_modes = 0; in gmc_v9_0_init_nps_details()
1549 if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU)) in gmc_v9_0_init_nps_details()
1552 mode = amdgpu_gmc_get_memory_partition(adev, &supp_modes); in gmc_v9_0_init_nps_details()
1558 adev->gmc.supported_nps_modes |= BIT(i); in gmc_v9_0_init_nps_details()
1565 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { in gmc_v9_0_init_nps_details()
1568 adev->gmc.supported_nps_modes = in gmc_v9_0_init_nps_details()
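
gmc_v9_0_init_nps_details() (source lines 1541-1568) turns the firmware-reported set of supported NPS (memory partition) modes into a bitmask on adev->gmc.supported_nps_modes, falling back to a per-GC-version default. A tiny model of the mask-folding step at lines 1552-1558 (the mode numbering and query result are invented for the example):

    #include <stdio.h>

    #define BIT(i) (1u << (i))

    int main(void)
    {
        unsigned int supp_modes = 0x5;          /* pretend fw reports modes 0 and 2 */
        unsigned int supported_nps_modes = 0;

        /* Fold the firmware mask into the driver-side bitmask bit by bit. */
        for (int i = 0; supp_modes; i++, supp_modes >>= 1)
            if (supp_modes & 1)
                supported_nps_modes |= BIT(i);

        printf("supported_nps_modes = 0x%x\n", supported_nps_modes);
        return 0;
    }
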
1580 struct amdgpu_device *adev = ip_block->adev; in gmc_v9_0_early_init() local
1586 if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0) || in gmc_v9_0_early_init()
1587 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1) || in gmc_v9_0_early_init()
1588 amdgpu_is_multi_aid(adev)) in gmc_v9_0_early_init()
1589 adev->gmc.xgmi.supported = true; in gmc_v9_0_early_init()
1591 if (amdgpu_ip_version(adev, XGMI_HWIP, 0) == IP_VERSION(6, 1, 0)) { in gmc_v9_0_early_init()
1592 adev->gmc.xgmi.supported = true; in gmc_v9_0_early_init()
1593 adev->gmc.xgmi.connected_to_cpu = in gmc_v9_0_early_init()
1594 adev->smuio.funcs->is_host_gpu_xgmi_supported(adev); in gmc_v9_0_early_init()
1597 if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3)) { in gmc_v9_0_early_init()
1599 adev->smuio.funcs->get_pkg_type(adev); in gmc_v9_0_early_init()
1607 adev->gmc.is_app_apu = (pkg_type == AMDGPU_PKG_TYPE_APU && in gmc_v9_0_early_init()
1608 !pci_resource_len(adev->pdev, 0)); in gmc_v9_0_early_init()
1611 gmc_v9_0_set_gmc_funcs(adev); in gmc_v9_0_early_init()
1612 gmc_v9_0_set_irq_funcs(adev); in gmc_v9_0_early_init()
1613 gmc_v9_0_set_umc_funcs(adev); in gmc_v9_0_early_init()
1614 gmc_v9_0_set_mmhub_funcs(adev); in gmc_v9_0_early_init()
1615 gmc_v9_0_set_mmhub_ras_funcs(adev); in gmc_v9_0_early_init()
1616 gmc_v9_0_set_gfxhub_funcs(adev); in gmc_v9_0_early_init()
1617 gmc_v9_0_set_hdp_ras_funcs(adev); in gmc_v9_0_early_init()
1618 gmc_v9_0_set_mca_ras_funcs(adev); in gmc_v9_0_early_init()
1619 gmc_v9_0_set_xgmi_ras_funcs(adev); in gmc_v9_0_early_init()
1621 adev->gmc.shared_aperture_start = 0x2000000000000000ULL; in gmc_v9_0_early_init()
1622 adev->gmc.shared_aperture_end = in gmc_v9_0_early_init()
1623 adev->gmc.shared_aperture_start + (4ULL << 30) - 1; in gmc_v9_0_early_init()
1624 adev->gmc.private_aperture_start = 0x1000000000000000ULL; in gmc_v9_0_early_init()
1625 adev->gmc.private_aperture_end = in gmc_v9_0_early_init()
1626 adev->gmc.private_aperture_start + (4ULL << 30) - 1; in gmc_v9_0_early_init()
1627 adev->gmc.noretry_flags = AMDGPU_VM_NORETRY_FLAGS_TF; in gmc_v9_0_early_init()
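
The aperture setup at source lines 1621-1626 places two fixed 4 GiB windows in the 64-bit GPU virtual address space, one shared and one private; (4ULL << 30) is 4 GiB, so each end address is start + 4 GiB - 1. The arithmetic, worked:

    #include <stdio.h>

    int main(void)
    {
        /* Same constants as lines 1621-1626. */
        unsigned long long shared_start  = 0x2000000000000000ULL;
        unsigned long long shared_end    = shared_start + (4ULL << 30) - 1;
        unsigned long long private_start = 0x1000000000000000ULL;
        unsigned long long private_end   = private_start + (4ULL << 30) - 1;

        printf("shared : 0x%016llx-0x%016llx\n", shared_start, shared_end);
        printf("private: 0x%016llx-0x%016llx\n", private_start, private_end);
        return 0;
    }
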
1634 struct amdgpu_device *adev = ip_block->adev; in gmc_v9_0_late_init() local
1637 r = amdgpu_gmc_allocate_vm_inv_eng(adev); in gmc_v9_0_late_init()
1645 if (!amdgpu_sriov_vf(adev) && in gmc_v9_0_late_init()
1646 (amdgpu_ip_version(adev, UMC_HWIP, 0) == IP_VERSION(6, 0, 0))) { in gmc_v9_0_late_init()
1647 if (!(adev->ras_enabled & (1 << AMDGPU_RAS_BLOCK__UMC))) { in gmc_v9_0_late_init()
1648 if (adev->df.funcs && in gmc_v9_0_late_init()
1649 adev->df.funcs->enable_ecc_force_par_wr_rmw) in gmc_v9_0_late_init()
1650 adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false); in gmc_v9_0_late_init()
1654 if (!amdgpu_persistent_edc_harvesting_supported(adev)) { in gmc_v9_0_late_init()
1655 amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__MMHUB); in gmc_v9_0_late_init()
1656 amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__HDP); in gmc_v9_0_late_init()
1659 r = amdgpu_gmc_ras_late_init(adev); in gmc_v9_0_late_init()
1663 return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0); in gmc_v9_0_late_init()
1666 static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev, in gmc_v9_0_vram_gtt_location() argument
1669 u64 base = adev->mmhub.funcs->get_fb_location(adev); in gmc_v9_0_vram_gtt_location()
1671 amdgpu_gmc_set_agp_default(adev, mc); in gmc_v9_0_vram_gtt_location()
1674 base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size; in gmc_v9_0_vram_gtt_location()
1675 if (amdgpu_gmc_is_pdb0_enabled(adev)) { in gmc_v9_0_vram_gtt_location()
1676 amdgpu_gmc_sysvm_location(adev, mc); in gmc_v9_0_vram_gtt_location()
1678 amdgpu_gmc_vram_location(adev, mc, base); in gmc_v9_0_vram_gtt_location()
1679 amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_BEST_FIT); in gmc_v9_0_vram_gtt_location()
1680 if (!amdgpu_sriov_vf(adev) && (amdgpu_agp == 1)) in gmc_v9_0_vram_gtt_location()
1681 amdgpu_gmc_agp_location(adev, mc); in gmc_v9_0_vram_gtt_location()
1684 adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev); in gmc_v9_0_vram_gtt_location()
1687 adev->vm_manager.vram_base_offset += in gmc_v9_0_vram_gtt_location()
1688 adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size; in gmc_v9_0_vram_gtt_location()
1700 static int gmc_v9_0_mc_init(struct amdgpu_device *adev) in gmc_v9_0_mc_init() argument
1705 if (!adev->gmc.is_app_apu) { in gmc_v9_0_mc_init()
1706 adev->gmc.mc_vram_size = in gmc_v9_0_mc_init()
1707 adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL; in gmc_v9_0_mc_init()
1710 adev->gmc.mc_vram_size = 0; in gmc_v9_0_mc_init()
1712 adev->gmc.real_vram_size = adev->gmc.mc_vram_size; in gmc_v9_0_mc_init()
1714 if (!(adev->flags & AMD_IS_APU) && in gmc_v9_0_mc_init()
1715 !adev->gmc.xgmi.connected_to_cpu) { in gmc_v9_0_mc_init()
1716 r = amdgpu_device_resize_fb_bar(adev); in gmc_v9_0_mc_init()
1720 adev->gmc.aper_base = pci_resource_start(adev->pdev, 0); in gmc_v9_0_mc_init()
1721 adev->gmc.aper_size = pci_resource_len(adev->pdev, 0); in gmc_v9_0_mc_init()
1736 if ((!amdgpu_sriov_vf(adev) && in gmc_v9_0_mc_init()
1737 (adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) || in gmc_v9_0_mc_init()
1738 (adev->gmc.xgmi.supported && in gmc_v9_0_mc_init()
1739 adev->gmc.xgmi.connected_to_cpu)) { in gmc_v9_0_mc_init()
1740 adev->gmc.aper_base = in gmc_v9_0_mc_init()
1741 adev->gfxhub.funcs->get_mc_fb_offset(adev) + in gmc_v9_0_mc_init()
1742 adev->gmc.xgmi.physical_node_id * in gmc_v9_0_mc_init()
1743 adev->gmc.xgmi.node_segment_size; in gmc_v9_0_mc_init()
1744 adev->gmc.aper_size = adev->gmc.real_vram_size; in gmc_v9_0_mc_init()
1748 adev->gmc.visible_vram_size = adev->gmc.aper_size; in gmc_v9_0_mc_init()
1752 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { in gmc_v9_0_mc_init()
1762 adev->gmc.gart_size = 512ULL << 20; in gmc_v9_0_mc_init()
1767 adev->gmc.gart_size = 1024ULL << 20; in gmc_v9_0_mc_init()
1771 adev->gmc.gart_size = (u64)amdgpu_gart_size << 20; in gmc_v9_0_mc_init()
1774 adev->gmc.gart_size += adev->pm.smu_prv_buffer_size; in gmc_v9_0_mc_init()
1776 gmc_v9_0_vram_gtt_location(adev, &adev->gmc); in gmc_v9_0_mc_init()
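
The GART sizing at source lines 1752-1774 uses a per-GC-generation default (512 MiB on older parts, 1 GiB on newer ones, lines 1762/1767) unless the amdgpu_gart_size module parameter overrides it (given in MiB, hence the << 20 at line 1771), then appends the SMU private buffer. A sketch of that selection; the convention of -1 meaning "auto" matches the usual amdgpu module-parameter style and is assumed here:

    #include <stdio.h>

    int main(void)
    {
        long long amdgpu_gart_size = -1;        /* module param, -1 = auto */
        unsigned long long smu_prv_buffer_size = 0;
        unsigned long long gart_size;

        if (amdgpu_gart_size == -1)
            gart_size = 512ULL << 20;           /* auto default on older GC */
        else
            gart_size = (unsigned long long)amdgpu_gart_size << 20;

        gart_size += smu_prv_buffer_size;       /* line 1774 */
        printf("GART: %llu MiB\n", gart_size >> 20);
        return 0;
    }
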
1781 static int gmc_v9_0_gart_init(struct amdgpu_device *adev) in gmc_v9_0_gart_init() argument
1785 if (adev->gart.bo) { in gmc_v9_0_gart_init()
1790 if (amdgpu_gmc_is_pdb0_enabled(adev)) { in gmc_v9_0_gart_init()
1791 adev->gmc.vmid0_page_table_depth = 1; in gmc_v9_0_gart_init()
1792 adev->gmc.vmid0_page_table_block_size = 12; in gmc_v9_0_gart_init()
1794 adev->gmc.vmid0_page_table_depth = 0; in gmc_v9_0_gart_init()
1795 adev->gmc.vmid0_page_table_block_size = 0; in gmc_v9_0_gart_init()
1799 r = amdgpu_gart_init(adev); in gmc_v9_0_gart_init()
1802 adev->gart.table_size = adev->gart.num_gpu_pages * 8; in gmc_v9_0_gart_init()
1803 adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_UC) | in gmc_v9_0_gart_init()
1806 if (!adev->gmc.real_vram_size) { in gmc_v9_0_gart_init()
1807 dev_info(adev->dev, "Put GART in system memory for APU\n"); in gmc_v9_0_gart_init()
1808 r = amdgpu_gart_table_ram_alloc(adev); in gmc_v9_0_gart_init()
1810 dev_err(adev->dev, "Failed to allocate GART in system memory\n"); in gmc_v9_0_gart_init()
1812 r = amdgpu_gart_table_vram_alloc(adev); in gmc_v9_0_gart_init()
1816 if (amdgpu_gmc_is_pdb0_enabled(adev)) in gmc_v9_0_gart_init()
1817 r = amdgpu_gmc_pdb0_alloc(adev); in gmc_v9_0_gart_init()
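
Source line 1802 sets adev->gart.table_size = num_gpu_pages * 8: each GART page is described by one 8-byte PTE. A worked example, assuming the usual 4 KiB GPU page size and the 512 MiB default GART:

    #include <stdio.h>

    int main(void)
    {
        unsigned long long gart_size = 512ULL << 20;      /* 512 MiB GART */
        unsigned long long num_gpu_pages = gart_size / 4096;
        unsigned long long table_size = num_gpu_pages * 8; /* line 1802 */

        /* 131072 PTEs -> 1024 KiB of page-table memory. */
        printf("%llu PTEs -> %llu KiB PTE table\n",
               num_gpu_pages, table_size >> 10);
        return 0;
    }
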
1831 static void gmc_v9_0_save_registers(struct amdgpu_device *adev) in gmc_v9_0_save_registers() argument
1833 if ((amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 0)) || in gmc_v9_0_save_registers()
1834 (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 1))) in gmc_v9_0_save_registers()
1835 adev->gmc.sdpif_register = RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0); in gmc_v9_0_save_registers()
1838 static void gmc_v9_4_3_init_vram_info(struct amdgpu_device *adev) in gmc_v9_4_3_init_vram_info() argument
1840 adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM; in gmc_v9_4_3_init_vram_info()
1841 adev->gmc.vram_width = 128 * 64; in gmc_v9_4_3_init_vram_info()
1843 if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0)) in gmc_v9_4_3_init_vram_info()
1844 adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM3E; in gmc_v9_4_3_init_vram_info()
1850 struct amdgpu_device *adev = ip_block->adev; in gmc_v9_0_sw_init() local
1851 unsigned long inst_mask = adev->aid_mask; in gmc_v9_0_sw_init()
1853 adev->gfxhub.funcs->init(adev); in gmc_v9_0_sw_init()
1855 adev->mmhub.funcs->init(adev); in gmc_v9_0_sw_init()
1857 spin_lock_init(&adev->gmc.invalidate_lock); in gmc_v9_0_sw_init()
1859 if (amdgpu_is_multi_aid(adev)) { in gmc_v9_0_sw_init()
1860 gmc_v9_4_3_init_vram_info(adev); in gmc_v9_0_sw_init()
1861 } else if (!adev->bios) { in gmc_v9_0_sw_init()
1862 if (adev->flags & AMD_IS_APU) { in gmc_v9_0_sw_init()
1863 adev->gmc.vram_type = AMDGPU_VRAM_TYPE_DDR4; in gmc_v9_0_sw_init()
1864 adev->gmc.vram_width = 64 * 64; in gmc_v9_0_sw_init()
1866 adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM; in gmc_v9_0_sw_init()
1867 adev->gmc.vram_width = 128 * 64; in gmc_v9_0_sw_init()
1870 r = amdgpu_atomfirmware_get_vram_info(adev, in gmc_v9_0_sw_init()
1872 if (amdgpu_sriov_vf(adev)) in gmc_v9_0_sw_init()
1877 adev->gmc.vram_width = 2048; in gmc_v9_0_sw_init()
1879 adev->gmc.vram_width = vram_width; in gmc_v9_0_sw_init()
1881 if (!adev->gmc.vram_width) { in gmc_v9_0_sw_init()
1885 if (adev->flags & AMD_IS_APU) in gmc_v9_0_sw_init()
1889 if (adev->df.funcs && in gmc_v9_0_sw_init()
1890 adev->df.funcs->get_hbm_channel_number) { in gmc_v9_0_sw_init()
1891 numchan = adev->df.funcs->get_hbm_channel_number(adev); in gmc_v9_0_sw_init()
1892 adev->gmc.vram_width = numchan * chansize; in gmc_v9_0_sw_init()
1896 adev->gmc.vram_type = vram_type; in gmc_v9_0_sw_init()
1897 adev->gmc.vram_vendor = vram_vendor; in gmc_v9_0_sw_init()
1899 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { in gmc_v9_0_sw_init()
1902 set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask); in gmc_v9_0_sw_init()
1903 set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask); in gmc_v9_0_sw_init()
1905 if (adev->rev_id == 0x0 || adev->rev_id == 0x1) { in gmc_v9_0_sw_init()
1906 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48); in gmc_v9_0_sw_init()
1909 amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48); in gmc_v9_0_sw_init()
1910 adev->gmc.translate_further = in gmc_v9_0_sw_init()
1911 adev->vm_manager.num_level > 1; in gmc_v9_0_sw_init()
1919 set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask); in gmc_v9_0_sw_init()
1920 set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask); in gmc_v9_0_sw_init()
1928 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48); in gmc_v9_0_sw_init()
1929 if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2)) in gmc_v9_0_sw_init()
1930 adev->gmc.translate_further = adev->vm_manager.num_level > 1; in gmc_v9_0_sw_init()
1933 set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask); in gmc_v9_0_sw_init()
1934 set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask); in gmc_v9_0_sw_init()
1935 set_bit(AMDGPU_MMHUB1(0), adev->vmhubs_mask); in gmc_v9_0_sw_init()
1938 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48); in gmc_v9_0_sw_init()
1939 adev->gmc.translate_further = adev->vm_manager.num_level > 1; in gmc_v9_0_sw_init()
1944 bitmap_set(adev->vmhubs_mask, AMDGPU_GFXHUB(0), in gmc_v9_0_sw_init()
1945 NUM_XCC(adev->gfx.xcc_mask)); in gmc_v9_0_sw_init()
1948 bitmap_or(adev->vmhubs_mask, adev->vmhubs_mask, &inst_mask, 32); in gmc_v9_0_sw_init()
1950 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48); in gmc_v9_0_sw_init()
1951 adev->gmc.translate_further = adev->vm_manager.num_level > 1; in gmc_v9_0_sw_init()
1958 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT, in gmc_v9_0_sw_init()
1959 &adev->gmc.vm_fault); in gmc_v9_0_sw_init()
1963 if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1)) { in gmc_v9_0_sw_init()
1964 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC1, VMC_1_0__SRCID__VM_FAULT, in gmc_v9_0_sw_init()
1965 &adev->gmc.vm_fault); in gmc_v9_0_sw_init()
1970 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT, in gmc_v9_0_sw_init()
1971 &adev->gmc.vm_fault); in gmc_v9_0_sw_init()
1976 if (!amdgpu_sriov_vf(adev) && in gmc_v9_0_sw_init()
1977 !adev->gmc.xgmi.connected_to_cpu && in gmc_v9_0_sw_init()
1978 !adev->gmc.is_app_apu) { in gmc_v9_0_sw_init()
1980 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0, in gmc_v9_0_sw_init()
1981 &adev->gmc.ecc_irq); in gmc_v9_0_sw_init()
1990 adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */ in gmc_v9_0_sw_init()
1992 dma_addr_bits = amdgpu_ip_version(adev, GC_HWIP, 0) >= in gmc_v9_0_sw_init()
1996 r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(dma_addr_bits)); in gmc_v9_0_sw_init()
1998 dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n"); in gmc_v9_0_sw_init()
2001 adev->need_swiotlb = drm_need_swiotlb(dma_addr_bits); in gmc_v9_0_sw_init()
2003 r = gmc_v9_0_mc_init(adev); in gmc_v9_0_sw_init()
2007 amdgpu_gmc_get_vbios_allocations(adev); in gmc_v9_0_sw_init()
2009 if (amdgpu_is_multi_aid(adev)) { in gmc_v9_0_sw_init()
2010 r = amdgpu_gmc_init_mem_ranges(adev); in gmc_v9_0_sw_init()
2016 r = amdgpu_bo_init(adev); in gmc_v9_0_sw_init()
2020 r = gmc_v9_0_gart_init(adev); in gmc_v9_0_sw_init()
2024 gmc_v9_0_init_nps_details(adev); in gmc_v9_0_sw_init()
2035 adev->vm_manager.first_kfd_vmid = in gmc_v9_0_sw_init()
2036 (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1) || in gmc_v9_0_sw_init()
2037 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) || in gmc_v9_0_sw_init()
2038 amdgpu_is_multi_aid(adev)) ? in gmc_v9_0_sw_init()
2042 amdgpu_vm_manager_init(adev); in gmc_v9_0_sw_init()
2044 gmc_v9_0_save_registers(adev); in gmc_v9_0_sw_init()
2046 r = amdgpu_gmc_ras_sw_init(adev); in gmc_v9_0_sw_init()
2050 if (amdgpu_is_multi_aid(adev)) in gmc_v9_0_sw_init()
2051 amdgpu_gmc_sysfs_init(adev); in gmc_v9_0_sw_init()
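
The VRAM width defaults in gmc_v9_0_sw_init() (source lines 1863-1892) are channel count times channel width: 64 channels of 64 bits on DDR4 APUs, 128 of 64 bits on HBM parts, with the DF callback supplying the real channel count when the VBIOS reports a width of 0. The arithmetic:

    #include <stdio.h>

    int main(void)
    {
        unsigned int chansize = 64;     /* bits per channel, line 1885ff */
        unsigned int numchan  = 128;    /* HBM default, line 1867 */
        unsigned int vram_width = numchan * chansize;   /* line 1892 */

        printf("vram_width = %u bits\n", vram_width);   /* 8192 */
        return 0;
    }
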
2058 struct amdgpu_device *adev = ip_block->adev; in gmc_v9_0_sw_fini() local
2060 if (amdgpu_is_multi_aid(adev)) in gmc_v9_0_sw_fini()
2061 amdgpu_gmc_sysfs_fini(adev); in gmc_v9_0_sw_fini()
2063 amdgpu_gmc_ras_fini(adev); in gmc_v9_0_sw_fini()
2064 amdgpu_gem_force_release(adev); in gmc_v9_0_sw_fini()
2065 amdgpu_vm_manager_fini(adev); in gmc_v9_0_sw_fini()
2066 if (!adev->gmc.real_vram_size) { in gmc_v9_0_sw_fini()
2067 dev_info(adev->dev, "Put GART in system memory for APU free\n"); in gmc_v9_0_sw_fini()
2068 amdgpu_gart_table_ram_free(adev); in gmc_v9_0_sw_fini()
2070 amdgpu_gart_table_vram_free(adev); in gmc_v9_0_sw_fini()
2072 amdgpu_bo_free_kernel(&adev->gmc.pdb0_bo, NULL, &adev->gmc.ptr_pdb0); in gmc_v9_0_sw_fini()
2073 amdgpu_bo_fini(adev); in gmc_v9_0_sw_fini()
2075 adev->gmc.num_mem_partitions = 0; in gmc_v9_0_sw_fini()
2076 kfree(adev->gmc.mem_partitions); in gmc_v9_0_sw_fini()
2081 static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev) in gmc_v9_0_init_golden_registers() argument
2083 switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) { in gmc_v9_0_init_golden_registers()
2085 if (amdgpu_sriov_vf(adev)) in gmc_v9_0_init_golden_registers()
2089 soc15_program_register_sequence(adev, in gmc_v9_0_init_golden_registers()
2092 soc15_program_register_sequence(adev, in gmc_v9_0_init_golden_registers()
2099 soc15_program_register_sequence(adev, in gmc_v9_0_init_golden_registers()
2115 void gmc_v9_0_restore_registers(struct amdgpu_device *adev) in gmc_v9_0_restore_registers() argument
2117 if ((amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 0)) || in gmc_v9_0_restore_registers()
2118 (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 1))) { in gmc_v9_0_restore_registers()
2119 WREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0, adev->gmc.sdpif_register); in gmc_v9_0_restore_registers()
2120 WARN_ON(adev->gmc.sdpif_register != in gmc_v9_0_restore_registers()
2130 static int gmc_v9_0_gart_enable(struct amdgpu_device *adev) in gmc_v9_0_gart_enable() argument
2134 if (amdgpu_gmc_is_pdb0_enabled(adev)) in gmc_v9_0_gart_enable()
2135 amdgpu_gmc_init_pdb0(adev); in gmc_v9_0_gart_enable()
2137 if (adev->gart.bo == NULL) { in gmc_v9_0_gart_enable()
2138 dev_err(adev->dev, "No VRAM object for PCIE GART.\n"); in gmc_v9_0_gart_enable()
2142 amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr); in gmc_v9_0_gart_enable()
2144 if (!adev->in_s0ix) { in gmc_v9_0_gart_enable()
2145 r = adev->gfxhub.funcs->gart_enable(adev); in gmc_v9_0_gart_enable()
2150 r = adev->mmhub.funcs->gart_enable(adev); in gmc_v9_0_gart_enable()
2155 (unsigned int)(adev->gmc.gart_size >> 20)); in gmc_v9_0_gart_enable()
2156 if (adev->gmc.pdb0_bo) in gmc_v9_0_gart_enable()
2158 (unsigned long long)amdgpu_bo_gpu_offset(adev->gmc.pdb0_bo)); in gmc_v9_0_gart_enable()
2160 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo)); in gmc_v9_0_gart_enable()
2167 struct amdgpu_device *adev = ip_block->adev; in gmc_v9_0_hw_init() local
2171 adev->gmc.flush_pasid_uses_kiq = true; in gmc_v9_0_hw_init()
2178 adev->gmc.flush_tlb_needs_extra_type_2 = in gmc_v9_0_hw_init()
2179 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0) && in gmc_v9_0_hw_init()
2180 adev->gmc.xgmi.num_physical_nodes; in gmc_v9_0_hw_init()
2183 gmc_v9_0_init_golden_registers(adev); in gmc_v9_0_hw_init()
2185 if (adev->mode_info.num_crtc) { in gmc_v9_0_hw_init()
2192 if (adev->mmhub.funcs->update_power_gating) in gmc_v9_0_hw_init()
2193 adev->mmhub.funcs->update_power_gating(adev, true); in gmc_v9_0_hw_init()
2195 adev->hdp.funcs->init_registers(adev); in gmc_v9_0_hw_init()
2198 amdgpu_device_flush_hdp(adev, NULL); in gmc_v9_0_hw_init()
2205 if (!amdgpu_sriov_vf(adev)) { in gmc_v9_0_hw_init()
2206 if (!adev->in_s0ix) in gmc_v9_0_hw_init()
2207 adev->gfxhub.funcs->set_fault_enable_default(adev, value); in gmc_v9_0_hw_init()
2208 adev->mmhub.funcs->set_fault_enable_default(adev, value); in gmc_v9_0_hw_init()
2210 for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) { in gmc_v9_0_hw_init()
2211 if (adev->in_s0ix && (i == AMDGPU_GFXHUB(0))) in gmc_v9_0_hw_init()
2213 gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0); in gmc_v9_0_hw_init()
2216 if (adev->umc.funcs && adev->umc.funcs->init_registers) in gmc_v9_0_hw_init()
2217 adev->umc.funcs->init_registers(adev); in gmc_v9_0_hw_init()
2219 r = gmc_v9_0_gart_enable(adev); in gmc_v9_0_hw_init()
2224 return amdgpu_gmc_vram_checking(adev); in gmc_v9_0_hw_init()
2236 static void gmc_v9_0_gart_disable(struct amdgpu_device *adev) in gmc_v9_0_gart_disable() argument
2238 if (!adev->in_s0ix) in gmc_v9_0_gart_disable()
2239 adev->gfxhub.funcs->gart_disable(adev); in gmc_v9_0_gart_disable()
2240 adev->mmhub.funcs->gart_disable(adev); in gmc_v9_0_gart_disable()
2245 struct amdgpu_device *adev = ip_block->adev; in gmc_v9_0_hw_fini() local
2247 gmc_v9_0_gart_disable(adev); in gmc_v9_0_hw_fini()
2249 if (amdgpu_sriov_vf(adev)) { in gmc_v9_0_hw_fini()
2260 if (adev->mmhub.funcs->update_power_gating) in gmc_v9_0_hw_fini()
2261 adev->mmhub.funcs->update_power_gating(adev, false); in gmc_v9_0_hw_fini()
2267 if (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) { in gmc_v9_0_hw_fini()
2268 amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0); in gmc_v9_0_hw_fini()
2270 if (adev->gmc.ecc_irq.funcs && in gmc_v9_0_hw_fini()
2271 amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) in gmc_v9_0_hw_fini()
2272 amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0); in gmc_v9_0_hw_fini()
2285 struct amdgpu_device *adev = ip_block->adev; in gmc_v9_0_resume() local
2291 if (adev->gmc.reset_flags & AMDGPU_GMC_INIT_RESET_NPS) { in gmc_v9_0_resume()
2292 amdgpu_gmc_init_sw_mem_ranges(adev, adev->gmc.mem_partitions); in gmc_v9_0_resume()
2293 adev->gmc.reset_flags &= ~AMDGPU_GMC_INIT_RESET_NPS; in gmc_v9_0_resume()
2300 amdgpu_vmid_reset_all(ip_block->adev); in gmc_v9_0_resume()
2326 struct amdgpu_device *adev = ip_block->adev; in gmc_v9_0_set_clockgating_state() local
2328 adev->mmhub.funcs->set_clockgating(adev, state); in gmc_v9_0_set_clockgating_state()
2330 athub_v1_0_set_clockgating(adev, state); in gmc_v9_0_set_clockgating_state()
2337 struct amdgpu_device *adev = ip_block->adev; in gmc_v9_0_get_clockgating_state() local
2339 adev->mmhub.funcs->get_clockgating(adev, flags); in gmc_v9_0_get_clockgating_state()
2341 athub_v1_0_get_clockgating(adev, flags); in gmc_v9_0_get_clockgating_state()