Lines matching references to adev in gmc_v10_0.c (amdgpu GMC v10 driver). Each hit shows the source line number, the code, and the enclosing function; a trailing "argument" or "local" marks how adev is bound at that site.

54 static int gmc_v10_0_ecc_interrupt_state(struct amdgpu_device *adev,  in gmc_v10_0_ecc_interrupt_state()  argument
63 gmc_v10_0_vm_fault_interrupt_state(struct amdgpu_device *adev, in gmc_v10_0_vm_fault_interrupt_state() argument
70 amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB0(0), false); in gmc_v10_0_vm_fault_interrupt_state()
77 if (!adev->in_s0ix) in gmc_v10_0_vm_fault_interrupt_state()
78 amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB(0), false); in gmc_v10_0_vm_fault_interrupt_state()
82 amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB0(0), true); in gmc_v10_0_vm_fault_interrupt_state()
89 if (!adev->in_s0ix) in gmc_v10_0_vm_fault_interrupt_state()
90 amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB(0), true); in gmc_v10_0_vm_fault_interrupt_state()
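A minimal stand-in for the handler shape above, under stated assumptions: the hub names, the irq-state enum, and set_fault_masks() are illustrative, not the driver's types. On DISABLE both hubs get their VM-fault interrupts masked (the false calls at lines 70/78), on ENABLE unmasked (true at lines 82/90), and the GFX hub is skipped in S0ix because it is powered down there.

#include <stdbool.h>
#include <stdio.h>

enum irq_state { IRQ_STATE_DISABLE, IRQ_STATE_ENABLE };

/* hypothetical stand-in for amdgpu_gmc_set_vm_fault_masks() */
static void set_fault_masks(const char *hub, bool enable)
{
	printf("%s fault interrupts %s\n", hub, enable ? "unmasked" : "masked");
}

static void vm_fault_interrupt_state(bool in_s0ix, enum irq_state state)
{
	bool enable = (state == IRQ_STATE_ENABLE);

	set_fault_masks("MMHUB0(0)", enable);
	if (!in_s0ix)			/* GFX hub is powered down in S0ix */
		set_fault_masks("GFXHUB(0)", enable);
}

int main(void)
{
	vm_fault_interrupt_state(false, IRQ_STATE_ENABLE);
	vm_fault_interrupt_state(true, IRQ_STATE_DISABLE);	/* skips the GFX hub */
	return 0;
}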
99 static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev, in gmc_v10_0_process_interrupt() argument
105 struct amdgpu_vmhub *hub = &adev->vmhub[vmhub_index]; in gmc_v10_0_process_interrupt()
119 if (entry->ih != &adev->irq.ih_soft && in gmc_v10_0_process_interrupt()
120 amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid, in gmc_v10_0_process_interrupt()
127 if (entry->ih == &adev->irq.ih) { in gmc_v10_0_process_interrupt()
128 amdgpu_irq_delegate(adev, entry, 8); in gmc_v10_0_process_interrupt()
135 if (amdgpu_vm_handle_fault(adev, entry->pasid, 0, 0, addr, in gmc_v10_0_process_interrupt()
140 if (!amdgpu_sriov_vf(adev)) { in gmc_v10_0_process_interrupt()
147 (amdgpu_ip_version(adev, GC_HWIP, 0) < in gmc_v10_0_process_interrupt()
154 amdgpu_vm_update_fault_cache(adev, entry->pasid, addr, status, in gmc_v10_0_process_interrupt()
161 dev_err(adev->dev, in gmc_v10_0_process_interrupt()
165 task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid); in gmc_v10_0_process_interrupt()
167 amdgpu_vm_print_task_info(adev, task_info); in gmc_v10_0_process_interrupt()
171 dev_err(adev->dev, " in page starting at address 0x%016llx from client 0x%x (%s)\n", in gmc_v10_0_process_interrupt()
179 hub->vmhub_funcs->print_l2_protection_fault_status(adev, in gmc_v10_0_process_interrupt()
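The filtering at lines 119-121 suppresses bursts of identical faults (same address and PASID) so the expensive handling and logging run once. A trivial last-seen sketch of that idea; the driver's real amdgpu_gmc_filter_faults() is ring-aware and more elaborate:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t last_addr = ~0ULL;
static uint32_t last_pasid;

/* toy last-seen filter, not the driver's implementation */
static bool fault_is_duplicate(uint64_t addr, uint32_t pasid)
{
	if (addr == last_addr && pasid == last_pasid)
		return true;		/* burst duplicate, drop it */
	last_addr = addr;
	last_pasid = pasid;
	return false;
}

int main(void)
{
	printf("%d %d %d\n",
	       fault_is_duplicate(0x1000, 3),	/* 0: first sighting */
	       fault_is_duplicate(0x1000, 3),	/* 1: suppressed */
	       fault_is_duplicate(0x2000, 3));	/* 0: new address */
	return 0;
}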
195 static void gmc_v10_0_set_irq_funcs(struct amdgpu_device *adev) in gmc_v10_0_set_irq_funcs() argument
197 adev->gmc.vm_fault.num_types = 1; in gmc_v10_0_set_irq_funcs()
198 adev->gmc.vm_fault.funcs = &gmc_v10_0_irq_funcs; in gmc_v10_0_set_irq_funcs()
200 if (!amdgpu_sriov_vf(adev)) { in gmc_v10_0_set_irq_funcs()
201 adev->gmc.ecc_irq.num_types = 1; in gmc_v10_0_set_irq_funcs()
202 adev->gmc.ecc_irq.funcs = &gmc_v10_0_ecc_funcs; in gmc_v10_0_set_irq_funcs()
213 static bool gmc_v10_0_use_invalidate_semaphore(struct amdgpu_device *adev, in gmc_v10_0_use_invalidate_semaphore() argument
217 (!amdgpu_sriov_vf(adev))); in gmc_v10_0_use_invalidate_semaphore()
221 struct amdgpu_device *adev, in gmc_v10_0_get_atc_vmid_pasid_mapping_info() argument
250 static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, in gmc_v10_0_flush_gpu_tlb() argument
253 bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(adev, vmhub); in gmc_v10_0_flush_gpu_tlb()
254 struct amdgpu_vmhub *hub = &adev->vmhub[vmhub]; in gmc_v10_0_flush_gpu_tlb()
268 amdgpu_device_flush_hdp(adev, NULL); in gmc_v10_0_flush_gpu_tlb()
273 if (adev->gfx.kiq[0].ring.sched.ready && !adev->enable_mes && in gmc_v10_0_flush_gpu_tlb()
274 (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) { in gmc_v10_0_flush_gpu_tlb()
275 amdgpu_gmc_fw_reg_write_reg_wait(adev, req, ack, inv_req, in gmc_v10_0_flush_gpu_tlb()
283 spin_lock(&adev->gmc.invalidate_lock); in gmc_v10_0_flush_gpu_tlb()
293 for (i = 0; i < adev->usec_timeout; i++) { in gmc_v10_0_flush_gpu_tlb()
301 if (i >= adev->usec_timeout) in gmc_v10_0_flush_gpu_tlb()
312 (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 3, 0))) in gmc_v10_0_flush_gpu_tlb()
316 for (i = 0; i < adev->usec_timeout; i++) { in gmc_v10_0_flush_gpu_tlb()
329 spin_unlock(&adev->gmc.invalidate_lock); in gmc_v10_0_flush_gpu_tlb()
331 if (i >= adev->usec_timeout) in gmc_v10_0_flush_gpu_tlb()
332 dev_err(adev->dev, "Timeout waiting for VM flush hub: %d!\n", in gmc_v10_0_flush_gpu_tlb()
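The flush path above polls an ack register up to adev->usec_timeout times (lines 293-301 and 316-331) and reports a timeout if the loop runs out. A minimal user-space model of that poll; rreg32()/wreg32() are stand-ins, not the kernel's RREG32/WREG32 helpers, and the sleep replaces the driver's busy-wait:

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static uint32_t fake_ack;	/* stands in for the hub's invalidate-ack register */

static uint32_t rreg32(void) { return fake_ack; }
static void wreg32(uint32_t v) { fake_ack = v; }	/* a real write would kick the flush */

static int poll_ack(uint32_t expected, unsigned int usec_timeout)
{
	unsigned int i;

	for (i = 0; i < usec_timeout; i++) {
		if (rreg32() & expected)
			return 0;	/* flush acknowledged */
		usleep(1);		/* one iteration per microsecond */
	}
	return -1;			/* mirrors the dev_err() timeout path */
}

int main(void)
{
	wreg32(0x1);			/* pretend the hub acked immediately */
	if (poll_ack(0x1, 100))
		fprintf(stderr, "Timeout waiting for VM flush!\n");
	else
		puts("flush acked");
	return 0;
}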
347 static void gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, in gmc_v10_0_flush_gpu_tlb_pasid() argument
357 valid = gmc_v10_0_get_atc_vmid_pasid_mapping_info(adev, vmid, in gmc_v10_0_flush_gpu_tlb_pasid()
363 for_each_set_bit(i, adev->vmhubs_mask, in gmc_v10_0_flush_gpu_tlb_pasid()
365 gmc_v10_0_flush_gpu_tlb(adev, vmid, i, in gmc_v10_0_flush_gpu_tlb_pasid()
368 gmc_v10_0_flush_gpu_tlb(adev, vmid, AMDGPU_GFXHUB(0), in gmc_v10_0_flush_gpu_tlb_pasid()
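When the PASID lookup at line 357 finds a valid VMID, lines 363-368 flush that VMID's TLB on every populated hub in adev->vmhubs_mask. A plain-C sketch of the dispatch, with flush_hub() as a hypothetical stand-in for gmc_v10_0_flush_gpu_tlb():

#include <stdio.h>

/* hypothetical stand-in for gmc_v10_0_flush_gpu_tlb() */
static void flush_hub(unsigned int vmid, unsigned int hub)
{
	printf("flush vmid %u on hub %u\n", vmid, hub);
}

int main(void)
{
	unsigned long vmhubs_mask = 0x3;	/* GFXHUB(0) | MMHUB0(0), cf. lines 826-827 */

	/* plain-C equivalent of for_each_set_bit(i, adev->vmhubs_mask, ...) */
	for (unsigned int i = 0; i < 8 * sizeof(vmhubs_mask); i++)
		if (vmhubs_mask & (1UL << i))
			flush_hub(8 /* example vmid */, i);
	return 0;
}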
377 bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(ring->adev, ring->vm_hub); in gmc_v10_0_emit_flush_gpu_tlb()
378 struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub]; in gmc_v10_0_emit_flush_gpu_tlb()
425 struct amdgpu_device *adev = ring->adev; in gmc_v10_0_emit_pasid_mapping() local
469 static uint64_t gmc_v10_0_map_mtype(struct amdgpu_device *adev, uint32_t flags) in gmc_v10_0_map_mtype() argument
487 static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level, in gmc_v10_0_get_vm_pde() argument
491 *addr = amdgpu_gmc_vram_mc2pa(adev, *addr); in gmc_v10_0_get_vm_pde()
494 if (!adev->gmc.translate_further) in gmc_v10_0_get_vm_pde()
510 static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev, in gmc_v10_0_get_vm_pte() argument
539 static unsigned int gmc_v10_0_get_vbios_fb_size(struct amdgpu_device *adev) in gmc_v10_0_get_vbios_fb_size() argument
572 static void gmc_v10_0_set_gmc_funcs(struct amdgpu_device *adev) in gmc_v10_0_set_gmc_funcs() argument
574 if (adev->gmc.gmc_funcs == NULL) in gmc_v10_0_set_gmc_funcs()
575 adev->gmc.gmc_funcs = &gmc_v10_0_gmc_funcs; in gmc_v10_0_set_gmc_funcs()
578 static void gmc_v10_0_set_umc_funcs(struct amdgpu_device *adev) in gmc_v10_0_set_umc_funcs() argument
580 switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) { in gmc_v10_0_set_umc_funcs()
582 adev->umc.max_ras_err_cnt_per_query = UMC_V8_7_TOTAL_CHANNEL_NUM; in gmc_v10_0_set_umc_funcs()
583 adev->umc.channel_inst_num = UMC_V8_7_CHANNEL_INSTANCE_NUM; in gmc_v10_0_set_umc_funcs()
584 adev->umc.umc_inst_num = UMC_V8_7_UMC_INSTANCE_NUM; in gmc_v10_0_set_umc_funcs()
585 adev->umc.channel_offs = UMC_V8_7_PER_CHANNEL_OFFSET_SIENNA; in gmc_v10_0_set_umc_funcs()
586 adev->umc.retire_unit = 1; in gmc_v10_0_set_umc_funcs()
587 adev->umc.channel_idx_tbl = &umc_v8_7_channel_idx_tbl[0][0]; in gmc_v10_0_set_umc_funcs()
588 adev->umc.ras = &umc_v8_7_ras; in gmc_v10_0_set_umc_funcs()
595 static void gmc_v10_0_set_mmhub_funcs(struct amdgpu_device *adev) in gmc_v10_0_set_mmhub_funcs() argument
597 switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) { in gmc_v10_0_set_mmhub_funcs()
601 adev->mmhub.funcs = &mmhub_v2_3_funcs; in gmc_v10_0_set_mmhub_funcs()
604 adev->mmhub.funcs = &mmhub_v2_0_funcs; in gmc_v10_0_set_mmhub_funcs()
609 static void gmc_v10_0_set_gfxhub_funcs(struct amdgpu_device *adev) in gmc_v10_0_set_gfxhub_funcs() argument
611 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { in gmc_v10_0_set_gfxhub_funcs()
620 adev->gfxhub.funcs = &gfxhub_v2_1_funcs; in gmc_v10_0_set_gfxhub_funcs()
623 adev->gfxhub.funcs = &gfxhub_v2_0_funcs; in gmc_v10_0_set_gfxhub_funcs()
631 struct amdgpu_device *adev = ip_block->adev; in gmc_v10_0_early_init() local
633 gmc_v10_0_set_mmhub_funcs(adev); in gmc_v10_0_early_init()
634 gmc_v10_0_set_gfxhub_funcs(adev); in gmc_v10_0_early_init()
635 gmc_v10_0_set_gmc_funcs(adev); in gmc_v10_0_early_init()
636 gmc_v10_0_set_irq_funcs(adev); in gmc_v10_0_early_init()
637 gmc_v10_0_set_umc_funcs(adev); in gmc_v10_0_early_init()
639 adev->gmc.shared_aperture_start = 0x2000000000000000ULL; in gmc_v10_0_early_init()
640 adev->gmc.shared_aperture_end = in gmc_v10_0_early_init()
641 adev->gmc.shared_aperture_start + (4ULL << 30) - 1; in gmc_v10_0_early_init()
642 adev->gmc.private_aperture_start = 0x1000000000000000ULL; in gmc_v10_0_early_init()
643 adev->gmc.private_aperture_end = in gmc_v10_0_early_init()
644 adev->gmc.private_aperture_start + (4ULL << 30) - 1; in gmc_v10_0_early_init()
645 adev->gmc.noretry_flags = AMDGPU_VM_NORETRY_FLAGS_TF; in gmc_v10_0_early_init()
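The aperture setup at lines 639-645 carves out two fixed 4 GiB windows in the 64-bit address space, one shared and one private, each ending at start + 4 GiB - 1. A standalone check of that arithmetic:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t shared_start  = 0x2000000000000000ULL;
	uint64_t private_start = 0x1000000000000000ULL;
	uint64_t span = (4ULL << 30) - 1;	/* 4 GiB window, inclusive end */

	printf("shared:  0x%016" PRIx64 " - 0x%016" PRIx64 "\n",
	       shared_start, shared_start + span);
	printf("private: 0x%016" PRIx64 " - 0x%016" PRIx64 "\n",
	       private_start, private_start + span);
	return 0;
}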
652 struct amdgpu_device *adev = ip_block->adev; in gmc_v10_0_late_init() local
655 r = amdgpu_gmc_allocate_vm_inv_eng(adev); in gmc_v10_0_late_init()
659 r = amdgpu_gmc_ras_late_init(adev); in gmc_v10_0_late_init()
663 return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0); in gmc_v10_0_late_init()
666 static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev, in gmc_v10_0_vram_gtt_location() argument
671 base = adev->gfxhub.funcs->get_fb_location(adev); in gmc_v10_0_vram_gtt_location()
674 base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size; in gmc_v10_0_vram_gtt_location()
676 amdgpu_gmc_set_agp_default(adev, mc); in gmc_v10_0_vram_gtt_location()
677 amdgpu_gmc_vram_location(adev, &adev->gmc, base); in gmc_v10_0_vram_gtt_location()
678 amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_BEST_FIT); in gmc_v10_0_vram_gtt_location()
679 if (!amdgpu_sriov_vf(adev) && (amdgpu_agp == 1)) in gmc_v10_0_vram_gtt_location()
680 amdgpu_gmc_agp_location(adev, mc); in gmc_v10_0_vram_gtt_location()
683 adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev); in gmc_v10_0_vram_gtt_location()
686 adev->vm_manager.vram_base_offset += in gmc_v10_0_vram_gtt_location()
687 adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size; in gmc_v10_0_vram_gtt_location()
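Lines 674 and 686-687 apply the same XGMI placement rule twice: on a multi-node hive, each node's VRAM window is offset by physical_node_id * node_segment_size. A standalone check of that math with made-up values (the 8 GiB segment size is hypothetical):

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t fb_base = 0;			/* stand-in for get_fb_location() */
	uint64_t node_segment_size = 8ULL << 30; /* hypothetical 8 GiB per node */

	for (uint32_t node = 0; node < 4; node++)
		printf("node %" PRIu32 " base: 0x%016" PRIx64 "\n",
		       node, fb_base + node * node_segment_size);
	return 0;
}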
699 static int gmc_v10_0_mc_init(struct amdgpu_device *adev) in gmc_v10_0_mc_init() argument
704 adev->gmc.mc_vram_size = in gmc_v10_0_mc_init()
705 adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL; in gmc_v10_0_mc_init()
706 adev->gmc.real_vram_size = adev->gmc.mc_vram_size; in gmc_v10_0_mc_init()
708 if (!(adev->flags & AMD_IS_APU)) { in gmc_v10_0_mc_init()
709 r = amdgpu_device_resize_fb_bar(adev); in gmc_v10_0_mc_init()
713 adev->gmc.aper_base = pci_resource_start(adev->pdev, 0); in gmc_v10_0_mc_init()
714 adev->gmc.aper_size = pci_resource_len(adev->pdev, 0); in gmc_v10_0_mc_init()
717 if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) { in gmc_v10_0_mc_init()
718 adev->gmc.aper_base = adev->gfxhub.funcs->get_mc_fb_offset(adev); in gmc_v10_0_mc_init()
719 adev->gmc.aper_size = adev->gmc.real_vram_size; in gmc_v10_0_mc_init()
723 adev->gmc.visible_vram_size = adev->gmc.aper_size; in gmc_v10_0_mc_init()
727 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { in gmc_v10_0_mc_init()
729 adev->gmc.gart_size = 512ULL << 20; in gmc_v10_0_mc_init()
735 adev->gmc.gart_size = 1024ULL << 20; in gmc_v10_0_mc_init()
739 adev->gmc.gart_size = (u64)amdgpu_gart_size << 20; in gmc_v10_0_mc_init()
742 gmc_v10_0_vram_gtt_location(adev, &adev->gmc); in gmc_v10_0_mc_init()
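Lines 727-742 show the GART sizing policy: a per-ASIC default (512 MiB or 1 GiB above) applies unless the amdgpu_gart_size module parameter overrides it, in which case the parameter is taken as a size in MiB (line 739). A hedged sketch of that policy, assuming -1 means "use the default" as is conventional for this parameter; pick_gart_size() is a hypothetical helper, not a driver function:

#include <inttypes.h>
#include <stdio.h>

/* hypothetical helper mirroring the policy above */
static uint64_t pick_gart_size(long amdgpu_gart_size, uint64_t asic_default)
{
	if (amdgpu_gart_size == -1)
		return asic_default;			/* e.g. 512ULL << 20 or 1024ULL << 20 */
	return (uint64_t)amdgpu_gart_size << 20;	/* module param is in MiB */
}

int main(void)
{
	printf("%" PRIu64 " MiB\n", pick_gart_size(-1, 1024ULL << 20) >> 20);	/* 1024 */
	printf("%" PRIu64 " MiB\n", pick_gart_size(256, 1024ULL << 20) >> 20);	/* 256  */
	return 0;
}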
747 static int gmc_v10_0_gart_init(struct amdgpu_device *adev) in gmc_v10_0_gart_init() argument
751 if (adev->gart.bo) { in gmc_v10_0_gart_init()
757 r = amdgpu_gart_init(adev); in gmc_v10_0_gart_init()
761 adev->gart.table_size = adev->gart.num_gpu_pages * 8; in gmc_v10_0_gart_init()
762 adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_UC) | in gmc_v10_0_gart_init()
765 return amdgpu_gart_table_vram_alloc(adev); in gmc_v10_0_gart_init()
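The sizing at line 761 allocates one 8-byte PTE per GPU page, so a 512 MiB GART with 4 KiB pages needs 131072 PTEs, i.e. 1 MiB of page-table VRAM. A quick standalone check of that arithmetic:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t gart_size = 512ULL << 20;		/* bytes */
	uint64_t page_size = 4096;
	uint64_t num_gpu_pages = gart_size / page_size;
	uint64_t table_size = num_gpu_pages * 8;	/* one 8-byte PTE per page, cf. line 761 */

	printf("GART table: %" PRIu64 " bytes (%" PRIu64 " KiB)\n",
	       table_size, table_size >> 10);
	return 0;
}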
771 struct amdgpu_device *adev = ip_block->adev; in gmc_v10_0_sw_init() local
773 adev->gfxhub.funcs->init(adev); in gmc_v10_0_sw_init()
775 adev->mmhub.funcs->init(adev); in gmc_v10_0_sw_init()
777 spin_lock_init(&adev->gmc.invalidate_lock); in gmc_v10_0_sw_init()
779 if ((adev->flags & AMD_IS_APU) && amdgpu_emu_mode == 1) { in gmc_v10_0_sw_init()
780 adev->gmc.vram_type = AMDGPU_VRAM_TYPE_DDR4; in gmc_v10_0_sw_init()
781 adev->gmc.vram_width = 64; in gmc_v10_0_sw_init()
783 adev->gmc.vram_type = AMDGPU_VRAM_TYPE_GDDR6; in gmc_v10_0_sw_init()
784 adev->gmc.vram_width = 1 * 128; /* numchan * chansize */ in gmc_v10_0_sw_init()
786 r = amdgpu_atomfirmware_get_vram_info(adev, in gmc_v10_0_sw_init()
788 adev->gmc.vram_width = vram_width; in gmc_v10_0_sw_init()
790 adev->gmc.vram_type = vram_type; in gmc_v10_0_sw_init()
791 adev->gmc.vram_vendor = vram_vendor; in gmc_v10_0_sw_init()
794 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { in gmc_v10_0_sw_init()
796 adev->gmc.mall_size = 128 * 1024 * 1024; in gmc_v10_0_sw_init()
799 adev->gmc.mall_size = 96 * 1024 * 1024; in gmc_v10_0_sw_init()
802 adev->gmc.mall_size = 32 * 1024 * 1024; in gmc_v10_0_sw_init()
805 adev->gmc.mall_size = 16 * 1024 * 1024; in gmc_v10_0_sw_init()
808 adev->gmc.mall_size = 0; in gmc_v10_0_sw_init()
812 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { in gmc_v10_0_sw_init()
826 set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask); in gmc_v10_0_sw_init()
827 set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask); in gmc_v10_0_sw_init()
833 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48); in gmc_v10_0_sw_init()
840 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, in gmc_v10_0_sw_init()
842 &adev->gmc.vm_fault); in gmc_v10_0_sw_init()
847 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, in gmc_v10_0_sw_init()
849 &adev->gmc.vm_fault); in gmc_v10_0_sw_init()
853 if (!amdgpu_sriov_vf(adev)) { in gmc_v10_0_sw_init()
855 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0, in gmc_v10_0_sw_init()
856 &adev->gmc.ecc_irq); in gmc_v10_0_sw_init()
865 adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */ in gmc_v10_0_sw_init()
867 r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44)); in gmc_v10_0_sw_init()
869 dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n"); in gmc_v10_0_sw_init()
873 adev->need_swiotlb = drm_need_swiotlb(44); in gmc_v10_0_sw_init()
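Lines 865-873 distinguish two address widths: the GPU's MC can address 48 bits, while DMA to system memory is capped at 44 bits (hence the swiotlb check for the same width). A standalone check of the masks, using the same definition as the kernel's DMA_BIT_MASK macro from linux/dma-mapping.h:

#include <inttypes.h>
#include <stdio.h>

/* same definition as the kernel's linux/dma-mapping.h macro */
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
	printf("DMA_BIT_MASK(44) = 0x%" PRIx64 "\n", (uint64_t)DMA_BIT_MASK(44));	/* 0xfffffffffff */
	printf("48-bit MC mask   = 0x%" PRIx64 "\n", (uint64_t)0xffffffffffffULL);
	return 0;
}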
875 r = gmc_v10_0_mc_init(adev); in gmc_v10_0_sw_init()
879 amdgpu_gmc_get_vbios_allocations(adev); in gmc_v10_0_sw_init()
882 r = amdgpu_bo_init(adev); in gmc_v10_0_sw_init()
886 r = gmc_v10_0_gart_init(adev); in gmc_v10_0_sw_init()
896 adev->vm_manager.first_kfd_vmid = 8; in gmc_v10_0_sw_init()
898 amdgpu_vm_manager_init(adev); in gmc_v10_0_sw_init()
900 r = amdgpu_gmc_ras_sw_init(adev); in gmc_v10_0_sw_init()
914 static void gmc_v10_0_gart_fini(struct amdgpu_device *adev) in gmc_v10_0_gart_fini() argument
916 amdgpu_gart_table_vram_free(adev); in gmc_v10_0_gart_fini()
921 struct amdgpu_device *adev = ip_block->adev; in gmc_v10_0_sw_fini() local
923 amdgpu_vm_manager_fini(adev); in gmc_v10_0_sw_fini()
924 gmc_v10_0_gart_fini(adev); in gmc_v10_0_sw_fini()
925 amdgpu_gem_force_release(adev); in gmc_v10_0_sw_fini()
926 amdgpu_bo_fini(adev); in gmc_v10_0_sw_fini()
931 static void gmc_v10_0_init_golden_registers(struct amdgpu_device *adev) in gmc_v10_0_init_golden_registers() argument
940 static int gmc_v10_0_gart_enable(struct amdgpu_device *adev) in gmc_v10_0_gart_enable() argument
945 if (adev->gart.bo == NULL) { in gmc_v10_0_gart_enable()
946 dev_err(adev->dev, "No VRAM object for PCIE GART.\n"); in gmc_v10_0_gart_enable()
950 amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr); in gmc_v10_0_gart_enable()
952 if (!adev->in_s0ix) { in gmc_v10_0_gart_enable()
953 r = adev->gfxhub.funcs->gart_enable(adev); in gmc_v10_0_gart_enable()
958 r = adev->mmhub.funcs->gart_enable(adev); in gmc_v10_0_gart_enable()
962 adev->hdp.funcs->init_registers(adev); in gmc_v10_0_gart_enable()
965 amdgpu_device_flush_hdp(adev, NULL); in gmc_v10_0_gart_enable()
970 if (!adev->in_s0ix) in gmc_v10_0_gart_enable()
971 adev->gfxhub.funcs->set_fault_enable_default(adev, value); in gmc_v10_0_gart_enable()
972 adev->mmhub.funcs->set_fault_enable_default(adev, value); in gmc_v10_0_gart_enable()
973 gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB0(0), 0); in gmc_v10_0_gart_enable()
974 if (!adev->in_s0ix) in gmc_v10_0_gart_enable()
975 gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB(0), 0); in gmc_v10_0_gart_enable()
978 (unsigned int)(adev->gmc.gart_size >> 20), in gmc_v10_0_gart_enable()
979 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo)); in gmc_v10_0_gart_enable()
986 struct amdgpu_device *adev = ip_block->adev; in gmc_v10_0_hw_init() local
989 adev->gmc.flush_pasid_uses_kiq = !amdgpu_emu_mode; in gmc_v10_0_hw_init()
992 gmc_v10_0_init_golden_registers(adev); in gmc_v10_0_hw_init()
998 if (!adev->in_s0ix && adev->gfxhub.funcs && adev->gfxhub.funcs->utcl2_harvest) in gmc_v10_0_hw_init()
999 adev->gfxhub.funcs->utcl2_harvest(adev); in gmc_v10_0_hw_init()
1001 r = gmc_v10_0_gart_enable(adev); in gmc_v10_0_hw_init()
1006 r = amdgpu_gmc_vram_checking(adev); in gmc_v10_0_hw_init()
1011 if (adev->umc.funcs && adev->umc.funcs->init_registers) in gmc_v10_0_hw_init()
1012 adev->umc.funcs->init_registers(adev); in gmc_v10_0_hw_init()
1024 static void gmc_v10_0_gart_disable(struct amdgpu_device *adev) in gmc_v10_0_gart_disable() argument
1026 if (!adev->in_s0ix) in gmc_v10_0_gart_disable()
1027 adev->gfxhub.funcs->gart_disable(adev); in gmc_v10_0_gart_disable()
1028 adev->mmhub.funcs->gart_disable(adev); in gmc_v10_0_gart_disable()
1033 struct amdgpu_device *adev = ip_block->adev; in gmc_v10_0_hw_fini() local
1035 gmc_v10_0_gart_disable(adev); in gmc_v10_0_hw_fini()
1037 if (amdgpu_sriov_vf(adev)) { in gmc_v10_0_hw_fini()
1043 amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0); in gmc_v10_0_hw_fini()
1045 if (adev->gmc.ecc_irq.funcs && in gmc_v10_0_hw_fini()
1046 amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) in gmc_v10_0_hw_fini()
1047 amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0); in gmc_v10_0_hw_fini()
1067 amdgpu_vmid_reset_all(ip_block->adev); in gmc_v10_0_resume()
1088 struct amdgpu_device *adev = ip_block->adev; in gmc_v10_0_set_clockgating_state() local
1095 if (adev->in_s0ix && in gmc_v10_0_set_clockgating_state()
1096 amdgpu_ip_version(adev, DF_HWIP, 0) > IP_VERSION(3, 0, 2)) { in gmc_v10_0_set_clockgating_state()
1097 dev_dbg(adev->dev, "keep mmhub clock gating being enabled for s0ix\n"); in gmc_v10_0_set_clockgating_state()
1101 r = adev->mmhub.funcs->set_clockgating(adev, state); in gmc_v10_0_set_clockgating_state()
1105 if (amdgpu_ip_version(adev, ATHUB_HWIP, 0) >= IP_VERSION(2, 1, 0)) in gmc_v10_0_set_clockgating_state()
1106 return athub_v2_1_set_clockgating(adev, state); in gmc_v10_0_set_clockgating_state()
1108 return athub_v2_0_set_clockgating(adev, state); in gmc_v10_0_set_clockgating_state()
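Lines 1105-1108 route clockgating to the ATHUB v2.1 entry point for IP versions at or above 2.1.0 and to v2.0 otherwise; packing the version triple into one integer keeps the comparison a single >=. A sketch of that dispatch, with an illustrative packing (the kernel's own IP_VERSION macro differs in detail):

#include <stdio.h>

/* illustrative packing, not the kernel's exact IP_VERSION encoding */
#define IP_VERSION(maj, min, rev) (((maj) << 16) | ((min) << 8) | (rev))

static const char *athub_cg_backend(unsigned int athub_ver)
{
	return athub_ver >= IP_VERSION(2, 1, 0) ? "athub_v2_1" : "athub_v2_0";
}

int main(void)
{
	printf("%s\n", athub_cg_backend(IP_VERSION(2, 1, 1)));	/* athub_v2_1 */
	printf("%s\n", athub_cg_backend(IP_VERSION(2, 0, 0)));	/* athub_v2_0 */
	return 0;
}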
1113 struct amdgpu_device *adev = ip_block->adev; in gmc_v10_0_get_clockgating_state() local
1115 if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 1, 3) || in gmc_v10_0_get_clockgating_state()
1116 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 1, 4)) in gmc_v10_0_get_clockgating_state()
1119 adev->mmhub.funcs->get_clockgating(adev, flags); in gmc_v10_0_get_clockgating_state()
1121 if (amdgpu_ip_version(adev, ATHUB_HWIP, 0) >= IP_VERSION(2, 1, 0)) in gmc_v10_0_get_clockgating_state()
1122 athub_v2_1_get_clockgating(adev, flags); in gmc_v10_0_get_clockgating_state()
1124 athub_v2_0_get_clockgating(adev, flags); in gmc_v10_0_get_clockgating_state()