Lines matching refs: adev — cross-reference listing for gmc_v11_0.c (amdgpu GMC v11). Each entry shows the source line number, the matching line, and the enclosing function; "argument" and "local" mark whether adev enters that function as a parameter or a local variable.

49 static int gmc_v11_0_ecc_interrupt_state(struct amdgpu_device *adev,  in gmc_v11_0_ecc_interrupt_state()  argument
58 gmc_v11_0_vm_fault_interrupt_state(struct amdgpu_device *adev, in gmc_v11_0_vm_fault_interrupt_state() argument
65 amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, false); in gmc_v11_0_vm_fault_interrupt_state()
72 if (!adev->in_s0ix) in gmc_v11_0_vm_fault_interrupt_state()
73 amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, false); in gmc_v11_0_vm_fault_interrupt_state()
77 amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, true); in gmc_v11_0_vm_fault_interrupt_state()
84 if (!adev->in_s0ix) in gmc_v11_0_vm_fault_interrupt_state()
85 amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, true); in gmc_v11_0_vm_fault_interrupt_state()
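Taken together, lines 58-85 reduce to one pattern: an interrupt state change toggles the VM-fault masks on both hubs, except that the GFX hub is left untouched in S0ix, where GFX is powered down. A condensed paraphrase, not the verbatim kernel body (the enum and struct names are the usual ones from amdgpu_irq.h):

static int
gmc_v11_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *src, unsigned type,
				   enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* Always mask MMHUB faults... */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, false);
		/* ...but skip the GFX hub in S0ix, where GFX is powered off. */
		if (!adev->in_s0ix)
			amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, false);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, true);
		if (!adev->in_s0ix)
			amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, true);
		break;
	default:
		break;
	}
	return 0;
}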
94 static int gmc_v11_0_process_interrupt(struct amdgpu_device *adev, in gmc_v11_0_process_interrupt() argument
98 struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src]; in gmc_v11_0_process_interrupt()
105 if (!amdgpu_sriov_vf(adev)) { in gmc_v11_0_process_interrupt()
122 amdgpu_vm_get_task_info(adev, entry->pasid, &task_info); in gmc_v11_0_process_interrupt()
124 dev_err(adev->dev, in gmc_v11_0_process_interrupt()
131 dev_err(adev->dev, " in page starting at address 0x%016llx from client %d\n", in gmc_v11_0_process_interrupt()
133 if (!amdgpu_sriov_vf(adev)) in gmc_v11_0_process_interrupt()
134 hub->vmhub_funcs->print_l2_protection_fault_status(adev, status); in gmc_v11_0_process_interrupt()
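Lines 94-134 are the fault decoder. A condensed sketch of how the pieces fit (field names are from struct amdgpu_iv_entry; the pid/thread detail of the real dev_err format and the status-clear write via vm_l2_pro_fault_cntl are omitted here):

static int gmc_v11_0_process_interrupt(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       struct amdgpu_iv_entry *entry)
{
	/* entry->vmid_src selects the hub the fault came from. */
	struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
	struct amdgpu_task_info task_info;
	uint32_t status = 0;
	u64 addr = (u64)entry->src_data[0] << 12;

	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	/* Only bare metal may read (and clear) the L2 protection fault
	 * status register; a VF must leave it alone. */
	if (!amdgpu_sriov_vf(adev))
		status = RREG32(hub->vm_l2_pro_fault_status);

	memset(&task_info, 0, sizeof(struct amdgpu_task_info));
	amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

	dev_err(adev->dev,
		"[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u) from process %s\n",
		entry->vmid_src ? "mmhub" : "gfxhub", entry->src_id,
		entry->ring_id, entry->vmid, entry->pasid,
		task_info.process_name);
	dev_err(adev->dev, " in page starting at address 0x%016llx from client %d\n",
		addr, entry->client_id);
	if (!amdgpu_sriov_vf(adev))
		hub->vmhub_funcs->print_l2_protection_fault_status(adev, status);

	return 0;
}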
150 static void gmc_v11_0_set_irq_funcs(struct amdgpu_device *adev) in gmc_v11_0_set_irq_funcs() argument
152 adev->gmc.vm_fault.num_types = 1; in gmc_v11_0_set_irq_funcs()
153 adev->gmc.vm_fault.funcs = &gmc_v11_0_irq_funcs; in gmc_v11_0_set_irq_funcs()
155 if (!amdgpu_sriov_vf(adev)) { in gmc_v11_0_set_irq_funcs()
156 adev->gmc.ecc_irq.num_types = 1; in gmc_v11_0_set_irq_funcs()
157 adev->gmc.ecc_irq.funcs = &gmc_v11_0_ecc_funcs; in gmc_v11_0_set_irq_funcs()
168 static bool gmc_v11_0_use_invalidate_semaphore(struct amdgpu_device *adev, in gmc_v11_0_use_invalidate_semaphore() argument
172 (!amdgpu_sriov_vf(adev))); in gmc_v11_0_use_invalidate_semaphore()
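The predicate on lines 168-172 is the entire semaphore policy: the software invalidation semaphore is used only for the MMHUB and only on bare metal. The comment in the v10/v11 flush path attributes this to the hub being able to lose GPUVM invalidate-acknowledge state across a power-gating cycle; holding the semaphore keeps the hub awake between request and ack. Reconstructed:

static bool gmc_v11_0_use_invalidate_semaphore(struct amdgpu_device *adev,
					       uint32_t vmhub)
{
	return ((vmhub == AMDGPU_MMHUB_0) &&
		(!amdgpu_sriov_vf(adev)));
}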
176 struct amdgpu_device *adev, in gmc_v11_0_get_vmid_pasid_mapping_info() argument
191 static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, in gmc_v11_0_flush_vm_hub() argument
194 bool use_semaphore = gmc_v11_0_use_invalidate_semaphore(adev, vmhub); in gmc_v11_0_flush_vm_hub()
195 struct amdgpu_vmhub *hub = &adev->vmhub[vmhub]; in gmc_v11_0_flush_vm_hub()
206 spin_lock(&adev->gmc.invalidate_lock); in gmc_v11_0_flush_vm_hub()
216 for (i = 0; i < adev->usec_timeout; i++) { in gmc_v11_0_flush_vm_hub()
225 if (i >= adev->usec_timeout) in gmc_v11_0_flush_vm_hub()
232 for (i = 0; i < adev->usec_timeout; i++) { in gmc_v11_0_flush_vm_hub()
254 !amdgpu_sriov_vf(adev)) { in gmc_v11_0_flush_vm_hub()
264 spin_unlock(&adev->gmc.invalidate_lock); in gmc_v11_0_flush_vm_hub()
266 if (i < adev->usec_timeout) in gmc_v11_0_flush_vm_hub()
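The flush body referenced on lines 194-266 is a small MMIO state machine under gmc.invalidate_lock. A skeleton, assuming the per-engine offsets (vm_inv_eng0_sem/req/ack, eng_distance) from struct amdgpu_vmhub and the locals i/tmp/eng/inv_req/vmid of the full function; the clock-gating and SRIOV workaround branch at line 254 is omitted:

spin_lock(&adev->gmc.invalidate_lock);

if (use_semaphore) {
	/* Acquire the hub's invalidation semaphore so it cannot
	 * power-gate between the request and the ack. */
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem +
				    hub->eng_distance * eng);
		if (tmp & 0x1)
			break;
		udelay(1);
	}
	if (i >= adev->usec_timeout)
		DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
}

WREG32_NO_KIQ(hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req);

/* Wait for this VMID's bit to show up in the ack register. */
for (i = 0; i < adev->usec_timeout; i++) {
	tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + hub->eng_distance * eng);
	if (tmp & (1 << vmid))
		break;
	udelay(1);
}

if (use_semaphore)
	/* Release the semaphore by writing 0. */
	WREG32_NO_KIQ(hub->vm_inv_eng0_sem + hub->eng_distance * eng, 0);

spin_unlock(&adev->gmc.invalidate_lock);

if (i < adev->usec_timeout)
	return;

DRM_ERROR("Timeout waiting for VM flush ACK!\n");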
280 static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, in gmc_v11_0_flush_gpu_tlb() argument
283 if ((vmhub == AMDGPU_GFXHUB_0) && !adev->gfx.is_poweron) in gmc_v11_0_flush_gpu_tlb()
287 adev->hdp.funcs->flush_hdp(adev, NULL); in gmc_v11_0_flush_gpu_tlb()
292 if ((adev->gfx.kiq.ring.sched.ready || adev->mes.ring.sched.ready) && in gmc_v11_0_flush_gpu_tlb()
293 (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) { in gmc_v11_0_flush_gpu_tlb()
294 struct amdgpu_vmhub *hub = &adev->vmhub[vmhub]; in gmc_v11_0_flush_gpu_tlb()
300 amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req, in gmc_v11_0_flush_gpu_tlb()
305 mutex_lock(&adev->mman.gtt_window_lock); in gmc_v11_0_flush_gpu_tlb()
306 gmc_v11_0_flush_vm_hub(adev, vmid, vmhub, 0); in gmc_v11_0_flush_gpu_tlb()
307 mutex_unlock(&adev->mman.gtt_window_lock); in gmc_v11_0_flush_gpu_tlb()
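Lines 280-307 encode the dispatch policy for a TLB flush: skip the GFX hub while GFX is powered off, prefer the KIQ/MES register write-wait packet whenever a scheduler ring is up (mandatory on an active SRIOV VF), and otherwise fall back to direct MMIO under gtt_window_lock. Condensed, with the req/ack/inv_req setup the listing elides (eng = 17 and get_invalidate_req are taken from the kernel source):

if ((vmhub == AMDGPU_GFXHUB_0) && !adev->gfx.is_poweron)
	return;

adev->hdp.funcs->flush_hdp(adev, NULL);

if ((adev->gfx.kiq.ring.sched.ready || adev->mes.ring.sched.ready) &&
    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
	struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
	const unsigned eng = 17;
	u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
	u32 req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
	u32 ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;

	amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
					   1 << vmid);
	return;
}

mutex_lock(&adev->mman.gtt_window_lock);
gmc_v11_0_flush_vm_hub(adev, vmid, vmhub, 0);
mutex_unlock(&adev->mman.gtt_window_lock);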
319 static int gmc_v11_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, in gmc_v11_0_flush_gpu_tlb_pasid() argument
328 struct amdgpu_ring *ring = &adev->gfx.kiq.ring; in gmc_v11_0_flush_gpu_tlb_pasid()
329 struct amdgpu_kiq *kiq = &adev->gfx.kiq; in gmc_v11_0_flush_gpu_tlb_pasid()
332 spin_lock(&adev->gfx.kiq.ring_lock); in gmc_v11_0_flush_gpu_tlb_pasid()
340 spin_unlock(&adev->gfx.kiq.ring_lock); in gmc_v11_0_flush_gpu_tlb_pasid()
345 spin_unlock(&adev->gfx.kiq.ring_lock); in gmc_v11_0_flush_gpu_tlb_pasid()
346 r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout); in gmc_v11_0_flush_gpu_tlb_pasid()
348 dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r); in gmc_v11_0_flush_gpu_tlb_pasid()
357 ret = gmc_v11_0_get_vmid_pasid_mapping_info(adev, vmid, in gmc_v11_0_flush_gpu_tlb_pasid()
361 for (i = 0; i < adev->num_vmhubs; i++) in gmc_v11_0_flush_gpu_tlb_pasid()
362 gmc_v11_0_flush_gpu_tlb(adev, vmid, in gmc_v11_0_flush_gpu_tlb_pasid()
365 gmc_v11_0_flush_gpu_tlb(adev, vmid, in gmc_v11_0_flush_gpu_tlb_pasid()
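For the PASID variant (lines 319-365) there are again two paths: with KIQ up, an invalidate-TLBs packet is emitted under kiq.ring_lock and the fence is polled (lines 332-348); otherwise the driver walks the VMID-PASID mapping registers itself. A sketch of that CPU fallback (the 16-VMID loop bound and the queried_pasid local are from the kernel source, not the listing):

for (vmid = 1; vmid < 16; vmid++) {
	uint16_t queried_pasid;
	bool valid = gmc_v11_0_get_vmid_pasid_mapping_info(adev, vmid,
							   &queried_pasid);

	if (valid && queried_pasid == pasid) {
		if (all_hub) {
			/* Flush this VMID on every hub... */
			for (i = 0; i < adev->num_vmhubs; i++)
				gmc_v11_0_flush_gpu_tlb(adev, vmid,
							i, flush_type);
		} else {
			/* ...or just on the GFX hub. */
			gmc_v11_0_flush_gpu_tlb(adev, vmid,
						AMDGPU_GFXHUB_0, flush_type);
		}
	}
}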
377 bool use_semaphore = gmc_v11_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub); in gmc_v11_0_emit_flush_gpu_tlb()
378 struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub]; in gmc_v11_0_emit_flush_gpu_tlb()
425 struct amdgpu_device *adev = ring->adev; in gmc_v11_0_emit_pasid_mapping() local
472 static uint64_t gmc_v11_0_map_mtype(struct amdgpu_device *adev, uint32_t flags) in gmc_v11_0_map_mtype() argument
490 static void gmc_v11_0_get_vm_pde(struct amdgpu_device *adev, int level, in gmc_v11_0_get_vm_pde() argument
494 *addr = adev->vm_manager.vram_base_offset + *addr - in gmc_v11_0_get_vm_pde()
495 adev->gmc.vram_start; in gmc_v11_0_get_vm_pde()
498 if (!adev->gmc.translate_further) in gmc_v11_0_get_vm_pde()
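The PDE rewrite on lines 494-495 rebases a CPU-side VRAM address into an MC address before it is written into a directory entry. A hedged reconstruction of the surrounding function; the translate_further branch below mirrors what gmc_v10 does at the same spot and is an assumption, since the listing only shows the guard on line 498:

static void gmc_v11_0_get_vm_pde(struct amdgpu_device *adev, int level,
				 uint64_t *addr, uint64_t *flags)
{
	/* Only plain VRAM pages need rebasing; system pages and
	 * PDE-as-PTE entries already carry a usable address. */
	if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
		*addr = adev->vm_manager.vram_base_offset + *addr -
			adev->gmc.vram_start;
	*addr = amdgpu_gmc_sign_extend(*addr);

	if (!adev->gmc.translate_further)
		return;

	if (level == AMDGPU_VM_PDB1) {
		/* 9-bit block fragment size for the 512-entry PDB1. */
		*flags |= AMDGPU_PDE_BFS(0x9);
	} else if (level == AMDGPU_VM_PDB0) {
		if (*flags & AMDGPU_PDE_PTE)
			*flags &= ~AMDGPU_PDE_PTE;
		else
			*flags |= AMDGPU_PTE_TF;
	}
}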
514 static void gmc_v11_0_get_vm_pte(struct amdgpu_device *adev, in gmc_v11_0_get_vm_pte() argument
543 static unsigned gmc_v11_0_get_vbios_fb_size(struct amdgpu_device *adev) in gmc_v11_0_get_vbios_fb_size() argument
559 static void gmc_v11_0_set_gmc_funcs(struct amdgpu_device *adev) in gmc_v11_0_set_gmc_funcs() argument
561 adev->gmc.gmc_funcs = &gmc_v11_0_gmc_funcs; in gmc_v11_0_set_gmc_funcs()
564 static void gmc_v11_0_set_umc_funcs(struct amdgpu_device *adev) in gmc_v11_0_set_umc_funcs() argument
566 switch (adev->ip_versions[UMC_HWIP][0]) { in gmc_v11_0_set_umc_funcs()
568 adev->umc.channel_inst_num = UMC_V8_10_CHANNEL_INSTANCE_NUM; in gmc_v11_0_set_umc_funcs()
569 adev->umc.umc_inst_num = UMC_V8_10_UMC_INSTANCE_NUM; in gmc_v11_0_set_umc_funcs()
570 adev->umc.max_ras_err_cnt_per_query = UMC_V8_10_TOTAL_CHANNEL_NUM(adev); in gmc_v11_0_set_umc_funcs()
571 adev->umc.channel_offs = UMC_V8_10_PER_CHANNEL_OFFSET; in gmc_v11_0_set_umc_funcs()
572 adev->umc.retire_unit = UMC_V8_10_NA_COL_2BITS_POWER_OF_2_NUM; in gmc_v11_0_set_umc_funcs()
573 if (adev->umc.node_inst_num == 4) in gmc_v11_0_set_umc_funcs()
574 adev->umc.channel_idx_tbl = &umc_v8_10_channel_idx_tbl_ext0[0][0][0]; in gmc_v11_0_set_umc_funcs()
576 adev->umc.channel_idx_tbl = &umc_v8_10_channel_idx_tbl[0][0][0]; in gmc_v11_0_set_umc_funcs()
577 adev->umc.ras = &umc_v8_10_ras; in gmc_v11_0_set_umc_funcs()
585 if (adev->umc.ras) { in gmc_v11_0_set_umc_funcs()
586 amdgpu_ras_register_ras_block(adev, &adev->umc.ras->ras_block); in gmc_v11_0_set_umc_funcs()
588 strcpy(adev->umc.ras->ras_block.ras_comm.name, "umc"); in gmc_v11_0_set_umc_funcs()
589 adev->umc.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__UMC; in gmc_v11_0_set_umc_funcs()
590 adev->umc.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; in gmc_v11_0_set_umc_funcs()
591 adev->umc.ras_if = &adev->umc.ras->ras_block.ras_comm; in gmc_v11_0_set_umc_funcs()
594 if (!adev->umc.ras->ras_block.ras_late_init) in gmc_v11_0_set_umc_funcs()
595 adev->umc.ras->ras_block.ras_late_init = amdgpu_umc_ras_late_init; in gmc_v11_0_set_umc_funcs()
598 if (!adev->umc.ras->ras_block.ras_cb) in gmc_v11_0_set_umc_funcs()
599 adev->umc.ras->ras_block.ras_cb = amdgpu_umc_process_ras_data_cb; in gmc_v11_0_set_umc_funcs()
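Reassembling lines 585-599 shows the RAS hookup pattern used across amdgpu IPs: register the block, stamp its common identity, then fill in driver-wide defaults only where the UMC implementation left a hook NULL:

if (adev->umc.ras) {
	amdgpu_ras_register_ras_block(adev, &adev->umc.ras->ras_block);

	strcpy(adev->umc.ras->ras_block.ras_comm.name, "umc");
	adev->umc.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__UMC;
	adev->umc.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
	adev->umc.ras_if = &adev->umc.ras->ras_block.ras_comm;

	/* Keep an IP-specific ras_late_init/ras_cb if one was set;
	 * otherwise fall back to the generic UMC handlers. */
	if (!adev->umc.ras->ras_block.ras_late_init)
		adev->umc.ras->ras_block.ras_late_init = amdgpu_umc_ras_late_init;

	if (!adev->umc.ras->ras_block.ras_cb)
		adev->umc.ras->ras_block.ras_cb = amdgpu_umc_process_ras_data_cb;
}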
604 static void gmc_v11_0_set_mmhub_funcs(struct amdgpu_device *adev) in gmc_v11_0_set_mmhub_funcs() argument
606 switch (adev->ip_versions[MMHUB_HWIP][0]) { in gmc_v11_0_set_mmhub_funcs()
608 adev->mmhub.funcs = &mmhub_v3_0_1_funcs; in gmc_v11_0_set_mmhub_funcs()
611 adev->mmhub.funcs = &mmhub_v3_0_2_funcs; in gmc_v11_0_set_mmhub_funcs()
614 adev->mmhub.funcs = &mmhub_v3_0_funcs; in gmc_v11_0_set_mmhub_funcs()
619 static void gmc_v11_0_set_gfxhub_funcs(struct amdgpu_device *adev) in gmc_v11_0_set_gfxhub_funcs() argument
621 switch (adev->ip_versions[GC_HWIP][0]) { in gmc_v11_0_set_gfxhub_funcs()
623 adev->gfxhub.funcs = &gfxhub_v3_0_3_funcs; in gmc_v11_0_set_gfxhub_funcs()
626 adev->gfxhub.funcs = &gfxhub_v3_0_funcs; in gmc_v11_0_set_gfxhub_funcs()
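Both hub function tables are picked by discovered IP version. Reconstructed switches (the IP_VERSION case values are from the kernel source; the listing shows only the assignments):

switch (adev->ip_versions[MMHUB_HWIP][0]) {
case IP_VERSION(3, 0, 1):
	adev->mmhub.funcs = &mmhub_v3_0_1_funcs;
	break;
case IP_VERSION(3, 0, 2):
	adev->mmhub.funcs = &mmhub_v3_0_2_funcs;
	break;
default:
	adev->mmhub.funcs = &mmhub_v3_0_funcs;
	break;
}

switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(11, 0, 3):
	adev->gfxhub.funcs = &gfxhub_v3_0_3_funcs;
	break;
default:
	adev->gfxhub.funcs = &gfxhub_v3_0_funcs;
	break;
}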
633 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v11_0_early_init() local
635 gmc_v11_0_set_gfxhub_funcs(adev); in gmc_v11_0_early_init()
636 gmc_v11_0_set_mmhub_funcs(adev); in gmc_v11_0_early_init()
637 gmc_v11_0_set_gmc_funcs(adev); in gmc_v11_0_early_init()
638 gmc_v11_0_set_irq_funcs(adev); in gmc_v11_0_early_init()
639 gmc_v11_0_set_umc_funcs(adev); in gmc_v11_0_early_init()
641 adev->gmc.shared_aperture_start = 0x2000000000000000ULL; in gmc_v11_0_early_init()
642 adev->gmc.shared_aperture_end = in gmc_v11_0_early_init()
643 adev->gmc.shared_aperture_start + (4ULL << 30) - 1; in gmc_v11_0_early_init()
644 adev->gmc.private_aperture_start = 0x1000000000000000ULL; in gmc_v11_0_early_init()
645 adev->gmc.private_aperture_end = in gmc_v11_0_early_init()
646 adev->gmc.private_aperture_start + (4ULL << 30) - 1; in gmc_v11_0_early_init()
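For reference, (4ULL << 30) is 0x100000000 bytes (4 GiB), so lines 641-646 pin the two system apertures to fixed 4 GiB windows: shared 0x2000000000000000-0x20000000FFFFFFFF and private 0x1000000000000000-0x10000000FFFFFFFF.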
653 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v11_0_late_init() local
656 r = amdgpu_gmc_allocate_vm_inv_eng(adev); in gmc_v11_0_late_init()
660 r = amdgpu_gmc_ras_late_init(adev); in gmc_v11_0_late_init()
664 return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0); in gmc_v11_0_late_init()
667 static void gmc_v11_0_vram_gtt_location(struct amdgpu_device *adev, in gmc_v11_0_vram_gtt_location() argument
672 base = adev->mmhub.funcs->get_fb_location(adev); in gmc_v11_0_vram_gtt_location()
674 amdgpu_gmc_vram_location(adev, &adev->gmc, base); in gmc_v11_0_vram_gtt_location()
675 amdgpu_gmc_gart_location(adev, mc); in gmc_v11_0_vram_gtt_location()
676 amdgpu_gmc_agp_location(adev, mc); in gmc_v11_0_vram_gtt_location()
679 if (amdgpu_sriov_vf(adev)) in gmc_v11_0_vram_gtt_location()
680 adev->vm_manager.vram_base_offset = 0; in gmc_v11_0_vram_gtt_location()
682 adev->vm_manager.vram_base_offset = adev->mmhub.funcs->get_mc_fb_offset(adev); in gmc_v11_0_vram_gtt_location()
694 static int gmc_v11_0_mc_init(struct amdgpu_device *adev) in gmc_v11_0_mc_init() argument
699 adev->gmc.mc_vram_size = in gmc_v11_0_mc_init()
700 adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL; in gmc_v11_0_mc_init()
701 adev->gmc.real_vram_size = adev->gmc.mc_vram_size; in gmc_v11_0_mc_init()
703 if (!(adev->flags & AMD_IS_APU)) { in gmc_v11_0_mc_init()
704 r = amdgpu_device_resize_fb_bar(adev); in gmc_v11_0_mc_init()
708 adev->gmc.aper_base = pci_resource_start(adev->pdev, 0); in gmc_v11_0_mc_init()
709 adev->gmc.aper_size = pci_resource_len(adev->pdev, 0); in gmc_v11_0_mc_init()
712 if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) { in gmc_v11_0_mc_init()
713 adev->gmc.aper_base = adev->mmhub.funcs->get_mc_fb_offset(adev); in gmc_v11_0_mc_init()
714 adev->gmc.aper_size = adev->gmc.real_vram_size; in gmc_v11_0_mc_init()
718 adev->gmc.visible_vram_size = adev->gmc.aper_size; in gmc_v11_0_mc_init()
719 if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size) in gmc_v11_0_mc_init()
720 adev->gmc.visible_vram_size = adev->gmc.real_vram_size; in gmc_v11_0_mc_init()
724 adev->gmc.gart_size = 512ULL << 20; in gmc_v11_0_mc_init()
726 adev->gmc.gart_size = (u64)amdgpu_gart_size << 20; in gmc_v11_0_mc_init()
728 gmc_v11_0_vram_gtt_location(adev, &adev->gmc); in gmc_v11_0_mc_init()
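Lines 724 and 726 are the two arms of an if/else on the amdgpu_gart_size module parameter (in MiB, -1 meaning auto). Reconstructed:

if (amdgpu_gart_size == -1) {
	/* No user override: default to a 512 MiB GART. */
	adev->gmc.gart_size = 512ULL << 20;
} else {
	adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
}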
733 static int gmc_v11_0_gart_init(struct amdgpu_device *adev) in gmc_v11_0_gart_init() argument
737 if (adev->gart.bo) { in gmc_v11_0_gart_init()
743 r = amdgpu_gart_init(adev); in gmc_v11_0_gart_init()
747 adev->gart.table_size = adev->gart.num_gpu_pages * 8; in gmc_v11_0_gart_init()
748 adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_NV10(MTYPE_UC) | in gmc_v11_0_gart_init()
751 return amdgpu_gart_table_vram_alloc(adev); in gmc_v11_0_gart_init()
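The table_size arithmetic on line 747 follows from the 8-byte GPUVM PTE format: one 64-bit entry per GART page, so the default 512 MiB GART at 4 KiB pages needs 131072 entries, a 1 MiB table. The flags on line 748 continue (in the kernel source) with "| AMDGPU_PTE_EXECUTABLE", making GART pages uncached but executable.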
757 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v11_0_sw_init() local
759 adev->mmhub.funcs->init(adev); in gmc_v11_0_sw_init()
761 spin_lock_init(&adev->gmc.invalidate_lock); in gmc_v11_0_sw_init()
763 r = amdgpu_atomfirmware_get_vram_info(adev, in gmc_v11_0_sw_init()
765 adev->gmc.vram_width = vram_width; in gmc_v11_0_sw_init()
767 adev->gmc.vram_type = vram_type; in gmc_v11_0_sw_init()
768 adev->gmc.vram_vendor = vram_vendor; in gmc_v11_0_sw_init()
770 switch (adev->ip_versions[GC_HWIP][0]) { in gmc_v11_0_sw_init()
776 adev->num_vmhubs = 2; in gmc_v11_0_sw_init()
782 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48); in gmc_v11_0_sw_init()
789 r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_VMC, in gmc_v11_0_sw_init()
791 &adev->gmc.vm_fault); in gmc_v11_0_sw_init()
796 r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX, in gmc_v11_0_sw_init()
798 &adev->gmc.vm_fault); in gmc_v11_0_sw_init()
802 if (!amdgpu_sriov_vf(adev)) { in gmc_v11_0_sw_init()
804 r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_DF, 0, in gmc_v11_0_sw_init()
805 &adev->gmc.ecc_irq); in gmc_v11_0_sw_init()
814 adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */ in gmc_v11_0_sw_init()
816 r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44)); in gmc_v11_0_sw_init()
822 adev->need_swiotlb = drm_need_swiotlb(44); in gmc_v11_0_sw_init()
824 r = gmc_v11_0_mc_init(adev); in gmc_v11_0_sw_init()
828 amdgpu_gmc_get_vbios_allocations(adev); in gmc_v11_0_sw_init()
831 r = amdgpu_bo_init(adev); in gmc_v11_0_sw_init()
835 r = gmc_v11_0_gart_init(adev); in gmc_v11_0_sw_init()
845 adev->vm_manager.first_kfd_vmid = 8; in gmc_v11_0_sw_init()
847 amdgpu_vm_manager_init(adev); in gmc_v11_0_sw_init()
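Two of the magic numbers in sw_init decode as follows. amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48) on line 782 requests a 256 TiB (256 * 1024 GiB) per-VM address space with 9-bit block fragments, max_level 3 and 48-bit virtual addresses, matching the 48-bit MC mask on line 814. The DMA path, by contrast, is capped at 44 bits, which is why dma_set_mask_and_coherent() on line 816 and drm_need_swiotlb() on line 822 both use 44.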
859 static void gmc_v11_0_gart_fini(struct amdgpu_device *adev) in gmc_v11_0_gart_fini() argument
861 amdgpu_gart_table_vram_free(adev); in gmc_v11_0_gart_fini()
866 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v11_0_sw_fini() local
868 amdgpu_vm_manager_fini(adev); in gmc_v11_0_sw_fini()
869 gmc_v11_0_gart_fini(adev); in gmc_v11_0_sw_fini()
870 amdgpu_gem_force_release(adev); in gmc_v11_0_sw_fini()
871 amdgpu_bo_fini(adev); in gmc_v11_0_sw_fini()
876 static void gmc_v11_0_init_golden_registers(struct amdgpu_device *adev) in gmc_v11_0_init_golden_registers() argument
885 static int gmc_v11_0_gart_enable(struct amdgpu_device *adev) in gmc_v11_0_gart_enable() argument
890 if (adev->gart.bo == NULL) { in gmc_v11_0_gart_enable()
891 dev_err(adev->dev, "No VRAM object for PCIE GART.\n"); in gmc_v11_0_gart_enable()
895 amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr); in gmc_v11_0_gart_enable()
897 r = adev->mmhub.funcs->gart_enable(adev); in gmc_v11_0_gart_enable()
902 adev->hdp.funcs->flush_hdp(adev, NULL); in gmc_v11_0_gart_enable()
907 adev->mmhub.funcs->set_fault_enable_default(adev, value); in gmc_v11_0_gart_enable()
908 gmc_v11_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB_0, 0); in gmc_v11_0_gart_enable()
911 (unsigned)(adev->gmc.gart_size >> 20), in gmc_v11_0_gart_enable()
912 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo)); in gmc_v11_0_gart_enable()
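Lines 885-912 spell out the GART bring-up order. Condensed (value is the full function's local for the fault policy):

/* Re-create GTT mappings that existed before a reset/resume. */
amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);

/* Program the GART registers; on GMC v11 the table sits behind
 * the MMHUB. */
r = adev->mmhub.funcs->gart_enable(adev);
if (r)
	return r;

adev->hdp.funcs->flush_hdp(adev, NULL);

value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
	false : true;
adev->mmhub.funcs->set_fault_enable_default(adev, value);

/* Invalidate the MMHUB TLB so the new table takes effect. */
gmc_v11_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB_0, 0);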
920 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v11_0_hw_init() local
923 gmc_v11_0_init_golden_registers(adev); in gmc_v11_0_hw_init()
925 r = gmc_v11_0_gart_enable(adev); in gmc_v11_0_hw_init()
929 if (adev->umc.funcs && adev->umc.funcs->init_registers) in gmc_v11_0_hw_init()
930 adev->umc.funcs->init_registers(adev); in gmc_v11_0_hw_init()
942 static void gmc_v11_0_gart_disable(struct amdgpu_device *adev) in gmc_v11_0_gart_disable() argument
944 adev->mmhub.funcs->gart_disable(adev); in gmc_v11_0_gart_disable()
949 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v11_0_hw_fini() local
951 if (amdgpu_sriov_vf(adev)) { in gmc_v11_0_hw_fini()
957 amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0); in gmc_v11_0_hw_fini()
958 amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0); in gmc_v11_0_hw_fini()
959 gmc_v11_0_gart_disable(adev); in gmc_v11_0_hw_fini()
966 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v11_0_suspend() local
968 gmc_v11_0_hw_fini(adev); in gmc_v11_0_suspend()
976 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v11_0_resume() local
978 r = gmc_v11_0_hw_init(adev); in gmc_v11_0_resume()
982 amdgpu_vmid_reset_all(adev); in gmc_v11_0_resume()
1008 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v11_0_set_clockgating_state() local
1010 r = adev->mmhub.funcs->set_clockgating(adev, state); in gmc_v11_0_set_clockgating_state()
1014 return athub_v3_0_set_clockgating(adev, state); in gmc_v11_0_set_clockgating_state()
1019 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v11_0_get_clockgating_state() local
1021 adev->mmhub.funcs->get_clockgating(adev, flags); in gmc_v11_0_get_clockgating_state()
1023 athub_v3_0_get_clockgating(adev, flags); in gmc_v11_0_get_clockgating_state()