Lines matching refs: adev (drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c)
63 static int gmc_v10_0_ecc_interrupt_state(struct amdgpu_device *adev, in gmc_v10_0_ecc_interrupt_state() argument
72 gmc_v10_0_vm_fault_interrupt_state(struct amdgpu_device *adev, in gmc_v10_0_vm_fault_interrupt_state() argument
79 amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, false); in gmc_v10_0_vm_fault_interrupt_state()
86 if (!adev->in_s0ix) in gmc_v10_0_vm_fault_interrupt_state()
87 amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, false); in gmc_v10_0_vm_fault_interrupt_state()
91 amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, true); in gmc_v10_0_vm_fault_interrupt_state()
98 if (!adev->in_s0ix) in gmc_v10_0_vm_fault_interrupt_state()
99 amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, true); in gmc_v10_0_vm_fault_interrupt_state()
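
The vm_fault_interrupt_state references follow one pattern: on disable, mask VM-fault reporting on both hubs; on enable, unmask it; and in both directions the GFXHUB is skipped while the GFX block is powered down for S0ix. A minimal sketch of that shape, collapsing the driver's DISABLE/ENABLE switch into a bool (the helper and hub constants are the real driver symbols, everything else is elided):

static int gmc_v10_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *src,
					      unsigned type,
					      enum amdgpu_interrupt_state state)
{
	bool enable = (state == AMDGPU_IRQ_STATE_ENABLE);

	/* The MMHUB is always reachable. */
	amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, enable);

	/* In S0ix the GFX block is powered down; touching GFXHUB
	 * registers then is unsafe, so skip it (lines 86-87, 98-99). */
	if (!adev->in_s0ix)
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, enable);

	return 0;
}
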
108 static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev, in gmc_v10_0_process_interrupt() argument
114 struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src]; in gmc_v10_0_process_interrupt()
126 if (entry->ih != &adev->irq.ih_soft && in gmc_v10_0_process_interrupt()
127 amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid, in gmc_v10_0_process_interrupt()
134 if (entry->ih == &adev->irq.ih) { in gmc_v10_0_process_interrupt()
135 amdgpu_irq_delegate(adev, entry, 8); in gmc_v10_0_process_interrupt()
142 if (amdgpu_vm_handle_fault(adev, entry->pasid, addr, write_fault)) in gmc_v10_0_process_interrupt()
146 if (!amdgpu_sriov_vf(adev)) { in gmc_v10_0_process_interrupt()
153 (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 3, 0))) in gmc_v10_0_process_interrupt()
164 amdgpu_vm_get_task_info(adev, entry->pasid, &task_info); in gmc_v10_0_process_interrupt()
166 dev_err(adev->dev, in gmc_v10_0_process_interrupt()
173 dev_err(adev->dev, " in page starting at address 0x%016llx from client 0x%x (%s)\n", in gmc_v10_0_process_interrupt()
177 if (!amdgpu_sriov_vf(adev)) in gmc_v10_0_process_interrupt()
178 hub->vmhub_funcs->print_l2_protection_fault_status(adev, in gmc_v10_0_process_interrupt()
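
Taken together, the process_interrupt references encode a small pipeline for retry faults before anything is logged: drop duplicates via the fault filter, delegate faults arriving on the hardware IH ring to the soft ring, and try to recover by filling page tables; only unrecoverable faults fall through to the diagnostics (task info, fault address, and on bare metal the hub's raw L2 protection-fault status). A control-flow skeleton, with the register reads and dev_err() formatting elided:

if (retry_fault) {
	/* Only the first fault for this address is processed. */
	if (entry->ih != &adev->irq.ih_soft &&
	    amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid,
				     entry->timestamp))
		return 1;

	/* Hand heavier processing to the soft IH ring. */
	if (entry->ih == &adev->irq.ih) {
		amdgpu_irq_delegate(adev, entry, 8);
		return 1;
	}

	/* Recoverable: fill the page tables and let the client retry. */
	if (amdgpu_vm_handle_fault(adev, entry->pasid, addr, write_fault))
		return 1;
}

/* Unrecoverable: on bare metal, read the hub's L2 protection-fault
 * status (with a dummy read first on GC < 10.3.0, line 153, to avoid
 * a stale value through the fast GRBM interface) and log it together
 * with the task info. */
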
194 static void gmc_v10_0_set_irq_funcs(struct amdgpu_device *adev) in gmc_v10_0_set_irq_funcs() argument
196 adev->gmc.vm_fault.num_types = 1; in gmc_v10_0_set_irq_funcs()
197 adev->gmc.vm_fault.funcs = &gmc_v10_0_irq_funcs; in gmc_v10_0_set_irq_funcs()
199 if (!amdgpu_sriov_vf(adev)) { in gmc_v10_0_set_irq_funcs()
200 adev->gmc.ecc_irq.num_types = 1; in gmc_v10_0_set_irq_funcs()
201 adev->gmc.ecc_irq.funcs = &gmc_v10_0_ecc_funcs; in gmc_v10_0_set_irq_funcs()
212 static bool gmc_v10_0_use_invalidate_semaphore(struct amdgpu_device *adev, in gmc_v10_0_use_invalidate_semaphore() argument
217 (!amdgpu_sriov_vf(adev))); in gmc_v10_0_use_invalidate_semaphore()
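
The use_invalidate_semaphore references reduce to a single predicate: the invalidation-engine semaphore is taken only for the MMHUB, presumably to serialize against other invalidation clients, and never under SR-IOV where the host arbitrates. As a pure function, matching lines 212-217:

static bool gmc_v10_0_use_invalidate_semaphore(struct amdgpu_device *adev,
					       uint32_t vmhub)
{
	return (vmhub == AMDGPU_MMHUB_0) && !amdgpu_sriov_vf(adev);
}
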
221 struct amdgpu_device *adev, in gmc_v10_0_get_atc_vmid_pasid_mapping_info() argument
240 static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, in gmc_v10_0_flush_vm_hub() argument
243 bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(adev, vmhub); in gmc_v10_0_flush_vm_hub()
244 struct amdgpu_vmhub *hub = &adev->vmhub[vmhub]; in gmc_v10_0_flush_vm_hub()
255 spin_lock(&adev->gmc.invalidate_lock); in gmc_v10_0_flush_vm_hub()
265 for (i = 0; i < adev->usec_timeout; i++) { in gmc_v10_0_flush_vm_hub()
275 if (i >= adev->usec_timeout) in gmc_v10_0_flush_vm_hub()
288 (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 3, 0))) in gmc_v10_0_flush_vm_hub()
293 for (i = 0; i < adev->usec_timeout; i++) { in gmc_v10_0_flush_vm_hub()
313 spin_unlock(&adev->gmc.invalidate_lock); in gmc_v10_0_flush_vm_hub()
315 if (i < adev->usec_timeout) in gmc_v10_0_flush_vm_hub()
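
flush_vm_hub is the classic MMIO invalidation sequence, serialized by adev->gmc.invalidate_lock: optionally acquire the hub semaphore by polling until it reads back acquired, write the invalidate request, poll the per-VMID ack bit for up to adev->usec_timeout iterations, release the semaphore, and complain if the loop ran out (line 315 is the success check). A compressed sketch; sem/req/ack stand in for the hub's invalidation-engine register offsets, and the pre-GC10.3 dummy-read workaround at line 288 is elided:

spin_lock(&adev->gmc.invalidate_lock);

if (use_semaphore) {
	for (i = 0; i < adev->usec_timeout; i++) {
		/* A read value of 1 means the semaphore was acquired. */
		if (RREG32_NO_KIQ(sem) & 0x1)
			break;
		udelay(1);
	}
}

WREG32_NO_KIQ(req, inv_req);			/* kick the invalidation */

for (i = 0; i < adev->usec_timeout; i++) {
	if (RREG32_NO_KIQ(ack) & (1U << vmid))	/* ack bit for this VMID */
		break;
	udelay(1);
}

if (use_semaphore)
	WREG32_NO_KIQ(sem, 0);			/* release */

spin_unlock(&adev->gmc.invalidate_lock);

if (i >= adev->usec_timeout)
	DRM_ERROR("Timeout waiting for VM flush ACK!\n");
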
331 static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, in gmc_v10_0_flush_gpu_tlb() argument
334 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; in gmc_v10_0_flush_gpu_tlb()
341 adev->hdp.funcs->flush_hdp(adev, NULL); in gmc_v10_0_flush_gpu_tlb()
346 if (adev->gfx.kiq.ring.sched.ready && !adev->enable_mes && in gmc_v10_0_flush_gpu_tlb()
347 (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) && in gmc_v10_0_flush_gpu_tlb()
348 down_read_trylock(&adev->reset_domain->sem)) { in gmc_v10_0_flush_gpu_tlb()
349 struct amdgpu_vmhub *hub = &adev->vmhub[vmhub]; in gmc_v10_0_flush_gpu_tlb()
355 amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req, in gmc_v10_0_flush_gpu_tlb()
358 up_read(&adev->reset_domain->sem); in gmc_v10_0_flush_gpu_tlb()
362 mutex_lock(&adev->mman.gtt_window_lock); in gmc_v10_0_flush_gpu_tlb()
365 gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_MMHUB_0, 0); in gmc_v10_0_flush_gpu_tlb()
366 mutex_unlock(&adev->mman.gtt_window_lock); in gmc_v10_0_flush_gpu_tlb()
372 if (!adev->mman.buffer_funcs_enabled || in gmc_v10_0_flush_gpu_tlb()
373 !adev->ib_pool_ready || in gmc_v10_0_flush_gpu_tlb()
374 amdgpu_in_reset(adev) || in gmc_v10_0_flush_gpu_tlb()
376 gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_GFXHUB_0, 0); in gmc_v10_0_flush_gpu_tlb()
377 mutex_unlock(&adev->mman.gtt_window_lock); in gmc_v10_0_flush_gpu_tlb()
386 r = amdgpu_job_alloc_with_ib(ring->adev, &adev->mman.entity, in gmc_v10_0_flush_gpu_tlb()
393 job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo); in gmc_v10_0_flush_gpu_tlb()
399 mutex_unlock(&adev->mman.gtt_window_lock); in gmc_v10_0_flush_gpu_tlb()
407 mutex_unlock(&adev->mman.gtt_window_lock); in gmc_v10_0_flush_gpu_tlb()
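
flush_gpu_tlb picks one of three invalidation paths, which the references above trace. When the KIQ ring is live (and either bare metal or SR-IOV runtime, guarded by the reset-domain semaphore, lines 346-348), a single KIQ packet does the register write and the ack wait atomically. Otherwise the MMHUB always takes the direct MMIO flush (line 365), and the GFXHUB does too whenever SDMA is unusable (lines 372-376); in the remaining case a small SDMA job is submitted whose VM flush performs the invalidation. Roughly:

if (adev->gfx.kiq.ring.sched.ready && !adev->enable_mes &&
    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
    down_read_trylock(&adev->reset_domain->sem)) {
	/* Path 1: atomic reg-write + reg-wait through the KIQ. */
	amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
					   1 << vmid);
	up_read(&adev->reset_domain->sem);
	return;
}

mutex_lock(&adev->mman.gtt_window_lock);

if (vmhub == AMDGPU_MMHUB_0) {
	/* Path 2a: the MMHUB is always flushed by direct MMIO. */
	gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_MMHUB_0, 0);
	mutex_unlock(&adev->mman.gtt_window_lock);
	return;
}

if (!adev->mman.buffer_funcs_enabled || !adev->ib_pool_ready ||
    amdgpu_in_reset(adev) /* || a further ring guard, line 375 */) {
	/* Path 2b: GFXHUB falls back to MMIO when SDMA can't run. */
	gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_GFXHUB_0, 0);
	mutex_unlock(&adev->mman.gtt_window_lock);
	return;
}

/* Path 3: submit a tiny SDMA job (lines 386-399); its VM flush on
 * adev->gart.bo's page directory invalidates the TLB. */
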
421 static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, in gmc_v10_0_flush_gpu_tlb_pasid() argument
430 u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT : adev->usec_timeout; in gmc_v10_0_flush_gpu_tlb_pasid()
431 struct amdgpu_ring *ring = &adev->gfx.kiq.ring; in gmc_v10_0_flush_gpu_tlb_pasid()
432 struct amdgpu_kiq *kiq = &adev->gfx.kiq; in gmc_v10_0_flush_gpu_tlb_pasid()
435 spin_lock(&adev->gfx.kiq.ring_lock); in gmc_v10_0_flush_gpu_tlb_pasid()
443 spin_unlock(&adev->gfx.kiq.ring_lock); in gmc_v10_0_flush_gpu_tlb_pasid()
448 spin_unlock(&adev->gfx.kiq.ring_lock); in gmc_v10_0_flush_gpu_tlb_pasid()
451 dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r); in gmc_v10_0_flush_gpu_tlb_pasid()
460 ret = gmc_v10_0_get_atc_vmid_pasid_mapping_info(adev, vmid, in gmc_v10_0_flush_gpu_tlb_pasid()
464 for (i = 0; i < adev->num_vmhubs; i++) in gmc_v10_0_flush_gpu_tlb_pasid()
465 gmc_v10_0_flush_gpu_tlb(adev, vmid, in gmc_v10_0_flush_gpu_tlb_pasid()
468 gmc_v10_0_flush_gpu_tlb(adev, vmid, in gmc_v10_0_flush_gpu_tlb_pasid()
471 if (!adev->enable_mes) in gmc_v10_0_flush_gpu_tlb_pasid()
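
flush_gpu_tlb_pasid has two halves. With a ready KIQ it emits an invalidation packet under adev->gfx.kiq.ring_lock and waits for the fence, using the stretched SRIOV_USEC_TIMEOUT under virtualization (line 430) because a world switch can delay the KIQ. The fallback walks the hardware VMIDs, resolves each one's pasid through the ATC mapping registers, and flushes every hub on a match; without MES the first match ends the walk (line 471). A sketch of the fallback:

for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) {
	ret = gmc_v10_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
							&queried_pasid);
	if (ret && queried_pasid == pasid) {
		if (all_hub) {
			for (i = 0; i < adev->num_vmhubs; i++)
				gmc_v10_0_flush_gpu_tlb(adev, vmid,
							i, flush_type);
		} else {
			gmc_v10_0_flush_gpu_tlb(adev, vmid,
						AMDGPU_GFXHUB_0, flush_type);
		}
		if (!adev->enable_mes)
			break;	/* one pasid maps to one VMID here */
	}
}
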
482 bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub); in gmc_v10_0_emit_flush_gpu_tlb()
483 struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub]; in gmc_v10_0_emit_flush_gpu_tlb()
530 struct amdgpu_device *adev = ring->adev; in gmc_v10_0_emit_pasid_mapping() local
578 static uint64_t gmc_v10_0_map_mtype(struct amdgpu_device *adev, uint32_t flags) in gmc_v10_0_map_mtype() argument
596 static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level, in gmc_v10_0_get_vm_pde() argument
600 *addr = amdgpu_gmc_vram_mc2pa(adev, *addr); in gmc_v10_0_get_vm_pde()
603 if (!adev->gmc.translate_further) in gmc_v10_0_get_vm_pde()
619 static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev, in gmc_v10_0_get_vm_pte() argument
648 static unsigned gmc_v10_0_get_vbios_fb_size(struct amdgpu_device *adev) in gmc_v10_0_get_vbios_fb_size() argument
681 static void gmc_v10_0_set_gmc_funcs(struct amdgpu_device *adev) in gmc_v10_0_set_gmc_funcs() argument
683 if (adev->gmc.gmc_funcs == NULL) in gmc_v10_0_set_gmc_funcs()
684 adev->gmc.gmc_funcs = &gmc_v10_0_gmc_funcs; in gmc_v10_0_set_gmc_funcs()
687 static void gmc_v10_0_set_umc_funcs(struct amdgpu_device *adev) in gmc_v10_0_set_umc_funcs() argument
689 switch (adev->ip_versions[UMC_HWIP][0]) { in gmc_v10_0_set_umc_funcs()
691 adev->umc.max_ras_err_cnt_per_query = UMC_V8_7_TOTAL_CHANNEL_NUM; in gmc_v10_0_set_umc_funcs()
692 adev->umc.channel_inst_num = UMC_V8_7_CHANNEL_INSTANCE_NUM; in gmc_v10_0_set_umc_funcs()
693 adev->umc.umc_inst_num = UMC_V8_7_UMC_INSTANCE_NUM; in gmc_v10_0_set_umc_funcs()
694 adev->umc.channel_offs = UMC_V8_7_PER_CHANNEL_OFFSET_SIENNA; in gmc_v10_0_set_umc_funcs()
695 adev->umc.retire_unit = 1; in gmc_v10_0_set_umc_funcs()
696 adev->umc.channel_idx_tbl = &umc_v8_7_channel_idx_tbl[0][0]; in gmc_v10_0_set_umc_funcs()
697 adev->umc.ras = &umc_v8_7_ras; in gmc_v10_0_set_umc_funcs()
702 if (adev->umc.ras) { in gmc_v10_0_set_umc_funcs()
703 amdgpu_ras_register_ras_block(adev, &adev->umc.ras->ras_block); in gmc_v10_0_set_umc_funcs()
705 strcpy(adev->umc.ras->ras_block.ras_comm.name, "umc"); in gmc_v10_0_set_umc_funcs()
706 adev->umc.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__UMC; in gmc_v10_0_set_umc_funcs()
707 adev->umc.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; in gmc_v10_0_set_umc_funcs()
708 adev->umc.ras_if = &adev->umc.ras->ras_block.ras_comm; in gmc_v10_0_set_umc_funcs()
711 if (!adev->umc.ras->ras_block.ras_late_init) in gmc_v10_0_set_umc_funcs()
712 adev->umc.ras->ras_block.ras_late_init = amdgpu_umc_ras_late_init; in gmc_v10_0_set_umc_funcs()
715 if (!adev->umc.ras->ras_block.ras_cb) in gmc_v10_0_set_umc_funcs()
716 adev->umc.ras->ras_block.ras_cb = amdgpu_umc_process_ras_data_cb; in gmc_v10_0_set_umc_funcs()
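
set_umc_funcs is an IP-version switch (the references show the UMC v8.7 arm populating channel counts, offsets, and the RAS table) followed by a generic RAS hookup: register the block, name and classify it, and install default late-init and error callbacks only where the per-IP table left them NULL. The hookup, reassembled from lines 702-716:

if (adev->umc.ras) {
	amdgpu_ras_register_ras_block(adev, &adev->umc.ras->ras_block);

	strcpy(adev->umc.ras->ras_block.ras_comm.name, "umc");
	adev->umc.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__UMC;
	adev->umc.ras->ras_block.ras_comm.type =
		AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
	adev->umc.ras_if = &adev->umc.ras->ras_block.ras_comm;

	/* Defaults only where the IP-specific table provided nothing. */
	if (!adev->umc.ras->ras_block.ras_late_init)
		adev->umc.ras->ras_block.ras_late_init =
			amdgpu_umc_ras_late_init;
	if (!adev->umc.ras->ras_block.ras_cb)
		adev->umc.ras->ras_block.ras_cb =
			amdgpu_umc_process_ras_data_cb;
}
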
721 static void gmc_v10_0_set_mmhub_funcs(struct amdgpu_device *adev) in gmc_v10_0_set_mmhub_funcs() argument
723 switch (adev->ip_versions[MMHUB_HWIP][0]) { in gmc_v10_0_set_mmhub_funcs()
727 adev->mmhub.funcs = &mmhub_v2_3_funcs; in gmc_v10_0_set_mmhub_funcs()
730 adev->mmhub.funcs = &mmhub_v2_0_funcs; in gmc_v10_0_set_mmhub_funcs()
735 static void gmc_v10_0_set_gfxhub_funcs(struct amdgpu_device *adev) in gmc_v10_0_set_gfxhub_funcs() argument
737 switch (adev->ip_versions[GC_HWIP][0]) { in gmc_v10_0_set_gfxhub_funcs()
746 adev->gfxhub.funcs = &gfxhub_v2_1_funcs; in gmc_v10_0_set_gfxhub_funcs()
749 adev->gfxhub.funcs = &gfxhub_v2_0_funcs; in gmc_v10_0_set_gfxhub_funcs()
758 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v10_0_early_init() local
760 gmc_v10_0_set_mmhub_funcs(adev); in gmc_v10_0_early_init()
761 gmc_v10_0_set_gfxhub_funcs(adev); in gmc_v10_0_early_init()
762 gmc_v10_0_set_gmc_funcs(adev); in gmc_v10_0_early_init()
763 gmc_v10_0_set_irq_funcs(adev); in gmc_v10_0_early_init()
764 gmc_v10_0_set_umc_funcs(adev); in gmc_v10_0_early_init()
766 adev->gmc.shared_aperture_start = 0x2000000000000000ULL; in gmc_v10_0_early_init()
767 adev->gmc.shared_aperture_end = in gmc_v10_0_early_init()
768 adev->gmc.shared_aperture_start + (4ULL << 30) - 1; in gmc_v10_0_early_init()
769 adev->gmc.private_aperture_start = 0x1000000000000000ULL; in gmc_v10_0_early_init()
770 adev->gmc.private_aperture_end = in gmc_v10_0_early_init()
771 adev->gmc.private_aperture_start + (4ULL << 30) - 1; in gmc_v10_0_early_init()
773 r = amdgpu_gmc_ras_early_init(adev); in gmc_v10_0_early_init()
782 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v10_0_late_init() local
785 r = amdgpu_gmc_allocate_vm_inv_eng(adev); in gmc_v10_0_late_init()
789 r = amdgpu_gmc_ras_late_init(adev); in gmc_v10_0_late_init()
793 return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0); in gmc_v10_0_late_init()
796 static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev, in gmc_v10_0_vram_gtt_location() argument
801 base = adev->gfxhub.funcs->get_fb_location(adev); in gmc_v10_0_vram_gtt_location()
804 base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size; in gmc_v10_0_vram_gtt_location()
806 amdgpu_gmc_vram_location(adev, &adev->gmc, base); in gmc_v10_0_vram_gtt_location()
807 amdgpu_gmc_gart_location(adev, mc); in gmc_v10_0_vram_gtt_location()
808 amdgpu_gmc_agp_location(adev, mc); in gmc_v10_0_vram_gtt_location()
811 adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev); in gmc_v10_0_vram_gtt_location()
814 adev->vm_manager.vram_base_offset += in gmc_v10_0_vram_gtt_location()
815 adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size; in gmc_v10_0_vram_gtt_location()
827 static int gmc_v10_0_mc_init(struct amdgpu_device *adev) in gmc_v10_0_mc_init() argument
832 adev->gmc.mc_vram_size = in gmc_v10_0_mc_init()
833 adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL; in gmc_v10_0_mc_init()
834 adev->gmc.real_vram_size = adev->gmc.mc_vram_size; in gmc_v10_0_mc_init()
836 if (!(adev->flags & AMD_IS_APU)) { in gmc_v10_0_mc_init()
837 r = amdgpu_device_resize_fb_bar(adev); in gmc_v10_0_mc_init()
841 adev->gmc.aper_base = pci_resource_start(adev->pdev, 0); in gmc_v10_0_mc_init()
842 adev->gmc.aper_size = pci_resource_len(adev->pdev, 0); in gmc_v10_0_mc_init()
845 if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) { in gmc_v10_0_mc_init()
846 adev->gmc.aper_base = adev->gfxhub.funcs->get_mc_fb_offset(adev); in gmc_v10_0_mc_init()
847 adev->gmc.aper_size = adev->gmc.real_vram_size; in gmc_v10_0_mc_init()
851 adev->gmc.visible_vram_size = adev->gmc.aper_size; in gmc_v10_0_mc_init()
855 switch (adev->ip_versions[GC_HWIP][0]) { in gmc_v10_0_mc_init()
857 adev->gmc.gart_size = 512ULL << 20; in gmc_v10_0_mc_init()
863 adev->gmc.gart_size = 1024ULL << 20; in gmc_v10_0_mc_init()
867 adev->gmc.gart_size = (u64)amdgpu_gart_size << 20; in gmc_v10_0_mc_init()
870 gmc_v10_0_vram_gtt_location(adev, &adev->gmc); in gmc_v10_0_mc_init()
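
mc_init sizes VRAM from the NBIO memsize register, resizes the BAR on dGPUs and adopts BAR0 as the CPU-visible aperture (bare-metal APUs instead take the FB offset and full VRAM size, lines 845-847), then picks a default GART size by GC IP version when the amdgpu_gart_size module parameter is -1. A sketch of that selection; the 1 GiB version list shown is illustrative, not the driver's exact set:

if (amdgpu_gart_size == -1) {
	switch (adev->ip_versions[GC_HWIP][0]) {
	default:
		adev->gmc.gart_size = 512ULL << 20;	/* 512 MiB, line 857 */
		break;
	case IP_VERSION(10, 3, 1):	/* illustrative: APU parts that */
	case IP_VERSION(10, 3, 3):	/* need a larger GART default   */
		adev->gmc.gart_size = 1024ULL << 20;	/* 1 GiB, line 863 */
		break;
	}
} else {
	adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;	/* MiB -> bytes */
}
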
875 static int gmc_v10_0_gart_init(struct amdgpu_device *adev) in gmc_v10_0_gart_init() argument
879 if (adev->gart.bo) { in gmc_v10_0_gart_init()
885 r = amdgpu_gart_init(adev); in gmc_v10_0_gart_init()
889 adev->gart.table_size = adev->gart.num_gpu_pages * 8; in gmc_v10_0_gart_init()
890 adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_NV10(MTYPE_UC) | in gmc_v10_0_gart_init()
893 return amdgpu_gart_table_vram_alloc(adev); in gmc_v10_0_gart_init()
899 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v10_0_sw_init() local
901 adev->gfxhub.funcs->init(adev); in gmc_v10_0_sw_init()
903 adev->mmhub.funcs->init(adev); in gmc_v10_0_sw_init()
905 spin_lock_init(&adev->gmc.invalidate_lock); in gmc_v10_0_sw_init()
907 if ((adev->flags & AMD_IS_APU) && amdgpu_emu_mode == 1) { in gmc_v10_0_sw_init()
908 adev->gmc.vram_type = AMDGPU_VRAM_TYPE_DDR4; in gmc_v10_0_sw_init()
909 adev->gmc.vram_width = 64; in gmc_v10_0_sw_init()
911 adev->gmc.vram_type = AMDGPU_VRAM_TYPE_GDDR6; in gmc_v10_0_sw_init()
912 adev->gmc.vram_width = 1 * 128; /* numchan * chansize */ in gmc_v10_0_sw_init()
914 r = amdgpu_atomfirmware_get_vram_info(adev, in gmc_v10_0_sw_init()
916 adev->gmc.vram_width = vram_width; in gmc_v10_0_sw_init()
918 adev->gmc.vram_type = vram_type; in gmc_v10_0_sw_init()
919 adev->gmc.vram_vendor = vram_vendor; in gmc_v10_0_sw_init()
922 switch (adev->ip_versions[GC_HWIP][0]) { in gmc_v10_0_sw_init()
924 adev->gmc.mall_size = 128 * 1024 * 1024; in gmc_v10_0_sw_init()
927 adev->gmc.mall_size = 96 * 1024 * 1024; in gmc_v10_0_sw_init()
930 adev->gmc.mall_size = 32 * 1024 * 1024; in gmc_v10_0_sw_init()
933 adev->gmc.mall_size = 16 * 1024 * 1024; in gmc_v10_0_sw_init()
936 adev->gmc.mall_size = 0; in gmc_v10_0_sw_init()
940 switch (adev->ip_versions[GC_HWIP][0]) { in gmc_v10_0_sw_init()
954 adev->num_vmhubs = 2; in gmc_v10_0_sw_init()
960 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48); in gmc_v10_0_sw_init()
967 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, in gmc_v10_0_sw_init()
969 &adev->gmc.vm_fault); in gmc_v10_0_sw_init()
974 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, in gmc_v10_0_sw_init()
976 &adev->gmc.vm_fault); in gmc_v10_0_sw_init()
980 if (!amdgpu_sriov_vf(adev)) { in gmc_v10_0_sw_init()
982 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0, in gmc_v10_0_sw_init()
983 &adev->gmc.ecc_irq); in gmc_v10_0_sw_init()
992 adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */ in gmc_v10_0_sw_init()
994 r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44)); in gmc_v10_0_sw_init()
1000 adev->need_swiotlb = drm_need_swiotlb(44); in gmc_v10_0_sw_init()
1002 r = gmc_v10_0_mc_init(adev); in gmc_v10_0_sw_init()
1006 amdgpu_gmc_get_vbios_allocations(adev); in gmc_v10_0_sw_init()
1009 r = amdgpu_bo_init(adev); in gmc_v10_0_sw_init()
1013 r = gmc_v10_0_gart_init(adev); in gmc_v10_0_sw_init()
1023 adev->vm_manager.first_kfd_vmid = 8; in gmc_v10_0_sw_init()
1025 amdgpu_vm_manager_init(adev); in gmc_v10_0_sw_init()
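
Within sw_init, the addressing setup at lines 992-1000 is worth pulling out: the memory controller spans 48 bits (mc_mask), but the driver asks the DMA layer for only a 44-bit mask, and enables swiotlb bounce-buffering if the platform can't honor it. Reassembled, with the error handling paraphrased:

adev->gmc.mc_mask = 0xffffffffffffULL;	/* 48-bit MC address space */

r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
if (r) {
	dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n");
	return r;
}
adev->need_swiotlb = drm_need_swiotlb(44);
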
1037 static void gmc_v10_0_gart_fini(struct amdgpu_device *adev) in gmc_v10_0_gart_fini() argument
1039 amdgpu_gart_table_vram_free(adev); in gmc_v10_0_gart_fini()
1044 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v10_0_sw_fini() local
1046 amdgpu_vm_manager_fini(adev); in gmc_v10_0_sw_fini()
1047 gmc_v10_0_gart_fini(adev); in gmc_v10_0_sw_fini()
1048 amdgpu_gem_force_release(adev); in gmc_v10_0_sw_fini()
1049 amdgpu_bo_fini(adev); in gmc_v10_0_sw_fini()
1054 static void gmc_v10_0_init_golden_registers(struct amdgpu_device *adev) in gmc_v10_0_init_golden_registers() argument
1063 static int gmc_v10_0_gart_enable(struct amdgpu_device *adev) in gmc_v10_0_gart_enable() argument
1068 if (adev->gart.bo == NULL) { in gmc_v10_0_gart_enable()
1069 dev_err(adev->dev, "No VRAM object for PCIE GART.\n"); in gmc_v10_0_gart_enable()
1073 amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr); in gmc_v10_0_gart_enable()
1075 if (!adev->in_s0ix) { in gmc_v10_0_gart_enable()
1076 r = adev->gfxhub.funcs->gart_enable(adev); in gmc_v10_0_gart_enable()
1081 r = adev->mmhub.funcs->gart_enable(adev); in gmc_v10_0_gart_enable()
1085 adev->hdp.funcs->init_registers(adev); in gmc_v10_0_gart_enable()
1088 adev->hdp.funcs->flush_hdp(adev, NULL); in gmc_v10_0_gart_enable()
1093 if (!adev->in_s0ix) in gmc_v10_0_gart_enable()
1094 adev->gfxhub.funcs->set_fault_enable_default(adev, value); in gmc_v10_0_gart_enable()
1095 adev->mmhub.funcs->set_fault_enable_default(adev, value); in gmc_v10_0_gart_enable()
1096 gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB_0, 0); in gmc_v10_0_gart_enable()
1097 if (!adev->in_s0ix) in gmc_v10_0_gart_enable()
1098 gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB_0, 0); in gmc_v10_0_gart_enable()
1101 (unsigned)(adev->gmc.gart_size >> 20), in gmc_v10_0_gart_enable()
1102 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo)); in gmc_v10_0_gart_enable()
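
gart_enable is ordering-sensitive, and the references walk that order: recover the GTT manager's bindings, program the GFXHUB (skipped in S0ix) and then the MMHUB, initialize and flush HDP so CPU-written page tables are visible, set each hub's default fault behavior from the amdgpu_vm_fault_stop parameter, and finish with a TLB flush of every enabled hub. Condensed, with error paths shortened:

amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);	/* re-bind GTT pages */

if (!adev->in_s0ix) {
	r = adev->gfxhub.funcs->gart_enable(adev);
	if (r)
		return r;
}
r = adev->mmhub.funcs->gart_enable(adev);
if (r)
	return r;

adev->hdp.funcs->init_registers(adev);
adev->hdp.funcs->flush_hdp(adev, NULL);	/* flush HDP before the TLB flush */

value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
	false : true;

if (!adev->in_s0ix)
	adev->gfxhub.funcs->set_fault_enable_default(adev, value);
adev->mmhub.funcs->set_fault_enable_default(adev, value);

gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB_0, 0);
if (!adev->in_s0ix)
	gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB_0, 0);
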
1110 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v10_0_hw_init() local
1113 gmc_v10_0_init_golden_registers(adev); in gmc_v10_0_hw_init()
1119 if (!adev->in_s0ix && adev->gfxhub.funcs && adev->gfxhub.funcs->utcl2_harvest) in gmc_v10_0_hw_init()
1120 adev->gfxhub.funcs->utcl2_harvest(adev); in gmc_v10_0_hw_init()
1122 r = gmc_v10_0_gart_enable(adev); in gmc_v10_0_hw_init()
1127 r = amdgpu_gmc_vram_checking(adev); in gmc_v10_0_hw_init()
1132 if (adev->umc.funcs && adev->umc.funcs->init_registers) in gmc_v10_0_hw_init()
1133 adev->umc.funcs->init_registers(adev); in gmc_v10_0_hw_init()
1145 static void gmc_v10_0_gart_disable(struct amdgpu_device *adev) in gmc_v10_0_gart_disable() argument
1147 if (!adev->in_s0ix) in gmc_v10_0_gart_disable()
1148 adev->gfxhub.funcs->gart_disable(adev); in gmc_v10_0_gart_disable()
1149 adev->mmhub.funcs->gart_disable(adev); in gmc_v10_0_gart_disable()
1154 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v10_0_hw_fini() local
1156 gmc_v10_0_gart_disable(adev); in gmc_v10_0_hw_fini()
1158 if (amdgpu_sriov_vf(adev)) { in gmc_v10_0_hw_fini()
1164 amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0); in gmc_v10_0_hw_fini()
1165 amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0); in gmc_v10_0_hw_fini()
1172 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v10_0_suspend() local
1174 gmc_v10_0_hw_fini(adev); in gmc_v10_0_suspend()
1182 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v10_0_resume() local
1184 r = gmc_v10_0_hw_init(adev); in gmc_v10_0_resume()
1188 amdgpu_vmid_reset_all(adev); in gmc_v10_0_resume()
1214 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v10_0_set_clockgating_state() local
1221 if (adev->in_s0ix && adev->ip_versions[DF_HWIP][0] > IP_VERSION(3, 0, 2)) { in gmc_v10_0_set_clockgating_state()
1222 dev_dbg(adev->dev, "keep mmhub clock gating being enabled for s0ix\n"); in gmc_v10_0_set_clockgating_state()
1226 r = adev->mmhub.funcs->set_clockgating(adev, state); in gmc_v10_0_set_clockgating_state()
1230 if (adev->ip_versions[ATHUB_HWIP][0] >= IP_VERSION(2, 1, 0)) in gmc_v10_0_set_clockgating_state()
1231 return athub_v2_1_set_clockgating(adev, state); in gmc_v10_0_set_clockgating_state()
1233 return athub_v2_0_set_clockgating(adev, state); in gmc_v10_0_set_clockgating_state()
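
set_clockgating_state makes two version-keyed decisions: leave MMHUB clock gating alone during S0ix on DF > 3.0.2, where it must stay enabled, then gate the MMHUB and dispatch ATHUB gating to the v2.1 or v2.0 implementation by ATHUB IP version. Reassembled from lines 1221-1233:

if (adev->in_s0ix && adev->ip_versions[DF_HWIP][0] > IP_VERSION(3, 0, 2)) {
	dev_dbg(adev->dev,
		"keep mmhub clock gating being enabled for s0ix\n");
	return 0;
}

r = adev->mmhub.funcs->set_clockgating(adev, state);
if (r)
	return r;

if (adev->ip_versions[ATHUB_HWIP][0] >= IP_VERSION(2, 1, 0))
	return athub_v2_1_set_clockgating(adev, state);

return athub_v2_0_set_clockgating(adev, state);
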
1238 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v10_0_get_clockgating_state() local
1240 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 3) || in gmc_v10_0_get_clockgating_state()
1241 adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 4)) in gmc_v10_0_get_clockgating_state()
1244 adev->mmhub.funcs->get_clockgating(adev, flags); in gmc_v10_0_get_clockgating_state()
1246 if (adev->ip_versions[ATHUB_HWIP][0] >= IP_VERSION(2, 1, 0)) in gmc_v10_0_get_clockgating_state()
1247 athub_v2_1_get_clockgating(adev, flags); in gmc_v10_0_get_clockgating_state()
1249 athub_v2_0_get_clockgating(adev, flags); in gmc_v10_0_get_clockgating_state()