Lines matching refs: adev
44 static void gmc_v6_0_set_gmc_funcs(struct amdgpu_device *adev);
45 static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev);
64 static void gmc_v6_0_mc_stop(struct amdgpu_device *adev) in gmc_v6_0_mc_stop() argument
68 gmc_v6_0_wait_for_idle((void *)adev); in gmc_v6_0_mc_stop()
84 static void gmc_v6_0_mc_resume(struct amdgpu_device *adev) in gmc_v6_0_mc_resume() argument
98 static int gmc_v6_0_init_microcode(struct amdgpu_device *adev) in gmc_v6_0_init_microcode() argument
107 switch (adev->asic_type) { in gmc_v6_0_init_microcode()
134 err = request_firmware(&adev->gmc.fw, fw_name, adev->dev); in gmc_v6_0_init_microcode()
138 err = amdgpu_ucode_validate(adev->gmc.fw); in gmc_v6_0_init_microcode()
142 dev_err(adev->dev, in gmc_v6_0_init_microcode()
145 release_firmware(adev->gmc.fw); in gmc_v6_0_init_microcode()
146 adev->gmc.fw = NULL; in gmc_v6_0_init_microcode()
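
The init_microcode references above follow the standard kernel firmware-loading pattern: request the blob, validate it, and on any failure release it and clear the pointer so later stages see a clean NULL. A minimal sketch of that error path (the message text is illustrative, and amdgpu_ucode_validate() is the amdgpu-internal header check):

    err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
    if (err)
            goto out;
    err = amdgpu_ucode_validate(adev->gmc.fw);
out:
    if (err) {
            dev_err(adev->dev, "si_mc: failed to load firmware \"%s\"\n", fw_name);
            release_firmware(adev->gmc.fw);  /* safe to call on a NULL blob */
            adev->gmc.fw = NULL;
    }
    return err;
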
151 static int gmc_v6_0_mc_load_microcode(struct amdgpu_device *adev) in gmc_v6_0_mc_load_microcode() argument
159 if (!adev->gmc.fw) in gmc_v6_0_mc_load_microcode()
162 hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data; in gmc_v6_0_mc_load_microcode()
166 adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version); in gmc_v6_0_mc_load_microcode()
169 (adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes)); in gmc_v6_0_mc_load_microcode()
172 (adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); in gmc_v6_0_mc_load_microcode()
198 for (i = 0; i < adev->usec_timeout; i++) { in gmc_v6_0_mc_load_microcode()
203 for (i = 0; i < adev->usec_timeout; i++) { in gmc_v6_0_mc_load_microcode()
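
Both polling loops referenced at source lines 198 and 203 use the driver's bounded-wait idiom: spin on a status register for at most adev->usec_timeout iterations, one microsecond per pass, and fail if the condition never becomes true. A sketch of the idiom (the register and MC_TRAIN_DONE field here are placeholders, not the exact names):

    for (i = 0; i < adev->usec_timeout; i++) {
            if (RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL) & MC_TRAIN_DONE)
                    break;
            udelay(1);
    }
    if (i == adev->usec_timeout)
            return -EINVAL;  /* MC never signalled ready */
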
214 static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev, in gmc_v6_0_vram_gtt_location() argument
220 amdgpu_gmc_vram_location(adev, mc, base); in gmc_v6_0_vram_gtt_location()
221 amdgpu_gmc_gart_location(adev, mc); in gmc_v6_0_vram_gtt_location()
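
gmc_v6_0_vram_gtt_location() only computes the physical VRAM base from the framebuffer-location register before handing placement to the generic helpers. A plausible reconstruction:

    u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;  /* base in 16 MB units */

    base <<= 24;
    amdgpu_gmc_vram_location(adev, mc, base);
    amdgpu_gmc_gart_location(adev, mc);
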
224 static void gmc_v6_0_mc_program(struct amdgpu_device *adev) in gmc_v6_0_mc_program() argument
238 if (gmc_v6_0_wait_for_idle((void *)adev)) { in gmc_v6_0_mc_program()
239 dev_warn(adev->dev, "Wait for MC idle timed out!\n"); in gmc_v6_0_mc_program()
242 if (adev->mode_info.num_crtc) { in gmc_v6_0_mc_program()
257 adev->gmc.vram_start >> 12); in gmc_v6_0_mc_program()
259 adev->gmc.vram_end >> 12); in gmc_v6_0_mc_program()
261 adev->vram_scratch.gpu_addr >> 12); in gmc_v6_0_mc_program()
266 if (gmc_v6_0_wait_for_idle((void *)adev)) { in gmc_v6_0_mc_program()
267 dev_warn(adev->dev, "Wait for MC idle timed out!\n"); in gmc_v6_0_mc_program()
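
The writes referenced at source lines 257-261 program the MC system aperture: the LOW/HIGH pair tells the memory controller which addresses are backed by VRAM, and the DEFAULT register points stray accesses at a scratch page instead of letting them land elsewhere. Roughly:

    WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR, adev->gmc.vram_start >> 12);
    WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, adev->gmc.vram_end >> 12);
    /* out-of-aperture accesses land on the scratch page */
    WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
           adev->vram_scratch.gpu_addr >> 12);
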
271 static int gmc_v6_0_mc_init(struct amdgpu_device *adev) in gmc_v6_0_mc_init() argument
317 adev->gmc.vram_width = numchan * chansize; in gmc_v6_0_mc_init()
319 adev->gmc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL; in gmc_v6_0_mc_init()
320 adev->gmc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL; in gmc_v6_0_mc_init()
322 if (!(adev->flags & AMD_IS_APU)) { in gmc_v6_0_mc_init()
323 r = amdgpu_device_resize_fb_bar(adev); in gmc_v6_0_mc_init()
327 adev->gmc.aper_base = pci_resource_start(adev->pdev, 0); in gmc_v6_0_mc_init()
328 adev->gmc.aper_size = pci_resource_len(adev->pdev, 0); in gmc_v6_0_mc_init()
329 adev->gmc.visible_vram_size = adev->gmc.aper_size; in gmc_v6_0_mc_init()
333 switch (adev->asic_type) { in gmc_v6_0_mc_init()
336 adev->gmc.gart_size = 256ULL << 20; in gmc_v6_0_mc_init()
342 adev->gmc.gart_size = 1024ULL << 20; in gmc_v6_0_mc_init()
346 adev->gmc.gart_size = (u64)amdgpu_gart_size << 20; in gmc_v6_0_mc_init()
349 adev->gmc.gart_size += adev->pm.smu_prv_buffer_size; in gmc_v6_0_mc_init()
350 gmc_v6_0_vram_gtt_location(adev, &adev->gmc); in gmc_v6_0_mc_init()
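
The switch referenced at source lines 333-346 picks a default GART size per ASIC when the user left the amdgpu_gart_size module parameter at -1 ("auto"); any SMU scratch buffer is then added on top. A sketch with the ASIC list abbreviated:

    if (amdgpu_gart_size == -1) {
            switch (adev->asic_type) {
            case CHIP_HAINAN:  /* no MM engines, a small GART suffices */
            default:
                    adev->gmc.gart_size = 256ULL << 20;
                    break;
            case CHIP_VERDE:   /* parts with MM engines get 1 GB */
                    adev->gmc.gart_size = 1024ULL << 20;
                    break;
            }
    } else {
            adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
    }
    adev->gmc.gart_size += adev->pm.smu_prv_buffer_size;
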
355 static void gmc_v6_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, in gmc_v6_0_flush_gpu_tlb() argument
379 static void gmc_v6_0_get_vm_pde(struct amdgpu_device *adev, int level, in gmc_v6_0_get_vm_pde() argument
385 static void gmc_v6_0_get_vm_pte(struct amdgpu_device *adev, in gmc_v6_0_get_vm_pte() argument
393 static void gmc_v6_0_set_fault_enable_default(struct amdgpu_device *adev, in gmc_v6_0_set_fault_enable_default() argument
420 static void gmc_v6_0_set_prt(struct amdgpu_device *adev, bool enable) in gmc_v6_0_set_prt() argument
424 if (enable && !adev->gmc.prt_warning) { in gmc_v6_0_set_prt()
425 dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n"); in gmc_v6_0_set_prt()
426 adev->gmc.prt_warning = true; in gmc_v6_0_set_prt()
446 uint32_t high = adev->vm_manager.max_pfn - in gmc_v6_0_set_prt()
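
PRT (partially resident textures) needs accesses to unmapped pages to complete silently, hence the one-time warning when VM fault reporting gets disabled. The aperture bounds are derived from max_pfn minus the reserved window at the top of the VA space; a sketch under that assumption (the aperture register names are assumptions taken from the SI register headers):

    if (enable) {
            uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT;
            uint32_t high = adev->vm_manager.max_pfn -
                    (AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT);

            WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
            WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, high);
    }
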
469 static int gmc_v6_0_gart_enable(struct amdgpu_device *adev) in gmc_v6_0_gart_enable() argument
475 if (adev->gart.bo == NULL) { in gmc_v6_0_gart_enable()
476 dev_err(adev->dev, "No VRAM object for PCIE GART.\n"); in gmc_v6_0_gart_enable()
479 r = amdgpu_gart_table_vram_pin(adev); in gmc_v6_0_gart_enable()
483 table_addr = amdgpu_bo_gpu_offset(adev->gart.bo); in gmc_v6_0_gart_enable()
505 field = adev->vm_manager.fragment_size; in gmc_v6_0_gart_enable()
511 WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->gmc.gart_start >> 12); in gmc_v6_0_gart_enable()
512 WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->gmc.gart_end >> 12); in gmc_v6_0_gart_enable()
515 (u32)(adev->dummy_page_addr >> 12)); in gmc_v6_0_gart_enable()
529 WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1); in gmc_v6_0_gart_enable()
545 (u32)(adev->dummy_page_addr >> 12)); in gmc_v6_0_gart_enable()
550 ((adev->vm_manager.block_size - 9) in gmc_v6_0_gart_enable()
553 gmc_v6_0_set_fault_enable_default(adev, false); in gmc_v6_0_gart_enable()
555 gmc_v6_0_set_fault_enable_default(adev, true); in gmc_v6_0_gart_enable()
557 gmc_v6_0_flush_gpu_tlb(adev, 0, 0, 0); in gmc_v6_0_gart_enable()
558 dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n", in gmc_v6_0_gart_enable()
559 (unsigned)(adev->gmc.gart_size >> 20), in gmc_v6_0_gart_enable()
561 adev->gart.ready = true; in gmc_v6_0_gart_enable()
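
Between the START/END writes at source lines 511-512 and the TLB flush at 557, VM context 0 must be pointed at the freshly pinned page table, with faults redirected to the dummy page. A plausible reconstruction of that step:

    WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, table_addr >> 12);
    WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
           (u32)(adev->dummy_page_addr >> 12));
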
565 static int gmc_v6_0_gart_init(struct amdgpu_device *adev) in gmc_v6_0_gart_init() argument
569 if (adev->gart.bo) { in gmc_v6_0_gart_init()
570 dev_warn(adev->dev, "gmc_v6_0 PCIE GART already initialized\n"); in gmc_v6_0_gart_init()
573 r = amdgpu_gart_init(adev); in gmc_v6_0_gart_init()
576 adev->gart.table_size = adev->gart.num_gpu_pages * 8; in gmc_v6_0_gart_init()
577 adev->gart.gart_pte_flags = 0; in gmc_v6_0_gart_init()
578 return amdgpu_gart_table_vram_alloc(adev); in gmc_v6_0_gart_init()
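
The table size at source line 576 follows from each GART entry being one 64-bit PTE, i.e. 8 bytes per GPU page. As a worked example: a 1 GB GART with 4 KB pages holds (1 << 30) >> 12 = 262144 pages, so the table itself occupies 262144 * 8 = 2 MB of VRAM.
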
581 static void gmc_v6_0_gart_disable(struct amdgpu_device *adev) in gmc_v6_0_gart_disable() argument
611 amdgpu_gart_table_vram_unpin(adev); in gmc_v6_0_gart_disable()
614 static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev, in gmc_v6_0_vm_decode_fault() argument
627 dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n", in gmc_v6_0_vm_decode_fault()
783 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v6_0_early_init() local
785 gmc_v6_0_set_gmc_funcs(adev); in gmc_v6_0_early_init()
786 gmc_v6_0_set_irq_funcs(adev); in gmc_v6_0_early_init()
793 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v6_0_late_init() local
796 return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0); in gmc_v6_0_late_init()
801 static unsigned gmc_v6_0_get_vbios_fb_size(struct amdgpu_device *adev) in gmc_v6_0_get_vbios_fb_size() argument
820 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v6_0_sw_init() local
822 adev->num_vmhubs = 1; in gmc_v6_0_sw_init()
824 if (adev->flags & AMD_IS_APU) { in gmc_v6_0_sw_init()
825 adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN; in gmc_v6_0_sw_init()
829 adev->gmc.vram_type = gmc_v6_0_convert_vram_type(tmp); in gmc_v6_0_sw_init()
832 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 146, &adev->gmc.vm_fault); in gmc_v6_0_sw_init()
836 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 147, &adev->gmc.vm_fault); in gmc_v6_0_sw_init()
840 amdgpu_vm_adjust_size(adev, 64, 9, 1, 40); in gmc_v6_0_sw_init()
842 adev->gmc.mc_mask = 0xffffffffffULL; in gmc_v6_0_sw_init()
844 r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(40)); in gmc_v6_0_sw_init()
846 dev_warn(adev->dev, "No suitable DMA available.\n"); in gmc_v6_0_sw_init()
849 adev->need_swiotlb = drm_need_swiotlb(40); in gmc_v6_0_sw_init()
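
The 40-bit width is not arbitrary: the GMC v6 memory controller addresses 2^40 bytes, which is exactly the mc_mask of 0xffffffffff set at source line 842, so the DMA mask and the swiotlb decision use the same figure. In sketch form:

    adev->gmc.mc_mask = 0xffffffffffULL;  /* 2^40 - 1: 40-bit MC address space */

    r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(40));
    if (r)
            dev_warn(adev->dev, "No suitable DMA available.\n");
    adev->need_swiotlb = drm_need_swiotlb(40);
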
851 r = gmc_v6_0_init_microcode(adev); in gmc_v6_0_sw_init()
853 dev_err(adev->dev, "Failed to load mc firmware!\n"); in gmc_v6_0_sw_init()
857 r = gmc_v6_0_mc_init(adev); in gmc_v6_0_sw_init()
861 amdgpu_gmc_get_vbios_allocations(adev); in gmc_v6_0_sw_init()
863 r = amdgpu_bo_init(adev); in gmc_v6_0_sw_init()
867 r = gmc_v6_0_gart_init(adev); in gmc_v6_0_sw_init()
877 adev->vm_manager.first_kfd_vmid = 8; in gmc_v6_0_sw_init()
878 amdgpu_vm_manager_init(adev); in gmc_v6_0_sw_init()
881 if (adev->flags & AMD_IS_APU) { in gmc_v6_0_sw_init()
885 adev->vm_manager.vram_base_offset = tmp; in gmc_v6_0_sw_init()
887 adev->vm_manager.vram_base_offset = 0; in gmc_v6_0_sw_init()
895 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v6_0_sw_fini() local
897 amdgpu_gem_force_release(adev); in gmc_v6_0_sw_fini()
898 amdgpu_vm_manager_fini(adev); in gmc_v6_0_sw_fini()
899 amdgpu_gart_table_vram_free(adev); in gmc_v6_0_sw_fini()
900 amdgpu_bo_fini(adev); in gmc_v6_0_sw_fini()
901 release_firmware(adev->gmc.fw); in gmc_v6_0_sw_fini()
902 adev->gmc.fw = NULL; in gmc_v6_0_sw_fini()
910 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v6_0_hw_init() local
912 gmc_v6_0_mc_program(adev); in gmc_v6_0_hw_init()
914 if (!(adev->flags & AMD_IS_APU)) { in gmc_v6_0_hw_init()
915 r = gmc_v6_0_mc_load_microcode(adev); in gmc_v6_0_hw_init()
917 dev_err(adev->dev, "Failed to load MC firmware!\n"); in gmc_v6_0_hw_init()
922 r = gmc_v6_0_gart_enable(adev); in gmc_v6_0_hw_init()
931 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v6_0_hw_fini() local
933 amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0); in gmc_v6_0_hw_fini()
934 gmc_v6_0_gart_disable(adev); in gmc_v6_0_hw_fini()
941 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v6_0_suspend() local
943 gmc_v6_0_hw_fini(adev); in gmc_v6_0_suspend()
951 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v6_0_resume() local
953 r = gmc_v6_0_hw_init(adev); in gmc_v6_0_resume()
957 amdgpu_vmid_reset_all(adev); in gmc_v6_0_resume()
964 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v6_0_is_idle() local
977 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v6_0_wait_for_idle() local
979 for (i = 0; i < adev->usec_timeout; i++) { in gmc_v6_0_wait_for_idle()
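
gmc_v6_0_wait_for_idle() is the bounded poll the earlier (void *) casts feed into: check the idle condition once per microsecond until usec_timeout expires. A sketch, assuming it delegates to gmc_v6_0_is_idle():

    for (i = 0; i < adev->usec_timeout; i++) {
            if (gmc_v6_0_is_idle(handle))
                    return 0;
            udelay(1);
    }
    return -ETIMEDOUT;
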
990 struct amdgpu_device *adev = (struct amdgpu_device *)handle; in gmc_v6_0_soft_reset() local
1000 if (!(adev->flags & AMD_IS_APU)) in gmc_v6_0_soft_reset()
1006 gmc_v6_0_mc_stop(adev); in gmc_v6_0_soft_reset()
1007 if (gmc_v6_0_wait_for_idle(adev)) { in gmc_v6_0_soft_reset()
1008 dev_warn(adev->dev, "Wait for GMC idle timed out!\n"); in gmc_v6_0_soft_reset()
1014 dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); in gmc_v6_0_soft_reset()
1026 gmc_v6_0_mc_resume(adev); in gmc_v6_0_soft_reset()
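
The soft-reset path brackets the SRBM reset pulse with mc_stop()/mc_resume() so the memory controller is quiesced while the bits toggle; the readbacks post each write before the delay. Roughly, assuming srbm_soft_reset holds the bits collected earlier in the function:

    gmc_v6_0_mc_stop(adev);
    if (gmc_v6_0_wait_for_idle(adev))
            dev_warn(adev->dev, "Wait for GMC idle timed out!\n");

    tmp = RREG32(mmSRBM_SOFT_RESET);
    WREG32(mmSRBM_SOFT_RESET, tmp | srbm_soft_reset);   /* assert reset */
    RREG32(mmSRBM_SOFT_RESET);                          /* post the write */
    udelay(50);
    WREG32(mmSRBM_SOFT_RESET, tmp & ~srbm_soft_reset);  /* release reset */
    RREG32(mmSRBM_SOFT_RESET);
    udelay(50);

    gmc_v6_0_mc_resume(adev);
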
1033 static int gmc_v6_0_vm_fault_interrupt_state(struct amdgpu_device *adev, in gmc_v6_0_vm_fault_interrupt_state() argument
1070 static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev, in gmc_v6_0_process_interrupt() argument
1084 gmc_v6_0_set_fault_enable_default(adev, false); in gmc_v6_0_process_interrupt()
1087 dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n", in gmc_v6_0_process_interrupt()
1089 dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", in gmc_v6_0_process_interrupt()
1091 dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", in gmc_v6_0_process_interrupt()
1093 gmc_v6_0_vm_decode_fault(adev, status, addr, 0); in gmc_v6_0_process_interrupt()
1142 static void gmc_v6_0_set_gmc_funcs(struct amdgpu_device *adev) in gmc_v6_0_set_gmc_funcs() argument
1144 adev->gmc.gmc_funcs = &gmc_v6_0_gmc_funcs; in gmc_v6_0_set_gmc_funcs()
1147 static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev) in gmc_v6_0_set_irq_funcs() argument
1149 adev->gmc.vm_fault.num_types = 1; in gmc_v6_0_set_irq_funcs()
1150 adev->gmc.vm_fault.funcs = &gmc_v6_0_irq_funcs; in gmc_v6_0_set_irq_funcs()
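
These two setters are the standard amdgpu wiring step: each IP version fills a const ops table once, and gmc_v6_0_early_init() installs the pointers. A hedged reconstruction of the tables, with the member list trimmed to the callbacks that appear above:

    static const struct amdgpu_gmc_funcs gmc_v6_0_gmc_funcs = {
            .flush_gpu_tlb = gmc_v6_0_flush_gpu_tlb,
            .set_prt = gmc_v6_0_set_prt,
            .get_vm_pde = gmc_v6_0_get_vm_pde,
            .get_vm_pte = gmc_v6_0_get_vm_pte,
            .get_vbios_fb_size = gmc_v6_0_get_vbios_fb_size,
    };

    static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = {
            .set = gmc_v6_0_vm_fault_interrupt_state,
            .process = gmc_v6_0_process_interrupt,
    };
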