Lines matching refs:adev (references to the amdgpu device pointer adev in gmc_v12_0.c)

45 static int gmc_v12_0_ecc_interrupt_state(struct amdgpu_device *adev,  in gmc_v12_0_ecc_interrupt_state()  argument
53 static int gmc_v12_0_vm_fault_interrupt_state(struct amdgpu_device *adev, in gmc_v12_0_vm_fault_interrupt_state() argument
60 amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB0(0), false); in gmc_v12_0_vm_fault_interrupt_state()
67 if (!adev->in_s0ix) in gmc_v12_0_vm_fault_interrupt_state()
68 amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB(0), false); in gmc_v12_0_vm_fault_interrupt_state()
72 amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB0(0), true); in gmc_v12_0_vm_fault_interrupt_state()
79 if (!adev->in_s0ix) in gmc_v12_0_vm_fault_interrupt_state()
80 amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB(0), true); in gmc_v12_0_vm_fault_interrupt_state()
89 static int gmc_v12_0_process_interrupt(struct amdgpu_device *adev, in gmc_v12_0_process_interrupt() argument
101 hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; in gmc_v12_0_process_interrupt()
103 hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; in gmc_v12_0_process_interrupt()
105 if (!amdgpu_sriov_vf(adev)) { in gmc_v12_0_process_interrupt()
117 amdgpu_vm_update_fault_cache(adev, entry->pasid, addr, status, in gmc_v12_0_process_interrupt()
124 dev_err(adev->dev, in gmc_v12_0_process_interrupt()
128 task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid); in gmc_v12_0_process_interrupt()
130 amdgpu_vm_print_task_info(adev, task_info); in gmc_v12_0_process_interrupt()
134 dev_err(adev->dev, " in page starting at address 0x%016llx from client %d\n", in gmc_v12_0_process_interrupt()
141 hub->vmhub_funcs->print_l2_protection_fault_status(adev, status); in gmc_v12_0_process_interrupt()
157 static void gmc_v12_0_set_irq_funcs(struct amdgpu_device *adev) in gmc_v12_0_set_irq_funcs() argument
159 adev->gmc.vm_fault.num_types = 1; in gmc_v12_0_set_irq_funcs()
160 adev->gmc.vm_fault.funcs = &gmc_v12_0_irq_funcs; in gmc_v12_0_set_irq_funcs()
162 if (!amdgpu_sriov_vf(adev)) { in gmc_v12_0_set_irq_funcs()
163 adev->gmc.ecc_irq.num_types = 1; in gmc_v12_0_set_irq_funcs()
164 adev->gmc.ecc_irq.funcs = &gmc_v12_0_ecc_funcs; in gmc_v12_0_set_irq_funcs()
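The set_irq_funcs lines above (157-164) follow the usual amdgpu pattern: each interrupt source gets a num_types count plus a pointer to a static table of callbacks, and the ECC source is only wired up on bare metal (not under SR-IOV). Below is an editor-added, user-space sketch of that callback-table pattern; the type and function names are hypothetical stand-ins, not the real amdgpu_irq_src structures.

#include <stdio.h>

/* Hypothetical stand-ins for the driver's irq source / funcs structs. */
struct irq_src_funcs {
	int (*set)(void *dev, unsigned int type, int enabled);
	int (*process)(void *dev, unsigned int type);
};

struct irq_src {
	unsigned int num_types;
	const struct irq_src_funcs *funcs;
};

static int fault_set(void *dev, unsigned int type, int enabled)
{
	printf("vm_fault type %u -> %s\n", type, enabled ? "enabled" : "disabled");
	return 0;
}

static int fault_process(void *dev, unsigned int type)
{
	printf("processing vm_fault type %u\n", type);
	return 0;
}

static const struct irq_src_funcs fault_funcs = {
	.set = fault_set,
	.process = fault_process,
};

int main(void)
{
	struct irq_src vm_fault = { .num_types = 1, .funcs = &fault_funcs };

	vm_fault.funcs->set(NULL, 0, 1);	/* state callback */
	vm_fault.funcs->process(NULL, 0);	/* dispatch callback */
	return 0;
}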
175 static bool gmc_v12_0_use_invalidate_semaphore(struct amdgpu_device *adev, in gmc_v12_0_use_invalidate_semaphore() argument
179 (!amdgpu_sriov_vf(adev))); in gmc_v12_0_use_invalidate_semaphore()
183 struct amdgpu_device *adev, in gmc_v12_0_get_vmid_pasid_mapping_info() argument
198 static void gmc_v12_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, in gmc_v12_0_flush_vm_hub() argument
201 bool use_semaphore = gmc_v12_0_use_invalidate_semaphore(adev, vmhub); in gmc_v12_0_flush_vm_hub()
202 struct amdgpu_vmhub *hub = &adev->vmhub[vmhub]; in gmc_v12_0_flush_vm_hub()
213 spin_lock(&adev->gmc.invalidate_lock); in gmc_v12_0_flush_vm_hub()
223 for (i = 0; i < adev->usec_timeout; i++) { in gmc_v12_0_flush_vm_hub()
232 if (i >= adev->usec_timeout) in gmc_v12_0_flush_vm_hub()
233 dev_err(adev->dev, in gmc_v12_0_flush_vm_hub()
240 for (i = 0; i < adev->usec_timeout; i++) { in gmc_v12_0_flush_vm_hub()
262 !amdgpu_sriov_vf(adev)) { in gmc_v12_0_flush_vm_hub()
272 spin_unlock(&adev->gmc.invalidate_lock); in gmc_v12_0_flush_vm_hub()
274 if (i < adev->usec_timeout) in gmc_v12_0_flush_vm_hub()
277 dev_err(adev->dev, "Timeout waiting for VM flush ACK!\n"); in gmc_v12_0_flush_vm_hub()
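The flush_vm_hub body (lines 198-277) writes an invalidate request and then busy-polls an ack register for up to adev->usec_timeout iterations, printing "Timeout waiting for VM flush ACK!" if the bit never appears. The following is an editor-added, self-contained sketch of that bounded-poll idiom; the fake register variables and the immediate ack are illustrative only, real code goes through MMIO helpers with a roughly 1 us delay per iteration.

#include <stdio.h>
#include <stdint.h>

static uint32_t inv_req_reg, inv_ack_reg;	/* fake registers */

static void write_invalidate_request(uint32_t vmid)
{
	inv_req_reg = 1u << vmid;
	inv_ack_reg = 1u << vmid;	/* pretend the hub acks at once */
}

static int flush_vm_hub(uint32_t vmid, int usec_timeout)
{
	int i;

	write_invalidate_request(vmid);

	/* Poll the per-VMID ack bit, giving up after usec_timeout tries. */
	for (i = 0; i < usec_timeout; i++) {
		if (inv_ack_reg & (1u << vmid))
			break;
		/* the driver delays about 1 us here before re-reading */
	}

	if (i >= usec_timeout) {
		fprintf(stderr, "Timeout waiting for VM flush ACK!\n");
		return -1;
	}
	return 0;
}

int main(void)
{
	return flush_vm_hub(0, 100000);
}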
290 static void gmc_v12_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, in gmc_v12_0_flush_gpu_tlb() argument
293 if ((vmhub == AMDGPU_GFXHUB(0)) && !adev->gfx.is_poweron) in gmc_v12_0_flush_gpu_tlb()
297 amdgpu_device_flush_hdp(adev, NULL); in gmc_v12_0_flush_gpu_tlb()
302 if ((adev->gfx.kiq[0].ring.sched.ready || adev->mes.ring[0].sched.ready) && in gmc_v12_0_flush_gpu_tlb()
303 (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) { in gmc_v12_0_flush_gpu_tlb()
304 struct amdgpu_vmhub *hub = &adev->vmhub[vmhub]; in gmc_v12_0_flush_gpu_tlb()
310 amdgpu_gmc_fw_reg_write_reg_wait(adev, req, ack, inv_req, in gmc_v12_0_flush_gpu_tlb()
315 mutex_lock(&adev->mman.gtt_window_lock); in gmc_v12_0_flush_gpu_tlb()
316 gmc_v12_0_flush_vm_hub(adev, vmid, vmhub, 0); in gmc_v12_0_flush_gpu_tlb()
317 mutex_unlock(&adev->mman.gtt_window_lock); in gmc_v12_0_flush_gpu_tlb()
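gmc_v12_0_flush_gpu_tlb (lines 290-317) bails out early when the GFX hub is powered off, flushes HDP, and then chooses between two paths: a firmware-mediated register write/wait through KIQ or MES when one of those rings is ready (and the device is either in SR-IOV runtime or not a VF at all), or the direct MMIO flush taken under gtt_window_lock. A compressed, editor-added restatement of that branch logic, with stub booleans in place of the real ring and SR-IOV state checks:

#include <stdbool.h>
#include <stdio.h>

static bool kiq_or_mes_ready = true;	/* stub: kiq[0] or mes ring scheduler up */
static bool sriov_runtime = false;	/* stub: amdgpu_sriov_runtime() */
static bool sriov_vf = false;		/* stub: amdgpu_sriov_vf() */
static bool gfx_poweron = true;		/* stub: adev->gfx.is_poweron */

static void flush_gpu_tlb(bool is_gfxhub)
{
	if (is_gfxhub && !gfx_poweron)
		return;			/* GFX hub is off, nothing to invalidate */

	/* (HDP flush happens here so updated page tables are visible) */

	if (kiq_or_mes_ready && (sriov_runtime || !sriov_vf)) {
		puts("firmware path: reg_write_reg_wait via KIQ/MES");
		return;
	}

	puts("direct path: MMIO flush under gtt_window_lock");
}

int main(void)
{
	flush_gpu_tlb(true);
	return 0;
}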
332 static void gmc_v12_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, in gmc_v12_0_flush_gpu_tlb_pasid() argument
342 valid = gmc_v12_0_get_vmid_pasid_mapping_info(adev, vmid, in gmc_v12_0_flush_gpu_tlb_pasid()
348 for_each_set_bit(i, adev->vmhubs_mask, in gmc_v12_0_flush_gpu_tlb_pasid()
350 gmc_v12_0_flush_gpu_tlb(adev, vmid, i, in gmc_v12_0_flush_gpu_tlb_pasid()
353 gmc_v12_0_flush_gpu_tlb(adev, vmid, AMDGPU_GFXHUB(0), in gmc_v12_0_flush_gpu_tlb_pasid()
362 bool use_semaphore = gmc_v12_0_use_invalidate_semaphore(ring->adev, ring->vm_hub); in gmc_v12_0_emit_flush_gpu_tlb()
363 struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub]; in gmc_v12_0_emit_flush_gpu_tlb()
410 struct amdgpu_device *adev = ring->adev; in gmc_v12_0_emit_pasid_mapping() local
456 static uint64_t gmc_v12_0_map_mtype(struct amdgpu_device *adev, uint32_t flags) in gmc_v12_0_map_mtype() argument
470 static void gmc_v12_0_get_vm_pde(struct amdgpu_device *adev, int level, in gmc_v12_0_get_vm_pde() argument
474 *addr = adev->vm_manager.vram_base_offset + *addr - in gmc_v12_0_get_vm_pde()
475 adev->gmc.vram_start; in gmc_v12_0_get_vm_pde()
478 if (!adev->gmc.translate_further) in gmc_v12_0_get_vm_pde()
492 static void gmc_v12_0_get_vm_pte(struct amdgpu_device *adev, in gmc_v12_0_get_vm_pte() argument
519 static unsigned gmc_v12_0_get_vbios_fb_size(struct amdgpu_device *adev) in gmc_v12_0_get_vbios_fb_size() argument
524 static unsigned int gmc_v12_0_get_dcc_alignment(struct amdgpu_device *adev) in gmc_v12_0_get_dcc_alignment() argument
528 if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(12, 0, 0) && in gmc_v12_0_get_dcc_alignment()
529 amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(12, 0, 1)) in gmc_v12_0_get_dcc_alignment()
532 max_tex_channel_caches = adev->gfx.config.max_texture_channel_caches; in gmc_v12_0_get_dcc_alignment()
553 static void gmc_v12_0_set_gmc_funcs(struct amdgpu_device *adev) in gmc_v12_0_set_gmc_funcs() argument
555 adev->gmc.gmc_funcs = &gmc_v12_0_gmc_funcs; in gmc_v12_0_set_gmc_funcs()
558 static void gmc_v12_0_set_umc_funcs(struct amdgpu_device *adev) in gmc_v12_0_set_umc_funcs() argument
560 switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) { in gmc_v12_0_set_umc_funcs()
562 adev->umc.channel_inst_num = UMC_V8_14_CHANNEL_INSTANCE_NUM; in gmc_v12_0_set_umc_funcs()
563 adev->umc.umc_inst_num = UMC_V8_14_UMC_INSTANCE_NUM(adev); in gmc_v12_0_set_umc_funcs()
564 adev->umc.node_inst_num = 0; in gmc_v12_0_set_umc_funcs()
565 adev->umc.max_ras_err_cnt_per_query = UMC_V8_14_TOTAL_CHANNEL_NUM(adev); in gmc_v12_0_set_umc_funcs()
566 adev->umc.channel_offs = UMC_V8_14_PER_CHANNEL_OFFSET; in gmc_v12_0_set_umc_funcs()
567 adev->umc.ras = &umc_v8_14_ras; in gmc_v12_0_set_umc_funcs()
575 static void gmc_v12_0_set_mmhub_funcs(struct amdgpu_device *adev) in gmc_v12_0_set_mmhub_funcs() argument
577 switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) { in gmc_v12_0_set_mmhub_funcs()
579 adev->mmhub.funcs = &mmhub_v4_1_0_funcs; in gmc_v12_0_set_mmhub_funcs()
586 static void gmc_v12_0_set_gfxhub_funcs(struct amdgpu_device *adev) in gmc_v12_0_set_gfxhub_funcs() argument
588 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { in gmc_v12_0_set_gfxhub_funcs()
591 adev->gfxhub.funcs = &gfxhub_v12_0_funcs; in gmc_v12_0_set_gfxhub_funcs()
600 struct amdgpu_device *adev = ip_block->adev; in gmc_v12_0_early_init() local
602 gmc_v12_0_set_gfxhub_funcs(adev); in gmc_v12_0_early_init()
603 gmc_v12_0_set_mmhub_funcs(adev); in gmc_v12_0_early_init()
604 gmc_v12_0_set_gmc_funcs(adev); in gmc_v12_0_early_init()
605 gmc_v12_0_set_irq_funcs(adev); in gmc_v12_0_early_init()
606 gmc_v12_0_set_umc_funcs(adev); in gmc_v12_0_early_init()
608 adev->gmc.shared_aperture_start = 0x2000000000000000ULL; in gmc_v12_0_early_init()
609 adev->gmc.shared_aperture_end = in gmc_v12_0_early_init()
610 adev->gmc.shared_aperture_start + (4ULL << 30) - 1; in gmc_v12_0_early_init()
611 adev->gmc.private_aperture_start = 0x1000000000000000ULL; in gmc_v12_0_early_init()
612 adev->gmc.private_aperture_end = in gmc_v12_0_early_init()
613 adev->gmc.private_aperture_start + (4ULL << 30) - 1; in gmc_v12_0_early_init()
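Lines 608-613 carve out two fixed 4 GiB apertures in the 64-bit GPU VA space. As an editor-added check of that arithmetic (the base addresses and the 4ULL << 30 size come straight from the lines above; the program is only illustrative):

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void)
{
	uint64_t shared_start  = 0x2000000000000000ULL;
	uint64_t private_start = 0x1000000000000000ULL;
	uint64_t size = 4ULL << 30;		/* 4 GiB */

	printf("shared : 0x%016" PRIx64 " - 0x%016" PRIx64 "\n",
	       shared_start, shared_start + size - 1);
	printf("private: 0x%016" PRIx64 " - 0x%016" PRIx64 "\n",
	       private_start, private_start + size - 1);
	return 0;
}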
620 struct amdgpu_device *adev = ip_block->adev; in gmc_v12_0_late_init() local
623 r = amdgpu_gmc_allocate_vm_inv_eng(adev); in gmc_v12_0_late_init()
627 r = amdgpu_gmc_ras_late_init(adev); in gmc_v12_0_late_init()
631 return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0); in gmc_v12_0_late_init()
634 static void gmc_v12_0_vram_gtt_location(struct amdgpu_device *adev, in gmc_v12_0_vram_gtt_location() argument
639 base = adev->mmhub.funcs->get_fb_location(adev); in gmc_v12_0_vram_gtt_location()
641 amdgpu_gmc_set_agp_default(adev, mc); in gmc_v12_0_vram_gtt_location()
642 amdgpu_gmc_vram_location(adev, &adev->gmc, base); in gmc_v12_0_vram_gtt_location()
643 amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_LOW); in gmc_v12_0_vram_gtt_location()
644 if (!amdgpu_sriov_vf(adev) && (amdgpu_agp == 1)) in gmc_v12_0_vram_gtt_location()
645 amdgpu_gmc_agp_location(adev, mc); in gmc_v12_0_vram_gtt_location()
648 if (amdgpu_sriov_vf(adev)) in gmc_v12_0_vram_gtt_location()
649 adev->vm_manager.vram_base_offset = 0; in gmc_v12_0_vram_gtt_location()
651 adev->vm_manager.vram_base_offset = adev->mmhub.funcs->get_mc_fb_offset(adev); in gmc_v12_0_vram_gtt_location()
663 static int gmc_v12_0_mc_init(struct amdgpu_device *adev) in gmc_v12_0_mc_init() argument
668 adev->gmc.mc_vram_size = in gmc_v12_0_mc_init()
669 adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL; in gmc_v12_0_mc_init()
670 adev->gmc.real_vram_size = adev->gmc.mc_vram_size; in gmc_v12_0_mc_init()
672 if (!(adev->flags & AMD_IS_APU)) { in gmc_v12_0_mc_init()
673 r = amdgpu_device_resize_fb_bar(adev); in gmc_v12_0_mc_init()
678 adev->gmc.aper_base = pci_resource_start(adev->pdev, 0); in gmc_v12_0_mc_init()
679 adev->gmc.aper_size = pci_resource_len(adev->pdev, 0); in gmc_v12_0_mc_init()
682 if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) { in gmc_v12_0_mc_init()
683 adev->gmc.aper_base = adev->mmhub.funcs->get_mc_fb_offset(adev); in gmc_v12_0_mc_init()
684 adev->gmc.aper_size = adev->gmc.real_vram_size; in gmc_v12_0_mc_init()
688 adev->gmc.visible_vram_size = adev->gmc.aper_size; in gmc_v12_0_mc_init()
689 if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size) in gmc_v12_0_mc_init()
690 adev->gmc.visible_vram_size = adev->gmc.real_vram_size; in gmc_v12_0_mc_init()
694 adev->gmc.gart_size = 512ULL << 20; in gmc_v12_0_mc_init()
696 adev->gmc.gart_size = (u64)amdgpu_gart_size << 20; in gmc_v12_0_mc_init()
698 gmc_v12_0_vram_gtt_location(adev, &adev->gmc); in gmc_v12_0_mc_init()
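gmc_v12_0_mc_init (lines 663-698) converts the NBIO-reported memory size from MiB to bytes, clamps visible VRAM to the real VRAM size, and falls back to a 512 MiB GART when no size is given. An editor-added sketch of that sizing logic; the 16384 MiB memsize and 256 MiB BAR are made-up inputs, and the unshown condition guarding line 694 is assumed to test whether the amdgpu_gart_size module parameter was left at its default of -1.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t memsize_mb = 16384;			/* hypothetical NBIO value */
	uint64_t real_vram_size = memsize_mb * 1024ULL * 1024ULL;
	uint64_t aper_size = 256ULL << 20;		/* hypothetical 256 MiB BAR */
	uint64_t visible_vram_size = aper_size;
	long long amdgpu_gart_size = -1;		/* module parameter unset */
	uint64_t gart_size;

	if (visible_vram_size > real_vram_size)
		visible_vram_size = real_vram_size;

	if (amdgpu_gart_size == -1)
		gart_size = 512ULL << 20;		/* default 512 MiB */
	else
		gart_size = (uint64_t)amdgpu_gart_size << 20;

	printf("VRAM %llu MiB, visible %llu MiB, GART %llu MiB\n",
	       (unsigned long long)(real_vram_size >> 20),
	       (unsigned long long)(visible_vram_size >> 20),
	       (unsigned long long)(gart_size >> 20));
	return 0;
}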
703 static int gmc_v12_0_gart_init(struct amdgpu_device *adev) in gmc_v12_0_gart_init() argument
707 if (adev->gart.bo) { in gmc_v12_0_gart_init()
713 r = amdgpu_gart_init(adev); in gmc_v12_0_gart_init()
717 adev->gart.table_size = adev->gart.num_gpu_pages * 8; in gmc_v12_0_gart_init()
718 adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_GFX12(0ULL, MTYPE_UC) | in gmc_v12_0_gart_init()
722 return amdgpu_gart_table_vram_alloc(adev); in gmc_v12_0_gart_init()
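Line 717 sizes the GART page table at 8 bytes per GPU page, i.e. one 64-bit PTE per page. A quick editor-added example: a 512 MiB GART backed by 4 KiB pages needs 131072 entries, so a 1 MiB table.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t gart_size = 512ULL << 20;		/* 512 MiB GART */
	uint64_t page_size = 4096;			/* 4 KiB GPU pages */
	uint64_t num_gpu_pages = gart_size / page_size;
	uint64_t table_size = num_gpu_pages * 8;	/* 8-byte PTEs */

	printf("%llu pages -> %llu KiB table\n",
	       (unsigned long long)num_gpu_pages,
	       (unsigned long long)(table_size >> 10));
	return 0;
}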
728 struct amdgpu_device *adev = ip_block->adev; in gmc_v12_0_sw_init() local
730 adev->mmhub.funcs->init(adev); in gmc_v12_0_sw_init()
732 adev->gfxhub.funcs->init(adev); in gmc_v12_0_sw_init()
734 spin_lock_init(&adev->gmc.invalidate_lock); in gmc_v12_0_sw_init()
736 r = amdgpu_atomfirmware_get_vram_info(adev, in gmc_v12_0_sw_init()
738 adev->gmc.vram_width = vram_width; in gmc_v12_0_sw_init()
740 adev->gmc.vram_type = vram_type; in gmc_v12_0_sw_init()
741 adev->gmc.vram_vendor = vram_vendor; in gmc_v12_0_sw_init()
743 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { in gmc_v12_0_sw_init()
746 set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask); in gmc_v12_0_sw_init()
747 set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask); in gmc_v12_0_sw_init()
753 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48); in gmc_v12_0_sw_init()
760 r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_VMC, in gmc_v12_0_sw_init()
762 &adev->gmc.vm_fault); in gmc_v12_0_sw_init()
767 r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX, in gmc_v12_0_sw_init()
769 &adev->gmc.vm_fault); in gmc_v12_0_sw_init()
773 if (!amdgpu_sriov_vf(adev)) { in gmc_v12_0_sw_init()
775 r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_DF, 0, in gmc_v12_0_sw_init()
776 &adev->gmc.ecc_irq); in gmc_v12_0_sw_init()
785 adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */ in gmc_v12_0_sw_init()
787 r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44)); in gmc_v12_0_sw_init()
793 adev->need_swiotlb = drm_need_swiotlb(44); in gmc_v12_0_sw_init()
795 r = gmc_v12_0_mc_init(adev); in gmc_v12_0_sw_init()
799 amdgpu_gmc_get_vbios_allocations(adev); in gmc_v12_0_sw_init()
802 r = amdgpu_bo_init(adev); in gmc_v12_0_sw_init()
806 r = gmc_v12_0_gart_init(adev); in gmc_v12_0_sw_init()
816 adev->vm_manager.first_kfd_vmid = adev->gfx.disable_kq ? 1 : 8; in gmc_v12_0_sw_init()
818 amdgpu_vm_manager_init(adev); in gmc_v12_0_sw_init()
820 r = amdgpu_gmc_ras_sw_init(adev); in gmc_v12_0_sw_init()
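Within sw_init, lines 785-793 distinguish the 48-bit GPU MC address space (mc_mask = 0xffffffffffff) from the 44-bit DMA mask used for system memory. An editor-added check of those widths, with DMA_BIT_MASK(44) written out explicitly as (1ULL << 44) - 1:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t mc_mask  = 0xffffffffffffULL;	/* 48-bit MC address space */
	uint64_t dma_mask = (1ULL << 44) - 1;	/* DMA_BIT_MASK(44) */

	printf("MC  space: %llu TiB\n", (unsigned long long)((mc_mask + 1) >> 40));
	printf("DMA space: %llu TiB\n", (unsigned long long)((dma_mask + 1) >> 40));
	return 0;
}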
834 static void gmc_v12_0_gart_fini(struct amdgpu_device *adev) in gmc_v12_0_gart_fini() argument
836 amdgpu_gart_table_vram_free(adev); in gmc_v12_0_gart_fini()
841 struct amdgpu_device *adev = ip_block->adev; in gmc_v12_0_sw_fini() local
843 amdgpu_vm_manager_fini(adev); in gmc_v12_0_sw_fini()
844 gmc_v12_0_gart_fini(adev); in gmc_v12_0_sw_fini()
845 amdgpu_gem_force_release(adev); in gmc_v12_0_sw_fini()
846 amdgpu_bo_fini(adev); in gmc_v12_0_sw_fini()
851 static void gmc_v12_0_init_golden_registers(struct amdgpu_device *adev) in gmc_v12_0_init_golden_registers() argument
860 static int gmc_v12_0_gart_enable(struct amdgpu_device *adev) in gmc_v12_0_gart_enable() argument
865 if (adev->gart.bo == NULL) { in gmc_v12_0_gart_enable()
866 dev_err(adev->dev, "No VRAM object for PCIE GART.\n"); in gmc_v12_0_gart_enable()
870 amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr); in gmc_v12_0_gart_enable()
872 r = adev->mmhub.funcs->gart_enable(adev); in gmc_v12_0_gart_enable()
877 amdgpu_device_flush_hdp(adev, NULL); in gmc_v12_0_gart_enable()
882 adev->mmhub.funcs->set_fault_enable_default(adev, value); in gmc_v12_0_gart_enable()
883 gmc_v12_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB0(0), 0); in gmc_v12_0_gart_enable()
885 dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n", in gmc_v12_0_gart_enable()
886 (unsigned)(adev->gmc.gart_size >> 20), in gmc_v12_0_gart_enable()
887 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo)); in gmc_v12_0_gart_enable()
895 struct amdgpu_device *adev = ip_block->adev; in gmc_v12_0_hw_init() local
898 gmc_v12_0_init_golden_registers(adev); in gmc_v12_0_hw_init()
900 r = gmc_v12_0_gart_enable(adev); in gmc_v12_0_hw_init()
904 if (adev->umc.funcs && adev->umc.funcs->init_registers) in gmc_v12_0_hw_init()
905 adev->umc.funcs->init_registers(adev); in gmc_v12_0_hw_init()
917 static void gmc_v12_0_gart_disable(struct amdgpu_device *adev) in gmc_v12_0_gart_disable() argument
919 adev->mmhub.funcs->gart_disable(adev); in gmc_v12_0_gart_disable()
924 struct amdgpu_device *adev = ip_block->adev; in gmc_v12_0_hw_fini() local
926 if (amdgpu_sriov_vf(adev)) { in gmc_v12_0_hw_fini()
932 amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0); in gmc_v12_0_hw_fini()
934 if (adev->gmc.ecc_irq.funcs && in gmc_v12_0_hw_fini()
935 amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) in gmc_v12_0_hw_fini()
936 amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0); in gmc_v12_0_hw_fini()
938 gmc_v12_0_gart_disable(adev); in gmc_v12_0_hw_fini()
958 amdgpu_vmid_reset_all(ip_block->adev); in gmc_v12_0_resume()
979 struct amdgpu_device *adev = ip_block->adev; in gmc_v12_0_set_clockgating_state() local
981 r = adev->mmhub.funcs->set_clockgating(adev, state); in gmc_v12_0_set_clockgating_state()
985 return athub_v4_1_0_set_clockgating(adev, state); in gmc_v12_0_set_clockgating_state()
990 struct amdgpu_device *adev = ip_block->adev; in gmc_v12_0_get_clockgating_state() local
992 adev->mmhub.funcs->get_clockgating(adev, flags); in gmc_v12_0_get_clockgating_state()
994 athub_v4_1_0_get_clockgating(adev, flags); in gmc_v12_0_get_clockgating_state()