Lines matching refs: pfdev

71 static int wait_ready(struct panfrost_device *pfdev, u32 as_nr)  in wait_ready()  argument
78 ret = readl_relaxed_poll_timeout_atomic(pfdev->iomem + AS_STATUS(as_nr), in wait_ready()
83 panfrost_device_schedule_reset(pfdev); in wait_ready()
84 dev_err(pfdev->dev, "AS_ACTIVE bit stuck\n"); in wait_ready()
90 static int write_cmd(struct panfrost_device *pfdev, u32 as_nr, u32 cmd) in write_cmd() argument
95 status = wait_ready(pfdev, as_nr); in write_cmd()
97 mmu_write(pfdev, AS_COMMAND(as_nr), cmd); in write_cmd()
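
The wait_ready()/write_cmd() matches above show the basic handshake with a GPU address space (AS): poll AS_STATUS until the AS_ACTIVE bit clears, then write the command register. A minimal sketch of that pattern follows, assuming the driver's mmu_write() accessor, the AS_* register macros (including AS_STATUS_AS_ACTIVE) and panfrost_device_schedule_reset(); the poll interval and timeout values are illustrative, not taken from the listing.

#include <linux/iopoll.h>

/* Poll AS_STATUS until no command is active on this address space. */
static int wait_ready(struct panfrost_device *pfdev, u32 as_nr)
{
        u32 val;
        int ret;

        ret = readl_relaxed_poll_timeout_atomic(pfdev->iomem + AS_STATUS(as_nr),
                                                val, !(val & AS_STATUS_AS_ACTIVE),
                                                10, 100000);
        if (ret) {
                /* The AS never went idle: assume the GPU hung and schedule a reset. */
                panfrost_device_schedule_reset(pfdev);
                dev_err(pfdev->dev, "AS_ACTIVE bit stuck\n");
        }

        return ret;
}

/* Issue an AS command once the previous one has completed. */
static int write_cmd(struct panfrost_device *pfdev, u32 as_nr, u32 cmd)
{
        int status;

        status = wait_ready(pfdev, as_nr);
        if (!status)
                mmu_write(pfdev, AS_COMMAND(as_nr), cmd);

        return status;
}
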
102 static void lock_region(struct panfrost_device *pfdev, u32 as_nr, in lock_region() argument
132 mmu_write(pfdev, AS_LOCKADDR_LO(as_nr), lower_32_bits(region)); in lock_region()
133 mmu_write(pfdev, AS_LOCKADDR_HI(as_nr), upper_32_bits(region)); in lock_region()
134 write_cmd(pfdev, as_nr, AS_COMMAND_LOCK); in lock_region()
138 static int mmu_hw_do_operation_locked(struct panfrost_device *pfdev, int as_nr, in mmu_hw_do_operation_locked() argument
145 lock_region(pfdev, as_nr, iova, size); in mmu_hw_do_operation_locked()
148 write_cmd(pfdev, as_nr, op); in mmu_hw_do_operation_locked()
151 return wait_ready(pfdev, as_nr); in mmu_hw_do_operation_locked()
154 static int mmu_hw_do_operation(struct panfrost_device *pfdev, in mmu_hw_do_operation() argument
160 spin_lock(&pfdev->as_lock); in mmu_hw_do_operation()
161 ret = mmu_hw_do_operation_locked(pfdev, mmu->as, iova, size, op); in mmu_hw_do_operation()
162 spin_unlock(&pfdev->as_lock); in mmu_hw_do_operation()
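
The lock_region(), mmu_hw_do_operation_locked() and mmu_hw_do_operation() matches fit together as "lock the target range, run the command, wait for idle", all serialized by as_lock. A sketch under those assumptions; lock_region_encode() is a hypothetical helper standing in for the driver's region-width encoding, which is not visible in the matched lines.

static void lock_region(struct panfrost_device *pfdev, u32 as_nr,
                        u64 region_start, u64 size)
{
        /* Hypothetical helper: packs the region start with an encoded width. */
        u64 region = lock_region_encode(region_start, size);

        mmu_write(pfdev, AS_LOCKADDR_LO(as_nr), lower_32_bits(region));
        mmu_write(pfdev, AS_LOCKADDR_HI(as_nr), upper_32_bits(region));
        write_cmd(pfdev, as_nr, AS_COMMAND_LOCK);
}

static int mmu_hw_do_operation_locked(struct panfrost_device *pfdev, int as_nr,
                                      u64 iova, u64 size, u32 op)
{
        if (as_nr < 0)
                return 0;

        /* Lock the target range first unless the operation is an unlock. */
        if (op != AS_COMMAND_UNLOCK)
                lock_region(pfdev, as_nr, iova, size);

        /* Run the operation and wait for the AS to go idle again. */
        write_cmd(pfdev, as_nr, op);
        return wait_ready(pfdev, as_nr);
}

static int mmu_hw_do_operation(struct panfrost_device *pfdev,
                               struct panfrost_mmu *mmu,
                               u64 iova, u64 size, u32 op)
{
        int ret;

        /* as_lock serializes AS register access and AS assignment. */
        spin_lock(&pfdev->as_lock);
        ret = mmu_hw_do_operation_locked(pfdev, mmu->as, iova, size, op);
        spin_unlock(&pfdev->as_lock);
        return ret;
}
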
166 static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_mmu *mmu) in panfrost_mmu_enable() argument
173 mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM); in panfrost_mmu_enable()
175 mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), lower_32_bits(transtab)); in panfrost_mmu_enable()
176 mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), upper_32_bits(transtab)); in panfrost_mmu_enable()
181 mmu_write(pfdev, AS_MEMATTR_LO(as_nr), lower_32_bits(memattr)); in panfrost_mmu_enable()
182 mmu_write(pfdev, AS_MEMATTR_HI(as_nr), upper_32_bits(memattr)); in panfrost_mmu_enable()
184 mmu_write(pfdev, AS_TRANSCFG_LO(as_nr), lower_32_bits(transcfg)); in panfrost_mmu_enable()
185 mmu_write(pfdev, AS_TRANSCFG_HI(as_nr), upper_32_bits(transcfg)); in panfrost_mmu_enable()
187 write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE); in panfrost_mmu_enable()
190 static void panfrost_mmu_disable(struct panfrost_device *pfdev, u32 as_nr) in panfrost_mmu_disable() argument
192 mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM); in panfrost_mmu_disable()
194 mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), 0); in panfrost_mmu_disable()
195 mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), 0); in panfrost_mmu_disable()
197 mmu_write(pfdev, AS_MEMATTR_LO(as_nr), 0); in panfrost_mmu_disable()
198 mmu_write(pfdev, AS_MEMATTR_HI(as_nr), 0); in panfrost_mmu_disable()
200 mmu_write(pfdev, AS_TRANSCFG_LO(as_nr), AS_TRANSCFG_ADRMODE_UNMAPPED); in panfrost_mmu_disable()
201 mmu_write(pfdev, AS_TRANSCFG_HI(as_nr), 0); in panfrost_mmu_disable()
203 write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE); in panfrost_mmu_disable()
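
The panfrost_mmu_enable()/panfrost_mmu_disable() matches show the AS setup sequence: flush whatever the AS may have cached, program TRANSTAB, MEMATTR and TRANSCFG, then latch the configuration with AS_COMMAND_UPDATE. A sketch of that sequence; the per-context register values are assumed to be cached on the mmu object, and the mmu->cfg.* field names are assumptions rather than something shown in the listing.

static void panfrost_mmu_enable(struct panfrost_device *pfdev,
                                struct panfrost_mmu *mmu)
{
        int as_nr = mmu->as;
        /* Field names below (mmu->cfg.*) are assumed, not taken from the source. */
        u64 transtab = mmu->cfg.transtab;
        u64 memattr = mmu->cfg.memattr;
        u64 transcfg = mmu->cfg.transcfg;

        /* Flush everything the AS may still have cached before reprogramming. */
        mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM);

        mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), lower_32_bits(transtab));
        mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), upper_32_bits(transtab));

        mmu_write(pfdev, AS_MEMATTR_LO(as_nr), lower_32_bits(memattr));
        mmu_write(pfdev, AS_MEMATTR_HI(as_nr), upper_32_bits(memattr));

        mmu_write(pfdev, AS_TRANSCFG_LO(as_nr), lower_32_bits(transcfg));
        mmu_write(pfdev, AS_TRANSCFG_HI(as_nr), upper_32_bits(transcfg));

        /* Latch the new configuration. */
        write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
}

static void panfrost_mmu_disable(struct panfrost_device *pfdev, u32 as_nr)
{
        mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM);

        /* Zero the tables and switch the AS to "unmapped" address mode. */
        mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), 0);
        mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), 0);

        mmu_write(pfdev, AS_MEMATTR_LO(as_nr), 0);
        mmu_write(pfdev, AS_MEMATTR_HI(as_nr), 0);

        mmu_write(pfdev, AS_TRANSCFG_LO(as_nr), AS_TRANSCFG_ADRMODE_UNMAPPED);
        mmu_write(pfdev, AS_TRANSCFG_HI(as_nr), 0);

        write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
}
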
223 struct panfrost_device *pfdev = mmu->pfdev; in mmu_cfg_init_aarch64_4k() local
225 if (drm_WARN_ON(pfdev->ddev, pgtbl_cfg->arm_lpae_s1_cfg.ttbr & in mmu_cfg_init_aarch64_4k()
247 struct panfrost_device *pfdev = mmu->pfdev; in panfrost_mmu_cfg_init() local
256 drm_WARN(pfdev->ddev, 1, "Invalid pgtable format"); in panfrost_mmu_cfg_init()
261 u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu) in panfrost_mmu_as_get() argument
265 spin_lock(&pfdev->as_lock); in panfrost_mmu_as_get()
278 list_move(&mmu->list, &pfdev->as_lru_list); in panfrost_mmu_as_get()
280 if (pfdev->as_faulty_mask & mask) { in panfrost_mmu_as_get()
285 mmu_write(pfdev, MMU_INT_CLEAR, mask); in panfrost_mmu_as_get()
286 mmu_write(pfdev, MMU_INT_MASK, ~pfdev->as_faulty_mask); in panfrost_mmu_as_get()
287 pfdev->as_faulty_mask &= ~mask; in panfrost_mmu_as_get()
288 panfrost_mmu_enable(pfdev, mmu); in panfrost_mmu_as_get()
295 as = ffz(pfdev->as_alloc_mask); in panfrost_mmu_as_get()
296 if (!(BIT(as) & pfdev->features.as_present)) { in panfrost_mmu_as_get()
299 list_for_each_entry_reverse(lru_mmu, &pfdev->as_lru_list, list) { in panfrost_mmu_as_get()
303 WARN_ON(&lru_mmu->list == &pfdev->as_lru_list); in panfrost_mmu_as_get()
314 set_bit(as, &pfdev->as_alloc_mask); in panfrost_mmu_as_get()
316 list_add(&mmu->list, &pfdev->as_lru_list); in panfrost_mmu_as_get()
318 dev_dbg(pfdev->dev, "Assigned AS%d to mmu %p, alloc_mask=%lx", as, mmu, pfdev->as_alloc_mask); in panfrost_mmu_as_get()
320 panfrost_mmu_enable(pfdev, mmu); in panfrost_mmu_as_get()
323 spin_unlock(&pfdev->as_lock); in panfrost_mmu_as_get()
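
The panfrost_mmu_as_get() matches outline the AS assignment policy: reuse an already assigned AS (re-enabling it if it was disabled after an unhandled fault), otherwise pick a free AS from as_alloc_mask, or reclaim the least-recently-used idle context. A simplified sketch of that decision; the runtime-PM and in-use/refcount bookkeeping is elided, and panfrost_mmu_as_mask() and panfrost_mmu_as_is_idle() are hypothetical helpers, not driver functions.

u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{
        int as;

        spin_lock(&pfdev->as_lock);

        as = mmu->as;
        if (as >= 0) {
                /* Hypothetical helper: MMU_INT mask bits covering this AS. */
                u32 mask = panfrost_mmu_as_mask(mmu);

                list_move(&mmu->list, &pfdev->as_lru_list);

                if (pfdev->as_faulty_mask & mask) {
                        /* The AS was disabled after an unhandled fault:
                         * clear/unmask its interrupt and re-enable it. */
                        mmu_write(pfdev, MMU_INT_CLEAR, mask);
                        mmu_write(pfdev, MMU_INT_MASK, ~pfdev->as_faulty_mask);
                        pfdev->as_faulty_mask &= ~mask;
                        panfrost_mmu_enable(pfdev, mmu);
                }
                goto out;
        }

        /* Look for a free AS; fall back to reclaiming the LRU context. */
        as = ffz(pfdev->as_alloc_mask);
        if (!(BIT(as) & pfdev->features.as_present)) {
                struct panfrost_mmu *lru_mmu;

                list_for_each_entry_reverse(lru_mmu, &pfdev->as_lru_list, list) {
                        if (panfrost_mmu_as_is_idle(pfdev, lru_mmu)) /* hypothetical check */
                                break;
                }
                WARN_ON(&lru_mmu->list == &pfdev->as_lru_list);

                list_del_init(&lru_mmu->list);
                as = lru_mmu->as;
                lru_mmu->as = -1;
        }

        /* Assign the free or reclaimed AS to this context. */
        mmu->as = as;
        set_bit(as, &pfdev->as_alloc_mask);
        list_add(&mmu->list, &pfdev->as_lru_list);

        dev_dbg(pfdev->dev, "Assigned AS%d to mmu %p, alloc_mask=%lx",
                as, mmu, pfdev->as_alloc_mask);

        panfrost_mmu_enable(pfdev, mmu);

out:
        spin_unlock(&pfdev->as_lock);
        return as;
}
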
327 void panfrost_mmu_as_put(struct panfrost_device *pfdev, struct panfrost_mmu *mmu) in panfrost_mmu_as_put() argument
333 void panfrost_mmu_reset(struct panfrost_device *pfdev) in panfrost_mmu_reset() argument
337 clear_bit(PANFROST_COMP_BIT_MMU, pfdev->is_suspended); in panfrost_mmu_reset()
339 spin_lock(&pfdev->as_lock); in panfrost_mmu_reset()
341 pfdev->as_alloc_mask = 0; in panfrost_mmu_reset()
342 pfdev->as_faulty_mask = 0; in panfrost_mmu_reset()
344 list_for_each_entry_safe(mmu, mmu_tmp, &pfdev->as_lru_list, list) { in panfrost_mmu_reset()
350 spin_unlock(&pfdev->as_lock); in panfrost_mmu_reset()
352 mmu_write(pfdev, MMU_INT_CLEAR, ~0); in panfrost_mmu_reset()
353 mmu_write(pfdev, MMU_INT_MASK, ~0); in panfrost_mmu_reset()
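
The panfrost_mmu_reset() matches show the post-reset cleanup: clear the MMU "suspended" bit, invalidate every AS assignment under as_lock, then clear and unmask all MMU interrupts. A sketch along those lines; the per-context bookkeeping inside the loop beyond what the matches show is elided.

void panfrost_mmu_reset(struct panfrost_device *pfdev)
{
        struct panfrost_mmu *mmu, *mmu_tmp;

        clear_bit(PANFROST_COMP_BIT_MMU, pfdev->is_suspended);

        spin_lock(&pfdev->as_lock);

        /* Every AS assignment is stale after a GPU reset. */
        pfdev->as_alloc_mask = 0;
        pfdev->as_faulty_mask = 0;

        list_for_each_entry_safe(mmu, mmu_tmp, &pfdev->as_lru_list, list) {
                mmu->as = -1;                   /* drop the stale AS assignment */
                list_del_init(&mmu->list);      /* further per-context resets elided */
        }

        spin_unlock(&pfdev->as_lock);

        /* Clear and unmask all MMU interrupts again. */
        mmu_write(pfdev, MMU_INT_CLEAR, ~0);
        mmu_write(pfdev, MMU_INT_MASK, ~0);
}
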
377 static void panfrost_mmu_flush_range(struct panfrost_device *pfdev, in panfrost_mmu_flush_range() argument
384 pm_runtime_get_noresume(pfdev->dev); in panfrost_mmu_flush_range()
387 if (pm_runtime_active(pfdev->dev)) in panfrost_mmu_flush_range()
388 mmu_hw_do_operation(pfdev, mmu, iova, size, AS_COMMAND_FLUSH_PT); in panfrost_mmu_flush_range()
390 pm_runtime_put_autosuspend(pfdev->dev); in panfrost_mmu_flush_range()
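
The panfrost_mmu_flush_range() matches show the runtime-PM guard around the flush: take a no-resume reference and only touch the hardware if the GPU is already awake, since a powered-down GPU has nothing cached. Sketch below; the early return for an unassigned AS is an assumption.

static void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
                                     struct panfrost_mmu *mmu,
                                     u64 iova, u64 size)
{
        if (mmu->as < 0)
                return;

        /* Take a runtime-PM reference without waking the GPU up... */
        pm_runtime_get_noresume(pfdev->dev);

        /* ...and only flush the page tables if the GPU is already active. */
        if (pm_runtime_active(pfdev->dev))
                mmu_hw_do_operation(pfdev, mmu, iova, size, AS_COMMAND_FLUSH_PT);

        pm_runtime_put_autosuspend(pfdev->dev);
}
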
393 static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu, in mmu_map_sg() argument
405 dev_dbg(pfdev->dev, "map: as=%d, iova=%llx, paddr=%lx, len=%zx", mmu->as, iova, paddr, len); in mmu_map_sg()
421 panfrost_mmu_flush_range(pfdev, mmu, start_iova, iova - start_iova); in mmu_map_sg()
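
The mmu_map_sg() matches show the map path: walk the DMA-mapped scatterlist, install the pages through the io-pgtable ops, then flush the whole range once at the end. A sketch assuming the driver's get_pgsize() helper, which picks the largest block size covering the current run.

static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
                      u64 iova, int prot, struct sg_table *sgt)
{
        struct io_pgtable_ops *ops = mmu->pgtbl_ops;
        struct scatterlist *sgl;
        u64 start_iova = iova;
        unsigned int count;

        for_each_sgtable_dma_sg(sgt, sgl, count) {
                unsigned long paddr = sg_dma_address(sgl);
                size_t len = sg_dma_len(sgl);

                dev_dbg(pfdev->dev, "map: as=%d, iova=%llx, paddr=%lx, len=%zx",
                        mmu->as, iova, paddr, len);

                while (len) {
                        size_t pgcount, mapped = 0;
                        /* get_pgsize(): the driver's page/block size helper. */
                        size_t pgsize = get_pgsize(iova | paddr, len, &pgcount);

                        ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot,
                                       GFP_KERNEL, &mapped);
                        /* Don't loop forever if the io-pgtable mapped nothing. */
                        mapped = max(mapped, pgsize);
                        iova += mapped;
                        paddr += mapped;
                        len -= mapped;
                }
        }

        /* One flush for the whole mapped range. */
        panfrost_mmu_flush_range(pfdev, mmu, start_iova, iova - start_iova);

        return 0;
}
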
431 struct panfrost_device *pfdev = to_panfrost_device(obj->dev); in panfrost_mmu_map() local
445 mmu_map_sg(pfdev, mapping->mmu, mapping->mmnode.start << PAGE_SHIFT, in panfrost_mmu_map()
456 struct panfrost_device *pfdev = to_panfrost_device(obj->dev); in panfrost_mmu_unmap() local
465 dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx", in panfrost_mmu_unmap()
482 panfrost_mmu_flush_range(pfdev, mapping->mmu, in panfrost_mmu_unmap()
508 addr_to_mapping(struct panfrost_device *pfdev, int as, u64 addr) in addr_to_mapping() argument
515 spin_lock(&pfdev->as_lock); in addr_to_mapping()
516 list_for_each_entry(mmu, &pfdev->as_lru_list, list) { in addr_to_mapping()
538 spin_unlock(&pfdev->as_lock); in addr_to_mapping()
544 static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, in panfrost_mmu_map_fault_addr() argument
556 bomapping = addr_to_mapping(pfdev, as, addr); in panfrost_mmu_map_fault_addr()
562 dev_WARN(pfdev->dev, "matching BO is not heap type (GPU VA = %llx)", in panfrost_mmu_map_fault_addr()
629 ret = dma_map_sgtable(pfdev->dev, sgt, DMA_BIDIRECTIONAL, 0); in panfrost_mmu_map_fault_addr()
633 mmu_map_sg(pfdev, bomapping->mmu, addr, in panfrost_mmu_map_fault_addr()
639 dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr); in panfrost_mmu_map_fault_addr()
661 struct panfrost_device *pfdev = mmu->pfdev; in panfrost_mmu_release_ctx() local
663 spin_lock(&pfdev->as_lock); in panfrost_mmu_release_ctx()
665 pm_runtime_get_noresume(pfdev->dev); in panfrost_mmu_release_ctx()
666 if (pm_runtime_active(pfdev->dev)) in panfrost_mmu_release_ctx()
667 panfrost_mmu_disable(pfdev, mmu->as); in panfrost_mmu_release_ctx()
668 pm_runtime_put_autosuspend(pfdev->dev); in panfrost_mmu_release_ctx()
670 clear_bit(mmu->as, &pfdev->as_alloc_mask); in panfrost_mmu_release_ctx()
671 clear_bit(mmu->as, &pfdev->as_in_use_mask); in panfrost_mmu_release_ctx()
674 spin_unlock(&pfdev->as_lock); in panfrost_mmu_release_ctx()
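
The panfrost_mmu_release_ctx() matches show the teardown of a per-file MMU context: if it still owns an AS, disable it (only when the GPU is powered) and release the AS bits, then free the page tables. Sketch below; the kref/refcount field name and the final teardown calls are assumptions, not part of the matched lines.

static void panfrost_mmu_release_ctx(struct kref *kref)
{
        struct panfrost_mmu *mmu = container_of(kref, struct panfrost_mmu,
                                                refcount);
        struct panfrost_device *pfdev = mmu->pfdev;

        spin_lock(&pfdev->as_lock);
        if (mmu->as >= 0) {
                /* Only touch the hardware if it is already powered up. */
                pm_runtime_get_noresume(pfdev->dev);
                if (pm_runtime_active(pfdev->dev))
                        panfrost_mmu_disable(pfdev, mmu->as);
                pm_runtime_put_autosuspend(pfdev->dev);

                clear_bit(mmu->as, &pfdev->as_alloc_mask);
                clear_bit(mmu->as, &pfdev->as_in_use_mask);
                list_del(&mmu->list);
        }
        spin_unlock(&pfdev->as_lock);

        /* Page-table and allocator teardown (assumed, not in the matches). */
        free_io_pgtable_ops(mmu->pgtbl_ops);
        drm_mm_takedown(&mmu->mm);
        kfree(mmu);
}
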
719 struct panfrost_mmu *panfrost_mmu_ctx_create(struct panfrost_device *pfdev) in panfrost_mmu_ctx_create() argument
721 u32 va_bits = GPU_MMU_FEATURES_VA_BITS(pfdev->features.mmu_features); in panfrost_mmu_ctx_create()
722 u32 pa_bits = GPU_MMU_FEATURES_PA_BITS(pfdev->features.mmu_features); in panfrost_mmu_ctx_create()
727 if (pfdev->comp->gpu_quirks & BIT(GPU_QUIRK_FORCE_AARCH64_PGTABLE)) { in panfrost_mmu_ctx_create()
728 if (!panfrost_has_hw_feature(pfdev, HW_FEATURE_AARCH64_MMU)) { in panfrost_mmu_ctx_create()
729 dev_err_once(pfdev->dev, in panfrost_mmu_ctx_create()
742 mmu->pfdev = pfdev; in panfrost_mmu_ctx_create()
756 .coherent_walk = pfdev->coherent, in panfrost_mmu_ctx_create()
758 .iommu_dev = pfdev->dev, in panfrost_mmu_ctx_create()
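
The panfrost_mmu_ctx_create() matches show where the io-pgtable configuration comes from: VA/PA widths from mmu_features, coherency and the IOMMU device from pfdev, plus a quirk that forces the AArch64 page-table format when the hardware advertises HW_FEATURE_AARCH64_MMU. A condensed sketch; the page-size bitmap, error values, log wording and the mmu_tlb_ops name are assumptions, and the list/lock/VA-allocator setup is elided.

struct panfrost_mmu *panfrost_mmu_ctx_create(struct panfrost_device *pfdev)
{
        u32 va_bits = GPU_MMU_FEATURES_VA_BITS(pfdev->features.mmu_features);
        u32 pa_bits = GPU_MMU_FEATURES_PA_BITS(pfdev->features.mmu_features);
        enum io_pgtable_fmt fmt = ARM_MALI_LPAE;
        struct panfrost_mmu *mmu;

        /* Some integrations force the AArch64 page-table format; that only
         * works if the hardware advertises the AArch64 MMU feature. */
        if (pfdev->comp->gpu_quirks & BIT(GPU_QUIRK_FORCE_AARCH64_PGTABLE)) {
                if (!panfrost_has_hw_feature(pfdev, HW_FEATURE_AARCH64_MMU)) {
                        /* Message wording illustrative. */
                        dev_err_once(pfdev->dev,
                                     "AArch64 page table format not supported\n");
                        return ERR_PTR(-EINVAL);
                }
                fmt = ARM_64_LPAE_S1;
        }

        mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
        if (!mmu)
                return ERR_PTR(-ENOMEM);

        mmu->pfdev = pfdev;
        /* List, lock, refcount and GPU VA allocator setup elided. */

        mmu->pgtbl_cfg = (struct io_pgtable_cfg) {
                .pgsize_bitmap  = SZ_4K | SZ_2M,        /* assumed */
                .ias            = va_bits,
                .oas            = pa_bits,
                .coherent_walk  = pfdev->coherent,
                .tlb            = &mmu_tlb_ops,         /* driver's TLB flush ops */
                .iommu_dev      = pfdev->dev,
        };

        mmu->pgtbl_ops = alloc_io_pgtable_ops(fmt, &mmu->pgtbl_cfg, mmu);
        if (!mmu->pgtbl_ops) {
                kfree(mmu);
                return ERR_PTR(-EINVAL);
        }

        /* The AS register values (TRANSTAB/MEMATTR/TRANSCFG) are derived from
         * pgtbl_cfg afterwards, see the panfrost_mmu_cfg_init() matches above. */

        return mmu;
}
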
783 static const char *access_type_name(struct panfrost_device *pfdev, in access_type_name() argument
788 if (panfrost_has_hw_feature(pfdev, HW_FEATURE_AARCH64_MMU)) in access_type_name()
806 struct panfrost_device *pfdev = data; in panfrost_mmu_irq_handler() local
808 if (test_bit(PANFROST_COMP_BIT_MMU, pfdev->is_suspended)) in panfrost_mmu_irq_handler()
811 if (!mmu_read(pfdev, MMU_INT_STAT)) in panfrost_mmu_irq_handler()
814 mmu_write(pfdev, MMU_INT_MASK, 0); in panfrost_mmu_irq_handler()
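
The panfrost_mmu_irq_handler() matches show the hard-IRQ half: bail out if the MMU block is suspended or nothing is pending, otherwise mask MMU interrupts and hand off to the threaded handler. A minimal sketch of that flow:

static irqreturn_t panfrost_mmu_irq_handler(int irq, void *data)
{
        struct panfrost_device *pfdev = data;

        /* Ignore interrupts once the MMU block has been suspended. */
        if (test_bit(PANFROST_COMP_BIT_MMU, pfdev->is_suspended))
                return IRQ_NONE;

        /* Nothing pending: not ours (the line may be shared). */
        if (!mmu_read(pfdev, MMU_INT_STAT))
                return IRQ_NONE;

        /* Mask MMU interrupts and let the threaded handler do the work. */
        mmu_write(pfdev, MMU_INT_MASK, 0);
        return IRQ_WAKE_THREAD;
}
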
820 struct panfrost_device *pfdev = data; in panfrost_mmu_irq_handler_thread() local
821 u32 status = mmu_read(pfdev, MMU_INT_RAWSTAT); in panfrost_mmu_irq_handler_thread()
833 fault_status = mmu_read(pfdev, AS_FAULTSTATUS(as)); in panfrost_mmu_irq_handler_thread()
834 addr = mmu_read(pfdev, AS_FAULTADDRESS_LO(as)); in panfrost_mmu_irq_handler_thread()
835 addr |= (u64)mmu_read(pfdev, AS_FAULTADDRESS_HI(as)) << 32; in panfrost_mmu_irq_handler_thread()
842 mmu_write(pfdev, MMU_INT_CLEAR, mask); in panfrost_mmu_irq_handler_thread()
847 ret = panfrost_mmu_map_fault_addr(pfdev, as, addr); in panfrost_mmu_irq_handler_thread()
851 dev_err(pfdev->dev, in panfrost_mmu_irq_handler_thread()
864 access_type, access_type_name(pfdev, fault_status), in panfrost_mmu_irq_handler_thread()
867 spin_lock(&pfdev->as_lock); in panfrost_mmu_irq_handler_thread()
871 pfdev->as_faulty_mask |= mask; in panfrost_mmu_irq_handler_thread()
874 panfrost_mmu_disable(pfdev, as); in panfrost_mmu_irq_handler_thread()
875 spin_unlock(&pfdev->as_lock); in panfrost_mmu_irq_handler_thread()
882 status = mmu_read(pfdev, MMU_INT_RAWSTAT) & ~pfdev->as_faulty_mask; in panfrost_mmu_irq_handler_thread()
886 if (!test_bit(PANFROST_COMP_BIT_MMU, pfdev->is_suspended)) { in panfrost_mmu_irq_handler_thread()
887 spin_lock(&pfdev->as_lock); in panfrost_mmu_irq_handler_thread()
888 mmu_write(pfdev, MMU_INT_MASK, ~pfdev->as_faulty_mask); in panfrost_mmu_irq_handler_thread()
889 spin_unlock(&pfdev->as_lock); in panfrost_mmu_irq_handler_thread()
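
The threaded-handler matches read the per-AS fault record: AS_FAULTSTATUS plus a 64-bit fault address split across AS_FAULTADDRESS_LO/HI. A small hypothetical helper (not part of the driver) showing just that assembly:

/* Hypothetical helper, not in the driver: read the 64-bit fault address
 * for address space 'as' the way the matched lines do. */
static u64 panfrost_mmu_fault_addr(struct panfrost_device *pfdev, u32 as)
{
        u64 addr;

        addr = mmu_read(pfdev, AS_FAULTADDRESS_LO(as));
        addr |= (u64)mmu_read(pfdev, AS_FAULTADDRESS_HI(as)) << 32;

        return addr;
}

As the surrounding matches show, translation faults are first handed to panfrost_mmu_map_fault_addr() to grow a heap mapping; any other (or unhandled) fault is logged, recorded in as_faulty_mask and the AS is disabled until panfrost_mmu_as_get() clears the fault and re-enables it.
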
895 int panfrost_mmu_init(struct panfrost_device *pfdev) in panfrost_mmu_init() argument
899 pfdev->mmu_irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "mmu"); in panfrost_mmu_init()
900 if (pfdev->mmu_irq < 0) in panfrost_mmu_init()
901 return pfdev->mmu_irq; in panfrost_mmu_init()
903 err = devm_request_threaded_irq(pfdev->dev, pfdev->mmu_irq, in panfrost_mmu_init()
907 pfdev); in panfrost_mmu_init()
910 dev_err(pfdev->dev, "failed to request mmu irq"); in panfrost_mmu_init()
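
The panfrost_mmu_init() matches show the IRQ setup: look up the "mmu" interrupt by name and install the hard handler/threaded handler pair requested above. Sketch below; the IRQ flags and device-name string are assumptions.

int panfrost_mmu_init(struct panfrost_device *pfdev)
{
        int err;

        pfdev->mmu_irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "mmu");
        if (pfdev->mmu_irq < 0)
                return pfdev->mmu_irq;

        /* Hard handler masks the interrupt, the thread decodes the faults. */
        err = devm_request_threaded_irq(pfdev->dev, pfdev->mmu_irq,
                                        panfrost_mmu_irq_handler,
                                        panfrost_mmu_irq_handler_thread,
                                        IRQF_SHARED, KBUILD_MODNAME "-mmu",
                                        pfdev);
        if (err) {
                dev_err(pfdev->dev, "failed to request mmu irq");
                return err;
        }

        return 0;
}
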
917 void panfrost_mmu_fini(struct panfrost_device *pfdev) in panfrost_mmu_fini() argument
919 mmu_write(pfdev, MMU_INT_MASK, 0); in panfrost_mmu_fini()
922 void panfrost_mmu_suspend_irq(struct panfrost_device *pfdev) in panfrost_mmu_suspend_irq() argument
924 set_bit(PANFROST_COMP_BIT_MMU, pfdev->is_suspended); in panfrost_mmu_suspend_irq()
926 mmu_write(pfdev, MMU_INT_MASK, 0); in panfrost_mmu_suspend_irq()
927 synchronize_irq(pfdev->mmu_irq); in panfrost_mmu_suspend_irq()