Lines Matching refs:ptdev
262 struct panthor_device *ptdev; member
445 drm_WARN_ON(&vm->ptdev->base, vm->op_ctx); in alloc_pt()
446 p = alloc_pages_node(dev_to_node(vm->ptdev->base.dev), in alloc_pt()
456 if (drm_WARN_ON(&vm->ptdev->base, size != SZ_4K)) in alloc_pt()
462 if (drm_WARN_ON(&vm->ptdev->base, !vm->op_ctx) || in alloc_pt()
463 drm_WARN_ON(&vm->ptdev->base, in alloc_pt()
498 if (drm_WARN_ON(&vm->ptdev->base, size != SZ_4K)) in free_pt()
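
The alloc_pt()/free_pt() hits above warn when the operation context is missing or no longer holds reserved pages, which points at a preallocation scheme: every page-table page a map operation might need is reserved before the page-table code runs, and the allocator callback only pops from that reserve. A minimal userspace sketch of that pattern follows; op_ctx, op_ctx_prealloc() and the pool layout are illustrative assumptions, not the driver's real structures.

#include <stdio.h>
#include <stdlib.h>

#define PT_PAGE_SZ 4096

struct op_ctx {
	void **rsvd;            /* page-table pages reserved ahead of time */
	unsigned int count;     /* how many reserved pages are left */
};

/* Reserve every page the operation might need while sleeping is still allowed. */
static int op_ctx_prealloc(struct op_ctx *ctx, unsigned int max_pts)
{
	ctx->rsvd = calloc(max_pts, sizeof(*ctx->rsvd));
	if (!ctx->rsvd)
		return -1;
	for (ctx->count = 0; ctx->count < max_pts; ctx->count++) {
		ctx->rsvd[ctx->count] = aligned_alloc(PT_PAGE_SZ, PT_PAGE_SZ);
		if (!ctx->rsvd[ctx->count])
			return -1;
	}
	return 0;
}

/* Map-path allocator: must never allocate, only pop a reserved page. */
static void *alloc_pt(struct op_ctx *ctx)
{
	if (!ctx || !ctx->count)
		return NULL;    /* the real code warns instead of silently failing */
	return ctx->rsvd[--ctx->count];
}

int main(void)
{
	struct op_ctx ctx = { 0 };

	if (op_ctx_prealloc(&ctx, 4))
		return 1;
	printf("pt page %p, %u reserved pages left\n", alloc_pt(&ctx), ctx.count);
	return 0;
}

Reserving up front keeps the map step itself allocation-free, so it cannot fail halfway through while the VM's locks are held.
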
505 static int wait_ready(struct panthor_device *ptdev, u32 as_nr) in wait_ready() argument
513 ret = gpu_read_relaxed_poll_timeout_atomic(ptdev, AS_STATUS(as_nr), val, in wait_ready()
518 panthor_device_schedule_reset(ptdev); in wait_ready()
519 drm_err(&ptdev->base, "AS_ACTIVE bit stuck\n"); in wait_ready()
525 static int write_cmd(struct panthor_device *ptdev, u32 as_nr, u32 cmd) in write_cmd() argument
530 status = wait_ready(ptdev, as_nr); in write_cmd()
532 gpu_write(ptdev, AS_COMMAND(as_nr), cmd); in write_cmd()
537 static void lock_region(struct panthor_device *ptdev, u32 as_nr, in lock_region() argument
567 gpu_write64(ptdev, AS_LOCKADDR(as_nr), region); in lock_region()
568 write_cmd(ptdev, as_nr, AS_COMMAND_LOCK); in lock_region()
571 static int mmu_hw_do_operation_locked(struct panthor_device *ptdev, int as_nr, in mmu_hw_do_operation_locked() argument
574 lockdep_assert_held(&ptdev->mmu->as.slots_lock); in mmu_hw_do_operation_locked()
586 lock_region(ptdev, as_nr, iova, size); in mmu_hw_do_operation_locked()
589 write_cmd(ptdev, as_nr, op); in mmu_hw_do_operation_locked()
592 return wait_ready(ptdev, as_nr); in mmu_hw_do_operation_locked()
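
wait_ready() above polls AS_STATUS until the AS_ACTIVE bit clears (scheduling a device reset and logging "AS_ACTIVE bit stuck" on timeout), and write_cmd() reuses it so a new command is never issued while the previous one is still running. A minimal userspace sketch of that poll-then-write pattern, with invented register and bit names standing in for the real MMIO helpers:

#include <stdint.h>
#include <stdio.h>

#define FAKE_STATUS_ACTIVE  (1u << 0)   /* assumed "command in flight" bit */
#define FAKE_CMD_UPDATE     0x1u        /* illustrative command code */

struct fake_as {                        /* stand-in for one address-space slot */
	uint32_t status;
	uint32_t command;
};

/* Poll the status register until the busy bit clears or the budget runs out. */
static int wait_ready(struct fake_as *as, unsigned int max_polls)
{
	while (max_polls--) {
		if (!(as->status & FAKE_STATUS_ACTIVE))
			return 0;
	}
	/* The real driver schedules a GPU reset and logs "AS_ACTIVE bit stuck". */
	return -1;
}

/* Never overwrite a command the MMU is still processing. */
static int write_cmd(struct fake_as *as, uint32_t cmd)
{
	int ret = wait_ready(as, 1000);

	if (!ret)
		as->command = cmd;
	return ret;
}

int main(void)
{
	struct fake_as as = { .status = 0 };

	printf("write_cmd: %d\n", write_cmd(&as, FAKE_CMD_UPDATE));
	return 0;
}
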
598 struct panthor_device *ptdev = vm->ptdev; in mmu_hw_do_operation() local
601 mutex_lock(&ptdev->mmu->as.slots_lock); in mmu_hw_do_operation()
602 ret = mmu_hw_do_operation_locked(ptdev, vm->as.id, iova, size, op); in mmu_hw_do_operation()
603 mutex_unlock(&ptdev->mmu->as.slots_lock); in mmu_hw_do_operation()
608 static int panthor_mmu_as_enable(struct panthor_device *ptdev, u32 as_nr, in panthor_mmu_as_enable() argument
613 ret = mmu_hw_do_operation_locked(ptdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM); in panthor_mmu_as_enable()
617 gpu_write64(ptdev, AS_TRANSTAB(as_nr), transtab); in panthor_mmu_as_enable()
618 gpu_write64(ptdev, AS_MEMATTR(as_nr), memattr); in panthor_mmu_as_enable()
619 gpu_write64(ptdev, AS_TRANSCFG(as_nr), transcfg); in panthor_mmu_as_enable()
621 return write_cmd(ptdev, as_nr, AS_COMMAND_UPDATE); in panthor_mmu_as_enable()
624 static int panthor_mmu_as_disable(struct panthor_device *ptdev, u32 as_nr) in panthor_mmu_as_disable() argument
628 ret = mmu_hw_do_operation_locked(ptdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM); in panthor_mmu_as_disable()
632 gpu_write64(ptdev, AS_TRANSTAB(as_nr), 0); in panthor_mmu_as_disable()
633 gpu_write64(ptdev, AS_MEMATTR(as_nr), 0); in panthor_mmu_as_disable()
634 gpu_write64(ptdev, AS_TRANSCFG(as_nr), AS_TRANSCFG_ADRMODE_UNMAPPED); in panthor_mmu_as_disable()
636 return write_cmd(ptdev, as_nr, AS_COMMAND_UPDATE); in panthor_mmu_as_disable()
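
panthor_mmu_as_enable()/panthor_mmu_as_disable() above both flush the address space first, then either program AS_TRANSTAB/AS_MEMATTR/AS_TRANSCFG or clear them and select AS_TRANSCFG_ADRMODE_UNMAPPED, and finally latch the change with AS_COMMAND_UPDATE. A rough sketch of that ordering, with a made-up register struct in place of the real MMIO accessors:

#include <stdint.h>
#include <stdio.h>

#define FAKE_ADRMODE_UNMAPPED 0x1ull    /* assumed "no translation" mode */

struct as_regs {            /* invented register block, one per AS */
	uint64_t transtab;  /* page-table base and mode bits */
	uint64_t memattr;   /* memory attribute encoding */
	uint64_t transcfg;  /* translation config (address mode, VA size) */
};

static void as_flush(struct as_regs *r)  { (void)r; /* FLUSH_MEM stand-in */ }
static void as_update(struct as_regs *r) { (void)r; /* UPDATE stand-in */ }

static void as_enable(struct as_regs *r, uint64_t transtab,
		      uint64_t transcfg, uint64_t memattr)
{
	as_flush(r);                    /* drop stale translations first */
	r->transtab = transtab;
	r->memattr  = memattr;
	r->transcfg = transcfg;
	as_update(r);                   /* make the new configuration live */
}

static void as_disable(struct as_regs *r)
{
	as_flush(r);
	r->transtab = 0;
	r->memattr  = 0;
	r->transcfg = FAKE_ADRMODE_UNMAPPED;    /* fault instead of translating */
	as_update(r);
}

int main(void)
{
	struct as_regs r = { 0 };

	as_enable(&r, 0x1000, 0x2, 0x88);
	as_disable(&r);
	printf("transcfg after disable: 0x%llx\n", (unsigned long long)r.transcfg);
	return 0;
}
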
639 static u32 panthor_mmu_fault_mask(struct panthor_device *ptdev, u32 value) in panthor_mmu_fault_mask() argument
645 static u32 panthor_mmu_as_fault_mask(struct panthor_device *ptdev, u32 as) in panthor_mmu_as_fault_mask() argument
674 struct panthor_device *ptdev = vm->ptdev; in panthor_vm_release_as_locked() local
676 lockdep_assert_held(&ptdev->mmu->as.slots_lock); in panthor_vm_release_as_locked()
678 if (drm_WARN_ON(&ptdev->base, vm->as.id < 0)) in panthor_vm_release_as_locked()
681 ptdev->mmu->as.slots[vm->as.id].vm = NULL; in panthor_vm_release_as_locked()
682 clear_bit(vm->as.id, &ptdev->mmu->as.alloc_mask); in panthor_vm_release_as_locked()
698 struct panthor_device *ptdev = vm->ptdev; in panthor_vm_active() local
699 u32 va_bits = GPU_MMU_FEATURES_VA_BITS(ptdev->gpu_info.mmu_features); in panthor_vm_active()
704 if (!drm_dev_enter(&ptdev->base, &cookie)) in panthor_vm_active()
710 mutex_lock(&ptdev->mmu->as.slots_lock); in panthor_vm_active()
720 if (ptdev->mmu->as.faulty_mask & panthor_mmu_as_fault_mask(ptdev, as)) in panthor_vm_active()
728 drm_WARN_ON(&ptdev->base, ptdev->mmu->as.alloc_mask & BIT(0)); in panthor_vm_active()
731 as = ffz(ptdev->mmu->as.alloc_mask | BIT(0)); in panthor_vm_active()
734 if (!(BIT(as) & ptdev->gpu_info.as_present)) { in panthor_vm_active()
737 lru_vm = list_first_entry_or_null(&ptdev->mmu->as.lru_list, in panthor_vm_active()
740 if (drm_WARN_ON(&ptdev->base, !lru_vm)) { in panthor_vm_active()
745 drm_WARN_ON(&ptdev->base, refcount_read(&lru_vm->as.active_cnt)); in panthor_vm_active()
752 set_bit(as, &ptdev->mmu->as.alloc_mask); in panthor_vm_active()
753 ptdev->mmu->as.slots[as].vm = vm; in panthor_vm_active()
761 if (ptdev->coherent) in panthor_vm_active()
770 if (ptdev->mmu->as.faulty_mask & panthor_mmu_as_fault_mask(ptdev, as)) { in panthor_vm_active()
771 gpu_write(ptdev, MMU_INT_CLEAR, panthor_mmu_as_fault_mask(ptdev, as)); in panthor_vm_active()
772 ptdev->mmu->as.faulty_mask &= ~panthor_mmu_as_fault_mask(ptdev, as); in panthor_vm_active()
773 ptdev->mmu->irq.mask |= panthor_mmu_as_fault_mask(ptdev, as); in panthor_vm_active()
774 gpu_write(ptdev, MMU_INT_MASK, ~ptdev->mmu->as.faulty_mask); in panthor_vm_active()
777 ret = panthor_mmu_as_enable(vm->ptdev, vm->as.id, transtab, transcfg, vm->memattr); in panthor_vm_active()
786 mutex_unlock(&ptdev->mmu->as.slots_lock); in panthor_vm_active()
808 struct panthor_device *ptdev = vm->ptdev; in panthor_vm_idle() local
810 if (!refcount_dec_and_mutex_lock(&vm->as.active_cnt, &ptdev->mmu->as.slots_lock)) in panthor_vm_idle()
813 if (!drm_WARN_ON(&ptdev->base, vm->as.id == -1 || !list_empty(&vm->as.lru_node))) in panthor_vm_idle()
814 list_add_tail(&vm->as.lru_node, &ptdev->mmu->as.lru_list); in panthor_vm_idle()
817 mutex_unlock(&ptdev->mmu->as.slots_lock); in panthor_vm_idle()
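
panthor_vm_active()/panthor_vm_idle() above implement address-space slot management: slot 0 is kept reserved (for the MCU VM), a free slot is picked from alloc_mask, and when every slot is taken the least-recently-idled VM on lru_list is evicted; idling a VM keeps its slot but appends it to the LRU. A simplified userspace sketch of that allocation policy (structures and list handling are stand-ins, and re-activating a VM that is already on the LRU is not handled here):

#include <stdio.h>

#define NUM_AS 8

struct fake_vm {
	int as_id;                  /* -1 while the VM owns no slot */
	struct fake_vm *lru_next;   /* singly linked LRU of idle VMs */
};

static unsigned int alloc_mask = 1u << 0;   /* slot 0 reserved (MCU VM) */
static struct fake_vm *lru_head;            /* oldest idle VM first */

static int find_free_slot(unsigned int mask)
{
	for (int i = 0; i < NUM_AS; i++)
		if (!(mask & (1u << i)))
			return i;
	return -1;
}

static int vm_active(struct fake_vm *vm)
{
	int as;

	if (vm->as_id >= 0)
		return vm->as_id;       /* already bound: reuse the slot */

	as = find_free_slot(alloc_mask);
	if (as < 0) {
		/* Every slot is busy: steal the least-recently-idled VM's slot.
		 * The real driver also disables that AS before reusing it. */
		struct fake_vm *lru = lru_head;

		if (!lru)
			return -1;
		lru_head = lru->lru_next;
		as = lru->as_id;
		lru->as_id = -1;
	}

	alloc_mask |= 1u << as;
	vm->as_id = as;
	/* The real driver programs TRANSTAB/TRANSCFG/MEMATTR and clears any
	 * stale fault state for the slot at this point. */
	return as;
}

static void vm_idle(struct fake_vm *vm)
{
	struct fake_vm **tail = &lru_head;

	/* Keep the slot, but make this VM the newest eviction candidate. */
	while (*tail)
		tail = &(*tail)->lru_next;
	vm->lru_next = NULL;
	*tail = vm;
}

int main(void)
{
	struct fake_vm vms[10] = { 0 };

	for (int i = 0; i < 10; i++) {
		vms[i].as_id = -1;
		printf("vm%d -> as %d\n", i, vm_active(&vms[i]));
		vm_idle(&vms[i]);
	}
	return 0;
}

Keeping idle VMs bound to a slot until someone actually needs it avoids reprogramming the MMU for a VM that becomes active again shortly after idling.
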
872 struct panthor_device *ptdev = vm->ptdev; in panthor_vm_flush_range() local
879 if (!drm_dev_enter(&ptdev->base, &cookie)) in panthor_vm_flush_range()
890 struct panthor_device *ptdev = vm->ptdev; in panthor_vm_unmap_pages() local
894 drm_dbg(&ptdev->base, "unmap: as=%d, iova=%llx, len=%llx", vm->as.id, iova, size); in panthor_vm_unmap_pages()
902 if (drm_WARN_ON(&ptdev->base, unmapped_sz != pgsize * pgcount)) { in panthor_vm_unmap_pages()
903 drm_err(&ptdev->base, "failed to unmap range %llx-%llx (requested range %llx-%llx)\n", in panthor_vm_unmap_pages()
920 struct panthor_device *ptdev = vm->ptdev; in panthor_vm_map_pages() local
944 drm_dbg(&ptdev->base, "map: as=%d, iova=%llx, paddr=%pad, len=%zx", in panthor_vm_map_pages()
957 if (drm_WARN_ON(&ptdev->base, !ret && !mapped)) in panthor_vm_map_pages()
964 drm_WARN_ON(&ptdev->base, in panthor_vm_map_pages()
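
The panthor_vm_map_pages() hits above suggest a progress-checked mapping loop: call the io-pgtable mapper repeatedly, advance by the amount it actually mapped, treat "no error but no progress" as a bug, and unmap the partially mapped range on failure. A small sketch of that loop, with a fake pgtable_map()/pgtable_unmap() pair standing in for the io-pgtable ops:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Pretend low-level mapper: maps at most 8 KiB per call. */
static int pgtable_map(uint64_t iova, size_t size, size_t *mapped)
{
	(void)iova;
	*mapped = size > 8192 ? 8192 : size;
	return 0;
}

static void pgtable_unmap(uint64_t iova, size_t size)
{
	printf("unmap %zx bytes at %llx\n", size, (unsigned long long)iova);
}

static int vm_map_pages(uint64_t iova, size_t size)
{
	uint64_t start_iova = iova;
	size_t offset = 0;

	while (offset < size) {
		size_t mapped = 0;
		int ret = pgtable_map(iova + offset, size - offset, &mapped);

		if (!ret && !mapped)
			ret = -1;       /* no error but no progress: give up */
		offset += mapped;

		if (ret) {
			/* Roll back whatever part of the range was mapped. */
			if (offset)
				pgtable_unmap(start_iova, offset);
			return ret;
		}
	}
	return 0;
}

int main(void)
{
	printf("map: %d\n", vm_map_pages(0x100000, 5 * 4096));
	return 0;
}
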
1446 panthor_vm_create_check_args(const struct panthor_device *ptdev, in panthor_vm_create_check_args() argument
1450 u32 va_bits = GPU_MMU_FEATURES_VA_BITS(ptdev->gpu_info.mmu_features); in panthor_vm_create_check_args()
1481 int panthor_vm_pool_create_vm(struct panthor_device *ptdev, in panthor_vm_pool_create_vm() argument
1490 ret = panthor_vm_create_check_args(ptdev, args, &kernel_va_start, &kernel_va_range); in panthor_vm_pool_create_vm()
1494 vm = panthor_vm_create(ptdev, false, kernel_va_start, kernel_va_range, in panthor_vm_pool_create_vm()
1523 drm_WARN_ON(&vm->ptdev->base, in panthor_vm_destroy()
1628 static const char *access_type_name(struct panthor_device *ptdev, in access_type_name() argument
1641 drm_WARN_ON(&ptdev->base, 1); in access_type_name()
1646 static void panthor_mmu_irq_handler(struct panthor_device *ptdev, u32 status) in panthor_mmu_irq_handler() argument
1650 status = panthor_mmu_fault_mask(ptdev, status); in panthor_mmu_irq_handler()
1653 u32 mask = panthor_mmu_as_fault_mask(ptdev, as); in panthor_mmu_irq_handler()
1661 fault_status = gpu_read(ptdev, AS_FAULTSTATUS(as)); in panthor_mmu_irq_handler()
1662 addr = gpu_read64(ptdev, AS_FAULTADDRESS(as)); in panthor_mmu_irq_handler()
1669 mutex_lock(&ptdev->mmu->as.slots_lock); in panthor_mmu_irq_handler()
1671 ptdev->mmu->as.faulty_mask |= mask; in panthor_mmu_irq_handler()
1673 panthor_mmu_fault_mask(ptdev, ~ptdev->mmu->as.faulty_mask); in panthor_mmu_irq_handler()
1676 drm_err(&ptdev->base, in panthor_mmu_irq_handler()
1686 exception_type, panthor_exception_name(ptdev, exception_type), in panthor_mmu_irq_handler()
1687 access_type, access_type_name(ptdev, fault_status), in panthor_mmu_irq_handler()
1695 gpu_write(ptdev, MMU_INT_CLEAR, mask); in panthor_mmu_irq_handler()
1700 ptdev->mmu->irq.mask = new_int_mask; in panthor_mmu_irq_handler()
1702 if (ptdev->mmu->as.slots[as].vm) in panthor_mmu_irq_handler()
1703 ptdev->mmu->as.slots[as].vm->unhandled_fault = true; in panthor_mmu_irq_handler()
1706 panthor_mmu_as_disable(ptdev, as); in panthor_mmu_irq_handler()
1707 mutex_unlock(&ptdev->mmu->as.slots_lock); in panthor_mmu_irq_handler()
1714 panthor_sched_report_mmu_fault(ptdev); in panthor_mmu_irq_handler()
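
panthor_mmu_irq_handler() above walks every asserted per-AS fault bit, reads AS_FAULTSTATUS/AS_FAULTADDRESS, records the AS in faulty_mask, drops it from the interrupt mask so the stuck fault cannot refire, disables the AS, and flags the owning VM before reporting to the scheduler. A compact sketch of that masking loop (register reads and the reporting step are stubbed out):

#include <stdint.h>
#include <stdio.h>

static uint32_t as_fault_mask(int as)
{
	return 1u << as;    /* one fault bit per address space */
}

static void handle_mmu_irq(uint32_t status, uint32_t *faulty_mask,
			   uint32_t *irq_mask)
{
	while (status) {
		int as = __builtin_ctz(status);     /* lowest pending fault */
		uint32_t mask = as_fault_mask(as);

		/* The real handler reads AS_FAULTSTATUS/AS_FAULTADDRESS here
		 * and logs the decoded exception and access type. */
		*faulty_mask |= mask;
		*irq_mask = ~*faulty_mask;          /* keep faulty ASes silenced */

		/* It then disables the AS, marks the owning VM as having an
		 * unhandled fault, and reports the fault to the scheduler. */
		status &= ~mask;
	}
}

int main(void)
{
	uint32_t faulty = 0, irq_mask = ~0u;

	handle_mmu_irq(as_fault_mask(2) | as_fault_mask(5), &faulty, &irq_mask);
	printf("faulty=0x%x irq_mask=0x%x\n", faulty, irq_mask);
	return 0;
}
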
1728 void panthor_mmu_suspend(struct panthor_device *ptdev) in panthor_mmu_suspend() argument
1730 mutex_lock(&ptdev->mmu->as.slots_lock); in panthor_mmu_suspend()
1731 for (u32 i = 0; i < ARRAY_SIZE(ptdev->mmu->as.slots); i++) { in panthor_mmu_suspend()
1732 struct panthor_vm *vm = ptdev->mmu->as.slots[i].vm; in panthor_mmu_suspend()
1735 drm_WARN_ON(&ptdev->base, panthor_mmu_as_disable(ptdev, i)); in panthor_mmu_suspend()
1739 mutex_unlock(&ptdev->mmu->as.slots_lock); in panthor_mmu_suspend()
1741 panthor_mmu_irq_suspend(&ptdev->mmu->irq); in panthor_mmu_suspend()
1753 void panthor_mmu_resume(struct panthor_device *ptdev) in panthor_mmu_resume() argument
1755 mutex_lock(&ptdev->mmu->as.slots_lock); in panthor_mmu_resume()
1756 ptdev->mmu->as.alloc_mask = 0; in panthor_mmu_resume()
1757 ptdev->mmu->as.faulty_mask = 0; in panthor_mmu_resume()
1758 mutex_unlock(&ptdev->mmu->as.slots_lock); in panthor_mmu_resume()
1760 panthor_mmu_irq_resume(&ptdev->mmu->irq, panthor_mmu_fault_mask(ptdev, ~0)); in panthor_mmu_resume()
1773 void panthor_mmu_pre_reset(struct panthor_device *ptdev) in panthor_mmu_pre_reset() argument
1777 panthor_mmu_irq_suspend(&ptdev->mmu->irq); in panthor_mmu_pre_reset()
1779 mutex_lock(&ptdev->mmu->vm.lock); in panthor_mmu_pre_reset()
1780 ptdev->mmu->vm.reset_in_progress = true; in panthor_mmu_pre_reset()
1781 list_for_each_entry(vm, &ptdev->mmu->vm.list, node) in panthor_mmu_pre_reset()
1783 mutex_unlock(&ptdev->mmu->vm.lock); in panthor_mmu_pre_reset()
1793 void panthor_mmu_post_reset(struct panthor_device *ptdev) in panthor_mmu_post_reset() argument
1797 mutex_lock(&ptdev->mmu->as.slots_lock); in panthor_mmu_post_reset()
1802 ptdev->mmu->as.alloc_mask = 0; in panthor_mmu_post_reset()
1803 ptdev->mmu->as.faulty_mask = 0; in panthor_mmu_post_reset()
1805 for (u32 i = 0; i < ARRAY_SIZE(ptdev->mmu->as.slots); i++) { in panthor_mmu_post_reset()
1806 struct panthor_vm *vm = ptdev->mmu->as.slots[i].vm; in panthor_mmu_post_reset()
1812 mutex_unlock(&ptdev->mmu->as.slots_lock); in panthor_mmu_post_reset()
1814 panthor_mmu_irq_resume(&ptdev->mmu->irq, panthor_mmu_fault_mask(ptdev, ~0)); in panthor_mmu_post_reset()
1817 mutex_lock(&ptdev->mmu->vm.lock); in panthor_mmu_post_reset()
1818 list_for_each_entry(vm, &ptdev->mmu->vm.list, node) { in panthor_mmu_post_reset()
1821 ptdev->mmu->vm.reset_in_progress = false; in panthor_mmu_post_reset()
1822 mutex_unlock(&ptdev->mmu->vm.lock); in panthor_mmu_post_reset()
1828 struct panthor_device *ptdev = vm->ptdev; in panthor_vm_free() local
1831 if (drm_WARN_ON(&ptdev->base, vm->heaps.pool)) in panthor_vm_free()
1836 mutex_lock(&ptdev->mmu->vm.lock); in panthor_vm_free()
1843 if (ptdev->mmu->vm.reset_in_progress) in panthor_vm_free()
1845 mutex_unlock(&ptdev->mmu->vm.lock); in panthor_vm_free()
1850 mutex_lock(&ptdev->mmu->as.slots_lock); in panthor_vm_free()
1854 if (drm_dev_enter(&ptdev->base, &cookie)) { in panthor_vm_free()
1855 panthor_mmu_as_disable(ptdev, vm->as.id); in panthor_vm_free()
1859 ptdev->mmu->as.slots[vm->as.id].vm = NULL; in panthor_vm_free()
1860 clear_bit(vm->as.id, &ptdev->mmu->as.alloc_mask); in panthor_vm_free()
1863 mutex_unlock(&ptdev->mmu->as.slots_lock); in panthor_vm_free()
1917 pool = panthor_heap_pool_create(vm->ptdev, vm); in panthor_vm_get_heap_pool()
2008 drm_WARN_ON(&vm->ptdev->base, drm_gpuvm_bo_put(vm_bo)); in panthor_vma_link()
2125 if (drm_WARN_ON(&vm->ptdev->base, ret)) in panthor_gpuva_sm_step_unmap()
2283 panthor_vm_create(struct panthor_device *ptdev, bool for_mcu, in panthor_vm_create() argument
2287 u32 va_bits = GPU_MMU_FEATURES_VA_BITS(ptdev->gpu_info.mmu_features); in panthor_vm_create()
2288 u32 pa_bits = GPU_MMU_FEATURES_PA_BITS(ptdev->gpu_info.mmu_features); in panthor_vm_create()
2294 .submit_wq = ptdev->mmu->vm.wq, in panthor_vm_create()
2300 .dev = ptdev->base.dev, in panthor_vm_create()
2312 dummy_gem = drm_gpuvm_resv_object_alloc(&ptdev->base); in panthor_vm_create()
2320 vm->ptdev = ptdev; in panthor_vm_create()
2346 .coherent_walk = ptdev->coherent, in panthor_vm_create()
2348 .iommu_dev = ptdev->base.dev, in panthor_vm_create()
2369 vm->memattr = mair_to_memattr(mair, ptdev->coherent); in panthor_vm_create()
2371 mutex_lock(&ptdev->mmu->vm.lock); in panthor_vm_create()
2372 list_add_tail(&vm->node, &ptdev->mmu->vm.list); in panthor_vm_create()
2375 if (ptdev->mmu->vm.reset_in_progress) in panthor_vm_create()
2377 mutex_unlock(&ptdev->mmu->vm.lock); in panthor_vm_create()
2383 DRM_GPUVM_RESV_PROTECTED, &ptdev->base, dummy_gem, in panthor_vm_create()
2693 void panthor_mmu_unplug(struct panthor_device *ptdev) in panthor_mmu_unplug() argument
2695 if (!IS_ENABLED(CONFIG_PM) || pm_runtime_active(ptdev->base.dev)) in panthor_mmu_unplug()
2696 panthor_mmu_irq_suspend(&ptdev->mmu->irq); in panthor_mmu_unplug()
2698 mutex_lock(&ptdev->mmu->as.slots_lock); in panthor_mmu_unplug()
2699 for (u32 i = 0; i < ARRAY_SIZE(ptdev->mmu->as.slots); i++) { in panthor_mmu_unplug()
2700 struct panthor_vm *vm = ptdev->mmu->as.slots[i].vm; in panthor_mmu_unplug()
2703 drm_WARN_ON(&ptdev->base, panthor_mmu_as_disable(ptdev, i)); in panthor_mmu_unplug()
2707 mutex_unlock(&ptdev->mmu->as.slots_lock); in panthor_mmu_unplug()
2721 int panthor_mmu_init(struct panthor_device *ptdev) in panthor_mmu_init() argument
2723 u32 va_bits = GPU_MMU_FEATURES_VA_BITS(ptdev->gpu_info.mmu_features); in panthor_mmu_init()
2727 mmu = drmm_kzalloc(&ptdev->base, sizeof(*mmu), GFP_KERNEL); in panthor_mmu_init()
2733 ret = drmm_mutex_init(&ptdev->base, &mmu->as.slots_lock); in panthor_mmu_init()
2738 ret = drmm_mutex_init(&ptdev->base, &mmu->vm.lock); in panthor_mmu_init()
2742 ptdev->mmu = mmu; in panthor_mmu_init()
2744 irq = platform_get_irq_byname(to_platform_device(ptdev->base.dev), "mmu"); in panthor_mmu_init()
2748 ret = panthor_request_mmu_irq(ptdev, &mmu->irq, irq, in panthor_mmu_init()
2749 panthor_mmu_fault_mask(ptdev, ~0)); in panthor_mmu_init()
2762 ptdev->gpu_info.mmu_features &= ~GENMASK(7, 0); in panthor_mmu_init()
2763 ptdev->gpu_info.mmu_features |= BITS_PER_LONG; in panthor_mmu_init()
2766 return drmm_add_action_or_reset(&ptdev->base, panthor_mmu_release_wq, mmu->vm.wq); in panthor_mmu_init()
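
The two panthor_mmu_init() lines touching gpu_info.mmu_features above rewrite the low byte of the feature word, which holds the VA bit count, so that io-pgtable is never asked for a VA space wider than the kernel's unsigned long (relevant on 32-bit builds). A tiny sketch of that clamp; the field layout mirrors GPU_MMU_FEATURES_VA_BITS as used in the listing, but the guard condition is an assumption taken from context:

#include <stdint.h>
#include <stdio.h>

#define VA_BITS_MASK      0xffu                     /* low byte = VA bit count */
#define CPU_BITS_PER_LONG (8 * (int)sizeof(long))

static uint32_t clamp_va_bits(uint32_t mmu_features)
{
	uint32_t va_bits = mmu_features & VA_BITS_MASK;

	/* Don't report more VA bits than an unsigned long can index. */
	if (va_bits > (uint32_t)CPU_BITS_PER_LONG) {
		mmu_features &= ~VA_BITS_MASK;
		mmu_features |= CPU_BITS_PER_LONG;
	}
	return mmu_features;
}

int main(void)
{
	uint32_t feats = 0x2830;    /* low byte 0x30 = 48 VA bits (illustrative) */

	printf("before=0x%x after=0x%x (BITS_PER_LONG here: %d)\n",
	       feats, clamp_va_bits(feats), CPU_BITS_PER_LONG);
	return 0;
}
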
2785 struct panthor_device *ptdev = container_of(ddev, struct panthor_device, base); in show_each_vm() local
2790 mutex_lock(&ptdev->mmu->vm.lock); in show_each_vm()
2791 list_for_each_entry(vm, &ptdev->mmu->vm.list, node) { in show_each_vm()
2798 mutex_unlock(&ptdev->mmu->vm.lock); in show_each_vm()