Lines matching refs: vm
96 xe_svm_garbage_collector_add_range(struct xe_vm *vm, struct xe_svm_range *range, in xe_svm_garbage_collector_add_range() argument
99 struct xe_device *xe = vm->xe; in xe_svm_garbage_collector_add_range()
105 spin_lock(&vm->svm.garbage_collector.lock); in xe_svm_garbage_collector_add_range()
108 &vm->svm.garbage_collector.range_list); in xe_svm_garbage_collector_add_range()
109 spin_unlock(&vm->svm.garbage_collector.lock); in xe_svm_garbage_collector_add_range()
112 &vm->svm.garbage_collector.work); in xe_svm_garbage_collector_add_range()
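
The hits above (this listing appears to be a cross-reference of vm uses in the Xe driver's SVM code) belong to the producer side of a deferred-teardown pattern: xe_svm_garbage_collector_add_range() is reached from the MMU-notifier path (see xe_svm_range_notifier_event_end() below), where unbinding and removing a range cannot be done directly, so the range is only linked onto a spinlock-protected list and a work item is queued to do the real teardown later. A minimal sketch of that producer side, using only standard kernel primitives; the gc_demo/demo_range names are illustrative, not Xe code:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct demo_range {
	struct list_head gc_link;		/* link on the collector's list */
};

struct gc_demo {
	spinlock_t lock;			/* protects range_list */
	struct list_head range_list;		/* ranges awaiting teardown */
	struct work_struct work;		/* drains range_list later */
};

static void gc_demo_add(struct gc_demo *gc, struct demo_range *range)
{
	/* Only list manipulation under a spinlock: safe from the notifier. */
	spin_lock(&gc->lock);
	list_add_tail(&range->gc_link, &gc->range_list);
	spin_unlock(&gc->lock);

	/* The sleeping teardown runs later, from process context. */
	queue_work(system_wq, &gc->work);
}
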
116 xe_svm_range_notifier_event_begin(struct xe_vm *vm, struct drm_gpusvm_range *r, in xe_svm_range_notifier_event_begin() argument
121 struct xe_device *xe = vm->xe; in xe_svm_range_notifier_event_begin()
126 xe_svm_assert_in_notifier(vm); in xe_svm_range_notifier_event_begin()
146 if (xe_pt_zap_ptes_range(tile, vm, range)) { in xe_svm_range_notifier_event_begin()
160 xe_svm_range_notifier_event_end(struct xe_vm *vm, struct drm_gpusvm_range *r, in xe_svm_range_notifier_event_end() argument
165 xe_svm_assert_in_notifier(vm); in xe_svm_range_notifier_event_end()
167 drm_gpusvm_range_unmap_pages(&vm->svm.gpusvm, r, &ctx); in xe_svm_range_notifier_event_end()
168 if (!xe_vm_is_closed(vm) && mmu_range->event == MMU_NOTIFY_UNMAP) in xe_svm_range_notifier_event_end()
169 xe_svm_garbage_collector_add_range(vm, to_xe_range(r), in xe_svm_range_notifier_event_end()
177 struct xe_vm *vm = gpusvm_to_vm(gpusvm); in xe_svm_invalidate() local
178 struct xe_device *xe = vm->xe; in xe_svm_invalidate()
184 xe_svm_assert_in_notifier(vm); in xe_svm_invalidate()
188 vm->usm.asid, gpusvm, notifier->notifier.invalidate_seq, in xe_svm_invalidate()
204 if (xe_vm_is_closed(vm)) in xe_svm_invalidate()
212 err = dma_resv_wait_timeout(xe_vm_resv(vm), in xe_svm_invalidate()
219 tile_mask |= xe_svm_range_notifier_event_begin(vm, r, mmu_range, in xe_svm_invalidate()
227 err = xe_vm_range_tilemask_tlb_invalidation(vm, adj_start, adj_end, tile_mask); in xe_svm_invalidate()
233 xe_svm_range_notifier_event_end(vm, r, mmu_range); in xe_svm_invalidate()
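
As the lines above suggest, xe_svm_invalidate() is structured as two passes with a TLB flush in between: after an early-out for closed VMs and a wait on the VM's dma-resv, each affected range first has its PTEs zapped while a mask of tiles that actually had mappings is accumulated, then one ranged TLB invalidation is issued for that mask, and only afterwards are the backing pages unmapped and, for MMU_NOTIFY_UNMAP events, queued for garbage collection. The ordering sketch below uses demo_* stand-ins for xe_pt_zap_ptes_range(), xe_vm_range_tilemask_tlb_invalidation() and drm_gpusvm_range_unmap_pages(); the stubs do nothing, only the sequencing is the point, and the closed-VM check and dma-resv wait are omitted:

struct demo_inval_range {
	unsigned long start, end;
};

static unsigned char demo_zap_ptes(struct demo_inval_range *r)
{
	/* Clear the GPU PTEs for the range; report which tiles actually
	 * had mappings so only those need a TLB flush. */
	return 0;
}

static void demo_tlb_invalidate(unsigned long start, unsigned long end,
				unsigned char tile_mask)
{
	/* Drop stale translations for [start, end) on the tiles in tile_mask. */
}

static void demo_unmap_pages(struct demo_inval_range *r)
{
	/* Only after the flush is it safe to unmap the backing pages:
	 * no TLB entry can still point at them. */
}

static void demo_invalidate(struct demo_inval_range **ranges, int nr,
			    unsigned long start, unsigned long end)
{
	unsigned char tile_mask = 0;
	int i;

	for (i = 0; i < nr; i++)			/* pass 1: zap PTEs */
		tile_mask |= demo_zap_ptes(ranges[i]);

	if (tile_mask)					/* one ranged flush */
		demo_tlb_invalidate(start, end, tile_mask);

	for (i = 0; i < nr; i++)			/* pass 2: unmap pages */
		demo_unmap_pages(ranges[i]);
}
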
236 static int __xe_svm_garbage_collector(struct xe_vm *vm, in __xe_svm_garbage_collector() argument
243 xe_vm_lock(vm, false); in __xe_svm_garbage_collector()
244 fence = xe_vm_range_unbind(vm, range); in __xe_svm_garbage_collector()
245 xe_vm_unlock(vm); in __xe_svm_garbage_collector()
250 drm_gpusvm_range_remove(&vm->svm.gpusvm, &range->base); in __xe_svm_garbage_collector()
255 static int xe_svm_garbage_collector(struct xe_vm *vm) in xe_svm_garbage_collector() argument
260 lockdep_assert_held_write(&vm->lock); in xe_svm_garbage_collector()
262 if (xe_vm_is_closed_or_banned(vm)) in xe_svm_garbage_collector()
265 spin_lock(&vm->svm.garbage_collector.lock); in xe_svm_garbage_collector()
267 range = list_first_entry_or_null(&vm->svm.garbage_collector.range_list, in xe_svm_garbage_collector()
274 spin_unlock(&vm->svm.garbage_collector.lock); in xe_svm_garbage_collector()
276 err = __xe_svm_garbage_collector(vm, range); in xe_svm_garbage_collector()
278 drm_warn(&vm->xe->drm, in xe_svm_garbage_collector()
281 xe_vm_kill(vm, true); in xe_svm_garbage_collector()
285 spin_lock(&vm->svm.garbage_collector.lock); in xe_svm_garbage_collector()
287 spin_unlock(&vm->svm.garbage_collector.lock); in xe_svm_garbage_collector()
294 struct xe_vm *vm = container_of(w, struct xe_vm, in xe_svm_garbage_collector_work_func() local
297 down_write(&vm->lock); in xe_svm_garbage_collector_work_func()
298 xe_svm_garbage_collector(vm); in xe_svm_garbage_collector_work_func()
299 up_write(&vm->lock); in xe_svm_garbage_collector_work_func()
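
This is the consumer side of the same pattern: xe_svm_garbage_collector() pops one range at a time so the spinlock is never held across the sleeping unbind/remove step, and the work function merely takes the VM lock for writing and calls it. Continuing the gc_demo sketch from above (the error path that warns and kills the VM in the real driver is omitted):

static void gc_demo_reap_one(struct gc_demo *gc, struct demo_range *range)
{
	/* Stand-in for xe_vm_range_unbind() + drm_gpusvm_range_remove();
	 * this step may sleep, so it runs with the spinlock dropped. */
}

static void gc_demo_work(struct work_struct *w)
{
	struct gc_demo *gc = container_of(w, struct gc_demo, work);
	struct demo_range *range;

	spin_lock(&gc->lock);
	for (;;) {
		range = list_first_entry_or_null(&gc->range_list,
						 struct demo_range, gc_link);
		if (!range)
			break;
		list_del(&range->gc_link);
		spin_unlock(&gc->lock);

		gc_demo_reap_one(gc, range);

		spin_lock(&gc->lock);
	}
	spin_unlock(&gc->lock);
}
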
566 int xe_svm_init(struct xe_vm *vm) in xe_svm_init() argument
570 spin_lock_init(&vm->svm.garbage_collector.lock); in xe_svm_init()
571 INIT_LIST_HEAD(&vm->svm.garbage_collector.range_list); in xe_svm_init()
572 INIT_WORK(&vm->svm.garbage_collector.work, in xe_svm_init()
575 err = drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM", &vm->xe->drm, in xe_svm_init()
576 current->mm, xe_svm_devm_owner(vm->xe), 0, in xe_svm_init()
577 vm->size, xe_modparam.svm_notifier_size * SZ_1M, in xe_svm_init()
583 drm_gpusvm_driver_set_lock(&vm->svm.gpusvm, &vm->lock); in xe_svm_init()
594 void xe_svm_close(struct xe_vm *vm) in xe_svm_close() argument
596 xe_assert(vm->xe, xe_vm_is_closed(vm)); in xe_svm_close()
597 flush_work(&vm->svm.garbage_collector.work); in xe_svm_close()
606 void xe_svm_fini(struct xe_vm *vm) in xe_svm_fini() argument
608 xe_assert(vm->xe, xe_vm_is_closed(vm)); in xe_svm_fini()
610 drm_gpusvm_fini(&vm->svm.gpusvm); in xe_svm_fini()
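
Lifecycle-wise, the lines above show xe_svm_init() setting up the collector state (lock, list, work item) before handing the VM to drm_gpusvm_init()/drm_gpusvm_driver_set_lock(), xe_svm_close() flushing any pending garbage-collector work once the VM is closed, and xe_svm_fini() tearing down the GPUSVM core. A compressed version of that ordering, again with the illustrative gc_demo names rather than the real Xe API:

static int demo_svm_init(struct gc_demo *gc)
{
	/* Collector state must be ready before the notifier machinery is
	 * registered, so an invalidation arriving right afterwards finds
	 * an initialized list and work item. */
	spin_lock_init(&gc->lock);
	INIT_LIST_HEAD(&gc->range_list);
	INIT_WORK(&gc->work, gc_demo_work);

	/* ... then register with the core (drm_gpusvm_init() in the real driver) ... */
	return 0;
}

static void demo_svm_close(struct gc_demo *gc)
{
	/* The VM is already closed, so nothing new can be queued; one
	 * flush drains whatever the notifiers left on the list. */
	flush_work(&gc->work);

	/* ... core teardown (drm_gpusvm_fini() in the real driver) follows later ... */
}
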
629 void xe_svm_range_migrate_to_smem(struct xe_vm *vm, struct xe_svm_range *range) in xe_svm_range_migrate_to_smem() argument
632 drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base); in xe_svm_range_migrate_to_smem()
647 bool xe_svm_range_validate(struct xe_vm *vm, in xe_svm_range_validate() argument
653 xe_svm_notifier_lock(vm); in xe_svm_range_validate()
658 xe_svm_notifier_unlock(vm); in xe_svm_range_validate()
678 u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 start, u64 end, struct xe_vma *vma) in xe_svm_find_vma_start() argument
680 return drm_gpusvm_find_vma_start(&vm->svm.gpusvm, in xe_svm_find_vma_start()
772 struct xe_vm *vm = range_to_vm(&range->base); in xe_svm_range_needs_migrate_to_vram() local
778 xe_assert(vm->xe, IS_DGFX(vm->xe)); in xe_svm_range_needs_migrate_to_vram()
781 drm_info(&vm->xe->drm, "Range is already in VRAM\n"); in xe_svm_range_needs_migrate_to_vram()
785 if (preferred_region_is_vram && range_size < SZ_64K && !supports_4K_migration(vm->xe)) { in xe_svm_range_needs_migrate_to_vram()
786 drm_dbg(&vm->xe->drm, "Platform doesn't support SZ_4K range migration\n"); in xe_svm_range_needs_migrate_to_vram()
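
The two checks visible in xe_svm_range_needs_migrate_to_vram() amount to a small placement policy: a range already resident in VRAM is left alone, and a range that prefers VRAM but is smaller than 64K stays in system memory on hardware without 4K-granule migration. A guess at that predicate, reconstructed only from the conditions visible in this listing (the real function has further checks that do not reference vm and therefore do not appear here):

#include <linux/sizes.h>
#include <linux/types.h>

static bool demo_needs_vram_migrate(bool already_in_vram, bool prefers_vram,
				    unsigned long range_size,
				    bool has_4k_migration)
{
	if (already_in_vram)
		return false;		/* nothing to migrate */

	if (prefers_vram && range_size < SZ_64K && !has_4k_migration)
		return false;		/* platform can't migrate sub-64K ranges */

	return prefers_vram;		/* placeholder for the remaining checks */
}
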
806 int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma, in xe_svm_handle_pagefault() argument
812 .devmem_possible = IS_DGFX(vm->xe) && in xe_svm_handle_pagefault()
814 .check_pages_threshold = IS_DGFX(vm->xe) && in xe_svm_handle_pagefault()
816 .devmem_only = atomic && IS_DGFX(vm->xe) && in xe_svm_handle_pagefault()
818 .timeslice_ms = atomic && IS_DGFX(vm->xe) && in xe_svm_handle_pagefault()
820 vm->xe->atomic_svm_timeslice_ms : 0, in xe_svm_handle_pagefault()
829 lockdep_assert_held_write(&vm->lock); in xe_svm_handle_pagefault()
830 xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(vma)); in xe_svm_handle_pagefault()
836 err = xe_svm_garbage_collector(vm); in xe_svm_handle_pagefault()
840 range = xe_svm_range_find_or_insert(vm, fault_addr, vma, &ctx); in xe_svm_handle_pagefault()
854 xe_svm_range_needs_migrate_to_vram(range, vma, IS_DGFX(vm->xe))) { in xe_svm_handle_pagefault()
859 drm_dbg(&vm->xe->drm, in xe_svm_handle_pagefault()
861 vm->usm.asid, ERR_PTR(err)); in xe_svm_handle_pagefault()
864 drm_err(&vm->xe->drm, in xe_svm_handle_pagefault()
866 vm->usm.asid, ERR_PTR(err)); in xe_svm_handle_pagefault()
873 err = xe_svm_range_get_pages(vm, range, &ctx); in xe_svm_handle_pagefault()
878 drm_dbg(&vm->xe->drm, in xe_svm_handle_pagefault()
880 vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err)); in xe_svm_handle_pagefault()
884 drm_err(&vm->xe->drm, in xe_svm_handle_pagefault()
886 vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err)); in xe_svm_handle_pagefault()
897 xe_vm_lock(vm, false); in xe_svm_handle_pagefault()
898 fence = xe_vm_range_rebind(vm, vma, range, BIT(tile->id)); in xe_svm_handle_pagefault()
900 xe_vm_unlock(vm); in xe_svm_handle_pagefault()
911 xe_vm_unlock(vm); in xe_svm_handle_pagefault()
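
Putting the fault handler together, the order visible above is: drain the garbage collector (under the write-held vm->lock), find or create the SVM range covering the fault address, try to migrate it to VRAM where the policy asks for it (the devmem_only flag in the context suggests VRAM is only mandatory for the atomic-fault case on dGFX), populate the CPU pages, then rebind the range under the VM lock (xe_vm_range_rebind() returns a fence). The skeleton below reduces each step to a demo_* stub; the retry handling, the fence wait and the error paths that distinguish drm_dbg from drm_err are only hinted at in comments:

#include <linux/errno.h>

static int demo_garbage_collect(void)
{
	/* Reap ranges queued for teardown by earlier invalidations. */
	return 0;
}

static void *demo_range_find_or_insert(unsigned long fault_addr)
{
	/* Look up, or create, the SVM range covering fault_addr. */
	return (void *)1;
}

static void demo_try_migrate_to_vram(void *range)
{
	/* Best effort: on failure the range simply stays in system memory
	 * (unless VRAM is mandatory, in which case the fault fails). */
}

static int demo_get_pages(void *range)
{
	/* Populate and map the CPU pages backing the range; benign races
	 * with invalidation make the real driver retry this step. */
	return 0;
}

static int demo_rebind(void *range)
{
	/* Write the GPU PTEs under the VM lock and wait on the rebind fence. */
	return 0;
}

static int demo_handle_fault(unsigned long fault_addr)
{
	void *range;
	int err;

	err = demo_garbage_collect();
	if (err)
		return err;

	range = demo_range_find_or_insert(fault_addr);
	if (!range)
		return -ENOMEM;

	demo_try_migrate_to_vram(range);

	err = demo_get_pages(range);
	if (err)
		return err;

	return demo_rebind(range);
}
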
931 bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end) in xe_svm_has_mapping() argument
933 return drm_gpusvm_has_mapping(&vm->svm.gpusvm, start, end); in xe_svm_has_mapping()
962 struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr, in xe_svm_range_find_or_insert() argument
967 r = drm_gpusvm_range_find_or_insert(&vm->svm.gpusvm, max(addr, xe_vma_start(vma)), in xe_svm_range_find_or_insert()
986 int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range, in xe_svm_range_get_pages() argument
991 err = drm_gpusvm_range_get_pages(&vm->svm.gpusvm, &range->base, ctx); in xe_svm_range_get_pages()
994 drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base); in xe_svm_range_get_pages()
1122 void xe_svm_flush(struct xe_vm *vm) in xe_svm_flush() argument
1124 if (xe_vm_in_fault_mode(vm)) in xe_svm_flush()
1125 flush_work(&vm->svm.garbage_collector.work); in xe_svm_flush()