Lines Matching refs:vm
45 static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm) in xe_vm_obj() argument
47 return vm->gpuvm.r_obj; in xe_vm_obj()
72 struct xe_vm *vm = xe_vma_vm(vma); in xe_vma_userptr_pin_pages() local
73 struct xe_device *xe = vm->xe; in xe_vma_userptr_pin_pages()
75 lockdep_assert_held(&vm->lock); in xe_vma_userptr_pin_pages()
81 static bool preempt_fences_waiting(struct xe_vm *vm) in preempt_fences_waiting() argument
85 lockdep_assert_held(&vm->lock); in preempt_fences_waiting()
86 xe_vm_assert_held(vm); in preempt_fences_waiting()
88 list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) { in preempt_fences_waiting()
107 static int alloc_preempt_fences(struct xe_vm *vm, struct list_head *list, in alloc_preempt_fences() argument
110 lockdep_assert_held(&vm->lock); in alloc_preempt_fences()
111 xe_vm_assert_held(vm); in alloc_preempt_fences()
113 if (*count >= vm->preempt.num_exec_queues) in alloc_preempt_fences()
116 for (; *count < vm->preempt.num_exec_queues; ++(*count)) { in alloc_preempt_fences()
128 static int wait_for_existing_preempt_fences(struct xe_vm *vm) in wait_for_existing_preempt_fences() argument
132 xe_vm_assert_held(vm); in wait_for_existing_preempt_fences()
134 list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) { in wait_for_existing_preempt_fences()
150 static bool xe_vm_is_idle(struct xe_vm *vm) in xe_vm_is_idle() argument
154 xe_vm_assert_held(vm); in xe_vm_is_idle()
155 list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) { in xe_vm_is_idle()
163 static void arm_preempt_fences(struct xe_vm *vm, struct list_head *list) in arm_preempt_fences() argument
168 list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) { in arm_preempt_fences()
172 xe_assert(vm->xe, link != list); in arm_preempt_fences()
182 static int add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo) in add_preempt_fences() argument
189 if (!vm->preempt.num_exec_queues) in add_preempt_fences()
192 err = dma_resv_reserve_fences(bo->ttm.base.resv, vm->preempt.num_exec_queues); in add_preempt_fences()
196 list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) in add_preempt_fences()
206 static void resume_and_reinstall_preempt_fences(struct xe_vm *vm, in resume_and_reinstall_preempt_fences() argument
211 lockdep_assert_held(&vm->lock); in resume_and_reinstall_preempt_fences()
212 xe_vm_assert_held(vm); in resume_and_reinstall_preempt_fences()
214 list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) { in resume_and_reinstall_preempt_fences()
217 drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, q->lr.pfence, in resume_and_reinstall_preempt_fences()
222 int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q) in xe_vm_add_compute_exec_queue() argument
225 .vm = &vm->gpuvm, in xe_vm_add_compute_exec_queue()
234 xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm)); in xe_vm_add_compute_exec_queue()
236 down_write(&vm->lock); in xe_vm_add_compute_exec_queue()
248 list_add(&q->lr.link, &vm->preempt.exec_queues); in xe_vm_add_compute_exec_queue()
249 ++vm->preempt.num_exec_queues; in xe_vm_add_compute_exec_queue()
252 down_read(&vm->userptr.notifier_lock); in xe_vm_add_compute_exec_queue()
254 drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, pfence, in xe_vm_add_compute_exec_queue()
262 wait = __xe_vm_userptr_needs_repin(vm) || preempt_fences_waiting(vm); in xe_vm_add_compute_exec_queue()
266 up_read(&vm->userptr.notifier_lock); in xe_vm_add_compute_exec_queue()
271 up_write(&vm->lock); in xe_vm_add_compute_exec_queue()
284 void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q) in xe_vm_remove_compute_exec_queue() argument
286 if (!xe_vm_in_preempt_fence_mode(vm)) in xe_vm_remove_compute_exec_queue()
289 down_write(&vm->lock); in xe_vm_remove_compute_exec_queue()
292 --vm->preempt.num_exec_queues; in xe_vm_remove_compute_exec_queue()
299 up_write(&vm->lock); in xe_vm_remove_compute_exec_queue()
313 int __xe_vm_userptr_needs_repin(struct xe_vm *vm) in __xe_vm_userptr_needs_repin() argument
315 lockdep_assert_held_read(&vm->userptr.notifier_lock); in __xe_vm_userptr_needs_repin()
317 return (list_empty(&vm->userptr.repin_list) && in __xe_vm_userptr_needs_repin()
318 list_empty(&vm->userptr.invalidated)) ? 0 : -EAGAIN; in __xe_vm_userptr_needs_repin()
331 void xe_vm_kill(struct xe_vm *vm, bool unlocked) in xe_vm_kill() argument
335 lockdep_assert_held(&vm->lock); in xe_vm_kill()
338 xe_vm_lock(vm, false); in xe_vm_kill()
340 vm->flags |= XE_VM_FLAG_BANNED; in xe_vm_kill()
341 trace_xe_vm_kill(vm); in xe_vm_kill()
343 list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) in xe_vm_kill()
347 xe_vm_unlock(vm); in xe_vm_kill()
387 struct xe_vm *vm = gpuvm_to_vm(vm_bo->vm); in xe_gpuvm_validate() local
391 lockdep_assert_held(&vm->lock); in xe_gpuvm_validate()
394 &vm->rebind_list); in xe_gpuvm_validate()
396 ret = xe_bo_validate(gem_to_xe_bo(vm_bo->obj), vm, false); in xe_gpuvm_validate()
419 int xe_vm_validate_rebind(struct xe_vm *vm, struct drm_exec *exec, in xe_vm_validate_rebind() argument
427 ret = drm_gpuvm_validate(&vm->gpuvm, exec); in xe_vm_validate_rebind()
431 ret = xe_vm_rebind(vm, false); in xe_vm_validate_rebind()
434 } while (!list_empty(&vm->gpuvm.evict.list)); in xe_vm_validate_rebind()
445 static int xe_preempt_work_begin(struct drm_exec *exec, struct xe_vm *vm, in xe_preempt_work_begin() argument
450 err = drm_gpuvm_prepare_vm(&vm->gpuvm, exec, 0); in xe_preempt_work_begin()
454 if (xe_vm_is_idle(vm)) { in xe_preempt_work_begin()
455 vm->preempt.rebind_deactivated = true; in xe_preempt_work_begin()
460 if (!preempt_fences_waiting(vm)) { in xe_preempt_work_begin()
465 err = drm_gpuvm_prepare_objects(&vm->gpuvm, exec, 0); in xe_preempt_work_begin()
469 err = wait_for_existing_preempt_fences(vm); in xe_preempt_work_begin()
479 return xe_vm_validate_rebind(vm, exec, vm->preempt.num_exec_queues); in xe_preempt_work_begin()
484 struct xe_vm *vm = container_of(w, struct xe_vm, preempt.rebind_work); in preempt_rebind_work_func() local
493 xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm)); in preempt_rebind_work_func()
494 trace_xe_vm_rebind_worker_enter(vm); in preempt_rebind_work_func()
496 down_write(&vm->lock); in preempt_rebind_work_func()
498 if (xe_vm_is_closed_or_banned(vm)) { in preempt_rebind_work_func()
499 up_write(&vm->lock); in preempt_rebind_work_func()
500 trace_xe_vm_rebind_worker_exit(vm); in preempt_rebind_work_func()
505 if (xe_vm_userptr_check_repin(vm)) { in preempt_rebind_work_func()
506 err = xe_vm_userptr_pin(vm); in preempt_rebind_work_func()
516 err = xe_preempt_work_begin(&exec, vm, &done); in preempt_rebind_work_func()
527 err = alloc_preempt_fences(vm, &preempt_fences, &fence_count); in preempt_rebind_work_func()
531 err = xe_vm_rebind(vm, true); in preempt_rebind_work_func()
536 wait = dma_resv_wait_timeout(xe_vm_resv(vm), in preempt_rebind_work_func()
549 down_read(&vm->userptr.notifier_lock); in preempt_rebind_work_func()
550 if (retry_required(tries, vm)) { in preempt_rebind_work_func()
551 up_read(&vm->userptr.notifier_lock); in preempt_rebind_work_func()
558 spin_lock(&vm->xe->ttm.lru_lock); in preempt_rebind_work_func()
559 ttm_lru_bulk_move_tail(&vm->lru_bulk_move); in preempt_rebind_work_func()
560 spin_unlock(&vm->xe->ttm.lru_lock); in preempt_rebind_work_func()
563 arm_preempt_fences(vm, &preempt_fences); in preempt_rebind_work_func()
564 resume_and_reinstall_preempt_fences(vm, &exec); in preempt_rebind_work_func()
565 up_read(&vm->userptr.notifier_lock); in preempt_rebind_work_func()
571 trace_xe_vm_rebind_worker_retry(vm); in preempt_rebind_work_func()
576 drm_warn(&vm->xe->drm, "VM worker error: %d\n", err); in preempt_rebind_work_func()
577 xe_vm_kill(vm, true); in preempt_rebind_work_func()
579 up_write(&vm->lock); in preempt_rebind_work_func()
583 trace_xe_vm_rebind_worker_exit(vm); in preempt_rebind_work_func()
586 static void __vma_userptr_invalidate(struct xe_vm *vm, struct xe_userptr_vma *uvma) in __vma_userptr_invalidate() argument
598 if (!xe_vm_in_fault_mode(vm) && in __vma_userptr_invalidate()
600 spin_lock(&vm->userptr.invalidated_lock); in __vma_userptr_invalidate()
602 &vm->userptr.invalidated); in __vma_userptr_invalidate()
603 spin_unlock(&vm->userptr.invalidated_lock); in __vma_userptr_invalidate()
612 dma_resv_iter_begin(&cursor, xe_vm_resv(vm), in __vma_userptr_invalidate()
618 err = dma_resv_wait_timeout(xe_vm_resv(vm), in __vma_userptr_invalidate()
623 if (xe_vm_in_fault_mode(vm) && userptr->initial_bind) { in __vma_userptr_invalidate()
637 struct xe_vm *vm = xe_vma_vm(vma); in vma_userptr_invalidate() local
639 xe_assert(vm->xe, xe_vma_is_userptr(vma)); in vma_userptr_invalidate()
649 down_write(&vm->userptr.notifier_lock); in vma_userptr_invalidate()
652 __vma_userptr_invalidate(vm, uvma); in vma_userptr_invalidate()
653 up_write(&vm->userptr.notifier_lock); in vma_userptr_invalidate()
672 struct xe_vm *vm = xe_vma_vm(&uvma->vma); in xe_vma_userptr_force_invalidate() local
675 lockdep_assert_held(&vm->lock); in xe_vma_userptr_force_invalidate()
677 lockdep_assert_held(&vm->userptr.notifier_lock); in xe_vma_userptr_force_invalidate()
682 xe_vm_assert_held(vm); in xe_vma_userptr_force_invalidate()
687 __vma_userptr_invalidate(vm, uvma); in xe_vma_userptr_force_invalidate()
691 int xe_vm_userptr_pin(struct xe_vm *vm) in xe_vm_userptr_pin() argument
696 xe_assert(vm->xe, !xe_vm_in_fault_mode(vm)); in xe_vm_userptr_pin()
697 lockdep_assert_held_write(&vm->lock); in xe_vm_userptr_pin()
700 spin_lock(&vm->userptr.invalidated_lock); in xe_vm_userptr_pin()
701 xe_assert(vm->xe, list_empty(&vm->userptr.repin_list)); in xe_vm_userptr_pin()
702 list_for_each_entry_safe(uvma, next, &vm->userptr.invalidated, in xe_vm_userptr_pin()
706 &vm->userptr.repin_list); in xe_vm_userptr_pin()
708 spin_unlock(&vm->userptr.invalidated_lock); in xe_vm_userptr_pin()
711 list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list, in xe_vm_userptr_pin()
730 xe_vm_lock(vm, false); in xe_vm_userptr_pin()
731 dma_resv_wait_timeout(xe_vm_resv(vm), in xe_vm_userptr_pin()
735 down_read(&vm->userptr.notifier_lock); in xe_vm_userptr_pin()
737 up_read(&vm->userptr.notifier_lock); in xe_vm_userptr_pin()
738 xe_vm_unlock(vm); in xe_vm_userptr_pin()
747 &vm->rebind_list); in xe_vm_userptr_pin()
752 down_write(&vm->userptr.notifier_lock); in xe_vm_userptr_pin()
753 spin_lock(&vm->userptr.invalidated_lock); in xe_vm_userptr_pin()
754 list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list, in xe_vm_userptr_pin()
758 &vm->userptr.invalidated); in xe_vm_userptr_pin()
760 spin_unlock(&vm->userptr.invalidated_lock); in xe_vm_userptr_pin()
761 up_write(&vm->userptr.notifier_lock); in xe_vm_userptr_pin()
777 int xe_vm_userptr_check_repin(struct xe_vm *vm) in xe_vm_userptr_check_repin() argument
779 return (list_empty_careful(&vm->userptr.repin_list) && in xe_vm_userptr_check_repin()
780 list_empty_careful(&vm->userptr.invalidated)) ? 0 : -EAGAIN; in xe_vm_userptr_check_repin()
878 static struct dma_fence *ops_execute(struct xe_vm *vm,
880 static void xe_vma_ops_init(struct xe_vma_ops *vops, struct xe_vm *vm,
884 int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker) in xe_vm_rebind() argument
892 lockdep_assert_held(&vm->lock); in xe_vm_rebind()
893 if ((xe_vm_in_lr_mode(vm) && !rebind_worker) || in xe_vm_rebind()
894 list_empty(&vm->rebind_list)) in xe_vm_rebind()
897 xe_vma_ops_init(&vops, vm, NULL, NULL, 0); in xe_vm_rebind()
901 xe_vm_assert_held(vm); in xe_vm_rebind()
902 list_for_each_entry(vma, &vm->rebind_list, combined_links.rebind) { in xe_vm_rebind()
903 xe_assert(vm->xe, vma->tile_present); in xe_vm_rebind()
920 fence = ops_execute(vm, &vops); in xe_vm_rebind()
925 list_for_each_entry_safe(vma, next, &vm->rebind_list, in xe_vm_rebind()
939 struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma, u8 tile_mask) in xe_vma_rebind() argument
948 lockdep_assert_held(&vm->lock); in xe_vma_rebind()
949 xe_vm_assert_held(vm); in xe_vma_rebind()
950 xe_assert(vm->xe, xe_vm_in_fault_mode(vm)); in xe_vma_rebind()
952 xe_vma_ops_init(&vops, vm, NULL, NULL, 0); in xe_vma_rebind()
953 for_each_tile(tile, vm->xe, id) { in xe_vma_rebind()
969 fence = ops_execute(vm, &vops); in xe_vma_rebind()
1025 struct dma_fence *xe_vm_range_rebind(struct xe_vm *vm, in xe_vm_range_rebind() argument
1037 lockdep_assert_held(&vm->lock); in xe_vm_range_rebind()
1038 xe_vm_assert_held(vm); in xe_vm_range_rebind()
1039 xe_assert(vm->xe, xe_vm_in_fault_mode(vm)); in xe_vm_range_rebind()
1040 xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(vma)); in xe_vm_range_rebind()
1042 xe_vma_ops_init(&vops, vm, NULL, NULL, 0); in xe_vm_range_rebind()
1043 for_each_tile(tile, vm->xe, id) { in xe_vm_range_rebind()
1059 fence = ops_execute(vm, &vops); in xe_vm_range_rebind()
1108 struct dma_fence *xe_vm_range_unbind(struct xe_vm *vm, in xe_vm_range_unbind() argument
1118 lockdep_assert_held(&vm->lock); in xe_vm_range_unbind()
1119 xe_vm_assert_held(vm); in xe_vm_range_unbind()
1120 xe_assert(vm->xe, xe_vm_in_fault_mode(vm)); in xe_vm_range_unbind()
1125 xe_vma_ops_init(&vops, vm, NULL, NULL, 0); in xe_vm_range_unbind()
1126 for_each_tile(tile, vm->xe, id) { in xe_vm_range_unbind()
1142 fence = ops_execute(vm, &vops); in xe_vm_range_unbind()
1167 static struct xe_vma *xe_vma_create(struct xe_vm *vm, in xe_vma_create() argument
1182 xe_assert(vm->xe, start < end); in xe_vma_create()
1183 xe_assert(vm->xe, end < vm->size); in xe_vma_create()
1212 vma->gpuva.vm = &vm->gpuvm; in xe_vma_create()
1220 for_each_tile(tile, vm->xe, id) in xe_vma_create()
1223 if (vm->xe->info.has_atomic_enable_pte_bit) in xe_vma_create()
1233 vm_bo = drm_gpuvm_bo_obtain(vma->gpuva.vm, &bo->ttm.base); in xe_vma_create()
1267 xe_vm_get(vm); in xe_vma_create()
1275 struct xe_vm *vm = xe_vma_vm(vma); in xe_vma_destroy_late() local
1296 xe_vm_put(vm); in xe_vma_destroy_late()
1298 xe_vm_put(vm); in xe_vma_destroy_late()
1325 struct xe_vm *vm = xe_vma_vm(vma); in xe_vma_destroy() local
1327 lockdep_assert_held_write(&vm->lock); in xe_vma_destroy()
1328 xe_assert(vm->xe, list_empty(&vma->combined_links.destroy)); in xe_vma_destroy()
1331 xe_assert(vm->xe, vma->gpuva.flags & XE_VMA_DESTROYED); in xe_vma_destroy()
1333 spin_lock(&vm->userptr.invalidated_lock); in xe_vma_destroy()
1334 xe_assert(vm->xe, list_empty(&to_userptr_vma(vma)->userptr.repin_link)); in xe_vma_destroy()
1336 spin_unlock(&vm->userptr.invalidated_lock); in xe_vma_destroy()
1343 xe_vm_assert_held(vm); in xe_vma_destroy()
1369 struct xe_vm *vm = xe_vma_vm(vma); in xe_vm_lock_vma() local
1373 XE_WARN_ON(!vm); in xe_vm_lock_vma()
1375 err = drm_exec_lock_obj(exec, xe_vm_obj(vm)); in xe_vm_lock_vma()
1376 if (!err && bo && !bo->vm) in xe_vm_lock_vma()
1401 xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range) in xe_vm_find_overlapping_vma() argument
1405 lockdep_assert_held(&vm->lock); in xe_vm_find_overlapping_vma()
1407 if (xe_vm_is_closed_or_banned(vm)) in xe_vm_find_overlapping_vma()
1410 xe_assert(vm->xe, start + range <= vm->size); in xe_vm_find_overlapping_vma()
1412 gpuva = drm_gpuva_find_first(&vm->gpuvm, start, range); in xe_vm_find_overlapping_vma()
1417 static int xe_vm_insert_vma(struct xe_vm *vm, struct xe_vma *vma) in xe_vm_insert_vma() argument
1421 xe_assert(vm->xe, xe_vma_vm(vma) == vm); in xe_vm_insert_vma()
1422 lockdep_assert_held(&vm->lock); in xe_vm_insert_vma()
1424 mutex_lock(&vm->snap_mutex); in xe_vm_insert_vma()
1425 err = drm_gpuva_insert(&vm->gpuvm, &vma->gpuva); in xe_vm_insert_vma()
1426 mutex_unlock(&vm->snap_mutex); in xe_vm_insert_vma()
1432 static void xe_vm_remove_vma(struct xe_vm *vm, struct xe_vma *vma) in xe_vm_remove_vma() argument
1434 xe_assert(vm->xe, xe_vma_vm(vma) == vm); in xe_vm_remove_vma()
1435 lockdep_assert_held(&vm->lock); in xe_vm_remove_vma()
1437 mutex_lock(&vm->snap_mutex); in xe_vm_remove_vma()
1439 mutex_unlock(&vm->snap_mutex); in xe_vm_remove_vma()
1440 if (vm->usm.last_fault_vma == vma) in xe_vm_remove_vma()
1441 vm->usm.last_fault_vma = NULL; in xe_vm_remove_vma()
1606 struct xe_vm *vm) in xe_vm_create_scratch() argument
1611 for (i = MAX_HUGEPTE_LEVEL; i < vm->pt_root[id]->level; i++) { in xe_vm_create_scratch()
1612 vm->scratch_pt[id][i] = xe_pt_create(vm, tile, i); in xe_vm_create_scratch()
1613 if (IS_ERR(vm->scratch_pt[id][i])) in xe_vm_create_scratch()
1614 return PTR_ERR(vm->scratch_pt[id][i]); in xe_vm_create_scratch()
1616 xe_pt_populate_empty(tile, vm, vm->scratch_pt[id][i]); in xe_vm_create_scratch()
1623 static void xe_vm_free_scratch(struct xe_vm *vm) in xe_vm_free_scratch() argument
1628 if (!xe_vm_has_scratch(vm)) in xe_vm_free_scratch()
1631 for_each_tile(tile, vm->xe, id) { in xe_vm_free_scratch()
1634 if (!vm->pt_root[id]) in xe_vm_free_scratch()
1637 for (i = MAX_HUGEPTE_LEVEL; i < vm->pt_root[id]->level; ++i) in xe_vm_free_scratch()
1638 if (vm->scratch_pt[id][i]) in xe_vm_free_scratch()
1639 xe_pt_destroy(vm->scratch_pt[id][i], vm->flags, NULL); in xe_vm_free_scratch()
1646 struct xe_vm *vm; in xe_vm_create() local
1657 vm = kzalloc(sizeof(*vm), GFP_KERNEL); in xe_vm_create()
1658 if (!vm) in xe_vm_create()
1661 vm->xe = xe; in xe_vm_create()
1663 vm->size = 1ull << xe->info.va_bits; in xe_vm_create()
1665 vm->flags = flags; in xe_vm_create()
1678 __init_rwsem(&vm->lock, "gsc_vm", &gsc_vm_key); in xe_vm_create()
1680 init_rwsem(&vm->lock); in xe_vm_create()
1682 mutex_init(&vm->snap_mutex); in xe_vm_create()
1684 INIT_LIST_HEAD(&vm->rebind_list); in xe_vm_create()
1686 INIT_LIST_HEAD(&vm->userptr.repin_list); in xe_vm_create()
1687 INIT_LIST_HEAD(&vm->userptr.invalidated); in xe_vm_create()
1688 init_rwsem(&vm->userptr.notifier_lock); in xe_vm_create()
1689 spin_lock_init(&vm->userptr.invalidated_lock); in xe_vm_create()
1691 ttm_lru_bulk_move_init(&vm->lru_bulk_move); in xe_vm_create()
1693 INIT_WORK(&vm->destroy_work, vm_destroy_work_func); in xe_vm_create()
1695 INIT_LIST_HEAD(&vm->preempt.exec_queues); in xe_vm_create()
1696 vm->preempt.min_run_period_ms = 10; /* FIXME: Wire up to uAPI */ in xe_vm_create()
1699 xe_range_fence_tree_init(&vm->rftree[id]); in xe_vm_create()
1701 vm->pt_ops = &xelp_pt_ops; in xe_vm_create()
1710 INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func); in xe_vm_create()
1715 err = xe_svm_init(vm); in xe_vm_create()
1726 drm_gpuvm_init(&vm->gpuvm, "Xe VM", DRM_GPUVM_RESV_PROTECTED, &xe->drm, in xe_vm_create()
1727 vm_resv_obj, 0, vm->size, 0, 0, &gpuvm_ops); in xe_vm_create()
1731 err = xe_vm_lock(vm, true); in xe_vm_create()
1736 vm->flags |= XE_VM_FLAG_64K; in xe_vm_create()
1743 vm->pt_root[id] = xe_pt_create(vm, tile, xe->info.vm_max_level); in xe_vm_create()
1744 if (IS_ERR(vm->pt_root[id])) { in xe_vm_create()
1745 err = PTR_ERR(vm->pt_root[id]); in xe_vm_create()
1746 vm->pt_root[id] = NULL; in xe_vm_create()
1751 if (xe_vm_has_scratch(vm)) { in xe_vm_create()
1753 if (!vm->pt_root[id]) in xe_vm_create()
1756 err = xe_vm_create_scratch(xe, tile, vm); in xe_vm_create()
1760 vm->batch_invalidate_tlb = true; in xe_vm_create()
1763 if (vm->flags & XE_VM_FLAG_LR_MODE) in xe_vm_create()
1764 vm->batch_invalidate_tlb = false; in xe_vm_create()
1768 if (!vm->pt_root[id]) in xe_vm_create()
1771 xe_pt_populate_empty(tile, vm, vm->pt_root[id]); in xe_vm_create()
1773 xe_vm_unlock(vm); in xe_vm_create()
1781 if (!vm->pt_root[id]) in xe_vm_create()
1789 vm->q[id] = q; in xe_vm_create()
1795 vm->composite_fence_ctx = dma_fence_context_alloc(1); in xe_vm_create()
1797 trace_xe_vm_create(vm); in xe_vm_create()
1799 return vm; in xe_vm_create()
1802 xe_vm_unlock(vm); in xe_vm_create()
1804 xe_vm_close_and_put(vm); in xe_vm_create()
1809 vm->size = 0; /* close the vm */ in xe_vm_create()
1810 xe_svm_fini(vm); in xe_vm_create()
1813 mutex_destroy(&vm->snap_mutex); in xe_vm_create()
1815 xe_range_fence_tree_fini(&vm->rftree[id]); in xe_vm_create()
1816 ttm_lru_bulk_move_fini(&xe->ttm, &vm->lru_bulk_move); in xe_vm_create()
1817 kfree(vm); in xe_vm_create()
1823 static void xe_vm_close(struct xe_vm *vm) in xe_vm_close() argument
1825 struct xe_device *xe = vm->xe; in xe_vm_close()
1831 down_write(&vm->lock); in xe_vm_close()
1832 if (xe_vm_in_fault_mode(vm)) in xe_vm_close()
1833 xe_svm_notifier_lock(vm); in xe_vm_close()
1835 vm->size = 0; in xe_vm_close()
1837 if (!((vm->flags & XE_VM_FLAG_MIGRATION))) { in xe_vm_close()
1843 dma_resv_wait_timeout(xe_vm_resv(vm), in xe_vm_close()
1849 if (vm->pt_root[id]) in xe_vm_close()
1850 xe_pt_clear(xe, vm->pt_root[id]); in xe_vm_close()
1853 xe_gt_tlb_invalidation_vm(gt, vm); in xe_vm_close()
1857 if (xe_vm_in_fault_mode(vm)) in xe_vm_close()
1858 xe_svm_notifier_unlock(vm); in xe_vm_close()
1859 up_write(&vm->lock); in xe_vm_close()
1865 void xe_vm_close_and_put(struct xe_vm *vm) in xe_vm_close_and_put() argument
1868 struct xe_device *xe = vm->xe; in xe_vm_close_and_put()
1874 xe_assert(xe, !vm->preempt.num_exec_queues); in xe_vm_close_and_put()
1876 xe_vm_close(vm); in xe_vm_close_and_put()
1877 if (xe_vm_in_preempt_fence_mode(vm)) in xe_vm_close_and_put()
1878 flush_work(&vm->preempt.rebind_work); in xe_vm_close_and_put()
1879 if (xe_vm_in_fault_mode(vm)) in xe_vm_close_and_put()
1880 xe_svm_close(vm); in xe_vm_close_and_put()
1882 down_write(&vm->lock); in xe_vm_close_and_put()
1884 if (vm->q[id]) in xe_vm_close_and_put()
1885 xe_exec_queue_last_fence_put(vm->q[id], vm); in xe_vm_close_and_put()
1887 up_write(&vm->lock); in xe_vm_close_and_put()
1890 if (vm->q[id]) { in xe_vm_close_and_put()
1891 xe_exec_queue_kill(vm->q[id]); in xe_vm_close_and_put()
1892 xe_exec_queue_put(vm->q[id]); in xe_vm_close_and_put()
1893 vm->q[id] = NULL; in xe_vm_close_and_put()
1897 down_write(&vm->lock); in xe_vm_close_and_put()
1898 xe_vm_lock(vm, false); in xe_vm_close_and_put()
1899 drm_gpuvm_for_each_va_safe(gpuva, next, &vm->gpuvm) { in xe_vm_close_and_put()
1903 down_read(&vm->userptr.notifier_lock); in xe_vm_close_and_put()
1905 up_read(&vm->userptr.notifier_lock); in xe_vm_close_and_put()
1908 xe_vm_remove_vma(vm, vma); in xe_vm_close_and_put()
1911 if (xe_vma_has_no_bo(vma) || xe_vma_bo(vma)->vm) { in xe_vm_close_and_put()
1928 xe_vm_free_scratch(vm); in xe_vm_close_and_put()
1931 if (vm->pt_root[id]) { in xe_vm_close_and_put()
1932 xe_pt_destroy(vm->pt_root[id], vm->flags, NULL); in xe_vm_close_and_put()
1933 vm->pt_root[id] = NULL; in xe_vm_close_and_put()
1936 xe_vm_unlock(vm); in xe_vm_close_and_put()
1949 if (xe_vm_in_fault_mode(vm)) in xe_vm_close_and_put()
1950 xe_svm_fini(vm); in xe_vm_close_and_put()
1952 up_write(&vm->lock); in xe_vm_close_and_put()
1955 if (vm->usm.asid) { in xe_vm_close_and_put()
1959 xe_assert(xe, !(vm->flags & XE_VM_FLAG_MIGRATION)); in xe_vm_close_and_put()
1961 lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid); in xe_vm_close_and_put()
1962 xe_assert(xe, lookup == vm); in xe_vm_close_and_put()
1967 xe_range_fence_tree_fini(&vm->rftree[id]); in xe_vm_close_and_put()
1969 xe_vm_put(vm); in xe_vm_close_and_put()
1974 struct xe_vm *vm = in vm_destroy_work_func() local
1976 struct xe_device *xe = vm->xe; in vm_destroy_work_func()
1981 xe_assert(xe, !vm->size); in vm_destroy_work_func()
1983 if (xe_vm_in_preempt_fence_mode(vm)) in vm_destroy_work_func()
1984 flush_work(&vm->preempt.rebind_work); in vm_destroy_work_func()
1986 mutex_destroy(&vm->snap_mutex); in vm_destroy_work_func()
1988 if (vm->flags & XE_VM_FLAG_LR_MODE) in vm_destroy_work_func()
1992 XE_WARN_ON(vm->pt_root[id]); in vm_destroy_work_func()
1994 trace_xe_vm_free(vm); in vm_destroy_work_func()
1996 ttm_lru_bulk_move_fini(&xe->ttm, &vm->lru_bulk_move); in vm_destroy_work_func()
1998 if (vm->xef) in vm_destroy_work_func()
1999 xe_file_put(vm->xef); in vm_destroy_work_func()
2001 kfree(vm); in vm_destroy_work_func()
2006 struct xe_vm *vm = container_of(gpuvm, struct xe_vm, gpuvm); in xe_vm_free() local
2009 queue_work(system_unbound_wq, &vm->destroy_work); in xe_vm_free()
2014 struct xe_vm *vm; in xe_vm_lookup() local
2016 mutex_lock(&xef->vm.lock); in xe_vm_lookup()
2017 vm = xa_load(&xef->vm.xa, id); in xe_vm_lookup()
2018 if (vm) in xe_vm_lookup()
2019 xe_vm_get(vm); in xe_vm_lookup()
2020 mutex_unlock(&xef->vm.lock); in xe_vm_lookup()
2022 return vm; in xe_vm_lookup()
2025 u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile) in xe_vm_pdp4_descriptor() argument
2027 return vm->pt_ops->pde_encode_bo(vm->pt_root[tile->id]->bo, 0, in xe_vm_pdp4_descriptor()
2032 to_wait_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q) in to_wait_exec_queue() argument
2034 return q ? q : vm->q[0]; in to_wait_exec_queue()
2063 struct xe_vm *vm; in xe_vm_create_ioctl() local
2100 vm = xe_vm_create(xe, flags); in xe_vm_create_ioctl()
2101 if (IS_ERR(vm)) in xe_vm_create_ioctl()
2102 return PTR_ERR(vm); in xe_vm_create_ioctl()
2106 err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm, in xe_vm_create_ioctl()
2113 vm->usm.asid = asid; in xe_vm_create_ioctl()
2116 vm->xef = xe_file_get(xef); in xe_vm_create_ioctl()
2120 if (vm->pt_root[id]) in xe_vm_create_ioctl()
2121 xe_drm_client_add_bo(vm->xef->client, vm->pt_root[id]->bo); in xe_vm_create_ioctl()
2125 args->reserved[0] = xe_bo_main_addr(vm->pt_root[0]->bo, XE_PAGE_SIZE); in xe_vm_create_ioctl()
2129 err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL); in xe_vm_create_ioctl()
2138 xe_vm_close_and_put(vm); in xe_vm_create_ioctl()
2149 struct xe_vm *vm; in xe_vm_destroy_ioctl() local
2156 mutex_lock(&xef->vm.lock); in xe_vm_destroy_ioctl()
2157 vm = xa_load(&xef->vm.xa, args->vm_id); in xe_vm_destroy_ioctl()
2158 if (XE_IOCTL_DBG(xe, !vm)) in xe_vm_destroy_ioctl()
2160 else if (XE_IOCTL_DBG(xe, vm->preempt.num_exec_queues)) in xe_vm_destroy_ioctl()
2163 xa_erase(&xef->vm.xa, args->vm_id); in xe_vm_destroy_ioctl()
2164 mutex_unlock(&xef->vm.lock); in xe_vm_destroy_ioctl()
2167 xe_vm_close_and_put(vm); in xe_vm_destroy_ioctl()
2187 struct xe_vma *xe_vm_find_vma_by_addr(struct xe_vm *vm, u64 page_addr) in xe_vm_find_vma_by_addr() argument
2191 if (vm->usm.last_fault_vma) { /* Fast lookup */ in xe_vm_find_vma_by_addr()
2192 if (vma_matches(vm->usm.last_fault_vma, page_addr)) in xe_vm_find_vma_by_addr()
2193 vma = vm->usm.last_fault_vma; in xe_vm_find_vma_by_addr()
2196 vma = xe_vm_find_overlapping_vma(vm, page_addr, SZ_4K); in xe_vm_find_vma_by_addr()
2207 static void prep_vma_destroy(struct xe_vm *vm, struct xe_vma *vma, in prep_vma_destroy() argument
2210 down_read(&vm->userptr.notifier_lock); in prep_vma_destroy()
2212 up_read(&vm->userptr.notifier_lock); in prep_vma_destroy()
2214 xe_vm_remove_vma(vm, vma); in prep_vma_destroy()
2267 static bool __xe_vm_needs_clear_scratch_pages(struct xe_vm *vm, u32 bind_flags) in __xe_vm_needs_clear_scratch_pages() argument
2269 if (!xe_vm_in_fault_mode(vm)) in __xe_vm_needs_clear_scratch_pages()
2272 if (!xe_vm_has_scratch(vm)) in __xe_vm_needs_clear_scratch_pages()
2297 vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops, in vm_bind_ioctl_ops_create() argument
2310 lockdep_assert_held_write(&vm->lock); in vm_bind_ioctl_ops_create()
2312 vm_dbg(&vm->xe->drm, in vm_bind_ioctl_ops_create()
2320 ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, addr, range, in vm_bind_ioctl_ops_create()
2324 ops = drm_gpuvm_sm_unmap_ops_create(&vm->gpuvm, addr, range); in vm_bind_ioctl_ops_create()
2327 ops = drm_gpuvm_prefetch_ops_create(&vm->gpuvm, addr, range); in vm_bind_ioctl_ops_create()
2330 xe_assert(vm->xe, bo); in vm_bind_ioctl_ops_create()
2336 vm_bo = drm_gpuvm_bo_obtain(&vm->gpuvm, obj); in vm_bind_ioctl_ops_create()
2347 drm_warn(&vm->xe->drm, "NOT POSSIBLE"); in vm_bind_ioctl_ops_create()
2367 __xe_vm_needs_clear_scratch_pages(vm, flags); in vm_bind_ioctl_ops_create()
2382 ctx.devmem_possible = IS_DGFX(vm->xe) && in vm_bind_ioctl_ops_create()
2385 for_each_tile(tile, vm->xe, id) in vm_bind_ioctl_ops_create()
2392 svm_range = xe_svm_range_find_or_insert(vm, addr, vma, &ctx); in vm_bind_ioctl_ops_create()
2395 u64 ret = xe_svm_find_vma_start(vm, addr, range_end, vma); in vm_bind_ioctl_ops_create()
2409 if (xe_svm_range_validate(vm, svm_range, tile_mask, !!prefetch_region)) { in vm_bind_ioctl_ops_create()
2432 print_op(vm->xe, __op); in vm_bind_ioctl_ops_create()
2439 drm_gpuva_ops_free(&vm->gpuvm, ops); in vm_bind_ioctl_ops_create()
2445 static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op, in new_vma() argument
2453 lockdep_assert_held_write(&vm->lock); in new_vma()
2459 if (!bo->vm) { in new_vma()
2460 err = drm_exec_lock_obj(&exec, xe_vm_obj(vm)); in new_vma()
2473 vma = xe_vma_create(vm, bo, op->gem.offset, in new_vma()
2481 else if (!xe_vma_has_no_bo(vma) && !bo->vm) in new_vma()
2482 err = add_preempt_fences(vm, bo); in new_vma()
2489 prep_vma_destroy(vm, vma, false); in new_vma()
2529 static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op) in xe_vma_op_commit() argument
2533 lockdep_assert_held_write(&vm->lock); in xe_vma_op_commit()
2537 err |= xe_vm_insert_vma(vm, op->map.vma); in xe_vma_op_commit()
2546 prep_vma_destroy(vm, gpuva_to_vma(op->base.remap.unmap->va), in xe_vma_op_commit()
2551 err |= xe_vm_insert_vma(vm, op->remap.prev); in xe_vma_op_commit()
2561 err |= xe_vm_insert_vma(vm, op->remap.next); in xe_vma_op_commit()
2579 prep_vma_destroy(vm, gpuva_to_vma(op->base.unmap.va), true); in xe_vma_op_commit()
2586 drm_warn(&vm->xe->drm, "NOT POSSIBLE"); in xe_vma_op_commit()
2592 static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops, in vm_bind_ioctl_ops_parse() argument
2595 struct xe_device *xe = vm->xe; in vm_bind_ioctl_ops_parse()
2601 lockdep_assert_held_write(&vm->lock); in vm_bind_ioctl_ops_parse()
2603 for_each_tile(tile, vm->xe, id) in vm_bind_ioctl_ops_parse()
2627 vma = new_vma(vm, &op->base.map, op->map.pat_index, in vm_bind_ioctl_ops_parse()
2633 if (((op->map.immediate || !xe_vm_in_fault_mode(vm)) && in vm_bind_ioctl_ops_parse()
2655 xe_svm_has_mapping(vm, start, end)) in vm_bind_ioctl_ops_parse()
2674 vma = new_vma(vm, op->base.remap.prev, in vm_bind_ioctl_ops_parse()
2704 vma = new_vma(vm, op->base.remap.next, in vm_bind_ioctl_ops_parse()
2741 xe_svm_has_mapping(vm, xe_vma_start(vma), in vm_bind_ioctl_ops_parse()
2765 drm_warn(&vm->xe->drm, "NOT POSSIBLE"); in vm_bind_ioctl_ops_parse()
2768 err = xe_vma_op_commit(vm, op); in vm_bind_ioctl_ops_parse()
2776 static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op, in xe_vma_op_unwind() argument
2780 lockdep_assert_held_write(&vm->lock); in xe_vma_op_unwind()
2785 prep_vma_destroy(vm, op->map.vma, post_commit); in xe_vma_op_unwind()
2794 down_read(&vm->userptr.notifier_lock); in xe_vma_op_unwind()
2796 up_read(&vm->userptr.notifier_lock); in xe_vma_op_unwind()
2798 xe_vm_insert_vma(vm, vma); in xe_vma_op_unwind()
2807 prep_vma_destroy(vm, op->remap.prev, prev_post_commit); in xe_vma_op_unwind()
2811 prep_vma_destroy(vm, op->remap.next, next_post_commit); in xe_vma_op_unwind()
2815 down_read(&vm->userptr.notifier_lock); in xe_vma_op_unwind()
2817 up_read(&vm->userptr.notifier_lock); in xe_vma_op_unwind()
2819 xe_vm_insert_vma(vm, vma); in xe_vma_op_unwind()
2827 drm_warn(&vm->xe->drm, "NOT POSSIBLE"); in xe_vma_op_unwind()
2831 static void vm_bind_ioctl_ops_unwind(struct xe_vm *vm, in vm_bind_ioctl_ops_unwind() argument
2847 xe_vma_op_unwind(vm, op, in vm_bind_ioctl_ops_unwind()
2859 struct xe_vm *vm = xe_vma_vm(vma); in vma_lock_and_validate() local
2863 if (!bo->vm) in vma_lock_and_validate()
2866 err = xe_bo_validate(bo, vm, in vma_lock_and_validate()
2867 !xe_vm_in_preempt_fence_mode(vm)); in vma_lock_and_validate()
2888 static int prefetch_ranges(struct xe_vm *vm, struct xe_vma_op *op) in prefetch_ranges() argument
2890 bool devmem_possible = IS_DGFX(vm->xe) && IS_ENABLED(CONFIG_DRM_XE_PAGEMAP); in prefetch_ranges()
2912 xe_svm_range_migrate_to_smem(vm, svm_range); in prefetch_ranges()
2915 tile = &vm->xe->tiles[region_to_mem_type[region] - XE_PL_VRAM0]; in prefetch_ranges()
2918 drm_dbg(&vm->xe->drm, "VRAM allocation failed, retry from userspace, asid=%u, gpusvm=%p, errno=%pe\n", in prefetch_ranges()
2919 vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err)); in prefetch_ranges()
2925 err = xe_svm_range_get_pages(vm, svm_range, &ctx); in prefetch_ranges()
2927 drm_dbg(&vm->xe->drm, "Get pages failed, asid=%u, gpusvm=%p, errno=%pe\n", in prefetch_ranges()
2928 vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err)); in prefetch_ranges()
2939 static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm, in op_lock_and_prep() argument
2948 !xe_vm_in_fault_mode(vm) || in op_lock_and_prep()
2983 xe_assert(vm->xe, region <= ARRAY_SIZE(region_to_mem_type)); in op_lock_and_prep()
2994 drm_warn(&vm->xe->drm, "NOT POSSIBLE"); in op_lock_and_prep()
3000 static int vm_bind_ioctl_ops_prefetch_ranges(struct xe_vm *vm, struct xe_vma_ops *vops) in vm_bind_ioctl_ops_prefetch_ranges() argument
3010 err = prefetch_ranges(vm, op); in vm_bind_ioctl_ops_prefetch_ranges()
3020 struct xe_vm *vm, in vm_bind_ioctl_ops_lock_and_prep() argument
3026 err = drm_exec_lock_obj(exec, xe_vm_obj(vm)); in vm_bind_ioctl_ops_lock_and_prep()
3031 err = op_lock_and_prep(exec, vm, op); in vm_bind_ioctl_ops_lock_and_prep()
3038 vm->xe->vm_inject_error_position == FORCE_OP_ERROR_LOCK) in vm_bind_ioctl_ops_lock_and_prep()
3079 static int vm_ops_setup_tile_args(struct xe_vm *vm, struct xe_vma_ops *vops) in vm_ops_setup_tile_args() argument
3086 for_each_tile(tile, vm->xe, id) { in vm_ops_setup_tile_args()
3095 if (vm->pt_root[id] && !list_empty(&q->multi_gt_list)) in vm_ops_setup_tile_args()
3098 vops->pt_update_ops[id].q = vm->q[id]; in vm_ops_setup_tile_args()
3105 static struct dma_fence *ops_execute(struct xe_vm *vm, in ops_execute() argument
3115 number_tiles = vm_ops_setup_tile_args(vm, vops); in ops_execute()
3128 for_each_tile(tile, vm->xe, id) { in ops_execute()
3141 for_each_tile(tile, vm->xe, id) { in ops_execute()
3155 vm->composite_fence_ctx, in ops_execute()
3156 vm->composite_fence_seqno++, in ops_execute()
3159 --vm->composite_fence_seqno; in ops_execute()
3166 for_each_tile(tile, vm->xe, id) { in ops_execute()
3176 for_each_tile(tile, vm->xe, id) { in ops_execute()
3188 trace_xe_vm_ops_fail(vm); in ops_execute()
3199 static void op_add_ufence(struct xe_vm *vm, struct xe_vma_op *op, in op_add_ufence() argument
3218 drm_warn(&vm->xe->drm, "NOT POSSIBLE"); in op_add_ufence()
3222 static void vm_bind_ioctl_ops_fini(struct xe_vm *vm, struct xe_vma_ops *vops, in vm_bind_ioctl_ops_fini() argument
3225 struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, vops->q); in vm_bind_ioctl_ops_fini()
3233 op_add_ufence(vm, op, ufence); in vm_bind_ioctl_ops_fini()
3246 xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence); in vm_bind_ioctl_ops_fini()
3250 static struct dma_fence *vm_bind_ioctl_ops_execute(struct xe_vm *vm, in vm_bind_ioctl_ops_execute() argument
3257 lockdep_assert_held_write(&vm->lock); in vm_bind_ioctl_ops_execute()
3262 err = vm_bind_ioctl_ops_lock_and_prep(&exec, vm, vops); in vm_bind_ioctl_ops_execute()
3269 fence = ops_execute(vm, vops); in vm_bind_ioctl_ops_execute()
3272 vm_bind_ioctl_ops_fini(vm, vops, NULL); in vm_bind_ioctl_ops_execute()
3276 vm_bind_ioctl_ops_fini(vm, vops, fence); in vm_bind_ioctl_ops_execute()
3302 static int vm_bind_ioctl_check_args(struct xe_device *xe, struct xe_vm *vm, in vm_bind_ioctl_check_args() argument
3353 (!xe_vm_in_fault_mode(vm) || in vm_bind_ioctl_check_args()
3427 static int vm_bind_ioctl_signal_fences(struct xe_vm *vm, in vm_bind_ioctl_signal_fences() argument
3436 to_wait_exec_queue(vm, q), vm); in vm_bind_ioctl_signal_fences()
3443 xe_exec_queue_last_fence_set(to_wait_exec_queue(vm, q), vm, in vm_bind_ioctl_signal_fences()
3450 static void xe_vma_ops_init(struct xe_vma_ops *vops, struct xe_vm *vm, in xe_vma_ops_init() argument
3456 vops->vm = vm; in xe_vma_ops_init()
3526 struct xe_vm *vm; in xe_vm_bind_ioctl() local
3536 vm = xe_vm_lookup(xef, args->vm_id); in xe_vm_bind_ioctl()
3537 if (XE_IOCTL_DBG(xe, !vm)) in xe_vm_bind_ioctl()
3540 err = vm_bind_ioctl_check_args(xe, vm, args, &bind_ops); in xe_vm_bind_ioctl()
3558 xe_svm_flush(vm); in xe_vm_bind_ioctl()
3560 err = down_write_killable(&vm->lock); in xe_vm_bind_ioctl()
3564 if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) { in xe_vm_bind_ioctl()
3573 if (XE_IOCTL_DBG(xe, range > vm->size) || in xe_vm_bind_ioctl()
3574 XE_IOCTL_DBG(xe, addr > vm->size - range)) { in xe_vm_bind_ioctl()
3637 (xe_vm_in_lr_mode(vm) ? in xe_vm_bind_ioctl()
3658 xe_vma_ops_init(&vops, vm, q, syncs, num_syncs); in xe_vm_bind_ioctl()
3668 ops[i] = vm_bind_ioctl_ops_create(vm, &vops, bos[i], obj_offset, in xe_vm_bind_ioctl()
3677 err = vm_bind_ioctl_ops_parse(vm, ops[i], &vops); in xe_vm_bind_ioctl()
3684 vm->xe->vm_inject_error_position = in xe_vm_bind_ioctl()
3685 (vm->xe->vm_inject_error_position + 1) % in xe_vm_bind_ioctl()
3701 err = vm_bind_ioctl_ops_prefetch_ranges(vm, &vops); in xe_vm_bind_ioctl()
3705 fence = vm_bind_ioctl_ops_execute(vm, &vops); in xe_vm_bind_ioctl()
3713 vm_bind_ioctl_ops_unwind(vm, ops, args->num_binds); in xe_vm_bind_ioctl()
3717 drm_gpuva_ops_free(&vm->gpuvm, ops[i]); in xe_vm_bind_ioctl()
3720 err = vm_bind_ioctl_signal_fences(vm, q, syncs, num_syncs); in xe_vm_bind_ioctl()
3729 up_write(&vm->lock); in xe_vm_bind_ioctl()
3734 xe_vm_put(vm); in xe_vm_bind_ioctl()
3756 struct dma_fence *xe_vm_bind_kernel_bo(struct xe_vm *vm, struct xe_bo *bo, in xe_vm_bind_kernel_bo() argument
3766 xe_vm_get(vm); in xe_vm_bind_kernel_bo()
3770 down_write(&vm->lock); in xe_vm_bind_kernel_bo()
3772 xe_vma_ops_init(&vops, vm, q, NULL, 0); in xe_vm_bind_kernel_bo()
3774 ops = vm_bind_ioctl_ops_create(vm, &vops, bo, 0, addr, xe_bo_size(bo), in xe_vm_bind_kernel_bo()
3776 vm->xe->pat.idx[cache_lvl]); in xe_vm_bind_kernel_bo()
3782 err = vm_bind_ioctl_ops_parse(vm, ops, &vops); in xe_vm_bind_kernel_bo()
3786 xe_assert(vm->xe, !list_empty(&vops.list)); in xe_vm_bind_kernel_bo()
3792 fence = vm_bind_ioctl_ops_execute(vm, &vops); in xe_vm_bind_kernel_bo()
3798 vm_bind_ioctl_ops_unwind(vm, &ops, 1); in xe_vm_bind_kernel_bo()
3801 drm_gpuva_ops_free(&vm->gpuvm, ops); in xe_vm_bind_kernel_bo()
3804 up_write(&vm->lock); in xe_vm_bind_kernel_bo()
3808 xe_vm_put(vm); in xe_vm_bind_kernel_bo()
3826 int xe_vm_lock(struct xe_vm *vm, bool intr) in xe_vm_lock() argument
3829 return dma_resv_lock_interruptible(xe_vm_resv(vm), NULL); in xe_vm_lock()
3831 return dma_resv_lock(xe_vm_resv(vm), NULL); in xe_vm_lock()
3840 void xe_vm_unlock(struct xe_vm *vm) in xe_vm_unlock() argument
3842 dma_resv_unlock(xe_vm_resv(vm)); in xe_vm_unlock()
3857 int xe_vm_range_tilemask_tlb_invalidation(struct xe_vm *vm, u64 start, in xe_vm_range_tilemask_tlb_invalidation() argument
3869 for_each_tile(tile, vm->xe, id) { in xe_vm_range_tilemask_tlb_invalidation()
3878 vm->usm.asid); in xe_vm_range_tilemask_tlb_invalidation()
3893 vm->usm.asid); in xe_vm_range_tilemask_tlb_invalidation()
3920 struct xe_vm *vm = xe_vma_vm(vma); in xe_vm_invalidate_vma() local
3930 vm_dbg(&vm->xe->drm, in xe_vm_invalidate_vma()
3940 lockdep_assert(lockdep_is_held_type(&vm->userptr.notifier_lock, 0) || in xe_vm_invalidate_vma()
3941 (lockdep_is_held_type(&vm->userptr.notifier_lock, 1) && in xe_vm_invalidate_vma()
3942 lockdep_is_held(&xe_vm_resv(vm)->lock.base))); in xe_vm_invalidate_vma()
3947 WARN_ON_ONCE(!dma_resv_test_signaled(xe_vm_resv(vm), in xe_vm_invalidate_vma()
3970 int xe_vm_validate_protected(struct xe_vm *vm) in xe_vm_validate_protected() argument
3975 if (!vm) in xe_vm_validate_protected()
3978 mutex_lock(&vm->snap_mutex); in xe_vm_validate_protected()
3980 drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) { in xe_vm_validate_protected()
3989 err = xe_pxp_bo_key_check(vm->xe->pxp, bo); in xe_vm_validate_protected()
3995 mutex_unlock(&vm->snap_mutex); in xe_vm_validate_protected()
4010 struct xe_vm_snapshot *xe_vm_snapshot_capture(struct xe_vm *vm) in xe_vm_snapshot_capture() argument
4016 if (!vm) in xe_vm_snapshot_capture()
4019 mutex_lock(&vm->snap_mutex); in xe_vm_snapshot_capture()
4020 drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) { in xe_vm_snapshot_capture()
4034 drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) { in xe_vm_snapshot_capture()
4064 mutex_unlock(&vm->snap_mutex); in xe_vm_snapshot_capture()