/* drivers/gpu/drm/xe/xe_vm.h: excerpt of lines referencing "vm" */

static inline struct xe_vm *xe_vm_get(struct xe_vm *vm)
{
	drm_gpuvm_get(&vm->gpuvm);
	return vm;
}

static inline void xe_vm_put(struct xe_vm *vm)
{
	drm_gpuvm_put(&vm->gpuvm);
}

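/*
 * Usage sketch (not part of the header): xe_vm_get()/xe_vm_put() wrap
 * the underlying drm_gpuvm reference count, so any code that stores a
 * vm pointer past the current call chain takes a reference first. The
 * helper names below are hypothetical.
 */
static inline void example_store_vm(struct xe_vm *vm, struct xe_vm **slot)
{
	*slot = xe_vm_get(vm);		/* hold a reference while stored */
}

static inline void example_drop_vm(struct xe_vm **slot)
{
	xe_vm_put(*slot);		/* balances the xe_vm_get() above */
	*slot = NULL;
}
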
int xe_vm_lock(struct xe_vm *vm, bool intr);

void xe_vm_unlock(struct xe_vm *vm);

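/*
 * Usage sketch (hypothetical caller): xe_vm_lock() takes the vm's
 * dma-resv lock; with intr == true the acquire is interruptible, so
 * the error return must be checked before touching the vm.
 */
static inline int example_with_vm_locked(struct xe_vm *vm)
{
	int err = xe_vm_lock(vm, true);	/* interruptible acquire */

	if (err)
		return err;

	/* ... work on the vm under its resv lock ... */

	xe_vm_unlock(vm);
	return 0;
}
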
static inline bool xe_vm_is_closed(struct xe_vm *vm)
{
	return !vm->size;
}

static inline bool xe_vm_is_banned(struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_BANNED;
}

static inline bool xe_vm_is_closed_or_banned(struct xe_vm *vm)
{
	lockdep_assert_held(&vm->lock);
	return xe_vm_is_closed(vm) || xe_vm_is_banned(vm);
}

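/*
 * Usage sketch (hypothetical ioctl path): the closed/banned check
 * requires vm->lock, matching the lockdep assert above; -ENOENT here
 * mirrors how the driver rejects operations on a dying vm.
 */
static inline int example_vm_ioctl_op(struct xe_vm *vm)
{
	int err = 0;

	down_write(&vm->lock);
	if (xe_vm_is_closed_or_banned(vm)) {
		err = -ENOENT;
		goto unlock;
	}

	/* ... perform the bind/exec work ... */

unlock:
	up_write(&vm->lock);
	return err;
}
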
struct xe_vma *
xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range);

static inline bool xe_vm_has_scratch(const struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_SCRATCH_PAGE;
}

static inline struct xe_vm *gpuva_to_vm(struct drm_gpuva *gpuva)
{
	return gpuvm_to_vm(gpuva->vm);
}

static inline struct xe_vm *xe_vma_vm(struct xe_vma *vma)
{
	return container_of(vma->gpuva.vm, struct xe_vm, gpuvm);
}

struct xe_vma *xe_vm_find_vma_by_addr(struct xe_vm *vm, u64 page_addr);

u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile);

void xe_vm_close_and_put(struct xe_vm *vm);

static inline bool xe_vm_in_fault_mode(struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_FAULT_MODE;
}

static inline bool xe_vm_in_lr_mode(struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_LR_MODE;
}

static inline bool xe_vm_in_preempt_fence_mode(struct xe_vm *vm)
{
	return xe_vm_in_lr_mode(vm) && !xe_vm_in_fault_mode(vm);
}

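/*
 * Added commentary (not in the header): a long-running (LR) vm avoids
 * dma-fence-based job completion and must instead recover residency
 * via either page faults or preempt fences, so preempt-fence mode is
 * exactly "LR mode && !fault mode" as the helper above encodes.
 */
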
int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q);
void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q);

int xe_vm_userptr_pin(struct xe_vm *vm);

int __xe_vm_userptr_needs_repin(struct xe_vm *vm);

int xe_vm_userptr_check_repin(struct xe_vm *vm);

int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker);
struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma,
				u8 tile_mask);
struct dma_fence *xe_vm_range_rebind(struct xe_vm *vm,
				     struct xe_vma *vma,
				     struct xe_svm_range *range,
				     u8 tile_mask);
struct dma_fence *xe_vm_range_unbind(struct xe_vm *vm,
				     struct xe_svm_range *range);

int xe_vm_range_tilemask_tlb_invalidation(struct xe_vm *vm, u64 start,
					  u64 end, u8 tile_mask);

int xe_vm_validate_protected(struct xe_vm *vm);

static inline void xe_vm_queue_rebind_worker(struct xe_vm *vm)
{
	xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
	queue_work(vm->xe->ordered_wq, &vm->preempt.rebind_work);
}

static inline void xe_vm_reactivate_rebind(struct xe_vm *vm)
{
	if (xe_vm_in_preempt_fence_mode(vm) && vm->preempt.rebind_deactivated) {
		vm->preempt.rebind_deactivated = false;
		xe_vm_queue_rebind_worker(vm);
	}
}

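/*
 * Usage sketch (hypothetical): the rebind worker runs on the device's
 * ordered workqueue and is only valid for preempt-fence vms (see the
 * assert in xe_vm_queue_rebind_worker()); a path that temporarily
 * deactivated rebinds re-arms it like this.
 */
static inline void example_resume_rebinds(struct xe_vm *vm)
{
	/* No-op unless the vm is in preempt-fence mode and was deactivated */
	xe_vm_reactivate_rebind(vm);
}
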
int xe_vm_validate_rebind(struct xe_vm *vm, struct drm_exec *exec,
			  unsigned int num_fences);

struct dma_fence *xe_vm_bind_kernel_bo(struct xe_vm *vm, struct xe_bo *bo,
				       struct xe_exec_queue *q, u64 addr,
				       enum xe_cache_level cache_lvl);

static inline struct dma_resv *xe_vm_resv(struct xe_vm *vm)
{
	return drm_gpuvm_resv(&vm->gpuvm);
}

void xe_vm_kill(struct xe_vm *vm, bool unlocked);

#define xe_vm_assert_held(vm) dma_resv_assert_held(xe_vm_resv(vm))

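/*
 * Usage sketch (hypothetical helper): xe_vm_resv() exposes the gpuvm's
 * shared dma_resv, so fences can be attached with the regular dma-resv
 * API while the lock is held. Assumes fence slots were reserved
 * beforehand with dma_resv_reserve_fences().
 */
static inline void example_add_vm_fence(struct xe_vm *vm, struct dma_fence *fence)
{
	xe_vm_assert_held(vm);	/* the resv lock must already be held */
	dma_resv_add_fence(xe_vm_resv(vm), fence, DMA_RESV_USAGE_BOOKKEEP);
}
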
struct xe_vm_snapshot *xe_vm_snapshot_capture(struct xe_vm *vm);

static inline struct pin_cookie xe_vm_set_validating(struct xe_vm *vm,
						     bool allow_res_evict)
{
	struct pin_cookie cookie = {};

	if (vm && !allow_res_evict) {
		xe_vm_assert_held(vm);
		cookie = lockdep_pin_lock(&xe_vm_resv(vm)->lock.base);
		/* Pairs with READ_ONCE() in xe_vm_is_validating() */
		WRITE_ONCE(vm->validating, current);
	}

	return cookie;
}

static inline void xe_vm_clear_validating(struct xe_vm *vm, bool allow_res_evict,
					  struct pin_cookie cookie)
{
	if (vm && !allow_res_evict) {
		lockdep_unpin_lock(&xe_vm_resv(vm)->lock.base, cookie);
		/* Pairs with READ_ONCE() in xe_vm_is_validating() */
		WRITE_ONCE(vm->validating, NULL);
	}
}

static inline bool xe_vm_is_validating(struct xe_vm *vm)
{
	/* Pairs with WRITE_ONCE() in xe_vm_set_validating() */
	if (READ_ONCE(vm->validating) == current) {
		xe_vm_assert_held(vm);
		return true;
	}
	return false;
}

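/*
 * Usage sketch (hypothetical caller): the set/clear pair brackets
 * object validation so the eviction path can ask, via
 * xe_vm_is_validating(), whether the current thread is the validator
 * and avoid evicting objects out from under itself.
 */
static inline int example_validate_vm_bos(struct xe_vm *vm, bool allow_res_evict)
{
	struct pin_cookie cookie;
	int err = 0;

	cookie = xe_vm_set_validating(vm, allow_res_evict);
	/* ... validate the vm's buffer objects here ... */
	xe_vm_clear_validating(vm, allow_res_evict, cookie);

	return err;
}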