Lines matching refs: vm
112 struct drm_gpuvm *vm; member
161 struct msm_gem_vm *vm = container_of(gpuvm, struct msm_gem_vm, base); in msm_gem_vm_free() local
163 drm_mm_takedown(&vm->mm); in msm_gem_vm_free()
164 if (vm->mmu) in msm_gem_vm_free()
165 vm->mmu->funcs->destroy(vm->mmu); in msm_gem_vm_free()
166 dma_fence_put(vm->last_fence); in msm_gem_vm_free()
167 put_pid(vm->pid); in msm_gem_vm_free()
168 kfree(vm->log); in msm_gem_vm_free()
169 kfree(vm); in msm_gem_vm_free()
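
The matches above cover the whole teardown path. A minimal reconstruction of that order, assuming the struct msm_gem_vm fields implied by the matches (a sketch, not the full source):

    /* Teardown order as reconstructed from the matched lines. */
    static void msm_gem_vm_free(struct drm_gpuvm *gpuvm)
    {
            struct msm_gem_vm *vm = container_of(gpuvm, struct msm_gem_vm, base);

            drm_mm_takedown(&vm->mm);                 /* drop the iova range allocator */
            if (vm->mmu)
                    vm->mmu->funcs->destroy(vm->mmu); /* tear down the pagetable/IOMMU backend */
            dma_fence_put(vm->last_fence);            /* release the last retained fence */
            put_pid(vm->pid);                         /* drop the owning-process reference */
            kfree(vm->log);                           /* free the optional operations log */
            kfree(vm);
    }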
179 struct msm_gem_vm *vm = to_msm_vm(gpuvm); in msm_gem_vm_unusable() local
180 uint32_t vm_log_len = (1 << vm->log_shift); in msm_gem_vm_unusable()
185 vm->unusable = true; in msm_gem_vm_unusable()
188 if (!vm->log || !vm->log[0].op) in msm_gem_vm_unusable()
191 mutex_lock(&vm->mmu_lock); in msm_gem_vm_unusable()
198 first = vm->log_idx; in msm_gem_vm_unusable()
200 if (!vm->log[first].op) { in msm_gem_vm_unusable()
215 struct msm_gem_vm_log_entry *e = &vm->log[idx]; in msm_gem_vm_unusable()
221 mutex_unlock(&vm->mmu_lock); in msm_gem_vm_unusable()
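
The dump logic in msm_gem_vm_unusable() is only partially visible above. A hedged sketch of how such a ring is walked oldest-first; the start-at-0-when-not-yet-wrapped interpretation and the pr_err() output are assumptions:

    uint32_t vm_log_len = 1 << vm->log_shift;
    uint32_t first, i;

    vm->unusable = true;

    /* Nothing to dump if logging was disabled or never used. */
    if (!vm->log || !vm->log[0].op)
            return;

    mutex_lock(&vm->mmu_lock);

    first = vm->log_idx;
    if (!vm->log[first].op)
            first = 0;      /* assumed: ring has not wrapped, oldest entry is slot 0 */

    for (i = 0; i < vm_log_len; i++) {
            struct msm_gem_vm_log_entry *e = &vm->log[(first + i) % vm_log_len];

            if (!e->op)
                    continue;

            pr_err("vm log: %s %016llx-%016llx (queue %d)\n",
                   e->op, e->iova, e->iova + e->range, e->queue_id);
    }

    mutex_unlock(&vm->mmu_lock);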
225 vm_log(struct msm_gem_vm *vm, const char *op, uint64_t iova, uint64_t range, int queue_id) in vm_log() argument
229 if (!vm->managed) in vm_log()
230 lockdep_assert_held(&vm->mmu_lock); in vm_log()
232 vm_dbg("%s:%p:%d: %016llx %016llx", op, vm, queue_id, iova, iova + range); in vm_log()
234 if (!vm->log) in vm_log()
237 idx = vm->log_idx; in vm_log()
238 vm->log[idx].op = op; in vm_log()
239 vm->log[idx].iova = iova; in vm_log()
240 vm->log[idx].range = range; in vm_log()
241 vm->log[idx].queue_id = queue_id; in vm_log()
242 vm->log_idx = (vm->log_idx + 1) & ((1 << vm->log_shift) - 1); in vm_log()
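
vm_log() is almost fully covered by the matches. Reconstructed below; the early return when logging is disabled is the only assumption:

    static void
    vm_log(struct msm_gem_vm *vm, const char *op, uint64_t iova, uint64_t range, int queue_id)
    {
            int idx;

            /* For VMs whose VA space is not kernel-managed, the caller must
             * already hold mmu_lock (see the map/unmap paths below). */
            if (!vm->managed)
                    lockdep_assert_held(&vm->mmu_lock);

            vm_dbg("%s:%p:%d: %016llx %016llx", op, vm, queue_id, iova, iova + range);

            if (!vm->log)
                    return;         /* logging disabled (log_shift == 0) */

            idx = vm->log_idx;
            vm->log[idx].op = op;
            vm->log[idx].iova = iova;
            vm->log[idx].range = range;
            vm->log[idx].queue_id = queue_id;
            /* power-of-two ring: wrap with a mask derived from log_shift */
            vm->log_idx = (vm->log_idx + 1) & ((1 << vm->log_shift) - 1);
    }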
246 vm_unmap_op(struct msm_gem_vm *vm, const struct msm_vm_unmap_op *op) in vm_unmap_op() argument
253 vm_log(vm, reason, op->iova, op->range, op->queue_id); in vm_unmap_op()
255 vm->mmu->funcs->unmap(vm->mmu, op->iova, op->range); in vm_unmap_op()
259 vm_map_op(struct msm_gem_vm *vm, const struct msm_vm_map_op *op) in vm_map_op() argument
261 vm_log(vm, "map", op->iova, op->range, op->queue_id); in vm_map_op()
263 return vm->mmu->funcs->map(vm->mmu, op->iova, op->sgt, op->offset, in vm_map_op()
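
Both helpers log first and then call straight into the MMU backend. A sketch based on the matches; the op->reason fallback and the map() arguments past op->offset (truncated in the match) are assumptions:

    static void
    vm_unmap_op(struct msm_gem_vm *vm, const struct msm_vm_unmap_op *op)
    {
            const char *reason = op->reason ?: "unmap";   /* assumed optional reason string */

            vm_log(vm, reason, op->iova, op->range, op->queue_id);

            vm->mmu->funcs->unmap(vm->mmu, op->iova, op->range);
    }

    static int
    vm_map_op(struct msm_gem_vm *vm, const struct msm_vm_map_op *op)
    {
            vm_log(vm, "map", op->iova, op->range, op->queue_id);

            /* trailing arguments are assumed from context */
            return vm->mmu->funcs->map(vm->mmu, op->iova, op->sgt, op->offset,
                                       op->range, op->prot);
    }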
270 struct msm_gem_vm *vm = to_msm_vm(vma->vm); in msm_gem_vma_unmap() local
282 if (!vm->managed) in msm_gem_vma_unmap()
283 mutex_lock(&vm->mmu_lock); in msm_gem_vma_unmap()
285 vm_unmap_op(vm, &(struct msm_vm_unmap_op){ in msm_gem_vma_unmap()
291 if (!vm->managed) in msm_gem_vma_unmap()
292 mutex_unlock(&vm->mmu_lock); in msm_gem_vma_unmap()
301 struct msm_gem_vm *vm = to_msm_vm(vma->vm); in msm_gem_vma_map() local
318 if (!vm->managed) in msm_gem_vma_map()
319 mutex_lock(&vm->mmu_lock); in msm_gem_vma_map()
330 ret = vm_map_op(vm, &(struct msm_vm_map_op){ in msm_gem_vma_map()
338 if (!vm->managed) in msm_gem_vma_map()
339 mutex_unlock(&vm->mmu_lock); in msm_gem_vma_map()
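
msm_gem_vma_unmap() and msm_gem_vma_map() bracket the MMU call with mmu_lock, but only when the VM is not kernel-managed. A hedged sketch of the map side; the helper name and the msm_vm_map_op fields other than iova/range/offset are illustrative:

    /* Illustrative helper, not the driver's exact signature. */
    static int vma_map_locked_sketch(struct drm_gpuva *vma, struct sg_table *sgt, int prot)
    {
            struct msm_gem_vm *vm = to_msm_vm(vma->vm);
            int ret;

            /* mmu_lock is only taken for VMs whose VA space the kernel
             * does not manage itself. */
            if (!vm->managed)
                    mutex_lock(&vm->mmu_lock);

            ret = vm_map_op(vm, &(struct msm_vm_map_op){
                    .iova = vma->va.addr,
                    .range = vma->va.range,
                    .offset = vma->gem.offset,
                    .sgt = sgt,
                    .prot = prot,
            });

            if (!vm->managed)
                    mutex_unlock(&vm->mmu_lock);

            return ret;
    }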
350 struct msm_gem_vm *vm = to_msm_vm(vma->vm); in msm_gem_vma_close() local
355 drm_gpuvm_resv_assert_held(&vm->base); in msm_gem_vma_close()
360 if (vma->va.addr && vm->managed) in msm_gem_vma_close()
374 struct msm_gem_vm *vm = to_msm_vm(gpuvm); in msm_gem_vma_new() local
379 drm_gpuvm_resv_assert_held(&vm->base); in msm_gem_vma_new()
385 if (vm->managed) { in msm_gem_vma_new()
388 ret = drm_mm_insert_node_in_range(&vm->mm, &vma->node, in msm_gem_vma_new()
405 ret = drm_gpuva_insert(&vm->base, &vma->base); in msm_gem_vma_new()
412 vm_bo = drm_gpuvm_bo_obtain(&vm->base, obj); in msm_gem_vma_new()
427 if (vm->managed) in msm_gem_vma_new()
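
Inside msm_gem_vma_new(), the matches show two distinct steps: carving an iova out of vm->mm for kernel-managed VMs, then inserting the new VA into the drm_gpuvm and obtaining the vm_bo link to the GEM object. A fragment-level sketch, assuming standard drm_mm/drm_gpuvm calling conventions and illustrative error labels:

    if (vm->managed) {
            /* Kernel-managed: the driver picks the iova from the drm_mm pool.
             * Size/alignment/mode arguments here are assumptions. */
            ret = drm_mm_insert_node_in_range(&vm->mm, &vma->node,
                                              obj->size, PAGE_SIZE, 0,
                                              range_start, range_end, 0);
            if (ret)
                    goto err_free_vma;
    }

    /* Publish the VA in the GPUVM's interval tree. */
    ret = drm_gpuva_insert(&vm->base, &vma->base);
    if (ret)
            goto err_free_range;

    /* Obtain (or create) the gpuvm_bo tying this VM to the GEM object. */
    vm_bo = drm_gpuvm_bo_obtain(&vm->base, obj);
    if (IS_ERR(vm_bo)) {
            ret = PTR_ERR(vm_bo);
            goto err_va_remove;
    }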
473 return msm_gem_vma_new(arg->job->vm, op->gem.obj, op->gem.offset, in vma_from_op()
490 vm_dbg("%p:%p:%p: %016llx %016llx", vma->vm, vma, vma->gem.obj, in msm_gem_vm_sm_step_map()
525 struct drm_gpuvm *vm = job->vm; in msm_gem_vm_sm_step_remap() local
532 vm_dbg("orig_vma: %p:%p:%p: %016llx %016llx", vm, orig_vma, in msm_gem_vm_sm_step_remap()
580 vm_dbg("prev_vma: %p:%p: %016llx %016llx", vm, prev_vma, prev_vma->va.addr, prev_vma->va.range); in msm_gem_vm_sm_step_remap()
590 vm_dbg("next_vma: %p:%p: %016llx %016llx", vm, next_vma, next_vma->va.addr, next_vma->va.range); in msm_gem_vm_sm_step_remap()
611 vm_dbg("%p:%p:%p: %016llx %016llx", vma->vm, vma, vma->gem.obj, in msm_gem_vm_sm_step_unmap()
647 struct msm_gem_vm *vm = to_msm_vm(job->vm); in msm_vma_job_run() local
649 int ret = vm->unusable ? -EINVAL : 0; in msm_vma_job_run()
653 mutex_lock(&vm->mmu_lock); in msm_vma_job_run()
654 vm->mmu->prealloc = &job->prealloc; in msm_vma_job_run()
668 ret = vm_map_op(vm, &op->map); in msm_vma_job_run()
671 vm_unmap_op(vm, &op->unmap); in msm_vma_job_run()
679 vm->mmu->prealloc = NULL; in msm_vma_job_run()
680 mutex_unlock(&vm->mmu_lock); in msm_vma_job_run()
687 msm_gem_vm_unusable(job->vm); in msm_vma_job_run()
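
msm_vma_job_run() is where queued VM_BIND ops finally touch the pagetables: the job's preallocated pagetable memory is handed to the MMU via vm->mmu->prealloc, the ops are replayed under mmu_lock, and any failure marks the whole VM unusable. A sketch of that control flow; the op list, struct name, and op encoding are assumptions:

    struct msm_gem_vm *vm = to_msm_vm(job->vm);
    struct msm_vm_op *op;                   /* struct name is an assumption */
    int ret = vm->unusable ? -EINVAL : 0;   /* refuse to run on an already-dead VM */

    mutex_lock(&vm->mmu_lock);
    vm->mmu->prealloc = &job->prealloc;     /* pagetable pages preallocated at submit */

    list_for_each_entry(op, &job->vm_ops, node) {   /* list/member names assumed */
            switch (op->op) {
            case MSM_VM_BIND_OP_MAP:                /* op encoding assumed */
                    ret = vm_map_op(vm, &op->map);
                    break;
            case MSM_VM_BIND_OP_UNMAP:
                    vm_unmap_op(vm, &op->unmap);
                    break;
            }
            if (ret)
                    break;
    }

    vm->mmu->prealloc = NULL;
    mutex_unlock(&vm->mmu_lock);

    if (ret)
            msm_gem_vm_unusable(job->vm);   /* poison the VM so later submits fail fast */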
703 struct msm_gem_vm *vm = to_msm_vm(job->vm); in msm_vma_job_free() local
706 vm->mmu->funcs->prealloc_cleanup(vm->mmu, &job->prealloc); in msm_vma_job_free()
708 atomic_sub(job->prealloc.count, &vm->prealloc_throttle.in_flight); in msm_vma_job_free()
726 wake_up(&vm->prealloc_throttle.wait); in msm_vma_job_free()
760 struct msm_gem_vm *vm; in msm_gem_vm_create() local
767 vm = kzalloc(sizeof(*vm), GFP_KERNEL); in msm_gem_vm_create()
768 if (!vm) in msm_gem_vm_create()
787 ret = drm_sched_init(&vm->sched, &args); in msm_gem_vm_create()
791 init_waitqueue_head(&vm->prealloc_throttle.wait); in msm_gem_vm_create()
794 drm_gpuvm_init(&vm->base, name, flags, drm, dummy_gem, in msm_gem_vm_create()
798 vm->mmu = mmu; in msm_gem_vm_create()
799 mutex_init(&vm->mmu_lock); in msm_gem_vm_create()
800 vm->managed = managed; in msm_gem_vm_create()
802 drm_mm_init(&vm->mm, va_start, va_size); in msm_gem_vm_create()
814 vm->log_shift = MIN(vm_log_shift, 8); in msm_gem_vm_create()
816 if (vm->log_shift) { in msm_gem_vm_create()
817 vm->log = kmalloc_array(1 << vm->log_shift, sizeof(vm->log[0]), in msm_gem_vm_create()
821 return &vm->base; in msm_gem_vm_create()
827 kfree(vm); in msm_gem_vm_create()
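
The creation path visible above sets things up in this order: allocate, start a per-VM drm_sched for VM_BIND jobs, initialize the throttle waitqueue, register with drm_gpuvm, then the MMU backend, lock, drm_mm allocator, and finally the optional log. A condensed sketch; the scheduler args, the dummy GEM object, the gpuvm_init tail, the log allocation flags, and the error unwind are assumptions:

    vm = kzalloc(sizeof(*vm), GFP_KERNEL);
    if (!vm)
            return ERR_PTR(-ENOMEM);

    /* One scheduler per VM to serialize VM_BIND jobs; args contents assumed. */
    ret = drm_sched_init(&vm->sched, &args);
    if (ret)
            goto err_free_vm;

    init_waitqueue_head(&vm->prealloc_throttle.wait);

    /* Register with the common GPUVM layer; the VA range, reserved range and
     * ops-table arguments are assumptions based on the drm_gpuvm API. */
    drm_gpuvm_init(&vm->base, name, flags, drm, dummy_gem,
                   va_start, va_size, 0, 0, &msm_gem_vm_ops);

    vm->mmu = mmu;
    mutex_init(&vm->mmu_lock);
    vm->managed = managed;

    drm_mm_init(&vm->mm, va_start, va_size);

    /* Optional operations log: 2^log_shift entries, capped at 2^8 = 256. */
    vm->log_shift = MIN(vm_log_shift, 8);
    if (vm->log_shift)
            vm->log = kmalloc_array(1 << vm->log_shift, sizeof(vm->log[0]),
                                    GFP_KERNEL | __GFP_ZERO);

    return &vm->base;

    err_free_vm:
            kfree(vm);
            return ERR_PTR(ret);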
842 struct msm_gem_vm *vm = to_msm_vm(gpuvm); in msm_gem_vm_close() local
850 if (vm->managed) in msm_gem_vm_close()
853 if (vm->last_fence) in msm_gem_vm_close()
854 dma_fence_wait(vm->last_fence, false); in msm_gem_vm_close()
857 drm_sched_stop(&vm->sched, NULL); in msm_gem_vm_close()
858 drm_sched_fini(&vm->sched); in msm_gem_vm_close()
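
Closing a VM waits for the last queued VM_BIND work and shuts down its scheduler before the gpuvm itself goes away. A sketch of just the fence/scheduler part shown above; the teardown of remaining mappings between these steps is omitted:

    /* Flush the last queued VM_BIND job before tearing down the scheduler. */
    if (vm->last_fence)
            dma_fence_wait(vm->last_fence, false);   /* uninterruptible wait */

    /* Stop and finalize the per-VM scheduler used for VM_BIND jobs. */
    drm_sched_stop(&vm->sched, NULL);
    drm_sched_fini(&vm->sched);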
917 job->vm = msm_context_vm(dev, queue->ctx); in vm_bind_job_create()
937 struct drm_device *dev = job->vm->drm; in lookup_op()
960 if (!drm_gpuvm_range_valid(job->vm, op->iova, op->range)) in lookup_op()
994 struct drm_device *dev = job->vm->drm; in vm_bind_job_lookup_ops()
1068 struct msm_mmu *mmu = to_msm_vm(job->vm)->mmu; in prealloc_count()
1103 struct msm_gem_vm *vm = to_msm_vm(job->vm); in vm_bind_prealloc_count() local
1139 vm->prealloc_throttle.wait, in vm_bind_prealloc_count()
1140 atomic_read(&vm->prealloc_throttle.in_flight) <= 1024); in vm_bind_prealloc_count()
1144 atomic_add(job->prealloc.count, &vm->prealloc_throttle.in_flight); in vm_bind_prealloc_count()
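
The prealloc throttle visible in vm_bind_prealloc_count() and msm_vma_job_free() is an atomic counter plus waitqueue: submission blocks while more than 1024 preallocated units are in flight, and each completed job subtracts its count and wakes waiters. A sketch of both halves; the wait_event variant and the unit behind 'count' are assumptions:

    /* Submit side (vm_bind_prealloc_count): block until the outstanding
     * preallocation count drains below the threshold, then account ours. */
    ret = wait_event_interruptible(
            vm->prealloc_throttle.wait,
            atomic_read(&vm->prealloc_throttle.in_flight) <= 1024);
    if (ret)
            return ret;

    atomic_add(job->prealloc.count, &vm->prealloc_throttle.in_flight);

    /* Completion side (msm_vma_job_free): return the preallocated pagetable
     * memory, drop our contribution, and wake any throttled submitter. */
    vm->mmu->funcs->prealloc_cleanup(vm->mmu, &job->prealloc);
    atomic_sub(job->prealloc.count, &vm->prealloc_throttle.in_flight);
    wake_up(&vm->prealloc_throttle.wait);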
1159 ret = drm_exec_lock_obj(exec, drm_gpuvm_resv_obj(job->vm)); in vm_bind_job_lock_objects()
1169 ret = drm_gpuvm_sm_unmap_exec_lock(job->vm, exec, in vm_bind_job_lock_objects()
1175 ret = drm_gpuvm_sm_map_exec_lock(job->vm, exec, 1, in vm_bind_job_lock_objects()
1219 struct msm_drm_private *priv = job->vm->drm->dev_private; in vm_bind_job_pin_objects()
1262 struct msm_gem_vm *vm = to_msm_vm(job->vm); in vm_bind_job_prepare() local
1263 struct msm_mmu *mmu = vm->mmu; in vm_bind_job_prepare()
1278 ret = drm_gpuvm_sm_unmap(job->vm, &arg, op->iova, in vm_bind_job_prepare()
1286 ret = drm_gpuvm_sm_map(job->vm, &arg, op->iova, in vm_bind_job_prepare()
1304 msm_gem_vm_unusable(job->vm); in vm_bind_job_prepare()
1355 if (to_msm_vm(ctx->vm)->unusable) in msm_ioctl_vm_bind()
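
vm_bind_job_prepare() hands each queued op to the drm_gpuvm split/merge state machine, and msm_ioctl_vm_bind() refuses new work once a VM has been marked unusable. A sketch of that dispatch; the op decoding, the field names past op->iova, and the error code at the ioctl check are assumptions:

    /* Per-op dispatch in vm_bind_job_prepare(); &arg carries per-op state. */
    switch (op->op) {
    case MSM_VM_BIND_OP_UNMAP:
            ret = drm_gpuvm_sm_unmap(job->vm, &arg, op->iova, op->range);
            break;
    case MSM_VM_BIND_OP_MAP:
            ret = drm_gpuvm_sm_map(job->vm, &arg, op->iova, op->range,
                                   op->obj, op->obj_offset);
            break;
    }

    if (ret) {
            /* A partially-applied bind leaves the VA space in an unknown
             * state, so the whole VM is marked unusable instead of being
             * rolled back. */
            msm_gem_vm_unusable(job->vm);
    }

    /* ... and at ioctl entry (msm_ioctl_vm_bind), already-poisoned VMs are
     * rejected up front: */
    if (to_msm_vm(ctx->vm)->unusable)
            return -EPIPE;   /* exact error code is an assumption */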