Lines Matching refs:vm
121 struct amdgpu_vm *vm; member
140 int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm, in amdgpu_vm_set_pasid() argument
145 if (vm->pasid == pasid) in amdgpu_vm_set_pasid()
148 if (vm->pasid) { in amdgpu_vm_set_pasid()
149 r = xa_err(xa_erase_irq(&adev->vm_manager.pasids, vm->pasid)); in amdgpu_vm_set_pasid()
153 vm->pasid = 0; in amdgpu_vm_set_pasid()
157 r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm, in amdgpu_vm_set_pasid()
162 vm->pasid = pasid; in amdgpu_vm_set_pasid()
179 struct amdgpu_vm *vm = vm_bo->vm; in amdgpu_vm_bo_evicted() local
183 spin_lock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_evicted()
185 list_move(&vm_bo->vm_status, &vm->evicted); in amdgpu_vm_bo_evicted()
187 list_move_tail(&vm_bo->vm_status, &vm->evicted); in amdgpu_vm_bo_evicted()
188 spin_unlock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_evicted()
200 spin_lock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_moved()
201 list_move(&vm_bo->vm_status, &vm_bo->vm->moved); in amdgpu_vm_bo_moved()
202 spin_unlock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_moved()
215 spin_lock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_idle()
216 list_move(&vm_bo->vm_status, &vm_bo->vm->idle); in amdgpu_vm_bo_idle()
217 spin_unlock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_idle()
231 spin_lock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_invalidated()
232 list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated); in amdgpu_vm_bo_invalidated()
233 spin_unlock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_invalidated()
247 spin_lock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_evicted_user()
248 list_move(&vm_bo->vm_status, &vm_bo->vm->evicted_user); in amdgpu_vm_bo_evicted_user()
249 spin_unlock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_evicted_user()
263 spin_lock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_relocated()
264 list_move(&vm_bo->vm_status, &vm_bo->vm->relocated); in amdgpu_vm_bo_relocated()
265 spin_unlock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_relocated()
281 spin_lock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_done()
282 list_move(&vm_bo->vm_status, &vm_bo->vm->done); in amdgpu_vm_bo_done()
283 spin_unlock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_done()
293 static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm) in amdgpu_vm_bo_reset_state_machine() argument
297 spin_lock(&vm->status_lock); in amdgpu_vm_bo_reset_state_machine()
298 list_splice_init(&vm->done, &vm->invalidated); in amdgpu_vm_bo_reset_state_machine()
299 list_for_each_entry(vm_bo, &vm->invalidated, vm_status) in amdgpu_vm_bo_reset_state_machine()
301 list_for_each_entry_safe(vm_bo, tmp, &vm->idle, vm_status) { in amdgpu_vm_bo_reset_state_machine()
306 list_move(&vm_bo->vm_status, &vm_bo->vm->moved); in amdgpu_vm_bo_reset_state_machine()
308 list_move(&vm_bo->vm_status, &vm_bo->vm->relocated); in amdgpu_vm_bo_reset_state_machine()
310 spin_unlock(&vm->status_lock); in amdgpu_vm_bo_reset_state_machine()
324 struct amdgpu_vm *vm, struct amdgpu_bo *bo) in amdgpu_vm_bo_base_init() argument
326 base->vm = vm; in amdgpu_vm_bo_base_init()
336 if (!amdgpu_vm_is_bo_always_valid(vm, bo)) in amdgpu_vm_bo_base_init()
339 dma_resv_assert_held(vm->root.bo->tbo.base.resv); in amdgpu_vm_bo_base_init()
341 ttm_bo_set_bulk_move(&bo->tbo, &vm->lru_bulk_move); in amdgpu_vm_bo_base_init()
368 int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec, in amdgpu_vm_lock_pd() argument
372 return drm_exec_prepare_obj(exec, &vm->root.bo->tbo.base, in amdgpu_vm_lock_pd()
386 struct amdgpu_vm *vm) in amdgpu_vm_move_to_lru_tail() argument
389 ttm_lru_bulk_move_tail(&vm->lru_bulk_move); in amdgpu_vm_move_to_lru_tail()
395 struct amdgpu_vm *vm) in amdgpu_vm_init_entities() argument
399 r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL, in amdgpu_vm_init_entities()
405 return drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL, in amdgpu_vm_init_entities()
410 drm_sched_entity_destroy(&vm->immediate); in amdgpu_vm_init_entities()
415 static void amdgpu_vm_fini_entities(struct amdgpu_vm *vm) in amdgpu_vm_fini_entities() argument
417 drm_sched_entity_destroy(&vm->immediate); in amdgpu_vm_fini_entities()
418 drm_sched_entity_destroy(&vm->delayed); in amdgpu_vm_fini_entities()
430 uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm) in amdgpu_vm_generation() argument
434 if (!vm) in amdgpu_vm_generation()
437 result += lower_32_bits(vm->generation); in amdgpu_vm_generation()
439 if (drm_sched_entity_error(&vm->delayed)) in amdgpu_vm_generation()
461 int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm, in amdgpu_vm_validate() argument
466 uint64_t new_vm_generation = amdgpu_vm_generation(adev, vm); in amdgpu_vm_validate()
471 if (vm->generation != new_vm_generation) { in amdgpu_vm_validate()
472 vm->generation = new_vm_generation; in amdgpu_vm_validate()
473 amdgpu_vm_bo_reset_state_machine(vm); in amdgpu_vm_validate()
474 amdgpu_vm_fini_entities(vm); in amdgpu_vm_validate()
475 r = amdgpu_vm_init_entities(adev, vm); in amdgpu_vm_validate()
480 spin_lock(&vm->status_lock); in amdgpu_vm_validate()
481 while (!list_empty(&vm->evicted)) { in amdgpu_vm_validate()
482 bo_base = list_first_entry(&vm->evicted, in amdgpu_vm_validate()
485 spin_unlock(&vm->status_lock); in amdgpu_vm_validate()
496 vm->update_funcs->map_table(to_amdgpu_bo_vm(bo)); in amdgpu_vm_validate()
499 spin_lock(&vm->status_lock); in amdgpu_vm_validate()
501 while (ticket && !list_empty(&vm->evicted_user)) { in amdgpu_vm_validate()
502 bo_base = list_first_entry(&vm->evicted_user, in amdgpu_vm_validate()
505 spin_unlock(&vm->status_lock); in amdgpu_vm_validate()
510 struct amdgpu_task_info *ti = amdgpu_vm_get_task_info_vm(vm); in amdgpu_vm_validate()
527 spin_lock(&vm->status_lock); in amdgpu_vm_validate()
529 spin_unlock(&vm->status_lock); in amdgpu_vm_validate()
531 amdgpu_vm_eviction_lock(vm); in amdgpu_vm_validate()
532 vm->evicting = false; in amdgpu_vm_validate()
533 amdgpu_vm_eviction_unlock(vm); in amdgpu_vm_validate()
548 bool amdgpu_vm_ready(struct amdgpu_vm *vm) in amdgpu_vm_ready() argument
553 amdgpu_vm_eviction_lock(vm); in amdgpu_vm_ready()
554 ret = !vm->evicting; in amdgpu_vm_ready()
555 amdgpu_vm_eviction_unlock(vm); in amdgpu_vm_ready()
557 spin_lock(&vm->status_lock); in amdgpu_vm_ready()
558 empty = list_empty(&vm->evicted); in amdgpu_vm_ready()
559 spin_unlock(&vm->status_lock); in amdgpu_vm_ready()
763 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, in amdgpu_vm_bo_find() argument
769 if (base->vm != vm) in amdgpu_vm_bo_find()
817 struct amdgpu_vm *vm, bool immediate) in amdgpu_vm_update_pdes() argument
825 spin_lock(&vm->status_lock); in amdgpu_vm_update_pdes()
826 list_splice_init(&vm->relocated, &relocated); in amdgpu_vm_update_pdes()
827 spin_unlock(&vm->status_lock); in amdgpu_vm_update_pdes()
837 params.vm = vm; in amdgpu_vm_update_pdes()
840 r = vm->update_funcs->prepare(&params, NULL); in amdgpu_vm_update_pdes()
853 r = vm->update_funcs->commit(&params, &vm->last_update); in amdgpu_vm_update_pdes()
858 atomic64_inc(&vm->tlb_seq); in amdgpu_vm_update_pdes()
884 atomic64_inc(&tlb_cb->vm->tlb_seq); in amdgpu_vm_tlb_seq_cb()
902 struct amdgpu_vm *vm = params->vm; in amdgpu_vm_tlb_flush() local
904 tlb_cb->vm = vm; in amdgpu_vm_tlb_flush()
912 dma_fence_put(vm->last_tlb_flush); in amdgpu_vm_tlb_flush()
913 vm->last_tlb_flush = dma_fence_get(*fence); in amdgpu_vm_tlb_flush()
919 if (!params->unlocked && vm->is_compute_context) { in amdgpu_vm_tlb_flush()
920 amdgpu_vm_tlb_fence_create(params->adev, vm, fence); in amdgpu_vm_tlb_flush()
923 dma_resv_add_fence(vm->root.bo->tbo.base.resv, *fence, in amdgpu_vm_tlb_flush()
952 int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm, in amdgpu_vm_update_range() argument
987 params.vm = vm; in amdgpu_vm_update_range()
995 amdgpu_vm_eviction_lock(vm); in amdgpu_vm_update_range()
996 if (vm->evicting) { in amdgpu_vm_update_range()
1001 if (!unlocked && !dma_fence_is_signaled(vm->last_unlocked)) { in amdgpu_vm_update_range()
1004 amdgpu_bo_fence(vm->root.bo, vm->last_unlocked, true); in amdgpu_vm_update_range()
1005 swap(vm->last_unlocked, tmp); in amdgpu_vm_update_range()
1009 r = vm->update_funcs->prepare(&params, sync); in amdgpu_vm_update_range()
1067 r = vm->update_funcs->commit(&params, fence); in amdgpu_vm_update_range()
1080 amdgpu_vm_eviction_unlock(vm); in amdgpu_vm_update_range()
1088 struct amdgpu_vm *vm = bo_va->base.vm; in amdgpu_vm_bo_get_memory() local
1098 if (!amdgpu_vm_is_bo_always_valid(vm, bo) && in amdgpu_vm_bo_get_memory()
1103 if (!amdgpu_vm_is_bo_always_valid(vm, bo)) in amdgpu_vm_bo_get_memory()
1107 void amdgpu_vm_get_memory(struct amdgpu_vm *vm, in amdgpu_vm_get_memory() argument
1112 spin_lock(&vm->status_lock); in amdgpu_vm_get_memory()
1113 list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) in amdgpu_vm_get_memory()
1116 list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) in amdgpu_vm_get_memory()
1119 list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) in amdgpu_vm_get_memory()
1122 list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) in amdgpu_vm_get_memory()
1125 list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) in amdgpu_vm_get_memory()
1128 list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) in amdgpu_vm_get_memory()
1130 spin_unlock(&vm->status_lock); in amdgpu_vm_get_memory()
1149 struct amdgpu_vm *vm = bo_va->base.vm; in amdgpu_vm_bo_update() local
1168 r = amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.base.resv, in amdgpu_vm_bo_update()
1169 AMDGPU_SYNC_EQ_OWNER, vm); in amdgpu_vm_bo_update()
1197 AMDGPU_SYNC_EXPLICIT, vm); in amdgpu_vm_bo_update()
1219 if (clear || amdgpu_vm_is_bo_always_valid(vm, bo)) in amdgpu_vm_bo_update()
1220 last_update = &vm->last_update; in amdgpu_vm_bo_update()
1248 r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb, in amdgpu_vm_bo_update()
1261 if (amdgpu_vm_is_bo_always_valid(vm, bo)) { in amdgpu_vm_bo_update()
1382 struct amdgpu_vm *vm, in amdgpu_vm_free_mapping() argument
1399 static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) in amdgpu_vm_prt_fini() argument
1401 struct dma_resv *resv = vm->root.bo->tbo.base.resv; in amdgpu_vm_prt_fini()
1428 struct amdgpu_vm *vm, in amdgpu_vm_clear_freed() argument
1442 r = amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.base.resv, in amdgpu_vm_clear_freed()
1443 AMDGPU_SYNC_EQ_OWNER, vm); in amdgpu_vm_clear_freed()
1447 while (!list_empty(&vm->freed)) { in amdgpu_vm_clear_freed()
1448 mapping = list_first_entry(&vm->freed, in amdgpu_vm_clear_freed()
1452 r = amdgpu_vm_update_range(adev, vm, false, false, true, false, in amdgpu_vm_clear_freed()
1455 amdgpu_vm_free_mapping(adev, vm, mapping, f); in amdgpu_vm_clear_freed()
1490 struct amdgpu_vm *vm, in amdgpu_vm_handle_moved() argument
1498 spin_lock(&vm->status_lock); in amdgpu_vm_handle_moved()
1499 while (!list_empty(&vm->moved)) { in amdgpu_vm_handle_moved()
1500 bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va, in amdgpu_vm_handle_moved()
1502 spin_unlock(&vm->status_lock); in amdgpu_vm_handle_moved()
1508 spin_lock(&vm->status_lock); in amdgpu_vm_handle_moved()
1511 while (!list_empty(&vm->invalidated)) { in amdgpu_vm_handle_moved()
1512 bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va, in amdgpu_vm_handle_moved()
1515 spin_unlock(&vm->status_lock); in amdgpu_vm_handle_moved()
1541 if (vm->is_compute_context && in amdgpu_vm_handle_moved()
1547 spin_lock(&vm->status_lock); in amdgpu_vm_handle_moved()
1549 spin_unlock(&vm->status_lock); in amdgpu_vm_handle_moved()
1568 struct amdgpu_vm *vm, in amdgpu_vm_flush_compute_tlb() argument
1572 uint64_t tlb_seq = amdgpu_vm_tlb_seq(vm); in amdgpu_vm_flush_compute_tlb()
1576 WARN_ON_ONCE(!vm->is_compute_context); in amdgpu_vm_flush_compute_tlb()
1583 if (atomic64_xchg(&vm->kfd_last_flushed_seq, tlb_seq) == tlb_seq) in amdgpu_vm_flush_compute_tlb()
1591 r = amdgpu_gmc_flush_gpu_tlb_pasid(adev, vm->pasid, flush_type, in amdgpu_vm_flush_compute_tlb()
1615 struct amdgpu_vm *vm, in amdgpu_vm_bo_add() argument
1624 amdgpu_vm_bo_base_init(&bo_va->base, vm, bo); in amdgpu_vm_bo_add()
1658 struct amdgpu_vm *vm = bo_va->base.vm; in amdgpu_vm_bo_insert_map() local
1663 amdgpu_vm_it_insert(mapping, &vm->va); in amdgpu_vm_bo_insert_map()
1668 if (amdgpu_vm_is_bo_always_valid(vm, bo) && !bo_va->base.moved) in amdgpu_vm_bo_insert_map()
1729 struct amdgpu_vm *vm = bo_va->base.vm; in amdgpu_vm_bo_map() local
1740 tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr); in amdgpu_vm_bo_map()
1800 r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size); in amdgpu_vm_bo_replace_map()
1838 struct amdgpu_vm *vm = bo_va->base.vm; in amdgpu_vm_bo_unmap() local
1861 amdgpu_vm_it_remove(mapping, &vm->va); in amdgpu_vm_bo_unmap()
1866 list_add(&mapping->list, &vm->freed); in amdgpu_vm_bo_unmap()
1868 amdgpu_vm_free_mapping(adev, vm, mapping, in amdgpu_vm_bo_unmap()
1888 struct amdgpu_vm *vm, in amdgpu_vm_bo_clear_mappings() argument
1917 tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr); in amdgpu_vm_bo_clear_mappings()
1948 amdgpu_vm_it_remove(tmp, &vm->va); in amdgpu_vm_bo_clear_mappings()
1957 list_add(&tmp->list, &vm->freed); in amdgpu_vm_bo_clear_mappings()
1965 amdgpu_vm_it_insert(before, &vm->va); in amdgpu_vm_bo_clear_mappings()
1969 if (amdgpu_vm_is_bo_always_valid(vm, bo) && in amdgpu_vm_bo_clear_mappings()
1980 amdgpu_vm_it_insert(after, &vm->va); in amdgpu_vm_bo_clear_mappings()
1984 if (amdgpu_vm_is_bo_always_valid(vm, bo) && in amdgpu_vm_bo_clear_mappings()
2006 struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm, in amdgpu_vm_bo_lookup_mapping() argument
2009 return amdgpu_vm_it_iter_first(&vm->va, addr, addr); in amdgpu_vm_bo_lookup_mapping()
2020 void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket) in amdgpu_vm_bo_trace_cs() argument
2027 for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping; in amdgpu_vm_bo_trace_cs()
2057 struct amdgpu_vm *vm = bo_va->base.vm; in amdgpu_vm_bo_del() local
2060 dma_resv_assert_held(vm->root.bo->tbo.base.resv); in amdgpu_vm_bo_del()
2064 if (amdgpu_vm_is_bo_always_valid(vm, bo)) in amdgpu_vm_bo_del()
2077 spin_lock(&vm->status_lock); in amdgpu_vm_bo_del()
2079 spin_unlock(&vm->status_lock); in amdgpu_vm_bo_del()
2083 amdgpu_vm_it_remove(mapping, &vm->va); in amdgpu_vm_bo_del()
2086 list_add(&mapping->list, &vm->freed); in amdgpu_vm_bo_del()
2090 amdgpu_vm_it_remove(mapping, &vm->va); in amdgpu_vm_bo_del()
2091 amdgpu_vm_free_mapping(adev, vm, mapping, in amdgpu_vm_bo_del()
2115 if (!bo_base || !bo_base->vm) in amdgpu_vm_evictable()
2123 if (!amdgpu_vm_eviction_trylock(bo_base->vm)) in amdgpu_vm_evictable()
2127 if (!dma_fence_is_signaled(bo_base->vm->last_unlocked)) { in amdgpu_vm_evictable()
2128 amdgpu_vm_eviction_unlock(bo_base->vm); in amdgpu_vm_evictable()
2132 bo_base->vm->evicting = true; in amdgpu_vm_evictable()
2133 amdgpu_vm_eviction_unlock(bo_base->vm); in amdgpu_vm_evictable()
2152 struct amdgpu_vm *vm = bo_base->vm; in amdgpu_vm_bo_invalidate() local
2154 if (evicted && amdgpu_vm_is_bo_always_valid(vm, bo)) { in amdgpu_vm_bo_invalidate()
2165 else if (amdgpu_vm_is_bo_always_valid(vm, bo)) in amdgpu_vm_bo_invalidate()
2293 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout) in amdgpu_vm_wait_idle() argument
2295 timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv, in amdgpu_vm_wait_idle()
2301 return dma_fence_wait_timeout(vm->last_unlocked, true, timeout); in amdgpu_vm_wait_idle()
2314 struct amdgpu_vm *vm; in amdgpu_vm_get_vm_from_pasid() local
2318 vm = xa_load(&adev->vm_manager.pasids, pasid); in amdgpu_vm_get_vm_from_pasid()
2321 return vm; in amdgpu_vm_get_vm_from_pasid()
2345 amdgpu_vm_get_task_info_vm(struct amdgpu_vm *vm) in amdgpu_vm_get_task_info_vm() argument
2349 if (vm) { in amdgpu_vm_get_task_info_vm()
2350 ti = vm->task_info; in amdgpu_vm_get_task_info_vm()
2351 kref_get(&vm->task_info->refcount); in amdgpu_vm_get_task_info_vm()
2373 static int amdgpu_vm_create_task_info(struct amdgpu_vm *vm) in amdgpu_vm_create_task_info() argument
2375 vm->task_info = kzalloc(sizeof(struct amdgpu_task_info), GFP_KERNEL); in amdgpu_vm_create_task_info()
2376 if (!vm->task_info) in amdgpu_vm_create_task_info()
2379 kref_init(&vm->task_info->refcount); in amdgpu_vm_create_task_info()
2388 void amdgpu_vm_set_task_info(struct amdgpu_vm *vm) in amdgpu_vm_set_task_info() argument
2390 if (!vm->task_info) in amdgpu_vm_set_task_info()
2393 if (vm->task_info->pid == current->pid) in amdgpu_vm_set_task_info()
2396 vm->task_info->pid = current->pid; in amdgpu_vm_set_task_info()
2397 get_task_comm(vm->task_info->task_name, current); in amdgpu_vm_set_task_info()
2402 vm->task_info->tgid = current->group_leader->pid; in amdgpu_vm_set_task_info()
2403 get_task_comm(vm->task_info->process_name, current->group_leader); in amdgpu_vm_set_task_info()
2418 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, in amdgpu_vm_init() argument
2425 vm->va = RB_ROOT_CACHED; in amdgpu_vm_init()
2427 vm->reserved_vmid[i] = NULL; in amdgpu_vm_init()
2428 INIT_LIST_HEAD(&vm->evicted); in amdgpu_vm_init()
2429 INIT_LIST_HEAD(&vm->evicted_user); in amdgpu_vm_init()
2430 INIT_LIST_HEAD(&vm->relocated); in amdgpu_vm_init()
2431 INIT_LIST_HEAD(&vm->moved); in amdgpu_vm_init()
2432 INIT_LIST_HEAD(&vm->idle); in amdgpu_vm_init()
2433 INIT_LIST_HEAD(&vm->invalidated); in amdgpu_vm_init()
2434 spin_lock_init(&vm->status_lock); in amdgpu_vm_init()
2435 INIT_LIST_HEAD(&vm->freed); in amdgpu_vm_init()
2436 INIT_LIST_HEAD(&vm->done); in amdgpu_vm_init()
2437 INIT_LIST_HEAD(&vm->pt_freed); in amdgpu_vm_init()
2438 INIT_WORK(&vm->pt_free_work, amdgpu_vm_pt_free_work); in amdgpu_vm_init()
2439 INIT_KFIFO(vm->faults); in amdgpu_vm_init()
2441 r = amdgpu_vm_init_entities(adev, vm); in amdgpu_vm_init()
2445 ttm_lru_bulk_move_init(&vm->lru_bulk_move); in amdgpu_vm_init()
2447 vm->is_compute_context = false; in amdgpu_vm_init()
2449 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & in amdgpu_vm_init()
2453 vm->use_cpu_for_update ? "CPU" : "SDMA"); in amdgpu_vm_init()
2454 WARN_ONCE((vm->use_cpu_for_update && in amdgpu_vm_init()
2458 if (vm->use_cpu_for_update) in amdgpu_vm_init()
2459 vm->update_funcs = &amdgpu_vm_cpu_funcs; in amdgpu_vm_init()
2461 vm->update_funcs = &amdgpu_vm_sdma_funcs; in amdgpu_vm_init()
2463 vm->last_update = dma_fence_get_stub(); in amdgpu_vm_init()
2464 vm->last_unlocked = dma_fence_get_stub(); in amdgpu_vm_init()
2465 vm->last_tlb_flush = dma_fence_get_stub(); in amdgpu_vm_init()
2466 vm->generation = amdgpu_vm_generation(adev, NULL); in amdgpu_vm_init()
2468 mutex_init(&vm->eviction_lock); in amdgpu_vm_init()
2469 vm->evicting = false; in amdgpu_vm_init()
2470 vm->tlb_fence_context = dma_fence_context_alloc(1); in amdgpu_vm_init()
2472 r = amdgpu_vm_pt_create(adev, vm, adev->vm_manager.root_level, in amdgpu_vm_init()
2484 amdgpu_vm_bo_base_init(&vm->root, vm, root_bo); in amdgpu_vm_init()
2489 r = amdgpu_vm_pt_clear(adev, vm, root, false); in amdgpu_vm_init()
2493 r = amdgpu_vm_create_task_info(vm); in amdgpu_vm_init()
2497 amdgpu_bo_unreserve(vm->root.bo); in amdgpu_vm_init()
2503 amdgpu_vm_pt_free_root(adev, vm); in amdgpu_vm_init()
2504 amdgpu_bo_unreserve(vm->root.bo); in amdgpu_vm_init()
2508 dma_fence_put(vm->last_tlb_flush); in amdgpu_vm_init()
2509 dma_fence_put(vm->last_unlocked); in amdgpu_vm_init()
2510 ttm_lru_bulk_move_fini(&adev->mman.bdev, &vm->lru_bulk_move); in amdgpu_vm_init()
2511 amdgpu_vm_fini_entities(vm); in amdgpu_vm_init()
2535 int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm) in amdgpu_vm_make_compute() argument
2539 r = amdgpu_bo_reserve(vm->root.bo, true); in amdgpu_vm_make_compute()
2544 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & in amdgpu_vm_make_compute()
2547 vm->use_cpu_for_update ? "CPU" : "SDMA"); in amdgpu_vm_make_compute()
2548 WARN_ONCE((vm->use_cpu_for_update && in amdgpu_vm_make_compute()
2552 if (vm->use_cpu_for_update) { in amdgpu_vm_make_compute()
2554 r = amdgpu_bo_sync_wait(vm->root.bo, in amdgpu_vm_make_compute()
2559 vm->update_funcs = &amdgpu_vm_cpu_funcs; in amdgpu_vm_make_compute()
2560 r = amdgpu_vm_pt_map_tables(adev, vm); in amdgpu_vm_make_compute()
2565 vm->update_funcs = &amdgpu_vm_sdma_funcs; in amdgpu_vm_make_compute()
2568 dma_fence_put(vm->last_update); in amdgpu_vm_make_compute()
2569 vm->last_update = dma_fence_get_stub(); in amdgpu_vm_make_compute()
2570 vm->is_compute_context = true; in amdgpu_vm_make_compute()
2573 amdgpu_bo_unreserve(vm->root.bo); in amdgpu_vm_make_compute()
2585 void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm) in amdgpu_vm_release_compute() argument
2587 amdgpu_vm_set_pasid(adev, vm, 0); in amdgpu_vm_release_compute()
2588 vm->is_compute_context = false; in amdgpu_vm_release_compute()
2600 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) in amdgpu_vm_fini() argument
2608 amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm); in amdgpu_vm_fini()
2610 flush_work(&vm->pt_free_work); in amdgpu_vm_fini()
2612 root = amdgpu_bo_ref(vm->root.bo); in amdgpu_vm_fini()
2614 amdgpu_vm_put_task_info(vm->task_info); in amdgpu_vm_fini()
2615 amdgpu_vm_set_pasid(adev, vm, 0); in amdgpu_vm_fini()
2616 dma_fence_wait(vm->last_unlocked, false); in amdgpu_vm_fini()
2617 dma_fence_put(vm->last_unlocked); in amdgpu_vm_fini()
2618 dma_fence_wait(vm->last_tlb_flush, false); in amdgpu_vm_fini()
2620 spin_lock_irqsave(vm->last_tlb_flush->lock, flags); in amdgpu_vm_fini()
2621 spin_unlock_irqrestore(vm->last_tlb_flush->lock, flags); in amdgpu_vm_fini()
2622 dma_fence_put(vm->last_tlb_flush); in amdgpu_vm_fini()
2624 list_for_each_entry_safe(mapping, tmp, &vm->freed, list) { in amdgpu_vm_fini()
2626 amdgpu_vm_prt_fini(adev, vm); in amdgpu_vm_fini()
2631 amdgpu_vm_free_mapping(adev, vm, mapping, NULL); in amdgpu_vm_fini()
2634 amdgpu_vm_pt_free_root(adev, vm); in amdgpu_vm_fini()
2637 WARN_ON(vm->root.bo); in amdgpu_vm_fini()
2639 amdgpu_vm_fini_entities(vm); in amdgpu_vm_fini()
2641 if (!RB_EMPTY_ROOT(&vm->va.rb_root)) { in amdgpu_vm_fini()
2645 &vm->va.rb_root, rb) { in amdgpu_vm_fini()
2653 dma_fence_put(vm->last_update); in amdgpu_vm_fini()
2656 if (vm->reserved_vmid[i]) { in amdgpu_vm_fini()
2658 vm->reserved_vmid[i] = false; in amdgpu_vm_fini()
2662 ttm_lru_bulk_move_fini(&adev->mman.bdev, &vm->lru_bulk_move); in amdgpu_vm_fini()
2753 if (!fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) { in amdgpu_vm_ioctl()
2755 fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = true; in amdgpu_vm_ioctl()
2760 if (fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) { in amdgpu_vm_ioctl()
2762 fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = false; in amdgpu_vm_ioctl()
2794 struct amdgpu_vm *vm; in amdgpu_vm_handle_fault() local
2798 vm = xa_load(&adev->vm_manager.pasids, pasid); in amdgpu_vm_handle_fault()
2799 if (vm) { in amdgpu_vm_handle_fault()
2800 root = amdgpu_bo_ref(vm->root.bo); in amdgpu_vm_handle_fault()
2801 is_compute_context = vm->is_compute_context; in amdgpu_vm_handle_fault()
2824 vm = xa_load(&adev->vm_manager.pasids, pasid); in amdgpu_vm_handle_fault()
2825 if (vm && vm->root.bo != root) in amdgpu_vm_handle_fault()
2826 vm = NULL; in amdgpu_vm_handle_fault()
2828 if (!vm) in amdgpu_vm_handle_fault()
2857 r = amdgpu_vm_update_range(adev, vm, true, false, false, false, in amdgpu_vm_handle_fault()
2862 r = amdgpu_vm_update_pdes(adev, vm, true); in amdgpu_vm_handle_fault()
2884 void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m) in amdgpu_debugfs_vm_bo_info() argument
2901 spin_lock(&vm->status_lock); in amdgpu_debugfs_vm_bo_info()
2903 list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) { in amdgpu_debugfs_vm_bo_info()
2912 list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) { in amdgpu_debugfs_vm_bo_info()
2921 list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) { in amdgpu_debugfs_vm_bo_info()
2930 list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) { in amdgpu_debugfs_vm_bo_info()
2939 list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) { in amdgpu_debugfs_vm_bo_info()
2948 list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) { in amdgpu_debugfs_vm_bo_info()
2953 spin_unlock(&vm->status_lock); in amdgpu_debugfs_vm_bo_info()
2987 struct amdgpu_vm *vm; in amdgpu_vm_update_fault_cache() local
2992 vm = xa_load(&adev->vm_manager.pasids, pasid); in amdgpu_vm_update_fault_cache()
2998 if (vm && status) { in amdgpu_vm_update_fault_cache()
2999 vm->fault_info.addr = addr; in amdgpu_vm_update_fault_cache()
3000 vm->fault_info.status = status; in amdgpu_vm_update_fault_cache()
3010 vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_GFX; in amdgpu_vm_update_fault_cache()
3011 vm->fault_info.vmhub |= in amdgpu_vm_update_fault_cache()
3014 vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_MM0; in amdgpu_vm_update_fault_cache()
3015 vm->fault_info.vmhub |= in amdgpu_vm_update_fault_cache()
3018 vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_MM1; in amdgpu_vm_update_fault_cache()
3019 vm->fault_info.vmhub |= in amdgpu_vm_update_fault_cache()
3037 bool amdgpu_vm_is_bo_always_valid(struct amdgpu_vm *vm, struct amdgpu_bo *bo) in amdgpu_vm_is_bo_always_valid() argument
3039 return bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv; in amdgpu_vm_is_bo_always_valid()
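A large share of the references above follow one repeating shape: a per-VM spinlock (status_lock) is taken, the BO's vm_status node is moved onto one of the per-state lists (evicted, moved, idle, invalidated, relocated, done), and the lock is dropped. The sketch below is not the driver source; it is a minimal illustration of that pattern, with the *_sketch struct names invented here and the struct layout reduced to what the listed lines actually show.

	/*
	 * Minimal sketch of the per-VM BO state machine visible in the
	 * listing.  Only the fields referenced above are modeled.
	 */
	#include <linux/list.h>
	#include <linux/spinlock.h>

	struct amdgpu_vm_sketch {
		spinlock_t status_lock;		/* protects the per-state lists below */
		struct list_head evicted;
		struct list_head moved;
		struct list_head idle;
		struct list_head invalidated;
		struct list_head relocated;
		struct list_head done;
	};

	struct amdgpu_vm_bo_base_sketch {
		struct amdgpu_vm_sketch *vm;	/* VM this BO base belongs to */
		struct list_head vm_status;	/* links the BO into exactly one state list */
	};

	/*
	 * Every state-change helper in the listing (amdgpu_vm_bo_moved,
	 * _idle, _invalidated, _relocated, _done, ...) reduces to this
	 * lock / list_move / unlock sequence.
	 */
	static void vm_bo_set_state(struct amdgpu_vm_bo_base_sketch *vm_bo,
				    struct list_head *new_state)
	{
		spin_lock(&vm_bo->vm->status_lock);
		list_move(&vm_bo->vm_status, new_state);
		spin_unlock(&vm_bo->vm->status_lock);
	}

Because a BO base is always on exactly one list, moving it with list_move() under status_lock is what lets amdgpu_vm_validate(), amdgpu_vm_handle_moved() and the debugfs walkers above drain or inspect each list without any per-BO locking.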