Lines matching refs:svms

All hits below come from the amdkfd shared virtual memory implementation (kfd_svm.c in the amdgpu driver). The leading number on each hit is that file's source line, the trailing "in foo()" names the enclosing function, and the "argument"/"local" tags mark lines where svms is a function parameter or a local variable.
53 #define dynamic_svm_range_dump(svms) \ argument
54 _dynamic_func_call_no_desc("svm_range_dump", svm_range_debug_dump, svms)
56 #define dynamic_svm_range_dump(svms) \ argument
57 do { if (0) svm_range_debug_dump(svms); } while (0)
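These first two hits are the two halves of a CONFIG_DYNAMIC_DEBUG switch: with dynamic debug built in, the range dump goes through a runtime-togglable call site; otherwise it compiles to a no-op that still type-checks its argument. A minimal sketch of the same idiom (the my_debug_dump/my_state names are hypothetical, not from the driver):

    #include <linux/dynamic_debug.h>

    struct my_state;
    void my_debug_dump(struct my_state *s);

    #if defined(CONFIG_DYNAMIC_DEBUG)
    /* call site can be toggled at runtime under the name "my_dump" */
    #define dynamic_my_dump(s) \
            _dynamic_func_call_no_desc("my_dump", my_debug_dump, s)
    #else
    /* dead-code eliminated, yet the compiler still checks the argument */
    #define dynamic_my_dump(s) \
            do { if (0) my_debug_dump(s); } while (0)
    #endif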
94 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms, in svm_range_unlink()
105 interval_tree_remove(&prange->it_node, &prange->svms->objects); in svm_range_unlink()
111 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms, in svm_range_add_notifier_locked()
130 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms, in svm_range_add_to_svms()
133 list_move_tail(&prange->list, &prange->svms->list); in svm_range_add_to_svms()
136 interval_tree_insert(&prange->it_node, &prange->svms->objects); in svm_range_add_to_svms()
142 prange->svms, prange, in svm_range_remove_notifier()
214 p = container_of(prange->svms, struct kfd_process, svms); in svm_range_dma_map()
261 p = container_of(prange->svms, struct kfd_process, svms); in svm_range_dma_unmap()
282 struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms); in svm_range_free()
285 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms, prange, in svm_range_free()
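A large share of the hits are one idiom: svms is a struct svm_range_list embedded by value in struct kfd_process, so code that only holds prange->svms can walk back to the owning process with container_of. A minimal sketch of that relationship (structure bodies elided):

    #include <linux/container_of.h>

    struct svm_range_list { /* lock, list, objects tree, ... */ };

    struct kfd_process {
            /* ... */
            struct svm_range_list svms;     /* embedded member, not a pointer */
    };

    static inline struct kfd_process *svms_to_process(struct svm_range_list *svms)
    {
            /* subtracts offsetof(struct kfd_process, svms) from the member address */
            return container_of(svms, struct kfd_process, svms);
    }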
312 svm_range_set_default_attributes(struct svm_range_list *svms, int32_t *location, in svm_range_set_default_attributes() argument
318 *granularity = svms->default_granularity; in svm_range_set_default_attributes()
324 svm_range *svm_range_new(struct svm_range_list *svms, uint64_t start, in svm_range_new() argument
335 p = container_of(svms, struct kfd_process, svms); in svm_range_new()
344 prange->svms = svms; in svm_range_new()
359 bitmap_copy(prange->bitmap_access, svms->bitmap_supported, in svm_range_new()
362 svm_range_set_default_attributes(svms, &prange->preferred_loc, in svm_range_new()
366 pr_debug("svms 0x%p [0x%llx 0x%llx]\n", svms, start, last); in svm_range_new()
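Lines 324-366 show what a freshly allocated range inherits from the process: a back-pointer to svms, a copy of the supported-GPU bitmap into its access bitmap, and the process-wide defaults (line 318 shows the granularity default coming from svms->default_granularity). A hedged sketch of that initialization; the prefetch/flags field names are assumptions beyond what the listing shows:

    /* sketch: a new range starts from per-process defaults */
    prange->svms = svms;
    bitmap_copy(prange->bitmap_access, svms->bitmap_supported,
                MAX_GPU_INSTANCE);
    svm_range_set_default_attributes(svms, &prange->preferred_loc,
                                     &prange->prefetch_loc,     /* assumed */
                                     &prange->granularity,      /* assumed */
                                     &prange->flags);           /* assumed */
    pr_debug("svms 0x%p [0x%llx 0x%llx]\n", svms, start, last);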
397 pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, in svm_range_bo_release()
516 prange->svms, prange->start, prange->last); in svm_range_validate_svm_bo()
565 p = container_of(prange->svms, struct kfd_process, svms); in svm_range_vram_node_new()
567 p->lead_thread->pid, prange->svms, in svm_range_vram_node_new()
684 p = container_of(prange->svms, struct kfd_process, svms); in svm_range_get_node_by_id()
699 p = container_of(prange->svms, struct kfd_process, svms); in svm_range_get_pdd_by_node()
753 !test_bit(gpuidx, p->svms.bitmap_supported)) { in svm_range_check_attr()
878 static void svm_range_debug_dump(struct svm_range_list *svms) in svm_range_debug_dump() argument
883 pr_debug("dump svms 0x%p list\n", svms); in svm_range_debug_dump()
886 list_for_each_entry(prange, &svms->list, list) { in svm_range_debug_dump()
893 pr_debug("dump svms 0x%p interval tree\n", svms); in svm_range_debug_dump()
895 node = interval_tree_iter_first(&svms->objects, 0, ~0ULL); in svm_range_debug_dump()
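svm_range_debug_dump makes the driver's double bookkeeping visible: every range sits both on the flat svms->list and in the svms->objects interval tree, and the dump walks each in turn. A minimal sketch of the two passes:

    #include <linux/interval_tree.h>
    #include <linux/list.h>

    static void dump_ranges(struct svm_range_list *svms)
    {
            struct interval_tree_node *node;
            struct svm_range *prange;

            /* pass 1: the flat range list */
            list_for_each_entry(prange, &svms->list, list)
                    pr_debug("range [0x%lx 0x%lx]\n",
                             prange->start, prange->last);

            /* pass 2: the same ranges, via the interval tree */
            for (node = interval_tree_iter_first(&svms->objects, 0, ~0ULL);
                 node;
                 node = interval_tree_iter_next(node, 0, ~0ULL))
                    pr_debug("node [0x%lx 0x%lx]\n", node->start, node->last);
    }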
1012 new->svms, new, new->start, start, last); in svm_range_split_nodes()
1053 new->svms, new->start, old->start, old->last, start, last); in svm_range_split_adjust()
1113 struct svm_range_list *svms; in svm_range_split() local
1116 pr_debug("svms 0x%p [0x%llx 0x%llx] to [0x%llx 0x%llx]\n", prange->svms, in svm_range_split()
1124 svms = prange->svms; in svm_range_split()
1126 *new = svm_range_new(svms, last + 1, old_last, false); in svm_range_split()
1128 *new = svm_range_new(svms, old_start, start - 1, false); in svm_range_split()
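The two svm_range_new calls at lines 1126-1128 encode the split rule: prange shrinks to [start, last] and the newly allocated range receives whichever side was cut off, the tail [last+1, old_last] when the start is kept, or the head [old_start, start-1] when the end is kept. In sketch form:

    /* sketch: shrink prange to [start, last]; *new takes the remainder */
    if (old_start == start)
            *new = svm_range_new(svms, last + 1, old_last, false);   /* tail */
    else
            *new = svm_range_new(svms, old_start, start - 1, false); /* head */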
1344 pr_debug("unmap svms 0x%p prange 0x%p\n", prange->svms, prange); in svm_range_unmap_from_gpus()
1350 p = container_of(prange->svms, struct kfd_process, svms); in svm_range_unmap_from_gpus()
1398 pr_debug("svms 0x%p [0x%lx 0x%lx] readonly %d\n", prange->svms, in svm_range_map_to_gpu()
1420 prange->svms, last_start, prange->start + i, in svm_range_map_to_gpu()
1474 p = container_of(prange->svms, struct kfd_process, svms); in svm_range_map_to_gpus()
1630 ctx->process = container_of(prange->svms, struct kfd_process, svms); in svm_range_validate_and_map()
1689 p = container_of(prange->svms, struct kfd_process, svms); in svm_range_validate_and_map()
1717 WRITE_ONCE(p->svms.faulting_task, current); in svm_range_validate_and_map()
1721 WRITE_ONCE(p->svms.faulting_task, NULL); in svm_range_validate_and_map()
1791 svm_range_list_lock_and_flush_work(struct svm_range_list *svms, in svm_range_list_lock_and_flush_work() argument
1795 flush_work(&svms->deferred_list_work); in svm_range_list_lock_and_flush_work()
1798 if (list_empty(&svms->deferred_range_list)) in svm_range_list_lock_and_flush_work()
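svm_range_list_lock_and_flush_work pairs the mmap write lock with a flush of the deferred worker, and the list_empty check at line 1798 implies a retry loop: if new deferred work arrived while the lock was being taken, drop the lock and flush again. A hedged sketch of that shape (the exact control flow isn't fully in the listing):

    /* sketch: return with mmap_write_lock(mm) held and no deferred work pending */
    static void lock_and_flush(struct svm_range_list *svms, struct mm_struct *mm)
    {
    retry:
            flush_work(&svms->deferred_list_work);
            mmap_write_lock(mm);

            if (list_empty(&svms->deferred_range_list))
                    return;

            /* work was queued behind our back: unlock and flush again */
            mmap_write_unlock(mm);
            goto retry;
    }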
1809 struct svm_range_list *svms; in svm_range_restore_work() local
1817 svms = container_of(dwork, struct svm_range_list, restore_work); in svm_range_restore_work()
1818 evicted_ranges = atomic_read(&svms->evicted_ranges); in svm_range_restore_work()
1824 p = container_of(svms, struct kfd_process, svms); in svm_range_restore_work()
1830 pr_debug("svms 0x%p process mm gone\n", svms); in svm_range_restore_work()
1835 svm_range_list_lock_and_flush_work(svms, mm); in svm_range_restore_work()
1836 mutex_lock(&svms->lock); in svm_range_restore_work()
1838 evicted_ranges = atomic_read(&svms->evicted_ranges); in svm_range_restore_work()
1840 list_for_each_entry(prange, &svms->list, list) { in svm_range_restore_work()
1846 prange->svms, prange, prange->start, prange->last, in svm_range_restore_work()
1868 if (atomic_cmpxchg(&svms->evicted_ranges, evicted_ranges, 0) != in svm_range_restore_work()
1885 mutex_unlock(&svms->lock); in svm_range_restore_work()
1892 queue_delayed_work(system_freezable_wq, &svms->restore_work, in svm_range_restore_work()
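The restore worker samples evicted_ranges once (line 1838), restores everything, then commits with the cmpxchg at line 1868: if another eviction raised the counter in the meantime, the exchange fails and the delayed work is requeued (line 1892) rather than losing the new eviction. The idiom in isolation:

    /* sketch: clear the counter only if no eviction raced with the restore */
    evicted_ranges = atomic_read(&svms->evicted_ranges);
    /* ... revalidate and re-map all evicted ranges ... */
    if (atomic_cmpxchg(&svms->evicted_ranges, evicted_ranges, 0) !=
        evicted_ranges)
            goto out_reschedule;    /* label name assumed */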
1921 struct svm_range_list *svms = prange->svms; in svm_range_evict() local
1926 p = container_of(svms, struct kfd_process, svms); in svm_range_evict()
1929 svms, prange->start, prange->last, start, last); in svm_range_evict()
1955 evicted_ranges = atomic_inc_return(&svms->evicted_ranges); in svm_range_evict()
1960 prange->svms, prange->start, prange->last); in svm_range_evict()
1967 pr_debug("schedule to restore svm %p ranges\n", svms); in svm_range_evict()
1968 queue_delayed_work(system_freezable_wq, &svms->restore_work, in svm_range_evict()
1980 prange->svms, start, last); in svm_range_evict()
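On the producer side, svm_range_evict uses atomic_inc_return (line 1955) so that only the first eviction after a quiet period schedules the restore work (line 1968); later evictions just raise the counter for the worker's cmpxchg to notice. A sketch, with the delay constant assumed:

    /* sketch: arm the restore timer only on the 0 -> 1 transition */
    if (atomic_inc_return(&svms->evicted_ranges) == 1)
            queue_delayed_work(system_freezable_wq, &svms->restore_work,
                               msecs_to_jiffies(RESTORE_DELAY_MS)); /* assumed */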
2002 new = svm_range_new(old->svms, old->start, old->last, false); in svm_range_clone()
2056 svm_range_split_new(struct svm_range_list *svms, uint64_t start, uint64_t last, in svm_range_split_new() argument
2069 prange = svm_range_new(svms, start, l, true); in svm_range_split_new()
2117 struct svm_range_list *svms = &p->svms; in svm_range_add() local
2124 pr_debug("svms 0x%p [0x%llx 0x%lx]\n", &p->svms, start, last); in svm_range_add()
2132 node = interval_tree_iter_first(&svms->objects, start, last); in svm_range_add()
2187 r = svm_range_split_new(svms, start, node->start - 1, in svm_range_add()
2200 r = svm_range_split_new(svms, start, last, in svm_range_add()
2231 prange->svms, prange, start, last, prange->start, in svm_range_update_notifier_and_interval_tree()
2235 interval_tree_remove(&prange->it_node, &prange->svms->objects); in svm_range_update_notifier_and_interval_tree()
2241 interval_tree_insert(&prange->it_node, &prange->svms->objects); in svm_range_update_notifier_and_interval_tree()
2246 svm_range_handle_list_op(struct svm_range_list *svms, struct svm_range *prange, in svm_range_handle_list_op() argument
2252 svms, prange, prange->start, prange->last); in svm_range_handle_list_op()
2256 svms, prange, prange->start, prange->last); in svm_range_handle_list_op()
2263 svms, prange, prange->start, prange->last); in svm_range_handle_list_op()
2268 svms, prange, prange->start, prange->last); in svm_range_handle_list_op()
2273 pr_debug("add 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms, prange, in svm_range_handle_list_op()
2279 pr_debug("add and map 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms, in svm_range_handle_list_op()
2291 static void svm_range_drain_retry_fault(struct svm_range_list *svms) in svm_range_drain_retry_fault() argument
2297 p = container_of(svms, struct kfd_process, svms); in svm_range_drain_retry_fault()
2299 for_each_set_bit(i, svms->bitmap_supported, p->n_pdds) { in svm_range_drain_retry_fault()
2304 pr_debug("drain retry fault gpu %d svms %p\n", i, svms); in svm_range_drain_retry_fault()
2316 pr_debug("drain retry fault gpu %d svms 0x%p done\n", i, svms); in svm_range_drain_retry_fault()
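The drain loop visits only GPUs that actually support SVM, via for_each_set_bit over the bitmap_supported mask populated in svm_range_list_init (line 3285 below). The shape of the loop, with the per-GPU drain body elided:

    /* sketch: iterate the SVM-capable GPUs attached to this process */
    for_each_set_bit(i, svms->bitmap_supported, p->n_pdds) {
            /* drain this GPU's retry-fault interrupt handling, then log */
            pr_debug("drain retry fault gpu %d svms 0x%p done\n", i, svms);
    }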
2322 struct svm_range_list *svms; in svm_range_deferred_list_work() local
2326 svms = container_of(work, struct svm_range_list, deferred_list_work); in svm_range_deferred_list_work()
2327 pr_debug("enter svms 0x%p\n", svms); in svm_range_deferred_list_work()
2329 spin_lock(&svms->deferred_list_lock); in svm_range_deferred_list_work()
2330 while (!list_empty(&svms->deferred_range_list)) { in svm_range_deferred_list_work()
2331 prange = list_first_entry(&svms->deferred_range_list, in svm_range_deferred_list_work()
2333 spin_unlock(&svms->deferred_list_lock); in svm_range_deferred_list_work()
2350 spin_lock(&svms->deferred_list_lock); in svm_range_deferred_list_work()
2352 spin_unlock(&svms->deferred_list_lock); in svm_range_deferred_list_work()
2354 mutex_lock(&svms->lock); in svm_range_deferred_list_work()
2364 svm_range_handle_list_op(svms, pchild, mm); in svm_range_deferred_list_work()
2368 svm_range_handle_list_op(svms, prange, mm); in svm_range_deferred_list_work()
2369 mutex_unlock(&svms->lock); in svm_range_deferred_list_work()
2377 spin_lock(&svms->deferred_list_lock); in svm_range_deferred_list_work()
2379 spin_unlock(&svms->deferred_list_lock); in svm_range_deferred_list_work()
2380 pr_debug("exit svms 0x%p\n", svms); in svm_range_deferred_list_work()
2384 svm_range_add_list_work(struct svm_range_list *svms, struct svm_range *prange, in svm_range_add_list_work() argument
2387 spin_lock(&svms->deferred_list_lock); in svm_range_add_list_work()
2403 &prange->svms->deferred_range_list); in svm_range_add_list_work()
2408 spin_unlock(&svms->deferred_list_lock); in svm_range_add_list_work()
2411 void schedule_deferred_list_work(struct svm_range_list *svms) in schedule_deferred_list_work() argument
2413 spin_lock(&svms->deferred_list_lock); in schedule_deferred_list_work()
2414 if (!list_empty(&svms->deferred_range_list)) in schedule_deferred_list_work()
2415 schedule_work(&svms->deferred_list_work); in schedule_deferred_list_work()
2416 spin_unlock(&svms->deferred_list_lock); in schedule_deferred_list_work()
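Lines 2322-2416 outline a standard two-lock deferred-work scheme: producers append to deferred_range_list under the deferred_list_lock spinlock and kick the worker; the worker pops one entry at a time, drops the spinlock, and does the sleepable part under the svms->lock mutex. A minimal sketch of the consumer loop (child-range and mm-lifetime handling omitted):

    /* sketch: drain deferred ops, sleeping only outside the spinlock */
    spin_lock(&svms->deferred_list_lock);
    while (!list_empty(&svms->deferred_range_list)) {
            prange = list_first_entry(&svms->deferred_range_list,
                                      struct svm_range, deferred_list);
            list_del_init(&prange->deferred_list);
            spin_unlock(&svms->deferred_list_lock);

            mutex_lock(&svms->lock);        /* may sleep; spinlock is dropped */
            svm_range_handle_list_op(svms, prange, mm);
            mutex_unlock(&svms->lock);

            spin_lock(&svms->deferred_list_lock);
    }
    spin_unlock(&svms->deferred_list_lock);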
2457 struct svm_range_list *svms; in svm_range_unmap_from_cpu() local
2477 svms = &p->svms; in svm_range_unmap_from_cpu()
2479 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n", svms, in svm_range_unmap_from_cpu()
2485 for_each_set_bit(i, svms->bitmap_supported, p->n_pdds) { in svm_range_unmap_from_cpu()
2502 svms->checkpoint_ts[i] = in svm_range_unmap_from_cpu()
2512 svms->checkpoint_ts[i] = amdgpu_ih_decode_iv_ts(adev, ih, checkpoint_wptr, -1); in svm_range_unmap_from_cpu()
2533 svm_range_add_list_work(svms, prange, mm, SVM_OP_UNMAP_RANGE); in svm_range_unmap_from_cpu()
2535 svm_range_add_list_work(svms, prange, mm, in svm_range_unmap_from_cpu()
2537 schedule_deferred_list_work(svms); in svm_range_unmap_from_cpu()
2614 svm_range_from_addr(struct svm_range_list *svms, unsigned long addr, in svm_range_from_addr() argument
2621 node = interval_tree_iter_first(&svms->objects, addr, addr); in svm_range_from_addr()
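svm_range_from_addr is a stabbing query: interval_tree_iter_first with start == last == addr returns the node covering that single address, and container_of maps it back to the range through the it_node member seen in the insert/remove hits above. Sketch:

    /* sketch: point lookup in the range interval tree */
    struct interval_tree_node *node;

    node = interval_tree_iter_first(&svms->objects, addr, addr);
    if (!node)
            return NULL;
    return container_of(node, struct svm_range, it_node);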
2677 p = container_of(prange->svms, struct kfd_process, svms); in svm_range_best_restore_location()
2734 (unsigned long)ALIGN_DOWN(addr, 1UL << p->svms.default_granularity)); in svm_range_get_range_boundaries()
2736 (unsigned long)ALIGN(addr + 1, 1UL << p->svms.default_granularity)); in svm_range_get_range_boundaries()
2739 node = interval_tree_iter_first(&p->svms.objects, addr + 1, ULONG_MAX); in svm_range_get_range_boundaries()
2748 rb_node = rb_last(&p->svms.objects.rb_root); in svm_range_get_range_boundaries()
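When a fault lands outside any registered range, lines 2734-2748 size a window around it: the address is snapped down and up to the default granularity (stored as log2 of a page count), then clamped against the nearest existing neighbour found in the interval tree. The alignment step in isolation:

    /* sketch: snap a faulting page address to a 2^granularity-page window */
    unsigned long size  = 1UL << p->svms.default_granularity;
    unsigned long start = ALIGN_DOWN(addr, size);
    unsigned long last  = ALIGN(addr + 1, size) - 1;    /* inclusive end */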
2851 prange = svm_range_new(&p->svms, start, last, true); in svm_range_create_unregistered_range()
2885 struct svm_range_list *svms = prange->svms; in svm_range_skip_recover() local
2887 spin_lock(&svms->deferred_list_lock); in svm_range_skip_recover()
2890 spin_unlock(&svms->deferred_list_lock); in svm_range_skip_recover()
2893 spin_unlock(&svms->deferred_list_lock); in svm_range_skip_recover()
2897 svms, prange, prange->start, prange->last); in svm_range_skip_recover()
2903 svms, prange, prange->start, prange->last); in svm_range_skip_recover()
2956 struct svm_range_list *svms; in svm_range_restore_pages() local
2978 svms = &p->svms; in svm_range_restore_pages()
2980 pr_debug("restoring svms 0x%p fault address 0x%llx\n", svms, addr); in svm_range_restore_pages()
2982 if (atomic_read(&svms->drain_pagefaults)) { in svm_range_restore_pages()
3013 pr_debug("svms 0x%p failed to get mm\n", svms); in svm_range_restore_pages()
3020 mutex_lock(&svms->lock); in svm_range_restore_pages()
3023 if (svms->checkpoint_ts[gpuidx] != 0) { in svm_range_restore_pages()
3024 if (amdgpu_ih_ts_after_or_equal(ts, svms->checkpoint_ts[gpuidx])) { in svm_range_restore_pages()
3032 svms->checkpoint_ts[gpuidx] = 0; in svm_range_restore_pages()
3036 prange = svm_range_from_addr(svms, addr, NULL); in svm_range_restore_pages()
3039 svms, addr); in svm_range_restore_pages()
3045 mutex_unlock(&svms->lock); in svm_range_restore_pages()
3054 svms, addr); in svm_range_restore_pages()
3075 svms, prange->start, prange->last); in svm_range_restore_pages()
3100 svms, prange->start, prange->last); in svm_range_restore_pages()
3106 svms, prange->start, prange->last, best_loc, in svm_range_restore_pages()
3138 r, svms, start, last); in svm_range_restore_pages()
3149 r, svms, start, last); in svm_range_restore_pages()
3158 mutex_unlock(&svms->lock); in svm_range_restore_pages()
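The checkpoint timestamps recorded at unmap time (lines 2502-2512) are consumed here: a retry fault whose timestamp is at or before the per-GPU checkpoint was already in flight when the range went away, so it is dropped; once a newer fault arrives the checkpoint is cleared so later wrap-around comparisons stay safe. Reconstructed around the listed lines (the error-path label is assumed):

    /* sketch: filter out retry faults that predate the unmap checkpoint */
    if (svms->checkpoint_ts[gpuidx] != 0) {
            if (amdgpu_ih_ts_after_or_equal(ts, svms->checkpoint_ts[gpuidx])) {
                    r = -EAGAIN;            /* stale fault: drop it */
                    goto out_unlock_svms;   /* label assumed */
            }
            /* first fault newer than the checkpoint: stop filtering */
            svms->checkpoint_ts[gpuidx] = 0;
    }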
3186 mutex_lock(&p->svms.lock); in svm_range_switch_xnack_reserve_mem()
3188 list_for_each_entry(prange, &p->svms.list, list) { in svm_range_switch_xnack_reserve_mem()
3230 mutex_unlock(&p->svms.lock); in svm_range_switch_xnack_reserve_mem()
3240 &p->svms); in svm_range_list_fini()
3242 cancel_delayed_work_sync(&p->svms.restore_work); in svm_range_list_fini()
3245 flush_work(&p->svms.deferred_list_work); in svm_range_list_fini()
3252 atomic_set(&p->svms.drain_pagefaults, 1); in svm_range_list_fini()
3253 svm_range_drain_retry_fault(&p->svms); in svm_range_list_fini()
3255 list_for_each_entry_safe(prange, next, &p->svms.list, list) { in svm_range_list_fini()
3261 mutex_destroy(&p->svms.lock); in svm_range_list_fini()
3264 p->lead_thread->pid, &p->svms); in svm_range_list_fini()
3269 struct svm_range_list *svms = &p->svms; in svm_range_list_init() local
3272 svms->objects = RB_ROOT_CACHED; in svm_range_list_init()
3273 mutex_init(&svms->lock); in svm_range_list_init()
3274 INIT_LIST_HEAD(&svms->list); in svm_range_list_init()
3275 atomic_set(&svms->evicted_ranges, 0); in svm_range_list_init()
3276 atomic_set(&svms->drain_pagefaults, 0); in svm_range_list_init()
3277 INIT_DELAYED_WORK(&svms->restore_work, svm_range_restore_work); in svm_range_list_init()
3278 INIT_WORK(&svms->deferred_list_work, svm_range_deferred_list_work); in svm_range_list_init()
3279 INIT_LIST_HEAD(&svms->deferred_range_list); in svm_range_list_init()
3280 INIT_LIST_HEAD(&svms->criu_svm_metadata_list); in svm_range_list_init()
3281 spin_lock_init(&svms->deferred_list_lock); in svm_range_list_init()
3285 bitmap_set(svms->bitmap_supported, i, 1); in svm_range_list_init()
3290 svms->default_granularity = min_t(u8, amdgpu_svm_default_granularity, 0x1B); in svm_range_list_init()
3291 pr_debug("Default SVM Granularity to use: %d\n", svms->default_granularity); in svm_range_list_init()
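The init hits at lines 3272-3291 cover nearly the whole constructor and make the relationships above concrete: one interval tree plus one flat list for ranges, one mutex guarding both, a delayed restore work, a deferred-op worker with its own spinlocked list, and a default granularity clamped from a module parameter. Stitched together as a sketch (the per-GPU support test in the loop feeding line 3285 is not in the listing and is assumed):

    int svm_range_list_init(struct kfd_process *p)
    {
            struct svm_range_list *svms = &p->svms;
            int i;

            svms->objects = RB_ROOT_CACHED;
            mutex_init(&svms->lock);
            INIT_LIST_HEAD(&svms->list);
            atomic_set(&svms->evicted_ranges, 0);
            atomic_set(&svms->drain_pagefaults, 0);
            INIT_DELAYED_WORK(&svms->restore_work, svm_range_restore_work);
            INIT_WORK(&svms->deferred_list_work, svm_range_deferred_list_work);
            INIT_LIST_HEAD(&svms->deferred_range_list);
            INIT_LIST_HEAD(&svms->criu_svm_metadata_list);
            spin_lock_init(&svms->deferred_list_lock);

            for (i = 0; i < p->n_pdds; i++)
                    if (device_supports_svm(p->pdds[i]))    /* assumed helper */
                            bitmap_set(svms->bitmap_supported, i, 1);

            /* granularity is log2(pages); clamp the module param at 2^27 */
            svms->default_granularity =
                    min_t(u8, amdgpu_svm_default_granularity, 0x1B);
            pr_debug("Default SVM Granularity to use: %d\n",
                     svms->default_granularity);

            return 0;
    }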
3425 p = container_of(prange->svms, struct kfd_process, svms); in svm_range_best_prefetch_location()
3466 p->xnack_enabled, &p->svms, prange->start, prange->last, in svm_range_best_prefetch_location()
3572 pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, in svm_range_evict_svm_bo_worker()
3620 struct svm_range_list *svms; in svm_range_set_attr() local
3628 p->lead_thread->pid, &p->svms, start, start + size - 1, size); in svm_range_set_attr()
3634 svms = &p->svms; in svm_range_set_attr()
3638 svm_range_list_lock_and_flush_work(svms, mm); in svm_range_set_attr()
3647 mutex_lock(&svms->lock); in svm_range_set_attr()
3653 mutex_unlock(&svms->lock); in svm_range_set_attr()
3668 prange->svms, prange, prange->start, in svm_range_set_attr()
3729 dynamic_svm_range_dump(svms); in svm_range_set_attr()
3731 mutex_unlock(&svms->lock); in svm_range_set_attr()
3737 p->lead_thread->pid, &p->svms, start, start + size - 1, r); in svm_range_set_attr()
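The set_attr hits also fix the lock ordering the earlier clusters imply: first svm_range_list_lock_and_flush_work (deferred worker flushed, mmap write lock taken), then the svms->lock mutex (line 3647), with the dump macro from the top of the listing run just before the mutex is released (lines 3729-3731). A hedged shape; the unlock placement beyond what the listing shows is assumed:

    /* sketch: lock order in the attribute-update path */
    svm_range_list_lock_and_flush_work(svms, mm);   /* mmap write lock held */
    mutex_lock(&svms->lock);
    /* ... add/split ranges, apply attributes, validate and map ... */
    dynamic_svm_range_dump(svms);
    mutex_unlock(&svms->lock);
    mmap_write_unlock(mm);  /* assumed; the real code may downgrade earlier */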
3757 struct svm_range_list *svms; in svm_range_get_attr() local
3767 pr_debug("svms 0x%p [0x%llx 0x%llx] nattr 0x%x\n", &p->svms, start, in svm_range_get_attr()
3776 flush_work(&p->svms.deferred_list_work); in svm_range_get_attr()
3813 svms = &p->svms; in svm_range_get_attr()
3815 mutex_lock(&svms->lock); in svm_range_get_attr()
3817 node = interval_tree_iter_first(&svms->objects, start, last); in svm_range_get_attr()
3820 svm_range_set_default_attributes(svms, &location, &prefetch_loc, in svm_range_get_attr()
3824 bitmap_copy(bitmap_access, svms->bitmap_supported, in svm_range_get_attr()
3831 bitmap_copy(bitmap_access, svms->bitmap_supported, MAX_GPU_INSTANCE); in svm_range_get_attr()
3832 bitmap_copy(bitmap_aip, svms->bitmap_supported, MAX_GPU_INSTANCE); in svm_range_get_attr()
3879 mutex_unlock(&svms->lock); in svm_range_get_attr()
3924 struct svm_range_list *svms = &p->svms; in kfd_criu_resume_svm() local
3931 if (list_empty(&svms->criu_svm_metadata_list)) { in kfd_criu_resume_svm()
3945 list_for_each_entry(criu_svm_md, &svms->criu_svm_metadata_list, list) { in kfd_criu_resume_svm()
4011 list_for_each_entry_safe(criu_svm_md, next, &svms->criu_svm_metadata_list, list) { in kfd_criu_resume_svm()
4030 struct svm_range_list *svms = &p->svms; in kfd_criu_restore_svm() local
4065 list_add_tail(&criu_svm_md->list, &svms->criu_svm_metadata_list); in kfd_criu_restore_svm()
4081 struct svm_range_list *svms; in svm_range_get_info() local
4087 svms = &p->svms; in svm_range_get_info()
4089 mutex_lock(&svms->lock); in svm_range_get_info()
4090 list_for_each_entry(prange, &svms->list, list) { in svm_range_get_info()
4096 mutex_unlock(&svms->lock); in svm_range_get_info()
4139 struct svm_range_list *svms; in kfd_criu_checkpoint_svm() local
4144 svms = &p->svms; in kfd_criu_checkpoint_svm()
4183 list_for_each_entry(prange, &svms->list, list) { in kfd_criu_checkpoint_svm()