Lines matching refs: svms
84 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms, in svm_range_unlink()
95 interval_tree_remove(&prange->it_node, &prange->svms->objects); in svm_range_unlink()
101 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms, in svm_range_add_notifier_locked()
120 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms, in svm_range_add_to_svms()
123 list_move_tail(&prange->list, &prange->svms->list); in svm_range_add_to_svms()
126 interval_tree_insert(&prange->it_node, &prange->svms->objects); in svm_range_add_to_svms()
132 prange->svms, prange, in svm_range_remove_notifier()
204 p = container_of(prange->svms, struct kfd_process, svms); in svm_range_dma_map()
251 p = container_of(prange->svms, struct kfd_process, svms); in svm_range_free_dma_mappings()
273 struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms); in svm_range_free()
275 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms, prange, in svm_range_free()
303 svm_range *svm_range_new(struct svm_range_list *svms, uint64_t start, in svm_range_new() argument
314 p = container_of(svms, struct kfd_process, svms); in svm_range_new()
323 prange->svms = svms; in svm_range_new()
337 bitmap_copy(prange->bitmap_access, svms->bitmap_supported, in svm_range_new()
344 pr_debug("svms 0x%p [0x%llx 0x%llx]\n", svms, start, last); in svm_range_new()
375 pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, in svm_range_bo_release()
479 prange->svms, prange->start, prange->last); in svm_range_validate_svm_bo()
527 p = container_of(prange->svms, struct kfd_process, svms); in svm_range_vram_node_new()
528 pr_debug("pasid: %x svms 0x%p [0x%lx 0x%lx]\n", p->pasid, prange->svms, in svm_range_vram_node_new()
627 p = container_of(prange->svms, struct kfd_process, svms); in svm_range_get_adev_by_id()
650 p = container_of(prange->svms, struct kfd_process, svms); in svm_range_get_pdd_by_adev()
710 !test_bit(gpuidx, p->svms.bitmap_supported)) { in svm_range_check_attr()
833 static void svm_range_debug_dump(struct svm_range_list *svms) in svm_range_debug_dump() argument
838 pr_debug("dump svms 0x%p list\n", svms); in svm_range_debug_dump()
841 list_for_each_entry(prange, &svms->list, list) { in svm_range_debug_dump()
848 pr_debug("dump svms 0x%p interval tree\n", svms); in svm_range_debug_dump()
850 node = interval_tree_iter_first(&svms->objects, 0, ~0ULL); in svm_range_debug_dump()
923 new->svms, new, new->start, start, last); in svm_range_split_nodes()
964 new->svms, new->start, old->start, old->last, start, last); in svm_range_split_adjust()
1023 struct svm_range_list *svms; in svm_range_split() local
1026 pr_debug("svms 0x%p [0x%llx 0x%llx] to [0x%llx 0x%llx]\n", prange->svms, in svm_range_split()
1034 svms = prange->svms; in svm_range_split()
1036 *new = svm_range_new(svms, last + 1, old_last, false); in svm_range_split()
1038 *new = svm_range_new(svms, old_start, start - 1, false); in svm_range_split()
1124 prange->svms, prange->start, prange->last, start, last, size); in svm_range_split_by_granularity()
1251 pr_debug("unmap svms 0x%p prange 0x%p\n", prange->svms, prange); in svm_range_unmap_from_gpus()
1257 p = container_of(prange->svms, struct kfd_process, svms); in svm_range_unmap_from_gpus()
1305 pr_debug("svms 0x%p [0x%lx 0x%lx] readonly %d\n", prange->svms, in svm_range_map_to_gpu()
1327 prange->svms, last_start, prange->start + i, in svm_range_map_to_gpu()
1379 p = container_of(prange->svms, struct kfd_process, svms); in svm_range_map_to_gpus()
1532 ctx.process = container_of(prange->svms, struct kfd_process, svms); in svm_range_validate_and_map()
1579 p = container_of(prange->svms, struct kfd_process, svms); in svm_range_validate_and_map()
1608 WRITE_ONCE(p->svms.faulting_task, current); in svm_range_validate_and_map()
1612 WRITE_ONCE(p->svms.faulting_task, NULL); in svm_range_validate_and_map()
1671 svm_range_list_lock_and_flush_work(struct svm_range_list *svms, in svm_range_list_lock_and_flush_work() argument
1675 flush_work(&svms->deferred_list_work); in svm_range_list_lock_and_flush_work()
1678 if (list_empty(&svms->deferred_range_list)) in svm_range_list_lock_and_flush_work()
1689 struct svm_range_list *svms; in svm_range_restore_work() local
1697 svms = container_of(dwork, struct svm_range_list, restore_work); in svm_range_restore_work()
1698 evicted_ranges = atomic_read(&svms->evicted_ranges); in svm_range_restore_work()
1704 p = container_of(svms, struct kfd_process, svms); in svm_range_restore_work()
1710 pr_debug("svms 0x%p process mm gone\n", svms); in svm_range_restore_work()
1715 svm_range_list_lock_and_flush_work(svms, mm); in svm_range_restore_work()
1716 mutex_lock(&svms->lock); in svm_range_restore_work()
1718 evicted_ranges = atomic_read(&svms->evicted_ranges); in svm_range_restore_work()
1720 list_for_each_entry(prange, &svms->list, list) { in svm_range_restore_work()
1726 prange->svms, prange, prange->start, prange->last, in svm_range_restore_work()
1748 if (atomic_cmpxchg(&svms->evicted_ranges, evicted_ranges, 0) != in svm_range_restore_work()
1765 mutex_unlock(&svms->lock); in svm_range_restore_work()
1772 schedule_delayed_work(&svms->restore_work, in svm_range_restore_work()
1800 struct svm_range_list *svms = prange->svms; in svm_range_evict() local
1805 p = container_of(svms, struct kfd_process, svms); in svm_range_evict()
1808 svms, prange->start, prange->last, start, last); in svm_range_evict()
1834 evicted_ranges = atomic_inc_return(&svms->evicted_ranges); in svm_range_evict()
1839 prange->svms, prange->start, prange->last); in svm_range_evict()
1846 pr_debug("schedule to restore svm %p ranges\n", svms); in svm_range_evict()
1847 schedule_delayed_work(&svms->restore_work, in svm_range_evict()
1859 prange->svms, start, last); in svm_range_evict()
1881 new = svm_range_new(old->svms, old->start, old->last, false); in svm_range_clone()
1921 svm_range_split_new(struct svm_range_list *svms, uint64_t start, uint64_t last, in svm_range_split_new() argument
1934 prange = svm_range_new(svms, start, l, true); in svm_range_split_new()
1981 struct svm_range_list *svms = &p->svms; in svm_range_add() local
1988 pr_debug("svms 0x%p [0x%llx 0x%lx]\n", &p->svms, start, last); in svm_range_add()
1995 node = interval_tree_iter_first(&svms->objects, start, last); in svm_range_add()
2049 r = svm_range_split_new(svms, start, node->start - 1, in svm_range_add()
2062 r = svm_range_split_new(svms, start, last, in svm_range_add()
2093 prange->svms, prange, start, last, prange->start, in svm_range_update_notifier_and_interval_tree()
2097 interval_tree_remove(&prange->it_node, &prange->svms->objects); in svm_range_update_notifier_and_interval_tree()
2103 interval_tree_insert(&prange->it_node, &prange->svms->objects); in svm_range_update_notifier_and_interval_tree()
2108 svm_range_handle_list_op(struct svm_range_list *svms, struct svm_range *prange, in svm_range_handle_list_op() argument
2114 svms, prange, prange->start, prange->last); in svm_range_handle_list_op()
2118 svms, prange, prange->start, prange->last); in svm_range_handle_list_op()
2125 svms, prange, prange->start, prange->last); in svm_range_handle_list_op()
2130 svms, prange, prange->start, prange->last); in svm_range_handle_list_op()
2135 pr_debug("add 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms, prange, in svm_range_handle_list_op()
2141 pr_debug("add and map 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms, in svm_range_handle_list_op()
2153 static void svm_range_drain_retry_fault(struct svm_range_list *svms) in svm_range_drain_retry_fault() argument
2160 p = container_of(svms, struct kfd_process, svms); in svm_range_drain_retry_fault()
2163 drain = atomic_read(&svms->drain_pagefaults); in svm_range_drain_retry_fault()
2167 for_each_set_bit(i, svms->bitmap_supported, p->n_pdds) { in svm_range_drain_retry_fault()
2172 pr_debug("drain retry fault gpu %d svms %p\n", i, svms); in svm_range_drain_retry_fault()
2176 pr_debug("drain retry fault gpu %d svms 0x%p done\n", i, svms); in svm_range_drain_retry_fault()
2178 if (atomic_cmpxchg(&svms->drain_pagefaults, drain, 0) != drain) in svm_range_drain_retry_fault()
2184 struct svm_range_list *svms; in svm_range_deferred_list_work() local
2188 svms = container_of(work, struct svm_range_list, deferred_list_work); in svm_range_deferred_list_work()
2189 pr_debug("enter svms 0x%p\n", svms); in svm_range_deferred_list_work()
2191 spin_lock(&svms->deferred_list_lock); in svm_range_deferred_list_work()
2192 while (!list_empty(&svms->deferred_range_list)) { in svm_range_deferred_list_work()
2193 prange = list_first_entry(&svms->deferred_range_list, in svm_range_deferred_list_work()
2195 spin_unlock(&svms->deferred_list_lock); in svm_range_deferred_list_work()
2207 if (unlikely(atomic_read(&svms->drain_pagefaults))) { in svm_range_deferred_list_work()
2209 svm_range_drain_retry_fault(svms); in svm_range_deferred_list_work()
2221 spin_lock(&svms->deferred_list_lock); in svm_range_deferred_list_work()
2223 spin_unlock(&svms->deferred_list_lock); in svm_range_deferred_list_work()
2225 mutex_lock(&svms->lock); in svm_range_deferred_list_work()
2235 svm_range_handle_list_op(svms, pchild, mm); in svm_range_deferred_list_work()
2239 svm_range_handle_list_op(svms, prange, mm); in svm_range_deferred_list_work()
2240 mutex_unlock(&svms->lock); in svm_range_deferred_list_work()
2246 spin_lock(&svms->deferred_list_lock); in svm_range_deferred_list_work()
2248 spin_unlock(&svms->deferred_list_lock); in svm_range_deferred_list_work()
2249 pr_debug("exit svms 0x%p\n", svms); in svm_range_deferred_list_work()
2253 svm_range_add_list_work(struct svm_range_list *svms, struct svm_range *prange, in svm_range_add_list_work() argument
2256 spin_lock(&svms->deferred_list_lock); in svm_range_add_list_work()
2271 &prange->svms->deferred_range_list); in svm_range_add_list_work()
2275 spin_unlock(&svms->deferred_list_lock); in svm_range_add_list_work()
2278 void schedule_deferred_list_work(struct svm_range_list *svms) in schedule_deferred_list_work() argument
2280 spin_lock(&svms->deferred_list_lock); in schedule_deferred_list_work()
2281 if (!list_empty(&svms->deferred_range_list)) in schedule_deferred_list_work()
2282 schedule_work(&svms->deferred_list_work); in schedule_deferred_list_work()
2283 spin_unlock(&svms->deferred_list_lock); in schedule_deferred_list_work()
2325 struct svm_range_list *svms; in svm_range_unmap_from_cpu() local
2334 svms = &p->svms; in svm_range_unmap_from_cpu()
2336 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n", svms, in svm_range_unmap_from_cpu()
2343 atomic_inc(&svms->drain_pagefaults); in svm_range_unmap_from_cpu()
2363 svm_range_add_list_work(svms, prange, mm, SVM_OP_UNMAP_RANGE); in svm_range_unmap_from_cpu()
2365 svm_range_add_list_work(svms, prange, mm, in svm_range_unmap_from_cpu()
2367 schedule_deferred_list_work(svms); in svm_range_unmap_from_cpu()
2447 svm_range_from_addr(struct svm_range_list *svms, unsigned long addr, in svm_range_from_addr() argument
2454 node = interval_tree_iter_first(&svms->objects, addr, addr); in svm_range_from_addr()
2510 p = container_of(prange->svms, struct kfd_process, svms); in svm_range_best_restore_location()
2571 node = interval_tree_iter_first(&p->svms.objects, addr + 1, ULONG_MAX); in svm_range_get_range_boundaries()
2581 node = container_of(rb_last(&p->svms.objects.rb_root), in svm_range_get_range_boundaries()
2684 prange = svm_range_new(&p->svms, start, last, true); in svm_range_create_unregistered_range()
2718 struct svm_range_list *svms = prange->svms; in svm_range_skip_recover() local
2720 spin_lock(&svms->deferred_list_lock); in svm_range_skip_recover()
2723 spin_unlock(&svms->deferred_list_lock); in svm_range_skip_recover()
2726 spin_unlock(&svms->deferred_list_lock); in svm_range_skip_recover()
2730 svms, prange, prange->start, prange->last); in svm_range_skip_recover()
2736 svms, prange, prange->start, prange->last); in svm_range_skip_recover()
2787 struct svm_range_list *svms; in svm_range_restore_pages() local
2808 svms = &p->svms; in svm_range_restore_pages()
2810 pr_debug("restoring svms 0x%p fault address 0x%llx\n", svms, addr); in svm_range_restore_pages()
2812 if (atomic_read(&svms->drain_pagefaults)) { in svm_range_restore_pages()
2829 pr_debug("svms 0x%p failed to get mm\n", svms); in svm_range_restore_pages()
2836 mutex_lock(&svms->lock); in svm_range_restore_pages()
2837 prange = svm_range_from_addr(svms, addr, NULL); in svm_range_restore_pages()
2840 svms, addr); in svm_range_restore_pages()
2846 mutex_unlock(&svms->lock); in svm_range_restore_pages()
2855 svms, addr); in svm_range_restore_pages()
2876 svms, prange->start, prange->last); in svm_range_restore_pages()
2901 svms, prange->start, prange->last); in svm_range_restore_pages()
2907 svms, prange->start, prange->last, best_loc, in svm_range_restore_pages()
2938 r, svms, prange->start, prange->last); in svm_range_restore_pages()
2946 r, svms, prange->start, prange->last); in svm_range_restore_pages()
2954 mutex_unlock(&svms->lock); in svm_range_restore_pages()
2981 mutex_lock(&p->svms.lock); in svm_range_switch_xnack_reserve_mem()
2983 list_for_each_entry(prange, &p->svms.list, list) { in svm_range_switch_xnack_reserve_mem()
3025 mutex_unlock(&p->svms.lock); in svm_range_switch_xnack_reserve_mem()
3034 pr_debug("pasid 0x%x svms 0x%p\n", p->pasid, &p->svms); in svm_range_list_fini()
3036 cancel_delayed_work_sync(&p->svms.restore_work); in svm_range_list_fini()
3039 flush_work(&p->svms.deferred_list_work); in svm_range_list_fini()
3045 atomic_inc(&p->svms.drain_pagefaults); in svm_range_list_fini()
3046 svm_range_drain_retry_fault(&p->svms); in svm_range_list_fini()
3048 list_for_each_entry_safe(prange, next, &p->svms.list, list) { in svm_range_list_fini()
3054 mutex_destroy(&p->svms.lock); in svm_range_list_fini()
3056 pr_debug("pasid 0x%x svms 0x%p done\n", p->pasid, &p->svms); in svm_range_list_fini()
3061 struct svm_range_list *svms = &p->svms; in svm_range_list_init() local
3064 svms->objects = RB_ROOT_CACHED; in svm_range_list_init()
3065 mutex_init(&svms->lock); in svm_range_list_init()
3066 INIT_LIST_HEAD(&svms->list); in svm_range_list_init()
3067 atomic_set(&svms->evicted_ranges, 0); in svm_range_list_init()
3068 atomic_set(&svms->drain_pagefaults, 0); in svm_range_list_init()
3069 INIT_DELAYED_WORK(&svms->restore_work, svm_range_restore_work); in svm_range_list_init()
3070 INIT_WORK(&svms->deferred_list_work, svm_range_deferred_list_work); in svm_range_list_init()
3071 INIT_LIST_HEAD(&svms->deferred_range_list); in svm_range_list_init()
3072 INIT_LIST_HEAD(&svms->criu_svm_metadata_list); in svm_range_list_init()
3073 spin_lock_init(&svms->deferred_list_lock); in svm_range_list_init()
3077 bitmap_set(svms->bitmap_supported, i, 1); in svm_range_list_init()
3211 p = container_of(prange->svms, struct kfd_process, svms); in svm_range_best_prefetch_location()
3247 p->xnack_enabled, &p->svms, prange->start, prange->last, in svm_range_best_prefetch_location()
3348 pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, in svm_range_evict_svm_bo_worker()
3391 struct svm_range_list *svms; in svm_range_set_attr() local
3399 p->pasid, &p->svms, start, start + size - 1, size); in svm_range_set_attr()
3405 svms = &p->svms; in svm_range_set_attr()
3409 svm_range_list_lock_and_flush_work(svms, mm); in svm_range_set_attr()
3418 mutex_lock(&svms->lock); in svm_range_set_attr()
3424 mutex_unlock(&svms->lock); in svm_range_set_attr()
3439 prange->svms, prange, prange->start, in svm_range_set_attr()
3487 svm_range_debug_dump(svms); in svm_range_set_attr()
3489 mutex_unlock(&svms->lock); in svm_range_set_attr()
3495 &p->svms, start, start + size - 1, r); in svm_range_set_attr()
3515 struct svm_range_list *svms; in svm_range_get_attr() local
3525 pr_debug("svms 0x%p [0x%llx 0x%llx] nattr 0x%x\n", &p->svms, start, in svm_range_get_attr()
3534 flush_work(&p->svms.deferred_list_work); in svm_range_get_attr()
3571 svms = &p->svms; in svm_range_get_attr()
3573 mutex_lock(&svms->lock); in svm_range_get_attr()
3575 node = interval_tree_iter_first(&svms->objects, start, last); in svm_range_get_attr()
3582 bitmap_copy(bitmap_access, svms->bitmap_supported, in svm_range_get_attr()
3589 bitmap_copy(bitmap_access, svms->bitmap_supported, MAX_GPU_INSTANCE); in svm_range_get_attr()
3590 bitmap_copy(bitmap_aip, svms->bitmap_supported, MAX_GPU_INSTANCE); in svm_range_get_attr()
3637 mutex_unlock(&svms->lock); in svm_range_get_attr()
3682 struct svm_range_list *svms = &p->svms; in kfd_criu_resume_svm() local
3689 if (list_empty(&svms->criu_svm_metadata_list)) { in kfd_criu_resume_svm()
3703 list_for_each_entry(criu_svm_md, &svms->criu_svm_metadata_list, list) { in kfd_criu_resume_svm()
3769 list_for_each_entry_safe(criu_svm_md, next, &svms->criu_svm_metadata_list, list) { in kfd_criu_resume_svm()
3788 struct svm_range_list *svms = &p->svms; in kfd_criu_restore_svm() local
3823 list_add_tail(&criu_svm_md->list, &svms->criu_svm_metadata_list); in kfd_criu_restore_svm()
3839 struct svm_range_list *svms; in svm_range_get_info() local
3845 svms = &p->svms; in svm_range_get_info()
3846 if (!svms) in svm_range_get_info()
3849 mutex_lock(&svms->lock); in svm_range_get_info()
3850 list_for_each_entry(prange, &svms->list, list) { in svm_range_get_info()
3856 mutex_unlock(&svms->lock); in svm_range_get_info()
3900 struct svm_range_list *svms; in kfd_criu_checkpoint_svm() local
3905 svms = &p->svms; in kfd_criu_checkpoint_svm()
3906 if (!svms) in kfd_criu_checkpoint_svm()
3946 list_for_each_entry(prange, &svms->list, list) { in kfd_criu_checkpoint_svm()
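
The references above repeat one access pattern: a struct svm_range_list is embedded in struct kfd_process (recovered with container_of), its mutex svms->lock guards both the svms->list linked list and the svms->objects interval tree of struct svm_range, and deferred per-range work goes through svms->deferred_range_list. Below is a minimal sketch of that read-side pattern, assuming the struct layout implied by the matches; the helper name svm_count_ranges_in_interval and the exact #include lines are illustrative, not code from kfd_svm.c.

	/*
	 * Hypothetical helper (not in the file above) showing the pattern the
	 * matches repeat: take svms->lock, then walk the svms->objects
	 * interval tree the same way svm_range_from_addr() and
	 * svm_range_get_attr() do.
	 */
	#include <linux/interval_tree.h>
	#include <linux/mutex.h>

	#include "kfd_priv.h"
	#include "kfd_svm.h"

	static unsigned long
	svm_count_ranges_in_interval(struct svm_range_list *svms,
				     unsigned long start, unsigned long last)
	{
		struct interval_tree_node *node;
		unsigned long count = 0;

		mutex_lock(&svms->lock);	/* same lock taken before svms->objects lookups above */
		node = interval_tree_iter_first(&svms->objects, start, last);
		while (node) {
			count++;
			node = interval_tree_iter_next(node, start, last);
		}
		mutex_unlock(&svms->lock);

		pr_debug("svms 0x%p [0x%lx 0x%lx] overlaps %lu ranges\n",
			 svms, start, last, count);
		return count;
	}

The two containers serve different lookups: svms->list is walked when every range of a process must be visited (restore, checkpoint, fini), while svms->objects allows address-based queries such as the page-fault path in svm_range_restore_pages(), which resolves a faulting address via svm_range_from_addr() under the same svms->lock.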