Lines matching refs: gpuva (usage sites of the embedded struct drm_gpuva, grouped by the function each hit appears in)
385 struct drm_gpuva *gpuva; in xe_gpuvm_validate() local
389 drm_gpuvm_bo_for_each_va(gpuva, vm_bo) in xe_gpuvm_validate()
390 list_move_tail(&gpuva_to_vma(gpuva)->combined_links.rebind, in xe_gpuvm_validate()
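
The two hits above are the validate path queueing every VA that maps a shared vm_bo for rebind. A minimal sketch of that loop, assuming gpuva_to_vma() and the combined_links.rebind member shown in the listing are in scope; the wrapper name and the destination list parameter are assumptions (line 390 is truncated before the real destination list):

static void queue_vm_bo_vmas_for_rebind(struct drm_gpuvm_bo *vm_bo,
                                        struct list_head *rebind_list)
{
        struct drm_gpuva *gpuva;

        /* Walk every VA mapping this vm_bo's GEM object and move the
         * owning xe_vma onto the rebind list. */
        drm_gpuvm_bo_for_each_va(gpuva, vm_bo)
                list_move_tail(&gpuva_to_vma(gpuva)->combined_links.rebind,
                               rebind_list);
}
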
619 !(vma->gpuva.flags & XE_VMA_DESTROYED) && vma->tile_present) { in vma_userptr_invalidate()
767 op->base.map.va.addr = vma->gpuva.va.addr; in xe_vm_populate_rebind()
768 op->base.map.va.range = vma->gpuva.va.range; in xe_vm_populate_rebind()
769 op->base.map.gem.obj = vma->gpuva.gem.obj; in xe_vm_populate_rebind()
770 op->base.map.gem.offset = vma->gpuva.gem.offset; in xe_vm_populate_rebind()
773 op->map.dumpable = vma->gpuva.flags & XE_VMA_DUMPABLE; in xe_vm_populate_rebind()
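
The xe_vm_populate_rebind() hits mirror an existing VMA back into a GPUVA map operation. A sketch, assuming struct xe_vma_op embeds struct drm_gpuva_op as ->base and carries an xe-private ->map.dumpable field (both visible in the listing); the wrapper name is hypothetical:

static void populate_map_op_from_vma(struct xe_vma_op *op, struct xe_vma *vma)
{
        /* Re-describe the existing mapping as a map op... */
        op->base.map.va.addr = vma->gpuva.va.addr;
        op->base.map.va.range = vma->gpuva.va.range;
        op->base.map.gem.obj = vma->gpuva.gem.obj;
        op->base.map.gem.offset = vma->gpuva.gem.offset;

        /* ...and carry the driver-private dump annotation across. */
        op->map.dumpable = vma->gpuva.flags & XE_VMA_DUMPABLE;
}
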
941 vma->gpuva.flags |= DRM_GPUVA_SPARSE; in xe_vma_create()
943 vma->gpuva.gem.obj = &bo->ttm.base; in xe_vma_create()
948 INIT_LIST_HEAD(&vma->gpuva.gem.entry); in xe_vma_create()
949 vma->gpuva.vm = &vm->gpuvm; in xe_vma_create()
950 vma->gpuva.va.addr = start; in xe_vma_create()
951 vma->gpuva.va.range = end - start + 1; in xe_vma_create()
953 vma->gpuva.flags |= XE_VMA_READ_ONLY; in xe_vma_create()
955 vma->gpuva.flags |= XE_VMA_DUMPABLE; in xe_vma_create()
961 vma->gpuva.flags |= XE_VMA_ATOMIC_PTE_BIT; in xe_vma_create()
970 vm_bo = drm_gpuvm_bo_obtain(vma->gpuva.vm, &bo->ttm.base); in xe_vma_create()
978 vma->gpuva.gem.offset = bo_offset_or_userptr; in xe_vma_create()
979 drm_gpuva_link(&vma->gpuva, vm_bo); in xe_vma_create()
989 vma->gpuva.gem.offset = bo_offset_or_userptr; in xe_vma_create()
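
Most of the xe_vma_create() hits are initialization of the embedded drm_gpuva. A condensed sketch of that setup, assuming inclusive [start, end] bounds (line 951 computes the range as end - start + 1) and caller-supplied read_only/dumpable booleans; the helper name, the error handling and the userptr/NULL-mapping branches are simplified assumptions:

static int init_vma_gpuva(struct xe_vma *vma, struct xe_vm *vm,
                          struct xe_bo *bo, u64 start, u64 end,
                          u64 bo_offset, bool read_only, bool dumpable)
{
        struct drm_gpuvm_bo *vm_bo;

        INIT_LIST_HEAD(&vma->gpuva.gem.entry);
        vma->gpuva.vm = &vm->gpuvm;
        vma->gpuva.va.addr = start;
        vma->gpuva.va.range = end - start + 1;  /* end is inclusive */

        if (read_only)
                vma->gpuva.flags |= XE_VMA_READ_ONLY;
        if (dumpable)
                vma->gpuva.flags |= XE_VMA_DUMPABLE;

        if (!bo) {
                /* No backing object: stands in for the driver's sparse/NULL
                 * mapping case (line 941). */
                vma->gpuva.flags |= DRM_GPUVA_SPARSE;
                return 0;
        }

        vma->gpuva.gem.obj = &bo->ttm.base;
        vma->gpuva.gem.offset = bo_offset;

        /* Obtain (or create) the vm_bo tying this VM to the GEM object,
         * link the new VA into it, then drop obtain()'s reference; the
         * link holds its own. */
        vm_bo = drm_gpuvm_bo_obtain(vma->gpuva.vm, &bo->ttm.base);
        if (IS_ERR(vm_bo))
                return PTR_ERR(vm_bo);

        drm_gpuva_link(&vma->gpuva, vm_bo);
        drm_gpuvm_bo_put(vm_bo);

        return 0;
}
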
1066 xe_assert(vm->xe, vma->gpuva.flags & XE_VMA_DESTROYED); in xe_vma_destroy()
1074 drm_gpuva_unlink(&vma->gpuva); in xe_vma_destroy()
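
Destruction is the mirror image: the VMA must already carry XE_VMA_DESTROYED before its VA is unlinked from the vm_bo. A sketch covering just these two hits; the helper name and any locking around it are assumptions:

static void teardown_vma_gpuva(struct xe_vm *vm, struct xe_vma *vma)
{
        /* The unmap path must have flagged the VMA first (line 1066). */
        xe_assert(vm->xe, vma->gpuva.flags & XE_VMA_DESTROYED);

        /* Drop the VA from its vm_bo's list of mappings. */
        drm_gpuva_unlink(&vma->gpuva);
}
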
1137 struct drm_gpuva *gpuva; in xe_vm_find_overlapping_vma() local
1146 gpuva = drm_gpuva_find_first(&vm->gpuvm, start, range); in xe_vm_find_overlapping_vma()
1148 return gpuva ? gpuva_to_vma(gpuva) : NULL; in xe_vm_find_overlapping_vma()
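
Lookups go straight through the GPUVA interval tree rather than a driver-private structure. A sketch, assuming the caller holds the VM lock and has already validated start/range:

static struct xe_vma *find_overlapping_vma(struct xe_vm *vm,
                                           u64 start, u64 range)
{
        struct drm_gpuva *gpuva;

        /* First VA overlapping [start, start + range), if any. */
        gpuva = drm_gpuva_find_first(&vm->gpuvm, start, range);

        return gpuva ? gpuva_to_vma(gpuva) : NULL;
}
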
1159 err = drm_gpuva_insert(&vm->gpuvm, &vma->gpuva); in xe_vm_insert_vma()
1172 drm_gpuva_remove(&vma->gpuva); in xe_vm_remove_vma()
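
Insertion and removal are thin wrappers over the GPUVA tree. The VA's addr/range must already be populated (see the xe_vma_create() sketch above), and insertion can fail, e.g. for an invalid or already-occupied range; the wrapper names are hypothetical:

static int vm_insert_vma(struct xe_vm *vm, struct xe_vma *vma)
{
        return drm_gpuva_insert(&vm->gpuvm, &vma->gpuva);
}

static void vm_remove_vma(struct xe_vma *vma)
{
        drm_gpuva_remove(&vma->gpuva);
}
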
1538 struct drm_gpuva *gpuva, *next; in xe_vm_close_and_put() local
1564 drm_gpuvm_for_each_va_safe(gpuva, next, &vm->gpuvm) { in xe_vm_close_and_put()
1565 vma = gpuva_to_vma(gpuva); in xe_vm_close_and_put()
1569 vma->gpuva.flags |= XE_VMA_DESTROYED; in xe_vm_close_and_put()
1583 vma->gpuva.flags |= XE_VMA_DESTROYED; in xe_vm_close_and_put()
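
Teardown walks every VA with the _safe iterator because each pass removes the VA from the tree. A sketch of the close path's loop; the remove/unlink calls stand in for the driver's own xe_vm_remove_vma()/xe_vma_destroy() helpers seen elsewhere in this listing, and the two DESTROYED hits at lines 1569 and 1583 sit on different branches of the real function:

static void vm_close_unmap_all(struct xe_vm *vm)
{
        struct drm_gpuva *gpuva, *next;
        struct xe_vma *vma;

        drm_gpuvm_for_each_va_safe(gpuva, next, &vm->gpuvm) {
                vma = gpuva_to_vma(gpuva);

                /* Mark the mapping dead so concurrent paths (e.g. the
                 * userptr invalidation check at line 619) skip it. */
                vma->gpuva.flags |= XE_VMA_DESTROYED;

                drm_gpuva_remove(&vma->gpuva);
                drm_gpuva_unlink(&vma->gpuva);
        }
}
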
1846 vma->gpuva.flags |= XE_VMA_DESTROYED; in prep_vma_destroy()
2036 if (vma->gpuva.flags & XE_VMA_PTE_1G) in xe_vma_max_pte_size()
2038 else if (vma->gpuva.flags & (XE_VMA_PTE_2M | XE_VMA_PTE_COMPACT)) in xe_vma_max_pte_size()
2040 else if (vma->gpuva.flags & XE_VMA_PTE_64K) in xe_vma_max_pte_size()
2042 else if (vma->gpuva.flags & XE_VMA_PTE_4K) in xe_vma_max_pte_size()
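
The xe_vma_max_pte_size() hits decode the driver-private PTE-size flag bits back into a byte size. A sketch of that mapping; the test order follows lines 2036-2042, while the returned SZ_* values and the final fallback are assumptions consistent with the flag names:

static u64 vma_max_pte_size(struct xe_vma *vma)
{
        if (vma->gpuva.flags & XE_VMA_PTE_1G)
                return SZ_1G;
        else if (vma->gpuva.flags & (XE_VMA_PTE_2M | XE_VMA_PTE_COMPACT))
                return SZ_2M;
        else if (vma->gpuva.flags & XE_VMA_PTE_64K)
                return SZ_64K;
        else if (vma->gpuva.flags & XE_VMA_PTE_4K)
                return SZ_4K;

        return SZ_1G;   /* no hint recorded yet: assume the largest */
}
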
2052 vma->gpuva.flags |= XE_VMA_PTE_1G; in xe_vma_set_pte_size()
2055 vma->gpuva.flags |= XE_VMA_PTE_2M; in xe_vma_set_pte_size()
2058 vma->gpuva.flags |= XE_VMA_PTE_64K; in xe_vma_set_pte_size()
2061 vma->gpuva.flags |= XE_VMA_PTE_4K; in xe_vma_set_pte_size()
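
xe_vma_set_pte_size() is the inverse: record one flag for a given size. A sketch assuming the same SZ_* granularities; how unexpected sizes are handled is not visible in the listing and is left out here:

static void vma_set_pte_size(struct xe_vma *vma, u64 size)
{
        switch (size) {
        case SZ_1G:
                vma->gpuva.flags |= XE_VMA_PTE_1G;
                break;
        case SZ_2M:
                vma->gpuva.flags |= XE_VMA_PTE_2M;
                break;
        case SZ_64K:
                vma->gpuva.flags |= XE_VMA_PTE_64K;
                break;
        case SZ_4K:
                vma->gpuva.flags |= XE_VMA_PTE_4K;
                break;
        }
}
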
2296 vma->gpuva.flags &= ~XE_VMA_DESTROYED; in xe_vma_op_unwind()
2317 vma->gpuva.flags &= ~XE_VMA_DESTROYED; in xe_vma_op_unwind()
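
The xe_vma_op_unwind() hits undo the mark placed by prep_vma_destroy() at line 1846: if an unmap/remap operation is rolled back, XE_VMA_DESTROYED is cleared so the old mapping stays live. A minimal sketch of the pair; the helper names and any locking are assumptions:

/* Queueing an unmap/remap op marks the old mapping for destruction... */
static void prep_destroy_mark(struct xe_vma *vma)
{
        vma->gpuva.flags |= XE_VMA_DESTROYED;
}

/* ...and unwinding the op clears the mark again (lines 2296/2317). */
static void unwind_destroy_mark(struct xe_vma *vma)
{
        vma->gpuva.flags &= ~XE_VMA_DESTROYED;
}
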
3245 struct drm_gpuva *gpuva; in xe_vm_snapshot_capture() local
3251 drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) { in xe_vm_snapshot_capture()
3252 if (gpuva->flags & XE_VMA_DUMPABLE) in xe_vm_snapshot_capture()
3265 drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) { in xe_vm_snapshot_capture()
3266 struct xe_vma *vma = gpuva_to_vma(gpuva); in xe_vm_snapshot_capture()
3267 struct xe_bo *bo = vma->gpuva.gem.obj ? in xe_vm_snapshot_capture()
3268 gem_to_xe_bo(vma->gpuva.gem.obj) : NULL; in xe_vm_snapshot_capture()
3270 if (!(gpuva->flags & XE_VMA_DUMPABLE)) in xe_vm_snapshot_capture()
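
Snapshot capture makes two passes over the same iterator: first counting the mappings flagged XE_VMA_DUMPABLE (presumably to size the snapshot allocation), then recording each of them and resolving the backing xe_bo via gpuva.gem.obj when one exists. A sketch of that shape; the allocation and the per-mapping record are placeholders:

static unsigned int snapshot_capture_dumpable(struct xe_vm *vm)
{
        struct drm_gpuva *gpuva;
        unsigned int count = 0;

        /* Pass 1: how many mappings asked to be included in dumps? */
        drm_gpuvm_for_each_va(gpuva, &vm->gpuvm)
                if (gpuva->flags & XE_VMA_DUMPABLE)
                        count++;

        /* ...allocate 'count' snapshot slots here (omitted)... */

        /* Pass 2: record each dumpable mapping. */
        drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) {
                struct xe_vma *vma = gpuva_to_vma(gpuva);
                struct xe_bo *bo = vma->gpuva.gem.obj ?
                        gem_to_xe_bo(vma->gpuva.gem.obj) : NULL;

                if (!(gpuva->flags & XE_VMA_DUMPABLE))
                        continue;

                /* record vma->gpuva.va.addr, va.range and bo (placeholder) */
                (void)bo;
        }

        return count;
}
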