/drivers/gpu/drm/i915/
  i915_vma.h
      131  return vma->node.size - 2 * vma->guard;  in __i915_vma_size()
      154  return vma->node.start + vma->guard;  in __i915_vma_offset()
      191  return vma;  in i915_vma_get()
      197  return vma;  in i915_vma_tryget()
      221  cmp = vma->gtt_view.type;  in i915_vma_compare()
      276  #define assert_vma_held(vma) dma_resv_assert_held((vma)->obj->base.resv)  argument
      310  atomic_inc(&vma->flags);  in __i915_vma_pin()
      317  atomic_dec(&vma->flags);  in __i915_vma_unpin()
      323  __i915_vma_unpin(vma);  in i915_vma_unpin()
      403  if (vma->fence)  in i915_vma_unpin_fence()
      [all …]
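The __i915_vma_size()/__i915_vma_offset() hits above show how i915 carves guard padding out of the underlying drm_mm node: the usable window starts one guard past node.start and gives up one guard at each end of node.size. Below is a minimal userspace sketch of that arithmetic, using hypothetical toy_* stand-ins rather than the real i915 structures.

```c
/* Hedged sketch of the guard-page arithmetic visible in __i915_vma_size()
 * and __i915_vma_offset(); toy_* types are stand-ins, not i915's. */
#include <assert.h>
#include <inttypes.h>
#include <stdio.h>

struct toy_node { uint64_t start, size; };          /* drm_mm_node stand-in */
struct toy_vma  { struct toy_node node; uint64_t guard; };

static uint64_t toy_vma_size(const struct toy_vma *vma)
{
	/* usable size: the node minus one guard region at each end */
	return vma->node.size - 2 * vma->guard;
}

static uint64_t toy_vma_offset(const struct toy_vma *vma)
{
	/* usable offset: skip the leading guard */
	return vma->node.start + vma->guard;
}

int main(void)
{
	struct toy_vma vma = {
		.node  = { .start = 0x10000, .size = 0x6000 },
		.guard = 0x1000,
	};

	assert(toy_vma_offset(&vma) == 0x11000);
	assert(toy_vma_size(&vma) == 0x4000);
	printf("offset=0x%" PRIx64 " size=0x%" PRIx64 "\n",
	       toy_vma_offset(&vma), toy_vma_size(&vma));
	return 0;
}
```

i915_vma_resource_init_from_vma() in the i915_vma.c listing below passes exactly this pair, __i915_vma_offset(vma) and __i915_vma_size(vma), when it populates the VMA resource.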
  i915_vma.c
      82  vma->node.start, vma->node.size, reason);  in vma_print_allocator()
      89  vma->node.start, vma->node.size, reason, buf);  in vma_print_allocator()
      266  list_add(&vma->obj_link, &obj->vma.list);  in vma_create()
      452  i915_vma_resource_init(vma_res, vma->vm, vma->pages, &vma->page_sizes,  in i915_vma_resource_init_from_vma()
      455  vma->ops, vma->private, __i915_vma_offset(vma),  in i915_vma_resource_init_from_vma()
      456  __i915_vma_size(vma), vma->size, vma->guard);  in i915_vma_resource_init_from_vma()
      484  GEM_BUG_ON(vma->size > i915_vma_size(vma));  in i915_vma_bind()
      573  vma->ops->bind_vma(vma->vm, NULL, vma->resource, pat_index,  in i915_vma_bind()
      1362  vma->page_sizes = vma->obj->mm.page_sizes;  in i915_vma_get_pages()
      1400  if (vma->pages != vma->obj->mm.pages) {  in __vma_put_pages()
      [all …]
  i915_gem_evict.c
      88  if (dying_vma(vma))  in ungrab_vma()
      98  struct i915_vma *vma,  in mark_free()  argument
      105  if (!grab_vma(vma, ww))  in mark_free()
      224  active = vma;  in i915_gem_evict_something()
      238  ungrab_vma(vma);  in i915_gem_evict_something()
      287  ungrab_vma(vma);  in i915_gem_evict_something()
      297  ungrab_vma(vma);  in i915_gem_evict_something()
      307  ungrab_vma(vma);  in i915_gem_evict_something()
      380  vma = container_of(node, typeof(*vma), node);  in i915_gem_evict_for_node()
      424  __i915_vma_pin(vma);  in i915_gem_evict_for_node()
      [all …]
  i915_gem.c
      140  while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,  in i915_gem_object_unbind()
      338  vma = NULL;  in i915_gem_gtt_prepare()
      398  if (IS_ERR(vma)) {  in i915_gem_gtt_pread()
      576  if (IS_ERR(vma)) {  in i915_gem_gtt_pwrite_fast()
      882  if (!reg->vma)  in i915_gem_runtime_suspend()
      896  rb_erase(&vma->obj_node, &obj->vma.tree);  in discard_ggtt_vma()
      951  if (IS_ERR(vma))  in i915_gem_object_ggtt_pin_ww()
      952  return vma;  in i915_gem_object_ggtt_pin_ww()
      956  if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))  in i915_gem_object_ggtt_pin_ww()
      975  if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma)) {  in i915_gem_object_ggtt_pin_ww()
      [all …]
  i915_debugfs.c
      131  if (i915_vma_is_ggtt(vma))  in stringify_vma_type()
      134  if (i915_vma_is_dpt(vma))  in stringify_vma_type()
      176  struct i915_vma *vma;  in i915_debugfs_describe_obj()  local
      193  spin_lock(&obj->vma.lock);  in i915_debugfs_describe_obj()
      194  list_for_each_entry(vma, &obj->vma.list, obj_link) {  in i915_debugfs_describe_obj()
      198  spin_unlock(&obj->vma.lock);  in i915_debugfs_describe_obj()
      205  i915_vma_offset(vma), i915_vma_size(vma),  in i915_debugfs_describe_obj()
      208  if (i915_vma_is_ggtt(vma) || i915_vma_is_dpt(vma)) {  in i915_debugfs_describe_obj()
      253  if (vma->fence)  in i915_debugfs_describe_obj()
      257  spin_lock(&obj->vma.lock);  in i915_debugfs_describe_obj()
      [all …]
/drivers/gpu/drm/i915/display/
  intel_fb_pin.c
      35  struct i915_vma *vma;  in intel_fb_pin_to_dpt()  local
      77  if (IS_ERR(vma)) {  in intel_fb_pin_to_dpt()
      97  vma->display_alignment = max(vma->display_alignment, alignment);  in intel_fb_pin_to_dpt()
      101  i915_vma_get(vma);  in intel_fb_pin_to_dpt()
      105  return vma;  in intel_fb_pin_to_dpt()
      201  if (vma->fence)  in intel_fb_pin_to_ggtt()
      221  return vma;  in intel_fb_pin_to_ggtt()
      275  if (IS_ERR(vma))  in intel_plane_pin_fb()
      325  if (vma)  in intel_plane_unpin_fb()
      329  if (vma)  in intel_plane_unpin_fb()
      [all …]
/drivers/gpu/drm/nouveau/
  nouveau_vmm.c
      31  if (vma->mem) {  in nouveau_vma_unmap()
      32  nvif_vmm_unmap(&vma->vmm->vmm, vma->addr);  in nouveau_vma_unmap()
      33  vma->mem = NULL;  in nouveau_vma_unmap()
      44  vma->mem = mem;  in nouveau_vma_map()
      55  return vma;  in nouveau_vma_find()
      65  if (vma && --vma->refs <= 0) {  in nouveau_vma_del()
      86  vma->refs++;  in nouveau_vma_new()
      90  if (!(vma = *pvma = kmalloc(sizeof(*vma), GFP_KERNEL)))  in nouveau_vma_new()
      92  vma->vmm = vmm;  in nouveau_vma_new()
      93  vma->refs = 1;  in nouveau_vma_new()
      [all …]
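The nouveau_vma_new()/nouveau_vma_del() hits outline a find-or-create scheme: an existing mapping for the same object/VMM pair is reused by bumping refs, otherwise a fresh one is allocated with refs = 1, and the mapping is torn down once the count drops to zero. A hedged, self-contained sketch of that pattern follows, with hypothetical toy_* names and a plain list in place of the driver's internals.

```c
/* Hedged sketch of the find-or-create + refcount pattern suggested by
 * nouveau_vma_new()/nouveau_vma_del() above; toy_* names are hypothetical. */
#include <stdio.h>
#include <stdlib.h>

struct toy_vma {
	struct toy_vma *next;
	const void *owner;	/* stand-in for the (object, vmm) lookup key */
	unsigned long addr;
	int refs;
};

static struct toy_vma *vma_list;

static struct toy_vma *toy_vma_find(const void *owner)
{
	struct toy_vma *vma;

	for (vma = vma_list; vma; vma = vma->next)
		if (vma->owner == owner)
			return vma;
	return NULL;
}

static struct toy_vma *toy_vma_new(const void *owner, unsigned long addr)
{
	struct toy_vma *vma = toy_vma_find(owner);

	if (vma) {		/* already mapped: just take another reference */
		vma->refs++;
		return vma;
	}

	vma = calloc(1, sizeof(*vma));
	if (!vma)
		return NULL;
	vma->owner = owner;
	vma->addr = addr;
	vma->refs = 1;
	vma->next = vma_list;
	vma_list = vma;
	return vma;
}

static void toy_vma_del(struct toy_vma *vma)
{
	struct toy_vma **pvma;

	if (!vma || --vma->refs > 0)
		return;

	/* last reference gone: unlink and free */
	for (pvma = &vma_list; *pvma; pvma = &(*pvma)->next) {
		if (*pvma == vma) {
			*pvma = vma->next;
			break;
		}
	}
	free(vma);
}

int main(void)
{
	int obj;	/* any address works as a key */
	struct toy_vma *a = toy_vma_new(&obj, 0x1000);
	struct toy_vma *b = toy_vma_new(&obj, 0x1000);

	printf("same vma: %d, refs: %d\n", a == b, a->refs);	/* 1, 2 */
	toy_vma_del(b);
	toy_vma_del(a);		/* freed here */
	return 0;
}
```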
/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
  vmm.c
      922  if (addr >= vma->addr + vma->size)  in nvkm_vmm_node_search()
      983  if (!(vma = nvkm_vma_tail(vma, vma->size + vma->addr - addr)))  in nvkm_vmm_node_split()
      1006  vma->addr, (u64)vma->size,  in nvkm_vma_dump()
      1217  if (vma->addr == addr && vma->part && (prev = node(vma, prev))) {  in nvkm_vmm_pfn_split_merge()
      1245  if (!vma->mapped || vma->memory)  in nvkm_vmm_pfn_unmap()
      1259  } while ((vma = node(vma, next)) && (start = vma->addr) < limit);  in nvkm_vmm_pfn_unmap()
      1317  if (!vma->mapref || vma->memory) {  in nvkm_vmm_pfn_map()
      1373  vma = node(vma, next);  in nvkm_vmm_pfn_map()
      1614  if (vma->mapref || !vma->sparse) {  in nvkm_vmm_put_locked()
      1655  if (vma->sparse && !vma->mapref) {  in nvkm_vmm_put_locked()
      [all …]
  uvmm.c
      127  if (ret = -ENOENT, !vma || vma->addr != addr) {  in nvkm_uvmm_mthd_unmap()
      129  addr, vma ? vma->addr : ~0ULL);  in nvkm_uvmm_mthd_unmap()
      191  if (ret = -EINVAL, vma->mapped && !vma->memory) {  in nvkm_uvmm_mthd_map()
      197  if (addr + size > vma->addr + vma->size || vma->memory ||  in nvkm_uvmm_mthd_map()
      201  !!vma->memory, vma->refd, vma->mapref,  in nvkm_uvmm_mthd_map()
      202  addr, size, vma->addr, (u64)vma->size);  in nvkm_uvmm_mthd_map()
      206  vma = nvkm_vmm_node_split(vmm, vma, addr, size);  in nvkm_uvmm_mthd_map()
      207  if (!vma) {  in nvkm_uvmm_mthd_map()
      249  if (ret = -ENOENT, !vma || vma->addr != addr || vma->part) {  in nvkm_uvmm_mthd_put()
      251  vma ? vma->addr : ~0ULL, vma ? vma->part : 0);  in nvkm_uvmm_mthd_put()
      [all …]
/drivers/gpu/drm/i915/selftests/
  i915_gem_gtt.c
      483  __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),  in fill_hole()
      516  __func__, p->name, vma->node.start, vma->node.size,  in fill_hole()
      525  __func__, p->name, vma->node.start, vma->node.size,  in fill_hole()
      562  __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),  in fill_hole()
      595  __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),  in fill_hole()
      604  __func__, p->name, vma->node.start, vma->node.size,  in fill_hole()
      1449  vma->resource->bi.pages = vma->pages;  in track_vma_bind()
      1452  list_move_tail(&vma->vm_link, &vma->vm->bound_list);  in track_vma_bind()
      1587  vma->node.start, vma->node.size,  in igt_gtt_reserve()
      1633  vma->node.start, vma->node.size,  in igt_gtt_reserve()
      [all …]
  i915_vma.c
      73  return vma;  in checked_vma_instance()
      93  if (i915_vma_compare(vma, vma->vm,  in checked_vma_instance()
      94  i915_vma_is_ggtt(vma) ? &vma->gtt_view : NULL)) {  in checked_vma_instance()
      104  return vma;  in checked_vma_instance()
      660  if (vma->node.size < vma->size) {  in igt_vma_rotate_remap()
      662  vma->size, vma->node.size);  in igt_vma_rotate_remap()
      760  if (vma->node.size < vma->size) {  in assert_pin()
      762  name, vma->size, vma->node.size);  in assert_pin()
      773  if (vma->pages == vma->obj->mm.pages) {  in assert_pin()
      785  if (vma->pages != vma->obj->mm.pages) {  in assert_pin()
      [all …]
/drivers/gpu/drm/xe/
  xe_trace_bo.h
      21  #define __dev_name_vma(vma) __dev_name_vm(xe_vma_vm(vma))  argument
      104  __entry->vma = vma;
      120  TP_ARGS(vma)
      125  TP_ARGS(vma)
      130  TP_ARGS(vma)
      135  TP_ARGS(vma)
      140  TP_ARGS(vma)
      145  TP_ARGS(vma)
      150  TP_ARGS(vma)
      155  TP_ARGS(vma)
      [all …]
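The run of bare TP_ARGS(vma) lines is the signature of a tracepoint family built on one event class: the class defines TP_STRUCT__entry/TP_fast_assign/TP_printk once, and every DEFINE_EVENT after it only restates the prototype and arguments. A hedged sketch of such a trace header follows, with a hypothetical "toy" trace system and struct rather than the real xe definitions.

```c
/* toy_trace.h — hedged sketch of a DECLARE_EVENT_CLASS/DEFINE_EVENT family;
 * the "toy" system and struct toy_vma are hypothetical stand-ins. */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM toy

#if !defined(_TOY_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _TOY_TRACE_H_

#include <linux/tracepoint.h>
#include <linux/types.h>

#ifndef _TOY_TRACE_TYPES_
#define _TOY_TRACE_TYPES_
/* normally this lives in the driver's own header */
struct toy_vma { u64 addr; u64 size; };
#endif

DECLARE_EVENT_CLASS(toy_vma_evt,
	TP_PROTO(struct toy_vma *vma),
	TP_ARGS(vma),

	TP_STRUCT__entry(
		__field(u64, addr)
		__field(u64, size)
	),

	TP_fast_assign(
		__entry->addr = vma->addr;
		__entry->size = vma->size;
	),

	TP_printk("addr=0x%llx size=0x%llx", __entry->addr, __entry->size)
);

/* Each member of the family needs only TP_PROTO/TP_ARGS — hence the short
 * TP_ARGS(vma) stanzas in the xe_trace_bo.h listing above. */
DEFINE_EVENT(toy_vma_evt, toy_vma_bind,
	     TP_PROTO(struct toy_vma *vma),
	     TP_ARGS(vma)
);

DEFINE_EVENT(toy_vma_evt, toy_vma_unbind,
	     TP_PROTO(struct toy_vma *vma),
	     TP_ARGS(vma)
);

#endif /* _TOY_TRACE_H_ */

/* This part must stay outside the include guard. */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE toy_trace
#include <trace/define_trace.h>
```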
  xe_vm.h
      112  return vma->gpuva.va.addr;  in xe_vma_start()
      117  return vma->gpuva.va.range;  in xe_vma_size()
      122  return xe_vma_start(vma) + xe_vma_size(vma);  in xe_vma_end()
      127  return vma->gpuva.gem.offset;  in xe_vma_bo_offset()
      148  return vma->gpuva.gem.offset;  in xe_vma_userptr()
      163  return !xe_vma_bo(vma);  in xe_vma_has_no_bo()
      168  return xe_vma_has_no_bo(vma) && !xe_vma_is_null(vma) &&  in xe_vma_is_userptr()
      169  !xe_vma_is_cpu_addr_mirror(vma);  in xe_vma_is_userptr()
      182  xe_assert(xe_vma_vm(vma)->xe, xe_vma_is_userptr(vma));  in to_userptr_vma()
      183  return container_of(vma, struct xe_userptr_vma, vma);  in to_userptr_vma()
      [all …]
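Two idioms recur in these xe_vm.h helpers: derived accessors (xe_vma_end() is simply start plus size over the underlying gpuva fields) and container_of() to step from an embedded struct xe_vma back out to the struct xe_userptr_vma that wraps it. Here is a small userspace sketch of both, with hypothetical toy_* types and a local container_of so it stands alone.

```c
/* Hedged sketch of the embed-and-container_of idiom used by to_userptr_vma()
 * above; toy_* types are hypothetical, and container_of is redefined for
 * userspace so the example is self-contained. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct toy_vma {
	unsigned long start;
	unsigned long size;
};

struct toy_userptr_vma {
	struct toy_vma vma;		/* embedded base object */
	unsigned long userptr;		/* extra state only userptr VMAs carry */
};

static unsigned long toy_vma_end(const struct toy_vma *vma)
{
	return vma->start + vma->size;	/* derived accessor, as in xe_vma_end() */
}

static struct toy_userptr_vma *to_toy_userptr_vma(struct toy_vma *vma)
{
	return container_of(vma, struct toy_userptr_vma, vma);
}

int main(void)
{
	struct toy_userptr_vma uvma = {
		.vma = { .start = 0x1000, .size = 0x2000 },
		.userptr = 0xdeadbeef,
	};
	struct toy_vma *vma = &uvma.vma;

	printf("end=0x%lx userptr=0x%lx\n",
	       toy_vma_end(vma), to_toy_userptr_vma(vma)->userptr);
	return 0;
}
```

to_userptr_vma() in the listing takes the same step, with xe_assert() first guarding that the VMA really is a userptr one.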
  xe_pt.c
      712  .vma = vma,  in xe_pt_stage_bind()
      908  u8 pt_mask = (vma->tile_present & ~vma->tile_invalidated);  in xe_pt_zap_ptes()
      1840  if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm)  in vma_reserve_fences()
      1860  xe_vma_start(vma), xe_vma_end(vma) - 1);  in bind_op_prepare()
      1904  pt_op->vma = vma;  in bind_op_prepare()
      1946  pt_op->vma = vma;  in bind_range_prepare()
      1964  if (!((vma->tile_present | vma->tile_staged) & BIT(tile->id)))  in unbind_op_prepare()
      1972  xe_vma_start(vma), xe_vma_end(vma) - 1);  in unbind_op_prepare()
      1974  pt_op->vma = vma;  in unbind_op_prepare()
      2217  if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm) {  in bind_op_commit()
      [all …]
  xe_vm.c
      71  struct xe_vma *vma = &uvma->vma;  in xe_vma_userptr_pin_pages()  local
      589  struct xe_vma *vma = &uvma->vma;  in __vma_userptr_invalidate()  local
      636  struct xe_vma *vma = &uvma->vma;  in vma_userptr_invalidate()  local
      647  xe_vma_start(vma), xe_vma_size(vma));  in vma_userptr_invalidate()
      856  op->map.vma = vma;  in xe_vm_populate_rebind()
      990  op->map_range.vma = vma;  in xe_vm_populate_range_rebind()
      1195  vma = &uvma->vma;  in xe_vma_create()
      1197  vma = kzalloc(sizeof(*vma), GFP_KERNEL);  in xe_vma_create()
      2195  if (!vma)  in xe_vm_find_vma_by_addr()
      2632  op->map.vma = vma;  in vm_bind_ioctl_ops_parse()
      [all …]
/drivers/pci/
  mmap.c
      25  struct vm_area_struct *vma,  in pci_mmap_resource_range()  argument
      32  if (vma->vm_pgoff + vma_pages(vma) > size)  in pci_mmap_resource_range()
      36  vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);  in pci_mmap_resource_range()
      38  vma->vm_page_prot = pgprot_device(vma->vm_page_prot);  in pci_mmap_resource_range()
      41  ret = pci_iobar_pfn(pdev, bar, vma);  in pci_mmap_resource_range()
      47  vma->vm_ops = &pci_phys_vm_ops;  in pci_mmap_resource_range()
      49  return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,  in pci_mmap_resource_range()
      50  vma->vm_end - vma->vm_start,  in pci_mmap_resource_range()
      51  vma->vm_page_prot);  in pci_mmap_resource_range()
      67  nr = vma_pages(vma);  in pci_mmap_fits()
      [all …]
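pci_mmap_resource_range() above shows the canonical shape of a BAR mmap handler: check that the requested window fits the resource, pick a page protection, then hand the whole range to io_remap_pfn_range() in one call. The sketch below applies the same shape to a hypothetical character device exposing BAR 0; mydev_mmap and the private_data convention are assumptions, not code from drivers/pci.

```c
/* Hedged sketch, not the real drivers/pci/mmap.c: a minimal mmap handler for
 * a hypothetical chardev that exposes BAR 0 of its PCI device to userspace. */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pci.h>

static int mydev_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct pci_dev *pdev = filp->private_data;	/* assumption: stashed at open() */
	resource_size_t start = pci_resource_start(pdev, 0);
	resource_size_t len = pci_resource_len(pdev, 0);
	unsigned long req = vma->vm_end - vma->vm_start;

	/* reject windows that run past the end of the BAR */
	if (vma->vm_pgoff + vma_pages(vma) > (len >> PAGE_SHIFT))
		return -EINVAL;

	/* MMIO registers: device memory, never ordinary cached pages */
	vma->vm_page_prot = pgprot_device(vma->vm_page_prot);

	/* map the whole request up front; no fault handler needed afterwards */
	return io_remap_pfn_range(vma, vma->vm_start,
				  (start >> PAGE_SHIFT) + vma->vm_pgoff,
				  req, vma->vm_page_prot);
}
```

The listing's version additionally chooses between pgprot_writecombine() and pgprot_device() and routes I/O-port BARs through pci_iobar_pfn(), which this sketch leaves out.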
/drivers/gpu/drm/i915/gt/
  intel_ring.c
      37  struct i915_vma *vma = ring->vma;  in intel_ring_pin()  local
      57  if (i915_vma_is_map_and_fenceable(vma) && !HAS_LLC(vma->vm->i915)) {  in intel_ring_pin()
      60  int type = intel_gt_coherent_map_type(vma->vm->gt, vma->obj, false);  in intel_ring_pin()
      79  i915_vma_unpin(vma);  in intel_ring_pin()
      96  struct i915_vma *vma = ring->vma;  in intel_ring_unpin()  local
      102  if (i915_vma_is_map_and_fenceable(vma) && !HAS_LLC(vma->vm->i915))  in intel_ring_unpin()
      135  if (IS_ERR(vma))  in create_ring_vma()
      138  return vma;  in create_ring_vma()
      142  return vma;  in create_ring_vma()
      175  if (IS_ERR(vma)) {  in intel_engine_create_ring()
      [all …]
  intel_ggtt_fencing.c
      210  if (vma) {  in fence_update()
      224  GEM_BUG_ON(vma->fence_size > i915_vma_size(vma));  in fence_update()
      268  GEM_BUG_ON(vma);  in fence_update()
      272  WRITE_ONCE(fence->vma, vma);  in fence_update()
      275  if (vma) {  in fence_update()
      300  GEM_BUG_ON(fence->vma != vma);  in i915_vma_revoke_fence()
      326  return fence->vma && i915_vma_is_active(fence->vma);  in fence_is_active()
      336  GEM_BUG_ON(fence->vma && fence->vma->fence != fence);  in fence_find()
      373  if (vma->fence) {  in __i915_vma_pin_fence()
      375  GEM_BUG_ON(fence->vma != vma);  in __i915_vma_pin_fence()
      [all …]
/drivers/gpu/drm/xe/display/
  xe_fb_pin.c
      164  vma->dpt = dpt;  in __xe_pin_fb_vma_dpt()
      280  struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);  in __xe_pin_fb_vma()  local
      285  if (!vma)  in __xe_pin_fb_vma()
      324  vma->bo = bo;  in __xe_pin_fb_vma()
      332  return vma;  in __xe_pin_fb_vma()
      339  kfree(vma);  in __xe_pin_fb_vma()
      350  if (vma->dpt)  in __xe_unpin_fb_vma()
      353  vma->bo->ggtt_node[tile_id]->base.start != vma->node->base.start)  in __xe_unpin_fb_vma()
      359  kfree(vma);  in __xe_unpin_fb_vma()
      399  if (vma)  in reuse_vma()
      [all …]
  xe_dsb_buffer.c
      15  return xe_bo_ggtt_addr(dsb_buf->vma->bo);  in intel_dsb_buffer_ggtt_offset()
      20  iosys_map_wr(&dsb_buf->vma->bo->vmap, idx * 4, u32, val);  in intel_dsb_buffer_write()
      39  struct i915_vma *vma;  in intel_dsb_buffer_create()  local
      41  vma = kzalloc(sizeof(*vma), GFP_KERNEL);  in intel_dsb_buffer_create()
      42  if (!vma)  in intel_dsb_buffer_create()
      52  kfree(vma);  in intel_dsb_buffer_create()
      56  vma->bo = obj;  in intel_dsb_buffer_create()
      57  dsb_buf->vma = vma;  in intel_dsb_buffer_create()
      65  xe_bo_unpin_map_no_vm(dsb_buf->vma->bo);  in intel_dsb_buffer_cleanup()
      66  kfree(dsb_buf->vma);  in intel_dsb_buffer_cleanup()
      [all …]
/drivers/gpu/drm/i915/gem/
  i915_gem_mman.c
      31  if (vma->vm_file != filp)  in __vma_matches()
      35  (vma->vm_end - vma->vm_start) == PAGE_ALIGN(size);  in __vma_matches()
      112  if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))  in i915_gem_mmap_ioctl()
      113  vma->vm_page_prot =  in i915_gem_mmap_ioctl()
      391  if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK)) {  in vm_fault_gtt()
      407  if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK)) {  in vm_fault_gtt()
      429  if (IS_ERR(vma)) {  in vm_fault_gtt()
      430  ret = PTR_ERR(vma);  in vm_fault_gtt()
      475  vma->mmo = mmo;  in vm_fault_gtt()
      1045  vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);  in i915_gem_object_mmap()
      [all …]
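The __vma_matches() fragments reduce to a three-way test on an existing userspace mapping: same backing file, same start address, and a length equal to the page-aligned request. A hedged sketch of that predicate, using only standard vm_area_struct fields (the real i915 helper may differ in detail):

```c
/* Hedged sketch of the predicate suggested by __vma_matches() above;
 * kernel context assumed. */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/types.h>

static bool vma_matches(const struct vm_area_struct *vma,
			const struct file *filp,
			unsigned long addr, unsigned long size)
{
	if (vma->vm_file != filp)		/* must map the same backing file */
		return false;

	return vma->vm_start == addr &&		/* starts where the caller asked */
	       (vma->vm_end - vma->vm_start) == PAGE_ALIGN(size);
}
```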
  i915_gem_tiling.c
      187  struct i915_vma *vma, *vn;  in i915_gem_object_fence_prepare()  local
      196  spin_lock(&obj->vma.lock);  in i915_gem_object_fence_prepare()
      197  for_each_ggtt_vma(vma, obj) {  in i915_gem_object_fence_prepare()
      205  spin_unlock(&obj->vma.lock);  in i915_gem_object_fence_prepare()
      234  struct i915_vma *vma;  in i915_gem_object_set_tiling()  local
      293  spin_lock(&obj->vma.lock);  in i915_gem_object_set_tiling()
      295  vma->fence_size =  in i915_gem_object_set_tiling()
      297  vma->fence_alignment =  in i915_gem_object_set_tiling()
      301  if (vma->fence)  in i915_gem_object_set_tiling()
      302  vma->fence->dirty = true;  in i915_gem_object_set_tiling()
      [all …]
/drivers/gpu/drm/ttm/
  ttm_bo_vm.c
      64  mmap_read_unlock(vmf->vma->vm_mm);  in ttm_bo_vm_fault_idle()
      187  struct vm_area_struct *vma = vmf->vma;  in ttm_bo_vm_fault_reserved()  local
      214  page_last = vma_pages(vma) + vma->vm_pgoff -  in ttm_bo_vm_fault_reserved()
      294  struct vm_area_struct *vma = vmf->vma;  in ttm_bo_vm_dummy_page()  local
      314  for (address = vma->vm_start; address < vma->vm_end;  in ttm_bo_vm_dummy_page()
      324  struct vm_area_struct *vma = vmf->vma;  in ttm_bo_vm_fault()  local
      335  prot = vma->vm_page_prot;  in ttm_bo_vm_fault()
      366  vma->vm_private_data = NULL;  in ttm_bo_vm_close()
      496  if (!vma->vm_ops)  in ttm_bo_mmap_obj()
      497  vma->vm_ops = &ttm_bo_vm_ops;  in ttm_bo_mmap_obj()
      [all …]
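ttm_bo_vm_fault() and friends follow the usual pattern for device-memory fault handlers: the handler receives a struct vm_fault, reaches the VMA through vmf->vma, and inserts PFNs for the faulting range rather than returning struct pages. A generic, hedged sketch of that shape is below; the mydev_* names are hypothetical and none of TTM's actual reserve/move logic is reproduced.

```c
/* Hedged sketch of a PFN-inserting fault handler in the style hinted at by
 * ttm_bo_vm_fault() above; mydev_* names are hypothetical. The mmap()
 * handler is assumed to have set VM_PFNMAP and stashed the buffer in
 * vm_private_data. */
#include <linux/mm.h>

struct mydev_buffer {
	unsigned long base_pfn;		/* first PFN of the device memory backing */
	unsigned long num_pages;
};

static vm_fault_t mydev_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct mydev_buffer *buf = vma->vm_private_data;
	unsigned long page_offset = vmf->pgoff - vma->vm_pgoff;

	if (page_offset >= buf->num_pages)
		return VM_FAULT_SIGBUS;

	/* insert one PFN for the faulting address; the MM core turns the
	 * returned code into a retry/complete decision */
	return vmf_insert_pfn(vma, vmf->address, buf->base_pfn + page_offset);
}

static const struct vm_operations_struct mydev_vm_ops = {
	.fault = mydev_vm_fault,
};
```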
/drivers/gpu/drm/i915/gem/selftests/
  huge_pages.c
      415  vma->page_sizes.sg, vma->resource->page_sizes_gtt);  in igt_check_page_sizes()
      428  struct i915_vma *vma;  in igt_mock_exhaust_device_supported_pages()  local
      462  if (IS_ERR(vma)) {  in igt_mock_exhaust_device_supported_pages()
      530  if (IS_ERR(vma)) {  in igt_mock_memory_region_huge_pages()
      568  i915_vma_unpin(vma);  in igt_mock_memory_region_huge_pages()
      622  if (IS_ERR(vma)) {  in igt_mock_ppgtt_misaligned_dma()
      772  if (IS_ERR(vma)) {  in igt_ppgtt_huge_fill()
      1156  if (IS_ERR(vma))  in __igt_write_huge()
      1673  if (IS_ERR(vma)) {  in igt_ppgtt_mixed()
      1807  if (IS_ERR(vma)) {  in igt_tmpfs_fallback()
      [all …]
/drivers/misc/ocxl/
  context.c
      141  struct vm_area_struct *vma = vmf->vma;  in ocxl_mmap_fault()  local
      162  struct vm_area_struct *vma)  in check_mmap_afu_irq()  argument
      167  if (vma_pages(vma) != 1)  in check_mmap_afu_irq()
      180  if ((vma->vm_flags & VM_READ) || (vma->vm_flags & VM_EXEC) ||  in check_mmap_afu_irq()
      181  !(vma->vm_flags & VM_WRITE))  in check_mmap_afu_irq()
      188  struct vm_area_struct *vma)  in check_mmap_mmio()  argument
      190  if ((vma_pages(vma) + vma->vm_pgoff) >  in check_mmap_mmio()
      201  rc = check_mmap_mmio(ctx, vma);  in ocxl_context_mmap()
      203  rc = check_mmap_afu_irq(ctx, vma);  in ocxl_context_mmap()
      208  vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);  in ocxl_context_mmap()
      [all …]
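check_mmap_afu_irq() above is a compact example of mmap-time validation: bound the request with vma_pages() and vm_pgoff, insist on exactly the access the hardware supports (readable or executable mappings are refused, writable is required), and force an uncached protection before exposing MMIO. The sketch below reuses those checks for a hypothetical region that must be mapped as a single write-only page; the myafu_* names and the one-page policy are assumptions, not ocxl code.

```c
/* Hedged sketch of mmap-time validation in the style of check_mmap_afu_irq();
 * myafu_* is hypothetical and the one-write-only-page policy is an example. */
#include <linux/fs.h>
#include <linux/mm.h>

static int myafu_check_mmap(struct vm_area_struct *vma)
{
	/* exactly one page, starting at offset 0 of this region */
	if (vma_pages(vma) != 1 || vma->vm_pgoff != 0)
		return -EINVAL;

	/* the hardware only tolerates stores: no readable/executable mappings */
	if ((vma->vm_flags & (VM_READ | VM_EXEC)) || !(vma->vm_flags & VM_WRITE))
		return -EINVAL;

	return 0;
}

static int myafu_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int rc = myafu_check_mmap(vma);

	if (rc)
		return rc;

	/* MMIO must never be mapped cacheable */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	/* a real driver would install its vm_ops / fault handler here,
	 * much as ocxl_context_mmap() does before returning */
	return 0;
}
```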