| /linux/drivers/gpu/drm/i915/ |
| i915_vma.h |
  131  return vma->node.size - 2 * vma->guard;  in __i915_vma_size()
  154  return vma->node.start + vma->guard;  in __i915_vma_offset()
  191  return vma;  in i915_vma_get()
  197  return vma;  in i915_vma_tryget()
  221  cmp = vma->gtt_view.type;  in i915_vma_compare()
  276  #define assert_vma_held(vma) dma_resv_assert_held((vma)->obj->base.resv)  argument
  328  atomic_inc(&vma->flags);  in __i915_vma_pin()
  335  atomic_dec(&vma->flags);  in __i915_vma_unpin()
  341  __i915_vma_unpin(vma);  in i915_vma_unpin()
  416  if (vma->fence)  in i915_vma_unpin_fence()
  [all …]
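The __i915_vma_size() and __i915_vma_offset() matches above show the guard-page accounting: the drm_mm node is padded by vma->guard on both ends, so the usable size is node.size minus two guards and the usable offset starts one guard past node.start. A minimal userspace sketch of that arithmetic, using simplified stand-in types rather than the driver's real struct i915_vma:

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-ins for drm_mm_node / i915_vma; illustrative only. */
    struct node { uint64_t start, size; };
    struct vma  { struct node node; uint64_t guard; };

    /* Usable size: the node is padded by one guard region on each side. */
    static uint64_t vma_size(const struct vma *v)
    {
        return v->node.size - 2 * v->guard;
    }

    /* Usable offset: the mapping proper begins after the leading guard. */
    static uint64_t vma_offset(const struct vma *v)
    {
        return v->node.start + v->guard;
    }

    int main(void)
    {
        struct vma v = { .node = { .start = 0x10000, .size = 0x6000 }, .guard = 0x1000 };

        printf("offset=0x%llx size=0x%llx\n",
               (unsigned long long)vma_offset(&v),
               (unsigned long long)vma_size(&v));   /* offset=0x11000 size=0x4000 */
        return 0;
    }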
|
| i915_vma.c |
  82  vma->node.start, vma->node.size, reason);  in vma_print_allocator()
  89  vma->node.start, vma->node.size, reason, buf);  in vma_print_allocator()
  266  list_add(&vma->obj_link, &obj->vma.list);  in vma_create()
  452  i915_vma_resource_init(vma_res, vma->vm, vma->pages, &vma->page_sizes,  in i915_vma_resource_init_from_vma()
  455  vma->ops, vma->private, __i915_vma_offset(vma),  in i915_vma_resource_init_from_vma()
  456  __i915_vma_size(vma), vma->size, vma->guard);  in i915_vma_resource_init_from_vma()
  484  GEM_BUG_ON(vma->size > i915_vma_size(vma));  in i915_vma_bind()
  573  vma->ops->bind_vma(vma->vm, NULL, vma->resource, pat_index,  in i915_vma_bind()
  1362  vma->page_sizes = vma->obj->mm.page_sizes;  in i915_vma_get_pages()
  1400  if (vma->pages != vma->obj->mm.pages) {  in __vma_put_pages()
  [all …]
|
| i915_gem_evict.c |
  88  if (dying_vma(vma))  in ungrab_vma()
  98  struct i915_vma *vma,  in mark_free()  argument
  105  if (!grab_vma(vma, ww))  in mark_free()
  224  active = vma;  in i915_gem_evict_something()
  238  ungrab_vma(vma);  in i915_gem_evict_something()
  287  ungrab_vma(vma);  in i915_gem_evict_something()
  297  ungrab_vma(vma);  in i915_gem_evict_something()
  307  ungrab_vma(vma);  in i915_gem_evict_something()
  380  vma = container_of(node, typeof(*vma), node);  in i915_gem_evict_for_node()
  424  __i915_vma_pin(vma);  in i915_gem_evict_for_node()
  [all …]
|
| /linux/tools/testing/vma/ |
| vma.c |
  328  .vma = vma,  in test_simple_expand()
  621  vmg.vma = vma;  in test_vma_merge_special_flags()
  750  vmg.vma = vma;  in test_vma_merge_with_close()
  777  vmg.vma = vma;  in test_vma_merge_with_close()
  807  vmg.vma = vma;  in test_vma_merge_with_close()
  833  vmg.vma = vma;  in test_vma_merge_with_close()
  930  vmg.vma = vma;  in test_merge_existing()
  963  vmg.vma = vma;  in test_merge_existing()
  993  vmg.vma = vma;  in test_merge_existing()
  1027  vmg.vma = vma;  in test_merge_existing()
  [all …]
|
| vma_internal.h |
  427  memset(vma, 0, sizeof(*vma));  in vma_init()
  438  if (!vma)  in vm_area_alloc()
  582  if (!vma)  in find_vma_prev()
  620  return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;  in vma_pages()
  639  free(vma);  in __vm_area_free()
  670  (void)vma;  in unmap_vmas()
  683  (void)vma;  in free_pgtables()
  732  (void)vma;  in vma_adjust_trans_huge()
  784  (void)vma;  in uprobe_munmap()
  825  (void)vma;  in userfaultfd_unmap_prep()
  [all …]
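These matches show the pattern behind the userspace VMA test harness: pure arithmetic helpers such as vma_pages() keep their kernel form, kernel-only side effects (unmap_vmas, free_pgtables, uprobe_munmap, ...) become no-op stubs that just (void) their arguments, and allocation falls back to the C library. A minimal sketch of that stubbing pattern, with simplified names and a cut-down struct rather than the actual header contents:

    #include <stdlib.h>
    #include <string.h>

    #define PAGE_SHIFT 12

    /* Cut-down stand-in for struct vm_area_struct; illustrative only. */
    struct vm_area_struct { unsigned long vm_start, vm_end; };

    /* Real arithmetic is kept: number of pages spanned by the VMA. */
    static unsigned long vma_pages(struct vm_area_struct *vma)
    {
        return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
    }

    /* Kernel-only side effects become no-ops; (void) silences unused warnings. */
    static void unmap_vmas(struct vm_area_struct *vma) { (void)vma; }

    /* Allocation is backed by the C library instead of kernel allocators. */
    static struct vm_area_struct *vm_area_alloc(void)
    {
        struct vm_area_struct *vma = malloc(sizeof(*vma));

        if (vma)
            memset(vma, 0, sizeof(*vma));
        return vma;
    }

    static void __vm_area_free(struct vm_area_struct *vma) { free(vma); }

    int main(void)
    {
        struct vm_area_struct *vma = vm_area_alloc();
        unsigned long pages;

        if (!vma)
            return 1;
        vma->vm_start = 0x1000;
        vma->vm_end = 0x5000;
        pages = vma_pages(vma);     /* 4 pages */
        unmap_vmas(vma);
        __vm_area_free(vma);
        return pages == 4 ? 0 : 1;
    }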
|
| /linux/mm/ |
| vma.c |
  70  vp->vma = vma;  in init_multi_vma_prep()
  161  uprobe_munmap(vp->vma, vp->vma->vm_start, vp->vma->vm_end);  in vma_prepare()
  353  unmap_vmas(&tlb, mas, vma, vma->vm_start, vma->vm_end, vma->vm_end,  in unmap_region()
  377  if (vma->vm_ops && vma->vm_ops->may_split) {  in __split_vma()
  378  err = vma->vm_ops->may_split(vma, addr);  in __split_vma()
  660  struct vm_area_struct *vma = vmg->vma;  in vma_merge_existing_range()  local
  684  VM_WARN_ON(vma && ((vma != prev && vmg->start != vma->vm_start) ||  in vma_merge_existing_range()
  1006  struct vm_area_struct *vma = vmg->vma;  in vma_expand()  local
  1419  struct vm_area_struct *vma = vmg->vma;  in vma_modify()  local
  1513  VMG_VMA_STATE(vmg, vmi, vma, vma, vma->vm_end, vma->vm_end + delta);  in vma_merge_extend()
  [all …]
|
| mremap.c |
  594  new_pud = alloc_new_pud(vma->vm_mm, vma, new_addr);  in move_page_tables()
  615  new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);  in move_page_tables()
  686  if (vma->vm_ops && vma->vm_ops->may_split) {  in move_vma()
  688  err = vma->vm_ops->may_split(vma, old_addr);  in move_vma()
  726  } else if (vma->vm_ops && vma->vm_ops->mremap) {  in move_vma()
  783  if (new_vma != vma && vma->vm_start == old_addr &&  in move_vma()
  828  if (!vma)  in vma_to_resize()
  853  return vma;  in vma_to_resize()
  871  return vma;  in vma_to_resize()
  1044  if (!vma) {  in SYSCALL_DEFINE5()
  [all …]
|
| nommu.c |
  102  if (vma)  in kobjsize()
  103  return vma->vm_end - vma->vm_start;  in kobjsize()
  158  if (vma)  in __vmalloc_user_flags()
  573  VMA_ITERATOR(vmi, vma->vm_mm, vma->vm_start);  in delete_vma_from_mm()
  887  ret = mmap_file(vma->vm_file, vma);  in do_mmap_shared_file()
  920  ret = mmap_file(vma->vm_file, vma);  in do_mmap_private()
  1350  vma->vm_region->vm_end = vma->vm_end = addr;  in split_vma()
  1554  if (vma->vm_end != vma->vm_start + old_len)  in do_mremap()
  1564  vma->vm_end = vma->vm_start + new_len;  in do_mremap()
  1597  return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);  in vm_iomap_memory()
  [all …]
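The do_mremap() matches show the nommu constraint: a mapping can only be resized in place, so the requested old length must cover the VMA exactly, and the resize reduces to moving vm_end. A minimal sketch of that shape (simplified; it ignores the backing vm_region room check the real code also performs):

    #include <stdio.h>

    /* Only the range fields the nommu resize path touches. */
    struct vm_area_struct { unsigned long vm_start, vm_end; };

    /*
     * The old length must name the whole VMA; resizing is then just a
     * matter of moving vm_end (growing would also need room in the
     * backing region, which this sketch does not model).
     */
    static int resize_in_place(struct vm_area_struct *vma,
                               unsigned long old_len, unsigned long new_len)
    {
        if (vma->vm_end != vma->vm_start + old_len)
            return -1;

        vma->vm_end = vma->vm_start + new_len;
        return 0;
    }

    int main(void)
    {
        struct vm_area_struct vma = { .vm_start = 0x8000, .vm_end = 0x10000 };

        if (resize_in_place(&vma, 0x8000, 0x4000) == 0)
            printf("new end=0x%lx\n", vma.vm_end);   /* 0xc000 */
        return 0;
    }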
|
| mmap.c |
  819  (!vma || addr + len <= vm_start_gap(vma)) &&  in generic_get_unmapped_area_topdown()
  1258  if (vma && (vma->vm_start <= addr))  in find_extend_vma_locked()
  1326  if (vma && vma->vm_start <= addr)  in expand_stack()
  1334  if (vma && !vma_expand_down(vma, addr))  in expand_stack()
  1521  khugepaged_enter_vma(vma, vma->vm_flags);  in __mmap_region()
  1683  if (!vma || !(vma->vm_flags & VM_SHARED)) {  in SYSCALL_DEFINE5()
  1801  if (vma && vma->vm_end == addr) {  in do_brk_flags()
  1911  if (!vma || unlikely(xa_is_zero(vma))) {  in exit_mmap()
  1951  } while (vma && likely(!xa_is_zero(vma)));  in exit_mmap()
  2101  struct vm_area_struct *vma = vmf->vma;  in special_mapping_fault()  local
  [all …]
|
| mprotect.c |
  618  *pprev = vma;  in mprotect_fixup()
  664  vma = vma_modify_flags(vmi, *pprev, vma, start, end, newflags);  in mprotect_fixup()
  665  if (IS_ERR(vma)) {  in mprotect_fixup()
  670  *pprev = vma;  in mprotect_fixup()
  754  if (!vma)  in do_mprotect_pkey()
  768  end = vma->vm_end;  in do_mprotect_pkey()
  777  prev = vma;  in do_mprotect_pkey()
  781  tmp = vma->vm_start;  in do_mprotect_pkey()
  828  tmp = vma->vm_end;  in do_mprotect_pkey()
  832  if (vma->vm_ops && vma->vm_ops->mprotect) {  in do_mprotect_pkey()
  [all …]
|
| madvise.c |
  147  *prev = vma;  in madvise_update_vma()
  156  *prev = vma;  in madvise_update_vma()
  161  if (!vma->vm_file || vma_is_anon_shmem(vma)) {  in madvise_update_vma()
  269  *prev = vma;  in madvise_willneed()
  344  struct vm_area_struct *vma = walk->vma;  in madvise_cold_or_pageout_pte_range()  local
  649  struct vm_area_struct *vma = walk->vma;  in madvise_free_pte_range()  local
  899  if (!vma)  in madvise_dontneed_free()
  1069  if (vma->vm_file || vma->vm_flags & VM_SHARED)  in madvise_vma_behavior()
  1250  if (vma && start > vma->vm_start)  in madvise_walk_vmas()
  1257  if (!vma)  in madvise_walk_vmas()
  [all …]
|
| mseal.c |
  42  if (vma->vm_file || vma->vm_flags & VM_SHARED)  in is_ro_anon()
  64  if (unlikely(!can_modify_vma(vma) && is_ro_anon(vma)))  in can_modify_vma_madv()
  81  vma = vma_modify_flags(vmi, *prev, vma, start, end, newflags);  in mseal_fixup()
  82  if (IS_ERR(vma)) {  in mseal_fixup()
  83  ret = PTR_ERR(vma);  in mseal_fixup()
  87  set_vma_sealed(vma);  in mseal_fixup()
  89  *prev = vma;  in mseal_fixup()
  113  if (vma->vm_end >= end)  in check_mm_seal()
  116  nstart = vma->vm_end;  in check_mm_seal()
  139  prev = vma;  in apply_mm_seal()
  [all …]
|
| memory.c |
  541  vma->vm_ops ? vma->vm_ops->fault : NULL,  in print_bad_pte()
  2780  vm_len = vma->vm_end - vma->vm_start;  in vm_iomap_memory()
  3039  struct vm_area_struct *vma = vmf->vma;  in __wp_page_copy_user()  local
  3186  struct vm_area_struct *vma = vmf->vma;  in fault_dirty_shared_page()  local
  3240  struct vm_area_struct *vma = vmf->vma;  in wp_page_reuse()  local
  3273  struct vm_area_struct *vma = vmf->vma;  in vmf_can_call_fault()  local
  3298  struct vm_area_struct *vma = vmf->vma;  in __vmf_anon_prepare()  local
  5820  .vma = vma,  in __handle_mm_fault()
  6697  if (vma && vma->vm_file) {  in print_vma_addr()
  6703  vma->vm_end - vma->vm_start);  in print_vma_addr()
  [all …]
|
| vma.h |
  14  struct vm_area_struct *vma;  member
  101  return vma->vm_pgoff + PHYS_PFN(addr - vma->vm_start);  in vma_pgoff_offset()
  122  .vma = vma_, \
  157  ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))  in vma_iter_store_gfp()
  160  __mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);  in vma_iter_store_gfp()
  185  vms->vma = vma;  in init_vma_munmap()
  186  if (vma) {  in init_vma_munmap()
  348  return vma_wants_writenotify(vma, vma->vm_page_prot);  in vma_wants_manual_pte_write_upgrade()
  460  vmi->mas.index, vma->vm_start, vma->vm_start,  in vma_iter_store()
  466  vmi->mas.last, vma->vm_start, vma->vm_start, vma->vm_end,  in vma_iter_store()
  [all …]
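The vma_pgoff_offset() match is a one-line piece of arithmetic worth spelling out: the page offset in the backing object for an address inside the VMA is the VMA's own starting pgoff plus the number of whole pages the address lies past vm_start. A minimal userspace sketch of that calculation, with PHYS_PFN and the struct reduced to the fields actually read:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PHYS_PFN(x) ((unsigned long)((x) >> PAGE_SHIFT))

    /* Cut-down stand-in; only what vma_pgoff_offset() reads. */
    struct vm_area_struct { unsigned long vm_start, vm_pgoff; };

    /*
     * Page offset in the backing object for a virtual address inside the
     * VMA: starting pgoff plus the pages between addr and vm_start.
     */
    static unsigned long vma_pgoff_offset(struct vm_area_struct *vma,
                                          unsigned long addr)
    {
        return vma->vm_pgoff + PHYS_PFN(addr - vma->vm_start);
    }

    int main(void)
    {
        struct vm_area_struct vma = { .vm_start = 0x7f0000000000UL, .vm_pgoff = 10 };

        /* Two pages past vm_start -> pgoff 12. */
        printf("%lu\n", vma_pgoff_offset(&vma, 0x7f0000002000UL));
        return 0;
    }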
|
| /linux/drivers/gpu/drm/nouveau/ |
| nouveau_vmm.c |
  31  if (vma->mem) {  in nouveau_vma_unmap()
  32  nvif_vmm_unmap(&vma->vmm->vmm, vma->addr);  in nouveau_vma_unmap()
  33  vma->mem = NULL;  in nouveau_vma_unmap()
  44  vma->mem = mem;  in nouveau_vma_map()
  55  return vma;  in nouveau_vma_find()
  65  if (vma && --vma->refs <= 0) {  in nouveau_vma_del()
  86  vma->refs++;  in nouveau_vma_new()
  90  if (!(vma = *pvma = kmalloc(sizeof(*vma), GFP_KERNEL)))  in nouveau_vma_new()
  92  vma->vmm = vmm;  in nouveau_vma_new()
  93  vma->refs = 1;  in nouveau_vma_new()
  [all …]
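The nouveau_vma_new()/nouveau_vma_del() matches show a find-or-create refcount pattern: an existing per-VMM mapping gets its refs bumped, otherwise a fresh one is allocated at refs = 1, and deletion frees only when the last reference drops. A minimal userspace sketch of that pattern (the lookup is stubbed here; in the driver nouveau_vma_find() walks the object's VMA list):

    #include <stdio.h>
    #include <stdlib.h>

    /* Cut-down stand-in for struct nouveau_vma; illustrative only. */
    struct vma { int refs; };

    /* Stub for the per-object lookup the driver actually performs. */
    static struct vma *vma_find(struct vma *existing) { return existing; }

    /* Find-or-create: bump the refcount if present, otherwise allocate at 1. */
    static struct vma *vma_get(struct vma *existing)
    {
        struct vma *vma = vma_find(existing);

        if (vma) {
            vma->refs++;
            return vma;
        }
        vma = calloc(1, sizeof(*vma));
        if (vma)
            vma->refs = 1;
        return vma;
    }

    /* Drop a reference; free once the last user is gone. */
    static void vma_put(struct vma *vma)
    {
        if (vma && --vma->refs <= 0)
            free(vma);
    }

    int main(void)
    {
        struct vma *a = vma_get(NULL);   /* created, refs == 1 */
        struct vma *b = vma_get(a);      /* found,   refs == 2 */

        printf("refs=%d\n", a->refs);
        vma_put(b);
        vma_put(a);                      /* last put frees */
        return 0;
    }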
|
| /linux/include/linux/ |
| userfaultfd_k.h |
  185  return vma->vm_flags & VM_UFFD_MISSING;  in userfaultfd_missing()
  190  return vma->vm_flags & VM_UFFD_WP;  in userfaultfd_wp()
  195  return vma->vm_flags & VM_UFFD_MINOR;  in userfaultfd_minor()
  212  return vma->vm_flags & __VM_UFFD_FLAGS;  in userfaultfd_armed()
  225  (!is_vm_hugetlb_page(vma) && !vma_is_shmem(vma)))  in vma_can_userfault()
  246  return vma_is_anonymous(vma) || is_vm_hugetlb_page(vma) ||  in vma_can_userfault()
  247  vma_is_shmem(vma);  in vma_can_userfault()
  275  struct vm_area_struct *vma,
  280  struct vm_area_struct *vma,
  410  if (!userfaultfd_wp(vma))  in userfaultfd_wp_use_markers()
  [all …]
|
| huge_mm.h |
  18  struct vm_area_struct *vma);
  215  if (!vma_is_anonymous(vma)) {  in thp_vma_suitable_order()
  216  if (!IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,  in thp_vma_suitable_order()
  223  if (haddr < vma->vm_start || haddr + hpage_size > vma->vm_end)  in thp_vma_suitable_order()
  260  if (!vma->vm_file)  in file_thp_enabled()
  263  inode = vma->vm_file->f_inode;  in file_thp_enabled()
  428  struct vm_area_struct *vma)  in pmd_trans_huge_lock()  argument
  431  return __pmd_trans_huge_lock(pmd, vma);  in pmd_trans_huge_lock()
  436  struct vm_area_struct *vma)  in pud_trans_huge_lock()  argument
  592  struct vm_area_struct *vma)  in pmd_trans_huge_lock()  argument
  [all …]
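The thp_vma_suitable_order() matches sketch out the two checks for whether a huge page of a given order can be used at an address: for file-backed VMAs the linear page offset must be aligned to the huge page, and the huge-page-aligned range must fall entirely inside the VMA. A minimal userspace model of that shape, assuming the alignment target is the number of base pages per huge page (1 << order); it is an illustration, not the header's code:

    #include <stdbool.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

    /* Cut-down stand-in; only the fields the check reads. */
    struct vm_area_struct {
        unsigned long vm_start, vm_end, vm_pgoff;
        bool anonymous;
    };

    /*
     * File/shmem mappings need the linear page offset aligned to the huge
     * page; in all cases the huge-page-aligned address range must fit
     * inside the VMA.  "order" is base pages per huge page as a power of
     * two (9 -> 2 MiB with 4 KiB pages).
     */
    static bool suitable_order(struct vm_area_struct *vma, unsigned long addr,
                               unsigned int order)
    {
        unsigned long hpage_size = PAGE_SIZE << order;
        unsigned long haddr = addr & ~(hpage_size - 1);

        if (!vma->anonymous &&
            !IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff, 1UL << order))
            return false;

        return haddr >= vma->vm_start && haddr + hpage_size <= vma->vm_end;
    }

    int main(void)
    {
        struct vm_area_struct vma = {
            .vm_start = 0x200000, .vm_end = 0x800000, .vm_pgoff = 0,
            .anonymous = true,
        };

        printf("%d\n", suitable_order(&vma, 0x300000, 9));   /* 1: a 2 MiB page fits */
        return 0;
    }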
|
| /linux/drivers/gpu/drm/i915/display/ |
| intel_fb_pin.c |
  31  struct i915_vma *vma;  in intel_fb_pin_to_dpt()  local
  73  if (IS_ERR(vma)) {  in intel_fb_pin_to_dpt()
  93  vma->display_alignment = max(vma->display_alignment, alignment);  in intel_fb_pin_to_dpt()
  97  i915_vma_get(vma);  in intel_fb_pin_to_dpt()
  101  return vma;  in intel_fb_pin_to_dpt()
  202  if (vma->fence)  in intel_fb_pin_to_ggtt()
  222  return vma;  in intel_fb_pin_to_ggtt()
  266  if (IS_ERR(vma))  in intel_plane_pin_fb()
  314  if (vma)  in intel_plane_unpin_fb()
  318  if (vma)  in intel_plane_unpin_fb()
  [all …]
|
| /linux/drivers/gpu/drm/msm/ |
| msm_gem_vma.c |
  48  if (!vma->mapped)  in msm_gem_vma_purge()
  53  vma->mapped = false;  in msm_gem_vma_purge()
  67  if (vma->mapped)  in msm_gem_vma_map()
  70  vma->mapped = true;  in msm_gem_vma_map()
  101  if (vma->iova)  in msm_gem_vma_close()
  105  vma->iova = 0;  in msm_gem_vma_close()
  114  vma = kzalloc(sizeof(*vma), GFP_KERNEL);  in msm_gem_vma_new()
  115  if (!vma)  in msm_gem_vma_new()
  120  return vma;  in msm_gem_vma_new()
  145  vma->iova = vma->node.start;  in msm_gem_vma_init()
  [all …]
|
| /linux/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ |
| vmm.c |
  922  if (addr >= vma->addr + vma->size)  in nvkm_vmm_node_search()
  983  if (!(vma = nvkm_vma_tail(vma, vma->size + vma->addr - addr)))  in nvkm_vmm_node_split()
  1006  vma->addr, (u64)vma->size,  in nvkm_vma_dump()
  1221  if (vma->addr == addr && vma->part && (prev = node(vma, prev))) {  in nvkm_vmm_pfn_split_merge()
  1249  if (!vma->mapped || vma->memory)  in nvkm_vmm_pfn_unmap()
  1263  } while ((vma = node(vma, next)) && (start = vma->addr) < limit);  in nvkm_vmm_pfn_unmap()
  1321  if (!vma->mapref || vma->memory) {  in nvkm_vmm_pfn_map()
  1377  vma = node(vma, next);  in nvkm_vmm_pfn_map()
  1618  if (vma->mapref || !vma->sparse) {  in nvkm_vmm_put_locked()
  1659  if (vma->sparse && !vma->mapref) {  in nvkm_vmm_put_locked()
  [all …]
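The nvkm_vmm_node_split() match carries the split arithmetic in one expression: the new tail node gets vma->size + vma->addr - addr bytes, i.e. everything from the cut point to the end of the range, while the head keeps the bytes before the cut. A minimal userspace sketch of just that arithmetic, on a cut-down node type rather than the real struct nvkm_vma:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Cut-down node with just an address range; illustrative only. */
    struct vma_node { uint64_t addr, size; };

    /*
     * Carve the range at 'addr': the head keeps [addr0, addr) and the new
     * tail covers [addr, addr0 + size0).
     */
    static struct vma_node *vma_split(struct vma_node *vma, uint64_t addr)
    {
        struct vma_node *tail = malloc(sizeof(*tail));

        if (!tail)
            return NULL;
        tail->size = vma->size + vma->addr - addr;   /* bytes past the cut */
        tail->addr = addr;
        vma->size  = addr - vma->addr;               /* head shrinks to the cut */
        return tail;
    }

    int main(void)
    {
        struct vma_node head = { .addr = 0x1000, .size = 0x4000 };
        struct vma_node *tail = vma_split(&head, 0x2000);

        if (!tail)
            return 1;
        printf("head: 0x%llx+0x%llx  tail: 0x%llx+0x%llx\n",
               (unsigned long long)head.addr, (unsigned long long)head.size,
               (unsigned long long)tail->addr, (unsigned long long)tail->size);
        free(tail);
        return 0;
    }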
|
| /linux/drivers/gpu/drm/i915/selftests/ |
| i915_gem_gtt.c |
  481  __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),  in fill_hole()
  514  __func__, p->name, vma->node.start, vma->node.size,  in fill_hole()
  523  __func__, p->name, vma->node.start, vma->node.size,  in fill_hole()
  560  __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),  in fill_hole()
  593  __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),  in fill_hole()
  602  __func__, p->name, vma->node.start, vma->node.size,  in fill_hole()
  1446  vma->resource->bi.pages = vma->pages;  in track_vma_bind()
  1449  list_move_tail(&vma->vm_link, &vma->vm->bound_list);  in track_vma_bind()
  1583  vma->node.start, vma->node.size,  in igt_gtt_reserve()
  1629  vma->node.start, vma->node.size,  in igt_gtt_reserve()
  [all …]
|
| i915_vma.c |
  73  return vma;  in checked_vma_instance()
  93  if (i915_vma_compare(vma, vma->vm,  in checked_vma_instance()
  94  i915_vma_is_ggtt(vma) ? &vma->gtt_view : NULL)) {  in checked_vma_instance()
  104  return vma;  in checked_vma_instance()
  656  if (vma->node.size < vma->size) {  in igt_vma_rotate_remap()
  658  vma->size, vma->node.size);  in igt_vma_rotate_remap()
  756  if (vma->node.size < vma->size) {  in assert_pin()
  758  name, vma->size, vma->node.size);  in assert_pin()
  769  if (vma->pages == vma->obj->mm.pages) {  in assert_pin()
  781  if (vma->pages != vma->obj->mm.pages) {  in assert_pin()
  [all …]
|
| /linux/drivers/gpu/drm/xe/ |
| xe_trace_bo.h |
  21  #define __dev_name_vma(vma) __dev_name_vm(xe_vma_vm(vma))  argument
  80  TP_ARGS(vma),
  93  __entry->vma = vma;
  107  TP_ARGS(vma)
  112  TP_ARGS(vma)
  117  TP_ARGS(vma)
  122  TP_ARGS(vma)
  127  TP_ARGS(vma)
  132  TP_ARGS(vma)
  137  TP_ARGS(vma)
  [all …]
|
| xe_vm.h |
  110  return vma->gpuva.va.addr;  in xe_vma_start()
  115  return vma->gpuva.va.range;  in xe_vma_size()
  120  return xe_vma_start(vma) + xe_vma_size(vma);  in xe_vma_end()
  125  return vma->gpuva.gem.offset;  in xe_vma_bo_offset()
  130  return !vma->gpuva.gem.obj ? NULL :  in xe_vma_bo()
  141  return vma->gpuva.flags & XE_VMA_READ_ONLY;  in xe_vma_read_only()
  146  return vma->gpuva.gem.offset;  in xe_vma_userptr()
  156  return !xe_vma_bo(vma);  in xe_vma_has_no_bo()
  161  return xe_vma_has_no_bo(vma) && !xe_vma_is_null(vma);  in xe_vma_is_userptr()
  172  xe_assert(xe_vma_vm(vma)->xe, xe_vma_is_userptr(vma));  in to_userptr_vma()
  [all …]
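These accessors show how xe keeps the VMA range in the gpuva fields (end is always derived as start + size) and classifies a userptr VMA as one with no backing BO that is not a NULL binding. A minimal userspace sketch of those relationships, with the gpuva fields flattened into a stand-in struct rather than the driver's real types:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-ins for the gpuva fields the accessors read. */
    struct xe_vma_sketch {
        uint64_t addr, range;   /* gpuva.va.addr / gpuva.va.range */
        void *bo;               /* gpuva.gem.obj, NULL for BO-less VMAs */
        bool is_null;           /* NULL/sparse binding */
    };

    static uint64_t vma_start(const struct xe_vma_sketch *vma) { return vma->addr; }
    static uint64_t vma_size(const struct xe_vma_sketch *vma)  { return vma->range; }

    /* End is derived, never stored: start + size. */
    static uint64_t vma_end(const struct xe_vma_sketch *vma)
    {
        return vma_start(vma) + vma_size(vma);
    }

    /* A userptr VMA has no backing BO and is not a NULL binding. */
    static bool vma_is_userptr(const struct xe_vma_sketch *vma)
    {
        return !vma->bo && !vma->is_null;
    }

    int main(void)
    {
        struct xe_vma_sketch vma = { .addr = 0x100000, .range = 0x10000 };

        printf("end=0x%llx userptr=%d\n",
               (unsigned long long)vma_end(&vma), vma_is_userptr(&vma));
        return 0;
    }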
|
| /linux/drivers/pci/ |
| mmap.c |
  25  struct vm_area_struct *vma,  in pci_mmap_resource_range()  argument
  32  if (vma->vm_pgoff + vma_pages(vma) > size)  in pci_mmap_resource_range()
  36  vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);  in pci_mmap_resource_range()
  38  vma->vm_page_prot = pgprot_device(vma->vm_page_prot);  in pci_mmap_resource_range()
  41  ret = pci_iobar_pfn(pdev, bar, vma);  in pci_mmap_resource_range()
  47  vma->vm_ops = &pci_phys_vm_ops;  in pci_mmap_resource_range()
  49  return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,  in pci_mmap_resource_range()
  50  vma->vm_end - vma->vm_start,  in pci_mmap_resource_range()
  51  vma->vm_page_prot);  in pci_mmap_resource_range()
  67  nr = vma_pages(vma);  in pci_mmap_fits()
  [all …]
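The pci_mmap_resource_range() bounds check is the piece worth spelling out: the requested window, expressed as the starting page offset plus the mapping length in pages, must not run past the end of the BAR. A minimal userspace sketch of that check alone (the remap itself needs the kernel's io_remap_pfn_range and is not modelled here); the bar_pages parameter stands in for the resource size in pages:

    #include <stdbool.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* Only the fields the bounds check reads. */
    struct vm_area_struct { unsigned long vm_start, vm_end, vm_pgoff; };

    static unsigned long vma_pages(const struct vm_area_struct *vma)
    {
        return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
    }

    /*
     * Starting page offset plus mapping length in pages must stay within
     * the BAR, given the BAR size in pages.
     */
    static bool mmap_fits(const struct vm_area_struct *vma, unsigned long bar_pages)
    {
        return vma->vm_pgoff + vma_pages(vma) <= bar_pages;
    }

    int main(void)
    {
        /* A 16-page mapping at page offset 4 into a 16-page BAR: too big. */
        struct vm_area_struct vma = {
            .vm_start = 0x7f0000000000UL,
            .vm_end   = 0x7f0000000000UL + 16 * PAGE_SIZE,
            .vm_pgoff = 4,
        };

        printf("fits=%d\n", mmap_fits(&vma, 16));   /* 0 */
        return 0;
    }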
|