/linux-6.3-rc2/drivers/gpu/drm/i915/

i915_vma.h
     80  return i915_is_dpt(vma->vm);    in i915_vma_is_dpt()
    131  return vma->node.size - 2 * vma->guard;    in __i915_vma_size()
    154  return vma->node.start + vma->guard;    in __i915_vma_offset()
    191  return vma;    in i915_vma_get()
    197  return vma;    in i915_vma_tryget()
    221  cmp = vma->gtt_view.type;    in i915_vma_compare()
    276  #define assert_vma_held(vma) dma_resv_assert_held((vma)->obj->base.resv)    argument
    328  atomic_inc(&vma->flags);    in __i915_vma_pin()
    335  atomic_dec(&vma->flags);    in __i915_vma_unpin()
    341  __i915_vma_unpin(vma);    in i915_vma_unpin()
    [all …]
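
The hits at lines 131 and 154 show how i915 carves the usable range out of a GTT node that is padded by vma->guard scratch pages at both ends, while __i915_vma_pin()/__i915_vma_unpin() at 328/335 keep a pin count in the atomic flags word. A minimal userspace sketch of both ideas follows; the toy_vma type is invented, and using the whole flags word as a pin count is a simplification of the driver's real bit layout:

    #include <assert.h>
    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Toy model of a drm_mm node wrapped by guard padding on both ends. */
    struct toy_vma {
        uint64_t node_start;   /* start of the whole allocation */
        uint64_t node_size;    /* size of the whole allocation  */
        uint64_t guard;        /* scratch padding at each end   */
        atomic_uint flags;     /* low bits act as a pin count   */
    };

    /* Usable size excludes one guard region at each end. */
    static uint64_t toy_vma_size(const struct toy_vma *vma)
    {
        return vma->node_size - 2 * vma->guard;
    }

    /* Usable offset skips only the leading guard region. */
    static uint64_t toy_vma_offset(const struct toy_vma *vma)
    {
        return vma->node_start + vma->guard;
    }

    static void toy_vma_pin(struct toy_vma *vma)   { atomic_fetch_add(&vma->flags, 1); }
    static void toy_vma_unpin(struct toy_vma *vma) { atomic_fetch_sub(&vma->flags, 1); }

    int main(void)
    {
        struct toy_vma vma = { .node_start = 0x10000, .node_size = 0x6000, .guard = 0x1000 };

        toy_vma_pin(&vma);
        printf("offset=%#llx size=%#llx pins=%u\n",
               (unsigned long long)toy_vma_offset(&vma),
               (unsigned long long)toy_vma_size(&vma),
               atomic_load(&vma.flags));
        toy_vma_unpin(&vma);
        assert(atomic_load(&vma.flags) == 0);
        return 0;
    }

Guarding both ends is why the usable size subtracts 2 * guard while the usable offset only advances by one guard.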

i915_vma.c
     79  vma->node.start, vma->node.size, reason);    in vma_print_allocator()
     86  vma->node.start, vma->node.size, reason, buf);    in vma_print_allocator()
    233  list_add(&vma->obj_link, &obj->vma.list);    in vma_create()
    419  i915_vma_resource_init(vma_res, vma->vm, vma->pages, &vma->page_sizes,    in i915_vma_resource_init_from_vma()
    422  vma->ops, vma->private, __i915_vma_offset(vma),    in i915_vma_resource_init_from_vma()
    423  __i915_vma_size(vma), vma->size, vma->guard);    in i915_vma_resource_init_from_vma()
    451  GEM_BUG_ON(vma->size > i915_vma_size(vma));    in i915_vma_bind()
    540  vma->ops->bind_vma(vma->vm, NULL, vma->resource, cache_level,    in i915_vma_bind()
   1328  vma->page_sizes = vma->obj->mm.page_sizes;    in i915_vma_get_pages()
   1358  if (vma->pages != vma->obj->mm.pages) {    in __vma_put_pages()
    [all …]

i915_gem_evict.c
     88  if (dying_vma(vma))    in ungrab_vma()
     98  struct i915_vma *vma,    in mark_free()    argument
    105  if (!grab_vma(vma, ww))    in mark_free()
    224  active = vma;    in i915_gem_evict_something()
    238  ungrab_vma(vma);    in i915_gem_evict_something()
    287  ungrab_vma(vma);    in i915_gem_evict_something()
    297  ungrab_vma(vma);    in i915_gem_evict_something()
    307  ungrab_vma(vma);    in i915_gem_evict_something()
    380  vma = container_of(node, typeof(*vma), node);    in i915_gem_evict_for_node()
    424  __i915_vma_pin(vma);    in i915_gem_evict_for_node()
    [all …]
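
The eviction hits trace a scan loop: each candidate is reference-grabbed with grab_vma() (which fails for a dying_vma()), contributes its node via mark_free(), and must be released with ungrab_vma() on every exit path, which is why ungrab_vma() appears four times in i915_gem_evict_something() alone. A rough self-contained model of that shape; the list layout and byte accounting here are invented for illustration:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct candidate {
        struct candidate *next;
        size_t size;
        int refs;
        bool dying;
    };

    /* grab_vma() analogue: refuse candidates already being torn down. */
    static bool grab(struct candidate *c)
    {
        if (c->dying)
            return false;
        c->refs++;
        return true;
    }

    static void ungrab(struct candidate *c)
    {
        c->refs--;
    }

    /* Walk the bound list until `needed` bytes worth of nodes are marked
     * free; every grabbed candidate is ungrabbed before moving on. */
    static size_t evict_something(struct candidate *head, size_t needed)
    {
        size_t freed = 0;

        for (struct candidate *c = head; c && freed < needed; c = c->next) {
            if (!grab(c))
                continue;
            freed += c->size;   /* mark_free() in the real code */
            ungrab(c);
        }
        return freed;
    }

    int main(void)
    {
        struct candidate c2 = { NULL, 4096, 0, true };   /* dying: skipped */
        struct candidate c1 = { &c2,  8192, 0, false };
        struct candidate c0 = { &c1,  4096, 0, false };

        printf("freed %zu bytes\n", evict_something(&c0, 12288)); /* 12288 */
        return 0;
    }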

/linux-6.3-rc2/drivers/gpu/drm/

drm_vm.c
    112  struct vm_area_struct *vma = vmf->vma;    in drm_vm_fault()    local
    201  struct vm_area_struct *vma = vmf->vma;    in drm_vm_shm_fault()    local
    240  vma->vm_start, vma->vm_end - vma->vm_start);    in drm_vm_shm_close()
    248  if (pt->vma == vma) {    in drm_vm_shm_close()
    395  vma->vm_start, vma->vm_end - vma->vm_start);    in drm_vm_open_locked()
    399  vma_entry->vma = vma;    in drm_vm_open_locked()
    421  vma->vm_start, vma->vm_end - vma->vm_start);    in drm_vm_close_locked()
    424  if (pt->vma == vma) {    in drm_vm_close_locked()
    470  vma->vm_start, vma->vm_end, vma->vm_pgoff);    in drm_mmap_dma()
    532  vma->vm_start, vma->vm_end, vma->vm_pgoff);    in drm_mmap_locked()
    [all …]
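
drm_vm_open_locked() and drm_vm_close_locked() maintain a per-device list of live userspace mappings: open stores the vm_area_struct pointer in a vma_entry, and close walks the list looking for pt->vma == vma to unlink it. A userspace sketch of that bookkeeping, with an invented `mapping` struct standing in for vm_area_struct:

    #include <stdio.h>
    #include <stdlib.h>

    struct mapping { unsigned long start, end; };

    /* One bookkeeping entry per open mapping, as in drm_vm_open_locked(). */
    struct vma_entry {
        struct vma_entry *next;
        struct mapping *vma;
    };

    static struct vma_entry *open_list;

    static void track_open(struct mapping *vma)
    {
        struct vma_entry *e = malloc(sizeof(*e));
        if (!e)
            return;
        e->vma = vma;
        e->next = open_list;
        open_list = e;
        printf("open  %#lx-%#lx (%lu bytes)\n",
               vma->start, vma->end, vma->end - vma->start);
    }

    /* On close, find and unlink the matching entry (pt->vma == vma). */
    static void track_close(struct mapping *vma)
    {
        for (struct vma_entry **pp = &open_list; *pp; pp = &(*pp)->next) {
            if ((*pp)->vma == vma) {
                struct vma_entry *e = *pp;
                *pp = e->next;
                free(e);
                printf("close %#lx-%#lx\n", vma->start, vma->end);
                return;
            }
        }
    }

    int main(void)
    {
        struct mapping m = { 0x10000000UL, 0x10004000UL };
        track_open(&m);
        track_close(&m);
        return 0;
    }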

/linux-6.3-rc2/mm/

mmap.c
    139  if (vma->vm_ops && vma->vm_ops->close)    in remove_vma()
    140  vma->vm_ops->close(vma);    in remove_vma()
    473  vp->vma = vma;    in init_multi_vma_prep()
    506  uprobe_munmap(vp->vma, vp->vma->vm_start, vp->vma->vm_end);    in vma_prepare()
    764  if (vma->vm_ops && vma->vm_ops->close)    in is_mergeable_vma()
   1095  MA_STATE(mas, &vma->vm_mm->mm_mt, vma->vm_end, vma->vm_end);    in find_mergeable_anon_vma()
   1108  VM_BUG_ON_VMA(prev != vma, vma);    in find_mergeable_anon_vma()
   2064  if (vma && (vma->vm_start <= addr))    in find_extend_vma()
   2699  vma->vm_ops->close(vma);    in mmap_region()
   2906  if (vma && vma->vm_end == addr && !vma_policy(vma) &&    in do_brk_flags()
    [all …]
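
Both remove_vma() and is_mergeable_vma() hinge on the optional vm_ops->close callback: it must run exactly once per VMA, so a VMA that has one cannot be silently merged with a neighbour. A reduced model of that predicate, with flags and fields simplified to the bare minimum:

    #include <stdbool.h>
    #include <stdio.h>

    struct toy_vm_ops { void (*close)(void *vma); };

    struct toy_vma {
        unsigned long vm_flags;
        const struct toy_vm_ops *vm_ops;
    };

    static void noop_close(void *vma) { (void)vma; }

    /* Mirrors the spirit of is_mergeable_vma(): identical flags, and no
     * close() hook whose call count a merge would change. */
    static bool is_mergeable(const struct toy_vma *vma, unsigned long vm_flags)
    {
        if (vma->vm_flags != vm_flags)
            return false;
        if (vma->vm_ops && vma->vm_ops->close)
            return false;
        return true;
    }

    int main(void)
    {
        static const struct toy_vm_ops special_ops = { .close = noop_close };
        struct toy_vma plain   = { .vm_flags = 0x73, .vm_ops = NULL };
        struct toy_vma special = { .vm_flags = 0x73, .vm_ops = &special_ops };

        printf("plain mergeable:   %d\n", is_mergeable(&plain, 0x73));   /* 1 */
        printf("special mergeable: %d\n", is_mergeable(&special, 0x73)); /* 0 */
        return 0;
    }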

mremap.c
    516  new_pud = alloc_new_pud(vma->vm_mm, vma, new_addr);    in move_page_tables()
    600  if (vma->vm_ops && vma->vm_ops->may_split) {    in move_vma()
    602  err = vma->vm_ops->may_split(vma, old_addr);    in move_vma()
    639  } else if (vma->vm_ops && vma->vm_ops->mremap) {    in move_vma()
    696  if (new_vma != vma && vma->vm_start == old_addr &&    in move_vma()
    741  if (!vma)    in vma_to_resize()
   1051  if (!vma->vm_ops || !vma->vm_ops->close) {    in SYSCALL_DEFINE5()
   1052  vma = vma_merge(&vmi, mm, vma, extension_start,    in SYSCALL_DEFINE5()
   1053  extension_end, vma->vm_flags, vma->anon_vma,    in SYSCALL_DEFINE5()
   1055  vma->vm_userfaultfd_ctx, anon_vma_name(vma));    in SYSCALL_DEFINE5()
    [all …]

madvise.c
    148  *prev = vma;    in madvise_update_vma()
    154  vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),    in madvise_update_vma()
    161  *prev = vma;    in madvise_update_vma()
    180  if (!vma->vm_file || vma_is_anon_shmem(vma)) {    in madvise_update_vma()
    342  struct vm_area_struct *vma = walk->vma;    in madvise_cold_or_pageout_pte_range()    local
    613  struct vm_area_struct *vma = walk->vma;    in madvise_free_pte_range()    local
    856  if (!vma)    in madvise_dontneed_free()
    921  if (!vma || start >= vma->vm_end) {    in madvise_populate()
   1058  if (vma->vm_file || vma->vm_flags & VM_SHARED)    in madvise_vma_behavior()
   1236  if (vma && start > vma->vm_start)    in madvise_walk_vmas()
    [all …]

nommu.c
    101  if (vma)    in kobjsize()
    102  return vma->vm_end - vma->vm_start;    in kobjsize()
    584  VMA_ITERATOR(vmi, vma->vm_mm, vma->vm_start);    in delete_vma_from_mm()
    602  if (vma->vm_ops && vma->vm_ops->close)    in delete_vma()
    603  vma->vm_ops->close(vma);    in delete_vma()
    885  ret = call_mmap(vma->vm_file, vma);    in do_mmap_shared_file()
    918  ret = call_mmap(vma->vm_file, vma);    in do_mmap_private()
   1567  if (vma->vm_end != vma->vm_start + old_len)    in do_mremap()
   1577  vma->vm_end = vma->vm_start + new_len;    in do_mremap()
   1616  return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);    in vm_iomap_memory()
    [all …]

mprotect.c
    601  *pprev = vma;    in mprotect_fixup()
    644  pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);    in mprotect_fixup()
    646  vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),    in mprotect_fixup()
    647  vma->vm_userfaultfd_ctx, anon_vma_name(vma));    in mprotect_fixup()
    649  vma = *pprev;    in mprotect_fixup()
    654  *pprev = vma;    in mprotect_fixup()
    747  if (!vma)    in do_mprotect_pkey()
    770  prev = vma;    in do_mprotect_pkey()
    821  tmp = vma->vm_end;    in do_mprotect_pkey()
    825  if (vma->vm_ops && vma->vm_ops->mprotect) {    in do_mprotect_pkey()
    [all …]
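
Line 644 is the classic split arithmetic: when only part of a file-backed VMA changes protection, the new piece's file offset must advance by the number of pages skipped from the VMA's start. A tiny sketch of just that computation, assuming 4 KiB pages:

    #include <assert.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    /* When mprotect changes only [start, end) inside a larger file-backed
     * VMA, the split piece needs a file offset that accounts for the pages
     * skipped at the front; this is the expression used in mprotect_fixup(). */
    static unsigned long split_pgoff(unsigned long vm_start, unsigned long vm_pgoff,
                                     unsigned long start)
    {
        return vm_pgoff + ((start - vm_start) >> PAGE_SHIFT);
    }

    int main(void)
    {
        /* VMA mapping file pages starting at pgoff 10, split 3 pages in. */
        unsigned long pgoff = split_pgoff(0x400000, 10, 0x403000);

        printf("split piece maps from file page %lu\n", pgoff);
        assert(pgoff == 13);
        return 0;
    }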

memory.c
    516  vma->vm_ops ? vma->vm_ops->fault : NULL,    in print_bad_pte()
   2541  vm_len = vma->vm_end - vma->vm_start;    in vm_iomap_memory()
   2802  struct vm_area_struct *vma = vmf->vma;    in __wp_page_copy_user()    local
   2949  struct vm_area_struct *vma = vmf->vma;    in fault_dirty_shared_page()    local
   3003  struct vm_area_struct *vma = vmf->vma;    in wp_page_reuse()    local
   3047  struct vm_area_struct *vma = vmf->vma;    in wp_page_copy()    local
   3246  struct vm_area_struct *vma = vmf->vma;    in wp_pfn_shared()    local
   4963  .vma = vma,    in __handle_mm_fault()
   5599  if (vma && vma->vm_file) {    in print_vma_addr()
   5610  vma->vm_end - vma->vm_start);    in print_vma_addr()
    [all …]

pgtable-generic.c
     65  int ptep_set_access_flags(struct vm_area_struct *vma,    in ptep_set_access_flags()    argument
     71  set_pte_at(vma->vm_mm, address, ptep, entry);    in ptep_set_access_flags()
     72  flush_tlb_fix_spurious_fault(vma, address);    in ptep_set_access_flags()
     79  int ptep_clear_flush_young(struct vm_area_struct *vma,    in ptep_clear_flush_young()    argument
     85  flush_tlb_page(vma, address);    in ptep_clear_flush_young()
     94  struct mm_struct *mm = (vma)->vm_mm;    in ptep_clear_flush()
     98  flush_tlb_page(vma, address);    in ptep_clear_flush()
    106  int pmdp_set_access_flags(struct vm_area_struct *vma,    in pmdp_set_access_flags()    argument
    113  set_pmd_at(vma->vm_mm, address, pmdp, entry);    in pmdp_set_access_flags()
    121  int pmdp_clear_flush_young(struct vm_area_struct *vma,    in pmdp_clear_flush_young()    argument
    [all …]
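
The generic helpers share one idiom: compare the old and new entry, write the new value only when they differ, and pay for a TLB flush only in that case, returning whether anything changed. A schematic of that set-and-conditionally-flush shape, with page table entries reduced to plain integers and a stub counting the flushes:

    #include <stdbool.h>
    #include <stdio.h>

    typedef unsigned long pte_val_t;

    static int tlb_flushes;    /* count flushes so the example shows the saving */

    static void flush_tlb_page_stub(unsigned long address)
    {
        (void)address;
        tlb_flushes++;
    }

    /* Shape of ptep_set_access_flags(): only touch hardware state when the
     * entry actually changed, and tell the caller whether it did. */
    static bool set_access_flags(pte_val_t *ptep, pte_val_t entry, unsigned long address)
    {
        bool changed = (*ptep != entry);

        if (changed) {
            *ptep = entry;                  /* set_pte_at()                     */
            flush_tlb_page_stub(address);   /* flush_tlb_fix_spurious_fault()   */
        }
        return changed;
    }

    int main(void)
    {
        pte_val_t pte = 0x1005;

        set_access_flags(&pte, 0x1007, 0x400000); /* dirty+young: flush   */
        set_access_flags(&pte, 0x1007, 0x400000); /* no change: no flush  */
        printf("flushes: %d\n", tlb_flushes);     /* prints 1             */
        return 0;
    }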

huge_memory.c
    658  struct vm_area_struct *vma = vmf->vma;    in __do_huge_pmd_anonymous_page()    local
    784  struct vm_area_struct *vma = vmf->vma;    in do_huge_pmd_anonymous_page()    local
    793  khugepaged_enter_vma(vma, vma->vm_flags);    in do_huge_pmd_anonymous_page()
    907  struct vm_area_struct *vma = vmf->vma;    in vmf_insert_pfn_pmd_prot()    local
    998  struct vm_area_struct *vma = vmf->vma;    in vmf_insert_pfn_pud_prot()    local
   1305  struct vm_area_struct *vma = vmf->vma;    in do_huge_pmd_wp_page()    local
   1312  VM_BUG_ON_VMA(!vma->anon_vma, vma);    in do_huge_pmd_wp_page()
   1504  struct vm_area_struct *vma = vmf->vma;    in do_huge_pmd_numa_page()    local
   2001  VM_BUG_ON_VMA(vma->vm_start > haddr, vma);    in __split_huge_pud_locked()
   3216  struct vm_area_struct *vma = pvmw->vma;    in set_pmd_migration_entry()    local
    [all …]

rmap.c
    153  avc->vma = vma;    in anon_vma_chain_link()
    345  vma->anon_vma = NULL;    in anon_vma_fork()
    356  if (vma->anon_vma)    in anon_vma_fork()
    392  unlink_anon_vmas(vma);    in anon_vma_fork()
    423  if (vma->anon_vma) {    in unlink_anon_vmas()
    940  struct vm_area_struct *vma = pvmw->vma;    in page_vma_mkclean_one()    local
   1069  .vma = vma,    in pfn_mkclean_range()
   1286  VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);    in folio_add_new_anon_rmap()
   2190  vma->vm_mm, address, min(vma->vm_end,    in page_make_device_exclusive_one()
   2424  struct vm_area_struct *vma = avc->vma;    in rmap_walk_anon()    local
    [all …]

/linux-6.3-rc2/drivers/gpu/drm/msm/

msm_gem_vma.c
     43  if (vma->inuse > 0)    in msm_gem_vma_inuse()
     49  if (!msm_fence_completed(vma->fctx[idx], vma->fence[idx]))    in msm_gem_vma_inuse()
     68  if (!vma->mapped)    in msm_gem_purge_vma()
     83  vma->inuse--;    in msm_gem_unpin_vma()
    107  vma->inuse++;    in msm_gem_map_vma()
    109  if (vma->mapped)    in msm_gem_map_vma()
    120  vma->inuse--;    in msm_gem_map_vma()
    130  GEM_WARN_ON(msm_gem_vma_inuse(vma) || vma->mapped);    in msm_gem_close_vma()
    133  if (vma->iova)    in msm_gem_close_vma()
    137  vma->iova = 0;    in msm_gem_close_vma()
    [all …]
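
msm_gem_vma_inuse() combines a CPU-side inuse pin count with per-ring GPU fences: the vma stays busy until both the count drops to zero and every recorded fence has signalled. A compact model of that check; NUM_RINGS, the completed[] counters, and fence_completed() are invented stand-ins for msm_fence_completed() and the fctx array:

    #include <stdbool.h>
    #include <stdio.h>

    #define NUM_RINGS 2

    struct toy_vma {
        int inuse;                 /* CPU-side pin count      */
        unsigned fence[NUM_RINGS]; /* last GPU fence per ring */
    };

    /* Pretend fence completion state, one counter per ring. */
    static unsigned completed[NUM_RINGS];

    static bool fence_completed(int ring, unsigned fence)
    {
        return completed[ring] >= fence;
    }

    /* Mirrors msm_gem_vma_inuse(): busy if pinned, or if any ring still
     * has an outstanding fence recorded against this vma. */
    static bool vma_inuse(const struct toy_vma *vma)
    {
        if (vma->inuse > 0)
            return true;
        for (int i = 0; i < NUM_RINGS; i++)
            if (!fence_completed(i, vma->fence[i]))
                return true;
        return false;
    }

    int main(void)
    {
        struct toy_vma vma = { .inuse = 0, .fence = { 5, 0 } };

        printf("inuse: %d\n", vma_inuse(&vma)); /* 1: ring 0 fence 5 pending */
        completed[0] = 5;
        printf("inuse: %d\n", vma_inuse(&vma)); /* 0: fence retired          */
        return 0;
    }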

/linux-6.3-rc2/drivers/gpu/drm/i915/display/

intel_fb_pin.c
     30  struct i915_vma *vma;    in intel_pin_fb_obj_dpt()    local
     74  if (IS_ERR(vma)) {    in intel_pin_fb_obj_dpt()
     94  vma->display_alignment = max(vma->display_alignment, alignment);    in intel_pin_fb_obj_dpt()
     98  i915_vma_get(vma);    in intel_pin_fb_obj_dpt()
    102  return vma;    in intel_pin_fb_obj_dpt()
    207  if (vma->fence)    in intel_pin_and_fence_fb_obj()
    227  return vma;    in intel_pin_and_fence_fb_obj()
    253  if (IS_ERR(vma))    in intel_plane_pin_fb()
    289  if (vma)    in intel_plane_unpin_fb()
    295  if (vma)    in intel_plane_unpin_fb()
    [all …]

/linux-6.3-rc2/drivers/gpu/drm/nouveau/

nouveau_vmm.c
     31  if (vma->mem) {    in nouveau_vma_unmap()
     32  nvif_vmm_unmap(&vma->vmm->vmm, vma->addr);    in nouveau_vma_unmap()
     33  vma->mem = NULL;    in nouveau_vma_unmap()
     44  vma->mem = mem;    in nouveau_vma_map()
     55  return vma;    in nouveau_vma_find()
     65  if (vma && --vma->refs <= 0) {    in nouveau_vma_del()
     86  vma->refs++;    in nouveau_vma_new()
     90  if (!(vma = *pvma = kmalloc(sizeof(*vma), GFP_KERNEL)))    in nouveau_vma_new()
     92  vma->vmm = vmm;    in nouveau_vma_new()
     93  vma->refs = 1;    in nouveau_vma_new()
    [all …]
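
The nouveau_vmm.c hits sketch a find-or-create pattern with manual reference counting: nouveau_vma_new() bumps refs on an existing mapping or allocates a fresh one with refs = 1, and nouveau_vma_del() tears the mapping down once --vma->refs reaches zero. A userspace model of the same lifecycle, keyed on an opaque address-space pointer for brevity:

    #include <stdio.h>
    #include <stdlib.h>

    struct toy_vma {
        struct toy_vma *next;
        void *vmm;    /* address space this vma lives in */
        int refs;
    };

    static struct toy_vma *vma_list;

    /* nouveau_vma_find() analogue: linear search keyed on the address space. */
    static struct toy_vma *vma_find(void *vmm)
    {
        for (struct toy_vma *v = vma_list; v; v = v->next)
            if (v->vmm == vmm)
                return v;
        return NULL;
    }

    /* nouveau_vma_new() analogue: reuse an existing mapping, else refs = 1. */
    static struct toy_vma *vma_new(void *vmm)
    {
        struct toy_vma *v = vma_find(vmm);

        if (v) {
            v->refs++;
            return v;
        }
        v = calloc(1, sizeof(*v));
        if (!v)
            return NULL;
        v->vmm = vmm;
        v->refs = 1;
        v->next = vma_list;
        vma_list = v;
        return v;
    }

    /* nouveau_vma_del() analogue: the last reference unlinks and frees. */
    static void vma_del(struct toy_vma *v)
    {
        if (v && --v->refs <= 0) {
            for (struct toy_vma **pp = &vma_list; *pp; pp = &(*pp)->next) {
                if (*pp == v) {
                    *pp = v->next;
                    break;
                }
            }
            free(v);
        }
    }

    int main(void)
    {
        int as;    /* stand-in for a vmm */
        struct toy_vma *a = vma_new(&as);
        struct toy_vma *b = vma_new(&as);    /* same vma, refs == 2 */

        printf("same=%d refs=%d\n", a == b, a->refs);
        vma_del(b);
        vma_del(a);    /* freed here */
        return 0;
    }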

/linux-6.3-rc2/include/linux/

userfaultfd_k.h
     83  return vma->vm_userfaultfd_ctx.ctx == vm_ctx.ctx;    in is_mergeable_vm_userfaultfd_ctx()
     99  return vma->vm_flags & (VM_UFFD_WP | VM_UFFD_MINOR);    in uffd_disable_huge_pmd_share()
    116  return vma->vm_flags & VM_UFFD_MISSING;    in userfaultfd_missing()
    121  return vma->vm_flags & VM_UFFD_WP;    in userfaultfd_wp()
    126  return vma->vm_flags & VM_UFFD_MINOR;    in userfaultfd_minor()
    132  return userfaultfd_wp(vma) && pte_uffd_wp(pte);    in userfaultfd_pte_wp()
    138  return userfaultfd_wp(vma) && pmd_uffd_wp(pmd);    in userfaultfd_huge_pmd_wp()
    143  return vma->vm_flags & __VM_UFFD_FLAGS;    in userfaultfd_armed()
    150  (!is_vm_hugetlb_page(vma) && !vma_is_shmem(vma)))    in vma_can_userfault()
    161  return vma_is_anonymous(vma) || is_vm_hugetlb_page(vma) ||    in vma_can_userfault()
    [all …]
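
Nearly every helper above is a one-line bit test against vma->vm_flags. A self-contained rendition of the pattern; the flag values here are made up for the example, the real VM_UFFD_* bits are defined in mm.h:

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative flag values, not the kernel's real bit assignments. */
    #define VM_UFFD_MISSING  0x1UL
    #define VM_UFFD_WP       0x2UL
    #define VM_UFFD_MINOR    0x4UL
    #define __VM_UFFD_FLAGS  (VM_UFFD_MISSING | VM_UFFD_WP | VM_UFFD_MINOR)

    struct toy_vma { unsigned long vm_flags; };

    /* Each predicate is a single bit test, as in userfaultfd_k.h. */
    static bool uffd_missing(const struct toy_vma *v) { return v->vm_flags & VM_UFFD_MISSING; }
    static bool uffd_wp(const struct toy_vma *v)      { return v->vm_flags & VM_UFFD_WP; }
    static bool uffd_armed(const struct toy_vma *v)   { return v->vm_flags & __VM_UFFD_FLAGS; }

    int main(void)
    {
        struct toy_vma v = { .vm_flags = VM_UFFD_WP };

        printf("missing=%d wp=%d armed=%d\n",
               uffd_missing(&v), uffd_wp(&v), uffd_armed(&v)); /* 0 1 1 */
        return 0;
    }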

huge_mm.h
     17  struct vm_area_struct *vma);
    144  if (!vma_is_anonymous(vma)) {    in transhuge_vma_suitable()
    145  if (!IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,    in transhuge_vma_suitable()
    152  if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)    in transhuge_vma_suitable()
    161  if (!vma->vm_file)    in file_thp_enabled()
    164  inode = vma->vm_file->f_inode;    in file_thp_enabled()
    167  (vma->vm_flags & VM_EXEC) &&    in file_thp_enabled()
    236  struct vm_area_struct *vma)    in pmd_trans_huge_lock()    argument
    244  struct vm_area_struct *vma)    in pud_trans_huge_lock()    argument
    379  struct vm_area_struct *vma)    in pmd_trans_huge_lock()    argument
    [all …]
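
Lines 144-152 are the heart of transhuge_vma_suitable(): a file-backed range can only be mapped with a PMD if its virtual address and file offset are congruent modulo the huge page size, and the PMD-aligned address plus one huge page still fits inside the VMA. A condensed, runnable version of that test, assuming x86-64 2 MiB geometry:

    #include <stdbool.h>
    #include <stdio.h>

    #define PAGE_SHIFT      12
    #define HPAGE_PMD_SHIFT 21    /* 2 MiB PMD on x86-64 */
    #define HPAGE_PMD_SIZE  (1UL << HPAGE_PMD_SHIFT)
    #define HPAGE_PMD_NR    (1UL << (HPAGE_PMD_SHIFT - PAGE_SHIFT))
    #define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

    struct toy_vma {
        unsigned long vm_start, vm_end, vm_pgoff;
        bool anonymous;
    };

    /* Condensed transhuge_vma_suitable(): a file mapping can use a PMD only
     * if virtual and file offsets agree modulo the huge page size, and the
     * aligned address plus one huge page stays inside the VMA. */
    static bool thp_suitable(const struct toy_vma *vma, unsigned long addr)
    {
        unsigned long haddr = addr & ~(HPAGE_PMD_SIZE - 1);

        if (!vma->anonymous &&
            !IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff, HPAGE_PMD_NR))
            return false;
        return haddr >= vma->vm_start && haddr + HPAGE_PMD_SIZE <= vma->vm_end;
    }

    int main(void)
    {
        struct toy_vma vma = {
            .vm_start = 0x200000, .vm_end = 0x600000, .vm_pgoff = 512,
        };

        printf("%d\n", thp_suitable(&vma, 0x300000)); /* 1: offsets line up  */
        vma.vm_pgoff = 513;
        printf("%d\n", thp_suitable(&vma, 0x300000)); /* 0: skewed by a page */
        return 0;
    }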

/linux-6.3-rc2/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/

vmm.c
    870  if (addr >= vma->addr + vma->size)    in nvkm_vmm_node_search()
    931  if (!(vma = nvkm_vma_tail(vma, vma->size + vma->addr - addr)))    in nvkm_vmm_node_split()
    954  vma->addr, (u64)vma->size,    in nvkm_vma_dump()
   1154  if (vma->addr == addr && vma->part && (prev = node(vma, prev))) {    in nvkm_vmm_pfn_split_merge()
   1182  if (!vma->mapped || vma->memory)    in nvkm_vmm_pfn_unmap()
   1196  } while ((vma = node(vma, next)) && (start = vma->addr) < limit);    in nvkm_vmm_pfn_unmap()
   1254  if (!vma->mapref || vma->memory) {    in nvkm_vmm_pfn_map()
   1310  vma = node(vma, next);    in nvkm_vmm_pfn_map()
   1544  if (vma->mapref || !vma->sparse) {    in nvkm_vmm_put_locked()
   1585  if (vma->sparse && !vma->mapref) {    in nvkm_vmm_put_locked()
    [all …]
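
Line 870 is the ordering test of nvkm_vmm_node_search(): given nodes keyed by [addr, addr + size), an address at or past vma->addr + vma->size means the match lies to the right. The real code descends a tree; a binary search over a sorted array shows the same comparison logic:

    #include <stdio.h>

    struct node { unsigned long addr, size; };

    /* Shape of nvkm_vmm_node_search(): in an address-ordered structure, go
     * right while addr lies past the node, left while it lies before, and
     * stop when addr falls inside [addr, addr + size). */
    static const struct node *node_search(const struct node *nodes, int n,
                                          unsigned long addr)
    {
        int lo = 0, hi = n - 1;

        while (lo <= hi) {
            int mid = lo + (hi - lo) / 2;
            const struct node *v = &nodes[mid];

            if (addr >= v->addr + v->size)
                lo = mid + 1;    /* past this node   */
            else if (addr < v->addr)
                hi = mid - 1;    /* before this node */
            else
                return v;        /* inside this node */
        }
        return NULL;
    }

    int main(void)
    {
        static const struct node map[] = {
            { 0x0000, 0x1000 }, { 0x1000, 0x3000 }, { 0x8000, 0x2000 },
        };
        const struct node *v = node_search(map, 3, 0x2400);

        if (v)
            printf("hit node %#lx+%#lx\n", v->addr, v->size); /* 0x1000+0x3000 */
        return 0;
    }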

uvmm.c
    118  if (ret = -ENOENT, !vma || vma->addr != addr) {    in nvkm_uvmm_mthd_unmap()
    120  addr, vma ? vma->addr : ~0ULL);    in nvkm_uvmm_mthd_unmap()
    179  if (ret = -EINVAL, vma->mapped && !vma->memory) {    in nvkm_uvmm_mthd_map()
    185  if (addr + size > vma->addr + vma->size || vma->memory ||    in nvkm_uvmm_mthd_map()
    186  (vma->refd == NVKM_VMA_PAGE_NONE && !vma->mapref)) {    in nvkm_uvmm_mthd_map()
    189  !!vma->memory, vma->refd, vma->mapref,    in nvkm_uvmm_mthd_map()
    190  addr, size, vma->addr, (u64)vma->size);    in nvkm_uvmm_mthd_map()
    194  vma = nvkm_vmm_node_split(vmm, vma, addr, size);    in nvkm_uvmm_mthd_map()
    195  if (!vma) {    in nvkm_uvmm_mthd_map()
    237  if (ret = -ENOENT, !vma || vma->addr != addr || vma->part) {    in nvkm_uvmm_mthd_put()
    [all …]

/linux-6.3-rc2/drivers/gpu/drm/i915/selftests/

i915_gem_gtt.c
    477  __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),    in fill_hole()
    510  __func__, p->name, vma->node.start, vma->node.size,    in fill_hole()
    519  __func__, p->name, vma->node.start, vma->node.size,    in fill_hole()
    556  __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),    in fill_hole()
    589  __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),    in fill_hole()
    598  __func__, p->name, vma->node.start, vma->node.size,    in fill_hole()
   1439  vma->resource->bi.pages = vma->pages;    in track_vma_bind()
   1442  list_move_tail(&vma->vm_link, &vma->vm->bound_list);    in track_vma_bind()
   1576  vma->node.start, vma->node.size,    in igt_gtt_reserve()
   1622  vma->node.start, vma->node.size,    in igt_gtt_reserve()
    [all …]

i915_vma.c
     73  return vma;    in checked_vma_instance()
     93  if (i915_vma_compare(vma, vma->vm,    in checked_vma_instance()
     94  i915_vma_is_ggtt(vma) ? &vma->gtt_view : NULL)) {    in checked_vma_instance()
    104  return vma;    in checked_vma_instance()
    656  if (vma->node.size < vma->size) {    in igt_vma_rotate_remap()
    658  vma->size, vma->node.size);    in igt_vma_rotate_remap()
    756  if (vma->node.size < vma->size) {    in assert_pin()
    758  name, vma->size, vma->node.size);    in assert_pin()
    769  if (vma->pages == vma->obj->mm.pages) {    in assert_pin()
    781  if (vma->pages != vma->obj->mm.pages) {    in assert_pin()
    [all …]

/linux-6.3-rc2/drivers/pci/

mmap.c
     23  struct vm_area_struct *vma,    in pci_mmap_resource_range()    argument
     30  if (vma->vm_pgoff + vma_pages(vma) > size)    in pci_mmap_resource_range()
     34  vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);    in pci_mmap_resource_range()
     36  vma->vm_page_prot = pgprot_device(vma->vm_page_prot);    in pci_mmap_resource_range()
     39  ret = pci_iobar_pfn(pdev, bar, vma);    in pci_mmap_resource_range()
     43  vma->vm_pgoff += (pci_resource_start(pdev, bar) >> PAGE_SHIFT);    in pci_mmap_resource_range()
     45  vma->vm_ops = &pci_phys_vm_ops;    in pci_mmap_resource_range()
     47  return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,    in pci_mmap_resource_range()
     48  vma->vm_end - vma->vm_start,    in pci_mmap_resource_range()
     49  vma->vm_page_prot);    in pci_mmap_resource_range()
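
Line 30 is the whole safety argument of pci_mmap_resource_range(): the user-requested window, expressed as a page offset plus a length in pages, must not run past the end of the BAR. A standalone check with the same arithmetic; mmap_fits_bar() is a hypothetical helper, and note the real function receives the BAR size already converted to pages:

    #include <stdbool.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* vma_pages() analogue: length of the mapping in pages. */
    static unsigned long vma_pages(unsigned long vm_start, unsigned long vm_end)
    {
        return (vm_end - vm_start) >> PAGE_SHIFT;
    }

    /* The guard at the top of pci_mmap_resource_range(): the requested
     * window (offset + length, both in pages) must fit inside the BAR. */
    static bool mmap_fits_bar(unsigned long vm_pgoff, unsigned long vm_start,
                              unsigned long vm_end, unsigned long bar_bytes)
    {
        unsigned long bar_pages = (bar_bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;

        return vm_pgoff + vma_pages(vm_start, vm_end) <= bar_pages;
    }

    int main(void)
    {
        /* 64 KiB BAR: mapping 4 pages at page offset 12 fits, at 13 does not. */
        printf("%d\n", mmap_fits_bar(12, 0, 4 * PAGE_SIZE, 64 * 1024)); /* 1 */
        printf("%d\n", mmap_fits_bar(13, 0, 4 * PAGE_SIZE, 64 * 1024)); /* 0 */
        return 0;
    }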

/linux-6.3-rc2/fs/proc/

task_nommu.c
     29  for_each_vma(vmi, vma) {    in task_mem()
     30  bytes += kobjsize(vma);    in task_mem()
     37  size = vma->vm_end - vma->vm_start;    in task_mem()
     89  vsize += vma->vm_end - vma->vm_start;    in task_vsize()
    150  file = vma->vm_file;    in nommu_vma_show()
    162  vma->vm_start,    in nommu_vma_show()
    163  vma->vm_end,    in nommu_vma_show()
    218  if (vma)    in m_start()
    219  return vma;    in m_start()
    244  *pos = vma->vm_end;    in m_next()
    [all …]

task_mmu.c
    251  vma->vm_end >= vma->vm_mm->start_stack;    in is_stack()
    317  if (vma->vm_ops && vma->vm_ops->name) {    in show_map_vma()
    318  name = vma->vm_ops->name(vma);    in show_map_vma()
    511  struct vm_area_struct *vma = walk->vma;    in smaps_pte_hole()    local
    537  struct vm_area_struct *vma = walk->vma;    in smaps_pte_entry()    local
    583  struct vm_area_struct *vma = walk->vma;    in smaps_pmd_entry()    local
    623  struct vm_area_struct *vma = walk->vma;    in smaps_pte_range()    local
    736  struct vm_area_struct *vma = walk->vma;    in smaps_hugetlb_range()    local
   1169  struct vm_area_struct *vma = walk->vma;    in clear_refs_pte_range()    local
   1228  struct vm_area_struct *vma = walk->vma;    in clear_refs_test_walk()    local
    [all …]