Lines Matching refs:vma

180 struct vm_area_struct *vma; in get_next_vma() local
186 vma = lock_next_vma(priv->mm, &priv->iter, last_pos); in get_next_vma()
187 if (!IS_ERR_OR_NULL(vma)) in get_next_vma()
188 priv->locked_vma = vma; in get_next_vma()
190 return vma; in get_next_vma()
238 struct vm_area_struct *vma; in proc_get_vma() local
241 vma = get_next_vma(priv, *ppos); in proc_get_vma()
243 if (IS_ERR(vma)) { in proc_get_vma()
244 if (PTR_ERR(vma) == -EAGAIN && fallback_to_mmap_lock(priv, *ppos)) in proc_get_vma()
247 return vma; in proc_get_vma()
252 if (vma) { in proc_get_vma()
258 *ppos = vma->vm_end; in proc_get_vma()
261 vma = get_gate_vma(priv->mm); in proc_get_vma()
264 return vma; in proc_get_vma()
371 static void get_vma_name(struct vm_area_struct *vma, in get_vma_name() argument
376 struct anon_vma_name *anon_name = vma->vm_mm ? anon_vma_name(vma) : NULL; in get_vma_name()
386 if (vma->vm_file) { in get_vma_name()
395 *path = file_user_path(vma->vm_file); in get_vma_name()
400 if (vma->vm_ops && vma->vm_ops->name) { in get_vma_name()
401 *name = vma->vm_ops->name(vma); in get_vma_name()
406 *name = arch_vma_name(vma); in get_vma_name()
410 if (!vma->vm_mm) { in get_vma_name()
415 if (vma_is_initial_heap(vma)) { in get_vma_name()
420 if (vma_is_initial_stack(vma)) { in get_vma_name()
453 show_map_vma(struct seq_file *m, struct vm_area_struct *vma) in show_map_vma() argument
457 vm_flags_t flags = vma->vm_flags; in show_map_vma()
463 if (vma->vm_file) { in show_map_vma()
464 const struct inode *inode = file_user_inode(vma->vm_file); in show_map_vma()
468 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT; in show_map_vma()
471 start = vma->vm_start; in show_map_vma()
472 end = vma->vm_end; in show_map_vma()
475 get_vma_name(vma, &path, &name, &name_fmt); in show_map_vma()
525 static void query_vma_teardown(struct mm_struct *mm, struct vm_area_struct *vma) in query_vma_teardown() argument
538 struct vm_area_struct *vma; in query_matching_vma() local
541 vma = query_vma_find_by_addr(mm, addr); in query_matching_vma()
542 if (!vma) in query_matching_vma()
546 if ((flags & PROCMAP_QUERY_FILE_BACKED_VMA) && !vma->vm_file) in query_matching_vma()
562 if ((vma->vm_flags & perm) != perm) in query_matching_vma()
567 if ((flags & PROCMAP_QUERY_COVERING_OR_NEXT_VMA) || vma->vm_start <= addr) in query_matching_vma()
568 return vma; in query_matching_vma()
574 addr = vma->vm_end; in query_matching_vma()
585 struct vm_area_struct *vma; in do_procmap_query() local
623 vma = query_matching_vma(mm, karg.query_addr, karg.query_flags); in do_procmap_query()
624 if (IS_ERR(vma)) { in do_procmap_query()
625 err = PTR_ERR(vma); in do_procmap_query()
626 vma = NULL; in do_procmap_query()
630 karg.vma_start = vma->vm_start; in do_procmap_query()
631 karg.vma_end = vma->vm_end; in do_procmap_query()
634 if (vma->vm_flags & VM_READ) in do_procmap_query()
636 if (vma->vm_flags & VM_WRITE) in do_procmap_query()
638 if (vma->vm_flags & VM_EXEC) in do_procmap_query()
640 if (vma->vm_flags & VM_MAYSHARE) in do_procmap_query()
643 karg.vma_page_size = vma_kernel_pagesize(vma); in do_procmap_query()
645 if (vma->vm_file) { in do_procmap_query()
646 const struct inode *inode = file_user_inode(vma->vm_file); in do_procmap_query()
648 karg.vma_offset = ((__u64)vma->vm_pgoff) << PAGE_SHIFT; in do_procmap_query()
662 err = build_id_parse(vma, build_id_buf, &build_id_sz); in do_procmap_query()
680 get_vma_name(vma, &path, &name, &name_fmt); in do_procmap_query()
708 query_vma_teardown(mm, vma); in do_procmap_query()
728 query_vma_teardown(mm, vma); in do_procmap_query()
911 struct vm_area_struct *vma = walk->vma; in smaps_pte_hole() local
913 mss->swap += shmem_partial_swap_usage(walk->vma->vm_file->f_mapping, in smaps_pte_hole()
914 linear_page_index(vma, addr), in smaps_pte_hole()
915 linear_page_index(vma, end)); in smaps_pte_hole()
937 struct vm_area_struct *vma = walk->vma; in smaps_pte_entry() local
938 bool locked = !!(vma->vm_flags & VM_LOCKED); in smaps_pte_entry()
944 page = vm_normal_page(vma, addr, ptent); in smaps_pte_entry()
985 struct vm_area_struct *vma = walk->vma; in smaps_pmd_entry() local
986 bool locked = !!(vma->vm_flags & VM_LOCKED); in smaps_pmd_entry()
992 page = vm_normal_page_pmd(vma, addr, *pmd); in smaps_pmd_entry()
1025 struct vm_area_struct *vma = walk->vma; in smaps_pte_range() local
1029 ptl = pmd_trans_huge_lock(pmd, vma); in smaps_pte_range()
1036 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in smaps_pte_range()
1049 static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma) in show_smap_vma_flags() argument
1138 if (vma->vm_flags & (1UL << i)) in show_smap_vma_flags()
1150 struct vm_area_struct *vma = walk->vma; in smaps_hugetlb_range() local
1156 ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte); in smaps_hugetlb_range()
1172 mss->shared_hugetlb += huge_page_size(hstate_vma(vma)); in smaps_hugetlb_range()
1174 mss->private_hugetlb += huge_page_size(hstate_vma(vma)); in smaps_hugetlb_range()
1202 static void smap_gather_stats(struct vm_area_struct *vma, in smap_gather_stats() argument
1208 if (start >= vma->vm_end) in smap_gather_stats()
1211 if (vma->vm_file && shmem_mapping(vma->vm_file->f_mapping)) { in smap_gather_stats()
1222 unsigned long shmem_swapped = shmem_swap_usage(vma); in smap_gather_stats()
1224 if (!start && (!shmem_swapped || (vma->vm_flags & VM_SHARED) || in smap_gather_stats()
1225 !(vma->vm_flags & VM_WRITE))) { in smap_gather_stats()
1234 walk_page_vma(vma, ops, mss); in smap_gather_stats()
1236 walk_page_range(vma->vm_mm, start, vma->vm_end, ops, mss); in smap_gather_stats()
1285 struct vm_area_struct *vma = v; in show_smap() local
1288 smap_gather_stats(vma, &mss, 0); in show_smap()
1290 show_map_vma(m, vma); in show_smap()
1292 SEQ_PUT_DEC("Size: ", vma->vm_end - vma->vm_start); in show_smap()
1293 SEQ_PUT_DEC(" kB\nKernelPageSize: ", vma_kernel_pagesize(vma)); in show_smap()
1294 SEQ_PUT_DEC(" kB\nMMUPageSize: ", vma_mmu_pagesize(vma)); in show_smap()
1300 !!thp_vma_allowable_orders(vma, vma->vm_flags, in show_smap()
1304 seq_printf(m, "ProtectionKey: %8u\n", vma_pkey(vma)); in show_smap()
1305 show_smap_vma_flags(m, vma); in show_smap()
1315 struct vm_area_struct *vma; in show_smaps_rollup() local
1334 vma = vma_next(&vmi); in show_smaps_rollup()
1336 if (unlikely(!vma)) in show_smaps_rollup()
1339 vma_start = vma->vm_start; in show_smaps_rollup()
1341 smap_gather_stats(vma, &mss, 0); in show_smaps_rollup()
1342 last_vma_end = vma->vm_end; in show_smaps_rollup()
1393 vma = vma_next(&vmi); in show_smaps_rollup()
1395 if (!vma) in show_smaps_rollup()
1399 if (vma->vm_start >= last_vma_end) { in show_smaps_rollup()
1400 smap_gather_stats(vma, &mss, 0); in show_smaps_rollup()
1401 last_vma_end = vma->vm_end; in show_smaps_rollup()
1406 if (vma->vm_end > last_vma_end) { in show_smaps_rollup()
1407 smap_gather_stats(vma, &mss, last_vma_end); in show_smaps_rollup()
1408 last_vma_end = vma->vm_end; in show_smaps_rollup()
1411 } for_each_vma(vmi, vma); in show_smaps_rollup()
1515 static inline bool pte_is_pinned(struct vm_area_struct *vma, unsigned long addr, pte_t pte) in pte_is_pinned() argument
1521 if (!is_cow_mapping(vma->vm_flags)) in pte_is_pinned()
1523 if (likely(!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags))) in pte_is_pinned()
1525 folio = vm_normal_folio(vma, addr, pte); in pte_is_pinned()
1531 static inline void clear_soft_dirty(struct vm_area_struct *vma, in clear_soft_dirty() argument
1545 if (pte_is_pinned(vma, addr, ptent)) in clear_soft_dirty()
1547 old_pte = ptep_modify_prot_start(vma, addr, pte); in clear_soft_dirty()
1550 ptep_modify_prot_commit(vma, addr, pte, old_pte, ptent); in clear_soft_dirty()
1553 set_pte_at(vma->vm_mm, addr, pte, ptent); in clear_soft_dirty()
1557 static inline void clear_soft_dirty(struct vm_area_struct *vma, in clear_soft_dirty() argument
1564 static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma, in clear_soft_dirty_pmd() argument
1571 old = pmdp_invalidate(vma, addr, pmdp); in clear_soft_dirty_pmd()
1580 set_pmd_at(vma->vm_mm, addr, pmdp, pmd); in clear_soft_dirty_pmd()
1583 set_pmd_at(vma->vm_mm, addr, pmdp, pmd); in clear_soft_dirty_pmd()
1587 static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma, in clear_soft_dirty_pmd() argument
1597 struct vm_area_struct *vma = walk->vma; in clear_refs_pte_range() local
1602 ptl = pmd_trans_huge_lock(pmd, vma); in clear_refs_pte_range()
1605 clear_soft_dirty_pmd(vma, addr, pmd); in clear_refs_pte_range()
1615 pmdp_test_and_clear_young(vma, addr, pmd); in clear_refs_pte_range()
1623 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in clear_refs_pte_range()
1632 clear_soft_dirty(vma, addr, pte); in clear_refs_pte_range()
1639 folio = vm_normal_folio(vma, addr, ptent); in clear_refs_pte_range()
1644 ptep_test_and_clear_young(vma, addr, pte); in clear_refs_pte_range()
1657 struct vm_area_struct *vma = walk->vma; in clear_refs_test_walk() local
1659 if (vma->vm_flags & VM_PFNMAP) in clear_refs_test_walk()
1668 if (cp->type == CLEAR_REFS_ANON && vma->vm_file) in clear_refs_test_walk()
1670 if (cp->type == CLEAR_REFS_MAPPED && !vma->vm_file) in clear_refs_test_walk()
1687 struct vm_area_struct *vma; in clear_refs_write() local
1728 for_each_vma(vmi, vma) { in clear_refs_write()
1729 if (!(vma->vm_flags & VM_SOFTDIRTY)) in clear_refs_write()
1731 vm_flags_clear(vma, VM_SOFTDIRTY); in clear_refs_write()
1732 vma_set_page_prot(vma); in clear_refs_write()
1815 struct vm_area_struct *vma = find_vma(walk->mm, addr); in pagemap_pte_hole() local
1820 if (vma) in pagemap_pte_hole()
1821 hole_end = min(end, vma->vm_start); in pagemap_pte_hole()
1831 if (!vma) in pagemap_pte_hole()
1835 if (vma->vm_flags & VM_SOFTDIRTY) in pagemap_pte_hole()
1837 for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) { in pagemap_pte_hole()
1848 struct vm_area_struct *vma, unsigned long addr, pte_t pte) in pte_to_pagemap_entry() argument
1858 page = vm_normal_page(vma, addr, pte); in pte_to_pagemap_entry()
1900 if (vma->vm_flags & VM_SOFTDIRTY) in pte_to_pagemap_entry()
1909 struct vm_area_struct *vma = walk->vma; in pagemap_pmd_range() local
1916 ptl = pmd_trans_huge_lock(pmdp, vma); in pagemap_pmd_range()
1924 if (vma->vm_flags & VM_SOFTDIRTY) in pagemap_pmd_range()
2003 pme = pte_to_pagemap_entry(pm, vma, addr, ptep_get(pte)); in pagemap_pmd_range()
2022 struct vm_area_struct *vma = walk->vma; in pagemap_hugetlb_range() local
2028 if (vma->vm_flags & VM_SOFTDIRTY) in pagemap_hugetlb_range()
2031 ptl = huge_pte_lock(hstate_vma(vma), walk->mm, ptep); in pagemap_hugetlb_range()
2238 struct vm_area_struct *vma, in pagemap_page_category() argument
2251 page = vm_normal_page(vma, addr, pte); in pagemap_page_category()
2282 static void make_uffd_wp_pte(struct vm_area_struct *vma, in make_uffd_wp_pte() argument
2288 old_pte = ptep_modify_prot_start(vma, addr, pte); in make_uffd_wp_pte()
2290 ptep_modify_prot_commit(vma, addr, pte, old_pte, ptent); in make_uffd_wp_pte()
2293 set_pte_at(vma->vm_mm, addr, pte, ptent); in make_uffd_wp_pte()
2295 set_pte_at(vma->vm_mm, addr, pte, in make_uffd_wp_pte()
2302 struct vm_area_struct *vma, in pagemap_thp_category() argument
2315 page = vm_normal_page_pmd(vma, addr, pmd); in pagemap_thp_category()
2344 static void make_uffd_wp_pmd(struct vm_area_struct *vma, in make_uffd_wp_pmd() argument
2350 old = pmdp_invalidate_ad(vma, addr, pmdp); in make_uffd_wp_pmd()
2352 set_pmd_at(vma->vm_mm, addr, pmdp, pmd); in make_uffd_wp_pmd()
2355 set_pmd_at(vma->vm_mm, addr, pmdp, pmd); in make_uffd_wp_pmd()
2391 static void make_uffd_wp_huge_pte(struct vm_area_struct *vma, in make_uffd_wp_huge_pte() argument
2400 psize = huge_page_size(hstate_vma(vma)); in make_uffd_wp_huge_pte()
2403 set_huge_pte_at(vma->vm_mm, addr, ptep, in make_uffd_wp_huge_pte()
2406 huge_ptep_modify_prot_commit(vma, addr, ptep, ptent, in make_uffd_wp_huge_pte()
2409 set_huge_pte_at(vma->vm_mm, addr, ptep, in make_uffd_wp_huge_pte()
2457 struct vm_area_struct *vma = walk->vma; in pagemap_scan_test_walk() local
2459 bool wp_allowed = userfaultfd_wp_async(vma) && in pagemap_scan_test_walk()
2460 userfaultfd_wp_use_markers(vma); in pagemap_scan_test_walk()
2478 if (vma->vm_flags & VM_PFNMAP) in pagemap_scan_test_walk()
2484 if (vma->vm_flags & VM_SOFTDIRTY) in pagemap_scan_test_walk()
2564 struct vm_area_struct *vma = walk->vma; in pagemap_scan_thp_entry() local
2569 ptl = pmd_trans_huge_lock(pmd, vma); in pagemap_scan_thp_entry()
2574 pagemap_thp_category(p, vma, start, *pmd); in pagemap_scan_thp_entry()
2594 split_huge_pmd(vma, pmd, start); in pagemap_scan_thp_entry()
2600 make_uffd_wp_pmd(vma, start, pmd); in pagemap_scan_thp_entry()
2601 flush_tlb_range(vma, start, end); in pagemap_scan_thp_entry()
2614 struct vm_area_struct *vma = walk->vma; in pagemap_scan_pmd_entry() local
2625 start_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl); in pagemap_scan_pmd_entry()
2641 make_uffd_wp_pte(vma, addr, pte, ptent); in pagemap_scan_pmd_entry()
2665 make_uffd_wp_pte(vma, addr, pte, ptent); in pagemap_scan_pmd_entry()
2676 pagemap_page_category(p, vma, addr, ptent); in pagemap_scan_pmd_entry()
2691 make_uffd_wp_pte(vma, addr, pte, ptent); in pagemap_scan_pmd_entry()
2699 flush_tlb_range(vma, start, addr); in pagemap_scan_pmd_entry()
2714 struct vm_area_struct *vma = walk->vma; in pagemap_scan_hugetlb_entry() local
2732 i_mmap_lock_write(vma->vm_file->f_mapping); in pagemap_scan_hugetlb_entry()
2733 ptl = huge_pte_lock(hstate_vma(vma), vma->vm_mm, ptep); in pagemap_scan_hugetlb_entry()
2756 make_uffd_wp_huge_pte(vma, start, ptep, pte); in pagemap_scan_hugetlb_entry()
2757 flush_hugetlb_tlb_range(vma, start, end); in pagemap_scan_hugetlb_entry()
2761 i_mmap_unlock_write(vma->vm_file->f_mapping); in pagemap_scan_hugetlb_entry()
2773 struct vm_area_struct *vma = walk->vma; in pagemap_scan_pte_hole() local
2776 if (!vma || !pagemap_scan_is_interesting_page(p->cur_vma_category, p)) in pagemap_scan_pte_hole()
2786 err = uffd_wp_range(vma, addr, end - addr, true); in pagemap_scan_pte_hole()
3046 static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma, in can_gather_numa_stats() argument
3055 page = vm_normal_page(vma, addr, pte); in can_gather_numa_stats()
3071 struct vm_area_struct *vma, in can_gather_numa_stats_pmd() argument
3080 page = vm_normal_page_pmd(vma, addr, pmd); in can_gather_numa_stats_pmd()
3099 struct vm_area_struct *vma = walk->vma; in gather_pte_stats() local
3105 ptl = pmd_trans_huge_lock(pmd, vma); in gather_pte_stats()
3109 page = can_gather_numa_stats_pmd(*pmd, vma, addr); in gather_pte_stats()
3124 struct page *page = can_gather_numa_stats(ptent, vma, addr); in gather_pte_stats()
3143 ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte); in gather_hugetlb_stats()
3178 struct vm_area_struct *vma = v; in show_numa_map() local
3180 struct file *file = vma->vm_file; in show_numa_map()
3181 struct mm_struct *mm = vma->vm_mm; in show_numa_map()
3193 pol = __get_vma_policy(vma, vma->vm_start, &ilx); in show_numa_map()
3201 seq_printf(m, "%08lx %s", vma->vm_start, buffer); in show_numa_map()
3206 } else if (vma_is_initial_heap(vma)) { in show_numa_map()
3208 } else if (vma_is_initial_stack(vma)) { in show_numa_map()
3212 if (is_vm_hugetlb_page(vma)) in show_numa_map()
3216 walk_page_vma(vma, &show_numa_ops, md); in show_numa_map()
3236 if (md->active < md->pages && !is_vm_hugetlb_page(vma)) in show_numa_map()
3246 seq_printf(m, " kernelpagesize_kB=%lu", vma_kernel_pagesize(vma) >> 10); in show_numa_map()
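
Most of the matches above are mm_walk callbacks used by the /proc/<pid> smaps, pagemap, clear_refs and numa_maps code: each one reads walk->vma, first tries pmd_trans_huge_lock() in case the range is mapped by a single huge PMD, and otherwise takes pte_offset_map_lock() before inspecting individual PTEs. As a rough illustration of that recurring shape (a sketch, not the kernel's own code), the fragment below uses made-up names -- my_stats, my_pmd_entry, my_walk_ops and my_gather -- for the per-VMA accounting a walker might carry in walk->private.

#include <linux/mm.h>
#include <linux/pagewalk.h>
#include <linux/huge_mm.h>

/* Hypothetical per-walk accounting state; stands in for mem_size_stats etc. */
struct my_stats {
	unsigned long present_pages;
};

static int my_pmd_entry(pmd_t *pmd, unsigned long addr,
			unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct my_stats *stats = walk->private;
	spinlock_t *ptl;
	pte_t *pte;

	/* Whole range covered by one huge PMD: account it and return. */
	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		if (pmd_present(*pmd))
			stats->present_pages += (end - addr) >> PAGE_SHIFT;
		spin_unlock(ptl);
		return 0;
	}

	/* Otherwise map and lock the PTE page, then walk entry by entry. */
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	if (!pte) {
		walk->action = ACTION_AGAIN;	/* PTE table went away; retry */
		return 0;
	}
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		if (pte_present(ptep_get(pte)))
			stats->present_pages++;
	}
	pte_unmap_unlock(pte - 1, ptl);
	return 0;
}

static const struct mm_walk_ops my_walk_ops = {
	.pmd_entry	= my_pmd_entry,
	.walk_lock	= PGWALK_RDLOCK,
};

/* Caller side, one VMA at a time, as smap_gather_stats() does above. */
static int my_gather(struct vm_area_struct *vma, struct my_stats *stats)
{
	return walk_page_vma(vma, &my_walk_ops, stats);
}

The per-VMA iteration feeding such a walker is the other pattern visible above: VMA_ITERATOR()/for_each_vma() under the mmap lock (show_smaps_rollup, clear_refs_write), or per-VMA locking via lock_next_vma() with an -EAGAIN fallback to the full mmap lock (get_next_vma, proc_get_vma).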