/linux-6.3-rc2/mm/

page_vma_mapped.c
     18  pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);  in map_pte()
     51  pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);  in map_pte()
    107  return (pfn - pvmw->pfn) < pvmw->nr_pages;  in check_pte()
    115  if (pfn > pvmw->pfn + pvmw->nr_pages - 1)  in check_pmd()
    162  if (pvmw->pmd && !pvmw->pte)  in page_vma_mapped_walk()
    180  pvmw->ptl = huge_pte_lock(hstate, mm, pvmw->pte);  in page_vma_mapped_walk()
    207  pvmw->pmd = pmd_offset(pud, pvmw->address);  in page_vma_mapped_walk()
    217  pvmw->ptl = pmd_lock(mm, pvmw->pmd);  in page_vma_mapped_walk()
    278  if ((pvmw->flags & PVMW_SYNC) && !pvmw->ptl) {  in page_vma_mapped_walk()
    279  pvmw->ptl = pte_lockptr(mm, pvmw->pmd);  in page_vma_mapped_walk()
    [all …]

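Read together with the rmap.h entry further down, these fragments show the shape of a typical caller of page_vma_mapped_walk(). Below is a minimal, hedged sketch of such a walker, modelled on the page_idle.c and damon/paddr.c fragments in this listing; the function name and the "young" out-parameter are illustrative, not taken from the tree.

    /*
     * Sketch of an rmap walker over one VMA (needs <linux/rmap.h> and
     * <linux/mmu_notifier.h>). page_vma_mapped_walk() takes the page-table
     * lock for each mapping it reports: pvmw.pte is set for a PTE mapping,
     * while pvmw.pte == NULL with pvmw.pmd set means a PMD-mapped THP.
     */
    static bool sketch_clear_young_one(struct folio *folio,
                                       struct vm_area_struct *vma,
                                       unsigned long address, bool *young)
    {
        DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);

        while (page_vma_mapped_walk(&pvmw)) {
            if (pvmw.pte) {
                if (ptep_clear_young_notify(vma, pvmw.address, pvmw.pte))
                    *young = true;
            } else {
                if (pmdp_clear_young_notify(vma, pvmw.address, pvmw.pmd))
                    *young = true;
            }
            /*
             * A caller that stops early must call
             * page_vma_mapped_walk_done(&pvmw) before breaking; when the
             * loop ends on its own, the walk has already dropped its lock.
             */
        }
        return true;    /* continue with the next VMA */
    }
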
rmap.c
    815  address = pvmw.address;  in folio_referenced_one()
    826  if (pvmw.pte) {  in folio_referenced_one()
    833  pvmw.pte))  in folio_referenced_one()
    837  pvmw.pmd))  in folio_referenced_one()
    956  if (pvmw->pte) {  in page_vma_mkclean_one()
   1467  pvmw.flags = PVMW_SYNC;  in try_to_unmap_one()
   1511  address = pvmw.address;  in try_to_unmap_one()
   1827  pvmw.flags = PVMW_SYNC;  in try_to_migrate_one()
   1860  if (!pvmw.pte) {  in try_to_migrate_one()
   1895  address = pvmw.address;  in try_to_migrate_one()
    [all …]

page_idle.c
     56  DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);  in page_idle_clear_pte_refs_one()
     59  while (page_vma_mapped_walk(&pvmw)) {  in page_idle_clear_pte_refs_one()
     60  addr = pvmw.address;  in page_idle_clear_pte_refs_one()
     61  if (pvmw.pte) {  in page_idle_clear_pte_refs_one()
     66  if (ptep_clear_young_notify(vma, addr, pvmw.pte))  in page_idle_clear_pte_refs_one()
     69  if (pmdp_clear_young_notify(vma, addr, pvmw.pmd))  in page_idle_clear_pte_refs_one()

migrate.c
    189  while (page_vma_mapped_walk(&pvmw)) {  in remove_migration_pte()
    198  idx = linear_page_index(vma, pvmw.address) - pvmw.pgoff;  in remove_migration_pte()
    203  if (!pvmw.pte) {  in remove_migration_pte()
    206  remove_migration_pmd(&pvmw, new);  in remove_migration_pte()
    213  if (pte_swp_soft_dirty(*pvmw.pte))  in remove_migration_pte()
    219  entry = pte_to_swp_entry(*pvmw.pte);  in remove_migration_pte()
    242  if (pte_swp_soft_dirty(*pvmw.pte))  in remove_migration_pte()
    244  if (pte_swp_uffd_wp(*pvmw.pte))  in remove_migration_pte()
    259  set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);  in remove_migration_pte()
    268  set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);  in remove_migration_pte()
    [all …]

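The remove_migration_pte() fragments trace the restore path: the old migration entry is read back from the swap-style PTE, and a present PTE for the new page is built and installed. A condensed, hedged sketch of that PTE-level step follows (hugetlb, device-private pages and rmap/mlock bookkeeping omitted; "new" stands for the target struct page):

    /* Sketch only: runs with pvmw.pte valid under the walk's PTE lock. */
    swp_entry_t entry = pte_to_swp_entry(*pvmw.pte);
    pte_t pte = pte_mkold(mk_pte(new, vma->vm_page_prot));

    if (pte_swp_soft_dirty(*pvmw.pte))
        pte = pte_mksoft_dirty(pte);
    if (pte_swp_uffd_wp(*pvmw.pte))
        pte = pte_mkuffd_wp(pte);
    if (is_writable_migration_entry(entry))
        pte = maybe_mkwrite(pte, vma);

    set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
    update_mmu_cache(vma, pvmw.address, pvmw.pte);
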
ksm.c
   1048  DEFINE_PAGE_VMA_WALK(pvmw, page, vma, 0, 0);  in write_protect_page()
   1055  if (pvmw.address == -EFAULT)  in write_protect_page()
   1061  pvmw.address + PAGE_SIZE);  in write_protect_page()
   1064  if (!page_vma_mapped_walk(&pvmw))  in write_protect_page()
   1070  if (pte_write(*pvmw.pte) || pte_dirty(*pvmw.pte) ||  in write_protect_page()
   1090  entry = ptep_clear_flush(vma, pvmw.address, pvmw.pte);  in write_protect_page()
   1096  set_pte_at(mm, pvmw.address, pvmw.pte, entry);  in write_protect_page()
   1102  set_pte_at(mm, pvmw.address, pvmw.pte, entry);  in write_protect_page()
   1113  set_pte_at_notify(mm, pvmw.address, pvmw.pte, entry);  in write_protect_page()
   1115  *orig_pte = *pvmw.pte;  in write_protect_page()
    [all …]

internal.h
    611  static inline unsigned long vma_address_end(struct page_vma_mapped_walk *pvmw)  in vma_address_end() argument
    613  struct vm_area_struct *vma = pvmw->vma;  in vma_address_end()
    618  if (pvmw->nr_pages == 1)  in vma_address_end()
    619  return pvmw->address + PAGE_SIZE;  in vma_address_end()
    621  pgoff = pvmw->pgoff + pvmw->nr_pages;  in vma_address_end()

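Only the head of vma_address_end() shows up in the matches above. The rest of the helper, paraphrased from this kernel series (treat it as a sketch rather than the verbatim source), clamps the computed end of the folio's mapping to the VMA:

    static inline unsigned long vma_address_end(struct page_vma_mapped_walk *pvmw)
    {
        struct vm_area_struct *vma = pvmw->vma;
        pgoff_t pgoff;
        unsigned long address;

        /* Single-page walks (including KSM) span exactly one page. */
        if (pvmw->nr_pages == 1)
            return pvmw->address + PAGE_SIZE;

        /* End of the folio in file-offset terms, translated to an address. */
        pgoff = pvmw->pgoff + pvmw->nr_pages;
        address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
        /* A large folio may extend past the VMA: clamp to vm_end. */
        if (address < vma->vm_start || address > vma->vm_end)
            address = vma->vm_end;
        return address;
    }
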
huge_memory.c
   3216  struct vm_area_struct *vma = pvmw->vma;  in set_pmd_migration_entry()
   3218  unsigned long address = pvmw->address;  in set_pmd_migration_entry()
   3224  if (!(pvmw->pmd && !pvmw->pte))  in set_pmd_migration_entry()
   3252  set_pmd_at(mm, address, pvmw->pmd, pmdswp);  in set_pmd_migration_entry()
   3262  struct vm_area_struct *vma = pvmw->vma;  in remove_migration_pmd()
   3264  unsigned long address = pvmw->address;  in remove_migration_pmd()
   3269  if (!(pvmw->pmd && !pvmw->pte))  in remove_migration_pmd()
   3272  entry = pmd_to_swp_entry(*pvmw->pmd);  in remove_migration_pmd()
   3275  if (pmd_swp_soft_dirty(*pvmw->pmd))  in remove_migration_pmd()
   3277  if (pmd_swp_uffd_wp(*pvmw->pmd))  in remove_migration_pmd()
    [all …]

vmscan.c
   4591  void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)  in lru_gen_look_around() argument
   4598  pte_t *pte = pvmw->pte;  in lru_gen_look_around()
   4599  unsigned long addr = pvmw->address;  in lru_gen_look_around()
   4600  struct folio *folio = pfn_folio(pvmw->pfn);  in lru_gen_look_around()
   4607  lockdep_assert_held(pvmw->ptl);  in lru_gen_look_around()
   4610  if (spin_is_contended(pvmw->ptl))  in lru_gen_look_around()
   4616  start = max(addr & PMD_MASK, pvmw->vma->vm_start);  in lru_gen_look_around()
   4617  end = min(addr | ~PMD_MASK, pvmw->vma->vm_end - 1) + 1;  in lru_gen_look_around()
   4641  pfn = get_pte_pfn(pte[i], pvmw->vma, addr);  in lru_gen_look_around()
   4652  if (!ptep_test_and_clear_young(pvmw->vma, addr, pte + i))  in lru_gen_look_around()
    [all …]

/linux-6.3-rc2/mm/damon/

paddr.c
     24  while (page_vma_mapped_walk(&pvmw)) {  in __damon_pa_mkold()
     25  addr = pvmw.address;  in __damon_pa_mkold()
     26  if (pvmw.pte)  in __damon_pa_mkold()
     27  damon_ptep_mkold(pvmw.pte, vma->vm_mm, addr);  in __damon_pa_mkold()
     29  damon_pmdp_mkold(pvmw.pmd, vma->vm_mm, addr);  in __damon_pa_mkold()
     89  while (page_vma_mapped_walk(&pvmw)) {  in __damon_pa_young()
     90  addr = pvmw.address;  in __damon_pa_young()
     91  if (pvmw.pte) {  in __damon_pa_young()
     92  *accessed = pte_young(*pvmw.pte) ||  in __damon_pa_young()
     97  *accessed = pmd_young(*pvmw.pmd) ||  in __damon_pa_young()
    [all …]

/linux-6.3-rc2/include/linux/

rmap.h
    413  static inline void page_vma_mapped_walk_done(struct page_vma_mapped_walk *pvmw)  in page_vma_mapped_walk_done() argument
    416  if (pvmw->pte && !is_vm_hugetlb_page(pvmw->vma))  in page_vma_mapped_walk_done()
    417  pte_unmap(pvmw->pte);  in page_vma_mapped_walk_done()
    418  if (pvmw->ptl)  in page_vma_mapped_walk_done()
    419  spin_unlock(pvmw->ptl);  in page_vma_mapped_walk_done()
    422  bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw);

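All of the callers above hand the same walk state around; it is declared a little earlier in rmap.h than the lines matched here. The layout below is reproduced from memory of this kernel series and is best treated as a sketch (check the tree for the authoritative definition):

    struct page_vma_mapped_walk {
        unsigned long pfn;          /* first pfn of the page/folio to locate */
        unsigned long nr_pages;     /* number of pages the folio spans */
        pgoff_t pgoff;              /* file offset of the first page */
        struct vm_area_struct *vma;
        unsigned long address;      /* advanced as the walk proceeds */
        pmd_t *pmd;                 /* set when a PMD-level mapping is found */
        pte_t *pte;                 /* set (and mapped) for a PTE mapping */
        spinlock_t *ptl;            /* page-table lock held while pte/pmd is valid */
        unsigned int flags;         /* PVMW_SYNC, PVMW_MIGRATION */
    };

    #define DEFINE_FOLIO_VMA_WALK(name, _folio, _vma, _address, _flags) \
        struct page_vma_mapped_walk name = {                            \
            .pfn = folio_pfn(_folio),                                   \
            .nr_pages = folio_nr_pages(_folio),                         \
            .pgoff = folio_pgoff(_folio),                               \
            .vma = _vma,                                                \
            .address = _address,                                        \
            .flags = _flags,                                            \
        }
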
swapops.h
    494  extern int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
    497  extern void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
    527  static inline int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,  in set_pmd_migration_entry() argument
    533  static inline void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,  in remove_migration_pmd() argument

mmzone.h
    488  void lru_gen_look_around(struct page_vma_mapped_walk *pvmw);
    576  static inline void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)  in lru_gen_look_around() argument

/linux-6.3-rc2/kernel/events/

uprobes.c
    159  DEFINE_FOLIO_VMA_WALK(pvmw, old_folio, vma, addr, 0);  in __replace_page()
    178  if (!page_vma_mapped_walk(&pvmw))  in __replace_page()
    180  VM_BUG_ON_PAGE(addr != pvmw.address, old_page);  in __replace_page()
    195  flush_cache_page(vma, addr, pte_pfn(*pvmw.pte));  in __replace_page()
    196  ptep_clear_flush_notify(vma, addr, pvmw.pte);  in __replace_page()
    198  set_pte_at_notify(mm, addr, pvmw.pte,  in __replace_page()
    204  page_vma_mapped_walk_done(&pvmw);  in __replace_page()

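The __replace_page() fragments capture the core of swapping one page for another under the lock taken by the walk. A condensed, hedged sketch of that sequence (memcg charging, rmap add/remove and error paths omitted):

    /* Sketch: pvmw.pte is valid; new_page may be NULL on the unmap-only path. */
    flush_cache_page(vma, addr, pte_pfn(*pvmw.pte));
    ptep_clear_flush_notify(vma, addr, pvmw.pte);
    if (new_page)
        set_pte_at_notify(mm, addr, pvmw.pte,
                          mk_pte(new_page, vma->vm_page_prot));
    /* Drops the PTE lock that page_vma_mapped_walk() acquired. */
    page_vma_mapped_walk_done(&pvmw);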