| /linux/mm/ |
| page_vma_mapped.c |
      22  pvmw->pte = pte_offset_map_lock(pvmw->vma->vm_mm, pvmw->pmd,  in map_pte()
      23          pvmw->address, &pvmw->ptl);  in map_pte()
      35  pvmw->pte = pte_offset_map_nolock(pvmw->vma->vm_mm, pvmw->pmd,  in map_pte()
     129  return (pfn - pvmw->pfn) < pvmw->nr_pages;  in check_pte()
     137  if (pfn > pvmw->pfn + pvmw->nr_pages - 1)  in check_pmd()
     144  pvmw->address = (pvmw->address + size) & ~(size - 1);  in step_forward()
     185  if (pvmw->pmd && !pvmw->pte)  in page_vma_mapped_walk()
     199  pvmw->pte = hugetlb_walk(vma, pvmw->address, size);  in page_vma_mapped_walk()
     203  pvmw->ptl = huge_pte_lock(hstate, mm, pvmw->pte);  in page_vma_mapped_walk()
     230  pvmw->pmd = pmd_offset(pud, pvmw->address);  in page_vma_mapped_walk()
     [all …]
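
The hits above are the walker's internals; the usual caller-side shape, assembled from the DEFINE_FOLIO_VMA_WALK() users elsewhere in this listing, looks roughly like the sketch below (touch_one_mapping() is a hypothetical name, not kernel code):

    /*
     * Minimal caller-side sketch of driving the walker above; the loop
     * shape follows the DEFINE_FOLIO_VMA_WALK() users in this listing.
     * touch_one_mapping() is hypothetical.
     */
    #include <linux/mm.h>
    #include <linux/rmap.h>

    static void touch_one_mapping(struct folio *folio, struct vm_area_struct *vma,
                                  unsigned long address)
    {
            DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);

            while (page_vma_mapped_walk(&pvmw)) {
                    if (pvmw.pte) {
                            /* folio is mapped by a PTE at pvmw.address */
                    } else {
                            /* pvmw.pte == NULL: PMD-level mapping, see pvmw.pmd */
                    }
            }
    }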
|
| rmap.c |
     848  address = pvmw.address;  in folio_referenced_one()
     891  } else if (pvmw.pte) {  in folio_referenced_one()
     893  pvmw.pte))  in folio_referenced_one()
     897  pvmw.pmd))  in folio_referenced_one()
    1033  if (pvmw->pte) {  in page_vma_mkclean_one()
    1655  pvmw.flags = PVMW_SYNC;  in try_to_unmap_one()
    1693  if (!pvmw.pte) {  in try_to_unmap_one()
    1694  if (unmap_huge_pmd_locked(vma, pvmw.address, pvmw.pmd,  in try_to_unmap_one()
    1716  address = pvmw.address;  in try_to_unmap_one()
    2056  if (!pvmw.pte) {  in try_to_migrate_one()
    [all …]
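
The try_to_unmap_one() hits show the PVMW_SYNC flag being set and the !pvmw.pte test for a PMD-mapped THP. A hedged sketch of that shape (unmap_one_sketch() is hypothetical and omits the real TLB, mmu-notifier and rmap bookkeeping):

    /*
     * Hedged sketch: PVMW_SYNC requests a synchronous walk, and a NULL
     * pvmw.pte with pvmw.pmd set means the folio is mapped by a huge PMD.
     * unmap_one_sketch() is hypothetical.
     */
    #include <linux/mm.h>
    #include <linux/rmap.h>

    static void unmap_one_sketch(struct folio *folio, struct vm_area_struct *vma,
                                 unsigned long address, bool sync)
    {
            DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, sync ? PVMW_SYNC : 0);

            while (page_vma_mapped_walk(&pvmw)) {
                    if (!pvmw.pte) {
                            /* PMD-mapped THP: unmap or split the huge PMD here */
                            continue;
                    }
                    /* PTE-mapped: clear or replace the entry at pvmw.address */
            }
    }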
|
| page_idle.c |
      56  DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);  in page_idle_clear_pte_refs_one()
      59  while (page_vma_mapped_walk(&pvmw)) {  in page_idle_clear_pte_refs_one()
      60  addr = pvmw.address;  in page_idle_clear_pte_refs_one()
      61  if (pvmw.pte) {  in page_idle_clear_pte_refs_one()
      66  if (ptep_clear_young_notify(vma, addr, pvmw.pte))  in page_idle_clear_pte_refs_one()
      69  if (pmdp_clear_young_notify(vma, addr, pvmw.pmd))  in page_idle_clear_pte_refs_one()
|
| migrate.c |
     232  pvmw->vma->vm_page_prot));  in try_to_map_unused_to_zeropage()
     233  set_pte_at(pvmw->vma->vm_mm, pvmw->address, pvmw->pte, newpte);  in try_to_map_unused_to_zeropage()
     253  while (page_vma_mapped_walk(&pvmw)) {  in remove_migration_pte()
     263  idx = linear_page_index(vma, pvmw.address) - pvmw.pgoff;  in remove_migration_pte()
     268  if (!pvmw.pte) {  in remove_migration_pte()
     271  remove_migration_pmd(&pvmw, new);  in remove_migration_pte()
     281  old_pte = ptep_get(pvmw.pte);  in remove_migration_pte()
     327  set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte,  in remove_migration_pte()
     334  pvmw.address, rmap_flags);  in remove_migration_pte()
     337  set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);  in remove_migration_pte()
     [all …]
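
Source line 263 above picks the sub-page of a large folio that corresponds to the current hit. A small illustrative helper showing that index arithmetic (subpage_of() is a hypothetical name):

    /*
     * For a large folio, each hit selects one sub-page by comparing the
     * linear index of pvmw->address with the folio's starting pgoff.
     * subpage_of() is hypothetical.
     */
    #include <linux/mm.h>
    #include <linux/pagemap.h>
    #include <linux/rmap.h>

    static struct page *subpage_of(struct folio *folio,
                                   struct page_vma_mapped_walk *pvmw)
    {
            unsigned long idx;

            idx = linear_page_index(pvmw->vma, pvmw->address) - pvmw->pgoff;
            return folio_page(folio, idx);
    }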
|
| ksm.c |
    1250  DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, 0, 0);  in write_protect_page()
    1261  if (pvmw.address == -EFAULT)  in write_protect_page()
    1265  pvmw.address + PAGE_SIZE);  in write_protect_page()
    1268  if (!page_vma_mapped_walk(&pvmw))  in write_protect_page()
    1270  if (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?"))  in write_protect_page()
    1274  entry = ptep_get(pvmw.pte);  in write_protect_page()
    1293  entry = ptep_clear_flush(vma, pvmw.address, pvmw.pte);  in write_protect_page()
    1299  set_pte_at(mm, pvmw.address, pvmw.pte, entry);  in write_protect_page()
    1306  set_pte_at(mm, pvmw.address, pvmw.pte, entry);  in write_protect_page()
    1317  set_pte_at(mm, pvmw.address, pvmw.pte, entry);  in write_protect_page()
    [all …]
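
Source lines 1293 and 1299 show the clear-flush-then-reinstall sequence applied to the single PTE the walk found. A stripped-down sketch of just that sequence (wrprotect_pte_sketch() is hypothetical and omits KSM's dirty/young checks and batched TLB flushing):

    /*
     * Hedged sketch of atomically write-protecting one PTE: flush caches,
     * clear the entry with a TLB flush, then reinstall it read-only.
     * wrprotect_pte_sketch() is hypothetical.
     */
    #include <linux/mm.h>
    #include <linux/pgtable.h>
    #include <asm/cacheflush.h>

    static void wrprotect_pte_sketch(struct vm_area_struct *vma,
                                     unsigned long addr, pte_t *ptep)
    {
            pte_t entry;

            flush_cache_page(vma, addr, pte_pfn(ptep_get(ptep)));
            entry = ptep_clear_flush(vma, addr, ptep);  /* old PTE, TLB flushed */
            entry = pte_wrprotect(entry);               /* drop write permission */
            set_pte_at(vma->vm_mm, addr, ptep, entry);  /* reinstall read-only */
    }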
|
| internal.h |
     986  static inline unsigned long vma_address_end(struct page_vma_mapped_walk *pvmw)  in vma_address_end() argument
     988  struct vm_area_struct *vma = pvmw->vma;  in vma_address_end()
     993  if (pvmw->nr_pages == 1)  in vma_address_end()
     994  return pvmw->address + PAGE_SIZE;  in vma_address_end()
     996  pgoff = pvmw->pgoff + pvmw->nr_pages;  in vma_address_end()
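
The search snippet stops at the pgoff computation; the remaining lines of the helper convert that end pgoff back to a user virtual address and clamp it to the VMA. A hedged sketch of just that arithmetic (end_of_range() is a hypothetical name, not the kernel's vma_address_end()):

    /*
     * Hedged sketch: map a file offset (in pages) back to a user virtual
     * address within @vma and clamp to the VMA boundaries.
     * end_of_range() is hypothetical.
     */
    #include <linux/mm.h>

    static unsigned long end_of_range(struct vm_area_struct *vma, pgoff_t pgoff_end)
    {
            unsigned long address;

            address = vma->vm_start + ((pgoff_end - vma->vm_pgoff) << PAGE_SHIFT);
            /* A large folio may extend past the VMA that maps only part of it. */
            if (address < vma->vm_start || address > vma->vm_end)
                    address = vma->vm_end;
            return address;
    }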
|
| huge_memory.c |
    4155  struct vm_area_struct *vma = pvmw->vma;  in set_pmd_migration_entry()
    4157  unsigned long address = pvmw->address;  in set_pmd_migration_entry()
    4163  if (!(pvmw->pmd && !pvmw->pte))  in set_pmd_migration_entry()
    4193  set_pmd_at(mm, address, pvmw->pmd, pmdswp);  in set_pmd_migration_entry()
    4204  struct vm_area_struct *vma = pvmw->vma;  in remove_migration_pmd()
    4206  unsigned long address = pvmw->address;  in remove_migration_pmd()
    4211  if (!(pvmw->pmd && !pvmw->pte))  in remove_migration_pmd()
    4214  entry = pmd_to_swp_entry(*pvmw->pmd);  in remove_migration_pmd()
    4217  if (pmd_swp_soft_dirty(*pvmw->pmd))  in remove_migration_pmd()
    4221  if (pmd_swp_uffd_wp(*pvmw->pmd))  in remove_migration_pmd()
    [all …]
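
Both functions open with the same guard: within a walk, a PMD-level (huge) mapping is reported as pvmw->pmd set while pvmw->pte stays NULL. A hedged sketch of that guard (handle_pmd_hit() is a hypothetical name):

    /*
     * Hedged sketch of the PMD-level guard seen at lines 4163 and 4211.
     * handle_pmd_hit() is hypothetical.
     */
    #include <linux/rmap.h>
    #include <linux/swapops.h>
    #include <linux/printk.h>

    static void handle_pmd_hit(struct page_vma_mapped_walk *pvmw)
    {
            if (!(pvmw->pmd && !pvmw->pte))
                    return;         /* PTE-mapped hit: not a huge PMD, bail out */

            /* The huge PMD can be inspected here, e.g. for a migration entry. */
            if (is_pmd_migration_entry(*pvmw->pmd))
                    pr_debug("THP migration entry at %lx\n", pvmw->address);
    }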
|
| vmscan.c |
    4044  bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw)  in lru_gen_look_around() argument
    4051  pte_t *pte = pvmw->pte;  in lru_gen_look_around()
    4052  unsigned long addr = pvmw->address;  in lru_gen_look_around()
    4053  struct vm_area_struct *vma = pvmw->vma;  in lru_gen_look_around()
    4054  struct folio *folio = pfn_folio(pvmw->pfn);  in lru_gen_look_around()
    4063  lockdep_assert_held(pvmw->ptl);  in lru_gen_look_around()
    4069  if (spin_is_contended(pvmw->ptl))  in lru_gen_look_around()
    4146  update_bloom_filter(mm_state, max_seq, pvmw->pmd);  in lru_gen_look_around()
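
lru_gen_look_around() runs from inside an rmap walk with the page table lock in pvmw->ptl already held, and gives up early when that lock is contended. A hedged sketch of the entry checks visible above (look_around_ok() is a hypothetical name):

    /*
     * Hedged sketch of the entry conditions at lines 4063 and 4069.
     * look_around_ok() is hypothetical.
     */
    #include <linux/rmap.h>
    #include <linux/spinlock.h>

    static bool look_around_ok(struct page_vma_mapped_walk *pvmw)
    {
            lockdep_assert_held(pvmw->ptl);         /* caller holds the PTL */

            if (spin_is_contended(pvmw->ptl))       /* don't hold up other walkers */
                    return false;

            return pvmw->pte != NULL;               /* callers pass PTE-mapped hits */
    }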
|
| /linux/include/linux/ |
| rmap.h |
     700  if (pvmw->pte && !is_vm_hugetlb_page(pvmw->vma))  in page_vma_mapped_walk_done()
     701  pte_unmap(pvmw->pte);  in page_vma_mapped_walk_done()
     702  if (pvmw->ptl)  in page_vma_mapped_walk_done()
     703  spin_unlock(pvmw->ptl);  in page_vma_mapped_walk_done()
     718  WARN_ON_ONCE(!pvmw->pmd && !pvmw->pte);  in page_vma_mapped_walk_restart()
     720  if (likely(pvmw->ptl))  in page_vma_mapped_walk_restart()
     721  spin_unlock(pvmw->ptl);  in page_vma_mapped_walk_restart()
     725  pvmw->ptl = NULL;  in page_vma_mapped_walk_restart()
     726  pvmw->pmd = NULL;  in page_vma_mapped_walk_restart()
     727  pvmw->pte = NULL;  in page_vma_mapped_walk_restart()
     [all …]
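
These helpers define the cleanup convention: a caller that leaves the loop early must call page_vma_mapped_walk_done() itself, while a walk that runs until page_vma_mapped_walk() returns false has already been cleaned up by the walker. A hedged sketch (stop_at_first_pte() is a hypothetical name):

    /*
     * Hedged sketch of early termination with page_vma_mapped_walk_done().
     * stop_at_first_pte() is hypothetical.
     */
    #include <linux/mm.h>
    #include <linux/rmap.h>

    static bool stop_at_first_pte(struct folio *folio, struct vm_area_struct *vma,
                                  unsigned long address)
    {
            DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);

            while (page_vma_mapped_walk(&pvmw)) {
                    if (pvmw.pte) {
                            /* Found a PTE mapping: stop early, releasing the walk. */
                            page_vma_mapped_walk_done(&pvmw);
                            return true;
                    }
            }
            return false;   /* exhausted walk: nothing left to release */
    }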
|
| swapops.h |
     531  extern int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
     534  extern void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
     564  static inline int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,  in set_pmd_migration_entry() argument
     570  static inline void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,  in remove_migration_pmd() argument
|
| mmzone.h |
     558  bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw);
     577  static inline bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw)  in lru_gen_look_around() argument
|
| /linux/mm/damon/ |
| paddr.c |
      25  DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);  in damon_folio_mkold_one()
      27  while (page_vma_mapped_walk(&pvmw)) {  in damon_folio_mkold_one()
      28  addr = pvmw.address;  in damon_folio_mkold_one()
      29  if (pvmw.pte)  in damon_folio_mkold_one()
      30  damon_ptep_mkold(pvmw.pte, vma, addr);  in damon_folio_mkold_one()
      32  damon_pmdp_mkold(pvmw.pmd, vma, addr);  in damon_folio_mkold_one()
      97  while (page_vma_mapped_walk(&pvmw)) {  in damon_folio_young_one()
      98  addr = pvmw.address;  in damon_folio_young_one()
      99  if (pvmw.pte) {  in damon_folio_young_one()
     100  *accessed = pte_young(ptep_get(pvmw.pte)) ||  in damon_folio_young_one()
     [all …]
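
Both DAMON callbacks branch on pvmw.pte versus pvmw.pmd so the accessed bit is tested or cleared at the level the folio is actually mapped. A hedged sketch of the young test (mapping_is_young() is hypothetical and skips DAMON's mmu-notifier and hugepage details):

    /*
     * Hedged sketch of the PTE/PMD accessed-bit check.
     * mapping_is_young() is hypothetical.
     */
    #include <linux/pgtable.h>
    #include <linux/rmap.h>

    static bool mapping_is_young(struct page_vma_mapped_walk *pvmw)
    {
            if (pvmw->pte)
                    return pte_young(ptep_get(pvmw->pte));

            return pmd_young(pmdp_get(pvmw->pmd));
    }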
|
| /linux/kernel/events/ |
| uprobes.c |
     167  DEFINE_FOLIO_VMA_WALK(pvmw, old_folio, vma, addr, 0);  in __replace_page()
     186  if (!page_vma_mapped_walk(&pvmw))  in __replace_page()
     188  VM_BUG_ON_PAGE(addr != pvmw.address, old_page);  in __replace_page()
     203  flush_cache_page(vma, addr, pte_pfn(ptep_get(pvmw.pte)));  in __replace_page()
     204  ptep_clear_flush(vma, addr, pvmw.pte);  in __replace_page()
     206  set_pte_at(mm, addr, pvmw.pte,  in __replace_page()
     212  page_vma_mapped_walk_done(&pvmw);  in __replace_page()
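
__replace_page() shows the single-hit variant: with the exact address known, page_vma_mapped_walk() is called once rather than looped, and page_vma_mapped_walk_done() releases the walk state. A hedged sketch of that pattern (probe_single_mapping() is a hypothetical name):

    /*
     * Hedged sketch of a single-address walk with one expected hit.
     * probe_single_mapping() is hypothetical.
     */
    #include <linux/mm.h>
    #include <linux/rmap.h>

    static int probe_single_mapping(struct folio *folio,
                                    struct vm_area_struct *vma, unsigned long addr)
    {
            DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);

            if (!page_vma_mapped_walk(&pvmw))
                    return -EAGAIN;         /* no longer mapped at addr */

            /* pvmw.pte (or pvmw.pmd) describes the mapping; the PTL is held. */

            page_vma_mapped_walk_done(&pvmw);
            return 0;
    }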
|