Searched refs:pvmw (Results 1 – 9 of 9) sorted by relevance
/mm/

page_vma_mapped.c
     23  pvmw->pte = pte_offset_map_lock(pvmw->vma->vm_mm, pvmw->pmd,   in map_pte()
     24          pvmw->address, &pvmw->ptl);   in map_pte()
     37  pvmw->pte = pte_offset_map_rw_nolock(pvmw->vma->vm_mm, pvmw->pmd,   in map_pte()
    138  if (pfn > (pvmw->pfn + pvmw->nr_pages - 1))   in check_pte()
    148  if (pfn > pvmw->pfn + pvmw->nr_pages - 1)   in check_pmd()
    155  pvmw->address = (pvmw->address + size) & ~(size - 1);   in step_forward()
    196  if (pvmw->pmd && !pvmw->pte)   in page_vma_mapped_walk()
    210  pvmw->pte = hugetlb_walk(vma, pvmw->address, size);   in page_vma_mapped_walk()
    214  pvmw->ptl = huge_pte_lock(hstate, mm, pvmw->pte);   in page_vma_mapped_walk()
    241  pvmw->pmd = pmd_offset(pud, pvmw->address);   in page_vma_mapped_walk()
    [all …]
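The hits above are the walker's own internals: map_pte() takes the PTE lock for the entry being reported, check_pte()/check_pmd() bound the hit against pvmw->pfn + pvmw->nr_pages, step_forward() advances pvmw->address, and page_vma_mapped_walk() handles the hugetlb and PMD descent. For orientation, here is a sketch of the walk state those lines manipulate, reconstructed from the fields referenced in these results and from memory of include/linux/rmap.h; the exact upstream layout and flag set may differ.

/* Sketch only; not copied from the kernel version searched here. */
struct page_vma_mapped_walk {
	unsigned long pfn;		/* first pfn of the folio being located */
	unsigned long nr_pages;		/* folio size, in pages */
	pgoff_t pgoff;			/* folio offset within its mapping */
	struct vm_area_struct *vma;	/* VMA currently being walked */
	unsigned long address;		/* current address inside the VMA */
	pmd_t *pmd;			/* PMD entry for the current address */
	pte_t *pte;			/* set when a PTE-level mapping is reported */
	spinlock_t *ptl;		/* lock held for the reported entry */
	unsigned int flags;		/* PVMW_SYNC, PVMW_MIGRATION, ... */
};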

rmap.c
    858  address = pvmw.address;   in folio_referenced_one()
    901  } else if (pvmw.pte) {   in folio_referenced_one()
    903  pvmw.pte))   in folio_referenced_one()
    907  pvmw.pmd))   in folio_referenced_one()
   1043  if (pvmw->pte) {   in page_vma_mkclean_one()
   1898  pvmw.flags = PVMW_SYNC;   in try_to_unmap_one()
   1936  if (!pvmw.pte) {   in try_to_unmap_one()
   1938  if (unmap_huge_pmd_locked(vma, pvmw.address, pvmw.pmd, folio))   in try_to_unmap_one()
   1977  address = pvmw.address;   in try_to_unmap_one()
   2300  pvmw.flags = PVMW_SYNC;   in try_to_migrate_one()
    [all …]

page_idle.c
     56  DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);   in page_idle_clear_pte_refs_one()
     59  while (page_vma_mapped_walk(&pvmw)) {   in page_idle_clear_pte_refs_one()
     60  addr = pvmw.address;   in page_idle_clear_pte_refs_one()
     61  if (pvmw.pte) {   in page_idle_clear_pte_refs_one()
     70  if (likely(pte_present(ptep_get(pvmw.pte))))   in page_idle_clear_pte_refs_one()
     71  referenced |= ptep_test_and_clear_young(vma, addr, pvmw.pte);   in page_idle_clear_pte_refs_one()
     74  if (pmdp_clear_young_notify(vma, addr, pvmw.pmd))   in page_idle_clear_pte_refs_one()
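The page_idle.c hits nearly spell out the whole consumer. Below is a hedged reconstruction of page_idle_clear_pte_refs_one(), assembled from those lines plus memory of upstream mm/page_idle.c, shown because it is the shortest complete example of the DEFINE_FOLIO_VMA_WALK() / page_vma_mapped_walk() loop and of the pte-versus-pmd dispatch; details such as the mmu-notifier call on the PTE path are omitted and may differ between kernel versions.

static bool page_idle_clear_pte_refs_one(struct folio *folio,
					 struct vm_area_struct *vma,
					 unsigned long addr, void *arg)
{
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
	bool referenced = false;

	while (page_vma_mapped_walk(&pvmw)) {
		addr = pvmw.address;
		if (pvmw.pte) {
			/* PTE-level mapping: test-and-clear the young bit */
			if (likely(pte_present(ptep_get(pvmw.pte))))
				referenced |= ptep_test_and_clear_young(vma,
							addr, pvmw.pte);
		} else {
			/* PMD-mapped THP: clear young and notify */
			if (pmdp_clear_young_notify(vma, addr, pvmw.pmd))
				referenced = true;
		}
	}

	if (referenced) {
		folio_clear_idle(folio);
		folio_set_young(folio);
	}
	return true;	/* keep rmap_walk() iterating over other VMAs */
}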

migrate.c
    309  pvmw->vma->vm_page_prot));   in try_to_map_unused_to_zeropage()
    310  set_pte_at(pvmw->vma->vm_mm, pvmw->address, pvmw->pte, newpte);   in try_to_map_unused_to_zeropage()
    330  while (page_vma_mapped_walk(&pvmw)) {   in remove_migration_pte()
    340  idx = linear_page_index(vma, pvmw.address) - pvmw.pgoff;   in remove_migration_pte()
    345  if (!pvmw.pte) {   in remove_migration_pte()
    348  remove_migration_pmd(&pvmw, new);   in remove_migration_pte()
    358  old_pte = ptep_get(pvmw.pte);   in remove_migration_pte()
    404  set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte,   in remove_migration_pte()
    411  pvmw.address, rmap_flags);   in remove_migration_pte()
    414  set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);   in remove_migration_pte()
    [all …]
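The first two migrate.c hits are the tail of try_to_map_unused_to_zeropage(). A hedged completion of that truncated statement is sketched below with an ad-hoc helper name; the real function also verifies that the subpage is genuinely unused before doing this. It simply replaces the PTE the walk is positioned on, under pvmw->ptl, with a special zero-page mapping.

/* Hypothetical helper; the caller is expected to hold pvmw->ptl, which
 * page_vma_mapped_walk() guarantees for the entry it reports. */
static void pvmw_map_zeropage(struct page_vma_mapped_walk *pvmw)
{
	pte_t newpte;

	newpte = pte_mkspecial(pfn_pte(my_zero_pfn(pvmw->address),
				       pvmw->vma->vm_page_prot));
	set_pte_at(pvmw->vma->vm_mm, pvmw->address, pvmw->pte, newpte);
}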

ksm.c
   1254  DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, 0, 0);   in write_protect_page()
   1265  if (pvmw.address == -EFAULT)   in write_protect_page()
   1269  pvmw.address + PAGE_SIZE);   in write_protect_page()
   1272  if (!page_vma_mapped_walk(&pvmw))   in write_protect_page()
   1274  if (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?"))   in write_protect_page()
   1277  entry = ptep_get(pvmw.pte);   in write_protect_page()
   1304  entry = ptep_clear_flush(vma, pvmw.address, pvmw.pte);   in write_protect_page()
   1310  set_pte_at(mm, pvmw.address, pvmw.pte, entry);   in write_protect_page()
   1317  set_pte_at(mm, pvmw.address, pvmw.pte, entry);   in write_protect_page()
   1328  set_pte_at(mm, pvmw.address, pvmw.pte, entry);   in write_protect_page()
   [all …]
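The write_protect_page() hits show the core of KSM's write-protection step. The sketch below condenses it (the helper name is invented here): clear the PTE with a TLB flush, transfer any dirty bit to the folio, then reinstall a clean, write-protected entry. The real function additionally re-checks mapcount/swapcache state and can abort and restore the original PTE, which is omitted.

static void pvmw_wrprotect_pte(struct page_vma_mapped_walk *pvmw,
			       struct folio *folio)
{
	struct vm_area_struct *vma = pvmw->vma;
	pte_t entry;

	/* Flush so no CPU keeps a writable TLB entry for the old PTE. */
	entry = ptep_clear_flush(vma, pvmw->address, pvmw->pte);
	if (pte_dirty(entry))
		folio_mark_dirty(folio);
	entry = pte_mkclean(pte_wrprotect(entry));
	set_pte_at(vma->vm_mm, pvmw->address, pvmw->pte, entry);
}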

internal.h
   1098  static inline unsigned long vma_address_end(struct page_vma_mapped_walk *pvmw)   in vma_address_end() argument
   1100  struct vm_area_struct *vma = pvmw->vma;   in vma_address_end()
   1105  if (pvmw->nr_pages == 1)   in vma_address_end()
   1106  return pvmw->address + PAGE_SIZE;   in vma_address_end()
   1108  pgoff = pvmw->pgoff + pvmw->nr_pages;   in vma_address_end()
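Only fragments of vma_address_end() made it into the hit; a hedged completion, from memory of mm/internal.h, follows. It returns the first address past the folio's mapping in the VMA, which page_vma_mapped_walk() uses as the walk's upper bound; the comments and exact clamping may differ upstream.

static inline unsigned long vma_address_end(struct page_vma_mapped_walk *pvmw)
{
	struct vm_area_struct *vma = pvmw->vma;
	pgoff_t pgoff;
	unsigned long address;

	/* Single-page folio (also covers KSM, where ->pgoff is not usable) */
	if (pvmw->nr_pages == 1)
		return pvmw->address + PAGE_SIZE;

	/* Address just past the last page of the folio within this VMA */
	pgoff = pvmw->pgoff + pvmw->nr_pages;
	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	/* Clamp if the folio extends beyond (or wraps past) the VMA */
	if (address < vma->vm_start || address > vma->vm_end)
		address = vma->vm_end;
	return address;
}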

huge_memory.c
   4594  struct vm_area_struct *vma = pvmw->vma;   in set_pmd_migration_entry()
   4596  unsigned long address = pvmw->address;   in set_pmd_migration_entry()
   4602  if (!(pvmw->pmd && !pvmw->pte))   in set_pmd_migration_entry()
   4632  set_pmd_at(mm, address, pvmw->pmd, pmdswp);   in set_pmd_migration_entry()
   4643  struct vm_area_struct *vma = pvmw->vma;   in remove_migration_pmd()
   4645  unsigned long address = pvmw->address;   in remove_migration_pmd()
   4650  if (!(pvmw->pmd && !pvmw->pte))   in remove_migration_pmd()
   4653  entry = pmd_to_swp_entry(*pvmw->pmd);   in remove_migration_pmd()
   4656  if (pmd_swp_soft_dirty(*pvmw->pmd))   in remove_migration_pmd()
   4660  if (pmd_swp_uffd_wp(*pvmw->pmd))   in remove_migration_pmd()
   [all …]
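Both huge_memory.c helpers start with the same guard: they only act when the walk reported a PMD-level mapping, i.e. pvmw->pmd is set and pvmw->pte is clear. The heavily simplified sketch below (ad-hoc name; reference counting, rmap, dirty/young propagation and mmu-notifier work are all omitted) shows the other recognizable piece of those hits: encoding the PMD as a migration entry while carrying the soft-dirty and uffd-wp bits across.

static void pvmw_install_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
					     struct page *page)
{
	struct vm_area_struct *vma = pvmw->vma;
	unsigned long address = pvmw->address;
	pmd_t pmdval, pmdswp;
	swp_entry_t entry;

	/* Only meaningful for a PMD-level mapping. */
	if (!(pvmw->pmd && !pvmw->pte))
		return;

	pmdval = pmdp_invalidate(vma, address, pvmw->pmd);
	if (pmd_write(pmdval))
		entry = make_writable_migration_entry(page_to_pfn(page));
	else
		entry = make_readable_migration_entry(page_to_pfn(page));
	pmdswp = swp_entry_to_pmd(entry);
	if (pmd_soft_dirty(pmdval))
		pmdswp = pmd_swp_mksoft_dirty(pmdswp);
	if (pmd_uffd_wp(pmdval))
		pmdswp = pmd_swp_mkuffd_wp(pmdswp);
	set_pmd_at(vma->vm_mm, address, pvmw->pmd, pmdswp);
}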

vmscan.c
   4228  bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw)   in lru_gen_look_around() argument
   4237  pte_t *pte = pvmw->pte;   in lru_gen_look_around()
   4238  unsigned long addr = pvmw->address;   in lru_gen_look_around()
   4239  struct vm_area_struct *vma = pvmw->vma;   in lru_gen_look_around()
   4240  struct folio *folio = pfn_folio(pvmw->pfn);   in lru_gen_look_around()
   4248  lockdep_assert_held(pvmw->ptl);   in lru_gen_look_around()
   4254  if (spin_is_contended(pvmw->ptl))   in lru_gen_look_around()
   4319  update_bloom_filter(mm_state, max_seq, pvmw->pmd);   in lru_gen_look_around()
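The lru_gen_look_around() hits show its entry conditions: it is called from rmap context with pvmw->ptl already held, and it backs off when that lock is contended. A tiny sketch of that gate, with an ad-hoc helper name:

static bool lru_gen_can_look_around(struct page_vma_mapped_walk *pvmw)
{
	/* The rmap walk must already hold the reported entry's PTE lock. */
	lockdep_assert_held(pvmw->ptl);
	/* Don't extend the critical section if someone is waiting on it. */
	return !spin_is_contended(pvmw->ptl);
}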

/mm/damon/

ops-common.c
    149  while (page_vma_mapped_walk(&pvmw)) {   in damon_folio_mkold_one()
    150  addr = pvmw.address;   in damon_folio_mkold_one()
    151  if (pvmw.pte)   in damon_folio_mkold_one()
    152  damon_ptep_mkold(pvmw.pte, vma, addr);   in damon_folio_mkold_one()
    154  damon_pmdp_mkold(pvmw.pmd, vma, addr);   in damon_folio_mkold_one()
    191  while (page_vma_mapped_walk(&pvmw)) {   in damon_folio_young_one()
    192  addr = pvmw.address;   in damon_folio_young_one()
    193  if (pvmw.pte) {   in damon_folio_young_one()
    194  pte = ptep_get(pvmw.pte);   in damon_folio_young_one()
    206  *accessed = pmd_young(pmdp_get(pvmw.pmd)) ||   in damon_folio_young_one()
    [all …]
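The damon hits are almost a complete function on their own; reconstructed (hedged, assuming the usual DEFINE_FOLIO_VMA_WALK() setup and the rmap_one callback signature), damon_folio_mkold_one() reduces to the canonical pte/pmd dispatch:

static bool damon_folio_mkold_one(struct folio *folio,
				  struct vm_area_struct *vma,
				  unsigned long addr, void *arg)
{
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);

	while (page_vma_mapped_walk(&pvmw)) {
		addr = pvmw.address;
		if (pvmw.pte)
			damon_ptep_mkold(pvmw.pte, vma, addr);
		else
			damon_pmdp_mkold(pvmw.pmd, vma, addr);
	}
	return true;
}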
Completed in 60 milliseconds