Lines matching references to pmd (each entry: source line number, code excerpt, containing function; "argument" and "local" mark lines where pmd is declared as a parameter or a local variable)
1052 pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma) in maybe_pmd_mkwrite() argument
1055 pmd = pmd_mkwrite(pmd, vma); in maybe_pmd_mkwrite()
1056 return pmd; in maybe_pmd_mkwrite()
1200 static void map_anon_folio_pmd(struct folio *folio, pmd_t *pmd, in map_anon_folio_pmd() argument
1209 set_pmd_at(vma->vm_mm, haddr, pmd, entry); in map_anon_folio_pmd()
1210 update_mmu_cache_pmd(vma, haddr, pmd); in map_anon_folio_pmd()
1235 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in __do_huge_pmd_anonymous_page()
1236 if (unlikely(!pmd_none(*vmf->pmd))) { in __do_huge_pmd_anonymous_page()
1252 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable); in __do_huge_pmd_anonymous_page()
1253 map_anon_folio_pmd(folio, vmf->pmd, vma, haddr); in __do_huge_pmd_anonymous_page()
1307 struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd, in set_huge_zero_folio() argument
1312 pgtable_trans_huge_deposit(mm, pmd, pgtable); in set_huge_zero_folio()
1313 set_pmd_at(mm, haddr, pmd, entry); in set_huge_zero_folio()
1346 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_huge_pmd_anonymous_page()
1348 if (pmd_none(*vmf->pmd)) { in do_huge_pmd_anonymous_page()
1360 haddr, vmf->pmd, zero_folio); in do_huge_pmd_anonymous_page()
1361 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); in do_huge_pmd_anonymous_page()
1383 pmd_t *pmd, struct folio_or_pfn fop, pgprot_t prot, in insert_pmd() argument
1389 lockdep_assert_held(pmd_lockptr(mm, pmd)); in insert_pmd()
1391 if (!pmd_none(*pmd)) { in insert_pmd()
1396 if (pmd_pfn(*pmd) != pfn) { in insert_pmd()
1397 WARN_ON_ONCE(!is_huge_zero_pmd(*pmd)); in insert_pmd()
1400 entry = pmd_mkyoung(*pmd); in insert_pmd()
1402 if (pmdp_set_access_flags(vma, addr, pmd, entry, 1)) in insert_pmd()
1403 update_mmu_cache_pmd(vma, addr, pmd); in insert_pmd()
1425 pgtable_trans_huge_deposit(mm, pmd, pgtable); in insert_pmd()
1429 set_pmd_at(mm, addr, pmd, entry); in insert_pmd()
1430 update_mmu_cache_pmd(vma, addr, pmd); in insert_pmd()
1478 ptl = pmd_lock(vma->vm_mm, vmf->pmd); in vmf_insert_pfn_pmd()
1479 error = insert_pmd(vma, addr, vmf->pmd, fop, pgprot, write, in vmf_insert_pfn_pmd()
1515 ptl = pmd_lock(mm, vmf->pmd); in vmf_insert_folio_pmd()
1516 error = insert_pmd(vma, addr, vmf->pmd, fop, vma->vm_page_prot, in vmf_insert_folio_pmd()
1654 pmd_t *pmd, bool write) in touch_pmd() argument
1658 _pmd = pmd_mkyoung(*pmd); in touch_pmd()
1662 pmd, _pmd, write)) in touch_pmd()
1663 update_mmu_cache_pmd(vma, addr, pmd); in touch_pmd()
1673 pmd_t pmd; in copy_huge_pmd() local
1677 pmd = pmdp_get_lockless(src_pmd); in copy_huge_pmd()
1678 if (unlikely(pmd_present(pmd) && pmd_special(pmd))) { in copy_huge_pmd()
1691 VM_WARN_ON_ONCE(is_cow_mapping(src_vma->vm_flags) && pmd_write(pmd)); in copy_huge_pmd()
1708 pmd = *src_pmd; in copy_huge_pmd()
1711 if (unlikely(is_swap_pmd(pmd))) { in copy_huge_pmd()
1712 swp_entry_t entry = pmd_to_swp_entry(pmd); in copy_huge_pmd()
1714 VM_BUG_ON(!is_pmd_migration_entry(pmd)); in copy_huge_pmd()
1718 pmd = swp_entry_to_pmd(entry); in copy_huge_pmd()
1720 pmd = pmd_swp_mksoft_dirty(pmd); in copy_huge_pmd()
1722 pmd = pmd_swp_mkuffd_wp(pmd); in copy_huge_pmd()
1723 set_pmd_at(src_mm, addr, src_pmd, pmd); in copy_huge_pmd()
1729 pmd = pmd_swp_clear_uffd_wp(pmd); in copy_huge_pmd()
1730 set_pmd_at(dst_mm, addr, dst_pmd, pmd); in copy_huge_pmd()
1736 if (unlikely(!pmd_trans_huge(pmd))) { in copy_huge_pmd()
1745 if (is_huge_zero_pmd(pmd)) { in copy_huge_pmd()
1755 src_page = pmd_page(pmd); in copy_huge_pmd()
1775 pmd = pmd_clear_uffd_wp(pmd); in copy_huge_pmd()
1776 pmd = pmd_wrprotect(pmd); in copy_huge_pmd()
1778 pmd = pmd_mkold(pmd); in copy_huge_pmd()
1779 set_pmd_at(dst_mm, addr, dst_pmd, pmd); in copy_huge_pmd()
1856 vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd); in huge_pmd_set_accessed()
1857 if (unlikely(!pmd_same(*vmf->pmd, vmf->orig_pmd))) in huge_pmd_set_accessed()
1860 touch_pmd(vmf->vma, vmf->address, vmf->pmd, write); in huge_pmd_set_accessed()
1881 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_huge_zero_wp_pmd()
1882 if (unlikely(!pmd_same(pmdp_get(vmf->pmd), vmf->orig_pmd))) in do_huge_zero_wp_pmd()
1887 (void)pmdp_huge_clear_flush(vma, haddr, vmf->pmd); in do_huge_zero_wp_pmd()
1888 map_anon_folio_pmd(folio, vmf->pmd, vma, haddr); in do_huge_zero_wp_pmd()
1907 vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd); in do_huge_pmd_wp_page()
1922 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) { in do_huge_pmd_wp_page()
1940 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) { in do_huge_pmd_wp_page()
1978 if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1)) in do_huge_pmd_wp_page()
1979 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); in do_huge_pmd_wp_page()
1988 __split_huge_pmd(vma, vmf->pmd, vmf->address, false); in do_huge_pmd_wp_page()
1993 unsigned long addr, pmd_t pmd) in can_change_pmd_writable() argument
2001 if (pmd_protnone(pmd)) in can_change_pmd_writable()
2005 if (pmd_needs_soft_dirty_wp(vma, pmd)) in can_change_pmd_writable()
2009 if (userfaultfd_huge_pmd_wp(vma, pmd)) in can_change_pmd_writable()
2014 page = vm_normal_page_pmd(vma, addr, pmd); in can_change_pmd_writable()
2019 return pmd_dirty(pmd); in can_change_pmd_writable()
2030 pmd_t pmd, old_pmd; in do_huge_pmd_numa_page() local
2034 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_huge_pmd_numa_page()
2035 old_pmd = pmdp_get(vmf->pmd); in do_huge_pmd_numa_page()
2042 pmd = pmd_modify(old_pmd, vma->vm_page_prot); in do_huge_pmd_numa_page()
2048 writable = pmd_write(pmd); in do_huge_pmd_numa_page()
2050 can_change_pmd_writable(vma, vmf->address, pmd)) in do_huge_pmd_numa_page()
2053 folio = vm_normal_folio_pmd(vma, haddr, pmd); in do_huge_pmd_numa_page()
2079 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_huge_pmd_numa_page()
2080 if (unlikely(!pmd_same(pmdp_get(vmf->pmd), vmf->orig_pmd))) { in do_huge_pmd_numa_page()
2086 pmd = pmd_modify(pmdp_get(vmf->pmd), vma->vm_page_prot); in do_huge_pmd_numa_page()
2087 pmd = pmd_mkyoung(pmd); in do_huge_pmd_numa_page()
2089 pmd = pmd_mkwrite(pmd, vma); in do_huge_pmd_numa_page()
2090 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd); in do_huge_pmd_numa_page()
2091 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); in do_huge_pmd_numa_page()
2104 pmd_t *pmd, unsigned long addr, unsigned long next) in madvise_free_huge_pmd() argument
2114 ptl = pmd_trans_huge_lock(pmd, vma); in madvise_free_huge_pmd()
2118 orig_pmd = *pmd; in madvise_free_huge_pmd()
2157 pmdp_invalidate(vma, addr, pmd); in madvise_free_huge_pmd()
2161 set_pmd_at(mm, addr, pmd, orig_pmd); in madvise_free_huge_pmd()
2162 tlb_remove_pmd_tlb_entry(tlb, pmd, addr); in madvise_free_huge_pmd()
2173 static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd) in zap_deposited_table() argument
2177 pgtable = pgtable_trans_huge_withdraw(mm, pmd); in zap_deposited_table()
2183 pmd_t *pmd, unsigned long addr) in zap_huge_pmd() argument
2190 ptl = __pmd_trans_huge_lock(pmd, vma); in zap_huge_pmd()
2199 orig_pmd = pmdp_huge_get_and_clear_full(vma, addr, pmd, in zap_huge_pmd()
2202 tlb_remove_pmd_tlb_entry(tlb, pmd, addr); in zap_huge_pmd()
2205 zap_deposited_table(tlb->mm, pmd); in zap_huge_pmd()
2209 zap_deposited_table(tlb->mm, pmd); in zap_huge_pmd()
2233 zap_deposited_table(tlb->mm, pmd); in zap_huge_pmd()
2237 zap_deposited_table(tlb->mm, pmd); in zap_huge_pmd()
2272 static pmd_t move_soft_dirty_pmd(pmd_t pmd) in move_soft_dirty_pmd() argument
2275 if (unlikely(is_pmd_migration_entry(pmd))) in move_soft_dirty_pmd()
2276 pmd = pmd_swp_mksoft_dirty(pmd); in move_soft_dirty_pmd()
2277 else if (pmd_present(pmd)) in move_soft_dirty_pmd()
2278 pmd = pmd_mksoft_dirty(pmd); in move_soft_dirty_pmd()
2280 return pmd; in move_soft_dirty_pmd()
2283 static pmd_t clear_uffd_wp_pmd(pmd_t pmd) in clear_uffd_wp_pmd() argument
2285 if (pmd_present(pmd)) in clear_uffd_wp_pmd()
2286 pmd = pmd_clear_uffd_wp(pmd); in clear_uffd_wp_pmd()
2287 else if (is_swap_pmd(pmd)) in clear_uffd_wp_pmd()
2288 pmd = pmd_swp_clear_uffd_wp(pmd); in clear_uffd_wp_pmd()
2290 return pmd; in clear_uffd_wp_pmd()
2297 pmd_t pmd; in move_huge_pmd() local
2320 pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd); in move_huge_pmd()
2321 if (pmd_present(pmd)) in move_huge_pmd()
2330 pmd = move_soft_dirty_pmd(pmd); in move_huge_pmd()
2332 pmd = clear_uffd_wp_pmd(pmd); in move_huge_pmd()
2333 set_pmd_at(mm, new_addr, new_pmd, pmd); in move_huge_pmd()
2352 pmd_t *pmd, unsigned long addr, pgprot_t newprot, in change_huge_pmd() argument
2368 ptl = __pmd_trans_huge_lock(pmd, vma); in change_huge_pmd()
2373 if (is_swap_pmd(*pmd)) { in change_huge_pmd()
2374 swp_entry_t entry = pmd_to_swp_entry(*pmd); in change_huge_pmd()
2378 VM_BUG_ON(!is_pmd_migration_entry(*pmd)); in change_huge_pmd()
2389 if (pmd_swp_soft_dirty(*pmd)) in change_huge_pmd()
2392 newpmd = *pmd; in change_huge_pmd()
2399 if (!pmd_same(*pmd, newpmd)) in change_huge_pmd()
2400 set_pmd_at(mm, addr, pmd, newpmd); in change_huge_pmd()
2413 if (is_huge_zero_pmd(*pmd)) in change_huge_pmd()
2416 if (pmd_protnone(*pmd)) in change_huge_pmd()
2419 folio = pmd_folio(*pmd); in change_huge_pmd()
2454 oldpmd = pmdp_invalidate_ad(vma, addr, pmd); in change_huge_pmd()
2473 set_pmd_at(mm, addr, pmd, entry); in change_huge_pmd()
2675 spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma) in __pmd_trans_huge_lock() argument
2678 ptl = pmd_lock(vma->vm_mm, pmd); in __pmd_trans_huge_lock()
2679 if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd))) in __pmd_trans_huge_lock()
2797 unsigned long haddr, pmd_t *pmd) in __split_huge_zero_page_pmd() argument
2814 old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd); in __split_huge_zero_page_pmd()
2816 pgtable = pgtable_trans_huge_withdraw(mm, pmd); in __split_huge_zero_page_pmd()
2834 pmd_populate(mm, pmd, pgtable); in __split_huge_zero_page_pmd()
2837 static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, in __split_huge_pmd_locked() argument
2854 VM_BUG_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd)); in __split_huge_pmd_locked()
2859 old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd); in __split_huge_pmd_locked()
2865 zap_deposited_table(mm, pmd); in __split_huge_pmd_locked()
2889 if (is_huge_zero_pmd(*pmd)) { in __split_huge_pmd_locked()
2899 return __split_huge_zero_page_pmd(vma, haddr, pmd); in __split_huge_pmd_locked()
2902 pmd_migration = is_pmd_migration_entry(*pmd); in __split_huge_pmd_locked()
2906 old_pmd = *pmd; in __split_huge_pmd_locked()
2940 old_pmd = pmdp_invalidate(vma, haddr, pmd); in __split_huge_pmd_locked()
2989 pgtable = pgtable_trans_huge_withdraw(mm, pmd); in __split_huge_pmd_locked()
3055 pmd_populate(mm, pmd, pgtable); in __split_huge_pmd_locked()
3059 pmd_t *pmd, bool freeze) in split_huge_pmd_locked() argument
3062 if (pmd_trans_huge(*pmd) || is_pmd_migration_entry(*pmd)) in split_huge_pmd_locked()
3063 __split_huge_pmd_locked(vma, pmd, address, freeze); in split_huge_pmd_locked()
3066 void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, in __split_huge_pmd() argument
3076 ptl = pmd_lock(vma->vm_mm, pmd); in __split_huge_pmd()
3077 split_huge_pmd_locked(vma, range.start, pmd, freeze); in __split_huge_pmd()
3085 pmd_t *pmd = mm_find_pmd(vma->vm_mm, address); in split_huge_pmd_address() local
3087 if (!pmd) in split_huge_pmd_address()
3090 __split_huge_pmd(vma, pmd, address, freeze); in split_huge_pmd_address()
4602 if (!(pvmw->pmd && !pvmw->pte)) in set_pmd_migration_entry()
4606 pmdval = pmdp_invalidate(vma, address, pvmw->pmd); in set_pmd_migration_entry()
4611 set_pmd_at(mm, address, pvmw->pmd, pmdval); in set_pmd_migration_entry()
4632 set_pmd_at(mm, address, pvmw->pmd, pmdswp); in set_pmd_migration_entry()
4650 if (!(pvmw->pmd && !pvmw->pte)) in remove_migration_pmd()
4653 entry = pmd_to_swp_entry(*pvmw->pmd); in remove_migration_pmd()
4656 if (pmd_swp_soft_dirty(*pvmw->pmd)) in remove_migration_pmd()
4660 if (pmd_swp_uffd_wp(*pvmw->pmd)) in remove_migration_pmd()
4679 set_pmd_at(mm, haddr, pvmw->pmd, pmde); in remove_migration_pmd()
4682 update_mmu_cache_pmd(vma, address, pvmw->pmd); in remove_migration_pmd()
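
A pattern that recurs throughout the references above (for example in __do_huge_pmd_anonymous_page, set_huge_zero_folio and insert_pmd) is: take the per-PMD page table lock, check that the slot is still pmd_none, deposit a preallocated PTE table, install the entry with set_pmd_at, and finish with update_mmu_cache_pmd. The following is a minimal sketch of that pattern only; install_huge_pmd is a hypothetical helper name, and the error and cleanup handling of the real fault paths (freeing the deposited table, counters, statistics) is omitted.

#include <linux/mm.h>
#include <linux/huge_mm.h>
#include <linux/pgtable.h>

/*
 * Hedged sketch, not kernel code as-is: illustrates the lock/check/install
 * sequence visible in the listing above. Cleanup on the already-populated
 * path is intentionally left out.
 */
static void install_huge_pmd(struct vm_area_struct *vma, unsigned long haddr,
			     pmd_t *pmd, pmd_t entry, pgtable_t pgtable)
{
	spinlock_t *ptl;

	ptl = pmd_lock(vma->vm_mm, pmd);	/* per-PMD page table lock */
	if (pmd_none(*pmd)) {			/* install only into an empty slot */
		/* stash a preallocated PTE table for a later PMD split */
		pgtable_trans_huge_deposit(vma->vm_mm, pmd, pgtable);
		set_pmd_at(vma->vm_mm, haddr, pmd, entry);
		update_mmu_cache_pmd(vma, haddr, pmd);	/* arch MMU cache hook */
	}
	spin_unlock(ptl);
}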