Lines matching refs: pmd. Each hit lists the source line number, the matching code, and the enclosing function; "argument"/"local" marks how pmd is declared at that line.

188 static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,  in free_pte_range()  argument
191 pgtable_t token = pmd_pgtable(*pmd); in free_pte_range()
192 pmd_clear(pmd); in free_pte_range()
201 pmd_t *pmd; in free_pmd_range() local
206 pmd = pmd_offset(pud, addr); in free_pmd_range()
209 if (pmd_none_or_clear_bad(pmd)) in free_pmd_range()
211 free_pte_range(tlb, pmd, addr); in free_pmd_range()
212 } while (pmd++, addr = next, addr != end); in free_pmd_range()
225 pmd = pmd_offset(pud, start); in free_pmd_range()
227 pmd_free_tlb(tlb, pmd, start); in free_pmd_range()
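
The free_pmd_range() hits above show the classic descend-then-reap walk: pmd_offset() locates the first entry, pmd_none_or_clear_bad() skips empty or corrupt ones, free_pte_range() reclaims each pte page, and the pmd table itself is finally handed to the mmu_gather. A simplified sketch of that shape in kernel-style C, assuming current mainline helpers and omitting the floor/ceiling trimming the real function performs before freeing the table:

/*
 * Simplified sketch of the free_pmd_range() walk; the real function also
 * trims the range against 'floor' and 'ceiling' before freeing the table.
 */
static void sketch_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				  unsigned long addr, unsigned long end)
{
	unsigned long next, start = addr;
	pmd_t *pmd = pmd_offset(pud, addr);	/* first pmd entry covering addr */

	do {
		next = pmd_addr_end(addr, end);	/* clamp to the next PMD boundary */
		if (pmd_none_or_clear_bad(pmd))	/* empty or corrupt: nothing below */
			continue;
		free_pte_range(tlb, pmd, addr);	/* reclaim the pte page it points to */
	} while (pmd++, addr = next, addr != end);

	pmd = pmd_offset(pud, start);
	pud_clear(pud);				/* unhook the pmd table from the pud */
	pmd_free_tlb(tlb, pmd, start);		/* free it only after the TLB flush */
	mm_dec_nr_pmds(tlb->mm);
}
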
424 void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte) in pmd_install() argument
426 spinlock_t *ptl = pmd_lock(mm, pmd); in pmd_install()
428 if (likely(pmd_none(*pmd))) { /* Has another populated it ? */ in pmd_install()
444 pmd_populate(mm, pmd, *pte); in pmd_install()
450 int __pte_alloc(struct mm_struct *mm, pmd_t *pmd) in __pte_alloc() argument
456 pmd_install(mm, pmd, &new); in __pte_alloc()
462 int __pte_alloc_kernel(pmd_t *pmd) in __pte_alloc_kernel() argument
469 if (likely(pmd_none(*pmd))) { /* Has another populated it ? */ in __pte_alloc_kernel()
471 pmd_populate_kernel(&init_mm, pmd, new); in __pte_alloc_kernel()
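
pmd_install(), __pte_alloc() and __pte_alloc_kernel() above all follow the allocate-outside-the-lock, recheck-under-the-lock idiom: a pte page is allocated first, then the pmd spinlock is taken and the entry is populated only if pmd_none() still holds, because a concurrent fault may have installed a table in the meantime. A hedged sketch of the user-space variant; install_pte_table() is an illustrative name, not a kernel symbol, and the kernel variant would use pte_alloc_one_kernel()/pmd_populate_kernel() against init_mm instead:

/* Sketch of the "allocate, lock, recheck, populate" idiom behind
 * __pte_alloc()/pmd_install(); install_pte_table() is a made-up name. */
static int install_pte_table(struct mm_struct *mm, pmd_t *pmd)
{
	pgtable_t new = pte_alloc_one(mm);	/* allocate the pte page unlocked */
	spinlock_t *ptl;

	if (!new)
		return -ENOMEM;

	ptl = pmd_lock(mm, pmd);		/* serialize against concurrent faults */
	if (likely(pmd_none(*pmd))) {		/* has another thread populated it? */
		mm_inc_nr_ptes(mm);
		pmd_populate(mm, pmd, new);	/* hook the pte page into the pmd */
		new = NULL;			/* ownership passed to the page table */
	}
	spin_unlock(ptl);
	if (new)
		pte_free(mm, new);		/* lost the race: give the page back */
	return 0;
}
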
507 pmd_t *pmd = pmd_offset(pud, addr); in print_bad_pte() local
538 (long long)pte_val(pte), (long long)pmd_val(*pmd)); in print_bad_pte()
666 pmd_t pmd) in vm_normal_page_pmd() argument
668 unsigned long pfn = pmd_pfn(pmd); in vm_normal_page_pmd()
671 if (unlikely(pmd_special(pmd))) in vm_normal_page_pmd()
703 unsigned long addr, pmd_t pmd) in vm_normal_folio_pmd() argument
705 struct page *page = vm_normal_page_pmd(vma, addr, pmd); in vm_normal_folio_pmd()
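
vm_normal_page_pmd() and vm_normal_folio_pmd() above translate a huge pmd into its backing struct page or folio while filtering out special (raw PFN) mappings that have no struct page. A rough sketch of that filtering; normal_page_from_pmd()/normal_folio_from_pmd() are illustrative names, and the real code additionally handles CONFIG_ARCH_HAS_PTE_SPECIAL, the huge zero folio and pfnmap VMAs:

/* Illustrative only: the real vm_normal_page_pmd() has more corner cases. */
static struct page *normal_page_from_pmd(struct vm_area_struct *vma,
					 unsigned long addr, pmd_t pmd)
{
	unsigned long pfn = pmd_pfn(pmd);

	if (unlikely(pmd_special(pmd)))		/* special mapping: no struct page */
		return NULL;
	if (unlikely(!pfn_valid(pfn)))		/* paranoia: pfn must have a memmap entry */
		return NULL;
	return pfn_to_page(pfn);
}

static struct folio *normal_folio_from_pmd(struct vm_area_struct *vma,
					   unsigned long addr, pmd_t pmd)
{
	struct page *page = normal_page_from_pmd(vma, addr, pmd);

	return page ? page_folio(page) : NULL;	/* fold the page into its folio */
}
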
1693 struct vm_area_struct *vma, pmd_t *pmd, in zap_pte_range() argument
1712 start_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl); in zap_pte_range()
1746 direct_reclaim = try_get_and_clear_pmd(mm, pmd, &pmdval); in zap_pte_range()
1778 try_to_free_pte(mm, pmd, start, tlb); in zap_pte_range()
1789 pmd_t *pmd; in zap_pmd_range() local
1792 pmd = pmd_offset(pud, addr); in zap_pmd_range()
1795 if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd)) { in zap_pmd_range()
1797 __split_huge_pmd(vma, pmd, addr, false); in zap_pmd_range()
1798 else if (zap_huge_pmd(tlb, vma, pmd, addr)) { in zap_pmd_range()
1805 next - addr == HPAGE_PMD_SIZE && pmd_none(*pmd)) { in zap_pmd_range()
1806 spinlock_t *ptl = pmd_lock(tlb->mm, pmd); in zap_pmd_range()
1814 if (pmd_none(*pmd)) { in zap_pmd_range()
1818 addr = zap_pte_range(tlb, vma, pmd, addr, next, details); in zap_pmd_range()
1820 pmd--; in zap_pmd_range()
1821 } while (pmd++, cond_resched(), addr != end); in zap_pmd_range()
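
zap_pmd_range() above layers THP handling onto the same walk: a swap or transparent-huge pmd is either split back to ptes (when only part of it is being unmapped) or zapped wholesale, an empty pmd is skipped, and everything else falls through to zap_pte_range(); the pmd--/pmd++ pair re-runs the same entry when zap_pte_range() had to stop early. A stripped-down sketch of that branch structure, omitting the zap_details filtering and the pmd_lock()/spin_unlock() handshake seen at lines 1805-1806:

/* Sketch of the zap_pmd_range() branch structure; zap_details filtering
 * and the single-folio pmd_lock handshake are omitted. */
static unsigned long sketch_zap_pmd_range(struct mmu_gather *tlb,
					  struct vm_area_struct *vma, pud_t *pud,
					  unsigned long addr, unsigned long end,
					  struct zap_details *details)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	unsigned long next;

	do {
		next = pmd_addr_end(addr, end);
		if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd)) {
			if (next - addr != HPAGE_PMD_SIZE)
				__split_huge_pmd(vma, pmd, addr, false);	/* partial zap: split to ptes */
			else if (zap_huge_pmd(tlb, vma, pmd, addr)) {
				addr = next;					/* whole huge pmd is gone */
				continue;
			}
			/* otherwise fall through: the pmd now points at a pte table */
		}
		if (pmd_none(*pmd)) {
			addr = next;						/* nothing mapped here */
			continue;
		}
		addr = zap_pte_range(tlb, vma, pmd, addr, next, details);
		if (addr != next)
			pmd--;							/* retry this entry */
	} while (pmd++, cond_resched(), addr != end);

	return addr;
}
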
2072 pmd_t *pmd; in walk_to_pmd() local
2081 pmd = pmd_alloc(mm, pud, addr); in walk_to_pmd()
2082 if (!pmd) in walk_to_pmd()
2085 VM_BUG_ON(pmd_trans_huge(*pmd)); in walk_to_pmd()
2086 return pmd; in walk_to_pmd()
2092 pmd_t *pmd = walk_to_pmd(mm, addr); in __get_locked_pte() local
2094 if (!pmd) in __get_locked_pte()
2096 return pte_alloc_map_lock(mm, pmd, addr, ptl); in __get_locked_pte()
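
walk_to_pmd() and __get_locked_pte() show the allocate-on-the-way-down idiom: each level from the pgd down to the pmd is allocated if it is missing, and only then is the pte level mapped and locked. A sketch of the full descent, assuming the usual five-level helpers:

/* Sketch of the walk_to_pmd()/__get_locked_pte() descent. */
static pte_t *sketch_get_locked_pte(struct mm_struct *mm, unsigned long addr,
				    spinlock_t **ptl)
{
	pgd_t *pgd = pgd_offset(mm, addr);	/* the top level always exists */
	p4d_t *p4d = p4d_alloc(mm, pgd, addr);	/* lower levels are built on demand */
	pud_t *pud;
	pmd_t *pmd;

	if (!p4d)
		return NULL;
	pud = pud_alloc(mm, p4d, addr);
	if (!pud)
		return NULL;
	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;

	VM_BUG_ON(pmd_trans_huge(*pmd));	/* callers must not race with THP here */
	/* allocate the pte page if needed, then map and lock the pte */
	return pte_alloc_map_lock(mm, pmd, addr, ptl);
}
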
2227 pmd_t *pmd = NULL; in insert_pages() local
2237 pmd = walk_to_pmd(mm, addr); in insert_pages()
2238 if (!pmd) in insert_pages()
2246 if (pte_alloc(mm, pmd)) in insert_pages()
2253 start_pte = pte_offset_map_lock(mm, pmd, addr, &pte_lock); in insert_pages()
2682 static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd, in remap_pte_range() argument
2690 mapped_pte = pte = pte_alloc_map_lock(mm, pmd, addr, &ptl); in remap_pte_range()
2712 pmd_t *pmd; in remap_pmd_range() local
2717 pmd = pmd_alloc(mm, pud, addr); in remap_pmd_range()
2718 if (!pmd) in remap_pmd_range()
2720 VM_BUG_ON(pmd_trans_huge(*pmd)); in remap_pmd_range()
2723 err = remap_pte_range(mm, pmd, addr, next, in remap_pmd_range()
2727 } while (pmd++, addr = next, addr != end); in remap_pmd_range()
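
remap_pmd_range() is the allocating counterpart of the walks above: pmd_alloc() creates the level if needed, and each PMD-sized chunk is handed to remap_pte_range() with the pfn rebased so that pfn + (addr >> PAGE_SHIFT) names the first page of the chunk. A condensed sketch of that loop, following the fragments above:

/* Condensed sketch of the remap_pmd_range() loop. */
static int sketch_remap_pmd_range(struct mm_struct *mm, pud_t *pud,
				  unsigned long addr, unsigned long end,
				  unsigned long pfn, pgprot_t prot)
{
	unsigned long next;
	pmd_t *pmd = pmd_alloc(mm, pud, addr);	/* create the pmd level if missing */
	int err;

	if (!pmd)
		return -ENOMEM;
	VM_BUG_ON(pmd_trans_huge(*pmd));
	pfn -= addr >> PAGE_SHIFT;		/* rebase: see the lead-in above */
	do {
		next = pmd_addr_end(addr, end);
		err = remap_pte_range(mm, pmd, addr, next,
				      pfn + (addr >> PAGE_SHIFT), prot);
		if (err)
			return err;		/* propagate pte-level failure */
	} while (pmd++, addr = next, addr != end);
	return 0;
}
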
2987 static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd, in apply_to_pte_range() argument
2998 pte_alloc_kernel_track(pmd, addr, mask) : in apply_to_pte_range()
2999 pte_alloc_map_lock(mm, pmd, addr, &ptl); in apply_to_pte_range()
3004 pte_offset_kernel(pmd, addr) : in apply_to_pte_range()
3005 pte_offset_map_lock(mm, pmd, addr, &ptl); in apply_to_pte_range()
3035 pmd_t *pmd; in apply_to_pmd_range() local
3042 pmd = pmd_alloc_track(mm, pud, addr, mask); in apply_to_pmd_range()
3043 if (!pmd) in apply_to_pmd_range()
3046 pmd = pmd_offset(pud, addr); in apply_to_pmd_range()
3050 if (pmd_none(*pmd) && !create) in apply_to_pmd_range()
3052 if (WARN_ON_ONCE(pmd_leaf(*pmd))) in apply_to_pmd_range()
3054 if (!pmd_none(*pmd) && WARN_ON_ONCE(pmd_bad(*pmd))) { in apply_to_pmd_range()
3057 pmd_clear_bad(pmd); in apply_to_pmd_range()
3059 err = apply_to_pte_range(mm, pmd, addr, next, in apply_to_pmd_range()
3063 } while (pmd++, addr = next, addr != end); in apply_to_pmd_range()
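
apply_to_pmd_range() adds a create flag to the walk: with create set it uses pmd_alloc_track() so missing levels are built and the pgtbl_mod_mask records which levels changed; without it, empty entries are skipped, leaf and bad entries only trigger warnings, and just the populated pte tables reach apply_to_pte_range(). A sketch of those decision points, assuming current signatures:

/* Sketch of the create-vs-skip decisions in apply_to_pmd_range(). */
static int sketch_apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
				     unsigned long addr, unsigned long end,
				     pte_fn_t fn, void *data, bool create,
				     pgtbl_mod_mask *mask)
{
	unsigned long next;
	pmd_t *pmd;
	int err = 0;

	if (create) {
		pmd = pmd_alloc_track(mm, pud, addr, mask);	/* build missing levels */
		if (!pmd)
			return -ENOMEM;
	} else {
		pmd = pmd_offset(pud, addr);			/* only walk what exists */
	}
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd) && !create)
			continue;				/* hole, and not asked to fill it */
		if (WARN_ON_ONCE(pmd_leaf(*pmd)))
			return -EINVAL;				/* huge mappings are not walked */
		if (!pmd_none(*pmd) && WARN_ON_ONCE(pmd_bad(*pmd))) {
			if (!create)
				continue;
			pmd_clear_bad(pmd);			/* reset a corrupt entry before reuse */
		}
		err = apply_to_pte_range(mm, pmd, addr, next, fn, data, create, mask);
		if (err)
			break;
	} while (pmd++, addr = next, addr != end);

	return err;
}
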
3266 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl); in __wp_page_copy_user()
3294 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl); in __wp_page_copy_user()
3588 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl); in wp_page_copy()
3701 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address, in finish_mkwrite_fault()
4175 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, in remove_device_exclusive_entry()
4211 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, in pte_marker_clear()
4399 pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, in alloc_swap_folio()
4479 migration_entry_wait(vma->vm_mm, vmf->pmd, in do_swap_page()
4496 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_swap_page()
4596 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_swap_page()
4668 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, in do_swap_page()
4945 pte = pte_offset_map(vmf->pmd, vmf->address & PMD_MASK); in alloc_anon_folio()
5022 if (pte_alloc(vma->vm_mm, vmf->pmd)) in do_anonymous_page()
5030 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_anonymous_page()
5075 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl); in do_anonymous_page()
5146 if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) { in __do_fault()
5186 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte); in deposit_prealloc_pte()
5238 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_set_pmd()
5239 if (unlikely(!pmd_none(*vmf->pmd))) in do_set_pmd()
5257 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); in do_set_pmd()
5259 update_mmu_cache_pmd(vma, haddr, vmf->pmd); in do_set_pmd()
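
do_set_pmd() maps an entire PMD-sized file folio in one shot: it takes the pmd spinlock, rechecks pmd_none() because a concurrent fault may already have populated the entry, writes the huge entry with set_pmd_at() and pokes the architecture hook update_mmu_cache_pmd(). A very rough sketch of just that locked install; sketch_install_huge_pmd() is an illustrative name, the caller is assumed to have built 'entry' already (the helper for that differs across kernel versions), and rmap, accounting and the pgtable deposit are left out:

/* Rough sketch of the locked huge-pmd install in do_set_pmd(); 'entry' is
 * assumed to be prepared by the caller, and rmap/accounting are omitted. */
static vm_fault_t sketch_install_huge_pmd(struct vm_fault *vmf, pmd_t entry)
{
	struct vm_area_struct *vma = vmf->vma;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
	vm_fault_t ret = VM_FAULT_FALLBACK;

	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);	/* serialize against other faults */
	if (unlikely(!pmd_none(*vmf->pmd)))		/* raced: already populated */
		goto out;

	set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);	/* publish the huge mapping */
	update_mmu_cache_pmd(vma, haddr, vmf->pmd);	/* arch hook for the new entry */
	ret = 0;
out:
	spin_unlock(vmf->ptl);
	return ret;
}
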
5374 if (pmd_none(*vmf->pmd)) { in finish_fault()
5382 pmd_install(vma->vm_mm, vmf->pmd, &vmf->prealloc_pte); in finish_fault()
5383 else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd))) in finish_fault()
5420 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in finish_fault()
5524 if (pmd_none(*vmf->pmd)) { in do_fault_around()
5687 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, in do_fault()
5887 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_numa_page()
5950 __split_huge_pmd(vma, vmf->pmd, vmf->address, false); in wp_huge_pmd()
6012 if (unlikely(pmd_none(*vmf->pmd))) { in handle_pte_fault()
6037 vmf->pte = pte_offset_map_rw_nolock(vmf->vma->vm_mm, vmf->pmd, in handle_pte_fault()
6155 vmf.pmd = pmd_alloc(mm, vmf.pud, address); in __handle_mm_fault()
6156 if (!vmf.pmd) in __handle_mm_fault()
6163 if (pmd_none(*vmf.pmd) && in __handle_mm_fault()
6170 vmf.orig_pmd = pmdp_get_lockless(vmf.pmd); in __handle_mm_fault()
6176 pmd_migration_entry_wait(mm, vmf.pmd); in __handle_mm_fault()
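
__handle_mm_fault() first guarantees a pmd exists with pmd_alloc(), then snapshots it once with pmdp_get_lockless() so the THP branches (pmd migration entry, huge NUMA fault, huge write-protect) all act on the same value; only if none of them applies does the fault drop to the pte level. A trimmed sketch of that decision tree in which the pud-level paths, the THP eligibility checks, the huge NUMA branch and the fallback-to-pte retry after create_huge_pmd() are omitted:

/* Trimmed sketch of the pmd-level decisions in __handle_mm_fault(). */
static vm_fault_t sketch_pmd_fault(struct vm_fault *vmf, struct mm_struct *mm,
				   unsigned long address)
{
	vmf->pmd = pmd_alloc(mm, vmf->pud, address);	/* ensure the pmd level exists */
	if (!vmf->pmd)
		return VM_FAULT_OOM;

	if (pmd_none(*vmf->pmd))
		return create_huge_pmd(vmf);	/* try a THP; real code retries ptes on FALLBACK */

	vmf->orig_pmd = pmdp_get_lockless(vmf->pmd);	/* one consistent snapshot */

	if (unlikely(is_swap_pmd(vmf->orig_pmd))) {
		if (is_pmd_migration_entry(vmf->orig_pmd))
			pmd_migration_entry_wait(mm, vmf->pmd);	/* block until migration ends */
		return 0;
	}
	if (pmd_trans_huge(vmf->orig_pmd) &&
	    (vmf->flags & FAULT_FLAG_WRITE) && !pmd_write(vmf->orig_pmd))
		return wp_huge_pmd(vmf);		/* write to a read-only huge pmd */

	return handle_pte_fault(vmf);			/* otherwise handle at pte level */
}
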
6536 pmd_t *pmdp, pmd; in follow_pfnmap_start() local
6573 pmd = pmdp_get_lockless(pmdp); in follow_pfnmap_start()
6574 if (pmd_leaf(pmd)) { in follow_pfnmap_start()
6576 if (!unlikely(pmd_leaf(pmd))) { in follow_pfnmap_start()
6580 pfnmap_args_setup(args, lock, NULL, pmd_pgprot(pmd), in follow_pfnmap_start()
6581 pmd_pfn(pmd), PMD_MASK, pmd_write(pmd), in follow_pfnmap_start()
6582 pmd_special(pmd)); in follow_pfnmap_start()
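
follow_pfnmap_start() probes the pmd locklessly with pmdp_get_lockless() and, if it sees a leaf (huge) entry, takes the pmd lock and re-checks before reporting the pfn, protections, writability and special bit. A sketch of that branch; report_huge_pfn() is a made-up stand-in for the internal pfnmap_args_setup() plumbing, and in the real code the pmd lock is carried back to the caller and released later by follow_pfnmap_end() rather than dropped here:

/* Sketch of the huge-pmd branch of follow_pfnmap_start();
 * report_huge_pfn() is a hypothetical stand-in for pfnmap_args_setup(). */
static int sketch_follow_pfnmap_pmd(struct mm_struct *mm, pmd_t *pmdp,
				    unsigned long address)
{
	spinlock_t *lock;
	pmd_t pmd = pmdp_get_lockless(pmdp);	/* unlocked snapshot of the entry */

	if (!pmd_leaf(pmd))
		return -EAGAIN;			/* not huge: walk the pte level instead */

	lock = pmd_lock(mm, pmdp);
	pmd = pmdp_get_lockless(pmdp);		/* re-read now that we hold the lock */
	if (!pmd_leaf(pmd)) {
		spin_unlock(lock);		/* it changed under us: fall back */
		return -EAGAIN;
	}

	/* a stable huge entry: hand back its pfn, protections and writability */
	report_huge_pfn(pmd_pfn(pmd), pmd_pgprot(pmd), pmd_write(pmd),
			pmd_special(pmd));
	spin_unlock(lock);
	return 0;
}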