Lines matching refs: vmf. Every line that references the struct vm_fault fault descriptor (vmf) in mm/memory.c. The leading number on each matched line is its line number in the source file; the trailing "in function()" tag names the enclosing function, with "argument"/"local" marking the lines where vmf is declared.
2713 static inline int pte_unmap_same(struct vm_fault *vmf) in pte_unmap_same() argument
2718 spinlock_t *ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd); in pte_unmap_same()
2720 same = pte_same(*vmf->pte, vmf->orig_pte); in pte_unmap_same()
2724 pte_unmap(vmf->pte); in pte_unmap_same()
2725 vmf->pte = NULL; in pte_unmap_same()
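
The refs output keeps only the lines that mention vmf, but this helper is short enough to restore whole. A condensed reconstruction, assuming a ~v5.15/v5.16 mm/memory.c (the CONFIG guard and return path are filled in from context and may differ in detail from the exact tree this listing came from):

    static inline int pte_unmap_same(struct vm_fault *vmf)
    {
        int same = 1;
    #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPTION)
        if (sizeof(pte_t) > sizeof(unsigned long)) {
            /*
             * A PTE wider than a machine word cannot be read
             * atomically, so recheck under the PTE lock that the
             * value we faulted on is still in the page table.
             */
            spinlock_t *ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
            spin_lock(ptl);
            same = pte_same(*vmf->pte, vmf->orig_pte);
            spin_unlock(ptl);
        }
    #endif
        pte_unmap(vmf->pte);
        vmf->pte = NULL;
        return same;
    }

This "recheck pte_same() under the lock" pattern repeats throughout the listing: vmf->orig_pte is sampled without the lock, so every slow path must revalidate it before touching the page table.
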
2730 struct vm_fault *vmf) in cow_user_page() argument
2736 struct vm_area_struct *vma = vmf->vma; in cow_user_page()
2738 unsigned long addr = vmf->address; in cow_user_page()
2758 if (arch_faults_on_old_pte() && !pte_young(vmf->orig_pte)) { in cow_user_page()
2761 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl); in cow_user_page()
2763 if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) { in cow_user_page()
2768 update_mmu_tlb(vma, addr, vmf->pte); in cow_user_page()
2773 entry = pte_mkyoung(vmf->orig_pte); in cow_user_page()
2774 if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0)) in cow_user_page()
2775 update_mmu_cache(vma, addr, vmf->pte); in cow_user_page()
2789 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl); in cow_user_page()
2791 if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) { in cow_user_page()
2793 update_mmu_tlb(vma, addr, vmf->pte); in cow_user_page()
2817 pte_unmap_unlock(vmf->pte, vmf->ptl); in cow_user_page()
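
cow_user_page() copies the faulting page into the freshly allocated destination. The two pte_offset_map_lock()/pte_same() sequences in the matched lines both serve the corner case where the source is a PFN mapping with no struct page, so the kernel must copy through the user virtual address instead. A sketch of the first locked section, with the surrounding locals (mm, addr, locked, ret, the pte_unlock label) taken from the elided lines:

    /*
     * On architectures that manage the "accessed" bit in software,
     * reading through the user address below would itself fault on
     * an old PTE. Mark it young first, under the PTE lock.
     */
    if (arch_faults_on_old_pte() && !pte_young(vmf->orig_pte)) {
        pte_t entry;

        vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
        locked = true;
        if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
            /* Another thread handled the fault; make the caller retry. */
            update_mmu_tlb(vma, addr, vmf->pte);
            ret = false;
            goto pte_unlock;
        }

        entry = pte_mkyoung(vmf->orig_pte);
        if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0))
            update_mmu_cache(vma, addr, vmf->pte);
    }

If the user-space copy that follows still fails, the second locked section (lines 2789-2793 above) revalidates the PTE once more and, as a last resort, the destination is zero-filled.
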
2844 static vm_fault_t do_page_mkwrite(struct vm_fault *vmf) in do_page_mkwrite() argument
2847 struct page *page = vmf->page; in do_page_mkwrite()
2848 unsigned int old_flags = vmf->flags; in do_page_mkwrite()
2850 vmf->flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE; in do_page_mkwrite()
2852 if (vmf->vma->vm_file && in do_page_mkwrite()
2853 IS_SWAPFILE(vmf->vma->vm_file->f_mapping->host)) in do_page_mkwrite()
2856 ret = vmf->vma->vm_ops->page_mkwrite(vmf); in do_page_mkwrite()
2858 vmf->flags = old_flags; in do_page_mkwrite()
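
do_page_mkwrite() notifies the filesystem that a read-only shared page is about to become writable. The matched lines show the flag juggling; a reconstruction of the whole helper, with the post-call checks restored from context (details may vary by tree):

    static vm_fault_t do_page_mkwrite(struct vm_fault *vmf)
    {
        vm_fault_t ret;
        struct page *page = vmf->page;
        unsigned int old_flags = vmf->flags;

        vmf->flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;

        /* Writing into a swapfile's page cache is not allowed. */
        if (vmf->vma->vm_file &&
            IS_SWAPFILE(vmf->vma->vm_file->f_mapping->host))
            return VM_FAULT_SIGBUS;

        ret = vmf->vma->vm_ops->page_mkwrite(vmf);
        /* Restore the original flags so the caller is not surprised. */
        vmf->flags = old_flags;
        if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
            return ret;
        if (unlikely(!(ret & VM_FAULT_LOCKED))) {
            lock_page(page);
            if (!page->mapping) {
                /* Truncated under us: make the caller retry. */
                unlock_page(page);
                return 0;
            }
            ret |= VM_FAULT_LOCKED;
        }
        return ret;
    }
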
2878 static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf) in fault_dirty_shared_page() argument
2880 struct vm_area_struct *vma = vmf->vma; in fault_dirty_shared_page()
2882 struct page *page = vmf->page; in fault_dirty_shared_page()
2912 fpin = maybe_unlock_mmap_for_io(vmf, NULL); in fault_dirty_shared_page()
2931 static inline void wp_page_reuse(struct vm_fault *vmf) in wp_page_reuse() argument
2932 __releases(vmf->ptl) in wp_page_reuse()
2934 struct vm_area_struct *vma = vmf->vma; in wp_page_reuse()
2935 struct page *page = vmf->page; in wp_page_reuse()
2945 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte)); in wp_page_reuse()
2946 entry = pte_mkyoung(vmf->orig_pte); in wp_page_reuse()
2948 if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1)) in wp_page_reuse()
2949 update_mmu_cache(vma, vmf->address, vmf->pte); in wp_page_reuse()
2950 pte_unmap_unlock(vmf->pte, vmf->ptl); in wp_page_reuse()
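
wp_page_reuse() is the cheap exit from a write-protect fault: the existing page can simply be made writable in place. A reconstruction with the elided lines restored (the cpupid reset and the PGREUSE counter are filled in from context):

    static inline void wp_page_reuse(struct vm_fault *vmf)
        __releases(vmf->ptl)
    {
        struct vm_area_struct *vma = vmf->vma;
        struct page *page = vmf->page;
        pte_t entry;

        /*
         * Clear the page's cpupid information: it may belong to a
         * now completely unrelated process.
         */
        if (page)
            page_cpupid_xchg_last(page, (1 << LAST_CPUPID_SHIFT) - 1);

        flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
        entry = pte_mkyoung(vmf->orig_pte);
        entry = maybe_mkwrite(pte_mkdirty(entry), vma);
        if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1))
            update_mmu_cache(vma, vmf->address, vmf->pte);
        pte_unmap_unlock(vmf->pte, vmf->ptl);
        count_vm_event(PGREUSE);
    }
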
2970 static vm_fault_t wp_page_copy(struct vm_fault *vmf) in wp_page_copy() argument
2972 struct vm_area_struct *vma = vmf->vma; in wp_page_copy()
2974 struct page *old_page = vmf->page; in wp_page_copy()
2983 if (is_zero_pfn(pte_pfn(vmf->orig_pte))) { in wp_page_copy()
2985 vmf->address); in wp_page_copy()
2990 vmf->address); in wp_page_copy()
2994 if (!cow_user_page(new_page, old_page, vmf)) { in wp_page_copy()
3015 vmf->address & PAGE_MASK, in wp_page_copy()
3016 (vmf->address & PAGE_MASK) + PAGE_SIZE); in wp_page_copy()
3022 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl); in wp_page_copy()
3023 if (likely(pte_same(*vmf->pte, vmf->orig_pte))) { in wp_page_copy()
3033 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte)); in wp_page_copy()
3045 ptep_clear_flush_notify(vma, vmf->address, vmf->pte); in wp_page_copy()
3046 page_add_new_anon_rmap(new_page, vma, vmf->address, false); in wp_page_copy()
3053 set_pte_at_notify(mm, vmf->address, vmf->pte, entry); in wp_page_copy()
3054 update_mmu_cache(vma, vmf->address, vmf->pte); in wp_page_copy()
3085 update_mmu_tlb(vma, vmf->address, vmf->pte); in wp_page_copy()
3091 pte_unmap_unlock(vmf->pte, vmf->ptl); in wp_page_copy()
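
wp_page_copy() is the real copy-on-write: allocate, copy (or zero-fill, per the is_zero_pfn() branch above), then publish the new page under the PTE lock. A condensed sketch of the publish sequence the matched lines trace; accounting, error unwinding, and the old-page teardown are elided, and mk_pte()/maybe_mkwrite() are filled in from context:

    mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
                vmf->address & PAGE_MASK,
                (vmf->address & PAGE_MASK) + PAGE_SIZE);
    mmu_notifier_invalidate_range_start(&range);

    vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl);
    if (likely(pte_same(*vmf->pte, vmf->orig_pte))) {
        flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
        entry = mk_pte(new_page, vma->vm_page_prot);
        entry = maybe_mkwrite(pte_mkdirty(entry), vma);

        /*
         * Clear the old PTE, flushing other CPUs' TLBs, before the
         * new page becomes visible: no CPU may keep writing to the
         * old page through a stale translation.
         */
        ptep_clear_flush_notify(vma, vmf->address, vmf->pte);
        page_add_new_anon_rmap(new_page, vma, vmf->address, false);
        lru_cache_add_inactive_or_unevictable(new_page, vma);
        set_pte_at_notify(mm, vmf->address, vmf->pte, entry);
        update_mmu_cache(vma, vmf->address, vmf->pte);
    } else {
        /* Raced with another fault: only fix up the local TLB. */
        update_mmu_tlb(vma, vmf->address, vmf->pte);
    }
    pte_unmap_unlock(vmf->pte, vmf->ptl);
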
3137 vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf) in finish_mkwrite_fault() argument
3139 WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED)); in finish_mkwrite_fault()
3140 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address, in finish_mkwrite_fault()
3141 &vmf->ptl); in finish_mkwrite_fault()
3146 if (!pte_same(*vmf->pte, vmf->orig_pte)) { in finish_mkwrite_fault()
3147 update_mmu_tlb(vmf->vma, vmf->address, vmf->pte); in finish_mkwrite_fault()
3148 pte_unmap_unlock(vmf->pte, vmf->ptl); in finish_mkwrite_fault()
3151 wp_page_reuse(vmf); in finish_mkwrite_fault()
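
finish_mkwrite_fault() retakes the PTE lock after a driver's ->page_mkwrite()/->pfn_mkwrite() callback dropped it, and only then reuses the page. A reconstruction (the VM_FAULT_NOPAGE return on a lost race is filled in from context):

    vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf)
    {
        WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED));
        vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
                           vmf->address, &vmf->ptl);
        /*
         * We might have raced with another fault while the PTE lock
         * was dropped for the callback.
         */
        if (!pte_same(*vmf->pte, vmf->orig_pte)) {
            update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
            pte_unmap_unlock(vmf->pte, vmf->ptl);
            return VM_FAULT_NOPAGE;
        }
        wp_page_reuse(vmf);
        return 0;
    }
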
3159 static vm_fault_t wp_pfn_shared(struct vm_fault *vmf) in wp_pfn_shared() argument
3161 struct vm_area_struct *vma = vmf->vma; in wp_pfn_shared()
3166 pte_unmap_unlock(vmf->pte, vmf->ptl); in wp_pfn_shared()
3167 vmf->flags |= FAULT_FLAG_MKWRITE; in wp_pfn_shared()
3168 ret = vma->vm_ops->pfn_mkwrite(vmf); in wp_pfn_shared()
3171 return finish_mkwrite_fault(vmf); in wp_pfn_shared()
3173 wp_page_reuse(vmf); in wp_pfn_shared()
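
wp_pfn_shared() handles write faults on shared PFN mappings that have no struct page behind them. A reconstruction (the vm_ops check and return values are filled in from context):

    static vm_fault_t wp_pfn_shared(struct vm_fault *vmf)
    {
        struct vm_area_struct *vma = vmf->vma;

        if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) {
            vm_fault_t ret;

            /* The driver callback may sleep: drop the PTE lock. */
            pte_unmap_unlock(vmf->pte, vmf->ptl);
            vmf->flags |= FAULT_FLAG_MKWRITE;
            ret = vma->vm_ops->pfn_mkwrite(vmf);
            if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))
                return ret;
            return finish_mkwrite_fault(vmf);
        }
        wp_page_reuse(vmf);
        return VM_FAULT_WRITE;
    }
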
3177 static vm_fault_t wp_page_shared(struct vm_fault *vmf) in wp_page_shared() argument
3178 __releases(vmf->ptl) in wp_page_shared()
3180 struct vm_area_struct *vma = vmf->vma; in wp_page_shared()
3183 get_page(vmf->page); in wp_page_shared()
3188 pte_unmap_unlock(vmf->pte, vmf->ptl); in wp_page_shared()
3189 tmp = do_page_mkwrite(vmf); in wp_page_shared()
3192 put_page(vmf->page); in wp_page_shared()
3195 tmp = finish_mkwrite_fault(vmf); in wp_page_shared()
3197 unlock_page(vmf->page); in wp_page_shared()
3198 put_page(vmf->page); in wp_page_shared()
3202 wp_page_reuse(vmf); in wp_page_shared()
3203 lock_page(vmf->page); in wp_page_shared()
3205 ret |= fault_dirty_shared_page(vmf); in wp_page_shared()
3206 put_page(vmf->page); in wp_page_shared()
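
wp_page_shared() is the struct-page analogue for shared writable file mappings: notify the filesystem via do_page_mkwrite(), revalidate with finish_mkwrite_fault(), then dirty and writeback-balance the page. A reconstruction, with the return-value plumbing restored from context:

    static vm_fault_t wp_page_shared(struct vm_fault *vmf)
        __releases(vmf->ptl)
    {
        struct vm_area_struct *vma = vmf->vma;
        vm_fault_t ret = VM_FAULT_WRITE;

        get_page(vmf->page);

        if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
            vm_fault_t tmp;

            pte_unmap_unlock(vmf->pte, vmf->ptl);
            tmp = do_page_mkwrite(vmf);
            if (unlikely(!tmp ||
                     (tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
                put_page(vmf->page);
                return tmp;
            }
            tmp = finish_mkwrite_fault(vmf);
            if (unlikely(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
                unlock_page(vmf->page);
                put_page(vmf->page);
                return tmp;
            }
        } else {
            wp_page_reuse(vmf);
            lock_page(vmf->page);
        }
        ret |= fault_dirty_shared_page(vmf);
        put_page(vmf->page);

        return ret;
    }
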
3229 static vm_fault_t do_wp_page(struct vm_fault *vmf) in do_wp_page() argument
3230 __releases(vmf->ptl) in do_wp_page()
3232 struct vm_area_struct *vma = vmf->vma; in do_wp_page()
3234 if (userfaultfd_pte_wp(vma, *vmf->pte)) { in do_wp_page()
3235 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_wp_page()
3236 return handle_userfault(vmf, VM_UFFD_WP); in do_wp_page()
3243 if (unlikely(userfaultfd_wp(vmf->vma) && in do_wp_page()
3244 mm_tlb_flush_pending(vmf->vma->vm_mm))) in do_wp_page()
3245 flush_tlb_page(vmf->vma, vmf->address); in do_wp_page()
3247 vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte); in do_wp_page()
3248 if (!vmf->page) { in do_wp_page()
3258 return wp_pfn_shared(vmf); in do_wp_page()
3260 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_wp_page()
3261 return wp_page_copy(vmf); in do_wp_page()
3268 if (PageAnon(vmf->page)) { in do_wp_page()
3269 struct page *page = vmf->page; in do_wp_page()
3286 wp_page_reuse(vmf); in do_wp_page()
3290 return wp_page_shared(vmf); in do_wp_page()
3296 get_page(vmf->page); in do_wp_page()
3298 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_wp_page()
3299 return wp_page_copy(vmf); in do_wp_page()
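
do_wp_page() is the write-protect dispatcher that the previous helpers serve. A condensed decision tree; can_reuse_anon_page() is a hypothetical stand-in for the trylock/refcount/PageKsm checks elided from the listing, not a real kernel function, and the TLB-flush workaround at lines 3243-3245 is also elided:

    static vm_fault_t do_wp_page(struct vm_fault *vmf)
        __releases(vmf->ptl)
    {
        struct vm_area_struct *vma = vmf->vma;

        /* userfaultfd write-protect takes precedence. */
        if (userfaultfd_pte_wp(vma, *vmf->pte)) {
            pte_unmap_unlock(vmf->pte, vmf->ptl);
            return handle_userfault(vmf, VM_UFFD_WP);
        }

        vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte);
        if (!vmf->page) {
            /*
             * No struct page (PFN map): shared and writable goes
             * to the driver, everything else is copied.
             */
            if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
                         (VM_WRITE|VM_SHARED))
                return wp_pfn_shared(vmf);
            pte_unmap_unlock(vmf->pte, vmf->ptl);
            return wp_page_copy(vmf);
        }

        /* An anon page mapped exactly once can be reused in place. */
        if (PageAnon(vmf->page) && can_reuse_anon_page(vmf->page)) {
            wp_page_reuse(vmf);
            return VM_FAULT_WRITE;
        }

        /* Shared writable file page: keep it, but notify the fs. */
        if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
                     (VM_WRITE|VM_SHARED))
            return wp_page_shared(vmf);

        /* Default: break COW by copying. */
        get_page(vmf->page);
        pte_unmap_unlock(vmf->pte, vmf->ptl);
        return wp_page_copy(vmf);
    }
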
3437 static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf) in remove_device_exclusive_entry() argument
3439 struct page *page = vmf->page; in remove_device_exclusive_entry()
3440 struct vm_area_struct *vma = vmf->vma; in remove_device_exclusive_entry()
3443 if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) in remove_device_exclusive_entry()
3446 vma->vm_mm, vmf->address & PAGE_MASK, in remove_device_exclusive_entry()
3447 (vmf->address & PAGE_MASK) + PAGE_SIZE, NULL); in remove_device_exclusive_entry()
3450 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, in remove_device_exclusive_entry()
3451 &vmf->ptl); in remove_device_exclusive_entry()
3452 if (likely(pte_same(*vmf->pte, vmf->orig_pte))) in remove_device_exclusive_entry()
3453 restore_exclusive_pte(vma, page, vmf->address, vmf->pte); in remove_device_exclusive_entry()
3455 pte_unmap_unlock(vmf->pte, vmf->ptl); in remove_device_exclusive_entry()
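
remove_device_exclusive_entry() hands a page back to the CPU after a driver marked it device-exclusive. A reconstruction; the notifier-range bracketing and the page unlock are filled in from context, and the exact mmu_notifier_range_init variant may differ by tree:

    static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf)
    {
        struct page *page = vmf->page;
        struct vm_area_struct *vma = vmf->vma;
        struct mmu_notifier_range range;

        if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags))
            return VM_FAULT_RETRY;
        mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, vma,
                    vma->vm_mm, vmf->address & PAGE_MASK,
                    (vmf->address & PAGE_MASK) + PAGE_SIZE, NULL);
        mmu_notifier_invalidate_range_start(&range);

        vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
                    &vmf->ptl);
        if (likely(pte_same(*vmf->pte, vmf->orig_pte)))
            restore_exclusive_pte(vma, page, vmf->address, vmf->pte);

        pte_unmap_unlock(vmf->pte, vmf->ptl);
        unlock_page(page);

        mmu_notifier_invalidate_range_end(&range);
        return 0;
    }
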
3470 vm_fault_t do_swap_page(struct vm_fault *vmf) in do_swap_page() argument
3472 struct vm_area_struct *vma = vmf->vma; in do_swap_page()
3482 if (!pte_unmap_same(vmf)) in do_swap_page()
3485 entry = pte_to_swp_entry(vmf->orig_pte); in do_swap_page()
3488 migration_entry_wait(vma->vm_mm, vmf->pmd, in do_swap_page()
3489 vmf->address); in do_swap_page()
3491 vmf->page = pfn_swap_entry_to_page(entry); in do_swap_page()
3492 ret = remove_device_exclusive_entry(vmf); in do_swap_page()
3494 vmf->page = pfn_swap_entry_to_page(entry); in do_swap_page()
3495 ret = vmf->page->pgmap->ops->migrate_to_ram(vmf); in do_swap_page()
3499 print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL); in do_swap_page()
3511 page = lookup_swap_cache(entry, vma, vmf->address); in do_swap_page()
3519 vmf->address); in do_swap_page()
3545 vmf); in do_swap_page()
3554 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_swap_page()
3555 vmf->address, &vmf->ptl); in do_swap_page()
3556 if (likely(pte_same(*vmf->pte, vmf->orig_pte))) in do_swap_page()
3576 locked = lock_page_or_retry(page, vma->vm_mm, vmf->flags); in do_swap_page()
3594 page = ksm_might_need_to_copy(page, vma, vmf->address); in do_swap_page()
3606 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, in do_swap_page()
3607 &vmf->ptl); in do_swap_page()
3608 if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) in do_swap_page()
3629 if ((vmf->flags & FAULT_FLAG_WRITE) && reuse_swap_page(page, NULL)) { in do_swap_page()
3631 vmf->flags &= ~FAULT_FLAG_WRITE; in do_swap_page()
3636 if (pte_swp_soft_dirty(vmf->orig_pte)) in do_swap_page()
3638 if (pte_swp_uffd_wp(vmf->orig_pte)) { in do_swap_page()
3642 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte); in do_swap_page()
3643 arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte); in do_swap_page()
3644 vmf->orig_pte = pte; in do_swap_page()
3648 page_add_new_anon_rmap(page, vma, vmf->address, false); in do_swap_page()
3651 do_page_add_anon_rmap(page, vma, vmf->address, exclusive); in do_swap_page()
3672 if (vmf->flags & FAULT_FLAG_WRITE) { in do_swap_page()
3673 ret |= do_wp_page(vmf); in do_swap_page()
3680 update_mmu_cache(vma, vmf->address, vmf->pte); in do_swap_page()
3682 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_swap_page()
3688 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_swap_page()
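
do_swap_page() is the longest function in the listing, and the matched lines trace one long path: decode the swap entry (dispatching migration, device-exclusive, and device-private entries early), find or read in the page, retake the PTE lock, recheck pte_same(), then build and install the new PTE. A condensed sketch of the install step only, with error handling and swap-cache bookkeeping elided and locals as in the elided lines:

    /* Page is locked and belongs to us: replace the swap entry. */
    pte = mk_pte(page, vma->vm_page_prot);
    if ((vmf->flags & FAULT_FLAG_WRITE) && reuse_swap_page(page, NULL)) {
        /*
         * Sole user of the swap page: map it writable right away
         * and drop FAULT_FLAG_WRITE so do_wp_page() is not needed.
         */
        pte = maybe_mkwrite(pte_mkdirty(pte), vma);
        vmf->flags &= ~FAULT_FLAG_WRITE;
        ret |= VM_FAULT_WRITE;
    }
    flush_icache_page(vma, page);
    if (pte_swp_soft_dirty(vmf->orig_pte))
        pte = pte_mksoft_dirty(pte);
    if (pte_swp_uffd_wp(vmf->orig_pte)) {
        pte = pte_mkuffd_wp(pte);
        pte = pte_wrprotect(pte);
    }
    set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
    arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte);
    vmf->orig_pte = pte;

After the install, the rmap is added (lines 3648/3651 above) and, if the fault was a write that could not reuse the page, do_wp_page() is called as shown at line 3673.
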
3707 static vm_fault_t do_anonymous_page(struct vm_fault *vmf) in do_anonymous_page() argument
3709 struct vm_area_struct *vma = vmf->vma; in do_anonymous_page()
3728 if (pte_alloc(vma->vm_mm, vmf->pmd)) in do_anonymous_page()
3732 if (unlikely(pmd_trans_unstable(vmf->pmd))) in do_anonymous_page()
3736 if (!(vmf->flags & FAULT_FLAG_WRITE) && in do_anonymous_page()
3738 entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address), in do_anonymous_page()
3740 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_anonymous_page()
3741 vmf->address, &vmf->ptl); in do_anonymous_page()
3742 if (!pte_none(*vmf->pte)) { in do_anonymous_page()
3743 update_mmu_tlb(vma, vmf->address, vmf->pte); in do_anonymous_page()
3751 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_anonymous_page()
3752 return handle_userfault(vmf, VM_UFFD_MISSING); in do_anonymous_page()
3760 page = alloc_zeroed_user_highpage_movable(vma, vmf->address); in do_anonymous_page()
3780 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, in do_anonymous_page()
3781 &vmf->ptl); in do_anonymous_page()
3782 if (!pte_none(*vmf->pte)) { in do_anonymous_page()
3783 update_mmu_cache(vma, vmf->address, vmf->pte); in do_anonymous_page()
3793 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_anonymous_page()
3795 return handle_userfault(vmf, VM_UFFD_MISSING); in do_anonymous_page()
3799 page_add_new_anon_rmap(page, vma, vmf->address, false); in do_anonymous_page()
3802 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry); in do_anonymous_page()
3805 update_mmu_cache(vma, vmf->address, vmf->pte); in do_anonymous_page()
3807 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_anonymous_page()
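
do_anonymous_page() has two paths, both visible in the matched lines: read faults map the shared zero page with pte_mkspecial(), write faults allocate a zeroed page. In both, the PTE is rechecked for pte_none() under the lock, and userfaultfd-registered VMAs divert to handle_userfault(). A condensed sketch of the write path's tail, with accounting labels and error unwinding elided:

    entry = mk_pte(page, vma->vm_page_prot);
    entry = pte_sw_mkyoung(entry);
    if (vma->vm_flags & VM_WRITE)
        entry = pte_mkwrite(pte_mkdirty(entry));

    vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
                &vmf->ptl);
    if (!pte_none(*vmf->pte)) {
        /* Lost the race with a concurrent fault. */
        update_mmu_cache(vma, vmf->address, vmf->pte);
        goto release;
    }

    if (userfaultfd_missing(vma)) {
        pte_unmap_unlock(vmf->pte, vmf->ptl);
        put_page(page);
        return handle_userfault(vmf, VM_UFFD_MISSING);
    }

    inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
    page_add_new_anon_rmap(page, vma, vmf->address, false);
    lru_cache_add_inactive_or_unevictable(page, vma);
    set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);

    /* No need to invalidate: it was non-present before. */
    update_mmu_cache(vma, vmf->address, vmf->pte);
    pte_unmap_unlock(vmf->pte, vmf->ptl);
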
3823 static vm_fault_t __do_fault(struct vm_fault *vmf) in __do_fault() argument
3825 struct vm_area_struct *vma = vmf->vma; in __do_fault()
3843 if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) { in __do_fault()
3844 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm); in __do_fault()
3845 if (!vmf->prealloc_pte) in __do_fault()
3849 ret = vma->vm_ops->fault(vmf); in __do_fault()
3854 if (unlikely(PageHWPoison(vmf->page))) { in __do_fault()
3856 unlock_page(vmf->page); in __do_fault()
3857 put_page(vmf->page); in __do_fault()
3858 vmf->page = NULL; in __do_fault()
3863 lock_page(vmf->page); in __do_fault()
3865 VM_BUG_ON_PAGE(!PageLocked(vmf->page), vmf->page); in __do_fault()
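
__do_fault() is the thin wrapper around the VMA's ->fault() handler; the matched lines show nearly all of it. A reconstruction with the return-value checks restored from context:

    static vm_fault_t __do_fault(struct vm_fault *vmf)
    {
        struct vm_area_struct *vma = vmf->vma;
        vm_fault_t ret;

        /*
         * Preallocate a PTE page if the PMD is still empty, so the
         * later page-table setup cannot fail at an awkward point.
         */
        if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) {
            vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
            if (!vmf->prealloc_pte)
                return VM_FAULT_OOM;
        }

        ret = vma->vm_ops->fault(vmf);
        if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
                    VM_FAULT_RETRY | VM_FAULT_DONE_COW)))
            return ret;

        if (unlikely(PageHWPoison(vmf->page))) {
            if (ret & VM_FAULT_LOCKED)
                unlock_page(vmf->page);
            put_page(vmf->page);
            vmf->page = NULL;
            return VM_FAULT_HWPOISON;
        }

        if (unlikely(!(ret & VM_FAULT_LOCKED)))
            lock_page(vmf->page);
        else
            VM_BUG_ON_PAGE(!PageLocked(vmf->page), vmf->page);

        return ret;
    }
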
3871 static void deposit_prealloc_pte(struct vm_fault *vmf) in deposit_prealloc_pte() argument
3873 struct vm_area_struct *vma = vmf->vma; in deposit_prealloc_pte()
3875 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte); in deposit_prealloc_pte()
3881 vmf->prealloc_pte = NULL; in deposit_prealloc_pte()
3884 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page) in do_set_pmd() argument
3886 struct vm_area_struct *vma = vmf->vma; in do_set_pmd()
3887 bool write = vmf->flags & FAULT_FLAG_WRITE; in do_set_pmd()
3888 unsigned long haddr = vmf->address & HPAGE_PMD_MASK; in do_set_pmd()
3913 if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) { in do_set_pmd()
3914 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm); in do_set_pmd()
3915 if (!vmf->prealloc_pte) in do_set_pmd()
3919 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_set_pmd()
3920 if (unlikely(!pmd_none(*vmf->pmd))) in do_set_pmd()
3936 deposit_prealloc_pte(vmf); in do_set_pmd()
3938 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); in do_set_pmd()
3940 update_mmu_cache_pmd(vma, haddr, vmf->pmd); in do_set_pmd()
3946 spin_unlock(vmf->ptl); in do_set_pmd()
3950 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page) in do_set_pmd() argument
3956 void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr) in do_set_pte() argument
3958 struct vm_area_struct *vma = vmf->vma; in do_set_pte()
3959 bool write = vmf->flags & FAULT_FLAG_WRITE; in do_set_pte()
3960 bool prefault = vmf->address != addr; in do_set_pte()
3982 set_pte_at(vma->vm_mm, addr, vmf->pte, entry); in do_set_pte()
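
do_set_pte() builds and installs the PTE for a file-fault page; it is shared by finish_fault() and the fault-around path, which is why it takes a separate addr and compares it against vmf->address to detect prefaulting. A reconstruction with the rmap/accounting branch restored from context:

    void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr)
    {
        struct vm_area_struct *vma = vmf->vma;
        bool write = vmf->flags & FAULT_FLAG_WRITE;
        bool prefault = vmf->address != addr;
        pte_t entry;

        flush_icache_page(vma, page);
        entry = mk_pte(page, vma->vm_page_prot);

        /* Prefaulted pages start out old where the arch prefers it. */
        if (prefault && arch_wants_old_prefaulted_pte())
            entry = pte_mkold(entry);
        else
            entry = pte_sw_mkyoung(entry);

        if (write)
            entry = maybe_mkwrite(pte_mkdirty(entry), vma);
        if (write && !(vma->vm_flags & VM_SHARED)) {
            /* Private COW copy: account as an anonymous page. */
            inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
            page_add_new_anon_rmap(page, vma, addr, false);
            lru_cache_add_inactive_or_unevictable(page, vma);
        } else {
            inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page));
            page_add_file_rmap(page, false);
        }
        set_pte_at(vma->vm_mm, addr, vmf->pte, entry);
    }
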
4000 vm_fault_t finish_fault(struct vm_fault *vmf) in finish_fault() argument
4002 struct vm_area_struct *vma = vmf->vma; in finish_fault()
4007 if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) in finish_fault()
4008 page = vmf->cow_page; in finish_fault()
4010 page = vmf->page; in finish_fault()
4022 if (pmd_none(*vmf->pmd)) { in finish_fault()
4024 ret = do_set_pmd(vmf, page); in finish_fault()
4029 if (vmf->prealloc_pte) in finish_fault()
4030 pmd_install(vma->vm_mm, vmf->pmd, &vmf->prealloc_pte); in finish_fault()
4031 else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd))) in finish_fault()
4036 if (pmd_devmap_trans_unstable(vmf->pmd)) in finish_fault()
4039 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in finish_fault()
4040 vmf->address, &vmf->ptl); in finish_fault()
4043 if (likely(pte_none(*vmf->pte))) in finish_fault()
4044 do_set_pte(vmf, page, vmf->address); in finish_fault()
4048 update_mmu_tlb(vma, vmf->address, vmf->pte); in finish_fault()
4049 pte_unmap_unlock(vmf->pte, vmf->ptl); in finish_fault()
4113 static vm_fault_t do_fault_around(struct vm_fault *vmf) in do_fault_around() argument
4115 unsigned long address = vmf->address, nr_pages, mask; in do_fault_around()
4116 pgoff_t start_pgoff = vmf->pgoff; in do_fault_around()
4123 address = max(address & mask, vmf->vma->vm_start); in do_fault_around()
4124 off = ((vmf->address - address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); in do_fault_around()
4134 end_pgoff = min3(end_pgoff, vma_pages(vmf->vma) + vmf->vma->vm_pgoff - 1, in do_fault_around()
4137 if (pmd_none(*vmf->pmd)) { in do_fault_around()
4138 vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm); in do_fault_around()
4139 if (!vmf->prealloc_pte) in do_fault_around()
4143 return vmf->vma->vm_ops->map_pages(vmf, start_pgoff, end_pgoff); in do_fault_around()
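
The address arithmetic in do_fault_around() is easier to follow with the elided lines restored. A sketch, assuming the default fault_around_bytes of 65536 (16 pages): the faulting address is rounded down to the window and clamped to the VMA start, the same offset is subtracted from the page offset so address and pgoff stay in step, and the end is clamped to the PTE table, the VMA, and the window:

    nr_pages = READ_ONCE(fault_around_bytes) >> PAGE_SHIFT;
    mask = ~(nr_pages * PAGE_SIZE - 1) & PAGE_MASK;

    address = max(address & mask, vmf->vma->vm_start);
    off = ((vmf->address - address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
    start_pgoff -= off;

    /*
     * end_pgoff is either the end of the page table, the end of
     * the vma, or fault_around_bytes worth of pages from
     * start_pgoff, whichever is nearest.
     */
    end_pgoff = start_pgoff -
        ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) +
        PTRS_PER_PTE - 1;
    end_pgoff = min3(end_pgoff, vma_pages(vmf->vma) + vmf->vma->vm_pgoff - 1,
            start_pgoff + nr_pages - 1);

The whole range is then handed to the VMA's ->map_pages() in one call, as the matched line 4143 shows.
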
4146 static vm_fault_t do_read_fault(struct vm_fault *vmf) in do_read_fault() argument
4148 struct vm_area_struct *vma = vmf->vma; in do_read_fault()
4157 if (likely(!userfaultfd_minor(vmf->vma))) { in do_read_fault()
4158 ret = do_fault_around(vmf); in do_read_fault()
4164 ret = __do_fault(vmf); in do_read_fault()
4168 ret |= finish_fault(vmf); in do_read_fault()
4169 unlock_page(vmf->page); in do_read_fault()
4171 put_page(vmf->page); in do_read_fault()
4175 static vm_fault_t do_cow_fault(struct vm_fault *vmf) in do_cow_fault() argument
4177 struct vm_area_struct *vma = vmf->vma; in do_cow_fault()
4183 vmf->cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address); in do_cow_fault()
4184 if (!vmf->cow_page) in do_cow_fault()
4187 if (mem_cgroup_charge(page_folio(vmf->cow_page), vma->vm_mm, in do_cow_fault()
4189 put_page(vmf->cow_page); in do_cow_fault()
4192 cgroup_throttle_swaprate(vmf->cow_page, GFP_KERNEL); in do_cow_fault()
4194 ret = __do_fault(vmf); in do_cow_fault()
4200 copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma); in do_cow_fault()
4201 __SetPageUptodate(vmf->cow_page); in do_cow_fault()
4203 ret |= finish_fault(vmf); in do_cow_fault()
4204 unlock_page(vmf->page); in do_cow_fault()
4205 put_page(vmf->page); in do_cow_fault()
4210 put_page(vmf->cow_page); in do_cow_fault()
4214 static vm_fault_t do_shared_fault(struct vm_fault *vmf) in do_shared_fault() argument
4216 struct vm_area_struct *vma = vmf->vma; in do_shared_fault()
4219 ret = __do_fault(vmf); in do_shared_fault()
4228 unlock_page(vmf->page); in do_shared_fault()
4229 tmp = do_page_mkwrite(vmf); in do_shared_fault()
4232 put_page(vmf->page); in do_shared_fault()
4237 ret |= finish_fault(vmf); in do_shared_fault()
4240 unlock_page(vmf->page); in do_shared_fault()
4241 put_page(vmf->page); in do_shared_fault()
4245 ret |= fault_dirty_shared_page(vmf); in do_shared_fault()
4257 static vm_fault_t do_fault(struct vm_fault *vmf) in do_fault() argument
4259 struct vm_area_struct *vma = vmf->vma; in do_fault()
4271 if (unlikely(!pmd_present(*vmf->pmd))) in do_fault()
4274 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, in do_fault()
4275 vmf->pmd, in do_fault()
4276 vmf->address, in do_fault()
4277 &vmf->ptl); in do_fault()
4285 if (unlikely(pte_none(*vmf->pte))) in do_fault()
4290 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_fault()
4292 } else if (!(vmf->flags & FAULT_FLAG_WRITE)) in do_fault()
4293 ret = do_read_fault(vmf); in do_fault()
4295 ret = do_cow_fault(vmf); in do_fault()
4297 ret = do_shared_fault(vmf); in do_fault()
4300 if (vmf->prealloc_pte) { in do_fault()
4301 pte_free(vm_mm, vmf->prealloc_pte); in do_fault()
4302 vmf->prealloc_pte = NULL; in do_fault()
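
The matched lines around 4271-4290 are do_fault()'s odd corner: a VMA with no ->fault handler gets SIGBUS, unless a racing fault already installed a PTE (the pte_none() recheck above). The dispatch below that is the heart of the file-fault path; a condensed sketch, where sigbus_unless_raced() is a hypothetical name for that recheck, not a kernel function:

    if (!vma->vm_ops->fault)
        ret = sigbus_unless_raced(vmf);
    else if (!(vmf->flags & FAULT_FLAG_WRITE))
        ret = do_read_fault(vmf);      /* read fault */
    else if (!(vma->vm_flags & VM_SHARED))
        ret = do_cow_fault(vmf);       /* private write: COW */
    else
        ret = do_shared_fault(vmf);    /* shared write */

    /* A preallocated but unused PTE page is freed on the way out. */
    if (vmf->prealloc_pte) {
        pte_free(vm_mm, vmf->prealloc_pte);
        vmf->prealloc_pte = NULL;
    }
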
4321 static vm_fault_t do_numa_page(struct vm_fault *vmf) in do_numa_page() argument
4323 struct vm_area_struct *vma = vmf->vma; in do_numa_page()
4329 bool was_writable = pte_savedwrite(vmf->orig_pte); in do_numa_page()
4337 vmf->ptl = pte_lockptr(vma->vm_mm, vmf->pmd); in do_numa_page()
4338 spin_lock(vmf->ptl); in do_numa_page()
4339 if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) { in do_numa_page()
4340 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_numa_page()
4345 old_pte = ptep_get(vmf->pte); in do_numa_page()
4348 page = vm_normal_page(vma, vmf->address, pte); in do_numa_page()
4376 target_nid = numa_migrate_prep(page, vma, vmf->address, page_nid, in do_numa_page()
4382 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_numa_page()
4390 vmf->pte = pte_offset_map(vmf->pmd, vmf->address); in do_numa_page()
4391 spin_lock(vmf->ptl); in do_numa_page()
4392 if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) { in do_numa_page()
4393 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_numa_page()
4408 old_pte = ptep_modify_prot_start(vma, vmf->address, vmf->pte); in do_numa_page()
4413 ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte); in do_numa_page()
4414 update_mmu_cache(vma, vmf->address, vmf->pte); in do_numa_page()
4415 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_numa_page()
4419 static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf) in create_huge_pmd() argument
4421 if (vma_is_anonymous(vmf->vma)) in create_huge_pmd()
4422 return do_huge_pmd_anonymous_page(vmf); in create_huge_pmd()
4423 if (vmf->vma->vm_ops->huge_fault) in create_huge_pmd()
4424 return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD); in create_huge_pmd()
4429 static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf) in wp_huge_pmd() argument
4431 if (vma_is_anonymous(vmf->vma)) { in wp_huge_pmd()
4432 if (userfaultfd_huge_pmd_wp(vmf->vma, vmf->orig_pmd)) in wp_huge_pmd()
4433 return handle_userfault(vmf, VM_UFFD_WP); in wp_huge_pmd()
4434 return do_huge_pmd_wp_page(vmf); in wp_huge_pmd()
4436 if (vmf->vma->vm_ops->huge_fault) { in wp_huge_pmd()
4437 vm_fault_t ret = vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD); in wp_huge_pmd()
4444 __split_huge_pmd(vmf->vma, vmf->pmd, vmf->address, false, NULL); in wp_huge_pmd()
4449 static vm_fault_t create_huge_pud(struct vm_fault *vmf) in create_huge_pud() argument
4454 if (vma_is_anonymous(vmf->vma)) in create_huge_pud()
4456 if (vmf->vma->vm_ops->huge_fault) { in create_huge_pud()
4457 vm_fault_t ret = vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD); in create_huge_pud()
4464 __split_huge_pud(vmf->vma, vmf->pud, vmf->address); in create_huge_pud()
4469 static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud) in wp_huge_pud() argument
4473 if (vma_is_anonymous(vmf->vma)) in wp_huge_pud()
4475 if (vmf->vma->vm_ops->huge_fault) in wp_huge_pud()
4476 return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD); in wp_huge_pud()
4496 static vm_fault_t handle_pte_fault(struct vm_fault *vmf) in handle_pte_fault() argument
4500 if (unlikely(pmd_none(*vmf->pmd))) { in handle_pte_fault()
4507 vmf->pte = NULL; in handle_pte_fault()
4521 if (pmd_devmap_trans_unstable(vmf->pmd)) in handle_pte_fault()
4529 vmf->pte = pte_offset_map(vmf->pmd, vmf->address); in handle_pte_fault()
4530 vmf->orig_pte = *vmf->pte; in handle_pte_fault()
4541 if (pte_none(vmf->orig_pte)) { in handle_pte_fault()
4542 pte_unmap(vmf->pte); in handle_pte_fault()
4543 vmf->pte = NULL; in handle_pte_fault()
4547 if (!vmf->pte) { in handle_pte_fault()
4548 if (vma_is_anonymous(vmf->vma)) in handle_pte_fault()
4549 return do_anonymous_page(vmf); in handle_pte_fault()
4551 return do_fault(vmf); in handle_pte_fault()
4554 if (!pte_present(vmf->orig_pte)) in handle_pte_fault()
4555 return do_swap_page(vmf); in handle_pte_fault()
4557 if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma)) in handle_pte_fault()
4558 return do_numa_page(vmf); in handle_pte_fault()
4560 vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd); in handle_pte_fault()
4561 spin_lock(vmf->ptl); in handle_pte_fault()
4562 entry = vmf->orig_pte; in handle_pte_fault()
4563 if (unlikely(!pte_same(*vmf->pte, entry))) { in handle_pte_fault()
4564 update_mmu_tlb(vmf->vma, vmf->address, vmf->pte); in handle_pte_fault()
4567 if (vmf->flags & FAULT_FLAG_WRITE) { in handle_pte_fault()
4569 return do_wp_page(vmf); in handle_pte_fault()
4573 if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry, in handle_pte_fault()
4574 vmf->flags & FAULT_FLAG_WRITE)) { in handle_pte_fault()
4575 update_mmu_cache(vmf->vma, vmf->address, vmf->pte); in handle_pte_fault()
4578 if (vmf->flags & FAULT_FLAG_TRIED) in handle_pte_fault()
4586 if (vmf->flags & FAULT_FLAG_WRITE) in handle_pte_fault()
4587 flush_tlb_fix_spurious_fault(vmf->vma, vmf->address); in handle_pte_fault()
4590 pte_unmap_unlock(vmf->pte, vmf->ptl); in handle_pte_fault()
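
handle_pte_fault() is the per-PTE dispatcher that everything above hangs off. A condensed sketch of the dispatch chain, reconstructed from the matched lines (the unlock label and the spurious-fault TLB handling at lines 4586-4590 sit after the access-flags update):

    if (!vmf->pte) {
        /* No PTE at all: a "missing" fault. */
        if (vma_is_anonymous(vmf->vma))
            return do_anonymous_page(vmf);
        else
            return do_fault(vmf);
    }

    if (!pte_present(vmf->orig_pte))
        return do_swap_page(vmf);

    if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma))
        return do_numa_page(vmf);

    vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
    spin_lock(vmf->ptl);
    entry = vmf->orig_pte;
    if (unlikely(!pte_same(*vmf->pte, entry))) {
        update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
        goto unlock;
    }
    if (vmf->flags & FAULT_FLAG_WRITE) {
        if (!pte_write(entry))
            return do_wp_page(vmf);    /* drops the PTE lock */
        entry = pte_mkdirty(entry);
    }
    entry = pte_mkyoung(entry);
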
4603 struct vm_fault vmf = { in __handle_mm_fault() local
4621 vmf.pud = pud_alloc(mm, p4d, address); in __handle_mm_fault()
4622 if (!vmf.pud) in __handle_mm_fault()
4625 if (pud_none(*vmf.pud) && __transparent_hugepage_enabled(vma)) { in __handle_mm_fault()
4626 ret = create_huge_pud(&vmf); in __handle_mm_fault()
4630 pud_t orig_pud = *vmf.pud; in __handle_mm_fault()
4638 ret = wp_huge_pud(&vmf, orig_pud); in __handle_mm_fault()
4642 huge_pud_set_accessed(&vmf, orig_pud); in __handle_mm_fault()
4648 vmf.pmd = pmd_alloc(mm, vmf.pud, address); in __handle_mm_fault()
4649 if (!vmf.pmd) in __handle_mm_fault()
4653 if (pud_trans_unstable(vmf.pud)) in __handle_mm_fault()
4656 if (pmd_none(*vmf.pmd) && __transparent_hugepage_enabled(vma)) { in __handle_mm_fault()
4657 ret = create_huge_pmd(&vmf); in __handle_mm_fault()
4661 vmf.orig_pmd = *vmf.pmd; in __handle_mm_fault()
4664 if (unlikely(is_swap_pmd(vmf.orig_pmd))) { in __handle_mm_fault()
4666 !is_pmd_migration_entry(vmf.orig_pmd)); in __handle_mm_fault()
4667 if (is_pmd_migration_entry(vmf.orig_pmd)) in __handle_mm_fault()
4668 pmd_migration_entry_wait(mm, vmf.pmd); in __handle_mm_fault()
4671 if (pmd_trans_huge(vmf.orig_pmd) || pmd_devmap(vmf.orig_pmd)) { in __handle_mm_fault()
4672 if (pmd_protnone(vmf.orig_pmd) && vma_is_accessible(vma)) in __handle_mm_fault()
4673 return do_huge_pmd_numa_page(&vmf); in __handle_mm_fault()
4675 if (dirty && !pmd_write(vmf.orig_pmd)) { in __handle_mm_fault()
4676 ret = wp_huge_pmd(&vmf); in __handle_mm_fault()
4680 huge_pmd_set_accessed(&vmf); in __handle_mm_fault()
4686 return handle_pte_fault(&vmf); in __handle_mm_fault()
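
Finally, __handle_mm_fault() walks (and allocates) the upper page-table levels, giving transparent huge pages first refusal at the PUD and PMD before falling through to handle_pte_fault(). A condensed sketch, with the vmf initializer restored from context and the huge-page branches summarized as comments:

    struct vm_fault vmf = {
        .vma = vma,
        .address = address & PAGE_MASK,
        .flags = flags,
        .pgoff = linear_page_index(vma, address),
        .gfp_mask = __get_fault_gfp_mask(vma),
    };

    pgd = pgd_offset(mm, address);
    p4d = p4d_alloc(mm, pgd, address);
    if (!p4d)
        return VM_FAULT_OOM;

    vmf.pud = pud_alloc(mm, p4d, address);
    if (!vmf.pud)
        return VM_FAULT_OOM;
    /* huge PUD: create_huge_pud() on none, wp_huge_pud() or
     * huge_pud_set_accessed() on an existing huge entry */

    vmf.pmd = pmd_alloc(mm, vmf.pud, address);
    if (!vmf.pmd)
        return VM_FAULT_OOM;
    /* huge PMD: create_huge_pmd() on none; wait on PMD migration
     * entries; do_huge_pmd_numa_page(), wp_huge_pmd(), or
     * huge_pmd_set_accessed() on an existing huge entry */

    return handle_pte_fault(&vmf);
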