Lines Matching refs:vmf
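
Every line below is a reference to the fault descriptor, struct vm_fault *vmf, that the fault handlers in mm/memory.c pass down from __handle_mm_fault(). As a reading aid, the following is an abridged sketch of the struct vm_fault members this listing actually touches. It is not the authoritative definition (see include/linux/mm.h): the real layout additionally carries gfp_mask and real_address, wraps the first few members in a const anonymous struct, and places orig_pte/orig_pmd in a union.

	struct vm_fault {
		struct vm_area_struct *vma;	/* target VMA */
		pgoff_t pgoff;			/* logical page offset within the VMA */
		unsigned long address;		/* faulting virtual address */
		enum fault_flag flags;		/* FAULT_FLAG_xxx bits */
		pmd_t *pmd;			/* PMD entry covering 'address' */
		pud_t *pud;			/* PUD entry covering 'address' */
		pte_t orig_pte;			/* PTE value sampled at fault time */
		pmd_t orig_pmd;			/* PMD value sampled at fault time */
		struct page *cow_page;		/* target page for private COW faults */
		struct page *page;		/* page returned by ->fault handlers */
		pte_t *pte;			/* mapped PTE; valid only under 'ptl' */
		spinlock_t *ptl;		/* page table lock protecting 'pte' */
		pgtable_t prealloc_pte;		/* pre-allocated PTE page table */
	};
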

96 static vm_fault_t do_fault(struct vm_fault *vmf);
97 static vm_fault_t do_anonymous_page(struct vm_fault *vmf);
98 static bool vmf_pte_changed(struct vm_fault *vmf);
104 static __always_inline bool vmf_orig_pte_uffd_wp(struct vm_fault *vmf) in vmf_orig_pte_uffd_wp() argument
106 if (!userfaultfd_wp(vmf->vma)) in vmf_orig_pte_uffd_wp()
108 if (!(vmf->flags & FAULT_FLAG_ORIG_PTE_VALID)) in vmf_orig_pte_uffd_wp()
111 return pte_marker_uffd_wp(vmf->orig_pte); in vmf_orig_pte_uffd_wp()
2639 vm_fault_t vmf_insert_page_mkwrite(struct vm_fault *vmf, struct page *page, in vmf_insert_page_mkwrite() argument
2642 pgprot_t pgprot = vmf->vma->vm_page_prot; in vmf_insert_page_mkwrite()
2643 unsigned long addr = vmf->address; in vmf_insert_page_mkwrite()
2646 if (addr < vmf->vma->vm_start || addr >= vmf->vma->vm_end) in vmf_insert_page_mkwrite()
2649 err = insert_page(vmf->vma, addr, page, pgprot, write); in vmf_insert_page_mkwrite()
3211 static inline int pte_unmap_same(struct vm_fault *vmf) in pte_unmap_same() argument
3216 spin_lock(vmf->ptl); in pte_unmap_same()
3217 same = pte_same(ptep_get(vmf->pte), vmf->orig_pte); in pte_unmap_same()
3218 spin_unlock(vmf->ptl); in pte_unmap_same()
3221 pte_unmap(vmf->pte); in pte_unmap_same()
3222 vmf->pte = NULL; in pte_unmap_same()
3233 struct vm_fault *vmf) in __wp_page_copy_user() argument
3238 struct vm_area_struct *vma = vmf->vma; in __wp_page_copy_user()
3240 unsigned long addr = vmf->address; in __wp_page_copy_user()
3262 vmf->pte = NULL; in __wp_page_copy_user()
3263 if (!arch_has_hw_pte_young() && !pte_young(vmf->orig_pte)) { in __wp_page_copy_user()
3266 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl); in __wp_page_copy_user()
3267 if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { in __wp_page_copy_user()
3272 if (vmf->pte) in __wp_page_copy_user()
3273 update_mmu_tlb(vma, addr, vmf->pte); in __wp_page_copy_user()
3278 entry = pte_mkyoung(vmf->orig_pte); in __wp_page_copy_user()
3279 if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0)) in __wp_page_copy_user()
3280 update_mmu_cache_range(vmf, vma, addr, vmf->pte, 1); in __wp_page_copy_user()
3290 if (vmf->pte) in __wp_page_copy_user()
3294 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl); in __wp_page_copy_user()
3295 if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { in __wp_page_copy_user()
3297 if (vmf->pte) in __wp_page_copy_user()
3298 update_mmu_tlb(vma, addr, vmf->pte); in __wp_page_copy_user()
3321 if (vmf->pte) in __wp_page_copy_user()
3322 pte_unmap_unlock(vmf->pte, vmf->ptl); in __wp_page_copy_user()
3350 static vm_fault_t do_page_mkwrite(struct vm_fault *vmf, struct folio *folio) in do_page_mkwrite() argument
3353 unsigned int old_flags = vmf->flags; in do_page_mkwrite()
3355 vmf->flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE; in do_page_mkwrite()
3357 if (vmf->vma->vm_file && in do_page_mkwrite()
3358 IS_SWAPFILE(vmf->vma->vm_file->f_mapping->host)) in do_page_mkwrite()
3361 ret = vmf->vma->vm_ops->page_mkwrite(vmf); in do_page_mkwrite()
3363 vmf->flags = old_flags; in do_page_mkwrite()
3383 static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf) in fault_dirty_shared_page() argument
3385 struct vm_area_struct *vma = vmf->vma; in fault_dirty_shared_page()
3387 struct folio *folio = page_folio(vmf->page); in fault_dirty_shared_page()
3417 fpin = maybe_unlock_mmap_for_io(vmf, NULL); in fault_dirty_shared_page()
3436 static inline void wp_page_reuse(struct vm_fault *vmf, struct folio *folio) in wp_page_reuse() argument
3437 __releases(vmf->ptl) in wp_page_reuse()
3439 struct vm_area_struct *vma = vmf->vma; in wp_page_reuse()
3442 VM_BUG_ON(!(vmf->flags & FAULT_FLAG_WRITE)); in wp_page_reuse()
3443 VM_WARN_ON(is_zero_pfn(pte_pfn(vmf->orig_pte))); in wp_page_reuse()
3447 !PageAnonExclusive(vmf->page)); in wp_page_reuse()
3456 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte)); in wp_page_reuse()
3457 entry = pte_mkyoung(vmf->orig_pte); in wp_page_reuse()
3459 if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1)) in wp_page_reuse()
3460 update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1); in wp_page_reuse()
3461 pte_unmap_unlock(vmf->pte, vmf->ptl); in wp_page_reuse()
3470 static inline vm_fault_t vmf_can_call_fault(const struct vm_fault *vmf) in vmf_can_call_fault() argument
3472 struct vm_area_struct *vma = vmf->vma; in vmf_can_call_fault()
3474 if (vma->vm_ops->map_pages || !(vmf->flags & FAULT_FLAG_VMA_LOCK)) in vmf_can_call_fault()
3495 vm_fault_t __vmf_anon_prepare(struct vm_fault *vmf) in __vmf_anon_prepare() argument
3497 struct vm_area_struct *vma = vmf->vma; in __vmf_anon_prepare()
3502 if (vmf->flags & FAULT_FLAG_VMA_LOCK) { in __vmf_anon_prepare()
3508 if (vmf->flags & FAULT_FLAG_VMA_LOCK) in __vmf_anon_prepare()
3530 static vm_fault_t wp_page_copy(struct vm_fault *vmf) in wp_page_copy() argument
3532 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE; in wp_page_copy()
3533 struct vm_area_struct *vma = vmf->vma; in wp_page_copy()
3545 if (vmf->page) in wp_page_copy()
3546 old_folio = page_folio(vmf->page); in wp_page_copy()
3547 ret = vmf_anon_prepare(vmf); in wp_page_copy()
3551 pfn_is_zero = is_zero_pfn(pte_pfn(vmf->orig_pte)); in wp_page_copy()
3552 new_folio = folio_prealloc(mm, vma, vmf->address, pfn_is_zero); in wp_page_copy()
3559 err = __wp_page_copy_user(&new_folio->page, vmf->page, vmf); in wp_page_copy()
3575 kmsan_copy_page_meta(&new_folio->page, vmf->page); in wp_page_copy()
3581 vmf->address & PAGE_MASK, in wp_page_copy()
3582 (vmf->address & PAGE_MASK) + PAGE_SIZE); in wp_page_copy()
3588 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl); in wp_page_copy()
3589 if (likely(vmf->pte && pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { in wp_page_copy()
3596 ksm_might_unmap_zero_page(mm, vmf->orig_pte); in wp_page_copy()
3599 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte)); in wp_page_copy()
3603 if (pte_soft_dirty(vmf->orig_pte)) in wp_page_copy()
3605 if (pte_uffd_wp(vmf->orig_pte)) in wp_page_copy()
3618 ptep_clear_flush(vma, vmf->address, vmf->pte); in wp_page_copy()
3619 folio_add_new_anon_rmap(new_folio, vma, vmf->address, RMAP_EXCLUSIVE); in wp_page_copy()
3622 set_pte_at(mm, vmf->address, vmf->pte, entry); in wp_page_copy()
3623 update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1); in wp_page_copy()
3647 folio_remove_rmap_pte(old_folio, vmf->page, vma); in wp_page_copy()
3653 pte_unmap_unlock(vmf->pte, vmf->ptl); in wp_page_copy()
3654 } else if (vmf->pte) { in wp_page_copy()
3655 update_mmu_tlb(vma, vmf->address, vmf->pte); in wp_page_copy()
3656 pte_unmap_unlock(vmf->pte, vmf->ptl); in wp_page_copy()
3698 static vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf, struct folio *folio) in finish_mkwrite_fault() argument
3700 WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED)); in finish_mkwrite_fault()
3701 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address, in finish_mkwrite_fault()
3702 &vmf->ptl); in finish_mkwrite_fault()
3703 if (!vmf->pte) in finish_mkwrite_fault()
3709 if (!pte_same(ptep_get(vmf->pte), vmf->orig_pte)) { in finish_mkwrite_fault()
3710 update_mmu_tlb(vmf->vma, vmf->address, vmf->pte); in finish_mkwrite_fault()
3711 pte_unmap_unlock(vmf->pte, vmf->ptl); in finish_mkwrite_fault()
3714 wp_page_reuse(vmf, folio); in finish_mkwrite_fault()
3722 static vm_fault_t wp_pfn_shared(struct vm_fault *vmf) in wp_pfn_shared() argument
3724 struct vm_area_struct *vma = vmf->vma; in wp_pfn_shared()
3729 pte_unmap_unlock(vmf->pte, vmf->ptl); in wp_pfn_shared()
3730 ret = vmf_can_call_fault(vmf); in wp_pfn_shared()
3734 vmf->flags |= FAULT_FLAG_MKWRITE; in wp_pfn_shared()
3735 ret = vma->vm_ops->pfn_mkwrite(vmf); in wp_pfn_shared()
3738 return finish_mkwrite_fault(vmf, NULL); in wp_pfn_shared()
3740 wp_page_reuse(vmf, NULL); in wp_pfn_shared()
3744 static vm_fault_t wp_page_shared(struct vm_fault *vmf, struct folio *folio) in wp_page_shared() argument
3745 __releases(vmf->ptl) in wp_page_shared()
3747 struct vm_area_struct *vma = vmf->vma; in wp_page_shared()
3755 pte_unmap_unlock(vmf->pte, vmf->ptl); in wp_page_shared()
3756 tmp = vmf_can_call_fault(vmf); in wp_page_shared()
3762 tmp = do_page_mkwrite(vmf, folio); in wp_page_shared()
3768 tmp = finish_mkwrite_fault(vmf, folio); in wp_page_shared()
3775 wp_page_reuse(vmf, folio); in wp_page_shared()
3778 ret |= fault_dirty_shared_page(vmf); in wp_page_shared()
3921 static vm_fault_t do_wp_page(struct vm_fault *vmf) in do_wp_page() argument
3922 __releases(vmf->ptl) in do_wp_page()
3924 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE; in do_wp_page()
3925 struct vm_area_struct *vma = vmf->vma; in do_wp_page()
3930 if (userfaultfd_pte_wp(vma, ptep_get(vmf->pte))) { in do_wp_page()
3932 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_wp_page()
3933 return handle_userfault(vmf, VM_UFFD_WP); in do_wp_page()
3941 pte = pte_clear_uffd_wp(ptep_get(vmf->pte)); in do_wp_page()
3943 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte); in do_wp_page()
3948 vmf->orig_pte = pte; in do_wp_page()
3955 if (unlikely(userfaultfd_wp(vmf->vma) && in do_wp_page()
3956 mm_tlb_flush_pending(vmf->vma->vm_mm))) in do_wp_page()
3957 flush_tlb_page(vmf->vma, vmf->address); in do_wp_page()
3960 vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte); in do_wp_page()
3962 if (vmf->page) in do_wp_page()
3963 folio = page_folio(vmf->page); in do_wp_page()
3977 if (!vmf->page || is_fsdax_page(vmf->page)) { in do_wp_page()
3978 vmf->page = NULL; in do_wp_page()
3979 return wp_pfn_shared(vmf); in do_wp_page()
3981 return wp_page_shared(vmf, folio); in do_wp_page()
3992 (PageAnonExclusive(vmf->page) || wp_can_reuse_anon_folio(folio, vma))) { in do_wp_page()
3993 if (!PageAnonExclusive(vmf->page)) in do_wp_page()
3994 SetPageAnonExclusive(vmf->page); in do_wp_page()
3996 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_wp_page()
3999 wp_page_reuse(vmf, folio); in do_wp_page()
4008 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_wp_page()
4013 return wp_page_copy(vmf); in do_wp_page()
4147 static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf) in remove_device_exclusive_entry() argument
4149 struct folio *folio = page_folio(vmf->page); in remove_device_exclusive_entry()
4150 struct vm_area_struct *vma = vmf->vma; in remove_device_exclusive_entry()
4165 ret = folio_lock_or_retry(folio, vmf); in remove_device_exclusive_entry()
4171 vma->vm_mm, vmf->address & PAGE_MASK, in remove_device_exclusive_entry()
4172 (vmf->address & PAGE_MASK) + PAGE_SIZE, NULL); in remove_device_exclusive_entry()
4175 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, in remove_device_exclusive_entry()
4176 &vmf->ptl); in remove_device_exclusive_entry()
4177 if (likely(vmf->pte && pte_same(ptep_get(vmf->pte), vmf->orig_pte))) in remove_device_exclusive_entry()
4178 restore_exclusive_pte(vma, folio, vmf->page, vmf->address, in remove_device_exclusive_entry()
4179 vmf->pte, vmf->orig_pte); in remove_device_exclusive_entry()
4181 if (vmf->pte) in remove_device_exclusive_entry()
4182 pte_unmap_unlock(vmf->pte, vmf->ptl); in remove_device_exclusive_entry()
4209 static vm_fault_t pte_marker_clear(struct vm_fault *vmf) in pte_marker_clear() argument
4211 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, in pte_marker_clear()
4212 vmf->address, &vmf->ptl); in pte_marker_clear()
4213 if (!vmf->pte) in pte_marker_clear()
4223 if (pte_same(vmf->orig_pte, ptep_get(vmf->pte))) in pte_marker_clear()
4224 pte_clear(vmf->vma->vm_mm, vmf->address, vmf->pte); in pte_marker_clear()
4225 pte_unmap_unlock(vmf->pte, vmf->ptl); in pte_marker_clear()
4229 static vm_fault_t do_pte_missing(struct vm_fault *vmf) in do_pte_missing() argument
4231 if (vma_is_anonymous(vmf->vma)) in do_pte_missing()
4232 return do_anonymous_page(vmf); in do_pte_missing()
4234 return do_fault(vmf); in do_pte_missing()
4241 static vm_fault_t pte_marker_handle_uffd_wp(struct vm_fault *vmf) in pte_marker_handle_uffd_wp() argument
4247 if (unlikely(!userfaultfd_wp(vmf->vma))) in pte_marker_handle_uffd_wp()
4248 return pte_marker_clear(vmf); in pte_marker_handle_uffd_wp()
4250 return do_pte_missing(vmf); in pte_marker_handle_uffd_wp()
4253 static vm_fault_t handle_pte_marker(struct vm_fault *vmf) in handle_pte_marker() argument
4255 swp_entry_t entry = pte_to_swp_entry(vmf->orig_pte); in handle_pte_marker()
4274 return pte_marker_handle_uffd_wp(vmf); in handle_pte_marker()
4280 static struct folio *__alloc_swap_folio(struct vm_fault *vmf) in __alloc_swap_folio() argument
4282 struct vm_area_struct *vma = vmf->vma; in __alloc_swap_folio()
4286 folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vmf->address); in __alloc_swap_folio()
4290 entry = pte_to_swp_entry(vmf->orig_pte); in __alloc_swap_folio()
4305 static bool can_swapin_thp(struct vm_fault *vmf, pte_t *ptep, int nr_pages) in can_swapin_thp() argument
4312 addr = ALIGN_DOWN(vmf->address, nr_pages * PAGE_SIZE); in can_swapin_thp()
4313 idx = (vmf->address - addr) / PAGE_SIZE; in can_swapin_thp()
4316 if (!pte_same(pte, pte_move_swp_offset(vmf->orig_pte, -idx))) in can_swapin_thp()
4358 static struct folio *alloc_swap_folio(struct vm_fault *vmf) in alloc_swap_folio() argument
4360 struct vm_area_struct *vma = vmf->vma; in alloc_swap_folio()
4385 entry = pte_to_swp_entry(vmf->orig_pte); in alloc_swap_folio()
4392 orders = thp_vma_suitable_orders(vma, vmf->address, orders); in alloc_swap_folio()
4394 vmf->address, orders); in alloc_swap_folio()
4399 pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, in alloc_swap_folio()
4400 vmf->address & PMD_MASK, &ptl); in alloc_swap_folio()
4410 addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order); in alloc_swap_folio()
4411 if (can_swapin_thp(vmf, pte + pte_index(addr), 1 << order)) in alloc_swap_folio()
4421 addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order); in alloc_swap_folio()
4435 return __alloc_swap_folio(vmf); in alloc_swap_folio()
4438 static struct folio *alloc_swap_folio(struct vm_fault *vmf) in alloc_swap_folio() argument
4440 return __alloc_swap_folio(vmf); in alloc_swap_folio()
4454 vm_fault_t do_swap_page(struct vm_fault *vmf) in do_swap_page() argument
4456 struct vm_area_struct *vma = vmf->vma; in do_swap_page()
4473 if (!pte_unmap_same(vmf)) in do_swap_page()
4476 entry = pte_to_swp_entry(vmf->orig_pte); in do_swap_page()
4479 migration_entry_wait(vma->vm_mm, vmf->pmd, in do_swap_page()
4480 vmf->address); in do_swap_page()
4482 vmf->page = pfn_swap_entry_to_page(entry); in do_swap_page()
4483 ret = remove_device_exclusive_entry(vmf); in do_swap_page()
4485 if (vmf->flags & FAULT_FLAG_VMA_LOCK) { in do_swap_page()
4495 vmf->page = pfn_swap_entry_to_page(entry); in do_swap_page()
4496 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_swap_page()
4497 vmf->address, &vmf->ptl); in do_swap_page()
4498 if (unlikely(!vmf->pte || in do_swap_page()
4499 !pte_same(ptep_get(vmf->pte), in do_swap_page()
4500 vmf->orig_pte))) in do_swap_page()
4507 if (trylock_page(vmf->page)) { in do_swap_page()
4510 get_page(vmf->page); in do_swap_page()
4511 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_swap_page()
4512 pgmap = page_pgmap(vmf->page); in do_swap_page()
4513 ret = pgmap->ops->migrate_to_ram(vmf); in do_swap_page()
4514 unlock_page(vmf->page); in do_swap_page()
4515 put_page(vmf->page); in do_swap_page()
4517 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_swap_page()
4522 ret = handle_pte_marker(vmf); in do_swap_page()
4524 print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL); in do_swap_page()
4535 folio = swap_cache_get_folio(entry, vma, vmf->address); in do_swap_page()
4544 folio = alloc_swap_folio(vmf); in do_swap_page()
4587 vmf); in do_swap_page()
4596 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_swap_page()
4597 vmf->address, &vmf->ptl); in do_swap_page()
4598 if (likely(vmf->pte && in do_swap_page()
4599 pte_same(ptep_get(vmf->pte), vmf->orig_pte))) in do_swap_page()
4618 ret |= folio_lock_or_retry(folio, vmf); in do_swap_page()
4639 folio = ksm_might_need_to_copy(folio, vma, vmf->address); in do_swap_page()
4658 if ((vmf->flags & FAULT_FLAG_WRITE) && folio == swapcache && in do_swap_page()
4668 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, in do_swap_page()
4669 &vmf->ptl); in do_swap_page()
4670 if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) in do_swap_page()
4681 unsigned long folio_start = ALIGN_DOWN(vmf->address, nr * PAGE_SIZE); in do_swap_page()
4682 unsigned long idx = (vmf->address - folio_start) / PAGE_SIZE; in do_swap_page()
4683 pte_t *folio_ptep = vmf->pte - idx; in do_swap_page()
4686 if (!pte_same(folio_pte, pte_move_swp_offset(vmf->orig_pte, -idx)) || in do_swap_page()
4698 address = vmf->address; in do_swap_page()
4699 ptep = vmf->pte; in do_swap_page()
4713 folio_ptep = vmf->pte - idx; in do_swap_page()
4715 if (!pte_same(folio_pte, pte_move_swp_offset(vmf->orig_pte, -idx)) || in do_swap_page()
4744 exclusive = pte_swp_exclusive(vmf->orig_pte); in do_swap_page()
4788 if (should_try_to_free_swap(folio, vma, vmf->flags)) in do_swap_page()
4794 if (pte_swp_soft_dirty(vmf->orig_pte)) in do_swap_page()
4796 if (pte_swp_uffd_wp(vmf->orig_pte)) in do_swap_page()
4810 if (vmf->flags & FAULT_FLAG_WRITE) { in do_swap_page()
4812 vmf->flags &= ~FAULT_FLAG_WRITE; in do_swap_page()
4819 vmf->orig_pte = pte_advance_pfn(pte, page_idx); in do_swap_page()
4860 if (vmf->flags & FAULT_FLAG_WRITE) { in do_swap_page()
4861 ret |= do_wp_page(vmf); in do_swap_page()
4868 update_mmu_cache_range(vmf, vma, address, ptep, nr_pages); in do_swap_page()
4870 if (vmf->pte) in do_swap_page()
4871 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_swap_page()
4883 if (vmf->pte) in do_swap_page()
4884 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_swap_page()
4915 static struct folio *alloc_anon_folio(struct vm_fault *vmf) in alloc_anon_folio() argument
4917 struct vm_area_struct *vma = vmf->vma; in alloc_anon_folio()
4940 orders = thp_vma_suitable_orders(vma, vmf->address, orders); in alloc_anon_folio()
4945 pte = pte_offset_map(vmf->pmd, vmf->address & PMD_MASK); in alloc_anon_folio()
4956 addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order); in alloc_anon_folio()
4970 addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order); in alloc_anon_folio()
4987 folio_zero_user(folio, vmf->address); in alloc_anon_folio()
4997 return folio_prealloc(vma->vm_mm, vma, vmf->address, true); in alloc_anon_folio()
5005 static vm_fault_t do_anonymous_page(struct vm_fault *vmf) in do_anonymous_page() argument
5007 struct vm_area_struct *vma = vmf->vma; in do_anonymous_page()
5008 unsigned long addr = vmf->address; in do_anonymous_page()
5022 if (pte_alloc(vma->vm_mm, vmf->pmd)) in do_anonymous_page()
5026 if (!(vmf->flags & FAULT_FLAG_WRITE) && in do_anonymous_page()
5028 entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address), in do_anonymous_page()
5030 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_anonymous_page()
5031 vmf->address, &vmf->ptl); in do_anonymous_page()
5032 if (!vmf->pte) in do_anonymous_page()
5034 if (vmf_pte_changed(vmf)) { in do_anonymous_page()
5035 update_mmu_tlb(vma, vmf->address, vmf->pte); in do_anonymous_page()
5043 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_anonymous_page()
5044 return handle_userfault(vmf, VM_UFFD_MISSING); in do_anonymous_page()
5050 ret = vmf_anon_prepare(vmf); in do_anonymous_page()
5054 folio = alloc_anon_folio(vmf); in do_anonymous_page()
5061 addr = ALIGN_DOWN(vmf->address, nr_pages * PAGE_SIZE); in do_anonymous_page()
5075 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl); in do_anonymous_page()
5076 if (!vmf->pte) in do_anonymous_page()
5078 if (nr_pages == 1 && vmf_pte_changed(vmf)) { in do_anonymous_page()
5079 update_mmu_tlb(vma, addr, vmf->pte); in do_anonymous_page()
5081 } else if (nr_pages > 1 && !pte_range_none(vmf->pte, nr_pages)) { in do_anonymous_page()
5082 update_mmu_tlb_range(vma, addr, vmf->pte, nr_pages); in do_anonymous_page()
5092 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_anonymous_page()
5094 return handle_userfault(vmf, VM_UFFD_MISSING); in do_anonymous_page()
5103 if (vmf_orig_pte_uffd_wp(vmf)) in do_anonymous_page()
5105 set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr_pages); in do_anonymous_page()
5108 update_mmu_cache_range(vmf, vma, addr, vmf->pte, nr_pages); in do_anonymous_page()
5110 if (vmf->pte) in do_anonymous_page()
5111 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_anonymous_page()
5125 static vm_fault_t __do_fault(struct vm_fault *vmf) in __do_fault() argument
5127 struct vm_area_struct *vma = vmf->vma; in __do_fault()
5146 if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) { in __do_fault()
5147 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm); in __do_fault()
5148 if (!vmf->prealloc_pte) in __do_fault()
5152 ret = vma->vm_ops->fault(vmf); in __do_fault()
5157 folio = page_folio(vmf->page); in __do_fault()
5158 if (unlikely(PageHWPoison(vmf->page))) { in __do_fault()
5161 if (page_mapped(vmf->page)) in __do_fault()
5169 vmf->page = NULL; in __do_fault()
5176 VM_BUG_ON_PAGE(!folio_test_locked(folio), vmf->page); in __do_fault()
5182 static void deposit_prealloc_pte(struct vm_fault *vmf) in deposit_prealloc_pte() argument
5184 struct vm_area_struct *vma = vmf->vma; in deposit_prealloc_pte()
5186 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte); in deposit_prealloc_pte()
5192 vmf->prealloc_pte = NULL; in deposit_prealloc_pte()
5195 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct folio *folio, struct page *page) in do_set_pmd() argument
5197 struct vm_area_struct *vma = vmf->vma; in do_set_pmd()
5198 bool write = vmf->flags & FAULT_FLAG_WRITE; in do_set_pmd()
5199 unsigned long haddr = vmf->address & HPAGE_PMD_MASK; in do_set_pmd()
5232 if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) { in do_set_pmd()
5233 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm); in do_set_pmd()
5234 if (!vmf->prealloc_pte) in do_set_pmd()
5238 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_set_pmd()
5239 if (unlikely(!pmd_none(*vmf->pmd))) in do_set_pmd()
5255 deposit_prealloc_pte(vmf); in do_set_pmd()
5257 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); in do_set_pmd()
5259 update_mmu_cache_pmd(vma, haddr, vmf->pmd); in do_set_pmd()
5265 spin_unlock(vmf->ptl); in do_set_pmd()
5269 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct folio *folio, struct page *page) in do_set_pmd() argument
5283 void set_pte_range(struct vm_fault *vmf, struct folio *folio, in set_pte_range() argument
5286 struct vm_area_struct *vma = vmf->vma; in set_pte_range()
5287 bool write = vmf->flags & FAULT_FLAG_WRITE; in set_pte_range()
5288 bool prefault = !in_range(vmf->address, addr, nr * PAGE_SIZE); in set_pte_range()
5303 if (unlikely(vmf_orig_pte_uffd_wp(vmf))) in set_pte_range()
5313 set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr); in set_pte_range()
5316 update_mmu_cache_range(vmf, vma, addr, vmf->pte, nr); in set_pte_range()
5319 static bool vmf_pte_changed(struct vm_fault *vmf) in vmf_pte_changed() argument
5321 if (vmf->flags & FAULT_FLAG_ORIG_PTE_VALID) in vmf_pte_changed()
5322 return !pte_same(ptep_get(vmf->pte), vmf->orig_pte); in vmf_pte_changed()
5324 return !pte_none(ptep_get(vmf->pte)); in vmf_pte_changed()
5342 vm_fault_t finish_fault(struct vm_fault *vmf) in finish_fault() argument
5344 struct vm_area_struct *vma = vmf->vma; in finish_fault()
5348 bool is_cow = (vmf->flags & FAULT_FLAG_WRITE) && in finish_fault()
5355 addr = vmf->address; in finish_fault()
5359 page = vmf->cow_page; in finish_fault()
5361 page = vmf->page; in finish_fault()
5374 if (pmd_none(*vmf->pmd)) { in finish_fault()
5376 ret = do_set_pmd(vmf, folio, page); in finish_fault()
5381 if (vmf->prealloc_pte) in finish_fault()
5382 pmd_install(vma->vm_mm, vmf->pmd, &vmf->prealloc_pte); in finish_fault()
5383 else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd))) in finish_fault()
5400 pgoff_t vma_off = vmf->pgoff - vmf->vma->vm_pgoff; in finish_fault()
5402 pgoff_t pte_off = pte_index(vmf->address); in finish_fault()
5415 addr = vmf->address - idx * PAGE_SIZE; in finish_fault()
5420 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in finish_fault()
5421 addr, &vmf->ptl); in finish_fault()
5422 if (!vmf->pte) in finish_fault()
5426 if (nr_pages == 1 && unlikely(vmf_pte_changed(vmf))) { in finish_fault()
5427 update_mmu_tlb(vma, addr, vmf->pte); in finish_fault()
5430 } else if (nr_pages > 1 && !pte_range_none(vmf->pte, nr_pages)) { in finish_fault()
5432 pte_unmap_unlock(vmf->pte, vmf->ptl); in finish_fault()
5437 set_pte_range(vmf, folio, page, nr_pages, addr); in finish_fault()
5443 pte_unmap_unlock(vmf->pte, vmf->ptl); in finish_fault()
5507 static vm_fault_t do_fault_around(struct vm_fault *vmf) in do_fault_around() argument
5510 pgoff_t pte_off = pte_index(vmf->address); in do_fault_around()
5512 pgoff_t vma_off = vmf->pgoff - vmf->vma->vm_pgoff; in do_fault_around()
5522 pte_off + vma_pages(vmf->vma) - vma_off) - 1; in do_fault_around()
5524 if (pmd_none(*vmf->pmd)) { in do_fault_around()
5525 vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm); in do_fault_around()
5526 if (!vmf->prealloc_pte) in do_fault_around()
5531 ret = vmf->vma->vm_ops->map_pages(vmf, in do_fault_around()
5532 vmf->pgoff + from_pte - pte_off, in do_fault_around()
5533 vmf->pgoff + to_pte - pte_off); in do_fault_around()
5540 static inline bool should_fault_around(struct vm_fault *vmf) in should_fault_around() argument
5543 if (!vmf->vma->vm_ops->map_pages) in should_fault_around()
5546 if (uffd_disable_fault_around(vmf->vma)) in should_fault_around()
5553 static vm_fault_t do_read_fault(struct vm_fault *vmf) in do_read_fault() argument
5563 if (should_fault_around(vmf)) { in do_read_fault()
5564 ret = do_fault_around(vmf); in do_read_fault()
5569 ret = vmf_can_call_fault(vmf); in do_read_fault()
5573 ret = __do_fault(vmf); in do_read_fault()
5577 ret |= finish_fault(vmf); in do_read_fault()
5578 folio = page_folio(vmf->page); in do_read_fault()
5585 static vm_fault_t do_cow_fault(struct vm_fault *vmf) in do_cow_fault() argument
5587 struct vm_area_struct *vma = vmf->vma; in do_cow_fault()
5591 ret = vmf_can_call_fault(vmf); in do_cow_fault()
5593 ret = vmf_anon_prepare(vmf); in do_cow_fault()
5597 folio = folio_prealloc(vma->vm_mm, vma, vmf->address, false); in do_cow_fault()
5601 vmf->cow_page = &folio->page; in do_cow_fault()
5603 ret = __do_fault(vmf); in do_cow_fault()
5609 if (copy_mc_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma)) { in do_cow_fault()
5615 ret |= finish_fault(vmf); in do_cow_fault()
5617 unlock_page(vmf->page); in do_cow_fault()
5618 put_page(vmf->page); in do_cow_fault()
5627 static vm_fault_t do_shared_fault(struct vm_fault *vmf) in do_shared_fault() argument
5629 struct vm_area_struct *vma = vmf->vma; in do_shared_fault()
5633 ret = vmf_can_call_fault(vmf); in do_shared_fault()
5637 ret = __do_fault(vmf); in do_shared_fault()
5641 folio = page_folio(vmf->page); in do_shared_fault()
5649 tmp = do_page_mkwrite(vmf, folio); in do_shared_fault()
5657 ret |= finish_fault(vmf); in do_shared_fault()
5665 ret |= fault_dirty_shared_page(vmf); in do_shared_fault()
5677 static vm_fault_t do_fault(struct vm_fault *vmf) in do_fault() argument
5679 struct vm_area_struct *vma = vmf->vma; in do_fault()
5687 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, in do_fault()
5688 vmf->address, &vmf->ptl); in do_fault()
5689 if (unlikely(!vmf->pte)) in do_fault()
5699 if (unlikely(pte_none(ptep_get(vmf->pte)))) in do_fault()
5704 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_fault()
5706 } else if (!(vmf->flags & FAULT_FLAG_WRITE)) in do_fault()
5707 ret = do_read_fault(vmf); in do_fault()
5709 ret = do_cow_fault(vmf); in do_fault()
5711 ret = do_shared_fault(vmf); in do_fault()
5714 if (vmf->prealloc_pte) { in do_fault()
5715 pte_free(vm_mm, vmf->prealloc_pte); in do_fault()
5716 vmf->prealloc_pte = NULL; in do_fault()
5721 int numa_migrate_check(struct folio *folio, struct vm_fault *vmf, in numa_migrate_check() argument
5725 struct vm_area_struct *vma = vmf->vma; in numa_migrate_check()
5765 return mpol_misplaced(folio, vmf, addr); in numa_migrate_check()
5768 static void numa_rebuild_single_mapping(struct vm_fault *vmf, struct vm_area_struct *vma, in numa_rebuild_single_mapping() argument
5780 update_mmu_cache_range(vmf, vma, fault_addr, fault_pte, 1); in numa_rebuild_single_mapping()
5783 static void numa_rebuild_large_mapping(struct vm_fault *vmf, struct vm_area_struct *vma, in numa_rebuild_large_mapping() argument
5788 unsigned long start, end, addr = vmf->address; in numa_rebuild_large_mapping()
5797 start_ptep = vmf->pte - ((addr - start) >> PAGE_SHIFT); in numa_rebuild_large_mapping()
5818 numa_rebuild_single_mapping(vmf, vma, addr, start_ptep, writable); in numa_rebuild_large_mapping()
5822 static vm_fault_t do_numa_page(struct vm_fault *vmf) in do_numa_page() argument
5824 struct vm_area_struct *vma = vmf->vma; in do_numa_page()
5838 spin_lock(vmf->ptl); in do_numa_page()
5840 old_pte = ptep_get(vmf->pte); in do_numa_page()
5842 if (unlikely(!pte_same(old_pte, vmf->orig_pte))) { in do_numa_page()
5843 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_numa_page()
5855 can_change_pte_writable(vma, vmf->address, pte)) in do_numa_page()
5858 folio = vm_normal_folio(vma, vmf->address, pte); in do_numa_page()
5865 target_nid = numa_migrate_check(folio, vmf, vmf->address, &flags, in do_numa_page()
5874 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_numa_page()
5887 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_numa_page()
5888 vmf->address, &vmf->ptl); in do_numa_page()
5889 if (unlikely(!vmf->pte)) in do_numa_page()
5891 if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { in do_numa_page()
5892 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_numa_page()
5901 numa_rebuild_large_mapping(vmf, vma, folio, pte, ignore_writable, in do_numa_page()
5904 numa_rebuild_single_mapping(vmf, vma, vmf->address, vmf->pte, in do_numa_page()
5906 pte_unmap_unlock(vmf->pte, vmf->ptl); in do_numa_page()
5913 static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf) in create_huge_pmd() argument
5915 struct vm_area_struct *vma = vmf->vma; in create_huge_pmd()
5917 return do_huge_pmd_anonymous_page(vmf); in create_huge_pmd()
5919 return vma->vm_ops->huge_fault(vmf, PMD_ORDER); in create_huge_pmd()
5924 static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf) in wp_huge_pmd() argument
5926 struct vm_area_struct *vma = vmf->vma; in wp_huge_pmd()
5927 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE; in wp_huge_pmd()
5932 userfaultfd_huge_pmd_wp(vma, vmf->orig_pmd)) { in wp_huge_pmd()
5933 if (userfaultfd_wp_async(vmf->vma)) in wp_huge_pmd()
5935 return handle_userfault(vmf, VM_UFFD_WP); in wp_huge_pmd()
5937 return do_huge_pmd_wp_page(vmf); in wp_huge_pmd()
5942 ret = vma->vm_ops->huge_fault(vmf, PMD_ORDER); in wp_huge_pmd()
5950 __split_huge_pmd(vma, vmf->pmd, vmf->address, false); in wp_huge_pmd()
5955 static vm_fault_t create_huge_pud(struct vm_fault *vmf) in create_huge_pud() argument
5959 struct vm_area_struct *vma = vmf->vma; in create_huge_pud()
5964 return vma->vm_ops->huge_fault(vmf, PUD_ORDER); in create_huge_pud()
5969 static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud) in wp_huge_pud() argument
5973 struct vm_area_struct *vma = vmf->vma; in wp_huge_pud()
5981 ret = vma->vm_ops->huge_fault(vmf, PUD_ORDER); in wp_huge_pud()
5988 __split_huge_pud(vma, vmf->pud, vmf->address); in wp_huge_pud()
6008 static vm_fault_t handle_pte_fault(struct vm_fault *vmf) in handle_pte_fault() argument
6012 if (unlikely(pmd_none(*vmf->pmd))) { in handle_pte_fault()
6019 vmf->pte = NULL; in handle_pte_fault()
6020 vmf->flags &= ~FAULT_FLAG_ORIG_PTE_VALID; in handle_pte_fault()
6037 vmf->pte = pte_offset_map_rw_nolock(vmf->vma->vm_mm, vmf->pmd, in handle_pte_fault()
6038 vmf->address, &dummy_pmdval, in handle_pte_fault()
6039 &vmf->ptl); in handle_pte_fault()
6040 if (unlikely(!vmf->pte)) in handle_pte_fault()
6042 vmf->orig_pte = ptep_get_lockless(vmf->pte); in handle_pte_fault()
6043 vmf->flags |= FAULT_FLAG_ORIG_PTE_VALID; in handle_pte_fault()
6045 if (pte_none(vmf->orig_pte)) { in handle_pte_fault()
6046 pte_unmap(vmf->pte); in handle_pte_fault()
6047 vmf->pte = NULL; in handle_pte_fault()
6051 if (!vmf->pte) in handle_pte_fault()
6052 return do_pte_missing(vmf); in handle_pte_fault()
6054 if (!pte_present(vmf->orig_pte)) in handle_pte_fault()
6055 return do_swap_page(vmf); in handle_pte_fault()
6057 if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma)) in handle_pte_fault()
6058 return do_numa_page(vmf); in handle_pte_fault()
6060 spin_lock(vmf->ptl); in handle_pte_fault()
6061 entry = vmf->orig_pte; in handle_pte_fault()
6062 if (unlikely(!pte_same(ptep_get(vmf->pte), entry))) { in handle_pte_fault()
6063 update_mmu_tlb(vmf->vma, vmf->address, vmf->pte); in handle_pte_fault()
6066 if (vmf->flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) { in handle_pte_fault()
6068 return do_wp_page(vmf); in handle_pte_fault()
6069 else if (likely(vmf->flags & FAULT_FLAG_WRITE)) in handle_pte_fault()
6073 if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry, in handle_pte_fault()
6074 vmf->flags & FAULT_FLAG_WRITE)) { in handle_pte_fault()
6075 update_mmu_cache_range(vmf, vmf->vma, vmf->address, in handle_pte_fault()
6076 vmf->pte, 1); in handle_pte_fault()
6079 if (vmf->flags & FAULT_FLAG_TRIED) in handle_pte_fault()
6087 if (vmf->flags & FAULT_FLAG_WRITE) in handle_pte_fault()
6088 flush_tlb_fix_spurious_fault(vmf->vma, vmf->address, in handle_pte_fault()
6089 vmf->pte); in handle_pte_fault()
6092 pte_unmap_unlock(vmf->pte, vmf->ptl); in handle_pte_fault()
6105 struct vm_fault vmf = { in __handle_mm_fault() local
6124 vmf.pud = pud_alloc(mm, p4d, address); in __handle_mm_fault()
6125 if (!vmf.pud) in __handle_mm_fault()
6128 if (pud_none(*vmf.pud) && in __handle_mm_fault()
6131 ret = create_huge_pud(&vmf); in __handle_mm_fault()
6135 pud_t orig_pud = *vmf.pud; in __handle_mm_fault()
6145 ret = wp_huge_pud(&vmf, orig_pud); in __handle_mm_fault()
6149 huge_pud_set_accessed(&vmf, orig_pud); in __handle_mm_fault()
6155 vmf.pmd = pmd_alloc(mm, vmf.pud, address); in __handle_mm_fault()
6156 if (!vmf.pmd) in __handle_mm_fault()
6160 if (pud_trans_unstable(vmf.pud)) in __handle_mm_fault()
6163 if (pmd_none(*vmf.pmd) && in __handle_mm_fault()
6166 ret = create_huge_pmd(&vmf); in __handle_mm_fault()
6170 vmf.orig_pmd = pmdp_get_lockless(vmf.pmd); in __handle_mm_fault()
6172 if (unlikely(is_swap_pmd(vmf.orig_pmd))) { in __handle_mm_fault()
6174 !is_pmd_migration_entry(vmf.orig_pmd)); in __handle_mm_fault()
6175 if (is_pmd_migration_entry(vmf.orig_pmd)) in __handle_mm_fault()
6176 pmd_migration_entry_wait(mm, vmf.pmd); in __handle_mm_fault()
6179 if (pmd_trans_huge(vmf.orig_pmd)) { in __handle_mm_fault()
6180 if (pmd_protnone(vmf.orig_pmd) && vma_is_accessible(vma)) in __handle_mm_fault()
6181 return do_huge_pmd_numa_page(&vmf); in __handle_mm_fault()
6184 !pmd_write(vmf.orig_pmd)) { in __handle_mm_fault()
6185 ret = wp_huge_pmd(&vmf); in __handle_mm_fault()
6189 huge_pmd_set_accessed(&vmf); in __handle_mm_fault()
6195 return handle_pte_fault(&vmf); in __handle_mm_fault()
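
A pattern worth noting across the references above (pte_unmap_same(), __wp_page_copy_user(), wp_page_copy(), finish_mkwrite_fault(), do_swap_page(), do_anonymous_page(), do_numa_page(), ...) is the common PTE re-validation idiom: map and lock the PTE with pte_offset_map_lock(), confirm it still equals vmf->orig_pte via pte_same(ptep_get(vmf->pte), vmf->orig_pte), and drop the mapping with pte_unmap_unlock(). The fragment below is a minimal illustration of that idiom only; the function name do_example_fault() and its fallback return values are placeholders, not code from the tree.

	/*
	 * Illustrative only: the locking/re-validation idiom common to the
	 * handlers listed above. do_example_fault() is a hypothetical name.
	 */
	static vm_fault_t do_example_fault(struct vm_fault *vmf)
	{
		struct vm_area_struct *vma = vmf->vma;

		/* Map and lock the PTE for vmf->address under vmf->pmd. */
		vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
					       vmf->address, &vmf->ptl);
		if (!vmf->pte)
			return 0;	/* placeholder: real handlers pick a fallback */

		/* The PTE may have changed since vmf->orig_pte was sampled. */
		if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
			update_mmu_tlb(vma, vmf->address, vmf->pte);
			pte_unmap_unlock(vmf->pte, vmf->ptl);
			return 0;	/* raced with another fault; nothing to do */
		}

		/* ... update the PTE here while holding vmf->ptl ... */

		pte_unmap_unlock(vmf->pte, vmf->ptl);
		return 0;
	}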