References to vmf (struct vm_fault *) in mm/filemap.c, grouped by containing function. The leading number on each line is the line number within filemap.c.
__folio_lock_or_retry():
1724 vm_fault_t __folio_lock_or_retry(struct folio *folio, struct vm_fault *vmf)
1726 unsigned int flags = vmf->flags;
1736 release_fault_lock(vmf);
1748 release_fault_lock(vmf);
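The two release_fault_lock() hits are the points where the fault path gives up mmap_lock (or the per-VMA lock) before sleeping on a folio that some other context holds locked. A condensed sketch, close to recent mm/filemap.c (exact comments and minor details vary by kernel version):

vm_fault_t __folio_lock_or_retry(struct folio *folio, struct vm_fault *vmf)
{
	unsigned int flags = vmf->flags;

	if (fault_flag_allow_retry_first(flags)) {
		/*
		 * With FAULT_FLAG_RETRY_NOWAIT the caller keeps the
		 * fault lock even though we return VM_FAULT_RETRY.
		 */
		if (flags & FAULT_FLAG_RETRY_NOWAIT)
			return VM_FAULT_RETRY;

		release_fault_lock(vmf);		/* line 1736 */
		if (flags & FAULT_FLAG_KILLABLE)
			folio_wait_locked_killable(folio);
		else
			folio_wait_locked(folio);
		return VM_FAULT_RETRY;
	}
	if (flags & FAULT_FLAG_KILLABLE) {
		if (__folio_lock_killable(folio)) {
			/* Killed while waiting: drop the lock, retry. */
			release_fault_lock(vmf);	/* line 1748 */
			return VM_FAULT_RETRY;
		}
	} else {
		__folio_lock(folio);
	}
	return 0;
}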
lock_folio_maybe_drop_mmap():
3171 static int lock_folio_maybe_drop_mmap(struct vm_fault *vmf, struct folio *folio,
3182 if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
3185 *fpin = maybe_unlock_mmap_for_io(vmf, *fpin);
3186 if (vmf->flags & FAULT_FLAG_KILLABLE) {
3196 release_fault_lock(vmf);
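This helper tries a trylock first and only drops the fault lock (pinning the file via maybe_unlock_mmap_for_io() into *fpin) when it actually has to sleep; with FAULT_FLAG_RETRY_NOWAIT it bails out instead. A condensed sketch of the upstream logic:

/* Returns 1 with the folio locked, 0 if the caller should retry. */
static int lock_folio_maybe_drop_mmap(struct vm_fault *vmf,
		struct folio *folio, struct file **fpin)
{
	if (folio_trylock(folio))
		return 1;

	/*
	 * NOWAIT means returning VM_FAULT_RETRY *without* dropping
	 * the fault lock; that is how FAULT_FLAG_RETRY_NOWAIT works.
	 */
	if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
		return 0;

	/* Pin the file so IO can proceed without mmap_lock. */
	*fpin = maybe_unlock_mmap_for_io(vmf, *fpin);
	if (vmf->flags & FAULT_FLAG_KILLABLE) {
		if (__folio_lock_killable(folio)) {
			/*
			 * Fatal signal while sleeping: if the fault
			 * lock could not be dropped above, drop it now
			 * so the caller can notice the signal.
			 */
			if (*fpin == NULL)
				release_fault_lock(vmf);  /* line 3196 */
			return 0;
		}
	} else {
		__folio_lock(folio);
	}
	return 1;
}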
do_sync_mmap_readahead():
3212 static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
3214 struct file *file = vmf->vma->vm_file;
3217 DEFINE_READAHEAD(ractl, file, ra, mapping, vmf->pgoff);
3219 vm_flags_t vm_flags = vmf->vma->vm_flags;
3225 fpin = maybe_unlock_mmap_for_io(vmf, fpin);
3251 fpin = maybe_unlock_mmap_for_io(vmf, fpin);
3280 struct vm_area_struct *vma = vmf->vma;
3286 ra->start = round_down(vmf->pgoff, 1UL << ra->order);
3296 ra->start = max_t(long, 0, vmf->pgoff - ra->ra_pages / 2);
3302 fpin = maybe_unlock_mmap_for_io(vmf, fpin);
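Called on a page-cache miss, this decides from vmf->vma->vm_flags how much to read around vmf->pgoff, releasing the fault lock before issuing IO. A condensed sketch; the mmap_miss heuristics and the transparent-hugepage/large-order paths (the hits around lines 3251-3286) are elided, so only two of the three maybe_unlock_mmap_for_io() call sites appear:

static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
{
	struct file *file = vmf->vma->vm_file;
	struct file_ra_state *ra = &file->f_ra;
	struct address_space *mapping = file->f_mapping;
	DEFINE_READAHEAD(ractl, file, ra, mapping, vmf->pgoff);
	struct file *fpin = NULL;
	vm_flags_t vm_flags = vmf->vma->vm_flags;

	/* Nothing to do for random access or disabled readahead. */
	if (vm_flags & VM_RAND_READ || !ra->ra_pages)
		return fpin;

	if (vm_flags & VM_SEQ_READ) {
		fpin = maybe_unlock_mmap_for_io(vmf, fpin);
		page_cache_sync_ra(&ractl, ra->ra_pages);
		return fpin;
	}

	/* Default: read a window centred on the faulting page. */
	fpin = maybe_unlock_mmap_for_io(vmf, fpin);
	ra->start = max_t(long, 0, vmf->pgoff - ra->ra_pages / 2);
	ra->size = ra->ra_pages;
	ra->async_size = ra->ra_pages / 4;
	ractl._index = ra->start;
	page_cache_ra_order(&ractl, ra, 0);
	return fpin;
}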
do_async_mmap_readahead():
3313 static struct file *do_async_mmap_readahead(struct vm_fault *vmf,
3316 struct file *file = vmf->vma->vm_file;
3318 DEFINE_READAHEAD(ractl, file, ra, file->f_mapping, vmf->pgoff);
3323 if (vmf->vma->vm_flags & VM_RAND_READ || !ra->ra_pages)
3331 fpin = maybe_unlock_mmap_for_io(vmf, fpin);
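The asynchronous counterpart runs when the folio was already in the cache: if the folio carries the readahead marker, the next readahead window is kicked off in the background. A condensed sketch (the mmap_miss accounting is elided):

static struct file *do_async_mmap_readahead(struct vm_fault *vmf,
					    struct folio *folio)
{
	struct file *file = vmf->vma->vm_file;
	struct file_ra_state *ra = &file->f_ra;
	DEFINE_READAHEAD(ractl, file, ra, file->f_mapping, vmf->pgoff);
	struct file *fpin = NULL;

	/* No readahead for random mmaps or when readahead is off. */
	if (vmf->vma->vm_flags & VM_RAND_READ || !ra->ra_pages)
		return fpin;

	if (folio_test_readahead(folio)) {
		fpin = maybe_unlock_mmap_for_io(vmf, fpin); /* line 3331 */
		page_cache_async_ra(&ractl, folio, ra->ra_pages);
	}
	return fpin;
}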
filemap_fault_recheck_pte_none():
3337 static vm_fault_t filemap_fault_recheck_pte_none(struct vm_fault *vmf)
3339 struct vm_area_struct *vma = vmf->vma;
3360 if (!(vmf->flags & FAULT_FLAG_ORIG_PTE_VALID))
3363 ptep = pte_offset_map_ro_nolock(vma->vm_mm, vmf->pmd, vmf->address,
3364 &vmf->ptl);
3371 spin_lock(vmf->ptl);
3374 spin_unlock(vmf->ptl);
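In an mlocked VMA, a racing read-clear-modify-write PTE update (NUMA balancing, change_pte_range()) can briefly make a mapped PTE look none and trigger a spurious major fault; this helper rechecks under the PTE lock and returns VM_FAULT_NOPAGE if the PTE is populated after all. A sketch close to upstream, with the long explanatory comment shortened:

static vm_fault_t filemap_fault_recheck_pte_none(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	vm_fault_t ret = 0;
	pte_t *ptep;

	/* Only mlocked VMAs need this recheck. */
	if (!(vma->vm_flags & VM_LOCKED))
		return 0;
	if (!(vmf->flags & FAULT_FLAG_ORIG_PTE_VALID))
		return 0;

	ptep = pte_offset_map_ro_nolock(vma->vm_mm, vmf->pmd, vmf->address,
					&vmf->ptl);
	if (unlikely(!ptep))
		return VM_FAULT_NOPAGE;

	/* Cheap lockless check first, then the authoritative one. */
	if (unlikely(!pte_none(ptep_get_lockless(ptep)))) {
		ret = VM_FAULT_NOPAGE;
	} else {
		spin_lock(vmf->ptl);
		if (unlikely(!pte_none(ptep_get(ptep))))
			ret = VM_FAULT_NOPAGE;
		spin_unlock(vmf->ptl);
	}
	pte_unmap(ptep);
	return ret;
}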
filemap_fault():
3403 vm_fault_t filemap_fault(struct vm_fault *vmf)
3406 struct file *file = vmf->vma->vm_file;
3410 pgoff_t max_idx, index = vmf->pgoff;
3430 if (!(vmf->flags & FAULT_FLAG_TRIED))
3431 fpin = do_async_mmap_readahead(vmf, folio);
3437 ret = filemap_fault_recheck_pte_none(vmf);
3443 count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
3445 fpin = do_sync_mmap_readahead(vmf);
3457 vmf->gfp_mask);
3466 if (!lock_folio_maybe_drop_mmap(vmf, folio, &fpin))
3526 vmf->page = folio_file_page(folio, index);
3536 fpin = maybe_unlock_mmap_for_io(vmf, fpin);
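This is the read-fault entry point used by most filesystems; the hits above trace its folio lookup, readahead, locking, and the vmf->page hand-off at line 3526. A heavily condensed skeleton, assuming the error, truncation, and uptodate handling is elided (the real function is far longer):

vm_fault_t filemap_fault(struct vm_fault *vmf)
{
	struct file *file = vmf->vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	pgoff_t index = vmf->pgoff;
	struct file *fpin = NULL;
	struct folio *folio;
	vm_fault_t ret = 0;

	/* Is the folio already in the page cache? */
	folio = filemap_get_folio(mapping, index);
	if (likely(!IS_ERR(folio))) {
		/* Minor fault: maybe kick off async readahead. */
		if (!(vmf->flags & FAULT_FLAG_TRIED))
			fpin = do_async_mmap_readahead(vmf, folio);
	} else {
		/* Bail out early if another thread raced us. */
		ret = filemap_fault_recheck_pte_none(vmf);
		if (unlikely(ret))
			return ret;
		/* Major fault: account it and read the data in. */
		count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
		ret = VM_FAULT_MAJOR;
		fpin = do_sync_mmap_readahead(vmf);
		folio = __filemap_get_folio(mapping, index,
					    FGP_CREAT|FGP_FOR_MMAP,
					    vmf->gfp_mask); /* line 3457 */
		/* ... allocation/read error handling elided ... */
	}

	if (!lock_folio_maybe_drop_mmap(vmf, folio, &fpin))
		goto out_retry;

	/* ... truncation and uptodate checks elided ... */

	vmf->page = folio_file_page(folio, index);	/* line 3526 */
	return ret | VM_FAULT_LOCKED;

out_retry:
	if (!IS_ERR(folio))
		folio_put(folio);
	if (fpin)
		fput(fpin);
	return ret | VM_FAULT_RETRY;
}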
filemap_map_pmd():
3564 static bool filemap_map_pmd(struct vm_fault *vmf, struct folio *folio,
3567 struct mm_struct *mm = vmf->vma->vm_mm;
3570 if (pmd_trans_huge(*vmf->pmd)) {
3576 if (pmd_none(*vmf->pmd) && folio_test_pmd_mappable(folio)) {
3578 vm_fault_t ret = do_set_pmd(vmf, folio, page);
3586 if (pmd_none(*vmf->pmd) && vmf->prealloc_pte)
3587 pmd_install(mm, vmf->pmd, &vmf->prealloc_pte);
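Before fault-around populates individual PTEs, this helper tries to satisfy the fault at the PMD level. A sketch close to upstream:

static bool filemap_map_pmd(struct vm_fault *vmf, struct folio *folio,
			    pgoff_t start)
{
	struct mm_struct *mm = vmf->vma->vm_mm;

	/* A huge mapping already exists: nothing left to do. */
	if (pmd_trans_huge(*vmf->pmd)) {
		folio_unlock(folio);
		folio_put(folio);
		return true;
	}

	/* Try to map a PMD-sized folio in one go. */
	if (pmd_none(*vmf->pmd) && folio_test_pmd_mappable(folio)) {
		struct page *page = folio_file_page(folio, start);
		vm_fault_t ret = do_set_pmd(vmf, folio, page);

		if (!ret) {
			/* Mapped; the folio reference was consumed. */
			folio_unlock(folio);
			return true;
		}
	}

	/* Fall back: install the preallocated PTE table. */
	if (pmd_none(*vmf->pmd) && vmf->prealloc_pte)
		pmd_install(mm, vmf->pmd, &vmf->prealloc_pte);

	return false;
}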
filemap_map_folio_range():
3637 static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
3645 pte_t *old_ptep = vmf->pte;
3666 if (!pte_none(ptep_get(&vmf->pte[count])))
3673 set_pte_range(vmf, folio, page, count, addr);
3676 if (in_range(vmf->address, addr, count * PAGE_SIZE))
3682 vmf->pte += count;
3688 set_pte_range(vmf, folio, page, count, addr);
3691 if (in_range(vmf->address, addr, count * PAGE_SIZE))
3695 vmf->pte = old_ptep;
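This maps the pages of a large folio in batches: runs of empty PTEs are accumulated in count and installed with a single set_pte_range() call, and the in_range() checks (lines 3676, 3691) detect whether a batch covered the faulting address so the caller can report VM_FAULT_NOPAGE. A simplified sketch; the HWPoison checks and the rss/mmap_miss out-parameters of the real function are dropped, so this parameter list is shorter than the one at line 3637:

static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
		struct folio *folio, unsigned long start,
		unsigned long addr, unsigned int nr_pages)
{
	struct page *page = folio_page(folio, start);
	pte_t *old_ptep = vmf->pte;
	unsigned int count = 0;
	vm_fault_t ret = 0;

	do {
		/* A populated PTE ends the current batch. */
		if (!pte_none(ptep_get(&vmf->pte[count])))
			goto skip;

		count++;
		continue;
skip:
		if (count) {
			set_pte_range(vmf, folio, page, count, addr);
			folio_ref_add(folio, count);
			/* Did the batch cover the faulting address? */
			if (in_range(vmf->address, addr, count * PAGE_SIZE))
				ret = VM_FAULT_NOPAGE;
		}
		/* Step past the batch plus the skipped slot. */
		count++;
		page += count;
		vmf->pte += count;
		addr += count * PAGE_SIZE;
		count = 0;
	} while (--nr_pages > 0);

	/* Flush the trailing batch. */
	if (count) {
		set_pte_range(vmf, folio, page, count, addr);
		folio_ref_add(folio, count);
		if (in_range(vmf->address, addr, count * PAGE_SIZE))
			ret = VM_FAULT_NOPAGE;
	}

	vmf->pte = old_ptep;
	return ret;
}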
filemap_map_order0_folio():
3700 static vm_fault_t filemap_map_order0_folio(struct vm_fault *vmf,
3719 if (!pte_none(ptep_get(vmf->pte)))
3722 if (vmf->address == addr)
3725 set_pte_range(vmf, folio, page, 1, addr);
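The order-0 case is the degenerate single-page version of the above. A simplified sketch, again without the rss/mmap_miss out-parameters of the real signature:

static vm_fault_t filemap_map_order0_folio(struct vm_fault *vmf,
		struct folio *folio, unsigned long addr)
{
	struct page *page = &folio->page;
	vm_fault_t ret = 0;

	if (PageHWPoison(page))
		return ret;

	/* A PTE marker or populated PTE ends fault-around here. */
	if (!pte_none(ptep_get(vmf->pte)))
		return ret;

	/* Did we just handle the address that actually faulted? */
	if (vmf->address == addr)
		ret = VM_FAULT_NOPAGE;

	set_pte_range(vmf, folio, page, 1, addr);
	folio_ref_inc(folio);
	return ret;
}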
filemap_map_pages():
3732 vm_fault_t filemap_map_pages(struct vm_fault *vmf,
3735 struct vm_area_struct *vma = vmf->vma;
3752 if (filemap_map_pmd(vmf, folio, start_pgoff)) {
3758 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl);
3759 if (!vmf->pte) {
3774 vmf->pte += xas.xa_index - last_pgoff;
3780 ret |= filemap_map_order0_folio(vmf,
3783 ret |= filemap_map_folio_range(vmf, folio,
3791 pte_unmap_unlock(vmf->pte, vmf->ptl);
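This is the fault-around entry point: under RCU it walks the page cache over [start_pgoff, end_pgoff], first tries a PMD mapping, then maps each uptodate folio's pages under the PTE lock. A condensed sketch; the rss/mmap_miss accounting and the file-size recheck are elided, and the two helpers are called with the trimmed signatures used in the sketches above (next_uptodate_folio() is a static helper in filemap.c):

vm_fault_t filemap_map_pages(struct vm_fault *vmf,
			     pgoff_t start_pgoff, pgoff_t end_pgoff)
{
	struct vm_area_struct *vma = vmf->vma;
	struct address_space *mapping = vma->vm_file->f_mapping;
	pgoff_t last_pgoff = start_pgoff;
	XA_STATE(xas, &mapping->i_pages, start_pgoff);
	struct folio *folio;
	unsigned long addr;
	vm_fault_t ret = 0;

	rcu_read_lock();
	folio = next_uptodate_folio(&xas, mapping, end_pgoff);
	if (!folio)
		goto out;

	/* Try to install a PMD-sized mapping first. */
	if (filemap_map_pmd(vmf, folio, start_pgoff)) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	addr = vma->vm_start + ((start_pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl);
	if (!vmf->pte) {
		folio_unlock(folio);
		folio_put(folio);
		goto out;
	}

	do {
		unsigned long end;
		unsigned int nr_pages;

		/* Advance addr and vmf->pte to the next hit. */
		addr += (xas.xa_index - last_pgoff) << PAGE_SHIFT;
		vmf->pte += xas.xa_index - last_pgoff;
		last_pgoff = xas.xa_index;
		end = min(folio_next_index(folio) - 1, end_pgoff);
		nr_pages = end - xas.xa_index + 1;

		if (!folio_test_large(folio))
			ret |= filemap_map_order0_folio(vmf, folio, addr);
		else
			ret |= filemap_map_folio_range(vmf, folio,
					xas.xa_index - folio->index,
					addr, nr_pages);

		folio_unlock(folio);
		folio_put(folio);
	} while ((folio = next_uptodate_folio(&xas, mapping, end_pgoff)) != NULL);
	pte_unmap_unlock(vmf->pte, vmf->ptl);
out:
	rcu_read_unlock();
	return ret;
}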
filemap_page_mkwrite():
3806 vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf)
3808 struct address_space *mapping = vmf->vma->vm_file->f_mapping;
3809 struct folio *folio = page_folio(vmf->page);
3813 file_update_time(vmf->vma->vm_file);

filemap_page_mkwrite(), second definition (the nommu stub in the same file):
3880 vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf)
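The write-notify handler dirties the folio while it is still write-protected and under sb_start_pagefault(), so a concurrent filesystem freeze is guaranteed to see and write back the dirty folio. A sketch of the CONFIG_MMU version, close to upstream (the !CONFIG_MMU definition at line 3880 just returns VM_FAULT_SIGBUS):

vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	struct folio *folio = page_folio(vmf->page);
	vm_fault_t ret = VM_FAULT_LOCKED;

	sb_start_pagefault(mapping->host->i_sb);
	file_update_time(vmf->vma->vm_file);
	folio_lock(folio);
	if (folio->mapping != mapping) {
		/* Raced with truncation: tell the caller to retry. */
		folio_unlock(folio);
		ret = VM_FAULT_NOPAGE;
		goto out;
	}
	/*
	 * Dirty the folio now so writeback during a freeze sees it
	 * and write-protects it again.
	 */
	folio_mark_dirty(folio);
	folio_wait_stable(folio);
out:
	sb_end_pagefault(mapping->host->i_sb);
	return ret;
}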