Lines Matching refs:vmf (hits are in mm/filemap.c; the leading number is the source line, and the trailing tag names the enclosing function)

1696 vm_fault_t __folio_lock_or_retry(struct folio *folio, struct vm_fault *vmf)  in __folio_lock_or_retry()  argument
1698 unsigned int flags = vmf->flags; in __folio_lock_or_retry()
1708 release_fault_lock(vmf); in __folio_lock_or_retry()
1720 release_fault_lock(vmf); in __folio_lock_or_retry()
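
The hits at 1696-1720 are the folio-lock slow path taken when the faulting folio is already locked by someone else. A condensed sketch of that logic, reconstructed from recent mainline mm/filemap.c (layout varies slightly by kernel version):

    vm_fault_t __folio_lock_or_retry(struct folio *folio, struct vm_fault *vmf)
    {
            unsigned int flags = vmf->flags;                /* line 1698 */

            if (fault_flag_allow_retry_first(flags)) {
                    /* The caller can retry: drop the fault lock (mmap_lock
                     * or the per-VMA lock) before sleeping on the folio. */
                    if (flags & FAULT_FLAG_RETRY_NOWAIT)
                            return VM_FAULT_RETRY;
                    release_fault_lock(vmf);                /* line 1708 */
                    if (flags & FAULT_FLAG_KILLABLE)
                            folio_wait_locked_killable(folio);
                    else
                            folio_wait_locked(folio);
                    return VM_FAULT_RETRY;
            }
            if (flags & FAULT_FLAG_KILLABLE) {
                    if (__folio_lock_killable(folio)) {
                            /* Fatal signal: unwind so the caller can exit. */
                            release_fault_lock(vmf);        /* line 1720 */
                            return VM_FAULT_RETRY;
                    }
            } else {
                    __folio_lock(folio);
            }
            return 0;
    }
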
3101 static int lock_folio_maybe_drop_mmap(struct vm_fault *vmf, struct folio *folio, in lock_folio_maybe_drop_mmap() argument
3112 if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT) in lock_folio_maybe_drop_mmap()
3115 *fpin = maybe_unlock_mmap_for_io(vmf, *fpin); in lock_folio_maybe_drop_mmap()
3116 if (vmf->flags & FAULT_FLAG_KILLABLE) { in lock_folio_maybe_drop_mmap()
3126 release_fault_lock(vmf); in lock_folio_maybe_drop_mmap()
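
lock_folio_maybe_drop_mmap() (3101) takes the folio lock for filemap_fault() without making other faulters wait on mmap_lock. A sketch of how the hits at 3112-3126 fit together:

    static int lock_folio_maybe_drop_mmap(struct vm_fault *vmf,
                    struct folio *folio, struct file **fpin)
    {
            if (folio_trylock(folio))
                    return 1;

            /* NOWAIT callers must not sleep; return with the lock held. */
            if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)       /* line 3112 */
                    return 0;

            /* Pin the file and drop the fault lock so waiting for the
             * folio lock cannot block other faults in this mm. */
            *fpin = maybe_unlock_mmap_for_io(vmf, *fpin);   /* line 3115 */
            if (vmf->flags & FAULT_FLAG_KILLABLE) {         /* line 3116 */
                    if (__folio_lock_killable(folio)) {
                            /* If the fault lock was not dropped above,
                             * drop it now so VM_FAULT_RETRY semantics hold. */
                            if (*fpin == NULL)
                                    release_fault_lock(vmf);  /* line 3126 */
                            return 0;
                    }
            } else {
                    __folio_lock(folio);
            }
            return 1;
    }
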
3142 static struct file *do_sync_mmap_readahead(struct vm_fault *vmf) in do_sync_mmap_readahead() argument
3144 struct file *file = vmf->vma->vm_file; in do_sync_mmap_readahead()
3147 DEFINE_READAHEAD(ractl, file, ra, mapping, vmf->pgoff); in do_sync_mmap_readahead()
3149 unsigned long vm_flags = vmf->vma->vm_flags; in do_sync_mmap_readahead()
3155 fpin = maybe_unlock_mmap_for_io(vmf, fpin); in do_sync_mmap_readahead()
3177 fpin = maybe_unlock_mmap_for_io(vmf, fpin); in do_sync_mmap_readahead()
3197 fpin = maybe_unlock_mmap_for_io(vmf, fpin); in do_sync_mmap_readahead()
3198 ra->start = max_t(long, 0, vmf->pgoff - ra->ra_pages / 2); in do_sync_mmap_readahead()
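
In do_sync_mmap_readahead() (3142), vmf supplies the faulting offset and the VMA hints that pick the readahead policy, and the hit at 3198 centres the read-around window on vmf->pgoff. A sketch with the THP and mmap_miss branches omitted (one of the three maybe_unlock_mmap_for_io() hits above belongs to the omitted THP path):

    static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
    {
            struct file *file = vmf->vma->vm_file;          /* line 3144 */
            struct file_ra_state *ra = &file->f_ra;
            struct address_space *mapping = file->f_mapping;
            DEFINE_READAHEAD(ractl, file, ra, mapping, vmf->pgoff); /* 3147 */
            unsigned long vm_flags = vmf->vma->vm_flags;    /* line 3149 */
            struct file *fpin = NULL;

            if (vm_flags & VM_RAND_READ)   /* no readahead for random access */
                    return fpin;
            if (!ra->ra_pages)
                    return fpin;

            if (vm_flags & VM_SEQ_READ) {
                    fpin = maybe_unlock_mmap_for_io(vmf, fpin);
                    page_cache_sync_ra(&ractl, ra->ra_pages);
                    return fpin;
            }

            /* Default policy: centre a window of ra_pages on the fault. */
            fpin = maybe_unlock_mmap_for_io(vmf, fpin);     /* line 3197 */
            ra->start = max_t(long, 0, vmf->pgoff - ra->ra_pages / 2);
            ra->size = ra->ra_pages;
            ra->async_size = ra->ra_pages / 4;
            ractl._index = ra->start;
            page_cache_ra_order(&ractl, ra, 0);
            return fpin;
    }
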
3211 static struct file *do_async_mmap_readahead(struct vm_fault *vmf, in do_async_mmap_readahead() argument
3214 struct file *file = vmf->vma->vm_file; in do_async_mmap_readahead()
3216 DEFINE_READAHEAD(ractl, file, ra, file->f_mapping, vmf->pgoff); in do_async_mmap_readahead()
3221 if (vmf->vma->vm_flags & VM_RAND_READ || !ra->ra_pages) in do_async_mmap_readahead()
3229 fpin = maybe_unlock_mmap_for_io(vmf, fpin); in do_async_mmap_readahead()
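
do_async_mmap_readahead() (3211) only kicks readahead when the folio carries the readahead marker; the mmap_miss bookkeeping is again omitted in this sketch:

    static struct file *do_async_mmap_readahead(struct vm_fault *vmf,
                    struct folio *folio)
    {
            struct file *file = vmf->vma->vm_file;          /* line 3214 */
            struct file_ra_state *ra = &file->f_ra;
            DEFINE_READAHEAD(ractl, file, ra, file->f_mapping, vmf->pgoff);
            struct file *fpin = NULL;

            /* If we don't want any readahead, don't bother. */
            if (vmf->vma->vm_flags & VM_RAND_READ || !ra->ra_pages) /* 3221 */
                    return fpin;

            if (folio_test_readahead(folio)) {
                    fpin = maybe_unlock_mmap_for_io(vmf, fpin); /* line 3229 */
                    page_cache_async_ra(&ractl, folio, ra->ra_pages);
            }
            return fpin;
    }
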
3235 static vm_fault_t filemap_fault_recheck_pte_none(struct vm_fault *vmf) in filemap_fault_recheck_pte_none() argument
3237 struct vm_area_struct *vma = vmf->vma; in filemap_fault_recheck_pte_none()
3258 if (!(vmf->flags & FAULT_FLAG_ORIG_PTE_VALID)) in filemap_fault_recheck_pte_none()
3261 ptep = pte_offset_map_nolock(vma->vm_mm, vmf->pmd, vmf->address, in filemap_fault_recheck_pte_none()
3262 &vmf->ptl); in filemap_fault_recheck_pte_none()
3269 spin_lock(vmf->ptl); in filemap_fault_recheck_pte_none()
3272 spin_unlock(vmf->ptl); in filemap_fault_recheck_pte_none()
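
filemap_fault_recheck_pte_none() (3235) rechecks the PTE under vmf->ptl so that an mlocked VMA does not take a spurious major fault when a concurrent updater (e.g. the NUMA-hinting path) transiently cleared the entry. Approximately as follows; the guard conditions in particular are version-dependent:

    static vm_fault_t filemap_fault_recheck_pte_none(struct vm_fault *vmf)
    {
            struct vm_area_struct *vma = vmf->vma;          /* line 3237 */
            vm_fault_t ret = 0;
            pte_t *ptep;

            /* Only mlocked VMAs care: elsewhere a spurious major fault
             * is merely slow, not incorrect. */
            if (!(vma->vm_flags & VM_LOCKED))
                    return 0;
            if (!(vmf->flags & FAULT_FLAG_ORIG_PTE_VALID))  /* line 3258 */
                    return 0;

            ptep = pte_offset_map_nolock(vma->vm_mm, vmf->pmd, vmf->address,
                                         &vmf->ptl);        /* lines 3261-3262 */
            if (unlikely(!ptep))
                    return VM_FAULT_NOPAGE;

            if (unlikely(!pte_none(ptep_get_lockless(ptep)))) {
                    ret = VM_FAULT_NOPAGE;
            } else {
                    /* Recheck under the PT lock: the clear may have been
                     * only transient. */
                    spin_lock(vmf->ptl);                    /* line 3269 */
                    if (unlikely(!pte_none(ptep_get(ptep))))
                            ret = VM_FAULT_NOPAGE;
                    spin_unlock(vmf->ptl);                  /* line 3272 */
            }
            pte_unmap(ptep);
            return ret;
    }
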
3301 vm_fault_t filemap_fault(struct vm_fault *vmf) in filemap_fault() argument
3304 struct file *file = vmf->vma->vm_file; in filemap_fault()
3308 pgoff_t max_idx, index = vmf->pgoff; in filemap_fault()
3328 if (!(vmf->flags & FAULT_FLAG_TRIED)) in filemap_fault()
3329 fpin = do_async_mmap_readahead(vmf, folio); in filemap_fault()
3335 ret = filemap_fault_recheck_pte_none(vmf); in filemap_fault()
3341 count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT); in filemap_fault()
3343 fpin = do_sync_mmap_readahead(vmf); in filemap_fault()
3355 vmf->gfp_mask); in filemap_fault()
3364 if (!lock_folio_maybe_drop_mmap(vmf, folio, &fpin)) in filemap_fault()
3424 vmf->page = folio_file_page(folio, index); in filemap_fault()
3434 fpin = maybe_unlock_mmap_for_io(vmf, fpin); in filemap_fault()
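
filemap_fault() (3301-3434) is the generic ->fault handler: it looks vmf->pgoff up in the page cache, drives the readahead helpers above, takes the folio lock via lock_folio_maybe_drop_mmap(), and hands the result back through vmf->page (3424). Filesystems consume it through their vm_operations_struct; a hypothetical hookup (the myfs_* names are illustrative only, but the pattern mirrors generic_file_mmap() in the same file):

    /* Hypothetical filesystem; myfs_* names are illustrative only. */
    static const struct vm_operations_struct myfs_file_vm_ops = {
            .fault          = filemap_fault,
            .map_pages      = filemap_map_pages,
            .page_mkwrite   = filemap_page_mkwrite,
    };

    static int myfs_file_mmap(struct file *file, struct vm_area_struct *vma)
    {
            file_accessed(file);
            vma->vm_ops = &myfs_file_vm_ops;
            return 0;
    }

Filesystems that must allocate blocks on a write fault keep the generic ->fault/->map_pages pair but substitute their own ->page_mkwrite.
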
3462 static bool filemap_map_pmd(struct vm_fault *vmf, struct folio *folio, in filemap_map_pmd() argument
3465 struct mm_struct *mm = vmf->vma->vm_mm; in filemap_map_pmd()
3468 if (pmd_trans_huge(*vmf->pmd)) { in filemap_map_pmd()
3474 if (pmd_none(*vmf->pmd) && folio_test_pmd_mappable(folio)) { in filemap_map_pmd()
3476 vm_fault_t ret = do_set_pmd(vmf, page); in filemap_map_pmd()
3484 if (pmd_none(*vmf->pmd) && vmf->prealloc_pte) in filemap_map_pmd()
3485 pmd_install(mm, vmf->pmd, &vmf->prealloc_pte); in filemap_map_pmd()
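
filemap_map_pmd() (3462) settles the PMD level before fault-around fills in PTEs: back off if a huge PMD appeared under us, try a PMD-sized mapping of the folio, otherwise install the preallocated PTE table. Roughly:

    static bool filemap_map_pmd(struct vm_fault *vmf, struct folio *folio,
                    pgoff_t start)
    {
            struct mm_struct *mm = vmf->vma->vm_mm;         /* line 3465 */

            /* A huge PMD is already mapped: nothing more to do here. */
            if (pmd_trans_huge(*vmf->pmd)) {                /* line 3468 */
                    folio_unlock(folio);
                    folio_put(folio);
                    return true;
            }

            if (pmd_none(*vmf->pmd) && folio_test_pmd_mappable(folio)) {
                    struct page *page = folio_file_page(folio, start);
                    vm_fault_t ret = do_set_pmd(vmf, page); /* line 3476 */
                    if (!ret) {
                            /* Mapped as a huge PMD; reference consumed. */
                            folio_unlock(folio);
                            return true;
                    }
            }

            /* Fall back to PTEs: install the preallocated page table. */
            if (pmd_none(*vmf->pmd) && vmf->prealloc_pte)   /* line 3484 */
                    pmd_install(mm, vmf->pmd, &vmf->prealloc_pte);

            return false;
    }
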
3535 static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf, in filemap_map_folio_range() argument
3543 pte_t *old_ptep = vmf->pte; in filemap_map_folio_range()
3564 if (!pte_none(ptep_get(&vmf->pte[count]))) in filemap_map_folio_range()
3571 set_pte_range(vmf, folio, page, count, addr); in filemap_map_folio_range()
3574 if (in_range(vmf->address, addr, count * PAGE_SIZE)) in filemap_map_folio_range()
3580 vmf->pte += count; in filemap_map_folio_range()
3586 set_pte_range(vmf, folio, page, count, addr); in filemap_map_folio_range()
3589 if (in_range(vmf->address, addr, count * PAGE_SIZE)) in filemap_map_folio_range()
3593 vmf->pte = old_ptep; in filemap_map_folio_range()
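
filemap_map_folio_range() (3535) installs PTEs for a run of pages within one large folio, batching consecutive still-empty PTE slots into single set_pte_range() calls and reporting VM_FAULT_NOPAGE once the faulting address itself is covered. A condensed sketch with the rss/mmap_miss counters of the mainline version dropped:

    static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
                    struct folio *folio, unsigned long start,
                    unsigned long addr, unsigned int nr_pages)
    {
            struct page *page = folio_page(folio, start);
            pte_t *old_ptep = vmf->pte;                     /* line 3543 */
            unsigned int count = 0;
            vm_fault_t ret = 0;

            do {
                    /* Grow the batch while PTE slots are still empty. */
                    if (!PageHWPoison(page + count) &&
                        pte_none(ptep_get(&vmf->pte[count]))) { /* line 3564 */
                            count++;
                            continue;
                    }

                    /* Map the batch collected so far in one call. */
                    if (count) {
                            set_pte_range(vmf, folio, page, count, addr);
                            folio_ref_add(folio, count);
                            if (in_range(vmf->address, addr, count * PAGE_SIZE))
                                    ret = VM_FAULT_NOPAGE;
                    }

                    /* Step past the unmappable page, restart the batch. */
                    count++;
                    page += count;
                    vmf->pte += count;                      /* line 3580 */
                    addr += count * PAGE_SIZE;
                    count = 0;
            } while (--nr_pages > 0);

            if (count) {                                    /* trailing batch */
                    set_pte_range(vmf, folio, page, count, addr); /* line 3586 */
                    folio_ref_add(folio, count);
                    if (in_range(vmf->address, addr, count * PAGE_SIZE))
                            ret = VM_FAULT_NOPAGE;
            }

            vmf->pte = old_ptep;   /* restore for the caller (line 3593) */
            return ret;
    }
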
3598 static vm_fault_t filemap_map_order0_folio(struct vm_fault *vmf, in filemap_map_order0_folio() argument
3617 if (!pte_none(ptep_get(vmf->pte))) in filemap_map_order0_folio()
3620 if (vmf->address == addr) in filemap_map_order0_folio()
3623 set_pte_range(vmf, folio, page, 1, addr); in filemap_map_order0_folio()
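
filemap_map_order0_folio() (3598) is the single-page counterpart, with the same counters dropped here:

    static vm_fault_t filemap_map_order0_folio(struct vm_fault *vmf,
                    struct folio *folio, unsigned long addr)
    {
            struct page *page = &folio->page;
            vm_fault_t ret = 0;

            if (PageHWPoison(page))
                    return ret;

            /* A racing fault already populated this PTE slot. */
            if (!pte_none(ptep_get(vmf->pte)))              /* line 3617 */
                    return ret;

            /* The faulting address itself is being mapped. */
            if (vmf->address == addr)                       /* line 3620 */
                    ret = VM_FAULT_NOPAGE;

            set_pte_range(vmf, folio, page, 1, addr);       /* line 3623 */
            folio_ref_add(folio, 1);
            return ret;
    }
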
3630 vm_fault_t filemap_map_pages(struct vm_fault *vmf, in filemap_map_pages() argument
3633 struct vm_area_struct *vma = vmf->vma; in filemap_map_pages()
3649 if (filemap_map_pmd(vmf, folio, start_pgoff)) { in filemap_map_pages()
3655 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl); in filemap_map_pages()
3656 if (!vmf->pte) { in filemap_map_pages()
3671 vmf->pte += xas.xa_index - last_pgoff; in filemap_map_pages()
3677 ret |= filemap_map_order0_folio(vmf, in filemap_map_pages()
3680 ret |= filemap_map_folio_range(vmf, folio, in filemap_map_pages()
3688 pte_unmap_unlock(vmf->pte, vmf->ptl); in filemap_map_pages()
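
filemap_map_pages() (3630) is the fault-around entry point: under the RCU read lock it walks the page cache across [start_pgoff, end_pgoff] with an XArray cursor and maps each uptodate folio through the helpers above (next_uptodate_folio() is another filemap.c-internal helper). A skeleton using the condensed helper signatures from the sketches above:

    vm_fault_t filemap_map_pages(struct vm_fault *vmf,
                    pgoff_t start_pgoff, pgoff_t end_pgoff)
    {
            struct vm_area_struct *vma = vmf->vma;          /* line 3633 */
            struct file *file = vma->vm_file;
            struct address_space *mapping = file->f_mapping;
            pgoff_t last_pgoff = start_pgoff;
            XA_STATE(xas, &mapping->i_pages, start_pgoff);
            struct folio *folio;
            unsigned long addr;
            vm_fault_t ret = 0;

            rcu_read_lock();
            folio = next_uptodate_folio(&xas, mapping, end_pgoff);
            if (!folio)
                    goto out;

            /* Try the PMD level first (see filemap_map_pmd() above). */
            if (filemap_map_pmd(vmf, folio, start_pgoff)) { /* line 3649 */
                    ret = VM_FAULT_NOPAGE;
                    goto out;
            }

            addr = vma->vm_start + ((start_pgoff - vma->vm_pgoff) << PAGE_SHIFT);
            vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr,
                                           &vmf->ptl);      /* line 3655 */
            if (!vmf->pte) {                                /* line 3656 */
                    folio_unlock(folio);
                    folio_put(folio);
                    goto out;
            }

            do {
                    unsigned long end;
                    unsigned int nr_pages;

                    /* Advance addr and vmf->pte to this folio's slot. */
                    addr += (xas.xa_index - last_pgoff) << PAGE_SHIFT;
                    vmf->pte += xas.xa_index - last_pgoff;  /* line 3671 */
                    last_pgoff = xas.xa_index;
                    end = folio_next_index(folio) - 1;
                    nr_pages = min(end, end_pgoff) - xas.xa_index + 1;

                    if (!folio_test_large(folio))
                            ret |= filemap_map_order0_folio(vmf, folio, addr);
                    else
                            ret |= filemap_map_folio_range(vmf, folio,
                                            xas.xa_index - folio->index,
                                            addr, nr_pages);

                    folio_unlock(folio);
                    folio_put(folio);
            } while ((folio = next_uptodate_folio(&xas, mapping,
                                                  end_pgoff)) != NULL);

            pte_unmap_unlock(vmf->pte, vmf->ptl);           /* line 3688 */
    out:
            rcu_read_unlock();
            return ret;
    }
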
3703 vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf) in filemap_page_mkwrite() argument
3705 struct address_space *mapping = vmf->vma->vm_file->f_mapping; in filemap_page_mkwrite()
3706 struct folio *folio = page_folio(vmf->page); in filemap_page_mkwrite()
3710 file_update_time(vmf->vma->vm_file); in filemap_page_mkwrite()
3758 vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf) in filemap_page_mkwrite() argument
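
Note the two definitions: 3703 is the CONFIG_MMU implementation, which dirties the folio when a shared writable mapping is first written into; 3758 should be the !CONFIG_MMU stub that simply returns VM_FAULT_SIGBUS. The MMU version, approximately:

    vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf)
    {
            struct address_space *mapping = vmf->vma->vm_file->f_mapping;
            struct folio *folio = page_folio(vmf->page);    /* line 3706 */
            vm_fault_t ret = VM_FAULT_LOCKED;

            sb_start_pagefault(mapping->host->i_sb);
            file_update_time(vmf->vma->vm_file);            /* line 3710 */
            folio_lock(folio);
            if (folio->mapping != mapping) {
                    /* Truncated under us: make the caller retry the fault. */
                    folio_unlock(folio);
                    ret = VM_FAULT_NOPAGE;
                    goto out;
            }
            /* Dirty the folio now and keep it stable so writeback during a
             * filesystem freeze sees it and write-protects it again. */
            folio_mark_dirty(folio);
            folio_wait_stable(folio);
    out:
            sb_end_pagefault(mapping->host->i_sb);
            return ret;
    }
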