Lines matching refs: vmf
Cross-reference hits for the struct vm_fault argument in mm/filemap.c. The leading number on each hit is the source line in that file, the trailing "in func()" names the enclosing function, and "argument" marks the lines where vmf is declared as a parameter. Judging by the folio API calls and the presence of pmd_devmap_trans_unstable(), these line numbers are consistent with a folio-era kernel, circa v6.1; the sketches interleaved below are reconstructions for that era, not authoritative copies.
3090 static int lock_folio_maybe_drop_mmap(struct vm_fault *vmf, struct folio *folio, in lock_folio_maybe_drop_mmap() argument
3101 if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT) in lock_folio_maybe_drop_mmap()
3104 *fpin = maybe_unlock_mmap_for_io(vmf, *fpin); in lock_folio_maybe_drop_mmap()
3105 if (vmf->flags & FAULT_FLAG_KILLABLE) { in lock_folio_maybe_drop_mmap()
3114 mmap_read_unlock(vmf->vma->vm_mm); in lock_folio_maybe_drop_mmap()
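
The five hits above are lock_folio_maybe_drop_mmap(), the helper filemap_fault() uses to take the folio lock without holding mmap_lock across a sleep. A sketch of the whole function, reconstructed around the matched lines; the folio_trylock() fast path and the comments are filled in here from context and may differ in detail between versions:

static int lock_folio_maybe_drop_mmap(struct vm_fault *vmf, struct folio *folio,
				      struct file **fpin)
{
	if (folio_trylock(folio))
		return 1;

	/*
	 * NOWAIT callers must not sleep for the folio lock: fail the
	 * fault and let it be retried, with mmap_lock still held.
	 */
	if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
		return 0;

	/* Pin the file and drop mmap_lock before sleeping, if allowed. */
	*fpin = maybe_unlock_mmap_for_io(vmf, *fpin);
	if (vmf->flags & FAULT_FLAG_KILLABLE) {
		if (__folio_lock_killable(folio)) {
			/*
			 * Fatal signal while sleeping: if mmap_lock could
			 * not be dropped above, drop it here before
			 * returning 0.
			 */
			if (*fpin == NULL)
				mmap_read_unlock(vmf->vma->vm_mm);
			return 0;
		}
	} else
		__folio_lock(folio);

	return 1;
}
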
3130 static struct file *do_sync_mmap_readahead(struct vm_fault *vmf) in do_sync_mmap_readahead() argument
3132 struct file *file = vmf->vma->vm_file; in do_sync_mmap_readahead()
3135 DEFINE_READAHEAD(ractl, file, ra, mapping, vmf->pgoff); in do_sync_mmap_readahead()
3137 unsigned long vm_flags = vmf->vma->vm_flags; in do_sync_mmap_readahead()
3143 fpin = maybe_unlock_mmap_for_io(vmf, fpin); in do_sync_mmap_readahead()
3165 fpin = maybe_unlock_mmap_for_io(vmf, fpin); in do_sync_mmap_readahead()
3185 fpin = maybe_unlock_mmap_for_io(vmf, fpin); in do_sync_mmap_readahead()
3186 ra->start = max_t(long, 0, vmf->pgoff - ra->ra_pages / 2); in do_sync_mmap_readahead()
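
These hits are do_sync_mmap_readahead(), run on a page-cache miss. The three maybe_unlock_mmap_for_io() call sites (3143, 3165, 3185) are the VM_HUGEPAGE, VM_SEQ_READ, and read-around paths respectively. An abridged sketch; the VM_HUGEPAGE branch (where match 3143 lives) and the mmap_miss accounting are elided, and page_cache_ra_order() as the final call is version-dependent:

static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
{
	struct file *file = vmf->vma->vm_file;
	struct file_ra_state *ra = &file->f_ra;
	struct address_space *mapping = file->f_mapping;
	DEFINE_READAHEAD(ractl, file, ra, mapping, vmf->pgoff);
	struct file *fpin = NULL;
	unsigned long vm_flags = vmf->vma->vm_flags;

	/* (VM_HUGEPAGE path elided: it rounds to PMD granularity first.) */

	/* No readahead for explicitly random access, or if disabled. */
	if (vm_flags & VM_RAND_READ)
		return fpin;
	if (!ra->ra_pages)
		return fpin;

	if (vm_flags & VM_SEQ_READ) {
		fpin = maybe_unlock_mmap_for_io(vmf, fpin);
		page_cache_sync_ra(&ractl, ra->ra_pages);
		return fpin;
	}

	/* (mmap_miss accounting elided: heavy misses disable read-around.) */

	/* mmap read-around, centred on the faulting page. */
	fpin = maybe_unlock_mmap_for_io(vmf, fpin);
	ra->start = max_t(long, 0, vmf->pgoff - ra->ra_pages / 2);
	ra->size = ra->ra_pages;
	ra->async_size = ra->ra_pages / 4;
	ractl._index = ra->start;
	page_cache_ra_order(&ractl, ra, 0);
	return fpin;
}
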
3199 static struct file *do_async_mmap_readahead(struct vm_fault *vmf, in do_async_mmap_readahead() argument
3202 struct file *file = vmf->vma->vm_file; in do_async_mmap_readahead()
3204 DEFINE_READAHEAD(ractl, file, ra, file->f_mapping, vmf->pgoff); in do_async_mmap_readahead()
3209 if (vmf->vma->vm_flags & VM_RAND_READ || !ra->ra_pages) in do_async_mmap_readahead()
3217 fpin = maybe_unlock_mmap_for_io(vmf, fpin); in do_async_mmap_readahead()
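
These hits are do_async_mmap_readahead(), run on a page-cache hit. A sketch of the function around the matched lines; the mmap_miss decay and the folio_test_readahead() trigger are reconstructed from context:

static struct file *do_async_mmap_readahead(struct vm_fault *vmf,
					    struct folio *folio)
{
	struct file *file = vmf->vma->vm_file;
	struct file_ra_state *ra = &file->f_ra;
	DEFINE_READAHEAD(ractl, file, ra, file->f_mapping, vmf->pgoff);
	struct file *fpin = NULL;
	unsigned int mmap_miss;

	/* If we don't want any read-ahead, don't bother. */
	if (vmf->vma->vm_flags & VM_RAND_READ || !ra->ra_pages)
		return fpin;

	/* Every cache hit decays the miss counter kept by the sync path. */
	mmap_miss = READ_ONCE(ra->mmap_miss);
	if (mmap_miss)
		WRITE_ONCE(ra->mmap_miss, --mmap_miss);

	/* Hit a readahead-marker folio: kick off the next window. */
	if (folio_test_readahead(folio)) {
		fpin = maybe_unlock_mmap_for_io(vmf, fpin);
		page_cache_async_ra(&ractl, folio, ra->ra_pages);
	}
	return fpin;
}
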
3246 vm_fault_t filemap_fault(struct vm_fault *vmf) in filemap_fault() argument
3249 struct file *file = vmf->vma->vm_file; in filemap_fault()
3253 pgoff_t max_idx, index = vmf->pgoff; in filemap_fault()
3271 if (!(vmf->flags & FAULT_FLAG_TRIED)) in filemap_fault()
3272 fpin = do_async_mmap_readahead(vmf, folio); in filemap_fault()
3280 count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT); in filemap_fault()
3282 fpin = do_sync_mmap_readahead(vmf); in filemap_fault()
3294 vmf->gfp_mask); in filemap_fault()
3303 if (!lock_folio_maybe_drop_mmap(vmf, folio, &fpin)) in filemap_fault()
3356 vmf->page = folio_file_page(folio, index); in filemap_fault()
3366 fpin = maybe_unlock_mmap_for_io(vmf, fpin); in filemap_fault()
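
These hits are filemap_fault() itself, the ->fault handler for most filesystems. A heavily abridged sketch showing how the matched vmf touch points connect; the invalidate_lock handling, truncation and uptodate rechecks, and the not-uptodate read path (match 3366) are elided, and in this era filemap_get_folio() returns NULL on a miss where newer kernels return an ERR_PTR:

vm_fault_t filemap_fault(struct vm_fault *vmf)
{
	struct file *file = vmf->vma->vm_file;
	struct file *fpin = NULL;
	struct address_space *mapping = file->f_mapping;
	pgoff_t max_idx, index = vmf->pgoff;
	struct folio *folio;
	vm_fault_t ret = 0;

	max_idx = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
	if (unlikely(index >= max_idx))
		return VM_FAULT_SIGBUS;

	folio = filemap_get_folio(mapping, index);
	if (likely(folio)) {
		/* Cache hit: async readahead unless this is already a retry. */
		if (!(vmf->flags & FAULT_FLAG_TRIED))
			fpin = do_async_mmap_readahead(vmf, folio);
	} else {
		/* Cache miss: account a major fault, then read ahead. */
		count_vm_event(PGMAJFAULT);
		count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
		ret = VM_FAULT_MAJOR;
		fpin = do_sync_mmap_readahead(vmf);
		folio = __filemap_get_folio(mapping, index,
					    FGP_CREAT | FGP_FOR_MMAP,
					    vmf->gfp_mask);
		if (!folio)
			return VM_FAULT_OOM;	/* (retry handling elided) */
	}

	if (!lock_folio_maybe_drop_mmap(vmf, folio, &fpin))
		goto out_retry;

	/* (truncation and uptodate rechecks elided) */

	vmf->page = folio_file_page(folio, index);
	return ret | VM_FAULT_LOCKED;

out_retry:
	/* mmap_lock was dropped for I/O; the caller must retry the fault. */
	if (folio)
		folio_put(folio);
	if (fpin)
		fput(fpin);
	return ret | VM_FAULT_RETRY;
}
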
3394 static bool filemap_map_pmd(struct vm_fault *vmf, struct folio *folio, in filemap_map_pmd() argument
3397 struct mm_struct *mm = vmf->vma->vm_mm; in filemap_map_pmd()
3400 if (pmd_trans_huge(*vmf->pmd)) { in filemap_map_pmd()
3406 if (pmd_none(*vmf->pmd) && folio_test_pmd_mappable(folio)) { in filemap_map_pmd()
3408 vm_fault_t ret = do_set_pmd(vmf, page); in filemap_map_pmd()
3416 if (pmd_none(*vmf->pmd)) in filemap_map_pmd()
3417 pmd_install(mm, vmf->pmd, &vmf->prealloc_pte); in filemap_map_pmd()
3420 if (pmd_devmap_trans_unstable(vmf->pmd)) { in filemap_map_pmd()
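
These hits are filemap_map_pmd(), the fault-around helper that either maps a PMD-sized folio in one go or installs a PTE table for the per-page loop. A sketch around the matched lines; the truncated signature is completed as (vmf, folio, start), which is how this era's kernels spell it, and returning true means the fault is finished at PMD level:

static bool filemap_map_pmd(struct vm_fault *vmf, struct folio *folio,
			    pgoff_t start)
{
	struct mm_struct *mm = vmf->vma->vm_mm;

	/* Huge page already mapped? Nothing left to do. */
	if (pmd_trans_huge(*vmf->pmd)) {
		folio_unlock(folio);
		folio_put(folio);
		return true;
	}

	/* Empty PMD and a PMD-sized folio: try to map it wholesale. */
	if (pmd_none(*vmf->pmd) && folio_test_pmd_mappable(folio)) {
		struct page *page = folio_file_page(folio, start);
		vm_fault_t ret = do_set_pmd(vmf, page);
		if (!ret) {
			/* Mapped successfully; the reference is consumed. */
			folio_unlock(folio);
			return true;
		}
	}

	/* Otherwise make sure a PTE table exists for the per-page loop. */
	if (pmd_none(*vmf->pmd))
		pmd_install(mm, vmf->pmd, &vmf->prealloc_pte);

	/* A racing collapse or devmap made the PMD unstable: bail out. */
	if (pmd_devmap_trans_unstable(vmf->pmd)) {
		folio_unlock(folio);
		folio_put(folio);
		return true;
	}

	return false;
}
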
3486 vm_fault_t filemap_map_pages(struct vm_fault *vmf, in filemap_map_pages() argument
3489 struct vm_area_struct *vma = vmf->vma; in filemap_map_pages()
3505 if (filemap_map_pmd(vmf, folio, start_pgoff)) { in filemap_map_pages()
3511 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl); in filemap_map_pages()
3522 vmf->pte += xas.xa_index - last_pgoff; in filemap_map_pages()
3530 if (!pte_none(*vmf->pte)) in filemap_map_pages()
3534 if (vmf->address == addr) in filemap_map_pages()
3537 do_set_pte(vmf, page, addr); in filemap_map_pages()
3539 update_mmu_cache(vma, addr, vmf->pte); in filemap_map_pages()
3555 pte_unmap_unlock(vmf->pte, vmf->ptl); in filemap_map_pages()
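
These hits are filemap_map_pages(), the ->map_pages handler that speculatively maps already-uptodate folios around the faulting address under a single PTE lock. An abridged sketch; first_map_page()/next_map_page() are this file's internal helpers that walk the XArray and return the next locked, uptodate folio in range, and the HWPoison check, mmap_miss decay, and per-page iteration within large folios are elided:

vm_fault_t filemap_map_pages(struct vm_fault *vmf,
			     pgoff_t start_pgoff, pgoff_t end_pgoff)
{
	struct vm_area_struct *vma = vmf->vma;
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	pgoff_t last_pgoff = start_pgoff;
	unsigned long addr;
	XA_STATE(xas, &mapping->i_pages, start_pgoff);
	struct folio *folio;
	struct page *page;
	vm_fault_t ret = 0;

	rcu_read_lock();
	folio = first_map_page(mapping, &xas, end_pgoff);
	if (!folio)
		goto out;

	/* A PMD-sized folio may satisfy the whole range at once. */
	if (filemap_map_pmd(vmf, folio, start_pgoff)) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	addr = vma->vm_start + ((start_pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl);
	do {
		page = folio_file_page(folio, xas.xa_index);

		/* Step pte/addr forward by the gap since the last folio. */
		addr += (xas.xa_index - last_pgoff) << PAGE_SHIFT;
		vmf->pte += xas.xa_index - last_pgoff;
		last_pgoff = xas.xa_index;

		if (!pte_none(*vmf->pte))
			goto unlock;

		/* The faulting address itself: report the fault handled. */
		if (vmf->address == addr)
			ret = VM_FAULT_NOPAGE;

		do_set_pte(vmf, page, addr);
		/* No need to invalidate: a not-present page isn't cached. */
		update_mmu_cache(vma, addr, vmf->pte);
		folio_unlock(folio);	/* reference consumed by the PTE */
		continue;
unlock:
		folio_unlock(folio);
		folio_put(folio);
	} while ((folio = next_map_page(mapping, &xas, end_pgoff)) != NULL);
	pte_unmap_unlock(vmf->pte, vmf->ptl);
out:
	rcu_read_unlock();
	return ret;
}
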
3563 vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf) in filemap_page_mkwrite() argument
3565 struct address_space *mapping = vmf->vma->vm_file->f_mapping; in filemap_page_mkwrite()
3566 struct folio *folio = page_folio(vmf->page); in filemap_page_mkwrite()
3570 file_update_time(vmf->vma->vm_file); in filemap_page_mkwrite()
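
These three hits are the CONFIG_MMU filemap_page_mkwrite(), the write-notify handler that dirties the folio before the PTE is made writable. A sketch of the full function, reconstructed around the matched lines:

vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	struct folio *folio = page_folio(vmf->page);
	vm_fault_t ret = VM_FAULT_LOCKED;

	sb_start_pagefault(mapping->host->i_sb);
	file_update_time(vmf->vma->vm_file);
	folio_lock(folio);
	if (folio->mapping != mapping) {
		/* Truncated under us: make the caller retry the fault. */
		folio_unlock(folio);
		ret = VM_FAULT_NOPAGE;
		goto out;
	}
	/*
	 * Dirty the folio while it is still write-protected, so a
	 * concurrent filesystem freeze sees it and writeprotects again.
	 */
	folio_mark_dirty(folio);
	folio_wait_stable(folio);
out:
	sb_end_pagefault(mapping->host->i_sb);
	return ret;
}
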
3618 vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf) in filemap_page_mkwrite() argument
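
The second definition at 3618 is the !CONFIG_MMU stub from the same file's #else branch: without an MMU there is no write-protect fault, so the handler simply fails. Sketch:

vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}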