Lines matching refs:vmf (all hits are in the Linux kernel's fs/dax.c, the DAX page-fault handling code)

1003 static int copy_cow_page_dax(struct vm_fault *vmf, const struct iomap_iter *iter)  in copy_cow_page_dax()  argument
1017 vto = kmap_atomic(vmf->cow_page); in copy_cow_page_dax()
1018 copy_user_page(vto, kaddr, vmf->address, vmf->cow_page); in copy_cow_page_dax()
1042 static void *dax_insert_entry(struct xa_state *xas, struct vm_fault *vmf, in dax_insert_entry() argument
1046 struct address_space *mapping = vmf->vma->vm_file->f_mapping; in dax_insert_entry()
1049 bool dirty = write && !dax_fault_is_synchronous(iter, vmf->vma); in dax_insert_entry()
1071 dax_associate_entry(new_entry, mapping, vmf->vma, in dax_insert_entry()
1072 vmf->address, shared); in dax_insert_entry()
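    Note: dax_insert_entry() installs the new DAX entry in the mapping's XArray and,
    via dax_associate_entry(), ties the backing folio to this mapping and address. The
    `dirty` test at 1049 is what keeps synchronous faults clean; a sketch of how it is
    consumed later in the same function (from my reading of fs/dax.c, not from the hits
    above, so version-sensitive):

        /*
         * A non-synchronous write fault leaves the entry dirty so that a
         * later fsync()/msync() knows to flush CPU caches over this range.
         */
        bool dirty = write && !dax_fault_is_synchronous(iter, vmf->vma);

        if (dirty)
                __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

        /* ... entry construction elided ... */

        if (dirty)
                xas_set_mark(xas, PAGECACHE_TAG_DIRTY);
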
1358 static vm_fault_t dax_load_hole(struct xa_state *xas, struct vm_fault *vmf, in dax_load_hole() argument
1362 unsigned long vaddr = vmf->address; in dax_load_hole()
1366 *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, DAX_ZERO_PAGE); in dax_load_hole()
1368 ret = vmf_insert_page_mkwrite(vmf, pfn_to_page(pfn), false); in dax_load_hole()
1369 trace_dax_load_hole(inode, vmf, ret); in dax_load_hole()
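    Note: dax_load_hole() services a read fault over a hole: instead of allocating
    media it maps the shared zero page read-only and records a DAX_ZERO_PAGE entry, so
    a later write re-faults and can allocate for real. The hits are nearly the whole
    function; assembled as a sketch:

        static vm_fault_t dax_load_hole(struct xa_state *xas, struct vm_fault *vmf,
                        const struct iomap_iter *iter, void **entry)
        {
                struct inode *inode = iter->inode;
                unsigned long vaddr = vmf->address;
                unsigned long pfn = my_zero_pfn(vaddr); /* shared zero page */
                vm_fault_t ret;

                *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, DAX_ZERO_PAGE);

                /* write = false: any store will fault again for real backing */
                ret = vmf_insert_page_mkwrite(vmf, pfn_to_page(pfn), false);
                trace_dax_load_hole(inode, vmf, ret);
                return ret;
        }
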
1374 static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf, in dax_pmd_load_hole() argument
1377 struct address_space *mapping = vmf->vma->vm_file->f_mapping; in dax_pmd_load_hole()
1378 unsigned long pmd_addr = vmf->address & PMD_MASK; in dax_pmd_load_hole()
1379 struct vm_area_struct *vma = vmf->vma; in dax_pmd_load_hole()
1387 zero_folio = mm_get_huge_zero_folio(vmf->vma->vm_mm); in dax_pmd_load_hole()
1393 *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, in dax_pmd_load_hole()
1402 ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd); in dax_pmd_load_hole()
1403 if (!pmd_none(*(vmf->pmd))) { in dax_pmd_load_hole()
1409 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable); in dax_pmd_load_hole()
1412 pmd_entry = folio_mk_pmd(zero_folio, vmf->vma->vm_page_prot); in dax_pmd_load_hole()
1413 set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry); in dax_pmd_load_hole()
1415 trace_dax_pmd_load_hole(inode, vmf, zero_folio, *entry); in dax_pmd_load_hole()
1421 trace_dax_pmd_load_hole_fallback(inode, vmf, zero_folio, *entry); in dax_pmd_load_hole()
1425 static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf, in dax_pmd_load_hole() argument
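    Note: the PMD variant maps the huge zero folio instead: it pre-deposits a page
    table (so a later split of the huge PMD cannot fail on allocation), takes the PMD
    lock, re-checks that the PMD is still empty (1403), and only then installs the 2M
    zero mapping (1412-1413). The second signature at 1425 is the !CONFIG_FS_DAX_PMD
    stub; presumably just:

        #else /* !CONFIG_FS_DAX_PMD */
        static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
                        const struct iomap_iter *iter, void **entry)
        {
                /* no PMD support built in: have the caller retry with PTEs */
                return VM_FAULT_FALLBACK;
        }
        #endif
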
1793 static vm_fault_t dax_fault_cow_page(struct vm_fault *vmf, in dax_fault_cow_page() argument
1802 clear_user_highpage(vmf->cow_page, vmf->address); in dax_fault_cow_page()
1805 error = copy_cow_page_dax(vmf, iter); in dax_fault_cow_page()
1816 __SetPageUptodate(vmf->cow_page); in dax_fault_cow_page()
1817 ret = finish_fault(vmf); in dax_fault_cow_page()
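    Note: dax_fault_cow_page() is the write-fault path for MAP_PRIVATE mappings: holes
    and unwritten extents zero the CoW page, mapped extents copy from media, then
    finish_fault() installs the private copy. Sketch of the dispatch around the hits
    (dax_fault_return() is an errno-to-vm_fault_t helper in the same file):

        switch (iter->iomap.type) {
        case IOMAP_HOLE:
        case IOMAP_UNWRITTEN:
                clear_user_highpage(vmf->cow_page, vmf->address);
                break;
        case IOMAP_MAPPED:
                error = copy_cow_page_dax(vmf, iter);
                break;
        default:
                WARN_ON_ONCE(1);
                error = -EIO;
                break;
        }

        if (error)
                return dax_fault_return(error);

        __SetPageUptodate(vmf->cow_page);
        ret = finish_fault(vmf);
        if (!ret)
                ret = VM_FAULT_DONE_COW;
        return ret;
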
1832 static vm_fault_t dax_fault_iter(struct vm_fault *vmf, in dax_fault_iter() argument
1847 if (!pmd && vmf->cow_page) in dax_fault_iter()
1848 return dax_fault_cow_page(vmf, iter); in dax_fault_iter()
1854 return dax_load_hole(xas, vmf, iter, entry); in dax_fault_iter()
1855 return dax_pmd_load_hole(xas, vmf, iter, entry); in dax_fault_iter()
1867 *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, entry_flags); in dax_fault_iter()
1876 if (dax_fault_is_synchronous(iter, vmf->vma)) in dax_fault_iter()
1881 ret = vmf_insert_folio_pmd(vmf, pfn_folio(pfn), write); in dax_fault_iter()
1883 ret = vmf_insert_page_mkwrite(vmf, pfn_to_page(pfn), write); in dax_fault_iter()
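    Note: dax_fault_iter() is the common per-extent worker for both fault sizes: CoW
    pages and holes are diverted first (1847-1855), then the entry is inserted and a
    PTE or PMD is installed, unless the fault is synchronous, in which case the pfn is
    handed back to the filesystem for dax_finish_sync_fault(). The tail, roughly
    (dax_fault_synchronous_pfnp() is a small helper in the same file):

        if (dax_fault_is_synchronous(iter, vmf->vma))
                return dax_fault_synchronous_pfnp(pfnp, pfn);

        /* install the mapping at the size we were asked for */
        if (pmd)
                ret = vmf_insert_folio_pmd(vmf, pfn_folio(pfn), write);
        else
                ret = vmf_insert_page_mkwrite(vmf, pfn_to_page(pfn), write);

        return ret;
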
1889 static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, unsigned long *pfnp, in dax_iomap_pte_fault() argument
1892 struct address_space *mapping = vmf->vma->vm_file->f_mapping; in dax_iomap_pte_fault()
1893 XA_STATE(xas, &mapping->i_pages, vmf->pgoff); in dax_iomap_pte_fault()
1896 .pos = (loff_t)vmf->pgoff << PAGE_SHIFT, in dax_iomap_pte_fault()
1904 trace_dax_pte_fault(iter.inode, vmf, ret); in dax_iomap_pte_fault()
1915 if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page) in dax_iomap_pte_fault()
1930 if (pmd_trans_huge(*vmf->pmd)) { in dax_iomap_pte_fault()
1941 ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, false); in dax_iomap_pte_fault()
1945 count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT); in dax_iomap_pte_fault()
1963 trace_dax_pte_fault_done(iter.inode, vmf, ret); in dax_iomap_pte_fault()
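    Note: dax_iomap_pte_fault() drives the iomap iterator for a single-page fault. Two
    details visible above: a write fault without a CoW page sets IOMAP_WRITE on the
    iterator (1915), and a fault that forced a fresh block allocation is accounted as
    a major fault (1945). The accounting, roughly as it sits in the iteration loop
    (hedged from my reading of fs/dax.c; exact placement varies by version):

        ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, false);

        /* a freshly allocated extent (IOMAP_F_NEW) means real work was done */
        if (ret != VM_FAULT_SIGBUS && (iter.iomap.flags & IOMAP_F_NEW)) {
                count_vm_event(PGMAJFAULT);
                count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
                ret |= VM_FAULT_MAJOR;
        }
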
1968 static bool dax_fault_check_fallback(struct vm_fault *vmf, struct xa_state *xas, in dax_fault_check_fallback() argument
1971 unsigned long pmd_addr = vmf->address & PMD_MASK; in dax_fault_check_fallback()
1972 bool write = vmf->flags & FAULT_FLAG_WRITE; in dax_fault_check_fallback()
1980 if ((vmf->pgoff & PG_PMD_COLOUR) != in dax_fault_check_fallback()
1981 ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR)) in dax_fault_check_fallback()
1985 if (write && !(vmf->vma->vm_flags & VM_SHARED)) in dax_fault_check_fallback()
1989 if (pmd_addr < vmf->vma->vm_start) in dax_fault_check_fallback()
1991 if ((pmd_addr + PMD_SIZE) > vmf->vma->vm_end) in dax_fault_check_fallback()
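    Note: dax_fault_check_fallback() collects every reason a PMD fault cannot proceed;
    returning true sends the fault back down the PTE path. Annotated sketch of the
    checks above (PG_PMD_COLOUR is (PMD_SIZE >> PAGE_SHIFT) - 1 in this file):

        /*
         * The file offset and the virtual address must have the same PMD
         * colour, i.e. the 2M block in the file lines up with a 2M block
         * in the address space.
         */
        if ((vmf->pgoff & PG_PMD_COLOUR) !=
            ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
                return true;

        /* writes to MAP_PRIVATE mappings need per-page CoW */
        if (write && !(vmf->vma->vm_flags & VM_SHARED))
                return true;

        /* the whole 2M range must sit inside the VMA ... */
        if (pmd_addr < vmf->vma->vm_start)
                return true;
        if ((pmd_addr + PMD_SIZE) > vmf->vma->vm_end)
                return true;

        /* ... and inside the file (max_pgoff derives from i_size) */
        if (xas->xa_index >= max_pgoff)
                return true;

        return false;
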
2001 static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, unsigned long *pfnp, in dax_iomap_pmd_fault() argument
2004 struct address_space *mapping = vmf->vma->vm_file->f_mapping; in dax_iomap_pmd_fault()
2005 XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER); in dax_iomap_pmd_fault()
2015 if (vmf->flags & FAULT_FLAG_WRITE) in dax_iomap_pmd_fault()
2025 trace_dax_pmd_fault(iter.inode, vmf, max_pgoff, 0); in dax_iomap_pmd_fault()
2032 if (dax_fault_check_fallback(vmf, &xas, max_pgoff)) in dax_iomap_pmd_fault()
2053 if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd)) { in dax_iomap_pmd_fault()
2063 ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, true); in dax_iomap_pmd_fault()
2074 split_huge_pmd(vmf->vma, vmf->pmd, vmf->address); in dax_iomap_pmd_fault()
2078 trace_dax_pmd_fault_done(iter.inode, vmf, max_pgoff, ret); in dax_iomap_pmd_fault()
2082 static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, unsigned long *pfnp, in dax_iomap_pmd_fault() argument
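    Note: two races are handled around the iteration: 2053 re-checks the PMD after the
    DAX entry is locked (another thread may have installed a PTE table or a huge entry
    meanwhile), and on a failed fault 2074 splits the huge PMD so the retry starts
    from a clean PTE state. The second signature at 2082 is again the
    !CONFIG_FS_DAX_PMD stub, presumably:

        #else
        static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, unsigned long *pfnp,
                                       const struct iomap_ops *ops)
        {
                return VM_FAULT_FALLBACK;
        }
        #endif /* CONFIG_FS_DAX_PMD */
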
2102 vm_fault_t dax_iomap_fault(struct vm_fault *vmf, unsigned int order, in dax_iomap_fault() argument
2107 return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops); in dax_iomap_fault()
2109 return dax_iomap_pmd_fault(vmf, pfnp, ops); in dax_iomap_fault()
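    Note: dax_iomap_fault() is the exported entry point that filesystems call from
    their vm_operations_struct handlers; it only dispatches on the fault order.
    Sketch, with the parameter list reconstructed from the call sites above (`order`
    replaced the old enum page_entry_size in recent kernels):

        vm_fault_t dax_iomap_fault(struct vm_fault *vmf, unsigned int order,
                    unsigned long *pfnp, int *iomap_errp, const struct iomap_ops *ops)
        {
                if (order == 0)
                        return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops);
                else if (order == PMD_ORDER)
                        return dax_iomap_pmd_fault(vmf, pfnp, ops);
                else
                        return VM_FAULT_FALLBACK;
        }
        EXPORT_SYMBOL_GPL(dax_iomap_fault);

    ext4 and xfs reach this from their ->fault and ->huge_fault handlers, passing the
    order straight through.
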
2124 static vm_fault_t dax_insert_pfn_mkwrite(struct vm_fault *vmf, in dax_insert_pfn_mkwrite() argument
2127 struct address_space *mapping = vmf->vma->vm_file->f_mapping; in dax_insert_pfn_mkwrite()
2128 XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order); in dax_insert_pfn_mkwrite()
2140 trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf, in dax_insert_pfn_mkwrite()
2150 ret = vmf_insert_page_mkwrite(vmf, &folio->page, true); in dax_insert_pfn_mkwrite()
2153 ret = vmf_insert_folio_pmd(vmf, folio, FAULT_FLAG_WRITE); in dax_insert_pfn_mkwrite()
2159 trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret); in dax_insert_pfn_mkwrite()
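    Note: dax_insert_pfn_mkwrite() re-inserts an entry that is already present in the
    XArray, now with write permission, after the metadata for it has been made
    durable; if the entry vanished in the meantime it returns VM_FAULT_NOPAGE so the
    fault is retried (2140 traces that case). The insertion step by order, from the
    hits above:

        if (order == 0)
                ret = vmf_insert_page_mkwrite(vmf, &folio->page, true);
        #ifdef CONFIG_FS_DAX_PMD
        else if (order == PMD_ORDER)
                ret = vmf_insert_folio_pmd(vmf, folio, FAULT_FLAG_WRITE);
        #endif
        else
                ret = VM_FAULT_FALLBACK;
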
2173 vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf, unsigned int order, in dax_finish_sync_fault() argument
2177 loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT; in dax_finish_sync_fault()
2180 err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1); in dax_finish_sync_fault()
2183 return dax_insert_pfn_mkwrite(vmf, pfn, order); in dax_finish_sync_fault()
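    Note: dax_finish_sync_fault() is the second half of a synchronous fault: the pfn
    was stashed by dax_fault_iter() earlier, the filesystem flushes its metadata for
    the range here, and only then is the mapping made writable. Assembled from the
    hits (error handling hedged):

        vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf, unsigned int order,
                        unsigned long pfn)
        {
                int err;
                loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT;
                size_t len = PAGE_SIZE << order;

                /* make metadata for this range durable before allowing writes */
                err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1);
                if (err)
                        return VM_FAULT_SIGBUS;
                return dax_insert_pfn_mkwrite(vmf, pfn, order);
        }
        EXPORT_SYMBOL_GPL(dax_finish_sync_fault);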