Lines matching refs: vmf (struct vm_fault references in fs/dax.c)
817 static int copy_cow_page_dax(struct vm_fault *vmf, const struct iomap_iter *iter) in copy_cow_page_dax() argument
831 vto = kmap_atomic(vmf->cow_page); in copy_cow_page_dax()
832 copy_user_page(vto, kaddr, vmf->address, vmf->cow_page); in copy_cow_page_dax()
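
Lines 831-832 are the core of the private-mapping (copy-on-write) write-fault path: the source block is already reachable through the DAX device's kernel mapping, so the data is copied into vmf->cow_page with a plain kmap_atomic()/copy_user_page() pair. A minimal sketch of that step, assuming kaddr already points at the source block (the real function obtains it via dax_direct_access() under dax_read_lock(), elided here):

	/* Sketch only: kaddr is assumed to address the source DAX block. */
	void *vto = kmap_atomic(vmf->cow_page);		/* map the destination CoW page */
	copy_user_page(vto, kaddr, vmf->address, vmf->cow_page);
	kunmap_atomic(vto);				/* drop the temporary mapping */
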
856 static void *dax_insert_entry(struct xa_state *xas, struct vm_fault *vmf, in dax_insert_entry() argument
860 struct address_space *mapping = vmf->vma->vm_file->f_mapping; in dax_insert_entry()
863 bool dirty = write && !dax_fault_is_synchronous(iter, vmf->vma); in dax_insert_entry()
885 dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address, in dax_insert_entry()
1173 static vm_fault_t dax_load_hole(struct xa_state *xas, struct vm_fault *vmf, in dax_load_hole() argument
1177 unsigned long vaddr = vmf->address; in dax_load_hole()
1181 *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, DAX_ZERO_PAGE); in dax_load_hole()
1183 ret = vmf_insert_mixed(vmf->vma, vaddr, pfn); in dax_load_hole()
1184 trace_dax_load_hole(inode, vmf, ret); in dax_load_hole()
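
dax_load_hole() (lines 1173-1184) services a read fault over an unwritten hole: it records a DAX_ZERO_PAGE entry in the mapping and maps the shared zero page. An approximate reconstruction of its body, hedged since details vary between kernel versions:

	/* Approximate shape of dax_load_hole(); may differ slightly from
	 * the kernel version this listing was taken from. */
	pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vmf->address));
	vm_fault_t ret;

	*entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, DAX_ZERO_PAGE);
	ret = vmf_insert_mixed(vmf->vma, vmf->address, pfn);
	trace_dax_load_hole(inode, vmf, ret);
	return ret;
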
1189 static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf, in dax_pmd_load_hole() argument
1192 struct address_space *mapping = vmf->vma->vm_file->f_mapping; in dax_pmd_load_hole()
1193 unsigned long pmd_addr = vmf->address & PMD_MASK; in dax_pmd_load_hole()
1194 struct vm_area_struct *vma = vmf->vma; in dax_pmd_load_hole()
1202 zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm); in dax_pmd_load_hole()
1208 *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, in dax_pmd_load_hole()
1217 ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd); in dax_pmd_load_hole()
1218 if (!pmd_none(*(vmf->pmd))) { in dax_pmd_load_hole()
1224 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable); in dax_pmd_load_hole()
1227 pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot); in dax_pmd_load_hole()
1229 set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry); in dax_pmd_load_hole()
1231 trace_dax_pmd_load_hole(inode, vmf, zero_page, *entry); in dax_pmd_load_hole()
1237 trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry); in dax_pmd_load_hole()
1241 static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf, in dax_pmd_load_hole() argument
1574 static vm_fault_t dax_fault_cow_page(struct vm_fault *vmf, in dax_fault_cow_page() argument
1583 clear_user_highpage(vmf->cow_page, vmf->address); in dax_fault_cow_page()
1586 error = copy_cow_page_dax(vmf, iter); in dax_fault_cow_page()
1597 __SetPageUptodate(vmf->cow_page); in dax_fault_cow_page()
1598 ret = finish_fault(vmf); in dax_fault_cow_page()
1613 static vm_fault_t dax_fault_iter(struct vm_fault *vmf, in dax_fault_iter() argument
1627 if (!pmd && vmf->cow_page) in dax_fault_iter()
1628 return dax_fault_cow_page(vmf, iter); in dax_fault_iter()
1634 return dax_load_hole(xas, vmf, iter, entry); in dax_fault_iter()
1635 return dax_pmd_load_hole(xas, vmf, iter, entry); in dax_fault_iter()
1647 *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, entry_flags); in dax_fault_iter()
1655 if (dax_fault_is_synchronous(iter, vmf->vma)) in dax_fault_iter()
1660 return vmf_insert_pfn_pmd(vmf, pfn, write); in dax_fault_iter()
1664 return vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn); in dax_fault_iter()
1665 return vmf_insert_mixed(vmf->vma, vmf->address, pfn); in dax_fault_iter()
1668 static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp, in dax_iomap_pte_fault() argument
1671 struct address_space *mapping = vmf->vma->vm_file->f_mapping; in dax_iomap_pte_fault()
1672 XA_STATE(xas, &mapping->i_pages, vmf->pgoff); in dax_iomap_pte_fault()
1675 .pos = (loff_t)vmf->pgoff << PAGE_SHIFT, in dax_iomap_pte_fault()
1683 trace_dax_pte_fault(iter.inode, vmf, ret); in dax_iomap_pte_fault()
1694 if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page) in dax_iomap_pte_fault()
1709 if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) { in dax_iomap_pte_fault()
1720 ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, false); in dax_iomap_pte_fault()
1724 count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT); in dax_iomap_pte_fault()
1740 trace_dax_pte_fault_done(iter.inode, vmf, ret); in dax_iomap_pte_fault()
1745 static bool dax_fault_check_fallback(struct vm_fault *vmf, struct xa_state *xas, in dax_fault_check_fallback() argument
1748 unsigned long pmd_addr = vmf->address & PMD_MASK; in dax_fault_check_fallback()
1749 bool write = vmf->flags & FAULT_FLAG_WRITE; in dax_fault_check_fallback()
1757 if ((vmf->pgoff & PG_PMD_COLOUR) != in dax_fault_check_fallback()
1758 ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR)) in dax_fault_check_fallback()
1762 if (write && !(vmf->vma->vm_flags & VM_SHARED)) in dax_fault_check_fallback()
1766 if (pmd_addr < vmf->vma->vm_start) in dax_fault_check_fallback()
1768 if ((pmd_addr + PMD_SIZE) > vmf->vma->vm_end) in dax_fault_check_fallback()
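
dax_fault_check_fallback() (lines 1745-1768) decides whether a PMD-sized fault must fall back to PTEs: the file offset and the faulting virtual address must be congruent modulo a PMD (the "colour" check at 1757-1758), a write to a private (CoW) mapping rules out a PMD, and the whole PMD range must lie inside the VMA. With 4 KiB pages and 2 MiB PMDs, PG_PMD_COLOUR is (PMD_SIZE >> PAGE_SHIFT) - 1 = 511, so for example a file mapped at a virtual address that is only 1 MiB aligned can never pass the colour check and always falls back. A compact sketch of that check (reconstructed; returning true tells the caller to fall back):

	if ((vmf->pgoff & PG_PMD_COLOUR) !=
	    ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
		return true;	/* offsets disagree within the PMD: use PTEs */
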
1778 static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp, in dax_iomap_pmd_fault() argument
1781 struct address_space *mapping = vmf->vma->vm_file->f_mapping; in dax_iomap_pmd_fault()
1782 XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER); in dax_iomap_pmd_fault()
1793 if (vmf->flags & FAULT_FLAG_WRITE) in dax_iomap_pmd_fault()
1803 trace_dax_pmd_fault(iter.inode, vmf, max_pgoff, 0); in dax_iomap_pmd_fault()
1810 if (dax_fault_check_fallback(vmf, &xas, max_pgoff)) in dax_iomap_pmd_fault()
1831 if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) && in dax_iomap_pmd_fault()
1832 !pmd_devmap(*vmf->pmd)) { in dax_iomap_pmd_fault()
1842 ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, true); in dax_iomap_pmd_fault()
1851 split_huge_pmd(vmf->vma, vmf->pmd, vmf->address); in dax_iomap_pmd_fault()
1855 trace_dax_pmd_fault_done(iter.inode, vmf, max_pgoff, ret); in dax_iomap_pmd_fault()
1859 static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp, in dax_iomap_pmd_fault() argument
1879 vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size, in dax_iomap_fault() argument
1884 return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops); in dax_iomap_fault()
1886 return dax_iomap_pmd_fault(vmf, pfnp, ops); in dax_iomap_fault()
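
dax_iomap_fault() (lines 1879-1886) is the entry point a filesystem calls from its fault handlers; it dispatches to dax_iomap_pte_fault() or dax_iomap_pmd_fault() based on pe_size. A hedged sketch of a typical caller, modelled on how xfs and ext4 use it; example_iomap_ops, the handler name, and the locking comments are placeholders, not part of this listing:

	static vm_fault_t example_dax_huge_fault(struct vm_fault *vmf,
						 enum page_entry_size pe_size)
	{
		pfn_t pfn;
		vm_fault_t ret;

		/* for writes: sb_start_pagefault(), file_update_time(), fs locks */
		ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL, &example_iomap_ops);
		if (ret & VM_FAULT_NEEDDSYNC)
			ret = dax_finish_sync_fault(vmf, pe_size, pfn);
		/* drop fs locks, sb_end_pagefault() for writes */
		return ret;
	}
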
1903 dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order) in dax_insert_pfn_mkwrite() argument
1905 struct address_space *mapping = vmf->vma->vm_file->f_mapping; in dax_insert_pfn_mkwrite()
1906 XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order); in dax_insert_pfn_mkwrite()
1917 trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf, in dax_insert_pfn_mkwrite()
1925 ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn); in dax_insert_pfn_mkwrite()
1928 ret = vmf_insert_pfn_pmd(vmf, pfn, FAULT_FLAG_WRITE); in dax_insert_pfn_mkwrite()
1933 trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret); in dax_insert_pfn_mkwrite()
1947 vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf, in dax_finish_sync_fault() argument
1951 loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT; in dax_finish_sync_fault()
1955 err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1); in dax_finish_sync_fault()
1958 return dax_insert_pfn_mkwrite(vmf, pfn, order); in dax_finish_sync_fault()
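
dax_finish_sync_fault() (lines 1947-1958) completes a synchronous (MAP_SYNC) write fault: the fault path returns VM_FAULT_NEEDDSYNC instead of installing a writeable entry, the filesystem flushes the affected range's metadata (the vfs_fsync_range() call at 1955 covers one page or one PMD depending on pe_size), and the PFN saved by the fault path is then mapped writeable through dax_insert_pfn_mkwrite() (lines 1903-1933). Continuing the sketch above, a filesystem would wire such handlers into its vm_operations_struct roughly as follows (names are hypothetical; the PTE-sized entry points are thin wrappers passing PE_SIZE_PTE):

	static vm_fault_t example_dax_fault(struct vm_fault *vmf)
	{
		return example_dax_huge_fault(vmf, PE_SIZE_PTE);
	}

	static const struct vm_operations_struct example_dax_vm_ops = {
		.fault		= example_dax_fault,
		.huge_fault	= example_dax_huge_fault,
		.page_mkwrite	= example_dax_fault,
		.pfn_mkwrite	= example_dax_fault,
	};
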