/mm/
memory.c
    3588  vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl);  in wp_page_copy()
    3701  vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address,  in finish_mkwrite_fault()
    3710  update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);  in finish_mkwrite_fault()
    4211  vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,  in pte_marker_clear()
    4224  pte_clear(vmf->vma->vm_mm, vmf->address, vmf->pte);  in pte_marker_clear()
    4497  vmf->address, &vmf->ptl);  in do_swap_page()
    4597  vmf->address, &vmf->ptl);  in do_swap_page()
    5031  vmf->address, &vmf->ptl);  in do_anonymous_page()
    5687  vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,  in do_fault()
    6063  update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);  in handle_pte_fault()
    [all …]
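
The recurring shape in memory.c is: map and lock the PTE through vmf->pmd, recheck that the entry is still the one the fault originally saw, and only then modify it. A minimal sketch of that pattern, assuming ordinary kernel fault-path context; handle_my_fault() is a hypothetical caller, not a function in memory.c:

    static vm_fault_t handle_my_fault(struct vm_fault *vmf)
    {
        struct mm_struct *mm = vmf->vma->vm_mm;

        vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl);
        if (!vmf->pte)                  /* page table vanished under us */
            return 0;                   /* let the fault be retried */

        if (!pte_same(ptep_get(vmf->pte), vmf->orig_pte)) {
            /* raced with another fault: flush any stale TLB entry */
            update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
            pte_unmap_unlock(vmf->pte, vmf->ptl);
            return 0;
        }

        /* ... install or update the entry under vmf->ptl ... */
        pte_unmap_unlock(vmf->pte, vmf->ptl);
        return 0;
    }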
|
huge_memory.c
    1235  vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);  in __do_huge_pmd_anonymous_page()
    1346  vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);  in do_huge_pmd_anonymous_page()
    1842  vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud);  in huge_pud_set_accessed()
    1846  touch_pud(vmf->vma, vmf->address, vmf->pud, write);  in huge_pud_set_accessed()
    1856  vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);  in huge_pmd_set_accessed()
    1857  if (unlikely(!pmd_same(*vmf->pmd, vmf->orig_pmd)))  in huge_pmd_set_accessed()
    1860  touch_pmd(vmf->vma, vmf->address, vmf->pmd, write);  in huge_pmd_set_accessed()
    1881  vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);  in do_huge_zero_wp_pmd()
    1907  vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd);  in do_huge_pmd_wp_page()
    2034  vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);  in do_huge_pmd_numa_page()
    [all …]
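
At the PMD level the same recheck is done against vmf->orig_pmd under pmd_lock(), as huge_pmd_set_accessed() shows at lines 1856-1860 above. A sketch of that shape; my_huge_pmd_op() is a hypothetical helper, not huge_memory.c code:

    static void my_huge_pmd_op(struct vm_fault *vmf)
    {
        vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
        if (unlikely(!pmd_same(*vmf->pmd, vmf->orig_pmd)))
            goto unlock;        /* raced with a zap or another fault */

        /* ... operate on the huge PMD, e.g. mark it accessed ... */
    unlock:
        spin_unlock(vmf->ptl);
    }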
|
filemap.c
    3363  ptep = pte_offset_map_ro_nolock(vma->vm_mm, vmf->pmd, vmf->address,  in filemap_fault_recheck_pte_none()
    3364  &vmf->ptl);  in filemap_fault_recheck_pte_none()
    3371  spin_lock(vmf->ptl);  in filemap_fault_recheck_pte_none()
    3457  vmf->gfp_mask);  in filemap_fault()
    3586  if (pmd_none(*vmf->pmd) && vmf->prealloc_pte)  in filemap_map_pmd()
    3587  pmd_install(mm, vmf->pmd, &vmf->prealloc_pte);  in filemap_map_pmd()
    3682  vmf->pte += count;  in filemap_map_folio_range()
    3695  vmf->pte = old_ptep;  in filemap_map_folio_range()
    3758  vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl);  in filemap_map_pages()
    3759  if (!vmf->pte) {  in filemap_map_pages()
    [all …]
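
filemap_fault_recheck_pte_none() (lines 3363-3371) maps the PTE without taking its lock up front and only locks when it must inspect the entry. A minimal sketch of that pattern, assuming the same fault-path context; my_recheck_pte_none() is hypothetical:

    static bool my_recheck_pte_none(struct vm_fault *vmf)
    {
        bool none = false;
        pte_t *ptep;

        ptep = pte_offset_map_ro_nolock(vmf->vma->vm_mm, vmf->pmd,
                                        vmf->address, &vmf->ptl);
        if (!ptep)
            return false;

        spin_lock(vmf->ptl);
        if (pte_none(ptep_get(ptep)))
            none = true;
        spin_unlock(vmf->ptl);
        pte_unmap(ptep);
        return none;
    }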
|
hugetlb.c
    6461  if (!hugetlb_pte_stable(h, mm, vmf->address, vmf->pte, vmf->orig_pte)) {  in hugetlb_no_page()
    6490  if (hugetlb_pte_stable(h, mm, vmf->address, vmf->pte, vmf->orig_pte))  in hugetlb_no_page()
    6538  if (!hugetlb_pte_stable(h, mm, vmf->address, vmf->pte, vmf->orig_pte)) {  in hugetlb_no_page()
    6562  vmf->ptl = huge_pte_lock(h, mm, vmf->pte);  in hugetlb_no_page()
    6565  if (!pte_same(huge_ptep_get(mm, vmf->address, vmf->pte), vmf->orig_pte))  in hugetlb_no_page()
    6702  vmf.orig_pte = huge_ptep_get(mm, vmf.address, vmf.pte);  in hugetlb_fault()
    6765  vmf.ptl = huge_pte_lock(h, mm, vmf.pte);  in hugetlb_fault()
    6768  if (unlikely(!pte_same(vmf.orig_pte, huge_ptep_get(mm, vmf.address, vmf.pte))))  in hugetlb_fault()
    6782  set_huge_pte_at(mm, vmf.address, vmf.pte, vmf.orig_pte,  in hugetlb_fault()
    6809  vmf.orig_pte = pte_mkyoung(vmf.orig_pte);  in hugetlb_fault()
    [all …]
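
hugetlb applies the same stability discipline at huge-PTE granularity: take the hstate-aware lock through vmf->pte, then compare the live entry against the vmf->orig_pte captured earlier (lines 6562-6565 and 6765-6768). A hedged sketch; my_hugetlb_still_stable() is a hypothetical helper:

    static bool my_hugetlb_still_stable(struct hstate *h, struct mm_struct *mm,
                                        struct vm_fault *vmf)
    {
        bool stable;

        vmf->ptl = huge_pte_lock(h, mm, vmf->pte);
        stable = pte_same(huge_ptep_get(mm, vmf->address, vmf->pte),
                          vmf->orig_pte);
        spin_unlock(vmf->ptl);
        return stable;
    }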
|
secretmem.c
    50   static vm_fault_t secretmem_fault(struct vm_fault *vmf)  in secretmem_fault() argument
    52   struct address_space *mapping = vmf->vma->vm_file->f_mapping;  in secretmem_fault()
    53   struct inode *inode = file_inode(vmf->vma->vm_file);  in secretmem_fault()
    54   pgoff_t offset = vmf->pgoff;  in secretmem_fault()
    55   gfp_t gfp = vmf->gfp_mask;  in secretmem_fault()
    61   if (((loff_t)vmf->pgoff << PAGE_SHIFT) >= i_size_read(inode))  in secretmem_fault()
    103  vmf->page = folio_file_page(folio, vmf->pgoff);  in secretmem_fault()
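
The lines above trace a complete .fault handler: bounds-check the fault offset against i_size, find the folio, hand the page back through vmf->page. A reduced skeleton of that flow, assuming the page-cache lookup succeeds; my_fault() is hypothetical and elides secretmem's allocation and retry logic:

    static vm_fault_t my_fault(struct vm_fault *vmf)
    {
        struct address_space *mapping = vmf->vma->vm_file->f_mapping;
        struct inode *inode = file_inode(vmf->vma->vm_file);
        struct folio *folio;

        /* faults past EOF get SIGBUS, as at line 61 above */
        if (((loff_t)vmf->pgoff << PAGE_SHIFT) >= i_size_read(inode))
            return VM_FAULT_SIGBUS;

        folio = filemap_lock_folio(mapping, vmf->pgoff);
        if (IS_ERR(folio))
            return VM_FAULT_SIGBUS;     /* real code would allocate here */

        vmf->page = folio_file_page(folio, vmf->pgoff);
        return VM_FAULT_LOCKED;         /* folio handed back locked */
    }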
|
swap_state.c
    674  static int swap_vma_ra_win(struct vm_fault *vmf, unsigned long *start,  in swap_vma_ra_win() argument
    677  struct vm_area_struct *vma = vmf->vma;  in swap_vma_ra_win()
    686  faddr = vmf->address;  in swap_vma_ra_win()
    729  struct mempolicy *mpol, pgoff_t targ_ilx, struct vm_fault *vmf)  in swap_vma_readahead() argument
    741  win = swap_vma_ra_win(vmf, &start, &end);  in swap_vma_readahead()
    745  ilx = targ_ilx - PFN_DOWN(vmf->address - start);  in swap_vma_readahead()
    750  pte = pte_offset_map(vmf->pmd, addr);  in swap_vma_readahead()
    768  if (addr != vmf->address) {  in swap_vma_readahead()
    802  struct vm_fault *vmf)  in swapin_readahead() argument
    808  mpol = get_vma_policy(vmf->vma, vmf->address, 0, &ilx);  in swapin_readahead()
    [all …]
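
swap_vma_readahead() walks a PTE window [start, end) around the faulting address, rebasing the interleave index to the window start with ilx = targ_ilx - PFN_DOWN(vmf->address - start) (line 745) and skipping the faulting PTE itself (line 768). A sketch of just the loop shape; my_ra_walk() is hypothetical:

    static void my_ra_walk(struct vm_fault *vmf, unsigned long start,
                           unsigned long end)
    {
        unsigned long addr;
        pte_t *pte;

        for (addr = start; addr < end; addr += PAGE_SIZE) {
            pte = pte_offset_map(vmf->pmd, addr);
            if (!pte)
                break;
            if (addr != vmf->address) {
                /* ... read the swap entry and queue readahead ... */
            }
            pte_unmap(pte);
        }
    }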
|
internal.h
    429   vm_fault_t __vmf_anon_prepare(struct vm_fault *vmf);
    430   static inline vm_fault_t vmf_anon_prepare(struct vm_fault *vmf)  in vmf_anon_prepare() argument
    432   vm_fault_t ret = __vmf_anon_prepare(vmf);  in vmf_anon_prepare()
    435   vma_end_read(vmf->vma);  in vmf_anon_prepare()
    439   vm_fault_t do_swap_page(struct vm_fault *vmf);
    1116  static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,  in maybe_unlock_mmap_for_io() argument
    1119  int flags = vmf->flags;  in maybe_unlock_mmap_for_io()
    1131  fpin = get_file(vmf->vma->vm_file);  in maybe_unlock_mmap_for_io()
    1132  release_fault_lock(vmf);  in maybe_unlock_mmap_for_io()
    1385  int numa_migrate_check(struct folio *folio, struct vm_fault *vmf,
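
These two helpers combine naturally in a fault path: vmf_anon_prepare() sets up the anon_vma (ending the per-VMA read lock itself on failure, per line 435), and maybe_unlock_mmap_for_io() pins the file and releases the fault lock before the handler sleeps on I/O. A hedged sketch of a hypothetical caller, my_cow_fault():

    static vm_fault_t my_cow_fault(struct vm_fault *vmf)
    {
        struct file *fpin = NULL;
        vm_fault_t ret;

        ret = vmf_anon_prepare(vmf);
        if (ret)
            return ret;

        /* about to block on I/O: take a file ref, drop the fault lock */
        fpin = maybe_unlock_mmap_for_io(vmf, fpin);
        if (fpin) {
            /* ... kick off the I/O ... */
            fput(fpin);
            return VM_FAULT_RETRY;
        }
        return 0;
    }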
|
shmem.c
    1859  struct vm_area_struct *vma = vmf ? vmf->vma : NULL;  in shmem_suitable_orders()
    2501  struct vm_area_struct *vma = vmf ? vmf->vma : NULL;  in shmem_get_folio_gfp()
    2585  folio = shmem_alloc_and_add_folio(vmf, huge_gfp,  in shmem_get_folio_gfp()
    2748  vmf->pgoff >= shmem_falloc->start &&  in shmem_falloc_wait()
    2749  vmf->pgoff < shmem_falloc->next) {  in shmem_falloc_wait()
    2754  fpin = maybe_unlock_mmap_for_io(vmf, NULL);  in shmem_falloc_wait()
    2779  static vm_fault_t shmem_fault(struct vm_fault *vmf)  in shmem_fault() argument
    2792  ret = shmem_falloc_wait(vmf, inode);  in shmem_fault()
    2797  WARN_ON_ONCE(vmf->page != NULL);  in shmem_fault()
    2799  gfp, vmf, &ret);  in shmem_fault()
    [all …]
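
shmem_falloc_wait() checks whether the fault's pgoff lands inside a hole currently being punched (lines 2748-2749) and, if so, drops the fault lock while waiting. A reduced sketch; my_shmem_style_fault(), hole_start and hole_next are hypothetical stand-ins for the shmem_falloc bookkeeping:

    static vm_fault_t my_shmem_style_fault(struct vm_fault *vmf,
                                           pgoff_t hole_start, pgoff_t hole_next)
    {
        struct file *fpin;

        /* fault inside a hole being punched: drop the lock and wait */
        if (vmf->pgoff >= hole_start && vmf->pgoff < hole_next) {
            fpin = maybe_unlock_mmap_for_io(vmf, NULL);
            /* ... wait for the hole punch to finish ... */
            if (fpin) {
                fput(fpin);
                return VM_FAULT_RETRY;
            }
        }
        return 0;
    }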
|
swap.h
    79   struct vm_fault *vmf);
    160  struct vm_fault *vmf)  in swapin_readahead() argument
|
mmap.c
    1361  static vm_fault_t special_mapping_fault(struct vm_fault *vmf);
    1415  static vm_fault_t special_mapping_fault(struct vm_fault *vmf)  in special_mapping_fault() argument
    1417  struct vm_area_struct *vma = vmf->vma;  in special_mapping_fault()
    1423  return sm->fault(sm, vmf->vma, vmf);  in special_mapping_fault()
    1427  for (pgoff = vmf->pgoff; pgoff && *pages; ++pages)  in special_mapping_fault()
    1433  vmf->page = page;  in special_mapping_fault()
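
special_mapping_fault() resolves the faulting pgoff by walking the NULL-terminated pages[] array of the special mapping (lines 1427-1433). A sketch of that lookup; my_special_fault() is hypothetical and takes the array directly instead of fetching it from the vm_special_mapping:

    static vm_fault_t my_special_fault(struct vm_fault *vmf, struct page **pages)
    {
        pgoff_t pgoff;

        for (pgoff = vmf->pgoff; pgoff && *pages; ++pages)
            pgoff--;

        if (*pages) {
            struct page *page = *pages;

            get_page(page);
            vmf->page = page;
            return 0;           /* core MM installs vmf->page */
        }
        return VM_FAULT_SIGBUS; /* offset past the end of the array */
    }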
|
khugepaged.c
    1017  struct vm_fault vmf = {  in __collapse_huge_page_swapin() local
    1038  vmf.orig_pte = ptep_get_lockless(pte);  in __collapse_huge_page_swapin()
    1039  if (!is_swap_pte(vmf.orig_pte))  in __collapse_huge_page_swapin()
    1042  vmf.pte = pte;  in __collapse_huge_page_swapin()
    1043  vmf.ptl = ptl;  in __collapse_huge_page_swapin()
    1044  ret = do_swap_page(&vmf);  in __collapse_huge_page_swapin()
    1475  struct vm_fault vmf = {  in set_huge_pmd() local
    1484  if (do_set_pmd(&vmf, folio, page))  in set_huge_pmd()
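
khugepaged shows that do_swap_page() can be driven from outside the page-fault path by building a vm_fault on the stack with only the fields the swap path consumes. A sketch of that construction; my_swapin_one() and its exact field choices are illustrative, not the verbatim khugepaged code:

    static vm_fault_t my_swapin_one(struct vm_area_struct *vma, unsigned long addr,
                                    pmd_t *pmd, pte_t *pte, spinlock_t *ptl)
    {
        struct vm_fault vmf = {
            .vma = vma,
            .address = addr,
            .pgoff = linear_page_index(vma, addr),
            .flags = FAULT_FLAG_ALLOW_RETRY,
            .pmd = pmd,
        };

        vmf.orig_pte = ptep_get_lockless(pte);
        if (!is_swap_pte(vmf.orig_pte))
            return 0;           /* nothing to swap in */

        vmf.pte = pte;
        vmf.ptl = ptl;
        return do_swap_page(&vmf);  /* consumes the PTL like a real fault */
    }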
|
nommu.c
    1625  vm_fault_t filemap_fault(struct vm_fault *vmf)  in filemap_fault() argument
    1632  vm_fault_t filemap_map_pages(struct vm_fault *vmf,  in filemap_map_pages() argument
|
mempolicy.c
    2909  int mpol_misplaced(struct folio *folio, struct vm_fault *vmf,  in mpol_misplaced() argument
    2916  struct vm_area_struct *vma = vmf->vma;  in mpol_misplaced()
    2926  lockdep_assert_held(vmf->ptl);  in mpol_misplaced()
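
mpol_misplaced() documents its locking contract with lockdep_assert_held(vmf->ptl) (line 2926): the caller must already hold the page table lock the fault mapped. The same contract in a hypothetical helper, my_ptl_locked_helper():

    static int my_ptl_locked_helper(struct vm_fault *vmf)
    {
        /* with lockdep enabled, catches callers that skipped vmf->ptl */
        lockdep_assert_held(vmf->ptl);
        /* ... safe to inspect the PTE the fault mapped ... */
        return 0;
    }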
|
swapfile.c
    2123  struct vm_fault vmf = {  in unuse_pte_range() local
    2131  &vmf);  in unuse_pte_range()
|