Lines Matching refs:vmf (references to the struct vm_fault argument in the transparent-hugepage fault paths of mm/huge_memory.c)

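Every match below touches a small set of vm_fault fields: the VMA and faulting address, the fault flags, the pmd or pud slot being operated on, the page-table lock cached in vmf->ptl, and (for the PMD handlers) the orig_pmd value sampled when the fault was taken. The sketch below is an abridged, userspace-compilable stand-in listing just those fields for orientation; it is not the kernel's struct vm_fault, which is defined in include/linux/mm.h and has more members.

    /* Abridged model of the vm_fault fields exercised by the matches below.
     * Kernel types are replaced with hypothetical stand-ins so the snippet
     * compiles on its own; the real definition lives in include/linux/mm.h.
     */
    #include <stdio.h>
    #include <stdint.h>

    typedef uint64_t pmd_t_model;             /* stand-in for pmd_t */
    typedef uint64_t pud_t_model;             /* stand-in for pud_t */
    struct vm_area_struct_model;              /* opaque stand-in for the VMA */
    struct spinlock_model;                    /* opaque stand-in for spinlock_t */

    struct vm_fault_model {
        struct vm_area_struct_model *vma;     /* vmf->vma: VMA the fault occurred in */
        unsigned long address;                /* vmf->address: faulting virtual address */
        unsigned int flags;                   /* vmf->flags: FAULT_FLAG_WRITE, FAULT_FLAG_UNSHARE, ... */
        pmd_t_model *pmd;                     /* vmf->pmd: PMD slot being handled */
        pud_t_model *pud;                     /* vmf->pud: PUD slot (huge-PUD paths) */
        pmd_t_model orig_pmd;                 /* vmf->orig_pmd: entry value sampled at fault time */
        struct spinlock_model *ptl;           /* vmf->ptl: page-table lock taken by the handler */
    };

    int main(void)
    {
        printf("model size: %zu bytes\n", sizeof(struct vm_fault_model));
        return 0;
    }
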
655 static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,  in __do_huge_pmd_anonymous_page()  argument
658 struct vm_area_struct *vma = vmf->vma; in __do_huge_pmd_anonymous_page()
660 unsigned long haddr = vmf->address & HPAGE_PMD_MASK; in __do_huge_pmd_anonymous_page()
679 clear_huge_page(page, vmf->address, HPAGE_PMD_NR); in __do_huge_pmd_anonymous_page()
687 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in __do_huge_pmd_anonymous_page()
688 if (unlikely(!pmd_none(*vmf->pmd))) { in __do_huge_pmd_anonymous_page()
699 spin_unlock(vmf->ptl); in __do_huge_pmd_anonymous_page()
702 ret = handle_userfault(vmf, VM_UFFD_MISSING); in __do_huge_pmd_anonymous_page()
711 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable); in __do_huge_pmd_anonymous_page()
712 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); in __do_huge_pmd_anonymous_page()
713 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); in __do_huge_pmd_anonymous_page()
716 spin_unlock(vmf->ptl); in __do_huge_pmd_anonymous_page()
723 spin_unlock(vmf->ptl); in __do_huge_pmd_anonymous_page()
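
The matches at 655-723 are the allocation path for a new anonymous huge page: the faulting address is rounded down to the huge-page boundary (660), the new page is cleared relative to vmf->address (679), and the PMD is only populated (711-713) after pmd_lock() has been taken into vmf->ptl and the slot re-checked with pmd_none() (687-688); if the range has a userfaultfd MISSING handler registered, the lock is dropped and handle_userfault() is called instead (699-702). The rounding at 660 (repeated at 787, and with PMD_MASK/PUD_MASK at 906 and 997) is plain mask arithmetic; a standalone sketch, assuming the common 4 KiB base page / 2 MiB huge PMD configuration, follows.

    /* Standalone illustration of haddr = vmf->address & HPAGE_PMD_MASK (line 660).
     * The 2 MiB size is an assumption (x86-64 with 4 KiB base pages); the real
     * constants come from the kernel headers and differ on other configurations.
     */
    #include <stdio.h>
    #include <stdint.h>

    #define HPAGE_PMD_SIZE_MODEL (2ULL * 1024 * 1024)
    #define HPAGE_PMD_MASK_MODEL (~(HPAGE_PMD_SIZE_MODEL - 1))

    int main(void)
    {
        uint64_t address = 0x7f1234567890ULL;               /* hypothetical fault address */
        uint64_t haddr   = address & HPAGE_PMD_MASK_MODEL;  /* start of the 2 MiB region */

        printf("address %#llx rounds down to haddr %#llx\n",
               (unsigned long long)address, (unsigned long long)haddr);
        return 0;
    }
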
782 vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf) in do_huge_pmd_anonymous_page() argument
784 struct vm_area_struct *vma = vmf->vma; in do_huge_pmd_anonymous_page()
787 unsigned long haddr = vmf->address & HPAGE_PMD_MASK; in do_huge_pmd_anonymous_page()
795 if (!(vmf->flags & FAULT_FLAG_WRITE) && in do_huge_pmd_anonymous_page()
810 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_huge_pmd_anonymous_page()
812 if (pmd_none(*vmf->pmd)) { in do_huge_pmd_anonymous_page()
815 spin_unlock(vmf->ptl); in do_huge_pmd_anonymous_page()
818 spin_unlock(vmf->ptl); in do_huge_pmd_anonymous_page()
820 ret = handle_userfault(vmf, VM_UFFD_MISSING); in do_huge_pmd_anonymous_page()
824 haddr, vmf->pmd, zero_page); in do_huge_pmd_anonymous_page()
825 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); in do_huge_pmd_anonymous_page()
826 spin_unlock(vmf->ptl); in do_huge_pmd_anonymous_page()
829 spin_unlock(vmf->ptl); in do_huge_pmd_anonymous_page()
840 return __do_huge_pmd_anonymous_page(vmf, &folio->page, gfp); in do_huge_pmd_anonymous_page()
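
The entry point at 782-840 shows two outcomes in these matches: a read fault (795) can be satisfied with the shared huge zero page, installed under pmd_lock() only after a pmd_none() re-check (810-826), again deferring to handle_userfault() for userfaultfd MISSING ranges (820); any other case allocates a folio and hands off to __do_huge_pmd_anonymous_page() (840).
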
903 vm_fault_t vmf_insert_pfn_pmd_prot(struct vm_fault *vmf, pfn_t pfn, in vmf_insert_pfn_pmd_prot() argument
906 unsigned long addr = vmf->address & PMD_MASK; in vmf_insert_pfn_pmd_prot()
907 struct vm_area_struct *vma = vmf->vma; in vmf_insert_pfn_pmd_prot()
932 insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable); in vmf_insert_pfn_pmd_prot()
994 vm_fault_t vmf_insert_pfn_pud_prot(struct vm_fault *vmf, pfn_t pfn, in vmf_insert_pfn_pud_prot() argument
997 unsigned long addr = vmf->address & PUD_MASK; in vmf_insert_pfn_pud_prot()
998 struct vm_area_struct *vma = vmf->vma; in vmf_insert_pfn_pud_prot()
1016 insert_pfn_pud(vma, addr, vmf->pud, pfn, pgprot, write); in vmf_insert_pfn_pud_prot()
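
vmf_insert_pfn_pmd_prot() (903-932) and vmf_insert_pfn_pud_prot() (994-1016) use vmf only to locate the mapping: the faulting address is masked down with PMD_MASK or PUD_MASK (906, 997), the same rounding idiom as in the sketch above, and the caller-supplied pfn is inserted at vmf->pmd or vmf->pud with the requested write permission and pgprot (932, 1016).
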
1274 void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud) in huge_pud_set_accessed() argument
1276 bool write = vmf->flags & FAULT_FLAG_WRITE; in huge_pud_set_accessed()
1278 vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud); in huge_pud_set_accessed()
1279 if (unlikely(!pud_same(*vmf->pud, orig_pud))) in huge_pud_set_accessed()
1282 touch_pud(vmf->vma, vmf->address, vmf->pud, write); in huge_pud_set_accessed()
1284 spin_unlock(vmf->ptl); in huge_pud_set_accessed()
1288 void huge_pmd_set_accessed(struct vm_fault *vmf) in huge_pmd_set_accessed() argument
1290 bool write = vmf->flags & FAULT_FLAG_WRITE; in huge_pmd_set_accessed()
1292 vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd); in huge_pmd_set_accessed()
1293 if (unlikely(!pmd_same(*vmf->pmd, vmf->orig_pmd))) in huge_pmd_set_accessed()
1296 touch_pmd(vmf->vma, vmf->address, vmf->pmd, write); in huge_pmd_set_accessed()
1299 spin_unlock(vmf->ptl); in huge_pmd_set_accessed()
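
huge_pud_set_accessed() (1274-1284) and huge_pmd_set_accessed() (1288-1299) are the smallest examples of the idiom running through this whole listing: take the page-table lock into vmf->ptl, confirm the entry still matches the value the fault was taken on (pud_same()/pmd_same() against orig_pud or vmf->orig_pmd, 1279 and 1293), do the work (here touch_pud()/touch_pmd() to mark the entry accessed, 1282 and 1296), and unlock. A minimal userspace model of that check-under-lock pattern, with a pthread mutex standing in for the page-table lock, is sketched below (build with -pthread).

    /* Userspace model of the lock / *_same() re-check / act / unlock idiom at
     * 1292-1299. The mutex, the entry type and mark_accessed() are stand-ins,
     * not kernel APIs.
     */
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct slot {
        pthread_mutex_t lock;   /* plays the role of the page-table lock (vmf->ptl) */
        uint64_t entry;         /* plays the role of the pmd/pud entry */
    };

    static void mark_accessed(struct slot *s)
    {
        s->entry |= 1ULL << 5;  /* pretend bit 5 is the accessed bit */
    }

    /* Returns true if the entry was still the one the fault saw and got updated. */
    static bool set_accessed(struct slot *s, uint64_t orig_entry)
    {
        bool updated = false;

        pthread_mutex_lock(&s->lock);           /* mirrors pmd_lock() at 1292 */
        if (s->entry == orig_entry) {           /* mirrors pmd_same() at 1293 */
            mark_accessed(s);                   /* mirrors touch_pmd() at 1296 */
            updated = true;
        }
        pthread_mutex_unlock(&s->lock);         /* mirrors spin_unlock(vmf->ptl) at 1299 */
        return updated;
    }

    int main(void)
    {
        struct slot s = { .lock = PTHREAD_MUTEX_INITIALIZER, .entry = 0x1000 };
        uint64_t orig = 0x1000;                 /* snapshot taken "at fault time" */
        bool updated = set_accessed(&s, orig);

        printf("updated: %d, entry now %#llx\n", updated, (unsigned long long)s.entry);
        return 0;
    }
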
1302 vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf) in do_huge_pmd_wp_page() argument
1304 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE; in do_huge_pmd_wp_page()
1305 struct vm_area_struct *vma = vmf->vma; in do_huge_pmd_wp_page()
1308 unsigned long haddr = vmf->address & HPAGE_PMD_MASK; in do_huge_pmd_wp_page()
1309 pmd_t orig_pmd = vmf->orig_pmd; in do_huge_pmd_wp_page()
1311 vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd); in do_huge_pmd_wp_page()
1317 spin_lock(vmf->ptl); in do_huge_pmd_wp_page()
1319 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) { in do_huge_pmd_wp_page()
1320 spin_unlock(vmf->ptl); in do_huge_pmd_wp_page()
1334 spin_unlock(vmf->ptl); in do_huge_pmd_wp_page()
1336 spin_lock(vmf->ptl); in do_huge_pmd_wp_page()
1337 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) { in do_huge_pmd_wp_page()
1338 spin_unlock(vmf->ptl); in do_huge_pmd_wp_page()
1369 spin_unlock(vmf->ptl); in do_huge_pmd_wp_page()
1374 if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1)) in do_huge_pmd_wp_page()
1375 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); in do_huge_pmd_wp_page()
1376 spin_unlock(vmf->ptl); in do_huge_pmd_wp_page()
1382 spin_unlock(vmf->ptl); in do_huge_pmd_wp_page()
1384 __split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL); in do_huge_pmd_wp_page()
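
The write-protect handler (1302-1384) is the same shape with two extra wrinkles visible here: vmf->ptl is looked up with pmd_lockptr() (1311) but only locked later (1317), the entry is re-validated against orig_pmd both on entry (1319) and again after a temporary unlock/relock (1334-1338), the reuse path updates the existing entry in place with pmdp_set_access_flags() and update_mmu_cache_pmd() (1374-1375), and the fallback path unlocks and splits the huge PMD at vmf->address (1382-1384) so the fault can be handled at PTE granularity.
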
1502 vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf) in do_huge_pmd_numa_page() argument
1504 struct vm_area_struct *vma = vmf->vma; in do_huge_pmd_numa_page()
1505 pmd_t oldpmd = vmf->orig_pmd; in do_huge_pmd_numa_page()
1508 unsigned long haddr = vmf->address & HPAGE_PMD_MASK; in do_huge_pmd_numa_page()
1514 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_huge_pmd_numa_page()
1515 if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) { in do_huge_pmd_numa_page()
1516 spin_unlock(vmf->ptl); in do_huge_pmd_numa_page()
1528 can_change_pmd_writable(vma, vmf->address, pmd)) in do_huge_pmd_numa_page()
1554 spin_unlock(vmf->ptl); in do_huge_pmd_numa_page()
1563 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_huge_pmd_numa_page()
1564 if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) { in do_huge_pmd_numa_page()
1565 spin_unlock(vmf->ptl); in do_huge_pmd_numa_page()
1584 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd); in do_huge_pmd_numa_page()
1585 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); in do_huge_pmd_numa_page()
1586 spin_unlock(vmf->ptl); in do_huge_pmd_numa_page()
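
The NUMA-hinting handler (1502-1586) stretches the idiom across a lock drop: the entry is validated under pmd_lock() (1514-1515), the lock is released at 1554 for the slow path (the migration attempt itself is not among these matches), and when the original mapping has to be restored the handler re-takes the lock and re-checks pmd_same() against the fault-time value (1563-1565) before writing the PMD back and updating the MMU cache (1584-1586). A sketch of that drop-lock / redo-check shape, again with userspace stand-ins, follows (build with -pthread).

    /* Userspace model of the drop-lock / slow path / re-check shape at 1514-1586.
     * slow_work() is a placeholder for whatever happens while the lock is dropped
     * (migration, in the kernel); none of these names are kernel APIs.
     */
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct slot {
        pthread_mutex_t lock;
        uint64_t entry;
    };

    static bool slow_work(void)
    {
        return false;                           /* pretend the slow path did not take over */
    }

    static void numa_style_fault(struct slot *s, uint64_t orig_entry, uint64_t restored)
    {
        pthread_mutex_lock(&s->lock);           /* pmd_lock() into vmf->ptl (1514) */
        if (s->entry != orig_entry) {           /* pmd_same() failed: bail out (1515-1516) */
            pthread_mutex_unlock(&s->lock);
            return;
        }
        pthread_mutex_unlock(&s->lock);         /* drop the lock for the slow path (1554) */

        if (slow_work())
            return;                             /* slow path handled it; nothing to restore */

        pthread_mutex_lock(&s->lock);           /* re-take the lock (1563) */
        if (s->entry != orig_entry) {           /* entry changed meanwhile: bail out (1564-1565) */
            pthread_mutex_unlock(&s->lock);
            return;
        }
        s->entry = restored;                    /* write a usable entry back (1584) */
        pthread_mutex_unlock(&s->lock);         /* (1586) */
    }

    int main(void)
    {
        struct slot s = { .lock = PTHREAD_MUTEX_INITIALIZER, .entry = 0x2000 };

        numa_style_fault(&s, 0x2000, 0x2001);
        printf("entry now %#llx\n", (unsigned long long)s.entry);
        return 0;
    }
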