Lines Matching refs:folio
654 struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr, in vm_normal_folio()
702 struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma, in vm_normal_folio_pmd()
740 struct folio *folio, struct page *page, unsigned long address, in restore_exclusive_pte() argument
745 VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio); in restore_exclusive_pte()
756 if (folio_test_dirty(folio)) in restore_exclusive_pte()
777 struct folio *folio = page_folio(page); in try_restore_exclusive_pte() local
779 if (folio_trylock(folio)) { in try_restore_exclusive_pte()
780 restore_exclusive_pte(vma, folio, page, addr, ptep, orig_pte); in try_restore_exclusive_pte()
781 folio_unlock(folio); in try_restore_exclusive_pte()
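
The restore_exclusive_pte()/try_restore_exclusive_pte() lines above show the opportunistic-locking pattern: the restore only happens if folio_trylock() succeeds, otherwise nothing is done and the fault is retried later. A minimal sketch of that shape; the helper name and the locked-section body are illustrative, only the lock calls come from the listing:

    #include <linux/mm.h>
    #include <linux/pagemap.h>      /* folio_trylock(), folio_unlock() */

    /* Illustrative sketch only; not the kernel code itself. */
    static void restore_if_lockable(struct vm_area_struct *vma, struct page *page,
                                    unsigned long addr, pte_t *ptep, pte_t orig_pte)
    {
            struct folio *folio = page_folio(page);

            if (folio_trylock(folio)) {
                    /* folio state may be touched here, e.g. the PTE restored */
                    folio_unlock(folio);
            }
            /* if the lock is contended, do nothing; the fault will simply be retried */
    }
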
802 struct folio *folio; in copy_nonpresent_pte() local
825 folio = pfn_swap_entry_folio(entry); in copy_nonpresent_pte()
827 rss[mm_counter(folio)]++; in copy_nonpresent_pte()
847 folio = page_folio(page); in copy_nonpresent_pte()
858 folio_get(folio); in copy_nonpresent_pte()
859 rss[mm_counter(folio)]++; in copy_nonpresent_pte()
861 folio_try_dup_anon_rmap_pte(folio, page, dst_vma, src_vma); in copy_nonpresent_pte()
919 struct folio **prealloc, struct page *page) in copy_present_page()
921 struct folio *new_folio; in copy_present_page()
985 int max_nr, int *rss, struct folio **prealloc) in copy_present_ptes()
989 struct folio *folio; in copy_present_ptes() local
996 folio = page_folio(page); in copy_present_ptes()
1003 if (unlikely(!*prealloc && folio_test_large(folio) && max_nr != 1)) { in copy_present_ptes()
1009 nr = folio_pte_batch_flags(folio, src_vma, src_pte, &pte, max_nr, flags); in copy_present_ptes()
1010 folio_ref_add(folio, nr); in copy_present_ptes()
1011 if (folio_test_anon(folio)) { in copy_present_ptes()
1012 if (unlikely(folio_try_dup_anon_rmap_ptes(folio, page, in copy_present_ptes()
1014 folio_ref_sub(folio, nr); in copy_present_ptes()
1018 VM_WARN_ON_FOLIO(PageAnonExclusive(page), folio); in copy_present_ptes()
1020 folio_dup_file_rmap_ptes(folio, page, nr, dst_vma); in copy_present_ptes()
1021 rss[mm_counter_file(folio)] += nr; in copy_present_ptes()
1028 folio_get(folio); in copy_present_ptes()
1029 if (folio_test_anon(folio)) { in copy_present_ptes()
1036 if (unlikely(folio_try_dup_anon_rmap_pte(folio, page, dst_vma, src_vma))) { in copy_present_ptes()
1038 folio_put(folio); in copy_present_ptes()
1044 VM_WARN_ON_FOLIO(PageAnonExclusive(page), folio); in copy_present_ptes()
1046 folio_dup_file_rmap_pte(folio, page, dst_vma); in copy_present_ptes()
1047 rss[mm_counter_file(folio)]++; in copy_present_ptes()
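
In copy_present_ptes() above, the child's folio reference is taken first and rolled back if the anon rmap cannot be duplicated, in which case the caller falls back to copying the page. A minimal sketch of that single-PTE ordering; the helper name and the -EAGAIN convention are made up here, the refcount and rmap calls are the ones listed:

    #include <linux/mm.h>
    #include <linux/rmap.h>

    /* Illustrative sketch of the single-PTE path; not the kernel code itself. */
    static int dup_one_present_pte(struct folio *folio, struct page *page,
                                   struct vm_area_struct *dst_vma,
                                   struct vm_area_struct *src_vma)
    {
            folio_get(folio);                       /* reference for the child's mapping */
            if (folio_test_anon(folio)) {
                    if (unlikely(folio_try_dup_anon_rmap_pte(folio, page, dst_vma, src_vma))) {
                            folio_put(folio);       /* roll back; caller copies the page instead */
                            return -EAGAIN;
                    }
            } else {
                    folio_dup_file_rmap_pte(folio, page, dst_vma);
            }
            return 0;
    }

The real function also bumps rss[mm_counter_file(folio)] in the file-backed branch, as the 1047 line above shows.
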
1055 static inline struct folio *folio_prealloc(struct mm_struct *src_mm, in folio_prealloc()
1058 struct folio *new_folio; in folio_prealloc()
1092 struct folio *prealloc = NULL; in copy_pte_range()
1437 struct folio *folio) in should_zap_folio() argument
1444 return !folio_test_anon(folio); in should_zap_folio()
1490 struct vm_area_struct *vma, struct folio *folio, in zap_present_folio_ptes() argument
1498 if (!folio_test_anon(folio)) { in zap_present_folio_ptes()
1501 folio_mark_dirty(folio); in zap_present_folio_ptes()
1508 folio_mark_accessed(folio); in zap_present_folio_ptes()
1509 rss[mm_counter(folio)] -= nr; in zap_present_folio_ptes()
1523 folio_remove_rmap_ptes(folio, page, nr, vma); in zap_present_folio_ptes()
1525 if (unlikely(folio_mapcount(folio) < 0)) in zap_present_folio_ptes()
1547 struct folio *folio; in zap_present_ptes() local
1564 folio = page_folio(page); in zap_present_ptes()
1565 if (unlikely(!should_zap_folio(details, folio))) { in zap_present_ptes()
1574 if (unlikely(folio_test_large(folio) && max_nr != 1)) { in zap_present_ptes()
1575 nr = folio_pte_batch(folio, pte, ptent, max_nr); in zap_present_ptes()
1576 zap_present_folio_ptes(tlb, vma, folio, page, pte, ptent, nr, in zap_present_ptes()
1581 zap_present_folio_ptes(tlb, vma, folio, page, pte, ptent, 1, addr, in zap_present_ptes()
1599 struct folio *folio = page_folio(page); in zap_nonpresent_ptes() local
1601 if (unlikely(!should_zap_folio(details, folio))) in zap_nonpresent_ptes()
1610 rss[mm_counter(folio)]--; in zap_nonpresent_ptes()
1611 folio_remove_rmap_pte(folio, page, vma); in zap_nonpresent_ptes()
1612 folio_put(folio); in zap_nonpresent_ptes()
1622 struct folio *folio = pfn_swap_entry_folio(entry); in zap_nonpresent_ptes() local
1624 if (!should_zap_folio(details, folio)) in zap_nonpresent_ptes()
1626 rss[mm_counter(folio)]--; in zap_nonpresent_ptes()
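
The zap_nonpresent_ptes() lines above pair every torn-down mapping with three pieces of bookkeeping: an RSS decrement, an rmap removal, and a reference drop. A minimal sketch that simply groups those three listed calls into one hypothetical helper:

    #include <linux/mm.h>
    #include <linux/rmap.h>

    /* Illustrative: per-page teardown bookkeeping as seen in the zap paths. */
    static void drop_one_mapping(struct folio *folio, struct page *page,
                                 struct vm_area_struct *vma, int *rss)
    {
            rss[mm_counter(folio)]--;                       /* undo the RSS accounting done at map time */
            folio_remove_rmap_pte(folio, page, vma);        /* unlink the reverse mapping */
            folio_put(folio);                               /* drop the reference the mapping held */
    }
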
2132 struct folio *folio = page_folio(page); in validate_page_before_insert() local
2134 if (!folio_ref_count(folio)) in validate_page_before_insert()
2136 if (unlikely(is_zero_folio(folio))) { in validate_page_before_insert()
2141 if (folio_test_anon(folio) || folio_test_slab(folio) || in validate_page_before_insert()
2144 flush_dcache_folio(folio); in validate_page_before_insert()
2152 struct folio *folio = page_folio(page); in insert_page_into_pte_locked() local
2173 if (unlikely(is_zero_folio(folio))) { in insert_page_into_pte_locked()
2176 folio_get(folio); in insert_page_into_pte_locked()
2182 inc_mm_counter(vma->vm_mm, mm_counter_file(folio)); in insert_page_into_pte_locked()
2183 folio_add_file_rmap_pte(folio, page, vma); in insert_page_into_pte_locked()
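
insert_page_into_pte_locked() above is the converse: a reference is taken, the file RSS counter goes up, and the file rmap is added for the new PTE. Grouped into a hypothetical helper for symmetry with the zap sketch above:

    #include <linux/mm.h>
    #include <linux/rmap.h>

    /* Illustrative: accounting when mapping one page of a file folio into a VMA. */
    static void account_one_file_mapping(struct folio *folio, struct page *page,
                                         struct vm_area_struct *vma)
    {
            folio_get(folio);                               /* reference held by the new mapping */
            inc_mm_counter(vma->vm_mm, mm_counter_file(folio));
            folio_add_file_rmap_pte(folio, page, vma);      /* link the reverse mapping */
    }
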
3350 static vm_fault_t do_page_mkwrite(struct vm_fault *vmf, struct folio *folio) in do_page_mkwrite() argument
3367 folio_lock(folio); in do_page_mkwrite()
3368 if (!folio->mapping) { in do_page_mkwrite()
3369 folio_unlock(folio); in do_page_mkwrite()
3374 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); in do_page_mkwrite()
3387 struct folio *folio = page_folio(vmf->page); in fault_dirty_shared_page() local
3391 dirtied = folio_mark_dirty(folio); in fault_dirty_shared_page()
3392 VM_BUG_ON_FOLIO(folio_test_anon(folio), folio); in fault_dirty_shared_page()
3399 mapping = folio_raw_mapping(folio); in fault_dirty_shared_page()
3400 folio_unlock(folio); in fault_dirty_shared_page()
3436 static inline void wp_page_reuse(struct vm_fault *vmf, struct folio *folio) in wp_page_reuse() argument
3445 if (folio) { in wp_page_reuse()
3446 VM_BUG_ON(folio_test_anon(folio) && in wp_page_reuse()
3453 folio_xchg_last_cpupid(folio, (1 << LAST_CPUPID_SHIFT) - 1); in wp_page_reuse()
3535 struct folio *old_folio = NULL; in wp_page_copy()
3536 struct folio *new_folio = NULL; in wp_page_copy()
3698 static vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf, struct folio *folio) in finish_mkwrite_fault() argument
3714 wp_page_reuse(vmf, folio); in finish_mkwrite_fault()
3744 static vm_fault_t wp_page_shared(struct vm_fault *vmf, struct folio *folio) in wp_page_shared() argument
3750 folio_get(folio); in wp_page_shared()
3758 folio_put(folio); in wp_page_shared()
3762 tmp = do_page_mkwrite(vmf, folio); in wp_page_shared()
3765 folio_put(folio); in wp_page_shared()
3768 tmp = finish_mkwrite_fault(vmf, folio); in wp_page_shared()
3770 folio_unlock(folio); in wp_page_shared()
3771 folio_put(folio); in wp_page_shared()
3775 wp_page_reuse(vmf, folio); in wp_page_shared()
3776 folio_lock(folio); in wp_page_shared()
3779 folio_put(folio); in wp_page_shared()
3785 static bool __wp_can_reuse_large_anon_folio(struct folio *folio, in __wp_can_reuse_large_anon_folio() argument
3791 if (folio_large_mapcount(folio) <= 1) in __wp_can_reuse_large_anon_folio()
3807 if (test_bit(FOLIO_MM_IDS_SHARED_BITNUM, &folio->_mm_ids)) in __wp_can_reuse_large_anon_folio()
3810 VM_WARN_ON_ONCE(folio_test_ksm(folio)); in __wp_can_reuse_large_anon_folio()
3812 if (unlikely(folio_test_swapcache(folio))) { in __wp_can_reuse_large_anon_folio()
3817 if (!folio_trylock(folio)) in __wp_can_reuse_large_anon_folio()
3819 folio_free_swap(folio); in __wp_can_reuse_large_anon_folio()
3820 folio_unlock(folio); in __wp_can_reuse_large_anon_folio()
3823 if (folio_large_mapcount(folio) != folio_ref_count(folio)) in __wp_can_reuse_large_anon_folio()
3827 folio_lock_large_mapcount(folio); in __wp_can_reuse_large_anon_folio()
3828 VM_WARN_ON_ONCE_FOLIO(folio_large_mapcount(folio) > folio_ref_count(folio), folio); in __wp_can_reuse_large_anon_folio()
3830 if (test_bit(FOLIO_MM_IDS_SHARED_BITNUM, &folio->_mm_ids)) in __wp_can_reuse_large_anon_folio()
3832 if (folio_large_mapcount(folio) != folio_ref_count(folio)) in __wp_can_reuse_large_anon_folio()
3835 VM_WARN_ON_ONCE_FOLIO(folio_large_mapcount(folio) > folio_nr_pages(folio), folio); in __wp_can_reuse_large_anon_folio()
3836 VM_WARN_ON_ONCE_FOLIO(folio_entire_mapcount(folio), folio); in __wp_can_reuse_large_anon_folio()
3837 VM_WARN_ON_ONCE(folio_mm_id(folio, 0) != vma->vm_mm->mm_id && in __wp_can_reuse_large_anon_folio()
3838 folio_mm_id(folio, 1) != vma->vm_mm->mm_id); in __wp_can_reuse_large_anon_folio()
3847 folio_unlock_large_mapcount(folio); in __wp_can_reuse_large_anon_folio()
3851 static bool __wp_can_reuse_large_anon_folio(struct folio *folio, in __wp_can_reuse_large_anon_folio() argument
3858 static bool wp_can_reuse_anon_folio(struct folio *folio, in wp_can_reuse_anon_folio() argument
3861 if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && folio_test_large(folio)) in wp_can_reuse_anon_folio()
3862 return __wp_can_reuse_large_anon_folio(folio, vma); in wp_can_reuse_anon_folio()
3871 if (folio_test_ksm(folio) || folio_ref_count(folio) > 3) in wp_can_reuse_anon_folio()
3873 if (!folio_test_lru(folio)) in wp_can_reuse_anon_folio()
3879 if (folio_ref_count(folio) > 1 + folio_test_swapcache(folio)) in wp_can_reuse_anon_folio()
3881 if (!folio_trylock(folio)) in wp_can_reuse_anon_folio()
3883 if (folio_test_swapcache(folio)) in wp_can_reuse_anon_folio()
3884 folio_free_swap(folio); in wp_can_reuse_anon_folio()
3885 if (folio_test_ksm(folio) || folio_ref_count(folio) != 1) { in wp_can_reuse_anon_folio()
3886 folio_unlock(folio); in wp_can_reuse_anon_folio()
3894 folio_move_anon_rmap(folio, vma); in wp_can_reuse_anon_folio()
3895 folio_unlock(folio); in wp_can_reuse_anon_folio()
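
The wp_can_reuse_anon_folio() lines above are the write-fault reuse test: after dropping a possible swap-cache reference under the folio lock, the folio may be written in place only if the faulting mapping holds the sole remaining reference, in which case the anon rmap is moved to this VMA. A minimal order-0 sketch; it omits the large-folio variant and the LRU-batch handling the real code has, and the helper name is made up:

    #include <linux/mm.h>
    #include <linux/pagemap.h>
    #include <linux/rmap.h>
    #include <linux/swap.h>

    /* Illustrative sketch of the small-folio reuse check; not the kernel code itself. */
    static bool can_reuse_small_anon_folio(struct folio *folio, struct vm_area_struct *vma)
    {
            if (folio_test_ksm(folio))
                    return false;                   /* KSM folios are never reused in place */
            if (folio_ref_count(folio) > 1 + folio_test_swapcache(folio))
                    return false;                   /* another reference exists: must copy */
            if (!folio_trylock(folio))
                    return false;                   /* lock contended: copying is the safe fallback */
            if (folio_test_swapcache(folio))
                    folio_free_swap(folio);         /* try to drop the swap-cache reference */
            if (folio_test_ksm(folio) || folio_ref_count(folio) != 1) {
                    folio_unlock(folio);
                    return false;
            }
            /* sole owner: take over the anon rmap and let the caller reuse the page */
            folio_move_anon_rmap(folio, vma);
            folio_unlock(folio);
            return true;
    }
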
3926 struct folio *folio = NULL; in do_wp_page() local
3963 folio = page_folio(vmf->page); in do_wp_page()
3981 return wp_page_shared(vmf, folio); in do_wp_page()
3991 if (folio && folio_test_anon(folio) && in do_wp_page()
3992 (PageAnonExclusive(vmf->page) || wp_can_reuse_anon_folio(folio, vma))) { in do_wp_page()
3999 wp_page_reuse(vmf, folio); in do_wp_page()
4005 if (folio) in do_wp_page()
4006 folio_get(folio); in do_wp_page()
4010 if (folio && folio_test_ksm(folio)) in do_wp_page()
4055 void unmap_mapping_folio(struct folio *folio) in unmap_mapping_folio() argument
4057 struct address_space *mapping = folio->mapping; in unmap_mapping_folio()
4062 VM_BUG_ON(!folio_test_locked(folio)); in unmap_mapping_folio()
4064 first_index = folio->index; in unmap_mapping_folio()
4065 last_index = folio_next_index(folio) - 1; in unmap_mapping_folio()
4068 details.single_folio = folio; in unmap_mapping_folio()
4149 struct folio *folio = page_folio(vmf->page); in remove_device_exclusive_entry() local
4162 if (!folio_try_get(folio)) in remove_device_exclusive_entry()
4165 ret = folio_lock_or_retry(folio, vmf); in remove_device_exclusive_entry()
4167 folio_put(folio); in remove_device_exclusive_entry()
4178 restore_exclusive_pte(vma, folio, vmf->page, vmf->address, in remove_device_exclusive_entry()
4183 folio_unlock(folio); in remove_device_exclusive_entry()
4184 folio_put(folio); in remove_device_exclusive_entry()
4190 static inline bool should_try_to_free_swap(struct folio *folio, in should_try_to_free_swap() argument
4194 if (!folio_test_swapcache(folio)) in should_try_to_free_swap()
4196 if (mem_cgroup_swap_full(folio) || (vma->vm_flags & VM_LOCKED) || in should_try_to_free_swap()
4197 folio_test_mlocked(folio)) in should_try_to_free_swap()
4205 return (fault_flags & FAULT_FLAG_WRITE) && !folio_test_ksm(folio) && in should_try_to_free_swap()
4206 folio_ref_count(folio) == (1 + folio_nr_pages(folio)); in should_try_to_free_swap()
4280 static struct folio *__alloc_swap_folio(struct vm_fault *vmf) in __alloc_swap_folio()
4283 struct folio *folio; in __alloc_swap_folio() local
4286 folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vmf->address); in __alloc_swap_folio()
4287 if (!folio) in __alloc_swap_folio()
4291 if (mem_cgroup_swapin_charge_folio(folio, vma->vm_mm, in __alloc_swap_folio()
4293 folio_put(folio); in __alloc_swap_folio()
4297 return folio; in __alloc_swap_folio()
4358 static struct folio *alloc_swap_folio(struct vm_fault *vmf) in alloc_swap_folio()
4362 struct folio *folio; in alloc_swap_folio() local
4422 folio = vma_alloc_folio(gfp, order, vma, addr); in alloc_swap_folio()
4423 if (folio) { in alloc_swap_folio()
4424 if (!mem_cgroup_swapin_charge_folio(folio, vma->vm_mm, in alloc_swap_folio()
4426 return folio; in alloc_swap_folio()
4428 folio_put(folio); in alloc_swap_folio()
4438 static struct folio *alloc_swap_folio(struct vm_fault *vmf) in alloc_swap_folio()
4457 struct folio *swapcache, *folio = NULL; in do_swap_page() local
4535 folio = swap_cache_get_folio(entry, vma, vmf->address); in do_swap_page()
4536 if (folio) in do_swap_page()
4537 page = folio_file_page(folio, swp_offset(entry)); in do_swap_page()
4538 swapcache = folio; in do_swap_page()
4540 if (!folio) { in do_swap_page()
4544 folio = alloc_swap_folio(vmf); in do_swap_page()
4545 if (folio) { in do_swap_page()
4546 __folio_set_locked(folio); in do_swap_page()
4547 __folio_set_swapbacked(folio); in do_swap_page()
4549 nr_pages = folio_nr_pages(folio); in do_swap_page()
4550 if (folio_test_large(folio)) in do_swap_page()
4576 workingset_refault(folio, shadow); in do_swap_page()
4578 folio_add_lru(folio); in do_swap_page()
4581 folio->swap = entry; in do_swap_page()
4582 swap_read_folio(folio, NULL); in do_swap_page()
4583 folio->private = NULL; in do_swap_page()
4586 folio = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE, in do_swap_page()
4588 swapcache = folio; in do_swap_page()
4591 if (!folio) { in do_swap_page()
4608 page = folio_file_page(folio, swp_offset(entry)); in do_swap_page()
4618 ret |= folio_lock_or_retry(folio, vmf); in do_swap_page()
4630 if (unlikely(!folio_test_swapcache(folio) || in do_swap_page()
4639 folio = ksm_might_need_to_copy(folio, vma, vmf->address); in do_swap_page()
4640 if (unlikely(!folio)) { in do_swap_page()
4642 folio = swapcache; in do_swap_page()
4644 } else if (unlikely(folio == ERR_PTR(-EHWPOISON))) { in do_swap_page()
4646 folio = swapcache; in do_swap_page()
4649 if (folio != swapcache) in do_swap_page()
4650 page = folio_page(folio, 0); in do_swap_page()
4658 if ((vmf->flags & FAULT_FLAG_WRITE) && folio == swapcache && in do_swap_page()
4659 !folio_test_ksm(folio) && !folio_test_lru(folio)) in do_swap_page()
4663 folio_throttle_swaprate(folio, GFP_KERNEL); in do_swap_page()
4673 if (unlikely(!folio_test_uptodate(folio))) { in do_swap_page()
4679 if (folio_test_large(folio) && !folio_test_swapcache(folio)) { in do_swap_page()
4680 unsigned long nr = folio_nr_pages(folio); in do_swap_page()
4700 if (folio_test_large(folio) && folio_test_swapcache(folio)) { in do_swap_page()
4701 int nr = folio_nr_pages(folio); in do_swap_page()
4702 unsigned long idx = folio_page_idx(folio, page); in do_swap_page()
4723 entry = folio->swap; in do_swap_page()
4724 page = &folio->page; in do_swap_page()
4736 BUG_ON(!folio_test_anon(folio) && folio_test_mappedtodisk(folio)); in do_swap_page()
4737 BUG_ON(folio_test_anon(folio) && PageAnonExclusive(page)); in do_swap_page()
4743 if (!folio_test_ksm(folio)) { in do_swap_page()
4745 if (folio != swapcache) { in do_swap_page()
4751 } else if (exclusive && folio_test_writeback(folio) && in do_swap_page()
4780 arch_swap_restore(folio_swap(entry, folio), folio); in do_swap_page()
4788 if (should_try_to_free_swap(folio, vma, vmf->flags)) in do_swap_page()
4789 folio_free_swap(folio); in do_swap_page()
4805 if (!folio_test_ksm(folio) && in do_swap_page()
4806 (exclusive || folio_ref_count(folio) == 1)) { in do_swap_page()
4817 folio_ref_add(folio, nr_pages - 1); in do_swap_page()
4822 if (unlikely(folio != swapcache && swapcache)) { in do_swap_page()
4823 folio_add_new_anon_rmap(folio, vma, address, RMAP_EXCLUSIVE); in do_swap_page()
4824 folio_add_lru_vma(folio, vma); in do_swap_page()
4825 } else if (!folio_test_anon(folio)) { in do_swap_page()
4832 VM_WARN_ON_ONCE(folio_test_large(folio) && folio_test_swapcache(folio)); in do_swap_page()
4833 VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio); in do_swap_page()
4834 folio_add_new_anon_rmap(folio, vma, address, rmap_flags); in do_swap_page()
4836 folio_add_anon_rmap_ptes(folio, page, nr_pages, vma, address, in do_swap_page()
4840 VM_BUG_ON(!folio_test_anon(folio) || in do_swap_page()
4846 folio_unlock(folio); in do_swap_page()
4847 if (folio != swapcache && swapcache) { in do_swap_page()
4886 folio_unlock(folio); in do_swap_page()
4888 folio_put(folio); in do_swap_page()
4889 if (folio != swapcache && swapcache) { in do_swap_page()
4915 static struct folio *alloc_anon_folio(struct vm_fault *vmf) in alloc_anon_folio()
4920 struct folio *folio; in alloc_anon_folio() local
4971 folio = vma_alloc_folio(gfp, order, vma, addr); in alloc_anon_folio()
4972 if (folio) { in alloc_anon_folio()
4973 if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) { in alloc_anon_folio()
4975 folio_put(folio); in alloc_anon_folio()
4978 folio_throttle_swaprate(folio, gfp); in alloc_anon_folio()
4987 folio_zero_user(folio, vmf->address); in alloc_anon_folio()
4988 return folio; in alloc_anon_folio()
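
alloc_anon_folio() above follows the allocate / charge / throttle / zero sequence, releasing the folio if the memcg charge fails. A minimal order-0 sketch built only from the calls in the listing; the gfp choice mirrors the swap-in path shown earlier and the helper name is made up:

    #include <linux/gfp.h>
    #include <linux/memcontrol.h>
    #include <linux/mm.h>
    #include <linux/swap.h>

    /* Illustrative: order-0 anonymous folio allocation for a fault address. */
    static struct folio *alloc_zeroed_anon_folio(struct vm_area_struct *vma,
                                                 unsigned long addr)
    {
            gfp_t gfp = GFP_HIGHUSER_MOVABLE;
            struct folio *folio = vma_alloc_folio(gfp, 0, vma, addr);

            if (!folio)
                    return NULL;
            if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) {
                    folio_put(folio);               /* charge failed: give the folio back */
                    return NULL;
            }
            folio_throttle_swaprate(folio, gfp);
            folio_zero_user(folio, addr);           /* clear it before it becomes visible to userspace */
            return folio;
    }
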
5009 struct folio *folio; in do_anonymous_page() local
5054 folio = alloc_anon_folio(vmf); in do_anonymous_page()
5055 if (IS_ERR(folio)) in do_anonymous_page()
5057 if (!folio) in do_anonymous_page()
5060 nr_pages = folio_nr_pages(folio); in do_anonymous_page()
5068 __folio_mark_uptodate(folio); in do_anonymous_page()
5070 entry = folio_mk_pte(folio, vma->vm_page_prot); in do_anonymous_page()
5093 folio_put(folio); in do_anonymous_page()
5097 folio_ref_add(folio, nr_pages - 1); in do_anonymous_page()
5099 count_mthp_stat(folio_order(folio), MTHP_STAT_ANON_FAULT_ALLOC); in do_anonymous_page()
5100 folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE); in do_anonymous_page()
5101 folio_add_lru_vma(folio, vma); in do_anonymous_page()
5114 folio_put(folio); in do_anonymous_page()
5128 struct folio *folio; in __do_fault() local
5157 folio = page_folio(vmf->page); in __do_fault()
5162 unmap_mapping_folio(folio); in __do_fault()
5164 if (mapping_evict_folio(folio->mapping, folio)) in __do_fault()
5166 folio_unlock(folio); in __do_fault()
5168 folio_put(folio); in __do_fault()
5174 folio_lock(folio); in __do_fault()
5176 VM_BUG_ON_PAGE(!folio_test_locked(folio), vmf->page); in __do_fault()
5195 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct folio *folio, struct page *page) in do_set_pmd() argument
5215 if (folio_order(folio) != HPAGE_PMD_ORDER) in do_set_pmd()
5217 page = &folio->page; in do_set_pmd()
5225 if (unlikely(folio_test_has_hwpoisoned(folio))) in do_set_pmd()
5244 entry = folio_mk_pmd(folio, vma->vm_page_prot); in do_set_pmd()
5248 add_mm_counter(vma->vm_mm, mm_counter_file(folio), HPAGE_PMD_NR); in do_set_pmd()
5249 folio_add_file_rmap_pmd(folio, page, vma); in do_set_pmd()
5269 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct folio *folio, struct page *page) in do_set_pmd() argument
5283 void set_pte_range(struct vm_fault *vmf, struct folio *folio, in set_pte_range() argument
5301 else if (pte_write(entry) && folio_test_dirty(folio)) in set_pte_range()
5307 VM_BUG_ON_FOLIO(nr != 1, folio); in set_pte_range()
5308 folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE); in set_pte_range()
5309 folio_add_lru_vma(folio, vma); in set_pte_range()
5311 folio_add_file_rmap_ptes(folio, page, nr, vma); in set_pte_range()
5346 struct folio *folio; in finish_fault() local
5363 folio = page_folio(page); in finish_fault()
5375 if (folio_test_pmd_mappable(folio)) { in finish_fault()
5376 ret = do_set_pmd(vmf, folio, page); in finish_fault()
5387 nr_pages = folio_nr_pages(folio); in finish_fault()
5398 pgoff_t idx = folio_page_idx(folio, page); in finish_fault()
5416 page = &folio->page; in finish_fault()
5436 folio_ref_add(folio, nr_pages - 1); in finish_fault()
5437 set_pte_range(vmf, folio, page, nr_pages, addr); in finish_fault()
5438 type = is_cow ? MM_ANONPAGES : mm_counter_file(folio); in finish_fault()
5556 struct folio *folio; in do_read_fault() local
5578 folio = page_folio(vmf->page); in do_read_fault()
5579 folio_unlock(folio); in do_read_fault()
5581 folio_put(folio); in do_read_fault()
5588 struct folio *folio; in do_cow_fault() local
5597 folio = folio_prealloc(vma->vm_mm, vma, vmf->address, false); in do_cow_fault()
5598 if (!folio) in do_cow_fault()
5601 vmf->cow_page = &folio->page; in do_cow_fault()
5613 __folio_mark_uptodate(folio); in do_cow_fault()
5623 folio_put(folio); in do_cow_fault()
5631 struct folio *folio; in do_shared_fault() local
5641 folio = page_folio(vmf->page); in do_shared_fault()
5648 folio_unlock(folio); in do_shared_fault()
5649 tmp = do_page_mkwrite(vmf, folio); in do_shared_fault()
5652 folio_put(folio); in do_shared_fault()
5660 folio_unlock(folio); in do_shared_fault()
5661 folio_put(folio); in do_shared_fault()
5721 int numa_migrate_check(struct folio *folio, struct vm_fault *vmf, in numa_migrate_check() argument
5742 if (folio_maybe_mapped_shared(folio) && (vma->vm_flags & VM_SHARED)) in numa_migrate_check()
5748 if (folio_use_access_time(folio)) in numa_migrate_check()
5751 *last_cpupid = folio_last_cpupid(folio); in numa_migrate_check()
5758 count_memcg_folio_events(folio, NUMA_HINT_FAULTS, 1); in numa_migrate_check()
5760 if (folio_nid(folio) == numa_node_id()) { in numa_migrate_check()
5765 return mpol_misplaced(folio, vmf, addr); in numa_migrate_check()
5784 struct folio *folio, pte_t fault_pte, in numa_rebuild_large_mapping() argument
5787 int nr = pte_pfn(fault_pte) - folio_pfn(folio); in numa_rebuild_large_mapping()
5795 end = min3(addr_start + folio_size(folio), pt_start + PMD_SIZE, in numa_rebuild_large_mapping()
5807 if (pfn_folio(pte_pfn(ptent)) != folio) in numa_rebuild_large_mapping()
5825 struct folio *folio = NULL; in do_numa_page() local
5858 folio = vm_normal_folio(vma, vmf->address, pte); in do_numa_page()
5859 if (!folio || folio_is_zone_device(folio)) in do_numa_page()
5862 nid = folio_nid(folio); in do_numa_page()
5863 nr_pages = folio_nr_pages(folio); in do_numa_page()
5865 target_nid = numa_migrate_check(folio, vmf, vmf->address, &flags, in do_numa_page()
5869 if (migrate_misplaced_folio_prepare(folio, vma, target_nid)) { in do_numa_page()
5879 if (!migrate_misplaced_folio(folio, target_nid)) { in do_numa_page()
5900 if (folio && folio_test_large(folio)) in do_numa_page()
5901 numa_rebuild_large_mapping(vmf, vma, folio, pte, ignore_writable, in do_numa_page()
6706 struct folio *folio; in __access_remote_vm() local
6738 folio = page_folio(page); in __access_remote_vm()
6744 maddr = kmap_local_folio(folio, folio_page_idx(folio, page) * PAGE_SIZE); in __access_remote_vm()
6748 folio_mark_dirty_lock(folio); in __access_remote_vm()
6753 folio_release_kmap(folio, maddr); in __access_remote_vm()
6832 struct folio *folio; in __copy_remote_vm_str() local
6848 folio = page_folio(page); in __copy_remote_vm_str()
6854 maddr = kmap_local_folio(folio, folio_page_idx(folio, page) * PAGE_SIZE); in __copy_remote_vm_str()
6859 folio_release_kmap(folio, maddr); in __copy_remote_vm_str()
6877 folio_release_kmap(folio, maddr); in __copy_remote_vm_str()
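
__access_remote_vm() and __copy_remote_vm_str() above share one sequence: map the relevant page of the folio with kmap_local_folio(), copy, mark the folio dirty if it was written, and release with folio_release_kmap(), which also drops the folio reference. A minimal write-side sketch; the helper, offset, and length handling are illustrative, the mapping/dirty/release calls are the listed ones:

    #include <linux/highmem.h>
    #include <linux/mm.h>
    #include <linux/string.h>

    /* Illustrative: write into one page of a (possibly large) folio via a local map. */
    static void write_into_folio_page(struct folio *folio, struct page *page,
                                      const void *src, size_t offset, size_t len)
    {
            void *maddr = kmap_local_folio(folio, folio_page_idx(folio, page) * PAGE_SIZE);

            memcpy(maddr + offset, src, len);       /* offset + len must stay within this page */
            folio_mark_dirty_lock(folio);           /* contents changed behind the mapping */
            folio_release_kmap(folio, maddr);       /* unmap and drop the folio reference */
    }
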
7024 static void clear_gigantic_page(struct folio *folio, unsigned long addr_hint, in clear_gigantic_page() argument
7027 unsigned long addr = ALIGN_DOWN(addr_hint, folio_size(folio)); in clear_gigantic_page()
7033 clear_user_highpage(folio_page(folio, i), addr + i * PAGE_SIZE); in clear_gigantic_page()
7039 struct folio *folio = arg; in clear_subpage() local
7041 clear_user_highpage(folio_page(folio, idx), addr); in clear_subpage()
7050 void folio_zero_user(struct folio *folio, unsigned long addr_hint) in folio_zero_user() argument
7052 unsigned int nr_pages = folio_nr_pages(folio); in folio_zero_user()
7055 clear_gigantic_page(folio, addr_hint, nr_pages); in folio_zero_user()
7057 process_huge_page(addr_hint, nr_pages, clear_subpage, folio); in folio_zero_user()
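
folio_zero_user() above clears large folios page by page: the gigantic case is a plain loop over folio_page(), while the THP-sized case goes through process_huge_page(), which orders the per-page clears around the faulting address for cache locality. A minimal sketch of the plain loop, assuming addr is already aligned down to the folio as clear_gigantic_page() does:

    #include <linux/highmem.h>
    #include <linux/mm.h>

    /* Illustrative: clear every page of a large folio mapped at addr. */
    static void clear_folio_pages(struct folio *folio, unsigned long addr)
    {
            unsigned int i, nr = folio_nr_pages(folio);

            for (i = 0; i < nr; i++)
                    clear_user_highpage(folio_page(folio, i), addr + i * PAGE_SIZE);
    }
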
7060 static int copy_user_gigantic_page(struct folio *dst, struct folio *src, in copy_user_gigantic_page()
7083 struct folio *dst;
7084 struct folio *src;
7099 int copy_user_large_folio(struct folio *dst, struct folio *src, in copy_user_large_folio()
7115 long copy_folio_from_user(struct folio *dst_folio, in copy_folio_from_user()