Lines matching refs:folio in mm/huge_memory.c. The leading number is the source line in that file; a trailing "argument", "local", or "member" tag marks how folio is declared at that point.

78 struct folio *huge_zero_folio __read_mostly;
212 struct folio *zero_folio; in get_huge_zero_page()
249 struct folio *mm_get_huge_zero_folio(struct mm_struct *mm) in mm_get_huge_zero_folio()
280 struct folio *zero_folio = xchg(&huge_zero_folio, NULL); in shrink_huge_zero_page_scan()
1061 struct deferred_split *get_deferred_split_queue(struct folio *folio) in get_deferred_split_queue() argument
1063 struct mem_cgroup *memcg = folio_memcg(folio); in get_deferred_split_queue()
1064 struct pglist_data *pgdat = NODE_DATA(folio_nid(folio)); in get_deferred_split_queue()
1073 struct deferred_split *get_deferred_split_queue(struct folio *folio) in get_deferred_split_queue() argument
1075 struct pglist_data *pgdat = NODE_DATA(folio_nid(folio)); in get_deferred_split_queue()
1081 static inline bool is_transparent_hugepage(const struct folio *folio) in is_transparent_hugepage() argument
1083 if (!folio_test_large(folio)) in is_transparent_hugepage()
1086 return is_huge_zero_folio(folio) || in is_transparent_hugepage()
1087 folio_test_large_rmappable(folio); in is_transparent_hugepage()
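
The matched lines 1081-1087 give nearly the whole helper; filling in the one unmatched early return (an assumption on my part), it reads roughly:

        static inline bool is_transparent_hugepage(const struct folio *folio)
        {
                if (!folio_test_large(folio))
                        return false;           /* assumed: unmatched early return */

                return is_huge_zero_folio(folio) ||
                       folio_test_large_rmappable(folio);
        }
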
1157 static struct folio *vma_alloc_anon_folio_pmd(struct vm_area_struct *vma, in vma_alloc_anon_folio_pmd()
1162 struct folio *folio; in vma_alloc_anon_folio_pmd() local
1164 folio = vma_alloc_folio(gfp, order, vma, addr & HPAGE_PMD_MASK); in vma_alloc_anon_folio_pmd()
1166 if (unlikely(!folio)) { in vma_alloc_anon_folio_pmd()
1172 VM_BUG_ON_FOLIO(!folio_test_large(folio), folio); in vma_alloc_anon_folio_pmd()
1173 if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) { in vma_alloc_anon_folio_pmd()
1174 folio_put(folio); in vma_alloc_anon_folio_pmd()
1181 folio_throttle_swaprate(folio, gfp); in vma_alloc_anon_folio_pmd()
1190 folio_zero_user(folio, addr); in vma_alloc_anon_folio_pmd()
1196 __folio_mark_uptodate(folio); in vma_alloc_anon_folio_pmd()
1197 return folio; in vma_alloc_anon_folio_pmd()
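
Stitching together only the folio-touching lines of the allocator (1157-1197) gives the following sketch; the gfp source (vma_thp_gfp_mask), the fallback accounting, and any condition guarding the zeroing are assumptions, since those lines do not match refs:folio:

        static struct folio *vma_alloc_anon_folio_pmd(struct vm_area_struct *vma,
                        unsigned long addr)
        {
                gfp_t gfp = vma_thp_gfp_mask(vma);      /* assumed gfp source */
                struct folio *folio;

                folio = vma_alloc_folio(gfp, HPAGE_PMD_ORDER, vma,
                                        addr & HPAGE_PMD_MASK);
                if (unlikely(!folio))
                        return NULL;                    /* fallback counters elided */

                VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
                if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) {
                        folio_put(folio);               /* undo the allocation on charge failure */
                        return NULL;
                }
                folio_throttle_swaprate(folio, gfp);
                folio_zero_user(folio, addr);           /* zero the folio; addr hints the faulting subpage */
                __folio_mark_uptodate(folio);
                return folio;
        }
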
1200 static void map_anon_folio_pmd(struct folio *folio, pmd_t *pmd, in map_anon_folio_pmd() argument
1205 entry = folio_mk_pmd(folio, vma->vm_page_prot); in map_anon_folio_pmd()
1207 folio_add_new_anon_rmap(folio, vma, haddr, RMAP_EXCLUSIVE); in map_anon_folio_pmd()
1208 folio_add_lru_vma(folio, vma); in map_anon_folio_pmd()
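
The mapping side (1200-1208) pairs those folio calls with the PMD installation, which sits on unmatched lines; a sketch with the pmd plumbing marked as assumptions:

        static void map_anon_folio_pmd(struct folio *folio, pmd_t *pmd,
                        struct vm_area_struct *vma, unsigned long haddr)
        {
                pmd_t entry;

                entry = folio_mk_pmd(folio, vma->vm_page_prot);
                entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);     /* assumed */
                folio_add_new_anon_rmap(folio, vma, haddr, RMAP_EXCLUSIVE);
                folio_add_lru_vma(folio, vma);
                set_pmd_at(vma->vm_mm, haddr, pmd, entry);              /* assumed */
                update_mmu_cache_pmd(vma, haddr, pmd);                  /* assumed */
                add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR); /* assumed */
        }
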
1221 struct folio *folio; in __do_huge_pmd_anonymous_page() local
1225 folio = vma_alloc_anon_folio_pmd(vma, vmf->address); in __do_huge_pmd_anonymous_page()
1226 if (unlikely(!folio)) in __do_huge_pmd_anonymous_page()
1246 folio_put(folio); in __do_huge_pmd_anonymous_page()
1253 map_anon_folio_pmd(folio, vmf->pmd, vma, haddr); in __do_huge_pmd_anonymous_page()
1255 deferred_split_folio(folio, false); in __do_huge_pmd_anonymous_page()
1265 folio_put(folio); in __do_huge_pmd_anonymous_page()
1308 struct folio *zero_folio) in set_huge_zero_folio()
1334 struct folio *zero_folio; in do_huge_pmd_anonymous_page()
1376 struct folio *folio; member
1392 const unsigned long pfn = fop.is_folio ? folio_pfn(fop.folio) : in insert_pmd()
1410 entry = folio_mk_pmd(fop.folio, vma->vm_page_prot); in insert_pmd()
1412 folio_get(fop.folio); in insert_pmd()
1413 folio_add_file_rmap_pmd(fop.folio, &fop.folio->page, vma); in insert_pmd()
1414 add_mm_counter(mm, mm_counter_file(fop.folio), HPAGE_PMD_NR); in insert_pmd()
1489 vm_fault_t vmf_insert_folio_pmd(struct vm_fault *vmf, struct folio *folio, in vmf_insert_folio_pmd() argument
1496 .folio = folio, in vmf_insert_folio_pmd()
1506 if (WARN_ON_ONCE(folio_order(folio) != PMD_ORDER)) in vmf_insert_folio_pmd()
1541 const unsigned long pfn = fop.is_folio ? folio_pfn(fop.folio) : in insert_pud()
1556 entry = folio_mk_pud(fop.folio, vma->vm_page_prot); in insert_pud()
1558 folio_get(fop.folio); in insert_pud()
1559 folio_add_file_rmap_pud(fop.folio, &fop.folio->page, vma); in insert_pud()
1560 add_mm_counter(mm, mm_counter_file(fop.folio), HPAGE_PUD_NR); in insert_pud()
1625 vm_fault_t vmf_insert_folio_pud(struct vm_fault *vmf, struct folio *folio, in vmf_insert_folio_pud() argument
1633 .folio = folio, in vmf_insert_folio_pud()
1641 if (WARN_ON_ONCE(folio_order(folio) != PUD_ORDER)) in vmf_insert_folio_pud()
1672 struct folio *src_folio; in copy_huge_pmd()
1871 struct folio *folio; in do_huge_zero_wp_pmd() local
1874 folio = vma_alloc_anon_folio_pmd(vma, vmf->address); in do_huge_zero_wp_pmd()
1875 if (unlikely(!folio)) in do_huge_zero_wp_pmd()
1888 map_anon_folio_pmd(folio, vmf->pmd, vma, haddr); in do_huge_zero_wp_pmd()
1891 folio_put(folio); in do_huge_zero_wp_pmd()
1902 struct folio *folio; in do_huge_pmd_wp_page() local
1928 folio = page_folio(page); in do_huge_pmd_wp_page()
1935 if (!folio_trylock(folio)) { in do_huge_pmd_wp_page()
1936 folio_get(folio); in do_huge_pmd_wp_page()
1938 folio_lock(folio); in do_huge_pmd_wp_page()
1942 folio_unlock(folio); in do_huge_pmd_wp_page()
1943 folio_put(folio); in do_huge_pmd_wp_page()
1946 folio_put(folio); in do_huge_pmd_wp_page()
1951 folio_unlock(folio); in do_huge_pmd_wp_page()
1960 if (folio_ref_count(folio) > in do_huge_pmd_wp_page()
1961 1 + folio_test_swapcache(folio) * folio_nr_pages(folio)) in do_huge_pmd_wp_page()
1963 if (folio_test_swapcache(folio)) in do_huge_pmd_wp_page()
1964 folio_free_swap(folio); in do_huge_pmd_wp_page()
1965 if (folio_ref_count(folio) == 1) { in do_huge_pmd_wp_page()
1968 folio_move_anon_rmap(folio, vma); in do_huge_pmd_wp_page()
1970 folio_unlock(folio); in do_huge_pmd_wp_page()
1985 folio_unlock(folio); in do_huge_pmd_wp_page()
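
The write-protect handler's reuse test can be read off lines 1960-1970: the locked folio is reused only when it carries no references beyond its mapping plus any swap-cache entries. A fragment-level sketch of just that test; the label names, the PMD rewrite, and the copy fallback are assumptions:

                /* One reference per swapcache entry may still be dropped below. */
                if (folio_ref_count(folio) >
                    1 + folio_test_swapcache(folio) * folio_nr_pages(folio))
                        goto unlock_fallback;           /* assumed label: fall back to copying */
                if (folio_test_swapcache(folio))
                        folio_free_swap(folio);
                if (folio_ref_count(folio) == 1) {
                        /* Sole owner: take over the anon_vma and reuse the PMD in place. */
                        folio_move_anon_rmap(folio, vma);
                        /* PMD is re-marked writable on unmatched lines. */
                }
                folio_unlock(folio);
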
2026 struct folio *folio; in do_huge_pmd_numa_page() local
2053 folio = vm_normal_folio_pmd(vma, haddr, pmd); in do_huge_pmd_numa_page()
2054 if (!folio) in do_huge_pmd_numa_page()
2057 nid = folio_nid(folio); in do_huge_pmd_numa_page()
2059 target_nid = numa_migrate_check(folio, vmf, haddr, &flags, writable, in do_huge_pmd_numa_page()
2063 if (migrate_misplaced_folio_prepare(folio, vma, target_nid)) { in do_huge_pmd_numa_page()
2071 if (!migrate_misplaced_folio(folio, target_nid)) { in do_huge_pmd_numa_page()
2108 struct folio *folio; in madvise_free_huge_pmd() local
2128 folio = pmd_folio(orig_pmd); in madvise_free_huge_pmd()
2133 if (folio_maybe_mapped_shared(folio)) in madvise_free_huge_pmd()
2136 if (!folio_trylock(folio)) in madvise_free_huge_pmd()
2144 folio_get(folio); in madvise_free_huge_pmd()
2146 split_folio(folio); in madvise_free_huge_pmd()
2147 folio_unlock(folio); in madvise_free_huge_pmd()
2148 folio_put(folio); in madvise_free_huge_pmd()
2152 if (folio_test_dirty(folio)) in madvise_free_huge_pmd()
2153 folio_clear_dirty(folio); in madvise_free_huge_pmd()
2154 folio_unlock(folio); in madvise_free_huge_pmd()
2165 folio_mark_lazyfree(folio); in madvise_free_huge_pmd()
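
The MADV_FREE path (2108-2165) shows the usual pattern for lazy-freeing a PMD-mapped folio: skip if possibly shared, trylock, split when only part of the range is covered, otherwise clear the dirty bit and mark it lazyfree. A fragment-level sketch assembled from those lines; the partial-range test, the PMD-lock handling, and the labels are assumptions:

                folio = pmd_folio(orig_pmd);
                if (folio_maybe_mapped_shared(folio))   /* other processes may map it: skip */
                        goto out;
                if (!folio_trylock(folio))
                        goto out;

                if (next - addr != HPAGE_PMD_SIZE) {    /* assumed: only part of the THP is covered */
                        folio_get(folio);
                        spin_unlock(ptl);               /* assumed: drop the PMD lock before splitting */
                        split_folio(folio);
                        folio_unlock(folio);
                        folio_put(folio);
                        goto out_unlocked;              /* assumed label */
                }

                if (folio_test_dirty(folio))
                        folio_clear_dirty(folio);
                folio_unlock(folio);
                /* PMD is made old and clean on unmatched lines. */
                folio_mark_lazyfree(folio);
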
2212 struct folio *folio = NULL; in zap_huge_pmd() local
2218 folio = page_folio(page); in zap_huge_pmd()
2219 folio_remove_rmap_pmd(folio, page, vma); in zap_huge_pmd()
2220 WARN_ON_ONCE(folio_mapcount(folio) < 0); in zap_huge_pmd()
2227 folio = pfn_swap_entry_folio(entry); in zap_huge_pmd()
2232 if (folio_test_anon(folio)) { in zap_huge_pmd()
2238 add_mm_counter(tlb->mm, mm_counter_file(folio), in zap_huge_pmd()
2247 folio_mark_accessed(folio); in zap_huge_pmd()
2252 tlb_remove_page_size(tlb, &folio->page, HPAGE_PMD_SIZE); in zap_huge_pmd()
2375 struct folio *folio = pfn_swap_entry_folio(entry); in change_huge_pmd() local
2384 if (folio_test_anon(folio)) in change_huge_pmd()
2406 struct folio *folio; in change_huge_pmd() local
2419 folio = pmd_folio(*pmd); in change_huge_pmd()
2420 toptier = node_is_toptier(folio_nid(folio)); in change_huge_pmd()
2429 if (folio_use_access_time(folio)) in change_huge_pmd()
2430 folio_xchg_access_time(folio, in change_huge_pmd()
2543 struct folio *src_folio; in move_pages_huge_pmd()
2721 struct folio *folio; in zap_huge_pud() local
2728 folio = page_folio(page); in zap_huge_pud()
2729 folio_remove_rmap_pud(folio, page, vma); in zap_huge_pud()
2730 add_mm_counter(tlb->mm, mm_counter_file(folio), -HPAGE_PUD_NR); in zap_huge_pud()
2741 struct folio *folio; in __split_huge_pud_locked() local
2758 folio = page_folio(page); in __split_huge_pud_locked()
2760 if (!folio_test_dirty(folio) && pud_dirty(old_pud)) in __split_huge_pud_locked()
2761 folio_mark_dirty(folio); in __split_huge_pud_locked()
2762 if (!folio_test_referenced(folio) && pud_young(old_pud)) in __split_huge_pud_locked()
2763 folio_set_referenced(folio); in __split_huge_pud_locked()
2764 folio_remove_rmap_pud(folio, page, vma); in __split_huge_pud_locked()
2765 folio_put(folio); in __split_huge_pud_locked()
2766 add_mm_counter(vma->vm_mm, mm_counter_file(folio), in __split_huge_pud_locked()
2841 struct folio *folio; in __split_huge_pmd_locked() local
2872 folio = pfn_swap_entry_folio(entry); in __split_huge_pmd_locked()
2877 folio = page_folio(page); in __split_huge_pmd_locked()
2878 if (!folio_test_dirty(folio) && pmd_dirty(old_pmd)) in __split_huge_pmd_locked()
2879 folio_mark_dirty(folio); in __split_huge_pmd_locked()
2880 if (!folio_test_referenced(folio) && pmd_young(old_pmd)) in __split_huge_pmd_locked()
2881 folio_set_referenced(folio); in __split_huge_pmd_locked()
2882 folio_remove_rmap_pmd(folio, page, vma); in __split_huge_pmd_locked()
2883 folio_put(folio); in __split_huge_pmd_locked()
2885 add_mm_counter(mm, mm_counter_file(folio), -HPAGE_PMD_NR); in __split_huge_pmd_locked()
2942 folio = page_folio(page); in __split_huge_pmd_locked()
2945 folio_set_dirty(folio); in __split_huge_pmd_locked()
2952 VM_WARN_ON_FOLIO(!folio_ref_count(folio), folio); in __split_huge_pmd_locked()
2953 VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio); in __split_huge_pmd_locked()
2972 folio_try_share_anon_rmap_pmd(folio, page)) in __split_huge_pmd_locked()
2977 folio_ref_add(folio, HPAGE_PMD_NR - 1); in __split_huge_pmd_locked()
2980 folio_add_anon_rmap_ptes(folio, page, HPAGE_PMD_NR, in __split_huge_pmd_locked()
3050 folio_remove_rmap_pmd(folio, page, vma); in __split_huge_pmd_locked()
3121 static void unmap_folio(struct folio *folio) in unmap_folio() argument
3126 VM_BUG_ON_FOLIO(!folio_test_large(folio), folio); in unmap_folio()
3128 if (folio_test_pmd_mappable(folio)) in unmap_folio()
3136 if (folio_test_anon(folio)) in unmap_folio()
3137 try_to_migrate(folio, ttu_flags); in unmap_folio()
3139 try_to_unmap(folio, ttu_flags | TTU_IGNORE_MLOCK); in unmap_folio()
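
unmap_folio() (3121-3139) chooses between installing migration entries and plain unmapping; a sketch in which the TTU flag setup on the unmatched lines is assumed:

        static void unmap_folio(struct folio *folio)
        {
                enum ttu_flags ttu_flags = TTU_RMAP_LOCKED | TTU_SYNC |
                                           TTU_BATCH_FLUSH;     /* assumed flag set */

                VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);

                if (folio_test_pmd_mappable(folio))
                        ttu_flags |= TTU_SPLIT_HUGE_PMD;        /* assumed */

                /*
                 * Anon folios need migration entries so they can be remapped
                 * after the split; file folios can simply be unmapped and
                 * faulted back on demand.
                 */
                if (folio_test_anon(folio))
                        try_to_migrate(folio, ttu_flags);
                else
                        try_to_unmap(folio, ttu_flags | TTU_IGNORE_MLOCK);
        }
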
3146 struct folio *folio) in __discard_anon_folio_pmd_locked() argument
3153 folio_set_dirty(folio); in __discard_anon_folio_pmd_locked()
3154 if (folio_test_dirty(folio) && !(vma->vm_flags & VM_DROPPABLE)) { in __discard_anon_folio_pmd_locked()
3155 folio_set_swapbacked(folio); in __discard_anon_folio_pmd_locked()
3168 ref_count = folio_ref_count(folio); in __discard_anon_folio_pmd_locked()
3169 map_count = folio_mapcount(folio); in __discard_anon_folio_pmd_locked()
3185 folio_set_dirty(folio); in __discard_anon_folio_pmd_locked()
3186 if (folio_test_dirty(folio) && !(vma->vm_flags & VM_DROPPABLE)) { in __discard_anon_folio_pmd_locked()
3187 folio_set_swapbacked(folio); in __discard_anon_folio_pmd_locked()
3197 folio_remove_rmap_pmd(folio, pmd_page(orig_pmd), vma); in __discard_anon_folio_pmd_locked()
3202 folio_put(folio); in __discard_anon_folio_pmd_locked()
3208 pmd_t *pmdp, struct folio *folio) in unmap_huge_pmd_locked() argument
3210 VM_WARN_ON_FOLIO(!folio_test_pmd_mappable(folio), folio); in unmap_huge_pmd_locked()
3211 VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio); in unmap_huge_pmd_locked()
3212 VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio); in unmap_huge_pmd_locked()
3213 VM_WARN_ON_FOLIO(folio_test_swapbacked(folio), folio); in unmap_huge_pmd_locked()
3216 return __discard_anon_folio_pmd_locked(vma, addr, pmdp, folio); in unmap_huge_pmd_locked()
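
unmap_huge_pmd_locked() (3208-3216) is almost entirely visible in the match; completing the signature and adding an alignment check (both assumptions) gives:

        bool unmap_huge_pmd_locked(struct vm_area_struct *vma, unsigned long addr,
                                   pmd_t *pmdp, struct folio *folio)
        {
                VM_WARN_ON_FOLIO(!folio_test_pmd_mappable(folio), folio);
                VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
                VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
                VM_WARN_ON_FOLIO(folio_test_swapbacked(folio), folio);
                VM_WARN_ON_ONCE(!IS_ALIGNED(addr, HPAGE_PMD_SIZE));     /* assumed */

                return __discard_anon_folio_pmd_locked(vma, addr, pmdp, folio);
        }
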
3219 static void remap_page(struct folio *folio, unsigned long nr, int flags) in remap_page() argument
3224 if (!folio_test_anon(folio)) in remap_page()
3227 remove_migration_ptes(folio, folio, RMP_LOCKED | flags); in remap_page()
3228 i += folio_nr_pages(folio); in remap_page()
3231 folio = folio_next(folio); in remap_page()
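
remap_page() (3219-3231) undoes unmap_folio() after a split, but only for anon folios (file folios are simply faulted back in); the loop walks the now-split folios with folio_next(). A sketch with the loop termination (on unmatched lines) assumed:

        static void remap_page(struct folio *folio, unsigned long nr, int flags)
        {
                int i = 0;

                /* Only anon folios were unmapped via migration entries. */
                if (!folio_test_anon(folio))
                        return;
                for (;;) {
                        remove_migration_ptes(folio, folio, RMP_LOCKED | flags);
                        i += folio_nr_pages(folio);
                        if (i >= nr)            /* assumed termination condition */
                                break;
                        folio = folio_next(folio);
                }
        }
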
3235 static void lru_add_split_folio(struct folio *folio, struct folio *new_folio, in lru_add_split_folio() argument
3238 VM_BUG_ON_FOLIO(folio_test_lru(new_folio), folio); in lru_add_split_folio()
3243 VM_WARN_ON(folio_test_lru(folio)); in lru_add_split_folio()
3248 VM_WARN_ON(!folio_test_lru(folio)); in lru_add_split_folio()
3249 if (folio_test_unevictable(folio)) in lru_add_split_folio()
3252 list_add_tail(&new_folio->lru, &folio->lru); in lru_add_split_folio()
3258 bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins) in can_split_folio() argument
3263 if (folio_test_anon(folio)) in can_split_folio()
3264 extra_pins = folio_test_swapcache(folio) ? in can_split_folio()
3265 folio_nr_pages(folio) : 0; in can_split_folio()
3267 extra_pins = folio_nr_pages(folio); in can_split_folio()
3270 return folio_mapcount(folio) == folio_ref_count(folio) - extra_pins - in can_split_folio()
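
can_split_folio() (3258-3270) is the pin check: a folio can be split only if every reference is accounted for by its mappings, the expected extra pins (swap cache for anon, page cache for file), and the caller's own pins. Filling in the unmatched else branch and the tail of the comparison (assumptions) yields:

        bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins)
        {
                int extra_pins;

                /* Extra pins from the swap cache (anon) or page cache (file). */
                if (folio_test_anon(folio))
                        extra_pins = folio_test_swapcache(folio) ?
                                        folio_nr_pages(folio) : 0;
                else
                        extra_pins = folio_nr_pages(folio);
                if (pextra_pins)
                        *pextra_pins = extra_pins;
                return folio_mapcount(folio) == folio_ref_count(folio) - extra_pins -
                                                caller_pins;
        }
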
3278 static void __split_folio_to_order(struct folio *folio, int old_order, in __split_folio_to_order() argument
3290 struct page *new_head = &folio->page + i; in __split_folio_to_order()
3296 struct folio *new_folio = (struct folio *)new_head; in __split_folio_to_order()
3314 new_folio->flags |= (folio->flags & in __split_folio_to_order()
3333 new_folio->mapping = folio->mapping; in __split_folio_to_order()
3334 new_folio->index = folio->index + i; in __split_folio_to_order()
3345 if (folio_test_swapcache(folio)) in __split_folio_to_order()
3346 new_folio->swap.val = folio->swap.val + i; in __split_folio_to_order()
3363 if (folio_test_young(folio)) in __split_folio_to_order()
3365 if (folio_test_idle(folio)) in __split_folio_to_order()
3368 new_folio->memcg_data = folio->memcg_data; in __split_folio_to_order()
3371 folio_xchg_last_cpupid(new_folio, folio_last_cpupid(folio)); in __split_folio_to_order()
3375 folio_set_order(folio, new_order); in __split_folio_to_order()
3377 ClearPageCompound(&folio->page); in __split_folio_to_order()
3419 static int __split_unmapped_folio(struct folio *folio, int new_order, in __split_unmapped_folio() argument
3423 int order = folio_order(folio); in __split_unmapped_folio()
3426 struct folio *next; in __split_unmapped_folio()
3430 if (folio_test_anon(folio)) in __split_unmapped_folio()
3433 folio_clear_has_hwpoisoned(folio); in __split_unmapped_folio()
3442 struct folio *end_folio = folio_next(folio); in __split_unmapped_folio()
3443 int old_order = folio_order(folio); in __split_unmapped_folio()
3444 struct folio *new_folio; in __split_unmapped_folio()
3447 if (folio_test_anon(folio) && split_order == 1) in __split_unmapped_folio()
3459 xas_split(xas, folio, old_order); in __split_unmapped_folio()
3461 xas_set_order(xas, folio->index, split_order); in __split_unmapped_folio()
3462 xas_try_split(xas, folio, old_order); in __split_unmapped_folio()
3471 folio_split_memcg_refs(folio, old_order, split_order); in __split_unmapped_folio()
3472 split_page_owner(&folio->page, old_order, split_order); in __split_unmapped_folio()
3473 pgalloc_tag_split(folio, old_order, split_order); in __split_unmapped_folio()
3475 __split_folio_to_order(folio, old_order, split_order); in __split_unmapped_folio()
3485 for (new_folio = folio; new_folio != end_folio; new_folio = next) { in __split_unmapped_folio()
3495 folio = new_folio; in __split_unmapped_folio()
3508 bool non_uniform_split_supported(struct folio *folio, unsigned int new_order, in non_uniform_split_supported() argument
3511 if (folio_test_anon(folio)) { in non_uniform_split_supported()
3517 !mapping_large_folio_support(folio->mapping)) { in non_uniform_split_supported()
3530 if (folio_test_swapcache(folio)) { in non_uniform_split_supported()
3540 bool uniform_split_supported(struct folio *folio, unsigned int new_order, in uniform_split_supported() argument
3543 if (folio_test_anon(folio)) { in uniform_split_supported()
3549 !mapping_large_folio_support(folio->mapping)) { in uniform_split_supported()
3556 if (new_order && folio_test_swapcache(folio)) { in uniform_split_supported()
3586 static int __folio_split(struct folio *folio, unsigned int new_order, in __folio_split() argument
3590 struct deferred_split *ds_queue = get_deferred_split_queue(folio); in __folio_split()
3591 XA_STATE(xas, &folio->mapping->i_pages, folio->index); in __folio_split()
3592 struct folio *end_folio = folio_next(folio); in __folio_split()
3593 bool is_anon = folio_test_anon(folio); in __folio_split()
3596 int order = folio_order(folio); in __folio_split()
3597 struct folio *new_folio, *next; in __folio_split()
3604 VM_WARN_ON_ONCE_FOLIO(!folio_test_locked(folio), folio); in __folio_split()
3605 VM_WARN_ON_ONCE_FOLIO(!folio_test_large(folio), folio); in __folio_split()
3607 if (folio != page_folio(split_at) || folio != page_folio(lock_at)) in __folio_split()
3610 if (new_order >= folio_order(folio)) in __folio_split()
3613 if (uniform_split && !uniform_split_supported(folio, new_order, true)) in __folio_split()
3617 !non_uniform_split_supported(folio, new_order, true)) in __folio_split()
3620 is_hzp = is_huge_zero_folio(folio); in __folio_split()
3626 if (folio_test_writeback(folio)) in __folio_split()
3638 anon_vma = folio_get_anon_vma(folio); in __folio_split()
3649 mapping = folio->mapping; in __folio_split()
3662 min_order = mapping_min_folio_order(folio->mapping); in __folio_split()
3673 if (!filemap_release_folio(folio, gfp)) { in __folio_split()
3679 xas_set_order(&xas, folio->index, new_order); in __folio_split()
3680 xas_split_alloc(&xas, folio, folio_order(folio), gfp); in __folio_split()
3706 if (!can_split_folio(folio, 1, &extra_pins)) { in __folio_split()
3711 unmap_folio(folio); in __folio_split()
3722 if (xas_load(&xas) != folio) { in __folio_split()
3730 if (folio_ref_freeze(folio, 1 + extra_pins)) { in __folio_split()
3735 if (folio_order(folio) > 1 && in __folio_split()
3736 !list_empty(&folio->_deferred_list)) { in __folio_split()
3738 if (folio_test_partially_mapped(folio)) { in __folio_split()
3739 folio_clear_partially_mapped(folio); in __folio_split()
3740 mod_mthp_stat(folio_order(folio), in __folio_split()
3749 list_del_init(&folio->_deferred_list); in __folio_split()
3753 int nr = folio_nr_pages(folio); in __folio_split()
3755 if (folio_test_pmd_mappable(folio) && in __folio_split()
3757 if (folio_test_swapbacked(folio)) { in __folio_split()
3758 __lruvec_stat_mod_folio(folio, in __folio_split()
3761 __lruvec_stat_mod_folio(folio, in __folio_split()
3768 if (folio_test_swapcache(folio)) { in __folio_split()
3770 VM_WARN_ON_ONCE_FOLIO(mapping, folio); in __folio_split()
3775 swap_cache = swap_address_space(folio->swap); in __folio_split()
3780 lruvec = folio_lruvec_lock(folio); in __folio_split()
3782 ret = __split_unmapped_folio(folio, new_order, split_at, &xas, in __folio_split()
3793 for (new_folio = folio_next(folio); new_folio != end_folio; in __folio_split()
3802 lru_add_split_folio(folio, new_folio, lruvec, list); in __folio_split()
3841 expected_refs = folio_expected_ref_count(folio) + 1; in __folio_split()
3842 folio_ref_unfreeze(folio, expected_refs); in __folio_split()
3863 remap_page(folio, 1 << order, remap_flags); in __folio_split()
3869 for (new_folio = folio; new_folio != end_folio; new_folio = next) { in __folio_split()
3950 struct folio *folio = page_folio(page); in split_huge_page_to_list_to_order() local
3952 return __folio_split(folio, new_order, &folio->page, page, list, true); in split_huge_page_to_list_to_order()
3977 int folio_split(struct folio *folio, unsigned int new_order, in folio_split() argument
3980 return __folio_split(folio, new_order, split_at, &folio->page, list, in folio_split()
3984 int min_order_for_split(struct folio *folio) in min_order_for_split() argument
3986 if (folio_test_anon(folio)) in min_order_for_split()
3989 if (!folio->mapping) { in min_order_for_split()
3990 if (folio_test_pmd_mappable(folio)) in min_order_for_split()
3995 return mapping_min_folio_order(folio->mapping); in min_order_for_split()
3998 int split_folio_to_list(struct folio *folio, struct list_head *list) in split_folio_to_list() argument
4000 int ret = min_order_for_split(folio); in split_folio_to_list()
4005 return split_huge_page_to_list_to_order(&folio->page, list, ret); in split_folio_to_list()
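
The two wrappers at 3984-4005 chain together: the minimum split order comes from the mapping (0 for anon), and split_folio_to_list() feeds it to split_huge_page_to_list_to_order(). A sketch; the error value and the event counting on unmatched lines are assumptions:

        int min_order_for_split(struct folio *folio)
        {
                if (folio_test_anon(folio))
                        return 0;

                if (!folio->mapping) {          /* truncated in parallel */
                        if (folio_test_pmd_mappable(folio))
                                count_vm_event(THP_SPLIT_PAGE_FAILED);  /* assumed */
                        return -EBUSY;                                  /* assumed */
                }

                return mapping_min_folio_order(folio->mapping);
        }

        int split_folio_to_list(struct folio *folio, struct list_head *list)
        {
                int ret = min_order_for_split(folio);

                if (ret < 0)
                        return ret;

                return split_huge_page_to_list_to_order(&folio->page, list, ret);
        }
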
4021 bool __folio_unqueue_deferred_split(struct folio *folio) in __folio_unqueue_deferred_split() argument
4027 WARN_ON_ONCE(folio_ref_count(folio)); in __folio_unqueue_deferred_split()
4028 WARN_ON_ONCE(!mem_cgroup_disabled() && !folio_memcg(folio)); in __folio_unqueue_deferred_split()
4030 ds_queue = get_deferred_split_queue(folio); in __folio_unqueue_deferred_split()
4032 if (!list_empty(&folio->_deferred_list)) { in __folio_unqueue_deferred_split()
4034 if (folio_test_partially_mapped(folio)) { in __folio_unqueue_deferred_split()
4035 folio_clear_partially_mapped(folio); in __folio_unqueue_deferred_split()
4036 mod_mthp_stat(folio_order(folio), in __folio_unqueue_deferred_split()
4039 list_del_init(&folio->_deferred_list); in __folio_unqueue_deferred_split()
4048 void deferred_split_folio(struct folio *folio, bool partially_mapped) in deferred_split_folio() argument
4050 struct deferred_split *ds_queue = get_deferred_split_queue(folio); in deferred_split_folio()
4052 struct mem_cgroup *memcg = folio_memcg(folio); in deferred_split_folio()
4060 if (folio_order(folio) <= 1) in deferred_split_folio()
4073 if (folio_test_swapcache(folio)) in deferred_split_folio()
4078 if (!folio_test_partially_mapped(folio)) { in deferred_split_folio()
4079 folio_set_partially_mapped(folio); in deferred_split_folio()
4080 if (folio_test_pmd_mappable(folio)) in deferred_split_folio()
4082 count_mthp_stat(folio_order(folio), MTHP_STAT_SPLIT_DEFERRED); in deferred_split_folio()
4083 mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, 1); in deferred_split_folio()
4088 VM_WARN_ON_FOLIO(folio_test_partially_mapped(folio), folio); in deferred_split_folio()
4090 if (list_empty(&folio->_deferred_list)) { in deferred_split_folio()
4091 list_add_tail(&folio->_deferred_list, &ds_queue->split_queue); in deferred_split_folio()
4095 set_shrinker_bit(memcg, folio_nid(folio), in deferred_split_folio()
4115 static bool thp_underused(struct folio *folio) in thp_underused() argument
4124 for (i = 0; i < folio_nr_pages(folio); i++) { in thp_underused()
4125 kaddr = kmap_local_folio(folio, i * PAGE_SIZE); in thp_underused()
4155 struct folio *folio, *next, *prev = NULL; in deferred_split_scan() local
4165 list_for_each_entry_safe(folio, next, &ds_queue->split_queue, in deferred_split_scan()
4167 if (folio_try_get(folio)) { in deferred_split_scan()
4168 list_move(&folio->_deferred_list, &list); in deferred_split_scan()
4171 if (folio_test_partially_mapped(folio)) { in deferred_split_scan()
4172 folio_clear_partially_mapped(folio); in deferred_split_scan()
4173 mod_mthp_stat(folio_order(folio), in deferred_split_scan()
4176 list_del_init(&folio->_deferred_list); in deferred_split_scan()
4184 list_for_each_entry_safe(folio, next, &list, _deferred_list) { in deferred_split_scan()
4188 if (!folio_test_partially_mapped(folio)) { in deferred_split_scan()
4189 underused = thp_underused(folio); in deferred_split_scan()
4193 if (!folio_trylock(folio)) in deferred_split_scan()
4195 if (!split_folio(folio)) { in deferred_split_scan()
4201 folio_unlock(folio); in deferred_split_scan()
4212 } else if (!folio_test_partially_mapped(folio)) { in deferred_split_scan()
4213 list_del_init(&folio->_deferred_list); in deferred_split_scan()
4222 swap(folio, prev); in deferred_split_scan()
4224 if (folio) in deferred_split_scan()
4225 folio_put(folio); in deferred_split_scan()
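
The shrinker scan (4155-4225) first moves folios it can pin off the split queue onto a private list, dropping queue state for folios that are already being freed. A fragment-level sketch of that first pass; the queue lock, length counter, and scan budget sit on unmatched lines and are assumptions:

                spin_lock_irqsave(&ds_queue->split_queue_lock, flags);  /* assumed lock */
                list_for_each_entry_safe(folio, next, &ds_queue->split_queue,
                                         _deferred_list) {
                        if (folio_try_get(folio)) {
                                /* Pinned: split it later, outside the queue lock. */
                                list_move(&folio->_deferred_list, &list);
                        } else {
                                /* Folio is being freed: just drop it from the queue. */
                                if (folio_test_partially_mapped(folio)) {
                                        folio_clear_partially_mapped(folio);
                                        mod_mthp_stat(folio_order(folio),
                                                      MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
                                }
                                list_del_init(&folio->_deferred_list);
                                ds_queue->split_queue_len--;            /* assumed counter */
                        }
                        if (!--sc->nr_to_scan)                          /* assumed scan budget */
                                break;
                }
                spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
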
4250 struct folio *folio; in split_huge_pages_all() local
4265 folio = page_folio(page); in split_huge_pages_all()
4266 if (!folio_try_get(folio)) in split_huge_pages_all()
4269 if (unlikely(page_folio(page) != folio)) in split_huge_pages_all()
4272 if (zone != folio_zone(folio)) in split_huge_pages_all()
4275 if (!folio_test_large(folio) in split_huge_pages_all()
4276 || folio_test_hugetlb(folio) in split_huge_pages_all()
4277 || !folio_test_lru(folio)) in split_huge_pages_all()
4281 folio_lock(folio); in split_huge_pages_all()
4282 nr_pages = folio_nr_pages(folio); in split_huge_pages_all()
4283 if (!split_folio(folio)) in split_huge_pages_all()
4286 folio_unlock(folio); in split_huge_pages_all()
4288 folio_put(folio); in split_huge_pages_all()
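
The debugfs "split all" walker (4250-4288) shows the canonical safe-split loop over arbitrary pages: pin the folio, re-check that it is still the folio for this page and still in the expected zone, skip non-LRU and hugetlb folios, then lock, split, unlock, unpin. A fragment-level sketch of the loop body; the pfn iteration, label names, and success counter are assumptions:

                folio = page_folio(page);
                if (!folio_try_get(folio))
                        continue;                       /* assumed: next pfn */
                if (unlikely(page_folio(page) != folio))
                        goto next;                      /* raced with a split or free */
                if (zone != folio_zone(folio))
                        goto next;
                if (!folio_test_large(folio) ||
                    folio_test_hugetlb(folio) ||
                    !folio_test_lru(folio))
                        goto next;

                folio_lock(folio);
                if (!split_folio(folio))
                        split++;                        /* assumed counter */
                folio_unlock(folio);
        next:                                           /* assumed label */
                folio_put(folio);
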
4341 struct folio *folio; in split_huge_pages_pid() local
4354 folio = folio_walk_start(&fw, vma, addr, 0); in split_huge_pages_pid()
4355 if (!folio) in split_huge_pages_pid()
4358 if (!is_transparent_hugepage(folio)) in split_huge_pages_pid()
4361 if (!folio_test_anon(folio)) { in split_huge_pages_pid()
4362 mapping = folio->mapping; in split_huge_pages_pid()
4367 if (target_order >= folio_order(folio)) in split_huge_pages_pid()
4376 if (!folio_test_private(folio) && in split_huge_pages_pid()
4377 !can_split_folio(folio, 0, NULL)) in split_huge_pages_pid()
4380 if (!folio_trylock(folio)) in split_huge_pages_pid()
4382 folio_get(folio); in split_huge_pages_pid()
4385 if (!folio_test_anon(folio) && folio->mapping != mapping) in split_huge_pages_pid()
4389 in_folio_offset >= folio_nr_pages(folio)) { in split_huge_pages_pid()
4390 if (!split_folio_to_order(folio, target_order)) in split_huge_pages_pid()
4393 struct page *split_at = folio_page(folio, in split_huge_pages_pid()
4395 if (!folio_split(folio, target_order, split_at, NULL)) in split_huge_pages_pid()
4401 folio_unlock(folio); in split_huge_pages_pid()
4402 folio_put(folio); in split_huge_pages_pid()
4449 struct folio *folio = filemap_get_folio(mapping, index); in split_huge_pages_in_file() local
4452 if (IS_ERR(folio)) in split_huge_pages_in_file()
4455 if (!folio_test_large(folio)) in split_huge_pages_in_file()
4459 nr_pages = folio_nr_pages(folio); in split_huge_pages_in_file()
4461 if (target_order >= folio_order(folio)) in split_huge_pages_in_file()
4464 if (!folio_trylock(folio)) in split_huge_pages_in_file()
4467 if (folio->mapping != mapping) in split_huge_pages_in_file()
4471 if (!split_folio_to_order(folio, target_order)) in split_huge_pages_in_file()
4474 struct page *split_at = folio_page(folio, in split_huge_pages_in_file()
4476 if (!folio_split(folio, target_order, split_at, NULL)) in split_huge_pages_in_file()
4481 folio_unlock(folio); in split_huge_pages_in_file()
4483 folio_put(folio); in split_huge_pages_in_file()
4593 struct folio *folio = page_folio(page); in set_pmd_migration_entry() local
4609 anon_exclusive = folio_test_anon(folio) && PageAnonExclusive(page); in set_pmd_migration_entry()
4610 if (anon_exclusive && folio_try_share_anon_rmap_pmd(folio, page)) { in set_pmd_migration_entry()
4616 folio_mark_dirty(folio); in set_pmd_migration_entry()
4633 folio_remove_rmap_pmd(folio, page, vma); in set_pmd_migration_entry()
4634 folio_put(folio); in set_pmd_migration_entry()
4642 struct folio *folio = page_folio(new); in remove_migration_pmd() local
4654 folio_get(folio); in remove_migration_pmd()
4655 pmde = folio_mk_pmd(folio, READ_ONCE(vma->vm_page_prot)); in remove_migration_pmd()
4665 if (folio_test_dirty(folio) && is_migration_entry_dirty(entry)) in remove_migration_pmd()
4668 if (folio_test_anon(folio)) { in remove_migration_pmd()
4674 folio_add_anon_rmap_pmd(folio, new, vma, haddr, rmap_flags); in remove_migration_pmd()
4676 folio_add_file_rmap_pmd(folio, new, vma); in remove_migration_pmd()
4678 VM_BUG_ON(pmd_write(pmde) && folio_test_anon(folio) && !PageAnonExclusive(new)); in remove_migration_pmd()