Lines matching refs:folio (references to struct folio in the kernel's reverse-mapping code, mm/rmap.c)

500 struct anon_vma *folio_get_anon_vma(struct folio *folio)  in folio_get_anon_vma()  argument
506 anon_mapping = (unsigned long)READ_ONCE(folio->mapping); in folio_get_anon_vma()
509 if (!folio_mapped(folio)) in folio_get_anon_vma()
525 if (!folio_mapped(folio)) { in folio_get_anon_vma()
544 struct anon_vma *folio_lock_anon_vma_read(struct folio *folio, in folio_lock_anon_vma_read() argument
553 anon_mapping = (unsigned long)READ_ONCE(folio->mapping); in folio_lock_anon_vma_read()
556 if (!folio_mapped(folio)) in folio_lock_anon_vma_read()
566 if (unlikely((unsigned long)READ_ONCE(folio->mapping) != in folio_lock_anon_vma_read()
578 if (!folio_mapped(folio)) { in folio_lock_anon_vma_read()
597 if (!folio_mapped(folio)) { in folio_lock_anon_vma_read()
611 if (unlikely((unsigned long)READ_ONCE(folio->mapping) != in folio_lock_anon_vma_read()
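
folio_get_anon_vma() and folio_lock_anon_vma_read() both recover the anon_vma from folio->mapping under RCU and re-check folio_mapped() to cope with the folio being unmapped concurrently; the first only pins the anon_vma with a reference, the second also takes its read lock for an rmap walk. A minimal sketch of the pin-style usage (roughly what the migration code does with an anonymous, non-KSM folio):

        struct anon_vma *anon_vma = NULL;

        /* Pin the anon_vma so it cannot be freed while the folio is unmapped. */
        if (folio_test_anon(folio) && !folio_test_ksm(folio))
                anon_vma = folio_get_anon_vma(folio);

        /* ... unmap the folio, copy it, remap the new copy ... */

        if (anon_vma)
                put_anon_vma(anon_vma);         /* drop the pin taken above */
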
777 struct folio *folio = page_folio(page); in page_address_in_vma() local
780 if (folio_test_anon(folio)) { in page_address_in_vma()
781 struct anon_vma *page__anon_vma = folio_anon_vma(folio); in page_address_in_vma()
791 } else if (vma->vm_file->f_mapping != folio->mapping) { in page_address_in_vma()
796 pgoff = folio->index + folio_page_idx(folio, page); in page_address_in_vma()
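
page_address_in_vma() maps a page back to the user virtual address at which a given VMA maps it, returning -EFAULT when the page does not belong to that VMA (mismatched anon_vma, mismatched file mapping, or an offset outside the VMA). A hedged caller-side sketch:

        unsigned long addr = page_address_in_vma(page, vma);

        if (addr == -EFAULT)
                return;         /* this VMA does not map the page */

        /* addr is now the user address of 'page' inside 'vma' */
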
839 static bool folio_referenced_one(struct folio *folio, in folio_referenced_one() argument
843 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); in folio_referenced_one()
851 if (!folio_test_large(folio) || !pvmw.pte) { in folio_referenced_one()
853 mlock_vma_folio(folio, vma); in folio_referenced_one()
881 folio_test_anon(folio) && folio_test_swapbacked(folio) && in folio_referenced_one()
882 !folio_likely_mapped_shared(folio)) { in folio_referenced_one()
908 folio_test_large(folio) && in folio_referenced_one()
909 folio_within_vma(folio, vma)) { in folio_referenced_one()
913 e_align = ALIGN_DOWN(start + folio_size(folio) - 1, PMD_SIZE); in folio_referenced_one()
916 if ((s_align == e_align) && (ptes == folio_nr_pages(folio))) { in folio_referenced_one()
918 mlock_vma_folio(folio, vma); in folio_referenced_one()
925 folio_clear_idle(folio); in folio_referenced_one()
926 if (folio_test_clear_young(folio)) in folio_referenced_one()
976 int folio_referenced(struct folio *folio, int is_locked, in folio_referenced() argument
981 .mapcount = folio_mapcount(folio), in folio_referenced()
996 if (!folio_raw_mapping(folio)) in folio_referenced()
999 if (!is_locked && (!folio_test_anon(folio) || folio_test_ksm(folio))) { in folio_referenced()
1000 we_locked = folio_trylock(folio); in folio_referenced()
1005 rmap_walk(folio, &rwc); in folio_referenced()
1009 folio_unlock(folio); in folio_referenced()
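
folio_referenced() drives folio_referenced_one() over every VMA mapping the folio, test-and-clearing the accessed/young bits, and returns the number of PTEs that referenced the folio (or -1 if the rmap lock was contended under try_lock); *vm_flags accumulates flags, such as VM_LOCKED, of the VMAs encountered. A sketch of the reclaim-style call, assuming the folio is already locked and memcg may be NULL:

        unsigned long vm_flags;
        int referenced_ptes;

        /* second argument: 1 == caller already holds the folio lock */
        referenced_ptes = folio_referenced(folio, 1, memcg, &vm_flags);

        if (vm_flags & VM_LOCKED) {
                /* mapped into an mlock()ed VMA: keep it off the reclaim lists */
        } else if (referenced_ptes) {
                /* recently used: a candidate for the active list */
        }
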
1076 static bool page_mkclean_one(struct folio *folio, struct vm_area_struct *vma, in page_mkclean_one() argument
1079 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, PVMW_SYNC); in page_mkclean_one()
1095 int folio_mkclean(struct folio *folio) in folio_mkclean() argument
1105 BUG_ON(!folio_test_locked(folio)); in folio_mkclean()
1107 if (!folio_mapped(folio)) in folio_mkclean()
1110 mapping = folio_mapping(folio); in folio_mkclean()
1114 rmap_walk(folio, &rwc); in folio_mkclean()
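
folio_mkclean() is the writeback-side walk: with the folio locked, page_mkclean_one() write-protects every mapping PTE (using a PVMW_SYNC walk) and transfers its dirty bit, and the return value says whether any PTE actually needed cleaning. A hedged sketch of the usual pattern, roughly what folio_clear_dirty_for_io() does before handing the folio to writeback:

        /* folio is locked and belongs to a file mapping */
        if (folio_mkclean(folio))
                folio_mark_dirty(folio);        /* a PTE was writable/dirty: keep the folio dirty */
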
1152 static __always_inline unsigned int __folio_add_rmap(struct folio *folio, in __folio_add_rmap() argument
1156 atomic_t *mapped = &folio->_nr_pages_mapped; in __folio_add_rmap()
1160 __folio_rmap_sanity_checks(folio, page, nr_pages, level); in __folio_add_rmap()
1164 if (!folio_test_large(folio)) { in __folio_add_rmap()
1165 nr = atomic_inc_and_test(&folio->_mapcount); in __folio_add_rmap()
1177 atomic_add(orig_nr_pages, &folio->_large_mapcount); in __folio_add_rmap()
1180 first = atomic_inc_and_test(&folio->_entire_mapcount); in __folio_add_rmap()
1184 *nr_pmdmapped = folio_nr_pages(folio); in __folio_add_rmap()
1194 atomic_inc(&folio->_large_mapcount); in __folio_add_rmap()
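
__folio_add_rmap() centralises the mapcount bookkeeping: a small folio only uses _mapcount, while a large folio also maintains _large_mapcount, _nr_pages_mapped and, for a PMD-level mapping, _entire_mapcount. Readers normally go through the accessor helpers rather than the raw atomics; a hedged sketch:

        /* how many page-table entries map any part of this folio? */
        int mapcount = folio_mapcount(folio);

        /* cheap heuristic used elsewhere in this file (see folio_referenced_one()) */
        bool maybe_shared = folio_likely_mapped_shared(folio);

        if (mapcount && !maybe_shared) {
                /* probably mapped by a single process only */
        }
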
1209 void folio_move_anon_rmap(struct folio *folio, struct vm_area_struct *vma) in folio_move_anon_rmap() argument
1213 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); in folio_move_anon_rmap()
1222 WRITE_ONCE(folio->mapping, anon_vma); in folio_move_anon_rmap()
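
folio_move_anon_rmap() re-points folio->mapping at the anon_vma of the VMA that now owns the folio exclusively; the write-fault reuse path calls it, with the folio locked, once it has proven no other reference can exist. A deliberately simplified sketch; the real reuse check also deals with the swap cache, LRU pinning and KSM:

        if (folio_trylock(folio)) {
                if (folio_ref_count(folio) == 1) {
                        /* sole owner: make the rmap point at this VMA's anon_vma */
                        folio_move_anon_rmap(folio, vma);
                }
                folio_unlock(folio);
        }
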
1232 static void __folio_set_anon(struct folio *folio, struct vm_area_struct *vma, in __folio_set_anon() argument
1253 WRITE_ONCE(folio->mapping, (struct address_space *) anon_vma); in __folio_set_anon()
1254 folio->index = linear_page_index(vma, address); in __folio_set_anon()
1264 static void __page_check_anon_rmap(struct folio *folio, struct page *page, in __page_check_anon_rmap() argument
1278 VM_BUG_ON_FOLIO(folio_anon_vma(folio)->root != vma->anon_vma->root, in __page_check_anon_rmap()
1279 folio); in __page_check_anon_rmap()
1284 static void __folio_mod_stat(struct folio *folio, int nr, int nr_pmdmapped) in __folio_mod_stat() argument
1289 idx = folio_test_anon(folio) ? NR_ANON_MAPPED : NR_FILE_MAPPED; in __folio_mod_stat()
1290 __lruvec_stat_mod_folio(folio, idx, nr); in __folio_mod_stat()
1293 if (folio_test_anon(folio)) { in __folio_mod_stat()
1295 __lruvec_stat_mod_folio(folio, idx, nr_pmdmapped); in __folio_mod_stat()
1298 idx = folio_test_swapbacked(folio) ? in __folio_mod_stat()
1300 __mod_node_page_state(folio_pgdat(folio), idx, in __folio_mod_stat()
1306 static __always_inline void __folio_add_anon_rmap(struct folio *folio, in __folio_add_anon_rmap() argument
1312 VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio); in __folio_add_anon_rmap()
1314 nr = __folio_add_rmap(folio, page, nr_pages, level, &nr_pmdmapped); in __folio_add_anon_rmap()
1316 if (likely(!folio_test_ksm(folio))) in __folio_add_anon_rmap()
1317 __page_check_anon_rmap(folio, page, vma, address); in __folio_add_anon_rmap()
1319 __folio_mod_stat(folio, nr, nr_pmdmapped); in __folio_add_anon_rmap()
1337 (folio_test_large(folio) && in __folio_add_anon_rmap()
1338 folio_entire_mapcount(folio) > 1)) && in __folio_add_anon_rmap()
1339 PageAnonExclusive(cur_page), folio); in __folio_add_anon_rmap()
1348 if (!folio_test_large(folio)) in __folio_add_anon_rmap()
1349 mlock_vma_folio(folio, vma); in __folio_add_anon_rmap()
1368 void folio_add_anon_rmap_ptes(struct folio *folio, struct page *page, in folio_add_anon_rmap_ptes() argument
1372 __folio_add_anon_rmap(folio, page, nr_pages, vma, address, flags, in folio_add_anon_rmap_ptes()
1389 void folio_add_anon_rmap_pmd(struct folio *folio, struct page *page, in folio_add_anon_rmap_pmd() argument
1393 __folio_add_anon_rmap(folio, page, HPAGE_PMD_NR, vma, address, flags, in folio_add_anon_rmap_pmd()
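
folio_add_anon_rmap_ptes()/_pmd() (and the single-PTE wrapper folio_add_anon_rmap_pte()) map an anon folio that already has folio->mapping and folio->index set, e.g. on swap-in or when re-establishing PTEs after a THP split; RMAP_EXCLUSIVE tells rmap that the caller owns the page exclusively. A hedged swap-in style sketch for one PTE, assuming page, address and the mapped pte pointer come from the fault context:

        rmap_t rmap_flags = RMAP_NONE;
        pte_t entry = mk_pte(page, vma->vm_page_prot);

        if (exclusive)                  /* no other process can map this page */
                rmap_flags |= RMAP_EXCLUSIVE;

        folio_add_anon_rmap_pte(folio, page, vma, address, rmap_flags);
        set_pte_at(vma->vm_mm, address, pte, entry);
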
1415 void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma, in folio_add_new_anon_rmap() argument
1418 const int nr = folio_nr_pages(folio); in folio_add_new_anon_rmap()
1422 VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio); in folio_add_new_anon_rmap()
1423 VM_WARN_ON_FOLIO(!exclusive && !folio_test_locked(folio), folio); in folio_add_new_anon_rmap()
1431 if (!folio_test_swapbacked(folio) && !(vma->vm_flags & VM_DROPPABLE)) in folio_add_new_anon_rmap()
1432 __folio_set_swapbacked(folio); in folio_add_new_anon_rmap()
1433 __folio_set_anon(folio, vma, address, exclusive); in folio_add_new_anon_rmap()
1435 if (likely(!folio_test_large(folio))) { in folio_add_new_anon_rmap()
1437 atomic_set(&folio->_mapcount, 0); in folio_add_new_anon_rmap()
1439 SetPageAnonExclusive(&folio->page); in folio_add_new_anon_rmap()
1440 } else if (!folio_test_pmd_mappable(folio)) { in folio_add_new_anon_rmap()
1444 struct page *page = folio_page(folio, i); in folio_add_new_anon_rmap()
1453 atomic_set(&folio->_large_mapcount, nr - 1); in folio_add_new_anon_rmap()
1454 atomic_set(&folio->_nr_pages_mapped, nr); in folio_add_new_anon_rmap()
1457 atomic_set(&folio->_entire_mapcount, 0); in folio_add_new_anon_rmap()
1459 atomic_set(&folio->_large_mapcount, 0); in folio_add_new_anon_rmap()
1460 atomic_set(&folio->_nr_pages_mapped, ENTIRELY_MAPPED); in folio_add_new_anon_rmap()
1462 SetPageAnonExclusive(&folio->page); in folio_add_new_anon_rmap()
1466 __folio_mod_stat(folio, nr, nr_pmdmapped); in folio_add_new_anon_rmap()
1467 mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1); in folio_add_new_anon_rmap()
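
folio_add_new_anon_rmap() is the "first mapping" variant: the folio is newly allocated and not yet visible to anyone else, so the mapcounts can be initialised non-atomically and __folio_set_anon() wires up folio->mapping and folio->index. Roughly the do_anonymous_page() pattern, simplified (counters, TLB/MMU-cache updates and error paths are omitted; addr and the mapped pte pointer are assumed from the fault context):

        struct folio *folio = vma_alloc_zeroed_movable_folio(vma, addr);
        pte_t entry;

        if (!folio)
                return VM_FAULT_OOM;
        __folio_mark_uptodate(folio);

        entry = mk_pte(&folio->page, vma->vm_page_prot);
        if (vma->vm_flags & VM_WRITE)
                entry = pte_mkwrite(pte_mkdirty(entry), vma);

        folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE);
        folio_add_lru_vma(folio, vma);
        set_pte_at(vma->vm_mm, addr, pte, entry);
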
1470 static __always_inline void __folio_add_file_rmap(struct folio *folio, in __folio_add_file_rmap() argument
1476 VM_WARN_ON_FOLIO(folio_test_anon(folio), folio); in __folio_add_file_rmap()
1478 nr = __folio_add_rmap(folio, page, nr_pages, level, &nr_pmdmapped); in __folio_add_file_rmap()
1479 __folio_mod_stat(folio, nr, nr_pmdmapped); in __folio_add_file_rmap()
1482 if (!folio_test_large(folio)) in __folio_add_file_rmap()
1483 mlock_vma_folio(folio, vma); in __folio_add_file_rmap()
1497 void folio_add_file_rmap_ptes(struct folio *folio, struct page *page, in folio_add_file_rmap_ptes() argument
1500 __folio_add_file_rmap(folio, page, nr_pages, vma, RMAP_LEVEL_PTE); in folio_add_file_rmap_ptes()
1513 void folio_add_file_rmap_pmd(struct folio *folio, struct page *page, in folio_add_file_rmap_pmd() argument
1517 __folio_add_file_rmap(folio, page, HPAGE_PMD_NR, vma, RMAP_LEVEL_PMD); in folio_add_file_rmap_pmd()
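
The file-backed counterpart needs no anon_vma work, only mapcount and NR_FILE_MAPPED accounting plus mlock handling; finishing a file fault maps one or more pages of the pagecache folio and then installs the PTEs. A hedged sketch of that order, close to what set_pte_range() does:

        /* map 'nr' consecutive pages of a pagecache folio starting at 'addr' */
        folio_add_file_rmap_ptes(folio, page, nr, vma);
        set_ptes(vma->vm_mm, addr, pte, mk_pte(page, vma->vm_page_prot), nr);
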
1523 static __always_inline void __folio_remove_rmap(struct folio *folio, in __folio_remove_rmap() argument
1527 atomic_t *mapped = &folio->_nr_pages_mapped; in __folio_remove_rmap()
1531 __folio_rmap_sanity_checks(folio, page, nr_pages, level); in __folio_remove_rmap()
1535 if (!folio_test_large(folio)) { in __folio_remove_rmap()
1536 nr = atomic_add_negative(-1, &folio->_mapcount); in __folio_remove_rmap()
1540 atomic_sub(nr_pages, &folio->_large_mapcount); in __folio_remove_rmap()
1552 atomic_dec(&folio->_large_mapcount); in __folio_remove_rmap()
1553 last = atomic_add_negative(-1, &folio->_entire_mapcount); in __folio_remove_rmap()
1557 nr_pmdmapped = folio_nr_pages(folio); in __folio_remove_rmap()
1578 if (partially_mapped && folio_test_anon(folio) && in __folio_remove_rmap()
1579 !folio_test_partially_mapped(folio)) in __folio_remove_rmap()
1580 deferred_split_folio(folio, true); in __folio_remove_rmap()
1582 __folio_mod_stat(folio, -nr, -nr_pmdmapped); in __folio_remove_rmap()
1592 munlock_vma_folio(folio, vma); in __folio_remove_rmap()
1606 void folio_remove_rmap_ptes(struct folio *folio, struct page *page, in folio_remove_rmap_ptes() argument
1609 __folio_remove_rmap(folio, page, nr_pages, vma, RMAP_LEVEL_PTE); in folio_remove_rmap_ptes()
1622 void folio_remove_rmap_pmd(struct folio *folio, struct page *page, in folio_remove_rmap_pmd() argument
1626 __folio_remove_rmap(folio, page, HPAGE_PMD_NR, vma, RMAP_LEVEL_PMD); in folio_remove_rmap_pmd()
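
__folio_remove_rmap() is the teardown side: unmap paths clear the PTE first, then drop the mapcount (which also updates the NR_*_MAPPED stats and may queue a partially mapped large folio for deferred splitting), and finally release the reference the page table held. A deliberately simplified single-PTE sketch; real zap code batches PTEs, defers TLB flushes and handles swap and device entries:

        pte_t pteval = ptep_get_and_clear(vma->vm_mm, addr, pte);

        if (pte_dirty(pteval))
                folio_mark_dirty(folio);
        folio_remove_rmap_pte(folio, page, vma);        /* drop the mapcount */
        folio_put(folio);                               /* drop the page-table reference */
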
1635 static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, in try_to_unmap_one() argument
1639 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); in try_to_unmap_one()
1668 if (folio_test_hugetlb(folio)) { in try_to_unmap_one()
1688 if (!folio_test_large(folio)) in try_to_unmap_one()
1689 mlock_vma_folio(folio, vma); in try_to_unmap_one()
1695 folio)) in try_to_unmap_one()
1704 pvmw.pmd, false, folio); in try_to_unmap_one()
1712 VM_BUG_ON_FOLIO(!pvmw.pte, folio); in try_to_unmap_one()
1715 subpage = folio_page(folio, pfn - folio_pfn(folio)); in try_to_unmap_one()
1717 anon_exclusive = folio_test_anon(folio) && in try_to_unmap_one()
1720 if (folio_test_hugetlb(folio)) { in try_to_unmap_one()
1721 bool anon = folio_test_anon(folio); in try_to_unmap_one()
1799 folio_mark_dirty(folio); in try_to_unmap_one()
1806 if (folio_test_hugetlb(folio)) { in try_to_unmap_one()
1807 hugetlb_count_sub(folio_nr_pages(folio), mm); in try_to_unmap_one()
1811 dec_mm_counter(mm, mm_counter(folio)); in try_to_unmap_one()
1826 dec_mm_counter(mm, mm_counter(folio)); in try_to_unmap_one()
1827 } else if (folio_test_anon(folio)) { in try_to_unmap_one()
1834 if (unlikely(folio_test_swapbacked(folio) != in try_to_unmap_one()
1835 folio_test_swapcache(folio))) { in try_to_unmap_one()
1841 if (!folio_test_swapbacked(folio)) { in try_to_unmap_one()
1851 ref_count = folio_ref_count(folio); in try_to_unmap_one()
1852 map_count = folio_mapcount(folio); in try_to_unmap_one()
1865 (!folio_test_dirty(folio) || in try_to_unmap_one()
1886 folio_set_swapbacked(folio); in try_to_unmap_one()
1902 folio_try_share_anon_rmap_pte(folio, subpage)) { in try_to_unmap_one()
1935 dec_mm_counter(mm, mm_counter_file(folio)); in try_to_unmap_one()
1938 if (unlikely(folio_test_hugetlb(folio))) in try_to_unmap_one()
1939 hugetlb_remove_rmap(folio); in try_to_unmap_one()
1941 folio_remove_rmap_pte(folio, subpage, vma); in try_to_unmap_one()
1944 folio_put(folio); in try_to_unmap_one()
1963 static int folio_not_mapped(struct folio *folio) in folio_not_mapped() argument
1965 return !folio_mapped(folio); in folio_not_mapped()
1979 void try_to_unmap(struct folio *folio, enum ttu_flags flags) in try_to_unmap() argument
1989 rmap_walk_locked(folio, &rwc); in try_to_unmap()
1991 rmap_walk(folio, &rwc); in try_to_unmap()
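
try_to_unmap() returns nothing; the caller decides success by re-checking folio_mapped() afterwards. Reclaim invokes it on a locked folio and usually batches the TLB flushes. A hedged sketch of the shrink_folio_list()-style call:

        enum ttu_flags flags = TTU_BATCH_FLUSH;

        if (folio_test_pmd_mappable(folio))
                flags |= TTU_SPLIT_HUGE_PMD;    /* split the PMD mapping first */

        try_to_unmap(folio, flags);
        if (folio_mapped(folio)) {
                /* at least one mapping survived: keep the folio and retry later */
        }
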
2000 static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma, in try_to_migrate_one() argument
2004 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); in try_to_migrate_one()
2027 split_huge_pmd_address(vma, address, true, folio); in try_to_migrate_one()
2040 if (folio_test_hugetlb(folio)) { in try_to_migrate_one()
2057 subpage = folio_page(folio, in try_to_migrate_one()
2058 pmd_pfn(*pvmw.pmd) - folio_pfn(folio)); in try_to_migrate_one()
2059 VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) || in try_to_migrate_one()
2060 !folio_test_pmd_mappable(folio), folio); in try_to_migrate_one()
2072 VM_BUG_ON_FOLIO(!pvmw.pte, folio); in try_to_migrate_one()
2076 if (folio_is_zone_device(folio)) { in try_to_migrate_one()
2087 VM_BUG_ON_FOLIO(folio_nr_pages(folio) > 1, folio); in try_to_migrate_one()
2088 subpage = &folio->page; in try_to_migrate_one()
2090 subpage = folio_page(folio, pfn - folio_pfn(folio)); in try_to_migrate_one()
2093 anon_exclusive = folio_test_anon(folio) && in try_to_migrate_one()
2096 if (folio_test_hugetlb(folio)) { in try_to_migrate_one()
2097 bool anon = folio_test_anon(folio); in try_to_migrate_one()
2169 folio_mark_dirty(folio); in try_to_migrate_one()
2174 if (folio_is_device_private(folio)) { in try_to_migrate_one()
2175 unsigned long pfn = folio_pfn(folio); in try_to_migrate_one()
2180 WARN_ON_ONCE(folio_try_share_anon_rmap_pte(folio, in try_to_migrate_one()
2207 folio_order(folio)); in try_to_migrate_one()
2214 if (folio_test_hugetlb(folio)) { in try_to_migrate_one()
2215 hugetlb_count_sub(folio_nr_pages(folio), mm); in try_to_migrate_one()
2219 dec_mm_counter(mm, mm_counter(folio)); in try_to_migrate_one()
2234 dec_mm_counter(mm, mm_counter(folio)); in try_to_migrate_one()
2240 if (folio_test_hugetlb(folio)) in try_to_migrate_one()
2249 VM_BUG_ON_PAGE(pte_write(pteval) && folio_test_anon(folio) && in try_to_migrate_one()
2253 if (folio_test_hugetlb(folio)) { in try_to_migrate_one()
2255 hugetlb_try_share_anon_rmap(folio)) { in try_to_migrate_one()
2263 folio_try_share_anon_rmap_pte(folio, subpage)) { in try_to_migrate_one()
2293 if (folio_test_hugetlb(folio)) in try_to_migrate_one()
2299 folio_order(folio)); in try_to_migrate_one()
2306 if (unlikely(folio_test_hugetlb(folio))) in try_to_migrate_one()
2307 hugetlb_remove_rmap(folio); in try_to_migrate_one()
2309 folio_remove_rmap_pte(folio, subpage, vma); in try_to_migrate_one()
2312 folio_put(folio); in try_to_migrate_one()
2328 void try_to_migrate(struct folio *folio, enum ttu_flags flags) in try_to_migrate() argument
2345 if (folio_is_zone_device(folio) && in try_to_migrate()
2346 (!folio_is_device_private(folio) && !folio_is_device_coherent(folio))) in try_to_migrate()
2357 if (!folio_test_ksm(folio) && folio_test_anon(folio)) in try_to_migrate()
2361 rmap_walk_locked(folio, &rwc); in try_to_migrate()
2363 rmap_walk(folio, &rwc); in try_to_migrate()
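
try_to_migrate() differs from try_to_unmap() in that every PTE is replaced by a migration entry rather than simply dropped, so user faults on those addresses wait until the new copy is wired up; it refuses zone-device folios other than device-private and device-coherent ones. A hedged sketch of the unmap step of folio migration, with src locked:

        if (folio_mapped(src))
                try_to_migrate(src, 0);         /* install migration entries */

        if (!folio_mapped(src)) {
                /*
                 * Safe to copy src into the destination folio now;
                 * remove_migration_ptes() later remaps the waiters to it.
                 */
        }
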
2374 static bool page_make_device_exclusive_one(struct folio *folio, in page_make_device_exclusive_one() argument
2378 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); in page_make_device_exclusive_one()
2390 address + folio_size(folio)), in page_make_device_exclusive_one()
2396 VM_BUG_ON_FOLIO(!pvmw.pte, folio); in page_make_device_exclusive_one()
2405 subpage = folio_page(folio, in page_make_device_exclusive_one()
2406 pte_pfn(ptent) - folio_pfn(folio)); in page_make_device_exclusive_one()
2415 folio_mark_dirty(folio); in page_make_device_exclusive_one()
2448 folio_remove_rmap_pte(folio, subpage, vma); in page_make_device_exclusive_one()
2471 static bool folio_make_device_exclusive(struct folio *folio, in folio_make_device_exclusive() argument
2491 if (!folio_test_anon(folio)) in folio_make_device_exclusive()
2494 rmap_walk(folio, &rwc); in folio_make_device_exclusive()
2496 return args.valid && !folio_mapcount(folio); in folio_make_device_exclusive()
2534 struct folio *folio = page_folio(pages[i]); in make_device_exclusive_range() local
2535 if (PageTail(pages[i]) || !folio_trylock(folio)) { in make_device_exclusive_range()
2536 folio_put(folio); in make_device_exclusive_range()
2541 if (!folio_make_device_exclusive(folio, mm, start, owner)) { in make_device_exclusive_range()
2542 folio_unlock(folio); in make_device_exclusive_range()
2543 folio_put(folio); in make_device_exclusive_range()
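
make_device_exclusive_range() gives a driver exclusive access to user pages for device atomics: GUP pins each page, folio_make_device_exclusive() swaps the mapping PTE for a device-exclusive entry (so CPU access faults until it is restored), and the owner cookie is echoed in the MMU_NOTIFY_EXCLUSIVE notifier events so the driver can recognise, and ignore, its own invalidation. A hedged driver-side sketch for a single page; my_driver_cookie is a placeholder, not a kernel symbol:

        struct page *page = NULL;
        int npages;

        npages = make_device_exclusive_range(mm, addr, addr + PAGE_SIZE,
                                             &page, my_driver_cookie);
        if (npages < 1 || !page)
                return -EBUSY;          /* exclusive access was not granted */

        /*
         * The page comes back locked with a reference held; program the
         * device mapping, then release it:
         */
        unlock_page(page);
        put_page(page);
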
2562 static struct anon_vma *rmap_walk_anon_lock(struct folio *folio, in rmap_walk_anon_lock() argument
2568 return rwc->anon_lock(folio, rwc); in rmap_walk_anon_lock()
2576 anon_vma = folio_anon_vma(folio); in rmap_walk_anon_lock()
2604 static void rmap_walk_anon(struct folio *folio, in rmap_walk_anon() argument
2612 anon_vma = folio_anon_vma(folio); in rmap_walk_anon()
2614 VM_BUG_ON_FOLIO(!anon_vma, folio); in rmap_walk_anon()
2616 anon_vma = rmap_walk_anon_lock(folio, rwc); in rmap_walk_anon()
2621 pgoff_start = folio_pgoff(folio); in rmap_walk_anon()
2622 pgoff_end = pgoff_start + folio_nr_pages(folio) - 1; in rmap_walk_anon()
2627 folio_nr_pages(folio)); in rmap_walk_anon()
2635 if (!rwc->rmap_one(folio, vma, address, rwc->arg)) in rmap_walk_anon()
2637 if (rwc->done && rwc->done(folio)) in rmap_walk_anon()
2654 static void rmap_walk_file(struct folio *folio, in rmap_walk_file() argument
2657 struct address_space *mapping = folio_mapping(folio); in rmap_walk_file()
2667 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); in rmap_walk_file()
2672 pgoff_start = folio_pgoff(folio); in rmap_walk_file()
2673 pgoff_end = pgoff_start + folio_nr_pages(folio) - 1; in rmap_walk_file()
2689 folio_nr_pages(folio)); in rmap_walk_file()
2697 if (!rwc->rmap_one(folio, vma, address, rwc->arg)) in rmap_walk_file()
2699 if (rwc->done && rwc->done(folio)) in rmap_walk_file()
2708 void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc) in rmap_walk() argument
2710 if (unlikely(folio_test_ksm(folio))) in rmap_walk()
2711 rmap_walk_ksm(folio, rwc); in rmap_walk()
2712 else if (folio_test_anon(folio)) in rmap_walk()
2713 rmap_walk_anon(folio, rwc, false); in rmap_walk()
2715 rmap_walk_file(folio, rwc, false); in rmap_walk()
2719 void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc) in rmap_walk_locked() argument
2722 VM_BUG_ON_FOLIO(folio_test_ksm(folio), folio); in rmap_walk_locked()
2723 if (folio_test_anon(folio)) in rmap_walk_locked()
2724 rmap_walk_anon(folio, rwc, true); in rmap_walk_locked()
2726 rmap_walk_file(folio, rwc, true); in rmap_walk_locked()
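
rmap_walk() and rmap_walk_locked() are the generic engine everything above is built on: rmap_walk_control supplies the per-mapping callback (rmap_one), an optional early-exit test (done), an optional anon_vma locking hook (anon_lock, e.g. folio_lock_anon_vma_read as try_to_unmap() uses) and a VMA filter (invalid_vma); the _locked variant expects the caller to already hold the relevant anon_vma or i_mmap lock. A hedged sketch of a custom walk; my_rmap_one and my_ctx are placeholders, not kernel symbols:

        static bool my_rmap_one(struct folio *folio, struct vm_area_struct *vma,
                                unsigned long address, void *arg)
        {
                struct my_ctx *ctx = arg;       /* hypothetical per-walk state */

                /* inspect or adjust the mapping of 'folio' at 'address' ... */
                return true;                    /* true: keep walking, false: stop */
        }

        struct my_ctx ctx = {};
        struct rmap_walk_control rwc = {
                .rmap_one  = my_rmap_one,
                .anon_lock = folio_lock_anon_vma_read,
                .arg       = &ctx,
        };

        rmap_walk(folio, &rwc);
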
2735 void hugetlb_add_anon_rmap(struct folio *folio, struct vm_area_struct *vma, in hugetlb_add_anon_rmap() argument
2738 VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio); in hugetlb_add_anon_rmap()
2739 VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio); in hugetlb_add_anon_rmap()
2741 atomic_inc(&folio->_entire_mapcount); in hugetlb_add_anon_rmap()
2742 atomic_inc(&folio->_large_mapcount); in hugetlb_add_anon_rmap()
2744 SetPageAnonExclusive(&folio->page); in hugetlb_add_anon_rmap()
2745 VM_WARN_ON_FOLIO(folio_entire_mapcount(folio) > 1 && in hugetlb_add_anon_rmap()
2746 PageAnonExclusive(&folio->page), folio); in hugetlb_add_anon_rmap()
2749 void hugetlb_add_new_anon_rmap(struct folio *folio, in hugetlb_add_new_anon_rmap() argument
2752 VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio); in hugetlb_add_new_anon_rmap()
2756 atomic_set(&folio->_entire_mapcount, 0); in hugetlb_add_new_anon_rmap()
2757 atomic_set(&folio->_large_mapcount, 0); in hugetlb_add_new_anon_rmap()
2758 folio_clear_hugetlb_restore_reserve(folio); in hugetlb_add_new_anon_rmap()
2759 __folio_set_anon(folio, vma, address, true); in hugetlb_add_new_anon_rmap()
2760 SetPageAnonExclusive(&folio->page); in hugetlb_add_new_anon_rmap()
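
The hugetlb variants keep only the folio-level counters (_entire_mapcount/_large_mapcount); there is no per-page accounting and no NR_ANON_MAPPED update, since hugetlb memory is accounted separately. A hedged sketch of how a hugetlb fault maps a freshly allocated anonymous folio:

        /* new anonymous hugetlb folio about to be mapped at 'address' */
        hugetlb_add_new_anon_rmap(folio, vma, address);
        /* ... make_huge_pte() and set_huge_pte_at() follow in the fault path ... */
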