Lines matching refs: folio (Linux kernel memory-failure / hwpoison handling code)
230 struct folio *folio = page_folio(p); in hwpoison_filter_dev() local
238 mapping = folio_mapping(folio); in hwpoison_filter_dev()
381 void shake_folio(struct folio *folio) in shake_folio() argument
383 if (folio_test_hugetlb(folio)) in shake_folio()
389 if (folio_test_slab(folio)) in shake_folio()
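
The three shake_folio() lines above (381-389) give only the early-return checks. A minimal sketch of how the helper plausibly reads as a whole, assuming the usual memory-failure source context and headers; the closing lru_add_drain_all() step is an assumption and does not appear in the folio-matching lines:

void shake_folio(struct folio *folio)
{
        if (folio_test_hugetlb(folio))
                return;
        /* Slab folios are not on the LRU; there is nothing to shake loose. */
        if (folio_test_slab(folio))
                return;

        lru_add_drain_all();    /* assumed: flush per-CPU LRU caches */
}
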
609 static void collect_procs_anon(const struct folio *folio, in collect_procs_anon() argument
617 av = folio_lock_anon_vma_read(folio, NULL); in collect_procs_anon()
621 pgoff = page_pgoff(folio, page); in collect_procs_anon()
647 static void collect_procs_file(const struct folio *folio, in collect_procs_file() argument
653 struct address_space *mapping = folio->mapping; in collect_procs_file()
658 pgoff = page_pgoff(folio, page); in collect_procs_file()
676 addr = page_address_in_vma(folio, page, vma); in collect_procs_file()
730 static void collect_procs(const struct folio *folio, const struct page *page, in collect_procs() argument
733 if (!folio->mapping) in collect_procs()
735 if (unlikely(folio_test_ksm(folio))) in collect_procs()
736 collect_procs_ksm(folio, page, tokill, force_early); in collect_procs()
737 else if (folio_test_anon(folio)) in collect_procs()
738 collect_procs_anon(folio, page, tokill, force_early); in collect_procs()
740 collect_procs_file(folio, page, tokill, force_early); in collect_procs()
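
Lines 730-740 show collect_procs() choosing a collector based on the folio type. A sketch of the full dispatcher, assuming the unlisted line between 738 and 740 (which contains no folio reference) is a plain else:

static void collect_procs(const struct folio *folio, const struct page *page,
                struct list_head *tokill, int force_early)
{
        /* Nothing to do for folios that are no longer mapped anywhere. */
        if (!folio->mapping)
                return;
        if (unlikely(folio_test_ksm(folio)))
                collect_procs_ksm(folio, page, tokill, force_early);
        else if (folio_test_anon(folio))
                collect_procs_anon(folio, page, tokill, force_early);
        else
                collect_procs_file(folio, page, tokill, force_early);
}
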
961 static int delete_from_lru_cache(struct folio *folio) in delete_from_lru_cache() argument
963 if (folio_isolate_lru(folio)) { in delete_from_lru_cache()
968 folio_clear_active(folio); in delete_from_lru_cache()
969 folio_clear_unevictable(folio); in delete_from_lru_cache()
975 mem_cgroup_uncharge(folio); in delete_from_lru_cache()
980 folio_put(folio); in delete_from_lru_cache()
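
delete_from_lru_cache() (lines 961-980) isolates the folio from the LRU, clears its LRU state, uncharges it from its memcg, and drops the isolation reference. A sketch of how those steps fit together; the return values (0 on success, -EIO when isolation fails) are assumptions:

static int delete_from_lru_cache(struct folio *folio)
{
        if (folio_isolate_lru(folio)) {
                /* Clear LRU state so later freeing paths do not complain. */
                folio_clear_active(folio);
                folio_clear_unevictable(folio);

                /*
                 * A poisoned folio may never drop to refcount 0, so it is
                 * uncharged from its memcg explicitly (assumed rationale for
                 * the mem_cgroup_uncharge() call at line 975).
                 */
                mem_cgroup_uncharge(folio);

                /* Drop the reference taken by folio_isolate_lru(). */
                folio_put(folio);
                return 0;
        }
        return -EIO;    /* assumed error code when isolation fails */
}
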
986 static int truncate_error_folio(struct folio *folio, unsigned long pfn, in truncate_error_folio() argument
992 int err = mapping->a_ops->error_remove_folio(mapping, folio); in truncate_error_folio()
996 else if (!filemap_release_folio(folio, GFP_NOIO)) in truncate_error_folio()
1005 if (mapping_evict_folio(mapping, folio)) in truncate_error_folio()
1073 struct folio *folio = page_folio(p); in me_pagecache_clean() local
1078 delete_from_lru_cache(folio); in me_pagecache_clean()
1084 if (folio_test_anon(folio)) { in me_pagecache_clean()
1096 mapping = folio_mapping(folio); in me_pagecache_clean()
1114 ret = truncate_error_folio(folio, page_to_pfn(p), mapping); in me_pagecache_clean()
1119 folio_unlock(folio); in me_pagecache_clean()
1131 struct folio *folio = page_folio(p); in me_pagecache_dirty() local
1132 struct address_space *mapping = folio_mapping(folio); in me_pagecache_dirty()
1169 struct folio *folio = page_folio(p); in me_swapcache_dirty() local
1173 folio_clear_dirty(folio); in me_swapcache_dirty()
1175 folio_clear_uptodate(folio); in me_swapcache_dirty()
1177 ret = delete_from_lru_cache(folio) ? MF_FAILED : MF_DELAYED; in me_swapcache_dirty()
1178 folio_unlock(folio); in me_swapcache_dirty()
1191 struct folio *folio = page_folio(p); in me_swapcache_clean() local
1194 delete_from_swap_cache(folio); in me_swapcache_clean()
1196 ret = delete_from_lru_cache(folio) ? MF_FAILED : MF_RECOVERED; in me_swapcache_clean()
1197 folio_unlock(folio); in me_swapcache_clean()
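
Lines 1169-1197 cover the swap-cache handlers. A sketch of me_swapcache_clean(), assuming the page_state-handler signature and a trailing has_extra_refcount() check, neither of which is visible in the folio-matching lines:

static int me_swapcache_clean(struct page_state *ps, struct page *p)
{
        struct folio *folio = page_folio(p);
        int ret;

        /* A clean swap-cache folio can simply be dropped from the cache. */
        delete_from_swap_cache(folio);

        ret = delete_from_lru_cache(folio) ? MF_FAILED : MF_RECOVERED;
        folio_unlock(folio);

        /* Assumed: report failure if unexpected extra references remain. */
        if (has_extra_refcount(ps, p, false))
                ret = MF_FAILED;

        return ret;
}
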
1213 struct folio *folio = page_folio(p); in me_huge_page() local
1218 mapping = folio_mapping(folio); in me_huge_page()
1220 res = truncate_error_folio(folio, page_to_pfn(p), mapping); in me_huge_page()
1223 folio_unlock(folio); in me_huge_page()
1225 folio_unlock(folio); in me_huge_page()
1231 folio_put(folio); in me_huge_page()
1406 struct folio *folio = page_folio(page); in __get_hwpoison_page() local
1410 ret = get_hwpoison_hugetlb_folio(folio, &hugetlb, false); in __get_hwpoison_page()
1413 if (folio == page_folio(page)) in __get_hwpoison_page()
1416 folio_put(folio); in __get_hwpoison_page()
1417 folio = page_folio(page); in __get_hwpoison_page()
1426 if (!HWPoisonHandlable(&folio->page, flags)) in __get_hwpoison_page()
1429 if (folio_try_get(folio)) { in __get_hwpoison_page()
1430 if (folio == page_folio(page)) in __get_hwpoison_page()
1434 folio_put(folio); in __get_hwpoison_page()
1505 struct folio *folio = page_folio(page); in __get_unpoison_page() local
1509 ret = get_hwpoison_hugetlb_folio(folio, &hugetlb, true); in __get_unpoison_page()
1512 if (folio == page_folio(page)) in __get_unpoison_page()
1515 folio_put(folio); in __get_unpoison_page()
1574 int unmap_poisoned_folio(struct folio *folio, unsigned long pfn, bool must_kill) in unmap_poisoned_folio() argument
1579 if (folio_test_swapcache(folio)) { in unmap_poisoned_folio()
1590 mapping = folio_mapping(folio); in unmap_poisoned_folio()
1591 if (!must_kill && !folio_test_dirty(folio) && mapping && in unmap_poisoned_folio()
1593 if (folio_mkclean(folio)) { in unmap_poisoned_folio()
1594 folio_set_dirty(folio); in unmap_poisoned_folio()
1602 if (folio_test_hugetlb(folio) && !folio_test_anon(folio)) { in unmap_poisoned_folio()
1610 mapping = hugetlb_folio_mapping_lock_write(folio); in unmap_poisoned_folio()
1613 folio_pfn(folio)); in unmap_poisoned_folio()
1617 try_to_unmap(folio, ttu|TTU_RMAP_LOCKED); in unmap_poisoned_folio()
1620 try_to_unmap(folio, ttu); in unmap_poisoned_folio()
1623 return folio_mapped(folio) ? -EBUSY : 0; in unmap_poisoned_folio()
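
unmap_poisoned_folio() (lines 1574-1623) is the densest cluster: the unmap is downgraded for swap-cache folios, a clean pagecache folio is write-protected rather than killed, shared hugetlb folios are unmapped under the i_mmap lock, and -EBUSY is returned if any mapping survives. A sketch of that control flow; the specific TTU flag set, the mapping_can_writeback() test, and the i_mmap_unlock_write() pairing are assumptions not present in the listed lines:

int unmap_poisoned_folio(struct folio *folio, unsigned long pfn, bool must_kill)
{
        /* Assumed flag set; only the branch structure is confirmed above. */
        enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_SYNC | TTU_HWPOISON;
        struct address_space *mapping;

        if (folio_test_swapcache(folio)) {
                /* Keep the poisoned page in swap cache: unmap without poison PTEs. */
                pr_err("%#lx: keeping poisoned page in swap cache\n", pfn);
                ttu &= ~TTU_HWPOISON;
        }

        /*
         * If the folio is clean pagecache and the caller does not insist on
         * killing, try to write-protect it so the error can be dropped.
         */
        mapping = folio_mapping(folio);
        if (!must_kill && !folio_test_dirty(folio) && mapping &&
            mapping_can_writeback(mapping)) {           /* assumed predicate */
                if (folio_mkclean(folio))
                        folio_set_dirty(folio);         /* was dirty after all */
                else
                        ttu &= ~TTU_HWPOISON;           /* clean: drop without killing */
        }

        if (folio_test_hugetlb(folio) && !folio_test_anon(folio)) {
                /*
                 * Shared hugetlb mappings may unshare PMDs during unmap, so
                 * the i_mmap lock is held across try_to_unmap().
                 */
                mapping = hugetlb_folio_mapping_lock_write(folio);
                if (!mapping) {
                        pr_info("%#lx: could not lock mapping for mapped hugetlb folio\n",
                                folio_pfn(folio));
                        return -EBUSY;
                }
                try_to_unmap(folio, ttu | TTU_RMAP_LOCKED);
                i_mmap_unlock_write(mapping);           /* assumed unlock pairing */
        } else {
                try_to_unmap(folio, ttu);
        }

        return folio_mapped(folio) ? -EBUSY : 0;
}
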
1630 static bool hwpoison_user_mappings(struct folio *folio, struct page *p, in hwpoison_user_mappings() argument
1636 bool mlocked = folio_test_mlocked(folio); in hwpoison_user_mappings()
1642 if (folio_test_reserved(folio) || folio_test_slab(folio) || in hwpoison_user_mappings()
1643 folio_test_pgtable(folio) || folio_test_offline(folio)) in hwpoison_user_mappings()
1645 if (!(folio_test_lru(folio) || folio_test_hugetlb(folio))) in hwpoison_user_mappings()
1652 if (!folio_mapped(folio)) in hwpoison_user_mappings()
1660 collect_procs(folio, p, &tokill, flags & MF_ACTION_REQUIRED); in hwpoison_user_mappings()
1662 unmap_success = !unmap_poisoned_folio(folio, pfn, flags & MF_MUST_KILL); in hwpoison_user_mappings()
1665 pfn, folio_mapcount(folio)); in hwpoison_user_mappings()
1672 shake_folio(folio); in hwpoison_user_mappings()
1684 forcekill = folio_test_dirty(folio) || (flags & MF_MUST_KILL) || in hwpoison_user_mappings()
1770 struct folio *folio = pfn_folio(pfn); in mf_generic_kill_procs() local
1782 cookie = dax_lock_folio(folio); in mf_generic_kill_procs()
1786 if (hwpoison_filter(&folio->page)) { in mf_generic_kill_procs()
1808 SetPageHWPoison(&folio->page); in mf_generic_kill_procs()
1817 collect_procs(folio, &folio->page, &to_kill, true); in mf_generic_kill_procs()
1819 unmap_and_kill(&to_kill, pfn, folio->mapping, folio->index, flags); in mf_generic_kill_procs()
1821 dax_unlock_folio(folio, cookie); in mf_generic_kill_procs()
1881 static inline struct llist_head *raw_hwp_list_head(struct folio *folio) in raw_hwp_list_head() argument
1883 return (struct llist_head *)&folio->_hugetlb_hwpoison; in raw_hwp_list_head()
1890 struct folio *folio = page_folio(page); in is_raw_hwpoison_page_in_hugepage() local
1893 if (!folio_test_hwpoison(folio)) in is_raw_hwpoison_page_in_hugepage()
1896 if (!folio_test_hugetlb(folio)) in is_raw_hwpoison_page_in_hugepage()
1903 if (folio_test_hugetlb_raw_hwp_unreliable(folio)) in is_raw_hwpoison_page_in_hugepage()
1908 raw_hwp_head = raw_hwp_list_head(folio); in is_raw_hwpoison_page_in_hugepage()
1921 static unsigned long __folio_free_raw_hwp(struct folio *folio, bool move_flag) in __folio_free_raw_hwp() argument
1927 head = llist_del_all(raw_hwp_list_head(folio)); in __folio_free_raw_hwp()
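
Lines 1881-1927 describe the per-hugetlb-folio list of raw poisoned pages: an llist_head overlaid on folio->_hugetlb_hwpoison, drained by __folio_free_raw_hwp(). A sketch of the data structure and the drain loop; struct raw_hwp_page, the llist_for_each_entry_safe() walk, and the num_poisoned_pages_sub() accounting are assumptions inferred from the listed calls:

/* One entry per raw (base) page that is poisoned inside the huge folio. */
struct raw_hwp_page {
        struct llist_node node;
        struct page *page;
};

static inline struct llist_head *raw_hwp_list_head(struct folio *folio)
{
        /* The list head lives in the folio itself (confirmed at line 1883). */
        return (struct llist_head *)&folio->_hugetlb_hwpoison;
}

static unsigned long __folio_free_raw_hwp(struct folio *folio, bool move_flag)
{
        unsigned long count = 0;
        struct llist_node *head;
        struct raw_hwp_page *p, *next;

        head = llist_del_all(raw_hwp_list_head(folio));
        llist_for_each_entry_safe(p, next, head, node) {
                if (move_flag)
                        SetPageHWPoison(p->page);       /* move poison to the raw page */
                else
                        num_poisoned_pages_sub(page_to_pfn(p->page), 1);
                kfree(p);
                count++;
        }
        return count;
}
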
1939 static int folio_set_hugetlb_hwpoison(struct folio *folio, struct page *page) in folio_set_hugetlb_hwpoison() argument
1944 int ret = folio_test_set_hwpoison(folio) ? -EHWPOISON : 0; in folio_set_hugetlb_hwpoison()
1951 if (folio_test_hugetlb_raw_hwp_unreliable(folio)) in folio_set_hugetlb_hwpoison()
1953 head = raw_hwp_list_head(folio); in folio_set_hugetlb_hwpoison()
1972 folio_set_hugetlb_raw_hwp_unreliable(folio); in folio_set_hugetlb_hwpoison()
1977 __folio_free_raw_hwp(folio, false); in folio_set_hugetlb_hwpoison()
1982 static unsigned long folio_free_raw_hwp(struct folio *folio, bool move_flag) in folio_free_raw_hwp() argument
1988 if (move_flag && folio_test_hugetlb_vmemmap_optimized(folio)) in folio_free_raw_hwp()
1995 if (folio_test_hugetlb_raw_hwp_unreliable(folio)) in folio_free_raw_hwp()
1998 return __folio_free_raw_hwp(folio, move_flag); in folio_free_raw_hwp()
2001 void folio_clear_hugetlb_hwpoison(struct folio *folio) in folio_clear_hugetlb_hwpoison() argument
2003 if (folio_test_hugetlb_raw_hwp_unreliable(folio)) in folio_clear_hugetlb_hwpoison()
2005 if (folio_test_hugetlb_vmemmap_optimized(folio)) in folio_clear_hugetlb_hwpoison()
2007 folio_clear_hwpoison(folio); in folio_clear_hugetlb_hwpoison()
2008 folio_free_raw_hwp(folio, true); in folio_clear_hugetlb_hwpoison()
2025 struct folio *folio = page_folio(page); in __get_huge_page_for_hwpoison() local
2029 if (!folio_test_hugetlb(folio)) in __get_huge_page_for_hwpoison()
2035 } else if (folio_test_hugetlb_freed(folio)) { in __get_huge_page_for_hwpoison()
2037 } else if (folio_test_hugetlb_migratable(folio)) { in __get_huge_page_for_hwpoison()
2038 ret = folio_try_get(folio); in __get_huge_page_for_hwpoison()
2047 if (folio_set_hugetlb_hwpoison(folio, page)) { in __get_huge_page_for_hwpoison()
2056 if (count_increased && folio_test_hugetlb_migratable(folio)) { in __get_huge_page_for_hwpoison()
2057 folio_clear_hugetlb_migratable(folio); in __get_huge_page_for_hwpoison()
2064 folio_put(folio); in __get_huge_page_for_hwpoison()
2078 struct folio *folio; in try_memory_failure_hugetlb() local
2091 folio = page_folio(p); in try_memory_failure_hugetlb()
2092 res = kill_accessing_process(current, folio_pfn(folio), flags); in try_memory_failure_hugetlb()
2104 folio = page_folio(p); in try_memory_failure_hugetlb()
2105 folio_lock(folio); in try_memory_failure_hugetlb()
2108 folio_clear_hugetlb_hwpoison(folio); in try_memory_failure_hugetlb()
2110 folio_set_hugetlb_migratable(folio); in try_memory_failure_hugetlb()
2111 folio_unlock(folio); in try_memory_failure_hugetlb()
2113 folio_put(folio); in try_memory_failure_hugetlb()
2122 folio_unlock(folio); in try_memory_failure_hugetlb()
2132 page_flags = folio->flags; in try_memory_failure_hugetlb()
2134 if (!hwpoison_user_mappings(folio, p, pfn, flags)) { in try_memory_failure_hugetlb()
2135 folio_unlock(folio); in try_memory_failure_hugetlb()
2148 static inline unsigned long folio_free_raw_hwp(struct folio *folio, bool flag) in folio_free_raw_hwp() argument
2203 struct folio *folio) in kill_procs_now() argument
2207 collect_procs(folio, p, &tokill, flags & MF_ACTION_REQUIRED); in kill_procs_now()
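
kill_procs_now() (lines 2203-2207) collects every task mapping the poisoned folio and kills it immediately. A sketch assuming the usual kill_procs() follow-up call, which contains no folio reference and so is absent from the listing:

static void kill_procs_now(struct page *p, unsigned long pfn, int flags,
                struct folio *folio)
{
        LIST_HEAD(tokill);

        collect_procs(folio, p, &tokill, flags & MF_ACTION_REQUIRED);
        /* Assumed: kill unconditionally, since the poison cannot be unmapped. */
        kill_procs(&tokill, true, pfn, flags);
}
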
2239 struct folio *folio; in memory_failure() local
2328 folio = page_folio(p); in memory_failure()
2331 folio_lock(folio); in memory_failure()
2334 folio_unlock(folio); in memory_failure()
2335 folio_put(folio); in memory_failure()
2339 folio_unlock(folio); in memory_failure()
2341 if (folio_test_large(folio)) { in memory_failure()
2355 folio_set_has_hwpoisoned(folio); in memory_failure()
2358 kill_procs_now(p, pfn, flags, folio); in memory_failure()
2364 folio = page_folio(p); in memory_failure()
2375 shake_folio(folio); in memory_failure()
2377 folio_lock(folio); in memory_failure()
2384 WARN_ON(folio_test_large(folio)); in memory_failure()
2393 page_flags = folio->flags; in memory_failure()
2400 if (!folio_test_lru(folio) && !folio_test_writeback(folio)) in memory_failure()
2407 folio_wait_writeback(folio); in memory_failure()
2413 if (!hwpoison_user_mappings(folio, p, pfn, flags)) { in memory_failure()
2421 if (folio_test_lru(folio) && !folio_test_swapcache(folio) && in memory_failure()
2422 folio->mapping == NULL) { in memory_failure()
2432 folio_unlock(folio); in memory_failure()
2556 struct folio *folio; in unpoison_memory() local
2568 folio = page_folio(p); in unpoison_memory()
2579 if (is_huge_zero_folio(folio)) { in unpoison_memory()
2592 if (folio_ref_count(folio) > 1) { in unpoison_memory()
2598 if (folio_test_slab(folio) || folio_test_pgtable(folio) || in unpoison_memory()
2599 folio_test_reserved(folio) || folio_test_offline(folio)) in unpoison_memory()
2602 if (folio_mapped(folio)) { in unpoison_memory()
2608 if (folio_mapping(folio)) { in unpoison_memory()
2616 if (folio_test_hugetlb(folio)) { in unpoison_memory()
2618 count = folio_free_raw_hwp(folio, false); in unpoison_memory()
2622 ret = folio_test_clear_hwpoison(folio) ? 0 : -EBUSY; in unpoison_memory()
2632 if (folio_test_hugetlb(folio)) { in unpoison_memory()
2634 count = folio_free_raw_hwp(folio, false); in unpoison_memory()
2636 folio_put(folio); in unpoison_memory()
2641 folio_put(folio); in unpoison_memory()
2643 folio_put(folio); in unpoison_memory()
2672 struct folio *folio = page_folio(page); in soft_offline_in_use_page() local
2674 bool huge = folio_test_hugetlb(folio); in soft_offline_in_use_page()
2683 if (!huge && folio_test_large(folio)) { in soft_offline_in_use_page()
2688 folio = page_folio(page); in soft_offline_in_use_page()
2691 folio_lock(folio); in soft_offline_in_use_page()
2693 folio_wait_writeback(folio); in soft_offline_in_use_page()
2695 folio_unlock(folio); in soft_offline_in_use_page()
2696 folio_put(folio); in soft_offline_in_use_page()
2701 if (!huge && folio_test_lru(folio) && !folio_test_swapcache(folio)) in soft_offline_in_use_page()
2706 ret = mapping_evict_folio(folio_mapping(folio), folio); in soft_offline_in_use_page()
2707 folio_unlock(folio); in soft_offline_in_use_page()
2715 isolated = isolate_folio_to_list(folio, &pagelist); in soft_offline_in_use_page()
2724 folio_put(folio); in soft_offline_in_use_page()