/mm/
swap.c
      97  void __folio_put(struct folio *folio)  in __folio_put() (argument)
     123  VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);  in lru_add()
     165  struct folio *folio = fbatch->folios[i];  in folio_batch_move_lru() (local)
     179  struct folio *folio, move_fn_t move_fn,  in __folio_batch_add_and_move() (argument)
     329  void folio_activate(struct folio *folio)  in folio_activate() (argument)
     342  void folio_activate(struct folio *folio)  in folio_activate() (argument)
     499  void folio_add_lru(struct folio *folio)  in folio_add_lru() (argument)
     502  folio_test_unevictable(folio), folio);  in folio_add_lru()
     705  void folio_deactivate(struct folio *folio)  in folio_deactivate() (argument)
     953  struct folio *folio = folios->folios[i];  in folios_put_refs() (local)
    [all …]
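
The fbatch->folios[i] references above (folio_batch_move_lru(), folios_put_refs()) are the standard folio_batch idiom: collect folios into a small fixed-size batch, then walk them in one pass. A minimal sketch of that loop using only the public folio_batch API from linux/pagevec.h; counting dirty folios is an illustrative payload, not what swap.c itself does:

    #include <linux/pagevec.h>
    #include <linux/page-flags.h>

    static unsigned int count_dirty_folios(struct folio_batch *fbatch)
    {
            unsigned int i, dirty = 0;

            for (i = 0; i < folio_batch_count(fbatch); i++) {
                    struct folio *folio = fbatch->folios[i];

                    if (folio_test_dirty(folio))
                            dirty++;
            }
            folio_batch_release(fbatch);    /* drop the refs the batch holds */
            return dirty;
    }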
|
filemap.c
     150  struct folio *folio)  in filemap_unaccount_folio() (argument)
     281  struct folio *folio;  in page_cache_delete_batch() (local)
     489  struct folio *folio;  in filemap_range_has_page() (local)
     649  struct folio *folio;  in filemap_range_has_writeback() (local)
     995  struct folio *folio;  in filemap_alloc_folio_noprof() (local)
    1184  key.folio = folio;  in folio_wake_bit()
    1692  wait->folio = folio;  in __folio_lock_async()
    1866  struct folio *folio;  in filemap_get_entry() (local)
    1913  struct folio *folio;  in __filemap_get_folio() (local)
    2033  struct folio *folio;  in find_get_entry() (local)
    [all …]
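
Most page-cache lookups funnel through __filemap_get_folio(), referenced above. A sketch of the common lookup-and-lock pattern; this assumes a recent tree, where the function returns an ERR_PTR (rather than NULL) when nothing is found and FGP_CREAT allocation fails:

    #include <linux/pagemap.h>

    static struct folio *lookup_and_lock(struct address_space *mapping,
                                         pgoff_t index)
    {
            struct folio *folio;

            folio = __filemap_get_folio(mapping, index, FGP_LOCK | FGP_CREAT,
                                        mapping_gfp_mask(mapping));
            if (IS_ERR(folio))
                    return NULL;    /* miss, and creation failed */

            /* locked, with a reference held; caller must unlock and put */
            return folio;
    }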
|
rmap.c
    1113  int folio_mkclean(struct folio *folio)  in folio_mkclean() (argument)
    1463  atomic_read(&folio->_mapcount) > 0, folio);  in __folio_add_anon_rmap()
    2340  !folio_test_pmd_mappable(folio), folio);  in try_to_migrate_one()
    2667  struct folio *folio, *fw_folio;  in make_device_exclusive() (local)
    2811  static void rmap_walk_anon(struct folio *folio,  in rmap_walk_anon() (argument)
    2879  VM_WARN_ON_FOLIO(folio && mapping != folio_mapping(folio), folio);  in __rmap_walk_file()
    2880  VM_WARN_ON_FOLIO(folio && pgoff_start != folio_pgoff(folio), folio);  in __rmap_walk_file()
    2881  VM_WARN_ON_FOLIO(folio && nr_pages != folio_nr_pages(folio), folio);  in __rmap_walk_file()
    2924  static void rmap_walk_file(struct folio *folio,  in rmap_walk_file() (argument)
    2938  __rmap_walk_file(folio, folio->mapping, folio->index,  in rmap_walk_file()
    [all …]
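
rmap_walk_anon() and rmap_walk_file() above are the two halves of the reverse-map walk: visit every VMA that maps a given folio. A sketch of a caller, assuming the rmap_walk() interface from linux/rmap.h (the walk takes the mapping locks it needs; the folio is normally locked by the caller). count_one() is a hypothetical callback, not a kernel API:

    #include <linux/rmap.h>

    static bool count_one(struct folio *folio, struct vm_area_struct *vma,
                          unsigned long addr, void *arg)
    {
            (*(int *)arg)++;
            return true;    /* true: continue the walk */
    }

    static int count_mappings(struct folio *folio)
    {
            int nr = 0;
            struct rmap_walk_control rwc = {
                    .rmap_one = count_one,
                    .arg = &nr,
            };

            rmap_walk(folio, &rwc);
            return nr;
    }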
|
page_io.c
      32  struct folio *folio = bio_first_folio_all(bio);  in __end_swap_bio_write() (local)
      60  struct folio *folio = bio_first_folio_all(bio);  in __end_swap_bio_read() (local)
     174  static bool is_folio_zero_filled(struct folio *folio)  in is_folio_zero_filled() (argument)
     204  static void swap_zeromap_folio_set(struct folio *folio)  in swap_zeromap_folio_set() (argument)
     224  static void swap_zeromap_folio_clear(struct folio *folio)  in swap_zeromap_folio_clear() (argument)
     438  bio_add_folio_nofail(bio, folio, folio_size(folio), 0);  in swap_writepage_bdev_async()
     451  VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio);  in __swap_writepage()
     508  static bool swap_read_folio_zeromap(struct folio *folio)  in swap_read_folio_zeromap()
     533  folio_zero_range(folio, 0, folio_size(folio));  in swap_read_folio_zeromap()
     618  VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);  in swap_read_folio()
    [all …]
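
is_folio_zero_filled() feeds the swap zeromap optimisation: a swapped-out folio of zeroes is recorded in a bitmap instead of doing real I/O, and reads are satisfied by folio_zero_range(). A sketch of such a check built only on kmap_local_folio() and memchr_inv(); the real page_io.c version may differ in detail:

    #include <linux/highmem.h>
    #include <linux/string.h>

    static bool folio_is_all_zeroes(struct folio *folio)
    {
            long i;

            for (i = 0; i < folio_nr_pages(folio); i++) {
                    void *kaddr = kmap_local_folio(folio, i * PAGE_SIZE);
                    bool zero = !memchr_inv(kaddr, 0, PAGE_SIZE);

                    kunmap_local(kaddr);
                    if (!zero)
                            return false;
            }
            return true;
    }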
|
truncate.c
      30  struct folio *folio;  in clear_shadow_entries() (local)
      65  struct folio *folio;  in truncate_folio_batch_exceptionals() (local)
     160  folio_invalidate(folio, 0, folio_size(folio));  in truncate_cleanup_folio()
     209  truncate_inode_folio(folio->mapping, folio);  in truncate_inode_partial_folio()
     267  truncate_inode_folio(folio->mapping, folio);  in truncate_inode_partial_folio()
     275  struct folio *folio)  in generic_error_remove_folio() (argument)
     350  struct folio *folio;  in truncate_inode_pages_range() (local)
     426  struct folio *folio = fbatch.folios[i];  in truncate_inode_pages_range() (local)
     525  struct folio *folio = fbatch.folios[i];  in mapping_try_invalidate() (local)
     668  struct folio *folio = fbatch.folios[i];  in invalidate_inode_pages2_range() (local)
    [all …]
|
mlock.c
      95  folio->mlock_count = !!folio_test_mlocked(folio);  in __mlock_folio()
     105  VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);  in __mlock_new_folio()
     169  static inline struct folio *mlock_lru(struct folio *folio)  in mlock_lru() (argument)
     174  static inline struct folio *mlock_new(struct folio *folio)  in mlock_new() (argument)
     190  struct folio *folio;  in mlock_folio_batch() (local)
     196  folio = (struct folio *)((unsigned long)folio - mlock);  in mlock_folio_batch()
     242  void mlock_folio(struct folio *folio)  in mlock_folio() (argument)
     267  void mlock_new_folio(struct folio *folio)  in mlock_new_folio() (argument)
     290  void munlock_folio(struct folio *folio)  in munlock_folio() (argument)
     358  struct folio *folio;  in mlock_pte_range() (local)
    [all …]
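
Line 196 above decodes mlock.c's pointer-tagging trick: mlock_lru()/mlock_new() store state flags in the low bits of each batched folio pointer, which are free because struct folio is word-aligned. A generic sketch of the encode/decode pair; the tag names here are illustrative stand-ins for the flag values mlock.c defines:

    #include <linux/mm.h>

    #define TAG_LRU  0x1UL
    #define TAG_NEW  0x2UL
    #define TAG_MASK (TAG_LRU | TAG_NEW)

    static inline struct folio *tag_folio(struct folio *folio, unsigned long tag)
    {
            /* low pointer bits are unused, so they can carry the tag */
            return (struct folio *)((unsigned long)folio | tag);
    }

    static inline struct folio *untag_folio(struct folio *folio, unsigned long *tag)
    {
            *tag = (unsigned long)folio & TAG_MASK;
            return (struct folio *)((unsigned long)folio - *tag);
    }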
|
swap_state.c
      99  VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);  in add_to_swap_cache()
     141  void __delete_from_swap_cache(struct folio *folio,  in __delete_from_swap_cache() (argument)
     174  void delete_from_swap_cache(struct folio *folio)  in delete_from_swap_cache() (argument)
     184  folio_ref_sub(folio, folio_nr_pages(folio));  in delete_from_swap_cache()
     224  void free_swap_cache(struct folio *folio)  in free_swap_cache() (argument)
     286  struct folio *folio;  in swap_cache_get_folio() (local)
     369  struct folio *folio;  in __read_swap_cache_async() (local)
     488  struct folio *folio;  in read_swap_cache_async() (local)
     588  struct folio *folio;  in swap_cluster_readahead() (local)
     733  struct folio *folio;  in swap_vma_readahead() (local)
    [all …]
|
migrate.c
     177  struct folio *folio = page_folio(page);  in putback_movable_ops_page() (local)
     238  struct folio *folio;  in putback_movable_pages() (local)
     278  struct folio *folio,  in try_to_map_unused_to_zeropage() (argument)
     317  struct folio *folio;  (member)
     542  struct folio *newfolio, struct folio *folio, int expected_count)  in __folio_migrate_mapping() (argument)
     673  struct folio *newfolio, struct folio *folio, int extra_count)  in folio_migrate_mapping() (argument)
     724  void folio_migrate_flags(struct folio *newfolio, struct folio *folio)  in folio_migrate_flags() (argument)
    1596  struct folio *folio, *folio2;  in migrate_hugetlbs() (local)
    2045  struct folio *folio, *folio2;  in migrate_pages() (local)
    2246  struct folio *folio;  in add_folio_for_migration() (local)
    [all …]
|
memory-failure.c
     230  struct folio *folio = page_folio(p);  in hwpoison_filter_dev() (local)
     381  void shake_folio(struct folio *folio)  in shake_folio() (argument)
    1073  struct folio *folio = page_folio(p);  in me_pagecache_clean() (local)
    1131  struct folio *folio = page_folio(p);  in me_pagecache_dirty() (local)
    1169  struct folio *folio = page_folio(p);  in me_swapcache_dirty() (local)
    1191  struct folio *folio = page_folio(p);  in me_swapcache_clean() (local)
    1213  struct folio *folio = page_folio(p);  in me_huge_page() (local)
    2078  struct folio *folio;  in try_memory_failure_hugetlb() (local)
    2203  struct folio *folio)  in kill_procs_now() (argument)
    2239  struct folio *folio;  in memory_failure() (local)
    [all …]
|
huge_memory.c
    1162  struct folio *folio;  in vma_alloc_anon_folio_pmd() (local)
    1221  struct folio *folio;  in __do_huge_pmd_anonymous_page() (local)
    1376  struct folio *folio;  (member)
    1496  .folio = folio,  in vmf_insert_folio_pmd()
    1633  .folio = folio,  in vmf_insert_folio_pud()
    1871  struct folio *folio;  in do_huge_zero_wp_pmd() (local)
    1902  struct folio *folio;  in do_huge_pmd_wp_page() (local)
    2026  struct folio *folio;  in do_huge_pmd_numa_page() (local)
    2108  struct folio *folio;  in madvise_free_huge_pmd() (local)
    2741  struct folio *folio;  in __split_huge_pud_locked() (local)
    [all …]
|
internal.h
     139  const struct folio *folio)  in folio_swap() (argument)
     443  void folio_activate(struct folio *folio);
     538  bool folio_isolate_lru(struct folio *folio);
     539  void folio_putback_lru(struct folio *folio);
     784  struct folio *folio = (struct folio *)page;  in page_rmappable_folio() (local)
     786  if (folio && folio_test_large(folio))  in page_rmappable_folio()
     793  struct folio *folio = (struct folio *)page;  in prep_compound_head() (local)
    1025  void mlock_folio(struct folio *folio);
    1041  void munlock_folio(struct folio *folio);
    1058  void mlock_new_folio(struct folio *folio);
    [all …]
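
folio_isolate_lru() and folio_putback_lru() (lines 538/539 above) always pair up: isolation takes the folio off its LRU list while holding an extra reference, putback re-adds it and drops that reference. A sketch of the pairing, usable only inside mm/ since both are declared in mm/internal.h:

    #include "internal.h"

    static void visit_off_lru(struct folio *folio)
    {
            if (!folio_isolate_lru(folio))
                    return;         /* not on an LRU, or raced with reclaim */

            /* off the LRU now; safe to inspect without reclaim touching it */
            folio_putback_lru(folio);       /* re-add, drop the isolation ref */
    }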
|
migrate_device.c
      76  struct folio *folio;  in migrate_vma_collect_pmd() (local)
     118  struct folio *folio;  in migrate_vma_collect_pmd() (local)
     208  if (fault_folio == folio || folio_trylock(folio)) {  in migrate_vma_collect_pmd()
     341  struct folio *folio = page_folio(page);  in migrate_vma_check_page() (local)
     390  struct folio *folio;  in migrate_device_unmap() (local)
     437  struct folio *folio;  in migrate_device_unmap() (local)
     443  remove_migration_ptes(folio, folio, 0);  in migrate_device_unmap()
     591  struct folio *folio = page_folio(page);  in migrate_vma_insert_page() (local)
     714  struct folio *newfolio, *folio;  in __migrate_device_pages() (local)
     910  struct folio *folio;  in migrate_device_pfn_lock() (local)
    [all …]
|
page_idle.c
      37  struct folio *folio;  in page_idle_get_folio() (local)
      43  if (!folio_test_lru(folio) || !folio_try_get(folio))  in page_idle_get_folio()
      47  folio = NULL;  in page_idle_get_folio()
      49  return folio;  in page_idle_get_folio()
      52  static bool page_idle_clear_pte_refs_one(struct folio *folio,  in page_idle_clear_pte_refs_one() (argument)
      94  static void page_idle_clear_pte_refs(struct folio *folio)  in page_idle_clear_pte_refs() (argument)
     106  if (!folio_mapped(folio) || !folio_raw_mapping(folio))  in page_idle_clear_pte_refs()
     109  need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);  in page_idle_clear_pte_refs()
     124  struct folio *folio;  in page_idle_bitmap_read() (local)
     144  if (folio) {  in page_idle_bitmap_read()
    [all …]
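
Lines 43-49 show the speculative pfn-to-folio pattern: only take a reference if the folio is on an LRU list, then re-check that it did not split or get reclaimed underneath (the same shape appears in damon_get_folio() further down). A sketch of that pattern, assuming the pfn is already known valid and online:

    #include <linux/mm.h>

    static struct folio *get_lru_folio(unsigned long pfn)
    {
            struct page *page = pfn_to_page(pfn);
            struct folio *folio = page_folio(page);

            if (!folio_test_lru(folio) || !folio_try_get(folio))
                    return NULL;
            if (unlikely(page_folio(page) != folio || !folio_test_lru(folio))) {
                    /* raced with a split or reclaim: back off */
                    folio_put(folio);
                    folio = NULL;
            }
            return folio;
    }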
|
swap.h
      17  void swap_read_folio(struct folio *folio, struct swap_iocb **plug);
      25  int swap_writeout(struct folio *folio, struct swap_iocb **swap_plug);
      26  void __swap_writepage(struct folio *folio, struct swap_iocb **swap_plug);
      57  int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
      59  void __delete_from_swap_cache(struct folio *folio,
      61  void delete_from_swap_cache(struct folio *folio);
      81  static inline unsigned int folio_swap_flags(struct folio *folio)  in folio_swap_flags() (argument)
     165  static inline int swap_writeout(struct folio *folio,  in swap_writeout() (argument)
     199  static inline void __delete_from_swap_cache(struct folio *folio,  in __delete_from_swap_cache() (argument)
     204  static inline void delete_from_swap_cache(struct folio *folio)  in delete_from_swap_cache() (argument)
    [all …]
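
The duplicated names above (swap_writeout at 25 and 165, delete_from_swap_cache at 61 and 204) are the header's stub convention: real declarations under CONFIG_SWAP, inline no-ops otherwise, so callers never need #ifdef. A condensed sketch of that convention:

    #ifdef CONFIG_SWAP
    void delete_from_swap_cache(struct folio *folio);
    #else
    static inline void delete_from_swap_cache(struct folio *folio)
    {
            /* no swap: nothing to remove from a swap cache */
    }
    #endif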
|
hugetlb.c
    1294  struct folio *folio;  in dequeue_hugetlb_folio_node_exact() (local)
    1337  struct folio *folio;  in dequeue_hugetlb_folio_nodemask() (local)
    1479  struct folio *folio;  in alloc_gigantic_folio() (local)
    1672  struct folio *folio;  in free_hpage_workfn() (local)
    1948  struct folio *folio;  in alloc_buddy_hugetlb_folio() (local)
    1996  struct folio *folio;  in only_alloc_fresh_hugetlb_folio() (local)
    2017  struct folio *folio;  in alloc_fresh_hugetlb_folio() (local)
    2206  struct folio *folio;  in dissolve_free_hugetlb_folios() (local)
    2284  struct folio *folio;  in alloc_migrate_hugetlb_folio() (local)
    2336  struct folio *folio;  in alloc_hugetlb_folio_reserve() (local)
    [all …]
|
gup.c
      56  struct folio *folio;  in sanity_check_pinned_pages() (local)
      81  struct folio *folio;  in try_get_folio() (local)
     293  struct folio *folio;  in unpin_user_pages_dirty_lock() (local)
     359  struct folio *folio;  in unpin_user_page_range_dirty_lock() (local)
     377  struct folio *folio;  in gup_fast_unpin_user_pages() (local)
     403  struct folio *folio;  in unpin_user_pages() (local)
     533  struct folio *folio;  in try_grab_folio_fast() (local)
     820  struct folio *folio;  in follow_page_pte() (local)
    2291  struct folio *folio;  in collect_longterm_unpinnable_folios() (local)
    2958  struct folio *folio;  in gup_fast_pmd_leaf() (local)
    [all …]
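
The unpin_* helpers above are the release side of the pin_user_pages() API: every successfully pinned page must be unpinned, optionally marked dirty. A minimal sketch of that calling convention (error handling trimmed):

    #include <linux/mm.h>
    #include <linux/slab.h>

    static int touch_user_range(unsigned long start, int npages)
    {
            struct page **pages;
            int pinned;

            pages = kvmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
            if (!pages)
                    return -ENOMEM;

            pinned = pin_user_pages_fast(start, npages, FOLL_WRITE, pages);
            if (pinned > 0)         /* ... use the pinned pages ... */
                    unpin_user_pages_dirty_lock(pages, pinned, true);

            kvfree(pages);
            return pinned < 0 ? pinned : 0;
    }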
|
shmem.c
     753  struct folio *folio;  in shmem_unused_huge_shrink() (local)
    1086  struct folio *folio;  in shmem_get_partial_folio() (local)
    1125  struct folio *folio;  in shmem_undo_range() (local)
    1433  struct folio *folio;  in shmem_find_swap_entries() (local)
    1760  struct folio *folio;  in shmem_swapin_cluster() (local)
    1905  struct folio *folio;  in shmem_alloc_folio() (local)
    2503  struct folio *folio;  in shmem_get_folio_gfp() (local)
    3227  struct folio *folio;  in shmem_mfill_atomic_pte() (local)
    3341  struct folio *folio;  in shmem_write_begin() (local)
    4151  struct folio *folio;  in shmem_symlink() (local)
    [all …]
|
khugepaged.c
     516  static void release_pte_folio(struct folio *folio)  in release_pte_folio() (argument)
     528  struct folio *folio, *tmp;  in release_pte_pages() (local)
     558  struct folio *folio = NULL;  in __collapse_huge_page_isolate() (local)
     661  VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);  in __collapse_huge_page_isolate()
    1086  struct folio *folio;  in alloc_charge_folio() (local)
    1116  struct folio *folio;  in collapse_huge_page() (local)
    1281  struct folio *folio = NULL;  in hpage_collapse_scan_pmd() (local)
    1513  struct folio *folio;  in collapse_pte_mapped_thp() (local)
    1853  struct folio *folio, *tmp, *new_folio;  in collapse_file() (local)
    1932  if (!folio || xa_is_value(folio)) {  in collapse_file()
    [all …]
|
vmscan.c
     511  struct folio *folio, int error)  in handle_write_error() (argument)
     867  void folio_putback_lru(struct folio *folio)  in folio_putback_lru() (argument)
    1119  struct folio *folio;  in shrink_folio_list() (local)
    1656  struct folio *folio, *next;  in reclaim_clean_pages_from_list() (local)
    1753  struct folio *folio;  in isolate_lru_folios() (local)
    2157  struct folio *folio;  in shrink_active_list() (local)
    2223  struct folio *folio;  in reclaim_folio_list() (local)
    3475  struct folio *folio = pfn_folio(pfn);  in get_pfn_folio() (local)
    3557  struct folio *folio;  in walk_pte_range() (local)
    3639  struct folio *folio;  in walk_pmd_range_locked() (local)
    [all …]
|
memory.c
     802  struct folio *folio;  in copy_nonpresent_pte() (local)
     989  struct folio *folio;  in copy_present_ptes() (local)
    1547  struct folio *folio;  in zap_present_ptes() (local)
    4283  struct folio *folio;  in __alloc_swap_folio() (local)
    4362  struct folio *folio;  in alloc_swap_folio() (local)
    4920  struct folio *folio;  in alloc_anon_folio() (local)
    5009  struct folio *folio;  in do_anonymous_page() (local)
    5128  struct folio *folio;  in __do_fault() (local)
    5346  struct folio *folio;  in finish_fault() (local)
    5556  struct folio *folio;  in do_read_fault() (local)
    [all …]
|
workingset.c
     232  static void *lru_gen_eviction(struct folio *folio)  in lru_gen_eviction() (argument)
     283  static void lru_gen_refault(struct folio *folio, void *shadow)  in lru_gen_refault() (argument)
     328  static void *lru_gen_eviction(struct folio *folio)  in lru_gen_eviction() (argument)
     339  static void lru_gen_refault(struct folio *folio, void *shadow)  in lru_gen_refault() (argument)
     389  VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);  in workingset_eviction()
     390  VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);  in workingset_eviction()
     391  VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);  in workingset_eviction()
     534  void workingset_refault(struct folio *folio, void *shadow)  in workingset_refault() (argument)
     543  VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);  in workingset_refault()
     589  void workingset_activation(struct folio *folio)  in workingset_activation() (argument)
    [all …]
|
readahead.c
     149  struct folio *folio;  in read_pages() (local)
     184  struct folio *folio;  in ractl_alloc_folio() (local)
     190  return folio;  in ractl_alloc_folio()
     255  if (folio && !xa_is_value(folio)) {  in page_cache_ra_unbounded()
     272  if (!folio)  in page_cache_ra_unbounded()
     443  if (!folio)  in ra_alloc_folio()
     624  struct folio *folio, unsigned long req_count)  in page_cache_async_ra() (argument)
     777  if (folio && !xa_is_value(folio))  in readahead_expand()
     781  if (!folio)  in readahead_expand()
     806  if (folio && !xa_is_value(folio))  in readahead_expand()
    [all …]
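
The xa_is_value() tests above distinguish a real folio from a shadow entry that eviction left behind in the page-cache slot (see workingset.c). A sketch of a lookup that ignores shadows, assuming filemap_get_entry() as declared in linux/pagemap.h in recent trees:

    #include <linux/pagemap.h>
    #include <linux/xarray.h>

    static struct folio *folio_or_null(struct address_space *mapping,
                                       pgoff_t index)
    {
            void *entry = filemap_get_entry(mapping, index);

            if (!entry || xa_is_value(entry))
                    return NULL;    /* empty slot or shadow entry */
            return entry;           /* a folio, with a reference held */
    }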
|
ksm.c
     633  struct folio *folio;  in break_ksm() (local)
     744  struct folio *folio;  in get_mergeable_page() (local)
     891  struct folio *folio;  in ksm_get_folio() (local)
     972  struct folio *folio;  in remove_rmap_item_from_tree() (local)
    1082  struct folio *folio;  in remove_stable_node() (local)
    1355  struct folio *folio = page_folio(page);  in replace_page() (local)
    1811  struct folio *folio;  in stable_tree_search() (local)
    2499  struct folio *folio;  in scan_get_next_rmap_item() (local)
    2550  struct folio *folio;  in scan_get_next_rmap_item() (local)
    2968  struct folio *ksm_might_need_to_copy(struct folio *folio,  in ksm_might_need_to_copy() (argument)
    [all …]
|
/mm/damon/
ops-common.c
      29  struct folio *folio;  in damon_get_folio() (local)
      35  if (!folio_test_lru(folio) || !folio_try_get(folio))  in damon_get_folio()
      41  return folio;  in damon_get_folio()
      47  struct folio *folio;  in damon_ptep_mkold() (local)
      57  if (!folio)  in damon_ptep_mkold()
      80  if (!folio)  in damon_pmdp_mkold()
     159  void damon_folio_mkold(struct folio *folio)  in damon_folio_mkold() (argument)
     223  bool damon_folio_young(struct folio *folio)  in damon_folio_young() (argument)
     329  struct folio *folio;  in damon_migrate_folio_list() (local)
     334  struct folio *folio;  in damon_migrate_folio_list() (local)
    [all …]
|
paddr.c
      23  struct folio *folio = damon_get_folio(PHYS_PFN(paddr));  in damon_pa_mkold() (local)
      25  if (!folio)  in damon_pa_mkold()
      29  folio_put(folio);  in damon_pa_mkold()
      52  struct folio *folio = damon_get_folio(PHYS_PFN(paddr));  in damon_pa_young() (local)
      55  if (!folio)  in damon_pa_young()
      60  folio_put(folio);  in damon_pa_young()
     119  if (!folio)  in damon_pa_invalid_damos_folio()
     135  struct folio *folio;  in damon_pa_pageout() (local)
     190  struct folio *folio;  in damon_pa_mark_accessed_or_deactivate() (local)
     237  struct folio *folio;  in damon_pa_migrate() (local)
    [all …]
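
damon_pa_mkold() and damon_pa_young() above share one shape: translate the physical address to a folio, operate on it, drop the reference. A sketch of that shape; damon_get_folio() is the DAMON-internal helper from ops-common.c, and folio_mark_accessed() here is just an illustrative operation, not what paddr.c does:

    #include <linux/pfn.h>
    #include <linux/swap.h>
    #include "ops-common.h"

    static void damon_pa_touch(unsigned long paddr)
    {
            struct folio *folio = damon_get_folio(PHYS_PFN(paddr));

            if (!folio)
                    return;         /* not a usable LRU folio */
            folio_mark_accessed(folio);     /* illustrative operation */
            folio_put(folio);               /* drop damon_get_folio()'s ref */
    }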
|