/linux-6.3-rc2/mm/
folio-compat.c
    15  return folio_mapping(page_folio(page));  in page_mapping()
    21  return folio_unlock(page_folio(page));  in unlock_page()
    27  return folio_end_writeback(page_folio(page));  in end_page_writeback()
    33  return folio_wait_writeback(page_folio(page));  in wait_on_page_writeback()
    39  return folio_wait_stable(page_folio(page));  in wait_for_stable_page()
    45  folio_mark_accessed(page_folio(page));  in mark_page_accessed()
    51  return folio_start_writeback(page_folio(page));  in set_page_writeback()
    57  return folio_mark_dirty(page_folio(page));  in set_page_dirty()
    69  return folio_clear_dirty_for_io(page_folio(page));  in clear_page_dirty_for_io()
    83  folio_add_lru_vma(page_folio(page), vma);  in lru_cache_add_inactive_or_unevictable()
    [all …]
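Every match in folio-compat.c is the same compatibility shim: a legacy page-based entry point converts its argument with page_folio() and delegates to the folio equivalent. A minimal sketch of the pattern as the matches above suggest it (in-tree signatures may differ in detail):

    /* Legacy page API implemented on top of the folio API. */
    bool set_page_dirty(struct page *page)
    {
            /* Resolve the folio that contains this page, then delegate. */
            return folio_mark_dirty(page_folio(page));
    }

    void unlock_page(struct page *page)
    {
            folio_unlock(page_folio(page));
    }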
migrate_device.c
    224  folio_mark_dirty(page_folio(page));  in migrate_vma_collect_pmd()
    401  folio = page_folio(page);  in migrate_device_unmap()
    427  folio = page_folio(page);  in migrate_device_unmap()
    621  if (mem_cgroup_charge(page_folio(page), vma->vm_mm, GFP_KERNEL))  in migrate_vma_insert_page()
    767  r = migrate_folio_extra(mapping, page_folio(newpage),  in __migrate_device_pages()
    768  page_folio(page),  in __migrate_device_pages()
    771  r = migrate_folio(mapping, page_folio(newpage),  in __migrate_device_pages()
    772  page_folio(page), MIGRATE_SYNC_NO_COPY);  in __migrate_device_pages()
    852  src = page_folio(page);  in migrate_device_finalize()
    853  dst = page_folio(newpage);  in migrate_device_finalize()
memory-failure.c
    551  struct folio *folio = page_folio(page);  in collect_procs_anon()
    861  mem_cgroup_uncharge(page_folio(p));  in delete_from_lru_cache()
    878  struct folio *folio = page_folio(p);  in truncate_error_page()
    1107  struct folio *folio = page_folio(p);  in me_swapcache_clean()
    1331  struct folio *folio = page_folio(page);  in __get_hwpoison_page()
    1348  if (folio == page_folio(page))  in __get_hwpoison_page()
    1421  struct folio *folio = page_folio(page);  in __get_unpoison_page()
    1881  struct folio *folio = page_folio(page);  in __get_huge_page_for_hwpoison()
    1947  folio = page_folio(p);  in try_memory_failure_hugetlb()
    1959  folio = page_folio(p);  in try_memory_failure_hugetlb()
    [all …]
page_idle.c
    42  folio = page_folio(page);  in page_idle_get_folio()
    45  if (unlikely(page_folio(page) != folio || !folio_test_lru(folio))) {  in page_idle_get_folio()
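page_idle_get_folio() is a lockless lookup: it takes a reference on the folio and then re-reads page_folio(page) to confirm the page was not freed, reused, or split out of the folio in the meantime. A hedged reconstruction from the two matches above (the reference-taking step is an assumption, not shown in the hits):

    static struct folio *page_idle_get_folio(struct page *page)
    {
            struct folio *folio = page_folio(page);

            if (!folio_test_lru(folio) || !folio_try_get(folio))
                    return NULL;
            /*
             * Between page_folio() and folio_try_get() the page may have
             * been freed and reallocated into a different folio; re-check
             * before trusting the reference.
             */
            if (unlikely(page_folio(page) != folio || !folio_test_lru(folio))) {
                    folio_put(folio);
                    folio = NULL;
            }
            return folio;
    }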
rmap.c
    750  struct folio *folio = page_folio(page);  in page_address_in_vma()
    1118  struct folio *folio = page_folio(page);  in page_move_anon_rmap()
    1182  struct folio *folio = page_folio(page);  in __page_check_anon_rmap()
    1215  struct folio *folio = page_folio(page);  in page_add_anon_rmap()
    1316  struct folio *folio = page_folio(page);  in page_add_file_rmap()
    1370  struct folio *folio = page_folio(page);  in page_remove_rmap()
    2334  struct folio *folio = page_folio(pages[i]);  in make_device_exclusive_range()
    2536  struct folio *folio = page_folio(page);  in hugepage_add_anon_rmap()
gup.c
    52  struct folio *folio = page_folio(page);  in sanity_check_pinned_pages()
    74  folio = page_folio(page);  in try_get_folio()
    89  if (unlikely(page_folio(page) != folio)) {  in try_get_folio()
    216  struct folio *folio = page_folio(page);  in try_grab_page()
    257  gup_put_folio(page_folio(page), 1, FOLL_PIN);  in unpin_user_page()
    265  struct folio *folio = page_folio(next);  in gup_folio_range_next()
    279  struct folio *folio = page_folio(list[i]);  in gup_folio_next()
    283  if (page_folio(list[nr]) != folio)  in gup_folio_next()
    1918  struct folio *folio = page_folio(pages[i]);  in collect_longterm_unpinnable_pages()
    1968  struct folio *folio = page_folio(pages[i]);  in migrate_longterm_unpinnable_pages()
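try_get_folio() (matches at 74 and 89) wraps the same re-check in a retry loop: elevate the refcount on whatever folio the page currently belongs to, and if the page has moved to a different folio in the meantime, drop the references and start over. A simplified sketch, assuming folio_ref_try_add_rcu() as the elevation primitive (the real function carries extra sanity checks):

    static struct folio *try_get_folio(struct page *page, int refs)
    {
            struct folio *folio;

    retry:
            folio = page_folio(page);
            if (unlikely(!folio_ref_try_add_rcu(folio, refs)))
                    return NULL;
            /*
             * The page may have been split into, or merged out of, a
             * different folio while the refs were taken: undo and retry.
             */
            if (unlikely(page_folio(page) != folio)) {
                    folio_put_refs(folio, refs);
                    goto retry;
            }
            return folio;
    }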
huge_memory.c
    600  folio = page_folio(page);  in is_transparent_hugepage()
    665  if (mem_cgroup_charge(page_folio(page), vma->vm_mm, gfp)) {  in __do_huge_pmd_anonymous_page()
    1325  folio = page_folio(page);  in do_huge_pmd_wp_page()
    2297  if (folio && folio != page_folio(pmd_page(*pmd)))  in __split_huge_pmd()
    2477  if (!folio_test_swapcache(page_folio(head))) {  in __split_huge_page_tail()
    2515  struct folio *folio = page_folio(page);  in __split_huge_page()
    2543  struct folio *tail = page_folio(head + i);  in __split_huge_page()
    2645  struct folio *folio = page_folio(page);  in split_huge_page_to_list()
    2945  folio = page_folio(page);  in split_huge_pages_all()
    2949  if (unlikely(page_folio(page) != folio))  in split_huge_pages_all()
    [all …]
hugetlb.c
    1542  return page_folio(page);  in alloc_gigantic_folio()
    1553  return page_folio(page);  in alloc_gigantic_folio()
    1560  return page ? page_folio(page) : NULL;  in alloc_gigantic_folio()
    1834  folio = page_folio(page);  in update_and_free_pages_bulk()
    1857  struct folio *folio = page_folio(page);  in free_huge_page()
    2048  folio = page_folio(page);  in PageHuge()
    2161  return page_folio(page);  in alloc_buddy_hugetlb_folio()
    2253  folio = page_folio(page);  in remove_pool_huge_page()
    2279  struct folio *folio = page_folio(page);  in dissolve_free_huge_page()
    3624  inner_folio = page_folio(subpage);  in demote_free_hugetlb_folio()
    [all …]
debug.c
    51  struct folio *folio = page_folio(page);  in __dump_page()
truncate.c
    266  return truncate_inode_folio(mapping, page_folio(page));  in generic_error_remove_page()
    297  struct folio *folio = page_folio(page);  in invalidate_inode_page()
khugepaged.c
    504  release_pte_folio(page_folio(page));  in release_pte_page()
    978  if (unlikely(mem_cgroup_charge(page_folio(*hpage), mm, gfp)))  in alloc_charge_hpage()
    1132  mem_cgroup_uncharge(page_folio(hpage));  in collapse_huge_page()
    1935  folio = page_folio(page);  in collapse_file()
    2073  folio = page_folio(hpage);  in collapse_file()
    2134  mem_cgroup_uncharge(page_folio(hpage));  in collapse_file()
userfaultfd.c
    103  folio = page_folio(page);  in mfill_atomic_install_pte()
    193  if (mem_cgroup_charge(page_folio(page), dst_mm, GFP_KERNEL))  in mcopy_atomic_pte()
/linux-6.3-rc2/include/linux/
page_idle.h
    125  return folio_test_young(page_folio(page));  in page_is_young()
    130  folio_set_young(page_folio(page));  in set_page_young()
    135  return folio_test_clear_young(page_folio(page));  in test_and_clear_page_young()
    140  return folio_test_idle(page_folio(page));  in page_is_idle()
    145  folio_set_idle(page_folio(page));  in set_page_idle()
    150  folio_clear_idle(page_folio(page));  in clear_page_idle()
netfs.h
    94  folio_start_fscache(page_folio(page));  in set_page_fscache()
    99  folio_end_private_2(page_folio(page));  in end_page_fscache()
    104  folio_wait_private_2(page_folio(page));  in wait_on_page_fscache()
    109  return folio_wait_private_2_killable(page_folio(page));  in wait_on_page_fscache_killable()
pagemap.h
    374  return folio_file_mapping(page_folio(page));  in page_file_mapping()
    382  struct folio *folio = page_folio(page);  in page_mapping_file()
    461  folio_attach_private(page_folio(page), data);  in attach_page_private()
    466  return folio_detach_private(page_folio(page));  in detach_page_private()
    923  return folio_trylock(page_folio(page));  in trylock_page()
    971  folio = page_folio(page);  in lock_page()
    1037  folio_wait_locked(page_folio(page));  in wait_on_page_locked()
    1042  return folio_wait_locked_killable(page_folio(page));  in wait_on_page_locked_killable()
    1056  __folio_mark_dirty(page_folio(page), mapping, warn);  in __set_page_dirty()
    1072  return folio_write_one(page_folio(page));  in write_one_page()
    [all …]
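pagemap.h carries the header-inline half of the compatibility layer. lock_page() (match at 971) is typical: resolve the folio once, then try the fast path before sleeping. A sketch consistent with the matches above (the might_sleep() call and the __folio_lock() slow-path name are assumptions):

    static inline void lock_page(struct page *page)
    {
            struct folio *folio;

            might_sleep();
            folio = page_folio(page);
            if (!folio_trylock(folio))
                    __folio_lock(folio);
    }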
page-flags.h
    273  #define page_folio(p) (_Generic((p), \  (macro definition)
    548  return folio_test_swapcache(page_folio(page));  in PageSwapCache()
    662  return folio_test_anon(page_folio(page));  in PageAnon()
    692  return folio_test_ksm(page_folio(page));  in PageKsm()
    729  return folio_test_uptodate(page_folio(page));  in PageUptodate()
    771  folio_start_writeback_keepwrite(page_folio(page));  in set_page_writeback_keepwrite()
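The match at line 273 is the definition the rest of this list funnels through: page_folio() is a type-generic macro that maps a page to its compound head and casts the result, with _Generic branches to preserve constness. Roughly as follows, with line continuations condensed (the _compound_head() helper is stated here from memory of this era of the tree, as an assumption):

    #define page_folio(p) (_Generic((p),                                   \
            const struct page *: (const struct folio *)_compound_head(p),  \
            struct page *:       (struct folio *)_compound_head(p)))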
mm_inline.h
    34  return folio_is_file_lru(page_folio(page));  in page_is_file_lru()
    329  lruvec_add_folio(lruvec, page_folio(page));  in add_page_to_lru_list()
    363  lruvec_del_folio(lruvec, page_folio(page));  in del_page_from_lru_list()
fsverity.h
    259  return fsverity_verify_blocks(page_folio(page), PAGE_SIZE, 0);  in fsverity_verify_page()
mm.h
    981  mapcount += folio_entire_mapcount(page_folio(page));  in page_mapcount()
    1010  return folio_total_mapcount(page_folio(page));  in total_mapcount()
    1045  return folio_large_is_mapped(page_folio(page));  in page_mapped()
    1059  return page_folio(page);  in virt_to_folio()
    1281  folio_get(page_folio(page));  in get_page()
    1370  struct folio *folio = page_folio(page);  in put_page()
    1711  return page_folio(pfn_to_page(pfn));  in pfn_folio()
    1758  return folio_maybe_dma_pinned(page_folio(page));  in page_maybe_dma_pinned()
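mm.h shows that even the core refcounting entry points resolve to folio operations: get_page() (line 1281) is folio_get() on the containing folio, and put_page() (line 1370) looks the folio up first so the final reference is dropped on the right object. A hedged sketch of the pair (the in-tree put_page() also special-cases devmap-managed pages, omitted here):

    static inline void get_page(struct page *page)
    {
            folio_get(page_folio(page));
    }

    static inline void put_page(struct page *page)
    {
            struct folio *folio = page_folio(page);

            /* In-tree version: devmap-managed pages take a detour here. */
            folio_put(folio);
    }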
hugetlb.h
    759  return hugetlb_folio_subpool(page_folio(hpage));  in hugetlb_page_subpool()
    771  hugetlb_set_folio_subpool(page_folio(hpage), subpool);  in hugetlb_set_page_subpool()
    866  return folio_hstate(page_folio(page));  in page_hstate()
writeback.h
    386  folio_account_redirty(page_folio(page));  in account_page_redirty()
page_ref.h
    94  return folio_ref_count(page_folio(page));  in page_count()
/linux-6.3-rc2/mm/damon/
ops-common.c
    30  folio = page_folio(page);  in damon_get_folio()
    33  if (unlikely(page_folio(page) != folio || !folio_test_lru(folio))) {  in damon_get_folio()
/linux-6.3-rc2/fs/9p/
vfs_addr.c
    200  struct folio *folio = page_folio(page);  in v9fs_vfs_writepage()
    299  struct folio *folio = page_folio(subpage);  in v9fs_write_end()
/linux-6.3-rc2/fs/afs/
write.c
    157  struct folio *folio = page_folio(subpage);  in afs_write_end()
    555  folio = page_folio(pvec.pages[i]);  in afs_extend_writeback()
    907  struct folio *folio = page_folio(vmf->page);  in afs_page_mkwrite()