Home
last modified time | relevance | path

Searched refs: page_folio (Results 1 – 25 of 75) sorted by relevance

123

/linux-6.3-rc2/mm/
A Dfolio-compat.c15 return folio_mapping(page_folio(page)); in page_mapping()
21 return folio_unlock(page_folio(page)); in unlock_page()
27 return folio_end_writeback(page_folio(page)); in end_page_writeback()
33 return folio_wait_writeback(page_folio(page)); in wait_on_page_writeback()
39 return folio_wait_stable(page_folio(page)); in wait_for_stable_page()
45 folio_mark_accessed(page_folio(page)); in mark_page_accessed()
51 return folio_start_writeback(page_folio(page)); in set_page_writeback()
57 return folio_mark_dirty(page_folio(page)); in set_page_dirty()
69 return folio_clear_dirty_for_io(page_folio(page)); in clear_page_dirty_for_io()
83 folio_add_lru_vma(page_folio(page), vma); in lru_cache_add_inactive_or_unevictable()
[all …]
A Dmigrate_device.c224 folio_mark_dirty(page_folio(page)); in migrate_vma_collect_pmd()
401 folio = page_folio(page); in migrate_device_unmap()
427 folio = page_folio(page); in migrate_device_unmap()
621 if (mem_cgroup_charge(page_folio(page), vma->vm_mm, GFP_KERNEL)) in migrate_vma_insert_page()
767 r = migrate_folio_extra(mapping, page_folio(newpage), in __migrate_device_pages()
768 page_folio(page), in __migrate_device_pages()
771 r = migrate_folio(mapping, page_folio(newpage), in __migrate_device_pages()
772 page_folio(page), MIGRATE_SYNC_NO_COPY); in __migrate_device_pages()
852 src = page_folio(page); in migrate_device_finalize()
853 dst = page_folio(newpage); in migrate_device_finalize()
A Dmemory-failure.c551 struct folio *folio = page_folio(page); in collect_procs_anon()
861 mem_cgroup_uncharge(page_folio(p)); in delete_from_lru_cache()
878 struct folio *folio = page_folio(p); in truncate_error_page()
1107 struct folio *folio = page_folio(p); in me_swapcache_clean()
1331 struct folio *folio = page_folio(page); in __get_hwpoison_page()
1348 if (folio == page_folio(page)) in __get_hwpoison_page()
1421 struct folio *folio = page_folio(page); in __get_unpoison_page()
1881 struct folio *folio = page_folio(page); in __get_huge_page_for_hwpoison()
1947 folio = page_folio(p); in try_memory_failure_hugetlb()
1959 folio = page_folio(p); in try_memory_failure_hugetlb()
[all …]
A Dpage_idle.c42 folio = page_folio(page); in page_idle_get_folio()
45 if (unlikely(page_folio(page) != folio || !folio_test_lru(folio))) { in page_idle_get_folio()
A Drmap.c750 struct folio *folio = page_folio(page); in page_address_in_vma()
1118 struct folio *folio = page_folio(page); in page_move_anon_rmap()
1182 struct folio *folio = page_folio(page); in __page_check_anon_rmap()
1215 struct folio *folio = page_folio(page); in page_add_anon_rmap()
1316 struct folio *folio = page_folio(page); in page_add_file_rmap()
1370 struct folio *folio = page_folio(page); in page_remove_rmap()
2334 struct folio *folio = page_folio(pages[i]); in make_device_exclusive_range()
2536 struct folio *folio = page_folio(page); in hugepage_add_anon_rmap()
A Dgup.c52 struct folio *folio = page_folio(page); in sanity_check_pinned_pages()
74 folio = page_folio(page); in try_get_folio()
89 if (unlikely(page_folio(page) != folio)) { in try_get_folio()
216 struct folio *folio = page_folio(page); in try_grab_page()
257 gup_put_folio(page_folio(page), 1, FOLL_PIN); in unpin_user_page()
265 struct folio *folio = page_folio(next); in gup_folio_range_next()
279 struct folio *folio = page_folio(list[i]); in gup_folio_next()
283 if (page_folio(list[nr]) != folio) in gup_folio_next()
1918 struct folio *folio = page_folio(pages[i]); in collect_longterm_unpinnable_pages()
1968 struct folio *folio = page_folio(pages[i]); in migrate_longterm_unpinnable_pages()
A Dhuge_memory.c600 folio = page_folio(page); in is_transparent_hugepage()
665 if (mem_cgroup_charge(page_folio(page), vma->vm_mm, gfp)) { in __do_huge_pmd_anonymous_page()
1325 folio = page_folio(page); in do_huge_pmd_wp_page()
2297 if (folio && folio != page_folio(pmd_page(*pmd))) in __split_huge_pmd()
2477 if (!folio_test_swapcache(page_folio(head))) { in __split_huge_page_tail()
2515 struct folio *folio = page_folio(page); in __split_huge_page()
2543 struct folio *tail = page_folio(head + i); in __split_huge_page()
2645 struct folio *folio = page_folio(page); in split_huge_page_to_list()
2945 folio = page_folio(page); in split_huge_pages_all()
2949 if (unlikely(page_folio(page) != folio)) in split_huge_pages_all()
[all …]
A Dhugetlb.c1542 return page_folio(page); in alloc_gigantic_folio()
1553 return page_folio(page); in alloc_gigantic_folio()
1560 return page ? page_folio(page) : NULL; in alloc_gigantic_folio()
1834 folio = page_folio(page); in update_and_free_pages_bulk()
1857 struct folio *folio = page_folio(page); in free_huge_page()
2048 folio = page_folio(page); in PageHuge()
2161 return page_folio(page); in alloc_buddy_hugetlb_folio()
2253 folio = page_folio(page); in remove_pool_huge_page()
2279 struct folio *folio = page_folio(page); in dissolve_free_huge_page()
3624 inner_folio = page_folio(subpage); in demote_free_hugetlb_folio()
[all …]
A Ddebug.c51 struct folio *folio = page_folio(page); in __dump_page()
A Dtruncate.c266 return truncate_inode_folio(mapping, page_folio(page)); in generic_error_remove_page()
297 struct folio *folio = page_folio(page); in invalidate_inode_page()
A Dkhugepaged.c504 release_pte_folio(page_folio(page)); in release_pte_page()
978 if (unlikely(mem_cgroup_charge(page_folio(*hpage), mm, gfp))) in alloc_charge_hpage()
1132 mem_cgroup_uncharge(page_folio(hpage)); in collapse_huge_page()
1935 folio = page_folio(page); in collapse_file()
2073 folio = page_folio(hpage); in collapse_file()
2134 mem_cgroup_uncharge(page_folio(hpage)); in collapse_file()
A Duserfaultfd.c103 folio = page_folio(page); in mfill_atomic_install_pte()
193 if (mem_cgroup_charge(page_folio(page), dst_mm, GFP_KERNEL)) in mcopy_atomic_pte()
/linux-6.3-rc2/include/linux/
A Dpage_idle.h125 return folio_test_young(page_folio(page)); in page_is_young()
130 folio_set_young(page_folio(page)); in set_page_young()
135 return folio_test_clear_young(page_folio(page)); in test_and_clear_page_young()
140 return folio_test_idle(page_folio(page)); in page_is_idle()
145 folio_set_idle(page_folio(page)); in set_page_idle()
150 folio_clear_idle(page_folio(page)); in clear_page_idle()
A Dnetfs.h94 folio_start_fscache(page_folio(page)); in set_page_fscache()
99 folio_end_private_2(page_folio(page)); in end_page_fscache()
104 folio_wait_private_2(page_folio(page)); in wait_on_page_fscache()
109 return folio_wait_private_2_killable(page_folio(page)); in wait_on_page_fscache_killable()
A Dpagemap.h374 return folio_file_mapping(page_folio(page)); in page_file_mapping()
382 struct folio *folio = page_folio(page); in page_mapping_file()
461 folio_attach_private(page_folio(page), data); in attach_page_private()
466 return folio_detach_private(page_folio(page)); in detach_page_private()
923 return folio_trylock(page_folio(page)); in trylock_page()
971 folio = page_folio(page); in lock_page()
1037 folio_wait_locked(page_folio(page)); in wait_on_page_locked()
1042 return folio_wait_locked_killable(page_folio(page)); in wait_on_page_locked_killable()
1056 __folio_mark_dirty(page_folio(page), mapping, warn); in __set_page_dirty()
1072 return folio_write_one(page_folio(page)); in write_one_page()
[all …]
A Dpage-flags.h273 #define page_folio(p) (_Generic((p), \ macro
548 return folio_test_swapcache(page_folio(page)); in PageSwapCache()
662 return folio_test_anon(page_folio(page)); in PageAnon()
692 return folio_test_ksm(page_folio(page)); in PageKsm()
729 return folio_test_uptodate(page_folio(page)); in PageUptodate()
771 folio_start_writeback_keepwrite(page_folio(page)); in set_page_writeback_keepwrite()
A Dmm_inline.h34 return folio_is_file_lru(page_folio(page)); in page_is_file_lru()
329 lruvec_add_folio(lruvec, page_folio(page)); in add_page_to_lru_list()
363 lruvec_del_folio(lruvec, page_folio(page)); in del_page_from_lru_list()
A Dfsverity.h259 return fsverity_verify_blocks(page_folio(page), PAGE_SIZE, 0); in fsverity_verify_page()
A Dmm.h981 mapcount += folio_entire_mapcount(page_folio(page)); in page_mapcount()
1010 return folio_total_mapcount(page_folio(page)); in total_mapcount()
1045 return folio_large_is_mapped(page_folio(page)); in page_mapped()
1059 return page_folio(page); in virt_to_folio()
1281 folio_get(page_folio(page)); in get_page()
1370 struct folio *folio = page_folio(page); in put_page()
1711 return page_folio(pfn_to_page(pfn)); in pfn_folio()
1758 return folio_maybe_dma_pinned(page_folio(page)); in page_maybe_dma_pinned()
A Dhugetlb.h759 return hugetlb_folio_subpool(page_folio(hpage)); in hugetlb_page_subpool()
771 hugetlb_set_folio_subpool(page_folio(hpage), subpool); in hugetlb_set_page_subpool()
866 return folio_hstate(page_folio(page)); in page_hstate()
A Dwriteback.h386 folio_account_redirty(page_folio(page)); in account_page_redirty()
A Dpage_ref.h94 return folio_ref_count(page_folio(page)); in page_count()
/linux-6.3-rc2/mm/damon/
A Dops-common.c30 folio = page_folio(page); in damon_get_folio()
33 if (unlikely(page_folio(page) != folio || !folio_test_lru(folio))) { in damon_get_folio()
/linux-6.3-rc2/fs/9p/
A Dvfs_addr.c200 struct folio *folio = page_folio(page); in v9fs_vfs_writepage()
299 struct folio *folio = page_folio(subpage); in v9fs_write_end()
/linux-6.3-rc2/fs/afs/
A Dwrite.c157 struct folio *folio = page_folio(subpage); in afs_write_end()
555 folio = page_folio(pvec.pages[i]); in afs_extend_writeback()
907 struct folio *folio = page_folio(vmf->page); in afs_page_mkwrite()

Completed in 88 milliseconds

123