Lines matching refs:page (cross-reference hits for the identifier page in mm/swap_state.c)
Each hit shows the kernel source line number, the matching code, and the enclosing function; read top to bottom, the hits trace a page through the swap-cache API, from insertion via add_to_swap_cache() to fault-time readahead via swapin_readahead().

88 struct page *page; in get_shadow_from_swap_cache() local
90 page = xa_load(&address_space->i_pages, idx); in get_shadow_from_swap_cache()
91 if (xa_is_value(page)) in get_shadow_from_swap_cache()
92 return page; in get_shadow_from_swap_cache()
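Pieced together, these hits are essentially the whole body of get_shadow_from_swap_cache(): probe the swap address space at the entry's offset and return the slot's contents only when xa_load() yields a value entry, i.e. a workingset shadow rather than a live page. A sketch of the full function, assuming the v5.15-era layout of this file; swap_address_space(), swp_offset() and the NULL fallthrough are recalled from the surrounding kernel source, not shown in the hits:

void *get_shadow_from_swap_cache(swp_entry_t entry)
{
        struct address_space *address_space = swap_address_space(entry);
        pgoff_t idx = swp_offset(entry);
        struct page *page;

        /* A value entry here is the shadow left behind by a prior eviction. */
        page = xa_load(&address_space->i_pages, idx);
        if (xa_is_value(page))
                return page;
        return NULL;
}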
100 int add_to_swap_cache(struct page *page, swp_entry_t entry, in add_to_swap_cache() argument
105 XA_STATE_ORDER(xas, &address_space->i_pages, idx, compound_order(page)); in add_to_swap_cache()
106 unsigned long i, nr = thp_nr_pages(page); in add_to_swap_cache()
109 VM_BUG_ON_PAGE(!PageLocked(page), page); in add_to_swap_cache()
110 VM_BUG_ON_PAGE(PageSwapCache(page), page); in add_to_swap_cache()
111 VM_BUG_ON_PAGE(!PageSwapBacked(page), page); in add_to_swap_cache()
113 page_ref_add(page, nr); in add_to_swap_cache()
114 SetPageSwapCache(page); in add_to_swap_cache()
122 VM_BUG_ON_PAGE(xas.xa_index != idx + i, page); in add_to_swap_cache()
128 set_page_private(page + i, entry.val + i); in add_to_swap_cache()
129 xas_store(&xas, page); in add_to_swap_cache()
133 __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr); in add_to_swap_cache()
134 __mod_lruvec_page_state(page, NR_SWAPCACHE, nr); in add_to_swap_cache()
143 ClearPageSwapCache(page); in add_to_swap_cache()
144 page_ref_sub(page, nr); in add_to_swap_cache()
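These fragments outline add_to_swap_cache(): the page must arrive locked, swap-backed and not yet in the cache; it is pinned with one extra reference per subpage, flagged PG_swapcache, and every subpage is stored in the XArray with its swap entry value stashed in page_private. A condensed sketch of how the pieces fit; the xas locking/retry loop, xas_create_range(), xas_nomem() and the shadowp capture are recalled from the v5.15-era source and are assumptions, not visible in the hits:

int add_to_swap_cache(struct page *page, swp_entry_t entry,
                      gfp_t gfp, void **shadowp)
{
        struct address_space *address_space = swap_address_space(entry);
        pgoff_t idx = swp_offset(entry);
        XA_STATE_ORDER(xas, &address_space->i_pages, idx, compound_order(page));
        unsigned long i, nr = thp_nr_pages(page);
        void *old;

        page_ref_add(page, nr);
        SetPageSwapCache(page);

        do {
                xas_lock_irq(&xas);
                xas_create_range(&xas);
                if (xas_error(&xas))
                        goto unlock;
                for (i = 0; i < nr; i++) {
                        old = xas_load(&xas);
                        if (xa_is_value(old) && shadowp)
                                *shadowp = old; /* hand the shadow back */
                        set_page_private(page + i, entry.val + i);
                        xas_store(&xas, page);
                        xas_next(&xas);
                }
                __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
                __mod_lruvec_page_state(page, NR_SWAPCACHE, nr);
unlock:
                xas_unlock_irq(&xas);
        } while (xas_nomem(&xas, gfp));

        if (!xas_error(&xas))
                return 0;

        /* Unwind of kernel lines 143-144: insertion failed. */
        ClearPageSwapCache(page);
        page_ref_sub(page, nr);
        return xas_error(&xas);
}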
152 void __delete_from_swap_cache(struct page *page, in __delete_from_swap_cache() argument
156 int i, nr = thp_nr_pages(page); in __delete_from_swap_cache()
160 VM_BUG_ON_PAGE(!PageLocked(page), page); in __delete_from_swap_cache()
161 VM_BUG_ON_PAGE(!PageSwapCache(page), page); in __delete_from_swap_cache()
162 VM_BUG_ON_PAGE(PageWriteback(page), page); in __delete_from_swap_cache()
166 VM_BUG_ON_PAGE(entry != page, entry); in __delete_from_swap_cache()
167 set_page_private(page + i, 0); in __delete_from_swap_cache()
170 ClearPageSwapCache(page); in __delete_from_swap_cache()
172 __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr); in __delete_from_swap_cache()
173 __mod_lruvec_page_state(page, NR_SWAPCACHE, -nr); in __delete_from_swap_cache()
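The __delete_from_swap_cache() hits are the mirror image: with the i_pages lock already held by the caller, each subpage's slot is overwritten (with the shadow value, in the full source) and its page_private cleared, then PG_swapcache and the file/swapcache counters are dropped. A condensed sketch; the xas_store(&xas, shadow)/xas_next() loop body is recalled from the v5.15-era source rather than present in the hits above:

void __delete_from_swap_cache(struct page *page,
                              swp_entry_t entry, void *shadow)
{
        struct address_space *address_space = swap_address_space(entry);
        int i, nr = thp_nr_pages(page);
        pgoff_t idx = swp_offset(entry);
        XA_STATE(xas, &address_space->i_pages, idx);

        for (i = 0; i < nr; i++) {
                /* Replace the page pointer with the shadow (or NULL). */
                void *old = xas_store(&xas, shadow);

                VM_BUG_ON_PAGE(old != page, old);
                set_page_private(page + i, 0);
                xas_next(&xas);
        }
        ClearPageSwapCache(page);
        __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
        __mod_lruvec_page_state(page, NR_SWAPCACHE, -nr);
}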
184 int add_to_swap(struct page *page) in add_to_swap() argument
189 VM_BUG_ON_PAGE(!PageLocked(page), page); in add_to_swap()
190 VM_BUG_ON_PAGE(!PageUptodate(page), page); in add_to_swap()
192 entry = get_swap_page(page); in add_to_swap()
207 err = add_to_swap_cache(page, entry, in add_to_swap()
225 set_page_dirty(page); in add_to_swap()
230 put_swap_page(page, entry); in add_to_swap()
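add_to_swap() is the reclaim-side entry point: allocate a swap entry for the page, try to insert it into the swap cache, and mark the page dirty so vmscan will write it out; if insertion fails, the entry is released. A condensed sketch under the same assumptions; the GFP flags and the 1/0 success/failure return convention are recalled from the full source, not shown in the hits:

int add_to_swap(struct page *page)
{
        swp_entry_t entry;
        int err;

        entry = get_swap_page(page);
        if (!entry.val)
                return 0;               /* no swap space available */

        /*
         * Insertion must not recurse into reclaim or warn on failure;
         * on failure the caller simply keeps the page in memory.
         */
        err = add_to_swap_cache(page, entry,
                        __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN, NULL);
        if (err)
                goto fail;

        /* Dirty it so shrink_page_list() writes it to swap. */
        set_page_dirty(page);
        return 1;

fail:
        put_swap_page(page, entry);
        return 0;
}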
240 void delete_from_swap_cache(struct page *page) in delete_from_swap_cache() argument
242 swp_entry_t entry = { .val = page_private(page) }; in delete_from_swap_cache()
246 __delete_from_swap_cache(page, entry, NULL); in delete_from_swap_cache()
249 put_swap_page(page, entry); in delete_from_swap_cache()
250 page_ref_sub(page, thp_nr_pages(page)); in delete_from_swap_cache()
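delete_from_swap_cache() wraps the locked helper above: it recovers the swap entry from page_private, takes the i_pages lock around __delete_from_swap_cache(), then drops the swap entry's reference and the per-subpage page references taken at insertion. The xa_lock_irq()/xa_unlock_irq() pair is an assumption from the surrounding source:

void delete_from_swap_cache(struct page *page)
{
        swp_entry_t entry = { .val = page_private(page) };
        struct address_space *address_space = swap_address_space(entry);

        xa_lock_irq(&address_space->i_pages);
        __delete_from_swap_cache(page, entry, NULL);
        xa_unlock_irq(&address_space->i_pages);

        put_swap_page(page, entry);
        page_ref_sub(page, thp_nr_pages(page));
}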
289 void free_swap_cache(struct page *page) in free_swap_cache() argument
291 if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) { in free_swap_cache()
292 try_to_free_swap(page); in free_swap_cache()
293 unlock_page(page); in free_swap_cache()
301 void free_page_and_swap_cache(struct page *page) in free_page_and_swap_cache() argument
303 free_swap_cache(page); in free_page_and_swap_cache()
304 if (!is_huge_zero_page(page)) in free_page_and_swap_cache()
305 put_page(page); in free_page_and_swap_cache()
312 void free_pages_and_swap_cache(struct page **pages, int nr) in free_pages_and_swap_cache()
314 struct page **pagep = pages; in free_pages_and_swap_cache()
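The three free_* helpers form a ladder: free_swap_cache() opportunistically drops a page's swap-cache presence when nobody maps it, free_page_and_swap_cache() additionally drops the caller's reference (except for the huge zero page, which is never freed this way), and free_pages_and_swap_cache() batches the same over an array. A sketch of the batch variant; lru_add_drain() and release_pages() are recalled from the surrounding source:

void free_pages_and_swap_cache(struct page **pages, int nr)
{
        struct page **pagep = pages;
        int i;

        lru_add_drain();        /* flush pending LRU additions first */
        for (i = 0; i < nr; i++)
                free_swap_cache(pagep[i]);
        release_pages(pagep, nr);
}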
334 struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma, in lookup_swap_cache()
337 struct page *page; in lookup_swap_cache() local
343 page = find_get_page(swap_address_space(entry), swp_offset(entry)); in lookup_swap_cache()
347 if (page) { in lookup_swap_cache()
356 if (unlikely(PageTransCompound(page))) in lookup_swap_cache()
357 return page; in lookup_swap_cache()
359 readahead = TestClearPageReadahead(page); in lookup_swap_cache()
380 return page; in lookup_swap_cache()
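lookup_swap_cache() is the fault path's first stop: a plain find_get_page() on the swap address space, plus readahead bookkeeping (TestClearPageReadahead() both reads and clears the hint, and compound pages skip the statistics). A typical caller, modelled loosely on do_swap_page(); the exact surrounding fault-handling code here is an illustration, not the kernel's:

        struct page *page;

        page = lookup_swap_cache(entry, vma, vmf->address);
        if (!page)
                page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE, vmf);
        if (!page)
                return VM_FAULT_OOM;    /* simplification; the real path retries */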
393 struct page *find_get_incore_page(struct address_space *mapping, pgoff_t index) in find_get_incore_page()
397 struct page *page = pagecache_get_page(mapping, index, in find_get_incore_page() local
400 if (!page) in find_get_incore_page()
401 return page; in find_get_incore_page()
402 if (!xa_is_value(page)) in find_get_incore_page()
403 return find_subpage(page, index); in find_get_incore_page()
407 swp = radix_to_swp_entry(page); in find_get_incore_page()
412 page = find_get_page(swap_address_space(swp), swp_offset(swp)); in find_get_incore_page()
414 return page; in find_get_incore_page()
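find_get_incore_page() answers "is this mapping index resident anywhere", including shmem pages that have been swapped out: FGP_ENTRY lets pagecache_get_page() return value entries, and a value entry from a shmem mapping is decoded into a swap entry and chased into the swap cache. Condensed sketch; the shmem_mapping() test and the get_swap_device()/put_swap_device() guard against concurrent swapoff are recalled from the full source, not in the hits:

struct page *find_get_incore_page(struct address_space *mapping, pgoff_t index)
{
        swp_entry_t swp;
        struct swap_info_struct *si;
        struct page *page = pagecache_get_page(mapping, index,
                                               FGP_ENTRY | FGP_HEAD, 0);

        if (!page)
                return page;
        if (!xa_is_value(page))
                return find_subpage(page, index);
        if (!shmem_mapping(mapping))
                return NULL;

        swp = radix_to_swp_entry(page);
        si = get_swap_device(swp);      /* pin against swapoff */
        if (!si)
                return NULL;
        page = find_get_page(swap_address_space(swp), swp_offset(swp));
        put_swap_device(si);
        return page;
}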
417 struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, in __read_swap_cache_async()
422 struct page *page; in __read_swap_cache_async() local
437 page = find_get_page(swap_address_space(entry), in __read_swap_cache_async()
440 if (page) in __read_swap_cache_async()
441 return page; in __read_swap_cache_async()
459 page = alloc_page_vma(gfp_mask, vma, addr); in __read_swap_cache_async()
460 if (!page) in __read_swap_cache_async()
470 put_page(page); in __read_swap_cache_async()
488 __SetPageLocked(page); in __read_swap_cache_async()
489 __SetPageSwapBacked(page); in __read_swap_cache_async()
491 if (mem_cgroup_swapin_charge_page(page, NULL, gfp_mask, entry)) in __read_swap_cache_async()
495 if (add_to_swap_cache(page, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow)) in __read_swap_cache_async()
501 workingset_refault(page_folio(page), shadow); in __read_swap_cache_async()
504 lru_cache_add(page); in __read_swap_cache_async()
506 return page; in __read_swap_cache_async()
509 put_swap_page(page, entry); in __read_swap_cache_async()
510 unlock_page(page); in __read_swap_cache_async()
511 put_page(page); in __read_swap_cache_async()
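__read_swap_cache_async() is the workhorse: look for the page in the swap cache; if absent, allocate one, charge it to the memcg, and insert it locked and swap-backed, retrying the whole dance when it races with a concurrent swapin. The hits show the success tail and the unwind at kernel lines 509-511; a condensed sketch of that tail, with the earlier swapcache_prepare() race handling omitted and *new_page_allocated recalled from the function's signature:

        __SetPageLocked(page);
        __SetPageSwapBacked(page);

        if (mem_cgroup_swapin_charge_page(page, NULL, gfp_mask, entry))
                goto fail_unlock;

        /* Only the reclaim-safe subset of gfp_mask may be used here. */
        if (add_to_swap_cache(page, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
                goto fail_unlock;

        if (shadow)
                workingset_refault(page_folio(page), shadow);

        /* Page is locked; the caller starts the actual read. */
        lru_cache_add(page);
        *new_page_allocated = true;
        return page;

fail_unlock:
        put_swap_page(page, entry);
        unlock_page(page);
        put_page(page);
        return NULL;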
521 struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, in read_swap_cache_async()
525 struct page *retpage = __read_swap_cache_async(entry, gfp_mask, in read_swap_cache_async()
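read_swap_cache_async() is a thin synchronous-kickoff wrapper: it adds the swap_readpage() call only when __read_swap_cache_async() reports that a fresh page was inserted, since an already-cached page needs no I/O. Sketch, with the do_poll plumbing recalled from the signature:

struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                                   struct vm_area_struct *vma,
                                   unsigned long addr, bool do_poll)
{
        bool page_was_allocated;
        struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
                                        vma, addr, &page_was_allocated);

        if (page_was_allocated)
                swap_readpage(retpage, do_poll);        /* start the read */

        return retpage;
}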
613 struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask, in swap_cluster_readahead()
616 struct page *page; in swap_cluster_readahead() local
643 page = __read_swap_cache_async( in swap_cluster_readahead()
646 if (!page) in swap_cluster_readahead()
649 swap_readpage(page, false); in swap_cluster_readahead()
651 SetPageReadahead(page); in swap_cluster_readahead()
655 put_page(page); in swap_cluster_readahead()
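swap_cluster_readahead() speculates on physical locality: it walks a window of swap offsets around the faulting entry, pulls each into the swap cache, starts I/O only for pages it actually allocated, and tags the speculative ones with PG_readahead so a later hit can grow the window. A condensed version of the loop; the window bounds (start_offset/end_offset) and the blk_start_plug() batching are recalled from the full source:

        struct blk_plug plug;

        blk_start_plug(&plug);
        for (offset = start_offset; offset <= end_offset; offset++) {
                page = __read_swap_cache_async(
                        swp_entry(swp_type(entry), offset),
                        gfp_mask, vma, addr, &page_allocated);
                if (!page)
                        continue;
                if (page_allocated) {
                        swap_readpage(page, false);
                        if (offset != entry_offset)
                                SetPageReadahead(page);
                }
                put_page(page);         /* the cache keeps its own reference */
        }
        blk_finish_plug(&plug);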
788 static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask, in swap_vma_readahead()
793 struct page *page; in swap_vma_readahead() local
817 page = __read_swap_cache_async(entry, gfp_mask, vma, in swap_vma_readahead()
819 if (!page) in swap_vma_readahead()
822 swap_readpage(page, false); in swap_vma_readahead()
824 SetPageReadahead(page); in swap_vma_readahead()
828 put_page(page); in swap_vma_readahead()
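swap_vma_readahead() applies the same pattern to virtual locality instead: it scans the PTEs around the faulting address, decodes each swapped-out one, and feeds it through __read_swap_cache_async() exactly as above. A condensed loop; the vma_swap_readahead bookkeeping (ra_info and its ptes/nr_pte/offset fields) is recalled from the full source and should be treated as an assumption:

        for (i = 0, pte = ra_info.ptes; i < ra_info.nr_pte; i++, pte++) {
                pte_t pentry = *pte;
                swp_entry_t entry;

                if (pte_none(pentry) || pte_present(pentry))
                        continue;       /* nothing swapped out here */
                entry = pte_to_swp_entry(pentry);
                if (unlikely(non_swap_entry(entry)))
                        continue;
                page = __read_swap_cache_async(entry, gfp_mask, vma,
                                               vmf->address, &page_allocated);
                if (!page)
                        continue;
                if (page_allocated) {
                        swap_readpage(page, false);
                        if (i != ra_info.offset)
                                SetPageReadahead(page);
                }
                put_page(page);
        }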
849 struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask, in swapin_readahead()
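swapin_readahead() just dispatches between the two readahead policies. The body is short enough to reproduce whole; swap_use_vma_readahead() is the selector, recalled from the same file:

struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
                              struct vm_fault *vmf)
{
        return swap_use_vma_readahead() ?
                swap_vma_readahead(entry, gfp_mask, vmf) :
                swap_cluster_readahead(entry, gfp_mask, vmf);
}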