Lines matching refs: page (include/linux/mm.h)

137 static inline void __mm_zero_struct_page(struct page *page)  in __mm_zero_struct_page()  argument
139 unsigned long *_pp = (void *)page; in __mm_zero_struct_page()
142 BUILD_BUG_ON(sizeof(struct page) & 7); in __mm_zero_struct_page()
143 BUILD_BUG_ON(sizeof(struct page) < 56); in __mm_zero_struct_page()
144 BUILD_BUG_ON(sizeof(struct page) > 96); in __mm_zero_struct_page()
146 switch (sizeof(struct page)) { in __mm_zero_struct_page()
173 #define mm_zero_struct_page(pp) ((void)memset((pp), 0, sizeof(struct page)))
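
The case bodies elided above unroll to one 64-bit store per step, falling through from the largest permitted size down to the 56-byte minimum; the BUILD_BUG_ONs pin sizeof(struct page) to a multiple of 8 in [56, 96] so the switch is exhaustive. A sketch reconstructing the pattern:

#include <linux/mm.h>

static inline void zero_struct_page_sketch(struct page *page)
{
        unsigned long *_pp = (void *)page;

        /* one store per 8-byte word, falling through to smaller sizes */
        switch (sizeof(struct page)) {
        case 96:
                _pp[11] = 0;
                fallthrough;
        case 88:
                _pp[10] = 0;
                fallthrough;
        case 80:
                _pp[9] = 0;
                fallthrough;
        case 72:
                _pp[8] = 0;
                fallthrough;
        case 64:
                _pp[7] = 0;
                fallthrough;
        case 56:
                _pp[6] = 0;
                _pp[5] = 0;
                _pp[4] = 0;
                _pp[3] = 0;
                _pp[2] = 0;
                _pp[1] = 0;
                _pp[0] = 0;
        }
}
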
201 #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n)) argument
204 #define nth_page(page,n) ((page) + (n)) argument
205 #define folio_page_idx(folio, p) ((p) - &(folio)->page)
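
nth_page() needs the pfn round-trip when the memmap is not virtually contiguous (SPARSEMEM without VMEMMAP); only in the contiguous case does it reduce to the plain pointer arithmetic of the second definition. A sketch of walking the subpages of a higher-order allocation with it (zero_subpages() is a hypothetical helper):

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/string.h>

/* Hypothetical helper: clear every subpage of an order-N allocation.
 * nth_page() is the safe spelling here; page + i is not, because the
 * memmap backing the allocation may be split across sections. */
static void zero_subpages(struct page *head, unsigned int order)
{
        unsigned long i;

        for (i = 0; i < (1UL << order); i++) {
                void *addr = kmap_local_page(nth_page(head, i));

                memset(addr, 0, PAGE_SIZE);
                kunmap_local(addr);
        }
}
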
557 struct page *cow_page; /* Page handler may use for COW fault */
558 struct page *page; /* ->fault handlers should return a member
656 struct page *(*find_special_page)(struct vm_area_struct *vma,
948 extern void prep_compound_page(struct page *page, unsigned int order);
974 static inline unsigned int compound_order(struct page *page) in compound_order() argument
976 struct folio *folio = (struct folio *)page; in compound_order()
1034 static inline int put_page_testzero(struct page *page) in put_page_testzero() argument
1036 VM_BUG_ON_PAGE(page_ref_count(page) == 0, page); in put_page_testzero()
1037 return page_ref_dec_and_test(page); in put_page_testzero()
1042 return put_page_testzero(&folio->page); in folio_put_testzero()
1051 static inline bool get_page_unless_zero(struct page *page) in get_page_unless_zero() argument
1053 return page_ref_add_unless(page, 1, 0); in get_page_unless_zero()
1056 static inline struct folio *folio_get_nontail_page(struct page *page) in folio_get_nontail_page() argument
1058 if (unlikely(!get_page_unless_zero(page))) in folio_get_nontail_page()
1060 return (struct folio *)page; in folio_get_nontail_page()
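
get_page_unless_zero() underpins speculative, lockless lookups: a reference is taken only if the page is not already headed for the free lists, and folio_get_nontail_page() layers the folio cast on top. A minimal sketch of the pattern (try_grab_stable() is hypothetical):

#include <linux/mm.h>

/* Hypothetical: speculatively grab a reference during a lockless walk.
 * Returns NULL if the page was already being freed; on success the
 * caller owns one reference and must put_page() it when done. */
static struct page *try_grab_stable(struct page *page)
{
        if (unlikely(!get_page_unless_zero(page)))
                return NULL;
        return page;
}
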
1075 struct page *vmalloc_to_page(const void *addr);
1166 static inline bool page_mapped(const struct page *page) in page_mapped() argument
1168 return folio_mapped(page_folio(page)); in page_mapped()
1171 static inline struct page *virt_to_head_page(const void *x) in virt_to_head_page()
1173 struct page *page = virt_to_page(x); in virt_to_head_page() local
1175 return compound_head(page); in virt_to_head_page()
1180 struct page *page = virt_to_page(x); in virt_to_folio() local
1182 return page_folio(page); in virt_to_folio()
1187 void split_page(struct page *page, unsigned int order);
1194 static inline unsigned long page_size(struct page *page) in page_size() argument
1196 return PAGE_SIZE << compound_order(page); in page_size()
1200 static inline unsigned int page_shift(struct page *page) in page_shift() argument
1202 return PAGE_SHIFT + compound_order(page); in page_shift()
1209 static inline unsigned int thp_order(struct page *page) in thp_order() argument
1211 VM_BUG_ON_PGFLAGS(PageTail(page), page); in thp_order()
1212 return compound_order(page); in thp_order()
1221 static inline unsigned long thp_size(struct page *page) in thp_size() argument
1223 return PAGE_SIZE << thp_order(page); in thp_size()
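
page_size() and page_shift() fold the compound order into byte units so callers need not special-case huge pages. For example, a bounds check that holds for order-0 and compound pages alike (fits_in_page() is hypothetical):

#include <linux/mm.h>

/* Hypothetical: does [offset, offset + len) stay inside this page,
 * whatever its order?  page_size() == PAGE_SIZE << compound_order(). */
static bool fits_in_page(struct page *page, size_t offset, size_t len)
{
        return len <= page_size(page) && offset <= page_size(page) - len;
}
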
1240 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct folio *folio, struct page *page);
1242 struct page *page, unsigned int nr, unsigned long addr);
1325 static inline void get_page(struct page *page) in get_page() argument
1327 struct folio *folio = page_folio(page); in get_page()
1335 static inline __must_check bool try_get_page(struct page *page) in try_get_page() argument
1337 page = compound_head(page); in try_get_page()
1338 if (WARN_ON_ONCE(page_ref_count(page) <= 0)) in try_get_page()
1340 page_ref_inc(page); in try_get_page()
1397 struct page **pages;
1422 static inline void put_page(struct page *page) in put_page() argument
1424 struct folio *folio = page_folio(page); in put_page()
1464 void unpin_user_page(struct page *page);
1466 void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
1468 void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages,
1470 void unpin_user_pages(struct page **pages, unsigned long npages);
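
The unpin_user_page*() family pairs with FOLL_PIN acquisition (pin_user_pages() and friends), not with get_page()/put_page(). A sketch of the whole lifecycle around device DMA, assuming the device writes into the buffer:

#include <linux/mm.h>
#include <linux/slab.h>

/* Pin a user buffer, let a device write into it, then drop the pins,
 * dirtying the pages under the page lock.  Error handling trimmed. */
static int dma_into_user_buffer(unsigned long uaddr, int npages)
{
        struct page **pages;
        int pinned;

        pages = kmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        pinned = pin_user_pages_fast(uaddr, npages, FOLL_WRITE, pages);
        if (pinned < 0) {
                kfree(pages);
                return pinned;
        }

        /* ... program the device, wait for completion ... */

        unpin_user_pages_dirty_lock(pages, pinned, true);
        kfree(pages);
        return pinned == npages ? 0 : -EFAULT;
}

unpin_user_pages_dirty_lock() both drops the pins and marks the pages dirty under the page lock, which is why no separate dirtying pass appears above.
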
1506 static inline int page_zone_id(struct page *page) in page_zone_id() argument
1508 return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK; in page_zone_id()
1512 int page_to_nid(const struct page *page);
1514 static inline int page_to_nid(const struct page *page) in page_to_nid() argument
1516 return (PF_POISONED_CHECK(page)->flags >> NODES_PGSHIFT) & NODES_MASK; in page_to_nid()
1522 return page_to_nid(&folio->page); in folio_nid()
1584 static inline void page_cpupid_reset_last(struct page *page) in page_cpupid_reset_last() argument
1586 page->_last_cpupid = -1 & LAST_CPUPID_MASK; in page_cpupid_reset_last()
1596 static inline void page_cpupid_reset_last(struct page *page) in page_cpupid_reset_last() argument
1598 page->flags |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT; in page_cpupid_reset_last()
1663 static inline void page_cpupid_reset_last(struct page *page) in page_cpupid_reset_last() argument
1689 static inline u8 page_kasan_tag(const struct page *page) in page_kasan_tag() argument
1694 tag = (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK; in page_kasan_tag()
1701 static inline void page_kasan_tag_set(struct page *page, u8 tag) in page_kasan_tag_set() argument
1709 old_flags = READ_ONCE(page->flags); in page_kasan_tag_set()
1714 } while (unlikely(!try_cmpxchg(&page->flags, &old_flags, flags))); in page_kasan_tag_set()
1717 static inline void page_kasan_tag_reset(struct page *page) in page_kasan_tag_reset() argument
1720 page_kasan_tag_set(page, KASAN_TAG_KERNEL); in page_kasan_tag_reset()
1725 static inline u8 page_kasan_tag(const struct page *page) in page_kasan_tag() argument
1730 static inline void page_kasan_tag_set(struct page *page, u8 tag) { } in page_kasan_tag_set() argument
1731 static inline void page_kasan_tag_reset(struct page *page) { } in page_kasan_tag_reset() argument
1735 static inline struct zone *page_zone(const struct page *page) in page_zone() argument
1737 return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)]; in page_zone()
1740 static inline pg_data_t *page_pgdat(const struct page *page) in page_pgdat() argument
1742 return NODE_DATA(page_to_nid(page)); in page_pgdat()
1747 return page_zone(&folio->page); in folio_zone()
1752 return page_pgdat(&folio->page); in folio_pgdat()
1756 static inline void set_page_section(struct page *page, unsigned long section) in set_page_section() argument
1758 page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT); in set_page_section()
1759 page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT; in set_page_section()
1762 static inline unsigned long page_to_section(const struct page *page) in page_to_section() argument
1764 return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK; in page_to_section()
1779 return page_to_pfn(&folio->page); in folio_pfn()
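
Zone and node are normally decoded straight out of page->flags, so these lookups are cheap. A small diagnostic sketch (report_page_home() is hypothetical):

#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/printk.h>

/* Hypothetical: print where a page lives in the zone/node topology. */
static void report_page_home(struct page *page)
{
        struct zone *zone = page_zone(page);

        pr_info("pfn %lu: node %d, zone %s\n",
                page_to_pfn(page), page_to_nid(page), zone->name);
}
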
1788 static inline pte_t mk_pte(struct page *page, pgprot_t pgprot) in mk_pte() argument
1790 return pfn_pte(page_to_pfn(page), pgprot); in mk_pte()
1915 static inline bool is_zero_page(const struct page *page) in is_zero_page() argument
1917 return is_zero_pfn(page_to_pfn(page)); in is_zero_page()
1928 return is_zero_page(&folio->page); in is_zero_folio()
1967 static inline void set_page_zone(struct page *page, enum zone_type zone) in set_page_zone() argument
1969 page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT); in set_page_zone()
1970 page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT; in set_page_zone()
1973 static inline void set_page_node(struct page *page, unsigned long node) in set_page_node() argument
1975 page->flags &= ~(NODES_MASK << NODES_PGSHIFT); in set_page_node()
1976 page->flags |= (node & NODES_MASK) << NODES_PGSHIFT; in set_page_node()
1979 static inline void set_page_links(struct page *page, enum zone_type zone, in set_page_links() argument
1982 set_page_zone(page, zone); in set_page_links()
1983 set_page_node(page, node); in set_page_links()
1985 set_page_section(page, pfn_to_section_nr(pfn)); in set_page_links()
2014 static inline long compound_nr(struct page *page) in compound_nr() argument
2016 struct folio *folio = (struct folio *)page; in compound_nr()
2174 if (WARN_ON_ONCE(page_has_type(&folio->page) && !folio_test_hugetlb(folio))) in folio_expected_ref_count()
2208 static inline void *page_address(const struct page *page) in page_address() argument
2210 return page->virtual; in page_address()
2212 static inline void set_page_address(struct page *page, void *address) in set_page_address() argument
2214 page->virtual = address; in set_page_address()
2220 void *page_address(const struct page *page);
2221 void set_page_address(struct page *page, void *virtual);
2225 static __always_inline void *lowmem_page_address(const struct page *page) in lowmem_page_address() argument
2227 return page_to_virt(page); in lowmem_page_address()
2231 #define page_address(page) lowmem_page_address(page) argument
2232 #define set_page_address(page, address) do { } while(0) argument
2238 return page_address(&folio->page); in folio_address()
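
page_address() is only meaningful for pages with a permanent kernel mapping; a possibly-highmem page needs a temporary mapping instead. A sketch of the safe spelling for both cases (fill_page() is hypothetical):

#include <linux/highmem.h>
#include <linux/string.h>

/* Hypothetical: write a byte pattern into any page.  kmap_local_page()
 * collapses to page_address() on !CONFIG_HIGHMEM kernels, so this works
 * for lowmem and highmem pages alike. */
static void fill_page(struct page *page, int pattern)
{
        void *addr = kmap_local_page(page);

        memset(addr, pattern, PAGE_SIZE);
        kunmap_local(addr);
}
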
2246 static inline bool page_is_pfmemalloc(const struct page *page) in page_is_pfmemalloc() argument
2253 return (uintptr_t)page->lru.next & BIT(1); in page_is_pfmemalloc()
2275 static inline void set_page_pfmemalloc(struct page *page) in set_page_pfmemalloc() argument
2277 page->lru.next = (void *)BIT(1); in set_page_pfmemalloc()
2280 static inline void clear_page_pfmemalloc(struct page *page) in clear_page_pfmemalloc() argument
2282 page->lru.next = NULL; in clear_page_pfmemalloc()
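
Since page->lru is unused between allocation and first use, bit 1 of lru.next can carry the came-from-reserves hint without spending a page flag. Recyclers should honour it; a sketch (can_recycle() is hypothetical):

#include <linux/mm.h>

/* Hypothetical: a page cache/recycler must not hold on to pages served
 * from the pfmemalloc emergency reserves; give those straight back. */
static bool can_recycle(const struct page *page)
{
        return !page_is_pfmemalloc(page);
}
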
2348 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
2352 struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
2474 unsigned int gup_flags, struct page **pages,
2478 unsigned int gup_flags, struct page **pages,
2484 static inline struct page *get_user_page_vma_remote(struct mm_struct *mm, in get_user_page_vma_remote()
2489 struct page *page; in get_user_page_vma_remote() local
2496 got = get_user_pages_remote(mm, addr, 1, gup_flags, &page, NULL); in get_user_page_vma_remote()
2503 put_page(page); in get_user_page_vma_remote()
2508 return page; in get_user_page_vma_remote()
2512 unsigned int gup_flags, struct page **pages);
2514 unsigned int gup_flags, struct page **pages);
2516 struct page **pages, unsigned int gup_flags);
2518 struct page **pages, unsigned int gup_flags);
2525 unsigned int gup_flags, struct page **pages);
2527 unsigned int gup_flags, struct page **pages);
2535 struct page *get_dump_page(unsigned long addr, int *locked);
2539 bool set_page_dirty(struct page *page);
2540 int set_page_dirty_lock(struct page *page);
2577 unsigned int gup_flags, struct page **pages);
2580 unsigned int gup_flags, struct page **pagep) in get_user_page_fast_only()
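
get_user_page_vma_remote() above wraps the single-page case of get_user_pages_remote(); the latter can equally be called directly to peek at another process's memory. A sketch, assuming the caller already holds mmap_read_lock(mm) (peek_remote_byte() is hypothetical):

#include <linux/highmem.h>
#include <linux/mm.h>

/* Hypothetical: read one byte from a foreign mm.  Caller holds
 * mmap_read_lock(mm); the GUP reference is dropped before returning. */
static int peek_remote_byte(struct mm_struct *mm, unsigned long addr, u8 *out)
{
        struct page *page;
        u8 *kaddr;
        long got;

        got = get_user_pages_remote(mm, addr, 1, 0, &page, NULL);
        if (got != 1)
                return got < 0 ? got : -EFAULT;

        kaddr = kmap_local_page(page);
        *out = kaddr[offset_in_page(addr)];
        kunmap_local(kaddr);
        put_page(page);
        return 0;
}
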
2881 struct page *page = alloc_pages_noprof(gfp | __GFP_COMP, order); in pagetable_alloc_noprof() local
2883 return page_ptdesc(page); in pagetable_alloc_noprof()
2896 struct page *page = ptdesc_page(pt); in pagetable_free() local
2898 __free_pages(page, compound_order(page)); in pagetable_free()
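
pagetable_alloc_noprof() is the allocation-profiling backend of pagetable_alloc(); page-table pages are compound allocations typed as ptdesc and released with pagetable_free(). A round-trip sketch, assuming GFP_PGTABLE_KERNEL from the pgalloc headers:

#include <asm/pgalloc.h>
#include <linux/mm.h>

/* Allocate an order-0 page-table page and release it again. */
static int pagetable_roundtrip(void)
{
        struct ptdesc *pt = pagetable_alloc(GFP_PGTABLE_KERNEL, 0);

        if (!pt)
                return -ENOMEM;
        pagetable_free(pt);
        return 0;
}
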
3060 static inline struct page *pmd_pgtable_page(pmd_t *pmd) in pmd_pgtable_page()
3162 extern void adjust_managed_page_count(struct page *page, long count);
3168 void free_reserved_page(struct page *page);
3170 static inline void mark_page_reserved(struct page *page) in mark_page_reserved() argument
3172 SetPageReserved(page); in mark_page_reserved()
3173 adjust_managed_page_count(page, -1); in mark_page_reserved()
3520 int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
3522 struct page **pages, unsigned long *num);
3523 int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
3525 int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
3527 vm_fault_t vmf_insert_page_mkwrite(struct vm_fault *vmf, struct page *page,
3540 unsigned long addr, struct page *page) in vmf_insert_page() argument
3542 int err = vm_insert_page(vma, addr, page); in vmf_insert_page()
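
vmf_insert_page() converts vm_insert_page()'s errno into a vm_fault_t, which makes a page-backed ->fault handler nearly a one-liner. A sketch against a hypothetical driver (struct my_dev and my_dev_fault() are assumptions; vm_insert_page() requires pages obtained from the page allocator):

#include <linux/mm.h>

struct my_dev {                         /* hypothetical driver state */
        struct page **pages;            /* allocated with alloc_page() */
        unsigned long npages;
};

static vm_fault_t my_dev_fault(struct vm_fault *vmf)
{
        struct my_dev *dev = vmf->vma->vm_private_data;

        if (vmf->pgoff >= dev->npages)
                return VM_FAULT_SIGBUS;
        return vmf_insert_page(vmf->vma, vmf->address,
                               dev->pages[vmf->pgoff]);
}
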
3631 extern void __kernel_poison_pages(struct page *page, int numpages);
3632 extern void __kernel_unpoison_pages(struct page *page, int numpages);
3647 static inline void kernel_poison_pages(struct page *page, int numpages) in kernel_poison_pages() argument
3650 __kernel_poison_pages(page, numpages); in kernel_poison_pages()
3652 static inline void kernel_unpoison_pages(struct page *page, int numpages) in kernel_unpoison_pages() argument
3655 __kernel_unpoison_pages(page, numpages); in kernel_unpoison_pages()
3660 static inline void __kernel_poison_pages(struct page *page, int numpages) { } in __kernel_poison_pages() argument
3661 static inline void kernel_poison_pages(struct page *page, int numpages) { } in kernel_poison_pages() argument
3662 static inline void kernel_unpoison_pages(struct page *page, int numpages) { } in kernel_unpoison_pages() argument
3706 extern void __kernel_map_pages(struct page *page, int numpages, int enable);
3708 static inline void debug_pagealloc_map_pages(struct page *page, int numpages) in debug_pagealloc_map_pages() argument
3711 __kernel_map_pages(page, numpages, 1); in debug_pagealloc_map_pages()
3714 static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages) in debug_pagealloc_unmap_pages() argument
3717 __kernel_map_pages(page, numpages, 0); in debug_pagealloc_unmap_pages()
3733 static inline bool page_is_guard(struct page *page) in page_is_guard() argument
3738 return PageGuard(page); in page_is_guard()
3741 bool __set_page_guard(struct zone *zone, struct page *page, unsigned int order);
3742 static inline bool set_page_guard(struct zone *zone, struct page *page, in set_page_guard() argument
3747 return __set_page_guard(zone, page, order); in set_page_guard()
3750 void __clear_page_guard(struct zone *zone, struct page *page, unsigned int order);
3751 static inline void clear_page_guard(struct zone *zone, struct page *page, in clear_page_guard() argument
3756 __clear_page_guard(zone, page, order); in clear_page_guard()
3760 static inline void debug_pagealloc_map_pages(struct page *page, int numpages) {} in debug_pagealloc_map_pages() argument
3761 static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages) {} in debug_pagealloc_unmap_pages() argument
3764 static inline bool page_is_guard(struct page *page) { return false; } in page_is_guard() argument
3765 static inline bool set_page_guard(struct zone *zone, struct page *page, in set_page_guard() argument
3767 static inline void clear_page_guard(struct zone *zone, struct page *page, in clear_page_guard() argument
3808 struct page * __populate_section_memmap(unsigned long pfn,
3879 if (!pgmap || !is_power_of_2(sizeof(struct page))) in __vmemmap_can_optimize()
3883 nr_vmemmap_pages = ((nr_pages * sizeof(struct page)) >> PAGE_SHIFT); in __vmemmap_can_optimize()
4045 extern int memcmp_pages(struct page *page1, struct page *page2);
4047 static inline int pages_identical(struct page *page1, struct page *page2) in pages_identical()
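
pages_identical() is the byte-for-byte comparison (negated memcmp_pages()) that deduplicators such as KSM apply before sharing one physical copy. A trivial sketch (worth_merging() is hypothetical):

#include <linux/mm.h>

/* Hypothetical merge gate: only share when contents already match. */
static bool worth_merging(struct page *a, struct page *b)
{
        return pages_identical(a, b);   /* !memcmp_pages(a, b) */
}
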
4194 static inline bool page_pool_page_is_pp(const struct page *page) in page_pool_page_is_pp() argument
4196 return (page->pp_magic & PP_MAGIC_MASK) == PP_SIGNATURE; in page_pool_page_is_pp()
4199 static inline bool page_pool_page_is_pp(const struct page *page) in page_pool_page_is_pp() argument
4211 struct page page_snapshot;
4222 void snapshot_page(struct page_snapshot *ps, const struct page *page);