Lines matching refs: folio
126 static inline int folio_nr_pages_mapped(const struct folio *folio) in folio_nr_pages_mapped() argument
130 return atomic_read(&folio->_nr_pages_mapped) & FOLIO_PAGES_MAPPED; in folio_nr_pages_mapped()
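
The masked read above suggests that _nr_pages_mapped packs a per-page mapping count in its low bits and status flags above them. A minimal userspace sketch of that read, with a made-up mask value standing in for FOLIO_PAGES_MAPPED:

#include <stdatomic.h>
#include <stdio.h>

/* Illustrative mask: low bits hold the mapped-page count, bits above it are flags. */
#define PAGES_MAPPED_MASK 0x7fffffU

struct folio_model {
        atomic_int nr_pages_mapped;     /* count in the low bits, flags above */
};

static int model_nr_pages_mapped(struct folio_model *folio)
{
        /* Same shape as the kernel fragment: atomic read, then mask off flag bits. */
        return atomic_load(&folio->nr_pages_mapped) & PAGES_MAPPED_MASK;
}

int main(void)
{
        struct folio_model f = { .nr_pages_mapped = 0x800004 }; /* one flag bit + 4 pages */

        printf("%d\n", model_nr_pages_mapped(&f));      /* prints 4 */
        return 0;
}
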
139 const struct folio *folio) in folio_swap() argument
142 .val = ALIGN_DOWN(entry.val, folio_nr_pages(folio)), in folio_swap()
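
folio_swap() rounds a swap entry down to the folio's first slot. A small sketch of that rounding, assuming folio_nr_pages() is always a power of two (which is what makes bitwise alignment equivalent to the kernel's ALIGN_DOWN here):

#include <stdio.h>

/* Power-of-two round-down, matching what ALIGN_DOWN does for these arguments. */
#define ROUND_DOWN_POW2(x, a)   ((x) & ~((unsigned long)(a) - 1))

int main(void)
{
        unsigned long entry_val = 1027; /* swap slot of some page inside the folio */
        unsigned long nr_pages  = 4;    /* folio_nr_pages(): a power of two */

        /* Slot of the folio's first page: 1027 rounded down to a multiple of 4. */
        printf("%lu\n", ROUND_DOWN_POW2(entry_val, nr_pages));  /* prints 1024 */
        return 0;
}
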
148 static inline void *folio_raw_mapping(const struct folio *folio) in folio_raw_mapping() argument
150 unsigned long mapping = (unsigned long)folio->mapping; in folio_raw_mapping()
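
folio_raw_mapping() treats folio->mapping as a tagged pointer: the low, always-zero alignment bits carry type information, and the plain mapping pointer is recovered by masking them off. A sketch of that pattern (the mask value below is illustrative, not the kernel's constant):

#include <stdio.h>

#define MAPPING_TAG_MASK 0x3UL  /* illustrative: low alignment bits used as type tags */

static void *raw_mapping(void *tagged)
{
        unsigned long mapping = (unsigned long)tagged;

        return (void *)(mapping & ~MAPPING_TAG_MASK);
}

int main(void)
{
        static long backing;    /* stands in for a struct address_space */
        void *tagged = (void *)((unsigned long)&backing | 0x1UL);      /* tag bit set */

        printf("%d\n", raw_mapping(tagged) == &backing);        /* prints 1 */
        return 0;
}
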
267 static inline unsigned int folio_pte_batch_flags(struct folio *folio, in folio_pte_batch_flags() argument
275 VM_WARN_ON_FOLIO(!pte_present(pte), folio); in folio_pte_batch_flags()
276 VM_WARN_ON_FOLIO(!folio_test_large(folio) || max_nr < 1, folio); in folio_pte_batch_flags()
277 VM_WARN_ON_FOLIO(page_folio(pfn_to_page(pte_pfn(pte))) != folio, folio); in folio_pte_batch_flags()
287 folio_pfn(folio) + folio_nr_pages(folio) - pte_pfn(pte)); in folio_pte_batch_flags()
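
The last expression above caps a PTE batch at the number of pages left between the current PTE's pfn and the end of the folio, so a batch never crosses a folio boundary. A small model of that clamp (function and parameter names here are illustrative):

#include <stdio.h>

static unsigned int clamp_batch_to_folio(unsigned long folio_pfn,
                                         unsigned long folio_nr_pages,
                                         unsigned long pte_pfn,
                                         unsigned int max_nr)
{
        /* Pages remaining from the PTE's page up to the end of the folio. */
        unsigned long left = folio_pfn + folio_nr_pages - pte_pfn;

        return max_nr < left ? max_nr : (unsigned int)left;
}

int main(void)
{
        /* Folio starts at pfn 1000 with 16 pages; the PTE maps pfn 1010,
         * so at most 6 entries can still belong to this folio. */
        printf("%u\n", clamp_batch_to_folio(1000, 16, 1010, 64));       /* prints 6 */
        return 0;
}
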
322 unsigned int folio_pte_batch(struct folio *folio, pte_t *ptep, pte_t pte,
409 void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio,
411 static inline void acct_reclaim_writeback(struct folio *folio) in acct_reclaim_writeback() argument
413 pg_data_t *pgdat = folio_pgdat(folio); in acct_reclaim_writeback()
417 __acct_reclaim_writeback(pgdat, folio, nr_throttled); in acct_reclaim_writeback()
440 void folio_rotate_reclaimable(struct folio *folio);
441 bool __folio_end_writeback(struct folio *folio);
442 void deactivate_file_folio(struct folio *folio);
443 void folio_activate(struct folio *folio);
458 int folio_unmap_invalidate(struct address_space *mapping, struct folio *folio,
474 void filemap_free_folio(struct address_space *mapping, struct folio *folio);
475 int truncate_inode_folio(struct address_space *mapping, struct folio *folio);
476 bool truncate_inode_partial_folio(struct folio *folio, loff_t start,
478 long mapping_evict_folio(struct address_space *mapping, struct folio *folio);
493 static inline bool folio_evictable(struct folio *folio) in folio_evictable() argument
499 ret = !mapping_unevictable(folio_mapping(folio)) && in folio_evictable()
500 !folio_test_mlocked(folio); in folio_evictable()
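
A compact model of the predicate above: a folio is evictable only if its mapping is not marked unevictable and the folio itself is not mlocked (names below are illustrative):

#include <stdbool.h>
#include <stdio.h>

static bool model_evictable(bool mapping_unevictable, bool mlocked)
{
        /* Mirrors the fragment: evictable only when neither condition holds. */
        return !mapping_unevictable && !mlocked;
}

int main(void)
{
        printf("%d\n", model_evictable(false, false));  /* 1: reclaim may evict it */
        printf("%d\n", model_evictable(false, true));   /* 0: mlocked */
        return 0;
}
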
519 static inline bool folio_needs_release(struct folio *folio) in folio_needs_release() argument
521 struct address_space *mapping = folio_mapping(folio); in folio_needs_release()
523 return folio_has_private(folio) || in folio_needs_release()
538 bool folio_isolate_lru(struct folio *folio);
539 void folio_putback_lru(struct folio *folio);
754 static inline void folio_set_order(struct folio *folio, unsigned int order) in folio_set_order() argument
756 if (WARN_ON_ONCE(!order || !folio_test_large(folio))) in folio_set_order()
759 folio->_flags_1 = (folio->_flags_1 & ~0xffUL) | order; in folio_set_order()
761 folio->_nr_pages = 1U << order; in folio_set_order()
765 bool __folio_unqueue_deferred_split(struct folio *folio);
766 static inline bool folio_unqueue_deferred_split(struct folio *folio) in folio_unqueue_deferred_split() argument
768 if (folio_order(folio) <= 1 || !folio_test_large_rmappable(folio)) in folio_unqueue_deferred_split()
776 if (data_race(list_empty(&folio->_deferred_list))) in folio_unqueue_deferred_split()
779 return __folio_unqueue_deferred_split(folio); in folio_unqueue_deferred_split()
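
folio_unqueue_deferred_split() shows a common structure: cheap lock-free filters first (order, the rmappable flag, a data_race()-annotated list_empty() peek), and only then a slow path that takes the queue lock and rechecks. A compact model of that structure, with a stub standing in for the locked slow path:

#include <stdbool.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static bool list_empty(const struct list_head *head) { return head->next == head; }

struct folio_model {
        unsigned int order;
        bool large_rmappable;
        struct list_head deferred_list;
};

/* Stand-in for the locked slow path, which would recheck and unlink under the lock. */
static bool slow_unqueue(struct folio_model *folio) { (void)folio; return true; }

static bool model_unqueue_deferred_split(struct folio_model *folio)
{
        /* Cheap filters first: order-0/1 or non-rmappable folios are never queued. */
        if (folio->order <= 1 || !folio->large_rmappable)
                return false;
        /* Racy peek (data_race() in the kernel): skip the lock if the list looks empty. */
        if (list_empty(&folio->deferred_list))
                return false;
        return slow_unqueue(folio);
}

int main(void)
{
        struct folio_model f = { .order = 4, .large_rmappable = true };

        f.deferred_list.next = f.deferred_list.prev = &f.deferred_list; /* empty list */
        printf("%d\n", model_unqueue_deferred_split(&f));               /* prints 0 */
        return 0;
}
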
782 static inline struct folio *page_rmappable_folio(struct page *page) in page_rmappable_folio()
784 struct folio *folio = (struct folio *)page; in page_rmappable_folio() local
786 if (folio && folio_test_large(folio)) in page_rmappable_folio()
787 folio_set_large_rmappable(folio); in page_rmappable_folio()
788 return folio; in page_rmappable_folio()
793 struct folio *folio = (struct folio *)page; in prep_compound_head() local
795 folio_set_order(folio, order); in prep_compound_head()
796 atomic_set(&folio->_large_mapcount, -1); in prep_compound_head()
798 atomic_set(&folio->_nr_pages_mapped, 0); in prep_compound_head()
800 folio->_mm_ids = 0; in prep_compound_head()
801 folio->_mm_id_mapcount[0] = -1; in prep_compound_head()
802 folio->_mm_id_mapcount[1] = -1; in prep_compound_head()
805 atomic_set(&folio->_pincount, 0); in prep_compound_head()
806 atomic_set(&folio->_entire_mapcount, -1); in prep_compound_head()
809 INIT_LIST_HEAD(&folio->_deferred_list); in prep_compound_head()
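
The -1 initial values above follow the usual mapcount convention: the stored counter is the number of mappings minus one, so a freshly prepared, unmapped folio reads back as zero. A minimal model of that bias (names are illustrative):

#include <stdatomic.h>
#include <stdio.h>

struct folio_model {
        atomic_int entire_mapcount;     /* stored as (mappings - 1) */
};

static void model_prep(struct folio_model *folio)
{
        atomic_store(&folio->entire_mapcount, -1);      /* no mappings yet */
}

static int model_entire_mapcount(struct folio_model *folio)
{
        return atomic_load(&folio->entire_mapcount) + 1;
}

int main(void)
{
        struct folio_model f;

        model_prep(&f);
        printf("%d\n", model_entire_mapcount(&f));      /* prints 0 */
        return 0;
}
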
956 struct anon_vma *folio_anon_vma(const struct folio *folio);
959 void unmap_mapping_folio(struct folio *folio);
983 folio_within_range(struct folio *folio, struct vm_area_struct *vma, in folio_within_range() argument
989 VM_WARN_ON_FOLIO(folio_test_ksm(folio), folio); in folio_within_range()
999 pgoff = folio_pgoff(folio); in folio_within_range()
1007 return !(addr < start || end - addr < folio_size(folio)); in folio_within_range()
1011 folio_within_vma(struct folio *folio, struct vm_area_struct *vma) in folio_within_vma() argument
1013 return folio_within_range(folio, vma, vma->vm_start, vma->vm_end); in folio_within_vma()
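
The return expression in folio_within_range() checks that the folio's mapped range [addr, addr + folio_size) sits inside [start, end), phrasing the upper bound as end - addr < folio_size() so the addition cannot wrap near the top of the address space. A standalone sketch of that check (it assumes addr <= end so the subtraction is meaningful):

#include <stdbool.h>
#include <stdio.h>

/* Does [addr, addr + size) lie entirely inside [start, end)?  Written as
 * "end - addr < size" rather than "addr + size > end" to avoid overflow. */
static bool range_within(unsigned long addr, unsigned long size,
                         unsigned long start, unsigned long end)
{
        return !(addr < start || end - addr < size);
}

int main(void)
{
        printf("%d\n", range_within(0x1000, 0x4000, 0x1000, 0x6000));   /* 1: fits */
        printf("%d\n", range_within(0x3000, 0x4000, 0x1000, 0x6000));   /* 0: runs past end */
        return 0;
}
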
1025 void mlock_folio(struct folio *folio);
1026 static inline void mlock_vma_folio(struct folio *folio, in mlock_vma_folio() argument
1038 mlock_folio(folio); in mlock_vma_folio()
1041 void munlock_folio(struct folio *folio);
1042 static inline void munlock_vma_folio(struct folio *folio, in munlock_vma_folio() argument
1055 munlock_folio(folio); in munlock_vma_folio()
1058 void mlock_new_folio(struct folio *folio);
1137 static inline void unmap_mapping_folio(struct folio *folio) { } in unmap_mapping_folio() argument
1138 static inline void mlock_new_folio(struct folio *folio) { } in mlock_new_folio() argument
1228 int unmap_poisoned_folio(struct folio *folio, unsigned long pfn, bool must_kill);
1229 void shake_folio(struct folio *folio);
1251 static inline int unmap_poisoned_folio(struct folio *folio, unsigned long pfn, bool must_kill) in unmap_poisoned_folio() argument
1354 struct folio *folio, loff_t fpos, size_t size);
1385 int numa_migrate_check(struct folio *folio, struct vm_fault *vmf,
1389 void free_zone_device_folio(struct folio *folio);
1390 int migrate_device_coherent_folio(struct folio *folio);
1401 int __must_check try_grab_folio(struct folio *folio, int refs,