Lines Matching refs:folio
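
(The functions below all belong to the page-cache truncation and invalidation code; the names and line numbers are consistent with mm/truncate.c in recent kernels.)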

30 struct folio *folio; in clear_shadow_entries() local
42 xas_for_each(&xas, folio, max) { in clear_shadow_entries()
43 if (xa_is_value(folio)) in clear_shadow_entries()
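
xa_is_value() at lines 42-43 is how shadow entries (workingset/swap information stored as XArray value entries instead of folio pointers) are told apart from real folios; truncate_folio_batch_exceptionals() below applies the same test. A minimal sketch of the pattern, with locking elided and index/max as assumed locals:

    XA_STATE(xas, &mapping->i_pages, index);
    struct folio *folio;

    xas_for_each(&xas, folio, max) {
            if (xa_is_value(folio))
                    xas_store(&xas, NULL);  /* wipe the shadow entry */
            /* real folios are left untouched by clear_shadow_entries() */
    }
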
65 struct folio *folio; in truncate_folio_batch_exceptionals() local
107 xas_for_each(&xas, folio, indices[nr-1]) { in truncate_folio_batch_exceptionals()
108 if (xa_is_value(folio)) in truncate_folio_batch_exceptionals()
135 void folio_invalidate(struct folio *folio, size_t offset, size_t length) in folio_invalidate() argument
137 const struct address_space_operations *aops = folio->mapping->a_ops; in folio_invalidate()
140 aops->invalidate_folio(folio, offset, length); in folio_invalidate()
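
folio_invalidate() is only a dispatcher: it calls the mapping's ->invalidate_folio op so the filesystem can drop private state (buffer heads and the like) attached to the affected byte range. A hedged sketch of how a block-based filesystem typically wires that hook (example_aops is a made-up name):

    static const struct address_space_operations example_aops = {
            .invalidate_folio = block_invalidate_folio, /* drops buffer heads */
            /* ... read/write/dirty ops ... */
    };
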
154 static void truncate_cleanup_folio(struct folio *folio) in truncate_cleanup_folio() argument
156 if (folio_mapped(folio)) in truncate_cleanup_folio()
157 unmap_mapping_folio(folio); in truncate_cleanup_folio()
159 if (folio_needs_release(folio)) in truncate_cleanup_folio()
160 folio_invalidate(folio, 0, folio_size(folio)); in truncate_cleanup_folio()
167 folio_cancel_dirty(folio); in truncate_cleanup_folio()
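
The order in truncate_cleanup_folio() matters: the folio is unmapped from user page tables first, then the filesystem gets to release private data over the whole folio, and finally folio_cancel_dirty() keeps writeback from touching data that is about to be destroyed.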
170 int truncate_inode_folio(struct address_space *mapping, struct folio *folio) in truncate_inode_folio() argument
172 if (folio->mapping != mapping) in truncate_inode_folio()
175 truncate_cleanup_folio(folio); in truncate_inode_folio()
176 filemap_remove_folio(folio); in truncate_inode_folio()
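
The ->mapping check at line 172 catches a folio that was truncated or migrated while it was unlocked; such a folio is reported as an error (-EIO in recent trees) instead of being removed twice. The batch loop at lines 433-437 below shows the canonical caller pattern: lock, wait for writeback, truncate, unlock.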
191 bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end) in truncate_inode_partial_folio() argument
193 loff_t pos = folio_pos(folio); in truncate_inode_partial_folio()
194 size_t size = folio_size(folio); in truncate_inode_partial_folio()
207 folio_wait_writeback(folio); in truncate_inode_partial_folio()
209 truncate_inode_folio(folio->mapping, folio); in truncate_inode_partial_folio()
218 if (!mapping_inaccessible(folio->mapping)) in truncate_inode_partial_folio()
219 folio_zero_range(folio, offset, length); in truncate_inode_partial_folio()
221 if (folio_needs_release(folio)) in truncate_inode_partial_folio()
222 folio_invalidate(folio, offset, length); in truncate_inode_partial_folio()
223 if (!folio_test_large(folio)) in truncate_inode_partial_folio()
226 split_at = folio_page(folio, PAGE_ALIGN_DOWN(offset) / PAGE_SIZE); in truncate_inode_partial_folio()
227 if (!try_folio_split(folio, split_at, NULL)) { in truncate_inode_partial_folio()
233 struct folio *folio2; in truncate_inode_partial_folio()
238 split_at2 = folio_page(folio, in truncate_inode_partial_folio()
256 folio2->mapping == folio->mapping) in truncate_inode_partial_folio()
265 if (folio_test_dirty(folio)) in truncate_inode_partial_folio()
267 truncate_inode_folio(folio->mapping, folio); in truncate_inode_partial_folio()
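
truncate_inode_partial_folio() handles a folio straddling the truncation boundary: wait for writeback, zero the dead part of the folio (unless the mapping's contents may not be touched), invalidate private data over that range, and for a large folio try to split it so the wholly dead pages can actually be freed. A hedged worked example, assuming 4 KiB pages:

    /* Truncating a 64 KiB folio at pos 0 down to i_size 5000
     * (lstart = 5000) gives
     *     offset = lstart - pos   = 5000
     *     length = size  - offset = 60536
     * so bytes [5000, 65536) are zeroed, and the split point is
     * folio_page(folio, PAGE_ALIGN_DOWN(5000) / PAGE_SIZE) = page 1,
     * letting the pages wholly past the boundary be freed. */
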
275 struct folio *folio) in generic_error_remove_folio() argument
285 return truncate_inode_folio(mapping, folio); in generic_error_remove_folio()
300 long mapping_evict_folio(struct address_space *mapping, struct folio *folio) in mapping_evict_folio() argument
305 if (folio_test_dirty(folio) || folio_test_writeback(folio)) in mapping_evict_folio()
308 if (folio_ref_count(folio) > in mapping_evict_folio()
309 folio_nr_pages(folio) + folio_has_private(folio) + 1) in mapping_evict_folio()
311 if (!filemap_release_folio(folio, 0)) in mapping_evict_folio()
314 return remove_mapping(mapping, folio); in mapping_evict_folio()
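
The refcount test at lines 308-309 encodes the only references an evictable folio may have:

    /* Hedged reading of the check:
     *     folio_nr_pages(folio)     - the page-cache entries themselves
     *     folio_has_private(folio)  - one extra ref pinned by fs data
     *     1                         - the reference held by the caller
     * Anything above that sum means another user has the folio, so
     * eviction is skipped. */
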
350 struct folio *folio; in truncate_inode_pages_range() local
388 folio = __filemap_get_folio(mapping, lstart >> PAGE_SHIFT, FGP_LOCK, 0); in truncate_inode_pages_range()
389 if (!IS_ERR(folio)) { in truncate_inode_pages_range()
390 same_folio = lend < folio_pos(folio) + folio_size(folio); in truncate_inode_pages_range()
391 if (!truncate_inode_partial_folio(folio, lstart, lend)) { in truncate_inode_pages_range()
392 start = folio_next_index(folio); in truncate_inode_pages_range()
394 end = folio->index; in truncate_inode_pages_range()
396 folio_unlock(folio); in truncate_inode_pages_range()
397 folio_put(folio); in truncate_inode_pages_range()
398 folio = NULL; in truncate_inode_pages_range()
402 folio = __filemap_get_folio(mapping, lend >> PAGE_SHIFT, in truncate_inode_pages_range()
404 if (!IS_ERR(folio)) { in truncate_inode_pages_range()
405 if (!truncate_inode_partial_folio(folio, lstart, lend)) in truncate_inode_pages_range()
406 end = folio->index; in truncate_inode_pages_range()
407 folio_unlock(folio); in truncate_inode_pages_range()
408 folio_put(folio); in truncate_inode_pages_range()
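
Lines 388-408 handle the two ends of the range before the batch loop: the folio containing lstart is partially truncated first, and if lend lands in the same folio (same_folio) the second lookup is skipped. A hedged usage sketch of the whole routine, e.g. from a hole-punch path (offset/len are assumptions; lend is the last byte, hence the "- 1"):

    truncate_inode_pages_range(inode->i_mapping, offset,
                               offset + len - 1);
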
426 struct folio *folio = fbatch.folios[i]; in truncate_inode_pages_range() local
430 if (xa_is_value(folio)) in truncate_inode_pages_range()
433 folio_lock(folio); in truncate_inode_pages_range()
434 VM_BUG_ON_FOLIO(!folio_contains(folio, indices[i]), folio); in truncate_inode_pages_range()
435 folio_wait_writeback(folio); in truncate_inode_pages_range()
436 truncate_inode_folio(mapping, folio); in truncate_inode_pages_range()
437 folio_unlock(folio); in truncate_inode_pages_range()
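
Filesystems rarely call truncate_inode_pages_range() directly; the usual entry point is truncate_setsize() from a setattr handler. A hedged sketch (example_setattr is a made-up name; the signature matches recent kernels):

    static int example_setattr(struct mnt_idmap *idmap,
                               struct dentry *dentry, struct iattr *attr)
    {
            struct inode *inode = d_inode(dentry);
            int error = setattr_prepare(idmap, dentry, attr);

            if (error)
                    return error;
            if (attr->ia_valid & ATTR_SIZE)
                    /* updates i_size, then truncates the page cache */
                    truncate_setsize(inode, attr->ia_size);
            setattr_copy(idmap, inode, attr);
            mark_inode_dirty(inode);
            return 0;
    }
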
525 struct folio *folio = fbatch.folios[i]; in mapping_try_invalidate() local
529 if (xa_is_value(folio)) { in mapping_try_invalidate()
535 ret = mapping_evict_folio(mapping, folio); in mapping_try_invalidate()
536 folio_unlock(folio); in mapping_try_invalidate()
542 deactivate_file_folio(folio); in mapping_try_invalidate()
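
mapping_try_invalidate() backs invalidate_mapping_pages(): folios that cannot be dropped (dirty, mapped, or still referenced) are not errors, they are merely deactivated so reclaim finds them sooner. A hedged usage sketch, equivalent in effect to posix_fadvise(POSIX_FADV_DONTNEED) from userspace:

    invalidate_mapping_pages(inode->i_mapping, 0, -1);
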
581 static int folio_launder(struct address_space *mapping, struct folio *folio) in folio_launder() argument
583 if (!folio_test_dirty(folio)) in folio_launder()
585 if (folio->mapping != mapping || mapping->a_ops->launder_folio == NULL) in folio_launder()
587 return mapping->a_ops->launder_folio(folio); in folio_launder()
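
->launder_folio gives the filesystem one chance to write a dirty folio back while it is still locked, so invalidate_inode_pages2() can drop it without losing data; folio_launder() returns 0 when there is nothing to do. Only a few filesystems (NFS, for instance) provide the hook; a hedged sketch with made-up names:

    static int example_launder_folio(struct folio *folio)
    {
            /* a real implementation writes the locked, dirty folio
             * back synchronously; 0 here means "nothing to launder" */
            return 0;
    }

    static const struct address_space_operations example_aops = {
            .launder_folio = example_launder_folio,
    };
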
597 int folio_unmap_invalidate(struct address_space *mapping, struct folio *folio, in folio_unmap_invalidate() argument
602 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); in folio_unmap_invalidate()
604 if (folio_mapped(folio)) in folio_unmap_invalidate()
605 unmap_mapping_folio(folio); in folio_unmap_invalidate()
606 BUG_ON(folio_mapped(folio)); in folio_unmap_invalidate()
608 ret = folio_launder(mapping, folio); in folio_unmap_invalidate()
611 if (folio->mapping != mapping) in folio_unmap_invalidate()
613 if (!filemap_release_folio(folio, gfp)) in folio_unmap_invalidate()
618 if (folio_test_dirty(folio)) in folio_unmap_invalidate()
621 BUG_ON(folio_has_private(folio)); in folio_unmap_invalidate()
622 __filemap_remove_folio(folio, NULL); in folio_unmap_invalidate()
628 filemap_free_folio(mapping, folio); in folio_unmap_invalidate()
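
The order of operations in folio_unmap_invalidate() is strict:

    /* Hedged reading of lines 604-628:
     *   1. unmap_mapping_folio()     - tear down user PTEs
     *   2. folio_launder()           - let the fs write back a dirty folio
     *   3. ->mapping recheck         - bail out if truncated meanwhile
     *   4. filemap_release_folio()   - drop fs-private data
     *   5. dirty recheck             - a re-dirtied folio stays cached
     *   6. __filemap_remove_folio()  - detach from i_pages, then free
     */
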
668 struct folio *folio = fbatch.folios[i]; in invalidate_inode_pages2_range() local
672 if (xa_is_value(folio)) { in invalidate_inode_pages2_range()
680 if (!did_range_unmap && folio_mapped(folio)) { in invalidate_inode_pages2_range()
690 folio_lock(folio); in invalidate_inode_pages2_range()
691 if (unlikely(folio->mapping != mapping)) { in invalidate_inode_pages2_range()
692 folio_unlock(folio); in invalidate_inode_pages2_range()
695 VM_BUG_ON_FOLIO(!folio_contains(folio, indices[i]), folio); in invalidate_inode_pages2_range()
696 folio_wait_writeback(folio); in invalidate_inode_pages2_range()
697 ret2 = folio_unmap_invalidate(mapping, folio, GFP_KERNEL); in invalidate_inode_pages2_range()
700 folio_unlock(folio); in invalidate_inode_pages2_range()
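
invalidate_inode_pages2_range() is the strict variant: unlike mapping_try_invalidate() it unmaps mapped folios (did_range_unmap) and reports failure instead of skipping. A hedged usage sketch from a direct-write path (pos/count are assumptions):

    /* after a direct write, drop cached folios over the written range
     * so later buffered reads see the new data */
    int err = invalidate_inode_pages2_range(mapping,
                    pos >> PAGE_SHIFT,
                    (pos + count - 1) >> PAGE_SHIFT);
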
821 struct folio *folio; in pagecache_isize_extended() local
832 folio = filemap_lock_folio(inode->i_mapping, from / PAGE_SIZE); in pagecache_isize_extended()
834 if (IS_ERR(folio)) in pagecache_isize_extended()
840 if (folio_mkclean(folio)) in pagecache_isize_extended()
841 folio_mark_dirty(folio); in pagecache_isize_extended()
848 if (folio_test_dirty(folio)) { in pagecache_isize_extended()
851 offset = from - folio_pos(folio); in pagecache_isize_extended()
852 end = min_t(unsigned int, to - folio_pos(folio), in pagecache_isize_extended()
853 folio_size(folio)); in pagecache_isize_extended()
854 folio_zero_segment(folio, offset, end); in pagecache_isize_extended()
857 folio_unlock(folio); in pagecache_isize_extended()
858 folio_put(folio); in pagecache_isize_extended()
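
pagecache_isize_extended() runs after i_size has grown: folio_mkclean() write-protects the folio straddling the old EOF, so the next mmap store faults and the filesystem can allocate blocks for the newly exposed range, and a dirty folio gets the now-in-range tail beyond the old size zeroed (lines 848-854). A hedged usage sketch from an extending path (oldsize/newsize are assumptions):

    i_size_write(inode, newsize);
    pagecache_isize_extended(inode, oldsize, newsize);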