Lines matching refs:mapping — cross-reference hits for the identifier "mapping"; by the enclosing function names they appear to come from the kernel's mm/page-writeback.c. The trailing "argument"/"local" notes mark whether the hit binds the name as a function argument or a local variable.

1878 void balance_dirty_pages_ratelimited(struct address_space *mapping)  in balance_dirty_pages_ratelimited()  argument
1880 struct inode *inode = mapping->host; in balance_dirty_pages_ratelimited()
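balance_dirty_pages_ratelimited() is the throttle a buffered writer calls after dirtying page-cache pages; the hit above shows it resolving the backing inode from the mapping. A minimal usage sketch (only the call and its header are from the kernel; the surrounding helper name is hypothetical):

    #include <linux/writeback.h>    /* balance_dirty_pages_ratelimited() */

    /*
     * Hypothetical tail of a buffered-write loop: after copying user data
     * into the page cache and dirtying it, let the core throttle this task
     * if it dirties pages faster than writeback can clean them.
     */
    static void myfs_after_dirtying(struct address_space *mapping)
    {
            balance_dirty_pages_ratelimited(mapping);
    }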
2124 void tag_pages_for_writeback(struct address_space *mapping, in tag_pages_for_writeback() argument
2127 XA_STATE(xas, &mapping->i_pages, start); in tag_pages_for_writeback()
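tag_pages_for_writeback() retags every page currently DIRTY in the given range as TOWRITE in mapping->i_pages (the XA_STATE hit above is the cursor for that walk). This is how data-integrity writeback avoids livelock: pages dirtied after the snapshot are left for a later pass. A sketch of the pattern, with an invented wrapper name:

    #include <linux/pagemap.h>
    #include <linux/writeback.h>

    /* Snapshot the currently-dirty pages of [start, end] as TOWRITE. */
    static void myfs_snapshot_dirty(struct address_space *mapping,
                                    pgoff_t start, pgoff_t end)
    {
            tag_pages_for_writeback(mapping, start, end);
            /* A subsequent walk writes only PAGECACHE_TAG_TOWRITE pages. */
    }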
2177 int write_cache_pages(struct address_space *mapping, in write_cache_pages() argument
2194 index = mapping->writeback_index; /* prev offset */ in write_cache_pages()
2203 tag_pages_for_writeback(mapping, index, end); in write_cache_pages()
2212 nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end, in write_cache_pages()
2232 if (unlikely(page->mapping != mapping)) { in write_cache_pages()
2254 trace_wbc_writepage(wbc, inode_to_bdi(mapping->host)); in write_cache_pages()
2306 mapping->writeback_index = done_index; in write_cache_pages()
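write_cache_pages() is the generic dirty-page walker visible in the hits above: it looks up dirty (or TOWRITE) pages by tag, locks each, re-checks page->mapping against truncation (hit 2232), clears the dirty bit for I/O, hands the locked page to the callback, and finally saves mapping->writeback_index for the next cyclic pass. A hedged sketch of a filesystem ->writepages built on it (the myfs_* names are invented; the callback contract, locked page in and the callback unlocks, matches ->writepage):

    #include <linux/pagemap.h>
    #include <linux/writeback.h>

    /* Callback: receives a locked page whose dirty bit was just cleared. */
    static int myfs_writepage_cb(struct page *page,
                                 struct writeback_control *wbc, void *data)
    {
            struct folio *folio = page_folio(page);

            folio_start_writeback(folio);
            folio_unlock(folio);
            /* ... build and submit a bio here; its completion would call
             * folio_end_writeback(). Ended inline only for the sketch. */
            folio_end_writeback(folio);
            return 0;
    }

    static int myfs_writepages(struct address_space *mapping,
                               struct writeback_control *wbc)
    {
            return write_cache_pages(mapping, wbc, myfs_writepage_cb, NULL);
    }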
2319 struct address_space *mapping = data; in __writepage() local
2320 int ret = mapping->a_ops->writepage(page, wbc); in __writepage()
2321 mapping_set_error(mapping, ret); in __writepage()
2335 int generic_writepages(struct address_space *mapping, in generic_writepages() argument
2342 if (!mapping->a_ops->writepage) in generic_writepages()
2346 ret = write_cache_pages(mapping, wbc, __writepage, mapping); in generic_writepages()
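Together these two clusters show the generic fallback path: __writepage() adapts ->writepage to the write_cache_pages() callback signature while latching any error into the mapping, and generic_writepages() passes the mapping itself as the opaque data pointer (hit 2346). A reconstruction of that shape, assuming the usual block plugging around the walk:

    #include <linux/blkdev.h>
    #include <linux/pagemap.h>
    #include <linux/writeback.h>

    static int __writepage(struct page *page, struct writeback_control *wbc,
                           void *data)
    {
            struct address_space *mapping = data;
            int ret = mapping->a_ops->writepage(page, wbc);

            mapping_set_error(mapping, ret);  /* latch errors for fsync */
            return ret;
    }

    int generic_writepages(struct address_space *mapping,
                           struct writeback_control *wbc)
    {
            struct blk_plug plug;
            int ret;

            if (!mapping->a_ops->writepage)   /* nothing to call */
                    return 0;

            blk_start_plug(&plug);
            ret = write_cache_pages(mapping, wbc, __writepage, mapping);
            blk_finish_plug(&plug);
            return ret;
    }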
2353 int do_writepages(struct address_space *mapping, struct writeback_control *wbc) in do_writepages() argument
2360 wb = inode_to_wb_wbc(mapping->host, wbc); in do_writepages()
2363 if (mapping->a_ops->writepages) in do_writepages()
2364 ret = mapping->a_ops->writepages(mapping, wbc); in do_writepages()
2366 ret = generic_writepages(mapping, wbc); in do_writepages()
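do_writepages() is the single dispatch point: it prefers the filesystem's ->writepages and falls back to generic_writepages(), with the cgroup writeback context (inode_to_wb_wbc(), hit 2360) resolved from the mapping's host inode. Most callers reach it indirectly; a sketch using the real filemap helper (the wrapper name is invented):

    #include <linux/fs.h>
    #include <linux/pagemap.h>

    /*
     * Kick WB_SYNC_NONE writeback of every dirty page in a file.
     * filemap_fdatawrite() builds a writeback_control and funnels
     * into do_writepages() underneath.
     */
    static int myfs_kick_writeback(struct inode *inode)
    {
            return filemap_fdatawrite(inode->i_mapping);
    }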
2403 struct address_space *mapping = folio->mapping; in folio_write_one() local
2416 ret = mapping->a_ops->writepage(&folio->page, &wbc); in folio_write_one()
2425 ret = filemap_check_errors(mapping); in folio_write_one()
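folio_write_one() writes a single dirty folio synchronously through ->writepage (hit 2416) and then reports any latched mapping error (hit 2425). It expects the folio locked on entry and unlocks it before returning, so a minimal caller looks like this (the wrapper name is invented):

    #include <linux/pagemap.h>

    /* Flush one folio to storage; returns 0 or a writeback error. */
    static int myfs_flush_folio(struct folio *folio)
    {
            folio_lock(folio);
            return folio_write_one(folio);  /* drops the folio lock */
    }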
2449 struct address_space *mapping) in folio_account_dirtied() argument
2451 struct inode *inode = mapping->host; in folio_account_dirtied()
2453 trace_writeback_dirty_folio(folio, mapping); in folio_account_dirtied()
2455 if (mapping_can_writeback(mapping)) { in folio_account_dirtied()
2480 void folio_account_cleaned(struct folio *folio, struct address_space *mapping, in folio_account_cleaned() argument
2483 if (mapping_can_writeback(mapping)) { in folio_account_cleaned()
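folio_account_dirtied() and folio_account_cleaned() are the paired stat helpers behind dirtying and cleaning, and both are gated on mapping_can_writeback(), so mappings that never write back skip dirty accounting entirely. A hypothetical illustration of that guard (the counter names in the comments are the ones these helpers adjust):

    #include <linux/backing-dev.h>
    #include <linux/pagemap.h>

    static void my_accounting_guard(struct address_space *mapping)
    {
            if (!mapping_can_writeback(mapping))
                    return; /* no NR_FILE_DIRTY / WB_RECLAIMABLE updates */
            /* ... adjust per-node and per-wb dirty counters here ... */
    }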
2501 void __folio_mark_dirty(struct folio *folio, struct address_space *mapping, in __folio_mark_dirty() argument
2506 xa_lock_irqsave(&mapping->i_pages, flags); in __folio_mark_dirty()
2507 if (folio->mapping) { /* Race with truncate? */ in __folio_mark_dirty()
2509 folio_account_dirtied(folio, mapping); in __folio_mark_dirty()
2510 __xa_set_mark(&mapping->i_pages, folio_index(folio), in __folio_mark_dirty()
2513 xa_unlock_irqrestore(&mapping->i_pages, flags); in __folio_mark_dirty()
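The hits at 2506-2513 show the locking discipline of __folio_mark_dirty(): mapping->i_pages is taken irq-safe, and folio->mapping is re-checked under the lock because truncation may have detached the folio in the meantime; only then are accounting and the DIRTY tag applied. A reconstruction of that skeleton (the uptodate warning is how the warn argument is used in the source):

    static void sketch_folio_mark_dirty(struct folio *folio,
                                        struct address_space *mapping,
                                        int warn)
    {
            unsigned long flags;

            xa_lock_irqsave(&mapping->i_pages, flags);
            if (folio->mapping) {   /* race with truncate? */
                    WARN_ON_ONCE(warn && !folio_test_uptodate(folio));
                    folio_account_dirtied(folio, mapping);
                    __xa_set_mark(&mapping->i_pages, folio_index(folio),
                                  PAGECACHE_TAG_DIRTY);
            }
            xa_unlock_irqrestore(&mapping->i_pages, flags);
    }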
2535 bool filemap_dirty_folio(struct address_space *mapping, struct folio *folio) in filemap_dirty_folio() argument
2543 __folio_mark_dirty(folio, mapping, !folio_test_private(folio)); in filemap_dirty_folio()
2546 if (mapping->host) { in filemap_dirty_folio()
2548 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); in filemap_dirty_folio()
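filemap_dirty_folio() is the simple dirtying implementation for filesystems with no per-folio private state: mark and account the folio, then (hits 2546-2548) propagate I_DIRTY_PAGES to the host inode when there is one. A sketch of delegating to it from a filesystem hook (the hook name is hypothetical; which a_ops member it is wired to depends on the kernel version):

    #include <linux/pagemap.h>
    #include <linux/writeback.h>

    static bool myfs_dirty_folio(struct address_space *mapping,
                                 struct folio *folio)
    {
            return filemap_dirty_folio(mapping, folio);
    }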
2568 struct address_space *mapping = folio->mapping; in folio_account_redirty() local
2570 if (mapping && mapping_can_writeback(mapping)) { in folio_account_redirty()
2571 struct inode *inode = mapping->host; in folio_account_redirty()
2604 ret = filemap_dirty_folio(folio->mapping, folio); in folio_redirty_for_writepage()
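folio_account_redirty() backs out the bandwidth/dirtied accounting when a folio is re-dirtied instead of written, and folio_redirty_for_writepage() (hit 2604) is the caller-facing entry for a ->writepage that cannot make progress right now. The classic pattern, with a hypothetical progress check:

    #include <linux/pagemap.h>
    #include <linux/writeback.h>

    static int myfs_writepage(struct page *page, struct writeback_control *wbc)
    {
            struct folio *folio = page_folio(page);

            if (!myfs_can_write_now(folio)) {  /* hypothetical check */
                    folio_redirty_for_writepage(wbc, folio);
                    folio_unlock(folio);
                    return 0;  /* success: writeback retries later */
            }
            folio_start_writeback(folio);
            folio_unlock(folio);
            /* ... submit the I/O; completion ends writeback ... */
            folio_end_writeback(folio);  /* inline stand-in for the sketch */
            return 0;
    }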
2624 struct address_space *mapping = folio_mapping(folio); in folio_mark_dirty() local
2626 if (likely(mapping)) { in folio_mark_dirty()
2639 return mapping->a_ops->set_page_dirty(&folio->page); in folio_mark_dirty()
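folio_mark_dirty() dispatches through folio_mapping(): page-cache folios go to the mapping's dirtying hook (hit 2639), while folios without a mapping merely get the dirty flag set. The caller must hold something that keeps truncation away, typically the folio lock:

    #include <linux/pagemap.h>

    static void my_modify_and_dirty(struct folio *folio)
    {
            folio_lock(folio);
            /* ... modify folio contents ... */
            folio_mark_dirty(folio);
            folio_unlock(folio);
    }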
2685 struct address_space *mapping = folio_mapping(folio); in __folio_cancel_dirty() local
2687 if (mapping_can_writeback(mapping)) { in __folio_cancel_dirty()
2688 struct inode *inode = mapping->host; in __folio_cancel_dirty()
2696 folio_account_cleaned(folio, mapping, wb); in __folio_cancel_dirty()
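__folio_cancel_dirty() undoes dirty accounting when a dirty folio will not be written after all (e.g. on truncation). Callers normally go through the inline wrapper from <linux/pagemap.h>, which avoids the work for clean folios:

    static inline void folio_cancel_dirty(struct folio *folio)
    {
            /* Avoid atomic ops, locking, etc. if not needed. */
            if (folio_test_dirty(folio))
                    __folio_cancel_dirty(folio);
    }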
2722 struct address_space *mapping = folio_mapping(folio); in folio_clear_dirty_for_io() local
2727 if (mapping && mapping_can_writeback(mapping)) { in folio_clear_dirty_for_io()
2728 struct inode *inode = mapping->host; in folio_clear_dirty_for_io()
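folio_clear_dirty_for_io() is the handoff from "dirty" to "under writeback": a true return means this caller now owns writing the folio. The canonical sequence around it:

    #include <linux/pagemap.h>

    static void sketch_start_write(struct folio *folio)
    {
            folio_lock(folio);
            if (folio_clear_dirty_for_io(folio)) {
                    folio_start_writeback(folio);
                    /* ... submit I/O; completion calls
                     * folio_end_writeback() ... */
            }
            folio_unlock(folio);
    }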
2803 struct address_space *mapping = folio_mapping(folio); in __folio_end_writeback() local
2807 if (mapping && mapping_use_writeback_tags(mapping)) { in __folio_end_writeback()
2808 struct inode *inode = mapping->host; in __folio_end_writeback()
2812 xa_lock_irqsave(&mapping->i_pages, flags); in __folio_end_writeback()
2815 __xa_clear_mark(&mapping->i_pages, folio_index(folio), in __folio_end_writeback()
2822 if (!mapping_tagged(mapping, in __folio_end_writeback()
2828 if (mapping->host && !mapping_tagged(mapping, in __folio_end_writeback()
2830 sb_clear_inode_writeback(mapping->host); in __folio_end_writeback()
2832 xa_unlock_irqrestore(&mapping->i_pages, flags); in __folio_end_writeback()
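__folio_end_writeback() clears the WRITEBACK tag under the i_pages lock and, when this was the inode's last folio under writeback, removes the inode from the superblock writeback list (hits 2828-2830). It is reached via folio_end_writeback() from I/O completion; a sketch of a bio endio handler, assuming a kernel recent enough to have the folio bio iterator:

    #include <linux/bio.h>
    #include <linux/pagemap.h>

    static void myfs_write_endio(struct bio *bio)
    {
            struct folio_iter fi;

            bio_for_each_folio_all(fi, bio)
                    folio_end_writeback(fi.folio);
            bio_put(bio);
    }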
2848 struct address_space *mapping = folio_mapping(folio); in __folio_start_writeback() local
2853 if (mapping && mapping_use_writeback_tags(mapping)) { in __folio_start_writeback()
2854 XA_STATE(xas, &mapping->i_pages, folio_index(folio)); in __folio_start_writeback()
2855 struct inode *inode = mapping->host; in __folio_start_writeback()
2865 on_wblist = mapping_tagged(mapping, in __folio_start_writeback()
2882 if (mapping->host && !on_wblist) in __folio_start_writeback()
2883 sb_mark_inode_writeback(mapping->host); in __folio_start_writeback()
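__folio_start_writeback() is the mirror image: it sets the WRITEBACK tag, optionally keeps the TOWRITE tag (the keep_write argument) so a tagged data-integrity walk still finds the folio, and puts the inode on the superblock writeback list for its first folio under writeback (hits 2882-2883). Callers use thin inline wrappers, as in <linux/page-flags.h>:

    static inline void folio_start_writeback(struct folio *folio)
    {
            __folio_start_writeback(folio, false);
    }

    static inline void folio_start_writeback_keepwrite(struct folio *folio)
    {
            __folio_start_writeback(folio, true);
    }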