Cross-reference of fbatch (struct folio_batch) usage under fs/, grouped by directory and file. Each row gives the source line number, the code at that line, and the enclosing function; "(local)", "(argument)" and "(member)" note how fbatch is declared there. Truncated listings end with [all …].

| /fs/nilfs2/ |
| page.c |
| 246 | struct folio_batch fbatch; | nilfs_copy_dirty_pages() (local) |
| 251 | folio_batch_init(&fbatch); | nilfs_copy_dirty_pages() |
| 282 | folio_batch_release(&fbatch); | nilfs_copy_dirty_pages() |
| 301 | struct folio_batch fbatch; | nilfs_copy_back_pages() (local) |
| 305 | folio_batch_init(&fbatch); | nilfs_copy_back_pages() |
| 363 | struct folio_batch fbatch; | nilfs_clear_dirty_pages() (local) |
| 367 | folio_batch_init(&fbatch); | nilfs_clear_dirty_pages() |
| 513 | struct folio_batch fbatch; | nilfs_find_uncommitted_extent() (local) |
| 521 | folio_batch_init(&fbatch); | nilfs_find_uncommitted_extent() |
| 525 | &fbatch); | nilfs_find_uncommitted_extent() |
| [all …] |
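
Every hit above follows the same lifecycle: a struct folio_batch is declared on the stack, initialised with folio_batch_init(), refilled from the page cache one batch at a time, and released with folio_batch_release() once the folios it references have been processed. A minimal sketch of that lifecycle, assuming a generic dirty-folio walk rather than anything nilfs-specific (the function name is illustrative):

#include <linux/pagemap.h>
#include <linux/pagevec.h>

/* Sketch: visit every dirty folio of @mapping in batches. */
static void visit_dirty_folios(struct address_space *mapping)
{
	struct folio_batch fbatch;
	pgoff_t index = 0;
	unsigned int i;

	folio_batch_init(&fbatch);

	/* Each call refills the batch and advances @index past the hits. */
	while (filemap_get_folios_tag(mapping, &index, (pgoff_t)-1,
				      PAGECACHE_TAG_DIRTY, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			struct folio *folio = fbatch.folios[i];

			folio_lock(folio);
			/* ... per-folio work goes here ... */
			folio_unlock(folio);
		}
		/* Drop the references taken by the lookup and empty the batch. */
		folio_batch_release(&fbatch);
		cond_resched();
	}
}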
|
| segment.c |
| 704 | struct folio_batch fbatch; | nilfs_lookup_dirty_data_buffers() (local) |
| 718 | folio_batch_init(&fbatch); | nilfs_lookup_dirty_data_buffers() |
| 722 | PAGECACHE_TAG_DIRTY, &fbatch)) | nilfs_lookup_dirty_data_buffers() |
| 727 | struct folio *folio = fbatch.folios[i]; | nilfs_lookup_dirty_data_buffers() |
| 749 | folio_batch_release(&fbatch); | nilfs_lookup_dirty_data_buffers() |
| 757 | folio_batch_release(&fbatch); | nilfs_lookup_dirty_data_buffers() |
| 767 | struct folio_batch fbatch; | nilfs_lookup_dirty_node_buffers() (local) |
| 774 | folio_batch_init(&fbatch); | nilfs_lookup_dirty_node_buffers() |
| 777 | (pgoff_t)-1, PAGECACHE_TAG_DIRTY, &fbatch)) { | nilfs_lookup_dirty_node_buffers() |
| 779 | bh = head = folio_buffers(fbatch.folios[i]); | nilfs_lookup_dirty_node_buffers() |
| [all …] |
|
| btree.c |
| 2172 | struct folio_batch fbatch; | nilfs_btree_lookup_dirty_buffers() (local) |
| 2182 | folio_batch_init(&fbatch); | nilfs_btree_lookup_dirty_buffers() |
| 2185 | PAGECACHE_TAG_DIRTY, &fbatch)) { | nilfs_btree_lookup_dirty_buffers() |
| 2186 | for (i = 0; i < folio_batch_count(&fbatch); i++) { | nilfs_btree_lookup_dirty_buffers() |
| 2187 | bh = head = folio_buffers(fbatch.folios[i]); | nilfs_btree_lookup_dirty_buffers() |
| 2194 | folio_batch_release(&fbatch); | nilfs_btree_lookup_dirty_buffers() |
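
nilfs_lookup_dirty_data_buffers(), nilfs_lookup_dirty_node_buffers() and nilfs_btree_lookup_dirty_buffers() combine the batch walk with a walk over each folio's buffer_heads via folio_buffers(). Roughly, and only as a sketch of the shape (the collection list and the function name are illustrative, not the nilfs code):

#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>

/*
 * Sketch: collect the dirty buffers of every dirty-tagged folio of
 * @mapping onto @listp.
 */
static void collect_dirty_buffers(struct address_space *mapping,
				  struct list_head *listp)
{
	struct folio_batch fbatch;
	pgoff_t index = 0;
	unsigned int i;

	folio_batch_init(&fbatch);
	while (filemap_get_folios_tag(mapping, &index, (pgoff_t)-1,
				      PAGECACHE_TAG_DIRTY, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			struct buffer_head *bh, *head;

			bh = head = folio_buffers(fbatch.folios[i]);
			if (!bh)
				continue;
			/* Circular list: stop once we are back at @head. */
			do {
				if (buffer_dirty(bh)) {
					get_bh(bh);
					list_add_tail(&bh->b_assoc_buffers, listp);
				}
				bh = bh->b_this_page;
			} while (bh != head);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}
}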
|
| /fs/ramfs/ |
| file-nommu.c |
| 208 | struct folio_batch fbatch; | ramfs_nommu_get_unmapped_area() (local) |
| 224 | folio_batch_init(&fbatch); | ramfs_nommu_get_unmapped_area() |
| 228 | ULONG_MAX, &fbatch); | ramfs_nommu_get_unmapped_area() |
| 235 | ret = (unsigned long) folio_address(fbatch.folios[0]); | ramfs_nommu_get_unmapped_area() |
| 236 | pfn = folio_pfn(fbatch.folios[0]); | ramfs_nommu_get_unmapped_area() |
| 240 | if (pfn + nr_pages != folio_pfn(fbatch.folios[loop])) { | ramfs_nommu_get_unmapped_area() |
| 244 | nr_pages += folio_nr_pages(fbatch.folios[loop]); | ramfs_nommu_get_unmapped_area() |
| 250 | folio_batch_release(&fbatch); | ramfs_nommu_get_unmapped_area() |
| 256 | folio_batch_release(&fbatch); | ramfs_nommu_get_unmapped_area() |
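
Here the batch is not used for writeback at all: on !MMU, ramfs_nommu_get_unmapped_area() has to prove that the folios backing a region are physically contiguous before it can be mapped, so it compares folio_pfn() of each folio against a running pfn counter. A simplified sketch of that check, examining only the first batch and using illustrative names:

#include <linux/pagemap.h>
#include <linux/pagevec.h>

/*
 * Sketch: return the kernel address of @nr physically contiguous pages
 * starting at @index in @mapping, or 0 if the backing is not contiguous.
 */
static unsigned long contiguous_region_address(struct address_space *mapping,
					       pgoff_t index, unsigned long nr)
{
	struct folio_batch fbatch;
	unsigned long addr, nr_pages = 0, pfn;
	unsigned int loop, got;

	folio_batch_init(&fbatch);
	got = filemap_get_folios_contig(mapping, &index, ULONG_MAX, &fbatch);
	if (!got)
		return 0;

	addr = (unsigned long)folio_address(fbatch.folios[0]);
	pfn = folio_pfn(fbatch.folios[0]);
	for (loop = 0; loop < got; loop++) {
		/* A hole in the pfn run means the region is not mappable. */
		if (pfn + nr_pages != folio_pfn(fbatch.folios[loop])) {
			addr = 0;
			break;
		}
		nr_pages += folio_nr_pages(fbatch.folios[loop]);
		if (nr_pages >= nr)
			break;
	}
	if (nr_pages < nr)
		addr = 0;

	folio_batch_release(&fbatch);
	return addr;
}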
|
| /fs/bcachefs/ |
| fs-io-pagecache.c |
| 255 | struct folio_batch fbatch; | bch2_mark_pagecache_unallocated() (local) |
| 261 | folio_batch_init(&fbatch); | bch2_mark_pagecache_unallocated() |
| 287 | folio_batch_release(&fbatch); | bch2_mark_pagecache_unallocated() |
| 299 | struct folio_batch fbatch; | bch2_mark_pagecache_reserved() (local) |
| 306 | folio_batch_init(&fbatch); | bch2_mark_pagecache_reserved() |
| 316 | folio_batch_release(&fbatch); | bch2_mark_pagecache_reserved() |
| 344 | folio_batch_release(&fbatch); | bch2_mark_pagecache_reserved() |
| 699 | struct folio_batch fbatch; | bch2_seek_pagecache_data() (local) |
| 707 | folio_batch_init(&fbatch); | bch2_seek_pagecache_data() |
| 717 | folio_batch_release(&fbatch); | bch2_seek_pagecache_data() |
| [all …] |
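
The three bcachefs hits share one shape: convert a byte range to page-cache indices, fetch folios in batches, and take the folio lock around a per-folio state update (allocation/reservation bookkeeping in the real code). A generic sketch of that shape, with the per-folio work elided and the name illustrative:

#include <linux/pagemap.h>
#include <linux/pagevec.h>

/* Sketch: walk the folios covering the byte range [start, end) of @mapping. */
static void walk_pagecache_range(struct address_space *mapping,
				 loff_t start, loff_t end)
{
	pgoff_t index = start >> PAGE_SHIFT;
	pgoff_t end_index = (end - 1) >> PAGE_SHIFT;
	struct folio_batch fbatch;
	unsigned int i;

	folio_batch_init(&fbatch);

	while (filemap_get_folios(mapping, &index, end_index, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			struct folio *folio = fbatch.folios[i];

			folio_lock(folio);
			/* ... adjust per-folio state for the range here ... */
			folio_unlock(folio);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}
}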
|
| /fs/netfs/ |
| rolling_buffer.c |
| 199 | struct folio_batch fbatch; | rolling_buffer_clear() (local) |
| 202 | folio_batch_init(&fbatch); | rolling_buffer_clear() |
| 213 | if (!folio_batch_add(&fbatch, folio)) | rolling_buffer_clear() |
| 214 | folio_batch_release(&fbatch); | rolling_buffer_clear() |
| 221 | folio_batch_release(&fbatch); | rolling_buffer_clear() |
|
| misc.c |
| 81 | struct folio_batch fbatch; | netfs_free_folioq_buffer() (local) |
| 83 | folio_batch_init(&fbatch); | netfs_free_folioq_buffer() |
| 94 | if (folio_batch_add(&fbatch, folio)) | netfs_free_folioq_buffer() |
| 95 | folio_batch_release(&fbatch); | netfs_free_folioq_buffer() |
| 103 | folio_batch_release(&fbatch); | netfs_free_folioq_buffer() |
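
Unlike the lookup-driven users, the two netfs sites build the batch by hand: folios being torn down are fed to folio_batch_add(), and the batch is released whenever it fills up, plus once more at the end. folio_batch_add() returns the number of slots still free, so a zero return means "full". A sketch of that accumulation pattern (the function and its argument list are illustrative):

#include <linux/pagevec.h>

/* Sketch: drop one reference on each of @count folios, in batches. */
static void put_folios_batched(struct folio **folios, unsigned int count)
{
	struct folio_batch fbatch;
	unsigned int i;

	folio_batch_init(&fbatch);
	for (i = 0; i < count; i++) {
		/* Flush once the batch is full; release also empties it. */
		if (!folio_batch_add(&fbatch, folios[i]))
			folio_batch_release(&fbatch);
	}
	/* Release whatever is still queued. */
	folio_batch_release(&fbatch);
}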
|
| /fs/gfs2/ |
| aops.c |
| 193 | struct folio_batch *fbatch, | gfs2_write_jdata_batch() (argument) |
| 202 | int nr_folios = folio_batch_count(fbatch); | gfs2_write_jdata_batch() |
| 205 | size += folio_size(fbatch->folios[i]); | gfs2_write_jdata_batch() |
| 213 | struct folio *folio = fbatch->folios[i]; | gfs2_write_jdata_batch() |
| 288 | struct folio_batch fbatch; | gfs2_write_cache_jdata() (local) |
| 298 | folio_batch_init(&fbatch); | gfs2_write_cache_jdata() |
| 325 | tag, &fbatch); | gfs2_write_cache_jdata() |
| 329 | ret = gfs2_write_jdata_batch(mapping, wbc, &fbatch, | gfs2_write_cache_jdata() |
| 335 | folio_batch_release(&fbatch); | gfs2_write_cache_jdata() |
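
gfs2 splits the work in two: gfs2_write_cache_jdata() fills the batch from the appropriate tag and hands the whole batch to gfs2_write_jdata_batch(), which first sums folio_size() over the batch so the journal transaction can be sized up front. A rough sketch of the batch-level helper only (transaction and write-out details elided, names illustrative):

#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>

/* Sketch: write back one already-filled batch as a single unit of work. */
static int write_batch(struct address_space *mapping,
		       struct writeback_control *wbc,
		       struct folio_batch *fbatch)
{
	unsigned int nr_folios = folio_batch_count(fbatch);
	unsigned int i;
	size_t size = 0;

	/* Size the whole batch first, e.g. to reserve journal space. */
	for (i = 0; i < nr_folios; i++)
		size += folio_size(fbatch->folios[i]);
	if (!size)
		return 0;

	for (i = 0; i < nr_folios; i++) {
		struct folio *folio = fbatch->folios[i];

		folio_lock(folio);
		/* Skip folios truncated or cleaned since the tag lookup. */
		if (folio->mapping == mapping && folio_clear_dirty_for_io(folio)) {
			/* ... submit the folio for write-out ... */
		}
		folio_unlock(folio);
	}
	return 0;
}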
|
| /fs/ceph/ |
| addr.c |
| 609 | struct folio_batch fbatch; | (member) |
| 1014 | folio_batch_init(&ceph_wbc->fbatch); | ceph_folio_batch_init() |
| 1300 | folio = ceph_wbc->fbatch.folios[i]; | ceph_process_folio_batch() |
| 1379 | ceph_wbc->fbatch.folios[i] = NULL; | ceph_process_folio_batch() |
| 1395 | if (!fbatch->folios[j]) | ceph_shift_unused_folios_left() |
| 1399 | fbatch->folios[n] = fbatch->folios[j]; | ceph_shift_unused_folios_left() |
| 1405 | fbatch->nr = n; | ceph_shift_unused_folios_left() |
| 1613 | &ceph_wbc->fbatch))) { | ceph_wait_until_current_writes_complete() |
| 1681 | &ceph_wbc.fbatch); | ceph_writepages_start() |
| 1731 | (int)ceph_wbc.fbatch.nr, | ceph_writepages_start() |
| [all …] |
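
ceph is the one user here that embeds the batch in a larger context structure (its writeback control) rather than keeping it on the stack. ceph_process_folio_batch() NULLs out the slots it has consumed, and ceph_shift_unused_folios_left() then compacts the survivors to the front and fixes up the count before the batch is reused. The compaction step looks roughly like this (a sketch mirroring the hits above, not the ceph function itself):

#include <linux/pagevec.h>

/*
 * Sketch: compact a folio_batch whose consumed slots have been set to
 * NULL, keeping the surviving folios in order.
 */
static void shift_unused_folios_left(struct folio_batch *fbatch)
{
	unsigned int j, n = 0;

	for (j = 0; j < folio_batch_count(fbatch); j++) {
		if (!fbatch->folios[j])
			continue;
		if (n < j)
			fbatch->folios[n] = fbatch->folios[j];
		n++;
	}
	/* The batch now holds only the unconsumed folios. */
	fbatch->nr = n;
}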
|
| /fs/f2fs/ |
| node.c |
| 1632 | struct folio_batch fbatch; | last_fsync_dnode() (local) |
| 1636 | folio_batch_init(&fbatch); | last_fsync_dnode() |
| 1641 | &fbatch))) { | last_fsync_dnode() |
| 1829 | struct folio_batch fbatch; | f2fs_fsync_node_pages() (local) |
| 1843 | folio_batch_init(&fbatch); | f2fs_fsync_node_pages() |
| 1848 | &fbatch))) { | f2fs_fsync_node_pages() |
| 1986 | struct folio_batch fbatch; | f2fs_flush_inline_data() (local) |
| 1989 | folio_batch_init(&fbatch); | f2fs_flush_inline_data() |
| 1993 | &fbatch))) { | f2fs_flush_inline_data() |
| 2029 | struct folio_batch fbatch; | f2fs_sync_node_pages() (local) |
| [all …] |
|
| checkpoint.c |
| 415 | struct folio_batch fbatch; | f2fs_sync_meta_pages() (local) |
| 421 | folio_batch_init(&fbatch); | f2fs_sync_meta_pages() |
| 427 | PAGECACHE_TAG_DIRTY, &fbatch))) { | f2fs_sync_meta_pages() |
| 431 | struct folio *folio = fbatch.folios[i]; | f2fs_sync_meta_pages() |
| 435 | folio_nr_pages(fbatch.folios[i-1])) { | f2fs_sync_meta_pages() |
| 436 | folio_batch_release(&fbatch); | f2fs_sync_meta_pages() |
| 467 | folio_batch_release(&fbatch); | f2fs_sync_meta_pages() |
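
f2fs_sync_meta_pages() adds a twist to the dirty-tag walk: it watches for a break in the run of page-cache indices (previous index plus folio_nr_pages() of the previous folio) and flushes what it has gathered whenever the run breaks, so each submission stays physically merge-able. A sketch of that contiguity check, with the actual submission elided and the name illustrative:

#include <linux/pagemap.h>
#include <linux/pagevec.h>

/* Sketch: write dirty folios of @mapping, one contiguous index run at a time. */
static void write_contiguous_dirty_runs(struct address_space *mapping)
{
	struct folio_batch fbatch;
	pgoff_t index = 0, prev = (pgoff_t)-1;
	long prev_nr = 0;
	unsigned int i, nr;

	folio_batch_init(&fbatch);
	while ((nr = filemap_get_folios_tag(mapping, &index, (pgoff_t)-1,
					    PAGECACHE_TAG_DIRTY, &fbatch))) {
		for (i = 0; i < nr; i++) {
			struct folio *folio = fbatch.folios[i];

			/* A hole in the index run ends the current submission. */
			if (prev != (pgoff_t)-1 &&
			    folio->index != prev + prev_nr) {
				/* ... submit what has been gathered so far ... */
			}
			/* ... add the folio to the pending submission ... */
			prev = folio->index;
			prev_nr = folio_nr_pages(folio);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}
	/* ... submit the final run ... */
}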
|
| compress.c |
| 1990 | struct folio_batch fbatch; | f2fs_invalidate_compress_pages() (local) |
| 1997 | folio_batch_init(&fbatch); | f2fs_invalidate_compress_pages() |
| 2002 | nr = filemap_get_folios(mapping, &index, end - 1, &fbatch); | f2fs_invalidate_compress_pages() |
| 2007 | struct folio *folio = fbatch.folios[i]; | f2fs_invalidate_compress_pages() |
| 2023 | folio_batch_release(&fbatch); | f2fs_invalidate_compress_pages() |
|
| data.c |
| 2947 | struct folio_batch fbatch; | f2fs_write_cache_pages() (local) |
| 2989 | folio_batch_init(&fbatch); | f2fs_write_cache_pages() |
| 3019 | tag, &fbatch); | f2fs_write_cache_pages() |
| 3027 | struct folio *folio = fbatch.folios[i]; | f2fs_write_cache_pages() |
| 3036 | folio_batch_release(&fbatch); | f2fs_write_cache_pages() |
| 3042 | folio_batch_release(&fbatch); | f2fs_write_cache_pages() |
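
f2fs_write_cache_pages() is a filesystem-private variant of the generic write_cache_pages() loop, as are ext4's mpage_prepare_extent_to_map() and btrfs's extent_write_cache_pages() further down this listing: for data-integrity writeback the dirty folios in the range are first re-tagged TOWRITE by tag_pages_for_writeback(), and the batch is then filled from that tag so folios dirtied concurrently are not picked up twice. The skeleton, with the per-folio write elided and the name illustrative:

#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>

/* Sketch of the generic write_cache_pages()-style batch loop. */
static int write_cache_folios(struct address_space *mapping,
			      struct writeback_control *wbc)
{
	struct folio_batch fbatch;
	pgoff_t index = wbc->range_start >> PAGE_SHIFT;
	pgoff_t end = wbc->range_end >> PAGE_SHIFT;
	xa_mark_t tag;
	unsigned int i, nr;
	int ret = 0;

	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) {
		/* Freeze the working set for data-integrity writeback. */
		tag_pages_for_writeback(mapping, index, end);
		tag = PAGECACHE_TAG_TOWRITE;
	} else {
		tag = PAGECACHE_TAG_DIRTY;
	}

	folio_batch_init(&fbatch);
	while (!ret && (nr = filemap_get_folios_tag(mapping, &index, end,
						    tag, &fbatch))) {
		for (i = 0; i < nr; i++) {
			struct folio *folio = fbatch.folios[i];

			folio_lock(folio);
			/* Skip folios truncated or cleaned since the lookup. */
			if (folio->mapping == mapping &&
			    folio_clear_dirty_for_io(folio)) {
				/* ... write the folio, set ret on error ... */
			}
			folio_unlock(folio);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}
	return ret;
}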
|
| /fs/btrfs/tests/ |
| extent-io-tests.c |
| 25 | struct folio_batch fbatch; | process_page_range() (local) |
| 32 | folio_batch_init(&fbatch); | process_page_range() |
| 36 | end_index, &fbatch); | process_page_range() |
| 38 | struct folio *folio = fbatch.folios[i]; | process_page_range() |
| 48 | folio_batch_release(&fbatch); | process_page_range() |
|
| /fs/btrfs/ |
| extent_io.c |
| 206 | struct folio_batch fbatch; | __process_folios_contig() (local) |
| 209 | folio_batch_init(&fbatch); | __process_folios_contig() |
| 214 | end_index, &fbatch); | __process_folios_contig() |
| 221 | folio_batch_release(&fbatch); | __process_folios_contig() |
| 245 | struct folio_batch fbatch; | lock_delalloc_folios() (local) |
| 247 | folio_batch_init(&fbatch); | lock_delalloc_folios() |
| 252 | end_index, &fbatch); | lock_delalloc_folios() |
| 275 | folio_batch_release(&fbatch); | lock_delalloc_folios() |
| 281 | folio_batch_release(&fbatch); | lock_delalloc_folios() |
| 2323 | struct folio_batch fbatch; | extent_write_cache_pages() (local) |
| [all …] |
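
__process_folios_contig() and lock_delalloc_folios() operate on a contiguous byte range, so they use filemap_get_folios_contig(), which stops at the first gap; if the range turns out to be incomplete part-way through, the folios already processed have to be unwound. A rough sketch of that pattern, with the unwind itself elided and names illustrative:

#include <linux/pagemap.h>
#include <linux/pagevec.h>

/*
 * Sketch: lock every folio covering [start, end] of @mapping.  Returns 0 on
 * success or -EAGAIN if the range is not fully present in the page cache;
 * @processed_end reports how far the walk got so the caller can unwind.
 */
static int lock_folio_range(struct address_space *mapping, u64 start, u64 end,
			    u64 *processed_end)
{
	pgoff_t index = start >> PAGE_SHIFT;
	pgoff_t end_index = end >> PAGE_SHIFT;
	struct folio_batch fbatch;
	unsigned int i, found;

	*processed_end = start;
	folio_batch_init(&fbatch);

	while (index <= end_index) {
		found = filemap_get_folios_contig(mapping, &index, end_index,
						  &fbatch);
		if (!found)		/* hole in the range */
			goto out_fail;

		for (i = 0; i < found; i++) {
			struct folio *folio = fbatch.folios[i];

			folio_lock(folio);
			if (folio->mapping != mapping) {
				/* Raced with truncation; give up. */
				folio_unlock(folio);
				folio_batch_release(&fbatch);
				goto out_fail;
			}
			*processed_end = folio_pos(folio) + folio_size(folio) - 1;
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}
	return 0;

out_fail:
	/* ... caller unlocks the folios in [start, *processed_end] ... */
	return -EAGAIN;
}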
|
| compression.c |
| 287 | struct folio_batch fbatch; | end_compressed_writeback() (local) |
| 295 | folio_batch_init(&fbatch); | end_compressed_writeback() |
| 298 | &fbatch); | end_compressed_writeback() |
| 304 | struct folio *folio = fbatch.folios[i]; | end_compressed_writeback() |
| 309 | folio_batch_release(&fbatch); | end_compressed_writeback() |
|
| file.c |
| 2228 | struct folio_batch fbatch; | check_range_has_page() (local) |
| 2251 | folio_batch_init(&fbatch); | check_range_has_page() |
| 2252 | found_folios = filemap_get_folios(inode->i_mapping, &tmp, end_index, &fbatch); | check_range_has_page() |
| 2254 | struct folio *folio = fbatch.folios[i]; | check_range_has_page() |
| 2266 | folio_batch_release(&fbatch); | check_range_has_page() |
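
check_range_has_page() only needs a yes/no answer, so a single batch lookup suffices; the real helper also filters on folio state, which this condensed, illustrative sketch skips:

#include <linux/pagemap.h>
#include <linux/pagevec.h>

/* Sketch: does the page cache of @mapping hold any folio in [start, end]? */
static bool range_has_folio(struct address_space *mapping, loff_t start, loff_t end)
{
	pgoff_t index = start >> PAGE_SHIFT;
	pgoff_t end_index = end >> PAGE_SHIFT;
	struct folio_batch fbatch;
	unsigned int nr;

	folio_batch_init(&fbatch);
	/* Any folio returned lies inside the range, so one lookup is enough. */
	nr = filemap_get_folios(mapping, &index, end_index, &fbatch);
	folio_batch_release(&fbatch);
	return nr != 0;
}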
|
| /fs/hugetlbfs/ |
| inode.c |
| 574 | struct folio_batch fbatch; | remove_inode_hugepages() (local) |
| 579 | folio_batch_init(&fbatch); | remove_inode_hugepages() |
| 581 | while (filemap_get_folios(mapping, &next, end - 1, &fbatch)) { | remove_inode_hugepages() |
| 582 | for (i = 0; i < folio_batch_count(&fbatch); ++i) { | remove_inode_hugepages() |
| 583 | struct folio *folio = fbatch.folios[i]; | remove_inode_hugepages() |
| 599 | folio_batch_release(&fbatch); | remove_inode_hugepages() |
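
remove_inode_hugepages() is the truncate-style use: keep calling filemap_get_folios() from the cursor until the range is empty, removing what each pass found. Because the lookup advances the cursor past whatever it returns, the loop terminates even for folios it chooses to skip. A sketch with the hugetlb-specific removal elided and the name illustrative:

#include <linux/pagemap.h>
#include <linux/pagevec.h>

/* Sketch: tear down the page cache of @mapping over [start, end). */
static void remove_folios_in_range(struct address_space *mapping,
				   pgoff_t start, pgoff_t end)
{
	struct folio_batch fbatch;
	pgoff_t next = start;
	unsigned int i;

	folio_batch_init(&fbatch);
	while (filemap_get_folios(mapping, &next, end - 1, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); ++i) {
			struct folio *folio = fbatch.folios[i];

			folio_lock(folio);
			/* ... unmap and remove the folio from the cache ... */
			folio_unlock(folio);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}
}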
|
| /fs/ext4/ |
| inode.c |
| 1712 | struct folio_batch fbatch; | mpage_release_unused_pages() (local) |
| 1735 | folio_batch_init(&fbatch); | mpage_release_unused_pages() |
| 1760 | folio_batch_release(&fbatch); | mpage_release_unused_pages() |
| 2291 | struct folio_batch fbatch; | mpage_map_and_submit_buffers() (local) |
| 2305 | folio_batch_init(&fbatch); | mpage_map_and_submit_buffers() |
| 2329 | folio_batch_release(&fbatch); | mpage_map_and_submit_buffers() |
| 2336 | folio_batch_release(&fbatch); | mpage_map_and_submit_buffers() |
| 2610 | struct folio_batch fbatch; | mpage_prepare_extent_to_map() (local) |
| 2635 | folio_batch_init(&fbatch); | mpage_prepare_extent_to_map() |
| 2638 | tag, &fbatch); | mpage_prepare_extent_to_map() |
| [all …] |
|
| /fs/ |
| buffer.c |
| 1742 | struct folio_batch fbatch; | clean_bdev_aliases() (local) |
| 1750 | folio_batch_init(&fbatch); | clean_bdev_aliases() |
| 1751 | while (filemap_get_folios(bd_mapping, &index, end, &fbatch)) { | clean_bdev_aliases() |
| 1752 | count = folio_batch_count(&fbatch); | clean_bdev_aliases() |
| 1754 | struct folio *folio = fbatch.folios[i]; | clean_bdev_aliases() |
| 1783 | folio_batch_release(&fbatch); | clean_bdev_aliases() |
|