Uses of struct folio_batch / fbatch across the kernel tree, grouped by directory (kernel source line numbers on the left, enclosing function on the right):

/linux/include/linux/

pagevec.h
     43  fbatch->nr = 0;                        in folio_batch_init()
     44  fbatch->i = 0;                         in folio_batch_init()
     50  fbatch->nr = 0;                        in folio_batch_reinit()
     51  fbatch->i = 0;                         in folio_batch_reinit()
     56  return fbatch->nr;                     in folio_batch_count()
     61  return PAGEVEC_SIZE - fbatch->nr;      in folio_batch_space()
     77  fbatch->folios[fbatch->nr++] = folio;  in folio_batch_add()
     91  if (fbatch->i == fbatch->nr)           in folio_batch_next()
     93  return fbatch->folios[fbatch->i++];    in folio_batch_next()
    100  if (folio_batch_count(fbatch))         in folio_batch_release()
    [all …]
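Taken together, these hits reconstruct most of the inline API. A minimal sketch of the helpers, assuming the current layout of include/linux/pagevec.h (the real struct also carries a percpu_pvec_drained flag, and PAGEVEC_SIZE is 31 at the time of writing):

    struct folio_batch {
            unsigned char nr;       /* number of folios stored */
            unsigned char i;        /* cursor for folio_batch_next() */
            /* ... */
            struct folio *folios[PAGEVEC_SIZE];
    };

    static inline void folio_batch_init(struct folio_batch *fbatch)
    {
            fbatch->nr = 0;
            fbatch->i = 0;
    }

    static inline unsigned int folio_batch_count(struct folio_batch *fbatch)
    {
            return fbatch->nr;
    }

    static inline unsigned int folio_batch_space(struct folio_batch *fbatch)
    {
            return PAGEVEC_SIZE - fbatch->nr;
    }

    /* Returns the slots left after the add; 0 means "full, drain it". */
    static inline unsigned folio_batch_add(struct folio_batch *fbatch,
                                           struct folio *folio)
    {
            fbatch->folios[fbatch->nr++] = folio;
            return folio_batch_space(fbatch);
    }

    /* Returns NULL once the batch has been fully consumed. */
    static inline struct folio *folio_batch_next(struct folio_batch *fbatch)
    {
            if (fbatch->i == fbatch->nr)
                    return NULL;
            return fbatch->folios[fbatch->i++];
    }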
pagemap.h
    975  pgoff_t end, struct folio_batch *fbatch);
    977  pgoff_t *start, pgoff_t end, struct folio_batch *fbatch);
    979  pgoff_t end, xa_mark_t tag, struct folio_batch *fbatch);
   1291  struct folio_batch *fbatch);
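These declaration tails line up with the filemap_get_folios() family — filemap_get_folios(), filemap_get_folios_contig() and filemap_get_folios_tag() — which fill a caller-supplied batch with up-to-date references and advance *start past the last folio returned. The canonical consumer loop, seen throughout the mm/ and fs/ hits below (mapping and end stand in for the caller's state):

    struct folio_batch fbatch;
    pgoff_t start = 0;              /* advanced by the lookup itself */

    folio_batch_init(&fbatch);
    while (filemap_get_folios(mapping, &start, end, &fbatch)) {
            unsigned int i;

            for (i = 0; i < folio_batch_count(&fbatch); i++) {
                    struct folio *folio = fbatch.folios[i];
                    /* operate on folio */
            }
            folio_batch_release(&fbatch);   /* drop the refs the lookup took */
            cond_resched();
    }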
/linux/mm/

swap.c
    124  struct folio_batch fbatch;             in put_pages_list() local
    127  folio_batch_init(&fbatch);             in put_pages_list()
    138  free_unref_folios(&fbatch);            in put_pages_list()
    141  if (fbatch.nr)                         in put_pages_list()
    142  free_unref_folios(&fbatch);            in put_pages_list()
    206  folios_put(fbatch);                    in folio_batch_move_lru()
    384  struct folio_batch *fbatch;            in __lru_cache_activate_folio() local
   1022  refs[fbatch.nr] = 1;                   in release_pages()
   1032  if (fbatch.nr)                         in release_pages()
   1053  folios_put(fbatch);                    in __folio_batch_release()
   [all …]
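put_pages_list() (lines 127-142) shows the usual fill/drain rhythm: folio_batch_add() returns the space remaining, so a zero return is the cue to drain, and a trailing check catches the final partial batch. A simplified sketch, assuming pages is the caller's list head (the real function also filters out folios whose reference it must not drop):

    struct folio_batch fbatch;
    struct folio *folio, *next;

    folio_batch_init(&fbatch);
    list_for_each_entry_safe(folio, next, pages, lru) {
            list_del(&folio->lru);
            if (!folio_batch_add(&fbatch, folio))
                    free_unref_folios(&fbatch);     /* batch full: drain */
    }
    if (fbatch.nr)                                  /* drain the partial batch */
            free_unref_folios(&fbatch);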
truncate.c
    100  fbatch->folios[j++] = folio;           in truncate_folio_batch_exceptionals()
    118  fbatch->nr = j;                        in truncate_folio_batch_exceptionals()
    309  struct folio_batch fbatch;             in truncate_inode_pages_range() local
    336  folio_batch_init(&fbatch);             in truncate_inode_pages_range()
    339  &fbatch, indices)) {                   in truncate_inode_pages_range()
    346  folio_batch_release(&fbatch);          in truncate_inode_pages_range()
    403  folio_batch_release(&fbatch);          in truncate_inode_pages_range()
    476  struct folio_batch fbatch;             in mapping_try_invalidate() local
    483  folio_batch_init(&fbatch);             in mapping_try_invalidate()
    602  struct folio_batch fbatch;             in invalidate_inode_pages2_range() local
    [all …]
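Lines 100 and 118 are the two halves of an in-place compaction: entries worth keeping are copied down to slot j, then the batch is shrunk by rewriting nr. Sketched against xa_is_value(), which distinguishes exceptional (shadow/swap) entries from real folios; indices is the caller-supplied offset array from find_lock_entries():

    unsigned int i, j = 0;

    for (i = 0; i < folio_batch_count(fbatch); i++) {
            struct folio *folio = fbatch->folios[i];

            if (!xa_is_value(folio)) {
                    fbatch->folios[j++] = folio;    /* keep real folios */
                    continue;
            }
            /* handle the exceptional entry at offset indices[i] */
    }
    fbatch->nr = j;                                 /* shrink the batch in place */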
mlock.c
     33  struct folio_batch fbatch;             member
    194  folio = fbatch->folios[i];             in mlock_folio_batch()
    197  fbatch->folios[i] = folio;             in mlock_folio_batch()
    209  folios_put(fbatch);                    in mlock_folio_batch()
    214  struct folio_batch *fbatch;            in mlock_drain_local() local
    217  fbatch = this_cpu_ptr(&mlock_fbatch.fbatch);    in mlock_drain_local()
    219  mlock_folio_batch(fbatch);             in mlock_drain_local()
    228  fbatch = &per_cpu(mlock_fbatch.fbatch, cpu);    in mlock_drain_remote()
    247  fbatch = this_cpu_ptr(&mlock_fbatch.fbatch);    in mlock_folio()
    273  fbatch = this_cpu_ptr(&mlock_fbatch.fbatch);    in mlock_new_folio()
    [all …]
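The "member" hit at line 33 is a folio_batch embedded in a per-CPU structure, so mlock/munlock work can be staged locally and drained CPU by CPU. Roughly, following the names in the hits (locking details abridged; mlock_folio_batch() is the drain worker seen at lines 194-209):

    struct mlock_fbatch {
            local_lock_t lock;
            struct folio_batch fbatch;
    };
    static DEFINE_PER_CPU(struct mlock_fbatch, mlock_fbatch) = {
            .lock = INIT_LOCAL_LOCK(lock),
    };

    void mlock_drain_local(void)
    {
            struct folio_batch *fbatch;

            local_lock(&mlock_fbatch.lock);
            fbatch = this_cpu_ptr(&mlock_fbatch.fbatch);
            if (folio_batch_count(fbatch))
                    mlock_folio_batch(fbatch);      /* move the staged folios */
            local_unlock(&mlock_fbatch.lock);
    }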
filemap.c
    516  struct folio_batch fbatch;             in __filemap_fdatawait_range() local
    519  folio_batch_init(&fbatch);             in __filemap_fdatawait_range()
    535  folio_batch_release(&fbatch);          in __filemap_fdatawait_range()
   2562  folio = fbatch->folios[folio_batch_count(fbatch) - 1];    in filemap_get_pages()
   2583  if (likely(--fbatch->nr))              in filemap_get_pages()
   2617  struct folio_batch fbatch;             in filemap_read() local
   2629  folio_batch_init(&fbatch);             in filemap_read()
   2673  fbatch.folios[0]))                     in filemap_read()
   2710  folio_batch_init(&fbatch);             in filemap_read()
   2894  struct folio_batch fbatch;             in filemap_splice_read() local
   [all …]
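__filemap_fdatawait_range() (lines 516-535) is the tagged variant of the lookup loop: it walks PAGECACHE_TAG_WRITEBACK hits and waits on each folio in turn. Roughly, with error handling elided:

    struct folio_batch fbatch;
    pgoff_t index = start_byte >> PAGE_SHIFT;
    pgoff_t end = end_byte >> PAGE_SHIFT;

    folio_batch_init(&fbatch);
    while (index <= end) {
            unsigned int i, nr;

            nr = filemap_get_folios_tag(mapping, &index, end,
                                        PAGECACHE_TAG_WRITEBACK, &fbatch);
            if (!nr)
                    break;
            for (i = 0; i < nr; i++)
                    folio_wait_writeback(fbatch.folios[i]);
            folio_batch_release(&fbatch);
            cond_resched();
    }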
shmem.c
    944  struct folio_batch fbatch;             in shmem_unlock_mapping() local
    947  folio_batch_init(&fbatch);             in shmem_unlock_mapping()
    954  folio_batch_release(&fbatch);          in shmem_unlock_mapping()
    998  struct folio_batch fbatch;             in shmem_undo_range() local
   1012  folio_batch_init(&fbatch);             in shmem_undo_range()
   1015  &fbatch, indices)) {                   in shmem_undo_range()
   1017  folio = fbatch.folios[i];              in shmem_undo_range()
   1032  folio_batch_release(&fbatch);          in shmem_undo_range()
   1086  folio = fbatch.folios[i];              in shmem_undo_range()
   1379  struct folio_batch fbatch;             in shmem_unuse_inode() local
   [all …]
internal.h
    406  pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
    408  pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
    744  void free_unref_folios(struct folio_batch *fbatch);
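The truncated declarations at 406 and 408 match mm's internal entry lookups, which return exceptional (value) entries as well as folios and report each slot's file offset through the indices array; the leading parameters below are inferred from the callers in truncate.c and shmem.c above, so treat them as a reconstruction rather than the authoritative prototypes:

    /* mm/internal.h -- reconstructed, mm-internal, not a public API */
    unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
                    pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
    unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
                    pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);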
page-writeback.c
   2485  folio = folio_batch_next(&wbc->fbatch);    in writeback_get_folio()
   2487  folio_batch_release(&wbc->fbatch);         in writeback_get_folio()
   2490  wbc_to_tag(wbc), &wbc->fbatch);            in writeback_get_folio()
   2491  folio = folio_batch_next(&wbc->fbatch);    in writeback_get_folio()
   2536  folio_batch_init(&wbc->fbatch);            in writeback_iter()
   2616  folio_batch_release(&wbc->fbatch);         in writeback_iter()
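Here the batch lives inside struct writeback_control and is consumed with folio_batch_next(); when it runs dry, writeback_get_folio() releases the old references and refills from a tagged lookup. The idiom at lines 2485-2491, approximately (wbc_end() stands for the end index of the wbc's range, and the real function also locks the folio and traces):

    static struct folio *writeback_get_folio(struct address_space *mapping,
                                             struct writeback_control *wbc)
    {
            struct folio *folio;

            folio = folio_batch_next(&wbc->fbatch);
            if (!folio) {
                    folio_batch_release(&wbc->fbatch);      /* drop stale refs */
                    filemap_get_folios_tag(mapping, &wbc->index, wbc_end(wbc),
                                           wbc_to_tag(wbc), &wbc->fbatch);
                    folio = folio_batch_next(&wbc->fbatch); /* NULL when done */
            }
            return folio;
    }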
/linux/fs/nilfs2/

page.c
    247  struct folio_batch fbatch;             in nilfs_copy_dirty_pages() local
    252  folio_batch_init(&fbatch);             in nilfs_copy_dirty_pages()
    283  folio_batch_release(&fbatch);          in nilfs_copy_dirty_pages()
    302  struct folio_batch fbatch;             in nilfs_copy_back_pages() local
    306  folio_batch_init(&fbatch);             in nilfs_copy_back_pages()
    364  struct folio_batch fbatch;             in nilfs_clear_dirty_pages() local
    368  folio_batch_init(&fbatch);             in nilfs_clear_dirty_pages()
    490  struct folio_batch fbatch;             in nilfs_find_uncommitted_extent() local
    498  folio_batch_init(&fbatch);             in nilfs_find_uncommitted_extent()
    502  &fbatch);                              in nilfs_find_uncommitted_extent()
    [all …]
segment.c
    702  struct folio_batch fbatch;             in nilfs_lookup_dirty_data_buffers() local
    716  folio_batch_init(&fbatch);             in nilfs_lookup_dirty_data_buffers()
    720  PAGECACHE_TAG_DIRTY, &fbatch))         in nilfs_lookup_dirty_data_buffers()
    725  struct folio *folio = fbatch.folios[i];    in nilfs_lookup_dirty_data_buffers()
    747  folio_batch_release(&fbatch);          in nilfs_lookup_dirty_data_buffers()
    753  folio_batch_release(&fbatch);          in nilfs_lookup_dirty_data_buffers()
    763  struct folio_batch fbatch;             in nilfs_lookup_dirty_node_buffers() local
    770  folio_batch_init(&fbatch);             in nilfs_lookup_dirty_node_buffers()
    773  (pgoff_t)-1, PAGECACHE_TAG_DIRTY, &fbatch)) {    in nilfs_lookup_dirty_node_buffers()
    775  bh = head = folio_buffers(fbatch.folios[i]);     in nilfs_lookup_dirty_node_buffers()
    [all …]
/linux/fs/ramfs/

file-nommu.c
    208  struct folio_batch fbatch;             in ramfs_nommu_get_unmapped_area() local
    224  folio_batch_init(&fbatch);             in ramfs_nommu_get_unmapped_area()
    228  ULONG_MAX, &fbatch);                   in ramfs_nommu_get_unmapped_area()
    235  ret = (unsigned long) folio_address(fbatch.folios[0]);    in ramfs_nommu_get_unmapped_area()
    236  pfn = folio_pfn(fbatch.folios[0]);     in ramfs_nommu_get_unmapped_area()
    240  if (pfn + nr_pages != folio_pfn(fbatch.folios[loop])) {   in ramfs_nommu_get_unmapped_area()
    244  nr_pages += folio_nr_pages(fbatch.folios[loop]);           in ramfs_nommu_get_unmapped_area()
    250  folio_batch_release(&fbatch);          in ramfs_nommu_get_unmapped_area()
    256  folio_batch_release(&fbatch);          in ramfs_nommu_get_unmapped_area()
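On nommu, an mmap can only succeed if the backing folios are physically contiguous, so after the lookup the batch is walked comparing pfns (lines 236-244). The check, roughly:

    unsigned long pfn = folio_pfn(fbatch.folios[0]);
    unsigned long nr_pages = 0;
    unsigned int loop;

    for (loop = 0; loop < folio_batch_count(&fbatch); loop++) {
            /* each folio must start where the previous one ended */
            if (pfn + nr_pages != folio_pfn(fbatch.folios[loop]))
                    break;                  /* hole: not mappable on nommu */
            nr_pages += folio_nr_pages(fbatch.folios[loop]);
    }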
/linux/fs/bcachefs/

fs-io-pagecache.c
    255  struct folio_batch fbatch;             in bch2_mark_pagecache_unallocated() local
    261  folio_batch_init(&fbatch);             in bch2_mark_pagecache_unallocated()
    287  folio_batch_release(&fbatch);          in bch2_mark_pagecache_unallocated()
    299  struct folio_batch fbatch;             in bch2_mark_pagecache_reserved() local
    306  folio_batch_init(&fbatch);             in bch2_mark_pagecache_reserved()
    316  folio_batch_release(&fbatch);          in bch2_mark_pagecache_reserved()
    344  folio_batch_release(&fbatch);          in bch2_mark_pagecache_reserved()
    695  struct folio_batch fbatch;             in bch2_seek_pagecache_data() local
    703  folio_batch_init(&fbatch);             in bch2_seek_pagecache_data()
    713  folio_batch_release(&fbatch);          in bch2_seek_pagecache_data()
    [all …]
/linux/drivers/gpu/drm/

drm_gem.c
    507  check_move_unevictable_folios(fbatch);     in drm_gem_check_release_batch()
    508  __folio_batch_release(fbatch);             in drm_gem_check_release_batch()
    542  struct folio_batch fbatch;                 in drm_gem_get_pages() local
    589  folio_batch_init(&fbatch);                 in drm_gem_get_pages()
    593  if (!folio_batch_add(&fbatch, f))          in drm_gem_get_pages()
    597  if (fbatch.nr)                             in drm_gem_get_pages()
    598  drm_gem_check_release_batch(&fbatch);      in drm_gem_get_pages()
    617  struct folio_batch fbatch;                 in drm_gem_put_pages() local
    630  folio_batch_init(&fbatch);                 in drm_gem_put_pages()
    645  if (!folio_batch_add(&fbatch, folio))      in drm_gem_put_pages()
    [all …]
/linux/drivers/gpu/drm/i915/gem/

i915_gem_shmem.c
     25  static void check_release_folio_batch(struct folio_batch *fbatch)    in check_release_folio_batch() argument
     27  check_move_unevictable_folios(fbatch);     in check_release_folio_batch()
     28  __folio_batch_release(fbatch);             in check_release_folio_batch()
     36  struct folio_batch fbatch;                 in shmem_sg_free_table() local
     42  folio_batch_init(&fbatch);                 in shmem_sg_free_table()
     54  if (!folio_batch_add(&fbatch, folio))      in shmem_sg_free_table()
     55  check_release_folio_batch(&fbatch);        in shmem_sg_free_table()
     57  if (fbatch.nr)                             in shmem_sg_free_table()
     58  check_release_folio_batch(&fbatch);        in shmem_sg_free_table()
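drm_gem.c and i915_gem_shmem.c share the same release-helper shape: before the batch drops its references, check_move_unevictable_folios() moves any shmem folios that were pinned (and therefore unevictable) back onto a reclaimable LRU. Per the hits at lines 25-28:

    static void check_release_folio_batch(struct folio_batch *fbatch)
    {
            check_move_unevictable_folios(fbatch);  /* fix LRU placement first */
            __folio_batch_release(fbatch);          /* then drop the references */
            cond_resched();
    }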
/linux/fs/gfs2/

aops.c
    179  struct folio_batch *fbatch,                in gfs2_write_jdata_batch() argument
    188  int nr_folios = folio_batch_count(fbatch);     in gfs2_write_jdata_batch()
    191  size += folio_size(fbatch->folios[i]);     in gfs2_write_jdata_batch()
    199  struct folio *folio = fbatch->folios[i];   in gfs2_write_jdata_batch()
    282  struct folio_batch fbatch;                 in gfs2_write_cache_jdata() local
    292  folio_batch_init(&fbatch);                 in gfs2_write_cache_jdata()
    319  tag, &fbatch);                             in gfs2_write_cache_jdata()
    323  ret = gfs2_write_jdata_batch(mapping, wbc, &fbatch,    in gfs2_write_cache_jdata()
    329  folio_batch_release(&fbatch);              in gfs2_write_cache_jdata()
/linux/fs/ceph/

addr.c
    955  struct folio_batch fbatch;             in ceph_writepages_start() local
    985  folio_batch_init(&fbatch);             in ceph_writepages_start()
   1052  end, tag, &fbatch);                    in ceph_writepages_start()
   1199  fbatch.folios[i] = NULL;               in ceph_writepages_start()
   1210  if (!fbatch.folios[j])                 in ceph_writepages_start()
   1213  fbatch.folios[n] = fbatch.folios[j];   in ceph_writepages_start()
   1216  fbatch.nr = n;                         in ceph_writepages_start()
   1221  folio_batch_release(&fbatch);          in ceph_writepages_start()
   1374  (int)fbatch.nr, fbatch.nr ? fbatch.folios[0] : NULL);    in ceph_writepages_start()
   1375  folio_batch_release(&fbatch);          in ceph_writepages_start()
   [all …]
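ceph_writepages_start() uses a two-pass variant of the truncate.c compaction: folios handed off to an OSD write request are first NULLed out of the batch (line 1199), then a second pass squeezes the survivors down and rewrites nr (lines 1210-1216). In sketch form:

    unsigned int j, n = 0;

    for (j = 0; j < folio_batch_count(&fbatch); j++) {
            if (!fbatch.folios[j])          /* consumed by the write request */
                    continue;
            fbatch.folios[n++] = fbatch.folios[j];
    }
    fbatch.nr = n;                          /* only unprocessed folios remain */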
/linux/fs/f2fs/

node.c
   1553  struct folio_batch fbatch;             in last_fsync_dnode() local
   1557  folio_batch_init(&fbatch);             in last_fsync_dnode()
   1562  &fbatch))) {                           in last_fsync_dnode()
   1770  struct folio_batch fbatch;             in f2fs_fsync_node_pages() local
   1784  folio_batch_init(&fbatch);             in f2fs_fsync_node_pages()
   1789  &fbatch))) {                           in f2fs_fsync_node_pages()
   1929  struct folio_batch fbatch;             in f2fs_flush_inline_data() local
   1932  folio_batch_init(&fbatch);             in f2fs_flush_inline_data()
   1936  &fbatch))) {                           in f2fs_flush_inline_data()
   1977  struct folio_batch fbatch;             in f2fs_sync_node_pages() local
   [all …]
checkpoint.c
    427  struct folio_batch fbatch;             in f2fs_sync_meta_pages() local
    435  folio_batch_init(&fbatch);             in f2fs_sync_meta_pages()
    441  PAGECACHE_TAG_DIRTY, &fbatch))) {      in f2fs_sync_meta_pages()
    445  struct folio *folio = fbatch.folios[i];    in f2fs_sync_meta_pages()
    449  folio_nr_pages(fbatch.folios[i-1])) {  in f2fs_sync_meta_pages()
    450  folio_batch_release(&fbatch);          in f2fs_sync_meta_pages()
    482  folio_batch_release(&fbatch);          in f2fs_sync_meta_pages()
/linux/fs/btrfs/tests/

extent-io-tests.c
     25  struct folio_batch fbatch;             in process_page_range() local
     32  folio_batch_init(&fbatch);             in process_page_range()
     36  end_index, &fbatch);                   in process_page_range()
     38  struct folio *folio = fbatch.folios[i];    in process_page_range()
     48  folio_batch_release(&fbatch);          in process_page_range()
/linux/drivers/gpu/drm/i915/

i915_gpu_error.c
    192  static void pool_fini(struct folio_batch *fbatch)    in pool_fini() argument
    194  folio_batch_release(fbatch);           in pool_fini()
    199  while (folio_batch_space(fbatch)) {    in pool_refill()
    206  folio_batch_add(fbatch, folio);        in pool_refill()
    216  folio_batch_init(fbatch);              in pool_init()
    218  err = pool_refill(fbatch, gfp);        in pool_init()
    220  pool_fini(fbatch);                     in pool_init()
    230  if (!folio && folio_batch_count(fbatch))    in pool_alloc()
    231  folio = fbatch->folios[--fbatch->nr];  in pool_alloc()
    240  if (folio_batch_space(fbatch))         in pool_free()
    [all …]
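i915_gpu_error.c repurposes a folio_batch as a small preallocated pool rather than a lookup cursor: pool_refill() tops it up while folio_batch_space() is non-zero, and pool_alloc() falls back to popping the tail slot when the allocator fails under error-capture conditions. A sketch along the lines of the hits above (the order-0 folio_alloc() call is an assumption):

    static int pool_refill(struct folio_batch *fbatch, gfp_t gfp)
    {
            while (folio_batch_space(fbatch)) {
                    struct folio *folio = folio_alloc(gfp, 0);

                    if (!folio)
                            return -ENOMEM;
                    folio_batch_add(fbatch, folio);
            }
            return 0;
    }

    static struct folio *pool_alloc(struct folio_batch *fbatch, gfp_t gfp)
    {
            struct folio *folio = folio_alloc(gfp, 0);

            /* under memory pressure, fall back to the preallocated pool */
            if (!folio && folio_batch_count(fbatch))
                    folio = fbatch->folios[--fbatch->nr];
            return folio;
    }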
/linux/fs/btrfs/

extent_io.c
    204  struct folio_batch fbatch;             in __process_folios_contig() local
    207  folio_batch_init(&fbatch);             in __process_folios_contig()
    212  end_index, &fbatch);                   in __process_folios_contig()
    249  struct folio_batch fbatch;             in lock_delalloc_folios() local
    254  folio_batch_init(&fbatch);             in lock_delalloc_folios()
    259  end_index, &fbatch);                   in lock_delalloc_folios()
   1901  struct folio_batch fbatch;             in btree_write_cache_pages() local
   1908  folio_batch_init(&fbatch);             in btree_write_cache_pages()
   1932  tag, &fbatch))) {                      in btree_write_cache_pages()
   2029  struct folio_batch fbatch;             in extent_write_cache_pages() local
   [all …]
compression.c
    287  struct folio_batch fbatch;             in end_compressed_writeback() local
    295  folio_batch_init(&fbatch);             in end_compressed_writeback()
    298  &fbatch);                              in end_compressed_writeback()
    304  struct folio *folio = fbatch.folios[i];    in end_compressed_writeback()
    309  folio_batch_release(&fbatch);          in end_compressed_writeback()
/linux/fs/hugetlbfs/

inode.c
    651  struct folio_batch fbatch;             in remove_inode_hugepages() local
    656  folio_batch_init(&fbatch);             in remove_inode_hugepages()
    658  while (filemap_get_folios(mapping, &next, end - 1, &fbatch)) {    in remove_inode_hugepages()
    659  for (i = 0; i < folio_batch_count(&fbatch); ++i) {    in remove_inode_hugepages()
    660  struct folio *folio = fbatch.folios[i];    in remove_inode_hugepages()
    676  folio_batch_release(&fbatch);          in remove_inode_hugepages()
/linux/fs/ext4/

inode.c
   1556  struct folio_batch fbatch;             in mpage_release_unused_pages() local
   1581  folio_batch_init(&fbatch);             in mpage_release_unused_pages()
   1604  folio_batch_release(&fbatch);          in mpage_release_unused_pages()
   2134  struct folio_batch fbatch;             in mpage_map_and_submit_buffers() local
   2149  folio_batch_init(&fbatch);             in mpage_map_and_submit_buffers()
   2172  folio_batch_release(&fbatch);          in mpage_map_and_submit_buffers()
   2179  folio_batch_release(&fbatch);          in mpage_map_and_submit_buffers()
   2410  struct folio_batch fbatch;             in mpage_prepare_extent_to_map() local
   2435  folio_batch_init(&fbatch);             in mpage_prepare_extent_to_map()
   2438  tag, &fbatch);                         in mpage_prepare_extent_to_map()
   [all …]