Lines Matching refs:buffer_head (a caller-side usage sketch follows the listing)
57 static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
60 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
62 inline void touch_buffer(struct buffer_head *bh) in touch_buffer()
69 void __lock_buffer(struct buffer_head *bh) in __lock_buffer()
75 void unlock_buffer(struct buffer_head *bh) in unlock_buffer()
91 struct buffer_head *head, *bh; in buffer_check_dirty_writeback()
121 void __wait_on_buffer(struct buffer_head * bh) in __wait_on_buffer()
127 static void buffer_io_error(struct buffer_head *bh, char *msg) in buffer_io_error()
143 static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate) in __end_buffer_read_notouch()
158 void end_buffer_read_sync(struct buffer_head *bh, int uptodate) in end_buffer_read_sync()
165 void end_buffer_write_sync(struct buffer_head *bh, int uptodate) in end_buffer_write_sync()
189 static struct buffer_head *
194 struct buffer_head *ret = NULL; in __find_get_block_slow()
196 struct buffer_head *bh; in __find_get_block_slow()
197 struct buffer_head *head; in __find_get_block_slow()
245 static void end_buffer_async_read(struct buffer_head *bh, int uptodate) in end_buffer_async_read()
248 struct buffer_head *first; in end_buffer_async_read()
249 struct buffer_head *tmp; in end_buffer_async_read()
294 struct buffer_head *bh;
301 struct buffer_head *bh = ctx->bh; in verify_bh()
309 static bool need_fsverity(struct buffer_head *bh) in need_fsverity()
323 struct buffer_head *bh = ctx->bh; in decrypt_bh()
346 static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate) in end_buffer_async_read_io()
377 static void end_buffer_async_write(struct buffer_head *bh, int uptodate) in end_buffer_async_write()
380 struct buffer_head *first; in end_buffer_async_write()
381 struct buffer_head *tmp; in end_buffer_async_write()
438 static void mark_buffer_async_read(struct buffer_head *bh) in mark_buffer_async_read()
444 static void mark_buffer_async_write_endio(struct buffer_head *bh, in mark_buffer_async_write_endio()
451 void mark_buffer_async_write(struct buffer_head *bh) in mark_buffer_async_write()
510 static void __remove_assoc_queue(struct buffer_head *bh) in __remove_assoc_queue()
534 struct buffer_head *bh; in osync_buffers_list()
659 struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize); in write_boundary_block()
667 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode) in mark_buffer_dirty_inode()
722 struct buffer_head *head; in block_dirty_folio()
728 struct buffer_head *bh = head; in block_dirty_folio()
776 struct buffer_head *bh; in fsync_buffers_list()
893 struct buffer_head *bh = BH_ENTRY(list->next); in remove_inode_buffers()
914 struct buffer_head *folio_alloc_buffers(struct folio *folio, unsigned long size, in folio_alloc_buffers()
917 struct buffer_head *bh, *head; in folio_alloc_buffers()
960 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size) in alloc_page_buffers()
969 struct buffer_head *head) in link_dev_buffers()
971 struct buffer_head *bh, *tail; in link_dev_buffers()
1000 struct buffer_head *head = folio_buffers(folio); in folio_init_buffers()
1001 struct buffer_head *bh = head; in folio_init_buffers()
1040 struct buffer_head *bh; in grow_dev_folio()
1112 static struct buffer_head *
1129 struct buffer_head *bh; in __getblk_slow()
1175 void mark_buffer_dirty(struct buffer_head *bh) in mark_buffer_dirty()
1210 void mark_buffer_write_io_error(struct buffer_head *bh) in mark_buffer_write_io_error()
1229 void __brelse(struct buffer_head *bh) in __brelse()
1246 void __bforget(struct buffer_head *bh) in __bforget()
1261 static struct buffer_head *__bread_slow(struct buffer_head *bh) in __bread_slow()
1296 struct buffer_head *bhs[BH_LRU_SIZE];
1321 static void bh_lru_install(struct buffer_head *bh) in bh_lru_install()
1323 struct buffer_head *evictee = bh; in bh_lru_install()
1358 static struct buffer_head *
1361 struct buffer_head *ret = NULL; in lookup_bh_lru()
1371 struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]); in lookup_bh_lru()
1397 struct buffer_head *
1400 struct buffer_head *bh = lookup_bh_lru(bdev, block, size); in __find_get_block()
1428 struct buffer_head *bdev_getblk(struct block_device *bdev, sector_t block, in bdev_getblk()
1431 struct buffer_head *bh = __find_get_block(bdev, block, size); in bdev_getblk()
1446 struct buffer_head *bh = bdev_getblk(bdev, block, size, in __breadahead()
1478 struct buffer_head *__bread_gfp(struct block_device *bdev, sector_t block, in __bread_gfp()
1481 struct buffer_head *bh; in __bread_gfp()
1554 void folio_set_bh(struct buffer_head *bh, struct folio *folio, in folio_set_bh()
1578 static void discard_buffer(struct buffer_head * bh) in discard_buffer()
1609 struct buffer_head *head, *bh, *next; in block_invalidate_folio()
1661 struct buffer_head *create_empty_buffers(struct folio *folio, in create_empty_buffers()
1664 struct buffer_head *bh, *head, *tail; in create_empty_buffers()
1722 struct buffer_head *bh; in clean_bdev_aliases()
1723 struct buffer_head *head; in clean_bdev_aliases()
1768 static struct buffer_head *folio_create_buffers(struct folio *folio, in folio_create_buffers()
1772 struct buffer_head *bh; in folio_create_buffers()
1818 struct buffer_head *bh, *head; in __block_write_full_folio()
1907 struct buffer_head *next = bh->b_this_page; in __block_write_full_folio()
1961 struct buffer_head *next = bh->b_this_page; in __block_write_full_folio()
1983 struct buffer_head *head, *bh; in folio_zero_new_buffers()
2019 iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh, in iomap_to_bh()
2096 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait; in __block_write_begin_int()
2179 struct buffer_head *bh, *head; in __block_commit_write()
2326 struct buffer_head *bh, *head; in block_is_partially_uptodate()
2369 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE]; in block_read_full_folio()
2658 struct buffer_head *bh; in block_truncate_page()
2755 struct buffer_head tmp = { in generic_block_bmap()
2766 struct buffer_head *bh = bio->bi_private; in end_bio_bh_io_sync()
2775 static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh, in submit_bh_wbc()
2822 void submit_bh(blk_opf_t opf, struct buffer_head *bh) in submit_bh()
2828 void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags) in write_dirty_buffer()
2846 int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags) in __sync_dirty_buffer()
2873 int sync_dirty_buffer(struct buffer_head *bh) in sync_dirty_buffer()
2879 static inline int buffer_busy(struct buffer_head *bh) in buffer_busy()
2886 drop_buffers(struct folio *folio, struct buffer_head **buffers_to_free) in drop_buffers()
2888 struct buffer_head *head = folio_buffers(folio); in drop_buffers()
2889 struct buffer_head *bh; in drop_buffers()
2899 struct buffer_head *next = bh->b_this_page; in drop_buffers()
2939 struct buffer_head *buffers_to_free = NULL; in try_to_free_buffers()
2973 struct buffer_head *bh = buffers_to_free; in try_to_free_buffers()
2976 struct buffer_head *next = bh->b_this_page; in try_to_free_buffers()
3018 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags) in alloc_buffer_head()
3020 struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags); in alloc_buffer_head()
3033 void free_buffer_head(struct buffer_head *bh) in free_buffer_head()
3065 int bh_uptodate_or_lock(struct buffer_head *bh) in bh_uptodate_or_lock()
3085 int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait) in __bh_read()
3113 void __bh_read_batch(int nr, struct buffer_head *bhs[], in __bh_read_batch()
3119 struct buffer_head *bh = bhs[i]; in __bh_read_batch()
3147 bh_cachep = KMEM_CACHE(buffer_head, in buffer_init()
3153 max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head)); in buffer_init()
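The entries above mix the file's internal machinery (the async end_io completion handlers, the per-CPU bh_lrus cache, grow_dev_folio()) with exported helpers that filesystems call directly (mark_buffer_dirty(), sync_dirty_buffer(), __bread_gfp(), __brelse()). As a rough orientation only, the sketch below shows the usual caller-side sequence built on those exported helpers: read a block, modify it under the buffer lock, and write it back synchronously. It is not taken from the listed file; sb_bread(), lock_buffer() and brelse() are the inline buffer_head.h wrappers around __bread_gfp(), __lock_buffer() and __brelse(), and the function name and zero-fill payload are illustrative assumptions.

/*
 * Minimal caller-side sketch (not part of the listed file): read one block,
 * modify it, and write it back synchronously.
 */
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/buffer_head.h>

static int example_rewrite_block(struct super_block *sb, sector_t block)
{
	struct buffer_head *bh;

	bh = sb_bread(sb, block);	/* uptodate buffer_head, or NULL on I/O error */
	if (!bh)
		return -EIO;

	lock_buffer(bh);		/* sleeps in __lock_buffer() if contended */
	memset(bh->b_data, 0, bh->b_size);	/* hypothetical modification */
	mark_buffer_dirty(bh);		/* sets BH_Dirty and dirties the folio */
	unlock_buffer(bh);

	/* Submit the write via submit_bh() and wait for completion. */
	if (sync_dirty_buffer(bh)) {
		brelse(bh);
		return -EIO;
	}

	brelse(bh);			/* drop the reference taken by sb_bread() */
	return 0;
}

Many callers stop at mark_buffer_dirty() and let later writeback submit the I/O; sync_dirty_buffer() is only needed when the block must be on disk before the function returns.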