Lines Matching refs: buffer_head

56 static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
59 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
61 inline void touch_buffer(struct buffer_head *bh) in touch_buffer()
68 void __lock_buffer(struct buffer_head *bh) in __lock_buffer()
74 void unlock_buffer(struct buffer_head *bh) in unlock_buffer()
90 struct buffer_head *head, *bh; in buffer_check_dirty_writeback()
121 void __wait_on_buffer(struct buffer_head * bh) in __wait_on_buffer()
127 static void buffer_io_error(struct buffer_head *bh, char *msg) in buffer_io_error()
143 static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate) in __end_buffer_read_notouch()
158 void end_buffer_read_sync(struct buffer_head *bh, int uptodate) in end_buffer_read_sync()
165 void end_buffer_write_sync(struct buffer_head *bh, int uptodate) in end_buffer_write_sync()
189 static struct buffer_head *
194 struct buffer_head *ret = NULL; in __find_get_block_slow()
196 struct buffer_head *bh; in __find_get_block_slow()
197 struct buffer_head *head; in __find_get_block_slow()
245 static void end_buffer_async_read(struct buffer_head *bh, int uptodate) in end_buffer_async_read()
248 struct buffer_head *first; in end_buffer_async_read()
249 struct buffer_head *tmp; in end_buffer_async_read()
301 struct buffer_head *bh;
308 struct buffer_head *bh = ctx->bh; in verify_bh()
317 static bool need_fsverity(struct buffer_head *bh) in need_fsverity()
331 struct buffer_head *bh = ctx->bh; in decrypt_bh()
354 static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate) in end_buffer_async_read_io()
385 void end_buffer_async_write(struct buffer_head *bh, int uptodate) in end_buffer_async_write()
388 struct buffer_head *first; in end_buffer_async_write()
389 struct buffer_head *tmp; in end_buffer_async_write()
448 static void mark_buffer_async_read(struct buffer_head *bh) in mark_buffer_async_read()
454 static void mark_buffer_async_write_endio(struct buffer_head *bh, in mark_buffer_async_write_endio()
461 void mark_buffer_async_write(struct buffer_head *bh) in mark_buffer_async_write()
520 static void __remove_assoc_queue(struct buffer_head *bh) in __remove_assoc_queue()
544 struct buffer_head *bh; in osync_buffers_list()
605 struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize); in write_boundary_block()
613 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode) in mark_buffer_dirty_inode()
661 struct buffer_head *head; in block_dirty_folio()
667 struct buffer_head *bh = head; in block_dirty_folio()
715 struct buffer_head *bh; in fsync_buffers_list()
833 struct buffer_head *bh = BH_ENTRY(list->next); in remove_inode_buffers()
854 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size, in alloc_page_buffers()
857 struct buffer_head *bh, *head; in alloc_page_buffers()
905 link_dev_buffers(struct page *page, struct buffer_head *head) in link_dev_buffers()
907 struct buffer_head *bh, *tail; in link_dev_buffers()
937 struct buffer_head *head = page_buffers(page); in init_page_buffers()
938 struct buffer_head *bh = head; in init_page_buffers()
974 struct buffer_head *bh; in grow_dev_page()
1057 static struct buffer_head *
1074 struct buffer_head *bh; in __getblk_slow()
1122 void mark_buffer_dirty(struct buffer_head *bh) in mark_buffer_dirty()
1157 void mark_buffer_write_io_error(struct buffer_head *bh) in mark_buffer_write_io_error()
1182 void __brelse(struct buffer_head * buf) in __brelse()
1196 void __bforget(struct buffer_head *bh) in __bforget()
1211 static struct buffer_head *__bread_slow(struct buffer_head *bh) in __bread_slow()
1246 struct buffer_head *bhs[BH_LRU_SIZE];
1271 static void bh_lru_install(struct buffer_head *bh) in bh_lru_install()
1273 struct buffer_head *evictee = bh; in bh_lru_install()
1308 static struct buffer_head *
1311 struct buffer_head *ret = NULL; in lookup_bh_lru()
1317 struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]); in lookup_bh_lru()
1343 struct buffer_head *
1346 struct buffer_head *bh = lookup_bh_lru(bdev, block, size); in __find_get_block()
1368 struct buffer_head *
1372 struct buffer_head *bh = __find_get_block(bdev, block, size); in __getblk_gfp()
1386 struct buffer_head *bh = __getblk(bdev, block, size); in __breadahead()
1406 struct buffer_head *
1410 struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp); in __bread_gfp()
1473 void set_bh_page(struct buffer_head *bh, in set_bh_page()
1497 static void discard_buffer(struct buffer_head * bh) in discard_buffer()
1528 struct buffer_head *head, *bh, *next; in block_invalidate_folio()
1584 struct buffer_head *bh, *head, *tail; in create_empty_buffers()
1639 struct buffer_head *bh; in clean_bdev_aliases()
1640 struct buffer_head *head; in clean_bdev_aliases()
1698 static struct buffer_head *create_page_buffers(struct page *page, struct inode *inode, unsigned int… in create_page_buffers()
1744 struct buffer_head *bh, *head; in __block_write_full_page()
1833 struct buffer_head *next = bh->b_this_page; in __block_write_full_page()
1886 struct buffer_head *next = bh->b_this_page; in __block_write_full_page()
1907 struct buffer_head *head, *bh; in page_zero_new_buffers()
1942 iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh, in iomap_to_bh()
2006 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait; in __block_write_begin_int()
2095 struct buffer_head *bh, *head; in __block_commit_write()
2243 struct buffer_head *bh, *head; in block_is_partially_uptodate()
2286 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE]; in block_read_full_folio()
2583 struct buffer_head *bh; in block_truncate_page()
2687 struct buffer_head tmp = { in generic_block_bmap()
2698 struct buffer_head *bh = bio->bi_private; in end_bio_bh_io_sync()
2707 static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh, in submit_bh_wbc()
2753 void submit_bh(blk_opf_t opf, struct buffer_head *bh) in submit_bh()
2759 void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags) in write_dirty_buffer()
2777 int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags) in __sync_dirty_buffer()
2804 int sync_dirty_buffer(struct buffer_head *bh) in sync_dirty_buffer()
2830 static inline int buffer_busy(struct buffer_head *bh) in buffer_busy()
2837 drop_buffers(struct folio *folio, struct buffer_head **buffers_to_free) in drop_buffers()
2839 struct buffer_head *head = folio_buffers(folio); in drop_buffers()
2840 struct buffer_head *bh; in drop_buffers()
2850 struct buffer_head *next = bh->b_this_page; in drop_buffers()
2866 struct buffer_head *buffers_to_free = NULL; in try_to_free_buffers()
2900 struct buffer_head *bh = buffers_to_free; in try_to_free_buffers()
2903 struct buffer_head *next = bh->b_this_page; in try_to_free_buffers()
2945 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags) in alloc_buffer_head()
2947 struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags); in alloc_buffer_head()
2960 void free_buffer_head(struct buffer_head *bh) in free_buffer_head()
2992 int bh_uptodate_or_lock(struct buffer_head *bh) in bh_uptodate_or_lock()
3012 int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait) in __bh_read()
3040 void __bh_read_batch(int nr, struct buffer_head *bhs[], in __bh_read_batch()
3046 struct buffer_head *bh = bhs[i]; in __bh_read_batch()
3075 sizeof(struct buffer_head), 0, in buffer_init()
3084 max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head)); in buffer_init()
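The lines above are the references to struct buffer_head in this file, including the exported helpers (__bread_gfp, __getblk_gfp, mark_buffer_dirty, sync_dirty_buffer, __brelse, and friends). As a rough orientation only, and not part of the listing, the sketch below shows how a filesystem caller typically combines a few of them; the block device, block number, and the byte being patched are made-up placeholders, and error handling is reduced to the minimum.

/*
 * Illustrative sketch only: read one metadata block, modify it in memory,
 * and write it back synchronously using the buffer_head helpers listed
 * above.  "bdev", "blocknr" and the patched byte are hypothetical.
 */
#include <linux/buffer_head.h>
#include <linux/errno.h>

static int example_patch_block(struct block_device *bdev, sector_t blocknr,
			       unsigned int blocksize)
{
	struct buffer_head *bh;
	int err;

	/* __bread() returns an uptodate, referenced buffer_head or NULL. */
	bh = __bread(bdev, blocknr, blocksize);
	if (!bh)
		return -EIO;

	lock_buffer(bh);		/* serialise against other users of the buffer */
	((u8 *)bh->b_data)[0] = 0x42;	/* arbitrary in-memory modification */
	mark_buffer_dirty(bh);		/* mark the buffer (and its folio) dirty */
	unlock_buffer(bh);

	err = sync_dirty_buffer(bh);	/* submit the write and wait for completion */
	brelse(bh);			/* drop the reference taken by __bread() */
	return err;
}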