Lines Matching refs:bio
84 void (*issue_op)(struct bio *bio, void *context);
107 struct bio *bio; in __commit() local
132 while ((bio = bio_list_pop(&bios))) { in __commit()
134 bio->bi_status = r; in __commit()
135 bio_endio(bio); in __commit()
137 b->issue_op(bio, b->issue_context); in __commit()
144 void (*issue_op)(struct bio *bio, void *), in batcher_init() argument
182 static void issue_after_commit(struct batcher *b, struct bio *bio) in issue_after_commit() argument
188 bio_list_add(&b->bios, bio); in issue_after_commit()
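
The matches above and below come from device-mapper's dm-cache target (this is drivers/md/dm-cache-target.c in the Linux tree). The batcher fragments show its commit-then-issue discipline: a bio that must not complete before a metadata commit is parked with issue_after_commit(), and __commit() later pops the queue, either propagating a commit error through bio_endio() or handing each bio to the issue_op callback. Here is a minimal userspace model of that control flow; struct qbio, commit_fn and the fake_* helpers are invented stand-ins, and only the pop-and-dispatch loop mirrors the kernel code (which also serializes the list with a spinlock):

    #include <stdio.h>

    struct qbio {                                 /* stand-in for struct bio */
            int status;                           /* stand-in for bi_status */
            struct qbio *next;
    };

    struct batcher {
            int (*commit_fn)(void *context);            /* commits metadata */
            void (*issue_op)(struct qbio *, void *);    /* issues a held bio */
            void *context;
            struct qbio *bios;                    /* stand-in for bio_list */
    };

    static void issue_after_commit(struct batcher *b, struct qbio *bio)
    {
            bio->next = b->bios;                  /* hold until next commit */
            b->bios = bio;
    }

    static void commit(struct batcher *b)
    {
            int r = b->commit_fn(b->context);
            struct qbio *bio;

            while ((bio = b->bios)) {             /* pop every held bio */
                    b->bios = bio->next;
                    if (r) {
                            bio->status = r;      /* failed: end with error */
                            printf("ended bio, status %d\n", bio->status);
                    } else {
                            b->issue_op(bio, b->context);  /* safe to issue */
                    }
            }
    }

    static int fake_commit(void *context)
    {
            (void)context;
            return 0;                             /* pretend commit succeeded */
    }

    static void fake_issue(struct qbio *bio, void *context)
    {
            (void)context;
            printf("issued bio, status %d\n", bio->status);
    }

    int main(void)
    {
            struct batcher b = { fake_commit, fake_issue, NULL, NULL };
            struct qbio b1 = { 0, NULL }, b2 = { 0, NULL };

            issue_after_commit(&b, &b1);
            issue_after_commit(&b, &b2);
            commit(&b);                  /* both bios issue after the commit */
            return 0;
    }
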
220 static void dm_hook_bio(struct dm_hook_info *h, struct bio *bio, in dm_hook_bio() argument
223 h->bi_end_io = bio->bi_end_io; in dm_hook_bio()
225 bio->bi_end_io = bi_end_io; in dm_hook_bio()
226 bio->bi_private = bi_private; in dm_hook_bio()
229 static void dm_unhook_bio(struct dm_hook_info *h, struct bio *bio) in dm_unhook_bio() argument
231 bio->bi_end_io = h->bi_end_io; in dm_unhook_bio()
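
dm_hook_bio()/dm_unhook_bio() let the target borrow a bio's completion path: the current bi_end_io is stashed in a dm_hook_info, a private handler and bi_private are installed, and the original handler is restored once the hook has fired (in the kernel, dm_unhook_bio() also calls bio_inc_remaining() so the bio can complete again with the restored handler). A compilable userspace sketch of the save/replace/restore dance; struct ubio and struct hook_info are simplified stand-ins, and this version restores bi_private too for clarity, which the kernel variant does not bother with:

    #include <stdio.h>

    struct ubio;
    typedef void (ubio_end_io_t)(struct ubio *);

    struct ubio {                          /* stand-in for struct bio */
            ubio_end_io_t *bi_end_io;
            void *bi_private;
    };

    struct hook_info {                     /* stand-in for dm_hook_info */
            ubio_end_io_t *bi_end_io;
            void *bi_private;
    };

    static void hook_bio(struct hook_info *h, struct ubio *bio,
                         ubio_end_io_t *end_io, void *private)
    {
            h->bi_end_io = bio->bi_end_io;    /* save the original */
            h->bi_private = bio->bi_private;
            bio->bi_end_io = end_io;          /* install our handler */
            bio->bi_private = private;
    }

    static void unhook_bio(struct hook_info *h, struct ubio *bio)
    {
            bio->bi_end_io = h->bi_end_io;    /* restore for the owner */
            bio->bi_private = h->bi_private;
    }

    static void original_endio(struct ubio *bio)
    {
            printf("original endio, private=%s\n", (char *)bio->bi_private);
    }

    static struct hook_info hook;

    static void hooked_endio(struct ubio *bio)
    {
            printf("hooked endio ran first\n");
            unhook_bio(&hook, bio);           /* put the bio back as found */
            bio->bi_end_io(bio);              /* then complete it normally */
    }

    int main(void)
    {
            struct ubio bio = { original_endio, "owner" };

            hook_bio(&hook, &bio, hooked_endio, NULL);
            bio.bi_end_io(&bio);              /* completion fires the hook */
            return 0;
    }
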
430 struct bio *overwrite_bio;
527 static unsigned int lock_level(struct bio *bio) in lock_level() argument
529 return bio_data_dir(bio) == WRITE ? in lock_level()
540 static struct per_bio_data *get_per_bio_data(struct bio *bio) in get_per_bio_data() argument
542 struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data)); in get_per_bio_data()
548 static struct per_bio_data *init_per_bio_data(struct bio *bio) in init_per_bio_data() argument
550 struct per_bio_data *pb = get_per_bio_data(bio); in init_per_bio_data()
553 pb->req_nr = dm_bio_get_target_bio_nr(bio); in init_per_bio_data()
562 static void defer_bio(struct cache *cache, struct bio *bio) in defer_bio() argument
565 bio_list_add(&cache->deferred_bios, bio); in defer_bio()
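
Per-bio state needs no separate allocation: dm_per_bio_data() returns a pointer into the per-bio area DM reserves alongside each cloned bio (sized by the target's per_io_data_size), and init_per_bio_data() seeds it, recording among other things the target bio number that process_flush_bio() later uses to tell duplicated flushes apart. defer_bio() just queues the bio on cache->deferred_bios for the worker; process_deferred_bios() below drains that list.
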
582 static bool bio_detain_shared(struct cache *cache, dm_oblock_t oblock, struct bio *bio) in bio_detain_shared() argument
593 r = dm_cell_get_v2(cache->prison, &key, lock_level(bio), bio, cell_prealloc, &cell); in bio_detain_shared()
605 pb = get_per_bio_data(bio); in bio_detain_shared()
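
bio_detain_shared() takes a shared lock on the block's cell in the bio prison, at a level derived from the bio's direction (lock_level()), so ordinary I/O and migrations, which hold cells exclusively, exclude each other. On success the cell is remembered in the per-bio data so cache_end_io() can drop it later; otherwise the bio appears to stay queued on the busy cell and gets re-issued once the exclusive holder releases it.
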
724 static void remap_to_origin(struct cache *cache, struct bio *bio) in remap_to_origin() argument
726 bio_set_dev(bio, cache->origin_dev->bdev); in remap_to_origin()
729 static void remap_to_cache(struct cache *cache, struct bio *bio, in remap_to_cache() argument
732 sector_t bi_sector = bio->bi_iter.bi_sector; in remap_to_cache()
735 bio_set_dev(bio, cache->cache_dev->bdev); in remap_to_cache()
737 bio->bi_iter.bi_sector = in remap_to_cache()
741 bio->bi_iter.bi_sector = in remap_to_cache()
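
remap_to_cache() points the bio at the cache device and rewrites bi_sector so the cache block index chooses the target block while the bio's offset within its original block is preserved. With a power-of-two block size that is a shift and a mask; otherwise it is a multiply plus the remainder of a division (the kernel's sector_div() is a macro that divides its first argument in place and returns the remainder, modeled here with a pointer). A self-contained sketch of both paths; remap_sector() and its parameters are illustrative, not the kernel signature:

    #include <stdio.h>
    #include <stdint.h>

    typedef uint64_t sector_t;

    /* Models the kernel's sector_div(): divide in place, return remainder. */
    static sector_t sector_div(sector_t *n, sector_t base)
    {
            sector_t rem = *n % base;

            *n /= base;
            return rem;
    }

    /* Cache block index + offset within the original block -> cache sector.
     * A negative shift means the block size is not a power of two. */
    static sector_t remap_sector(sector_t bi_sector, sector_t cblock,
                                 sector_t sectors_per_block, int shift)
    {
            if (shift < 0)
                    return cblock * sectors_per_block +
                           sector_div(&bi_sector, sectors_per_block);
            return (cblock << shift) | (bi_sector & (sectors_per_block - 1));
    }

    int main(void)
    {
            /* 128-sector (64 KiB) blocks, power of two: shift = 7 */
            printf("%llu\n",
                   (unsigned long long)remap_sector(1000, 3, 128, 7));
            /* 100-sector blocks, not a power of two: shift = -1 */
            printf("%llu\n",
                   (unsigned long long)remap_sector(1000, 3, 100, -1));
            return 0;
    }
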
746 static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio) in check_if_tick_bio_needed() argument
751 if (cache->need_tick_bio && !op_is_flush(bio->bi_opf) && in check_if_tick_bio_needed()
752 bio_op(bio) != REQ_OP_DISCARD) { in check_if_tick_bio_needed()
753 pb = get_per_bio_data(bio); in check_if_tick_bio_needed()
760 static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio, in remap_to_origin_clear_discard() argument
764 check_if_tick_bio_needed(cache, bio); in remap_to_origin_clear_discard()
765 remap_to_origin(cache, bio); in remap_to_origin_clear_discard()
766 if (bio_data_dir(bio) == WRITE) in remap_to_origin_clear_discard()
770 static void remap_to_cache_dirty(struct cache *cache, struct bio *bio, in remap_to_cache_dirty() argument
773 check_if_tick_bio_needed(cache, bio); in remap_to_cache_dirty()
774 remap_to_cache(cache, bio, cblock); in remap_to_cache_dirty()
775 if (bio_data_dir(bio) == WRITE) { in remap_to_cache_dirty()
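
Both remap helpers piggyback bookkeeping on writes: remap_to_origin_clear_discard() clears the block's discard mark, since the origin copy becomes valid again, and remap_to_cache_dirty() marks the cache block dirty so writeback knows it must eventually be copied out. Each first calls check_if_tick_bio_needed(), which tags one suitable bio per tick in its per-bio data; cache_end_io() uses that tag to advance the policy's clock.
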
781 static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio) in get_bio_block() argument
783 sector_t block_nr = bio->bi_iter.bi_sector; in get_bio_block()
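
get_bio_block() is the inverse mapping: it recovers the origin block number from bi_sector, shifting right by sectors_per_block_shift for power-of-two block sizes and otherwise letting sector_div() reduce block_nr to the quotient, with the remainder discarded. The remap_sector() sketch above shows the same arithmetic in the other direction.
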
793 static bool accountable_bio(struct cache *cache, struct bio *bio) in accountable_bio() argument
795 return bio_op(bio) != REQ_OP_DISCARD; in accountable_bio()
798 static void accounted_begin(struct cache *cache, struct bio *bio) in accounted_begin() argument
802 if (accountable_bio(cache, bio)) { in accounted_begin()
803 pb = get_per_bio_data(bio); in accounted_begin()
804 pb->len = bio_sectors(bio); in accounted_begin()
809 static void accounted_complete(struct cache *cache, struct bio *bio) in accounted_complete() argument
811 struct per_bio_data *pb = get_per_bio_data(bio); in accounted_complete()
816 static void accounted_request(struct cache *cache, struct bio *bio) in accounted_request() argument
818 accounted_begin(cache, bio); in accounted_request()
819 dm_submit_bio_remap(bio, NULL); in accounted_request()
822 static void issue_op(struct bio *bio, void *context) in issue_op() argument
826 accounted_request(cache, bio); in issue_op()
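
accounted_begin() and accounted_complete() bracket a bio's time in flight: the length is noted in the per-bio data at submission (discards are exempt, per accountable_bio()) and reported to the cache's I/O tracker at completion, which is what lets the target sense idle periods. accounted_request() is begin-plus-submit, and issue_op() is the callback wired into the batcher at batcher_init() time, so bios released by a successful commit re-enter the same accounted submission path.
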
833 static void remap_to_origin_and_cache(struct cache *cache, struct bio *bio, in remap_to_origin_and_cache() argument
836 struct bio *origin_bio = bio_alloc_clone(cache->origin_dev->bdev, bio, in remap_to_origin_and_cache()
841 bio_chain(origin_bio, bio); in remap_to_origin_and_cache()
847 remap_to_cache(cache, bio, cblock); in remap_to_origin_and_cache()
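
remap_to_origin_and_cache() implements writethrough by duplication: a clone aimed at the origin is allocated with bio_alloc_clone(), chained to the original with bio_chain(), and submitted, and the original is then remapped to the cache device. Chaining holds back the original's completion until the clone has also finished, so the write is acknowledged only once both devices have it. A toy model of the remaining-counter idea behind bio_chain(); struct cbio and the recursion are simplifications of the kernel's atomic __bi_remaining and endio propagation:

    #include <stdio.h>

    struct cbio {                     /* stand-in for struct bio */
            int remaining;            /* models atomic __bi_remaining */
            const char *name;
            struct cbio *parent;      /* completion chains to here */
    };

    /* Model of bio_chain(child, parent): the parent must now wait for
     * one extra completion before its own endio may run. */
    static void chain(struct cbio *child, struct cbio *parent)
    {
            child->parent = parent;
            parent->remaining++;
    }

    static void end_io(struct cbio *bio)
    {
            if (--bio->remaining > 0)      /* chained child still pending */
                    return;
            printf("%s complete\n", bio->name);
            if (bio->parent)
                    end_io(bio->parent);   /* propagate up the chain */
    }

    int main(void)
    {
            struct cbio parent = { 1, "original (remapped to cache)", NULL };
            struct cbio clone  = { 1, "clone (sent to origin)", NULL };

            chain(&clone, &parent);        /* parent now needs 2 completions */

            end_io(&clone);                /* origin write finishes first... */
            end_io(&parent);               /* ...then the cache write: done */
            return 0;
    }
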
1009 static bool discard_or_flush(struct bio *bio) in discard_or_flush() argument
1011 return bio_op(bio) == REQ_OP_DISCARD || op_is_flush(bio->bi_opf); in discard_or_flush()
1014 static void calc_discard_block_range(struct cache *cache, struct bio *bio, in calc_discard_block_range() argument
1017 sector_t sb = bio->bi_iter.bi_sector; in calc_discard_block_range()
1018 sector_t se = bio_end_sector(bio); in calc_discard_block_range()
1064 static bool bio_writes_complete_block(struct cache *cache, struct bio *bio) in bio_writes_complete_block() argument
1066 return (bio_data_dir(bio) == WRITE) && in bio_writes_complete_block()
1067 (bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT)); in bio_writes_complete_block()
1070 static bool optimisable_bio(struct cache *cache, struct bio *bio, dm_oblock_t block) in optimisable_bio() argument
1073 (is_discarded_oblock(cache, block) || bio_writes_complete_block(cache, bio)); in optimisable_bio()
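
calc_discard_block_range() converts a discard's sector extent into whole discard blocks by rounding the start up and the end down, so a partially covered block is never marked, while bio_writes_complete_block() checks whether a write covers an entire cache block by comparing bi_size (bytes) against sectors_per_block shifted by SECTOR_SHIFT (9, for 512-byte sectors). optimisable_bio() then treats such bios, full-block writes or I/O to already-discarded blocks, as candidates (in writeback mode) for promotion without a preceding copy. A compact model of both checks, with illustrative constants:

    #include <stdio.h>
    #include <stdint.h>

    typedef uint64_t sector_t;
    #define SECTOR_SHIFT 9                 /* 512-byte sectors */

    /* Round [sb, se) inward to whole discard blocks: start up, end down. */
    static void discard_block_range(sector_t sb, sector_t se,
                                    sector_t block_size,
                                    sector_t *b, sector_t *e)
    {
            *b = (sb + block_size - 1) / block_size;
            if (se - sb < block_size)
                    *e = *b;               /* too small: empty range */
            else
                    *e = se / block_size;
    }

    /* Does a write of 'bytes' cover an entire cache block? */
    static int writes_complete_block(uint32_t bytes, sector_t sectors_per_block)
    {
            return bytes == (sectors_per_block << SECTOR_SHIFT);
    }

    int main(void)
    {
            sector_t b, e;

            discard_block_range(100, 900, 128, &b, &e);
            printf("discard blocks [%llu, %llu)\n",
                   (unsigned long long)b, (unsigned long long)e);

            /* 65536 bytes == 128 sectors * 512, a full 64 KiB block */
            printf("full-block write: %d\n",
                   writes_complete_block(65536, 128));
            return 0;
    }
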
1119 static void bio_drop_shared_lock(struct cache *cache, struct bio *bio) in bio_drop_shared_lock() argument
1121 struct per_bio_data *pb = get_per_bio_data(bio); in bio_drop_shared_lock()
1128 static void overwrite_endio(struct bio *bio) in overwrite_endio() argument
1130 struct dm_cache_migration *mg = bio->bi_private; in overwrite_endio()
1132 struct per_bio_data *pb = get_per_bio_data(bio); in overwrite_endio()
1134 dm_unhook_bio(&pb->hook_info, bio); in overwrite_endio()
1136 if (bio->bi_status) in overwrite_endio()
1137 mg->k.input = bio->bi_status; in overwrite_endio()
1145 struct bio *bio = mg->overwrite_bio; in overwrite() local
1146 struct per_bio_data *pb = get_per_bio_data(bio); in overwrite()
1148 dm_hook_bio(&pb->hook_info, bio, overwrite_endio, mg); in overwrite()
1155 remap_to_cache(mg->cache, bio, mg->op->cblock); in overwrite()
1157 remap_to_origin(mg->cache, bio); in overwrite()
1160 accounted_request(mg->cache, bio); in overwrite()
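
overwrite() is the hook machinery in action. When mg_start() is handed a triggering bio it keeps it as mg->overwrite_bio, and the promotion can then be performed by letting that bio write the cache block directly rather than copying from the origin first. overwrite() installs overwrite_endio() with dm_hook_bio(), remaps to the cache block for promotions and to the origin otherwise, and issues with accounting; on completion, overwrite_endio() unhooks the bio, records any error status as the continuation's input (mg->k.input), and the migration state machine carries on from there.
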
1433 static int mg_start(struct cache *cache, struct policy_work *op, struct bio *bio) in mg_start() argument
1445 mg->overwrite_bio = bio; in mg_start()
1447 if (!bio) in mg_start()
1565 dm_oblock_t oblock, struct bio *bio) in invalidate_start() argument
1574 mg->overwrite_bio = bio; in invalidate_start()
1604 static void inc_hit_counter(struct cache *cache, struct bio *bio) in inc_hit_counter() argument
1606 atomic_inc(bio_data_dir(bio) == READ ? in inc_hit_counter()
1610 static void inc_miss_counter(struct cache *cache, struct bio *bio) in inc_miss_counter() argument
1612 atomic_inc(bio_data_dir(bio) == READ ? in inc_miss_counter()
1618 static int map_bio(struct cache *cache, struct bio *bio, dm_oblock_t block, in map_bio() argument
1627 rb = bio_detain_shared(cache, block, bio); in map_bio()
1639 data_dir = bio_data_dir(bio); in map_bio()
1641 if (optimisable_bio(cache, bio, block)) { in map_bio()
1648 bio_io_error(bio); in map_bio()
1653 bio_drop_shared_lock(cache, bio); in map_bio()
1655 mg_start(cache, op, bio); in map_bio()
1663 bio_io_error(bio); in map_bio()
1672 struct per_bio_data *pb = get_per_bio_data(bio); in map_bio()
1677 inc_miss_counter(cache, bio); in map_bio()
1679 accounted_begin(cache, bio); in map_bio()
1680 remap_to_origin_clear_discard(cache, bio, block); in map_bio()
1686 bio_endio(bio); in map_bio()
1693 inc_hit_counter(cache, bio); in map_bio()
1700 if (bio_data_dir(bio) == WRITE) { in map_bio()
1701 bio_drop_shared_lock(cache, bio); in map_bio()
1703 invalidate_start(cache, cblock, block, bio); in map_bio()
1705 remap_to_origin_clear_discard(cache, bio, block); in map_bio()
1707 if (bio_data_dir(bio) == WRITE && writethrough_mode(cache) && in map_bio()
1709 remap_to_origin_and_cache(cache, bio, block, cblock); in map_bio()
1710 accounted_begin(cache, bio); in map_bio()
1712 remap_to_cache_dirty(cache, bio, block, cblock); in map_bio()
1719 if (bio->bi_opf & REQ_FUA) { in map_bio()
1724 accounted_complete(cache, bio); in map_bio()
1725 issue_after_commit(&cache->committer, bio); in map_bio()
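
map_bio() is the per-bio decision tree. It first takes the shared cell lock; if a migration holds the block exclusively, the bio stays parked on the cell and nothing further happens here. Optimisable bios go straight to the policy so the resulting promotion can be satisfied by the bio itself (mg_start() with an overwrite bio). Otherwise a miss counts a miss and remaps to the origin, clearing stale discard state on writes; a hit counts a hit and remaps to the cache, except that in passthrough mode a write hit first invalidates the cached copy (invalidate_start()), and in writethrough mode a write hit is duplicated to both devices via remap_to_origin_and_cache(). Finally, a REQ_FUA bio must not complete before the metadata it depends on is durable, so rather than being returned as a plain remap it is parked in the committer with issue_after_commit().
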
1733 static bool process_bio(struct cache *cache, struct bio *bio) in process_bio() argument
1737 if (map_bio(cache, bio, get_bio_block(cache, bio), &commit_needed) == DM_MAPIO_REMAPPED) in process_bio()
1738 dm_submit_bio_remap(bio, NULL); in process_bio()
1776 static bool process_flush_bio(struct cache *cache, struct bio *bio) in process_flush_bio() argument
1778 struct per_bio_data *pb = get_per_bio_data(bio); in process_flush_bio()
1781 remap_to_origin(cache, bio); in process_flush_bio()
1783 remap_to_cache(cache, bio, 0); in process_flush_bio()
1785 issue_after_commit(&cache->committer, bio); in process_flush_bio()
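
Empty flushes are cloned once per underlying device (dm-cache requests two flush bios per target), and pb->req_nr, recorded by init_per_bio_data(), tells the copies apart: copy 0 is remapped to the origin, the other to the cache device. Both go through issue_after_commit(), so a flush also forces a metadata commit before it is acknowledged.
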
1789 static bool process_discard_bio(struct cache *cache, struct bio *bio) in process_discard_bio() argument
1798 calc_discard_block_range(cache, bio, &b, &e); in process_discard_bio()
1805 remap_to_origin(cache, bio); in process_discard_bio()
1806 dm_submit_bio_remap(bio, NULL); in process_discard_bio()
1808 bio_endio(bio); in process_discard_bio()
1819 struct bio *bio; in process_deferred_bios() local
1827 while ((bio = bio_list_pop(&bios))) { in process_deferred_bios()
1828 if (bio->bi_opf & REQ_PREFLUSH) in process_deferred_bios()
1829 commit_needed = process_flush_bio(cache, bio) || commit_needed; in process_deferred_bios()
1831 else if (bio_op(bio) == REQ_OP_DISCARD) in process_deferred_bios()
1832 commit_needed = process_discard_bio(cache, bio) || commit_needed; in process_deferred_bios()
1835 commit_needed = process_bio(cache, bio) || commit_needed; in process_deferred_bios()
1850 struct bio *bio; in requeue_deferred_bios() local
1856 while ((bio = bio_list_pop(&bios))) { in requeue_deferred_bios()
1857 bio->bi_status = BLK_STS_DM_REQUEUE; in requeue_deferred_bios()
1858 bio_endio(bio); in requeue_deferred_bios()
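
process_deferred_bios() and requeue_deferred_bios() share a drain pattern: the shared deferred list is moved into an on-stack bio_list under the spinlock, then popped and handled outside it, either classified as flush, discard, or normal I/O, or (when tearing down) ended with BLK_STS_DM_REQUEUE so the DM core resubmits them later. A userspace model of the drain-then-classify loop; struct dbio, the op constants, and drain() are invented stand-ins:

    #include <stdio.h>

    enum { OP_WRITE, OP_DISCARD, OP_FLUSH };   /* stand-ins for bio op flags */

    struct dbio {
            int op;
            struct dbio *next;
    };

    /* Model of the locked handoff: steal the whole shared list at once,
     * then drain it without holding any lock (kernel: spinlock around
     * bio_list_merge() + bio_list_init()). */
    static struct dbio *drain(struct dbio **shared)
    {
            struct dbio *bios = *shared;

            *shared = NULL;
            return bios;
    }

    static int process(struct dbio *bio)       /* returns "commit needed?" */
    {
            switch (bio->op) {
            case OP_FLUSH:
                    printf("flush: remap, hold for commit\n");
                    return 1;                  /* a flush forces a commit */
            case OP_DISCARD:
                    printf("discard: mark blocks, complete or pass down\n");
                    return 0;
            default:
                    printf("read/write: map and remap\n");
                    return 0;                  /* map_bio() may say 1 */
            }
    }

    int main(void)
    {
            struct dbio b1 = { OP_FLUSH, NULL };
            struct dbio b2 = { OP_WRITE, &b1 };     /* list: b2 -> b1 */
            struct dbio *deferred = &b2, *bio;
            int commit_needed = 0;

            for (bio = drain(&deferred); bio; bio = bio->next)
                    commit_needed = process(bio) || commit_needed;

            printf("commit_needed = %d\n", commit_needed);
            return 0;
    }

Note the operand order, process(bio) || commit_needed, mirrors the kernel's: with the call on the left, short-circuit evaluation can never skip processing a bio just because an earlier one already requested a commit.
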
2650 static int cache_map(struct dm_target *ti, struct bio *bio) in cache_map() argument
2656 dm_oblock_t block = get_bio_block(cache, bio); in cache_map()
2658 init_per_bio_data(bio); in cache_map()
2665 remap_to_origin(cache, bio); in cache_map()
2666 accounted_begin(cache, bio); in cache_map()
2670 if (discard_or_flush(bio)) { in cache_map()
2671 defer_bio(cache, bio); in cache_map()
2675 r = map_bio(cache, bio, block, &commit_needed); in cache_map()
2682 static int cache_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *error) in cache_end_io() argument
2686 struct per_bio_data *pb = get_per_bio_data(bio); in cache_end_io()
2696 bio_drop_shared_lock(cache, bio); in cache_end_io()
2697 accounted_complete(cache, bio); in cache_end_io()
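
cache_map() and cache_end_io() bracket each bio's trip through the target. The map side initialises the per-bio data, remaps I/O addressed past the last cacheable origin block straight to the origin (a partial block at the end of the device is never cached), defers discards and flushes to the worker, and runs map_bio() inline for everything else. The end_io side is the matching teardown: if this bio carried the tick it advances the policy clock and re-arms need_tick_bio, then it drops the shared cell lock and closes the I/O accounting opened in accounted_begin().
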