Lines matching refs: bio (each entry shows the source-file line number, the matching code, and the enclosing function)

226 typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio);
384 struct bio *parent_bio;
385 struct bio *bio; member
388 static void begin_discard(struct discard_op *op, struct thin_c *tc, struct bio *parent) in begin_discard()
395 op->bio = NULL; in begin_discard()
404 return __blkdev_issue_discard(tc->pool_dev->bdev, s, len, GFP_NOIO, &op->bio); in issue_discard()
409 if (op->bio) { in end_discard()
414 bio_chain(op->bio, op->parent_bio); in end_discard()
415 op->bio->bi_opf = REQ_OP_DISCARD; in end_discard()
416 submit_bio(op->bio); in end_discard()
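
These begin/issue/end helpers implement dm-thin's batched-discard pattern: sub-discards accumulate into a bio chain whose completion is tied back to the parent discard. A minimal sketch of the pattern, assuming a simplified discard_op without the real code's pool locking:

#include <linux/bio.h>
#include <linux/blkdev.h>

struct discard_op_sketch {
        struct block_device *bdev;      /* data device to discard on */
        struct bio *parent_bio;         /* original discard from above */
        struct bio *bio;                /* accumulated discard chain */
};

static void sketch_begin_discard(struct discard_op_sketch *op,
                                 struct block_device *bdev, struct bio *parent)
{
        op->bdev = bdev;
        op->parent_bio = parent;
        op->bio = NULL;
}

static int sketch_issue_discard(struct discard_op_sketch *op,
                                sector_t s, sector_t len)
{
        /* starts, or appends to, the chain hanging off op->bio */
        return __blkdev_issue_discard(op->bdev, s, len, GFP_NOIO, &op->bio);
}

static void sketch_end_discard(struct discard_op_sketch *op, int r)
{
        if (op->bio) {
                /* tie completion of the chain to the parent discard */
                bio_chain(op->bio, op->parent_bio);
                op->bio->bi_opf = REQ_OP_DISCARD;
                submit_bio(op->bio);
        }
        if (r && !op->parent_bio->bi_status)
                op->parent_bio->bi_status = errno_to_blk_status(r);
        bio_endio(op->parent_bio);
}

bio_chain() is what makes the final bio_endio() on the parent safe: chaining bumps the parent's remaining count, so the parent only completes once the chained children have finished.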
443 static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bio, in bio_detain() argument
455 r = dm_bio_detain(pool->prison, key, bio, cell_prealloc, cell_result); in bio_detain()
597 struct bio *bio; in error_bio_list() local
599 while ((bio = bio_list_pop(bios))) { in error_bio_list()
600 bio->bi_status = error; in error_bio_list()
601 bio_endio(bio); in error_bio_list()
672 static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio) in get_bio_block() argument
675 sector_t block_nr = bio->bi_iter.bi_sector; in get_bio_block()
688 static void get_bio_block_range(struct thin_c *tc, struct bio *bio, in get_bio_block_range() argument
692 sector_t b = bio->bi_iter.bi_sector; in get_bio_block_range()
693 sector_t e = b + (bio->bi_iter.bi_size >> SECTOR_SHIFT); in get_bio_block_range()
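
get_bio_block() and get_bio_block_range() convert a bio's position into pool block numbers. A sketch of the arithmetic, assuming the pool fields dm-thin keeps (sectors_per_block, plus a shift when the block size is a power of two):

static dm_block_t sketch_get_bio_block(struct pool *pool, struct bio *bio)
{
        sector_t block_nr = bio->bi_iter.bi_sector;

        if (block_size_is_power_of_two(pool))
                block_nr >>= pool->sectors_per_block_shift;
        else
                (void) sector_div(block_nr, pool->sectors_per_block);

        return block_nr;
}

get_bio_block_range() applies the same conversion to both ends, computing the exclusive end as b + (bio->bi_iter.bi_size >> SECTOR_SHIFT) and rounding the begin up and the end down, so that only blocks fully covered by the bio fall in the range (which is what a discard needs).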
714 static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block) in remap() argument
717 sector_t bi_sector = bio->bi_iter.bi_sector; in remap()
719 bio_set_dev(bio, tc->pool_dev->bdev); in remap()
721 bio->bi_iter.bi_sector = in remap()
725 bio->bi_iter.bi_sector = (block * pool->sectors_per_block) + in remap()
730 static void remap_to_origin(struct thin_c *tc, struct bio *bio) in remap_to_origin() argument
732 bio_set_dev(bio, tc->origin_dev->bdev); in remap_to_origin()
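
remap() is the core translation: point the bio at the pool's data device and rewrite bi_sector from the virtual position to the mapped data block, preserving the offset within the block. A sketch, with the data bdev passed in to keep it self-contained:

static void sketch_remap(struct pool *pool, struct block_device *data_bdev,
                         struct bio *bio, dm_block_t block)
{
        sector_t bi_sector = bio->bi_iter.bi_sector;

        bio_set_dev(bio, data_bdev);
        if (block_size_is_power_of_two(pool))
                bio->bi_iter.bi_sector =
                        (block << pool->sectors_per_block_shift) |
                        (bi_sector & (pool->sectors_per_block - 1));
        else
                bio->bi_iter.bi_sector =
                        (block * pool->sectors_per_block) +
                        sector_div(bi_sector, pool->sectors_per_block);
}

remap_to_origin() is the degenerate case: only the device changes, since the thin device and its external origin share the same linear sector space.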
735 static int bio_triggers_commit(struct thin_c *tc, struct bio *bio) in bio_triggers_commit() argument
737 return op_is_flush(bio->bi_opf) && in bio_triggers_commit()
741 static void inc_all_io_entry(struct pool *pool, struct bio *bio) in inc_all_io_entry() argument
745 if (bio_op(bio) == REQ_OP_DISCARD) in inc_all_io_entry()
748 h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); in inc_all_io_entry()
752 static void issue(struct thin_c *tc, struct bio *bio) in issue() argument
756 if (!bio_triggers_commit(tc, bio)) { in issue()
757 dm_submit_bio_remap(bio, NULL); in issue()
767 bio_io_error(bio); in issue()
776 bio_list_add(&pool->deferred_flush_bios, bio); in issue()
780 static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio) in remap_to_origin_and_issue() argument
782 remap_to_origin(tc, bio); in remap_to_origin_and_issue()
783 issue(tc, bio); in remap_to_origin_and_issue()
786 static void remap_and_issue(struct thin_c *tc, struct bio *bio, in remap_and_issue() argument
789 remap(tc, bio, block); in remap_and_issue()
790 issue(tc, bio); in remap_and_issue()
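
issue() enforces the commit ordering these lines hint at: a bio carrying REQ_PREFLUSH/REQ_FUA must not reach the data device while the metadata it depends on is uncommitted, so such bios are parked for the worker to submit after the commit. A simplified sketch, with bio_triggers_commit()'s "has this transaction changed" test reduced to a flag (the real function also fails flush bios outright when the pool has degraded to read-only mode):

static void sketch_issue(struct pool *pool, struct bio *bio,
                         bool metadata_changed)
{
        if (!(op_is_flush(bio->bi_opf) && metadata_changed)) {
                /* dm targets submit via dm_submit_bio_remap() here */
                submit_bio(bio);
                return;
        }

        /* batched for submission once the metadata commit completes */
        spin_lock_irq(&pool->lock);
        bio_list_add(&pool->deferred_flush_bios, bio);
        spin_unlock_irq(&pool->lock);
}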
823 struct bio *bio; member
855 static void overwrite_endio(struct bio *bio) in overwrite_endio() argument
857 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); in overwrite_endio()
860 bio->bi_end_io = m->saved_bi_end_io; in overwrite_endio()
862 m->status = bio->bi_status; in overwrite_endio()
897 static void thin_defer_bio(struct thin_c *tc, struct bio *bio);
909 struct bio *bio; in __inc_remap_and_issue_cell() local
911 while ((bio = bio_list_pop(&cell->bios))) { in __inc_remap_and_issue_cell()
912 if (op_is_flush(bio->bi_opf) || bio_op(bio) == REQ_OP_DISCARD) in __inc_remap_and_issue_cell()
913 bio_list_add(&info->defer_bios, bio); in __inc_remap_and_issue_cell()
915 inc_all_io_entry(info->tc->pool, bio); in __inc_remap_and_issue_cell()
922 bio_list_add(&info->issue_bios, bio); in __inc_remap_and_issue_cell()
931 struct bio *bio; in inc_remap_and_issue_cell() local
946 while ((bio = bio_list_pop(&info.defer_bios))) in inc_remap_and_issue_cell()
947 thin_defer_bio(tc, bio); in inc_remap_and_issue_cell()
949 while ((bio = bio_list_pop(&info.issue_bios))) in inc_remap_and_issue_cell()
950 remap_and_issue(info.tc, bio, block); in inc_remap_and_issue_cell()
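
__inc_remap_and_issue_cell() drains a prison cell into two lists: bios that must take the deferred path again (flushes and discards) and bios that can be remapped and issued straight away. The drain itself is a two-way split, sketched here over bare bio_lists:

static void sketch_split_cell_bios(struct bio_list *cell_bios,
                                   struct bio_list *defer_bios,
                                   struct bio_list *issue_bios)
{
        struct bio *bio;

        while ((bio = bio_list_pop(cell_bios))) {
                if (op_is_flush(bio->bi_opf) ||
                    bio_op(bio) == REQ_OP_DISCARD)
                        bio_list_add(defer_bios, bio);
                else
                        bio_list_add(issue_bios, bio);
        }
}

inc_remap_and_issue_cell() then replays the defer list through thin_defer_bio() and the issue list through remap_and_issue(), after the cell has been released.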
960 static void complete_overwrite_bio(struct thin_c *tc, struct bio *bio) in complete_overwrite_bio() argument
968 if (!bio_triggers_commit(tc, bio)) { in complete_overwrite_bio()
969 bio_endio(bio); in complete_overwrite_bio()
979 bio_io_error(bio); in complete_overwrite_bio()
988 bio_list_add(&pool->deferred_flush_completions, bio); in complete_overwrite_bio()
996 struct bio *bio = m->bio; in process_prepared_mapping() local
1022 if (bio) { in process_prepared_mapping()
1024 complete_overwrite_bio(tc, bio); in process_prepared_mapping()
1049 bio_io_error(m->bio); in process_prepared_discard_fail()
1055 bio_endio(m->bio); in process_prepared_discard_success()
1067 bio_io_error(m->bio); in process_prepared_discard_no_passdown()
1069 bio_endio(m->bio); in process_prepared_discard_no_passdown()
1078 struct bio *discard_parent) in passdown_double_checking_shared_status()
1137 static void passdown_endio(struct bio *bio) in passdown_endio() argument
1143 queue_passdown_pt2(bio->bi_private); in passdown_endio()
1144 bio_put(bio); in passdown_endio()
1152 struct bio *discard_parent; in process_prepared_discard_passdown_pt1()
1163 bio_io_error(m->bio); in process_prepared_discard_passdown_pt1()
1176 bio_io_error(m->bio); in process_prepared_discard_passdown_pt1()
1210 bio_io_error(m->bio); in process_prepared_discard_passdown_pt2()
1212 bio_endio(m->bio); in process_prepared_discard_passdown_pt2()
1236 static int io_overlaps_block(struct pool *pool, struct bio *bio) in io_overlaps_block() argument
1238 return bio->bi_iter.bi_size == in io_overlaps_block()
1242 static int io_overwrites_block(struct pool *pool, struct bio *bio) in io_overwrites_block() argument
1244 return (bio_data_dir(bio) == WRITE) && in io_overwrites_block()
1245 io_overlaps_block(pool, bio); in io_overwrites_block()
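
io_overwrites_block() is the whole-block-write test that lets the copy paths skip the actual copy: if a WRITE covers exactly one pool block, the new data can simply be written in place of copying the old contents. A sketch of the check:

static bool sketch_io_overwrites_block(struct pool *pool, struct bio *bio)
{
        return bio_data_dir(bio) == WRITE &&
               bio->bi_iter.bi_size ==
                        (pool->sectors_per_block << SECTOR_SHIFT);
}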
1248 static void save_and_set_endio(struct bio *bio, bio_end_io_t **save, in save_and_set_endio() argument
1251 *save = bio->bi_end_io; in save_and_set_endio()
1252 bio->bi_end_io = fn; in save_and_set_endio()
1273 m->bio = NULL; in get_next_mapping()
1292 static void remap_and_issue_overwrite(struct thin_c *tc, struct bio *bio, in remap_and_issue_overwrite() argument
1297 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); in remap_and_issue_overwrite()
1300 m->bio = bio; in remap_and_issue_overwrite()
1301 save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio); in remap_and_issue_overwrite()
1302 inc_all_io_entry(pool, bio); in remap_and_issue_overwrite()
1303 remap_and_issue(tc, bio, data_begin); in remap_and_issue_overwrite()
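
save_and_set_endio() and overwrite_endio() form an endio-hijack pair: stash the bio's original completion, point bi_end_io at a private hook, and restore it when the overwrite finishes so the upper layer sees a normal completion. A sketch of the pair; the ctx lookup here uses bi_private purely for self-containment, whereas dm-thin finds its mapping via dm_per_bio_data():

#include <linux/bio.h>

struct overwrite_ctx {          /* hypothetical stand-in for dm_thin_new_mapping */
        bio_end_io_t *saved_bi_end_io;
        blk_status_t status;
};

static void sketch_overwrite_endio(struct bio *bio)
{
        struct overwrite_ctx *ctx = bio->bi_private;

        bio->bi_end_io = ctx->saved_bi_end_io;  /* restore the original */
        ctx->status = bio->bi_status;
        /* the real code now queues the mapping for the worker to finish */
}

static void sketch_save_and_set_endio(struct bio *bio, bio_end_io_t **save,
                                      bio_end_io_t *fn)
{
        *save = bio->bi_end_io;
        bio->bi_end_io = fn;
}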
1312 struct dm_bio_prison_cell *cell, struct bio *bio, in schedule_copy() argument
1340 if (io_overwrites_block(pool, bio)) in schedule_copy()
1341 remap_and_issue_overwrite(tc, bio, data_dest, m); in schedule_copy()
1372 struct dm_bio_prison_cell *cell, struct bio *bio) in schedule_internal_copy() argument
1375 data_origin, data_dest, cell, bio, in schedule_internal_copy()
1381 struct bio *bio) in schedule_zero() argument
1399 if (io_overwrites_block(pool, bio)) in schedule_zero()
1400 remap_and_issue_overwrite(tc, bio, data_block, m); in schedule_zero()
1411 struct dm_bio_prison_cell *cell, struct bio *bio) in schedule_external_copy() argument
1419 virt_block, data_dest, cell, bio, in schedule_external_copy()
1424 virt_block, data_dest, cell, bio, in schedule_external_copy()
1428 schedule_zero(tc, virt_block, data_dest, cell, bio); in schedule_external_copy()
1582 static void retry_on_resume(struct bio *bio) in retry_on_resume() argument
1584 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); in retry_on_resume()
1588 bio_list_add(&tc->retry_on_resume_list, bio); in retry_on_resume()
1616 static void handle_unserviceable_bio(struct pool *pool, struct bio *bio) in handle_unserviceable_bio() argument
1621 bio->bi_status = error; in handle_unserviceable_bio()
1622 bio_endio(bio); in handle_unserviceable_bio()
1624 retry_on_resume(bio); in handle_unserviceable_bio()
1629 struct bio *bio; in retry_bios_on_resume() local
1642 while ((bio = bio_list_pop(&bios))) in retry_bios_on_resume()
1643 retry_on_resume(bio); in retry_bios_on_resume()
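
These lines reference the requeue-on-resume pattern: bios that cannot be serviced now are either failed according to the pool's error policy or parked on a per-thin list to be replayed when the pool resumes. A sketch, with the policy decision reduced to a flag:

static void sketch_handle_unserviceable(struct thin_c *tc, struct bio *bio,
                                        blk_status_t error, bool fail_now)
{
        if (fail_now) {
                bio->bi_status = error;
                bio_endio(bio);
                return;
        }

        spin_lock_irq(&tc->lock);
        bio_list_add(&tc->retry_on_resume_list, bio);
        spin_unlock_irq(&tc->lock);
}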
1660 m->bio = virt_cell->holder; in process_discard_cell_no_passdown()
1667 struct bio *bio) in break_up_discard_bio() argument
1723 m->bio = bio; in break_up_discard_bio()
1733 bio_inc_remaining(bio); in break_up_discard_bio()
1747 struct bio *bio = virt_cell->holder; in process_discard_cell_passdown() local
1748 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); in process_discard_cell_passdown()
1756 break_up_discard_bio(tc, virt_cell->key.block_begin, virt_cell->key.block_end, bio); in process_discard_cell_passdown()
1763 bio_endio(bio); in process_discard_cell_passdown()
1766 static void process_discard_bio(struct thin_c *tc, struct bio *bio) in process_discard_bio() argument
1772 get_bio_block_range(tc, bio, &begin, &end); in process_discard_bio()
1777 bio_endio(bio); in process_discard_bio()
1783 bio_endio(bio); in process_discard_bio()
1787 if (bio_detain(tc->pool, &virt_key, bio, &virt_cell)) { in process_discard_bio()
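
break_up_discard_bio() walks the discarded range in per-mapping chunks, and bio_inc_remaining() is what keeps the parent discard alive while chunks are outstanding: each queued chunk takes a reference that its eventual bio_endio() on the parent drops. A sketch of the accounting, with the hand-off reduced to a hypothetical queue_sub_discard() stub:

static void queue_sub_discard(struct bio *parent, sector_t s, sector_t len)
{
        /* hypothetical: hand one chunk to a worker; stubbed for the sketch */
}

static void sketch_break_up_discard(struct bio *bio, sector_t begin,
                                    sector_t end, sector_t chunk)
{
        while (begin < end) {
                sector_t len = min(chunk, end - begin);

                bio_inc_remaining(bio);         /* one ref per chunk */
                queue_sub_discard(bio, begin, len);
                begin += len;
        }

        /*
         * Drop the caller's ref; the parent now completes only when the
         * last chunk calls bio_endio() on it.
         */
        bio_endio(bio);
}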
1801 static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block, in break_sharing() argument
1814 data_block, cell, bio); in break_sharing()
1833 struct bio *bio; in __remap_and_issue_shared_cell() local
1835 while ((bio = bio_list_pop(&cell->bios))) { in __remap_and_issue_shared_cell()
1836 if (bio_data_dir(bio) == WRITE || op_is_flush(bio->bi_opf) || in __remap_and_issue_shared_cell()
1837 bio_op(bio) == REQ_OP_DISCARD) in __remap_and_issue_shared_cell()
1838 bio_list_add(&info->defer_bios, bio); in __remap_and_issue_shared_cell()
1840 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); in __remap_and_issue_shared_cell()
1843 inc_all_io_entry(info->tc->pool, bio); in __remap_and_issue_shared_cell()
1844 bio_list_add(&info->issue_bios, bio); in __remap_and_issue_shared_cell()
1853 struct bio *bio; in remap_and_issue_shared_cell() local
1863 while ((bio = bio_list_pop(&info.defer_bios))) in remap_and_issue_shared_cell()
1864 thin_defer_bio(tc, bio); in remap_and_issue_shared_cell()
1866 while ((bio = bio_list_pop(&info.issue_bios))) in remap_and_issue_shared_cell()
1867 remap_and_issue(tc, bio, block); in remap_and_issue_shared_cell()
1870 static void process_shared_bio(struct thin_c *tc, struct bio *bio, in process_shared_bio() argument
1884 if (bio_detain(pool, &key, bio, &data_cell)) { in process_shared_bio()
1889 if (bio_data_dir(bio) == WRITE && bio->bi_iter.bi_size) { in process_shared_bio()
1890 break_sharing(tc, bio, block, &key, lookup_result, data_cell); in process_shared_bio()
1893 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); in process_shared_bio()
1896 inc_all_io_entry(pool, bio); in process_shared_bio()
1897 remap_and_issue(tc, bio, lookup_result->block); in process_shared_bio()
1904 static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block, in provision_block() argument
1914 if (!bio->bi_iter.bi_size) { in provision_block()
1915 inc_all_io_entry(pool, bio); in provision_block()
1918 remap_and_issue(tc, bio, 0); in provision_block()
1925 if (bio_data_dir(bio) == READ) { in provision_block()
1926 zero_fill_bio(bio); in provision_block()
1928 bio_endio(bio); in provision_block()
1936 schedule_external_copy(tc, block, data_block, cell, bio); in provision_block()
1938 schedule_zero(tc, block, data_block, cell, bio); in provision_block()
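
provision_block() contains the unprovisioned-read shortcut these lines show: a READ of a block that has never been written needs no allocation at all and can be satisfied by zero-filling the bio. A sketch of just that branch:

static bool sketch_read_of_unprovisioned(struct bio *bio)
{
        if (bio_data_dir(bio) == READ) {
                zero_fill_bio(bio);
                bio_endio(bio);
                return true;    /* handled without allocating a block */
        }
        return false;           /* writes must allocate, then zero or copy */
}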
1957 struct bio *bio = cell->holder; in process_cell() local
1958 dm_block_t block = get_bio_block(tc, bio); in process_cell()
1970 process_shared_bio(tc, bio, block, &lookup_result, cell); in process_cell()
1972 inc_all_io_entry(pool, bio); in process_cell()
1973 remap_and_issue(tc, bio, lookup_result.block); in process_cell()
1979 if (bio_data_dir(bio) == READ && tc->origin_dev) { in process_cell()
1980 inc_all_io_entry(pool, bio); in process_cell()
1983 if (bio_end_sector(bio) <= tc->origin_size) in process_cell()
1984 remap_to_origin_and_issue(tc, bio); in process_cell()
1986 else if (bio->bi_iter.bi_sector < tc->origin_size) { in process_cell()
1987 zero_fill_bio(bio); in process_cell()
1988 bio->bi_iter.bi_size = (tc->origin_size - bio->bi_iter.bi_sector) << SECTOR_SHIFT; in process_cell()
1989 remap_to_origin_and_issue(tc, bio); in process_cell()
1992 zero_fill_bio(bio); in process_cell()
1993 bio_endio(bio); in process_cell()
1996 provision_block(tc, bio, block, cell); in process_cell()
2003 bio_io_error(bio); in process_cell()
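
The unmapped-read branch of process_cell() handles an external origin in three cases: reads entirely below origin_size go to the origin, reads straddling its end are truncated (with the tail zeroed), and reads wholly beyond it are just zero-filled. A sketch, assuming the caller has already checked that tc->origin_dev exists:

static void sketch_read_from_origin(struct thin_c *tc, struct bio *bio)
{
        if (bio_end_sector(bio) <= tc->origin_size) {
                remap_to_origin_and_issue(tc, bio);
        } else if (bio->bi_iter.bi_sector < tc->origin_size) {
                /* zero everything, then let the origin read overwrite
                 * the in-range front; the tail stays zeroed */
                zero_fill_bio(bio);
                bio->bi_iter.bi_size =
                        (tc->origin_size - bio->bi_iter.bi_sector)
                                << SECTOR_SHIFT;
                remap_to_origin_and_issue(tc, bio);
        } else {
                zero_fill_bio(bio);
                bio_endio(bio);
        }
}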
2008 static void process_bio(struct thin_c *tc, struct bio *bio) in process_bio() argument
2011 dm_block_t block = get_bio_block(tc, bio); in process_bio()
2020 if (bio_detain(pool, &key, bio, &cell)) in process_bio()
2026 static void __process_bio_read_only(struct thin_c *tc, struct bio *bio, in __process_bio_read_only() argument
2030 int rw = bio_data_dir(bio); in __process_bio_read_only()
2031 dm_block_t block = get_bio_block(tc, bio); in __process_bio_read_only()
2037 if (lookup_result.shared && (rw == WRITE) && bio->bi_iter.bi_size) { in __process_bio_read_only()
2038 handle_unserviceable_bio(tc->pool, bio); in __process_bio_read_only()
2042 inc_all_io_entry(tc->pool, bio); in __process_bio_read_only()
2043 remap_and_issue(tc, bio, lookup_result.block); in __process_bio_read_only()
2053 handle_unserviceable_bio(tc->pool, bio); in __process_bio_read_only()
2058 inc_all_io_entry(tc->pool, bio); in __process_bio_read_only()
2059 remap_to_origin_and_issue(tc, bio); in __process_bio_read_only()
2063 zero_fill_bio(bio); in __process_bio_read_only()
2064 bio_endio(bio); in __process_bio_read_only()
2072 bio_io_error(bio); in __process_bio_read_only()
2077 static void process_bio_read_only(struct thin_c *tc, struct bio *bio) in process_bio_read_only() argument
2079 __process_bio_read_only(tc, bio, NULL); in process_bio_read_only()
2087 static void process_bio_success(struct thin_c *tc, struct bio *bio) in process_bio_success() argument
2089 bio_endio(bio); in process_bio_success()
2092 static void process_bio_fail(struct thin_c *tc, struct bio *bio) in process_bio_fail() argument
2094 bio_io_error(bio); in process_bio_fail()
2120 static void __thin_bio_rb_add(struct thin_c *tc, struct bio *bio) in __thin_bio_rb_add() argument
2124 sector_t bi_sector = bio->bi_iter.bi_sector; in __thin_bio_rb_add()
2138 pbd = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); in __thin_bio_rb_add()
2147 struct bio *bio; in __extract_sorted_bios() local
2151 bio = thin_bio(pbd); in __extract_sorted_bios()
2153 bio_list_add(&tc->deferred_bio_list, bio); in __extract_sorted_bios()
2162 struct bio *bio; in __sort_thin_deferred_bios() local
2170 while ((bio = bio_list_pop(&bios))) in __sort_thin_deferred_bios()
2171 __thin_bio_rb_add(tc, bio); in __sort_thin_deferred_bios()
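
__thin_bio_rb_add() sorts deferred bios with a red-black tree keyed on bi_sector, so an in-order walk yields them in ascending sector order for better locality on the data device. A sketch of the insert, with the per-bio hook layout simplified (dm-thin embeds the rb_node in dm_thin_endio_hook and recovers the bio via thin_bio()):

#include <linux/rbtree.h>
#include <linux/bio.h>

struct sorted_bio {             /* simplified stand-in for the endio hook */
        struct rb_node rb_node;
        struct bio *bio;
};

static void sketch_rb_add(struct rb_root *root, struct sorted_bio *pbd)
{
        struct rb_node **link = &root->rb_node, *parent = NULL;
        sector_t key = pbd->bio->bi_iter.bi_sector;

        while (*link) {
                struct sorted_bio *this;

                parent = *link;
                this = rb_entry(parent, struct sorted_bio, rb_node);
                if (key < this->bio->bi_iter.bi_sector)
                        link = &(*link)->rb_left;
                else
                        link = &(*link)->rb_right;
        }

        rb_link_node(&pbd->rb_node, parent, link);
        rb_insert_color(&pbd->rb_node, root);
}

__extract_sorted_bios() then walks the tree in order (rb_first()/rb_next()), appending each bio back onto deferred_bio_list.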
2184 struct bio *bio; in process_thin_deferred_bios() local
2212 while ((bio = bio_list_pop(&bios))) { in process_thin_deferred_bios()
2220 bio_list_add(&tc->deferred_bio_list, bio); in process_thin_deferred_bios()
2226 if (bio_op(bio) == REQ_OP_DISCARD) in process_thin_deferred_bios()
2227 pool->process_discard(tc, bio); in process_thin_deferred_bios()
2229 pool->process_bio(tc, bio); in process_thin_deferred_bios()
2362 struct bio *bio; in process_deferred_bios() local
2395 while ((bio = bio_list_pop(&bios))) in process_deferred_bios()
2396 bio_io_error(bio); in process_deferred_bios()
2401 while ((bio = bio_list_pop(&bio_completions))) in process_deferred_bios()
2402 bio_endio(bio); in process_deferred_bios()
2404 while ((bio = bio_list_pop(&bios))) { in process_deferred_bios()
2409 if (bio->bi_opf & REQ_PREFLUSH) in process_deferred_bios()
2410 bio_endio(bio); in process_deferred_bios()
2412 dm_submit_bio_remap(bio, NULL); in process_deferred_bios()
2674 static void thin_defer_bio(struct thin_c *tc, struct bio *bio) in thin_defer_bio() argument
2679 bio_list_add(&tc->deferred_bio_list, bio); in thin_defer_bio()
2685 static void thin_defer_bio_with_throttle(struct thin_c *tc, struct bio *bio) in thin_defer_bio_with_throttle() argument
2690 thin_defer_bio(tc, bio); in thin_defer_bio_with_throttle()
2707 static void thin_hook_bio(struct thin_c *tc, struct bio *bio) in thin_hook_bio() argument
2709 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); in thin_hook_bio()
2721 static int thin_bio_map(struct dm_target *ti, struct bio *bio) in thin_bio_map() argument
2725 dm_block_t block = get_bio_block(tc, bio); in thin_bio_map()
2731 thin_hook_bio(tc, bio); in thin_bio_map()
2734 bio->bi_status = BLK_STS_DM_REQUEUE; in thin_bio_map()
2735 bio_endio(bio); in thin_bio_map()
2740 bio_io_error(bio); in thin_bio_map()
2744 if (op_is_flush(bio->bi_opf) || bio_op(bio) == REQ_OP_DISCARD) { in thin_bio_map()
2745 thin_defer_bio_with_throttle(tc, bio); in thin_bio_map()
2754 if (bio_detain(tc->pool, &key, bio, &virt_cell)) in thin_bio_map()
2784 if (bio_detain(tc->pool, &key, bio, &data_cell)) { in thin_bio_map()
2789 inc_all_io_entry(tc->pool, bio); in thin_bio_map()
2793 remap(tc, bio, result.block); in thin_bio_map()
2807 bio_io_error(bio); in thin_bio_map()
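
thin_bio_map() splits work between a map-time fast path and the worker thread: flushes and discards always go to the worker, while a plain bio whose mapping resolves cleanly can be remapped in the map function and handed back to dm core. A much-reduced sketch of that shape, with the metadata lookup and the prison-cell detaining elided:

#include <linux/device-mapper.h>

static int sketch_thin_bio_map(struct thin_c *tc, struct bio *bio)
{
        dm_block_t data_block = 0;      /* placeholder: lookup result */

        if (op_is_flush(bio->bi_opf) || bio_op(bio) == REQ_OP_DISCARD) {
                thin_defer_bio_with_throttle(tc, bio);
                return DM_MAPIO_SUBMITTED;      /* worker owns it now */
        }

        /*
         * dm_thin_find_block() lookup omitted; on a clean, unshared,
         * provisioned hit:
         */
        remap(tc, bio, data_block);
        return DM_MAPIO_REMAPPED;               /* dm core submits it */
}

DM_MAPIO_SUBMITTED tells dm core the target has taken ownership of the bio; DM_MAPIO_REMAPPED asks core to submit the now-remapped bio itself.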
3440 static int pool_map(struct dm_target *ti, struct bio *bio) in pool_map() argument
3449 bio_set_dev(bio, pt->data_dev->bdev); in pool_map()
4328 static int thin_map(struct dm_target *ti, struct bio *bio) in thin_map() argument
4330 bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector); in thin_map()
4332 return thin_bio_map(ti, bio); in thin_map()
4335 static int thin_endio(struct dm_target *ti, struct bio *bio, in thin_endio() argument
4339 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); in thin_endio()