Lines matching refs:clone
93 static inline struct dm_target_io *clone_to_tio(struct bio *clone) in clone_to_tio() argument
95 return container_of(clone, struct dm_target_io, clone); in clone_to_tio()
119 return container_of(bio, struct dm_target_io, clone)->target_bio_nr; in dm_bio_get_target_bio_nr()
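clone_to_tio() (line 93) and dm_bio_get_target_bio_nr() (line 119) are both the container_of() pattern: the struct bio that block-layer callbacks hand around is embedded inside struct dm_target_io, so stepping back by the member offset recovers the wrapper. A minimal userspace sketch of the same technique, with mock stand-ins for the kernel types (the real structs have many more fields):

#include <stddef.h>
#include <stdio.h>

/* Mock stand-ins; struct bio and struct dm_target_io are kernel types. */
struct bio { int dummy; };

struct dm_target_io {
	unsigned int target_bio_nr;
	struct bio clone;		/* embedded, as in the kernel */
};

/* Userspace container_of(): step back from a member to its wrapper. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct dm_target_io *clone_to_tio(struct bio *clone)
{
	return container_of(clone, struct dm_target_io, clone);
}

int main(void)
{
	struct dm_target_io tio = { .target_bio_nr = 3 };
	struct bio *clone = &tio.clone;	/* all a callback ever sees */

	/* Round-trip: from the embedded bio back to its dm_target_io. */
	printf("target_bio_nr = %u\n", clone_to_tio(clone)->target_bio_nr);
	return 0;
}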
543 static void dm_start_io_acct(struct dm_io *io, struct bio *clone) in dm_start_io_acct() argument
552 if (!clone || likely(dm_tio_is_normal(clone_to_tio(clone)))) { in dm_start_io_acct()
578 struct bio *clone; in alloc_io() local
580 clone = bio_alloc_clone(NULL, bio, gfp_mask, &md->mempools->io_bs); in alloc_io()
581 if (unlikely(!clone)) in alloc_io()
583 tio = clone_to_tio(clone); in alloc_io()
612 bio_put(&io->tio.clone); in free_io()
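alloc_io() (lines 578-583) never allocates struct dm_io directly: bio_alloc_clone() against md->mempools->io_bs returns a bio whose bioset front-pad already holds the dm_target_io and its enclosing dm_io, so a single mempool allocation covers all three objects, and free_io() (line 612) releases everything by dropping the embedded clone. A rough userspace analog of that single-allocation layout (mock types; the real front-pad is sized when the bioset is created):

#include <stddef.h>
#include <stdlib.h>
#include <stdio.h>

struct bio { int dummy; };

/* Mock nesting: dm_io wraps dm_target_io wraps the clone bio. */
struct dm_target_io { struct bio clone; };
struct dm_io { unsigned int magic; struct dm_target_io tio; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Analog of alloc_io(): one allocation covers io, tio and clone;
 * the caller gets the embedded bio back. */
static struct bio *alloc_clone_with_io(void)
{
	struct dm_io *io = calloc(1, sizeof(*io));

	return io ? &io->tio.clone : NULL;
}

/* Analog of free_io()'s bio_put(&io->tio.clone): dropping the clone
 * releases the whole dm_io, because they are one allocation. */
static void put_clone(struct bio *clone)
{
	free(container_of(clone, struct dm_io, tio.clone));
}

int main(void)
{
	struct bio *clone = alloc_clone_with_io();

	if (clone)
		put_clone(clone);
	puts("io, tio and clone shared one allocation");
	return 0;
}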
620 struct bio *clone; in alloc_tio() local
626 clone = &tio->clone; in alloc_tio()
628 clone = bio_alloc_clone(NULL, ci->bio, gfp_mask, in alloc_tio()
630 if (!clone) in alloc_tio()
634 clone->bi_opf &= ~REQ_DM_POLL_LIST; in alloc_tio()
636 tio = clone_to_tio(clone); in alloc_tio()
648 clone->bi_bdev = md->disk->part0; in alloc_tio()
650 bio_set_dev(clone, md->disk->part0); in alloc_tio()
653 clone->bi_iter.bi_size = to_bytes(*len); in alloc_tio()
654 if (bio_integrity(clone)) in alloc_tio()
655 bio_integrity_trim(clone); in alloc_tio()
658 return clone; in alloc_tio()
661 static void free_tio(struct bio *clone) in free_tio() argument
663 if (dm_tio_flagged(clone_to_tio(clone), DM_TIO_INSIDE_DM_IO)) in free_tio()
665 bio_put(clone); in free_tio()
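alloc_tio() has two paths: the first tio of a dm_io reuses the embedded one (line 626), while additional tios get their own bio_alloc_clone() (line 628); the clone is then retargeted at the mapped device's part0 (lines 648-650) and trimmed, along with any integrity metadata, to the requested length (lines 653-655). The DM_TIO_INSIDE_DM_IO flag records which path was taken so free_tio() (lines 661-665) only calls bio_put() on stand-alone tios. A userspace sketch of that conditional-ownership scheme (mock types and flag value):

#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>

struct bio { int dummy; };

#define DM_TIO_INSIDE_DM_IO (1u << 0)	/* mock of the kernel flag */

struct dm_target_io { unsigned int flags; struct bio clone; };
struct dm_io { struct dm_target_io tio; bool tio_used; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Analog of alloc_tio(): the first tio reuses the one embedded in
 * dm_io, later ones get their own allocation. */
static struct bio *alloc_tio(struct dm_io *io)
{
	struct dm_target_io *tio;

	if (!io->tio_used) {
		io->tio_used = true;
		tio = &io->tio;
		tio->flags = DM_TIO_INSIDE_DM_IO;
	} else {
		tio = calloc(1, sizeof(*tio));
		if (!tio)
			return NULL;
	}
	return &tio->clone;
}

/* Analog of free_tio(): an embedded tio dies with its dm_io, so only
 * the stand-alone case owns (and frees) its allocation. */
static void free_tio(struct bio *clone)
{
	struct dm_target_io *tio =
		container_of(clone, struct dm_target_io, clone);

	if (tio->flags & DM_TIO_INSIDE_DM_IO)
		return;
	free(tio);
}

int main(void)
{
	struct dm_io io = { 0 };
	struct bio *first = alloc_tio(&io);	/* embedded, not freed */
	struct bio *second = alloc_tio(&io);	/* heap, freed below */

	free_tio(first);
	if (second)
		free_tio(second);
	return 0;
}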
1371 void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone) in dm_submit_bio_remap() argument
1373 struct dm_target_io *tio = clone_to_tio(clone); in dm_submit_bio_remap()
1378 tgt_clone = clone; in dm_submit_bio_remap()
1384 dm_start_io_acct(io, clone); in dm_submit_bio_remap()
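dm_submit_bio_remap() accepts a NULL tgt_clone and falls back to the original clone (line 1378), which is why __map_bio() at line 1446 can simply pass NULL: targets that clone again supply their own bio, everyone else reuses the one they were handed. The NULL-means-default convention as a standalone sketch (names here are illustrative, not the kernel API):

#include <stdio.h>

/* Sketch of the defaulting done by dm_submit_bio_remap(clone, NULL). */
static void submit_remap(const char *clone, const char *tgt_clone)
{
	if (!tgt_clone)
		tgt_clone = clone;	/* fall back to the original clone */
	printf("submitting %s\n", tgt_clone);
}

int main(void)
{
	submit_remap("clone", NULL);		/* common case */
	submit_remap("clone", "target-clone");	/* target re-cloned */
	return 0;
}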
1408 static void __map_bio(struct bio *clone) in __map_bio() argument
1410 struct dm_target_io *tio = clone_to_tio(clone); in __map_bio()
1416 clone->bi_end_io = clone_endio; in __map_bio()
1421 tio->old_sector = clone->bi_iter.bi_sector; in __map_bio()
1424 unlikely(swap_bios_limit(ti, clone))) { in __map_bio()
1433 r = linear_map(ti, clone); in __map_bio()
1435 r = stripe_map(ti, clone); in __map_bio()
1437 r = ti->type->map(ti, clone); in __map_bio()
1443 dm_start_io_acct(io, clone); in __map_bio()
1446 dm_submit_bio_remap(clone, NULL); in __map_bio()
1451 unlikely(swap_bios_limit(ti, clone))) in __map_bio()
1453 free_tio(clone); in __map_bio()
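Lines 1433-1437 are a devirtualization: before falling back to the indirect ti->type->map() call, __map_bio() compares the pointer against the built-in linear and stripe targets and calls them directly, presumably to spare the two hottest targets the cost of an indirect branch. The dispatch pattern in isolation (mock types):

#include <stdio.h>

struct target;
typedef int (*map_fn)(struct target *ti, int sector);

struct target { map_fn map; int start; };

static int linear_map(struct target *ti, int sector)
{
	return ti->start + sector;	/* trivial linear remap */
}

/* Mirrors __map_bio(): test the function pointer first so the common
 * target gets a direct call; anything else takes the indirect path. */
static int do_map(struct target *ti, int sector)
{
	if (ti->map == linear_map)
		return linear_map(ti, sector);
	return ti->map(ti, sector);
}

int main(void)
{
	struct target t = { .map = linear_map, .start = 2048 };

	printf("mapped to sector %d\n", do_map(&t, 8));
	return 0;
}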
1515 struct bio *clone; in __send_duplicate_bios() local
1530 while ((clone = bio_list_pop(&blist))) { in __send_duplicate_bios()
1532 dm_tio_set_flag(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO); in __send_duplicate_bios()
1533 __map_bio(clone); in __send_duplicate_bios()
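__send_duplicate_bios() (lines 1515-1533) drains a local bio_list, marking each popped clone as a duplicate before mapping it. The kernel's bio_list is an intrusive singly linked list threaded through bio->bi_next; a minimal mock of the pop-until-empty loop (the real struct bio_list also keeps a tail pointer, omitted here):

#include <stddef.h>
#include <stdio.h>

struct bio { struct bio *bi_next; int nr; };
struct bio_list { struct bio *head; };

static struct bio *bio_list_pop(struct bio_list *bl)
{
	struct bio *b = bl->head;

	if (b) {
		bl->head = b->bi_next;
		b->bi_next = NULL;
	}
	return b;
}

int main(void)
{
	struct bio b1 = { .nr = 1 }, b0 = { .bi_next = &b1, .nr = 0 };
	struct bio_list blist = { .head = &b0 };
	struct bio *clone;

	/* Same drain loop as __send_duplicate_bios(). */
	while ((clone = bio_list_pop(&blist)))
		printf("mapping duplicate %d\n", clone->nr);
	return 0;
}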
1555 ci->io->tio.clone.bi_iter.bi_size = 0; in __send_empty_flush()
1580 struct bio *clone; in __send_empty_flush() local
1588 clone = alloc_tio(ci, NULL, 0, &len, GFP_NOIO); in __send_empty_flush()
1590 bio_set_dev(clone, dd->dm_dev->bdev); in __send_empty_flush()
1591 clone->bi_end_io = clone_endio; in __send_empty_flush()
1592 dm_submit_bio_remap(clone, NULL); in __send_empty_flush()
1731 struct bio *clone; in __split_and_process_bio() local
1755 clone = alloc_tio(ci, ti, 0, &len, GFP_NOWAIT); in __split_and_process_bio()
1756 if (unlikely(!clone)) in __split_and_process_bio()
1759 clone = alloc_tio(ci, ti, 0, &len, GFP_NOIO); in __split_and_process_bio()
1761 __map_bio(clone); in __split_and_process_bio()
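Lines 1755-1759 show a two-step allocation: a GFP_NOWAIT attempt that is allowed to fail fast, then a GFP_NOIO retry that may sleep. A userspace sketch of the same first-try/fallback shape, with a mock allocator standing in for the mempool (the gfp values and failure behavior here are simulated, not the kernel's):

#include <stdlib.h>
#include <stdio.h>

enum gfp { GFP_NOWAIT, GFP_NOIO };

/* Mock gfp semantics: NOWAIT may fail under pressure, NOIO may block
 * until it succeeds (here it simply always succeeds). */
static void *mock_alloc(size_t size, enum gfp gfp)
{
	if (gfp == GFP_NOWAIT && rand() % 2)
		return NULL;	/* simulated allocation pressure */
	return malloc(size);
}

int main(void)
{
	/* Same shape as __split_and_process_bio(): opportunistic
	 * non-sleeping attempt first, sleeping fallback second. */
	void *clone = mock_alloc(64, GFP_NOWAIT);

	if (!clone)
		clone = mock_alloc(64, GFP_NOIO);
	printf("got clone at %p\n", clone);
	free(clone);
	return 0;
}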
1813 struct bio *clone; in __send_zone_reset_all_emulated() local
1856 clone = bio_list_pop(&blist); in __send_zone_reset_all_emulated()
1857 clone->bi_opf &= ~REQ_OP_MASK; in __send_zone_reset_all_emulated()
1858 clone->bi_opf |= REQ_OP_ZONE_RESET | REQ_SYNC; in __send_zone_reset_all_emulated()
1859 clone->bi_iter.bi_sector = sector; in __send_zone_reset_all_emulated()
1860 clone->bi_iter.bi_size = 0; in __send_zone_reset_all_emulated()
1861 __map_bio(clone); in __send_zone_reset_all_emulated()
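Lines 1857-1860 retarget an already-allocated bio: the op field is masked out of bi_opf, REQ_OP_ZONE_RESET | REQ_SYNC is installed, and the iterator is pointed at the zone's start sector with a zero size (a zone reset carries no payload). The bit manipulation in isolation, using mock values in place of the kernel's req_op encoding:

#include <stdio.h>

/* Mock encoding: low bits carry the op, high bits carry flags.
 * Numeric values are illustrative, not blk_types.h's. */
#define REQ_OP_BITS		8
#define REQ_OP_MASK		((1u << REQ_OP_BITS) - 1)
#define REQ_OP_WRITE		1u
#define REQ_OP_ZONE_RESET	13u
#define REQ_SYNC		(1u << 11)

int main(void)
{
	unsigned int bi_opf = REQ_OP_WRITE;

	/* Same rewrite as __send_zone_reset_all_emulated(): clear the
	 * op field, then install the new op plus REQ_SYNC. */
	bi_opf &= ~REQ_OP_MASK;
	bi_opf |= REQ_OP_ZONE_RESET | REQ_SYNC;

	printf("op=%u sync=%d\n", bi_opf & REQ_OP_MASK,
	       !!(bi_opf & REQ_SYNC));
	return 0;
}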
2063 bio_poll(&io->tio.clone, iob, flags); in dm_poll_dm_io()