Lines matching refs: clone (identifier cross-reference over the device-mapper core, drivers/md/dm.c)

93 static inline struct dm_target_io *clone_to_tio(struct bio *clone)  in clone_to_tio()  argument
95 return container_of(clone, struct dm_target_io, clone); in clone_to_tio()
119 return container_of(bio, struct dm_target_io, clone)->target_bio_nr; in dm_bio_get_target_bio_nr()
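
clone_to_tio() and dm_bio_get_target_bio_nr() both lean on the same layout trick: the clone bio is embedded as a member of struct dm_target_io, so container_of() can step from the member pointer back to the enclosing struct without any lookup table. A minimal userspace sketch of the pattern, with trimmed stand-in structs (the real layouts live in drivers/md/dm-core.h):

    #include <stddef.h>
    #include <stdio.h>

    /* Userspace re-creation of the kernel's container_of(): subtract
     * the member's offset from the member pointer to recover the
     * enclosing struct. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct bio { int dummy; };            /* stand-in for struct bio */

    struct dm_target_io {                 /* trimmed stand-in */
        unsigned int target_bio_nr;
        struct bio clone;                 /* embedded clone bio */
    };

    static struct dm_target_io *clone_to_tio(struct bio *clone)
    {
        return container_of(clone, struct dm_target_io, clone);
    }

    int main(void)
    {
        struct dm_target_io tio = { .target_bio_nr = 3 };
        struct bio *clone = &tio.clone;   /* only the bio is handed out */

        printf("%u\n", clone_to_tio(clone)->target_bio_nr);  /* prints 3 */
        return 0;
    }
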
545 static void dm_start_io_acct(struct dm_io *io, struct bio *clone) in dm_start_io_acct() argument
554 if (!clone || likely(dm_tio_is_normal(clone_to_tio(clone)))) { in dm_start_io_acct()
580 struct bio *clone; in alloc_io() local
582 clone = bio_alloc_clone(NULL, bio, gfp_mask, &md->mempools->io_bs); in alloc_io()
583 if (unlikely(!clone)) in alloc_io()
585 tio = clone_to_tio(clone); in alloc_io()
614 bio_put(&io->tio.clone); in free_io()
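
alloc_io() clones the incoming bio out of md->mempools->io_bs, a bio_set whose allocations carry enough front padding that the struct dm_io (with its first dm_target_io embedded) sits directly in front of the returned bio; that is why clone_to_tio() works on the result, and why free_io() can release everything with the single bio_put(&io->tio.clone) above. A hedged userspace analogue of that ownership scheme, with malloc standing in for the bioset:

    #include <stddef.h>
    #include <stdlib.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct bio { int dummy; };

    struct dm_target_io {
        struct bio clone;                 /* embedded clone bio */
    };

    struct dm_io {
        int status;
        struct dm_target_io tio;          /* first tio lives inside the io */
    };

    /* One allocation carries io + tio + bio; callers only see the bio. */
    static struct bio *alloc_io_like(void)
    {
        struct dm_io *io = calloc(1, sizeof(*io));

        return io ? &io->tio.clone : NULL;
    }

    /* Releasing the bio releases the whole front-padded block, which is
     * what free_io()'s bio_put(&io->tio.clone) does against the bioset. */
    static void free_io_like(struct bio *clone)
    {
        struct dm_target_io *tio =
            container_of(clone, struct dm_target_io, clone);

        free(container_of(tio, struct dm_io, tio));
    }

    int main(void)
    {
        struct bio *clone = alloc_io_like();

        if (clone)
            free_io_like(clone);
        return 0;
    }
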
622 struct bio *clone; in alloc_tio() local
628 clone = &tio->clone; in alloc_tio()
630 clone = bio_alloc_clone(NULL, ci->bio, gfp_mask, in alloc_tio()
632 if (!clone) in alloc_tio()
636 clone->bi_opf &= ~REQ_DM_POLL_LIST; in alloc_tio()
638 tio = clone_to_tio(clone); in alloc_tio()
650 clone->bi_bdev = md->disk->part0; in alloc_tio()
652 bio_set_dev(clone, md->disk->part0); in alloc_tio()
655 clone->bi_iter.bi_size = to_bytes(*len); in alloc_tio()
656 if (bio_integrity(clone)) in alloc_tio()
657 bio_integrity_trim(clone); in alloc_tio()
660 return clone; in alloc_tio()
663 static void free_tio(struct bio *clone) in free_tio() argument
665 if (dm_tio_flagged(clone_to_tio(clone), DM_TIO_INSIDE_DM_IO)) in free_tio()
667 bio_put(clone); in free_tio()
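
alloc_tio() has two paths: the first clone of an io reuses the dm_target_io already embedded in the dm_io (the clone = &tio->clone branch above, flagged DM_TIO_INSIDE_DM_IO), while any further clones come from bio_alloc_clone() against the bioset; free_tio() honours the same split and only bio_put()s clones that were actually allocated. A hedged sketch of that ownership rule, with a simplified "first call reuses the embedded slot" condition standing in for the kernel's check:

    #include <stdbool.h>
    #include <stdlib.h>

    struct tio {
        bool inside_dm_io;                /* mirrors DM_TIO_INSIDE_DM_IO */
    };

    struct io {
        bool embedded_used;
        struct tio embedded;              /* first tio reuses this slot */
    };

    static struct tio *alloc_tio_like(struct io *io)
    {
        struct tio *tio;

        if (!io->embedded_used) {         /* first clone: no allocation */
            io->embedded_used = true;
            tio = &io->embedded;
            tio->inside_dm_io = true;
        } else {                          /* extra clones: real allocation */
            tio = calloc(1, sizeof(*tio));
        }
        return tio;
    }

    static void free_tio_like(struct tio *tio)
    {
        if (!tio || tio->inside_dm_io)    /* owned by the io, freed with it */
            return;
        free(tio);                        /* mirrors bio_put(clone) */
    }

    int main(void)
    {
        struct io io = { 0 };
        struct tio *first = alloc_tio_like(&io);  /* embedded slot */
        struct tio *extra = alloc_tio_like(&io);  /* heap-backed */

        free_tio_like(first);             /* no-op by design */
        free_tio_like(extra);             /* actually freed */
        return 0;
    }
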
1359 void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone) in dm_submit_bio_remap() argument
1361 struct dm_target_io *tio = clone_to_tio(clone); in dm_submit_bio_remap()
1366 tgt_clone = clone; in dm_submit_bio_remap()
1372 dm_start_io_acct(io, clone); in dm_submit_bio_remap()
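
dm_submit_bio_remap() is the hook a target uses to submit a clone itself, typically after its ->map() returned DM_MAPIO_SUBMITTED; passing NULL for tgt_clone means "submit the tio's own clone", and I/O accounting is started here so it is not skipped on that path. A small hedged sketch of the NULL default and the accounting-before-submit order, everything stubbed:

    #include <stdio.h>

    struct bio { const char *label; };

    static void start_acct(struct bio *b) { printf("acct %s\n", b->label); }
    static void submit(struct bio *b)     { printf("submit %s\n", b->label); }

    /* tgt_clone may be a deeper clone made by the target, or NULL to
     * submit the tio's clone directly, as in dm_submit_bio_remap(). */
    static void submit_remap_like(struct bio *clone, struct bio *tgt_clone)
    {
        if (!tgt_clone)
            tgt_clone = clone;            /* mirrors "tgt_clone = clone" */

        start_acct(clone);                /* mirrors dm_start_io_acct() */
        submit(tgt_clone);
    }

    int main(void)
    {
        struct bio clone = { "clone" };

        submit_remap_like(&clone, NULL);  /* the common NULL case */
        return 0;
    }
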
1396 static void __map_bio(struct bio *clone) in __map_bio() argument
1398 struct dm_target_io *tio = clone_to_tio(clone); in __map_bio()
1404 clone->bi_end_io = clone_endio; in __map_bio()
1409 tio->old_sector = clone->bi_iter.bi_sector; in __map_bio()
1412 unlikely(swap_bios_limit(ti, clone))) { in __map_bio()
1421 r = linear_map(ti, clone); in __map_bio()
1423 r = stripe_map(ti, clone); in __map_bio()
1425 r = ti->type->map(ti, clone); in __map_bio()
1431 dm_start_io_acct(io, clone); in __map_bio()
1434 dm_submit_bio_remap(clone, NULL); in __map_bio()
1439 unlikely(swap_bios_limit(ti, clone))) in __map_bio()
1441 free_tio(clone); in __map_bio()
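
__map_bio() wires up clone->bi_end_io, records the pre-remap sector, calls the target's ->map() (with direct-call fast paths for linear_map and stripe_map), and then dispatches on the DM_MAPIO_* return code: SUBMITTED means the target now owns the clone, REMAPPED means DM must account and submit it, and KILL/REQUEUE mean the clone is released via free_tio() and the original io is completed with an error or requeue status. A hedged userspace sketch of that dispatch; only the DM_MAPIO_* names are taken from include/linux/device-mapper.h, everything else is a stub:

    #include <stdio.h>

    struct bio { int id; };

    enum dm_mapio {
        DM_MAPIO_SUBMITTED,   /* target took the clone and submits it */
        DM_MAPIO_REMAPPED,    /* target only retargeted it; DM submits */
        DM_MAPIO_REQUEUE,     /* drop the clone, retry the original */
        DM_MAPIO_KILL,        /* drop the clone, fail the original */
    };

    static void start_acct(struct bio *c) { printf("acct %d\n", c->id); }
    static void submit(struct bio *c)     { printf("submit %d\n", c->id); }
    static void drop(struct bio *c)       { printf("free %d\n", c->id); }

    static void dispatch_map_result(enum dm_mapio r, struct bio *clone)
    {
        switch (r) {
        case DM_MAPIO_SUBMITTED:
            break;                        /* nothing left to do here */
        case DM_MAPIO_REMAPPED:
            start_acct(clone);            /* mirrors dm_start_io_acct() */
            submit(clone);                /* mirrors dm_submit_bio_remap(clone, NULL) */
            break;
        case DM_MAPIO_REQUEUE:
        case DM_MAPIO_KILL:
            drop(clone);                  /* mirrors free_tio(clone) */
            break;
        }
    }

    int main(void)
    {
        struct bio b = { .id = 1 };

        dispatch_map_result(DM_MAPIO_REMAPPED, &b);
        return 0;
    }
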
1502 struct bio *clone; in __send_duplicate_bios() local
1517 while ((clone = bio_list_pop(&blist))) { in __send_duplicate_bios()
1519 dm_tio_set_flag(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO); in __send_duplicate_bios()
1520 __map_bio(clone); in __send_duplicate_bios()
1546 ci->io->tio.clone.bi_iter.bi_size = 0; in __send_empty_flush()
1571 struct bio *clone; in __send_empty_flush() local
1579 clone = alloc_tio(ci, NULL, 0, &len, GFP_NOIO); in __send_empty_flush()
1581 bio_set_dev(clone, dd->dm_dev->bdev); in __send_empty_flush()
1582 clone->bi_end_io = clone_endio; in __send_empty_flush()
1583 dm_submit_bio_remap(clone, NULL); in __send_empty_flush()
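
In the second __send_empty_flush() hunk the flush is fanned out by hand: a flush carries no payload (the earlier hunk even forces bi_size to 0), so what appears to be a walk of the table's devices gives each underlying bdev its own zero-sized clone, sets its end_io, and submits it. A hedged stand-in for that loop:

    #include <stdio.h>

    struct dev { const char *name; };

    struct bio {
        const struct dev *bdev;           /* stands in for bi_bdev */
        unsigned int size;                /* stands in for bi_iter.bi_size */
    };

    static void submit(struct bio *b)
    {
        printf("flush -> %s (size %u)\n", b->bdev->name, b->size);
    }

    int main(void)
    {
        const struct dev devs[] = { { "sda" }, { "sdb" } };
        struct bio clones[2];

        for (int i = 0; i < 2; i++) {
            clones[i].bdev = &devs[i];    /* mirrors bio_set_dev(clone, ...) */
            clones[i].size = 0;           /* a flush has no data */
            submit(&clones[i]);           /* mirrors dm_submit_bio_remap(clone, NULL) */
        }
        return 0;
    }
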
1722 struct bio *clone; in __split_and_process_bio() local
1749 clone = alloc_tio(ci, ti, 0, &len, GFP_NOWAIT); in __split_and_process_bio()
1750 if (unlikely(!clone)) in __split_and_process_bio()
1753 clone = alloc_tio(ci, ti, 0, &len, GFP_NOIO); in __split_and_process_bio()
1755 __map_bio(clone); in __split_and_process_bio()
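
__split_and_process_bio() allocates its clone opportunistically: first with GFP_NOWAIT, which never sleeps but may simply fail, and only on failure with GFP_NOIO, which may sleep but is forbidden from recursing into further I/O, exactly what is needed deep in the submission path. A hedged userspace model of the two-step pattern:

    #include <stdbool.h>
    #include <stdlib.h>

    enum gfp { GFP_NOWAIT, GFP_NOIO };    /* names only; values are kernel-side */

    static bool under_pressure = true;    /* simulated memory pressure */

    static void *try_alloc(size_t n, enum gfp mode)
    {
        if (mode == GFP_NOWAIT && under_pressure)
            return NULL;                  /* a NOWAIT attempt may just fail */
        return malloc(n);                 /* NOIO may block, but succeeds */
    }

    static void *alloc_clone_like(size_t n)
    {
        void *p = try_alloc(n, GFP_NOWAIT);   /* fast path, never sleeps */

        if (!p)                               /* mirrors if (unlikely(!clone)) */
            p = try_alloc(n, GFP_NOIO);       /* slow path, may sleep */
        return p;
    }

    int main(void)
    {
        free(alloc_clone_like(64));
        return 0;
    }
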
1823 struct bio *clone; in __send_zone_reset_all_emulated() local
1866 clone = bio_list_pop(&blist); in __send_zone_reset_all_emulated()
1867 clone->bi_opf &= ~REQ_OP_MASK; in __send_zone_reset_all_emulated()
1868 clone->bi_opf |= REQ_OP_ZONE_RESET | REQ_SYNC; in __send_zone_reset_all_emulated()
1869 clone->bi_iter.bi_sector = sector; in __send_zone_reset_all_emulated()
1870 clone->bi_iter.bi_size = 0; in __send_zone_reset_all_emulated()
1871 __map_bio(clone); in __send_zone_reset_all_emulated()
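
__send_zone_reset_all_emulated() covers devices without a native "reset all zones" operation: it pops pre-built clones off a bio list and retargets each into a single-zone reset by rewriting the op bits of bi_opf, aiming bi_sector at the zone start, and zeroing the size before mapping it. A hedged sketch of that rewrite; the flag values below are illustrative, not the kernel's (those live in include/linux/blk_types.h):

    #include <stdio.h>

    /* Illustrative values; the real ones are in blk_types.h. */
    #define REQ_OP_MASK        0xffu      /* low bits hold the op code */
    #define REQ_OP_ZONE_RESET  0x0du      /* illustrative op code */
    #define REQ_SYNC           (1u << 11) /* illustrative flag bit */

    struct bio {
        unsigned int bi_opf;
        unsigned long long bi_sector;     /* stands in for bi_iter.bi_sector */
        unsigned int bi_size;             /* stands in for bi_iter.bi_size */
    };

    static void retarget_to_zone_reset(struct bio *clone,
                                       unsigned long long zone_start)
    {
        clone->bi_opf &= ~REQ_OP_MASK;                  /* drop the old op */
        clone->bi_opf |= REQ_OP_ZONE_RESET | REQ_SYNC;  /* single-zone reset */
        clone->bi_sector = zone_start;                  /* aim at this zone */
        clone->bi_size = 0;                             /* no payload */
    }

    int main(void)
    {
        struct bio clone = { .bi_opf = 0x01, .bi_size = 4096 };

        retarget_to_zone_reset(&clone, 2048);
        printf("opf=%#x sector=%llu size=%u\n",
               clone.bi_opf, clone.bi_sector, clone.bi_size);
        return 0;
    }
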
2079 bio_poll(&io->tio.clone, iob, flags); in dm_poll_dm_io()