Lines matching refs:bio: cross-reference hits for the identifier bio in drivers/md/dm.c. Each entry gives the file line number, the matched source line, the enclosing function, and the kind of use (argument, local, or member).
85 struct bio *bio; member
93 static inline struct dm_target_io *clone_to_tio(struct bio *clone) in clone_to_tio()
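clone_to_tio() (line 93) can be reconstructed in full from the container_of() use at line 119: every clone bio DM hands to a target is embedded in a struct dm_target_io, so the owning tio is recovered by pointer arithmetic alone:

    static inline struct dm_target_io *clone_to_tio(struct bio *clone)
    {
        return container_of(clone, struct dm_target_io, clone);
    }
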
98 void *dm_per_bio_data(struct bio *bio, size_t data_size) in dm_per_bio_data() argument
100 if (!dm_tio_flagged(clone_to_tio(bio), DM_TIO_INSIDE_DM_IO)) in dm_per_bio_data()
101 return (char *)bio - DM_TARGET_IO_BIO_OFFSET - data_size; in dm_per_bio_data()
102 return (char *)bio - DM_IO_BIO_OFFSET - data_size; in dm_per_bio_data()
106 struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size) in dm_bio_from_per_bio_data()
111 return (struct bio *)((char *)io + DM_IO_BIO_OFFSET); in dm_bio_from_per_bio_data()
113 return (struct bio *)((char *)io + DM_TARGET_IO_BIO_OFFSET); in dm_bio_from_per_bio_data()
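dm_per_bio_data() and its inverse dm_bio_from_per_bio_data() are the exported per-bio-data API for targets: the constructor reserves space with ti->per_io_data_size, and the offsets above locate that space directly in front of the clone bio, whether or not the tio is embedded in a dm_io. A minimal sketch of typical target-side use (the my_* names and struct are hypothetical):

    #include <linux/device-mapper.h>

    struct my_pb {                          /* hypothetical per-bio state */
        sector_t orig_sector;
    };

    static int my_ctr(struct dm_target *ti, unsigned int argc, char **argv)
    {
        /* Reserve per-bio space; DM core carves it out per clone. */
        ti->per_io_data_size = sizeof(struct my_pb);
        return 0;
    }

    static int my_map(struct dm_target *ti, struct bio *bio)
    {
        struct my_pb *pb = dm_per_bio_data(bio, sizeof(struct my_pb));

        pb->orig_sector = bio->bi_iter.bi_sector;
        /* A worker holding only pb can recover the bio later:
         *     bio = dm_bio_from_per_bio_data(pb, sizeof(struct my_pb));
         */
        return DM_MAPIO_REMAPPED;           /* after bio_set_dev(), elided */
    }
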
117 unsigned int dm_bio_get_target_bio_nr(const struct bio *bio) in dm_bio_get_target_bio_nr() argument
119 return container_of(bio, struct dm_target_io, clone)->target_bio_nr; in dm_bio_get_target_bio_nr()
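dm_bio_get_target_bio_nr() is only meaningful for targets that request duplicate bios (ti->num_flush_bios, ti->num_discard_bios, and friends): DM clones the bio that many times and tags each clone with its index. dm-stripe uses this to send one flush per backing device; a sketch of that pattern (the my_ctx layout is hypothetical):

    static int my_map(struct dm_target *ti, struct bio *bio)
    {
        struct my_ctx *mc = ti->private;    /* hypothetical context */

        if (bio->bi_opf & REQ_PREFLUSH) {
            /* ti->num_flush_bios clones arrive; route by index. */
            unsigned int nr = dm_bio_get_target_bio_nr(bio);

            bio_set_dev(bio, mc->leg[nr]->bdev);
            return DM_MAPIO_REMAPPED;
        }
        /* normal read/write remapping elided */
        return DM_MAPIO_REMAPPED;
    }
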
487 u64 dm_start_time_ns_from_clone(struct bio *bio) in dm_start_time_ns_from_clone() argument
489 return jiffies_to_nsecs(clone_to_tio(bio)->io->start_time); in dm_start_time_ns_from_clone()
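io->start_time is recorded in jiffies when the dm_io is set up, which is why dm_start_time_ns_from_clone() converts with jiffies_to_nsecs(); a target measuring per-bio latency should diff against the same clock. A hedged snippet for a target's ->end_io (threshold arbitrary):

    u64 lat = jiffies_to_nsecs(jiffies) - dm_start_time_ns_from_clone(bio);

    if (lat > 10 * NSEC_PER_MSEC)
        pr_debug("slow bio: %llu ns\n", lat);
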
493 static inline bool bio_is_flush_with_data(struct bio *bio) in bio_is_flush_with_data() argument
495 return ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size); in bio_is_flush_with_data()
498 static inline unsigned int dm_io_sectors(struct dm_io *io, struct bio *bio) in dm_io_sectors() argument
504 if (bio_is_flush_with_data(bio)) in dm_io_sectors()
508 return bio_sectors(bio); in dm_io_sectors()
513 struct bio *bio = io->orig_bio; in dm_io_acct() local
517 bdev_start_io_acct(bio->bi_bdev, bio_op(bio), in dm_io_acct()
520 bdev_end_io_acct(bio->bi_bdev, bio_op(bio), in dm_io_acct()
521 dm_io_sectors(io, bio), in dm_io_acct()
530 sector = bio_end_sector(bio) - io->sector_offset; in dm_io_acct()
532 sector = bio->bi_iter.bi_sector; in dm_io_acct()
534 dm_stats_account_io(&io->md->stats, bio_data_dir(bio), in dm_io_acct()
535 sector, dm_io_sectors(io, bio), in dm_io_acct()
545 static void dm_start_io_acct(struct dm_io *io, struct bio *clone) in dm_start_io_acct()
576 static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio, gfp_t gfp_mask) in alloc_io() argument
580 struct bio *clone; in alloc_io()
582 clone = bio_alloc_clone(NULL, bio, gfp_mask, &md->mempools->io_bs); in alloc_io()
597 io->orig_bio = bio; in alloc_io()
617 static struct bio *alloc_tio(struct clone_info *ci, struct dm_target *ti, in alloc_tio()
622 struct bio *clone; in alloc_tio()
630 clone = bio_alloc_clone(NULL, ci->bio, gfp_mask, in alloc_tio()
663 static void free_tio(struct bio *clone) in free_tio()
673 static void queue_io(struct mapped_device *md, struct bio *bio) in queue_io() argument
678 bio_list_add(&md->deferred, bio); in queue_io()
900 struct bio *bio = io->orig_bio; in dm_handle_requeue() local
903 (bio->bi_opf & REQ_POLLED)); in dm_handle_requeue()
910 if (bio->bi_opf & REQ_POLLED) { in dm_handle_requeue()
916 bio_clear_polled(bio); in dm_handle_requeue()
925 !WARN_ON_ONCE(dm_is_zone_write(md, bio))) || in dm_handle_requeue()
947 struct bio *bio = io->orig_bio; in __dm_io_complete() local
979 if (bio_is_flush_with_data(bio)) { in __dm_io_complete()
984 bio->bi_opf &= ~REQ_PREFLUSH; in __dm_io_complete()
985 queue_io(md, bio); in __dm_io_complete()
989 bio->bi_status = io_error; in __dm_io_complete()
990 bio_endio(bio); in __dm_io_complete()
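The scattered matches above conceal a two-phase flush: in __dm_io_complete(), a successful bio that was a flush with data is not ended but requeued with REQ_PREFLUSH stripped, so its payload is then processed as a plain write. Reassembled control flow (error and requeue branches around it omitted):

    if (bio_is_flush_with_data(bio)) {
        /* phase 1, the empty flush, is done; requeue for the data */
        bio->bi_opf &= ~REQ_PREFLUSH;
        queue_io(md, bio);
    } else {
        bio->bi_status = io_error;
        bio_endio(bio);
    }
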
1080 static bool swap_bios_limit(struct dm_target *ti, struct bio *bio) in swap_bios_limit() argument
1082 return unlikely((bio->bi_opf & REQ_SWAP) != 0) && unlikely(ti->limit_swap_bios); in swap_bios_limit()
1085 static void clone_endio(struct bio *bio) in clone_endio() argument
1087 blk_status_t error = bio->bi_status; in clone_endio()
1088 struct dm_target_io *tio = clone_to_tio(bio); in clone_endio()
1095 if (bio_op(bio) == REQ_OP_DISCARD && in clone_endio()
1096 !bdev_max_discard_sectors(bio->bi_bdev)) in clone_endio()
1098 else if (bio_op(bio) == REQ_OP_WRITE_ZEROES && in clone_endio()
1099 !bdev_write_zeroes_sectors(bio->bi_bdev)) in clone_endio()
1104 unlikely(bdev_is_zoned(bio->bi_bdev))) in clone_endio()
1105 dm_zone_endio(io, bio); in clone_endio()
1108 int r = endio(ti, bio, &error); in clone_endio()
1118 if (WARN_ON_ONCE(dm_is_zone_write(md, bio))) in clone_endio()
1137 likely(ti != NULL) && unlikely(swap_bios_limit(ti, bio))) in clone_endio()
1140 free_tio(bio); in clone_endio()
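The endio(ti, bio, &error) call at line 1108 invokes the target's optional ->end_io hook, whose return value steers clone_endio(): DM_ENDIO_DONE completes the clone, DM_ENDIO_REQUEUE hands it to the requeue path, DM_ENDIO_INCOMPLETE leaves it in flight. A minimal hook with a hypothetical retry policy:

    static int my_end_io(struct dm_target *ti, struct bio *bio,
                         blk_status_t *error)
    {
        /* hypothetical: let DM core requeue transient failures */
        if (*error == BLK_STS_RESOURCE)
            return DM_ENDIO_REQUEUE;
        return DM_ENDIO_DONE;
    }
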
1317 void dm_accept_partial_bio(struct bio *bio, unsigned int n_sectors) in dm_accept_partial_bio() argument
1319 struct dm_target_io *tio = clone_to_tio(bio); in dm_accept_partial_bio()
1321 unsigned int bio_sectors = bio_sectors(bio); in dm_accept_partial_bio()
1328 unlikely(bdev_is_zoned(bio->bi_bdev))) { in dm_accept_partial_bio()
1329 enum req_op op = bio_op(bio); in dm_accept_partial_bio()
1338 bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT; in dm_accept_partial_bio()
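dm_accept_partial_bio() is the exported hook behind the split accounting seen here: a target's ->map() may declare that it can service only the first n_sectors, and DM core trims the clone and resubmits the remainder itself. The canonical pattern, as in dm-snapshot or dm-thin (the 128-sector chunking is hypothetical; regular read/write bios only):

    static int my_map(struct dm_target *ti, struct bio *bio)
    {
        sector_t chunk_end = round_up(bio->bi_iter.bi_sector + 1, 128);
        unsigned int max = chunk_end - bio->bi_iter.bi_sector;

        if (bio_sectors(bio) > max)
            dm_accept_partial_bio(bio, max);

        /* remap as usual; DM core resubmits the tail on its own */
        return DM_MAPIO_REMAPPED;
    }
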
1359 void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone) in dm_submit_bio_remap()
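dm_submit_bio_remap() exists for targets that finish the remap from a worker yet still want DM core's accounting and polling to observe the submission. Such a target sets ti->accounts_remapped_io in its constructor and returns DM_MAPIO_SUBMITTED from ->map(); usage along the lines of dm-thin and dm-crypt (hedged sketch):

    /* in the constructor */
    ti->accounts_remapped_io = true;

    /* in the worker, once the remap is decided: tgt_clone may be NULL
     * when the original clone itself is submitted (dm-thin style), or a
     * further per-target clone (dm-crypt style). */
    dm_submit_bio_remap(bio, NULL);
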
1396 static void __map_bio(struct bio *clone) in __map_bio()
1464 io->sector_offset = bio_sectors(ci->bio); in setup_split_accounting()
1472 struct bio *bio; in alloc_multiple_bios() local
1481 bio = alloc_tio(ci, ti, bio_nr, len, in alloc_multiple_bios()
1483 if (!bio) in alloc_multiple_bios()
1486 bio_list_add(blist, bio); in alloc_multiple_bios()
1493 while ((bio = bio_list_pop(blist))) in alloc_multiple_bios()
1494 free_tio(bio); in alloc_multiple_bios()
1502 struct bio *clone; in __send_duplicate_bios()
1530 struct bio flush_bio; in __send_empty_flush()
1544 ci->bio = &flush_bio; in __send_empty_flush()
1571 struct bio *clone; in __send_empty_flush()
1593 bio_uninit(ci->bio); in __send_empty_flush()
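An empty flush has no data bio to clone from, so __send_empty_flush() fabricates a zero-length source on the stack, points ci->bio at it while one flush clone per target is allocated and submitted, then tears it down. The bracketing calls, reassembled (assumed flags as in current mainline dm.c):

    struct bio flush_bio;

    bio_init(&flush_bio, NULL, NULL, 0,
             REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC);
    ci->bio = &flush_bio;
    /* ... clones issued per target ... */
    bio_uninit(ci->bio);
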
1617 static bool is_abnormal_io(struct bio *bio) in is_abnormal_io() argument
1619 switch (bio_op(bio)) { in is_abnormal_io()
1642 switch (bio_op(ci->bio)) { in __process_abnormal_io()
1684 static inline struct dm_io **dm_poll_list_head(struct bio *bio) in dm_poll_list_head() argument
1686 return (struct dm_io **)&bio->bi_private; in dm_poll_list_head()
1689 static void dm_queue_poll_io(struct bio *bio, struct dm_io *io) in dm_queue_poll_io() argument
1691 struct dm_io **head = dm_poll_list_head(bio); in dm_queue_poll_io()
1693 if (!(bio->bi_opf & REQ_DM_POLL_LIST)) { in dm_queue_poll_io()
1694 bio->bi_opf |= REQ_DM_POLL_LIST; in dm_queue_poll_io()
1699 io->data = bio->bi_private; in dm_queue_poll_io()
1702 bio->bi_cookie = ~BLK_QC_T_NONE; in dm_queue_poll_io()
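The polling matches are one mechanism: for REQ_POLLED bios, DM threads the list of in-flight struct dm_io through the bio's bi_private (reinterpreted by dm_poll_list_head()), flags the bio with REQ_DM_POLL_LIST, and parks the original bi_private in io->data until dm_poll_bio() restores it. First-enqueue path, reassembled from the lines above:

    if (!(bio->bi_opf & REQ_DM_POLL_LIST)) {
        bio->bi_opf |= REQ_DM_POLL_LIST;
        /* bi_private now carries the dm_io list; stash its old value */
        io->data = bio->bi_private;
        /* fake cookie so the block layer keeps polling this bio */
        bio->bi_cookie = ~BLK_QC_T_NONE;
    }
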
1722 struct bio *clone; in __split_and_process_bio()
1737 ci->submit_as_polled = !!(ci->bio->bi_opf & REQ_POLLED); in __split_and_process_bio()
1740 if (ci->bio->bi_opf & REQ_ATOMIC && len != ci->sector_count) in __split_and_process_bio()
1745 if (unlikely(ci->bio->bi_opf & REQ_NOWAIT)) { in __split_and_process_bio()
1764 struct dm_table *map, struct bio *bio, bool is_abnormal) in init_clone_info() argument
1768 ci->bio = bio; in init_clone_info()
1771 ci->sector = bio->bi_iter.bi_sector; in init_clone_info()
1772 ci->sector_count = bio_sectors(bio); in init_clone_info()
1776 WARN_ON_ONCE(op_is_zone_mgmt(bio_op(bio)) && ci->sector_count)) in init_clone_info()
1781 static inline bool dm_zone_bio_needs_split(struct bio *bio) in dm_zone_bio_needs_split() argument
1786 switch (bio_op(bio)) { in dm_zone_bio_needs_split()
1802 return bio_needs_zone_write_plugging(bio) || bio_straddles_zones(bio); in dm_zone_bio_needs_split()
1805 static inline bool dm_zone_plug_bio(struct mapped_device *md, struct bio *bio) in dm_zone_plug_bio() argument
1807 if (!bio_needs_zone_write_plugging(bio)) in dm_zone_plug_bio()
1809 return blk_zone_plug_bio(bio, 0); in dm_zone_plug_bio()
1823 struct bio *clone; in __send_zone_reset_all_emulated()
1925 static inline bool dm_zone_bio_needs_split(struct bio *bio) in dm_zone_bio_needs_split() argument
1929 static inline bool dm_zone_plug_bio(struct mapped_device *md, struct bio *bio) in dm_zone_plug_bio() argument
1943 struct dm_table *map, struct bio *bio) in dm_split_and_process_bio() argument
1950 is_abnormal = is_abnormal_io(bio); in dm_split_and_process_bio()
1952 need_split = is_abnormal || dm_zone_bio_needs_split(bio); in dm_split_and_process_bio()
1965 bio = bio_split_to_limits(bio); in dm_split_and_process_bio()
1966 if (!bio) in dm_split_and_process_bio()
1974 if (static_branch_unlikely(&zoned_enabled) && dm_zone_plug_bio(md, bio)) in dm_split_and_process_bio()
1978 if (unlikely(bio->bi_opf & REQ_NOWAIT) && !is_abnormal) { in dm_split_and_process_bio()
1984 if (bio->bi_opf & REQ_PREFLUSH) { in dm_split_and_process_bio()
1985 bio_wouldblock_error(bio); in dm_split_and_process_bio()
1988 io = alloc_io(md, bio, GFP_NOWAIT); in dm_split_and_process_bio()
1991 bio_wouldblock_error(bio); in dm_split_and_process_bio()
1995 io = alloc_io(md, bio, GFP_NOIO); in dm_split_and_process_bio()
1997 init_clone_info(&ci, io, map, bio, is_abnormal); in dm_split_and_process_bio()
1999 if (bio->bi_opf & REQ_PREFLUSH) { in dm_split_and_process_bio()
2006 (bio_op(bio) == REQ_OP_ZONE_RESET_ALL)) { in dm_split_and_process_bio()
2018 bio_trim(bio, io->sectors, ci.sector_count); in dm_split_and_process_bio()
2019 trace_block_split(bio, bio->bi_iter.bi_sector); in dm_split_and_process_bio()
2020 bio_inc_remaining(bio); in dm_split_and_process_bio()
2021 submit_bio_noacct(bio); in dm_split_and_process_bio()
2039 dm_queue_poll_io(bio, io); in dm_split_and_process_bio()
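When a bio cannot be taken whole, dm_split_and_process_bio() allocates nothing for the tail: it trims the original down to the unprocessed remainder, bumps __bi_remaining so the first fragment's bio_endio() cannot complete it early, and feeds it back to the block layer as a fresh submission. The tail sequence from the matches above, in order:

    bio_trim(bio, io->sectors, ci.sector_count);
    trace_block_split(bio, bio->bi_iter.bi_sector);
    bio_inc_remaining(bio);
    submit_bio_noacct(bio);
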
2042 static void dm_submit_bio(struct bio *bio) in dm_submit_bio() argument
2044 struct mapped_device *md = bio->bi_bdev->bd_disk->private_data; in dm_submit_bio()
2052 bio_io_error(bio); in dm_submit_bio()
2058 if (bio->bi_opf & REQ_NOWAIT) in dm_submit_bio()
2059 bio_wouldblock_error(bio); in dm_submit_bio()
2060 else if (bio->bi_opf & REQ_RAHEAD) in dm_submit_bio()
2061 bio_io_error(bio); in dm_submit_bio()
2063 queue_io(md, bio); in dm_submit_bio()
2067 dm_split_and_process_bio(md, map, bio); in dm_submit_bio()
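dm_submit_bio() is the gendisk ->submit_bio entry point: it derives the mapped_device from bio->bi_bdev, and when the device is suspended (or the live table is gone) it fails fast only where it must, deferring everything else to md->deferred for dm_wq_work() (line 2806). Decision ladder, reassembled with the SRCU table lookup and no-table error path omitted:

    struct mapped_device *md = bio->bi_bdev->bd_disk->private_data;

    if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
        if (bio->bi_opf & REQ_NOWAIT)
            bio_wouldblock_error(bio);  /* may not block: -EAGAIN */
        else if (bio->bi_opf & REQ_RAHEAD)
            bio_io_error(bio);          /* readahead is best-effort */
        else
            queue_io(md, bio);          /* replayed after resume */
        return;
    }
    dm_split_and_process_bio(md, map, bio);
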
2085 static int dm_poll_bio(struct bio *bio, struct io_comp_batch *iob, in dm_poll_bio() argument
2088 struct dm_io **head = dm_poll_list_head(bio); in dm_poll_bio()
2094 if (!(bio->bi_opf & REQ_DM_POLL_LIST)) in dm_poll_bio()
2107 bio->bi_opf &= ~REQ_DM_POLL_LIST; in dm_poll_bio()
2108 bio->bi_private = list->data; in dm_poll_bio()
2126 bio->bi_opf |= REQ_DM_POLL_LIST; in dm_poll_bio()
2806 struct bio *bio; in dm_wq_work() local
2810 bio = bio_list_pop(&md->deferred); in dm_wq_work()
2813 if (!bio) in dm_wq_work()
2816 submit_bio_noacct(bio); in dm_wq_work()
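dm_wq_work() is the consumer half of queue_io() (line 673): it drains md->deferred under the same lock and pushes each bio back through submit_bio_noacct(), which re-enters dm_submit_bio() now that the device is live. Reassembled loop (lock and flag names per current dm.c):

    while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
        spin_lock_irq(&md->deferred_lock);
        bio = bio_list_pop(&md->deferred);
        spin_unlock_irq(&md->deferred_lock);

        if (!bio)
            break;
        submit_bio_noacct(bio);
    }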