Lines matching references to bio (drivers/md/dm-raid1.c, the device-mapper mirror target)

121 static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)  in queue_bio()  argument
130 bio_list_add(bl, bio); in queue_bio()
140 struct bio *bio; in dispatch_bios() local
142 while ((bio = bio_list_pop(bio_list))) in dispatch_bios()
143 queue_bio(ms, bio, WRITE); in dispatch_bios()
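
The first group (queue_bio, dispatch_bios) is the queueing plumbing: queue_bio() appends an incoming bio to the mirror set's per-direction list and wakes the worker thread, and dispatch_bios(), used as a bio_list dispatch callback, drains a whole list back through queue_bio() as writes. A sketch reassembled from the listed lines; the locking and the worker wakeup inside queue_bio() are not in the listing and are assumptions here:

static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
{
        unsigned long flags;
        struct bio_list *bl;

        /* Assumed: ms->lock protects the reads/writes lists. */
        spin_lock_irqsave(&ms->lock, flags);
        bl = (rw == WRITE) ? &ms->writes : &ms->reads;
        bio_list_add(bl, bio);
        spin_unlock_irqrestore(&ms->lock, flags);

        /* Assumed: the worker thread is woken here to drain the lists. */
}

static void dispatch_bios(void *context, struct bio_list *bio_list)
{
        struct mirror_set *ms = context;
        struct bio *bio;

        while ((bio = bio_list_pop(bio_list)))
                queue_bio(ms, bio, WRITE);
}
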
163 static struct mirror *bio_get_m(struct bio *bio) in bio_get_m() argument
165 return (struct mirror *) bio->bi_next; in bio_get_m()
168 static void bio_set_m(struct bio *bio, struct mirror *m) in bio_set_m() argument
170 bio->bi_next = (struct bio *) m; in bio_set_m()
437 static int mirror_available(struct mirror_set *ms, struct bio *bio) in mirror_available() argument
440 region_t region = dm_rh_bio_to_region(ms->rh, bio); in mirror_available()
443 return choose_mirror(ms, bio->bi_iter.bi_sector) ? 1 : 0; in mirror_available()
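
mirror_available() decides whether a failed read can be retried on a different leg: only if the dirty log says the region is in sync and choose_mirror() can still find a live mirror for that sector. A sketch; the listing shows the region lookup and the choose_mirror() test, while fetching the log via dm_rh_dirty_log() and the in_sync query are assumptions consistent with the dm-region-hash/dirty-log API:

static int mirror_available(struct mirror_set *ms, struct bio *bio)
{
        struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);     /* assumed */
        region_t region = dm_rh_bio_to_region(ms->rh, bio);

        if (log->type->in_sync(log, region, 0))
                return choose_mirror(ms, bio->bi_iter.bi_sector) ? 1 : 0;

        return 0;
}
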
451 static sector_t map_sector(struct mirror *m, struct bio *bio) in map_sector() argument
453 if (unlikely(!bio->bi_iter.bi_size)) in map_sector()
455 return m->offset + dm_target_offset(m->ms->ti, bio->bi_iter.bi_sector); in map_sector()
458 static void map_bio(struct mirror *m, struct bio *bio) in map_bio() argument
460 bio_set_dev(bio, m->dev->bdev); in map_bio()
461 bio->bi_iter.bi_sector = map_sector(m, bio); in map_bio()
465 struct bio *bio) in map_region() argument
468 io->sector = map_sector(m, bio); in map_region()
469 io->count = bio_sectors(bio); in map_region()
472 static void hold_bio(struct mirror_set *ms, struct bio *bio) in hold_bio() argument
487 bio->bi_status = BLK_STS_DM_REQUEUE; in hold_bio()
489 bio->bi_status = BLK_STS_IOERR; in hold_bio()
491 bio_endio(bio); in hold_bio()
498 bio_list_add(&ms->holds, bio); in hold_bio()
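
hold_bio() parks bios while the device is suspending or while the target waits for userspace (dmeventd) to react to a failure. The listed lines show both exits: if the set is already suspended the bio is completed immediately, with BLK_STS_DM_REQUEUE for a noflush suspend (so it is retried after resume) or BLK_STS_IOERR otherwise; if not, it is appended to ms->holds. A sketch of that flow, assuming an ms->suspend flag, an ms->lock spinlock, and the dm_noflush_suspending() test:

static void hold_bio(struct mirror_set *ms, struct bio *bio)
{
        spin_lock_irq(&ms->lock);                       /* assumed lock */

        if (atomic_read(&ms->suspend)) {                /* assumed flag */
                spin_unlock_irq(&ms->lock);

                if (dm_noflush_suspending(ms->ti))
                        bio->bi_status = BLK_STS_DM_REQUEUE;    /* retried after resume */
                else
                        bio->bi_status = BLK_STS_IOERR;

                bio_endio(bio);
                return;
        }

        /* Hold the bio until the suspend or failure handling completes. */
        bio_list_add(&ms->holds, bio);
        spin_unlock_irq(&ms->lock);
}
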
507 struct bio *bio = context; in read_callback() local
510 m = bio_get_m(bio); in read_callback()
511 bio_set_m(bio, NULL); in read_callback()
514 bio_endio(bio); in read_callback()
520 if (likely(default_ok(m)) || mirror_available(m->ms, bio)) { in read_callback()
524 queue_bio(m->ms, bio, bio_data_dir(bio)); in read_callback()
530 bio_io_error(bio); in read_callback()
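
read_callback() is the dm-io completion handler for asynchronous reads. The listing shows its shape: recover the mirror stashed via bio_set_m(), clear it, and end the bio on success; on error, retry through queue_bio() if the default mirror is still trusted (default_ok()) or mirror_available() finds another in-sync leg, otherwise fail the bio. A condensed sketch; fail_mirror() and its DM_RAID1_READ_ERROR reason code are assumed from the surrounding file:

static void read_callback(unsigned long error, void *context)
{
        struct bio *bio = context;
        struct mirror *m;

        m = bio_get_m(bio);
        bio_set_m(bio, NULL);

        if (likely(!error)) {
                bio_endio(bio);
                return;
        }

        fail_mirror(m, DM_RAID1_READ_ERROR);            /* assumed helper */

        if (likely(default_ok(m)) || mirror_available(m->ms, bio)) {
                /* Re-queue; the worker will pick another leg in do_reads(). */
                queue_bio(m->ms, bio, bio_data_dir(bio));
                return;
        }

        bio_io_error(bio);
}
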
534 static void read_async_bio(struct mirror *m, struct bio *bio) in read_async_bio() argument
541 .mem.ptr.bio = bio, in read_async_bio()
543 .notify.context = bio, in read_async_bio()
547 map_region(&io, m, bio); in read_async_bio()
548 bio_set_m(bio, m); in read_async_bio()
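
read_async_bio() wraps a single-region dm_io request around the bio: the payload is the bio itself (DM_IO_BIO), the destination is the chosen leg filled in by map_region(), and completion is routed to read_callback() with the bio as context. The mirror is stashed in the bio first so the callback can identify it. A sketch assuming the dm_io_request layout of this kernel (bi_op/bi_op_flags, also visible in do_write() below) and an io_client field on the mirror set:

static void read_async_bio(struct mirror *m, struct bio *bio)
{
        struct dm_io_region io;
        struct dm_io_request io_req = {
                .bi_op = REQ_OP_READ,
                .bi_op_flags = 0,
                .mem.type = DM_IO_BIO,
                .mem.ptr.bio = bio,
                .notify.fn = read_callback,
                .notify.context = bio,
                .client = m->ms->io_client,             /* assumed field */
        };

        map_region(&io, m, bio);
        bio_set_m(bio, m);
        dm_io(&io_req, 1, &io, NULL);                   /* one region: the chosen leg */
}
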
562 struct bio *bio; in do_reads() local
565 while ((bio = bio_list_pop(reads))) { in do_reads()
566 region = dm_rh_bio_to_region(ms->rh, bio); in do_reads()
573 m = choose_mirror(ms, bio->bi_iter.bi_sector); in do_reads()
578 read_async_bio(m, bio); in do_reads()
580 bio_io_error(bio); in do_reads()
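
do_reads() drains the read list built up by queue_bio(): for each bio it looks up the region and, when the region is in sync, load-balances across legs with choose_mirror(); otherwise it must stay on the default mirror, and if even that leg has errored the read is failed. A condensed sketch; region_in_sync(), get_default_mirror() and the error_count check are assumed from the surrounding file:

static void do_reads(struct mirror_set *ms, struct bio_list *reads)
{
        region_t region;
        struct bio *bio;
        struct mirror *m;

        while ((bio = bio_list_pop(reads))) {
                region = dm_rh_bio_to_region(ms->rh, bio);
                m = get_default_mirror(ms);                     /* assumed helper */

                /* Only read-balance when the region is known to be in sync. */
                if (likely(region_in_sync(ms, region, 1)))      /* assumed helper */
                        m = choose_mirror(ms, bio->bi_iter.bi_sector);
                else if (m && atomic_read(&m->error_count))
                        m = NULL;

                if (likely(m))
                        read_async_bio(m, bio);
                else
                        bio_io_error(bio);
        }
}
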
599 struct bio *bio = (struct bio *) context; in write_callback() local
604 ms = bio_get_m(bio)->ms; in write_callback()
605 bio_set_m(bio, NULL); in write_callback()
614 bio_endio(bio); in write_callback()
622 if (bio_op(bio) == REQ_OP_DISCARD) { in write_callback()
623 bio->bi_status = BLK_STS_NOTSUPP; in write_callback()
624 bio_endio(bio); in write_callback()
640 bio_list_add(&ms->failures, bio); in write_callback()
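
write_callback() completes the fan-out write issued by do_write(). On success the bio is simply ended. A failed discard is completed with BLK_STS_NOTSUPP rather than degrading the array; any other failure marks the failed leg(s) and defers the bio to ms->failures so the worker can raise a dm event and decide its fate in do_failures(). A condensed sketch; the per-leg error bitmap handling and the worker wakeup are paraphrased, and ms->lock is an assumption:

static void write_callback(unsigned long error, void *context)
{
        struct bio *bio = (struct bio *) context;
        struct mirror_set *ms;
        unsigned long flags;

        ms = bio_get_m(bio)->ms;
        bio_set_m(bio, NULL);

        if (likely(!error)) {
                bio_endio(bio);
                return;
        }

        /* A failed discard must not degrade the mirror. */
        if (bio_op(bio) == REQ_OP_DISCARD) {
                bio->bi_status = BLK_STS_NOTSUPP;
                bio_endio(bio);
                return;
        }

        /*
         * 'error' is a bitmap of failed legs (dm-io convention): mark them
         * failed (omitted here), then hand the bio to the worker thread.
         */
        spin_lock_irqsave(&ms->lock, flags);            /* assumed lock */
        bio_list_add(&ms->failures, bio);
        spin_unlock_irqrestore(&ms->lock, flags);
}
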
646 static void do_write(struct mirror_set *ms, struct bio *bio) in do_write() argument
653 .bi_op_flags = bio->bi_opf & (REQ_FUA | REQ_PREFLUSH), in do_write()
655 .mem.ptr.bio = bio, in do_write()
657 .notify.context = bio, in do_write()
661 if (bio_op(bio) == REQ_OP_DISCARD) { in do_write()
668 map_region(dest++, m, bio); in do_write()
674 bio_set_m(bio, get_default_mirror(ms)); in do_write()
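
do_write() is the core of the mirroring: one dm_io request, backed by the bio's own data pages (DM_IO_BIO), is fanned out to every leg by filling one dm_io_region per mirror with map_region(). Only the FUA/PREFLUSH flags are propagated; a discard is converted to a DM_IO_KMEM request with a NULL pointer since it carries no payload. The default mirror is stashed in the bio purely so write_callback() can reach the mirror_set. A condensed sketch; the region array bound, the io_client field and the error-return handling are assumptions:

static void do_write(struct mirror_set *ms, struct bio *bio)
{
        unsigned int i;
        struct dm_io_region io[MAX_NR_MIRRORS], *dest = io;     /* assumed bound */
        struct mirror *m;
        struct dm_io_request io_req = {
                .bi_op = REQ_OP_WRITE,
                .bi_op_flags = bio->bi_opf & (REQ_FUA | REQ_PREFLUSH),
                .mem.type = DM_IO_BIO,
                .mem.ptr.bio = bio,
                .notify.fn = write_callback,
                .notify.context = bio,
                .client = ms->io_client,                        /* assumed field */
        };

        if (bio_op(bio) == REQ_OP_DISCARD) {
                io_req.bi_op = REQ_OP_DISCARD;
                io_req.mem.type = DM_IO_KMEM;
                io_req.mem.ptr.addr = NULL;
        }

        /* One dm_io_region per leg: same data, every mirror device. */
        for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++)
                map_region(dest++, m, bio);

        /* Stash the default mirror so write_callback() can find ms. */
        bio_set_m(bio, get_default_mirror(ms));

        dm_io(&io_req, ms->nr_mirrors, io, NULL);
}
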
682 struct bio *bio; in do_writes() local
699 while ((bio = bio_list_pop(writes))) { in do_writes()
700 if ((bio->bi_opf & REQ_PREFLUSH) || in do_writes()
701 (bio_op(bio) == REQ_OP_DISCARD)) { in do_writes()
702 bio_list_add(&sync, bio); in do_writes()
706 region = dm_rh_bio_to_region(ms->rh, bio); in do_writes()
710 bio_list_add(&requeue, bio); in do_writes()
730 bio_list_add(this_list, bio); in do_writes()
768 while ((bio = bio_list_pop(&sync))) in do_writes()
769 do_write(ms, bio); in do_writes()
771 while ((bio = bio_list_pop(&recover))) in do_writes()
772 dm_rh_delay(ms->rh, bio); in do_writes()
774 while ((bio = bio_list_pop(&nosync))) { in do_writes()
777 bio_list_add(&ms->failures, bio); in do_writes()
781 map_bio(get_default_mirror(ms), bio); in do_writes()
782 submit_bio_noacct(bio); in do_writes()
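
do_writes() is the classification step the worker runs over queued writes. Flushes and discards go straight to the "sync" list; everything else is binned by the dm-region-hash state of its region: clean/dirty regions are written to all legs at once (do_write), recovering regions are delayed with dm_rh_delay() until that region's recovery finishes, and nosync regions are written to the default mirror only (resync will fix the other legs), or parked on ms->failures if the log has failed. A condensed sketch of the two halves visible in the listing; dm_rh_get_state() and the DM_RH_* states are the dm-region-hash API, while the "requeue" handling and the region-hash pending accounting are omitted:

static void do_writes(struct mirror_set *ms, struct bio_list *writes)
{
        struct bio *bio;
        struct bio_list sync, nosync, recover, *this_list;
        region_t region;

        bio_list_init(&sync);
        bio_list_init(&nosync);
        bio_list_init(&recover);

        while ((bio = bio_list_pop(writes))) {
                if ((bio->bi_opf & REQ_PREFLUSH) ||
                    (bio_op(bio) == REQ_OP_DISCARD)) {
                        bio_list_add(&sync, bio);
                        continue;
                }

                region = dm_rh_bio_to_region(ms->rh, bio);

                this_list = &sync;              /* DM_RH_CLEAN / DM_RH_DIRTY */
                switch (dm_rh_get_state(ms->rh, region, 1)) {
                case DM_RH_NOSYNC:
                        this_list = &nosync;
                        break;
                case DM_RH_RECOVERING:
                        this_list = &recover;
                        break;
                }
                bio_list_add(this_list, bio);
        }

        /* (Region-hash pending/flush bookkeeping for the lists omitted.) */

        while ((bio = bio_list_pop(&sync)))
                do_write(ms, bio);                      /* all legs */

        while ((bio = bio_list_pop(&recover)))
                dm_rh_delay(ms->rh, bio);               /* reissued after recovery */

        while ((bio = bio_list_pop(&nosync))) {
                if (unlikely(ms->log_failure)) {        /* assumed: upstream adds more checks */
                        bio_list_add(&ms->failures, bio);
                } else {
                        map_bio(get_default_mirror(ms), bio);   /* primary leg only */
                        submit_bio_noacct(bio);
                }
        }
}
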
789 struct bio *bio; in do_failures() local
811 while ((bio = bio_list_pop(failures))) { in do_failures()
814 dm_rh_mark_nosync(ms->rh, bio); in do_failures()
831 bio_io_error(bio); in do_failures()
833 hold_bio(ms, bio); in do_failures()
835 bio_endio(bio); in do_failures()
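
do_failures() decides the fate of writes that failed on at least one leg. Unless the log itself has failed, the region is first marked nosync so a partially written region is never reported as in sync. Then: with no valid mirror left the bio is failed; with the "handle_errors" feature active it is held (hold_bio) until userspace reconfigures or suspends the device; otherwise it is completed, i.e. the error is swallowed because at least one leg may have taken the write. A simplified sketch; get_valid_mirror(), errors_handled() and the ms->log_failure field are assumed from the surrounding file, and the keep_log interactions are left out:

static void do_failures(struct mirror_set *ms, struct bio_list *failures)
{
        struct bio *bio;

        while ((bio = bio_list_pop(failures))) {
                if (!ms->log_failure)                   /* assumed field */
                        dm_rh_mark_nosync(ms->rh, bio); /* never claim the region is in sync */

                if (unlikely(!get_valid_mirror(ms)))    /* assumed helper */
                        bio_io_error(bio);              /* no leg left to hold the data */
                else if (errors_handled(ms))            /* assumed helper */
                        hold_bio(ms, bio);              /* park for dmeventd / suspend */
                else
                        bio_endio(bio);                 /* default: ignore the write error */
        }
}
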
1184 static int mirror_map(struct dm_target *ti, struct bio *bio) in mirror_map() argument
1186 int r, rw = bio_data_dir(bio); in mirror_map()
1191 dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record)); in mirror_map()
1197 bio_record->write_region = dm_rh_bio_to_region(ms->rh, bio); in mirror_map()
1198 queue_bio(ms, bio, rw); in mirror_map()
1202 r = log->type->in_sync(log, dm_rh_bio_to_region(ms->rh, bio), 0); in mirror_map()
1210 if (bio->bi_opf & REQ_RAHEAD) in mirror_map()
1213 queue_bio(ms, bio, rw); in mirror_map()
1221 m = choose_mirror(ms, bio->bi_iter.bi_sector); in mirror_map()
1225 dm_bio_record(&bio_record->details, bio); in mirror_map()
1228 map_bio(m, bio); in mirror_map()
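
mirror_map() is the fast path every bio takes into the target. Writes are never mapped inline: the write's region is recorded in the per-bio data and the bio is deferred to the worker (DM_MAPIO_SUBMITTED), which fans it out in do_writes()/do_write(). Reads are remapped directly to a chosen leg only when the dirty log says the region is in sync; out-of-sync reads are deferred too, except read-ahead, which is simply dropped. Before an inline read is remapped, dm_bio_record() snapshots the bio so mirror_end_io() can restore and retry it on another leg. A condensed sketch; the DM_MAPIO_KILL error returns and the bio_record->m field are assumptions from context:

static int mirror_map(struct dm_target *ti, struct bio *bio)
{
        int r, rw = bio_data_dir(bio);
        struct mirror *m;
        struct mirror_set *ms = ti->private;
        struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
        struct dm_raid1_bio_record *bio_record =
                dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));

        if (rw == WRITE) {
                /* Remember the region for mirror_end_io(), then defer. */
                bio_record->write_region = dm_rh_bio_to_region(ms->rh, bio);
                queue_bio(ms, bio, rw);
                return DM_MAPIO_SUBMITTED;
        }

        r = log->type->in_sync(log, dm_rh_bio_to_region(ms->rh, bio), 0);
        if (r < 0 && r != -EWOULDBLOCK)
                return DM_MAPIO_KILL;

        if (!r || r == -EWOULDBLOCK) {
                /* Region not known in-sync: defer, but drop read-ahead. */
                if (bio->bi_opf & REQ_RAHEAD)
                        return DM_MAPIO_KILL;

                queue_bio(ms, bio, rw);
                return DM_MAPIO_SUBMITTED;
        }

        /* In-sync read: pick a leg, record enough state to retry, remap. */
        m = choose_mirror(ms, bio->bi_iter.bi_sector);
        if (unlikely(!m))
                return DM_MAPIO_KILL;

        dm_bio_record(&bio_record->details, bio);
        bio_record->m = m;                              /* assumed field */

        map_bio(m, bio);
        return DM_MAPIO_REMAPPED;
}
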
1233 static int mirror_end_io(struct dm_target *ti, struct bio *bio, in mirror_end_io() argument
1236 int rw = bio_data_dir(bio); in mirror_end_io()
1241 dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record)); in mirror_end_io()
1247 if (!(bio->bi_opf & REQ_PREFLUSH) && in mirror_end_io()
1248 bio_op(bio) != REQ_OP_DISCARD) in mirror_end_io()
1256 if (bio->bi_opf & REQ_RAHEAD) in mirror_end_io()
1281 if (default_ok(m) || mirror_available(ms, bio)) { in mirror_end_io()
1284 dm_bio_restore(bd, bio); in mirror_end_io()
1286 bio->bi_status = 0; in mirror_end_io()
1288 queue_bio(ms, bio, rw); in mirror_end_io()
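
mirror_end_io() closes the loop for reads remapped inline by mirror_map(). Write completions only release region-hash bookkeeping (for writes other than flush/discard). A failed read, however, gets another chance: if the default mirror is still ok or mirror_available() finds another in-sync leg, the saved details are restored with dm_bio_restore(), the error is cleared, the bio is re-queued to the worker, and DM_ENDIO_INCOMPLETE tells device-mapper not to complete it yet. A condensed sketch; dm_rh_dec() on the write path, the NOTSUPP/read-ahead early-outs and the bio_record->m field are assumptions from context:

static int mirror_end_io(struct dm_target *ti, struct bio *bio,
                         blk_status_t *error)
{
        int rw = bio_data_dir(bio);
        struct mirror_set *ms = ti->private;
        struct dm_raid1_bio_record *bio_record =
                dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));

        if (rw == WRITE) {
                /* Release the region pinned for this write. */
                if (!(bio->bi_opf & REQ_PREFLUSH) &&
                    bio_op(bio) != REQ_OP_DISCARD)
                        dm_rh_dec(ms->rh, bio_record->write_region);    /* assumed */
                return DM_ENDIO_DONE;
        }

        if (*error && *error != BLK_STS_NOTSUPP &&
            !(bio->bi_opf & REQ_RAHEAD)) {
                struct mirror *m = bio_record->m;       /* assumed field */

                /* Upstream also checks that details were actually recorded. */
                if (default_ok(m) || mirror_available(ms, bio)) {
                        dm_bio_restore(&bio_record->details, bio);
                        bio->bi_status = 0;
                        queue_bio(ms, bio, rw);
                        return DM_ENDIO_INCOMPLETE;     /* not done yet: retrying */
                }
        }

        return DM_ENDIO_DONE;
}
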
1306 struct bio *bio; in mirror_presuspend() local
1321 while ((bio = bio_list_pop(&holds))) in mirror_presuspend()
1322 hold_bio(ms, bio); in mirror_presuspend()
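
Finally, mirror_presuspend() deals with the bios parked on ms->holds. The hold list is detached under the lock after the suspend flag is set, and every held bio is pushed back through hold_bio(): because the set is now marked suspended, each one is completed immediately, requeued (BLK_STS_DM_REQUEUE, noflush suspend) or errored. A condensed sketch, consistent with the hold_bio() sketch above; the suspend flag and lock are the same assumptions, and recovery quiescing plus the dirty-log presuspend call are omitted:

static void mirror_presuspend(struct dm_target *ti)
{
        struct mirror_set *ms = ti->private;
        struct bio_list holds;
        struct bio *bio;

        atomic_set(&ms->suspend, 1);                    /* assumed flag */

        /* Detach the hold list; nothing new can be added from now on. */
        spin_lock_irq(&ms->lock);
        holds = ms->holds;
        bio_list_init(&ms->holds);
        spin_unlock_irq(&ms->lock);

        /* Re-run each held bio; with suspend set, hold_bio() completes it. */
        while ((bio = bio_list_pop(&holds)))
                hold_bio(ms, bio);
}
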