Lines matching refs:bi — references to the bio pointer bi in drivers/md/raid5.c (Linux MD RAID4/5/6 driver); each entry shows the source line number, the matching code, and the enclosing function.
1134 raid5_end_read_request(struct bio *bi);
1136 raid5_end_write_request(struct bio *bi);
1158 struct bio *bi, *rbi; in ops_run_io() local
1181 bi = &dev->req; in ops_run_io()
1245 bio_init(bi, rdev->bdev, &dev->vec, 1, op | op_flags); in ops_run_io()
1246 bi->bi_end_io = op_is_write(op) in ops_run_io()
1249 bi->bi_private = sh; in ops_run_io()
1253 bi->bi_opf, i); in ops_run_io()
1258 bi->bi_iter.bi_sector = (sh->sector in ops_run_io()
1261 bi->bi_iter.bi_sector = (sh->sector in ops_run_io()
1264 bi->bi_opf |= REQ_NOMERGE; in ops_run_io()
1279 bi->bi_vcnt = 1; in ops_run_io()
1280 bi->bi_io_vec[0].bv_len = RAID5_STRIPE_SIZE(conf); in ops_run_io()
1281 bi->bi_io_vec[0].bv_offset = sh->dev[i].offset; in ops_run_io()
1282 bi->bi_iter.bi_size = RAID5_STRIPE_SIZE(conf); in ops_run_io()
1288 bi->bi_vcnt = 0; in ops_run_io()
1292 mddev_trace_remap(conf->mddev, bi, sh->dev[i].sector); in ops_run_io()
1294 bio_list_add(&pending_bios, bi); in ops_run_io()
1296 submit_bio_noacct(bi); in ops_run_io()
1340 bi->bi_opf, i, (unsigned long long)sh->sector); in ops_run_io()
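The ops_run_io() fragments above show each stripe device reusing a bio embedded in the device itself (&dev->req): bio_init() attaches a single bio_vec (&dev->vec), bi_end_io/bi_private route completion back to the stripe_head, and the iterator describes exactly one RAID5_STRIPE_SIZE region. Below is a minimal user-space sketch of that embedded single-segment request pattern; the mini_* types and prep_dev_io() are made-up stand-ins for illustration, not the kernel structures or API.

#include <stdio.h>
#include <stdint.h>

typedef uint64_t sector_t;

/* Simplified stand-ins for the kernel structures named above (illustrative only). */
struct mini_vec { void *page; unsigned len; unsigned offset; };
struct mini_bio {
        sector_t sector;                      /* ~ bi->bi_iter.bi_sector */
        unsigned size;                        /* ~ bi->bi_iter.bi_size */
        unsigned vcnt;                        /* ~ bi->bi_vcnt */
        struct mini_vec *vec;                 /* the single segment */
        void (*end_io)(struct mini_bio *);    /* ~ bi->bi_end_io */
        void *private;                        /* ~ bi->bi_private */
};
struct mini_dev {
        struct mini_bio req;                  /* embedded request, like dev->req */
        struct mini_vec vec;                  /* its one bio_vec, like dev->vec */
        char page[4096];
};

static void read_done(struct mini_bio *b)  { printf("read  done @%llu\n", (unsigned long long)b->sector); }
static void write_done(struct mini_bio *b) { printf("write done @%llu\n", (unsigned long long)b->sector); }

/* Mirrors the setup visible in ops_run_io(): one segment covering one stripe page. */
static void prep_dev_io(struct mini_dev *dev, void *stripe_head, sector_t sector,
                        unsigned stripe_size, unsigned offset, int is_write)
{
        struct mini_bio *bi = &dev->req;

        bi->vec = &dev->vec;                  /* cf. bio_init(bi, ..., &dev->vec, 1, ...) */
        bi->vcnt = 1;
        bi->vec->page = dev->page;
        bi->vec->len = stripe_size;           /* cf. bi_io_vec[0].bv_len = RAID5_STRIPE_SIZE */
        bi->vec->offset = offset;             /* cf. bi_io_vec[0].bv_offset = sh->dev[i].offset */
        bi->sector = sector;
        bi->size = stripe_size;
        bi->end_io = is_write ? write_done : read_done;
        bi->private = stripe_head;            /* completion routes back to the stripe */
}

int main(void)
{
        struct mini_dev dev = { 0 };

        prep_dev_io(&dev, NULL, 2048, 4096, 0, 1);
        dev.req.end_io(&dev.req);             /* prints "write done @2048" */
        return 0;
}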
2706 static void raid5_end_read_request(struct bio * bi) in raid5_end_read_request() argument
2708 struct stripe_head *sh = bi->bi_private; in raid5_end_read_request()
2715 if (bi == &sh->dev[i].req) in raid5_end_read_request()
2720 bi->bi_status); in raid5_end_read_request()
2739 if (!bi->bi_status) { in raid5_end_read_request()
2771 if (!(bi->bi_status == BLK_STS_PROTECTION)) in raid5_end_read_request()
2828 bio_uninit(bi); in raid5_end_read_request()
2834 static void raid5_end_write_request(struct bio *bi) in raid5_end_write_request() argument
2836 struct stripe_head *sh = bi->bi_private; in raid5_end_write_request()
2843 if (bi == &sh->dev[i].req) { in raid5_end_write_request()
2847 if (bi == &sh->dev[i].rreq) { in raid5_end_write_request()
2862 bi->bi_status); in raid5_end_write_request()
2869 if (bi->bi_status) in raid5_end_write_request()
2875 if (bi->bi_status) { in raid5_end_write_request()
2894 if (sh->batch_head && bi->bi_status && !replacement) in raid5_end_write_request()
2897 bio_uninit(bi); in raid5_end_write_request()
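The two completion handlers above identify which device finished by comparing the completed bio pointer with the bios embedded in sh->dev[] (req for the normal device, rreq for a replacement). A small self-contained sketch of that pointer-comparison lookup, with hypothetical mini_* types in place of the kernel's:

#include <stdio.h>

/* Simplified stand-ins; names are illustrative, not the kernel's. */
struct mini_bio { int status; };
struct mini_dev { struct mini_bio req, rreq; };
struct mini_stripe { struct mini_dev dev[4]; };

/*
 * Same identification scheme as raid5_end_write_request() above: the
 * completed bio is one of the bios embedded in sh->dev[], so a pointer
 * comparison recovers both the device index and whether the I/O went to
 * the replacement device (rreq) or the original (req).
 */
static int find_dev(struct mini_stripe *sh, struct mini_bio *bi, int *replacement)
{
        for (int i = 0; i < 4; i++) {
                if (bi == &sh->dev[i].req)  { *replacement = 0; return i; }
                if (bi == &sh->dev[i].rreq) { *replacement = 1; return i; }
        }
        return -1;                            /* not one of ours */
}

int main(void)
{
        struct mini_stripe sh = { 0 };
        int repl;
        int idx = find_dev(&sh, &sh.dev[2].rreq, &repl);

        printf("dev %d, replacement=%d\n", idx, repl);   /* dev 2, replacement=1 */
        return 0;
}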
3432 static bool stripe_bio_overlaps(struct stripe_head *sh, struct bio *bi, in stripe_bio_overlaps() argument
3439 bi->bi_iter.bi_sector, sh->sector); in stripe_bio_overlaps()
3450 while (*bip && (*bip)->bi_iter.bi_sector < bi->bi_iter.bi_sector) { in stripe_bio_overlaps()
3451 if (bio_end_sector(*bip) > bi->bi_iter.bi_sector) in stripe_bio_overlaps()
3456 if (*bip && (*bip)->bi_iter.bi_sector < bio_end_sector(bi)) in stripe_bio_overlaps()
3492 static void __add_stripe_bio(struct stripe_head *sh, struct bio *bi, in __add_stripe_bio() argument
3507 while (*bip && (*bip)->bi_iter.bi_sector < bi->bi_iter.bi_sector) in __add_stripe_bio()
3513 BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next); in __add_stripe_bio()
3515 bi->bi_next = *bip; in __add_stripe_bio()
3516 *bip = bi; in __add_stripe_bio()
3517 bio_inc_remaining(bi); in __add_stripe_bio()
3518 md_write_inc(conf->mddev, bi); in __add_stripe_bio()
3523 for (bi=sh->dev[dd_idx].towrite; in __add_stripe_bio()
3525 bi && bi->bi_iter.bi_sector <= sector; in __add_stripe_bio()
3526 bi = r5_next_bio(conf, bi, sh->dev[dd_idx].sector)) { in __add_stripe_bio()
3527 if (bio_end_sector(bi) >= sector) in __add_stripe_bio()
3528 sector = bio_end_sector(bi); in __add_stripe_bio()
3550 static bool add_stripe_bio(struct stripe_head *sh, struct bio *bi, in add_stripe_bio() argument
3555 if (stripe_bio_overlaps(sh, bi, dd_idx, forwrite)) { in add_stripe_bio()
3561 __add_stripe_bio(sh, bi, dd_idx, forwrite, previous); in add_stripe_bio()
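stripe_bio_overlaps() and __add_stripe_bio() above operate on a per-device singly-linked list of bios threaded through bi_next and kept sorted by starting sector: the overlap check walks to the would-be insertion point and inspects the neighbours, and the insert splices the new bio at that point. A compact user-space sketch of the same check-then-splice logic, using a simplified mini_bio in place of struct bio:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

typedef uint64_t sector_t;

/* Simplified bio: start sector, length in sectors, intrusive next link (~ bi_next). */
struct mini_bio {
        sector_t sector;
        unsigned sectors;
        struct mini_bio *next;
};

static sector_t end_sector(const struct mini_bio *b) { return b->sector + b->sectors; }

/*
 * Same shape as stripe_bio_overlaps()/__add_stripe_bio() above: walk the
 * sorted list to the insertion point, reject if a neighbour overlaps,
 * otherwise splice the new bio in so the list stays sorted by start sector.
 */
static bool add_sorted(struct mini_bio **head, struct mini_bio *bi)
{
        struct mini_bio **bip = head;

        while (*bip && (*bip)->sector < bi->sector) {
                if (end_sector(*bip) > bi->sector)
                        return false;         /* previous bio overlaps us */
                bip = &(*bip)->next;
        }
        if (*bip && (*bip)->sector < end_sector(bi))
                return false;                 /* we overlap the next bio */

        bi->next = *bip;
        *bip = bi;
        return true;
}

int main(void)
{
        struct mini_bio a = { 0, 8 }, b = { 16, 8 }, c = { 4, 8 };
        struct mini_bio *head = NULL;

        printf("%d %d %d\n", add_sorted(&head, &a), add_sorted(&head, &b),
               add_sorted(&head, &c));        /* 1 1 0: c overlaps a */
        return 0;
}

Keeping the list sorted is what lets both the overlap test and the later contiguity scan over towrite stop as soon as a bio starts beyond the region of interest.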
3591 struct bio *bi; in handle_failed_stripe() local
3612 bi = sh->dev[i].towrite; in handle_failed_stripe()
3622 while (bi && bi->bi_iter.bi_sector < in handle_failed_stripe()
3624 struct bio *nextbi = r5_next_bio(conf, bi, sh->dev[i].sector); in handle_failed_stripe()
3627 bio_io_error(bi); in handle_failed_stripe()
3628 bi = nextbi; in handle_failed_stripe()
3631 bi = sh->dev[i].written; in handle_failed_stripe()
3638 while (bi && bi->bi_iter.bi_sector < in handle_failed_stripe()
3640 struct bio *bi2 = r5_next_bio(conf, bi, sh->dev[i].sector); in handle_failed_stripe()
3643 bio_io_error(bi); in handle_failed_stripe()
3644 bi = bi2; in handle_failed_stripe()
3655 bi = sh->dev[i].toread; in handle_failed_stripe()
3660 if (bi) in handle_failed_stripe()
3662 while (bi && bi->bi_iter.bi_sector < in handle_failed_stripe()
3665 r5_next_bio(conf, bi, sh->dev[i].sector); in handle_failed_stripe()
3667 bio_io_error(bi); in handle_failed_stripe()
3668 bi = nextbi; in handle_failed_stripe()
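handle_failed_stripe() above takes over the queued towrite/written/toread lists and fails every bio targeting this stripe page, walking the chain with r5_next_bio() and completing each bio with bio_io_error(). A simplified, self-contained sketch of that detach-and-drain loop (the r5_next_bio() stop condition is folded into the while test here; mini_bio is a made-up stand-in):

#include <stdio.h>
#include <stdint.h>

typedef uint64_t sector_t;
struct mini_bio { sector_t sector; struct mini_bio *next; };

/*
 * Take over the queued list, then complete (here: just report) every bio
 * that starts inside this stripe page, following the next chain as the
 * driver does via bi_next/r5_next_bio().
 */
static void fail_all(struct mini_bio **queue, sector_t dev_sector, unsigned stripe_sectors)
{
        struct mini_bio *bi = *queue;

        *queue = NULL;                        /* detach, as handle_failed_stripe() does */
        while (bi && bi->sector < dev_sector + stripe_sectors) {
                struct mini_bio *nextbi = bi->next;   /* grab next before completing */

                printf("failing bio @%llu\n", (unsigned long long)bi->sector);
                bi = nextbi;
        }
}

int main(void)
{
        struct mini_bio b2 = { 8, NULL }, b1 = { 0, &b2 };
        struct mini_bio *towrite = &b1;

        fail_all(&towrite, 0, 16);
        return 0;
}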
5329 static void add_bio_to_retry(struct bio *bi,struct r5conf *conf) in add_bio_to_retry() argument
5335 bi->bi_next = conf->retry_read_aligned_list; in add_bio_to_retry()
5336 conf->retry_read_aligned_list = bi; in add_bio_to_retry()
5345 struct bio *bi; in remove_bio_from_retry() local
5347 bi = conf->retry_read_aligned; in remove_bio_from_retry()
5348 if (bi) { in remove_bio_from_retry()
5351 return bi; in remove_bio_from_retry()
5353 bi = conf->retry_read_aligned_list; in remove_bio_from_retry()
5354 if(bi) { in remove_bio_from_retry()
5355 conf->retry_read_aligned_list = bi->bi_next; in remove_bio_from_retry()
5356 bi->bi_next = NULL; in remove_bio_from_retry()
5360 return bi; in remove_bio_from_retry()
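add_bio_to_retry() and remove_bio_from_retry() above implement a simple intrusive LIFO threaded through bi_next, with conf->retry_read_aligned holding a partially-processed bio that must be retried before anything on the list. A self-contained sketch of the same push/pop behaviour with illustrative mini_* types:

#include <stdio.h>

/* Simplified stand-ins; field names mirror the ones referenced above. */
struct mini_bio  { int id; struct mini_bio *next; };
struct mini_conf {
        struct mini_bio *retry_read_aligned;       /* partially-handled bio, retried first */
        struct mini_bio *retry_read_aligned_list;  /* LIFO of bios to retry */
};

/* Push onto the retry list, as add_bio_to_retry() does via bi_next. */
static void add_retry(struct mini_conf *conf, struct mini_bio *bi)
{
        bi->next = conf->retry_read_aligned_list;
        conf->retry_read_aligned_list = bi;
}

/* Pop for retry, preferring the partially-handled bio, as remove_bio_from_retry() does. */
static struct mini_bio *remove_retry(struct mini_conf *conf)
{
        struct mini_bio *bi = conf->retry_read_aligned;

        if (bi) {
                conf->retry_read_aligned = NULL;
                return bi;
        }
        bi = conf->retry_read_aligned_list;
        if (bi) {
                conf->retry_read_aligned_list = bi->next;
                bi->next = NULL;
        }
        return bi;
}

int main(void)
{
        struct mini_conf conf = { 0 };
        struct mini_bio a = { 1 }, b = { 2 };

        add_retry(&conf, &a);
        add_retry(&conf, &b);
        printf("%d %d\n", remove_retry(&conf)->id, remove_retry(&conf)->id);  /* 2 1 */
        return 0;
}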
5369 static void raid5_align_endio(struct bio *bi) in raid5_align_endio() argument
5371 struct bio *raid_bi = bi->bi_private; in raid5_align_endio()
5375 blk_status_t error = bi->bi_status; in raid5_align_endio()
5377 bio_put(bi); in raid5_align_endio()
5666 static void make_discard_request(struct mddev *mddev, struct bio *bi) in make_discard_request() argument
5674 if (WARN_ON_ONCE(bi->bi_opf & REQ_NOWAIT)) in make_discard_request()
5681 logical_sector = bi->bi_iter.bi_sector & ~((sector_t)RAID5_STRIPE_SECTORS(conf)-1); in make_discard_request()
5682 last_sector = bio_end_sector(bi); in make_discard_request()
5684 bi->bi_next = NULL; in make_discard_request()
5727 sh->dev[d].towrite = bi; in make_discard_request()
5729 bio_inc_remaining(bi); in make_discard_request()
5730 md_write_inc(mddev, bi); in make_discard_request()
5746 bio_endio(bi); in make_discard_request()
5792 struct bio *bi, int forwrite, int previous) in add_all_stripe_bios() argument
5808 if (stripe_bio_overlaps(sh, bi, dd_idx, forwrite)) { in add_all_stripe_bios()
5833 __add_stripe_bio(sh, bi, dd_idx, forwrite, previous); in add_all_stripe_bios()
5928 sector_t logical_sector, struct bio *bi) in make_stripe_request() argument
5930 const int rw = bio_data_dir(bi); in make_stripe_request()
5954 if (bi->bi_opf & REQ_RAHEAD) in make_stripe_request()
5959 bi->bi_status = BLK_STS_IOERR; in make_stripe_request()
5989 if (!add_all_stripe_bios(conf, ctx, sh, bi, rw, previous)) { in make_stripe_request()
6011 (bi->bi_opf & REQ_SYNC) && in make_stripe_request()
6022 bi->bi_status = BLK_STS_RESOURCE; in make_stripe_request()
6034 struct bio *bi) in raid5_bio_lowest_chunk_sector() argument
6041 sector_t r_sector = bi->bi_iter.bi_sector & ~((sector_t)RAID5_STRIPE_SECTORS(conf)-1); in raid5_bio_lowest_chunk_sector()
6047 if (sectors_per_chunk - chunk_offset >= bio_sectors(bi)) in raid5_bio_lowest_chunk_sector()
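raid5_bio_lowest_chunk_sector(), make_discard_request() and raid5_make_request() above all round the starting sector down with "& ~(RAID5_STRIPE_SECTORS(conf) - 1)", which relies on the stripe and chunk sizes being powers of two, and the chunk test checks whether the bio fits in the remainder of one chunk. A tiny arithmetic demo of those two masks; the 8-sector stripe and 128-sector chunk values are made up for the example:

#include <stdio.h>
#include <stdint.h>

typedef uint64_t sector_t;

/*
 * x & ~(size - 1) rounds x down to a size-aligned boundary, and
 * x & (size - 1) gives the offset within that boundary, provided
 * size is a power of two.
 */
int main(void)
{
        const sector_t stripe_sectors = 8, sectors_per_chunk = 128;
        sector_t bi_sector = 1234, bio_sectors = 48;

        sector_t logical = bi_sector & ~(stripe_sectors - 1);      /* 1232: stripe-aligned start */
        sector_t chunk_offset = logical & (sectors_per_chunk - 1); /* 80: offset within the chunk */

        printf("aligned start %llu, offset in chunk %llu, fits in one chunk: %s\n",
               (unsigned long long)logical, (unsigned long long)chunk_offset,
               sectors_per_chunk - chunk_offset >= bio_sectors ? "yes" : "no");
        return 0;
}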
6061 static bool raid5_make_request(struct mddev *mddev, struct bio * bi) in raid5_make_request() argument
6068 const int rw = bio_data_dir(bi); in raid5_make_request()
6072 if (unlikely(bi->bi_opf & REQ_PREFLUSH)) { in raid5_make_request()
6073 int ret = log_handle_flush_request(conf, bi); in raid5_make_request()
6078 if (md_flush_request(mddev, bi)) in raid5_make_request()
6086 ctx.do_flush = bi->bi_opf & REQ_PREFLUSH; in raid5_make_request()
6089 md_write_start(mddev, bi); in raid5_make_request()
6097 bi = chunk_aligned_read(mddev, bi); in raid5_make_request()
6098 if (!bi) in raid5_make_request()
6102 if (unlikely(bio_op(bi) == REQ_OP_DISCARD)) { in raid5_make_request()
6103 make_discard_request(mddev, bi); in raid5_make_request()
6108 logical_sector = bi->bi_iter.bi_sector & ~((sector_t)RAID5_STRIPE_SECTORS(conf)-1); in raid5_make_request()
6110 ctx.last_sector = bio_end_sector(bi); in raid5_make_request()
6111 bi->bi_next = NULL; in raid5_make_request()
6118 bi->bi_iter.bi_sector, ctx.last_sector); in raid5_make_request()
6121 if ((bi->bi_opf & REQ_NOWAIT) && in raid5_make_request()
6123 bio_wouldblock_error(bi); in raid5_make_request()
6128 md_account_bio(mddev, &bi); in raid5_make_request()
6138 logical_sector = raid5_bio_lowest_chunk_sector(conf, bi); in raid5_make_request()
6148 bi); in raid5_make_request()
6190 md_free_cloned_bio(bi); in raid5_make_request()
6194 bio_endio(bi); in raid5_make_request()