Lines matching refs: sectors (all hits are in the md/raid10 driver, drivers/md/raid10.c)
346 r10_bio->devs[slot].addr + (r10_bio->sectors); in update_head_pos()
530 r10_bio->sectors) && in raid10_end_write_request()
727 int sectors = r10_bio->sectors; in read_balance() local
747 if (raid1_should_read_first(conf->mddev, this_sector, sectors)) in read_balance()
762 r10_bio->devs[slot].addr + sectors > in read_balance()
769 r10_bio->devs[slot].addr + sectors > rdev->recovery_offset) in read_balance()
773 if (is_badblock(rdev, dev_sector, sectors, in read_balance()
784 if (!do_balance && sectors > bad_sectors) in read_balance()
785 sectors = bad_sectors; in read_balance()
786 if (best_good_sectors > sectors) in read_balance()
787 best_good_sectors = sectors; in read_balance()
802 best_good_sectors = sectors; in read_balance()
1121 struct bio *bio, sector_t sectors) in regular_request_wait() argument
1130 bio->bi_iter.bi_sector + sectors > conf->reshape_progress) { in regular_request_wait()
1140 sectors); in regular_request_wait()
1185 if (!regular_request_wait(mddev, conf, bio, r10_bio->sectors)) { in raid10_read_request()
1218 r10_bio->sectors = max_sectors; in raid10_read_request()
1311 r10_bio->sectors && in wait_blocked_dev()
1313 r10_bio->sectors) < 0) in wait_blocked_dev()
1352 sector_t sectors; in raid10_write_request() local
1377 sectors = r10_bio->sectors; in raid10_write_request()
1378 if (!regular_request_wait(mddev, conf, bio, sectors)) { in raid10_write_request()
1386 bio->bi_iter.bi_sector + sectors > conf->reshape_progress) in raid10_write_request()
1387 : (bio->bi_iter.bi_sector + sectors > conf->reshape_safe && in raid10_write_request()
1422 max_sectors = r10_bio->sectors; in raid10_write_request()
1488 if (max_sectors < r10_bio->sectors) in raid10_write_request()
1489 r10_bio->sectors = max_sectors; in raid10_write_request()
1491 if (r10_bio->sectors < bio_sectors(bio)) { in raid10_write_request()
1492 struct bio *split = bio_split(bio, r10_bio->sectors, in raid10_write_request()
1539 static void __make_request(struct mddev *mddev, struct bio *bio, int sectors) in __make_request() argument
1547 r10_bio->sectors = sectors; in __make_request()
1735 r10_bio->sectors = 0; in raid10_handle_discard()
1876 int sectors = bio_sectors(bio); in raid10_make_request() local
1893 sectors > chunk_sects in raid10_make_request()
1897 sectors = chunk_sects - in raid10_make_request()
1900 __make_request(mddev, bio, sectors); in raid10_make_request()
2239 atomic_add(r10_bio->sectors, in __end_sync_read()
2279 sector_t s = r10_bio->sectors; in end_sync_request()
2326 r10_bio->sectors)) { in end_sync_write()
2371 fbio->bi_iter.bi_size = r10_bio->sectors << 9; in sync_request_write()
2375 vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9); in sync_request_write()
2397 int sectors = r10_bio->sectors; in sync_request_write() local
2400 if (sectors < (len / 512)) in sync_request_write()
2401 len = sectors * 512; in sync_request_write()
2406 sectors -= len/512; in sync_request_write()
2410 atomic64_add(r10_bio->sectors, &mddev->resync_mismatches); in sync_request_write()
2461 md_done_sync(mddev, r10_bio->sectors, 1); in sync_request_write()
2489 int sectors = r10_bio->sectors; in fix_recovery_read_error() local
2495 while (sectors) { in fix_recovery_read_error()
2496 int s = sectors; in fix_recovery_read_error()
2553 sectors -= s; in fix_recovery_read_error()
2598 int sectors, struct page *page, enum req_op op) in r10_sync_page_io() argument
2600 if (rdev_has_badblock(rdev, sector, sectors) && in r10_sync_page_io()
2603 if (sync_page_io(rdev, sector, sectors << 9, page, op, false)) in r10_sync_page_io()
2613 if (!rdev_set_badblocks(rdev, sector, sectors, 0)) in r10_sync_page_io()
2629 int sectors = r10_bio->sectors, slot = r10_bio->read_slot; in fix_read_error() local
2648 while(sectors) { in fix_read_error()
2649 int s = sectors; in fix_read_error()
2777 sectors -= s; in fix_read_error()
2801 int sectors; in narrow_write_error() local
2802 int sect_to_write = r10_bio->sectors; in narrow_write_error()
2811 sectors = ((r10_bio->sector + block_sectors) in narrow_write_error()
2818 if (sectors > sect_to_write) in narrow_write_error()
2819 sectors = sect_to_write; in narrow_write_error()
2823 bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors); in narrow_write_error()
2832 sectors, 0) in narrow_write_error()
2836 sect_to_write -= sectors; in narrow_write_error()
2837 sector += sectors; in narrow_write_error()
2838 sectors = block_sectors; in narrow_write_error()
2904 r10_bio->sectors, 0); in handle_write_completed()
2909 r10_bio->sectors, 0)) in handle_write_completed()
2921 r10_bio->sectors, 0); in handle_write_completed()
2926 r10_bio->sectors, 0)) in handle_write_completed()
2941 r10_bio->sectors, 0); in handle_write_completed()
2955 r10_bio->sectors, 0); in handle_write_completed()
3600 r10_bio->sectors = (sector_nr | chunk_mask) - sector_nr + 1; in raid10_sync_request()
3703 r10_bio->sectors = nr_sectors; in raid10_sync_request()
3757 r10_bio->sectors = nr_sectors; in raid10_sync_request()
3787 raid10_size(struct mddev *mddev, sector_t sectors, int raid_disks) in raid10_size() argument
3795 if (!sectors) in raid10_size()
3796 sectors = conf->dev_sectors; in raid10_size()
3798 size = sectors >> conf->geo.chunk_shift; in raid10_size()
4212 static int raid10_resize(struct mddev *mddev, sector_t sectors) in raid10_resize() argument
4237 size = raid10_size(mddev, sectors, 0); in raid10_resize()
4247 if (sectors > mddev->dev_sectors && in raid10_resize()
4252 calc_sectors(conf, sectors); in raid10_resize()
4286 rdev->sectors = size; in raid10_takeover_raid0()
4783 r10_bio->sectors = last - sector_nr + 1; in reshape_request()
4880 r10_bio->sectors = nr_sectors; in reshape_request()
4919 md_done_sync(mddev, r10_bio->sectors, 0); in reshape_request_write()
4983 int sectors = r10_bio->sectors; in handle_reshape_read_error() local
5002 while (sectors) { in handle_reshape_read_error()
5003 int s = sectors; in handle_reshape_read_error()
5043 sectors -= s; in handle_reshape_read_error()
5077 md_done_sync(r10_bio->mddev, r10_bio->sectors, 1); in end_reshape_request()
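
The most common pattern in the hits above is clamping a request to a boundary and carrying the clamped count in r10_bio->sectors (see raid10_make_request() lines 1893-1900 and raid10_write_request() lines 1488-1492). As a rough, self-contained userspace sketch of that clamping step only (not kernel code; the helper name, typedef, and main() driver are illustrative assumptions):

    #include <stdio.h>

    typedef unsigned long long sector_t;

    /* Limit 'sectors' so that [start, start + sectors) stays inside one
     * chunk.  chunk_sects is assumed to be a power of two, as RAID chunk
     * sizes are, so the in-chunk offset can be taken with a mask. */
    static sector_t clamp_to_chunk(sector_t start, sector_t sectors,
                                   sector_t chunk_sects)
    {
            sector_t offset = start & (chunk_sects - 1); /* offset in chunk */

            if (offset + sectors > chunk_sects)
                    sectors = chunk_sects - offset;      /* cut at boundary */
            return sectors;
    }

    int main(void)
    {
            /* 64-sector chunks; a 48-sector request starting 40 sectors in
             * crosses the boundary, so only 24 sectors are handled now. */
            sector_t done = clamp_to_chunk(40, 48, 64);

            printf("submit %llu sectors now, %llu sectors remain\n",
                   done, 48 - done);
            return 0;
    }

In the real driver the remainder is not dropped: the bio is split (bio_split() at line 1492) or resubmitted, which is why so many of the functions listed above re-read r10_bio->sectors after the clamp.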