Lines Matching refs:chunk_sectors (all in drivers/md/raid5.c)

924 if (!sector_div(tmp_sec, conf->chunk_sectors)) in stripe_add_to_batch_list()
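
The test at line 924 asks whether a stripe sits exactly on a chunk boundary: sector_div() divides its argument in place and returns the remainder, so !sector_div(...) is true only when the stripe's sector is a multiple of chunk_sectors. A minimal sketch, modeling sector_div() with a plain modulo:

    #include <stdio.h>

    typedef unsigned long long sector_t;

    int main(void)
    {
            unsigned int chunk_sectors = 64;   /* 32 KiB chunks */
            sector_t tmp_sec = 640;            /* stripe sector */

            /* kernel: if (!sector_div(tmp_sec, conf->chunk_sectors));
             * the real macro also leaves the quotient in tmp_sec */
            if (!(tmp_sec % chunk_sectors))
                    printf("stripe starts a new chunk\n");
            return 0;
    }
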
2970 : conf->chunk_sectors; in raid5_compute_sector()
3166 : conf->chunk_sectors; in raid5_compute_blocknr()
3497 if (first + conf->chunk_sectors * (count - 1) != last) in stripe_bio_overlaps()
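
Line 3497 appears to verify that a run of count stripes spaced one chunk apart is gapless: the first and last stripe must sit exactly (count - 1) chunks from each other. A sketch with illustrative values:

    #include <stdbool.h>
    #include <stdio.h>

    typedef unsigned long long sector_t;

    int main(void)
    {
            unsigned int chunk_sectors = 64;
            unsigned int count = 4;            /* stripes in the run */
            sector_t first = 1024, last = 1216;

            /* kernel: if (first + conf->chunk_sectors * (count - 1) != last) */
            bool gapless = (first + (sector_t)chunk_sectors * (count - 1) == last);
            printf("%s\n", gapless ? "contiguous run" : "gap detected");
            return 0;
    }
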
3604 previous ? conf->prev_chunk_sectors : conf->chunk_sectors; in stripe_set_idx()
5370 unsigned int chunk_sectors; in in_chunk_boundary() local
5373 chunk_sectors = min(conf->chunk_sectors, conf->prev_chunk_sectors); in in_chunk_boundary()
5374 return chunk_sectors >= in in_chunk_boundary()
5375 ((sector & (chunk_sectors - 1)) + bio_sectors); in in_chunk_boundary()
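
in_chunk_boundary() decides whether a read touches only one chunk. Because chunk_sectors is a power of two, sector & (chunk_sectors - 1) yields the offset within the chunk, and the bio fits iff that offset plus the bio length does not exceed the chunk; during reshape, line 5373 takes the min of the old and new chunk sizes so the test stays conservative for both layouts. A self-contained sketch of the same check:

    #include <stdbool.h>
    #include <stdio.h>

    typedef unsigned long long sector_t;

    static bool in_chunk_boundary(sector_t sector, unsigned int bio_sectors,
                                  unsigned int chunk_sectors)
    {
            /* offset within the chunk; valid only because
             * chunk_sectors is a power of two */
            return chunk_sectors >=
                    ((sector & (chunk_sectors - 1)) + bio_sectors);
    }

    int main(void)
    {
            /* 64-sector chunks: a 16-sector bio at offset 48 fits,
             * at offset 56 it crosses into the next chunk */
            printf("%d\n", in_chunk_boundary(48, 16, 64)); /* 1 */
            printf("%d\n", in_chunk_boundary(56, 16, 64)); /* 0 */
            return 0;
    }
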
5526 unsigned chunk_sects = mddev->chunk_sectors; in chunk_aligned_read()
5739 stripe_sectors = conf->chunk_sectors * in make_discard_request()
5745 logical_sector *= conf->chunk_sectors; in make_discard_request()
5746 last_sector *= conf->chunk_sectors; in make_discard_request()
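
make_discard_request() trims a discard to whole data stripes and then converts the array-wide range into a per-device offset: dividing by stripe_sectors gives a stripe number, and multiplying by chunk_sectors gives that stripe's depth on each disk. A sketch, assuming stripe_sectors = chunk_sectors * data_disks as in the surrounding code:

    #include <stdio.h>

    typedef unsigned long long sector_t;

    int main(void)
    {
            unsigned int chunk_sectors = 8;    /* 4 KiB chunks */
            unsigned int data_disks = 3;       /* e.g. a 4-disk RAID5 */
            sector_t stripe_sectors = (sector_t)chunk_sectors * data_disks;
            sector_t first = 5, last = 70;     /* discard [5, 70) */

            /* round the start up and the end down to stripe boundaries */
            first = (first + stripe_sectors - 1) / stripe_sectors;
            last /= stripe_sectors;

            /* lines 5745-5746: each full stripe is chunk_sectors
             * deep on every data disk */
            first *= chunk_sectors;
            last *= chunk_sectors;

            printf("per-device range [%llu, %llu)\n", first, last);
            /* prints: per-device range [8, 16) */
            return 0;
    }
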
6045 int sectors_per_chunk = conf->chunk_sectors; in raid5_bio_lowest_chunk_sector()
6264 reshape_sectors = max(conf->chunk_sectors, conf->prev_chunk_sectors); in reshape_request()
7292 sectors &= ~((sector_t)conf->chunk_sectors - 1); in raid5_size()
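
raid5_size() here, raid5_run() at line 7993 and raid5_resize() at line 8325 all round a sector count down to a whole number of chunks by clearing the low bits, which again relies on chunk_sectors being a power of two. A one-line demonstration:

    #include <stdio.h>

    typedef unsigned long long sector_t;

    int main(void)
    {
            unsigned int chunk_sectors = 128;  /* 64 KiB chunks */
            sector_t sectors = 1000000;

            /* kernel: sectors &= ~((sector_t)conf->chunk_sectors - 1); */
            sectors &= ~((sector_t)chunk_sectors - 1);
            printf("%llu\n", sectors);         /* 999936 = 7812 * 128 */
            return 0;
    }
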
7316 max(conf->chunk_sectors, in alloc_scratch_buffer()
7389 conf->scribble_sectors = max(conf->chunk_sectors, in raid5_alloc_percpu()
7577 conf->chunk_sectors = mddev->new_chunk_sectors; in setup_conf()
7624 conf->prev_chunk_sectors = mddev->chunk_sectors; in setup_conf()
7627 conf->prev_chunk_sectors = conf->chunk_sectors; in setup_conf()
7634 ((mddev->chunk_sectors << 9) / RAID5_STRIPE_SIZE(conf)) * 4, in setup_conf()
7733 stripe = roundup_pow_of_two(data_disks * (mddev->chunk_sectors << 9)); in raid5_set_limits()
7736 lim.io_min = mddev->chunk_sectors << 9; in raid5_set_limits()
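
raid5_set_limits() exports the geometry to the block layer: chunk_sectors counts 512-byte sectors, so << 9 converts it to bytes; io_min is one chunk, and the bio-splitting boundary is the next power of two at or above one full data stripe (line 7733). In the sketch below, the io_opt line is an assumption drawn from nearby code, not part of the listing above:

    #include <stdio.h>

    int main(void)
    {
            unsigned int chunk_sectors = 1024; /* 512 KiB chunks */
            unsigned int data_disks = 3;       /* 4-disk RAID5 */

            unsigned int io_min = chunk_sectors << 9;      /* one chunk */
            unsigned int io_opt = io_min * data_disks;     /* full stripe */

            /* line 7733: roundup_pow_of_two(data_disks * (chunk_sectors << 9)),
             * modeled here with a naive loop */
            unsigned long stripe = 1;
            while (stripe < (unsigned long)io_min * data_disks)
                    stripe <<= 1;

            printf("io_min=%u io_opt=%u stripe=%lu\n", io_min, io_opt, stripe);
            /* io_min=524288 io_opt=1572864 stripe=2097152 */
            return 0;
    }
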
7837 int chunk_sectors; in raid5_run() local
7860 chunk_sectors = max(mddev->chunk_sectors, mddev->new_chunk_sectors); in raid5_run()
7862 if (sector_div(here_new, chunk_sectors * new_data_disks)) { in raid5_run()
7867 reshape_offset = here_new * chunk_sectors; in raid5_run()
7870 sector_div(here_old, chunk_sectors * (old_disks-max_degraded)); in raid5_run()
7881 if (abs(min_offset_diff) >= mddev->chunk_sectors && in raid5_run()
7890 ? (here_new * chunk_sectors + min_offset_diff <= in raid5_run()
7891 here_old * chunk_sectors) in raid5_run()
7892 : (here_new * chunk_sectors >= in raid5_run()
7893 here_old * chunk_sectors + (-min_offset_diff))) { in raid5_run()
7904 BUG_ON(mddev->chunk_sectors != mddev->new_chunk_sectors); in raid5_run()
7993 mddev->dev_sectors &= ~((sector_t)mddev->chunk_sectors - 1); in raid5_run()
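
In raid5_run(), an in-progress reshape is located by converting reshape_position into a stripe number under both the new and the old geometry (lines 7860-7893); sector_div() must leave no remainder under the new layout, and the two positions are then compared to rule out overlap. A sketch with plain 64-bit arithmetic and illustrative values:

    #include <stdio.h>

    typedef unsigned long long sector_t;

    int main(void)
    {
            unsigned int chunk_sectors = 128;  /* old == new here */
            unsigned int old_data_disks = 3, new_data_disks = 4;
            sector_t reshape_position = 1536;  /* from the superblock */

            if (reshape_position % ((sector_t)chunk_sectors * new_data_disks)) {
                    printf("reshape_position not on a new-layout stripe\n");
                    return 1;
            }
            sector_t here_new = reshape_position / (chunk_sectors * new_data_disks);
            sector_t here_old = reshape_position / (chunk_sectors * old_data_disks);

            /* line 7867: per-device offset of the reshape front */
            printf("new stripe %llu, old stripe %llu, reshape_offset %llu\n",
                   here_new, here_old, here_new * chunk_sectors);
            /* new stripe 3, old stripe 4, reshape_offset 384 */
            return 0;
    }
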
8071 conf->chunk_sectors / 2, mddev->layout); in raid5_status()
8325 sectors &= ~((sector_t)conf->chunk_sectors - 1); in raid5_resize()
8357 if (((mddev->chunk_sectors << 9) / RAID5_STRIPE_SIZE(conf)) * 4 in check_stripe_cache()
8363 ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9) in check_stripe_cache()
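
check_stripe_cache(), like the initial sizing at line 7634 in setup_conf(), enforces that the stripe cache can hold at least four chunks' worth of stripe_heads, since each stripe_head covers RAID5_STRIPE_SIZE bytes per device; a reshape could stall otherwise. A sketch, assuming the common 4 KiB stripe size:

    #include <stdio.h>

    int main(void)
    {
            unsigned int stripe_size = 4096;   /* RAID5_STRIPE_SIZE */
            unsigned int chunk_sectors = 1024; /* 512 KiB chunks */
            unsigned int max_nr_stripes = 256; /* cache capacity */

            /* lines 8357-8363: four chunks' worth of stripe_heads */
            unsigned int needed = ((chunk_sectors << 9) / stripe_size) * 4;

            printf("need %u stripe_heads, cache holds %u: %s\n",
                   needed, max_nr_stripes,
                   needed > max_nr_stripes ? "too small" : "ok");
            /* need 512 stripe_heads, cache holds 256: too small */
            return 0;
    }
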
8378 mddev->new_chunk_sectors == mddev->chunk_sectors) in check_reshape()
8398 if (mddev->new_chunk_sectors > mddev->chunk_sectors || in check_reshape()
8404 mddev->chunk_sectors) in check_reshape()
8466 conf->prev_chunk_sectors = conf->chunk_sectors; in raid5_start_reshape()
8467 conf->chunk_sectors = mddev->new_chunk_sectors; in raid5_start_reshape()
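
At reshape start the old chunk size is parked in prev_chunk_sectors and chunk_sectors takes the new value (lines 8466-8467); the address-mapping code then picks one or the other depending on whether a sector has been reshaped yet, as at lines 2970, 3166 and 3604. A compact sketch of that handoff:

    #include <stdio.h>

    struct r5conf {
            unsigned int chunk_sectors;
            unsigned int prev_chunk_sectors;
    };

    int main(void)
    {
            struct r5conf conf = { .chunk_sectors = 128,
                                   .prev_chunk_sectors = 128 };

            /* raid5_start_reshape(): old size becomes "previous" */
            conf.prev_chunk_sectors = conf.chunk_sectors;
            conf.chunk_sectors = 256;          /* mddev->new_chunk_sectors */

            for (int previous = 0; previous <= 1; previous++)
                    printf("previous=%d -> %u sectors per chunk\n", previous,
                           previous ? conf.prev_chunk_sectors
                                    : conf.chunk_sectors);
            return 0;
    }
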
8594 mddev->chunk_sectors = conf->chunk_sectors; in raid5_finish_reshape()
8653 mddev->new_chunk_sectors = mddev->chunk_sectors; in raid45_takeover_raid0()
8758 conf->chunk_sectors = new_chunk ; in raid5_check_reshape()
8759 mddev->chunk_sectors = new_chunk; in raid5_check_reshape()