Lines matching refs:mddev (drivers/md/raid5.c)

72 static void raid5_quiesce(struct mddev *mddev, int quiesce);
199 md_wakeup_thread(conf->mddev->thread); in raid5_wakeup_stripe_thread()
272 md_wakeup_thread(conf->mddev->thread); in do_release_stripe()
278 md_wakeup_thread(conf->mddev->thread); in do_release_stripe()
360 md_wakeup_thread(conf->mddev->thread); in release_inactive_stripe_list()
406 if (unlikely(!conf->mddev->thread) || in raid5_release_stripe()
411 md_wakeup_thread(conf->mddev->thread); in raid5_release_stripe()
747 int degraded = conf->mddev->degraded; in has_failed()
749 if (test_bit(MD_BROKEN, &conf->mddev->flags)) in has_failed()
752 if (conf->mddev->reshape_position != MaxSector) in has_failed()
990 md_wakeup_thread(conf->mddev->thread); in stripe_add_to_batch_list()
1220 if (!conf->mddev->external && in ops_run_io()
1221 conf->mddev->sb_flags) { in ops_run_io()
1226 md_check_recovery(conf->mddev); in ops_run_io()
1234 md_wait_for_blocked_rdev(rdev, conf->mddev); in ops_run_io()
1237 rdev_dec_pending(rdev, conf->mddev); in ops_run_io()
1292 mddev_trace_remap(conf->mddev, bi, sh->dev[i].sector); in ops_run_io()
1332 mddev_trace_remap(conf->mddev, rbi, sh->dev[i].sector); in ops_run_io()
2407 if (mddev_is_dm(conf->mddev)) in grow_stripes()
2409 "raid%d-%p", conf->level, conf->mddev); in grow_stripes()
2412 "raid%d-%s", conf->level, mdname(conf->mddev)); in grow_stripes()
2481 raid5_quiesce(conf->mddev, true); in resize_chunks()
2495 raid5_quiesce(conf->mddev, false); in resize_chunks()
2537 md_allow_write(conf->mddev); in resize_stripes()
2748 mdname(conf->mddev), RAID5_STRIPE_SECTORS(conf), in raid5_end_read_request()
2776 mdname(conf->mddev), in raid5_end_read_request()
2779 else if (conf->mddev->degraded >= conf->max_degraded) { in raid5_end_read_request()
2783 mdname(conf->mddev), in raid5_end_read_request()
2791 mdname(conf->mddev), in raid5_end_read_request()
2798 mdname(conf->mddev), in raid5_end_read_request()
2802 mdname(conf->mddev), rdev->bdev); in raid5_end_read_request()
2824 md_error(conf->mddev, rdev); in raid5_end_read_request()
2827 rdev_dec_pending(rdev, conf->mddev); in raid5_end_read_request()
2870 md_error(conf->mddev, rdev); in raid5_end_write_request()
2880 &rdev->mddev->recovery); in raid5_end_write_request()
2892 rdev_dec_pending(rdev, conf->mddev); in raid5_end_write_request()
2907 static void raid5_error(struct mddev *mddev, struct md_rdev *rdev) in raid5_error() argument
2909 struct r5conf *conf = mddev->private; in raid5_error()
2914 mdname(mddev), rdev->bdev); in raid5_error()
2919 mddev->degraded = raid5_calc_degraded(conf); in raid5_error()
2922 set_bit(MD_BROKEN, &conf->mddev->flags); in raid5_error()
2923 conf->recovery_disabled = mddev->recovery_disabled; in raid5_error()
2926 mdname(mddev), mddev->degraded, conf->raid_disks); in raid5_error()
2929 mdname(mddev), conf->raid_disks - mddev->degraded); in raid5_error()
2933 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in raid5_error()
2936 set_mask_bits(&mddev->sb_flags, 0, in raid5_error()
2938 r5c_update_on_rdev_error(mddev, rdev); in raid5_error()
3261 mdname(conf->mddev)); in raid5_compute_blocknr()
3518 md_write_inc(conf->mddev, bi); in __add_stripe_bio()
3539 if (conf->mddev->bitmap && firstwrite && !sh->batch_head) { in __add_stripe_bio()
3606 md_error(conf->mddev, rdev); in handle_failed_stripe()
3607 rdev_dec_pending(rdev, conf->mddev); in handle_failed_stripe()
3626 md_write_end(conf->mddev); in handle_failed_stripe()
3642 md_write_end(conf->mddev); in handle_failed_stripe()
3681 md_wakeup_thread(conf->mddev->thread); in handle_failed_stripe()
3704 if (test_bit(MD_RECOVERY_RECOVER, &conf->mddev->recovery)) { in handle_failed_sync()
3728 conf->mddev->recovery_disabled; in handle_failed_sync()
3730 md_done_sync(conf->mddev, RAID5_STRIPE_SECTORS(conf), !abort); in handle_failed_sync()
3743 || rdev->mddev->resync_offset <= sh->sector)) in want_replace()
3835 sh->sector < sh->raid_conf->mddev->resync_offset) in need_this_block()
4015 md_write_end(conf->mddev); in handle_stripe_clean_event()
4073 md_wakeup_thread(conf->mddev->thread); in handle_stripe_clean_event()
4100 sector_t resync_offset = conf->mddev->resync_offset; in handle_stripe_dirtying()
4151 mddev_add_trace_msg(conf->mddev, "raid5 rmw %llu %d", in handle_stripe_dirtying()
4230 if (rcw && !mddev_is_dm(conf->mddev)) in handle_stripe_dirtying()
4231 blk_add_trace_msg(conf->mddev->gendisk->queue, in handle_stripe_dirtying()
4319 atomic64_add(RAID5_STRIPE_SECTORS(conf), &conf->mddev->resync_mismatches); in handle_parity_checks5()
4320 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) { in handle_parity_checks5()
4324 "%llu-%llu\n", mdname(conf->mddev), in handle_parity_checks5()
4446 mdname(conf->mddev), in handle_parity_checks6()
4483 atomic64_add(RAID5_STRIPE_SECTORS(conf), &conf->mddev->resync_mismatches); in handle_parity_checks6()
4484 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) { in handle_parity_checks6()
4488 "%llu-%llu\n", mdname(conf->mddev), in handle_parity_checks6()
4780 sh->sector >= conf->mddev->resync_offset || in analyse_stripe()
4781 test_bit(MD_RECOVERY_REQUESTED, &(conf->mddev->recovery))) in analyse_stripe()
4941 test_bit(MD_SB_CHANGE_PENDING, &conf->mddev->sb_flags)) { in handle_stripe()
4953 rdev_dec_pending(s.blocked_rdev, conf->mddev); in handle_stripe()
5141 md_done_sync(conf->mddev, RAID5_STRIPE_SECTORS(conf), 1); in handle_stripe()
5150 if (s.failed <= conf->max_degraded && !conf->mddev->ro) in handle_stripe()
5208 md_done_sync(conf->mddev, RAID5_STRIPE_SECTORS(conf), 1); in handle_stripe()
5218 if (conf->mddev->external) in handle_stripe()
5220 conf->mddev); in handle_stripe()
5227 conf->mddev); in handle_stripe()
5239 md_error(conf->mddev, rdev); in handle_stripe()
5240 rdev_dec_pending(rdev, conf->mddev); in handle_stripe()
5246 rdev_dec_pending(rdev, conf->mddev); in handle_stripe()
5255 rdev_dec_pending(rdev, conf->mddev); in handle_stripe()
5272 md_wakeup_thread(conf->mddev->thread); in handle_stripe()
5313 static int in_chunk_boundary(struct mddev *mddev, struct bio *bio) in in_chunk_boundary() argument
5315 struct r5conf *conf = mddev->private; in in_chunk_boundary()
5339 md_wakeup_thread(conf->mddev->thread); in add_bio_to_retry()
5373 struct mddev *mddev = rdev->mddev; in raid5_align_endio() local
5374 struct r5conf *conf = mddev->private; in raid5_align_endio()
5379 rdev_dec_pending(rdev, conf->mddev); in raid5_align_endio()
5393 static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio) in raid5_read_one_chunk() argument
5395 struct r5conf *conf = mddev->private; in raid5_read_one_chunk()
5402 if (!in_chunk_boundary(mddev, raid_bio)) { in raid5_read_one_chunk()
5429 rdev_dec_pending(rdev, mddev); in raid5_read_one_chunk()
5433 md_account_bio(mddev, &raid_bio); in raid5_read_one_chunk()
5437 &mddev->bio_set); in raid5_read_one_chunk()
5464 mddev_trace_remap(mddev, align_bio, raid_bio->bi_iter.bi_sector); in raid5_read_one_chunk()
5469 static struct bio *chunk_aligned_read(struct mddev *mddev, struct bio *raid_bio) in chunk_aligned_read() argument
5473 unsigned chunk_sects = mddev->chunk_sectors; in chunk_aligned_read()
5477 struct r5conf *conf = mddev->private; in chunk_aligned_read()
5484 if (!raid5_read_one_chunk(mddev, raid_bio)) in chunk_aligned_read()
5604 struct mddev *mddev = cb->cb.data; in raid5_unplug() local
5605 struct r5conf *conf = mddev->private; in raid5_unplug()
5633 if (!mddev_is_dm(mddev)) in raid5_unplug()
5634 trace_block_unplug(mddev->gendisk->queue, cnt, !from_schedule); in raid5_unplug()
5638 static void release_stripe_plug(struct mddev *mddev, in release_stripe_plug() argument
5642 raid5_unplug, mddev, in release_stripe_plug()
5666 static void make_discard_request(struct mddev *mddev, struct bio *bi) in make_discard_request() argument
5668 struct r5conf *conf = mddev->private; in make_discard_request()
5677 if (mddev->reshape_position != MaxSector) in make_discard_request()
5730 md_write_inc(mddev, bi); in make_discard_request()
5734 if (conf->mddev->bitmap) { in make_discard_request()
5743 release_stripe_plug(mddev, sh); in make_discard_request()
5749 static bool ahead_of_reshape(struct mddev *mddev, sector_t sector, in ahead_of_reshape() argument
5752 return mddev->reshape_backwards ? sector < reshape_sector : in ahead_of_reshape()
5756 static bool range_ahead_of_reshape(struct mddev *mddev, sector_t min, in range_ahead_of_reshape() argument
5759 return mddev->reshape_backwards ? max < reshape_sector : in range_ahead_of_reshape()
5763 static bool stripe_ahead_of_reshape(struct mddev *mddev, struct r5conf *conf, in stripe_ahead_of_reshape() argument
5780 if (!range_ahead_of_reshape(mddev, min_sector, max_sector, in stripe_ahead_of_reshape()
5817 md_wakeup_thread(conf->mddev->thread); in add_all_stripe_bios()
5849 static enum reshape_loc get_reshape_loc(struct mddev *mddev, in get_reshape_loc() argument
5871 if (ahead_of_reshape(mddev, logical_sector, reshape_progress)) in get_reshape_loc()
5873 if (ahead_of_reshape(mddev, logical_sector, reshape_safe)) in get_reshape_loc()
5878 static void raid5_bitmap_sector(struct mddev *mddev, sector_t *offset, in raid5_bitmap_sector() argument
5881 struct r5conf *conf = mddev->private; in raid5_bitmap_sector()
5902 loc = get_reshape_loc(mddev, conf, prev_start); in raid5_bitmap_sector()
5926 static enum stripe_result make_stripe_request(struct mddev *mddev, in make_stripe_request() argument
5939 loc = get_reshape_loc(mddev, conf, logical_sector); in make_stripe_request()
5964 stripe_ahead_of_reshape(mddev, conf, sh)) { in make_stripe_request()
5984 md_wakeup_thread(mddev->thread); in make_stripe_request()
6015 release_stripe_plug(mddev, sh); in make_stripe_request()
6021 if (ret == STRIPE_SCHEDULE_AND_RETRY && reshape_interrupted(mddev)) { in make_stripe_request()
6061 static bool raid5_make_request(struct mddev *mddev, struct bio * bi) in raid5_make_request() argument
6065 struct r5conf *conf = mddev->private; in raid5_make_request()
6078 if (md_flush_request(mddev, bi)) in raid5_make_request()
6089 md_write_start(mddev, bi); in raid5_make_request()
6095 if (rw == READ && mddev->degraded == 0 && in raid5_make_request()
6096 mddev->reshape_position == MaxSector) { in raid5_make_request()
6097 bi = chunk_aligned_read(mddev, bi); in raid5_make_request()
6103 make_discard_request(mddev, bi); in raid5_make_request()
6104 md_write_end(mddev); in raid5_make_request()
6122 get_reshape_loc(mddev, conf, logical_sector) == LOC_INSIDE_RESHAPE) { in raid5_make_request()
6125 md_write_end(mddev); in raid5_make_request()
6128 md_account_bio(mddev, &bi); in raid5_make_request()
6147 res = make_stripe_request(mddev, conf, &ctx, logical_sector, in raid5_make_request()
6188 md_write_end(mddev); in raid5_make_request()
6198 static sector_t raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks);
6200 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *skipped) in reshape_request() argument
6211 struct r5conf *conf = mddev->private; in reshape_request()
6228 if (mddev->reshape_backwards && in reshape_request()
6229 conf->reshape_progress < raid5_size(mddev, 0, 0)) { in reshape_request()
6230 sector_nr = raid5_size(mddev, 0, 0) in reshape_request()
6232 } else if (mddev->reshape_backwards && in reshape_request()
6236 } else if (!mddev->reshape_backwards && in reshape_request()
6241 mddev->curr_resync_completed = sector_nr; in reshape_request()
6242 sysfs_notify_dirent_safe(mddev->sysfs_completed); in reshape_request()
6268 if (mddev->reshape_backwards) { in reshape_request()
6288 if (mddev->reshape_backwards) { in reshape_request()
6293 if (WARN_ON((mddev->dev_sectors & in reshape_request()
6330 if ((mddev->reshape_backwards in reshape_request()
6337 || test_bit(MD_RECOVERY_INTR, &mddev->recovery)); in reshape_request()
6340 mddev->reshape_position = conf->reshape_progress; in reshape_request()
6341 mddev->curr_resync_completed = sector_nr; in reshape_request()
6342 if (!mddev->reshape_backwards) in reshape_request()
6344 rdev_for_each(rdev, mddev) in reshape_request()
6352 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in reshape_request()
6353 md_wakeup_thread(mddev->thread); in reshape_request()
6354 wait_event(mddev->sb_wait, mddev->sb_flags == 0 || in reshape_request()
6355 test_bit(MD_RECOVERY_INTR, &mddev->recovery)); in reshape_request()
6356 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) in reshape_request()
6359 conf->reshape_safe = mddev->reshape_position; in reshape_request()
6362 sysfs_notify_dirent_safe(mddev->sysfs_completed); in reshape_request()
6384 if (s < raid5_size(mddev, 0, 0)) { in reshape_request()
6399 if (mddev->reshape_backwards) in reshape_request()
6416 if (last_sector >= mddev->dev_sectors) in reshape_request()
6417 last_sector = mddev->dev_sectors - 1; in reshape_request()
6440 if (mddev->curr_resync_completed > mddev->resync_max || in reshape_request()
6441 (sector_nr - mddev->curr_resync_completed) * 2 in reshape_request()
6442 >= mddev->resync_max - mddev->curr_resync_completed) { in reshape_request()
6446 || test_bit(MD_RECOVERY_INTR, &mddev->recovery)); in reshape_request()
6449 mddev->reshape_position = conf->reshape_progress; in reshape_request()
6450 mddev->curr_resync_completed = sector_nr; in reshape_request()
6451 if (!mddev->reshape_backwards) in reshape_request()
6453 rdev_for_each(rdev, mddev) in reshape_request()
6460 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in reshape_request()
6461 md_wakeup_thread(mddev->thread); in reshape_request()
6462 wait_event(mddev->sb_wait, in reshape_request()
6463 !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags) in reshape_request()
6464 || test_bit(MD_RECOVERY_INTR, &mddev->recovery)); in reshape_request()
6465 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) in reshape_request()
6468 conf->reshape_safe = mddev->reshape_position; in reshape_request()
6471 sysfs_notify_dirent_safe(mddev->sysfs_completed); in reshape_request()
6477 static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_nr, in raid5_sync_request() argument
6480 struct r5conf *conf = mddev->private; in raid5_sync_request()
6489 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { in raid5_sync_request()
6494 if (mddev->curr_resync < max_sector) /* aborted */ in raid5_sync_request()
6495 mddev->bitmap_ops->end_sync(mddev, mddev->curr_resync, in raid5_sync_request()
6499 mddev->bitmap_ops->close_sync(mddev); in raid5_sync_request()
6507 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) in raid5_sync_request()
6508 return reshape_request(mddev, sector_nr, skipped); in raid5_sync_request()
6520 if (mddev->degraded >= conf->max_degraded && in raid5_sync_request()
6521 test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { in raid5_sync_request()
6522 sector_t rv = mddev->dev_sectors - sector_nr; in raid5_sync_request()
6526 if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && in raid5_sync_request()
6528 !mddev->bitmap_ops->start_sync(mddev, sector_nr, &sync_blocks, in raid5_sync_request()
6538 mddev->bitmap_ops->cond_end_sync(mddev, sector_nr, false); in raid5_sync_request()
6560 mddev->bitmap_ops->start_sync(mddev, sector_nr, &sync_blocks, in raid5_sync_request()
6689 struct mddev *mddev = conf->mddev; in raid5_do_work() local
6710 wait_event_lock_irq(mddev->sb_wait, in raid5_do_work()
6711 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags), in raid5_do_work()
6737 struct mddev *mddev = thread->mddev; in raid5d() local
6738 struct r5conf *conf = mddev->private; in raid5d()
6744 md_check_recovery(mddev); in raid5d()
6754 if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) in raid5d()
6766 mddev->bitmap_ops->unplug(mddev, true); in raid5d()
6789 if (mddev->sb_flags & ~(1 << MD_SB_CHANGE_PENDING)) { in raid5d()
6791 md_check_recovery(mddev); in raid5d()
6819 raid5_show_stripe_cache_size(struct mddev *mddev, char *page) in raid5_show_stripe_cache_size() argument
6823 spin_lock(&mddev->lock); in raid5_show_stripe_cache_size()
6824 conf = mddev->private; in raid5_show_stripe_cache_size()
6827 spin_unlock(&mddev->lock); in raid5_show_stripe_cache_size()
6832 raid5_set_cache_size(struct mddev *mddev, int size) in raid5_set_cache_size() argument
6835 struct r5conf *conf = mddev->private; in raid5_set_cache_size()
6847 md_allow_write(mddev); in raid5_set_cache_size()
6863 raid5_store_stripe_cache_size(struct mddev *mddev, const char *page, size_t len) in raid5_store_stripe_cache_size() argument
6873 err = mddev_lock(mddev); in raid5_store_stripe_cache_size()
6876 conf = mddev->private; in raid5_store_stripe_cache_size()
6880 err = raid5_set_cache_size(mddev, new); in raid5_store_stripe_cache_size()
6881 mddev_unlock(mddev); in raid5_store_stripe_cache_size()
6892 raid5_show_rmw_level(struct mddev *mddev, char *page) in raid5_show_rmw_level() argument
6894 struct r5conf *conf = mddev->private; in raid5_show_rmw_level()
6902 raid5_store_rmw_level(struct mddev *mddev, const char *page, size_t len) in raid5_store_rmw_level() argument
6904 struct r5conf *conf = mddev->private; in raid5_store_rmw_level()
6934 raid5_show_stripe_size(struct mddev *mddev, char *page) in raid5_show_stripe_size() argument
6939 spin_lock(&mddev->lock); in raid5_show_stripe_size()
6940 conf = mddev->private; in raid5_show_stripe_size()
6943 spin_unlock(&mddev->lock); in raid5_show_stripe_size()
6949 raid5_store_stripe_size(struct mddev *mddev, const char *page, size_t len) in raid5_store_stripe_size() argument
6971 err = mddev_suspend_and_lock(mddev); in raid5_store_stripe_size()
6975 conf = mddev->private; in raid5_store_stripe_size()
6987 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || in raid5_store_stripe_size()
6988 mddev->reshape_position != MaxSector || mddev->sysfs_active) { in raid5_store_stripe_size()
7003 mdname(mddev)); in raid5_store_stripe_size()
7009 mddev_unlock_and_resume(mddev); in raid5_store_stripe_size()
7025 raid5_show_preread_threshold(struct mddev *mddev, char *page) in raid5_show_preread_threshold() argument
7029 spin_lock(&mddev->lock); in raid5_show_preread_threshold()
7030 conf = mddev->private; in raid5_show_preread_threshold()
7033 spin_unlock(&mddev->lock); in raid5_show_preread_threshold()
7038 raid5_store_preread_threshold(struct mddev *mddev, const char *page, size_t len) in raid5_store_preread_threshold() argument
7049 err = mddev_lock(mddev); in raid5_store_preread_threshold()
7052 conf = mddev->private; in raid5_store_preread_threshold()
7059 mddev_unlock(mddev); in raid5_store_preread_threshold()
7070 raid5_show_skip_copy(struct mddev *mddev, char *page) in raid5_show_skip_copy() argument
7074 spin_lock(&mddev->lock); in raid5_show_skip_copy()
7075 conf = mddev->private; in raid5_show_skip_copy()
7078 spin_unlock(&mddev->lock); in raid5_show_skip_copy()
7083 raid5_store_skip_copy(struct mddev *mddev, const char *page, size_t len) in raid5_store_skip_copy() argument
7095 err = mddev_suspend_and_lock(mddev); in raid5_store_skip_copy()
7098 conf = mddev->private; in raid5_store_skip_copy()
7102 struct request_queue *q = mddev->gendisk->queue; in raid5_store_skip_copy()
7112 mddev_unlock_and_resume(mddev); in raid5_store_skip_copy()
7122 stripe_cache_active_show(struct mddev *mddev, char *page) in stripe_cache_active_show() argument
7124 struct r5conf *conf = mddev->private; in stripe_cache_active_show()
7135 raid5_show_group_thread_cnt(struct mddev *mddev, char *page) in raid5_show_group_thread_cnt() argument
7139 spin_lock(&mddev->lock); in raid5_show_group_thread_cnt()
7140 conf = mddev->private; in raid5_show_group_thread_cnt()
7143 spin_unlock(&mddev->lock); in raid5_show_group_thread_cnt()
7151 raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len) in raid5_store_group_thread_cnt() argument
7167 err = mddev_suspend_and_lock(mddev); in raid5_store_group_thread_cnt()
7170 raid5_quiesce(mddev, true); in raid5_store_group_thread_cnt()
7172 conf = mddev->private; in raid5_store_group_thread_cnt()
7194 raid5_quiesce(mddev, false); in raid5_store_group_thread_cnt()
7195 mddev_unlock_and_resume(mddev); in raid5_store_group_thread_cnt()
7276 raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks) in raid5_size() argument
7278 struct r5conf *conf = mddev->private; in raid5_size()
7281 sectors = mddev->dev_sectors; in raid5_size()
7423 static struct r5conf *setup_conf(struct mddev *mddev) in setup_conf() argument
7435 if (mddev->new_level != 5 in setup_conf()
7436 && mddev->new_level != 4 in setup_conf()
7437 && mddev->new_level != 6) { in setup_conf()
7439 mdname(mddev), mddev->new_level); in setup_conf()
7442 if ((mddev->new_level == 5 in setup_conf()
7443 && !algorithm_valid_raid5(mddev->new_layout)) || in setup_conf()
7444 (mddev->new_level == 6 in setup_conf()
7445 && !algorithm_valid_raid6(mddev->new_layout))) { in setup_conf()
7447 mdname(mddev), mddev->new_layout); in setup_conf()
7450 if (mddev->new_level == 6 && mddev->raid_disks < 4) { in setup_conf()
7452 mdname(mddev), mddev->raid_disks); in setup_conf()
7456 if (!mddev->new_chunk_sectors || in setup_conf()
7457 (mddev->new_chunk_sectors << 9) % PAGE_SIZE || in setup_conf()
7458 !is_power_of_2(mddev->new_chunk_sectors)) { in setup_conf()
7460 mdname(mddev), mddev->new_chunk_sectors << 9); in setup_conf()
7507 rdev_for_each(rdev, mddev) { in setup_conf()
7517 conf->recovery_disabled = mddev->recovery_disabled - 1; in setup_conf()
7519 conf->raid_disks = mddev->raid_disks; in setup_conf()
7520 if (mddev->reshape_position == MaxSector) in setup_conf()
7521 conf->previous_raid_disks = mddev->raid_disks; in setup_conf()
7523 conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks; in setup_conf()
7541 conf->mddev = mddev; in setup_conf()
7570 conf->level = mddev->new_level; in setup_conf()
7571 conf->chunk_sectors = mddev->new_chunk_sectors; in setup_conf()
7576 pr_debug("raid456: run(%s) called.\n", mdname(mddev)); in setup_conf()
7579 rdev_for_each(rdev, mddev) { in setup_conf()
7598 mdname(mddev), rdev->bdev, raid_disk); in setup_conf()
7604 conf->level = mddev->new_level; in setup_conf()
7615 conf->algorithm = mddev->new_layout; in setup_conf()
7616 conf->reshape_progress = mddev->reshape_position; in setup_conf()
7618 conf->prev_chunk_sectors = mddev->chunk_sectors; in setup_conf()
7619 conf->prev_algo = mddev->layout; in setup_conf()
7626 if (mddev->reshape_position != MaxSector) { in setup_conf()
7628 ((mddev->chunk_sectors << 9) / RAID5_STRIPE_SIZE(conf)) * 4, in setup_conf()
7629 ((mddev->new_chunk_sectors << 9) / RAID5_STRIPE_SIZE(conf)) * 4); in setup_conf()
7633 mdname(mddev), conf->min_nr_stripes); in setup_conf()
7640 mdname(mddev), memory); in setup_conf()
7644 pr_debug("md/raid:%s: allocated %dkB\n", mdname(mddev), memory); in setup_conf()
7650 conf->shrinker = shrinker_alloc(0, "md-raid5:%s", mdname(mddev)); in setup_conf()
7654 mdname(mddev)); in setup_conf()
7666 sprintf(pers_name, "raid%d", mddev->new_level); in setup_conf()
7668 md_register_thread(raid5d, mddev, pers_name)); in setup_conf()
7671 mdname(mddev)); in setup_conf()
7710 static int raid5_set_limits(struct mddev *mddev) in raid5_set_limits() argument
7712 struct r5conf *conf = mddev->private; in raid5_set_limits()
7727 stripe = roundup_pow_of_two(data_disks * (mddev->chunk_sectors << 9)); in raid5_set_limits()
7730 lim.io_min = mddev->chunk_sectors << 9; in raid5_set_limits()
7735 mddev_stack_rdev_limits(mddev, &lim, 0); in raid5_set_limits()
7736 rdev_for_each(rdev, mddev) in raid5_set_limits()
7738 mddev->gendisk->disk_name); in raid5_set_limits()
7768 return queue_limits_set(mddev->gendisk->queue, &lim); in raid5_set_limits()
7771 static int raid5_run(struct mddev *mddev) in raid5_run() argument
7783 if (mddev->resync_offset != MaxSector) in raid5_run()
7785 mdname(mddev)); in raid5_run()
7787 rdev_for_each(rdev, mddev) { in raid5_run()
7800 } else if (mddev->reshape_backwards && in raid5_run()
7803 else if (!mddev->reshape_backwards && in raid5_run()
7808 if ((test_bit(MD_HAS_JOURNAL, &mddev->flags) || journal_dev) && in raid5_run()
7809 (mddev->bitmap_info.offset || mddev->bitmap_info.file)) { in raid5_run()
7811 mdname(mddev)); in raid5_run()
7815 if (mddev->reshape_position != MaxSector) { in raid5_run()
7830 int max_degraded = (mddev->level == 6 ? 2 : 1); in raid5_run()
7836 mdname(mddev)); in raid5_run()
7840 if (mddev->new_level != mddev->level) { in raid5_run()
7842 mdname(mddev)); in raid5_run()
7845 old_disks = mddev->raid_disks - mddev->delta_disks; in raid5_run()
7853 here_new = mddev->reshape_position; in raid5_run()
7854 chunk_sectors = max(mddev->chunk_sectors, mddev->new_chunk_sectors); in raid5_run()
7855 new_data_disks = mddev->raid_disks - max_degraded; in raid5_run()
7858 mdname(mddev)); in raid5_run()
7863 here_old = mddev->reshape_position; in raid5_run()
7867 if (mddev->delta_disks == 0) { in raid5_run()
7875 if (abs(min_offset_diff) >= mddev->chunk_sectors && in raid5_run()
7876 abs(min_offset_diff) >= mddev->new_chunk_sectors) in raid5_run()
7878 else if (mddev->ro == 0) { in raid5_run()
7880 mdname(mddev)); in raid5_run()
7883 } else if (mddev->reshape_backwards in raid5_run()
7890 mdname(mddev)); in raid5_run()
7893 pr_debug("md/raid:%s: reshape will continue\n", mdname(mddev)); in raid5_run()
7896 BUG_ON(mddev->level != mddev->new_level); in raid5_run()
7897 BUG_ON(mddev->layout != mddev->new_layout); in raid5_run()
7898 BUG_ON(mddev->chunk_sectors != mddev->new_chunk_sectors); in raid5_run()
7899 BUG_ON(mddev->delta_disks != 0); in raid5_run()
7902 if (test_bit(MD_HAS_JOURNAL, &mddev->flags) && in raid5_run()
7903 test_bit(MD_HAS_PPL, &mddev->flags)) { in raid5_run()
7905 mdname(mddev)); in raid5_run()
7906 clear_bit(MD_HAS_PPL, &mddev->flags); in raid5_run()
7907 clear_bit(MD_HAS_MULTIPLE_PPLS, &mddev->flags); in raid5_run()
7910 if (mddev->private == NULL) in raid5_run()
7911 conf = setup_conf(mddev); in raid5_run()
7913 conf = mddev->private; in raid5_run()
7918 if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) { in raid5_run()
7921 mdname(mddev)); in raid5_run()
7922 mddev->ro = 1; in raid5_run()
7923 set_disk_ro(mddev->gendisk, 1); in raid5_run()
7924 } else if (mddev->resync_offset == MaxSector) in raid5_run()
7925 set_bit(MD_JOURNAL_CLEAN, &mddev->flags); in raid5_run()
7929 rcu_assign_pointer(mddev->thread, conf->thread); in raid5_run()
7931 mddev->private = conf; in raid5_run()
7955 if (mddev->major_version == 0 && in raid5_run()
7956 mddev->minor_version > 90) in raid5_run()
7978 mddev->degraded = raid5_calc_degraded(conf); in raid5_run()
7982 mdname(mddev), mddev->degraded, conf->raid_disks); in raid5_run()
7987 mddev->dev_sectors &= ~((sector_t)mddev->chunk_sectors - 1); in raid5_run()
7988 mddev->resync_max_sectors = mddev->dev_sectors; in raid5_run()
7990 if (mddev->degraded > dirty_parity_disks && in raid5_run()
7991 mddev->resync_offset != MaxSector) { in raid5_run()
7992 if (test_bit(MD_HAS_PPL, &mddev->flags)) in raid5_run()
7994 mdname(mddev)); in raid5_run()
7995 else if (mddev->ok_start_degraded) in raid5_run()
7997 mdname(mddev)); in raid5_run()
8000 mdname(mddev)); in raid5_run()
8006 mdname(mddev), conf->level, in raid5_run()
8007 mddev->raid_disks-mddev->degraded, mddev->raid_disks, in raid5_run()
8008 mddev->new_layout); in raid5_run()
8015 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); in raid5_run()
8016 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); in raid5_run()
8017 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); in raid5_run()
8018 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in raid5_run()
8022 if (mddev->to_remove == &raid5_attrs_group) in raid5_run()
8023 mddev->to_remove = NULL; in raid5_run()
8024 else if (mddev->kobj.sd && in raid5_run()
8025 sysfs_create_group(&mddev->kobj, &raid5_attrs_group)) in raid5_run()
8027 mdname(mddev)); in raid5_run()
8028 md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); in raid5_run()
8030 if (!mddev_is_dm(mddev)) { in raid5_run()
8031 ret = raid5_set_limits(mddev); in raid5_run()
8041 md_unregister_thread(mddev, &mddev->thread); in raid5_run()
8044 mddev->private = NULL; in raid5_run()
8045 pr_warn("md/raid:%s: failed to run raid set.\n", mdname(mddev)); in raid5_run()
8049 static void raid5_free(struct mddev *mddev, void *priv) in raid5_free() argument
8054 mddev->to_remove = &raid5_attrs_group; in raid5_free()
8057 static void raid5_status(struct seq_file *seq, struct mddev *mddev) in raid5_status() argument
8059 struct r5conf *conf = mddev->private; in raid5_status()
8062 lockdep_assert_held(&mddev->lock); in raid5_status()
8064 seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level, in raid5_status()
8065 conf->chunk_sectors / 2, mddev->layout); in raid5_status()
8066 seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded); in raid5_status()
8087 conf->raid_disks - conf->mddev->degraded); in print_raid5_conf()
8098 static int raid5_spare_active(struct mddev *mddev) in raid5_spare_active() argument
8101 struct r5conf *conf = mddev->private; in raid5_spare_active()
8136 mddev->degraded = raid5_calc_degraded(conf); in raid5_spare_active()
8142 static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev) in raid5_remove_disk() argument
8144 struct r5conf *conf = mddev->private; in raid5_remove_disk()
8190 mddev->recovery_disabled != conf->recovery_disabled && in raid5_remove_disk()
8222 static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev) in raid5_add_disk() argument
8224 struct r5conf *conf = mddev->private; in raid5_add_disk()
8251 if (mddev->recovery_disabled == conf->recovery_disabled) in raid5_add_disk()
8288 mddev->reshape_position == MaxSector && in raid5_add_disk()
8304 static int raid5_resize(struct mddev *mddev, sector_t sectors) in raid5_resize() argument
8314 struct r5conf *conf = mddev->private; in raid5_resize()
8320 newsize = raid5_size(mddev, sectors, mddev->raid_disks); in raid5_resize()
8321 if (mddev->external_size && in raid5_resize()
8322 mddev->array_sectors > newsize) in raid5_resize()
8325 ret = mddev->bitmap_ops->resize(mddev, sectors, 0, false); in raid5_resize()
8329 md_set_array_sectors(mddev, newsize); in raid5_resize()
8330 if (sectors > mddev->dev_sectors && in raid5_resize()
8331 mddev->resync_offset > mddev->dev_sectors) { in raid5_resize()
8332 mddev->resync_offset = mddev->dev_sectors; in raid5_resize()
8333 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in raid5_resize()
8335 mddev->dev_sectors = sectors; in raid5_resize()
8336 mddev->resync_max_sectors = sectors; in raid5_resize()
8340 static int check_stripe_cache(struct mddev *mddev) in check_stripe_cache() argument
8350 struct r5conf *conf = mddev->private; in check_stripe_cache()
8351 if (((mddev->chunk_sectors << 9) / RAID5_STRIPE_SIZE(conf)) * 4 in check_stripe_cache()
8353 ((mddev->new_chunk_sectors << 9) / RAID5_STRIPE_SIZE(conf)) * 4 in check_stripe_cache()
8356 mdname(mddev), in check_stripe_cache()
8357 ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9) in check_stripe_cache()
8364 static int check_reshape(struct mddev *mddev) in check_reshape() argument
8366 struct r5conf *conf = mddev->private; in check_reshape()
8370 if (mddev->delta_disks == 0 && in check_reshape()
8371 mddev->new_layout == mddev->layout && in check_reshape()
8372 mddev->new_chunk_sectors == mddev->chunk_sectors) in check_reshape()
8376 if (mddev->delta_disks < 0 && mddev->reshape_position == MaxSector) { in check_reshape()
8383 if (mddev->level == 6) in check_reshape()
8385 if (mddev->raid_disks + mddev->delta_disks < min) in check_reshape()
8389 if (!check_stripe_cache(mddev)) in check_reshape()
8392 if (mddev->new_chunk_sectors > mddev->chunk_sectors || in check_reshape()
8393 mddev->delta_disks > 0) in check_reshape()
8396 + max(0, mddev->delta_disks), in check_reshape()
8397 max(mddev->new_chunk_sectors, in check_reshape()
8398 mddev->chunk_sectors) in check_reshape()
8402 if (conf->previous_raid_disks + mddev->delta_disks <= conf->pool_size) in check_reshape()
8405 + mddev->delta_disks)); in check_reshape()
8408 static int raid5_start_reshape(struct mddev *mddev) in raid5_start_reshape() argument
8410 struct r5conf *conf = mddev->private; in raid5_start_reshape()
8416 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in raid5_start_reshape()
8419 if (!check_stripe_cache(mddev)) in raid5_start_reshape()
8426 if (mddev->resync_offset < MaxSector) in raid5_start_reshape()
8432 rdev_for_each(rdev, mddev) { in raid5_start_reshape()
8438 if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded) in raid5_start_reshape()
8448 if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks) in raid5_start_reshape()
8449 < mddev->array_sectors) { in raid5_start_reshape()
8451 mdname(mddev)); in raid5_start_reshape()
8459 conf->raid_disks += mddev->delta_disks; in raid5_start_reshape()
8461 conf->chunk_sectors = mddev->new_chunk_sectors; in raid5_start_reshape()
8463 conf->algorithm = mddev->new_layout; in raid5_start_reshape()
8469 if (mddev->reshape_backwards) in raid5_start_reshape()
8470 conf->reshape_progress = raid5_size(mddev, 0, 0); in raid5_start_reshape()
8481 raid5_quiesce(mddev, true); in raid5_start_reshape()
8482 raid5_quiesce(mddev, false); in raid5_start_reshape()
8491 if (mddev->delta_disks >= 0) { in raid5_start_reshape()
8492 rdev_for_each(rdev, mddev) in raid5_start_reshape()
8495 if (raid5_add_disk(mddev, rdev) == 0) { in raid5_start_reshape()
8503 sysfs_link_rdev(mddev, rdev); in raid5_start_reshape()
8516 mddev->degraded = raid5_calc_degraded(conf); in raid5_start_reshape()
8519 mddev->raid_disks = conf->raid_disks; in raid5_start_reshape()
8520 mddev->reshape_position = conf->reshape_progress; in raid5_start_reshape()
8521 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in raid5_start_reshape()
8523 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); in raid5_start_reshape()
8524 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); in raid5_start_reshape()
8525 clear_bit(MD_RECOVERY_DONE, &mddev->recovery); in raid5_start_reshape()
8526 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); in raid5_start_reshape()
8527 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in raid5_start_reshape()
8539 if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { in end_reshape()
8544 md_finish_reshape(conf->mddev); in end_reshape()
8547 conf->mddev->reshape_position = MaxSector; in end_reshape()
8548 rdev_for_each(rdev, conf->mddev) in end_reshape()
8556 mddev_update_io_opt(conf->mddev, in end_reshape()
8564 static void raid5_finish_reshape(struct mddev *mddev) in raid5_finish_reshape() argument
8566 struct r5conf *conf = mddev->private; in raid5_finish_reshape()
8569 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { in raid5_finish_reshape()
8571 if (mddev->delta_disks <= 0) { in raid5_finish_reshape()
8574 mddev->degraded = raid5_calc_degraded(conf); in raid5_finish_reshape()
8577 d < conf->raid_disks - mddev->delta_disks; in raid5_finish_reshape()
8587 mddev->layout = conf->algorithm; in raid5_finish_reshape()
8588 mddev->chunk_sectors = conf->chunk_sectors; in raid5_finish_reshape()
8589 mddev->reshape_position = MaxSector; in raid5_finish_reshape()
8590 mddev->delta_disks = 0; in raid5_finish_reshape()
8591 mddev->reshape_backwards = 0; in raid5_finish_reshape()
8595 static void raid5_quiesce(struct mddev *mddev, int quiesce) in raid5_quiesce() argument
8597 struct r5conf *conf = mddev->private; in raid5_quiesce()
8630 static void *raid45_takeover_raid0(struct mddev *mddev, int level) in raid45_takeover_raid0() argument
8632 struct r0conf *raid0_conf = mddev->private; in raid45_takeover_raid0()
8638 mdname(mddev)); in raid45_takeover_raid0()
8644 mddev->dev_sectors = sectors; in raid45_takeover_raid0()
8645 mddev->new_level = level; in raid45_takeover_raid0()
8646 mddev->new_layout = ALGORITHM_PARITY_N; in raid45_takeover_raid0()
8647 mddev->new_chunk_sectors = mddev->chunk_sectors; in raid45_takeover_raid0()
8648 mddev->raid_disks += 1; in raid45_takeover_raid0()
8649 mddev->delta_disks = 1; in raid45_takeover_raid0()
8651 mddev->resync_offset = MaxSector; in raid45_takeover_raid0()
8653 return setup_conf(mddev); in raid45_takeover_raid0()
8656 static void *raid5_takeover_raid1(struct mddev *mddev) in raid5_takeover_raid1() argument
8661 if (mddev->raid_disks != 2 || in raid5_takeover_raid1()
8662 mddev->degraded > 1) in raid5_takeover_raid1()
8670 while (chunksect && (mddev->array_sectors & (chunksect-1))) in raid5_takeover_raid1()
8673 if ((chunksect<<9) < RAID5_STRIPE_SIZE((struct r5conf *)mddev->private)) in raid5_takeover_raid1()
8677 mddev->new_level = 5; in raid5_takeover_raid1()
8678 mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC; in raid5_takeover_raid1()
8679 mddev->new_chunk_sectors = chunksect; in raid5_takeover_raid1()
8681 ret = setup_conf(mddev); in raid5_takeover_raid1()
8683 mddev_clear_unsupported_flags(mddev, in raid5_takeover_raid1()
8688 static void *raid5_takeover_raid6(struct mddev *mddev) in raid5_takeover_raid6() argument
8692 switch (mddev->layout) { in raid5_takeover_raid6()
8714 mddev->new_level = 5; in raid5_takeover_raid6()
8715 mddev->new_layout = new_layout; in raid5_takeover_raid6()
8716 mddev->delta_disks = -1; in raid5_takeover_raid6()
8717 mddev->raid_disks -= 1; in raid5_takeover_raid6()
8718 return setup_conf(mddev); in raid5_takeover_raid6()
8721 static int raid5_check_reshape(struct mddev *mddev) in raid5_check_reshape() argument
8728 struct r5conf *conf = mddev->private; in raid5_check_reshape()
8729 int new_chunk = mddev->new_chunk_sectors; in raid5_check_reshape()
8731 if (mddev->new_layout >= 0 && !algorithm_valid_raid5(mddev->new_layout)) in raid5_check_reshape()
8738 if (mddev->array_sectors & (new_chunk-1)) in raid5_check_reshape()
8745 if (mddev->raid_disks == 2) { in raid5_check_reshape()
8747 if (mddev->new_layout >= 0) { in raid5_check_reshape()
8748 conf->algorithm = mddev->new_layout; in raid5_check_reshape()
8749 mddev->layout = mddev->new_layout; in raid5_check_reshape()
8753 mddev->chunk_sectors = new_chunk; in raid5_check_reshape()
8755 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in raid5_check_reshape()
8756 md_wakeup_thread(mddev->thread); in raid5_check_reshape()
8758 return check_reshape(mddev); in raid5_check_reshape()
8761 static int raid6_check_reshape(struct mddev *mddev) in raid6_check_reshape() argument
8763 int new_chunk = mddev->new_chunk_sectors; in raid6_check_reshape()
8765 if (mddev->new_layout >= 0 && !algorithm_valid_raid6(mddev->new_layout)) in raid6_check_reshape()
8772 if (mddev->array_sectors & (new_chunk-1)) in raid6_check_reshape()
8778 return check_reshape(mddev); in raid6_check_reshape()
8781 static void *raid5_takeover(struct mddev *mddev) in raid5_takeover() argument
8789 if (mddev->level == 0) in raid5_takeover()
8790 return raid45_takeover_raid0(mddev, 5); in raid5_takeover()
8791 if (mddev->level == 1) in raid5_takeover()
8792 return raid5_takeover_raid1(mddev); in raid5_takeover()
8793 if (mddev->level == 4) { in raid5_takeover()
8794 mddev->new_layout = ALGORITHM_PARITY_N; in raid5_takeover()
8795 mddev->new_level = 5; in raid5_takeover()
8796 return setup_conf(mddev); in raid5_takeover()
8798 if (mddev->level == 6) in raid5_takeover()
8799 return raid5_takeover_raid6(mddev); in raid5_takeover()
8804 static void *raid4_takeover(struct mddev *mddev) in raid4_takeover() argument
8810 if (mddev->level == 0) in raid4_takeover()
8811 return raid45_takeover_raid0(mddev, 4); in raid4_takeover()
8812 if (mddev->level == 5 && in raid4_takeover()
8813 mddev->layout == ALGORITHM_PARITY_N) { in raid4_takeover()
8814 mddev->new_layout = 0; in raid4_takeover()
8815 mddev->new_level = 4; in raid4_takeover()
8816 return setup_conf(mddev); in raid4_takeover()
8823 static void *raid6_takeover(struct mddev *mddev) in raid6_takeover() argument
8831 if (mddev->pers != &raid5_personality) in raid6_takeover()
8833 if (mddev->degraded > 1) in raid6_takeover()
8835 if (mddev->raid_disks > 253) in raid6_takeover()
8837 if (mddev->raid_disks < 3) in raid6_takeover()
8840 switch (mddev->layout) { in raid6_takeover()
8862 mddev->new_level = 6; in raid6_takeover()
8863 mddev->new_layout = new_layout; in raid6_takeover()
8864 mddev->delta_disks = 1; in raid6_takeover()
8865 mddev->raid_disks += 1; in raid6_takeover()
8866 return setup_conf(mddev); in raid6_takeover()
8869 static int raid5_change_consistency_policy(struct mddev *mddev, const char *buf) in raid5_change_consistency_policy() argument
8874 err = mddev_suspend_and_lock(mddev); in raid5_change_consistency_policy()
8877 conf = mddev->private; in raid5_change_consistency_policy()
8879 mddev_unlock_and_resume(mddev); in raid5_change_consistency_policy()
8898 } else if (test_bit(MD_HAS_JOURNAL, &conf->mddev->flags) && in raid5_change_consistency_policy()
8903 rdev_for_each(rdev, mddev) in raid5_change_consistency_policy()
8910 clear_bit(MD_HAS_JOURNAL, &mddev->flags); in raid5_change_consistency_policy()
8920 md_update_sb(mddev, 1); in raid5_change_consistency_policy()
8922 mddev_unlock_and_resume(mddev); in raid5_change_consistency_policy()
8927 static int raid5_start(struct mddev *mddev) in raid5_start() argument
8929 struct r5conf *conf = mddev->private; in raid5_start()
8939 static void raid5_prepare_suspend(struct mddev *mddev) in raid5_prepare_suspend() argument
8941 struct r5conf *conf = mddev->private; in raid5_prepare_suspend()
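
The listing above repeatedly shows one structural pattern: setup_conf() stores a back-pointer from the raid5 private data (conf->mddev = mddev, line 7541), and nearly every other site reaches shared md state through that pointer, most often to wake the array's management thread (md_wakeup_thread(conf->mddev->thread)) or to report errors (md_error(conf->mddev, rdev)). The following is a minimal, self-contained sketch of that back-pointer idea only; every type and function name in it is an illustrative stand-in, not the real md/raid5 API.

	/*
	 * Stand-alone model of the conf->mddev back-pointer pattern.
	 * struct mddev_sketch and struct r5conf_sketch are hypothetical
	 * stand-ins for struct mddev and struct r5conf.
	 */
	#include <stdio.h>

	struct mddev_sketch {
		const char *name;
		int thread_woken;	/* stand-in for the md thread state */
	};

	struct r5conf_sketch {
		struct mddev_sketch *mddev;	/* back-pointer, set once at setup */
		int level;			/* 4, 5 or 6 */
	};

	/* analogous in spirit to md_wakeup_thread(conf->mddev->thread) */
	static void wake_md_thread(struct r5conf_sketch *conf)
	{
		conf->mddev->thread_woken = 1;
		printf("%s: woke md thread\n", conf->mddev->name);
	}

	int main(void)
	{
		struct mddev_sketch md = { .name = "md0", .thread_woken = 0 };
		struct r5conf_sketch conf = { .mddev = &md, .level = 5 };

		/* how do_release_stripe() and friends kick the array thread */
		wake_md_thread(&conf);
		return 0;
	}

The design point the references make visible: the per-array conf never duplicates md-core state; it only holds the single mddev pointer, so thread wakeups, degraded-disk accounting, and superblock-flag updates all funnel through the one owning struct mddev.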