Lines matching refs: mddev (identifier references in drivers/md/dm-raid.c)
245 struct mddev md;
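Line 245 shows that dm-raid embeds a full struct mddev inside its per-target struct raid_set; every "&rs->md" below points at that embedded member, and super_sync() (line 2116) goes the other way with container_of(). A minimal sketch of the pattern, with raid_set trimmed to the one field visible here and a hypothetical mddev_to_rs() helper standing in for the open-coded container_of():

struct raid_set {
        /* ... dm-raid bookkeeping elided ... */
        struct mddev md;        /* embedded MD array state (line 245) */
};

/* Hypothetical helper: recover the raid_set from an mddev passed to an MD callback. */
static inline struct raid_set *mddev_to_rs(struct mddev *mddev)
{
        return container_of(mddev, struct raid_set, md);
}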
263 struct mddev *mddev = &rs->md; in rs_config_backup() local
265 l->new_level = mddev->new_level; in rs_config_backup()
266 l->new_layout = mddev->new_layout; in rs_config_backup()
267 l->new_chunk_sectors = mddev->new_chunk_sectors; in rs_config_backup()
272 struct mddev *mddev = &rs->md; in rs_config_restore() local
274 mddev->new_level = l->new_level; in rs_config_restore()
275 mddev->new_layout = l->new_layout; in rs_config_restore()
276 mddev->new_chunk_sectors = l->new_chunk_sectors; in rs_config_restore()
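Lines 263-276 are the save/restore pair for the mddev's pending geometry. A hedged reconstruction from the listed lines; the scratch structure's name (rs_layout here) and the exact signatures are assumptions, while the field copies are taken verbatim from the listing:

struct rs_layout {
        int new_level;
        int new_layout;
        int new_chunk_sectors;
};

static void rs_config_backup(struct raid_set *rs, struct rs_layout *l)
{
        struct mddev *mddev = &rs->md;

        l->new_level = mddev->new_level;
        l->new_layout = mddev->new_layout;
        l->new_chunk_sectors = mddev->new_chunk_sectors;
}

static void rs_config_restore(struct raid_set *rs, struct rs_layout *l)
{
        struct mddev *mddev = &rs->md;

        mddev->new_level = l->new_level;
        mddev->new_layout = l->new_layout;
        mddev->new_chunk_sectors = l->new_chunk_sectors;
}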
686 struct mddev *mddev = &rs->md; in rs_set_rdev_sectors() local
693 rdev_for_each(rdev, mddev) in rs_set_rdev_sectors()
695 rdev->sectors = mddev->dev_sectors; in rs_set_rdev_sectors()
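Lines 686-695 show rs_set_rdev_sectors() pushing the array's per-device size to every component device. A hedged sketch; the gap between lines 693 and 695 suggests a filter on the iterated rdevs, assumed here to be a skip of journal devices:

static void rs_set_rdev_sectors(struct raid_set *rs)
{
        struct mddev *mddev = &rs->md;
        struct md_rdev *rdev;

        rdev_for_each(rdev, mddev)
                if (!test_bit(Journal, &rdev->flags))   /* assumed filter */
                        rdev->sectors = mddev->dev_sectors;
}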
714 struct mddev *mddev = &rs->md; in rs_set_cur() local
716 mddev->new_level = mddev->level; in rs_set_cur()
717 mddev->new_layout = mddev->layout; in rs_set_cur()
718 mddev->new_chunk_sectors = mddev->chunk_sectors; in rs_set_cur()
727 struct mddev *mddev = &rs->md; in rs_set_new() local
729 mddev->level = mddev->new_level; in rs_set_new()
730 mddev->layout = mddev->new_layout; in rs_set_new()
731 mddev->chunk_sectors = mddev->new_chunk_sectors; in rs_set_new()
732 mddev->raid_disks = rs->raid_disks; in rs_set_new()
733 mddev->delta_disks = 0; in rs_set_new()
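Lines 714-733 are the two geometry helpers, and the listed assignments are effectively their whole bodies: rs_set_cur() clears any pending change by copying the current geometry into the new_* fields, while rs_set_new() commits the pending geometry as current. A reconstruction in which only the signatures are assumed:

static void rs_set_cur(struct raid_set *rs)
{
        struct mddev *mddev = &rs->md;

        mddev->new_level = mddev->level;
        mddev->new_layout = mddev->layout;
        mddev->new_chunk_sectors = mddev->chunk_sectors;
}

static void rs_set_new(struct raid_set *rs)
{
        struct mddev *mddev = &rs->md;

        mddev->level = mddev->new_level;
        mddev->layout = mddev->new_layout;
        mddev->chunk_sectors = mddev->new_chunk_sectors;
        mddev->raid_disks = rs->raid_disks;
        mddev->delta_disks = 0;
}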
851 rs->dev[i].rdev.mddev = &rs->md; in parse_dev_params()
1271 jdev->mddev = &rs->md; in parse_raid_params()
1541 struct mddev *mddev = &rs->md; in rs_set_raid456_stripe_cache() local
1542 uint32_t min_stripes = max(mddev->chunk_sectors, mddev->new_chunk_sectors) / 2; in rs_set_raid456_stripe_cache()
1556 conf = mddev->private; in rs_set_raid456_stripe_cache()
1564 r = raid5_set_cache_size(mddev, nr_stripes); in rs_set_raid456_stripe_cache()
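Lines 1541-1564 size the raid4/5/6 stripe cache from the (possibly changing) chunk size. A loose sketch of the flow under stated assumptions: the rounding of nr_stripes, the "only grow, never shrink" check and the error reporting of the real function are elided, and -ENODEV is a placeholder error code:

static int rs_set_raid456_stripe_cache(struct raid_set *rs)
{
        struct mddev *mddev = &rs->md;
        struct r5conf *conf = mddev->private;           /* raid456 private data (line 1556) */
        uint32_t min_stripes = max(mddev->chunk_sectors,
                                   mddev->new_chunk_sectors) / 2;
        uint32_t nr_stripes = min_stripes;              /* rounding elided */

        if (!conf)
                return -ENODEV;                         /* placeholder error */

        /* Ask the raid456 personality to grow its stripe cache. */
        return raid5_set_cache_size(mddev, nr_stripes);
}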
1648 struct mddev *mddev = &rs->md; in rs_set_dev_and_array_sectors() local
1651 delta_disks = mddev->delta_disks; in rs_set_dev_and_array_sectors()
1683 mddev->array_sectors = array_sectors; in rs_set_dev_and_array_sectors()
1684 mddev->dev_sectors = dev_sectors; in rs_set_dev_and_array_sectors()
1745 struct mddev *mddev = &rs->md; in rs_check_takeover() local
1758 switch (mddev->level) { in rs_check_takeover()
1761 if ((mddev->new_level == 1 || mddev->new_level == 5) && in rs_check_takeover()
1762 mddev->raid_disks == 1) in rs_check_takeover()
1766 if (mddev->new_level == 10 && in rs_check_takeover()
1767 !(rs->raid_disks % mddev->raid_disks)) in rs_check_takeover()
1771 if (__within_range(mddev->new_level, 4, 6) && in rs_check_takeover()
1772 mddev->new_layout == ALGORITHM_PARITY_N && in rs_check_takeover()
1773 mddev->raid_disks > 1) in rs_check_takeover()
1780 if (__is_raid10_offset(mddev->layout)) in rs_check_takeover()
1783 near_copies = __raid10_near_copies(mddev->layout); in rs_check_takeover()
1786 if (mddev->new_level == 0) { in rs_check_takeover()
1789 !(mddev->raid_disks % near_copies)) { in rs_check_takeover()
1790 mddev->raid_disks /= near_copies; in rs_check_takeover()
1791 mddev->delta_disks = mddev->raid_disks; in rs_check_takeover()
1797 __raid10_far_copies(mddev->layout) > 1) in rs_check_takeover()
1804 if (mddev->new_level == 1 && in rs_check_takeover()
1805 max(near_copies, __raid10_far_copies(mddev->layout)) == mddev->raid_disks) in rs_check_takeover()
1809 if (__within_range(mddev->new_level, 4, 5) && in rs_check_takeover()
1810 mddev->raid_disks == 2) in rs_check_takeover()
1816 if (__within_range(mddev->new_level, 4, 5) && in rs_check_takeover()
1817 mddev->raid_disks == 2) { in rs_check_takeover()
1818 mddev->degraded = 1; in rs_check_takeover()
1823 if (mddev->new_level == 0 && in rs_check_takeover()
1824 mddev->raid_disks == 1) in rs_check_takeover()
1828 if (mddev->new_level == 10) in rs_check_takeover()
1834 if (mddev->new_level == 0) in rs_check_takeover()
1838 if ((mddev->new_level == 1 || mddev->new_level == 5) && in rs_check_takeover()
1839 mddev->raid_disks == 2) in rs_check_takeover()
1843 if (__within_range(mddev->new_level, 5, 6) && in rs_check_takeover()
1844 mddev->layout == ALGORITHM_PARITY_N) in rs_check_takeover()
1850 if (mddev->new_level == 0 && in rs_check_takeover()
1851 mddev->layout == ALGORITHM_PARITY_N) in rs_check_takeover()
1855 if (mddev->new_level == 4 && in rs_check_takeover()
1856 mddev->layout == ALGORITHM_PARITY_N) in rs_check_takeover()
1860 if ((mddev->new_level == 1 || mddev->new_level == 4 || mddev->new_level == 10) && in rs_check_takeover()
1861 mddev->raid_disks == 2) in rs_check_takeover()
1865 if (mddev->new_level == 6 && in rs_check_takeover()
1866 ((mddev->layout == ALGORITHM_PARITY_N && mddev->new_layout == ALGORITHM_PARITY_N) || in rs_check_takeover()
1867 … __within_range(mddev->new_layout, ALGORITHM_LEFT_ASYMMETRIC_6, ALGORITHM_RIGHT_SYMMETRIC_6))) in rs_check_takeover()
1873 if (mddev->new_level == 0 && in rs_check_takeover()
1874 mddev->layout == ALGORITHM_PARITY_N) in rs_check_takeover()
1878 if (mddev->new_level == 4 && in rs_check_takeover()
1879 mddev->layout == ALGORITHM_PARITY_N) in rs_check_takeover()
1883 if (mddev->new_level == 5 && in rs_check_takeover()
1884 ((mddev->layout == ALGORITHM_PARITY_N && mddev->new_layout == ALGORITHM_PARITY_N) || in rs_check_takeover()
1885 __within_range(mddev->new_layout, ALGORITHM_LEFT_ASYMMETRIC, ALGORITHM_RIGHT_SYMMETRIC))) in rs_check_takeover()
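The long run from line 1745 to 1885 is rs_check_takeover(): a switch on the current mddev->level in which each case whitelists the level/layout/disk-count combinations that can be taken over without data movement, and everything else is rejected. A heavily trimmed skeleton showing only the raid0 case, with the conditions copied from lines 1761-1773; the error path at the bottom is an assumption:

static int rs_check_takeover(struct raid_set *rs)
{
        struct mddev *mddev = &rs->md;

        switch (mddev->level) {
        case 0:
                /* raid0 -> raid1/5 with a single disk */
                if ((mddev->new_level == 1 || mddev->new_level == 5) &&
                    mddev->raid_disks == 1)
                        return 0;

                /* raid0 -> raid10 if the new disk count divides evenly */
                if (mddev->new_level == 10 &&
                    !(rs->raid_disks % mddev->raid_disks))
                        return 0;

                /* raid0 -> raid4/5/6 using the parity-N layout */
                if (__within_range(mddev->new_level, 4, 6) &&
                    mddev->new_layout == ALGORITHM_PARITY_N &&
                    mddev->raid_disks > 1)
                        return 0;
                break;

        /* cases 1, 10, 4, 5 and 6: see lines 1780-1885 above */
        default:
                break;
        }

        return -EPERM;  /* assumed: unsupported takeovers are rejected */
}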
1915 struct mddev *mddev = &rs->md; in rs_reshape_requested() local
1931 mddev->raid_disks != rs->raid_disks; in rs_reshape_requested()
1936 !__is_raid10_far(mddev->new_layout) && in rs_reshape_requested()
2043 struct mddev *mddev = &rs->md; in rs_check_reshape() local
2045 if (!mddev->pers || !mddev->pers->check_reshape) in rs_check_reshape()
2047 else if (mddev->degraded) in rs_check_reshape()
2073 md_error(rdev->mddev, rdev); in read_disk_sb()
2110 static void super_sync(struct mddev *mddev, struct md_rdev *rdev) in super_sync() argument
2116 struct raid_set *rs = container_of(mddev, struct raid_set, md); in super_sync()
2140 sb->num_devices = cpu_to_le32(mddev->raid_disks); in super_sync()
2143 sb->events = cpu_to_le64(mddev->events); in super_sync()
2146 sb->array_resync_offset = cpu_to_le64(mddev->resync_offset); in super_sync()
2148 sb->level = cpu_to_le32(mddev->level); in super_sync()
2149 sb->layout = cpu_to_le32(mddev->layout); in super_sync()
2150 sb->stripe_sectors = cpu_to_le32(mddev->chunk_sectors); in super_sync()
2157 sb->new_level = cpu_to_le32(mddev->new_level); in super_sync()
2158 sb->new_layout = cpu_to_le32(mddev->new_layout); in super_sync()
2159 sb->new_stripe_sectors = cpu_to_le32(mddev->new_chunk_sectors); in super_sync()
2161 sb->delta_disks = cpu_to_le32(mddev->delta_disks); in super_sync()
2164 sb->reshape_position = cpu_to_le64(mddev->reshape_position); in super_sync()
2169 if (mddev->delta_disks < 0 || mddev->reshape_backwards) in super_sync()
2176 sb->array_sectors = cpu_to_le64(mddev->array_sectors); in super_sync()
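super_sync() (lines 2110-2176) serializes the in-core state into the little-endian dm-raid superblock before it is written out. A trimmed sketch limited to the fields visible above; obtaining the superblock via page_address(rdev->sb_page) and the flag update are assumptions, the field assignments mirror the listing, and the per-device data recovered through container_of(mddev, struct raid_set, md) is elided:

static void super_sync(struct mddev *mddev, struct md_rdev *rdev)
{
        struct dm_raid_superblock *sb = page_address(rdev->sb_page);    /* assumed */

        sb->num_devices         = cpu_to_le32(mddev->raid_disks);
        sb->events              = cpu_to_le64(mddev->events);
        sb->array_resync_offset = cpu_to_le64(mddev->resync_offset);
        sb->level               = cpu_to_le32(mddev->level);
        sb->layout              = cpu_to_le32(mddev->layout);
        sb->stripe_sectors      = cpu_to_le32(mddev->chunk_sectors);

        /* Pending takeover/reshape geometry travels with the superblock. */
        sb->new_level           = cpu_to_le32(mddev->new_level);
        sb->new_layout          = cpu_to_le32(mddev->new_layout);
        sb->new_stripe_sectors  = cpu_to_le32(mddev->new_chunk_sectors);
        sb->delta_disks         = cpu_to_le32(mddev->delta_disks);
        sb->reshape_position    = cpu_to_le64(mddev->reshape_position);

        if (mddev->delta_disks < 0 || mddev->reshape_backwards)
                sb->flags |= cpu_to_le32(SB_FLAG_RESHAPE_BACKWARDS);    /* assumed */

        sb->array_sectors       = cpu_to_le64(mddev->array_sectors);
}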
2214 super_sync(rdev->mddev, rdev); in super_load()
2220 set_bit(MD_SB_CHANGE_DEVS, &rdev->mddev->sb_flags); in super_load()
2240 struct mddev *mddev = &rs->md; in super_init_validation() local
2254 mddev->events = events_sb ? : 1; in super_init_validation()
2256 mddev->reshape_position = MaxSector; in super_init_validation()
2258 mddev->raid_disks = le32_to_cpu(sb->num_devices); in super_init_validation()
2259 mddev->level = le32_to_cpu(sb->level); in super_init_validation()
2260 mddev->layout = le32_to_cpu(sb->layout); in super_init_validation()
2261 mddev->chunk_sectors = le32_to_cpu(sb->stripe_sectors); in super_init_validation()
2269 mddev->new_level = le32_to_cpu(sb->new_level); in super_init_validation()
2270 mddev->new_layout = le32_to_cpu(sb->new_layout); in super_init_validation()
2271 mddev->new_chunk_sectors = le32_to_cpu(sb->new_stripe_sectors); in super_init_validation()
2272 mddev->delta_disks = le32_to_cpu(sb->delta_disks); in super_init_validation()
2273 mddev->array_sectors = le64_to_cpu(sb->array_sectors); in super_init_validation()
2282 if (mddev->delta_disks < 0 || in super_init_validation()
2283 (!mddev->delta_disks && (le32_to_cpu(sb->flags) & SB_FLAG_RESHAPE_BACKWARDS))) in super_init_validation()
2284 mddev->reshape_backwards = 1; in super_init_validation()
2286 mddev->reshape_backwards = 0; in super_init_validation()
2288 mddev->reshape_position = le64_to_cpu(sb->reshape_position); in super_init_validation()
2289 rs->raid_type = get_raid_type_by_ll(mddev->level, mddev->layout); in super_init_validation()
2296 struct raid_type *rt_cur = get_raid_type_by_ll(mddev->level, mddev->layout); in super_init_validation()
2297 struct raid_type *rt_new = get_raid_type_by_ll(mddev->new_level, mddev->new_layout); in super_init_validation()
2308 if (mddev->layout != mddev->new_layout) { in super_init_validation()
2314 le32_to_cpu(sb->layout), mddev->new_layout); in super_init_validation()
2316 if (mddev->chunk_sectors != mddev->new_chunk_sectors) in super_init_validation()
2318 mddev->chunk_sectors, mddev->new_chunk_sectors); in super_init_validation()
2321 mddev->raid_disks, mddev->raid_disks + rs->delta_disks); in super_init_validation()
2324 raid10_md_layout_to_format(mddev->layout), in super_init_validation()
2325 raid10_md_layout_to_copies(mddev->layout)); in super_init_validation()
2327 raid10_md_layout_to_format(mddev->new_layout), in super_init_validation()
2328 raid10_md_layout_to_copies(mddev->new_layout)); in super_init_validation()
2337 mddev->resync_offset = le64_to_cpu(sb->array_resync_offset); in super_init_validation()
2354 rdev_for_each(r, mddev) { in super_init_validation()
2375 set_bit(MD_ARRAY_FIRST_USE, &mddev->flags); in super_init_validation()
2394 (unsigned long long) mddev->resync_offset); in super_init_validation()
2398 (unsigned long long) mddev->reshape_position); in super_init_validation()
2408 rdev_for_each(r, mddev) { in super_init_validation()
2425 if (rs_is_raid10(rs) && __is_raid10_near(mddev->layout)) { in super_init_validation()
2426 if (mddev->raid_disks % __raid10_near_copies(mddev->layout) || in super_init_validation()
2459 struct mddev *mddev = &rs->md; in super_validate() local
2471 if (!mddev->events && super_init_validation(rs, rdev)) in super_validate()
2486 …mddev->bitmap_info.offset = (rt_is_raid0(rs->raid_type) || rs->journal_dev.dev) ? 0 : to_sector(40… in super_validate()
2487 mddev->bitmap_info.default_offset = mddev->bitmap_info.offset; in super_validate()
2532 struct mddev *mddev = &rs->md; in analyse_superblocks() local
2536 set_bit(MD_ARRAY_FIRST_USE, &mddev->flags); in analyse_superblocks()
2539 rdev_for_each(rdev, mddev) { in analyse_superblocks()
2612 rdev_for_each(rdev, mddev) in analyse_superblocks()
2739 struct mddev *mddev = &rs->md; in rs_setup_takeover() local
2741 unsigned int d = mddev->raid_disks = rs->raid_disks; in rs_setup_takeover()
2750 mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_FAR, in rs_setup_takeover()
2754 mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_NEAR, in rs_setup_takeover()
2761 clear_bit(MD_ARRAY_FIRST_USE, &mddev->flags); in rs_setup_takeover()
2762 mddev->resync_offset = MaxSector; in rs_setup_takeover()
2770 mddev->resync_offset = rdev->recovery_offset = 0; in rs_setup_takeover()
2772 set_bit(MD_ARRAY_FIRST_USE, &mddev->flags); in rs_setup_takeover()
2785 struct mddev *mddev = &rs->md; in rs_prepare_reshape() local
2788 if (rs->raid_disks != mddev->raid_disks && in rs_prepare_reshape()
2789 __is_raid10_near(mddev->layout) && in rs_prepare_reshape()
2791 rs->raid10_copies != __raid10_near_copies(mddev->layout)) { in rs_prepare_reshape()
2805 mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_NEAR, in rs_prepare_reshape()
2807 mddev->new_layout = mddev->layout; in rs_prepare_reshape()
2818 mddev->degraded = rs->delta_disks < 0 ? -rs->delta_disks : rs->delta_disks; in rs_prepare_reshape()
2822 mddev->raid_disks = rs->raid_disks; in rs_prepare_reshape()
2833 } else if (mddev->raid_disks < rs->raid_disks) in rs_prepare_reshape()
2852 struct mddev *mddev = &rs->md; in rs_setup_reshape() local
2855 mddev->delta_disks = rs->delta_disks; in rs_setup_reshape()
2856 cur_raid_devs = mddev->raid_disks; in rs_setup_reshape()
2859 if (mddev->delta_disks && in rs_setup_reshape()
2860 mddev->layout != mddev->new_layout) { in rs_setup_reshape()
2862 mddev->new_layout = mddev->layout; in rs_setup_reshape()
2901 rdev->sectors = mddev->dev_sectors; in rs_setup_reshape()
2905 mddev->reshape_backwards = 0; /* adding disk(s) -> forward reshape */ in rs_setup_reshape()
2910 mddev->reshape_backwards = 1; /* removing disk(s) -> backward reshape */ in rs_setup_reshape()
2935 mddev->reshape_backwards = rs->dev[0].rdev.data_offset ? 0 : 1; in rs_setup_reshape()
2942 if (!mddev->reshape_backwards) in rs_setup_reshape()
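rs_setup_reshape() (lines 2852-2942) picks the reshape direction: adding disks reshapes forward, removing disks reshapes backward, and a pure layout or chunk-size change (delta_disks == 0) is steered by whether the data offset leaves headroom in front of the data. A hedged sketch of just that decision, lifted from lines 2905-2935:

/* delta_disks was already copied from rs (line 2855). */
if (rs->delta_disks > 0) {
        /* Growing the array: new stripes land after the old ones. */
        mddev->reshape_backwards = 0;           /* forward reshape */
} else if (rs->delta_disks < 0) {
        /* Shrinking the array: walk the stripes from the end. */
        mddev->reshape_backwards = 1;           /* backward reshape */
} else {
        /*
         * Same disk count (layout/chunk-size change only): a non-zero
         * data_offset on the first rdev means there is room in front,
         * so reshape forward; otherwise reshape backward.
         */
        mddev->reshape_backwards = rs->dev[0].rdev.data_offset ? 0 : 1;
}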
3342 struct mddev *mddev = &rs->md; in raid_map() local
3352 if (unlikely(bio_has_data(bio) && bio_end_sector(bio) > mddev->array_sectors)) in raid_map()
3355 if (unlikely(!md_handle_request(mddev, bio))) in raid_map()
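raid_map() (lines 3342-3355) is the target's I/O entry point, and the two listed checks are essentially the whole fast path: reject bios that run past the exported array size, hand everything else to the MD core. A minimal sketch assuming the usual device-mapper return codes and the standard ti->private convention:

static int raid_map(struct dm_target *ti, struct bio *bio)
{
        struct raid_set *rs = ti->private;      /* assumed dm target convention */
        struct mddev *mddev = &rs->md;

        /* Data bios must not extend past the exported array size. */
        if (unlikely(bio_has_data(bio) && bio_end_sector(bio) > mddev->array_sectors))
                return DM_MAPIO_KILL;

        /* MD may refuse the bio (e.g. while suspended); ask dm to retry it. */
        if (unlikely(!md_handle_request(mddev, bio)))
                return DM_MAPIO_REQUEUE;

        return DM_MAPIO_SUBMITTED;
}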
3380 static enum sync_state decipher_sync_action(struct mddev *mddev, unsigned long recovery) in decipher_sync_action() argument
3388 (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery)))) { in decipher_sync_action()
3403 if (mddev->reshape_position != MaxSector) in decipher_sync_action()
3441 struct mddev *mddev = &rs->md; in rs_get_progress() local
3452 r = mddev->resync_offset; in rs_get_progress()
3454 r = mddev->curr_resync_completed; in rs_get_progress()
3506 rdev_for_each(rdev, mddev) in rs_get_progress()
3528 struct mddev *mddev = &rs->md; in raid_status() local
3529 struct r5conf *conf = rs_is_raid456(rs) ? mddev->private : NULL; in raid_status()
3542 rt = get_raid_type_by_ll(mddev->new_level, mddev->new_layout); in raid_status()
3546 DMEMIT("%s %d ", rt->name, mddev->raid_disks); in raid_status()
3552 mddev->resync_max_sectors : mddev->dev_sectors; in raid_status()
3554 state = decipher_sync_action(mddev, recovery); in raid_status()
3556 resync_mismatches = mddev->last_sync_action == ACTION_CHECK ? in raid_status()
3557 atomic64_read(&mddev->resync_mismatches) : 0; in raid_status()
3634 DMEMIT("%s %u %u", rs->raid_type->name, raid_param_cnt, mddev->new_chunk_sectors); in raid_status()
3645 mddev->bitmap_info.daemon_sleep); in raid_status()
3648 mddev->sync_speed_min); in raid_status()
3651 mddev->sync_speed_max); in raid_status()
3659 mddev->bitmap_info.max_write_behind); in raid_status()
3665 (unsigned long long) to_sector(mddev->bitmap_info.chunksize)); in raid_status()
3668 raid10_md_layout_to_copies(mddev->layout)); in raid_status()
3671 raid10_md_layout_to_format(mddev->layout)); in raid_status()
3674 max(rs->delta_disks, mddev->delta_disks)); in raid_status()
3691 rt = get_raid_type_by_ll(mddev->new_level, mddev->new_layout); in raid_status()
3696 DMEMIT(",raid_type=%s,raid_disks=%d", rt->name, mddev->raid_disks); in raid_status()
3701 state = decipher_sync_action(mddev, recovery); in raid_status()
3734 struct mddev *mddev = &rs->md; in raid_message() local
3737 if (!mddev->pers || !mddev->pers->sync_request) in raid_message()
3745 ret = mddev_lock(mddev); in raid_message()
3749 md_frozen_sync_thread(mddev); in raid_message()
3750 mddev_unlock(mddev); in raid_message()
3752 ret = mddev_lock(mddev); in raid_message()
3756 md_idle_sync_thread(mddev); in raid_message()
3757 mddev_unlock(mddev); in raid_message()
3760 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in raid_message()
3761 if (decipher_sync_action(mddev, mddev->recovery) != st_idle) in raid_message()
3766 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); in raid_message()
3769 set_bit(MD_RECOVERY_CHECK, &mddev->recovery); in raid_message()
3770 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); in raid_message()
3771 set_bit(MD_RECOVERY_SYNC, &mddev->recovery); in raid_message()
3773 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); in raid_message()
3774 set_bit(MD_RECOVERY_SYNC, &mddev->recovery); in raid_message()
3778 if (mddev->ro == 2) { in raid_message()
3782 mddev->ro = 0; in raid_message()
3783 if (!mddev->suspended) in raid_message()
3784 md_wakeup_thread(mddev->sync_thread); in raid_message()
3786 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in raid_message()
3787 if (!mddev->suspended) in raid_message()
3788 md_wakeup_thread(mddev->thread); in raid_message()
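raid_message() (lines 3734-3788) translates dmsetup message strings into MD recovery flags: "frozen" and "idle" stop the sync thread under mddev_lock (lines 3749/3756), while the remaining actions set combinations of MD_RECOVERY_* bits and then kick the MD thread. A hedged sketch of just the string-to-flag mapping implied by lines 3760-3788; argument parsing, the busy check, locking and the read-only handling are elided, and the strcasecmp-based dispatch is an assumption:

clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);

if (!strcasecmp(argv[0], "recover")) {
        set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
} else if (!strcasecmp(argv[0], "check")) {
        set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
        set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
        set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
} else if (!strcasecmp(argv[0], "repair")) {
        set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
        set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
}

/* Finally make the MD thread notice the new request. */
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
if (!mddev->suspended)
        md_wakeup_thread(mddev->thread);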
3823 struct mddev *mddev = &rs->md; in raid_presuspend() local
3831 if (!reshape_interrupted(mddev)) in raid_presuspend()
3839 if (mddev->pers && mddev->pers->prepare_suspend) in raid_presuspend()
3840 mddev->pers->prepare_suspend(mddev); in raid_presuspend()
3871 struct mddev *mddev = &rs->md; in attempt_restore_of_faulty_devices() local
3875 if (!mddev->pers || !mddev->pers->hot_add_disk || !mddev->pers->hot_remove_disk) in attempt_restore_of_faulty_devices()
3904 if (mddev->pers->hot_remove_disk(mddev, r)) { in attempt_restore_of_faulty_devices()
3915 if (mddev->pers->hot_add_disk(mddev, r)) { in attempt_restore_of_faulty_devices()
3954 struct mddev *mddev = &rs->md; in __load_dirty_region_bitmap() local
3956 r = mddev->bitmap_ops->load(mddev); in __load_dirty_region_bitmap()
3967 struct mddev *mddev = &rs->md; in rs_update_sbs() local
3968 int ro = mddev->ro; in rs_update_sbs()
3970 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in rs_update_sbs()
3971 mddev->ro = 0; in rs_update_sbs()
3972 md_update_sb(mddev, 1); in rs_update_sbs()
3973 mddev->ro = ro; in rs_update_sbs()
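rs_update_sbs() (lines 3967-3973) is short enough that the listed lines are nearly the whole body: mark the superblocks dirty, temporarily drop read-only so md_update_sb() actually writes, then restore the previous mode. A hedged reconstruction:

static void rs_update_sbs(struct raid_set *rs)
{
        struct mddev *mddev = &rs->md;
        int ro = mddev->ro;

        set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
        mddev->ro = 0;                  /* superblocks are not written to read-only arrays */
        md_update_sb(mddev, 1);
        mddev->ro = ro;                 /* restore the previous read-only state */
}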
3986 struct mddev *mddev = &rs->md; in rs_start_reshape() local
3987 struct md_personality *pers = mddev->pers; in rs_start_reshape()
3990 set_bit(MD_RECOVERY_WAIT, &mddev->recovery); in rs_start_reshape()
4001 r = pers->check_reshape(mddev); in rs_start_reshape()
4012 r = pers->start_reshape(mddev); in rs_start_reshape()
4033 struct mddev *mddev = &rs->md; in raid_preresume() local
4041 rs->array_sectors != mddev->array_sectors) in raid_preresume()
4060 mddev->array_sectors = rs->array_sectors; in raid_preresume()
4061 mddev->dev_sectors = rs->dev_sectors; in raid_preresume()
4067 if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) && mddev->bitmap && in raid_preresume()
4070 mddev->bitmap_info.chunksize != to_bytes(rs->requested_bitmap_chunk_sectors)))) { in raid_preresume()
4071 int chunksize = to_bytes(rs->requested_bitmap_chunk_sectors) ?: mddev->bitmap_info.chunksize; in raid_preresume()
4073 r = mddev->bitmap_ops->resize(mddev, mddev->dev_sectors, in raid_preresume()
4080 if (mddev->resync_offset && mddev->resync_offset < MaxSector) { in raid_preresume()
4081 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); in raid_preresume()
4082 mddev->resync_min = mddev->resync_offset; in raid_preresume()
4084 mddev->resync_max_sectors = mddev->dev_sectors; in raid_preresume()
4091 mddev_lock_nointr(mddev); in raid_preresume()
4093 mddev_unlock(mddev); in raid_preresume()
4105 struct mddev *mddev = &rs->md; in raid_resume() local
4113 mddev_lock_nointr(mddev); in raid_resume()
4115 mddev_unlock(mddev); in raid_resume()
4120 if (mddev->delta_disks < 0) in raid_resume()
4123 mddev_lock_nointr(mddev); in raid_resume()
4124 WARN_ON_ONCE(!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)); in raid_resume()
4125 WARN_ON_ONCE(rcu_dereference_protected(mddev->sync_thread, in raid_resume()
4126 lockdep_is_held(&mddev->reconfig_mutex))); in raid_resume()
4128 mddev->ro = 0; in raid_resume()
4129 mddev->in_sync = 0; in raid_resume()
4130 md_unfrozen_sync_thread(mddev); in raid_resume()
4131 mddev_unlock_and_resume(mddev); in raid_resume()
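raid_resume() (lines 4105-4131) closes the suspend/resume cycle: under the reconfig mutex it asserts that the array is still frozen with no sync thread running, clears read-only and in_sync, unfreezes the sync thread and resumes MD. A trimmed sketch of that tail (lines 4123-4131); the earlier superblock update and faulty-device restore steps of the function are elided:

mddev_lock_nointr(mddev);

/* Suspend left the array frozen with no sync thread running. */
WARN_ON_ONCE(!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery));
WARN_ON_ONCE(rcu_dereference_protected(mddev->sync_thread,
                                       lockdep_is_held(&mddev->reconfig_mutex)));

mddev->ro = 0;
mddev->in_sync = 0;
md_unfrozen_sync_thread(mddev);         /* let recovery/resync continue */
mddev_unlock_and_resume(mddev);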