Lines Matching refs:mddev
35 static void dump_zones(struct mddev *mddev) in dump_zones() argument
40 struct r0conf *conf = mddev->private; in dump_zones()
43 mdname(mddev), in dump_zones()
63 static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf) in create_strip_zones() argument
76 rdev_for_each(rdev1, mddev) { in create_strip_zones()
78 mdname(mddev), in create_strip_zones()
84 sector_div(sectors, mddev->chunk_sectors); in create_strip_zones()
85 rdev1->sectors = sectors * mddev->chunk_sectors; in create_strip_zones()
90 rdev_for_each(rdev2, mddev) { in create_strip_zones()
93 mdname(mddev), in create_strip_zones()
100 mdname(mddev)); in create_strip_zones()
109 mdname(mddev)); in create_strip_zones()
114 mdname(mddev)); in create_strip_zones()
118 mdname(mddev)); in create_strip_zones()
121 mdname(mddev), conf->nr_strip_zones); in create_strip_zones()
125 mdname(mddev), conf->nr_strip_zones); in create_strip_zones()
131 if ((mddev->chunk_sectors << 9) % blksize) { in create_strip_zones()
133 mdname(mddev), in create_strip_zones()
134 mddev->chunk_sectors << 9, blksize); in create_strip_zones()
147 mddev->raid_disks), in create_strip_zones()
160 rdev_for_each(rdev1, mddev) { in create_strip_zones()
163 if (mddev->level == 10) { in create_strip_zones()
169 if (mddev->level == 1) { in create_strip_zones()
179 mdname(mddev)); in create_strip_zones()
182 if (j >= mddev->raid_disks) { in create_strip_zones()
184 mdname(mddev), j); in create_strip_zones()
189 mdname(mddev), j); in create_strip_zones()
198 if (cnt != mddev->raid_disks) { in create_strip_zones()
200 mdname(mddev), cnt, mddev->raid_disks); in create_strip_zones()
214 dev = conf->devlist + i * mddev->raid_disks; in create_strip_zones()
216 pr_debug("md/raid0:%s: zone %d\n", mdname(mddev), i); in create_strip_zones()
225 mdname(mddev), in create_strip_zones()
231 mdname(mddev), in create_strip_zones()
238 mdname(mddev), in create_strip_zones()
246 mdname(mddev), in create_strip_zones()
253 mdname(mddev), in create_strip_zones()
259 } else if (mddev->layout == RAID0_ORIG_LAYOUT || in create_strip_zones()
260 mddev->layout == RAID0_ALT_MULTIZONE_LAYOUT) { in create_strip_zones()
261 conf->layout = mddev->layout; in create_strip_zones()
267 mdname(mddev)); in create_strip_zones()
277 sector_div(first_sector, mddev->chunk_sectors); in create_strip_zones()
285 pr_debug("md/raid0:%s: done.\n", mdname(mddev)); in create_strip_zones()
320 static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone, in map_sector() argument
325 struct r0conf *conf = mddev->private; in map_sector()
327 unsigned int chunk_sects = mddev->chunk_sectors; in map_sector()
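map_sector() resolves an array sector to a (member device, device sector) pair using mddev->chunk_sectors. The sketch below models the arithmetic for a single zone that starts at array offset 0; the function and variable names are illustrative, not the kernel's:

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative model of the map_sector() arithmetic for one zone
     * starting at array offset 0: split the array sector into a chunk
     * number and an in-chunk offset, then stripe chunks round-robin
     * across the nb_dev members. */
    static void map_sector_model(uint64_t sector, uint64_t chunk_sects,
                                 unsigned nb_dev, unsigned *dev,
                                 uint64_t *dev_sector)
    {
        uint64_t chunk = sector / chunk_sects;       /* chunk index in the zone */
        uint64_t in_chunk = sector % chunk_sects;    /* sect_in_chunk */
        *dev = chunk % nb_dev;                       /* member holding the chunk */
        *dev_sector = (chunk / nb_dev) * chunk_sects + in_chunk;
    }

    int main(void)
    {
        unsigned dev; uint64_t ds;
        map_sector_model(5000, 1024, 4, &dev, &ds);  /* 512 KiB chunks, 4 disks */
        printf("dev %u, device sector %llu\n", dev, (unsigned long long)ds);
        return 0;
    }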
353 static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks) in raid0_size() argument
361 rdev_for_each(rdev, mddev) in raid0_size()
363 ~(sector_t)(mddev->chunk_sectors-1)); in raid0_size()
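raid0_size() at listing lines 361-363 sums the capacity of all members, masking each down to a chunk boundary; the & ~(chunk_sectors - 1) form assumes a power-of-two chunk and is equivalent to the sector_div() rounding used during zone creation. A small model:

    #include <stdio.h>
    #include <stdint.h>

    /* Model of raid0_size(): total = sum over members of
     * (member sectors rounded down to a chunk boundary).
     * The mask form requires chunk_sectors to be a power of two. */
    static uint64_t raid0_size_model(const uint64_t *dev_sectors, int n,
                                     uint64_t chunk_sectors)
    {
        uint64_t total = 0;
        for (int i = 0; i < n; i++)
            total += dev_sectors[i] & ~(chunk_sectors - 1);
        return total;
    }

    int main(void)
    {
        uint64_t devs[] = { 1000000, 1000500, 999999 };  /* hypothetical sizes */
        printf("%llu sectors\n",
               (unsigned long long)raid0_size_model(devs, 3, 1024));
        return 0;
    }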
368 static void raid0_free(struct mddev *mddev, void *priv) in raid0_free() argument
377 static int raid0_set_limits(struct mddev *mddev) in raid0_set_limits() argument
383 lim.max_hw_sectors = mddev->chunk_sectors; in raid0_set_limits()
384 lim.max_write_zeroes_sectors = mddev->chunk_sectors; in raid0_set_limits()
385 lim.io_min = mddev->chunk_sectors << 9; in raid0_set_limits()
386 lim.io_opt = lim.io_min * mddev->raid_disks; in raid0_set_limits()
387 lim.chunk_sectors = mddev->chunk_sectors; in raid0_set_limits()
389 err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY); in raid0_set_limits()
392 return queue_limits_set(mddev->gendisk->queue, &lim); in raid0_set_limits()
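raid0_set_limits() translates the chunk geometry into block-layer hints: io_min is one chunk in bytes (the << 9 converts 512-byte sectors to bytes) and io_opt is a full stripe across all members. A worked computation with example values:

    #include <stdio.h>

    int main(void)
    {
        unsigned chunk_sectors = 1024;           /* 512 KiB chunk (example) */
        unsigned raid_disks = 4;                 /* example member count */
        unsigned io_min = chunk_sectors << 9;    /* bytes per chunk: 524288 */
        unsigned io_opt = io_min * raid_disks;   /* bytes per stripe: 2097152 */
        printf("io_min=%u io_opt=%u\n", io_min, io_opt);
        return 0;
    }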
395 static int raid0_run(struct mddev *mddev) in raid0_run() argument
400 if (mddev->chunk_sectors == 0) { in raid0_run()
401 pr_warn("md/raid0:%s: chunk size must be set.\n", mdname(mddev)); in raid0_run()
404 if (md_check_no_bitmap(mddev)) in raid0_run()
408 if (mddev->private == NULL) { in raid0_run()
409 ret = create_strip_zones(mddev, &conf); in raid0_run()
412 mddev->private = conf; in raid0_run()
414 conf = mddev->private; in raid0_run()
415 if (!mddev_is_dm(mddev)) { in raid0_run()
416 ret = raid0_set_limits(mddev); in raid0_run()
422 md_set_array_sectors(mddev, raid0_size(mddev, 0, 0)); in raid0_run()
425 mdname(mddev), in raid0_run()
426 (unsigned long long)mddev->array_sectors); in raid0_run()
428 dump_zones(mddev); in raid0_run()
430 return md_integrity_register(mddev); in raid0_run()
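Taken together, the raid0_run() fragments show the setup order: reject a zero chunk size, reject bitmaps, build the zone table only when mddev->private is not already populated (takeover paths pre-build it), apply queue limits unless the array is dm-backed, then publish the array size. A stubbed sketch of that control flow; every helper here is a placeholder, not a kernel function:

    #include <stdio.h>

    /* Stubbed model of the raid0_run() ordering; each helper below is a
     * placeholder standing in for the kernel function with that role. */
    static int create_zones(void)  { puts("build strip zones");  return 0; }
    static int set_limits(void)    { puts("apply queue limits"); return 0; }
    static void set_size(void)     { puts("publish array size"); }

    static int raid0_run_model(unsigned chunk_sectors, int have_bitmap,
                               int have_private, int is_dm)
    {
        if (chunk_sectors == 0)
            return -1;                       /* chunk size must be set */
        if (have_bitmap)
            return -1;                       /* raid0 cannot carry a bitmap */
        if (!have_private && create_zones())
            return -1;                       /* zone table cached across takeover */
        if (!is_dm && set_limits())
            return -1;                       /* dm-backed arrays skip queue limits */
        set_size();
        return 0;
    }

    int main(void) { return raid0_run_model(1024, 0, 0, 0); }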
447 static void raid0_handle_discard(struct mddev *mddev, struct bio *bio) in raid0_handle_discard() argument
449 struct r0conf *conf = mddev->private; in raid0_handle_discard()
468 &mddev->bio_set); in raid0_handle_discard()
487 stripe_size = zone->nb_dev * mddev->chunk_sectors; in raid0_handle_discard()
496 sector_div(orig_start, mddev->chunk_sectors); in raid0_handle_discard()
501 sector_div(orig_end, mddev->chunk_sectors); in raid0_handle_discard()
507 mddev->chunk_sectors; in raid0_handle_discard()
509 mddev->chunk_sectors; in raid0_handle_discard()
512 mddev->chunk_sectors) + in raid0_handle_discard()
513 first_stripe_index * mddev->chunk_sectors; in raid0_handle_discard()
515 mddev->chunk_sectors) + in raid0_handle_discard()
516 last_stripe_index * mddev->chunk_sectors; in raid0_handle_discard()
528 mddev->chunk_sectors; in raid0_handle_discard()
530 dev_start = first_stripe_index * mddev->chunk_sectors; in raid0_handle_discard()
535 dev_end = (last_stripe_index + 1) * mddev->chunk_sectors; in raid0_handle_discard()
537 dev_end = last_stripe_index * mddev->chunk_sectors; in raid0_handle_discard()
546 md_submit_discard_bio(mddev, rdev, bio, in raid0_handle_discard()
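raid0_handle_discard() splits one zone-relative discard range [start, end) into a per-member [dev_start, dev_end) range: both endpoints are reduced to stripe and chunk coordinates, then each disk gets full chunks between the first and last stripe plus partial chunks at the edges. A user-space model of the per-disk loop, with illustrative names:

    #include <stdio.h>
    #include <stdint.h>

    /* Model of the range split in raid0_handle_discard() for one zone:
     * turn a zone-relative [start, end) discard into a per-member
     * [dev_start, dev_end) range, mirroring the kernel's per-disk loop. */
    static void discard_split_model(uint64_t start, uint64_t end,
                                    uint64_t chunk, unsigned nb_dev)
    {
        uint64_t stripe = (uint64_t)nb_dev * chunk;   /* stripe_size */
        uint64_t first_stripe = start / stripe, last_stripe = end / stripe;
        unsigned start_disk = (start - first_stripe * stripe) / chunk;
        unsigned end_disk   = (end - last_stripe * stripe) / chunk;
        uint64_t start_off = (start - first_stripe * stripe) % chunk +
                             first_stripe * chunk;
        uint64_t end_off   = (end - last_stripe * stripe) % chunk +
                             last_stripe * chunk;

        for (unsigned d = 0; d < nb_dev; d++) {
            uint64_t ds = d < start_disk ? (first_stripe + 1) * chunk :
                          d > start_disk ? first_stripe * chunk : start_off;
            uint64_t de = d < end_disk ? (last_stripe + 1) * chunk :
                          d > end_disk ? last_stripe * chunk : end_off;
            if (de > ds)
                printf("disk %u: discard [%llu, %llu)\n", d,
                       (unsigned long long)ds, (unsigned long long)de);
        }
    }

    int main(void)
    {
        discard_split_model(1500, 9000, 1024, 4);   /* example geometry */
        return 0;
    }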
553 static void raid0_map_submit_bio(struct mddev *mddev, struct bio *bio) in raid0_map_submit_bio() argument
555 struct r0conf *conf = mddev->private; in raid0_map_submit_bio()
561 md_account_bio(mddev, &bio); in raid0_map_submit_bio()
563 zone = find_zone(mddev->private, &sector); in raid0_map_submit_bio()
566 tmp_dev = map_sector(mddev, zone, bio_sector, &sector); in raid0_map_submit_bio()
569 tmp_dev = map_sector(mddev, zone, sector, &sector); in raid0_map_submit_bio()
572 WARN(1, "md/raid0:%s: Invalid layout\n", mdname(mddev)); in raid0_map_submit_bio()
579 md_error(mddev, tmp_dev); in raid0_map_submit_bio()
586 mddev_trace_remap(mddev, bio, bio_sector); in raid0_map_submit_bio()
587 mddev_check_write_zeroes(mddev, bio); in raid0_map_submit_bio()
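The find_zone()/map_sector() calls above pass &sector so the helpers can rewrite the sector in place: find_zone() converts it to a zone-relative offset, and the layout switch then decides which value feeds the mapper. RAID0_ORIG_LAYOUT uses the absolute bio sector while RAID0_ALT_MULTIZONE_LAYOUT uses the zone-relative one; the two only diverge on multi-zone arrays. A toy illustration of the selection:

    #include <stdio.h>

    /* Illustration of the layout switch in raid0_map_submit_bio():
     * the two multizone layouts differ only in which sector value is
     * handed to the mapper once the array has more than one zone. */
    enum layout { RAID0_ORIG_LAYOUT = 1, RAID0_ALT_MULTIZONE_LAYOUT = 2 };

    int main(void)
    {
        unsigned long long bio_sector = 5000;  /* absolute array sector */
        unsigned long long sector = 3976;      /* zone-relative, from find_zone() */
        enum layout l = RAID0_ORIG_LAYOUT;

        switch (l) {
        case RAID0_ORIG_LAYOUT:
            printf("map with absolute sector %llu\n", bio_sector);
            break;
        case RAID0_ALT_MULTIZONE_LAYOUT:
            printf("map with zone-relative sector %llu\n", sector);
            break;
        }
        return 0;
    }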
591 static bool raid0_make_request(struct mddev *mddev, struct bio *bio) in raid0_make_request() argument
598 && md_flush_request(mddev, bio)) in raid0_make_request()
602 raid0_handle_discard(mddev, bio); in raid0_make_request()
607 chunk_sects = mddev->chunk_sectors; in raid0_make_request()
616 &mddev->bio_set); in raid0_make_request()
624 raid0_map_submit_bio(mddev, bio); in raid0_make_request()
628 raid0_map_submit_bio(mddev, bio); in raid0_make_request()
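raid0_make_request() first splits any bio that would cross a chunk boundary; for the power-of-two case the room left in the current chunk is chunk_sects minus the offset within it, computed with a mask. A model of that split test (the kernel falls back to sector_div() for non-power-of-two chunks):

    #include <stdio.h>
    #include <stdint.h>

    /* Model of the split test in raid0_make_request(): how many sectors
     * remain in the chunk containing `sector`, assuming a power-of-two
     * chunk size. */
    static uint64_t sectors_left_in_chunk(uint64_t sector, uint64_t chunk_sects)
    {
        return chunk_sects - (sector & (chunk_sects - 1));
    }

    int main(void)
    {
        uint64_t sector = 1500, chunk = 1024, len = 800;
        uint64_t room = sectors_left_in_chunk(sector, chunk);   /* 548 */
        if (len > room)
            printf("split: first %llu sectors, then remap the rest\n",
                   (unsigned long long)room);
        return 0;
    }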
632 static void raid0_status(struct seq_file *seq, struct mddev *mddev) in raid0_status() argument
634 seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2); in raid0_status()
638 static void raid0_error(struct mddev *mddev, struct md_rdev *rdev) in raid0_error() argument
640 if (!test_and_set_bit(MD_BROKEN, &mddev->flags)) { in raid0_error()
641 char *md_name = mdname(mddev); in raid0_error()
648 static void *raid0_takeover_raid45(struct mddev *mddev) in raid0_takeover_raid45() argument
653 if (mddev->degraded != 1) { in raid0_takeover_raid45()
655 mdname(mddev), in raid0_takeover_raid45()
656 mddev->degraded); in raid0_takeover_raid45()
660 rdev_for_each(rdev, mddev) { in raid0_takeover_raid45()
662 if (rdev->raid_disk == mddev->raid_disks-1) { in raid0_takeover_raid45()
664 mdname(mddev)); in raid0_takeover_raid45()
667 rdev->sectors = mddev->dev_sectors; in raid0_takeover_raid45()
671 mddev->new_level = 0; in raid0_takeover_raid45()
672 mddev->new_layout = 0; in raid0_takeover_raid45()
673 mddev->new_chunk_sectors = mddev->chunk_sectors; in raid0_takeover_raid45()
674 mddev->raid_disks--; in raid0_takeover_raid45()
675 mddev->delta_disks = -1; in raid0_takeover_raid45()
677 mddev->resync_offset = MaxSector; in raid0_takeover_raid45()
678 mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS); in raid0_takeover_raid45()
680 create_strip_zones(mddev, &priv_conf); in raid0_takeover_raid45()
685 static void *raid0_takeover_raid10(struct mddev *mddev) in raid0_takeover_raid10() argument
695 if (mddev->layout != ((1 << 8) + 2)) { in raid0_takeover_raid10()
697 mdname(mddev), in raid0_takeover_raid10()
698 mddev->layout); in raid0_takeover_raid10()
701 if (mddev->raid_disks & 1) { in raid0_takeover_raid10()
703 mdname(mddev)); in raid0_takeover_raid10()
706 if (mddev->degraded != (mddev->raid_disks>>1)) { in raid0_takeover_raid10()
708 mdname(mddev)); in raid0_takeover_raid10()
713 mddev->new_level = 0; in raid0_takeover_raid10()
714 mddev->new_layout = 0; in raid0_takeover_raid10()
715 mddev->new_chunk_sectors = mddev->chunk_sectors; in raid0_takeover_raid10()
716 mddev->delta_disks = - mddev->raid_disks / 2; in raid0_takeover_raid10()
717 mddev->raid_disks += mddev->delta_disks; in raid0_takeover_raid10()
718 mddev->degraded = 0; in raid0_takeover_raid10()
720 mddev->resync_offset = MaxSector; in raid0_takeover_raid10()
721 mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS); in raid0_takeover_raid10()
723 create_strip_zones(mddev, &priv_conf); in raid0_takeover_raid10()
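The raid10 takeover at listing line 695 only accepts layout (1 << 8) + 2, i.e. 0x102: md encodes near copies in the low byte and far copies in the next byte, so this is the plain near-2 mirror geometry. The array must also have an even disk count and be missing exactly one disk per mirror pair. A decoding sketch (bit layout per my reading of the md raid10 format; treat as an assumption):

    #include <stdio.h>

    /* Decode the md raid10 layout word checked in raid0_takeover_raid10():
     * near copies live in bits 0-7, far copies in bits 8-15. */
    int main(void)
    {
        unsigned layout = (1 << 8) + 2;               /* the only accepted value */
        unsigned near_copies = layout & 0xff;         /* 2 */
        unsigned far_copies = (layout >> 8) & 0xff;   /* 1 */
        printf("near=%u far=%u\n", near_copies, far_copies);
        return 0;
    }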
727 static void *raid0_takeover_raid1(struct mddev *mddev) in raid0_takeover_raid1() argument
735 if ((mddev->raid_disks - 1) != mddev->degraded) { in raid0_takeover_raid1()
737 mdname(mddev)); in raid0_takeover_raid1()
748 while (chunksect && (mddev->array_sectors & (chunksect - 1))) in raid0_takeover_raid1()
756 mddev->new_level = 0; in raid0_takeover_raid1()
757 mddev->new_layout = 0; in raid0_takeover_raid1()
758 mddev->new_chunk_sectors = chunksect; in raid0_takeover_raid1()
759 mddev->chunk_sectors = chunksect; in raid0_takeover_raid1()
760 mddev->delta_disks = 1 - mddev->raid_disks; in raid0_takeover_raid1()
761 mddev->raid_disks = 1; in raid0_takeover_raid1()
763 mddev->resync_offset = MaxSector; in raid0_takeover_raid1()
764 mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS); in raid0_takeover_raid1()
766 create_strip_zones(mddev, &priv_conf); in raid0_takeover_raid1()
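raid1 carries no chunk size, so raid0_takeover_raid1() synthesizes one: start at 64 KiB (128 sectors) and halve until the candidate divides mddev->array_sectors, ending at the largest power of two that is a factor of the array size; the array_sectors & (chunksect - 1) test at listing line 748 is that divisibility check. A model of the loop:

    #include <stdio.h>
    #include <stdint.h>

    /* Model of the chunk-size search in raid0_takeover_raid1(): halve a
     * power-of-two candidate until it divides the array size evenly. */
    static uint64_t pick_chunksect(uint64_t array_sectors)
    {
        uint64_t chunksect = 128;   /* 64 KiB in 512-byte sectors */
        while (chunksect && (array_sectors & (chunksect - 1)))
            chunksect >>= 1;
        return chunksect;           /* 0 means even 1 sector did not divide */
    }

    int main(void)
    {
        printf("%llu\n", (unsigned long long)pick_chunksect(1000000));  /* 64 */
        return 0;
    }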
770 static void *raid0_takeover(struct mddev *mddev) in raid0_takeover() argument
779 if (mddev->bitmap) { in raid0_takeover()
781 mdname(mddev)); in raid0_takeover()
784 if (mddev->level == 4) in raid0_takeover()
785 return raid0_takeover_raid45(mddev); in raid0_takeover()
787 if (mddev->level == 5) { in raid0_takeover()
788 if (mddev->layout == ALGORITHM_PARITY_N) in raid0_takeover()
789 return raid0_takeover_raid45(mddev); in raid0_takeover()
792 mdname(mddev), ALGORITHM_PARITY_N); in raid0_takeover()
795 if (mddev->level == 10) in raid0_takeover()
796 return raid0_takeover_raid10(mddev); in raid0_takeover()
798 if (mddev->level == 1) in raid0_takeover()
799 return raid0_takeover_raid1(mddev); in raid0_takeover()
802 mddev->level); in raid0_takeover()
807 static void raid0_quiesce(struct mddev *mddev, int quiesce) in raid0_quiesce() argument