Lines Matching refs:conf
46 static void allow_barrier(struct r1conf *conf, sector_t sector_nr);
47 static void lower_barrier(struct r1conf *conf, sector_t sector_nr);
237 static void put_all_bios(struct r1conf *conf, struct r1bio *r1_bio) in put_all_bios() argument
241 for (i = 0; i < conf->raid_disks * 2; i++) { in put_all_bios()
251 struct r1conf *conf = r1_bio->mddev->private; in free_r1bio() local
253 put_all_bios(conf, r1_bio); in free_r1bio()
254 mempool_free(r1_bio, &conf->r1bio_pool); in free_r1bio()
259 struct r1conf *conf = r1_bio->mddev->private; in put_buf() local
263 for (i = 0; i < conf->raid_disks * 2; i++) { in put_buf()
266 rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev); in put_buf()
269 mempool_free(r1_bio, &conf->r1buf_pool); in put_buf()
271 lower_barrier(conf, sect); in put_buf()
278 struct r1conf *conf = mddev->private; in reschedule_retry() local
282 spin_lock_irqsave(&conf->device_lock, flags); in reschedule_retry()
283 list_add(&r1_bio->retry_list, &conf->retry_list); in reschedule_retry()
284 atomic_inc(&conf->nr_queued[idx]); in reschedule_retry()
285 spin_unlock_irqrestore(&conf->device_lock, flags); in reschedule_retry()
287 wake_up(&conf->wait_barrier); in reschedule_retry()
311 struct r1conf *conf = r1_bio->mddev->private; in raid_end_bio_io() local
326 allow_barrier(conf, r1_bio->sector); in raid_end_bio_io()
336 struct r1conf *conf = r1_bio->mddev->private; in update_head_pos() local
338 conf->mirrors[disk].head_position = in update_head_pos()
348 struct r1conf *conf = r1_bio->mddev->private; in find_bio_disk() local
349 int raid_disks = conf->raid_disks; in find_bio_disk()
365 struct r1conf *conf = r1_bio->mddev->private; in raid1_end_read_request() local
366 struct md_rdev *rdev = conf->mirrors[r1_bio->read_disk].rdev; in raid1_end_read_request()
386 spin_lock_irqsave(&conf->device_lock, flags); in raid1_end_read_request()
387 if (r1_bio->mddev->degraded == conf->raid_disks || in raid1_end_read_request()
388 (r1_bio->mddev->degraded == conf->raid_disks-1 && in raid1_end_read_request()
391 spin_unlock_irqrestore(&conf->device_lock, flags); in raid1_end_read_request()
396 rdev_dec_pending(rdev, conf->mddev); in raid1_end_read_request()
403 mdname(conf->mddev), in raid1_end_read_request()
448 struct r1conf *conf = r1_bio->mddev->private; in raid1_end_write_request() local
451 struct md_rdev *rdev = conf->mirrors[mirror].rdev; in raid1_end_write_request()
465 conf->mddev->recovery); in raid1_end_write_request()
551 rdev_dec_pending(rdev, conf->mddev); in raid1_end_write_request()
596 static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sectors) in read_balance() argument
628 if ((conf->mddev->recovery_cp < this_sector + sectors) || in read_balance()
629 (mddev_is_clustered(conf->mddev) && in read_balance()
630 md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector, in read_balance()
636 for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) { in read_balance()
643 rdev = rcu_dereference(conf->mirrors[disk].rdev); in read_balance()
710 dist = abs(this_sector - conf->mirrors[disk].head_position); in read_balance()
716 if (conf->mirrors[disk].next_seq_sect == this_sector in read_balance()
719 struct raid1_info *mirror = &conf->mirrors[disk]; in read_balance()
774 rdev = rcu_dereference(conf->mirrors[best_disk].rdev); in read_balance()
780 if (conf->mirrors[best_disk].next_seq_sect != this_sector) in read_balance()
781 conf->mirrors[best_disk].seq_start = this_sector; in read_balance()
783 conf->mirrors[best_disk].next_seq_sect = this_sector + sectors; in read_balance()
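
The read_balance() references above only hint at the selection policy; as a rough illustration, here is a minimal userspace sketch (hypothetical names, not the kernel code) of the two criteria visible in those lines: head-position distance and the next_seq_sect sequential-stream preference.

#include <stdint.h>

/* Hypothetical stand-in for the per-mirror state read_balance() consults. */
struct mirror_pos {
        uint64_t head_position;  /* sector where the last request left the head */
        uint64_t next_seq_sect;  /* sector a sequential reader would ask for next */
};

/*
 * Prefer the mirror whose head is closest to this_sector, and treat an
 * exact sequential match (next_seq_sect == this_sector) as distance zero
 * so an ongoing sequential stream keeps reading from the same mirror.
 * The real function also weighs bad blocks, pending I/O and non-rotational
 * devices; those are left out of this sketch.
 */
static int pick_read_mirror(const struct mirror_pos *m, int nmirrors,
                            uint64_t this_sector)
{
        int best_disk = -1;
        uint64_t best_dist = UINT64_MAX;

        for (int disk = 0; disk < nmirrors; disk++) {
                uint64_t dist = m[disk].head_position > this_sector ?
                                m[disk].head_position - this_sector :
                                this_sector - m[disk].head_position;

                if (m[disk].next_seq_sect == this_sector)
                        dist = 0;       /* continue the sequential stream */

                if (dist < best_dist) {
                        best_dist = dist;
                        best_disk = disk;
                }
        }
        return best_disk;
}
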
791 static void flush_bio_list(struct r1conf *conf, struct bio *bio) in flush_bio_list() argument
794 md_bitmap_unplug(conf->mddev->bitmap); in flush_bio_list()
795 wake_up(&conf->wait_barrier); in flush_bio_list()
815 static void flush_pending_writes(struct r1conf *conf) in flush_pending_writes() argument
820 spin_lock_irq(&conf->device_lock); in flush_pending_writes()
822 if (conf->pending_bio_list.head) { in flush_pending_writes()
826 bio = bio_list_get(&conf->pending_bio_list); in flush_pending_writes()
827 conf->pending_count = 0; in flush_pending_writes()
828 spin_unlock_irq(&conf->device_lock); in flush_pending_writes()
841 flush_bio_list(conf, bio); in flush_pending_writes()
844 spin_unlock_irq(&conf->device_lock); in flush_pending_writes()
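
The flush_pending_writes() lines above show the pending list being detached under device_lock and submitted only after the unlock. A condensed pthread-based model of that detach-then-submit pattern, with hypothetical types, might look like:

#include <pthread.h>
#include <stddef.h>

struct entry { struct entry *next; };

/* Hypothetical stand-in for conf->pending_bio_list and its lock. */
struct pending_queue {
        pthread_mutex_t device_lock;
        struct entry *head;
        int pending_count;
};

/*
 * Take everything queued so far in one step, like bio_list_get(), and
 * reset the count; the caller submits the detached entries after the
 * lock has been dropped, which is why the listing shows the unlock
 * happening before flush_bio_list().
 */
static struct entry *detach_pending(struct pending_queue *q)
{
        pthread_mutex_lock(&q->device_lock);
        struct entry *list = q->head;
        q->head = NULL;
        q->pending_count = 0;
        pthread_mutex_unlock(&q->device_lock);
        return list;
}
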
871 static int raise_barrier(struct r1conf *conf, sector_t sector_nr) in raise_barrier() argument
875 spin_lock_irq(&conf->resync_lock); in raise_barrier()
878 wait_event_lock_irq(conf->wait_barrier, in raise_barrier()
879 !atomic_read(&conf->nr_waiting[idx]), in raise_barrier()
880 conf->resync_lock); in raise_barrier()
883 atomic_inc(&conf->barrier[idx]); in raise_barrier()
901 wait_event_lock_irq(conf->wait_barrier, in raise_barrier()
902 (!conf->array_frozen && in raise_barrier()
903 !atomic_read(&conf->nr_pending[idx]) && in raise_barrier()
904 atomic_read(&conf->barrier[idx]) < RESYNC_DEPTH) || in raise_barrier()
905 test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery), in raise_barrier()
906 conf->resync_lock); in raise_barrier()
908 if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { in raise_barrier()
909 atomic_dec(&conf->barrier[idx]); in raise_barrier()
910 spin_unlock_irq(&conf->resync_lock); in raise_barrier()
911 wake_up(&conf->wait_barrier); in raise_barrier()
915 atomic_inc(&conf->nr_sync_pending); in raise_barrier()
916 spin_unlock_irq(&conf->resync_lock); in raise_barrier()
921 static void lower_barrier(struct r1conf *conf, sector_t sector_nr) in lower_barrier() argument
925 BUG_ON(atomic_read(&conf->barrier[idx]) <= 0); in lower_barrier()
927 atomic_dec(&conf->barrier[idx]); in lower_barrier()
928 atomic_dec(&conf->nr_sync_pending); in lower_barrier()
929 wake_up(&conf->wait_barrier); in lower_barrier()
932 static void _wait_barrier(struct r1conf *conf, int idx) in _wait_barrier() argument
942 atomic_inc(&conf->nr_pending[idx]); in _wait_barrier()
962 if (!READ_ONCE(conf->array_frozen) && in _wait_barrier()
963 !atomic_read(&conf->barrier[idx])) in _wait_barrier()
973 spin_lock_irq(&conf->resync_lock); in _wait_barrier()
974 atomic_inc(&conf->nr_waiting[idx]); in _wait_barrier()
975 atomic_dec(&conf->nr_pending[idx]); in _wait_barrier()
980 wake_up(&conf->wait_barrier); in _wait_barrier()
982 wait_event_lock_irq(conf->wait_barrier, in _wait_barrier()
983 !conf->array_frozen && in _wait_barrier()
984 !atomic_read(&conf->barrier[idx]), in _wait_barrier()
985 conf->resync_lock); in _wait_barrier()
986 atomic_inc(&conf->nr_pending[idx]); in _wait_barrier()
987 atomic_dec(&conf->nr_waiting[idx]); in _wait_barrier()
988 spin_unlock_irq(&conf->resync_lock); in _wait_barrier()
991 static void wait_read_barrier(struct r1conf *conf, sector_t sector_nr) in wait_read_barrier() argument
1002 atomic_inc(&conf->nr_pending[idx]); in wait_read_barrier()
1004 if (!READ_ONCE(conf->array_frozen)) in wait_read_barrier()
1007 spin_lock_irq(&conf->resync_lock); in wait_read_barrier()
1008 atomic_inc(&conf->nr_waiting[idx]); in wait_read_barrier()
1009 atomic_dec(&conf->nr_pending[idx]); in wait_read_barrier()
1014 wake_up(&conf->wait_barrier); in wait_read_barrier()
1016 wait_event_lock_irq(conf->wait_barrier, in wait_read_barrier()
1017 !conf->array_frozen, in wait_read_barrier()
1018 conf->resync_lock); in wait_read_barrier()
1019 atomic_inc(&conf->nr_pending[idx]); in wait_read_barrier()
1020 atomic_dec(&conf->nr_waiting[idx]); in wait_read_barrier()
1021 spin_unlock_irq(&conf->resync_lock); in wait_read_barrier()
1024 static void wait_barrier(struct r1conf *conf, sector_t sector_nr) in wait_barrier() argument
1028 _wait_barrier(conf, idx); in wait_barrier()
1031 static void _allow_barrier(struct r1conf *conf, int idx) in _allow_barrier() argument
1033 atomic_dec(&conf->nr_pending[idx]); in _allow_barrier()
1034 wake_up(&conf->wait_barrier); in _allow_barrier()
1037 static void allow_barrier(struct r1conf *conf, sector_t sector_nr) in allow_barrier() argument
1041 _allow_barrier(conf, idx); in allow_barrier()
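
All of the barrier helpers above index the barrier[], nr_pending[], nr_waiting[] and nr_queued[] arrays by a bucket derived from the request's sector. Roughly, as a sketch with assumed constants (the kernel helper hashes the 64MiB region number rather than taking a plain modulo):

#include <stdint.h>

#define BARRIER_UNIT_SECTOR_BITS        17      /* one barrier unit = 64MiB of 512-byte sectors */
#define BARRIER_BUCKETS_NR              1024    /* assumed bucket count for this sketch */

static inline unsigned int sector_to_idx(uint64_t sector_nr)
{
        /* all sectors in the same 64MiB region share one bucket */
        return (unsigned int)((sector_nr >> BARRIER_UNIT_SECTOR_BITS) %
                              BARRIER_BUCKETS_NR);
}
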
1045 static int get_unqueued_pending(struct r1conf *conf) in get_unqueued_pending() argument
1049 ret = atomic_read(&conf->nr_sync_pending); in get_unqueued_pending()
1051 ret += atomic_read(&conf->nr_pending[idx]) - in get_unqueued_pending()
1052 atomic_read(&conf->nr_queued[idx]); in get_unqueued_pending()
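
get_unqueued_pending() sums, over every bucket, the requests that are in flight but not parked for retry, plus the resync requests. A self-contained C11 model of the same arithmetic, with a hypothetical container struct:

#include <stdatomic.h>

#define BARRIER_BUCKETS_NR      1024    /* assumed, as in the earlier sketch */

/* Hypothetical holder for the per-bucket counters referenced above. */
struct barrier_counters {
        atomic_int nr_sync_pending;
        atomic_int nr_pending[BARRIER_BUCKETS_NR];
        atomic_int nr_queued[BARRIER_BUCKETS_NR];
};

/* Requests that are neither finished nor queued on the retry list. */
static int get_unqueued_pending(struct barrier_counters *c)
{
        int ret = atomic_load(&c->nr_sync_pending);

        for (int idx = 0; idx < BARRIER_BUCKETS_NR; idx++)
                ret += atomic_load(&c->nr_pending[idx]) -
                       atomic_load(&c->nr_queued[idx]);
        return ret;
}
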
1057 static void freeze_array(struct r1conf *conf, int extra) in freeze_array() argument
1082 spin_lock_irq(&conf->resync_lock); in freeze_array()
1083 conf->array_frozen = 1; in freeze_array()
1084 raid1_log(conf->mddev, "wait freeze"); in freeze_array()
1086 conf->wait_barrier, in freeze_array()
1087 get_unqueued_pending(conf) == extra, in freeze_array()
1088 conf->resync_lock, in freeze_array()
1089 flush_pending_writes(conf)); in freeze_array()
1090 spin_unlock_irq(&conf->resync_lock); in freeze_array()
1092 static void unfreeze_array(struct r1conf *conf) in unfreeze_array() argument
1095 spin_lock_irq(&conf->resync_lock); in unfreeze_array()
1096 conf->array_frozen = 0; in unfreeze_array()
1097 spin_unlock_irq(&conf->resync_lock); in unfreeze_array()
1098 wake_up(&conf->wait_barrier); in unfreeze_array()
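
The freeze_array()/unfreeze_array() lines above show the handshake: freezing marks the array frozen and waits until the unqueued pending count drops to 'extra' (flushing pending writes while waiting), unfreezing clears the flag and wakes the waiters. A condensed pthread model of that handshake, with hypothetical names:

#include <pthread.h>

/* Hypothetical stand-in for the fields freeze_array()/unfreeze_array() touch. */
struct freeze_state {
        pthread_mutex_t resync_lock;
        pthread_cond_t  wait_barrier;
        int array_frozen;
        int unqueued_pending;   /* stands in for get_unqueued_pending(conf) */
};

static void freeze_array(struct freeze_state *c, int extra)
{
        pthread_mutex_lock(&c->resync_lock);
        c->array_frozen = 1;
        /* the kernel version also flushes pending writes on each wakeup */
        while (c->unqueued_pending != extra)
                pthread_cond_wait(&c->wait_barrier, &c->resync_lock);
        pthread_mutex_unlock(&c->resync_lock);
}

static void unfreeze_array(struct freeze_state *c)
{
        pthread_mutex_lock(&c->resync_lock);
        c->array_frozen = 0;
        pthread_mutex_unlock(&c->resync_lock);
        pthread_cond_broadcast(&c->wait_barrier);
}
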
1160 struct r1conf *conf = mddev->private; in raid1_unplug() local
1164 spin_lock_irq(&conf->device_lock); in raid1_unplug()
1165 bio_list_merge(&conf->pending_bio_list, &plug->pending); in raid1_unplug()
1166 conf->pending_count += plug->pending_cnt; in raid1_unplug()
1167 spin_unlock_irq(&conf->device_lock); in raid1_unplug()
1168 wake_up(&conf->wait_barrier); in raid1_unplug()
1176 flush_bio_list(conf, bio); in raid1_unplug()
1192 struct r1conf *conf = mddev->private; in alloc_r1bio() local
1195 r1_bio = mempool_alloc(&conf->r1bio_pool, GFP_NOIO); in alloc_r1bio()
1197 memset(r1_bio->bios, 0, conf->raid_disks * sizeof(r1_bio->bios[0])); in alloc_r1bio()
1205 struct r1conf *conf = mddev->private; in raid1_read_request() local
1227 rdev = rcu_dereference(conf->mirrors[r1_bio->read_disk].rdev); in raid1_read_request()
1239 wait_read_barrier(conf, bio->bi_iter.bi_sector); in raid1_read_request()
1251 rdisk = read_balance(conf, r1_bio, &max_sectors); in raid1_read_request()
1264 mirror = conf->mirrors + rdisk; in raid1_read_request()
1285 gfp, &conf->bio_split); in raid1_read_request()
1322 struct r1conf *conf = mddev->private; in raid1_write_request() local
1340 prepare_to_wait(&conf->wait_barrier, in raid1_write_request()
1348 finish_wait(&conf->wait_barrier, &w); in raid1_write_request()
1356 wait_barrier(conf, bio->bi_iter.bi_sector); in raid1_write_request()
1361 if (conf->pending_count >= max_queued_requests) { in raid1_write_request()
1364 wait_event(conf->wait_barrier, in raid1_write_request()
1365 conf->pending_count < max_queued_requests); in raid1_write_request()
1378 disks = conf->raid_disks * 2; in raid1_write_request()
1384 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); in raid1_write_request()
1401 if (i < conf->raid_disks) in raid1_write_request()
1458 rdev_dec_pending(conf->mirrors[j].rdev, mddev); in raid1_write_request()
1460 allow_barrier(conf, bio->bi_iter.bi_sector); in raid1_write_request()
1463 wait_barrier(conf, bio->bi_iter.bi_sector); in raid1_write_request()
1478 GFP_NOIO, &conf->bio_split); in raid1_write_request()
1495 struct md_rdev *rdev = conf->mirrors[i].rdev; in raid1_write_request()
1539 conf->raid_disks - mddev->degraded > 1) in raid1_write_request()
1560 spin_lock_irqsave(&conf->device_lock, flags); in raid1_write_request()
1561 bio_list_add(&conf->pending_bio_list, mbio); in raid1_write_request()
1562 conf->pending_count++; in raid1_write_request()
1563 spin_unlock_irqrestore(&conf->device_lock, flags); in raid1_write_request()
1571 wake_up(&conf->wait_barrier); in raid1_write_request()
1604 struct r1conf *conf = mddev->private; in raid1_status() local
1607 seq_printf(seq, " [%d/%d] [", conf->raid_disks, in raid1_status()
1608 conf->raid_disks - mddev->degraded); in raid1_status()
1610 for (i = 0; i < conf->raid_disks; i++) { in raid1_status()
1611 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); in raid1_status()
1622 struct r1conf *conf = mddev->private; in raid1_error() local
1631 spin_lock_irqsave(&conf->device_lock, flags); in raid1_error()
1633 && (conf->raid_disks - mddev->degraded) == 1) { in raid1_error()
1640 conf->recovery_disabled = mddev->recovery_disabled; in raid1_error()
1641 spin_unlock_irqrestore(&conf->device_lock, flags); in raid1_error()
1648 spin_unlock_irqrestore(&conf->device_lock, flags); in raid1_error()
1658 mdname(mddev), conf->raid_disks - mddev->degraded); in raid1_error()
1661 static void print_conf(struct r1conf *conf) in print_conf() argument
1666 if (!conf) { in print_conf()
1670 pr_debug(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded, in print_conf()
1671 conf->raid_disks); in print_conf()
1674 for (i = 0; i < conf->raid_disks; i++) { in print_conf()
1676 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); in print_conf()
1686 static void close_sync(struct r1conf *conf) in close_sync() argument
1691 _wait_barrier(conf, idx); in close_sync()
1692 _allow_barrier(conf, idx); in close_sync()
1695 mempool_exit(&conf->r1buf_pool); in close_sync()
1701 struct r1conf *conf = mddev->private; in raid1_spare_active() local
1712 spin_lock_irqsave(&conf->device_lock, flags); in raid1_spare_active()
1713 for (i = 0; i < conf->raid_disks; i++) { in raid1_spare_active()
1714 struct md_rdev *rdev = conf->mirrors[i].rdev; in raid1_spare_active()
1715 struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev; in raid1_spare_active()
1744 spin_unlock_irqrestore(&conf->device_lock, flags); in raid1_spare_active()
1746 print_conf(conf); in raid1_spare_active()
1752 struct r1conf *conf = mddev->private; in raid1_add_disk() local
1757 int last = conf->raid_disks - 1; in raid1_add_disk()
1759 if (mddev->recovery_disabled == conf->recovery_disabled) in raid1_add_disk()
1774 rdev->saved_raid_disk < conf->raid_disks && in raid1_add_disk()
1775 conf->mirrors[rdev->saved_raid_disk].rdev == NULL) in raid1_add_disk()
1779 p = conf->mirrors + mirror; in raid1_add_disk()
1792 conf->fullsync = 1; in raid1_add_disk()
1797 p[conf->raid_disks].rdev == NULL) { in raid1_add_disk()
1803 conf->fullsync = 1; in raid1_add_disk()
1804 rcu_assign_pointer(p[conf->raid_disks].rdev, rdev); in raid1_add_disk()
1810 print_conf(conf); in raid1_add_disk()
1816 struct r1conf *conf = mddev->private; in raid1_remove_disk() local
1819 struct raid1_info *p = conf->mirrors + number; in raid1_remove_disk()
1822 p = conf->mirrors + conf->raid_disks + number; in raid1_remove_disk()
1824 print_conf(conf); in raid1_remove_disk()
1835 mddev->recovery_disabled != conf->recovery_disabled && in raid1_remove_disk()
1836 mddev->degraded < conf->raid_disks) { in raid1_remove_disk()
1850 if (conf->mirrors[conf->raid_disks + number].rdev) { in raid1_remove_disk()
1856 conf->mirrors[conf->raid_disks + number].rdev; in raid1_remove_disk()
1857 freeze_array(conf, 0); in raid1_remove_disk()
1866 unfreeze_array(conf); in raid1_remove_disk()
1871 conf->mirrors[conf->raid_disks + number].rdev = NULL; in raid1_remove_disk()
1872 unfreeze_array(conf); in raid1_remove_disk()
1880 print_conf(conf); in raid1_remove_disk()
1937 struct r1conf *conf = mddev->private; in end_sync_write() local
1940 struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev; in end_sync_write()
1951 !is_badblock(conf->mirrors[r1_bio->read_disk].rdev, in end_sync_write()
1994 struct r1conf *conf = mddev->private; in fix_sync_read_error() local
2002 rdev = conf->mirrors[r1_bio->read_disk].rdev; in fix_sync_read_error()
2028 rdev = conf->mirrors[d].rdev; in fix_sync_read_error()
2037 if (d == conf->raid_disks * 2) in fix_sync_read_error()
2052 for (d = 0; d < conf->raid_disks * 2; d++) { in fix_sync_read_error()
2053 rdev = conf->mirrors[d].rdev; in fix_sync_read_error()
2060 conf->recovery_disabled = in fix_sync_read_error()
2078 d = conf->raid_disks * 2; in fix_sync_read_error()
2082 rdev = conf->mirrors[d].rdev; in fix_sync_read_error()
2093 d = conf->raid_disks * 2; in fix_sync_read_error()
2097 rdev = conf->mirrors[d].rdev; in fix_sync_read_error()
2122 struct r1conf *conf = mddev->private; in process_checks() local
2129 for (i = 0; i < conf->raid_disks * 2; i++) { in process_checks()
2140 conf->mirrors[i].rdev->data_offset; in process_checks()
2141 bio_set_dev(b, conf->mirrors[i].rdev->bdev); in process_checks()
2149 for (primary = 0; primary < conf->raid_disks * 2; primary++) in process_checks()
2153 rdev_dec_pending(conf->mirrors[primary].rdev, mddev); in process_checks()
2157 for (i = 0; i < conf->raid_disks * 2; i++) { in process_checks()
2191 rdev_dec_pending(conf->mirrors[i].rdev, mddev); in process_checks()
2201 struct r1conf *conf = mddev->private; in sync_request_write() local
2203 int disks = conf->raid_disks * 2; in sync_request_write()
2225 if (test_bit(Faulty, &conf->mirrors[i].rdev->flags)) { in sync_request_write()
2231 if (test_bit(FailFast, &conf->mirrors[i].rdev->flags)) in sync_request_write()
2236 md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio)); in sync_request_write()
2252 static void fix_read_error(struct r1conf *conf, int read_disk, in fix_read_error() argument
2255 struct mddev *mddev = conf->mddev; in fix_read_error()
2271 rdev = rcu_dereference(conf->mirrors[d].rdev); in fix_read_error()
2281 conf->tmppage, REQ_OP_READ, 0, false)) in fix_read_error()
2289 if (d == conf->raid_disks * 2) in fix_read_error()
2295 struct md_rdev *rdev = conf->mirrors[read_disk].rdev; in fix_read_error()
2304 d = conf->raid_disks * 2; in fix_read_error()
2307 rdev = rcu_dereference(conf->mirrors[d].rdev); in fix_read_error()
2313 conf->tmppage, WRITE); in fix_read_error()
2322 d = conf->raid_disks * 2; in fix_read_error()
2325 rdev = rcu_dereference(conf->mirrors[d].rdev); in fix_read_error()
2331 conf->tmppage, READ)) { in fix_read_error()
2351 struct r1conf *conf = mddev->private; in narrow_write_error() local
2352 struct md_rdev *rdev = conf->mirrors[i].rdev; in narrow_write_error()
2418 static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio) in handle_sync_write_finished() argument
2422 for (m = 0; m < conf->raid_disks * 2 ; m++) { in handle_sync_write_finished()
2423 struct md_rdev *rdev = conf->mirrors[m].rdev; in handle_sync_write_finished()
2434 md_error(conf->mddev, rdev); in handle_sync_write_finished()
2438 md_done_sync(conf->mddev, s, 1); in handle_sync_write_finished()
2441 static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio) in handle_write_finished() argument
2446 for (m = 0; m < conf->raid_disks * 2 ; m++) in handle_write_finished()
2448 struct md_rdev *rdev = conf->mirrors[m].rdev; in handle_write_finished()
2452 rdev_dec_pending(rdev, conf->mddev); in handle_write_finished()
2460 md_error(conf->mddev, in handle_write_finished()
2461 conf->mirrors[m].rdev); in handle_write_finished()
2465 rdev_dec_pending(conf->mirrors[m].rdev, in handle_write_finished()
2466 conf->mddev); in handle_write_finished()
2469 spin_lock_irq(&conf->device_lock); in handle_write_finished()
2470 list_add(&r1_bio->retry_list, &conf->bio_end_io_list); in handle_write_finished()
2472 atomic_inc(&conf->nr_queued[idx]); in handle_write_finished()
2473 spin_unlock_irq(&conf->device_lock); in handle_write_finished()
2478 wake_up(&conf->wait_barrier); in handle_write_finished()
2479 md_wakeup_thread(conf->mddev->thread); in handle_write_finished()
2487 static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio) in handle_read_error() argument
2489 struct mddev *mddev = conf->mddev; in handle_read_error()
2507 rdev = conf->mirrors[r1_bio->read_disk].rdev; in handle_read_error()
2510 freeze_array(conf, 1); in handle_read_error()
2511 fix_read_error(conf, r1_bio->read_disk, in handle_read_error()
2513 unfreeze_array(conf); in handle_read_error()
2520 rdev_dec_pending(rdev, conf->mddev); in handle_read_error()
2521 allow_barrier(conf, r1_bio->sector); in handle_read_error()
2534 struct r1conf *conf = mddev->private; in raid1d() local
2535 struct list_head *head = &conf->retry_list; in raid1d()
2541 if (!list_empty_careful(&conf->bio_end_io_list) && in raid1d()
2544 spin_lock_irqsave(&conf->device_lock, flags); in raid1d()
2546 list_splice_init(&conf->bio_end_io_list, &tmp); in raid1d()
2547 spin_unlock_irqrestore(&conf->device_lock, flags); in raid1d()
2553 atomic_dec(&conf->nr_queued[idx]); in raid1d()
2565 flush_pending_writes(conf); in raid1d()
2567 spin_lock_irqsave(&conf->device_lock, flags); in raid1d()
2569 spin_unlock_irqrestore(&conf->device_lock, flags); in raid1d()
2575 atomic_dec(&conf->nr_queued[idx]); in raid1d()
2576 spin_unlock_irqrestore(&conf->device_lock, flags); in raid1d()
2579 conf = mddev->private; in raid1d()
2583 handle_sync_write_finished(conf, r1_bio); in raid1d()
2588 handle_write_finished(conf, r1_bio); in raid1d()
2590 handle_read_error(conf, r1_bio); in raid1d()
2601 static int init_resync(struct r1conf *conf) in init_resync() argument
2606 BUG_ON(mempool_initialized(&conf->r1buf_pool)); in init_resync()
2608 return mempool_init(&conf->r1buf_pool, buffs, r1buf_pool_alloc, in init_resync()
2609 r1buf_pool_free, conf->poolinfo); in init_resync()
2612 static struct r1bio *raid1_alloc_init_r1buf(struct r1conf *conf) in raid1_alloc_init_r1buf() argument
2614 struct r1bio *r1bio = mempool_alloc(&conf->r1buf_pool, GFP_NOIO); in raid1_alloc_init_r1buf()
2619 for (i = conf->poolinfo->raid_disks; i--; ) { in raid1_alloc_init_r1buf()
2642 struct r1conf *conf = mddev->private; in raid1_sync_request() local
2657 if (!mempool_initialized(&conf->r1buf_pool)) in raid1_sync_request()
2658 if (init_resync(conf)) in raid1_sync_request()
2672 conf->fullsync = 0; in raid1_sync_request()
2675 close_sync(conf); in raid1_sync_request()
2678 conf->cluster_sync_low = 0; in raid1_sync_request()
2679 conf->cluster_sync_high = 0; in raid1_sync_request()
2687 conf->fullsync == 0) { in raid1_sync_request()
2695 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { in raid1_sync_request()
2705 if (atomic_read(&conf->nr_waiting[idx])) in raid1_sync_request()
2713 mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high)); in raid1_sync_request()
2716 if (raise_barrier(conf, sector_nr)) in raid1_sync_request()
2719 r1_bio = raid1_alloc_init_r1buf(conf); in raid1_sync_request()
2738 for (i = 0; i < conf->raid_disks * 2; i++) { in raid1_sync_request()
2742 rdev = rcu_dereference(conf->mirrors[i].rdev); in raid1_sync_request()
2745 if (i < conf->raid_disks) in raid1_sync_request()
2810 for (i = 0 ; i < conf->raid_disks * 2 ; i++) in raid1_sync_request()
2812 struct md_rdev *rdev = conf->mirrors[i].rdev; in raid1_sync_request()
2827 conf->recovery_disabled = mddev->recovery_disabled; in raid1_sync_request()
2873 !conf->fullsync && in raid1_sync_request()
2880 for (i = 0 ; i < conf->raid_disks * 2; i++) { in raid1_sync_request()
2903 conf->cluster_sync_high < sector_nr + nr_sectors) { in raid1_sync_request()
2904 conf->cluster_sync_low = mddev->curr_resync_completed; in raid1_sync_request()
2905 conf->cluster_sync_high = conf->cluster_sync_low + CLUSTER_RESYNC_WINDOW_SECTORS; in raid1_sync_request()
2908 conf->cluster_sync_low, in raid1_sync_request()
2909 conf->cluster_sync_high); in raid1_sync_request()
2917 for (i = 0; i < conf->raid_disks * 2 && read_targets; i++) { in raid1_sync_request()
2948 struct r1conf *conf; in setup_conf() local
2954 conf = kzalloc(sizeof(struct r1conf), GFP_KERNEL); in setup_conf()
2955 if (!conf) in setup_conf()
2958 conf->nr_pending = kcalloc(BARRIER_BUCKETS_NR, in setup_conf()
2960 if (!conf->nr_pending) in setup_conf()
2963 conf->nr_waiting = kcalloc(BARRIER_BUCKETS_NR, in setup_conf()
2965 if (!conf->nr_waiting) in setup_conf()
2968 conf->nr_queued = kcalloc(BARRIER_BUCKETS_NR, in setup_conf()
2970 if (!conf->nr_queued) in setup_conf()
2973 conf->barrier = kcalloc(BARRIER_BUCKETS_NR, in setup_conf()
2975 if (!conf->barrier) in setup_conf()
2978 conf->mirrors = kzalloc(array3_size(sizeof(struct raid1_info), in setup_conf()
2981 if (!conf->mirrors) in setup_conf()
2984 conf->tmppage = alloc_page(GFP_KERNEL); in setup_conf()
2985 if (!conf->tmppage) in setup_conf()
2988 conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL); in setup_conf()
2989 if (!conf->poolinfo) in setup_conf()
2991 conf->poolinfo->raid_disks = mddev->raid_disks * 2; in setup_conf()
2992 err = mempool_init(&conf->r1bio_pool, NR_RAID_BIOS, r1bio_pool_alloc, in setup_conf()
2993 rbio_pool_free, conf->poolinfo); in setup_conf()
2997 err = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0); in setup_conf()
3001 conf->poolinfo->mddev = mddev; in setup_conf()
3004 spin_lock_init(&conf->device_lock); in setup_conf()
3011 disk = conf->mirrors + mddev->raid_disks + disk_idx; in setup_conf()
3013 disk = conf->mirrors + disk_idx; in setup_conf()
3021 conf->raid_disks = mddev->raid_disks; in setup_conf()
3022 conf->mddev = mddev; in setup_conf()
3023 INIT_LIST_HEAD(&conf->retry_list); in setup_conf()
3024 INIT_LIST_HEAD(&conf->bio_end_io_list); in setup_conf()
3026 spin_lock_init(&conf->resync_lock); in setup_conf()
3027 init_waitqueue_head(&conf->wait_barrier); in setup_conf()
3029 bio_list_init(&conf->pending_bio_list); in setup_conf()
3030 conf->pending_count = 0; in setup_conf()
3031 conf->recovery_disabled = mddev->recovery_disabled - 1; in setup_conf()
3034 for (i = 0; i < conf->raid_disks * 2; i++) { in setup_conf()
3036 disk = conf->mirrors + i; in setup_conf()
3038 if (i < conf->raid_disks && in setup_conf()
3039 disk[conf->raid_disks].rdev) { in setup_conf()
3046 disk[conf->raid_disks].rdev; in setup_conf()
3047 disk[conf->raid_disks].rdev = NULL; in setup_conf()
3058 conf->fullsync = 1; in setup_conf()
3063 conf->thread = md_register_thread(raid1d, mddev, "raid1"); in setup_conf()
3064 if (!conf->thread) in setup_conf()
3067 return conf; in setup_conf()
3070 if (conf) { in setup_conf()
3071 mempool_exit(&conf->r1bio_pool); in setup_conf()
3072 kfree(conf->mirrors); in setup_conf()
3073 safe_put_page(conf->tmppage); in setup_conf()
3074 kfree(conf->poolinfo); in setup_conf()
3075 kfree(conf->nr_pending); in setup_conf()
3076 kfree(conf->nr_waiting); in setup_conf()
3077 kfree(conf->nr_queued); in setup_conf()
3078 kfree(conf->barrier); in setup_conf()
3079 bioset_exit(&conf->bio_split); in setup_conf()
3080 kfree(conf); in setup_conf()
3088 struct r1conf *conf; in raid1_run() local
3112 conf = setup_conf(mddev); in raid1_run()
3114 conf = mddev->private; in raid1_run()
3116 if (IS_ERR(conf)) in raid1_run()
3117 return PTR_ERR(conf); in raid1_run()
3134 for (i = 0; i < conf->raid_disks; i++) in raid1_run()
3135 if (conf->mirrors[i].rdev == NULL || in raid1_run()
3136 !test_bit(In_sync, &conf->mirrors[i].rdev->flags) || in raid1_run()
3137 test_bit(Faulty, &conf->mirrors[i].rdev->flags)) in raid1_run()
3142 if (conf->raid_disks - mddev->degraded < 1) { in raid1_run()
3147 if (conf->raid_disks - mddev->degraded == 1) in raid1_run()
3160 mddev->thread = conf->thread; in raid1_run()
3161 conf->thread = NULL; in raid1_run()
3162 mddev->private = conf; in raid1_run()
3184 raid1_free(mddev, conf); in raid1_run()
3190 struct r1conf *conf = priv; in raid1_free() local
3192 mempool_exit(&conf->r1bio_pool); in raid1_free()
3193 kfree(conf->mirrors); in raid1_free()
3194 safe_put_page(conf->tmppage); in raid1_free()
3195 kfree(conf->poolinfo); in raid1_free()
3196 kfree(conf->nr_pending); in raid1_free()
3197 kfree(conf->nr_waiting); in raid1_free()
3198 kfree(conf->nr_queued); in raid1_free()
3199 kfree(conf->barrier); in raid1_free()
3200 bioset_exit(&conf->bio_split); in raid1_free()
3201 kfree(conf); in raid1_free()
3249 struct r1conf *conf = mddev->private; in raid1_reshape() local
3273 if (raid_disks < conf->raid_disks) { in raid1_reshape()
3275 for (d= 0; d < conf->raid_disks; d++) in raid1_reshape()
3276 if (conf->mirrors[d].rdev) in raid1_reshape()
3303 freeze_array(conf, 0); in raid1_reshape()
3306 oldpool = conf->r1bio_pool; in raid1_reshape()
3307 conf->r1bio_pool = newpool; in raid1_reshape()
3309 for (d = d2 = 0; d < conf->raid_disks; d++) { in raid1_reshape()
3310 struct md_rdev *rdev = conf->mirrors[d].rdev; in raid1_reshape()
3322 kfree(conf->mirrors); in raid1_reshape()
3323 conf->mirrors = newmirrors; in raid1_reshape()
3324 kfree(conf->poolinfo); in raid1_reshape()
3325 conf->poolinfo = newpoolinfo; in raid1_reshape()
3327 spin_lock_irqsave(&conf->device_lock, flags); in raid1_reshape()
3328 mddev->degraded += (raid_disks - conf->raid_disks); in raid1_reshape()
3329 spin_unlock_irqrestore(&conf->device_lock, flags); in raid1_reshape()
3330 conf->raid_disks = mddev->raid_disks = raid_disks; in raid1_reshape()
3333 unfreeze_array(conf); in raid1_reshape()
3345 struct r1conf *conf = mddev->private; in raid1_quiesce() local
3348 freeze_array(conf, 0); in raid1_quiesce()
3350 unfreeze_array(conf); in raid1_quiesce()
3359 struct r1conf *conf; in raid1_takeover() local
3363 conf = setup_conf(mddev); in raid1_takeover()
3364 if (!IS_ERR(conf)) { in raid1_takeover()
3366 conf->array_frozen = 1; in raid1_takeover()
3370 return conf; in raid1_takeover()