Lines matching refs: conf (struct r10conf usages in the md RAID10 driver)
67 static void allow_barrier(struct r10conf *conf);
68 static void lower_barrier(struct r10conf *conf);
69 static int _enough(struct r10conf *conf, int previous, int ignore);
70 static int enough(struct r10conf *conf, int ignore);
75 static void end_reshape(struct r10conf *conf);
93 struct r10conf *conf = data; in r10bio_pool_alloc() local
94 int size = offsetof(struct r10bio, devs[conf->copies]); in r10bio_pool_alloc()
118 struct r10conf *conf = data; in r10buf_pool_alloc() local
125 r10_bio = r10bio_pool_alloc(gfp_flags, conf); in r10buf_pool_alloc()
129 if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) || in r10buf_pool_alloc()
130 test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery)) in r10buf_pool_alloc()
131 nalloc = conf->copies; /* resync */ in r10buf_pool_alloc()
136 if (!conf->have_replacement) in r10buf_pool_alloc()
152 if (!conf->have_replacement) in r10buf_pool_alloc()
174 &conf->mddev->recovery)) { in r10buf_pool_alloc()
206 rbio_pool_free(r10_bio, conf); in r10buf_pool_alloc()
212 struct r10conf *conf = data; in r10buf_pool_free() local
217 for (j = conf->copies; j--; ) { in r10buf_pool_free()
234 rbio_pool_free(r10bio, conf); in r10buf_pool_free()
237 static void put_all_bios(struct r10conf *conf, struct r10bio *r10_bio) in put_all_bios() argument
241 for (i = 0; i < conf->copies; i++) { in put_all_bios()
255 struct r10conf *conf = r10_bio->mddev->private; in free_r10bio() local
257 put_all_bios(conf, r10_bio); in free_r10bio()
258 mempool_free(r10_bio, &conf->r10bio_pool); in free_r10bio()
263 struct r10conf *conf = r10_bio->mddev->private; in put_buf() local
265 mempool_free(r10_bio, &conf->r10buf_pool); in put_buf()
267 lower_barrier(conf); in put_buf()
274 struct r10conf *conf = mddev->private; in reschedule_retry() local
276 spin_lock_irqsave(&conf->device_lock, flags); in reschedule_retry()
277 list_add(&r10_bio->retry_list, &conf->retry_list); in reschedule_retry()
278 conf->nr_queued ++; in reschedule_retry()
279 spin_unlock_irqrestore(&conf->device_lock, flags); in reschedule_retry()
282 wake_up(&conf->wait_barrier); in reschedule_retry()
295 struct r10conf *conf = r10_bio->mddev->private; in raid_end_bio_io() local
305 allow_barrier(conf); in raid_end_bio_io()
315 struct r10conf *conf = r10_bio->mddev->private; in update_head_pos() local
317 conf->mirrors[r10_bio->devs[slot].devnum].head_position = in update_head_pos()
324 static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio, in find_bio_disk() argument
330 for (slot = 0; slot < conf->copies; slot++) { in find_bio_disk()
339 BUG_ON(slot == conf->copies); in find_bio_disk()
355 struct r10conf *conf = r10_bio->mddev->private; in raid10_end_read_request() local
381 if (!_enough(conf, test_bit(R10BIO_Previous, &r10_bio->state), in raid10_end_read_request()
387 rdev_dec_pending(rdev, conf->mddev); in raid10_end_read_request()
394 mdname(conf->mddev), in raid10_end_read_request()
432 struct r10conf *conf = r10_bio->mddev->private; in raid10_end_write_request() local
440 dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl); in raid10_end_write_request()
443 rdev = conf->mirrors[dev].replacement; in raid10_end_write_request()
447 rdev = conf->mirrors[dev].rdev; in raid10_end_write_request()
531 rdev_dec_pending(rdev, conf->mddev); in raid10_end_write_request()
624 static void raid10_find_phys(struct r10conf *conf, struct r10bio *r10bio) in raid10_find_phys() argument
626 struct geom *geo = &conf->geo; in raid10_find_phys()
628 if (conf->reshape_progress != MaxSector && in raid10_find_phys()
629 ((r10bio->sector >= conf->reshape_progress) != in raid10_find_phys()
630 conf->mddev->reshape_backwards)) { in raid10_find_phys()
632 geo = &conf->prev; in raid10_find_phys()
639 static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev) in raid10_find_virt() argument
645 struct geom *geo = &conf->geo; in raid10_find_virt()
703 static struct md_rdev *read_balance(struct r10conf *conf, in read_balance() argument
717 struct geom *geo = &conf->geo; in read_balance()
719 raid10_find_phys(conf, r10_bio); in read_balance()
735 if ((conf->mddev->recovery_cp < MaxSector in read_balance()
736 && (this_sector + sectors >= conf->next_resync)) || in read_balance()
737 (mddev_is_clustered(conf->mddev) && in read_balance()
738 md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector, in read_balance()
742 for (slot = 0; slot < conf->copies ; slot++) { in read_balance()
752 rdev = rcu_dereference(conf->mirrors[disk].replacement); in read_balance()
755 rdev = rcu_dereference(conf->mirrors[disk].rdev); in read_balance()
822 conf->mirrors[disk].head_position); in read_balance()
830 if (slot >= conf->copies) { in read_balance()
851 static void flush_pending_writes(struct r10conf *conf) in flush_pending_writes() argument
856 spin_lock_irq(&conf->device_lock); in flush_pending_writes()
858 if (conf->pending_bio_list.head) { in flush_pending_writes()
862 bio = bio_list_get(&conf->pending_bio_list); in flush_pending_writes()
863 conf->pending_count = 0; in flush_pending_writes()
864 spin_unlock_irq(&conf->device_lock); in flush_pending_writes()
880 md_bitmap_unplug(conf->mddev->bitmap); in flush_pending_writes()
881 wake_up(&conf->wait_barrier); in flush_pending_writes()
900 spin_unlock_irq(&conf->device_lock); in flush_pending_writes()
925 static void raise_barrier(struct r10conf *conf, int force) in raise_barrier() argument
927 BUG_ON(force && !conf->barrier); in raise_barrier()
928 spin_lock_irq(&conf->resync_lock); in raise_barrier()
931 wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting, in raise_barrier()
932 conf->resync_lock); in raise_barrier()
935 conf->barrier++; in raise_barrier()
938 wait_event_lock_irq(conf->wait_barrier, in raise_barrier()
939 !atomic_read(&conf->nr_pending) && conf->barrier < RESYNC_DEPTH, in raise_barrier()
940 conf->resync_lock); in raise_barrier()
942 spin_unlock_irq(&conf->resync_lock); in raise_barrier()
945 static void lower_barrier(struct r10conf *conf) in lower_barrier() argument
948 spin_lock_irqsave(&conf->resync_lock, flags); in lower_barrier()
949 conf->barrier--; in lower_barrier()
950 spin_unlock_irqrestore(&conf->resync_lock, flags); in lower_barrier()
951 wake_up(&conf->wait_barrier); in lower_barrier()
954 static void wait_barrier(struct r10conf *conf) in wait_barrier() argument
956 spin_lock_irq(&conf->resync_lock); in wait_barrier()
957 if (conf->barrier) { in wait_barrier()
959 conf->nr_waiting++; in wait_barrier()
969 raid10_log(conf->mddev, "wait barrier"); in wait_barrier()
970 wait_event_lock_irq(conf->wait_barrier, in wait_barrier()
971 !conf->barrier || in wait_barrier()
972 (atomic_read(&conf->nr_pending) && in wait_barrier()
979 (conf->mddev->thread->tsk == current && in wait_barrier()
981 &conf->mddev->recovery) && in wait_barrier()
982 conf->nr_queued > 0), in wait_barrier()
983 conf->resync_lock); in wait_barrier()
984 conf->nr_waiting--; in wait_barrier()
985 if (!conf->nr_waiting) in wait_barrier()
986 wake_up(&conf->wait_barrier); in wait_barrier()
988 atomic_inc(&conf->nr_pending); in wait_barrier()
989 spin_unlock_irq(&conf->resync_lock); in wait_barrier()
992 static void allow_barrier(struct r10conf *conf) in allow_barrier() argument
994 if ((atomic_dec_and_test(&conf->nr_pending)) || in allow_barrier()
995 (conf->array_freeze_pending)) in allow_barrier()
996 wake_up(&conf->wait_barrier); in allow_barrier()
999 static void freeze_array(struct r10conf *conf, int extra) in freeze_array() argument
1013 spin_lock_irq(&conf->resync_lock); in freeze_array()
1014 conf->array_freeze_pending++; in freeze_array()
1015 conf->barrier++; in freeze_array()
1016 conf->nr_waiting++; in freeze_array()
1017 wait_event_lock_irq_cmd(conf->wait_barrier, in freeze_array()
1018 atomic_read(&conf->nr_pending) == conf->nr_queued+extra, in freeze_array()
1019 conf->resync_lock, in freeze_array()
1020 flush_pending_writes(conf)); in freeze_array()
1022 conf->array_freeze_pending--; in freeze_array()
1023 spin_unlock_irq(&conf->resync_lock); in freeze_array()
1026 static void unfreeze_array(struct r10conf *conf) in unfreeze_array() argument
1029 spin_lock_irq(&conf->resync_lock); in unfreeze_array()
1030 conf->barrier--; in unfreeze_array()
1031 conf->nr_waiting--; in unfreeze_array()
1032 wake_up(&conf->wait_barrier); in unfreeze_array()
1033 spin_unlock_irq(&conf->resync_lock); in unfreeze_array()
1057 struct r10conf *conf = mddev->private; in raid10_unplug() local
1061 spin_lock_irq(&conf->device_lock); in raid10_unplug()
1062 bio_list_merge(&conf->pending_bio_list, &plug->pending); in raid10_unplug()
1063 conf->pending_count += plug->pending_cnt; in raid10_unplug()
1064 spin_unlock_irq(&conf->device_lock); in raid10_unplug()
1065 wake_up(&conf->wait_barrier); in raid10_unplug()
1074 wake_up(&conf->wait_barrier); in raid10_unplug()
1100 static void regular_request_wait(struct mddev *mddev, struct r10conf *conf, in regular_request_wait() argument
1103 wait_barrier(conf); in regular_request_wait()
1105 bio->bi_iter.bi_sector < conf->reshape_progress && in regular_request_wait()
1106 bio->bi_iter.bi_sector + sectors > conf->reshape_progress) { in regular_request_wait()
1107 raid10_log(conf->mddev, "wait reshape"); in regular_request_wait()
1108 allow_barrier(conf); in regular_request_wait()
1109 wait_event(conf->wait_barrier, in regular_request_wait()
1110 conf->reshape_progress <= bio->bi_iter.bi_sector || in regular_request_wait()
1111 conf->reshape_progress >= bio->bi_iter.bi_sector + in regular_request_wait()
1113 wait_barrier(conf); in regular_request_wait()
1120 struct r10conf *conf = mddev->private; in raid10_read_request() local
1148 err_rdev = rcu_dereference(conf->mirrors[disk].rdev); in raid10_read_request()
1159 regular_request_wait(mddev, conf, bio, r10_bio->sectors); in raid10_read_request()
1160 rdev = read_balance(conf, r10_bio, &max_sectors); in raid10_read_request()
1177 gfp, &conf->bio_split); in raid10_read_request()
1179 allow_barrier(conf); in raid10_read_request()
1181 wait_barrier(conf); in raid10_read_request()
1221 struct r10conf *conf = mddev->private; in raid10_write_one_disk() local
1227 rdev = conf->mirrors[devnum].replacement; in raid10_write_one_disk()
1231 rdev = conf->mirrors[devnum].rdev; in raid10_write_one_disk()
1234 rdev = conf->mirrors[devnum].rdev; in raid10_write_one_disk()
1248 &conf->mirrors[devnum].rdev->flags) in raid10_write_one_disk()
1249 && enough(conf, devnum)) in raid10_write_one_disk()
1253 if (conf->mddev->gendisk) in raid10_write_one_disk()
1255 mbio, disk_devt(conf->mddev->gendisk), in raid10_write_one_disk()
1271 spin_lock_irqsave(&conf->device_lock, flags); in raid10_write_one_disk()
1272 bio_list_add(&conf->pending_bio_list, mbio); in raid10_write_one_disk()
1273 conf->pending_count++; in raid10_write_one_disk()
1274 spin_unlock_irqrestore(&conf->device_lock, flags); in raid10_write_one_disk()
1282 struct r10conf *conf = mddev->private; in raid10_write_request() local
1294 prepare_to_wait(&conf->wait_barrier, in raid10_write_request()
1301 finish_wait(&conf->wait_barrier, &w); in raid10_write_request()
1305 regular_request_wait(mddev, conf, bio, sectors); in raid10_write_request()
1308 ? (bio->bi_iter.bi_sector < conf->reshape_safe && in raid10_write_request()
1309 bio->bi_iter.bi_sector + sectors > conf->reshape_progress) in raid10_write_request()
1310 : (bio->bi_iter.bi_sector + sectors > conf->reshape_safe && in raid10_write_request()
1311 bio->bi_iter.bi_sector < conf->reshape_progress))) { in raid10_write_request()
1313 mddev->reshape_position = conf->reshape_progress; in raid10_write_request()
1317 raid10_log(conf->mddev, "wait reshape metadata"); in raid10_write_request()
1321 conf->reshape_safe = mddev->reshape_position; in raid10_write_request()
1324 if (conf->pending_count >= max_queued_requests) { in raid10_write_request()
1327 wait_event(conf->wait_barrier, in raid10_write_request()
1328 conf->pending_count < max_queued_requests); in raid10_write_request()
1341 raid10_find_phys(conf, r10_bio); in raid10_write_request()
1347 for (i = 0; i < conf->copies; i++) { in raid10_write_request()
1349 struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev); in raid10_write_request()
1351 conf->mirrors[d].replacement); in raid10_write_request()
1436 rdev_dec_pending(conf->mirrors[d].rdev, mddev); in raid10_write_request()
1441 rdev = conf->mirrors[d].replacement; in raid10_write_request()
1445 rdev = conf->mirrors[d].rdev; in raid10_write_request()
1450 allow_barrier(conf); in raid10_write_request()
1451 raid10_log(conf->mddev, "wait rdev %d blocked", blocked_rdev->raid_disk); in raid10_write_request()
1453 wait_barrier(conf); in raid10_write_request()
1462 GFP_NOIO, &conf->bio_split); in raid10_write_request()
1464 allow_barrier(conf); in raid10_write_request()
1466 wait_barrier(conf); in raid10_write_request()
1474 for (i = 0; i < conf->copies; i++) { in raid10_write_request()
1485 struct r10conf *conf = mddev->private; in __make_request() local
1488 r10_bio = mempool_alloc(&conf->r10bio_pool, GFP_NOIO); in __make_request()
1497 memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) * conf->copies); in __make_request()
1507 struct r10conf *conf = mddev->private; in raid10_make_request() local
1508 sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask); in raid10_make_request()
1525 && (conf->geo.near_copies < conf->geo.raid_disks in raid10_make_request()
1526 || conf->prev.near_copies < in raid10_make_request()
1527 conf->prev.raid_disks))) in raid10_make_request()
1534 wake_up(&conf->wait_barrier); in raid10_make_request()
1540 struct r10conf *conf = mddev->private; in raid10_status() local
1543 if (conf->geo.near_copies < conf->geo.raid_disks) in raid10_status()
1545 if (conf->geo.near_copies > 1) in raid10_status()
1546 seq_printf(seq, " %d near-copies", conf->geo.near_copies); in raid10_status()
1547 if (conf->geo.far_copies > 1) { in raid10_status()
1548 if (conf->geo.far_offset) in raid10_status()
1549 seq_printf(seq, " %d offset-copies", conf->geo.far_copies); in raid10_status()
1551 seq_printf(seq, " %d far-copies", conf->geo.far_copies); in raid10_status()
1552 if (conf->geo.far_set_size != conf->geo.raid_disks) in raid10_status()
1553 seq_printf(seq, " %d devices per set", conf->geo.far_set_size); in raid10_status()
1555 seq_printf(seq, " [%d/%d] [", conf->geo.raid_disks, in raid10_status()
1556 conf->geo.raid_disks - mddev->degraded); in raid10_status()
1558 for (i = 0; i < conf->geo.raid_disks; i++) { in raid10_status()
1559 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); in raid10_status()
1571 static int _enough(struct r10conf *conf, int previous, int ignore) in _enough() argument
1577 disks = conf->prev.raid_disks; in _enough()
1578 ncopies = conf->prev.near_copies; in _enough()
1580 disks = conf->geo.raid_disks; in _enough()
1581 ncopies = conf->geo.near_copies; in _enough()
1586 int n = conf->copies; in _enough()
1592 (rdev = rcu_dereference(conf->mirrors[this].rdev)) && in _enough()
1607 static int enough(struct r10conf *conf, int ignore) in enough() argument
1614 return _enough(conf, 0, ignore) && in enough()
1615 _enough(conf, 1, ignore); in enough()
1621 struct r10conf *conf = mddev->private; in raid10_error() local
1630 spin_lock_irqsave(&conf->device_lock, flags); in raid10_error()
1632 && !enough(conf, rdev->raid_disk)) { in raid10_error()
1636 spin_unlock_irqrestore(&conf->device_lock, flags); in raid10_error()
1649 spin_unlock_irqrestore(&conf->device_lock, flags); in raid10_error()
1653 mdname(mddev), conf->geo.raid_disks - mddev->degraded); in raid10_error()
1656 static void print_conf(struct r10conf *conf) in print_conf() argument
1662 if (!conf) { in print_conf()
1666 pr_debug(" --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded, in print_conf()
1667 conf->geo.raid_disks); in print_conf()
1671 for (i = 0; i < conf->geo.raid_disks; i++) { in print_conf()
1673 rdev = conf->mirrors[i].rdev; in print_conf()
1682 static void close_sync(struct r10conf *conf) in close_sync() argument
1684 wait_barrier(conf); in close_sync()
1685 allow_barrier(conf); in close_sync()
1687 mempool_exit(&conf->r10buf_pool); in close_sync()
1693 struct r10conf *conf = mddev->private; in raid10_spare_active() local
1702 for (i = 0; i < conf->geo.raid_disks; i++) { in raid10_spare_active()
1703 tmp = conf->mirrors + i; in raid10_spare_active()
1730 spin_lock_irqsave(&conf->device_lock, flags); in raid10_spare_active()
1732 spin_unlock_irqrestore(&conf->device_lock, flags); in raid10_spare_active()
1734 print_conf(conf); in raid10_spare_active()
1740 struct r10conf *conf = mddev->private; in raid10_add_disk() local
1744 int last = conf->geo.raid_disks - 1; in raid10_add_disk()
1751 if (rdev->saved_raid_disk < 0 && !_enough(conf, 1, -1)) in raid10_add_disk()
1761 rdev->saved_raid_disk < conf->geo.raid_disks && in raid10_add_disk()
1762 conf->mirrors[rdev->saved_raid_disk].rdev == NULL) in raid10_add_disk()
1767 struct raid10_info *p = &conf->mirrors[mirror]; in raid10_add_disk()
1781 conf->fullsync = 1; in raid10_add_disk()
1795 conf->fullsync = 1; in raid10_add_disk()
1802 print_conf(conf); in raid10_add_disk()
1808 struct r10conf *conf = mddev->private; in raid10_remove_disk() local
1812 struct raid10_info *p = conf->mirrors + number; in raid10_remove_disk()
1814 print_conf(conf); in raid10_remove_disk()
1833 number < conf->geo.raid_disks && in raid10_remove_disk()
1834 enough(conf, -1)) { in raid10_remove_disk()
1863 print_conf(conf); in raid10_remove_disk()
1869 struct r10conf *conf = r10_bio->mddev->private; in __end_sync_read() local
1878 &conf->mirrors[d].rdev->corrected_errors); in __end_sync_read()
1883 rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev); in __end_sync_read()
1896 struct r10conf *conf = r10_bio->mddev->private; in end_sync_read() local
1897 int d = find_bio_disk(conf, r10_bio, bio, NULL, NULL); in end_sync_read()
1941 struct r10conf *conf = mddev->private; in end_sync_write() local
1949 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl); in end_sync_write()
1951 rdev = conf->mirrors[d].replacement; in end_sync_write()
1953 rdev = conf->mirrors[d].rdev; in end_sync_write()
1994 struct r10conf *conf = mddev->private; in sync_request_write() local
2003 for (i=0; i<conf->copies; i++) in sync_request_write()
2007 if (i == conf->copies) in sync_request_write()
2018 for (i=0 ; i < conf->copies ; i++) { in sync_request_write()
2032 rdev = conf->mirrors[d].rdev; in sync_request_write()
2078 atomic_inc(&conf->mirrors[d].rdev->nr_pending); in sync_request_write()
2080 md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio)); in sync_request_write()
2082 if (test_bit(FailFast, &conf->mirrors[d].rdev->flags)) in sync_request_write()
2084 tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset; in sync_request_write()
2085 bio_set_dev(tbio, conf->mirrors[d].rdev->bdev); in sync_request_write()
2092 for (i = 0; i < conf->copies; i++) { in sync_request_write()
2103 md_sync_acct(conf->mirrors[d].replacement->bdev, in sync_request_write()
2135 struct r10conf *conf = mddev->private; in fix_recovery_read_error() local
2153 rdev = conf->mirrors[dr].rdev; in fix_recovery_read_error()
2161 rdev = conf->mirrors[dw].rdev; in fix_recovery_read_error()
2183 if (rdev != conf->mirrors[dw].rdev) { in fix_recovery_read_error()
2185 struct md_rdev *rdev2 = conf->mirrors[dw].rdev; in fix_recovery_read_error()
2193 conf->mirrors[dw].recovery_disabled in fix_recovery_read_error()
2210 struct r10conf *conf = mddev->private; in recovery_request_write() local
2234 atomic_inc(&conf->mirrors[d].rdev->nr_pending); in recovery_request_write()
2235 md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio)); in recovery_request_write()
2239 atomic_inc(&conf->mirrors[d].replacement->nr_pending); in recovery_request_write()
2240 md_sync_acct(conf->mirrors[d].replacement->bdev, in recovery_request_write()
2314 static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10bio *r10_bio) in fix_read_error() argument
2325 rdev = conf->mirrors[d].rdev; in fix_read_error()
2363 rdev = rcu_dereference(conf->mirrors[d].rdev); in fix_read_error()
2375 conf->tmppage, in fix_read_error()
2383 if (sl == conf->copies) in fix_read_error()
2394 rdev = conf->mirrors[dn].rdev; in fix_read_error()
2415 sl = conf->copies; in fix_read_error()
2418 rdev = rcu_dereference(conf->mirrors[d].rdev); in fix_read_error()
2429 s, conf->tmppage, WRITE) in fix_read_error()
2451 sl = conf->copies; in fix_read_error()
2454 rdev = rcu_dereference(conf->mirrors[d].rdev); in fix_read_error()
2465 s, conf->tmppage, in fix_read_error()
2503 struct r10conf *conf = mddev->private; in narrow_write_error() local
2504 struct md_rdev *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev; in narrow_write_error()
2564 struct r10conf *conf = mddev->private; in handle_read_error() local
2582 freeze_array(conf, 1); in handle_read_error()
2583 fix_read_error(conf, mddev, r10_bio); in handle_read_error()
2584 unfreeze_array(conf); in handle_read_error()
2589 allow_barrier(conf); in handle_read_error()
2594 static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio) in handle_write_completed() argument
2607 for (m = 0; m < conf->copies; m++) { in handle_write_completed()
2609 rdev = conf->mirrors[dev].rdev; in handle_write_completed()
2623 md_error(conf->mddev, rdev); in handle_write_completed()
2625 rdev = conf->mirrors[dev].replacement; in handle_write_completed()
2640 md_error(conf->mddev, rdev); in handle_write_completed()
2646 for (m = 0; m < conf->copies; m++) { in handle_write_completed()
2649 rdev = conf->mirrors[dev].rdev; in handle_write_completed()
2655 rdev_dec_pending(rdev, conf->mddev); in handle_write_completed()
2659 md_error(conf->mddev, rdev); in handle_write_completed()
2663 rdev_dec_pending(rdev, conf->mddev); in handle_write_completed()
2666 rdev = conf->mirrors[dev].replacement; in handle_write_completed()
2672 rdev_dec_pending(rdev, conf->mddev); in handle_write_completed()
2676 spin_lock_irq(&conf->device_lock); in handle_write_completed()
2677 list_add(&r10_bio->retry_list, &conf->bio_end_io_list); in handle_write_completed()
2678 conf->nr_queued++; in handle_write_completed()
2679 spin_unlock_irq(&conf->device_lock); in handle_write_completed()
2684 wake_up(&conf->wait_barrier); in handle_write_completed()
2685 md_wakeup_thread(conf->mddev->thread); in handle_write_completed()
2700 struct r10conf *conf = mddev->private; in raid10d() local
2701 struct list_head *head = &conf->retry_list; in raid10d()
2706 if (!list_empty_careful(&conf->bio_end_io_list) && in raid10d()
2709 spin_lock_irqsave(&conf->device_lock, flags); in raid10d()
2711 while (!list_empty(&conf->bio_end_io_list)) { in raid10d()
2712 list_move(conf->bio_end_io_list.prev, &tmp); in raid10d()
2713 conf->nr_queued--; in raid10d()
2716 spin_unlock_irqrestore(&conf->device_lock, flags); in raid10d()
2734 flush_pending_writes(conf); in raid10d()
2736 spin_lock_irqsave(&conf->device_lock, flags); in raid10d()
2738 spin_unlock_irqrestore(&conf->device_lock, flags); in raid10d()
2743 conf->nr_queued--; in raid10d()
2744 spin_unlock_irqrestore(&conf->device_lock, flags); in raid10d()
2747 conf = mddev->private; in raid10d()
2750 handle_write_completed(conf, r10_bio); in raid10d()
2769 static int init_resync(struct r10conf *conf) in init_resync() argument
2774 BUG_ON(mempool_initialized(&conf->r10buf_pool)); in init_resync()
2775 conf->have_replacement = 0; in init_resync()
2776 for (i = 0; i < conf->geo.raid_disks; i++) in init_resync()
2777 if (conf->mirrors[i].replacement) in init_resync()
2778 conf->have_replacement = 1; in init_resync()
2779 ret = mempool_init(&conf->r10buf_pool, buffs, in init_resync()
2780 r10buf_pool_alloc, r10buf_pool_free, conf); in init_resync()
2783 conf->next_resync = 0; in init_resync()
2787 static struct r10bio *raid10_alloc_init_r10buf(struct r10conf *conf) in raid10_alloc_init_r10buf() argument
2789 struct r10bio *r10bio = mempool_alloc(&conf->r10buf_pool, GFP_NOIO); in raid10_alloc_init_r10buf()
2795 if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) || in raid10_alloc_init_r10buf()
2796 test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery)) in raid10_alloc_init_r10buf()
2797 nalloc = conf->copies; /* resync */ in raid10_alloc_init_r10buf()
2820 static void raid10_set_cluster_sync_high(struct r10conf *conf) in raid10_set_cluster_sync_high() argument
2837 chunks = conf->geo.raid_disks / conf->geo.near_copies; in raid10_set_cluster_sync_high()
2838 if (conf->geo.raid_disks % conf->geo.near_copies == 0) in raid10_set_cluster_sync_high()
2842 window_size = (chunks + extra_chunk) * conf->mddev->chunk_sectors; in raid10_set_cluster_sync_high()
2850 conf->cluster_sync_high = conf->cluster_sync_low + window_size; in raid10_set_cluster_sync_high()
2888 struct r10conf *conf = mddev->private; in raid10_sync_request() local
2897 sector_t chunk_mask = conf->geo.chunk_mask; in raid10_sync_request()
2900 if (!mempool_initialized(&conf->r10buf_pool)) in raid10_sync_request()
2901 if (init_resync(conf)) in raid10_sync_request()
2914 conf->fullsync == 0) { in raid10_sync_request()
2925 conf->cluster_sync_low = 0; in raid10_sync_request()
2926 conf->cluster_sync_high = 0; in raid10_sync_request()
2938 end_reshape(conf); in raid10_sync_request()
2939 close_sync(conf); in raid10_sync_request()
2947 else for (i = 0; i < conf->geo.raid_disks; i++) { in raid10_sync_request()
2949 raid10_find_virt(conf, mddev->curr_resync, i); in raid10_sync_request()
2955 if ((!mddev->bitmap || conf->fullsync) in raid10_sync_request()
2956 && conf->have_replacement in raid10_sync_request()
2962 for (i = 0; i < conf->geo.raid_disks; i++) { in raid10_sync_request()
2964 rcu_dereference(conf->mirrors[i].replacement); in raid10_sync_request()
2970 conf->fullsync = 0; in raid10_sync_request()
2973 close_sync(conf); in raid10_sync_request()
2981 if (chunks_skipped >= conf->geo.raid_disks) { in raid10_sync_request()
2995 if (conf->geo.near_copies < conf->geo.raid_disks && in raid10_sync_request()
3003 if (conf->nr_waiting) in raid10_sync_request()
3027 for (i = 0 ; i < conf->geo.raid_disks; i++) { in raid10_sync_request()
3035 struct raid10_info *mirror = &conf->mirrors[i]; in raid10_sync_request()
3058 sect = raid10_find_virt(conf, sector_nr, i); in raid10_sync_request()
3078 !conf->fullsync) { in raid10_sync_request()
3091 r10_bio = raid10_alloc_init_r10buf(conf); in raid10_sync_request()
3093 raise_barrier(conf, rb2 != NULL); in raid10_sync_request()
3103 raid10_find_phys(conf, r10_bio); in raid10_sync_request()
3109 for (j = 0; j < conf->geo.raid_disks; j++) { in raid10_sync_request()
3111 conf->mirrors[j].rdev); in raid10_sync_request()
3122 for (j=0; j<conf->copies;j++) { in raid10_sync_request()
3127 rcu_dereference(conf->mirrors[d].rdev); in raid10_sync_request()
3163 for (k=0; k<conf->copies; k++) in raid10_sync_request()
3166 BUG_ON(k == conf->copies); in raid10_sync_request()
3207 if (j == conf->copies) { in raid10_sync_request()
3215 for (k = 0; k < conf->copies; k++) in raid10_sync_request()
3258 for (; j < conf->copies; j++) { in raid10_sync_request()
3260 if (conf->mirrors[d].rdev && in raid10_sync_request()
3262 &conf->mirrors[d].rdev->flags)) in raid10_sync_request()
3292 (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high)); in raid10_sync_request()
3296 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, in raid10_sync_request()
3304 r10_bio = raid10_alloc_init_r10buf(conf); in raid10_sync_request()
3309 raise_barrier(conf, 0); in raid10_sync_request()
3310 conf->next_resync = sector_nr; in raid10_sync_request()
3315 raid10_find_phys(conf, r10_bio); in raid10_sync_request()
3318 for (i = 0; i < conf->copies; i++) { in raid10_sync_request()
3330 rdev = rcu_dereference(conf->mirrors[d].rdev); in raid10_sync_request()
3360 rdev = rcu_dereference(conf->mirrors[d].replacement); in raid10_sync_request()
3385 for (i=0; i<conf->copies; i++) { in raid10_sync_request()
3388 rdev_dec_pending(conf->mirrors[d].rdev, in raid10_sync_request()
3393 conf->mirrors[d].replacement, in raid10_sync_request()
3429 if (conf->cluster_sync_high < sector_nr + nr_sectors) { in raid10_sync_request()
3430 conf->cluster_sync_low = mddev->curr_resync_completed; in raid10_sync_request()
3431 raid10_set_cluster_sync_high(conf); in raid10_sync_request()
3434 conf->cluster_sync_low, in raid10_sync_request()
3435 conf->cluster_sync_high); in raid10_sync_request()
3442 for (i = 0; i < conf->geo.raid_disks; i++) { in raid10_sync_request()
3448 sect_va1 = raid10_find_virt(conf, sector_nr, i); in raid10_sync_request()
3450 if (conf->cluster_sync_high < sect_va1 + nr_sectors) { in raid10_sync_request()
3456 sect_va2 = raid10_find_virt(conf, in raid10_sync_request()
3459 if (conf->cluster_sync_low == 0 || in raid10_sync_request()
3460 conf->cluster_sync_low > sect_va2) in raid10_sync_request()
3461 conf->cluster_sync_low = sect_va2; in raid10_sync_request()
3465 raid10_set_cluster_sync_high(conf); in raid10_sync_request()
3467 conf->cluster_sync_low, in raid10_sync_request()
3468 conf->cluster_sync_high); in raid10_sync_request()
3512 struct r10conf *conf = mddev->private; in raid10_size() local
3515 raid_disks = min(conf->geo.raid_disks, in raid10_size()
3516 conf->prev.raid_disks); in raid10_size()
3518 sectors = conf->dev_sectors; in raid10_size()
3520 size = sectors >> conf->geo.chunk_shift; in raid10_size()
3521 sector_div(size, conf->geo.far_copies); in raid10_size()
3523 sector_div(size, conf->geo.near_copies); in raid10_size()
3525 return size << conf->geo.chunk_shift; in raid10_size()
3528 static void calc_sectors(struct r10conf *conf, sector_t size) in calc_sectors() argument
3535 size = size >> conf->geo.chunk_shift; in calc_sectors()
3536 sector_div(size, conf->geo.far_copies); in calc_sectors()
3537 size = size * conf->geo.raid_disks; in calc_sectors()
3538 sector_div(size, conf->geo.near_copies); in calc_sectors()
3541 size = size * conf->copies; in calc_sectors()
3546 size = DIV_ROUND_UP_SECTOR_T(size, conf->geo.raid_disks); in calc_sectors()
3548 conf->dev_sectors = size << conf->geo.chunk_shift; in calc_sectors()
3550 if (conf->geo.far_offset) in calc_sectors()
3551 conf->geo.stride = 1 << conf->geo.chunk_shift; in calc_sectors()
3553 sector_div(size, conf->geo.far_copies); in calc_sectors()
3554 conf->geo.stride = size << conf->geo.chunk_shift; in calc_sectors()
3617 struct r10conf *conf = NULL; in setup_conf() local
3637 conf = kzalloc(sizeof(struct r10conf), GFP_KERNEL); in setup_conf()
3638 if (!conf) in setup_conf()
3642 conf->mirrors = kcalloc(mddev->raid_disks + max(0, -mddev->delta_disks), in setup_conf()
3645 if (!conf->mirrors) in setup_conf()
3648 conf->tmppage = alloc_page(GFP_KERNEL); in setup_conf()
3649 if (!conf->tmppage) in setup_conf()
3652 conf->geo = geo; in setup_conf()
3653 conf->copies = copies; in setup_conf()
3654 err = mempool_init(&conf->r10bio_pool, NR_RAID_BIOS, r10bio_pool_alloc, in setup_conf()
3655 rbio_pool_free, conf); in setup_conf()
3659 err = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0); in setup_conf()
3663 calc_sectors(conf, mddev->dev_sectors); in setup_conf()
3665 conf->prev = conf->geo; in setup_conf()
3666 conf->reshape_progress = MaxSector; in setup_conf()
3668 if (setup_geo(&conf->prev, mddev, geo_old) != conf->copies) { in setup_conf()
3672 conf->reshape_progress = mddev->reshape_position; in setup_conf()
3673 if (conf->prev.far_offset) in setup_conf()
3674 conf->prev.stride = 1 << conf->prev.chunk_shift; in setup_conf()
3677 conf->prev.stride = conf->dev_sectors; in setup_conf()
3679 conf->reshape_safe = conf->reshape_progress; in setup_conf()
3680 spin_lock_init(&conf->device_lock); in setup_conf()
3681 INIT_LIST_HEAD(&conf->retry_list); in setup_conf()
3682 INIT_LIST_HEAD(&conf->bio_end_io_list); in setup_conf()
3684 spin_lock_init(&conf->resync_lock); in setup_conf()
3685 init_waitqueue_head(&conf->wait_barrier); in setup_conf()
3686 atomic_set(&conf->nr_pending, 0); in setup_conf()
3689 conf->thread = md_register_thread(raid10d, mddev, "raid10"); in setup_conf()
3690 if (!conf->thread) in setup_conf()
3693 conf->mddev = mddev; in setup_conf()
3694 return conf; in setup_conf()
3697 if (conf) { in setup_conf()
3698 mempool_exit(&conf->r10bio_pool); in setup_conf()
3699 kfree(conf->mirrors); in setup_conf()
3700 safe_put_page(conf->tmppage); in setup_conf()
3701 bioset_exit(&conf->bio_split); in setup_conf()
3702 kfree(conf); in setup_conf()
3707 static void raid10_set_io_opt(struct r10conf *conf) in raid10_set_io_opt() argument
3709 int raid_disks = conf->geo.raid_disks; in raid10_set_io_opt()
3711 if (!(conf->geo.raid_disks % conf->geo.near_copies)) in raid10_set_io_opt()
3712 raid_disks /= conf->geo.near_copies; in raid10_set_io_opt()
3713 blk_queue_io_opt(conf->mddev->queue, (conf->mddev->chunk_sectors << 9) * in raid10_set_io_opt()
3719 struct r10conf *conf; in raid10_run() local
3732 conf = setup_conf(mddev); in raid10_run()
3733 if (IS_ERR(conf)) in raid10_run()
3734 return PTR_ERR(conf); in raid10_run()
3735 mddev->private = conf; in raid10_run()
3737 conf = mddev->private; in raid10_run()
3738 if (!conf) in raid10_run()
3741 if (mddev_is_clustered(conf->mddev)) { in raid10_run()
3753 mddev->thread = conf->thread; in raid10_run()
3754 conf->thread = NULL; in raid10_run()
3762 raid10_set_io_opt(conf); in raid10_run()
3771 if (disk_idx >= conf->geo.raid_disks && in raid10_run()
3772 disk_idx >= conf->prev.raid_disks) in raid10_run()
3774 disk = conf->mirrors + disk_idx; in raid10_run()
3813 if (!enough(conf, -1)) { in raid10_run()
3819 if (conf->reshape_progress != MaxSector) { in raid10_run()
3821 if (conf->geo.far_copies != 1 && in raid10_run()
3822 conf->geo.far_offset == 0) in raid10_run()
3824 if (conf->prev.far_copies != 1 && in raid10_run()
3825 conf->prev.far_offset == 0) in raid10_run()
3831 i < conf->geo.raid_disks in raid10_run()
3832 || i < conf->prev.raid_disks; in raid10_run()
3835 disk = conf->mirrors + i; in raid10_run()
3850 conf->fullsync = 1; in raid10_run()
3856 conf->fullsync = 1; in raid10_run()
3866 mdname(mddev), conf->geo.raid_disks - mddev->degraded, in raid10_run()
3867 conf->geo.raid_disks); in raid10_run()
3871 mddev->dev_sectors = conf->dev_sectors; in raid10_run()
3880 if (conf->reshape_progress != MaxSector) { in raid10_run()
3883 before_length = ((1 << conf->prev.chunk_shift) * in raid10_run()
3884 conf->prev.far_copies); in raid10_run()
3885 after_length = ((1 << conf->geo.chunk_shift) * in raid10_run()
3886 conf->geo.far_copies); in raid10_run()
3893 conf->offset_diff = min_offset_diff; in raid10_run()
3909 mempool_exit(&conf->r10bio_pool); in raid10_run()
3910 safe_put_page(conf->tmppage); in raid10_run()
3911 kfree(conf->mirrors); in raid10_run()
3912 kfree(conf); in raid10_run()
3920 struct r10conf *conf = priv; in raid10_free() local
3922 mempool_exit(&conf->r10bio_pool); in raid10_free()
3923 safe_put_page(conf->tmppage); in raid10_free()
3924 kfree(conf->mirrors); in raid10_free()
3925 kfree(conf->mirrors_old); in raid10_free()
3926 kfree(conf->mirrors_new); in raid10_free()
3927 bioset_exit(&conf->bio_split); in raid10_free()
3928 kfree(conf); in raid10_free()
3933 struct r10conf *conf = mddev->private; in raid10_quiesce() local
3936 raise_barrier(conf, 0); in raid10_quiesce()
3938 lower_barrier(conf); in raid10_quiesce()
3955 struct r10conf *conf = mddev->private; in raid10_resize() local
3961 if (conf->geo.far_copies > 1 && !conf->geo.far_offset) in raid10_resize()
3980 calc_sectors(conf, sectors); in raid10_resize()
3981 mddev->dev_sectors = conf->dev_sectors; in raid10_resize()
3989 struct r10conf *conf; in raid10_takeover_raid0() local
4009 conf = setup_conf(mddev); in raid10_takeover_raid0()
4010 if (!IS_ERR(conf)) { in raid10_takeover_raid0()
4016 conf->barrier = 1; in raid10_takeover_raid0()
4019 return conf; in raid10_takeover_raid0()
4060 struct r10conf *conf = mddev->private; in raid10_check_reshape() local
4063 if (conf->geo.far_copies != 1 && !conf->geo.far_offset) in raid10_check_reshape()
4066 if (setup_geo(&geo, mddev, geo_start) != conf->copies) in raid10_check_reshape()
4077 if (!enough(conf, -1)) in raid10_check_reshape()
4080 kfree(conf->mirrors_new); in raid10_check_reshape()
4081 conf->mirrors_new = NULL; in raid10_check_reshape()
4084 conf->mirrors_new = in raid10_check_reshape()
4088 if (!conf->mirrors_new) in raid10_check_reshape()
4107 static int calc_degraded(struct r10conf *conf) in calc_degraded() argument
4115 for (i = 0; i < conf->prev.raid_disks; i++) { in calc_degraded()
4116 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); in calc_degraded()
4127 if (conf->geo.raid_disks == conf->prev.raid_disks) in calc_degraded()
4131 for (i = 0; i < conf->geo.raid_disks; i++) { in calc_degraded()
4132 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); in calc_degraded()
4141 if (conf->geo.raid_disks <= conf->prev.raid_disks) in calc_degraded()
4167 struct r10conf *conf = mddev->private; in raid10_start_reshape() local
4175 if (setup_geo(&new, mddev, geo_start) != conf->copies) in raid10_start_reshape()
4178 before_length = ((1 << conf->prev.chunk_shift) * in raid10_start_reshape()
4179 conf->prev.far_copies); in raid10_start_reshape()
4180 after_length = ((1 << conf->geo.chunk_shift) * in raid10_start_reshape()
4181 conf->geo.far_copies); in raid10_start_reshape()
4206 conf->offset_diff = min_offset_diff; in raid10_start_reshape()
4207 spin_lock_irq(&conf->device_lock); in raid10_start_reshape()
4208 if (conf->mirrors_new) { in raid10_start_reshape()
4209 memcpy(conf->mirrors_new, conf->mirrors, in raid10_start_reshape()
4210 sizeof(struct raid10_info)*conf->prev.raid_disks); in raid10_start_reshape()
4212 kfree(conf->mirrors_old); in raid10_start_reshape()
4213 conf->mirrors_old = conf->mirrors; in raid10_start_reshape()
4214 conf->mirrors = conf->mirrors_new; in raid10_start_reshape()
4215 conf->mirrors_new = NULL; in raid10_start_reshape()
4217 setup_geo(&conf->geo, mddev, geo_start); in raid10_start_reshape()
4222 spin_unlock_irq(&conf->device_lock); in raid10_start_reshape()
4228 conf->reshape_progress = size; in raid10_start_reshape()
4230 conf->reshape_progress = 0; in raid10_start_reshape()
4231 conf->reshape_safe = conf->reshape_progress; in raid10_start_reshape()
4232 spin_unlock_irq(&conf->device_lock); in raid10_start_reshape()
4239 newsize = raid10_size(mddev, 0, conf->geo.raid_disks); in raid10_start_reshape()
4281 conf->prev.raid_disks) in raid10_start_reshape()
4289 } else if (rdev->raid_disk >= conf->prev.raid_disks in raid10_start_reshape()
4299 spin_lock_irq(&conf->device_lock); in raid10_start_reshape()
4300 mddev->degraded = calc_degraded(conf); in raid10_start_reshape()
4301 spin_unlock_irq(&conf->device_lock); in raid10_start_reshape()
4302 mddev->raid_disks = conf->geo.raid_disks; in raid10_start_reshape()
4303 mddev->reshape_position = conf->reshape_progress; in raid10_start_reshape()
4318 conf->reshape_checkpoint = jiffies; in raid10_start_reshape()
4325 spin_lock_irq(&conf->device_lock); in raid10_start_reshape()
4326 conf->geo = conf->prev; in raid10_start_reshape()
4327 mddev->raid_disks = conf->geo.raid_disks; in raid10_start_reshape()
4331 conf->reshape_progress = MaxSector; in raid10_start_reshape()
4332 conf->reshape_safe = MaxSector; in raid10_start_reshape()
4334 spin_unlock_irq(&conf->device_lock); in raid10_start_reshape()
4409 struct r10conf *conf = mddev->private; in reshape_request() local
4425 conf->reshape_progress < raid10_size(mddev, 0, 0)) { in reshape_request()
4427 - conf->reshape_progress); in reshape_request()
4429 conf->reshape_progress > 0) in reshape_request()
4430 sector_nr = conf->reshape_progress; in reshape_request()
4447 next = first_dev_address(conf->reshape_progress - 1, in reshape_request()
4448 &conf->geo); in reshape_request()
4453 safe = last_dev_address(conf->reshape_safe - 1, in reshape_request()
4454 &conf->prev); in reshape_request()
4456 if (next + conf->offset_diff < safe) in reshape_request()
4459 last = conf->reshape_progress - 1; in reshape_request()
4460 sector_nr = last & ~(sector_t)(conf->geo.chunk_mask in reshape_request()
4461 & conf->prev.chunk_mask); in reshape_request()
4468 next = last_dev_address(conf->reshape_progress, &conf->geo); in reshape_request()
4473 safe = first_dev_address(conf->reshape_safe, &conf->prev); in reshape_request()
4478 if (next > safe + conf->offset_diff) in reshape_request()
4481 sector_nr = conf->reshape_progress; in reshape_request()
4482 last = sector_nr | (conf->geo.chunk_mask in reshape_request()
4483 & conf->prev.chunk_mask); in reshape_request()
4490 time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) { in reshape_request()
4492 wait_barrier(conf); in reshape_request()
4493 mddev->reshape_position = conf->reshape_progress; in reshape_request()
4496 - conf->reshape_progress; in reshape_request()
4498 mddev->curr_resync_completed = conf->reshape_progress; in reshape_request()
4499 conf->reshape_checkpoint = jiffies; in reshape_request()
4505 allow_barrier(conf); in reshape_request()
4508 conf->reshape_safe = mddev->reshape_position; in reshape_request()
4509 allow_barrier(conf); in reshape_request()
4512 raise_barrier(conf, 0); in reshape_request()
4515 r10_bio = raid10_alloc_init_r10buf(conf); in reshape_request()
4517 raise_barrier(conf, 1); in reshape_request()
4523 rdev = read_balance(conf, r10_bio, &max_sectors); in reshape_request()
4531 mempool_free(r10_bio, &conf->r10buf_pool); in reshape_request()
4555 if (mddev_is_clustered(mddev) && conf->cluster_sync_high <= sector_nr) { in reshape_request()
4559 conf->cluster_sync_low = sector_nr; in reshape_request()
4560 conf->cluster_sync_high = sector_nr + CLUSTER_RESYNC_WINDOW_SECTORS; in reshape_request()
4569 if (sb_reshape_pos < conf->cluster_sync_low) in reshape_request()
4570 conf->cluster_sync_low = sb_reshape_pos; in reshape_request()
4573 md_cluster_ops->resync_info_update(mddev, conf->cluster_sync_low, in reshape_request()
4574 conf->cluster_sync_high); in reshape_request()
4578 __raid10_find_phys(&conf->geo, r10_bio); in reshape_request()
4584 for (s = 0; s < conf->copies*2; s++) { in reshape_request()
4589 rdev2 = rcu_dereference(conf->mirrors[d].replacement); in reshape_request()
4592 rdev2 = rcu_dereference(conf->mirrors[d].rdev); in reshape_request()
4638 lower_barrier(conf); in reshape_request()
4644 conf->reshape_progress -= sectors_done; in reshape_request()
4646 conf->reshape_progress += sectors_done; in reshape_request()
4661 struct r10conf *conf = mddev->private; in reshape_request_write() local
4675 for (s = 0; s < conf->copies*2; s++) { in reshape_request_write()
4681 rdev = rcu_dereference(conf->mirrors[d].replacement); in reshape_request_write()
4684 rdev = rcu_dereference(conf->mirrors[d].rdev); in reshape_request_write()
4701 static void end_reshape(struct r10conf *conf) in end_reshape() argument
4703 if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) in end_reshape()
4706 spin_lock_irq(&conf->device_lock); in end_reshape()
4707 conf->prev = conf->geo; in end_reshape()
4708 md_finish_reshape(conf->mddev); in end_reshape()
4710 conf->reshape_progress = MaxSector; in end_reshape()
4711 conf->reshape_safe = MaxSector; in end_reshape()
4712 spin_unlock_irq(&conf->device_lock); in end_reshape()
4714 if (conf->mddev->queue) in end_reshape()
4715 raid10_set_io_opt(conf); in end_reshape()
4716 conf->fullsync = 0; in end_reshape()
4721 struct r10conf *conf = mddev->private; in raid10_update_reshape_pos() local
4727 conf->reshape_progress = mddev->reshape_position; in raid10_update_reshape_pos()
4737 struct r10conf *conf = mddev->private; in handle_reshape_read_error() local
4743 r10b = kmalloc(struct_size(r10b, devs, conf->copies), GFP_NOIO); in handle_reshape_read_error()
4753 __raid10_find_phys(&conf->prev, r10b); in handle_reshape_read_error()
4766 struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev); in handle_reshape_read_error()
4787 if (slot >= conf->copies) in handle_reshape_read_error()
4811 struct r10conf *conf = mddev->private; in end_reshape_write() local
4817 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl); in end_reshape_write()
4819 rdev = conf->mirrors[d].replacement; in end_reshape_write()
4822 rdev = conf->mirrors[d].rdev; in end_reshape_write()
4845 struct r10conf *conf = mddev->private; in raid10_finish_reshape() local
4859 for (d = conf->geo.raid_disks ; in raid10_finish_reshape()
4860 d < conf->geo.raid_disks - mddev->delta_disks; in raid10_finish_reshape()
4862 struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev); in raid10_finish_reshape()
4865 rdev = rcu_dereference(conf->mirrors[d].replacement); in raid10_finish_reshape()
4872 mddev->chunk_sectors = 1 << conf->geo.chunk_shift; in raid10_finish_reshape()
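The references above are dominated by the resync/recovery barrier helpers (raise_barrier(), wait_barrier(), allow_barrier(), lower_barrier(), freeze_array()): regular I/O enters through wait_barrier()/allow_barrier(), while resync raises a barrier and waits for in-flight I/O to drain. As a rough illustration only, below is a minimal userspace sketch of that pattern using pthreads; every name and type here is hypothetical and simplified, and this is not the kernel implementation (which uses conf->resync_lock with wait_event_lock_irq(), an atomic nr_pending counter, and a RESYNC_DEPTH limit).

/*
 * Hypothetical, simplified userspace analogue of the RAID10 barrier
 * pattern referenced above.  Illustrative only.
 */
#include <pthread.h>

struct demo_conf {
	pthread_mutex_t resync_lock;
	pthread_cond_t  wait_barrier;
	int barrier;     /* non-zero while resync holds the barrier */
	int nr_pending;  /* regular I/O requests currently in flight */
};

#define DEMO_CONF_INIT \
	{ PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0, 0 }

/* Regular I/O: wait until no barrier is held, then count ourselves. */
static void demo_wait_barrier(struct demo_conf *conf)
{
	pthread_mutex_lock(&conf->resync_lock);
	while (conf->barrier)
		pthread_cond_wait(&conf->wait_barrier, &conf->resync_lock);
	conf->nr_pending++;
	pthread_mutex_unlock(&conf->resync_lock);
}

/* Regular I/O completion: drop our count and wake a waiting resync. */
static void demo_allow_barrier(struct demo_conf *conf)
{
	pthread_mutex_lock(&conf->resync_lock);
	if (--conf->nr_pending == 0)
		pthread_cond_broadcast(&conf->wait_barrier);
	pthread_mutex_unlock(&conf->resync_lock);
}

/* Resync: block new I/O, then wait for in-flight I/O to drain. */
static void demo_raise_barrier(struct demo_conf *conf)
{
	pthread_mutex_lock(&conf->resync_lock);
	conf->barrier++;
	while (conf->nr_pending)
		pthread_cond_wait(&conf->wait_barrier, &conf->resync_lock);
	pthread_mutex_unlock(&conf->resync_lock);
}

/* Resync done: release the barrier and wake any blocked I/O. */
static void demo_lower_barrier(struct demo_conf *conf)
{
	pthread_mutex_lock(&conf->resync_lock);
	conf->barrier--;
	pthread_cond_broadcast(&conf->wait_barrier);
	pthread_mutex_unlock(&conf->resync_lock);
}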