Lines matching refs: conf — identifier cross-reference for drivers/md/raid10.c. Each entry shows the source line number, the matching code, and its enclosing function ("local" marks a declaration, "argument" a parameter).
67 static void allow_barrier(struct r10conf *conf);
68 static void lower_barrier(struct r10conf *conf);
69 static int _enough(struct r10conf *conf, int previous, int ignore);
70 static int enough(struct r10conf *conf, int ignore);
75 static void end_reshape(struct r10conf *conf);
93 struct r10conf *conf = data; in r10bio_pool_alloc() local
94 int size = offsetof(struct r10bio, devs[conf->copies]); in r10bio_pool_alloc()
118 struct r10conf *conf = data; in r10buf_pool_alloc() local
125 r10_bio = r10bio_pool_alloc(gfp_flags, conf); in r10buf_pool_alloc()
129 if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) || in r10buf_pool_alloc()
130 test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery)) in r10buf_pool_alloc()
131 nalloc = conf->copies; /* resync */ in r10buf_pool_alloc()
136 if (!conf->have_replacement) in r10buf_pool_alloc()
152 if (!conf->have_replacement) in r10buf_pool_alloc()
174 &conf->mddev->recovery)) { in r10buf_pool_alloc()
206 rbio_pool_free(r10_bio, conf); in r10buf_pool_alloc()
212 struct r10conf *conf = data; in r10buf_pool_free() local
217 for (j = conf->copies; j--; ) { in r10buf_pool_free()
234 rbio_pool_free(r10bio, conf); in r10buf_pool_free()
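The pool callbacks above size every r10bio as offsetof(struct r10bio, devs[conf->copies]) (line 94), so the per-copy devs[] tail comes out of a single allocation. A minimal userspace sketch of that flexible-array sizing pattern, using hypothetical stand-in structs (demo_bio, dev_slot) rather than the kernel's types:

    #include <stdio.h>
    #include <stdlib.h>
    #include <stddef.h>

    /* Simplified stand-ins for the kernel's r10bio; the real struct
     * lives in drivers/md/raid10.h and carries far more state. */
    struct dev_slot { int devnum; long addr; };
    struct demo_bio {
        long sector;
        int  copies;
        struct dev_slot devs[];   /* flexible array member, one slot per copy */
    };

    static struct demo_bio *demo_bio_alloc(int copies)
    {
        /* Same sizing trick as r10bio_pool_alloc():
         * offsetof(struct demo_bio, devs[copies]) covers the header plus
         * exactly 'copies' trailing slots (a GCC/Clang extension the
         * kernel relies on for non-constant indices). */
        size_t size = offsetof(struct demo_bio, devs[copies]);
        struct demo_bio *b = calloc(1, size);
        if (b)
            b->copies = copies;
        return b;
    }

    int main(void)
    {
        struct demo_bio *b = demo_bio_alloc(2);   /* e.g. a 2-copy layout */
        if (!b)
            return 1;
        printf("allocated %zu bytes for %d copies\n",
               offsetof(struct demo_bio, devs[b->copies]), b->copies);
        free(b);
        return 0;
    }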
237 static void put_all_bios(struct r10conf *conf, struct r10bio *r10_bio) in put_all_bios() argument
241 for (i = 0; i < conf->copies; i++) { in put_all_bios()
255 struct r10conf *conf = r10_bio->mddev->private; in free_r10bio() local
257 put_all_bios(conf, r10_bio); in free_r10bio()
258 mempool_free(r10_bio, &conf->r10bio_pool); in free_r10bio()
263 struct r10conf *conf = r10_bio->mddev->private; in put_buf() local
265 mempool_free(r10_bio, &conf->r10buf_pool); in put_buf()
267 lower_barrier(conf); in put_buf()
274 struct r10conf *conf = mddev->private; in reschedule_retry() local
276 spin_lock_irqsave(&conf->device_lock, flags); in reschedule_retry()
277 list_add(&r10_bio->retry_list, &conf->retry_list); in reschedule_retry()
278 conf->nr_queued ++; in reschedule_retry()
279 spin_unlock_irqrestore(&conf->device_lock, flags); in reschedule_retry()
282 wake_up(&conf->wait_barrier); in reschedule_retry()
295 struct r10conf *conf = r10_bio->mddev->private; in raid_end_bio_io() local
305 allow_barrier(conf); in raid_end_bio_io()
315 struct r10conf *conf = r10_bio->mddev->private; in update_head_pos() local
317 conf->mirrors[r10_bio->devs[slot].devnum].head_position = in update_head_pos()
324 static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio, in find_bio_disk() argument
330 for (slot = 0; slot < conf->copies; slot++) { in find_bio_disk()
339 BUG_ON(slot == conf->copies); in find_bio_disk()
355 struct r10conf *conf = r10_bio->mddev->private; in raid10_end_read_request() local
381 if (!_enough(conf, test_bit(R10BIO_Previous, &r10_bio->state), in raid10_end_read_request()
387 rdev_dec_pending(rdev, conf->mddev); in raid10_end_read_request()
394 mdname(conf->mddev), in raid10_end_read_request()
432 struct r10conf *conf = r10_bio->mddev->private; in raid10_end_write_request() local
440 dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl); in raid10_end_write_request()
443 rdev = conf->mirrors[dev].replacement; in raid10_end_write_request()
447 rdev = conf->mirrors[dev].rdev; in raid10_end_write_request()
531 rdev_dec_pending(rdev, conf->mddev); in raid10_end_write_request()
624 static void raid10_find_phys(struct r10conf *conf, struct r10bio *r10bio) in raid10_find_phys() argument
626 struct geom *geo = &conf->geo; in raid10_find_phys()
628 if (conf->reshape_progress != MaxSector && in raid10_find_phys()
629 ((r10bio->sector >= conf->reshape_progress) != in raid10_find_phys()
630 conf->mddev->reshape_backwards)) { in raid10_find_phys()
632 geo = &conf->prev; in raid10_find_phys()
639 static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev) in raid10_find_virt() argument
645 struct geom *geo = &conf->geo; in raid10_find_virt()
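raid10_find_phys() and raid10_find_virt() translate between virtual array sectors and per-device positions through the active struct geom (switching to conf->prev mid-reshape, lines 628–632). A rough userspace rendering of the near-copies case only, reconstructed from the usual RAID10 "near" layout; far and offset layouts, and the prev/geo switch, are omitted:

    #include <stdio.h>

    /* Near-copies mapping only, loosely following __raid10_find_phys():
     * logical chunk c lands on near_copies consecutive devices starting
     * at (c * near_copies) % raid_disks, in device stripe
     * (c * near_copies) / raid_disks. */
    static void find_phys_near(long sector, int chunk_shift,
                               int raid_disks, int near_copies)
    {
        long chunk  = sector >> chunk_shift;            /* logical chunk */
        long offset = sector & ((1L << chunk_shift) - 1);
        long base   = chunk * near_copies;
        long stripe = base / raid_disks;
        int  dev    = (int)(base % raid_disks);

        for (int copy = 0; copy < near_copies; copy++) {
            printf("copy %d -> dev %d, dev sector %ld\n",
                   copy, dev, (stripe << chunk_shift) + offset);
            if (++dev >= raid_disks)
                dev = 0;
        }
    }

    int main(void)
    {
        /* 4 disks, 2 near copies, 64-sector chunks (chunk_shift = 6):
         * sector 130 is chunk 2, which mirrors onto devs 0 and 1 of
         * device stripe 1. */
        find_phys_near(130, 6, 4, 2);
        return 0;
    }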
703 static struct md_rdev *read_balance(struct r10conf *conf, in read_balance() argument
717 struct geom *geo = &conf->geo; in read_balance()
719 raid10_find_phys(conf, r10_bio); in read_balance()
735 if ((conf->mddev->recovery_cp < MaxSector in read_balance()
736 && (this_sector + sectors >= conf->next_resync)) || in read_balance()
737 (mddev_is_clustered(conf->mddev) && in read_balance()
738 md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector, in read_balance()
742 for (slot = 0; slot < conf->copies ; slot++) { in read_balance()
752 rdev = rcu_dereference(conf->mirrors[disk].replacement); in read_balance()
755 rdev = rcu_dereference(conf->mirrors[disk].rdev); in read_balance()
822 conf->mirrors[disk].head_position); in read_balance()
830 if (slot >= conf->copies) { in read_balance()
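Among read_balance()'s criteria, line 822 references each mirror's cached head_position: for rotational disks the function prefers the copy with the shortest seek to the target. A toy version of just that closest-head tiebreak (the real function also weighs bad blocks, in-flight I/O, geometry and resync state):

    #include <stdio.h>
    #include <stdlib.h>

    /* Pick the copy whose last known head position is nearest the
     * target sector -- the seek-distance heuristic read_balance()
     * applies when no copy is otherwise preferred. */
    static int pick_closest_head(const long *head_position, int copies,
                                 long this_sector)
    {
        int best = -1;
        long best_dist = -1;

        for (int slot = 0; slot < copies; slot++) {
            long dist = labs(head_position[slot] - this_sector);
            if (best < 0 || dist < best_dist) {
                best = slot;
                best_dist = dist;
            }
        }
        return best;
    }

    int main(void)
    {
        long heads[] = { 1000, 4096, 80000 };
        printf("read slot %d\n", pick_closest_head(heads, 3, 4000));
        return 0;
    }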
853 struct r10conf *conf = mddev->private; in raid10_congested() local
857 conf->pending_count >= max_queued_requests) in raid10_congested()
862 (i < conf->geo.raid_disks || i < conf->prev.raid_disks) in raid10_congested()
865 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); in raid10_congested()
876 static void flush_pending_writes(struct r10conf *conf) in flush_pending_writes() argument
881 spin_lock_irq(&conf->device_lock); in flush_pending_writes()
883 if (conf->pending_bio_list.head) { in flush_pending_writes()
887 bio = bio_list_get(&conf->pending_bio_list); in flush_pending_writes()
888 conf->pending_count = 0; in flush_pending_writes()
889 spin_unlock_irq(&conf->device_lock); in flush_pending_writes()
905 md_bitmap_unplug(conf->mddev->bitmap); in flush_pending_writes()
906 wake_up(&conf->wait_barrier); in flush_pending_writes()
925 spin_unlock_irq(&conf->device_lock); in flush_pending_writes()
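flush_pending_writes() detaches the whole pending_bio_list under device_lock (lines 883–889), then submits the bios only after dropping the lock. A pthread sketch of that detach-then-submit pattern; note the kernel's bio_list preserves FIFO order and the real path also unplugs the bitmap, which this stand-in skips:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Queued "writes" as a simple linked list; build with -lpthread. */
    struct node { struct node *next; int id; };

    static pthread_mutex_t device_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct node *pending_head;

    static void queue_write(struct node *n)
    {
        pthread_mutex_lock(&device_lock);
        n->next = pending_head;          /* LIFO here; the kernel's
                                          * bio_list keeps FIFO order */
        pending_head = n;
        pthread_mutex_unlock(&device_lock);
    }

    static void flush_pending(void)
    {
        pthread_mutex_lock(&device_lock);
        struct node *list = pending_head;   /* grab everything at once */
        pending_head = NULL;
        pthread_mutex_unlock(&device_lock);

        while (list) {                      /* "submit" outside the lock */
            struct node *next = list->next;
            printf("submitting write %d\n", list->id);
            free(list);
            list = next;
        }
    }

    int main(void)
    {
        for (int i = 0; i < 3; i++) {
            struct node *n = malloc(sizeof(*n));
            n->id = i;
            queue_write(n);
        }
        flush_pending();
        return 0;
    }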
950 static void raise_barrier(struct r10conf *conf, int force) in raise_barrier() argument
952 BUG_ON(force && !conf->barrier); in raise_barrier()
953 spin_lock_irq(&conf->resync_lock); in raise_barrier()
956 wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting, in raise_barrier()
957 conf->resync_lock); in raise_barrier()
960 conf->barrier++; in raise_barrier()
963 wait_event_lock_irq(conf->wait_barrier, in raise_barrier()
964 !atomic_read(&conf->nr_pending) && conf->barrier < RESYNC_DEPTH, in raise_barrier()
965 conf->resync_lock); in raise_barrier()
967 spin_unlock_irq(&conf->resync_lock); in raise_barrier()
970 static void lower_barrier(struct r10conf *conf) in lower_barrier() argument
973 spin_lock_irqsave(&conf->resync_lock, flags); in lower_barrier()
974 conf->barrier--; in lower_barrier()
975 spin_unlock_irqrestore(&conf->resync_lock, flags); in lower_barrier()
976 wake_up(&conf->wait_barrier); in lower_barrier()
979 static void wait_barrier(struct r10conf *conf) in wait_barrier() argument
981 spin_lock_irq(&conf->resync_lock); in wait_barrier()
982 if (conf->barrier) { in wait_barrier()
983 conf->nr_waiting++; in wait_barrier()
993 raid10_log(conf->mddev, "wait barrier"); in wait_barrier()
994 wait_event_lock_irq(conf->wait_barrier, in wait_barrier()
995 !conf->barrier || in wait_barrier()
996 (atomic_read(&conf->nr_pending) && in wait_barrier()
1000 conf->resync_lock); in wait_barrier()
1001 conf->nr_waiting--; in wait_barrier()
1002 if (!conf->nr_waiting) in wait_barrier()
1003 wake_up(&conf->wait_barrier); in wait_barrier()
1005 atomic_inc(&conf->nr_pending); in wait_barrier()
1006 spin_unlock_irq(&conf->resync_lock); in wait_barrier()
1009 static void allow_barrier(struct r10conf *conf) in allow_barrier() argument
1011 if ((atomic_dec_and_test(&conf->nr_pending)) || in allow_barrier()
1012 (conf->array_freeze_pending)) in allow_barrier()
1013 wake_up(&conf->wait_barrier); in allow_barrier()
1016 static void freeze_array(struct r10conf *conf, int extra) in freeze_array() argument
1030 spin_lock_irq(&conf->resync_lock); in freeze_array()
1031 conf->array_freeze_pending++; in freeze_array()
1032 conf->barrier++; in freeze_array()
1033 conf->nr_waiting++; in freeze_array()
1034 wait_event_lock_irq_cmd(conf->wait_barrier, in freeze_array()
1035 atomic_read(&conf->nr_pending) == conf->nr_queued+extra, in freeze_array()
1036 conf->resync_lock, in freeze_array()
1037 flush_pending_writes(conf)); in freeze_array()
1039 conf->array_freeze_pending--; in freeze_array()
1040 spin_unlock_irq(&conf->resync_lock); in freeze_array()
1043 static void unfreeze_array(struct r10conf *conf) in unfreeze_array() argument
1046 spin_lock_irq(&conf->resync_lock); in unfreeze_array()
1047 conf->barrier--; in unfreeze_array()
1048 conf->nr_waiting--; in unfreeze_array()
1049 wake_up(&conf->wait_barrier); in unfreeze_array()
1050 spin_unlock_irq(&conf->resync_lock); in unfreeze_array()
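Lines 950–1050 are the resync barrier: raise_barrier() waits out in-flight regular I/O, wait_barrier() blocks regular I/O while the barrier is up and then bumps nr_pending, allow_barrier() drops it again. A condensed pthread analogue of that handshake; RESYNC_DEPTH, the nr_waiting fairness check and freeze_array() are left out:

    #include <pthread.h>

    /* Userspace analogue of the raid10 resync barrier; -lpthread. */
    struct barrier_ctx {
        pthread_mutex_t lock;       /* stands in for conf->resync_lock  */
        pthread_cond_t  wait;       /* stands in for conf->wait_barrier */
        int             barrier;    /* resync passes currently raised   */
        int             nr_pending; /* regular I/O in flight            */
    };

    static void raise_barrier(struct barrier_ctx *c)
    {
        pthread_mutex_lock(&c->lock);
        c->barrier++;
        while (c->nr_pending > 0)             /* drain in-flight I/O */
            pthread_cond_wait(&c->wait, &c->lock);
        pthread_mutex_unlock(&c->lock);
    }

    static void lower_barrier(struct barrier_ctx *c)
    {
        pthread_mutex_lock(&c->lock);
        c->barrier--;
        pthread_mutex_unlock(&c->lock);
        pthread_cond_broadcast(&c->wait);     /* wake waiting I/O */
    }

    static void wait_barrier(struct barrier_ctx *c)
    {
        pthread_mutex_lock(&c->lock);
        while (c->barrier)                    /* blocked during resync */
            pthread_cond_wait(&c->wait, &c->lock);
        c->nr_pending++;
        pthread_mutex_unlock(&c->lock);
    }

    static void allow_barrier(struct barrier_ctx *c)
    {
        pthread_mutex_lock(&c->lock);
        if (--c->nr_pending == 0)
            pthread_cond_broadcast(&c->wait); /* let the barrier proceed */
        pthread_mutex_unlock(&c->lock);
    }

    int main(void)
    {
        struct barrier_ctx c = {
            PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0, 0 };

        wait_barrier(&c);    /* regular I/O enters...                 */
        allow_barrier(&c);   /* ...and completes                      */
        raise_barrier(&c);   /* resync now runs with the array quiet  */
        lower_barrier(&c);
        return 0;
    }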
1074 struct r10conf *conf = mddev->private; in raid10_unplug() local
1078 spin_lock_irq(&conf->device_lock); in raid10_unplug()
1079 bio_list_merge(&conf->pending_bio_list, &plug->pending); in raid10_unplug()
1080 conf->pending_count += plug->pending_cnt; in raid10_unplug()
1081 spin_unlock_irq(&conf->device_lock); in raid10_unplug()
1082 wake_up(&conf->wait_barrier); in raid10_unplug()
1091 wake_up(&conf->wait_barrier); in raid10_unplug()
1117 static void regular_request_wait(struct mddev *mddev, struct r10conf *conf, in regular_request_wait() argument
1120 wait_barrier(conf); in regular_request_wait()
1122 bio->bi_iter.bi_sector < conf->reshape_progress && in regular_request_wait()
1123 bio->bi_iter.bi_sector + sectors > conf->reshape_progress) { in regular_request_wait()
1124 raid10_log(conf->mddev, "wait reshape"); in regular_request_wait()
1125 allow_barrier(conf); in regular_request_wait()
1126 wait_event(conf->wait_barrier, in regular_request_wait()
1127 conf->reshape_progress <= bio->bi_iter.bi_sector || in regular_request_wait()
1128 conf->reshape_progress >= bio->bi_iter.bi_sector + in regular_request_wait()
1130 wait_barrier(conf); in regular_request_wait()
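regular_request_wait() stalls any bio whose range straddles conf->reshape_progress, because sectors on either side of the frontier live in different geometries. The straddle test itself, lifted from lines 1122–1123 into a hypothetical standalone helper:

    #include <stdbool.h>
    #include <stdio.h>

    /* A bio spanning [sector, sector + sectors) must wait only when the
     * reshape frontier falls strictly inside it -- the
     * bi_sector < reshape_progress && bi_sector + sectors > reshape_progress
     * test from regular_request_wait(). */
    static bool straddles_reshape(unsigned long long sector,
                                  unsigned long long sectors,
                                  unsigned long long reshape_progress)
    {
        return sector < reshape_progress &&
               sector + sectors > reshape_progress;
    }

    int main(void)
    {
        printf("%d %d\n",
               straddles_reshape(100, 8, 104),   /* 1: must wait          */
               straddles_reshape(100, 8, 100));  /* 0: wholly past/at it  */
        return 0;
    }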
1137 struct r10conf *conf = mddev->private; in raid10_read_request() local
1165 err_rdev = rcu_dereference(conf->mirrors[disk].rdev); in raid10_read_request()
1176 regular_request_wait(mddev, conf, bio, r10_bio->sectors); in raid10_read_request()
1177 rdev = read_balance(conf, r10_bio, &max_sectors); in raid10_read_request()
1194 gfp, &conf->bio_split); in raid10_read_request()
1196 allow_barrier(conf); in raid10_read_request()
1198 wait_barrier(conf); in raid10_read_request()
1238 struct r10conf *conf = mddev->private; in raid10_write_one_disk() local
1244 rdev = conf->mirrors[devnum].replacement; in raid10_write_one_disk()
1248 rdev = conf->mirrors[devnum].rdev; in raid10_write_one_disk()
1251 rdev = conf->mirrors[devnum].rdev; in raid10_write_one_disk()
1265 &conf->mirrors[devnum].rdev->flags) in raid10_write_one_disk()
1266 && enough(conf, devnum)) in raid10_write_one_disk()
1270 if (conf->mddev->gendisk) in raid10_write_one_disk()
1272 mbio, disk_devt(conf->mddev->gendisk), in raid10_write_one_disk()
1288 spin_lock_irqsave(&conf->device_lock, flags); in raid10_write_one_disk()
1289 bio_list_add(&conf->pending_bio_list, mbio); in raid10_write_one_disk()
1290 conf->pending_count++; in raid10_write_one_disk()
1291 spin_unlock_irqrestore(&conf->device_lock, flags); in raid10_write_one_disk()
1299 struct r10conf *conf = mddev->private; in raid10_write_request() local
1311 prepare_to_wait(&conf->wait_barrier, in raid10_write_request()
1318 finish_wait(&conf->wait_barrier, &w); in raid10_write_request()
1322 regular_request_wait(mddev, conf, bio, sectors); in raid10_write_request()
1325 ? (bio->bi_iter.bi_sector < conf->reshape_safe && in raid10_write_request()
1326 bio->bi_iter.bi_sector + sectors > conf->reshape_progress) in raid10_write_request()
1327 : (bio->bi_iter.bi_sector + sectors > conf->reshape_safe && in raid10_write_request()
1328 bio->bi_iter.bi_sector < conf->reshape_progress))) { in raid10_write_request()
1330 mddev->reshape_position = conf->reshape_progress; in raid10_write_request()
1334 raid10_log(conf->mddev, "wait reshape metadata"); in raid10_write_request()
1338 conf->reshape_safe = mddev->reshape_position; in raid10_write_request()
1341 if (conf->pending_count >= max_queued_requests) { in raid10_write_request()
1344 wait_event(conf->wait_barrier, in raid10_write_request()
1345 conf->pending_count < max_queued_requests); in raid10_write_request()
1358 raid10_find_phys(conf, r10_bio); in raid10_write_request()
1364 for (i = 0; i < conf->copies; i++) { in raid10_write_request()
1366 struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev); in raid10_write_request()
1368 conf->mirrors[d].replacement); in raid10_write_request()
1453 rdev_dec_pending(conf->mirrors[d].rdev, mddev); in raid10_write_request()
1458 rdev = conf->mirrors[d].replacement; in raid10_write_request()
1462 rdev = conf->mirrors[d].rdev; in raid10_write_request()
1467 allow_barrier(conf); in raid10_write_request()
1468 raid10_log(conf->mddev, "wait rdev %d blocked", blocked_rdev->raid_disk); in raid10_write_request()
1470 wait_barrier(conf); in raid10_write_request()
1479 GFP_NOIO, &conf->bio_split); in raid10_write_request()
1481 allow_barrier(conf); in raid10_write_request()
1483 wait_barrier(conf); in raid10_write_request()
1491 for (i = 0; i < conf->copies; i++) { in raid10_write_request()
1502 struct r10conf *conf = mddev->private; in __make_request() local
1505 r10_bio = mempool_alloc(&conf->r10bio_pool, GFP_NOIO); in __make_request()
1513 memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) * conf->copies); in __make_request()
1523 struct r10conf *conf = mddev->private; in raid10_make_request() local
1524 sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask); in raid10_make_request()
1541 && (conf->geo.near_copies < conf->geo.raid_disks in raid10_make_request()
1542 || conf->prev.near_copies < in raid10_make_request()
1543 conf->prev.raid_disks))) in raid10_make_request()
1550 wake_up(&conf->wait_barrier); in raid10_make_request()
1556 struct r10conf *conf = mddev->private; in raid10_status() local
1559 if (conf->geo.near_copies < conf->geo.raid_disks) in raid10_status()
1561 if (conf->geo.near_copies > 1) in raid10_status()
1562 seq_printf(seq, " %d near-copies", conf->geo.near_copies); in raid10_status()
1563 if (conf->geo.far_copies > 1) { in raid10_status()
1564 if (conf->geo.far_offset) in raid10_status()
1565 seq_printf(seq, " %d offset-copies", conf->geo.far_copies); in raid10_status()
1567 seq_printf(seq, " %d far-copies", conf->geo.far_copies); in raid10_status()
1568 if (conf->geo.far_set_size != conf->geo.raid_disks) in raid10_status()
1569 seq_printf(seq, " %d devices per set", conf->geo.far_set_size); in raid10_status()
1571 seq_printf(seq, " [%d/%d] [", conf->geo.raid_disks, in raid10_status()
1572 conf->geo.raid_disks - mddev->degraded); in raid10_status()
1574 for (i = 0; i < conf->geo.raid_disks; i++) { in raid10_status()
1575 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); in raid10_status()
1587 static int _enough(struct r10conf *conf, int previous, int ignore) in _enough() argument
1593 disks = conf->prev.raid_disks; in _enough()
1594 ncopies = conf->prev.near_copies; in _enough()
1596 disks = conf->geo.raid_disks; in _enough()
1597 ncopies = conf->geo.near_copies; in _enough()
1602 int n = conf->copies; in _enough()
1608 (rdev = rcu_dereference(conf->mirrors[this].rdev)) && in _enough()
1623 static int enough(struct r10conf *conf, int ignore) in enough() argument
1630 return _enough(conf, 0, ignore) && in enough()
1631 _enough(conf, 1, ignore); in enough()
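enough()/_enough() decide whether every chunk still has a live copy: each window of conf->copies consecutive slots, stepped through the array by near_copies, must contain at least one in-sync disk other than the one being ignored. A userspace sketch of that walk, a reconstruction rather than the kernel function verbatim:

    #include <stdio.h>
    #include <stdbool.h>

    /* Returns false if some group of 'copies' consecutive slots has no
     * surviving in-sync member, i.e. some stripe lost all its copies. */
    static bool enough_sketch(const bool *in_sync, int disks,
                              int copies, int near_copies, int ignore)
    {
        int first = 0;
        do {
            int cnt = 0, this = first;
            for (int n = copies; n--; ) {
                if (this != ignore && in_sync[this])
                    cnt++;
                this = (this + 1) % disks;
            }
            if (cnt == 0)
                return false;
            first = (first + near_copies) % disks;
        } while (first != 0);
        return true;
    }

    int main(void)
    {
        bool ok[4]  = { true, true, true, true };
        bool bad[4] = { false, false, true, true };  /* devs 0 and 1 dead */
        printf("%d\n", enough_sketch(ok, 4, 2, 2, -1));   /* 1 */
        printf("%d\n", enough_sketch(bad, 4, 2, 2, -1));  /* 0: a chunk
                                        mirrored on devs 0,1 is gone */
        return 0;
    }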
1637 struct r10conf *conf = mddev->private; in raid10_error() local
1646 spin_lock_irqsave(&conf->device_lock, flags); in raid10_error()
1648 && !enough(conf, rdev->raid_disk)) { in raid10_error()
1652 spin_unlock_irqrestore(&conf->device_lock, flags); in raid10_error()
1665 spin_unlock_irqrestore(&conf->device_lock, flags); in raid10_error()
1669 mdname(mddev), conf->geo.raid_disks - mddev->degraded); in raid10_error()
1672 static void print_conf(struct r10conf *conf) in print_conf() argument
1678 if (!conf) { in print_conf()
1682 pr_debug(" --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded, in print_conf()
1683 conf->geo.raid_disks); in print_conf()
1687 for (i = 0; i < conf->geo.raid_disks; i++) { in print_conf()
1689 rdev = conf->mirrors[i].rdev; in print_conf()
1698 static void close_sync(struct r10conf *conf) in close_sync() argument
1700 wait_barrier(conf); in close_sync()
1701 allow_barrier(conf); in close_sync()
1703 mempool_exit(&conf->r10buf_pool); in close_sync()
1709 struct r10conf *conf = mddev->private; in raid10_spare_active() local
1718 for (i = 0; i < conf->geo.raid_disks; i++) { in raid10_spare_active()
1719 tmp = conf->mirrors + i; in raid10_spare_active()
1746 spin_lock_irqsave(&conf->device_lock, flags); in raid10_spare_active()
1748 spin_unlock_irqrestore(&conf->device_lock, flags); in raid10_spare_active()
1750 print_conf(conf); in raid10_spare_active()
1756 struct r10conf *conf = mddev->private; in raid10_add_disk() local
1760 int last = conf->geo.raid_disks - 1; in raid10_add_disk()
1767 if (rdev->saved_raid_disk < 0 && !_enough(conf, 1, -1)) in raid10_add_disk()
1777 rdev->saved_raid_disk < conf->geo.raid_disks && in raid10_add_disk()
1778 conf->mirrors[rdev->saved_raid_disk].rdev == NULL) in raid10_add_disk()
1783 struct raid10_info *p = &conf->mirrors[mirror]; in raid10_add_disk()
1797 conf->fullsync = 1; in raid10_add_disk()
1811 conf->fullsync = 1; in raid10_add_disk()
1818 print_conf(conf); in raid10_add_disk()
1824 struct r10conf *conf = mddev->private; in raid10_remove_disk() local
1828 struct raid10_info *p = conf->mirrors + number; in raid10_remove_disk()
1830 print_conf(conf); in raid10_remove_disk()
1849 number < conf->geo.raid_disks && in raid10_remove_disk()
1850 enough(conf, -1)) { in raid10_remove_disk()
1879 print_conf(conf); in raid10_remove_disk()
1885 struct r10conf *conf = r10_bio->mddev->private; in __end_sync_read() local
1894 &conf->mirrors[d].rdev->corrected_errors); in __end_sync_read()
1899 rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev); in __end_sync_read()
1912 struct r10conf *conf = r10_bio->mddev->private; in end_sync_read() local
1913 int d = find_bio_disk(conf, r10_bio, bio, NULL, NULL); in end_sync_read()
1957 struct r10conf *conf = mddev->private; in end_sync_write() local
1965 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl); in end_sync_write()
1967 rdev = conf->mirrors[d].replacement; in end_sync_write()
1969 rdev = conf->mirrors[d].rdev; in end_sync_write()
2010 struct r10conf *conf = mddev->private; in sync_request_write() local
2019 for (i=0; i<conf->copies; i++) in sync_request_write()
2023 if (i == conf->copies) in sync_request_write()
2034 for (i=0 ; i < conf->copies ; i++) { in sync_request_write()
2048 rdev = conf->mirrors[d].rdev; in sync_request_write()
2094 atomic_inc(&conf->mirrors[d].rdev->nr_pending); in sync_request_write()
2096 md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio)); in sync_request_write()
2098 if (test_bit(FailFast, &conf->mirrors[d].rdev->flags)) in sync_request_write()
2100 tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset; in sync_request_write()
2101 bio_set_dev(tbio, conf->mirrors[d].rdev->bdev); in sync_request_write()
2108 for (i = 0; i < conf->copies; i++) { in sync_request_write()
2119 md_sync_acct(conf->mirrors[d].replacement->bdev, in sync_request_write()
2151 struct r10conf *conf = mddev->private; in fix_recovery_read_error() local
2169 rdev = conf->mirrors[dr].rdev; in fix_recovery_read_error()
2177 rdev = conf->mirrors[dw].rdev; in fix_recovery_read_error()
2199 if (rdev != conf->mirrors[dw].rdev) { in fix_recovery_read_error()
2201 struct md_rdev *rdev2 = conf->mirrors[dw].rdev; in fix_recovery_read_error()
2209 conf->mirrors[dw].recovery_disabled in fix_recovery_read_error()
2226 struct r10conf *conf = mddev->private; in recovery_request_write() local
2250 atomic_inc(&conf->mirrors[d].rdev->nr_pending); in recovery_request_write()
2251 md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio)); in recovery_request_write()
2255 atomic_inc(&conf->mirrors[d].replacement->nr_pending); in recovery_request_write()
2256 md_sync_acct(conf->mirrors[d].replacement->bdev, in recovery_request_write()
2330 static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10bio *r10_bio) in fix_read_error() argument
2341 rdev = conf->mirrors[d].rdev; in fix_read_error()
2379 rdev = rcu_dereference(conf->mirrors[d].rdev); in fix_read_error()
2391 conf->tmppage, in fix_read_error()
2399 if (sl == conf->copies) in fix_read_error()
2410 rdev = conf->mirrors[dn].rdev; in fix_read_error()
2431 sl = conf->copies; in fix_read_error()
2434 rdev = rcu_dereference(conf->mirrors[d].rdev); in fix_read_error()
2445 s, conf->tmppage, WRITE) in fix_read_error()
2467 sl = conf->copies; in fix_read_error()
2470 rdev = rcu_dereference(conf->mirrors[d].rdev); in fix_read_error()
2481 s, conf->tmppage, in fix_read_error()
2519 struct r10conf *conf = mddev->private; in narrow_write_error() local
2520 struct md_rdev *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev; in narrow_write_error()
2580 struct r10conf *conf = mddev->private; in handle_read_error() local
2598 freeze_array(conf, 1); in handle_read_error()
2599 fix_read_error(conf, mddev, r10_bio); in handle_read_error()
2600 unfreeze_array(conf); in handle_read_error()
2605 allow_barrier(conf); in handle_read_error()
2610 static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio) in handle_write_completed() argument
2623 for (m = 0; m < conf->copies; m++) { in handle_write_completed()
2625 rdev = conf->mirrors[dev].rdev; in handle_write_completed()
2639 md_error(conf->mddev, rdev); in handle_write_completed()
2641 rdev = conf->mirrors[dev].replacement; in handle_write_completed()
2656 md_error(conf->mddev, rdev); in handle_write_completed()
2662 for (m = 0; m < conf->copies; m++) { in handle_write_completed()
2665 rdev = conf->mirrors[dev].rdev; in handle_write_completed()
2671 rdev_dec_pending(rdev, conf->mddev); in handle_write_completed()
2675 md_error(conf->mddev, rdev); in handle_write_completed()
2679 rdev_dec_pending(rdev, conf->mddev); in handle_write_completed()
2682 rdev = conf->mirrors[dev].replacement; in handle_write_completed()
2688 rdev_dec_pending(rdev, conf->mddev); in handle_write_completed()
2692 spin_lock_irq(&conf->device_lock); in handle_write_completed()
2693 list_add(&r10_bio->retry_list, &conf->bio_end_io_list); in handle_write_completed()
2694 conf->nr_queued++; in handle_write_completed()
2695 spin_unlock_irq(&conf->device_lock); in handle_write_completed()
2700 wake_up(&conf->wait_barrier); in handle_write_completed()
2701 md_wakeup_thread(conf->mddev->thread); in handle_write_completed()
2716 struct r10conf *conf = mddev->private; in raid10d() local
2717 struct list_head *head = &conf->retry_list; in raid10d()
2722 if (!list_empty_careful(&conf->bio_end_io_list) && in raid10d()
2725 spin_lock_irqsave(&conf->device_lock, flags); in raid10d()
2727 while (!list_empty(&conf->bio_end_io_list)) { in raid10d()
2728 list_move(conf->bio_end_io_list.prev, &tmp); in raid10d()
2729 conf->nr_queued--; in raid10d()
2732 spin_unlock_irqrestore(&conf->device_lock, flags); in raid10d()
2750 flush_pending_writes(conf); in raid10d()
2752 spin_lock_irqsave(&conf->device_lock, flags); in raid10d()
2754 spin_unlock_irqrestore(&conf->device_lock, flags); in raid10d()
2759 conf->nr_queued--; in raid10d()
2760 spin_unlock_irqrestore(&conf->device_lock, flags); in raid10d()
2763 conf = mddev->private; in raid10d()
2766 handle_write_completed(conf, r10_bio); in raid10d()
2785 static int init_resync(struct r10conf *conf) in init_resync() argument
2790 BUG_ON(mempool_initialized(&conf->r10buf_pool)); in init_resync()
2791 conf->have_replacement = 0; in init_resync()
2792 for (i = 0; i < conf->geo.raid_disks; i++) in init_resync()
2793 if (conf->mirrors[i].replacement) in init_resync()
2794 conf->have_replacement = 1; in init_resync()
2795 ret = mempool_init(&conf->r10buf_pool, buffs, in init_resync()
2796 r10buf_pool_alloc, r10buf_pool_free, conf); in init_resync()
2799 conf->next_resync = 0; in init_resync()
2803 static struct r10bio *raid10_alloc_init_r10buf(struct r10conf *conf) in raid10_alloc_init_r10buf() argument
2805 struct r10bio *r10bio = mempool_alloc(&conf->r10buf_pool, GFP_NOIO); in raid10_alloc_init_r10buf()
2811 if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) || in raid10_alloc_init_r10buf()
2812 test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery)) in raid10_alloc_init_r10buf()
2813 nalloc = conf->copies; /* resync */ in raid10_alloc_init_r10buf()
2836 static void raid10_set_cluster_sync_high(struct r10conf *conf) in raid10_set_cluster_sync_high() argument
2853 chunks = conf->geo.raid_disks / conf->geo.near_copies; in raid10_set_cluster_sync_high()
2854 if (conf->geo.raid_disks % conf->geo.near_copies == 0) in raid10_set_cluster_sync_high()
2858 window_size = (chunks + extra_chunk) * conf->mddev->chunk_sectors; in raid10_set_cluster_sync_high()
2866 conf->cluster_sync_high = conf->cluster_sync_low + window_size; in raid10_set_cluster_sync_high()
2904 struct r10conf *conf = mddev->private; in raid10_sync_request() local
2913 sector_t chunk_mask = conf->geo.chunk_mask; in raid10_sync_request()
2916 if (!mempool_initialized(&conf->r10buf_pool)) in raid10_sync_request()
2917 if (init_resync(conf)) in raid10_sync_request()
2930 conf->fullsync == 0) { in raid10_sync_request()
2941 conf->cluster_sync_low = 0; in raid10_sync_request()
2942 conf->cluster_sync_high = 0; in raid10_sync_request()
2954 end_reshape(conf); in raid10_sync_request()
2955 close_sync(conf); in raid10_sync_request()
2963 else for (i = 0; i < conf->geo.raid_disks; i++) { in raid10_sync_request()
2965 raid10_find_virt(conf, mddev->curr_resync, i); in raid10_sync_request()
2971 if ((!mddev->bitmap || conf->fullsync) in raid10_sync_request()
2972 && conf->have_replacement in raid10_sync_request()
2978 for (i = 0; i < conf->geo.raid_disks; i++) { in raid10_sync_request()
2980 rcu_dereference(conf->mirrors[i].replacement); in raid10_sync_request()
2986 conf->fullsync = 0; in raid10_sync_request()
2989 close_sync(conf); in raid10_sync_request()
2997 if (chunks_skipped >= conf->geo.raid_disks) { in raid10_sync_request()
3011 if (conf->geo.near_copies < conf->geo.raid_disks && in raid10_sync_request()
3019 if (conf->nr_waiting) in raid10_sync_request()
3043 for (i = 0 ; i < conf->geo.raid_disks; i++) { in raid10_sync_request()
3051 struct raid10_info *mirror = &conf->mirrors[i]; in raid10_sync_request()
3074 sect = raid10_find_virt(conf, sector_nr, i); in raid10_sync_request()
3094 !conf->fullsync) { in raid10_sync_request()
3107 r10_bio = raid10_alloc_init_r10buf(conf); in raid10_sync_request()
3109 raise_barrier(conf, rb2 != NULL); in raid10_sync_request()
3119 raid10_find_phys(conf, r10_bio); in raid10_sync_request()
3125 for (j = 0; j < conf->geo.raid_disks; j++) { in raid10_sync_request()
3127 conf->mirrors[j].rdev); in raid10_sync_request()
3138 for (j=0; j<conf->copies;j++) { in raid10_sync_request()
3143 rcu_dereference(conf->mirrors[d].rdev); in raid10_sync_request()
3179 for (k=0; k<conf->copies; k++) in raid10_sync_request()
3182 BUG_ON(k == conf->copies); in raid10_sync_request()
3223 if (j == conf->copies) { in raid10_sync_request()
3231 for (k = 0; k < conf->copies; k++) in raid10_sync_request()
3274 for (; j < conf->copies; j++) { in raid10_sync_request()
3276 if (conf->mirrors[d].rdev && in raid10_sync_request()
3278 &conf->mirrors[d].rdev->flags)) in raid10_sync_request()
3308 (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high)); in raid10_sync_request()
3312 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, in raid10_sync_request()
3320 r10_bio = raid10_alloc_init_r10buf(conf); in raid10_sync_request()
3325 raise_barrier(conf, 0); in raid10_sync_request()
3326 conf->next_resync = sector_nr; in raid10_sync_request()
3331 raid10_find_phys(conf, r10_bio); in raid10_sync_request()
3334 for (i = 0; i < conf->copies; i++) { in raid10_sync_request()
3346 rdev = rcu_dereference(conf->mirrors[d].rdev); in raid10_sync_request()
3376 rdev = rcu_dereference(conf->mirrors[d].replacement); in raid10_sync_request()
3401 for (i=0; i<conf->copies; i++) { in raid10_sync_request()
3404 rdev_dec_pending(conf->mirrors[d].rdev, in raid10_sync_request()
3409 conf->mirrors[d].replacement, in raid10_sync_request()
3445 if (conf->cluster_sync_high < sector_nr + nr_sectors) { in raid10_sync_request()
3446 conf->cluster_sync_low = mddev->curr_resync_completed; in raid10_sync_request()
3447 raid10_set_cluster_sync_high(conf); in raid10_sync_request()
3450 conf->cluster_sync_low, in raid10_sync_request()
3451 conf->cluster_sync_high); in raid10_sync_request()
3458 for (i = 0; i < conf->geo.raid_disks; i++) { in raid10_sync_request()
3464 sect_va1 = raid10_find_virt(conf, sector_nr, i); in raid10_sync_request()
3466 if (conf->cluster_sync_high < sect_va1 + nr_sectors) { in raid10_sync_request()
3472 sect_va2 = raid10_find_virt(conf, in raid10_sync_request()
3475 if (conf->cluster_sync_low == 0 || in raid10_sync_request()
3476 conf->cluster_sync_low > sect_va2) in raid10_sync_request()
3477 conf->cluster_sync_low = sect_va2; in raid10_sync_request()
3481 raid10_set_cluster_sync_high(conf); in raid10_sync_request()
3483 conf->cluster_sync_low, in raid10_sync_request()
3484 conf->cluster_sync_high); in raid10_sync_request()
3528 struct r10conf *conf = mddev->private; in raid10_size() local
3531 raid_disks = min(conf->geo.raid_disks, in raid10_size()
3532 conf->prev.raid_disks); in raid10_size()
3534 sectors = conf->dev_sectors; in raid10_size()
3536 size = sectors >> conf->geo.chunk_shift; in raid10_size()
3537 sector_div(size, conf->geo.far_copies); in raid10_size()
3539 sector_div(size, conf->geo.near_copies); in raid10_size()
3541 return size << conf->geo.chunk_shift; in raid10_size()
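raid10_size() (lines 3528–3541) computes usable capacity in whole chunks: per-device chunks divided by far_copies, multiplied by raid_disks, divided by near_copies. The same arithmetic as a standalone helper, with a worked 4-disk n2 example:

    #include <stdio.h>

    typedef unsigned long long sector_t;

    /* Mirrors the division order of raid10_size(): work in chunks so
     * partial chunks are discarded, then convert back to sectors. */
    static sector_t raid10_size_sketch(sector_t dev_sectors, int chunk_shift,
                                       int raid_disks, int near_copies,
                                       int far_copies)
    {
        sector_t size = dev_sectors >> chunk_shift;  /* chunks per device */
        size /= far_copies;
        size *= raid_disks;
        size /= near_copies;
        return size << chunk_shift;                  /* back to sectors */
    }

    int main(void)
    {
        /* 4 disks of 1 GiB (2097152 sectors), 64-sector chunks,
         * classic n2 layout: expect 4194304 sectors = 2 GiB usable. */
        printf("%llu sectors\n",
               raid10_size_sketch(2097152ULL, 6, 4, 2, 1));
        return 0;
    }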
3544 static void calc_sectors(struct r10conf *conf, sector_t size) in calc_sectors() argument
3551 size = size >> conf->geo.chunk_shift; in calc_sectors()
3552 sector_div(size, conf->geo.far_copies); in calc_sectors()
3553 size = size * conf->geo.raid_disks; in calc_sectors()
3554 sector_div(size, conf->geo.near_copies); in calc_sectors()
3557 size = size * conf->copies; in calc_sectors()
3562 size = DIV_ROUND_UP_SECTOR_T(size, conf->geo.raid_disks); in calc_sectors()
3564 conf->dev_sectors = size << conf->geo.chunk_shift; in calc_sectors()
3566 if (conf->geo.far_offset) in calc_sectors()
3567 conf->geo.stride = 1 << conf->geo.chunk_shift; in calc_sectors()
3569 sector_div(size, conf->geo.far_copies); in calc_sectors()
3570 conf->geo.stride = size << conf->geo.chunk_shift; in calc_sectors()
3633 struct r10conf *conf = NULL; in setup_conf() local
3653 conf = kzalloc(sizeof(struct r10conf), GFP_KERNEL); in setup_conf()
3654 if (!conf) in setup_conf()
3658 conf->mirrors = kcalloc(mddev->raid_disks + max(0, -mddev->delta_disks), in setup_conf()
3661 if (!conf->mirrors) in setup_conf()
3664 conf->tmppage = alloc_page(GFP_KERNEL); in setup_conf()
3665 if (!conf->tmppage) in setup_conf()
3668 conf->geo = geo; in setup_conf()
3669 conf->copies = copies; in setup_conf()
3670 err = mempool_init(&conf->r10bio_pool, NR_RAID_BIOS, r10bio_pool_alloc, in setup_conf()
3671 rbio_pool_free, conf); in setup_conf()
3675 err = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0); in setup_conf()
3679 calc_sectors(conf, mddev->dev_sectors); in setup_conf()
3681 conf->prev = conf->geo; in setup_conf()
3682 conf->reshape_progress = MaxSector; in setup_conf()
3684 if (setup_geo(&conf->prev, mddev, geo_old) != conf->copies) { in setup_conf()
3688 conf->reshape_progress = mddev->reshape_position; in setup_conf()
3689 if (conf->prev.far_offset) in setup_conf()
3690 conf->prev.stride = 1 << conf->prev.chunk_shift; in setup_conf()
3693 conf->prev.stride = conf->dev_sectors; in setup_conf()
3695 conf->reshape_safe = conf->reshape_progress; in setup_conf()
3696 spin_lock_init(&conf->device_lock); in setup_conf()
3697 INIT_LIST_HEAD(&conf->retry_list); in setup_conf()
3698 INIT_LIST_HEAD(&conf->bio_end_io_list); in setup_conf()
3700 spin_lock_init(&conf->resync_lock); in setup_conf()
3701 init_waitqueue_head(&conf->wait_barrier); in setup_conf()
3702 atomic_set(&conf->nr_pending, 0); in setup_conf()
3705 conf->thread = md_register_thread(raid10d, mddev, "raid10"); in setup_conf()
3706 if (!conf->thread) in setup_conf()
3709 conf->mddev = mddev; in setup_conf()
3710 return conf; in setup_conf()
3713 if (conf) { in setup_conf()
3714 mempool_exit(&conf->r10bio_pool); in setup_conf()
3715 kfree(conf->mirrors); in setup_conf()
3716 safe_put_page(conf->tmppage); in setup_conf()
3717 bioset_exit(&conf->bio_split); in setup_conf()
3718 kfree(conf); in setup_conf()
3725 struct r10conf *conf; in raid10_run() local
3738 conf = setup_conf(mddev); in raid10_run()
3739 if (IS_ERR(conf)) in raid10_run()
3740 return PTR_ERR(conf); in raid10_run()
3741 mddev->private = conf; in raid10_run()
3743 conf = mddev->private; in raid10_run()
3744 if (!conf) in raid10_run()
3747 if (mddev_is_clustered(conf->mddev)) { in raid10_run()
3759 mddev->thread = conf->thread; in raid10_run()
3760 conf->thread = NULL; in raid10_run()
3769 if (conf->geo.raid_disks % conf->geo.near_copies) in raid10_run()
3770 blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks); in raid10_run()
3773 (conf->geo.raid_disks / conf->geo.near_copies)); in raid10_run()
3782 if (disk_idx >= conf->geo.raid_disks && in raid10_run()
3783 disk_idx >= conf->prev.raid_disks) in raid10_run()
3785 disk = conf->mirrors + disk_idx; in raid10_run()
3824 if (!enough(conf, -1)) { in raid10_run()
3830 if (conf->reshape_progress != MaxSector) { in raid10_run()
3832 if (conf->geo.far_copies != 1 && in raid10_run()
3833 conf->geo.far_offset == 0) in raid10_run()
3835 if (conf->prev.far_copies != 1 && in raid10_run()
3836 conf->prev.far_offset == 0) in raid10_run()
3842 i < conf->geo.raid_disks in raid10_run()
3843 || i < conf->prev.raid_disks; in raid10_run()
3846 disk = conf->mirrors + i; in raid10_run()
3861 conf->fullsync = 1; in raid10_run()
3867 conf->fullsync = 1; in raid10_run()
3877 mdname(mddev), conf->geo.raid_disks - mddev->degraded, in raid10_run()
3878 conf->geo.raid_disks); in raid10_run()
3882 mddev->dev_sectors = conf->dev_sectors; in raid10_run()
3889 int stripe = conf->geo.raid_disks * in raid10_run()
3896 stripe /= conf->geo.near_copies; in raid10_run()
3904 if (conf->reshape_progress != MaxSector) { in raid10_run()
3907 before_length = ((1 << conf->prev.chunk_shift) * in raid10_run()
3908 conf->prev.far_copies); in raid10_run()
3909 after_length = ((1 << conf->geo.chunk_shift) * in raid10_run()
3910 conf->geo.far_copies); in raid10_run()
3917 conf->offset_diff = min_offset_diff; in raid10_run()
3933 mempool_exit(&conf->r10bio_pool); in raid10_run()
3934 safe_put_page(conf->tmppage); in raid10_run()
3935 kfree(conf->mirrors); in raid10_run()
3936 kfree(conf); in raid10_run()
3944 struct r10conf *conf = priv; in raid10_free() local
3946 mempool_exit(&conf->r10bio_pool); in raid10_free()
3947 safe_put_page(conf->tmppage); in raid10_free()
3948 kfree(conf->mirrors); in raid10_free()
3949 kfree(conf->mirrors_old); in raid10_free()
3950 kfree(conf->mirrors_new); in raid10_free()
3951 bioset_exit(&conf->bio_split); in raid10_free()
3952 kfree(conf); in raid10_free()
3957 struct r10conf *conf = mddev->private; in raid10_quiesce() local
3960 raise_barrier(conf, 0); in raid10_quiesce()
3962 lower_barrier(conf); in raid10_quiesce()
3979 struct r10conf *conf = mddev->private; in raid10_resize() local
3985 if (conf->geo.far_copies > 1 && !conf->geo.far_offset) in raid10_resize()
4004 calc_sectors(conf, sectors); in raid10_resize()
4005 mddev->dev_sectors = conf->dev_sectors; in raid10_resize()
4013 struct r10conf *conf; in raid10_takeover_raid0() local
4033 conf = setup_conf(mddev); in raid10_takeover_raid0()
4034 if (!IS_ERR(conf)) { in raid10_takeover_raid0()
4040 conf->barrier = 1; in raid10_takeover_raid0()
4043 return conf; in raid10_takeover_raid0()
4084 struct r10conf *conf = mddev->private; in raid10_check_reshape() local
4087 if (conf->geo.far_copies != 1 && !conf->geo.far_offset) in raid10_check_reshape()
4090 if (setup_geo(&geo, mddev, geo_start) != conf->copies) in raid10_check_reshape()
4101 if (!enough(conf, -1)) in raid10_check_reshape()
4104 kfree(conf->mirrors_new); in raid10_check_reshape()
4105 conf->mirrors_new = NULL; in raid10_check_reshape()
4108 conf->mirrors_new = in raid10_check_reshape()
4112 if (!conf->mirrors_new) in raid10_check_reshape()
4131 static int calc_degraded(struct r10conf *conf) in calc_degraded() argument
4139 for (i = 0; i < conf->prev.raid_disks; i++) { in calc_degraded()
4140 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); in calc_degraded()
4151 if (conf->geo.raid_disks == conf->prev.raid_disks) in calc_degraded()
4155 for (i = 0; i < conf->geo.raid_disks; i++) { in calc_degraded()
4156 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); in calc_degraded()
4165 if (conf->geo.raid_disks <= conf->prev.raid_disks) in calc_degraded()
4191 struct r10conf *conf = mddev->private; in raid10_start_reshape() local
4199 if (setup_geo(&new, mddev, geo_start) != conf->copies) in raid10_start_reshape()
4202 before_length = ((1 << conf->prev.chunk_shift) * in raid10_start_reshape()
4203 conf->prev.far_copies); in raid10_start_reshape()
4204 after_length = ((1 << conf->geo.chunk_shift) * in raid10_start_reshape()
4205 conf->geo.far_copies); in raid10_start_reshape()
4230 conf->offset_diff = min_offset_diff; in raid10_start_reshape()
4231 spin_lock_irq(&conf->device_lock); in raid10_start_reshape()
4232 if (conf->mirrors_new) { in raid10_start_reshape()
4233 memcpy(conf->mirrors_new, conf->mirrors, in raid10_start_reshape()
4234 sizeof(struct raid10_info)*conf->prev.raid_disks); in raid10_start_reshape()
4236 kfree(conf->mirrors_old); in raid10_start_reshape()
4237 conf->mirrors_old = conf->mirrors; in raid10_start_reshape()
4238 conf->mirrors = conf->mirrors_new; in raid10_start_reshape()
4239 conf->mirrors_new = NULL; in raid10_start_reshape()
4241 setup_geo(&conf->geo, mddev, geo_start); in raid10_start_reshape()
4246 spin_unlock_irq(&conf->device_lock); in raid10_start_reshape()
4252 conf->reshape_progress = size; in raid10_start_reshape()
4254 conf->reshape_progress = 0; in raid10_start_reshape()
4255 conf->reshape_safe = conf->reshape_progress; in raid10_start_reshape()
4256 spin_unlock_irq(&conf->device_lock); in raid10_start_reshape()
4263 newsize = raid10_size(mddev, 0, conf->geo.raid_disks); in raid10_start_reshape()
4305 conf->prev.raid_disks) in raid10_start_reshape()
4313 } else if (rdev->raid_disk >= conf->prev.raid_disks in raid10_start_reshape()
4323 spin_lock_irq(&conf->device_lock); in raid10_start_reshape()
4324 mddev->degraded = calc_degraded(conf); in raid10_start_reshape()
4325 spin_unlock_irq(&conf->device_lock); in raid10_start_reshape()
4326 mddev->raid_disks = conf->geo.raid_disks; in raid10_start_reshape()
4327 mddev->reshape_position = conf->reshape_progress; in raid10_start_reshape()
4342 conf->reshape_checkpoint = jiffies; in raid10_start_reshape()
4349 spin_lock_irq(&conf->device_lock); in raid10_start_reshape()
4350 conf->geo = conf->prev; in raid10_start_reshape()
4351 mddev->raid_disks = conf->geo.raid_disks; in raid10_start_reshape()
4355 conf->reshape_progress = MaxSector; in raid10_start_reshape()
4356 conf->reshape_safe = MaxSector; in raid10_start_reshape()
4358 spin_unlock_irq(&conf->device_lock); in raid10_start_reshape()
4433 struct r10conf *conf = mddev->private; in reshape_request() local
4449 conf->reshape_progress < raid10_size(mddev, 0, 0)) { in reshape_request()
4451 - conf->reshape_progress); in reshape_request()
4453 conf->reshape_progress > 0) in reshape_request()
4454 sector_nr = conf->reshape_progress; in reshape_request()
4471 next = first_dev_address(conf->reshape_progress - 1, in reshape_request()
4472 &conf->geo); in reshape_request()
4477 safe = last_dev_address(conf->reshape_safe - 1, in reshape_request()
4478 &conf->prev); in reshape_request()
4480 if (next + conf->offset_diff < safe) in reshape_request()
4483 last = conf->reshape_progress - 1; in reshape_request()
4484 sector_nr = last & ~(sector_t)(conf->geo.chunk_mask in reshape_request()
4485 & conf->prev.chunk_mask); in reshape_request()
4492 next = last_dev_address(conf->reshape_progress, &conf->geo); in reshape_request()
4497 safe = first_dev_address(conf->reshape_safe, &conf->prev); in reshape_request()
4502 if (next > safe + conf->offset_diff) in reshape_request()
4505 sector_nr = conf->reshape_progress; in reshape_request()
4506 last = sector_nr | (conf->geo.chunk_mask in reshape_request()
4507 & conf->prev.chunk_mask); in reshape_request()
4514 time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) { in reshape_request()
4516 wait_barrier(conf); in reshape_request()
4517 mddev->reshape_position = conf->reshape_progress; in reshape_request()
4520 - conf->reshape_progress; in reshape_request()
4522 mddev->curr_resync_completed = conf->reshape_progress; in reshape_request()
4523 conf->reshape_checkpoint = jiffies; in reshape_request()
4529 allow_barrier(conf); in reshape_request()
4532 conf->reshape_safe = mddev->reshape_position; in reshape_request()
4533 allow_barrier(conf); in reshape_request()
4536 raise_barrier(conf, 0); in reshape_request()
4539 r10_bio = raid10_alloc_init_r10buf(conf); in reshape_request()
4541 raise_barrier(conf, 1); in reshape_request()
4547 rdev = read_balance(conf, r10_bio, &max_sectors); in reshape_request()
4555 mempool_free(r10_bio, &conf->r10buf_pool); in reshape_request()
4579 if (mddev_is_clustered(mddev) && conf->cluster_sync_high <= sector_nr) { in reshape_request()
4583 conf->cluster_sync_low = sector_nr; in reshape_request()
4584 conf->cluster_sync_high = sector_nr + CLUSTER_RESYNC_WINDOW_SECTORS; in reshape_request()
4593 if (sb_reshape_pos < conf->cluster_sync_low) in reshape_request()
4594 conf->cluster_sync_low = sb_reshape_pos; in reshape_request()
4597 md_cluster_ops->resync_info_update(mddev, conf->cluster_sync_low, in reshape_request()
4598 conf->cluster_sync_high); in reshape_request()
4602 __raid10_find_phys(&conf->geo, r10_bio); in reshape_request()
4608 for (s = 0; s < conf->copies*2; s++) { in reshape_request()
4613 rdev2 = rcu_dereference(conf->mirrors[d].replacement); in reshape_request()
4616 rdev2 = rcu_dereference(conf->mirrors[d].rdev); in reshape_request()
4662 lower_barrier(conf); in reshape_request()
4668 conf->reshape_progress -= sectors_done; in reshape_request()
4670 conf->reshape_progress += sectors_done; in reshape_request()
4685 struct r10conf *conf = mddev->private; in reshape_request_write() local
4699 for (s = 0; s < conf->copies*2; s++) { in reshape_request_write()
4705 rdev = rcu_dereference(conf->mirrors[d].replacement); in reshape_request_write()
4708 rdev = rcu_dereference(conf->mirrors[d].rdev); in reshape_request_write()
4725 static void end_reshape(struct r10conf *conf) in end_reshape() argument
4727 if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) in end_reshape()
4730 spin_lock_irq(&conf->device_lock); in end_reshape()
4731 conf->prev = conf->geo; in end_reshape()
4732 md_finish_reshape(conf->mddev); in end_reshape()
4734 conf->reshape_progress = MaxSector; in end_reshape()
4735 conf->reshape_safe = MaxSector; in end_reshape()
4736 spin_unlock_irq(&conf->device_lock); in end_reshape()
4741 if (conf->mddev->queue) { in end_reshape()
4742 int stripe = conf->geo.raid_disks * in end_reshape()
4743 ((conf->mddev->chunk_sectors << 9) / PAGE_SIZE); in end_reshape()
4744 stripe /= conf->geo.near_copies; in end_reshape()
4745 if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe) in end_reshape()
4746 conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe; in end_reshape()
4748 conf->fullsync = 0; in end_reshape()
4753 struct r10conf *conf = mddev->private; in raid10_update_reshape_pos() local
4759 conf->reshape_progress = mddev->reshape_position; in raid10_update_reshape_pos()
4769 struct r10conf *conf = mddev->private; in handle_reshape_read_error() local
4775 r10b = kmalloc(struct_size(r10b, devs, conf->copies), GFP_NOIO); in handle_reshape_read_error()
4785 __raid10_find_phys(&conf->prev, r10b); in handle_reshape_read_error()
4798 struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev); in handle_reshape_read_error()
4819 if (slot >= conf->copies) in handle_reshape_read_error()
4843 struct r10conf *conf = mddev->private; in end_reshape_write() local
4849 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl); in end_reshape_write()
4851 rdev = conf->mirrors[d].replacement; in end_reshape_write()
4854 rdev = conf->mirrors[d].rdev; in end_reshape_write()
4877 struct r10conf *conf = mddev->private; in raid10_finish_reshape() local
4891 for (d = conf->geo.raid_disks ; in raid10_finish_reshape()
4892 d < conf->geo.raid_disks - mddev->delta_disks; in raid10_finish_reshape()
4894 struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev); in raid10_finish_reshape()
4897 rdev = rcu_dereference(conf->mirrors[d].replacement); in raid10_finish_reshape()
4904 mddev->chunk_sectors = 1 << conf->geo.chunk_shift; in raid10_finish_reshape()