
Lines Matching refs:mddev in drivers/md/raid10.c
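
Before the matches, a minimal illustrative sketch of the pattern that dominates this listing: raid10 code recovers its per-array r10conf from mddev->private (or via r10_bio->mddev) and wakes the per-array management thread. The helper below is hypothetical, assembled from fields that appear in the matches; it is not a line from raid10.c.

/* Hypothetical helper, for illustration only -- not kernel source. */
static void example_show_state(struct mddev *mddev)
{
	struct r10conf *conf = mddev->private;	/* personality state, as in raid10d() */

	/* geo.raid_disks and mddev->degraded pair up as in raid10_status()/print_conf() */
	pr_debug("%s: %d/%d disks in sync\n", mdname(mddev),
		 conf->geo.raid_disks - mddev->degraded,
		 conf->geo.raid_disks);

	md_wakeup_thread(mddev->thread);	/* kick raid10d, as reschedule_retry() does */
}
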

71 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
73 static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio);
129 if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) || in r10buf_pool_alloc()
130 test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery)) in r10buf_pool_alloc()
174 &conf->mddev->recovery)) { in r10buf_pool_alloc()
255 struct r10conf *conf = r10_bio->mddev->private; in free_r10bio()
263 struct r10conf *conf = r10_bio->mddev->private; in put_buf()
273 struct mddev *mddev = r10_bio->mddev; in reschedule_retry() local
274 struct r10conf *conf = mddev->private; in reschedule_retry()
284 md_wakeup_thread(mddev->thread); in reschedule_retry()
295 struct r10conf *conf = r10_bio->mddev->private; in raid_end_bio_io()
315 struct r10conf *conf = r10_bio->mddev->private; in update_head_pos()
355 struct r10conf *conf = r10_bio->mddev->private; in raid10_end_read_request()
387 rdev_dec_pending(rdev, conf->mddev); in raid10_end_read_request()
394 mdname(conf->mddev), in raid10_end_read_request()
405 md_bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector, in close_write()
409 md_write_end(r10_bio->mddev); in close_write()
432 struct r10conf *conf = r10_bio->mddev->private; in raid10_end_write_request()
457 md_error(rdev->mddev, rdev); in raid10_end_write_request()
462 &rdev->mddev->recovery); in raid10_end_write_request()
467 md_error(rdev->mddev, rdev); in raid10_end_write_request()
531 rdev_dec_pending(rdev, conf->mddev); in raid10_end_write_request()
630 conf->mddev->reshape_backwards)) { in raid10_find_phys()
735 if ((conf->mddev->recovery_cp < MaxSector in read_balance()
737 (mddev_is_clustered(conf->mddev) && in read_balance()
738 md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector, in read_balance()
851 static int raid10_congested(struct mddev *mddev, int bits) in raid10_congested() argument
853 struct r10conf *conf = mddev->private; in raid10_congested()
905 md_bitmap_unplug(conf->mddev->bitmap); in flush_pending_writes()
993 raid10_log(conf->mddev, "wait barrier"); in wait_barrier()
1056 if (!test_bit(MD_RECOVERY_RESHAPE, &rdev->mddev->recovery) || in choose_data_offset()
1073 struct mddev *mddev = plug->cb.data; in raid10_unplug() local
1074 struct r10conf *conf = mddev->private; in raid10_unplug()
1083 md_wakeup_thread(mddev->thread); in raid10_unplug()
1090 md_bitmap_unplug(mddev->bitmap); in raid10_unplug()
1117 static void regular_request_wait(struct mddev *mddev, struct r10conf *conf, in regular_request_wait() argument
1121 while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && in regular_request_wait()
1124 raid10_log(conf->mddev, "wait reshape"); in regular_request_wait()
1134 static void raid10_read_request(struct mddev *mddev, struct bio *bio, in raid10_read_request() argument
1137 struct r10conf *conf = mddev->private; in raid10_read_request()
1176 regular_request_wait(mddev, conf, bio, r10_bio->sectors); in raid10_read_request()
1181 mdname(mddev), b, in raid10_read_request()
1189 mdname(mddev), in raid10_read_request()
1205 read_bio = bio_clone_fast(bio, gfp, &mddev->bio_set); in raid10_read_request()
1220 if (mddev->gendisk) in raid10_read_request()
1222 read_bio, disk_devt(mddev->gendisk), in raid10_read_request()
1228 static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio, in raid10_write_one_disk() argument
1238 struct r10conf *conf = mddev->private; in raid10_write_one_disk()
1253 mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set); in raid10_write_one_disk()
1270 if (conf->mddev->gendisk) in raid10_write_one_disk()
1272 mbio, disk_devt(conf->mddev->gendisk), in raid10_write_one_disk()
1279 cb = blk_check_plugged(raid10_unplug, mddev, sizeof(*plug)); in raid10_write_one_disk()
1292 md_wakeup_thread(mddev->thread); in raid10_write_one_disk()
1296 static void raid10_write_request(struct mddev *mddev, struct bio *bio, in raid10_write_request() argument
1299 struct r10conf *conf = mddev->private; in raid10_write_request()
1305 if ((mddev_is_clustered(mddev) && in raid10_write_request()
1306 md_cluster_ops->area_resyncing(mddev, WRITE, in raid10_write_request()
1313 if (!md_cluster_ops->area_resyncing(mddev, WRITE, in raid10_write_request()
1322 regular_request_wait(mddev, conf, bio, sectors); in raid10_write_request()
1323 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && in raid10_write_request()
1324 (mddev->reshape_backwards in raid10_write_request()
1330 mddev->reshape_position = conf->reshape_progress; in raid10_write_request()
1331 set_mask_bits(&mddev->sb_flags, 0, in raid10_write_request()
1333 md_wakeup_thread(mddev->thread); in raid10_write_request()
1334 raid10_log(conf->mddev, "wait reshape metadata"); in raid10_write_request()
1335 wait_event(mddev->sb_wait, in raid10_write_request()
1336 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); in raid10_write_request()
1338 conf->reshape_safe = mddev->reshape_position; in raid10_write_request()
1342 md_wakeup_thread(mddev->thread); in raid10_write_request()
1343 raid10_log(mddev, "wait queued"); in raid10_write_request()
1453 rdev_dec_pending(conf->mirrors[d].rdev, mddev); in raid10_write_request()
1464 rdev_dec_pending(rdev, mddev); in raid10_write_request()
1468 raid10_log(conf->mddev, "wait rdev %d blocked", blocked_rdev->raid_disk); in raid10_write_request()
1469 md_wait_for_blocked_rdev(blocked_rdev, mddev); in raid10_write_request()
1489 md_bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0); in raid10_write_request()
1493 raid10_write_one_disk(mddev, r10_bio, bio, false, i); in raid10_write_request()
1495 raid10_write_one_disk(mddev, r10_bio, bio, true, i); in raid10_write_request()
1500 static void __make_request(struct mddev *mddev, struct bio *bio, int sectors) in __make_request() argument
1502 struct r10conf *conf = mddev->private; in __make_request()
1510 r10_bio->mddev = mddev; in __make_request()
1516 raid10_read_request(mddev, bio, r10_bio); in __make_request()
1518 raid10_write_request(mddev, bio, r10_bio); in __make_request()
1521 static bool raid10_make_request(struct mddev *mddev, struct bio *bio) in raid10_make_request() argument
1523 struct r10conf *conf = mddev->private; in raid10_make_request()
1529 && md_flush_request(mddev, bio)) in raid10_make_request()
1532 if (!md_write_start(mddev, bio)) in raid10_make_request()
1547 __make_request(mddev, bio, sectors); in raid10_make_request()
1554 static void raid10_status(struct seq_file *seq, struct mddev *mddev) in raid10_status() argument
1556 struct r10conf *conf = mddev->private; in raid10_status()
1560 seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2); in raid10_status()
1572 conf->geo.raid_disks - mddev->degraded); in raid10_status()
1634 static void raid10_error(struct mddev *mddev, struct md_rdev *rdev) in raid10_error() argument
1637 struct r10conf *conf = mddev->private; in raid10_error()
1647 if (test_bit(In_sync, &rdev->flags) && !mddev->fail_last_dev in raid10_error()
1656 mddev->degraded++; in raid10_error()
1660 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in raid10_error()
1663 set_mask_bits(&mddev->sb_flags, 0, in raid10_error()
1668 mdname(mddev), bdevname(rdev->bdev, b), in raid10_error()
1669 mdname(mddev), conf->geo.raid_disks - mddev->degraded); in raid10_error()
1682 pr_debug(" --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded, in print_conf()
1706 static int raid10_spare_active(struct mddev *mddev) in raid10_spare_active() argument
1709 struct r10conf *conf = mddev->private; in raid10_spare_active()
1747 mddev->degraded -= count; in raid10_spare_active()
1754 static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev) in raid10_add_disk() argument
1756 struct r10conf *conf = mddev->private; in raid10_add_disk()
1762 if (mddev->recovery_cp < MaxSector) in raid10_add_disk()
1770 if (md_integrity_add_rdev(rdev, mddev)) in raid10_add_disk()
1784 if (p->recovery_disabled == mddev->recovery_disabled) in raid10_add_disk()
1794 if (mddev->gendisk) in raid10_add_disk()
1795 disk_stack_limits(mddev->gendisk, rdev->bdev, in raid10_add_disk()
1802 if (mddev->gendisk) in raid10_add_disk()
1803 disk_stack_limits(mddev->gendisk, rdev->bdev, in raid10_add_disk()
1807 p->recovery_disabled = mddev->recovery_disabled - 1; in raid10_add_disk()
1815 if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev))) in raid10_add_disk()
1816 blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue); in raid10_add_disk()
1822 static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev) in raid10_remove_disk() argument
1824 struct r10conf *conf = mddev->private; in raid10_remove_disk()
1847 mddev->recovery_disabled != p->recovery_disabled && in raid10_remove_disk()
1875 err = md_integrity_register(mddev); in raid10_remove_disk()
1885 struct r10conf *conf = r10_bio->mddev->private; in __end_sync_read()
1899 rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev); in __end_sync_read()
1912 struct r10conf *conf = r10_bio->mddev->private; in end_sync_read()
1928 struct mddev *mddev = r10_bio->mddev; in end_sync_request() local
1939 md_done_sync(mddev, s, 1); in end_sync_request()
1956 struct mddev *mddev = r10_bio->mddev; in end_sync_write() local
1957 struct r10conf *conf = mddev->private; in end_sync_write()
1973 md_error(mddev, rdev); in end_sync_write()
1978 &rdev->mddev->recovery); in end_sync_write()
1987 rdev_dec_pending(rdev, mddev); in end_sync_write()
2008 static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) in sync_request_write() argument
2010 struct r10conf *conf = mddev->private; in sync_request_write()
2067 atomic64_add(r10_bio->sectors, &mddev->resync_mismatches); in sync_request_write()
2068 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) in sync_request_write()
2073 md_error(rdev->mddev, rdev); in sync_request_write()
2126 md_done_sync(mddev, r10_bio->sectors, 1); in sync_request_write()
2150 struct mddev *mddev = r10_bio->mddev; in fix_recovery_read_error() local
2151 struct r10conf *conf = mddev->private; in fix_recovery_read_error()
2189 &rdev->mddev->recovery); in fix_recovery_read_error()
2207 mdname(mddev)); in fix_recovery_read_error()
2210 = mddev->recovery_disabled; in fix_recovery_read_error()
2212 &mddev->recovery); in fix_recovery_read_error()
2224 static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio) in recovery_request_write() argument
2226 struct r10conf *conf = mddev->private; in recovery_request_write()
2268 static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev) in check_decay_read_errors() argument
2314 &rdev->mddev->recovery); in r10_sync_page_io()
2318 md_error(rdev->mddev, rdev); in r10_sync_page_io()
2330 static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10bio *r10_bio) in fix_read_error() argument
2335 int max_read_errors = atomic_read(&mddev->max_corr_read_errors); in fix_read_error()
2348 check_decay_read_errors(mddev, rdev); in fix_read_error()
2355 mdname(mddev), b, in fix_read_error()
2358 mdname(mddev), b); in fix_read_error()
2359 md_error(mddev, rdev); in fix_read_error()
2393 rdev_dec_pending(rdev, mddev); in fix_read_error()
2417 md_error(mddev, rdev); in fix_read_error()
2449 mdname(mddev), s, in fix_read_error()
2456 mdname(mddev), in fix_read_error()
2459 rdev_dec_pending(rdev, mddev); in fix_read_error()
2486 mdname(mddev), s, in fix_read_error()
2492 mdname(mddev), in fix_read_error()
2497 mdname(mddev), s, in fix_read_error()
2505 rdev_dec_pending(rdev, mddev); in fix_read_error()
2518 struct mddev *mddev = r10_bio->mddev; in narrow_write_error() local
2519 struct r10conf *conf = mddev->private; in narrow_write_error()
2554 wbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set); in narrow_write_error()
2576 static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio) in handle_read_error() argument
2580 struct r10conf *conf = mddev->private; in handle_read_error()
2595 if (mddev->ro) in handle_read_error()
2599 fix_read_error(conf, mddev, r10_bio); in handle_read_error()
2602 md_error(mddev, rdev); in handle_read_error()
2604 rdev_dec_pending(rdev, mddev); in handle_read_error()
2607 raid10_read_request(mddev, r10_bio->master_bio, r10_bio); in handle_read_error()
2639 md_error(conf->mddev, rdev); in handle_write_completed()
2656 md_error(conf->mddev, rdev); in handle_write_completed()
2671 rdev_dec_pending(rdev, conf->mddev); in handle_write_completed()
2675 md_error(conf->mddev, rdev); in handle_write_completed()
2679 rdev_dec_pending(rdev, conf->mddev); in handle_write_completed()
2688 rdev_dec_pending(rdev, conf->mddev); in handle_write_completed()
2701 md_wakeup_thread(conf->mddev->thread); in handle_write_completed()
2713 struct mddev *mddev = thread->mddev; in raid10d() local
2716 struct r10conf *conf = mddev->private; in raid10d()
2720 md_check_recovery(mddev); in raid10d()
2723 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) { in raid10d()
2726 if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) { in raid10d()
2737 if (mddev->degraded) in raid10d()
2762 mddev = r10_bio->mddev; in raid10d()
2763 conf = mddev->private; in raid10d()
2768 reshape_request_write(mddev, r10_bio); in raid10d()
2770 sync_request_write(mddev, r10_bio); in raid10d()
2772 recovery_request_write(mddev, r10_bio); in raid10d()
2774 handle_read_error(mddev, r10_bio); in raid10d()
2779 if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING)) in raid10d()
2780 md_check_recovery(mddev); in raid10d()
2811 if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) || in raid10_alloc_init_r10buf()
2812 test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery)) in raid10_alloc_init_r10buf()
2858 window_size = (chunks + extra_chunk) * conf->mddev->chunk_sectors; in raid10_set_cluster_sync_high()
2901 static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, in raid10_sync_request() argument
2904 struct r10conf *conf = mddev->private; in raid10_sync_request()
2924 if (mddev->bitmap == NULL && in raid10_sync_request()
2925 mddev->recovery_cp == MaxSector && in raid10_sync_request()
2926 mddev->reshape_position == MaxSector && in raid10_sync_request()
2927 !test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && in raid10_sync_request()
2928 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && in raid10_sync_request()
2929 !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && in raid10_sync_request()
2932 return mddev->dev_sectors - sector_nr; in raid10_sync_request()
2936 max_sector = mddev->dev_sectors; in raid10_sync_request()
2937 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || in raid10_sync_request()
2938 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) in raid10_sync_request()
2939 max_sector = mddev->resync_max_sectors; in raid10_sync_request()
2953 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { in raid10_sync_request()
2959 if (mddev->curr_resync < max_sector) { /* aborted */ in raid10_sync_request()
2960 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) in raid10_sync_request()
2961 md_bitmap_end_sync(mddev->bitmap, mddev->curr_resync, in raid10_sync_request()
2965 raid10_find_virt(conf, mddev->curr_resync, i); in raid10_sync_request()
2966 md_bitmap_end_sync(mddev->bitmap, sect, in raid10_sync_request()
2971 if ((!mddev->bitmap || conf->fullsync) in raid10_sync_request()
2973 && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { in raid10_sync_request()
2988 md_bitmap_close_sync(mddev->bitmap); in raid10_sync_request()
2994 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) in raid10_sync_request()
2995 return reshape_request(mddev, sector_nr, skipped); in raid10_sync_request()
3005 if (max_sector > mddev->resync_max) in raid10_sync_request()
3006 max_sector = mddev->resync_max; /* Don't do IO beyond here */ in raid10_sync_request()
3038 if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { in raid10_sync_request()
3075 if (sect >= mddev->resync_max_sectors) { in raid10_sync_request()
3088 must_sync = md_bitmap_start_sync(mddev->bitmap, sect, in raid10_sync_request()
3115 r10_bio->mddev = mddev; in raid10_sync_request()
3134 must_sync = md_bitmap_start_sync(mddev->bitmap, sect, in raid10_sync_request()
3250 &mddev->recovery)) in raid10_sync_request()
3252 mdname(mddev)); in raid10_sync_request()
3254 = mddev->recovery_disabled; in raid10_sync_request()
3260 rdev_dec_pending(mrdev, mddev); in raid10_sync_request()
3262 rdev_dec_pending(mreplace, mddev); in raid10_sync_request()
3265 rdev_dec_pending(mrdev, mddev); in raid10_sync_request()
3267 rdev_dec_pending(mreplace, mddev); in raid10_sync_request()
3306 md_bitmap_cond_end_sync(mddev->bitmap, sector_nr, in raid10_sync_request()
3307 mddev_is_clustered(mddev) && in raid10_sync_request()
3310 if (!md_bitmap_start_sync(mddev->bitmap, sector_nr, in raid10_sync_request()
3311 &sync_blocks, mddev->degraded) && in raid10_sync_request()
3313 &mddev->recovery)) { in raid10_sync_request()
3323 r10_bio->mddev = mddev; in raid10_sync_request()
3405 mddev); in raid10_sync_request()
3410 mddev); in raid10_sync_request()
3442 if (mddev_is_clustered(mddev) && in raid10_sync_request()
3443 test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { in raid10_sync_request()
3446 conf->cluster_sync_low = mddev->curr_resync_completed; in raid10_sync_request()
3449 md_cluster_ops->resync_info_update(mddev, in raid10_sync_request()
3453 } else if (mddev_is_clustered(mddev)) { in raid10_sync_request()
3473 mddev->curr_resync_completed, i); in raid10_sync_request()
3482 md_cluster_ops->resync_info_update(mddev, in raid10_sync_request()
3507 md_done_sync(mddev, sectors_skipped, 1); in raid10_sync_request()
3525 raid10_size(struct mddev *mddev, sector_t sectors, int raid_disks) in raid10_size() argument
3528 struct r10conf *conf = mddev->private; in raid10_size()
3575 static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new) in setup_geo() argument
3581 layout = mddev->layout; in setup_geo()
3582 chunk = mddev->chunk_sectors; in setup_geo()
3583 disks = mddev->raid_disks - mddev->delta_disks; in setup_geo()
3586 layout = mddev->new_layout; in setup_geo()
3587 chunk = mddev->new_chunk_sectors; in setup_geo()
3588 disks = mddev->raid_disks; in setup_geo()
3593 layout = mddev->new_layout; in setup_geo()
3594 chunk = mddev->new_chunk_sectors; in setup_geo()
3595 disks = mddev->raid_disks + mddev->delta_disks; in setup_geo()
3631 static struct r10conf *setup_conf(struct mddev *mddev) in setup_conf() argument
3638 copies = setup_geo(&geo, mddev, geo_new); in setup_conf()
3642 mdname(mddev), PAGE_SIZE); in setup_conf()
3646 if (copies < 2 || copies > mddev->raid_disks) { in setup_conf()
3648 mdname(mddev), mddev->new_layout); in setup_conf()
3658 conf->mirrors = kcalloc(mddev->raid_disks + max(0, -mddev->delta_disks), in setup_conf()
3679 calc_sectors(conf, mddev->dev_sectors); in setup_conf()
3680 if (mddev->reshape_position == MaxSector) { in setup_conf()
3684 if (setup_geo(&conf->prev, mddev, geo_old) != conf->copies) { in setup_conf()
3688 conf->reshape_progress = mddev->reshape_position; in setup_conf()
3705 conf->thread = md_register_thread(raid10d, mddev, "raid10"); in setup_conf()
3709 conf->mddev = mddev; in setup_conf()
3723 static int raid10_run(struct mddev *mddev) in raid10_run() argument
3734 if (mddev_init_writes_pending(mddev) < 0) in raid10_run()
3737 if (mddev->private == NULL) { in raid10_run()
3738 conf = setup_conf(mddev); in raid10_run()
3741 mddev->private = conf; in raid10_run()
3743 conf = mddev->private; in raid10_run()
3747 if (mddev_is_clustered(conf->mddev)) { in raid10_run()
3750 fc = (mddev->layout >> 8) & 255; in raid10_run()
3751 fo = mddev->layout & (1<<16); in raid10_run()
3759 mddev->thread = conf->thread; in raid10_run()
3762 chunk_size = mddev->chunk_sectors << 9; in raid10_run()
3763 if (mddev->queue) { in raid10_run()
3764 blk_queue_max_discard_sectors(mddev->queue, in raid10_run()
3765 mddev->chunk_sectors); in raid10_run()
3766 blk_queue_max_write_same_sectors(mddev->queue, 0); in raid10_run()
3767 blk_queue_max_write_zeroes_sectors(mddev->queue, 0); in raid10_run()
3768 blk_queue_io_min(mddev->queue, chunk_size); in raid10_run()
3770 blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks); in raid10_run()
3772 blk_queue_io_opt(mddev->queue, chunk_size * in raid10_run()
3776 rdev_for_each(rdev, mddev) { in raid10_run()
3797 if (!mddev->reshape_backwards) in raid10_run()
3804 if (mddev->gendisk) in raid10_run()
3805 disk_stack_limits(mddev->gendisk, rdev->bdev, in raid10_run()
3815 if (mddev->queue) { in raid10_run()
3818 mddev->queue); in raid10_run()
3821 mddev->queue); in raid10_run()
3826 mdname(mddev)); in raid10_run()
3840 mddev->degraded = 0; in raid10_run()
3858 mddev->degraded++; in raid10_run()
3870 disk->recovery_disabled = mddev->recovery_disabled - 1; in raid10_run()
3873 if (mddev->recovery_cp != MaxSector) in raid10_run()
3875 mdname(mddev)); in raid10_run()
3877 mdname(mddev), conf->geo.raid_disks - mddev->degraded, in raid10_run()
3882 mddev->dev_sectors = conf->dev_sectors; in raid10_run()
3883 size = raid10_size(mddev, 0, 0); in raid10_run()
3884 md_set_array_sectors(mddev, size); in raid10_run()
3885 mddev->resync_max_sectors = size; in raid10_run()
3886 set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags); in raid10_run()
3888 if (mddev->queue) { in raid10_run()
3890 ((mddev->chunk_sectors << 9) / PAGE_SIZE); in raid10_run()
3897 if (mddev->queue->backing_dev_info->ra_pages < 2 * stripe) in raid10_run()
3898 mddev->queue->backing_dev_info->ra_pages = 2 * stripe; in raid10_run()
3901 if (md_integrity_register(mddev)) in raid10_run()
3919 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); in raid10_run()
3920 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); in raid10_run()
3921 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); in raid10_run()
3922 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); in raid10_run()
3923 mddev->sync_thread = md_register_thread(md_do_sync, mddev, in raid10_run()
3925 if (!mddev->sync_thread) in raid10_run()
3932 md_unregister_thread(&mddev->thread); in raid10_run()
3937 mddev->private = NULL; in raid10_run()
3942 static void raid10_free(struct mddev *mddev, void *priv) in raid10_free() argument
3955 static void raid10_quiesce(struct mddev *mddev, int quiesce) in raid10_quiesce() argument
3957 struct r10conf *conf = mddev->private; in raid10_quiesce()
3965 static int raid10_resize(struct mddev *mddev, sector_t sectors) in raid10_resize() argument
3979 struct r10conf *conf = mddev->private; in raid10_resize()
3982 if (mddev->reshape_position != MaxSector) in raid10_resize()
3988 oldsize = raid10_size(mddev, 0, 0); in raid10_resize()
3989 size = raid10_size(mddev, sectors, 0); in raid10_resize()
3990 if (mddev->external_size && in raid10_resize()
3991 mddev->array_sectors > size) in raid10_resize()
3993 if (mddev->bitmap) { in raid10_resize()
3994 int ret = md_bitmap_resize(mddev->bitmap, size, 0, 0); in raid10_resize()
3998 md_set_array_sectors(mddev, size); in raid10_resize()
3999 if (sectors > mddev->dev_sectors && in raid10_resize()
4000 mddev->recovery_cp > oldsize) { in raid10_resize()
4001 mddev->recovery_cp = oldsize; in raid10_resize()
4002 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in raid10_resize()
4005 mddev->dev_sectors = conf->dev_sectors; in raid10_resize()
4006 mddev->resync_max_sectors = size; in raid10_resize()
4010 static void *raid10_takeover_raid0(struct mddev *mddev, sector_t size, int devs) in raid10_takeover_raid0() argument
4015 if (mddev->degraded > 0) { in raid10_takeover_raid0()
4017 mdname(mddev)); in raid10_takeover_raid0()
4023 mddev->new_level = 10; in raid10_takeover_raid0()
4025 mddev->new_layout = (1<<8) + 2; in raid10_takeover_raid0()
4026 mddev->new_chunk_sectors = mddev->chunk_sectors; in raid10_takeover_raid0()
4027 mddev->delta_disks = mddev->raid_disks; in raid10_takeover_raid0()
4028 mddev->raid_disks *= 2; in raid10_takeover_raid0()
4030 mddev->recovery_cp = MaxSector; in raid10_takeover_raid0()
4031 mddev->dev_sectors = size; in raid10_takeover_raid0()
4033 conf = setup_conf(mddev); in raid10_takeover_raid0()
4035 rdev_for_each(rdev, mddev) in raid10_takeover_raid0()
4046 static void *raid10_takeover(struct mddev *mddev) in raid10_takeover() argument
4053 if (mddev->level == 0) { in raid10_takeover()
4055 raid0_conf = mddev->private; in raid10_takeover()
4058 mdname(mddev)); in raid10_takeover()
4061 return raid10_takeover_raid0(mddev, in raid10_takeover()
4068 static int raid10_check_reshape(struct mddev *mddev) in raid10_check_reshape() argument
4084 struct r10conf *conf = mddev->private; in raid10_check_reshape()
4090 if (setup_geo(&geo, mddev, geo_start) != conf->copies) in raid10_check_reshape()
4097 if (mddev->array_sectors & geo.chunk_mask) in raid10_check_reshape()
4106 if (mddev->delta_disks > 0) { in raid10_check_reshape()
4109 kcalloc(mddev->raid_disks + mddev->delta_disks, in raid10_check_reshape()
4175 static int raid10_start_reshape(struct mddev *mddev) in raid10_start_reshape() argument
4191 struct r10conf *conf = mddev->private; in raid10_start_reshape()
4196 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in raid10_start_reshape()
4199 if (setup_geo(&new, mddev, geo_start) != conf->copies) in raid10_start_reshape()
4207 rdev_for_each(rdev, mddev) { in raid10_start_reshape()
4214 if (!mddev->reshape_backwards) in raid10_start_reshape()
4227 if (spares < mddev->delta_disks) in raid10_start_reshape()
4241 setup_geo(&conf->geo, mddev, geo_start); in raid10_start_reshape()
4243 if (mddev->reshape_backwards) { in raid10_start_reshape()
4244 sector_t size = raid10_size(mddev, 0, 0); in raid10_start_reshape()
4245 if (size < mddev->array_sectors) { in raid10_start_reshape()
4248 mdname(mddev)); in raid10_start_reshape()
4251 mddev->resync_max_sectors = size; in raid10_start_reshape()
4258 if (mddev->delta_disks && mddev->bitmap) { in raid10_start_reshape()
4262 oldsize = raid10_size(mddev, 0, 0); in raid10_start_reshape()
4263 newsize = raid10_size(mddev, 0, conf->geo.raid_disks); in raid10_start_reshape()
4265 if (!mddev_is_clustered(mddev)) { in raid10_start_reshape()
4266 ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0); in raid10_start_reshape()
4273 rdev_for_each(rdev, mddev) { in raid10_start_reshape()
4288 ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0); in raid10_start_reshape()
4292 ret = md_cluster_ops->resize_bitmaps(mddev, newsize, oldsize); in raid10_start_reshape()
4294 md_bitmap_resize(mddev->bitmap, oldsize, 0, 0); in raid10_start_reshape()
4299 if (mddev->delta_disks > 0) { in raid10_start_reshape()
4300 rdev_for_each(rdev, mddev) in raid10_start_reshape()
4303 if (raid10_add_disk(mddev, rdev) == 0) { in raid10_start_reshape()
4310 if (sysfs_link_rdev(mddev, rdev)) in raid10_start_reshape()
4324 mddev->degraded = calc_degraded(conf); in raid10_start_reshape()
4326 mddev->raid_disks = conf->geo.raid_disks; in raid10_start_reshape()
4327 mddev->reshape_position = conf->reshape_progress; in raid10_start_reshape()
4328 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in raid10_start_reshape()
4330 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); in raid10_start_reshape()
4331 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); in raid10_start_reshape()
4332 clear_bit(MD_RECOVERY_DONE, &mddev->recovery); in raid10_start_reshape()
4333 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); in raid10_start_reshape()
4334 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); in raid10_start_reshape()
4336 mddev->sync_thread = md_register_thread(md_do_sync, mddev, in raid10_start_reshape()
4338 if (!mddev->sync_thread) { in raid10_start_reshape()
4343 md_wakeup_thread(mddev->sync_thread); in raid10_start_reshape()
4344 md_new_event(mddev); in raid10_start_reshape()
4348 mddev->recovery = 0; in raid10_start_reshape()
4351 mddev->raid_disks = conf->geo.raid_disks; in raid10_start_reshape()
4352 rdev_for_each(rdev, mddev) in raid10_start_reshape()
4357 mddev->reshape_position = MaxSector; in raid10_start_reshape()
4393 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, in reshape_request() argument
4433 struct r10conf *conf = mddev->private; in reshape_request()
4448 if (mddev->reshape_backwards && in reshape_request()
4449 conf->reshape_progress < raid10_size(mddev, 0, 0)) { in reshape_request()
4450 sector_nr = (raid10_size(mddev, 0, 0) in reshape_request()
4452 } else if (!mddev->reshape_backwards && in reshape_request()
4456 mddev->curr_resync_completed = sector_nr; in reshape_request()
4457 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); in reshape_request()
4467 if (mddev->reshape_backwards) { in reshape_request()
4517 mddev->reshape_position = conf->reshape_progress; in reshape_request()
4518 if (mddev->reshape_backwards) in reshape_request()
4519 mddev->curr_resync_completed = raid10_size(mddev, 0, 0) in reshape_request()
4522 mddev->curr_resync_completed = conf->reshape_progress; in reshape_request()
4524 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in reshape_request()
4525 md_wakeup_thread(mddev->thread); in reshape_request()
4526 wait_event(mddev->sb_wait, mddev->sb_flags == 0 || in reshape_request()
4527 test_bit(MD_RECOVERY_INTR, &mddev->recovery)); in reshape_request()
4528 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { in reshape_request()
4532 conf->reshape_safe = mddev->reshape_position; in reshape_request()
4543 r10_bio->mddev = mddev; in reshape_request()
4556 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in reshape_request()
4560 read_bio = bio_alloc_mddev(GFP_KERNEL, RESYNC_PAGES, mddev); in reshape_request()
4579 if (mddev_is_clustered(mddev) && conf->cluster_sync_high <= sector_nr) { in reshape_request()
4597 md_cluster_ops->resync_info_update(mddev, conf->cluster_sync_low, in reshape_request()
4667 if (mddev->reshape_backwards) in reshape_request()
4676 static int handle_reshape_read_error(struct mddev *mddev,
4678 static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio) in reshape_request_write() argument
4685 struct r10conf *conf = mddev->private; in reshape_request_write()
4689 if (handle_reshape_read_error(mddev, r10_bio) < 0) { in reshape_request_write()
4691 md_done_sync(mddev, r10_bio->sectors, 0); in reshape_request_write()
4727 if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) in end_reshape()
4732 md_finish_reshape(conf->mddev); in end_reshape()
4741 if (conf->mddev->queue) { in end_reshape()
4743 ((conf->mddev->chunk_sectors << 9) / PAGE_SIZE); in end_reshape()
4745 if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe) in end_reshape()
4746 conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe; in end_reshape()
4751 static void raid10_update_reshape_pos(struct mddev *mddev) in raid10_update_reshape_pos() argument
4753 struct r10conf *conf = mddev->private; in raid10_update_reshape_pos()
4756 md_cluster_ops->resync_info_get(mddev, &lo, &hi); in raid10_update_reshape_pos()
4757 if (((mddev->reshape_position <= hi) && (mddev->reshape_position >= lo)) in raid10_update_reshape_pos()
4758 || mddev->reshape_position == MaxSector) in raid10_update_reshape_pos()
4759 conf->reshape_progress = mddev->reshape_position; in raid10_update_reshape_pos()
4764 static int handle_reshape_read_error(struct mddev *mddev, in handle_reshape_read_error() argument
4769 struct r10conf *conf = mddev->private; in handle_reshape_read_error()
4777 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in handle_reshape_read_error()
4813 rdev_dec_pending(rdev, mddev); in handle_reshape_read_error()
4828 &mddev->recovery); in handle_reshape_read_error()
4842 struct mddev *mddev = r10_bio->mddev; in end_reshape_write() local
4843 struct r10conf *conf = mddev->private; in end_reshape_write()
4859 md_error(mddev, rdev); in end_reshape_write()
4862 rdev_dec_pending(rdev, mddev); in end_reshape_write()
4870 md_done_sync(r10_bio->mddev, r10_bio->sectors, 1); in end_reshape_request()
4875 static void raid10_finish_reshape(struct mddev *mddev) in raid10_finish_reshape() argument
4877 struct r10conf *conf = mddev->private; in raid10_finish_reshape()
4879 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) in raid10_finish_reshape()
4882 if (mddev->delta_disks > 0) { in raid10_finish_reshape()
4883 if (mddev->recovery_cp > mddev->resync_max_sectors) { in raid10_finish_reshape()
4884 mddev->recovery_cp = mddev->resync_max_sectors; in raid10_finish_reshape()
4885 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in raid10_finish_reshape()
4887 mddev->resync_max_sectors = mddev->array_sectors; in raid10_finish_reshape()
4892 d < conf->geo.raid_disks - mddev->delta_disks; in raid10_finish_reshape()
4903 mddev->layout = mddev->new_layout; in raid10_finish_reshape()
4904 mddev->chunk_sectors = 1 << conf->geo.chunk_shift; in raid10_finish_reshape()
4905 mddev->reshape_position = MaxSector; in raid10_finish_reshape()
4906 mddev->delta_disks = 0; in raid10_finish_reshape()
4907 mddev->reshape_backwards = 0; in raid10_finish_reshape()