Lines Matching refs:mddev
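
Every match below follows the same pattern: the RAID4/5/6 per-array state (struct r5conf) keeps a back-pointer to the generic md array (struct mddev), and call sites reach md-core state such as the worker thread, sb_flags, the bitmap or the request queue through conf->mddev (or directly through an mddev argument). A minimal, self-contained sketch of that back-pointer relationship follows; the reduced struct layouts and the wake_owner_thread() helper are illustrative stand-ins, not the kernel definitions (those live in drivers/md/md.h and drivers/md/raid5.h).

    /* Hedged sketch: a pared-down model of the conf->mddev back-pointer that
     * the matches below dereference.  All names are illustrative stand-ins for
     * the kernel's struct mddev / struct r5conf; only the shape is the same. */
    #include <stdio.h>
    #include <stdbool.h>

    struct md_thread_model {              /* stands in for struct md_thread */
            const char *name;
            bool woken;
    };

    struct mddev_model {                  /* stands in for struct mddev */
            struct md_thread_model *thread;   /* per-array worker (raid5d) */
            unsigned long sb_flags;           /* superblock-update flags */
    };

    struct r5conf_model {                 /* stands in for struct r5conf */
            struct mddev_model *mddev;        /* back-pointer used throughout */
    };

    /* mirrors the md_wakeup_thread(conf->mddev->thread) calls in the listing */
    static void wake_owner_thread(struct r5conf_model *conf)
    {
            if (conf->mddev && conf->mddev->thread)
                    conf->mddev->thread->woken = true;
    }

    int main(void)
    {
            struct md_thread_model raid5d = { .name = "raid5d", .woken = false };
            struct mddev_model array = { .thread = &raid5d, .sb_flags = 0 };
            struct r5conf_model conf = { .mddev = &array };

            wake_owner_thread(&conf);
            printf("%s woken: %d\n", raid5d.name, raid5d.woken);
            return 0;
    }
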

190 md_wakeup_thread(conf->mddev->thread); in raid5_wakeup_stripe_thread()
262 md_wakeup_thread(conf->mddev->thread); in do_release_stripe()
268 md_wakeup_thread(conf->mddev->thread); in do_release_stripe()
349 md_wakeup_thread(conf->mddev->thread); in release_inactive_stripe_list()
395 if (unlikely(!conf->mddev->thread) || in raid5_release_stripe()
400 md_wakeup_thread(conf->mddev->thread); in raid5_release_stripe()
616 if (conf->mddev->reshape_position == MaxSector) in has_failed()
617 return conf->mddev->degraded > conf->max_degraded; in has_failed()
832 md_wakeup_thread(conf->mddev->thread); in stripe_add_to_batch_list()
1067 if (!conf->mddev->external && in ops_run_io()
1068 conf->mddev->sb_flags) { in ops_run_io()
1073 md_check_recovery(conf->mddev); in ops_run_io()
1081 md_wait_for_blocked_rdev(rdev, conf->mddev); in ops_run_io()
1084 rdev_dec_pending(rdev, conf->mddev); in ops_run_io()
1147 if (conf->mddev->gendisk) in ops_run_io()
1149 bi, disk_devt(conf->mddev->gendisk), in ops_run_io()
1197 if (conf->mddev->gendisk) in ops_run_io()
1199 rbi, disk_devt(conf->mddev->gendisk), in ops_run_io()
2194 if (conf->mddev->gendisk) in grow_stripes()
2196 "raid%d-%s", conf->level, mdname(conf->mddev)); in grow_stripes()
2199 "raid%d-%p", conf->level, conf->mddev); in grow_stripes()
2262 mddev_suspend(conf->mddev); in resize_chunks()
2277 mddev_resume(conf->mddev); in resize_chunks()
2318 md_allow_write(conf->mddev); in resize_stripes()
2505 mdname(conf->mddev), STRIPE_SECTORS, in raid5_end_read_request()
2534 mdname(conf->mddev), in raid5_end_read_request()
2537 else if (conf->mddev->degraded >= conf->max_degraded) { in raid5_end_read_request()
2541 mdname(conf->mddev), in raid5_end_read_request()
2549 mdname(conf->mddev), in raid5_end_read_request()
2556 mdname(conf->mddev), in raid5_end_read_request()
2560 mdname(conf->mddev), bdn); in raid5_end_read_request()
2582 md_error(conf->mddev, rdev); in raid5_end_read_request()
2585 rdev_dec_pending(rdev, conf->mddev); in raid5_end_read_request()
2631 md_error(conf->mddev, rdev); in raid5_end_write_request()
2643 &rdev->mddev->recovery); in raid5_end_write_request()
2656 rdev_dec_pending(rdev, conf->mddev); in raid5_end_write_request()
2671 static void raid5_error(struct mddev *mddev, struct md_rdev *rdev) in raid5_error() argument
2674 struct r5conf *conf = mddev->private; in raid5_error()
2681 mddev->degraded == conf->max_degraded) { in raid5_error()
2686 conf->recovery_disabled = mddev->recovery_disabled; in raid5_error()
2693 mddev->degraded = raid5_calc_degraded(conf); in raid5_error()
2695 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in raid5_error()
2698 set_mask_bits(&mddev->sb_flags, 0, in raid5_error()
2702 mdname(mddev), in raid5_error()
2704 mdname(mddev), in raid5_error()
2705 conf->raid_disks - mddev->degraded); in raid5_error()
2706 r5c_update_on_rdev_error(mddev, rdev); in raid5_error()
3029 mdname(conf->mddev)); in raid5_compute_blocknr()
3273 md_write_inc(conf->mddev, bi); in add_stripe_bio()
3294 if (conf->mddev->bitmap && firstwrite) { in add_stripe_bio()
3309 md_bitmap_startwrite(conf->mddev->bitmap, sh->sector, in add_stripe_bio()
3373 md_error(conf->mddev, rdev); in handle_failed_stripe()
3374 rdev_dec_pending(rdev, conf->mddev); in handle_failed_stripe()
3395 md_write_end(conf->mddev); in handle_failed_stripe()
3400 md_bitmap_endwrite(conf->mddev->bitmap, sh->sector, in handle_failed_stripe()
3416 md_write_end(conf->mddev); in handle_failed_stripe()
3446 md_bitmap_endwrite(conf->mddev->bitmap, sh->sector, in handle_failed_stripe()
3458 md_wakeup_thread(conf->mddev->thread); in handle_failed_stripe()
3481 if (test_bit(MD_RECOVERY_RECOVER, &conf->mddev->recovery)) { in handle_failed_sync()
3505 conf->mddev->recovery_disabled; in handle_failed_sync()
3507 md_done_sync(conf->mddev, STRIPE_SECTORS, !abort); in handle_failed_sync()
3521 || rdev->mddev->recovery_cp <= sh->sector)) in want_replace()
3603 sh->sector < sh->raid_conf->mddev->recovery_cp) in need_this_block()
3783 md_write_end(conf->mddev); in handle_stripe_clean_event()
3787 md_bitmap_endwrite(conf->mddev->bitmap, sh->sector, in handle_stripe_clean_event()
3844 md_wakeup_thread(conf->mddev->thread); in handle_stripe_clean_event()
3871 sector_t recovery_cp = conf->mddev->recovery_cp; in handle_stripe_dirtying()
3922 if (conf->mddev->queue) in handle_stripe_dirtying()
3923 blk_add_trace_msg(conf->mddev->queue, in handle_stripe_dirtying()
4006 if (rcw && conf->mddev->queue) in handle_stripe_dirtying()
4007 blk_add_trace_msg(conf->mddev->queue, "raid5 rcw %llu %d %d %d", in handle_stripe_dirtying()
4095 atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches); in handle_parity_checks5()
4096 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) { in handle_parity_checks5()
4100 "%llu-%llu\n", mdname(conf->mddev), in handle_parity_checks5()
4222 mdname(conf->mddev), in handle_parity_checks6()
4260 atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches); in handle_parity_checks6()
4261 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) { in handle_parity_checks6()
4265 "%llu-%llu\n", mdname(conf->mddev), in handle_parity_checks6()
4560 sh->sector >= conf->mddev->recovery_cp || in analyse_stripe()
4561 test_bit(MD_RECOVERY_REQUESTED, &(conf->mddev->recovery))) in analyse_stripe()
4723 test_bit(MD_SB_CHANGE_PENDING, &conf->mddev->sb_flags)) { in handle_stripe()
4735 rdev_dec_pending(s.blocked_rdev, conf->mddev); in handle_stripe()
4923 md_done_sync(conf->mddev, STRIPE_SECTORS, 1); in handle_stripe()
4932 if (s.failed <= conf->max_degraded && !conf->mddev->ro) in handle_stripe()
4991 md_done_sync(conf->mddev, STRIPE_SECTORS, 1); in handle_stripe()
5001 if (conf->mddev->external) in handle_stripe()
5003 conf->mddev); in handle_stripe()
5010 conf->mddev); in handle_stripe()
5022 md_error(conf->mddev, rdev); in handle_stripe()
5023 rdev_dec_pending(rdev, conf->mddev); in handle_stripe()
5029 rdev_dec_pending(rdev, conf->mddev); in handle_stripe()
5038 rdev_dec_pending(rdev, conf->mddev); in handle_stripe()
5055 md_wakeup_thread(conf->mddev->thread); in handle_stripe()
5095 static int raid5_congested(struct mddev *mddev, int bits) in raid5_congested() argument
5097 struct r5conf *conf = mddev->private; in raid5_congested()
5117 static int in_chunk_boundary(struct mddev *mddev, struct bio *bio) in in_chunk_boundary() argument
5119 struct r5conf *conf = mddev->private; in in_chunk_boundary()
5145 md_wakeup_thread(conf->mddev->thread); in add_bio_to_retry()
5178 struct mddev *mddev; in raid5_align_endio() local
5187 mddev = rdev->mddev; in raid5_align_endio()
5188 conf = mddev->private; in raid5_align_endio()
5190 rdev_dec_pending(rdev, conf->mddev); in raid5_align_endio()
5204 static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio) in raid5_read_one_chunk() argument
5206 struct r5conf *conf = mddev->private; in raid5_read_one_chunk()
5212 if (!in_chunk_boundary(mddev, raid_bio)) { in raid5_read_one_chunk()
5219 align_bi = bio_clone_fast(raid_bio, GFP_NOIO, &mddev->bio_set); in raid5_read_one_chunk()
5267 rdev_dec_pending(rdev, mddev); in raid5_read_one_chunk()
5281 if (mddev->gendisk) in raid5_read_one_chunk()
5283 align_bi, disk_devt(mddev->gendisk), in raid5_read_one_chunk()
5294 static struct bio *chunk_aligned_read(struct mddev *mddev, struct bio *raid_bio) in chunk_aligned_read() argument
5298 unsigned chunk_sects = mddev->chunk_sectors; in chunk_aligned_read()
5302 struct r5conf *conf = mddev->private; in chunk_aligned_read()
5309 if (!raid5_read_one_chunk(mddev, raid_bio)) in chunk_aligned_read()
5428 struct mddev *mddev = cb->cb.data; in raid5_unplug() local
5429 struct r5conf *conf = mddev->private; in raid5_unplug()
5457 if (mddev->queue) in raid5_unplug()
5458 trace_block_unplug(mddev->queue, cnt, !from_schedule); in raid5_unplug()
5462 static void release_stripe_plug(struct mddev *mddev, in release_stripe_plug() argument
5466 raid5_unplug, mddev, in release_stripe_plug()
5490 static void make_discard_request(struct mddev *mddev, struct bio *bi) in make_discard_request() argument
5492 struct r5conf *conf = mddev->private; in make_discard_request()
5497 if (mddev->reshape_position != MaxSector) in make_discard_request()
5551 md_write_inc(mddev, bi); in make_discard_request()
5555 if (conf->mddev->bitmap) { in make_discard_request()
5559 md_bitmap_startwrite(mddev->bitmap, in make_discard_request()
5571 release_stripe_plug(mddev, sh); in make_discard_request()
5577 static bool raid5_make_request(struct mddev *mddev, struct bio * bi) in raid5_make_request() argument
5579 struct r5conf *conf = mddev->private; in raid5_make_request()
5595 if (md_flush_request(mddev, bi)) in raid5_make_request()
5606 if (!md_write_start(mddev, bi)) in raid5_make_request()
5613 if (rw == READ && mddev->degraded == 0 && in raid5_make_request()
5614 mddev->reshape_position == MaxSector) { in raid5_make_request()
5615 bi = chunk_aligned_read(mddev, bi); in raid5_make_request()
5621 make_discard_request(mddev, bi); in raid5_make_request()
5622 md_write_end(mddev); in raid5_make_request()
5652 if (mddev->reshape_backwards in raid5_make_request()
5657 if (mddev->reshape_backwards in raid5_make_request()
5690 if (mddev->reshape_backwards in raid5_make_request()
5717 md_wakeup_thread(mddev->thread); in raid5_make_request()
5736 release_stripe_plug(mddev, sh); in raid5_make_request()
5746 md_write_end(mddev); in raid5_make_request()
5751 static sector_t raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks);
5753 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *skipped) in reshape_request() argument
5764 struct r5conf *conf = mddev->private; in reshape_request()
5781 if (mddev->reshape_backwards && in reshape_request()
5782 conf->reshape_progress < raid5_size(mddev, 0, 0)) { in reshape_request()
5783 sector_nr = raid5_size(mddev, 0, 0) in reshape_request()
5785 } else if (mddev->reshape_backwards && in reshape_request()
5789 } else if (!mddev->reshape_backwards && in reshape_request()
5794 mddev->curr_resync_completed = sector_nr; in reshape_request()
5795 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); in reshape_request()
5821 if (mddev->reshape_backwards) { in reshape_request()
5839 if (mddev->reshape_backwards) { in reshape_request()
5842 BUG_ON((mddev->dev_sectors & in reshape_request()
5877 if ((mddev->reshape_backwards in reshape_request()
5884 || test_bit(MD_RECOVERY_INTR, &mddev->recovery)); in reshape_request()
5887 mddev->reshape_position = conf->reshape_progress; in reshape_request()
5888 mddev->curr_resync_completed = sector_nr; in reshape_request()
5889 if (!mddev->reshape_backwards) in reshape_request()
5891 rdev_for_each(rdev, mddev) in reshape_request()
5899 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in reshape_request()
5900 md_wakeup_thread(mddev->thread); in reshape_request()
5901 wait_event(mddev->sb_wait, mddev->sb_flags == 0 || in reshape_request()
5902 test_bit(MD_RECOVERY_INTR, &mddev->recovery)); in reshape_request()
5903 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) in reshape_request()
5906 conf->reshape_safe = mddev->reshape_position; in reshape_request()
5909 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); in reshape_request()
5930 if (s < raid5_size(mddev, 0, 0)) { in reshape_request()
5945 if (mddev->reshape_backwards) in reshape_request()
5962 if (last_sector >= mddev->dev_sectors) in reshape_request()
5963 last_sector = mddev->dev_sectors - 1; in reshape_request()
5985 if (mddev->curr_resync_completed > mddev->resync_max || in reshape_request()
5986 (sector_nr - mddev->curr_resync_completed) * 2 in reshape_request()
5987 >= mddev->resync_max - mddev->curr_resync_completed) { in reshape_request()
5991 || test_bit(MD_RECOVERY_INTR, &mddev->recovery)); in reshape_request()
5994 mddev->reshape_position = conf->reshape_progress; in reshape_request()
5995 mddev->curr_resync_completed = sector_nr; in reshape_request()
5996 if (!mddev->reshape_backwards) in reshape_request()
5998 rdev_for_each(rdev, mddev) in reshape_request()
6005 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in reshape_request()
6006 md_wakeup_thread(mddev->thread); in reshape_request()
6007 wait_event(mddev->sb_wait, in reshape_request()
6008 !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags) in reshape_request()
6009 || test_bit(MD_RECOVERY_INTR, &mddev->recovery)); in reshape_request()
6010 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) in reshape_request()
6013 conf->reshape_safe = mddev->reshape_position; in reshape_request()
6016 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); in reshape_request()
6022 static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_nr, in raid5_sync_request() argument
6025 struct r5conf *conf = mddev->private; in raid5_sync_request()
6027 sector_t max_sector = mddev->dev_sectors; in raid5_sync_request()
6035 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { in raid5_sync_request()
6040 if (mddev->curr_resync < max_sector) /* aborted */ in raid5_sync_request()
6041 md_bitmap_end_sync(mddev->bitmap, mddev->curr_resync, in raid5_sync_request()
6045 md_bitmap_close_sync(mddev->bitmap); in raid5_sync_request()
6053 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) in raid5_sync_request()
6054 return reshape_request(mddev, sector_nr, skipped); in raid5_sync_request()
6066 if (mddev->degraded >= conf->max_degraded && in raid5_sync_request()
6067 test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { in raid5_sync_request()
6068 sector_t rv = mddev->dev_sectors - sector_nr; in raid5_sync_request()
6072 if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && in raid5_sync_request()
6074 !md_bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) && in raid5_sync_request()
6082 md_bitmap_cond_end_sync(mddev->bitmap, sector_nr, false); in raid5_sync_request()
6105 md_bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded); in raid5_sync_request()
6234 struct mddev *mddev = conf->mddev; in raid5_do_work() local
6255 wait_event_lock_irq(mddev->sb_wait, in raid5_do_work()
6256 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags), in raid5_do_work()
6282 struct mddev *mddev = thread->mddev; in raid5d() local
6283 struct r5conf *conf = mddev->private; in raid5d()
6289 md_check_recovery(mddev); in raid5d()
6308 md_bitmap_unplug(mddev->bitmap); in raid5d()
6331 if (mddev->sb_flags & ~(1 << MD_SB_CHANGE_PENDING)) { in raid5d()
6333 md_check_recovery(mddev); in raid5d()
6361 raid5_show_stripe_cache_size(struct mddev *mddev, char *page) in raid5_show_stripe_cache_size() argument
6365 spin_lock(&mddev->lock); in raid5_show_stripe_cache_size()
6366 conf = mddev->private; in raid5_show_stripe_cache_size()
6369 spin_unlock(&mddev->lock); in raid5_show_stripe_cache_size()
6374 raid5_set_cache_size(struct mddev *mddev, int size) in raid5_set_cache_size() argument
6377 struct r5conf *conf = mddev->private; in raid5_set_cache_size()
6389 md_allow_write(mddev); in raid5_set_cache_size()
6405 raid5_store_stripe_cache_size(struct mddev *mddev, const char *page, size_t len) in raid5_store_stripe_cache_size() argument
6415 err = mddev_lock(mddev); in raid5_store_stripe_cache_size()
6418 conf = mddev->private; in raid5_store_stripe_cache_size()
6422 err = raid5_set_cache_size(mddev, new); in raid5_store_stripe_cache_size()
6423 mddev_unlock(mddev); in raid5_store_stripe_cache_size()
6434 raid5_show_rmw_level(struct mddev *mddev, char *page) in raid5_show_rmw_level() argument
6436 struct r5conf *conf = mddev->private; in raid5_show_rmw_level()
6444 raid5_store_rmw_level(struct mddev *mddev, const char *page, size_t len) in raid5_store_rmw_level() argument
6446 struct r5conf *conf = mddev->private; in raid5_store_rmw_level()
6477 raid5_show_preread_threshold(struct mddev *mddev, char *page) in raid5_show_preread_threshold() argument
6481 spin_lock(&mddev->lock); in raid5_show_preread_threshold()
6482 conf = mddev->private; in raid5_show_preread_threshold()
6485 spin_unlock(&mddev->lock); in raid5_show_preread_threshold()
6490 raid5_store_preread_threshold(struct mddev *mddev, const char *page, size_t len) in raid5_store_preread_threshold() argument
6501 err = mddev_lock(mddev); in raid5_store_preread_threshold()
6504 conf = mddev->private; in raid5_store_preread_threshold()
6511 mddev_unlock(mddev); in raid5_store_preread_threshold()
6522 raid5_show_skip_copy(struct mddev *mddev, char *page) in raid5_show_skip_copy() argument
6526 spin_lock(&mddev->lock); in raid5_show_skip_copy()
6527 conf = mddev->private; in raid5_show_skip_copy()
6530 spin_unlock(&mddev->lock); in raid5_show_skip_copy()
6535 raid5_store_skip_copy(struct mddev *mddev, const char *page, size_t len) in raid5_store_skip_copy() argument
6547 err = mddev_lock(mddev); in raid5_store_skip_copy()
6550 conf = mddev->private; in raid5_store_skip_copy()
6554 mddev_suspend(mddev); in raid5_store_skip_copy()
6557 mddev->queue->backing_dev_info->capabilities |= in raid5_store_skip_copy()
6560 mddev->queue->backing_dev_info->capabilities &= in raid5_store_skip_copy()
6562 mddev_resume(mddev); in raid5_store_skip_copy()
6564 mddev_unlock(mddev); in raid5_store_skip_copy()
6574 stripe_cache_active_show(struct mddev *mddev, char *page) in stripe_cache_active_show() argument
6576 struct r5conf *conf = mddev->private; in stripe_cache_active_show()
6587 raid5_show_group_thread_cnt(struct mddev *mddev, char *page) in raid5_show_group_thread_cnt() argument
6591 spin_lock(&mddev->lock); in raid5_show_group_thread_cnt()
6592 conf = mddev->private; in raid5_show_group_thread_cnt()
6595 spin_unlock(&mddev->lock); in raid5_show_group_thread_cnt()
6604 raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len) in raid5_store_group_thread_cnt() argument
6620 err = mddev_lock(mddev); in raid5_store_group_thread_cnt()
6623 conf = mddev->private; in raid5_store_group_thread_cnt()
6627 mddev_suspend(mddev); in raid5_store_group_thread_cnt()
6647 mddev_resume(mddev); in raid5_store_group_thread_cnt()
6649 mddev_unlock(mddev); in raid5_store_group_thread_cnt()
6732 raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks) in raid5_size() argument
6734 struct r5conf *conf = mddev->private; in raid5_size()
6737 sectors = mddev->dev_sectors; in raid5_size()
6877 static struct r5conf *setup_conf(struct mddev *mddev) in setup_conf() argument
6889 if (mddev->new_level != 5 in setup_conf()
6890 && mddev->new_level != 4 in setup_conf()
6891 && mddev->new_level != 6) { in setup_conf()
6893 mdname(mddev), mddev->new_level); in setup_conf()
6896 if ((mddev->new_level == 5 in setup_conf()
6897 && !algorithm_valid_raid5(mddev->new_layout)) || in setup_conf()
6898 (mddev->new_level == 6 in setup_conf()
6899 && !algorithm_valid_raid6(mddev->new_layout))) { in setup_conf()
6901 mdname(mddev), mddev->new_layout); in setup_conf()
6904 if (mddev->new_level == 6 && mddev->raid_disks < 4) { in setup_conf()
6906 mdname(mddev), mddev->raid_disks); in setup_conf()
6910 if (!mddev->new_chunk_sectors || in setup_conf()
6911 (mddev->new_chunk_sectors << 9) % PAGE_SIZE || in setup_conf()
6912 !is_power_of_2(mddev->new_chunk_sectors)) { in setup_conf()
6914 mdname(mddev), mddev->new_chunk_sectors << 9); in setup_conf()
6955 rdev_for_each(rdev, mddev) { in setup_conf()
6965 conf->recovery_disabled = mddev->recovery_disabled - 1; in setup_conf()
6967 conf->raid_disks = mddev->raid_disks; in setup_conf()
6968 if (mddev->reshape_position == MaxSector) in setup_conf()
6969 conf->previous_raid_disks = mddev->raid_disks; in setup_conf()
6971 conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks; in setup_conf()
6989 conf->mddev = mddev; in setup_conf()
7016 conf->level = mddev->new_level; in setup_conf()
7017 conf->chunk_sectors = mddev->new_chunk_sectors; in setup_conf()
7021 pr_debug("raid456: run(%s) called.\n", mdname(mddev)); in setup_conf()
7023 rdev_for_each(rdev, mddev) { in setup_conf()
7043 mdname(mddev), bdevname(rdev->bdev, b), raid_disk); in setup_conf()
7049 conf->level = mddev->new_level; in setup_conf()
7060 conf->algorithm = mddev->new_layout; in setup_conf()
7061 conf->reshape_progress = mddev->reshape_position; in setup_conf()
7063 conf->prev_chunk_sectors = mddev->chunk_sectors; in setup_conf()
7064 conf->prev_algo = mddev->layout; in setup_conf()
7071 if (mddev->reshape_position != MaxSector) { in setup_conf()
7073 ((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4, in setup_conf()
7074 ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4); in setup_conf()
7078 mdname(mddev), conf->min_nr_stripes); in setup_conf()
7085 mdname(mddev), memory); in setup_conf()
7088 pr_debug("md/raid:%s: allocated %dkB\n", mdname(mddev), memory); in setup_conf()
7101 mdname(mddev)); in setup_conf()
7105 sprintf(pers_name, "raid%d", mddev->new_level); in setup_conf()
7106 conf->thread = md_register_thread(raid5d, mddev, pers_name); in setup_conf()
7109 mdname(mddev)); in setup_conf()
7149 static int raid5_run(struct mddev *mddev) in raid5_run() argument
7161 if (mddev_init_writes_pending(mddev) < 0) in raid5_run()
7164 if (mddev->recovery_cp != MaxSector) in raid5_run()
7166 mdname(mddev)); in raid5_run()
7168 rdev_for_each(rdev, mddev) { in raid5_run()
7181 } else if (mddev->reshape_backwards && in raid5_run()
7184 else if (!mddev->reshape_backwards && in raid5_run()
7189 if ((test_bit(MD_HAS_JOURNAL, &mddev->flags) || journal_dev) && in raid5_run()
7190 (mddev->bitmap_info.offset || mddev->bitmap_info.file)) { in raid5_run()
7192 mdname(mddev)); in raid5_run()
7196 if (mddev->reshape_position != MaxSector) { in raid5_run()
7211 int max_degraded = (mddev->level == 6 ? 2 : 1); in raid5_run()
7217 mdname(mddev)); in raid5_run()
7221 if (mddev->new_level != mddev->level) { in raid5_run()
7223 mdname(mddev)); in raid5_run()
7226 old_disks = mddev->raid_disks - mddev->delta_disks; in raid5_run()
7234 here_new = mddev->reshape_position; in raid5_run()
7235 chunk_sectors = max(mddev->chunk_sectors, mddev->new_chunk_sectors); in raid5_run()
7236 new_data_disks = mddev->raid_disks - max_degraded; in raid5_run()
7239 mdname(mddev)); in raid5_run()
7244 here_old = mddev->reshape_position; in raid5_run()
7248 if (mddev->delta_disks == 0) { in raid5_run()
7256 if (abs(min_offset_diff) >= mddev->chunk_sectors && in raid5_run()
7257 abs(min_offset_diff) >= mddev->new_chunk_sectors) in raid5_run()
7259 else if (mddev->ro == 0) { in raid5_run()
7261 mdname(mddev)); in raid5_run()
7264 } else if (mddev->reshape_backwards in raid5_run()
7271 mdname(mddev)); in raid5_run()
7274 pr_debug("md/raid:%s: reshape will continue\n", mdname(mddev)); in raid5_run()
7277 BUG_ON(mddev->level != mddev->new_level); in raid5_run()
7278 BUG_ON(mddev->layout != mddev->new_layout); in raid5_run()
7279 BUG_ON(mddev->chunk_sectors != mddev->new_chunk_sectors); in raid5_run()
7280 BUG_ON(mddev->delta_disks != 0); in raid5_run()
7283 if (test_bit(MD_HAS_JOURNAL, &mddev->flags) && in raid5_run()
7284 test_bit(MD_HAS_PPL, &mddev->flags)) { in raid5_run()
7286 mdname(mddev)); in raid5_run()
7287 clear_bit(MD_HAS_PPL, &mddev->flags); in raid5_run()
7288 clear_bit(MD_HAS_MULTIPLE_PPLS, &mddev->flags); in raid5_run()
7291 if (mddev->private == NULL) in raid5_run()
7292 conf = setup_conf(mddev); in raid5_run()
7294 conf = mddev->private; in raid5_run()
7299 if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) { in raid5_run()
7302 mdname(mddev)); in raid5_run()
7303 mddev->ro = 1; in raid5_run()
7304 set_disk_ro(mddev->gendisk, 1); in raid5_run()
7305 } else if (mddev->recovery_cp == MaxSector) in raid5_run()
7306 set_bit(MD_JOURNAL_CLEAN, &mddev->flags); in raid5_run()
7310 mddev->thread = conf->thread; in raid5_run()
7312 mddev->private = conf; in raid5_run()
7345 if (mddev->major_version == 0 && in raid5_run()
7346 mddev->minor_version > 90) in raid5_run()
7368 mddev->degraded = raid5_calc_degraded(conf); in raid5_run()
7372 mdname(mddev), mddev->degraded, conf->raid_disks); in raid5_run()
7377 mddev->dev_sectors &= ~(mddev->chunk_sectors - 1); in raid5_run()
7378 mddev->resync_max_sectors = mddev->dev_sectors; in raid5_run()
7380 if (mddev->degraded > dirty_parity_disks && in raid5_run()
7381 mddev->recovery_cp != MaxSector) { in raid5_run()
7382 if (test_bit(MD_HAS_PPL, &mddev->flags)) in raid5_run()
7384 mdname(mddev)); in raid5_run()
7385 else if (mddev->ok_start_degraded) in raid5_run()
7387 mdname(mddev)); in raid5_run()
7390 mdname(mddev)); in raid5_run()
7396 mdname(mddev), conf->level, in raid5_run()
7397 mddev->raid_disks-mddev->degraded, mddev->raid_disks, in raid5_run()
7398 mddev->new_layout); in raid5_run()
7405 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); in raid5_run()
7406 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); in raid5_run()
7407 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); in raid5_run()
7408 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); in raid5_run()
7409 mddev->sync_thread = md_register_thread(md_do_sync, mddev, in raid5_run()
7411 if (!mddev->sync_thread) in raid5_run()
7416 if (mddev->to_remove == &raid5_attrs_group) in raid5_run()
7417 mddev->to_remove = NULL; in raid5_run()
7418 else if (mddev->kobj.sd && in raid5_run()
7419 sysfs_create_group(&mddev->kobj, &raid5_attrs_group)) in raid5_run()
7421 mdname(mddev)); in raid5_run()
7422 md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); in raid5_run()
7424 if (mddev->queue) { in raid5_run()
7432 ((mddev->chunk_sectors << 9) / PAGE_SIZE); in raid5_run()
7433 if (mddev->queue->backing_dev_info->ra_pages < 2 * stripe) in raid5_run()
7434 mddev->queue->backing_dev_info->ra_pages = 2 * stripe; in raid5_run()
7436 chunk_size = mddev->chunk_sectors << 9; in raid5_run()
7437 blk_queue_io_min(mddev->queue, chunk_size); in raid5_run()
7438 blk_queue_io_opt(mddev->queue, chunk_size * in raid5_run()
7440 mddev->queue->limits.raid_partial_stripes_expensive = 1; in raid5_run()
7450 mddev->queue->limits.discard_alignment = stripe; in raid5_run()
7451 mddev->queue->limits.discard_granularity = stripe; in raid5_run()
7453 blk_queue_max_write_same_sectors(mddev->queue, 0); in raid5_run()
7454 blk_queue_max_write_zeroes_sectors(mddev->queue, 0); in raid5_run()
7456 rdev_for_each(rdev, mddev) { in raid5_run()
7457 disk_stack_limits(mddev->gendisk, rdev->bdev, in raid5_run()
7459 disk_stack_limits(mddev->gendisk, rdev->bdev, in raid5_run()
7479 mddev->queue->limits.max_discard_sectors >= (stripe >> 9) && in raid5_run()
7480 mddev->queue->limits.discard_granularity >= stripe) in raid5_run()
7482 mddev->queue); in raid5_run()
7485 mddev->queue); in raid5_run()
7487 blk_queue_max_hw_sectors(mddev->queue, UINT_MAX); in raid5_run()
7495 md_unregister_thread(&mddev->thread); in raid5_run()
7498 mddev->private = NULL; in raid5_run()
7499 pr_warn("md/raid:%s: failed to run raid set.\n", mdname(mddev)); in raid5_run()
7503 static void raid5_free(struct mddev *mddev, void *priv) in raid5_free() argument
7508 mddev->to_remove = &raid5_attrs_group; in raid5_free()
7511 static void raid5_status(struct seq_file *seq, struct mddev *mddev) in raid5_status() argument
7513 struct r5conf *conf = mddev->private; in raid5_status()
7516 seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level, in raid5_status()
7517 conf->chunk_sectors / 2, mddev->layout); in raid5_status()
7518 seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded); in raid5_status()
7540 conf->raid_disks - conf->mddev->degraded); in print_raid5_conf()
7552 static int raid5_spare_active(struct mddev *mddev) in raid5_spare_active() argument
7555 struct r5conf *conf = mddev->private; in raid5_spare_active()
7589 mddev->degraded = raid5_calc_degraded(conf); in raid5_spare_active()
7595 static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev) in raid5_remove_disk() argument
7597 struct r5conf *conf = mddev->private; in raid5_remove_disk()
7639 mddev->recovery_disabled != conf->recovery_disabled && in raid5_remove_disk()
7680 static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev) in raid5_add_disk() argument
7682 struct r5conf *conf = mddev->private; in raid5_add_disk()
7708 if (mddev->recovery_disabled == conf->recovery_disabled) in raid5_add_disk()
7759 static int raid5_resize(struct mddev *mddev, sector_t sectors) in raid5_resize() argument
7769 struct r5conf *conf = mddev->private; in raid5_resize()
7774 newsize = raid5_size(mddev, sectors, mddev->raid_disks); in raid5_resize()
7775 if (mddev->external_size && in raid5_resize()
7776 mddev->array_sectors > newsize) in raid5_resize()
7778 if (mddev->bitmap) { in raid5_resize()
7779 int ret = md_bitmap_resize(mddev->bitmap, sectors, 0, 0); in raid5_resize()
7783 md_set_array_sectors(mddev, newsize); in raid5_resize()
7784 if (sectors > mddev->dev_sectors && in raid5_resize()
7785 mddev->recovery_cp > mddev->dev_sectors) { in raid5_resize()
7786 mddev->recovery_cp = mddev->dev_sectors; in raid5_resize()
7787 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in raid5_resize()
7789 mddev->dev_sectors = sectors; in raid5_resize()
7790 mddev->resync_max_sectors = sectors; in raid5_resize()
7794 static int check_stripe_cache(struct mddev *mddev) in check_stripe_cache() argument
7804 struct r5conf *conf = mddev->private; in check_stripe_cache()
7805 if (((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4 in check_stripe_cache()
7807 ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4 in check_stripe_cache()
7810 mdname(mddev), in check_stripe_cache()
7811 ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9) in check_stripe_cache()
7818 static int check_reshape(struct mddev *mddev) in check_reshape() argument
7820 struct r5conf *conf = mddev->private; in check_reshape()
7824 if (mddev->delta_disks == 0 && in check_reshape()
7825 mddev->new_layout == mddev->layout && in check_reshape()
7826 mddev->new_chunk_sectors == mddev->chunk_sectors) in check_reshape()
7830 if (mddev->delta_disks < 0 && mddev->reshape_position == MaxSector) { in check_reshape()
7837 if (mddev->level == 6) in check_reshape()
7839 if (mddev->raid_disks + mddev->delta_disks < min) in check_reshape()
7843 if (!check_stripe_cache(mddev)) in check_reshape()
7846 if (mddev->new_chunk_sectors > mddev->chunk_sectors || in check_reshape()
7847 mddev->delta_disks > 0) in check_reshape()
7850 + max(0, mddev->delta_disks), in check_reshape()
7851 max(mddev->new_chunk_sectors, in check_reshape()
7852 mddev->chunk_sectors) in check_reshape()
7856 if (conf->previous_raid_disks + mddev->delta_disks <= conf->pool_size) in check_reshape()
7859 + mddev->delta_disks)); in check_reshape()
7862 static int raid5_start_reshape(struct mddev *mddev) in raid5_start_reshape() argument
7864 struct r5conf *conf = mddev->private; in raid5_start_reshape()
7869 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in raid5_start_reshape()
7872 if (!check_stripe_cache(mddev)) in raid5_start_reshape()
7878 rdev_for_each(rdev, mddev) { in raid5_start_reshape()
7884 if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded) in raid5_start_reshape()
7894 if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks) in raid5_start_reshape()
7895 < mddev->array_sectors) { in raid5_start_reshape()
7897 mdname(mddev)); in raid5_start_reshape()
7905 conf->raid_disks += mddev->delta_disks; in raid5_start_reshape()
7907 conf->chunk_sectors = mddev->new_chunk_sectors; in raid5_start_reshape()
7909 conf->algorithm = mddev->new_layout; in raid5_start_reshape()
7915 if (mddev->reshape_backwards) in raid5_start_reshape()
7916 conf->reshape_progress = raid5_size(mddev, 0, 0); in raid5_start_reshape()
7927 mddev_suspend(mddev); in raid5_start_reshape()
7928 mddev_resume(mddev); in raid5_start_reshape()
7937 if (mddev->delta_disks >= 0) { in raid5_start_reshape()
7938 rdev_for_each(rdev, mddev) in raid5_start_reshape()
7941 if (raid5_add_disk(mddev, rdev) == 0) { in raid5_start_reshape()
7948 if (sysfs_link_rdev(mddev, rdev)) in raid5_start_reshape()
7962 mddev->degraded = raid5_calc_degraded(conf); in raid5_start_reshape()
7965 mddev->raid_disks = conf->raid_disks; in raid5_start_reshape()
7966 mddev->reshape_position = conf->reshape_progress; in raid5_start_reshape()
7967 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in raid5_start_reshape()
7969 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); in raid5_start_reshape()
7970 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); in raid5_start_reshape()
7971 clear_bit(MD_RECOVERY_DONE, &mddev->recovery); in raid5_start_reshape()
7972 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); in raid5_start_reshape()
7973 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); in raid5_start_reshape()
7974 mddev->sync_thread = md_register_thread(md_do_sync, mddev, in raid5_start_reshape()
7976 if (!mddev->sync_thread) { in raid5_start_reshape()
7977 mddev->recovery = 0; in raid5_start_reshape()
7980 mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks; in raid5_start_reshape()
7981 mddev->new_chunk_sectors = in raid5_start_reshape()
7983 mddev->new_layout = conf->algorithm = conf->prev_algo; in raid5_start_reshape()
7984 rdev_for_each(rdev, mddev) in raid5_start_reshape()
7989 mddev->reshape_position = MaxSector; in raid5_start_reshape()
7995 md_wakeup_thread(mddev->sync_thread); in raid5_start_reshape()
7996 md_new_event(mddev); in raid5_start_reshape()
8006 if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { in end_reshape()
8011 md_finish_reshape(conf->mddev); in end_reshape()
8014 conf->mddev->reshape_position = MaxSector; in end_reshape()
8015 rdev_for_each(rdev, conf->mddev) in end_reshape()
8026 if (conf->mddev->queue) { in end_reshape()
8030 if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe) in end_reshape()
8031 conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe; in end_reshape()
8039 static void raid5_finish_reshape(struct mddev *mddev) in raid5_finish_reshape() argument
8041 struct r5conf *conf = mddev->private; in raid5_finish_reshape()
8043 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { in raid5_finish_reshape()
8045 if (mddev->delta_disks <= 0) { in raid5_finish_reshape()
8048 mddev->degraded = raid5_calc_degraded(conf); in raid5_finish_reshape()
8051 d < conf->raid_disks - mddev->delta_disks; in raid5_finish_reshape()
8061 mddev->layout = conf->algorithm; in raid5_finish_reshape()
8062 mddev->chunk_sectors = conf->chunk_sectors; in raid5_finish_reshape()
8063 mddev->reshape_position = MaxSector; in raid5_finish_reshape()
8064 mddev->delta_disks = 0; in raid5_finish_reshape()
8065 mddev->reshape_backwards = 0; in raid5_finish_reshape()
8069 static void raid5_quiesce(struct mddev *mddev, int quiesce) in raid5_quiesce() argument
8071 struct r5conf *conf = mddev->private; in raid5_quiesce()
8101 static void *raid45_takeover_raid0(struct mddev *mddev, int level) in raid45_takeover_raid0() argument
8103 struct r0conf *raid0_conf = mddev->private; in raid45_takeover_raid0()
8109 mdname(mddev)); in raid45_takeover_raid0()
8115 mddev->dev_sectors = sectors; in raid45_takeover_raid0()
8116 mddev->new_level = level; in raid45_takeover_raid0()
8117 mddev->new_layout = ALGORITHM_PARITY_N; in raid45_takeover_raid0()
8118 mddev->new_chunk_sectors = mddev->chunk_sectors; in raid45_takeover_raid0()
8119 mddev->raid_disks += 1; in raid45_takeover_raid0()
8120 mddev->delta_disks = 1; in raid45_takeover_raid0()
8122 mddev->recovery_cp = MaxSector; in raid45_takeover_raid0()
8124 return setup_conf(mddev); in raid45_takeover_raid0()
8127 static void *raid5_takeover_raid1(struct mddev *mddev) in raid5_takeover_raid1() argument
8132 if (mddev->raid_disks != 2 || in raid5_takeover_raid1()
8133 mddev->degraded > 1) in raid5_takeover_raid1()
8141 while (chunksect && (mddev->array_sectors & (chunksect-1))) in raid5_takeover_raid1()
8148 mddev->new_level = 5; in raid5_takeover_raid1()
8149 mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC; in raid5_takeover_raid1()
8150 mddev->new_chunk_sectors = chunksect; in raid5_takeover_raid1()
8152 ret = setup_conf(mddev); in raid5_takeover_raid1()
8154 mddev_clear_unsupported_flags(mddev, in raid5_takeover_raid1()
8159 static void *raid5_takeover_raid6(struct mddev *mddev) in raid5_takeover_raid6() argument
8163 switch (mddev->layout) { in raid5_takeover_raid6()
8185 mddev->new_level = 5; in raid5_takeover_raid6()
8186 mddev->new_layout = new_layout; in raid5_takeover_raid6()
8187 mddev->delta_disks = -1; in raid5_takeover_raid6()
8188 mddev->raid_disks -= 1; in raid5_takeover_raid6()
8189 return setup_conf(mddev); in raid5_takeover_raid6()
8192 static int raid5_check_reshape(struct mddev *mddev) in raid5_check_reshape() argument
8199 struct r5conf *conf = mddev->private; in raid5_check_reshape()
8200 int new_chunk = mddev->new_chunk_sectors; in raid5_check_reshape()
8202 if (mddev->new_layout >= 0 && !algorithm_valid_raid5(mddev->new_layout)) in raid5_check_reshape()
8209 if (mddev->array_sectors & (new_chunk-1)) in raid5_check_reshape()
8216 if (mddev->raid_disks == 2) { in raid5_check_reshape()
8218 if (mddev->new_layout >= 0) { in raid5_check_reshape()
8219 conf->algorithm = mddev->new_layout; in raid5_check_reshape()
8220 mddev->layout = mddev->new_layout; in raid5_check_reshape()
8224 mddev->chunk_sectors = new_chunk; in raid5_check_reshape()
8226 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in raid5_check_reshape()
8227 md_wakeup_thread(mddev->thread); in raid5_check_reshape()
8229 return check_reshape(mddev); in raid5_check_reshape()
8232 static int raid6_check_reshape(struct mddev *mddev) in raid6_check_reshape() argument
8234 int new_chunk = mddev->new_chunk_sectors; in raid6_check_reshape()
8236 if (mddev->new_layout >= 0 && !algorithm_valid_raid6(mddev->new_layout)) in raid6_check_reshape()
8243 if (mddev->array_sectors & (new_chunk-1)) in raid6_check_reshape()
8249 return check_reshape(mddev); in raid6_check_reshape()
8252 static void *raid5_takeover(struct mddev *mddev) in raid5_takeover() argument
8260 if (mddev->level == 0) in raid5_takeover()
8261 return raid45_takeover_raid0(mddev, 5); in raid5_takeover()
8262 if (mddev->level == 1) in raid5_takeover()
8263 return raid5_takeover_raid1(mddev); in raid5_takeover()
8264 if (mddev->level == 4) { in raid5_takeover()
8265 mddev->new_layout = ALGORITHM_PARITY_N; in raid5_takeover()
8266 mddev->new_level = 5; in raid5_takeover()
8267 return setup_conf(mddev); in raid5_takeover()
8269 if (mddev->level == 6) in raid5_takeover()
8270 return raid5_takeover_raid6(mddev); in raid5_takeover()
8275 static void *raid4_takeover(struct mddev *mddev) in raid4_takeover() argument
8281 if (mddev->level == 0) in raid4_takeover()
8282 return raid45_takeover_raid0(mddev, 4); in raid4_takeover()
8283 if (mddev->level == 5 && in raid4_takeover()
8284 mddev->layout == ALGORITHM_PARITY_N) { in raid4_takeover()
8285 mddev->new_layout = 0; in raid4_takeover()
8286 mddev->new_level = 4; in raid4_takeover()
8287 return setup_conf(mddev); in raid4_takeover()
8294 static void *raid6_takeover(struct mddev *mddev) in raid6_takeover() argument
8302 if (mddev->pers != &raid5_personality) in raid6_takeover()
8304 if (mddev->degraded > 1) in raid6_takeover()
8306 if (mddev->raid_disks > 253) in raid6_takeover()
8308 if (mddev->raid_disks < 3) in raid6_takeover()
8311 switch (mddev->layout) { in raid6_takeover()
8333 mddev->new_level = 6; in raid6_takeover()
8334 mddev->new_layout = new_layout; in raid6_takeover()
8335 mddev->delta_disks = 1; in raid6_takeover()
8336 mddev->raid_disks += 1; in raid6_takeover()
8337 return setup_conf(mddev); in raid6_takeover()
8340 static int raid5_change_consistency_policy(struct mddev *mddev, const char *buf) in raid5_change_consistency_policy() argument
8345 err = mddev_lock(mddev); in raid5_change_consistency_policy()
8348 conf = mddev->private; in raid5_change_consistency_policy()
8350 mddev_unlock(mddev); in raid5_change_consistency_policy()
8367 mddev_suspend(mddev); in raid5_change_consistency_policy()
8369 mddev_resume(mddev); in raid5_change_consistency_policy()
8371 } else if (test_bit(MD_HAS_JOURNAL, &conf->mddev->flags) && in raid5_change_consistency_policy()
8376 rdev_for_each(rdev, mddev) in raid5_change_consistency_policy()
8383 mddev_suspend(mddev); in raid5_change_consistency_policy()
8384 clear_bit(MD_HAS_JOURNAL, &mddev->flags); in raid5_change_consistency_policy()
8385 mddev_resume(mddev); in raid5_change_consistency_policy()
8395 md_update_sb(mddev, 1); in raid5_change_consistency_policy()
8397 mddev_unlock(mddev); in raid5_change_consistency_policy()
8402 static int raid5_start(struct mddev *mddev) in raid5_start() argument
8404 struct r5conf *conf = mddev->private; in raid5_start()