Lines matching refs:mddev
240 struct mddev md;
256 struct mddev *mddev = &rs->md; in rs_config_backup() local
258 l->new_level = mddev->new_level; in rs_config_backup()
259 l->new_layout = mddev->new_layout; in rs_config_backup()
260 l->new_chunk_sectors = mddev->new_chunk_sectors; in rs_config_backup()
265 struct mddev *mddev = &rs->md; in rs_config_restore() local
267 mddev->new_level = l->new_level; in rs_config_restore()
268 mddev->new_layout = l->new_layout; in rs_config_restore()
269 mddev->new_chunk_sectors = l->new_chunk_sectors; in rs_config_restore()
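
The two helpers above snapshot and restore the pending ("new_*") reshape geometry around speculative constructor work, so a failed takeover or reshape attempt leaves the mddev exactly as it found it. A minimal user-space sketch of that pairing, assuming nothing beyond the assignments visible in the listing; mddev_geom and the main() driver are stand-ins, not kernel code:

#include <stdio.h>

struct mddev_geom {                     /* stands in for the mddev fields used */
        int new_level;
        int new_layout;
        int new_chunk_sectors;
};

struct rs_layout {                      /* same role as the kernel's rs_layout */
        int new_level;
        int new_layout;
        int new_chunk_sectors;
};

static void rs_config_backup(const struct mddev_geom *mddev, struct rs_layout *l)
{
        l->new_level = mddev->new_level;
        l->new_layout = mddev->new_layout;
        l->new_chunk_sectors = mddev->new_chunk_sectors;
}

static void rs_config_restore(struct mddev_geom *mddev, const struct rs_layout *l)
{
        mddev->new_level = l->new_level;
        mddev->new_layout = l->new_layout;
        mddev->new_chunk_sectors = l->new_chunk_sectors;
}

int main(void)
{
        struct mddev_geom md = { .new_level = 5, .new_layout = 2, .new_chunk_sectors = 128 };
        struct rs_layout saved;

        rs_config_backup(&md, &saved);
        md.new_level = 6;               /* speculative takeover attempt ... */
        rs_config_restore(&md, &saved); /* ... rolled back on failure */
        printf("new_level restored to %d\n", md.new_level);
        return 0;
}
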
680 struct mddev *mddev = &rs->md; in rs_set_rdev_sectors() local
687 rdev_for_each(rdev, mddev) in rs_set_rdev_sectors()
689 rdev->sectors = mddev->dev_sectors; in rs_set_rdev_sectors()
709 struct mddev *mddev = &rs->md; in rs_set_cur() local
711 mddev->new_level = mddev->level; in rs_set_cur()
712 mddev->new_layout = mddev->layout; in rs_set_cur()
713 mddev->new_chunk_sectors = mddev->chunk_sectors; in rs_set_cur()
722 struct mddev *mddev = &rs->md; in rs_set_new() local
724 mddev->level = mddev->new_level; in rs_set_new()
725 mddev->layout = mddev->new_layout; in rs_set_new()
726 mddev->chunk_sectors = mddev->new_chunk_sectors; in rs_set_new()
727 mddev->raid_disks = rs->raid_disks; in rs_set_new()
728 mddev->delta_disks = 0; in rs_set_new()
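
rs_set_cur() and rs_set_new() flip the same three fields in opposite directions: the first cancels a pending geometry change by copying the live values into the "new_*" slots, the second commits the pending values as the live geometry and zeroes delta_disks. A standalone C model of just those assignments:

#include <stdio.h>

struct geom {
        int level, layout, chunk_sectors;
        int new_level, new_layout, new_chunk_sectors;
        int raid_disks, delta_disks;
};

static void rs_set_cur(struct geom *m)  /* cancel: new_* mirrors live geometry */
{
        m->new_level = m->level;
        m->new_layout = m->layout;
        m->new_chunk_sectors = m->chunk_sectors;
}

static void rs_set_new(struct geom *m, int raid_disks)  /* commit new_* as live */
{
        m->level = m->new_level;
        m->layout = m->new_layout;
        m->chunk_sectors = m->new_chunk_sectors;
        m->raid_disks = raid_disks;
        m->delta_disks = 0;             /* change is absorbed, no delta left */
}

int main(void)
{
        struct geom m = { .level = 5, .layout = 2, .chunk_sectors = 128,
                          .new_level = 6, .new_layout = 2, .new_chunk_sectors = 128 };

        rs_set_new(&m, 4);              /* commit the pending change */
        printf("level now %d, delta_disks %d\n", m.level, m.delta_disks);

        m.new_level = 10;
        rs_set_cur(&m);                 /* cancel a later pending change */
        printf("new_level back to %d\n", m.new_level);
        return 0;
}
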
841 rs->dev[i].rdev.mddev = &rs->md; in parse_dev_params()
1259 jdev->mddev = &rs->md; in parse_raid_params()
1533 struct mddev *mddev = &rs->md; in rs_set_raid456_stripe_cache() local
1534 uint32_t min_stripes = max(mddev->chunk_sectors, mddev->new_chunk_sectors) / 2; in rs_set_raid456_stripe_cache()
1548 conf = mddev->private; in rs_set_raid456_stripe_cache()
1556 r = raid5_set_cache_size(mddev, nr_stripes); in rs_set_raid456_stripe_cache()
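
The stripe-cache floor above is half the larger of the current and pending chunk size (both counted in 512-byte sectors). The sketch below models that arithmetic; note that clamping the requested value up to min_stripes is an assumption about the surrounding code the listing does not show, and raid5_set_cache_size() is replaced by a stub:

#include <stdio.h>
#include <stdint.h>

static uint32_t max_u32(uint32_t a, uint32_t b) { return a > b ? a : b; }

/* stand-in for the kernel's raid5_set_cache_size(mddev, nr_stripes) */
static int raid5_set_cache_size_stub(uint32_t nr_stripes)
{
        printf("stripe cache set to %u stripes\n", nr_stripes);
        return 0;
}

int main(void)
{
        uint32_t chunk_sectors = 128, new_chunk_sectors = 256;
        uint32_t requested = 32;        /* user-requested cache entries */

        /* floor: half the larger chunk size, as in the listing above */
        uint32_t min_stripes = max_u32(chunk_sectors, new_chunk_sectors) / 2;
        uint32_t nr_stripes = requested < min_stripes ? min_stripes : requested;

        return raid5_set_cache_size_stub(nr_stripes);
}
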
1622 struct mddev *mddev = &rs->md; in rs_set_dev_and_array_sectors() local
1627 delta_disks = mddev->delta_disks; in rs_set_dev_and_array_sectors()
1659 rdev_for_each(rdev, mddev) in rs_set_dev_and_array_sectors()
1663 mddev->array_sectors = array_sectors; in rs_set_dev_and_array_sectors()
1664 mddev->dev_sectors = dev_sectors; in rs_set_dev_and_array_sectors()
1738 struct mddev *mddev = &rs->md; in rs_check_takeover() local
1751 switch (mddev->level) { in rs_check_takeover()
1754 if ((mddev->new_level == 1 || mddev->new_level == 5) && in rs_check_takeover()
1755 mddev->raid_disks == 1) in rs_check_takeover()
1759 if (mddev->new_level == 10 && in rs_check_takeover()
1760 !(rs->raid_disks % mddev->raid_disks)) in rs_check_takeover()
1764 if (__within_range(mddev->new_level, 4, 6) && in rs_check_takeover()
1765 mddev->new_layout == ALGORITHM_PARITY_N && in rs_check_takeover()
1766 mddev->raid_disks > 1) in rs_check_takeover()
1773 if (__is_raid10_offset(mddev->layout)) in rs_check_takeover()
1776 near_copies = __raid10_near_copies(mddev->layout); in rs_check_takeover()
1779 if (mddev->new_level == 0) { in rs_check_takeover()
1782 !(mddev->raid_disks % near_copies)) { in rs_check_takeover()
1783 mddev->raid_disks /= near_copies; in rs_check_takeover()
1784 mddev->delta_disks = mddev->raid_disks; in rs_check_takeover()
1790 __raid10_far_copies(mddev->layout) > 1) in rs_check_takeover()
1797 if (mddev->new_level == 1 && in rs_check_takeover()
1798 max(near_copies, __raid10_far_copies(mddev->layout)) == mddev->raid_disks) in rs_check_takeover()
1802 if (__within_range(mddev->new_level, 4, 5) && in rs_check_takeover()
1803 mddev->raid_disks == 2) in rs_check_takeover()
1809 if (__within_range(mddev->new_level, 4, 5) && in rs_check_takeover()
1810 mddev->raid_disks == 2) { in rs_check_takeover()
1811 mddev->degraded = 1; in rs_check_takeover()
1816 if (mddev->new_level == 0 && in rs_check_takeover()
1817 mddev->raid_disks == 1) in rs_check_takeover()
1821 if (mddev->new_level == 10) in rs_check_takeover()
1827 if (mddev->new_level == 0) in rs_check_takeover()
1831 if ((mddev->new_level == 1 || mddev->new_level == 5) && in rs_check_takeover()
1832 mddev->raid_disks == 2) in rs_check_takeover()
1836 if (__within_range(mddev->new_level, 5, 6) && in rs_check_takeover()
1837 mddev->layout == ALGORITHM_PARITY_N) in rs_check_takeover()
1843 if (mddev->new_level == 0 && in rs_check_takeover()
1844 mddev->layout == ALGORITHM_PARITY_N) in rs_check_takeover()
1848 if (mddev->new_level == 4 && in rs_check_takeover()
1849 mddev->layout == ALGORITHM_PARITY_N) in rs_check_takeover()
1853 if ((mddev->new_level == 1 || mddev->new_level == 4 || mddev->new_level == 10) && in rs_check_takeover()
1854 mddev->raid_disks == 2) in rs_check_takeover()
1858 if (mddev->new_level == 6 && in rs_check_takeover()
1859 ((mddev->layout == ALGORITHM_PARITY_N && mddev->new_layout == ALGORITHM_PARITY_N) || in rs_check_takeover()
1860 … __within_range(mddev->new_layout, ALGORITHM_LEFT_ASYMMETRIC_6, ALGORITHM_RIGHT_SYMMETRIC_6))) in rs_check_takeover()
1866 if (mddev->new_level == 0 && in rs_check_takeover()
1867 mddev->layout == ALGORITHM_PARITY_N) in rs_check_takeover()
1871 if (mddev->new_level == 4 && in rs_check_takeover()
1872 mddev->layout == ALGORITHM_PARITY_N) in rs_check_takeover()
1876 if (mddev->new_level == 5 && in rs_check_takeover()
1877 ((mddev->layout == ALGORITHM_PARITY_N && mddev->new_layout == ALGORITHM_PARITY_N) || in rs_check_takeover()
1878 __within_range(mddev->new_layout, ALGORITHM_LEFT_ASYMMETRIC, ALGORITHM_RIGHT_SYMMETRIC))) in rs_check_takeover()
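
rs_check_takeover() is one long switch on the current personality level, where each case whitelists the (new_level, new_layout, raid_disks) combinations md can convert in place and everything else falls through to a rejection. A compressed model with only two of the cases from the listing (raid0 with a single disk, raid1 with two legs); -EPERM as the default return is consistent with the structure but an assumption here:

#include <errno.h>
#include <stdio.h>

struct geom { int level, new_level, raid_disks; };

static int rs_check_takeover_model(const struct geom *m)
{
        switch (m->level) {
        case 0:         /* raid0 with one disk -> raid1 or raid5 */
                if ((m->new_level == 1 || m->new_level == 5) && m->raid_disks == 1)
                        return 0;
                break;
        case 1:         /* raid1 with two legs -> raid4 or raid5 */
                if (m->new_level >= 4 && m->new_level <= 5 && m->raid_disks == 2)
                        return 0;
                break;
        }
        return -EPERM;  /* everything else: takeover not possible */
}

int main(void)
{
        struct geom m = { .level = 0, .new_level = 5, .raid_disks = 1 };
        printf("takeover %s\n", rs_check_takeover_model(&m) ? "rejected" : "allowed");
        return 0;
}
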
1899 struct mddev *mddev = &rs->md; in rs_reshape_requested() local
1907 change = mddev->new_layout != mddev->layout || in rs_reshape_requested()
1908 mddev->new_chunk_sectors != mddev->chunk_sectors || in rs_reshape_requested()
1917 mddev->raid_disks != rs->raid_disks; in rs_reshape_requested()
1922 !__is_raid10_far(mddev->new_layout) && in rs_reshape_requested()
2029 struct mddev *mddev = &rs->md; in rs_check_reshape() local
2031 if (!mddev->pers || !mddev->pers->check_reshape) in rs_check_reshape()
2033 else if (mddev->degraded) in rs_check_reshape()
2059 md_error(rdev->mddev, rdev); in read_disk_sb()
2096 static void super_sync(struct mddev *mddev, struct md_rdev *rdev) in super_sync() argument
2102 struct raid_set *rs = container_of(mddev, struct raid_set, md); in super_sync()
2126 sb->num_devices = cpu_to_le32(mddev->raid_disks); in super_sync()
2129 sb->events = cpu_to_le64(mddev->events); in super_sync()
2132 sb->array_resync_offset = cpu_to_le64(mddev->recovery_cp); in super_sync()
2134 sb->level = cpu_to_le32(mddev->level); in super_sync()
2135 sb->layout = cpu_to_le32(mddev->layout); in super_sync()
2136 sb->stripe_sectors = cpu_to_le32(mddev->chunk_sectors); in super_sync()
2143 sb->new_level = cpu_to_le32(mddev->new_level); in super_sync()
2144 sb->new_layout = cpu_to_le32(mddev->new_layout); in super_sync()
2145 sb->new_stripe_sectors = cpu_to_le32(mddev->new_chunk_sectors); in super_sync()
2147 sb->delta_disks = cpu_to_le32(mddev->delta_disks); in super_sync()
2150 sb->reshape_position = cpu_to_le64(mddev->reshape_position); in super_sync()
2155 if (mddev->delta_disks < 0 || mddev->reshape_backwards) in super_sync()
2162 sb->array_sectors = cpu_to_le64(mddev->array_sectors); in super_sync()
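
super_sync() serializes every mddev field through cpu_to_le32()/cpu_to_le64() before it lands in the on-disk superblock, so the metadata stays readable regardless of host endianness. A user-space sketch of the same pattern, using glibc's htole32()/htole64() in place of the kernel macros; sb_model carries only a few of the real superblock fields:

#define _DEFAULT_SOURCE
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

struct sb_model {                       /* a few fields of the dm-raid superblock */
        uint64_t events;
        uint32_t num_devices;
        uint32_t level;
        uint32_t layout;
        uint32_t stripe_sectors;
};

int main(void)
{
        struct sb_model sb;
        uint64_t events = 42;           /* mddev->events */
        uint32_t raid_disks = 4, level = 5, layout = 2, chunk_sectors = 128;

        /* every field is converted to little-endian before hitting disk */
        sb.events = htole64(events);
        sb.num_devices = htole32(raid_disks);
        sb.level = htole32(level);
        sb.layout = htole32(layout);
        sb.stripe_sectors = htole32(chunk_sectors);

        printf("level on disk: 0x%08x\n", (unsigned)sb.level);
        return 0;
}
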
2200 super_sync(rdev->mddev, rdev); in super_load()
2206 set_bit(MD_SB_CHANGE_DEVS, &rdev->mddev->sb_flags); in super_load()
2227 struct mddev *mddev = &rs->md; in super_init_validation() local
2241 mddev->events = events_sb ? : 1; in super_init_validation()
2243 mddev->reshape_position = MaxSector; in super_init_validation()
2245 mddev->raid_disks = le32_to_cpu(sb->num_devices); in super_init_validation()
2246 mddev->level = le32_to_cpu(sb->level); in super_init_validation()
2247 mddev->layout = le32_to_cpu(sb->layout); in super_init_validation()
2248 mddev->chunk_sectors = le32_to_cpu(sb->stripe_sectors); in super_init_validation()
2256 mddev->new_level = le32_to_cpu(sb->new_level); in super_init_validation()
2257 mddev->new_layout = le32_to_cpu(sb->new_layout); in super_init_validation()
2258 mddev->new_chunk_sectors = le32_to_cpu(sb->new_stripe_sectors); in super_init_validation()
2259 mddev->delta_disks = le32_to_cpu(sb->delta_disks); in super_init_validation()
2260 mddev->array_sectors = le64_to_cpu(sb->array_sectors); in super_init_validation()
2269 if (mddev->delta_disks < 0 || in super_init_validation()
2270 (!mddev->delta_disks && (le32_to_cpu(sb->flags) & SB_FLAG_RESHAPE_BACKWARDS))) in super_init_validation()
2271 mddev->reshape_backwards = 1; in super_init_validation()
2273 mddev->reshape_backwards = 0; in super_init_validation()
2275 mddev->reshape_position = le64_to_cpu(sb->reshape_position); in super_init_validation()
2276 rs->raid_type = get_raid_type_by_ll(mddev->level, mddev->layout); in super_init_validation()
2283 struct raid_type *rt_cur = get_raid_type_by_ll(mddev->level, mddev->layout); in super_init_validation()
2284 struct raid_type *rt_new = get_raid_type_by_ll(mddev->new_level, mddev->new_layout); in super_init_validation()
2295 if (mddev->layout != mddev->new_layout) { in super_init_validation()
2301 le32_to_cpu(sb->layout), mddev->new_layout); in super_init_validation()
2303 if (mddev->chunk_sectors != mddev->new_chunk_sectors) in super_init_validation()
2305 mddev->chunk_sectors, mddev->new_chunk_sectors); in super_init_validation()
2308 mddev->raid_disks, mddev->raid_disks + rs->delta_disks); in super_init_validation()
2311 raid10_md_layout_to_format(mddev->layout), in super_init_validation()
2312 raid10_md_layout_to_copies(mddev->layout)); in super_init_validation()
2314 raid10_md_layout_to_format(mddev->new_layout), in super_init_validation()
2315 raid10_md_layout_to_copies(mddev->new_layout)); in super_init_validation()
2324 mddev->recovery_cp = le64_to_cpu(sb->array_resync_offset); in super_init_validation()
2342 rdev_for_each(r, mddev) { in super_init_validation()
2367 set_bit(MD_ARRAY_FIRST_USE, &mddev->flags); in super_init_validation()
2386 (unsigned long long) mddev->recovery_cp); in super_init_validation()
2390 (unsigned long long) mddev->reshape_position); in super_init_validation()
2400 rdev_for_each(r, mddev) { in super_init_validation()
2417 if (rs_is_raid10(rs) && __is_raid10_near(mddev->layout)) { in super_init_validation()
2418 if (mddev->raid_disks % __raid10_near_copies(mddev->layout) || in super_init_validation()
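
super_init_validation() is the decode side: le*_to_cpu() conversions back into the mddev, plus the reshape-direction rule visible at the delta_disks/SB_FLAG_RESHAPE_BACKWARDS check. The sketch below isolates that rule; the flag's bit position is illustrative, only its role is taken from the listing:

#define _DEFAULT_SOURCE
#include <endian.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SB_FLAG_RESHAPE_BACKWARDS (1u << 1)     /* illustrative bit position */

int main(void)
{
        uint32_t sb_flags_le = htole32(SB_FLAG_RESHAPE_BACKWARDS); /* as read from disk */
        int32_t delta_disks = 0;                /* le32_to_cpu(sb->delta_disks) */
        bool reshape_backwards;

        /* shrinking, or an explicit flag with no disk delta, means backwards */
        if (delta_disks < 0 ||
            (!delta_disks && (le32toh(sb_flags_le) & SB_FLAG_RESHAPE_BACKWARDS)))
                reshape_backwards = true;
        else
                reshape_backwards = false;

        printf("reshape_backwards = %d\n", reshape_backwards);
        return 0;
}
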
2451 struct mddev *mddev = &rs->md; in super_validate() local
2463 if (!mddev->events && super_init_validation(rs, rdev)) in super_validate()
2478 …mddev->bitmap_info.offset = (rt_is_raid0(rs->raid_type) || rs->journal_dev.dev) ? 0 : to_sector(40… in super_validate()
2479 mddev->bitmap_info.default_offset = mddev->bitmap_info.offset; in super_validate()
2524 struct mddev *mddev = &rs->md; in analyse_superblocks() local
2527 rdev_for_each(rdev, mddev) { in analyse_superblocks()
2600 rdev_for_each(rdev, mddev) in analyse_superblocks()
2727 struct mddev *mddev = &rs->md; in rs_setup_takeover() local
2729 unsigned int d = mddev->raid_disks = rs->raid_disks; in rs_setup_takeover()
2738 mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_FAR, in rs_setup_takeover()
2742 mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_NEAR, in rs_setup_takeover()
2749 clear_bit(MD_ARRAY_FIRST_USE, &mddev->flags); in rs_setup_takeover()
2750 mddev->recovery_cp = MaxSector; in rs_setup_takeover()
2758 mddev->recovery_cp = rdev->recovery_offset = 0; in rs_setup_takeover()
2760 set_bit(MD_ARRAY_FIRST_USE, &mddev->flags); in rs_setup_takeover()
2773 struct mddev *mddev = &rs->md; in rs_prepare_reshape() local
2776 if (rs->raid_disks != mddev->raid_disks && in rs_prepare_reshape()
2777 __is_raid10_near(mddev->layout) && in rs_prepare_reshape()
2779 rs->raid10_copies != __raid10_near_copies(mddev->layout)) { in rs_prepare_reshape()
2793 mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_NEAR, in rs_prepare_reshape()
2795 mddev->new_layout = mddev->layout; in rs_prepare_reshape()
2806 mddev->degraded = rs->delta_disks < 0 ? -rs->delta_disks : rs->delta_disks; in rs_prepare_reshape()
2810 mddev->raid_disks = rs->raid_disks; in rs_prepare_reshape()
2821 } else if (mddev->raid_disks < rs->raid_disks) in rs_prepare_reshape()
2857 struct mddev *mddev = &rs->md; in rs_setup_reshape() local
2860 mddev->delta_disks = rs->delta_disks; in rs_setup_reshape()
2861 cur_raid_devs = mddev->raid_disks; in rs_setup_reshape()
2864 if (mddev->delta_disks && in rs_setup_reshape()
2865 mddev->layout != mddev->new_layout) { in rs_setup_reshape()
2867 mddev->new_layout = mddev->layout; in rs_setup_reshape()
2906 rdev->sectors = mddev->dev_sectors; in rs_setup_reshape()
2910 mddev->reshape_backwards = 0; /* adding disk(s) -> forward reshape */ in rs_setup_reshape()
2915 mddev->reshape_backwards = 1; /* removing disk(s) -> backward reshape */ in rs_setup_reshape()
2940 mddev->reshape_backwards = rs->dev[0].rdev.data_offset ? 0 : 1; in rs_setup_reshape()
2947 if (!mddev->reshape_backwards) in rs_setup_reshape()
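
rs_setup_reshape() picks the reshape direction from the disk-count delta: growing reshapes forward, shrinking reshapes backward, and a pure layout/chunk change with unchanged disks takes its direction from whether space was reserved before the data (data_offset). A minimal model of that rule:

#include <stdio.h>

static int reshape_backwards(int delta_disks, unsigned long long data_offset)
{
        if (delta_disks > 0)
                return 0;               /* adding disk(s) -> forward */
        if (delta_disks < 0)
                return 1;               /* removing disk(s) -> backward */
        return data_offset ? 0 : 1;     /* same disks: direction from offset */
}

int main(void)
{
        printf("grow: %d, shrink: %d, same: %d\n",
               reshape_backwards(2, 0), reshape_backwards(-1, 0),
               reshape_backwards(0, 4096));
        return 0;
}
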
3314 struct mddev *mddev = &rs->md; in raid_map() local
3324 if (unlikely(bio_end_sector(bio) > mddev->array_sectors)) in raid_map()
3327 md_handle_request(mddev, bio); in raid_map()
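
raid_map() rejects any bio whose end sector would run past the array size before handing it to md_handle_request(). The guard, modelled without block-layer types (bio_end_sector() is just start plus length here):

#include <stdbool.h>
#include <stdio.h>

/* models bio_end_sector(bio) > mddev->array_sectors */
static bool bio_out_of_range(unsigned long long bio_sector,
                             unsigned long long bio_sectors,
                             unsigned long long array_sectors)
{
        return bio_sector + bio_sectors > array_sectors;
}

int main(void)
{
        /* a 32-sector bio starting at 1000 ends at 1032 > 1024 -> rejected */
        printf("%d\n", bio_out_of_range(1000, 32, 1024));
        return 0;
}
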
3351 static enum sync_state decipher_sync_action(struct mddev *mddev, unsigned long recovery) in decipher_sync_action() argument
3359 (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery)))) { in decipher_sync_action()
3374 if (mddev->reshape_position != MaxSector) in decipher_sync_action()
3413 struct mddev *mddev = &rs->md; in rs_get_progress() local
3423 state = decipher_sync_action(mddev, recovery); in rs_get_progress()
3426 r = mddev->recovery_cp; in rs_get_progress()
3428 r = mddev->curr_resync_completed; in rs_get_progress()
3484 rdev_for_each(rdev, mddev) in rs_get_progress()
3506 struct mddev *mddev = &rs->md; in raid_status() local
3507 struct r5conf *conf = mddev->private; in raid_status()
3521 rt = get_raid_type_by_ll(mddev->new_level, mddev->new_layout); in raid_status()
3525 DMEMIT("%s %d ", rt->name, mddev->raid_disks); in raid_status()
3532 mddev->resync_max_sectors : mddev->dev_sectors; in raid_status()
3534 resync_mismatches = (mddev->last_sync_action && !strcasecmp(mddev->last_sync_action, "check")) ? in raid_status()
3535 atomic64_read(&mddev->resync_mismatches) : 0; in raid_status()
3611 DMEMIT("%s %u %u", rs->raid_type->name, raid_param_cnt, mddev->new_chunk_sectors); in raid_status()
3623 mddev->bitmap_info.daemon_sleep); in raid_status()
3626 mddev->sync_speed_min); in raid_status()
3629 mddev->sync_speed_max); in raid_status()
3637 mddev->bitmap_info.max_write_behind); in raid_status()
3643 (unsigned long long) to_sector(mddev->bitmap_info.chunksize)); in raid_status()
3646 raid10_md_layout_to_copies(mddev->layout)); in raid_status()
3649 raid10_md_layout_to_format(mddev->layout)); in raid_status()
3652 max(rs->delta_disks, mddev->delta_disks)); in raid_status()
3673 struct mddev *mddev = &rs->md; in raid_message() local
3675 if (!mddev->pers || !mddev->pers->sync_request) in raid_message()
3679 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in raid_message()
3681 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in raid_message()
3684 if (mddev->sync_thread) { in raid_message()
3685 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in raid_message()
3686 md_reap_sync_thread(mddev); in raid_message()
3688 } else if (decipher_sync_action(mddev, mddev->recovery) != st_idle) in raid_message()
3693 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); in raid_message()
3696 set_bit(MD_RECOVERY_CHECK, &mddev->recovery); in raid_message()
3697 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); in raid_message()
3698 set_bit(MD_RECOVERY_SYNC, &mddev->recovery); in raid_message()
3700 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); in raid_message()
3701 set_bit(MD_RECOVERY_SYNC, &mddev->recovery); in raid_message()
3705 if (mddev->ro == 2) { in raid_message()
3709 mddev->ro = 0; in raid_message()
3710 if (!mddev->suspended && mddev->sync_thread) in raid_message()
3711 md_wakeup_thread(mddev->sync_thread); in raid_message()
3713 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in raid_message()
3714 if (!mddev->suspended && mddev->thread) in raid_message()
3715 md_wakeup_thread(mddev->thread); in raid_message()
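
raid_message() maps dm message strings onto md recovery bits: "frozen" sets MD_RECOVERY_FROZEN, "recover" sets MD_RECOVERY_RECOVER, "check" stacks MD_RECOVERY_CHECK on top of REQUESTED+SYNC, and "repair" sets REQUESTED+SYNC alone, after which the md thread is woken. A sketch of that mapping; the bit values are illustrative, the real ones live in md.h:

#include <stdio.h>
#include <strings.h>                    /* strcasecmp() */

enum {                                  /* illustrative bit positions, not md.h's */
        MD_RECOVERY_FROZEN      = 1 << 0,
        MD_RECOVERY_RECOVER     = 1 << 1,
        MD_RECOVERY_CHECK       = 1 << 2,
        MD_RECOVERY_REQUESTED   = 1 << 3,
        MD_RECOVERY_SYNC        = 1 << 4,
};

static unsigned raid_message_bits(const char *msg)
{
        if (!strcasecmp(msg, "frozen"))
                return MD_RECOVERY_FROZEN;
        if (!strcasecmp(msg, "recover"))
                return MD_RECOVERY_RECOVER;
        if (!strcasecmp(msg, "check"))
                return MD_RECOVERY_CHECK | MD_RECOVERY_REQUESTED | MD_RECOVERY_SYNC;
        if (!strcasecmp(msg, "repair"))
                return MD_RECOVERY_REQUESTED | MD_RECOVERY_SYNC;
        return 0;                       /* unknown message: no bits set */
}

int main(void)
{
        printf("check  -> 0x%x\n", raid_message_bits("check"));
        printf("repair -> 0x%x\n", raid_message_bits("repair"));
        return 0;
}
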
3778 struct mddev *mddev = &rs->md; in attempt_restore_of_faulty_devices() local
3782 if (!mddev->pers || !mddev->pers->hot_add_disk || !mddev->pers->hot_remove_disk) in attempt_restore_of_faulty_devices()
3787 for (i = 0; i < mddev->raid_disks; i++) { in attempt_restore_of_faulty_devices()
3811 if (mddev->pers->hot_remove_disk(mddev, r)) { in attempt_restore_of_faulty_devices()
3822 if (mddev->pers->hot_add_disk(mddev, r)) { in attempt_restore_of_faulty_devices()
3872 struct mddev *mddev = &rs->md; in rs_update_sbs() local
3873 int ro = mddev->ro; in rs_update_sbs()
3875 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in rs_update_sbs()
3876 mddev->ro = 0; in rs_update_sbs()
3877 md_update_sb(mddev, 1); in rs_update_sbs()
3878 mddev->ro = ro; in rs_update_sbs()
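
rs_update_sbs() shows a small but load-bearing idiom: save mddev->ro, force the array writable so md_update_sb() can push MD_SB_CHANGE_DEVS out to every superblock, then restore the saved state. In miniature, with stubs standing in for the kernel calls:

#include <stdio.h>

static int ro;                          /* models mddev->ro */

static void md_update_sb_stub(void)     /* models md_update_sb(mddev, 1) */
{
        printf("superblocks written (ro=%d)\n", ro);
}

static void rs_update_sbs_model(void)
{
        int saved_ro = ro;

        /* set_bit(MD_SB_CHANGE_DEVS, ...) would happen here */
        ro = 0;                         /* must be writable to sync metadata */
        md_update_sb_stub();
        ro = saved_ro;                  /* restore caller's read-only state */
}

int main(void)
{
        ro = 1;
        rs_update_sbs_model();
        printf("ro restored to %d\n", ro);
        return 0;
}
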
3891 struct mddev *mddev = &rs->md; in rs_start_reshape() local
3892 struct md_personality *pers = mddev->pers; in rs_start_reshape()
3895 set_bit(MD_RECOVERY_WAIT, &mddev->recovery); in rs_start_reshape()
3906 r = pers->check_reshape(mddev); in rs_start_reshape()
3917 r = pers->start_reshape(mddev); in rs_start_reshape()
3938 struct mddev *mddev = &rs->md; in raid_preresume() local
3959 if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) && mddev->bitmap && in raid_preresume()
3960 mddev->bitmap_info.chunksize != to_bytes(rs->requested_bitmap_chunk_sectors)) { in raid_preresume()
3961 r = md_bitmap_resize(mddev->bitmap, mddev->dev_sectors, in raid_preresume()
3969 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in raid_preresume()
3970 if (mddev->recovery_cp && mddev->recovery_cp < MaxSector) { in raid_preresume()
3971 set_bit(MD_RECOVERY_SYNC, &mddev->recovery); in raid_preresume()
3972 mddev->resync_min = mddev->recovery_cp; in raid_preresume()
3979 mddev_lock_nointr(mddev); in raid_preresume()
3981 mddev_unlock(mddev); in raid_preresume()
3993 struct mddev *mddev = &rs->md; in raid_resume() local
4006 if (mddev->delta_disks < 0) in raid_resume()
4009 mddev_lock_nointr(mddev); in raid_resume()
4010 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in raid_resume()
4011 mddev->ro = 0; in raid_resume()
4012 mddev->in_sync = 0; in raid_resume()
4013 mddev_resume(mddev); in raid_resume()
4014 mddev_unlock(mddev); in raid_resume()
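
raid_resume() closes the listing with a fixed sequence under the mddev lock: clear MD_RECOVERY_FROZEN, mark the array writable and out of sync, then mddev_resume(). The ordering, not the stub bodies, is what the sketch below preserves:

#include <stdio.h>

static void mddev_lock(void)    { printf("lock\n"); }
static void mddev_unlock(void)  { printf("unlock\n"); }
static void mddev_resume(void)  { printf("resume\n"); }

int main(void)
{
        int recovery_frozen = 1, ro = 1, in_sync = 1;

        mddev_lock();
        recovery_frozen = 0;    /* clear_bit(MD_RECOVERY_FROZEN, ...) */
        ro = 0;                 /* array becomes writable */
        in_sync = 0;            /* resync/recovery may now proceed */
        mddev_resume();
        mddev_unlock();

        printf("frozen=%d ro=%d in_sync=%d\n", recovery_frozen, ro, in_sync);
        return 0;
}
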