Lines matching refs:rs (uses of struct raid_set *rs in drivers/md/dm-raid.c, the device-mapper RAID target)
230 static void rs_config_backup(struct raid_set *rs, struct rs_layout *l) in rs_config_backup() argument
232 struct mddev *mddev = &rs->md; in rs_config_backup()
239 static void rs_config_restore(struct raid_set *rs, struct rs_layout *l) in rs_config_restore() argument
241 struct mddev *mddev = &rs->md; in rs_config_restore()
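
rs_config_backup() and rs_config_restore() snapshot the mutable mddev layout fields so a speculative takeover/reshape setup can be rolled back if a later step fails. A minimal user-space sketch of that save/try/restore pattern follows; the struct and field names are illustrative stand-ins, not the kernel's struct rs_layout.

#include <stdio.h>

struct layout {
	int new_level;
	int new_layout;
	int new_chunk_sectors;
};

static void config_backup(const struct layout *cur, struct layout *saved)
{
	*saved = *cur;		/* a plain struct copy is all the pattern needs */
}

static void config_restore(struct layout *cur, const struct layout *saved)
{
	*cur = *saved;
}

int main(void)
{
	struct layout md = { .new_level = 5, .new_layout = 2, .new_chunk_sectors = 128 };
	struct layout saved;

	config_backup(&md, &saved);
	md.new_level = 6;		/* speculative takeover attempt */
	config_restore(&md, &saved);	/* attempt failed: roll back */
	printf("level back to %d\n", md.new_level);
	return 0;
}
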
335 static bool rs_is_raid0(struct raid_set *rs) in rs_is_raid0() argument
337 return !rs->md.level; in rs_is_raid0()
341 static bool rs_is_raid1(struct raid_set *rs) in rs_is_raid1() argument
343 return rs->md.level == 1; in rs_is_raid1()
347 static bool rs_is_raid10(struct raid_set *rs) in rs_is_raid10() argument
349 return rs->md.level == 10; in rs_is_raid10()
353 static bool rs_is_raid6(struct raid_set *rs) in rs_is_raid6() argument
355 return rs->md.level == 6; in rs_is_raid6()
359 static bool rs_is_raid456(struct raid_set *rs) in rs_is_raid456() argument
361 return __within_range(rs->md.level, 4, 6); in rs_is_raid456()
366 static bool rs_is_reshapable(struct raid_set *rs) in rs_is_reshapable() argument
368 return rs_is_raid456(rs) || in rs_is_reshapable()
369 (rs_is_raid10(rs) && !__is_raid10_far(rs->md.new_layout)); in rs_is_reshapable()
373 static bool rs_is_recovering(struct raid_set *rs) in rs_is_recovering() argument
375 return rs->md.recovery_cp < rs->dev[0].rdev.sectors; in rs_is_recovering()
379 static bool rs_is_reshaping(struct raid_set *rs) in rs_is_reshaping() argument
381 return rs->md.reshape_position != MaxSector; in rs_is_reshaping()
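
The rs_is_*() predicates above all reduce to checks on a few mddev fields: the raid level, the resync checkpoint, and the MaxSector sentinel md uses to mean "no reshape in progress". A stand-alone model of those checks, with a simplified struct standing in for struct mddev:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MaxSector (~(uint64_t)0)

struct set {
	int level;			/* 0, 1, 4, 5, 6, 10 */
	uint64_t recovery_cp;		/* resync checkpoint */
	uint64_t reshape_position;
	uint64_t dev_sectors;		/* per-device size */
};

static bool is_raid456(const struct set *s)
{
	return s->level >= 4 && s->level <= 6;
}

static bool is_recovering(const struct set *s)
{
	/* a checkpoint short of the device end means resync is pending */
	return s->recovery_cp < s->dev_sectors;
}

static bool is_reshaping(const struct set *s)
{
	return s->reshape_position != MaxSector;
}

int main(void)
{
	struct set s = { .level = 5, .recovery_cp = MaxSector,
			 .reshape_position = MaxSector, .dev_sectors = 1 << 20 };

	printf("raid456=%d recovering=%d reshaping=%d\n",
	       is_raid456(&s), is_recovering(&s), is_reshaping(&s));
	return 0;
}
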
426 static unsigned long __valid_flags(struct raid_set *rs) in __valid_flags() argument
428 if (rt_is_raid0(rs->raid_type)) in __valid_flags()
430 else if (rt_is_raid1(rs->raid_type)) in __valid_flags()
432 else if (rt_is_raid10(rs->raid_type)) in __valid_flags()
434 else if (rt_is_raid45(rs->raid_type)) in __valid_flags()
436 else if (rt_is_raid6(rs->raid_type)) in __valid_flags()
447 static int rs_check_for_valid_flags(struct raid_set *rs) in rs_check_for_valid_flags() argument
449 if (rs->ctr_flags & ~__valid_flags(rs)) { in rs_check_for_valid_flags()
450 rs->ti->error = "Invalid flags combination"; in rs_check_for_valid_flags()
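
__valid_flags() returns a per-level mask of permitted constructor flags, and rs_check_for_valid_flags() rejects the table if any requested bit falls outside it. A compilable sketch of that mask idiom; the flag values here are invented for the example, not dm-raid's:

#include <stdio.h>

#define FLAG_SYNC		(1u << 0)
#define FLAG_NOSYNC		(1u << 1)
#define FLAG_REBUILD		(1u << 2)
#define FLAG_RAID10_COPIES	(1u << 3)

static unsigned valid_flags(int level)
{
	switch (level) {
	case 0:  return 0;	/* raid0: nothing permitted in this toy model */
	case 10: return FLAG_SYNC | FLAG_NOSYNC | FLAG_RAID10_COPIES;
	default: return FLAG_SYNC | FLAG_NOSYNC | FLAG_REBUILD;
	}
}

static int check_flags(int level, unsigned ctr_flags)
{
	if (ctr_flags & ~valid_flags(level)) {
		fprintf(stderr, "Invalid flags combination\n");
		return -22;	/* -EINVAL */
	}
	return 0;
}

int main(void)
{
	/* raid10_copies only makes sense on raid10, so raid5 rejects it */
	printf("raid5+copies  -> %d\n", check_flags(5, FLAG_RAID10_COPIES));
	printf("raid10+copies -> %d\n", check_flags(10, FLAG_RAID10_COPIES));
	return 0;
}
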
533 static int raid10_format_to_md_layout(struct raid_set *rs, in raid10_format_to_md_layout() argument
554 if (!test_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags)) in raid10_format_to_md_layout()
560 if (!test_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags)) in raid10_format_to_md_layout()
621 static void rs_set_capacity(struct raid_set *rs) in rs_set_capacity() argument
623 struct mddev *mddev = &rs->md; in rs_set_capacity()
625 struct gendisk *gendisk = dm_disk(dm_table_get_md(rs->ti->table)); in rs_set_capacity()
642 static void rs_set_cur(struct raid_set *rs) in rs_set_cur() argument
644 struct mddev *mddev = &rs->md; in rs_set_cur()
655 static void rs_set_new(struct raid_set *rs) in rs_set_new() argument
657 struct mddev *mddev = &rs->md; in rs_set_new()
662 mddev->raid_disks = rs->raid_disks; in rs_set_new()
670 struct raid_set *rs; in raid_set_alloc() local
677 rs = kzalloc(sizeof(*rs) + raid_devs * sizeof(rs->dev[0]), GFP_KERNEL); in raid_set_alloc()
678 if (!rs) { in raid_set_alloc()
683 mddev_init(&rs->md); in raid_set_alloc()
685 rs->raid_disks = raid_devs; in raid_set_alloc()
686 rs->delta_disks = 0; in raid_set_alloc()
688 rs->ti = ti; in raid_set_alloc()
689 rs->raid_type = raid_type; in raid_set_alloc()
690 rs->stripe_cache_entries = 256; in raid_set_alloc()
691 rs->md.raid_disks = raid_devs; in raid_set_alloc()
692 rs->md.level = raid_type->level; in raid_set_alloc()
693 rs->md.new_level = rs->md.level; in raid_set_alloc()
694 rs->md.layout = raid_type->algorithm; in raid_set_alloc()
695 rs->md.new_layout = rs->md.layout; in raid_set_alloc()
696 rs->md.delta_disks = 0; in raid_set_alloc()
697 rs->md.recovery_cp = MaxSector; in raid_set_alloc()
700 md_rdev_init(&rs->dev[i].rdev); in raid_set_alloc()
711 return rs; in raid_set_alloc()
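
raid_set_alloc() sizes a single kzalloc() for the raid_set header plus raid_devs trailing per-device slots, as the sizeof(*rs) + raid_devs * sizeof(rs->dev[0]) expression shows. A user-space model of that trailing-array layout using a C99 flexible array member (recent kernels would spell the size with struct_size()):

#include <stdio.h>
#include <stdlib.h>

struct dev_slot {
	int raid_disk;
};

struct set {
	unsigned raid_disks;
	struct dev_slot dev[];	/* trailing per-device array */
};

static struct set *set_alloc(unsigned raid_devs)
{
	/* one zeroed allocation covers the header and all device slots */
	struct set *s = calloc(1, sizeof(*s) + raid_devs * sizeof(s->dev[0]));

	if (!s)
		return NULL;
	s->raid_disks = raid_devs;
	for (unsigned i = 0; i < raid_devs; i++)
		s->dev[i].raid_disk = (int)i;
	return s;
}

int main(void)
{
	struct set *s = set_alloc(4);

	if (!s)
		return 1;
	printf("disk[3] = %d\n", s->dev[3].raid_disk);
	free(s);
	return 0;
}
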
714 static void raid_set_free(struct raid_set *rs) in raid_set_free() argument
718 for (i = 0; i < rs->raid_disks; i++) { in raid_set_free()
719 if (rs->dev[i].meta_dev) in raid_set_free()
720 dm_put_device(rs->ti, rs->dev[i].meta_dev); in raid_set_free()
721 md_rdev_clear(&rs->dev[i].rdev); in raid_set_free()
722 if (rs->dev[i].data_dev) in raid_set_free()
723 dm_put_device(rs->ti, rs->dev[i].data_dev); in raid_set_free()
726 kfree(rs); in raid_set_free()
745 static int parse_dev_params(struct raid_set *rs, struct dm_arg_set *as) in parse_dev_params() argument
758 for (i = 0; i < rs->raid_disks; i++) { in parse_dev_params()
759 rs->dev[i].rdev.raid_disk = i; in parse_dev_params()
761 rs->dev[i].meta_dev = NULL; in parse_dev_params()
762 rs->dev[i].data_dev = NULL; in parse_dev_params()
768 rs->dev[i].rdev.data_offset = 0; in parse_dev_params()
769 rs->dev[i].rdev.mddev = &rs->md; in parse_dev_params()
776 r = dm_get_device(rs->ti, arg, dm_table_get_mode(rs->ti->table), in parse_dev_params()
777 &rs->dev[i].meta_dev); in parse_dev_params()
779 rs->ti->error = "RAID metadata device lookup failure"; in parse_dev_params()
783 rs->dev[i].rdev.sb_page = alloc_page(GFP_KERNEL); in parse_dev_params()
784 if (!rs->dev[i].rdev.sb_page) { in parse_dev_params()
785 rs->ti->error = "Failed to allocate superblock page"; in parse_dev_params()
795 if (!test_bit(In_sync, &rs->dev[i].rdev.flags) && in parse_dev_params()
796 (!rs->dev[i].rdev.recovery_offset)) { in parse_dev_params()
797 rs->ti->error = "Drive designated for rebuild not specified"; in parse_dev_params()
801 if (rs->dev[i].meta_dev) { in parse_dev_params()
802 rs->ti->error = "No data device supplied with metadata device"; in parse_dev_params()
809 r = dm_get_device(rs->ti, arg, dm_table_get_mode(rs->ti->table), in parse_dev_params()
810 &rs->dev[i].data_dev); in parse_dev_params()
812 rs->ti->error = "RAID device lookup failure"; in parse_dev_params()
816 if (rs->dev[i].meta_dev) { in parse_dev_params()
818 rs->dev[i].rdev.meta_bdev = rs->dev[i].meta_dev->bdev; in parse_dev_params()
820 rs->dev[i].rdev.bdev = rs->dev[i].data_dev->bdev; in parse_dev_params()
821 list_add_tail(&rs->dev[i].rdev.same_set, &rs->md.disks); in parse_dev_params()
822 if (!test_bit(In_sync, &rs->dev[i].rdev.flags)) in parse_dev_params()
827 rs->md.external = 0; in parse_dev_params()
828 rs->md.persistent = 1; in parse_dev_params()
829 rs->md.major_version = 2; in parse_dev_params()
830 } else if (rebuild && !rs->md.recovery_cp) { in parse_dev_params()
842 rs->ti->error = "Unable to rebuild drive while array is not in-sync"; in parse_dev_params()
859 static int validate_region_size(struct raid_set *rs, unsigned long region_size) in validate_region_size() argument
861 unsigned long min_region_size = rs->ti->len / (1 << 21); in validate_region_size()
863 if (rs_is_raid0(rs)) in validate_region_size()
883 if (region_size > rs->ti->len) { in validate_region_size()
884 rs->ti->error = "Supplied region size is too large"; in validate_region_size()
891 rs->ti->error = "Supplied region size is too small"; in validate_region_size()
896 rs->ti->error = "Region size is not a power of 2"; in validate_region_size()
900 if (region_size < rs->md.chunk_sectors) { in validate_region_size()
901 rs->ti->error = "Region size is smaller than the chunk size"; in validate_region_size()
909 rs->md.bitmap_info.chunksize = to_bytes(region_size); in validate_region_size()
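
validate_region_size() accepts a bitmap region only if it is a power of two, no larger than the target, at least the chunk size, and above a minimum derived from ti->len / 2^21, which caps the number of bitmap regions. A simplified user-space model; the kernel applies further defaulting heuristics not shown here:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool is_power_of_2(uint64_t n)
{
	return n && !(n & (n - 1));
}

static int validate_region_size(uint64_t ti_len, uint64_t chunk_sectors,
				uint64_t region_size)
{
	uint64_t min_region_size = ti_len / (1ULL << 21);	/* cap region count */

	if (region_size > ti_len)
		return -1;	/* "Supplied region size is too large" */
	if (region_size < min_region_size)
		return -1;	/* "Supplied region size is too small" */
	if (!is_power_of_2(region_size))
		return -1;	/* "Region size is not a power of 2" */
	if (region_size < chunk_sectors)
		return -1;	/* "Region size is smaller than the chunk size" */
	return 0;
}

int main(void)
{
	/* 1 GiB target (2097152 sectors), 128-sector chunks */
	printf("%d\n", validate_region_size(2097152, 128, 8192));	/* 0: ok */
	printf("%d\n", validate_region_size(2097152, 128, 100));	/* -1: not pow2 */
	return 0;
}
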
923 static int validate_raid_redundancy(struct raid_set *rs) in validate_raid_redundancy() argument
929 for (i = 0; i < rs->md.raid_disks; i++) in validate_raid_redundancy()
930 if (!test_bit(In_sync, &rs->dev[i].rdev.flags) || in validate_raid_redundancy()
931 !rs->dev[i].rdev.sb_page) in validate_raid_redundancy()
934 switch (rs->raid_type->level) { in validate_raid_redundancy()
938 if (rebuild_cnt >= rs->md.raid_disks) in validate_raid_redundancy()
944 if (rebuild_cnt > rs->raid_type->parity_devs) in validate_raid_redundancy()
948 copies = raid10_md_layout_to_copies(rs->md.new_layout); in validate_raid_redundancy()
966 if (__is_raid10_near(rs->md.new_layout)) { in validate_raid_redundancy()
967 for (i = 0; i < rs->md.raid_disks; i++) { in validate_raid_redundancy()
970 if ((!rs->dev[i].rdev.sb_page || in validate_raid_redundancy()
971 !test_bit(In_sync, &rs->dev[i].rdev.flags)) && in validate_raid_redundancy()
990 group_size = (rs->md.raid_disks / copies); in validate_raid_redundancy()
991 last_group_start = (rs->md.raid_disks / group_size) - 1; in validate_raid_redundancy()
993 for (i = 0; i < rs->md.raid_disks; i++) { in validate_raid_redundancy()
996 if ((!rs->dev[i].rdev.sb_page || in validate_raid_redundancy()
997 !test_bit(In_sync, &rs->dev[i].rdev.flags)) && in validate_raid_redundancy()
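
For the raid10 "near" layout, validate_raid_redundancy() walks the devices in groups of 'copies' adjacent members and fails if any whole group is unhealthy, since near copies live on neighbouring disks. A stand-alone model of that per-group check, with a failed[] array standing in for the !In_sync / missing-superblock tests:

#include <stdbool.h>
#include <stdio.h>

static int check_raid10_near(const bool *failed, int raid_disks, int copies)
{
	int i, rebuilds_per_group = 0;

	for (i = 0; i < raid_disks; i++) {
		/* a new mirror group starts every 'copies' devices */
		if (!(i % copies))
			rebuilds_per_group = 0;
		if (failed[i] && ++rebuilds_per_group >= copies)
			return -1;	/* whole group lost: no redundancy */
	}
	return 0;
}

int main(void)
{
	/* 4 disks, 2 copies: mirror groups {0,1} and {2,3} */
	bool one_per_group[4] = { true, false, true, false };	/* survivable */
	bool whole_group[4]   = { true, true, false, false };	/* fatal */

	printf("%d\n", check_raid10_near(one_per_group, 4, 2));	/* 0 */
	printf("%d\n", check_raid10_near(whole_group, 4, 2));	/* -1 */
	return 0;
}
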
1036 static int parse_raid_params(struct raid_set *rs, struct dm_arg_set *as, in parse_raid_params() argument
1046 struct raid_type *rt = rs->raid_type; in parse_raid_params()
1052 rs->ti->error = "Bad numerical argument given for chunk_size"; in parse_raid_params()
1065 rs->ti->error = "Chunk size must be a power of 2"; in parse_raid_params()
1068 rs->ti->error = "Chunk size value is too small"; in parse_raid_params()
1072 rs->md.new_chunk_sectors = rs->md.chunk_sectors = value; in parse_raid_params()
1091 for (i = 0; i < rs->raid_disks; i++) { in parse_raid_params()
1092 set_bit(In_sync, &rs->dev[i].rdev.flags); in parse_raid_params()
1093 rs->dev[i].rdev.recovery_offset = MaxSector; in parse_raid_params()
1102 rs->ti->error = "Not enough raid parameters given"; in parse_raid_params()
1107 if (test_and_set_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)) { in parse_raid_params()
1108 rs->ti->error = "Only one 'nosync' argument allowed"; in parse_raid_params()
1114 if (test_and_set_bit(__CTR_FLAG_SYNC, &rs->ctr_flags)) { in parse_raid_params()
1115 rs->ti->error = "Only one 'sync' argument allowed"; in parse_raid_params()
1121 if (test_and_set_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags)) { in parse_raid_params()
1122 rs->ti->error = "Only one 'raid10_use_near_sets' argument allowed"; in parse_raid_params()
1131 rs->ti->error = "Wrong number of raid parameters given"; in parse_raid_params()
1140 if (test_and_set_bit(__CTR_FLAG_RAID10_FORMAT, &rs->ctr_flags)) { in parse_raid_params()
1141 rs->ti->error = "Only one 'raid10_format' argument pair allowed"; in parse_raid_params()
1145 rs->ti->error = "'raid10_format' is an invalid parameter for this RAID type"; in parse_raid_params()
1150 rs->ti->error = "Invalid 'raid10_format' value given"; in parse_raid_params()
1157 rs->ti->error = "Bad numerical argument given in raid params"; in parse_raid_params()
1167 if (!__within_range(value, 0, rs->raid_disks - 1)) { in parse_raid_params()
1168 rs->ti->error = "Invalid rebuild index given"; in parse_raid_params()
1172 if (test_and_set_bit(value, (void *) rs->rebuild_disks)) { in parse_raid_params()
1173 rs->ti->error = "rebuild for this index already given"; in parse_raid_params()
1177 rd = rs->dev + value; in parse_raid_params()
1181 set_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags); in parse_raid_params()
1184 rs->ti->error = "write_mostly option is only valid for RAID1"; in parse_raid_params()
1188 if (!__within_range(value, 0, rs->md.raid_disks - 1)) { in parse_raid_params()
1189 rs->ti->error = "Invalid write_mostly index given"; in parse_raid_params()
1194 set_bit(WriteMostly, &rs->dev[value].rdev.flags); in parse_raid_params()
1195 set_bit(__CTR_FLAG_WRITE_MOSTLY, &rs->ctr_flags); in parse_raid_params()
1198 rs->ti->error = "max_write_behind option is only valid for RAID1"; in parse_raid_params()
1202 if (test_and_set_bit(__CTR_FLAG_MAX_WRITE_BEHIND, &rs->ctr_flags)) { in parse_raid_params()
1203 rs->ti->error = "Only one max_write_behind argument pair allowed"; in parse_raid_params()
1213 rs->ti->error = "Max write-behind limit out of range"; in parse_raid_params()
1217 rs->md.bitmap_info.max_write_behind = value; in parse_raid_params()
1219 if (test_and_set_bit(__CTR_FLAG_DAEMON_SLEEP, &rs->ctr_flags)) { in parse_raid_params()
1220 rs->ti->error = "Only one daemon_sleep argument pair allowed"; in parse_raid_params()
1224 rs->ti->error = "daemon sleep period out of range"; in parse_raid_params()
1227 rs->md.bitmap_info.daemon_sleep = value; in parse_raid_params()
1230 if (test_and_set_bit(__CTR_FLAG_DATA_OFFSET, &rs->ctr_flags)) { in parse_raid_params()
1231 rs->ti->error = "Only one data_offset argument pair allowed"; in parse_raid_params()
1237 rs->ti->error = "Bogus data_offset value"; in parse_raid_params()
1240 rs->data_offset = value; in parse_raid_params()
1243 if (test_and_set_bit(__CTR_FLAG_DELTA_DISKS, &rs->ctr_flags)) { in parse_raid_params()
1244 rs->ti->error = "Only one delta_disks argument pair allowed"; in parse_raid_params()
1249 rs->ti->error = "Too many delta_disks requested"; in parse_raid_params()
1253 rs->delta_disks = value; in parse_raid_params()
1255 if (test_and_set_bit(__CTR_FLAG_STRIPE_CACHE, &rs->ctr_flags)) { in parse_raid_params()
1256 rs->ti->error = "Only one stripe_cache argument pair allowed"; in parse_raid_params()
1261 rs->ti->error = "Inappropriate argument: stripe_cache"; in parse_raid_params()
1265 rs->stripe_cache_entries = value; in parse_raid_params()
1267 if (test_and_set_bit(__CTR_FLAG_MIN_RECOVERY_RATE, &rs->ctr_flags)) { in parse_raid_params()
1268 rs->ti->error = "Only one min_recovery_rate argument pair allowed"; in parse_raid_params()
1272 rs->ti->error = "min_recovery_rate out of range"; in parse_raid_params()
1275 rs->md.sync_speed_min = (int)value; in parse_raid_params()
1277 if (test_and_set_bit(__CTR_FLAG_MAX_RECOVERY_RATE, &rs->ctr_flags)) { in parse_raid_params()
1278 rs->ti->error = "Only one max_recovery_rate argument pair allowed"; in parse_raid_params()
1282 rs->ti->error = "max_recovery_rate out of range"; in parse_raid_params()
1285 rs->md.sync_speed_max = (int)value; in parse_raid_params()
1287 if (test_and_set_bit(__CTR_FLAG_REGION_SIZE, &rs->ctr_flags)) { in parse_raid_params()
1288 rs->ti->error = "Only one region_size argument pair allowed"; in parse_raid_params()
1293 rs->requested_bitmap_chunk_sectors = value; in parse_raid_params()
1295 if (test_and_set_bit(__CTR_FLAG_RAID10_COPIES, &rs->ctr_flags)) { in parse_raid_params()
1296 rs->ti->error = "Only one raid10_copies argument pair allowed"; in parse_raid_params()
1300 if (!__within_range(value, 2, rs->md.raid_disks)) { in parse_raid_params()
1301 rs->ti->error = "Bad value for 'raid10_copies'"; in parse_raid_params()
1308 rs->ti->error = "Unable to parse RAID parameter"; in parse_raid_params()
1313 if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags) && in parse_raid_params()
1314 test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)) { in parse_raid_params()
1315 rs->ti->error = "sync and nosync are mutually exclusive"; in parse_raid_params()
1319 if (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags) && in parse_raid_params()
1320 (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags) || in parse_raid_params()
1321 test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags))) { in parse_raid_params()
1322 rs->ti->error = "sync/nosync and rebuild are mutually exclusive"; in parse_raid_params()
1326 if (write_mostly >= rs->md.raid_disks) { in parse_raid_params()
1327 rs->ti->error = "Can't set all raid1 devices to write_mostly"; in parse_raid_params()
1331 if (validate_region_size(rs, region_size)) in parse_raid_params()
1334 if (rs->md.chunk_sectors) in parse_raid_params()
1335 max_io_len = rs->md.chunk_sectors; in parse_raid_params()
1339 if (dm_set_target_max_io_len(rs->ti, max_io_len)) in parse_raid_params()
1343 if (raid10_copies > rs->md.raid_disks) { in parse_raid_params()
1344 rs->ti->error = "Not enough devices to satisfy specification"; in parse_raid_params()
1348 rs->md.new_layout = raid10_format_to_md_layout(rs, raid10_format, raid10_copies); in parse_raid_params()
1349 if (rs->md.new_layout < 0) { in parse_raid_params()
1350 rs->ti->error = "Error getting raid10 format"; in parse_raid_params()
1351 return rs->md.new_layout; in parse_raid_params()
1354 rt = get_raid_type_by_ll(10, rs->md.new_layout); in parse_raid_params()
1356 rs->ti->error = "Failed to recognize new raid10 layout"; in parse_raid_params()
1362 test_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags)) { in parse_raid_params()
1363 rs->ti->error = "RAID10 format 'near' and 'raid10_use_near_sets' are incompatible"; in parse_raid_params()
1368 rs->raid10_copies = raid10_copies; in parse_raid_params()
1371 rs->md.persistent = 0; in parse_raid_params()
1372 rs->md.external = 1; in parse_raid_params()
1375 return rs_check_for_valid_flags(rs); in parse_raid_params()
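
Throughout parse_raid_params(), repeated keywords are rejected with a single primitive: test_and_set_bit() returns the previous bit value, so a second "sync", "region_size", etc. trips the "Only one ... allowed" error. A single-threaded model, with a plain read-modify-write standing in for the kernel's atomic bit op:

#include <stdio.h>

static int test_and_set_bit(unsigned nr, unsigned long *addr)
{
	unsigned long mask = 1UL << nr;
	int old = !!(*addr & mask);

	*addr |= mask;
	return old;	/* nonzero: the flag was already set */
}

enum { FLAG_SYNC, FLAG_NOSYNC, FLAG_REGION_SIZE };

int main(void)
{
	unsigned long ctr_flags = 0;
	const unsigned args[] = { FLAG_SYNC, FLAG_REGION_SIZE, FLAG_SYNC };

	for (unsigned i = 0; i < sizeof(args) / sizeof(args[0]); i++)
		if (test_and_set_bit(args[i], &ctr_flags)) {
			printf("Only one of argument %u allowed\n", args[i]);
			return 1;	/* duplicate keyword rejected */
		}
	return 0;
}
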
1379 static int rs_set_raid456_stripe_cache(struct raid_set *rs) in rs_set_raid456_stripe_cache() argument
1383 struct mddev *mddev = &rs->md; in rs_set_raid456_stripe_cache()
1385 uint32_t nr_stripes = rs->stripe_cache_entries; in rs_set_raid456_stripe_cache()
1387 if (!rt_is_raid456(rs->raid_type)) { in rs_set_raid456_stripe_cache()
1388 rs->ti->error = "Inappropriate raid level; cannot change stripe_cache size"; in rs_set_raid456_stripe_cache()
1400 rs->ti->error = "Cannot change stripe_cache size on inactive RAID set"; in rs_set_raid456_stripe_cache()
1408 rs->ti->error = "Failed to set raid4/5/6 stripe cache size"; in rs_set_raid456_stripe_cache()
1419 static unsigned int mddev_data_stripes(struct raid_set *rs) in mddev_data_stripes() argument
1421 return rs->md.raid_disks - rs->raid_type->parity_devs; in mddev_data_stripes()
1425 static unsigned int rs_data_stripes(struct raid_set *rs) in rs_data_stripes() argument
1427 return rs->raid_disks - rs->raid_type->parity_devs; in rs_data_stripes()
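
Both helpers compute the same quantity from different disk counts: data stripes are the member disks minus the level's parity devices (one for raid4/5, two for raid6). A worked example:

#include <stdio.h>

static unsigned data_stripes(unsigned raid_disks, unsigned parity_devs)
{
	return raid_disks - parity_devs;
}

int main(void)
{
	printf("raid5, 4 disks: %u data stripes\n", data_stripes(4, 1));	/* 3 */
	printf("raid6, 6 disks: %u data stripes\n", data_stripes(6, 2));	/* 4 */
	return 0;
}
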
1431 static int rs_set_dev_and_array_sectors(struct raid_set *rs, bool use_mddev) in rs_set_dev_and_array_sectors() argument
1435 struct mddev *mddev = &rs->md; in rs_set_dev_and_array_sectors()
1437 sector_t array_sectors = rs->ti->len, dev_sectors = rs->ti->len; in rs_set_dev_and_array_sectors()
1441 data_stripes = mddev_data_stripes(rs); in rs_set_dev_and_array_sectors()
1443 delta_disks = rs->delta_disks; in rs_set_dev_and_array_sectors()
1444 data_stripes = rs_data_stripes(rs); in rs_set_dev_and_array_sectors()
1448 if (rt_is_raid1(rs->raid_type)) in rs_set_dev_and_array_sectors()
1450 else if (rt_is_raid10(rs->raid_type)) { in rs_set_dev_and_array_sectors()
1451 if (rs->raid10_copies < 2 || in rs_set_dev_and_array_sectors()
1453 rs->ti->error = "Bogus raid10 data copies or delta disks"; in rs_set_dev_and_array_sectors()
1457 dev_sectors *= rs->raid10_copies; in rs_set_dev_and_array_sectors()
1462 if (sector_div(array_sectors, rs->raid10_copies)) in rs_set_dev_and_array_sectors()
1480 rs->ti->error = "Target length not divisible by number of data devices"; in rs_set_dev_and_array_sectors()
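
For raid10, rs_set_dev_and_array_sectors() scales the target length by the copy count, spreads it over the data stripes, and insists both divisions come out exact; the kernel's sector_div() divides in place and returns the remainder. A user-space model of that arithmetic with plain 64-bit division, reconstructed from the checks visible above:

#include <stdint.h>
#include <stdio.h>

static int raid10_sizes(uint64_t ti_len, unsigned copies, unsigned data_stripes,
			int delta_disks, uint64_t *dev_sectors,
			uint64_t *array_sectors)
{
	uint64_t dev = ti_len * copies;
	uint64_t array;

	if (copies < 2 || delta_disks < 0)
		return -1;	/* "Bogus raid10 data copies or delta disks" */
	if (dev % data_stripes)
		return -1;	/* target length does not stripe evenly */
	dev /= data_stripes;

	array = (data_stripes + (unsigned)delta_disks) * dev;
	if (array % copies)
		return -1;	/* "Target length not divisible ..." */
	*dev_sectors = dev;
	*array_sectors = array / copies;
	return 0;
}

int main(void)
{
	uint64_t dev, array;

	/* 1000-sector target, 2 copies over 4 devices: 500 sectors/device */
	if (!raid10_sizes(1000, 2, 4, 0, &dev, &array))
		printf("dev=%llu array=%llu\n",
		       (unsigned long long)dev, (unsigned long long)array);
	return 0;
}
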
1485 static void __rs_setup_recovery(struct raid_set *rs, sector_t dev_sectors) in __rs_setup_recovery() argument
1488 if (rs_is_raid0(rs)) in __rs_setup_recovery()
1489 rs->md.recovery_cp = MaxSector; in __rs_setup_recovery()
1495 else if (rs_is_raid6(rs)) in __rs_setup_recovery()
1496 rs->md.recovery_cp = dev_sectors; in __rs_setup_recovery()
1502 rs->md.recovery_cp = test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags) in __rs_setup_recovery()
1507 static void rs_setup_recovery(struct raid_set *rs, sector_t dev_sectors) in rs_setup_recovery() argument
1511 __rs_setup_recovery(rs, 0); in rs_setup_recovery()
1514 __rs_setup_recovery(rs, MaxSector); in rs_setup_recovery()
1515 else if (rs->dev[0].rdev.sectors < dev_sectors) in rs_setup_recovery()
1517 __rs_setup_recovery(rs, rs->dev[0].rdev.sectors); in rs_setup_recovery()
1519 __rs_setup_recovery(rs, MaxSector); in rs_setup_recovery()
1524 struct raid_set *rs = container_of(ws, struct raid_set, md.event_work); in do_table_event() local
1527 if (!rs_is_reshaping(rs)) in do_table_event()
1528 rs_set_capacity(rs); in do_table_event()
1529 dm_table_event(rs->ti->table); in do_table_event()
1534 struct raid_set *rs = container_of(cb, struct raid_set, callbacks); in raid_is_congested() local
1536 return mddev_congested(&rs->md, bits); in raid_is_congested()
1545 static int rs_check_takeover(struct raid_set *rs) in rs_check_takeover() argument
1547 struct mddev *mddev = &rs->md; in rs_check_takeover()
1550 if (rs->md.degraded) { in rs_check_takeover()
1551 rs->ti->error = "Can't takeover degraded raid set"; in rs_check_takeover()
1555 if (rs_is_reshaping(rs)) { in rs_check_takeover()
1556 rs->ti->error = "Can't takeover reshaping raid set"; in rs_check_takeover()
1569 !(rs->raid_disks % mddev->raid_disks)) in rs_check_takeover()
1694 rs->ti->error = "takeover not possible"; in rs_check_takeover()
1699 static bool rs_takeover_requested(struct raid_set *rs) in rs_takeover_requested() argument
1701 return rs->md.new_level != rs->md.level; in rs_takeover_requested()
1705 static bool rs_reshape_requested(struct raid_set *rs) in rs_reshape_requested() argument
1708 struct mddev *mddev = &rs->md; in rs_reshape_requested()
1710 if (rs_takeover_requested(rs)) in rs_reshape_requested()
1718 rs->delta_disks; in rs_reshape_requested()
1722 if (rs->delta_disks) in rs_reshape_requested()
1723 return !!rs->delta_disks; in rs_reshape_requested()
1726 mddev->raid_disks != rs->raid_disks; in rs_reshape_requested()
1732 rs->delta_disks >= 0; in rs_reshape_requested()
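
rs_takeover_requested() and rs_reshape_requested() split conversion requests: a different target level is a takeover, and with the level unchanged a new layout, chunk size, or disk count asks for a reshape. A simplified model of that decision; the raid10- and raid1-specific branches of the real function are omitted:

#include <stdbool.h>
#include <stdio.h>

struct conv {
	int level, new_level;
	int layout, new_layout;
	int chunk, new_chunk;
	int raid_disks, target_disks;
};

static bool takeover_requested(const struct conv *c)
{
	return c->new_level != c->level;
}

static bool reshape_requested(const struct conv *c)
{
	if (takeover_requested(c))
		return false;	/* a level change is handled as takeover */
	return c->new_layout != c->layout ||
	       c->new_chunk != c->chunk ||
	       c->target_disks != c->raid_disks;
}

int main(void)
{
	/* raid5 staying raid5 but growing from 4 to 5 disks: a reshape */
	struct conv grow = { .level = 5, .new_level = 5, .layout = 2,
			     .new_layout = 2, .chunk = 128, .new_chunk = 128,
			     .raid_disks = 4, .target_disks = 5 };

	printf("takeover=%d reshape=%d\n",
	       takeover_requested(&grow), reshape_requested(&grow));
	return 0;
}
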
1836 static int rs_check_reshape(struct raid_set *rs) in rs_check_reshape() argument
1838 struct mddev *mddev = &rs->md; in rs_check_reshape()
1841 rs->ti->error = "Reshape not supported"; in rs_check_reshape()
1843 rs->ti->error = "Can't reshape degraded raid set"; in rs_check_reshape()
1844 else if (rs_is_recovering(rs)) in rs_check_reshape()
1845 rs->ti->error = "Convert request on recovering raid set prohibited"; in rs_check_reshape()
1846 else if (rs_is_reshaping(rs)) in rs_check_reshape()
1847 rs->ti->error = "raid set already reshaping!"; in rs_check_reshape()
1848 else if (!(rs_is_raid1(rs) || rs_is_raid10(rs) || rs_is_raid456(rs))) in rs_check_reshape()
1849 rs->ti->error = "Reshaping only supported for raid1/4/5/6/10"; in rs_check_reshape()
1908 struct raid_set *rs = container_of(mddev, struct raid_set, md); in super_sync() local
1920 for (i = 0; i < rs->raid_disks; i++) in super_sync()
1921 if (!rs->dev[i].data_dev || test_bit(Faulty, &rs->dev[i].rdev.flags)) { in super_sync()
2031 static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev) in super_init_validation() argument
2035 struct mddev *mddev = &rs->md; in super_init_validation()
2071 if (test_bit(__CTR_FLAG_DELTA_DISKS, &rs->ctr_flags)) { in super_init_validation()
2083 rs->raid_type = get_raid_type_by_ll(mddev->level, mddev->layout); in super_init_validation()
2111 if (!rt_is_raid1(rs->raid_type) && in super_init_validation()
2121 rs_set_new(rs); in super_init_validation()
2124 if (!test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)) in super_init_validation()
2159 if (new_devs == rs->raid_disks || !rebuilds) { in super_init_validation()
2161 if (new_devs == 1 && !rs->delta_disks) in super_init_validation()
2163 if (new_devs == rs->raid_disks) { in super_init_validation()
2167 new_devs != rs->delta_disks) { in super_init_validation()
2182 } else if (rs_is_recovering(rs)) { in super_init_validation()
2186 } else if (rs_is_reshaping(rs)) { in super_init_validation()
2216 rs->raid_disks % rs->raid10_copies) { in super_init_validation()
2217 rs->ti->error = in super_init_validation()
2224 } else if (!(rs_is_raid10(rs) && rt_is_raid0(rs->raid_type)) && in super_init_validation()
2225 !(rs_is_raid0(rs) && rt_is_raid10(rs->raid_type)) && in super_init_validation()
2226 !rt_is_raid1(rs->raid_type)) { in super_init_validation()
2227 rs->ti->error = "Cannot change device positions in raid set"; in super_init_validation()
2246 static int super_validate(struct raid_set *rs, struct md_rdev *rdev) in super_validate() argument
2248 struct mddev *mddev = &rs->md; in super_validate()
2251 if (rs_is_raid0(rs) || !rdev->sb_page) in super_validate()
2260 if (!mddev->events && super_init_validation(rs, rdev)) in super_validate()
2265 rs->ti->error = "Unable to assemble array: Unknown flag(s) in compatible feature flags"; in super_validate()
2270 rs->ti->error = "Unable to assemble array: No incompatible feature flags supported yet"; in super_validate()
2275 mddev->bitmap_info.offset = rt_is_raid0(rs->raid_type) ? 0 : to_sector(4096); in super_validate()
2288 else if (!rs_is_reshaping(rs)) in super_validate()
2311 static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs) in analyse_superblocks() argument
2316 struct mddev *mddev = &rs->md; in analyse_superblocks()
2330 if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags)) in analyse_superblocks()
2349 if (rs_is_raid0(rs)) in analyse_superblocks()
2385 if (validate_raid_redundancy(rs)) { in analyse_superblocks()
2386 rs->ti->error = "Insufficient redundancy to activate array"; in analyse_superblocks()
2394 rs->ti->error = "Unable to assemble array: Invalid superblocks"; in analyse_superblocks()
2395 if (super_validate(rs, freshest)) in analyse_superblocks()
2399 if ((rdev != freshest) && super_validate(rs, rdev)) in analyse_superblocks()
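
analyse_superblocks() designates the member whose superblock carries the highest event count as "freshest", validates it first, then validates every other device against it. A user-space model of the selection step:

#include <stdint.h>
#include <stdio.h>

struct member {
	const char *name;
	uint64_t events;	/* superblock update generation */
};

static const struct member *find_freshest(const struct member *m, int n)
{
	const struct member *freshest = NULL;

	for (int i = 0; i < n; i++)
		if (!freshest || m[i].events > freshest->events)
			freshest = &m[i];
	return freshest;
}

int main(void)
{
	struct member devs[] = {
		{ "sda", 41 }, { "sdb", 42 }, { "sdc", 42 },
	};

	/* sdb wins (ties keep the first seen); sda is stale by one event */
	printf("freshest: %s\n", find_freshest(devs, 3)->name);
	return 0;
}
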
2412 static int rs_adjust_data_offsets(struct raid_set *rs) in rs_adjust_data_offsets() argument
2418 if (!test_bit(__CTR_FLAG_DATA_OFFSET, &rs->ctr_flags)) { in rs_adjust_data_offsets()
2419 if (!rs_is_reshapable(rs)) in rs_adjust_data_offsets()
2426 rdev = &rs->dev[0].rdev; in rs_adjust_data_offsets()
2428 if (rs->delta_disks < 0) { in rs_adjust_data_offsets()
2438 new_data_offset = rs->data_offset; in rs_adjust_data_offsets()
2440 } else if (rs->delta_disks > 0) { in rs_adjust_data_offsets()
2449 data_offset = rs->data_offset; in rs_adjust_data_offsets()
2471 data_offset = rs->data_offset ? rdev->data_offset : 0; in rs_adjust_data_offsets()
2472 new_data_offset = data_offset ? 0 : rs->data_offset; in rs_adjust_data_offsets()
2473 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags); in rs_adjust_data_offsets()
2479 if (rs->data_offset && in rs_adjust_data_offsets()
2481 rs->ti->error = data_offset ? "No space for forward reshape" : in rs_adjust_data_offsets()
2487 rdev_for_each(rdev, &rs->md) { in rs_adjust_data_offsets()
2496 static void __reorder_raid_disk_indexes(struct raid_set *rs) in __reorder_raid_disk_indexes() argument
2501 rdev_for_each(rdev, &rs->md) { in __reorder_raid_disk_indexes()
2510 static int rs_setup_takeover(struct raid_set *rs) in rs_setup_takeover() argument
2512 struct mddev *mddev = &rs->md; in rs_setup_takeover()
2514 unsigned int d = mddev->raid_disks = rs->raid_disks; in rs_setup_takeover()
2515 sector_t new_data_offset = rs->dev[0].rdev.data_offset ? 0 : rs->data_offset; in rs_setup_takeover()
2517 if (rt_is_raid10(rs->raid_type)) { in rs_setup_takeover()
2520 __reorder_raid_disk_indexes(rs); in rs_setup_takeover()
2523 mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_FAR, in rs_setup_takeover()
2524 rs->raid10_copies); in rs_setup_takeover()
2527 mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_NEAR, in rs_setup_takeover()
2528 rs->raid_disks); in rs_setup_takeover()
2538 rdev = &rs->dev[d].rdev; in rs_setup_takeover()
2540 if (test_bit(d, (void *) rs->rebuild_disks)) { in rs_setup_takeover()
2555 static int rs_prepare_reshape(struct raid_set *rs) in rs_prepare_reshape() argument
2558 struct mddev *mddev = &rs->md; in rs_prepare_reshape()
2560 if (rs_is_raid10(rs)) { in rs_prepare_reshape()
2561 if (rs->raid_disks != mddev->raid_disks && in rs_prepare_reshape()
2563 rs->raid10_copies && in rs_prepare_reshape()
2564 rs->raid10_copies != __raid10_near_copies(mddev->layout)) { in rs_prepare_reshape()
2571 if (rs->raid_disks % rs->raid10_copies) { in rs_prepare_reshape()
2572 rs->ti->error = "Can't reshape raid10 mirror groups"; in rs_prepare_reshape()
2577 __reorder_raid_disk_indexes(rs); in rs_prepare_reshape()
2578 mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_NEAR, in rs_prepare_reshape()
2579 rs->raid10_copies); in rs_prepare_reshape()
2585 } else if (rs_is_raid456(rs)) in rs_prepare_reshape()
2588 else if (rs_is_raid1(rs)) { in rs_prepare_reshape()
2589 if (rs->delta_disks) { in rs_prepare_reshape()
2591 mddev->degraded = rs->delta_disks < 0 ? -rs->delta_disks : rs->delta_disks; in rs_prepare_reshape()
2595 mddev->raid_disks = rs->raid_disks; in rs_prepare_reshape()
2599 rs->ti->error = "Called with bogus raid type"; in rs_prepare_reshape()
2604 set_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags); in rs_prepare_reshape()
2605 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags); in rs_prepare_reshape()
2606 } else if (mddev->raid_disks < rs->raid_disks) in rs_prepare_reshape()
2608 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags); in rs_prepare_reshape()
2620 static int rs_setup_reshape(struct raid_set *rs) in rs_setup_reshape() argument
2624 struct mddev *mddev = &rs->md; in rs_setup_reshape()
2627 mddev->delta_disks = rs->delta_disks; in rs_setup_reshape()
2633 DMINFO("Ignoring invalid layout change with delta_disks=%d", rs->delta_disks); in rs_setup_reshape()
2660 if (rs->delta_disks > 0) { in rs_setup_reshape()
2662 for (d = cur_raid_devs; d < rs->raid_disks; d++) { in rs_setup_reshape()
2663 rdev = &rs->dev[d].rdev; in rs_setup_reshape()
2674 rdev->recovery_offset = rs_is_raid1(rs) ? 0 : MaxSector; in rs_setup_reshape()
2680 } else if (rs->delta_disks < 0) { in rs_setup_reshape()
2681 r = rs_set_dev_and_array_sectors(rs, true); in rs_setup_reshape()
2707 mddev->reshape_backwards = rs->dev[0].rdev.data_offset ? 0 : 1; in rs_setup_reshape()
2717 static void configure_discard_support(struct raid_set *rs) in configure_discard_support() argument
2721 struct dm_target *ti = rs->ti; in configure_discard_support()
2727 raid456 = (rs->md.level == 4 || rs->md.level == 5 || rs->md.level == 6); in configure_discard_support()
2729 for (i = 0; i < rs->raid_disks; i++) { in configure_discard_support()
2732 if (!rs->dev[i].rdev.bdev) in configure_discard_support()
2735 q = bdev_get_queue(rs->dev[i].rdev.bdev); in configure_discard_support()
2757 ti->split_discard_bios = !!(rs->md.level == 1 || rs->md.level == 10); in configure_discard_support()
2781 struct raid_set *rs = NULL; in raid_ctr() local
2819 rs = raid_set_alloc(ti, rt, num_raid_devs); in raid_ctr()
2820 if (IS_ERR(rs)) in raid_ctr()
2821 return PTR_ERR(rs); in raid_ctr()
2823 r = parse_raid_params(rs, &as, num_raid_params); in raid_ctr()
2827 r = parse_dev_params(rs, &as); in raid_ctr()
2831 rs->md.sync_super = super_sync; in raid_ctr()
2839 r = rs_set_dev_and_array_sectors(rs, false); in raid_ctr()
2843 calculated_dev_sectors = rs->dev[0].rdev.sectors; in raid_ctr()
2850 rs_config_backup(rs, &rs_layout); in raid_ctr()
2852 r = analyse_superblocks(ti, rs); in raid_ctr()
2856 resize = calculated_dev_sectors != rs->dev[0].rdev.sectors; in raid_ctr()
2858 INIT_WORK(&rs->md.event_work, do_table_event); in raid_ctr()
2859 ti->private = rs; in raid_ctr()
2863 rs_config_restore(rs, &rs_layout); in raid_ctr()
2871 if (test_bit(MD_ARRAY_FIRST_USE, &rs->md.flags)) { in raid_ctr()
2873 if (rs_is_raid6(rs) && in raid_ctr()
2874 test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)) { in raid_ctr()
2879 rs_setup_recovery(rs, 0); in raid_ctr()
2880 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags); in raid_ctr()
2881 rs_set_new(rs); in raid_ctr()
2882 } else if (rs_is_recovering(rs)) { in raid_ctr()
2885 } else if (rs_is_reshaping(rs)) { in raid_ctr()
2893 } else if (rs_takeover_requested(rs)) { in raid_ctr()
2894 if (rs_is_reshaping(rs)) { in raid_ctr()
2908 r = rs_check_takeover(rs); in raid_ctr()
2912 r = rs_setup_takeover(rs); in raid_ctr()
2916 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags); in raid_ctr()
2918 rs_setup_recovery(rs, MaxSector); in raid_ctr()
2919 rs_set_new(rs); in raid_ctr()
2920 } else if (rs_reshape_requested(rs)) { in raid_ctr()
2928 r = rs_prepare_reshape(rs); in raid_ctr()
2933 rs_setup_recovery(rs, MaxSector); in raid_ctr()
2934 rs_set_cur(rs); in raid_ctr()
2937 if (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags)) { in raid_ctr()
2938 rs_setup_recovery(rs, MaxSector); in raid_ctr()
2939 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags); in raid_ctr()
2941 rs_setup_recovery(rs, test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags) ? in raid_ctr()
2943 rs_set_cur(rs); in raid_ctr()
2947 r = rs_adjust_data_offsets(rs); in raid_ctr()
2952 rs->md.ro = 1; in raid_ctr()
2953 rs->md.in_sync = 1; in raid_ctr()
2954 set_bit(MD_RECOVERY_FROZEN, &rs->md.recovery); in raid_ctr()
2957 mddev_lock_nointr(&rs->md); in raid_ctr()
2958 r = md_run(&rs->md); in raid_ctr()
2959 rs->md.in_sync = 0; /* Assume already marked dirty */ in raid_ctr()
2963 mddev_unlock(&rs->md); in raid_ctr()
2967 rs->callbacks.congested_fn = raid_is_congested; in raid_ctr()
2968 dm_table_add_target_callbacks(ti->table, &rs->callbacks); in raid_ctr()
2970 mddev_suspend(&rs->md); in raid_ctr()
2973 if (rs_is_raid456(rs)) { in raid_ctr()
2974 r = rs_set_raid456_stripe_cache(rs); in raid_ctr()
2980 if (test_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags)) { in raid_ctr()
2981 r = rs_check_reshape(rs); in raid_ctr()
2986 rs_config_restore(rs, &rs_layout); in raid_ctr()
2988 if (rs->md.pers->start_reshape) { in raid_ctr()
2989 r = rs->md.pers->check_reshape(&rs->md); in raid_ctr()
2998 configure_discard_support(rs); in raid_ctr()
3000 mddev_unlock(&rs->md); in raid_ctr()
3005 md_stop(&rs->md); in raid_ctr()
3007 raid_set_free(rs); in raid_ctr()
3014 struct raid_set *rs = ti->private; in raid_dtr() local
3016 list_del_init(&rs->callbacks.list); in raid_dtr()
3017 md_stop(&rs->md); in raid_dtr()
3018 raid_set_free(rs); in raid_dtr()
3023 struct raid_set *rs = ti->private; in raid_map() local
3024 struct mddev *mddev = &rs->md; in raid_map()
3088 static sector_t rs_get_progress(struct raid_set *rs, in rs_get_progress() argument
3092 struct mddev *mddev = &rs->md; in rs_get_progress()
3098 if (rs_is_raid0(rs)) { in rs_get_progress()
3117 sector_div(r, mddev_data_stripes(rs)); in rs_get_progress()
3169 struct raid_set *rs = ti->private; in raid_status() local
3170 struct mddev *mddev = &rs->md; in raid_status()
3195 resync_max_sectors = test_bit(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags) ? in raid_status()
3197 progress = rs_get_progress(rs, resync_max_sectors, &array_in_sync); in raid_status()
3200 sync_action = decipher_sync_action(&rs->md); in raid_status()
3249 DMEMIT(" %llu", (unsigned long long) rs->dev[0].rdev.data_offset); in raid_status()
3256 for (i = 0; i < rs->raid_disks; i++) in raid_status()
3257 if (test_bit(WriteMostly, &rs->dev[i].rdev.flags)) in raid_status()
3259 rebuild_disks = memweight(rs->rebuild_disks, DISKS_ARRAY_ELEMS * sizeof(*rs->rebuild_disks)); in raid_status()
3262 hweight32(rs->ctr_flags & CTR_FLAG_OPTIONS_NO_ARGS) + in raid_status()
3263 hweight32(rs->ctr_flags & CTR_FLAG_OPTIONS_ONE_ARG) * 2; in raid_status()
3265 DMEMIT("%s %u %u", rs->raid_type->name, raid_param_cnt, mddev->new_chunk_sectors); in raid_status()
3266 if (test_bit(__CTR_FLAG_RAID10_FORMAT, &rs->ctr_flags)) in raid_status()
3269 if (test_bit(__CTR_FLAG_RAID10_COPIES, &rs->ctr_flags)) in raid_status()
3272 if (test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)) in raid_status()
3274 if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags)) in raid_status()
3276 if (test_bit(__CTR_FLAG_REGION_SIZE, &rs->ctr_flags)) in raid_status()
3279 if (test_bit(__CTR_FLAG_DATA_OFFSET, &rs->ctr_flags)) in raid_status()
3281 (unsigned long long) rs->data_offset); in raid_status()
3282 if (test_bit(__CTR_FLAG_DAEMON_SLEEP, &rs->ctr_flags)) in raid_status()
3285 if (test_bit(__CTR_FLAG_DELTA_DISKS, &rs->ctr_flags)) in raid_status()
3287 max(rs->delta_disks, mddev->delta_disks)); in raid_status()
3288 if (test_bit(__CTR_FLAG_STRIPE_CACHE, &rs->ctr_flags)) in raid_status()
3292 for (i = 0; i < rs->raid_disks; i++) in raid_status()
3293 if (test_bit(rs->dev[i].rdev.raid_disk, (void *) rs->rebuild_disks)) in raid_status()
3295 rs->dev[i].rdev.raid_disk); in raid_status()
3297 for (i = 0; i < rs->raid_disks; i++) in raid_status()
3298 if (test_bit(WriteMostly, &rs->dev[i].rdev.flags)) in raid_status()
3300 rs->dev[i].rdev.raid_disk); in raid_status()
3301 if (test_bit(__CTR_FLAG_MAX_WRITE_BEHIND, &rs->ctr_flags)) in raid_status()
3304 if (test_bit(__CTR_FLAG_MAX_RECOVERY_RATE, &rs->ctr_flags)) in raid_status()
3307 if (test_bit(__CTR_FLAG_MIN_RECOVERY_RATE, &rs->ctr_flags)) in raid_status()
3310 DMEMIT(" %d", rs->raid_disks); in raid_status()
3311 for (i = 0; i < rs->raid_disks; i++) in raid_status()
3312 DMEMIT(" %s %s", __get_dev_name(rs->dev[i].meta_dev), in raid_status()
3313 __get_dev_name(rs->dev[i].data_dev)); in raid_status()
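
To emit the table line, raid_status() first counts the parameter words it will print: hweight32() (population count) over the no-argument flag mask contributes one word per set flag, and over the one-argument mask two words. A model with invented mask values; the rebuild/write_mostly per-device terms are left out:

#include <stdio.h>

static unsigned popcount(unsigned v)
{
	unsigned n = 0;

	for (; v; v &= v - 1)	/* clear the lowest set bit each pass */
		n++;
	return n;
}

int main(void)
{
	unsigned ctr_flags = 0x15;	/* example flag word */
	unsigned no_arg_mask = 0x03;	/* e.g. sync, nosync */
	unsigned one_arg_mask = 0x14;	/* e.g. region_size, raid10_copies */
	unsigned cnt = popcount(ctr_flags & no_arg_mask) +
		       popcount(ctr_flags & one_arg_mask) * 2;

	printf("raid_param_cnt = %u\n", cnt);	/* 1 + 2*2 = 5 */
	return 0;
}
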
3319 struct raid_set *rs = ti->private; in raid_message() local
3320 struct mddev *mddev = &rs->md; in raid_message()
3368 struct raid_set *rs = ti->private; in raid_iterate_devices() local
3372 for (i = 0; !r && i < rs->md.raid_disks; i++) in raid_iterate_devices()
3373 if (rs->dev[i].data_dev) in raid_iterate_devices()
3375 rs->dev[i].data_dev, in raid_iterate_devices()
3377 rs->md.dev_sectors, in raid_iterate_devices()
3385 struct raid_set *rs = ti->private; in raid_io_hints() local
3386 unsigned int chunk_size = to_bytes(rs->md.chunk_sectors); in raid_io_hints()
3389 blk_limits_io_opt(limits, chunk_size * mddev_data_stripes(rs)); in raid_io_hints()
3394 struct raid_set *rs = ti->private; in raid_presuspend() local
3396 md_stop_writes(&rs->md); in raid_presuspend()
3401 struct raid_set *rs = ti->private; in raid_postsuspend() local
3403 if (!rs->md.suspended) in raid_postsuspend()
3404 mddev_suspend(&rs->md); in raid_postsuspend()
3406 rs->md.ro = 1; in raid_postsuspend()
3409 static void attempt_restore_of_faulty_devices(struct raid_set *rs) in attempt_restore_of_faulty_devices() argument
3416 struct mddev *mddev = &rs->md; in attempt_restore_of_faulty_devices()
3425 for (i = 0; i < rs->md.raid_disks; i++) { in attempt_restore_of_faulty_devices()
3426 r = &rs->dev[i].rdev; in attempt_restore_of_faulty_devices()
3432 rs->raid_type->name, i); in attempt_restore_of_faulty_devices()
3470 rdev_for_each(r, &rs->md) { in attempt_restore_of_faulty_devices()
3482 static int __load_dirty_region_bitmap(struct raid_set *rs) in __load_dirty_region_bitmap() argument
3487 if (!rs_is_raid0(rs) && in __load_dirty_region_bitmap()
3488 !test_and_set_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags)) { in __load_dirty_region_bitmap()
3489 r = bitmap_load(&rs->md); in __load_dirty_region_bitmap()
3498 static void rs_update_sbs(struct raid_set *rs) in rs_update_sbs() argument
3500 struct mddev *mddev = &rs->md; in rs_update_sbs()
3516 static int rs_start_reshape(struct raid_set *rs) in rs_start_reshape() argument
3519 struct mddev *mddev = &rs->md; in rs_start_reshape()
3522 r = rs_setup_reshape(rs); in rs_start_reshape()
3537 rs->ti->error = "pers->check_reshape() failed"; in rs_start_reshape()
3548 rs->ti->error = "pers->start_reshape() failed"; in rs_start_reshape()
3562 rs_update_sbs(rs); in rs_start_reshape()
3570 struct raid_set *rs = ti->private; in raid_preresume() local
3571 struct mddev *mddev = &rs->md; in raid_preresume()
3574 if (test_and_set_bit(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags)) in raid_preresume()
3583 if (test_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags)) in raid_preresume()
3584 rs_update_sbs(rs); in raid_preresume()
3587 r = __load_dirty_region_bitmap(rs); in raid_preresume()
3592 if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) && mddev->bitmap && in raid_preresume()
3593 mddev->bitmap_info.chunksize != to_bytes(rs->requested_bitmap_chunk_sectors)) { in raid_preresume()
3595 to_bytes(rs->requested_bitmap_chunk_sectors), 0); in raid_preresume()
3608 rs_set_capacity(rs); in raid_preresume()
3611 if (test_and_clear_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags)) { in raid_preresume()
3614 r = rs_start_reshape(rs); in raid_preresume()
3628 struct raid_set *rs = ti->private; in raid_resume() local
3629 struct mddev *mddev = &rs->md; in raid_resume()
3631 if (test_and_set_bit(RT_FLAG_RS_RESUMED, &rs->runtime_flags)) { in raid_resume()
3637 attempt_restore_of_faulty_devices(rs); in raid_resume()
3650 if (!(rs->ctr_flags & RESUME_STAY_FROZEN_FLAGS)) in raid_resume()