
Searched refs:raid_disks (Results 1 – 18 of 18) sorted by relevance

/drivers/md/
md-linear.c
30 hi = mddev->raid_disks - 1; in which_dev()
49 static sector_t linear_size(struct mddev *mddev, sector_t sectors, int raid_disks) in linear_size() argument
55 WARN_ONCE(sectors || raid_disks, in linear_size()
62 static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks) in linear_conf() argument
68 conf = kzalloc(struct_size(conf, disks, raid_disks), GFP_KERNEL); in linear_conf()
80 if (j < 0 || j >= raid_disks || disk->rdev) { in linear_conf()
99 if (cnt != raid_disks) { in linear_conf()
110 for (i = 1; i < raid_disks; i++) in linear_conf()
126 conf->raid_disks = raid_disks; in linear_conf()
142 conf = linear_conf(mddev, mddev->raid_disks); in linear_run()
[all …]
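
The linear_conf() hit at md-linear.c:68 shows the kernel's flexible-array idiom: the per-array configuration is allocated in one go, sized by struct_size() for raid_disks trailing device slots, and md-linear.h:14 confirms the struct keeps its own copy of the count. Below is a minimal standalone sketch of that allocation pattern; the struct and field names are simplified stand-ins, not the kernel's linear_conf layout.

#include <stdlib.h>
#include <stdio.h>

/* Simplified stand-in for a per-array configuration: a header followed
 * by one slot per member device (flexible array member). */
struct dev_slot { int dev_id; };

struct linear_conf_sketch {
        int raid_disks;            /* a copy of the array's disk count */
        struct dev_slot disks[];   /* raid_disks entries follow the header */
};

/* One allocation for header plus trailing slots, mirroring
 * kzalloc(struct_size(conf, disks, raid_disks), GFP_KERNEL). */
static struct linear_conf_sketch *alloc_conf(int raid_disks)
{
        struct linear_conf_sketch *conf =
                calloc(1, sizeof(*conf) + raid_disks * sizeof(conf->disks[0]));

        if (conf)
                conf->raid_disks = raid_disks;
        return conf;
}

int main(void)
{
        struct linear_conf_sketch *conf = alloc_conf(4);

        printf("allocated conf for %d disks\n", conf ? conf->raid_disks : 0);
        free(conf);
        return 0;
}
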
md-multipath.c
28 int i, disks = conf->raid_disks; in multipath_map()
140 seq_printf (seq, " [%d/%d] [", conf->raid_disks, in multipath_status()
141 conf->raid_disks - mddev->degraded); in multipath_status()
143 for (i = 0; i < conf->raid_disks; i++) { in multipath_status()
158 if (conf->raid_disks - mddev->degraded <= 1) { in multipath_error()
182 conf->raid_disks - mddev->degraded); in multipath_error()
195 pr_debug(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded, in print_multipath_conf()
196 conf->raid_disks); in print_multipath_conf()
198 for (i = 0; i < conf->raid_disks; i++) { in print_multipath_conf()
214 int last = mddev->raid_disks - 1; in multipath_add_disk()
[all …]
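
The md-multipath.c hits show raid_disks used for path bookkeeping: multipath_status() prints a "[total/working]" summary (lines 140–141), and multipath_error() checks that more than one working path remains before failing one (line 158). A small self-contained sketch of that arithmetic, with hypothetical type and function names, follows.

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical path-set state modelled on conf->raid_disks / mddev->degraded. */
struct mpath_state {
        int raid_disks;   /* total configured paths   */
        int degraded;     /* paths already marked bad */
};

/* Refuse to fail the last working path, as the multipath_error() check does. */
static bool can_fail_path(const struct mpath_state *s)
{
        return s->raid_disks - s->degraded > 1;
}

int main(void)
{
        struct mpath_state s = { .raid_disks = 3, .degraded = 2 };

        /* Mirrors the " [%d/%d]" summary printed by multipath_status(). */
        printf("[%d/%d] can_fail=%d\n",
               s.raid_disks, s.raid_disks - s.degraded, can_fail_path(&s));
        return 0;
}
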
raid1.c
133 int size = offsetof(struct r1bio, bios[pi->raid_disks]); in r1bio_pool_alloc()
159 rps = kmalloc_array(pi->raid_disks, sizeof(struct resync_pages), in r1buf_pool_alloc()
167 for (j = pi->raid_disks ; j-- ; ) { in r1buf_pool_alloc()
181 need_pages = pi->raid_disks; in r1buf_pool_alloc()
184 for (j = 0; j < pi->raid_disks; j++) { in r1buf_pool_alloc()
210 while (++j < pi->raid_disks) { in r1buf_pool_alloc()
228 for (i = pi->raid_disks; i--; ) { in r1buf_pool_free()
245 for (i = 0; i < conf->raid_disks * 2; i++) { in put_all_bios()
267 for (i = 0; i < conf->raid_disks * 2; i++) { in put_buf()
352 int raid_disks = conf->raid_disks; in find_bio_disk() local
[all …]
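
The raid1.c loops run to conf->raid_disks * 2 because the personality reserves two bio slots per mirror position, one for the device and one for a possible replacement, and r1buf_pool_alloc() shows the usual unwind-on-failure shape when building those per-disk resources. The standalone sketch below reproduces only the doubled-slot allocation and the backward cleanup on a mid-loop failure; it does not model the kernel's resync_pages handling.

#include <stdlib.h>
#include <stdio.h>

/* Illustrative only: one buffer per slot, two slots per mirror position
 * (device + possible replacement), with unwinding on mid-loop failure. */
static void **alloc_slot_buffers(int raid_disks, size_t buf_size)
{
        int nslots = raid_disks * 2;
        void **bufs = calloc(nslots, sizeof(*bufs));

        if (!bufs)
                return NULL;

        for (int j = 0; j < nslots; j++) {
                bufs[j] = malloc(buf_size);
                if (!bufs[j]) {
                        /* Failure: free everything allocated so far. */
                        while (j--)
                                free(bufs[j]);
                        free(bufs);
                        return NULL;
                }
        }
        return bufs;
}

int main(void)
{
        void **bufs = alloc_slot_buffers(2, 4096);

        printf("allocation %s\n", bufs ? "succeeded" : "failed");
        if (bufs) {
                for (int j = 0; j < 2 * 2; j++)
                        free(bufs[j]);
                free(bufs);
        }
        return 0;
}
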
dm-raid.c
238 int raid_disks; member
731 mddev->raid_disks = rs->raid_disks; in rs_set_new()
754 rs->raid_disks = raid_devs; in raid_set_alloc()
760 rs->md.raid_disks = raid_devs; in raid_set_alloc()
793 for (i = 0; i < rs->raid_disks; i++) { in raid_set_free()
833 for (i = 0; i < rs->raid_disks; i++) { in parse_dev_params()
1005 unsigned int rebuilds_per_group = 0, copies, raid_disks; in validate_raid_redundancy() local
1008 for (i = 0; i < rs->raid_disks; i++) in validate_raid_redundancy()
1018 if (rebuild_cnt >= rs->md.raid_disks) in validate_raid_redundancy()
1051 raid_disks = min(rs->raid_disks, rs->md.raid_disks); in validate_raid_redundancy()
[all …]
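
validate_raid_redundancy() walks all rs->raid_disks members, counts those flagged for rebuild, and rejects the table when rebuild_cnt reaches the array size (dm-raid.c:1018); the later checks around line 1051 tighten this per copy group. The sketch below covers only that first, coarse check, with hypothetical structures standing in for dm-raid's.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical member flag modelled on dm-raid's rebuild accounting. */
struct member { bool needs_rebuild; };

/* Reject a set where every member would need rebuilding: there would be
 * nothing left to rebuild from. */
static bool redundancy_ok(const struct member *m, int raid_disks)
{
        int rebuild_cnt = 0;

        for (int i = 0; i < raid_disks; i++)
                if (m[i].needs_rebuild)
                        rebuild_cnt++;

        return rebuild_cnt < raid_disks;
}

int main(void)
{
        struct member m[3] = { { true }, { false }, { false } };

        printf("redundancy_ok=%d\n", redundancy_ok(m, 3));
        return 0;
}
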
raid0.c
41 int raid_disks = conf->strip_zone[0].nb_dev; in dump_zones() local
51 conf->devlist[j * raid_disks + k]->bdev); in dump_zones()
147 mddev->raid_disks), in create_strip_zones()
182 if (j >= mddev->raid_disks) { in create_strip_zones()
198 if (cnt != mddev->raid_disks) { in create_strip_zones()
200 mdname(mddev), cnt, mddev->raid_disks); in create_strip_zones()
214 dev = conf->devlist + i * mddev->raid_disks; in create_strip_zones()
326 int raid_disks = conf->strip_zone[0].nb_dev; in map_sector() local
349 return conf->devlist[(zone - conf->strip_zone)*raid_disks in map_sector()
353 static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks) in raid0_size() argument
[all …]
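
raid0's map_sector() resolves a logical sector to a member device and an offset on it using the zone's device count (lines 326–349). Within a single zone this is plain striping arithmetic, sketched standalone below for a uniform array; raid0's multi-zone handling for unequal device sizes is deliberately ignored.

#include <stdio.h>
#include <stdint.h>

/* Classic single-zone RAID0 mapping: which disk, and which sector on it. */
static void raid0_map(uint64_t sector, unsigned chunk_sectors, int raid_disks,
                      int *disk, uint64_t *dev_sector)
{
        uint64_t chunk  = sector / chunk_sectors;   /* which chunk overall */
        uint64_t offset = sector % chunk_sectors;   /* offset inside chunk */

        *disk       = (int)(chunk % raid_disks);                   /* round-robin */
        *dev_sector = (chunk / raid_disks) * chunk_sectors + offset;
}

int main(void)
{
        int disk;
        uint64_t dev_sector;

        raid0_map(1000, 128, 4, &disk, &dev_sector);
        printf("sector 1000 -> disk %d, sector %llu\n",
               disk, (unsigned long long)dev_sector);
        return 0;
}
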
raid10.c
109 int size = offsetof(struct r10bio, devs[conf->geo.raid_disks]); in r10bio_pool_alloc()
263 for (i = 0; i < conf->geo.raid_disks; i++) { in put_all_bios()
358 for (slot = 0; slot < conf->geo.raid_disks; slot++) { in find_bio_disk()
597 last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1; in __raid10_find_phys()
601 last_far_set_size += (geo->raid_disks % geo->far_set_size); in __raid10_find_phys()
609 dev = sector_div(stripe, geo->raid_disks); in __raid10_find_phys()
628 if ((geo->raid_disks % geo->far_set_size) && in __raid10_find_phys()
643 if (dev >= geo->raid_disks) { in __raid10_find_phys()
676 if (geo->raid_disks % geo->far_set_size) { in raid10_find_virt()
677 last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1; in raid10_find_virt()
[all …]
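
raid10 keeps raid_disks inside its geometry (geo->raid_disks) because the near/far layouts place extra copies at computed offsets: __raid10_find_phys() reduces the chunk number modulo raid_disks to pick a starting device (line 609) and then spreads copies across a far set. The sketch below shows only the simplest "near" placement, where the copies of a chunk sit on consecutive devices; it is an illustration, not the kernel's far-set math (far_set_size and friends are omitted).

#include <stdio.h>
#include <stdint.h>

/* Simplified raid10 "near" placement: near_copies consecutive devices
 * hold the same data chunk; striping then continues on the next device. */
static int raid10_near_dev(uint64_t chunk, int copy, int near_copies,
                           int raid_disks)
{
        uint64_t slot = chunk * near_copies + copy;  /* position in the ring */

        return (int)(slot % raid_disks);
}

int main(void)
{
        /* 4 disks, 2 near copies: chunk 0 -> disks 0,1; chunk 1 -> disks 2,3; ... */
        for (uint64_t chunk = 0; chunk < 3; chunk++)
                for (int copy = 0; copy < 2; copy++)
                        printf("chunk %llu copy %d -> disk %d\n",
                               (unsigned long long)chunk, copy,
                               raid10_near_dev(chunk, copy, 2, 4));
        return 0;
}
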
raid5.c
129 static inline int raid6_next_disk(int disk, int raid_disks) in raid6_next_disk() argument
132 return (disk < raid_disks) ? disk : 0; in raid6_next_disk()
285 else if (injournal == conf->raid_disks - conf->max_degraded) { in do_release_stripe()
589 sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks; in init_stripe()
714 if (conf->raid_disks >= conf->previous_raid_disks) in raid5_calc_degraded()
718 if (conf->raid_disks == conf->previous_raid_disks) in raid5_calc_degraded()
722 for (i = 0; i < conf->raid_disks; i++) { in raid5_calc_degraded()
736 if (conf->raid_disks <= conf->previous_raid_disks) in raid5_calc_degraded()
2430 int devs = max(conf->raid_disks, conf->previous_raid_disks); in grow_stripes()
2676 for (i = conf->raid_disks; i < newsize; i++) { in resize_stripes()
[all …]
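
raid6_next_disk() at raid5.c:129 is a ring iterator over a stripe's raid_disks slots: step to the next index, wrapping to 0 past the last one, which is how the raid6 paths move from one device slot to the following one. The excerpt shows only the signature and the wrap-around return; the sketch below fills in the implied increment and walks one full lap.

#include <stdio.h>

/* Ring iterator over the raid_disks slots of a stripe (the increment is
 * implied by the wrap-around return shown in the search excerpt). */
static inline int raid6_next_disk(int disk, int raid_disks)
{
        disk++;
        return (disk < raid_disks) ? disk : 0;
}

int main(void)
{
        int raid_disks = 4;
        int disk = 2;

        /* Walk one full lap: 2 -> 3 -> 0 -> 1 -> 2. */
        for (int i = 0; i < raid_disks; i++) {
                disk = raid6_next_disk(disk, raid_disks);
                printf("next disk: %d\n", disk);
        }
        return 0;
}
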
raid1.h
65 int raid_disks; member
73 int raid_disks; member
md-autodetect.c
184 if (!list_empty(&mddev->disks) || mddev->raid_disks) { in md_setup_drive()
197 while (devices[ainfo.raid_disks]) in md_setup_drive()
198 ainfo.raid_disks++; in md_setup_drive()
raid5-ppl.c
613 int raid_disks = conf->raid_disks; in ppl_do_flush() local
617 atomic_set(&io->pending_flushes, raid_disks); in ppl_do_flush()
619 for_each_set_bit(i, &log->disk_flush_bitmap, raid_disks) { in ppl_do_flush()
647 for (i = flushed_disks ; i < raid_disks; i++) { in ppl_do_flush()
819 data_disks = conf->raid_disks - conf->max_degraded; in ppl_recover_entry()
826 data_disks = conf->raid_disks - conf->max_degraded; in ppl_recover_entry()
1355 if (conf->raid_disks > max_disks) { in ppl_init_log()
1373 ret = mempool_init(&ppl_conf->io_pool, conf->raid_disks, ppl_io_pool_alloc, in ppl_init_log()
1378 ret = bioset_init(&ppl_conf->bs, conf->raid_disks, 0, BIOSET_NEED_BVECS); in ppl_init_log()
1382 ret = bioset_init(&ppl_conf->flush_bs, conf->raid_disks, 0, 0); in ppl_init_log()
[all …]
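
The PPL recovery hits derive the number of data devices in a stripe as conf->raid_disks - conf->max_degraded (lines 819 and 826): parity slots hold no user data. For example, a 6-device raid6 array has max_degraded = 2, so 4 data disks; the trivial snippet below just does that arithmetic for a 512 KiB chunk.

#include <stdio.h>

int main(void)
{
        int raid_disks = 6, max_degraded = 2;      /* e.g. a 6-device raid6 */
        int data_disks = raid_disks - max_degraded;
        unsigned chunk_kib = 512;

        printf("data disks: %d, data per full stripe: %u KiB\n",
               data_disks, data_disks * chunk_kib);
        return 0;
}
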
md.c
640 if (!mddev->raid_disks && list_empty(&mddev->disks) && in mddev_put()
1200 if (sb->raid_disks <= 0) in super_90_load()
1285 if (mddev->raid_disks == 0) { in super_90_validate()
1296 mddev->raid_disks = sb->raid_disks; in super_90_validate()
1406 int next_spare = mddev->raid_disks; in super_90_sync()
1439 sb->raid_disks = mddev->raid_disks; in super_90_sync()
1523 for (i=0 ; i < mddev->raid_disks ; i++) { in super_90_sync()
1796 if (mddev->raid_disks == 0) { in super_1_validate()
1806 mddev->raid_disks = le32_to_cpu(sb->raid_disks); in super_1_validate()
2015 sb->raid_disks = cpu_to_le32(mddev->raid_disks); in super_1_sync()
[all …]
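
md.c shows the two superblock formats treating raid_disks differently: the 0.90 format copies the field natively (lines 1296 and 1439), while the v1 format is little-endian on disk, so it round-trips through le32_to_cpu()/cpu_to_le32() (lines 1806 and 2015). The standalone sketch below models that round-trip with portable stand-ins for the kernel's byte-order helpers.

#include <stdio.h>
#include <stdint.h>

/* Portable stand-ins for the kernel's cpu_to_le32()/le32_to_cpu(). */
static void put_le32(uint8_t *p, uint32_t v)
{
        p[0] = v & 0xff;
        p[1] = (v >> 8) & 0xff;
        p[2] = (v >> 16) & 0xff;
        p[3] = (v >> 24) & 0xff;
}

static uint32_t get_le32(const uint8_t *p)
{
        return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
               (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

int main(void)
{
        uint8_t on_disk[4];
        uint32_t raid_disks = 8;

        put_le32(on_disk, raid_disks);               /* write direction, as in super_1_sync()     */
        printf("raid_disks read back: %u\n",
               get_le32(on_disk));                   /* read direction, as in super_1_validate() */
        return 0;
}
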
md-linear.h
14 int raid_disks; /* a copy of mddev->raid_disks */ member
md-multipath.h
12 int raid_disks; member
raid10.h
36 int raid_disks; member
md-faulty.c
285 static sector_t faulty_size(struct mddev *mddev, sector_t sectors, int raid_disks) in faulty_size() argument
287 WARN_ONCE(raid_disks, in faulty_size()
md.h
344 int raid_disks; member
641 sector_t (*size) (struct mddev *mddev, sector_t sectors, int raid_disks);
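
md.h declares raid_disks both as an mddev field (line 344) and as the third argument of the personality's ->size() hook (line 641), so callers can ask what the array size would be with a different disk count; zero conventionally means "use the current value", which is why linear_size() and faulty_size() WARN_ONCE when handed anything else (md-linear.c:55, md-faulty.c:287). The sketch below is a hypothetical, trimmed-down ops table with such a size callback, not the kernel's struct md_personality.

#include <stdio.h>

typedef unsigned long long sector_t;

/* Hypothetical, trimmed-down personality ops table modelled on the
 * ->size() hook seen at md.h line 641. */
struct pers_sketch {
        const char *name;
        sector_t (*size)(sector_t dev_sectors, int raid_disks);
};

/* A raid0-like size rule: capacity scales with the member count. */
static sector_t striped_size(sector_t dev_sectors, int raid_disks)
{
        return dev_sectors * raid_disks;
}

int main(void)
{
        struct pers_sketch pers = { .name = "stripe-sketch", .size = striped_size };

        printf("%s: %llu sectors with 4 disks of 1000 sectors\n",
               pers.name, pers.size(1000, 4));
        return 0;
}
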
raid5.h
582 int raid_disks; member
raid5-cache.c
409 (conf->raid_disks - conf->max_degraded) * (conf->group_cnt + 1)); in r5c_log_required_to_flush_cache()
3087 conf->raid_disks) > PAGE_SIZE) { in r5l_init_log()
3089 mdname(conf->mddev), conf->raid_disks); in r5l_init_log()