Search for refs:raid_disks (results 1 – 18 of 18), sorted by relevance

/drivers/md/
md-linear.c:30 hi = mddev->raid_disks - 1; in which_dev()
49 static sector_t linear_size(struct mddev *mddev, sector_t sectors, int raid_disks) in linear_size() argument
55 WARN_ONCE(sectors || raid_disks, in linear_size()
62 static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks) in linear_conf() argument
69 conf = kzalloc(struct_size(conf, disks, raid_disks), GFP_KERNEL); in linear_conf()
81 if (j < 0 || j >= raid_disks || disk->rdev) { in linear_conf()
103 if (cnt != raid_disks) { in linear_conf()
119 for (i = 1; i < raid_disks; i++) in linear_conf()
135 conf->raid_disks = raid_disks; in linear_conf()
151 conf = linear_conf(mddev, mddev->raid_disks); in linear_run()
[all …]
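
The linear_conf() hit at md-linear.c:69 sizes its config with struct_size(conf, disks, raid_disks), i.e. a header plus one flexible-array slot per member disk. Below is a minimal userspace sketch of that allocation pattern; struct dev_info, struct linear_conf and linear_conf_alloc() are trimmed, hypothetical stand-ins for the kernel structures, not the real definitions.

    #include <stdio.h>
    #include <stdlib.h>
    #include <stddef.h>

    /* Hypothetical, trimmed stand-ins for the kernel's per-device and conf structs. */
    struct dev_info {
        unsigned long long end_sector;
    };

    struct linear_conf {
        int raid_disks;
        struct dev_info disks[];    /* flexible array member, one slot per member disk */
    };

    /* Equivalent of struct_size(conf, disks, n): header size plus n trailing elements. */
    static struct linear_conf *linear_conf_alloc(int raid_disks)
    {
        size_t bytes = offsetof(struct linear_conf, disks) +
                       (size_t)raid_disks * sizeof(struct dev_info);
        struct linear_conf *conf = calloc(1, bytes);    /* kzalloc() analogue */

        if (conf)
            conf->raid_disks = raid_disks;
        return conf;
    }

    int main(void)
    {
        struct linear_conf *conf = linear_conf_alloc(4);

        if (!conf)
            return 1;
        conf->disks[3].end_sector = 1024;
        printf("raid_disks=%d, last end_sector=%llu\n",
               conf->raid_disks, conf->disks[3].end_sector);
        free(conf);
        return 0;
    }
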
md-multipath.c:28 int i, disks = conf->raid_disks; in multipath_map()
143 seq_printf (seq, " [%d/%d] [", conf->raid_disks, in multipath_status()
144 conf->raid_disks - mddev->degraded); in multipath_status()
146 for (i = 0; i < conf->raid_disks; i++) { in multipath_status()
162 if (conf->raid_disks - mddev->degraded <= 1) { in multipath_error()
186 conf->raid_disks - mddev->degraded); in multipath_error()
199 pr_debug(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded, in print_multipath_conf()
200 conf->raid_disks); in print_multipath_conf()
202 for (i = 0; i < conf->raid_disks; i++) { in print_multipath_conf()
219 int last = mddev->raid_disks - 1; in multipath_add_disk()
[all …]
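
The multipath_status() hits at lines 143-146 print a "[total/working]" pair plus a per-disk map derived from conf->raid_disks and mddev->degraded. A rough userspace sketch of that reporting style follows; the in_sync[] array is a hypothetical stand-in for the per-rdev In_sync flag.

    #include <stdio.h>

    /* Hypothetical status printer: raid_disks total, raid_disks - degraded working,
     * and one 'U' (in sync) or '_' (missing/failed) character per disk. */
    static void print_status(int raid_disks, int degraded, const int *in_sync)
    {
        int i;

        printf(" [%d/%d] [", raid_disks, raid_disks - degraded);
        for (i = 0; i < raid_disks; i++)
            putchar(in_sync[i] ? 'U' : '_');
        printf("]\n");
    }

    int main(void)
    {
        int in_sync[4] = { 1, 1, 0, 1 };

        print_status(4, 1, in_sync);    /* prints " [4/3] [UU_U]" */
        return 0;
    }
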
raid1.c:133 int size = offsetof(struct r1bio, bios[pi->raid_disks]); in r1bio_pool_alloc()
159 rps = kmalloc_array(pi->raid_disks, sizeof(struct resync_pages), in r1buf_pool_alloc()
167 for (j = pi->raid_disks ; j-- ; ) { in r1buf_pool_alloc()
180 need_pages = pi->raid_disks; in r1buf_pool_alloc()
183 for (j = 0; j < pi->raid_disks; j++) { in r1buf_pool_alloc()
209 while (++j < pi->raid_disks) in r1buf_pool_alloc()
225 for (i = pi->raid_disks; i--; ) { in r1buf_pool_free()
241 for (i = 0; i < conf->raid_disks * 2; i++) { in put_all_bios()
263 for (i = 0; i < conf->raid_disks * 2; i++) { in put_buf()
349 int raid_disks = conf->raid_disks; in find_bio_disk() local
[all …]
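
r1bio_pool_alloc() at raid1.c:133 sizes each r1bio by taking offsetof(struct r1bio, bios[pi->raid_disks]), and several of the loops above run to conf->raid_disks * 2 because RAID1 keeps two bio slots per disk (original device and replacement). A hedged sketch of the sizing trick, with a cut-down r1bio that is not the kernel's real layout:

    #include <stdio.h>
    #include <stdlib.h>
    #include <stddef.h>

    struct bio;    /* opaque here; only pointers are stored */

    /* Hypothetical cut-down r1bio: fixed header fields plus one bio slot per disk. */
    struct r1bio {
        int sectors;
        struct bio *bios[];    /* flexible array member */
    };

    int main(void)
    {
        int raid_disks = 3;

        /* The kernel writes this as offsetof(struct r1bio, bios[raid_disks]);
         * it comes to the header size plus raid_disks pointer slots. */
        size_t size = offsetof(struct r1bio, bios) +
                      (size_t)raid_disks * sizeof(struct bio *);
        struct r1bio *r1_bio = calloc(1, size);

        if (!r1_bio)
            return 1;
        printf("allocated %zu bytes for %d bio slots\n", size, raid_disks);
        free(r1_bio);
        return 0;
    }
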
dm-raid.c:237 int raid_disks; member
730 mddev->raid_disks = rs->raid_disks; in rs_set_new()
753 rs->raid_disks = raid_devs; in raid_set_alloc()
759 rs->md.raid_disks = raid_devs; in raid_set_alloc()
792 for (i = 0; i < rs->raid_disks; i++) { in raid_set_free()
832 for (i = 0; i < rs->raid_disks; i++) { in parse_dev_params()
1004 unsigned int rebuilds_per_group = 0, copies, raid_disks; in validate_raid_redundancy() local
1007 for (i = 0; i < rs->raid_disks; i++) in validate_raid_redundancy()
1017 if (rebuild_cnt >= rs->md.raid_disks) in validate_raid_redundancy()
1050 raid_disks = min(rs->raid_disks, rs->md.raid_disks); in validate_raid_redundancy()
[all …]
raid0.c:42 int raid_disks = conf->strip_zone[0].nb_dev; in dump_zones() local
52 bdevname(conf->devlist[j*raid_disks in dump_zones()
151 mddev->raid_disks), in create_strip_zones()
186 if (j >= mddev->raid_disks) { in create_strip_zones()
202 if (cnt != mddev->raid_disks) { in create_strip_zones()
204 mdname(mddev), cnt, mddev->raid_disks); in create_strip_zones()
218 dev = conf->devlist + i * mddev->raid_disks; in create_strip_zones()
330 int raid_disks = conf->strip_zone[0].nb_dev; in map_sector() local
353 return conf->devlist[(zone - conf->strip_zone)*raid_disks in map_sector()
357 static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks) in raid0_size() argument
[all …]
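
The raid0.c hits treat conf->devlist as a flat zones-by-raid_disks table: line 218 steps by i * mddev->raid_disks and line 353 indexes (zone - strip_zone) * raid_disks plus a device offset. A small sketch of that flattened 2D indexing, with made-up device names:

    #include <stdio.h>

    #define ZONES      2
    #define RAID_DISKS 3

    int main(void)
    {
        /* Flat table: one row of RAID_DISKS device slots per strip zone. */
        const char *devlist[ZONES * RAID_DISKS] = {
            "sda", "sdb", "sdc",    /* zone 0 */
            "sdd", "sde", "sdf",    /* zone 1 */
        };
        int zone, dev;

        for (zone = 0; zone < ZONES; zone++)
            for (dev = 0; dev < RAID_DISKS; dev++)
                printf("zone %d, disk %d -> %s\n",
                       zone, dev, devlist[zone * RAID_DISKS + dev]);
        return 0;
    }
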
raid10.c:94 int size = offsetof(struct r10bio, devs[conf->geo.raid_disks]); in r10bio_pool_alloc()
241 for (i = 0; i < conf->geo.raid_disks; i++) { in put_all_bios()
332 for (slot = 0; slot < conf->geo.raid_disks; slot++) { in find_bio_disk()
572 last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1; in __raid10_find_phys()
576 last_far_set_size += (geo->raid_disks % geo->far_set_size); in __raid10_find_phys()
584 dev = sector_div(stripe, geo->raid_disks); in __raid10_find_phys()
603 if ((geo->raid_disks % geo->far_set_size) && in __raid10_find_phys()
618 if (dev >= geo->raid_disks) { in __raid10_find_phys()
651 if (geo->raid_disks % geo->far_set_size) { in raid10_find_virt()
652 last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1; in raid10_find_virt()
[all …]
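
__raid10_find_phys() at raid10.c:584 calls sector_div(stripe, geo->raid_disks), which divides the value in place and hands back the remainder, so one call yields both the per-device stripe and the device index. The userspace approximation below uses a hypothetical pointer-based helper; the kernel's sector_div() is a macro that operates on the variable directly.

    #include <stdio.h>
    #include <stdint.h>

    typedef uint64_t sector_t;

    /* Userspace stand-in for the kernel's sector_div(): divide *n by base in
     * place and return the remainder. */
    static unsigned int sector_div_approx(sector_t *n, unsigned int base)
    {
        unsigned int rem = (unsigned int)(*n % base);

        *n /= base;
        return rem;
    }

    int main(void)
    {
        sector_t stripe = 1005;
        unsigned int raid_disks = 4;
        unsigned int dev = sector_div_approx(&stripe, raid_disks);

        /* dev is the disk index; stripe is now the stripe number on that disk. */
        printf("dev=%u, stripe=%llu\n", dev, (unsigned long long)stripe);
        return 0;
    }
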
raid5.c:124 static inline int raid6_next_disk(int disk, int raid_disks) in raid6_next_disk() argument
127 return (disk < raid_disks) ? disk : 0; in raid6_next_disk()
278 else if (injournal == conf->raid_disks - conf->max_degraded) { in do_release_stripe()
581 sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks; in init_stripe()
659 if (conf->raid_disks >= conf->previous_raid_disks) in raid5_calc_degraded()
663 if (conf->raid_disks == conf->previous_raid_disks) in raid5_calc_degraded()
667 for (i = 0; i < conf->raid_disks; i++) { in raid5_calc_degraded()
681 if (conf->raid_disks <= conf->previous_raid_disks) in raid5_calc_degraded()
2361 int devs = max(conf->raid_disks, conf->previous_raid_disks); in grow_stripes()
2607 for (i = conf->raid_disks; i < newsize; i++) { in resize_stripes()
[all …]
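
raid6_next_disk() at raid5.c:124-127 is simply a wrapping increment around the disk ring, and raid5_calc_degraded() walks conf->raid_disks comparing the current and previous geometries. A tiny sketch of the ring walk (next_disk() is a local rename with the same shape as the hit):

    #include <stdio.h>

    /* Same shape as the raid6_next_disk() hit: advance one slot, wrap at raid_disks. */
    static inline int next_disk(int disk, int raid_disks)
    {
        disk++;
        return (disk < raid_disks) ? disk : 0;
    }

    int main(void)
    {
        int raid_disks = 5;
        int disk = 3;
        int i;

        /* One full cycle starting after disk 3: prints 4 0 1 2 3 */
        for (i = 0; i < raid_disks; i++) {
            disk = next_disk(disk, raid_disks);
            printf("%d ", disk);
        }
        printf("\n");
        return 0;
    }
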
raid1.h:65 int raid_disks; member
73 int raid_disks; member
raid5-ppl.c:620 int raid_disks = conf->raid_disks; in ppl_do_flush() local
624 atomic_set(&io->pending_flushes, raid_disks); in ppl_do_flush()
626 for_each_set_bit(i, &log->disk_flush_bitmap, raid_disks) { in ppl_do_flush()
656 for (i = flushed_disks ; i < raid_disks; i++) { in ppl_do_flush()
829 data_disks = conf->raid_disks - conf->max_degraded; in ppl_recover_entry()
836 data_disks = conf->raid_disks - conf->max_degraded; in ppl_recover_entry()
1366 if (conf->raid_disks > max_disks) { in ppl_init_log()
1384 ret = mempool_init(&ppl_conf->io_pool, conf->raid_disks, ppl_io_pool_alloc, in ppl_init_log()
1389 ret = bioset_init(&ppl_conf->bs, conf->raid_disks, 0, BIOSET_NEED_BVECS); in ppl_init_log()
1393 ret = bioset_init(&ppl_conf->flush_bs, conf->raid_disks, 0, 0); in ppl_init_log()
[all …]
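
ppl_do_flush() at raid5-ppl.c:620-656 seeds pending_flushes with raid_disks, walks the set bits of disk_flush_bitmap with for_each_set_bit(), and then drops the disks that needed no flush from the pending count. A simplified, non-atomic sketch of that bookkeeping; the bitmap value and the exact accounting here are illustrative only.

    #include <stdio.h>

    int main(void)
    {
        int raid_disks = 6;
        unsigned long disk_flush_bitmap = 0x2c;    /* disks 2, 3 and 5 need a flush */
        int pending_flushes = raid_disks;          /* the kernel uses atomic_set() here */
        int flushed_disks = 0;
        int i;

        /* for_each_set_bit() analogue: visit only the disks marked in the bitmap. */
        for (i = 0; i < raid_disks; i++) {
            if (disk_flush_bitmap & (1UL << i)) {
                printf("issuing flush to disk %d\n", i);
                flushed_disks++;
            }
        }

        /* Disks that never needed a flush are dropped from the pending count. */
        pending_flushes -= raid_disks - flushed_disks;
        printf("waiting on %d flush completions\n", pending_flushes);
        return 0;
    }
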
md-autodetect.c:192 if (!list_empty(&mddev->disks) || mddev->raid_disks) { in md_setup_drive()
205 while (devices[ainfo.raid_disks]) in md_setup_drive()
206 ainfo.raid_disks++; in md_setup_drive()
md.c:670 if (!mddev->raid_disks && list_empty(&mddev->disks) && in mddev_put()
1233 if (sb->raid_disks <= 0) in super_90_load()
1318 if (mddev->raid_disks == 0) { in super_90_validate()
1329 mddev->raid_disks = sb->raid_disks; in super_90_validate()
1439 int next_spare = mddev->raid_disks; in super_90_sync()
1472 sb->raid_disks = mddev->raid_disks; in super_90_sync()
1556 for (i=0 ; i < mddev->raid_disks ; i++) { in super_90_sync()
1832 if (mddev->raid_disks == 0) { in super_1_validate()
1842 mddev->raid_disks = le32_to_cpu(sb->raid_disks); in super_1_validate()
2051 sb->raid_disks = cpu_to_le32(mddev->raid_disks); in super_1_sync()
[all …]
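
The super_1_validate()/super_1_sync() hits at md.c:1842 and 2051 convert raid_disks between the CPU representation and the little-endian on-disk v1.x superblock field with le32_to_cpu()/cpu_to_le32(). A userspace sketch of that round trip; it assumes the glibc/musl <endian.h> helpers and uses a trimmed, hypothetical superblock struct.

    #include <endian.h>    /* htole32()/le32toh(); assumed glibc/musl environment */
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical slice of an on-disk v1.x superblock; the field is stored
     * little-endian on disk (__le32 in the kernel's definition). */
    struct mdp_superblock_min {
        uint32_t raid_disks;
    };

    int main(void)
    {
        struct mdp_superblock_min sb;
        int raid_disks = 8;

        /* super_1_sync() direction: CPU value -> little-endian on-disk field */
        sb.raid_disks = htole32((uint32_t)raid_disks);

        /* super_1_validate() direction: on-disk field -> CPU value */
        int loaded = (int)le32toh(sb.raid_disks);

        printf("stored raw=0x%08x, loaded=%d\n", sb.raid_disks, loaded);
        return 0;
    }
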
md-linear.h:14 int raid_disks; /* a copy of mddev->raid_disks */ member
md-multipath.h:12 int raid_disks; member
raid10.h:36 int raid_disks; member
md-faulty.c:283 static sector_t faulty_size(struct mddev *mddev, sector_t sectors, int raid_disks) in faulty_size() argument
285 WARN_ONCE(raid_disks, in faulty_size()
md.h:326 int raid_disks; member
595 sector_t (*size) (struct mddev *mddev, sector_t sectors, int raid_disks);
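
md.h line 595 is the personality hook behind the linear_size(), raid0_size() and faulty_size() hits above: each personality supplies sector_t (*size)(struct mddev *, sector_t, int raid_disks). The sketch below models that function-pointer interface with trimmed, hypothetical struct definitions; treating 0 as "use the current mddev value" mirrors how some personalities (e.g. the raid5.c code) handle these arguments and is only an illustration here.

    #include <stdio.h>
    #include <stdint.h>

    typedef uint64_t sector_t;

    /* Hypothetical, trimmed mirrors of struct mddev and struct md_personality;
     * only the pieces needed to show the ->size() hook are kept. */
    struct mddev_min {
        int raid_disks;
        sector_t dev_sectors;
    };

    struct md_personality_min {
        const char *name;
        sector_t (*size)(struct mddev_min *mddev, sector_t sectors, int raid_disks);
    };

    /* Illustrative striping-style personality: capacity scales with member count. */
    static sector_t stripe_like_size(struct mddev_min *mddev, sector_t sectors,
                                     int raid_disks)
    {
        if (!sectors)
            sectors = mddev->dev_sectors;
        if (!raid_disks)
            raid_disks = mddev->raid_disks;
        return sectors * raid_disks;
    }

    int main(void)
    {
        struct md_personality_min pers = { "stripe-like", stripe_like_size };
        struct mddev_min mddev = { .raid_disks = 4, .dev_sectors = 1 << 20 };

        printf("%s capacity: %llu sectors\n", pers.name,
               (unsigned long long)pers.size(&mddev, 0, 0));
        return 0;
    }
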
raid5.h:570 int raid_disks; member
raid5-cache.c:406 (conf->raid_disks - conf->max_degraded) * (conf->group_cnt + 1)); in r5c_log_required_to_flush_cache()
3088 conf->raid_disks) > PAGE_SIZE) { in r5l_init_log()
3090 mdname(conf->mddev), conf->raid_disks); in r5l_init_log()