/drivers/md/ (matches for "mddev")
md.c
     98  static inline int speed_min(struct mddev *mddev)    in speed_min() argument
    100      return mddev->sync_speed_min ?    in speed_min()
    101          mddev->sync_speed_min : sysctl_speed_limit_min;    in speed_min()
    104  static inline int speed_max(struct mddev *mddev)    in speed_max() argument
    106      return mddev->sync_speed_max ?    in speed_max()
    107          mddev->sync_speed_max : sysctl_speed_limit_max;    in speed_max()
    160      struct mddev *mddev, **mddevp;    in mddev_bio_destructor() local
    163      mddev = mddevp[-1];    in mddev_bio_destructor()
    165      bio_free(bio, mddev->bio_set);    in mddev_bio_destructor()
    169          struct mddev *mddev)    in bio_alloc_mddev() argument
    [all …]

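The speed_min()/speed_max() helpers at md.c lines 98-107 fall back to the global sysctl limits when the per-array values are zero. Below is a minimal standalone sketch of that fallback pattern; the struct, variable values, and main() are made-up illustration, not the kernel's code or defaults.

    #include <stdio.h>

    /* Stand-ins for sysctl_speed_limit_{min,max}; the values are illustrative. */
    static int sysctl_speed_limit_min = 1000;
    static int sysctl_speed_limit_max = 200000;

    struct array_limits {
        int sync_speed_min;   /* 0 means "not set, use the global default" */
        int sync_speed_max;
    };

    static int speed_min(const struct array_limits *a)
    {
        return a->sync_speed_min ? a->sync_speed_min : sysctl_speed_limit_min;
    }

    static int speed_max(const struct array_limits *a)
    {
        return a->sync_speed_max ? a->sync_speed_max : sysctl_speed_limit_max;
    }

    int main(void)
    {
        struct array_limits tuned = { .sync_speed_min = 5000, .sync_speed_max = 0 };

        /* The per-array min overrides the global; the unset max falls back to it. */
        printf("min=%d max=%d\n", speed_min(&tuned), speed_max(&tuned));
        return 0;
    }
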
raid0.c
     31      struct mddev *mddev = data;    in raid0_congested() local
     32      struct r0conf *conf = mddev->private;    in raid0_congested()
     37      if (mddev_congested(mddev, bits))    in raid0_congested()
     51  static void dump_zones(struct mddev *mddev)    in dump_zones() argument
     57      struct r0conf *conf = mddev->private;    in dump_zones()
     60          mdname(mddev),    in dump_zones()
     81  static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)    in create_strip_zones() argument
     94      rdev_for_each(rdev1, mddev) {    in create_strip_zones()
     96              mdname(mddev),    in create_strip_zones()
    102          sector_div(sectors, mddev->chunk_sectors);    in create_strip_zones()
    [all …]

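create_strip_zones() divides each member's size by mddev->chunk_sectors (line 102, via sector_div) so every device contributes a whole number of chunks. A standalone sketch of that rounding plus the usual sector-to-chunk split follows; the device size and chunk size are arbitrary demo values, and plain 64-bit division stands in for the kernel's sector_div().

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t dev_sectors   = 1000005;  /* illustrative member size in 512-byte sectors */
        uint32_t chunk_sectors = 128;      /* e.g. a 64 KiB chunk */

        /* Round the device down to a whole number of chunks, as create_strip_zones() does. */
        uint64_t chunks = dev_sectors / chunk_sectors;
        uint64_t usable = chunks * chunk_sectors;

        /* Map an I/O sector to (chunk number, offset inside the chunk). */
        uint64_t sector       = 300000;
        uint64_t chunk        = sector / chunk_sectors;
        uint32_t chunk_offset = (uint32_t)(sector % chunk_sectors);

        printf("usable=%llu chunk=%llu offset=%u\n",
               (unsigned long long)usable, (unsigned long long)chunk, chunk_offset);
        return 0;
    }
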
md.h
     43      struct mddev *mddev;    /* RAID array if running */    member
    200  struct mddev {    struct
    419      void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev);    argument
    423  static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)    in rdev_dec_pending() argument
    427      set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);    in rdev_dec_pending()
    441      void (*make_request)(struct mddev *mddev, struct bio *bio);
    442      int (*run)(struct mddev *mddev);
    443      int (*stop)(struct mddev *mddev);
    444      void (*status)(struct seq_file *seq, struct mddev *mddev);
    448      void (*error_handler)(struct mddev *mddev, struct md_rdev *rdev);
    [all …]

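Lines 441-448 of md.h belong to the per-RAID-level callback table (make_request, run, stop, status, error_handler, ...) through which the md core dispatches to linear, raid0, raid1, raid5 and the rest. Below is a standalone C sketch of that ops-table dispatch pattern; the types are deliberately simplified stand-ins, not the kernel's struct md_personality or struct mddev.

    #include <stdio.h>

    struct array;                       /* stand-in for struct mddev */
    struct request { long sector; };    /* stand-in for struct bio */

    /* Simplified analogue of struct md_personality: one callback table per level. */
    struct personality {
        const char *name;
        void (*make_request)(struct array *a, struct request *r);
        int  (*run)(struct array *a);
        int  (*stop)(struct array *a);
    };

    struct array {
        const struct personality *pers;  /* selected when the array is started */
    };

    static void linear_make_request(struct array *a, struct request *r)
    {
        (void)a;
        printf("linear: handling sector %ld\n", r->sector);
    }

    static int linear_run(struct array *a)  { (void)a; return 0; }
    static int linear_stop(struct array *a) { (void)a; return 0; }

    static const struct personality linear_personality = {
        .name         = "linear",
        .make_request = linear_make_request,
        .run          = linear_run,
        .stop         = linear_stop,
    };

    int main(void)
    {
        struct array a = { .pers = &linear_personality };
        struct request r = { .sector = 4096 };

        if (a.pers->run(&a) == 0)
            a.pers->make_request(&a, &r);   /* core code calls through the table */
        a.pers->stop(&a);
        return 0;
    }
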
multipath.c
     62      struct mddev *mddev = mp_bh->mddev;    in multipath_reschedule_retry() local
     63      struct mpconf *conf = mddev->private;    in multipath_reschedule_retry()
     68      md_wakeup_thread(mddev->thread);    in multipath_reschedule_retry()
     80      struct mpconf *conf = mp_bh->mddev->private;    in multipath_end_bh_io()
     90      struct mpconf *conf = mp_bh->mddev->private;    in multipath_end_request()
    100          md_error (mp_bh->mddev, rdev);    in multipath_end_request()
    107      rdev_dec_pending(rdev, conf->mddev);    in multipath_end_request()
    110  static void multipath_make_request(struct mddev *mddev, struct bio * bio)    in multipath_make_request() argument
    112      struct mpconf *conf = mddev->private;    in multipath_make_request()
    117          md_flush_request(mddev, bio);    in multipath_make_request()
    [all …]

linear.c
     30  static inline struct dev_info *which_dev(struct mddev *mddev, sector_t sector)    in which_dev() argument
     36      hi = mddev->raid_disks - 1;    in which_dev()
     37      conf = rcu_dereference(mddev->private);    in which_dev()
     67      struct mddev *mddev = q->queuedata;    in linear_mergeable_bvec() local
     75      dev0 = which_dev(mddev, sector);    in linear_mergeable_bvec()
    102      struct mddev *mddev = data;    in linear_congested() local
    106      if (mddev_congested(mddev, bits))    in linear_congested()
    110      conf = rcu_dereference(mddev->private);    in linear_congested()
    112      for (i = 0; i < mddev->raid_disks && !ret ; i++) {    in linear_congested()
    121  static sector_t linear_size(struct mddev *mddev, sector_t sectors, int raid_disks)    in linear_size() argument
    [all …]

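which_dev() (linear.c line 30) finds the member device that backs a given sector of the concatenated array; the `hi = mddev->raid_disks - 1` at line 36 is the upper bound of a search over the devices' cumulative end offsets. A standalone sketch of that lookup follows, assuming (as linear does) that each entry records the first sector past its device; the device names and sizes are made up for the demo.

    #include <stdio.h>
    #include <stdint.h>

    struct dev_info {
        const char *name;
        uint64_t end_sector;   /* first sector *after* this device in the flat address space */
    };

    /* Binary search for the first device whose end_sector lies beyond the target sector. */
    static const struct dev_info *which_dev(const struct dev_info *devs, int ndevs,
                                            uint64_t sector)
    {
        int lo = 0, hi = ndevs - 1;

        while (lo < hi) {
            int mid = (lo + hi) / 2;

            if (sector < devs[mid].end_sector)
                hi = mid;
            else
                lo = mid + 1;
        }
        return &devs[lo];
    }

    int main(void)
    {
        /* Illustrative layout: three members concatenated end to end. */
        struct dev_info devs[] = {
            { "sda", 1000 },
            { "sdb", 2500 },
            { "sdc", 4000 },
        };

        printf("sector 1700 -> %s\n", which_dev(devs, 3, 1700)->name);   /* sdb */
        return 0;
    }
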
raid1.c
    105      if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))    in r1buf_pool_alloc()
    121      if (!test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) {    in r1buf_pool_alloc()
    177      struct r1conf *conf = r1_bio->mddev->private;    in free_r1bio()
    185      struct r1conf *conf = r1_bio->mddev->private;    in put_buf()
    191          rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);    in put_buf()
    202      struct mddev *mddev = r1_bio->mddev;    in reschedule_retry() local
    203      struct r1conf *conf = mddev->private;    in reschedule_retry()
    211      md_wakeup_thread(mddev->thread);    in reschedule_retry()
    223      struct r1conf *conf = r1_bio->mddev->private;    in call_bio_endio()
    268      struct r1conf *conf = r1_bio->mddev->private;    in update_head_pos()
    [all …]

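reschedule_retry() (raid1.c lines 202-211) puts a failed r1_bio on the array's retry list under the device lock and then wakes the per-array daemon with md_wakeup_thread(). Below is a standalone pthread sketch of that "queue the work, wake the daemon" pattern; the kernel uses its own kthread, spinlock and wait primitives rather than pthreads, and the item type here is a made-up placeholder.

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct retry_item {
        long sector;
        struct retry_item *next;
    };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  wake = PTHREAD_COND_INITIALIZER;
    static struct retry_item *retry_list;
    static int stopping;

    /* Analogue of reschedule_retry(): enqueue under the lock, then wake the daemon. */
    static void reschedule_retry(long sector)
    {
        struct retry_item *it = malloc(sizeof(*it));

        it->sector = sector;
        pthread_mutex_lock(&lock);
        it->next = retry_list;
        retry_list = it;
        pthread_mutex_unlock(&lock);
        pthread_cond_signal(&wake);          /* md_wakeup_thread() analogue */
    }

    /* Analogue of the raid1d daemon: sleep until woken, then drain the retry list. */
    static void *raid_daemon(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&lock);
        for (;;) {
            while (retry_list) {
                struct retry_item *it = retry_list;

                retry_list = it->next;
                pthread_mutex_unlock(&lock);
                printf("retrying I/O at sector %ld\n", it->sector);
                free(it);
                pthread_mutex_lock(&lock);
            }
            if (stopping)
                break;
            pthread_cond_wait(&wake, &lock);
        }
        pthread_mutex_unlock(&lock);
        return NULL;
    }

    int main(void)
    {
        pthread_t tid;

        pthread_create(&tid, NULL, raid_daemon, NULL);
        reschedule_retry(1234);
        reschedule_retry(5678);

        pthread_mutex_lock(&lock);
        stopping = 1;
        pthread_mutex_unlock(&lock);
        pthread_cond_signal(&wake);
        pthread_join(tid, NULL);
        return 0;
    }
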
bitmap.c
     35      return bitmap->mddev ? mdname(bitmap->mddev) : "mdX";    in bmname()
    133  static struct page *read_sb_page(struct mddev *mddev, loff_t offset,    in read_sb_page() argument
    150      rdev_for_each(rdev, mddev) {    in read_sb_page()
    172  static struct md_rdev *next_active_rdev(struct md_rdev *rdev, struct mddev *mddev)    in next_active_rdev() argument
    186          pos = &mddev->disks;    in next_active_rdev()
    189          rdev_dec_pending(rdev, mddev);    in next_active_rdev()
    192      list_for_each_continue_rcu(pos, &mddev->disks) {    in next_active_rdev()
    210      struct mddev *mddev = bitmap->mddev;    in write_sb_page() local
    212      while ((rdev = next_active_rdev(rdev, mddev)) != NULL) {    in write_sb_page()
    214          loff_t offset = mddev->bitmap_info.offset;    in write_sb_page()
    [all …]

raid5.c
    210          md_wakeup_thread(conf->mddev->thread);    in __release_stripe()
    216          md_wakeup_thread(conf->mddev->thread);    in __release_stripe()
    222              md_wakeup_thread(conf->mddev->thread);    in __release_stripe()
    434      if (conf->mddev->reshape_position == MaxSector)    in has_failed()
    435          return conf->mddev->degraded > conf->max_degraded;    in has_failed()
    584          if (!conf->mddev->external &&    in ops_run_io()
    585              conf->mddev->flags) {    in ops_run_io()
    590              md_check_recovery(conf->mddev);    in ops_run_io()
    598              md_wait_for_blocked_rdev(rdev, conf->mddev);    in ops_run_io()
    601              rdev_dec_pending(rdev, conf->mddev);    in ops_run_io()
    [all …]

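has_failed() (raid5.c lines 434-435) declares the array dead once more member devices are missing than the level can tolerate: when no reshape is in progress (line 434), the test is simply degraded > max_degraded, where max_degraded is 1 for RAID5 and 2 for RAID6. A trivial standalone version of that check:

    #include <stdio.h>

    /* RAID5 survives 1 missing device, RAID6 survives 2. */
    static int has_failed(int degraded, int max_degraded)
    {
        return degraded > max_degraded;
    }

    int main(void)
    {
        printf("raid5, 1 disk down: failed=%d\n", has_failed(1, 1));   /* 0: still running */
        printf("raid6, 3 disks down: failed=%d\n", has_failed(3, 2));  /* 1: array is lost */
        return 0;
    }
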
raid10.c
    115      if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))    in r10buf_pool_alloc()
    144                    &conf->mddev->recovery)) {    in r10buf_pool_alloc()
    220      struct r10conf *conf = r10_bio->mddev->private;    in free_r10bio()
    228      struct r10conf *conf = r10_bio->mddev->private;    in put_buf()
    238      struct mddev *mddev = r10_bio->mddev;    in reschedule_retry() local
    239      struct r10conf *conf = mddev->private;    in reschedule_retry()
    249      md_wakeup_thread(mddev->thread);    in reschedule_retry()
    261      struct r10conf *conf = r10_bio->mddev->private;    in raid_end_bio_io()
    289      struct r10conf *conf = r10_bio->mddev->private;    in update_head_pos()
    329      struct r10conf *conf = r10_bio->mddev->private;    in raid10_end_read_request()
    [all …]

faulty.c
    173  static void make_request(struct mddev *mddev, struct bio *bio)    in make_request() argument
    175      struct faulty_conf *conf = mddev->private;    in make_request()
    214          struct bio *b = bio_clone_mddev(bio, GFP_NOIO, mddev);    in make_request()
    226  static void status(struct seq_file *seq, struct mddev *mddev)    in status() argument
    228      struct faulty_conf *conf = mddev->private;    in status()
    259  static int reshape(struct mddev *mddev)    in reshape() argument
    261      int mode = mddev->new_layout & ModeMask;    in reshape()
    262      int count = mddev->new_layout >> ModeShift;    in reshape()
    263      struct faulty_conf *conf = mddev->private;    in reshape()
    265      if (mddev->new_layout < 0)    in reshape()
    [all …]

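reshape() in faulty.c (lines 261-262) decodes the array's new_layout into a fault mode and a trigger count using ModeMask and ModeShift. A standalone sketch of that pack/unpack scheme follows; the mask width and shift below are illustrative choices, not necessarily the values faulty.c defines.

    #include <stdio.h>

    /* Illustrative packing: the low bits hold the mode, the remaining bits hold the count. */
    #define MODE_SHIFT 5
    #define MODE_MASK  ((1 << MODE_SHIFT) - 1)

    static int pack_layout(int mode, int count)
    {
        return (count << MODE_SHIFT) | (mode & MODE_MASK);
    }

    int main(void)
    {
        int layout = pack_layout(3, 10);       /* e.g. mode 3, fire every 10th request */
        int mode   = layout & MODE_MASK;       /* as in reshape(): new_layout & ModeMask  */
        int count  = layout >> MODE_SHIFT;     /*                  new_layout >> ModeShift */

        printf("layout=0x%x mode=%d count=%d\n", layout, mode, count);
        return 0;
    }
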
bitmap.h
    174      struct mddev *mddev;    /* the md device that the bitmap is for */    member
    219  int  bitmap_create(struct mddev *mddev);
    220  int  bitmap_load(struct mddev *mddev);
    221  void bitmap_flush(struct mddev *mddev);
    222  void bitmap_destroy(struct mddev *mddev);
    244  void bitmap_daemon_work(struct mddev *mddev);

raid1.h
     21      struct mddev *mddev;    member
     26      struct mddev *mddev;    member
    116      struct mddev *mddev;    member
    175  extern int md_raid1_congested(struct mddev *mddev, int bits);

dm-raid.c
     62      struct mddev md;
    203          rs->dev[i].rdev.mddev = &rs->md;    in dev_parms()
    618  static void super_sync(struct mddev *mddev, struct md_rdev *rdev)    in super_sync() argument
    627      rdev_for_each(r, mddev)    in super_sync()
    636      sb->num_devices = cpu_to_le32(mddev->raid_disks);    in super_sync()
    639      sb->events = cpu_to_le64(mddev->events);    in super_sync()
    643      sb->array_resync_offset = cpu_to_le64(mddev->recovery_cp);    in super_sync()
    645      sb->level = cpu_to_le32(mddev->level);    in super_sync()
    646      sb->layout = cpu_to_le32(mddev->layout);    in super_sync()
    647      sb->stripe_sectors = cpu_to_le32(mddev->chunk_sectors);    in super_sync()
    [all …]

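super_sync() (dm-raid.c lines 618-647) copies the live mddev state into the on-disk superblock, converting every field with cpu_to_le32()/cpu_to_le64() so the metadata is little-endian regardless of host byte order. A standalone user-space sketch of the same idea using glibc's htole32()/htole64() follows; the struct below is a made-up miniature, not dm-raid's real superblock format.

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Made-up miniature superblock; the real dm-raid metadata has many more fields. */
    struct mini_sb {
        uint32_t num_devices;
        uint32_t level;
        uint32_t stripe_sectors;
        uint64_t events;
    } __attribute__((packed));

    int main(void)
    {
        struct mini_sb sb;

        memset(&sb, 0, sizeof(sb));
        /* Always store little-endian on disk, whatever the host byte order is. */
        sb.num_devices    = htole32(4);
        sb.level          = htole32(5);
        sb.stripe_sectors = htole32(128);
        sb.events         = htole64(42);

        /* Reading the metadata back converts the other way. */
        printf("devices=%u level=%u chunk=%u events=%llu\n",
               le32toh(sb.num_devices), le32toh(sb.level),
               le32toh(sb.stripe_sectors),
               (unsigned long long)le64toh(sb.events));
        return 0;
    }
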
multipath.h
      9      struct mddev *mddev;    member
     25      struct mddev *mddev;    member

raid5.h
    366      struct mddev *mddev;    member
    517  extern int md_raid5_congested(struct mddev *mddev, int bits);
    519  extern int raid5_set_cache_size(struct mddev *mddev, int size);

raid10.h
     15      struct mddev *mddev;    member
     88      struct mddev *mddev;    member