Lines Matching refs:mddev (drivers/md/raid5.c)

169 blk_plug_device(conf->mddev->queue); in __release_stripe()
173 blk_plug_device(conf->mddev->queue); in __release_stripe()
178 md_wakeup_thread(conf->mddev->thread); in __release_stripe()
184 md_wakeup_thread(conf->mddev->thread); in __release_stripe()
191 md_wakeup_thread(conf->mddev->thread); in __release_stripe()
327 static void unplug_slaves(mddev_t *mddev);
357 raid5_unplug_device(conf->mddev->queue) in get_active_stripe()
898 sprintf(conf->cache_name[0], "raid5-%s", mdname(conf->mddev)); in grow_stripes()
899 sprintf(conf->cache_name[1], "raid5-%s-alt", mdname(conf->mddev)); in grow_stripes()
950 err = md_allow_write(conf->mddev); in resize_stripes()
992 unplug_slaves(conf->mddev) in resize_stripes()
1095 mdname(conf->mddev), STRIPE_SECTORS, in raid5_end_read_request()
1111 if (conf->mddev->degraded) in raid5_end_read_request()
1115 mdname(conf->mddev), in raid5_end_read_request()
1124 mdname(conf->mddev), in raid5_end_read_request()
1132 mdname(conf->mddev), bdn); in raid5_end_read_request()
1140 md_error(conf->mddev, rdev); in raid5_end_read_request()
1143 rdev_dec_pending(conf->disks[i].rdev, conf->mddev); in raid5_end_read_request()
1169 md_error(conf->mddev, conf->disks[i].rdev); in raid5_end_write_request()
1171 rdev_dec_pending(conf->disks[i].rdev, conf->mddev); in raid5_end_write_request()
1200 static void error(mddev_t *mddev, mdk_rdev_t *rdev) in error() argument
1203 raid5_conf_t *conf = (raid5_conf_t *) mddev->private; in error()
1207 set_bit(MD_CHANGE_DEVS, &mddev->flags); in error()
1211 mddev->degraded++; in error()
1216 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in error()
1222 bdevname(rdev->bdev,b), conf->raid_disks - mddev->degraded); in error()
1765 if (conf->mddev->bitmap && firstwrite) { in add_stripe_bio()
1766 bitmap_startwrite(conf->mddev->bitmap, sh->sector, in add_stripe_bio()
1832 md_error(conf->mddev, rdev); in handle_failed_stripe()
1852 md_write_end(conf->mddev); in handle_failed_stripe()
1867 md_write_end(conf->mddev); in handle_failed_stripe()
1899 bitmap_endwrite(conf->mddev->bitmap, sh->sector, in handle_failed_stripe()
1905 md_wakeup_thread(conf->mddev->thread); in handle_failed_stripe()
2066 md_write_end(conf->mddev); in handle_stripe_clean_event()
2076 bitmap_endwrite(conf->mddev->bitmap, in handle_stripe_clean_event()
2086 md_wakeup_thread(conf->mddev->thread); in handle_stripe_clean_event()
2274 md_wakeup_thread(conf->mddev->thread); in handle_stripe_dirtying6()
2340 conf->mddev->resync_mismatches += STRIPE_SECTORS; in handle_parity_checks5()
2341 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) in handle_parity_checks5()
2418 conf->mddev->resync_mismatches += STRIPE_SECTORS; in handle_parity_checks6()
2419 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) in handle_parity_checks6()
2626 rdev_dec_pending(blocked_rdev, conf->mddev); in handle_stripe5()
2645 md_done_sync(conf->mddev, STRIPE_SECTORS,0); in handle_stripe5()
2700 md_wakeup_thread(conf->mddev->thread); in handle_stripe5()
2725 md_done_sync(conf->mddev, STRIPE_SECTORS,1); in handle_stripe5()
2732 if (s.failed == 1 && !conf->mddev->ro && in handle_stripe5()
2773 md_done_sync(conf->mddev, STRIPE_SECTORS, 1); in handle_stripe5()
2785 md_wait_for_blocked_rdev(blocked_rdev, conf->mddev); in handle_stripe5()
2897 rdev_dec_pending(blocked_rdev, conf->mddev); in handle_stripe6()
2911 md_done_sync(conf->mddev, STRIPE_SECTORS,0); in handle_stripe6()
2956 md_done_sync(conf->mddev, STRIPE_SECTORS,1); in handle_stripe6()
2963 if (s.failed <= 2 && !conf->mddev->ro) in handle_stripe6()
2998 md_done_sync(conf->mddev, STRIPE_SECTORS, 1); in handle_stripe6()
3010 md_wait_for_blocked_rdev(blocked_rdev, conf->mddev); in handle_stripe6()
3044 blk_plug_device(conf->mddev->queue); in raid5_activate_delayed()
3061 static void unplug_slaves(mddev_t *mddev) in unplug_slaves() argument
3063 raid5_conf_t *conf = mddev_to_conf(mddev); in unplug_slaves()
3067 for (i=0; i<mddev->raid_disks; i++) { in unplug_slaves()
3077 rdev_dec_pending(rdev, mddev); in unplug_slaves()
3086 mddev_t *mddev = q->queuedata; in raid5_unplug_device() local
3087 raid5_conf_t *conf = mddev_to_conf(mddev); in raid5_unplug_device()
3096 md_wakeup_thread(mddev->thread); in raid5_unplug_device()
3100 unplug_slaves(mddev); in raid5_unplug_device()
3105 mddev_t *mddev = data; in raid5_congested() local
3106 raid5_conf_t *conf = mddev_to_conf(mddev); in raid5_congested()
3128 mddev_t *mddev = q->queuedata; in raid5_mergeable_bvec() local
3131 unsigned int chunk_sectors = mddev->chunk_size >> 9; in raid5_mergeable_bvec()
3146 static int in_chunk_boundary(mddev_t *mddev, struct bio *bio) in in_chunk_boundary() argument
3149 unsigned int chunk_sectors = mddev->chunk_size >> 9; in in_chunk_boundary()
3170 md_wakeup_thread(conf->mddev->thread); in add_bio_to_retry()
3207 mddev_t *mddev; in raid5_align_endio() local
3214 mddev = raid_bi->bi_bdev->bd_disk->queue->queuedata; in raid5_align_endio()
3215 conf = mddev_to_conf(mddev); in raid5_align_endio()
3219 rdev_dec_pending(rdev, conf->mddev); in raid5_align_endio()
3256 mddev_t *mddev = q->queuedata; in chunk_aligned_read() local
3257 raid5_conf_t *conf = mddev_to_conf(mddev); in chunk_aligned_read()
3264 if (!in_chunk_boundary(mddev, raid_bio)) { in chunk_aligned_read()
3303 rdev_dec_pending(rdev, mddev); in chunk_aligned_read()
3378 mddev_t *mddev = q->queuedata; in make_request() local
3379 raid5_conf_t *conf = mddev_to_conf(mddev); in make_request()
3392 md_write_start(mddev, bi); in make_request()
3395 part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]); in make_request()
3396 part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], in make_request()
3401 mddev->reshape_position == MaxSector && in make_request()
3474 if (logical_sector >= mddev->suspend_lo && in make_request()
3475 logical_sector < mddev->suspend_hi) { in make_request()
3487 raid5_unplug_device(mddev->queue); in make_request()
3510 md_write_end(mddev); in make_request()
3517 static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped) in reshape_request() argument
3528 raid5_conf_t *conf = (raid5_conf_t *) mddev->private; in reshape_request()
3568 mddev->reshape_position = conf->expand_progress; in reshape_request()
3569 set_bit(MD_CHANGE_DEVS, &mddev->flags); in reshape_request()
3570 md_wakeup_thread(mddev->thread); in reshape_request()
3571 wait_event(mddev->sb_wait, mddev->flags == 0 || in reshape_request()
3574 conf->expand_lo = mddev->reshape_position; in reshape_request()
3598 if (s < mddev->array_sectors) { in reshape_request()
3629 if (last_sector >= (mddev->size<<1)) in reshape_request()
3630 last_sector = (mddev->size<<1)-1; in reshape_request()
3645 if (sector_nr >= mddev->resync_max) { in reshape_request()
3649 mddev->reshape_position = conf->expand_progress; in reshape_request()
3650 set_bit(MD_CHANGE_DEVS, &mddev->flags); in reshape_request()
3651 md_wakeup_thread(mddev->thread); in reshape_request()
3652 wait_event(mddev->sb_wait, in reshape_request()
3653 !test_bit(MD_CHANGE_DEVS, &mddev->flags) in reshape_request()
3656 conf->expand_lo = mddev->reshape_position; in reshape_request()
3664 static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster) in sync_request() argument
3666 raid5_conf_t *conf = (raid5_conf_t *) mddev->private; in sync_request()
3670 sector_t max_sector = mddev->size << 1; in sync_request()
3677 unplug_slaves(mddev); in sync_request()
3678 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { in sync_request()
3683 if (mddev->curr_resync < max_sector) /* aborted */ in sync_request()
3684 bitmap_end_sync(mddev->bitmap, mddev->curr_resync, in sync_request()
3688 bitmap_close_sync(mddev->bitmap); in sync_request()
3693 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) in sync_request()
3694 return reshape_request(mddev, sector_nr, skipped); in sync_request()
3706 if (mddev->degraded >= conf->max_degraded && in sync_request()
3707 test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { in sync_request()
3708 sector_t rv = (mddev->size << 1) - sector_nr; in sync_request()
3712 if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) && in sync_request()
3713 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && in sync_request()
3722 bitmap_cond_end_sync(mddev->bitmap, sector_nr); in sync_request()
3737 for (i=0; i<mddev->raid_disks; i++) in sync_request()
3741 bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded); in sync_request()
3833 static void raid5d(mddev_t *mddev) in raid5d() argument
3836 raid5_conf_t *conf = mddev_to_conf(mddev); in raid5d()
3841 md_check_recovery(mddev); in raid5d()
3851 bitmap_unplug(mddev->bitmap); in raid5d()
3884 unplug_slaves(mddev); in raid5d()
3890 raid5_show_stripe_cache_size(mddev_t *mddev, char *page) in raid5_show_stripe_cache_size() argument
3892 raid5_conf_t *conf = mddev_to_conf(mddev); in raid5_show_stripe_cache_size()
3900 raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len) in raid5_store_stripe_cache_size() argument
3902 raid5_conf_t *conf = mddev_to_conf(mddev); in raid5_store_stripe_cache_size()
3921 err = md_allow_write(mddev); in raid5_store_stripe_cache_size()
3938 raid5_show_preread_threshold(mddev_t *mddev, char *page) in raid5_show_preread_threshold() argument
3940 raid5_conf_t *conf = mddev_to_conf(mddev); in raid5_show_preread_threshold()
3948 raid5_store_preread_threshold(mddev_t *mddev, const char *page, size_t len) in raid5_store_preread_threshold() argument
3950 raid5_conf_t *conf = mddev_to_conf(mddev); in raid5_store_preread_threshold()
3972 stripe_cache_active_show(mddev_t *mddev, char *page) in stripe_cache_active_show() argument
3974 raid5_conf_t *conf = mddev_to_conf(mddev); in stripe_cache_active_show()
3995 static int run(mddev_t *mddev) in run() argument
4003 if (mddev->level != 5 && mddev->level != 4 && mddev->level != 6) { in run()
4005 mdname(mddev), mddev->level); in run()
4009 if (mddev->chunk_size < PAGE_SIZE) { in run()
4012 mddev->chunk_size, PAGE_SIZE); in run()
4016 if (mddev->reshape_position != MaxSector) { in run()
4024 int max_degraded = (mddev->level == 5 ? 1 : 2); in run()
4026 if (mddev->new_level != mddev->level || in run()
4027 mddev->new_layout != mddev->layout || in run()
4028 mddev->new_chunk != mddev->chunk_size) { in run()
4031 mdname(mddev)); in run()
4034 if (mddev->delta_disks <= 0) { in run()
4037 mdname(mddev)); in run()
4040 old_disks = mddev->raid_disks - mddev->delta_disks; in run()
4045 here_new = mddev->reshape_position; in run()
4046 if (sector_div(here_new, (mddev->chunk_size>>9)* in run()
4047 (mddev->raid_disks - max_degraded))) { in run()
4053 here_old = mddev->reshape_position; in run()
4054 sector_div(here_old, (mddev->chunk_size>>9)* in run()
4069 mddev->private = kzalloc(sizeof (raid5_conf_t), GFP_KERNEL); in run()
4070 if ((conf = mddev->private) == NULL) in run()
4072 if (mddev->reshape_position == MaxSector) { in run()
4073 conf->previous_raid_disks = conf->raid_disks = mddev->raid_disks; in run()
4075 conf->raid_disks = mddev->raid_disks; in run()
4076 conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks; in run()
4084 conf->mddev = mddev; in run()
4089 if (mddev->level == 6) { in run()
4095 mddev->queue->queue_lock = &conf->device_lock; in run()
4108 pr_debug("raid5: run(%s) called.\n", mdname(mddev)); in run()
4110 list_for_each_entry(rdev, &mddev->disks, same_set) { in run()
4133 mddev->degraded = conf->raid_disks - working_disks; in run()
4134 conf->mddev = mddev; in run()
4135 conf->chunk_size = mddev->chunk_size; in run()
4136 conf->level = mddev->level; in run()
4141 conf->algorithm = mddev->layout; in run()
4143 conf->expand_progress = mddev->reshape_position; in run()
4146 mddev->size &= ~(mddev->chunk_size/1024 -1); in run()
4147 mddev->resync_max_sectors = mddev->size << 1; in run()
4151 mdname(mddev), conf->raid_disks); in run()
4156 conf->chunk_size, mdname(mddev)); in run()
4162 conf->algorithm, mdname(mddev)); in run()
4165 if (mddev->degraded > conf->max_degraded) { in run()
4168 mdname(mddev), mddev->degraded, conf->raid_disks); in run()
4172 if (mddev->degraded > 0 && in run()
4173 mddev->recovery_cp != MaxSector) { in run()
4174 if (mddev->ok_start_degraded) in run()
4178 mdname(mddev)); in run()
4182 mdname(mddev)); in run()
4188 mddev->thread = md_register_thread(raid5d, mddev, "%s_raid5"); in run()
4189 if (!mddev->thread) { in run()
4192 mdname(mddev)); in run()
4202 md_unregister_thread(mddev->thread); in run()
4206 memory, mdname(mddev)); in run()
4208 if (mddev->degraded == 0) in run()
4210 " devices, algorithm %d\n", conf->level, mdname(mddev), in run()
4211 mddev->raid_disks-mddev->degraded, mddev->raid_disks, in run()
4216 mdname(mddev), mddev->raid_disks - mddev->degraded, in run()
4217 mddev->raid_disks, conf->algorithm); in run()
4225 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); in run()
4226 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); in run()
4227 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); in run()
4228 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); in run()
4229 mddev->sync_thread = md_register_thread(md_do_sync, mddev, in run()
4239 (mddev->chunk_size / PAGE_SIZE); in run()
4240 if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe) in run()
4241 mddev->queue->backing_dev_info.ra_pages = 2 * stripe; in run()
4245 if (sysfs_create_group(&mddev->kobj, &raid5_attrs_group)) in run()
4248 mdname(mddev)); in run()
4250 mddev->queue->unplug_fn = raid5_unplug_device; in run()
4251 mddev->queue->backing_dev_info.congested_data = mddev; in run()
4252 mddev->queue->backing_dev_info.congested_fn = raid5_congested; in run()
4254 mddev->array_sectors = 2 * mddev->size * (conf->previous_raid_disks - in run()
4257 blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec); in run()
4268 mddev->private = NULL; in run()
4269 printk(KERN_ALERT "raid5: failed to run raid set %s\n", mdname(mddev)); in run()
4275 static int stop(mddev_t *mddev) in stop() argument
4277 raid5_conf_t *conf = (raid5_conf_t *) mddev->private; in stop()
4279 md_unregister_thread(mddev->thread); in stop()
4280 mddev->thread = NULL; in stop()
4283 mddev->queue->backing_dev_info.congested_fn = NULL; in stop()
4284 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ in stop()
4285 sysfs_remove_group(&mddev->kobj, &raid5_attrs_group); in stop()
4288 mddev->private = NULL; in stop()
4327 static void status(struct seq_file *seq, mddev_t *mddev) in status() argument
4329 raid5_conf_t *conf = (raid5_conf_t *) mddev->private; in status()
4332 seq_printf (seq, " level %d, %dk chunk, algorithm %d", mddev->level, mddev->chunk_size >> 10, mddev->layout); in status()
4333 seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded); in status()
4356 conf->raid_disks - conf->mddev->degraded); in print_raid5_conf()
4368 static int raid5_spare_active(mddev_t *mddev) in raid5_spare_active() argument
4371 raid5_conf_t *conf = mddev->private; in raid5_spare_active()
4381 mddev->degraded--; in raid5_spare_active()
4389 static int raid5_remove_disk(mddev_t *mddev, int number) in raid5_remove_disk() argument
4391 raid5_conf_t *conf = mddev->private; in raid5_remove_disk()
4408 mddev->degraded <= conf->max_degraded) { in raid5_remove_disk()
4426 static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) in raid5_add_disk() argument
4428 raid5_conf_t *conf = mddev->private; in raid5_add_disk()
4435 if (mddev->degraded > conf->max_degraded) in raid5_add_disk()
4466 static int raid5_resize(mddev_t *mddev, sector_t sectors) in raid5_resize() argument
4475 raid5_conf_t *conf = mddev_to_conf(mddev); in raid5_resize()
4477 sectors &= ~((sector_t)mddev->chunk_size/512 - 1); in raid5_resize()
4478 mddev->array_sectors = sectors * (mddev->raid_disks in raid5_resize()
4480 set_capacity(mddev->gendisk, mddev->array_sectors); in raid5_resize()
4481 mddev->changed = 1; in raid5_resize()
4482 if (sectors/2 > mddev->size && mddev->recovery_cp == MaxSector) { in raid5_resize()
4483 mddev->recovery_cp = mddev->size << 1; in raid5_resize()
4484 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in raid5_resize()
4486 mddev->size = sectors /2; in raid5_resize()
4487 mddev->resync_max_sectors = sectors; in raid5_resize()
4492 static int raid5_check_reshape(mddev_t *mddev) in raid5_check_reshape() argument
4494 raid5_conf_t *conf = mddev_to_conf(mddev); in raid5_check_reshape()
4497 if (mddev->delta_disks < 0 || in raid5_check_reshape()
4498 mddev->new_level != mddev->level) in raid5_check_reshape()
4500 if (mddev->delta_disks == 0) in raid5_check_reshape()
4502 if (mddev->bitmap) in raid5_check_reshape()
4514 if ((mddev->chunk_size / STRIPE_SIZE) * 4 > conf->max_nr_stripes || in raid5_check_reshape()
4515 (mddev->new_chunk / STRIPE_SIZE) * 4 > conf->max_nr_stripes) { in raid5_check_reshape()
4517 (mddev->chunk_size / STRIPE_SIZE)*4); in raid5_check_reshape()
4521 err = resize_stripes(conf, conf->raid_disks + mddev->delta_disks); in raid5_check_reshape()
4525 if (mddev->degraded > conf->max_degraded) in raid5_check_reshape()
4531 static int raid5_start_reshape(mddev_t *mddev) in raid5_start_reshape() argument
4533 raid5_conf_t *conf = mddev_to_conf(mddev); in raid5_start_reshape()
4539 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in raid5_start_reshape()
4542 list_for_each_entry(rdev, &mddev->disks, same_set) in raid5_start_reshape()
4547 if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded) in raid5_start_reshape()
4556 conf->raid_disks += mddev->delta_disks; in raid5_start_reshape()
4564 list_for_each_entry(rdev, &mddev->disks, same_set) in raid5_start_reshape()
4567 if (raid5_add_disk(mddev, rdev) == 0) { in raid5_start_reshape()
4573 if (sysfs_create_link(&mddev->kobj, in raid5_start_reshape()
4578 nm, mdname(mddev)); in raid5_start_reshape()
4584 mddev->degraded = (conf->raid_disks - conf->previous_raid_disks) - added_devices; in raid5_start_reshape()
4586 mddev->raid_disks = conf->raid_disks; in raid5_start_reshape()
4587 mddev->reshape_position = 0; in raid5_start_reshape()
4588 set_bit(MD_CHANGE_DEVS, &mddev->flags); in raid5_start_reshape()
4590 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); in raid5_start_reshape()
4591 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); in raid5_start_reshape()
4592 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); in raid5_start_reshape()
4593 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); in raid5_start_reshape()
4594 mddev->sync_thread = md_register_thread(md_do_sync, mddev, in raid5_start_reshape()
4596 if (!mddev->sync_thread) { in raid5_start_reshape()
4597 mddev->recovery = 0; in raid5_start_reshape()
4599 mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks; in raid5_start_reshape()
4604 md_wakeup_thread(mddev->sync_thread); in raid5_start_reshape()
4605 md_new_event(mddev); in raid5_start_reshape()
4614 if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { in end_reshape()
4615 conf->mddev->array_sectors = 2 * conf->mddev->size * in end_reshape()
4617 set_capacity(conf->mddev->gendisk, conf->mddev->array_sectors); in end_reshape()
4618 conf->mddev->changed = 1; in end_reshape()
4620 bdev = bdget_disk(conf->mddev->gendisk, 0); in end_reshape()
4624 (loff_t)conf->mddev->array_sectors << 9); in end_reshape()
4631 conf->mddev->reshape_position = MaxSector; in end_reshape()
4639 (conf->mddev->chunk_size / PAGE_SIZE); in end_reshape()
4640 if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe) in end_reshape()
4641 conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe; in end_reshape()
4646 static void raid5_quiesce(mddev_t *mddev, int state) in raid5_quiesce() argument
4648 raid5_conf_t *conf = mddev_to_conf(mddev); in raid5_quiesce()
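
Almost every match above relies on the same two-way link: mddev->private holds the per-array raid5_conf_t, and conf->mddev points back at the owning mddev_t (set in run()). A minimal sketch of that pairing, assuming the 2.6-era typedefs this file uses (the real definitions lived in include/linux/raid/md_k.h and raid5.h at the time; only fields relevant to this listing are shown), not a verbatim copy:

    /* Sketch of the mddev <-> conf back-pointer pattern assumed by the
     * matches above. Names follow the 2.6-era md driver. */
    typedef struct mddev_s {
        void                *private;  /* -> raid5_conf_t for raid4/5/6      */
        struct mdk_thread_s *thread;   /* raid5d worker, woken throughout    */
        /* ... many more fields ... */
    } mddev_t;

    typedef struct raid5_private_data {
        mddev_t             *mddev;    /* back-pointer, set once in run()    */
        /* ... stripe cache, disks[], device_lock, ... */
    } raid5_conf_t;

    /* raid5.h's accessor; most matches use either this macro or the
     * equivalent open-coded cast seen in error(), stop() and status(). */
    #define mddev_to_conf(mddev) ((raid5_conf_t *) (mddev)->private)

With that pairing in place, code that holds only the conf (the stripe-handling paths) reaches md core services through conf->mddev, e.g. md_wakeup_thread(conf->mddev->thread), while md core entry points such as run(), stop() and sync_request() go the other way through mddev->private or mddev_to_conf().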