Lines matching refs:conf in drivers/md/raid5.c (Linux kernel MD RAID4/5/6 driver)

Each hit shows the source line number, the matching code, and the enclosing function; a trailing "argument" or "local" marks lines where conf is declared as a parameter or a local variable.

71 static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect)  in stripe_hash()  argument
73 int hash = (sect >> RAID5_STRIPE_SHIFT(conf)) & HASH_MASK; in stripe_hash()
74 return &conf->stripe_hashtbl[hash]; in stripe_hash()
77 static inline int stripe_hash_locks_hash(struct r5conf *conf, sector_t sect) in stripe_hash_locks_hash() argument
79 return (sect >> RAID5_STRIPE_SHIFT(conf)) & STRIPE_HASH_LOCKS_MASK; in stripe_hash_locks_hash()
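A user-space sketch of the bucket math in stripe_hash() and stripe_hash_locks_hash() above: both helpers shift the per-stripe sector bits away to get a stripe number, then mask it, once with HASH_MASK to pick a hashtable bucket and once with STRIPE_HASH_LOCKS_MASK to pick the spinlock guarding that bucket. The constants below mirror common kernel defaults (4 KiB stripes, hence a shift of 3; 512 buckets; 8 locks) but are assumptions of this sketch, not taken from the listing.

#include <stdio.h>
#include <stdint.h>

typedef uint64_t sector_t;

#define STRIPE_SHIFT            3               /* ilog2(4096) - 9: 8 sectors per stripe */
#define NR_HASH                 (4096 / 8)      /* PAGE_SIZE / sizeof(struct hlist_head) */
#define HASH_MASK               (NR_HASH - 1)
#define NR_STRIPE_HASH_LOCKS    8
#define STRIPE_HASH_LOCKS_MASK  (NR_STRIPE_HASH_LOCKS - 1)

static int stripe_hash_bucket(sector_t sect)
{
        return (sect >> STRIPE_SHIFT) & HASH_MASK;
}

static int stripe_hash_lock(sector_t sect)
{
        return (sect >> STRIPE_SHIFT) & STRIPE_HASH_LOCKS_MASK;
}

int main(void)
{
        sector_t s;

        /* All 8 sectors of one 4 KiB stripe land in the same bucket and lock. */
        for (s = 0; s < 24; s++)
                printf("sector %2llu -> bucket %3d, lock %d\n",
                       (unsigned long long)s,
                       stripe_hash_bucket(s), stripe_hash_lock(s));
        return 0;
}

Because the lock mask uses the low bits of the same stripe number, each of the 8 locks covers a fixed subset of the 512 buckets, so all stripes in one bucket always contend on one lock.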
82 static inline void lock_device_hash_lock(struct r5conf *conf, int hash) in lock_device_hash_lock() argument
84 spin_lock_irq(conf->hash_locks + hash); in lock_device_hash_lock()
85 spin_lock(&conf->device_lock); in lock_device_hash_lock()
88 static inline void unlock_device_hash_lock(struct r5conf *conf, int hash) in unlock_device_hash_lock() argument
90 spin_unlock(&conf->device_lock); in unlock_device_hash_lock()
91 spin_unlock_irq(conf->hash_locks + hash); in unlock_device_hash_lock()
94 static inline void lock_all_device_hash_locks_irq(struct r5conf *conf) in lock_all_device_hash_locks_irq() argument
97 spin_lock_irq(conf->hash_locks); in lock_all_device_hash_locks_irq()
99 spin_lock_nest_lock(conf->hash_locks + i, conf->hash_locks); in lock_all_device_hash_locks_irq()
100 spin_lock(&conf->device_lock); in lock_all_device_hash_locks_irq()
103 static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf) in unlock_all_device_hash_locks_irq() argument
106 spin_unlock(&conf->device_lock); in unlock_all_device_hash_locks_irq()
108 spin_unlock(conf->hash_locks + i); in unlock_all_device_hash_locks_irq()
109 spin_unlock_irq(conf->hash_locks); in unlock_all_device_hash_locks_irq()
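The locking discipline in the four helpers above is strict: a per-bucket hash lock is always taken before the global device_lock, and the take-all path acquires the bucket locks in index order (spin_lock_nest_lock only tells lockdep the nesting is intentional). A pthread analogue of the same ordering, assuming 8 locks and ignoring the IRQ disabling the kernel needs:

#include <pthread.h>

#define NR_STRIPE_HASH_LOCKS 8  /* kernel default, assumed here */

static pthread_mutex_t hash_locks[NR_STRIPE_HASH_LOCKS];
static pthread_mutex_t device_lock = PTHREAD_MUTEX_INITIALIZER;

static void lock_device_hash_lock(int hash)
{
        pthread_mutex_lock(&hash_locks[hash]);  /* outer: one bucket */
        pthread_mutex_lock(&device_lock);       /* inner: global state */
}

static void unlock_device_hash_lock(int hash)
{
        pthread_mutex_unlock(&device_lock);     /* inner released first */
        pthread_mutex_unlock(&hash_locks[hash]);
}

static void lock_all_device_hash_locks(void)
{
        int i;

        for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)      /* fixed order */
                pthread_mutex_lock(&hash_locks[i]);
        pthread_mutex_lock(&device_lock);
}

static void unlock_all_device_hash_locks(void)
{
        int i;

        pthread_mutex_unlock(&device_lock);
        for (i = NR_STRIPE_HASH_LOCKS - 1; i >= 0; i--)
                pthread_mutex_unlock(&hash_locks[i]);
}

int main(void)
{
        int i;

        for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
                pthread_mutex_init(&hash_locks[i], NULL);
        lock_device_hash_lock(3);
        unlock_device_hash_lock(3);
        lock_all_device_hash_locks();
        unlock_all_device_hash_locks();
        return 0;
}

Acquiring the bucket locks in a fixed order is what makes the take-all path safe against concurrent single-bucket lockers.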
151 static void print_raid5_conf (struct r5conf *conf);
169 struct r5conf *conf = sh->raid_conf; in raid5_wakeup_stripe_thread() local
181 group = conf->worker_groups + cpu_to_group(cpu); in raid5_wakeup_stripe_thread()
190 if (conf->worker_cnt_per_group == 0) { in raid5_wakeup_stripe_thread()
191 md_wakeup_thread(conf->mddev->thread); in raid5_wakeup_stripe_thread()
195 group = conf->worker_groups + cpu_to_group(sh->cpu); in raid5_wakeup_stripe_thread()
203 for (i = 1; i < conf->worker_cnt_per_group && thread_cnt > 0; i++) { in raid5_wakeup_stripe_thread()
213 static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh, in do_release_stripe() argument
220 BUG_ON(atomic_read(&conf->active_stripes)==0); in do_release_stripe()
222 if (r5c_is_writeback(conf->log)) in do_release_stripe()
234 (conf->quiesce && r5c_is_writeback(conf->log) && in do_release_stripe()
244 list_add_tail(&sh->lru, &conf->delayed_list); in do_release_stripe()
246 sh->bm_seq - conf->seq_write > 0) in do_release_stripe()
247 list_add_tail(&sh->lru, &conf->bitmap_list); in do_release_stripe()
251 if (conf->worker_cnt_per_group == 0) { in do_release_stripe()
254 &conf->loprio_list); in do_release_stripe()
257 &conf->handle_list); in do_release_stripe()
263 md_wakeup_thread(conf->mddev->thread); in do_release_stripe()
267 if (atomic_dec_return(&conf->preread_active_stripes) in do_release_stripe()
269 md_wakeup_thread(conf->mddev->thread); in do_release_stripe()
270 atomic_dec(&conf->active_stripes); in do_release_stripe()
272 if (!r5c_is_writeback(conf->log)) in do_release_stripe()
278 else if (injournal == conf->raid_disks - conf->max_degraded) { in do_release_stripe()
281 atomic_inc(&conf->r5c_cached_full_stripes); in do_release_stripe()
283 atomic_dec(&conf->r5c_cached_partial_stripes); in do_release_stripe()
284 list_add_tail(&sh->lru, &conf->r5c_full_stripe_list); in do_release_stripe()
285 r5c_check_cached_full_stripe(conf); in do_release_stripe()
292 list_add_tail(&sh->lru, &conf->r5c_partial_stripe_list); in do_release_stripe()
298 static void __release_stripe(struct r5conf *conf, struct stripe_head *sh, in __release_stripe() argument
302 do_release_stripe(conf, sh, temp_inactive_list); in __release_stripe()
312 static void release_inactive_stripe_list(struct r5conf *conf, in release_inactive_stripe_list() argument
333 spin_lock_irqsave(conf->hash_locks + hash, flags); in release_inactive_stripe_list()
334 if (list_empty(conf->inactive_list + hash) && in release_inactive_stripe_list()
336 atomic_dec(&conf->empty_inactive_list_nr); in release_inactive_stripe_list()
337 list_splice_tail_init(list, conf->inactive_list + hash); in release_inactive_stripe_list()
339 spin_unlock_irqrestore(conf->hash_locks + hash, flags); in release_inactive_stripe_list()
346 wake_up(&conf->wait_for_stripe); in release_inactive_stripe_list()
347 if (atomic_read(&conf->active_stripes) == 0) in release_inactive_stripe_list()
348 wake_up(&conf->wait_for_quiescent); in release_inactive_stripe_list()
349 if (conf->retry_read_aligned) in release_inactive_stripe_list()
350 md_wakeup_thread(conf->mddev->thread); in release_inactive_stripe_list()
355 static int release_stripe_list(struct r5conf *conf, in release_stripe_list() argument
362 head = llist_del_all(&conf->released_stripes); in release_stripe_list()
376 __release_stripe(conf, sh, &temp_inactive_list[hash]); in release_stripe_list()
385 struct r5conf *conf = sh->raid_conf; in raid5_release_stripe() local
396 if (unlikely(!conf->mddev->thread) || in raid5_release_stripe()
399 wakeup = llist_add(&sh->release_list, &conf->released_stripes); in raid5_release_stripe()
401 md_wakeup_thread(conf->mddev->thread); in raid5_release_stripe()
405 if (atomic_dec_and_lock_irqsave(&sh->count, &conf->device_lock, flags)) { in raid5_release_stripe()
408 do_release_stripe(conf, sh, &list); in raid5_release_stripe()
409 spin_unlock_irqrestore(&conf->device_lock, flags); in raid5_release_stripe()
410 release_inactive_stripe_list(conf, &list, hash); in raid5_release_stripe()
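raid5_release_stripe() avoids device_lock on its fast path: the stripe is pushed onto conf->released_stripes, a lock-free llist, and raid5d later detaches the whole list at once (llist_del_all in release_stripe_list above). A C11-atomics sketch of that hand-off; the names llist_add/llist_del_all match the kernel API, but these bodies are an analogue, not the kernel implementation:

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct llist_node {
        struct llist_node *next;
};

static _Atomic(struct llist_node *) released_stripes;

/* Push one node; returns nonzero if the list was empty beforehand,
 * i.e. the caller should wake the daemon (as llist_add reports). */
static int llist_add(struct llist_node *node)
{
        struct llist_node *first = atomic_load(&released_stripes);

        do {
                node->next = first;
        } while (!atomic_compare_exchange_weak(&released_stripes,
                                               &first, node));
        return first == NULL;
}

/* Detach the whole list in one shot (as llist_del_all does). */
static struct llist_node *llist_del_all(void)
{
        return atomic_exchange(&released_stripes, NULL);
}

int main(void)
{
        struct llist_node nodes[3];
        struct llist_node *head;
        int wakeup, n = 0;

        wakeup = llist_add(&nodes[0]);
        llist_add(&nodes[1]);
        llist_add(&nodes[2]);
        for (head = llist_del_all(); head; head = head->next)
                n++;
        printf("wakeup=%d drained=%d\n", wakeup, n);    /* wakeup=1 drained=3 */
        return 0;
}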
422 static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh) in insert_hash() argument
424 struct hlist_head *hp = stripe_hash(conf, sh->sector); in insert_hash()
433 static struct stripe_head *get_free_stripe(struct r5conf *conf, int hash) in get_free_stripe() argument
438 if (list_empty(conf->inactive_list + hash)) in get_free_stripe()
440 first = (conf->inactive_list + hash)->next; in get_free_stripe()
444 atomic_inc(&conf->active_stripes); in get_free_stripe()
446 if (list_empty(conf->inactive_list + hash)) in get_free_stripe()
447 atomic_inc(&conf->empty_inactive_list_nr); in get_free_stripe()
491 init_stripe_shared_pages(struct stripe_head *sh, struct r5conf *conf, int disks) in init_stripe_shared_pages() argument
499 cnt = PAGE_SIZE / conf->stripe_size; in init_stripe_shared_pages()
563 static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous,
568 struct r5conf *conf = sh->raid_conf; in init_stripe() local
579 seq = read_seqcount_begin(&conf->gen_lock); in init_stripe()
580 sh->generation = conf->generation - previous; in init_stripe()
581 sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks; in init_stripe()
583 stripe_set_idx(sector, conf, previous, sh); in init_stripe()
600 if (read_seqcount_retry(&conf->gen_lock, seq)) in init_stripe()
603 insert_hash(conf, sh); in init_stripe()
608 static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector, in __find_stripe() argument
614 hlist_for_each_entry(sh, stripe_hash(conf, sector), hash) in __find_stripe()
634 int raid5_calc_degraded(struct r5conf *conf) in raid5_calc_degraded() argument
641 for (i = 0; i < conf->previous_raid_disks; i++) { in raid5_calc_degraded()
642 struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev); in raid5_calc_degraded()
644 rdev = rcu_dereference(conf->disks[i].replacement); in raid5_calc_degraded()
659 if (conf->raid_disks >= conf->previous_raid_disks) in raid5_calc_degraded()
663 if (conf->raid_disks == conf->previous_raid_disks) in raid5_calc_degraded()
667 for (i = 0; i < conf->raid_disks; i++) { in raid5_calc_degraded()
668 struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev); in raid5_calc_degraded()
670 rdev = rcu_dereference(conf->disks[i].replacement); in raid5_calc_degraded()
681 if (conf->raid_disks <= conf->previous_raid_disks) in raid5_calc_degraded()
690 static bool has_failed(struct r5conf *conf) in has_failed() argument
692 int degraded = conf->mddev->degraded; in has_failed()
694 if (test_bit(MD_BROKEN, &conf->mddev->flags)) in has_failed()
697 if (conf->mddev->reshape_position != MaxSector) in has_failed()
698 degraded = raid5_calc_degraded(conf); in has_failed()
700 return degraded > conf->max_degraded; in has_failed()
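raid5_calc_degraded() counts a slot as degraded only when neither the rdev nor its replacement is present and in sync, and has_failed() declares the array dead once that count exceeds max_degraded (1 for RAID4/5, 2 for RAID6). A toy model of the same bookkeeping, with the rcu_dereference and reshape handling stripped out as a simplification:

#include <stdbool.h>
#include <stdio.h>

struct disk_slot {
        bool rdev_in_sync;              /* main device present and in sync */
        bool replacement_in_sync;       /* hot replacement covering it     */
};

static int calc_degraded(const struct disk_slot *d, int raid_disks)
{
        int i, degraded = 0;

        for (i = 0; i < raid_disks; i++)
                if (!d[i].rdev_in_sync && !d[i].replacement_in_sync)
                        degraded++;
        return degraded;
}

static bool has_failed(const struct disk_slot *d, int raid_disks,
                       int max_degraded)
{
        return calc_degraded(d, raid_disks) > max_degraded;
}

int main(void)
{
        struct disk_slot raid6[4] = {
                { false, true  },       /* dead rdev but live replacement */
                { false, false },       /* truly missing                  */
                { true,  false },
                { false, false },       /* second missing disk            */
        };

        /* RAID6 (max_degraded == 2) survives two missing slots. */
        printf("degraded=%d failed=%d\n",
               calc_degraded(raid6, 4), (int)has_failed(raid6, 4, 2));
        return 0;
}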
704 raid5_get_active_stripe(struct r5conf *conf, sector_t sector, in raid5_get_active_stripe() argument
708 int hash = stripe_hash_locks_hash(conf, sector); in raid5_get_active_stripe()
713 spin_lock_irq(conf->hash_locks + hash); in raid5_get_active_stripe()
716 wait_event_lock_irq(conf->wait_for_quiescent, in raid5_get_active_stripe()
717 conf->quiesce == 0 || noquiesce, in raid5_get_active_stripe()
718 *(conf->hash_locks + hash)); in raid5_get_active_stripe()
719 sh = __find_stripe(conf, sector, conf->generation - previous); in raid5_get_active_stripe()
721 if (!test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state)) { in raid5_get_active_stripe()
722 sh = get_free_stripe(conf, hash); in raid5_get_active_stripe()
724 &conf->cache_state)) in raid5_get_active_stripe()
726 &conf->cache_state); in raid5_get_active_stripe()
731 r5c_check_stripe_cache_usage(conf); in raid5_get_active_stripe()
734 &conf->cache_state); in raid5_get_active_stripe()
735 r5l_wake_reclaim(conf->log, 0); in raid5_get_active_stripe()
737 conf->wait_for_stripe, in raid5_get_active_stripe()
738 !list_empty(conf->inactive_list + hash) && in raid5_get_active_stripe()
739 (atomic_read(&conf->active_stripes) in raid5_get_active_stripe()
740 < (conf->max_nr_stripes * 3 / 4) in raid5_get_active_stripe()
742 &conf->cache_state)), in raid5_get_active_stripe()
743 *(conf->hash_locks + hash)); in raid5_get_active_stripe()
745 &conf->cache_state); in raid5_get_active_stripe()
751 spin_lock(&conf->device_lock); in raid5_get_active_stripe()
754 atomic_inc(&conf->active_stripes); in raid5_get_active_stripe()
758 if (!list_empty(conf->inactive_list + hash)) in raid5_get_active_stripe()
761 if (list_empty(conf->inactive_list + hash) && inc_empty_inactive_list_flag) in raid5_get_active_stripe()
762 atomic_inc(&conf->empty_inactive_list_nr); in raid5_get_active_stripe()
769 spin_unlock(&conf->device_lock); in raid5_get_active_stripe()
773 spin_unlock_irq(conf->hash_locks + hash); in raid5_get_active_stripe()
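raid5_get_active_stripe() is a find-or-allocate loop under the bucket lock: look the sector up in the hash, otherwise pop a stripe off the inactive list, otherwise sleep on wait_for_stripe until release_inactive_stripe_list() refills it. A condition-variable skeleton of that control flow, with assumed simplifications (no quiesce gating, no R5_INACTIVE_BLOCKED back-pressure, a stubbed lookup):

#include <pthread.h>
#include <stddef.h>

struct stripe_head {
        struct stripe_head *next;
};

static pthread_mutex_t hash_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wait_for_stripe = PTHREAD_COND_INITIALIZER;
static struct stripe_head *inactive_list;

/* Hash lookup stubbed out; the driver walks the bucket's hlist here. */
static struct stripe_head *find_stripe(long sector)
{
        (void)sector;
        return NULL;
}

static struct stripe_head *get_free_stripe(void)
{
        struct stripe_head *sh = inactive_list;

        if (sh)
                inactive_list = sh->next;
        return sh;
}

static struct stripe_head *get_active_stripe(long sector)
{
        struct stripe_head *sh;

        pthread_mutex_lock(&hash_lock);
        for (;;) {
                sh = find_stripe(sector);       /* already active or cached? */
                if (sh)
                        break;
                sh = get_free_stripe();         /* otherwise take a free one */
                if (sh)
                        break;
                /* Nothing free: sleep with the lock dropped; the release
                 * path would pthread_cond_broadcast(&wait_for_stripe). */
                pthread_cond_wait(&wait_for_stripe, &hash_lock);
        }
        pthread_mutex_unlock(&hash_lock);
        return sh;
}

int main(void)
{
        static struct stripe_head one;

        inactive_list = &one;
        return get_active_stripe(123) != &one;  /* exits 0 */
}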
807 struct r5conf *conf = sh->raid_conf; in stripe_can_batch() local
809 if (raid5_has_log(conf) || raid5_has_ppl(conf)) in stripe_can_batch()
817 static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh) in stripe_add_to_batch_list() argument
827 if (!sector_div(tmp_sec, conf->chunk_sectors)) in stripe_add_to_batch_list()
829 head_sector = sh->sector - RAID5_STRIPE_SECTORS(conf); in stripe_add_to_batch_list()
831 hash = stripe_hash_locks_hash(conf, head_sector); in stripe_add_to_batch_list()
832 spin_lock_irq(conf->hash_locks + hash); in stripe_add_to_batch_list()
833 head = __find_stripe(conf, head_sector, conf->generation); in stripe_add_to_batch_list()
835 spin_lock(&conf->device_lock); in stripe_add_to_batch_list()
838 atomic_inc(&conf->active_stripes); in stripe_add_to_batch_list()
842 if (!list_empty(conf->inactive_list + hash)) in stripe_add_to_batch_list()
845 if (list_empty(conf->inactive_list + hash) && inc_empty_inactive_list_flag) in stripe_add_to_batch_list()
846 atomic_inc(&conf->empty_inactive_list_nr); in stripe_add_to_batch_list()
853 spin_unlock(&conf->device_lock); in stripe_add_to_batch_list()
855 spin_unlock_irq(conf->hash_locks + hash); in stripe_add_to_batch_list()
908 if (atomic_dec_return(&conf->preread_active_stripes) in stripe_add_to_batch_list()
910 md_wakeup_thread(conf->mddev->thread); in stripe_add_to_batch_list()
931 static int use_new_offset(struct r5conf *conf, struct stripe_head *sh) in use_new_offset() argument
933 sector_t progress = conf->reshape_progress; in use_new_offset()
941 if (sh->generation == conf->generation - 1) in use_new_offset()
971 static void dispatch_defer_bios(struct r5conf *conf, int target, in dispatch_defer_bios() argument
978 if (conf->pending_data_cnt == 0) in dispatch_defer_bios()
981 list_sort(NULL, &conf->pending_list, cmp_stripe); in dispatch_defer_bios()
983 first = conf->pending_list.next; in dispatch_defer_bios()
986 if (conf->next_pending_data) in dispatch_defer_bios()
987 list_move_tail(&conf->pending_list, in dispatch_defer_bios()
988 &conf->next_pending_data->sibling); in dispatch_defer_bios()
990 while (!list_empty(&conf->pending_list)) { in dispatch_defer_bios()
991 data = list_first_entry(&conf->pending_list, in dispatch_defer_bios()
998 list_move(&data->sibling, &conf->free_list); in dispatch_defer_bios()
1003 conf->pending_data_cnt -= cnt; in dispatch_defer_bios()
1004 BUG_ON(conf->pending_data_cnt < 0 || cnt < target); in dispatch_defer_bios()
1006 if (next != &conf->pending_list) in dispatch_defer_bios()
1007 conf->next_pending_data = list_entry(next, in dispatch_defer_bios()
1010 conf->next_pending_data = NULL; in dispatch_defer_bios()
1012 if (first != &conf->pending_list) in dispatch_defer_bios()
1013 list_move_tail(&conf->pending_list, first); in dispatch_defer_bios()
1016 static void flush_deferred_bios(struct r5conf *conf) in flush_deferred_bios() argument
1020 if (conf->pending_data_cnt == 0) in flush_deferred_bios()
1023 spin_lock(&conf->pending_bios_lock); in flush_deferred_bios()
1024 dispatch_defer_bios(conf, conf->pending_data_cnt, &tmp); in flush_deferred_bios()
1025 BUG_ON(conf->pending_data_cnt != 0); in flush_deferred_bios()
1026 spin_unlock(&conf->pending_bios_lock); in flush_deferred_bios()
1031 static void defer_issue_bios(struct r5conf *conf, sector_t sector, in defer_issue_bios() argument
1037 spin_lock(&conf->pending_bios_lock); in defer_issue_bios()
1038 ent = list_first_entry(&conf->free_list, struct r5pending_data, in defer_issue_bios()
1040 list_move_tail(&ent->sibling, &conf->pending_list); in defer_issue_bios()
1044 conf->pending_data_cnt++; in defer_issue_bios()
1045 if (conf->pending_data_cnt >= PENDING_IO_MAX) in defer_issue_bios()
1046 dispatch_defer_bios(conf, PENDING_IO_ONE_FLUSH, &tmp); in defer_issue_bios()
1048 spin_unlock(&conf->pending_bios_lock); in defer_issue_bios()
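Deferred bios accumulate on conf->pending_list and go out in sector-sorted batches: defer_issue_bios() kicks out PENDING_IO_ONE_FLUSH entries once the backlog reaches PENDING_IO_MAX, and flush_deferred_bios() drains the rest. A counting sketch of that throttle; the names match the listing, but the 512/128 values are assumed here to match the raid5.h defaults:

#include <stdio.h>

#define PENDING_IO_MAX       512
#define PENDING_IO_ONE_FLUSH 128

static int pending_cnt;

/* Stand-in for dispatch_defer_bios(): the driver sorts the pending list
 * by sector and issues at least `target` entries; here we just count. */
static void dispatch_defer(int target)
{
        pending_cnt -= target;
        printf("dispatched %3d, %3d still pending\n", target, pending_cnt);
}

/* Stand-in for defer_issue_bios(): queue one unit of work and kick out
 * a partial batch once the backlog hits PENDING_IO_MAX. */
static void defer_issue(void)
{
        pending_cnt++;
        if (pending_cnt >= PENDING_IO_MAX)
                dispatch_defer(PENDING_IO_ONE_FLUSH);
}

/* Stand-in for flush_deferred_bios(): drain the whole backlog. */
static void flush_deferred(void)
{
        if (pending_cnt)
                dispatch_defer(pending_cnt);
}

int main(void)
{
        int i;

        for (i = 0; i < 600; i++)
                defer_issue();          /* crosses the 512 threshold once */
        flush_deferred();
        return 0;
}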
1060 struct r5conf *conf = sh->raid_conf; in ops_run_io() local
1071 should_defer = conf->batch_bio_dispatch && conf->group_cnt; in ops_run_io()
1102 rrdev = rcu_dereference(conf->disks[i].replacement); in ops_run_io()
1104 rdev = rcu_dereference(conf->disks[i].rdev); in ops_run_io()
1139 int bad = is_badblock(rdev, sh->sector, RAID5_STRIPE_SECTORS(conf), in ops_run_io()
1146 if (!conf->mddev->external && in ops_run_io()
1147 conf->mddev->sb_flags) { in ops_run_io()
1152 md_check_recovery(conf->mddev); in ops_run_io()
1160 md_wait_for_blocked_rdev(rdev, conf->mddev); in ops_run_io()
1163 rdev_dec_pending(rdev, conf->mddev); in ops_run_io()
1171 md_sync_acct(rdev->bdev, RAID5_STRIPE_SECTORS(conf)); in ops_run_io()
1188 if (use_new_offset(conf, sh)) in ops_run_io()
1211 bi->bi_io_vec[0].bv_len = RAID5_STRIPE_SIZE(conf); in ops_run_io()
1213 bi->bi_iter.bi_size = RAID5_STRIPE_SIZE(conf); in ops_run_io()
1226 if (conf->mddev->gendisk) in ops_run_io()
1228 disk_devt(conf->mddev->gendisk), in ops_run_io()
1238 md_sync_acct(rrdev->bdev, RAID5_STRIPE_SECTORS(conf)); in ops_run_io()
1255 if (use_new_offset(conf, sh)) in ops_run_io()
1265 rbi->bi_io_vec[0].bv_len = RAID5_STRIPE_SIZE(conf); in ops_run_io()
1267 rbi->bi_iter.bi_size = RAID5_STRIPE_SIZE(conf); in ops_run_io()
1276 if (conf->mddev->gendisk) in ops_run_io()
1278 disk_devt(conf->mddev->gendisk), in ops_run_io()
1303 defer_issue_bios(conf, head_sh->sector, &pending_bios); in ops_run_io()
1317 struct r5conf *conf = sh->raid_conf; in async_copy_data() local
1339 if (len > 0 && page_offset + len > RAID5_STRIPE_SIZE(conf)) in async_copy_data()
1340 clen = RAID5_STRIPE_SIZE(conf) - page_offset; in async_copy_data()
1348 if (conf->skip_copy && in async_copy_data()
1350 clen == RAID5_STRIPE_SIZE(conf) && in async_copy_data()
1375 struct r5conf *conf = sh->raid_conf; in ops_complete_biofill() local
1396 dev->sector + RAID5_STRIPE_SECTORS(conf)) { in ops_complete_biofill()
1397 rbi2 = r5_next_bio(conf, rbi, dev->sector); in ops_complete_biofill()
1414 struct r5conf *conf = sh->raid_conf; in ops_run_biofill() local
1429 dev->sector + RAID5_STRIPE_SECTORS(conf)) { in ops_run_biofill()
1433 rbi = r5_next_bio(conf, rbi, dev->sector); in ops_run_biofill()
1859 struct r5conf *conf = sh->raid_conf; in ops_run_biodrain() local
1892 dev->sector + RAID5_STRIPE_SECTORS(conf)) { in ops_run_biodrain()
1903 r5c_is_writeback(conf->log)); in ops_run_biodrain()
1905 !r5c_is_writeback(conf->log)) { in ops_run_biodrain()
1911 wbi = r5_next_bio(conf, wbi, dev->sector); in ops_run_biodrain()
2216 struct r5conf *conf = sh->raid_conf; in raid_run_ops() local
2217 int level = conf->level; in raid_run_ops()
2222 percpu = per_cpu_ptr(conf->percpu, cpu); in raid_run_ops()
2295 int disks, struct r5conf *conf) in alloc_stripe() argument
2309 sh->raid_conf = conf; in alloc_stripe()
2318 if (raid5_has_ppl(conf)) { in alloc_stripe()
2326 if (init_stripe_shared_pages(sh, conf, disks)) { in alloc_stripe()
2334 static int grow_one_stripe(struct r5conf *conf, gfp_t gfp) in grow_one_stripe() argument
2338 sh = alloc_stripe(conf->slab_cache, gfp, conf->pool_size, conf); in grow_one_stripe()
2344 free_stripe(conf->slab_cache, sh); in grow_one_stripe()
2348 conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS; in grow_one_stripe()
2350 atomic_inc(&conf->active_stripes); in grow_one_stripe()
2353 conf->max_nr_stripes++; in grow_one_stripe()
2357 static int grow_stripes(struct r5conf *conf, int num) in grow_stripes() argument
2360 size_t namelen = sizeof(conf->cache_name[0]); in grow_stripes()
2361 int devs = max(conf->raid_disks, conf->previous_raid_disks); in grow_stripes()
2363 if (conf->mddev->gendisk) in grow_stripes()
2364 snprintf(conf->cache_name[0], namelen, in grow_stripes()
2365 "raid%d-%s", conf->level, mdname(conf->mddev)); in grow_stripes()
2367 snprintf(conf->cache_name[0], namelen, in grow_stripes()
2368 "raid%d-%p", conf->level, conf->mddev); in grow_stripes()
2369 snprintf(conf->cache_name[1], namelen, "%.27s-alt", conf->cache_name[0]); in grow_stripes()
2371 conf->active_name = 0; in grow_stripes()
2372 sc = kmem_cache_create(conf->cache_name[conf->active_name], in grow_stripes()
2377 conf->slab_cache = sc; in grow_stripes()
2378 conf->pool_size = devs; in grow_stripes()
2380 if (!grow_one_stripe(conf, GFP_KERNEL)) in grow_stripes()
2427 static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors) in resize_chunks() argument
2437 if (conf->scribble_disks >= new_disks && in resize_chunks()
2438 conf->scribble_sectors >= new_sectors) in resize_chunks()
2440 mddev_suspend(conf->mddev); in resize_chunks()
2446 percpu = per_cpu_ptr(conf->percpu, cpu); in resize_chunks()
2448 new_sectors / RAID5_STRIPE_SECTORS(conf)); in resize_chunks()
2454 mddev_resume(conf->mddev); in resize_chunks()
2456 conf->scribble_disks = new_disks; in resize_chunks()
2457 conf->scribble_sectors = new_sectors; in resize_chunks()
2462 static int resize_stripes(struct r5conf *conf, int newsize) in resize_stripes() argument
2495 md_allow_write(conf->mddev); in resize_stripes()
2498 sc = kmem_cache_create(conf->cache_name[1-conf->active_name], in resize_stripes()
2505 mutex_lock(&conf->cache_size_mutex); in resize_stripes()
2507 for (i = conf->max_nr_stripes; i; i--) { in resize_stripes()
2508 nsh = alloc_stripe(sc, GFP_KERNEL, newsize, conf); in resize_stripes()
2522 mutex_unlock(&conf->cache_size_mutex); in resize_stripes()
2532 lock_device_hash_lock(conf, hash); in resize_stripes()
2533 wait_event_cmd(conf->wait_for_stripe, in resize_stripes()
2534 !list_empty(conf->inactive_list + hash), in resize_stripes()
2535 unlock_device_hash_lock(conf, hash), in resize_stripes()
2536 lock_device_hash_lock(conf, hash)); in resize_stripes()
2537 osh = get_free_stripe(conf, hash); in resize_stripes()
2538 unlock_device_hash_lock(conf, hash); in resize_stripes()
2546 for(i=0; i<conf->pool_size; i++) { in resize_stripes()
2552 free_stripe(conf->slab_cache, osh); in resize_stripes()
2554 if (cnt >= conf->max_nr_stripes / NR_STRIPE_HASH_LOCKS + in resize_stripes()
2555 !!((conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS) > hash)) { in resize_stripes()
2560 kmem_cache_destroy(conf->slab_cache); in resize_stripes()
2569 for (i = 0; i < conf->pool_size; i++) in resize_stripes()
2570 ndisks[i] = conf->disks[i]; in resize_stripes()
2572 for (i = conf->pool_size; i < newsize; i++) { in resize_stripes()
2579 for (i = conf->pool_size; i < newsize; i++) in resize_stripes()
2584 kfree(conf->disks); in resize_stripes()
2585 conf->disks = ndisks; in resize_stripes()
2590 conf->slab_cache = sc; in resize_stripes()
2591 conf->active_name = 1-conf->active_name; in resize_stripes()
2607 for (i = conf->raid_disks; i < newsize; i++) { in resize_stripes()
2615 for (i=conf->raid_disks; i < newsize; i++) in resize_stripes()
2630 conf->pool_size = newsize; in resize_stripes()
2631 mutex_unlock(&conf->cache_size_mutex); in resize_stripes()
2636 static int drop_one_stripe(struct r5conf *conf) in drop_one_stripe() argument
2639 int hash = (conf->max_nr_stripes - 1) & STRIPE_HASH_LOCKS_MASK; in drop_one_stripe()
2641 spin_lock_irq(conf->hash_locks + hash); in drop_one_stripe()
2642 sh = get_free_stripe(conf, hash); in drop_one_stripe()
2643 spin_unlock_irq(conf->hash_locks + hash); in drop_one_stripe()
2648 free_stripe(conf->slab_cache, sh); in drop_one_stripe()
2649 atomic_dec(&conf->active_stripes); in drop_one_stripe()
2650 conf->max_nr_stripes--; in drop_one_stripe()
2654 static void shrink_stripes(struct r5conf *conf) in shrink_stripes() argument
2656 while (conf->max_nr_stripes && in shrink_stripes()
2657 drop_one_stripe(conf)) in shrink_stripes()
2660 kmem_cache_destroy(conf->slab_cache); in shrink_stripes()
2661 conf->slab_cache = NULL; in shrink_stripes()
2667 struct r5conf *conf = sh->raid_conf; in raid5_end_read_request() local
2691 rdev = conf->disks[i].replacement; in raid5_end_read_request()
2693 rdev = conf->disks[i].rdev; in raid5_end_read_request()
2695 if (use_new_offset(conf, sh)) in raid5_end_read_request()
2708 mdname(conf->mddev), RAID5_STRIPE_SECTORS(conf), in raid5_end_read_request()
2711 atomic_add(RAID5_STRIPE_SECTORS(conf), &rdev->corrected_errors); in raid5_end_read_request()
2737 mdname(conf->mddev), in raid5_end_read_request()
2740 else if (conf->mddev->degraded >= conf->max_degraded) { in raid5_end_read_request()
2744 mdname(conf->mddev), in raid5_end_read_request()
2752 mdname(conf->mddev), in raid5_end_read_request()
2756 > conf->max_nr_stripes) { in raid5_end_read_request()
2759 mdname(conf->mddev), in raid5_end_read_request()
2761 conf->max_nr_stripes); in raid5_end_read_request()
2763 mdname(conf->mddev), bdn); in raid5_end_read_request()
2784 rdev, sh->sector, RAID5_STRIPE_SECTORS(conf), 0))) in raid5_end_read_request()
2785 md_error(conf->mddev, rdev); in raid5_end_read_request()
2788 rdev_dec_pending(rdev, conf->mddev); in raid5_end_read_request()
2798 struct r5conf *conf = sh->raid_conf; in raid5_end_write_request() local
2807 rdev = conf->disks[i].rdev; in raid5_end_write_request()
2811 rdev = conf->disks[i].replacement; in raid5_end_write_request()
2819 rdev = conf->disks[i].rdev; in raid5_end_write_request()
2834 md_error(conf->mddev, rdev); in raid5_end_write_request()
2836 RAID5_STRIPE_SECTORS(conf), in raid5_end_write_request()
2848 RAID5_STRIPE_SECTORS(conf), in raid5_end_write_request()
2859 rdev_dec_pending(rdev, conf->mddev); in raid5_end_write_request()
2877 struct r5conf *conf = mddev->private; in raid5_error() local
2884 spin_lock_irqsave(&conf->device_lock, flags); in raid5_error()
2887 mddev->degraded = raid5_calc_degraded(conf); in raid5_error()
2889 if (has_failed(conf)) { in raid5_error()
2890 set_bit(MD_BROKEN, &conf->mddev->flags); in raid5_error()
2891 conf->recovery_disabled = mddev->recovery_disabled; in raid5_error()
2894 mdname(mddev), mddev->degraded, conf->raid_disks); in raid5_error()
2897 mdname(mddev), conf->raid_disks - mddev->degraded); in raid5_error()
2900 spin_unlock_irqrestore(&conf->device_lock, flags); in raid5_error()
2913 sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector, in raid5_compute_sector() argument
2923 int algorithm = previous ? conf->prev_algo in raid5_compute_sector()
2924 : conf->algorithm; in raid5_compute_sector()
2925 int sectors_per_chunk = previous ? conf->prev_chunk_sectors in raid5_compute_sector()
2926 : conf->chunk_sectors; in raid5_compute_sector()
2927 int raid_disks = previous ? conf->previous_raid_disks in raid5_compute_sector()
2928 : conf->raid_disks; in raid5_compute_sector()
2929 int data_disks = raid_disks - conf->max_degraded; in raid5_compute_sector()
2949 switch(conf->level) { in raid5_compute_sector()
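Before the switch on conf->level, raid5_compute_sector() splits the logical sector into a chunk number, an offset within the chunk, and a stripe number; the per-algorithm cases then rotate parity across the disks. A sketch of that arithmetic for the common left-symmetric RAID5 layout (one parity disk, parity stepping backwards one disk per stripe); treat the pd_idx/dd_idx formulas as an illustration of that single case, not of the whole switch:

#include <stdio.h>
#include <stdint.h>

typedef uint64_t sector_t;

/* Left-symmetric RAID5 mapping: returns the per-device sector and fills
 * in the data-disk index (dd_idx) and parity-disk index (pd_idx). */
static sector_t compute_sector(sector_t r_sector, int raid_disks,
                               int sectors_per_chunk, int *dd_idx, int *pd_idx)
{
        int data_disks = raid_disks - 1;                /* max_degraded == 1 */
        sector_t chunk_number = r_sector / sectors_per_chunk;
        int chunk_offset = (int)(r_sector % sectors_per_chunk);
        sector_t stripe = chunk_number / data_disks;

        *dd_idx = (int)(chunk_number % data_disks);     /* before rotation */
        /* Parity steps backwards one disk per stripe; data indices
         * continue round-robin starting just after the parity disk. */
        *pd_idx = data_disks - (int)(stripe % raid_disks);
        *dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks;

        return stripe * sectors_per_chunk + chunk_offset;
}

int main(void)
{
        int dd, pd;
        sector_t s, dev_sector;

        /* 4 disks, 64 KiB (128-sector) chunks: print one line per chunk. */
        for (s = 0; s < 4 * 3 * 128; s += 128) {
                dev_sector = compute_sector(s, 4, 128, &dd, &pd);
                printf("lba %5llu -> disk %d (parity on %d), dev sector %llu\n",
                       (unsigned long long)s, dd, pd,
                       (unsigned long long)dev_sector);
        }
        return 0;
}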
3117 struct r5conf *conf = sh->raid_conf; in raid5_compute_blocknr() local
3119 int data_disks = raid_disks - conf->max_degraded; in raid5_compute_blocknr()
3121 int sectors_per_chunk = previous ? conf->prev_chunk_sectors in raid5_compute_blocknr()
3122 : conf->chunk_sectors; in raid5_compute_blocknr()
3123 int algorithm = previous ? conf->prev_algo in raid5_compute_blocknr()
3124 : conf->algorithm; in raid5_compute_blocknr()
3137 switch(conf->level) { in raid5_compute_blocknr()
3224 check = raid5_compute_sector(conf, r_sector, in raid5_compute_blocknr()
3229 mdname(conf->mddev)); in raid5_compute_blocknr()
3273 static inline bool delay_towrite(struct r5conf *conf, in delay_towrite() argument
3282 if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) && in delay_towrite()
3296 struct r5conf *conf = sh->raid_conf; in schedule_reconstruction() local
3297 int level = conf->level; in schedule_reconstruction()
3311 if (dev->towrite && !delay_towrite(conf, dev, s)) { in schedule_reconstruction()
3337 if (s->locked + conf->max_degraded == disks) in schedule_reconstruction()
3339 atomic_inc(&conf->pending_full_writes); in schedule_reconstruction()
3409 struct r5conf *conf = sh->raid_conf; in add_stripe_bio() local
3435 if (forwrite && raid5_has_ppl(conf)) { in add_stripe_bio()
3461 if (first + conf->chunk_sectors * (count - 1) != last) in add_stripe_bio()
3473 md_write_inc(conf->mddev, bi); in add_stripe_bio()
3479 sector < sh->dev[dd_idx].sector + RAID5_STRIPE_SECTORS(conf) && in add_stripe_bio()
3481 bi = r5_next_bio(conf, bi, sh->dev[dd_idx].sector)) { in add_stripe_bio()
3485 if (sector >= sh->dev[dd_idx].sector + RAID5_STRIPE_SECTORS(conf)) in add_stripe_bio()
3494 if (conf->mddev->bitmap && firstwrite) { in add_stripe_bio()
3509 md_bitmap_startwrite(conf->mddev->bitmap, sh->sector, in add_stripe_bio()
3510 RAID5_STRIPE_SECTORS(conf), 0); in add_stripe_bio()
3514 sh->bm_seq = conf->seq_flush+1; in add_stripe_bio()
3521 stripe_add_to_batch_list(conf, sh); in add_stripe_bio()
3530 static void end_reshape(struct r5conf *conf);
3532 static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous, in stripe_set_idx() argument
3536 previous ? conf->prev_chunk_sectors : conf->chunk_sectors; in stripe_set_idx()
3539 int disks = previous ? conf->previous_raid_disks : conf->raid_disks; in stripe_set_idx()
3541 raid5_compute_sector(conf, in stripe_set_idx()
3542 stripe * (disks - conf->max_degraded) in stripe_set_idx()
3549 handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, in handle_failed_stripe() argument
3561 rdev = rcu_dereference(conf->disks[i].rdev); in handle_failed_stripe()
3572 RAID5_STRIPE_SECTORS(conf), 0)) in handle_failed_stripe()
3573 md_error(conf->mddev, rdev); in handle_failed_stripe()
3574 rdev_dec_pending(rdev, conf->mddev); in handle_failed_stripe()
3589 wake_up(&conf->wait_for_overlap); in handle_failed_stripe()
3592 sh->dev[i].sector + RAID5_STRIPE_SECTORS(conf)) { in handle_failed_stripe()
3593 struct bio *nextbi = r5_next_bio(conf, bi, sh->dev[i].sector); in handle_failed_stripe()
3595 md_write_end(conf->mddev); in handle_failed_stripe()
3600 md_bitmap_endwrite(conf->mddev->bitmap, sh->sector, in handle_failed_stripe()
3601 RAID5_STRIPE_SECTORS(conf), 0, 0); in handle_failed_stripe()
3613 sh->dev[i].sector + RAID5_STRIPE_SECTORS(conf)) { in handle_failed_stripe()
3614 struct bio *bi2 = r5_next_bio(conf, bi, sh->dev[i].sector); in handle_failed_stripe()
3616 md_write_end(conf->mddev); in handle_failed_stripe()
3625 s->failed > conf->max_degraded && in handle_failed_stripe()
3633 wake_up(&conf->wait_for_overlap); in handle_failed_stripe()
3637 sh->dev[i].sector + RAID5_STRIPE_SECTORS(conf)) { in handle_failed_stripe()
3639 r5_next_bio(conf, bi, sh->dev[i].sector); in handle_failed_stripe()
3646 md_bitmap_endwrite(conf->mddev->bitmap, sh->sector, in handle_failed_stripe()
3647 RAID5_STRIPE_SECTORS(conf), 0, 0); in handle_failed_stripe()
3657 if (atomic_dec_and_test(&conf->pending_full_writes)) in handle_failed_stripe()
3658 md_wakeup_thread(conf->mddev->thread); in handle_failed_stripe()
3662 handle_failed_sync(struct r5conf *conf, struct stripe_head *sh, in handle_failed_sync() argument
3671 wake_up(&conf->wait_for_overlap); in handle_failed_sync()
3681 if (test_bit(MD_RECOVERY_RECOVER, &conf->mddev->recovery)) { in handle_failed_sync()
3686 for (i = 0; i < conf->raid_disks; i++) { in handle_failed_sync()
3687 struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev); in handle_failed_sync()
3692 RAID5_STRIPE_SECTORS(conf), 0)) in handle_failed_sync()
3694 rdev = rcu_dereference(conf->disks[i].replacement); in handle_failed_sync()
3699 RAID5_STRIPE_SECTORS(conf), 0)) in handle_failed_sync()
3704 conf->recovery_disabled = in handle_failed_sync()
3705 conf->mddev->recovery_disabled; in handle_failed_sync()
3707 md_done_sync(conf->mddev, RAID5_STRIPE_SECTORS(conf), !abort); in handle_failed_sync()
3961 static void handle_stripe_clean_event(struct r5conf *conf, in handle_stripe_clean_event() argument
3992 dev->sector + RAID5_STRIPE_SECTORS(conf)) { in handle_stripe_clean_event()
3993 wbi2 = r5_next_bio(conf, wbi, dev->sector); in handle_stripe_clean_event()
3994 md_write_end(conf->mddev); in handle_stripe_clean_event()
3998 md_bitmap_endwrite(conf->mddev->bitmap, sh->sector, in handle_stripe_clean_event()
3999 RAID5_STRIPE_SECTORS(conf), in handle_stripe_clean_event()
4037 spin_lock_irq(conf->hash_locks + hash); in handle_stripe_clean_event()
4039 spin_unlock_irq(conf->hash_locks + hash); in handle_stripe_clean_event()
4054 if (atomic_dec_and_test(&conf->pending_full_writes)) in handle_stripe_clean_event()
4055 md_wakeup_thread(conf->mddev->thread); in handle_stripe_clean_event()
4076 static int handle_stripe_dirtying(struct r5conf *conf, in handle_stripe_dirtying() argument
4082 sector_t recovery_cp = conf->mddev->recovery_cp; in handle_stripe_dirtying()
4091 if (conf->rmw_level == PARITY_DISABLE_RMW || in handle_stripe_dirtying()
4099 conf->rmw_level, (unsigned long long)recovery_cp, in handle_stripe_dirtying()
4104 if (((dev->towrite && !delay_towrite(conf, dev, s)) || in handle_stripe_dirtying()
4131 if ((rmw < rcw || (rmw == rcw && conf->rmw_level == PARITY_PREFER_RMW)) && rmw > 0) { in handle_stripe_dirtying()
4133 if (conf->mddev->queue) in handle_stripe_dirtying()
4134 blk_add_trace_msg(conf->mddev->queue, in handle_stripe_dirtying()
4155 &conf->cache_state)) { in handle_stripe_dirtying()
4169 if (((dev->towrite && !delay_towrite(conf, dev, s)) || in handle_stripe_dirtying()
4188 if ((rcw < rmw || (rcw == rmw && conf->rmw_level != PARITY_PREFER_RMW)) && rcw > 0) { in handle_stripe_dirtying()
4213 if (rcw && conf->mddev->queue) in handle_stripe_dirtying()
4214 blk_add_trace_msg(conf->mddev->queue, "raid5 rcw %llu %d %d %d", in handle_stripe_dirtying()
4240 static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh, in handle_parity_checks5() argument
4302 atomic64_add(RAID5_STRIPE_SECTORS(conf), &conf->mddev->resync_mismatches); in handle_parity_checks5()
4303 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) { in handle_parity_checks5()
4307 "%llu-%llu\n", mdname(conf->mddev), in handle_parity_checks5()
4310 RAID5_STRIPE_SECTORS(conf)); in handle_parity_checks5()
4333 static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh, in handle_parity_checks6() argument
4429 mdname(conf->mddev), in handle_parity_checks6()
4467 atomic64_add(RAID5_STRIPE_SECTORS(conf), &conf->mddev->resync_mismatches); in handle_parity_checks6()
4468 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) { in handle_parity_checks6()
4472 "%llu-%llu\n", mdname(conf->mddev), in handle_parity_checks6()
4475 RAID5_STRIPE_SECTORS(conf)); in handle_parity_checks6()
4510 static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh) in handle_stripe_expansion() argument
4527 sector_t s = raid5_compute_sector(conf, bn, 0, in handle_stripe_expansion()
4529 sh2 = raid5_get_active_stripe(conf, s, 0, 1, 1); in handle_stripe_expansion()
4547 sh->dev[i].offset, RAID5_STRIPE_SIZE(conf), in handle_stripe_expansion()
4552 for (j = 0; j < conf->raid_disks; j++) in handle_stripe_expansion()
4557 if (j == conf->raid_disks) { in handle_stripe_expansion()
4584 struct r5conf *conf = sh->raid_conf; in analyse_stripe() local
4596 s->log_failed = r5l_log_disk_error(conf); in analyse_stripe()
4644 rdev = rcu_dereference(conf->disks[i].replacement); in analyse_stripe()
4646 rdev->recovery_offset >= sh->sector + RAID5_STRIPE_SECTORS(conf) && in analyse_stripe()
4647 !is_badblock(rdev, sh->sector, RAID5_STRIPE_SECTORS(conf), in analyse_stripe()
4655 rdev = rcu_dereference(conf->disks[i].rdev); in analyse_stripe()
4661 is_bad = is_badblock(rdev, sh->sector, RAID5_STRIPE_SECTORS(conf), in analyse_stripe()
4688 else if (sh->sector + RAID5_STRIPE_SECTORS(conf) <= rdev->recovery_offset) in analyse_stripe()
4703 conf->disks[i].rdev); in analyse_stripe()
4716 conf->disks[i].rdev); in analyse_stripe()
4725 conf->disks[i].replacement); in analyse_stripe()
4747 conf->disks[i].replacement); in analyse_stripe()
4768 sh->sector >= conf->mddev->recovery_cp || in analyse_stripe()
4769 test_bit(MD_RECOVERY_REQUESTED, &(conf->mddev->recovery))) in analyse_stripe()
4879 struct r5conf *conf = sh->raid_conf; in handle_stripe() local
4936 test_bit(MD_SB_CHANGE_PENDING, &conf->mddev->sb_flags)) { in handle_stripe()
4948 rdev_dec_pending(s.blocked_rdev, conf->mddev); in handle_stripe()
4968 if (s.failed > conf->max_degraded || in handle_stripe()
4974 handle_failed_stripe(conf, sh, &s, disks); in handle_stripe()
4976 handle_failed_sync(conf, sh, &s); in handle_stripe()
5029 || conf->level < 6; in handle_stripe()
5040 handle_stripe_clean_event(conf, sh, disks); in handle_stripe()
5043 r5c_handle_cached_data_endio(conf, sh, disks); in handle_stripe()
5062 r5c_finish_stripe_write_out(conf, sh, &s); in handle_stripe()
5074 if (!r5c_is_writeback(conf->log)) { in handle_stripe()
5076 handle_stripe_dirtying(conf, sh, &s, disks); in handle_stripe()
5082 ret = r5c_try_caching_write(conf, sh, &s, in handle_stripe()
5095 ret = handle_stripe_dirtying(conf, sh, &s, in handle_stripe()
5112 if (conf->level == 6) in handle_stripe()
5113 handle_parity_checks6(conf, sh, &s, disks); in handle_stripe()
5115 handle_parity_checks5(conf, sh, &s, disks); in handle_stripe()
5122 for (i = 0; i < conf->raid_disks; i++) in handle_stripe()
5136 md_done_sync(conf->mddev, RAID5_STRIPE_SECTORS(conf), 1); in handle_stripe()
5139 wake_up(&conf->wait_for_overlap); in handle_stripe()
5145 if (s.failed <= conf->max_degraded && !conf->mddev->ro) in handle_stripe()
5166 = raid5_get_active_stripe(conf, sh->sector, 1, 1, 1); in handle_stripe()
5175 atomic_inc(&conf->preread_active_stripes); in handle_stripe()
5184 for (i = conf->raid_disks; i--; ) { in handle_stripe()
5194 sh->disks = conf->raid_disks; in handle_stripe()
5195 stripe_set_idx(sh->sector, conf, 0, sh); in handle_stripe()
5199 atomic_dec(&conf->reshape_stripes); in handle_stripe()
5200 wake_up(&conf->wait_for_overlap); in handle_stripe()
5201 md_done_sync(conf->mddev, RAID5_STRIPE_SECTORS(conf), 1); in handle_stripe()
5206 handle_stripe_expansion(conf, sh); in handle_stripe()
5211 if (conf->mddev->external) in handle_stripe()
5213 conf->mddev); in handle_stripe()
5220 conf->mddev); in handle_stripe()
5229 rdev = conf->disks[i].rdev; in handle_stripe()
5231 RAID5_STRIPE_SECTORS(conf), 0)) in handle_stripe()
5232 md_error(conf->mddev, rdev); in handle_stripe()
5233 rdev_dec_pending(rdev, conf->mddev); in handle_stripe()
5236 rdev = conf->disks[i].rdev; in handle_stripe()
5238 RAID5_STRIPE_SECTORS(conf), 0); in handle_stripe()
5239 rdev_dec_pending(rdev, conf->mddev); in handle_stripe()
5242 rdev = conf->disks[i].replacement; in handle_stripe()
5245 rdev = conf->disks[i].rdev; in handle_stripe()
5247 RAID5_STRIPE_SECTORS(conf), 0); in handle_stripe()
5248 rdev_dec_pending(rdev, conf->mddev); in handle_stripe()
5262 atomic_dec(&conf->preread_active_stripes); in handle_stripe()
5263 if (atomic_read(&conf->preread_active_stripes) < in handle_stripe()
5265 md_wakeup_thread(conf->mddev->thread); in handle_stripe()
5271 static void raid5_activate_delayed(struct r5conf *conf) in raid5_activate_delayed() argument
5273 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) { in raid5_activate_delayed()
5274 while (!list_empty(&conf->delayed_list)) { in raid5_activate_delayed()
5275 struct list_head *l = conf->delayed_list.next; in raid5_activate_delayed()
5281 atomic_inc(&conf->preread_active_stripes); in raid5_activate_delayed()
5282 list_add_tail(&sh->lru, &conf->hold_list); in raid5_activate_delayed()
5288 static void activate_bit_delay(struct r5conf *conf, in activate_bit_delay() argument
5293 list_add(&head, &conf->bitmap_list); in activate_bit_delay()
5294 list_del_init(&conf->bitmap_list); in activate_bit_delay()
5301 __release_stripe(conf, sh, &temp_inactive_list[hash]); in activate_bit_delay()
5307 struct r5conf *conf = mddev->private; in in_chunk_boundary() local
5312 chunk_sectors = min(conf->chunk_sectors, conf->prev_chunk_sectors); in in_chunk_boundary()
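in_chunk_boundary() gates the read-bypass path: a bio may skip the stripe cache only if it lies entirely inside one chunk on one device, and during a reshape the smaller of the old and new chunk sizes must hold (the min() above). The test itself reduces to one line of mask arithmetic; a sketch, with chunk_sectors assumed to be a power of two as the driver requires:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

/* True if [sector, sector + bio_sectors) stays inside one chunk. */
static bool in_chunk_boundary(sector_t sector, unsigned int bio_sectors,
                              unsigned int chunk_sectors)
{
        return chunk_sectors >= ((sector & (chunk_sectors - 1)) + bio_sectors);
}

int main(void)
{
        /* 128-sector (64 KiB) chunks: 8 sectors at offset 120 fit,
         * the same 8 sectors at offset 124 straddle the boundary. */
        printf("%d %d\n",
               (int)in_chunk_boundary(120, 8, 128),     /* 1 */
               (int)in_chunk_boundary(124, 8, 128));    /* 0 */
        return 0;
}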
5321 static void add_bio_to_retry(struct bio *bi,struct r5conf *conf) in add_bio_to_retry() argument
5325 spin_lock_irqsave(&conf->device_lock, flags); in add_bio_to_retry()
5327 bi->bi_next = conf->retry_read_aligned_list; in add_bio_to_retry()
5328 conf->retry_read_aligned_list = bi; in add_bio_to_retry()
5330 spin_unlock_irqrestore(&conf->device_lock, flags); in add_bio_to_retry()
5331 md_wakeup_thread(conf->mddev->thread); in add_bio_to_retry()
5334 static struct bio *remove_bio_from_retry(struct r5conf *conf, in remove_bio_from_retry() argument
5339 bi = conf->retry_read_aligned; in remove_bio_from_retry()
5341 *offset = conf->retry_read_offset; in remove_bio_from_retry()
5342 conf->retry_read_aligned = NULL; in remove_bio_from_retry()
5345 bi = conf->retry_read_aligned_list; in remove_bio_from_retry()
5347 conf->retry_read_aligned_list = bi->bi_next; in remove_bio_from_retry()
5366 struct r5conf *conf; in raid5_align_endio() local
5376 conf = mddev->private; in raid5_align_endio()
5378 rdev_dec_pending(rdev, conf->mddev); in raid5_align_endio()
5384 if (atomic_dec_and_test(&conf->active_aligned_reads)) in raid5_align_endio()
5385 wake_up(&conf->wait_for_quiescent); in raid5_align_endio()
5391 add_bio_to_retry(raid_bi, conf); in raid5_align_endio()
5396 struct r5conf *conf = mddev->private; in raid5_read_one_chunk() local
5409 sector = raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector, 0, in raid5_read_one_chunk()
5414 if (r5c_big_stripe_cached(conf, sector)) in raid5_read_one_chunk()
5417 rdev = rcu_dereference(conf->disks[dd_idx].replacement); in raid5_read_one_chunk()
5420 rdev = rcu_dereference(conf->disks[dd_idx].rdev); in raid5_read_one_chunk()
5454 if (conf->quiesce == 0) { in raid5_read_one_chunk()
5455 atomic_inc(&conf->active_aligned_reads); in raid5_read_one_chunk()
5459 if (!did_inc || smp_load_acquire(&conf->quiesce) != 0) { in raid5_read_one_chunk()
5463 if (did_inc && atomic_dec_and_test(&conf->active_aligned_reads)) in raid5_read_one_chunk()
5464 wake_up(&conf->wait_for_quiescent); in raid5_read_one_chunk()
5465 spin_lock_irq(&conf->device_lock); in raid5_read_one_chunk()
5466 wait_event_lock_irq(conf->wait_for_quiescent, conf->quiesce == 0, in raid5_read_one_chunk()
5467 conf->device_lock); in raid5_read_one_chunk()
5468 atomic_inc(&conf->active_aligned_reads); in raid5_read_one_chunk()
5469 spin_unlock_irq(&conf->device_lock); in raid5_read_one_chunk()
5491 struct r5conf *conf = mddev->private; in chunk_aligned_read() local
5492 split = bio_split(raid_bio, sectors, GFP_NOIO, &conf->bio_split); in chunk_aligned_read()
5514 static struct stripe_head *__get_priority_stripe(struct r5conf *conf, int group) in __get_priority_stripe() argument
5519 bool second_try = !r5c_is_writeback(conf->log) && in __get_priority_stripe()
5520 !r5l_log_disk_error(conf); in __get_priority_stripe()
5521 bool try_loprio = test_bit(R5C_LOG_TIGHT, &conf->cache_state) || in __get_priority_stripe()
5522 r5l_log_disk_error(conf); in __get_priority_stripe()
5527 if (conf->worker_cnt_per_group == 0) { in __get_priority_stripe()
5528 handle_list = try_loprio ? &conf->loprio_list : in __get_priority_stripe()
5529 &conf->handle_list; in __get_priority_stripe()
5531 handle_list = try_loprio ? &conf->worker_groups[group].loprio_list : in __get_priority_stripe()
5532 &conf->worker_groups[group].handle_list; in __get_priority_stripe()
5533 wg = &conf->worker_groups[group]; in __get_priority_stripe()
5536 for (i = 0; i < conf->group_cnt; i++) { in __get_priority_stripe()
5537 handle_list = try_loprio ? &conf->worker_groups[i].loprio_list : in __get_priority_stripe()
5538 &conf->worker_groups[i].handle_list; in __get_priority_stripe()
5539 wg = &conf->worker_groups[i]; in __get_priority_stripe()
5548 list_empty(&conf->hold_list) ? "empty" : "busy", in __get_priority_stripe()
5549 atomic_read(&conf->pending_full_writes), conf->bypass_count); in __get_priority_stripe()
5554 if (list_empty(&conf->hold_list)) in __get_priority_stripe()
5555 conf->bypass_count = 0; in __get_priority_stripe()
5557 if (conf->hold_list.next == conf->last_hold) in __get_priority_stripe()
5558 conf->bypass_count++; in __get_priority_stripe()
5560 conf->last_hold = conf->hold_list.next; in __get_priority_stripe()
5561 conf->bypass_count -= conf->bypass_threshold; in __get_priority_stripe()
5562 if (conf->bypass_count < 0) in __get_priority_stripe()
5563 conf->bypass_count = 0; in __get_priority_stripe()
5566 } else if (!list_empty(&conf->hold_list) && in __get_priority_stripe()
5567 ((conf->bypass_threshold && in __get_priority_stripe()
5568 conf->bypass_count > conf->bypass_threshold) || in __get_priority_stripe()
5569 atomic_read(&conf->pending_full_writes) == 0)) { in __get_priority_stripe()
5571 list_for_each_entry(tmp, &conf->hold_list, lru) { in __get_priority_stripe()
5572 if (conf->worker_cnt_per_group == 0 || in __get_priority_stripe()
5582 conf->bypass_count -= conf->bypass_threshold; in __get_priority_stripe()
5583 if (conf->bypass_count < 0) in __get_priority_stripe()
5584 conf->bypass_count = 0; in __get_priority_stripe()
5618 struct r5conf *conf = mddev->private; in raid5_unplug() local
5623 spin_lock_irq(&conf->device_lock); in raid5_unplug()
5639 __release_stripe(conf, sh, &cb->temp_inactive_list[hash]); in raid5_unplug()
5642 spin_unlock_irq(&conf->device_lock); in raid5_unplug()
5644 release_inactive_stripe_list(conf, cb->temp_inactive_list, in raid5_unplug()
5681 struct r5conf *conf = mddev->private; in make_discard_request() local
5690 logical_sector = bi->bi_iter.bi_sector & ~((sector_t)RAID5_STRIPE_SECTORS(conf)-1); in make_discard_request()
5695 stripe_sectors = conf->chunk_sectors * in make_discard_request()
5696 (conf->raid_disks - conf->max_degraded); in make_discard_request()
5701 logical_sector *= conf->chunk_sectors; in make_discard_request()
5702 last_sector *= conf->chunk_sectors; in make_discard_request()
5705 logical_sector += RAID5_STRIPE_SECTORS(conf)) { in make_discard_request()
5709 sh = raid5_get_active_stripe(conf, logical_sector, 0, 0, 0); in make_discard_request()
5710 prepare_to_wait(&conf->wait_for_overlap, &w, in make_discard_request()
5720 for (d = 0; d < conf->raid_disks; d++) { in make_discard_request()
5732 finish_wait(&conf->wait_for_overlap, &w); in make_discard_request()
5734 for (d = 0; d < conf->raid_disks; d++) { in make_discard_request()
5744 if (conf->mddev->bitmap) { in make_discard_request()
5746 d < conf->raid_disks - conf->max_degraded; in make_discard_request()
5750 RAID5_STRIPE_SECTORS(conf), in make_discard_request()
5752 sh->bm_seq = conf->seq_flush + 1; in make_discard_request()
5759 atomic_inc(&conf->preread_active_stripes); in make_discard_request()
5768 struct r5conf *conf = mddev->private; in raid5_make_request() local
5779 int ret = log_handle_flush_request(conf, bi); in raid5_make_request()
5815 logical_sector = bi->bi_iter.bi_sector & ~((sector_t)RAID5_STRIPE_SECTORS(conf)-1); in raid5_make_request()
5820 prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE); in raid5_make_request()
5821 for (; logical_sector < last_sector; logical_sector += RAID5_STRIPE_SECTORS(conf)) { in raid5_make_request()
5827 seq = read_seqcount_begin(&conf->gen_lock); in raid5_make_request()
5830 prepare_to_wait(&conf->wait_for_overlap, &w, in raid5_make_request()
5832 if (unlikely(conf->reshape_progress != MaxSector)) { in raid5_make_request()
5841 spin_lock_irq(&conf->device_lock); in raid5_make_request()
5843 ? logical_sector < conf->reshape_progress in raid5_make_request()
5844 : logical_sector >= conf->reshape_progress) { in raid5_make_request()
5848 ? logical_sector < conf->reshape_safe in raid5_make_request()
5849 : logical_sector >= conf->reshape_safe) { in raid5_make_request()
5850 spin_unlock_irq(&conf->device_lock); in raid5_make_request()
5856 spin_unlock_irq(&conf->device_lock); in raid5_make_request()
5859 new_sector = raid5_compute_sector(conf, logical_sector, in raid5_make_request()
5866 sh = raid5_get_active_stripe(conf, new_sector, previous, in raid5_make_request()
5879 spin_lock_irq(&conf->device_lock); in raid5_make_request()
5881 ? logical_sector >= conf->reshape_progress in raid5_make_request()
5882 : logical_sector < conf->reshape_progress) in raid5_make_request()
5885 spin_unlock_irq(&conf->device_lock); in raid5_make_request()
5893 if (read_seqcount_retry(&conf->gen_lock, seq)) { in raid5_make_request()
5924 atomic_inc(&conf->preread_active_stripes); in raid5_make_request()
5932 finish_wait(&conf->wait_for_overlap, &w); in raid5_make_request()
5953 struct r5conf *conf = mddev->private; in reshape_request() local
5957 int raid_disks = conf->previous_raid_disks; in reshape_request()
5958 int data_disks = raid_disks - conf->max_degraded; in reshape_request()
5959 int new_data_disks = conf->raid_disks - conf->max_degraded; in reshape_request()
5971 conf->reshape_progress < raid5_size(mddev, 0, 0)) { in reshape_request()
5973 - conf->reshape_progress; in reshape_request()
5975 conf->reshape_progress == MaxSector) { in reshape_request()
5979 conf->reshape_progress > 0) in reshape_request()
5980 sector_nr = conf->reshape_progress; in reshape_request()
5996 reshape_sectors = max(conf->chunk_sectors, conf->prev_chunk_sectors); in reshape_request()
6004 writepos = conf->reshape_progress; in reshape_request()
6006 readpos = conf->reshape_progress; in reshape_request()
6008 safepos = conf->reshape_safe; in reshape_request()
6029 BUG_ON(conf->reshape_progress == 0); in reshape_request()
6060 if (conf->min_offset_diff < 0) { in reshape_request()
6061 safepos += -conf->min_offset_diff; in reshape_request()
6062 readpos += -conf->min_offset_diff; in reshape_request()
6064 writepos += conf->min_offset_diff; in reshape_request()
6069 time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) { in reshape_request()
6071 wait_event(conf->wait_for_overlap, in reshape_request()
6072 atomic_read(&conf->reshape_stripes)==0 in reshape_request()
6074 if (atomic_read(&conf->reshape_stripes) != 0) in reshape_request()
6076 mddev->reshape_position = conf->reshape_progress; in reshape_request()
6087 conf->reshape_checkpoint = jiffies; in reshape_request()
6094 spin_lock_irq(&conf->device_lock); in reshape_request()
6095 conf->reshape_safe = mddev->reshape_position; in reshape_request()
6096 spin_unlock_irq(&conf->device_lock); in reshape_request()
6097 wake_up(&conf->wait_for_overlap); in reshape_request()
6102 for (i = 0; i < reshape_sectors; i += RAID5_STRIPE_SECTORS(conf)) { in reshape_request()
6105 sh = raid5_get_active_stripe(conf, stripe_addr+i, 0, 0, 1); in reshape_request()
6107 atomic_inc(&conf->reshape_stripes); in reshape_request()
6115 if (conf->level == 6 && in reshape_request()
6123 memset(page_address(sh->dev[j].page), 0, RAID5_STRIPE_SIZE(conf)); in reshape_request()
6133 spin_lock_irq(&conf->device_lock); in reshape_request()
6135 conf->reshape_progress -= reshape_sectors * new_data_disks; in reshape_request()
6137 conf->reshape_progress += reshape_sectors * new_data_disks; in reshape_request()
6138 spin_unlock_irq(&conf->device_lock); in reshape_request()
6145 raid5_compute_sector(conf, stripe_addr*(new_data_disks), in reshape_request()
6148 raid5_compute_sector(conf, ((stripe_addr+reshape_sectors) in reshape_request()
6154 sh = raid5_get_active_stripe(conf, first_sector, 1, 0, 1); in reshape_request()
6158 first_sector += RAID5_STRIPE_SECTORS(conf); in reshape_request()
6178 wait_event(conf->wait_for_overlap, in reshape_request()
6179 atomic_read(&conf->reshape_stripes) == 0 in reshape_request()
6181 if (atomic_read(&conf->reshape_stripes) != 0) in reshape_request()
6183 mddev->reshape_position = conf->reshape_progress; in reshape_request()
6193 conf->reshape_checkpoint = jiffies; in reshape_request()
6201 spin_lock_irq(&conf->device_lock); in reshape_request()
6202 conf->reshape_safe = mddev->reshape_position; in reshape_request()
6203 spin_unlock_irq(&conf->device_lock); in reshape_request()
6204 wake_up(&conf->wait_for_overlap); in reshape_request()
6214 struct r5conf *conf = mddev->private; in raid5_sync_request() local
6225 end_reshape(conf); in raid5_sync_request()
6233 conf->fullsync = 0; in raid5_sync_request()
6240 wait_event(conf->wait_for_overlap, conf->quiesce != 2); in raid5_sync_request()
6255 if (mddev->degraded >= conf->max_degraded && in raid5_sync_request()
6262 !conf->fullsync && in raid5_sync_request()
6264 sync_blocks >= RAID5_STRIPE_SECTORS(conf)) { in raid5_sync_request()
6266 do_div(sync_blocks, RAID5_STRIPE_SECTORS(conf)); in raid5_sync_request()
6269 return sync_blocks * RAID5_STRIPE_SECTORS(conf); in raid5_sync_request()
6274 sh = raid5_get_active_stripe(conf, sector_nr, 0, 1, 0); in raid5_sync_request()
6276 sh = raid5_get_active_stripe(conf, sector_nr, 0, 0, 0); in raid5_sync_request()
6287 for (i = 0; i < conf->raid_disks; i++) { in raid5_sync_request()
6288 struct md_rdev *rdev = READ_ONCE(conf->disks[i].rdev); in raid5_sync_request()
6302 return RAID5_STRIPE_SECTORS(conf); in raid5_sync_request()
6305 static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio, in retry_aligned_read() argument
6325 ~((sector_t)RAID5_STRIPE_SECTORS(conf)-1); in retry_aligned_read()
6326 sector = raid5_compute_sector(conf, logical_sector, in retry_aligned_read()
6331 logical_sector += RAID5_STRIPE_SECTORS(conf), in retry_aligned_read()
6332 sector += RAID5_STRIPE_SECTORS(conf), in retry_aligned_read()
6339 sh = raid5_get_active_stripe(conf, sector, 0, 1, 1); in retry_aligned_read()
6343 conf->retry_read_aligned = raid_bio; in retry_aligned_read()
6344 conf->retry_read_offset = scnt; in retry_aligned_read()
6350 conf->retry_read_aligned = raid_bio; in retry_aligned_read()
6351 conf->retry_read_offset = scnt; in retry_aligned_read()
6363 if (atomic_dec_and_test(&conf->active_aligned_reads)) in retry_aligned_read()
6364 wake_up(&conf->wait_for_quiescent); in retry_aligned_read()
6368 static int handle_active_stripes(struct r5conf *conf, int group, in handle_active_stripes() argument
6371 __releases(&conf->device_lock) in handle_active_stripes()
6372 __acquires(&conf->device_lock) in handle_active_stripes()
6379 (sh = __get_priority_stripe(conf, group)) != NULL) in handle_active_stripes()
6387 spin_unlock_irq(&conf->device_lock); in handle_active_stripes()
6388 log_flush_stripe_to_raid(conf); in handle_active_stripes()
6389 spin_lock_irq(&conf->device_lock); in handle_active_stripes()
6394 spin_unlock_irq(&conf->device_lock); in handle_active_stripes()
6396 release_inactive_stripe_list(conf, temp_inactive_list, in handle_active_stripes()
6399 r5l_flush_stripe_to_raid(conf->log); in handle_active_stripes()
6401 spin_lock_irq(&conf->device_lock); in handle_active_stripes()
6407 log_write_stripe_run(conf); in handle_active_stripes()
6411 spin_lock_irq(&conf->device_lock); in handle_active_stripes()
6414 __release_stripe(conf, batch[i], &temp_inactive_list[hash]); in handle_active_stripes()
6423 struct r5conf *conf = group->conf; in raid5_do_work() local
6424 struct mddev *mddev = conf->mddev; in raid5_do_work()
6425 int group_id = group - conf->worker_groups; in raid5_do_work()
6433 spin_lock_irq(&conf->device_lock); in raid5_do_work()
6437 released = release_stripe_list(conf, worker->temp_inactive_list); in raid5_do_work()
6439 batch_size = handle_active_stripes(conf, group_id, worker, in raid5_do_work()
6447 conf->device_lock); in raid5_do_work()
6451 spin_unlock_irq(&conf->device_lock); in raid5_do_work()
6453 flush_deferred_bios(conf); in raid5_do_work()
6455 r5l_flush_stripe_to_raid(conf->log); in raid5_do_work()
6473 struct r5conf *conf = mddev->private; in raid5d() local
6483 spin_lock_irq(&conf->device_lock); in raid5d()
6489 released = release_stripe_list(conf, conf->temp_inactive_list); in raid5d()
6491 clear_bit(R5_DID_ALLOC, &conf->cache_state); in raid5d()
6494 !list_empty(&conf->bitmap_list)) { in raid5d()
6496 conf->seq_flush++; in raid5d()
6497 spin_unlock_irq(&conf->device_lock); in raid5d()
6499 spin_lock_irq(&conf->device_lock); in raid5d()
6500 conf->seq_write = conf->seq_flush; in raid5d()
6501 activate_bit_delay(conf, conf->temp_inactive_list); in raid5d()
6503 raid5_activate_delayed(conf); in raid5d()
6505 while ((bio = remove_bio_from_retry(conf, &offset))) { in raid5d()
6507 spin_unlock_irq(&conf->device_lock); in raid5d()
6508 ok = retry_aligned_read(conf, bio, offset); in raid5d()
6509 spin_lock_irq(&conf->device_lock); in raid5d()
6515 batch_size = handle_active_stripes(conf, ANY_GROUP, NULL, in raid5d()
6516 conf->temp_inactive_list); in raid5d()
6522 spin_unlock_irq(&conf->device_lock); in raid5d()
6524 spin_lock_irq(&conf->device_lock); in raid5d()
6536 conf->device_lock); in raid5d()
6540 spin_unlock_irq(&conf->device_lock); in raid5d()
6541 if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state) && in raid5d()
6542 mutex_trylock(&conf->cache_size_mutex)) { in raid5d()
6543 grow_one_stripe(conf, __GFP_NOWARN); in raid5d()
6547 set_bit(R5_DID_ALLOC, &conf->cache_state); in raid5d()
6548 mutex_unlock(&conf->cache_size_mutex); in raid5d()
6551 flush_deferred_bios(conf); in raid5d()
6553 r5l_flush_stripe_to_raid(conf->log); in raid5d()
6564 struct r5conf *conf; in raid5_show_stripe_cache_size() local
6567 conf = mddev->private; in raid5_show_stripe_cache_size()
6568 if (conf) in raid5_show_stripe_cache_size()
6569 ret = sprintf(page, "%d\n", conf->min_nr_stripes); in raid5_show_stripe_cache_size()
6578 struct r5conf *conf = mddev->private; in raid5_set_cache_size() local
6583 conf->min_nr_stripes = size; in raid5_set_cache_size()
6584 mutex_lock(&conf->cache_size_mutex); in raid5_set_cache_size()
6585 while (size < conf->max_nr_stripes && in raid5_set_cache_size()
6586 drop_one_stripe(conf)) in raid5_set_cache_size()
6588 mutex_unlock(&conf->cache_size_mutex); in raid5_set_cache_size()
6592 mutex_lock(&conf->cache_size_mutex); in raid5_set_cache_size()
6593 while (size > conf->max_nr_stripes) in raid5_set_cache_size()
6594 if (!grow_one_stripe(conf, GFP_KERNEL)) { in raid5_set_cache_size()
6595 conf->min_nr_stripes = conf->max_nr_stripes; in raid5_set_cache_size()
6599 mutex_unlock(&conf->cache_size_mutex); in raid5_set_cache_size()
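raid5_set_cache_size() converges max_nr_stripes on the requested value one stripe at a time under cache_size_mutex: drop_one_stripe() while above target, grow_one_stripe() while below, giving up with ENOMEM if an allocation fails partway. The shape of that loop, with allocation reduced to a counter as a simplification:

#include <stdbool.h>
#include <stdio.h>

static int max_nr_stripes;

static bool drop_one_stripe(void)
{
        if (!max_nr_stripes)
                return false;
        max_nr_stripes--;               /* the driver frees a stripe_head */
        return true;
}

static bool grow_one_stripe(void)
{
        max_nr_stripes++;               /* the driver may fail with ENOMEM */
        return true;
}

static int set_cache_size(int size)
{
        while (size < max_nr_stripes && drop_one_stripe())
                ;
        while (size > max_nr_stripes)
                if (!grow_one_stripe())
                        return -1;      /* -ENOMEM in the driver */
        return 0;
}

int main(void)
{
        set_cache_size(256);
        printf("%d\n", max_nr_stripes);         /* 256 */
        set_cache_size(64);
        printf("%d\n", max_nr_stripes);         /* 64 */
        return 0;
}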
6608 struct r5conf *conf; in raid5_store_stripe_cache_size() local
6619 conf = mddev->private; in raid5_store_stripe_cache_size()
6620 if (!conf) in raid5_store_stripe_cache_size()
6637 struct r5conf *conf = mddev->private; in raid5_show_rmw_level() local
6638 if (conf) in raid5_show_rmw_level()
6639 return sprintf(page, "%d\n", conf->rmw_level); in raid5_show_rmw_level()
6647 struct r5conf *conf = mddev->private; in raid5_store_rmw_level() local
6650 if (!conf) in raid5_store_rmw_level()
6667 conf->rmw_level = new; in raid5_store_rmw_level()
6679 struct r5conf *conf; in raid5_show_stripe_size() local
6683 conf = mddev->private; in raid5_show_stripe_size()
6684 if (conf) in raid5_show_stripe_size()
6685 ret = sprintf(page, "%lu\n", RAID5_STRIPE_SIZE(conf)); in raid5_show_stripe_size()
6694 struct r5conf *conf; in raid5_store_stripe_size() local
6718 conf = mddev->private; in raid5_store_stripe_size()
6719 if (!conf) { in raid5_store_stripe_size()
6724 if (new == conf->stripe_size) in raid5_store_stripe_size()
6728 conf->stripe_size, new); in raid5_store_stripe_size()
6739 mutex_lock(&conf->cache_size_mutex); in raid5_store_stripe_size()
6740 size = conf->max_nr_stripes; in raid5_store_stripe_size()
6742 shrink_stripes(conf); in raid5_store_stripe_size()
6744 conf->stripe_size = new; in raid5_store_stripe_size()
6745 conf->stripe_shift = ilog2(new) - 9; in raid5_store_stripe_size()
6746 conf->stripe_sectors = new >> 9; in raid5_store_stripe_size()
6747 if (grow_stripes(conf, size)) { in raid5_store_stripe_size()
6752 mutex_unlock(&conf->cache_size_mutex); in raid5_store_stripe_size()
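Lines 6744-6746 cache three representations of the new stripe size: bytes (stripe_size), a sector-count shift (stripe_shift = ilog2(size) - 9), and a sector count (stripe_sectors = size >> 9). A quick standalone check of that arithmetic for an assumed 16 KiB stripe size:

    #include <stdio.h>

    /* Same result as the kernel's ilog2() for the power-of-two sizes accepted here. */
    static unsigned int ilog2_u(unsigned int v)
    {
            unsigned int r = 0;
            while (v >>= 1)
                    r++;
            return r;
    }

    int main(void)
    {
            unsigned int size = 16384;               /* example stripe size in bytes */
            unsigned int shift = ilog2_u(size) - 9;  /* 14 - 9 = 5 */
            unsigned int sectors = size >> 9;        /* 16384 / 512 = 32 == 1 << 5 */
            printf("stripe_shift=%u stripe_sectors=%u\n", shift, sectors);
            return 0;
    }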
6774 struct r5conf *conf; in raid5_show_preread_threshold() local
6777 conf = mddev->private; in raid5_show_preread_threshold()
6778 if (conf) in raid5_show_preread_threshold()
6779 ret = sprintf(page, "%d\n", conf->bypass_threshold); in raid5_show_preread_threshold()
6787 struct r5conf *conf; in raid5_store_preread_threshold() local
6799 conf = mddev->private; in raid5_store_preread_threshold()
6800 if (!conf) in raid5_store_preread_threshold()
6802 else if (new > conf->min_nr_stripes) in raid5_store_preread_threshold()
6805 conf->bypass_threshold = new; in raid5_store_preread_threshold()
6819 struct r5conf *conf; in raid5_show_skip_copy() local
6822 conf = mddev->private; in raid5_show_skip_copy()
6823 if (conf) in raid5_show_skip_copy()
6824 ret = sprintf(page, "%d\n", conf->skip_copy); in raid5_show_skip_copy()
6832 struct r5conf *conf; in raid5_store_skip_copy() local
6845 conf = mddev->private; in raid5_store_skip_copy()
6846 if (!conf) in raid5_store_skip_copy()
6848 else if (new != conf->skip_copy) { in raid5_store_skip_copy()
6852 conf->skip_copy = new; in raid5_store_skip_copy()
6871 struct r5conf *conf = mddev->private; in stripe_cache_active_show() local
6872 if (conf) in stripe_cache_active_show()
6873 return sprintf(page, "%d\n", atomic_read(&conf->active_stripes)); in stripe_cache_active_show()
6884 struct r5conf *conf; in raid5_show_group_thread_cnt() local
6887 conf = mddev->private; in raid5_show_group_thread_cnt()
6888 if (conf) in raid5_show_group_thread_cnt()
6889 ret = sprintf(page, "%d\n", conf->worker_cnt_per_group); in raid5_show_group_thread_cnt()
6894 static int alloc_thread_groups(struct r5conf *conf, int cnt,
6900 struct r5conf *conf; in raid5_store_group_thread_cnt() local
6917 conf = mddev->private; in raid5_store_group_thread_cnt()
6918 if (!conf) in raid5_store_group_thread_cnt()
6920 else if (new != conf->worker_cnt_per_group) { in raid5_store_group_thread_cnt()
6923 old_groups = conf->worker_groups; in raid5_store_group_thread_cnt()
6927 err = alloc_thread_groups(conf, new, &group_cnt, &new_groups); in raid5_store_group_thread_cnt()
6929 spin_lock_irq(&conf->device_lock); in raid5_store_group_thread_cnt()
6930 conf->group_cnt = group_cnt; in raid5_store_group_thread_cnt()
6931 conf->worker_cnt_per_group = new; in raid5_store_group_thread_cnt()
6932 conf->worker_groups = new_groups; in raid5_store_group_thread_cnt()
6933 spin_unlock_irq(&conf->device_lock); in raid5_store_group_thread_cnt()
6968 static int alloc_thread_groups(struct r5conf *conf, int cnt, int *group_cnt, in alloc_thread_groups() argument
6997 group->conf = conf; in alloc_thread_groups()
7013 static void free_thread_groups(struct r5conf *conf) in free_thread_groups() argument
7015 if (conf->worker_groups) in free_thread_groups()
7016 kfree(conf->worker_groups[0].workers); in free_thread_groups()
7017 kfree(conf->worker_groups); in free_thread_groups()
7018 conf->worker_groups = NULL; in free_thread_groups()
7024 struct r5conf *conf = mddev->private; in raid5_size() local
7030 raid_disks = min(conf->raid_disks, conf->previous_raid_disks); in raid5_size()
7032 sectors &= ~((sector_t)conf->chunk_sectors - 1); in raid5_size()
7033 sectors &= ~((sector_t)conf->prev_chunk_sectors - 1); in raid5_size()
7034 return sectors * (raid_disks - conf->max_degraded); in raid5_size()
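raid5_size() computes the exported array capacity: the per-device sector count is rounded down to a multiple of both the current and the previous chunk size, then multiplied by the number of data disks (raid_disks - max_degraded). Worked through with assumed figures for a 4-disk RAID5:

    #include <stdio.h>

    typedef unsigned long long sector_t;

    int main(void)
    {
            sector_t sectors = 1953525168ULL;      /* smallest member, ~931 GiB */
            sector_t chunk_sectors = 1024;         /* 512 KiB chunks */
            sector_t prev_chunk_sectors = 1024;    /* no reshape in progress */
            int raid_disks = 4, max_degraded = 1;  /* RAID5: one disk of parity */

            sectors &= ~(chunk_sectors - 1);       /* round down to chunk multiple */
            sectors &= ~(prev_chunk_sectors - 1);
            printf("%llu sectors usable\n", sectors * (raid_disks - max_degraded));
            return 0;
    }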
7037 static void free_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu) in free_scratch_buffer() argument
7045 static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu) in alloc_scratch_buffer() argument
7047 if (conf->level == 6 && !percpu->spare_page) { in alloc_scratch_buffer()
7054 max(conf->raid_disks, in alloc_scratch_buffer()
7055 conf->previous_raid_disks), in alloc_scratch_buffer()
7056 max(conf->chunk_sectors, in alloc_scratch_buffer()
7057 conf->prev_chunk_sectors) in alloc_scratch_buffer()
7058 / RAID5_STRIPE_SECTORS(conf))) { in alloc_scratch_buffer()
7059 free_scratch_buffer(conf, percpu); in alloc_scratch_buffer()
7068 struct r5conf *conf = hlist_entry_safe(node, struct r5conf, node); in raid456_cpu_dead() local
7070 free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu)); in raid456_cpu_dead()
7074 static void raid5_free_percpu(struct r5conf *conf) in raid5_free_percpu() argument
7076 if (!conf->percpu) in raid5_free_percpu()
7079 cpuhp_state_remove_instance(CPUHP_MD_RAID5_PREPARE, &conf->node); in raid5_free_percpu()
7080 free_percpu(conf->percpu); in raid5_free_percpu()
7083 static void free_conf(struct r5conf *conf) in free_conf() argument
7087 log_exit(conf); in free_conf()
7089 unregister_shrinker(&conf->shrinker); in free_conf()
7090 free_thread_groups(conf); in free_conf()
7091 shrink_stripes(conf); in free_conf()
7092 raid5_free_percpu(conf); in free_conf()
7093 for (i = 0; i < conf->pool_size; i++) in free_conf()
7094 if (conf->disks[i].extra_page) in free_conf()
7095 put_page(conf->disks[i].extra_page); in free_conf()
7096 kfree(conf->disks); in free_conf()
7097 bioset_exit(&conf->bio_split); in free_conf()
7098 kfree(conf->stripe_hashtbl); in free_conf()
7099 kfree(conf->pending_data); in free_conf()
7100 kfree(conf); in free_conf()
7105 struct r5conf *conf = hlist_entry_safe(node, struct r5conf, node); in raid456_cpu_up_prepare() local
7106 struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu); in raid456_cpu_up_prepare()
7108 if (alloc_scratch_buffer(conf, percpu)) { in raid456_cpu_up_prepare()
7116 static int raid5_alloc_percpu(struct r5conf *conf) in raid5_alloc_percpu() argument
7120 conf->percpu = alloc_percpu(struct raid5_percpu); in raid5_alloc_percpu()
7121 if (!conf->percpu) in raid5_alloc_percpu()
7124 err = cpuhp_state_add_instance(CPUHP_MD_RAID5_PREPARE, &conf->node); in raid5_alloc_percpu()
7126 conf->scribble_disks = max(conf->raid_disks, in raid5_alloc_percpu()
7127 conf->previous_raid_disks); in raid5_alloc_percpu()
7128 conf->scribble_sectors = max(conf->chunk_sectors, in raid5_alloc_percpu()
7129 conf->prev_chunk_sectors); in raid5_alloc_percpu()
7137 struct r5conf *conf = container_of(shrink, struct r5conf, shrinker); in raid5_cache_scan() local
7140 if (mutex_trylock(&conf->cache_size_mutex)) { in raid5_cache_scan()
7143 conf->max_nr_stripes > conf->min_nr_stripes) { in raid5_cache_scan()
7144 if (drop_one_stripe(conf) == 0) { in raid5_cache_scan()
7150 mutex_unlock(&conf->cache_size_mutex); in raid5_cache_scan()
7158 struct r5conf *conf = container_of(shrink, struct r5conf, shrinker); in raid5_cache_count() local
7160 if (conf->max_nr_stripes < conf->min_nr_stripes) in raid5_cache_count()
7163 return conf->max_nr_stripes - conf->min_nr_stripes; in raid5_cache_count()
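The two callbacks above implement the kernel shrinker contract: raid5_cache_count() reports how many stripes are reclaimable (the surplus of max_nr_stripes over min_nr_stripes, or nothing while a resize has them momentarily inverted), and raid5_cache_scan() frees them one drop_one_stripe() at a time, but only when it can take cache_size_mutex without blocking. A condensed, compilable model of the pair, with plain ints in place of the kernel types and the trylock assumed to succeed:

    #include <stdio.h>

    static int max_nr_stripes = 300, min_nr_stripes = 256;

    static long cache_count(void)
    {
            if (max_nr_stripes < min_nr_stripes)
                    return 0;                        /* resize in flight: report nothing */
            return max_nr_stripes - min_nr_stripes;  /* only the surplus is reclaimable */
    }

    static long cache_scan(long nr_to_scan)
    {
            long freed = 0;

            while (nr_to_scan-- > 0 && max_nr_stripes > min_nr_stripes) {
                    max_nr_stripes--;                /* stands in for drop_one_stripe() */
                    freed++;
            }
            return freed;
    }

    int main(void)
    {
            printf("reclaimable=%ld freed=%ld\n", cache_count(), cache_scan(128));
            return 0;
    }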
7168 struct r5conf *conf; in setup_conf() local
7207 conf = kzalloc(sizeof(struct r5conf), GFP_KERNEL); in setup_conf()
7208 if (conf == NULL) in setup_conf()
7212 conf->stripe_size = DEFAULT_STRIPE_SIZE; in setup_conf()
7213 conf->stripe_shift = ilog2(DEFAULT_STRIPE_SIZE) - 9; in setup_conf()
7214 conf->stripe_sectors = DEFAULT_STRIPE_SIZE >> 9; in setup_conf()
7216 INIT_LIST_HEAD(&conf->free_list); in setup_conf()
7217 INIT_LIST_HEAD(&conf->pending_list); in setup_conf()
7218 conf->pending_data = kcalloc(PENDING_IO_MAX, in setup_conf()
7221 if (!conf->pending_data) in setup_conf()
7224 list_add(&conf->pending_data[i].sibling, &conf->free_list); in setup_conf()
7226 if (!alloc_thread_groups(conf, 0, &group_cnt, &new_group)) { in setup_conf()
7227 conf->group_cnt = group_cnt; in setup_conf()
7228 conf->worker_cnt_per_group = 0; in setup_conf()
7229 conf->worker_groups = new_group; in setup_conf()
7232 spin_lock_init(&conf->device_lock); in setup_conf()
7233 seqcount_spinlock_init(&conf->gen_lock, &conf->device_lock); in setup_conf()
7234 mutex_init(&conf->cache_size_mutex); in setup_conf()
7235 init_waitqueue_head(&conf->wait_for_quiescent); in setup_conf()
7236 init_waitqueue_head(&conf->wait_for_stripe); in setup_conf()
7237 init_waitqueue_head(&conf->wait_for_overlap); in setup_conf()
7238 INIT_LIST_HEAD(&conf->handle_list); in setup_conf()
7239 INIT_LIST_HEAD(&conf->loprio_list); in setup_conf()
7240 INIT_LIST_HEAD(&conf->hold_list); in setup_conf()
7241 INIT_LIST_HEAD(&conf->delayed_list); in setup_conf()
7242 INIT_LIST_HEAD(&conf->bitmap_list); in setup_conf()
7243 init_llist_head(&conf->released_stripes); in setup_conf()
7244 atomic_set(&conf->active_stripes, 0); in setup_conf()
7245 atomic_set(&conf->preread_active_stripes, 0); in setup_conf()
7246 atomic_set(&conf->active_aligned_reads, 0); in setup_conf()
7247 spin_lock_init(&conf->pending_bios_lock); in setup_conf()
7248 conf->batch_bio_dispatch = true; in setup_conf()
7253 conf->batch_bio_dispatch = false; in setup_conf()
7258 conf->bypass_threshold = BYPASS_THRESHOLD; in setup_conf()
7259 conf->recovery_disabled = mddev->recovery_disabled - 1; in setup_conf()
7261 conf->raid_disks = mddev->raid_disks; in setup_conf()
7263 conf->previous_raid_disks = mddev->raid_disks; in setup_conf()
7265 conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks; in setup_conf()
7266 max_disks = max(conf->raid_disks, conf->previous_raid_disks); in setup_conf()
7268 conf->disks = kcalloc(max_disks, sizeof(struct disk_info), in setup_conf()
7271 if (!conf->disks) in setup_conf()
7275 conf->disks[i].extra_page = alloc_page(GFP_KERNEL); in setup_conf()
7276 if (!conf->disks[i].extra_page) in setup_conf()
7280 ret = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0); in setup_conf()
7283 conf->mddev = mddev; in setup_conf()
7285 if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL) in setup_conf()
7293 spin_lock_init(conf->hash_locks); in setup_conf()
7295 spin_lock_init(conf->hash_locks + i); in setup_conf()
7298 INIT_LIST_HEAD(conf->inactive_list + i); in setup_conf()
7301 INIT_LIST_HEAD(conf->temp_inactive_list + i); in setup_conf()
7303 atomic_set(&conf->r5c_cached_full_stripes, 0); in setup_conf()
7304 INIT_LIST_HEAD(&conf->r5c_full_stripe_list); in setup_conf()
7305 atomic_set(&conf->r5c_cached_partial_stripes, 0); in setup_conf()
7306 INIT_LIST_HEAD(&conf->r5c_partial_stripe_list); in setup_conf()
7307 atomic_set(&conf->r5c_flushing_full_stripes, 0); in setup_conf()
7308 atomic_set(&conf->r5c_flushing_partial_stripes, 0); in setup_conf()
7310 conf->level = mddev->new_level; in setup_conf()
7311 conf->chunk_sectors = mddev->new_chunk_sectors; in setup_conf()
7312 if (raid5_alloc_percpu(conf) != 0) in setup_conf()
7322 disk = conf->disks + raid_disk; in setup_conf()
7340 conf->fullsync = 1; in setup_conf()
7343 conf->level = mddev->new_level; in setup_conf()
7344 if (conf->level == 6) { in setup_conf()
7345 conf->max_degraded = 2; in setup_conf()
7347 conf->rmw_level = PARITY_ENABLE_RMW; in setup_conf()
7349 conf->rmw_level = PARITY_DISABLE_RMW; in setup_conf()
7351 conf->max_degraded = 1; in setup_conf()
7352 conf->rmw_level = PARITY_ENABLE_RMW; in setup_conf()
7354 conf->algorithm = mddev->new_layout; in setup_conf()
7355 conf->reshape_progress = mddev->reshape_position; in setup_conf()
7356 if (conf->reshape_progress != MaxSector) { in setup_conf()
7357 conf->prev_chunk_sectors = mddev->chunk_sectors; in setup_conf()
7358 conf->prev_algo = mddev->layout; in setup_conf()
7360 conf->prev_chunk_sectors = conf->chunk_sectors; in setup_conf()
7361 conf->prev_algo = conf->algorithm; in setup_conf()
7364 conf->min_nr_stripes = NR_STRIPES; in setup_conf()
7367 ((mddev->chunk_sectors << 9) / RAID5_STRIPE_SIZE(conf)) * 4, in setup_conf()
7368 ((mddev->new_chunk_sectors << 9) / RAID5_STRIPE_SIZE(conf)) * 4); in setup_conf()
7369 conf->min_nr_stripes = max(NR_STRIPES, stripes); in setup_conf()
7370 if (conf->min_nr_stripes != NR_STRIPES) in setup_conf()
7372 mdname(mddev), conf->min_nr_stripes); in setup_conf()
7374 memory = conf->min_nr_stripes * (sizeof(struct stripe_head) + in setup_conf()
7376 atomic_set(&conf->empty_inactive_list_nr, NR_STRIPE_HASH_LOCKS); in setup_conf()
7377 if (grow_stripes(conf, conf->min_nr_stripes)) { in setup_conf()
7388 conf->shrinker.seeks = DEFAULT_SEEKS * conf->raid_disks * 4; in setup_conf()
7389 conf->shrinker.scan_objects = raid5_cache_scan; in setup_conf()
7390 conf->shrinker.count_objects = raid5_cache_count; in setup_conf()
7391 conf->shrinker.batch = 128; in setup_conf()
7392 conf->shrinker.flags = 0; in setup_conf()
7393 if (register_shrinker(&conf->shrinker)) { in setup_conf()
7400 conf->thread = md_register_thread(raid5d, mddev, pers_name); in setup_conf()
7401 if (!conf->thread) { in setup_conf()
7407 return conf; in setup_conf()
7410 if (conf) { in setup_conf()
7411 free_conf(conf); in setup_conf()
7443 static void raid5_set_io_opt(struct r5conf *conf) in raid5_set_io_opt() argument
7445 blk_queue_io_opt(conf->mddev->queue, (conf->chunk_sectors << 9) * in raid5_set_io_opt()
7446 (conf->raid_disks - conf->max_degraded)); in raid5_set_io_opt()
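raid5_set_io_opt() advertises one full stripe of data as the optimal I/O size: the chunk size in bytes times the number of data disks. For example, with assumed 512 KiB chunks on a 6-disk RAID6 (max_degraded = 2):

    #include <stdio.h>

    int main(void)
    {
            unsigned int chunk_sectors = 1024;     /* 512 KiB chunk */
            int raid_disks = 6, max_degraded = 2;  /* RAID6: two disks of parity */
            unsigned int io_opt = (chunk_sectors << 9) * (raid_disks - max_degraded);

            printf("io_opt = %u bytes\n", io_opt); /* 524288 * 4 = 2 MiB */
            return 0;
    }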
7451 struct r5conf *conf; in raid5_run() local
7605 conf = setup_conf(mddev); in raid5_run()
7607 conf = mddev->private; in raid5_run()
7609 if (IS_ERR(conf)) { in raid5_run()
7610 ret = PTR_ERR(conf); in raid5_run()
7624 conf->min_offset_diff = min_offset_diff; in raid5_run()
7625 mddev->thread = conf->thread; in raid5_run()
7626 conf->thread = NULL; in raid5_run()
7627 mddev->private = conf; in raid5_run()
7629 for (i = 0; i < conf->raid_disks && conf->previous_raid_disks; in raid5_run()
7631 rdev = conf->disks[i].rdev; in raid5_run()
7632 if (!rdev && conf->disks[i].replacement) { in raid5_run()
7634 rdev = conf->disks[i].replacement; in raid5_run()
7635 conf->disks[i].replacement = NULL; in raid5_run()
7637 conf->disks[i].rdev = rdev; in raid5_run()
7641 if (conf->disks[i].replacement && in raid5_run()
7642 conf->reshape_progress != MaxSector) { in raid5_run()
7667 conf->algorithm, in raid5_run()
7668 conf->raid_disks, in raid5_run()
7669 conf->max_degraded)) in raid5_run()
7673 conf->prev_algo, in raid5_run()
7674 conf->previous_raid_disks, in raid5_run()
7675 conf->max_degraded)) in raid5_run()
7683 mddev->degraded = raid5_calc_degraded(conf); in raid5_run()
7685 if (has_failed(conf)) { in raid5_run()
7687 mdname(mddev), mddev->degraded, conf->raid_disks); in raid5_run()
7711 mdname(mddev), conf->level, in raid5_run()
7715 print_raid5_conf(conf); in raid5_run()
7717 if (conf->reshape_progress != MaxSector) { in raid5_run()
7718 conf->reshape_safe = conf->reshape_progress; in raid5_run()
7719 atomic_set(&conf->reshape_stripes, 0); in raid5_run()
7745 int data_disks = conf->previous_raid_disks - conf->max_degraded; in raid5_run()
7751 raid5_set_io_opt(conf); in raid5_run()
7802 if (log_init(conf, journal_dev, raid5_has_ppl(conf))) in raid5_run()
7808 print_raid5_conf(conf); in raid5_run()
7809 free_conf(conf); in raid5_run()
7820 struct r5conf *conf = priv; in raid5_free() local
7822 free_conf(conf); in raid5_free()
7829 struct r5conf *conf = mddev->private; in raid5_status() local
7833 conf->chunk_sectors / 2, mddev->layout); in raid5_status()
7834 seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded); in raid5_status()
7836 for (i = 0; i < conf->raid_disks; i++) { in raid5_status()
7837 struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev); in raid5_status()
7844 static void print_raid5_conf (struct r5conf *conf) in print_raid5_conf() argument
7850 if (!conf) { in print_raid5_conf()
7854 pr_debug(" --- level:%d rd:%d wd:%d\n", conf->level, in print_raid5_conf()
7855 conf->raid_disks, in print_raid5_conf()
7856 conf->raid_disks - conf->mddev->degraded); in print_raid5_conf()
7858 for (i = 0; i < conf->raid_disks; i++) { in print_raid5_conf()
7860 tmp = conf->disks + i; in print_raid5_conf()
7871 struct r5conf *conf = mddev->private; in raid5_spare_active() local
7876 for (i = 0; i < conf->raid_disks; i++) { in raid5_spare_active()
7877 tmp = conf->disks + i; in raid5_spare_active()
7904 spin_lock_irqsave(&conf->device_lock, flags); in raid5_spare_active()
7905 mddev->degraded = raid5_calc_degraded(conf); in raid5_spare_active()
7906 spin_unlock_irqrestore(&conf->device_lock, flags); in raid5_spare_active()
7907 print_raid5_conf(conf); in raid5_spare_active()
7913 struct r5conf *conf = mddev->private; in raid5_remove_disk() local
7917 struct disk_info *p = conf->disks + number; in raid5_remove_disk()
7919 print_raid5_conf(conf); in raid5_remove_disk()
7920 if (test_bit(Journal, &rdev->flags) && conf->log) { in raid5_remove_disk()
7927 if (atomic_read(&conf->active_stripes) || in raid5_remove_disk()
7928 atomic_read(&conf->r5c_cached_full_stripes) || in raid5_remove_disk()
7929 atomic_read(&conf->r5c_cached_partial_stripes)) { in raid5_remove_disk()
7932 log_exit(conf); in raid5_remove_disk()
7942 if (number >= conf->raid_disks && in raid5_remove_disk()
7943 conf->reshape_progress == MaxSector) in raid5_remove_disk()
7955 mddev->recovery_disabled != conf->recovery_disabled && in raid5_remove_disk()
7956 !has_failed(conf) && in raid5_remove_disk()
7958 number < conf->raid_disks) { in raid5_remove_disk()
7972 err = log_modify(conf, rdev, false); in raid5_remove_disk()
7986 err = log_modify(conf, p->rdev, true); in raid5_remove_disk()
7992 print_raid5_conf(conf); in raid5_remove_disk()
7998 struct r5conf *conf = mddev->private; in raid5_add_disk() local
8003 int last = conf->raid_disks - 1; in raid5_add_disk()
8006 if (conf->log) in raid5_add_disk()
8014 ret = log_init(conf, rdev, false); in raid5_add_disk()
8018 ret = r5l_start(conf->log); in raid5_add_disk()
8024 if (mddev->recovery_disabled == conf->recovery_disabled) in raid5_add_disk()
8027 if (rdev->saved_raid_disk < 0 && has_failed(conf)) in raid5_add_disk()
8041 conf->disks[rdev->saved_raid_disk].rdev == NULL) in raid5_add_disk()
8045 p = conf->disks + disk; in raid5_add_disk()
8050 conf->fullsync = 1; in raid5_add_disk()
8053 err = log_modify(conf, rdev, true); in raid5_add_disk()
8059 p = conf->disks + disk; in raid5_add_disk()
8066 conf->fullsync = 1; in raid5_add_disk()
8072 print_raid5_conf(conf); in raid5_add_disk()
8086 struct r5conf *conf = mddev->private; in raid5_resize() local
8088 if (raid5_has_log(conf) || raid5_has_ppl(conf)) in raid5_resize()
8090 sectors &= ~((sector_t)conf->chunk_sectors - 1); in raid5_resize()
8121 struct r5conf *conf = mddev->private; in check_stripe_cache() local
8122 if (((mddev->chunk_sectors << 9) / RAID5_STRIPE_SIZE(conf)) * 4 in check_stripe_cache()
8123 > conf->min_nr_stripes || in check_stripe_cache()
8124 ((mddev->new_chunk_sectors << 9) / RAID5_STRIPE_SIZE(conf)) * 4 in check_stripe_cache()
8125 > conf->min_nr_stripes) { in check_stripe_cache()
8129 / RAID5_STRIPE_SIZE(conf))*4); in check_stripe_cache()
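check_stripe_cache() refuses to start a reshape unless min_nr_stripes covers four chunks' worth of stripe_heads at both the old and the new chunk size, i.e. ((chunk_sectors << 9) / RAID5_STRIPE_SIZE(conf)) * 4. With assumed 512 KiB chunks and 4 KiB stripes that comes to 512 stripes:

    #include <stdio.h>

    int main(void)
    {
            unsigned int chunk_sectors = 1024;  /* 512 KiB chunk */
            unsigned int stripe_size = 4096;    /* RAID5_STRIPE_SIZE() with 4K pages */
            unsigned int needed = ((chunk_sectors << 9) / stripe_size) * 4;

            printf("reshape needs min_nr_stripes >= %u\n", needed);  /* 512 */
            return 0;
    }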
8137 struct r5conf *conf = mddev->private; in check_reshape() local
8139 if (raid5_has_log(conf) || raid5_has_ppl(conf)) in check_reshape()
8145 if (has_failed(conf)) in check_reshape()
8165 if (resize_chunks(conf, in check_reshape()
8166 conf->previous_raid_disks in check_reshape()
8173 if (conf->previous_raid_disks + mddev->delta_disks <= conf->pool_size) in check_reshape()
8175 return resize_stripes(conf, (conf->previous_raid_disks in check_reshape()
8181 struct r5conf *conf = mddev->private; in raid5_start_reshape() local
8192 if (has_failed(conf)) in raid5_start_reshape()
8201 if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded) in raid5_start_reshape()
8211 if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks) in raid5_start_reshape()
8218 atomic_set(&conf->reshape_stripes, 0); in raid5_start_reshape()
8219 spin_lock_irq(&conf->device_lock); in raid5_start_reshape()
8220 write_seqcount_begin(&conf->gen_lock); in raid5_start_reshape()
8221 conf->previous_raid_disks = conf->raid_disks; in raid5_start_reshape()
8222 conf->raid_disks += mddev->delta_disks; in raid5_start_reshape()
8223 conf->prev_chunk_sectors = conf->chunk_sectors; in raid5_start_reshape()
8224 conf->chunk_sectors = mddev->new_chunk_sectors; in raid5_start_reshape()
8225 conf->prev_algo = conf->algorithm; in raid5_start_reshape()
8226 conf->algorithm = mddev->new_layout; in raid5_start_reshape()
8227 conf->generation++; in raid5_start_reshape()
8233 conf->reshape_progress = raid5_size(mddev, 0, 0); in raid5_start_reshape()
8235 conf->reshape_progress = 0; in raid5_start_reshape()
8236 conf->reshape_safe = conf->reshape_progress; in raid5_start_reshape()
8237 write_seqcount_end(&conf->gen_lock); in raid5_start_reshape()
8238 spin_unlock_irq(&conf->device_lock); in raid5_start_reshape()
8260 >= conf->previous_raid_disks) in raid5_start_reshape()
8268 } else if (rdev->raid_disk >= conf->previous_raid_disks in raid5_start_reshape()
8278 spin_lock_irqsave(&conf->device_lock, flags); in raid5_start_reshape()
8279 mddev->degraded = raid5_calc_degraded(conf); in raid5_start_reshape()
8280 spin_unlock_irqrestore(&conf->device_lock, flags); in raid5_start_reshape()
8282 mddev->raid_disks = conf->raid_disks; in raid5_start_reshape()
8283 mddev->reshape_position = conf->reshape_progress; in raid5_start_reshape()
8295 spin_lock_irq(&conf->device_lock); in raid5_start_reshape()
8296 write_seqcount_begin(&conf->gen_lock); in raid5_start_reshape()
8297 mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks; in raid5_start_reshape()
8299 conf->chunk_sectors = conf->prev_chunk_sectors; in raid5_start_reshape()
8300 mddev->new_layout = conf->algorithm = conf->prev_algo; in raid5_start_reshape()
8304 conf->generation --; in raid5_start_reshape()
8305 conf->reshape_progress = MaxSector; in raid5_start_reshape()
8307 write_seqcount_end(&conf->gen_lock); in raid5_start_reshape()
8308 spin_unlock_irq(&conf->device_lock); in raid5_start_reshape()
8311 conf->reshape_checkpoint = jiffies; in raid5_start_reshape()
8320 static void end_reshape(struct r5conf *conf) in end_reshape() argument
8323 if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { in end_reshape()
8326 spin_lock_irq(&conf->device_lock); in end_reshape()
8327 conf->previous_raid_disks = conf->raid_disks; in end_reshape()
8328 md_finish_reshape(conf->mddev); in end_reshape()
8330 conf->reshape_progress = MaxSector; in end_reshape()
8331 conf->mddev->reshape_position = MaxSector; in end_reshape()
8332 rdev_for_each(rdev, conf->mddev) in end_reshape()
8337 spin_unlock_irq(&conf->device_lock); in end_reshape()
8338 wake_up(&conf->wait_for_overlap); in end_reshape()
8340 if (conf->mddev->queue) in end_reshape()
8341 raid5_set_io_opt(conf); in end_reshape()
8350 struct r5conf *conf = mddev->private; in raid5_finish_reshape() local
8356 spin_lock_irq(&conf->device_lock); in raid5_finish_reshape()
8357 mddev->degraded = raid5_calc_degraded(conf); in raid5_finish_reshape()
8358 spin_unlock_irq(&conf->device_lock); in raid5_finish_reshape()
8359 for (d = conf->raid_disks ; in raid5_finish_reshape()
8360 d < conf->raid_disks - mddev->delta_disks; in raid5_finish_reshape()
8362 struct md_rdev *rdev = conf->disks[d].rdev; in raid5_finish_reshape()
8365 rdev = conf->disks[d].replacement; in raid5_finish_reshape()
8370 mddev->layout = conf->algorithm; in raid5_finish_reshape()
8371 mddev->chunk_sectors = conf->chunk_sectors; in raid5_finish_reshape()
8380 struct r5conf *conf = mddev->private; in raid5_quiesce() local
8384 lock_all_device_hash_locks_irq(conf); in raid5_quiesce()
8388 r5c_flush_cache(conf, INT_MAX); in raid5_quiesce()
8392 smp_store_release(&conf->quiesce, 2); in raid5_quiesce()
8393 wait_event_cmd(conf->wait_for_quiescent, in raid5_quiesce()
8394 atomic_read(&conf->active_stripes) == 0 && in raid5_quiesce()
8395 atomic_read(&conf->active_aligned_reads) == 0, in raid5_quiesce()
8396 unlock_all_device_hash_locks_irq(conf), in raid5_quiesce()
8397 lock_all_device_hash_locks_irq(conf)); in raid5_quiesce()
8398 conf->quiesce = 1; in raid5_quiesce()
8399 unlock_all_device_hash_locks_irq(conf); in raid5_quiesce()
8401 wake_up(&conf->wait_for_overlap); in raid5_quiesce()
8404 lock_all_device_hash_locks_irq(conf); in raid5_quiesce()
8405 conf->quiesce = 0; in raid5_quiesce()
8406 wake_up(&conf->wait_for_quiescent); in raid5_quiesce()
8407 wake_up(&conf->wait_for_overlap); in raid5_quiesce()
8408 unlock_all_device_hash_locks_irq(conf); in raid5_quiesce()
8410 log_quiesce(conf, quiesce); in raid5_quiesce()
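raid5_quiesce() drives conf->quiesce through a small state machine while holding every hash lock: 2 blocks new stripe activations, and once active_stripes and active_aligned_reads drain to zero it becomes 1 (fully quiesced); unquiescing stores 0 and wakes both waitqueues. Sketched below as commented C, since the interesting part is the ordering rather than code that runs standalone; wait_event_cmd() is what drops and retakes the locks around each sleep, and with a writeback journal r5c_flush_cache() runs first:

    /*
     * quiesce(conf, 1):
     *   lock_all_device_hash_locks_irq(conf);
     *   conf->quiesce = 2;            // new stripe activations now block
     *   wait for active_stripes == 0 && active_aligned_reads == 0,
     *     unlocking/relocking all hash locks around each sleep;
     *   conf->quiesce = 1;            // fully quiesced
     *   unlock_all_device_hash_locks_irq(conf);
     *   wake_up(&conf->wait_for_overlap);
     *
     * quiesce(conf, 0):
     *   lock_all_device_hash_locks_irq(conf);
     *   conf->quiesce = 0;
     *   wake_up(&conf->wait_for_quiescent);
     *   wake_up(&conf->wait_for_overlap);
     *   unlock_all_device_hash_locks_irq(conf);
     *
     * Both paths finish with log_quiesce(conf, quiesce).
     */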
8511 struct r5conf *conf = mddev->private; in raid5_check_reshape() local
8531 conf->algorithm = mddev->new_layout; in raid5_check_reshape()
8535 conf->chunk_sectors = new_chunk ; in raid5_check_reshape()
8654 struct r5conf *conf; in raid5_change_consistency_policy() local
8660 conf = mddev->private; in raid5_change_consistency_policy()
8661 if (!conf) { in raid5_change_consistency_policy()
8668 if (!raid5_has_ppl(conf) && conf->level == 5) { in raid5_change_consistency_policy()
8669 err = log_init(conf, NULL, true); in raid5_change_consistency_policy()
8671 err = resize_stripes(conf, conf->pool_size); in raid5_change_consistency_policy()
8673 log_exit(conf); in raid5_change_consistency_policy()
8678 if (raid5_has_ppl(conf)) { in raid5_change_consistency_policy()
8680 log_exit(conf); in raid5_change_consistency_policy()
8682 err = resize_stripes(conf, conf->pool_size); in raid5_change_consistency_policy()
8683 } else if (test_bit(MD_HAS_JOURNAL, &conf->mddev->flags) && in raid5_change_consistency_policy()
8684 r5l_log_disk_error(conf)) { in raid5_change_consistency_policy()
8716 struct r5conf *conf = mddev->private; in raid5_start() local
8718 return r5l_start(conf->log); in raid5_start()