Lines Matching refs:conf

70 static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect)  in stripe_hash()  argument
73 return &conf->stripe_hashtbl[hash]; in stripe_hash()
81 static inline void lock_device_hash_lock(struct r5conf *conf, int hash) in lock_device_hash_lock() argument
83 spin_lock_irq(conf->hash_locks + hash); in lock_device_hash_lock()
84 spin_lock(&conf->device_lock); in lock_device_hash_lock()
87 static inline void unlock_device_hash_lock(struct r5conf *conf, int hash) in unlock_device_hash_lock() argument
89 spin_unlock(&conf->device_lock); in unlock_device_hash_lock()
90 spin_unlock_irq(conf->hash_locks + hash); in unlock_device_hash_lock()
93 static inline void lock_all_device_hash_locks_irq(struct r5conf *conf) in lock_all_device_hash_locks_irq() argument
96 spin_lock_irq(conf->hash_locks); in lock_all_device_hash_locks_irq()
98 spin_lock_nest_lock(conf->hash_locks + i, conf->hash_locks); in lock_all_device_hash_locks_irq()
99 spin_lock(&conf->device_lock); in lock_all_device_hash_locks_irq()
102 static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf) in unlock_all_device_hash_locks_irq() argument
105 spin_unlock(&conf->device_lock); in unlock_all_device_hash_locks_irq()
107 spin_unlock(conf->hash_locks + i); in unlock_all_device_hash_locks_irq()
108 spin_unlock_irq(conf->hash_locks); in unlock_all_device_hash_locks_irq()
150 static void print_raid5_conf (struct r5conf *conf);
168 struct r5conf *conf = sh->raid_conf; in raid5_wakeup_stripe_thread() local
180 group = conf->worker_groups + cpu_to_group(cpu); in raid5_wakeup_stripe_thread()
189 if (conf->worker_cnt_per_group == 0) { in raid5_wakeup_stripe_thread()
190 md_wakeup_thread(conf->mddev->thread); in raid5_wakeup_stripe_thread()
194 group = conf->worker_groups + cpu_to_group(sh->cpu); in raid5_wakeup_stripe_thread()
202 for (i = 1; i < conf->worker_cnt_per_group && thread_cnt > 0; i++) { in raid5_wakeup_stripe_thread()
212 static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh, in do_release_stripe() argument
219 BUG_ON(atomic_read(&conf->active_stripes)==0); in do_release_stripe()
221 if (r5c_is_writeback(conf->log)) in do_release_stripe()
233 (conf->quiesce && r5c_is_writeback(conf->log) && in do_release_stripe()
243 list_add_tail(&sh->lru, &conf->delayed_list); in do_release_stripe()
245 sh->bm_seq - conf->seq_write > 0) in do_release_stripe()
246 list_add_tail(&sh->lru, &conf->bitmap_list); in do_release_stripe()
250 if (conf->worker_cnt_per_group == 0) { in do_release_stripe()
253 &conf->loprio_list); in do_release_stripe()
256 &conf->handle_list); in do_release_stripe()
262 md_wakeup_thread(conf->mddev->thread); in do_release_stripe()
266 if (atomic_dec_return(&conf->preread_active_stripes) in do_release_stripe()
268 md_wakeup_thread(conf->mddev->thread); in do_release_stripe()
269 atomic_dec(&conf->active_stripes); in do_release_stripe()
271 if (!r5c_is_writeback(conf->log)) in do_release_stripe()
277 else if (injournal == conf->raid_disks - conf->max_degraded) { in do_release_stripe()
280 atomic_inc(&conf->r5c_cached_full_stripes); in do_release_stripe()
282 atomic_dec(&conf->r5c_cached_partial_stripes); in do_release_stripe()
283 list_add_tail(&sh->lru, &conf->r5c_full_stripe_list); in do_release_stripe()
284 r5c_check_cached_full_stripe(conf); in do_release_stripe()
291 list_add_tail(&sh->lru, &conf->r5c_partial_stripe_list); in do_release_stripe()
297 static void __release_stripe(struct r5conf *conf, struct stripe_head *sh, in __release_stripe() argument
301 do_release_stripe(conf, sh, temp_inactive_list); in __release_stripe()
311 static void release_inactive_stripe_list(struct r5conf *conf, in release_inactive_stripe_list() argument
332 spin_lock_irqsave(conf->hash_locks + hash, flags); in release_inactive_stripe_list()
333 if (list_empty(conf->inactive_list + hash) && in release_inactive_stripe_list()
335 atomic_dec(&conf->empty_inactive_list_nr); in release_inactive_stripe_list()
336 list_splice_tail_init(list, conf->inactive_list + hash); in release_inactive_stripe_list()
338 spin_unlock_irqrestore(conf->hash_locks + hash, flags); in release_inactive_stripe_list()
345 wake_up(&conf->wait_for_stripe); in release_inactive_stripe_list()
346 if (atomic_read(&conf->active_stripes) == 0) in release_inactive_stripe_list()
347 wake_up(&conf->wait_for_quiescent); in release_inactive_stripe_list()
348 if (conf->retry_read_aligned) in release_inactive_stripe_list()
349 md_wakeup_thread(conf->mddev->thread); in release_inactive_stripe_list()
354 static int release_stripe_list(struct r5conf *conf, in release_stripe_list() argument
361 head = llist_del_all(&conf->released_stripes); in release_stripe_list()
375 __release_stripe(conf, sh, &temp_inactive_list[hash]); in release_stripe_list()
384 struct r5conf *conf = sh->raid_conf; in raid5_release_stripe() local
395 if (unlikely(!conf->mddev->thread) || in raid5_release_stripe()
398 wakeup = llist_add(&sh->release_list, &conf->released_stripes); in raid5_release_stripe()
400 md_wakeup_thread(conf->mddev->thread); in raid5_release_stripe()
404 if (atomic_dec_and_lock_irqsave(&sh->count, &conf->device_lock, flags)) { in raid5_release_stripe()
407 do_release_stripe(conf, sh, &list); in raid5_release_stripe()
408 spin_unlock_irqrestore(&conf->device_lock, flags); in raid5_release_stripe()
409 release_inactive_stripe_list(conf, &list, hash); in raid5_release_stripe()
421 static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh) in insert_hash() argument
423 struct hlist_head *hp = stripe_hash(conf, sh->sector); in insert_hash()
432 static struct stripe_head *get_free_stripe(struct r5conf *conf, int hash) in get_free_stripe() argument
437 if (list_empty(conf->inactive_list + hash)) in get_free_stripe()
439 first = (conf->inactive_list + hash)->next; in get_free_stripe()
443 atomic_inc(&conf->active_stripes); in get_free_stripe()
445 if (list_empty(conf->inactive_list + hash)) in get_free_stripe()
446 atomic_inc(&conf->empty_inactive_list_nr); in get_free_stripe()
485 static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous,
490 struct r5conf *conf = sh->raid_conf; in init_stripe() local
501 seq = read_seqcount_begin(&conf->gen_lock); in init_stripe()
502 sh->generation = conf->generation - previous; in init_stripe()
503 sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks; in init_stripe()
505 stripe_set_idx(sector, conf, previous, sh); in init_stripe()
522 if (read_seqcount_retry(&conf->gen_lock, seq)) in init_stripe()
525 insert_hash(conf, sh); in init_stripe()
530 static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector, in __find_stripe() argument
536 hlist_for_each_entry(sh, stripe_hash(conf, sector), hash) in __find_stripe()
556 int raid5_calc_degraded(struct r5conf *conf) in raid5_calc_degraded() argument
563 for (i = 0; i < conf->previous_raid_disks; i++) { in raid5_calc_degraded()
564 struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev); in raid5_calc_degraded()
566 rdev = rcu_dereference(conf->disks[i].replacement); in raid5_calc_degraded()
581 if (conf->raid_disks >= conf->previous_raid_disks) in raid5_calc_degraded()
585 if (conf->raid_disks == conf->previous_raid_disks) in raid5_calc_degraded()
589 for (i = 0; i < conf->raid_disks; i++) { in raid5_calc_degraded()
590 struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev); in raid5_calc_degraded()
592 rdev = rcu_dereference(conf->disks[i].replacement); in raid5_calc_degraded()
603 if (conf->raid_disks <= conf->previous_raid_disks) in raid5_calc_degraded()
612 static int has_failed(struct r5conf *conf) in has_failed() argument
616 if (conf->mddev->reshape_position == MaxSector) in has_failed()
617 return conf->mddev->degraded > conf->max_degraded; in has_failed()
619 degraded = raid5_calc_degraded(conf); in has_failed()
620 if (degraded > conf->max_degraded) in has_failed()
626 raid5_get_active_stripe(struct r5conf *conf, sector_t sector, in raid5_get_active_stripe() argument
635 spin_lock_irq(conf->hash_locks + hash); in raid5_get_active_stripe()
638 wait_event_lock_irq(conf->wait_for_quiescent, in raid5_get_active_stripe()
639 conf->quiesce == 0 || noquiesce, in raid5_get_active_stripe()
640 *(conf->hash_locks + hash)); in raid5_get_active_stripe()
641 sh = __find_stripe(conf, sector, conf->generation - previous); in raid5_get_active_stripe()
643 if (!test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state)) { in raid5_get_active_stripe()
644 sh = get_free_stripe(conf, hash); in raid5_get_active_stripe()
646 &conf->cache_state)) in raid5_get_active_stripe()
648 &conf->cache_state); in raid5_get_active_stripe()
653 r5c_check_stripe_cache_usage(conf); in raid5_get_active_stripe()
656 &conf->cache_state); in raid5_get_active_stripe()
657 r5l_wake_reclaim(conf->log, 0); in raid5_get_active_stripe()
659 conf->wait_for_stripe, in raid5_get_active_stripe()
660 !list_empty(conf->inactive_list + hash) && in raid5_get_active_stripe()
661 (atomic_read(&conf->active_stripes) in raid5_get_active_stripe()
662 < (conf->max_nr_stripes * 3 / 4) in raid5_get_active_stripe()
664 &conf->cache_state)), in raid5_get_active_stripe()
665 *(conf->hash_locks + hash)); in raid5_get_active_stripe()
667 &conf->cache_state); in raid5_get_active_stripe()
673 spin_lock(&conf->device_lock); in raid5_get_active_stripe()
676 atomic_inc(&conf->active_stripes); in raid5_get_active_stripe()
680 if (!list_empty(conf->inactive_list + hash)) in raid5_get_active_stripe()
683 if (list_empty(conf->inactive_list + hash) && inc_empty_inactive_list_flag) in raid5_get_active_stripe()
684 atomic_inc(&conf->empty_inactive_list_nr); in raid5_get_active_stripe()
691 spin_unlock(&conf->device_lock); in raid5_get_active_stripe()
695 spin_unlock_irq(conf->hash_locks + hash); in raid5_get_active_stripe()
729 struct r5conf *conf = sh->raid_conf; in stripe_can_batch() local
731 if (raid5_has_log(conf) || raid5_has_ppl(conf)) in stripe_can_batch()
739 static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh) in stripe_add_to_batch_list() argument
749 if (!sector_div(tmp_sec, conf->chunk_sectors)) in stripe_add_to_batch_list()
754 spin_lock_irq(conf->hash_locks + hash); in stripe_add_to_batch_list()
755 head = __find_stripe(conf, head_sector, conf->generation); in stripe_add_to_batch_list()
757 spin_lock(&conf->device_lock); in stripe_add_to_batch_list()
760 atomic_inc(&conf->active_stripes); in stripe_add_to_batch_list()
764 if (!list_empty(conf->inactive_list + hash)) in stripe_add_to_batch_list()
767 if (list_empty(conf->inactive_list + hash) && inc_empty_inactive_list_flag) in stripe_add_to_batch_list()
768 atomic_inc(&conf->empty_inactive_list_nr); in stripe_add_to_batch_list()
775 spin_unlock(&conf->device_lock); in stripe_add_to_batch_list()
777 spin_unlock_irq(conf->hash_locks + hash); in stripe_add_to_batch_list()
830 if (atomic_dec_return(&conf->preread_active_stripes) in stripe_add_to_batch_list()
832 md_wakeup_thread(conf->mddev->thread); in stripe_add_to_batch_list()
853 static int use_new_offset(struct r5conf *conf, struct stripe_head *sh) in use_new_offset() argument
855 sector_t progress = conf->reshape_progress; in use_new_offset()
863 if (sh->generation == conf->generation - 1) in use_new_offset()
892 static void dispatch_defer_bios(struct r5conf *conf, int target, in dispatch_defer_bios() argument
899 if (conf->pending_data_cnt == 0) in dispatch_defer_bios()
902 list_sort(NULL, &conf->pending_list, cmp_stripe); in dispatch_defer_bios()
904 first = conf->pending_list.next; in dispatch_defer_bios()
907 if (conf->next_pending_data) in dispatch_defer_bios()
908 list_move_tail(&conf->pending_list, in dispatch_defer_bios()
909 &conf->next_pending_data->sibling); in dispatch_defer_bios()
911 while (!list_empty(&conf->pending_list)) { in dispatch_defer_bios()
912 data = list_first_entry(&conf->pending_list, in dispatch_defer_bios()
919 list_move(&data->sibling, &conf->free_list); in dispatch_defer_bios()
924 conf->pending_data_cnt -= cnt; in dispatch_defer_bios()
925 BUG_ON(conf->pending_data_cnt < 0 || cnt < target); in dispatch_defer_bios()
927 if (next != &conf->pending_list) in dispatch_defer_bios()
928 conf->next_pending_data = list_entry(next, in dispatch_defer_bios()
931 conf->next_pending_data = NULL; in dispatch_defer_bios()
933 if (first != &conf->pending_list) in dispatch_defer_bios()
934 list_move_tail(&conf->pending_list, first); in dispatch_defer_bios()
937 static void flush_deferred_bios(struct r5conf *conf) in flush_deferred_bios() argument
941 if (conf->pending_data_cnt == 0) in flush_deferred_bios()
944 spin_lock(&conf->pending_bios_lock); in flush_deferred_bios()
945 dispatch_defer_bios(conf, conf->pending_data_cnt, &tmp); in flush_deferred_bios()
946 BUG_ON(conf->pending_data_cnt != 0); in flush_deferred_bios()
947 spin_unlock(&conf->pending_bios_lock); in flush_deferred_bios()
952 static void defer_issue_bios(struct r5conf *conf, sector_t sector, in defer_issue_bios() argument
958 spin_lock(&conf->pending_bios_lock); in defer_issue_bios()
959 ent = list_first_entry(&conf->free_list, struct r5pending_data, in defer_issue_bios()
961 list_move_tail(&ent->sibling, &conf->pending_list); in defer_issue_bios()
965 conf->pending_data_cnt++; in defer_issue_bios()
966 if (conf->pending_data_cnt >= PENDING_IO_MAX) in defer_issue_bios()
967 dispatch_defer_bios(conf, PENDING_IO_ONE_FLUSH, &tmp); in defer_issue_bios()
969 spin_unlock(&conf->pending_bios_lock); in defer_issue_bios()
981 struct r5conf *conf = sh->raid_conf; in ops_run_io() local
992 should_defer = conf->batch_bio_dispatch && conf->group_cnt; in ops_run_io()
1023 rrdev = rcu_dereference(conf->disks[i].replacement); in ops_run_io()
1025 rdev = rcu_dereference(conf->disks[i].rdev); in ops_run_io()
1067 if (!conf->mddev->external && in ops_run_io()
1068 conf->mddev->sb_flags) { in ops_run_io()
1073 md_check_recovery(conf->mddev); in ops_run_io()
1081 md_wait_for_blocked_rdev(rdev, conf->mddev); in ops_run_io()
1084 rdev_dec_pending(rdev, conf->mddev); in ops_run_io()
1109 if (use_new_offset(conf, sh)) in ops_run_io()
1147 if (conf->mddev->gendisk) in ops_run_io()
1149 bi, disk_devt(conf->mddev->gendisk), in ops_run_io()
1176 if (use_new_offset(conf, sh)) in ops_run_io()
1197 if (conf->mddev->gendisk) in ops_run_io()
1199 rbi, disk_devt(conf->mddev->gendisk), in ops_run_io()
1224 defer_issue_bios(conf, head_sh->sector, &pending_bios); in ops_run_io()
1722 struct r5conf *conf = sh->raid_conf; in ops_run_biodrain() local
1765 r5c_is_writeback(conf->log)); in ops_run_biodrain()
1767 !r5c_is_writeback(conf->log)) { in ops_run_biodrain()
2056 struct r5conf *conf = sh->raid_conf; in raid_run_ops() local
2057 int level = conf->level; in raid_run_ops()
2062 percpu = per_cpu_ptr(conf->percpu, cpu); in raid_run_ops()
2132 int disks, struct r5conf *conf) in alloc_stripe() argument
2146 sh->raid_conf = conf; in alloc_stripe()
2155 if (raid5_has_ppl(conf)) { in alloc_stripe()
2165 static int grow_one_stripe(struct r5conf *conf, gfp_t gfp) in grow_one_stripe() argument
2169 sh = alloc_stripe(conf->slab_cache, gfp, conf->pool_size, conf); in grow_one_stripe()
2175 free_stripe(conf->slab_cache, sh); in grow_one_stripe()
2179 conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS; in grow_one_stripe()
2181 atomic_inc(&conf->active_stripes); in grow_one_stripe()
2184 conf->max_nr_stripes++; in grow_one_stripe()
2188 static int grow_stripes(struct r5conf *conf, int num) in grow_stripes() argument
2191 size_t namelen = sizeof(conf->cache_name[0]); in grow_stripes()
2192 int devs = max(conf->raid_disks, conf->previous_raid_disks); in grow_stripes()
2194 if (conf->mddev->gendisk) in grow_stripes()
2195 snprintf(conf->cache_name[0], namelen, in grow_stripes()
2196 "raid%d-%s", conf->level, mdname(conf->mddev)); in grow_stripes()
2198 snprintf(conf->cache_name[0], namelen, in grow_stripes()
2199 "raid%d-%p", conf->level, conf->mddev); in grow_stripes()
2200 snprintf(conf->cache_name[1], namelen, "%.27s-alt", conf->cache_name[0]); in grow_stripes()
2202 conf->active_name = 0; in grow_stripes()
2203 sc = kmem_cache_create(conf->cache_name[conf->active_name], in grow_stripes()
2208 conf->slab_cache = sc; in grow_stripes()
2209 conf->pool_size = devs; in grow_stripes()
2211 if (!grow_one_stripe(conf, GFP_KERNEL)) in grow_stripes()
2249 static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors) in resize_chunks() argument
2259 if (conf->scribble_disks >= new_disks && in resize_chunks()
2260 conf->scribble_sectors >= new_sectors) in resize_chunks()
2262 mddev_suspend(conf->mddev); in resize_chunks()
2268 percpu = per_cpu_ptr(conf->percpu, cpu); in resize_chunks()
2277 mddev_resume(conf->mddev); in resize_chunks()
2279 conf->scribble_disks = new_disks; in resize_chunks()
2280 conf->scribble_sectors = new_sectors; in resize_chunks()
2285 static int resize_stripes(struct r5conf *conf, int newsize) in resize_stripes() argument
2318 md_allow_write(conf->mddev); in resize_stripes()
2321 sc = kmem_cache_create(conf->cache_name[1-conf->active_name], in resize_stripes()
2328 mutex_lock(&conf->cache_size_mutex); in resize_stripes()
2330 for (i = conf->max_nr_stripes; i; i--) { in resize_stripes()
2331 nsh = alloc_stripe(sc, GFP_KERNEL, newsize, conf); in resize_stripes()
2345 mutex_unlock(&conf->cache_size_mutex); in resize_stripes()
2355 lock_device_hash_lock(conf, hash); in resize_stripes()
2356 wait_event_cmd(conf->wait_for_stripe, in resize_stripes()
2357 !list_empty(conf->inactive_list + hash), in resize_stripes()
2358 unlock_device_hash_lock(conf, hash), in resize_stripes()
2359 lock_device_hash_lock(conf, hash)); in resize_stripes()
2360 osh = get_free_stripe(conf, hash); in resize_stripes()
2361 unlock_device_hash_lock(conf, hash); in resize_stripes()
2363 for(i=0; i<conf->pool_size; i++) { in resize_stripes()
2368 free_stripe(conf->slab_cache, osh); in resize_stripes()
2370 if (cnt >= conf->max_nr_stripes / NR_STRIPE_HASH_LOCKS + in resize_stripes()
2371 !!((conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS) > hash)) { in resize_stripes()
2376 kmem_cache_destroy(conf->slab_cache); in resize_stripes()
2385 for (i = 0; i < conf->pool_size; i++) in resize_stripes()
2386 ndisks[i] = conf->disks[i]; in resize_stripes()
2388 for (i = conf->pool_size; i < newsize; i++) { in resize_stripes()
2395 for (i = conf->pool_size; i < newsize; i++) in resize_stripes()
2400 kfree(conf->disks); in resize_stripes()
2401 conf->disks = ndisks; in resize_stripes()
2406 mutex_unlock(&conf->cache_size_mutex); in resize_stripes()
2408 conf->slab_cache = sc; in resize_stripes()
2409 conf->active_name = 1-conf->active_name; in resize_stripes()
2416 for (i=conf->raid_disks; i < newsize; i++) in resize_stripes()
2429 conf->pool_size = newsize; in resize_stripes()
2433 static int drop_one_stripe(struct r5conf *conf) in drop_one_stripe() argument
2436 int hash = (conf->max_nr_stripes - 1) & STRIPE_HASH_LOCKS_MASK; in drop_one_stripe()
2438 spin_lock_irq(conf->hash_locks + hash); in drop_one_stripe()
2439 sh = get_free_stripe(conf, hash); in drop_one_stripe()
2440 spin_unlock_irq(conf->hash_locks + hash); in drop_one_stripe()
2445 free_stripe(conf->slab_cache, sh); in drop_one_stripe()
2446 atomic_dec(&conf->active_stripes); in drop_one_stripe()
2447 conf->max_nr_stripes--; in drop_one_stripe()
2451 static void shrink_stripes(struct r5conf *conf) in shrink_stripes() argument
2453 while (conf->max_nr_stripes && in shrink_stripes()
2454 drop_one_stripe(conf)) in shrink_stripes()
2457 kmem_cache_destroy(conf->slab_cache); in shrink_stripes()
2458 conf->slab_cache = NULL; in shrink_stripes()
2464 struct r5conf *conf = sh->raid_conf; in raid5_end_read_request() local
2488 rdev = conf->disks[i].replacement; in raid5_end_read_request()
2490 rdev = conf->disks[i].rdev; in raid5_end_read_request()
2492 if (use_new_offset(conf, sh)) in raid5_end_read_request()
2505 mdname(conf->mddev), STRIPE_SECTORS, in raid5_end_read_request()
2534 mdname(conf->mddev), in raid5_end_read_request()
2537 else if (conf->mddev->degraded >= conf->max_degraded) { in raid5_end_read_request()
2541 mdname(conf->mddev), in raid5_end_read_request()
2549 mdname(conf->mddev), in raid5_end_read_request()
2553 > conf->max_nr_stripes) { in raid5_end_read_request()
2556 mdname(conf->mddev), in raid5_end_read_request()
2558 conf->max_nr_stripes); in raid5_end_read_request()
2560 mdname(conf->mddev), bdn); in raid5_end_read_request()
2582 md_error(conf->mddev, rdev); in raid5_end_read_request()
2585 rdev_dec_pending(rdev, conf->mddev); in raid5_end_read_request()
2595 struct r5conf *conf = sh->raid_conf; in raid5_end_write_request() local
2604 rdev = conf->disks[i].rdev; in raid5_end_write_request()
2608 rdev = conf->disks[i].replacement; in raid5_end_write_request()
2616 rdev = conf->disks[i].rdev; in raid5_end_write_request()
2631 md_error(conf->mddev, rdev); in raid5_end_write_request()
2656 rdev_dec_pending(rdev, conf->mddev); in raid5_end_write_request()
2674 struct r5conf *conf = mddev->private; in raid5_error() local
2678 spin_lock_irqsave(&conf->device_lock, flags); in raid5_error()
2681 mddev->degraded == conf->max_degraded) { in raid5_error()
2686 conf->recovery_disabled = mddev->recovery_disabled; in raid5_error()
2687 spin_unlock_irqrestore(&conf->device_lock, flags); in raid5_error()
2693 mddev->degraded = raid5_calc_degraded(conf); in raid5_error()
2694 spin_unlock_irqrestore(&conf->device_lock, flags); in raid5_error()
2705 conf->raid_disks - mddev->degraded); in raid5_error()
2713 sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector, in raid5_compute_sector() argument
2723 int algorithm = previous ? conf->prev_algo in raid5_compute_sector()
2724 : conf->algorithm; in raid5_compute_sector()
2725 int sectors_per_chunk = previous ? conf->prev_chunk_sectors in raid5_compute_sector()
2726 : conf->chunk_sectors; in raid5_compute_sector()
2727 int raid_disks = previous ? conf->previous_raid_disks in raid5_compute_sector()
2728 : conf->raid_disks; in raid5_compute_sector()
2729 int data_disks = raid_disks - conf->max_degraded; in raid5_compute_sector()
2749 switch(conf->level) { in raid5_compute_sector()
2917 struct r5conf *conf = sh->raid_conf; in raid5_compute_blocknr() local
2919 int data_disks = raid_disks - conf->max_degraded; in raid5_compute_blocknr()
2921 int sectors_per_chunk = previous ? conf->prev_chunk_sectors in raid5_compute_blocknr()
2922 : conf->chunk_sectors; in raid5_compute_blocknr()
2923 int algorithm = previous ? conf->prev_algo in raid5_compute_blocknr()
2924 : conf->algorithm; in raid5_compute_blocknr()
2937 switch(conf->level) { in raid5_compute_blocknr()
3024 check = raid5_compute_sector(conf, r_sector, in raid5_compute_blocknr()
3029 mdname(conf->mddev)); in raid5_compute_blocknr()
3073 static inline bool delay_towrite(struct r5conf *conf, in delay_towrite() argument
3082 if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) && in delay_towrite()
3096 struct r5conf *conf = sh->raid_conf; in schedule_reconstruction() local
3097 int level = conf->level; in schedule_reconstruction()
3111 if (dev->towrite && !delay_towrite(conf, dev, s)) { in schedule_reconstruction()
3137 if (s->locked + conf->max_degraded == disks) in schedule_reconstruction()
3139 atomic_inc(&conf->pending_full_writes); in schedule_reconstruction()
3209 struct r5conf *conf = sh->raid_conf; in add_stripe_bio() local
3235 if (forwrite && raid5_has_ppl(conf)) { in add_stripe_bio()
3261 if (first + conf->chunk_sectors * (count - 1) != last) in add_stripe_bio()
3273 md_write_inc(conf->mddev, bi); in add_stripe_bio()
3294 if (conf->mddev->bitmap && firstwrite) { in add_stripe_bio()
3309 md_bitmap_startwrite(conf->mddev->bitmap, sh->sector, in add_stripe_bio()
3314 sh->bm_seq = conf->seq_flush+1; in add_stripe_bio()
3321 stripe_add_to_batch_list(conf, sh); in add_stripe_bio()
3330 static void end_reshape(struct r5conf *conf);
3332 static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous, in stripe_set_idx() argument
3336 previous ? conf->prev_chunk_sectors : conf->chunk_sectors; in stripe_set_idx()
3339 int disks = previous ? conf->previous_raid_disks : conf->raid_disks; in stripe_set_idx()
3341 raid5_compute_sector(conf, in stripe_set_idx()
3342 stripe * (disks - conf->max_degraded) in stripe_set_idx()
3349 handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, in handle_failed_stripe() argument
3361 rdev = rcu_dereference(conf->disks[i].rdev); in handle_failed_stripe()
3373 md_error(conf->mddev, rdev); in handle_failed_stripe()
3374 rdev_dec_pending(rdev, conf->mddev); in handle_failed_stripe()
3389 wake_up(&conf->wait_for_overlap); in handle_failed_stripe()
3395 md_write_end(conf->mddev); in handle_failed_stripe()
3400 md_bitmap_endwrite(conf->mddev->bitmap, sh->sector, in handle_failed_stripe()
3416 md_write_end(conf->mddev); in handle_failed_stripe()
3425 s->failed > conf->max_degraded && in handle_failed_stripe()
3433 wake_up(&conf->wait_for_overlap); in handle_failed_stripe()
3446 md_bitmap_endwrite(conf->mddev->bitmap, sh->sector, in handle_failed_stripe()
3457 if (atomic_dec_and_test(&conf->pending_full_writes)) in handle_failed_stripe()
3458 md_wakeup_thread(conf->mddev->thread); in handle_failed_stripe()
3462 handle_failed_sync(struct r5conf *conf, struct stripe_head *sh, in handle_failed_sync() argument
3471 wake_up(&conf->wait_for_overlap); in handle_failed_sync()
3481 if (test_bit(MD_RECOVERY_RECOVER, &conf->mddev->recovery)) { in handle_failed_sync()
3486 for (i = 0; i < conf->raid_disks; i++) { in handle_failed_sync()
3487 struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev); in handle_failed_sync()
3494 rdev = rcu_dereference(conf->disks[i].replacement); in handle_failed_sync()
3504 conf->recovery_disabled = in handle_failed_sync()
3505 conf->mddev->recovery_disabled; in handle_failed_sync()
3507 md_done_sync(conf->mddev, STRIPE_SECTORS, !abort); in handle_failed_sync()
3750 static void handle_stripe_clean_event(struct r5conf *conf, in handle_stripe_clean_event() argument
3783 md_write_end(conf->mddev); in handle_stripe_clean_event()
3787 md_bitmap_endwrite(conf->mddev->bitmap, sh->sector, in handle_stripe_clean_event()
3826 spin_lock_irq(conf->hash_locks + hash); in handle_stripe_clean_event()
3828 spin_unlock_irq(conf->hash_locks + hash); in handle_stripe_clean_event()
3843 if (atomic_dec_and_test(&conf->pending_full_writes)) in handle_stripe_clean_event()
3844 md_wakeup_thread(conf->mddev->thread); in handle_stripe_clean_event()
3865 static int handle_stripe_dirtying(struct r5conf *conf, in handle_stripe_dirtying() argument
3871 sector_t recovery_cp = conf->mddev->recovery_cp; in handle_stripe_dirtying()
3880 if (conf->rmw_level == PARITY_DISABLE_RMW || in handle_stripe_dirtying()
3888 conf->rmw_level, (unsigned long long)recovery_cp, in handle_stripe_dirtying()
3893 if (((dev->towrite && !delay_towrite(conf, dev, s)) || in handle_stripe_dirtying()
3920 if ((rmw < rcw || (rmw == rcw && conf->rmw_level == PARITY_PREFER_RMW)) && rmw > 0) { in handle_stripe_dirtying()
3922 if (conf->mddev->queue) in handle_stripe_dirtying()
3923 blk_add_trace_msg(conf->mddev->queue, in handle_stripe_dirtying()
3944 &conf->cache_state)) { in handle_stripe_dirtying()
3958 if (((dev->towrite && !delay_towrite(conf, dev, s)) || in handle_stripe_dirtying()
3979 if ((rcw < rmw || (rcw == rmw && conf->rmw_level != PARITY_PREFER_RMW)) && rcw > 0) { in handle_stripe_dirtying()
4006 if (rcw && conf->mddev->queue) in handle_stripe_dirtying()
4007 blk_add_trace_msg(conf->mddev->queue, "raid5 rcw %llu %d %d %d", in handle_stripe_dirtying()
4033 static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh, in handle_parity_checks5() argument
4095 atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches); in handle_parity_checks5()
4096 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) { in handle_parity_checks5()
4100 "%llu-%llu\n", mdname(conf->mddev), in handle_parity_checks5()
4126 static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh, in handle_parity_checks6() argument
4222 mdname(conf->mddev), in handle_parity_checks6()
4260 atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches); in handle_parity_checks6()
4261 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) { in handle_parity_checks6()
4265 "%llu-%llu\n", mdname(conf->mddev), in handle_parity_checks6()
4303 static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh) in handle_stripe_expansion() argument
4320 sector_t s = raid5_compute_sector(conf, bn, 0, in handle_stripe_expansion()
4322 sh2 = raid5_get_active_stripe(conf, s, 0, 1, 1); in handle_stripe_expansion()
4344 for (j = 0; j < conf->raid_disks; j++) in handle_stripe_expansion()
4349 if (j == conf->raid_disks) { in handle_stripe_expansion()
4376 struct r5conf *conf = sh->raid_conf; in analyse_stripe() local
4388 s->log_failed = r5l_log_disk_error(conf); in analyse_stripe()
4436 rdev = rcu_dereference(conf->disks[i].replacement); in analyse_stripe()
4447 rdev = rcu_dereference(conf->disks[i].rdev); in analyse_stripe()
4495 conf->disks[i].rdev); in analyse_stripe()
4508 conf->disks[i].rdev); in analyse_stripe()
4517 conf->disks[i].replacement); in analyse_stripe()
4539 conf->disks[i].replacement); in analyse_stripe()
4560 sh->sector >= conf->mddev->recovery_cp || in analyse_stripe()
4561 test_bit(MD_RECOVERY_REQUESTED, &(conf->mddev->recovery))) in analyse_stripe()
4671 struct r5conf *conf = sh->raid_conf; in handle_stripe() local
4723 test_bit(MD_SB_CHANGE_PENDING, &conf->mddev->sb_flags)) { in handle_stripe()
4735 rdev_dec_pending(s.blocked_rdev, conf->mddev); in handle_stripe()
4755 if (s.failed > conf->max_degraded || in handle_stripe()
4761 handle_failed_stripe(conf, sh, &s, disks); in handle_stripe()
4763 handle_failed_sync(conf, sh, &s); in handle_stripe()
4816 || conf->level < 6; in handle_stripe()
4827 handle_stripe_clean_event(conf, sh, disks); in handle_stripe()
4830 r5c_handle_cached_data_endio(conf, sh, disks); in handle_stripe()
4838 || (conf->level == 6 && s.to_write && s.failed) in handle_stripe()
4849 r5c_finish_stripe_write_out(conf, sh, &s); in handle_stripe()
4861 if (!r5c_is_writeback(conf->log)) { in handle_stripe()
4863 handle_stripe_dirtying(conf, sh, &s, disks); in handle_stripe()
4869 ret = r5c_try_caching_write(conf, sh, &s, in handle_stripe()
4882 ret = handle_stripe_dirtying(conf, sh, &s, in handle_stripe()
4899 if (conf->level == 6) in handle_stripe()
4900 handle_parity_checks6(conf, sh, &s, disks); in handle_stripe()
4902 handle_parity_checks5(conf, sh, &s, disks); in handle_stripe()
4909 for (i = 0; i < conf->raid_disks; i++) in handle_stripe()
4923 md_done_sync(conf->mddev, STRIPE_SECTORS, 1); in handle_stripe()
4926 wake_up(&conf->wait_for_overlap); in handle_stripe()
4932 if (s.failed <= conf->max_degraded && !conf->mddev->ro) in handle_stripe()
4956 = raid5_get_active_stripe(conf, sh->sector, 1, 1, 1); in handle_stripe()
4965 atomic_inc(&conf->preread_active_stripes); in handle_stripe()
4974 for (i = conf->raid_disks; i--; ) { in handle_stripe()
4984 sh->disks = conf->raid_disks; in handle_stripe()
4985 stripe_set_idx(sh->sector, conf, 0, sh); in handle_stripe()
4989 atomic_dec(&conf->reshape_stripes); in handle_stripe()
4990 wake_up(&conf->wait_for_overlap); in handle_stripe()
4991 md_done_sync(conf->mddev, STRIPE_SECTORS, 1); in handle_stripe()
4996 handle_stripe_expansion(conf, sh); in handle_stripe()
5001 if (conf->mddev->external) in handle_stripe()
5003 conf->mddev); in handle_stripe()
5010 conf->mddev); in handle_stripe()
5019 rdev = conf->disks[i].rdev; in handle_stripe()
5022 md_error(conf->mddev, rdev); in handle_stripe()
5023 rdev_dec_pending(rdev, conf->mddev); in handle_stripe()
5026 rdev = conf->disks[i].rdev; in handle_stripe()
5029 rdev_dec_pending(rdev, conf->mddev); in handle_stripe()
5032 rdev = conf->disks[i].replacement; in handle_stripe()
5035 rdev = conf->disks[i].rdev; in handle_stripe()
5038 rdev_dec_pending(rdev, conf->mddev); in handle_stripe()
5052 atomic_dec(&conf->preread_active_stripes); in handle_stripe()
5053 if (atomic_read(&conf->preread_active_stripes) < in handle_stripe()
5055 md_wakeup_thread(conf->mddev->thread); in handle_stripe()
5061 static void raid5_activate_delayed(struct r5conf *conf) in raid5_activate_delayed() argument
5063 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) { in raid5_activate_delayed()
5064 while (!list_empty(&conf->delayed_list)) { in raid5_activate_delayed()
5065 struct list_head *l = conf->delayed_list.next; in raid5_activate_delayed()
5071 atomic_inc(&conf->preread_active_stripes); in raid5_activate_delayed()
5072 list_add_tail(&sh->lru, &conf->hold_list); in raid5_activate_delayed()
5078 static void activate_bit_delay(struct r5conf *conf, in activate_bit_delay() argument
5083 list_add(&head, &conf->bitmap_list); in activate_bit_delay()
5084 list_del_init(&conf->bitmap_list); in activate_bit_delay()
5091 __release_stripe(conf, sh, &temp_inactive_list[hash]); in activate_bit_delay()
5097 struct r5conf *conf = mddev->private; in raid5_congested() local
5103 if (test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state)) in raid5_congested()
5107 if (test_bit(R5C_LOG_TIGHT, &conf->cache_state)) in raid5_congested()
5109 if (conf->quiesce) in raid5_congested()
5111 if (atomic_read(&conf->empty_inactive_list_nr)) in raid5_congested()
5119 struct r5conf *conf = mddev->private; in in_chunk_boundary() local
5126 chunk_sectors = min(conf->chunk_sectors, conf->prev_chunk_sectors); in in_chunk_boundary()
5135 static void add_bio_to_retry(struct bio *bi,struct r5conf *conf) in add_bio_to_retry() argument
5139 spin_lock_irqsave(&conf->device_lock, flags); in add_bio_to_retry()
5141 bi->bi_next = conf->retry_read_aligned_list; in add_bio_to_retry()
5142 conf->retry_read_aligned_list = bi; in add_bio_to_retry()
5144 spin_unlock_irqrestore(&conf->device_lock, flags); in add_bio_to_retry()
5145 md_wakeup_thread(conf->mddev->thread); in add_bio_to_retry()
5148 static struct bio *remove_bio_from_retry(struct r5conf *conf, in remove_bio_from_retry() argument
5153 bi = conf->retry_read_aligned; in remove_bio_from_retry()
5155 *offset = conf->retry_read_offset; in remove_bio_from_retry()
5156 conf->retry_read_aligned = NULL; in remove_bio_from_retry()
5159 bi = conf->retry_read_aligned_list; in remove_bio_from_retry()
5161 conf->retry_read_aligned_list = bi->bi_next; in remove_bio_from_retry()
5179 struct r5conf *conf; in raid5_align_endio() local
5188 conf = mddev->private; in raid5_align_endio()
5190 rdev_dec_pending(rdev, conf->mddev); in raid5_align_endio()
5194 if (atomic_dec_and_test(&conf->active_aligned_reads)) in raid5_align_endio()
5195 wake_up(&conf->wait_for_quiescent); in raid5_align_endio()
5201 add_bio_to_retry(raid_bi, conf); in raid5_align_endio()
5206 struct r5conf *conf = mddev->private; in raid5_read_one_chunk() local
5232 raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector, in raid5_read_one_chunk()
5237 rdev = rcu_dereference(conf->disks[dd_idx].replacement); in raid5_read_one_chunk()
5240 rdev = rcu_dereference(conf->disks[dd_idx].rdev); in raid5_read_one_chunk()
5248 if (r5c_big_stripe_cached(conf, align_bi->bi_iter.bi_sector)) { in raid5_read_one_chunk()
5274 spin_lock_irq(&conf->device_lock); in raid5_read_one_chunk()
5275 wait_event_lock_irq(conf->wait_for_quiescent, in raid5_read_one_chunk()
5276 conf->quiesce == 0, in raid5_read_one_chunk()
5277 conf->device_lock); in raid5_read_one_chunk()
5278 atomic_inc(&conf->active_aligned_reads); in raid5_read_one_chunk()
5279 spin_unlock_irq(&conf->device_lock); in raid5_read_one_chunk()
5302 struct r5conf *conf = mddev->private; in chunk_aligned_read() local
5303 split = bio_split(raid_bio, sectors, GFP_NOIO, &conf->bio_split); in chunk_aligned_read()
5325 static struct stripe_head *__get_priority_stripe(struct r5conf *conf, int group) in __get_priority_stripe() argument
5330 bool second_try = !r5c_is_writeback(conf->log) && in __get_priority_stripe()
5331 !r5l_log_disk_error(conf); in __get_priority_stripe()
5332 bool try_loprio = test_bit(R5C_LOG_TIGHT, &conf->cache_state) || in __get_priority_stripe()
5333 r5l_log_disk_error(conf); in __get_priority_stripe()
5338 if (conf->worker_cnt_per_group == 0) { in __get_priority_stripe()
5339 handle_list = try_loprio ? &conf->loprio_list : in __get_priority_stripe()
5340 &conf->handle_list; in __get_priority_stripe()
5342 handle_list = try_loprio ? &conf->worker_groups[group].loprio_list : in __get_priority_stripe()
5343 &conf->worker_groups[group].handle_list; in __get_priority_stripe()
5344 wg = &conf->worker_groups[group]; in __get_priority_stripe()
5347 for (i = 0; i < conf->group_cnt; i++) { in __get_priority_stripe()
5348 handle_list = try_loprio ? &conf->worker_groups[i].loprio_list : in __get_priority_stripe()
5349 &conf->worker_groups[i].handle_list; in __get_priority_stripe()
5350 wg = &conf->worker_groups[i]; in __get_priority_stripe()
5359 list_empty(&conf->hold_list) ? "empty" : "busy", in __get_priority_stripe()
5360 atomic_read(&conf->pending_full_writes), conf->bypass_count); in __get_priority_stripe()
5365 if (list_empty(&conf->hold_list)) in __get_priority_stripe()
5366 conf->bypass_count = 0; in __get_priority_stripe()
5368 if (conf->hold_list.next == conf->last_hold) in __get_priority_stripe()
5369 conf->bypass_count++; in __get_priority_stripe()
5371 conf->last_hold = conf->hold_list.next; in __get_priority_stripe()
5372 conf->bypass_count -= conf->bypass_threshold; in __get_priority_stripe()
5373 if (conf->bypass_count < 0) in __get_priority_stripe()
5374 conf->bypass_count = 0; in __get_priority_stripe()
5377 } else if (!list_empty(&conf->hold_list) && in __get_priority_stripe()
5378 ((conf->bypass_threshold && in __get_priority_stripe()
5379 conf->bypass_count > conf->bypass_threshold) || in __get_priority_stripe()
5380 atomic_read(&conf->pending_full_writes) == 0)) { in __get_priority_stripe()
5382 list_for_each_entry(tmp, &conf->hold_list, lru) { in __get_priority_stripe()
5383 if (conf->worker_cnt_per_group == 0 || in __get_priority_stripe()
5393 conf->bypass_count -= conf->bypass_threshold; in __get_priority_stripe()
5394 if (conf->bypass_count < 0) in __get_priority_stripe()
5395 conf->bypass_count = 0; in __get_priority_stripe()
5429 struct r5conf *conf = mddev->private; in raid5_unplug() local
5434 spin_lock_irq(&conf->device_lock); in raid5_unplug()
5450 __release_stripe(conf, sh, &cb->temp_inactive_list[hash]); in raid5_unplug()
5453 spin_unlock_irq(&conf->device_lock); in raid5_unplug()
5455 release_inactive_stripe_list(conf, cb->temp_inactive_list, in raid5_unplug()
5492 struct r5conf *conf = mddev->private; in make_discard_request() local
5506 stripe_sectors = conf->chunk_sectors * in make_discard_request()
5507 (conf->raid_disks - conf->max_degraded); in make_discard_request()
5512 logical_sector *= conf->chunk_sectors; in make_discard_request()
5513 last_sector *= conf->chunk_sectors; in make_discard_request()
5520 sh = raid5_get_active_stripe(conf, logical_sector, 0, 0, 0); in make_discard_request()
5521 prepare_to_wait(&conf->wait_for_overlap, &w, in make_discard_request()
5531 for (d = 0; d < conf->raid_disks; d++) { in make_discard_request()
5543 finish_wait(&conf->wait_for_overlap, &w); in make_discard_request()
5545 for (d = 0; d < conf->raid_disks; d++) { in make_discard_request()
5555 if (conf->mddev->bitmap) { in make_discard_request()
5557 d < conf->raid_disks - conf->max_degraded; in make_discard_request()
5563 sh->bm_seq = conf->seq_flush + 1; in make_discard_request()
5570 atomic_inc(&conf->preread_active_stripes); in make_discard_request()
5579 struct r5conf *conf = mddev->private; in raid5_make_request() local
5590 int ret = log_handle_flush_request(conf, bi); in raid5_make_request()
5630 prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE); in raid5_make_request()
5637 seq = read_seqcount_begin(&conf->gen_lock); in raid5_make_request()
5640 prepare_to_wait(&conf->wait_for_overlap, &w, in raid5_make_request()
5642 if (unlikely(conf->reshape_progress != MaxSector)) { in raid5_make_request()
5651 spin_lock_irq(&conf->device_lock); in raid5_make_request()
5653 ? logical_sector < conf->reshape_progress in raid5_make_request()
5654 : logical_sector >= conf->reshape_progress) { in raid5_make_request()
5658 ? logical_sector < conf->reshape_safe in raid5_make_request()
5659 : logical_sector >= conf->reshape_safe) { in raid5_make_request()
5660 spin_unlock_irq(&conf->device_lock); in raid5_make_request()
5666 spin_unlock_irq(&conf->device_lock); in raid5_make_request()
5669 new_sector = raid5_compute_sector(conf, logical_sector, in raid5_make_request()
5676 sh = raid5_get_active_stripe(conf, new_sector, previous, in raid5_make_request()
5689 spin_lock_irq(&conf->device_lock); in raid5_make_request()
5691 ? logical_sector >= conf->reshape_progress in raid5_make_request()
5692 : logical_sector < conf->reshape_progress) in raid5_make_request()
5695 spin_unlock_irq(&conf->device_lock); in raid5_make_request()
5703 if (read_seqcount_retry(&conf->gen_lock, seq)) { in raid5_make_request()
5735 atomic_inc(&conf->preread_active_stripes); in raid5_make_request()
5743 finish_wait(&conf->wait_for_overlap, &w); in raid5_make_request()
5764 struct r5conf *conf = mddev->private; in reshape_request() local
5768 int raid_disks = conf->previous_raid_disks; in reshape_request()
5769 int data_disks = raid_disks - conf->max_degraded; in reshape_request()
5770 int new_data_disks = conf->raid_disks - conf->max_degraded; in reshape_request()
5782 conf->reshape_progress < raid5_size(mddev, 0, 0)) { in reshape_request()
5784 - conf->reshape_progress; in reshape_request()
5786 conf->reshape_progress == MaxSector) { in reshape_request()
5790 conf->reshape_progress > 0) in reshape_request()
5791 sector_nr = conf->reshape_progress; in reshape_request()
5807 reshape_sectors = max(conf->chunk_sectors, conf->prev_chunk_sectors); in reshape_request()
5815 writepos = conf->reshape_progress; in reshape_request()
5817 readpos = conf->reshape_progress; in reshape_request()
5819 safepos = conf->reshape_safe; in reshape_request()
5840 BUG_ON(conf->reshape_progress == 0); in reshape_request()
5871 if (conf->min_offset_diff < 0) { in reshape_request()
5872 safepos += -conf->min_offset_diff; in reshape_request()
5873 readpos += -conf->min_offset_diff; in reshape_request()
5875 writepos += conf->min_offset_diff; in reshape_request()
5880 time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) { in reshape_request()
5882 wait_event(conf->wait_for_overlap, in reshape_request()
5883 atomic_read(&conf->reshape_stripes)==0 in reshape_request()
5885 if (atomic_read(&conf->reshape_stripes) != 0) in reshape_request()
5887 mddev->reshape_position = conf->reshape_progress; in reshape_request()
5898 conf->reshape_checkpoint = jiffies; in reshape_request()
5905 spin_lock_irq(&conf->device_lock); in reshape_request()
5906 conf->reshape_safe = mddev->reshape_position; in reshape_request()
5907 spin_unlock_irq(&conf->device_lock); in reshape_request()
5908 wake_up(&conf->wait_for_overlap); in reshape_request()
5916 sh = raid5_get_active_stripe(conf, stripe_addr+i, 0, 0, 1); in reshape_request()
5918 atomic_inc(&conf->reshape_stripes); in reshape_request()
5926 if (conf->level == 6 && in reshape_request()
5944 spin_lock_irq(&conf->device_lock); in reshape_request()
5946 conf->reshape_progress -= reshape_sectors * new_data_disks; in reshape_request()
5948 conf->reshape_progress += reshape_sectors * new_data_disks; in reshape_request()
5949 spin_unlock_irq(&conf->device_lock); in reshape_request()
5956 raid5_compute_sector(conf, stripe_addr*(new_data_disks), in reshape_request()
5959 raid5_compute_sector(conf, ((stripe_addr+reshape_sectors) in reshape_request()
5965 sh = raid5_get_active_stripe(conf, first_sector, 1, 0, 1); in reshape_request()
5989 wait_event(conf->wait_for_overlap, in reshape_request()
5990 atomic_read(&conf->reshape_stripes) == 0 in reshape_request()
5992 if (atomic_read(&conf->reshape_stripes) != 0) in reshape_request()
5994 mddev->reshape_position = conf->reshape_progress; in reshape_request()
6004 conf->reshape_checkpoint = jiffies; in reshape_request()
6012 spin_lock_irq(&conf->device_lock); in reshape_request()
6013 conf->reshape_safe = mddev->reshape_position; in reshape_request()
6014 spin_unlock_irq(&conf->device_lock); in reshape_request()
6015 wake_up(&conf->wait_for_overlap); in reshape_request()
6025 struct r5conf *conf = mddev->private; in raid5_sync_request() local
6036 end_reshape(conf); in raid5_sync_request()
6044 conf->fullsync = 0; in raid5_sync_request()
6051 wait_event(conf->wait_for_overlap, conf->quiesce != 2); in raid5_sync_request()
6066 if (mddev->degraded >= conf->max_degraded && in raid5_sync_request()
6073 !conf->fullsync && in raid5_sync_request()
6084 sh = raid5_get_active_stripe(conf, sector_nr, 0, 1, 0); in raid5_sync_request()
6086 sh = raid5_get_active_stripe(conf, sector_nr, 0, 0, 0); in raid5_sync_request()
6097 for (i = 0; i < conf->raid_disks; i++) { in raid5_sync_request()
6098 struct md_rdev *rdev = READ_ONCE(conf->disks[i].rdev); in raid5_sync_request()
6115 static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio, in retry_aligned_read() argument
6136 sector = raid5_compute_sector(conf, logical_sector, in retry_aligned_read()
6149 sh = raid5_get_active_stripe(conf, sector, 0, 1, 1); in retry_aligned_read()
6153 conf->retry_read_aligned = raid_bio; in retry_aligned_read()
6154 conf->retry_read_offset = scnt; in retry_aligned_read()
6160 conf->retry_read_aligned = raid_bio; in retry_aligned_read()
6161 conf->retry_read_offset = scnt; in retry_aligned_read()
6173 if (atomic_dec_and_test(&conf->active_aligned_reads)) in retry_aligned_read()
6174 wake_up(&conf->wait_for_quiescent); in retry_aligned_read()
6178 static int handle_active_stripes(struct r5conf *conf, int group, in handle_active_stripes() argument
6181 __releases(&conf->device_lock) in handle_active_stripes()
6182 __acquires(&conf->device_lock) in handle_active_stripes()
6189 (sh = __get_priority_stripe(conf, group)) != NULL) in handle_active_stripes()
6197 spin_unlock_irq(&conf->device_lock); in handle_active_stripes()
6198 log_flush_stripe_to_raid(conf); in handle_active_stripes()
6199 spin_lock_irq(&conf->device_lock); in handle_active_stripes()
6204 spin_unlock_irq(&conf->device_lock); in handle_active_stripes()
6206 release_inactive_stripe_list(conf, temp_inactive_list, in handle_active_stripes()
6209 r5l_flush_stripe_to_raid(conf->log); in handle_active_stripes()
6211 spin_lock_irq(&conf->device_lock); in handle_active_stripes()
6217 log_write_stripe_run(conf); in handle_active_stripes()
6221 spin_lock_irq(&conf->device_lock); in handle_active_stripes()
6224 __release_stripe(conf, batch[i], &temp_inactive_list[hash]); in handle_active_stripes()
6233 struct r5conf *conf = group->conf; in raid5_do_work() local
6234 struct mddev *mddev = conf->mddev; in raid5_do_work()
6235 int group_id = group - conf->worker_groups; in raid5_do_work()
6243 spin_lock_irq(&conf->device_lock); in raid5_do_work()
6247 released = release_stripe_list(conf, worker->temp_inactive_list); in raid5_do_work()
6249 batch_size = handle_active_stripes(conf, group_id, worker, in raid5_do_work()
6257 conf->device_lock); in raid5_do_work()
6261 spin_unlock_irq(&conf->device_lock); in raid5_do_work()
6263 flush_deferred_bios(conf); in raid5_do_work()
6265 r5l_flush_stripe_to_raid(conf->log); in raid5_do_work()
6283 struct r5conf *conf = mddev->private; in raid5d() local
6293 spin_lock_irq(&conf->device_lock); in raid5d()
6299 released = release_stripe_list(conf, conf->temp_inactive_list); in raid5d()
6301 clear_bit(R5_DID_ALLOC, &conf->cache_state); in raid5d()
6304 !list_empty(&conf->bitmap_list)) { in raid5d()
6306 conf->seq_flush++; in raid5d()
6307 spin_unlock_irq(&conf->device_lock); in raid5d()
6309 spin_lock_irq(&conf->device_lock); in raid5d()
6310 conf->seq_write = conf->seq_flush; in raid5d()
6311 activate_bit_delay(conf, conf->temp_inactive_list); in raid5d()
6313 raid5_activate_delayed(conf); in raid5d()
6315 while ((bio = remove_bio_from_retry(conf, &offset))) { in raid5d()
6317 spin_unlock_irq(&conf->device_lock); in raid5d()
6318 ok = retry_aligned_read(conf, bio, offset); in raid5d()
6319 spin_lock_irq(&conf->device_lock); in raid5d()
6325 batch_size = handle_active_stripes(conf, ANY_GROUP, NULL, in raid5d()
6326 conf->temp_inactive_list); in raid5d()
6332 spin_unlock_irq(&conf->device_lock); in raid5d()
6334 spin_lock_irq(&conf->device_lock); in raid5d()
6339 spin_unlock_irq(&conf->device_lock); in raid5d()
6340 if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state) && in raid5d()
6341 mutex_trylock(&conf->cache_size_mutex)) { in raid5d()
6342 grow_one_stripe(conf, __GFP_NOWARN); in raid5d()
6346 set_bit(R5_DID_ALLOC, &conf->cache_state); in raid5d()
6347 mutex_unlock(&conf->cache_size_mutex); in raid5d()
6350 flush_deferred_bios(conf); in raid5d()
6352 r5l_flush_stripe_to_raid(conf->log); in raid5d()
6363 struct r5conf *conf; in raid5_show_stripe_cache_size() local
6366 conf = mddev->private; in raid5_show_stripe_cache_size()
6367 if (conf) in raid5_show_stripe_cache_size()
6368 ret = sprintf(page, "%d\n", conf->min_nr_stripes); in raid5_show_stripe_cache_size()
6377 struct r5conf *conf = mddev->private; in raid5_set_cache_size() local
6382 conf->min_nr_stripes = size; in raid5_set_cache_size()
6383 mutex_lock(&conf->cache_size_mutex); in raid5_set_cache_size()
6384 while (size < conf->max_nr_stripes && in raid5_set_cache_size()
6385 drop_one_stripe(conf)) in raid5_set_cache_size()
6387 mutex_unlock(&conf->cache_size_mutex); in raid5_set_cache_size()
6391 mutex_lock(&conf->cache_size_mutex); in raid5_set_cache_size()
6392 while (size > conf->max_nr_stripes) in raid5_set_cache_size()
6393 if (!grow_one_stripe(conf, GFP_KERNEL)) { in raid5_set_cache_size()
6394 conf->min_nr_stripes = conf->max_nr_stripes; in raid5_set_cache_size()
6398 mutex_unlock(&conf->cache_size_mutex); in raid5_set_cache_size()
6407 struct r5conf *conf; in raid5_store_stripe_cache_size() local
6418 conf = mddev->private; in raid5_store_stripe_cache_size()
6419 if (!conf) in raid5_store_stripe_cache_size()
6436 struct r5conf *conf = mddev->private; in raid5_show_rmw_level() local
6437 if (conf) in raid5_show_rmw_level()
6438 return sprintf(page, "%d\n", conf->rmw_level); in raid5_show_rmw_level()
6446 struct r5conf *conf = mddev->private; in raid5_store_rmw_level() local
6449 if (!conf) in raid5_store_rmw_level()
6466 conf->rmw_level = new; in raid5_store_rmw_level()
6479 struct r5conf *conf; in raid5_show_preread_threshold() local
6482 conf = mddev->private; in raid5_show_preread_threshold()
6483 if (conf) in raid5_show_preread_threshold()
6484 ret = sprintf(page, "%d\n", conf->bypass_threshold); in raid5_show_preread_threshold()
6492 struct r5conf *conf; in raid5_store_preread_threshold() local
6504 conf = mddev->private; in raid5_store_preread_threshold()
6505 if (!conf) in raid5_store_preread_threshold()
6507 else if (new > conf->min_nr_stripes) in raid5_store_preread_threshold()
6510 conf->bypass_threshold = new; in raid5_store_preread_threshold()
6524 struct r5conf *conf; in raid5_show_skip_copy() local
6527 conf = mddev->private; in raid5_show_skip_copy()
6528 if (conf) in raid5_show_skip_copy()
6529 ret = sprintf(page, "%d\n", conf->skip_copy); in raid5_show_skip_copy()
6537 struct r5conf *conf; in raid5_store_skip_copy() local
6550 conf = mddev->private; in raid5_store_skip_copy()
6551 if (!conf) in raid5_store_skip_copy()
6553 else if (new != conf->skip_copy) { in raid5_store_skip_copy()
6555 conf->skip_copy = new; in raid5_store_skip_copy()
6576 struct r5conf *conf = mddev->private; in stripe_cache_active_show() local
6577 if (conf) in stripe_cache_active_show()
6578 return sprintf(page, "%d\n", atomic_read(&conf->active_stripes)); in stripe_cache_active_show()
6589 struct r5conf *conf; in raid5_show_group_thread_cnt() local
6592 conf = mddev->private; in raid5_show_group_thread_cnt()
6593 if (conf) in raid5_show_group_thread_cnt()
6594 ret = sprintf(page, "%d\n", conf->worker_cnt_per_group); in raid5_show_group_thread_cnt()
6599 static int alloc_thread_groups(struct r5conf *conf, int cnt,
6606 struct r5conf *conf; in raid5_store_group_thread_cnt() local
6623 conf = mddev->private; in raid5_store_group_thread_cnt()
6624 if (!conf) in raid5_store_group_thread_cnt()
6626 else if (new != conf->worker_cnt_per_group) { in raid5_store_group_thread_cnt()
6629 old_groups = conf->worker_groups; in raid5_store_group_thread_cnt()
6633 err = alloc_thread_groups(conf, new, in raid5_store_group_thread_cnt()
6637 spin_lock_irq(&conf->device_lock); in raid5_store_group_thread_cnt()
6638 conf->group_cnt = group_cnt; in raid5_store_group_thread_cnt()
6639 conf->worker_cnt_per_group = worker_cnt_per_group; in raid5_store_group_thread_cnt()
6640 conf->worker_groups = new_groups; in raid5_store_group_thread_cnt()
6641 spin_unlock_irq(&conf->device_lock); in raid5_store_group_thread_cnt()
6675 static int alloc_thread_groups(struct r5conf *conf, int cnt, in alloc_thread_groups() argument
6707 group->conf = conf; in alloc_thread_groups()
6723 static void free_thread_groups(struct r5conf *conf) in free_thread_groups() argument
6725 if (conf->worker_groups) in free_thread_groups()
6726 kfree(conf->worker_groups[0].workers); in free_thread_groups()
6727 kfree(conf->worker_groups); in free_thread_groups()
6728 conf->worker_groups = NULL; in free_thread_groups()
6734 struct r5conf *conf = mddev->private; in raid5_size() local
6740 raid_disks = min(conf->raid_disks, conf->previous_raid_disks); in raid5_size()
6742 sectors &= ~((sector_t)conf->chunk_sectors - 1); in raid5_size()
6743 sectors &= ~((sector_t)conf->prev_chunk_sectors - 1); in raid5_size()
6744 return sectors * (raid_disks - conf->max_degraded); in raid5_size()
6747 static void free_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu) in free_scratch_buffer() argument
6755 static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu) in alloc_scratch_buffer() argument
6757 if (conf->level == 6 && !percpu->spare_page) { in alloc_scratch_buffer()
6764 max(conf->raid_disks, in alloc_scratch_buffer()
6765 conf->previous_raid_disks), in alloc_scratch_buffer()
6766 max(conf->chunk_sectors, in alloc_scratch_buffer()
6767 conf->prev_chunk_sectors) in alloc_scratch_buffer()
6770 free_scratch_buffer(conf, percpu); in alloc_scratch_buffer()
6779 struct r5conf *conf = hlist_entry_safe(node, struct r5conf, node); in raid456_cpu_dead() local
6781 free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu)); in raid456_cpu_dead()
6785 static void raid5_free_percpu(struct r5conf *conf) in raid5_free_percpu() argument
6787 if (!conf->percpu) in raid5_free_percpu()
6790 cpuhp_state_remove_instance(CPUHP_MD_RAID5_PREPARE, &conf->node); in raid5_free_percpu()
6791 free_percpu(conf->percpu); in raid5_free_percpu()
6794 static void free_conf(struct r5conf *conf) in free_conf() argument
6798 log_exit(conf); in free_conf()
6800 unregister_shrinker(&conf->shrinker); in free_conf()
6801 free_thread_groups(conf); in free_conf()
6802 shrink_stripes(conf); in free_conf()
6803 raid5_free_percpu(conf); in free_conf()
6804 for (i = 0; i < conf->pool_size; i++) in free_conf()
6805 if (conf->disks[i].extra_page) in free_conf()
6806 put_page(conf->disks[i].extra_page); in free_conf()
6807 kfree(conf->disks); in free_conf()
6808 bioset_exit(&conf->bio_split); in free_conf()
6809 kfree(conf->stripe_hashtbl); in free_conf()
6810 kfree(conf->pending_data); in free_conf()
6811 kfree(conf); in free_conf()
6816 struct r5conf *conf = hlist_entry_safe(node, struct r5conf, node); in raid456_cpu_up_prepare() local
6817 struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu); in raid456_cpu_up_prepare()
6819 if (alloc_scratch_buffer(conf, percpu)) { in raid456_cpu_up_prepare()
6827 static int raid5_alloc_percpu(struct r5conf *conf) in raid5_alloc_percpu() argument
6831 conf->percpu = alloc_percpu(struct raid5_percpu); in raid5_alloc_percpu()
6832 if (!conf->percpu) in raid5_alloc_percpu()
6835 err = cpuhp_state_add_instance(CPUHP_MD_RAID5_PREPARE, &conf->node); in raid5_alloc_percpu()
6837 conf->scribble_disks = max(conf->raid_disks, in raid5_alloc_percpu()
6838 conf->previous_raid_disks); in raid5_alloc_percpu()
6839 conf->scribble_sectors = max(conf->chunk_sectors, in raid5_alloc_percpu()
6840 conf->prev_chunk_sectors); in raid5_alloc_percpu()
6848 struct r5conf *conf = container_of(shrink, struct r5conf, shrinker); in raid5_cache_scan() local
6851 if (mutex_trylock(&conf->cache_size_mutex)) { in raid5_cache_scan()
6854 conf->max_nr_stripes > conf->min_nr_stripes) { in raid5_cache_scan()
6855 if (drop_one_stripe(conf) == 0) { in raid5_cache_scan()
6861 mutex_unlock(&conf->cache_size_mutex); in raid5_cache_scan()
6869 struct r5conf *conf = container_of(shrink, struct r5conf, shrinker); in raid5_cache_count() local
6871 if (conf->max_nr_stripes < conf->min_nr_stripes) in raid5_cache_count()
6874 return conf->max_nr_stripes - conf->min_nr_stripes; in raid5_cache_count()
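The two shrinker callbacks above follow the usual count/scan split: raid5_cache_count() reports how far the stripe cache could shrink (max_nr_stripes minus min_nr_stripes, or nothing once it is at the floor), and raid5_cache_scan() only reclaims when it can take the cache-size mutex without blocking, dropping one stripe per iteration. A compact user-space model of that contract, with illustrative names that are not kernel API (compile with -pthread):

    #include <stdio.h>
    #include <pthread.h>

    struct toy_cache {
        pthread_mutex_t cache_size_mutex;
        int max_nr_stripes;   /* current cache size */
        int min_nr_stripes;   /* floor the shrinker must not cross */
    };

    static long toy_cache_count(const struct toy_cache *c)
    {
        if (c->max_nr_stripes < c->min_nr_stripes)
            return 0;
        return c->max_nr_stripes - c->min_nr_stripes;
    }

    static long toy_cache_scan(struct toy_cache *c, long to_scan)
    {
        long freed = 0;

        /* only reclaim if the lock is free, mirroring mutex_trylock() */
        if (pthread_mutex_trylock(&c->cache_size_mutex) == 0) {
            while (to_scan-- > 0 && c->max_nr_stripes > c->min_nr_stripes) {
                c->max_nr_stripes--;   /* stands in for drop_one_stripe() */
                freed++;
            }
            pthread_mutex_unlock(&c->cache_size_mutex);
        }
        return freed;
    }

    int main(void)
    {
        struct toy_cache c = { PTHREAD_MUTEX_INITIALIZER, 512, 256 };

        printf("reclaimable: %ld\n", toy_cache_count(&c));
        printf("freed: %ld, now %d stripes\n",
               toy_cache_scan(&c, 100), c.max_nr_stripes);
        return 0;
    }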
6879 struct r5conf *conf; in setup_conf() local
6918 conf = kzalloc(sizeof(struct r5conf), GFP_KERNEL); in setup_conf()
6919 if (conf == NULL) in setup_conf()
6921 INIT_LIST_HEAD(&conf->free_list); in setup_conf()
6922 INIT_LIST_HEAD(&conf->pending_list); in setup_conf()
6923 conf->pending_data = kcalloc(PENDING_IO_MAX, in setup_conf()
6926 if (!conf->pending_data) in setup_conf()
6929 list_add(&conf->pending_data[i].sibling, &conf->free_list); in setup_conf()
6931 if (!alloc_thread_groups(conf, 0, &group_cnt, &worker_cnt_per_group, in setup_conf()
6933 conf->group_cnt = group_cnt; in setup_conf()
6934 conf->worker_cnt_per_group = worker_cnt_per_group; in setup_conf()
6935 conf->worker_groups = new_group; in setup_conf()
6938 spin_lock_init(&conf->device_lock); in setup_conf()
6939 seqcount_init(&conf->gen_lock); in setup_conf()
6940 mutex_init(&conf->cache_size_mutex); in setup_conf()
6941 init_waitqueue_head(&conf->wait_for_quiescent); in setup_conf()
6942 init_waitqueue_head(&conf->wait_for_stripe); in setup_conf()
6943 init_waitqueue_head(&conf->wait_for_overlap); in setup_conf()
6944 INIT_LIST_HEAD(&conf->handle_list); in setup_conf()
6945 INIT_LIST_HEAD(&conf->loprio_list); in setup_conf()
6946 INIT_LIST_HEAD(&conf->hold_list); in setup_conf()
6947 INIT_LIST_HEAD(&conf->delayed_list); in setup_conf()
6948 INIT_LIST_HEAD(&conf->bitmap_list); in setup_conf()
6949 init_llist_head(&conf->released_stripes); in setup_conf()
6950 atomic_set(&conf->active_stripes, 0); in setup_conf()
6951 atomic_set(&conf->preread_active_stripes, 0); in setup_conf()
6952 atomic_set(&conf->active_aligned_reads, 0); in setup_conf()
6953 spin_lock_init(&conf->pending_bios_lock); in setup_conf()
6954 conf->batch_bio_dispatch = true; in setup_conf()
6959 conf->batch_bio_dispatch = false; in setup_conf()
6964 conf->bypass_threshold = BYPASS_THRESHOLD; in setup_conf()
6965 conf->recovery_disabled = mddev->recovery_disabled - 1; in setup_conf()
6967 conf->raid_disks = mddev->raid_disks; in setup_conf()
6969 conf->previous_raid_disks = mddev->raid_disks; in setup_conf()
6971 conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks; in setup_conf()
6972 max_disks = max(conf->raid_disks, conf->previous_raid_disks); in setup_conf()
6974 conf->disks = kcalloc(max_disks, sizeof(struct disk_info), in setup_conf()
6977 if (!conf->disks) in setup_conf()
6981 conf->disks[i].extra_page = alloc_page(GFP_KERNEL); in setup_conf()
6982 if (!conf->disks[i].extra_page) in setup_conf()
6986 ret = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0); in setup_conf()
6989 conf->mddev = mddev; in setup_conf()
6991 if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL) in setup_conf()
6999 spin_lock_init(conf->hash_locks); in setup_conf()
7001 spin_lock_init(conf->hash_locks + i); in setup_conf()
7004 INIT_LIST_HEAD(conf->inactive_list + i); in setup_conf()
7007 INIT_LIST_HEAD(conf->temp_inactive_list + i); in setup_conf()
7009 atomic_set(&conf->r5c_cached_full_stripes, 0); in setup_conf()
7010 INIT_LIST_HEAD(&conf->r5c_full_stripe_list); in setup_conf()
7011 atomic_set(&conf->r5c_cached_partial_stripes, 0); in setup_conf()
7012 INIT_LIST_HEAD(&conf->r5c_partial_stripe_list); in setup_conf()
7013 atomic_set(&conf->r5c_flushing_full_stripes, 0); in setup_conf()
7014 atomic_set(&conf->r5c_flushing_partial_stripes, 0); in setup_conf()
7016 conf->level = mddev->new_level; in setup_conf()
7017 conf->chunk_sectors = mddev->new_chunk_sectors; in setup_conf()
7018 if (raid5_alloc_percpu(conf) != 0) in setup_conf()
7028 disk = conf->disks + raid_disk; in setup_conf()
7046 conf->fullsync = 1; in setup_conf()
7049 conf->level = mddev->new_level; in setup_conf()
7050 if (conf->level == 6) { in setup_conf()
7051 conf->max_degraded = 2; in setup_conf()
7053 conf->rmw_level = PARITY_ENABLE_RMW; in setup_conf()
7055 conf->rmw_level = PARITY_DISABLE_RMW; in setup_conf()
7057 conf->max_degraded = 1; in setup_conf()
7058 conf->rmw_level = PARITY_ENABLE_RMW; in setup_conf()
7060 conf->algorithm = mddev->new_layout; in setup_conf()
7061 conf->reshape_progress = mddev->reshape_position; in setup_conf()
7062 if (conf->reshape_progress != MaxSector) { in setup_conf()
7063 conf->prev_chunk_sectors = mddev->chunk_sectors; in setup_conf()
7064 conf->prev_algo = mddev->layout; in setup_conf()
7066 conf->prev_chunk_sectors = conf->chunk_sectors; in setup_conf()
7067 conf->prev_algo = conf->algorithm; in setup_conf()
7070 conf->min_nr_stripes = NR_STRIPES; in setup_conf()
7075 conf->min_nr_stripes = max(NR_STRIPES, stripes); in setup_conf()
7076 if (conf->min_nr_stripes != NR_STRIPES) in setup_conf()
7078 mdname(mddev), conf->min_nr_stripes); in setup_conf()
7080 memory = conf->min_nr_stripes * (sizeof(struct stripe_head) + in setup_conf()
7082 atomic_set(&conf->empty_inactive_list_nr, NR_STRIPE_HASH_LOCKS); in setup_conf()
7083 if (grow_stripes(conf, conf->min_nr_stripes)) { in setup_conf()
7094 conf->shrinker.seeks = DEFAULT_SEEKS * conf->raid_disks * 4; in setup_conf()
7095 conf->shrinker.scan_objects = raid5_cache_scan; in setup_conf()
7096 conf->shrinker.count_objects = raid5_cache_count; in setup_conf()
7097 conf->shrinker.batch = 128; in setup_conf()
7098 conf->shrinker.flags = 0; in setup_conf()
7099 if (register_shrinker(&conf->shrinker)) { in setup_conf()
7106 conf->thread = md_register_thread(raid5d, mddev, pers_name); in setup_conf()
7107 if (!conf->thread) { in setup_conf()
7113 return conf; in setup_conf()
7116 if (conf) { in setup_conf()
7117 free_conf(conf); in setup_conf()
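setup_conf() above is a long chain of allocations (pending_data, the per-disk extra_page pages, the bio_split set, the stripe hash table, per-CPU scratch space, the stripe cache, the shrinker, the raid5d thread), and every later failure funnels into the single free_conf() call on the error path, which releases whatever was acquired. A schematic sketch of that allocate-then-unwind shape; the fields and sizes are purely illustrative, not the kernel structures:

    #include <stdio.h>
    #include <stdlib.h>

    struct toy_conf {
        int *pending_data;
        int *disks;
        int *stripe_hashtbl;
    };

    /* release everything a partially built conf may hold */
    static void toy_free_conf(struct toy_conf *conf)
    {
        if (!conf)
            return;
        free(conf->stripe_hashtbl);
        free(conf->disks);
        free(conf->pending_data);
        free(conf);
    }

    static struct toy_conf *toy_setup_conf(void)
    {
        struct toy_conf *conf = calloc(1, sizeof(*conf));

        if (!conf)
            return NULL;
        conf->pending_data = calloc(256, sizeof(int));
        if (!conf->pending_data)
            goto abort;
        conf->disks = calloc(16, sizeof(int));
        if (!conf->disks)
            goto abort;
        conf->stripe_hashtbl = calloc(4096, sizeof(int));
        if (!conf->stripe_hashtbl)
            goto abort;
        return conf;

    abort:
        /* single unwind point, like the "if (conf) free_conf(conf)" tail above */
        toy_free_conf(conf);
        return NULL;
    }

    int main(void)
    {
        struct toy_conf *conf = toy_setup_conf();

        printf(conf ? "setup ok\n" : "setup failed\n");
        toy_free_conf(conf);
        return 0;
    }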
7151 struct r5conf *conf; in raid5_run() local
7292 conf = setup_conf(mddev); in raid5_run()
7294 conf = mddev->private; in raid5_run()
7296 if (IS_ERR(conf)) in raid5_run()
7297 return PTR_ERR(conf); in raid5_run()
7309 conf->min_offset_diff = min_offset_diff; in raid5_run()
7310 mddev->thread = conf->thread; in raid5_run()
7311 conf->thread = NULL; in raid5_run()
7312 mddev->private = conf; in raid5_run()
7314 for (i = 0; i < conf->raid_disks && conf->previous_raid_disks; in raid5_run()
7316 rdev = conf->disks[i].rdev; in raid5_run()
7317 if (!rdev && conf->disks[i].replacement) { in raid5_run()
7319 rdev = conf->disks[i].replacement; in raid5_run()
7320 conf->disks[i].replacement = NULL; in raid5_run()
7322 conf->disks[i].rdev = rdev; in raid5_run()
7326 if (conf->disks[i].replacement && in raid5_run()
7327 conf->reshape_progress != MaxSector) { in raid5_run()
7352 conf->algorithm, in raid5_run()
7353 conf->raid_disks, in raid5_run()
7354 conf->max_degraded)) in raid5_run()
7358 conf->prev_algo, in raid5_run()
7359 conf->previous_raid_disks, in raid5_run()
7360 conf->max_degraded)) in raid5_run()
7368 mddev->degraded = raid5_calc_degraded(conf); in raid5_run()
7370 if (has_failed(conf)) { in raid5_run()
7372 mdname(mddev), mddev->degraded, conf->raid_disks); in raid5_run()
7396 mdname(mddev), conf->level, in raid5_run()
7400 print_raid5_conf(conf); in raid5_run()
7402 if (conf->reshape_progress != MaxSector) { in raid5_run()
7403 conf->reshape_safe = conf->reshape_progress; in raid5_run()
7404 atomic_set(&conf->reshape_stripes, 0); in raid5_run()
7430 int data_disks = conf->previous_raid_disks - conf->max_degraded; in raid5_run()
7439 (conf->raid_disks - conf->max_degraded)); in raid5_run()
7490 if (log_init(conf, journal_dev, raid5_has_ppl(conf))) in raid5_run()
7496 print_raid5_conf(conf); in raid5_run()
7497 free_conf(conf); in raid5_run()
7505 struct r5conf *conf = priv; in raid5_free() local
7507 free_conf(conf); in raid5_free()
7513 struct r5conf *conf = mddev->private; in raid5_status() local
7517 conf->chunk_sectors / 2, mddev->layout); in raid5_status()
7518 seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded); in raid5_status()
7520 for (i = 0; i < conf->raid_disks; i++) { in raid5_status()
7521 struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev); in raid5_status()
7528 static void print_raid5_conf (struct r5conf *conf) in print_raid5_conf() argument
7534 if (!conf) { in print_raid5_conf()
7538 pr_debug(" --- level:%d rd:%d wd:%d\n", conf->level, in print_raid5_conf()
7539 conf->raid_disks, in print_raid5_conf()
7540 conf->raid_disks - conf->mddev->degraded); in print_raid5_conf()
7542 for (i = 0; i < conf->raid_disks; i++) { in print_raid5_conf()
7544 tmp = conf->disks + i; in print_raid5_conf()
7555 struct r5conf *conf = mddev->private; in raid5_spare_active() local
7560 for (i = 0; i < conf->raid_disks; i++) { in raid5_spare_active()
7561 tmp = conf->disks + i; in raid5_spare_active()
7588 spin_lock_irqsave(&conf->device_lock, flags); in raid5_spare_active()
7589 mddev->degraded = raid5_calc_degraded(conf); in raid5_spare_active()
7590 spin_unlock_irqrestore(&conf->device_lock, flags); in raid5_spare_active()
7591 print_raid5_conf(conf); in raid5_spare_active()
7597 struct r5conf *conf = mddev->private; in raid5_remove_disk() local
7601 struct disk_info *p = conf->disks + number; in raid5_remove_disk()
7603 print_raid5_conf(conf); in raid5_remove_disk()
7604 if (test_bit(Journal, &rdev->flags) && conf->log) { in raid5_remove_disk()
7611 if (atomic_read(&conf->active_stripes) || in raid5_remove_disk()
7612 atomic_read(&conf->r5c_cached_full_stripes) || in raid5_remove_disk()
7613 atomic_read(&conf->r5c_cached_partial_stripes)) { in raid5_remove_disk()
7616 log_exit(conf); in raid5_remove_disk()
7626 if (number >= conf->raid_disks && in raid5_remove_disk()
7627 conf->reshape_progress == MaxSector) in raid5_remove_disk()
7639 mddev->recovery_disabled != conf->recovery_disabled && in raid5_remove_disk()
7640 !has_failed(conf) && in raid5_remove_disk()
7642 number < conf->raid_disks) { in raid5_remove_disk()
7656 err = log_modify(conf, rdev, false); in raid5_remove_disk()
7670 err = log_modify(conf, p->rdev, true); in raid5_remove_disk()
7676 print_raid5_conf(conf); in raid5_remove_disk()
7682 struct r5conf *conf = mddev->private; in raid5_add_disk() local
7687 int last = conf->raid_disks - 1; in raid5_add_disk()
7690 if (conf->log) in raid5_add_disk()
7698 ret = log_init(conf, rdev, false); in raid5_add_disk()
7702 ret = r5l_start(conf->log); in raid5_add_disk()
7708 if (mddev->recovery_disabled == conf->recovery_disabled) in raid5_add_disk()
7711 if (rdev->saved_raid_disk < 0 && has_failed(conf)) in raid5_add_disk()
7724 conf->disks[rdev->saved_raid_disk].rdev == NULL) in raid5_add_disk()
7728 p = conf->disks + disk; in raid5_add_disk()
7733 conf->fullsync = 1; in raid5_add_disk()
7736 err = log_modify(conf, rdev, true); in raid5_add_disk()
7742 p = conf->disks + disk; in raid5_add_disk()
7749 conf->fullsync = 1; in raid5_add_disk()
7755 print_raid5_conf(conf); in raid5_add_disk()
7769 struct r5conf *conf = mddev->private; in raid5_resize() local
7771 if (raid5_has_log(conf) || raid5_has_ppl(conf)) in raid5_resize()
7773 sectors &= ~((sector_t)conf->chunk_sectors - 1); in raid5_resize()
7804 struct r5conf *conf = mddev->private; in check_stripe_cache() local
7806 > conf->min_nr_stripes || in check_stripe_cache()
7808 > conf->min_nr_stripes) { in check_stripe_cache()
7820 struct r5conf *conf = mddev->private; in check_reshape() local
7822 if (raid5_has_log(conf) || raid5_has_ppl(conf)) in check_reshape()
7828 if (has_failed(conf)) in check_reshape()
7848 if (resize_chunks(conf, in check_reshape()
7849 conf->previous_raid_disks in check_reshape()
7856 if (conf->previous_raid_disks + mddev->delta_disks <= conf->pool_size) in check_reshape()
7858 return resize_stripes(conf, (conf->previous_raid_disks in check_reshape()
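The two "> conf->min_nr_stripes" comparisons in check_stripe_cache() above refuse a reshape unless the stripe-cache floor can hold roughly four full chunks of both the old and the new layout. A hedged sketch of that rule; the 4 KiB stripe unit and the factor of four are how I recall the upstream check, so treat them as assumptions rather than facts from this listing:

    #include <stdio.h>

    #define TOY_STRIPE_SIZE 4096   /* assumed 4 KiB stripe unit */

    static int toy_check_stripe_cache(long chunk_sectors, long new_chunk_sectors,
                                      long min_nr_stripes)
    {
        long need_old = ((chunk_sectors << 9) / TOY_STRIPE_SIZE) * 4;
        long need_new = ((new_chunk_sectors << 9) / TOY_STRIPE_SIZE) * 4;

        if (need_old > min_nr_stripes || need_new > min_nr_stripes) {
            fprintf(stderr, "stripe cache too small: need %ld, have %ld\n",
                    need_old > need_new ? need_old : need_new,
                    min_nr_stripes);
            return 0;
        }
        return 1;
    }

    int main(void)
    {
        /* 512 KiB chunks (1024 sectors) reshaping to 1 MiB: a 256-stripe
         * floor is not enough, so the check fails. */
        printf("ok? %d\n", toy_check_stripe_cache(1024, 2048, 256));
        return 0;
    }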
7864 struct r5conf *conf = mddev->private; in raid5_start_reshape() local
7875 if (has_failed(conf)) in raid5_start_reshape()
7884 if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded) in raid5_start_reshape()
7894 if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks) in raid5_start_reshape()
7901 atomic_set(&conf->reshape_stripes, 0); in raid5_start_reshape()
7902 spin_lock_irq(&conf->device_lock); in raid5_start_reshape()
7903 write_seqcount_begin(&conf->gen_lock); in raid5_start_reshape()
7904 conf->previous_raid_disks = conf->raid_disks; in raid5_start_reshape()
7905 conf->raid_disks += mddev->delta_disks; in raid5_start_reshape()
7906 conf->prev_chunk_sectors = conf->chunk_sectors; in raid5_start_reshape()
7907 conf->chunk_sectors = mddev->new_chunk_sectors; in raid5_start_reshape()
7908 conf->prev_algo = conf->algorithm; in raid5_start_reshape()
7909 conf->algorithm = mddev->new_layout; in raid5_start_reshape()
7910 conf->generation++; in raid5_start_reshape()
7916 conf->reshape_progress = raid5_size(mddev, 0, 0); in raid5_start_reshape()
7918 conf->reshape_progress = 0; in raid5_start_reshape()
7919 conf->reshape_safe = conf->reshape_progress; in raid5_start_reshape()
7920 write_seqcount_end(&conf->gen_lock); in raid5_start_reshape()
7921 spin_unlock_irq(&conf->device_lock); in raid5_start_reshape()
7943 >= conf->previous_raid_disks) in raid5_start_reshape()
7951 } else if (rdev->raid_disk >= conf->previous_raid_disks in raid5_start_reshape()
7961 spin_lock_irqsave(&conf->device_lock, flags); in raid5_start_reshape()
7962 mddev->degraded = raid5_calc_degraded(conf); in raid5_start_reshape()
7963 spin_unlock_irqrestore(&conf->device_lock, flags); in raid5_start_reshape()
7965 mddev->raid_disks = conf->raid_disks; in raid5_start_reshape()
7966 mddev->reshape_position = conf->reshape_progress; in raid5_start_reshape()
7978 spin_lock_irq(&conf->device_lock); in raid5_start_reshape()
7979 write_seqcount_begin(&conf->gen_lock); in raid5_start_reshape()
7980 mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks; in raid5_start_reshape()
7982 conf->chunk_sectors = conf->prev_chunk_sectors; in raid5_start_reshape()
7983 mddev->new_layout = conf->algorithm = conf->prev_algo; in raid5_start_reshape()
7987 conf->generation --; in raid5_start_reshape()
7988 conf->reshape_progress = MaxSector; in raid5_start_reshape()
7990 write_seqcount_end(&conf->gen_lock); in raid5_start_reshape()
7991 spin_unlock_irq(&conf->device_lock); in raid5_start_reshape()
7994 conf->reshape_checkpoint = jiffies; in raid5_start_reshape()
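raid5_start_reshape() above swaps in the new geometry (disk count, chunk size, layout) under device_lock while bracketing the change with write_seqcount_begin/end on gen_lock, so readers sampling the geometry can detect a concurrent update and retry; the failure path reverses every field and decrements the generation. A minimal single-threaded model of that seqcount protocol with invented names; a real multi-threaded version would need atomics and memory barriers:

    #include <stdio.h>

    struct toy_geom {
        unsigned int seq;          /* odd while a writer is mid-update */
        int raid_disks;
        int chunk_sectors;
    };

    static void geom_update(struct toy_geom *g, int disks, int chunk)
    {
        g->seq++;                  /* like write_seqcount_begin() */
        g->raid_disks = disks;
        g->chunk_sectors = chunk;
        g->seq++;                  /* like write_seqcount_end() */
    }

    static void geom_read(const struct toy_geom *g, int *disks, int *chunk)
    {
        unsigned int start;

        do {
            start = g->seq;        /* retry if a writer was or became active */
            *disks = g->raid_disks;
            *chunk = g->chunk_sectors;
        } while ((start & 1) || start != g->seq);
    }

    int main(void)
    {
        struct toy_geom g = { 0, 4, 1024 };
        int disks, chunk;

        geom_update(&g, 5, 1024);  /* grow from 4 to 5 devices */
        geom_read(&g, &disks, &chunk);
        printf("geometry: %d disks, %d-sector chunks\n", disks, chunk);
        return 0;
    }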
8003 static void end_reshape(struct r5conf *conf) in end_reshape() argument
8006 if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { in end_reshape()
8009 spin_lock_irq(&conf->device_lock); in end_reshape()
8010 conf->previous_raid_disks = conf->raid_disks; in end_reshape()
8011 md_finish_reshape(conf->mddev); in end_reshape()
8013 conf->reshape_progress = MaxSector; in end_reshape()
8014 conf->mddev->reshape_position = MaxSector; in end_reshape()
8015 rdev_for_each(rdev, conf->mddev) in end_reshape()
8020 spin_unlock_irq(&conf->device_lock); in end_reshape()
8021 wake_up(&conf->wait_for_overlap); in end_reshape()
8026 if (conf->mddev->queue) { in end_reshape()
8027 int data_disks = conf->raid_disks - conf->max_degraded; in end_reshape()
8028 int stripe = data_disks * ((conf->chunk_sectors << 9) in end_reshape()
8030 if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe) in end_reshape()
8031 conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe; in end_reshape()
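The tail of end_reshape() above re-derives the read-ahead window from the new geometry: one full stripe spans data_disks * chunk_bytes, and ra_pages is raised to at least two stripes' worth of pages. A quick worked calculation under an assumed 4 KiB page size, with example numbers that are not from this listing:

    #include <stdio.h>

    #define TOY_PAGE_SIZE 4096   /* assumed page size */

    int main(void)
    {
        /* example: 6-device RAID6 (4 data disks) with 512 KiB chunks */
        int raid_disks = 6, max_degraded = 2;
        long chunk_sectors = 1024;

        int data_disks = raid_disks - max_degraded;
        long stripe_pages = data_disks *
                            ((chunk_sectors << 9) / TOY_PAGE_SIZE);

        /* end_reshape() raises ra_pages to twice this if it is smaller */
        printf("one full stripe = %ld pages, ra_pages floor = %ld\n",
               stripe_pages, 2 * stripe_pages);
        return 0;
    }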
8041 struct r5conf *conf = mddev->private; in raid5_finish_reshape() local
8047 spin_lock_irq(&conf->device_lock); in raid5_finish_reshape()
8048 mddev->degraded = raid5_calc_degraded(conf); in raid5_finish_reshape()
8049 spin_unlock_irq(&conf->device_lock); in raid5_finish_reshape()
8050 for (d = conf->raid_disks ; in raid5_finish_reshape()
8051 d < conf->raid_disks - mddev->delta_disks; in raid5_finish_reshape()
8053 struct md_rdev *rdev = conf->disks[d].rdev; in raid5_finish_reshape()
8056 rdev = conf->disks[d].replacement; in raid5_finish_reshape()
8061 mddev->layout = conf->algorithm; in raid5_finish_reshape()
8062 mddev->chunk_sectors = conf->chunk_sectors; in raid5_finish_reshape()
8071 struct r5conf *conf = mddev->private; in raid5_quiesce() local
8075 lock_all_device_hash_locks_irq(conf); in raid5_quiesce()
8079 r5c_flush_cache(conf, INT_MAX); in raid5_quiesce()
8080 conf->quiesce = 2; in raid5_quiesce()
8081 wait_event_cmd(conf->wait_for_quiescent, in raid5_quiesce()
8082 atomic_read(&conf->active_stripes) == 0 && in raid5_quiesce()
8083 atomic_read(&conf->active_aligned_reads) == 0, in raid5_quiesce()
8084 unlock_all_device_hash_locks_irq(conf), in raid5_quiesce()
8085 lock_all_device_hash_locks_irq(conf)); in raid5_quiesce()
8086 conf->quiesce = 1; in raid5_quiesce()
8087 unlock_all_device_hash_locks_irq(conf); in raid5_quiesce()
8089 wake_up(&conf->wait_for_overlap); in raid5_quiesce()
8092 lock_all_device_hash_locks_irq(conf); in raid5_quiesce()
8093 conf->quiesce = 0; in raid5_quiesce()
8094 wake_up(&conf->wait_for_quiescent); in raid5_quiesce()
8095 wake_up(&conf->wait_for_overlap); in raid5_quiesce()
8096 unlock_all_device_hash_locks_irq(conf); in raid5_quiesce()
8098 log_quiesce(conf, quiesce); in raid5_quiesce()
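raid5_quiesce() above is a small state machine on conf->quiesce: 2 while new requests are held and the code waits for active stripes and aligned reads to drain, 1 once the array is fully quiet, and 0 again on resume, when the waiters are woken. A rough single-threaded sketch of those transitions, with illustrative names and without the real hash-lock handling or wait queues:

    #include <stdio.h>

    struct toy_array {
        int quiesce;               /* 0 = running, 2 = draining, 1 = quiet */
        int active_stripes;
        int active_aligned_reads;
    };

    static void toy_quiesce(struct toy_array *a, int on)
    {
        if (on) {
            a->quiesce = 2;        /* block new requests */
            while (a->active_stripes || a->active_aligned_reads) {
                /* stand-in for wait_event_cmd(): pretend I/O drains */
                a->active_stripes = 0;
                a->active_aligned_reads = 0;
            }
            a->quiesce = 1;        /* array is fully quiet */
        } else {
            a->quiesce = 0;        /* resume and wake waiters */
        }
    }

    int main(void)
    {
        struct toy_array a = { 0, 3, 1 };

        toy_quiesce(&a, 1);
        printf("quiesce state: %d\n", a.quiesce);
        toy_quiesce(&a, 0);
        printf("quiesce state: %d\n", a.quiesce);
        return 0;
    }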
8199 struct r5conf *conf = mddev->private; in raid5_check_reshape() local
8219 conf->algorithm = mddev->new_layout; in raid5_check_reshape()
8223 conf->chunk_sectors = new_chunk ; in raid5_check_reshape()
8342 struct r5conf *conf; in raid5_change_consistency_policy() local
8348 conf = mddev->private; in raid5_change_consistency_policy()
8349 if (!conf) { in raid5_change_consistency_policy()
8356 if (!raid5_has_ppl(conf) && conf->level == 5) { in raid5_change_consistency_policy()
8357 err = log_init(conf, NULL, true); in raid5_change_consistency_policy()
8359 err = resize_stripes(conf, conf->pool_size); in raid5_change_consistency_policy()
8361 log_exit(conf); in raid5_change_consistency_policy()
8366 if (raid5_has_ppl(conf)) { in raid5_change_consistency_policy()
8368 log_exit(conf); in raid5_change_consistency_policy()
8370 err = resize_stripes(conf, conf->pool_size); in raid5_change_consistency_policy()
8371 } else if (test_bit(MD_HAS_JOURNAL, &conf->mddev->flags) && in raid5_change_consistency_policy()
8372 r5l_log_disk_error(conf)) { in raid5_change_consistency_policy()
8404 struct r5conf *conf = mddev->private; in raid5_start() local
8406 return r5l_start(conf->log); in raid5_start()