Lines matching refs: conf (drivers/md/raid5.c)
86 static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect) in stripe_hash() argument
89 return &conf->stripe_hashtbl[hash]; in stripe_hash()
97 static inline void lock_device_hash_lock(struct r5conf *conf, int hash) in lock_device_hash_lock() argument
99 spin_lock_irq(conf->hash_locks + hash); in lock_device_hash_lock()
100 spin_lock(&conf->device_lock); in lock_device_hash_lock()
103 static inline void unlock_device_hash_lock(struct r5conf *conf, int hash) in unlock_device_hash_lock() argument
105 spin_unlock(&conf->device_lock); in unlock_device_hash_lock()
106 spin_unlock_irq(conf->hash_locks + hash); in unlock_device_hash_lock()
109 static inline void lock_all_device_hash_locks_irq(struct r5conf *conf) in lock_all_device_hash_locks_irq() argument
113 spin_lock(conf->hash_locks); in lock_all_device_hash_locks_irq()
115 spin_lock_nest_lock(conf->hash_locks + i, conf->hash_locks); in lock_all_device_hash_locks_irq()
116 spin_lock(&conf->device_lock); in lock_all_device_hash_locks_irq()
119 static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf) in unlock_all_device_hash_locks_irq() argument
122 spin_unlock(&conf->device_lock); in unlock_all_device_hash_locks_irq()
124 spin_unlock(conf->hash_locks + i - 1); in unlock_all_device_hash_locks_irq()
240 static void print_raid5_conf (struct r5conf *conf);
251 struct r5conf *conf = sh->raid_conf; in raid5_wakeup_stripe_thread() local
263 group = conf->worker_groups + cpu_to_group(cpu); in raid5_wakeup_stripe_thread()
269 if (conf->worker_cnt_per_group == 0) { in raid5_wakeup_stripe_thread()
270 md_wakeup_thread(conf->mddev->thread); in raid5_wakeup_stripe_thread()
274 group = conf->worker_groups + cpu_to_group(sh->cpu); in raid5_wakeup_stripe_thread()
282 for (i = 1; i < conf->worker_cnt_per_group && thread_cnt > 0; i++) { in raid5_wakeup_stripe_thread()
292 static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh, in do_release_stripe() argument
296 BUG_ON(atomic_read(&conf->active_stripes)==0); in do_release_stripe()
300 list_add_tail(&sh->lru, &conf->delayed_list); in do_release_stripe()
301 if (atomic_read(&conf->preread_active_stripes) in do_release_stripe()
303 md_wakeup_thread(conf->mddev->thread); in do_release_stripe()
305 sh->bm_seq - conf->seq_write > 0) in do_release_stripe()
306 list_add_tail(&sh->lru, &conf->bitmap_list); in do_release_stripe()
310 if (conf->worker_cnt_per_group == 0) { in do_release_stripe()
311 list_add_tail(&sh->lru, &conf->handle_list); in do_release_stripe()
317 md_wakeup_thread(conf->mddev->thread); in do_release_stripe()
321 if (atomic_dec_return(&conf->preread_active_stripes) in do_release_stripe()
323 md_wakeup_thread(conf->mddev->thread); in do_release_stripe()
324 atomic_dec(&conf->active_stripes); in do_release_stripe()
330 static void __release_stripe(struct r5conf *conf, struct stripe_head *sh, in __release_stripe() argument
334 do_release_stripe(conf, sh, temp_inactive_list); in __release_stripe()
344 static void release_inactive_stripe_list(struct r5conf *conf, in release_inactive_stripe_list() argument
365 spin_lock_irqsave(conf->hash_locks + hash, flags); in release_inactive_stripe_list()
366 if (list_empty(conf->inactive_list + hash) && in release_inactive_stripe_list()
368 atomic_dec(&conf->empty_inactive_list_nr); in release_inactive_stripe_list()
369 list_splice_tail_init(list, conf->inactive_list + hash); in release_inactive_stripe_list()
371 spin_unlock_irqrestore(conf->hash_locks + hash, flags); in release_inactive_stripe_list()
378 wake_up(&conf->wait_for_stripe); in release_inactive_stripe_list()
379 if (conf->retry_read_aligned) in release_inactive_stripe_list()
380 md_wakeup_thread(conf->mddev->thread); in release_inactive_stripe_list()
385 static int release_stripe_list(struct r5conf *conf, in release_stripe_list() argument
392 head = llist_del_all(&conf->released_stripes); in release_stripe_list()
408 __release_stripe(conf, sh, &temp_inactive_list[hash]); in release_stripe_list()
417 struct r5conf *conf = sh->raid_conf; in release_stripe() local
428 if (unlikely(!conf->mddev->thread) || in release_stripe()
431 wakeup = llist_add(&sh->release_list, &conf->released_stripes); in release_stripe()
433 md_wakeup_thread(conf->mddev->thread); in release_stripe()
438 if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) { in release_stripe()
441 do_release_stripe(conf, sh, &list); in release_stripe()
442 spin_unlock(&conf->device_lock); in release_stripe()
443 release_inactive_stripe_list(conf, &list, hash); in release_stripe()
456 static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh) in insert_hash() argument
458 struct hlist_head *hp = stripe_hash(conf, sh->sector); in insert_hash()
467 static struct stripe_head *get_free_stripe(struct r5conf *conf, int hash) in get_free_stripe() argument
472 if (list_empty(conf->inactive_list + hash)) in get_free_stripe()
474 first = (conf->inactive_list + hash)->next; in get_free_stripe()
478 atomic_inc(&conf->active_stripes); in get_free_stripe()
480 if (list_empty(conf->inactive_list + hash)) in get_free_stripe()
481 atomic_inc(&conf->empty_inactive_list_nr); in get_free_stripe()
520 static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous,
525 struct r5conf *conf = sh->raid_conf; in init_stripe() local
535 seq = read_seqcount_begin(&conf->gen_lock); in init_stripe()
536 sh->generation = conf->generation - previous; in init_stripe()
537 sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks; in init_stripe()
539 stripe_set_idx(sector, conf, previous, sh); in init_stripe()
556 if (read_seqcount_retry(&conf->gen_lock, seq)) in init_stripe()
558 insert_hash(conf, sh); in init_stripe()
562 static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector, in __find_stripe() argument
568 hlist_for_each_entry(sh, stripe_hash(conf, sector), hash) in __find_stripe()
588 static int calc_degraded(struct r5conf *conf) in calc_degraded() argument
595 for (i = 0; i < conf->previous_raid_disks; i++) { in calc_degraded()
596 struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev); in calc_degraded()
598 rdev = rcu_dereference(conf->disks[i].replacement); in calc_degraded()
613 if (conf->raid_disks >= conf->previous_raid_disks) in calc_degraded()
617 if (conf->raid_disks == conf->previous_raid_disks) in calc_degraded()
621 for (i = 0; i < conf->raid_disks; i++) { in calc_degraded()
622 struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev); in calc_degraded()
624 rdev = rcu_dereference(conf->disks[i].replacement); in calc_degraded()
635 if (conf->raid_disks <= conf->previous_raid_disks) in calc_degraded()
644 static int has_failed(struct r5conf *conf) in has_failed() argument
648 if (conf->mddev->reshape_position == MaxSector) in has_failed()
649 return conf->mddev->degraded > conf->max_degraded; in has_failed()
651 degraded = calc_degraded(conf); in has_failed()
652 if (degraded > conf->max_degraded) in has_failed()
658 get_active_stripe(struct r5conf *conf, sector_t sector, in get_active_stripe() argument
666 spin_lock_irq(conf->hash_locks + hash); in get_active_stripe()
669 wait_event_lock_irq(conf->wait_for_stripe, in get_active_stripe()
670 conf->quiesce == 0 || noquiesce, in get_active_stripe()
671 *(conf->hash_locks + hash)); in get_active_stripe()
672 sh = __find_stripe(conf, sector, conf->generation - previous); in get_active_stripe()
674 if (!conf->inactive_blocked) in get_active_stripe()
675 sh = get_free_stripe(conf, hash); in get_active_stripe()
679 conf->inactive_blocked = 1; in get_active_stripe()
681 conf->wait_for_stripe, in get_active_stripe()
682 !list_empty(conf->inactive_list + hash) && in get_active_stripe()
683 (atomic_read(&conf->active_stripes) in get_active_stripe()
684 < (conf->max_nr_stripes * 3 / 4) in get_active_stripe()
685 || !conf->inactive_blocked), in get_active_stripe()
686 *(conf->hash_locks + hash)); in get_active_stripe()
687 conf->inactive_blocked = 0; in get_active_stripe()
693 spin_lock(&conf->device_lock); in get_active_stripe()
696 atomic_inc(&conf->active_stripes); in get_active_stripe()
706 spin_unlock(&conf->device_lock); in get_active_stripe()
710 spin_unlock_irq(conf->hash_locks + hash); in get_active_stripe()
717 static int use_new_offset(struct r5conf *conf, struct stripe_head *sh) in use_new_offset() argument
719 sector_t progress = conf->reshape_progress; in use_new_offset()
727 if (sh->generation == conf->generation - 1) in use_new_offset()
742 struct r5conf *conf = sh->raid_conf; in ops_run_io() local
774 rrdev = rcu_dereference(conf->disks[i].replacement); in ops_run_io()
776 rdev = rcu_dereference(conf->disks[i].rdev); in ops_run_io()
818 if (!conf->mddev->external && in ops_run_io()
819 conf->mddev->flags) { in ops_run_io()
824 md_check_recovery(conf->mddev); in ops_run_io()
832 md_wait_for_blocked_rdev(rdev, conf->mddev); in ops_run_io()
835 rdev_dec_pending(rdev, conf->mddev); in ops_run_io()
859 if (use_new_offset(conf, sh)) in ops_run_io()
884 if (conf->mddev->gendisk) in ops_run_io()
886 bi, disk_devt(conf->mddev->gendisk), in ops_run_io()
909 if (use_new_offset(conf, sh)) in ops_run_io()
928 if (conf->mddev->gendisk) in ops_run_io()
930 rbi, disk_devt(conf->mddev->gendisk), in ops_run_io()
1650 struct r5conf *conf = sh->raid_conf; in raid_run_ops() local
1651 int level = conf->level; in raid_run_ops()
1656 percpu = per_cpu_ptr(conf->percpu, cpu); in raid_run_ops()
1711 static int grow_one_stripe(struct r5conf *conf, int hash) in grow_one_stripe() argument
1714 sh = kmem_cache_zalloc(conf->slab_cache, GFP_KERNEL); in grow_one_stripe()
1718 sh->raid_conf = conf; in grow_one_stripe()
1724 kmem_cache_free(conf->slab_cache, sh); in grow_one_stripe()
1730 atomic_inc(&conf->active_stripes); in grow_one_stripe()
1736 static int grow_stripes(struct r5conf *conf, int num) in grow_stripes() argument
1739 int devs = max(conf->raid_disks, conf->previous_raid_disks); in grow_stripes()
1742 if (conf->mddev->gendisk) in grow_stripes()
1743 sprintf(conf->cache_name[0], in grow_stripes()
1744 "raid%d-%s", conf->level, mdname(conf->mddev)); in grow_stripes()
1746 sprintf(conf->cache_name[0], in grow_stripes()
1747 "raid%d-%p", conf->level, conf->mddev); in grow_stripes()
1748 sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]); in grow_stripes()
1750 conf->active_name = 0; in grow_stripes()
1751 sc = kmem_cache_create(conf->cache_name[conf->active_name], in grow_stripes()
1756 conf->slab_cache = sc; in grow_stripes()
1757 conf->pool_size = devs; in grow_stripes()
1758 hash = conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS; in grow_stripes()
1760 if (!grow_one_stripe(conf, hash)) in grow_stripes()
1762 conf->max_nr_stripes++; in grow_stripes()
1790 static int resize_stripes(struct r5conf *conf, int newsize) in resize_stripes() argument
1824 if (newsize <= conf->pool_size) in resize_stripes()
1827 err = md_allow_write(conf->mddev); in resize_stripes()
1832 sc = kmem_cache_create(conf->cache_name[1-conf->active_name], in resize_stripes()
1838 for (i = conf->max_nr_stripes; i; i--) { in resize_stripes()
1843 nsh->raid_conf = conf; in resize_stripes()
1865 lock_device_hash_lock(conf, hash); in resize_stripes()
1866 wait_event_cmd(conf->wait_for_stripe, in resize_stripes()
1867 !list_empty(conf->inactive_list + hash), in resize_stripes()
1868 unlock_device_hash_lock(conf, hash), in resize_stripes()
1869 lock_device_hash_lock(conf, hash)); in resize_stripes()
1870 osh = get_free_stripe(conf, hash); in resize_stripes()
1871 unlock_device_hash_lock(conf, hash); in resize_stripes()
1873 for(i=0; i<conf->pool_size; i++) { in resize_stripes()
1880 kmem_cache_free(conf->slab_cache, osh); in resize_stripes()
1882 if (cnt >= conf->max_nr_stripes / NR_STRIPE_HASH_LOCKS + in resize_stripes()
1883 !!((conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS) > hash)) { in resize_stripes()
1888 kmem_cache_destroy(conf->slab_cache); in resize_stripes()
1897 for (i=0; i<conf->raid_disks; i++) in resize_stripes()
1898 ndisks[i] = conf->disks[i]; in resize_stripes()
1899 kfree(conf->disks); in resize_stripes()
1900 conf->disks = ndisks; in resize_stripes()
1905 conf->scribble_len = scribble_len(newsize); in resize_stripes()
1910 percpu = per_cpu_ptr(conf->percpu, cpu); in resize_stripes()
1911 scribble = kmalloc(conf->scribble_len, GFP_NOIO); in resize_stripes()
1928 for (i=conf->raid_disks; i < newsize; i++) in resize_stripes()
1940 conf->slab_cache = sc; in resize_stripes()
1941 conf->active_name = 1-conf->active_name; in resize_stripes()
1943 conf->pool_size = newsize; in resize_stripes()
1947 static int drop_one_stripe(struct r5conf *conf, int hash) in drop_one_stripe() argument
1951 spin_lock_irq(conf->hash_locks + hash); in drop_one_stripe()
1952 sh = get_free_stripe(conf, hash); in drop_one_stripe()
1953 spin_unlock_irq(conf->hash_locks + hash); in drop_one_stripe()
1958 kmem_cache_free(conf->slab_cache, sh); in drop_one_stripe()
1959 atomic_dec(&conf->active_stripes); in drop_one_stripe()
1963 static void shrink_stripes(struct r5conf *conf) in shrink_stripes() argument
1967 while (drop_one_stripe(conf, hash)) in shrink_stripes()
1970 if (conf->slab_cache) in shrink_stripes()
1971 kmem_cache_destroy(conf->slab_cache); in shrink_stripes()
1972 conf->slab_cache = NULL; in shrink_stripes()
1978 struct r5conf *conf = sh->raid_conf; in raid5_end_read_request() local
2002 rdev = conf->disks[i].replacement; in raid5_end_read_request()
2004 rdev = conf->disks[i].rdev; in raid5_end_read_request()
2006 if (use_new_offset(conf, sh)) in raid5_end_read_request()
2021 mdname(conf->mddev), STRIPE_SECTORS, in raid5_end_read_request()
2044 mdname(conf->mddev), in raid5_end_read_request()
2047 else if (conf->mddev->degraded >= conf->max_degraded) { in raid5_end_read_request()
2053 mdname(conf->mddev), in raid5_end_read_request()
2063 mdname(conf->mddev), in raid5_end_read_request()
2067 > conf->max_nr_stripes) in raid5_end_read_request()
2070 mdname(conf->mddev), bdn); in raid5_end_read_request()
2089 md_error(conf->mddev, rdev); in raid5_end_read_request()
2092 rdev_dec_pending(rdev, conf->mddev); in raid5_end_read_request()
2101 struct r5conf *conf = sh->raid_conf; in raid5_end_write_request() local
2111 rdev = conf->disks[i].rdev; in raid5_end_write_request()
2115 rdev = conf->disks[i].replacement; in raid5_end_write_request()
2123 rdev = conf->disks[i].rdev; in raid5_end_write_request()
2137 md_error(conf->mddev, rdev); in raid5_end_write_request()
2162 rdev_dec_pending(rdev, conf->mddev); in raid5_end_write_request()
2193 struct r5conf *conf = mddev->private; in error() local
2197 spin_lock_irqsave(&conf->device_lock, flags); in error()
2199 mddev->degraded = calc_degraded(conf); in error()
2200 spin_unlock_irqrestore(&conf->device_lock, flags); in error()
2212 conf->raid_disks - mddev->degraded); in error()
2219 static sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector, in raid5_compute_sector() argument
2229 int algorithm = previous ? conf->prev_algo in raid5_compute_sector()
2230 : conf->algorithm; in raid5_compute_sector()
2231 int sectors_per_chunk = previous ? conf->prev_chunk_sectors in raid5_compute_sector()
2232 : conf->chunk_sectors; in raid5_compute_sector()
2233 int raid_disks = previous ? conf->previous_raid_disks in raid5_compute_sector()
2234 : conf->raid_disks; in raid5_compute_sector()
2235 int data_disks = raid_disks - conf->max_degraded; in raid5_compute_sector()
2255 switch(conf->level) { in raid5_compute_sector()
2423 struct r5conf *conf = sh->raid_conf; in compute_blocknr() local
2425 int data_disks = raid_disks - conf->max_degraded; in compute_blocknr()
2427 int sectors_per_chunk = previous ? conf->prev_chunk_sectors in compute_blocknr()
2428 : conf->chunk_sectors; in compute_blocknr()
2429 int algorithm = previous ? conf->prev_algo in compute_blocknr()
2430 : conf->algorithm; in compute_blocknr()
2443 switch(conf->level) { in compute_blocknr()
2530 check = raid5_compute_sector(conf, r_sector, in compute_blocknr()
2535 mdname(conf->mddev)); in compute_blocknr()
2546 struct r5conf *conf = sh->raid_conf; in schedule_reconstruction() local
2547 int level = conf->level; in schedule_reconstruction()
2577 if (s->locked + conf->max_degraded == disks) in schedule_reconstruction()
2579 atomic_inc(&conf->pending_full_writes); in schedule_reconstruction()
2637 struct r5conf *conf = sh->raid_conf; in add_stripe_bio() local
2692 if (conf->mddev->bitmap && firstwrite) { in add_stripe_bio()
2693 bitmap_startwrite(conf->mddev->bitmap, sh->sector, in add_stripe_bio()
2695 sh->bm_seq = conf->seq_flush+1; in add_stripe_bio()
2706 static void end_reshape(struct r5conf *conf);
2708 static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous, in stripe_set_idx() argument
2712 previous ? conf->prev_chunk_sectors : conf->chunk_sectors; in stripe_set_idx()
2715 int disks = previous ? conf->previous_raid_disks : conf->raid_disks; in stripe_set_idx()
2717 raid5_compute_sector(conf, in stripe_set_idx()
2718 stripe * (disks - conf->max_degraded) in stripe_set_idx()
2725 handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, in handle_failed_stripe() argument
2737 rdev = rcu_dereference(conf->disks[i].rdev); in handle_failed_stripe()
2748 md_error(conf->mddev, rdev); in handle_failed_stripe()
2749 rdev_dec_pending(rdev, conf->mddev); in handle_failed_stripe()
2761 wake_up(&conf->wait_for_overlap); in handle_failed_stripe()
2768 md_write_end(conf->mddev); in handle_failed_stripe()
2775 bitmap_endwrite(conf->mddev->bitmap, sh->sector, in handle_failed_stripe()
2792 md_write_end(conf->mddev); in handle_failed_stripe()
2810 wake_up(&conf->wait_for_overlap); in handle_failed_stripe()
2824 bitmap_endwrite(conf->mddev->bitmap, sh->sector, in handle_failed_stripe()
2833 if (atomic_dec_and_test(&conf->pending_full_writes)) in handle_failed_stripe()
2834 md_wakeup_thread(conf->mddev->thread); in handle_failed_stripe()
2838 handle_failed_sync(struct r5conf *conf, struct stripe_head *sh, in handle_failed_sync() argument
2846 wake_up(&conf->wait_for_overlap); in handle_failed_sync()
2856 if (test_bit(MD_RECOVERY_RECOVER, &conf->mddev->recovery)) { in handle_failed_sync()
2860 for (i = 0; i < conf->raid_disks; i++) { in handle_failed_sync()
2861 struct md_rdev *rdev = conf->disks[i].rdev; in handle_failed_sync()
2868 rdev = conf->disks[i].replacement; in handle_failed_sync()
2877 conf->recovery_disabled = in handle_failed_sync()
2878 conf->mddev->recovery_disabled; in handle_failed_sync()
2880 md_done_sync(conf->mddev, STRIPE_SECTORS, !abort); in handle_failed_sync()
3020 static void handle_stripe_clean_event(struct r5conf *conf, in handle_stripe_clean_event() argument
3049 md_write_end(conf->mddev); in handle_stripe_clean_event()
3055 bitmap_endwrite(conf->mddev->bitmap, sh->sector, in handle_stripe_clean_event()
3081 spin_lock_irq(conf->hash_locks + hash); in handle_stripe_clean_event()
3083 spin_unlock_irq(conf->hash_locks + hash); in handle_stripe_clean_event()
3090 if (atomic_dec_and_test(&conf->pending_full_writes)) in handle_stripe_clean_event()
3091 md_wakeup_thread(conf->mddev->thread); in handle_stripe_clean_event()
3094 static void handle_stripe_dirtying(struct r5conf *conf, in handle_stripe_dirtying() argument
3100 sector_t recovery_cp = conf->mddev->recovery_cp; in handle_stripe_dirtying()
3110 if (conf->max_degraded == 2 || in handle_stripe_dirtying()
3118 conf->max_degraded, (unsigned long long)recovery_cp, in handle_stripe_dirtying()
3148 if (conf->mddev->queue) in handle_stripe_dirtying()
3149 blk_add_trace_msg(conf->mddev->queue, in handle_stripe_dirtying()
3200 if (rcw && conf->mddev->queue) in handle_stripe_dirtying()
3201 blk_add_trace_msg(conf->mddev->queue, "raid5 rcw %llu %d %d %d", in handle_stripe_dirtying()
3226 static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh, in handle_parity_checks5() argument
3287 atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches); in handle_parity_checks5()
3288 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) in handle_parity_checks5()
3313 static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh, in handle_parity_checks6() argument
3438 atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches); in handle_parity_checks6()
3439 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) in handle_parity_checks6()
3476 static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh) in handle_stripe_expansion() argument
3492 sector_t s = raid5_compute_sector(conf, bn, 0, in handle_stripe_expansion()
3494 sh2 = get_active_stripe(conf, s, 0, 1, 1); in handle_stripe_expansion()
3516 for (j = 0; j < conf->raid_disks; j++) in handle_stripe_expansion()
3521 if (j == conf->raid_disks) { in handle_stripe_expansion()
3548 struct r5conf *conf = sh->raid_conf; in analyse_stripe() local
3607 rdev = rcu_dereference(conf->disks[i].replacement); in analyse_stripe()
3616 rdev = rcu_dereference(conf->disks[i].rdev); in analyse_stripe()
3664 conf->disks[i].rdev); in analyse_stripe()
3677 conf->disks[i].rdev); in analyse_stripe()
3686 conf->disks[i].replacement); in analyse_stripe()
3718 sh->sector >= conf->mddev->recovery_cp || in analyse_stripe()
3719 test_bit(MD_RECOVERY_REQUESTED, &(conf->mddev->recovery))) in analyse_stripe()
3730 struct r5conf *conf = sh->raid_conf; in handle_stripe() local
3777 rdev_dec_pending(s.blocked_rdev, conf->mddev); in handle_stripe()
3793 if (s.failed > conf->max_degraded) { in handle_stripe()
3797 handle_failed_stripe(conf, sh, &s, disks, &s.return_bi); in handle_stripe()
3799 handle_failed_sync(conf, sh, &s); in handle_stripe()
3851 || conf->level < 6; in handle_stripe()
3862 handle_stripe_clean_event(conf, sh, disks, &s.return_bi); in handle_stripe()
3869 || (conf->level == 6 && s.to_write && s.failed) in handle_stripe()
3882 handle_stripe_dirtying(conf, sh, &s, disks); in handle_stripe()
3893 if (conf->level == 6) in handle_stripe()
3894 handle_parity_checks6(conf, sh, &s, disks); in handle_stripe()
3896 handle_parity_checks5(conf, sh, &s, disks); in handle_stripe()
3903 for (i = 0; i < conf->raid_disks; i++) in handle_stripe()
3917 md_done_sync(conf->mddev, STRIPE_SECTORS, 1); in handle_stripe()
3920 wake_up(&conf->wait_for_overlap); in handle_stripe()
3926 if (s.failed <= conf->max_degraded && !conf->mddev->ro) in handle_stripe()
3950 = get_active_stripe(conf, sh->sector, 1, 1, 1); in handle_stripe()
3959 atomic_inc(&conf->preread_active_stripes); in handle_stripe()
3968 for (i = conf->raid_disks; i--; ) { in handle_stripe()
3978 sh->disks = conf->raid_disks; in handle_stripe()
3979 stripe_set_idx(sh->sector, conf, 0, sh); in handle_stripe()
3983 atomic_dec(&conf->reshape_stripes); in handle_stripe()
3984 wake_up(&conf->wait_for_overlap); in handle_stripe()
3985 md_done_sync(conf->mddev, STRIPE_SECTORS, 1); in handle_stripe()
3990 handle_stripe_expansion(conf, sh); in handle_stripe()
3995 if (conf->mddev->external) in handle_stripe()
3997 conf->mddev); in handle_stripe()
4004 conf->mddev); in handle_stripe()
4013 rdev = conf->disks[i].rdev; in handle_stripe()
4016 md_error(conf->mddev, rdev); in handle_stripe()
4017 rdev_dec_pending(rdev, conf->mddev); in handle_stripe()
4020 rdev = conf->disks[i].rdev; in handle_stripe()
4023 rdev_dec_pending(rdev, conf->mddev); in handle_stripe()
4026 rdev = conf->disks[i].replacement; in handle_stripe()
4029 rdev = conf->disks[i].rdev; in handle_stripe()
4032 rdev_dec_pending(rdev, conf->mddev); in handle_stripe()
4046 atomic_dec(&conf->preread_active_stripes); in handle_stripe()
4047 if (atomic_read(&conf->preread_active_stripes) < in handle_stripe()
4049 md_wakeup_thread(conf->mddev->thread); in handle_stripe()
4057 static void raid5_activate_delayed(struct r5conf *conf) in raid5_activate_delayed() argument
4059 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) { in raid5_activate_delayed()
4060 while (!list_empty(&conf->delayed_list)) { in raid5_activate_delayed()
4061 struct list_head *l = conf->delayed_list.next; in raid5_activate_delayed()
4067 atomic_inc(&conf->preread_active_stripes); in raid5_activate_delayed()
4068 list_add_tail(&sh->lru, &conf->hold_list); in raid5_activate_delayed()
4074 static void activate_bit_delay(struct r5conf *conf, in activate_bit_delay() argument
4079 list_add(&head, &conf->bitmap_list); in activate_bit_delay()
4080 list_del_init(&conf->bitmap_list); in activate_bit_delay()
4087 __release_stripe(conf, sh, &temp_inactive_list[hash]); in activate_bit_delay()
4093 struct r5conf *conf = mddev->private; in md_raid5_congested() local
4099 if (conf->inactive_blocked) in md_raid5_congested()
4101 if (conf->quiesce) in md_raid5_congested()
4103 if (atomic_read(&conf->empty_inactive_list_nr)) in md_raid5_congested()
4160 static void add_bio_to_retry(struct bio *bi,struct r5conf *conf) in add_bio_to_retry() argument
4164 spin_lock_irqsave(&conf->device_lock, flags); in add_bio_to_retry()
4166 bi->bi_next = conf->retry_read_aligned_list; in add_bio_to_retry()
4167 conf->retry_read_aligned_list = bi; in add_bio_to_retry()
4169 spin_unlock_irqrestore(&conf->device_lock, flags); in add_bio_to_retry()
4170 md_wakeup_thread(conf->mddev->thread); in add_bio_to_retry()
4173 static struct bio *remove_bio_from_retry(struct r5conf *conf) in remove_bio_from_retry() argument
4177 bi = conf->retry_read_aligned; in remove_bio_from_retry()
4179 conf->retry_read_aligned = NULL; in remove_bio_from_retry()
4182 bi = conf->retry_read_aligned_list; in remove_bio_from_retry()
4184 conf->retry_read_aligned_list = bi->bi_next; in remove_bio_from_retry()
4206 struct r5conf *conf; in raid5_align_endio() local
4215 conf = mddev->private; in raid5_align_endio()
4217 rdev_dec_pending(rdev, conf->mddev); in raid5_align_endio()
4223 if (atomic_dec_and_test(&conf->active_aligned_reads)) in raid5_align_endio()
4224 wake_up(&conf->wait_for_stripe); in raid5_align_endio()
4230 add_bio_to_retry(raid_bi, conf); in raid5_align_endio()
4254 struct r5conf *conf = mddev->private; in chunk_aligned_read() local
4280 raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector, in chunk_aligned_read()
4285 rdev = rcu_dereference(conf->disks[dd_idx].replacement); in chunk_aligned_read()
4288 rdev = rcu_dereference(conf->disks[dd_idx].rdev); in chunk_aligned_read()
4318 spin_lock_irq(&conf->device_lock); in chunk_aligned_read()
4319 wait_event_lock_irq(conf->wait_for_stripe, in chunk_aligned_read()
4320 conf->quiesce == 0, in chunk_aligned_read()
4321 conf->device_lock); in chunk_aligned_read()
4322 atomic_inc(&conf->active_aligned_reads); in chunk_aligned_read()
4323 spin_unlock_irq(&conf->device_lock); in chunk_aligned_read()
4348 static struct stripe_head *__get_priority_stripe(struct r5conf *conf, int group) in __get_priority_stripe() argument
4354 if (conf->worker_cnt_per_group == 0) { in __get_priority_stripe()
4355 handle_list = &conf->handle_list; in __get_priority_stripe()
4357 handle_list = &conf->worker_groups[group].handle_list; in __get_priority_stripe()
4358 wg = &conf->worker_groups[group]; in __get_priority_stripe()
4361 for (i = 0; i < conf->group_cnt; i++) { in __get_priority_stripe()
4362 handle_list = &conf->worker_groups[i].handle_list; in __get_priority_stripe()
4363 wg = &conf->worker_groups[i]; in __get_priority_stripe()
4372 list_empty(&conf->hold_list) ? "empty" : "busy", in __get_priority_stripe()
4373 atomic_read(&conf->pending_full_writes), conf->bypass_count); in __get_priority_stripe()
4378 if (list_empty(&conf->hold_list)) in __get_priority_stripe()
4379 conf->bypass_count = 0; in __get_priority_stripe()
4381 if (conf->hold_list.next == conf->last_hold) in __get_priority_stripe()
4382 conf->bypass_count++; in __get_priority_stripe()
4384 conf->last_hold = conf->hold_list.next; in __get_priority_stripe()
4385 conf->bypass_count -= conf->bypass_threshold; in __get_priority_stripe()
4386 if (conf->bypass_count < 0) in __get_priority_stripe()
4387 conf->bypass_count = 0; in __get_priority_stripe()
4390 } else if (!list_empty(&conf->hold_list) && in __get_priority_stripe()
4391 ((conf->bypass_threshold && in __get_priority_stripe()
4392 conf->bypass_count > conf->bypass_threshold) || in __get_priority_stripe()
4393 atomic_read(&conf->pending_full_writes) == 0)) { in __get_priority_stripe()
4395 list_for_each_entry(tmp, &conf->hold_list, lru) { in __get_priority_stripe()
4396 if (conf->worker_cnt_per_group == 0 || in __get_priority_stripe()
4406 conf->bypass_count -= conf->bypass_threshold; in __get_priority_stripe()
4407 if (conf->bypass_count < 0) in __get_priority_stripe()
4408 conf->bypass_count = 0; in __get_priority_stripe()
4437 struct r5conf *conf = mddev->private; in raid5_unplug() local
4442 spin_lock_irq(&conf->device_lock); in raid5_unplug()
4458 __release_stripe(conf, sh, &cb->temp_inactive_list[hash]); in raid5_unplug()
4461 spin_unlock_irq(&conf->device_lock); in raid5_unplug()
4463 release_inactive_stripe_list(conf, cb->temp_inactive_list, in raid5_unplug()
4500 struct r5conf *conf = mddev->private; in make_discard_request() local
4516 stripe_sectors = conf->chunk_sectors * in make_discard_request()
4517 (conf->raid_disks - conf->max_degraded); in make_discard_request()
4522 logical_sector *= conf->chunk_sectors; in make_discard_request()
4523 last_sector *= conf->chunk_sectors; in make_discard_request()
4530 sh = get_active_stripe(conf, logical_sector, 0, 0, 0); in make_discard_request()
4531 prepare_to_wait(&conf->wait_for_overlap, &w, in make_discard_request()
4541 for (d = 0; d < conf->raid_disks; d++) { in make_discard_request()
4553 finish_wait(&conf->wait_for_overlap, &w); in make_discard_request()
4554 for (d = 0; d < conf->raid_disks; d++) { in make_discard_request()
4562 if (conf->mddev->bitmap) { in make_discard_request()
4564 d < conf->raid_disks - conf->max_degraded; in make_discard_request()
4570 sh->bm_seq = conf->seq_flush + 1; in make_discard_request()
4577 atomic_inc(&conf->preread_active_stripes); in make_discard_request()
4590 struct r5conf *conf = mddev->private; in make_request() local
4622 prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE); in make_request()
4629 seq = read_seqcount_begin(&conf->gen_lock); in make_request()
4632 prepare_to_wait(&conf->wait_for_overlap, &w, in make_request()
4634 if (unlikely(conf->reshape_progress != MaxSector)) { in make_request()
4643 spin_lock_irq(&conf->device_lock); in make_request()
4645 ? logical_sector < conf->reshape_progress in make_request()
4646 : logical_sector >= conf->reshape_progress) { in make_request()
4650 ? logical_sector < conf->reshape_safe in make_request()
4651 : logical_sector >= conf->reshape_safe) { in make_request()
4652 spin_unlock_irq(&conf->device_lock); in make_request()
4658 spin_unlock_irq(&conf->device_lock); in make_request()
4661 new_sector = raid5_compute_sector(conf, logical_sector, in make_request()
4668 sh = get_active_stripe(conf, new_sector, previous, in make_request()
4681 spin_lock_irq(&conf->device_lock); in make_request()
4683 ? logical_sector >= conf->reshape_progress in make_request()
4684 : logical_sector < conf->reshape_progress) in make_request()
4687 spin_unlock_irq(&conf->device_lock); in make_request()
4695 if (read_seqcount_retry(&conf->gen_lock, seq)) { in make_request()
4711 prepare_to_wait(&conf->wait_for_overlap, in make_request()
4741 atomic_inc(&conf->preread_active_stripes); in make_request()
4749 finish_wait(&conf->wait_for_overlap, &w); in make_request()
4776 struct r5conf *conf = mddev->private; in reshape_request() local
4779 int raid_disks = conf->previous_raid_disks; in reshape_request()
4780 int data_disks = raid_disks - conf->max_degraded; in reshape_request()
4781 int new_data_disks = conf->raid_disks - conf->max_degraded; in reshape_request()
4792 conf->reshape_progress < raid5_size(mddev, 0, 0)) { in reshape_request()
4794 - conf->reshape_progress; in reshape_request()
4796 conf->reshape_progress > 0) in reshape_request()
4797 sector_nr = conf->reshape_progress; in reshape_request()
4822 writepos = conf->reshape_progress; in reshape_request()
4824 readpos = conf->reshape_progress; in reshape_request()
4826 safepos = conf->reshape_safe; in reshape_request()
4842 BUG_ON(conf->reshape_progress == 0); in reshape_request()
4873 if (conf->min_offset_diff < 0) { in reshape_request()
4874 safepos += -conf->min_offset_diff; in reshape_request()
4875 readpos += -conf->min_offset_diff; in reshape_request()
4877 writepos += conf->min_offset_diff; in reshape_request()
4882 time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) { in reshape_request()
4884 wait_event(conf->wait_for_overlap, in reshape_request()
4885 atomic_read(&conf->reshape_stripes)==0 in reshape_request()
4887 if (atomic_read(&conf->reshape_stripes) != 0) in reshape_request()
4889 mddev->reshape_position = conf->reshape_progress; in reshape_request()
4891 conf->reshape_checkpoint = jiffies; in reshape_request()
4898 spin_lock_irq(&conf->device_lock); in reshape_request()
4899 conf->reshape_safe = mddev->reshape_position; in reshape_request()
4900 spin_unlock_irq(&conf->device_lock); in reshape_request()
4901 wake_up(&conf->wait_for_overlap); in reshape_request()
4909 sh = get_active_stripe(conf, stripe_addr+i, 0, 0, 1); in reshape_request()
4911 atomic_inc(&conf->reshape_stripes); in reshape_request()
4919 if (conf->level == 6 && in reshape_request()
4937 spin_lock_irq(&conf->device_lock); in reshape_request()
4939 conf->reshape_progress -= reshape_sectors * new_data_disks; in reshape_request()
4941 conf->reshape_progress += reshape_sectors * new_data_disks; in reshape_request()
4942 spin_unlock_irq(&conf->device_lock); in reshape_request()
4949 raid5_compute_sector(conf, stripe_addr*(new_data_disks), in reshape_request()
4952 raid5_compute_sector(conf, ((stripe_addr+reshape_sectors) in reshape_request()
4958 sh = get_active_stripe(conf, first_sector, 1, 0, 1); in reshape_request()
4979 wait_event(conf->wait_for_overlap, in reshape_request()
4980 atomic_read(&conf->reshape_stripes) == 0 in reshape_request()
4982 if (atomic_read(&conf->reshape_stripes) != 0) in reshape_request()
4984 mddev->reshape_position = conf->reshape_progress; in reshape_request()
4986 conf->reshape_checkpoint = jiffies; in reshape_request()
4994 spin_lock_irq(&conf->device_lock); in reshape_request()
4995 conf->reshape_safe = mddev->reshape_position; in reshape_request()
4996 spin_unlock_irq(&conf->device_lock); in reshape_request()
4997 wake_up(&conf->wait_for_overlap); in reshape_request()
5007 struct r5conf *conf = mddev->private; in sync_request() local
5018 end_reshape(conf); in sync_request()
5026 conf->fullsync = 0; in sync_request()
5033 wait_event(conf->wait_for_overlap, conf->quiesce != 2); in sync_request()
5048 if (mddev->degraded >= conf->max_degraded && in sync_request()
5055 !conf->fullsync && in sync_request()
5066 sh = get_active_stripe(conf, sector_nr, 0, 1, 0); in sync_request()
5068 sh = get_active_stripe(conf, sector_nr, 0, 0, 0); in sync_request()
5078 for (i = 0; i < conf->raid_disks; i++) in sync_request()
5079 if (conf->disks[i].rdev == NULL) in sync_request()
5092 static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio) in retry_aligned_read() argument
5113 sector = raid5_compute_sector(conf, logical_sector, in retry_aligned_read()
5126 sh = get_active_stripe(conf, sector, 0, 1, 1); in retry_aligned_read()
5131 conf->retry_read_aligned = raid_bio; in retry_aligned_read()
5138 conf->retry_read_aligned = raid_bio; in retry_aligned_read()
5153 if (atomic_dec_and_test(&conf->active_aligned_reads)) in retry_aligned_read()
5154 wake_up(&conf->wait_for_stripe); in retry_aligned_read()
5158 static int handle_active_stripes(struct r5conf *conf, int group, in handle_active_stripes() argument
5167 (sh = __get_priority_stripe(conf, group)) != NULL) in handle_active_stripes()
5178 spin_unlock_irq(&conf->device_lock); in handle_active_stripes()
5180 release_inactive_stripe_list(conf, temp_inactive_list, in handle_active_stripes()
5184 spin_lock_irq(&conf->device_lock); in handle_active_stripes()
5193 spin_lock_irq(&conf->device_lock); in handle_active_stripes()
5196 __release_stripe(conf, batch[i], &temp_inactive_list[hash]); in handle_active_stripes()
5205 struct r5conf *conf = group->conf; in raid5_do_work() local
5206 int group_id = group - conf->worker_groups; in raid5_do_work()
5214 spin_lock_irq(&conf->device_lock); in raid5_do_work()
5218 released = release_stripe_list(conf, worker->temp_inactive_list); in raid5_do_work()
5220 batch_size = handle_active_stripes(conf, group_id, worker, in raid5_do_work()
5229 spin_unlock_irq(&conf->device_lock); in raid5_do_work()
5247 struct r5conf *conf = mddev->private; in raid5d() local
5257 spin_lock_irq(&conf->device_lock); in raid5d()
5262 released = release_stripe_list(conf, conf->temp_inactive_list); in raid5d()
5265 !list_empty(&conf->bitmap_list)) { in raid5d()
5267 conf->seq_flush++; in raid5d()
5268 spin_unlock_irq(&conf->device_lock); in raid5d()
5270 spin_lock_irq(&conf->device_lock); in raid5d()
5271 conf->seq_write = conf->seq_flush; in raid5d()
5272 activate_bit_delay(conf, conf->temp_inactive_list); in raid5d()
5274 raid5_activate_delayed(conf); in raid5d()
5276 while ((bio = remove_bio_from_retry(conf))) { in raid5d()
5278 spin_unlock_irq(&conf->device_lock); in raid5d()
5279 ok = retry_aligned_read(conf, bio); in raid5d()
5280 spin_lock_irq(&conf->device_lock); in raid5d()
5286 batch_size = handle_active_stripes(conf, ANY_GROUP, NULL, in raid5d()
5287 conf->temp_inactive_list); in raid5d()
5293 spin_unlock_irq(&conf->device_lock); in raid5d()
5295 spin_lock_irq(&conf->device_lock); in raid5d()
5300 spin_unlock_irq(&conf->device_lock); in raid5d()
5311 struct r5conf *conf = mddev->private; in raid5_show_stripe_cache_size() local
5312 if (conf) in raid5_show_stripe_cache_size()
5313 return sprintf(page, "%d\n", conf->max_nr_stripes); in raid5_show_stripe_cache_size()
5321 struct r5conf *conf = mddev->private; in raid5_set_cache_size() local
5327 hash = (conf->max_nr_stripes - 1) % NR_STRIPE_HASH_LOCKS; in raid5_set_cache_size()
5328 while (size < conf->max_nr_stripes) { in raid5_set_cache_size()
5329 if (drop_one_stripe(conf, hash)) in raid5_set_cache_size()
5330 conf->max_nr_stripes--; in raid5_set_cache_size()
5340 hash = conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS; in raid5_set_cache_size()
5341 while (size > conf->max_nr_stripes) { in raid5_set_cache_size()
5342 if (grow_one_stripe(conf, hash)) in raid5_set_cache_size()
5343 conf->max_nr_stripes++; in raid5_set_cache_size()
5354 struct r5conf *conf = mddev->private; in raid5_store_stripe_cache_size() local
5360 if (!conf) in raid5_store_stripe_cache_size()
5379 struct r5conf *conf = mddev->private; in raid5_show_preread_threshold() local
5380 if (conf) in raid5_show_preread_threshold()
5381 return sprintf(page, "%d\n", conf->bypass_threshold); in raid5_show_preread_threshold()
5389 struct r5conf *conf = mddev->private; in raid5_store_preread_threshold() local
5393 if (!conf) in raid5_store_preread_threshold()
5398 if (new > conf->max_nr_stripes) in raid5_store_preread_threshold()
5400 conf->bypass_threshold = new; in raid5_store_preread_threshold()
5413 struct r5conf *conf = mddev->private; in raid5_show_skip_copy() local
5414 if (conf) in raid5_show_skip_copy()
5415 return sprintf(page, "%d\n", conf->skip_copy); in raid5_show_skip_copy()
5423 struct r5conf *conf = mddev->private; in raid5_store_skip_copy() local
5427 if (!conf) in raid5_store_skip_copy()
5433 if (new == conf->skip_copy) in raid5_store_skip_copy()
5437 conf->skip_copy = new; in raid5_store_skip_copy()
5456 struct r5conf *conf = mddev->private; in stripe_cache_active_show() local
5457 if (conf) in stripe_cache_active_show()
5458 return sprintf(page, "%d\n", atomic_read(&conf->active_stripes)); in stripe_cache_active_show()
5469 struct r5conf *conf = mddev->private; in raid5_show_group_thread_cnt() local
5470 if (conf) in raid5_show_group_thread_cnt()
5471 return sprintf(page, "%d\n", conf->worker_cnt_per_group); in raid5_show_group_thread_cnt()
5476 static int alloc_thread_groups(struct r5conf *conf, int cnt,
5483 struct r5conf *conf = mddev->private; in raid5_store_group_thread_cnt() local
5491 if (!conf) in raid5_store_group_thread_cnt()
5497 if (new == conf->worker_cnt_per_group) in raid5_store_group_thread_cnt()
5502 old_groups = conf->worker_groups; in raid5_store_group_thread_cnt()
5506 err = alloc_thread_groups(conf, new, in raid5_store_group_thread_cnt()
5510 spin_lock_irq(&conf->device_lock); in raid5_store_group_thread_cnt()
5511 conf->group_cnt = group_cnt; in raid5_store_group_thread_cnt()
5512 conf->worker_cnt_per_group = worker_cnt_per_group; in raid5_store_group_thread_cnt()
5513 conf->worker_groups = new_groups; in raid5_store_group_thread_cnt()
5514 spin_unlock_irq(&conf->device_lock); in raid5_store_group_thread_cnt()
5546 static int alloc_thread_groups(struct r5conf *conf, int cnt, in alloc_thread_groups() argument
5577 group->conf = conf; in alloc_thread_groups()
5593 static void free_thread_groups(struct r5conf *conf) in free_thread_groups() argument
5595 if (conf->worker_groups) in free_thread_groups()
5596 kfree(conf->worker_groups[0].workers); in free_thread_groups()
5597 kfree(conf->worker_groups); in free_thread_groups()
5598 conf->worker_groups = NULL; in free_thread_groups()
5604 struct r5conf *conf = mddev->private; in raid5_size() local
5610 raid_disks = min(conf->raid_disks, conf->previous_raid_disks); in raid5_size()
5614 return sectors * (raid_disks - conf->max_degraded); in raid5_size()
5617 static void free_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu) in free_scratch_buffer() argument
5625 static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu) in alloc_scratch_buffer() argument
5627 if (conf->level == 6 && !percpu->spare_page) in alloc_scratch_buffer()
5630 percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL); in alloc_scratch_buffer()
5632 if (!percpu->scribble || (conf->level == 6 && !percpu->spare_page)) { in alloc_scratch_buffer()
5633 free_scratch_buffer(conf, percpu); in alloc_scratch_buffer()
5640 static void raid5_free_percpu(struct r5conf *conf) in raid5_free_percpu() argument
5644 if (!conf->percpu) in raid5_free_percpu()
5648 unregister_cpu_notifier(&conf->cpu_notify); in raid5_free_percpu()
5653 free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu)); in raid5_free_percpu()
5656 free_percpu(conf->percpu); in raid5_free_percpu()
5659 static void free_conf(struct r5conf *conf) in free_conf() argument
5661 free_thread_groups(conf); in free_conf()
5662 shrink_stripes(conf); in free_conf()
5663 raid5_free_percpu(conf); in free_conf()
5664 kfree(conf->disks); in free_conf()
5665 kfree(conf->stripe_hashtbl); in free_conf()
5666 kfree(conf); in free_conf()
5673 struct r5conf *conf = container_of(nfb, struct r5conf, cpu_notify); in raid456_cpu_notify() local
5675 struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu); in raid456_cpu_notify()
5680 if (alloc_scratch_buffer(conf, percpu)) { in raid456_cpu_notify()
5688 free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu)); in raid456_cpu_notify()
5697 static int raid5_alloc_percpu(struct r5conf *conf) in raid5_alloc_percpu() argument
5702 conf->percpu = alloc_percpu(struct raid5_percpu); in raid5_alloc_percpu()
5703 if (!conf->percpu) in raid5_alloc_percpu()
5707 conf->cpu_notify.notifier_call = raid456_cpu_notify; in raid5_alloc_percpu()
5708 conf->cpu_notify.priority = 0; in raid5_alloc_percpu()
5709 err = register_cpu_notifier(&conf->cpu_notify); in raid5_alloc_percpu()
5716 err = alloc_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu)); in raid5_alloc_percpu()
5730 struct r5conf *conf; in setup_conf() local
5768 conf = kzalloc(sizeof(struct r5conf), GFP_KERNEL); in setup_conf()
5769 if (conf == NULL) in setup_conf()
5772 if (!alloc_thread_groups(conf, 0, &group_cnt, &worker_cnt_per_group, in setup_conf()
5774 conf->group_cnt = group_cnt; in setup_conf()
5775 conf->worker_cnt_per_group = worker_cnt_per_group; in setup_conf()
5776 conf->worker_groups = new_group; in setup_conf()
5779 spin_lock_init(&conf->device_lock); in setup_conf()
5780 seqcount_init(&conf->gen_lock); in setup_conf()
5781 init_waitqueue_head(&conf->wait_for_stripe); in setup_conf()
5782 init_waitqueue_head(&conf->wait_for_overlap); in setup_conf()
5783 INIT_LIST_HEAD(&conf->handle_list); in setup_conf()
5784 INIT_LIST_HEAD(&conf->hold_list); in setup_conf()
5785 INIT_LIST_HEAD(&conf->delayed_list); in setup_conf()
5786 INIT_LIST_HEAD(&conf->bitmap_list); in setup_conf()
5787 init_llist_head(&conf->released_stripes); in setup_conf()
5788 atomic_set(&conf->active_stripes, 0); in setup_conf()
5789 atomic_set(&conf->preread_active_stripes, 0); in setup_conf()
5790 atomic_set(&conf->active_aligned_reads, 0); in setup_conf()
5791 conf->bypass_threshold = BYPASS_THRESHOLD; in setup_conf()
5792 conf->recovery_disabled = mddev->recovery_disabled - 1; in setup_conf()
5794 conf->raid_disks = mddev->raid_disks; in setup_conf()
5796 conf->previous_raid_disks = mddev->raid_disks; in setup_conf()
5798 conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks; in setup_conf()
5799 max_disks = max(conf->raid_disks, conf->previous_raid_disks); in setup_conf()
5800 conf->scribble_len = scribble_len(max_disks); in setup_conf()
5802 conf->disks = kzalloc(max_disks * sizeof(struct disk_info), in setup_conf()
5804 if (!conf->disks) in setup_conf()
5807 conf->mddev = mddev; in setup_conf()
5809 if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL) in setup_conf()
5817 spin_lock_init(conf->hash_locks); in setup_conf()
5819 spin_lock_init(conf->hash_locks + i); in setup_conf()
5822 INIT_LIST_HEAD(conf->inactive_list + i); in setup_conf()
5825 INIT_LIST_HEAD(conf->temp_inactive_list + i); in setup_conf()
5827 conf->level = mddev->new_level; in setup_conf()
5828 if (raid5_alloc_percpu(conf) != 0) in setup_conf()
5838 disk = conf->disks + raid_disk; in setup_conf()
5857 conf->fullsync = 1; in setup_conf()
5860 conf->chunk_sectors = mddev->new_chunk_sectors; in setup_conf()
5861 conf->level = mddev->new_level; in setup_conf()
5862 if (conf->level == 6) in setup_conf()
5863 conf->max_degraded = 2; in setup_conf()
5865 conf->max_degraded = 1; in setup_conf()
5866 conf->algorithm = mddev->new_layout; in setup_conf()
5867 conf->reshape_progress = mddev->reshape_position; in setup_conf()
5868 if (conf->reshape_progress != MaxSector) { in setup_conf()
5869 conf->prev_chunk_sectors = mddev->chunk_sectors; in setup_conf()
5870 conf->prev_algo = mddev->layout; in setup_conf()
5873 memory = conf->max_nr_stripes * (sizeof(struct stripe_head) + in setup_conf()
5875 atomic_set(&conf->empty_inactive_list_nr, NR_STRIPE_HASH_LOCKS); in setup_conf()
5876 if (grow_stripes(conf, NR_STRIPES)) { in setup_conf()
5886 conf->thread = md_register_thread(raid5d, mddev, pers_name); in setup_conf()
5887 if (!conf->thread) { in setup_conf()
5894 return conf; in setup_conf()
5897 if (conf) { in setup_conf()
5898 free_conf(conf); in setup_conf()
5932 struct r5conf *conf; in run() local
6050 conf = setup_conf(mddev); in run()
6052 conf = mddev->private; in run()
6054 if (IS_ERR(conf)) in run()
6055 return PTR_ERR(conf); in run()
6057 conf->min_offset_diff = min_offset_diff; in run()
6058 mddev->thread = conf->thread; in run()
6059 conf->thread = NULL; in run()
6060 mddev->private = conf; in run()
6062 for (i = 0; i < conf->raid_disks && conf->previous_raid_disks; in run()
6064 rdev = conf->disks[i].rdev; in run()
6065 if (!rdev && conf->disks[i].replacement) { in run()
6067 rdev = conf->disks[i].replacement; in run()
6068 conf->disks[i].replacement = NULL; in run()
6070 conf->disks[i].rdev = rdev; in run()
6074 if (conf->disks[i].replacement && in run()
6075 conf->reshape_progress != MaxSector) { in run()
6101 conf->algorithm, in run()
6102 conf->raid_disks, in run()
6103 conf->max_degraded)) in run()
6107 conf->prev_algo, in run()
6108 conf->previous_raid_disks, in run()
6109 conf->max_degraded)) in run()
6117 mddev->degraded = calc_degraded(conf); in run()
6119 if (has_failed(conf)) { in run()
6122 mdname(mddev), mddev->degraded, conf->raid_disks); in run()
6147 " devices, algorithm %d\n", mdname(mddev), conf->level, in run()
6153 mdname(mddev), conf->level, in run()
6157 print_raid5_conf(conf); in run()
6159 if (conf->reshape_progress != MaxSector) { in run()
6160 conf->reshape_safe = conf->reshape_progress; in run()
6161 atomic_set(&conf->reshape_stripes, 0); in run()
6187 int data_disks = conf->previous_raid_disks - conf->max_degraded; in run()
6201 (conf->raid_disks - conf->max_degraded)); in run()
6277 print_raid5_conf(conf); in run()
6278 free_conf(conf); in run()
6286 struct r5conf *conf = mddev->private; in stop() local
6291 free_conf(conf); in stop()
6299 struct r5conf *conf = mddev->private; in status() local
6304 seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded); in status()
6305 for (i = 0; i < conf->raid_disks; i++) in status()
6307 conf->disks[i].rdev && in status()
6308 test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_"); in status()
6312 static void print_raid5_conf (struct r5conf *conf) in print_raid5_conf() argument
6318 if (!conf) { in print_raid5_conf()
6322 printk(KERN_DEBUG " --- level:%d rd:%d wd:%d\n", conf->level, in print_raid5_conf()
6323 conf->raid_disks, in print_raid5_conf()
6324 conf->raid_disks - conf->mddev->degraded); in print_raid5_conf()
6326 for (i = 0; i < conf->raid_disks; i++) { in print_raid5_conf()
6328 tmp = conf->disks + i; in print_raid5_conf()
6339 struct r5conf *conf = mddev->private; in raid5_spare_active() local
6344 for (i = 0; i < conf->raid_disks; i++) { in raid5_spare_active()
6345 tmp = conf->disks + i; in raid5_spare_active()
6372 spin_lock_irqsave(&conf->device_lock, flags); in raid5_spare_active()
6373 mddev->degraded = calc_degraded(conf); in raid5_spare_active()
6374 spin_unlock_irqrestore(&conf->device_lock, flags); in raid5_spare_active()
6375 print_raid5_conf(conf); in raid5_spare_active()
6381 struct r5conf *conf = mddev->private; in raid5_remove_disk() local
6385 struct disk_info *p = conf->disks + number; in raid5_remove_disk()
6387 print_raid5_conf(conf); in raid5_remove_disk()
6395 if (number >= conf->raid_disks && in raid5_remove_disk()
6396 conf->reshape_progress == MaxSector) in raid5_remove_disk()
6408 mddev->recovery_disabled != conf->recovery_disabled && in raid5_remove_disk()
6409 !has_failed(conf) && in raid5_remove_disk()
6411 number < conf->raid_disks) { in raid5_remove_disk()
6437 print_raid5_conf(conf); in raid5_remove_disk()
6443 struct r5conf *conf = mddev->private; in raid5_add_disk() local
6448 int last = conf->raid_disks - 1; in raid5_add_disk()
6450 if (mddev->recovery_disabled == conf->recovery_disabled) in raid5_add_disk()
6453 if (rdev->saved_raid_disk < 0 && has_failed(conf)) in raid5_add_disk()
6466 conf->disks[rdev->saved_raid_disk].rdev == NULL) in raid5_add_disk()
6470 p = conf->disks + disk; in raid5_add_disk()
6476 conf->fullsync = 1; in raid5_add_disk()
6482 p = conf->disks + disk; in raid5_add_disk()
6489 conf->fullsync = 1; in raid5_add_disk()
6495 print_raid5_conf(conf); in raid5_add_disk()
6542 struct r5conf *conf = mddev->private; in check_stripe_cache() local
6544 > conf->max_nr_stripes || in check_stripe_cache()
6546 > conf->max_nr_stripes) { in check_stripe_cache()
6558 struct r5conf *conf = mddev->private; in check_reshape() local
6564 if (has_failed(conf)) in check_reshape()
6582 return resize_stripes(conf, (conf->previous_raid_disks in check_reshape()
6588 struct r5conf *conf = mddev->private; in raid5_start_reshape() local
6599 if (has_failed(conf)) in raid5_start_reshape()
6608 if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded) in raid5_start_reshape()
6618 if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks) in raid5_start_reshape()
6625 atomic_set(&conf->reshape_stripes, 0); in raid5_start_reshape()
6626 spin_lock_irq(&conf->device_lock); in raid5_start_reshape()
6627 write_seqcount_begin(&conf->gen_lock); in raid5_start_reshape()
6628 conf->previous_raid_disks = conf->raid_disks; in raid5_start_reshape()
6629 conf->raid_disks += mddev->delta_disks; in raid5_start_reshape()
6630 conf->prev_chunk_sectors = conf->chunk_sectors; in raid5_start_reshape()
6631 conf->chunk_sectors = mddev->new_chunk_sectors; in raid5_start_reshape()
6632 conf->prev_algo = conf->algorithm; in raid5_start_reshape()
6633 conf->algorithm = mddev->new_layout; in raid5_start_reshape()
6634 conf->generation++; in raid5_start_reshape()
6640 conf->reshape_progress = raid5_size(mddev, 0, 0); in raid5_start_reshape()
6642 conf->reshape_progress = 0; in raid5_start_reshape()
6643 conf->reshape_safe = conf->reshape_progress; in raid5_start_reshape()
6644 write_seqcount_end(&conf->gen_lock); in raid5_start_reshape()
6645 spin_unlock_irq(&conf->device_lock); in raid5_start_reshape()
6667 >= conf->previous_raid_disks) in raid5_start_reshape()
6675 } else if (rdev->raid_disk >= conf->previous_raid_disks in raid5_start_reshape()
6685 spin_lock_irqsave(&conf->device_lock, flags); in raid5_start_reshape()
6686 mddev->degraded = calc_degraded(conf); in raid5_start_reshape()
6687 spin_unlock_irqrestore(&conf->device_lock, flags); in raid5_start_reshape()
6689 mddev->raid_disks = conf->raid_disks; in raid5_start_reshape()
6690 mddev->reshape_position = conf->reshape_progress; in raid5_start_reshape()
6701 spin_lock_irq(&conf->device_lock); in raid5_start_reshape()
6702 write_seqcount_begin(&conf->gen_lock); in raid5_start_reshape()
6703 mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks; in raid5_start_reshape()
6705 conf->chunk_sectors = conf->prev_chunk_sectors; in raid5_start_reshape()
6706 mddev->new_layout = conf->algorithm = conf->prev_algo; in raid5_start_reshape()
6710 conf->generation --; in raid5_start_reshape()
6711 conf->reshape_progress = MaxSector; in raid5_start_reshape()
6713 write_seqcount_end(&conf->gen_lock); in raid5_start_reshape()
6714 spin_unlock_irq(&conf->device_lock); in raid5_start_reshape()
6717 conf->reshape_checkpoint = jiffies; in raid5_start_reshape()
6726 static void end_reshape(struct r5conf *conf) in end_reshape() argument
6729 if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { in end_reshape()
6731 spin_lock_irq(&conf->device_lock); in end_reshape()
6732 conf->previous_raid_disks = conf->raid_disks; in end_reshape()
6733 md_finish_reshape(conf->mddev); in end_reshape()
6735 conf->reshape_progress = MaxSector; in end_reshape()
6736 spin_unlock_irq(&conf->device_lock); in end_reshape()
6737 wake_up(&conf->wait_for_overlap); in end_reshape()
6742 if (conf->mddev->queue) { in end_reshape()
6743 int data_disks = conf->raid_disks - conf->max_degraded; in end_reshape()
6744 int stripe = data_disks * ((conf->chunk_sectors << 9) in end_reshape()
6746 if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe) in end_reshape()
6747 conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe; in end_reshape()
6757 struct r5conf *conf = mddev->private; in raid5_finish_reshape() local
6767 spin_lock_irq(&conf->device_lock); in raid5_finish_reshape()
6768 mddev->degraded = calc_degraded(conf); in raid5_finish_reshape()
6769 spin_unlock_irq(&conf->device_lock); in raid5_finish_reshape()
6770 for (d = conf->raid_disks ; in raid5_finish_reshape()
6771 d < conf->raid_disks - mddev->delta_disks; in raid5_finish_reshape()
6773 struct md_rdev *rdev = conf->disks[d].rdev; in raid5_finish_reshape()
6776 rdev = conf->disks[d].replacement; in raid5_finish_reshape()
6781 mddev->layout = conf->algorithm; in raid5_finish_reshape()
6782 mddev->chunk_sectors = conf->chunk_sectors; in raid5_finish_reshape()
6791 struct r5conf *conf = mddev->private; in raid5_quiesce() local
6795 wake_up(&conf->wait_for_overlap); in raid5_quiesce()
6799 lock_all_device_hash_locks_irq(conf); in raid5_quiesce()
6803 conf->quiesce = 2; in raid5_quiesce()
6804 wait_event_cmd(conf->wait_for_stripe, in raid5_quiesce()
6805 atomic_read(&conf->active_stripes) == 0 && in raid5_quiesce()
6806 atomic_read(&conf->active_aligned_reads) == 0, in raid5_quiesce()
6807 unlock_all_device_hash_locks_irq(conf), in raid5_quiesce()
6808 lock_all_device_hash_locks_irq(conf)); in raid5_quiesce()
6809 conf->quiesce = 1; in raid5_quiesce()
6810 unlock_all_device_hash_locks_irq(conf); in raid5_quiesce()
6812 wake_up(&conf->wait_for_overlap); in raid5_quiesce()
6816 lock_all_device_hash_locks_irq(conf); in raid5_quiesce()
6817 conf->quiesce = 0; in raid5_quiesce()
6818 wake_up(&conf->wait_for_stripe); in raid5_quiesce()
6819 wake_up(&conf->wait_for_overlap); in raid5_quiesce()
6820 unlock_all_device_hash_locks_irq(conf); in raid5_quiesce()
6918 struct r5conf *conf = mddev->private; in raid5_check_reshape() local
6938 conf->algorithm = mddev->new_layout; in raid5_check_reshape()
6942 conf->chunk_sectors = new_chunk ; in raid5_check_reshape()