Lines Matching full:conf
46 static void allow_barrier(struct r1conf *conf, sector_t sector_nr);
47 static void lower_barrier(struct r1conf *conf, sector_t sector_nr);
237 static void put_all_bios(struct r1conf *conf, struct r1bio *r1_bio) in put_all_bios() argument
241 for (i = 0; i < conf->raid_disks * 2; i++) { in put_all_bios()
251 struct r1conf *conf = r1_bio->mddev->private; in free_r1bio() local
253 put_all_bios(conf, r1_bio); in free_r1bio()
254 mempool_free(r1_bio, &conf->r1bio_pool); in free_r1bio()
259 struct r1conf *conf = r1_bio->mddev->private; in put_buf() local
263 for (i = 0; i < conf->raid_disks * 2; i++) { in put_buf()
266 rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev); in put_buf()
269 mempool_free(r1_bio, &conf->r1buf_pool); in put_buf()
271 lower_barrier(conf, sect); in put_buf()
278 struct r1conf *conf = mddev->private; in reschedule_retry() local
282 spin_lock_irqsave(&conf->device_lock, flags); in reschedule_retry()
283 list_add(&r1_bio->retry_list, &conf->retry_list); in reschedule_retry()
284 atomic_inc(&conf->nr_queued[idx]); in reschedule_retry()
285 spin_unlock_irqrestore(&conf->device_lock, flags); in reschedule_retry()
287 wake_up(&conf->wait_barrier); in reschedule_retry()
309 struct r1conf *conf = r1_bio->mddev->private; in raid_end_bio_io() local
324 allow_barrier(conf, r1_bio->sector); in raid_end_bio_io()
334 struct r1conf *conf = r1_bio->mddev->private; in update_head_pos() local
336 conf->mirrors[disk].head_position = in update_head_pos()
346 struct r1conf *conf = r1_bio->mddev->private; in find_bio_disk() local
347 int raid_disks = conf->raid_disks; in find_bio_disk()
363 struct r1conf *conf = r1_bio->mddev->private; in raid1_end_read_request() local
364 struct md_rdev *rdev = conf->mirrors[r1_bio->read_disk].rdev; in raid1_end_read_request()
384 spin_lock_irqsave(&conf->device_lock, flags); in raid1_end_read_request()
385 if (r1_bio->mddev->degraded == conf->raid_disks || in raid1_end_read_request()
386 (r1_bio->mddev->degraded == conf->raid_disks-1 && in raid1_end_read_request()
389 spin_unlock_irqrestore(&conf->device_lock, flags); in raid1_end_read_request()
394 rdev_dec_pending(rdev, conf->mddev); in raid1_end_read_request()
401 mdname(conf->mddev), in raid1_end_read_request()
446 struct r1conf *conf = r1_bio->mddev->private; in raid1_end_write_request() local
449 struct md_rdev *rdev = conf->mirrors[mirror].rdev; in raid1_end_write_request()
463 conf->mddev->recovery); in raid1_end_write_request()
549 rdev_dec_pending(rdev, conf->mddev); in raid1_end_write_request()
594 static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sectors) in read_balance() argument
626 if ((conf->mddev->recovery_cp < this_sector + sectors) || in read_balance()
627 (mddev_is_clustered(conf->mddev) && in read_balance()
628 md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector, in read_balance()
634 for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) { in read_balance()
641 rdev = rcu_dereference(conf->mirrors[disk].rdev); in read_balance()
708 dist = abs(this_sector - conf->mirrors[disk].head_position); in read_balance()
714 if (conf->mirrors[disk].next_seq_sect == this_sector in read_balance()
717 struct raid1_info *mirror = &conf->mirrors[disk]; in read_balance()
772 rdev = rcu_dereference(conf->mirrors[best_disk].rdev); in read_balance()
778 if (conf->mirrors[best_disk].next_seq_sect != this_sector) in read_balance()
779 conf->mirrors[best_disk].seq_start = this_sector; in read_balance()
781 conf->mirrors[best_disk].next_seq_sect = this_sector + sectors; in read_balance()
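Note: the read_balance() matches above outline the selection heuristic — skip members while resync covers the range, take a disk already positioned for a sequential read (next_seq_sect == this_sector) outright, otherwise pick the smallest head distance. A condensed sketch of that loop; best_disk/best_dist and the surrounding control flow are reconstructed from the elided context, so treat this as an approximation rather than the exact source:

    sector_t best_dist = MaxSector;
    int best_disk = -1, disk;

    for (disk = 0; disk < conf->raid_disks * 2; disk++) {
            struct md_rdev *rdev = rcu_dereference(conf->mirrors[disk].rdev);
            sector_t dist;

            if (!rdev || test_bit(Faulty, &rdev->flags))
                    continue;

            /* a member already streaming this offset wins outright */
            if (conf->mirrors[disk].next_seq_sect == this_sector) {
                    best_disk = disk;
                    break;
            }

            dist = abs(this_sector - conf->mirrors[disk].head_position);
            if (dist < best_dist) {
                    best_dist = dist;
                    best_disk = disk;
            }
    }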
789 static void flush_bio_list(struct r1conf *conf, struct bio *bio) in flush_bio_list() argument
792 md_bitmap_unplug(conf->mddev->bitmap); in flush_bio_list()
793 wake_up(&conf->wait_barrier); in flush_bio_list()
813 static void flush_pending_writes(struct r1conf *conf) in flush_pending_writes() argument
818 spin_lock_irq(&conf->device_lock); in flush_pending_writes()
820 if (conf->pending_bio_list.head) { in flush_pending_writes()
824 bio = bio_list_get(&conf->pending_bio_list); in flush_pending_writes()
825 conf->pending_count = 0; in flush_pending_writes()
826 spin_unlock_irq(&conf->device_lock); in flush_pending_writes()
839 flush_bio_list(conf, bio); in flush_pending_writes()
842 spin_unlock_irq(&conf->device_lock); in flush_pending_writes()
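Note: reassembled from the matches above (the blk_plug bracketing around flush_bio_list() in the full source is elided here), flush_pending_writes() detaches the whole pending list while holding device_lock and only submits it after dropping the lock:

    static void flush_pending_writes(struct r1conf *conf)
    {
            spin_lock_irq(&conf->device_lock);
            if (conf->pending_bio_list.head) {
                    struct bio *bio;

                    /* detach everything while locked ... */
                    bio = bio_list_get(&conf->pending_bio_list);
                    conf->pending_count = 0;
                    spin_unlock_irq(&conf->device_lock);

                    /* ... then submit with the lock dropped, so endio
                     * paths that re-queue writes cannot deadlock on it */
                    flush_bio_list(conf, bio);
            } else
                    spin_unlock_irq(&conf->device_lock);
    }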
869 static int raise_barrier(struct r1conf *conf, sector_t sector_nr) in raise_barrier() argument
873 spin_lock_irq(&conf->resync_lock); in raise_barrier()
876 wait_event_lock_irq(conf->wait_barrier, in raise_barrier()
877 !atomic_read(&conf->nr_waiting[idx]), in raise_barrier()
878 conf->resync_lock); in raise_barrier()
881 atomic_inc(&conf->barrier[idx]); in raise_barrier()
883 * In raise_barrier() we firstly increase conf->barrier[idx] then in raise_barrier()
884 * check conf->nr_pending[idx]. In _wait_barrier() we firstly in raise_barrier()
885 * increase conf->nr_pending[idx] then check conf->barrier[idx]. in raise_barrier()
886 * A memory barrier here to make sure conf->nr_pending[idx] won't in raise_barrier()
887 * be fetched before conf->barrier[idx] is increased. Otherwise in raise_barrier()
894 * B: while conf->nr_pending[idx] is not 0, meaning regular I/O in raise_barrier()
896 * C: while conf->barrier[idx] >= RESYNC_DEPTH, meaning reaches in raise_barrier()
899 wait_event_lock_irq(conf->wait_barrier, in raise_barrier()
900 (!conf->array_frozen && in raise_barrier()
901 !atomic_read(&conf->nr_pending[idx]) && in raise_barrier()
902 atomic_read(&conf->barrier[idx]) < RESYNC_DEPTH) || in raise_barrier()
903 test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery), in raise_barrier()
904 conf->resync_lock); in raise_barrier()
906 if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { in raise_barrier()
907 atomic_dec(&conf->barrier[idx]); in raise_barrier()
908 spin_unlock_irq(&conf->resync_lock); in raise_barrier()
909 wake_up(&conf->wait_barrier); in raise_barrier()
913 atomic_inc(&conf->nr_sync_pending); in raise_barrier()
914 spin_unlock_irq(&conf->resync_lock); in raise_barrier()
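Note: the comments quoted above describe a Dekker-style handshake — raise_barrier() increments conf->barrier[idx] and then reads conf->nr_pending[idx], while _wait_barrier() does the mirror image, with a full memory barrier between the store and the load on each side. A self-contained userspace analogue using C11 atomics in place of the kernel's smp_mb(); all names here are illustrative, not kernel API:

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_int barrier_cnt;   /* plays conf->barrier[idx]    */
    static atomic_int pending_cnt;   /* plays conf->nr_pending[idx] */

    /* resync side, cf. raise_barrier() */
    static bool try_raise(void)
    {
            atomic_fetch_add(&barrier_cnt, 1);
            atomic_thread_fence(memory_order_seq_cst);      /* smp_mb() */
            if (atomic_load(&pending_cnt) != 0)
                    return false;   /* regular I/O in flight: caller must wait */
            return true;
    }

    /* I/O side, cf. _wait_barrier() */
    static bool try_io(void)
    {
            atomic_fetch_add(&pending_cnt, 1);
            atomic_thread_fence(memory_order_seq_cst);      /* smp_mb__after_atomic() */
            if (atomic_load(&barrier_cnt) != 0) {
                    atomic_fetch_sub(&pending_cnt, 1);
                    return false;   /* barrier raised: caller must wait */
            }
            return true;
    }

Because each side stores before it loads, the interleaving in which both loads miss both stores is impossible — exactly the race the quoted comments rule out.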
919 static void lower_barrier(struct r1conf *conf, sector_t sector_nr) in lower_barrier() argument
923 BUG_ON(atomic_read(&conf->barrier[idx]) <= 0); in lower_barrier()
925 atomic_dec(&conf->barrier[idx]); in lower_barrier()
926 atomic_dec(&conf->nr_sync_pending); in lower_barrier()
927 wake_up(&conf->wait_barrier); in lower_barrier()
930 static void _wait_barrier(struct r1conf *conf, int idx) in _wait_barrier() argument
933 * We need to increase conf->nr_pending[idx] very early here, in _wait_barrier()
935 * conf->nr_pending[idx] to be 0. Then we can avoid holding in _wait_barrier()
936 * conf->resync_lock when there is no barrier raised in same in _wait_barrier()
940 atomic_inc(&conf->nr_pending[idx]); in _wait_barrier()
942 * In _wait_barrier() we firstly increase conf->nr_pending[idx], then in _wait_barrier()
943 * check conf->barrier[idx]. In raise_barrier() we firstly increase in _wait_barrier()
944 * conf->barrier[idx], then check conf->nr_pending[idx]. A memory in _wait_barrier()
945 * barrier is necessary here to make sure conf->barrier[idx] won't be in _wait_barrier()
946 * fetched before conf->nr_pending[idx] is increased. Otherwise there in _wait_barrier()
953 	 * here. If, while we check conf->barrier[idx], the array is in _wait_barrier()
954 	 * frozen (conf->array_frozen is 1), and conf->barrier[idx] is in _wait_barrier()
960 if (!READ_ONCE(conf->array_frozen) && in _wait_barrier()
961 !atomic_read(&conf->barrier[idx])) in _wait_barrier()
965 * After holding conf->resync_lock, conf->nr_pending[idx] in _wait_barrier()
968 	 * raise_barrier() might be waiting for conf->nr_pending[idx] in _wait_barrier()
971 spin_lock_irq(&conf->resync_lock); in _wait_barrier()
972 atomic_inc(&conf->nr_waiting[idx]); in _wait_barrier()
973 atomic_dec(&conf->nr_pending[idx]); in _wait_barrier()
978 wake_up(&conf->wait_barrier); in _wait_barrier()
980 wait_event_lock_irq(conf->wait_barrier, in _wait_barrier()
981 !conf->array_frozen && in _wait_barrier()
982 !atomic_read(&conf->barrier[idx]), in _wait_barrier()
983 conf->resync_lock); in _wait_barrier()
984 atomic_inc(&conf->nr_pending[idx]); in _wait_barrier()
985 atomic_dec(&conf->nr_waiting[idx]); in _wait_barrier()
986 spin_unlock_irq(&conf->resync_lock); in _wait_barrier()
989 static void wait_read_barrier(struct r1conf *conf, sector_t sector_nr) in wait_read_barrier() argument
998 * conf->barrier[idx] here, memory barrier is unnecessary as well. in wait_read_barrier()
1000 atomic_inc(&conf->nr_pending[idx]); in wait_read_barrier()
1002 if (!READ_ONCE(conf->array_frozen)) in wait_read_barrier()
1005 spin_lock_irq(&conf->resync_lock); in wait_read_barrier()
1006 atomic_inc(&conf->nr_waiting[idx]); in wait_read_barrier()
1007 atomic_dec(&conf->nr_pending[idx]); in wait_read_barrier()
1012 wake_up(&conf->wait_barrier); in wait_read_barrier()
1014 wait_event_lock_irq(conf->wait_barrier, in wait_read_barrier()
1015 !conf->array_frozen, in wait_read_barrier()
1016 conf->resync_lock); in wait_read_barrier()
1017 atomic_inc(&conf->nr_pending[idx]); in wait_read_barrier()
1018 atomic_dec(&conf->nr_waiting[idx]); in wait_read_barrier()
1019 spin_unlock_irq(&conf->resync_lock); in wait_read_barrier()
1022 static void wait_barrier(struct r1conf *conf, sector_t sector_nr) in wait_barrier() argument
1026 _wait_barrier(conf, idx); in wait_barrier()
1029 static void _allow_barrier(struct r1conf *conf, int idx) in _allow_barrier() argument
1031 atomic_dec(&conf->nr_pending[idx]); in _allow_barrier()
1032 wake_up(&conf->wait_barrier); in _allow_barrier()
1035 static void allow_barrier(struct r1conf *conf, sector_t sector_nr) in allow_barrier() argument
1039 _allow_barrier(conf, idx); in allow_barrier()
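Note: wait_barrier() and allow_barrier() first reduce sector_nr to a bucket index idx; that computation does not mention conf, so it is absent from this listing. In the kernel it is done by sector_to_idx(), approximately:

    static int sector_to_idx(sector_t sector)
    {
            /* hash the barrier unit (a 64MB region) into one of
             * BARRIER_BUCKETS_NR buckets, so a resync only throttles
             * I/O whose unit collides with its bucket */
            return hash_long(sector >> BARRIER_UNIT_SECTOR_BITS,
                             BARRIER_BUCKETS_NR_BITS);
    }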
1042 /* conf->resync_lock should be held */
1043 static int get_unqueued_pending(struct r1conf *conf) in get_unqueued_pending() argument
1047 ret = atomic_read(&conf->nr_sync_pending); in get_unqueued_pending()
1049 ret += atomic_read(&conf->nr_pending[idx]) - in get_unqueued_pending()
1050 atomic_read(&conf->nr_queued[idx]); in get_unqueued_pending()
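Note: with the loop header restored (line 1048 carries no conf reference and is elided from the listing; the bound BARRIER_BUCKETS_NR is taken from the kcalloc() calls in setup_conf() below), the helper reads approximately:

    /* conf->resync_lock should be held */
    static int get_unqueued_pending(struct r1conf *conf)
    {
            int idx, ret;

            ret = atomic_read(&conf->nr_sync_pending);
            for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++)
                    ret += atomic_read(&conf->nr_pending[idx]) -
                           atomic_read(&conf->nr_queued[idx]);

            return ret;
    }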
1055 static void freeze_array(struct r1conf *conf, int extra) in freeze_array() argument
1068 	 * Every in-flight I/O contributes to conf->nr_pending[idx]; idx is the in freeze_array()
1070 * normal I/O are queued, sum of all conf->nr_pending[] will match sum in freeze_array()
1071 * of all conf->nr_queued[]. But normal I/O failure is an exception, in freeze_array()
1077 	 * get_unqueued_pending(conf) becomes equal to extra. For in freeze_array()
1080 spin_lock_irq(&conf->resync_lock); in freeze_array()
1081 conf->array_frozen = 1; in freeze_array()
1082 raid1_log(conf->mddev, "wait freeze"); in freeze_array()
1084 conf->wait_barrier, in freeze_array()
1085 get_unqueued_pending(conf) == extra, in freeze_array()
1086 conf->resync_lock, in freeze_array()
1087 flush_pending_writes(conf)); in freeze_array()
1088 spin_unlock_irq(&conf->resync_lock); in freeze_array()
1090 static void unfreeze_array(struct r1conf *conf) in unfreeze_array() argument
1093 spin_lock_irq(&conf->resync_lock); in unfreeze_array()
1094 conf->array_frozen = 0; in unfreeze_array()
1095 spin_unlock_irq(&conf->resync_lock); in unfreeze_array()
1096 wake_up(&conf->wait_barrier); in unfreeze_array()
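Note: freeze_array()/unfreeze_array() bracket the error-handling paths; freeze_array() passes flush_pending_writes(conf) as the command to wait_event_lock_irq_cmd() (line 1087) so already-queued writes can still drain while the array is frozen. The typical caller pattern, taken from handle_read_error() at lines 2488-2491 (the elided continuation of the fix_read_error() call supplies r1_bio->sector and r1_bio->sectors):

    freeze_array(conf, 1);  /* wait until our own r1_bio is the only pending I/O */
    fix_read_error(conf, r1_bio->read_disk, r1_bio->sector, r1_bio->sectors);
    unfreeze_array(conf);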
1158 struct r1conf *conf = mddev->private; in raid1_unplug() local
1162 spin_lock_irq(&conf->device_lock); in raid1_unplug()
1163 bio_list_merge(&conf->pending_bio_list, &plug->pending); in raid1_unplug()
1164 conf->pending_count += plug->pending_cnt; in raid1_unplug()
1165 spin_unlock_irq(&conf->device_lock); in raid1_unplug()
1166 wake_up(&conf->wait_barrier); in raid1_unplug()
1174 flush_bio_list(conf, bio); in raid1_unplug()
1190 struct r1conf *conf = mddev->private; in alloc_r1bio() local
1193 r1_bio = mempool_alloc(&conf->r1bio_pool, GFP_NOIO); in alloc_r1bio()
1195 memset(r1_bio->bios, 0, conf->raid_disks * sizeof(r1_bio->bios[0])); in alloc_r1bio()
1203 struct r1conf *conf = mddev->private; in raid1_read_request() local
1225 rdev = rcu_dereference(conf->mirrors[r1_bio->read_disk].rdev); in raid1_read_request()
1237 wait_read_barrier(conf, bio->bi_iter.bi_sector); in raid1_read_request()
1249 rdisk = read_balance(conf, r1_bio, &max_sectors); in raid1_read_request()
1262 mirror = conf->mirrors + rdisk; in raid1_read_request()
1283 gfp, &conf->bio_split); in raid1_read_request()
1317 struct r1conf *conf = mddev->private; in raid1_write_request() local
1334 prepare_to_wait(&conf->wait_barrier, in raid1_write_request()
1342 finish_wait(&conf->wait_barrier, &w); in raid1_write_request()
1350 wait_barrier(conf, bio->bi_iter.bi_sector); in raid1_write_request()
1355 if (conf->pending_count >= max_queued_requests) { in raid1_write_request()
1358 wait_event(conf->wait_barrier, in raid1_write_request()
1359 conf->pending_count < max_queued_requests); in raid1_write_request()
1372 disks = conf->raid_disks * 2; in raid1_write_request()
1378 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); in raid1_write_request()
1386 if (i < conf->raid_disks) in raid1_write_request()
1443 rdev_dec_pending(conf->mirrors[j].rdev, mddev); in raid1_write_request()
1445 allow_barrier(conf, bio->bi_iter.bi_sector); in raid1_write_request()
1448 wait_barrier(conf, bio->bi_iter.bi_sector); in raid1_write_request()
1454 GFP_NOIO, &conf->bio_split); in raid1_write_request()
1469 struct md_rdev *rdev = conf->mirrors[i].rdev; in raid1_write_request()
1507 conf->mirrors[i].rdev->data_offset); in raid1_write_request()
1508 bio_set_dev(mbio, conf->mirrors[i].rdev->bdev); in raid1_write_request()
1511 if (test_bit(FailFast, &conf->mirrors[i].rdev->flags) && in raid1_write_request()
1512 !test_bit(WriteMostly, &conf->mirrors[i].rdev->flags) && in raid1_write_request()
1513 conf->raid_disks - mddev->degraded > 1) in raid1_write_request()
1524 mbio->bi_disk = (void *)conf->mirrors[i].rdev; in raid1_write_request()
1535 spin_lock_irqsave(&conf->device_lock, flags); in raid1_write_request()
1536 bio_list_add(&conf->pending_bio_list, mbio); in raid1_write_request()
1537 conf->pending_count++; in raid1_write_request()
1538 spin_unlock_irqrestore(&conf->device_lock, flags); in raid1_write_request()
1546 wake_up(&conf->wait_barrier); in raid1_write_request()
1579 struct r1conf *conf = mddev->private; in raid1_status() local
1582 seq_printf(seq, " [%d/%d] [", conf->raid_disks, in raid1_status()
1583 conf->raid_disks - mddev->degraded); in raid1_status()
1585 for (i = 0; i < conf->raid_disks; i++) { in raid1_status()
1586 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); in raid1_status()
1597 struct r1conf *conf = mddev->private; in raid1_error() local
1606 spin_lock_irqsave(&conf->device_lock, flags); in raid1_error()
1608 && (conf->raid_disks - mddev->degraded) == 1) { in raid1_error()
1615 conf->recovery_disabled = mddev->recovery_disabled; in raid1_error()
1616 spin_unlock_irqrestore(&conf->device_lock, flags); in raid1_error()
1623 spin_unlock_irqrestore(&conf->device_lock, flags); in raid1_error()
1633 mdname(mddev), conf->raid_disks - mddev->degraded); in raid1_error()
1636 static void print_conf(struct r1conf *conf) in print_conf() argument
1640 pr_debug("RAID1 conf printout:\n"); in print_conf()
1641 if (!conf) { in print_conf()
1642 pr_debug("(!conf)\n"); in print_conf()
1645 pr_debug(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded, in print_conf()
1646 conf->raid_disks); in print_conf()
1649 for (i = 0; i < conf->raid_disks; i++) { in print_conf()
1651 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); in print_conf()
1661 static void close_sync(struct r1conf *conf) in close_sync() argument
1666 _wait_barrier(conf, idx); in close_sync()
1667 _allow_barrier(conf, idx); in close_sync()
1670 mempool_exit(&conf->r1buf_pool); in close_sync()
1676 struct r1conf *conf = mddev->private; in raid1_spare_active() local
1687 spin_lock_irqsave(&conf->device_lock, flags); in raid1_spare_active()
1688 for (i = 0; i < conf->raid_disks; i++) { in raid1_spare_active()
1689 struct md_rdev *rdev = conf->mirrors[i].rdev; in raid1_spare_active()
1690 struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev; in raid1_spare_active()
1719 spin_unlock_irqrestore(&conf->device_lock, flags); in raid1_spare_active()
1721 print_conf(conf); in raid1_spare_active()
1727 struct r1conf *conf = mddev->private; in raid1_add_disk() local
1732 int last = conf->raid_disks - 1; in raid1_add_disk()
1734 if (mddev->recovery_disabled == conf->recovery_disabled) in raid1_add_disk()
1749 rdev->saved_raid_disk < conf->raid_disks && in raid1_add_disk()
1750 conf->mirrors[rdev->saved_raid_disk].rdev == NULL) in raid1_add_disk()
1754 p = conf->mirrors + mirror; in raid1_add_disk()
1767 conf->fullsync = 1; in raid1_add_disk()
1772 p[conf->raid_disks].rdev == NULL) { in raid1_add_disk()
1778 conf->fullsync = 1; in raid1_add_disk()
1779 rcu_assign_pointer(p[conf->raid_disks].rdev, rdev); in raid1_add_disk()
1785 print_conf(conf); in raid1_add_disk()
1791 struct r1conf *conf = mddev->private; in raid1_remove_disk() local
1794 struct raid1_info *p = conf->mirrors + number; in raid1_remove_disk()
1796 if (unlikely(number >= conf->raid_disks)) in raid1_remove_disk()
1800 p = conf->mirrors + conf->raid_disks + number; in raid1_remove_disk()
1802 print_conf(conf); in raid1_remove_disk()
1813 mddev->recovery_disabled != conf->recovery_disabled && in raid1_remove_disk()
1814 mddev->degraded < conf->raid_disks) { in raid1_remove_disk()
1828 if (conf->mirrors[conf->raid_disks + number].rdev) { in raid1_remove_disk()
1834 conf->mirrors[conf->raid_disks + number].rdev; in raid1_remove_disk()
1835 freeze_array(conf, 0); in raid1_remove_disk()
1844 unfreeze_array(conf); in raid1_remove_disk()
1849 conf->mirrors[conf->raid_disks + number].rdev = NULL; in raid1_remove_disk()
1850 unfreeze_array(conf); in raid1_remove_disk()
1858 print_conf(conf); in raid1_remove_disk()
1915 struct r1conf *conf = mddev->private; in end_sync_write() local
1918 struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev; in end_sync_write()
1929 !is_badblock(conf->mirrors[r1_bio->read_disk].rdev, in end_sync_write()
1972 struct r1conf *conf = mddev->private; in fix_sync_read_error() local
1980 rdev = conf->mirrors[r1_bio->read_disk].rdev; in fix_sync_read_error()
2006 rdev = conf->mirrors[d].rdev; in fix_sync_read_error()
2015 if (d == conf->raid_disks * 2) in fix_sync_read_error()
2030 for (d = 0; d < conf->raid_disks * 2; d++) { in fix_sync_read_error()
2031 rdev = conf->mirrors[d].rdev; in fix_sync_read_error()
2038 conf->recovery_disabled = in fix_sync_read_error()
2056 d = conf->raid_disks * 2; in fix_sync_read_error()
2060 rdev = conf->mirrors[d].rdev; in fix_sync_read_error()
2071 d = conf->raid_disks * 2; in fix_sync_read_error()
2075 rdev = conf->mirrors[d].rdev; in fix_sync_read_error()
2100 struct r1conf *conf = mddev->private; in process_checks() local
2107 for (i = 0; i < conf->raid_disks * 2; i++) { in process_checks()
2118 conf->mirrors[i].rdev->data_offset; in process_checks()
2119 bio_set_dev(b, conf->mirrors[i].rdev->bdev); in process_checks()
2127 for (primary = 0; primary < conf->raid_disks * 2; primary++) in process_checks()
2131 rdev_dec_pending(conf->mirrors[primary].rdev, mddev); in process_checks()
2135 for (i = 0; i < conf->raid_disks * 2; i++) { in process_checks()
2169 rdev_dec_pending(conf->mirrors[i].rdev, mddev); in process_checks()
2179 struct r1conf *conf = mddev->private; in sync_request_write() local
2181 int disks = conf->raid_disks * 2; in sync_request_write()
2203 if (test_bit(Faulty, &conf->mirrors[i].rdev->flags)) { in sync_request_write()
2209 if (test_bit(FailFast, &conf->mirrors[i].rdev->flags)) in sync_request_write()
2214 md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio)); in sync_request_write()
2230 static void fix_read_error(struct r1conf *conf, int read_disk, in fix_read_error() argument
2233 struct mddev *mddev = conf->mddev; in fix_read_error()
2249 rdev = rcu_dereference(conf->mirrors[d].rdev); in fix_read_error()
2259 conf->tmppage, REQ_OP_READ, 0, false)) in fix_read_error()
2267 if (d == conf->raid_disks * 2) in fix_read_error()
2273 struct md_rdev *rdev = conf->mirrors[read_disk].rdev; in fix_read_error()
2282 d = conf->raid_disks * 2; in fix_read_error()
2285 rdev = rcu_dereference(conf->mirrors[d].rdev); in fix_read_error()
2291 conf->tmppage, WRITE); in fix_read_error()
2300 d = conf->raid_disks * 2; in fix_read_error()
2303 rdev = rcu_dereference(conf->mirrors[d].rdev); in fix_read_error()
2309 conf->tmppage, READ)) { in fix_read_error()
2329 struct r1conf *conf = mddev->private; in narrow_write_error() local
2330 struct md_rdev *rdev = conf->mirrors[i].rdev; in narrow_write_error()
2396 static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio) in handle_sync_write_finished() argument
2400 for (m = 0; m < conf->raid_disks * 2 ; m++) { in handle_sync_write_finished()
2401 struct md_rdev *rdev = conf->mirrors[m].rdev; in handle_sync_write_finished()
2412 md_error(conf->mddev, rdev); in handle_sync_write_finished()
2416 md_done_sync(conf->mddev, s, 1); in handle_sync_write_finished()
2419 static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio) in handle_write_finished() argument
2424 for (m = 0; m < conf->raid_disks * 2 ; m++) in handle_write_finished()
2426 struct md_rdev *rdev = conf->mirrors[m].rdev; in handle_write_finished()
2430 rdev_dec_pending(rdev, conf->mddev); in handle_write_finished()
2438 md_error(conf->mddev, in handle_write_finished()
2439 conf->mirrors[m].rdev); in handle_write_finished()
2443 rdev_dec_pending(conf->mirrors[m].rdev, in handle_write_finished()
2444 conf->mddev); in handle_write_finished()
2447 spin_lock_irq(&conf->device_lock); in handle_write_finished()
2448 list_add(&r1_bio->retry_list, &conf->bio_end_io_list); in handle_write_finished()
2450 atomic_inc(&conf->nr_queued[idx]); in handle_write_finished()
2451 spin_unlock_irq(&conf->device_lock); in handle_write_finished()
2456 wake_up(&conf->wait_barrier); in handle_write_finished()
2457 md_wakeup_thread(conf->mddev->thread); in handle_write_finished()
2465 static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio) in handle_read_error() argument
2467 struct mddev *mddev = conf->mddev; in handle_read_error()
2485 rdev = conf->mirrors[r1_bio->read_disk].rdev; in handle_read_error()
2488 freeze_array(conf, 1); in handle_read_error()
2489 fix_read_error(conf, r1_bio->read_disk, in handle_read_error()
2491 unfreeze_array(conf); in handle_read_error()
2498 rdev_dec_pending(rdev, conf->mddev); in handle_read_error()
2499 allow_barrier(conf, r1_bio->sector); in handle_read_error()
2512 struct r1conf *conf = mddev->private; in raid1d() local
2513 struct list_head *head = &conf->retry_list; in raid1d()
2519 if (!list_empty_careful(&conf->bio_end_io_list) && in raid1d()
2522 spin_lock_irqsave(&conf->device_lock, flags); in raid1d()
2524 list_splice_init(&conf->bio_end_io_list, &tmp); in raid1d()
2525 spin_unlock_irqrestore(&conf->device_lock, flags); in raid1d()
2531 atomic_dec(&conf->nr_queued[idx]); in raid1d()
2543 flush_pending_writes(conf); in raid1d()
2545 spin_lock_irqsave(&conf->device_lock, flags); in raid1d()
2547 spin_unlock_irqrestore(&conf->device_lock, flags); in raid1d()
2553 atomic_dec(&conf->nr_queued[idx]); in raid1d()
2554 spin_unlock_irqrestore(&conf->device_lock, flags); in raid1d()
2557 conf = mddev->private; in raid1d()
2561 handle_sync_write_finished(conf, r1_bio); in raid1d()
2566 handle_write_finished(conf, r1_bio); in raid1d()
2568 handle_read_error(conf, r1_bio); in raid1d()
2579 static int init_resync(struct r1conf *conf) in init_resync() argument
2584 BUG_ON(mempool_initialized(&conf->r1buf_pool)); in init_resync()
2586 return mempool_init(&conf->r1buf_pool, buffs, r1buf_pool_alloc, in init_resync()
2587 r1buf_pool_free, conf->poolinfo); in init_resync()
2590 static struct r1bio *raid1_alloc_init_r1buf(struct r1conf *conf) in raid1_alloc_init_r1buf() argument
2592 struct r1bio *r1bio = mempool_alloc(&conf->r1buf_pool, GFP_NOIO); in raid1_alloc_init_r1buf()
2597 for (i = conf->poolinfo->raid_disks; i--; ) { in raid1_alloc_init_r1buf()
2620 struct r1conf *conf = mddev->private; in raid1_sync_request() local
2635 if (!mempool_initialized(&conf->r1buf_pool)) in raid1_sync_request()
2636 if (init_resync(conf)) in raid1_sync_request()
2650 conf->fullsync = 0; in raid1_sync_request()
2653 close_sync(conf); in raid1_sync_request()
2656 conf->cluster_sync_low = 0; in raid1_sync_request()
2657 conf->cluster_sync_high = 0; in raid1_sync_request()
2665 conf->fullsync == 0) { in raid1_sync_request()
2673 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { in raid1_sync_request()
2683 if (atomic_read(&conf->nr_waiting[idx])) in raid1_sync_request()
2691 mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high)); in raid1_sync_request()
2694 if (raise_barrier(conf, sector_nr)) in raid1_sync_request()
2697 r1_bio = raid1_alloc_init_r1buf(conf); in raid1_sync_request()
2716 for (i = 0; i < conf->raid_disks * 2; i++) { in raid1_sync_request()
2720 rdev = rcu_dereference(conf->mirrors[i].rdev); in raid1_sync_request()
2723 if (i < conf->raid_disks) in raid1_sync_request()
2788 for (i = 0 ; i < conf->raid_disks * 2 ; i++) in raid1_sync_request()
2790 struct md_rdev *rdev = conf->mirrors[i].rdev; in raid1_sync_request()
2805 conf->recovery_disabled = mddev->recovery_disabled; in raid1_sync_request()
2851 !conf->fullsync && in raid1_sync_request()
2858 for (i = 0 ; i < conf->raid_disks * 2; i++) { in raid1_sync_request()
2881 conf->cluster_sync_high < sector_nr + nr_sectors) { in raid1_sync_request()
2882 conf->cluster_sync_low = mddev->curr_resync_completed; in raid1_sync_request()
2883 conf->cluster_sync_high = conf->cluster_sync_low + CLUSTER_RESYNC_WINDOW_SECTORS; in raid1_sync_request()
2886 conf->cluster_sync_low, in raid1_sync_request()
2887 conf->cluster_sync_high); in raid1_sync_request()
2895 for (i = 0; i < conf->raid_disks * 2 && read_targets; i++) { in raid1_sync_request()
2926 struct r1conf *conf; in setup_conf() local
2932 conf = kzalloc(sizeof(struct r1conf), GFP_KERNEL); in setup_conf()
2933 if (!conf) in setup_conf()
2936 conf->nr_pending = kcalloc(BARRIER_BUCKETS_NR, in setup_conf()
2938 if (!conf->nr_pending) in setup_conf()
2941 conf->nr_waiting = kcalloc(BARRIER_BUCKETS_NR, in setup_conf()
2943 if (!conf->nr_waiting) in setup_conf()
2946 conf->nr_queued = kcalloc(BARRIER_BUCKETS_NR, in setup_conf()
2948 if (!conf->nr_queued) in setup_conf()
2951 conf->barrier = kcalloc(BARRIER_BUCKETS_NR, in setup_conf()
2953 if (!conf->barrier) in setup_conf()
2956 conf->mirrors = kzalloc(array3_size(sizeof(struct raid1_info), in setup_conf()
2959 if (!conf->mirrors) in setup_conf()
2962 conf->tmppage = alloc_page(GFP_KERNEL); in setup_conf()
2963 if (!conf->tmppage) in setup_conf()
2966 conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL); in setup_conf()
2967 if (!conf->poolinfo) in setup_conf()
2969 conf->poolinfo->raid_disks = mddev->raid_disks * 2; in setup_conf()
2970 err = mempool_init(&conf->r1bio_pool, NR_RAID_BIOS, r1bio_pool_alloc, in setup_conf()
2971 rbio_pool_free, conf->poolinfo); in setup_conf()
2975 err = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0); in setup_conf()
2979 conf->poolinfo->mddev = mddev; in setup_conf()
2982 spin_lock_init(&conf->device_lock); in setup_conf()
2989 disk = conf->mirrors + mddev->raid_disks + disk_idx; in setup_conf()
2991 disk = conf->mirrors + disk_idx; in setup_conf()
2999 conf->raid_disks = mddev->raid_disks; in setup_conf()
3000 conf->mddev = mddev; in setup_conf()
3001 INIT_LIST_HEAD(&conf->retry_list); in setup_conf()
3002 INIT_LIST_HEAD(&conf->bio_end_io_list); in setup_conf()
3004 spin_lock_init(&conf->resync_lock); in setup_conf()
3005 init_waitqueue_head(&conf->wait_barrier); in setup_conf()
3007 bio_list_init(&conf->pending_bio_list); in setup_conf()
3008 conf->pending_count = 0; in setup_conf()
3009 conf->recovery_disabled = mddev->recovery_disabled - 1; in setup_conf()
3012 for (i = 0; i < conf->raid_disks * 2; i++) { in setup_conf()
3014 disk = conf->mirrors + i; in setup_conf()
3016 if (i < conf->raid_disks && in setup_conf()
3017 disk[conf->raid_disks].rdev) { in setup_conf()
3024 disk[conf->raid_disks].rdev; in setup_conf()
3025 disk[conf->raid_disks].rdev = NULL; in setup_conf()
3036 conf->fullsync = 1; in setup_conf()
3041 conf->thread = md_register_thread(raid1d, mddev, "raid1"); in setup_conf()
3042 if (!conf->thread) in setup_conf()
3045 return conf; in setup_conf()
3048 if (conf) { in setup_conf()
3049 mempool_exit(&conf->r1bio_pool); in setup_conf()
3050 kfree(conf->mirrors); in setup_conf()
3051 safe_put_page(conf->tmppage); in setup_conf()
3052 kfree(conf->poolinfo); in setup_conf()
3053 kfree(conf->nr_pending); in setup_conf()
3054 kfree(conf->nr_waiting); in setup_conf()
3055 kfree(conf->nr_queued); in setup_conf()
3056 kfree(conf->barrier); in setup_conf()
3057 bioset_exit(&conf->bio_split); in setup_conf()
3058 kfree(conf); in setup_conf()
3066 struct r1conf *conf; in raid1_run() local
3090 conf = setup_conf(mddev); in raid1_run()
3092 conf = mddev->private; in raid1_run()
3094 if (IS_ERR(conf)) in raid1_run()
3095 return PTR_ERR(conf); in raid1_run()
3112 for (i = 0; i < conf->raid_disks; i++) in raid1_run()
3113 if (conf->mirrors[i].rdev == NULL || in raid1_run()
3114 !test_bit(In_sync, &conf->mirrors[i].rdev->flags) || in raid1_run()
3115 test_bit(Faulty, &conf->mirrors[i].rdev->flags)) in raid1_run()
3120 if (conf->raid_disks - mddev->degraded < 1) { in raid1_run()
3121 md_unregister_thread(&conf->thread); in raid1_run()
3126 if (conf->raid_disks - mddev->degraded == 1) in raid1_run()
3139 mddev->thread = conf->thread; in raid1_run()
3140 conf->thread = NULL; in raid1_run()
3141 mddev->private = conf; in raid1_run()
3163 raid1_free(mddev, conf); in raid1_run()
3169 struct r1conf *conf = priv; in raid1_free() local
3171 mempool_exit(&conf->r1bio_pool); in raid1_free()
3172 kfree(conf->mirrors); in raid1_free()
3173 safe_put_page(conf->tmppage); in raid1_free()
3174 kfree(conf->poolinfo); in raid1_free()
3175 kfree(conf->nr_pending); in raid1_free()
3176 kfree(conf->nr_waiting); in raid1_free()
3177 kfree(conf->nr_queued); in raid1_free()
3178 kfree(conf->barrier); in raid1_free()
3179 bioset_exit(&conf->bio_split); in raid1_free()
3180 kfree(conf); in raid1_free()
3216 * 2/ resize conf->mirrors in raid1_reshape()
3220 * Then resize conf->mirrors and swap in the new r1bio pool. in raid1_reshape()
3228 struct r1conf *conf = mddev->private; in raid1_reshape() local
3252 if (raid_disks < conf->raid_disks) { in raid1_reshape()
3254 for (d= 0; d < conf->raid_disks; d++) in raid1_reshape()
3255 if (conf->mirrors[d].rdev) in raid1_reshape()
3282 freeze_array(conf, 0); in raid1_reshape()
3285 oldpool = conf->r1bio_pool; in raid1_reshape()
3286 conf->r1bio_pool = newpool; in raid1_reshape()
3288 for (d = d2 = 0; d < conf->raid_disks; d++) { in raid1_reshape()
3289 struct md_rdev *rdev = conf->mirrors[d].rdev; in raid1_reshape()
3301 kfree(conf->mirrors); in raid1_reshape()
3302 conf->mirrors = newmirrors; in raid1_reshape()
3303 kfree(conf->poolinfo); in raid1_reshape()
3304 conf->poolinfo = newpoolinfo; in raid1_reshape()
3306 spin_lock_irqsave(&conf->device_lock, flags); in raid1_reshape()
3307 mddev->degraded += (raid_disks - conf->raid_disks); in raid1_reshape()
3308 spin_unlock_irqrestore(&conf->device_lock, flags); in raid1_reshape()
3309 conf->raid_disks = mddev->raid_disks = raid_disks; in raid1_reshape()
3312 unfreeze_array(conf); in raid1_reshape()
3324 struct r1conf *conf = mddev->private; in raid1_quiesce() local
3327 freeze_array(conf, 0); in raid1_quiesce()
3329 unfreeze_array(conf); in raid1_quiesce()
3338 struct r1conf *conf; in raid1_takeover() local
3342 conf = setup_conf(mddev); in raid1_takeover()
3343 if (!IS_ERR(conf)) { in raid1_takeover()
3345 conf->array_frozen = 1; in raid1_takeover()
3349 return conf; in raid1_takeover()