Lines Matching full:sh
34 * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq
121 static inline int raid6_d0(struct stripe_head *sh) in raid6_d0() argument
123 if (sh->ddf_layout) in raid6_d0()
127 if (sh->qd_idx == sh->disks - 1) in raid6_d0()
130 return sh->qd_idx + 1; in raid6_d0()
143 static int raid6_idx_to_slot(int idx, struct stripe_head *sh, in raid6_idx_to_slot() argument
148 if (sh->ddf_layout) in raid6_idx_to_slot()
150 if (idx == sh->pd_idx) in raid6_idx_to_slot()
152 if (idx == sh->qd_idx) in raid6_idx_to_slot()
154 if (!sh->ddf_layout) in raid6_idx_to_slot()
161 static int stripe_operations_active(struct stripe_head *sh) in stripe_operations_active() argument
163 return sh->check_state || sh->reconstruct_state || in stripe_operations_active()
164 test_bit(STRIPE_BIOFILL_RUN, &sh->state) || in stripe_operations_active()
165 test_bit(STRIPE_COMPUTE_RUN, &sh->state); in stripe_operations_active()
168 static bool stripe_is_lowprio(struct stripe_head *sh) in stripe_is_lowprio() argument
170 return (test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state) || in stripe_is_lowprio()
171 test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state)) && in stripe_is_lowprio()
172 !test_bit(STRIPE_R5C_CACHING, &sh->state); in stripe_is_lowprio()
175 static void raid5_wakeup_stripe_thread(struct stripe_head *sh) in raid5_wakeup_stripe_thread() argument
177 struct r5conf *conf = sh->raid_conf; in raid5_wakeup_stripe_thread()
180 int i, cpu = sh->cpu; in raid5_wakeup_stripe_thread()
184 sh->cpu = cpu; in raid5_wakeup_stripe_thread()
187 if (list_empty(&sh->lru)) { in raid5_wakeup_stripe_thread()
190 if (stripe_is_lowprio(sh)) in raid5_wakeup_stripe_thread()
191 list_add_tail(&sh->lru, &group->loprio_list); in raid5_wakeup_stripe_thread()
193 list_add_tail(&sh->lru, &group->handle_list); in raid5_wakeup_stripe_thread()
195 sh->group = group; in raid5_wakeup_stripe_thread()
203 group = conf->worker_groups + cpu_to_group(sh->cpu); in raid5_wakeup_stripe_thread()
207 queue_work_on(sh->cpu, raid5_wq, &group->workers[0].work); in raid5_wakeup_stripe_thread()
214 queue_work_on(sh->cpu, raid5_wq, in raid5_wakeup_stripe_thread()
221 static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh, in do_release_stripe() argument
227 BUG_ON(!list_empty(&sh->lru)); in do_release_stripe()
231 for (i = sh->disks; i--; ) in do_release_stripe()
232 if (test_bit(R5_InJournal, &sh->dev[i].flags)) in do_release_stripe()
241 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state) || in do_release_stripe()
243 !test_bit(STRIPE_HANDLE, &sh->state) && injournal != 0)) { in do_release_stripe()
244 if (test_bit(STRIPE_R5C_CACHING, &sh->state)) in do_release_stripe()
245 r5c_make_stripe_write_out(sh); in do_release_stripe()
246 set_bit(STRIPE_HANDLE, &sh->state); in do_release_stripe()
249 if (test_bit(STRIPE_HANDLE, &sh->state)) { in do_release_stripe()
250 if (test_bit(STRIPE_DELAYED, &sh->state) && in do_release_stripe()
251 !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in do_release_stripe()
252 list_add_tail(&sh->lru, &conf->delayed_list); in do_release_stripe()
253 else if (test_bit(STRIPE_BIT_DELAY, &sh->state) && in do_release_stripe()
254 sh->bm_seq - conf->seq_write > 0) in do_release_stripe()
255 list_add_tail(&sh->lru, &conf->bitmap_list); in do_release_stripe()
257 clear_bit(STRIPE_DELAYED, &sh->state); in do_release_stripe()
258 clear_bit(STRIPE_BIT_DELAY, &sh->state); in do_release_stripe()
260 if (stripe_is_lowprio(sh)) in do_release_stripe()
261 list_add_tail(&sh->lru, in do_release_stripe()
264 list_add_tail(&sh->lru, in do_release_stripe()
267 raid5_wakeup_stripe_thread(sh); in do_release_stripe()
273 BUG_ON(stripe_operations_active(sh)); in do_release_stripe()
274 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in do_release_stripe()
279 if (!test_bit(STRIPE_EXPANDING, &sh->state)) { in do_release_stripe()
281 list_add_tail(&sh->lru, temp_inactive_list); in do_release_stripe()
283 WARN_ON(test_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags)); in do_release_stripe()
285 list_add_tail(&sh->lru, temp_inactive_list); in do_release_stripe()
288 if (!test_and_set_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) in do_release_stripe()
290 if (test_and_clear_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state)) in do_release_stripe()
292 list_add_tail(&sh->lru, &conf->r5c_full_stripe_list); in do_release_stripe()
300 list_add_tail(&sh->lru, &conf->r5c_partial_stripe_list); in do_release_stripe()
306 static void __release_stripe(struct r5conf *conf, struct stripe_head *sh, in __release_stripe() argument
309 if (atomic_dec_and_test(&sh->count)) in __release_stripe()
310 do_release_stripe(conf, sh, temp_inactive_list); in __release_stripe()
366 struct stripe_head *sh, *t; in release_stripe_list() local
372 llist_for_each_entry_safe(sh, t, head, release_list) { in release_stripe_list()
375 /* sh could be re-added after STRIPE_ON_RELEASE_LIST is cleared */ in release_stripe_list()
377 clear_bit(STRIPE_ON_RELEASE_LIST, &sh->state); in release_stripe_list()
383 hash = sh->hash_lock_index; in release_stripe_list()
384 __release_stripe(conf, sh, &temp_inactive_list[hash]); in release_stripe_list()
391 void raid5_release_stripe(struct stripe_head *sh) in raid5_release_stripe() argument
393 struct r5conf *conf = sh->raid_conf; in raid5_release_stripe()
401 if (atomic_add_unless(&sh->count, -1, 1)) in raid5_release_stripe()
405 test_and_set_bit(STRIPE_ON_RELEASE_LIST, &sh->state)) in raid5_release_stripe()
407 wakeup = llist_add(&sh->release_list, &conf->released_stripes); in raid5_release_stripe()
413 if (atomic_dec_and_lock_irqsave(&sh->count, &conf->device_lock, flags)) { in raid5_release_stripe()
415 hash = sh->hash_lock_index; in raid5_release_stripe()
416 do_release_stripe(conf, sh, &list); in raid5_release_stripe()
422 static inline void remove_hash(struct stripe_head *sh) in remove_hash() argument
425 (unsigned long long)sh->sector); in remove_hash()
427 hlist_del_init(&sh->hash); in remove_hash()
430 static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh) in insert_hash() argument
432 struct hlist_head *hp = stripe_hash(conf, sh->sector); in insert_hash()
435 (unsigned long long)sh->sector); in insert_hash()
437 hlist_add_head(&sh->hash, hp); in insert_hash()
443 struct stripe_head *sh = NULL; in get_free_stripe() local
449 sh = list_entry(first, struct stripe_head, lru); in get_free_stripe()
451 remove_hash(sh); in get_free_stripe()
453 BUG_ON(hash != sh->hash_lock_index); in get_free_stripe()
457 return sh; in get_free_stripe()
460 static void shrink_buffers(struct stripe_head *sh) in shrink_buffers() argument
464 int num = sh->raid_conf->pool_size; in shrink_buffers()
467 WARN_ON(sh->dev[i].page != sh->dev[i].orig_page); in shrink_buffers()
468 p = sh->dev[i].page; in shrink_buffers()
471 sh->dev[i].page = NULL; in shrink_buffers()
476 static int grow_buffers(struct stripe_head *sh, gfp_t gfp) in grow_buffers() argument
479 int num = sh->raid_conf->pool_size; in grow_buffers()
487 sh->dev[i].page = page; in grow_buffers()
488 sh->dev[i].orig_page = page; in grow_buffers()
495 struct stripe_head *sh);
497 static void init_stripe(struct stripe_head *sh, sector_t sector, int previous) in init_stripe() argument
499 struct r5conf *conf = sh->raid_conf; in init_stripe()
502 BUG_ON(atomic_read(&sh->count) != 0); in init_stripe()
503 BUG_ON(test_bit(STRIPE_HANDLE, &sh->state)); in init_stripe()
504 BUG_ON(stripe_operations_active(sh)); in init_stripe()
505 BUG_ON(sh->batch_head); in init_stripe()
511 sh->generation = conf->generation - previous; in init_stripe()
512 sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks; in init_stripe()
513 sh->sector = sector; in init_stripe()
514 stripe_set_idx(sector, conf, previous, sh); in init_stripe()
515 sh->state = 0; in init_stripe()
517 for (i = sh->disks; i--; ) { in init_stripe()
518 struct r5dev *dev = &sh->dev[i]; in init_stripe()
523 (unsigned long long)sh->sector, i, dev->toread, in init_stripe()
529 dev->sector = raid5_compute_blocknr(sh, i, previous); in init_stripe()
533 sh->overwrite_disks = 0; in init_stripe()
534 insert_hash(conf, sh); in init_stripe()
535 sh->cpu = smp_processor_id(); in init_stripe()
536 set_bit(STRIPE_BATCH_READY, &sh->state); in init_stripe()
542 struct stripe_head *sh; in __find_stripe() local
545 hlist_for_each_entry(sh, stripe_hash(conf, sector), hash) in __find_stripe()
546 if (sh->sector == sector && sh->generation == generation) in __find_stripe()
547 return sh; in __find_stripe()
638 struct stripe_head *sh; in raid5_get_active_stripe() local
650 sh = __find_stripe(conf, sector, conf->generation - previous); in raid5_get_active_stripe()
651 if (!sh) { in raid5_get_active_stripe()
653 sh = get_free_stripe(conf, hash); in raid5_get_active_stripe()
654 if (!sh && !test_bit(R5_DID_ALLOC, in raid5_get_active_stripe()
659 if (noblock && sh == NULL) in raid5_get_active_stripe()
663 if (!sh) { in raid5_get_active_stripe()
678 init_stripe(sh, sector, previous); in raid5_get_active_stripe()
679 atomic_inc(&sh->count); in raid5_get_active_stripe()
681 } else if (!atomic_inc_not_zero(&sh->count)) { in raid5_get_active_stripe()
683 if (!atomic_read(&sh->count)) { in raid5_get_active_stripe()
684 if (!test_bit(STRIPE_HANDLE, &sh->state)) in raid5_get_active_stripe()
686 BUG_ON(list_empty(&sh->lru) && in raid5_get_active_stripe()
687 !test_bit(STRIPE_EXPANDING, &sh->state)); in raid5_get_active_stripe()
691 list_del_init(&sh->lru); in raid5_get_active_stripe()
694 if (sh->group) { in raid5_get_active_stripe()
695 sh->group->stripes_cnt--; in raid5_get_active_stripe()
696 sh->group = NULL; in raid5_get_active_stripe()
699 atomic_inc(&sh->count); in raid5_get_active_stripe()
702 } while (sh == NULL); in raid5_get_active_stripe()
705 return sh; in raid5_get_active_stripe()
708 static bool is_full_stripe_write(struct stripe_head *sh) in is_full_stripe_write() argument
710 BUG_ON(sh->overwrite_disks > (sh->disks - sh->raid_conf->max_degraded)); in is_full_stripe_write()
711 return sh->overwrite_disks == (sh->disks - sh->raid_conf->max_degraded); in is_full_stripe_write()
732 static bool stripe_can_batch(struct stripe_head *sh) in stripe_can_batch() argument
734 struct r5conf *conf = sh->raid_conf; in stripe_can_batch()
738 return test_bit(STRIPE_BATCH_READY, &sh->state) && in stripe_can_batch()
739 !test_bit(STRIPE_BITMAP_PENDING, &sh->state) && in stripe_can_batch()
740 is_full_stripe_write(sh); in stripe_can_batch()
744 static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh) in stripe_add_to_batch_list() argument
753 tmp_sec = sh->sector; in stripe_add_to_batch_list()
756 head_sector = sh->sector - STRIPE_SECTORS; in stripe_add_to_batch_list()
789 lock_two_stripes(head, sh); in stripe_add_to_batch_list()
791 if (!stripe_can_batch(head) || !stripe_can_batch(sh)) in stripe_add_to_batch_list()
794 if (sh->batch_head) in stripe_add_to_batch_list()
798 while (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx) in stripe_add_to_batch_list()
800 if (head->dev[dd_idx].towrite->bi_opf != sh->dev[dd_idx].towrite->bi_opf || in stripe_add_to_batch_list()
801 bio_op(head->dev[dd_idx].towrite) != bio_op(sh->dev[dd_idx].towrite)) in stripe_add_to_batch_list()
818 sh->batch_head = head->batch_head; in stripe_add_to_batch_list()
824 list_add(&sh->batch_list, &head->batch_list); in stripe_add_to_batch_list()
828 sh->batch_head = head->batch_head; in stripe_add_to_batch_list()
830 list_add_tail(&sh->batch_list, &head->batch_list); in stripe_add_to_batch_list()
834 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in stripe_add_to_batch_list()
839 if (test_and_clear_bit(STRIPE_BIT_DELAY, &sh->state)) { in stripe_add_to_batch_list()
840 int seq = sh->bm_seq; in stripe_add_to_batch_list()
841 if (test_bit(STRIPE_BIT_DELAY, &sh->batch_head->state) && in stripe_add_to_batch_list()
842 sh->batch_head->bm_seq > seq) in stripe_add_to_batch_list()
843 seq = sh->batch_head->bm_seq; in stripe_add_to_batch_list()
844 set_bit(STRIPE_BIT_DELAY, &sh->batch_head->state); in stripe_add_to_batch_list()
845 sh->batch_head->bm_seq = seq; in stripe_add_to_batch_list()
848 atomic_inc(&sh->count); in stripe_add_to_batch_list()
850 unlock_two_stripes(head, sh); in stripe_add_to_batch_list()
858 static int use_new_offset(struct r5conf *conf, struct stripe_head *sh) in use_new_offset() argument
868 if (sh->generation == conf->generation - 1) in use_new_offset()
984 static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) in ops_run_io() argument
986 struct r5conf *conf = sh->raid_conf; in ops_run_io()
987 int i, disks = sh->disks; in ops_run_io()
988 struct stripe_head *head_sh = sh; in ops_run_io()
994 if (log_stripe(sh, s) == 0) in ops_run_io()
1005 sh = head_sh; in ops_run_io()
1006 if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) { in ops_run_io()
1008 if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags)) in ops_run_io()
1010 if (test_bit(R5_Discard, &sh->dev[i].flags)) in ops_run_io()
1012 } else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags)) in ops_run_io()
1015 &sh->dev[i].flags)) { in ops_run_io()
1020 if (test_and_clear_bit(R5_SyncIO, &sh->dev[i].flags)) in ops_run_io()
1024 bi = &sh->dev[i].req; in ops_run_io()
1025 rbi = &sh->dev[i].rreq; /* For writing to replacement */ in ops_run_io()
1065 int bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS, in ops_run_io()
1099 set_bit(STRIPE_IO_STARTED, &sh->state); in ops_run_io()
1106 bi->bi_private = sh; in ops_run_io()
1109 __func__, (unsigned long long)sh->sector, in ops_run_io()
1111 atomic_inc(&sh->count); in ops_run_io()
1112 if (sh != head_sh) in ops_run_io()
1114 if (use_new_offset(conf, sh)) in ops_run_io()
1115 bi->bi_iter.bi_sector = (sh->sector in ops_run_io()
1118 bi->bi_iter.bi_sector = (sh->sector in ops_run_io()
1123 if (test_bit(R5_SkipCopy, &sh->dev[i].flags)) in ops_run_io()
1124 WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags)); in ops_run_io()
1127 test_bit(R5_InJournal, &sh->dev[i].flags)) in ops_run_io()
1133 sh->dev[i].vec.bv_page = sh->dev[i].orig_page; in ops_run_io()
1135 sh->dev[i].vec.bv_page = sh->dev[i].page; in ops_run_io()
1140 bi->bi_write_hint = sh->dev[i].write_hint; in ops_run_io()
1142 sh->dev[i].write_hint = RWF_WRITE_LIFE_NOT_SET; in ops_run_io()
1150 set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags); in ops_run_io()
1155 sh->dev[i].sector); in ops_run_io()
1166 set_bit(STRIPE_IO_STARTED, &sh->state); in ops_run_io()
1172 rbi->bi_private = sh; in ops_run_io()
1176 __func__, (unsigned long long)sh->sector, in ops_run_io()
1178 atomic_inc(&sh->count); in ops_run_io()
1179 if (sh != head_sh) in ops_run_io()
1181 if (use_new_offset(conf, sh)) in ops_run_io()
1182 rbi->bi_iter.bi_sector = (sh->sector in ops_run_io()
1185 rbi->bi_iter.bi_sector = (sh->sector in ops_run_io()
1187 if (test_bit(R5_SkipCopy, &sh->dev[i].flags)) in ops_run_io()
1188 WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags)); in ops_run_io()
1189 sh->dev[i].rvec.bv_page = sh->dev[i].page; in ops_run_io()
1194 rbi->bi_write_hint = sh->dev[i].write_hint; in ops_run_io()
1195 sh->dev[i].write_hint = RWF_WRITE_LIFE_NOT_SET; in ops_run_io()
1205 sh->dev[i].sector); in ops_run_io()
1213 set_bit(STRIPE_DEGRADED, &sh->state); in ops_run_io()
1215 bi->bi_opf, i, (unsigned long long)sh->sector); in ops_run_io()
1216 clear_bit(R5_LOCKED, &sh->dev[i].flags); in ops_run_io()
1217 set_bit(STRIPE_HANDLE, &sh->state); in ops_run_io()
1222 sh = list_first_entry(&sh->batch_list, struct stripe_head, in ops_run_io()
1224 if (sh != head_sh) in ops_run_io()
1235 struct stripe_head *sh, int no_skipcopy) in async_copy_data() argument
1273 if (sh->raid_conf->skip_copy && in async_copy_data()
1298 struct stripe_head *sh = stripe_head_ref; in ops_complete_biofill() local
1302 (unsigned long long)sh->sector); in ops_complete_biofill()
1305 for (i = sh->disks; i--; ) { in ops_complete_biofill()
1306 struct r5dev *dev = &sh->dev[i]; in ops_complete_biofill()
1327 clear_bit(STRIPE_BIOFILL_RUN, &sh->state); in ops_complete_biofill()
1329 set_bit(STRIPE_HANDLE, &sh->state); in ops_complete_biofill()
1330 raid5_release_stripe(sh); in ops_complete_biofill()
1333 static void ops_run_biofill(struct stripe_head *sh) in ops_run_biofill() argument
1339 BUG_ON(sh->batch_head); in ops_run_biofill()
1341 (unsigned long long)sh->sector); in ops_run_biofill()
1343 for (i = sh->disks; i--; ) { in ops_run_biofill()
1344 struct r5dev *dev = &sh->dev[i]; in ops_run_biofill()
1347 spin_lock_irq(&sh->stripe_lock); in ops_run_biofill()
1350 spin_unlock_irq(&sh->stripe_lock); in ops_run_biofill()
1354 dev->sector, tx, sh, 0); in ops_run_biofill()
1360 atomic_inc(&sh->count); in ops_run_biofill()
1361 init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_biofill, sh, NULL); in ops_run_biofill()
1365 static void mark_target_uptodate(struct stripe_head *sh, int target) in mark_target_uptodate() argument
1372 tgt = &sh->dev[target]; in mark_target_uptodate()
1380 struct stripe_head *sh = stripe_head_ref; in ops_complete_compute() local
1383 (unsigned long long)sh->sector); in ops_complete_compute()
1386 mark_target_uptodate(sh, sh->ops.target); in ops_complete_compute()
1387 mark_target_uptodate(sh, sh->ops.target2); in ops_complete_compute()
1389 clear_bit(STRIPE_COMPUTE_RUN, &sh->state); in ops_complete_compute()
1390 if (sh->check_state == check_state_compute_run) in ops_complete_compute()
1391 sh->check_state = check_state_compute_result; in ops_complete_compute()
1392 set_bit(STRIPE_HANDLE, &sh->state); in ops_complete_compute()
1393 raid5_release_stripe(sh); in ops_complete_compute()
1397 static addr_conv_t *to_addr_conv(struct stripe_head *sh, in to_addr_conv() argument
1403 return addr + sizeof(struct page *) * (sh->disks + 2); in to_addr_conv()
1416 ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu) in ops_run_compute5() argument
1418 int disks = sh->disks; in ops_run_compute5()
1420 int target = sh->ops.target; in ops_run_compute5()
1421 struct r5dev *tgt = &sh->dev[target]; in ops_run_compute5()
1428 BUG_ON(sh->batch_head); in ops_run_compute5()
1431 __func__, (unsigned long long)sh->sector, target); in ops_run_compute5()
1436 xor_srcs[count++] = sh->dev[i].page; in ops_run_compute5()
1438 atomic_inc(&sh->count); in ops_run_compute5()
1441 ops_complete_compute, sh, to_addr_conv(sh, percpu, 0)); in ops_run_compute5()
1451 * @srcs - (struct page *) array of size sh->disks
1452 * @sh - stripe_head to parse
1460 struct stripe_head *sh, in set_syndrome_sources() argument
1463 int disks = sh->disks; in set_syndrome_sources()
1464 int syndrome_disks = sh->ddf_layout ? disks : (disks - 2); in set_syndrome_sources()
1465 int d0_idx = raid6_d0(sh); in set_syndrome_sources()
1475 int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks); in set_syndrome_sources()
1476 struct r5dev *dev = &sh->dev[i]; in set_syndrome_sources()
1478 if (i == sh->qd_idx || i == sh->pd_idx || in set_syndrome_sources()
1487 srcs[slot] = sh->dev[i].orig_page; in set_syndrome_sources()
1489 srcs[slot] = sh->dev[i].page; in set_syndrome_sources()
1498 ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu) in ops_run_compute6_1() argument
1500 int disks = sh->disks; in ops_run_compute6_1()
1503 int qd_idx = sh->qd_idx; in ops_run_compute6_1()
1511 BUG_ON(sh->batch_head); in ops_run_compute6_1()
1512 if (sh->ops.target < 0) in ops_run_compute6_1()
1513 target = sh->ops.target2; in ops_run_compute6_1()
1514 else if (sh->ops.target2 < 0) in ops_run_compute6_1()
1515 target = sh->ops.target; in ops_run_compute6_1()
1521 __func__, (unsigned long long)sh->sector, target); in ops_run_compute6_1()
1523 tgt = &sh->dev[target]; in ops_run_compute6_1()
1527 atomic_inc(&sh->count); in ops_run_compute6_1()
1530 count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_ALL); in ops_run_compute6_1()
1534 ops_complete_compute, sh, in ops_run_compute6_1()
1535 to_addr_conv(sh, percpu, 0)); in ops_run_compute6_1()
1543 blocks[count++] = sh->dev[i].page; in ops_run_compute6_1()
1547 NULL, ops_complete_compute, sh, in ops_run_compute6_1()
1548 to_addr_conv(sh, percpu, 0)); in ops_run_compute6_1()
1556 ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu) in ops_run_compute6_2() argument
1558 int i, count, disks = sh->disks; in ops_run_compute6_2()
1559 int syndrome_disks = sh->ddf_layout ? disks : disks-2; in ops_run_compute6_2()
1560 int d0_idx = raid6_d0(sh); in ops_run_compute6_2()
1562 int target = sh->ops.target; in ops_run_compute6_2()
1563 int target2 = sh->ops.target2; in ops_run_compute6_2()
1564 struct r5dev *tgt = &sh->dev[target]; in ops_run_compute6_2()
1565 struct r5dev *tgt2 = &sh->dev[target2]; in ops_run_compute6_2()
1570 BUG_ON(sh->batch_head); in ops_run_compute6_2()
1572 __func__, (unsigned long long)sh->sector, target, target2); in ops_run_compute6_2()
1585 int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks); in ops_run_compute6_2()
1587 blocks[slot] = sh->dev[i].page; in ops_run_compute6_2()
1600 __func__, (unsigned long long)sh->sector, faila, failb); in ops_run_compute6_2()
1602 atomic_inc(&sh->count); in ops_run_compute6_2()
1609 ops_complete_compute, sh, in ops_run_compute6_2()
1610 to_addr_conv(sh, percpu, 0)); in ops_run_compute6_2()
1616 int qd_idx = sh->qd_idx; in ops_run_compute6_2()
1628 blocks[count++] = sh->dev[i].page; in ops_run_compute6_2()
1630 dest = sh->dev[data_target].page; in ops_run_compute6_2()
1634 to_addr_conv(sh, percpu, 0)); in ops_run_compute6_2()
1638 count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_ALL); in ops_run_compute6_2()
1640 ops_complete_compute, sh, in ops_run_compute6_2()
1641 to_addr_conv(sh, percpu, 0)); in ops_run_compute6_2()
1647 ops_complete_compute, sh, in ops_run_compute6_2()
1648 to_addr_conv(sh, percpu, 0)); in ops_run_compute6_2()
1665 struct stripe_head *sh = stripe_head_ref; in ops_complete_prexor() local
1668 (unsigned long long)sh->sector); in ops_complete_prexor()
1670 if (r5c_is_writeback(sh->raid_conf->log)) in ops_complete_prexor()
1675 r5c_release_extra_page(sh); in ops_complete_prexor()
1679 ops_run_prexor5(struct stripe_head *sh, struct raid5_percpu *percpu, in ops_run_prexor5() argument
1682 int disks = sh->disks; in ops_run_prexor5()
1684 int count = 0, pd_idx = sh->pd_idx, i; in ops_run_prexor5()
1688 struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; in ops_run_prexor5()
1690 BUG_ON(sh->batch_head); in ops_run_prexor5()
1692 (unsigned long long)sh->sector); in ops_run_prexor5()
1695 struct r5dev *dev = &sh->dev[i]; in ops_run_prexor5()
1704 ops_complete_prexor, sh, to_addr_conv(sh, percpu, 0)); in ops_run_prexor5()
1711 ops_run_prexor6(struct stripe_head *sh, struct raid5_percpu *percpu, in ops_run_prexor6() argument
1719 (unsigned long long)sh->sector); in ops_run_prexor6()
1721 count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_WANT_DRAIN); in ops_run_prexor6()
1724 ops_complete_prexor, sh, to_addr_conv(sh, percpu, 0)); in ops_run_prexor6()
1731 ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) in ops_run_biodrain() argument
1733 struct r5conf *conf = sh->raid_conf; in ops_run_biodrain()
1734 int disks = sh->disks; in ops_run_biodrain()
1736 struct stripe_head *head_sh = sh; in ops_run_biodrain()
1739 (unsigned long long)sh->sector); in ops_run_biodrain()
1745 sh = head_sh; in ops_run_biodrain()
1750 dev = &sh->dev[i]; in ops_run_biodrain()
1756 spin_lock_irq(&sh->stripe_lock); in ops_run_biodrain()
1759 sh->overwrite_disks = 0; in ops_run_biodrain()
1762 spin_unlock_irq(&sh->stripe_lock); in ops_run_biodrain()
1775 dev->sector, tx, sh, in ops_run_biodrain()
1788 sh = list_first_entry(&sh->batch_list, in ops_run_biodrain()
1791 if (sh == head_sh) in ops_run_biodrain()
1803 struct stripe_head *sh = stripe_head_ref; in ops_complete_reconstruct() local
1804 int disks = sh->disks; in ops_complete_reconstruct()
1805 int pd_idx = sh->pd_idx; in ops_complete_reconstruct()
1806 int qd_idx = sh->qd_idx; in ops_complete_reconstruct()
1811 (unsigned long long)sh->sector); in ops_complete_reconstruct()
1814 fua |= test_bit(R5_WantFUA, &sh->dev[i].flags); in ops_complete_reconstruct()
1815 sync |= test_bit(R5_SyncIO, &sh->dev[i].flags); in ops_complete_reconstruct()
1816 discard |= test_bit(R5_Discard, &sh->dev[i].flags); in ops_complete_reconstruct()
1820 struct r5dev *dev = &sh->dev[i]; in ops_complete_reconstruct()
1825 if (test_bit(STRIPE_EXPAND_READY, &sh->state)) in ops_complete_reconstruct()
1835 if (sh->reconstruct_state == reconstruct_state_drain_run) in ops_complete_reconstruct()
1836 sh->reconstruct_state = reconstruct_state_drain_result; in ops_complete_reconstruct()
1837 else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) in ops_complete_reconstruct()
1838 sh->reconstruct_state = reconstruct_state_prexor_drain_result; in ops_complete_reconstruct()
1840 BUG_ON(sh->reconstruct_state != reconstruct_state_run); in ops_complete_reconstruct()
1841 sh->reconstruct_state = reconstruct_state_result; in ops_complete_reconstruct()
1844 set_bit(STRIPE_HANDLE, &sh->state); in ops_complete_reconstruct()
1845 raid5_release_stripe(sh); in ops_complete_reconstruct()
1849 ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu, in ops_run_reconstruct5() argument
1852 int disks = sh->disks; in ops_run_reconstruct5()
1855 int count, pd_idx = sh->pd_idx, i; in ops_run_reconstruct5()
1860 struct stripe_head *head_sh = sh; in ops_run_reconstruct5()
1864 (unsigned long long)sh->sector); in ops_run_reconstruct5()
1866 for (i = 0; i < sh->disks; i++) { in ops_run_reconstruct5()
1869 if (!test_bit(R5_Discard, &sh->dev[i].flags)) in ops_run_reconstruct5()
1872 if (i >= sh->disks) { in ops_run_reconstruct5()
1873 atomic_inc(&sh->count); in ops_run_reconstruct5()
1874 set_bit(R5_Discard, &sh->dev[pd_idx].flags); in ops_run_reconstruct5()
1875 ops_complete_reconstruct(sh); in ops_run_reconstruct5()
1886 xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; in ops_run_reconstruct5()
1888 struct r5dev *dev = &sh->dev[i]; in ops_run_reconstruct5()
1894 xor_dest = sh->dev[pd_idx].page; in ops_run_reconstruct5()
1896 struct r5dev *dev = &sh->dev[i]; in ops_run_reconstruct5()
1908 list_first_entry(&sh->batch_list, in ops_run_reconstruct5()
1916 to_addr_conv(sh, percpu, j)); in ops_run_reconstruct5()
1920 to_addr_conv(sh, percpu, j)); in ops_run_reconstruct5()
1929 sh = list_first_entry(&sh->batch_list, struct stripe_head, in ops_run_reconstruct5()
1936 ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu, in ops_run_reconstruct6() argument
1942 struct stripe_head *head_sh = sh; in ops_run_reconstruct6()
1947 pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector); in ops_run_reconstruct6()
1949 for (i = 0; i < sh->disks; i++) { in ops_run_reconstruct6()
1950 if (sh->pd_idx == i || sh->qd_idx == i) in ops_run_reconstruct6()
1952 if (!test_bit(R5_Discard, &sh->dev[i].flags)) in ops_run_reconstruct6()
1955 if (i >= sh->disks) { in ops_run_reconstruct6()
1956 atomic_inc(&sh->count); in ops_run_reconstruct6()
1957 set_bit(R5_Discard, &sh->dev[sh->pd_idx].flags); in ops_run_reconstruct6()
1958 set_bit(R5_Discard, &sh->dev[sh->qd_idx].flags); in ops_run_reconstruct6()
1959 ops_complete_reconstruct(sh); in ops_run_reconstruct6()
1966 if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) { in ops_run_reconstruct6()
1974 count = set_syndrome_sources(blocks, sh, synflags); in ops_run_reconstruct6()
1976 list_first_entry(&sh->batch_list, in ops_run_reconstruct6()
1982 head_sh, to_addr_conv(sh, percpu, j)); in ops_run_reconstruct6()
1985 to_addr_conv(sh, percpu, j)); in ops_run_reconstruct6()
1989 sh = list_first_entry(&sh->batch_list, struct stripe_head, in ops_run_reconstruct6()
1997 struct stripe_head *sh = stripe_head_ref; in ops_complete_check() local
2000 (unsigned long long)sh->sector); in ops_complete_check()
2002 sh->check_state = check_state_check_result; in ops_complete_check()
2003 set_bit(STRIPE_HANDLE, &sh->state); in ops_complete_check()
2004 raid5_release_stripe(sh); in ops_complete_check()
2007 static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu) in ops_run_check_p() argument
2009 int disks = sh->disks; in ops_run_check_p()
2010 int pd_idx = sh->pd_idx; in ops_run_check_p()
2011 int qd_idx = sh->qd_idx; in ops_run_check_p()
2020 (unsigned long long)sh->sector); in ops_run_check_p()
2022 BUG_ON(sh->batch_head); in ops_run_check_p()
2024 xor_dest = sh->dev[pd_idx].page; in ops_run_check_p()
2029 xor_srcs[count++] = sh->dev[i].page; in ops_run_check_p()
2033 to_addr_conv(sh, percpu, 0)); in ops_run_check_p()
2035 &sh->ops.zero_sum_result, &submit); in ops_run_check_p()
2037 atomic_inc(&sh->count); in ops_run_check_p()
2038 init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_check, sh, NULL); in ops_run_check_p()
2042 static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp) in ops_run_check_pq() argument
2049 (unsigned long long)sh->sector, checkp); in ops_run_check_pq()
2051 BUG_ON(sh->batch_head); in ops_run_check_pq()
2052 count = set_syndrome_sources(srcs, sh, SYNDROME_SRC_ALL); in ops_run_check_pq()
2056 atomic_inc(&sh->count); in ops_run_check_pq()
2058 sh, to_addr_conv(sh, percpu, 0)); in ops_run_check_pq()
2060 &sh->ops.zero_sum_result, percpu->spare_page, &submit); in ops_run_check_pq()
2063 static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request) in raid_run_ops() argument
2065 int overlap_clear = 0, i, disks = sh->disks; in raid_run_ops()
2067 struct r5conf *conf = sh->raid_conf; in raid_run_ops()
2075 ops_run_biofill(sh); in raid_run_ops()
2081 tx = ops_run_compute5(sh, percpu); in raid_run_ops()
2083 if (sh->ops.target2 < 0 || sh->ops.target < 0) in raid_run_ops()
2084 tx = ops_run_compute6_1(sh, percpu); in raid_run_ops()
2086 tx = ops_run_compute6_2(sh, percpu); in raid_run_ops()
2095 tx = ops_run_prexor5(sh, percpu, tx); in raid_run_ops()
2097 tx = ops_run_prexor6(sh, percpu, tx); in raid_run_ops()
2101 tx = ops_run_partial_parity(sh, percpu, tx); in raid_run_ops()
2104 tx = ops_run_biodrain(sh, tx); in raid_run_ops()
2110 ops_run_reconstruct5(sh, percpu, tx); in raid_run_ops()
2112 ops_run_reconstruct6(sh, percpu, tx); in raid_run_ops()
2116 if (sh->check_state == check_state_run) in raid_run_ops()
2117 ops_run_check_p(sh, percpu); in raid_run_ops()
2118 else if (sh->check_state == check_state_run_q) in raid_run_ops()
2119 ops_run_check_pq(sh, percpu, 0); in raid_run_ops()
2120 else if (sh->check_state == check_state_run_pq) in raid_run_ops()
2121 ops_run_check_pq(sh, percpu, 1); in raid_run_ops()
2126 if (overlap_clear && !sh->batch_head) in raid_run_ops()
2128 struct r5dev *dev = &sh->dev[i]; in raid_run_ops()
2130 wake_up(&sh->raid_conf->wait_for_overlap); in raid_run_ops()
2135 static void free_stripe(struct kmem_cache *sc, struct stripe_head *sh) in free_stripe() argument
2137 if (sh->ppl_page) in free_stripe()
2138 __free_page(sh->ppl_page); in free_stripe()
2139 kmem_cache_free(sc, sh); in free_stripe()
2145 struct stripe_head *sh; in alloc_stripe() local
2148 sh = kmem_cache_zalloc(sc, gfp); in alloc_stripe()
2149 if (sh) { in alloc_stripe()
2150 spin_lock_init(&sh->stripe_lock); in alloc_stripe()
2151 spin_lock_init(&sh->batch_lock); in alloc_stripe()
2152 INIT_LIST_HEAD(&sh->batch_list); in alloc_stripe()
2153 INIT_LIST_HEAD(&sh->lru); in alloc_stripe()
2154 INIT_LIST_HEAD(&sh->r5c); in alloc_stripe()
2155 INIT_LIST_HEAD(&sh->log_list); in alloc_stripe()
2156 atomic_set(&sh->count, 1); in alloc_stripe()
2157 sh->raid_conf = conf; in alloc_stripe()
2158 sh->log_start = MaxSector; in alloc_stripe()
2160 struct r5dev *dev = &sh->dev[i]; in alloc_stripe()
2167 sh->ppl_page = alloc_page(gfp); in alloc_stripe()
2168 if (!sh->ppl_page) { in alloc_stripe()
2169 free_stripe(sc, sh); in alloc_stripe()
2170 sh = NULL; in alloc_stripe()
2174 return sh; in alloc_stripe()
2178 struct stripe_head *sh; in grow_one_stripe() local
2180 sh = alloc_stripe(conf->slab_cache, gfp, conf->pool_size, conf); in grow_one_stripe()
2181 if (!sh) in grow_one_stripe()
2184 if (grow_buffers(sh, gfp)) { in grow_one_stripe()
2185 shrink_buffers(sh); in grow_one_stripe()
2186 free_stripe(conf->slab_cache, sh); in grow_one_stripe()
2189 sh->hash_lock_index = in grow_one_stripe()
2194 raid5_release_stripe(sh); in grow_one_stripe()
2449 struct stripe_head *sh; in drop_one_stripe() local
2453 sh = get_free_stripe(conf, hash); in drop_one_stripe()
2455 if (!sh) in drop_one_stripe()
2457 BUG_ON(atomic_read(&sh->count)); in drop_one_stripe()
2458 shrink_buffers(sh); in drop_one_stripe()
2459 free_stripe(conf->slab_cache, sh); in drop_one_stripe()
2477 struct stripe_head *sh = bi->bi_private; in raid5_end_read_request() local
2478 struct r5conf *conf = sh->raid_conf; in raid5_end_read_request()
2479 int disks = sh->disks, i; in raid5_end_read_request()
2485 if (bi == &sh->dev[i].req) in raid5_end_read_request()
2489 (unsigned long long)sh->sector, i, atomic_read(&sh->count), in raid5_end_read_request()
2496 if (test_bit(R5_ReadRepl, &sh->dev[i].flags)) in raid5_end_read_request()
2506 if (use_new_offset(conf, sh)) in raid5_end_read_request()
2507 s = sh->sector + rdev->new_data_offset; in raid5_end_read_request()
2509 s = sh->sector + rdev->data_offset; in raid5_end_read_request()
2511 set_bit(R5_UPTODATE, &sh->dev[i].flags); in raid5_end_read_request()
2512 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { in raid5_end_read_request()
2523 clear_bit(R5_ReadError, &sh->dev[i].flags); in raid5_end_read_request()
2524 clear_bit(R5_ReWrite, &sh->dev[i].flags); in raid5_end_read_request()
2525 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) in raid5_end_read_request()
2526 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags); in raid5_end_read_request()
2528 if (test_bit(R5_InJournal, &sh->dev[i].flags)) in raid5_end_read_request()
2533 set_bit(R5_OrigPageUPTDODATE, &sh->dev[i].flags); in raid5_end_read_request()
2542 clear_bit(R5_UPTODATE, &sh->dev[i].flags); in raid5_end_read_request()
2545 if (test_bit(R5_ReadRepl, &sh->dev[i].flags)) in raid5_end_read_request()
2558 } else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) { in raid5_end_read_request()
2573 && !test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) in raid5_end_read_request()
2576 if (sh->qd_idx >= 0 && sh->pd_idx == i) in raid5_end_read_request()
2577 set_bit(R5_ReadError, &sh->dev[i].flags); in raid5_end_read_request()
2578 else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) { in raid5_end_read_request()
2579 set_bit(R5_ReadError, &sh->dev[i].flags); in raid5_end_read_request()
2580 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags); in raid5_end_read_request()
2582 set_bit(R5_ReadNoMerge, &sh->dev[i].flags); in raid5_end_read_request()
2584 clear_bit(R5_ReadError, &sh->dev[i].flags); in raid5_end_read_request()
2585 clear_bit(R5_ReWrite, &sh->dev[i].flags); in raid5_end_read_request()
2589 rdev, sh->sector, STRIPE_SECTORS, 0))) in raid5_end_read_request()
2595 clear_bit(R5_LOCKED, &sh->dev[i].flags); in raid5_end_read_request()
2596 set_bit(STRIPE_HANDLE, &sh->state); in raid5_end_read_request()
2597 raid5_release_stripe(sh); in raid5_end_read_request()
2602 struct stripe_head *sh = bi->bi_private; in raid5_end_write_request() local
2603 struct r5conf *conf = sh->raid_conf; in raid5_end_write_request()
2604 int disks = sh->disks, i; in raid5_end_write_request()
2611 if (bi == &sh->dev[i].req) { in raid5_end_write_request()
2615 if (bi == &sh->dev[i].rreq) { in raid5_end_write_request()
2629 (unsigned long long)sh->sector, i, atomic_read(&sh->count), in raid5_end_write_request()
2640 else if (is_badblock(rdev, sh->sector, in raid5_end_write_request()
2643 set_bit(R5_MadeGoodRepl, &sh->dev[i].flags); in raid5_end_write_request()
2646 set_bit(STRIPE_DEGRADED, &sh->state); in raid5_end_write_request()
2648 set_bit(R5_WriteError, &sh->dev[i].flags); in raid5_end_write_request()
2652 } else if (is_badblock(rdev, sh->sector, in raid5_end_write_request()
2655 set_bit(R5_MadeGood, &sh->dev[i].flags); in raid5_end_write_request()
2656 if (test_bit(R5_ReadError, &sh->dev[i].flags)) in raid5_end_write_request()
2661 set_bit(R5_ReWrite, &sh->dev[i].flags); in raid5_end_write_request()
2666 if (sh->batch_head && bi->bi_status && !replacement) in raid5_end_write_request()
2667 set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state); in raid5_end_write_request()
2670 if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags)) in raid5_end_write_request()
2671 clear_bit(R5_LOCKED, &sh->dev[i].flags); in raid5_end_write_request()
2672 set_bit(STRIPE_HANDLE, &sh->state); in raid5_end_write_request()
2673 raid5_release_stripe(sh); in raid5_end_write_request()
2675 if (sh->batch_head && sh != sh->batch_head) in raid5_end_write_request()
2676 raid5_release_stripe(sh->batch_head); in raid5_end_write_request()
2711 struct stripe_head *sh) in raid5_compute_sector() argument
2899 if (sh) { in raid5_compute_sector()
2900 sh->pd_idx = pd_idx; in raid5_compute_sector()
2901 sh->qd_idx = qd_idx; in raid5_compute_sector()
2902 sh->ddf_layout = ddf_layout; in raid5_compute_sector()
2911 sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous) in raid5_compute_blocknr() argument
2913 struct r5conf *conf = sh->raid_conf; in raid5_compute_blocknr()
2914 int raid_disks = sh->disks; in raid5_compute_blocknr()
2916 sector_t new_sector = sh->sector, check; in raid5_compute_blocknr()
2931 if (i == sh->pd_idx) in raid5_compute_blocknr()
2939 if (i > sh->pd_idx) in raid5_compute_blocknr()
2944 if (i < sh->pd_idx) in raid5_compute_blocknr()
2946 i -= (sh->pd_idx + 1); in raid5_compute_blocknr()
2958 if (i == sh->qd_idx) in raid5_compute_blocknr()
2965 if (sh->pd_idx == raid_disks-1) in raid5_compute_blocknr()
2967 else if (i > sh->pd_idx) in raid5_compute_blocknr()
2972 if (sh->pd_idx == raid_disks-1) in raid5_compute_blocknr()
2976 if (i < sh->pd_idx) in raid5_compute_blocknr()
2978 i -= (sh->pd_idx + 2); in raid5_compute_blocknr()
2988 if (sh->pd_idx == 0) in raid5_compute_blocknr()
2992 if (i < sh->pd_idx) in raid5_compute_blocknr()
2994 i -= (sh->pd_idx + 1); in raid5_compute_blocknr()
2999 if (i > sh->pd_idx) in raid5_compute_blocknr()
3004 if (i < sh->pd_idx) in raid5_compute_blocknr()
3006 i -= (sh->pd_idx + 1); in raid5_compute_blocknr()
3022 if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx in raid5_compute_blocknr()
3023 || sh2.qd_idx != sh->qd_idx) { in raid5_compute_blocknr()
3088 schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s, in schedule_reconstruction() argument
3091 int i, pd_idx = sh->pd_idx, qd_idx = sh->qd_idx, disks = sh->disks; in schedule_reconstruction()
3092 struct r5conf *conf = sh->raid_conf; in schedule_reconstruction()
3102 r5c_release_extra_page(sh); in schedule_reconstruction()
3105 struct r5dev *dev = &sh->dev[i]; in schedule_reconstruction()
3126 sh->reconstruct_state = reconstruct_state_drain_run; in schedule_reconstruction()
3129 sh->reconstruct_state = reconstruct_state_run; in schedule_reconstruction()
3134 if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state)) in schedule_reconstruction()
3137 BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) || in schedule_reconstruction()
3138 test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags))); in schedule_reconstruction()
3140 (!(test_bit(R5_UPTODATE, &sh->dev[qd_idx].flags) || in schedule_reconstruction()
3141 test_bit(R5_Wantcompute, &sh->dev[qd_idx].flags)))); in schedule_reconstruction()
3144 struct r5dev *dev = &sh->dev[i]; in schedule_reconstruction()
3163 sh->reconstruct_state = reconstruct_state_prexor_drain_run; in schedule_reconstruction()
3172 set_bit(R5_LOCKED, &sh->dev[pd_idx].flags); in schedule_reconstruction()
3173 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); in schedule_reconstruction()
3177 int qd_idx = sh->qd_idx; in schedule_reconstruction()
3178 struct r5dev *dev = &sh->dev[qd_idx]; in schedule_reconstruction()
3185 if (raid5_has_ppl(sh->raid_conf) && sh->ppl_page && in schedule_reconstruction()
3187 !test_bit(STRIPE_FULL_WRITE, &sh->state) && in schedule_reconstruction()
3188 test_bit(R5_Insync, &sh->dev[pd_idx].flags)) in schedule_reconstruction()
3192 __func__, (unsigned long long)sh->sector, in schedule_reconstruction()
3201 static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in add_stripe_bio() argument
3205 struct r5conf *conf = sh->raid_conf; in add_stripe_bio()
3210 (unsigned long long)sh->sector); in add_stripe_bio()
3212 spin_lock_irq(&sh->stripe_lock); in add_stripe_bio()
3213 sh->dev[dd_idx].write_hint = bi->bi_write_hint; in add_stripe_bio()
3215 if (sh->batch_head) in add_stripe_bio()
3218 bip = &sh->dev[dd_idx].towrite; in add_stripe_bio()
3222 bip = &sh->dev[dd_idx].toread; in add_stripe_bio()
3245 for (i = 0; i < sh->disks; i++) { in add_stripe_bio()
3246 if (i != sh->pd_idx && in add_stripe_bio()
3247 (i == dd_idx || sh->dev[i].towrite)) { in add_stripe_bio()
3248 sector = sh->dev[i].sector; in add_stripe_bio()
3262 clear_bit(STRIPE_BATCH_READY, &sh->state); in add_stripe_bio()
3273 sector_t sector = sh->dev[dd_idx].sector; in add_stripe_bio()
3274 for (bi=sh->dev[dd_idx].towrite; in add_stripe_bio()
3275 sector < sh->dev[dd_idx].sector + STRIPE_SECTORS && in add_stripe_bio()
3277 bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) { in add_stripe_bio()
3281 if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS) in add_stripe_bio()
3282 if (!test_and_set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags)) in add_stripe_bio()
3283 sh->overwrite_disks++; in add_stripe_bio()
3288 (unsigned long long)sh->sector, dd_idx); in add_stripe_bio()
3303 set_bit(STRIPE_BITMAP_PENDING, &sh->state); in add_stripe_bio()
3304 spin_unlock_irq(&sh->stripe_lock); in add_stripe_bio()
3305 md_bitmap_startwrite(conf->mddev->bitmap, sh->sector, in add_stripe_bio()
3307 spin_lock_irq(&sh->stripe_lock); in add_stripe_bio()
3308 clear_bit(STRIPE_BITMAP_PENDING, &sh->state); in add_stripe_bio()
3309 if (!sh->batch_head) { in add_stripe_bio()
3310 sh->bm_seq = conf->seq_flush+1; in add_stripe_bio()
3311 set_bit(STRIPE_BIT_DELAY, &sh->state); in add_stripe_bio()
3314 spin_unlock_irq(&sh->stripe_lock); in add_stripe_bio()
3316 if (stripe_can_batch(sh)) in add_stripe_bio()
3317 stripe_add_to_batch_list(conf, sh); in add_stripe_bio()
3321 set_bit(R5_Overlap, &sh->dev[dd_idx].flags); in add_stripe_bio()
3322 spin_unlock_irq(&sh->stripe_lock); in add_stripe_bio()
3329 struct stripe_head *sh) in stripe_set_idx() argument
3341 &dd_idx, sh); in stripe_set_idx()
3345 handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, in handle_failed_stripe() argument
3349 BUG_ON(sh->batch_head); in handle_failed_stripe()
3354 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { in handle_failed_stripe()
3367 sh->sector, in handle_failed_stripe()
3373 spin_lock_irq(&sh->stripe_lock); in handle_failed_stripe()
3375 bi = sh->dev[i].towrite; in handle_failed_stripe()
3376 sh->dev[i].towrite = NULL; in handle_failed_stripe()
3377 sh->overwrite_disks = 0; in handle_failed_stripe()
3378 spin_unlock_irq(&sh->stripe_lock); in handle_failed_stripe()
3382 log_stripe_write_finished(sh); in handle_failed_stripe()
3384 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) in handle_failed_stripe()
3388 sh->dev[i].sector + STRIPE_SECTORS) { in handle_failed_stripe()
3389 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); in handle_failed_stripe()
3396 md_bitmap_endwrite(conf->mddev->bitmap, sh->sector, in handle_failed_stripe()
3400 bi = sh->dev[i].written; in handle_failed_stripe()
3401 sh->dev[i].written = NULL; in handle_failed_stripe()
3402 if (test_and_clear_bit(R5_SkipCopy, &sh->dev[i].flags)) { in handle_failed_stripe()
3403 WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags)); in handle_failed_stripe()
3404 sh->dev[i].page = sh->dev[i].orig_page; in handle_failed_stripe()
3409 sh->dev[i].sector + STRIPE_SECTORS) { in handle_failed_stripe()
3410 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); in handle_failed_stripe()
3420 if (!test_bit(R5_Wantfill, &sh->dev[i].flags) && in handle_failed_stripe()
3422 (!test_bit(R5_Insync, &sh->dev[i].flags) || in handle_failed_stripe()
3423 test_bit(R5_ReadError, &sh->dev[i].flags))) { in handle_failed_stripe()
3424 spin_lock_irq(&sh->stripe_lock); in handle_failed_stripe()
3425 bi = sh->dev[i].toread; in handle_failed_stripe()
3426 sh->dev[i].toread = NULL; in handle_failed_stripe()
3427 spin_unlock_irq(&sh->stripe_lock); in handle_failed_stripe()
3428 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) in handle_failed_stripe()
3433 sh->dev[i].sector + STRIPE_SECTORS) { in handle_failed_stripe()
3435 r5_next_bio(bi, sh->dev[i].sector); in handle_failed_stripe()
3442 md_bitmap_endwrite(conf->mddev->bitmap, sh->sector, in handle_failed_stripe()
3447 clear_bit(R5_LOCKED, &sh->dev[i].flags); in handle_failed_stripe()
3452 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) in handle_failed_stripe()
3458 handle_failed_sync(struct r5conf *conf, struct stripe_head *sh, in handle_failed_sync() argument
3464 BUG_ON(sh->batch_head); in handle_failed_sync()
3465 clear_bit(STRIPE_SYNCING, &sh->state); in handle_failed_sync()
3466 if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags)) in handle_failed_sync()
3487 && !rdev_set_badblocks(rdev, sh->sector, in handle_failed_sync()
3494 && !rdev_set_badblocks(rdev, sh->sector, in handle_failed_sync()
3506 static int want_replace(struct stripe_head *sh, int disk_idx) in want_replace() argument
3512 rdev = rcu_dereference(sh->raid_conf->disks[disk_idx].replacement); in want_replace()
3516 && (rdev->recovery_offset <= sh->sector in want_replace()
3517 || rdev->mddev->recovery_cp <= sh->sector)) in want_replace()
3523 static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s, in need_this_block() argument
3526 struct r5dev *dev = &sh->dev[disk_idx]; in need_this_block()
3527 struct r5dev *fdev[2] = { &sh->dev[s->failed_num[0]], in need_this_block()
3528 &sh->dev[s->failed_num[1]] }; in need_this_block()
3545 (s->replacing && want_replace(sh, disk_idx))) in need_this_block()
3570 !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in need_this_block()
3598 if (sh->raid_conf->level != 6 && in need_this_block()
3599 sh->raid_conf->rmw_level != PARITY_DISABLE_RMW && in need_this_block()
3600 sh->sector < sh->raid_conf->mddev->recovery_cp) in need_this_block()
3604 if (s->failed_num[i] != sh->pd_idx && in need_this_block()
3605 s->failed_num[i] != sh->qd_idx && in need_this_block()
3620 static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s, in fetch_block() argument
3623 struct r5dev *dev = &sh->dev[disk_idx]; in fetch_block()
3626 if (need_this_block(sh, s, disk_idx, disks)) { in fetch_block()
3632 BUG_ON(sh->batch_head); in fetch_block()
3644 ((sh->qd_idx >= 0 && sh->pd_idx == disk_idx) || in fetch_block()
3651 (unsigned long long)sh->sector, disk_idx); in fetch_block()
3652 set_bit(STRIPE_COMPUTE_RUN, &sh->state); in fetch_block()
3655 sh->ops.target = disk_idx; in fetch_block()
3656 sh->ops.target2 = -1; /* no 2nd target */ in fetch_block()
3675 &sh->dev[other].flags)) in fetch_block()
3680 (unsigned long long)sh->sector, in fetch_block()
3682 set_bit(STRIPE_COMPUTE_RUN, &sh->state); in fetch_block()
3684 set_bit(R5_Wantcompute, &sh->dev[disk_idx].flags); in fetch_block()
3685 set_bit(R5_Wantcompute, &sh->dev[other].flags); in fetch_block()
3686 sh->ops.target = disk_idx; in fetch_block()
3687 sh->ops.target2 = other; in fetch_block()
3706 static void handle_stripe_fill(struct stripe_head *sh, in handle_stripe_fill() argument
3716 if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state && in handle_stripe_fill()
3717 !sh->reconstruct_state) { in handle_stripe_fill()
3727 if (test_bit(STRIPE_R5C_CACHING, &sh->state)) in handle_stripe_fill()
3728 r5c_make_stripe_write_out(sh); in handle_stripe_fill()
3733 if (fetch_block(sh, s, i, disks)) in handle_stripe_fill()
3737 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe_fill()
3748 struct stripe_head *sh, int disks) in handle_stripe_clean_event() argument
3753 struct stripe_head *head_sh = sh; in handle_stripe_clean_event()
3757 if (sh->dev[i].written) { in handle_stripe_clean_event()
3758 dev = &sh->dev[i]; in handle_stripe_clean_event()
3784 md_bitmap_endwrite(conf->mddev->bitmap, sh->sector, in handle_stripe_clean_event()
3786 !test_bit(STRIPE_DEGRADED, &sh->state), in handle_stripe_clean_event()
3789 sh = list_first_entry(&sh->batch_list, in handle_stripe_clean_event()
3792 if (sh != head_sh) { in handle_stripe_clean_event()
3793 dev = &sh->dev[i]; in handle_stripe_clean_event()
3797 sh = head_sh; in handle_stripe_clean_event()
3798 dev = &sh->dev[i]; in handle_stripe_clean_event()
3803 log_stripe_write_finished(sh); in handle_stripe_clean_event()
3806 test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)) { in handle_stripe_clean_event()
3808 clear_bit(R5_Discard, &sh->dev[sh->pd_idx].flags); in handle_stripe_clean_event()
3809 clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags); in handle_stripe_clean_event()
3810 if (sh->qd_idx >= 0) { in handle_stripe_clean_event()
3811 clear_bit(R5_Discard, &sh->dev[sh->qd_idx].flags); in handle_stripe_clean_event()
3812 clear_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags); in handle_stripe_clean_event()
3815 clear_bit(STRIPE_DISCARD, &sh->state); in handle_stripe_clean_event()
3822 hash = sh->hash_lock_index; in handle_stripe_clean_event()
3824 remove_hash(sh); in handle_stripe_clean_event()
3827 sh = list_first_entry(&sh->batch_list, in handle_stripe_clean_event()
3829 if (sh != head_sh) in handle_stripe_clean_event()
3832 sh = head_sh; in handle_stripe_clean_event()
3834 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state)) in handle_stripe_clean_event()
3835 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe_clean_event()
3839 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) in handle_stripe_clean_event()
3863 struct stripe_head *sh, in handle_stripe_dirtying() argument
3878 (recovery_cp < MaxSector && sh->sector >= recovery_cp && in handle_stripe_dirtying()
3884 pr_debug("force RCW rmw_level=%u, recovery_cp=%llu sh->sector=%llu\n", in handle_stripe_dirtying()
3886 (unsigned long long)sh->sector); in handle_stripe_dirtying()
3889 struct r5dev *dev = &sh->dev[i]; in handle_stripe_dirtying()
3891 i == sh->pd_idx || i == sh->qd_idx || in handle_stripe_dirtying()
3903 i != sh->pd_idx && i != sh->qd_idx && in handle_stripe_dirtying()
3915 (unsigned long long)sh->sector, sh->state, rmw, rcw); in handle_stripe_dirtying()
3916 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe_dirtying()
3922 (unsigned long long)sh->sector, rmw); in handle_stripe_dirtying()
3924 struct r5dev *dev = &sh->dev[i]; in handle_stripe_dirtying()
3927 !test_bit(R5_LOCKED, &sh->dev[sh->pd_idx].flags)) { in handle_stripe_dirtying()
3942 r5c_use_extra_page(sh); in handle_stripe_dirtying()
3947 set_bit(STRIPE_DELAYED, &sh->state); in handle_stripe_dirtying()
3954 struct r5dev *dev = &sh->dev[i]; in handle_stripe_dirtying()
3956 i == sh->pd_idx || i == sh->qd_idx || in handle_stripe_dirtying()
3963 &sh->state)) { in handle_stripe_dirtying()
3970 set_bit(STRIPE_DELAYED, &sh->state); in handle_stripe_dirtying()
3971 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe_dirtying()
3981 struct r5dev *dev = &sh->dev[i]; in handle_stripe_dirtying()
3983 i != sh->pd_idx && i != sh->qd_idx && in handle_stripe_dirtying()
3990 &sh->state)) { in handle_stripe_dirtying()
3998 set_bit(STRIPE_DELAYED, &sh->state); in handle_stripe_dirtying()
3999 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe_dirtying()
4005 (unsigned long long)sh->sector, in handle_stripe_dirtying()
4006 rcw, qread, test_bit(STRIPE_DELAYED, &sh->state)); in handle_stripe_dirtying()
4010 !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in handle_stripe_dirtying()
4011 set_bit(STRIPE_DELAYED, &sh->state); in handle_stripe_dirtying()
4023 if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) && in handle_stripe_dirtying()
4025 !test_bit(STRIPE_BIT_DELAY, &sh->state))) in handle_stripe_dirtying()
4026 schedule_reconstruction(sh, s, rcw == 0, 0); in handle_stripe_dirtying()
4030 static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh, in handle_parity_checks5() argument
4035 BUG_ON(sh->batch_head); in handle_parity_checks5()
4036 set_bit(STRIPE_HANDLE, &sh->state); in handle_parity_checks5()
4038 switch (sh->check_state) { in handle_parity_checks5()
4043 sh->check_state = check_state_run; in handle_parity_checks5()
4045 clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags); in handle_parity_checks5()
4049 dev = &sh->dev[s->failed_num[0]]; in handle_parity_checks5()
4052 sh->check_state = check_state_idle; in handle_parity_checks5()
4054 dev = &sh->dev[sh->pd_idx]; in handle_parity_checks5()
4057 if (test_bit(STRIPE_INSYNC, &sh->state)) in handle_parity_checks5()
4068 clear_bit(STRIPE_DEGRADED, &sh->state); in handle_parity_checks5()
4069 set_bit(STRIPE_INSYNC, &sh->state); in handle_parity_checks5()
4074 sh->check_state = check_state_idle; in handle_parity_checks5()
4086 if ((sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) == 0) in handle_parity_checks5()
4090 set_bit(STRIPE_INSYNC, &sh->state); in handle_parity_checks5()
4095 set_bit(STRIPE_INSYNC, &sh->state); in handle_parity_checks5()
4098 (unsigned long long) sh->sector, in handle_parity_checks5()
4099 (unsigned long long) sh->sector + in handle_parity_checks5()
4102 sh->check_state = check_state_compute_run; in handle_parity_checks5()
4103 set_bit(STRIPE_COMPUTE_RUN, &sh->state); in handle_parity_checks5()
4106 &sh->dev[sh->pd_idx].flags); in handle_parity_checks5()
4107 sh->ops.target = sh->pd_idx; in handle_parity_checks5()
4108 sh->ops.target2 = -1; in handle_parity_checks5()
4117 __func__, sh->check_state, in handle_parity_checks5()
4118 (unsigned long long) sh->sector); in handle_parity_checks5()
4123 static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh, in handle_parity_checks6() argument
4127 int pd_idx = sh->pd_idx; in handle_parity_checks6()
4128 int qd_idx = sh->qd_idx; in handle_parity_checks6()
4131 BUG_ON(sh->batch_head); in handle_parity_checks6()
4132 set_bit(STRIPE_HANDLE, &sh->state); in handle_parity_checks6()
4142 switch (sh->check_state) { in handle_parity_checks6()
4150 sh->check_state = check_state_run; in handle_parity_checks6()
4156 if (sh->check_state == check_state_run) in handle_parity_checks6()
4157 sh->check_state = check_state_run_pq; in handle_parity_checks6()
4159 sh->check_state = check_state_run_q; in handle_parity_checks6()
4163 sh->ops.zero_sum_result = 0; in handle_parity_checks6()
4165 if (sh->check_state == check_state_run) { in handle_parity_checks6()
4167 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); in handle_parity_checks6()
4170 if (sh->check_state >= check_state_run && in handle_parity_checks6()
4171 sh->check_state <= check_state_run_pq) { in handle_parity_checks6()
4183 sh->check_state = check_state_idle; in handle_parity_checks6()
4186 if (test_bit(STRIPE_INSYNC, &sh->state)) in handle_parity_checks6()
4194 dev = &sh->dev[s->failed_num[1]]; in handle_parity_checks6()
4200 dev = &sh->dev[s->failed_num[0]]; in handle_parity_checks6()
4205 if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) { in handle_parity_checks6()
4206 dev = &sh->dev[pd_idx]; in handle_parity_checks6()
4211 if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) { in handle_parity_checks6()
4212 dev = &sh->dev[qd_idx]; in handle_parity_checks6()
4220 dev - (struct r5dev *) &sh->dev)) { in handle_parity_checks6()
4225 clear_bit(STRIPE_DEGRADED, &sh->state); in handle_parity_checks6()
4227 set_bit(STRIPE_INSYNC, &sh->state); in handle_parity_checks6()
4234 sh->check_state = check_state_idle; in handle_parity_checks6()
4240 if (sh->ops.zero_sum_result == 0) { in handle_parity_checks6()
4243 set_bit(STRIPE_INSYNC, &sh->state); in handle_parity_checks6()
4249 sh->check_state = check_state_compute_result; in handle_parity_checks6()
4260 set_bit(STRIPE_INSYNC, &sh->state); in handle_parity_checks6()
4263 (unsigned long long) sh->sector, in handle_parity_checks6()
4264 (unsigned long long) sh->sector + in handle_parity_checks6()
4267 int *target = &sh->ops.target; in handle_parity_checks6()
4269 sh->ops.target = -1; in handle_parity_checks6()
4270 sh->ops.target2 = -1; in handle_parity_checks6()
4271 sh->check_state = check_state_compute_run; in handle_parity_checks6()
4272 set_bit(STRIPE_COMPUTE_RUN, &sh->state); in handle_parity_checks6()
4274 if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) { in handle_parity_checks6()
4276 &sh->dev[pd_idx].flags); in handle_parity_checks6()
4278 target = &sh->ops.target2; in handle_parity_checks6()
4281 if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) { in handle_parity_checks6()
4283 &sh->dev[qd_idx].flags); in handle_parity_checks6()
4294 __func__, sh->check_state, in handle_parity_checks6()
4295 (unsigned long long) sh->sector); in handle_parity_checks6()
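
In the handle_parity_checks6() fragments, sh->ops.zero_sum_result carries two independent bits: SUM_CHECK_P_RESULT flags a bad P (XOR) parity and SUM_CHECK_Q_RESULT a bad Q (syndrome) parity. A result of zero means the stripe is in sync; otherwise the repair path fills ops.target first and then ops.target2, so one or both parity blocks are recomputed in a single pass. A standalone sketch of that selection, using illustrative flag and field names:

/* Standalone model of how a two-bit zero-sum result picks the RAID6
 * repair targets. Flag and field names are illustrative, not the kernel's. */
#include <stdio.h>

#define P_RESULT 0x1   /* P (XOR) parity mismatched      */
#define Q_RESULT 0x2   /* Q (syndrome) parity mismatched */

struct stripe6_model {
    int pd_idx, qd_idx;    /* slots of the P and Q blocks    */
    int target, target2;   /* up to two blocks to regenerate */
};

static void schedule_pq_repair(struct stripe6_model *sh, unsigned zero_sum_result)
{
    int *target = &sh->target;   /* fill target first, then target2 */

    sh->target = -1;
    sh->target2 = -1;
    if (zero_sum_result & P_RESULT) {
        *target = sh->pd_idx;
        target = &sh->target2;   /* a Q failure now goes to the second slot */
    }
    if (zero_sum_result & Q_RESULT)
        *target = sh->qd_idx;
}

int main(void)
{
    struct stripe6_model sh = { .pd_idx = 5, .qd_idx = 6 };

    schedule_pq_repair(&sh, P_RESULT | Q_RESULT);
    printf("targets: %d %d\n", sh.target, sh.target2); /* 5 6 */
    return 0;
}
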
4300 static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh) in handle_stripe_expansion() argument
4308 BUG_ON(sh->batch_head); in handle_stripe_expansion()
4309 clear_bit(STRIPE_EXPAND_SOURCE, &sh->state); in handle_stripe_expansion()
4310 for (i = 0; i < sh->disks; i++) in handle_stripe_expansion()
4311 if (i != sh->pd_idx && i != sh->qd_idx) { in handle_stripe_expansion()
4316 sector_t bn = raid5_compute_blocknr(sh, i, 1); in handle_stripe_expansion()
4336 sh->dev[i].page, 0, 0, STRIPE_SIZE, in handle_stripe_expansion()
4371 static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) in analyse_stripe() argument
4373 struct r5conf *conf = sh->raid_conf; in analyse_stripe()
4374 int disks = sh->disks; in analyse_stripe()
4381 s->expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state) && !sh->batch_head; in analyse_stripe()
4382 s->expanded = test_bit(STRIPE_EXPAND_READY, &sh->state) && !sh->batch_head; in analyse_stripe()
4395 dev = &sh->dev[i]; in analyse_stripe()
4406 !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) in analyse_stripe()
4435 rdev->recovery_offset >= sh->sector + STRIPE_SECTORS && in analyse_stripe()
4436 !is_badblock(rdev, sh->sector, STRIPE_SECTORS, in analyse_stripe()
4450 is_bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS, in analyse_stripe()
4477 else if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset) in analyse_stripe()
4547 if (test_bit(STRIPE_SYNCING, &sh->state)) { in analyse_stripe()
4557 sh->sector >= conf->mddev->recovery_cp || in analyse_stripe()
4566 static int clear_batch_ready(struct stripe_head *sh) in clear_batch_ready() argument
4573 if (!test_and_clear_bit(STRIPE_BATCH_READY, &sh->state)) in clear_batch_ready()
4574 return (sh->batch_head && sh->batch_head != sh); in clear_batch_ready()
4575 spin_lock(&sh->stripe_lock); in clear_batch_ready()
4576 if (!sh->batch_head) { in clear_batch_ready()
4577 spin_unlock(&sh->stripe_lock); in clear_batch_ready()
4585 if (sh->batch_head != sh) { in clear_batch_ready()
4586 spin_unlock(&sh->stripe_lock); in clear_batch_ready()
4589 spin_lock(&sh->batch_lock); in clear_batch_ready()
4590 list_for_each_entry(tmp, &sh->batch_list, batch_list) in clear_batch_ready()
4592 spin_unlock(&sh->batch_lock); in clear_batch_ready()
4593 spin_unlock(&sh->stripe_lock); in clear_batch_ready()
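
The clear_batch_ready() fragments distinguish three cases: the stripe is the head of its batch (sh->batch_head == sh), an ordinary unbatched stripe (batch_head == NULL), or a member of a batch owned by another head. Only in the last case does the non-zero return value make handle_stripe() back off, since the member is processed through its head. A locking-free model of that decision, assuming simplified types:

/* Simplified model of the "am I a batch member?" test that the fragments
 * above perform under stripe_lock/batch_lock; names are illustrative. */
#include <stdbool.h>
#include <stdio.h>

struct stripe_m {
    struct stripe_m *batch_head;  /* NULL: not batched; self: head; other: member */
};

/* Return true when the caller should NOT handle this stripe directly,
 * because it is a member of a batch owned by a different head. */
static bool defer_to_batch_head(const struct stripe_m *sh)
{
    return sh->batch_head && sh->batch_head != sh;
}

int main(void)
{
    struct stripe_m head = { .batch_head = &head };
    struct stripe_m member = { .batch_head = &head };
    struct stripe_m lone = { .batch_head = NULL };

    printf("%d %d %d\n",
           defer_to_batch_head(&head),    /* 0: head handles the batch */
           defer_to_batch_head(&member),  /* 1: member defers          */
           defer_to_batch_head(&lone));   /* 0: ordinary stripe        */
    return 0;
}
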
4605 struct stripe_head *sh, *next; in break_stripe_batch_list() local
4609 list_for_each_entry_safe(sh, next, &head_sh->batch_list, batch_list) { in break_stripe_batch_list()
4611 list_del_init(&sh->batch_list); in break_stripe_batch_list()
4613 WARN_ONCE(sh->state & ((1 << STRIPE_ACTIVE) | in break_stripe_batch_list()
4626 "stripe state: %lx\n", sh->state); in break_stripe_batch_list()
4631 set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS | in break_stripe_batch_list()
4637 sh->check_state = head_sh->check_state; in break_stripe_batch_list()
4638 sh->reconstruct_state = head_sh->reconstruct_state; in break_stripe_batch_list()
4639 spin_lock_irq(&sh->stripe_lock); in break_stripe_batch_list()
4640 sh->batch_head = NULL; in break_stripe_batch_list()
4641 spin_unlock_irq(&sh->stripe_lock); in break_stripe_batch_list()
4642 for (i = 0; i < sh->disks; i++) { in break_stripe_batch_list()
4643 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) in break_stripe_batch_list()
4645 sh->dev[i].flags = head_sh->dev[i].flags & in break_stripe_batch_list()
4649 sh->state & handle_flags) in break_stripe_batch_list()
4650 set_bit(STRIPE_HANDLE, &sh->state); in break_stripe_batch_list()
4651 raid5_release_stripe(sh); in break_stripe_batch_list()
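
break_stripe_batch_list() walks head_sh->batch_list with the _safe iterator, detaches every member, copies the head's check_state/reconstruct_state and per-device flags into it, clears its batch_head pointer under stripe_lock, and may mark it STRIPE_HANDLE before dropping the reference. A compact userspace model of that detach-and-propagate loop over a plain singly linked list (all types and names are illustrative stand-ins):

/* Userspace model of breaking up a stripe batch: detach each member from the
 * head's list, copy the head's state into it, and clear its head pointer. */
#include <stdio.h>
#include <stddef.h>

struct stripe_b {
    int check_state;            /* state copied from the batch head    */
    int needs_handling;         /* stands in for the STRIPE_HANDLE bit */
    struct stripe_b *batch_head;
    struct stripe_b *next;      /* singly linked batch membership list */
};

static void break_batch(struct stripe_b *head, int handle_members)
{
    struct stripe_b *sh = head->next;

    while (sh) {
        struct stripe_b *next = sh->next;   /* "safe" iteration: grab next first */

        sh->next = NULL;                    /* detach from the batch list        */
        sh->check_state = head->check_state;
        sh->batch_head = NULL;              /* no longer owned by the head       */
        if (handle_members)
            sh->needs_handling = 1;
        sh = next;
    }
    head->next = NULL;
    head->batch_head = NULL;
}

int main(void)
{
    struct stripe_b m2 = { .next = NULL };
    struct stripe_b m1 = { .next = &m2 };
    struct stripe_b head = { .check_state = 3, .batch_head = &head, .next = &m1 };

    m1.batch_head = m2.batch_head = &head;
    break_batch(&head, 1);
    printf("m1: state=%d handle=%d head=%p\n",
           m1.check_state, m1.needs_handling, (void *)m1.batch_head);
    return 0;
}
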
4666 static void handle_stripe(struct stripe_head *sh) in handle_stripe() argument
4669 struct r5conf *conf = sh->raid_conf; in handle_stripe()
4672 int disks = sh->disks; in handle_stripe()
4675 clear_bit(STRIPE_HANDLE, &sh->state); in handle_stripe()
4676 if (test_and_set_bit_lock(STRIPE_ACTIVE, &sh->state)) { in handle_stripe()
4679 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe()
4683 if (clear_batch_ready(sh) ) { in handle_stripe()
4684 clear_bit_unlock(STRIPE_ACTIVE, &sh->state); in handle_stripe()
4688 if (test_and_clear_bit(STRIPE_BATCH_ERR, &sh->state)) in handle_stripe()
4689 break_stripe_batch_list(sh, 0); in handle_stripe()
4691 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state) && !sh->batch_head) { in handle_stripe()
4692 spin_lock(&sh->stripe_lock); in handle_stripe()
4697 if (!test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state) && in handle_stripe()
4698 !test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state) && in handle_stripe()
4699 !test_bit(STRIPE_DISCARD, &sh->state) && in handle_stripe()
4700 test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) { in handle_stripe()
4701 set_bit(STRIPE_SYNCING, &sh->state); in handle_stripe()
4702 clear_bit(STRIPE_INSYNC, &sh->state); in handle_stripe()
4703 clear_bit(STRIPE_REPLACED, &sh->state); in handle_stripe()
4705 spin_unlock(&sh->stripe_lock); in handle_stripe()
4707 clear_bit(STRIPE_DELAYED, &sh->state); in handle_stripe()
4711 (unsigned long long)sh->sector, sh->state, in handle_stripe()
4712 atomic_read(&sh->count), sh->pd_idx, sh->qd_idx, in handle_stripe()
4713 sh->check_state, sh->reconstruct_state); in handle_stripe()
4715 analyse_stripe(sh, &s); in handle_stripe()
4717 if (test_bit(STRIPE_LOG_TRAPPED, &sh->state)) in handle_stripe()
4722 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe()
4729 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe()
4737 if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) { in handle_stripe()
4739 set_bit(STRIPE_BIOFILL_RUN, &sh->state); in handle_stripe()
4755 sh->check_state = 0; in handle_stripe()
4756 sh->reconstruct_state = 0; in handle_stripe()
4757 break_stripe_batch_list(sh, 0); in handle_stripe()
4759 handle_failed_stripe(conf, sh, &s, disks); in handle_stripe()
4761 handle_failed_sync(conf, sh, &s); in handle_stripe()
4768 if (sh->reconstruct_state == reconstruct_state_prexor_drain_result) in handle_stripe()
4770 if (sh->reconstruct_state == reconstruct_state_drain_result || in handle_stripe()
4771 sh->reconstruct_state == reconstruct_state_prexor_drain_result) { in handle_stripe()
4772 sh->reconstruct_state = reconstruct_state_idle; in handle_stripe()
4777 BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags) && in handle_stripe()
4778 !test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)); in handle_stripe()
4779 BUG_ON(sh->qd_idx >= 0 && in handle_stripe()
4780 !test_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags) && in handle_stripe()
4781 !test_bit(R5_Discard, &sh->dev[sh->qd_idx].flags)); in handle_stripe()
4783 struct r5dev *dev = &sh->dev[i]; in handle_stripe()
4785 (i == sh->pd_idx || i == sh->qd_idx || in handle_stripe()
4795 ((i == sh->pd_idx || i == sh->qd_idx) && in handle_stripe()
4797 set_bit(STRIPE_INSYNC, &sh->state); in handle_stripe()
4800 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in handle_stripe()
4808 pdev = &sh->dev[sh->pd_idx]; in handle_stripe()
4809 s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx) in handle_stripe()
4810 || (s.failed >= 2 && s.failed_num[1] == sh->pd_idx); in handle_stripe()
4811 qdev = &sh->dev[sh->qd_idx]; in handle_stripe()
4812 s.q_failed = (s.failed >= 1 && s.failed_num[0] == sh->qd_idx) in handle_stripe()
4813 || (s.failed >= 2 && s.failed_num[1] == sh->qd_idx) in handle_stripe()
4825 handle_stripe_clean_event(conf, sh, disks); in handle_stripe()
4828 r5c_handle_cached_data_endio(conf, sh, disks); in handle_stripe()
4829 log_stripe_write_finished(sh); in handle_stripe()
4840 handle_stripe_fill(sh, &s, disks); in handle_stripe()
4847 r5c_finish_stripe_write_out(conf, sh, &s); in handle_stripe()
4858 if (!sh->reconstruct_state && !sh->check_state && !sh->log_io) { in handle_stripe()
4861 handle_stripe_dirtying(conf, sh, &s, disks); in handle_stripe()
4867 ret = r5c_try_caching_write(conf, sh, &s, in handle_stripe()
4878 (!test_bit(STRIPE_R5C_CACHING, &sh->state) && in handle_stripe()
4880 ret = handle_stripe_dirtying(conf, sh, &s, in handle_stripe()
4893 if (sh->check_state || in handle_stripe()
4895 !test_bit(STRIPE_COMPUTE_RUN, &sh->state) && in handle_stripe()
4896 !test_bit(STRIPE_INSYNC, &sh->state))) { in handle_stripe()
4898 handle_parity_checks6(conf, sh, &s, disks); in handle_stripe()
4900 handle_parity_checks5(conf, sh, &s, disks); in handle_stripe()
4904 && !test_bit(STRIPE_COMPUTE_RUN, &sh->state) in handle_stripe()
4905 && !test_bit(STRIPE_REPLACED, &sh->state)) { in handle_stripe()
4908 if (test_bit(R5_NeedReplace, &sh->dev[i].flags)) { in handle_stripe()
4909 WARN_ON(!test_bit(R5_UPTODATE, &sh->dev[i].flags)); in handle_stripe()
4910 set_bit(R5_WantReplace, &sh->dev[i].flags); in handle_stripe()
4911 set_bit(R5_LOCKED, &sh->dev[i].flags); in handle_stripe()
4915 set_bit(STRIPE_INSYNC, &sh->state); in handle_stripe()
4916 set_bit(STRIPE_REPLACED, &sh->state); in handle_stripe()
4919 !test_bit(STRIPE_COMPUTE_RUN, &sh->state) && in handle_stripe()
4920 test_bit(STRIPE_INSYNC, &sh->state)) { in handle_stripe()
4922 clear_bit(STRIPE_SYNCING, &sh->state); in handle_stripe()
4923 if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags)) in handle_stripe()
4932 struct r5dev *dev = &sh->dev[s.failed_num[i]]; in handle_stripe()
4952 if (sh->reconstruct_state == reconstruct_state_result) { in handle_stripe()
4954 = raid5_get_active_stripe(conf, sh->sector, 1, 1, 1); in handle_stripe()
4956 /* sh cannot be written until sh_src has been read. in handle_stripe()
4957 * so arrange for sh to be delayed a little in handle_stripe()
4959 set_bit(STRIPE_DELAYED, &sh->state); in handle_stripe()
4960 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe()
4970 sh->reconstruct_state = reconstruct_state_idle; in handle_stripe()
4971 clear_bit(STRIPE_EXPANDING, &sh->state); in handle_stripe()
4973 set_bit(R5_Wantwrite, &sh->dev[i].flags); in handle_stripe()
4974 set_bit(R5_LOCKED, &sh->dev[i].flags); in handle_stripe()
4979 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) && in handle_stripe()
4980 !sh->reconstruct_state) { in handle_stripe()
4982 sh->disks = conf->raid_disks; in handle_stripe()
4983 stripe_set_idx(sh->sector, conf, 0, sh); in handle_stripe()
4984 schedule_reconstruction(sh, &s, 1, 1); in handle_stripe()
4985 } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) { in handle_stripe()
4986 clear_bit(STRIPE_EXPAND_READY, &sh->state); in handle_stripe()
4993 !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) in handle_stripe()
4994 handle_stripe_expansion(conf, sh); in handle_stripe()
5014 struct r5dev *dev = &sh->dev[i]; in handle_stripe()
5018 if (!rdev_set_badblocks(rdev, sh->sector, in handle_stripe()
5025 rdev_clear_badblocks(rdev, sh->sector, in handle_stripe()
5034 rdev_clear_badblocks(rdev, sh->sector, in handle_stripe()
5041 raid_run_ops(sh, s.ops_request); in handle_stripe()
5043 ops_run_io(sh, &s); in handle_stripe()
5056 clear_bit_unlock(STRIPE_ACTIVE, &sh->state); in handle_stripe()
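
handle_stripe() opens by taking STRIPE_ACTIVE with test_and_set_bit_lock() and closes by dropping it with clear_bit_unlock(); when the bit is already set, the stripe is being handled elsewhere, so STRIPE_HANDLE is set again and the call returns. A minimal model of that try-lock-or-requeue pattern using a C11 atomic flag (names are illustrative):

/* Model of the STRIPE_ACTIVE try-lock used by the fragments above:
 * one handler at a time, and a busy stripe is re-marked for later handling. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct stripe_a {
    atomic_flag active;     /* stands in for the STRIPE_ACTIVE bit */
    bool needs_handling;    /* stands in for the STRIPE_HANDLE bit */
};

static void handle_stripe_model(struct stripe_a *sh)
{
    sh->needs_handling = false;
    if (atomic_flag_test_and_set(&sh->active)) {
        /* someone else is already handling this stripe: requeue and leave */
        sh->needs_handling = true;
        return;
    }

    puts("analysing and running ops for the stripe");

    atomic_flag_clear(&sh->active);
}

int main(void)
{
    struct stripe_a sh = { .active = ATOMIC_FLAG_INIT, .needs_handling = false };

    handle_stripe_model(&sh);      /* acquires, handles, releases */
    return 0;
}
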
5064 struct stripe_head *sh; in raid5_activate_delayed() local
5065 sh = list_entry(l, struct stripe_head, lru); in raid5_activate_delayed()
5067 clear_bit(STRIPE_DELAYED, &sh->state); in raid5_activate_delayed()
5068 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in raid5_activate_delayed()
5070 list_add_tail(&sh->lru, &conf->hold_list); in raid5_activate_delayed()
5071 raid5_wakeup_stripe_thread(sh); in raid5_activate_delayed()
5084 struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru); in activate_bit_delay() local
5086 list_del_init(&sh->lru); in activate_bit_delay()
5087 atomic_inc(&sh->count); in activate_bit_delay()
5088 hash = sh->hash_lock_index; in activate_bit_delay()
5089 __release_stripe(conf, sh, &temp_inactive_list[hash]); in activate_bit_delay()
5326 struct stripe_head *sh, *tmp; in __get_priority_stripe() local
5336 sh = NULL; in __get_priority_stripe()
5362 sh = list_entry(handle_list->next, typeof(*sh), lru); in __get_priority_stripe()
5366 else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) { in __get_priority_stripe()
5386 sh = tmp; in __get_priority_stripe()
5391 if (sh) { in __get_priority_stripe()
5399 if (!sh) { in __get_priority_stripe()
5409 sh->group = NULL; in __get_priority_stripe()
5411 list_del_init(&sh->lru); in __get_priority_stripe()
5412 BUG_ON(atomic_inc_return(&sh->count) != 1); in __get_priority_stripe()
5413 return sh; in __get_priority_stripe()
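
__get_priority_stripe() appears to try the handle list first and only fall back to held (preread) stripes otherwise; once a stripe is chosen it is detached from its group and LRU list, and BUG_ON(atomic_inc_return(&sh->count) != 1) asserts that the scheduler is taking the only reference. A simplified model of that pick-and-claim step, ignoring the bypass accounting the real function performs:

/* Simplified picker: prefer the handle list, fall back to the hold list,
 * and claim the stripe by taking its first reference. Illustrative names. */
#include <assert.h>
#include <stdio.h>
#include <stddef.h>

struct stripe_p {
    int count;                 /* reference count, 0 while idle on a list */
    const char *name;
};

static struct stripe_p *pick_stripe(struct stripe_p *handle_list[], int nhandle,
                                    struct stripe_p *hold_list[], int nhold)
{
    struct stripe_p *sh = NULL;

    if (nhandle > 0)
        sh = handle_list[0];        /* normal work always wins              */
    else if (nhold > 0)
        sh = hold_list[0];          /* otherwise allow held/preread work    */
    if (!sh)
        return NULL;

    sh->count++;                    /* claim it for the handler             */
    assert(sh->count == 1);         /* mirrors the BUG_ON in the fragment   */
    return sh;
}

int main(void)
{
    struct stripe_p a = { 0, "held" };
    struct stripe_p *hold[] = { &a };

    struct stripe_p *sh = pick_stripe(NULL, 0, hold, 1);
    printf("picked %s (count=%d)\n", sh->name, sh->count);
    return 0;
}
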
5426 struct stripe_head *sh; in raid5_unplug() local
5435 sh = list_first_entry(&cb->list, struct stripe_head, lru); in raid5_unplug()
5436 list_del_init(&sh->lru); in raid5_unplug()
5443 clear_bit(STRIPE_ON_UNPLUG_LIST, &sh->state); in raid5_unplug()
5448 hash = sh->hash_lock_index; in raid5_unplug()
5449 __release_stripe(conf, sh, &cb->temp_inactive_list[hash]); in raid5_unplug()
5462 struct stripe_head *sh) in release_stripe_plug() argument
5470 raid5_release_stripe(sh); in release_stripe_plug()
5483 if (!test_and_set_bit(STRIPE_ON_UNPLUG_LIST, &sh->state)) in release_stripe_plug()
5484 list_add_tail(&sh->lru, &cb->list); in release_stripe_plug()
5486 raid5_release_stripe(sh); in release_stripe_plug()
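
release_stripe_plug() queues a stripe on the per-plug callback list at most once, using test_and_set_bit(STRIPE_ON_UNPLUG_LIST) as the de-duplication test, and simply releases the stripe when it is already queued or no plug callback is available. A minimal model of that set-once gate:

/* Model of the STRIPE_ON_UNPLUG_LIST gate: a stripe may sit on the plug's
 * deferred list at most once; a second release just drops the reference.
 * Names and types are simplified stand-ins. */
#include <stdbool.h>
#include <stdio.h>

struct stripe_u {
    bool on_unplug_list;   /* stands in for the STRIPE_ON_UNPLUG_LIST bit */
};

/* Returns true if the stripe was newly queued on the plug list. */
static bool queue_on_plug(struct stripe_u *sh)
{
    if (sh->on_unplug_list)
        return false;          /* already queued: caller releases instead */
    sh->on_unplug_list = true; /* test_and_set in the real code           */
    return true;
}

int main(void)
{
    struct stripe_u sh = { .on_unplug_list = false };

    printf("first release queues: %d\n", queue_on_plug(&sh));   /* 1 */
    printf("second release queues: %d\n", queue_on_plug(&sh));  /* 0 */
    return 0;
}
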
5493 struct stripe_head *sh; in make_discard_request() local
5519 sh = raid5_get_active_stripe(conf, logical_sector, 0, 0, 0); in make_discard_request()
5522 set_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags); in make_discard_request()
5523 if (test_bit(STRIPE_SYNCING, &sh->state)) { in make_discard_request()
5524 raid5_release_stripe(sh); in make_discard_request()
5528 clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags); in make_discard_request()
5529 spin_lock_irq(&sh->stripe_lock); in make_discard_request()
5531 if (d == sh->pd_idx || d == sh->qd_idx) in make_discard_request()
5533 if (sh->dev[d].towrite || sh->dev[d].toread) { in make_discard_request()
5534 set_bit(R5_Overlap, &sh->dev[d].flags); in make_discard_request()
5535 spin_unlock_irq(&sh->stripe_lock); in make_discard_request()
5536 raid5_release_stripe(sh); in make_discard_request()
5541 set_bit(STRIPE_DISCARD, &sh->state); in make_discard_request()
5543 sh->overwrite_disks = 0; in make_discard_request()
5545 if (d == sh->pd_idx || d == sh->qd_idx) in make_discard_request()
5547 sh->dev[d].towrite = bi; in make_discard_request()
5548 set_bit(R5_OVERWRITE, &sh->dev[d].flags); in make_discard_request()
5551 sh->overwrite_disks++; in make_discard_request()
5553 spin_unlock_irq(&sh->stripe_lock); in make_discard_request()
5559 sh->sector, in make_discard_request()
5562 sh->bm_seq = conf->seq_flush + 1; in make_discard_request()
5563 set_bit(STRIPE_BIT_DELAY, &sh->state); in make_discard_request()
5566 set_bit(STRIPE_HANDLE, &sh->state); in make_discard_request()
5567 clear_bit(STRIPE_DELAYED, &sh->state); in make_discard_request()
5568 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in make_discard_request()
5570 release_stripe_plug(mddev, sh); in make_discard_request()
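
In the make_discard_request() fragments, the discard bio is attached to every data slot of the stripe while pd_idx and qd_idx are skipped, overwrite_disks counts how many slots are fully overwritten, and a pending towrite/toread on any data slot forces a back-off with R5_Overlap set. A standalone sketch of the attach loop, with simplified stand-ins for the kernel structures:

/* Sketch of attaching a discard to all data blocks of one stripe:
 * skip the parity slots, refuse if any data slot is already busy,
 * and count the fully overwritten slots. Names are illustrative. */
#include <stdbool.h>
#include <stdio.h>

#define NDISKS 6

struct dev_m { bool busy; bool overwrite; };

struct stripe_d {
    int pd_idx, qd_idx;            /* parity slots to skip */
    int overwrite_disks;
    struct dev_m dev[NDISKS];
};

static bool attach_discard(struct stripe_d *sh)
{
    int d;

    /* first pass: pending I/O on any data slot means "try again later" */
    for (d = 0; d < NDISKS; d++) {
        if (d == sh->pd_idx || d == sh->qd_idx)
            continue;
        if (sh->dev[d].busy)
            return false;
    }

    /* second pass: mark every data slot as fully overwritten */
    sh->overwrite_disks = 0;
    for (d = 0; d < NDISKS; d++) {
        if (d == sh->pd_idx || d == sh->qd_idx)
            continue;
        sh->dev[d].overwrite = true;
        sh->overwrite_disks++;
    }
    return true;
}

int main(void)
{
    struct stripe_d sh = { .pd_idx = 4, .qd_idx = 5 };

    if (attach_discard(&sh))
        printf("discard covers %d data blocks\n", sh.overwrite_disks); /* 4 */
    return 0;
}
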
5582 struct stripe_head *sh; in raid5_make_request() local
5675 sh = raid5_get_active_stripe(conf, new_sector, previous, in raid5_make_request()
5677 if (sh) { in raid5_make_request()
5683 * 'sh', we know that if that happens, in raid5_make_request()
5696 raid5_release_stripe(sh); in raid5_make_request()
5706 raid5_release_stripe(sh); in raid5_make_request()
5710 if (test_bit(STRIPE_EXPANDING, &sh->state) || in raid5_make_request()
5711 !add_stripe_bio(sh, bi, dd_idx, rw, previous)) { in raid5_make_request()
5717 raid5_release_stripe(sh); in raid5_make_request()
5723 set_bit(STRIPE_R5C_PREFLUSH, &sh->state); in raid5_make_request()
5728 if (!sh->batch_head || sh == sh->batch_head) in raid5_make_request()
5729 set_bit(STRIPE_HANDLE, &sh->state); in raid5_make_request()
5730 clear_bit(STRIPE_DELAYED, &sh->state); in raid5_make_request()
5731 if ((!sh->batch_head || sh == sh->batch_head) && in raid5_make_request()
5733 !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in raid5_make_request()
5735 release_stripe_plug(mddev, sh); in raid5_make_request()
5764 struct stripe_head *sh; in reshape_request() local
5915 sh = raid5_get_active_stripe(conf, stripe_addr+i, 0, 0, 1); in reshape_request()
5916 set_bit(STRIPE_EXPANDING, &sh->state); in reshape_request()
5921 for (j=sh->disks; j--;) { in reshape_request()
5923 if (j == sh->pd_idx) in reshape_request()
5926 j == sh->qd_idx) in reshape_request()
5928 s = raid5_compute_blocknr(sh, j, 0); in reshape_request()
5933 memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE); in reshape_request()
5934 set_bit(R5_Expanded, &sh->dev[j].flags); in reshape_request()
5935 set_bit(R5_UPTODATE, &sh->dev[j].flags); in reshape_request()
5938 set_bit(STRIPE_EXPAND_READY, &sh->state); in reshape_request()
5939 set_bit(STRIPE_HANDLE, &sh->state); in reshape_request()
5941 list_add(&sh->lru, &stripes); in reshape_request()
5964 sh = raid5_get_active_stripe(conf, first_sector, 1, 0, 1); in reshape_request()
5965 set_bit(STRIPE_EXPAND_SOURCE, &sh->state); in reshape_request()
5966 set_bit(STRIPE_HANDLE, &sh->state); in reshape_request()
5967 raid5_release_stripe(sh); in reshape_request()
5974 sh = list_entry(stripes.next, struct stripe_head, lru); in reshape_request()
5975 list_del_init(&sh->lru); in reshape_request()
5976 raid5_release_stripe(sh); in reshape_request()
6025 struct stripe_head *sh; in raid5_sync_request() local
6083 sh = raid5_get_active_stripe(conf, sector_nr, 0, 1, 0); in raid5_sync_request()
6084 if (sh == NULL) { in raid5_sync_request()
6085 sh = raid5_get_active_stripe(conf, sector_nr, 0, 0, 0); in raid5_sync_request()
6106 set_bit(STRIPE_SYNC_REQUESTED, &sh->state); in raid5_sync_request()
6107 set_bit(STRIPE_HANDLE, &sh->state); in raid5_sync_request()
6109 raid5_release_stripe(sh); in raid5_sync_request()
6127 struct stripe_head *sh; in retry_aligned_read() local
6148 sh = raid5_get_active_stripe(conf, sector, 0, 1, 1); in retry_aligned_read()
6150 if (!sh) { in retry_aligned_read()
6157 if (!add_stripe_bio(sh, raid_bio, dd_idx, 0, 0)) { in retry_aligned_read()
6158 raid5_release_stripe(sh); in retry_aligned_read()
6164 set_bit(R5_ReadNoMerge, &sh->dev[dd_idx].flags); in retry_aligned_read()
6165 handle_stripe(sh); in retry_aligned_read()
6166 raid5_release_stripe(sh); in retry_aligned_read()
6181 struct stripe_head *batch[MAX_STRIPE_BATCH], *sh; in handle_active_stripes() local
6186 (sh = __get_priority_stripe(conf, group)) != NULL) in handle_active_stripes()
6187 batch[batch_size++] = sh; in handle_active_stripes()
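
The handle_active_stripes() fragment gathers work in fixed-size batches: it keeps calling __get_priority_stripe() until either MAX_STRIPE_BATCH stripes are collected or the picker returns NULL. A tiny model of that fill loop (the picker here is a stub standing in for the real priority logic):

/* Model of filling a fixed-size batch from a priority picker; the picker
 * below is a stub that hands out a few fake stripes and then runs dry. */
#include <stdio.h>
#include <stddef.h>

#define MAX_STRIPE_BATCH 8

struct stripe_w { int id; };

static struct stripe_w pool[3] = { {1}, {2}, {3} };
static int next_idx;

/* Stand-in for __get_priority_stripe(): NULL when no work remains. */
static struct stripe_w *get_priority_stripe(void)
{
    if (next_idx >= 3)
        return NULL;
    return &pool[next_idx++];
}

int main(void)
{
    struct stripe_w *batch[MAX_STRIPE_BATCH];
    struct stripe_w *sh;
    int batch_size = 0;

    while (batch_size < MAX_STRIPE_BATCH &&
           (sh = get_priority_stripe()) != NULL)
        batch[batch_size++] = sh;

    printf("collected %d stripes; first id %d\n", batch_size, batch[0]->id);
    return 0;
}
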