Lines matching full:sh (drivers/md/raid5.c)

26  * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq
113 static inline int raid6_d0(struct stripe_head *sh) in raid6_d0() argument
115 if (sh->ddf_layout) in raid6_d0()
119 if (sh->qd_idx == sh->disks - 1) in raid6_d0()
122 return sh->qd_idx + 1; in raid6_d0()
135 static int raid6_idx_to_slot(int idx, struct stripe_head *sh, in raid6_idx_to_slot() argument
140 if (sh->ddf_layout) in raid6_idx_to_slot()
142 if (idx == sh->pd_idx) in raid6_idx_to_slot()
144 if (idx == sh->qd_idx) in raid6_idx_to_slot()
146 if (!sh->ddf_layout) in raid6_idx_to_slot()
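The fragments above come from two small RAID6 layout helpers. A reconstructed sketch of how they fit together follows; the branches the search did not match (the early "return 0" cases and the slot bookkeeping) are filled in from the conventional raid5.c logic and should be checked against the full source.

/* Reconstruction; struct stripe_head is assumed from the kernel headers. */
static inline int raid6_d0(struct stripe_head *sh)
{
        if (sh->ddf_layout)
                /* DDF layouts start the data run at the first device */
                return 0;
        /* native md layouts start just after the Q block */
        if (sh->qd_idx == sh->disks - 1)
                return 0;
        else
                return sh->qd_idx + 1;
}

/* Map a device index to a syndrome "slot": data blocks land in slots
 * 0..syndrome_disks-1, P becomes syndrome_disks, Q becomes syndrome_disks+1. */
static int raid6_idx_to_slot(int idx, struct stripe_head *sh,
                             int *count, int syndrome_disks)
{
        int slot = *count;

        if (sh->ddf_layout)
                (*count)++;
        if (idx == sh->pd_idx)
                return syndrome_disks;
        if (idx == sh->qd_idx)
                return syndrome_disks + 1;
        if (!sh->ddf_layout)
                (*count)++;
        return slot;
}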
153 static int stripe_operations_active(struct stripe_head *sh) in stripe_operations_active() argument
155 return sh->check_state || sh->reconstruct_state || in stripe_operations_active()
156 test_bit(STRIPE_BIOFILL_RUN, &sh->state) || in stripe_operations_active()
157 test_bit(STRIPE_COMPUTE_RUN, &sh->state); in stripe_operations_active()
160 static bool stripe_is_lowprio(struct stripe_head *sh) in stripe_is_lowprio() argument
162 return (test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state) || in stripe_is_lowprio()
163 test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state)) && in stripe_is_lowprio()
164 !test_bit(STRIPE_R5C_CACHING, &sh->state); in stripe_is_lowprio()
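The matched lines above cover the whole of stripe_operations_active() and stripe_is_lowprio(); reassembled, they read as below. A stripe counts as "low priority" when it is an r5c (write-back cache) full or partial stripe that is no longer in the caching phase.

static int stripe_operations_active(struct stripe_head *sh)
{
        /* any check, reconstruct, biofill or compute operation in flight? */
        return sh->check_state || sh->reconstruct_state ||
               test_bit(STRIPE_BIOFILL_RUN, &sh->state) ||
               test_bit(STRIPE_COMPUTE_RUN, &sh->state);
}

static bool stripe_is_lowprio(struct stripe_head *sh)
{
        /* r5c full/partial stripes that are past the caching phase */
        return (test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state) ||
                test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state)) &&
               !test_bit(STRIPE_R5C_CACHING, &sh->state);
}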
167 static void raid5_wakeup_stripe_thread(struct stripe_head *sh) in raid5_wakeup_stripe_thread() argument
169 struct r5conf *conf = sh->raid_conf; in raid5_wakeup_stripe_thread()
172 int i, cpu = sh->cpu; in raid5_wakeup_stripe_thread()
176 sh->cpu = cpu; in raid5_wakeup_stripe_thread()
179 if (list_empty(&sh->lru)) { in raid5_wakeup_stripe_thread()
182 if (stripe_is_lowprio(sh)) in raid5_wakeup_stripe_thread()
183 list_add_tail(&sh->lru, &group->loprio_list); in raid5_wakeup_stripe_thread()
185 list_add_tail(&sh->lru, &group->handle_list); in raid5_wakeup_stripe_thread()
187 sh->group = group; in raid5_wakeup_stripe_thread()
195 group = conf->worker_groups + cpu_to_group(sh->cpu); in raid5_wakeup_stripe_thread()
199 queue_work_on(sh->cpu, raid5_wq, &group->workers[0].work); in raid5_wakeup_stripe_thread()
206 queue_work_on(sh->cpu, raid5_wq, in raid5_wakeup_stripe_thread()
213 static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh, in do_release_stripe() argument
219 BUG_ON(!list_empty(&sh->lru)); in do_release_stripe()
223 for (i = sh->disks; i--; ) in do_release_stripe()
224 if (test_bit(R5_InJournal, &sh->dev[i].flags)) in do_release_stripe()
233 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state) || in do_release_stripe()
235 !test_bit(STRIPE_HANDLE, &sh->state) && injournal != 0)) { in do_release_stripe()
236 if (test_bit(STRIPE_R5C_CACHING, &sh->state)) in do_release_stripe()
237 r5c_make_stripe_write_out(sh); in do_release_stripe()
238 set_bit(STRIPE_HANDLE, &sh->state); in do_release_stripe()
241 if (test_bit(STRIPE_HANDLE, &sh->state)) { in do_release_stripe()
242 if (test_bit(STRIPE_DELAYED, &sh->state) && in do_release_stripe()
243 !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in do_release_stripe()
244 list_add_tail(&sh->lru, &conf->delayed_list); in do_release_stripe()
245 else if (test_bit(STRIPE_BIT_DELAY, &sh->state) && in do_release_stripe()
246 sh->bm_seq - conf->seq_write > 0) in do_release_stripe()
247 list_add_tail(&sh->lru, &conf->bitmap_list); in do_release_stripe()
249 clear_bit(STRIPE_DELAYED, &sh->state); in do_release_stripe()
250 clear_bit(STRIPE_BIT_DELAY, &sh->state); in do_release_stripe()
252 if (stripe_is_lowprio(sh)) in do_release_stripe()
253 list_add_tail(&sh->lru, in do_release_stripe()
256 list_add_tail(&sh->lru, in do_release_stripe()
259 raid5_wakeup_stripe_thread(sh); in do_release_stripe()
265 BUG_ON(stripe_operations_active(sh)); in do_release_stripe()
266 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in do_release_stripe()
271 if (!test_bit(STRIPE_EXPANDING, &sh->state)) { in do_release_stripe()
273 list_add_tail(&sh->lru, temp_inactive_list); in do_release_stripe()
275 WARN_ON(test_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags)); in do_release_stripe()
277 list_add_tail(&sh->lru, temp_inactive_list); in do_release_stripe()
280 if (!test_and_set_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) in do_release_stripe()
282 if (test_and_clear_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state)) in do_release_stripe()
284 list_add_tail(&sh->lru, &conf->r5c_full_stripe_list); in do_release_stripe()
292 list_add_tail(&sh->lru, &conf->r5c_partial_stripe_list); in do_release_stripe()
298 static void __release_stripe(struct r5conf *conf, struct stripe_head *sh, in __release_stripe() argument
301 if (atomic_dec_and_test(&sh->count)) in __release_stripe()
302 do_release_stripe(conf, sh, temp_inactive_list); in __release_stripe()
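The two matched lines above are effectively the entire body of __release_stripe(): the stripe is only handed to do_release_stripe() when the last reference is dropped. Reassembled (the list parameter's declaration is inferred from its use):

static void __release_stripe(struct r5conf *conf, struct stripe_head *sh,
                             struct list_head *temp_inactive_list)
{
        if (atomic_dec_and_test(&sh->count))
                do_release_stripe(conf, sh, temp_inactive_list);
}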
358 struct stripe_head *sh, *t; in release_stripe_list() local
364 llist_for_each_entry_safe(sh, t, head, release_list) { in release_stripe_list()
367 /* sh could be re-added after STRIPE_ON_RELEASE_LIST is cleared */ in release_stripe_list()
369 clear_bit(STRIPE_ON_RELEASE_LIST, &sh->state); in release_stripe_list()
375 hash = sh->hash_lock_index; in release_stripe_list()
376 __release_stripe(conf, sh, &temp_inactive_list[hash]); in release_stripe_list()
383 void raid5_release_stripe(struct stripe_head *sh) in raid5_release_stripe() argument
385 struct r5conf *conf = sh->raid_conf; in raid5_release_stripe()
393 if (atomic_add_unless(&sh->count, -1, 1)) in raid5_release_stripe()
397 test_and_set_bit(STRIPE_ON_RELEASE_LIST, &sh->state)) in raid5_release_stripe()
399 wakeup = llist_add(&sh->release_list, &conf->released_stripes); in raid5_release_stripe()
405 if (atomic_dec_and_lock_irqsave(&sh->count, &conf->device_lock, flags)) { in raid5_release_stripe()
407 hash = sh->hash_lock_index; in raid5_release_stripe()
408 do_release_stripe(conf, sh, &list); in raid5_release_stripe()
414 static inline void remove_hash(struct stripe_head *sh) in remove_hash() argument
417 (unsigned long long)sh->sector); in remove_hash()
419 hlist_del_init(&sh->hash); in remove_hash()
422 static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh) in insert_hash() argument
424 struct hlist_head *hp = stripe_hash(conf, sh->sector); in insert_hash()
427 (unsigned long long)sh->sector); in insert_hash()
429 hlist_add_head(&sh->hash, hp); in insert_hash()
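remove_hash() and insert_hash() are equally small. A sketch reassembled from the fragments above; the pr_debug() format strings are reconstructed, and stripe_hash() is assumed to be the per-conf hash-table lookup keyed on the stripe sector.

static inline void remove_hash(struct stripe_head *sh)
{
        pr_debug("remove_hash(), stripe %llu\n",
                 (unsigned long long)sh->sector);
        hlist_del_init(&sh->hash);
}

static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh)
{
        struct hlist_head *hp = stripe_hash(conf, sh->sector);

        pr_debug("insert_hash(), stripe %llu\n",
                 (unsigned long long)sh->sector);
        hlist_add_head(&sh->hash, hp);
}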
435 struct stripe_head *sh = NULL; in get_free_stripe() local
441 sh = list_entry(first, struct stripe_head, lru); in get_free_stripe()
443 remove_hash(sh); in get_free_stripe()
445 BUG_ON(hash != sh->hash_lock_index); in get_free_stripe()
449 return sh; in get_free_stripe()
453 static void free_stripe_pages(struct stripe_head *sh) in free_stripe_pages() argument
459 if (!sh->pages) in free_stripe_pages()
462 for (i = 0; i < sh->nr_pages; i++) { in free_stripe_pages()
463 p = sh->pages[i]; in free_stripe_pages()
466 sh->pages[i] = NULL; in free_stripe_pages()
470 static int alloc_stripe_pages(struct stripe_head *sh, gfp_t gfp) in alloc_stripe_pages() argument
475 for (i = 0; i < sh->nr_pages; i++) { in alloc_stripe_pages()
477 if (sh->pages[i]) in alloc_stripe_pages()
482 free_stripe_pages(sh); in alloc_stripe_pages()
485 sh->pages[i] = p; in alloc_stripe_pages()
491 init_stripe_shared_pages(struct stripe_head *sh, struct r5conf *conf, int disks) in init_stripe_shared_pages() argument
495 if (sh->pages) in init_stripe_shared_pages()
498 /* Each of the sh->dev[i] needs one conf->stripe_size */ in init_stripe_shared_pages()
502 sh->pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL); in init_stripe_shared_pages()
503 if (!sh->pages) in init_stripe_shared_pages()
505 sh->nr_pages = nr_pages; in init_stripe_shared_pages()
506 sh->stripes_per_page = cnt; in init_stripe_shared_pages()
511 static void shrink_buffers(struct stripe_head *sh) in shrink_buffers() argument
514 int num = sh->raid_conf->pool_size; in shrink_buffers()
520 WARN_ON(sh->dev[i].page != sh->dev[i].orig_page); in shrink_buffers()
521 p = sh->dev[i].page; in shrink_buffers()
524 sh->dev[i].page = NULL; in shrink_buffers()
529 sh->dev[i].page = NULL; in shrink_buffers()
530 free_stripe_pages(sh); /* Free pages */ in shrink_buffers()
534 static int grow_buffers(struct stripe_head *sh, gfp_t gfp) in grow_buffers() argument
537 int num = sh->raid_conf->pool_size; in grow_buffers()
546 sh->dev[i].page = page; in grow_buffers()
547 sh->dev[i].orig_page = page; in grow_buffers()
548 sh->dev[i].offset = 0; in grow_buffers()
551 if (alloc_stripe_pages(sh, gfp)) in grow_buffers()
555 sh->dev[i].page = raid5_get_dev_page(sh, i); in grow_buffers()
556 sh->dev[i].orig_page = sh->dev[i].page; in grow_buffers()
557 sh->dev[i].offset = raid5_get_page_offset(sh, i); in grow_buffers()
564 struct stripe_head *sh);
566 static void init_stripe(struct stripe_head *sh, sector_t sector, int previous) in init_stripe() argument
568 struct r5conf *conf = sh->raid_conf; in init_stripe()
571 BUG_ON(atomic_read(&sh->count) != 0); in init_stripe()
572 BUG_ON(test_bit(STRIPE_HANDLE, &sh->state)); in init_stripe()
573 BUG_ON(stripe_operations_active(sh)); in init_stripe()
574 BUG_ON(sh->batch_head); in init_stripe()
580 sh->generation = conf->generation - previous; in init_stripe()
581 sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks; in init_stripe()
582 sh->sector = sector; in init_stripe()
583 stripe_set_idx(sector, conf, previous, sh); in init_stripe()
584 sh->state = 0; in init_stripe()
586 for (i = sh->disks; i--; ) { in init_stripe()
587 struct r5dev *dev = &sh->dev[i]; in init_stripe()
592 (unsigned long long)sh->sector, i, dev->toread, in init_stripe()
598 dev->sector = raid5_compute_blocknr(sh, i, previous); in init_stripe()
602 sh->overwrite_disks = 0; in init_stripe()
603 insert_hash(conf, sh); in init_stripe()
604 sh->cpu = smp_processor_id(); in init_stripe()
605 set_bit(STRIPE_BATCH_READY, &sh->state); in init_stripe()
611 struct stripe_head *sh; in __find_stripe() local
614 hlist_for_each_entry(sh, stripe_hash(conf, sector), hash) in __find_stripe()
615 if (sh->sector == sector && sh->generation == generation) in __find_stripe()
616 return sh; in __find_stripe()
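__find_stripe() is a plain hash-chain walk keyed on (sector, generation). Reconstructed from the fragments; the signature and the not-found path are assumptions based on how raid5_get_active_stripe() below calls it.

static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector,
                                         short generation)
{
        struct stripe_head *sh;

        hlist_for_each_entry(sh, stripe_hash(conf, sector), hash)
                if (sh->sector == sector && sh->generation == generation)
                        return sh;
        return NULL;    /* not cached; caller falls back to get_free_stripe() */
}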
707 struct stripe_head *sh; in raid5_get_active_stripe() local
719 sh = __find_stripe(conf, sector, conf->generation - previous); in raid5_get_active_stripe()
720 if (!sh) { in raid5_get_active_stripe()
722 sh = get_free_stripe(conf, hash); in raid5_get_active_stripe()
723 if (!sh && !test_bit(R5_DID_ALLOC, in raid5_get_active_stripe()
728 if (noblock && sh == NULL) in raid5_get_active_stripe()
732 if (!sh) { in raid5_get_active_stripe()
747 init_stripe(sh, sector, previous); in raid5_get_active_stripe()
748 atomic_inc(&sh->count); in raid5_get_active_stripe()
750 } else if (!atomic_inc_not_zero(&sh->count)) { in raid5_get_active_stripe()
752 if (!atomic_read(&sh->count)) { in raid5_get_active_stripe()
753 if (!test_bit(STRIPE_HANDLE, &sh->state)) in raid5_get_active_stripe()
755 BUG_ON(list_empty(&sh->lru) && in raid5_get_active_stripe()
756 !test_bit(STRIPE_EXPANDING, &sh->state)); in raid5_get_active_stripe()
760 list_del_init(&sh->lru); in raid5_get_active_stripe()
763 if (sh->group) { in raid5_get_active_stripe()
764 sh->group->stripes_cnt--; in raid5_get_active_stripe()
765 sh->group = NULL; in raid5_get_active_stripe()
768 atomic_inc(&sh->count); in raid5_get_active_stripe()
771 } while (sh == NULL); in raid5_get_active_stripe()
774 return sh; in raid5_get_active_stripe()
777 static bool is_full_stripe_write(struct stripe_head *sh) in is_full_stripe_write() argument
779 BUG_ON(sh->overwrite_disks > (sh->disks - sh->raid_conf->max_degraded)); in is_full_stripe_write()
780 return sh->overwrite_disks == (sh->disks - sh->raid_conf->max_degraded); in is_full_stripe_write()
805 static bool stripe_can_batch(struct stripe_head *sh) in stripe_can_batch() argument
807 struct r5conf *conf = sh->raid_conf; in stripe_can_batch()
811 return test_bit(STRIPE_BATCH_READY, &sh->state) && in stripe_can_batch()
812 !test_bit(STRIPE_BITMAP_PENDING, &sh->state) && in stripe_can_batch()
813 is_full_stripe_write(sh); in stripe_can_batch()
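is_full_stripe_write() and stripe_can_batch() are visible almost in full above: batching is only attempted for full-stripe writes with no pending bitmap work. Reassembled below; the raid5_has_log()/raid5_has_ppl() early-out in stripe_can_batch() is filled in as an assumption, since journaled and PPL arrays do not batch stripes.

static bool is_full_stripe_write(struct stripe_head *sh)
{
        BUG_ON(sh->overwrite_disks > (sh->disks - sh->raid_conf->max_degraded));
        return sh->overwrite_disks == (sh->disks - sh->raid_conf->max_degraded);
}

static bool stripe_can_batch(struct stripe_head *sh)
{
        struct r5conf *conf = sh->raid_conf;

        if (raid5_has_log(conf) || raid5_has_ppl(conf))
                return false;
        return test_bit(STRIPE_BATCH_READY, &sh->state) &&
               !test_bit(STRIPE_BITMAP_PENDING, &sh->state) &&
               is_full_stripe_write(sh);
}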
817 static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh) in stripe_add_to_batch_list() argument
826 tmp_sec = sh->sector; in stripe_add_to_batch_list()
829 head_sector = sh->sector - RAID5_STRIPE_SECTORS(conf); in stripe_add_to_batch_list()
862 lock_two_stripes(head, sh); in stripe_add_to_batch_list()
864 if (!stripe_can_batch(head) || !stripe_can_batch(sh)) in stripe_add_to_batch_list()
867 if (sh->batch_head) in stripe_add_to_batch_list()
871 while (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx) in stripe_add_to_batch_list()
873 if (head->dev[dd_idx].towrite->bi_opf != sh->dev[dd_idx].towrite->bi_opf || in stripe_add_to_batch_list()
874 bio_op(head->dev[dd_idx].towrite) != bio_op(sh->dev[dd_idx].towrite)) in stripe_add_to_batch_list()
891 sh->batch_head = head->batch_head; in stripe_add_to_batch_list()
897 list_add(&sh->batch_list, &head->batch_list); in stripe_add_to_batch_list()
901 sh->batch_head = head->batch_head; in stripe_add_to_batch_list()
903 list_add_tail(&sh->batch_list, &head->batch_list); in stripe_add_to_batch_list()
907 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in stripe_add_to_batch_list()
912 if (test_and_clear_bit(STRIPE_BIT_DELAY, &sh->state)) { in stripe_add_to_batch_list()
913 int seq = sh->bm_seq; in stripe_add_to_batch_list()
914 if (test_bit(STRIPE_BIT_DELAY, &sh->batch_head->state) && in stripe_add_to_batch_list()
915 sh->batch_head->bm_seq > seq) in stripe_add_to_batch_list()
916 seq = sh->batch_head->bm_seq; in stripe_add_to_batch_list()
917 set_bit(STRIPE_BIT_DELAY, &sh->batch_head->state); in stripe_add_to_batch_list()
918 sh->batch_head->bm_seq = seq; in stripe_add_to_batch_list()
921 atomic_inc(&sh->count); in stripe_add_to_batch_list()
923 unlock_two_stripes(head, sh); in stripe_add_to_batch_list()
931 static int use_new_offset(struct r5conf *conf, struct stripe_head *sh) in use_new_offset() argument
941 if (sh->generation == conf->generation - 1) in use_new_offset()
1058 static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) in ops_run_io() argument
1060 struct r5conf *conf = sh->raid_conf; in ops_run_io()
1061 int i, disks = sh->disks; in ops_run_io()
1062 struct stripe_head *head_sh = sh; in ops_run_io()
1068 if (log_stripe(sh, s) == 0) in ops_run_io()
1079 sh = head_sh; in ops_run_io()
1080 if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) { in ops_run_io()
1082 if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags)) in ops_run_io()
1084 if (test_bit(R5_Discard, &sh->dev[i].flags)) in ops_run_io()
1086 } else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags)) in ops_run_io()
1089 &sh->dev[i].flags)) { in ops_run_io()
1094 if (test_and_clear_bit(R5_SyncIO, &sh->dev[i].flags)) in ops_run_io()
1098 bi = &sh->dev[i].req; in ops_run_io()
1099 rbi = &sh->dev[i].rreq; /* For writing to replacement */ in ops_run_io()
1139 int bad = is_badblock(rdev, sh->sector, RAID5_STRIPE_SECTORS(conf), in ops_run_io()
1173 set_bit(STRIPE_IO_STARTED, &sh->state); in ops_run_io()
1180 bi->bi_private = sh; in ops_run_io()
1183 __func__, (unsigned long long)sh->sector, in ops_run_io()
1185 atomic_inc(&sh->count); in ops_run_io()
1186 if (sh != head_sh) in ops_run_io()
1188 if (use_new_offset(conf, sh)) in ops_run_io()
1189 bi->bi_iter.bi_sector = (sh->sector in ops_run_io()
1192 bi->bi_iter.bi_sector = (sh->sector in ops_run_io()
1197 if (test_bit(R5_SkipCopy, &sh->dev[i].flags)) in ops_run_io()
1198 WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags)); in ops_run_io()
1201 test_bit(R5_InJournal, &sh->dev[i].flags)) in ops_run_io()
1207 sh->dev[i].vec.bv_page = sh->dev[i].orig_page; in ops_run_io()
1209 sh->dev[i].vec.bv_page = sh->dev[i].page; in ops_run_io()
1212 bi->bi_io_vec[0].bv_offset = sh->dev[i].offset; in ops_run_io()
1214 bi->bi_write_hint = sh->dev[i].write_hint; in ops_run_io()
1216 sh->dev[i].write_hint = RWH_WRITE_LIFE_NOT_SET; in ops_run_io()
1224 set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags); in ops_run_io()
1229 sh->dev[i].sector); in ops_run_io()
1240 set_bit(STRIPE_IO_STARTED, &sh->state); in ops_run_io()
1246 rbi->bi_private = sh; in ops_run_io()
1250 __func__, (unsigned long long)sh->sector, in ops_run_io()
1252 atomic_inc(&sh->count); in ops_run_io()
1253 if (sh != head_sh) in ops_run_io()
1255 if (use_new_offset(conf, sh)) in ops_run_io()
1256 rbi->bi_iter.bi_sector = (sh->sector in ops_run_io()
1259 rbi->bi_iter.bi_sector = (sh->sector in ops_run_io()
1261 if (test_bit(R5_SkipCopy, &sh->dev[i].flags)) in ops_run_io()
1262 WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags)); in ops_run_io()
1263 sh->dev[i].rvec.bv_page = sh->dev[i].page; in ops_run_io()
1266 rbi->bi_io_vec[0].bv_offset = sh->dev[i].offset; in ops_run_io()
1268 rbi->bi_write_hint = sh->dev[i].write_hint; in ops_run_io()
1269 sh->dev[i].write_hint = RWH_WRITE_LIFE_NOT_SET; in ops_run_io()
1279 sh->dev[i].sector); in ops_run_io()
1287 set_bit(STRIPE_DEGRADED, &sh->state); in ops_run_io()
1289 bi->bi_opf, i, (unsigned long long)sh->sector); in ops_run_io()
1290 clear_bit(R5_LOCKED, &sh->dev[i].flags); in ops_run_io()
1291 set_bit(STRIPE_HANDLE, &sh->state); in ops_run_io()
1296 sh = list_first_entry(&sh->batch_list, struct stripe_head, in ops_run_io()
1298 if (sh != head_sh) in ops_run_io()
1309 struct stripe_head *sh, int no_skipcopy) in async_copy_data() argument
1317 struct r5conf *conf = sh->raid_conf; in async_copy_data()
1373 struct stripe_head *sh = stripe_head_ref; in ops_complete_biofill() local
1375 struct r5conf *conf = sh->raid_conf; in ops_complete_biofill()
1378 (unsigned long long)sh->sector); in ops_complete_biofill()
1381 for (i = sh->disks; i--; ) { in ops_complete_biofill()
1382 struct r5dev *dev = &sh->dev[i]; in ops_complete_biofill()
1403 clear_bit(STRIPE_BIOFILL_RUN, &sh->state); in ops_complete_biofill()
1405 set_bit(STRIPE_HANDLE, &sh->state); in ops_complete_biofill()
1406 raid5_release_stripe(sh); in ops_complete_biofill()
1409 static void ops_run_biofill(struct stripe_head *sh) in ops_run_biofill() argument
1414 struct r5conf *conf = sh->raid_conf; in ops_run_biofill()
1416 BUG_ON(sh->batch_head); in ops_run_biofill()
1418 (unsigned long long)sh->sector); in ops_run_biofill()
1420 for (i = sh->disks; i--; ) { in ops_run_biofill()
1421 struct r5dev *dev = &sh->dev[i]; in ops_run_biofill()
1424 spin_lock_irq(&sh->stripe_lock); in ops_run_biofill()
1427 spin_unlock_irq(&sh->stripe_lock); in ops_run_biofill()
1432 dev->sector, tx, sh, 0); in ops_run_biofill()
1438 atomic_inc(&sh->count); in ops_run_biofill()
1439 init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_biofill, sh, NULL); in ops_run_biofill()
1443 static void mark_target_uptodate(struct stripe_head *sh, int target) in mark_target_uptodate() argument
1450 tgt = &sh->dev[target]; in mark_target_uptodate()
1458 struct stripe_head *sh = stripe_head_ref; in ops_complete_compute() local
1461 (unsigned long long)sh->sector); in ops_complete_compute()
1464 mark_target_uptodate(sh, sh->ops.target); in ops_complete_compute()
1465 mark_target_uptodate(sh, sh->ops.target2); in ops_complete_compute()
1467 clear_bit(STRIPE_COMPUTE_RUN, &sh->state); in ops_complete_compute()
1468 if (sh->check_state == check_state_compute_run) in ops_complete_compute()
1469 sh->check_state = check_state_compute_result; in ops_complete_compute()
1470 set_bit(STRIPE_HANDLE, &sh->state); in ops_complete_compute()
1471 raid5_release_stripe(sh); in ops_complete_compute()
1481 static addr_conv_t *to_addr_conv(struct stripe_head *sh, in to_addr_conv() argument
1484 return (void *) (to_addr_page(percpu, i) + sh->disks + 2); in to_addr_conv()
1491 to_addr_offs(struct stripe_head *sh, struct raid5_percpu *percpu) in to_addr_offs() argument
1493 return (unsigned int *) (to_addr_conv(sh, percpu, 0) + sh->disks + 2); in to_addr_offs()
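to_addr_conv() and to_addr_offs() carve regions out of the same per-CPU scratch area: disks + 2 struct page pointers (data plus P and Q), then disks + 2 addr_conv_t entries, then the per-device page offsets. The two one-liners above reconstruct to the sketch below; to_addr_page() and struct raid5_percpu are assumed from the surrounding driver.

static addr_conv_t *to_addr_conv(struct stripe_head *sh,
                                 struct raid5_percpu *percpu, int i)
{
        /* addr_conv_t area sits right after the disks + 2 page pointers */
        return (void *) (to_addr_page(percpu, i) + sh->disks + 2);
}

static unsigned int *
to_addr_offs(struct stripe_head *sh, struct raid5_percpu *percpu)
{
        /* per-device page offsets follow the disks + 2 addr_conv_t entries */
        return (unsigned int *) (to_addr_conv(sh, percpu, 0) + sh->disks + 2);
}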
1497 ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu) in ops_run_compute5() argument
1499 int disks = sh->disks; in ops_run_compute5()
1501 unsigned int *off_srcs = to_addr_offs(sh, percpu); in ops_run_compute5()
1502 int target = sh->ops.target; in ops_run_compute5()
1503 struct r5dev *tgt = &sh->dev[target]; in ops_run_compute5()
1511 BUG_ON(sh->batch_head); in ops_run_compute5()
1514 __func__, (unsigned long long)sh->sector, target); in ops_run_compute5()
1519 off_srcs[count] = sh->dev[i].offset; in ops_run_compute5()
1520 xor_srcs[count++] = sh->dev[i].page; in ops_run_compute5()
1524 atomic_inc(&sh->count); in ops_run_compute5()
1527 ops_complete_compute, sh, to_addr_conv(sh, percpu, 0)); in ops_run_compute5()
1530 RAID5_STRIPE_SIZE(sh->raid_conf), &submit); in ops_run_compute5()
1533 RAID5_STRIPE_SIZE(sh->raid_conf), &submit); in ops_run_compute5()
1539 * @srcs - (struct page *) array of size sh->disks
1541 * @sh - stripe_head to parse
1550 struct stripe_head *sh, in set_syndrome_sources() argument
1553 int disks = sh->disks; in set_syndrome_sources()
1554 int syndrome_disks = sh->ddf_layout ? disks : (disks - 2); in set_syndrome_sources()
1555 int d0_idx = raid6_d0(sh); in set_syndrome_sources()
1565 int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks); in set_syndrome_sources()
1566 struct r5dev *dev = &sh->dev[i]; in set_syndrome_sources()
1568 if (i == sh->qd_idx || i == sh->pd_idx || in set_syndrome_sources()
1577 srcs[slot] = sh->dev[i].orig_page; in set_syndrome_sources()
1579 srcs[slot] = sh->dev[i].page; in set_syndrome_sources()
1585 offs[slot] = sh->dev[i].offset; in set_syndrome_sources()
1594 ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu) in ops_run_compute6_1() argument
1596 int disks = sh->disks; in ops_run_compute6_1()
1598 unsigned int *offs = to_addr_offs(sh, percpu); in ops_run_compute6_1()
1600 int qd_idx = sh->qd_idx; in ops_run_compute6_1()
1609 BUG_ON(sh->batch_head); in ops_run_compute6_1()
1610 if (sh->ops.target < 0) in ops_run_compute6_1()
1611 target = sh->ops.target2; in ops_run_compute6_1()
1612 else if (sh->ops.target2 < 0) in ops_run_compute6_1()
1613 target = sh->ops.target; in ops_run_compute6_1()
1619 __func__, (unsigned long long)sh->sector, target); in ops_run_compute6_1()
1621 tgt = &sh->dev[target]; in ops_run_compute6_1()
1626 atomic_inc(&sh->count); in ops_run_compute6_1()
1629 count = set_syndrome_sources(blocks, offs, sh, SYNDROME_SRC_ALL); in ops_run_compute6_1()
1633 ops_complete_compute, sh, in ops_run_compute6_1()
1634 to_addr_conv(sh, percpu, 0)); in ops_run_compute6_1()
1636 RAID5_STRIPE_SIZE(sh->raid_conf), &submit); in ops_run_compute6_1()
1643 offs[count] = sh->dev[i].offset; in ops_run_compute6_1()
1644 blocks[count++] = sh->dev[i].page; in ops_run_compute6_1()
1648 NULL, ops_complete_compute, sh, in ops_run_compute6_1()
1649 to_addr_conv(sh, percpu, 0)); in ops_run_compute6_1()
1651 RAID5_STRIPE_SIZE(sh->raid_conf), &submit); in ops_run_compute6_1()
1658 ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu) in ops_run_compute6_2() argument
1660 int i, count, disks = sh->disks; in ops_run_compute6_2()
1661 int syndrome_disks = sh->ddf_layout ? disks : disks-2; in ops_run_compute6_2()
1662 int d0_idx = raid6_d0(sh); in ops_run_compute6_2()
1664 int target = sh->ops.target; in ops_run_compute6_2()
1665 int target2 = sh->ops.target2; in ops_run_compute6_2()
1666 struct r5dev *tgt = &sh->dev[target]; in ops_run_compute6_2()
1667 struct r5dev *tgt2 = &sh->dev[target2]; in ops_run_compute6_2()
1670 unsigned int *offs = to_addr_offs(sh, percpu); in ops_run_compute6_2()
1673 BUG_ON(sh->batch_head); in ops_run_compute6_2()
1675 __func__, (unsigned long long)sh->sector, target, target2); in ops_run_compute6_2()
1690 int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks); in ops_run_compute6_2()
1692 offs[slot] = sh->dev[i].offset; in ops_run_compute6_2()
1693 blocks[slot] = sh->dev[i].page; in ops_run_compute6_2()
1706 __func__, (unsigned long long)sh->sector, faila, failb); in ops_run_compute6_2()
1708 atomic_inc(&sh->count); in ops_run_compute6_2()
1715 ops_complete_compute, sh, in ops_run_compute6_2()
1716 to_addr_conv(sh, percpu, 0)); in ops_run_compute6_2()
1718 RAID5_STRIPE_SIZE(sh->raid_conf), in ops_run_compute6_2()
1724 int qd_idx = sh->qd_idx; in ops_run_compute6_2()
1736 offs[count] = sh->dev[i].offset; in ops_run_compute6_2()
1737 blocks[count++] = sh->dev[i].page; in ops_run_compute6_2()
1739 dest = sh->dev[data_target].page; in ops_run_compute6_2()
1740 dest_off = sh->dev[data_target].offset; in ops_run_compute6_2()
1744 to_addr_conv(sh, percpu, 0)); in ops_run_compute6_2()
1746 RAID5_STRIPE_SIZE(sh->raid_conf), in ops_run_compute6_2()
1749 count = set_syndrome_sources(blocks, offs, sh, SYNDROME_SRC_ALL); in ops_run_compute6_2()
1751 ops_complete_compute, sh, in ops_run_compute6_2()
1752 to_addr_conv(sh, percpu, 0)); in ops_run_compute6_2()
1754 RAID5_STRIPE_SIZE(sh->raid_conf), in ops_run_compute6_2()
1759 ops_complete_compute, sh, in ops_run_compute6_2()
1760 to_addr_conv(sh, percpu, 0)); in ops_run_compute6_2()
1764 RAID5_STRIPE_SIZE(sh->raid_conf), in ops_run_compute6_2()
1770 RAID5_STRIPE_SIZE(sh->raid_conf), in ops_run_compute6_2()
1779 struct stripe_head *sh = stripe_head_ref; in ops_complete_prexor() local
1782 (unsigned long long)sh->sector); in ops_complete_prexor()
1784 if (r5c_is_writeback(sh->raid_conf->log)) in ops_complete_prexor()
1789 r5c_release_extra_page(sh); in ops_complete_prexor()
1793 ops_run_prexor5(struct stripe_head *sh, struct raid5_percpu *percpu, in ops_run_prexor5() argument
1796 int disks = sh->disks; in ops_run_prexor5()
1798 unsigned int *off_srcs = to_addr_offs(sh, percpu); in ops_run_prexor5()
1799 int count = 0, pd_idx = sh->pd_idx, i; in ops_run_prexor5()
1803 unsigned int off_dest = off_srcs[count] = sh->dev[pd_idx].offset; in ops_run_prexor5()
1804 struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; in ops_run_prexor5()
1806 BUG_ON(sh->batch_head); in ops_run_prexor5()
1808 (unsigned long long)sh->sector); in ops_run_prexor5()
1811 struct r5dev *dev = &sh->dev[i]; in ops_run_prexor5()
1827 ops_complete_prexor, sh, to_addr_conv(sh, percpu, 0)); in ops_run_prexor5()
1829 RAID5_STRIPE_SIZE(sh->raid_conf), &submit); in ops_run_prexor5()
1835 ops_run_prexor6(struct stripe_head *sh, struct raid5_percpu *percpu, in ops_run_prexor6() argument
1839 unsigned int *offs = to_addr_offs(sh, percpu); in ops_run_prexor6()
1844 (unsigned long long)sh->sector); in ops_run_prexor6()
1846 count = set_syndrome_sources(blocks, offs, sh, SYNDROME_SRC_WANT_DRAIN); in ops_run_prexor6()
1849 ops_complete_prexor, sh, to_addr_conv(sh, percpu, 0)); in ops_run_prexor6()
1851 RAID5_STRIPE_SIZE(sh->raid_conf), &submit); in ops_run_prexor6()
1857 ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) in ops_run_biodrain() argument
1859 struct r5conf *conf = sh->raid_conf; in ops_run_biodrain()
1860 int disks = sh->disks; in ops_run_biodrain()
1862 struct stripe_head *head_sh = sh; in ops_run_biodrain()
1865 (unsigned long long)sh->sector); in ops_run_biodrain()
1871 sh = head_sh; in ops_run_biodrain()
1876 dev = &sh->dev[i]; in ops_run_biodrain()
1882 spin_lock_irq(&sh->stripe_lock); in ops_run_biodrain()
1885 sh->overwrite_disks = 0; in ops_run_biodrain()
1888 spin_unlock_irq(&sh->stripe_lock); in ops_run_biodrain()
1902 dev->sector, tx, sh, in ops_run_biodrain()
1915 sh = list_first_entry(&sh->batch_list, in ops_run_biodrain()
1918 if (sh == head_sh) in ops_run_biodrain()
1930 struct stripe_head *sh = stripe_head_ref; in ops_complete_reconstruct() local
1931 int disks = sh->disks; in ops_complete_reconstruct()
1932 int pd_idx = sh->pd_idx; in ops_complete_reconstruct()
1933 int qd_idx = sh->qd_idx; in ops_complete_reconstruct()
1938 (unsigned long long)sh->sector); in ops_complete_reconstruct()
1941 fua |= test_bit(R5_WantFUA, &sh->dev[i].flags); in ops_complete_reconstruct()
1942 sync |= test_bit(R5_SyncIO, &sh->dev[i].flags); in ops_complete_reconstruct()
1943 discard |= test_bit(R5_Discard, &sh->dev[i].flags); in ops_complete_reconstruct()
1947 struct r5dev *dev = &sh->dev[i]; in ops_complete_reconstruct()
1952 if (test_bit(STRIPE_EXPAND_READY, &sh->state)) in ops_complete_reconstruct()
1962 if (sh->reconstruct_state == reconstruct_state_drain_run) in ops_complete_reconstruct()
1963 sh->reconstruct_state = reconstruct_state_drain_result; in ops_complete_reconstruct()
1964 else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) in ops_complete_reconstruct()
1965 sh->reconstruct_state = reconstruct_state_prexor_drain_result; in ops_complete_reconstruct()
1967 BUG_ON(sh->reconstruct_state != reconstruct_state_run); in ops_complete_reconstruct()
1968 sh->reconstruct_state = reconstruct_state_result; in ops_complete_reconstruct()
1971 set_bit(STRIPE_HANDLE, &sh->state); in ops_complete_reconstruct()
1972 raid5_release_stripe(sh); in ops_complete_reconstruct()
1976 ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu, in ops_run_reconstruct5() argument
1979 int disks = sh->disks; in ops_run_reconstruct5()
1983 int count, pd_idx = sh->pd_idx, i; in ops_run_reconstruct5()
1989 struct stripe_head *head_sh = sh; in ops_run_reconstruct5()
1993 (unsigned long long)sh->sector); in ops_run_reconstruct5()
1995 for (i = 0; i < sh->disks; i++) { in ops_run_reconstruct5()
1998 if (!test_bit(R5_Discard, &sh->dev[i].flags)) in ops_run_reconstruct5()
2001 if (i >= sh->disks) { in ops_run_reconstruct5()
2002 atomic_inc(&sh->count); in ops_run_reconstruct5()
2003 set_bit(R5_Discard, &sh->dev[pd_idx].flags); in ops_run_reconstruct5()
2004 ops_complete_reconstruct(sh); in ops_run_reconstruct5()
2010 off_srcs = to_addr_offs(sh, percpu); in ops_run_reconstruct5()
2016 off_dest = off_srcs[count] = sh->dev[pd_idx].offset; in ops_run_reconstruct5()
2017 xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; in ops_run_reconstruct5()
2019 struct r5dev *dev = &sh->dev[i]; in ops_run_reconstruct5()
2027 xor_dest = sh->dev[pd_idx].page; in ops_run_reconstruct5()
2028 off_dest = sh->dev[pd_idx].offset; in ops_run_reconstruct5()
2030 struct r5dev *dev = &sh->dev[i]; in ops_run_reconstruct5()
2044 list_first_entry(&sh->batch_list, in ops_run_reconstruct5()
2052 to_addr_conv(sh, percpu, j)); in ops_run_reconstruct5()
2056 to_addr_conv(sh, percpu, j)); in ops_run_reconstruct5()
2061 RAID5_STRIPE_SIZE(sh->raid_conf), &submit); in ops_run_reconstruct5()
2064 RAID5_STRIPE_SIZE(sh->raid_conf), &submit); in ops_run_reconstruct5()
2067 sh = list_first_entry(&sh->batch_list, struct stripe_head, in ops_run_reconstruct5()
2074 ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu, in ops_run_reconstruct6() argument
2081 struct stripe_head *head_sh = sh; in ops_run_reconstruct6()
2086 pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector); in ops_run_reconstruct6()
2088 for (i = 0; i < sh->disks; i++) { in ops_run_reconstruct6()
2089 if (sh->pd_idx == i || sh->qd_idx == i) in ops_run_reconstruct6()
2091 if (!test_bit(R5_Discard, &sh->dev[i].flags)) in ops_run_reconstruct6()
2094 if (i >= sh->disks) { in ops_run_reconstruct6()
2095 atomic_inc(&sh->count); in ops_run_reconstruct6()
2096 set_bit(R5_Discard, &sh->dev[sh->pd_idx].flags); in ops_run_reconstruct6()
2097 set_bit(R5_Discard, &sh->dev[sh->qd_idx].flags); in ops_run_reconstruct6()
2098 ops_complete_reconstruct(sh); in ops_run_reconstruct6()
2104 offs = to_addr_offs(sh, percpu); in ops_run_reconstruct6()
2106 if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) { in ops_run_reconstruct6()
2114 count = set_syndrome_sources(blocks, offs, sh, synflags); in ops_run_reconstruct6()
2116 list_first_entry(&sh->batch_list, in ops_run_reconstruct6()
2122 head_sh, to_addr_conv(sh, percpu, j)); in ops_run_reconstruct6()
2125 to_addr_conv(sh, percpu, j)); in ops_run_reconstruct6()
2127 RAID5_STRIPE_SIZE(sh->raid_conf), &submit); in ops_run_reconstruct6()
2130 sh = list_first_entry(&sh->batch_list, struct stripe_head, in ops_run_reconstruct6()
2138 struct stripe_head *sh = stripe_head_ref; in ops_complete_check() local
2141 (unsigned long long)sh->sector); in ops_complete_check()
2143 sh->check_state = check_state_check_result; in ops_complete_check()
2144 set_bit(STRIPE_HANDLE, &sh->state); in ops_complete_check()
2145 raid5_release_stripe(sh); in ops_complete_check()
2148 static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu) in ops_run_check_p() argument
2150 int disks = sh->disks; in ops_run_check_p()
2151 int pd_idx = sh->pd_idx; in ops_run_check_p()
2152 int qd_idx = sh->qd_idx; in ops_run_check_p()
2156 unsigned int *off_srcs = to_addr_offs(sh, percpu); in ops_run_check_p()
2163 (unsigned long long)sh->sector); in ops_run_check_p()
2165 BUG_ON(sh->batch_head); in ops_run_check_p()
2167 xor_dest = sh->dev[pd_idx].page; in ops_run_check_p()
2168 off_dest = sh->dev[pd_idx].offset; in ops_run_check_p()
2174 off_srcs[count] = sh->dev[i].offset; in ops_run_check_p()
2175 xor_srcs[count++] = sh->dev[i].page; in ops_run_check_p()
2179 to_addr_conv(sh, percpu, 0)); in ops_run_check_p()
2181 RAID5_STRIPE_SIZE(sh->raid_conf), in ops_run_check_p()
2182 &sh->ops.zero_sum_result, &submit); in ops_run_check_p()
2184 atomic_inc(&sh->count); in ops_run_check_p()
2185 init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_check, sh, NULL); in ops_run_check_p()
2189 static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp) in ops_run_check_pq() argument
2192 unsigned int *offs = to_addr_offs(sh, percpu); in ops_run_check_pq()
2197 (unsigned long long)sh->sector, checkp); in ops_run_check_pq()
2199 BUG_ON(sh->batch_head); in ops_run_check_pq()
2200 count = set_syndrome_sources(srcs, offs, sh, SYNDROME_SRC_ALL); in ops_run_check_pq()
2204 atomic_inc(&sh->count); in ops_run_check_pq()
2206 sh, to_addr_conv(sh, percpu, 0)); in ops_run_check_pq()
2208 RAID5_STRIPE_SIZE(sh->raid_conf), in ops_run_check_pq()
2209 &sh->ops.zero_sum_result, percpu->spare_page, 0, &submit); in ops_run_check_pq()
2212 static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request) in raid_run_ops() argument
2214 int overlap_clear = 0, i, disks = sh->disks; in raid_run_ops()
2216 struct r5conf *conf = sh->raid_conf; in raid_run_ops()
2224 ops_run_biofill(sh); in raid_run_ops()
2230 tx = ops_run_compute5(sh, percpu); in raid_run_ops()
2232 if (sh->ops.target2 < 0 || sh->ops.target < 0) in raid_run_ops()
2233 tx = ops_run_compute6_1(sh, percpu); in raid_run_ops()
2235 tx = ops_run_compute6_2(sh, percpu); in raid_run_ops()
2244 tx = ops_run_prexor5(sh, percpu, tx); in raid_run_ops()
2246 tx = ops_run_prexor6(sh, percpu, tx); in raid_run_ops()
2250 tx = ops_run_partial_parity(sh, percpu, tx); in raid_run_ops()
2253 tx = ops_run_biodrain(sh, tx); in raid_run_ops()
2259 ops_run_reconstruct5(sh, percpu, tx); in raid_run_ops()
2261 ops_run_reconstruct6(sh, percpu, tx); in raid_run_ops()
2265 if (sh->check_state == check_state_run) in raid_run_ops()
2266 ops_run_check_p(sh, percpu); in raid_run_ops()
2267 else if (sh->check_state == check_state_run_q) in raid_run_ops()
2268 ops_run_check_pq(sh, percpu, 0); in raid_run_ops()
2269 else if (sh->check_state == check_state_run_pq) in raid_run_ops()
2270 ops_run_check_pq(sh, percpu, 1); in raid_run_ops()
2275 if (overlap_clear && !sh->batch_head) in raid_run_ops()
2277 struct r5dev *dev = &sh->dev[i]; in raid_run_ops()
2279 wake_up(&sh->raid_conf->wait_for_overlap); in raid_run_ops()
2284 static void free_stripe(struct kmem_cache *sc, struct stripe_head *sh) in free_stripe() argument
2287 kfree(sh->pages); in free_stripe()
2289 if (sh->ppl_page) in free_stripe()
2290 __free_page(sh->ppl_page); in free_stripe()
2291 kmem_cache_free(sc, sh); in free_stripe()
2297 struct stripe_head *sh; in alloc_stripe() local
2300 sh = kmem_cache_zalloc(sc, gfp); in alloc_stripe()
2301 if (sh) { in alloc_stripe()
2302 spin_lock_init(&sh->stripe_lock); in alloc_stripe()
2303 spin_lock_init(&sh->batch_lock); in alloc_stripe()
2304 INIT_LIST_HEAD(&sh->batch_list); in alloc_stripe()
2305 INIT_LIST_HEAD(&sh->lru); in alloc_stripe()
2306 INIT_LIST_HEAD(&sh->r5c); in alloc_stripe()
2307 INIT_LIST_HEAD(&sh->log_list); in alloc_stripe()
2308 atomic_set(&sh->count, 1); in alloc_stripe()
2309 sh->raid_conf = conf; in alloc_stripe()
2310 sh->log_start = MaxSector; in alloc_stripe()
2312 struct r5dev *dev = &sh->dev[i]; in alloc_stripe()
2319 sh->ppl_page = alloc_page(gfp); in alloc_stripe()
2320 if (!sh->ppl_page) { in alloc_stripe()
2321 free_stripe(sc, sh); in alloc_stripe()
2326 if (init_stripe_shared_pages(sh, conf, disks)) { in alloc_stripe()
2327 free_stripe(sc, sh); in alloc_stripe()
2332 return sh; in alloc_stripe()
2336 struct stripe_head *sh; in grow_one_stripe() local
2338 sh = alloc_stripe(conf->slab_cache, gfp, conf->pool_size, conf); in grow_one_stripe()
2339 if (!sh) in grow_one_stripe()
2342 if (grow_buffers(sh, gfp)) { in grow_one_stripe()
2343 shrink_buffers(sh); in grow_one_stripe()
2344 free_stripe(conf->slab_cache, sh); in grow_one_stripe()
2347 sh->hash_lock_index = in grow_one_stripe()
2352 raid5_release_stripe(sh); in grow_one_stripe()
2638 struct stripe_head *sh; in drop_one_stripe() local
2642 sh = get_free_stripe(conf, hash); in drop_one_stripe()
2644 if (!sh) in drop_one_stripe()
2646 BUG_ON(atomic_read(&sh->count)); in drop_one_stripe()
2647 shrink_buffers(sh); in drop_one_stripe()
2648 free_stripe(conf->slab_cache, sh); in drop_one_stripe()
2666 struct stripe_head *sh = bi->bi_private; in raid5_end_read_request() local
2667 struct r5conf *conf = sh->raid_conf; in raid5_end_read_request()
2668 int disks = sh->disks, i; in raid5_end_read_request()
2674 if (bi == &sh->dev[i].req) in raid5_end_read_request()
2678 (unsigned long long)sh->sector, i, atomic_read(&sh->count), in raid5_end_read_request()
2685 if (test_bit(R5_ReadRepl, &sh->dev[i].flags)) in raid5_end_read_request()
2695 if (use_new_offset(conf, sh)) in raid5_end_read_request()
2696 s = sh->sector + rdev->new_data_offset; in raid5_end_read_request()
2698 s = sh->sector + rdev->data_offset; in raid5_end_read_request()
2700 set_bit(R5_UPTODATE, &sh->dev[i].flags); in raid5_end_read_request()
2701 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { in raid5_end_read_request()
2712 clear_bit(R5_ReadError, &sh->dev[i].flags); in raid5_end_read_request()
2713 clear_bit(R5_ReWrite, &sh->dev[i].flags); in raid5_end_read_request()
2714 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) in raid5_end_read_request()
2715 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags); in raid5_end_read_request()
2717 if (test_bit(R5_InJournal, &sh->dev[i].flags)) in raid5_end_read_request()
2722 set_bit(R5_OrigPageUPTDODATE, &sh->dev[i].flags); in raid5_end_read_request()
2731 clear_bit(R5_UPTODATE, &sh->dev[i].flags); in raid5_end_read_request()
2734 if (test_bit(R5_ReadRepl, &sh->dev[i].flags)) in raid5_end_read_request()
2747 } else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) { in raid5_end_read_request()
2768 && !test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) in raid5_end_read_request()
2771 if (sh->qd_idx >= 0 && sh->pd_idx == i) in raid5_end_read_request()
2772 set_bit(R5_ReadError, &sh->dev[i].flags); in raid5_end_read_request()
2773 else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) { in raid5_end_read_request()
2774 set_bit(R5_ReadError, &sh->dev[i].flags); in raid5_end_read_request()
2775 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags); in raid5_end_read_request()
2777 set_bit(R5_ReadNoMerge, &sh->dev[i].flags); in raid5_end_read_request()
2779 clear_bit(R5_ReadError, &sh->dev[i].flags); in raid5_end_read_request()
2780 clear_bit(R5_ReWrite, &sh->dev[i].flags); in raid5_end_read_request()
2784 rdev, sh->sector, RAID5_STRIPE_SECTORS(conf), 0))) in raid5_end_read_request()
2790 clear_bit(R5_LOCKED, &sh->dev[i].flags); in raid5_end_read_request()
2791 set_bit(STRIPE_HANDLE, &sh->state); in raid5_end_read_request()
2792 raid5_release_stripe(sh); in raid5_end_read_request()
2797 struct stripe_head *sh = bi->bi_private; in raid5_end_write_request() local
2798 struct r5conf *conf = sh->raid_conf; in raid5_end_write_request()
2799 int disks = sh->disks, i; in raid5_end_write_request()
2806 if (bi == &sh->dev[i].req) { in raid5_end_write_request()
2810 if (bi == &sh->dev[i].rreq) { in raid5_end_write_request()
2824 (unsigned long long)sh->sector, i, atomic_read(&sh->count), in raid5_end_write_request()
2835 else if (is_badblock(rdev, sh->sector, in raid5_end_write_request()
2838 set_bit(R5_MadeGoodRepl, &sh->dev[i].flags); in raid5_end_write_request()
2841 set_bit(STRIPE_DEGRADED, &sh->state); in raid5_end_write_request()
2843 set_bit(R5_WriteError, &sh->dev[i].flags); in raid5_end_write_request()
2847 } else if (is_badblock(rdev, sh->sector, in raid5_end_write_request()
2850 set_bit(R5_MadeGood, &sh->dev[i].flags); in raid5_end_write_request()
2851 if (test_bit(R5_ReadError, &sh->dev[i].flags)) in raid5_end_write_request()
2856 set_bit(R5_ReWrite, &sh->dev[i].flags); in raid5_end_write_request()
2861 if (sh->batch_head && bi->bi_status && !replacement) in raid5_end_write_request()
2862 set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state); in raid5_end_write_request()
2865 if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags)) in raid5_end_write_request()
2866 clear_bit(R5_LOCKED, &sh->dev[i].flags); in raid5_end_write_request()
2867 set_bit(STRIPE_HANDLE, &sh->state); in raid5_end_write_request()
2869 if (sh->batch_head && sh != sh->batch_head) in raid5_end_write_request()
2870 raid5_release_stripe(sh->batch_head); in raid5_end_write_request()
2871 raid5_release_stripe(sh); in raid5_end_write_request()
2915 struct stripe_head *sh) in raid5_compute_sector() argument
3103 if (sh) { in raid5_compute_sector()
3104 sh->pd_idx = pd_idx; in raid5_compute_sector()
3105 sh->qd_idx = qd_idx; in raid5_compute_sector()
3106 sh->ddf_layout = ddf_layout; in raid5_compute_sector()
3115 sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous) in raid5_compute_blocknr() argument
3117 struct r5conf *conf = sh->raid_conf; in raid5_compute_blocknr()
3118 int raid_disks = sh->disks; in raid5_compute_blocknr()
3120 sector_t new_sector = sh->sector, check; in raid5_compute_blocknr()
3135 if (i == sh->pd_idx) in raid5_compute_blocknr()
3143 if (i > sh->pd_idx) in raid5_compute_blocknr()
3148 if (i < sh->pd_idx) in raid5_compute_blocknr()
3150 i -= (sh->pd_idx + 1); in raid5_compute_blocknr()
3162 if (i == sh->qd_idx) in raid5_compute_blocknr()
3169 if (sh->pd_idx == raid_disks-1) in raid5_compute_blocknr()
3171 else if (i > sh->pd_idx) in raid5_compute_blocknr()
3176 if (sh->pd_idx == raid_disks-1) in raid5_compute_blocknr()
3180 if (i < sh->pd_idx) in raid5_compute_blocknr()
3182 i -= (sh->pd_idx + 2); in raid5_compute_blocknr()
3192 if (sh->pd_idx == 0) in raid5_compute_blocknr()
3196 if (i < sh->pd_idx) in raid5_compute_blocknr()
3198 i -= (sh->pd_idx + 1); in raid5_compute_blocknr()
3203 if (i > sh->pd_idx) in raid5_compute_blocknr()
3208 if (i < sh->pd_idx) in raid5_compute_blocknr()
3210 i -= (sh->pd_idx + 1); in raid5_compute_blocknr()
3226 if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx in raid5_compute_blocknr()
3227 || sh2.qd_idx != sh->qd_idx) { in raid5_compute_blocknr()
3292 schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s, in schedule_reconstruction() argument
3295 int i, pd_idx = sh->pd_idx, qd_idx = sh->qd_idx, disks = sh->disks; in schedule_reconstruction()
3296 struct r5conf *conf = sh->raid_conf; in schedule_reconstruction()
3306 r5c_release_extra_page(sh); in schedule_reconstruction()
3309 struct r5dev *dev = &sh->dev[i]; in schedule_reconstruction()
3330 sh->reconstruct_state = reconstruct_state_drain_run; in schedule_reconstruction()
3333 sh->reconstruct_state = reconstruct_state_run; in schedule_reconstruction()
3338 if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state)) in schedule_reconstruction()
3341 BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) || in schedule_reconstruction()
3342 test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags))); in schedule_reconstruction()
3344 (!(test_bit(R5_UPTODATE, &sh->dev[qd_idx].flags) || in schedule_reconstruction()
3345 test_bit(R5_Wantcompute, &sh->dev[qd_idx].flags)))); in schedule_reconstruction()
3348 struct r5dev *dev = &sh->dev[i]; in schedule_reconstruction()
3367 sh->reconstruct_state = reconstruct_state_prexor_drain_run; in schedule_reconstruction()
3376 set_bit(R5_LOCKED, &sh->dev[pd_idx].flags); in schedule_reconstruction()
3377 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); in schedule_reconstruction()
3381 int qd_idx = sh->qd_idx; in schedule_reconstruction()
3382 struct r5dev *dev = &sh->dev[qd_idx]; in schedule_reconstruction()
3389 if (raid5_has_ppl(sh->raid_conf) && sh->ppl_page && in schedule_reconstruction()
3391 !test_bit(STRIPE_FULL_WRITE, &sh->state) && in schedule_reconstruction()
3392 test_bit(R5_Insync, &sh->dev[pd_idx].flags)) in schedule_reconstruction()
3396 __func__, (unsigned long long)sh->sector, in schedule_reconstruction()
3405 static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in add_stripe_bio() argument
3409 struct r5conf *conf = sh->raid_conf; in add_stripe_bio()
3414 (unsigned long long)sh->sector); in add_stripe_bio()
3416 spin_lock_irq(&sh->stripe_lock); in add_stripe_bio()
3417 sh->dev[dd_idx].write_hint = bi->bi_write_hint; in add_stripe_bio()
3419 if (sh->batch_head) in add_stripe_bio()
3422 bip = &sh->dev[dd_idx].towrite; in add_stripe_bio()
3426 bip = &sh->dev[dd_idx].toread; in add_stripe_bio()
3449 for (i = 0; i < sh->disks; i++) { in add_stripe_bio()
3450 if (i != sh->pd_idx && in add_stripe_bio()
3451 (i == dd_idx || sh->dev[i].towrite)) { in add_stripe_bio()
3452 sector = sh->dev[i].sector; in add_stripe_bio()
3466 clear_bit(STRIPE_BATCH_READY, &sh->state); in add_stripe_bio()
3477 sector_t sector = sh->dev[dd_idx].sector; in add_stripe_bio()
3478 for (bi=sh->dev[dd_idx].towrite; in add_stripe_bio()
3479 sector < sh->dev[dd_idx].sector + RAID5_STRIPE_SECTORS(conf) && in add_stripe_bio()
3481 bi = r5_next_bio(conf, bi, sh->dev[dd_idx].sector)) { in add_stripe_bio()
3485 if (sector >= sh->dev[dd_idx].sector + RAID5_STRIPE_SECTORS(conf)) in add_stripe_bio()
3486 if (!test_and_set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags)) in add_stripe_bio()
3487 sh->overwrite_disks++; in add_stripe_bio()
3492 (unsigned long long)sh->sector, dd_idx); in add_stripe_bio()
3507 set_bit(STRIPE_BITMAP_PENDING, &sh->state); in add_stripe_bio()
3508 spin_unlock_irq(&sh->stripe_lock); in add_stripe_bio()
3509 md_bitmap_startwrite(conf->mddev->bitmap, sh->sector, in add_stripe_bio()
3511 spin_lock_irq(&sh->stripe_lock); in add_stripe_bio()
3512 clear_bit(STRIPE_BITMAP_PENDING, &sh->state); in add_stripe_bio()
3513 if (!sh->batch_head) { in add_stripe_bio()
3514 sh->bm_seq = conf->seq_flush+1; in add_stripe_bio()
3515 set_bit(STRIPE_BIT_DELAY, &sh->state); in add_stripe_bio()
3518 spin_unlock_irq(&sh->stripe_lock); in add_stripe_bio()
3520 if (stripe_can_batch(sh)) in add_stripe_bio()
3521 stripe_add_to_batch_list(conf, sh); in add_stripe_bio()
3525 set_bit(R5_Overlap, &sh->dev[dd_idx].flags); in add_stripe_bio()
3526 spin_unlock_irq(&sh->stripe_lock); in add_stripe_bio()
3533 struct stripe_head *sh) in stripe_set_idx() argument
3545 &dd_idx, sh); in stripe_set_idx()
3549 handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, in handle_failed_stripe() argument
3553 BUG_ON(sh->batch_head); in handle_failed_stripe()
3558 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { in handle_failed_stripe()
3571 sh->sector, in handle_failed_stripe()
3577 spin_lock_irq(&sh->stripe_lock); in handle_failed_stripe()
3579 bi = sh->dev[i].towrite; in handle_failed_stripe()
3580 sh->dev[i].towrite = NULL; in handle_failed_stripe()
3581 sh->overwrite_disks = 0; in handle_failed_stripe()
3582 spin_unlock_irq(&sh->stripe_lock); in handle_failed_stripe()
3586 log_stripe_write_finished(sh); in handle_failed_stripe()
3588 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) in handle_failed_stripe()
3592 sh->dev[i].sector + RAID5_STRIPE_SECTORS(conf)) { in handle_failed_stripe()
3593 struct bio *nextbi = r5_next_bio(conf, bi, sh->dev[i].sector); in handle_failed_stripe()
3600 md_bitmap_endwrite(conf->mddev->bitmap, sh->sector, in handle_failed_stripe()
3604 bi = sh->dev[i].written; in handle_failed_stripe()
3605 sh->dev[i].written = NULL; in handle_failed_stripe()
3606 if (test_and_clear_bit(R5_SkipCopy, &sh->dev[i].flags)) { in handle_failed_stripe()
3607 WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags)); in handle_failed_stripe()
3608 sh->dev[i].page = sh->dev[i].orig_page; in handle_failed_stripe()
3613 sh->dev[i].sector + RAID5_STRIPE_SECTORS(conf)) { in handle_failed_stripe()
3614 struct bio *bi2 = r5_next_bio(conf, bi, sh->dev[i].sector); in handle_failed_stripe()
3624 if (!test_bit(R5_Wantfill, &sh->dev[i].flags) && in handle_failed_stripe()
3626 (!test_bit(R5_Insync, &sh->dev[i].flags) || in handle_failed_stripe()
3627 test_bit(R5_ReadError, &sh->dev[i].flags))) { in handle_failed_stripe()
3628 spin_lock_irq(&sh->stripe_lock); in handle_failed_stripe()
3629 bi = sh->dev[i].toread; in handle_failed_stripe()
3630 sh->dev[i].toread = NULL; in handle_failed_stripe()
3631 spin_unlock_irq(&sh->stripe_lock); in handle_failed_stripe()
3632 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) in handle_failed_stripe()
3637 sh->dev[i].sector + RAID5_STRIPE_SECTORS(conf)) { in handle_failed_stripe()
3639 r5_next_bio(conf, bi, sh->dev[i].sector); in handle_failed_stripe()
3646 md_bitmap_endwrite(conf->mddev->bitmap, sh->sector, in handle_failed_stripe()
3651 clear_bit(R5_LOCKED, &sh->dev[i].flags); in handle_failed_stripe()
3656 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) in handle_failed_stripe()
3662 handle_failed_sync(struct r5conf *conf, struct stripe_head *sh, in handle_failed_sync() argument
3668 BUG_ON(sh->batch_head); in handle_failed_sync()
3669 clear_bit(STRIPE_SYNCING, &sh->state); in handle_failed_sync()
3670 if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags)) in handle_failed_sync()
3691 && !rdev_set_badblocks(rdev, sh->sector, in handle_failed_sync()
3698 && !rdev_set_badblocks(rdev, sh->sector, in handle_failed_sync()
3710 static int want_replace(struct stripe_head *sh, int disk_idx) in want_replace() argument
3716 rdev = rcu_dereference(sh->raid_conf->disks[disk_idx].replacement); in want_replace()
3720 && (rdev->recovery_offset <= sh->sector in want_replace()
3721 || rdev->mddev->recovery_cp <= sh->sector)) in want_replace()
3727 static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s, in need_this_block() argument
3730 struct r5dev *dev = &sh->dev[disk_idx]; in need_this_block()
3731 struct r5dev *fdev[2] = { &sh->dev[s->failed_num[0]], in need_this_block()
3732 &sh->dev[s->failed_num[1]] }; in need_this_block()
3734 bool force_rcw = (sh->raid_conf->rmw_level == PARITY_DISABLE_RMW); in need_this_block()
3750 (s->replacing && want_replace(sh, disk_idx))) in need_this_block()
3775 !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in need_this_block()
3796 s->failed_num[i] == sh->pd_idx || in need_this_block()
3797 s->failed_num[i] == sh->qd_idx) && in need_this_block()
3814 sh->sector < sh->raid_conf->mddev->recovery_cp) in need_this_block()
3818 if (s->failed_num[i] != sh->pd_idx && in need_this_block()
3819 s->failed_num[i] != sh->qd_idx && in need_this_block()
3834 static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s, in fetch_block() argument
3837 struct r5dev *dev = &sh->dev[disk_idx]; in fetch_block()
3840 if (need_this_block(sh, s, disk_idx, disks)) { in fetch_block()
3846 BUG_ON(sh->batch_head); in fetch_block()
3858 ((sh->qd_idx >= 0 && sh->pd_idx == disk_idx) || in fetch_block()
3865 (unsigned long long)sh->sector, disk_idx); in fetch_block()
3866 set_bit(STRIPE_COMPUTE_RUN, &sh->state); in fetch_block()
3869 sh->ops.target = disk_idx; in fetch_block()
3870 sh->ops.target2 = -1; /* no 2nd target */ in fetch_block()
3889 &sh->dev[other].flags)) in fetch_block()
3894 (unsigned long long)sh->sector, in fetch_block()
3896 set_bit(STRIPE_COMPUTE_RUN, &sh->state); in fetch_block()
3898 set_bit(R5_Wantcompute, &sh->dev[disk_idx].flags); in fetch_block()
3899 set_bit(R5_Wantcompute, &sh->dev[other].flags); in fetch_block()
3900 sh->ops.target = disk_idx; in fetch_block()
3901 sh->ops.target2 = other; in fetch_block()
3920 static void handle_stripe_fill(struct stripe_head *sh, in handle_stripe_fill() argument
3930 if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state && in handle_stripe_fill()
3931 !sh->reconstruct_state) { in handle_stripe_fill()
3941 if (test_bit(STRIPE_R5C_CACHING, &sh->state)) in handle_stripe_fill()
3942 r5c_make_stripe_write_out(sh); in handle_stripe_fill()
3947 if (fetch_block(sh, s, i, disks)) in handle_stripe_fill()
3951 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe_fill()
3962 struct stripe_head *sh, int disks) in handle_stripe_clean_event() argument
3967 struct stripe_head *head_sh = sh; in handle_stripe_clean_event()
3971 if (sh->dev[i].written) { in handle_stripe_clean_event()
3972 dev = &sh->dev[i]; in handle_stripe_clean_event()
3998 md_bitmap_endwrite(conf->mddev->bitmap, sh->sector, in handle_stripe_clean_event()
4000 !test_bit(STRIPE_DEGRADED, &sh->state), in handle_stripe_clean_event()
4003 sh = list_first_entry(&sh->batch_list, in handle_stripe_clean_event()
4006 if (sh != head_sh) { in handle_stripe_clean_event()
4007 dev = &sh->dev[i]; in handle_stripe_clean_event()
4011 sh = head_sh; in handle_stripe_clean_event()
4012 dev = &sh->dev[i]; in handle_stripe_clean_event()
4017 log_stripe_write_finished(sh); in handle_stripe_clean_event()
4020 test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)) { in handle_stripe_clean_event()
4022 clear_bit(R5_Discard, &sh->dev[sh->pd_idx].flags); in handle_stripe_clean_event()
4023 clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags); in handle_stripe_clean_event()
4024 if (sh->qd_idx >= 0) { in handle_stripe_clean_event()
4025 clear_bit(R5_Discard, &sh->dev[sh->qd_idx].flags); in handle_stripe_clean_event()
4026 clear_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags); in handle_stripe_clean_event()
4029 clear_bit(STRIPE_DISCARD, &sh->state); in handle_stripe_clean_event()
4036 hash = sh->hash_lock_index; in handle_stripe_clean_event()
4038 remove_hash(sh); in handle_stripe_clean_event()
4041 sh = list_first_entry(&sh->batch_list, in handle_stripe_clean_event()
4043 if (sh != head_sh) in handle_stripe_clean_event()
4046 sh = head_sh; in handle_stripe_clean_event()
4048 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state)) in handle_stripe_clean_event()
4049 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe_clean_event()
4053 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) in handle_stripe_clean_event()
4077 struct stripe_head *sh, in handle_stripe_dirtying() argument
4092 (recovery_cp < MaxSector && sh->sector >= recovery_cp && in handle_stripe_dirtying()
4098 pr_debug("force RCW rmw_level=%u, recovery_cp=%llu sh->sector=%llu\n", in handle_stripe_dirtying()
4100 (unsigned long long)sh->sector); in handle_stripe_dirtying()
4103 struct r5dev *dev = &sh->dev[i]; in handle_stripe_dirtying()
4105 i == sh->pd_idx || i == sh->qd_idx || in handle_stripe_dirtying()
4117 i != sh->pd_idx && i != sh->qd_idx && in handle_stripe_dirtying()
4129 (unsigned long long)sh->sector, sh->state, rmw, rcw); in handle_stripe_dirtying()
4130 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe_dirtying()
4136 (unsigned long long)sh->sector, rmw); in handle_stripe_dirtying()
4138 struct r5dev *dev = &sh->dev[i]; in handle_stripe_dirtying()
4141 !test_bit(R5_LOCKED, &sh->dev[sh->pd_idx].flags)) { in handle_stripe_dirtying()
4156 r5c_use_extra_page(sh); in handle_stripe_dirtying()
4161 set_bit(STRIPE_DELAYED, &sh->state); in handle_stripe_dirtying()
4168 struct r5dev *dev = &sh->dev[i]; in handle_stripe_dirtying()
4170 i == sh->pd_idx || i == sh->qd_idx || in handle_stripe_dirtying()
4177 &sh->state)) { in handle_stripe_dirtying()
4184 set_bit(STRIPE_DELAYED, &sh->state); in handle_stripe_dirtying()
4193 struct r5dev *dev = &sh->dev[i]; in handle_stripe_dirtying()
4195 i != sh->pd_idx && i != sh->qd_idx && in handle_stripe_dirtying()
4202 &sh->state)) { in handle_stripe_dirtying()
4210 set_bit(STRIPE_DELAYED, &sh->state); in handle_stripe_dirtying()
4215 (unsigned long long)sh->sector, in handle_stripe_dirtying()
4216 rcw, qread, test_bit(STRIPE_DELAYED, &sh->state)); in handle_stripe_dirtying()
4220 !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in handle_stripe_dirtying()
4221 set_bit(STRIPE_DELAYED, &sh->state); in handle_stripe_dirtying()
4233 if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) && in handle_stripe_dirtying()
4235 !test_bit(STRIPE_BIT_DELAY, &sh->state))) in handle_stripe_dirtying()
4236 schedule_reconstruction(sh, s, rcw == 0, 0); in handle_stripe_dirtying()
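
handle_stripe_dirtying() chooses between read-modify-write and reconstruct-write by counting how many blocks each approach would still have to pre-read (the rmw and rcw totals visible in the lines above). A simplified, self-contained sketch of that counting follows, assuming a single-parity stripe, full-block overwrites, and none of the locking, journal or caching cases the real function handles; the toy_dev structure and the sample values are invented.

    #include <stdio.h>

    #define NDISKS 5

    struct toy_dev { int towrite; int uptodate; };

    int main(void)
    {
        struct toy_dev dev[NDISKS] = {
            /* towrite, uptodate */
            { 1, 0 }, { 0, 0 }, { 0, 1 }, { 1, 0 }, { 0, 0 /* parity */ },
        };
        int pd_idx = 4, rmw = 0, rcw = 0, i;

        for (i = 0; i < NDISKS; i++) {
            /* read-modify-write needs the old copies of the blocks being
             * rewritten plus the old parity */
            if ((dev[i].towrite || i == pd_idx) && !dev[i].uptodate)
                rmw++;
            /* reconstruct-write needs every data block that is NOT
             * being rewritten */
            if (i != pd_idx && !dev[i].towrite && !dev[i].uptodate)
                rcw++;
        }
        printf("rmw=%d rcw=%d -> prefer %s\n", rmw, rcw,
               rmw < rcw ? "read-modify-write" : "reconstruct-write");
        return 0;
    }
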
4240 static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh, in handle_parity_checks5() argument
4245 BUG_ON(sh->batch_head); in handle_parity_checks5()
4246 set_bit(STRIPE_HANDLE, &sh->state); in handle_parity_checks5()
4248 switch (sh->check_state) { in handle_parity_checks5()
4253 sh->check_state = check_state_run; in handle_parity_checks5()
4255 clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags); in handle_parity_checks5()
4259 dev = &sh->dev[s->failed_num[0]]; in handle_parity_checks5()
4262 sh->check_state = check_state_idle; in handle_parity_checks5()
4264 dev = &sh->dev[sh->pd_idx]; in handle_parity_checks5()
4267 if (test_bit(STRIPE_INSYNC, &sh->state)) in handle_parity_checks5()
4278 clear_bit(STRIPE_DEGRADED, &sh->state); in handle_parity_checks5()
4279 set_bit(STRIPE_INSYNC, &sh->state); in handle_parity_checks5()
4284 sh->check_state = check_state_idle; in handle_parity_checks5()
4296 if ((sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) == 0) in handle_parity_checks5()
4300 set_bit(STRIPE_INSYNC, &sh->state); in handle_parity_checks5()
4305 set_bit(STRIPE_INSYNC, &sh->state); in handle_parity_checks5()
4308 (unsigned long long) sh->sector, in handle_parity_checks5()
4309 (unsigned long long) sh->sector + in handle_parity_checks5()
4312 sh->check_state = check_state_compute_run; in handle_parity_checks5()
4313 set_bit(STRIPE_COMPUTE_RUN, &sh->state); in handle_parity_checks5()
4316 &sh->dev[sh->pd_idx].flags); in handle_parity_checks5()
4317 sh->ops.target = sh->pd_idx; in handle_parity_checks5()
4318 sh->ops.target2 = -1; in handle_parity_checks5()
4327 __func__, sh->check_state, in handle_parity_checks5()
4328 (unsigned long long) sh->sector); in handle_parity_checks5()
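
The SUM_CHECK_P_RESULT test above asks whether the XOR of all data blocks plus the P parity block is zero. A small stand-alone illustration of that zero-sum idea, with made-up data and block sizes rather than the kernel's async_tx path:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define NDATA 4
    #define BLK   16

    int main(void)
    {
        uint8_t data[NDATA][BLK], parity[BLK], sum[BLK];
        int d, i, bad = 0;

        /* make up some data and compute P = XOR of the data blocks */
        memset(parity, 0, BLK);
        for (d = 0; d < NDATA; d++)
            for (i = 0; i < BLK; i++) {
                data[d][i] = (uint8_t)(d * 31 + i);
                parity[i] ^= data[d][i];
            }

        /* the check: XOR everything, including parity; any non-zero
         * byte means the parity no longer matches the data */
        memcpy(sum, parity, BLK);
        for (d = 0; d < NDATA; d++)
            for (i = 0; i < BLK; i++)
                sum[i] ^= data[d][i];
        for (i = 0; i < BLK; i++)
            bad |= sum[i];

        printf("parity %s\n", bad ? "MISMATCH" : "ok");
        return 0;
    }
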
4333 static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh, in handle_parity_checks6() argument
4337 int pd_idx = sh->pd_idx; in handle_parity_checks6()
4338 int qd_idx = sh->qd_idx; in handle_parity_checks6()
4341 BUG_ON(sh->batch_head); in handle_parity_checks6()
4342 set_bit(STRIPE_HANDLE, &sh->state); in handle_parity_checks6()
4352 switch (sh->check_state) { in handle_parity_checks6()
4360 sh->check_state = check_state_run; in handle_parity_checks6()
4366 if (sh->check_state == check_state_run) in handle_parity_checks6()
4367 sh->check_state = check_state_run_pq; in handle_parity_checks6()
4369 sh->check_state = check_state_run_q; in handle_parity_checks6()
4373 sh->ops.zero_sum_result = 0; in handle_parity_checks6()
4375 if (sh->check_state == check_state_run) { in handle_parity_checks6()
4377 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); in handle_parity_checks6()
4380 if (sh->check_state >= check_state_run && in handle_parity_checks6()
4381 sh->check_state <= check_state_run_pq) { in handle_parity_checks6()
4393 sh->check_state = check_state_idle; in handle_parity_checks6()
4396 if (test_bit(STRIPE_INSYNC, &sh->state)) in handle_parity_checks6()
4404 dev = &sh->dev[s->failed_num[1]]; in handle_parity_checks6()
4410 dev = &sh->dev[s->failed_num[0]]; in handle_parity_checks6()
4415 if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) { in handle_parity_checks6()
4416 dev = &sh->dev[pd_idx]; in handle_parity_checks6()
4421 if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) { in handle_parity_checks6()
4422 dev = &sh->dev[qd_idx]; in handle_parity_checks6()
4430 dev - (struct r5dev *) &sh->dev)) { in handle_parity_checks6()
4435 clear_bit(STRIPE_DEGRADED, &sh->state); in handle_parity_checks6()
4437 set_bit(STRIPE_INSYNC, &sh->state); in handle_parity_checks6()
4444 sh->check_state = check_state_idle; in handle_parity_checks6()
4450 if (sh->ops.zero_sum_result == 0) { in handle_parity_checks6()
4453 set_bit(STRIPE_INSYNC, &sh->state); in handle_parity_checks6()
4459 sh->check_state = check_state_compute_result; in handle_parity_checks6()
4470 set_bit(STRIPE_INSYNC, &sh->state); in handle_parity_checks6()
4473 (unsigned long long) sh->sector, in handle_parity_checks6()
4474 (unsigned long long) sh->sector + in handle_parity_checks6()
4477 int *target = &sh->ops.target; in handle_parity_checks6()
4479 sh->ops.target = -1; in handle_parity_checks6()
4480 sh->ops.target2 = -1; in handle_parity_checks6()
4481 sh->check_state = check_state_compute_run; in handle_parity_checks6()
4482 set_bit(STRIPE_COMPUTE_RUN, &sh->state); in handle_parity_checks6()
4484 if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) { in handle_parity_checks6()
4486 &sh->dev[pd_idx].flags); in handle_parity_checks6()
4488 target = &sh->ops.target2; in handle_parity_checks6()
4491 if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) { in handle_parity_checks6()
4493 &sh->dev[qd_idx].flags); in handle_parity_checks6()
4504 __func__, sh->check_state, in handle_parity_checks6()
4505 (unsigned long long) sh->sector); in handle_parity_checks6()
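
For RAID-6 the same check is split into P and Q results (SUM_CHECK_P_RESULT and SUM_CHECK_Q_RESULT above). A rough sketch of the two syndromes as conventionally defined: P is the XOR of the data bytes, Q a GF(2^8) weighted sum with generator {02} and polynomial 0x11d. This is an illustration only; the kernel uses the optimized lib/raid6 implementations, and the sample bytes are invented.

    #include <stdint.h>
    #include <stdio.h>

    #define NDATA 4

    /* multiply by {02} in GF(2^8) with the RAID-6 polynomial */
    static uint8_t gf_mul2(uint8_t v)
    {
        return (uint8_t)((v << 1) ^ ((v & 0x80) ? 0x1d : 0));
    }

    int main(void)
    {
        uint8_t d[NDATA] = { 0xde, 0xad, 0xbe, 0xef }; /* one byte per data disk */
        uint8_t p = 0, q = 0;
        int i;

        /* Q = g^0*d0 + g^1*d1 + ... evaluated by Horner's rule from the
         * highest-numbered disk down */
        for (i = NDATA - 1; i >= 0; i--) {
            p ^= d[i];
            q = (uint8_t)(gf_mul2(q) ^ d[i]);
        }
        printf("P=%02x Q=%02x\n", p, q);

        /* a check pass recomputes both and compares them with the stored
         * parity blocks; P-only, Q-only and P+Q mismatches are reported
         * separately, as in the switch statement above */
        return 0;
    }
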
4510 static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh) in handle_stripe_expansion() argument
4518 BUG_ON(sh->batch_head); in handle_stripe_expansion()
4519 clear_bit(STRIPE_EXPAND_SOURCE, &sh->state); in handle_stripe_expansion()
4520 for (i = 0; i < sh->disks; i++) in handle_stripe_expansion()
4521 if (i != sh->pd_idx && i != sh->qd_idx) { in handle_stripe_expansion()
4526 sector_t bn = raid5_compute_blocknr(sh, i, 1); in handle_stripe_expansion()
4546 sh->dev[i].page, sh2->dev[dd_idx].offset, in handle_stripe_expansion()
4547 sh->dev[i].offset, RAID5_STRIPE_SIZE(conf), in handle_stripe_expansion()
4582 static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) in analyse_stripe() argument
4584 struct r5conf *conf = sh->raid_conf; in analyse_stripe()
4585 int disks = sh->disks; in analyse_stripe()
4592 s->expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state) && !sh->batch_head; in analyse_stripe()
4593 s->expanded = test_bit(STRIPE_EXPAND_READY, &sh->state) && !sh->batch_head; in analyse_stripe()
4606 dev = &sh->dev[i]; in analyse_stripe()
4617 !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) in analyse_stripe()
4646 rdev->recovery_offset >= sh->sector + RAID5_STRIPE_SECTORS(conf) && in analyse_stripe()
4647 !is_badblock(rdev, sh->sector, RAID5_STRIPE_SECTORS(conf), in analyse_stripe()
4661 is_bad = is_badblock(rdev, sh->sector, RAID5_STRIPE_SECTORS(conf), in analyse_stripe()
4688 else if (sh->sector + RAID5_STRIPE_SECTORS(conf) <= rdev->recovery_offset) in analyse_stripe()
4758 if (test_bit(STRIPE_SYNCING, &sh->state)) { in analyse_stripe()
4768 sh->sector >= conf->mddev->recovery_cp || in analyse_stripe()
4781 static int clear_batch_ready(struct stripe_head *sh) in clear_batch_ready() argument
4784 if (!test_and_clear_bit(STRIPE_BATCH_READY, &sh->state)) in clear_batch_ready()
4785 return (sh->batch_head && sh->batch_head != sh); in clear_batch_ready()
4786 spin_lock(&sh->stripe_lock); in clear_batch_ready()
4787 if (!sh->batch_head) { in clear_batch_ready()
4788 spin_unlock(&sh->stripe_lock); in clear_batch_ready()
4796 if (sh->batch_head != sh) { in clear_batch_ready()
4797 spin_unlock(&sh->stripe_lock); in clear_batch_ready()
4800 spin_lock(&sh->batch_lock); in clear_batch_ready()
4801 list_for_each_entry(tmp, &sh->batch_list, batch_list) in clear_batch_ready()
4803 spin_unlock(&sh->batch_lock); in clear_batch_ready()
4804 spin_unlock(&sh->stripe_lock); in clear_batch_ready()
4816 struct stripe_head *sh, *next; in break_stripe_batch_list() local
4820 list_for_each_entry_safe(sh, next, &head_sh->batch_list, batch_list) { in break_stripe_batch_list()
4822 list_del_init(&sh->batch_list); in break_stripe_batch_list()
4824 WARN_ONCE(sh->state & ((1 << STRIPE_ACTIVE) | in break_stripe_batch_list()
4836 "stripe state: %lx\n", sh->state); in break_stripe_batch_list()
4841 set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS | in break_stripe_batch_list()
4847 sh->check_state = head_sh->check_state; in break_stripe_batch_list()
4848 sh->reconstruct_state = head_sh->reconstruct_state; in break_stripe_batch_list()
4849 spin_lock_irq(&sh->stripe_lock); in break_stripe_batch_list()
4850 sh->batch_head = NULL; in break_stripe_batch_list()
4851 spin_unlock_irq(&sh->stripe_lock); in break_stripe_batch_list()
4852 for (i = 0; i < sh->disks; i++) { in break_stripe_batch_list()
4853 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) in break_stripe_batch_list()
4855 sh->dev[i].flags = head_sh->dev[i].flags & in break_stripe_batch_list()
4859 sh->state & handle_flags) in break_stripe_batch_list()
4860 set_bit(STRIPE_HANDLE, &sh->state); in break_stripe_batch_list()
4861 raid5_release_stripe(sh); in break_stripe_batch_list()
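
break_stripe_batch_list() walks the head stripe's batch_list with a deletion-safe iterator and unlinks each member so it can be handled on its own. A toy, self-contained version of that pattern is sketched below; the miniature list helpers and the toy_stripe type are invented here and are not <linux/list.h>.

    #include <stdio.h>
    #include <stddef.h>

    struct list_head { struct list_head *next, *prev; };

    static void list_init(struct list_head *h) { h->next = h->prev = h; }
    static void list_add_tail(struct list_head *n, struct list_head *h)
    {
        n->prev = h->prev; n->next = h;
        h->prev->next = n; h->prev = n;
    }
    static void list_del_init(struct list_head *n)
    {
        n->prev->next = n->next; n->next->prev = n->prev;
        list_init(n);
    }
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct toy_stripe { int sector; struct list_head batch_list; };

    int main(void)
    {
        struct toy_stripe head = { 0 }, members[3] = { { 8 }, { 16 }, { 24 } };
        struct list_head *pos, *next;
        int i;

        list_init(&head.batch_list);
        for (i = 0; i < 3; i++) {
            list_init(&members[i].batch_list);
            list_add_tail(&members[i].batch_list, &head.batch_list);
        }

        /* "safe" walk: grab ->next before unlinking the current entry */
        for (pos = head.batch_list.next, next = pos->next;
             pos != &head.batch_list;
             pos = next, next = pos->next) {
            struct toy_stripe *sh = container_of(pos, struct toy_stripe, batch_list);

            list_del_init(pos);               /* detach from the batch */
            printf("handling stripe at sector %d on its own\n", sh->sector);
        }
        return 0;
    }
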
4876 static void handle_stripe(struct stripe_head *sh) in handle_stripe() argument
4879 struct r5conf *conf = sh->raid_conf; in handle_stripe()
4882 int disks = sh->disks; in handle_stripe()
4885 clear_bit(STRIPE_HANDLE, &sh->state); in handle_stripe()
4893 if (clear_batch_ready(sh)) in handle_stripe()
4896 if (test_and_set_bit_lock(STRIPE_ACTIVE, &sh->state)) { in handle_stripe()
4899 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe()
4903 if (test_and_clear_bit(STRIPE_BATCH_ERR, &sh->state)) in handle_stripe()
4904 break_stripe_batch_list(sh, 0); in handle_stripe()
4906 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state) && !sh->batch_head) { in handle_stripe()
4907 spin_lock(&sh->stripe_lock); in handle_stripe()
4912 if (!test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state) && in handle_stripe()
4913 !test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state) && in handle_stripe()
4914 !test_bit(STRIPE_DISCARD, &sh->state) && in handle_stripe()
4915 test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) { in handle_stripe()
4916 set_bit(STRIPE_SYNCING, &sh->state); in handle_stripe()
4917 clear_bit(STRIPE_INSYNC, &sh->state); in handle_stripe()
4918 clear_bit(STRIPE_REPLACED, &sh->state); in handle_stripe()
4920 spin_unlock(&sh->stripe_lock); in handle_stripe()
4922 clear_bit(STRIPE_DELAYED, &sh->state); in handle_stripe()
4926 (unsigned long long)sh->sector, sh->state, in handle_stripe()
4927 atomic_read(&sh->count), sh->pd_idx, sh->qd_idx, in handle_stripe()
4928 sh->check_state, sh->reconstruct_state); in handle_stripe()
4930 analyse_stripe(sh, &s); in handle_stripe()
4932 if (test_bit(STRIPE_LOG_TRAPPED, &sh->state)) in handle_stripe()
4937 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe()
4944 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe()
4952 if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) { in handle_stripe()
4954 set_bit(STRIPE_BIOFILL_RUN, &sh->state); in handle_stripe()
4970 sh->check_state = 0; in handle_stripe()
4971 sh->reconstruct_state = 0; in handle_stripe()
4972 break_stripe_batch_list(sh, 0); in handle_stripe()
4974 handle_failed_stripe(conf, sh, &s, disks); in handle_stripe()
4976 handle_failed_sync(conf, sh, &s); in handle_stripe()
4983 if (sh->reconstruct_state == reconstruct_state_prexor_drain_result) in handle_stripe()
4985 if (sh->reconstruct_state == reconstruct_state_drain_result || in handle_stripe()
4986 sh->reconstruct_state == reconstruct_state_prexor_drain_result) { in handle_stripe()
4987 sh->reconstruct_state = reconstruct_state_idle; in handle_stripe()
4992 BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags) && in handle_stripe()
4993 !test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)); in handle_stripe()
4994 BUG_ON(sh->qd_idx >= 0 && in handle_stripe()
4995 !test_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags) && in handle_stripe()
4996 !test_bit(R5_Discard, &sh->dev[sh->qd_idx].flags)); in handle_stripe()
4998 struct r5dev *dev = &sh->dev[i]; in handle_stripe()
5000 (i == sh->pd_idx || i == sh->qd_idx || in handle_stripe()
5010 ((i == sh->pd_idx || i == sh->qd_idx) && in handle_stripe()
5012 set_bit(STRIPE_INSYNC, &sh->state); in handle_stripe()
5015 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in handle_stripe()
5023 pdev = &sh->dev[sh->pd_idx]; in handle_stripe()
5024 s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx) in handle_stripe()
5025 || (s.failed >= 2 && s.failed_num[1] == sh->pd_idx); in handle_stripe()
5026 qdev = &sh->dev[sh->qd_idx]; in handle_stripe()
5027 s.q_failed = (s.failed >= 1 && s.failed_num[0] == sh->qd_idx) in handle_stripe()
5028 || (s.failed >= 2 && s.failed_num[1] == sh->qd_idx) in handle_stripe()
5040 handle_stripe_clean_event(conf, sh, disks); in handle_stripe()
5043 r5c_handle_cached_data_endio(conf, sh, disks); in handle_stripe()
5044 log_stripe_write_finished(sh); in handle_stripe()
5055 handle_stripe_fill(sh, &s, disks); in handle_stripe()
5062 r5c_finish_stripe_write_out(conf, sh, &s); in handle_stripe()
5073 if (!sh->reconstruct_state && !sh->check_state && !sh->log_io) { in handle_stripe()
5076 handle_stripe_dirtying(conf, sh, &s, disks); in handle_stripe()
5082 ret = r5c_try_caching_write(conf, sh, &s, in handle_stripe()
5093 (!test_bit(STRIPE_R5C_CACHING, &sh->state) && in handle_stripe()
5095 ret = handle_stripe_dirtying(conf, sh, &s, in handle_stripe()
5108 if (sh->check_state || in handle_stripe()
5110 !test_bit(STRIPE_COMPUTE_RUN, &sh->state) && in handle_stripe()
5111 !test_bit(STRIPE_INSYNC, &sh->state))) { in handle_stripe()
5113 handle_parity_checks6(conf, sh, &s, disks); in handle_stripe()
5115 handle_parity_checks5(conf, sh, &s, disks); in handle_stripe()
5119 && !test_bit(STRIPE_COMPUTE_RUN, &sh->state) in handle_stripe()
5120 && !test_bit(STRIPE_REPLACED, &sh->state)) { in handle_stripe()
5123 if (test_bit(R5_NeedReplace, &sh->dev[i].flags)) { in handle_stripe()
5124 WARN_ON(!test_bit(R5_UPTODATE, &sh->dev[i].flags)); in handle_stripe()
5125 set_bit(R5_WantReplace, &sh->dev[i].flags); in handle_stripe()
5126 set_bit(R5_LOCKED, &sh->dev[i].flags); in handle_stripe()
5130 set_bit(STRIPE_INSYNC, &sh->state); in handle_stripe()
5131 set_bit(STRIPE_REPLACED, &sh->state); in handle_stripe()
5134 !test_bit(STRIPE_COMPUTE_RUN, &sh->state) && in handle_stripe()
5135 test_bit(STRIPE_INSYNC, &sh->state)) { in handle_stripe()
5137 clear_bit(STRIPE_SYNCING, &sh->state); in handle_stripe()
5138 if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags)) in handle_stripe()
5147 struct r5dev *dev = &sh->dev[s.failed_num[i]]; in handle_stripe()
5164 if (sh->reconstruct_state == reconstruct_state_result) { in handle_stripe()
5166 = raid5_get_active_stripe(conf, sh->sector, 1, 1, 1); in handle_stripe()
5168 /* sh cannot be written until sh_src has been read. in handle_stripe()
5169 * so arrange for sh to be delayed a little in handle_stripe()
5171 set_bit(STRIPE_DELAYED, &sh->state); in handle_stripe()
5172 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe()
5182 sh->reconstruct_state = reconstruct_state_idle; in handle_stripe()
5183 clear_bit(STRIPE_EXPANDING, &sh->state); in handle_stripe()
5185 set_bit(R5_Wantwrite, &sh->dev[i].flags); in handle_stripe()
5186 set_bit(R5_LOCKED, &sh->dev[i].flags); in handle_stripe()
5191 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) && in handle_stripe()
5192 !sh->reconstruct_state) { in handle_stripe()
5194 sh->disks = conf->raid_disks; in handle_stripe()
5195 stripe_set_idx(sh->sector, conf, 0, sh); in handle_stripe()
5196 schedule_reconstruction(sh, &s, 1, 1); in handle_stripe()
5197 } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) { in handle_stripe()
5198 clear_bit(STRIPE_EXPAND_READY, &sh->state); in handle_stripe()
5205 !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) in handle_stripe()
5206 handle_stripe_expansion(conf, sh); in handle_stripe()
5226 struct r5dev *dev = &sh->dev[i]; in handle_stripe()
5230 if (!rdev_set_badblocks(rdev, sh->sector, in handle_stripe()
5237 rdev_clear_badblocks(rdev, sh->sector, in handle_stripe()
5246 rdev_clear_badblocks(rdev, sh->sector, in handle_stripe()
5253 raid_run_ops(sh, s.ops_request); in handle_stripe()
5255 ops_run_io(sh, &s); in handle_stripe()
5268 clear_bit_unlock(STRIPE_ACTIVE, &sh->state); in handle_stripe()
5276 struct stripe_head *sh; in raid5_activate_delayed() local
5277 sh = list_entry(l, struct stripe_head, lru); in raid5_activate_delayed()
5279 clear_bit(STRIPE_DELAYED, &sh->state); in raid5_activate_delayed()
5280 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in raid5_activate_delayed()
5282 list_add_tail(&sh->lru, &conf->hold_list); in raid5_activate_delayed()
5283 raid5_wakeup_stripe_thread(sh); in raid5_activate_delayed()
5296 struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru); in activate_bit_delay() local
5298 list_del_init(&sh->lru); in activate_bit_delay()
5299 atomic_inc(&sh->count); in activate_bit_delay()
5300 hash = sh->hash_lock_index; in activate_bit_delay()
5301 __release_stripe(conf, sh, &temp_inactive_list[hash]); in activate_bit_delay()
5515 struct stripe_head *sh, *tmp; in __get_priority_stripe() local
5525 sh = NULL; in __get_priority_stripe()
5551 sh = list_entry(handle_list->next, typeof(*sh), lru); in __get_priority_stripe()
5555 else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) { in __get_priority_stripe()
5575 sh = tmp; in __get_priority_stripe()
5580 if (sh) { in __get_priority_stripe()
5588 if (!sh) { in __get_priority_stripe()
5598 sh->group = NULL; in __get_priority_stripe()
5600 list_del_init(&sh->lru); in __get_priority_stripe()
5601 BUG_ON(atomic_inc_return(&sh->count) != 1); in __get_priority_stripe()
5602 return sh; in __get_priority_stripe()
5615 struct stripe_head *sh; in raid5_unplug() local
5624 sh = list_first_entry(&cb->list, struct stripe_head, lru); in raid5_unplug()
5625 list_del_init(&sh->lru); in raid5_unplug()
5632 clear_bit(STRIPE_ON_UNPLUG_LIST, &sh->state); in raid5_unplug()
5637 hash = sh->hash_lock_index; in raid5_unplug()
5638 __release_stripe(conf, sh, &cb->temp_inactive_list[hash]); in raid5_unplug()
5651 struct stripe_head *sh) in release_stripe_plug() argument
5659 raid5_release_stripe(sh); in release_stripe_plug()
5672 if (!test_and_set_bit(STRIPE_ON_UNPLUG_LIST, &sh->state)) in release_stripe_plug()
5673 list_add_tail(&sh->lru, &cb->list); in release_stripe_plug()
5675 raid5_release_stripe(sh); in release_stripe_plug()
5682 struct stripe_head *sh; in make_discard_request() local
5708 sh = raid5_get_active_stripe(conf, logical_sector, 0, 0, 0); in make_discard_request()
5711 set_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags); in make_discard_request()
5712 if (test_bit(STRIPE_SYNCING, &sh->state)) { in make_discard_request()
5713 raid5_release_stripe(sh); in make_discard_request()
5717 clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags); in make_discard_request()
5718 spin_lock_irq(&sh->stripe_lock); in make_discard_request()
5720 if (d == sh->pd_idx || d == sh->qd_idx) in make_discard_request()
5722 if (sh->dev[d].towrite || sh->dev[d].toread) { in make_discard_request()
5723 set_bit(R5_Overlap, &sh->dev[d].flags); in make_discard_request()
5724 spin_unlock_irq(&sh->stripe_lock); in make_discard_request()
5725 raid5_release_stripe(sh); in make_discard_request()
5730 set_bit(STRIPE_DISCARD, &sh->state); in make_discard_request()
5732 sh->overwrite_disks = 0; in make_discard_request()
5734 if (d == sh->pd_idx || d == sh->qd_idx) in make_discard_request()
5736 sh->dev[d].towrite = bi; in make_discard_request()
5737 set_bit(R5_OVERWRITE, &sh->dev[d].flags); in make_discard_request()
5740 sh->overwrite_disks++; in make_discard_request()
5742 spin_unlock_irq(&sh->stripe_lock); in make_discard_request()
5748 sh->sector, in make_discard_request()
5751 sh->bm_seq = conf->seq_flush + 1; in make_discard_request()
5752 set_bit(STRIPE_BIT_DELAY, &sh->state); in make_discard_request()
5755 set_bit(STRIPE_HANDLE, &sh->state); in make_discard_request()
5756 clear_bit(STRIPE_DELAYED, &sh->state); in make_discard_request()
5757 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in make_discard_request()
5759 release_stripe_plug(mddev, sh); in make_discard_request()
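
make_discard_request() skips the parity slots, backs off if any data slot already has I/O attached (setting R5_Overlap so it is retried later), and otherwise records every data slot as fully overwritten. A toy model of that per-slot bookkeeping follows, with invented structures and a single merged pass instead of the kernel's separate check and mark loops.

    #include <stdio.h>

    #define DISKS 6

    struct toy_dev { int towrite; int toread; };

    int main(void)
    {
        struct toy_dev dev[DISKS] = { { 0, 0 } };
        int pd_idx = 4, qd_idx = 5;   /* parity slots for this stripe */
        int overwrite_disks = 0, d;

        for (d = 0; d < DISKS; d++) {
            if (d == pd_idx || d == qd_idx)
                continue;                     /* parity is recomputed, not written by the bio */
            if (dev[d].towrite || dev[d].toread) {
                printf("slot %d busy: would set R5_Overlap and retry\n", d);
                return 0;
            }
            dev[d].towrite = 1;               /* stands in for attaching the discard bio */
            overwrite_disks++;
        }
        printf("overwrite_disks=%d (all %d data slots covered)\n",
               overwrite_disks, DISKS - 2);
        return 0;
    }
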
5771 struct stripe_head *sh; in raid5_make_request() local
5864 sh = raid5_get_active_stripe(conf, new_sector, previous, in raid5_make_request()
5866 if (sh) { in raid5_make_request()
5872 * 'sh', we know that if that happens, in raid5_make_request()
5885 raid5_release_stripe(sh); in raid5_make_request()
5895 raid5_release_stripe(sh); in raid5_make_request()
5899 if (test_bit(STRIPE_EXPANDING, &sh->state) || in raid5_make_request()
5900 !add_stripe_bio(sh, bi, dd_idx, rw, previous)) { in raid5_make_request()
5906 raid5_release_stripe(sh); in raid5_make_request()
5912 set_bit(STRIPE_R5C_PREFLUSH, &sh->state); in raid5_make_request()
5917 set_bit(STRIPE_HANDLE, &sh->state); in raid5_make_request()
5918 clear_bit(STRIPE_DELAYED, &sh->state); in raid5_make_request()
5919 if ((!sh->batch_head || sh == sh->batch_head) && in raid5_make_request()
5921 !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in raid5_make_request()
5923 release_stripe_plug(mddev, sh); in raid5_make_request()
5952 struct stripe_head *sh; in reshape_request() local
6103 sh = raid5_get_active_stripe(conf, stripe_addr+i, 0, 0, 1); in reshape_request()
6104 set_bit(STRIPE_EXPANDING, &sh->state); in reshape_request()
6109 for (j=sh->disks; j--;) { in reshape_request()
6111 if (j == sh->pd_idx) in reshape_request()
6114 j == sh->qd_idx) in reshape_request()
6116 s = raid5_compute_blocknr(sh, j, 0); in reshape_request()
6121 memset(page_address(sh->dev[j].page), 0, RAID5_STRIPE_SIZE(conf)); in reshape_request()
6122 set_bit(R5_Expanded, &sh->dev[j].flags); in reshape_request()
6123 set_bit(R5_UPTODATE, &sh->dev[j].flags); in reshape_request()
6126 set_bit(STRIPE_EXPAND_READY, &sh->state); in reshape_request()
6127 set_bit(STRIPE_HANDLE, &sh->state); in reshape_request()
6129 list_add(&sh->lru, &stripes); in reshape_request()
6152 sh = raid5_get_active_stripe(conf, first_sector, 1, 0, 1); in reshape_request()
6153 set_bit(STRIPE_EXPAND_SOURCE, &sh->state); in reshape_request()
6154 set_bit(STRIPE_HANDLE, &sh->state); in reshape_request()
6155 raid5_release_stripe(sh); in reshape_request()
6162 sh = list_entry(stripes.next, struct stripe_head, lru); in reshape_request()
6163 list_del_init(&sh->lru); in reshape_request()
6164 raid5_release_stripe(sh); in reshape_request()
6213 struct stripe_head *sh; in raid5_sync_request() local
6272 sh = raid5_get_active_stripe(conf, sector_nr, 0, 1, 0); in raid5_sync_request()
6273 if (sh == NULL) { in raid5_sync_request()
6274 sh = raid5_get_active_stripe(conf, sector_nr, 0, 0, 0); in raid5_sync_request()
6295 set_bit(STRIPE_SYNC_REQUESTED, &sh->state); in raid5_sync_request()
6296 set_bit(STRIPE_HANDLE, &sh->state); in raid5_sync_request()
6298 raid5_release_stripe(sh); in raid5_sync_request()
6316 struct stripe_head *sh; in retry_aligned_read() local
6337 sh = raid5_get_active_stripe(conf, sector, 0, 1, 1); in retry_aligned_read()
6339 if (!sh) { in retry_aligned_read()
6346 if (!add_stripe_bio(sh, raid_bio, dd_idx, 0, 0)) { in retry_aligned_read()
6347 raid5_release_stripe(sh); in retry_aligned_read()
6353 set_bit(R5_ReadNoMerge, &sh->dev[dd_idx].flags); in retry_aligned_read()
6354 handle_stripe(sh); in retry_aligned_read()
6355 raid5_release_stripe(sh); in retry_aligned_read()
6372 struct stripe_head *batch[MAX_STRIPE_BATCH], *sh; in handle_active_stripes() local
6377 (sh = __get_priority_stripe(conf, group)) != NULL) in handle_active_stripes()
6378 batch[batch_size++] = sh; in handle_active_stripes()