Lines matching full:sh in drivers/md/raid5.c

26  * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq
118 static inline int raid6_d0(struct stripe_head *sh) in raid6_d0() argument
120 if (sh->ddf_layout) in raid6_d0()
124 if (sh->qd_idx == sh->disks - 1) in raid6_d0()
127 return sh->qd_idx + 1; in raid6_d0()
140 static int raid6_idx_to_slot(int idx, struct stripe_head *sh, in raid6_idx_to_slot() argument
145 if (sh->ddf_layout) in raid6_idx_to_slot()
147 if (idx == sh->pd_idx) in raid6_idx_to_slot()
149 if (idx == sh->qd_idx) in raid6_idx_to_slot()
151 if (!sh->ddf_layout) in raid6_idx_to_slot()
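The two raid6 helpers above decide the order in which devices are fed to the async syndrome code: raid6_d0() picks the first data device of a stripe, and raid6_idx_to_slot() maps a raw device index to a source slot, with P and Q going to slots syndrome_disks and syndrome_disks + 1 (syndrome_disks is disks for DDF layouts and disks - 2 otherwise, see the set_syndrome_sources() fragment at line 1627 below). The sketch that follows is reconstructed from these fragments and the usual upstream behaviour; the _sketch names and the return-0 branches not shown in the match list are mine, so treat it as illustration rather than verbatim source.

    /* Illustrative reconstruction, not verbatim kernel source. */
    static inline int raid6_d0_sketch(struct stripe_head *sh)
    {
            if (sh->ddf_layout)
                    return 0;                   /* DDF layouts start at device 0 */
            if (sh->qd_idx == sh->disks - 1)
                    return 0;                   /* Q is the last device          */
            return sh->qd_idx + 1;              /* otherwise data follows Q      */
    }

    static int raid6_idx_to_slot_sketch(int idx, struct stripe_head *sh,
                                        int *count, int syndrome_disks)
    {
            int slot = *count;

            if (sh->ddf_layout)                 /* DDF: P/Q consume real slots   */
                    (*count)++;
            if (idx == sh->pd_idx)
                    return syndrome_disks;      /* P result slot                 */
            if (idx == sh->qd_idx)
                    return syndrome_disks + 1;  /* Q result slot                 */
            if (!sh->ddf_layout)                /* non-DDF: pack data slots      */
                    (*count)++;
            return slot;
    }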
158 static int stripe_operations_active(struct stripe_head *sh) in stripe_operations_active() argument
160 return sh->check_state || sh->reconstruct_state || in stripe_operations_active()
161 test_bit(STRIPE_BIOFILL_RUN, &sh->state) || in stripe_operations_active()
162 test_bit(STRIPE_COMPUTE_RUN, &sh->state); in stripe_operations_active()
165 static bool stripe_is_lowprio(struct stripe_head *sh) in stripe_is_lowprio() argument
167 return (test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state) || in stripe_is_lowprio()
168 test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state)) && in stripe_is_lowprio()
169 !test_bit(STRIPE_R5C_CACHING, &sh->state); in stripe_is_lowprio()
172 static void raid5_wakeup_stripe_thread(struct stripe_head *sh) in raid5_wakeup_stripe_thread() argument
173 __must_hold(&sh->raid_conf->device_lock) in raid5_wakeup_stripe_thread()
175 struct r5conf *conf = sh->raid_conf; in raid5_wakeup_stripe_thread()
178 int i, cpu = sh->cpu; in raid5_wakeup_stripe_thread()
182 sh->cpu = cpu; in raid5_wakeup_stripe_thread()
185 if (list_empty(&sh->lru)) { in raid5_wakeup_stripe_thread()
188 if (stripe_is_lowprio(sh)) in raid5_wakeup_stripe_thread()
189 list_add_tail(&sh->lru, &group->loprio_list); in raid5_wakeup_stripe_thread()
191 list_add_tail(&sh->lru, &group->handle_list); in raid5_wakeup_stripe_thread()
193 sh->group = group; in raid5_wakeup_stripe_thread()
201 group = conf->worker_groups + cpu_to_group(sh->cpu); in raid5_wakeup_stripe_thread()
205 queue_work_on(sh->cpu, raid5_wq, &group->workers[0].work); in raid5_wakeup_stripe_thread()
212 queue_work_on(sh->cpu, raid5_wq, in raid5_wakeup_stripe_thread()
219 static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh, in do_release_stripe() argument
226 BUG_ON(!list_empty(&sh->lru)); in do_release_stripe()
230 for (i = sh->disks; i--; ) in do_release_stripe()
231 if (test_bit(R5_InJournal, &sh->dev[i].flags)) in do_release_stripe()
240 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state) || in do_release_stripe()
242 !test_bit(STRIPE_HANDLE, &sh->state) && injournal != 0)) { in do_release_stripe()
243 if (test_bit(STRIPE_R5C_CACHING, &sh->state)) in do_release_stripe()
244 r5c_make_stripe_write_out(sh); in do_release_stripe()
245 set_bit(STRIPE_HANDLE, &sh->state); in do_release_stripe()
248 if (test_bit(STRIPE_HANDLE, &sh->state)) { in do_release_stripe()
249 if (test_bit(STRIPE_DELAYED, &sh->state) && in do_release_stripe()
250 !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in do_release_stripe()
251 list_add_tail(&sh->lru, &conf->delayed_list); in do_release_stripe()
252 else if (test_bit(STRIPE_BIT_DELAY, &sh->state) && in do_release_stripe()
253 sh->bm_seq - conf->seq_write > 0) in do_release_stripe()
254 list_add_tail(&sh->lru, &conf->bitmap_list); in do_release_stripe()
256 clear_bit(STRIPE_DELAYED, &sh->state); in do_release_stripe()
257 clear_bit(STRIPE_BIT_DELAY, &sh->state); in do_release_stripe()
259 if (stripe_is_lowprio(sh)) in do_release_stripe()
260 list_add_tail(&sh->lru, in do_release_stripe()
263 list_add_tail(&sh->lru, in do_release_stripe()
266 raid5_wakeup_stripe_thread(sh); in do_release_stripe()
272 BUG_ON(stripe_operations_active(sh)); in do_release_stripe()
273 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in do_release_stripe()
278 if (!test_bit(STRIPE_EXPANDING, &sh->state)) { in do_release_stripe()
280 list_add_tail(&sh->lru, temp_inactive_list); in do_release_stripe()
282 WARN_ON(test_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags)); in do_release_stripe()
284 list_add_tail(&sh->lru, temp_inactive_list); in do_release_stripe()
287 if (!test_and_set_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) in do_release_stripe()
289 if (test_and_clear_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state)) in do_release_stripe()
291 list_add_tail(&sh->lru, &conf->r5c_full_stripe_list); in do_release_stripe()
299 list_add_tail(&sh->lru, &conf->r5c_partial_stripe_list); in do_release_stripe()
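Reassembled, the do_release_stripe() fragments above show how a stripe that still needs handling is routed when its last reference is dropped: it is parked on conf->delayed_list while preread is not yet allowed, parked on conf->bitmap_list while its bitmap sequence (sh->bm_seq, recorded when a bio is added, see the line-26 comment at the top) is still ahead of conf->seq_write, and otherwise queued for the handler threads. The sketch below pieces together just that routing from the matched lines; the worker_cnt_per_group test, conf->handle_list name, and the omission of locking, r5c bookkeeping and the inactive path are my assumptions.

    /* Routing sketch only; pieced together from the fragments, not verbatim. */
    if (test_bit(STRIPE_DELAYED, &sh->state) &&
        !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
            list_add_tail(&sh->lru, &conf->delayed_list);   /* wait for preread window */
    else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
             sh->bm_seq - conf->seq_write > 0)
            list_add_tail(&sh->lru, &conf->bitmap_list);    /* bitmap not on disk yet  */
    else {
            clear_bit(STRIPE_DELAYED, &sh->state);
            clear_bit(STRIPE_BIT_DELAY, &sh->state);
            if (conf->worker_cnt_per_group == 0)            /* assumed: no worker groups */
                    list_add_tail(&sh->lru, &conf->handle_list);
            else
                    raid5_wakeup_stripe_thread(sh);         /* hand off to a worker group */
    }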
305 static void __release_stripe(struct r5conf *conf, struct stripe_head *sh, in __release_stripe() argument
309 if (atomic_dec_and_test(&sh->count)) in __release_stripe()
310 do_release_stripe(conf, sh, temp_inactive_list); in __release_stripe()
366 struct stripe_head *sh, *t; in release_stripe_list() local
372 llist_for_each_entry_safe(sh, t, head, release_list) { in release_stripe_list()
375 /* sh could be re-added after STRIPE_ON_RELEASE_LIST is cleared */ in release_stripe_list()
377 clear_bit(STRIPE_ON_RELEASE_LIST, &sh->state); in release_stripe_list()
383 hash = sh->hash_lock_index; in release_stripe_list()
384 __release_stripe(conf, sh, &temp_inactive_list[hash]); in release_stripe_list()
391 void raid5_release_stripe(struct stripe_head *sh) in raid5_release_stripe() argument
393 struct r5conf *conf = sh->raid_conf; in raid5_release_stripe()
401 if (atomic_add_unless(&sh->count, -1, 1)) in raid5_release_stripe()
405 test_and_set_bit(STRIPE_ON_RELEASE_LIST, &sh->state)) in raid5_release_stripe()
407 wakeup = llist_add(&sh->release_list, &conf->released_stripes); in raid5_release_stripe()
413 if (atomic_dec_and_lock_irqsave(&sh->count, &conf->device_lock, flags)) { in raid5_release_stripe()
415 hash = sh->hash_lock_index; in raid5_release_stripe()
416 do_release_stripe(conf, sh, &list); in raid5_release_stripe()
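raid5_release_stripe() drops a reference without taking conf->device_lock whenever it can: atomic_add_unless(&sh->count, -1, 1) covers every drop that cannot reach zero, otherwise the stripe is pushed onto the lock-free conf->released_stripes llist (drained later by release_stripe_list() above), and only as a last resort does atomic_dec_and_lock_irqsave() take device_lock and call do_release_stripe(). The following stand-alone C sketch shows the same "decrement unless it would hit zero, else take the lock" idiom outside any kernel context; all names in it are mine.

    /* Stand-alone illustration of the refcount idiom; not kernel code. */
    #include <stdatomic.h>
    #include <pthread.h>

    struct obj {
            atomic_int count;
            pthread_mutex_t lock;   /* plays the role of conf->device_lock */
    };

    /* Try to decrement without ever dropping the count to zero. */
    static int dec_unless_last(atomic_int *count)
    {
            int old = atomic_load(count);
            while (old > 1) {
                    if (atomic_compare_exchange_weak(count, &old, old - 1))
                            return 1;       /* fast path: no lock needed */
            }
            return 0;                       /* we might be the last holder */
    }

    static void put_obj(struct obj *o)
    {
            if (dec_unless_last(&o->count))
                    return;
            pthread_mutex_lock(&o->lock);   /* slow path, like dec_and_lock */
            if (atomic_fetch_sub(&o->count, 1) == 1) {
                    /* last reference: release resources under the lock */
            }
            pthread_mutex_unlock(&o->lock);
    }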
422 static inline void remove_hash(struct stripe_head *sh) in remove_hash() argument
425 (unsigned long long)sh->sector); in remove_hash()
427 hlist_del_init(&sh->hash); in remove_hash()
430 static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh) in insert_hash() argument
432 struct hlist_head *hp = stripe_hash(conf, sh->sector); in insert_hash()
435 (unsigned long long)sh->sector); in insert_hash()
437 hlist_add_head(&sh->hash, hp); in insert_hash()
443 struct stripe_head *sh = NULL; in get_free_stripe() local
449 sh = list_entry(first, struct stripe_head, lru); in get_free_stripe()
451 remove_hash(sh); in get_free_stripe()
453 BUG_ON(hash != sh->hash_lock_index); in get_free_stripe()
457 return sh; in get_free_stripe()
461 static void free_stripe_pages(struct stripe_head *sh) in free_stripe_pages() argument
467 if (!sh->pages) in free_stripe_pages()
470 for (i = 0; i < sh->nr_pages; i++) { in free_stripe_pages()
471 p = sh->pages[i]; in free_stripe_pages()
474 sh->pages[i] = NULL; in free_stripe_pages()
478 static int alloc_stripe_pages(struct stripe_head *sh, gfp_t gfp) in alloc_stripe_pages() argument
483 for (i = 0; i < sh->nr_pages; i++) { in alloc_stripe_pages()
485 if (sh->pages[i]) in alloc_stripe_pages()
490 free_stripe_pages(sh); in alloc_stripe_pages()
493 sh->pages[i] = p; in alloc_stripe_pages()
499 init_stripe_shared_pages(struct stripe_head *sh, struct r5conf *conf, int disks) in init_stripe_shared_pages() argument
503 if (sh->pages) in init_stripe_shared_pages()
506 /* Each of the sh->dev[i] needs one conf->stripe_size */ in init_stripe_shared_pages()
510 sh->pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL); in init_stripe_shared_pages()
511 if (!sh->pages) in init_stripe_shared_pages()
513 sh->nr_pages = nr_pages; in init_stripe_shared_pages()
514 sh->stripes_per_page = cnt; in init_stripe_shared_pages()
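init_stripe_shared_pages() above sizes one shared page array for the whole stripe_head instead of one page per device, since several stripe units can share a page when conf->stripe_size is smaller than PAGE_SIZE. Below is a hedged arithmetic sketch of that sizing, assuming 4 KiB pages and the straightforward "units per page, then round up" formula; cnt and nr_pages mirror the names in the fragment, but the exact kernel expression is not shown in the match list.

    /* Illustrative sizing only; values are assumed for the example. */
    #include <stdio.h>

    int main(void)
    {
            unsigned long page_size = 4096;                  /* assumed PAGE_SIZE          */
            unsigned long stripe_size = 1024;                /* example conf->stripe_size  */
            unsigned int  disks = 6;

            unsigned int cnt = page_size / stripe_size;      /* stripe units per page      */
            unsigned int nr_pages = (disks + cnt - 1) / cnt; /* pages the whole head needs */

            printf("cnt=%u nr_pages=%u\n", cnt, nr_pages);   /* prints: cnt=4 nr_pages=2   */
            return 0;
    }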
519 static void shrink_buffers(struct stripe_head *sh) in shrink_buffers() argument
522 int num = sh->raid_conf->pool_size; in shrink_buffers()
528 WARN_ON(sh->dev[i].page != sh->dev[i].orig_page); in shrink_buffers()
529 p = sh->dev[i].page; in shrink_buffers()
532 sh->dev[i].page = NULL; in shrink_buffers()
537 sh->dev[i].page = NULL; in shrink_buffers()
538 free_stripe_pages(sh); /* Free pages */ in shrink_buffers()
542 static int grow_buffers(struct stripe_head *sh, gfp_t gfp) in grow_buffers() argument
545 int num = sh->raid_conf->pool_size; in grow_buffers()
554 sh->dev[i].page = page; in grow_buffers()
555 sh->dev[i].orig_page = page; in grow_buffers()
556 sh->dev[i].offset = 0; in grow_buffers()
559 if (alloc_stripe_pages(sh, gfp)) in grow_buffers()
563 sh->dev[i].page = raid5_get_dev_page(sh, i); in grow_buffers()
564 sh->dev[i].orig_page = sh->dev[i].page; in grow_buffers()
565 sh->dev[i].offset = raid5_get_page_offset(sh, i); in grow_buffers()
572 struct stripe_head *sh);
574 static void init_stripe(struct stripe_head *sh, sector_t sector, int previous) in init_stripe() argument
576 struct r5conf *conf = sh->raid_conf; in init_stripe()
579 BUG_ON(atomic_read(&sh->count) != 0); in init_stripe()
580 BUG_ON(test_bit(STRIPE_HANDLE, &sh->state)); in init_stripe()
581 BUG_ON(stripe_operations_active(sh)); in init_stripe()
582 BUG_ON(sh->batch_head); in init_stripe()
588 sh->generation = conf->generation - previous; in init_stripe()
589 sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks; in init_stripe()
590 sh->sector = sector; in init_stripe()
591 stripe_set_idx(sector, conf, previous, sh); in init_stripe()
592 sh->state = 0; in init_stripe()
594 for (i = sh->disks; i--; ) { in init_stripe()
595 struct r5dev *dev = &sh->dev[i]; in init_stripe()
600 (unsigned long long)sh->sector, i, dev->toread, in init_stripe()
606 dev->sector = raid5_compute_blocknr(sh, i, previous); in init_stripe()
610 sh->overwrite_disks = 0; in init_stripe()
611 insert_hash(conf, sh); in init_stripe()
612 sh->cpu = smp_processor_id(); in init_stripe()
613 set_bit(STRIPE_BATCH_READY, &sh->state); in init_stripe()
619 struct stripe_head *sh; in __find_stripe() local
622 hlist_for_each_entry(sh, stripe_hash(conf, sector), hash) in __find_stripe()
623 if (sh->sector == sector && sh->generation == generation) in __find_stripe()
624 return sh; in __find_stripe()
633 struct stripe_head *sh; in find_get_stripe() local
635 sh = __find_stripe(conf, sector, generation); in find_get_stripe()
636 if (!sh) in find_get_stripe()
639 if (atomic_inc_not_zero(&sh->count)) in find_get_stripe()
640 return sh; in find_get_stripe()
644 * be on a list (sh->lru). Must remove the stripe from the list that in find_get_stripe()
649 if (!atomic_read(&sh->count)) { in find_get_stripe()
650 if (!test_bit(STRIPE_HANDLE, &sh->state)) in find_get_stripe()
652 BUG_ON(list_empty(&sh->lru) && in find_get_stripe()
653 !test_bit(STRIPE_EXPANDING, &sh->state)); in find_get_stripe()
657 list_del_init(&sh->lru); in find_get_stripe()
661 if (sh->group) { in find_get_stripe()
662 sh->group->stripes_cnt--; in find_get_stripe()
663 sh->group = NULL; in find_get_stripe()
666 atomic_inc(&sh->count); in find_get_stripe()
669 return sh; in find_get_stripe()
806 struct stripe_head *sh; in raid5_get_active_stripe() local
833 sh = find_get_stripe(conf, sector, conf->generation - previous, in raid5_get_active_stripe()
835 if (sh) in raid5_get_active_stripe()
839 sh = get_free_stripe(conf, hash); in raid5_get_active_stripe()
840 if (sh) { in raid5_get_active_stripe()
842 init_stripe(sh, sector, previous); in raid5_get_active_stripe()
843 atomic_inc(&sh->count); in raid5_get_active_stripe()
870 return sh; in raid5_get_active_stripe()
873 static bool is_full_stripe_write(struct stripe_head *sh) in is_full_stripe_write() argument
875 BUG_ON(sh->overwrite_disks > (sh->disks - sh->raid_conf->max_degraded)); in is_full_stripe_write()
876 return sh->overwrite_disks == (sh->disks - sh->raid_conf->max_degraded); in is_full_stripe_write()
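is_full_stripe_write() compares sh->overwrite_disks with the number of data devices (disks minus max_degraded); stripe_can_batch() just below additionally requires STRIPE_BATCH_READY before chaining such stripes into a batch. A worked example with illustrative numbers (the array geometry is an assumption, not taken from the listing):

    /* Worked example; the 6-device RAID-6 geometry is assumed for illustration. */
    int disks = 6, max_degraded = 2;             /* RAID-6: two parity devices       */
    int data_disks = disks - max_degraded;       /* 4 data blocks per stripe         */

    int overwrite_disks = 4;                     /* bios overwrite every data block  */
    int full_stripe_write = (overwrite_disks == data_disks);  /* 1: may be batched   */
    /* overwrite_disks = 3 would give 0, and the stripe is handled on its own.       */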
901 static bool stripe_can_batch(struct stripe_head *sh) in stripe_can_batch() argument
903 struct r5conf *conf = sh->raid_conf; in stripe_can_batch()
907 return test_bit(STRIPE_BATCH_READY, &sh->state) && in stripe_can_batch()
908 is_full_stripe_write(sh); in stripe_can_batch()
913 struct stripe_head *sh, struct stripe_head *last_sh) in stripe_add_to_batch_list() argument
921 tmp_sec = sh->sector; in stripe_add_to_batch_list()
924 head_sector = sh->sector - RAID5_STRIPE_SECTORS(conf); in stripe_add_to_batch_list()
941 lock_two_stripes(head, sh); in stripe_add_to_batch_list()
943 if (!stripe_can_batch(head) || !stripe_can_batch(sh)) in stripe_add_to_batch_list()
946 if (sh->batch_head) in stripe_add_to_batch_list()
950 while (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx) in stripe_add_to_batch_list()
952 if (head->dev[dd_idx].towrite->bi_opf != sh->dev[dd_idx].towrite->bi_opf || in stripe_add_to_batch_list()
953 bio_op(head->dev[dd_idx].towrite) != bio_op(sh->dev[dd_idx].towrite)) in stripe_add_to_batch_list()
970 sh->batch_head = head->batch_head; in stripe_add_to_batch_list()
976 list_add(&sh->batch_list, &head->batch_list); in stripe_add_to_batch_list()
980 sh->batch_head = head->batch_head; in stripe_add_to_batch_list()
982 list_add_tail(&sh->batch_list, &head->batch_list); in stripe_add_to_batch_list()
986 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in stripe_add_to_batch_list()
991 if (test_and_clear_bit(STRIPE_BIT_DELAY, &sh->state)) { in stripe_add_to_batch_list()
992 int seq = sh->bm_seq; in stripe_add_to_batch_list()
993 if (test_bit(STRIPE_BIT_DELAY, &sh->batch_head->state) && in stripe_add_to_batch_list()
994 sh->batch_head->bm_seq > seq) in stripe_add_to_batch_list()
995 seq = sh->batch_head->bm_seq; in stripe_add_to_batch_list()
996 set_bit(STRIPE_BIT_DELAY, &sh->batch_head->state); in stripe_add_to_batch_list()
997 sh->batch_head->bm_seq = seq; in stripe_add_to_batch_list()
1000 atomic_inc(&sh->count); in stripe_add_to_batch_list()
1002 unlock_two_stripes(head, sh); in stripe_add_to_batch_list()
1010 static int use_new_offset(struct r5conf *conf, struct stripe_head *sh) in use_new_offset() argument
1020 if (sh->generation == conf->generation - 1) in use_new_offset()
1137 static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) in ops_run_io() argument
1139 struct r5conf *conf = sh->raid_conf; in ops_run_io()
1140 int i, disks = sh->disks; in ops_run_io()
1141 struct stripe_head *head_sh = sh; in ops_run_io()
1148 if (log_stripe(sh, s) == 0) in ops_run_io()
1160 sh = head_sh; in ops_run_io()
1161 if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) { in ops_run_io()
1163 if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags)) in ops_run_io()
1165 if (test_bit(R5_Discard, &sh->dev[i].flags)) in ops_run_io()
1167 } else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags)) in ops_run_io()
1170 &sh->dev[i].flags)) { in ops_run_io()
1175 if (test_and_clear_bit(R5_SyncIO, &sh->dev[i].flags)) in ops_run_io()
1179 dev = &sh->dev[i]; in ops_run_io()
1221 int bad = is_badblock(rdev, sh->sector, RAID5_STRIPE_SECTORS(conf), in ops_run_io()
1255 set_bit(STRIPE_IO_STARTED, &sh->state); in ops_run_io()
1261 bi->bi_private = sh; in ops_run_io()
1264 __func__, (unsigned long long)sh->sector, in ops_run_io()
1266 atomic_inc(&sh->count); in ops_run_io()
1267 if (sh != head_sh) in ops_run_io()
1269 if (use_new_offset(conf, sh)) in ops_run_io()
1270 bi->bi_iter.bi_sector = (sh->sector in ops_run_io()
1273 bi->bi_iter.bi_sector = (sh->sector in ops_run_io()
1278 if (test_bit(R5_SkipCopy, &sh->dev[i].flags)) in ops_run_io()
1279 WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags)); in ops_run_io()
1282 test_bit(R5_InJournal, &sh->dev[i].flags)) in ops_run_io()
1288 sh->dev[i].vec.bv_page = sh->dev[i].orig_page; in ops_run_io()
1290 sh->dev[i].vec.bv_page = sh->dev[i].page; in ops_run_io()
1293 bi->bi_io_vec[0].bv_offset = sh->dev[i].offset; in ops_run_io()
1302 set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags); in ops_run_io()
1307 sh->dev[i].sector); in ops_run_io()
1318 set_bit(STRIPE_IO_STARTED, &sh->state); in ops_run_io()
1323 rbi->bi_private = sh; in ops_run_io()
1327 __func__, (unsigned long long)sh->sector, in ops_run_io()
1329 atomic_inc(&sh->count); in ops_run_io()
1330 if (sh != head_sh) in ops_run_io()
1332 if (use_new_offset(conf, sh)) in ops_run_io()
1333 rbi->bi_iter.bi_sector = (sh->sector in ops_run_io()
1336 rbi->bi_iter.bi_sector = (sh->sector in ops_run_io()
1338 if (test_bit(R5_SkipCopy, &sh->dev[i].flags)) in ops_run_io()
1339 WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags)); in ops_run_io()
1340 sh->dev[i].rvec.bv_page = sh->dev[i].page; in ops_run_io()
1343 rbi->bi_io_vec[0].bv_offset = sh->dev[i].offset; in ops_run_io()
1354 sh->dev[i].sector); in ops_run_io()
1362 bi->bi_opf, i, (unsigned long long)sh->sector); in ops_run_io()
1363 clear_bit(R5_LOCKED, &sh->dev[i].flags); in ops_run_io()
1364 set_bit(STRIPE_HANDLE, &sh->state); in ops_run_io()
1369 sh = list_first_entry(&sh->batch_list, struct stripe_head, in ops_run_io()
1371 if (sh != head_sh) in ops_run_io()
1382 struct stripe_head *sh, int no_skipcopy) in async_copy_data() argument
1390 struct r5conf *conf = sh->raid_conf; in async_copy_data()
1446 struct stripe_head *sh = stripe_head_ref; in ops_complete_biofill() local
1448 struct r5conf *conf = sh->raid_conf; in ops_complete_biofill()
1451 (unsigned long long)sh->sector); in ops_complete_biofill()
1454 for (i = sh->disks; i--; ) { in ops_complete_biofill()
1455 struct r5dev *dev = &sh->dev[i]; in ops_complete_biofill()
1476 clear_bit(STRIPE_BIOFILL_RUN, &sh->state); in ops_complete_biofill()
1478 set_bit(STRIPE_HANDLE, &sh->state); in ops_complete_biofill()
1479 raid5_release_stripe(sh); in ops_complete_biofill()
1482 static void ops_run_biofill(struct stripe_head *sh) in ops_run_biofill() argument
1487 struct r5conf *conf = sh->raid_conf; in ops_run_biofill()
1489 BUG_ON(sh->batch_head); in ops_run_biofill()
1491 (unsigned long long)sh->sector); in ops_run_biofill()
1493 for (i = sh->disks; i--; ) { in ops_run_biofill()
1494 struct r5dev *dev = &sh->dev[i]; in ops_run_biofill()
1497 spin_lock_irq(&sh->stripe_lock); in ops_run_biofill()
1500 spin_unlock_irq(&sh->stripe_lock); in ops_run_biofill()
1505 dev->sector, tx, sh, 0); in ops_run_biofill()
1511 atomic_inc(&sh->count); in ops_run_biofill()
1512 init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_biofill, sh, NULL); in ops_run_biofill()
1516 static void mark_target_uptodate(struct stripe_head *sh, int target) in mark_target_uptodate() argument
1523 tgt = &sh->dev[target]; in mark_target_uptodate()
1531 struct stripe_head *sh = stripe_head_ref; in ops_complete_compute() local
1534 (unsigned long long)sh->sector); in ops_complete_compute()
1537 mark_target_uptodate(sh, sh->ops.target); in ops_complete_compute()
1538 mark_target_uptodate(sh, sh->ops.target2); in ops_complete_compute()
1540 clear_bit(STRIPE_COMPUTE_RUN, &sh->state); in ops_complete_compute()
1541 if (sh->check_state == check_state_compute_run) in ops_complete_compute()
1542 sh->check_state = check_state_compute_result; in ops_complete_compute()
1543 set_bit(STRIPE_HANDLE, &sh->state); in ops_complete_compute()
1544 raid5_release_stripe(sh); in ops_complete_compute()
1554 static addr_conv_t *to_addr_conv(struct stripe_head *sh, in to_addr_conv() argument
1557 return (void *) (to_addr_page(percpu, i) + sh->disks + 2); in to_addr_conv()
1564 to_addr_offs(struct stripe_head *sh, struct raid5_percpu *percpu) in to_addr_offs() argument
1566 return (unsigned int *) (to_addr_conv(sh, percpu, 0) + sh->disks + 2); in to_addr_offs()
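to_addr_page()/to_addr_conv()/to_addr_offs() carve three arrays out of one per-CPU scribble buffer: sh->disks + 2 page pointers first, then the addr_conv_t scratch the async_tx API needs, then the per-source page offsets, which is what the "+ sh->disks + 2" pointer arithmetic above encodes. The struct below is purely illustrative (the kernel uses raw pointer math and sizes the buffer at allocation time); NDISKS stands in for sh->disks.

    /* Illustrative layout of one scribble object; not a real kernel struct. */
    struct scribble_obj_sketch {
            struct page *blocks[NDISKS + 2];  /* to_addr_page(): xor/syndrome sources   */
            addr_conv_t  conv[NDISKS + 2];    /* to_addr_conv(): async_tx scratch       */
            unsigned int offs[NDISKS + 2];    /* to_addr_offs(): per-source page offset */
    };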
1570 ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu) in ops_run_compute5() argument
1572 int disks = sh->disks; in ops_run_compute5()
1574 unsigned int *off_srcs = to_addr_offs(sh, percpu); in ops_run_compute5()
1575 int target = sh->ops.target; in ops_run_compute5()
1576 struct r5dev *tgt = &sh->dev[target]; in ops_run_compute5()
1584 BUG_ON(sh->batch_head); in ops_run_compute5()
1587 __func__, (unsigned long long)sh->sector, target); in ops_run_compute5()
1592 off_srcs[count] = sh->dev[i].offset; in ops_run_compute5()
1593 xor_srcs[count++] = sh->dev[i].page; in ops_run_compute5()
1597 atomic_inc(&sh->count); in ops_run_compute5()
1600 ops_complete_compute, sh, to_addr_conv(sh, percpu, 0)); in ops_run_compute5()
1603 RAID5_STRIPE_SIZE(sh->raid_conf), &submit); in ops_run_compute5()
1606 RAID5_STRIPE_SIZE(sh->raid_conf), &submit); in ops_run_compute5()
1612 * @srcs - (struct page *) array of size sh->disks
1614 * @sh - stripe_head to parse
1623 struct stripe_head *sh, in set_syndrome_sources() argument
1626 int disks = sh->disks; in set_syndrome_sources()
1627 int syndrome_disks = sh->ddf_layout ? disks : (disks - 2); in set_syndrome_sources()
1628 int d0_idx = raid6_d0(sh); in set_syndrome_sources()
1638 int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks); in set_syndrome_sources()
1639 struct r5dev *dev = &sh->dev[i]; in set_syndrome_sources()
1641 if (i == sh->qd_idx || i == sh->pd_idx || in set_syndrome_sources()
1650 srcs[slot] = sh->dev[i].orig_page; in set_syndrome_sources()
1652 srcs[slot] = sh->dev[i].page; in set_syndrome_sources()
1658 offs[slot] = sh->dev[i].offset; in set_syndrome_sources()
1667 ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu) in ops_run_compute6_1() argument
1669 int disks = sh->disks; in ops_run_compute6_1()
1671 unsigned int *offs = to_addr_offs(sh, percpu); in ops_run_compute6_1()
1673 int qd_idx = sh->qd_idx; in ops_run_compute6_1()
1682 BUG_ON(sh->batch_head); in ops_run_compute6_1()
1683 if (sh->ops.target < 0) in ops_run_compute6_1()
1684 target = sh->ops.target2; in ops_run_compute6_1()
1685 else if (sh->ops.target2 < 0) in ops_run_compute6_1()
1686 target = sh->ops.target; in ops_run_compute6_1()
1692 __func__, (unsigned long long)sh->sector, target); in ops_run_compute6_1()
1694 tgt = &sh->dev[target]; in ops_run_compute6_1()
1699 atomic_inc(&sh->count); in ops_run_compute6_1()
1702 count = set_syndrome_sources(blocks, offs, sh, SYNDROME_SRC_ALL); in ops_run_compute6_1()
1706 ops_complete_compute, sh, in ops_run_compute6_1()
1707 to_addr_conv(sh, percpu, 0)); in ops_run_compute6_1()
1709 RAID5_STRIPE_SIZE(sh->raid_conf), &submit); in ops_run_compute6_1()
1716 offs[count] = sh->dev[i].offset; in ops_run_compute6_1()
1717 blocks[count++] = sh->dev[i].page; in ops_run_compute6_1()
1721 NULL, ops_complete_compute, sh, in ops_run_compute6_1()
1722 to_addr_conv(sh, percpu, 0)); in ops_run_compute6_1()
1724 RAID5_STRIPE_SIZE(sh->raid_conf), &submit); in ops_run_compute6_1()
1731 ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu) in ops_run_compute6_2() argument
1733 int i, count, disks = sh->disks; in ops_run_compute6_2()
1734 int syndrome_disks = sh->ddf_layout ? disks : disks-2; in ops_run_compute6_2()
1735 int d0_idx = raid6_d0(sh); in ops_run_compute6_2()
1737 int target = sh->ops.target; in ops_run_compute6_2()
1738 int target2 = sh->ops.target2; in ops_run_compute6_2()
1739 struct r5dev *tgt = &sh->dev[target]; in ops_run_compute6_2()
1740 struct r5dev *tgt2 = &sh->dev[target2]; in ops_run_compute6_2()
1743 unsigned int *offs = to_addr_offs(sh, percpu); in ops_run_compute6_2()
1746 BUG_ON(sh->batch_head); in ops_run_compute6_2()
1748 __func__, (unsigned long long)sh->sector, target, target2); in ops_run_compute6_2()
1763 int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks); in ops_run_compute6_2()
1765 offs[slot] = sh->dev[i].offset; in ops_run_compute6_2()
1766 blocks[slot] = sh->dev[i].page; in ops_run_compute6_2()
1779 __func__, (unsigned long long)sh->sector, faila, failb); in ops_run_compute6_2()
1781 atomic_inc(&sh->count); in ops_run_compute6_2()
1788 ops_complete_compute, sh, in ops_run_compute6_2()
1789 to_addr_conv(sh, percpu, 0)); in ops_run_compute6_2()
1791 RAID5_STRIPE_SIZE(sh->raid_conf), in ops_run_compute6_2()
1797 int qd_idx = sh->qd_idx; in ops_run_compute6_2()
1809 offs[count] = sh->dev[i].offset; in ops_run_compute6_2()
1810 blocks[count++] = sh->dev[i].page; in ops_run_compute6_2()
1812 dest = sh->dev[data_target].page; in ops_run_compute6_2()
1813 dest_off = sh->dev[data_target].offset; in ops_run_compute6_2()
1817 to_addr_conv(sh, percpu, 0)); in ops_run_compute6_2()
1819 RAID5_STRIPE_SIZE(sh->raid_conf), in ops_run_compute6_2()
1822 count = set_syndrome_sources(blocks, offs, sh, SYNDROME_SRC_ALL); in ops_run_compute6_2()
1824 ops_complete_compute, sh, in ops_run_compute6_2()
1825 to_addr_conv(sh, percpu, 0)); in ops_run_compute6_2()
1827 RAID5_STRIPE_SIZE(sh->raid_conf), in ops_run_compute6_2()
1832 ops_complete_compute, sh, in ops_run_compute6_2()
1833 to_addr_conv(sh, percpu, 0)); in ops_run_compute6_2()
1837 RAID5_STRIPE_SIZE(sh->raid_conf), in ops_run_compute6_2()
1843 RAID5_STRIPE_SIZE(sh->raid_conf), in ops_run_compute6_2()
1852 struct stripe_head *sh = stripe_head_ref; in ops_complete_prexor() local
1855 (unsigned long long)sh->sector); in ops_complete_prexor()
1857 if (r5c_is_writeback(sh->raid_conf->log)) in ops_complete_prexor()
1862 r5c_release_extra_page(sh); in ops_complete_prexor()
1866 ops_run_prexor5(struct stripe_head *sh, struct raid5_percpu *percpu, in ops_run_prexor5() argument
1869 int disks = sh->disks; in ops_run_prexor5()
1871 unsigned int *off_srcs = to_addr_offs(sh, percpu); in ops_run_prexor5()
1872 int count = 0, pd_idx = sh->pd_idx, i; in ops_run_prexor5()
1876 unsigned int off_dest = off_srcs[count] = sh->dev[pd_idx].offset; in ops_run_prexor5()
1877 struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; in ops_run_prexor5()
1879 BUG_ON(sh->batch_head); in ops_run_prexor5()
1881 (unsigned long long)sh->sector); in ops_run_prexor5()
1884 struct r5dev *dev = &sh->dev[i]; in ops_run_prexor5()
1900 ops_complete_prexor, sh, to_addr_conv(sh, percpu, 0)); in ops_run_prexor5()
1902 RAID5_STRIPE_SIZE(sh->raid_conf), &submit); in ops_run_prexor5()
1908 ops_run_prexor6(struct stripe_head *sh, struct raid5_percpu *percpu, in ops_run_prexor6() argument
1912 unsigned int *offs = to_addr_offs(sh, percpu); in ops_run_prexor6()
1917 (unsigned long long)sh->sector); in ops_run_prexor6()
1919 count = set_syndrome_sources(blocks, offs, sh, SYNDROME_SRC_WANT_DRAIN); in ops_run_prexor6()
1922 ops_complete_prexor, sh, to_addr_conv(sh, percpu, 0)); in ops_run_prexor6()
1924 RAID5_STRIPE_SIZE(sh->raid_conf), &submit); in ops_run_prexor6()
1930 ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) in ops_run_biodrain() argument
1932 struct r5conf *conf = sh->raid_conf; in ops_run_biodrain()
1933 int disks = sh->disks; in ops_run_biodrain()
1935 struct stripe_head *head_sh = sh; in ops_run_biodrain()
1938 (unsigned long long)sh->sector); in ops_run_biodrain()
1944 sh = head_sh; in ops_run_biodrain()
1949 dev = &sh->dev[i]; in ops_run_biodrain()
1955 spin_lock_irq(&sh->stripe_lock); in ops_run_biodrain()
1958 sh->overwrite_disks = 0; in ops_run_biodrain()
1961 spin_unlock_irq(&sh->stripe_lock); in ops_run_biodrain()
1975 dev->sector, tx, sh, in ops_run_biodrain()
1988 sh = list_first_entry(&sh->batch_list, in ops_run_biodrain()
1991 if (sh == head_sh) in ops_run_biodrain()
2003 struct stripe_head *sh = stripe_head_ref; in ops_complete_reconstruct() local
2004 int disks = sh->disks; in ops_complete_reconstruct()
2005 int pd_idx = sh->pd_idx; in ops_complete_reconstruct()
2006 int qd_idx = sh->qd_idx; in ops_complete_reconstruct()
2011 (unsigned long long)sh->sector); in ops_complete_reconstruct()
2014 fua |= test_bit(R5_WantFUA, &sh->dev[i].flags); in ops_complete_reconstruct()
2015 sync |= test_bit(R5_SyncIO, &sh->dev[i].flags); in ops_complete_reconstruct()
2016 discard |= test_bit(R5_Discard, &sh->dev[i].flags); in ops_complete_reconstruct()
2020 struct r5dev *dev = &sh->dev[i]; in ops_complete_reconstruct()
2025 if (test_bit(STRIPE_EXPAND_READY, &sh->state)) in ops_complete_reconstruct()
2035 if (sh->reconstruct_state == reconstruct_state_drain_run) in ops_complete_reconstruct()
2036 sh->reconstruct_state = reconstruct_state_drain_result; in ops_complete_reconstruct()
2037 else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) in ops_complete_reconstruct()
2038 sh->reconstruct_state = reconstruct_state_prexor_drain_result; in ops_complete_reconstruct()
2040 BUG_ON(sh->reconstruct_state != reconstruct_state_run); in ops_complete_reconstruct()
2041 sh->reconstruct_state = reconstruct_state_result; in ops_complete_reconstruct()
2044 set_bit(STRIPE_HANDLE, &sh->state); in ops_complete_reconstruct()
2045 raid5_release_stripe(sh); in ops_complete_reconstruct()
2049 ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu, in ops_run_reconstruct5() argument
2052 int disks = sh->disks; in ops_run_reconstruct5()
2056 int count, pd_idx = sh->pd_idx, i; in ops_run_reconstruct5()
2062 struct stripe_head *head_sh = sh; in ops_run_reconstruct5()
2066 (unsigned long long)sh->sector); in ops_run_reconstruct5()
2068 for (i = 0; i < sh->disks; i++) { in ops_run_reconstruct5()
2071 if (!test_bit(R5_Discard, &sh->dev[i].flags)) in ops_run_reconstruct5()
2074 if (i >= sh->disks) { in ops_run_reconstruct5()
2075 atomic_inc(&sh->count); in ops_run_reconstruct5()
2076 set_bit(R5_Discard, &sh->dev[pd_idx].flags); in ops_run_reconstruct5()
2077 ops_complete_reconstruct(sh); in ops_run_reconstruct5()
2083 off_srcs = to_addr_offs(sh, percpu); in ops_run_reconstruct5()
2089 off_dest = off_srcs[count] = sh->dev[pd_idx].offset; in ops_run_reconstruct5()
2090 xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; in ops_run_reconstruct5()
2092 struct r5dev *dev = &sh->dev[i]; in ops_run_reconstruct5()
2100 xor_dest = sh->dev[pd_idx].page; in ops_run_reconstruct5()
2101 off_dest = sh->dev[pd_idx].offset; in ops_run_reconstruct5()
2103 struct r5dev *dev = &sh->dev[i]; in ops_run_reconstruct5()
2117 list_first_entry(&sh->batch_list, in ops_run_reconstruct5()
2125 to_addr_conv(sh, percpu, j)); in ops_run_reconstruct5()
2129 to_addr_conv(sh, percpu, j)); in ops_run_reconstruct5()
2134 RAID5_STRIPE_SIZE(sh->raid_conf), &submit); in ops_run_reconstruct5()
2137 RAID5_STRIPE_SIZE(sh->raid_conf), &submit); in ops_run_reconstruct5()
2140 sh = list_first_entry(&sh->batch_list, struct stripe_head, in ops_run_reconstruct5()
2147 ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu, in ops_run_reconstruct6() argument
2154 struct stripe_head *head_sh = sh; in ops_run_reconstruct6()
2159 pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector); in ops_run_reconstruct6()
2161 for (i = 0; i < sh->disks; i++) { in ops_run_reconstruct6()
2162 if (sh->pd_idx == i || sh->qd_idx == i) in ops_run_reconstruct6()
2164 if (!test_bit(R5_Discard, &sh->dev[i].flags)) in ops_run_reconstruct6()
2167 if (i >= sh->disks) { in ops_run_reconstruct6()
2168 atomic_inc(&sh->count); in ops_run_reconstruct6()
2169 set_bit(R5_Discard, &sh->dev[sh->pd_idx].flags); in ops_run_reconstruct6()
2170 set_bit(R5_Discard, &sh->dev[sh->qd_idx].flags); in ops_run_reconstruct6()
2171 ops_complete_reconstruct(sh); in ops_run_reconstruct6()
2177 offs = to_addr_offs(sh, percpu); in ops_run_reconstruct6()
2179 if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) { in ops_run_reconstruct6()
2187 count = set_syndrome_sources(blocks, offs, sh, synflags); in ops_run_reconstruct6()
2189 list_first_entry(&sh->batch_list, in ops_run_reconstruct6()
2195 head_sh, to_addr_conv(sh, percpu, j)); in ops_run_reconstruct6()
2198 to_addr_conv(sh, percpu, j)); in ops_run_reconstruct6()
2200 RAID5_STRIPE_SIZE(sh->raid_conf), &submit); in ops_run_reconstruct6()
2203 sh = list_first_entry(&sh->batch_list, struct stripe_head, in ops_run_reconstruct6()
2211 struct stripe_head *sh = stripe_head_ref; in ops_complete_check() local
2214 (unsigned long long)sh->sector); in ops_complete_check()
2216 sh->check_state = check_state_check_result; in ops_complete_check()
2217 set_bit(STRIPE_HANDLE, &sh->state); in ops_complete_check()
2218 raid5_release_stripe(sh); in ops_complete_check()
2221 static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu) in ops_run_check_p() argument
2223 int disks = sh->disks; in ops_run_check_p()
2224 int pd_idx = sh->pd_idx; in ops_run_check_p()
2225 int qd_idx = sh->qd_idx; in ops_run_check_p()
2229 unsigned int *off_srcs = to_addr_offs(sh, percpu); in ops_run_check_p()
2236 (unsigned long long)sh->sector); in ops_run_check_p()
2238 BUG_ON(sh->batch_head); in ops_run_check_p()
2240 xor_dest = sh->dev[pd_idx].page; in ops_run_check_p()
2241 off_dest = sh->dev[pd_idx].offset; in ops_run_check_p()
2247 off_srcs[count] = sh->dev[i].offset; in ops_run_check_p()
2248 xor_srcs[count++] = sh->dev[i].page; in ops_run_check_p()
2252 to_addr_conv(sh, percpu, 0)); in ops_run_check_p()
2254 RAID5_STRIPE_SIZE(sh->raid_conf), in ops_run_check_p()
2255 &sh->ops.zero_sum_result, &submit); in ops_run_check_p()
2257 atomic_inc(&sh->count); in ops_run_check_p()
2258 init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_check, sh, NULL); in ops_run_check_p()
2262 static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp) in ops_run_check_pq() argument
2265 unsigned int *offs = to_addr_offs(sh, percpu); in ops_run_check_pq()
2270 (unsigned long long)sh->sector, checkp); in ops_run_check_pq()
2272 BUG_ON(sh->batch_head); in ops_run_check_pq()
2273 count = set_syndrome_sources(srcs, offs, sh, SYNDROME_SRC_ALL); in ops_run_check_pq()
2277 atomic_inc(&sh->count); in ops_run_check_pq()
2279 sh, to_addr_conv(sh, percpu, 0)); in ops_run_check_pq()
2281 RAID5_STRIPE_SIZE(sh->raid_conf), in ops_run_check_pq()
2282 &sh->ops.zero_sum_result, percpu->spare_page, 0, &submit); in ops_run_check_pq()
2285 static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request) in raid_run_ops() argument
2287 int overlap_clear = 0, i, disks = sh->disks; in raid_run_ops()
2289 struct r5conf *conf = sh->raid_conf; in raid_run_ops()
2296 ops_run_biofill(sh); in raid_run_ops()
2302 tx = ops_run_compute5(sh, percpu); in raid_run_ops()
2304 if (sh->ops.target2 < 0 || sh->ops.target < 0) in raid_run_ops()
2305 tx = ops_run_compute6_1(sh, percpu); in raid_run_ops()
2307 tx = ops_run_compute6_2(sh, percpu); in raid_run_ops()
2316 tx = ops_run_prexor5(sh, percpu, tx); in raid_run_ops()
2318 tx = ops_run_prexor6(sh, percpu, tx); in raid_run_ops()
2322 tx = ops_run_partial_parity(sh, percpu, tx); in raid_run_ops()
2325 tx = ops_run_biodrain(sh, tx); in raid_run_ops()
2331 ops_run_reconstruct5(sh, percpu, tx); in raid_run_ops()
2333 ops_run_reconstruct6(sh, percpu, tx); in raid_run_ops()
2337 if (sh->check_state == check_state_run) in raid_run_ops()
2338 ops_run_check_p(sh, percpu); in raid_run_ops()
2339 else if (sh->check_state == check_state_run_q) in raid_run_ops()
2340 ops_run_check_pq(sh, percpu, 0); in raid_run_ops()
2341 else if (sh->check_state == check_state_run_pq) in raid_run_ops()
2342 ops_run_check_pq(sh, percpu, 1); in raid_run_ops()
2347 if (overlap_clear && !sh->batch_head) { in raid_run_ops()
2349 struct r5dev *dev = &sh->dev[i]; in raid_run_ops()
2351 wake_up(&sh->raid_conf->wait_for_overlap); in raid_run_ops()
2357 static void free_stripe(struct kmem_cache *sc, struct stripe_head *sh) in free_stripe() argument
2360 kfree(sh->pages); in free_stripe()
2362 if (sh->ppl_page) in free_stripe()
2363 __free_page(sh->ppl_page); in free_stripe()
2364 kmem_cache_free(sc, sh); in free_stripe()
2370 struct stripe_head *sh; in alloc_stripe() local
2372 sh = kmem_cache_zalloc(sc, gfp); in alloc_stripe()
2373 if (sh) { in alloc_stripe()
2374 spin_lock_init(&sh->stripe_lock); in alloc_stripe()
2375 spin_lock_init(&sh->batch_lock); in alloc_stripe()
2376 INIT_LIST_HEAD(&sh->batch_list); in alloc_stripe()
2377 INIT_LIST_HEAD(&sh->lru); in alloc_stripe()
2378 INIT_LIST_HEAD(&sh->r5c); in alloc_stripe()
2379 INIT_LIST_HEAD(&sh->log_list); in alloc_stripe()
2380 atomic_set(&sh->count, 1); in alloc_stripe()
2381 sh->raid_conf = conf; in alloc_stripe()
2382 sh->log_start = MaxSector; in alloc_stripe()
2385 sh->ppl_page = alloc_page(gfp); in alloc_stripe()
2386 if (!sh->ppl_page) { in alloc_stripe()
2387 free_stripe(sc, sh); in alloc_stripe()
2392 if (init_stripe_shared_pages(sh, conf, disks)) { in alloc_stripe()
2393 free_stripe(sc, sh); in alloc_stripe()
2398 return sh; in alloc_stripe()
2402 struct stripe_head *sh; in grow_one_stripe() local
2404 sh = alloc_stripe(conf->slab_cache, gfp, conf->pool_size, conf); in grow_one_stripe()
2405 if (!sh) in grow_one_stripe()
2408 if (grow_buffers(sh, gfp)) { in grow_one_stripe()
2409 shrink_buffers(sh); in grow_one_stripe()
2410 free_stripe(conf->slab_cache, sh); in grow_one_stripe()
2413 sh->hash_lock_index = in grow_one_stripe()
2418 raid5_release_stripe(sh); in grow_one_stripe()
2704 struct stripe_head *sh; in drop_one_stripe() local
2708 sh = get_free_stripe(conf, hash); in drop_one_stripe()
2710 if (!sh) in drop_one_stripe()
2712 BUG_ON(atomic_read(&sh->count)); in drop_one_stripe()
2713 shrink_buffers(sh); in drop_one_stripe()
2714 free_stripe(conf->slab_cache, sh); in drop_one_stripe()
2754 struct stripe_head *sh = bi->bi_private; in raid5_end_read_request() local
2755 struct r5conf *conf = sh->raid_conf; in raid5_end_read_request()
2756 int disks = sh->disks, i; in raid5_end_read_request()
2761 if (bi == &sh->dev[i].req) in raid5_end_read_request()
2765 (unsigned long long)sh->sector, i, atomic_read(&sh->count), in raid5_end_read_request()
2771 if (test_bit(R5_ReadRepl, &sh->dev[i].flags)) in raid5_end_read_request()
2781 if (use_new_offset(conf, sh)) in raid5_end_read_request()
2782 s = sh->sector + rdev->new_data_offset; in raid5_end_read_request()
2784 s = sh->sector + rdev->data_offset; in raid5_end_read_request()
2786 set_bit(R5_UPTODATE, &sh->dev[i].flags); in raid5_end_read_request()
2787 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { in raid5_end_read_request()
2798 clear_bit(R5_ReadError, &sh->dev[i].flags); in raid5_end_read_request()
2799 clear_bit(R5_ReWrite, &sh->dev[i].flags); in raid5_end_read_request()
2800 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) in raid5_end_read_request()
2801 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags); in raid5_end_read_request()
2803 if (test_bit(R5_InJournal, &sh->dev[i].flags)) in raid5_end_read_request()
2808 set_bit(R5_OrigPageUPTDODATE, &sh->dev[i].flags); in raid5_end_read_request()
2816 clear_bit(R5_UPTODATE, &sh->dev[i].flags); in raid5_end_read_request()
2819 if (test_bit(R5_ReadRepl, &sh->dev[i].flags)) in raid5_end_read_request()
2832 } else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) { in raid5_end_read_request()
2853 && !test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) in raid5_end_read_request()
2856 if (sh->qd_idx >= 0 && sh->pd_idx == i) in raid5_end_read_request()
2857 set_bit(R5_ReadError, &sh->dev[i].flags); in raid5_end_read_request()
2858 else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) { in raid5_end_read_request()
2859 set_bit(R5_ReadError, &sh->dev[i].flags); in raid5_end_read_request()
2860 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags); in raid5_end_read_request()
2862 set_bit(R5_ReadNoMerge, &sh->dev[i].flags); in raid5_end_read_request()
2864 clear_bit(R5_ReadError, &sh->dev[i].flags); in raid5_end_read_request()
2865 clear_bit(R5_ReWrite, &sh->dev[i].flags); in raid5_end_read_request()
2869 rdev, sh->sector, RAID5_STRIPE_SECTORS(conf), 0))) in raid5_end_read_request()
2875 clear_bit(R5_LOCKED, &sh->dev[i].flags); in raid5_end_read_request()
2876 set_bit(STRIPE_HANDLE, &sh->state); in raid5_end_read_request()
2877 raid5_release_stripe(sh); in raid5_end_read_request()
2882 struct stripe_head *sh = bi->bi_private; in raid5_end_write_request() local
2883 struct r5conf *conf = sh->raid_conf; in raid5_end_write_request()
2884 int disks = sh->disks, i; in raid5_end_write_request()
2891 if (bi == &sh->dev[i].req) { in raid5_end_write_request()
2895 if (bi == &sh->dev[i].rreq) { in raid5_end_write_request()
2909 (unsigned long long)sh->sector, i, atomic_read(&sh->count), in raid5_end_write_request()
2919 else if (is_badblock(rdev, sh->sector, in raid5_end_write_request()
2922 set_bit(R5_MadeGoodRepl, &sh->dev[i].flags); in raid5_end_write_request()
2926 set_bit(R5_WriteError, &sh->dev[i].flags); in raid5_end_write_request()
2930 } else if (is_badblock(rdev, sh->sector, in raid5_end_write_request()
2933 set_bit(R5_MadeGood, &sh->dev[i].flags); in raid5_end_write_request()
2934 if (test_bit(R5_ReadError, &sh->dev[i].flags)) in raid5_end_write_request()
2939 set_bit(R5_ReWrite, &sh->dev[i].flags); in raid5_end_write_request()
2944 if (sh->batch_head && bi->bi_status && !replacement) in raid5_end_write_request()
2945 set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state); in raid5_end_write_request()
2948 if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags)) in raid5_end_write_request()
2949 clear_bit(R5_LOCKED, &sh->dev[i].flags); in raid5_end_write_request()
2950 set_bit(STRIPE_HANDLE, &sh->state); in raid5_end_write_request()
2952 if (sh->batch_head && sh != sh->batch_head) in raid5_end_write_request()
2953 raid5_release_stripe(sh->batch_head); in raid5_end_write_request()
2954 raid5_release_stripe(sh); in raid5_end_write_request()
2997 struct stripe_head *sh) in raid5_compute_sector() argument
3185 if (sh) { in raid5_compute_sector()
3186 sh->pd_idx = pd_idx; in raid5_compute_sector()
3187 sh->qd_idx = qd_idx; in raid5_compute_sector()
3188 sh->ddf_layout = ddf_layout; in raid5_compute_sector()
3197 sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous) in raid5_compute_blocknr() argument
3199 struct r5conf *conf = sh->raid_conf; in raid5_compute_blocknr()
3200 int raid_disks = sh->disks; in raid5_compute_blocknr()
3202 sector_t new_sector = sh->sector, check; in raid5_compute_blocknr()
3217 if (i == sh->pd_idx) in raid5_compute_blocknr()
3225 if (i > sh->pd_idx) in raid5_compute_blocknr()
3230 if (i < sh->pd_idx) in raid5_compute_blocknr()
3232 i -= (sh->pd_idx + 1); in raid5_compute_blocknr()
3244 if (i == sh->qd_idx) in raid5_compute_blocknr()
3251 if (sh->pd_idx == raid_disks-1) in raid5_compute_blocknr()
3253 else if (i > sh->pd_idx) in raid5_compute_blocknr()
3258 if (sh->pd_idx == raid_disks-1) in raid5_compute_blocknr()
3262 if (i < sh->pd_idx) in raid5_compute_blocknr()
3264 i -= (sh->pd_idx + 2); in raid5_compute_blocknr()
3274 if (sh->pd_idx == 0) in raid5_compute_blocknr()
3278 if (i < sh->pd_idx) in raid5_compute_blocknr()
3280 i -= (sh->pd_idx + 1); in raid5_compute_blocknr()
3285 if (i > sh->pd_idx) in raid5_compute_blocknr()
3290 if (i < sh->pd_idx) in raid5_compute_blocknr()
3292 i -= (sh->pd_idx + 1); in raid5_compute_blocknr()
3308 if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx in raid5_compute_blocknr()
3309 || sh2.qd_idx != sh->qd_idx) { in raid5_compute_blocknr()
3374 schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s, in schedule_reconstruction() argument
3377 int i, pd_idx = sh->pd_idx, qd_idx = sh->qd_idx, disks = sh->disks; in schedule_reconstruction()
3378 struct r5conf *conf = sh->raid_conf; in schedule_reconstruction()
3388 r5c_release_extra_page(sh); in schedule_reconstruction()
3391 struct r5dev *dev = &sh->dev[i]; in schedule_reconstruction()
3412 sh->reconstruct_state = reconstruct_state_drain_run; in schedule_reconstruction()
3415 sh->reconstruct_state = reconstruct_state_run; in schedule_reconstruction()
3420 if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state)) in schedule_reconstruction()
3423 BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) || in schedule_reconstruction()
3424 test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags))); in schedule_reconstruction()
3426 (!(test_bit(R5_UPTODATE, &sh->dev[qd_idx].flags) || in schedule_reconstruction()
3427 test_bit(R5_Wantcompute, &sh->dev[qd_idx].flags)))); in schedule_reconstruction()
3430 struct r5dev *dev = &sh->dev[i]; in schedule_reconstruction()
3449 sh->reconstruct_state = reconstruct_state_prexor_drain_run; in schedule_reconstruction()
3458 set_bit(R5_LOCKED, &sh->dev[pd_idx].flags); in schedule_reconstruction()
3459 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); in schedule_reconstruction()
3463 int qd_idx = sh->qd_idx; in schedule_reconstruction()
3464 struct r5dev *dev = &sh->dev[qd_idx]; in schedule_reconstruction()
3471 if (raid5_has_ppl(sh->raid_conf) && sh->ppl_page && in schedule_reconstruction()
3473 !test_bit(STRIPE_FULL_WRITE, &sh->state) && in schedule_reconstruction()
3474 test_bit(R5_Insync, &sh->dev[pd_idx].flags)) in schedule_reconstruction()
3478 __func__, (unsigned long long)sh->sector, in schedule_reconstruction()
3482 static bool stripe_bio_overlaps(struct stripe_head *sh, struct bio *bi, in stripe_bio_overlaps() argument
3485 struct r5conf *conf = sh->raid_conf; in stripe_bio_overlaps()
3489 bi->bi_iter.bi_sector, sh->sector); in stripe_bio_overlaps()
3492 if (sh->batch_head) in stripe_bio_overlaps()
3496 bip = &sh->dev[dd_idx].towrite; in stripe_bio_overlaps()
3498 bip = &sh->dev[dd_idx].toread; in stripe_bio_overlaps()
3523 for (i = 0; i < sh->disks; i++) { in stripe_bio_overlaps()
3524 if (i != sh->pd_idx && in stripe_bio_overlaps()
3525 (i == dd_idx || sh->dev[i].towrite)) { in stripe_bio_overlaps()
3526 sector = sh->dev[i].sector; in stripe_bio_overlaps()
3542 static void __add_stripe_bio(struct stripe_head *sh, struct bio *bi, in __add_stripe_bio() argument
3545 struct r5conf *conf = sh->raid_conf; in __add_stripe_bio()
3550 bip = &sh->dev[dd_idx].towrite; in __add_stripe_bio()
3554 bip = &sh->dev[dd_idx].toread; in __add_stripe_bio()
3561 clear_bit(STRIPE_BATCH_READY, &sh->state); in __add_stripe_bio()
3572 sector_t sector = sh->dev[dd_idx].sector; in __add_stripe_bio()
3573 for (bi=sh->dev[dd_idx].towrite; in __add_stripe_bio()
3574 sector < sh->dev[dd_idx].sector + RAID5_STRIPE_SECTORS(conf) && in __add_stripe_bio()
3576 bi = r5_next_bio(conf, bi, sh->dev[dd_idx].sector)) { in __add_stripe_bio()
3580 if (sector >= sh->dev[dd_idx].sector + RAID5_STRIPE_SECTORS(conf)) in __add_stripe_bio()
3581 if (!test_and_set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags)) in __add_stripe_bio()
3582 sh->overwrite_disks++; in __add_stripe_bio()
3586 (*bip)->bi_iter.bi_sector, sh->sector, dd_idx, in __add_stripe_bio()
3587 sh->dev[dd_idx].sector); in __add_stripe_bio()
3589 if (conf->mddev->bitmap && firstwrite && !sh->batch_head) { in __add_stripe_bio()
3590 sh->bm_seq = conf->seq_flush+1; in __add_stripe_bio()
3591 set_bit(STRIPE_BIT_DELAY, &sh->state); in __add_stripe_bio()
3600 static bool add_stripe_bio(struct stripe_head *sh, struct bio *bi, in add_stripe_bio() argument
3603 spin_lock_irq(&sh->stripe_lock); in add_stripe_bio()
3605 if (stripe_bio_overlaps(sh, bi, dd_idx, forwrite)) { in add_stripe_bio()
3606 set_bit(R5_Overlap, &sh->dev[dd_idx].flags); in add_stripe_bio()
3607 spin_unlock_irq(&sh->stripe_lock); in add_stripe_bio()
3611 __add_stripe_bio(sh, bi, dd_idx, forwrite, previous); in add_stripe_bio()
3612 spin_unlock_irq(&sh->stripe_lock); in add_stripe_bio()
3619 struct stripe_head *sh) in stripe_set_idx() argument
3631 &dd_idx, sh); in stripe_set_idx()
3635 handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, in handle_failed_stripe() argument
3639 BUG_ON(sh->batch_head); in handle_failed_stripe()
3643 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { in handle_failed_stripe()
3656 sh->sector, in handle_failed_stripe()
3662 spin_lock_irq(&sh->stripe_lock); in handle_failed_stripe()
3664 bi = sh->dev[i].towrite; in handle_failed_stripe()
3665 sh->dev[i].towrite = NULL; in handle_failed_stripe()
3666 sh->overwrite_disks = 0; in handle_failed_stripe()
3667 spin_unlock_irq(&sh->stripe_lock); in handle_failed_stripe()
3669 log_stripe_write_finished(sh); in handle_failed_stripe()
3671 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) in handle_failed_stripe()
3675 sh->dev[i].sector + RAID5_STRIPE_SECTORS(conf)) { in handle_failed_stripe()
3676 struct bio *nextbi = r5_next_bio(conf, bi, sh->dev[i].sector); in handle_failed_stripe()
3683 bi = sh->dev[i].written; in handle_failed_stripe()
3684 sh->dev[i].written = NULL; in handle_failed_stripe()
3685 if (test_and_clear_bit(R5_SkipCopy, &sh->dev[i].flags)) { in handle_failed_stripe()
3686 WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags)); in handle_failed_stripe()
3687 sh->dev[i].page = sh->dev[i].orig_page; in handle_failed_stripe()
3691 sh->dev[i].sector + RAID5_STRIPE_SECTORS(conf)) { in handle_failed_stripe()
3692 struct bio *bi2 = r5_next_bio(conf, bi, sh->dev[i].sector); in handle_failed_stripe()
3702 if (!test_bit(R5_Wantfill, &sh->dev[i].flags) && in handle_failed_stripe()
3704 (!test_bit(R5_Insync, &sh->dev[i].flags) || in handle_failed_stripe()
3705 test_bit(R5_ReadError, &sh->dev[i].flags))) { in handle_failed_stripe()
3706 spin_lock_irq(&sh->stripe_lock); in handle_failed_stripe()
3707 bi = sh->dev[i].toread; in handle_failed_stripe()
3708 sh->dev[i].toread = NULL; in handle_failed_stripe()
3709 spin_unlock_irq(&sh->stripe_lock); in handle_failed_stripe()
3710 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) in handle_failed_stripe()
3715 sh->dev[i].sector + RAID5_STRIPE_SECTORS(conf)) { in handle_failed_stripe()
3717 r5_next_bio(conf, bi, sh->dev[i].sector); in handle_failed_stripe()
3726 clear_bit(R5_LOCKED, &sh->dev[i].flags); in handle_failed_stripe()
3731 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) in handle_failed_stripe()
3737 handle_failed_sync(struct r5conf *conf, struct stripe_head *sh, in handle_failed_sync() argument
3743 BUG_ON(sh->batch_head); in handle_failed_sync()
3744 clear_bit(STRIPE_SYNCING, &sh->state); in handle_failed_sync()
3745 if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags)) in handle_failed_sync()
3766 && !rdev_set_badblocks(rdev, sh->sector, in handle_failed_sync()
3773 && !rdev_set_badblocks(rdev, sh->sector, in handle_failed_sync()
3785 static int want_replace(struct stripe_head *sh, int disk_idx) in want_replace() argument
3791 rdev = rcu_dereference(sh->raid_conf->disks[disk_idx].replacement); in want_replace()
3795 && (rdev->recovery_offset <= sh->sector in want_replace()
3796 || rdev->mddev->recovery_cp <= sh->sector)) in want_replace()
3802 static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s, in need_this_block() argument
3805 struct r5dev *dev = &sh->dev[disk_idx]; in need_this_block()
3806 struct r5dev *fdev[2] = { &sh->dev[s->failed_num[0]], in need_this_block()
3807 &sh->dev[s->failed_num[1]] }; in need_this_block()
3809 bool force_rcw = (sh->raid_conf->rmw_level == PARITY_DISABLE_RMW); in need_this_block()
3825 (s->replacing && want_replace(sh, disk_idx))) in need_this_block()
3850 !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in need_this_block()
3871 s->failed_num[i] == sh->pd_idx || in need_this_block()
3872 s->failed_num[i] == sh->qd_idx) && in need_this_block()
3889 sh->sector < sh->raid_conf->mddev->recovery_cp) in need_this_block()
3893 if (s->failed_num[i] != sh->pd_idx && in need_this_block()
3894 s->failed_num[i] != sh->qd_idx && in need_this_block()
3909 static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s, in fetch_block() argument
3912 struct r5dev *dev = &sh->dev[disk_idx]; in fetch_block()
3915 if (need_this_block(sh, s, disk_idx, disks)) { in fetch_block()
3921 BUG_ON(sh->batch_head); in fetch_block()
3933 ((sh->qd_idx >= 0 && sh->pd_idx == disk_idx) || in fetch_block()
3940 (unsigned long long)sh->sector, disk_idx); in fetch_block()
3941 set_bit(STRIPE_COMPUTE_RUN, &sh->state); in fetch_block()
3944 sh->ops.target = disk_idx; in fetch_block()
3945 sh->ops.target2 = -1; /* no 2nd target */ in fetch_block()
3964 &sh->dev[other].flags)) in fetch_block()
3969 (unsigned long long)sh->sector, in fetch_block()
3971 set_bit(STRIPE_COMPUTE_RUN, &sh->state); in fetch_block()
3973 set_bit(R5_Wantcompute, &sh->dev[disk_idx].flags); in fetch_block()
3974 set_bit(R5_Wantcompute, &sh->dev[other].flags); in fetch_block()
3975 sh->ops.target = disk_idx; in fetch_block()
3976 sh->ops.target2 = other; in fetch_block()
3995 static void handle_stripe_fill(struct stripe_head *sh, in handle_stripe_fill() argument
4005 if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state && in handle_stripe_fill()
4006 !sh->reconstruct_state) { in handle_stripe_fill()
4016 if (test_bit(STRIPE_R5C_CACHING, &sh->state)) in handle_stripe_fill()
4017 r5c_make_stripe_write_out(sh); in handle_stripe_fill()
4022 if (fetch_block(sh, s, i, disks)) in handle_stripe_fill()
4026 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe_fill()
4037 struct stripe_head *sh, int disks) in handle_stripe_clean_event() argument
4042 struct stripe_head *head_sh = sh; in handle_stripe_clean_event()
4046 if (sh->dev[i].written) { in handle_stripe_clean_event()
4047 dev = &sh->dev[i]; in handle_stripe_clean_event()
4075 sh = list_first_entry(&sh->batch_list, in handle_stripe_clean_event()
4078 if (sh != head_sh) { in handle_stripe_clean_event()
4079 dev = &sh->dev[i]; in handle_stripe_clean_event()
4083 sh = head_sh; in handle_stripe_clean_event()
4084 dev = &sh->dev[i]; in handle_stripe_clean_event()
4089 log_stripe_write_finished(sh); in handle_stripe_clean_event()
4092 test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)) { in handle_stripe_clean_event()
4094 clear_bit(R5_Discard, &sh->dev[sh->pd_idx].flags); in handle_stripe_clean_event()
4095 clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags); in handle_stripe_clean_event()
4096 if (sh->qd_idx >= 0) { in handle_stripe_clean_event()
4097 clear_bit(R5_Discard, &sh->dev[sh->qd_idx].flags); in handle_stripe_clean_event()
4098 clear_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags); in handle_stripe_clean_event()
4101 clear_bit(STRIPE_DISCARD, &sh->state); in handle_stripe_clean_event()
4108 hash = sh->hash_lock_index; in handle_stripe_clean_event()
4110 remove_hash(sh); in handle_stripe_clean_event()
4113 sh = list_first_entry(&sh->batch_list, in handle_stripe_clean_event()
4115 if (sh != head_sh) in handle_stripe_clean_event()
4118 sh = head_sh; in handle_stripe_clean_event()
4120 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state)) in handle_stripe_clean_event()
4121 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe_clean_event()
4125 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) in handle_stripe_clean_event()
4149 struct stripe_head *sh, in handle_stripe_dirtying() argument
4164 (recovery_cp < MaxSector && sh->sector >= recovery_cp && in handle_stripe_dirtying()
4170 pr_debug("force RCW rmw_level=%u, recovery_cp=%llu sh->sector=%llu\n", in handle_stripe_dirtying()
4172 (unsigned long long)sh->sector); in handle_stripe_dirtying()
4175 struct r5dev *dev = &sh->dev[i]; in handle_stripe_dirtying()
4177 i == sh->pd_idx || i == sh->qd_idx || in handle_stripe_dirtying()
4189 i != sh->pd_idx && i != sh->qd_idx && in handle_stripe_dirtying()
4201 (unsigned long long)sh->sector, sh->state, rmw, rcw); in handle_stripe_dirtying()
4202 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe_dirtying()
4208 (unsigned long long)sh->sector, rmw); in handle_stripe_dirtying()
4210 struct r5dev *dev = &sh->dev[i]; in handle_stripe_dirtying()
4213 !test_bit(R5_LOCKED, &sh->dev[sh->pd_idx].flags)) { in handle_stripe_dirtying()
4228 r5c_use_extra_page(sh); in handle_stripe_dirtying()
4233 set_bit(STRIPE_DELAYED, &sh->state); in handle_stripe_dirtying()
4240 struct r5dev *dev = &sh->dev[i]; in handle_stripe_dirtying()
4242 i == sh->pd_idx || i == sh->qd_idx || in handle_stripe_dirtying()
4249 &sh->state)) { in handle_stripe_dirtying()
4256 set_bit(STRIPE_DELAYED, &sh->state); in handle_stripe_dirtying()
4265 struct r5dev *dev = &sh->dev[i]; in handle_stripe_dirtying()
4267 i != sh->pd_idx && i != sh->qd_idx && in handle_stripe_dirtying()
4274 &sh->state)) { in handle_stripe_dirtying()
4282 set_bit(STRIPE_DELAYED, &sh->state); in handle_stripe_dirtying()
4287 (unsigned long long)sh->sector, in handle_stripe_dirtying()
4288 rcw, qread, test_bit(STRIPE_DELAYED, &sh->state)); in handle_stripe_dirtying()
4292 !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in handle_stripe_dirtying()
4293 set_bit(STRIPE_DELAYED, &sh->state); in handle_stripe_dirtying()
4305 if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) && in handle_stripe_dirtying()
4307 !test_bit(STRIPE_BIT_DELAY, &sh->state))) in handle_stripe_dirtying()
4308 schedule_reconstruction(sh, s, rcw == 0, 0); in handle_stripe_dirtying()
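handle_stripe_dirtying() above chooses between read-modify-write (rmw) and reconstruct-write (rcw) by counting how many blocks each strategy would still have to read in, then takes the cheaper one; rmw_level == PARITY_DISABLE_RMW or a sector below recovery_cp forces RCW (the "force RCW" message at line 4170). The snippet below is a simplified cost comparison with assumed numbers, not the kernel's actual per-device loop, which also accounts for blocks already up to date or cached.

    /* Simplified rmw/rcw read-cost comparison; geometry and counts are assumed. */
    int data_disks = 4;            /* 5-device RAID-5                               */
    int blocks_written = 1;        /* data blocks the pending bios overwrite        */

    int rmw = blocks_written + 1;             /* read old data blocks + old parity  */
    int rcw = data_disks - blocks_written;    /* read the untouched data blocks     */
    /* blocks_written = 1: rmw = 2, rcw = 3, so prefer read-modify-write            */
    /* blocks_written = 3: rmw = 4, rcw = 1, so prefer reconstruct-write            */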
4312 static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh, in handle_parity_checks5() argument
4317 BUG_ON(sh->batch_head); in handle_parity_checks5()
4318 set_bit(STRIPE_HANDLE, &sh->state); in handle_parity_checks5()
4320 switch (sh->check_state) { in handle_parity_checks5()
4325 sh->check_state = check_state_run; in handle_parity_checks5()
4327 clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags); in handle_parity_checks5()
4331 dev = &sh->dev[s->failed_num[0]]; in handle_parity_checks5()
4334 sh->check_state = check_state_idle; in handle_parity_checks5()
4336 dev = &sh->dev[sh->pd_idx]; in handle_parity_checks5()
4339 if (test_bit(STRIPE_INSYNC, &sh->state)) in handle_parity_checks5()
4350 set_bit(STRIPE_INSYNC, &sh->state); in handle_parity_checks5()
4355 sh->check_state = check_state_idle; in handle_parity_checks5()
4367 if ((sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) == 0) in handle_parity_checks5()
4371 set_bit(STRIPE_INSYNC, &sh->state); in handle_parity_checks5()
4376 set_bit(STRIPE_INSYNC, &sh->state); in handle_parity_checks5()
4379 (unsigned long long) sh->sector, in handle_parity_checks5()
4380 (unsigned long long) sh->sector + in handle_parity_checks5()
4383 sh->check_state = check_state_compute_run; in handle_parity_checks5()
4384 set_bit(STRIPE_COMPUTE_RUN, &sh->state); in handle_parity_checks5()
4387 &sh->dev[sh->pd_idx].flags); in handle_parity_checks5()
4388 sh->ops.target = sh->pd_idx; in handle_parity_checks5()
4389 sh->ops.target2 = -1; in handle_parity_checks5()
4398 __func__, sh->check_state, in handle_parity_checks5()
4399 (unsigned long long) sh->sector); in handle_parity_checks5()
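The check state machine above ultimately asks whether the parity block still matches the data: XOR-ing every block of the stripe together, parity included, must give all zero bytes, which is what a clear SUM_CHECK_P-style result means. A self-contained illustration of that check, with fixed-size blocks and hypothetical names rather than the async_tx path the driver actually uses:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define BLOCK_BYTES 4096

/* Return true when P is consistent, i.e.
 * data[0] ^ data[1] ^ ... ^ parity == 0 for every byte. */
static bool raid5_parity_ok(const uint8_t *const *data, size_t ndata,
			    const uint8_t *parity)
{
	for (size_t off = 0; off < BLOCK_BYTES; off++) {
		uint8_t x = parity[off];

		for (size_t d = 0; d < ndata; d++)
			x ^= data[d][off];
		if (x)
			return false;	/* mismatch: parity needs repair */
	}
	return true;
}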
4404 static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh, in handle_parity_checks6() argument
4408 int pd_idx = sh->pd_idx; in handle_parity_checks6()
4409 int qd_idx = sh->qd_idx; in handle_parity_checks6()
4412 BUG_ON(sh->batch_head); in handle_parity_checks6()
4413 set_bit(STRIPE_HANDLE, &sh->state); in handle_parity_checks6()
4423 switch (sh->check_state) { in handle_parity_checks6()
4431 sh->check_state = check_state_run; in handle_parity_checks6()
4437 if (sh->check_state == check_state_run) in handle_parity_checks6()
4438 sh->check_state = check_state_run_pq; in handle_parity_checks6()
4440 sh->check_state = check_state_run_q; in handle_parity_checks6()
4444 sh->ops.zero_sum_result = 0; in handle_parity_checks6()
4446 if (sh->check_state == check_state_run) { in handle_parity_checks6()
4448 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); in handle_parity_checks6()
4451 if (sh->check_state >= check_state_run && in handle_parity_checks6()
4452 sh->check_state <= check_state_run_pq) { in handle_parity_checks6()
4464 sh->check_state = check_state_idle; in handle_parity_checks6()
4467 if (test_bit(STRIPE_INSYNC, &sh->state)) in handle_parity_checks6()
4475 dev = &sh->dev[s->failed_num[1]]; in handle_parity_checks6()
4481 dev = &sh->dev[s->failed_num[0]]; in handle_parity_checks6()
4486 if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) { in handle_parity_checks6()
4487 dev = &sh->dev[pd_idx]; in handle_parity_checks6()
4492 if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) { in handle_parity_checks6()
4493 dev = &sh->dev[qd_idx]; in handle_parity_checks6()
4501 dev - (struct r5dev *) &sh->dev)) { in handle_parity_checks6()
4507 set_bit(STRIPE_INSYNC, &sh->state); in handle_parity_checks6()
4514 sh->check_state = check_state_idle; in handle_parity_checks6()
4520 if (sh->ops.zero_sum_result == 0) { in handle_parity_checks6()
4523 set_bit(STRIPE_INSYNC, &sh->state); in handle_parity_checks6()
4529 sh->check_state = check_state_compute_result; in handle_parity_checks6()
4540 set_bit(STRIPE_INSYNC, &sh->state); in handle_parity_checks6()
4543 (unsigned long long) sh->sector, in handle_parity_checks6()
4544 (unsigned long long) sh->sector + in handle_parity_checks6()
4547 int *target = &sh->ops.target; in handle_parity_checks6()
4549 sh->ops.target = -1; in handle_parity_checks6()
4550 sh->ops.target2 = -1; in handle_parity_checks6()
4551 sh->check_state = check_state_compute_run; in handle_parity_checks6()
4552 set_bit(STRIPE_COMPUTE_RUN, &sh->state); in handle_parity_checks6()
4554 if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) { in handle_parity_checks6()
4556 &sh->dev[pd_idx].flags); in handle_parity_checks6()
4558 target = &sh->ops.target2; in handle_parity_checks6()
4561 if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) { in handle_parity_checks6()
4563 &sh->dev[qd_idx].flags); in handle_parity_checks6()
4574 __func__, sh->check_state, in handle_parity_checks6()
4575 (unsigned long long) sh->sector); in handle_parity_checks6()
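For RAID-6 the same idea extends to two syndromes: P is the plain XOR of the data blocks, and Q is a weighted sum over GF(2^8) where block i is multiplied by g^i (generator 0x02, field polynomial 0x11d). A hedged standalone sketch of computing both syndromes follows; it is an illustration of the math, not the optimized lib/raid6 code:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define BLOCK_BYTES 4096

/* Multiply by the GF(2^8) generator x (0x02), reducing by the RAID-6
 * polynomial x^8 + x^4 + x^3 + x^2 + 1 (0x11d). */
static uint8_t gf_xtime(uint8_t a)
{
	return (uint8_t)((a << 1) ^ ((a & 0x80) ? 0x1d : 0x00));
}

/* Compute P (xor) and Q (GF(2^8)-weighted xor) for one stripe.  Walking
 * the data blocks from the highest index down builds Q by Horner's rule:
 * Q = ((...(D[n-1])*g + D[n-2])*g + ...)*g + D[0]. */
static void raid6_compute_syndromes(const uint8_t *const *data, size_t ndata,
				    uint8_t *p, uint8_t *q)
{
	memset(p, 0, BLOCK_BYTES);
	memset(q, 0, BLOCK_BYTES);

	for (size_t d = ndata; d-- > 0; ) {
		for (size_t off = 0; off < BLOCK_BYTES; off++) {
			p[off] ^= data[d][off];
			q[off] = (uint8_t)(gf_xtime(q[off]) ^ data[d][off]);
		}
	}
}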
4580 static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh) in handle_stripe_expansion() argument
4588 BUG_ON(sh->batch_head); in handle_stripe_expansion()
4589 clear_bit(STRIPE_EXPAND_SOURCE, &sh->state); in handle_stripe_expansion()
4590 for (i = 0; i < sh->disks; i++) in handle_stripe_expansion()
4591 if (i != sh->pd_idx && i != sh->qd_idx) { in handle_stripe_expansion()
4596 sector_t bn = raid5_compute_blocknr(sh, i, 1); in handle_stripe_expansion()
4617 sh->dev[i].page, sh2->dev[dd_idx].offset, in handle_stripe_expansion()
4618 sh->dev[i].offset, RAID5_STRIPE_SIZE(conf), in handle_stripe_expansion()
4653 static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) in analyse_stripe() argument
4655 struct r5conf *conf = sh->raid_conf; in analyse_stripe()
4656 int disks = sh->disks; in analyse_stripe()
4663 s->expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state) && !sh->batch_head; in analyse_stripe()
4664 s->expanded = test_bit(STRIPE_EXPAND_READY, &sh->state) && !sh->batch_head; in analyse_stripe()
4677 dev = &sh->dev[i]; in analyse_stripe()
4688 !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) in analyse_stripe()
4717 rdev->recovery_offset >= sh->sector + RAID5_STRIPE_SECTORS(conf) && in analyse_stripe()
4718 !is_badblock(rdev, sh->sector, RAID5_STRIPE_SECTORS(conf), in analyse_stripe()
4732 is_bad = is_badblock(rdev, sh->sector, RAID5_STRIPE_SECTORS(conf), in analyse_stripe()
4759 else if (sh->sector + RAID5_STRIPE_SECTORS(conf) <= rdev->recovery_offset) in analyse_stripe()
4829 if (test_bit(STRIPE_SYNCING, &sh->state)) { in analyse_stripe()
4839 sh->sector >= conf->mddev->recovery_cp || in analyse_stripe()
4852 static int clear_batch_ready(struct stripe_head *sh) in clear_batch_ready() argument
4855 if (!test_and_clear_bit(STRIPE_BATCH_READY, &sh->state)) in clear_batch_ready()
4856 return (sh->batch_head && sh->batch_head != sh); in clear_batch_ready()
4857 spin_lock(&sh->stripe_lock); in clear_batch_ready()
4858 if (!sh->batch_head) { in clear_batch_ready()
4859 spin_unlock(&sh->stripe_lock); in clear_batch_ready()
4867 if (sh->batch_head != sh) { in clear_batch_ready()
4868 spin_unlock(&sh->stripe_lock); in clear_batch_ready()
4871 spin_lock(&sh->batch_lock); in clear_batch_ready()
4872 list_for_each_entry(tmp, &sh->batch_list, batch_list) in clear_batch_ready()
4874 spin_unlock(&sh->batch_lock); in clear_batch_ready()
4875 spin_unlock(&sh->stripe_lock); in clear_batch_ready()
4887 struct stripe_head *sh, *next; in break_stripe_batch_list() local
4891 list_for_each_entry_safe(sh, next, &head_sh->batch_list, batch_list) { in break_stripe_batch_list()
4893 list_del_init(&sh->batch_list); in break_stripe_batch_list()
4895 WARN_ONCE(sh->state & ((1 << STRIPE_ACTIVE) | in break_stripe_batch_list()
4906 "stripe state: %lx\n", sh->state); in break_stripe_batch_list()
4911 set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS | in break_stripe_batch_list()
4916 sh->check_state = head_sh->check_state; in break_stripe_batch_list()
4917 sh->reconstruct_state = head_sh->reconstruct_state; in break_stripe_batch_list()
4918 spin_lock_irq(&sh->stripe_lock); in break_stripe_batch_list()
4919 sh->batch_head = NULL; in break_stripe_batch_list()
4920 spin_unlock_irq(&sh->stripe_lock); in break_stripe_batch_list()
4921 for (i = 0; i < sh->disks; i++) { in break_stripe_batch_list()
4922 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) in break_stripe_batch_list()
4924 sh->dev[i].flags = head_sh->dev[i].flags & in break_stripe_batch_list()
4928 sh->state & handle_flags) in break_stripe_batch_list()
4929 set_bit(STRIPE_HANDLE, &sh->state); in break_stripe_batch_list()
4930 raid5_release_stripe(sh); in break_stripe_batch_list()
4945 static void handle_stripe(struct stripe_head *sh) in handle_stripe() argument
4948 struct r5conf *conf = sh->raid_conf; in handle_stripe()
4951 int disks = sh->disks; in handle_stripe()
4954 clear_bit(STRIPE_HANDLE, &sh->state); in handle_stripe()
4962 if (clear_batch_ready(sh)) in handle_stripe()
4965 if (test_and_set_bit_lock(STRIPE_ACTIVE, &sh->state)) { in handle_stripe()
4968 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe()
4972 if (test_and_clear_bit(STRIPE_BATCH_ERR, &sh->state)) in handle_stripe()
4973 break_stripe_batch_list(sh, 0); in handle_stripe()
4975 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state) && !sh->batch_head) { in handle_stripe()
4976 spin_lock(&sh->stripe_lock); in handle_stripe()
4981 if (!test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state) && in handle_stripe()
4982 !test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state) && in handle_stripe()
4983 !test_bit(STRIPE_DISCARD, &sh->state) && in handle_stripe()
4984 test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) { in handle_stripe()
4985 set_bit(STRIPE_SYNCING, &sh->state); in handle_stripe()
4986 clear_bit(STRIPE_INSYNC, &sh->state); in handle_stripe()
4987 clear_bit(STRIPE_REPLACED, &sh->state); in handle_stripe()
4989 spin_unlock(&sh->stripe_lock); in handle_stripe()
4991 clear_bit(STRIPE_DELAYED, &sh->state); in handle_stripe()
4995 (unsigned long long)sh->sector, sh->state, in handle_stripe()
4996 atomic_read(&sh->count), sh->pd_idx, sh->qd_idx, in handle_stripe()
4997 sh->check_state, sh->reconstruct_state); in handle_stripe()
4999 analyse_stripe(sh, &s); in handle_stripe()
5001 if (test_bit(STRIPE_LOG_TRAPPED, &sh->state)) in handle_stripe()
5006 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe()
5013 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe()
5021 if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) { in handle_stripe()
5023 set_bit(STRIPE_BIOFILL_RUN, &sh->state); in handle_stripe()
5039 sh->check_state = 0; in handle_stripe()
5040 sh->reconstruct_state = 0; in handle_stripe()
5041 break_stripe_batch_list(sh, 0); in handle_stripe()
5043 handle_failed_stripe(conf, sh, &s, disks); in handle_stripe()
5045 handle_failed_sync(conf, sh, &s); in handle_stripe()
5052 if (sh->reconstruct_state == reconstruct_state_prexor_drain_result) in handle_stripe()
5054 if (sh->reconstruct_state == reconstruct_state_drain_result || in handle_stripe()
5055 sh->reconstruct_state == reconstruct_state_prexor_drain_result) { in handle_stripe()
5056 sh->reconstruct_state = reconstruct_state_idle; in handle_stripe()
5061 BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags) && in handle_stripe()
5062 !test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)); in handle_stripe()
5063 BUG_ON(sh->qd_idx >= 0 && in handle_stripe()
5064 !test_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags) && in handle_stripe()
5065 !test_bit(R5_Discard, &sh->dev[sh->qd_idx].flags)); in handle_stripe()
5067 struct r5dev *dev = &sh->dev[i]; in handle_stripe()
5069 (i == sh->pd_idx || i == sh->qd_idx || in handle_stripe()
5079 ((i == sh->pd_idx || i == sh->qd_idx) && in handle_stripe()
5081 set_bit(STRIPE_INSYNC, &sh->state); in handle_stripe()
5084 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in handle_stripe()
5092 pdev = &sh->dev[sh->pd_idx]; in handle_stripe()
5093 s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx) in handle_stripe()
5094 || (s.failed >= 2 && s.failed_num[1] == sh->pd_idx); in handle_stripe()
5095 qdev = &sh->dev[sh->qd_idx]; in handle_stripe()
5096 s.q_failed = (s.failed >= 1 && s.failed_num[0] == sh->qd_idx) in handle_stripe()
5097 || (s.failed >= 2 && s.failed_num[1] == sh->qd_idx) in handle_stripe()
5109 handle_stripe_clean_event(conf, sh, disks); in handle_stripe()
5112 r5c_handle_cached_data_endio(conf, sh, disks); in handle_stripe()
5113 log_stripe_write_finished(sh); in handle_stripe()
5124 handle_stripe_fill(sh, &s, disks); in handle_stripe()
5131 r5c_finish_stripe_write_out(conf, sh, &s); in handle_stripe()
5142 if (!sh->reconstruct_state && !sh->check_state && !sh->log_io) { in handle_stripe()
5145 handle_stripe_dirtying(conf, sh, &s, disks); in handle_stripe()
5151 ret = r5c_try_caching_write(conf, sh, &s, in handle_stripe()
5162 (!test_bit(STRIPE_R5C_CACHING, &sh->state) && in handle_stripe()
5164 ret = handle_stripe_dirtying(conf, sh, &s, in handle_stripe()
5177 if (sh->check_state || in handle_stripe()
5179 !test_bit(STRIPE_COMPUTE_RUN, &sh->state) && in handle_stripe()
5180 !test_bit(STRIPE_INSYNC, &sh->state))) { in handle_stripe()
5182 handle_parity_checks6(conf, sh, &s, disks); in handle_stripe()
5184 handle_parity_checks5(conf, sh, &s, disks); in handle_stripe()
5188 && !test_bit(STRIPE_COMPUTE_RUN, &sh->state) in handle_stripe()
5189 && !test_bit(STRIPE_REPLACED, &sh->state)) { in handle_stripe()
5192 if (test_bit(R5_NeedReplace, &sh->dev[i].flags)) { in handle_stripe()
5193 WARN_ON(!test_bit(R5_UPTODATE, &sh->dev[i].flags)); in handle_stripe()
5194 set_bit(R5_WantReplace, &sh->dev[i].flags); in handle_stripe()
5195 set_bit(R5_LOCKED, &sh->dev[i].flags); in handle_stripe()
5199 set_bit(STRIPE_INSYNC, &sh->state); in handle_stripe()
5200 set_bit(STRIPE_REPLACED, &sh->state); in handle_stripe()
5203 !test_bit(STRIPE_COMPUTE_RUN, &sh->state) && in handle_stripe()
5204 test_bit(STRIPE_INSYNC, &sh->state)) { in handle_stripe()
5206 clear_bit(STRIPE_SYNCING, &sh->state); in handle_stripe()
5207 if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags)) in handle_stripe()
5216 struct r5dev *dev = &sh->dev[s.failed_num[i]]; in handle_stripe()
5233 if (sh->reconstruct_state == reconstruct_state_result) { in handle_stripe()
5235 = raid5_get_active_stripe(conf, NULL, sh->sector, in handle_stripe()
5239 /* sh cannot be written until sh_src has been read. in handle_stripe()
5240 * so arrange for sh to be delayed a little in handle_stripe()
5242 set_bit(STRIPE_DELAYED, &sh->state); in handle_stripe()
5243 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe()
5253 sh->reconstruct_state = reconstruct_state_idle; in handle_stripe()
5254 clear_bit(STRIPE_EXPANDING, &sh->state); in handle_stripe()
5256 set_bit(R5_Wantwrite, &sh->dev[i].flags); in handle_stripe()
5257 set_bit(R5_LOCKED, &sh->dev[i].flags); in handle_stripe()
5262 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) && in handle_stripe()
5263 !sh->reconstruct_state) { in handle_stripe()
5265 sh->disks = conf->raid_disks; in handle_stripe()
5266 stripe_set_idx(sh->sector, conf, 0, sh); in handle_stripe()
5267 schedule_reconstruction(sh, &s, 1, 1); in handle_stripe()
5268 } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) { in handle_stripe()
5269 clear_bit(STRIPE_EXPAND_READY, &sh->state); in handle_stripe()
5276 !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) in handle_stripe()
5277 handle_stripe_expansion(conf, sh); in handle_stripe()
5297 struct r5dev *dev = &sh->dev[i]; in handle_stripe()
5301 if (!rdev_set_badblocks(rdev, sh->sector, in handle_stripe()
5308 rdev_clear_badblocks(rdev, sh->sector, in handle_stripe()
5317 rdev_clear_badblocks(rdev, sh->sector, in handle_stripe()
5324 raid_run_ops(sh, s.ops_request); in handle_stripe()
5326 ops_run_io(sh, &s); in handle_stripe()
5339 clear_bit_unlock(STRIPE_ACTIVE, &sh->state); in handle_stripe()
5348 struct stripe_head *sh; in raid5_activate_delayed() local
5349 sh = list_entry(l, struct stripe_head, lru); in raid5_activate_delayed()
5351 clear_bit(STRIPE_DELAYED, &sh->state); in raid5_activate_delayed()
5352 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in raid5_activate_delayed()
5354 list_add_tail(&sh->lru, &conf->hold_list); in raid5_activate_delayed()
5355 raid5_wakeup_stripe_thread(sh); in raid5_activate_delayed()
5368 struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru); in activate_bit_delay() local
5370 list_del_init(&sh->lru); in activate_bit_delay()
5371 atomic_inc(&sh->count); in activate_bit_delay()
5372 hash = sh->hash_lock_index; in activate_bit_delay()
5373 __release_stripe(conf, sh, &temp_inactive_list[hash]); in activate_bit_delay()
5576 struct stripe_head *sh, *tmp; in __get_priority_stripe() local
5586 sh = NULL; in __get_priority_stripe()
5612 sh = list_entry(handle_list->next, typeof(*sh), lru); in __get_priority_stripe()
5616 else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) { in __get_priority_stripe()
5636 sh = tmp; in __get_priority_stripe()
5641 if (sh) { in __get_priority_stripe()
5649 if (!sh) { in __get_priority_stripe()
5659 sh->group = NULL; in __get_priority_stripe()
5661 list_del_init(&sh->lru); in __get_priority_stripe()
5662 BUG_ON(atomic_inc_return(&sh->count) != 1); in __get_priority_stripe()
5663 return sh; in __get_priority_stripe()
5676 struct stripe_head *sh; in raid5_unplug() local
5685 sh = list_first_entry(&cb->list, struct stripe_head, lru); in raid5_unplug()
5686 list_del_init(&sh->lru); in raid5_unplug()
5693 clear_bit(STRIPE_ON_UNPLUG_LIST, &sh->state); in raid5_unplug()
5698 hash = sh->hash_lock_index; in raid5_unplug()
5699 __release_stripe(conf, sh, &cb->temp_inactive_list[hash]); in raid5_unplug()
5712 struct stripe_head *sh) in release_stripe_plug() argument
5720 raid5_release_stripe(sh); in release_stripe_plug()
5733 if (!test_and_set_bit(STRIPE_ON_UNPLUG_LIST, &sh->state)) in release_stripe_plug()
5734 list_add_tail(&sh->lru, &cb->list); in release_stripe_plug()
5736 raid5_release_stripe(sh); in release_stripe_plug()
5743 struct stripe_head *sh; in make_discard_request() local
5773 sh = raid5_get_active_stripe(conf, NULL, logical_sector, 0); in make_discard_request()
5776 set_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags); in make_discard_request()
5777 if (test_bit(STRIPE_SYNCING, &sh->state)) { in make_discard_request()
5778 raid5_release_stripe(sh); in make_discard_request()
5782 clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags); in make_discard_request()
5783 spin_lock_irq(&sh->stripe_lock); in make_discard_request()
5785 if (d == sh->pd_idx || d == sh->qd_idx) in make_discard_request()
5787 if (sh->dev[d].towrite || sh->dev[d].toread) { in make_discard_request()
5788 set_bit(R5_Overlap, &sh->dev[d].flags); in make_discard_request()
5789 spin_unlock_irq(&sh->stripe_lock); in make_discard_request()
5790 raid5_release_stripe(sh); in make_discard_request()
5795 set_bit(STRIPE_DISCARD, &sh->state); in make_discard_request()
5797 sh->overwrite_disks = 0; in make_discard_request()
5799 if (d == sh->pd_idx || d == sh->qd_idx) in make_discard_request()
5801 sh->dev[d].towrite = bi; in make_discard_request()
5802 set_bit(R5_OVERWRITE, &sh->dev[d].flags); in make_discard_request()
5805 sh->overwrite_disks++; in make_discard_request()
5807 spin_unlock_irq(&sh->stripe_lock); in make_discard_request()
5809 sh->bm_seq = conf->seq_flush + 1; in make_discard_request()
5810 set_bit(STRIPE_BIT_DELAY, &sh->state); in make_discard_request()
5813 set_bit(STRIPE_HANDLE, &sh->state); in make_discard_request()
5814 clear_bit(STRIPE_DELAYED, &sh->state); in make_discard_request()
5815 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in make_discard_request()
5817 release_stripe_plug(mddev, sh); in make_discard_request()
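make_discard_request only acts on complete stripes: a discard covering every data block of a stripe can mark them all for overwrite, whereas a partial one would force a parity read-modify-write, so the requested range is first trimmed to whole-stripe boundaries. A small sketch of that trimming arithmetic, with hypothetical names:

#include <stdbool.h>
#include <stdint.h>

typedef uint64_t sector_t;

/* Clip [start, end) to the largest sub-range made of complete stripes.
 * stripe_sectors is chunk_sectors * data_disks, i.e. one full stripe of
 * data.  Returns false when the request never covers a whole stripe. */
static bool clip_to_full_stripes(sector_t start, sector_t end,
				 sector_t stripe_sectors,
				 sector_t *first, sector_t *last)
{
	sector_t lo = ((start + stripe_sectors - 1) / stripe_sectors)
			* stripe_sectors;			/* round start up  */
	sector_t hi = (end / stripe_sectors) * stripe_sectors;	/* round end down  */

	if (lo >= hi)
		return false;

	*first = lo;
	*last = hi;
	return true;
}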
5838 struct stripe_head *sh) in stripe_ahead_of_reshape() argument
5844 for (dd_idx = 0; dd_idx < sh->disks; dd_idx++) { in stripe_ahead_of_reshape()
5845 if (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx) in stripe_ahead_of_reshape()
5848 min_sector = min(min_sector, sh->dev[dd_idx].sector); in stripe_ahead_of_reshape()
5849 max_sector = max(max_sector, sh->dev[dd_idx].sector); in stripe_ahead_of_reshape()
5865 struct stripe_request_ctx *ctx, struct stripe_head *sh, in add_all_stripe_bios() argument
5871 spin_lock_irq(&sh->stripe_lock); in add_all_stripe_bios()
5873 for (dd_idx = 0; dd_idx < sh->disks; dd_idx++) { in add_all_stripe_bios()
5874 struct r5dev *dev = &sh->dev[dd_idx]; in add_all_stripe_bios()
5876 if (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx) in add_all_stripe_bios()
5883 if (stripe_bio_overlaps(sh, bi, dd_idx, forwrite)) { in add_all_stripe_bios()
5893 for (dd_idx = 0; dd_idx < sh->disks; dd_idx++) { in add_all_stripe_bios()
5894 struct r5dev *dev = &sh->dev[dd_idx]; in add_all_stripe_bios()
5896 if (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx) in add_all_stripe_bios()
5903 __add_stripe_bio(sh, bi, dd_idx, forwrite, previous); in add_all_stripe_bios()
5909 spin_unlock_irq(&sh->stripe_lock); in add_all_stripe_bios()
6013 struct stripe_head *sh; in make_stripe_request() local
6040 sh = raid5_get_active_stripe(conf, ctx, new_sector, flags); in make_stripe_request()
6041 if (unlikely(!sh)) { in make_stripe_request()
6048 stripe_ahead_of_reshape(mddev, conf, sh)) { in make_stripe_request()
6053 * 'sh', we know that if that happens, in make_stripe_request()
6067 if (test_bit(STRIPE_EXPANDING, &sh->state) || in make_stripe_request()
6068 !add_all_stripe_bios(conf, ctx, sh, bi, rw, previous)) { in make_stripe_request()
6078 if (stripe_can_batch(sh)) { in make_stripe_request()
6079 stripe_add_to_batch_list(conf, sh, ctx->batch_last); in make_stripe_request()
6082 atomic_inc(&sh->count); in make_stripe_request()
6083 ctx->batch_last = sh; in make_stripe_request()
6087 set_bit(STRIPE_R5C_PREFLUSH, &sh->state); in make_stripe_request()
6092 set_bit(STRIPE_HANDLE, &sh->state); in make_stripe_request()
6093 clear_bit(STRIPE_DELAYED, &sh->state); in make_stripe_request()
6094 if ((!sh->batch_head || sh == sh->batch_head) && in make_stripe_request()
6096 !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in make_stripe_request()
6099 release_stripe_plug(mddev, sh); in make_stripe_request()
6103 raid5_release_stripe(sh); in make_stripe_request()
6126 struct stripe_head sh; in raid5_bio_lowest_chunk_sector() local
6132 sector = raid5_compute_sector(conf, r_sector, 0, &dd_idx, &sh); in raid5_bio_lowest_chunk_sector()
6141 while (dd_idx == sh.pd_idx || dd_idx == sh.qd_idx) in raid5_bio_lowest_chunk_sector()
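raid5_compute_sector maps an array sector to a stripe number, a data-disk slot and the rotating parity position; the exact rotation depends on the configured layout. The sketch below shows one simple rotating-parity scheme (parity moving down one disk per stripe, data slots counted past it) purely for illustration; it is an assumption of a simplified layout, not md's full layout handling:

#include <stdint.h>

typedef uint64_t sector_t;

struct simple_map {
	sector_t stripe;	/* which stripe the sector falls in      */
	unsigned dd_idx;	/* device holding the data chunk         */
	unsigned pd_idx;	/* device holding parity for this stripe */
	sector_t dev_sector;	/* offset of that chunk on the device    */
};

/* Map an array sector assuming a simple rotating-parity layout: parity
 * starts on the last disk and moves down one disk per stripe; data
 * chunks fill the remaining slots in order, skipping the parity disk. */
static struct simple_map map_sector(sector_t logical, unsigned chunk_sectors,
				    unsigned raid_disks)
{
	unsigned data_disks = raid_disks - 1;
	sector_t chunk = logical / chunk_sectors;
	sector_t offset = logical % chunk_sectors;
	struct simple_map m;

	m.stripe = chunk / data_disks;
	m.dd_idx = (unsigned)(chunk % data_disks);
	m.pd_idx = (unsigned)(data_disks - (m.stripe % raid_disks));
	if (m.dd_idx >= m.pd_idx)	/* data slots skip over the parity disk */
		m.dd_idx++;
	m.dev_sector = m.stripe * chunk_sectors + offset;
	return m;
}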
6289 struct stripe_head *sh; in reshape_request() local
6446 sh = raid5_get_active_stripe(conf, NULL, stripe_addr+i, in reshape_request()
6448 set_bit(STRIPE_EXPANDING, &sh->state); in reshape_request()
6453 for (j=sh->disks; j--;) { in reshape_request()
6455 if (j == sh->pd_idx) in reshape_request()
6458 j == sh->qd_idx) in reshape_request()
6460 s = raid5_compute_blocknr(sh, j, 0); in reshape_request()
6465 memset(page_address(sh->dev[j].page), 0, RAID5_STRIPE_SIZE(conf)); in reshape_request()
6466 set_bit(R5_Expanded, &sh->dev[j].flags); in reshape_request()
6467 set_bit(R5_UPTODATE, &sh->dev[j].flags); in reshape_request()
6470 set_bit(STRIPE_EXPAND_READY, &sh->state); in reshape_request()
6471 set_bit(STRIPE_HANDLE, &sh->state); in reshape_request()
6473 list_add(&sh->lru, &stripes); in reshape_request()
6496 sh = raid5_get_active_stripe(conf, NULL, first_sector, in reshape_request()
6498 set_bit(STRIPE_EXPAND_SOURCE, &sh->state); in reshape_request()
6499 set_bit(STRIPE_HANDLE, &sh->state); in reshape_request()
6500 raid5_release_stripe(sh); in reshape_request()
6507 sh = list_entry(stripes.next, struct stripe_head, lru); in reshape_request()
6508 list_del_init(&sh->lru); in reshape_request()
6509 raid5_release_stripe(sh); in reshape_request()
6558 struct stripe_head *sh; in raid5_sync_request() local
6617 sh = raid5_get_active_stripe(conf, NULL, sector_nr, in raid5_sync_request()
6619 if (sh == NULL) { in raid5_sync_request()
6620 sh = raid5_get_active_stripe(conf, NULL, sector_nr, 0); in raid5_sync_request()
6641 set_bit(STRIPE_SYNC_REQUESTED, &sh->state); in raid5_sync_request()
6642 set_bit(STRIPE_HANDLE, &sh->state); in raid5_sync_request()
6644 raid5_release_stripe(sh); in raid5_sync_request()
6662 struct stripe_head *sh; in retry_aligned_read() local
6683 sh = raid5_get_active_stripe(conf, NULL, sector, in retry_aligned_read()
6685 if (!sh) { in retry_aligned_read()
6692 if (!add_stripe_bio(sh, raid_bio, dd_idx, 0, 0)) { in retry_aligned_read()
6693 raid5_release_stripe(sh); in retry_aligned_read()
6699 set_bit(R5_ReadNoMerge, &sh->dev[dd_idx].flags); in retry_aligned_read()
6700 handle_stripe(sh); in retry_aligned_read()
6701 raid5_release_stripe(sh); in retry_aligned_read()
6717 struct stripe_head *batch[MAX_STRIPE_BATCH], *sh; in handle_active_stripes() local
6722 (sh = __get_priority_stripe(conf, group)) != NULL) in handle_active_stripes()
6723 batch[batch_size++] = sh; in handle_active_stripes()