Lines Matching full:stripe
65 /* while we're doing rmw on a stripe
67 * lock the stripe and merge more rbios
73 * LRU list for the stripe cache
84 * to add more bios into the stripe
93 * stripe locking code also uses it to hand off
94 * the stripe lock to the next pending IO
104 /* size of each individual stripe on disk */
121 /* first bad stripe */
124 /* second bad stripe (for raid6 use) */
130 * stripe
137 * stripe or not
166 * bitmap to record which horizontal stripe has data
198 * the stripe hash table is used for locking, and to collect
199 * bios in hopes of making a full stripe
244 * use the page uptodate bit in the stripe cache array
278 * we hash on the first logical address of the stripe
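The matches above describe the stripe hash table: rbios are keyed by the first logical address of their full stripe, and each bucket is used both for locking and for collecting bios until a full stripe accumulates. A minimal userspace sketch of the keying idea; the bucket count, shift and names are illustrative, not the kernel's:

    #include <stdint.h>

    #define NR_BUCKETS 1024u   /* assumed power of two */

    /* key a full stripe by the logical address of its first byte */
    static unsigned int stripe_bucket(uint64_t full_stripe_start)
    {
            /* full stripes are large and aligned, so drop the low bits first */
            return (unsigned int)((full_stripe_start >> 16) & (NR_BUCKETS - 1));
    }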
296 * stealing an rbio means taking all the uptodate pages from the stripe
456 * insert an rbio into the stripe cache. It
527 * Returns true if the bio list inside this rbio covers an entire stripe (no
548 * the same stripe and if they are both going in the same
581 * We need to read the full stripe from the drive. in rbio_can_merge()
586 * change this stripe needs to do their own rmw. in rbio_can_merge()
616 static int rbio_stripe_page_index(struct btrfs_raid_bio *rbio, int stripe, in rbio_stripe_page_index() argument
619 return stripe * rbio->stripe_npages + index; in rbio_stripe_page_index()
626 static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe, in rbio_stripe_page() argument
629 return rbio->stripe_pages[rbio_stripe_page_index(rbio, stripe, index)]; in rbio_stripe_page()
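The two helpers above address one flat page array that holds every stripe's pages back to back. A standalone sketch of the same arithmetic (names shortened; only the indexing shown by the matches is assumed):

    #include <stdio.h>

    /* layout: [stripe 0: npages][stripe 1: npages]...[parity stripes] */
    static int stripe_page_index(int stripe, int npages_per_stripe, int index)
    {
            return stripe * npages_per_stripe + index;
    }

    int main(void)
    {
            /* e.g. 64K stripes of 4K pages -> 16 pages each;
             * page 3 of stripe 2 lands in slot 35 */
            printf("%d\n", stripe_page_index(2, 16, 3));
            return 0;
    }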
652 * The first stripe in the table for a logical address
655 * 1) Nobody has the stripe locked yet. The rbio is given
659 * 2) Someone has the stripe locked, but we're able to merge
663 * 3) Someone has the stripe locked, but we're not able to merge.
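Those three outcomes read as a small decision procedure. A hedged sketch of the shape of that logic, not the kernel's function:

    enum lock_result { LOCK_ACQUIRED, MERGED, QUEUED };

    /* 1) nobody holds the stripe lock      -> this rbio takes it
     * 2) held, but the holder can merge us -> fold our bios into it
     * 3) held and not mergeable            -> wait on the holder's list */
    static enum lock_result lock_stripe(int lock_held, int can_merge)
    {
            if (!lock_held)
                    return LOCK_ACQUIRED;
            if (can_merge)
                    return MERGED;
            return QUEUED;
    }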
755 * rbios waiting for this stripe, the next one on the list will be started
792 * waiting for the chance to lock this stripe. in unlock_stripe()
906 * get here, we've written a full stripe
936 * else. This function decides if a given index (stripe number)
937 * and page number in that stripe fall inside the original bio
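A simplified model of that decision, assuming the original bio covers one contiguous logical range and treating offsets as relative to the start of the full stripe (the real helper walks the rbio's bio list; these names are illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    #define PAGE_SZ 4096ull

    /* does page 'pagenr' of data stripe 'stripe' fall inside the byte
     * range covered by the caller's bio? */
    static bool page_covered_by_bio(int stripe, int pagenr, uint64_t stripe_len,
                                    uint64_t bio_off, uint64_t bio_len)
    {
            uint64_t page_off = (uint64_t)stripe * stripe_len + pagenr * PAGE_SZ;

            return page_off >= bio_off && page_off + PAGE_SZ <= bio_off + bio_len;
    }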
968 * number of pages we need for the entire stripe across all the
1083 * add a single page from a specific stripe into our list of bios for IO
1097 struct btrfs_bio_stripe *stripe; in rbio_add_io_page() local
1100 stripe = &rbio->bbio->stripes[stripe_nr]; in rbio_add_io_page()
1101 disk_start = stripe->physical + (page_index << PAGE_SHIFT); in rbio_add_io_page()
1103 /* if the device is missing, just fail this stripe */ in rbio_add_io_page()
1104 if (!stripe->dev->bdev) in rbio_add_io_page()
1117 last->bi_disk == stripe->dev->bdev->bd_disk && in rbio_add_io_page()
1118 last->bi_partno == stripe->dev->bdev->bd_partno) { in rbio_add_io_page()
1127 btrfs_io_bio(bio)->device = stripe->dev; in rbio_add_io_page()
1129 bio_set_dev(bio, stripe->dev->bdev); in rbio_add_io_page()
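The matches above show the merge test in rbio_add_io_page: reuse the tail bio when the new page goes to the same disk and is physically contiguous with it, otherwise start a new bio. A userspace model of just that choice; the struct and fields are stand-ins, not kernel types:

    #include <stdbool.h>
    #include <stdint.h>

    struct pending_io {
            const void *disk;      /* stands in for bd_disk/bd_partno */
            uint64_t    phys_end;  /* physical byte just past the last page added */
    };

    /* true  -> append the page to the existing bio
     * false -> the caller allocates a fresh bio for it */
    static bool can_append(const struct pending_io *last, const void *disk,
                           uint64_t disk_start)
    {
            return last && last->disk == disk && last->phys_end == disk_start;
    }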
1157 * searching through the bio list as we set up the IO in finish_rmw or stripe
1192 * have a full stripe from the higher layers, or we've read all
1203 int stripe; in finish_rmw() local
1222 /* at this point we either have a full stripe, in finish_rmw()
1223 * or we've read the full stripe from the drive. in finish_rmw()
1228 * change this stripe needs to do their own rmw. in finish_rmw()
1253 /* first collect one page from each data stripe */ in finish_rmw()
1254 for (stripe = 0; stripe < nr_data; stripe++) { in finish_rmw()
1255 p = page_in_rbio(rbio, stripe, pagenr, 0); in finish_rmw()
1256 pointers[stripe] = kmap(p); in finish_rmw()
1259 /* then add the parity stripe */ in finish_rmw()
1262 pointers[stripe++] = kmap(p); in finish_rmw()
1272 pointers[stripe++] = kmap(p); in finish_rmw()
1283 for (stripe = 0; stripe < rbio->real_stripes; stripe++) in finish_rmw()
1284 kunmap(page_in_rbio(rbio, stripe, pagenr, 0)); in finish_rmw()
1292 for (stripe = 0; stripe < rbio->real_stripes; stripe++) { in finish_rmw()
1296 /* This vertical stripe has no data, skip it. */ in finish_rmw()
1300 if (stripe < rbio->nr_data) { in finish_rmw()
1301 page = page_in_rbio(rbio, stripe, pagenr, 1); in finish_rmw()
1305 page = rbio_stripe_page(rbio, stripe, pagenr); in finish_rmw()
1309 page, stripe, pagenr, rbio->stripe_len); in finish_rmw()
1318 for (stripe = 0; stripe < rbio->real_stripes; stripe++) { in finish_rmw()
1319 if (!bbio->tgtdev_map[stripe]) in finish_rmw()
1325 /* This vertical stripe has no data, skip it. */ in finish_rmw()
1329 if (stripe < rbio->nr_data) { in finish_rmw()
1330 page = page_in_rbio(rbio, stripe, pagenr, 1); in finish_rmw()
1334 page = rbio_stripe_page(rbio, stripe, pagenr); in finish_rmw()
1338 rbio->bbio->tgtdev_map[stripe], in finish_rmw()
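What the finish_rmw loop above computes per page position: P is the XOR of the data-stripe pages, and for RAID6 the Q page comes from the raid6 library's gen_syndrome rather than open-coded math. A minimal RAID5-style sketch of the P computation, assuming equal-sized page buffers:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* pointers[0..nr_data-1] are the data pages for one vertical position;
     * p receives the parity page for that position */
    static void gen_p(uint8_t *p, uint8_t * const *pointers, int nr_data,
                      size_t page_size)
    {
            memcpy(p, pointers[0], page_size);
            for (int d = 1; d < nr_data; d++)
                    for (size_t i = 0; i < page_size; i++)
                            p[i] ^= pointers[d][i];
    }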
1366 * helper to find the stripe number for a given bio. Used to figure out which
1367 * stripe has failed. This expects the bio to correspond to a physical disk,
1375 struct btrfs_bio_stripe *stripe; in find_bio_stripe() local
1380 stripe = &rbio->bbio->stripes[i]; in find_bio_stripe()
1381 if (in_range(physical, stripe->physical, rbio->stripe_len) && in find_bio_stripe()
1382 stripe->dev->bdev && in find_bio_stripe()
1383 bio->bi_disk == stripe->dev->bdev->bd_disk && in find_bio_stripe()
1384 bio->bi_partno == stripe->dev->bdev->bd_partno) { in find_bio_stripe()
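A standalone model of that lookup: walk the per-device stripes and report which one a completed bio's physical address (on the right disk) lands in. The struct is a stand-in; the real helper also compares the partition:

    #include <stdint.h>

    struct dev_stripe {
            const void *disk;      /* device identity */
            uint64_t    physical;  /* start of this stripe on that device */
    };

    /* returns the matching stripe index, or -1 if none */
    static int find_stripe(const struct dev_stripe *stripes, int nr_stripes,
                           uint64_t stripe_len, const void *disk,
                           uint64_t physical)
    {
            for (int i = 0; i < nr_stripes; i++) {
                    if (stripes[i].disk == disk &&
                        physical >= stripes[i].physical &&
                        physical <  stripes[i].physical + stripe_len)
                            return i;
            }
            return -1;
    }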
1392 * helper to find the stripe number for a given
1393 * bio (before mapping). Used to figure out which stripe has
1421 /* we already know this stripe is bad, move on */ in fail_rbio_index()
1443 * helper to fail a stripe based on a physical disk
1474 * stripe bios we've read from the disk so we can recalculate the parity of the
1475 * stripe.
1511 * the stripe must be locked by the caller. It will
1520 int stripe; in raid56_rmw_stripe() local
1534 * stripe in raid56_rmw_stripe()
1536 for (stripe = 0; stripe < rbio->nr_data; stripe++) { in raid56_rmw_stripe()
1543 * we don't need to read it off the stripe. in raid56_rmw_stripe()
1545 page = page_in_rbio(rbio, stripe, pagenr, 1); in raid56_rmw_stripe()
1549 page = rbio_stripe_page(rbio, stripe, pagenr); in raid56_rmw_stripe()
1558 stripe, pagenr, rbio->stripe_len); in raid56_rmw_stripe()
1570 * safe to do the full stripe write yet. in raid56_rmw_stripe()
1606 * if the upper layers pass in a full stripe, we thank them by only allocating
1626 * partial stripe writes get handed over to async helpers.
1643 * a full stripe. So we do a check here to see if we can
1648 /* head off into rmw land if we don't have a full stripe */ in __raid56_parity_write()
1656 * Any time we get a partial stripe write while plugged
1707 /* we have a full stripe, send it down */ in run_plug()
1838 int pagenr, stripe; in __raid_recover_end_io() local
1873 * from each stripe in __raid_recover_end_io()
1875 for (stripe = 0; stripe < rbio->real_stripes; stripe++) { in __raid_recover_end_io()
1882 (stripe == faila || stripe == failb)) { in __raid_recover_end_io()
1883 page = page_in_rbio(rbio, stripe, pagenr, 0); in __raid_recover_end_io()
1885 page = rbio_stripe_page(rbio, stripe, pagenr); in __raid_recover_end_io()
1887 pointers[stripe] = kmap(page); in __raid_recover_end_io()
1899 * Just the P stripe has failed, without in __raid_recover_end_io()
1900 * a bad data or Q stripe. in __raid_recover_end_io()
1917 /* if the q stripe is failed, do a pstripe reconstruction in __raid_recover_end_io()
1919 * If both the q stripe and the P stripe are failed, we're in __raid_recover_end_io()
1930 * otherwise we have one bad data stripe and in __raid_recover_end_io()
1931 * a good P stripe. raid5! in __raid_recover_end_io()
1947 /* rebuild from P stripe here (raid5 or raid6) */ in __raid_recover_end_io()
1955 for (stripe = faila; stripe < rbio->nr_data - 1; stripe++) in __raid_recover_end_io()
1956 pointers[stripe] = pointers[stripe + 1]; in __raid_recover_end_io()
1980 for (stripe = 0; stripe < rbio->real_stripes; stripe++) { in __raid_recover_end_io()
1987 (stripe == faila || stripe == failb)) { in __raid_recover_end_io()
1988 page = page_in_rbio(rbio, stripe, pagenr, 0); in __raid_recover_end_io()
1990 page = rbio_stripe_page(rbio, stripe, pagenr); in __raid_recover_end_io()
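The simplest case above is the RAID5 one: a single failed data stripe is rebuilt as the XOR of the surviving data pages and P ("rebuild from P stripe"). A minimal sketch of that reconstruction for one page position, assuming the failed index is known; the two-failure RAID6 cases go through the raid6 recovery helpers instead:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* pointers[0..nr_data-1] are data pages, pointers[nr_data] is P.
     * Rebuild the page at index 'faila' in place. */
    static void rebuild_from_p(uint8_t **pointers, int nr_data, int faila,
                               size_t page_size)
    {
            memset(pointers[faila], 0, page_size);
            for (int s = 0; s <= nr_data; s++) {
                    if (s == faila)
                            continue;
                    for (size_t i = 0; i < page_size; i++)
                            pointers[faila][i] ^= pointers[s][i];
            }
    }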
2053 * we only read stripe pages off the disk, set them in raid_recover_end_io()
2085 int stripe; in __raid56_parity_recover() local
2104 for (stripe = 0; stripe < rbio->real_stripes; stripe++) { in __raid56_parity_recover()
2105 if (rbio->faila == stripe || rbio->failb == stripe) { in __raid56_parity_recover()
2112 rbio_stripe_page(rbio, stripe, pagenr), in __raid56_parity_recover()
2113 stripe, pagenr, rbio->stripe_len); in __raid56_parity_recover()
2193 …"%s could not find the bad stripe in raid56 so that we cannot recover any more (bio has logical %l… in raid56_parity_recover()
2212 * for 'mirror_num > 2', select a stripe to fail on every retry. in raid56_parity_recover()
2216 * 'mirror == 3' is to fail the p stripe and in raid56_parity_recover()
2217 * reconstruct from the q stripe. 'mirror > 3' is to in raid56_parity_recover()
2218 * fail a data stripe and reconstruct from p+q stripe. in raid56_parity_recover()
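As the comment spells out, retries beyond mirror 2 deliberately mark an extra stripe as failed so reconstruction exercises a different combination: mirror 3 fails P and rebuilds from Q, larger mirror numbers fail successive data stripes and rebuild from P+Q. A hedged sketch of such a mapping, not the kernel's exact arithmetic:

    /* for raid6, real_stripes = nr_data + 2; P sits at nr_data, Q at nr_data + 1 */
    static int stripe_to_fail(int mirror_num, int nr_data)
    {
            if (mirror_num == 3)
                    return nr_data;                    /* fail P, rebuild from Q */
            if (mirror_num > 3)
                    return nr_data - (mirror_num - 3); /* fail a data stripe */
            return -1;                                 /* mirrors 1-2: nothing extra */
    }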
2263 * The following code is used to scrub/replace the parity stripe
2295 * stripe. in raid56_parity_alloc_scrub_rbio()
2367 int stripe; in finish_parity_scrub() local
2407 /* RAID6, allocate and map temp space for the Q stripe */ in finish_parity_scrub()
2419 /* Map the parity stripe just once */ in finish_parity_scrub()
2425 /* first collect one page from each data stripe */ in finish_parity_scrub()
2426 for (stripe = 0; stripe < nr_data; stripe++) { in finish_parity_scrub()
2427 p = page_in_rbio(rbio, stripe, pagenr, 0); in finish_parity_scrub()
2428 pointers[stripe] = kmap(p); in finish_parity_scrub()
2451 for (stripe = 0; stripe < nr_data; stripe++) in finish_parity_scrub()
2452 kunmap(page_in_rbio(rbio, stripe, pagenr, 0)); in finish_parity_scrub()
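The scrub path recomputes parity from the data pages and only rewrites the parity stripe where it disagrees with what is already on disk. A compact model of the per-page check (the real function also covers Q and an optional replace target); names here are illustrative:

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    /* recompute P from the data pages and compare with the on-disk copy;
     * returns true when that parity page needs to be rewritten */
    static bool parity_page_bad(uint8_t * const *data, int nr_data,
                                const uint8_t *ondisk_p, uint8_t *scratch,
                                size_t page_size)
    {
            memcpy(scratch, data[0], page_size);
            for (int d = 1; d < nr_data; d++)
                    for (size_t i = 0; i < page_size; i++)
                            scratch[i] ^= data[d][i];

            return memcmp(scratch, ondisk_p, page_size) != 0;
    }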
2518 static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe) in is_data_stripe() argument
2520 if (stripe >= 0 && stripe < rbio->nr_data) in is_data_stripe()
2568 * This means we got one corrupted data stripe and one in validate_rbio_for_parity_scrub()
2571 * the data, or we cannot repair the data stripe. in validate_rbio_for_parity_scrub()
2588 * stripe bios we've read from the disk so we can recalculate the parity of the
2589 * stripe.
2622 int stripe; in raid56_parity_scrub_stripe() local
2634 * stripe in raid56_parity_scrub_stripe()
2636 for (stripe = 0; stripe < rbio->real_stripes; stripe++) { in raid56_parity_scrub_stripe()
2643 * we don't need to read it off the stripe. in raid56_parity_scrub_stripe()
2645 page = page_in_rbio(rbio, stripe, pagenr, 1); in raid56_parity_scrub_stripe()
2649 page = rbio_stripe_page(rbio, stripe, pagenr); in raid56_parity_scrub_stripe()
2658 stripe, pagenr, rbio->stripe_len); in raid56_parity_scrub_stripe()
2670 * safe to do the full stripe write yet. in raid56_parity_scrub_stripe()