Lines Matching +full:data +full:- +full:mirror
1 // SPDX-License-Identifier: GPL-2.0
13 #include "disk-io.h"
14 #include "ordered-data.h"
18 #include "dev-replace.h"
19 #include "check-integrity.h"
21 #include "block-group.h"
25 #include "file-item.h"
29 * This is only the first step towards a full-featured scrub. It reads all
31 * is found or the extent cannot be read, good data will be written back if
35 * - In case an unrepairable extent is encountered, track which files are
37 * - track and record media errors, throw out bad devices
38 * - add a mode to also read unallocated space
54 * This would be 8M per device, the same value as the old scrub in-flight bios
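The 8M figure is plain stripe geometry. A minimal sketch of the arithmetic, assuming the usual 64K BTRFS_STRIPE_LEN and the 8-stripes-per-group / 16-groups-per-device constants this version of scrub.c uses (treat the exact values as assumptions and verify them against your tree):

/* Hypothetical constants mirroring scrub.c; not authoritative. */
#include <assert.h>

#define BTRFS_STRIPE_LEN	(64 * 1024)
#define SCRUB_STRIPES_PER_GROUP	8
#define SCRUB_GROUPS_PER_DEV	16
#define SCRUB_TOTAL_STRIPES	(SCRUB_STRIPES_PER_GROUP * SCRUB_GROUPS_PER_DEV)

int main(void)
{
	/* 8 stripes/group * 16 groups * 64K = 8M in flight per device. */
	assert(SCRUB_TOTAL_STRIPES * BTRFS_STRIPE_LEN == 8 * 1024 * 1024);
	return 0;
}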
73 * Csum pointer for data csum verification. Should point to a
76 * NULL if this data sector has no csum.
92 /* Set when the read-repair is finished. */
96 * Set for data stripes if it's triggered from P/Q stripe.
97 * During such scrub, we should not report errors in data stripes, nor
125 * How many data/meta extents are in this stripe. Only for scrub status
163 * IO and csum errors can happen for both metadata and data.
177 * Checksum for the whole stripe if this stripe is inside a data block
214 * Use a ref counter to avoid use-after-free issues. Scrub workers
238 if (stripe->pages[i]) in release_scrub_stripe()
239 __free_page(stripe->pages[i]); in release_scrub_stripe()
240 stripe->pages[i] = NULL; in release_scrub_stripe()
242 kfree(stripe->sectors); in release_scrub_stripe()
243 kfree(stripe->csums); in release_scrub_stripe()
244 stripe->sectors = NULL; in release_scrub_stripe()
245 stripe->csums = NULL; in release_scrub_stripe()
246 stripe->sctx = NULL; in release_scrub_stripe()
247 stripe->state = 0; in release_scrub_stripe()
257 stripe->nr_sectors = BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits; in init_scrub_stripe()
258 stripe->state = 0; in init_scrub_stripe()
260 init_waitqueue_head(&stripe->io_wait); in init_scrub_stripe()
261 init_waitqueue_head(&stripe->repair_wait); in init_scrub_stripe()
262 atomic_set(&stripe->pending_io, 0); in init_scrub_stripe()
263 spin_lock_init(&stripe->write_error_lock); in init_scrub_stripe()
265 ret = btrfs_alloc_page_array(SCRUB_STRIPE_PAGES, stripe->pages); in init_scrub_stripe()
269 stripe->sectors = kcalloc(stripe->nr_sectors, in init_scrub_stripe()
272 if (!stripe->sectors) in init_scrub_stripe()
275 stripe->csums = kcalloc(BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits, in init_scrub_stripe()
276 fs_info->csum_size, GFP_KERNEL); in init_scrub_stripe()
277 if (!stripe->csums) in init_scrub_stripe()
282 return -ENOMEM; in init_scrub_stripe()
287 wait_event(stripe->io_wait, atomic_read(&stripe->pending_io) == 0); in wait_scrub_stripe_io()
294 while (atomic_read(&fs_info->scrub_pause_req)) { in __scrub_blocked_if_needed()
295 mutex_unlock(&fs_info->scrub_lock); in __scrub_blocked_if_needed()
296 wait_event(fs_info->scrub_pause_wait, in __scrub_blocked_if_needed()
297 atomic_read(&fs_info->scrub_pause_req) == 0); in __scrub_blocked_if_needed()
298 mutex_lock(&fs_info->scrub_lock); in __scrub_blocked_if_needed()
304 atomic_inc(&fs_info->scrubs_paused); in scrub_pause_on()
305 wake_up(&fs_info->scrub_pause_wait); in scrub_pause_on()
310 mutex_lock(&fs_info->scrub_lock); in scrub_pause_off()
312 atomic_dec(&fs_info->scrubs_paused); in scrub_pause_off()
313 mutex_unlock(&fs_info->scrub_lock); in scrub_pause_off()
315 wake_up(&fs_info->scrub_pause_wait); in scrub_pause_off()
332 release_scrub_stripe(&sctx->stripes[i]); in scrub_free_ctx()
339 if (refcount_dec_and_test(&sctx->refs)) in scrub_put_ctx()
355 refcount_set(&sctx->refs, 1); in scrub_setup_ctx()
356 sctx->is_dev_replace = is_dev_replace; in scrub_setup_ctx()
357 sctx->fs_info = fs_info; in scrub_setup_ctx()
358 sctx->extent_path.search_commit_root = 1; in scrub_setup_ctx()
359 sctx->extent_path.skip_locking = 1; in scrub_setup_ctx()
360 sctx->csum_path.search_commit_root = 1; in scrub_setup_ctx()
361 sctx->csum_path.skip_locking = 1; in scrub_setup_ctx()
365 ret = init_scrub_stripe(fs_info, &sctx->stripes[i]); in scrub_setup_ctx()
368 sctx->stripes[i].sctx = sctx; in scrub_setup_ctx()
370 sctx->first_free = 0; in scrub_setup_ctx()
371 atomic_set(&sctx->cancel_req, 0); in scrub_setup_ctx()
373 spin_lock_init(&sctx->stat_lock); in scrub_setup_ctx()
374 sctx->throttle_deadline = 0; in scrub_setup_ctx()
376 mutex_init(&sctx->wr_lock); in scrub_setup_ctx()
378 WARN_ON(!fs_info->dev_replace.tgtdev); in scrub_setup_ctx()
379 sctx->wr_tgtdev = fs_info->dev_replace.tgtdev; in scrub_setup_ctx()
386 return ERR_PTR(-ENOMEM); in scrub_setup_ctx()
399 struct btrfs_fs_info *fs_info = swarn->dev->fs_info; in scrub_print_warning_inode()
417 ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0); in scrub_print_warning_inode()
420 btrfs_release_path(swarn->path); in scrub_print_warning_inode()
424 eb = swarn->path->nodes[0]; in scrub_print_warning_inode()
425 inode_item = btrfs_item_ptr(eb, swarn->path->slots[0], in scrub_print_warning_inode()
428 btrfs_release_path(swarn->path); in scrub_print_warning_inode()
436 ipath = init_ipath(4096, local_root, swarn->path); in scrub_print_warning_inode()
453 for (i = 0; i < ipath->fspath->elem_cnt; ++i) in scrub_print_warning_inode()
456 swarn->errstr, swarn->logical, in scrub_print_warning_inode()
457 btrfs_dev_name(swarn->dev), in scrub_print_warning_inode()
458 swarn->physical, in scrub_print_warning_inode()
460 fs_info->sectorsize, nlink, in scrub_print_warning_inode()
461 (char *)(unsigned long)ipath->fspath->val[i]); in scrub_print_warning_inode()
470 swarn->errstr, swarn->logical, in scrub_print_warning_inode()
471 btrfs_dev_name(swarn->dev), in scrub_print_warning_inode()
472 swarn->physical, in scrub_print_warning_inode()
482 struct btrfs_fs_info *fs_info = dev->fs_info; in scrub_print_common_warning()
514 eb = path->nodes[0]; in scrub_print_common_warning()
515 ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item); in scrub_print_common_warning()
516 item_size = btrfs_item_size(eb, path->slots[0]); in scrub_print_common_warning()
548 ctx.extent_item_pos = swarn.logical - found_key.objectid; in scrub_print_common_warning()
566 if (!btrfs_is_zoned(sctx->fs_info)) in fill_writer_pointer_gap()
569 if (!btrfs_dev_is_sequential(sctx->wr_tgtdev, physical)) in fill_writer_pointer_gap()
572 if (sctx->write_pointer < physical) { in fill_writer_pointer_gap()
573 length = physical - sctx->write_pointer; in fill_writer_pointer_gap()
575 ret = btrfs_zoned_issue_zeroout(sctx->wr_tgtdev, in fill_writer_pointer_gap()
576 sctx->write_pointer, length); in fill_writer_pointer_gap()
578 sctx->write_pointer = physical; in fill_writer_pointer_gap()
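On zoned targets, writes must land exactly at the zone write pointer, so a repair write aimed beyond it first pads the gap with zeroes. A userspace model of the logic above; issue_zeroout() is an illustrative stand-in for btrfs_zoned_issue_zeroout():

#include <stdint.h>
#include <stdio.h>

static uint64_t write_pointer;	/* models sctx->write_pointer */

/* Stand-in for btrfs_zoned_issue_zeroout() (illustrative only). */
static int issue_zeroout(uint64_t start, uint64_t len)
{
	printf("zeroout [%llu, +%llu)\n",
	       (unsigned long long)start, (unsigned long long)len);
	return 0;
}

static int fill_gap(uint64_t physical)
{
	int ret;

	if (write_pointer >= physical)
		return 0;	/* no gap, the write lands on the pointer */
	ret = issue_zeroout(write_pointer, physical - write_pointer);
	if (ret < 0)
		return ret;
	write_pointer = physical;
	return 0;
}

int main(void)
{
	write_pointer = 4096;
	fill_gap(65536);	/* pads [4096, 65536) with zeroes */
	return 0;
}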
585 struct btrfs_fs_info *fs_info = stripe->bg->fs_info; in scrub_stripe_get_page()
586 int page_index = (sector_nr << fs_info->sectorsize_bits) >> PAGE_SHIFT; in scrub_stripe_get_page()
588 return stripe->pages[page_index]; in scrub_stripe_get_page()
594 struct btrfs_fs_info *fs_info = stripe->bg->fs_info; in scrub_stripe_get_page_offset()
596 return offset_in_page(sector_nr << fs_info->sectorsize_bits); in scrub_stripe_get_page_offset()
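The two helpers above are plain shift arithmetic: a sector number becomes a byte offset, which splits into a page index and an in-page offset. A sketch assuming 4K sectors on a 16K-page machine (the subpage case; values are illustrative):

#include <stdio.h>

#define SECTORSIZE_BITS	12			/* 4K sectors */
#define PAGE_SHIFT	14			/* 16K pages (subpage case) */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

int main(void)
{
	int sector_nr = 5;
	unsigned long byte_off = (unsigned long)sector_nr << SECTORSIZE_BITS;
	int page_index = byte_off >> PAGE_SHIFT;	  /* backing page */
	unsigned long pgoff = byte_off & (PAGE_SIZE - 1); /* offset inside */

	/* sector 5 -> byte 20480 -> page 1, offset 4096 */
	printf("sector %d -> page %d offset %lu\n", sector_nr, page_index, pgoff);
	return 0;
}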
601 struct btrfs_fs_info *fs_info = stripe->bg->fs_info; in scrub_verify_one_metadata()
602 const u32 sectors_per_tree = fs_info->nodesize >> fs_info->sectorsize_bits; in scrub_verify_one_metadata()
603 const u64 logical = stripe->logical + (sector_nr << fs_info->sectorsize_bits); in scrub_verify_one_metadata()
606 SHASH_DESC_ON_STACK(shash, fs_info->csum_shash); in scrub_verify_one_metadata()
617 memcpy(on_disk_csum, header->csum, fs_info->csum_size); in scrub_verify_one_metadata()
620 bitmap_set(&stripe->csum_error_bitmap, sector_nr, sectors_per_tree); in scrub_verify_one_metadata()
621 bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree); in scrub_verify_one_metadata()
623 "tree block %llu mirror %u has bad bytenr, has %llu want %llu", in scrub_verify_one_metadata()
624 logical, stripe->mirror_num, in scrub_verify_one_metadata()
628 if (memcmp(header->fsid, fs_info->fs_devices->metadata_uuid, in scrub_verify_one_metadata()
630 bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree); in scrub_verify_one_metadata()
631 bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree); in scrub_verify_one_metadata()
633 "tree block %llu mirror %u has bad fsid, has %pU want %pU", in scrub_verify_one_metadata()
634 logical, stripe->mirror_num, in scrub_verify_one_metadata()
635 header->fsid, fs_info->fs_devices->fsid); in scrub_verify_one_metadata()
638 if (memcmp(header->chunk_tree_uuid, fs_info->chunk_tree_uuid, in scrub_verify_one_metadata()
640 bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree); in scrub_verify_one_metadata()
641 bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree); in scrub_verify_one_metadata()
643 "tree block %llu mirror %u has bad chunk tree uuid, has %pU want %pU", in scrub_verify_one_metadata()
644 logical, stripe->mirror_num, in scrub_verify_one_metadata()
645 header->chunk_tree_uuid, fs_info->chunk_tree_uuid); in scrub_verify_one_metadata()
650 shash->tfm = fs_info->csum_shash; in scrub_verify_one_metadata()
653 BTRFS_CSUM_SIZE, fs_info->sectorsize - BTRFS_CSUM_SIZE); in scrub_verify_one_metadata()
660 fs_info->sectorsize); in scrub_verify_one_metadata()
664 if (memcmp(calculated_csum, on_disk_csum, fs_info->csum_size) != 0) { in scrub_verify_one_metadata()
665 bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree); in scrub_verify_one_metadata()
666 bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree); in scrub_verify_one_metadata()
668 "tree block %llu mirror %u has bad csum, has " CSUM_FMT " want " CSUM_FMT, in scrub_verify_one_metadata()
669 logical, stripe->mirror_num, in scrub_verify_one_metadata()
670 CSUM_FMT_VALUE(fs_info->csum_size, on_disk_csum), in scrub_verify_one_metadata()
671 CSUM_FMT_VALUE(fs_info->csum_size, calculated_csum)); in scrub_verify_one_metadata()
674 if (stripe->sectors[sector_nr].generation != in scrub_verify_one_metadata()
676 bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree); in scrub_verify_one_metadata()
677 bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree); in scrub_verify_one_metadata()
679 "tree block %llu mirror %u has bad generation, has %llu want %llu", in scrub_verify_one_metadata()
680 logical, stripe->mirror_num, in scrub_verify_one_metadata()
682 stripe->sectors[sector_nr].generation); in scrub_verify_one_metadata()
685 bitmap_clear(&stripe->error_bitmap, sector_nr, sectors_per_tree); in scrub_verify_one_metadata()
686 bitmap_clear(&stripe->csum_error_bitmap, sector_nr, sectors_per_tree); in scrub_verify_one_metadata()
687 bitmap_clear(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree); in scrub_verify_one_metadata()
692 struct btrfs_fs_info *fs_info = stripe->bg->fs_info; in scrub_verify_one_sector()
693 struct scrub_sector_verification *sector = &stripe->sectors[sector_nr]; in scrub_verify_one_sector()
694 const u32 sectors_per_tree = fs_info->nodesize >> fs_info->sectorsize_bits; in scrub_verify_one_sector()
700 ASSERT(sector_nr >= 0 && sector_nr < stripe->nr_sectors); in scrub_verify_one_sector()
703 if (!test_bit(sector_nr, &stripe->extent_sector_bitmap)) in scrub_verify_one_sector()
707 if (test_bit(sector_nr, &stripe->io_error_bitmap)) in scrub_verify_one_sector()
711 if (sector->is_metadata) { in scrub_verify_one_sector()
720 if (unlikely(sector_nr + sectors_per_tree > stripe->nr_sectors)) { in scrub_verify_one_sector()
723 stripe->logical + in scrub_verify_one_sector()
724 (sector_nr << fs_info->sectorsize_bits), in scrub_verify_one_sector()
725 stripe->logical); in scrub_verify_one_sector()
733 * Data is easier, we just verify the data csum (if we have it). For in scrub_verify_one_sector()
736 if (!sector->csum) { in scrub_verify_one_sector()
737 clear_bit(sector_nr, &stripe->error_bitmap); in scrub_verify_one_sector()
741 ret = btrfs_check_sector_csum(fs_info, page, pgoff, csum_buf, sector->csum); in scrub_verify_one_sector()
743 set_bit(sector_nr, &stripe->csum_error_bitmap); in scrub_verify_one_sector()
744 set_bit(sector_nr, &stripe->error_bitmap); in scrub_verify_one_sector()
746 clear_bit(sector_nr, &stripe->csum_error_bitmap); in scrub_verify_one_sector()
747 clear_bit(sector_nr, &stripe->error_bitmap); in scrub_verify_one_sector()
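btrfs_check_sector_csum() boils down to recomputing the sector checksum and comparing it against the csum-tree copy; with the default csum type that is CRC32C. A self-contained bitwise reference (the kernel itself goes through the crypto API and hardware-accelerated paths, not this loop):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Bitwise CRC32C (Castagnoli), reflected polynomial 0x82F63B78. */
static uint32_t crc32c(uint32_t crc, const void *buf, size_t len)
{
	const uint8_t *p = buf;

	crc = ~crc;
	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (0x82F63B78U & -(crc & 1U));
	}
	return ~crc;
}

int main(void)
{
	uint8_t sector[4096];
	uint32_t have, want;

	memset(sector, 0, sizeof(sector));
	have = crc32c(0, sector, sizeof(sector));	/* what we read */
	want = have;			/* would come from the csum tree */

	printf("csum %s\n", have == want ? "matches" : "mismatch");
	return 0;
}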
754 struct btrfs_fs_info *fs_info = stripe->bg->fs_info; in scrub_verify_one_stripe()
755 const u32 sectors_per_tree = fs_info->nodesize >> fs_info->sectorsize_bits; in scrub_verify_one_stripe()
758 for_each_set_bit(sector_nr, &bitmap, stripe->nr_sectors) { in scrub_verify_one_stripe()
760 if (stripe->sectors[sector_nr].is_metadata) in scrub_verify_one_stripe()
761 sector_nr += sectors_per_tree - 1; in scrub_verify_one_stripe()
769 for (i = 0; i < stripe->nr_sectors; i++) { in calc_sector_number()
770 if (scrub_stripe_get_page(stripe, i) == first_bvec->bv_page && in calc_sector_number()
771 scrub_stripe_get_page_offset(stripe, i) == first_bvec->bv_offset) in calc_sector_number()
774 ASSERT(i < stripe->nr_sectors); in calc_sector_number()
781 * - Only reads the failed sectors
782 * - May have extra blocksize limits
786 struct scrub_stripe *stripe = bbio->private; in scrub_repair_read_endio()
787 struct btrfs_fs_info *fs_info = stripe->bg->fs_info; in scrub_repair_read_endio()
789 int sector_nr = calc_sector_number(stripe, bio_first_bvec_all(&bbio->bio)); in scrub_repair_read_endio()
793 ASSERT(sector_nr < stripe->nr_sectors); in scrub_repair_read_endio()
795 bio_for_each_bvec_all(bvec, &bbio->bio, i) in scrub_repair_read_endio()
796 bio_size += bvec->bv_len; in scrub_repair_read_endio()
798 if (bbio->bio.bi_status) { in scrub_repair_read_endio()
799 bitmap_set(&stripe->io_error_bitmap, sector_nr, in scrub_repair_read_endio()
800 bio_size >> fs_info->sectorsize_bits); in scrub_repair_read_endio()
801 bitmap_set(&stripe->error_bitmap, sector_nr, in scrub_repair_read_endio()
802 bio_size >> fs_info->sectorsize_bits); in scrub_repair_read_endio()
804 bitmap_clear(&stripe->io_error_bitmap, sector_nr, in scrub_repair_read_endio()
805 bio_size >> fs_info->sectorsize_bits); in scrub_repair_read_endio()
807 bio_put(&bbio->bio); in scrub_repair_read_endio()
808 if (atomic_dec_and_test(&stripe->pending_io)) in scrub_repair_read_endio()
809 wake_up(&stripe->io_wait); in scrub_repair_read_endio()
812 static int calc_next_mirror(int mirror, int num_copies) in calc_next_mirror() argument
814 ASSERT(mirror <= num_copies); in calc_next_mirror()
815 return (mirror + 1 > num_copies) ? 1 : mirror + 1; in calc_next_mirror()
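calc_next_mirror() treats mirrors as a 1-based ring, so starting after the failed mirror and stepping until we come back to it visits every other copy exactly once. A quick check:

#include <assert.h>

static int calc_next_mirror(int mirror, int num_copies)
{
	return (mirror + 1 > num_copies) ? 1 : mirror + 1;
}

int main(void)
{
	int failed = 2, visited = 0;

	assert(calc_next_mirror(1, 3) == 2);
	assert(calc_next_mirror(3, 3) == 1);	/* wraps back to copy 1 */

	/* Stepping from the failed mirror visits the two other copies. */
	for (int m = calc_next_mirror(failed, 3); m != failed;
	     m = calc_next_mirror(m, 3))
		visited++;
	assert(visited == 2);
	return 0;
}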
819 int mirror, int blocksize, bool wait) in scrub_stripe_submit_repair_read() argument
821 struct btrfs_fs_info *fs_info = stripe->bg->fs_info; in scrub_stripe_submit_repair_read()
823 const unsigned long old_error_bitmap = stripe->error_bitmap; in scrub_stripe_submit_repair_read()
826 ASSERT(stripe->mirror_num >= 1); in scrub_stripe_submit_repair_read()
827 ASSERT(atomic_read(&stripe->pending_io) == 0); in scrub_stripe_submit_repair_read()
829 for_each_set_bit(i, &old_error_bitmap, stripe->nr_sectors) { in scrub_stripe_submit_repair_read()
838 if (bbio && ((i > 0 && !test_bit(i - 1, &stripe->error_bitmap)) || in scrub_stripe_submit_repair_read()
839 bbio->bio.bi_iter.bi_size >= blocksize)) { in scrub_stripe_submit_repair_read()
840 ASSERT(bbio->bio.bi_iter.bi_size); in scrub_stripe_submit_repair_read()
841 atomic_inc(&stripe->pending_io); in scrub_stripe_submit_repair_read()
842 btrfs_submit_bio(bbio, mirror); in scrub_stripe_submit_repair_read()
849 bbio = btrfs_bio_alloc(stripe->nr_sectors, REQ_OP_READ, in scrub_stripe_submit_repair_read()
851 bbio->bio.bi_iter.bi_sector = (stripe->logical + in scrub_stripe_submit_repair_read()
852 (i << fs_info->sectorsize_bits)) >> SECTOR_SHIFT; in scrub_stripe_submit_repair_read()
855 ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff); in scrub_stripe_submit_repair_read()
856 ASSERT(ret == fs_info->sectorsize); in scrub_stripe_submit_repair_read()
859 ASSERT(bbio->bio.bi_iter.bi_size); in scrub_stripe_submit_repair_read()
860 atomic_inc(&stripe->pending_io); in scrub_stripe_submit_repair_read()
861 btrfs_submit_bio(bbio, mirror); in scrub_stripe_submit_repair_read()
872 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_stripe_report_errors()
881 if (test_bit(SCRUB_STRIPE_FLAG_NO_REPORT, &stripe->state)) in scrub_stripe_report_errors()
890 if (!bitmap_empty(&stripe->init_error_bitmap, stripe->nr_sectors)) { in scrub_stripe_report_errors()
891 u64 mapped_len = fs_info->sectorsize; in scrub_stripe_report_errors()
893 int stripe_index = stripe->mirror_num - 1; in scrub_stripe_report_errors()
897 ASSERT(stripe->mirror_num >= 1); in scrub_stripe_report_errors()
899 stripe->logical, &mapped_len, &bioc, in scrub_stripe_report_errors()
907 physical = bioc->stripes[stripe_index].physical; in scrub_stripe_report_errors()
908 dev = bioc->stripes[stripe_index].dev; in scrub_stripe_report_errors()
913 for_each_set_bit(sector_nr, &stripe->extent_sector_bitmap, stripe->nr_sectors) { in scrub_stripe_report_errors()
916 if (stripe->sectors[sector_nr].is_metadata) { in scrub_stripe_report_errors()
920 if (!stripe->sectors[sector_nr].csum) in scrub_stripe_report_errors()
924 if (test_bit(sector_nr, &stripe->init_error_bitmap) && in scrub_stripe_report_errors()
925 !test_bit(sector_nr, &stripe->error_bitmap)) { in scrub_stripe_report_errors()
931 if (!test_bit(sector_nr, &stripe->init_error_bitmap)) in scrub_stripe_report_errors()
942 stripe->logical, btrfs_dev_name(dev), in scrub_stripe_report_errors()
946 "fixed up error at logical %llu on mirror %u", in scrub_stripe_report_errors()
947 stripe->logical, stripe->mirror_num); in scrub_stripe_report_errors()
956 stripe->logical, btrfs_dev_name(dev), in scrub_stripe_report_errors()
960 "unable to fixup (regular) error at logical %llu on mirror %u", in scrub_stripe_report_errors()
961 stripe->logical, stripe->mirror_num); in scrub_stripe_report_errors()
964 if (test_bit(sector_nr, &stripe->io_error_bitmap)) in scrub_stripe_report_errors()
967 stripe->logical, physical); in scrub_stripe_report_errors()
968 if (test_bit(sector_nr, &stripe->csum_error_bitmap)) in scrub_stripe_report_errors()
971 stripe->logical, physical); in scrub_stripe_report_errors()
972 if (test_bit(sector_nr, &stripe->meta_error_bitmap)) in scrub_stripe_report_errors()
975 stripe->logical, physical); in scrub_stripe_report_errors()
978 spin_lock(&sctx->stat_lock); in scrub_stripe_report_errors()
979 sctx->stat.data_extents_scrubbed += stripe->nr_data_extents; in scrub_stripe_report_errors()
980 sctx->stat.tree_extents_scrubbed += stripe->nr_meta_extents; in scrub_stripe_report_errors()
981 sctx->stat.data_bytes_scrubbed += nr_data_sectors << fs_info->sectorsize_bits; in scrub_stripe_report_errors()
982 sctx->stat.tree_bytes_scrubbed += nr_meta_sectors << fs_info->sectorsize_bits; in scrub_stripe_report_errors()
983 sctx->stat.no_csum += nr_nodatacsum_sectors; in scrub_stripe_report_errors()
984 sctx->stat.read_errors += stripe->init_nr_io_errors; in scrub_stripe_report_errors()
985 sctx->stat.csum_errors += stripe->init_nr_csum_errors; in scrub_stripe_report_errors()
986 sctx->stat.verify_errors += stripe->init_nr_meta_errors; in scrub_stripe_report_errors()
987 sctx->stat.uncorrectable_errors += in scrub_stripe_report_errors()
988 bitmap_weight(&stripe->error_bitmap, stripe->nr_sectors); in scrub_stripe_report_errors()
989 sctx->stat.corrected_errors += nr_repaired_sectors; in scrub_stripe_report_errors()
990 spin_unlock(&sctx->stat_lock); in scrub_stripe_report_errors()
999 * - Wait for the initial read to finish
1000 * - Verify and locate any bad sectors
1001 * - Go through the remaining mirrors and try to read as large blocksize as
1003 * - Go through all mirrors (including the failed mirror) sector-by-sector
1004 * - Submit writeback for repaired sectors
1006 * Writeback for dev-replace does not happen here, it needs extra
1012 struct scrub_ctx *sctx = stripe->sctx; in scrub_stripe_read_repair_worker()
1013 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_stripe_read_repair_worker()
1014 int num_copies = btrfs_num_copies(fs_info, stripe->bg->start, in scrub_stripe_read_repair_worker()
1015 stripe->bg->length); in scrub_stripe_read_repair_worker()
1017 int mirror; in scrub_stripe_read_repair_worker() local
1020 ASSERT(stripe->mirror_num > 0); in scrub_stripe_read_repair_worker()
1023 scrub_verify_one_stripe(stripe, stripe->extent_sector_bitmap); in scrub_stripe_read_repair_worker()
1025 stripe->init_error_bitmap = stripe->error_bitmap; in scrub_stripe_read_repair_worker()
1026 stripe->init_nr_io_errors = bitmap_weight(&stripe->io_error_bitmap, in scrub_stripe_read_repair_worker()
1027 stripe->nr_sectors); in scrub_stripe_read_repair_worker()
1028 stripe->init_nr_csum_errors = bitmap_weight(&stripe->csum_error_bitmap, in scrub_stripe_read_repair_worker()
1029 stripe->nr_sectors); in scrub_stripe_read_repair_worker()
1030 stripe->init_nr_meta_errors = bitmap_weight(&stripe->meta_error_bitmap, in scrub_stripe_read_repair_worker()
1031 stripe->nr_sectors); in scrub_stripe_read_repair_worker()
1033 if (bitmap_empty(&stripe->init_error_bitmap, stripe->nr_sectors)) in scrub_stripe_read_repair_worker()
1042 for (mirror = calc_next_mirror(stripe->mirror_num, num_copies); in scrub_stripe_read_repair_worker()
1043 mirror != stripe->mirror_num; in scrub_stripe_read_repair_worker()
1044 mirror = calc_next_mirror(mirror, num_copies)) { in scrub_stripe_read_repair_worker()
1045 const unsigned long old_error_bitmap = stripe->error_bitmap; in scrub_stripe_read_repair_worker()
1047 scrub_stripe_submit_repair_read(stripe, mirror, in scrub_stripe_read_repair_worker()
1051 if (bitmap_empty(&stripe->error_bitmap, stripe->nr_sectors)) in scrub_stripe_read_repair_worker()
1056 * Last safety net, try re-checking all mirrors, including the failed in scrub_stripe_read_repair_worker()
1057 * one, sector-by-sector. in scrub_stripe_read_repair_worker()
1061 * Thus here we do a sector-by-sector read. in scrub_stripe_read_repair_worker()
1066 for (i = 0, mirror = stripe->mirror_num; in scrub_stripe_read_repair_worker()
1068 i++, mirror = calc_next_mirror(mirror, num_copies)) { in scrub_stripe_read_repair_worker()
1069 const unsigned long old_error_bitmap = stripe->error_bitmap; in scrub_stripe_read_repair_worker()
1071 scrub_stripe_submit_repair_read(stripe, mirror, in scrub_stripe_read_repair_worker()
1072 fs_info->sectorsize, true); in scrub_stripe_read_repair_worker()
1075 if (bitmap_empty(&stripe->error_bitmap, stripe->nr_sectors)) in scrub_stripe_read_repair_worker()
1081 * in-place, but queue the bg to be relocated. in scrub_stripe_read_repair_worker()
1083 bitmap_andnot(&repaired, &stripe->init_error_bitmap, &stripe->error_bitmap, in scrub_stripe_read_repair_worker()
1084 stripe->nr_sectors); in scrub_stripe_read_repair_worker()
1085 if (!sctx->readonly && !bitmap_empty(&repaired, stripe->nr_sectors)) { in scrub_stripe_read_repair_worker()
1087 btrfs_repair_one_zone(fs_info, sctx->stripes[0].bg->start); in scrub_stripe_read_repair_worker()
1095 set_bit(SCRUB_STRIPE_FLAG_REPAIR_DONE, &stripe->state); in scrub_stripe_read_repair_worker()
1096 wake_up(&stripe->repair_wait); in scrub_stripe_read_repair_worker()
1101 struct scrub_stripe *stripe = bbio->private; in scrub_read_endio()
1103 int sector_nr = calc_sector_number(stripe, bio_first_bvec_all(&bbio->bio)); in scrub_read_endio()
1108 ASSERT(sector_nr < stripe->nr_sectors); in scrub_read_endio()
1109 bio_for_each_bvec_all(bvec, &bbio->bio, i) in scrub_read_endio()
1110 bio_size += bvec->bv_len; in scrub_read_endio()
1111 num_sectors = bio_size >> stripe->bg->fs_info->sectorsize_bits; in scrub_read_endio()
1113 if (bbio->bio.bi_status) { in scrub_read_endio()
1114 bitmap_set(&stripe->io_error_bitmap, sector_nr, num_sectors); in scrub_read_endio()
1115 bitmap_set(&stripe->error_bitmap, sector_nr, num_sectors); in scrub_read_endio()
1117 bitmap_clear(&stripe->io_error_bitmap, sector_nr, num_sectors); in scrub_read_endio()
1119 bio_put(&bbio->bio); in scrub_read_endio()
1120 if (atomic_dec_and_test(&stripe->pending_io)) { in scrub_read_endio()
1121 wake_up(&stripe->io_wait); in scrub_read_endio()
1122 INIT_WORK(&stripe->work, scrub_stripe_read_repair_worker); in scrub_read_endio()
1123 queue_work(stripe->bg->fs_info->scrub_workers, &stripe->work); in scrub_read_endio()
1129 struct scrub_stripe *stripe = bbio->private; in scrub_write_endio()
1130 struct btrfs_fs_info *fs_info = stripe->bg->fs_info; in scrub_write_endio()
1132 int sector_nr = calc_sector_number(stripe, bio_first_bvec_all(&bbio->bio)); in scrub_write_endio()
1136 bio_for_each_bvec_all(bvec, &bbio->bio, i) in scrub_write_endio()
1137 bio_size += bvec->bv_len; in scrub_write_endio()
1139 if (bbio->bio.bi_status) { in scrub_write_endio()
1142 spin_lock_irqsave(&stripe->write_error_lock, flags); in scrub_write_endio()
1143 bitmap_set(&stripe->write_error_bitmap, sector_nr, in scrub_write_endio()
1144 bio_size >> fs_info->sectorsize_bits); in scrub_write_endio()
1145 spin_unlock_irqrestore(&stripe->write_error_lock, flags); in scrub_write_endio()
1147 bio_put(&bbio->bio); in scrub_write_endio()
1149 if (atomic_dec_and_test(&stripe->pending_io)) in scrub_write_endio()
1150 wake_up(&stripe->io_wait); in scrub_write_endio()
1157 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_submit_write_bio()
1158 u32 bio_len = bbio->bio.bi_iter.bi_size; in scrub_submit_write_bio()
1159 u32 bio_off = (bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT) - in scrub_submit_write_bio()
1160 stripe->logical; in scrub_submit_write_bio()
1162 fill_writer_pointer_gap(sctx, stripe->physical + bio_off); in scrub_submit_write_bio()
1163 atomic_inc(&stripe->pending_io); in scrub_submit_write_bio()
1164 btrfs_submit_repair_write(bbio, stripe->mirror_num, dev_replace); in scrub_submit_write_bio()
1177 if (!test_bit(bio_off >> fs_info->sectorsize_bits, in scrub_submit_write_bio()
1178 &stripe->write_error_bitmap)) in scrub_submit_write_bio()
1179 sctx->write_pointer += bio_len; in scrub_submit_write_bio()
1187 * - Only needs logical bytenr and mirror_num
1190 * - Would only result in writes to the specified mirror
1193 * - Handle dev-replace and read-repair writeback differently
1198 struct btrfs_fs_info *fs_info = stripe->bg->fs_info; in scrub_write_sectors()
1202 for_each_set_bit(sector_nr, &write_bitmap, stripe->nr_sectors) { in scrub_write_sectors()
1208 ASSERT(test_bit(sector_nr, &stripe->extent_sector_bitmap)); in scrub_write_sectors()
1211 if (bbio && sector_nr && !test_bit(sector_nr - 1, &write_bitmap)) { in scrub_write_sectors()
1216 bbio = btrfs_bio_alloc(stripe->nr_sectors, REQ_OP_WRITE, in scrub_write_sectors()
1218 bbio->bio.bi_iter.bi_sector = (stripe->logical + in scrub_write_sectors()
1219 (sector_nr << fs_info->sectorsize_bits)) >> in scrub_write_sectors()
1222 ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff); in scrub_write_sectors()
1223 ASSERT(ret == fs_info->sectorsize); in scrub_write_sectors()
1230 * Throttling of IO submission, bandwidth-limit based, the timeslice is 1 second.
1242 bwlimit = READ_ONCE(device->scrub_speed_max); in scrub_throttle_dev_io()
1255 if (sctx->throttle_deadline == 0) { in scrub_throttle_dev_io()
1256 sctx->throttle_deadline = ktime_add_ms(now, time_slice / div); in scrub_throttle_dev_io()
1257 sctx->throttle_sent = 0; in scrub_throttle_dev_io()
1261 if (ktime_before(now, sctx->throttle_deadline)) { in scrub_throttle_dev_io()
1263 sctx->throttle_sent += bio_size; in scrub_throttle_dev_io()
1264 if (sctx->throttle_sent <= div_u64(bwlimit, div)) in scrub_throttle_dev_io()
1268 delta = ktime_ms_delta(sctx->throttle_deadline, now); in scrub_throttle_dev_io()
1282 sctx->throttle_deadline = 0; in scrub_throttle_dev_io()
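The throttle carves each second into div slices, each with a byte quota of bwlimit/div; a submitter that exhausts the quota sleeps out the remainder of the slice, and the next call opens a fresh one. A userspace model of that accounting (values are illustrative):

#include <stdint.h>
#include <stdio.h>

static uint64_t deadline_ms;	/* 0: no slice open (throttle_deadline) */
static uint64_t sent;		/* bytes sent this slice (throttle_sent) */

static void throttle(uint64_t now_ms, uint64_t bio_size,
		     uint64_t bwlimit, uint32_t div)
{
	if (bwlimit == 0)
		return;				/* throttling disabled */

	if (deadline_ms == 0) {			/* open a fresh slice */
		deadline_ms = now_ms + 1000 / div;
		sent = 0;
	}

	if (now_ms < deadline_ms) {
		sent += bio_size;
		if (sent <= bwlimit / div)
			return;			/* within this slice's quota */
		printf("sleep %llu ms\n",	/* models schedule_timeout() */
		       (unsigned long long)(deadline_ms - now_ms));
	}
	deadline_ms = 0;			/* next call opens a new slice */
}

int main(void)
{
	/* 32M/s limit, div = 2: 16M quota per 500 ms slice. */
	throttle(0, 8 << 20, 32 << 20, 2);	/* fits in the quota */
	throttle(100, 16 << 20, 32 << 20, 2);	/* over quota: sleep ~400 ms */
	return 0;
}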
1288 * the leftmost data stripe's logical offset.
1290 * Return 0 if it is a data stripe, 1 if it is a parity stripe.
1301 last_offset = (physical - map->stripes[num].physical) * data_stripes; in get_raid56_logic_offset()
1315 /* Work out the disk rotation on this stripe-set */ in get_raid56_logic_offset()
1316 rot = stripe_nr % map->num_stripes; in get_raid56_logic_offset()
1317 /* Calculate which stripe this data is located on */ in get_raid56_logic_offset()
1319 stripe_index = rot % map->num_stripes; in get_raid56_logic_offset()
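The rotation above is what spreads parity across disks (a `rot += i` step sits between the two lines shown). A worked model for a 3-disk RAID5, where data stripe i of full stripe stripe_nr lands on disk (rot + i) % num_stripes and the remaining slot holds P; treat this as a sketch of the arithmetic, not the full mapping code:

#include <stdio.h>

int main(void)
{
	const int num_stripes = 3;		/* 2 data + 1 parity (RAID5) */
	const int num_data = num_stripes - 1;

	for (int stripe_nr = 0; stripe_nr < 3; stripe_nr++) {
		int rot = stripe_nr % num_stripes;

		for (int i = 0; i < num_data; i++)
			printf("full stripe %d: data %d -> disk %d\n",
			       stripe_nr, i, (rot + i) % num_stripes);
		printf("full stripe %d: P      -> disk %d\n",
		       stripe_nr, (rot + num_data) % num_stripes);
	}
	return 0;
}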
1337 struct btrfs_fs_info *fs_info = path->nodes[0]->fs_info; in compare_extent_item_range()
1341 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); in compare_extent_item_range()
1345 len = fs_info->nodesize; in compare_extent_item_range()
1350 return -1; in compare_extent_item_range()
1366 * return the extent item. This is for a data extent crossing the stripe boundary.
1376 struct btrfs_fs_info *fs_info = extent_root->fs_info; in find_first_extent_item()
1381 if (path->nodes[0]) in find_first_extent_item()
1389 key.offset = (u64)-1; in find_first_extent_item()
1409 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); in find_first_extent_item()
1422 path->slots[0]++; in find_first_extent_item()
1423 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) { in find_first_extent_item()
1442 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); in get_extent_info()
1447 *size_ret = path->nodes[0]->fs_info->nodesize; in get_extent_info()
1450 ei = btrfs_item_ptr(path->nodes[0], path->slots[0], struct btrfs_extent_item); in get_extent_info()
1451 *flags_ret = btrfs_extent_flags(path->nodes[0], ei); in get_extent_info()
1452 *generation_ret = btrfs_extent_generation(path->nodes[0], ei); in get_extent_info()
1458 struct btrfs_fs_info *fs_info = sctx->fs_info; in sync_write_pointer_for_zoned()
1464 mutex_lock(&sctx->wr_lock); in sync_write_pointer_for_zoned()
1465 if (sctx->write_pointer < physical_end) { in sync_write_pointer_for_zoned()
1466 ret = btrfs_sync_zone_write_pointer(sctx->wr_tgtdev, logical, in sync_write_pointer_for_zoned()
1468 sctx->write_pointer); in sync_write_pointer_for_zoned()
1473 mutex_unlock(&sctx->wr_lock); in sync_write_pointer_for_zoned()
1474 btrfs_dev_clear_zone_empty(sctx->wr_tgtdev, physical); in sync_write_pointer_for_zoned()
1484 for (u64 cur_logical = max(stripe->logical, extent_start); in fill_one_extent_info()
1485 cur_logical < min(stripe->logical + BTRFS_STRIPE_LEN, in fill_one_extent_info()
1487 cur_logical += fs_info->sectorsize) { in fill_one_extent_info()
1488 const int nr_sector = (cur_logical - stripe->logical) >> in fill_one_extent_info()
1489 fs_info->sectorsize_bits; in fill_one_extent_info()
1491 &stripe->sectors[nr_sector]; in fill_one_extent_info()
1493 set_bit(nr_sector, &stripe->extent_sector_bitmap); in fill_one_extent_info()
1495 sector->is_metadata = true; in fill_one_extent_info()
1496 sector->generation = extent_gen; in fill_one_extent_info()
1503 stripe->extent_sector_bitmap = 0; in scrub_stripe_reset_bitmaps()
1504 stripe->init_error_bitmap = 0; in scrub_stripe_reset_bitmaps()
1505 stripe->init_nr_io_errors = 0; in scrub_stripe_reset_bitmaps()
1506 stripe->init_nr_csum_errors = 0; in scrub_stripe_reset_bitmaps()
1507 stripe->init_nr_meta_errors = 0; in scrub_stripe_reset_bitmaps()
1508 stripe->error_bitmap = 0; in scrub_stripe_reset_bitmaps()
1509 stripe->io_error_bitmap = 0; in scrub_stripe_reset_bitmaps()
1510 stripe->csum_error_bitmap = 0; in scrub_stripe_reset_bitmaps()
1511 stripe->meta_error_bitmap = 0; in scrub_stripe_reset_bitmaps()
1529 struct btrfs_fs_info *fs_info = bg->fs_info; in scrub_find_fill_first_stripe()
1530 struct btrfs_root *extent_root = btrfs_extent_root(fs_info, bg->start); in scrub_find_fill_first_stripe()
1531 struct btrfs_root *csum_root = btrfs_csum_root(fs_info, bg->start); in scrub_find_fill_first_stripe()
1543 return -EUCLEAN; in scrub_find_fill_first_stripe()
1545 memset(stripe->sectors, 0, sizeof(struct scrub_sector_verification) * in scrub_find_fill_first_stripe()
1546 stripe->nr_sectors); in scrub_find_fill_first_stripe()
1550 ASSERT(logical_start >= bg->start && logical_end <= bg->start + bg->length); in scrub_find_fill_first_stripe()
1560 stripe->nr_meta_extents++; in scrub_find_fill_first_stripe()
1562 stripe->nr_data_extents++; in scrub_find_fill_first_stripe()
1568 * The extra calculation against bg->start is to handle block groups in scrub_find_fill_first_stripe()
1571 stripe->logical = round_down(cur_logical - bg->start, BTRFS_STRIPE_LEN) + in scrub_find_fill_first_stripe()
1572 bg->start; in scrub_find_fill_first_stripe()
1573 stripe->physical = physical + stripe->logical - logical_start; in scrub_find_fill_first_stripe()
1574 stripe->dev = dev; in scrub_find_fill_first_stripe()
1575 stripe->bg = bg; in scrub_find_fill_first_stripe()
1576 stripe->mirror_num = mirror_num; in scrub_find_fill_first_stripe()
1577 stripe_end = stripe->logical + BTRFS_STRIPE_LEN - 1; in scrub_find_fill_first_stripe()
1579 /* Fill the first extent info into stripe->sectors[] array. */ in scrub_find_fill_first_stripe()
1587 stripe_end - cur_logical + 1); in scrub_find_fill_first_stripe()
1597 stripe->nr_meta_extents++; in scrub_find_fill_first_stripe()
1599 stripe->nr_data_extents++; in scrub_find_fill_first_stripe()
1605 /* Now fill the data csum. */ in scrub_find_fill_first_stripe()
1606 if (bg->flags & BTRFS_BLOCK_GROUP_DATA) { in scrub_find_fill_first_stripe()
1611 ASSERT(stripe->csums); in scrub_find_fill_first_stripe()
1617 ASSERT(BITS_PER_LONG >= BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits); in scrub_find_fill_first_stripe()
1620 stripe->logical, stripe_end, in scrub_find_fill_first_stripe()
1621 stripe->csums, &csum_bitmap); in scrub_find_fill_first_stripe()
1627 for_each_set_bit(sector_nr, &csum_bitmap, stripe->nr_sectors) { in scrub_find_fill_first_stripe()
1628 stripe->sectors[sector_nr].csum = stripe->csums + in scrub_find_fill_first_stripe()
1629 sector_nr * fs_info->csum_size; in scrub_find_fill_first_stripe()
1632 set_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state); in scrub_find_fill_first_stripe()
1641 stripe->nr_meta_extents = 0; in scrub_reset_stripe()
1642 stripe->nr_data_extents = 0; in scrub_reset_stripe()
1643 stripe->state = 0; in scrub_reset_stripe()
1645 for (int i = 0; i < stripe->nr_sectors; i++) { in scrub_reset_stripe()
1646 stripe->sectors[i].is_metadata = false; in scrub_reset_stripe()
1647 stripe->sectors[i].csum = NULL; in scrub_reset_stripe()
1648 stripe->sectors[i].generation = 0; in scrub_reset_stripe()
1655 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_submit_initial_read()
1657 unsigned int nr_sectors = min_t(u64, BTRFS_STRIPE_LEN, stripe->bg->start + in scrub_submit_initial_read()
1658 stripe->bg->length - stripe->logical) >> in scrub_submit_initial_read()
1659 fs_info->sectorsize_bits; in scrub_submit_initial_read()
1660 int mirror = stripe->mirror_num; in scrub_submit_initial_read() local
1662 ASSERT(stripe->bg); in scrub_submit_initial_read()
1663 ASSERT(stripe->mirror_num > 0); in scrub_submit_initial_read()
1664 ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state)); in scrub_submit_initial_read()
1669 bbio->bio.bi_iter.bi_sector = stripe->logical >> SECTOR_SHIFT; in scrub_submit_initial_read()
1676 ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff); in scrub_submit_initial_read()
1678 ASSERT(ret == fs_info->sectorsize); in scrub_submit_initial_read()
1680 atomic_inc(&stripe->pending_io); in scrub_submit_initial_read()
1683 * For dev-replace, if either the user asks to avoid the source dev, or in scrub_submit_initial_read()
1684 * the device is missing, we try the next mirror instead. in scrub_submit_initial_read()
1686 if (sctx->is_dev_replace && in scrub_submit_initial_read()
1687 (fs_info->dev_replace.cont_reading_from_srcdev_mode == in scrub_submit_initial_read()
1689 !stripe->dev->bdev)) { in scrub_submit_initial_read()
1690 int num_copies = btrfs_num_copies(fs_info, stripe->bg->start, in scrub_submit_initial_read()
1691 stripe->bg->length); in scrub_submit_initial_read()
1693 mirror = calc_next_mirror(mirror, num_copies); in scrub_submit_initial_read()
1695 btrfs_submit_bio(bbio, mirror); in scrub_submit_initial_read()
1702 for_each_set_bit(i, &stripe->error_bitmap, stripe->nr_sectors) { in stripe_has_metadata_error()
1703 if (stripe->sectors[i].is_metadata) { in stripe_has_metadata_error()
1704 struct btrfs_fs_info *fs_info = stripe->bg->fs_info; in stripe_has_metadata_error()
1708 stripe->logical, in stripe_has_metadata_error()
1709 stripe->logical + (i << fs_info->sectorsize_bits)); in stripe_has_metadata_error()
1725 scrub_throttle_dev_io(sctx, sctx->stripes[0].dev, in submit_initial_group_read()
1729 struct scrub_stripe *stripe = &sctx->stripes[first_slot + i]; in submit_initial_group_read()
1732 ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state)); in submit_initial_group_read()
1740 struct btrfs_fs_info *fs_info = sctx->fs_info; in flush_scrub_stripes()
1742 const int nr_stripes = sctx->cur_stripe; in flush_scrub_stripes()
1748 ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &sctx->stripes[0].state)); in flush_scrub_stripes()
1754 submit_initial_group_read(sctx, first_slot, nr_stripes - first_slot); in flush_scrub_stripes()
1758 stripe = &sctx->stripes[i]; in flush_scrub_stripes()
1760 wait_event(stripe->repair_wait, in flush_scrub_stripes()
1761 test_bit(SCRUB_STRIPE_FLAG_REPAIR_DONE, &stripe->state)); in flush_scrub_stripes()
1764 /* Submit for dev-replace. */ in flush_scrub_stripes()
1765 if (sctx->is_dev_replace) { in flush_scrub_stripes()
1767 * For dev-replace, if we know there is something wrong with in flush_scrub_stripes()
1771 if (stripe_has_metadata_error(&sctx->stripes[i])) { in flush_scrub_stripes()
1772 ret = -EIO; in flush_scrub_stripes()
1779 stripe = &sctx->stripes[i]; in flush_scrub_stripes()
1781 ASSERT(stripe->dev == fs_info->dev_replace.srcdev); in flush_scrub_stripes()
1783 bitmap_andnot(&good, &stripe->extent_sector_bitmap, in flush_scrub_stripes()
1784 &stripe->error_bitmap, stripe->nr_sectors); in flush_scrub_stripes()
1791 stripe = &sctx->stripes[i]; in flush_scrub_stripes()
1797 sctx->cur_stripe = 0; in flush_scrub_stripes()
1803 complete(bio->bi_private); in raid56_scrub_wait_endio()
1818 ASSERT(sctx->cur_stripe < SCRUB_TOTAL_STRIPES); in queue_scrub_stripe()
1823 stripe = &sctx->stripes[sctx->cur_stripe]; in queue_scrub_stripe()
1825 ret = scrub_find_fill_first_stripe(bg, &sctx->extent_path, in queue_scrub_stripe()
1826 &sctx->csum_path, dev, physical, in queue_scrub_stripe()
1831 *found_logical_ret = stripe->logical; in queue_scrub_stripe()
1832 sctx->cur_stripe++; in queue_scrub_stripe()
1835 if (sctx->cur_stripe % SCRUB_STRIPES_PER_GROUP == 0) { in queue_scrub_stripe()
1836 const int first_slot = sctx->cur_stripe - SCRUB_STRIPES_PER_GROUP; in queue_scrub_stripe()
1842 if (sctx->cur_stripe == SCRUB_TOTAL_STRIPES) in queue_scrub_stripe()
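queue_scrub_stripe() batches work: stripes accumulate in a fixed array, every SCRUB_STRIPES_PER_GROUP queued entries kick off one group's initial reads, and a completely full array forces flush_scrub_stripes(). A compact model of that cadence (constants are assumptions):

#include <stdio.h>

#define STRIPES_PER_GROUP	8	/* assumed SCRUB_STRIPES_PER_GROUP */
#define TOTAL_STRIPES		(STRIPES_PER_GROUP * 16)

static int cur_stripe;

static void submit_group(int first_slot, int nr)
{
	printf("submit group [%d, %d)\n", first_slot, first_slot + nr);
}

static void queue_stripe(void)
{
	cur_stripe++;
	/* A full group went in: submit its initial reads right away. */
	if (cur_stripe % STRIPES_PER_GROUP == 0)
		submit_group(cur_stripe - STRIPES_PER_GROUP,
			     STRIPES_PER_GROUP);
	/* Array exhausted: wait for everything and start over. */
	if (cur_stripe == TOTAL_STRIPES) {
		printf("flush all %d stripes\n", cur_stripe);
		cur_stripe = 0;
	}
}

int main(void)
{
	for (int i = 0; i < 20; i++)
		queue_stripe();		/* groups fire at 8 and 16 queued */
	return 0;
}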
1854 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_raid56_parity_stripe()
1867 ASSERT(sctx->raid56_data_stripes); in scrub_raid56_parity_stripe()
1870 * For data stripe search, we cannot re-use the same extent/csum paths, in scrub_raid56_parity_stripe()
1871 * as the data stripe bytenr may be smaller than the previous extent. Thus in scrub_raid56_parity_stripe()
1884 stripe = &sctx->raid56_data_stripes[i]; in scrub_raid56_parity_stripe()
1885 rot = div_u64(full_stripe_start - bg->start, in scrub_raid56_parity_stripe()
1887 stripe_index = (i + rot) % map->num_stripes; in scrub_raid56_parity_stripe()
1888 physical = map->stripes[stripe_index].physical + in scrub_raid56_parity_stripe()
1892 set_bit(SCRUB_STRIPE_FLAG_NO_REPORT, &stripe->state); in scrub_raid56_parity_stripe()
1894 map->stripes[stripe_index].dev, physical, 1, in scrub_raid56_parity_stripe()
1900 * No extent in this data stripe, need to manually mark them in scrub_raid56_parity_stripe()
1904 stripe->logical = full_stripe_start + in scrub_raid56_parity_stripe()
1906 stripe->dev = map->stripes[stripe_index].dev; in scrub_raid56_parity_stripe()
1907 stripe->mirror_num = 1; in scrub_raid56_parity_stripe()
1908 set_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state); in scrub_raid56_parity_stripe()
1912 /* Check if all data stripes are empty. */ in scrub_raid56_parity_stripe()
1914 stripe = &sctx->raid56_data_stripes[i]; in scrub_raid56_parity_stripe()
1915 if (!bitmap_empty(&stripe->extent_sector_bitmap, stripe->nr_sectors)) { in scrub_raid56_parity_stripe()
1926 stripe = &sctx->raid56_data_stripes[i]; in scrub_raid56_parity_stripe()
1930 stripe = &sctx->raid56_data_stripes[i]; in scrub_raid56_parity_stripe()
1932 wait_event(stripe->repair_wait, in scrub_raid56_parity_stripe()
1933 test_bit(SCRUB_STRIPE_FLAG_REPAIR_DONE, &stripe->state)); in scrub_raid56_parity_stripe()
1936 ASSERT(!btrfs_is_zoned(sctx->fs_info)); in scrub_raid56_parity_stripe()
1939 * Now all data stripes are properly verified. Check if we have any in scrub_raid56_parity_stripe()
1948 stripe = &sctx->raid56_data_stripes[i]; in scrub_raid56_parity_stripe()
1952 * As we may hit an empty data stripe while its device is missing. in scrub_raid56_parity_stripe()
1954 bitmap_and(&error, &stripe->error_bitmap, in scrub_raid56_parity_stripe()
1955 &stripe->extent_sector_bitmap, stripe->nr_sectors); in scrub_raid56_parity_stripe()
1956 if (!bitmap_empty(&error, stripe->nr_sectors)) { in scrub_raid56_parity_stripe()
1958 "unrepaired sectors detected, full stripe %llu data stripe %u errors %*pbl", in scrub_raid56_parity_stripe()
1959 full_stripe_start, i, stripe->nr_sectors, in scrub_raid56_parity_stripe()
1961 ret = -EIO; in scrub_raid56_parity_stripe()
1965 &stripe->extent_sector_bitmap, stripe->nr_sectors); in scrub_raid56_parity_stripe()
1970 bio->bi_iter.bi_sector = full_stripe_start >> SECTOR_SHIFT; in scrub_raid56_parity_stripe()
1971 bio->bi_private = &io_done; in scrub_raid56_parity_stripe()
1972 bio->bi_end_io = raid56_scrub_wait_endio; in scrub_raid56_parity_stripe()
1983 BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits); in scrub_raid56_parity_stripe()
1986 ret = -ENOMEM; in scrub_raid56_parity_stripe()
1992 stripe = &sctx->raid56_data_stripes[i]; in scrub_raid56_parity_stripe()
1994 raid56_parity_cache_data_pages(rbio, stripe->pages, in scrub_raid56_parity_stripe()
1999 ret = blk_status_to_errno(bio->bi_status); in scrub_raid56_parity_stripe()
2010 * Scrub one range which can only have a simple mirror based profile.
2024 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_simple_mirror()
2030 ASSERT(logical_start >= bg->start && logical_end <= bg->start + bg->length); in scrub_simple_mirror()
2035 u64 cur_physical = physical + cur_logical - logical_start; in scrub_simple_mirror()
2038 if (atomic_read(&fs_info->scrub_cancel_req) || in scrub_simple_mirror()
2039 atomic_read(&sctx->cancel_req)) { in scrub_simple_mirror()
2040 ret = -ECANCELED; in scrub_simple_mirror()
2044 if (atomic_read(&fs_info->scrub_pause_req)) { in scrub_simple_mirror()
2049 spin_lock(&bg->lock); in scrub_simple_mirror()
2050 if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &bg->runtime_flags)) { in scrub_simple_mirror()
2051 spin_unlock(&bg->lock); in scrub_simple_mirror()
2055 spin_unlock(&bg->lock); in scrub_simple_mirror()
2058 cur_logical, logical_end - cur_logical, in scrub_simple_mirror()
2062 sctx->stat.last_physical = physical + logical_length; in scrub_simple_mirror()
2082 ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 | in simple_stripe_full_stripe_len()
2085 return btrfs_stripe_nr_to_offset(map->num_stripes / map->sub_stripes); in simple_stripe_full_stripe_len()
2093 ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 | in simple_stripe_get_logical()
2095 ASSERT(stripe_index < map->num_stripes); in simple_stripe_get_logical()
2098 * (stripe_index / sub_stripes) gives how many data stripes we need to in simple_stripe_get_logical()
2101 return btrfs_stripe_nr_to_offset(stripe_index / map->sub_stripes) + in simple_stripe_get_logical()
2102 bg->start; in simple_stripe_get_logical()
2105 /* Get the mirror number for the stripe */
2108 ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 | in simple_stripe_mirror_num()
2110 ASSERT(stripe_index < map->num_stripes); in simple_stripe_mirror_num()
2113 return stripe_index % map->sub_stripes + 1; in simple_stripe_mirror_num()
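For RAID0/RAID10, the two helpers above reduce to integer division and modulo on stripe_index. A sketch for a 4-disk RAID10 (sub_stripes = 2), assuming btrfs_stripe_nr_to_offset(n) is n * 64K:

#include <stdio.h>

#define BTRFS_STRIPE_LEN	(64 * 1024)

int main(void)
{
	const int num_stripes = 4, sub_stripes = 2;	/* 4-disk RAID10 */
	const unsigned long long bg_start = 1ULL << 30;	/* chunk logical */

	for (int stripe_index = 0; stripe_index < num_stripes; stripe_index++) {
		/* stripe_index / sub_stripes full data stripes to skip. */
		unsigned long long logical = bg_start +
			(unsigned long long)(stripe_index / sub_stripes) *
			BTRFS_STRIPE_LEN;
		int mirror_num = stripe_index % sub_stripes + 1;

		printf("disk %d: first logical %llu, mirror %d\n",
		       stripe_index, logical, mirror_num);
	}
	return 0;
}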
2124 const u64 orig_physical = map->stripes[stripe_index].physical; in scrub_simple_stripe()
2130 while (cur_logical < bg->start + bg->length) { in scrub_simple_stripe()
2155 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_stripe()
2156 struct map_lookup *map = em->map_lookup; in scrub_stripe()
2157 const u64 profile = map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK; in scrub_stripe()
2158 const u64 chunk_logical = bg->start; in scrub_stripe()
2161 u64 physical = map->stripes[stripe_index].physical; in scrub_stripe()
2174 ASSERT(sctx->extent_path.nodes[0] == NULL); in scrub_stripe()
2178 if (sctx->is_dev_replace && in scrub_stripe()
2179 btrfs_dev_is_sequential(sctx->wr_tgtdev, physical)) { in scrub_stripe()
2180 mutex_lock(&sctx->wr_lock); in scrub_stripe()
2181 sctx->write_pointer = physical; in scrub_stripe()
2182 mutex_unlock(&sctx->wr_lock); in scrub_stripe()
2185 /* Prepare the extra data stripes used by RAID56. */ in scrub_stripe()
2187 ASSERT(sctx->raid56_data_stripes == NULL); in scrub_stripe()
2189 sctx->raid56_data_stripes = kcalloc(nr_data_stripes(map), in scrub_stripe()
2192 if (!sctx->raid56_data_stripes) { in scrub_stripe()
2193 ret = -ENOMEM; in scrub_stripe()
2198 &sctx->raid56_data_stripes[i]); in scrub_stripe()
2201 sctx->raid56_data_stripes[i].bg = bg; in scrub_stripe()
2202 sctx->raid56_data_stripes[i].sctx = sctx; in scrub_stripe()
2222 ret = scrub_simple_mirror(sctx, bg, map, bg->start, bg->length, in scrub_stripe()
2223 scrub_dev, map->stripes[stripe_index].physical, in scrub_stripe()
2230 offset = btrfs_stripe_nr_to_offset(stripe_index / map->sub_stripes); in scrub_stripe()
2235 ASSERT(map->type & BTRFS_BLOCK_GROUP_RAID56_MASK); in scrub_stripe()
2266 * Now we're at a data stripe, scrub each extent in the range. in scrub_stripe()
2268 * At this stage, if we ignore the repair part, inside each data in scrub_stripe()
2280 spin_lock(&sctx->stat_lock); in scrub_stripe()
2282 sctx->stat.last_physical = in scrub_stripe()
2283 map->stripes[stripe_index].physical + dev_stripe_len; in scrub_stripe()
2285 sctx->stat.last_physical = physical; in scrub_stripe()
2286 spin_unlock(&sctx->stat_lock); in scrub_stripe()
2294 btrfs_release_path(&sctx->extent_path); in scrub_stripe()
2295 btrfs_release_path(&sctx->csum_path); in scrub_stripe()
2297 if (sctx->raid56_data_stripes) { in scrub_stripe()
2299 release_scrub_stripe(&sctx->raid56_data_stripes[i]); in scrub_stripe()
2300 kfree(sctx->raid56_data_stripes); in scrub_stripe()
2301 sctx->raid56_data_stripes = NULL; in scrub_stripe()
2304 if (sctx->is_dev_replace && ret >= 0) { in scrub_stripe()
2309 map->stripes[stripe_index].physical, in scrub_stripe()
2324 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_chunk()
2325 struct extent_map_tree *map_tree = &fs_info->mapping_tree; in scrub_chunk()
2331 read_lock(&map_tree->lock); in scrub_chunk()
2332 em = lookup_extent_mapping(map_tree, bg->start, bg->length); in scrub_chunk()
2333 read_unlock(&map_tree->lock); in scrub_chunk()
2340 spin_lock(&bg->lock); in scrub_chunk()
2341 if (!test_bit(BLOCK_GROUP_FLAG_REMOVED, &bg->runtime_flags)) in scrub_chunk()
2342 ret = -EINVAL; in scrub_chunk()
2343 spin_unlock(&bg->lock); in scrub_chunk()
2347 if (em->start != bg->start) in scrub_chunk()
2349 if (em->len < dev_extent_len) in scrub_chunk()
2352 map = em->map_lookup; in scrub_chunk()
2353 for (i = 0; i < map->num_stripes; ++i) { in scrub_chunk()
2354 if (map->stripes[i].dev->bdev == scrub_dev->bdev && in scrub_chunk()
2355 map->stripes[i].physical == dev_offset) { in scrub_chunk()
2370 struct btrfs_fs_info *fs_info = cache->fs_info; in finish_extent_writes_for_zoned()
2378 btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start, cache->length); in finish_extent_writes_for_zoned()
2392 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_enumerate_chunks()
2393 struct btrfs_root *root = fs_info->dev_root; in scrub_enumerate_chunks()
2402 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace; in scrub_enumerate_chunks()
2406 return -ENOMEM; in scrub_enumerate_chunks()
2408 path->reada = READA_FORWARD; in scrub_enumerate_chunks()
2409 path->search_commit_root = 1; in scrub_enumerate_chunks()
2410 path->skip_locking = 1; in scrub_enumerate_chunks()
2412 key.objectid = scrub_dev->devid; in scrub_enumerate_chunks()
2423 if (path->slots[0] >= in scrub_enumerate_chunks()
2424 btrfs_header_nritems(path->nodes[0])) { in scrub_enumerate_chunks()
2437 l = path->nodes[0]; in scrub_enumerate_chunks()
2438 slot = path->slots[0]; in scrub_enumerate_chunks()
2442 if (found_key.objectid != scrub_dev->devid) in scrub_enumerate_chunks()
2473 ASSERT(cache->start <= chunk_offset); in scrub_enumerate_chunks()
2493 if (cache->start < chunk_offset) { in scrub_enumerate_chunks()
2498 if (sctx->is_dev_replace && btrfs_is_zoned(fs_info)) { in scrub_enumerate_chunks()
2499 if (!test_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags)) { in scrub_enumerate_chunks()
2513 spin_lock(&cache->lock); in scrub_enumerate_chunks()
2514 if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &cache->runtime_flags)) { in scrub_enumerate_chunks()
2515 spin_unlock(&cache->lock); in scrub_enumerate_chunks()
2520 spin_unlock(&cache->lock); in scrub_enumerate_chunks()
2526 * -> btrfs_wait_for_commit() in scrub_enumerate_chunks()
2527 * -> btrfs_commit_transaction() in scrub_enumerate_chunks()
2528 * -> btrfs_scrub_pause() in scrub_enumerate_chunks()
2536 * -EFBIG from btrfs_finish_chunk_alloc() like: in scrub_enumerate_chunks()
2552 * - Write duplication in scrub_enumerate_chunks()
2553 * Contains latest data in scrub_enumerate_chunks()
2554 * - Scrub copy in scrub_enumerate_chunks()
2555 * Contains data from commit tree in scrub_enumerate_chunks()
2558 * be overwritten by scrub copy, causing data corruption. in scrub_enumerate_chunks()
2559 * So for dev-replace, it's not allowed to continue if a block in scrub_enumerate_chunks()
2562 ret = btrfs_inc_block_group_ro(cache, sctx->is_dev_replace); in scrub_enumerate_chunks()
2563 if (!ret && sctx->is_dev_replace) { in scrub_enumerate_chunks()
2575 } else if (ret == -ENOSPC && !sctx->is_dev_replace && in scrub_enumerate_chunks()
2576 !(cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK)) { in scrub_enumerate_chunks()
2578 * btrfs_inc_block_group_ro return -ENOSPC when it in scrub_enumerate_chunks()
2584 * For RAID56 chunks, we have to mark them read-only in scrub_enumerate_chunks()
2591 } else if (ret == -ETXTBSY) { in scrub_enumerate_chunks()
2594 cache->start); in scrub_enumerate_chunks()
2609 * finish before dev-replace. in scrub_enumerate_chunks()
2612 if (sctx->is_dev_replace) { in scrub_enumerate_chunks()
2614 btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start, in scrub_enumerate_chunks()
2615 cache->length); in scrub_enumerate_chunks()
2619 down_write(&dev_replace->rwsem); in scrub_enumerate_chunks()
2620 dev_replace->cursor_right = found_key.offset + dev_extent_len; in scrub_enumerate_chunks()
2621 dev_replace->cursor_left = found_key.offset; in scrub_enumerate_chunks()
2622 dev_replace->item_needs_writeback = 1; in scrub_enumerate_chunks()
2623 up_write(&dev_replace->rwsem); in scrub_enumerate_chunks()
2627 if (sctx->is_dev_replace && in scrub_enumerate_chunks()
2628 !btrfs_finish_block_group_to_copy(dev_replace->srcdev, in scrub_enumerate_chunks()
2632 down_write(&dev_replace->rwsem); in scrub_enumerate_chunks()
2633 dev_replace->cursor_left = dev_replace->cursor_right; in scrub_enumerate_chunks()
2634 dev_replace->item_needs_writeback = 1; in scrub_enumerate_chunks()
2635 up_write(&dev_replace->rwsem); in scrub_enumerate_chunks()
2647 spin_lock(&cache->lock); in scrub_enumerate_chunks()
2648 if (!test_bit(BLOCK_GROUP_FLAG_REMOVED, &cache->runtime_flags) && in scrub_enumerate_chunks()
2649 !cache->ro && cache->reserved == 0 && cache->used == 0) { in scrub_enumerate_chunks()
2650 spin_unlock(&cache->lock); in scrub_enumerate_chunks()
2652 btrfs_discard_queue_work(&fs_info->discard_ctl, in scrub_enumerate_chunks()
2657 spin_unlock(&cache->lock); in scrub_enumerate_chunks()
2664 if (sctx->is_dev_replace && in scrub_enumerate_chunks()
2665 atomic64_read(&dev_replace->num_write_errors) > 0) { in scrub_enumerate_chunks()
2666 ret = -EIO; in scrub_enumerate_chunks()
2669 if (sctx->stat.malloc_errors > 0) { in scrub_enumerate_chunks()
2670 ret = -ENOMEM; in scrub_enumerate_chunks()
2686 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_one_super()
2692 bio_init(&bio, dev->bdev, &bvec, 1, REQ_OP_READ); in scrub_one_super()
2704 physical, dev->devid); in scrub_one_super()
2705 return -EIO; in scrub_one_super()
2710 physical, dev->devid, in scrub_one_super()
2712 return -EUCLEAN; in scrub_one_super()
2715 return btrfs_validate_super(fs_info, sb, -1); in scrub_one_super()
2726 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_supers()
2729 return -EROFS; in scrub_supers()
2733 spin_lock(&sctx->stat_lock); in scrub_supers()
2734 sctx->stat.malloc_errors++; in scrub_supers()
2735 spin_unlock(&sctx->stat_lock); in scrub_supers()
2736 return -ENOMEM; in scrub_supers()
2740 if (scrub_dev->fs_devices != fs_info->fs_devices) in scrub_supers()
2741 gen = scrub_dev->generation; in scrub_supers()
2743 gen = fs_info->last_trans_committed; in scrub_supers()
2747 if (ret == -ENOENT) in scrub_supers()
2751 spin_lock(&sctx->stat_lock); in scrub_supers()
2752 sctx->stat.super_errors++; in scrub_supers()
2753 spin_unlock(&sctx->stat_lock); in scrub_supers()
2758 scrub_dev->commit_total_bytes) in scrub_supers()
2765 spin_lock(&sctx->stat_lock); in scrub_supers()
2766 sctx->stat.super_errors++; in scrub_supers()
2767 spin_unlock(&sctx->stat_lock); in scrub_supers()
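scrub_supers() walks the (up to three) super block copies on the device and skips any that lie beyond commit_total_bytes. A sketch assuming the conventional btrfs mirror offsets of 64K, 64M and 256G; sb_offset() is an illustrative stand-in for btrfs_sb_offset():

#include <stdio.h>

#define BTRFS_SUPER_INFO_SIZE	4096
#define BTRFS_SUPER_MIRROR_MAX	3

/* Illustrative stand-in for btrfs_sb_offset(): 64K, 64M, 256G. */
static unsigned long long sb_offset(int mirror)
{
	if (mirror == 0)
		return 64ULL << 10;
	return (16ULL << 10) << (12 * mirror);
}

int main(void)
{
	unsigned long long commit_total_bytes = 100ULL << 30;	/* 100G dev */

	for (int i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		unsigned long long off = sb_offset(i);

		/* Copy 2 at 256G lies past a 100G device: skipped. */
		if (off + BTRFS_SUPER_INFO_SIZE > commit_total_bytes)
			continue;
		printf("scrub super copy %d at %llu\n", i, off);
	}
	return 0;
}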
2776 if (refcount_dec_and_mutex_lock(&fs_info->scrub_workers_refcnt, in scrub_workers_put()
2777 &fs_info->scrub_lock)) { in scrub_workers_put()
2778 struct workqueue_struct *scrub_workers = fs_info->scrub_workers; in scrub_workers_put()
2780 fs_info->scrub_workers = NULL; in scrub_workers_put()
2781 mutex_unlock(&fs_info->scrub_lock); in scrub_workers_put()
2789 * Get a reference count on fs_info->scrub_workers. Start the workers if necessary.
2795 int max_active = fs_info->thread_pool_size; in scrub_workers_get()
2796 int ret = -ENOMEM; in scrub_workers_get()
2798 if (refcount_inc_not_zero(&fs_info->scrub_workers_refcnt)) in scrub_workers_get()
2801 scrub_workers = alloc_workqueue("btrfs-scrub", flags, max_active); in scrub_workers_get()
2803 return -ENOMEM; in scrub_workers_get()
2805 mutex_lock(&fs_info->scrub_lock); in scrub_workers_get()
2806 if (refcount_read(&fs_info->scrub_workers_refcnt) == 0) { in scrub_workers_get()
2807 ASSERT(fs_info->scrub_workers == NULL); in scrub_workers_get()
2808 fs_info->scrub_workers = scrub_workers; in scrub_workers_get()
2809 refcount_set(&fs_info->scrub_workers_refcnt, 1); in scrub_workers_get()
2810 mutex_unlock(&fs_info->scrub_lock); in scrub_workers_get()
2814 refcount_inc(&fs_info->scrub_workers_refcnt); in scrub_workers_get()
2815 mutex_unlock(&fs_info->scrub_lock); in scrub_workers_get()
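scrub_workers_get()/scrub_workers_put() implement a lazily-created, refcounted shared workqueue: the first scrub allocates it, concurrent scrubs only bump the count, and the last put tears it down. A simplified pthread model (it drops the lock-free refcount_inc_not_zero() fast path and the allocate-before-lock dance of the real code):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int refcnt;		/* models scrub_workers_refcnt */
static void *workers;		/* models fs_info->scrub_workers */

static void workers_get(void)
{
	pthread_mutex_lock(&lock);
	if (refcnt++ == 0)
		workers = &refcnt;	/* stand-in for alloc_workqueue() */
	pthread_mutex_unlock(&lock);
}

static void workers_put(void)
{
	pthread_mutex_lock(&lock);
	if (--refcnt == 0) {
		workers = NULL;		/* stand-in for destroy_workqueue() */
		printf("workers destroyed\n");
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	workers_get();			/* first scrub creates the queue */
	workers_get();			/* a concurrent scrub shares it */
	workers_put();
	workers_put();			/* last put tears it down */
	return 0;
}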
2835 return -EAGAIN; in btrfs_scrub_dev()
2838 ASSERT(fs_info->nodesize <= BTRFS_STRIPE_LEN); in btrfs_scrub_dev()
2845 ASSERT(fs_info->nodesize <= in btrfs_scrub_dev()
2846 SCRUB_MAX_SECTORS_PER_BLOCK << fs_info->sectorsize_bits); in btrfs_scrub_dev()
2857 mutex_lock(&fs_info->fs_devices->device_list_mutex); in btrfs_scrub_dev()
2858 dev = btrfs_find_device(fs_info->fs_devices, &args); in btrfs_scrub_dev()
2859 if (!dev || (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) && in btrfs_scrub_dev()
2861 mutex_unlock(&fs_info->fs_devices->device_list_mutex); in btrfs_scrub_dev()
2862 ret = -ENODEV; in btrfs_scrub_dev()
2867 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) { in btrfs_scrub_dev()
2868 mutex_unlock(&fs_info->fs_devices->device_list_mutex); in btrfs_scrub_dev()
2872 ret = -EROFS; in btrfs_scrub_dev()
2876 mutex_lock(&fs_info->scrub_lock); in btrfs_scrub_dev()
2877 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) || in btrfs_scrub_dev()
2878 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &dev->dev_state)) { in btrfs_scrub_dev()
2879 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_dev()
2880 mutex_unlock(&fs_info->fs_devices->device_list_mutex); in btrfs_scrub_dev()
2881 ret = -EIO; in btrfs_scrub_dev()
2885 down_read(&fs_info->dev_replace.rwsem); in btrfs_scrub_dev()
2886 if (dev->scrub_ctx || in btrfs_scrub_dev()
2888 btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) { in btrfs_scrub_dev()
2889 up_read(&fs_info->dev_replace.rwsem); in btrfs_scrub_dev()
2890 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_dev()
2891 mutex_unlock(&fs_info->fs_devices->device_list_mutex); in btrfs_scrub_dev()
2892 ret = -EINPROGRESS; in btrfs_scrub_dev()
2895 up_read(&fs_info->dev_replace.rwsem); in btrfs_scrub_dev()
2897 sctx->readonly = readonly; in btrfs_scrub_dev()
2898 dev->scrub_ctx = sctx; in btrfs_scrub_dev()
2899 mutex_unlock(&fs_info->fs_devices->device_list_mutex); in btrfs_scrub_dev()
2906 atomic_inc(&fs_info->scrubs_running); in btrfs_scrub_dev()
2907 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_dev()
2916 * before incrementing fs_info->scrubs_running). in btrfs_scrub_dev()
2922 spin_lock(&sctx->stat_lock); in btrfs_scrub_dev()
2923 old_super_errors = sctx->stat.super_errors; in btrfs_scrub_dev()
2924 spin_unlock(&sctx->stat_lock); in btrfs_scrub_dev()
2931 mutex_lock(&fs_info->fs_devices->device_list_mutex); in btrfs_scrub_dev()
2933 mutex_unlock(&fs_info->fs_devices->device_list_mutex); in btrfs_scrub_dev()
2935 spin_lock(&sctx->stat_lock); in btrfs_scrub_dev()
2941 if (sctx->stat.super_errors > old_super_errors && !sctx->readonly) in btrfs_scrub_dev()
2943 spin_unlock(&sctx->stat_lock); in btrfs_scrub_dev()
2950 atomic_dec(&fs_info->scrubs_running); in btrfs_scrub_dev()
2951 wake_up(&fs_info->scrub_pause_wait); in btrfs_scrub_dev()
2954 memcpy(progress, &sctx->stat, sizeof(*progress)); in btrfs_scrub_dev()
2960 mutex_lock(&fs_info->scrub_lock); in btrfs_scrub_dev()
2961 dev->scrub_ctx = NULL; in btrfs_scrub_dev()
2962 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_dev()
2974 trans = btrfs_start_transaction(fs_info->tree_root, 0); in btrfs_scrub_dev()
2997 mutex_lock(&fs_info->scrub_lock); in btrfs_scrub_pause()
2998 atomic_inc(&fs_info->scrub_pause_req); in btrfs_scrub_pause()
2999 while (atomic_read(&fs_info->scrubs_paused) != in btrfs_scrub_pause()
3000 atomic_read(&fs_info->scrubs_running)) { in btrfs_scrub_pause()
3001 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_pause()
3002 wait_event(fs_info->scrub_pause_wait, in btrfs_scrub_pause()
3003 atomic_read(&fs_info->scrubs_paused) == in btrfs_scrub_pause()
3004 atomic_read(&fs_info->scrubs_running)); in btrfs_scrub_pause()
3005 mutex_lock(&fs_info->scrub_lock); in btrfs_scrub_pause()
3007 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_pause()
3012 atomic_dec(&fs_info->scrub_pause_req); in btrfs_scrub_continue()
3013 wake_up(&fs_info->scrub_pause_wait); in btrfs_scrub_continue()
3018 mutex_lock(&fs_info->scrub_lock); in btrfs_scrub_cancel()
3019 if (!atomic_read(&fs_info->scrubs_running)) { in btrfs_scrub_cancel()
3020 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_cancel()
3021 return -ENOTCONN; in btrfs_scrub_cancel()
3024 atomic_inc(&fs_info->scrub_cancel_req); in btrfs_scrub_cancel()
3025 while (atomic_read(&fs_info->scrubs_running)) { in btrfs_scrub_cancel()
3026 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_cancel()
3027 wait_event(fs_info->scrub_pause_wait, in btrfs_scrub_cancel()
3028 atomic_read(&fs_info->scrubs_running) == 0); in btrfs_scrub_cancel()
3029 mutex_lock(&fs_info->scrub_lock); in btrfs_scrub_cancel()
3031 atomic_dec(&fs_info->scrub_cancel_req); in btrfs_scrub_cancel()
3032 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_cancel()
3039 struct btrfs_fs_info *fs_info = dev->fs_info; in btrfs_scrub_cancel_dev()
3042 mutex_lock(&fs_info->scrub_lock); in btrfs_scrub_cancel_dev()
3043 sctx = dev->scrub_ctx; in btrfs_scrub_cancel_dev()
3045 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_cancel_dev()
3046 return -ENOTCONN; in btrfs_scrub_cancel_dev()
3048 atomic_inc(&sctx->cancel_req); in btrfs_scrub_cancel_dev()
3049 while (dev->scrub_ctx) { in btrfs_scrub_cancel_dev()
3050 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_cancel_dev()
3051 wait_event(fs_info->scrub_pause_wait, in btrfs_scrub_cancel_dev()
3052 dev->scrub_ctx == NULL); in btrfs_scrub_cancel_dev()
3053 mutex_lock(&fs_info->scrub_lock); in btrfs_scrub_cancel_dev()
3055 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_cancel_dev()
3067 mutex_lock(&fs_info->fs_devices->device_list_mutex); in btrfs_scrub_progress()
3068 dev = btrfs_find_device(fs_info->fs_devices, &args); in btrfs_scrub_progress()
3070 sctx = dev->scrub_ctx; in btrfs_scrub_progress()
3072 memcpy(progress, &sctx->stat, sizeof(*progress)); in btrfs_scrub_progress()
3073 mutex_unlock(&fs_info->fs_devices->device_list_mutex); in btrfs_scrub_progress()
3075 return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV; in btrfs_scrub_progress()