Lines Matching +full:path +full:- +full:map
1 // SPDX-License-Identifier: GPL-2.0
13 #include "disk-io.h"
14 #include "ordered-data.h"
18 #include "dev-replace.h"
19 #include "check-integrity.h"
21 #include "block-group.h"
25 #include "file-item.h"
29 * This is only the first step towards a full-featured scrub. It reads all
35 * - In case an unrepairable extent is encountered, track which files are
37 * - track and record media errors, throw out bad devices
38 * - add a mode to also read unallocated space
54 * This would be 8M per device, the same value as the old scrub in-flight bios
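A rough check of the 8M figure, assuming the stripe constants defined earlier in this file (SCRUB_STRIPES_PER_GROUP = 8, SCRUB_GROUPS_PER_DEV = 16, BTRFS_STRIPE_LEN = 64KiB; none of them are among the matched lines):

/*
 * Sketch of the arithmetic behind the 8M figure (constants assumed):
 *   SCRUB_TOTAL_STRIPES = SCRUB_GROUPS_PER_DEV * SCRUB_STRIPES_PER_GROUP
 *                       = 16 * 8 = 128 stripes in flight per device
 *   128 stripes * BTRFS_STRIPE_LEN (64KiB) = 8MiB per device
 */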
92 /* Set when the read-repair is finished. */
217 * Use a ref counter to avoid use-after-free issues. Scrub workers
227 struct btrfs_path *path; member
241 if (stripe->pages[i]) in release_scrub_stripe()
242 __free_page(stripe->pages[i]); in release_scrub_stripe()
243 stripe->pages[i] = NULL; in release_scrub_stripe()
245 kfree(stripe->sectors); in release_scrub_stripe()
246 kfree(stripe->csums); in release_scrub_stripe()
247 stripe->sectors = NULL; in release_scrub_stripe()
248 stripe->csums = NULL; in release_scrub_stripe()
249 stripe->sctx = NULL; in release_scrub_stripe()
250 stripe->state = 0; in release_scrub_stripe()
260 stripe->nr_sectors = BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits; in init_scrub_stripe()
261 stripe->state = 0; in init_scrub_stripe()
263 init_waitqueue_head(&stripe->io_wait); in init_scrub_stripe()
264 init_waitqueue_head(&stripe->repair_wait); in init_scrub_stripe()
265 atomic_set(&stripe->pending_io, 0); in init_scrub_stripe()
266 spin_lock_init(&stripe->write_error_lock); in init_scrub_stripe()
268 ret = btrfs_alloc_page_array(SCRUB_STRIPE_PAGES, stripe->pages); in init_scrub_stripe()
272 stripe->sectors = kcalloc(stripe->nr_sectors, in init_scrub_stripe()
275 if (!stripe->sectors) in init_scrub_stripe()
278 stripe->csums = kcalloc(BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits, in init_scrub_stripe()
279 fs_info->csum_size, GFP_KERNEL); in init_scrub_stripe()
280 if (!stripe->csums) in init_scrub_stripe()
285 return -ENOMEM; in init_scrub_stripe()
290 wait_event(stripe->io_wait, atomic_read(&stripe->pending_io) == 0); in wait_scrub_stripe_io()
297 while (atomic_read(&fs_info->scrub_pause_req)) { in __scrub_blocked_if_needed()
298 mutex_unlock(&fs_info->scrub_lock); in __scrub_blocked_if_needed()
299 wait_event(fs_info->scrub_pause_wait, in __scrub_blocked_if_needed()
300 atomic_read(&fs_info->scrub_pause_req) == 0); in __scrub_blocked_if_needed()
301 mutex_lock(&fs_info->scrub_lock); in __scrub_blocked_if_needed()
307 atomic_inc(&fs_info->scrubs_paused); in scrub_pause_on()
308 wake_up(&fs_info->scrub_pause_wait); in scrub_pause_on()
313 mutex_lock(&fs_info->scrub_lock); in scrub_pause_off()
315 atomic_dec(&fs_info->scrubs_paused); in scrub_pause_off()
316 mutex_unlock(&fs_info->scrub_lock); in scrub_pause_off()
318 wake_up(&fs_info->scrub_pause_wait); in scrub_pause_off()
335 release_scrub_stripe(&sctx->stripes[i]); in scrub_free_ctx()
342 if (refcount_dec_and_test(&sctx->refs)) in scrub_put_ctx()
358 refcount_set(&sctx->refs, 1); in scrub_setup_ctx()
359 sctx->is_dev_replace = is_dev_replace; in scrub_setup_ctx()
360 sctx->fs_info = fs_info; in scrub_setup_ctx()
361 sctx->extent_path.search_commit_root = 1; in scrub_setup_ctx()
362 sctx->extent_path.skip_locking = 1; in scrub_setup_ctx()
363 sctx->csum_path.search_commit_root = 1; in scrub_setup_ctx()
364 sctx->csum_path.skip_locking = 1; in scrub_setup_ctx()
368 ret = init_scrub_stripe(fs_info, &sctx->stripes[i]); in scrub_setup_ctx()
371 sctx->stripes[i].sctx = sctx; in scrub_setup_ctx()
373 sctx->first_free = 0; in scrub_setup_ctx()
374 atomic_set(&sctx->cancel_req, 0); in scrub_setup_ctx()
376 spin_lock_init(&sctx->stat_lock); in scrub_setup_ctx()
377 sctx->throttle_deadline = 0; in scrub_setup_ctx()
379 mutex_init(&sctx->wr_lock); in scrub_setup_ctx()
381 WARN_ON(!fs_info->dev_replace.tgtdev); in scrub_setup_ctx()
382 sctx->wr_tgtdev = fs_info->dev_replace.tgtdev; in scrub_setup_ctx()
389 return ERR_PTR(-ENOMEM); in scrub_setup_ctx()
402 struct btrfs_fs_info *fs_info = swarn->dev->fs_info; in scrub_print_warning_inode()
414 * this makes the path point to (inum INODE_ITEM ioff) in scrub_print_warning_inode()
420 ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0); in scrub_print_warning_inode()
423 btrfs_release_path(swarn->path); in scrub_print_warning_inode()
427 eb = swarn->path->nodes[0]; in scrub_print_warning_inode()
428 inode_item = btrfs_item_ptr(eb, swarn->path->slots[0], in scrub_print_warning_inode()
431 btrfs_release_path(swarn->path); in scrub_print_warning_inode()
439 ipath = init_ipath(4096, local_root, swarn->path); in scrub_print_warning_inode()
456 for (i = 0; i < ipath->fspath->elem_cnt; ++i) in scrub_print_warning_inode()
458 …%llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu, length %u, links %u (path: %s)", in scrub_print_warning_inode()
459 swarn->errstr, swarn->logical, in scrub_print_warning_inode()
460 btrfs_dev_name(swarn->dev), in scrub_print_warning_inode()
461 swarn->physical, in scrub_print_warning_inode()
463 fs_info->sectorsize, nlink, in scrub_print_warning_inode()
464 (char *)(unsigned long)ipath->fspath->val[i]); in scrub_print_warning_inode()
472 …"%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu: path resolving f… in scrub_print_warning_inode()
473 swarn->errstr, swarn->logical, in scrub_print_warning_inode()
474 btrfs_dev_name(swarn->dev), in scrub_print_warning_inode()
475 swarn->physical, in scrub_print_warning_inode()
485 struct btrfs_fs_info *fs_info = dev->fs_info; in scrub_print_common_warning()
486 struct btrfs_path *path; in scrub_print_common_warning() local
501 path = btrfs_alloc_path(); in scrub_print_common_warning()
502 if (!path) in scrub_print_common_warning()
510 ret = extent_from_logical(fs_info, swarn.logical, path, &found_key, in scrub_print_common_warning()
517 eb = path->nodes[0]; in scrub_print_common_warning()
518 ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item); in scrub_print_common_warning()
519 item_size = btrfs_item_size(eb, path->slots[0]); in scrub_print_common_warning()
544 btrfs_release_path(path); in scrub_print_common_warning()
548 btrfs_release_path(path); in scrub_print_common_warning()
551 ctx.extent_item_pos = swarn.logical - found_key.objectid; in scrub_print_common_warning()
554 swarn.path = path; in scrub_print_common_warning()
561 btrfs_free_path(path); in scrub_print_common_warning()
569 if (!btrfs_is_zoned(sctx->fs_info)) in fill_writer_pointer_gap()
572 if (!btrfs_dev_is_sequential(sctx->wr_tgtdev, physical)) in fill_writer_pointer_gap()
575 if (sctx->write_pointer < physical) { in fill_writer_pointer_gap()
576 length = physical - sctx->write_pointer; in fill_writer_pointer_gap()
578 ret = btrfs_zoned_issue_zeroout(sctx->wr_tgtdev, in fill_writer_pointer_gap()
579 sctx->write_pointer, length); in fill_writer_pointer_gap()
581 sctx->write_pointer = physical; in fill_writer_pointer_gap()
588 struct btrfs_fs_info *fs_info = stripe->bg->fs_info; in scrub_stripe_get_page()
589 int page_index = (sector_nr << fs_info->sectorsize_bits) >> PAGE_SHIFT; in scrub_stripe_get_page()
591 return stripe->pages[page_index]; in scrub_stripe_get_page()
597 struct btrfs_fs_info *fs_info = stripe->bg->fs_info; in scrub_stripe_get_page_offset()
599 return offset_in_page(sector_nr << fs_info->sectorsize_bits); in scrub_stripe_get_page_offset()
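The two helpers above translate a sector number inside a stripe into a backing page plus an in-page offset. A worked example, assuming 4KiB sectors (sectorsize_bits = 12):

/*
 * sector_nr = 5, byte offset in stripe = 5 << 12 = 20480
 *   4KiB pages (PAGE_SHIFT = 12):  page_index = 20480 >> 12 = 5,
 *                                  offset_in_page(20480) = 0
 *   64KiB pages (PAGE_SHIFT = 16): page_index = 20480 >> 16 = 0,
 *                                  offset_in_page(20480) = 20480
 */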
604 struct btrfs_fs_info *fs_info = stripe->bg->fs_info; in scrub_verify_one_metadata()
605 const u32 sectors_per_tree = fs_info->nodesize >> fs_info->sectorsize_bits; in scrub_verify_one_metadata()
606 const u64 logical = stripe->logical + (sector_nr << fs_info->sectorsize_bits); in scrub_verify_one_metadata()
609 SHASH_DESC_ON_STACK(shash, fs_info->csum_shash); in scrub_verify_one_metadata()
620 memcpy(on_disk_csum, header->csum, fs_info->csum_size); in scrub_verify_one_metadata()
623 bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree); in scrub_verify_one_metadata()
624 bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree); in scrub_verify_one_metadata()
627 logical, stripe->mirror_num, in scrub_verify_one_metadata()
631 if (memcmp(header->fsid, fs_info->fs_devices->metadata_uuid, in scrub_verify_one_metadata()
633 bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree); in scrub_verify_one_metadata()
634 bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree); in scrub_verify_one_metadata()
637 logical, stripe->mirror_num, in scrub_verify_one_metadata()
638 header->fsid, fs_info->fs_devices->fsid); in scrub_verify_one_metadata()
641 if (memcmp(header->chunk_tree_uuid, fs_info->chunk_tree_uuid, in scrub_verify_one_metadata()
643 bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree); in scrub_verify_one_metadata()
644 bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree); in scrub_verify_one_metadata()
647 logical, stripe->mirror_num, in scrub_verify_one_metadata()
648 header->chunk_tree_uuid, fs_info->chunk_tree_uuid); in scrub_verify_one_metadata()
653 shash->tfm = fs_info->csum_shash; in scrub_verify_one_metadata()
656 BTRFS_CSUM_SIZE, fs_info->sectorsize - BTRFS_CSUM_SIZE); in scrub_verify_one_metadata()
663 fs_info->sectorsize); in scrub_verify_one_metadata()
667 if (memcmp(calculated_csum, on_disk_csum, fs_info->csum_size) != 0) { in scrub_verify_one_metadata()
668 bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree); in scrub_verify_one_metadata()
669 bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree); in scrub_verify_one_metadata()
672 logical, stripe->mirror_num, in scrub_verify_one_metadata()
673 CSUM_FMT_VALUE(fs_info->csum_size, on_disk_csum), in scrub_verify_one_metadata()
674 CSUM_FMT_VALUE(fs_info->csum_size, calculated_csum)); in scrub_verify_one_metadata()
677 if (stripe->sectors[sector_nr].generation != in scrub_verify_one_metadata()
679 bitmap_set(&stripe->meta_gen_error_bitmap, sector_nr, sectors_per_tree); in scrub_verify_one_metadata()
680 bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree); in scrub_verify_one_metadata()
683 logical, stripe->mirror_num, in scrub_verify_one_metadata()
685 stripe->sectors[sector_nr].generation); in scrub_verify_one_metadata()
688 bitmap_clear(&stripe->error_bitmap, sector_nr, sectors_per_tree); in scrub_verify_one_metadata()
689 bitmap_clear(&stripe->csum_error_bitmap, sector_nr, sectors_per_tree); in scrub_verify_one_metadata()
690 bitmap_clear(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree); in scrub_verify_one_metadata()
691 bitmap_clear(&stripe->meta_gen_error_bitmap, sector_nr, sectors_per_tree); in scrub_verify_one_metadata()
696 struct btrfs_fs_info *fs_info = stripe->bg->fs_info; in scrub_verify_one_sector()
697 struct scrub_sector_verification *sector = &stripe->sectors[sector_nr]; in scrub_verify_one_sector()
698 const u32 sectors_per_tree = fs_info->nodesize >> fs_info->sectorsize_bits; in scrub_verify_one_sector()
704 ASSERT(sector_nr >= 0 && sector_nr < stripe->nr_sectors); in scrub_verify_one_sector()
707 if (!test_bit(sector_nr, &stripe->extent_sector_bitmap)) in scrub_verify_one_sector()
711 if (test_bit(sector_nr, &stripe->io_error_bitmap)) in scrub_verify_one_sector()
715 if (sector->is_metadata) { in scrub_verify_one_sector()
724 if (unlikely(sector_nr + sectors_per_tree > stripe->nr_sectors)) { in scrub_verify_one_sector()
727 stripe->logical + in scrub_verify_one_sector()
728 (sector_nr << fs_info->sectorsize_bits), in scrub_verify_one_sector()
729 stripe->logical); in scrub_verify_one_sector()
740 if (!sector->csum) { in scrub_verify_one_sector()
741 clear_bit(sector_nr, &stripe->error_bitmap); in scrub_verify_one_sector()
745 ret = btrfs_check_sector_csum(fs_info, page, pgoff, csum_buf, sector->csum); in scrub_verify_one_sector()
747 set_bit(sector_nr, &stripe->csum_error_bitmap); in scrub_verify_one_sector()
748 set_bit(sector_nr, &stripe->error_bitmap); in scrub_verify_one_sector()
750 clear_bit(sector_nr, &stripe->csum_error_bitmap); in scrub_verify_one_sector()
751 clear_bit(sector_nr, &stripe->error_bitmap); in scrub_verify_one_sector()
758 struct btrfs_fs_info *fs_info = stripe->bg->fs_info; in scrub_verify_one_stripe()
759 const u32 sectors_per_tree = fs_info->nodesize >> fs_info->sectorsize_bits; in scrub_verify_one_stripe()
762 for_each_set_bit(sector_nr, &bitmap, stripe->nr_sectors) { in scrub_verify_one_stripe()
764 if (stripe->sectors[sector_nr].is_metadata) in scrub_verify_one_stripe()
765 sector_nr += sectors_per_tree - 1; in scrub_verify_one_stripe()
773 for (i = 0; i < stripe->nr_sectors; i++) { in calc_sector_number()
774 if (scrub_stripe_get_page(stripe, i) == first_bvec->bv_page && in calc_sector_number()
775 scrub_stripe_get_page_offset(stripe, i) == first_bvec->bv_offset) in calc_sector_number()
778 ASSERT(i < stripe->nr_sectors); in calc_sector_number()
785 * - Only reads the failed sectors
786 * - May have extra blocksize limits
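A note on the blocksize limit mentioned above, inferred from how scrub_stripe_submit_repair_read() is invoked further down (only the tail of one call appears in the matched lines, so the exact call forms are assumptions):

/*
 * Sketch of the two repair passes (call forms assumed):
 *   scrub_stripe_submit_repair_read(stripe, mirror, BTRFS_STRIPE_LEN, false);
 *       - merges contiguous failed sectors into larger bios, up to a stripe
 *   scrub_stripe_submit_repair_read(stripe, mirror, fs_info->sectorsize, true);
 *       - the last-resort pass, one sector per bio, waiting for each read
 */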
790 struct scrub_stripe *stripe = bbio->private; in scrub_repair_read_endio()
791 struct btrfs_fs_info *fs_info = stripe->bg->fs_info; in scrub_repair_read_endio()
793 int sector_nr = calc_sector_number(stripe, bio_first_bvec_all(&bbio->bio)); in scrub_repair_read_endio()
797 ASSERT(sector_nr < stripe->nr_sectors); in scrub_repair_read_endio()
799 bio_for_each_bvec_all(bvec, &bbio->bio, i) in scrub_repair_read_endio()
800 bio_size += bvec->bv_len; in scrub_repair_read_endio()
802 if (bbio->bio.bi_status) { in scrub_repair_read_endio()
803 bitmap_set(&stripe->io_error_bitmap, sector_nr, in scrub_repair_read_endio()
804 bio_size >> fs_info->sectorsize_bits); in scrub_repair_read_endio()
805 bitmap_set(&stripe->error_bitmap, sector_nr, in scrub_repair_read_endio()
806 bio_size >> fs_info->sectorsize_bits); in scrub_repair_read_endio()
808 bitmap_clear(&stripe->io_error_bitmap, sector_nr, in scrub_repair_read_endio()
809 bio_size >> fs_info->sectorsize_bits); in scrub_repair_read_endio()
811 bio_put(&bbio->bio); in scrub_repair_read_endio()
812 if (atomic_dec_and_test(&stripe->pending_io)) in scrub_repair_read_endio()
813 wake_up(&stripe->io_wait); in scrub_repair_read_endio()
825 struct btrfs_fs_info *fs_info = stripe->bg->fs_info; in scrub_stripe_submit_repair_read()
827 const unsigned long old_error_bitmap = stripe->error_bitmap; in scrub_stripe_submit_repair_read()
830 ASSERT(stripe->mirror_num >= 1); in scrub_stripe_submit_repair_read()
831 ASSERT(atomic_read(&stripe->pending_io) == 0); in scrub_stripe_submit_repair_read()
833 for_each_set_bit(i, &old_error_bitmap, stripe->nr_sectors) { in scrub_stripe_submit_repair_read()
842 if (bbio && ((i > 0 && !test_bit(i - 1, &stripe->error_bitmap)) || in scrub_stripe_submit_repair_read()
843 bbio->bio.bi_iter.bi_size >= blocksize)) { in scrub_stripe_submit_repair_read()
844 ASSERT(bbio->bio.bi_iter.bi_size); in scrub_stripe_submit_repair_read()
845 atomic_inc(&stripe->pending_io); in scrub_stripe_submit_repair_read()
853 bbio = btrfs_bio_alloc(stripe->nr_sectors, REQ_OP_READ, in scrub_stripe_submit_repair_read()
855 bbio->bio.bi_iter.bi_sector = (stripe->logical + in scrub_stripe_submit_repair_read()
856 (i << fs_info->sectorsize_bits)) >> SECTOR_SHIFT; in scrub_stripe_submit_repair_read()
859 ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff); in scrub_stripe_submit_repair_read()
860 ASSERT(ret == fs_info->sectorsize); in scrub_stripe_submit_repair_read()
863 ASSERT(bbio->bio.bi_iter.bi_size); in scrub_stripe_submit_repair_read()
864 atomic_inc(&stripe->pending_io); in scrub_stripe_submit_repair_read()
876 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_stripe_report_errors()
885 if (test_bit(SCRUB_STRIPE_FLAG_NO_REPORT, &stripe->state)) in scrub_stripe_report_errors()
894 if (!bitmap_empty(&stripe->init_error_bitmap, stripe->nr_sectors)) { in scrub_stripe_report_errors()
895 u64 mapped_len = fs_info->sectorsize; in scrub_stripe_report_errors()
897 int stripe_index = stripe->mirror_num - 1; in scrub_stripe_report_errors()
901 ASSERT(stripe->mirror_num >= 1); in scrub_stripe_report_errors()
903 stripe->logical, &mapped_len, &bioc, in scrub_stripe_report_errors()
911 physical = bioc->stripes[stripe_index].physical; in scrub_stripe_report_errors()
912 dev = bioc->stripes[stripe_index].dev; in scrub_stripe_report_errors()
917 for_each_set_bit(sector_nr, &stripe->extent_sector_bitmap, stripe->nr_sectors) { in scrub_stripe_report_errors()
920 if (stripe->sectors[sector_nr].is_metadata) { in scrub_stripe_report_errors()
924 if (!stripe->sectors[sector_nr].csum) in scrub_stripe_report_errors()
928 if (test_bit(sector_nr, &stripe->init_error_bitmap) && in scrub_stripe_report_errors()
929 !test_bit(sector_nr, &stripe->error_bitmap)) { in scrub_stripe_report_errors()
935 if (!test_bit(sector_nr, &stripe->init_error_bitmap)) in scrub_stripe_report_errors()
946 stripe->logical, btrfs_dev_name(dev), in scrub_stripe_report_errors()
951 stripe->logical, stripe->mirror_num); in scrub_stripe_report_errors()
960 stripe->logical, btrfs_dev_name(dev), in scrub_stripe_report_errors()
965 stripe->logical, stripe->mirror_num); in scrub_stripe_report_errors()
968 if (test_bit(sector_nr, &stripe->io_error_bitmap)) in scrub_stripe_report_errors()
971 stripe->logical, physical); in scrub_stripe_report_errors()
972 if (test_bit(sector_nr, &stripe->csum_error_bitmap)) in scrub_stripe_report_errors()
975 stripe->logical, physical); in scrub_stripe_report_errors()
976 if (test_bit(sector_nr, &stripe->meta_error_bitmap)) in scrub_stripe_report_errors()
979 stripe->logical, physical); in scrub_stripe_report_errors()
980 if (test_bit(sector_nr, &stripe->meta_gen_error_bitmap)) in scrub_stripe_report_errors()
983 stripe->logical, physical); in scrub_stripe_report_errors()
987 for (int i = 0; i < stripe->init_nr_io_errors; i++) in scrub_stripe_report_errors()
988 btrfs_dev_stat_inc_and_print(stripe->dev, BTRFS_DEV_STAT_READ_ERRS); in scrub_stripe_report_errors()
989 for (int i = 0; i < stripe->init_nr_csum_errors; i++) in scrub_stripe_report_errors()
990 btrfs_dev_stat_inc_and_print(stripe->dev, BTRFS_DEV_STAT_CORRUPTION_ERRS); in scrub_stripe_report_errors()
992 for (int i = 0; i < stripe->init_nr_meta_gen_errors; in scrub_stripe_report_errors()
993 i += (fs_info->nodesize >> fs_info->sectorsize_bits)) in scrub_stripe_report_errors()
994 btrfs_dev_stat_inc_and_print(stripe->dev, BTRFS_DEV_STAT_GENERATION_ERRS); in scrub_stripe_report_errors()
996 spin_lock(&sctx->stat_lock); in scrub_stripe_report_errors()
997 sctx->stat.data_extents_scrubbed += stripe->nr_data_extents; in scrub_stripe_report_errors()
998 sctx->stat.tree_extents_scrubbed += stripe->nr_meta_extents; in scrub_stripe_report_errors()
999 sctx->stat.data_bytes_scrubbed += nr_data_sectors << fs_info->sectorsize_bits; in scrub_stripe_report_errors()
1000 sctx->stat.tree_bytes_scrubbed += nr_meta_sectors << fs_info->sectorsize_bits; in scrub_stripe_report_errors()
1001 sctx->stat.no_csum += nr_nodatacsum_sectors; in scrub_stripe_report_errors()
1002 sctx->stat.read_errors += stripe->init_nr_io_errors; in scrub_stripe_report_errors()
1003 sctx->stat.csum_errors += stripe->init_nr_csum_errors; in scrub_stripe_report_errors()
1004 sctx->stat.verify_errors += stripe->init_nr_meta_errors + in scrub_stripe_report_errors()
1005 stripe->init_nr_meta_gen_errors; in scrub_stripe_report_errors()
1006 sctx->stat.uncorrectable_errors += in scrub_stripe_report_errors()
1007 bitmap_weight(&stripe->error_bitmap, stripe->nr_sectors); in scrub_stripe_report_errors()
1008 sctx->stat.corrected_errors += nr_repaired_sectors; in scrub_stripe_report_errors()
1009 spin_unlock(&sctx->stat_lock); in scrub_stripe_report_errors()
1018 * - Wait for the initial read to finish
1019 * - Verify and locate any bad sectors
1020 * - Go through the remaining mirrors and try to read as large blocksize as
1022 * - Go through all mirrors (including the failed mirror) sector-by-sector
1023 * - Submit writeback for repaired sectors
1025 * Writeback for dev-replace does not happen here, it needs extra
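The loop over the remaining mirrors below relies on calc_next_mirror(), which is not among the matched lines; a minimal sketch of what that helper presumably does:

/* Sketch only: advance to the next mirror, wrapping back to mirror 1. */
static int calc_next_mirror(int mirror, int num_copies)
{
	return (mirror + 1 > num_copies) ? 1 : mirror + 1;
}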
1031 struct scrub_ctx *sctx = stripe->sctx; in scrub_stripe_read_repair_worker()
1032 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_stripe_read_repair_worker()
1033 int num_copies = btrfs_num_copies(fs_info, stripe->bg->start, in scrub_stripe_read_repair_worker()
1034 stripe->bg->length); in scrub_stripe_read_repair_worker()
1039 ASSERT(stripe->mirror_num > 0); in scrub_stripe_read_repair_worker()
1042 scrub_verify_one_stripe(stripe, stripe->extent_sector_bitmap); in scrub_stripe_read_repair_worker()
1044 stripe->init_error_bitmap = stripe->error_bitmap; in scrub_stripe_read_repair_worker()
1045 stripe->init_nr_io_errors = bitmap_weight(&stripe->io_error_bitmap, in scrub_stripe_read_repair_worker()
1046 stripe->nr_sectors); in scrub_stripe_read_repair_worker()
1047 stripe->init_nr_csum_errors = bitmap_weight(&stripe->csum_error_bitmap, in scrub_stripe_read_repair_worker()
1048 stripe->nr_sectors); in scrub_stripe_read_repair_worker()
1049 stripe->init_nr_meta_errors = bitmap_weight(&stripe->meta_error_bitmap, in scrub_stripe_read_repair_worker()
1050 stripe->nr_sectors); in scrub_stripe_read_repair_worker()
1051 stripe->init_nr_meta_gen_errors = bitmap_weight(&stripe->meta_gen_error_bitmap, in scrub_stripe_read_repair_worker()
1052 stripe->nr_sectors); in scrub_stripe_read_repair_worker()
1054 if (bitmap_empty(&stripe->init_error_bitmap, stripe->nr_sectors)) in scrub_stripe_read_repair_worker()
1063 for (mirror = calc_next_mirror(stripe->mirror_num, num_copies); in scrub_stripe_read_repair_worker()
1064 mirror != stripe->mirror_num; in scrub_stripe_read_repair_worker()
1066 const unsigned long old_error_bitmap = stripe->error_bitmap; in scrub_stripe_read_repair_worker()
1072 if (bitmap_empty(&stripe->error_bitmap, stripe->nr_sectors)) in scrub_stripe_read_repair_worker()
1077 * Last safety net, try re-checking all mirrors, including the failed in scrub_stripe_read_repair_worker()
1078 * one, sector-by-sector. in scrub_stripe_read_repair_worker()
1082 * Thus here we do sector-by-sector read. in scrub_stripe_read_repair_worker()
1087 for (i = 0, mirror = stripe->mirror_num; in scrub_stripe_read_repair_worker()
1090 const unsigned long old_error_bitmap = stripe->error_bitmap; in scrub_stripe_read_repair_worker()
1093 fs_info->sectorsize, true); in scrub_stripe_read_repair_worker()
1096 if (bitmap_empty(&stripe->error_bitmap, stripe->nr_sectors)) in scrub_stripe_read_repair_worker()
1102 * in-place, but queue the bg to be relocated. in scrub_stripe_read_repair_worker()
1104 bitmap_andnot(&repaired, &stripe->init_error_bitmap, &stripe->error_bitmap, in scrub_stripe_read_repair_worker()
1105 stripe->nr_sectors); in scrub_stripe_read_repair_worker()
1106 if (!sctx->readonly && !bitmap_empty(&repaired, stripe->nr_sectors)) { in scrub_stripe_read_repair_worker()
1108 btrfs_repair_one_zone(fs_info, sctx->stripes[0].bg->start); in scrub_stripe_read_repair_worker()
1116 set_bit(SCRUB_STRIPE_FLAG_REPAIR_DONE, &stripe->state); in scrub_stripe_read_repair_worker()
1117 wake_up(&stripe->repair_wait); in scrub_stripe_read_repair_worker()
1122 struct scrub_stripe *stripe = bbio->private; in scrub_read_endio()
1124 int sector_nr = calc_sector_number(stripe, bio_first_bvec_all(&bbio->bio)); in scrub_read_endio()
1129 ASSERT(sector_nr < stripe->nr_sectors); in scrub_read_endio()
1130 bio_for_each_bvec_all(bvec, &bbio->bio, i) in scrub_read_endio()
1131 bio_size += bvec->bv_len; in scrub_read_endio()
1132 num_sectors = bio_size >> stripe->bg->fs_info->sectorsize_bits; in scrub_read_endio()
1134 if (bbio->bio.bi_status) { in scrub_read_endio()
1135 bitmap_set(&stripe->io_error_bitmap, sector_nr, num_sectors); in scrub_read_endio()
1136 bitmap_set(&stripe->error_bitmap, sector_nr, num_sectors); in scrub_read_endio()
1138 bitmap_clear(&stripe->io_error_bitmap, sector_nr, num_sectors); in scrub_read_endio()
1140 bio_put(&bbio->bio); in scrub_read_endio()
1141 if (atomic_dec_and_test(&stripe->pending_io)) { in scrub_read_endio()
1142 wake_up(&stripe->io_wait); in scrub_read_endio()
1143 INIT_WORK(&stripe->work, scrub_stripe_read_repair_worker); in scrub_read_endio()
1144 queue_work(stripe->bg->fs_info->scrub_workers, &stripe->work); in scrub_read_endio()
1150 struct scrub_stripe *stripe = bbio->private; in scrub_write_endio()
1151 struct btrfs_fs_info *fs_info = stripe->bg->fs_info; in scrub_write_endio()
1153 int sector_nr = calc_sector_number(stripe, bio_first_bvec_all(&bbio->bio)); in scrub_write_endio()
1157 bio_for_each_bvec_all(bvec, &bbio->bio, i) in scrub_write_endio()
1158 bio_size += bvec->bv_len; in scrub_write_endio()
1160 if (bbio->bio.bi_status) { in scrub_write_endio()
1163 spin_lock_irqsave(&stripe->write_error_lock, flags); in scrub_write_endio()
1164 bitmap_set(&stripe->write_error_bitmap, sector_nr, in scrub_write_endio()
1165 bio_size >> fs_info->sectorsize_bits); in scrub_write_endio()
1166 spin_unlock_irqrestore(&stripe->write_error_lock, flags); in scrub_write_endio()
1167 for (int i = 0; i < (bio_size >> fs_info->sectorsize_bits); i++) in scrub_write_endio()
1168 btrfs_dev_stat_inc_and_print(stripe->dev, in scrub_write_endio()
1171 bio_put(&bbio->bio); in scrub_write_endio()
1173 if (atomic_dec_and_test(&stripe->pending_io)) in scrub_write_endio()
1174 wake_up(&stripe->io_wait); in scrub_write_endio()
1181 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_submit_write_bio()
1182 u32 bio_len = bbio->bio.bi_iter.bi_size; in scrub_submit_write_bio()
1183 u32 bio_off = (bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT) - in scrub_submit_write_bio()
1184 stripe->logical; in scrub_submit_write_bio()
1186 fill_writer_pointer_gap(sctx, stripe->physical + bio_off); in scrub_submit_write_bio()
1187 atomic_inc(&stripe->pending_io); in scrub_submit_write_bio()
1188 btrfs_submit_repair_write(bbio, stripe->mirror_num, dev_replace); in scrub_submit_write_bio()
1201 if (!test_bit(bio_off >> fs_info->sectorsize_bits, in scrub_submit_write_bio()
1202 &stripe->write_error_bitmap)) in scrub_submit_write_bio()
1203 sctx->write_pointer += bio_len; in scrub_submit_write_bio()
1211 * - Only needs logical bytenr and mirror_num
1212 * Just like the scrub read path
1214 * - Would only result in writes to the specified mirror
1215 * Unlike the regular writeback path, which would write back to all stripes
1217 * - Handle dev-replace and read-repair writeback differently
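A hedged usage sketch of scrub_write_sectors() for the read-repair case, mirroring the bitmap handling visible in scrub_stripe_read_repair_worker() earlier (the non-zoned call site itself is not among the matched lines):

	unsigned long repaired = 0;

	/* Sectors that were bad on the initial read but verify clean now. */
	bitmap_andnot(&repaired, &stripe->init_error_bitmap,
		      &stripe->error_bitmap, stripe->nr_sectors);
	if (!sctx->readonly && !bitmap_empty(&repaired, stripe->nr_sectors)) {
		scrub_write_sectors(sctx, stripe, repaired, false);
		wait_scrub_stripe_io(stripe);
	}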
1222 struct btrfs_fs_info *fs_info = stripe->bg->fs_info; in scrub_write_sectors()
1226 for_each_set_bit(sector_nr, &write_bitmap, stripe->nr_sectors) { in scrub_write_sectors()
1232 ASSERT(test_bit(sector_nr, &stripe->extent_sector_bitmap)); in scrub_write_sectors()
1235 if (bbio && sector_nr && !test_bit(sector_nr - 1, &write_bitmap)) { in scrub_write_sectors()
1240 bbio = btrfs_bio_alloc(stripe->nr_sectors, REQ_OP_WRITE, in scrub_write_sectors()
1242 bbio->bio.bi_iter.bi_sector = (stripe->logical + in scrub_write_sectors()
1243 (sector_nr << fs_info->sectorsize_bits)) >> in scrub_write_sectors()
1246 ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff); in scrub_write_sectors()
1247 ASSERT(ret == fs_info->sectorsize); in scrub_write_sectors()
1254 * Throttling of IO submission, bandwidth-limit based, the timeslice is 1
1266 bwlimit = READ_ONCE(device->scrub_speed_max); in scrub_throttle_dev_io()
1279 if (sctx->throttle_deadline == 0) { in scrub_throttle_dev_io()
1280 sctx->throttle_deadline = ktime_add_ms(now, time_slice / div); in scrub_throttle_dev_io()
1281 sctx->throttle_sent = 0; in scrub_throttle_dev_io()
1285 if (ktime_before(now, sctx->throttle_deadline)) { in scrub_throttle_dev_io()
1287 sctx->throttle_sent += bio_size; in scrub_throttle_dev_io()
1288 if (sctx->throttle_sent <= div_u64(bwlimit, div)) in scrub_throttle_dev_io()
1292 delta = ktime_ms_delta(sctx->throttle_deadline, now); in scrub_throttle_dev_io()
1306 sctx->throttle_deadline = 0; in scrub_throttle_dev_io()
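A worked example of the throttle, assuming scrub_speed_max = 100 MiB/s and a divisor of 1 (the divisor computation is not among the matched lines):

/*
 *   First bio of a slice: throttle_deadline = now + 1000ms, throttle_sent = 0.
 *   Each submission adds bio_size to throttle_sent; bios pass through while
 *   throttle_sent stays within the ~100 MiB budget for the slice.
 *   Once the budget is exceeded, the caller sleeps for the remaining
 *   delta = deadline - now milliseconds, then throttle_deadline is reset to 0
 *   so the next submission opens a fresh 1-second slice.
 */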
1317 struct map_lookup *map, u64 *offset, in get_raid56_logic_offset() argument
1323 const int data_stripes = nr_data_stripes(map); in get_raid56_logic_offset()
1325 last_offset = (physical - map->stripes[num].physical) * data_stripes; in get_raid56_logic_offset()
1339 /* Work out the disk rotation on this stripe-set */ in get_raid56_logic_offset()
1340 rot = stripe_nr % map->num_stripes; in get_raid56_logic_offset()
1343 stripe_index = rot % map->num_stripes; in get_raid56_logic_offset()
1358 static int compare_extent_item_range(struct btrfs_path *path, in compare_extent_item_range() argument
1361 struct btrfs_fs_info *fs_info = path->nodes[0]->fs_info; in compare_extent_item_range()
1365 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); in compare_extent_item_range()
1369 len = fs_info->nodesize; in compare_extent_item_range()
1374 return -1; in compare_extent_item_range()
1384 * If the path is not initialized, we will initialize the search by doing
1386 * If the path is already initialized, we will use the path as the initial
1392 * Return 0 if we found such extent item, and @path will point to the extent item.
1393 * Return >0 if no such extent item can be found, and @path will be released.
1394 * Return <0 if hit fatal error, and @path will be released.
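Given that contract, a sketch of the typical caller loop (parameter names are assumed from the comments above; the real consumer is scrub_find_fill_first_stripe() further down):

	u64 cur = search_start;
	u64 extent_start, extent_len, extent_flags, extent_gen;

	while (cur < search_start + search_len) {
		ret = find_first_extent_item(extent_root, path, cur,
					     search_start + search_len - cur);
		if (ret > 0)	/* No more extent items in the range. */
			break;
		if (ret < 0)	/* Fatal error, @path already released. */
			return ret;
		get_extent_info(path, &extent_start, &extent_len,
				&extent_flags, &extent_gen);
		/* ... record the extent ... */
		cur = extent_start + extent_len;
	}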
1397 struct btrfs_path *path, in find_first_extent_item() argument
1400 struct btrfs_fs_info *fs_info = extent_root->fs_info; in find_first_extent_item()
1404 /* Continue using the existing path */ in find_first_extent_item()
1405 if (path->nodes[0]) in find_first_extent_item()
1413 key.offset = (u64)-1; in find_first_extent_item()
1415 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0); in find_first_extent_item()
1424 ret = btrfs_previous_extent_item(extent_root, path, 0); in find_first_extent_item()
1433 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); in find_first_extent_item()
1440 ret = compare_extent_item_range(path, search_start, search_len); in find_first_extent_item()
1446 path->slots[0]++; in find_first_extent_item()
1447 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) { in find_first_extent_item()
1448 ret = btrfs_next_leaf(extent_root, path); in find_first_extent_item()
1451 btrfs_release_path(path); in find_first_extent_item()
1456 btrfs_release_path(path); in find_first_extent_item()
1460 static void get_extent_info(struct btrfs_path *path, u64 *extent_start_ret, in get_extent_info() argument
1466 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); in get_extent_info()
1471 *size_ret = path->nodes[0]->fs_info->nodesize; in get_extent_info()
1474 ei = btrfs_item_ptr(path->nodes[0], path->slots[0], struct btrfs_extent_item); in get_extent_info()
1475 *flags_ret = btrfs_extent_flags(path->nodes[0], ei); in get_extent_info()
1476 *generation_ret = btrfs_extent_generation(path->nodes[0], ei); in get_extent_info()
1482 struct btrfs_fs_info *fs_info = sctx->fs_info; in sync_write_pointer_for_zoned()
1488 mutex_lock(&sctx->wr_lock); in sync_write_pointer_for_zoned()
1489 if (sctx->write_pointer < physical_end) { in sync_write_pointer_for_zoned()
1490 ret = btrfs_sync_zone_write_pointer(sctx->wr_tgtdev, logical, in sync_write_pointer_for_zoned()
1492 sctx->write_pointer); in sync_write_pointer_for_zoned()
1497 mutex_unlock(&sctx->wr_lock); in sync_write_pointer_for_zoned()
1498 btrfs_dev_clear_zone_empty(sctx->wr_tgtdev, physical); in sync_write_pointer_for_zoned()
1508 for (u64 cur_logical = max(stripe->logical, extent_start); in fill_one_extent_info()
1509 cur_logical < min(stripe->logical + BTRFS_STRIPE_LEN, in fill_one_extent_info()
1511 cur_logical += fs_info->sectorsize) { in fill_one_extent_info()
1512 const int nr_sector = (cur_logical - stripe->logical) >> in fill_one_extent_info()
1513 fs_info->sectorsize_bits; in fill_one_extent_info()
1515 &stripe->sectors[nr_sector]; in fill_one_extent_info()
1517 set_bit(nr_sector, &stripe->extent_sector_bitmap); in fill_one_extent_info()
1519 sector->is_metadata = true; in fill_one_extent_info()
1520 sector->generation = extent_gen; in fill_one_extent_info()
1527 stripe->extent_sector_bitmap = 0; in scrub_stripe_reset_bitmaps()
1528 stripe->init_error_bitmap = 0; in scrub_stripe_reset_bitmaps()
1529 stripe->init_nr_io_errors = 0; in scrub_stripe_reset_bitmaps()
1530 stripe->init_nr_csum_errors = 0; in scrub_stripe_reset_bitmaps()
1531 stripe->init_nr_meta_errors = 0; in scrub_stripe_reset_bitmaps()
1532 stripe->init_nr_meta_gen_errors = 0; in scrub_stripe_reset_bitmaps()
1533 stripe->error_bitmap = 0; in scrub_stripe_reset_bitmaps()
1534 stripe->io_error_bitmap = 0; in scrub_stripe_reset_bitmaps()
1535 stripe->csum_error_bitmap = 0; in scrub_stripe_reset_bitmaps()
1536 stripe->meta_error_bitmap = 0; in scrub_stripe_reset_bitmaps()
1537 stripe->meta_gen_error_bitmap = 0; in scrub_stripe_reset_bitmaps()
1555 struct btrfs_fs_info *fs_info = bg->fs_info; in scrub_find_fill_first_stripe()
1556 struct btrfs_root *extent_root = btrfs_extent_root(fs_info, bg->start); in scrub_find_fill_first_stripe()
1557 struct btrfs_root *csum_root = btrfs_csum_root(fs_info, bg->start); in scrub_find_fill_first_stripe()
1569 return -EUCLEAN; in scrub_find_fill_first_stripe()
1571 memset(stripe->sectors, 0, sizeof(struct scrub_sector_verification) * in scrub_find_fill_first_stripe()
1572 stripe->nr_sectors); in scrub_find_fill_first_stripe()
1576 ASSERT(logical_start >= bg->start && logical_end <= bg->start + bg->length); in scrub_find_fill_first_stripe()
1586 stripe->nr_meta_extents++; in scrub_find_fill_first_stripe()
1588 stripe->nr_data_extents++; in scrub_find_fill_first_stripe()
1594 * The extra calculation against bg->start is to handle block groups in scrub_find_fill_first_stripe()
1597 stripe->logical = round_down(cur_logical - bg->start, BTRFS_STRIPE_LEN) + in scrub_find_fill_first_stripe()
1598 bg->start; in scrub_find_fill_first_stripe()
1599 stripe->physical = physical + stripe->logical - logical_start; in scrub_find_fill_first_stripe()
1600 stripe->dev = dev; in scrub_find_fill_first_stripe()
1601 stripe->bg = bg; in scrub_find_fill_first_stripe()
1602 stripe->mirror_num = mirror_num; in scrub_find_fill_first_stripe()
1603 stripe_end = stripe->logical + BTRFS_STRIPE_LEN - 1; in scrub_find_fill_first_stripe()
1605 /* Fill the first extent info into stripe->sectors[] array. */ in scrub_find_fill_first_stripe()
1613 stripe_end - cur_logical + 1); in scrub_find_fill_first_stripe()
1623 stripe->nr_meta_extents++; in scrub_find_fill_first_stripe()
1625 stripe->nr_data_extents++; in scrub_find_fill_first_stripe()
1632 if (bg->flags & BTRFS_BLOCK_GROUP_DATA) { in scrub_find_fill_first_stripe()
1637 ASSERT(stripe->csums); in scrub_find_fill_first_stripe()
1643 ASSERT(BITS_PER_LONG >= BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits); in scrub_find_fill_first_stripe()
1646 stripe->logical, stripe_end, in scrub_find_fill_first_stripe()
1647 stripe->csums, &csum_bitmap); in scrub_find_fill_first_stripe()
1653 for_each_set_bit(sector_nr, &csum_bitmap, stripe->nr_sectors) { in scrub_find_fill_first_stripe()
1654 stripe->sectors[sector_nr].csum = stripe->csums + in scrub_find_fill_first_stripe()
1655 sector_nr * fs_info->csum_size; in scrub_find_fill_first_stripe()
1658 set_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state); in scrub_find_fill_first_stripe()
1667 stripe->nr_meta_extents = 0; in scrub_reset_stripe()
1668 stripe->nr_data_extents = 0; in scrub_reset_stripe()
1669 stripe->state = 0; in scrub_reset_stripe()
1671 for (int i = 0; i < stripe->nr_sectors; i++) { in scrub_reset_stripe()
1672 stripe->sectors[i].is_metadata = false; in scrub_reset_stripe()
1673 stripe->sectors[i].csum = NULL; in scrub_reset_stripe()
1674 stripe->sectors[i].generation = 0; in scrub_reset_stripe()
1681 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_submit_initial_read()
1683 unsigned int nr_sectors = min_t(u64, BTRFS_STRIPE_LEN, stripe->bg->start + in scrub_submit_initial_read()
1684 stripe->bg->length - stripe->logical) >> in scrub_submit_initial_read()
1685 fs_info->sectorsize_bits; in scrub_submit_initial_read()
1686 int mirror = stripe->mirror_num; in scrub_submit_initial_read()
1688 ASSERT(stripe->bg); in scrub_submit_initial_read()
1689 ASSERT(stripe->mirror_num > 0); in scrub_submit_initial_read()
1690 ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state)); in scrub_submit_initial_read()
1695 bbio->bio.bi_iter.bi_sector = stripe->logical >> SECTOR_SHIFT; in scrub_submit_initial_read()
1702 ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff); in scrub_submit_initial_read()
1704 ASSERT(ret == fs_info->sectorsize); in scrub_submit_initial_read()
1706 atomic_inc(&stripe->pending_io); in scrub_submit_initial_read()
1709 * For dev-replace, either user asks to avoid the source dev, or in scrub_submit_initial_read()
1712 if (sctx->is_dev_replace && in scrub_submit_initial_read()
1713 (fs_info->dev_replace.cont_reading_from_srcdev_mode == in scrub_submit_initial_read()
1715 !stripe->dev->bdev)) { in scrub_submit_initial_read()
1716 int num_copies = btrfs_num_copies(fs_info, stripe->bg->start, in scrub_submit_initial_read()
1717 stripe->bg->length); in scrub_submit_initial_read()
1728 for_each_set_bit(i, &stripe->error_bitmap, stripe->nr_sectors) { in stripe_has_metadata_error()
1729 if (stripe->sectors[i].is_metadata) { in stripe_has_metadata_error()
1730 struct btrfs_fs_info *fs_info = stripe->bg->fs_info; in stripe_has_metadata_error()
1734 stripe->logical, in stripe_has_metadata_error()
1735 stripe->logical + (i << fs_info->sectorsize_bits)); in stripe_has_metadata_error()
1751 scrub_throttle_dev_io(sctx, sctx->stripes[0].dev, in submit_initial_group_read()
1755 struct scrub_stripe *stripe = &sctx->stripes[first_slot + i]; in submit_initial_group_read()
1758 ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state)); in submit_initial_group_read()
1766 struct btrfs_fs_info *fs_info = sctx->fs_info; in flush_scrub_stripes()
1768 const int nr_stripes = sctx->cur_stripe; in flush_scrub_stripes()
1774 ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &sctx->stripes[0].state)); in flush_scrub_stripes()
1780 submit_initial_group_read(sctx, first_slot, nr_stripes - first_slot); in flush_scrub_stripes()
1784 stripe = &sctx->stripes[i]; in flush_scrub_stripes()
1786 wait_event(stripe->repair_wait, in flush_scrub_stripes()
1787 test_bit(SCRUB_STRIPE_FLAG_REPAIR_DONE, &stripe->state)); in flush_scrub_stripes()
1790 /* Submit for dev-replace. */ in flush_scrub_stripes()
1791 if (sctx->is_dev_replace) { in flush_scrub_stripes()
1793 * For dev-replace, if we know there is something wrong with in flush_scrub_stripes()
1797 if (stripe_has_metadata_error(&sctx->stripes[i])) { in flush_scrub_stripes()
1798 ret = -EIO; in flush_scrub_stripes()
1805 stripe = &sctx->stripes[i]; in flush_scrub_stripes()
1807 ASSERT(stripe->dev == fs_info->dev_replace.srcdev); in flush_scrub_stripes()
1809 bitmap_andnot(&good, &stripe->extent_sector_bitmap, in flush_scrub_stripes()
1810 &stripe->error_bitmap, stripe->nr_sectors); in flush_scrub_stripes()
1817 stripe = &sctx->stripes[i]; in flush_scrub_stripes()
1823 sctx->cur_stripe = 0; in flush_scrub_stripes()
1829 complete(bio->bi_private); in raid56_scrub_wait_endio()
1844 ASSERT(sctx->cur_stripe < SCRUB_TOTAL_STRIPES); in queue_scrub_stripe()
1849 stripe = &sctx->stripes[sctx->cur_stripe]; in queue_scrub_stripe()
1851 ret = scrub_find_fill_first_stripe(bg, &sctx->extent_path, in queue_scrub_stripe()
1852 &sctx->csum_path, dev, physical, in queue_scrub_stripe()
1857 *found_logical_ret = stripe->logical; in queue_scrub_stripe()
1858 sctx->cur_stripe++; in queue_scrub_stripe()
1861 if (sctx->cur_stripe % SCRUB_STRIPES_PER_GROUP == 0) { in queue_scrub_stripe()
1862 const int first_slot = sctx->cur_stripe - SCRUB_STRIPES_PER_GROUP; in queue_scrub_stripe()
1868 if (sctx->cur_stripe == SCRUB_TOTAL_STRIPES) in queue_scrub_stripe()
1876 struct map_lookup *map, in scrub_raid56_parity_stripe() argument
1880 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_raid56_parity_stripe()
1888 const int data_stripes = nr_data_stripes(map); in scrub_raid56_parity_stripe()
1893 ASSERT(sctx->raid56_data_stripes); in scrub_raid56_parity_stripe()
1896 * For data stripe search, we cannot re-use the same extent/csum paths, in scrub_raid56_parity_stripe()
1910 stripe = &sctx->raid56_data_stripes[i]; in scrub_raid56_parity_stripe()
1911 rot = div_u64(full_stripe_start - bg->start, in scrub_raid56_parity_stripe()
1913 stripe_index = (i + rot) % map->num_stripes; in scrub_raid56_parity_stripe()
1914 physical = map->stripes[stripe_index].physical + in scrub_raid56_parity_stripe()
1918 set_bit(SCRUB_STRIPE_FLAG_NO_REPORT, &stripe->state); in scrub_raid56_parity_stripe()
1920 map->stripes[stripe_index].dev, physical, 1, in scrub_raid56_parity_stripe()
1930 stripe->logical = full_stripe_start + in scrub_raid56_parity_stripe()
1932 stripe->dev = map->stripes[stripe_index].dev; in scrub_raid56_parity_stripe()
1933 stripe->mirror_num = 1; in scrub_raid56_parity_stripe()
1934 set_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state); in scrub_raid56_parity_stripe()
1940 stripe = &sctx->raid56_data_stripes[i]; in scrub_raid56_parity_stripe()
1941 if (!bitmap_empty(&stripe->extent_sector_bitmap, stripe->nr_sectors)) { in scrub_raid56_parity_stripe()
1952 stripe = &sctx->raid56_data_stripes[i]; in scrub_raid56_parity_stripe()
1956 stripe = &sctx->raid56_data_stripes[i]; in scrub_raid56_parity_stripe()
1958 wait_event(stripe->repair_wait, in scrub_raid56_parity_stripe()
1959 test_bit(SCRUB_STRIPE_FLAG_REPAIR_DONE, &stripe->state)); in scrub_raid56_parity_stripe()
1962 ASSERT(!btrfs_is_zoned(sctx->fs_info)); in scrub_raid56_parity_stripe()
1974 stripe = &sctx->raid56_data_stripes[i]; in scrub_raid56_parity_stripe()
1980 bitmap_and(&error, &stripe->error_bitmap, in scrub_raid56_parity_stripe()
1981 &stripe->extent_sector_bitmap, stripe->nr_sectors); in scrub_raid56_parity_stripe()
1982 if (!bitmap_empty(&error, stripe->nr_sectors)) { in scrub_raid56_parity_stripe()
1985 full_stripe_start, i, stripe->nr_sectors, in scrub_raid56_parity_stripe()
1987 ret = -EIO; in scrub_raid56_parity_stripe()
1991 &stripe->extent_sector_bitmap, stripe->nr_sectors); in scrub_raid56_parity_stripe()
1996 bio->bi_iter.bi_sector = full_stripe_start >> SECTOR_SHIFT; in scrub_raid56_parity_stripe()
1997 bio->bi_private = &io_done; in scrub_raid56_parity_stripe()
1998 bio->bi_end_io = raid56_scrub_wait_endio; in scrub_raid56_parity_stripe()
2009 BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits); in scrub_raid56_parity_stripe()
2012 ret = -ENOMEM; in scrub_raid56_parity_stripe()
2018 stripe = &sctx->raid56_data_stripes[i]; in scrub_raid56_parity_stripe()
2020 raid56_parity_cache_data_pages(rbio, stripe->pages, in scrub_raid56_parity_stripe()
2025 ret = blk_status_to_errno(bio->bi_status); in scrub_raid56_parity_stripe()
2045 struct map_lookup *map, in scrub_simple_mirror() argument
2050 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_simple_mirror()
2056 ASSERT(logical_start >= bg->start && logical_end <= bg->start + bg->length); in scrub_simple_mirror()
2061 u64 cur_physical = physical + cur_logical - logical_start; in scrub_simple_mirror()
2064 if (atomic_read(&fs_info->scrub_cancel_req) || in scrub_simple_mirror()
2065 atomic_read(&sctx->cancel_req)) { in scrub_simple_mirror()
2066 ret = -ECANCELED; in scrub_simple_mirror()
2070 if (atomic_read(&fs_info->scrub_pause_req)) { in scrub_simple_mirror()
2075 spin_lock(&bg->lock); in scrub_simple_mirror()
2076 if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &bg->runtime_flags)) { in scrub_simple_mirror()
2077 spin_unlock(&bg->lock); in scrub_simple_mirror()
2081 spin_unlock(&bg->lock); in scrub_simple_mirror()
2084 cur_logical, logical_end - cur_logical, in scrub_simple_mirror()
2088 sctx->stat.last_physical = physical + logical_length; in scrub_simple_mirror()
2106 static u64 simple_stripe_full_stripe_len(const struct map_lookup *map) in simple_stripe_full_stripe_len() argument
2108 ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 | in simple_stripe_full_stripe_len()
2111 return btrfs_stripe_nr_to_offset(map->num_stripes / map->sub_stripes); in simple_stripe_full_stripe_len()
2115 static u64 simple_stripe_get_logical(struct map_lookup *map, in simple_stripe_get_logical() argument
2119 ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 | in simple_stripe_get_logical()
2121 ASSERT(stripe_index < map->num_stripes); in simple_stripe_get_logical()
2127 return btrfs_stripe_nr_to_offset(stripe_index / map->sub_stripes) + in simple_stripe_get_logical()
2128 bg->start; in simple_stripe_get_logical()
2132 static int simple_stripe_mirror_num(struct map_lookup *map, int stripe_index) in simple_stripe_mirror_num() argument
2134 ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 | in simple_stripe_mirror_num()
2136 ASSERT(stripe_index < map->num_stripes); in simple_stripe_mirror_num()
2139 return stripe_index % map->sub_stripes + 1; in simple_stripe_mirror_num()
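A worked example for the three simple-stripe helpers above, assuming BTRFS_STRIPE_LEN = 64KiB:

/*
 * RAID10, num_stripes = 4, sub_stripes = 2:
 *   simple_stripe_full_stripe_len() = btrfs_stripe_nr_to_offset(4 / 2) = 128KiB
 *   stripe_index = 3: logical = bg->start + btrfs_stripe_nr_to_offset(3 / 2)
 *                             = bg->start + 64KiB
 *                     mirror_num = 3 % 2 + 1 = 2
 * RAID0 (sub_stripes = 1): mirror_num is always 1 and the full stripe spans
 * all num_stripes device stripes.
 */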
2144 struct map_lookup *map, in scrub_simple_stripe() argument
2148 const u64 logical_increment = simple_stripe_full_stripe_len(map); in scrub_simple_stripe()
2149 const u64 orig_logical = simple_stripe_get_logical(map, bg, stripe_index); in scrub_simple_stripe()
2150 const u64 orig_physical = map->stripes[stripe_index].physical; in scrub_simple_stripe()
2151 const int mirror_num = simple_stripe_mirror_num(map, stripe_index); in scrub_simple_stripe()
2156 while (cur_logical < bg->start + bg->length) { in scrub_simple_stripe()
2162 ret = scrub_simple_mirror(sctx, bg, map, cur_logical, in scrub_simple_stripe()
2181 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_stripe()
2182 struct map_lookup *map = em->map_lookup; in scrub_stripe() local
2183 const u64 profile = map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK; in scrub_stripe()
2184 const u64 chunk_logical = bg->start; in scrub_stripe()
2187 u64 physical = map->stripes[stripe_index].physical; in scrub_stripe()
2200 ASSERT(sctx->extent_path.nodes[0] == NULL); in scrub_stripe()
2204 if (sctx->is_dev_replace && in scrub_stripe()
2205 btrfs_dev_is_sequential(sctx->wr_tgtdev, physical)) { in scrub_stripe()
2206 mutex_lock(&sctx->wr_lock); in scrub_stripe()
2207 sctx->write_pointer = physical; in scrub_stripe()
2208 mutex_unlock(&sctx->wr_lock); in scrub_stripe()
2213 ASSERT(sctx->raid56_data_stripes == NULL); in scrub_stripe()
2215 sctx->raid56_data_stripes = kcalloc(nr_data_stripes(map), in scrub_stripe()
2218 if (!sctx->raid56_data_stripes) { in scrub_stripe()
2219 ret = -ENOMEM; in scrub_stripe()
2222 for (int i = 0; i < nr_data_stripes(map); i++) { in scrub_stripe()
2224 &sctx->raid56_data_stripes[i]); in scrub_stripe()
2227 sctx->raid56_data_stripes[i].bg = bg; in scrub_stripe()
2228 sctx->raid56_data_stripes[i].sctx = sctx; in scrub_stripe()
2248 ret = scrub_simple_mirror(sctx, bg, map, bg->start, bg->length, in scrub_stripe()
2249 scrub_dev, map->stripes[stripe_index].physical, in scrub_stripe()
2255 ret = scrub_simple_stripe(sctx, bg, map, scrub_dev, stripe_index); in scrub_stripe()
2256 offset = btrfs_stripe_nr_to_offset(stripe_index / map->sub_stripes); in scrub_stripe()
2261 ASSERT(map->type & BTRFS_BLOCK_GROUP_RAID56_MASK); in scrub_stripe()
2266 map, &logic_end, NULL); in scrub_stripe()
2270 get_raid56_logic_offset(physical, stripe_index, map, &offset, NULL); in scrub_stripe()
2271 increment = btrfs_stripe_nr_to_offset(nr_data_stripes(map)); in scrub_stripe()
2278 ret = get_raid56_logic_offset(physical, stripe_index, map, in scrub_stripe()
2285 map, stripe_logical); in scrub_stripe()
2299 ret = scrub_simple_mirror(sctx, bg, map, logical, BTRFS_STRIPE_LEN, in scrub_stripe()
2306 spin_lock(&sctx->stat_lock); in scrub_stripe()
2308 sctx->stat.last_physical = in scrub_stripe()
2309 map->stripes[stripe_index].physical + dev_stripe_len; in scrub_stripe()
2311 sctx->stat.last_physical = physical; in scrub_stripe()
2312 spin_unlock(&sctx->stat_lock); in scrub_stripe()
2320 btrfs_release_path(&sctx->extent_path); in scrub_stripe()
2321 btrfs_release_path(&sctx->csum_path); in scrub_stripe()
2323 if (sctx->raid56_data_stripes) { in scrub_stripe()
2324 for (int i = 0; i < nr_data_stripes(map); i++) in scrub_stripe()
2325 release_scrub_stripe(&sctx->raid56_data_stripes[i]); in scrub_stripe()
2326 kfree(sctx->raid56_data_stripes); in scrub_stripe()
2327 sctx->raid56_data_stripes = NULL; in scrub_stripe()
2330 if (sctx->is_dev_replace && ret >= 0) { in scrub_stripe()
2335 map->stripes[stripe_index].physical, in scrub_stripe()
2350 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_chunk()
2351 struct extent_map_tree *map_tree = &fs_info->mapping_tree; in scrub_chunk()
2352 struct map_lookup *map; in scrub_chunk() local
2357 read_lock(&map_tree->lock); in scrub_chunk()
2358 em = lookup_extent_mapping(map_tree, bg->start, bg->length); in scrub_chunk()
2359 read_unlock(&map_tree->lock); in scrub_chunk()
2366 spin_lock(&bg->lock); in scrub_chunk()
2367 if (!test_bit(BLOCK_GROUP_FLAG_REMOVED, &bg->runtime_flags)) in scrub_chunk()
2368 ret = -EINVAL; in scrub_chunk()
2369 spin_unlock(&bg->lock); in scrub_chunk()
2373 if (em->start != bg->start) in scrub_chunk()
2375 if (em->len < dev_extent_len) in scrub_chunk()
2378 map = em->map_lookup; in scrub_chunk()
2379 for (i = 0; i < map->num_stripes; ++i) { in scrub_chunk()
2380 if (map->stripes[i].dev->bdev == scrub_dev->bdev && in scrub_chunk()
2381 map->stripes[i].physical == dev_offset) { in scrub_chunk()
2396 struct btrfs_fs_info *fs_info = cache->fs_info; in finish_extent_writes_for_zoned()
2404 btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start, cache->length); in finish_extent_writes_for_zoned()
2417 struct btrfs_path *path; in scrub_enumerate_chunks() local
2418 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_enumerate_chunks()
2419 struct btrfs_root *root = fs_info->dev_root; in scrub_enumerate_chunks()
2428 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace; in scrub_enumerate_chunks()
2430 path = btrfs_alloc_path(); in scrub_enumerate_chunks()
2431 if (!path) in scrub_enumerate_chunks()
2432 return -ENOMEM; in scrub_enumerate_chunks()
2434 path->reada = READA_FORWARD; in scrub_enumerate_chunks()
2435 path->search_commit_root = 1; in scrub_enumerate_chunks()
2436 path->skip_locking = 1; in scrub_enumerate_chunks()
2438 key.objectid = scrub_dev->devid; in scrub_enumerate_chunks()
2445 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); in scrub_enumerate_chunks()
2449 if (path->slots[0] >= in scrub_enumerate_chunks()
2450 btrfs_header_nritems(path->nodes[0])) { in scrub_enumerate_chunks()
2451 ret = btrfs_next_leaf(root, path); in scrub_enumerate_chunks()
2463 l = path->nodes[0]; in scrub_enumerate_chunks()
2464 slot = path->slots[0]; in scrub_enumerate_chunks()
2468 if (found_key.objectid != scrub_dev->devid) in scrub_enumerate_chunks()
2499 ASSERT(cache->start <= chunk_offset); in scrub_enumerate_chunks()
2515 * setup through the regular write path (by btrfs_map_block()), in scrub_enumerate_chunks()
2519 if (cache->start < chunk_offset) { in scrub_enumerate_chunks()
2524 if (sctx->is_dev_replace && btrfs_is_zoned(fs_info)) { in scrub_enumerate_chunks()
2525 if (!test_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags)) { in scrub_enumerate_chunks()
2539 spin_lock(&cache->lock); in scrub_enumerate_chunks()
2540 if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &cache->runtime_flags)) { in scrub_enumerate_chunks()
2541 spin_unlock(&cache->lock); in scrub_enumerate_chunks()
2546 spin_unlock(&cache->lock); in scrub_enumerate_chunks()
2552 * -> btrfs_wait_for_commit() in scrub_enumerate_chunks()
2553 * -> btrfs_commit_transaction() in scrub_enumerate_chunks()
2554 * -> btrfs_scrub_pause() in scrub_enumerate_chunks()
2562 * -EFBIG from btrfs_finish_chunk_alloc() like: in scrub_enumerate_chunks()
2578 * - Write duplication in scrub_enumerate_chunks()
2580 * - Scrub copy in scrub_enumerate_chunks()
2585 * So for dev-replace, it's not allowed to continue if a block in scrub_enumerate_chunks()
2588 ret = btrfs_inc_block_group_ro(cache, sctx->is_dev_replace); in scrub_enumerate_chunks()
2589 if (!ret && sctx->is_dev_replace) { in scrub_enumerate_chunks()
2601 } else if (ret == -ENOSPC && !sctx->is_dev_replace && in scrub_enumerate_chunks()
2602 !(cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK)) { in scrub_enumerate_chunks()
2604 * btrfs_inc_block_group_ro return -ENOSPC when it in scrub_enumerate_chunks()
2610 * For RAID56 chunks, we have to mark them read-only in scrub_enumerate_chunks()
2617 } else if (ret == -ETXTBSY) { in scrub_enumerate_chunks()
2620 cache->start); in scrub_enumerate_chunks()
2635 * finish before dev-replace. in scrub_enumerate_chunks()
2638 if (sctx->is_dev_replace) { in scrub_enumerate_chunks()
2640 btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start, in scrub_enumerate_chunks()
2641 cache->length); in scrub_enumerate_chunks()
2645 down_write(&dev_replace->rwsem); in scrub_enumerate_chunks()
2646 dev_replace->cursor_right = found_key.offset + dev_extent_len; in scrub_enumerate_chunks()
2647 dev_replace->cursor_left = found_key.offset; in scrub_enumerate_chunks()
2648 dev_replace->item_needs_writeback = 1; in scrub_enumerate_chunks()
2649 up_write(&dev_replace->rwsem); in scrub_enumerate_chunks()
2653 if (sctx->is_dev_replace && in scrub_enumerate_chunks()
2654 !btrfs_finish_block_group_to_copy(dev_replace->srcdev, in scrub_enumerate_chunks()
2658 down_write(&dev_replace->rwsem); in scrub_enumerate_chunks()
2659 dev_replace->cursor_left = dev_replace->cursor_right; in scrub_enumerate_chunks()
2660 dev_replace->item_needs_writeback = 1; in scrub_enumerate_chunks()
2661 up_write(&dev_replace->rwsem); in scrub_enumerate_chunks()
2673 spin_lock(&cache->lock); in scrub_enumerate_chunks()
2674 if (!test_bit(BLOCK_GROUP_FLAG_REMOVED, &cache->runtime_flags) && in scrub_enumerate_chunks()
2675 !cache->ro && cache->reserved == 0 && cache->used == 0) { in scrub_enumerate_chunks()
2676 spin_unlock(&cache->lock); in scrub_enumerate_chunks()
2678 btrfs_discard_queue_work(&fs_info->discard_ctl, in scrub_enumerate_chunks()
2683 spin_unlock(&cache->lock); in scrub_enumerate_chunks()
2690 if (sctx->is_dev_replace && in scrub_enumerate_chunks()
2691 atomic64_read(&dev_replace->num_write_errors) > 0) { in scrub_enumerate_chunks()
2692 ret = -EIO; in scrub_enumerate_chunks()
2695 if (sctx->stat.malloc_errors > 0) { in scrub_enumerate_chunks()
2696 ret = -ENOMEM; in scrub_enumerate_chunks()
2701 btrfs_release_path(path); in scrub_enumerate_chunks()
2704 btrfs_free_path(path); in scrub_enumerate_chunks()
2712 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_one_super()
2718 bio_init(&bio, dev->bdev, &bvec, 1, REQ_OP_READ); in scrub_one_super()
2730 physical, dev->devid); in scrub_one_super()
2731 return -EIO; in scrub_one_super()
2736 physical, dev->devid, in scrub_one_super()
2738 return -EUCLEAN; in scrub_one_super()
2741 return btrfs_validate_super(fs_info, sb, -1); in scrub_one_super()
2752 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_supers()
2755 return -EROFS; in scrub_supers()
2759 spin_lock(&sctx->stat_lock); in scrub_supers()
2760 sctx->stat.malloc_errors++; in scrub_supers()
2761 spin_unlock(&sctx->stat_lock); in scrub_supers()
2762 return -ENOMEM; in scrub_supers()
2766 if (scrub_dev->fs_devices != fs_info->fs_devices) in scrub_supers()
2767 gen = scrub_dev->generation; in scrub_supers()
2769 gen = fs_info->last_trans_committed; in scrub_supers()
2773 if (ret == -ENOENT) in scrub_supers()
2777 spin_lock(&sctx->stat_lock); in scrub_supers()
2778 sctx->stat.super_errors++; in scrub_supers()
2779 spin_unlock(&sctx->stat_lock); in scrub_supers()
2784 scrub_dev->commit_total_bytes) in scrub_supers()
2791 spin_lock(&sctx->stat_lock); in scrub_supers()
2792 sctx->stat.super_errors++; in scrub_supers()
2793 spin_unlock(&sctx->stat_lock); in scrub_supers()
2802 if (refcount_dec_and_mutex_lock(&fs_info->scrub_workers_refcnt, in scrub_workers_put()
2803 &fs_info->scrub_lock)) { in scrub_workers_put()
2804 struct workqueue_struct *scrub_workers = fs_info->scrub_workers; in scrub_workers_put()
2806 fs_info->scrub_workers = NULL; in scrub_workers_put()
2807 mutex_unlock(&fs_info->scrub_lock); in scrub_workers_put()
2815 * get a reference count on fs_info->scrub_workers. start worker if necessary
2821 int max_active = fs_info->thread_pool_size; in scrub_workers_get()
2822 int ret = -ENOMEM; in scrub_workers_get()
2824 if (refcount_inc_not_zero(&fs_info->scrub_workers_refcnt)) in scrub_workers_get()
2827 scrub_workers = alloc_workqueue("btrfs-scrub", flags, max_active); in scrub_workers_get()
2829 return -ENOMEM; in scrub_workers_get()
2831 mutex_lock(&fs_info->scrub_lock); in scrub_workers_get()
2832 if (refcount_read(&fs_info->scrub_workers_refcnt) == 0) { in scrub_workers_get()
2833 ASSERT(fs_info->scrub_workers == NULL); in scrub_workers_get()
2834 fs_info->scrub_workers = scrub_workers; in scrub_workers_get()
2835 refcount_set(&fs_info->scrub_workers_refcnt, 1); in scrub_workers_get()
2836 mutex_unlock(&fs_info->scrub_lock); in scrub_workers_get()
2840 refcount_inc(&fs_info->scrub_workers_refcnt); in scrub_workers_get()
2841 mutex_unlock(&fs_info->scrub_lock); in scrub_workers_get()
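The comment above describes the refcounted worker pool; a sketch of the intended pairing at the call site, assuming the single-argument form this version appears to use (the btrfs_scrub_dev() call site is not among the matched lines):

	/* Sketch: every successful scrub_workers_get() is paired with a put. */
	ret = scrub_workers_get(fs_info);
	if (ret)
		return ret;
	/* ... run the scrub ... */
	scrub_workers_put(fs_info);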
2861 return -EAGAIN; in btrfs_scrub_dev()
2864 ASSERT(fs_info->nodesize <= BTRFS_STRIPE_LEN); in btrfs_scrub_dev()
2871 ASSERT(fs_info->nodesize <= in btrfs_scrub_dev()
2872 SCRUB_MAX_SECTORS_PER_BLOCK << fs_info->sectorsize_bits); in btrfs_scrub_dev()
2883 mutex_lock(&fs_info->fs_devices->device_list_mutex); in btrfs_scrub_dev()
2884 dev = btrfs_find_device(fs_info->fs_devices, &args); in btrfs_scrub_dev()
2885 if (!dev || (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) && in btrfs_scrub_dev()
2887 mutex_unlock(&fs_info->fs_devices->device_list_mutex); in btrfs_scrub_dev()
2888 ret = -ENODEV; in btrfs_scrub_dev()
2893 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) { in btrfs_scrub_dev()
2894 mutex_unlock(&fs_info->fs_devices->device_list_mutex); in btrfs_scrub_dev()
2898 ret = -EROFS; in btrfs_scrub_dev()
2902 mutex_lock(&fs_info->scrub_lock); in btrfs_scrub_dev()
2903 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) || in btrfs_scrub_dev()
2904 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &dev->dev_state)) { in btrfs_scrub_dev()
2905 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_dev()
2906 mutex_unlock(&fs_info->fs_devices->device_list_mutex); in btrfs_scrub_dev()
2907 ret = -EIO; in btrfs_scrub_dev()
2911 down_read(&fs_info->dev_replace.rwsem); in btrfs_scrub_dev()
2912 if (dev->scrub_ctx || in btrfs_scrub_dev()
2914 btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) { in btrfs_scrub_dev()
2915 up_read(&fs_info->dev_replace.rwsem); in btrfs_scrub_dev()
2916 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_dev()
2917 mutex_unlock(&fs_info->fs_devices->device_list_mutex); in btrfs_scrub_dev()
2918 ret = -EINPROGRESS; in btrfs_scrub_dev()
2921 up_read(&fs_info->dev_replace.rwsem); in btrfs_scrub_dev()
2923 sctx->readonly = readonly; in btrfs_scrub_dev()
2924 dev->scrub_ctx = sctx; in btrfs_scrub_dev()
2925 mutex_unlock(&fs_info->fs_devices->device_list_mutex); in btrfs_scrub_dev()
2932 atomic_inc(&fs_info->scrubs_running); in btrfs_scrub_dev()
2933 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_dev()
2942 * before incrementing fs_info->scrubs_running). in btrfs_scrub_dev()
2948 spin_lock(&sctx->stat_lock); in btrfs_scrub_dev()
2949 old_super_errors = sctx->stat.super_errors; in btrfs_scrub_dev()
2950 spin_unlock(&sctx->stat_lock); in btrfs_scrub_dev()
2957 mutex_lock(&fs_info->fs_devices->device_list_mutex); in btrfs_scrub_dev()
2959 mutex_unlock(&fs_info->fs_devices->device_list_mutex); in btrfs_scrub_dev()
2961 spin_lock(&sctx->stat_lock); in btrfs_scrub_dev()
2967 if (sctx->stat.super_errors > old_super_errors && !sctx->readonly) in btrfs_scrub_dev()
2969 spin_unlock(&sctx->stat_lock); in btrfs_scrub_dev()
2976 atomic_dec(&fs_info->scrubs_running); in btrfs_scrub_dev()
2977 wake_up(&fs_info->scrub_pause_wait); in btrfs_scrub_dev()
2980 memcpy(progress, &sctx->stat, sizeof(*progress)); in btrfs_scrub_dev()
2986 mutex_lock(&fs_info->scrub_lock); in btrfs_scrub_dev()
2987 dev->scrub_ctx = NULL; in btrfs_scrub_dev()
2988 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_dev()
3000 trans = btrfs_start_transaction(fs_info->tree_root, 0); in btrfs_scrub_dev()
3023 mutex_lock(&fs_info->scrub_lock); in btrfs_scrub_pause()
3024 atomic_inc(&fs_info->scrub_pause_req); in btrfs_scrub_pause()
3025 while (atomic_read(&fs_info->scrubs_paused) != in btrfs_scrub_pause()
3026 atomic_read(&fs_info->scrubs_running)) { in btrfs_scrub_pause()
3027 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_pause()
3028 wait_event(fs_info->scrub_pause_wait, in btrfs_scrub_pause()
3029 atomic_read(&fs_info->scrubs_paused) == in btrfs_scrub_pause()
3030 atomic_read(&fs_info->scrubs_running)); in btrfs_scrub_pause()
3031 mutex_lock(&fs_info->scrub_lock); in btrfs_scrub_pause()
3033 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_pause()
3038 atomic_dec(&fs_info->scrub_pause_req); in btrfs_scrub_continue()
3039 wake_up(&fs_info->scrub_pause_wait); in btrfs_scrub_continue()
3044 mutex_lock(&fs_info->scrub_lock); in btrfs_scrub_cancel()
3045 if (!atomic_read(&fs_info->scrubs_running)) { in btrfs_scrub_cancel()
3046 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_cancel()
3047 return -ENOTCONN; in btrfs_scrub_cancel()
3050 atomic_inc(&fs_info->scrub_cancel_req); in btrfs_scrub_cancel()
3051 while (atomic_read(&fs_info->scrubs_running)) { in btrfs_scrub_cancel()
3052 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_cancel()
3053 wait_event(fs_info->scrub_pause_wait, in btrfs_scrub_cancel()
3054 atomic_read(&fs_info->scrubs_running) == 0); in btrfs_scrub_cancel()
3055 mutex_lock(&fs_info->scrub_lock); in btrfs_scrub_cancel()
3057 atomic_dec(&fs_info->scrub_cancel_req); in btrfs_scrub_cancel()
3058 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_cancel()
3065 struct btrfs_fs_info *fs_info = dev->fs_info; in btrfs_scrub_cancel_dev()
3068 mutex_lock(&fs_info->scrub_lock); in btrfs_scrub_cancel_dev()
3069 sctx = dev->scrub_ctx; in btrfs_scrub_cancel_dev()
3071 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_cancel_dev()
3072 return -ENOTCONN; in btrfs_scrub_cancel_dev()
3074 atomic_inc(&sctx->cancel_req); in btrfs_scrub_cancel_dev()
3075 while (dev->scrub_ctx) { in btrfs_scrub_cancel_dev()
3076 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_cancel_dev()
3077 wait_event(fs_info->scrub_pause_wait, in btrfs_scrub_cancel_dev()
3078 dev->scrub_ctx == NULL); in btrfs_scrub_cancel_dev()
3079 mutex_lock(&fs_info->scrub_lock); in btrfs_scrub_cancel_dev()
3081 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_cancel_dev()
3093 mutex_lock(&fs_info->fs_devices->device_list_mutex); in btrfs_scrub_progress()
3094 dev = btrfs_find_device(fs_info->fs_devices, &args); in btrfs_scrub_progress()
3096 sctx = dev->scrub_ctx; in btrfs_scrub_progress()
3098 memcpy(progress, &sctx->stat, sizeof(*progress)); in btrfs_scrub_progress()
3099 mutex_unlock(&fs_info->fs_devices->device_list_mutex); in btrfs_scrub_progress()
3101 return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV; in btrfs_scrub_progress()