Lines matching +full:data +full:-mirror (search hits in fs/btrfs/bio.c)
1 // SPDX-License-Identifier: GPL-2.0
12 #include "async-thread.h"
13 #include "check-integrity.h"
14 #include "dev-replace.h"
15 #include "rcu-string.h"
17 #include "file-item.h"
30 /* Is this a data path I/O that needs storage layer checksum and repair? */
33 return bbio->inode && is_data_inode(&bbio->inode->vfs_inode); in is_data_bbio()
38 return is_data_bbio(bbio) && btrfs_op(&bbio->bio) == BTRFS_MAP_WRITE; in bbio_has_ordered_extent()
49 bbio->fs_info = fs_info; in btrfs_bio_init()
50 bbio->end_io = end_io; in btrfs_bio_init()
51 bbio->private = private; in btrfs_bio_init()
52 atomic_set(&bbio->pending_ios, 1); in btrfs_bio_init()
85 bio = bio_split_rw(&orig_bbio->bio, &fs_info->limits, &nr_segs, in btrfs_split_bio()
88 bio = bio_split(&orig_bbio->bio, map_length >> SECTOR_SHIFT, in btrfs_split_bio()
93 bbio->inode = orig_bbio->inode; in btrfs_split_bio()
94 bbio->file_offset = orig_bbio->file_offset; in btrfs_split_bio()
95 orig_bbio->file_offset += map_length; in btrfs_split_bio()
97 refcount_inc(&orig_bbio->ordered->refs); in btrfs_split_bio()
98 bbio->ordered = orig_bbio->ordered; in btrfs_split_bio()
100 atomic_inc(&orig_bbio->pending_ios); in btrfs_split_bio()
108 btrfs_put_ordered_extent(bbio->ordered); in btrfs_cleanup_bio()
109 bio_put(&bbio->bio); in btrfs_cleanup_bio()
115 struct btrfs_ordered_extent *ordered = bbio->ordered; in __btrfs_bio_end_io()
117 bbio->end_io(bbio); in __btrfs_bio_end_io()
120 bbio->end_io(bbio); in __btrfs_bio_end_io()
126 bbio->bio.bi_status = status; in btrfs_bio_end_io()
136 * For writes we tolerate nr_mirrors - 1 write failures, so we can't in btrfs_bbio_propagate_error()
141 if (bbio->bio.bi_end_io == &btrfs_orig_write_end_io) { in btrfs_bbio_propagate_error()
142 struct btrfs_io_stripe *orig_stripe = orig_bbio->bio.bi_private; in btrfs_bbio_propagate_error()
143 struct btrfs_io_context *orig_bioc = orig_stripe->bioc; in btrfs_bbio_propagate_error()
145 atomic_add(orig_bioc->max_errors, &orig_bioc->error); in btrfs_bbio_propagate_error()
147 orig_bbio->bio.bi_status = bbio->bio.bi_status; in btrfs_bbio_propagate_error()
153 if (bbio->bio.bi_pool == &btrfs_clone_bioset) { in btrfs_orig_bbio_end_io()
154 struct btrfs_bio *orig_bbio = bbio->private; in btrfs_orig_bbio_end_io()
156 if (bbio->bio.bi_status) in btrfs_orig_bbio_end_io()
162 if (atomic_dec_and_test(&bbio->pending_ios)) in btrfs_orig_bbio_end_io()
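The pending_ios handling visible above in btrfs_bio_init(), btrfs_split_bio() and btrfs_orig_bbio_end_io() is a plain reference count: it starts at 1 for the submitter, every split child takes an extra reference, and the original bio completes only when the last reference is dropped. A minimal userspace sketch of that pattern, using simplified stand-in types rather than the kernel structures:

#include <stdatomic.h>
#include <stdio.h>

/* Toy stand-in for struct btrfs_bio; only the reference count matters here. */
struct toy_bbio {
	atomic_int pending_ios;	/* 1 for the submitter plus 1 per split child */
};

static void toy_bbio_init(struct toy_bbio *bbio)
{
	atomic_init(&bbio->pending_ios, 1);	/* mirrors atomic_set(..., 1) */
}

static void toy_split(struct toy_bbio *orig)
{
	atomic_fetch_add(&orig->pending_ios, 1);	/* one reference per child */
}

static void toy_end_io(struct toy_bbio *orig)
{
	/* Equivalent of atomic_dec_and_test(): complete on the last reference. */
	if (atomic_fetch_sub(&orig->pending_ios, 1) == 1)
		puts("original bio completes");
}

int main(void)
{
	struct toy_bbio bbio;

	toy_bbio_init(&bbio);
	toy_split(&bbio);	/* one child split off: count 1 -> 2 */
	toy_end_io(&bbio);	/* child finishes: 2 -> 1 */
	toy_end_io(&bbio);	/* submitter's reference: 1 -> 0, done */
	return 0;
}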
168 if (cur_mirror == fbio->num_copies) in next_repair_mirror()
169 return cur_mirror + 1 - fbio->num_copies; in next_repair_mirror()
176 return fbio->num_copies; in prev_repair_mirror()
177 return cur_mirror - 1; in prev_repair_mirror()
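next_repair_mirror() and prev_repair_mirror() above rotate through the available copies using 1-based mirror numbers that wrap around num_copies. A standalone sketch of the arithmetic; the guard condition in prev_repair_mirror() is not among the matched lines and is assumed here to test for mirror 1:

#include <assert.h>

static int next_mirror(int cur, int num_copies)
{
	if (cur == num_copies)
		return cur + 1 - num_copies;	/* wrap back around to mirror 1 */
	return cur + 1;
}

static int prev_mirror(int cur, int num_copies)
{
	if (cur == 1)			/* assumed guard, see lead-in */
		return num_copies;	/* wrap to the last mirror */
	return cur - 1;
}

int main(void)
{
	/* With three copies the repair rotation is 2 -> 3 -> 1 -> 2 ... */
	assert(next_mirror(2, 3) == 3);
	assert(next_mirror(3, 3) == 1);
	assert(prev_mirror(1, 3) == 3);
	assert(prev_mirror(2, 3) == 1);
	return 0;
}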
182 if (atomic_dec_and_test(&fbio->repair_count)) { in btrfs_repair_done()
183 btrfs_orig_bbio_end_io(fbio->bbio); in btrfs_repair_done()
191 struct btrfs_failed_bio *fbio = repair_bbio->private; in btrfs_end_repair_bio()
192 struct btrfs_inode *inode = repair_bbio->inode; in btrfs_end_repair_bio()
193 struct btrfs_fs_info *fs_info = inode->root->fs_info; in btrfs_end_repair_bio()
194 struct bio_vec *bv = bio_first_bvec_all(&repair_bbio->bio); in btrfs_end_repair_bio()
195 int mirror = repair_bbio->mirror_num; in btrfs_end_repair_bio() local
197 if (repair_bbio->bio.bi_status || in btrfs_end_repair_bio()
199 bio_reset(&repair_bbio->bio, NULL, REQ_OP_READ); in btrfs_end_repair_bio()
200 repair_bbio->bio.bi_iter = repair_bbio->saved_iter; in btrfs_end_repair_bio()
202 mirror = next_repair_mirror(fbio, mirror); in btrfs_end_repair_bio()
203 if (mirror == fbio->bbio->mirror_num) { in btrfs_end_repair_bio()
204 btrfs_debug(fs_info, "no mirror left"); in btrfs_end_repair_bio()
205 fbio->bbio->bio.bi_status = BLK_STS_IOERR; in btrfs_end_repair_bio()
209 btrfs_submit_bio(repair_bbio, mirror); in btrfs_end_repair_bio()
214 mirror = prev_repair_mirror(fbio, mirror); in btrfs_end_repair_bio()
216 repair_bbio->file_offset, fs_info->sectorsize, in btrfs_end_repair_bio()
217 repair_bbio->saved_iter.bi_sector << SECTOR_SHIFT, in btrfs_end_repair_bio()
218 bv->bv_page, bv->bv_offset, mirror); in btrfs_end_repair_bio()
219 } while (mirror != fbio->bbio->mirror_num); in btrfs_end_repair_bio()
223 bio_put(&repair_bbio->bio); in btrfs_end_repair_bio()
227 * Try to kick off a repair read to the next available mirror for a bad sector.
229 * This primarily tries to recover good data to serve the actual read request,
230 * but also tries to write the good data back to the bad mirror(s) when a
238 struct btrfs_inode *inode = failed_bbio->inode; in repair_one_sector()
239 struct btrfs_fs_info *fs_info = inode->root->fs_info; in repair_one_sector()
240 const u32 sectorsize = fs_info->sectorsize; in repair_one_sector()
241 const u64 logical = (failed_bbio->saved_iter.bi_sector << SECTOR_SHIFT); in repair_one_sector()
245 int mirror; in repair_one_sector() local
248 failed_bbio->file_offset + bio_offset); in repair_one_sector()
253 failed_bbio->bio.bi_status = BLK_STS_IOERR; in repair_one_sector()
259 fbio->bbio = failed_bbio; in repair_one_sector()
260 fbio->num_copies = num_copies; in repair_one_sector()
261 atomic_set(&fbio->repair_count, 1); in repair_one_sector()
264 atomic_inc(&fbio->repair_count); in repair_one_sector()
268 repair_bio->bi_iter.bi_sector = failed_bbio->saved_iter.bi_sector; in repair_one_sector()
269 __bio_add_page(repair_bio, bv->bv_page, bv->bv_len, bv->bv_offset); in repair_one_sector()
273 repair_bbio->inode = failed_bbio->inode; in repair_one_sector()
274 repair_bbio->file_offset = failed_bbio->file_offset + bio_offset; in repair_one_sector()
276 mirror = next_repair_mirror(fbio, failed_bbio->mirror_num); in repair_one_sector()
277 btrfs_debug(fs_info, "submitting repair read to mirror %d", mirror); in repair_one_sector()
278 btrfs_submit_bio(repair_bbio, mirror); in repair_one_sector()
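Taken together, repair_one_sector() and btrfs_end_repair_bio() above form a retry loop: a one-sector read is cloned and submitted to the next mirror, each failure advances to the following mirror, and the attempt is abandoned ("no mirror left") once the rotation arrives back at the mirror that failed originally; a successful read is also written back to the bad mirror. The kernel version is asynchronous and driven from the bio end_io handler; the sketch below is a simplified synchronous model, and read_sector_from_mirror() is a made-up stand-in:

#include <stdbool.h>
#include <stdio.h>

static int next_mirror(int cur, int num_copies)
{
	return (cur == num_copies) ? 1 : cur + 1;
}

/* Hypothetical stand-in for reading one sector from a mirror and verifying
 * its checksum; here only mirror 3 happens to hold a good copy. */
static bool read_sector_from_mirror(int mirror)
{
	return mirror == 3;
}

static bool repair_sector(int failed_mirror, int num_copies)
{
	int mirror = next_mirror(failed_mirror, num_copies);

	while (mirror != failed_mirror) {
		if (read_sector_from_mirror(mirror)) {
			printf("good copy on mirror %d, writing it back to mirror %d\n",
			       mirror, failed_mirror);
			return true;
		}
		mirror = next_mirror(mirror, num_copies);
	}
	printf("no mirror left\n");
	return false;
}

int main(void)
{
	return repair_sector(1, 3) ? 0 : 1;	/* mirror 1 failed, 3 copies */
}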
284 struct btrfs_inode *inode = bbio->inode; in btrfs_check_read_bio()
285 struct btrfs_fs_info *fs_info = inode->root->fs_info; in btrfs_check_read_bio()
286 u32 sectorsize = fs_info->sectorsize; in btrfs_check_read_bio()
287 struct bvec_iter *iter = &bbio->saved_iter; in btrfs_check_read_bio()
288 blk_status_t status = bbio->bio.bi_status; in btrfs_check_read_bio()
292 /* Read-repair requires the inode field to be set by the submitter. */ in btrfs_check_read_bio()
299 if (bbio->bio.bi_pool == &btrfs_repair_bioset) { in btrfs_check_read_bio()
305 bbio->bio.bi_status = BLK_STS_OK; in btrfs_check_read_bio()
307 while (iter->bi_size) { in btrfs_check_read_bio()
308 struct bio_vec bv = bio_iter_iovec(&bbio->bio, *iter); in btrfs_check_read_bio()
314 bio_advance_iter_single(&bbio->bio, iter, sectorsize); in btrfs_check_read_bio()
318 if (bbio->csum != bbio->csum_inline) in btrfs_check_read_bio()
319 kfree(bbio->csum); in btrfs_check_read_bio()
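btrfs_check_read_bio() above walks the completed read through the saved iter one sectorsize at a time, verifies each sector's checksum, hands bad sectors to the repair path, and finally frees the checksum array if it was not the inline one. A toy userspace version of that per-sector walk; the byte-sum below is only a stand-in for the real checksum algorithm:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SECTORSIZE 4096u

/* Stand-in checksum; btrfs actually uses crc32c, xxhash, sha256 or blake2b. */
static uint32_t toy_csum(const uint8_t *data, size_t len)
{
	uint32_t sum = 0;

	while (len--)
		sum += *data++;
	return sum;
}

static void queue_repair(size_t offset)
{
	printf("would submit a repair read for the sector at offset %zu\n", offset);
}

static void check_read(const uint8_t *buf, size_t size, const uint32_t *csums)
{
	for (size_t off = 0; off < size; off += SECTORSIZE)
		if (toy_csum(buf + off, SECTORSIZE) != csums[off / SECTORSIZE])
			queue_repair(off);
}

int main(void)
{
	static uint8_t buf[2 * SECTORSIZE];
	uint32_t csums[2];

	memset(buf, 0xab, sizeof(buf));
	csums[0] = toy_csum(buf, SECTORSIZE);
	csums[1] = toy_csum(buf + SECTORSIZE, SECTORSIZE) + 1;	/* simulate corruption */
	check_read(buf, sizeof(buf), csums);
	return 0;
}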
329 if (!dev || !dev->bdev) in btrfs_log_dev_io_error()
331 if (bio->bi_status != BLK_STS_IOERR && bio->bi_status != BLK_STS_TARGET) in btrfs_log_dev_io_error()
336 else if (!(bio->bi_opf & REQ_RAHEAD)) in btrfs_log_dev_io_error()
338 if (bio->bi_opf & REQ_PREFLUSH) in btrfs_log_dev_io_error()
345 if (bio->bi_opf & REQ_META) in btrfs_end_io_wq()
346 return fs_info->endio_meta_workers; in btrfs_end_io_wq()
347 return fs_info->endio_workers; in btrfs_end_io_wq()
356 btrfs_check_read_bio(bbio, bbio->bio.bi_private); in btrfs_end_bio_work()
364 struct btrfs_device *dev = bio->bi_private; in btrfs_simple_end_io()
365 struct btrfs_fs_info *fs_info = bbio->fs_info; in btrfs_simple_end_io()
369 if (bio->bi_status) in btrfs_simple_end_io()
373 INIT_WORK(&bbio->end_io_work, btrfs_end_bio_work); in btrfs_simple_end_io()
374 queue_work(btrfs_end_io_wq(fs_info, bio), &bbio->end_io_work); in btrfs_simple_end_io()
376 if (bio_op(bio) == REQ_OP_ZONE_APPEND && !bio->bi_status) in btrfs_simple_end_io()
384 struct btrfs_io_context *bioc = bio->bi_private; in btrfs_raid56_end_io()
387 btrfs_bio_counter_dec(bioc->fs_info); in btrfs_raid56_end_io()
388 bbio->mirror_num = bioc->mirror_num; in btrfs_raid56_end_io()
399 struct btrfs_io_stripe *stripe = bio->bi_private; in btrfs_orig_write_end_io()
400 struct btrfs_io_context *bioc = stripe->bioc; in btrfs_orig_write_end_io()
403 btrfs_bio_counter_dec(bioc->fs_info); in btrfs_orig_write_end_io()
405 if (bio->bi_status) { in btrfs_orig_write_end_io()
406 atomic_inc(&bioc->error); in btrfs_orig_write_end_io()
407 btrfs_log_dev_io_error(bio, stripe->dev); in btrfs_orig_write_end_io()
414 if (atomic_read(&bioc->error) > bioc->max_errors) in btrfs_orig_write_end_io()
415 bio->bi_status = BLK_STS_IOERR; in btrfs_orig_write_end_io()
417 bio->bi_status = BLK_STS_OK; in btrfs_orig_write_end_io()
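btrfs_orig_write_end_io() above only marks a mirrored write as failed once more stripes have failed than the profile tolerates: every failed stripe increments bioc->error, and the status becomes an error only when that count exceeds bioc->max_errors, so nr_mirrors - 1 failures are absorbed, as the comment in btrfs_bbio_propagate_error() says. That is also why the propagation path adds max_errors to the counter, guaranteeing the threshold is crossed. A toy sketch of the accounting with made-up values:

#include <stdbool.h>
#include <stdio.h>

struct toy_bioc {
	int num_stripes;
	int max_errors;	/* e.g. 1 for a two-copy RAID1-style write */
	int error;	/* number of stripes that failed so far */
};

static bool write_succeeded(const struct toy_bioc *bioc)
{
	return bioc->error <= bioc->max_errors;
}

int main(void)
{
	struct toy_bioc bioc = { .num_stripes = 2, .max_errors = 1, .error = 0 };

	bioc.error++;	/* one mirror failed: still within tolerance */
	printf("after 1 failure: %s\n", write_succeeded(&bioc) ? "ok" : "error");
	bioc.error++;	/* the other mirror failed as well */
	printf("after 2 failures: %s\n", write_succeeded(&bioc) ? "ok" : "error");
	return 0;
}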
425 struct btrfs_io_stripe *stripe = bio->bi_private; in btrfs_clone_write_end_io()
427 if (bio->bi_status) { in btrfs_clone_write_end_io()
428 atomic_inc(&stripe->bioc->error); in btrfs_clone_write_end_io()
429 btrfs_log_dev_io_error(bio, stripe->dev); in btrfs_clone_write_end_io()
433 bio_endio(stripe->bioc->orig_bio); in btrfs_clone_write_end_io()
439 if (!dev || !dev->bdev || in btrfs_submit_dev_bio()
440 test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) || in btrfs_submit_dev_bio()
442 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) { in btrfs_submit_dev_bio()
447 bio_set_dev(bio, dev->bdev); in btrfs_submit_dev_bio()
454 u64 physical = bio->bi_iter.bi_sector << SECTOR_SHIFT; in btrfs_submit_dev_bio()
455 u64 zone_start = round_down(physical, dev->fs_info->zone_size); in btrfs_submit_dev_bio()
458 bio->bi_iter.bi_sector = zone_start >> SECTOR_SHIFT; in btrfs_submit_dev_bio()
460 btrfs_debug_in_rcu(dev->fs_info, in btrfs_submit_dev_bio()
462 __func__, bio_op(bio), bio->bi_opf, bio->bi_iter.bi_sector, in btrfs_submit_dev_bio()
463 (unsigned long)dev->bdev->bd_dev, btrfs_dev_name(dev), in btrfs_submit_dev_bio()
464 dev->devid, bio->bi_iter.bi_size); in btrfs_submit_dev_bio()
468 if (bio->bi_opf & REQ_BTRFS_CGROUP_PUNT) in btrfs_submit_dev_bio()
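For REQ_OP_ZONE_APPEND, btrfs_submit_dev_bio() above retargets bi_sector at the start of the zone containing the intended physical address; the zoned device then reports where inside that zone the data was actually written. A small standalone illustration of the arithmetic, with an invented zone size and address:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT 9

/* Userspace stand-in for the kernel's round_down() on a power-of-two size. */
static uint64_t round_down_pow2(uint64_t value, uint64_t align)
{
	return value & ~(align - 1);
}

int main(void)
{
	uint64_t zone_size = 256ULL << 20;		/* assume 256 MiB zones */
	uint64_t physical = (1ULL << 30) + 8192;	/* address inside zone 4 */
	uint64_t zone_start = round_down_pow2(physical, zone_size);

	printf("zone append bi_sector = %" PRIu64 "\n", zone_start >> SECTOR_SHIFT);
	return 0;
}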
476 struct bio *orig_bio = bioc->orig_bio, *bio; in btrfs_submit_mirrored_bio()
480 /* Reuse the bio embedded into the btrfs_bio for the last mirror */ in btrfs_submit_mirrored_bio()
481 if (dev_nr == bioc->num_stripes - 1) { in btrfs_submit_mirrored_bio()
483 bio->bi_end_io = btrfs_orig_write_end_io; in btrfs_submit_mirrored_bio()
487 bio->bi_end_io = btrfs_clone_write_end_io; in btrfs_submit_mirrored_bio()
490 bio->bi_private = &bioc->stripes[dev_nr]; in btrfs_submit_mirrored_bio()
491 bio->bi_iter.bi_sector = bioc->stripes[dev_nr].physical >> SECTOR_SHIFT; in btrfs_submit_mirrored_bio()
492 bioc->stripes[dev_nr].bioc = bioc; in btrfs_submit_mirrored_bio()
493 btrfs_submit_dev_bio(bioc->stripes[dev_nr].dev, bio); in btrfs_submit_mirrored_bio()
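btrfs_submit_mirrored_bio() above fans a write out to every stripe of the chunk: each stripe gets a bio aimed at its own physical address, clones are used for all stripes but the last, and the bio embedded in the btrfs_bio is reused for the final mirror. A minimal sketch of that loop with invented stripe addresses:

#include <stdio.h>

#define SECTOR_SHIFT 9

struct toy_stripe {
	unsigned long long physical;	/* invented physical offsets */
};

int main(void)
{
	struct toy_stripe stripes[] = {
		{ 1ULL << 20 }, { 64ULL << 20 }, { 128ULL << 20 },
	};
	int num_stripes = 3;

	for (int dev_nr = 0; dev_nr < num_stripes; dev_nr++) {
		const char *how = (dev_nr == num_stripes - 1) ?
			"reuse the original bio" : "clone the bio";

		printf("mirror %d: %s, bi_sector = %llu\n", dev_nr, how,
		       stripes[dev_nr].physical >> SECTOR_SHIFT);
	}
	return 0;
}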
500 /* Single mirror read/write fast path. */ in __btrfs_submit_bio()
501 btrfs_bio(bio)->mirror_num = mirror_num; in __btrfs_submit_bio()
502 bio->bi_iter.bi_sector = smap->physical >> SECTOR_SHIFT; in __btrfs_submit_bio()
504 btrfs_bio(bio)->orig_physical = smap->physical; in __btrfs_submit_bio()
505 bio->bi_private = smap->dev; in __btrfs_submit_bio()
506 bio->bi_end_io = btrfs_simple_end_io; in __btrfs_submit_bio()
507 btrfs_submit_dev_bio(smap->dev, bio); in __btrfs_submit_bio()
508 } else if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) { in __btrfs_submit_bio()
510 bio->bi_private = bioc; in __btrfs_submit_bio()
511 bio->bi_end_io = btrfs_raid56_end_io; in __btrfs_submit_bio()
518 int total_devs = bioc->num_stripes; in __btrfs_submit_bio()
520 bioc->orig_bio = bio; in __btrfs_submit_bio()
528 if (bbio->bio.bi_opf & REQ_META) in btrfs_bio_csum()
559 ret = btrfs_bio_csum(async->bbio); in run_one_async_start()
561 async->bbio->bio.bi_status = ret; in run_one_async_start()
576 struct bio *bio = &async->bbio->bio; in run_one_async_done()
579 if (bio->bi_status) { in run_one_async_done()
580 btrfs_orig_bbio_end_io(async->bbio); in run_one_async_done()
589 bio->bi_opf |= REQ_BTRFS_CGROUP_PUNT; in run_one_async_done()
590 __btrfs_submit_bio(bio, async->bioc, &async->smap, async->mirror_num); in run_one_async_done()
601 if (test_bit(BTRFS_FS_CSUM_IMPL_FAST, &bbio->fs_info->flags)) in should_async_write()
608 if (op_is_sync(bbio->bio.bi_opf)) in should_async_write()
612 if ((bbio->bio.bi_opf & REQ_META) && btrfs_is_zoned(bbio->fs_info)) in should_async_write()
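The conditions visible in should_async_write() above decide whether checksumming for a write is offloaded to a workqueue. Only the checks are among the matched lines, not their results, so the sketch below assumes the usual behaviour: stay synchronous when the checksum implementation is fast, when the I/O itself is synchronous, or for metadata writes on a zoned filesystem, and offload otherwise.

#include <stdbool.h>
#include <stdio.h>

struct write_ctx {
	bool csum_impl_fast;	/* accelerated checksumming is available */
	bool op_is_sync;	/* the caller waits for this I/O */
	bool is_meta;		/* REQ_META */
	bool is_zoned;		/* zoned filesystem, metadata must stay ordered */
};

static bool should_offload_csum(const struct write_ctx *c)
{
	if (c->csum_impl_fast)
		return false;	/* checksumming is cheap, submit inline */
	if (c->op_is_sync)
		return false;	/* avoid extra workqueue latency for sync I/O */
	if (c->is_meta && c->is_zoned)
		return false;	/* keep zoned metadata submission ordered */
	return true;
}

int main(void)
{
	struct write_ctx ctx = { .csum_impl_fast = false, .op_is_sync = false };

	printf("offload to workqueue: %s\n",
	       should_offload_csum(&ctx) ? "yes" : "no");
	return 0;
}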
627 struct btrfs_fs_info *fs_info = bbio->fs_info; in btrfs_wq_submit_bio()
634 async->bbio = bbio; in btrfs_wq_submit_bio()
635 async->bioc = bioc; in btrfs_wq_submit_bio()
636 async->smap = *smap; in btrfs_wq_submit_bio()
637 async->mirror_num = mirror_num; in btrfs_wq_submit_bio()
639 btrfs_init_work(&async->work, run_one_async_start, run_one_async_done, in btrfs_wq_submit_bio()
641 btrfs_queue_work(fs_info->workers, &async->work); in btrfs_wq_submit_bio()
647 struct btrfs_inode *inode = bbio->inode; in btrfs_submit_chunk()
648 struct btrfs_fs_info *fs_info = bbio->fs_info; in btrfs_submit_chunk()
649 struct bio *bio = &bbio->bio; in btrfs_submit_chunk()
650 u64 logical = bio->bi_iter.bi_sector << SECTOR_SHIFT; in btrfs_submit_chunk()
651 u64 length = bio->bi_iter.bi_size; in btrfs_submit_chunk()
669 map_length = min(map_length, fs_info->max_zone_append_size); in btrfs_submit_chunk()
673 bio = &bbio->bio; in btrfs_submit_chunk()
678 * data reads. in btrfs_submit_chunk()
681 bbio->saved_iter = bio->bi_iter; in btrfs_submit_chunk()
689 bio->bi_opf &= ~REQ_OP_WRITE; in btrfs_submit_chunk()
690 bio->bi_opf |= REQ_OP_ZONE_APPEND; in btrfs_submit_chunk()
695 * point, so they are handled as part of the no-checksum case. in btrfs_submit_chunk()
697 if (inode && !(inode->flags & BTRFS_INODE_NODATASUM) && in btrfs_submit_chunk()
698 !test_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state) && in btrfs_submit_chunk()
699 !btrfs_is_data_reloc_root(inode->root)) { in btrfs_submit_chunk()
709 inode->flags & BTRFS_INODE_NODATASUM)) { in btrfs_submit_chunk()
727 struct btrfs_bio *remaining = bbio->private; in btrfs_submit_chunk()
729 ASSERT(bbio->bio.bi_pool == &btrfs_clone_bioset); in btrfs_submit_chunk()
732 remaining->bio.bi_status = ret; in btrfs_submit_chunk()
735 bbio->bio.bi_status = ret; in btrfs_submit_chunk()
743 /* If bbio->inode is not populated, its file_offset must be 0. */ in btrfs_submit_bio()
744 ASSERT(bbio->inode || bbio->file_offset == 0); in btrfs_submit_bio()
769 ASSERT(!(fs_info->sb->s_flags & SB_RDONLY)); in btrfs_repair_io_failure()
785 if (!smap.dev->bdev || in btrfs_repair_io_failure()
786 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &smap.dev->dev_state)) { in btrfs_repair_io_failure()
787 ret = -EIO; in btrfs_repair_io_failure()
791 bio_init(&bio, smap.dev->bdev, &bvec, 1, REQ_OP_WRITE | REQ_SYNC); in btrfs_repair_io_failure()
819 * If @dev_replace is true, the write is submitted to the dev-replace target.
823 struct btrfs_fs_info *fs_info = bbio->fs_info; in btrfs_submit_repair_write()
824 u64 logical = bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT; in btrfs_submit_repair_write()
825 u64 length = bbio->bio.bi_iter.bi_size; in btrfs_submit_repair_write()
831 ASSERT(btrfs_op(&bbio->bio) == BTRFS_MAP_WRITE); in btrfs_submit_repair_write()
832 ASSERT(!bbio->inode); in btrfs_submit_repair_write()
840 ASSERT(smap.dev == fs_info->dev_replace.srcdev); in btrfs_submit_repair_write()
841 smap.dev = fs_info->dev_replace.tgtdev; in btrfs_submit_repair_write()
843 __btrfs_submit_bio(&bbio->bio, NULL, &smap, mirror_num); in btrfs_submit_repair_write()
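btrfs_submit_repair_write() above contains the dev-replace special case from the comment higher up: when @dev_replace is set, the single-mirror map is expected to point at the replace source device and is redirected to the replace target, so the repaired data also reaches the new device. A tiny sketch of that redirection with stand-in types:

#include <stdio.h>

struct toy_dev {
	const char *name;
};

struct toy_smap {
	struct toy_dev *dev;
};

int main(void)
{
	struct toy_dev srcdev = { "srcdev" }, tgtdev = { "tgtdev" };
	struct toy_smap smap = { .dev = &srcdev };
	int dev_replace = 1;	/* caller asked for the dev-replace target */

	if (dev_replace)
		smap.dev = &tgtdev;	/* redirect the repair write */
	printf("submitting repair write to %s\n", smap.dev->name);
	return 0;
}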
856 return -ENOMEM; in btrfs_bioset_init()
875 return -ENOMEM; in btrfs_bioset_init()