1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 2011, 2012 STRATO. All rights reserved.
4 */
5
6 #include <linux/blkdev.h>
7 #include <linux/ratelimit.h>
8 #include <linux/sched/mm.h>
9 #include <crypto/hash.h>
10 #include "ctree.h"
11 #include "discard.h"
12 #include "volumes.h"
13 #include "disk-io.h"
14 #include "ordered-data.h"
15 #include "transaction.h"
16 #include "backref.h"
17 #include "extent_io.h"
18 #include "dev-replace.h"
19 #include "check-integrity.h"
20 #include "rcu-string.h"
21 #include "raid56.h"
22 #include "block-group.h"
23 #include "zoned.h"
24
25 /*
26 * This is only the first step towards a full-featured scrub. It reads all
27 * extents and super blocks and verifies the checksums. In case a bad checksum
28 * is found or the extent cannot be read, good data will be written back if
29 * any can be found.
30 *
31 * Future enhancements:
32 * - In case an unrepairable extent is encountered, track which files are
33 * affected and report them
34 * - track and record media errors, throw out bad devices
35 * - add a mode to also read unallocated space
36 */
37
38 struct scrub_block;
39 struct scrub_ctx;
40
41 /*
42 * The following two values only influence the performance.
43 *
44 * The first one configures an upper limit for the number of (dynamically
45 * allocated) pages that are added to a bio. The second one configures the
46 * number of parallel and outstanding I/O operations.
47 */
48 #define SCRUB_SECTORS_PER_BIO 32 /* 128KiB per bio for 4KiB pages */
49 #define SCRUB_BIOS_PER_SCTX 64 /* 8MiB per device in flight for 4KiB pages */
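/*
 * A quick sanity check of the numbers above (illustrative arithmetic only,
 * assuming 4KiB pages/sectors):
 *   32 sectors/bio * 4KiB   = 128KiB of data per scrub bio
 *   64 bios/sctx   * 128KiB = 8MiB of read data in flight per device
 */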
50
51 /*
52 * The following value times 4KiB needs to be large enough to match the
53 * largest node/leaf/sector size that shall be supported.
54 */
55 #define SCRUB_MAX_SECTORS_PER_BLOCK (BTRFS_MAX_METADATA_BLOCKSIZE / SZ_4K)
56
57 #define SCRUB_MAX_PAGES (DIV_ROUND_UP(BTRFS_MAX_METADATA_BLOCKSIZE, PAGE_SIZE))
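/*
 * For example (illustrative only): with a BTRFS_MAX_METADATA_BLOCKSIZE of
 * 64KiB, SCRUB_MAX_SECTORS_PER_BLOCK is 16, and SCRUB_MAX_PAGES is 16 with
 * 4KiB pages, or 1 with 64KiB pages.
 */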
58
59 struct scrub_recover {
60 refcount_t refs;
61 struct btrfs_io_context *bioc;
62 u64 map_length;
63 };
64
65 struct scrub_sector {
66 struct scrub_block *sblock;
67 struct list_head list;
68 u64 flags; /* extent flags */
69 u64 generation;
70 /* Offset in bytes to @sblock. */
71 u32 offset;
72 atomic_t refs;
73 unsigned int have_csum:1;
74 unsigned int io_error:1;
75 u8 csum[BTRFS_CSUM_SIZE];
76
77 struct scrub_recover *recover;
78 };
79
80 struct scrub_bio {
81 int index;
82 struct scrub_ctx *sctx;
83 struct btrfs_device *dev;
84 struct bio *bio;
85 blk_status_t status;
86 u64 logical;
87 u64 physical;
88 struct scrub_sector *sectors[SCRUB_SECTORS_PER_BIO];
89 int sector_count;
90 int next_free;
91 struct work_struct work;
92 };
93
94 struct scrub_block {
95 /*
96 * Each page will have its page::private used to record the logical
97 * bytenr.
98 */
99 struct page *pages[SCRUB_MAX_PAGES];
100 struct scrub_sector *sectors[SCRUB_MAX_SECTORS_PER_BLOCK];
101 struct btrfs_device *dev;
102 /* Logical bytenr of the sblock */
103 u64 logical;
104 u64 physical;
105 u64 physical_for_dev_replace;
106 /* Length of sblock in bytes */
107 u32 len;
108 int sector_count;
109 int mirror_num;
110
111 atomic_t outstanding_sectors;
112 refcount_t refs; /* free mem on transition to zero */
113 struct scrub_ctx *sctx;
114 struct scrub_parity *sparity;
115 struct {
116 unsigned int header_error:1;
117 unsigned int checksum_error:1;
118 unsigned int no_io_error_seen:1;
119 unsigned int generation_error:1; /* also sets header_error */
120
121 /* The following is for the data used to check parity (data with checksum) */
123 unsigned int data_corrected:1;
124 };
125 struct work_struct work;
126 };
127
128 /* Used for the chunks with parity stripes, such as RAID5/6 */
129 struct scrub_parity {
130 struct scrub_ctx *sctx;
131
132 struct btrfs_device *scrub_dev;
133
134 u64 logic_start;
135
136 u64 logic_end;
137
138 int nsectors;
139
140 u32 stripe_len;
141
142 refcount_t refs;
143
144 struct list_head sectors_list;
145
146 /* Work of parity check and repair */
147 struct work_struct work;
148
149 /* Mark the parity blocks which have data */
150 unsigned long dbitmap;
151
152 /*
153 * Mark the parity blocks which have data, but where errors happened
154 * when reading or checking that data
155 */
156 unsigned long ebitmap;
157 };
158
159 struct scrub_ctx {
160 struct scrub_bio *bios[SCRUB_BIOS_PER_SCTX];
161 struct btrfs_fs_info *fs_info;
162 int first_free;
163 int curr;
164 atomic_t bios_in_flight;
165 atomic_t workers_pending;
166 spinlock_t list_lock;
167 wait_queue_head_t list_wait;
168 struct list_head csum_list;
169 atomic_t cancel_req;
170 int readonly;
171 int sectors_per_bio;
172
173 /* State of IO submission throttling affecting the associated device */
174 ktime_t throttle_deadline;
175 u64 throttle_sent;
176
177 int is_dev_replace;
178 u64 write_pointer;
179
180 struct scrub_bio *wr_curr_bio;
181 struct mutex wr_lock;
182 struct btrfs_device *wr_tgtdev;
183 bool flush_all_writes;
184
185 /*
186 * statistics
187 */
188 struct btrfs_scrub_progress stat;
189 spinlock_t stat_lock;
190
191 /*
192 * Use a ref counter to avoid use-after-free issues. Scrub workers
193 * decrement bios_in_flight and workers_pending and then do a wakeup
194 * on the list_wait wait queue. We must ensure the main scrub task
195 * doesn't free the scrub context before or while the workers are
196 * doing the wakeup() call.
197 */
198 refcount_t refs;
199 };
200
201 struct scrub_warning {
202 struct btrfs_path *path;
203 u64 extent_item_size;
204 const char *errstr;
205 u64 physical;
206 u64 logical;
207 struct btrfs_device *dev;
208 };
209
210 struct full_stripe_lock {
211 struct rb_node node;
212 u64 logical;
213 u64 refs;
214 struct mutex mutex;
215 };
216
217 #ifndef CONFIG_64BIT
218 /* This structure is for architectures whose (void *) is smaller than u64 */
219 struct scrub_page_private {
220 u64 logical;
221 };
222 #endif
223
224 static int attach_scrub_page_private(struct page *page, u64 logical)
225 {
226 #ifdef CONFIG_64BIT
227 attach_page_private(page, (void *)logical);
228 return 0;
229 #else
230 struct scrub_page_private *spp;
231
232 spp = kmalloc(sizeof(*spp), GFP_KERNEL);
233 if (!spp)
234 return -ENOMEM;
235 spp->logical = logical;
236 attach_page_private(page, (void *)spp);
237 return 0;
238 #endif
239 }
240
241 static void detach_scrub_page_private(struct page *page)
242 {
243 #ifdef CONFIG_64BIT
244 detach_page_private(page);
245 return;
246 #else
247 struct scrub_page_private *spp;
248
249 spp = detach_page_private(page);
250 kfree(spp);
251 return;
252 #endif
253 }
254
255 static struct scrub_block *alloc_scrub_block(struct scrub_ctx *sctx,
256 struct btrfs_device *dev,
257 u64 logical, u64 physical,
258 u64 physical_for_dev_replace,
259 int mirror_num)
260 {
261 struct scrub_block *sblock;
262
263 sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
264 if (!sblock)
265 return NULL;
266 refcount_set(&sblock->refs, 1);
267 sblock->sctx = sctx;
268 sblock->logical = logical;
269 sblock->physical = physical;
270 sblock->physical_for_dev_replace = physical_for_dev_replace;
271 sblock->dev = dev;
272 sblock->mirror_num = mirror_num;
273 sblock->no_io_error_seen = 1;
274 /*
275 * scrub_block::pages will be allocated by alloc_scrub_sector() when
276 * the corresponding page has not been allocated yet.
277 */
278 return sblock;
279 }
280
281 /*
282 * Allocate a new scrub sector and attach it to @sblock.
283 *
284 * Will also allocate new pages for @sblock if needed.
285 */
286 static struct scrub_sector *alloc_scrub_sector(struct scrub_block *sblock,
287 u64 logical, gfp_t gfp)
288 {
289 const pgoff_t page_index = (logical - sblock->logical) >> PAGE_SHIFT;
290 struct scrub_sector *ssector;
291
292 /* We must never have scrub_block exceed U32_MAX in size. */
293 ASSERT(logical - sblock->logical < U32_MAX);
294
295 ssector = kzalloc(sizeof(*ssector), gfp);
296 if (!ssector)
297 return NULL;
298
299 /* Allocate a new page if the slot is not allocated */
300 if (!sblock->pages[page_index]) {
301 int ret;
302
303 sblock->pages[page_index] = alloc_page(gfp);
304 if (!sblock->pages[page_index]) {
305 kfree(ssector);
306 return NULL;
307 }
308 ret = attach_scrub_page_private(sblock->pages[page_index],
309 sblock->logical + (page_index << PAGE_SHIFT));
310 if (ret < 0) {
311 kfree(ssector);
312 __free_page(sblock->pages[page_index]);
313 sblock->pages[page_index] = NULL;
314 return NULL;
315 }
316 }
317
318 atomic_set(&ssector->refs, 1);
319 ssector->sblock = sblock;
320 /* The sector to be added should not be used */
321 ASSERT(sblock->sectors[sblock->sector_count] == NULL);
322 ssector->offset = logical - sblock->logical;
323
324 /* The sector count must be smaller than the limit */
325 ASSERT(sblock->sector_count < SCRUB_MAX_SECTORS_PER_BLOCK);
326
327 sblock->sectors[sblock->sector_count] = ssector;
328 sblock->sector_count++;
329 sblock->len += sblock->sctx->fs_info->sectorsize;
330
331 return ssector;
332 }
333
334 static struct page *scrub_sector_get_page(struct scrub_sector *ssector)
335 {
336 struct scrub_block *sblock = ssector->sblock;
337 pgoff_t index;
338 /*
339 * When calling this function, ssector must be already attached to the
340 * parent sblock.
341 */
342 ASSERT(sblock);
343
344 /* The range should be inside the sblock range */
345 ASSERT(ssector->offset < sblock->len);
346
347 index = ssector->offset >> PAGE_SHIFT;
348 ASSERT(index < SCRUB_MAX_PAGES);
349 ASSERT(sblock->pages[index]);
350 ASSERT(PagePrivate(sblock->pages[index]));
351 return sblock->pages[index];
352 }
353
354 static unsigned int scrub_sector_get_page_offset(struct scrub_sector *ssector)
355 {
356 struct scrub_block *sblock = ssector->sblock;
357
358 /*
359 * When calling this function, ssector must be already attached to the
360 * parent sblock.
361 */
362 ASSERT(sblock);
363
364 /* The range should be inside the sblock range */
365 ASSERT(ssector->offset < sblock->len);
366
367 return offset_in_page(ssector->offset);
368 }
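/*
 * Illustrative mapping for the two helpers above (values chosen only as an
 * example): a sector at ssector->offset == 20KiB lives in sblock->pages[5]
 * at in-page offset 0 with 4KiB pages, but in sblock->pages[0] at in-page
 * offset 20KiB with 64KiB pages (the subpage case).
 */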
369
370 static char *scrub_sector_get_kaddr(struct scrub_sector *ssector)
371 {
372 return page_address(scrub_sector_get_page(ssector)) +
373 scrub_sector_get_page_offset(ssector);
374 }
375
376 static int bio_add_scrub_sector(struct bio *bio, struct scrub_sector *ssector,
377 unsigned int len)
378 {
379 return bio_add_page(bio, scrub_sector_get_page(ssector), len,
380 scrub_sector_get_page_offset(ssector));
381 }
382
383 static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
384 struct scrub_block *sblocks_for_recheck[]);
385 static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
386 struct scrub_block *sblock,
387 int retry_failed_mirror);
388 static void scrub_recheck_block_checksum(struct scrub_block *sblock);
389 static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
390 struct scrub_block *sblock_good);
391 static int scrub_repair_sector_from_good_copy(struct scrub_block *sblock_bad,
392 struct scrub_block *sblock_good,
393 int sector_num, int force_write);
394 static void scrub_write_block_to_dev_replace(struct scrub_block *sblock);
395 static int scrub_write_sector_to_dev_replace(struct scrub_block *sblock,
396 int sector_num);
397 static int scrub_checksum_data(struct scrub_block *sblock);
398 static int scrub_checksum_tree_block(struct scrub_block *sblock);
399 static int scrub_checksum_super(struct scrub_block *sblock);
400 static void scrub_block_put(struct scrub_block *sblock);
401 static void scrub_sector_get(struct scrub_sector *sector);
402 static void scrub_sector_put(struct scrub_sector *sector);
403 static void scrub_parity_get(struct scrub_parity *sparity);
404 static void scrub_parity_put(struct scrub_parity *sparity);
405 static int scrub_sectors(struct scrub_ctx *sctx, u64 logical, u32 len,
406 u64 physical, struct btrfs_device *dev, u64 flags,
407 u64 gen, int mirror_num, u8 *csum,
408 u64 physical_for_dev_replace);
409 static void scrub_bio_end_io(struct bio *bio);
410 static void scrub_bio_end_io_worker(struct work_struct *work);
411 static void scrub_block_complete(struct scrub_block *sblock);
412 static void scrub_find_good_copy(struct btrfs_fs_info *fs_info,
413 u64 extent_logical, u32 extent_len,
414 u64 *extent_physical,
415 struct btrfs_device **extent_dev,
416 int *extent_mirror_num);
417 static int scrub_add_sector_to_wr_bio(struct scrub_ctx *sctx,
418 struct scrub_sector *sector);
419 static void scrub_wr_submit(struct scrub_ctx *sctx);
420 static void scrub_wr_bio_end_io(struct bio *bio);
421 static void scrub_wr_bio_end_io_worker(struct work_struct *work);
422 static void scrub_put_ctx(struct scrub_ctx *sctx);
423
424 static inline int scrub_is_page_on_raid56(struct scrub_sector *sector)
425 {
426 return sector->recover &&
427 (sector->recover->bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK);
428 }
429
430 static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
431 {
432 refcount_inc(&sctx->refs);
433 atomic_inc(&sctx->bios_in_flight);
434 }
435
436 static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
437 {
438 atomic_dec(&sctx->bios_in_flight);
439 wake_up(&sctx->list_wait);
440 scrub_put_ctx(sctx);
441 }
442
443 static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
444 {
445 while (atomic_read(&fs_info->scrub_pause_req)) {
446 mutex_unlock(&fs_info->scrub_lock);
447 wait_event(fs_info->scrub_pause_wait,
448 atomic_read(&fs_info->scrub_pause_req) == 0);
449 mutex_lock(&fs_info->scrub_lock);
450 }
451 }
452
453 static void scrub_pause_on(struct btrfs_fs_info *fs_info)
454 {
455 atomic_inc(&fs_info->scrubs_paused);
456 wake_up(&fs_info->scrub_pause_wait);
457 }
458
459 static void scrub_pause_off(struct btrfs_fs_info *fs_info)
460 {
461 mutex_lock(&fs_info->scrub_lock);
462 __scrub_blocked_if_needed(fs_info);
463 atomic_dec(&fs_info->scrubs_paused);
464 mutex_unlock(&fs_info->scrub_lock);
465
466 wake_up(&fs_info->scrub_pause_wait);
467 }
468
469 static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
470 {
471 scrub_pause_on(fs_info);
472 scrub_pause_off(fs_info);
473 }
474
475 /*
476 * Insert new full stripe lock into full stripe locks tree
477 *
478 * Return pointer to existing or newly inserted full_stripe_lock structure if
479 * everything works well.
480 * Return ERR_PTR(-ENOMEM) if we failed to allocate memory
481 *
482 * NOTE: caller must hold full_stripe_locks_root->lock before calling this
483 * function
484 */
485 static struct full_stripe_lock *insert_full_stripe_lock(
486 struct btrfs_full_stripe_locks_tree *locks_root,
487 u64 fstripe_logical)
488 {
489 struct rb_node **p;
490 struct rb_node *parent = NULL;
491 struct full_stripe_lock *entry;
492 struct full_stripe_lock *ret;
493
494 lockdep_assert_held(&locks_root->lock);
495
496 p = &locks_root->root.rb_node;
497 while (*p) {
498 parent = *p;
499 entry = rb_entry(parent, struct full_stripe_lock, node);
500 if (fstripe_logical < entry->logical) {
501 p = &(*p)->rb_left;
502 } else if (fstripe_logical > entry->logical) {
503 p = &(*p)->rb_right;
504 } else {
505 entry->refs++;
506 return entry;
507 }
508 }
509
510 /*
511 * Insert new lock.
512 */
513 ret = kmalloc(sizeof(*ret), GFP_KERNEL);
514 if (!ret)
515 return ERR_PTR(-ENOMEM);
516 ret->logical = fstripe_logical;
517 ret->refs = 1;
518 mutex_init(&ret->mutex);
519
520 rb_link_node(&ret->node, parent, p);
521 rb_insert_color(&ret->node, &locks_root->root);
522 return ret;
523 }
524
525 /*
526 * Search for a full stripe lock of a block group
527 *
528 * Return pointer to existing full stripe lock if found
529 * Return NULL if not found
530 */
531 static struct full_stripe_lock *search_full_stripe_lock(
532 struct btrfs_full_stripe_locks_tree *locks_root,
533 u64 fstripe_logical)
534 {
535 struct rb_node *node;
536 struct full_stripe_lock *entry;
537
538 lockdep_assert_held(&locks_root->lock);
539
540 node = locks_root->root.rb_node;
541 while (node) {
542 entry = rb_entry(node, struct full_stripe_lock, node);
543 if (fstripe_logical < entry->logical)
544 node = node->rb_left;
545 else if (fstripe_logical > entry->logical)
546 node = node->rb_right;
547 else
548 return entry;
549 }
550 return NULL;
551 }
552
553 /*
554 * Helper to get full stripe logical from a normal bytenr.
555 *
556 * Caller must ensure @cache is a RAID56 block group.
557 */
558 static u64 get_full_stripe_logical(struct btrfs_block_group *cache, u64 bytenr)
559 {
560 u64 ret;
561
562 /*
563 * Due to chunk item size limit, full stripe length should not be
564 * larger than U32_MAX. Just a sanity check here.
565 */
566 WARN_ON_ONCE(cache->full_stripe_len >= U32_MAX);
567
568 /*
569 * round_down() can only handle power of 2, while RAID56 full
570 * stripe length can be 64KiB * n, so we need to manually round down.
571 */
572 ret = div64_u64(bytenr - cache->start, cache->full_stripe_len) *
573 cache->full_stripe_len + cache->start;
574 return ret;
575 }
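/*
 * Example of the manual round down above (illustrative values only): with
 * cache->start == 1GiB and full_stripe_len == 192KiB (64KiB * 3 data
 * stripes), a bytenr of 1GiB + 500KiB maps to the full stripe starting at
 * 1GiB + 384KiB, since 500KiB / 192KiB == 2 and 2 * 192KiB == 384KiB.
 */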
576
577 /*
578 * Lock a full stripe to avoid concurrency of recovery and read
579 *
580 * It's only used for profiles with parities (RAID5/6), for other profiles it
581 * does nothing.
582 *
583 * Return 0 if we locked the full stripe covering @bytenr, with a mutex held.
584 * The caller must then call unlock_full_stripe() in the same context.
585 *
586 * Return <0 if an error is encountered.
587 */
588 static int lock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr,
589 bool *locked_ret)
590 {
591 struct btrfs_block_group *bg_cache;
592 struct btrfs_full_stripe_locks_tree *locks_root;
593 struct full_stripe_lock *existing;
594 u64 fstripe_start;
595 int ret = 0;
596
597 *locked_ret = false;
598 bg_cache = btrfs_lookup_block_group(fs_info, bytenr);
599 if (!bg_cache) {
600 ASSERT(0);
601 return -ENOENT;
602 }
603
604 /* Profiles not based on parity don't need full stripe lock */
605 if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK))
606 goto out;
607 locks_root = &bg_cache->full_stripe_locks_root;
608
609 fstripe_start = get_full_stripe_logical(bg_cache, bytenr);
610
611 /* Now insert the full stripe lock */
612 mutex_lock(&locks_root->lock);
613 existing = insert_full_stripe_lock(locks_root, fstripe_start);
614 mutex_unlock(&locks_root->lock);
615 if (IS_ERR(existing)) {
616 ret = PTR_ERR(existing);
617 goto out;
618 }
619 mutex_lock(&existing->mutex);
620 *locked_ret = true;
621 out:
622 btrfs_put_block_group(bg_cache);
623 return ret;
624 }
625
626 /*
627 * Unlock a full stripe.
628 *
629 * NOTE: Caller must ensure it's the same context calling corresponding
630 * lock_full_stripe().
631 *
632 * Return 0 if we unlock full stripe without problem.
633 * Return <0 for error
634 */
635 static int unlock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr,
636 bool locked)
637 {
638 struct btrfs_block_group *bg_cache;
639 struct btrfs_full_stripe_locks_tree *locks_root;
640 struct full_stripe_lock *fstripe_lock;
641 u64 fstripe_start;
642 bool freeit = false;
643 int ret = 0;
644
645 /* If we didn't acquire full stripe lock, no need to continue */
646 if (!locked)
647 return 0;
648
649 bg_cache = btrfs_lookup_block_group(fs_info, bytenr);
650 if (!bg_cache) {
651 ASSERT(0);
652 return -ENOENT;
653 }
654 if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK))
655 goto out;
656
657 locks_root = &bg_cache->full_stripe_locks_root;
658 fstripe_start = get_full_stripe_logical(bg_cache, bytenr);
659
660 mutex_lock(&locks_root->lock);
661 fstripe_lock = search_full_stripe_lock(locks_root, fstripe_start);
662 /* Unpaired unlock_full_stripe() detected */
663 if (!fstripe_lock) {
664 WARN_ON(1);
665 ret = -ENOENT;
666 mutex_unlock(&locks_root->lock);
667 goto out;
668 }
669
670 if (fstripe_lock->refs == 0) {
671 WARN_ON(1);
672 btrfs_warn(fs_info, "full stripe lock at %llu refcount underflow",
673 fstripe_lock->logical);
674 } else {
675 fstripe_lock->refs--;
676 }
677
678 if (fstripe_lock->refs == 0) {
679 rb_erase(&fstripe_lock->node, &locks_root->root);
680 freeit = true;
681 }
682 mutex_unlock(&locks_root->lock);
683
684 mutex_unlock(&fstripe_lock->mutex);
685 if (freeit)
686 kfree(fstripe_lock);
687 out:
688 btrfs_put_block_group(bg_cache);
689 return ret;
690 }
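/*
 * A minimal usage sketch of the pair above (this is how
 * scrub_handle_errored_block() uses it):
 *
 *	bool locked;
 *
 *	ret = lock_full_stripe(fs_info, logical, &locked);
 *	if (ret < 0)
 *		return ret;
 *	... recheck/repair the sectors inside the full stripe ...
 *	ret = unlock_full_stripe(fs_info, logical, locked);
 */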
691
692 static void scrub_free_csums(struct scrub_ctx *sctx)
693 {
694 while (!list_empty(&sctx->csum_list)) {
695 struct btrfs_ordered_sum *sum;
696 sum = list_first_entry(&sctx->csum_list,
697 struct btrfs_ordered_sum, list);
698 list_del(&sum->list);
699 kfree(sum);
700 }
701 }
702
703 static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
704 {
705 int i;
706
707 if (!sctx)
708 return;
709
710 /* this can happen when scrub is cancelled */
711 if (sctx->curr != -1) {
712 struct scrub_bio *sbio = sctx->bios[sctx->curr];
713
714 for (i = 0; i < sbio->sector_count; i++)
715 scrub_block_put(sbio->sectors[i]->sblock);
716 bio_put(sbio->bio);
717 }
718
719 for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
720 struct scrub_bio *sbio = sctx->bios[i];
721
722 if (!sbio)
723 break;
724 kfree(sbio);
725 }
726
727 kfree(sctx->wr_curr_bio);
728 scrub_free_csums(sctx);
729 kfree(sctx);
730 }
731
732 static void scrub_put_ctx(struct scrub_ctx *sctx)
733 {
734 if (refcount_dec_and_test(&sctx->refs))
735 scrub_free_ctx(sctx);
736 }
737
738 static noinline_for_stack struct scrub_ctx *scrub_setup_ctx(
739 struct btrfs_fs_info *fs_info, int is_dev_replace)
740 {
741 struct scrub_ctx *sctx;
742 int i;
743
744 sctx = kzalloc(sizeof(*sctx), GFP_KERNEL);
745 if (!sctx)
746 goto nomem;
747 refcount_set(&sctx->refs, 1);
748 sctx->is_dev_replace = is_dev_replace;
749 sctx->sectors_per_bio = SCRUB_SECTORS_PER_BIO;
750 sctx->curr = -1;
751 sctx->fs_info = fs_info;
752 INIT_LIST_HEAD(&sctx->csum_list);
753 for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
754 struct scrub_bio *sbio;
755
756 sbio = kzalloc(sizeof(*sbio), GFP_KERNEL);
757 if (!sbio)
758 goto nomem;
759 sctx->bios[i] = sbio;
760
761 sbio->index = i;
762 sbio->sctx = sctx;
763 sbio->sector_count = 0;
764 INIT_WORK(&sbio->work, scrub_bio_end_io_worker);
765
766 if (i != SCRUB_BIOS_PER_SCTX - 1)
767 sctx->bios[i]->next_free = i + 1;
768 else
769 sctx->bios[i]->next_free = -1;
770 }
771 sctx->first_free = 0;
772 atomic_set(&sctx->bios_in_flight, 0);
773 atomic_set(&sctx->workers_pending, 0);
774 atomic_set(&sctx->cancel_req, 0);
775
776 spin_lock_init(&sctx->list_lock);
777 spin_lock_init(&sctx->stat_lock);
778 init_waitqueue_head(&sctx->list_wait);
779 sctx->throttle_deadline = 0;
780
781 WARN_ON(sctx->wr_curr_bio != NULL);
782 mutex_init(&sctx->wr_lock);
783 sctx->wr_curr_bio = NULL;
784 if (is_dev_replace) {
785 WARN_ON(!fs_info->dev_replace.tgtdev);
786 sctx->wr_tgtdev = fs_info->dev_replace.tgtdev;
787 sctx->flush_all_writes = false;
788 }
789
790 return sctx;
791
792 nomem:
793 scrub_free_ctx(sctx);
794 return ERR_PTR(-ENOMEM);
795 }
796
797 static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
798 void *warn_ctx)
799 {
800 u32 nlink;
801 int ret;
802 int i;
803 unsigned nofs_flag;
804 struct extent_buffer *eb;
805 struct btrfs_inode_item *inode_item;
806 struct scrub_warning *swarn = warn_ctx;
807 struct btrfs_fs_info *fs_info = swarn->dev->fs_info;
808 struct inode_fs_paths *ipath = NULL;
809 struct btrfs_root *local_root;
810 struct btrfs_key key;
811
812 local_root = btrfs_get_fs_root(fs_info, root, true);
813 if (IS_ERR(local_root)) {
814 ret = PTR_ERR(local_root);
815 goto err;
816 }
817
818 /*
819 * this makes the path point to (inum INODE_ITEM ioff)
820 */
821 key.objectid = inum;
822 key.type = BTRFS_INODE_ITEM_KEY;
823 key.offset = 0;
824
825 ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0);
826 if (ret) {
827 btrfs_put_root(local_root);
828 btrfs_release_path(swarn->path);
829 goto err;
830 }
831
832 eb = swarn->path->nodes[0];
833 inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
834 struct btrfs_inode_item);
835 nlink = btrfs_inode_nlink(eb, inode_item);
836 btrfs_release_path(swarn->path);
837
838 /*
839 * init_ipath() might indirectly call vmalloc, or use GFP_KERNEL. Scrub
840 * uses GFP_NOFS in this context, so we keep it consistent but it does
841 * not seem to be strictly necessary.
842 */
843 nofs_flag = memalloc_nofs_save();
844 ipath = init_ipath(4096, local_root, swarn->path);
845 memalloc_nofs_restore(nofs_flag);
846 if (IS_ERR(ipath)) {
847 btrfs_put_root(local_root);
848 ret = PTR_ERR(ipath);
849 ipath = NULL;
850 goto err;
851 }
852 ret = paths_from_inode(inum, ipath);
853
854 if (ret < 0)
855 goto err;
856
857 /*
858 * We deliberately ignore the fact that ipath might have been too small to
859 * hold all of the paths here.
860 */
861 for (i = 0; i < ipath->fspath->elem_cnt; ++i)
862 btrfs_warn_in_rcu(fs_info,
863 "%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu, length %u, links %u (path: %s)",
864 swarn->errstr, swarn->logical,
865 rcu_str_deref(swarn->dev->name),
866 swarn->physical,
867 root, inum, offset,
868 fs_info->sectorsize, nlink,
869 (char *)(unsigned long)ipath->fspath->val[i]);
870
871 btrfs_put_root(local_root);
872 free_ipath(ipath);
873 return 0;
874
875 err:
876 btrfs_warn_in_rcu(fs_info,
877 "%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu: path resolving failed with ret=%d",
878 swarn->errstr, swarn->logical,
879 rcu_str_deref(swarn->dev->name),
880 swarn->physical,
881 root, inum, offset, ret);
882
883 free_ipath(ipath);
884 return 0;
885 }
886
887 static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
888 {
889 struct btrfs_device *dev;
890 struct btrfs_fs_info *fs_info;
891 struct btrfs_path *path;
892 struct btrfs_key found_key;
893 struct extent_buffer *eb;
894 struct btrfs_extent_item *ei;
895 struct scrub_warning swarn;
896 unsigned long ptr = 0;
897 u64 extent_item_pos;
898 u64 flags = 0;
899 u64 ref_root;
900 u32 item_size;
901 u8 ref_level = 0;
902 int ret;
903
904 WARN_ON(sblock->sector_count < 1);
905 dev = sblock->dev;
906 fs_info = sblock->sctx->fs_info;
907
908 /* Super block error, no need to search extent tree. */
909 if (sblock->sectors[0]->flags & BTRFS_EXTENT_FLAG_SUPER) {
910 btrfs_warn_in_rcu(fs_info, "%s on device %s, physical %llu",
911 errstr, rcu_str_deref(dev->name),
912 sblock->physical);
913 return;
914 }
915 path = btrfs_alloc_path();
916 if (!path)
917 return;
918
919 swarn.physical = sblock->physical;
920 swarn.logical = sblock->logical;
921 swarn.errstr = errstr;
922 swarn.dev = NULL;
923
924 ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
925 &flags);
926 if (ret < 0)
927 goto out;
928
929 extent_item_pos = swarn.logical - found_key.objectid;
930 swarn.extent_item_size = found_key.offset;
931
932 eb = path->nodes[0];
933 ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
934 item_size = btrfs_item_size(eb, path->slots[0]);
935
936 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
937 do {
938 ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
939 item_size, &ref_root,
940 &ref_level);
941 btrfs_warn_in_rcu(fs_info,
942 "%s at logical %llu on dev %s, physical %llu: metadata %s (level %d) in tree %llu",
943 errstr, swarn.logical,
944 rcu_str_deref(dev->name),
945 swarn.physical,
946 ref_level ? "node" : "leaf",
947 ret < 0 ? -1 : ref_level,
948 ret < 0 ? -1 : ref_root);
949 } while (ret != 1);
950 btrfs_release_path(path);
951 } else {
952 btrfs_release_path(path);
953 swarn.path = path;
954 swarn.dev = dev;
955 iterate_extent_inodes(fs_info, found_key.objectid,
956 extent_item_pos, 1,
957 scrub_print_warning_inode, &swarn, false);
958 }
959
960 out:
961 btrfs_free_path(path);
962 }
963
964 static inline void scrub_get_recover(struct scrub_recover *recover)
965 {
966 refcount_inc(&recover->refs);
967 }
968
969 static inline void scrub_put_recover(struct btrfs_fs_info *fs_info,
970 struct scrub_recover *recover)
971 {
972 if (refcount_dec_and_test(&recover->refs)) {
973 btrfs_bio_counter_dec(fs_info);
974 btrfs_put_bioc(recover->bioc);
975 kfree(recover);
976 }
977 }
978
979 /*
980 * scrub_handle_errored_block gets called when either verification of the
981 * sectors failed or the bio failed to read, e.g. with EIO. In the latter
982 * case, this function handles all sectors in the bio, even though only one
983 * may be bad.
984 * The goal of this function is to repair the errored block by using the
985 * contents of one of the mirrors.
986 */
987 static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
988 {
989 struct scrub_ctx *sctx = sblock_to_check->sctx;
990 struct btrfs_device *dev = sblock_to_check->dev;
991 struct btrfs_fs_info *fs_info;
992 u64 logical;
993 unsigned int failed_mirror_index;
994 unsigned int is_metadata;
995 unsigned int have_csum;
996 /* One scrub_block for each mirror */
997 struct scrub_block *sblocks_for_recheck[BTRFS_MAX_MIRRORS] = { 0 };
998 struct scrub_block *sblock_bad;
999 int ret;
1000 int mirror_index;
1001 int sector_num;
1002 int success;
1003 bool full_stripe_locked;
1004 unsigned int nofs_flag;
1005 static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
1006 DEFAULT_RATELIMIT_BURST);
1007
1008 BUG_ON(sblock_to_check->sector_count < 1);
1009 fs_info = sctx->fs_info;
1010 if (sblock_to_check->sectors[0]->flags & BTRFS_EXTENT_FLAG_SUPER) {
1011 /*
1012 * If we find an error in a super block, we just report it.
1013 * They will get written with the next transaction commit
1014 * anyway
1015 */
1016 scrub_print_warning("super block error", sblock_to_check);
1017 spin_lock(&sctx->stat_lock);
1018 ++sctx->stat.super_errors;
1019 spin_unlock(&sctx->stat_lock);
1020 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS);
1021 return 0;
1022 }
1023 logical = sblock_to_check->logical;
1024 ASSERT(sblock_to_check->mirror_num);
1025 failed_mirror_index = sblock_to_check->mirror_num - 1;
1026 is_metadata = !(sblock_to_check->sectors[0]->flags &
1027 BTRFS_EXTENT_FLAG_DATA);
1028 have_csum = sblock_to_check->sectors[0]->have_csum;
1029
1030 if (!sctx->is_dev_replace && btrfs_repair_one_zone(fs_info, logical))
1031 return 0;
1032
1033 /*
1034 * We must use GFP_NOFS because the scrub task might be waiting for a
1035 * worker task executing this function and in turn a transaction commit
1036 * might be waiting the scrub task to pause (which needs to wait for all
1037 * the worker tasks to complete before pausing).
1038 * We do allocations in the workers through insert_full_stripe_lock()
1039 * and scrub_add_sector_to_wr_bio(), which happens down the call chain of
1040 * this function.
1041 */
1042 nofs_flag = memalloc_nofs_save();
1043 /*
1044 * For RAID5/6, races can happen between scrub threads of different
1045 * devices. On data corruption, the parity and data threads will both
1046 * try to recover the data.
1047 * Such a race can lead to a doubly added csum error, or even an
1048 * unrecoverable error.
1049 */
1050 ret = lock_full_stripe(fs_info, logical, &full_stripe_locked);
1051 if (ret < 0) {
1052 memalloc_nofs_restore(nofs_flag);
1053 spin_lock(&sctx->stat_lock);
1054 if (ret == -ENOMEM)
1055 sctx->stat.malloc_errors++;
1056 sctx->stat.read_errors++;
1057 sctx->stat.uncorrectable_errors++;
1058 spin_unlock(&sctx->stat_lock);
1059 return ret;
1060 }
1061
1062 /*
1063 * Read all mirrors one after the other. This includes re-reading
1064 * the extent or metadata block that failed (the reason this fixup
1065 * code is called) another time,
1066 * sector by sector this time in order to know which sectors
1067 * caused I/O errors and which ones are good (for all mirrors).
1068 * It is the goal to handle the situation when more than one
1069 * mirror contains I/O errors, but the errors do not
1070 * overlap, i.e. the data can be repaired by selecting the
1071 * sectors from those mirrors without I/O error on the
1072 * particular sectors. One example (with blocks >= 2 * sectorsize)
1073 * would be that mirror #1 has an I/O error on the first sector,
1074 * the second sector is good, and mirror #2 has an I/O error on
1075 * the second sector, but the first sector is good.
1076 * Then the first sector of the first mirror can be repaired by
1077 * taking the first sector of the second mirror, and the
1078 * second sector of the second mirror can be repaired by
1079 * copying the contents of the 2nd sector of the 1st mirror.
1080 * One more note: if the sectors of one mirror contain I/O
1081 * errors, the checksum cannot be verified. In order to get
1082 * the best data for repairing, the first attempt is to find
1083 * a mirror without I/O errors and with a validated checksum.
1084 * Only if this is not possible, the sectors are picked from
1085 * mirrors with I/O errors without considering the checksum.
1086 * If the latter is the case, at the end, the checksum of the
1087 * repaired area is verified in order to correctly maintain
1088 * the statistics.
1089 */
1090 for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS; mirror_index++) {
1091 /*
1092 * Note: the two members refs and outstanding_sectors are not
1093 * used in the blocks that are used for the recheck procedure.
1094 *
1095 * But alloc_scrub_block() will initialize sblock::refs anyway,
1096 * so we can use scrub_block_put() to clean them up.
1097 *
1098 * And here we don't setup the physical/dev for the sblock yet,
1099 * they will be correctly initialized in scrub_setup_recheck_block().
1100 */
1101 sblocks_for_recheck[mirror_index] = alloc_scrub_block(sctx, NULL,
1102 logical, 0, 0, mirror_index);
1103 if (!sblocks_for_recheck[mirror_index]) {
1104 spin_lock(&sctx->stat_lock);
1105 sctx->stat.malloc_errors++;
1106 sctx->stat.read_errors++;
1107 sctx->stat.uncorrectable_errors++;
1108 spin_unlock(&sctx->stat_lock);
1109 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
1110 goto out;
1111 }
1112 }
1113
1114 /* Setup the context, map the logical blocks and alloc the sectors */
1115 ret = scrub_setup_recheck_block(sblock_to_check, sblocks_for_recheck);
1116 if (ret) {
1117 spin_lock(&sctx->stat_lock);
1118 sctx->stat.read_errors++;
1119 sctx->stat.uncorrectable_errors++;
1120 spin_unlock(&sctx->stat_lock);
1121 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
1122 goto out;
1123 }
1124 BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
1125 sblock_bad = sblocks_for_recheck[failed_mirror_index];
1126
1127 /* build and submit the bios for the failed mirror, check checksums */
1128 scrub_recheck_block(fs_info, sblock_bad, 1);
1129
1130 if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
1131 sblock_bad->no_io_error_seen) {
1132 /*
1133 * The error disappeared after reading sector by sector, or
1134 * the area was part of a huge bio and other parts of the
1135 * bio caused I/O errors, or the block layer merged several
1136 * read requests into one and the error is caused by a
1137 * different bio (usually one of the two latter cases is
1138 * the cause)
1139 */
1140 spin_lock(&sctx->stat_lock);
1141 sctx->stat.unverified_errors++;
1142 sblock_to_check->data_corrected = 1;
1143 spin_unlock(&sctx->stat_lock);
1144
1145 if (sctx->is_dev_replace)
1146 scrub_write_block_to_dev_replace(sblock_bad);
1147 goto out;
1148 }
1149
1150 if (!sblock_bad->no_io_error_seen) {
1151 spin_lock(&sctx->stat_lock);
1152 sctx->stat.read_errors++;
1153 spin_unlock(&sctx->stat_lock);
1154 if (__ratelimit(&rs))
1155 scrub_print_warning("i/o error", sblock_to_check);
1156 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
1157 } else if (sblock_bad->checksum_error) {
1158 spin_lock(&sctx->stat_lock);
1159 sctx->stat.csum_errors++;
1160 spin_unlock(&sctx->stat_lock);
1161 if (__ratelimit(&rs))
1162 scrub_print_warning("checksum error", sblock_to_check);
1163 btrfs_dev_stat_inc_and_print(dev,
1164 BTRFS_DEV_STAT_CORRUPTION_ERRS);
1165 } else if (sblock_bad->header_error) {
1166 spin_lock(&sctx->stat_lock);
1167 sctx->stat.verify_errors++;
1168 spin_unlock(&sctx->stat_lock);
1169 if (__ratelimit(&rs))
1170 scrub_print_warning("checksum/header error",
1171 sblock_to_check);
1172 if (sblock_bad->generation_error)
1173 btrfs_dev_stat_inc_and_print(dev,
1174 BTRFS_DEV_STAT_GENERATION_ERRS);
1175 else
1176 btrfs_dev_stat_inc_and_print(dev,
1177 BTRFS_DEV_STAT_CORRUPTION_ERRS);
1178 }
1179
1180 if (sctx->readonly) {
1181 ASSERT(!sctx->is_dev_replace);
1182 goto out;
1183 }
1184
1185 /*
1186 * now build and submit the bios for the other mirrors, check
1187 * checksums.
1188 * First try to pick the mirror which is completely without I/O
1189 * errors and also does not have a checksum error.
1190 * If one is found, and if a checksum is present, the full block
1191 * that is known to contain an error is rewritten. Afterwards
1192 * the block is known to be corrected.
1193 * If a mirror is found which is completely correct, and no
1194 * checksum is present, only those sectors are rewritten that had
1195 * an I/O error in the block to be repaired, since it cannot be
1196 * determined, which copy of the other sectors is better (and it
1197 * could happen otherwise that a correct sector would be
1198 * overwritten by a bad one).
1199 */
1200 for (mirror_index = 0; ;mirror_index++) {
1201 struct scrub_block *sblock_other;
1202
1203 if (mirror_index == failed_mirror_index)
1204 continue;
1205
1206 /* raid56's mirror can be more than BTRFS_MAX_MIRRORS */
1207 if (!scrub_is_page_on_raid56(sblock_bad->sectors[0])) {
1208 if (mirror_index >= BTRFS_MAX_MIRRORS)
1209 break;
1210 if (!sblocks_for_recheck[mirror_index]->sector_count)
1211 break;
1212
1213 sblock_other = sblocks_for_recheck[mirror_index];
1214 } else {
1215 struct scrub_recover *r = sblock_bad->sectors[0]->recover;
1216 int max_allowed = r->bioc->num_stripes - r->bioc->num_tgtdevs;
1217
1218 if (mirror_index >= max_allowed)
1219 break;
1220 if (!sblocks_for_recheck[1]->sector_count)
1221 break;
1222
1223 ASSERT(failed_mirror_index == 0);
1224 sblock_other = sblocks_for_recheck[1];
1225 sblock_other->mirror_num = 1 + mirror_index;
1226 }
1227
1228 /* build and submit the bios, check checksums */
1229 scrub_recheck_block(fs_info, sblock_other, 0);
1230
1231 if (!sblock_other->header_error &&
1232 !sblock_other->checksum_error &&
1233 sblock_other->no_io_error_seen) {
1234 if (sctx->is_dev_replace) {
1235 scrub_write_block_to_dev_replace(sblock_other);
1236 goto corrected_error;
1237 } else {
1238 ret = scrub_repair_block_from_good_copy(
1239 sblock_bad, sblock_other);
1240 if (!ret)
1241 goto corrected_error;
1242 }
1243 }
1244 }
1245
1246 if (sblock_bad->no_io_error_seen && !sctx->is_dev_replace)
1247 goto did_not_correct_error;
1248
1249 /*
1250 * In case of I/O errors in the area that is supposed to be
1251 * repaired, continue by picking good copies of those sectors.
1252 * Select the good sectors from mirrors to rewrite bad sectors from
1253 * the area to fix. Afterwards verify the checksum of the block
1254 * that is supposed to be repaired. This verification step is
1255 * only done for the purpose of statistic counting and for the
1256 * final scrub report, whether errors remain.
1257 * A perfect algorithm could make use of the checksum and try
1258 * all possible combinations of sectors from the different mirrors
1259 * until the checksum verification succeeds. For example, when
1260 * the 2nd sector of mirror #1 faces I/O errors, and the 2nd sector
1261 * of mirror #2 is readable but the final checksum test fails,
1262 * then the 2nd sector of mirror #3 could be tried, whether now
1263 * the final checksum succeeds. But this would be a rare
1264 * exception and is therefore not implemented. At least it is
1265 * avoided that the good copy is overwritten.
1266 * A more useful improvement would be to pick the sectors
1267 * without I/O error based on sector sizes (512 bytes on legacy
1268 * disks) instead of on sectorsize. Then maybe 512 byte of one
1269 * mirror could be repaired by taking 512 byte of a different
1270 * mirror, even if other 512 byte sectors in the same sectorsize
1271 * area are unreadable.
1272 */
1273 success = 1;
1274 for (sector_num = 0; sector_num < sblock_bad->sector_count;
1275 sector_num++) {
1276 struct scrub_sector *sector_bad = sblock_bad->sectors[sector_num];
1277 struct scrub_block *sblock_other = NULL;
1278
1279 /* Skip no-io-error sectors in scrub */
1280 if (!sector_bad->io_error && !sctx->is_dev_replace)
1281 continue;
1282
1283 if (scrub_is_page_on_raid56(sblock_bad->sectors[0])) {
1284 /*
1285 * In case of dev replace, if the raid56 rebuild process
1286 * didn't work out correct data, then copy the content of
1287 * sblock_bad to make sure the target device is identical
1288 * to the source device, instead of writing garbage data
1289 * from the sblock_for_recheck array to the target device.
1290 */
1291 sblock_other = NULL;
1292 } else if (sector_bad->io_error) {
1293 /* Try to find no-io-error sector in mirrors */
1294 for (mirror_index = 0;
1295 mirror_index < BTRFS_MAX_MIRRORS &&
1296 sblocks_for_recheck[mirror_index]->sector_count > 0;
1297 mirror_index++) {
1298 if (!sblocks_for_recheck[mirror_index]->
1299 sectors[sector_num]->io_error) {
1300 sblock_other = sblocks_for_recheck[mirror_index];
1301 break;
1302 }
1303 }
1304 if (!sblock_other)
1305 success = 0;
1306 }
1307
1308 if (sctx->is_dev_replace) {
1309 /*
1310 * Did not find a mirror to fetch the sector from.
1311 * scrub_write_sector_to_dev_replace() handles this
1312 * case (sector->io_error), by filling the block with
1313 * zeros before submitting the write request
1314 */
1315 if (!sblock_other)
1316 sblock_other = sblock_bad;
1317
1318 if (scrub_write_sector_to_dev_replace(sblock_other,
1319 sector_num) != 0) {
1320 atomic64_inc(
1321 &fs_info->dev_replace.num_write_errors);
1322 success = 0;
1323 }
1324 } else if (sblock_other) {
1325 ret = scrub_repair_sector_from_good_copy(sblock_bad,
1326 sblock_other,
1327 sector_num, 0);
1328 if (0 == ret)
1329 sector_bad->io_error = 0;
1330 else
1331 success = 0;
1332 }
1333 }
1334
1335 if (success && !sctx->is_dev_replace) {
1336 if (is_metadata || have_csum) {
1337 /*
1338 * need to verify the checksum now that all
1339 * sectors on disk are repaired (the write
1340 * request for data to be repaired is on its way).
1341 * Just be lazy and use scrub_recheck_block()
1342 * which re-reads the data before the checksum
1343 * is verified, but most likely the data comes out
1344 * of the page cache.
1345 */
1346 scrub_recheck_block(fs_info, sblock_bad, 1);
1347 if (!sblock_bad->header_error &&
1348 !sblock_bad->checksum_error &&
1349 sblock_bad->no_io_error_seen)
1350 goto corrected_error;
1351 else
1352 goto did_not_correct_error;
1353 } else {
1354 corrected_error:
1355 spin_lock(&sctx->stat_lock);
1356 sctx->stat.corrected_errors++;
1357 sblock_to_check->data_corrected = 1;
1358 spin_unlock(&sctx->stat_lock);
1359 btrfs_err_rl_in_rcu(fs_info,
1360 "fixed up error at logical %llu on dev %s",
1361 logical, rcu_str_deref(dev->name));
1362 }
1363 } else {
1364 did_not_correct_error:
1365 spin_lock(&sctx->stat_lock);
1366 sctx->stat.uncorrectable_errors++;
1367 spin_unlock(&sctx->stat_lock);
1368 btrfs_err_rl_in_rcu(fs_info,
1369 "unable to fixup (regular) error at logical %llu on dev %s",
1370 logical, rcu_str_deref(dev->name));
1371 }
1372
1373 out:
1374 for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS; mirror_index++) {
1375 struct scrub_block *sblock = sblocks_for_recheck[mirror_index];
1376 struct scrub_recover *recover;
1377 int sector_index;
1378
1379 /* Not allocated, continue checking the next mirror */
1380 if (!sblock)
1381 continue;
1382
1383 for (sector_index = 0; sector_index < sblock->sector_count;
1384 sector_index++) {
1385 /*
1386 * Here we just cleanup the recover, each sector will be
1387 * properly cleaned up by later scrub_block_put()
1388 */
1389 recover = sblock->sectors[sector_index]->recover;
1390 if (recover) {
1391 scrub_put_recover(fs_info, recover);
1392 sblock->sectors[sector_index]->recover = NULL;
1393 }
1394 }
1395 scrub_block_put(sblock);
1396 }
1397
1398 ret = unlock_full_stripe(fs_info, logical, full_stripe_locked);
1399 memalloc_nofs_restore(nofs_flag);
1400 if (ret < 0)
1401 return ret;
1402 return 0;
1403 }
1404
1405 static inline int scrub_nr_raid_mirrors(struct btrfs_io_context *bioc)
1406 {
1407 if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID5)
1408 return 2;
1409 else if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID6)
1410 return 3;
1411 else
1412 return (int)bioc->num_stripes;
1413 }
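/*
 * Roughly speaking: besides the direct read, RAID5 allows one parity based
 * rebuild attempt and RAID6 allows two, hence the 2 and 3 "mirrors" above;
 * all other profiles simply expose one mirror per stripe returned by the
 * mapping.
 */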
1414
1415 static inline void scrub_stripe_index_and_offset(u64 logical, u64 map_type,
1416 u64 *raid_map,
1417 int nstripes, int mirror,
1418 int *stripe_index,
1419 u64 *stripe_offset)
1420 {
1421 int i;
1422
1423 if (map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
1424 /* RAID5/6 */
1425 for (i = 0; i < nstripes; i++) {
1426 if (raid_map[i] == RAID6_Q_STRIPE ||
1427 raid_map[i] == RAID5_P_STRIPE)
1428 continue;
1429
1430 if (logical >= raid_map[i] &&
1431 logical < raid_map[i] + BTRFS_STRIPE_LEN)
1432 break;
1433 }
1434
1435 *stripe_index = i;
1436 *stripe_offset = logical - raid_map[i];
1437 } else {
1438 /* The other RAID type */
1439 *stripe_index = mirror;
1440 *stripe_offset = 0;
1441 }
1442 }
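/*
 * Illustrative example (made-up addresses): for a RAID5 chunk with
 * raid_map == { X, X + 64KiB, RAID5_P_STRIPE } and logical == X + 80KiB,
 * the loop above stops at i == 1, so *stripe_index == 1 and
 * *stripe_offset == 16KiB.
 */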
1443
1444 static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
1445 struct scrub_block *sblocks_for_recheck[])
1446 {
1447 struct scrub_ctx *sctx = original_sblock->sctx;
1448 struct btrfs_fs_info *fs_info = sctx->fs_info;
1449 u64 logical = original_sblock->logical;
1450 u64 length = original_sblock->sector_count << fs_info->sectorsize_bits;
1451 u64 generation = original_sblock->sectors[0]->generation;
1452 u64 flags = original_sblock->sectors[0]->flags;
1453 u64 have_csum = original_sblock->sectors[0]->have_csum;
1454 struct scrub_recover *recover;
1455 struct btrfs_io_context *bioc;
1456 u64 sublen;
1457 u64 mapped_length;
1458 u64 stripe_offset;
1459 int stripe_index;
1460 int sector_index = 0;
1461 int mirror_index;
1462 int nmirrors;
1463 int ret;
1464
1465 while (length > 0) {
1466 sublen = min_t(u64, length, fs_info->sectorsize);
1467 mapped_length = sublen;
1468 bioc = NULL;
1469
1470 /*
1471 * With a length of sectorsize, each returned stripe represents
1472 * one mirror
1473 */
1474 btrfs_bio_counter_inc_blocked(fs_info);
1475 ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
1476 logical, &mapped_length, &bioc);
1477 if (ret || !bioc || mapped_length < sublen) {
1478 btrfs_put_bioc(bioc);
1479 btrfs_bio_counter_dec(fs_info);
1480 return -EIO;
1481 }
1482
1483 recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS);
1484 if (!recover) {
1485 btrfs_put_bioc(bioc);
1486 btrfs_bio_counter_dec(fs_info);
1487 return -ENOMEM;
1488 }
1489
1490 refcount_set(&recover->refs, 1);
1491 recover->bioc = bioc;
1492 recover->map_length = mapped_length;
1493
1494 ASSERT(sector_index < SCRUB_MAX_SECTORS_PER_BLOCK);
1495
1496 nmirrors = min(scrub_nr_raid_mirrors(bioc), BTRFS_MAX_MIRRORS);
1497
1498 for (mirror_index = 0; mirror_index < nmirrors;
1499 mirror_index++) {
1500 struct scrub_block *sblock;
1501 struct scrub_sector *sector;
1502
1503 sblock = sblocks_for_recheck[mirror_index];
1504 sblock->sctx = sctx;
1505
1506 sector = alloc_scrub_sector(sblock, logical, GFP_NOFS);
1507 if (!sector) {
1508 spin_lock(&sctx->stat_lock);
1509 sctx->stat.malloc_errors++;
1510 spin_unlock(&sctx->stat_lock);
1511 scrub_put_recover(fs_info, recover);
1512 return -ENOMEM;
1513 }
1514 sector->flags = flags;
1515 sector->generation = generation;
1516 sector->have_csum = have_csum;
1517 if (have_csum)
1518 memcpy(sector->csum,
1519 original_sblock->sectors[0]->csum,
1520 sctx->fs_info->csum_size);
1521
1522 scrub_stripe_index_and_offset(logical,
1523 bioc->map_type,
1524 bioc->raid_map,
1525 bioc->num_stripes -
1526 bioc->num_tgtdevs,
1527 mirror_index,
1528 &stripe_index,
1529 &stripe_offset);
1530 /*
1531 * We're at the first sector, also populate @sblock
1532 * physical and dev.
1533 */
1534 if (sector_index == 0) {
1535 sblock->physical =
1536 bioc->stripes[stripe_index].physical +
1537 stripe_offset;
1538 sblock->dev = bioc->stripes[stripe_index].dev;
1539 sblock->physical_for_dev_replace =
1540 original_sblock->physical_for_dev_replace;
1541 }
1542
1543 BUG_ON(sector_index >= original_sblock->sector_count);
1544 scrub_get_recover(recover);
1545 sector->recover = recover;
1546 }
1547 scrub_put_recover(fs_info, recover);
1548 length -= sublen;
1549 logical += sublen;
1550 sector_index++;
1551 }
1552
1553 return 0;
1554 }
1555
1556 static void scrub_bio_wait_endio(struct bio *bio)
1557 {
1558 complete(bio->bi_private);
1559 }
1560
1561 static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
1562 struct bio *bio,
1563 struct scrub_sector *sector)
1564 {
1565 DECLARE_COMPLETION_ONSTACK(done);
1566
1567 bio->bi_iter.bi_sector = (sector->offset + sector->sblock->logical) >>
1568 SECTOR_SHIFT;
1569 bio->bi_private = &done;
1570 bio->bi_end_io = scrub_bio_wait_endio;
1571 raid56_parity_recover(bio, sector->recover->bioc, sector->sblock->mirror_num);
1572
1573 wait_for_completion_io(&done);
1574 return blk_status_to_errno(bio->bi_status);
1575 }
1576
1577 static void scrub_recheck_block_on_raid56(struct btrfs_fs_info *fs_info,
1578 struct scrub_block *sblock)
1579 {
1580 struct scrub_sector *first_sector = sblock->sectors[0];
1581 struct bio *bio;
1582 int i;
1583
1584 /* All sectors in sblock belong to the same stripe on the same device. */
1585 ASSERT(sblock->dev);
1586 if (!sblock->dev->bdev)
1587 goto out;
1588
1589 bio = bio_alloc(sblock->dev->bdev, BIO_MAX_VECS, REQ_OP_READ, GFP_NOFS);
1590
1591 for (i = 0; i < sblock->sector_count; i++) {
1592 struct scrub_sector *sector = sblock->sectors[i];
1593
1594 bio_add_scrub_sector(bio, sector, fs_info->sectorsize);
1595 }
1596
1597 if (scrub_submit_raid56_bio_wait(fs_info, bio, first_sector)) {
1598 bio_put(bio);
1599 goto out;
1600 }
1601
1602 bio_put(bio);
1603
1604 scrub_recheck_block_checksum(sblock);
1605
1606 return;
1607 out:
1608 for (i = 0; i < sblock->sector_count; i++)
1609 sblock->sectors[i]->io_error = 1;
1610
1611 sblock->no_io_error_seen = 0;
1612 }
1613
1614 /*
1615 * This function will check the on disk data for checksum errors, header errors
1616 * and read I/O errors. If any I/O errors happen, the exact sectors which are
1617 * errored are marked as being bad. The goal is to enable scrub to take those
1618 * sectors that are not errored from all the mirrors so that the sectors that
1619 * are errored in the just handled mirror can be repaired.
1620 */
1621 static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
1622 struct scrub_block *sblock,
1623 int retry_failed_mirror)
1624 {
1625 int i;
1626
1627 sblock->no_io_error_seen = 1;
1628
1629 /* short cut for raid56 */
1630 if (!retry_failed_mirror && scrub_is_page_on_raid56(sblock->sectors[0]))
1631 return scrub_recheck_block_on_raid56(fs_info, sblock);
1632
1633 for (i = 0; i < sblock->sector_count; i++) {
1634 struct scrub_sector *sector = sblock->sectors[i];
1635 struct bio bio;
1636 struct bio_vec bvec;
1637
1638 if (sblock->dev->bdev == NULL) {
1639 sector->io_error = 1;
1640 sblock->no_io_error_seen = 0;
1641 continue;
1642 }
1643
1644 bio_init(&bio, sblock->dev->bdev, &bvec, 1, REQ_OP_READ);
1645 bio_add_scrub_sector(&bio, sector, fs_info->sectorsize);
1646 bio.bi_iter.bi_sector = (sblock->physical + sector->offset) >>
1647 SECTOR_SHIFT;
1648
1649 btrfsic_check_bio(&bio);
1650 if (submit_bio_wait(&bio)) {
1651 sector->io_error = 1;
1652 sblock->no_io_error_seen = 0;
1653 }
1654
1655 bio_uninit(&bio);
1656 }
1657
1658 if (sblock->no_io_error_seen)
1659 scrub_recheck_block_checksum(sblock);
1660 }
1661
1662 static inline int scrub_check_fsid(u8 fsid[], struct scrub_sector *sector)
1663 {
1664 struct btrfs_fs_devices *fs_devices = sector->sblock->dev->fs_devices;
1665 int ret;
1666
1667 ret = memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
1668 return !ret;
1669 }
1670
1671 static void scrub_recheck_block_checksum(struct scrub_block *sblock)
1672 {
1673 sblock->header_error = 0;
1674 sblock->checksum_error = 0;
1675 sblock->generation_error = 0;
1676
1677 if (sblock->sectors[0]->flags & BTRFS_EXTENT_FLAG_DATA)
1678 scrub_checksum_data(sblock);
1679 else
1680 scrub_checksum_tree_block(sblock);
1681 }
1682
1683 static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
1684 struct scrub_block *sblock_good)
1685 {
1686 int i;
1687 int ret = 0;
1688
1689 for (i = 0; i < sblock_bad->sector_count; i++) {
1690 int ret_sub;
1691
1692 ret_sub = scrub_repair_sector_from_good_copy(sblock_bad,
1693 sblock_good, i, 1);
1694 if (ret_sub)
1695 ret = ret_sub;
1696 }
1697
1698 return ret;
1699 }
1700
1701 static int scrub_repair_sector_from_good_copy(struct scrub_block *sblock_bad,
1702 struct scrub_block *sblock_good,
1703 int sector_num, int force_write)
1704 {
1705 struct scrub_sector *sector_bad = sblock_bad->sectors[sector_num];
1706 struct scrub_sector *sector_good = sblock_good->sectors[sector_num];
1707 struct btrfs_fs_info *fs_info = sblock_bad->sctx->fs_info;
1708 const u32 sectorsize = fs_info->sectorsize;
1709
1710 if (force_write || sblock_bad->header_error ||
1711 sblock_bad->checksum_error || sector_bad->io_error) {
1712 struct bio bio;
1713 struct bio_vec bvec;
1714 int ret;
1715
1716 if (!sblock_bad->dev->bdev) {
1717 btrfs_warn_rl(fs_info,
1718 "scrub_repair_page_from_good_copy(bdev == NULL) is unexpected");
1719 return -EIO;
1720 }
1721
1722 bio_init(&bio, sblock_bad->dev->bdev, &bvec, 1, REQ_OP_WRITE);
1723 bio.bi_iter.bi_sector = (sblock_bad->physical +
1724 sector_bad->offset) >> SECTOR_SHIFT;
1725 ret = bio_add_scrub_sector(&bio, sector_good, sectorsize);
1726
1727 btrfsic_check_bio(&bio);
1728 ret = submit_bio_wait(&bio);
1729 bio_uninit(&bio);
1730
1731 if (ret) {
1732 btrfs_dev_stat_inc_and_print(sblock_bad->dev,
1733 BTRFS_DEV_STAT_WRITE_ERRS);
1734 atomic64_inc(&fs_info->dev_replace.num_write_errors);
1735 return -EIO;
1736 }
1737 }
1738
1739 return 0;
1740 }
1741
1742 static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
1743 {
1744 struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;
1745 int i;
1746
1747 /*
1748 * This block is used for the check of the parity on the source device,
1749 * so the data needn't be written into the destination device.
1750 */
1751 if (sblock->sparity)
1752 return;
1753
1754 for (i = 0; i < sblock->sector_count; i++) {
1755 int ret;
1756
1757 ret = scrub_write_sector_to_dev_replace(sblock, i);
1758 if (ret)
1759 atomic64_inc(&fs_info->dev_replace.num_write_errors);
1760 }
1761 }
1762
1763 static int scrub_write_sector_to_dev_replace(struct scrub_block *sblock, int sector_num)
1764 {
1765 const u32 sectorsize = sblock->sctx->fs_info->sectorsize;
1766 struct scrub_sector *sector = sblock->sectors[sector_num];
1767
1768 if (sector->io_error)
1769 memset(scrub_sector_get_kaddr(sector), 0, sectorsize);
1770
1771 return scrub_add_sector_to_wr_bio(sblock->sctx, sector);
1772 }
1773
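/*
 * On zoned devices, writes to the dev-replace target must be sequential
 * within a zone. If the requested @physical position is ahead of the
 * target zone's current write pointer, zero-fill the gap so that the
 * next write lands exactly at @physical.
 */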
1774 static int fill_writer_pointer_gap(struct scrub_ctx *sctx, u64 physical)
1775 {
1776 int ret = 0;
1777 u64 length;
1778
1779 if (!btrfs_is_zoned(sctx->fs_info))
1780 return 0;
1781
1782 if (!btrfs_dev_is_sequential(sctx->wr_tgtdev, physical))
1783 return 0;
1784
1785 if (sctx->write_pointer < physical) {
1786 length = physical - sctx->write_pointer;
1787
1788 ret = btrfs_zoned_issue_zeroout(sctx->wr_tgtdev,
1789 sctx->write_pointer, length);
1790 if (!ret)
1791 sctx->write_pointer = physical;
1792 }
1793 return ret;
1794 }
1795
1796 static void scrub_block_get(struct scrub_block *sblock)
1797 {
1798 refcount_inc(&sblock->refs);
1799 }
1800
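/*
 * Queue one sector for writing to the dev-replace target. Sectors are
 * batched into wr_curr_bio as long as they stay physically and logically
 * contiguous; a non-contiguous sector or a full bio causes the current
 * write bio to be submitted first.
 */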
1801 static int scrub_add_sector_to_wr_bio(struct scrub_ctx *sctx,
1802 struct scrub_sector *sector)
1803 {
1804 struct scrub_block *sblock = sector->sblock;
1805 struct scrub_bio *sbio;
1806 int ret;
1807 const u32 sectorsize = sctx->fs_info->sectorsize;
1808
1809 mutex_lock(&sctx->wr_lock);
1810 again:
1811 if (!sctx->wr_curr_bio) {
1812 sctx->wr_curr_bio = kzalloc(sizeof(*sctx->wr_curr_bio),
1813 GFP_KERNEL);
1814 if (!sctx->wr_curr_bio) {
1815 mutex_unlock(&sctx->wr_lock);
1816 return -ENOMEM;
1817 }
1818 sctx->wr_curr_bio->sctx = sctx;
1819 sctx->wr_curr_bio->sector_count = 0;
1820 }
1821 sbio = sctx->wr_curr_bio;
1822 if (sbio->sector_count == 0) {
1823 ret = fill_writer_pointer_gap(sctx, sector->offset +
1824 sblock->physical_for_dev_replace);
1825 if (ret) {
1826 mutex_unlock(&sctx->wr_lock);
1827 return ret;
1828 }
1829
1830 sbio->physical = sblock->physical_for_dev_replace + sector->offset;
1831 sbio->logical = sblock->logical + sector->offset;
1832 sbio->dev = sctx->wr_tgtdev;
1833 if (!sbio->bio) {
1834 sbio->bio = bio_alloc(sbio->dev->bdev, sctx->sectors_per_bio,
1835 REQ_OP_WRITE, GFP_NOFS);
1836 }
1837 sbio->bio->bi_private = sbio;
1838 sbio->bio->bi_end_io = scrub_wr_bio_end_io;
1839 sbio->bio->bi_iter.bi_sector = sbio->physical >> 9;
1840 sbio->status = 0;
1841 } else if (sbio->physical + sbio->sector_count * sectorsize !=
1842 sblock->physical_for_dev_replace + sector->offset ||
1843 sbio->logical + sbio->sector_count * sectorsize !=
1844 sblock->logical + sector->offset) {
1845 scrub_wr_submit(sctx);
1846 goto again;
1847 }
1848
1849 ret = bio_add_scrub_sector(sbio->bio, sector, sectorsize);
1850 if (ret != sectorsize) {
1851 if (sbio->sector_count < 1) {
1852 bio_put(sbio->bio);
1853 sbio->bio = NULL;
1854 mutex_unlock(&sctx->wr_lock);
1855 return -EIO;
1856 }
1857 scrub_wr_submit(sctx);
1858 goto again;
1859 }
1860
1861 sbio->sectors[sbio->sector_count] = sector;
1862 scrub_sector_get(sector);
1863 /*
1864 * Since the scrub_sector no longer holds a page of its own, but uses
1865 * sblock::pages, we have to ensure the sblock is not freed before our
1866 * write bio finishes.
1867 */
1868 scrub_block_get(sector->sblock);
1869
1870 sbio->sector_count++;
1871 if (sbio->sector_count == sctx->sectors_per_bio)
1872 scrub_wr_submit(sctx);
1873 mutex_unlock(&sctx->wr_lock);
1874
1875 return 0;
1876 }
1877
1878 static void scrub_wr_submit(struct scrub_ctx *sctx)
1879 {
1880 struct scrub_bio *sbio;
1881
1882 if (!sctx->wr_curr_bio)
1883 return;
1884
1885 sbio = sctx->wr_curr_bio;
1886 sctx->wr_curr_bio = NULL;
1887 scrub_pending_bio_inc(sctx);
1888 /* Process all writes in a single worker thread. Then the block layer
1889 * orders the requests before sending them to the driver, which
1890 * doubled the write performance on spinning disks when measured
1891 * with Linux 3.5. */
1892 btrfsic_check_bio(sbio->bio);
1893 submit_bio(sbio->bio);
1894
1895 if (btrfs_is_zoned(sctx->fs_info))
1896 sctx->write_pointer = sbio->physical + sbio->sector_count *
1897 sctx->fs_info->sectorsize;
1898 }
1899
1900 static void scrub_wr_bio_end_io(struct bio *bio)
1901 {
1902 struct scrub_bio *sbio = bio->bi_private;
1903 struct btrfs_fs_info *fs_info = sbio->dev->fs_info;
1904
1905 sbio->status = bio->bi_status;
1906 sbio->bio = bio;
1907
1908 INIT_WORK(&sbio->work, scrub_wr_bio_end_io_worker);
1909 queue_work(fs_info->scrub_wr_completion_workers, &sbio->work);
1910 }
1911
1912 static void scrub_wr_bio_end_io_worker(struct work_struct *work)
1913 {
1914 struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
1915 struct scrub_ctx *sctx = sbio->sctx;
1916 int i;
1917
1918 ASSERT(sbio->sector_count <= SCRUB_SECTORS_PER_BIO);
1919 if (sbio->status) {
1920 struct btrfs_dev_replace *dev_replace =
1921 &sbio->sctx->fs_info->dev_replace;
1922
1923 for (i = 0; i < sbio->sector_count; i++) {
1924 struct scrub_sector *sector = sbio->sectors[i];
1925
1926 sector->io_error = 1;
1927 atomic64_inc(&dev_replace->num_write_errors);
1928 }
1929 }
1930
1931 /*
1932 * In scrub_add_sector_to_wr_bio() we grabbed an extra ref for the sblock,
1933 * now in the endio handler we put it.
1934 */
1935 for (i = 0; i < sbio->sector_count; i++) {
1936 scrub_block_put(sbio->sectors[i]->sblock);
1937 scrub_sector_put(sbio->sectors[i]);
1938 }
1939
1940 bio_put(sbio->bio);
1941 kfree(sbio);
1942 scrub_pending_bio_dec(sctx);
1943 }
1944
1945 static int scrub_checksum(struct scrub_block *sblock)
1946 {
1947 u64 flags;
1948 int ret;
1949
1950 /*
1951 * No need to initialize these stats currently,
1952 * because this function only uses the return value
1953 * instead of these stat values.
1954 *
1955 * Todo:
1956 * always use stats
1957 */
1958 sblock->header_error = 0;
1959 sblock->generation_error = 0;
1960 sblock->checksum_error = 0;
1961
1962 WARN_ON(sblock->sector_count < 1);
1963 flags = sblock->sectors[0]->flags;
1964 ret = 0;
1965 if (flags & BTRFS_EXTENT_FLAG_DATA)
1966 ret = scrub_checksum_data(sblock);
1967 else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1968 ret = scrub_checksum_tree_block(sblock);
1969 else if (flags & BTRFS_EXTENT_FLAG_SUPER)
1970 ret = scrub_checksum_super(sblock);
1971 else
1972 WARN_ON(1);
1973 if (ret)
1974 scrub_handle_errored_block(sblock);
1975
1976 return ret;
1977 }
1978
1979 static int scrub_checksum_data(struct scrub_block *sblock)
1980 {
1981 struct scrub_ctx *sctx = sblock->sctx;
1982 struct btrfs_fs_info *fs_info = sctx->fs_info;
1983 SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
1984 u8 csum[BTRFS_CSUM_SIZE];
1985 struct scrub_sector *sector;
1986 char *kaddr;
1987
1988 BUG_ON(sblock->sector_count < 1);
1989 sector = sblock->sectors[0];
1990 if (!sector->have_csum)
1991 return 0;
1992
1993 kaddr = scrub_sector_get_kaddr(sector);
1994
1995 shash->tfm = fs_info->csum_shash;
1996 crypto_shash_init(shash);
1997
1998 crypto_shash_digest(shash, kaddr, fs_info->sectorsize, csum);
1999
2000 if (memcmp(csum, sector->csum, fs_info->csum_size))
2001 sblock->checksum_error = 1;
2002 return sblock->checksum_error;
2003 }
2004
2005 static int scrub_checksum_tree_block(struct scrub_block *sblock)
2006 {
2007 struct scrub_ctx *sctx = sblock->sctx;
2008 struct btrfs_header *h;
2009 struct btrfs_fs_info *fs_info = sctx->fs_info;
2010 SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
2011 u8 calculated_csum[BTRFS_CSUM_SIZE];
2012 u8 on_disk_csum[BTRFS_CSUM_SIZE];
2013 /*
2014 * This is done in sectorsize steps even for metadata as there's a
2015 * constraint for nodesize to be aligned to sectorsize. This will need
2016 * to change so we don't misuse data and metadata units like that.
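 *
 * For example, a 16K nodesize with a 4K sectorsize gives num_sectors == 4:
 * the first update below skips the csum area of sector 0 and the remaining
 * three sectors are hashed in full.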
2017 */
2018 const u32 sectorsize = sctx->fs_info->sectorsize;
2019 const int num_sectors = fs_info->nodesize >> fs_info->sectorsize_bits;
2020 int i;
2021 struct scrub_sector *sector;
2022 char *kaddr;
2023
2024 BUG_ON(sblock->sector_count < 1);
2025
2026 /* Each member in sectors is just one sector */
2027 ASSERT(sblock->sector_count == num_sectors);
2028
2029 sector = sblock->sectors[0];
2030 kaddr = scrub_sector_get_kaddr(sector);
2031 h = (struct btrfs_header *)kaddr;
2032 memcpy(on_disk_csum, h->csum, sctx->fs_info->csum_size);
2033
2034 /*
2035 * we don't use the getter functions here, as we
2036 * a) don't have an extent buffer and
2037 * b) the page is already kmapped
2038 */
2039 if (sblock->logical != btrfs_stack_header_bytenr(h)) {
2040 sblock->header_error = 1;
2041 btrfs_warn_rl(fs_info,
2042 "tree block %llu mirror %u has bad bytenr, has %llu want %llu",
2043 sblock->logical, sblock->mirror_num,
2044 btrfs_stack_header_bytenr(h),
2045 sblock->logical);
2046 goto out;
2047 }
2048
2049 if (!scrub_check_fsid(h->fsid, sector)) {
2050 sblock->header_error = 1;
2051 btrfs_warn_rl(fs_info,
2052 "tree block %llu mirror %u has bad fsid, has %pU want %pU",
2053 sblock->logical, sblock->mirror_num,
2054 h->fsid, sblock->dev->fs_devices->fsid);
2055 goto out;
2056 }
2057
2058 if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid, BTRFS_UUID_SIZE)) {
2059 sblock->header_error = 1;
2060 btrfs_warn_rl(fs_info,
2061 "tree block %llu mirror %u has bad chunk tree uuid, has %pU want %pU",
2062 sblock->logical, sblock->mirror_num,
2063 h->chunk_tree_uuid, fs_info->chunk_tree_uuid);
2064 goto out;
2065 }
2066
2067 shash->tfm = fs_info->csum_shash;
2068 crypto_shash_init(shash);
2069 crypto_shash_update(shash, kaddr + BTRFS_CSUM_SIZE,
2070 sectorsize - BTRFS_CSUM_SIZE);
2071
2072 for (i = 1; i < num_sectors; i++) {
2073 kaddr = scrub_sector_get_kaddr(sblock->sectors[i]);
2074 crypto_shash_update(shash, kaddr, sectorsize);
2075 }
2076
2077 crypto_shash_final(shash, calculated_csum);
2078 if (memcmp(calculated_csum, on_disk_csum, sctx->fs_info->csum_size)) {
2079 sblock->checksum_error = 1;
2080 btrfs_warn_rl(fs_info,
2081 "tree block %llu mirror %u has bad csum, has " CSUM_FMT " want " CSUM_FMT,
2082 sblock->logical, sblock->mirror_num,
2083 CSUM_FMT_VALUE(fs_info->csum_size, on_disk_csum),
2084 CSUM_FMT_VALUE(fs_info->csum_size, calculated_csum));
2085 goto out;
2086 }
2087
2088 if (sector->generation != btrfs_stack_header_generation(h)) {
2089 sblock->header_error = 1;
2090 sblock->generation_error = 1;
2091 btrfs_warn_rl(fs_info,
2092 "tree block %llu mirror %u has bad generation, has %llu want %llu",
2093 sblock->logical, sblock->mirror_num,
2094 btrfs_stack_header_generation(h),
2095 sector->generation);
2096 }
2097
2098 out:
2099 return sblock->header_error || sblock->checksum_error;
2100 }
2101
2102 static int scrub_checksum_super(struct scrub_block *sblock)
2103 {
2104 struct btrfs_super_block *s;
2105 struct scrub_ctx *sctx = sblock->sctx;
2106 struct btrfs_fs_info *fs_info = sctx->fs_info;
2107 SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
2108 u8 calculated_csum[BTRFS_CSUM_SIZE];
2109 struct scrub_sector *sector;
2110 char *kaddr;
2111 int fail_gen = 0;
2112 int fail_cor = 0;
2113
2114 BUG_ON(sblock->sector_count < 1);
2115 sector = sblock->sectors[0];
2116 kaddr = scrub_sector_get_kaddr(sector);
2117 s = (struct btrfs_super_block *)kaddr;
2118
2119 if (sblock->logical != btrfs_super_bytenr(s))
2120 ++fail_cor;
2121
2122 if (sector->generation != btrfs_super_generation(s))
2123 ++fail_gen;
2124
2125 if (!scrub_check_fsid(s->fsid, sector))
2126 ++fail_cor;
2127
2128 shash->tfm = fs_info->csum_shash;
2129 crypto_shash_init(shash);
2130 crypto_shash_digest(shash, kaddr + BTRFS_CSUM_SIZE,
2131 BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE, calculated_csum);
2132
2133 if (memcmp(calculated_csum, s->csum, sctx->fs_info->csum_size))
2134 ++fail_cor;
2135
2136 return fail_cor + fail_gen;
2137 }
2138
2139 static void scrub_block_put(struct scrub_block *sblock)
2140 {
2141 if (refcount_dec_and_test(&sblock->refs)) {
2142 int i;
2143
2144 if (sblock->sparity)
2145 scrub_parity_put(sblock->sparity);
2146
2147 for (i = 0; i < sblock->sector_count; i++)
2148 scrub_sector_put(sblock->sectors[i]);
2149 for (i = 0; i < DIV_ROUND_UP(sblock->len, PAGE_SIZE); i++) {
2150 if (sblock->pages[i]) {
2151 detach_scrub_page_private(sblock->pages[i]);
2152 __free_page(sblock->pages[i]);
2153 }
2154 }
2155 kfree(sblock);
2156 }
2157 }
2158
2159 static void scrub_sector_get(struct scrub_sector *sector)
2160 {
2161 atomic_inc(&sector->refs);
2162 }
2163
2164 static void scrub_sector_put(struct scrub_sector *sector)
2165 {
2166 if (atomic_dec_and_test(&sector->refs))
2167 kfree(sector);
2168 }
2169
2170 /*
2171 * Throttling of IO submission, bandwidth-limit based, the timeslice is 1
2172 * second. Limit can be set via /sys/fs/btrfs/UUID/devinfo/devid/scrub_speed_max.
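 *
 * For example, with scrub_speed_max set to 100MiB/s the slice is split
 * into min(64, 100MiB / 16MiB) = 6 intervals, so each ~166ms epoch may
 * submit roughly 100MiB / 6 (about 16.6MiB) before the thread sleeps
 * until the end of that epoch.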
2173 */
2174 static void scrub_throttle(struct scrub_ctx *sctx)
2175 {
2176 const int time_slice = 1000;
2177 struct scrub_bio *sbio;
2178 struct btrfs_device *device;
2179 s64 delta;
2180 ktime_t now;
2181 u32 div;
2182 u64 bwlimit;
2183
2184 sbio = sctx->bios[sctx->curr];
2185 device = sbio->dev;
2186 bwlimit = READ_ONCE(device->scrub_speed_max);
2187 if (bwlimit == 0)
2188 return;
2189
2190 /*
2191 * The slice is divided into intervals as the IO is submitted; the number
2192 * of intervals is derived from bwlimit and capped at 64.
2193 */
2194 div = max_t(u32, 1, (u32)(bwlimit / (16 * 1024 * 1024)));
2195 div = min_t(u32, 64, div);
2196
2197 /* Start new epoch, set deadline */
2198 now = ktime_get();
2199 if (sctx->throttle_deadline == 0) {
2200 sctx->throttle_deadline = ktime_add_ms(now, time_slice / div);
2201 sctx->throttle_sent = 0;
2202 }
2203
2204 /* Still within the time slice? */
2205 if (ktime_before(now, sctx->throttle_deadline)) {
2206 /* If current bio is within the limit, send it */
2207 sctx->throttle_sent += sbio->bio->bi_iter.bi_size;
2208 if (sctx->throttle_sent <= div_u64(bwlimit, div))
2209 return;
2210
2211 /* We're over the limit, sleep for the rest of the slice */
2212 delta = ktime_ms_delta(sctx->throttle_deadline, now);
2213 } else {
2214 /* New request after deadline, start new epoch */
2215 delta = 0;
2216 }
2217
2218 if (delta) {
2219 long timeout;
2220
2221 timeout = div_u64(delta * HZ, 1000);
2222 schedule_timeout_interruptible(timeout);
2223 }
2224
2225 /* Next call will start the deadline period */
2226 sctx->throttle_deadline = 0;
2227 }
2228
2229 static void scrub_submit(struct scrub_ctx *sctx)
2230 {
2231 struct scrub_bio *sbio;
2232
2233 if (sctx->curr == -1)
2234 return;
2235
2236 scrub_throttle(sctx);
2237
2238 sbio = sctx->bios[sctx->curr];
2239 sctx->curr = -1;
2240 scrub_pending_bio_inc(sctx);
2241 btrfsic_check_bio(sbio->bio);
2242 submit_bio(sbio->bio);
2243 }
2244
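/*
 * Queue one sector for reading. The sector is appended to the current
 * read bio as long as it is physically and logically contiguous with it
 * and on the same device; otherwise the current bio is submitted and a
 * fresh one is started.
 */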
2245 static int scrub_add_sector_to_rd_bio(struct scrub_ctx *sctx,
2246 struct scrub_sector *sector)
2247 {
2248 struct scrub_block *sblock = sector->sblock;
2249 struct scrub_bio *sbio;
2250 const u32 sectorsize = sctx->fs_info->sectorsize;
2251 int ret;
2252
2253 again:
2254 /*
2255 * grab a fresh bio or wait for one to become available
2256 */
2257 while (sctx->curr == -1) {
2258 spin_lock(&sctx->list_lock);
2259 sctx->curr = sctx->first_free;
2260 if (sctx->curr != -1) {
2261 sctx->first_free = sctx->bios[sctx->curr]->next_free;
2262 sctx->bios[sctx->curr]->next_free = -1;
2263 sctx->bios[sctx->curr]->sector_count = 0;
2264 spin_unlock(&sctx->list_lock);
2265 } else {
2266 spin_unlock(&sctx->list_lock);
2267 wait_event(sctx->list_wait, sctx->first_free != -1);
2268 }
2269 }
2270 sbio = sctx->bios[sctx->curr];
2271 if (sbio->sector_count == 0) {
2272 sbio->physical = sblock->physical + sector->offset;
2273 sbio->logical = sblock->logical + sector->offset;
2274 sbio->dev = sblock->dev;
2275 if (!sbio->bio) {
2276 sbio->bio = bio_alloc(sbio->dev->bdev, sctx->sectors_per_bio,
2277 REQ_OP_READ, GFP_NOFS);
2278 }
2279 sbio->bio->bi_private = sbio;
2280 sbio->bio->bi_end_io = scrub_bio_end_io;
2281 sbio->bio->bi_iter.bi_sector = sbio->physical >> 9;
2282 sbio->status = 0;
2283 } else if (sbio->physical + sbio->sector_count * sectorsize !=
2284 sblock->physical + sector->offset ||
2285 sbio->logical + sbio->sector_count * sectorsize !=
2286 sblock->logical + sector->offset ||
2287 sbio->dev != sblock->dev) {
2288 scrub_submit(sctx);
2289 goto again;
2290 }
2291
2292 sbio->sectors[sbio->sector_count] = sector;
2293 ret = bio_add_scrub_sector(sbio->bio, sector, sectorsize);
2294 if (ret != sectorsize) {
2295 if (sbio->sector_count < 1) {
2296 bio_put(sbio->bio);
2297 sbio->bio = NULL;
2298 return -EIO;
2299 }
2300 scrub_submit(sctx);
2301 goto again;
2302 }
2303
2304 scrub_block_get(sblock); /* one for the page added to the bio */
2305 atomic_inc(&sblock->outstanding_sectors);
2306 sbio->sector_count++;
2307 if (sbio->sector_count == sctx->sectors_per_bio)
2308 scrub_submit(sctx);
2309
2310 return 0;
2311 }
2312
2313 static void scrub_missing_raid56_end_io(struct bio *bio)
2314 {
2315 struct scrub_block *sblock = bio->bi_private;
2316 struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;
2317
2318 btrfs_bio_counter_dec(fs_info);
2319 if (bio->bi_status)
2320 sblock->no_io_error_seen = 0;
2321
2322 bio_put(bio);
2323
2324 queue_work(fs_info->scrub_workers, &sblock->work);
2325 }
2326
2327 static void scrub_missing_raid56_worker(struct work_struct *work)
2328 {
2329 struct scrub_block *sblock = container_of(work, struct scrub_block, work);
2330 struct scrub_ctx *sctx = sblock->sctx;
2331 struct btrfs_fs_info *fs_info = sctx->fs_info;
2332 u64 logical;
2333 struct btrfs_device *dev;
2334
2335 logical = sblock->logical;
2336 dev = sblock->dev;
2337
2338 if (sblock->no_io_error_seen)
2339 scrub_recheck_block_checksum(sblock);
2340
2341 if (!sblock->no_io_error_seen) {
2342 spin_lock(&sctx->stat_lock);
2343 sctx->stat.read_errors++;
2344 spin_unlock(&sctx->stat_lock);
2345 btrfs_err_rl_in_rcu(fs_info,
2346 "IO error rebuilding logical %llu for dev %s",
2347 logical, rcu_str_deref(dev->name));
2348 } else if (sblock->header_error || sblock->checksum_error) {
2349 spin_lock(&sctx->stat_lock);
2350 sctx->stat.uncorrectable_errors++;
2351 spin_unlock(&sctx->stat_lock);
2352 btrfs_err_rl_in_rcu(fs_info,
2353 "failed to rebuild valid logical %llu for dev %s",
2354 logical, rcu_str_deref(dev->name));
2355 } else {
2356 scrub_write_block_to_dev_replace(sblock);
2357 }
2358
2359 if (sctx->is_dev_replace && sctx->flush_all_writes) {
2360 mutex_lock(&sctx->wr_lock);
2361 scrub_wr_submit(sctx);
2362 mutex_unlock(&sctx->wr_lock);
2363 }
2364
2365 scrub_block_put(sblock);
2366 scrub_pending_bio_dec(sctx);
2367 }
2368
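/*
 * Read back a block that lives on a missing device through the RAID5/6
 * recovery path: map the full stripe, attach the scrub pages to a
 * "missing" rbio and let the raid56 code rebuild them. Only expected
 * during dev-replace of a missing RAID5/6 device.
 */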
2369 static void scrub_missing_raid56_pages(struct scrub_block *sblock)
2370 {
2371 struct scrub_ctx *sctx = sblock->sctx;
2372 struct btrfs_fs_info *fs_info = sctx->fs_info;
2373 u64 length = sblock->sector_count << fs_info->sectorsize_bits;
2374 u64 logical = sblock->logical;
2375 struct btrfs_io_context *bioc = NULL;
2376 struct bio *bio;
2377 struct btrfs_raid_bio *rbio;
2378 int ret;
2379 int i;
2380
2381 btrfs_bio_counter_inc_blocked(fs_info);
2382 ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
2383 &length, &bioc);
2384 if (ret || !bioc || !bioc->raid_map)
2385 goto bioc_out;
2386
2387 if (WARN_ON(!sctx->is_dev_replace ||
2388 !(bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK))) {
2389 /*
2390 * We shouldn't be scrubbing a missing device. Even for dev
2391 * replace, we should only get here for RAID 5/6. We either
2392 * managed to mount something with no mirrors remaining or
2393 * there's a bug in scrub_find_good_copy()/btrfs_map_block().
2394 */
2395 goto bioc_out;
2396 }
2397
2398 bio = bio_alloc(NULL, BIO_MAX_VECS, REQ_OP_READ, GFP_NOFS);
2399 bio->bi_iter.bi_sector = logical >> 9;
2400 bio->bi_private = sblock;
2401 bio->bi_end_io = scrub_missing_raid56_end_io;
2402
2403 rbio = raid56_alloc_missing_rbio(bio, bioc);
2404 if (!rbio)
2405 goto rbio_out;
2406
2407 for (i = 0; i < sblock->sector_count; i++) {
2408 struct scrub_sector *sector = sblock->sectors[i];
2409
2410 raid56_add_scrub_pages(rbio, scrub_sector_get_page(sector),
2411 scrub_sector_get_page_offset(sector),
2412 sector->offset + sector->sblock->logical);
2413 }
2414
2415 INIT_WORK(&sblock->work, scrub_missing_raid56_worker);
2416 scrub_block_get(sblock);
2417 scrub_pending_bio_inc(sctx);
2418 raid56_submit_missing_rbio(rbio);
2419 btrfs_put_bioc(bioc);
2420 return;
2421
2422 rbio_out:
2423 bio_put(bio);
2424 bioc_out:
2425 btrfs_bio_counter_dec(fs_info);
2426 btrfs_put_bioc(bioc);
2427 spin_lock(&sctx->stat_lock);
2428 sctx->stat.malloc_errors++;
2429 spin_unlock(&sctx->stat_lock);
2430 }
2431
2432 static int scrub_sectors(struct scrub_ctx *sctx, u64 logical, u32 len,
2433 u64 physical, struct btrfs_device *dev, u64 flags,
2434 u64 gen, int mirror_num, u8 *csum,
2435 u64 physical_for_dev_replace)
2436 {
2437 struct scrub_block *sblock;
2438 const u32 sectorsize = sctx->fs_info->sectorsize;
2439 int index;
2440
2441 sblock = alloc_scrub_block(sctx, dev, logical, physical,
2442 physical_for_dev_replace, mirror_num);
2443 if (!sblock) {
2444 spin_lock(&sctx->stat_lock);
2445 sctx->stat.malloc_errors++;
2446 spin_unlock(&sctx->stat_lock);
2447 return -ENOMEM;
2448 }
2449
2450 for (index = 0; len > 0; index++) {
2451 struct scrub_sector *sector;
2452 /*
2453 * Here we will allocate one page for one sector to scrub.
2454 * This is fine if PAGE_SIZE == sectorsize, but will cost
2455 * more memory for the PAGE_SIZE > sectorsize case.
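 * (e.g. a 64K page backing every 4K sector uses 16 times the memory
 * of the data being scrubbed)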
2456 */
2457 u32 l = min(sectorsize, len);
2458
2459 sector = alloc_scrub_sector(sblock, logical, GFP_KERNEL);
2460 if (!sector) {
2461 spin_lock(&sctx->stat_lock);
2462 sctx->stat.malloc_errors++;
2463 spin_unlock(&sctx->stat_lock);
2464 scrub_block_put(sblock);
2465 return -ENOMEM;
2466 }
2467 sector->flags = flags;
2468 sector->generation = gen;
2469 if (csum) {
2470 sector->have_csum = 1;
2471 memcpy(sector->csum, csum, sctx->fs_info->csum_size);
2472 } else {
2473 sector->have_csum = 0;
2474 }
2475 len -= l;
2476 logical += l;
2477 physical += l;
2478 physical_for_dev_replace += l;
2479 }
2480
2481 WARN_ON(sblock->sector_count == 0);
2482 if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) {
2483 /*
2484 * This case should only be hit for RAID 5/6 device replace. See
2485 * the comment in scrub_missing_raid56_pages() for details.
2486 */
2487 scrub_missing_raid56_pages(sblock);
2488 } else {
2489 for (index = 0; index < sblock->sector_count; index++) {
2490 struct scrub_sector *sector = sblock->sectors[index];
2491 int ret;
2492
2493 ret = scrub_add_sector_to_rd_bio(sctx, sector);
2494 if (ret) {
2495 scrub_block_put(sblock);
2496 return ret;
2497 }
2498 }
2499
2500 if (flags & BTRFS_EXTENT_FLAG_SUPER)
2501 scrub_submit(sctx);
2502 }
2503
2504 /* last one frees, either here or in bio completion for last page */
2505 scrub_block_put(sblock);
2506 return 0;
2507 }
2508
2509 static void scrub_bio_end_io(struct bio *bio)
2510 {
2511 struct scrub_bio *sbio = bio->bi_private;
2512 struct btrfs_fs_info *fs_info = sbio->dev->fs_info;
2513
2514 sbio->status = bio->bi_status;
2515 sbio->bio = bio;
2516
2517 queue_work(fs_info->scrub_workers, &sbio->work);
2518 }
2519
2520 static void scrub_bio_end_io_worker(struct work_struct *work)
2521 {
2522 struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
2523 struct scrub_ctx *sctx = sbio->sctx;
2524 int i;
2525
2526 ASSERT(sbio->sector_count <= SCRUB_SECTORS_PER_BIO);
2527 if (sbio->status) {
2528 for (i = 0; i < sbio->sector_count; i++) {
2529 struct scrub_sector *sector = sbio->sectors[i];
2530
2531 sector->io_error = 1;
2532 sector->sblock->no_io_error_seen = 0;
2533 }
2534 }
2535
2536 /* Now complete the scrub_block items that have all pages completed */
2537 for (i = 0; i < sbio->sector_count; i++) {
2538 struct scrub_sector *sector = sbio->sectors[i];
2539 struct scrub_block *sblock = sector->sblock;
2540
2541 if (atomic_dec_and_test(&sblock->outstanding_sectors))
2542 scrub_block_complete(sblock);
2543 scrub_block_put(sblock);
2544 }
2545
2546 bio_put(sbio->bio);
2547 sbio->bio = NULL;
2548 spin_lock(&sctx->list_lock);
2549 sbio->next_free = sctx->first_free;
2550 sctx->first_free = sbio->index;
2551 spin_unlock(&sctx->list_lock);
2552
2553 if (sctx->is_dev_replace && sctx->flush_all_writes) {
2554 mutex_lock(&sctx->wr_lock);
2555 scrub_wr_submit(sctx);
2556 mutex_unlock(&sctx->wr_lock);
2557 }
2558
2559 scrub_pending_bio_dec(sctx);
2560 }
2561
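/*
 * Mark the sectors covered by [@start, @start + @len) in @bitmap. The
 * range is mapped into the stripe and may wrap around: e.g. with a 64K
 * stripe of 4K sectors, marking 16K starting at stripe offset 56K sets
 * bits 14-15 and wraps around to bits 0-1.
 */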
2562 static inline void __scrub_mark_bitmap(struct scrub_parity *sparity,
2563 unsigned long *bitmap,
2564 u64 start, u32 len)
2565 {
2566 u64 offset;
2567 u32 nsectors;
2568 u32 sectorsize_bits = sparity->sctx->fs_info->sectorsize_bits;
2569
2570 if (len >= sparity->stripe_len) {
2571 bitmap_set(bitmap, 0, sparity->nsectors);
2572 return;
2573 }
2574
2575 start -= sparity->logic_start;
2576 start = div64_u64_rem(start, sparity->stripe_len, &offset);
2577 offset = offset >> sectorsize_bits;
2578 nsectors = len >> sectorsize_bits;
2579
2580 if (offset + nsectors <= sparity->nsectors) {
2581 bitmap_set(bitmap, offset, nsectors);
2582 return;
2583 }
2584
2585 bitmap_set(bitmap, offset, sparity->nsectors - offset);
2586 bitmap_set(bitmap, 0, nsectors - (sparity->nsectors - offset));
2587 }
2588
2589 static inline void scrub_parity_mark_sectors_error(struct scrub_parity *sparity,
2590 u64 start, u32 len)
2591 {
2592 __scrub_mark_bitmap(sparity, &sparity->ebitmap, start, len);
2593 }
2594
2595 static inline void scrub_parity_mark_sectors_data(struct scrub_parity *sparity,
2596 u64 start, u32 len)
2597 {
2598 __scrub_mark_bitmap(sparity, &sparity->dbitmap, start, len);
2599 }
2600
2601 static void scrub_block_complete(struct scrub_block *sblock)
2602 {
2603 int corrupted = 0;
2604
2605 if (!sblock->no_io_error_seen) {
2606 corrupted = 1;
2607 scrub_handle_errored_block(sblock);
2608 } else {
2609 /*
2610 * If there is a checksum error, the block is written via the repair
2611 * mechanism in the dev-replace case; otherwise it is written here
2612 * in the dev-replace case.
2613 */
2614 corrupted = scrub_checksum(sblock);
2615 if (!corrupted && sblock->sctx->is_dev_replace)
2616 scrub_write_block_to_dev_replace(sblock);
2617 }
2618
2619 if (sblock->sparity && corrupted && !sblock->data_corrected) {
2620 u64 start = sblock->logical;
2621 u64 end = sblock->logical +
2622 sblock->sectors[sblock->sector_count - 1]->offset +
2623 sblock->sctx->fs_info->sectorsize;
2624
2625 ASSERT(end - start <= U32_MAX);
2626 scrub_parity_mark_sectors_error(sblock->sparity,
2627 start, end - start);
2628 }
2629 }
2630
2631 static void drop_csum_range(struct scrub_ctx *sctx, struct btrfs_ordered_sum *sum)
2632 {
2633 sctx->stat.csum_discards += sum->len >> sctx->fs_info->sectorsize_bits;
2634 list_del(&sum->list);
2635 kfree(sum);
2636 }
2637
2638 /*
2639 * Find the desired csum for range [logical, logical + sectorsize), and store
2640 * the csum into @csum.
2641 *
2642 * The search source is sctx->csum_list, which is a pre-populated list
2643 * storing bytenr ordered csum ranges. We're responsible for cleaning up
2644 * any range that is before @logical.
2645 *
2646 * Return 0 if there is no csum for the range.
2647 * Return 1 if there is a csum for the range, and it is copied to @csum.
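 *
 * For example, if csum_list holds the ranges [64K, 128K) and [128K, 256K)
 * and @logical is 132K, the first range is dropped and the csum at index
 * (132K - 128K) / sectorsize within the second range is copied out.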
2648 */
2649 static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u8 *csum)
2650 {
2651 bool found = false;
2652
2653 while (!list_empty(&sctx->csum_list)) {
2654 struct btrfs_ordered_sum *sum = NULL;
2655 unsigned long index;
2656 unsigned long num_sectors;
2657
2658 sum = list_first_entry(&sctx->csum_list,
2659 struct btrfs_ordered_sum, list);
2660 /* The current csum range is beyond our range, no csum found */
2661 if (sum->bytenr > logical)
2662 break;
2663
2664 /*
2665 * The current sum ends before our bytenr. Since scrub is always
2666 * done in bytenr order, this csum will never be used again, so
2667 * clean it up so that later calls won't bother with the range,
2668 * and continue searching the next range.
2669 */
2670 if (sum->bytenr + sum->len <= logical) {
2671 drop_csum_range(sctx, sum);
2672 continue;
2673 }
2674
2675 /* Now the csum range covers our bytenr, copy the csum */
2676 found = true;
2677 index = (logical - sum->bytenr) >> sctx->fs_info->sectorsize_bits;
2678 num_sectors = sum->len >> sctx->fs_info->sectorsize_bits;
2679
2680 memcpy(csum, sum->sums + index * sctx->fs_info->csum_size,
2681 sctx->fs_info->csum_size);
2682
2683 /* Cleanup the range if we're at the end of the csum range */
2684 if (index == num_sectors - 1)
2685 drop_csum_range(sctx, sum);
2686 break;
2687 }
2688 if (!found)
2689 return 0;
2690 return 1;
2691 }
2692
2693 /* Scrub one extent, trying to collect up to 64KiB for each bio */
2694 static int scrub_extent(struct scrub_ctx *sctx, struct map_lookup *map,
2695 u64 logical, u32 len,
2696 u64 physical, struct btrfs_device *dev, u64 flags,
2697 u64 gen, int mirror_num)
2698 {
2699 struct btrfs_device *src_dev = dev;
2700 u64 src_physical = physical;
2701 int src_mirror = mirror_num;
2702 int ret;
2703 u8 csum[BTRFS_CSUM_SIZE];
2704 u32 blocksize;
2705
2706 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2707 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
2708 blocksize = map->stripe_len;
2709 else
2710 blocksize = sctx->fs_info->sectorsize;
2711 spin_lock(&sctx->stat_lock);
2712 sctx->stat.data_extents_scrubbed++;
2713 sctx->stat.data_bytes_scrubbed += len;
2714 spin_unlock(&sctx->stat_lock);
2715 } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
2716 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
2717 blocksize = map->stripe_len;
2718 else
2719 blocksize = sctx->fs_info->nodesize;
2720 spin_lock(&sctx->stat_lock);
2721 sctx->stat.tree_extents_scrubbed++;
2722 sctx->stat.tree_bytes_scrubbed += len;
2723 spin_unlock(&sctx->stat_lock);
2724 } else {
2725 blocksize = sctx->fs_info->sectorsize;
2726 WARN_ON(1);
2727 }
2728
2729 /*
2730 * In the dev-replace case, @dev can be a missing device.
2731 * Regular scrub avoids running on a missing device at all,
2732 * as that would trigger tons of read errors.
2733 *
2734 * Reading from a missing device would only cause the read error
2735 * counts to increase unnecessarily, so here we change the read
2736 * source to a good mirror.
2737 */
2738 if (sctx->is_dev_replace && !dev->bdev)
2739 scrub_find_good_copy(sctx->fs_info, logical, len, &src_physical,
2740 &src_dev, &src_mirror);
2741 while (len) {
2742 u32 l = min(len, blocksize);
2743 int have_csum = 0;
2744
2745 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2746 /* push csums to sbio */
2747 have_csum = scrub_find_csum(sctx, logical, csum);
2748 if (have_csum == 0)
2749 ++sctx->stat.no_csum;
2750 }
2751 ret = scrub_sectors(sctx, logical, l, src_physical, src_dev,
2752 flags, gen, src_mirror,
2753 have_csum ? csum : NULL, physical);
2754 if (ret)
2755 return ret;
2756 len -= l;
2757 logical += l;
2758 physical += l;
2759 src_physical += l;
2760 }
2761 return 0;
2762 }
2763
2764 static int scrub_sectors_for_parity(struct scrub_parity *sparity,
2765 u64 logical, u32 len,
2766 u64 physical, struct btrfs_device *dev,
2767 u64 flags, u64 gen, int mirror_num, u8 *csum)
2768 {
2769 struct scrub_ctx *sctx = sparity->sctx;
2770 struct scrub_block *sblock;
2771 const u32 sectorsize = sctx->fs_info->sectorsize;
2772 int index;
2773
2774 ASSERT(IS_ALIGNED(len, sectorsize));
2775
2776 sblock = alloc_scrub_block(sctx, dev, logical, physical, physical, mirror_num);
2777 if (!sblock) {
2778 spin_lock(&sctx->stat_lock);
2779 sctx->stat.malloc_errors++;
2780 spin_unlock(&sctx->stat_lock);
2781 return -ENOMEM;
2782 }
2783
2784 sblock->sparity = sparity;
2785 scrub_parity_get(sparity);
2786
2787 for (index = 0; len > 0; index++) {
2788 struct scrub_sector *sector;
2789
2790 sector = alloc_scrub_sector(sblock, logical, GFP_KERNEL);
2791 if (!sector) {
2792 spin_lock(&sctx->stat_lock);
2793 sctx->stat.malloc_errors++;
2794 spin_unlock(&sctx->stat_lock);
2795 scrub_block_put(sblock);
2796 return -ENOMEM;
2797 }
2798 sblock->sectors[index] = sector;
2799 /* For scrub parity */
2800 scrub_sector_get(sector);
2801 list_add_tail(&sector->list, &sparity->sectors_list);
2802 sector->flags = flags;
2803 sector->generation = gen;
2804 if (csum) {
2805 sector->have_csum = 1;
2806 memcpy(sector->csum, csum, sctx->fs_info->csum_size);
2807 } else {
2808 sector->have_csum = 0;
2809 }
2810
2811 /* Iterate over the stripe range in sectorsize steps */
2812 len -= sectorsize;
2813 logical += sectorsize;
2814 physical += sectorsize;
2815 }
2816
2817 WARN_ON(sblock->sector_count == 0);
2818 for (index = 0; index < sblock->sector_count; index++) {
2819 struct scrub_sector *sector = sblock->sectors[index];
2820 int ret;
2821
2822 ret = scrub_add_sector_to_rd_bio(sctx, sector);
2823 if (ret) {
2824 scrub_block_put(sblock);
2825 return ret;
2826 }
2827 }
2828
2829 /* Last one frees, either here or in bio completion for last sector */
2830 scrub_block_put(sblock);
2831 return 0;
2832 }
2833
2834 static int scrub_extent_for_parity(struct scrub_parity *sparity,
2835 u64 logical, u32 len,
2836 u64 physical, struct btrfs_device *dev,
2837 u64 flags, u64 gen, int mirror_num)
2838 {
2839 struct scrub_ctx *sctx = sparity->sctx;
2840 int ret;
2841 u8 csum[BTRFS_CSUM_SIZE];
2842 u32 blocksize;
2843
2844 if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) {
2845 scrub_parity_mark_sectors_error(sparity, logical, len);
2846 return 0;
2847 }
2848
2849 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2850 blocksize = sparity->stripe_len;
2851 } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
2852 blocksize = sparity->stripe_len;
2853 } else {
2854 blocksize = sctx->fs_info->sectorsize;
2855 WARN_ON(1);
2856 }
2857
2858 while (len) {
2859 u32 l = min(len, blocksize);
2860 int have_csum = 0;
2861
2862 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2863 /* push csums to sbio */
2864 have_csum = scrub_find_csum(sctx, logical, csum);
2865 if (have_csum == 0)
2866 goto skip;
2867 }
2868 ret = scrub_sectors_for_parity(sparity, logical, l, physical, dev,
2869 flags, gen, mirror_num,
2870 have_csum ? csum : NULL);
2871 if (ret)
2872 return ret;
2873 skip:
2874 len -= l;
2875 logical += l;
2876 physical += l;
2877 }
2878 return 0;
2879 }
2880
2881 /*
2882 * Given a physical address, this will calculate its
2883 * logical offset. If this is a parity stripe, it will return
2884 * the leftmost data stripe's logical offset.
2885 *
2886 * Return 0 if it is a data stripe, 1 if it is a parity stripe.
2887 */
2888 static int get_raid56_logic_offset(u64 physical, int num,
2889 struct map_lookup *map, u64 *offset,
2890 u64 *stripe_start)
2891 {
2892 int i;
2893 int j = 0;
2894 u64 stripe_nr;
2895 u64 last_offset;
2896 u32 stripe_index;
2897 u32 rot;
2898 const int data_stripes = nr_data_stripes(map);
2899
2900 last_offset = (physical - map->stripes[num].physical) * data_stripes;
2901 if (stripe_start)
2902 *stripe_start = last_offset;
2903
2904 *offset = last_offset;
2905 for (i = 0; i < data_stripes; i++) {
2906 *offset = last_offset + i * map->stripe_len;
2907
2908 stripe_nr = div64_u64(*offset, map->stripe_len);
2909 stripe_nr = div_u64(stripe_nr, data_stripes);
2910
2911 /* Work out the disk rotation on this stripe-set */
2912 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, &rot);
2913 /* Calculate which stripe this data is located on */
2914 rot += i;
2915 stripe_index = rot % map->num_stripes;
2916 if (stripe_index == num)
2917 return 0;
2918 if (stripe_index < num)
2919 j++;
2920 }
2921 *offset = last_offset + j * map->stripe_len;
2922 return 1;
2923 }
2924
2925 static void scrub_free_parity(struct scrub_parity *sparity)
2926 {
2927 struct scrub_ctx *sctx = sparity->sctx;
2928 struct scrub_sector *curr, *next;
2929 int nbits;
2930
2931 nbits = bitmap_weight(&sparity->ebitmap, sparity->nsectors);
2932 if (nbits) {
2933 spin_lock(&sctx->stat_lock);
2934 sctx->stat.read_errors += nbits;
2935 sctx->stat.uncorrectable_errors += nbits;
2936 spin_unlock(&sctx->stat_lock);
2937 }
2938
2939 list_for_each_entry_safe(curr, next, &sparity->sectors_list, list) {
2940 list_del_init(&curr->list);
2941 scrub_sector_put(curr);
2942 }
2943
2944 kfree(sparity);
2945 }
2946
2947 static void scrub_parity_bio_endio_worker(struct work_struct *work)
2948 {
2949 struct scrub_parity *sparity = container_of(work, struct scrub_parity,
2950 work);
2951 struct scrub_ctx *sctx = sparity->sctx;
2952
2953 btrfs_bio_counter_dec(sctx->fs_info);
2954 scrub_free_parity(sparity);
2955 scrub_pending_bio_dec(sctx);
2956 }
2957
2958 static void scrub_parity_bio_endio(struct bio *bio)
2959 {
2960 struct scrub_parity *sparity = bio->bi_private;
2961 struct btrfs_fs_info *fs_info = sparity->sctx->fs_info;
2962
2963 if (bio->bi_status)
2964 bitmap_or(&sparity->ebitmap, &sparity->ebitmap,
2965 &sparity->dbitmap, sparity->nsectors);
2966
2967 bio_put(bio);
2968
2969 INIT_WORK(&sparity->work, scrub_parity_bio_endio_worker);
2970 queue_work(fs_info->scrub_parity_workers, &sparity->work);
2971 }
2972
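/*
 * Kick off the parity check/repair for all sectors that were marked as
 * data in dbitmap but not as errors in ebitmap. If nothing is left to
 * check, the parity context is simply freed.
 */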
2973 static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
2974 {
2975 struct scrub_ctx *sctx = sparity->sctx;
2976 struct btrfs_fs_info *fs_info = sctx->fs_info;
2977 struct bio *bio;
2978 struct btrfs_raid_bio *rbio;
2979 struct btrfs_io_context *bioc = NULL;
2980 u64 length;
2981 int ret;
2982
2983 if (!bitmap_andnot(&sparity->dbitmap, &sparity->dbitmap,
2984 &sparity->ebitmap, sparity->nsectors))
2985 goto out;
2986
2987 length = sparity->logic_end - sparity->logic_start;
2988
2989 btrfs_bio_counter_inc_blocked(fs_info);
2990 ret = btrfs_map_sblock(fs_info, BTRFS_MAP_WRITE, sparity->logic_start,
2991 &length, &bioc);
2992 if (ret || !bioc || !bioc->raid_map)
2993 goto bioc_out;
2994
2995 bio = bio_alloc(NULL, BIO_MAX_VECS, REQ_OP_READ, GFP_NOFS);
2996 bio->bi_iter.bi_sector = sparity->logic_start >> 9;
2997 bio->bi_private = sparity;
2998 bio->bi_end_io = scrub_parity_bio_endio;
2999
3000 rbio = raid56_parity_alloc_scrub_rbio(bio, bioc,
3001 sparity->scrub_dev,
3002 &sparity->dbitmap,
3003 sparity->nsectors);
3004 btrfs_put_bioc(bioc);
3005 if (!rbio)
3006 goto rbio_out;
3007
3008 scrub_pending_bio_inc(sctx);
3009 raid56_parity_submit_scrub_rbio(rbio);
3010 return;
3011
3012 rbio_out:
3013 bio_put(bio);
3014 bioc_out:
3015 btrfs_bio_counter_dec(fs_info);
3016 bitmap_or(&sparity->ebitmap, &sparity->ebitmap, &sparity->dbitmap,
3017 sparity->nsectors);
3018 spin_lock(&sctx->stat_lock);
3019 sctx->stat.malloc_errors++;
3020 spin_unlock(&sctx->stat_lock);
3021 out:
3022 scrub_free_parity(sparity);
3023 }
3024
3025 static void scrub_parity_get(struct scrub_parity *sparity)
3026 {
3027 refcount_inc(&sparity->refs);
3028 }
3029
3030 static void scrub_parity_put(struct scrub_parity *sparity)
3031 {
3032 if (!refcount_dec_and_test(&sparity->refs))
3033 return;
3034
3035 scrub_parity_check_and_repair(sparity);
3036 }
3037
3038 /*
3039 * Return 0 if the extent item range covers any byte of the range.
3040 * Return <0 if the extent item is before @search_start.
3041 * Return >0 if the extent item is after @search_start + @search_len.
3042 */
3043 static int compare_extent_item_range(struct btrfs_path *path,
3044 u64 search_start, u64 search_len)
3045 {
3046 struct btrfs_fs_info *fs_info = path->nodes[0]->fs_info;
3047 u64 len;
3048 struct btrfs_key key;
3049
3050 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
3051 ASSERT(key.type == BTRFS_EXTENT_ITEM_KEY ||
3052 key.type == BTRFS_METADATA_ITEM_KEY);
3053 if (key.type == BTRFS_METADATA_ITEM_KEY)
3054 len = fs_info->nodesize;
3055 else
3056 len = key.offset;
3057
3058 if (key.objectid + len <= search_start)
3059 return -1;
3060 if (key.objectid >= search_start + search_len)
3061 return 1;
3062 return 0;
3063 }
3064
3065 /*
3066 * Locate one extent item which covers any byte in range
3067 * [@search_start, @search_start + @search_len)
3068 *
3069 * If the path is not initialized, we will initialize the search by doing
3070 * a btrfs_search_slot().
3071 * If the path is already initialized, we will use the path as the initial
3072 * slot, to avoid duplicated btrfs_search_slot() calls.
3073 *
3074 * NOTE: If an extent item starts before @search_start, we will still
3075 * return the extent item. This is for data extents crossing stripe boundaries.
3076 *
3077 * Return 0 if we found such an extent item, and @path will point to the extent item.
3078 * Return >0 if no such extent item can be found, and @path will be released.
3079 * Return <0 if we hit a fatal error, and @path will be released.
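 *
 * A sketch of the typical usage, modelled on scrub_simple_mirror():
 *
 *	while (cur_logical < logical_end) {
 *		ret = find_first_extent_item(root, &path, cur_logical,
 *					     logical_end - cur_logical);
 *		if (ret)
 *			break;
 *		get_extent_info(&path, &extent_start, &extent_len, ...);
 *		cur_logical = max(extent_start, cur_logical);
 *		... scrub the located range, then advance cur_logical ...
 *	}
 *	btrfs_release_path(&path);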
3080 */
3081 static int find_first_extent_item(struct btrfs_root *extent_root,
3082 struct btrfs_path *path,
3083 u64 search_start, u64 search_len)
3084 {
3085 struct btrfs_fs_info *fs_info = extent_root->fs_info;
3086 struct btrfs_key key;
3087 int ret;
3088
3089 /* Continue using the existing path */
3090 if (path->nodes[0])
3091 goto search_forward;
3092
3093 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
3094 key.type = BTRFS_METADATA_ITEM_KEY;
3095 else
3096 key.type = BTRFS_EXTENT_ITEM_KEY;
3097 key.objectid = search_start;
3098 key.offset = (u64)-1;
3099
3100 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
3101 if (ret < 0)
3102 return ret;
3103
3104 ASSERT(ret > 0);
3105 /*
3106 * Here we intentionally pass 0 as @min_objectid, as there could be
3107 * an extent item starting before @search_start.
3108 */
3109 ret = btrfs_previous_extent_item(extent_root, path, 0);
3110 if (ret < 0)
3111 return ret;
3112 /*
3113 * No matter whether we have found an extent item, the next loop will
3114 * properly do every check on the key.
3115 */
3116 search_forward:
3117 while (true) {
3118 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
3119 if (key.objectid >= search_start + search_len)
3120 break;
3121 if (key.type != BTRFS_METADATA_ITEM_KEY &&
3122 key.type != BTRFS_EXTENT_ITEM_KEY)
3123 goto next;
3124
3125 ret = compare_extent_item_range(path, search_start, search_len);
3126 if (ret == 0)
3127 return ret;
3128 if (ret > 0)
3129 break;
3130 next:
3131 path->slots[0]++;
3132 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
3133 ret = btrfs_next_leaf(extent_root, path);
3134 if (ret) {
3135 /* Either no more item or fatal error */
3136 btrfs_release_path(path);
3137 return ret;
3138 }
3139 }
3140 }
3141 btrfs_release_path(path);
3142 return 1;
3143 }
3144
3145 static void get_extent_info(struct btrfs_path *path, u64 *extent_start_ret,
3146 u64 *size_ret, u64 *flags_ret, u64 *generation_ret)
3147 {
3148 struct btrfs_key key;
3149 struct btrfs_extent_item *ei;
3150
3151 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
3152 ASSERT(key.type == BTRFS_METADATA_ITEM_KEY ||
3153 key.type == BTRFS_EXTENT_ITEM_KEY);
3154 *extent_start_ret = key.objectid;
3155 if (key.type == BTRFS_METADATA_ITEM_KEY)
3156 *size_ret = path->nodes[0]->fs_info->nodesize;
3157 else
3158 *size_ret = key.offset;
3159 ei = btrfs_item_ptr(path->nodes[0], path->slots[0], struct btrfs_extent_item);
3160 *flags_ret = btrfs_extent_flags(path->nodes[0], ei);
3161 *generation_ret = btrfs_extent_generation(path->nodes[0], ei);
3162 }
3163
3164 static bool does_range_cross_boundary(u64 extent_start, u64 extent_len,
3165 u64 boundary_start, u64 boundary_len)
3166 {
3167 return (extent_start < boundary_start &&
3168 extent_start + extent_len > boundary_start) ||
3169 (extent_start < boundary_start + boundary_len &&
3170 extent_start + extent_len > boundary_start + boundary_len);
3171 }
3172
3173 static int scrub_raid56_data_stripe_for_parity(struct scrub_ctx *sctx,
3174 struct scrub_parity *sparity,
3175 struct map_lookup *map,
3176 struct btrfs_device *sdev,
3177 struct btrfs_path *path,
3178 u64 logical)
3179 {
3180 struct btrfs_fs_info *fs_info = sctx->fs_info;
3181 struct btrfs_root *extent_root = btrfs_extent_root(fs_info, logical);
3182 struct btrfs_root *csum_root = btrfs_csum_root(fs_info, logical);
3183 u64 cur_logical = logical;
3184 int ret;
3185
3186 ASSERT(map->type & BTRFS_BLOCK_GROUP_RAID56_MASK);
3187
3188 /* Path must not be populated */
3189 ASSERT(!path->nodes[0]);
3190
3191 while (cur_logical < logical + map->stripe_len) {
3192 struct btrfs_io_context *bioc = NULL;
3193 struct btrfs_device *extent_dev;
3194 u64 extent_start;
3195 u64 extent_size;
3196 u64 mapped_length;
3197 u64 extent_flags;
3198 u64 extent_gen;
3199 u64 extent_physical;
3200 u64 extent_mirror_num;
3201
3202 ret = find_first_extent_item(extent_root, path, cur_logical,
3203 logical + map->stripe_len - cur_logical);
3204 /* No more extent item in this data stripe */
3205 if (ret > 0) {
3206 ret = 0;
3207 break;
3208 }
3209 if (ret < 0)
3210 break;
3211 get_extent_info(path, &extent_start, &extent_size, &extent_flags,
3212 &extent_gen);
3213
3214 /* Metadata should not cross stripe boundaries */
3215 if ((extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
3216 does_range_cross_boundary(extent_start, extent_size,
3217 logical, map->stripe_len)) {
3218 btrfs_err(fs_info,
3219 "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
3220 extent_start, logical);
3221 spin_lock(&sctx->stat_lock);
3222 sctx->stat.uncorrectable_errors++;
3223 spin_unlock(&sctx->stat_lock);
3224 cur_logical += extent_size;
3225 continue;
3226 }
3227
3228 /* Skip hole range which doesn't have any extent */
3229 cur_logical = max(extent_start, cur_logical);
3230
3231 /* Truncate the range inside this data stripe */
3232 extent_size = min(extent_start + extent_size,
3233 logical + map->stripe_len) - cur_logical;
3234 extent_start = cur_logical;
3235 ASSERT(extent_size <= U32_MAX);
3236
3237 scrub_parity_mark_sectors_data(sparity, extent_start, extent_size);
3238
3239 mapped_length = extent_size;
3240 ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, extent_start,
3241 &mapped_length, &bioc, 0);
3242 if (!ret && (!bioc || mapped_length < extent_size))
3243 ret = -EIO;
3244 if (ret) {
3245 btrfs_put_bioc(bioc);
3246 scrub_parity_mark_sectors_error(sparity, extent_start,
3247 extent_size);
3248 break;
3249 }
3250 extent_physical = bioc->stripes[0].physical;
3251 extent_mirror_num = bioc->mirror_num;
3252 extent_dev = bioc->stripes[0].dev;
3253 btrfs_put_bioc(bioc);
3254
3255 ret = btrfs_lookup_csums_range(csum_root, extent_start,
3256 extent_start + extent_size - 1,
3257 &sctx->csum_list, 1, false);
3258 if (ret) {
3259 scrub_parity_mark_sectors_error(sparity, extent_start,
3260 extent_size);
3261 break;
3262 }
3263
3264 ret = scrub_extent_for_parity(sparity, extent_start,
3265 extent_size, extent_physical,
3266 extent_dev, extent_flags,
3267 extent_gen, extent_mirror_num);
3268 scrub_free_csums(sctx);
3269
3270 if (ret) {
3271 scrub_parity_mark_sectors_error(sparity, extent_start,
3272 extent_size);
3273 break;
3274 }
3275
3276 cond_resched();
3277 cur_logical += extent_size;
3278 }
3279 btrfs_release_path(path);
3280 return ret;
3281 }
3282
3283 static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
3284 struct map_lookup *map,
3285 struct btrfs_device *sdev,
3286 u64 logic_start,
3287 u64 logic_end)
3288 {
3289 struct btrfs_fs_info *fs_info = sctx->fs_info;
3290 struct btrfs_path *path;
3291 u64 cur_logical;
3292 int ret;
3293 struct scrub_parity *sparity;
3294 int nsectors;
3295
3296 path = btrfs_alloc_path();
3297 if (!path) {
3298 spin_lock(&sctx->stat_lock);
3299 sctx->stat.malloc_errors++;
3300 spin_unlock(&sctx->stat_lock);
3301 return -ENOMEM;
3302 }
3303 path->search_commit_root = 1;
3304 path->skip_locking = 1;
3305
3306 ASSERT(map->stripe_len <= U32_MAX);
3307 nsectors = map->stripe_len >> fs_info->sectorsize_bits;
3308 ASSERT(nsectors <= BITS_PER_LONG);
3309 sparity = kzalloc(sizeof(struct scrub_parity), GFP_NOFS);
3310 if (!sparity) {
3311 spin_lock(&sctx->stat_lock);
3312 sctx->stat.malloc_errors++;
3313 spin_unlock(&sctx->stat_lock);
3314 btrfs_free_path(path);
3315 return -ENOMEM;
3316 }
3317
3318 ASSERT(map->stripe_len <= U32_MAX);
3319 sparity->stripe_len = map->stripe_len;
3320 sparity->nsectors = nsectors;
3321 sparity->sctx = sctx;
3322 sparity->scrub_dev = sdev;
3323 sparity->logic_start = logic_start;
3324 sparity->logic_end = logic_end;
3325 refcount_set(&sparity->refs, 1);
3326 INIT_LIST_HEAD(&sparity->sectors_list);
3327
3328 ret = 0;
3329 for (cur_logical = logic_start; cur_logical < logic_end;
3330 cur_logical += map->stripe_len) {
3331 ret = scrub_raid56_data_stripe_for_parity(sctx, sparity, map,
3332 sdev, path, cur_logical);
3333 if (ret < 0)
3334 break;
3335 }
3336
3337 scrub_parity_put(sparity);
3338 scrub_submit(sctx);
3339 mutex_lock(&sctx->wr_lock);
3340 scrub_wr_submit(sctx);
3341 mutex_unlock(&sctx->wr_lock);
3342
3343 btrfs_free_path(path);
3344 return ret < 0 ? ret : 0;
3345 }
3346
3347 static void sync_replace_for_zoned(struct scrub_ctx *sctx)
3348 {
3349 if (!btrfs_is_zoned(sctx->fs_info))
3350 return;
3351
3352 sctx->flush_all_writes = true;
3353 scrub_submit(sctx);
3354 mutex_lock(&sctx->wr_lock);
3355 scrub_wr_submit(sctx);
3356 mutex_unlock(&sctx->wr_lock);
3357
3358 wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
3359 }
3360
3361 static int sync_write_pointer_for_zoned(struct scrub_ctx *sctx, u64 logical,
3362 u64 physical, u64 physical_end)
3363 {
3364 struct btrfs_fs_info *fs_info = sctx->fs_info;
3365 int ret = 0;
3366
3367 if (!btrfs_is_zoned(fs_info))
3368 return 0;
3369
3370 wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
3371
3372 mutex_lock(&sctx->wr_lock);
3373 if (sctx->write_pointer < physical_end) {
3374 ret = btrfs_sync_zone_write_pointer(sctx->wr_tgtdev, logical,
3375 physical,
3376 sctx->write_pointer);
3377 if (ret)
3378 btrfs_err(fs_info,
3379 "zoned: failed to recover write pointer");
3380 }
3381 mutex_unlock(&sctx->wr_lock);
3382 btrfs_dev_clear_zone_empty(sctx->wr_tgtdev, physical);
3383
3384 return ret;
3385 }
3386
3387 /*
3388 * Scrub one range which can only have a simple mirror based profile.
3389 * (Including all ranges in SINGLE/DUP/RAID1/RAID1C*, and each stripe in
3390 * RAID0/RAID10).
3391 *
3392 * Since we may need to handle a subset of a block group, we need the
3393 * @logical_start and @logical_length parameters.
3394 */
3395 static int scrub_simple_mirror(struct scrub_ctx *sctx,
3396 struct btrfs_root *extent_root,
3397 struct btrfs_root *csum_root,
3398 struct btrfs_block_group *bg,
3399 struct map_lookup *map,
3400 u64 logical_start, u64 logical_length,
3401 struct btrfs_device *device,
3402 u64 physical, int mirror_num)
3403 {
3404 struct btrfs_fs_info *fs_info = sctx->fs_info;
3405 const u64 logical_end = logical_start + logical_length;
3406 /* An artificial limit, inherited from old scrub behavior */
3407 const u32 max_length = SZ_64K;
3408 struct btrfs_path path = { 0 };
3409 u64 cur_logical = logical_start;
3410 int ret;
3411
3412 /* The range must be inside the bg */
3413 ASSERT(logical_start >= bg->start && logical_end <= bg->start + bg->length);
3414
3415 path.search_commit_root = 1;
3416 path.skip_locking = 1;
3417 /* Go through each extent item inside the logical range */
3418 while (cur_logical < logical_end) {
3419 u64 extent_start;
3420 u64 extent_len;
3421 u64 extent_flags;
3422 u64 extent_gen;
3423 u64 scrub_len;
3424
3425 /* Canceled? */
3426 if (atomic_read(&fs_info->scrub_cancel_req) ||
3427 atomic_read(&sctx->cancel_req)) {
3428 ret = -ECANCELED;
3429 break;
3430 }
3431 /* Paused? */
3432 if (atomic_read(&fs_info->scrub_pause_req)) {
3433 /* Push queued extents */
3434 sctx->flush_all_writes = true;
3435 scrub_submit(sctx);
3436 mutex_lock(&sctx->wr_lock);
3437 scrub_wr_submit(sctx);
3438 mutex_unlock(&sctx->wr_lock);
3439 wait_event(sctx->list_wait,
3440 atomic_read(&sctx->bios_in_flight) == 0);
3441 sctx->flush_all_writes = false;
3442 scrub_blocked_if_needed(fs_info);
3443 }
3444 /* Block group removed? */
3445 spin_lock(&bg->lock);
3446 if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &bg->runtime_flags)) {
3447 spin_unlock(&bg->lock);
3448 ret = 0;
3449 break;
3450 }
3451 spin_unlock(&bg->lock);
3452
3453 ret = find_first_extent_item(extent_root, &path, cur_logical,
3454 logical_end - cur_logical);
3455 if (ret > 0) {
3456 /* No more extent, just update the accounting */
3457 sctx->stat.last_physical = physical + logical_length;
3458 ret = 0;
3459 break;
3460 }
3461 if (ret < 0)
3462 break;
3463 get_extent_info(&path, &extent_start, &extent_len,
3464 &extent_flags, &extent_gen);
3465 /* Skip hole range which doesn't have any extent */
3466 cur_logical = max(extent_start, cur_logical);
3467
3468 /*
3469 * Scrub len has three limits:
3470 * - Extent size limit
3471 * - Scrub range limit
3472 * This is especially important for RAID0/RAID10 to reuse
3473 * this function
3474 * - Max scrub size limit
3475 */
3476 scrub_len = min(min(extent_start + extent_len,
3477 logical_end), cur_logical + max_length) -
3478 cur_logical;
3479
3480 if (extent_flags & BTRFS_EXTENT_FLAG_DATA) {
3481 ret = btrfs_lookup_csums_range(csum_root, cur_logical,
3482 cur_logical + scrub_len - 1,
3483 &sctx->csum_list, 1, false);
3484 if (ret)
3485 break;
3486 }
3487 if ((extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
3488 does_range_cross_boundary(extent_start, extent_len,
3489 logical_start, logical_length)) {
3490 btrfs_err(fs_info,
3491 "scrub: tree block %llu spanning boundaries, ignored. boundary=[%llu, %llu)",
3492 extent_start, logical_start, logical_end);
3493 spin_lock(&sctx->stat_lock);
3494 sctx->stat.uncorrectable_errors++;
3495 spin_unlock(&sctx->stat_lock);
3496 cur_logical += scrub_len;
3497 continue;
3498 }
3499 ret = scrub_extent(sctx, map, cur_logical, scrub_len,
3500 cur_logical - logical_start + physical,
3501 device, extent_flags, extent_gen,
3502 mirror_num);
3503 scrub_free_csums(sctx);
3504 if (ret)
3505 break;
3506 if (sctx->is_dev_replace)
3507 sync_replace_for_zoned(sctx);
3508 cur_logical += scrub_len;
		/* Don't hold the CPU for too long */
3510 cond_resched();
3511 }
3512 btrfs_release_path(&path);
3513 return ret;
3514 }
3515
3516 /* Calculate the full stripe length for simple stripe based profiles */
static u64 simple_stripe_full_stripe_len(const struct map_lookup *map)
3518 {
3519 ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 |
3520 BTRFS_BLOCK_GROUP_RAID10));
3521
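	/*
	 * Worked example (illustrative): RAID10 with num_stripes = 4,
	 * sub_stripes = 2 and stripe_len = 64KiB gives a full stripe of
	 * 4 / 2 * 64KiB = 128KiB; RAID0 with 4 stripes (sub_stripes = 1)
	 * gives 256KiB.
	 */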
3522 return map->num_stripes / map->sub_stripes * map->stripe_len;
3523 }
3524
3525 /* Get the logical bytenr for the stripe */
static u64 simple_stripe_get_logical(struct map_lookup *map,
				     struct btrfs_block_group *bg,
				     int stripe_index)
3529 {
3530 ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 |
3531 BTRFS_BLOCK_GROUP_RAID10));
3532 ASSERT(stripe_index < map->num_stripes);
3533
3534 /*
3535 * (stripe_index / sub_stripes) gives how many data stripes we need to
3536 * skip.
3537 */
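	/*
	 * E.g. (illustrative) RAID10 with sub_stripes = 2 and
	 * stripe_len = 64KiB: stripe_index 0 and 1 map to bg->start,
	 * stripe_index 2 and 3 map to bg->start + 64KiB.
	 */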
3538 return (stripe_index / map->sub_stripes) * map->stripe_len + bg->start;
3539 }
3540
3541 /* Get the mirror number for the stripe */
static int simple_stripe_mirror_num(struct map_lookup *map, int stripe_index)
3543 {
3544 ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 |
3545 BTRFS_BLOCK_GROUP_RAID10));
3546 ASSERT(stripe_index < map->num_stripes);
3547
3548 /* For RAID0, it's fixed to 1, for RAID10 it's 0,1,0,1... */
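	/*
	 * E.g. (illustrative) RAID10 with sub_stripes = 2: stripe_index
	 * 0, 1, 2, 3 results in mirror_num 1, 2, 1, 2; for RAID0
	 * (sub_stripes = 1) the result is always 1.
	 */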
3549 return stripe_index % map->sub_stripes + 1;
3550 }
3551
static int scrub_simple_stripe(struct scrub_ctx *sctx,
			       struct btrfs_root *extent_root,
			       struct btrfs_root *csum_root,
			       struct btrfs_block_group *bg,
			       struct map_lookup *map,
			       struct btrfs_device *device,
			       int stripe_index)
3559 {
3560 const u64 logical_increment = simple_stripe_full_stripe_len(map);
3561 const u64 orig_logical = simple_stripe_get_logical(map, bg, stripe_index);
3562 const u64 orig_physical = map->stripes[stripe_index].physical;
3563 const int mirror_num = simple_stripe_mirror_num(map, stripe_index);
3564 u64 cur_logical = orig_logical;
3565 u64 cur_physical = orig_physical;
3566 int ret = 0;
3567
3568 while (cur_logical < bg->start + bg->length) {
3569 /*
3570 * Inside each stripe, RAID0 is just SINGLE, and RAID10 is
3571 * just RAID1, so we can reuse scrub_simple_mirror() to scrub
3572 * this stripe.
3573 */
3574 ret = scrub_simple_mirror(sctx, extent_root, csum_root, bg, map,
3575 cur_logical, map->stripe_len, device,
3576 cur_physical, mirror_num);
3577 if (ret)
3578 return ret;
3579 /* Skip to next stripe which belongs to the target device */
3580 cur_logical += logical_increment;
3581 /* For physical offset, we just go to next stripe */
3582 cur_physical += map->stripe_len;
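		/*
		 * Illustrative example: RAID0 with 2 stripes and a 64KiB
		 * stripe_len has a 128KiB full stripe, so each iteration
		 * advances the logical address by 128KiB while the physical
		 * address on this device only advances by 64KiB.
		 */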
3583 }
3584 return ret;
3585 }
3586
static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
					   struct btrfs_block_group *bg,
					   struct extent_map *em,
					   struct btrfs_device *scrub_dev,
					   int stripe_index)
3592 {
3593 struct btrfs_path *path;
3594 struct btrfs_fs_info *fs_info = sctx->fs_info;
3595 struct btrfs_root *root;
3596 struct btrfs_root *csum_root;
3597 struct blk_plug plug;
3598 struct map_lookup *map = em->map_lookup;
3599 const u64 profile = map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK;
3600 const u64 chunk_logical = bg->start;
3601 int ret;
3602 u64 physical = map->stripes[stripe_index].physical;
3603 const u64 dev_stripe_len = btrfs_calc_stripe_length(em);
3604 const u64 physical_end = physical + dev_stripe_len;
3605 u64 logical;
3606 u64 logic_end;
3607 /* The logical increment after finishing one stripe */
3608 u64 increment;
3609 /* Offset inside the chunk */
3610 u64 offset;
3611 u64 stripe_logical;
3612 u64 stripe_end;
3613 int stop_loop = 0;
3614
3615 path = btrfs_alloc_path();
3616 if (!path)
3617 return -ENOMEM;
3618
	/*
	 * Work on the commit root. The related disk blocks are static as
	 * long as COW is applied. This means it is safe to rewrite them
	 * to repair disk errors without any race conditions.
	 */
3624 path->search_commit_root = 1;
3625 path->skip_locking = 1;
3626 path->reada = READA_FORWARD;
3627
3628 wait_event(sctx->list_wait,
3629 atomic_read(&sctx->bios_in_flight) == 0);
3630 scrub_blocked_if_needed(fs_info);
3631
3632 root = btrfs_extent_root(fs_info, bg->start);
3633 csum_root = btrfs_csum_root(fs_info, bg->start);
3634
	/*
	 * Collect all data csums for the stripe to avoid seeking during
	 * the scrub. This might currently (crc32) end up being about 1MiB.
	 */
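	/*
	 * Rough sizing example (assuming 4KiB sectors and 4-byte crc32
	 * csums): every 1GiB of data needs about 1MiB of checksum items,
	 * which is why prefetching them here avoids a lot of seeking.
	 */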
3639 blk_start_plug(&plug);
3640
3641 if (sctx->is_dev_replace &&
3642 btrfs_dev_is_sequential(sctx->wr_tgtdev, physical)) {
3643 mutex_lock(&sctx->wr_lock);
3644 sctx->write_pointer = physical;
3645 mutex_unlock(&sctx->wr_lock);
3646 sctx->flush_all_writes = true;
3647 }
3648
	/*
	 * There used to be a big double loop to handle all profiles using the
	 * same routine, which grew larger and more convoluted over time.
	 *
	 * So here we handle each profile differently, so that simpler profiles
	 * get simpler scrubbing functions.
	 */
3656 if (!(profile & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10 |
3657 BTRFS_BLOCK_GROUP_RAID56_MASK))) {
		/*
		 * The above check rules out all complex profiles, the
		 * remaining profiles are SINGLE|DUP|RAID1|RAID1C*, which is
		 * simple mirrored duplication without stripes.
		 *
		 * Only @physical and @mirror_num need to be calculated using
		 * @stripe_index.
		 */
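		/*
		 * E.g. (illustrative) for a two-copy RAID1 chunk,
		 * stripe_index 0 and 1 simply become mirror_num 1 and 2,
		 * and @physical is the stripe's start on this device.
		 */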
3666 ret = scrub_simple_mirror(sctx, root, csum_root, bg, map,
3667 bg->start, bg->length, scrub_dev,
3668 map->stripes[stripe_index].physical,
3669 stripe_index + 1);
3670 offset = 0;
3671 goto out;
3672 }
3673 if (profile & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
3674 ret = scrub_simple_stripe(sctx, root, csum_root, bg, map,
3675 scrub_dev, stripe_index);
3676 offset = map->stripe_len * (stripe_index / map->sub_stripes);
3677 goto out;
3678 }
3679
3680 /* Only RAID56 goes through the old code */
3681 ASSERT(map->type & BTRFS_BLOCK_GROUP_RAID56_MASK);
3682 ret = 0;
3683
3684 /* Calculate the logical end of the stripe */
3685 get_raid56_logic_offset(physical_end, stripe_index,
3686 map, &logic_end, NULL);
3687 logic_end += chunk_logical;
3688
3689 /* Initialize @offset in case we need to go to out: label */
3690 get_raid56_logic_offset(physical, stripe_index, map, &offset, NULL);
3691 increment = map->stripe_len * nr_data_stripes(map);
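	/*
	 * Illustrative example: RAID5 over 3 devices has 2 data stripes,
	 * so with a 64KiB stripe_len the logical address advances by
	 * 128KiB for every 64KiB of physical progress on one device.
	 */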
3692
3693 /*
3694 * Due to the rotation, for RAID56 it's better to iterate each stripe
3695 * using their physical offset.
3696 */
3697 while (physical < physical_end) {
3698 ret = get_raid56_logic_offset(physical, stripe_index, map,
3699 &logical, &stripe_logical);
3700 logical += chunk_logical;
3701 if (ret) {
			/* It is a parity stripe */
3703 stripe_logical += chunk_logical;
3704 stripe_end = stripe_logical + increment;
3705 ret = scrub_raid56_parity(sctx, map, scrub_dev,
3706 stripe_logical,
3707 stripe_end);
3708 if (ret)
3709 goto out;
3710 goto next;
3711 }
3712
		/*
		 * Now we're at a data stripe, scrub each extent in the range.
		 *
		 * At this stage, if we ignore the repair part, inside each data
		 * stripe it is no different than the SINGLE profile.
		 * We can reuse scrub_simple_mirror() here, as the repair part
		 * is still based on @mirror_num.
		 */
3721 ret = scrub_simple_mirror(sctx, root, csum_root, bg, map,
3722 logical, map->stripe_len,
3723 scrub_dev, physical, 1);
3724 if (ret < 0)
3725 goto out;
3726 next:
3727 logical += increment;
3728 physical += map->stripe_len;
3729 spin_lock(&sctx->stat_lock);
3730 if (stop_loop)
3731 sctx->stat.last_physical =
3732 map->stripes[stripe_index].physical + dev_stripe_len;
3733 else
3734 sctx->stat.last_physical = physical;
3735 spin_unlock(&sctx->stat_lock);
3736 if (stop_loop)
3737 break;
3738 }
3739 out:
3740 /* push queued extents */
3741 scrub_submit(sctx);
3742 mutex_lock(&sctx->wr_lock);
3743 scrub_wr_submit(sctx);
3744 mutex_unlock(&sctx->wr_lock);
3745
3746 blk_finish_plug(&plug);
3747 btrfs_free_path(path);
3748
3749 if (sctx->is_dev_replace && ret >= 0) {
3750 int ret2;
3751
3752 ret2 = sync_write_pointer_for_zoned(sctx,
3753 chunk_logical + offset,
3754 map->stripes[stripe_index].physical,
3755 physical_end);
3756 if (ret2)
3757 ret = ret2;
3758 }
3759
3760 return ret < 0 ? ret : 0;
3761 }
3762
static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
					  struct btrfs_block_group *bg,
					  struct btrfs_device *scrub_dev,
					  u64 dev_offset,
					  u64 dev_extent_len)
3768 {
3769 struct btrfs_fs_info *fs_info = sctx->fs_info;
3770 struct extent_map_tree *map_tree = &fs_info->mapping_tree;
3771 struct map_lookup *map;
3772 struct extent_map *em;
3773 int i;
3774 int ret = 0;
3775
3776 read_lock(&map_tree->lock);
3777 em = lookup_extent_mapping(map_tree, bg->start, bg->length);
3778 read_unlock(&map_tree->lock);
3779
3780 if (!em) {
3781 /*
3782 * Might have been an unused block group deleted by the cleaner
3783 * kthread or relocation.
3784 */
3785 spin_lock(&bg->lock);
3786 if (!test_bit(BLOCK_GROUP_FLAG_REMOVED, &bg->runtime_flags))
3787 ret = -EINVAL;
3788 spin_unlock(&bg->lock);
3789
3790 return ret;
3791 }
3792 if (em->start != bg->start)
3793 goto out;
3794 if (em->len < dev_extent_len)
3795 goto out;
3796
3797 map = em->map_lookup;
3798 for (i = 0; i < map->num_stripes; ++i) {
3799 if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
3800 map->stripes[i].physical == dev_offset) {
3801 ret = scrub_stripe(sctx, bg, em, scrub_dev, i);
3802 if (ret)
3803 goto out;
3804 }
3805 }
3806 out:
3807 free_extent_map(em);
3808
3809 return ret;
3810 }
3811
static int finish_extent_writes_for_zoned(struct btrfs_root *root,
					  struct btrfs_block_group *cache)
3814 {
3815 struct btrfs_fs_info *fs_info = cache->fs_info;
3816 struct btrfs_trans_handle *trans;
3817
3818 if (!btrfs_is_zoned(fs_info))
3819 return 0;
3820
3821 btrfs_wait_block_group_reservations(cache);
3822 btrfs_wait_nocow_writers(cache);
3823 btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start, cache->length);
3824
3825 trans = btrfs_join_transaction(root);
3826 if (IS_ERR(trans))
3827 return PTR_ERR(trans);
3828 return btrfs_commit_transaction(trans);
3829 }
3830
static noinline_for_stack
int scrub_enumerate_chunks(struct scrub_ctx *sctx,
			   struct btrfs_device *scrub_dev, u64 start, u64 end)
3834 {
3835 struct btrfs_dev_extent *dev_extent = NULL;
3836 struct btrfs_path *path;
3837 struct btrfs_fs_info *fs_info = sctx->fs_info;
3838 struct btrfs_root *root = fs_info->dev_root;
3839 u64 chunk_offset;
3840 int ret = 0;
3841 int ro_set;
3842 int slot;
3843 struct extent_buffer *l;
3844 struct btrfs_key key;
3845 struct btrfs_key found_key;
3846 struct btrfs_block_group *cache;
3847 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
3848
3849 path = btrfs_alloc_path();
3850 if (!path)
3851 return -ENOMEM;
3852
3853 path->reada = READA_FORWARD;
3854 path->search_commit_root = 1;
3855 path->skip_locking = 1;
3856
3857 key.objectid = scrub_dev->devid;
3858 key.offset = 0ull;
3859 key.type = BTRFS_DEV_EXTENT_KEY;
3860
3861 while (1) {
3862 u64 dev_extent_len;
3863
3864 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3865 if (ret < 0)
3866 break;
3867 if (ret > 0) {
3868 if (path->slots[0] >=
3869 btrfs_header_nritems(path->nodes[0])) {
3870 ret = btrfs_next_leaf(root, path);
3871 if (ret < 0)
3872 break;
3873 if (ret > 0) {
3874 ret = 0;
3875 break;
3876 }
3877 } else {
3878 ret = 0;
3879 }
3880 }
3881
3882 l = path->nodes[0];
3883 slot = path->slots[0];
3884
3885 btrfs_item_key_to_cpu(l, &found_key, slot);
3886
3887 if (found_key.objectid != scrub_dev->devid)
3888 break;
3889
3890 if (found_key.type != BTRFS_DEV_EXTENT_KEY)
3891 break;
3892
3893 if (found_key.offset >= end)
3894 break;
3895
3896 if (found_key.offset < key.offset)
3897 break;
3898
3899 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
3900 dev_extent_len = btrfs_dev_extent_length(l, dev_extent);
3901
3902 if (found_key.offset + dev_extent_len <= start)
3903 goto skip;
3904
3905 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
3906
3907 /*
3908 * get a reference on the corresponding block group to prevent
3909 * the chunk from going away while we scrub it
3910 */
3911 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3912
		/*
		 * Some chunks are removed but not committed to disk yet,
		 * continue scrubbing.
		 */
3915 if (!cache)
3916 goto skip;
3917
3918 ASSERT(cache->start <= chunk_offset);
3919 /*
3920 * We are using the commit root to search for device extents, so
3921 * that means we could have found a device extent item from a
3922 * block group that was deleted in the current transaction. The
3923 * logical start offset of the deleted block group, stored at
3924 * @chunk_offset, might be part of the logical address range of
3925 * a new block group (which uses different physical extents).
3926 * In this case btrfs_lookup_block_group() has returned the new
3927 * block group, and its start address is less than @chunk_offset.
3928 *
3929 * We skip such new block groups, because it's pointless to
3930 * process them, as we won't find their extents because we search
3931 * for them using the commit root of the extent tree. For a device
3932 * replace it's also fine to skip it, we won't miss copying them
3933 * to the target device because we have the write duplication
3934 * setup through the regular write path (by btrfs_map_block()),
3935 * and we have committed a transaction when we started the device
3936 * replace, right after setting up the device replace state.
3937 */
3938 if (cache->start < chunk_offset) {
3939 btrfs_put_block_group(cache);
3940 goto skip;
3941 }
3942
3943 if (sctx->is_dev_replace && btrfs_is_zoned(fs_info)) {
3944 if (!test_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags)) {
3945 btrfs_put_block_group(cache);
3946 goto skip;
3947 }
3948 }
3949
3950 /*
3951 * Make sure that while we are scrubbing the corresponding block
3952 * group doesn't get its logical address and its device extents
3953 * reused for another block group, which can possibly be of a
3954 * different type and different profile. We do this to prevent
3955 * false error detections and crashes due to bogus attempts to
3956 * repair extents.
3957 */
3958 spin_lock(&cache->lock);
3959 if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &cache->runtime_flags)) {
3960 spin_unlock(&cache->lock);
3961 btrfs_put_block_group(cache);
3962 goto skip;
3963 }
3964 btrfs_freeze_block_group(cache);
3965 spin_unlock(&cache->lock);
3966
		/*
		 * We need to call btrfs_inc_block_group_ro() with scrubs_paused,
		 * to avoid a deadlock caused by:
		 * btrfs_inc_block_group_ro()
		 * -> btrfs_wait_for_commit()
		 * -> btrfs_commit_transaction()
		 * -> btrfs_scrub_pause()
		 */
3975 scrub_pause_on(fs_info);
3976
		/*
		 * Don't do chunk preallocation for scrub.
		 *
		 * This is especially important for SYSTEM bgs, or we can hit
		 * -EFBIG from btrfs_finish_chunk_alloc() like:
		 * 1. The only SYSTEM bg is marked RO.
		 *    Since SYSTEM bg is small, that's pretty common.
		 * 2. New SYSTEM bg will be allocated
		 *    Because the regular chunk allocation will create a new chunk.
		 * 3. New SYSTEM bg is empty and will get cleaned up
		 *    Before cleanup really happens, it's marked RO again.
		 * 4. Empty SYSTEM bg gets scrubbed
		 *    We go back to 2.
		 *
		 * This can easily boost the amount of SYSTEM chunks if the
		 * cleaner thread can't be triggered fast enough, and use up all
		 * space of btrfs_super_block::sys_chunk_array
		 *
		 * While for dev replace, we need to try our best to mark the
		 * block group RO, to prevent a race between:
		 * - Write duplication
		 *   Contains latest data
		 * - Scrub copy
		 *   Contains data from commit tree
		 *
		 * If the target block group is not marked RO, nocow writes can
		 * be overwritten by scrub copy, causing data corruption.
		 * So for dev-replace, it's not allowed to continue if a block
		 * group is not RO.
		 */
4007 ret = btrfs_inc_block_group_ro(cache, sctx->is_dev_replace);
4008 if (!ret && sctx->is_dev_replace) {
4009 ret = finish_extent_writes_for_zoned(root, cache);
4010 if (ret) {
4011 btrfs_dec_block_group_ro(cache);
4012 scrub_pause_off(fs_info);
4013 btrfs_put_block_group(cache);
4014 break;
4015 }
4016 }
4017
4018 if (ret == 0) {
4019 ro_set = 1;
4020 } else if (ret == -ENOSPC && !sctx->is_dev_replace &&
4021 !(cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK)) {
			/*
			 * btrfs_inc_block_group_ro() returns -ENOSPC when it
			 * fails to create a new chunk for metadata.
			 * It is not a problem for scrub, because metadata is
			 * always COWed, and our scrub has paused transaction
			 * commits.
			 *
			 * For RAID56 chunks, we have to mark them read-only
			 * for scrub, as later we would use our own cache
			 * out of the RAID56 realm.
			 * Thus we want the RAID56 bg to be marked RO to
			 * prevent RMW from screwing up our cache.
			 */
4035 ro_set = 0;
4036 } else if (ret == -ETXTBSY) {
4037 btrfs_warn(fs_info,
4038 "skipping scrub of block group %llu due to active swapfile",
4039 cache->start);
4040 scrub_pause_off(fs_info);
4041 ret = 0;
4042 goto skip_unfreeze;
4043 } else {
4044 btrfs_warn(fs_info,
4045 "failed setting block group ro: %d", ret);
4046 btrfs_unfreeze_block_group(cache);
4047 btrfs_put_block_group(cache);
4048 scrub_pause_off(fs_info);
4049 break;
4050 }
4051
		/*
		 * Now the target block group is marked RO, wait for nocow
		 * writes to finish before dev-replace.
		 * COW is fine, as COW never overwrites extents in the commit
		 * tree.
		 */
4057 if (sctx->is_dev_replace) {
4058 btrfs_wait_nocow_writers(cache);
4059 btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start,
4060 cache->length);
4061 }
4062
4063 scrub_pause_off(fs_info);
4064 down_write(&dev_replace->rwsem);
4065 dev_replace->cursor_right = found_key.offset + dev_extent_len;
4066 dev_replace->cursor_left = found_key.offset;
4067 dev_replace->item_needs_writeback = 1;
4068 up_write(&dev_replace->rwsem);
4069
4070 ret = scrub_chunk(sctx, cache, scrub_dev, found_key.offset,
4071 dev_extent_len);
4072
4073 /*
4074 * flush, submit all pending read and write bios, afterwards
4075 * wait for them.
4076 * Note that in the dev replace case, a read request causes
4077 * write requests that are submitted in the read completion
4078 * worker. Therefore in the current situation, it is required
4079 * that all write requests are flushed, so that all read and
4080 * write requests are really completed when bios_in_flight
4081 * changes to 0.
4082 */
4083 sctx->flush_all_writes = true;
4084 scrub_submit(sctx);
4085 mutex_lock(&sctx->wr_lock);
4086 scrub_wr_submit(sctx);
4087 mutex_unlock(&sctx->wr_lock);
4088
4089 wait_event(sctx->list_wait,
4090 atomic_read(&sctx->bios_in_flight) == 0);
4091
4092 scrub_pause_on(fs_info);
4093
		/*
		 * Must be called before we decrease @scrub_paused.
		 * Make sure we don't block transaction commit while
		 * we are waiting for pending workers to finish.
		 */
4099 wait_event(sctx->list_wait,
4100 atomic_read(&sctx->workers_pending) == 0);
4101 sctx->flush_all_writes = false;
4102
4103 scrub_pause_off(fs_info);
4104
4105 if (sctx->is_dev_replace &&
4106 !btrfs_finish_block_group_to_copy(dev_replace->srcdev,
4107 cache, found_key.offset))
4108 ro_set = 0;
4109
4110 down_write(&dev_replace->rwsem);
4111 dev_replace->cursor_left = dev_replace->cursor_right;
4112 dev_replace->item_needs_writeback = 1;
4113 up_write(&dev_replace->rwsem);
4114
4115 if (ro_set)
4116 btrfs_dec_block_group_ro(cache);
4117
4118 /*
4119 * We might have prevented the cleaner kthread from deleting
4120 * this block group if it was already unused because we raced
4121 * and set it to RO mode first. So add it back to the unused
4122 * list, otherwise it might not ever be deleted unless a manual
4123 * balance is triggered or it becomes used and unused again.
4124 */
4125 spin_lock(&cache->lock);
4126 if (!test_bit(BLOCK_GROUP_FLAG_REMOVED, &cache->runtime_flags) &&
4127 !cache->ro && cache->reserved == 0 && cache->used == 0) {
4128 spin_unlock(&cache->lock);
4129 if (btrfs_test_opt(fs_info, DISCARD_ASYNC))
4130 btrfs_discard_queue_work(&fs_info->discard_ctl,
4131 cache);
4132 else
4133 btrfs_mark_bg_unused(cache);
4134 } else {
4135 spin_unlock(&cache->lock);
4136 }
4137 skip_unfreeze:
4138 btrfs_unfreeze_block_group(cache);
4139 btrfs_put_block_group(cache);
4140 if (ret)
4141 break;
4142 if (sctx->is_dev_replace &&
4143 atomic64_read(&dev_replace->num_write_errors) > 0) {
4144 ret = -EIO;
4145 break;
4146 }
4147 if (sctx->stat.malloc_errors > 0) {
4148 ret = -ENOMEM;
4149 break;
4150 }
4151 skip:
4152 key.offset = found_key.offset + dev_extent_len;
4153 btrfs_release_path(path);
4154 }
4155
4156 btrfs_free_path(path);
4157
4158 return ret;
4159 }
4160
static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
					   struct btrfs_device *scrub_dev)
4163 {
4164 int i;
4165 u64 bytenr;
4166 u64 gen;
4167 int ret;
4168 struct btrfs_fs_info *fs_info = sctx->fs_info;
4169
4170 if (BTRFS_FS_ERROR(fs_info))
4171 return -EROFS;
4172
	/* Seed devices of a new filesystem have their own generation. */
4174 if (scrub_dev->fs_devices != fs_info->fs_devices)
4175 gen = scrub_dev->generation;
4176 else
4177 gen = fs_info->last_trans_committed;
4178
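	/*
	 * Super block copies sit at fixed offsets (currently 64KiB, 64MiB
	 * and 256GiB); copies beyond the device size are simply skipped.
	 */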
4179 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
4180 bytenr = btrfs_sb_offset(i);
4181 if (bytenr + BTRFS_SUPER_INFO_SIZE >
4182 scrub_dev->commit_total_bytes)
4183 break;
4184 if (!btrfs_check_super_location(scrub_dev, bytenr))
4185 continue;
4186
4187 ret = scrub_sectors(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
4188 scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i,
4189 NULL, bytenr);
4190 if (ret)
4191 return ret;
4192 }
4193 wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
4194
4195 return 0;
4196 }
4197
static void scrub_workers_put(struct btrfs_fs_info *fs_info)
4199 {
4200 if (refcount_dec_and_mutex_lock(&fs_info->scrub_workers_refcnt,
4201 &fs_info->scrub_lock)) {
4202 struct workqueue_struct *scrub_workers = fs_info->scrub_workers;
4203 struct workqueue_struct *scrub_wr_comp =
4204 fs_info->scrub_wr_completion_workers;
4205 struct workqueue_struct *scrub_parity =
4206 fs_info->scrub_parity_workers;
4207
4208 fs_info->scrub_workers = NULL;
4209 fs_info->scrub_wr_completion_workers = NULL;
4210 fs_info->scrub_parity_workers = NULL;
4211 mutex_unlock(&fs_info->scrub_lock);
4212
4213 if (scrub_workers)
4214 destroy_workqueue(scrub_workers);
4215 if (scrub_wr_comp)
4216 destroy_workqueue(scrub_wr_comp);
4217 if (scrub_parity)
4218 destroy_workqueue(scrub_parity);
4219 }
4220 }
4221
/*
 * Get a reference count on fs_info->scrub_workers. Start the workers if
 * necessary.
 */
static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
						int is_dev_replace)
4227 {
4228 struct workqueue_struct *scrub_workers = NULL;
4229 struct workqueue_struct *scrub_wr_comp = NULL;
4230 struct workqueue_struct *scrub_parity = NULL;
4231 unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
4232 int max_active = fs_info->thread_pool_size;
4233 int ret = -ENOMEM;
4234
4235 if (refcount_inc_not_zero(&fs_info->scrub_workers_refcnt))
4236 return 0;
4237
4238 scrub_workers = alloc_workqueue("btrfs-scrub", flags,
4239 is_dev_replace ? 1 : max_active);
4240 if (!scrub_workers)
4241 goto fail_scrub_workers;
4242
4243 scrub_wr_comp = alloc_workqueue("btrfs-scrubwrc", flags, max_active);
4244 if (!scrub_wr_comp)
4245 goto fail_scrub_wr_completion_workers;
4246
4247 scrub_parity = alloc_workqueue("btrfs-scrubparity", flags, max_active);
4248 if (!scrub_parity)
4249 goto fail_scrub_parity_workers;
4250
4251 mutex_lock(&fs_info->scrub_lock);
4252 if (refcount_read(&fs_info->scrub_workers_refcnt) == 0) {
4253 ASSERT(fs_info->scrub_workers == NULL &&
4254 fs_info->scrub_wr_completion_workers == NULL &&
4255 fs_info->scrub_parity_workers == NULL);
4256 fs_info->scrub_workers = scrub_workers;
4257 fs_info->scrub_wr_completion_workers = scrub_wr_comp;
4258 fs_info->scrub_parity_workers = scrub_parity;
4259 refcount_set(&fs_info->scrub_workers_refcnt, 1);
4260 mutex_unlock(&fs_info->scrub_lock);
4261 return 0;
4262 }
	/* Another thread raced in and created the workers for us */
4264 refcount_inc(&fs_info->scrub_workers_refcnt);
4265 mutex_unlock(&fs_info->scrub_lock);
4266
4267 ret = 0;
4268 destroy_workqueue(scrub_parity);
4269 fail_scrub_parity_workers:
4270 destroy_workqueue(scrub_wr_comp);
4271 fail_scrub_wr_completion_workers:
4272 destroy_workqueue(scrub_workers);
4273 fail_scrub_workers:
4274 return ret;
4275 }
4276
int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
		    u64 end, struct btrfs_scrub_progress *progress,
		    int readonly, int is_dev_replace)
4280 {
4281 struct btrfs_dev_lookup_args args = { .devid = devid };
4282 struct scrub_ctx *sctx;
4283 int ret;
4284 struct btrfs_device *dev;
4285 unsigned int nofs_flag;
4286 bool need_commit = false;
4287
4288 if (btrfs_fs_closing(fs_info))
4289 return -EAGAIN;
4290
4291 /* At mount time we have ensured nodesize is in the range of [4K, 64K]. */
4292 ASSERT(fs_info->nodesize <= BTRFS_STRIPE_LEN);
4293
4294 /*
4295 * SCRUB_MAX_SECTORS_PER_BLOCK is calculated using the largest possible
4296 * value (max nodesize / min sectorsize), thus nodesize should always
4297 * be fine.
4298 */
4299 ASSERT(fs_info->nodesize <=
4300 SCRUB_MAX_SECTORS_PER_BLOCK << fs_info->sectorsize_bits);
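	/*
	 * E.g. with a 4KiB sectorsize, SCRUB_MAX_SECTORS_PER_BLOCK is
	 * 64KiB / 4KiB = 16 sectors, enough for the largest 64KiB nodesize.
	 */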
4301
4302 /* Allocate outside of device_list_mutex */
4303 sctx = scrub_setup_ctx(fs_info, is_dev_replace);
4304 if (IS_ERR(sctx))
4305 return PTR_ERR(sctx);
4306
4307 ret = scrub_workers_get(fs_info, is_dev_replace);
4308 if (ret)
4309 goto out_free_ctx;
4310
4311 mutex_lock(&fs_info->fs_devices->device_list_mutex);
4312 dev = btrfs_find_device(fs_info->fs_devices, &args);
4313 if (!dev || (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) &&
4314 !is_dev_replace)) {
4315 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4316 ret = -ENODEV;
4317 goto out;
4318 }
4319
4320 if (!is_dev_replace && !readonly &&
4321 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
4322 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4323 btrfs_err_in_rcu(fs_info,
4324 "scrub on devid %llu: filesystem on %s is not writable",
4325 devid, rcu_str_deref(dev->name));
4326 ret = -EROFS;
4327 goto out;
4328 }
4329
4330 mutex_lock(&fs_info->scrub_lock);
4331 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
4332 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &dev->dev_state)) {
4333 mutex_unlock(&fs_info->scrub_lock);
4334 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4335 ret = -EIO;
4336 goto out;
4337 }
4338
4339 down_read(&fs_info->dev_replace.rwsem);
4340 if (dev->scrub_ctx ||
4341 (!is_dev_replace &&
4342 btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
4343 up_read(&fs_info->dev_replace.rwsem);
4344 mutex_unlock(&fs_info->scrub_lock);
4345 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4346 ret = -EINPROGRESS;
4347 goto out;
4348 }
4349 up_read(&fs_info->dev_replace.rwsem);
4350
4351 sctx->readonly = readonly;
4352 dev->scrub_ctx = sctx;
4353 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4354
	/*
	 * By checking @scrub_pause_req here, we can avoid a race between
	 * committing a transaction and scrubbing.
	 */
4359 __scrub_blocked_if_needed(fs_info);
4360 atomic_inc(&fs_info->scrubs_running);
4361 mutex_unlock(&fs_info->scrub_lock);
4362
4363 /*
4364 * In order to avoid deadlock with reclaim when there is a transaction
4365 * trying to pause scrub, make sure we use GFP_NOFS for all the
4366 * allocations done at btrfs_scrub_sectors() and scrub_sectors_for_parity()
4367 * invoked by our callees. The pausing request is done when the
4368 * transaction commit starts, and it blocks the transaction until scrub
4369 * is paused (done at specific points at scrub_stripe() or right above
4370 * before incrementing fs_info->scrubs_running).
4371 */
4372 nofs_flag = memalloc_nofs_save();
4373 if (!is_dev_replace) {
4374 u64 old_super_errors;
4375
4376 spin_lock(&sctx->stat_lock);
4377 old_super_errors = sctx->stat.super_errors;
4378 spin_unlock(&sctx->stat_lock);
4379
4380 btrfs_info(fs_info, "scrub: started on devid %llu", devid);
4381 /*
4382 * by holding device list mutex, we can
4383 * kick off writing super in log tree sync.
4384 */
4385 mutex_lock(&fs_info->fs_devices->device_list_mutex);
4386 ret = scrub_supers(sctx, dev);
4387 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4388
4389 spin_lock(&sctx->stat_lock);
4390 /*
4391 * Super block errors found, but we can not commit transaction
4392 * at current context, since btrfs_commit_transaction() needs
4393 * to pause the current running scrub (hold by ourselves).
4394 */
4395 if (sctx->stat.super_errors > old_super_errors && !sctx->readonly)
4396 need_commit = true;
4397 spin_unlock(&sctx->stat_lock);
4398 }
4399
4400 if (!ret)
4401 ret = scrub_enumerate_chunks(sctx, dev, start, end);
4402 memalloc_nofs_restore(nofs_flag);
4403
4404 wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
4405 atomic_dec(&fs_info->scrubs_running);
4406 wake_up(&fs_info->scrub_pause_wait);
4407
4408 wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0);
4409
4410 if (progress)
4411 memcpy(progress, &sctx->stat, sizeof(*progress));
4412
4413 if (!is_dev_replace)
4414 btrfs_info(fs_info, "scrub: %s on devid %llu with status: %d",
4415 ret ? "not finished" : "finished", devid, ret);
4416
4417 mutex_lock(&fs_info->scrub_lock);
4418 dev->scrub_ctx = NULL;
4419 mutex_unlock(&fs_info->scrub_lock);
4420
4421 scrub_workers_put(fs_info);
4422 scrub_put_ctx(sctx);
4423
4424 /*
4425 * We found some super block errors before, now try to force a
4426 * transaction commit, as scrub has finished.
4427 */
4428 if (need_commit) {
4429 struct btrfs_trans_handle *trans;
4430
4431 trans = btrfs_start_transaction(fs_info->tree_root, 0);
4432 if (IS_ERR(trans)) {
4433 ret = PTR_ERR(trans);
4434 btrfs_err(fs_info,
4435 "scrub: failed to start transaction to fix super block errors: %d", ret);
4436 return ret;
4437 }
4438 ret = btrfs_commit_transaction(trans);
4439 if (ret < 0)
4440 btrfs_err(fs_info,
4441 "scrub: failed to commit transaction to fix super block errors: %d", ret);
4442 }
4443 return ret;
4444 out:
4445 scrub_workers_put(fs_info);
4446 out_free_ctx:
4447 scrub_free_ctx(sctx);
4448
4449 return ret;
4450 }
4451
void btrfs_scrub_pause(struct btrfs_fs_info *fs_info)
4453 {
4454 mutex_lock(&fs_info->scrub_lock);
4455 atomic_inc(&fs_info->scrub_pause_req);
4456 while (atomic_read(&fs_info->scrubs_paused) !=
4457 atomic_read(&fs_info->scrubs_running)) {
4458 mutex_unlock(&fs_info->scrub_lock);
4459 wait_event(fs_info->scrub_pause_wait,
4460 atomic_read(&fs_info->scrubs_paused) ==
4461 atomic_read(&fs_info->scrubs_running));
4462 mutex_lock(&fs_info->scrub_lock);
4463 }
4464 mutex_unlock(&fs_info->scrub_lock);
4465 }
4466
void btrfs_scrub_continue(struct btrfs_fs_info *fs_info)
4468 {
4469 atomic_dec(&fs_info->scrub_pause_req);
4470 wake_up(&fs_info->scrub_pause_wait);
4471 }
4472
int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
4474 {
4475 mutex_lock(&fs_info->scrub_lock);
4476 if (!atomic_read(&fs_info->scrubs_running)) {
4477 mutex_unlock(&fs_info->scrub_lock);
4478 return -ENOTCONN;
4479 }
4480
4481 atomic_inc(&fs_info->scrub_cancel_req);
4482 while (atomic_read(&fs_info->scrubs_running)) {
4483 mutex_unlock(&fs_info->scrub_lock);
4484 wait_event(fs_info->scrub_pause_wait,
4485 atomic_read(&fs_info->scrubs_running) == 0);
4486 mutex_lock(&fs_info->scrub_lock);
4487 }
4488 atomic_dec(&fs_info->scrub_cancel_req);
4489 mutex_unlock(&fs_info->scrub_lock);
4490
4491 return 0;
4492 }
4493
int btrfs_scrub_cancel_dev(struct btrfs_device *dev)
4495 {
4496 struct btrfs_fs_info *fs_info = dev->fs_info;
4497 struct scrub_ctx *sctx;
4498
4499 mutex_lock(&fs_info->scrub_lock);
4500 sctx = dev->scrub_ctx;
4501 if (!sctx) {
4502 mutex_unlock(&fs_info->scrub_lock);
4503 return -ENOTCONN;
4504 }
4505 atomic_inc(&sctx->cancel_req);
4506 while (dev->scrub_ctx) {
4507 mutex_unlock(&fs_info->scrub_lock);
4508 wait_event(fs_info->scrub_pause_wait,
4509 dev->scrub_ctx == NULL);
4510 mutex_lock(&fs_info->scrub_lock);
4511 }
4512 mutex_unlock(&fs_info->scrub_lock);
4513
4514 return 0;
4515 }
4516
int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
			 struct btrfs_scrub_progress *progress)
4519 {
4520 struct btrfs_dev_lookup_args args = { .devid = devid };
4521 struct btrfs_device *dev;
4522 struct scrub_ctx *sctx = NULL;
4523
4524 mutex_lock(&fs_info->fs_devices->device_list_mutex);
4525 dev = btrfs_find_device(fs_info->fs_devices, &args);
4526 if (dev)
4527 sctx = dev->scrub_ctx;
4528 if (sctx)
4529 memcpy(progress, &sctx->stat, sizeof(*progress));
4530 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4531
4532 return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
4533 }
4534
static void scrub_find_good_copy(struct btrfs_fs_info *fs_info,
				 u64 extent_logical, u32 extent_len,
				 u64 *extent_physical,
				 struct btrfs_device **extent_dev,
				 int *extent_mirror_num)
4540 {
4541 u64 mapped_length;
4542 struct btrfs_io_context *bioc = NULL;
4543 int ret;
4544
4545 mapped_length = extent_len;
4546 ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, extent_logical,
4547 &mapped_length, &bioc, 0);
4548 if (ret || !bioc || mapped_length < extent_len ||
4549 !bioc->stripes[0].dev->bdev) {
4550 btrfs_put_bioc(bioc);
4551 return;
4552 }
4553
4554 *extent_physical = bioc->stripes[0].physical;
4555 *extent_mirror_num = bioc->mirror_num;
4556 *extent_dev = bioc->stripes[0].dev;
4557 btrfs_put_bioc(bioc);
4558 }
4559