1 /*
2  * Copyright (C) 2011, 2012 STRATO.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17  */
18 
19 #include <linux/blkdev.h>
20 #include <linux/ratelimit.h>
21 #include <linux/sched/mm.h>
22 #include "ctree.h"
23 #include "volumes.h"
24 #include "disk-io.h"
25 #include "ordered-data.h"
26 #include "transaction.h"
27 #include "backref.h"
28 #include "extent_io.h"
29 #include "dev-replace.h"
30 #include "check-integrity.h"
31 #include "rcu-string.h"
32 #include "raid56.h"
33 
34 /*
35  * This is only the first step towards a full-featured scrub. It reads all
36  * extents and super blocks and verifies the checksums. In case a bad checksum
37  * is found or an extent cannot be read, good data will be written back if
38  * any can be found.
39  *
40  * Future enhancements:
41  *  - In case an unrepairable extent is encountered, track which files are
42  *    affected and report them
43  *  - track and record media errors, throw out bad devices
44  *  - add a mode to also read unallocated space
45  */
46 
47 struct scrub_block;
48 struct scrub_ctx;
49 
50 /*
51  * the following three values only influence the performance.
52  * The last one configures the number of parallel and outstanding I/O
53  * operations. The first two values configure an upper limit for the number
54  * of (dynamically allocated) pages that are added to a bio.
55  */
56 #define SCRUB_PAGES_PER_RD_BIO	32	/* 128k per bio */
57 #define SCRUB_PAGES_PER_WR_BIO	32	/* 128k per bio */
58 #define SCRUB_BIOS_PER_SCTX	64	/* 8MB per device in flight */
59 
60 /*
61  * the following value times PAGE_SIZE needs to be large enough to match the
62  * largest node/leaf/sector size that shall be supported.
63  * Values larger than BTRFS_STRIPE_LEN are not supported.
64  */
65 #define SCRUB_MAX_PAGES_PER_BLOCK	16	/* 64k per node/leaf/sector */
66 
67 struct scrub_recover {
68 	refcount_t		refs;
69 	struct btrfs_bio	*bbio;
70 	u64			map_length;
71 };
72 
73 struct scrub_page {
74 	struct scrub_block	*sblock;
75 	struct page		*page;
76 	struct btrfs_device	*dev;
77 	struct list_head	list;
78 	u64			flags;  /* extent flags */
79 	u64			generation;
80 	u64			logical;
81 	u64			physical;
82 	u64			physical_for_dev_replace;
83 	atomic_t		refs;
84 	struct {
85 		unsigned int	mirror_num:8;
86 		unsigned int	have_csum:1;
87 		unsigned int	io_error:1;
88 	};
89 	u8			csum[BTRFS_CSUM_SIZE];
90 
91 	struct scrub_recover	*recover;
92 };
93 
94 struct scrub_bio {
95 	int			index;
96 	struct scrub_ctx	*sctx;
97 	struct btrfs_device	*dev;
98 	struct bio		*bio;
99 	blk_status_t		status;
100 	u64			logical;
101 	u64			physical;
102 #if SCRUB_PAGES_PER_WR_BIO >= SCRUB_PAGES_PER_RD_BIO
103 	struct scrub_page	*pagev[SCRUB_PAGES_PER_WR_BIO];
104 #else
105 	struct scrub_page	*pagev[SCRUB_PAGES_PER_RD_BIO];
106 #endif
107 	int			page_count;
108 	int			next_free;
109 	struct btrfs_work	work;
110 };
111 
112 struct scrub_block {
113 	struct scrub_page	*pagev[SCRUB_MAX_PAGES_PER_BLOCK];
114 	int			page_count;
115 	atomic_t		outstanding_pages;
116 	refcount_t		refs; /* free mem on transition to zero */
117 	struct scrub_ctx	*sctx;
118 	struct scrub_parity	*sparity;
119 	struct {
120 		unsigned int	header_error:1;
121 		unsigned int	checksum_error:1;
122 		unsigned int	no_io_error_seen:1;
123 		unsigned int	generation_error:1; /* also sets header_error */
124 
125 		/* The following is for the data used to check parity */
126 		/* It is for the data with checksum */
127 		unsigned int	data_corrected:1;
128 	};
129 	struct btrfs_work	work;
130 };
131 
132 /* Used for the chunks with parity stripes, such as RAID5/6 */
133 struct scrub_parity {
134 	struct scrub_ctx	*sctx;
135 
136 	struct btrfs_device	*scrub_dev;
137 
138 	u64			logic_start;
139 
140 	u64			logic_end;
141 
142 	int			nsectors;
143 
144 	u64			stripe_len;
145 
146 	refcount_t		refs;
147 
148 	struct list_head	spages;
149 
150 	/* Work of parity check and repair */
151 	struct btrfs_work	work;
152 
153 	/* Mark the parity blocks which have data */
154 	unsigned long		*dbitmap;
155 
156 	/*
157 	 * Mark the parity blocks which have data, but where errors happened
158 	 * when reading or checking the data
159 	 */
160 	unsigned long		*ebitmap;
161 
162 	unsigned long		bitmap[0];
163 };
164 
165 struct scrub_ctx {
166 	struct scrub_bio	*bios[SCRUB_BIOS_PER_SCTX];
167 	struct btrfs_fs_info	*fs_info;
168 	int			first_free;
169 	int			curr;
170 	atomic_t		bios_in_flight;
171 	atomic_t		workers_pending;
172 	spinlock_t		list_lock;
173 	wait_queue_head_t	list_wait;
174 	u16			csum_size;
175 	struct list_head	csum_list;
176 	atomic_t		cancel_req;
177 	int			readonly;
178 	int			pages_per_rd_bio;
179 
180 	int			is_dev_replace;
181 
182 	struct scrub_bio        *wr_curr_bio;
183 	struct mutex            wr_lock;
184 	int                     pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */
185 	struct btrfs_device     *wr_tgtdev;
186 	bool                    flush_all_writes;
187 
188 	/*
189 	 * statistics
190 	 */
191 	struct btrfs_scrub_progress stat;
192 	spinlock_t		stat_lock;
193 
194 	/*
195 	 * Use a ref counter to avoid use-after-free issues. Scrub workers
196 	 * decrement bios_in_flight and workers_pending and then do a wakeup
197 	 * on the list_wait wait queue. We must ensure the main scrub task
198 	 * doesn't free the scrub context before or while the workers are
199 	 * doing the wakeup() call.
200 	 */
201 	refcount_t              refs;
202 };
203 
204 struct scrub_fixup_nodatasum {
205 	struct scrub_ctx	*sctx;
206 	struct btrfs_device	*dev;
207 	u64			logical;
208 	struct btrfs_root	*root;
209 	struct btrfs_work	work;
210 	int			mirror_num;
211 };
212 
213 struct scrub_nocow_inode {
214 	u64			inum;
215 	u64			offset;
216 	u64			root;
217 	struct list_head	list;
218 };
219 
220 struct scrub_copy_nocow_ctx {
221 	struct scrub_ctx	*sctx;
222 	u64			logical;
223 	u64			len;
224 	int			mirror_num;
225 	u64			physical_for_dev_replace;
226 	struct list_head	inodes;
227 	struct btrfs_work	work;
228 };
229 
230 struct scrub_warning {
231 	struct btrfs_path	*path;
232 	u64			extent_item_size;
233 	const char		*errstr;
234 	sector_t		sector;
235 	u64			logical;
236 	struct btrfs_device	*dev;
237 };
238 
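/*
 * One node in the per-block-group full stripe locks rb-tree (see
 * insert_full_stripe_lock() below), keyed by the logical start of a full
 * stripe. @refs counts the lock_full_stripe() holders, @mutex serializes
 * recovery/repair within that full stripe.
 */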
239 struct full_stripe_lock {
240 	struct rb_node node;
241 	u64 logical;
242 	u64 refs;
243 	struct mutex mutex;
244 };
245 
246 static void scrub_pending_bio_inc(struct scrub_ctx *sctx);
247 static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
248 static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx);
249 static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx);
250 static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
251 static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
252 				     struct scrub_block *sblocks_for_recheck);
253 static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
254 				struct scrub_block *sblock,
255 				int retry_failed_mirror);
256 static void scrub_recheck_block_checksum(struct scrub_block *sblock);
257 static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
258 					     struct scrub_block *sblock_good);
259 static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
260 					    struct scrub_block *sblock_good,
261 					    int page_num, int force_write);
262 static void scrub_write_block_to_dev_replace(struct scrub_block *sblock);
263 static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
264 					   int page_num);
265 static int scrub_checksum_data(struct scrub_block *sblock);
266 static int scrub_checksum_tree_block(struct scrub_block *sblock);
267 static int scrub_checksum_super(struct scrub_block *sblock);
268 static void scrub_block_get(struct scrub_block *sblock);
269 static void scrub_block_put(struct scrub_block *sblock);
270 static void scrub_page_get(struct scrub_page *spage);
271 static void scrub_page_put(struct scrub_page *spage);
272 static void scrub_parity_get(struct scrub_parity *sparity);
273 static void scrub_parity_put(struct scrub_parity *sparity);
274 static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
275 				    struct scrub_page *spage);
276 static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
277 		       u64 physical, struct btrfs_device *dev, u64 flags,
278 		       u64 gen, int mirror_num, u8 *csum, int force,
279 		       u64 physical_for_dev_replace);
280 static void scrub_bio_end_io(struct bio *bio);
281 static void scrub_bio_end_io_worker(struct btrfs_work *work);
282 static void scrub_block_complete(struct scrub_block *sblock);
283 static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
284 			       u64 extent_logical, u64 extent_len,
285 			       u64 *extent_physical,
286 			       struct btrfs_device **extent_dev,
287 			       int *extent_mirror_num);
288 static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
289 				    struct scrub_page *spage);
290 static void scrub_wr_submit(struct scrub_ctx *sctx);
291 static void scrub_wr_bio_end_io(struct bio *bio);
292 static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
293 static int write_page_nocow(struct scrub_ctx *sctx,
294 			    u64 physical_for_dev_replace, struct page *page);
295 static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
296 				      struct scrub_copy_nocow_ctx *ctx);
297 static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
298 			    int mirror_num, u64 physical_for_dev_replace);
299 static void copy_nocow_pages_worker(struct btrfs_work *work);
300 static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
301 static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
302 static void scrub_put_ctx(struct scrub_ctx *sctx);
303 
304 static inline int scrub_is_page_on_raid56(struct scrub_page *page)
305 {
306 	return page->recover &&
307 	       (page->recover->bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK);
308 }
309 
310 static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
311 {
312 	refcount_inc(&sctx->refs);
313 	atomic_inc(&sctx->bios_in_flight);
314 }
315 
316 static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
317 {
318 	atomic_dec(&sctx->bios_in_flight);
319 	wake_up(&sctx->list_wait);
320 	scrub_put_ctx(sctx);
321 }
322 
323 static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
324 {
325 	while (atomic_read(&fs_info->scrub_pause_req)) {
326 		mutex_unlock(&fs_info->scrub_lock);
327 		wait_event(fs_info->scrub_pause_wait,
328 		   atomic_read(&fs_info->scrub_pause_req) == 0);
329 		mutex_lock(&fs_info->scrub_lock);
330 	}
331 }
332 
333 static void scrub_pause_on(struct btrfs_fs_info *fs_info)
334 {
335 	atomic_inc(&fs_info->scrubs_paused);
336 	wake_up(&fs_info->scrub_pause_wait);
337 }
338 
339 static void scrub_pause_off(struct btrfs_fs_info *fs_info)
340 {
341 	mutex_lock(&fs_info->scrub_lock);
342 	__scrub_blocked_if_needed(fs_info);
343 	atomic_dec(&fs_info->scrubs_paused);
344 	mutex_unlock(&fs_info->scrub_lock);
345 
346 	wake_up(&fs_info->scrub_pause_wait);
347 }
348 
349 static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
350 {
351 	scrub_pause_on(fs_info);
352 	scrub_pause_off(fs_info);
353 }
354 
355 /*
356  * Insert new full stripe lock into full stripe locks tree
357  *
358  * Return pointer to existing or newly inserted full_stripe_lock structure if
359  * everything works well.
360  * Return ERR_PTR(-ENOMEM) if we failed to allocate memory
361  *
362  * NOTE: caller must hold full_stripe_locks_root->lock before calling this
363  * function
364  */
365 static struct full_stripe_lock *insert_full_stripe_lock(
366 		struct btrfs_full_stripe_locks_tree *locks_root,
367 		u64 fstripe_logical)
368 {
369 	struct rb_node **p;
370 	struct rb_node *parent = NULL;
371 	struct full_stripe_lock *entry;
372 	struct full_stripe_lock *ret;
373 
374 	WARN_ON(!mutex_is_locked(&locks_root->lock));
375 
376 	p = &locks_root->root.rb_node;
377 	while (*p) {
378 		parent = *p;
379 		entry = rb_entry(parent, struct full_stripe_lock, node);
380 		if (fstripe_logical < entry->logical) {
381 			p = &(*p)->rb_left;
382 		} else if (fstripe_logical > entry->logical) {
383 			p = &(*p)->rb_right;
384 		} else {
385 			entry->refs++;
386 			return entry;
387 		}
388 	}
389 
390 	/* Insert new lock */
391 	ret = kmalloc(sizeof(*ret), GFP_KERNEL);
392 	if (!ret)
393 		return ERR_PTR(-ENOMEM);
394 	ret->logical = fstripe_logical;
395 	ret->refs = 1;
396 	mutex_init(&ret->mutex);
397 
398 	rb_link_node(&ret->node, parent, p);
399 	rb_insert_color(&ret->node, &locks_root->root);
400 	return ret;
401 }
402 
403 /*
404  * Search for a full stripe lock of a block group
405  *
406  * Return pointer to existing full stripe lock if found
407  * Return NULL if not found
408  */
409 static struct full_stripe_lock *search_full_stripe_lock(
410 		struct btrfs_full_stripe_locks_tree *locks_root,
411 		u64 fstripe_logical)
412 {
413 	struct rb_node *node;
414 	struct full_stripe_lock *entry;
415 
416 	WARN_ON(!mutex_is_locked(&locks_root->lock));
417 
418 	node = locks_root->root.rb_node;
419 	while (node) {
420 		entry = rb_entry(node, struct full_stripe_lock, node);
421 		if (fstripe_logical < entry->logical)
422 			node = node->rb_left;
423 		else if (fstripe_logical > entry->logical)
424 			node = node->rb_right;
425 		else
426 			return entry;
427 	}
428 	return NULL;
429 }
430 
431 /*
432  * Helper to get full stripe logical from a normal bytenr.
433  *
434  * Caller must ensure @cache is a RAID56 block group.
435  */
436 static u64 get_full_stripe_logical(struct btrfs_block_group_cache *cache,
437 				   u64 bytenr)
438 {
439 	u64 ret;
440 
441 	/*
442 	 * Due to chunk item size limit, full stripe length should not be
443 	 * larger than U32_MAX. Just a sanity check here.
444 	 */
445 	WARN_ON_ONCE(cache->full_stripe_len >= U32_MAX);
446 
447 	/*
448 	 * round_down() can only handle powers of 2, while a RAID56 full
449 	 * stripe length can be 64KiB * n, so we need to round down manually.
450 	 */
451 	ret = div64_u64(bytenr - cache->key.objectid, cache->full_stripe_len) *
452 		cache->full_stripe_len + cache->key.objectid;
453 	return ret;
454 }
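/*
 * A sketch of the rounding above (the numbers are purely illustrative):
 * for a RAID56 chunk starting at logical 1MiB with full_stripe_len of
 * 128KiB, a bytenr of 1MiB + 200KiB yields
 * div64_u64(200KiB, 128KiB) * 128KiB + 1MiB = 1MiB + 128KiB,
 * i.e. the start of the second full stripe.
 */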
455 
456 /*
457  * Lock a full stripe to avoid concurrent recovery and read
458  *
459  * It's only used for profiles with parities (RAID5/6), for other profiles it
460  * does nothing.
461  *
462  * Return 0 if we locked the full stripe covering @bytenr, with a mutex held.
463  * The caller must then call unlock_full_stripe() in the same context.
464  *
465  * Return <0 on error.
466  */
467 static int lock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr,
468 			    bool *locked_ret)
469 {
470 	struct btrfs_block_group_cache *bg_cache;
471 	struct btrfs_full_stripe_locks_tree *locks_root;
472 	struct full_stripe_lock *existing;
473 	u64 fstripe_start;
474 	int ret = 0;
475 
476 	*locked_ret = false;
477 	bg_cache = btrfs_lookup_block_group(fs_info, bytenr);
478 	if (!bg_cache) {
479 		ASSERT(0);
480 		return -ENOENT;
481 	}
482 
483 	/* Profiles not based on parity don't need full stripe lock */
484 	if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK))
485 		goto out;
486 	locks_root = &bg_cache->full_stripe_locks_root;
487 
488 	fstripe_start = get_full_stripe_logical(bg_cache, bytenr);
489 
490 	/* Now insert the full stripe lock */
491 	mutex_lock(&locks_root->lock);
492 	existing = insert_full_stripe_lock(locks_root, fstripe_start);
493 	mutex_unlock(&locks_root->lock);
494 	if (IS_ERR(existing)) {
495 		ret = PTR_ERR(existing);
496 		goto out;
497 	}
498 	mutex_lock(&existing->mutex);
499 	*locked_ret = true;
500 out:
501 	btrfs_put_block_group(bg_cache);
502 	return ret;
503 }
504 
505 /*
506  * Unlock a full stripe.
507  *
508  * NOTE: Caller must ensure it's the same context calling corresponding
509  * lock_full_stripe().
510  *
511  * Return 0 if we unlocked the full stripe without problem.
512  * Return <0 on error
513  */
514 static int unlock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr,
515 			      bool locked)
516 {
517 	struct btrfs_block_group_cache *bg_cache;
518 	struct btrfs_full_stripe_locks_tree *locks_root;
519 	struct full_stripe_lock *fstripe_lock;
520 	u64 fstripe_start;
521 	bool freeit = false;
522 	int ret = 0;
523 
524 	/* If we didn't acquire full stripe lock, no need to continue */
525 	if (!locked)
526 		return 0;
527 
528 	bg_cache = btrfs_lookup_block_group(fs_info, bytenr);
529 	if (!bg_cache) {
530 		ASSERT(0);
531 		return -ENOENT;
532 	}
533 	if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK))
534 		goto out;
535 
536 	locks_root = &bg_cache->full_stripe_locks_root;
537 	fstripe_start = get_full_stripe_logical(bg_cache, bytenr);
538 
539 	mutex_lock(&locks_root->lock);
540 	fstripe_lock = search_full_stripe_lock(locks_root, fstripe_start);
541 	/* Unpaired unlock_full_stripe() detected */
542 	if (!fstripe_lock) {
543 		WARN_ON(1);
544 		ret = -ENOENT;
545 		mutex_unlock(&locks_root->lock);
546 		goto out;
547 	}
548 
549 	if (fstripe_lock->refs == 0) {
550 		WARN_ON(1);
551 		btrfs_warn(fs_info, "full stripe lock at %llu refcount underflow",
552 			fstripe_lock->logical);
553 	} else {
554 		fstripe_lock->refs--;
555 	}
556 
557 	if (fstripe_lock->refs == 0) {
558 		rb_erase(&fstripe_lock->node, &locks_root->root);
559 		freeit = true;
560 	}
561 	mutex_unlock(&locks_root->lock);
562 
563 	mutex_unlock(&fstripe_lock->mutex);
564 	if (freeit)
565 		kfree(fstripe_lock);
566 out:
567 	btrfs_put_block_group(bg_cache);
568 	return ret;
569 }
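/*
 * The intended calling pattern, as used by scrub_handle_errored_block():
 *
 *	ret = lock_full_stripe(fs_info, logical, &full_stripe_locked);
 *	if (ret < 0)
 *		return ret;
 *	... recheck and repair the block ...
 *	ret = unlock_full_stripe(fs_info, logical, full_stripe_locked);
 */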
570 
571 /*
572  * used for workers that require transaction commits (i.e., for the
573  * NOCOW case)
574  */
575 static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx)
576 {
577 	struct btrfs_fs_info *fs_info = sctx->fs_info;
578 
579 	refcount_inc(&sctx->refs);
580 	/*
581 	 * increment scrubs_running to prevent cancel requests from
582 	 * completing as long as a worker is running. we must also
583 	 * increment scrubs_paused to prevent deadlocking on pause
584 	 * requests used for transaction commits (as the worker uses a
585 	 * transaction context). it is safe to regard the worker
586 	 * as paused for all practical matters. effectively, we only
587 	 * avoid cancellation requests from completing.
588 	 */
589 	mutex_lock(&fs_info->scrub_lock);
590 	atomic_inc(&fs_info->scrubs_running);
591 	atomic_inc(&fs_info->scrubs_paused);
592 	mutex_unlock(&fs_info->scrub_lock);
593 
594 	/*
595 	 * The check of the @scrubs_running == @scrubs_paused condition
596 	 * inside wait_event() is not an atomic operation,
597 	 * which means we may inc/dec @scrubs_running/paused
598 	 * at any time. Let's wake up @scrub_pause_wait as
599 	 * much as we can so a transaction commit is blocked less.
600 	 */
601 	wake_up(&fs_info->scrub_pause_wait);
602 
603 	atomic_inc(&sctx->workers_pending);
604 }
605 
606 /* used for workers that require transaction commits */
607 static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx)
608 {
609 	struct btrfs_fs_info *fs_info = sctx->fs_info;
610 
611 	/*
612 	 * see scrub_pending_trans_workers_inc() why we're pretending
613 	 * to be paused in the scrub counters
614 	 */
615 	mutex_lock(&fs_info->scrub_lock);
616 	atomic_dec(&fs_info->scrubs_running);
617 	atomic_dec(&fs_info->scrubs_paused);
618 	mutex_unlock(&fs_info->scrub_lock);
619 	atomic_dec(&sctx->workers_pending);
620 	wake_up(&fs_info->scrub_pause_wait);
621 	wake_up(&sctx->list_wait);
622 	scrub_put_ctx(sctx);
623 }
624 
625 static void scrub_free_csums(struct scrub_ctx *sctx)
626 {
627 	while (!list_empty(&sctx->csum_list)) {
628 		struct btrfs_ordered_sum *sum;
629 		sum = list_first_entry(&sctx->csum_list,
630 				       struct btrfs_ordered_sum, list);
631 		list_del(&sum->list);
632 		kfree(sum);
633 	}
634 }
635 
636 static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
637 {
638 	int i;
639 
640 	if (!sctx)
641 		return;
642 
643 	/* this can happen when scrub is cancelled */
644 	if (sctx->curr != -1) {
645 		struct scrub_bio *sbio = sctx->bios[sctx->curr];
646 
647 		for (i = 0; i < sbio->page_count; i++) {
648 			WARN_ON(!sbio->pagev[i]->page);
649 			scrub_block_put(sbio->pagev[i]->sblock);
650 		}
651 		bio_put(sbio->bio);
652 	}
653 
654 	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
655 		struct scrub_bio *sbio = sctx->bios[i];
656 
657 		if (!sbio)
658 			break;
659 		kfree(sbio);
660 	}
661 
662 	kfree(sctx->wr_curr_bio);
663 	scrub_free_csums(sctx);
664 	kfree(sctx);
665 }
666 
667 static void scrub_put_ctx(struct scrub_ctx *sctx)
668 {
669 	if (refcount_dec_and_test(&sctx->refs))
670 		scrub_free_ctx(sctx);
671 }
672 
673 static noinline_for_stack
674 struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
675 {
676 	struct scrub_ctx *sctx;
677 	int		i;
678 	struct btrfs_fs_info *fs_info = dev->fs_info;
679 
680 	sctx = kzalloc(sizeof(*sctx), GFP_KERNEL);
681 	if (!sctx)
682 		goto nomem;
683 	refcount_set(&sctx->refs, 1);
684 	sctx->is_dev_replace = is_dev_replace;
685 	sctx->pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
686 	sctx->curr = -1;
687 	sctx->fs_info = dev->fs_info;
688 	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
689 		struct scrub_bio *sbio;
690 
691 		sbio = kzalloc(sizeof(*sbio), GFP_KERNEL);
692 		if (!sbio)
693 			goto nomem;
694 		sctx->bios[i] = sbio;
695 
696 		sbio->index = i;
697 		sbio->sctx = sctx;
698 		sbio->page_count = 0;
699 		btrfs_init_work(&sbio->work, btrfs_scrub_helper,
700 				scrub_bio_end_io_worker, NULL, NULL);
701 
702 		if (i != SCRUB_BIOS_PER_SCTX - 1)
703 			sctx->bios[i]->next_free = i + 1;
704 		else
705 			sctx->bios[i]->next_free = -1;
706 	}
707 	sctx->first_free = 0;
708 	atomic_set(&sctx->bios_in_flight, 0);
709 	atomic_set(&sctx->workers_pending, 0);
710 	atomic_set(&sctx->cancel_req, 0);
711 	sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy);
712 	INIT_LIST_HEAD(&sctx->csum_list);
713 
714 	spin_lock_init(&sctx->list_lock);
715 	spin_lock_init(&sctx->stat_lock);
716 	init_waitqueue_head(&sctx->list_wait);
717 
718 	WARN_ON(sctx->wr_curr_bio != NULL);
719 	mutex_init(&sctx->wr_lock);
720 	sctx->wr_curr_bio = NULL;
721 	if (is_dev_replace) {
722 		WARN_ON(!fs_info->dev_replace.tgtdev);
723 		sctx->pages_per_wr_bio = SCRUB_PAGES_PER_WR_BIO;
724 		sctx->wr_tgtdev = fs_info->dev_replace.tgtdev;
725 		sctx->flush_all_writes = false;
726 	}
727 
728 	return sctx;
729 
730 nomem:
731 	scrub_free_ctx(sctx);
732 	return ERR_PTR(-ENOMEM);
733 }
734 
735 static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
736 				     void *warn_ctx)
737 {
738 	u64 isize;
739 	u32 nlink;
740 	int ret;
741 	int i;
742 	unsigned nofs_flag;
743 	struct extent_buffer *eb;
744 	struct btrfs_inode_item *inode_item;
745 	struct scrub_warning *swarn = warn_ctx;
746 	struct btrfs_fs_info *fs_info = swarn->dev->fs_info;
747 	struct inode_fs_paths *ipath = NULL;
748 	struct btrfs_root *local_root;
749 	struct btrfs_key root_key;
750 	struct btrfs_key key;
751 
752 	root_key.objectid = root;
753 	root_key.type = BTRFS_ROOT_ITEM_KEY;
754 	root_key.offset = (u64)-1;
755 	local_root = btrfs_read_fs_root_no_name(fs_info, &root_key);
756 	if (IS_ERR(local_root)) {
757 		ret = PTR_ERR(local_root);
758 		goto err;
759 	}
760 
761 	/*
762 	 * this makes the path point to (inum INODE_ITEM ioff)
763 	 */
764 	key.objectid = inum;
765 	key.type = BTRFS_INODE_ITEM_KEY;
766 	key.offset = 0;
767 
768 	ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0);
769 	if (ret) {
770 		btrfs_release_path(swarn->path);
771 		goto err;
772 	}
773 
774 	eb = swarn->path->nodes[0];
775 	inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
776 					struct btrfs_inode_item);
777 	isize = btrfs_inode_size(eb, inode_item);
778 	nlink = btrfs_inode_nlink(eb, inode_item);
779 	btrfs_release_path(swarn->path);
780 
781 	/*
782 	 * init_ipath might indirectly call vmalloc, or use GFP_KERNEL. Scrub
783 	 * uses GFP_NOFS in this context, so we keep it consistent but it does
784 	 * not seem to be strictly necessary.
785 	 */
786 	nofs_flag = memalloc_nofs_save();
787 	ipath = init_ipath(4096, local_root, swarn->path);
788 	memalloc_nofs_restore(nofs_flag);
789 	if (IS_ERR(ipath)) {
790 		ret = PTR_ERR(ipath);
791 		ipath = NULL;
792 		goto err;
793 	}
794 	ret = paths_from_inode(inum, ipath);
795 
796 	if (ret < 0)
797 		goto err;
798 
799 	/*
800 	 * we deliberately ignore the fact that ipath might have been too
801 	 * small to hold all of the paths here
802 	 */
803 	for (i = 0; i < ipath->fspath->elem_cnt; ++i)
804 		btrfs_warn_in_rcu(fs_info,
805 				  "%s at logical %llu on dev %s, sector %llu, root %llu, inode %llu, offset %llu, length %llu, links %u (path: %s)",
806 				  swarn->errstr, swarn->logical,
807 				  rcu_str_deref(swarn->dev->name),
808 				  (unsigned long long)swarn->sector,
809 				  root, inum, offset,
810 				  min(isize - offset, (u64)PAGE_SIZE), nlink,
811 				  (char *)(unsigned long)ipath->fspath->val[i]);
812 
813 	free_ipath(ipath);
814 	return 0;
815 
816 err:
817 	btrfs_warn_in_rcu(fs_info,
818 			  "%s at logical %llu on dev %s, sector %llu, root %llu, inode %llu, offset %llu: path resolving failed with ret=%d",
819 			  swarn->errstr, swarn->logical,
820 			  rcu_str_deref(swarn->dev->name),
821 			  (unsigned long long)swarn->sector,
822 			  root, inum, offset, ret);
823 
824 	free_ipath(ipath);
825 	return 0;
826 }
827 
828 static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
829 {
830 	struct btrfs_device *dev;
831 	struct btrfs_fs_info *fs_info;
832 	struct btrfs_path *path;
833 	struct btrfs_key found_key;
834 	struct extent_buffer *eb;
835 	struct btrfs_extent_item *ei;
836 	struct scrub_warning swarn;
837 	unsigned long ptr = 0;
838 	u64 extent_item_pos;
839 	u64 flags = 0;
840 	u64 ref_root;
841 	u32 item_size;
842 	u8 ref_level = 0;
843 	int ret;
844 
845 	WARN_ON(sblock->page_count < 1);
846 	dev = sblock->pagev[0]->dev;
847 	fs_info = sblock->sctx->fs_info;
848 
849 	path = btrfs_alloc_path();
850 	if (!path)
851 		return;
852 
853 	swarn.sector = (sblock->pagev[0]->physical) >> 9;
854 	swarn.logical = sblock->pagev[0]->logical;
855 	swarn.errstr = errstr;
856 	swarn.dev = NULL;
857 
858 	ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
859 				  &flags);
860 	if (ret < 0)
861 		goto out;
862 
863 	extent_item_pos = swarn.logical - found_key.objectid;
864 	swarn.extent_item_size = found_key.offset;
865 
866 	eb = path->nodes[0];
867 	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
868 	item_size = btrfs_item_size_nr(eb, path->slots[0]);
869 
870 	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
871 		do {
872 			ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
873 						      item_size, &ref_root,
874 						      &ref_level);
875 			btrfs_warn_in_rcu(fs_info,
876 				"%s at logical %llu on dev %s, sector %llu: metadata %s (level %d) in tree %llu",
877 				errstr, swarn.logical,
878 				rcu_str_deref(dev->name),
879 				(unsigned long long)swarn.sector,
880 				ref_level ? "node" : "leaf",
881 				ret < 0 ? -1 : ref_level,
882 				ret < 0 ? -1 : ref_root);
883 		} while (ret != 1);
884 		btrfs_release_path(path);
885 	} else {
886 		btrfs_release_path(path);
887 		swarn.path = path;
888 		swarn.dev = dev;
889 		iterate_extent_inodes(fs_info, found_key.objectid,
890 					extent_item_pos, 1,
891 					scrub_print_warning_inode, &swarn);
892 	}
893 
894 out:
895 	btrfs_free_path(path);
896 }
897 
898 static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)
899 {
900 	struct page *page = NULL;
901 	unsigned long index;
902 	struct scrub_fixup_nodatasum *fixup = fixup_ctx;
903 	int ret;
904 	int corrected = 0;
905 	struct btrfs_key key;
906 	struct inode *inode = NULL;
907 	struct btrfs_fs_info *fs_info;
908 	u64 end = offset + PAGE_SIZE - 1;
909 	struct btrfs_root *local_root;
910 	int srcu_index;
911 
912 	key.objectid = root;
913 	key.type = BTRFS_ROOT_ITEM_KEY;
914 	key.offset = (u64)-1;
915 
916 	fs_info = fixup->root->fs_info;
917 	srcu_index = srcu_read_lock(&fs_info->subvol_srcu);
918 
919 	local_root = btrfs_read_fs_root_no_name(fs_info, &key);
920 	if (IS_ERR(local_root)) {
921 		srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
922 		return PTR_ERR(local_root);
923 	}
924 
925 	key.type = BTRFS_INODE_ITEM_KEY;
926 	key.objectid = inum;
927 	key.offset = 0;
928 	inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
929 	srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
930 	if (IS_ERR(inode))
931 		return PTR_ERR(inode);
932 
933 	index = offset >> PAGE_SHIFT;
934 
935 	page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
936 	if (!page) {
937 		ret = -ENOMEM;
938 		goto out;
939 	}
940 
941 	if (PageUptodate(page)) {
942 		if (PageDirty(page)) {
943 			/*
944 			 * we need to write the data to the defect sector. the
945 			 * data that was in that sector is not in memory,
946 			 * because the page was modified. we must not write the
947 			 * modified page to that sector.
948 			 *
949 			 * TODO: what could be done here: wait for the delalloc
950 			 *       runner to write out that page (might involve
951 			 *       COW) and see whether the sector is still
952 			 *       referenced afterwards.
953 			 *
954 			 * For the meantime, we'll treat this error as
955 			 * uncorrectable, although there is a chance that a
956 			 * later scrub will find the bad sector again and that
957 			 * there's no dirty page in memory, then.
958 			 */
959 			ret = -EIO;
960 			goto out;
961 		}
962 		ret = repair_io_failure(fs_info, inum, offset, PAGE_SIZE,
963 					fixup->logical, page,
964 					offset - page_offset(page),
965 					fixup->mirror_num);
966 		unlock_page(page);
967 		corrected = !ret;
968 	} else {
969 		/*
970 		 * we need to get good data first. the general readpage path
971 		 * will call repair_io_failure for us, we just have to make
972 		 * sure we read the bad mirror.
973 		 */
974 		ret = set_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
975 					EXTENT_DAMAGED);
976 		if (ret) {
977 			/* set_extent_bits should give proper error */
978 			WARN_ON(ret > 0);
979 			if (ret > 0)
980 				ret = -EFAULT;
981 			goto out;
982 		}
983 
984 		ret = extent_read_full_page(&BTRFS_I(inode)->io_tree, page,
985 						btrfs_get_extent,
986 						fixup->mirror_num);
987 		wait_on_page_locked(page);
988 
989 		corrected = !test_range_bit(&BTRFS_I(inode)->io_tree, offset,
990 						end, EXTENT_DAMAGED, 0, NULL);
991 		if (!corrected)
992 			clear_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
993 						EXTENT_DAMAGED);
994 	}
995 
996 out:
997 	if (page)
998 		put_page(page);
999 
1000 	iput(inode);
1001 
1002 	if (ret < 0)
1003 		return ret;
1004 
1005 	if (ret == 0 && corrected) {
1006 		/*
1007 		 * we only need to call readpage for one of the inodes belonging
1008 		 * to this extent. so make iterate_extent_inodes stop
1009 		 */
1010 		return 1;
1011 	}
1012 
1013 	return -EIO;
1014 }
1015 
1016 static void scrub_fixup_nodatasum(struct btrfs_work *work)
1017 {
1018 	struct btrfs_fs_info *fs_info;
1019 	int ret;
1020 	struct scrub_fixup_nodatasum *fixup;
1021 	struct scrub_ctx *sctx;
1022 	struct btrfs_trans_handle *trans = NULL;
1023 	struct btrfs_path *path;
1024 	int uncorrectable = 0;
1025 
1026 	fixup = container_of(work, struct scrub_fixup_nodatasum, work);
1027 	sctx = fixup->sctx;
1028 	fs_info = fixup->root->fs_info;
1029 
1030 	path = btrfs_alloc_path();
1031 	if (!path) {
1032 		spin_lock(&sctx->stat_lock);
1033 		++sctx->stat.malloc_errors;
1034 		spin_unlock(&sctx->stat_lock);
1035 		uncorrectable = 1;
1036 		goto out;
1037 	}
1038 
1039 	trans = btrfs_join_transaction(fixup->root);
1040 	if (IS_ERR(trans)) {
1041 		uncorrectable = 1;
1042 		goto out;
1043 	}
1044 
1045 	/*
1046 	 * the idea is to trigger a regular read through the standard path. we
1047 	 * read a page from the (failed) logical address by specifying the
1048 	 * corresponding copynum of the failed sector. thus, that readpage is
1049 	 * expected to fail.
1050 	 * that is the point where on-the-fly error correction will kick in
1051 	 * (once it's finished) and rewrite the failed sector if a good copy
1052 	 * can be found.
1053 	 */
1054 	ret = iterate_inodes_from_logical(fixup->logical, fs_info, path,
1055 					  scrub_fixup_readpage, fixup);
1056 	if (ret < 0) {
1057 		uncorrectable = 1;
1058 		goto out;
1059 	}
1060 	WARN_ON(ret != 1);
1061 
1062 	spin_lock(&sctx->stat_lock);
1063 	++sctx->stat.corrected_errors;
1064 	spin_unlock(&sctx->stat_lock);
1065 
1066 out:
1067 	if (trans && !IS_ERR(trans))
1068 		btrfs_end_transaction(trans);
1069 	if (uncorrectable) {
1070 		spin_lock(&sctx->stat_lock);
1071 		++sctx->stat.uncorrectable_errors;
1072 		spin_unlock(&sctx->stat_lock);
1073 		btrfs_dev_replace_stats_inc(
1074 			&fs_info->dev_replace.num_uncorrectable_read_errors);
1075 		btrfs_err_rl_in_rcu(fs_info,
1076 		    "unable to fixup (nodatasum) error at logical %llu on dev %s",
1077 			fixup->logical, rcu_str_deref(fixup->dev->name));
1078 	}
1079 
1080 	btrfs_free_path(path);
1081 	kfree(fixup);
1082 
1083 	scrub_pending_trans_workers_dec(sctx);
1084 }
1085 
1086 static inline void scrub_get_recover(struct scrub_recover *recover)
1087 {
1088 	refcount_inc(&recover->refs);
1089 }
1090 
1091 static inline void scrub_put_recover(struct btrfs_fs_info *fs_info,
1092 				     struct scrub_recover *recover)
1093 {
1094 	if (refcount_dec_and_test(&recover->refs)) {
1095 		btrfs_bio_counter_dec(fs_info);
1096 		btrfs_put_bbio(recover->bbio);
1097 		kfree(recover);
1098 	}
1099 }
1100 
1101 /*
1102  * scrub_handle_errored_block gets called when either verification of the
1103  * pages failed or the bio failed to read, e.g. with EIO. In the latter
1104  * case, this function handles all pages in the bio, even though only one
1105  * may be bad.
1106  * The goal of this function is to repair the errored block by using the
1107  * contents of one of the mirrors.
1108  */
1109 static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
1110 {
1111 	struct scrub_ctx *sctx = sblock_to_check->sctx;
1112 	struct btrfs_device *dev;
1113 	struct btrfs_fs_info *fs_info;
1114 	u64 length;
1115 	u64 logical;
1116 	unsigned int failed_mirror_index;
1117 	unsigned int is_metadata;
1118 	unsigned int have_csum;
1119 	struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */
1120 	struct scrub_block *sblock_bad;
1121 	int ret;
1122 	int mirror_index;
1123 	int page_num;
1124 	int success;
1125 	bool full_stripe_locked;
1126 	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
1127 				      DEFAULT_RATELIMIT_BURST);
1128 
1129 	BUG_ON(sblock_to_check->page_count < 1);
1130 	fs_info = sctx->fs_info;
1131 	if (sblock_to_check->pagev[0]->flags & BTRFS_EXTENT_FLAG_SUPER) {
1132 		/*
1133 		 * if we find an error in a super block, we just report it;
1134 		 * super blocks will get rewritten with the next transaction
1135 		 * commit anyway
1136 		 */
1137 		spin_lock(&sctx->stat_lock);
1138 		++sctx->stat.super_errors;
1139 		spin_unlock(&sctx->stat_lock);
1140 		return 0;
1141 	}
1142 	length = sblock_to_check->page_count * PAGE_SIZE;
1143 	logical = sblock_to_check->pagev[0]->logical;
1144 	BUG_ON(sblock_to_check->pagev[0]->mirror_num < 1);
1145 	failed_mirror_index = sblock_to_check->pagev[0]->mirror_num - 1;
1146 	is_metadata = !(sblock_to_check->pagev[0]->flags &
1147 			BTRFS_EXTENT_FLAG_DATA);
1148 	have_csum = sblock_to_check->pagev[0]->have_csum;
1149 	dev = sblock_to_check->pagev[0]->dev;
1150 
1151 	/*
1152 	 * For RAID5/6, races can happen between the scrub threads of different
1153 	 * devices. For data corruption, the parity and data threads will both
1154 	 * try to recover the data.
1155 	 * Such a race can lead to doubly counted csum errors, or even an
1156 	 * unrecoverable error.
1157 	 */
1158 	ret = lock_full_stripe(fs_info, logical, &full_stripe_locked);
1159 	if (ret < 0) {
1160 		spin_lock(&sctx->stat_lock);
1161 		if (ret == -ENOMEM)
1162 			sctx->stat.malloc_errors++;
1163 		sctx->stat.read_errors++;
1164 		sctx->stat.uncorrectable_errors++;
1165 		spin_unlock(&sctx->stat_lock);
1166 		return ret;
1167 	}
1168 
1169 	/*
1170 	 * read all mirrors one after the other. This includes
1171 	 * re-reading the extent or metadata block that failed (that was
1172 	 * the cause that this fixup code is called) another time,
1173 	 * page by page this time in order to know which pages
1174 	 * caused I/O errors and which ones are good (for all mirrors).
1175 	 * It is the goal to handle the situation when more than one
1176 	 * mirror contains I/O errors, but the errors do not
1177 	 * overlap, i.e. the data can be repaired by selecting the
1178 	 * pages from those mirrors without I/O error on the
1179 	 * particular pages. One example (with blocks >= 2 * PAGE_SIZE)
1180 	 * would be that mirror #1 has an I/O error on the first page,
1181 	 * the second page is good, and mirror #2 has an I/O error on
1182 	 * the second page, but the first page is good.
1183 	 * Then the first page of the first mirror can be repaired by
1184 	 * taking the first page of the second mirror, and the
1185 	 * second page of the second mirror can be repaired by
1186 	 * copying the contents of the 2nd page of the 1st mirror.
1187 	 * One more note: if the pages of one mirror contain I/O
1188 	 * errors, the checksum cannot be verified. In order to get
1189 	 * the best data for repairing, the first attempt is to find
1190 	 * a mirror without I/O errors and with a validated checksum.
1191 	 * Only if this is not possible, the pages are picked from
1192 	 * mirrors with I/O errors without considering the checksum.
1193 	 * If the latter is the case, at the end, the checksum of the
1194 	 * repaired area is verified in order to correctly maintain
1195 	 * the statistics.
1196 	 */
1197 
1198 	sblocks_for_recheck = kcalloc(BTRFS_MAX_MIRRORS,
1199 				      sizeof(*sblocks_for_recheck), GFP_NOFS);
1200 	if (!sblocks_for_recheck) {
1201 		spin_lock(&sctx->stat_lock);
1202 		sctx->stat.malloc_errors++;
1203 		sctx->stat.read_errors++;
1204 		sctx->stat.uncorrectable_errors++;
1205 		spin_unlock(&sctx->stat_lock);
1206 		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
1207 		goto out;
1208 	}
1209 
1210 	/* setup the context, map the logical blocks and alloc the pages */
1211 	ret = scrub_setup_recheck_block(sblock_to_check, sblocks_for_recheck);
1212 	if (ret) {
1213 		spin_lock(&sctx->stat_lock);
1214 		sctx->stat.read_errors++;
1215 		sctx->stat.uncorrectable_errors++;
1216 		spin_unlock(&sctx->stat_lock);
1217 		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
1218 		goto out;
1219 	}
1220 	BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
1221 	sblock_bad = sblocks_for_recheck + failed_mirror_index;
1222 
1223 	/* build and submit the bios for the failed mirror, check checksums */
1224 	scrub_recheck_block(fs_info, sblock_bad, 1);
1225 
1226 	if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
1227 	    sblock_bad->no_io_error_seen) {
1228 		/*
1229 		 * the error disappeared after reading page by page, or
1230 		 * the area was part of a huge bio and other parts of the
1231 		 * bio caused I/O errors, or the block layer merged several
1232 		 * read requests into one and the error is caused by a
1233 		 * different bio (usually one of the two latter cases is
1234 		 * the cause)
1235 		 */
1236 		spin_lock(&sctx->stat_lock);
1237 		sctx->stat.unverified_errors++;
1238 		sblock_to_check->data_corrected = 1;
1239 		spin_unlock(&sctx->stat_lock);
1240 
1241 		if (sctx->is_dev_replace)
1242 			scrub_write_block_to_dev_replace(sblock_bad);
1243 		goto out;
1244 	}
1245 
1246 	if (!sblock_bad->no_io_error_seen) {
1247 		spin_lock(&sctx->stat_lock);
1248 		sctx->stat.read_errors++;
1249 		spin_unlock(&sctx->stat_lock);
1250 		if (__ratelimit(&_rs))
1251 			scrub_print_warning("i/o error", sblock_to_check);
1252 		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
1253 	} else if (sblock_bad->checksum_error) {
1254 		spin_lock(&sctx->stat_lock);
1255 		sctx->stat.csum_errors++;
1256 		spin_unlock(&sctx->stat_lock);
1257 		if (__ratelimit(&_rs))
1258 			scrub_print_warning("checksum error", sblock_to_check);
1259 		btrfs_dev_stat_inc_and_print(dev,
1260 					     BTRFS_DEV_STAT_CORRUPTION_ERRS);
1261 	} else if (sblock_bad->header_error) {
1262 		spin_lock(&sctx->stat_lock);
1263 		sctx->stat.verify_errors++;
1264 		spin_unlock(&sctx->stat_lock);
1265 		if (__ratelimit(&_rs))
1266 			scrub_print_warning("checksum/header error",
1267 					    sblock_to_check);
1268 		if (sblock_bad->generation_error)
1269 			btrfs_dev_stat_inc_and_print(dev,
1270 				BTRFS_DEV_STAT_GENERATION_ERRS);
1271 		else
1272 			btrfs_dev_stat_inc_and_print(dev,
1273 				BTRFS_DEV_STAT_CORRUPTION_ERRS);
1274 	}
1275 
1276 	if (sctx->readonly) {
1277 		ASSERT(!sctx->is_dev_replace);
1278 		goto out;
1279 	}
1280 
1281 	/*
1282 	 * NOTE: Even for the nodatasum case, it's still possible that it's a
1283 	 * compressed data extent, thus scrub_fixup_nodatasum(), which writes
1284 	 * inode page cache onto disk, could cause serious data corruption.
1285 	 *
1286 	 * So here we can only read from disk, and hope our recovery reaches
1287 	 * the disk before the newer write.
1288 	 */
1289 	if (0 && !is_metadata && !have_csum) {
1290 		struct scrub_fixup_nodatasum *fixup_nodatasum;
1291 
1292 		WARN_ON(sctx->is_dev_replace);
1293 
1294 		/*
1295 		 * !is_metadata and !have_csum, this means that the data
1296 		 * might not be COWed, that it might be modified
1297 		 * concurrently. The general strategy to work on the
1298 		 * commit root does not help in the case when COW is not
1299 		 * used.
1300 		 */
1301 		fixup_nodatasum = kzalloc(sizeof(*fixup_nodatasum), GFP_NOFS);
1302 		if (!fixup_nodatasum)
1303 			goto did_not_correct_error;
1304 		fixup_nodatasum->sctx = sctx;
1305 		fixup_nodatasum->dev = dev;
1306 		fixup_nodatasum->logical = logical;
1307 		fixup_nodatasum->root = fs_info->extent_root;
1308 		fixup_nodatasum->mirror_num = failed_mirror_index + 1;
1309 		scrub_pending_trans_workers_inc(sctx);
1310 		btrfs_init_work(&fixup_nodatasum->work, btrfs_scrub_helper,
1311 				scrub_fixup_nodatasum, NULL, NULL);
1312 		btrfs_queue_work(fs_info->scrub_workers,
1313 				 &fixup_nodatasum->work);
1314 		goto out;
1315 	}
1316 
1317 	/*
1318 	 * now build and submit the bios for the other mirrors, check
1319 	 * checksums.
1320 	 * First try to pick the mirror which is completely without I/O
1321 	 * errors and also does not have a checksum error.
1322 	 * If one is found, and if a checksum is present, the full block
1323 	 * that is known to contain an error is rewritten. Afterwards
1324 	 * the block is known to be corrected.
1325 	 * If a mirror is found which is completely correct, and no
1326 	 * checksum is present, only those pages are rewritten that had
1327 	 * an I/O error in the block to be repaired, since it cannot be
1328 	 * determined, which copy of the other pages is better (and it
1329 	 * could happen otherwise that a correct page would be
1330 	 * overwritten by a bad one).
1331 	 */
1332 	for (mirror_index = 0; ;mirror_index++) {
1333 		struct scrub_block *sblock_other;
1334 
1335 		if (mirror_index == failed_mirror_index)
1336 			continue;
1337 
1338 		/* raid56's mirror can be more than BTRFS_MAX_MIRRORS */
1339 		if (!scrub_is_page_on_raid56(sblock_bad->pagev[0])) {
1340 			if (mirror_index >= BTRFS_MAX_MIRRORS)
1341 				break;
1342 			if (!sblocks_for_recheck[mirror_index].page_count)
1343 				break;
1344 
1345 			sblock_other = sblocks_for_recheck + mirror_index;
1346 		} else {
1347 			struct scrub_recover *r = sblock_bad->pagev[0]->recover;
1348 			int max_allowed = r->bbio->num_stripes -
1349 						r->bbio->num_tgtdevs;
1350 
1351 			if (mirror_index >= max_allowed)
1352 				break;
1353 			if (!sblocks_for_recheck[1].page_count)
1354 				break;
1355 
1356 			ASSERT(failed_mirror_index == 0);
1357 			sblock_other = sblocks_for_recheck + 1;
1358 			sblock_other->pagev[0]->mirror_num = 1 + mirror_index;
1359 		}
1360 
1361 		/* build and submit the bios, check checksums */
1362 		scrub_recheck_block(fs_info, sblock_other, 0);
1363 
1364 		if (!sblock_other->header_error &&
1365 		    !sblock_other->checksum_error &&
1366 		    sblock_other->no_io_error_seen) {
1367 			if (sctx->is_dev_replace) {
1368 				scrub_write_block_to_dev_replace(sblock_other);
1369 				goto corrected_error;
1370 			} else {
1371 				ret = scrub_repair_block_from_good_copy(
1372 						sblock_bad, sblock_other);
1373 				if (!ret)
1374 					goto corrected_error;
1375 			}
1376 		}
1377 	}
1378 
1379 	if (sblock_bad->no_io_error_seen && !sctx->is_dev_replace)
1380 		goto did_not_correct_error;
1381 
1382 	/*
1383 	 * In case of I/O errors in the area that is supposed to be
1384 	 * repaired, continue by picking good copies of those pages.
1385 	 * Select the good pages from mirrors to rewrite bad pages from
1386 	 * the area to fix. Afterwards verify the checksum of the block
1387 	 * that is supposed to be repaired. This verification step is
1388 	 * only done for the purpose of statistic counting and for the
1389 	 * final scrub report, whether errors remain.
1390 	 * A perfect algorithm could make use of the checksum and try
1391 	 * all possible combinations of pages from the different mirrors
1392 	 * until the checksum verification succeeds. For example, when
1393 	 * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
1394 	 * of mirror #2 is readable but the final checksum test fails,
1395 	 * then the 2nd page of mirror #3 could be tried, to see whether
1396 	 * the final checksum then succeeds. But this would be a rare
1397 	 * exception and is therefore not implemented. At least it is
1398 	 * ensured that the good copy is never overwritten.
1399 	 * A more useful improvement would be to pick the sectors
1400 	 * without I/O error based on sector sizes (512 bytes on legacy
1401 	 * disks) instead of on PAGE_SIZE. Then maybe 512 bytes of one
1402 	 * mirror could be repaired by taking 512 bytes of a different
1403 	 * mirror, even if other 512 byte sectors in the same PAGE_SIZE
1404 	 * area are unreadable.
1405 	 */
1406 	success = 1;
1407 	for (page_num = 0; page_num < sblock_bad->page_count;
1408 	     page_num++) {
1409 		struct scrub_page *page_bad = sblock_bad->pagev[page_num];
1410 		struct scrub_block *sblock_other = NULL;
1411 
1412 		/* skip no-io-error page in scrub */
1413 		if (!page_bad->io_error && !sctx->is_dev_replace)
1414 			continue;
1415 
1416 		/* try to find no-io-error page in mirrors */
1417 		if (page_bad->io_error) {
1418 			for (mirror_index = 0;
1419 			     mirror_index < BTRFS_MAX_MIRRORS &&
1420 			     sblocks_for_recheck[mirror_index].page_count > 0;
1421 			     mirror_index++) {
1422 				if (!sblocks_for_recheck[mirror_index].
1423 				    pagev[page_num]->io_error) {
1424 					sblock_other = sblocks_for_recheck +
1425 						       mirror_index;
1426 					break;
1427 				}
1428 			}
1429 			if (!sblock_other)
1430 				success = 0;
1431 		}
1432 
1433 		if (sctx->is_dev_replace) {
1434 			/*
1435 			 * did not find a mirror to fetch the page
1436 			 * from. scrub_write_page_to_dev_replace()
1437 			 * handles this case (page->io_error), by
1438 			 * filling the block with zeros before
1439 			 * submitting the write request
1440 			 */
1441 			if (!sblock_other)
1442 				sblock_other = sblock_bad;
1443 
1444 			if (scrub_write_page_to_dev_replace(sblock_other,
1445 							    page_num) != 0) {
1446 				btrfs_dev_replace_stats_inc(
1447 					&fs_info->dev_replace.num_write_errors);
1448 				success = 0;
1449 			}
1450 		} else if (sblock_other) {
1451 			ret = scrub_repair_page_from_good_copy(sblock_bad,
1452 							       sblock_other,
1453 							       page_num, 0);
1454 			if (0 == ret)
1455 				page_bad->io_error = 0;
1456 			else
1457 				success = 0;
1458 		}
1459 	}
1460 
1461 	if (success && !sctx->is_dev_replace) {
1462 		if (is_metadata || have_csum) {
1463 			/*
1464 			 * need to verify the checksum now that all
1465 			 * sectors on disk are repaired (the write
1466 			 * request for data to be repaired is on its way).
1467 			 * Just be lazy and use scrub_recheck_block()
1468 			 * which re-reads the data before the checksum
1469 			 * is verified, but most likely the data comes out
1470 			 * of the page cache.
1471 			 */
1472 			scrub_recheck_block(fs_info, sblock_bad, 1);
1473 			if (!sblock_bad->header_error &&
1474 			    !sblock_bad->checksum_error &&
1475 			    sblock_bad->no_io_error_seen)
1476 				goto corrected_error;
1477 			else
1478 				goto did_not_correct_error;
1479 		} else {
1480 corrected_error:
1481 			spin_lock(&sctx->stat_lock);
1482 			sctx->stat.corrected_errors++;
1483 			sblock_to_check->data_corrected = 1;
1484 			spin_unlock(&sctx->stat_lock);
1485 			btrfs_err_rl_in_rcu(fs_info,
1486 				"fixed up error at logical %llu on dev %s",
1487 				logical, rcu_str_deref(dev->name));
1488 		}
1489 	} else {
1490 did_not_correct_error:
1491 		spin_lock(&sctx->stat_lock);
1492 		sctx->stat.uncorrectable_errors++;
1493 		spin_unlock(&sctx->stat_lock);
1494 		btrfs_err_rl_in_rcu(fs_info,
1495 			"unable to fixup (regular) error at logical %llu on dev %s",
1496 			logical, rcu_str_deref(dev->name));
1497 	}
1498 
1499 out:
1500 	if (sblocks_for_recheck) {
1501 		for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS;
1502 		     mirror_index++) {
1503 			struct scrub_block *sblock = sblocks_for_recheck +
1504 						     mirror_index;
1505 			struct scrub_recover *recover;
1506 			int page_index;
1507 
1508 			for (page_index = 0; page_index < sblock->page_count;
1509 			     page_index++) {
1510 				sblock->pagev[page_index]->sblock = NULL;
1511 				recover = sblock->pagev[page_index]->recover;
1512 				if (recover) {
1513 					scrub_put_recover(fs_info, recover);
1514 					sblock->pagev[page_index]->recover =
1515 									NULL;
1516 				}
1517 				scrub_page_put(sblock->pagev[page_index]);
1518 			}
1519 		}
1520 		kfree(sblocks_for_recheck);
1521 	}
1522 
1523 	ret = unlock_full_stripe(fs_info, logical, full_stripe_locked);
1524 	if (ret < 0)
1525 		return ret;
1526 	return 0;
1527 }
1528 
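/*
 * Number of "mirrors" a recheck may try. For RAID5 this is the direct read
 * plus the copy rebuilt from parity; RAID6 additionally allows a rebuild
 * using the Q stripe. For all other profiles it is simply the number of
 * stripes (copies).
 */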
1529 static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio)
1530 {
1531 	if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
1532 		return 2;
1533 	else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
1534 		return 3;
1535 	else
1536 		return (int)bbio->num_stripes;
1537 }
1538 
1539 static inline void scrub_stripe_index_and_offset(u64 logical, u64 map_type,
1540 						 u64 *raid_map,
1541 						 u64 mapped_length,
1542 						 int nstripes, int mirror,
1543 						 int *stripe_index,
1544 						 u64 *stripe_offset)
1545 {
1546 	int i;
1547 
1548 	if (map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
1549 		/* RAID5/6 */
1550 		for (i = 0; i < nstripes; i++) {
1551 			if (raid_map[i] == RAID6_Q_STRIPE ||
1552 			    raid_map[i] == RAID5_P_STRIPE)
1553 				continue;
1554 
1555 			if (logical >= raid_map[i] &&
1556 			    logical < raid_map[i] + mapped_length)
1557 				break;
1558 		}
1559 
1560 		*stripe_index = i;
1561 		*stripe_offset = logical - raid_map[i];
1562 	} else {
1563 		/* The other RAID type */
1564 		*stripe_index = mirror;
1565 		*stripe_offset = 0;
1566 	}
1567 }
1568 
1569 static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
1570 				     struct scrub_block *sblocks_for_recheck)
1571 {
1572 	struct scrub_ctx *sctx = original_sblock->sctx;
1573 	struct btrfs_fs_info *fs_info = sctx->fs_info;
1574 	u64 length = original_sblock->page_count * PAGE_SIZE;
1575 	u64 logical = original_sblock->pagev[0]->logical;
1576 	u64 generation = original_sblock->pagev[0]->generation;
1577 	u64 flags = original_sblock->pagev[0]->flags;
1578 	u64 have_csum = original_sblock->pagev[0]->have_csum;
1579 	struct scrub_recover *recover;
1580 	struct btrfs_bio *bbio;
1581 	u64 sublen;
1582 	u64 mapped_length;
1583 	u64 stripe_offset;
1584 	int stripe_index;
1585 	int page_index = 0;
1586 	int mirror_index;
1587 	int nmirrors;
1588 	int ret;
1589 
1590 	/*
1591 	 * note: the two members refs and outstanding_pages
1592 	 * are not used (and not set) in the blocks that are used for
1593 	 * the recheck procedure
1594 	 */
1595 
1596 	while (length > 0) {
1597 		sublen = min_t(u64, length, PAGE_SIZE);
1598 		mapped_length = sublen;
1599 		bbio = NULL;
1600 
1601 		/*
1602 		 * with a length of PAGE_SIZE, each returned stripe
1603 		 * represents one mirror
1604 		 */
1605 		btrfs_bio_counter_inc_blocked(fs_info);
1606 		ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
1607 				logical, &mapped_length, &bbio);
1608 		if (ret || !bbio || mapped_length < sublen) {
1609 			btrfs_put_bbio(bbio);
1610 			btrfs_bio_counter_dec(fs_info);
1611 			return -EIO;
1612 		}
1613 
1614 		recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS);
1615 		if (!recover) {
1616 			btrfs_put_bbio(bbio);
1617 			btrfs_bio_counter_dec(fs_info);
1618 			return -ENOMEM;
1619 		}
1620 
1621 		refcount_set(&recover->refs, 1);
1622 		recover->bbio = bbio;
1623 		recover->map_length = mapped_length;
1624 
1625 		BUG_ON(page_index >= SCRUB_MAX_PAGES_PER_BLOCK);
1626 
1627 		nmirrors = min(scrub_nr_raid_mirrors(bbio), BTRFS_MAX_MIRRORS);
1628 
1629 		for (mirror_index = 0; mirror_index < nmirrors;
1630 		     mirror_index++) {
1631 			struct scrub_block *sblock;
1632 			struct scrub_page *page;
1633 
1634 			sblock = sblocks_for_recheck + mirror_index;
1635 			sblock->sctx = sctx;
1636 
1637 			page = kzalloc(sizeof(*page), GFP_NOFS);
1638 			if (!page) {
1639 leave_nomem:
1640 				spin_lock(&sctx->stat_lock);
1641 				sctx->stat.malloc_errors++;
1642 				spin_unlock(&sctx->stat_lock);
1643 				scrub_put_recover(fs_info, recover);
1644 				return -ENOMEM;
1645 			}
1646 			scrub_page_get(page);
1647 			sblock->pagev[page_index] = page;
1648 			page->sblock = sblock;
1649 			page->flags = flags;
1650 			page->generation = generation;
1651 			page->logical = logical;
1652 			page->have_csum = have_csum;
1653 			if (have_csum)
1654 				memcpy(page->csum,
1655 				       original_sblock->pagev[0]->csum,
1656 				       sctx->csum_size);
1657 
1658 			scrub_stripe_index_and_offset(logical,
1659 						      bbio->map_type,
1660 						      bbio->raid_map,
1661 						      mapped_length,
1662 						      bbio->num_stripes -
1663 						      bbio->num_tgtdevs,
1664 						      mirror_index,
1665 						      &stripe_index,
1666 						      &stripe_offset);
1667 			page->physical = bbio->stripes[stripe_index].physical +
1668 					 stripe_offset;
1669 			page->dev = bbio->stripes[stripe_index].dev;
1670 
1671 			BUG_ON(page_index >= original_sblock->page_count);
1672 			page->physical_for_dev_replace =
1673 				original_sblock->pagev[page_index]->
1674 				physical_for_dev_replace;
1675 			/* for missing devices, dev->bdev is NULL */
1676 			page->mirror_num = mirror_index + 1;
1677 			sblock->page_count++;
1678 			page->page = alloc_page(GFP_NOFS);
1679 			if (!page->page)
1680 				goto leave_nomem;
1681 
1682 			scrub_get_recover(recover);
1683 			page->recover = recover;
1684 		}
1685 		scrub_put_recover(fs_info, recover);
1686 		length -= sublen;
1687 		logical += sublen;
1688 		page_index++;
1689 	}
1690 
1691 	return 0;
1692 }
1693 
1694 struct scrub_bio_ret {
1695 	struct completion event;
1696 	blk_status_t status;
1697 };
1698 
1699 static void scrub_bio_wait_endio(struct bio *bio)
1700 {
1701 	struct scrub_bio_ret *ret = bio->bi_private;
1702 
1703 	ret->status = bio->bi_status;
1704 	complete(&ret->event);
1705 }
1706 
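/*
 * Read one page of a RAID5/6 block by routing the bio through the
 * parity recovery code and waiting synchronously on the completion
 * set up in scrub_bio_wait_endio().
 */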
1707 static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
1708 					struct bio *bio,
1709 					struct scrub_page *page)
1710 {
1711 	struct scrub_bio_ret done;
1712 	int ret;
1713 	int mirror_num;
1714 
1715 	init_completion(&done.event);
1716 	done.status = 0;
1717 	bio->bi_iter.bi_sector = page->logical >> 9;
1718 	bio->bi_private = &done;
1719 	bio->bi_end_io = scrub_bio_wait_endio;
1720 
1721 	mirror_num = page->sblock->pagev[0]->mirror_num;
1722 	ret = raid56_parity_recover(fs_info, bio, page->recover->bbio,
1723 				    page->recover->map_length,
1724 				    mirror_num, 0);
1725 	if (ret)
1726 		return ret;
1727 
1728 	wait_for_completion_io(&done.event);
1729 	if (done.status)
1730 		return -EIO;
1731 
1732 	return 0;
1733 }
1734 
1735 /*
1736  * This function checks the on-disk data for checksum errors, header
1737  * errors and read I/O errors. If any I/O error happens, the exact pages
1738  * that are affected are marked as bad. The goal is to enable scrub to
1739  * take the pages that are not errored from all the mirrors so that the
1740  * pages that are errored in the just handled mirror can be repaired.
1741  */
1742 static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
1743 				struct scrub_block *sblock,
1744 				int retry_failed_mirror)
1745 {
1746 	int page_num;
1747 
1748 	sblock->no_io_error_seen = 1;
1749 
1750 	for (page_num = 0; page_num < sblock->page_count; page_num++) {
1751 		struct bio *bio;
1752 		struct scrub_page *page = sblock->pagev[page_num];
1753 
1754 		if (page->dev->bdev == NULL) {
1755 			page->io_error = 1;
1756 			sblock->no_io_error_seen = 0;
1757 			continue;
1758 		}
1759 
1760 		WARN_ON(!page->page);
1761 		bio = btrfs_io_bio_alloc(1);
1762 		bio_set_dev(bio, page->dev->bdev);
1763 
1764 		bio_add_page(bio, page->page, PAGE_SIZE, 0);
1765 		if (!retry_failed_mirror && scrub_is_page_on_raid56(page)) {
1766 			if (scrub_submit_raid56_bio_wait(fs_info, bio, page)) {
1767 				page->io_error = 1;
1768 				sblock->no_io_error_seen = 0;
1769 			}
1770 		} else {
1771 			bio->bi_iter.bi_sector = page->physical >> 9;
1772 			bio_set_op_attrs(bio, REQ_OP_READ, 0);
1773 
1774 			if (btrfsic_submit_bio_wait(bio)) {
1775 				page->io_error = 1;
1776 				sblock->no_io_error_seen = 0;
1777 			}
1778 		}
1779 
1780 		bio_put(bio);
1781 	}
1782 
1783 	if (sblock->no_io_error_seen)
1784 		scrub_recheck_block_checksum(sblock);
1785 }
1786 
1787 static inline int scrub_check_fsid(u8 fsid[],
1788 				   struct scrub_page *spage)
1789 {
1790 	struct btrfs_fs_devices *fs_devices = spage->dev->fs_devices;
1791 	int ret;
1792 
1793 	ret = memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
1794 	return !ret;
1795 }
1796 
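/*
 * Recompute and verify the checksum of a block that was read without
 * I/O errors: data blocks go through scrub_checksum_data(), everything
 * else is handled as a tree block and also gets its header verified.
 */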
1797 static void scrub_recheck_block_checksum(struct scrub_block *sblock)
1798 {
1799 	sblock->header_error = 0;
1800 	sblock->checksum_error = 0;
1801 	sblock->generation_error = 0;
1802 
1803 	if (sblock->pagev[0]->flags & BTRFS_EXTENT_FLAG_DATA)
1804 		scrub_checksum_data(sblock);
1805 	else
1806 		scrub_checksum_tree_block(sblock);
1807 }
1808 
1809 static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
1810 					     struct scrub_block *sblock_good)
1811 {
1812 	int page_num;
1813 	int ret = 0;
1814 
1815 	for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
1816 		int ret_sub;
1817 
1818 		ret_sub = scrub_repair_page_from_good_copy(sblock_bad,
1819 							   sblock_good,
1820 							   page_num, 1);
1821 		if (ret_sub)
1822 			ret = ret_sub;
1823 	}
1824 
1825 	return ret;
1826 }
1827 
1828 static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
1829 					    struct scrub_block *sblock_good,
1830 					    int page_num, int force_write)
1831 {
1832 	struct scrub_page *page_bad = sblock_bad->pagev[page_num];
1833 	struct scrub_page *page_good = sblock_good->pagev[page_num];
1834 	struct btrfs_fs_info *fs_info = sblock_bad->sctx->fs_info;
1835 
1836 	BUG_ON(page_bad->page == NULL);
1837 	BUG_ON(page_good->page == NULL);
1838 	if (force_write || sblock_bad->header_error ||
1839 	    sblock_bad->checksum_error || page_bad->io_error) {
1840 		struct bio *bio;
1841 		int ret;
1842 
1843 		if (!page_bad->dev->bdev) {
1844 			btrfs_warn_rl(fs_info,
1845 				"scrub_repair_page_from_good_copy(bdev == NULL) is unexpected");
1846 			return -EIO;
1847 		}
1848 
1849 		bio = btrfs_io_bio_alloc(1);
1850 		bio_set_dev(bio, page_bad->dev->bdev);
1851 		bio->bi_iter.bi_sector = page_bad->physical >> 9;
1852 		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
1853 
1854 		ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
1855 		if (PAGE_SIZE != ret) {
1856 			bio_put(bio);
1857 			return -EIO;
1858 		}
1859 
1860 		if (btrfsic_submit_bio_wait(bio)) {
1861 			btrfs_dev_stat_inc_and_print(page_bad->dev,
1862 				BTRFS_DEV_STAT_WRITE_ERRS);
1863 			btrfs_dev_replace_stats_inc(
1864 				&fs_info->dev_replace.num_write_errors);
1865 			bio_put(bio);
1866 			return -EIO;
1867 		}
1868 		bio_put(bio);
1869 	}
1870 
1871 	return 0;
1872 }
1873 
1874 static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
1875 {
1876 	struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;
1877 	int page_num;
1878 
1879 	/*
1880 	 * This block is used for checking the parity on the source device,
1881 	 * so the data need not be written to the destination device.
1882 	 */
1883 	if (sblock->sparity)
1884 		return;
1885 
1886 	for (page_num = 0; page_num < sblock->page_count; page_num++) {
1887 		int ret;
1888 
1889 		ret = scrub_write_page_to_dev_replace(sblock, page_num);
1890 		if (ret)
1891 			btrfs_dev_replace_stats_inc(
1892 				&fs_info->dev_replace.num_write_errors);
1893 	}
1894 }
1895 
1896 static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
1897 					   int page_num)
1898 {
1899 	struct scrub_page *spage = sblock->pagev[page_num];
1900 
1901 	BUG_ON(spage->page == NULL);
1902 	if (spage->io_error) {
1903 		void *mapped_buffer = kmap_atomic(spage->page);
1904 
1905 		clear_page(mapped_buffer);
1906 		flush_dcache_page(spage->page);
1907 		kunmap_atomic(mapped_buffer);
1908 	}
1909 	return scrub_add_page_to_wr_bio(sblock->sctx, spage);
1910 }
1911 
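/*
 * Queue @spage for writing to the dev-replace target.  Pages are
 * batched into sctx->wr_curr_bio as long as they are physically and
 * logically contiguous; otherwise, or when the bio is full, the
 * current write bio is submitted and a fresh one is started.
 */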
1912 static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
1913 				    struct scrub_page *spage)
1914 {
1915 	struct scrub_bio *sbio;
1916 	int ret;
1917 
1918 	mutex_lock(&sctx->wr_lock);
1919 again:
1920 	if (!sctx->wr_curr_bio) {
1921 		sctx->wr_curr_bio = kzalloc(sizeof(*sctx->wr_curr_bio),
1922 					      GFP_KERNEL);
1923 		if (!sctx->wr_curr_bio) {
1924 			mutex_unlock(&sctx->wr_lock);
1925 			return -ENOMEM;
1926 		}
1927 		sctx->wr_curr_bio->sctx = sctx;
1928 		sctx->wr_curr_bio->page_count = 0;
1929 	}
1930 	sbio = sctx->wr_curr_bio;
1931 	if (sbio->page_count == 0) {
1932 		struct bio *bio;
1933 
1934 		sbio->physical = spage->physical_for_dev_replace;
1935 		sbio->logical = spage->logical;
1936 		sbio->dev = sctx->wr_tgtdev;
1937 		bio = sbio->bio;
1938 		if (!bio) {
1939 			bio = btrfs_io_bio_alloc(sctx->pages_per_wr_bio);
1940 			sbio->bio = bio;
1941 		}
1942 
1943 		bio->bi_private = sbio;
1944 		bio->bi_end_io = scrub_wr_bio_end_io;
1945 		bio_set_dev(bio, sbio->dev->bdev);
1946 		bio->bi_iter.bi_sector = sbio->physical >> 9;
1947 		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
1948 		sbio->status = 0;
1949 	} else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
1950 		   spage->physical_for_dev_replace ||
1951 		   sbio->logical + sbio->page_count * PAGE_SIZE !=
1952 		   spage->logical) {
1953 		scrub_wr_submit(sctx);
1954 		goto again;
1955 	}
1956 
1957 	ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
1958 	if (ret != PAGE_SIZE) {
1959 		if (sbio->page_count < 1) {
1960 			bio_put(sbio->bio);
1961 			sbio->bio = NULL;
1962 			mutex_unlock(&sctx->wr_lock);
1963 			return -EIO;
1964 		}
1965 		scrub_wr_submit(sctx);
1966 		goto again;
1967 	}
1968 
1969 	sbio->pagev[sbio->page_count] = spage;
1970 	scrub_page_get(spage);
1971 	sbio->page_count++;
1972 	if (sbio->page_count == sctx->pages_per_wr_bio)
1973 		scrub_wr_submit(sctx);
1974 	mutex_unlock(&sctx->wr_lock);
1975 
1976 	return 0;
1977 }
1978 
1979 static void scrub_wr_submit(struct scrub_ctx *sctx)
1980 {
1981 	struct scrub_bio *sbio;
1982 
1983 	if (!sctx->wr_curr_bio)
1984 		return;
1985 
1986 	sbio = sctx->wr_curr_bio;
1987 	sctx->wr_curr_bio = NULL;
1988 	WARN_ON(!sbio->bio->bi_disk);
1989 	scrub_pending_bio_inc(sctx);
1990 	/* Process all writes in a single worker thread. The block layer
1991 	 * then orders the requests before sending them to the driver,
1992 	 * which doubled the write performance on spinning disks when
1993 	 * measured with Linux 3.5 */
1994 	btrfsic_submit_bio(sbio->bio);
1995 }
1996 
1997 static void scrub_wr_bio_end_io(struct bio *bio)
1998 {
1999 	struct scrub_bio *sbio = bio->bi_private;
2000 	struct btrfs_fs_info *fs_info = sbio->dev->fs_info;
2001 
2002 	sbio->status = bio->bi_status;
2003 	sbio->bio = bio;
2004 
2005 	btrfs_init_work(&sbio->work, btrfs_scrubwrc_helper,
2006 			 scrub_wr_bio_end_io_worker, NULL, NULL);
2007 	btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work);
2008 }
2009 
2010 static void scrub_wr_bio_end_io_worker(struct btrfs_work *work)
2011 {
2012 	struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
2013 	struct scrub_ctx *sctx = sbio->sctx;
2014 	int i;
2015 
2016 	WARN_ON(sbio->page_count > SCRUB_PAGES_PER_WR_BIO);
2017 	if (sbio->status) {
2018 		struct btrfs_dev_replace *dev_replace =
2019 			&sbio->sctx->fs_info->dev_replace;
2020 
2021 		for (i = 0; i < sbio->page_count; i++) {
2022 			struct scrub_page *spage = sbio->pagev[i];
2023 
2024 			spage->io_error = 1;
2025 			btrfs_dev_replace_stats_inc(&dev_replace->
2026 						    num_write_errors);
2027 		}
2028 	}
2029 
2030 	for (i = 0; i < sbio->page_count; i++)
2031 		scrub_page_put(sbio->pagev[i]);
2032 
2033 	bio_put(sbio->bio);
2034 	kfree(sbio);
2035 	scrub_pending_bio_dec(sctx);
2036 }
2037 
2038 static int scrub_checksum(struct scrub_block *sblock)
2039 {
2040 	u64 flags;
2041 	int ret;
2042 
2043 	/*
2044 	 * No need to initialize these stats currently,
2045 	 * because this function only uses the return value
2046 	 * instead of these stat values.
2047 	 *
2048 	 * Todo:
2049 	 * always use stats
2050 	 */
2051 	sblock->header_error = 0;
2052 	sblock->generation_error = 0;
2053 	sblock->checksum_error = 0;
2054 
2055 	WARN_ON(sblock->page_count < 1);
2056 	flags = sblock->pagev[0]->flags;
2057 	ret = 0;
2058 	if (flags & BTRFS_EXTENT_FLAG_DATA)
2059 		ret = scrub_checksum_data(sblock);
2060 	else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
2061 		ret = scrub_checksum_tree_block(sblock);
2062 	else if (flags & BTRFS_EXTENT_FLAG_SUPER)
2063 		(void)scrub_checksum_super(sblock);
2064 	else
2065 		WARN_ON(1);
2066 	if (ret)
2067 		scrub_handle_errored_block(sblock);
2068 
2069 	return ret;
2070 }
2071 
2072 static int scrub_checksum_data(struct scrub_block *sblock)
2073 {
2074 	struct scrub_ctx *sctx = sblock->sctx;
2075 	u8 csum[BTRFS_CSUM_SIZE];
2076 	u8 *on_disk_csum;
2077 	struct page *page;
2078 	void *buffer;
2079 	u32 crc = ~(u32)0;
2080 	u64 len;
2081 	int index;
2082 
2083 	BUG_ON(sblock->page_count < 1);
2084 	if (!sblock->pagev[0]->have_csum)
2085 		return 0;
2086 
2087 	on_disk_csum = sblock->pagev[0]->csum;
2088 	page = sblock->pagev[0]->page;
2089 	buffer = kmap_atomic(page);
2090 
2091 	len = sctx->fs_info->sectorsize;
2092 	index = 0;
2093 	for (;;) {
2094 		u64 l = min_t(u64, len, PAGE_SIZE);
2095 
2096 		crc = btrfs_csum_data(buffer, crc, l);
2097 		kunmap_atomic(buffer);
2098 		len -= l;
2099 		if (len == 0)
2100 			break;
2101 		index++;
2102 		BUG_ON(index >= sblock->page_count);
2103 		BUG_ON(!sblock->pagev[index]->page);
2104 		page = sblock->pagev[index]->page;
2105 		buffer = kmap_atomic(page);
2106 	}
2107 
2108 	btrfs_csum_final(crc, csum);
2109 	if (memcmp(csum, on_disk_csum, sctx->csum_size))
2110 		sblock->checksum_error = 1;
2111 
2112 	return sblock->checksum_error;
2113 }
2114 
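/*
 * Verify a metadata block: check bytenr, generation, fsid and chunk
 * tree UUID in the header against the expected values, then compute
 * the CRC over the node contents (excluding the checksum area) and
 * compare it with the checksum stored in the header.
 */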
2115 static int scrub_checksum_tree_block(struct scrub_block *sblock)
2116 {
2117 	struct scrub_ctx *sctx = sblock->sctx;
2118 	struct btrfs_header *h;
2119 	struct btrfs_fs_info *fs_info = sctx->fs_info;
2120 	u8 calculated_csum[BTRFS_CSUM_SIZE];
2121 	u8 on_disk_csum[BTRFS_CSUM_SIZE];
2122 	struct page *page;
2123 	void *mapped_buffer;
2124 	u64 mapped_size;
2125 	void *p;
2126 	u32 crc = ~(u32)0;
2127 	u64 len;
2128 	int index;
2129 
2130 	BUG_ON(sblock->page_count < 1);
2131 	page = sblock->pagev[0]->page;
2132 	mapped_buffer = kmap_atomic(page);
2133 	h = (struct btrfs_header *)mapped_buffer;
2134 	memcpy(on_disk_csum, h->csum, sctx->csum_size);
2135 
2136 	/*
2137 	 * we don't use the getter functions here, as we
2138 	 * a) don't have an extent buffer and
2139 	 * b) the page is already kmapped
2140 	 */
2141 	if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h))
2142 		sblock->header_error = 1;
2143 
2144 	if (sblock->pagev[0]->generation != btrfs_stack_header_generation(h)) {
2145 		sblock->header_error = 1;
2146 		sblock->generation_error = 1;
2147 	}
2148 
2149 	if (!scrub_check_fsid(h->fsid, sblock->pagev[0]))
2150 		sblock->header_error = 1;
2151 
2152 	if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
2153 		   BTRFS_UUID_SIZE))
2154 		sblock->header_error = 1;
2155 
2156 	len = sctx->fs_info->nodesize - BTRFS_CSUM_SIZE;
2157 	mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
2158 	p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
2159 	index = 0;
2160 	for (;;) {
2161 		u64 l = min_t(u64, len, mapped_size);
2162 
2163 		crc = btrfs_csum_data(p, crc, l);
2164 		kunmap_atomic(mapped_buffer);
2165 		len -= l;
2166 		if (len == 0)
2167 			break;
2168 		index++;
2169 		BUG_ON(index >= sblock->page_count);
2170 		BUG_ON(!sblock->pagev[index]->page);
2171 		page = sblock->pagev[index]->page;
2172 		mapped_buffer = kmap_atomic(page);
2173 		mapped_size = PAGE_SIZE;
2174 		p = mapped_buffer;
2175 	}
2176 
2177 	btrfs_csum_final(crc, calculated_csum);
2178 	if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
2179 		sblock->checksum_error = 1;
2180 
2181 	return sblock->header_error || sblock->checksum_error;
2182 }
2183 
2184 static int scrub_checksum_super(struct scrub_block *sblock)
2185 {
2186 	struct btrfs_super_block *s;
2187 	struct scrub_ctx *sctx = sblock->sctx;
2188 	u8 calculated_csum[BTRFS_CSUM_SIZE];
2189 	u8 on_disk_csum[BTRFS_CSUM_SIZE];
2190 	struct page *page;
2191 	void *mapped_buffer;
2192 	u64 mapped_size;
2193 	void *p;
2194 	u32 crc = ~(u32)0;
2195 	int fail_gen = 0;
2196 	int fail_cor = 0;
2197 	u64 len;
2198 	int index;
2199 
2200 	BUG_ON(sblock->page_count < 1);
2201 	page = sblock->pagev[0]->page;
2202 	mapped_buffer = kmap_atomic(page);
2203 	s = (struct btrfs_super_block *)mapped_buffer;
2204 	memcpy(on_disk_csum, s->csum, sctx->csum_size);
2205 
2206 	if (sblock->pagev[0]->logical != btrfs_super_bytenr(s))
2207 		++fail_cor;
2208 
2209 	if (sblock->pagev[0]->generation != btrfs_super_generation(s))
2210 		++fail_gen;
2211 
2212 	if (!scrub_check_fsid(s->fsid, sblock->pagev[0]))
2213 		++fail_cor;
2214 
2215 	len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE;
2216 	mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
2217 	p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
2218 	index = 0;
2219 	for (;;) {
2220 		u64 l = min_t(u64, len, mapped_size);
2221 
2222 		crc = btrfs_csum_data(p, crc, l);
2223 		kunmap_atomic(mapped_buffer);
2224 		len -= l;
2225 		if (len == 0)
2226 			break;
2227 		index++;
2228 		BUG_ON(index >= sblock->page_count);
2229 		BUG_ON(!sblock->pagev[index]->page);
2230 		page = sblock->pagev[index]->page;
2231 		mapped_buffer = kmap_atomic(page);
2232 		mapped_size = PAGE_SIZE;
2233 		p = mapped_buffer;
2234 	}
2235 
2236 	btrfs_csum_final(crc, calculated_csum);
2237 	if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
2238 		++fail_cor;
2239 
2240 	if (fail_cor + fail_gen) {
2241 		/*
2242 		 * If we find an error in a super block, we just report it.
2243 		 * It will be rewritten with the next transaction commit
2244 		 * anyway.
2245 		 */
2246 		spin_lock(&sctx->stat_lock);
2247 		++sctx->stat.super_errors;
2248 		spin_unlock(&sctx->stat_lock);
2249 		if (fail_cor)
2250 			btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
2251 				BTRFS_DEV_STAT_CORRUPTION_ERRS);
2252 		else
2253 			btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
2254 				BTRFS_DEV_STAT_GENERATION_ERRS);
2255 	}
2256 
2257 	return fail_cor + fail_gen;
2258 }
2259 
2260 static void scrub_block_get(struct scrub_block *sblock)
2261 {
2262 	refcount_inc(&sblock->refs);
2263 }
2264 
2265 static void scrub_block_put(struct scrub_block *sblock)
2266 {
2267 	if (refcount_dec_and_test(&sblock->refs)) {
2268 		int i;
2269 
2270 		if (sblock->sparity)
2271 			scrub_parity_put(sblock->sparity);
2272 
2273 		for (i = 0; i < sblock->page_count; i++)
2274 			scrub_page_put(sblock->pagev[i]);
2275 		kfree(sblock);
2276 	}
2277 }
2278 
2279 static void scrub_page_get(struct scrub_page *spage)
2280 {
2281 	atomic_inc(&spage->refs);
2282 }
2283 
2284 static void scrub_page_put(struct scrub_page *spage)
2285 {
2286 	if (atomic_dec_and_test(&spage->refs)) {
2287 		if (spage->page)
2288 			__free_page(spage->page);
2289 		kfree(spage);
2290 	}
2291 }
2292 
2293 static void scrub_submit(struct scrub_ctx *sctx)
2294 {
2295 	struct scrub_bio *sbio;
2296 
2297 	if (sctx->curr == -1)
2298 		return;
2299 
2300 	sbio = sctx->bios[sctx->curr];
2301 	sctx->curr = -1;
2302 	scrub_pending_bio_inc(sctx);
2303 	btrfsic_submit_bio(sbio->bio);
2304 }
2305 
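/*
 * Queue @spage for reading.  Like the write path, pages are batched
 * into the current read bio as long as the device, physical and
 * logical offsets stay contiguous; a full or non-contiguous bio is
 * submitted first and the page is retried on a fresh one.
 */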
2306 static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
2307 				    struct scrub_page *spage)
2308 {
2309 	struct scrub_block *sblock = spage->sblock;
2310 	struct scrub_bio *sbio;
2311 	int ret;
2312 
2313 again:
2314 	/*
2315 	 * grab a fresh bio or wait for one to become available
2316 	 */
2317 	while (sctx->curr == -1) {
2318 		spin_lock(&sctx->list_lock);
2319 		sctx->curr = sctx->first_free;
2320 		if (sctx->curr != -1) {
2321 			sctx->first_free = sctx->bios[sctx->curr]->next_free;
2322 			sctx->bios[sctx->curr]->next_free = -1;
2323 			sctx->bios[sctx->curr]->page_count = 0;
2324 			spin_unlock(&sctx->list_lock);
2325 		} else {
2326 			spin_unlock(&sctx->list_lock);
2327 			wait_event(sctx->list_wait, sctx->first_free != -1);
2328 		}
2329 	}
2330 	sbio = sctx->bios[sctx->curr];
2331 	if (sbio->page_count == 0) {
2332 		struct bio *bio;
2333 
2334 		sbio->physical = spage->physical;
2335 		sbio->logical = spage->logical;
2336 		sbio->dev = spage->dev;
2337 		bio = sbio->bio;
2338 		if (!bio) {
2339 			bio = btrfs_io_bio_alloc(sctx->pages_per_rd_bio);
2340 			sbio->bio = bio;
2341 		}
2342 
2343 		bio->bi_private = sbio;
2344 		bio->bi_end_io = scrub_bio_end_io;
2345 		bio_set_dev(bio, sbio->dev->bdev);
2346 		bio->bi_iter.bi_sector = sbio->physical >> 9;
2347 		bio_set_op_attrs(bio, REQ_OP_READ, 0);
2348 		sbio->status = 0;
2349 	} else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
2350 		   spage->physical ||
2351 		   sbio->logical + sbio->page_count * PAGE_SIZE !=
2352 		   spage->logical ||
2353 		   sbio->dev != spage->dev) {
2354 		scrub_submit(sctx);
2355 		goto again;
2356 	}
2357 
2358 	sbio->pagev[sbio->page_count] = spage;
2359 	ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
2360 	if (ret != PAGE_SIZE) {
2361 		if (sbio->page_count < 1) {
2362 			bio_put(sbio->bio);
2363 			sbio->bio = NULL;
2364 			return -EIO;
2365 		}
2366 		scrub_submit(sctx);
2367 		goto again;
2368 	}
2369 
2370 	scrub_block_get(sblock); /* one for the page added to the bio */
2371 	atomic_inc(&sblock->outstanding_pages);
2372 	sbio->page_count++;
2373 	if (sbio->page_count == sctx->pages_per_rd_bio)
2374 		scrub_submit(sctx);
2375 
2376 	return 0;
2377 }
2378 
2379 static void scrub_missing_raid56_end_io(struct bio *bio)
2380 {
2381 	struct scrub_block *sblock = bio->bi_private;
2382 	struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;
2383 
2384 	if (bio->bi_status)
2385 		sblock->no_io_error_seen = 0;
2386 
2387 	bio_put(bio);
2388 
2389 	btrfs_queue_work(fs_info->scrub_workers, &sblock->work);
2390 }
2391 
2392 static void scrub_missing_raid56_worker(struct btrfs_work *work)
2393 {
2394 	struct scrub_block *sblock = container_of(work, struct scrub_block, work);
2395 	struct scrub_ctx *sctx = sblock->sctx;
2396 	struct btrfs_fs_info *fs_info = sctx->fs_info;
2397 	u64 logical;
2398 	struct btrfs_device *dev;
2399 
2400 	logical = sblock->pagev[0]->logical;
2401 	dev = sblock->pagev[0]->dev;
2402 
2403 	if (sblock->no_io_error_seen)
2404 		scrub_recheck_block_checksum(sblock);
2405 
2406 	if (!sblock->no_io_error_seen) {
2407 		spin_lock(&sctx->stat_lock);
2408 		sctx->stat.read_errors++;
2409 		spin_unlock(&sctx->stat_lock);
2410 		btrfs_err_rl_in_rcu(fs_info,
2411 			"IO error rebuilding logical %llu for dev %s",
2412 			logical, rcu_str_deref(dev->name));
2413 	} else if (sblock->header_error || sblock->checksum_error) {
2414 		spin_lock(&sctx->stat_lock);
2415 		sctx->stat.uncorrectable_errors++;
2416 		spin_unlock(&sctx->stat_lock);
2417 		btrfs_err_rl_in_rcu(fs_info,
2418 			"failed to rebuild valid logical %llu for dev %s",
2419 			logical, rcu_str_deref(dev->name));
2420 	} else {
2421 		scrub_write_block_to_dev_replace(sblock);
2422 	}
2423 
2424 	if (sctx->is_dev_replace && sctx->flush_all_writes) {
2425 		mutex_lock(&sctx->wr_lock);
2426 		scrub_wr_submit(sctx);
2427 		mutex_unlock(&sctx->wr_lock);
2428 	}
2429 
2430 	scrub_block_put(sblock);
2431 	scrub_pending_bio_dec(sctx);
2432 }
2433 
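/*
 * Read a block that sits on a missing device by allocating a "missing"
 * RAID5/6 rbio: the data is reconstructed from the remaining stripes,
 * and scrub_missing_raid56_worker() rechecks the result (and writes it
 * to the dev-replace target) once the rebuild completes.
 */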
2434 static void scrub_missing_raid56_pages(struct scrub_block *sblock)
2435 {
2436 	struct scrub_ctx *sctx = sblock->sctx;
2437 	struct btrfs_fs_info *fs_info = sctx->fs_info;
2438 	u64 length = sblock->page_count * PAGE_SIZE;
2439 	u64 logical = sblock->pagev[0]->logical;
2440 	struct btrfs_bio *bbio = NULL;
2441 	struct bio *bio;
2442 	struct btrfs_raid_bio *rbio;
2443 	int ret;
2444 	int i;
2445 
2446 	btrfs_bio_counter_inc_blocked(fs_info);
2447 	ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
2448 			&length, &bbio);
2449 	if (ret || !bbio || !bbio->raid_map)
2450 		goto bbio_out;
2451 
2452 	if (WARN_ON(!sctx->is_dev_replace ||
2453 		    !(bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK))) {
2454 		/*
2455 		 * We shouldn't be scrubbing a missing device. Even for dev
2456 		 * replace, we should only get here for RAID 5/6. We either
2457 		 * managed to mount something with no mirrors remaining or
2458 		 * there's a bug in scrub_remap_extent()/btrfs_map_block().
2459 		 */
2460 		goto bbio_out;
2461 	}
2462 
2463 	bio = btrfs_io_bio_alloc(0);
2464 	bio->bi_iter.bi_sector = logical >> 9;
2465 	bio->bi_private = sblock;
2466 	bio->bi_end_io = scrub_missing_raid56_end_io;
2467 
2468 	rbio = raid56_alloc_missing_rbio(fs_info, bio, bbio, length);
2469 	if (!rbio)
2470 		goto rbio_out;
2471 
2472 	for (i = 0; i < sblock->page_count; i++) {
2473 		struct scrub_page *spage = sblock->pagev[i];
2474 
2475 		raid56_add_scrub_pages(rbio, spage->page, spage->logical);
2476 	}
2477 
2478 	btrfs_init_work(&sblock->work, btrfs_scrub_helper,
2479 			scrub_missing_raid56_worker, NULL, NULL);
2480 	scrub_block_get(sblock);
2481 	scrub_pending_bio_inc(sctx);
2482 	raid56_submit_missing_rbio(rbio);
2483 	return;
2484 
2485 rbio_out:
2486 	bio_put(bio);
2487 bbio_out:
2488 	btrfs_bio_counter_dec(fs_info);
2489 	btrfs_put_bbio(bbio);
2490 	spin_lock(&sctx->stat_lock);
2491 	sctx->stat.malloc_errors++;
2492 	spin_unlock(&sctx->stat_lock);
2493 }
2494 
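/*
 * Split the range [logical, logical + len) into PAGE_SIZE scrub_pages,
 * attach them to a freshly allocated scrub_block and queue them in
 * read bios.  If the device is missing, hand the whole block to the
 * RAID5/6 rebuild path instead.
 */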
2495 static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
2496 		       u64 physical, struct btrfs_device *dev, u64 flags,
2497 		       u64 gen, int mirror_num, u8 *csum, int force,
2498 		       u64 physical_for_dev_replace)
2499 {
2500 	struct scrub_block *sblock;
2501 	int index;
2502 
2503 	sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
2504 	if (!sblock) {
2505 		spin_lock(&sctx->stat_lock);
2506 		sctx->stat.malloc_errors++;
2507 		spin_unlock(&sctx->stat_lock);
2508 		return -ENOMEM;
2509 	}
2510 
2511 	/* one ref inside this function, plus one for each page added to
2512 	 * a bio later on */
2513 	refcount_set(&sblock->refs, 1);
2514 	sblock->sctx = sctx;
2515 	sblock->no_io_error_seen = 1;
2516 
2517 	for (index = 0; len > 0; index++) {
2518 		struct scrub_page *spage;
2519 		u64 l = min_t(u64, len, PAGE_SIZE);
2520 
2521 		spage = kzalloc(sizeof(*spage), GFP_KERNEL);
2522 		if (!spage) {
2523 leave_nomem:
2524 			spin_lock(&sctx->stat_lock);
2525 			sctx->stat.malloc_errors++;
2526 			spin_unlock(&sctx->stat_lock);
2527 			scrub_block_put(sblock);
2528 			return -ENOMEM;
2529 		}
2530 		BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
2531 		scrub_page_get(spage);
2532 		sblock->pagev[index] = spage;
2533 		spage->sblock = sblock;
2534 		spage->dev = dev;
2535 		spage->flags = flags;
2536 		spage->generation = gen;
2537 		spage->logical = logical;
2538 		spage->physical = physical;
2539 		spage->physical_for_dev_replace = physical_for_dev_replace;
2540 		spage->mirror_num = mirror_num;
2541 		if (csum) {
2542 			spage->have_csum = 1;
2543 			memcpy(spage->csum, csum, sctx->csum_size);
2544 		} else {
2545 			spage->have_csum = 0;
2546 		}
2547 		sblock->page_count++;
2548 		spage->page = alloc_page(GFP_KERNEL);
2549 		if (!spage->page)
2550 			goto leave_nomem;
2551 		len -= l;
2552 		logical += l;
2553 		physical += l;
2554 		physical_for_dev_replace += l;
2555 	}
2556 
2557 	WARN_ON(sblock->page_count == 0);
2558 	if (dev->missing) {
2559 		/*
2560 		 * This case should only be hit for RAID 5/6 device replace. See
2561 		 * the comment in scrub_missing_raid56_pages() for details.
2562 		 */
2563 		scrub_missing_raid56_pages(sblock);
2564 	} else {
2565 		for (index = 0; index < sblock->page_count; index++) {
2566 			struct scrub_page *spage = sblock->pagev[index];
2567 			int ret;
2568 
2569 			ret = scrub_add_page_to_rd_bio(sctx, spage);
2570 			if (ret) {
2571 				scrub_block_put(sblock);
2572 				return ret;
2573 			}
2574 		}
2575 
2576 		if (force)
2577 			scrub_submit(sctx);
2578 	}
2579 
2580 	/* last one frees, either here or in bio completion for last page */
2581 	scrub_block_put(sblock);
2582 	return 0;
2583 }
2584 
2585 static void scrub_bio_end_io(struct bio *bio)
2586 {
2587 	struct scrub_bio *sbio = bio->bi_private;
2588 	struct btrfs_fs_info *fs_info = sbio->dev->fs_info;
2589 
2590 	sbio->status = bio->bi_status;
2591 	sbio->bio = bio;
2592 
2593 	btrfs_queue_work(fs_info->scrub_workers, &sbio->work);
2594 }
2595 
2596 static void scrub_bio_end_io_worker(struct btrfs_work *work)
2597 {
2598 	struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
2599 	struct scrub_ctx *sctx = sbio->sctx;
2600 	int i;
2601 
2602 	BUG_ON(sbio->page_count > SCRUB_PAGES_PER_RD_BIO);
2603 	if (sbio->status) {
2604 		for (i = 0; i < sbio->page_count; i++) {
2605 			struct scrub_page *spage = sbio->pagev[i];
2606 
2607 			spage->io_error = 1;
2608 			spage->sblock->no_io_error_seen = 0;
2609 		}
2610 	}
2611 
2612 	/* now complete the scrub_block items that have all pages completed */
2613 	for (i = 0; i < sbio->page_count; i++) {
2614 		struct scrub_page *spage = sbio->pagev[i];
2615 		struct scrub_block *sblock = spage->sblock;
2616 
2617 		if (atomic_dec_and_test(&sblock->outstanding_pages))
2618 			scrub_block_complete(sblock);
2619 		scrub_block_put(sblock);
2620 	}
2621 
2622 	bio_put(sbio->bio);
2623 	sbio->bio = NULL;
2624 	spin_lock(&sctx->list_lock);
2625 	sbio->next_free = sctx->first_free;
2626 	sctx->first_free = sbio->index;
2627 	spin_unlock(&sctx->list_lock);
2628 
2629 	if (sctx->is_dev_replace && sctx->flush_all_writes) {
2630 		mutex_lock(&sctx->wr_lock);
2631 		scrub_wr_submit(sctx);
2632 		mutex_unlock(&sctx->wr_lock);
2633 	}
2634 
2635 	scrub_pending_bio_dec(sctx);
2636 }
2637 
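/*
 * Mark the sectors covered by [start, start + len) in the given
 * per-stripe bitmap.  The range may wrap around the end of the stripe,
 * which is why two bitmap_set() calls can be needed.
 */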
2638 static inline void __scrub_mark_bitmap(struct scrub_parity *sparity,
2639 				       unsigned long *bitmap,
2640 				       u64 start, u64 len)
2641 {
2642 	u64 offset;
2643 	u64 nsectors64;
2644 	u32 nsectors;
2645 	int sectorsize = sparity->sctx->fs_info->sectorsize;
2646 
2647 	if (len >= sparity->stripe_len) {
2648 		bitmap_set(bitmap, 0, sparity->nsectors);
2649 		return;
2650 	}
2651 
2652 	start -= sparity->logic_start;
2653 	start = div64_u64_rem(start, sparity->stripe_len, &offset);
2654 	offset = div_u64(offset, sectorsize);
2655 	nsectors64 = div_u64(len, sectorsize);
2656 
2657 	ASSERT(nsectors64 < UINT_MAX);
2658 	nsectors = (u32)nsectors64;
2659 
2660 	if (offset + nsectors <= sparity->nsectors) {
2661 		bitmap_set(bitmap, offset, nsectors);
2662 		return;
2663 	}
2664 
2665 	bitmap_set(bitmap, offset, sparity->nsectors - offset);
2666 	bitmap_set(bitmap, 0, nsectors - (sparity->nsectors - offset));
2667 }
2668 
2669 static inline void scrub_parity_mark_sectors_error(struct scrub_parity *sparity,
2670 						   u64 start, u64 len)
2671 {
2672 	__scrub_mark_bitmap(sparity, sparity->ebitmap, start, len);
2673 }
2674 
2675 static inline void scrub_parity_mark_sectors_data(struct scrub_parity *sparity,
2676 						  u64 start, u64 len)
2677 {
2678 	__scrub_mark_bitmap(sparity, sparity->dbitmap, start, len);
2679 }
2680 
2681 static void scrub_block_complete(struct scrub_block *sblock)
2682 {
2683 	int corrupted = 0;
2684 
2685 	if (!sblock->no_io_error_seen) {
2686 		corrupted = 1;
2687 		scrub_handle_errored_block(sblock);
2688 	} else {
2689 		/*
2690 		 * In the dev-replace case: if the block has a checksum
2691 		 * error, it is written via the repair mechanism; otherwise
2692 		 * it is written to the replace target here.
2693 		 */
2694 		corrupted = scrub_checksum(sblock);
2695 		if (!corrupted && sblock->sctx->is_dev_replace)
2696 			scrub_write_block_to_dev_replace(sblock);
2697 	}
2698 
2699 	if (sblock->sparity && corrupted && !sblock->data_corrected) {
2700 		u64 start = sblock->pagev[0]->logical;
2701 		u64 end = sblock->pagev[sblock->page_count - 1]->logical +
2702 			  PAGE_SIZE;
2703 
2704 		scrub_parity_mark_sectors_error(sblock->sparity,
2705 						start, end - start);
2706 	}
2707 }
2708 
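/*
 * Look up the data checksum for @logical in sctx->csum_list.  Sums that
 * end before @logical are dropped on the way; a matching sum item is
 * freed once its last sector has been consumed.  Returns 1 and copies
 * the checksum into @csum on success, 0 if no checksum is available.
 */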
2709 static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u8 *csum)
2710 {
2711 	struct btrfs_ordered_sum *sum = NULL;
2712 	unsigned long index;
2713 	unsigned long num_sectors;
2714 
2715 	while (!list_empty(&sctx->csum_list)) {
2716 		sum = list_first_entry(&sctx->csum_list,
2717 				       struct btrfs_ordered_sum, list);
2718 		if (sum->bytenr > logical)
2719 			return 0;
2720 		if (sum->bytenr + sum->len > logical)
2721 			break;
2722 
2723 		++sctx->stat.csum_discards;
2724 		list_del(&sum->list);
2725 		kfree(sum);
2726 		sum = NULL;
2727 	}
2728 	if (!sum)
2729 		return 0;
2730 
2731 	index = div_u64(logical - sum->bytenr, sctx->fs_info->sectorsize);
2732 	ASSERT(index < UINT_MAX);
2733 
2734 	num_sectors = sum->len / sctx->fs_info->sectorsize;
2735 	memcpy(csum, sum->sums + index, sctx->csum_size);
2736 	if (index == num_sectors - 1) {
2737 		list_del(&sum->list);
2738 		kfree(sum);
2739 	}
2740 	return 1;
2741 }
2742 
2743 /* scrub extent tries to collect up to 64 kB for each bio */
2744 static int scrub_extent(struct scrub_ctx *sctx, u64 logical, u64 len,
2745 			u64 physical, struct btrfs_device *dev, u64 flags,
2746 			u64 gen, int mirror_num, u64 physical_for_dev_replace)
2747 {
2748 	int ret;
2749 	u8 csum[BTRFS_CSUM_SIZE];
2750 	u32 blocksize;
2751 
2752 	if (flags & BTRFS_EXTENT_FLAG_DATA) {
2753 		blocksize = sctx->fs_info->sectorsize;
2754 		spin_lock(&sctx->stat_lock);
2755 		sctx->stat.data_extents_scrubbed++;
2756 		sctx->stat.data_bytes_scrubbed += len;
2757 		spin_unlock(&sctx->stat_lock);
2758 	} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
2759 		blocksize = sctx->fs_info->nodesize;
2760 		spin_lock(&sctx->stat_lock);
2761 		sctx->stat.tree_extents_scrubbed++;
2762 		sctx->stat.tree_bytes_scrubbed += len;
2763 		spin_unlock(&sctx->stat_lock);
2764 	} else {
2765 		blocksize = sctx->fs_info->sectorsize;
2766 		WARN_ON(1);
2767 	}
2768 
2769 	while (len) {
2770 		u64 l = min_t(u64, len, blocksize);
2771 		int have_csum = 0;
2772 
2773 		if (flags & BTRFS_EXTENT_FLAG_DATA) {
2774 			/* push csums to sbio */
2775 			have_csum = scrub_find_csum(sctx, logical, csum);
2776 			if (have_csum == 0)
2777 				++sctx->stat.no_csum;
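			/*
			 * Note: the nocow copy branch below is compiled out
			 * by the leading "0 &&"; scrub_pages() is used even
			 * when no checksum is available.
			 */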
2778 			if (0 && sctx->is_dev_replace && !have_csum) {
2779 				ret = copy_nocow_pages(sctx, logical, l,
2780 						       mirror_num,
2781 						      physical_for_dev_replace);
2782 				goto behind_scrub_pages;
2783 			}
2784 		}
2785 		ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen,
2786 				  mirror_num, have_csum ? csum : NULL, 0,
2787 				  physical_for_dev_replace);
2788 behind_scrub_pages:
2789 		if (ret)
2790 			return ret;
2791 		len -= l;
2792 		logical += l;
2793 		physical += l;
2794 		physical_for_dev_replace += l;
2795 	}
2796 	return 0;
2797 }
2798 
2799 static int scrub_pages_for_parity(struct scrub_parity *sparity,
2800 				  u64 logical, u64 len,
2801 				  u64 physical, struct btrfs_device *dev,
2802 				  u64 flags, u64 gen, int mirror_num, u8 *csum)
2803 {
2804 	struct scrub_ctx *sctx = sparity->sctx;
2805 	struct scrub_block *sblock;
2806 	int index;
2807 
2808 	sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
2809 	if (!sblock) {
2810 		spin_lock(&sctx->stat_lock);
2811 		sctx->stat.malloc_errors++;
2812 		spin_unlock(&sctx->stat_lock);
2813 		return -ENOMEM;
2814 	}
2815 
2816 	/* one ref inside this function, plus one for each page added to
2817 	 * a bio later on */
2818 	refcount_set(&sblock->refs, 1);
2819 	sblock->sctx = sctx;
2820 	sblock->no_io_error_seen = 1;
2821 	sblock->sparity = sparity;
2822 	scrub_parity_get(sparity);
2823 
2824 	for (index = 0; len > 0; index++) {
2825 		struct scrub_page *spage;
2826 		u64 l = min_t(u64, len, PAGE_SIZE);
2827 
2828 		spage = kzalloc(sizeof(*spage), GFP_KERNEL);
2829 		if (!spage) {
2830 leave_nomem:
2831 			spin_lock(&sctx->stat_lock);
2832 			sctx->stat.malloc_errors++;
2833 			spin_unlock(&sctx->stat_lock);
2834 			scrub_block_put(sblock);
2835 			return -ENOMEM;
2836 		}
2837 		BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
2838 		/* For scrub block */
2839 		scrub_page_get(spage);
2840 		sblock->pagev[index] = spage;
2841 		/* For scrub parity */
2842 		scrub_page_get(spage);
2843 		list_add_tail(&spage->list, &sparity->spages);
2844 		spage->sblock = sblock;
2845 		spage->dev = dev;
2846 		spage->flags = flags;
2847 		spage->generation = gen;
2848 		spage->logical = logical;
2849 		spage->physical = physical;
2850 		spage->mirror_num = mirror_num;
2851 		if (csum) {
2852 			spage->have_csum = 1;
2853 			memcpy(spage->csum, csum, sctx->csum_size);
2854 		} else {
2855 			spage->have_csum = 0;
2856 		}
2857 		sblock->page_count++;
2858 		spage->page = alloc_page(GFP_KERNEL);
2859 		if (!spage->page)
2860 			goto leave_nomem;
2861 		len -= l;
2862 		logical += l;
2863 		physical += l;
2864 	}
2865 
2866 	WARN_ON(sblock->page_count == 0);
2867 	for (index = 0; index < sblock->page_count; index++) {
2868 		struct scrub_page *spage = sblock->pagev[index];
2869 		int ret;
2870 
2871 		ret = scrub_add_page_to_rd_bio(sctx, spage);
2872 		if (ret) {
2873 			scrub_block_put(sblock);
2874 			return ret;
2875 		}
2876 	}
2877 
2878 	/* last one frees, either here or in bio completion for last page */
2879 	scrub_block_put(sblock);
2880 	return 0;
2881 }
2882 
2883 static int scrub_extent_for_parity(struct scrub_parity *sparity,
2884 				   u64 logical, u64 len,
2885 				   u64 physical, struct btrfs_device *dev,
2886 				   u64 flags, u64 gen, int mirror_num)
2887 {
2888 	struct scrub_ctx *sctx = sparity->sctx;
2889 	int ret;
2890 	u8 csum[BTRFS_CSUM_SIZE];
2891 	u32 blocksize;
2892 
2893 	if (dev->missing) {
2894 		scrub_parity_mark_sectors_error(sparity, logical, len);
2895 		return 0;
2896 	}
2897 
2898 	if (flags & BTRFS_EXTENT_FLAG_DATA) {
2899 		blocksize = sctx->fs_info->sectorsize;
2900 	} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
2901 		blocksize = sctx->fs_info->nodesize;
2902 	} else {
2903 		blocksize = sctx->fs_info->sectorsize;
2904 		WARN_ON(1);
2905 	}
2906 
2907 	while (len) {
2908 		u64 l = min_t(u64, len, blocksize);
2909 		int have_csum = 0;
2910 
2911 		if (flags & BTRFS_EXTENT_FLAG_DATA) {
2912 			/* push csums to sbio */
2913 			have_csum = scrub_find_csum(sctx, logical, csum);
2914 			if (have_csum == 0)
2915 				goto skip;
2916 		}
2917 		ret = scrub_pages_for_parity(sparity, logical, l, physical, dev,
2918 					     flags, gen, mirror_num,
2919 					     have_csum ? csum : NULL);
2920 		if (ret)
2921 			return ret;
2922 skip:
2923 		len -= l;
2924 		logical += l;
2925 		physical += l;
2926 	}
2927 	return 0;
2928 }
2929 
2930 /*
2931  * Given a physical address, this calculates its logical offset.
2932  * If this is a parity stripe, it returns the logical offset of the
2933  * leftmost data stripe of the same full stripe.
2934  *
2935  * Returns 0 if it is a data stripe, 1 if it is a parity stripe.
2936  */
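/*
 * Illustrative example (assumed numbers, not taken from the code below):
 * on RAID5 over 3 devices with a 64K stripe_len there are 2 data stripes
 * per full stripe.  For device num = 0, the first 64K on that device is
 * data stripe 0 of full stripe 0, so the function returns 0 with
 * *offset = 0.  The second 64K on device 0 holds the rotated parity of
 * full stripe 1, so the function returns 1 with *offset = 128K (relative
 * to the chunk start), the offset of that full stripe's leftmost data
 * stripe.
 */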
2937 static int get_raid56_logic_offset(u64 physical, int num,
2938 				   struct map_lookup *map, u64 *offset,
2939 				   u64 *stripe_start)
2940 {
2941 	int i;
2942 	int j = 0;
2943 	u64 stripe_nr;
2944 	u64 last_offset;
2945 	u32 stripe_index;
2946 	u32 rot;
2947 
2948 	last_offset = (physical - map->stripes[num].physical) *
2949 		      nr_data_stripes(map);
2950 	if (stripe_start)
2951 		*stripe_start = last_offset;
2952 
2953 	*offset = last_offset;
2954 	for (i = 0; i < nr_data_stripes(map); i++) {
2955 		*offset = last_offset + i * map->stripe_len;
2956 
2957 		stripe_nr = div64_u64(*offset, map->stripe_len);
2958 		stripe_nr = div_u64(stripe_nr, nr_data_stripes(map));
2959 
2960 		/* Work out the disk rotation on this stripe-set */
2961 		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, &rot);
2962 		/* calculate which stripe this data is located on */
2963 		rot += i;
2964 		stripe_index = rot % map->num_stripes;
2965 		if (stripe_index == num)
2966 			return 0;
2967 		if (stripe_index < num)
2968 			j++;
2969 	}
2970 	*offset = last_offset + j * map->stripe_len;
2971 	return 1;
2972 }
2973 
2974 static void scrub_free_parity(struct scrub_parity *sparity)
2975 {
2976 	struct scrub_ctx *sctx = sparity->sctx;
2977 	struct scrub_page *curr, *next;
2978 	int nbits;
2979 
2980 	nbits = bitmap_weight(sparity->ebitmap, sparity->nsectors);
2981 	if (nbits) {
2982 		spin_lock(&sctx->stat_lock);
2983 		sctx->stat.read_errors += nbits;
2984 		sctx->stat.uncorrectable_errors += nbits;
2985 		spin_unlock(&sctx->stat_lock);
2986 	}
2987 
2988 	list_for_each_entry_safe(curr, next, &sparity->spages, list) {
2989 		list_del_init(&curr->list);
2990 		scrub_page_put(curr);
2991 	}
2992 
2993 	kfree(sparity);
2994 }
2995 
2996 static void scrub_parity_bio_endio_worker(struct btrfs_work *work)
2997 {
2998 	struct scrub_parity *sparity = container_of(work, struct scrub_parity,
2999 						    work);
3000 	struct scrub_ctx *sctx = sparity->sctx;
3001 
3002 	scrub_free_parity(sparity);
3003 	scrub_pending_bio_dec(sctx);
3004 }
3005 
3006 static void scrub_parity_bio_endio(struct bio *bio)
3007 {
3008 	struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private;
3009 	struct btrfs_fs_info *fs_info = sparity->sctx->fs_info;
3010 
3011 	if (bio->bi_status)
3012 		bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
3013 			  sparity->nsectors);
3014 
3015 	bio_put(bio);
3016 
3017 	btrfs_init_work(&sparity->work, btrfs_scrubparity_helper,
3018 			scrub_parity_bio_endio_worker, NULL, NULL);
3019 	btrfs_queue_work(fs_info->scrub_parity_workers, &sparity->work);
3020 }
3021 
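/*
 * Called when the last reference to a scrub_parity is dropped: rewrite
 * the parity for all sectors that still have valid data (dbitmap minus
 * ebitmap) by handing a scrub rbio to the RAID5/6 code.
 */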
3022 static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
3023 {
3024 	struct scrub_ctx *sctx = sparity->sctx;
3025 	struct btrfs_fs_info *fs_info = sctx->fs_info;
3026 	struct bio *bio;
3027 	struct btrfs_raid_bio *rbio;
3028 	struct btrfs_bio *bbio = NULL;
3029 	u64 length;
3030 	int ret;
3031 
3032 	if (!bitmap_andnot(sparity->dbitmap, sparity->dbitmap, sparity->ebitmap,
3033 			   sparity->nsectors))
3034 		goto out;
3035 
3036 	length = sparity->logic_end - sparity->logic_start;
3037 
3038 	btrfs_bio_counter_inc_blocked(fs_info);
3039 	ret = btrfs_map_sblock(fs_info, BTRFS_MAP_WRITE, sparity->logic_start,
3040 			       &length, &bbio);
3041 	if (ret || !bbio || !bbio->raid_map)
3042 		goto bbio_out;
3043 
3044 	bio = btrfs_io_bio_alloc(0);
3045 	bio->bi_iter.bi_sector = sparity->logic_start >> 9;
3046 	bio->bi_private = sparity;
3047 	bio->bi_end_io = scrub_parity_bio_endio;
3048 
3049 	rbio = raid56_parity_alloc_scrub_rbio(fs_info, bio, bbio,
3050 					      length, sparity->scrub_dev,
3051 					      sparity->dbitmap,
3052 					      sparity->nsectors);
3053 	if (!rbio)
3054 		goto rbio_out;
3055 
3056 	scrub_pending_bio_inc(sctx);
3057 	raid56_parity_submit_scrub_rbio(rbio);
3058 	return;
3059 
3060 rbio_out:
3061 	bio_put(bio);
3062 bbio_out:
3063 	btrfs_bio_counter_dec(fs_info);
3064 	btrfs_put_bbio(bbio);
3065 	bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
3066 		  sparity->nsectors);
3067 	spin_lock(&sctx->stat_lock);
3068 	sctx->stat.malloc_errors++;
3069 	spin_unlock(&sctx->stat_lock);
3070 out:
3071 	scrub_free_parity(sparity);
3072 }
3073 
3074 static inline int scrub_calc_parity_bitmap_len(int nsectors)
3075 {
3076 	return DIV_ROUND_UP(nsectors, BITS_PER_LONG) * sizeof(long);
3077 }
3078 
3079 static void scrub_parity_get(struct scrub_parity *sparity)
3080 {
3081 	refcount_inc(&sparity->refs);
3082 }
3083 
3084 static void scrub_parity_put(struct scrub_parity *sparity)
3085 {
3086 	if (!refcount_dec_and_test(&sparity->refs))
3087 		return;
3088 
3089 	scrub_parity_check_and_repair(sparity);
3090 }
3091 
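/*
 * Scrub the parity of one RAID5/6 stripe range [logic_start, logic_end):
 * walk the extent tree for extents in the range, mark their sectors in
 * dbitmap, read and verify the data, and finally let scrub_parity_put()
 * trigger scrub_parity_check_and_repair() for the parity itself.
 */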
3092 static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
3093 						  struct map_lookup *map,
3094 						  struct btrfs_device *sdev,
3095 						  struct btrfs_path *path,
3096 						  u64 logic_start,
3097 						  u64 logic_end)
3098 {
3099 	struct btrfs_fs_info *fs_info = sctx->fs_info;
3100 	struct btrfs_root *root = fs_info->extent_root;
3101 	struct btrfs_root *csum_root = fs_info->csum_root;
3102 	struct btrfs_extent_item *extent;
3103 	struct btrfs_bio *bbio = NULL;
3104 	u64 flags;
3105 	int ret;
3106 	int slot;
3107 	struct extent_buffer *l;
3108 	struct btrfs_key key;
3109 	u64 generation;
3110 	u64 extent_logical;
3111 	u64 extent_physical;
3112 	u64 extent_len;
3113 	u64 mapped_length;
3114 	struct btrfs_device *extent_dev;
3115 	struct scrub_parity *sparity;
3116 	int nsectors;
3117 	int bitmap_len;
3118 	int extent_mirror_num;
3119 	int stop_loop = 0;
3120 
3121 	nsectors = div_u64(map->stripe_len, fs_info->sectorsize);
3122 	bitmap_len = scrub_calc_parity_bitmap_len(nsectors);
3123 	sparity = kzalloc(sizeof(struct scrub_parity) + 2 * bitmap_len,
3124 			  GFP_NOFS);
3125 	if (!sparity) {
3126 		spin_lock(&sctx->stat_lock);
3127 		sctx->stat.malloc_errors++;
3128 		spin_unlock(&sctx->stat_lock);
3129 		return -ENOMEM;
3130 	}
3131 
3132 	sparity->stripe_len = map->stripe_len;
3133 	sparity->nsectors = nsectors;
3134 	sparity->sctx = sctx;
3135 	sparity->scrub_dev = sdev;
3136 	sparity->logic_start = logic_start;
3137 	sparity->logic_end = logic_end;
3138 	refcount_set(&sparity->refs, 1);
3139 	INIT_LIST_HEAD(&sparity->spages);
3140 	sparity->dbitmap = sparity->bitmap;
3141 	sparity->ebitmap = (void *)sparity->bitmap + bitmap_len;
3142 
3143 	ret = 0;
3144 	while (logic_start < logic_end) {
3145 		if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
3146 			key.type = BTRFS_METADATA_ITEM_KEY;
3147 		else
3148 			key.type = BTRFS_EXTENT_ITEM_KEY;
3149 		key.objectid = logic_start;
3150 		key.offset = (u64)-1;
3151 
3152 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3153 		if (ret < 0)
3154 			goto out;
3155 
3156 		if (ret > 0) {
3157 			ret = btrfs_previous_extent_item(root, path, 0);
3158 			if (ret < 0)
3159 				goto out;
3160 			if (ret > 0) {
3161 				btrfs_release_path(path);
3162 				ret = btrfs_search_slot(NULL, root, &key,
3163 							path, 0, 0);
3164 				if (ret < 0)
3165 					goto out;
3166 			}
3167 		}
3168 
3169 		stop_loop = 0;
3170 		while (1) {
3171 			u64 bytes;
3172 
3173 			l = path->nodes[0];
3174 			slot = path->slots[0];
3175 			if (slot >= btrfs_header_nritems(l)) {
3176 				ret = btrfs_next_leaf(root, path);
3177 				if (ret == 0)
3178 					continue;
3179 				if (ret < 0)
3180 					goto out;
3181 
3182 				stop_loop = 1;
3183 				break;
3184 			}
3185 			btrfs_item_key_to_cpu(l, &key, slot);
3186 
3187 			if (key.type != BTRFS_EXTENT_ITEM_KEY &&
3188 			    key.type != BTRFS_METADATA_ITEM_KEY)
3189 				goto next;
3190 
3191 			if (key.type == BTRFS_METADATA_ITEM_KEY)
3192 				bytes = fs_info->nodesize;
3193 			else
3194 				bytes = key.offset;
3195 
3196 			if (key.objectid + bytes <= logic_start)
3197 				goto next;
3198 
3199 			if (key.objectid >= logic_end) {
3200 				stop_loop = 1;
3201 				break;
3202 			}
3203 
3204 			while (key.objectid >= logic_start + map->stripe_len)
3205 				logic_start += map->stripe_len;
3206 
3207 			extent = btrfs_item_ptr(l, slot,
3208 						struct btrfs_extent_item);
3209 			flags = btrfs_extent_flags(l, extent);
3210 			generation = btrfs_extent_generation(l, extent);
3211 
3212 			if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
3213 			    (key.objectid < logic_start ||
3214 			     key.objectid + bytes >
3215 			     logic_start + map->stripe_len)) {
3216 				btrfs_err(fs_info,
3217 					  "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
3218 					  key.objectid, logic_start);
3219 				spin_lock(&sctx->stat_lock);
3220 				sctx->stat.uncorrectable_errors++;
3221 				spin_unlock(&sctx->stat_lock);
3222 				goto next;
3223 			}
3224 again:
3225 			extent_logical = key.objectid;
3226 			extent_len = bytes;
3227 
3228 			if (extent_logical < logic_start) {
3229 				extent_len -= logic_start - extent_logical;
3230 				extent_logical = logic_start;
3231 			}
3232 
3233 			if (extent_logical + extent_len >
3234 			    logic_start + map->stripe_len)
3235 				extent_len = logic_start + map->stripe_len -
3236 					     extent_logical;
3237 
3238 			scrub_parity_mark_sectors_data(sparity, extent_logical,
3239 						       extent_len);
3240 
3241 			mapped_length = extent_len;
3242 			bbio = NULL;
3243 			ret = btrfs_map_block(fs_info, BTRFS_MAP_READ,
3244 					extent_logical, &mapped_length, &bbio,
3245 					0);
3246 			if (!ret) {
3247 				if (!bbio || mapped_length < extent_len)
3248 					ret = -EIO;
3249 			}
3250 			if (ret) {
3251 				btrfs_put_bbio(bbio);
3252 				goto out;
3253 			}
3254 			extent_physical = bbio->stripes[0].physical;
3255 			extent_mirror_num = bbio->mirror_num;
3256 			extent_dev = bbio->stripes[0].dev;
3257 			btrfs_put_bbio(bbio);
3258 
3259 			ret = btrfs_lookup_csums_range(csum_root,
3260 						extent_logical,
3261 						extent_logical + extent_len - 1,
3262 						&sctx->csum_list, 1);
3263 			if (ret)
3264 				goto out;
3265 
3266 			ret = scrub_extent_for_parity(sparity, extent_logical,
3267 						      extent_len,
3268 						      extent_physical,
3269 						      extent_dev, flags,
3270 						      generation,
3271 						      extent_mirror_num);
3272 
3273 			scrub_free_csums(sctx);
3274 
3275 			if (ret)
3276 				goto out;
3277 
3278 			if (extent_logical + extent_len <
3279 			    key.objectid + bytes) {
3280 				logic_start += map->stripe_len;
3281 
3282 				if (logic_start >= logic_end) {
3283 					stop_loop = 1;
3284 					break;
3285 				}
3286 
3287 				if (logic_start < key.objectid + bytes) {
3288 					cond_resched();
3289 					goto again;
3290 				}
3291 			}
3292 next:
3293 			path->slots[0]++;
3294 		}
3295 
3296 		btrfs_release_path(path);
3297 
3298 		if (stop_loop)
3299 			break;
3300 
3301 		logic_start += map->stripe_len;
3302 	}
3303 out:
3304 	if (ret < 0)
3305 		scrub_parity_mark_sectors_error(sparity, logic_start,
3306 						logic_end - logic_start);
3307 	scrub_parity_put(sparity);
3308 	scrub_submit(sctx);
3309 	mutex_lock(&sctx->wr_lock);
3310 	scrub_wr_submit(sctx);
3311 	mutex_unlock(&sctx->wr_lock);
3312 
3313 	btrfs_release_path(path);
3314 	return ret < 0 ? ret : 0;
3315 }
3316 
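/*
 * Scrub all extents of one device stripe of a chunk: derive the logical
 * increment and mirror number from the RAID profile, then walk the
 * extent tree stripe by stripe, scrubbing every extent (and, for
 * RAID5/6, every parity stripe) that falls into the device extent.
 */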
3317 static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
3318 					   struct map_lookup *map,
3319 					   struct btrfs_device *scrub_dev,
3320 					   int num, u64 base, u64 length,
3321 					   int is_dev_replace)
3322 {
3323 	struct btrfs_path *path, *ppath;
3324 	struct btrfs_fs_info *fs_info = sctx->fs_info;
3325 	struct btrfs_root *root = fs_info->extent_root;
3326 	struct btrfs_root *csum_root = fs_info->csum_root;
3327 	struct btrfs_extent_item *extent;
3328 	struct blk_plug plug;
3329 	u64 flags;
3330 	int ret;
3331 	int slot;
3332 	u64 nstripes;
3333 	struct extent_buffer *l;
3334 	u64 physical;
3335 	u64 logical;
3336 	u64 logic_end;
3337 	u64 physical_end;
3338 	u64 generation;
3339 	int mirror_num;
3340 	struct reada_control *reada1;
3341 	struct reada_control *reada2;
3342 	struct btrfs_key key;
3343 	struct btrfs_key key_end;
3344 	u64 increment = map->stripe_len;
3345 	u64 offset;
3346 	u64 extent_logical;
3347 	u64 extent_physical;
3348 	u64 extent_len;
3349 	u64 stripe_logical;
3350 	u64 stripe_end;
3351 	struct btrfs_device *extent_dev;
3352 	int extent_mirror_num;
3353 	int stop_loop = 0;
3354 
3355 	physical = map->stripes[num].physical;
3356 	offset = 0;
3357 	nstripes = div64_u64(length, map->stripe_len);
3358 	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
3359 		offset = map->stripe_len * num;
3360 		increment = map->stripe_len * map->num_stripes;
3361 		mirror_num = 1;
3362 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
3363 		int factor = map->num_stripes / map->sub_stripes;
3364 		offset = map->stripe_len * (num / map->sub_stripes);
3365 		increment = map->stripe_len * factor;
3366 		mirror_num = num % map->sub_stripes + 1;
3367 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
3368 		increment = map->stripe_len;
3369 		mirror_num = num % map->num_stripes + 1;
3370 	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
3371 		increment = map->stripe_len;
3372 		mirror_num = num % map->num_stripes + 1;
3373 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3374 		get_raid56_logic_offset(physical, num, map, &offset, NULL);
3375 		increment = map->stripe_len * nr_data_stripes(map);
3376 		mirror_num = 1;
3377 	} else {
3378 		increment = map->stripe_len;
3379 		mirror_num = 1;
3380 	}
3381 
3382 	path = btrfs_alloc_path();
3383 	if (!path)
3384 		return -ENOMEM;
3385 
3386 	ppath = btrfs_alloc_path();
3387 	if (!ppath) {
3388 		btrfs_free_path(path);
3389 		return -ENOMEM;
3390 	}
3391 
3392 	/*
3393 	 * Work on the commit root. The related disk blocks are static as
3394 	 * long as COW is applied. This means it is safe to rewrite
3395 	 * them to repair disk errors without any race conditions.
3396 	 */
3397 	path->search_commit_root = 1;
3398 	path->skip_locking = 1;
3399 
3400 	ppath->search_commit_root = 1;
3401 	ppath->skip_locking = 1;
3402 	/*
3403 	 * Trigger the readahead for the extent tree and csum tree and wait
3404 	 * for completion. During readahead, the scrub is officially paused
3405 	 * to not hold off transaction commits.
3406 	 */
3407 	logical = base + offset;
3408 	physical_end = physical + nstripes * map->stripe_len;
3409 	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3410 		get_raid56_logic_offset(physical_end, num,
3411 					map, &logic_end, NULL);
3412 		logic_end += base;
3413 	} else {
3414 		logic_end = logical + increment * nstripes;
3415 	}
3416 	wait_event(sctx->list_wait,
3417 		   atomic_read(&sctx->bios_in_flight) == 0);
3418 	scrub_blocked_if_needed(fs_info);
3419 
3420 	/* FIXME it might be better to start readahead at commit root */
3421 	key.objectid = logical;
3422 	key.type = BTRFS_EXTENT_ITEM_KEY;
3423 	key.offset = (u64)0;
3424 	key_end.objectid = logic_end;
3425 	key_end.type = BTRFS_METADATA_ITEM_KEY;
3426 	key_end.offset = (u64)-1;
3427 	reada1 = btrfs_reada_add(root, &key, &key_end);
3428 
3429 	key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
3430 	key.type = BTRFS_EXTENT_CSUM_KEY;
3431 	key.offset = logical;
3432 	key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
3433 	key_end.type = BTRFS_EXTENT_CSUM_KEY;
3434 	key_end.offset = logic_end;
3435 	reada2 = btrfs_reada_add(csum_root, &key, &key_end);
3436 
3437 	if (!IS_ERR(reada1))
3438 		btrfs_reada_wait(reada1);
3439 	if (!IS_ERR(reada2))
3440 		btrfs_reada_wait(reada2);
3441 
3442 
3443 	/*
3444 	 * collect all data csums for the stripe to avoid seeking during
3445 	 * the scrub. This might currently (crc32) end up being about 1MB.
3446 	 */
3447 	blk_start_plug(&plug);
3448 
3449 	/*
3450 	 * now find all extents for each stripe and scrub them
3451 	 */
3452 	ret = 0;
3453 	while (physical < physical_end) {
3454 		/*
3455 		 * canceled?
3456 		 */
3457 		if (atomic_read(&fs_info->scrub_cancel_req) ||
3458 		    atomic_read(&sctx->cancel_req)) {
3459 			ret = -ECANCELED;
3460 			goto out;
3461 		}
3462 		/*
3463 		 * check to see if we have to pause
3464 		 */
3465 		if (atomic_read(&fs_info->scrub_pause_req)) {
3466 			/* push queued extents */
3467 			sctx->flush_all_writes = true;
3468 			scrub_submit(sctx);
3469 			mutex_lock(&sctx->wr_lock);
3470 			scrub_wr_submit(sctx);
3471 			mutex_unlock(&sctx->wr_lock);
3472 			wait_event(sctx->list_wait,
3473 				   atomic_read(&sctx->bios_in_flight) == 0);
3474 			sctx->flush_all_writes = false;
3475 			scrub_blocked_if_needed(fs_info);
3476 		}
3477 
3478 		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3479 			ret = get_raid56_logic_offset(physical, num, map,
3480 						      &logical,
3481 						      &stripe_logical);
3482 			logical += base;
3483 			if (ret) {
3484 				/* it is a parity stripe */
3485 				stripe_logical += base;
3486 				stripe_end = stripe_logical + increment;
3487 				ret = scrub_raid56_parity(sctx, map, scrub_dev,
3488 							  ppath, stripe_logical,
3489 							  stripe_end);
3490 				if (ret)
3491 					goto out;
3492 				goto skip;
3493 			}
3494 		}
3495 
3496 		if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
3497 			key.type = BTRFS_METADATA_ITEM_KEY;
3498 		else
3499 			key.type = BTRFS_EXTENT_ITEM_KEY;
3500 		key.objectid = logical;
3501 		key.offset = (u64)-1;
3502 
3503 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3504 		if (ret < 0)
3505 			goto out;
3506 
3507 		if (ret > 0) {
3508 			ret = btrfs_previous_extent_item(root, path, 0);
3509 			if (ret < 0)
3510 				goto out;
3511 			if (ret > 0) {
3512 				/* there's no smaller item, so stick with the
3513 				 * larger one */
3514 				btrfs_release_path(path);
3515 				ret = btrfs_search_slot(NULL, root, &key,
3516 							path, 0, 0);
3517 				if (ret < 0)
3518 					goto out;
3519 			}
3520 		}
3521 
3522 		stop_loop = 0;
3523 		while (1) {
3524 			u64 bytes;
3525 
3526 			l = path->nodes[0];
3527 			slot = path->slots[0];
3528 			if (slot >= btrfs_header_nritems(l)) {
3529 				ret = btrfs_next_leaf(root, path);
3530 				if (ret == 0)
3531 					continue;
3532 				if (ret < 0)
3533 					goto out;
3534 
3535 				stop_loop = 1;
3536 				break;
3537 			}
3538 			btrfs_item_key_to_cpu(l, &key, slot);
3539 
3540 			if (key.type != BTRFS_EXTENT_ITEM_KEY &&
3541 			    key.type != BTRFS_METADATA_ITEM_KEY)
3542 				goto next;
3543 
3544 			if (key.type == BTRFS_METADATA_ITEM_KEY)
3545 				bytes = fs_info->nodesize;
3546 			else
3547 				bytes = key.offset;
3548 
3549 			if (key.objectid + bytes <= logical)
3550 				goto next;
3551 
3552 			if (key.objectid >= logical + map->stripe_len) {
3553 				/* out of this device extent */
3554 				if (key.objectid >= logic_end)
3555 					stop_loop = 1;
3556 				break;
3557 			}
3558 
3559 			extent = btrfs_item_ptr(l, slot,
3560 						struct btrfs_extent_item);
3561 			flags = btrfs_extent_flags(l, extent);
3562 			generation = btrfs_extent_generation(l, extent);
3563 
3564 			if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
3565 			    (key.objectid < logical ||
3566 			     key.objectid + bytes >
3567 			     logical + map->stripe_len)) {
3568 				btrfs_err(fs_info,
3569 					   "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
3570 				       key.objectid, logical);
3571 				spin_lock(&sctx->stat_lock);
3572 				sctx->stat.uncorrectable_errors++;
3573 				spin_unlock(&sctx->stat_lock);
3574 				goto next;
3575 			}
3576 
3577 again:
3578 			extent_logical = key.objectid;
3579 			extent_len = bytes;
3580 
3581 			/*
3582 			 * trim extent to this stripe
3583 			 */
3584 			if (extent_logical < logical) {
3585 				extent_len -= logical - extent_logical;
3586 				extent_logical = logical;
3587 			}
3588 			if (extent_logical + extent_len >
3589 			    logical + map->stripe_len) {
3590 				extent_len = logical + map->stripe_len -
3591 					     extent_logical;
3592 			}
3593 
3594 			extent_physical = extent_logical - logical + physical;
3595 			extent_dev = scrub_dev;
3596 			extent_mirror_num = mirror_num;
3597 			if (is_dev_replace)
3598 				scrub_remap_extent(fs_info, extent_logical,
3599 						   extent_len, &extent_physical,
3600 						   &extent_dev,
3601 						   &extent_mirror_num);
3602 
3603 			ret = btrfs_lookup_csums_range(csum_root,
3604 						       extent_logical,
3605 						       extent_logical +
3606 						       extent_len - 1,
3607 						       &sctx->csum_list, 1);
3608 			if (ret)
3609 				goto out;
3610 
3611 			ret = scrub_extent(sctx, extent_logical, extent_len,
3612 					   extent_physical, extent_dev, flags,
3613 					   generation, extent_mirror_num,
3614 					   extent_logical - logical + physical);
3615 
3616 			scrub_free_csums(sctx);
3617 
3618 			if (ret)
3619 				goto out;
3620 
3621 			if (extent_logical + extent_len <
3622 			    key.objectid + bytes) {
3623 				if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3624 					/*
3625 					 * loop until we find next data stripe
3626 					 * or we have finished all stripes.
3627 					 */
3628 loop:
3629 					physical += map->stripe_len;
3630 					ret = get_raid56_logic_offset(physical,
3631 							num, map, &logical,
3632 							&stripe_logical);
3633 					logical += base;
3634 
3635 					if (ret && physical < physical_end) {
3636 						stripe_logical += base;
3637 						stripe_end = stripe_logical +
3638 								increment;
3639 						ret = scrub_raid56_parity(sctx,
3640 							map, scrub_dev, ppath,
3641 							stripe_logical,
3642 							stripe_end);
3643 						if (ret)
3644 							goto out;
3645 						goto loop;
3646 					}
3647 				} else {
3648 					physical += map->stripe_len;
3649 					logical += increment;
3650 				}
3651 				if (logical < key.objectid + bytes) {
3652 					cond_resched();
3653 					goto again;
3654 				}
3655 
3656 				if (physical >= physical_end) {
3657 					stop_loop = 1;
3658 					break;
3659 				}
3660 			}
3661 next:
3662 			path->slots[0]++;
3663 		}
3664 		btrfs_release_path(path);
3665 skip:
3666 		logical += increment;
3667 		physical += map->stripe_len;
3668 		spin_lock(&sctx->stat_lock);
3669 		if (stop_loop)
3670 			sctx->stat.last_physical = map->stripes[num].physical +
3671 						   length;
3672 		else
3673 			sctx->stat.last_physical = physical;
3674 		spin_unlock(&sctx->stat_lock);
3675 		if (stop_loop)
3676 			break;
3677 	}
3678 out:
3679 	/* push queued extents */
3680 	scrub_submit(sctx);
3681 	mutex_lock(&sctx->wr_lock);
3682 	scrub_wr_submit(sctx);
3683 	mutex_unlock(&sctx->wr_lock);
3684 
3685 	blk_finish_plug(&plug);
3686 	btrfs_free_path(path);
3687 	btrfs_free_path(ppath);
3688 	return ret < 0 ? ret : 0;
3689 }
3690 
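/*
 * Scrub a single chunk on the given device: look up the chunk mapping for
 * @chunk_offset and run scrub_stripe() for every stripe of the chunk that
 * sits at @dev_offset on @scrub_dev.  A missing mapping is only an error
 * if the block group has not been removed in the meantime.
 */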
3691 static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
3692 					  struct btrfs_device *scrub_dev,
3693 					  u64 chunk_offset, u64 length,
3694 					  u64 dev_offset,
3695 					  struct btrfs_block_group_cache *cache,
3696 					  int is_dev_replace)
3697 {
3698 	struct btrfs_fs_info *fs_info = sctx->fs_info;
3699 	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
3700 	struct map_lookup *map;
3701 	struct extent_map *em;
3702 	int i;
3703 	int ret = 0;
3704 
3705 	read_lock(&map_tree->map_tree.lock);
3706 	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
3707 	read_unlock(&map_tree->map_tree.lock);
3708 
3709 	if (!em) {
3710 		/*
3711 		 * Might have been an unused block group deleted by the cleaner
3712 		 * kthread or relocation.
3713 		 */
3714 		spin_lock(&cache->lock);
3715 		if (!cache->removed)
3716 			ret = -EINVAL;
3717 		spin_unlock(&cache->lock);
3718 
3719 		return ret;
3720 	}
3721 
3722 	map = em->map_lookup;
3723 	if (em->start != chunk_offset)
3724 		goto out;
3725 
3726 	if (em->len < length)
3727 		goto out;
3728 
3729 	for (i = 0; i < map->num_stripes; ++i) {
3730 		if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
3731 		    map->stripes[i].physical == dev_offset) {
3732 			ret = scrub_stripe(sctx, map, scrub_dev, i,
3733 					   chunk_offset, length,
3734 					   is_dev_replace);
3735 			if (ret)
3736 				goto out;
3737 		}
3738 	}
3739 out:
3740 	free_extent_map(em);
3741 
3742 	return ret;
3743 }
3744 
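/*
 * Walk all dev extents of the device being scrubbed (using the commit root
 * of the device tree) and scrub the corresponding block groups one by one.
 * Each block group is set read-only first so that new allocations cannot
 * race with the scrub; in the dev-replace case, pending delalloc/nocow
 * writes are additionally flushed and the transaction committed so that
 * the commit roots contain every extent that has to be copied.
 */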
3745 static noinline_for_stack
3746 int scrub_enumerate_chunks(struct scrub_ctx *sctx,
3747 			   struct btrfs_device *scrub_dev, u64 start, u64 end,
3748 			   int is_dev_replace)
3749 {
3750 	struct btrfs_dev_extent *dev_extent = NULL;
3751 	struct btrfs_path *path;
3752 	struct btrfs_fs_info *fs_info = sctx->fs_info;
3753 	struct btrfs_root *root = fs_info->dev_root;
3754 	u64 length;
3755 	u64 chunk_offset;
3756 	int ret = 0;
3757 	int ro_set;
3758 	int slot;
3759 	struct extent_buffer *l;
3760 	struct btrfs_key key;
3761 	struct btrfs_key found_key;
3762 	struct btrfs_block_group_cache *cache;
3763 	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
3764 
3765 	path = btrfs_alloc_path();
3766 	if (!path)
3767 		return -ENOMEM;
3768 
3769 	path->reada = READA_FORWARD;
3770 	path->search_commit_root = 1;
3771 	path->skip_locking = 1;
3772 
3773 	key.objectid = scrub_dev->devid;
3774 	key.offset = 0ull;
3775 	key.type = BTRFS_DEV_EXTENT_KEY;
3776 
3777 	while (1) {
3778 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3779 		if (ret < 0)
3780 			break;
3781 		if (ret > 0) {
3782 			if (path->slots[0] >=
3783 			    btrfs_header_nritems(path->nodes[0])) {
3784 				ret = btrfs_next_leaf(root, path);
3785 				if (ret < 0)
3786 					break;
3787 				if (ret > 0) {
3788 					ret = 0;
3789 					break;
3790 				}
3791 			} else {
3792 				ret = 0;
3793 			}
3794 		}
3795 
3796 		l = path->nodes[0];
3797 		slot = path->slots[0];
3798 
3799 		btrfs_item_key_to_cpu(l, &found_key, slot);
3800 
3801 		if (found_key.objectid != scrub_dev->devid)
3802 			break;
3803 
3804 		if (found_key.type != BTRFS_DEV_EXTENT_KEY)
3805 			break;
3806 
3807 		if (found_key.offset >= end)
3808 			break;
3809 
3810 		if (found_key.offset < key.offset)
3811 			break;
3812 
3813 		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
3814 		length = btrfs_dev_extent_length(l, dev_extent);
3815 
3816 		if (found_key.offset + length <= start)
3817 			goto skip;
3818 
3819 		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
3820 
3821 		/*
3822 		 * get a reference on the corresponding block group to prevent
3823 		 * the chunk from going away while we scrub it
3824 		 */
3825 		cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3826 
3827 		/* some chunks are removed but not committed to disk yet,
3828 		 * continue scrubbing */
3829 		if (!cache)
3830 			goto skip;
3831 
3832 		/*
3833 		 * we need to call btrfs_inc_block_group_ro() with scrubs_paused,
3834 		 * to avoid deadlock caused by:
3835 		 * btrfs_inc_block_group_ro()
3836 		 * -> btrfs_wait_for_commit()
3837 		 * -> btrfs_commit_transaction()
3838 		 * -> btrfs_scrub_pause()
3839 		 */
3840 		scrub_pause_on(fs_info);
3841 		ret = btrfs_inc_block_group_ro(fs_info, cache);
3842 		if (!ret && is_dev_replace) {
3843 			/*
3844 			 * If we are doing a device replace, wait for any tasks
3845 			 * that started delalloc right before we set the block
3846 			 * group to RO mode, as they might have just allocated
3847 			 * an extent from it or decided they could do a nocow
3848 			 * write. And if any such tasks did that, wait for their
3849 			 * ordered extents to complete and then commit the
3850 			 * current transaction, so that we can later see the new
3851 			 * extent items in the extent tree - the ordered extents
3852 			 * create delayed data references (for cow writes) when
3853 			 * they complete, which will be run and insert the
3854 			 * corresponding extent items into the extent tree when
3855 			 * we commit the transaction they used when running
3856 			 * inode.c:btrfs_finish_ordered_io(). We later use
3857 			 * the commit root of the extent tree to find extents
3858 			 * to copy from the srcdev into the tgtdev, and we don't
3859 			 * want to miss any new extents.
3860 			 */
3861 			btrfs_wait_block_group_reservations(cache);
3862 			btrfs_wait_nocow_writers(cache);
3863 			ret = btrfs_wait_ordered_roots(fs_info, U64_MAX,
3864 						       cache->key.objectid,
3865 						       cache->key.offset);
3866 			if (ret > 0) {
3867 				struct btrfs_trans_handle *trans;
3868 
3869 				trans = btrfs_join_transaction(root);
3870 				if (IS_ERR(trans))
3871 					ret = PTR_ERR(trans);
3872 				else
3873 					ret = btrfs_commit_transaction(trans);
3874 				if (ret) {
3875 					scrub_pause_off(fs_info);
3876 					btrfs_put_block_group(cache);
3877 					break;
3878 				}
3879 			}
3880 		}
3881 		scrub_pause_off(fs_info);
3882 
3883 		if (ret == 0) {
3884 			ro_set = 1;
3885 		} else if (ret == -ENOSPC) {
3886 			/*
3887 			 * btrfs_inc_block_group_ro returns -ENOSPC when it
3888 			 * fails to create a new chunk for metadata.
3889 			 * It is not a problem for scrub/replace, because
3890 			 * metadata is always COWed, and our scrub has paused
3891 			 * transaction commits.
3892 			 */
3893 			ro_set = 0;
3894 		} else {
3895 			btrfs_warn(fs_info,
3896 				   "failed setting block group ro: %d", ret);
3897 			btrfs_put_block_group(cache);
3898 			break;
3899 		}
3900 
3901 		btrfs_dev_replace_lock(&fs_info->dev_replace, 1);
3902 		dev_replace->cursor_right = found_key.offset + length;
3903 		dev_replace->cursor_left = found_key.offset;
3904 		dev_replace->item_needs_writeback = 1;
3905 		btrfs_dev_replace_unlock(&fs_info->dev_replace, 1);
3906 		ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length,
3907 				  found_key.offset, cache, is_dev_replace);
3908 
3909 		/*
3910 		 * flush, submit all pending read and write bios, afterwards
3911 		 * wait for them.
3912 		 * Note that in the dev replace case, a read request causes
3913 		 * write requests that are submitted in the read completion
3914 		 * worker. Therefore in the current situation, it is required
3915 		 * that all write requests are flushed, so that all read and
3916 		 * write requests are really completed when bios_in_flight
3917 		 * changes to 0.
3918 		 */
3919 		sctx->flush_all_writes = true;
3920 		scrub_submit(sctx);
3921 		mutex_lock(&sctx->wr_lock);
3922 		scrub_wr_submit(sctx);
3923 		mutex_unlock(&sctx->wr_lock);
3924 
3925 		wait_event(sctx->list_wait,
3926 			   atomic_read(&sctx->bios_in_flight) == 0);
3927 
3928 		scrub_pause_on(fs_info);
3929 
3930 		/*
3931 		 * Must be called before we decrease @scrub_paused.
3932 		 * Make sure we don't block transaction commit while
3933 		 * we are waiting for pending workers to finish.
3934 		 */
3935 		wait_event(sctx->list_wait,
3936 			   atomic_read(&sctx->workers_pending) == 0);
3937 		sctx->flush_all_writes = false;
3938 
3939 		scrub_pause_off(fs_info);
3940 
3941 		btrfs_dev_replace_lock(&fs_info->dev_replace, 1);
3942 		dev_replace->cursor_left = dev_replace->cursor_right;
3943 		dev_replace->item_needs_writeback = 1;
3944 		btrfs_dev_replace_unlock(&fs_info->dev_replace, 1);
3945 
3946 		if (ro_set)
3947 			btrfs_dec_block_group_ro(cache);
3948 
3949 		/*
3950 		 * We might have prevented the cleaner kthread from deleting
3951 		 * this block group if it was already unused because we raced
3952 		 * and set it to RO mode first. So add it back to the unused
3953 		 * list, otherwise it might not ever be deleted unless a manual
3954 		 * balance is triggered or it becomes used and unused again.
3955 		 */
3956 		spin_lock(&cache->lock);
3957 		if (!cache->removed && !cache->ro && cache->reserved == 0 &&
3958 		    btrfs_block_group_used(&cache->item) == 0) {
3959 			spin_unlock(&cache->lock);
3960 			spin_lock(&fs_info->unused_bgs_lock);
3961 			if (list_empty(&cache->bg_list)) {
3962 				btrfs_get_block_group(cache);
3963 				list_add_tail(&cache->bg_list,
3964 					      &fs_info->unused_bgs);
3965 			}
3966 			spin_unlock(&fs_info->unused_bgs_lock);
3967 		} else {
3968 			spin_unlock(&cache->lock);
3969 		}
3970 
3971 		btrfs_put_block_group(cache);
3972 		if (ret)
3973 			break;
3974 		if (is_dev_replace &&
3975 		    atomic64_read(&dev_replace->num_write_errors) > 0) {
3976 			ret = -EIO;
3977 			break;
3978 		}
3979 		if (sctx->stat.malloc_errors > 0) {
3980 			ret = -ENOMEM;
3981 			break;
3982 		}
3983 skip:
3984 		key.offset = found_key.offset + length;
3985 		btrfs_release_path(path);
3986 	}
3987 
3988 	btrfs_free_path(path);
3989 
3990 	return ret;
3991 }
3992 
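/*
 * Scrub all superblock copies of the given device: submit each mirror at
 * btrfs_sb_offset(i) through scrub_pages() as long as it fits within the
 * committed device size, then wait for all in-flight bios to complete.
 */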
3993 static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
3994 					   struct btrfs_device *scrub_dev)
3995 {
3996 	int	i;
3997 	u64	bytenr;
3998 	u64	gen;
3999 	int	ret;
4000 	struct btrfs_fs_info *fs_info = sctx->fs_info;
4001 
4002 	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
4003 		return -EIO;
4004 
4005 	/* Seed devices of a new filesystem have their own generation. */
4006 	if (scrub_dev->fs_devices != fs_info->fs_devices)
4007 		gen = scrub_dev->generation;
4008 	else
4009 		gen = fs_info->last_trans_committed;
4010 
4011 	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
4012 		bytenr = btrfs_sb_offset(i);
4013 		if (bytenr + BTRFS_SUPER_INFO_SIZE >
4014 		    scrub_dev->commit_total_bytes)
4015 			break;
4016 
4017 		ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
4018 				  scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i,
4019 				  NULL, 1, bytenr);
4020 		if (ret)
4021 			return ret;
4022 	}
4023 	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
4024 
4025 	return 0;
4026 }
4027 
4028 /*
4029  * get a reference count on fs_info->scrub_workers. Start the workers if necessary.
4030  */
4031 static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
4032 						int is_dev_replace)
4033 {
4034 	unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
4035 	int max_active = fs_info->thread_pool_size;
4036 
4037 	if (fs_info->scrub_workers_refcnt == 0) {
4038 		fs_info->scrub_workers = btrfs_alloc_workqueue(fs_info, "scrub",
4039 				flags, is_dev_replace ? 1 : max_active, 4);
4040 		if (!fs_info->scrub_workers)
4041 			goto fail_scrub_workers;
4042 
4043 		fs_info->scrub_wr_completion_workers =
4044 			btrfs_alloc_workqueue(fs_info, "scrubwrc", flags,
4045 					      max_active, 2);
4046 		if (!fs_info->scrub_wr_completion_workers)
4047 			goto fail_scrub_wr_completion_workers;
4048 
4049 		fs_info->scrub_nocow_workers =
4050 			btrfs_alloc_workqueue(fs_info, "scrubnc", flags, 1, 0);
4051 		if (!fs_info->scrub_nocow_workers)
4052 			goto fail_scrub_nocow_workers;
4053 		fs_info->scrub_parity_workers =
4054 			btrfs_alloc_workqueue(fs_info, "scrubparity", flags,
4055 					      max_active, 2);
4056 		if (!fs_info->scrub_parity_workers)
4057 			goto fail_scrub_parity_workers;
4058 	}
4059 	++fs_info->scrub_workers_refcnt;
4060 	return 0;
4061 
4062 fail_scrub_parity_workers:
4063 	btrfs_destroy_workqueue(fs_info->scrub_nocow_workers);
4064 fail_scrub_nocow_workers:
4065 	btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
4066 fail_scrub_wr_completion_workers:
4067 	btrfs_destroy_workqueue(fs_info->scrub_workers);
4068 fail_scrub_workers:
4069 	return -ENOMEM;
4070 }
4071 
4072 static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info)
4073 {
4074 	if (--fs_info->scrub_workers_refcnt == 0) {
4075 		btrfs_destroy_workqueue(fs_info->scrub_workers);
4076 		btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
4077 		btrfs_destroy_workqueue(fs_info->scrub_nocow_workers);
4078 		btrfs_destroy_workqueue(fs_info->scrub_parity_workers);
4079 	}
4080 	WARN_ON(fs_info->scrub_workers_refcnt < 0);
4081 }
4082 
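/*
 * Entry point for both scrub and device replace.  After sanity checks on
 * the node/sector sizes and on the target device, a scrub context and the
 * scrub workqueues are set up, the superblocks are scrubbed (plain scrub
 * only) and all chunks in the [start, end) device range are enumerated and
 * scrubbed.  The accumulated statistics are copied to @progress at the end.
 */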
4083 int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
4084 		    u64 end, struct btrfs_scrub_progress *progress,
4085 		    int readonly, int is_dev_replace)
4086 {
4087 	struct scrub_ctx *sctx;
4088 	int ret;
4089 	struct btrfs_device *dev;
4090 	struct rcu_string *name;
4091 
4092 	if (btrfs_fs_closing(fs_info))
4093 		return -EINVAL;
4094 
4095 	if (fs_info->nodesize > BTRFS_STRIPE_LEN) {
4096 		/*
4097 		 * With the way scrub is implemented, it is unable to
4098 		 * calculate the checksum in this case. Do not handle this
4099 		 * situation at all because it won't ever happen.
4100 		 */
4101 		btrfs_err(fs_info,
4102 			   "scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails",
4103 		       fs_info->nodesize,
4104 		       BTRFS_STRIPE_LEN);
4105 		return -EINVAL;
4106 	}
4107 
4108 	if (fs_info->sectorsize != PAGE_SIZE) {
4109 		/* not supported for data w/o checksums */
4110 		btrfs_err_rl(fs_info,
4111 			   "scrub: size assumption sectorsize != PAGE_SIZE (%d != %lu) fails",
4112 		       fs_info->sectorsize, PAGE_SIZE);
4113 		return -EINVAL;
4114 	}
4115 
4116 	if (fs_info->nodesize >
4117 	    PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK ||
4118 	    fs_info->sectorsize > PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) {
4119 		/*
4120 		 * would exhaust the array bounds of pagev member in
4121 		 * struct scrub_block
4122 		 */
4123 		btrfs_err(fs_info,
4124 			  "scrub: size assumption nodesize and sectorsize <= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails",
4125 		       fs_info->nodesize,
4126 		       SCRUB_MAX_PAGES_PER_BLOCK,
4127 		       fs_info->sectorsize,
4128 		       SCRUB_MAX_PAGES_PER_BLOCK);
4129 		return -EINVAL;
4130 	}
4131 
4132 
4133 	mutex_lock(&fs_info->fs_devices->device_list_mutex);
4134 	dev = btrfs_find_device(fs_info, devid, NULL, NULL);
4135 	if (!dev || (dev->missing && !is_dev_replace)) {
4136 		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4137 		return -ENODEV;
4138 	}
4139 
4140 	if (!is_dev_replace && !readonly && !dev->writeable) {
4141 		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4142 		rcu_read_lock();
4143 		name = rcu_dereference(dev->name);
4144 		btrfs_err(fs_info, "scrub: device %s is not writable",
4145 			  name->str);
4146 		rcu_read_unlock();
4147 		return -EROFS;
4148 	}
4149 
4150 	mutex_lock(&fs_info->scrub_lock);
4151 	if (!dev->in_fs_metadata || dev->is_tgtdev_for_dev_replace) {
4152 		mutex_unlock(&fs_info->scrub_lock);
4153 		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4154 		return -EIO;
4155 	}
4156 
4157 	btrfs_dev_replace_lock(&fs_info->dev_replace, 0);
4158 	if (dev->scrub_device ||
4159 	    (!is_dev_replace &&
4160 	     btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
4161 		btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);
4162 		mutex_unlock(&fs_info->scrub_lock);
4163 		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4164 		return -EINPROGRESS;
4165 	}
4166 	btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);
4167 
4168 	ret = scrub_workers_get(fs_info, is_dev_replace);
4169 	if (ret) {
4170 		mutex_unlock(&fs_info->scrub_lock);
4171 		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4172 		return ret;
4173 	}
4174 
4175 	sctx = scrub_setup_ctx(dev, is_dev_replace);
4176 	if (IS_ERR(sctx)) {
4177 		mutex_unlock(&fs_info->scrub_lock);
4178 		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4179 		scrub_workers_put(fs_info);
4180 		return PTR_ERR(sctx);
4181 	}
4182 	sctx->readonly = readonly;
4183 	dev->scrub_device = sctx;
4184 	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4185 
4186 	/*
4187 	 * By checking @scrub_pause_req here, we can avoid a
4188 	 * race between committing a transaction and scrubbing.
4189 	 */
4190 	__scrub_blocked_if_needed(fs_info);
4191 	atomic_inc(&fs_info->scrubs_running);
4192 	mutex_unlock(&fs_info->scrub_lock);
4193 
4194 	if (!is_dev_replace) {
4195 		/*
4196 		 * by holding device list mutex, we can
4197 		 * kick off writing super in log tree sync.
4198 		 */
4199 		mutex_lock(&fs_info->fs_devices->device_list_mutex);
4200 		ret = scrub_supers(sctx, dev);
4201 		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4202 	}
4203 
4204 	if (!ret)
4205 		ret = scrub_enumerate_chunks(sctx, dev, start, end,
4206 					     is_dev_replace);
4207 
4208 	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
4209 	atomic_dec(&fs_info->scrubs_running);
4210 	wake_up(&fs_info->scrub_pause_wait);
4211 
4212 	wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0);
4213 
4214 	if (progress)
4215 		memcpy(progress, &sctx->stat, sizeof(*progress));
4216 
4217 	mutex_lock(&fs_info->scrub_lock);
4218 	dev->scrub_device = NULL;
4219 	scrub_workers_put(fs_info);
4220 	mutex_unlock(&fs_info->scrub_lock);
4221 
4222 	scrub_put_ctx(sctx);
4223 
4224 	return ret;
4225 }
4226 
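/*
 * Pause all running scrubs: raise @scrub_pause_req and wait until every
 * running scrub has reported itself as paused.
 */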
4227 void btrfs_scrub_pause(struct btrfs_fs_info *fs_info)
4228 {
4229 	mutex_lock(&fs_info->scrub_lock);
4230 	atomic_inc(&fs_info->scrub_pause_req);
4231 	while (atomic_read(&fs_info->scrubs_paused) !=
4232 	       atomic_read(&fs_info->scrubs_running)) {
4233 		mutex_unlock(&fs_info->scrub_lock);
4234 		wait_event(fs_info->scrub_pause_wait,
4235 			   atomic_read(&fs_info->scrubs_paused) ==
4236 			   atomic_read(&fs_info->scrubs_running));
4237 		mutex_lock(&fs_info->scrub_lock);
4238 	}
4239 	mutex_unlock(&fs_info->scrub_lock);
4240 }
4241 
4242 void btrfs_scrub_continue(struct btrfs_fs_info *fs_info)
4243 {
4244 	atomic_dec(&fs_info->scrub_pause_req);
4245 	wake_up(&fs_info->scrub_pause_wait);
4246 }
4247 
4248 int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
4249 {
4250 	mutex_lock(&fs_info->scrub_lock);
4251 	if (!atomic_read(&fs_info->scrubs_running)) {
4252 		mutex_unlock(&fs_info->scrub_lock);
4253 		return -ENOTCONN;
4254 	}
4255 
4256 	atomic_inc(&fs_info->scrub_cancel_req);
4257 	while (atomic_read(&fs_info->scrubs_running)) {
4258 		mutex_unlock(&fs_info->scrub_lock);
4259 		wait_event(fs_info->scrub_pause_wait,
4260 			   atomic_read(&fs_info->scrubs_running) == 0);
4261 		mutex_lock(&fs_info->scrub_lock);
4262 	}
4263 	atomic_dec(&fs_info->scrub_cancel_req);
4264 	mutex_unlock(&fs_info->scrub_lock);
4265 
4266 	return 0;
4267 }
4268 
4269 int btrfs_scrub_cancel_dev(struct btrfs_fs_info *fs_info,
4270 			   struct btrfs_device *dev)
4271 {
4272 	struct scrub_ctx *sctx;
4273 
4274 	mutex_lock(&fs_info->scrub_lock);
4275 	sctx = dev->scrub_device;
4276 	if (!sctx) {
4277 		mutex_unlock(&fs_info->scrub_lock);
4278 		return -ENOTCONN;
4279 	}
4280 	atomic_inc(&sctx->cancel_req);
4281 	while (dev->scrub_device) {
4282 		mutex_unlock(&fs_info->scrub_lock);
4283 		wait_event(fs_info->scrub_pause_wait,
4284 			   dev->scrub_device == NULL);
4285 		mutex_lock(&fs_info->scrub_lock);
4286 	}
4287 	mutex_unlock(&fs_info->scrub_lock);
4288 
4289 	return 0;
4290 }
4291 
4292 int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
4293 			 struct btrfs_scrub_progress *progress)
4294 {
4295 	struct btrfs_device *dev;
4296 	struct scrub_ctx *sctx = NULL;
4297 
4298 	mutex_lock(&fs_info->fs_devices->device_list_mutex);
4299 	dev = btrfs_find_device(fs_info, devid, NULL, NULL);
4300 	if (dev)
4301 		sctx = dev->scrub_device;
4302 	if (sctx)
4303 		memcpy(progress, &sctx->stat, sizeof(*progress));
4304 	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4305 
4306 	return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
4307 }
4308 
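/*
 * For dev-replace, remap a logical extent to the physical offset, device
 * and mirror number of the first stripe returned by btrfs_map_block().
 * On mapping failure the output parameters are left untouched.
 */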
4309 static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
4310 			       u64 extent_logical, u64 extent_len,
4311 			       u64 *extent_physical,
4312 			       struct btrfs_device **extent_dev,
4313 			       int *extent_mirror_num)
4314 {
4315 	u64 mapped_length;
4316 	struct btrfs_bio *bbio = NULL;
4317 	int ret;
4318 
4319 	mapped_length = extent_len;
4320 	ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, extent_logical,
4321 			      &mapped_length, &bbio, 0);
4322 	if (ret || !bbio || mapped_length < extent_len ||
4323 	    !bbio->stripes[0].dev->bdev) {
4324 		btrfs_put_bbio(bbio);
4325 		return;
4326 	}
4327 
4328 	*extent_physical = bbio->stripes[0].physical;
4329 	*extent_mirror_num = bbio->mirror_num;
4330 	*extent_dev = bbio->stripes[0].dev;
4331 	btrfs_put_bbio(bbio);
4332 }
4333 
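/*
 * Queue a worker that copies the content of a nocow extent to the
 * dev-replace target device; the actual copying is done in
 * copy_nocow_pages_worker() below.
 */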
4334 static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
4335 			    int mirror_num, u64 physical_for_dev_replace)
4336 {
4337 	struct scrub_copy_nocow_ctx *nocow_ctx;
4338 	struct btrfs_fs_info *fs_info = sctx->fs_info;
4339 
4340 	nocow_ctx = kzalloc(sizeof(*nocow_ctx), GFP_NOFS);
4341 	if (!nocow_ctx) {
4342 		spin_lock(&sctx->stat_lock);
4343 		sctx->stat.malloc_errors++;
4344 		spin_unlock(&sctx->stat_lock);
4345 		return -ENOMEM;
4346 	}
4347 
4348 	scrub_pending_trans_workers_inc(sctx);
4349 
4350 	nocow_ctx->sctx = sctx;
4351 	nocow_ctx->logical = logical;
4352 	nocow_ctx->len = len;
4353 	nocow_ctx->mirror_num = mirror_num;
4354 	nocow_ctx->physical_for_dev_replace = physical_for_dev_replace;
4355 	btrfs_init_work(&nocow_ctx->work, btrfs_scrubnc_helper,
4356 			copy_nocow_pages_worker, NULL, NULL);
4357 	INIT_LIST_HEAD(&nocow_ctx->inodes);
4358 	btrfs_queue_work(fs_info->scrub_nocow_workers,
4359 			 &nocow_ctx->work);
4360 
4361 	return 0;
4362 }
4363 
4364 static int record_inode_for_nocow(u64 inum, u64 offset, u64 root, void *ctx)
4365 {
4366 	struct scrub_copy_nocow_ctx *nocow_ctx = ctx;
4367 	struct scrub_nocow_inode *nocow_inode;
4368 
4369 	nocow_inode = kzalloc(sizeof(*nocow_inode), GFP_NOFS);
4370 	if (!nocow_inode)
4371 		return -ENOMEM;
4372 	nocow_inode->inum = inum;
4373 	nocow_inode->offset = offset;
4374 	nocow_inode->root = root;
4375 	list_add_tail(&nocow_inode->list, &nocow_ctx->inodes);
4376 	return 0;
4377 }
4378 
4379 #define COPY_COMPLETE 1
4380 
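/*
 * Worker for copy_nocow_pages(): collect all inodes referencing the
 * logical range via backref iteration, then copy the affected pages for
 * each inode in turn, stopping at the first inode that provides the
 * complete range (COPY_COMPLETE).
 */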
4381 static void copy_nocow_pages_worker(struct btrfs_work *work)
4382 {
4383 	struct scrub_copy_nocow_ctx *nocow_ctx =
4384 		container_of(work, struct scrub_copy_nocow_ctx, work);
4385 	struct scrub_ctx *sctx = nocow_ctx->sctx;
4386 	struct btrfs_fs_info *fs_info = sctx->fs_info;
4387 	struct btrfs_root *root = fs_info->extent_root;
4388 	u64 logical = nocow_ctx->logical;
4389 	u64 len = nocow_ctx->len;
4390 	int mirror_num = nocow_ctx->mirror_num;
4391 	u64 physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
4392 	int ret;
4393 	struct btrfs_trans_handle *trans = NULL;
4394 	struct btrfs_path *path;
4395 	int not_written = 0;
4396 
4397 	path = btrfs_alloc_path();
4398 	if (!path) {
4399 		spin_lock(&sctx->stat_lock);
4400 		sctx->stat.malloc_errors++;
4401 		spin_unlock(&sctx->stat_lock);
4402 		not_written = 1;
4403 		goto out;
4404 	}
4405 
4406 	trans = btrfs_join_transaction(root);
4407 	if (IS_ERR(trans)) {
4408 		not_written = 1;
4409 		goto out;
4410 	}
4411 
4412 	ret = iterate_inodes_from_logical(logical, fs_info, path,
4413 					  record_inode_for_nocow, nocow_ctx);
4414 	if (ret != 0 && ret != -ENOENT) {
4415 		btrfs_warn(fs_info,
4416 			   "iterate_inodes_from_logical() failed: log %llu, phys %llu, len %llu, mir %u, ret %d",
4417 			   logical, physical_for_dev_replace, len, mirror_num,
4418 			   ret);
4419 		not_written = 1;
4420 		goto out;
4421 	}
4422 
4423 	btrfs_end_transaction(trans);
4424 	trans = NULL;
4425 	while (!list_empty(&nocow_ctx->inodes)) {
4426 		struct scrub_nocow_inode *entry;
4427 		entry = list_first_entry(&nocow_ctx->inodes,
4428 					 struct scrub_nocow_inode,
4429 					 list);
4430 		list_del_init(&entry->list);
4431 		ret = copy_nocow_pages_for_inode(entry->inum, entry->offset,
4432 						 entry->root, nocow_ctx);
4433 		kfree(entry);
4434 		if (ret == COPY_COMPLETE) {
4435 			ret = 0;
4436 			break;
4437 		} else if (ret) {
4438 			break;
4439 		}
4440 	}
4441 out:
4442 	while (!list_empty(&nocow_ctx->inodes)) {
4443 		struct scrub_nocow_inode *entry;
4444 		entry = list_first_entry(&nocow_ctx->inodes,
4445 					 struct scrub_nocow_inode,
4446 					 list);
4447 		list_del_init(&entry->list);
4448 		kfree(entry);
4449 	}
4450 	if (trans && !IS_ERR(trans))
4451 		btrfs_end_transaction(trans);
4452 	if (not_written)
4453 		btrfs_dev_replace_stats_inc(&fs_info->dev_replace.
4454 					    num_uncorrectable_read_errors);
4455 
4456 	btrfs_free_path(path);
4457 	kfree(nocow_ctx);
4458 
4459 	scrub_pending_trans_workers_dec(sctx);
4460 }
4461 
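/*
 * Check whether the file range [start, start + len) still maps to the
 * given logical extent.  Returns 1 if an ordered extent is pending or the
 * mapping no longer covers the logical range (the caller should skip it),
 * 0 if it is safe to copy, or a negative error.
 */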
4462 static int check_extent_to_block(struct btrfs_inode *inode, u64 start, u64 len,
4463 				 u64 logical)
4464 {
4465 	struct extent_state *cached_state = NULL;
4466 	struct btrfs_ordered_extent *ordered;
4467 	struct extent_io_tree *io_tree;
4468 	struct extent_map *em;
4469 	u64 lockstart = start, lockend = start + len - 1;
4470 	int ret = 0;
4471 
4472 	io_tree = &inode->io_tree;
4473 
4474 	lock_extent_bits(io_tree, lockstart, lockend, &cached_state);
4475 	ordered = btrfs_lookup_ordered_range(inode, lockstart, len);
4476 	if (ordered) {
4477 		btrfs_put_ordered_extent(ordered);
4478 		ret = 1;
4479 		goto out_unlock;
4480 	}
4481 
4482 	em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
4483 	if (IS_ERR(em)) {
4484 		ret = PTR_ERR(em);
4485 		goto out_unlock;
4486 	}
4487 
4488 	/*
4489 	 * This extent does not actually cover the logical extent anymore,
4490 	 * move on to the next inode.
4491 	 */
4492 	if (em->block_start > logical ||
4493 	    em->block_start + em->block_len < logical + len) {
4494 		free_extent_map(em);
4495 		ret = 1;
4496 		goto out_unlock;
4497 	}
4498 	free_extent_map(em);
4499 
4500 out_unlock:
4501 	unlock_extent_cached(io_tree, lockstart, lockend, &cached_state,
4502 			     GFP_NOFS);
4503 	return ret;
4504 }
4505 
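/*
 * Copy one inode's view of the nocow range to the dev-replace target:
 * look up the inode, lock it against truncate/dio, read each page (from
 * the preferred mirror) unless it is already uptodate, re-check that the
 * extent mapping is still valid, and write the page to the target device
 * with write_page_nocow().
 */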
4506 static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
4507 				      struct scrub_copy_nocow_ctx *nocow_ctx)
4508 {
4509 	struct btrfs_fs_info *fs_info = nocow_ctx->sctx->fs_info;
4510 	struct btrfs_key key;
4511 	struct inode *inode;
4512 	struct page *page;
4513 	struct btrfs_root *local_root;
4514 	struct extent_io_tree *io_tree;
4515 	u64 physical_for_dev_replace;
4516 	u64 nocow_ctx_logical;
4517 	u64 len = nocow_ctx->len;
4518 	unsigned long index;
4519 	int srcu_index;
4520 	int ret = 0;
4521 	int err = 0;
4522 
4523 	key.objectid = root;
4524 	key.type = BTRFS_ROOT_ITEM_KEY;
4525 	key.offset = (u64)-1;
4526 
4527 	srcu_index = srcu_read_lock(&fs_info->subvol_srcu);
4528 
4529 	local_root = btrfs_read_fs_root_no_name(fs_info, &key);
4530 	if (IS_ERR(local_root)) {
4531 		srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
4532 		return PTR_ERR(local_root);
4533 	}
4534 
4535 	key.type = BTRFS_INODE_ITEM_KEY;
4536 	key.objectid = inum;
4537 	key.offset = 0;
4538 	inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
4539 	srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
4540 	if (IS_ERR(inode))
4541 		return PTR_ERR(inode);
4542 
4543 	/* Avoid truncate/dio/punch hole. */
4544 	inode_lock(inode);
4545 	inode_dio_wait(inode);
4546 
4547 	physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
4548 	io_tree = &BTRFS_I(inode)->io_tree;
4549 	nocow_ctx_logical = nocow_ctx->logical;
4550 
4551 	ret = check_extent_to_block(BTRFS_I(inode), offset, len,
4552 			nocow_ctx_logical);
4553 	if (ret) {
4554 		ret = ret > 0 ? 0 : ret;
4555 		goto out;
4556 	}
4557 
4558 	while (len >= PAGE_SIZE) {
4559 		index = offset >> PAGE_SHIFT;
4560 again:
4561 		page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
4562 		if (!page) {
4563 			btrfs_err(fs_info, "find_or_create_page() failed");
4564 			ret = -ENOMEM;
4565 			goto out;
4566 		}
4567 
4568 		if (PageUptodate(page)) {
4569 			if (PageDirty(page))
4570 				goto next_page;
4571 		} else {
4572 			ClearPageError(page);
4573 			err = extent_read_full_page(io_tree, page,
4574 							   btrfs_get_extent,
4575 							   nocow_ctx->mirror_num);
4576 			if (err) {
4577 				ret = err;
4578 				goto next_page;
4579 			}
4580 
4581 			lock_page(page);
4582 			/*
4583 			 * If the page has been removed from the page cache,
4584 			 * the data on it is meaningless, because it may be
4585 			 * the old copy; the new data may have been written
4586 			 * into a new page in the page cache.
4587 			 */
4588 			if (page->mapping != inode->i_mapping) {
4589 				unlock_page(page);
4590 				put_page(page);
4591 				goto again;
4592 			}
4593 			if (!PageUptodate(page)) {
4594 				ret = -EIO;
4595 				goto next_page;
4596 			}
4597 		}
4598 
4599 		ret = check_extent_to_block(BTRFS_I(inode), offset, len,
4600 					    nocow_ctx_logical);
4601 		if (ret) {
4602 			ret = ret > 0 ? 0 : ret;
4603 			goto next_page;
4604 		}
4605 
4606 		err = write_page_nocow(nocow_ctx->sctx,
4607 				       physical_for_dev_replace, page);
4608 		if (err)
4609 			ret = err;
4610 next_page:
4611 		unlock_page(page);
4612 		put_page(page);
4613 
4614 		if (ret)
4615 			break;
4616 
4617 		offset += PAGE_SIZE;
4618 		physical_for_dev_replace += PAGE_SIZE;
4619 		nocow_ctx_logical += PAGE_SIZE;
4620 		len -= PAGE_SIZE;
4621 	}
4622 	ret = COPY_COMPLETE;
4623 out:
4624 	inode_unlock(inode);
4625 	iput(inode);
4626 	return ret;
4627 }
4628 
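/*
 * Synchronously write a single page to the dev-replace target device at
 * the given physical offset.  Returns -EIO if the target device is
 * missing or the write fails.
 */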
4629 static int write_page_nocow(struct scrub_ctx *sctx,
4630 			    u64 physical_for_dev_replace, struct page *page)
4631 {
4632 	struct bio *bio;
4633 	struct btrfs_device *dev;
4634 	int ret;
4635 
4636 	dev = sctx->wr_tgtdev;
4637 	if (!dev)
4638 		return -EIO;
4639 	if (!dev->bdev) {
4640 		btrfs_warn_rl(dev->fs_info,
4641 			"scrub write_page_nocow(bdev == NULL) is unexpected");
4642 		return -EIO;
4643 	}
4644 	bio = btrfs_io_bio_alloc(1);
4645 	bio->bi_iter.bi_size = 0;
4646 	bio->bi_iter.bi_sector = physical_for_dev_replace >> 9;
4647 	bio_set_dev(bio, dev->bdev);
4648 	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
4649 	ret = bio_add_page(bio, page, PAGE_SIZE, 0);
4650 	if (ret != PAGE_SIZE) {
4651 leave_with_eio:
4652 		bio_put(bio);
4653 		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
4654 		return -EIO;
4655 	}
4656 
4657 	if (btrfsic_submit_bio_wait(bio))
4658 		goto leave_with_eio;
4659 
4660 	bio_put(bio);
4661 	return 0;
4662 }
4663