/*
 * Copyright (C) 2011 STRATO. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include "ctree.h"
#include "volumes.h"
#include "disk-io.h"
#include "ordered-data.h"
#include "transaction.h"
#include "backref.h"
#include "extent_io.h"
#include "check-integrity.h"

/*
 * This is only the first step towards a full-featured scrub. It reads all
 * extents and super blocks and verifies the checksums. In case a bad checksum
 * is found or the extent cannot be read, good data will be written back if
 * any can be found.
 *
 * Future enhancements:
 *  - In case an unrepairable extent is encountered, track which files are
 *    affected and report them
 *  - track and record media errors, throw out bad devices
 *  - add a mode to also read unallocated space
 */

struct scrub_block;
struct scrub_dev;

#define SCRUB_PAGES_PER_BIO		16	/* 64k per bio */
#define SCRUB_BIOS_PER_DEV		16	/* 1 MB per device in flight */
#define SCRUB_MAX_PAGES_PER_BLOCK	16	/* 64k per node/leaf/sector */
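
/*
 * Sizing example (assuming the common 4k PAGE_SIZE): one bio carries
 * SCRUB_PAGES_PER_BIO * 4k = 64k, and with SCRUB_BIOS_PER_DEV such bios
 * up to 16 * 64k = 1 MB of read I/O is in flight per device.
 */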

struct scrub_page {
	struct scrub_block	*sblock;
	struct page		*page;
	struct block_device	*bdev;
	u64			flags;  /* extent flags */
	u64			generation;
	u64			logical;
	u64			physical;
	struct {
		unsigned int	mirror_num:8;
		unsigned int	have_csum:1;
		unsigned int	io_error:1;
	};
	u8			csum[BTRFS_CSUM_SIZE];
};

struct scrub_bio {
	int			index;
	struct scrub_dev	*sdev;
	struct bio		*bio;
	int			err;
	u64			logical;
	u64			physical;
	struct scrub_page	*pagev[SCRUB_PAGES_PER_BIO];
	int			page_count;
	int			next_free;
	struct btrfs_work	work;
};

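/*
 * A scrub_block groups the pages of one verification unit (a data sector
 * or a tree node/leaf, hence SCRUB_MAX_PAGES_PER_BLOCK); checksums are
 * verified per block, while I/O errors are tracked per page.
 */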
struct scrub_block {
	struct scrub_page	pagev[SCRUB_MAX_PAGES_PER_BLOCK];
	int			page_count;
	atomic_t		outstanding_pages;
	atomic_t		ref_count; /* free mem on transition to zero */
	struct scrub_dev	*sdev;
	struct {
		unsigned int	header_error:1;
		unsigned int	checksum_error:1;
		unsigned int	no_io_error_seen:1;
	};
};

struct scrub_dev {
	struct scrub_bio	*bios[SCRUB_BIOS_PER_DEV];
	struct btrfs_device	*dev;
	int			first_free;
	int			curr;
	atomic_t		in_flight;
	atomic_t		fixup_cnt;
	spinlock_t		list_lock;
	wait_queue_head_t	list_wait;
	u16			csum_size;
	struct list_head	csum_list;
	atomic_t		cancel_req;
	int			readonly;
	int			pages_per_bio; /* <= SCRUB_PAGES_PER_BIO */
	u32			sectorsize;
	u32			nodesize;
	u32			leafsize;
	/*
	 * statistics
	 */
	struct btrfs_scrub_progress stat;
	spinlock_t		stat_lock;
};

struct scrub_fixup_nodatasum {
	struct scrub_dev	*sdev;
	u64			logical;
	struct btrfs_root	*root;
	struct btrfs_work	work;
	int			mirror_num;
};

struct scrub_warning {
	struct btrfs_path	*path;
	u64			extent_item_size;
	char			*scratch_buf;
	char			*msg_buf;
	const char		*errstr;
	sector_t		sector;
	u64			logical;
	struct btrfs_device	*dev;
	int			msg_bufsize;
	int			scratch_bufsize;
};


static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
static int scrub_setup_recheck_block(struct scrub_dev *sdev,
				     struct btrfs_mapping_tree *map_tree,
				     u64 length, u64 logical,
				     struct scrub_block *sblock);
static int scrub_recheck_block(struct btrfs_fs_info *fs_info,
			       struct scrub_block *sblock, int is_metadata,
			       int have_csum, u8 *csum, u64 generation,
			       u16 csum_size);
static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
					 struct scrub_block *sblock,
					 int is_metadata, int have_csum,
					 const u8 *csum, u64 generation,
					 u16 csum_size);
static void scrub_complete_bio_end_io(struct bio *bio, int err);
static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
					     struct scrub_block *sblock_good,
					     int force_write);
static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
					    struct scrub_block *sblock_good,
					    int page_num, int force_write);
static int scrub_checksum_data(struct scrub_block *sblock);
static int scrub_checksum_tree_block(struct scrub_block *sblock);
static int scrub_checksum_super(struct scrub_block *sblock);
static void scrub_block_get(struct scrub_block *sblock);
static void scrub_block_put(struct scrub_block *sblock);
static int scrub_add_page_to_bio(struct scrub_dev *sdev,
				 struct scrub_page *spage);
static int scrub_pages(struct scrub_dev *sdev, u64 logical, u64 len,
		       u64 physical, u64 flags, u64 gen, int mirror_num,
		       u8 *csum, int force);
static void scrub_bio_end_io(struct bio *bio, int err);
static void scrub_bio_end_io_worker(struct btrfs_work *work);
static void scrub_block_complete(struct scrub_block *sblock);


static void scrub_free_csums(struct scrub_dev *sdev)
{
	while (!list_empty(&sdev->csum_list)) {
		struct btrfs_ordered_sum *sum;
		sum = list_first_entry(&sdev->csum_list,
				       struct btrfs_ordered_sum, list);
		list_del(&sum->list);
		kfree(sum);
	}
}

static noinline_for_stack void scrub_free_dev(struct scrub_dev *sdev)
{
	int i;

	if (!sdev)
		return;

	/* this can happen when scrub is cancelled */
	if (sdev->curr != -1) {
		struct scrub_bio *sbio = sdev->bios[sdev->curr];

		for (i = 0; i < sbio->page_count; i++) {
			BUG_ON(!sbio->pagev[i]);
			BUG_ON(!sbio->pagev[i]->page);
			scrub_block_put(sbio->pagev[i]->sblock);
		}
		bio_put(sbio->bio);
	}

	for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) {
		struct scrub_bio *sbio = sdev->bios[i];

		if (!sbio)
			break;
		kfree(sbio);
	}

	scrub_free_csums(sdev);
	kfree(sdev);
}

static noinline_for_stack
struct scrub_dev *scrub_setup_dev(struct btrfs_device *dev)
{
	struct scrub_dev *sdev;
	int i;
	struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
	int pages_per_bio;

	pages_per_bio = min_t(int, SCRUB_PAGES_PER_BIO,
			      bio_get_nr_vecs(dev->bdev));
	sdev = kzalloc(sizeof(*sdev), GFP_NOFS);
	if (!sdev)
		goto nomem;
	sdev->dev = dev;
	sdev->pages_per_bio = pages_per_bio;
	sdev->curr = -1;
	for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) {
		struct scrub_bio *sbio;

		sbio = kzalloc(sizeof(*sbio), GFP_NOFS);
		if (!sbio)
			goto nomem;
		sdev->bios[i] = sbio;

		sbio->index = i;
		sbio->sdev = sdev;
		sbio->page_count = 0;
		sbio->work.func = scrub_bio_end_io_worker;

		if (i != SCRUB_BIOS_PER_DEV-1)
			sdev->bios[i]->next_free = i + 1;
		else
			sdev->bios[i]->next_free = -1;
	}
	sdev->first_free = 0;
	sdev->nodesize = dev->dev_root->nodesize;
	sdev->leafsize = dev->dev_root->leafsize;
	sdev->sectorsize = dev->dev_root->sectorsize;
	atomic_set(&sdev->in_flight, 0);
	atomic_set(&sdev->fixup_cnt, 0);
	atomic_set(&sdev->cancel_req, 0);
	sdev->csum_size = btrfs_super_csum_size(fs_info->super_copy);
	INIT_LIST_HEAD(&sdev->csum_list);

	spin_lock_init(&sdev->list_lock);
	spin_lock_init(&sdev->stat_lock);
	init_waitqueue_head(&sdev->list_wait);
	return sdev;

nomem:
	scrub_free_dev(sdev);
	return ERR_PTR(-ENOMEM);
}

static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, void *ctx)
{
	u64 isize;
	u32 nlink;
	int ret;
	int i;
	struct extent_buffer *eb;
	struct btrfs_inode_item *inode_item;
	struct scrub_warning *swarn = ctx;
	struct btrfs_fs_info *fs_info = swarn->dev->dev_root->fs_info;
	struct inode_fs_paths *ipath = NULL;
	struct btrfs_root *local_root;
	struct btrfs_key root_key;

	root_key.objectid = root;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;
	local_root = btrfs_read_fs_root_no_name(fs_info, &root_key);
	if (IS_ERR(local_root)) {
		ret = PTR_ERR(local_root);
		goto err;
	}

	ret = inode_item_info(inum, 0, local_root, swarn->path);
	if (ret) {
		btrfs_release_path(swarn->path);
		goto err;
	}

	eb = swarn->path->nodes[0];
	inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
				    struct btrfs_inode_item);
	isize = btrfs_inode_size(eb, inode_item);
	nlink = btrfs_inode_nlink(eb, inode_item);
	btrfs_release_path(swarn->path);

	ipath = init_ipath(4096, local_root, swarn->path);
	if (IS_ERR(ipath)) {
		ret = PTR_ERR(ipath);
		ipath = NULL;
		goto err;
	}
	ret = paths_from_inode(inum, ipath);

	if (ret < 0)
		goto err;

	/*
	 * we deliberately ignore the fact that ipath might have been too
	 * small to hold all of the paths here
	 */
	for (i = 0; i < ipath->fspath->elem_cnt; ++i)
		printk(KERN_WARNING "btrfs: %s at logical %llu on dev "
			"%s, sector %llu, root %llu, inode %llu, offset %llu, "
			"length %llu, links %u (path: %s)\n", swarn->errstr,
			swarn->logical, swarn->dev->name,
			(unsigned long long)swarn->sector, root, inum, offset,
			min(isize - offset, (u64)PAGE_SIZE), nlink,
			(char *)(unsigned long)ipath->fspath->val[i]);

	free_ipath(ipath);
	return 0;

err:
	printk(KERN_WARNING "btrfs: %s at logical %llu on dev "
		"%s, sector %llu, root %llu, inode %llu, offset %llu: path "
		"resolving failed with ret=%d\n", swarn->errstr,
		swarn->logical, swarn->dev->name,
		(unsigned long long)swarn->sector, root, inum, offset, ret);

	free_ipath(ipath);
	return 0;
}

static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
{
	struct btrfs_device *dev = sblock->sdev->dev;
	struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
	struct btrfs_path *path;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct scrub_warning swarn;
	u32 item_size;
	int ret;
	u64 ref_root;
	u8 ref_level;
	unsigned long ptr = 0;
	const int bufsize = 4096;
	u64 extent_item_pos;

	path = btrfs_alloc_path();

	swarn.scratch_buf = kmalloc(bufsize, GFP_NOFS);
	swarn.msg_buf = kmalloc(bufsize, GFP_NOFS);
	BUG_ON(sblock->page_count < 1);
	swarn.sector = (sblock->pagev[0].physical) >> 9;
	swarn.logical = sblock->pagev[0].logical;
	swarn.errstr = errstr;
	swarn.dev = dev;
	swarn.msg_bufsize = bufsize;
	swarn.scratch_bufsize = bufsize;

	if (!path || !swarn.scratch_buf || !swarn.msg_buf)
		goto out;

	ret = extent_from_logical(fs_info, swarn.logical, path, &found_key);
	if (ret < 0)
		goto out;

	extent_item_pos = swarn.logical - found_key.objectid;
	swarn.extent_item_size = found_key.offset;

	eb = path->nodes[0];
	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	item_size = btrfs_item_size_nr(eb, path->slots[0]);

	if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		do {
			ret = tree_backref_for_extent(&ptr, eb, ei, item_size,
						      &ref_root, &ref_level);
			printk(KERN_WARNING
				"btrfs: %s at logical %llu on dev %s, "
				"sector %llu: metadata %s (level %d) in tree "
				"%llu\n", errstr, swarn.logical, dev->name,
				(unsigned long long)swarn.sector,
				ref_level ? "node" : "leaf",
				ret < 0 ? -1 : ref_level,
				ret < 0 ? -1 : ref_root);
		} while (ret != 1);
		btrfs_release_path(path);
	} else {
		btrfs_release_path(path);
		swarn.path = path;
		iterate_extent_inodes(fs_info, found_key.objectid,
				      extent_item_pos, 1,
				      scrub_print_warning_inode, &swarn);
	}

out:
	btrfs_free_path(path);
	kfree(swarn.scratch_buf);
	kfree(swarn.msg_buf);
}

static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *ctx)
{
	struct page *page = NULL;
	unsigned long index;
	struct scrub_fixup_nodatasum *fixup = ctx;
	int ret;
	int corrected = 0;
	struct btrfs_key key;
	struct inode *inode = NULL;
	u64 end = offset + PAGE_SIZE - 1;
	struct btrfs_root *local_root;

	key.objectid = root;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;
	local_root = btrfs_read_fs_root_no_name(fixup->root->fs_info, &key);
	if (IS_ERR(local_root))
		return PTR_ERR(local_root);

	key.type = BTRFS_INODE_ITEM_KEY;
	key.objectid = inum;
	key.offset = 0;
	inode = btrfs_iget(fixup->root->fs_info->sb, &key, local_root, NULL);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	index = offset >> PAGE_CACHE_SHIFT;

	page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
	if (!page) {
		ret = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page)) {
		struct btrfs_mapping_tree *map_tree;
		if (PageDirty(page)) {
			/*
			 * we need to write the data to the defect sector. the
			 * data that was in that sector is not in memory,
			 * because the page was modified. we must not write the
			 * modified page to that sector.
			 *
			 * TODO: what could be done here: wait for the delalloc
			 *       runner to write out that page (might involve
			 *       COW) and see whether the sector is still
			 *       referenced afterwards.
			 *
			 * In the meantime, we'll treat this error as
			 * uncorrectable, although there is a chance that a
			 * later scrub will find the bad sector again and that
			 * there's no dirty page in memory then.
			 */
			ret = -EIO;
			goto out;
		}
		map_tree = &BTRFS_I(inode)->root->fs_info->mapping_tree;
		ret = repair_io_failure(map_tree, offset, PAGE_SIZE,
					fixup->logical, page,
					fixup->mirror_num);
		unlock_page(page);
		corrected = !ret;
	} else {
		/*
		 * we need to get good data first. the general readpage path
		 * will call repair_io_failure for us, we just have to make
		 * sure we read the bad mirror.
		 */
		ret = set_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
				      EXTENT_DAMAGED, GFP_NOFS);
		if (ret) {
			/* set_extent_bits should give proper error */
			WARN_ON(ret > 0);
			if (ret > 0)
				ret = -EFAULT;
			goto out;
		}

		ret = extent_read_full_page(&BTRFS_I(inode)->io_tree, page,
					    btrfs_get_extent,
					    fixup->mirror_num);
		wait_on_page_locked(page);

		corrected = !test_range_bit(&BTRFS_I(inode)->io_tree, offset,
					    end, EXTENT_DAMAGED, 0, NULL);
		if (!corrected)
			clear_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
					  EXTENT_DAMAGED, GFP_NOFS);
	}

out:
	if (page)
		put_page(page);
	if (inode)
		iput(inode);

	if (ret < 0)
		return ret;

	if (ret == 0 && corrected) {
		/*
		 * we only need to call readpage for one of the inodes belonging
		 * to this extent. so make iterate_extent_inodes stop
		 */
		return 1;
	}

	return -EIO;
}

static void scrub_fixup_nodatasum(struct btrfs_work *work)
{
	int ret;
	struct scrub_fixup_nodatasum *fixup;
	struct scrub_dev *sdev;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_fs_info *fs_info;
	struct btrfs_path *path;
	int uncorrectable = 0;

	fixup = container_of(work, struct scrub_fixup_nodatasum, work);
	sdev = fixup->sdev;
	fs_info = fixup->root->fs_info;

	path = btrfs_alloc_path();
	if (!path) {
		spin_lock(&sdev->stat_lock);
		++sdev->stat.malloc_errors;
		spin_unlock(&sdev->stat_lock);
		uncorrectable = 1;
		goto out;
	}

	trans = btrfs_join_transaction(fixup->root);
	if (IS_ERR(trans)) {
		uncorrectable = 1;
		goto out;
	}

	/*
	 * the idea is to trigger a regular read through the standard path. we
	 * read a page from the (failed) logical address by specifying the
	 * corresponding copynum of the failed sector. thus, that readpage is
	 * expected to fail.
	 * that is the point where on-the-fly error correction will kick in
	 * (once the read is finished) and rewrite the failed sector if a good
	 * copy can be found.
	 */
	ret = iterate_inodes_from_logical(fixup->logical, fixup->root->fs_info,
					  path, scrub_fixup_readpage,
					  fixup);
	if (ret < 0) {
		uncorrectable = 1;
		goto out;
	}
	WARN_ON(ret != 1);

	spin_lock(&sdev->stat_lock);
	++sdev->stat.corrected_errors;
	spin_unlock(&sdev->stat_lock);

out:
	if (trans && !IS_ERR(trans))
		btrfs_end_transaction(trans, fixup->root);
	if (uncorrectable) {
		spin_lock(&sdev->stat_lock);
		++sdev->stat.uncorrectable_errors;
		spin_unlock(&sdev->stat_lock);
		printk_ratelimited(KERN_ERR
			"btrfs: unable to fixup (nodatasum) error at logical %llu on dev %s\n",
			(unsigned long long)fixup->logical, sdev->dev->name);
	}

	btrfs_free_path(path);
	kfree(fixup);

	/* see caller why we're pretending to be paused in the scrub counters */
	mutex_lock(&fs_info->scrub_lock);
	atomic_dec(&fs_info->scrubs_running);
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);
	atomic_dec(&sdev->fixup_cnt);
	wake_up(&fs_info->scrub_pause_wait);
	wake_up(&sdev->list_wait);
}

/*
 * scrub_handle_errored_block gets called when either verification of the
 * pages failed or the bio failed to read, e.g. with EIO. In the latter
 * case, this function handles all pages in the bio, even though only one
 * may be bad.
 * The goal of this function is to repair the errored block by using the
 * contents of one of the mirrors.
 */
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
{
	struct scrub_dev *sdev = sblock_to_check->sdev;
	struct btrfs_fs_info *fs_info;
	u64 length;
	u64 logical;
	u64 generation;
	unsigned int failed_mirror_index;
	unsigned int is_metadata;
	unsigned int have_csum;
	u8 *csum;
	struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */
	struct scrub_block *sblock_bad;
	int ret;
	int mirror_index;
	int page_num;
	int success;
	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	BUG_ON(sblock_to_check->page_count < 1);
	fs_info = sdev->dev->dev_root->fs_info;
	length = sblock_to_check->page_count * PAGE_SIZE;
	logical = sblock_to_check->pagev[0].logical;
	generation = sblock_to_check->pagev[0].generation;
	BUG_ON(sblock_to_check->pagev[0].mirror_num < 1);
	failed_mirror_index = sblock_to_check->pagev[0].mirror_num - 1;
	is_metadata = !(sblock_to_check->pagev[0].flags &
			BTRFS_EXTENT_FLAG_DATA);
	have_csum = sblock_to_check->pagev[0].have_csum;
	csum = sblock_to_check->pagev[0].csum;

	/*
	 * read all mirrors one after the other. This includes re-reading
	 * the extent or metadata block that failed (that was the cause
	 * that this fixup code is called) another time, page by page this
	 * time in order to know which pages caused I/O errors and which
	 * ones are good (for all mirrors).
	 * The goal is to handle the situation when more than one mirror
	 * contains I/O errors, but the errors do not overlap, i.e. the
	 * data can be repaired by selecting the pages from those mirrors
	 * without I/O error on the particular pages. One example (with
	 * blocks >= 2 * PAGE_SIZE) would be that mirror #1 has an I/O
	 * error on the first page, the second page is good, and mirror #2
	 * has an I/O error on the second page, but the first page is good.
	 * Then the first page of the first mirror can be repaired by
	 * taking the first page of the second mirror, and the second
	 * page of the second mirror can be repaired by copying the
	 * contents of the 2nd page of the 1st mirror.
	 * One more note: if the pages of one mirror contain I/O errors,
	 * the checksum cannot be verified. In order to get the best data
	 * for repairing, the first attempt is to find a mirror without
	 * I/O errors and with a validated checksum. Only if this is not
	 * possible, the pages are picked from mirrors with I/O errors
	 * without considering the checksum.
	 * If the latter is the case, at the end, the checksum of the
	 * repaired area is verified in order to correctly maintain
	 * the statistics.
	 */
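	/*
	 * The example above as a sketch (a block of two pages, two mirrors):
	 *
	 *              page 1       page 2
	 *   mirror #1  I/O error    good
	 *   mirror #2  good         I/O error
	 *
	 * -> take page 1 from mirror #2 and page 2 from mirror #1 to
	 *    reconstruct one fully readable copy of the block.
	 */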

	sblocks_for_recheck = kzalloc(BTRFS_MAX_MIRRORS *
				      sizeof(*sblocks_for_recheck),
				      GFP_NOFS);
	if (!sblocks_for_recheck) {
		spin_lock(&sdev->stat_lock);
		sdev->stat.malloc_errors++;
		sdev->stat.read_errors++;
		sdev->stat.uncorrectable_errors++;
		spin_unlock(&sdev->stat_lock);
		goto out;
	}

	/* setup the context, map the logical blocks and alloc the pages */
	ret = scrub_setup_recheck_block(sdev, &fs_info->mapping_tree, length,
					logical, sblocks_for_recheck);
	if (ret) {
		spin_lock(&sdev->stat_lock);
		sdev->stat.read_errors++;
		sdev->stat.uncorrectable_errors++;
		spin_unlock(&sdev->stat_lock);
		goto out;
	}
	BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
	sblock_bad = sblocks_for_recheck + failed_mirror_index;

	/* build and submit the bios for the failed mirror, check checksums */
	ret = scrub_recheck_block(fs_info, sblock_bad, is_metadata, have_csum,
				  csum, generation, sdev->csum_size);
	if (ret) {
		spin_lock(&sdev->stat_lock);
		sdev->stat.read_errors++;
		sdev->stat.uncorrectable_errors++;
		spin_unlock(&sdev->stat_lock);
		goto out;
	}

	if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
	    sblock_bad->no_io_error_seen) {
		/*
		 * the error disappeared after reading page by page, or
		 * the area was part of a huge bio and other parts of the
		 * bio caused I/O errors, or the block layer merged several
		 * read requests into one and the error is caused by a
		 * different bio (usually one of the two latter cases is
		 * the cause)
		 */
		spin_lock(&sdev->stat_lock);
		sdev->stat.unverified_errors++;
		spin_unlock(&sdev->stat_lock);

		goto out;
	}

	if (!sblock_bad->no_io_error_seen) {
		spin_lock(&sdev->stat_lock);
		sdev->stat.read_errors++;
		spin_unlock(&sdev->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("i/o error", sblock_to_check);
	} else if (sblock_bad->checksum_error) {
		spin_lock(&sdev->stat_lock);
		sdev->stat.csum_errors++;
		spin_unlock(&sdev->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("checksum error", sblock_to_check);
	} else if (sblock_bad->header_error) {
		spin_lock(&sdev->stat_lock);
		sdev->stat.verify_errors++;
		spin_unlock(&sdev->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("checksum/header error",
					    sblock_to_check);
	}

	if (sdev->readonly)
		goto did_not_correct_error;

	if (!is_metadata && !have_csum) {
		struct scrub_fixup_nodatasum *fixup_nodatasum;

		/*
		 * !is_metadata and !have_csum, this means that the data
		 * might not be COW'ed, that it might be modified
		 * concurrently. The general strategy to work on the
		 * commit root does not help in the case when COW is not
		 * used.
		 */
		fixup_nodatasum = kzalloc(sizeof(*fixup_nodatasum), GFP_NOFS);
		if (!fixup_nodatasum)
			goto did_not_correct_error;
		fixup_nodatasum->sdev = sdev;
		fixup_nodatasum->logical = logical;
		fixup_nodatasum->root = fs_info->extent_root;
		fixup_nodatasum->mirror_num = failed_mirror_index + 1;
		/*
		 * increment scrubs_running to prevent cancel requests from
		 * completing as long as a fixup worker is running. we must also
		 * increment scrubs_paused to prevent deadlocking on pause
		 * requests used for transaction commits (as the worker uses a
		 * transaction context). it is safe to regard the fixup worker
		 * as paused for all practical matters. effectively, we only
		 * avoid cancellation requests from completing.
		 */
		mutex_lock(&fs_info->scrub_lock);
		atomic_inc(&fs_info->scrubs_running);
		atomic_inc(&fs_info->scrubs_paused);
		mutex_unlock(&fs_info->scrub_lock);
		atomic_inc(&sdev->fixup_cnt);
		fixup_nodatasum->work.func = scrub_fixup_nodatasum;
		btrfs_queue_worker(&fs_info->scrub_workers,
				   &fixup_nodatasum->work);
		goto out;
	}

	/*
	 * now build and submit the bios for the other mirrors, check
	 * checksums
	 */
	for (mirror_index = 0;
	     mirror_index < BTRFS_MAX_MIRRORS &&
	     sblocks_for_recheck[mirror_index].page_count > 0;
	     mirror_index++) {
		if (mirror_index == failed_mirror_index)
			continue;

		/* build and submit the bios, check checksums */
		ret = scrub_recheck_block(fs_info,
					  sblocks_for_recheck + mirror_index,
					  is_metadata, have_csum, csum,
					  generation, sdev->csum_size);
		if (ret)
			goto did_not_correct_error;
	}

	/*
	 * first try to pick the mirror which is completely without I/O
	 * errors and also does not have a checksum error.
	 * If one is found, and if a checksum is present, the full block
	 * that is known to contain an error is rewritten. Afterwards
	 * the block is known to be corrected.
	 * If a mirror is found which is completely correct, and no
	 * checksum is present, only those pages are rewritten that had
	 * an I/O error in the block to be repaired, since it cannot be
	 * determined which copy of the other pages is better (and it
	 * could happen otherwise that a correct page would be
	 * overwritten by a bad one).
	 */
	for (mirror_index = 0;
	     mirror_index < BTRFS_MAX_MIRRORS &&
	     sblocks_for_recheck[mirror_index].page_count > 0;
	     mirror_index++) {
		struct scrub_block *sblock_other = sblocks_for_recheck +
						   mirror_index;

		if (!sblock_other->header_error &&
		    !sblock_other->checksum_error &&
		    sblock_other->no_io_error_seen) {
			int force_write = is_metadata || have_csum;

			ret = scrub_repair_block_from_good_copy(sblock_bad,
								sblock_other,
								force_write);
			if (0 == ret)
				goto corrected_error;
		}
	}

	/*
	 * in case of I/O errors in the area that is supposed to be
	 * repaired, continue by picking good copies of those pages.
	 * Select the good pages from mirrors to rewrite bad pages from
	 * the area to fix. Afterwards verify the checksum of the block
	 * that is supposed to be repaired. This verification step is
	 * only done for the purpose of statistic counting and for the
	 * final scrub report, whether errors remain.
	 * A perfect algorithm could make use of the checksum and try
	 * all possible combinations of pages from the different mirrors
	 * until the checksum verification succeeds. For example, when
	 * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
	 * of mirror #2 is readable but the final checksum test fails,
	 * then the 2nd page of mirror #3 could be tried, to see whether
	 * the final checksum then succeeds. But this would be a rare
	 * exception and is therefore not implemented. At least it is
	 * avoided that the good copy is overwritten.
	 * A more useful improvement would be to pick the sectors
	 * without I/O error based on sector sizes (512 bytes on legacy
	 * disks) instead of on PAGE_SIZE. Then maybe 512 bytes of one
	 * mirror could be repaired by taking 512 bytes of a different
	 * mirror, even if other 512 byte sectors in the same PAGE_SIZE
	 * area are unreadable.
	 */

	/* can only fix I/O errors from here on */
	if (sblock_bad->no_io_error_seen)
		goto did_not_correct_error;

	success = 1;
	for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
		struct scrub_page *page_bad = sblock_bad->pagev + page_num;

		if (!page_bad->io_error)
			continue;

		for (mirror_index = 0;
		     mirror_index < BTRFS_MAX_MIRRORS &&
		     sblocks_for_recheck[mirror_index].page_count > 0;
		     mirror_index++) {
			struct scrub_block *sblock_other = sblocks_for_recheck +
							   mirror_index;
			struct scrub_page *page_other = sblock_other->pagev +
							page_num;

			if (!page_other->io_error) {
				ret = scrub_repair_page_from_good_copy(
					sblock_bad, sblock_other, page_num, 0);
				if (0 == ret) {
					page_bad->io_error = 0;
					break; /* succeeded for this page */
				}
			}
		}

		if (page_bad->io_error) {
			/* did not find a mirror to copy the page from */
			success = 0;
		}
	}

	if (success) {
		if (is_metadata || have_csum) {
			/*
			 * need to verify the checksum now that all
			 * sectors on disk are repaired (the write
			 * request for data to be repaired is on its way).
			 * Just be lazy and use scrub_recheck_block()
			 * which re-reads the data before the checksum
			 * is verified, but most likely the data comes out
			 * of the page cache.
			 */
			ret = scrub_recheck_block(fs_info, sblock_bad,
						  is_metadata, have_csum, csum,
						  generation, sdev->csum_size);
			if (!ret && !sblock_bad->header_error &&
			    !sblock_bad->checksum_error &&
			    sblock_bad->no_io_error_seen)
				goto corrected_error;
			else
				goto did_not_correct_error;
		} else {
corrected_error:
			spin_lock(&sdev->stat_lock);
			sdev->stat.corrected_errors++;
			spin_unlock(&sdev->stat_lock);
			printk_ratelimited(KERN_ERR
				"btrfs: fixed up error at logical %llu on dev %s\n",
				(unsigned long long)logical, sdev->dev->name);
		}
	} else {
did_not_correct_error:
		spin_lock(&sdev->stat_lock);
		sdev->stat.uncorrectable_errors++;
		spin_unlock(&sdev->stat_lock);
		printk_ratelimited(KERN_ERR
			"btrfs: unable to fixup (regular) error at logical %llu on dev %s\n",
			(unsigned long long)logical, sdev->dev->name);
	}

out:
	if (sblocks_for_recheck) {
		for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS;
		     mirror_index++) {
			struct scrub_block *sblock = sblocks_for_recheck +
						     mirror_index;
			int page_index;

			for (page_index = 0; page_index < SCRUB_PAGES_PER_BIO;
			     page_index++)
				if (sblock->pagev[page_index].page)
					__free_page(
						sblock->pagev[page_index].page);
		}
		kfree(sblocks_for_recheck);
	}

	return 0;
}

static int scrub_setup_recheck_block(struct scrub_dev *sdev,
				     struct btrfs_mapping_tree *map_tree,
				     u64 length, u64 logical,
				     struct scrub_block *sblocks_for_recheck)
{
	int page_index;
	int mirror_index;
	int ret;

	/*
	 * note: the three members sdev, ref_count and outstanding_pages
	 * are not used (and not set) in the blocks that are used for
	 * the recheck procedure
	 */

	page_index = 0;
	while (length > 0) {
		u64 sublen = min_t(u64, length, PAGE_SIZE);
		u64 mapped_length = sublen;
		struct btrfs_bio *bbio = NULL;

		/*
		 * with a length of PAGE_SIZE, each returned stripe
		 * represents one mirror
		 */
		ret = btrfs_map_block(map_tree, WRITE, logical, &mapped_length,
				      &bbio, 0);
		if (ret || !bbio || mapped_length < sublen) {
			kfree(bbio);
			return -EIO;
		}

		BUG_ON(page_index >= SCRUB_PAGES_PER_BIO);
		for (mirror_index = 0; mirror_index < (int)bbio->num_stripes;
		     mirror_index++) {
			struct scrub_block *sblock;
			struct scrub_page *page;

			if (mirror_index >= BTRFS_MAX_MIRRORS)
				continue;

			sblock = sblocks_for_recheck + mirror_index;
			page = sblock->pagev + page_index;
			page->logical = logical;
			page->physical = bbio->stripes[mirror_index].physical;
			/* for missing devices, bdev is NULL */
			page->bdev = bbio->stripes[mirror_index].dev->bdev;
			page->mirror_num = mirror_index + 1;
			page->page = alloc_page(GFP_NOFS);
			if (!page->page) {
				spin_lock(&sdev->stat_lock);
				sdev->stat.malloc_errors++;
				spin_unlock(&sdev->stat_lock);
				return -ENOMEM;
			}
			sblock->page_count++;
		}
		kfree(bbio);
		length -= sublen;
		logical += sublen;
		page_index++;
	}

	return 0;
}

/*
 * this function will check the on disk data for checksum errors, header
 * errors and read I/O errors. If any I/O errors happen, the exact pages
 * that encountered errors are marked as bad. The goal is to enable scrub
 * to take those pages that are not errored from all the mirrors so that
 * the pages that are errored in the just handled mirror can be repaired.
 */
static int scrub_recheck_block(struct btrfs_fs_info *fs_info,
			       struct scrub_block *sblock, int is_metadata,
			       int have_csum, u8 *csum, u64 generation,
			       u16 csum_size)
{
	int page_num;

	sblock->no_io_error_seen = 1;
	sblock->header_error = 0;
	sblock->checksum_error = 0;

	for (page_num = 0; page_num < sblock->page_count; page_num++) {
		struct bio *bio;
		int ret;
		struct scrub_page *page = sblock->pagev + page_num;
		DECLARE_COMPLETION_ONSTACK(complete);

		if (page->bdev == NULL) {
			page->io_error = 1;
			sblock->no_io_error_seen = 0;
			continue;
		}

		BUG_ON(!page->page);
		bio = bio_alloc(GFP_NOFS, 1);
		if (!bio)
			return -EIO;
		bio->bi_bdev = page->bdev;
		bio->bi_sector = page->physical >> 9;
		bio->bi_end_io = scrub_complete_bio_end_io;
		bio->bi_private = &complete;

		ret = bio_add_page(bio, page->page, PAGE_SIZE, 0);
		if (PAGE_SIZE != ret) {
			bio_put(bio);
			return -EIO;
		}
		btrfsic_submit_bio(READ, bio);

		/* this will also unplug the queue */
		wait_for_completion(&complete);

		page->io_error = !test_bit(BIO_UPTODATE, &bio->bi_flags);
		if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
			sblock->no_io_error_seen = 0;
		bio_put(bio);
	}

	if (sblock->no_io_error_seen)
		scrub_recheck_block_checksum(fs_info, sblock, is_metadata,
					     have_csum, csum, generation,
					     csum_size);

	return 0;
}

static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
					 struct scrub_block *sblock,
					 int is_metadata, int have_csum,
					 const u8 *csum, u64 generation,
					 u16 csum_size)
{
	int page_num;
	u8 calculated_csum[BTRFS_CSUM_SIZE];
	u32 crc = ~(u32)0;
	struct btrfs_root *root = fs_info->extent_root;
	void *mapped_buffer;

	BUG_ON(!sblock->pagev[0].page);
	if (is_metadata) {
		struct btrfs_header *h;

		mapped_buffer = kmap_atomic(sblock->pagev[0].page);
		h = (struct btrfs_header *)mapped_buffer;

		if (sblock->pagev[0].logical != le64_to_cpu(h->bytenr) ||
		    generation != le64_to_cpu(h->generation) ||
		    memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE) ||
		    memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
			   BTRFS_UUID_SIZE))
			sblock->header_error = 1;
		csum = h->csum;
	} else {
		if (!have_csum)
			return;

		mapped_buffer = kmap_atomic(sblock->pagev[0].page);
	}

	for (page_num = 0;;) {
		if (page_num == 0 && is_metadata)
			crc = btrfs_csum_data(root,
				((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE,
				crc, PAGE_SIZE - BTRFS_CSUM_SIZE);
		else
			crc = btrfs_csum_data(root, mapped_buffer, crc,
					      PAGE_SIZE);

		kunmap_atomic(mapped_buffer);
		page_num++;
		if (page_num >= sblock->page_count)
			break;
		BUG_ON(!sblock->pagev[page_num].page);

		mapped_buffer = kmap_atomic(sblock->pagev[page_num].page);
	}

	btrfs_csum_final(crc, calculated_csum);
	if (memcmp(calculated_csum, csum, csum_size))
		sblock->checksum_error = 1;
}

static void scrub_complete_bio_end_io(struct bio *bio, int err)
{
	complete((struct completion *)bio->bi_private);
}

static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
					     struct scrub_block *sblock_good,
					     int force_write)
{
	int page_num;
	int ret = 0;

	for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
		int ret_sub;

		ret_sub = scrub_repair_page_from_good_copy(sblock_bad,
							   sblock_good,
							   page_num,
							   force_write);
		if (ret_sub)
			ret = ret_sub;
	}

	return ret;
}

static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
					    struct scrub_block *sblock_good,
					    int page_num, int force_write)
{
	struct scrub_page *page_bad = sblock_bad->pagev + page_num;
	struct scrub_page *page_good = sblock_good->pagev + page_num;

	BUG_ON(sblock_bad->pagev[page_num].page == NULL);
	BUG_ON(sblock_good->pagev[page_num].page == NULL);
	if (force_write || sblock_bad->header_error ||
	    sblock_bad->checksum_error || page_bad->io_error) {
		struct bio *bio;
		int ret;
		DECLARE_COMPLETION_ONSTACK(complete);

		bio = bio_alloc(GFP_NOFS, 1);
		if (!bio)
			return -EIO;
		bio->bi_bdev = page_bad->bdev;
		bio->bi_sector = page_bad->physical >> 9;
		bio->bi_end_io = scrub_complete_bio_end_io;
		bio->bi_private = &complete;

		ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
		if (PAGE_SIZE != ret) {
			bio_put(bio);
			return -EIO;
		}
		btrfsic_submit_bio(WRITE, bio);

		/* this will also unplug the queue */
		wait_for_completion(&complete);
		bio_put(bio);
	}

	return 0;
}

static void scrub_checksum(struct scrub_block *sblock)
{
	u64 flags;
	int ret;

	BUG_ON(sblock->page_count < 1);
	flags = sblock->pagev[0].flags;
	ret = 0;
	if (flags & BTRFS_EXTENT_FLAG_DATA)
		ret = scrub_checksum_data(sblock);
	else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		ret = scrub_checksum_tree_block(sblock);
	else if (flags & BTRFS_EXTENT_FLAG_SUPER)
		(void)scrub_checksum_super(sblock);
	else
		WARN_ON(1);
	if (ret)
		scrub_handle_errored_block(sblock);
}

static int scrub_checksum_data(struct scrub_block *sblock)
{
	struct scrub_dev *sdev = sblock->sdev;
	u8 csum[BTRFS_CSUM_SIZE];
	u8 *on_disk_csum;
	struct page *page;
	void *buffer;
	u32 crc = ~(u32)0;
	int fail = 0;
	struct btrfs_root *root = sdev->dev->dev_root;
	u64 len;
	int index;

	BUG_ON(sblock->page_count < 1);
	if (!sblock->pagev[0].have_csum)
		return 0;

	on_disk_csum = sblock->pagev[0].csum;
	page = sblock->pagev[0].page;
	buffer = kmap_atomic(page);

	len = sdev->sectorsize;
	index = 0;
	for (;;) {
		u64 l = min_t(u64, len, PAGE_SIZE);

		crc = btrfs_csum_data(root, buffer, crc, l);
		kunmap_atomic(buffer);
		len -= l;
		if (len == 0)
			break;
		index++;
		BUG_ON(index >= sblock->page_count);
		BUG_ON(!sblock->pagev[index].page);
		page = sblock->pagev[index].page;
		buffer = kmap_atomic(page);
	}

	btrfs_csum_final(crc, csum);
	if (memcmp(csum, on_disk_csum, sdev->csum_size))
		fail = 1;

	return fail;
}

static int scrub_checksum_tree_block(struct scrub_block *sblock)
{
	struct scrub_dev *sdev = sblock->sdev;
	struct btrfs_header *h;
	struct btrfs_root *root = sdev->dev->dev_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u8 calculated_csum[BTRFS_CSUM_SIZE];
	u8 on_disk_csum[BTRFS_CSUM_SIZE];
	struct page *page;
	void *mapped_buffer;
	u64 mapped_size;
	void *p;
	u32 crc = ~(u32)0;
	int fail = 0;
	int crc_fail = 0;
	u64 len;
	int index;

	BUG_ON(sblock->page_count < 1);
	page = sblock->pagev[0].page;
	mapped_buffer = kmap_atomic(page);
	h = (struct btrfs_header *)mapped_buffer;
	memcpy(on_disk_csum, h->csum, sdev->csum_size);

	/*
	 * we don't use the getter functions here, as we
	 * a) don't have an extent buffer and
	 * b) the page is already kmapped
	 */

	if (sblock->pagev[0].logical != le64_to_cpu(h->bytenr))
		++fail;

	if (sblock->pagev[0].generation != le64_to_cpu(h->generation))
		++fail;

	if (memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
		++fail;

	if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
		   BTRFS_UUID_SIZE))
		++fail;

	BUG_ON(sdev->nodesize != sdev->leafsize);
	len = sdev->nodesize - BTRFS_CSUM_SIZE;
	mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
	p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
	index = 0;
	for (;;) {
		u64 l = min_t(u64, len, mapped_size);

		crc = btrfs_csum_data(root, p, crc, l);
		kunmap_atomic(mapped_buffer);
		len -= l;
		if (len == 0)
			break;
		index++;
		BUG_ON(index >= sblock->page_count);
		BUG_ON(!sblock->pagev[index].page);
		page = sblock->pagev[index].page;
		mapped_buffer = kmap_atomic(page);
		mapped_size = PAGE_SIZE;
		p = mapped_buffer;
	}

	btrfs_csum_final(crc, calculated_csum);
	if (memcmp(calculated_csum, on_disk_csum, sdev->csum_size))
		++crc_fail;

	return fail || crc_fail;
}

static int scrub_checksum_super(struct scrub_block *sblock)
{
	struct btrfs_super_block *s;
	struct scrub_dev *sdev = sblock->sdev;
	struct btrfs_root *root = sdev->dev->dev_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u8 calculated_csum[BTRFS_CSUM_SIZE];
	u8 on_disk_csum[BTRFS_CSUM_SIZE];
	struct page *page;
	void *mapped_buffer;
	u64 mapped_size;
	void *p;
	u32 crc = ~(u32)0;
	int fail = 0;
	u64 len;
	int index;

	BUG_ON(sblock->page_count < 1);
	page = sblock->pagev[0].page;
	mapped_buffer = kmap_atomic(page);
	s = (struct btrfs_super_block *)mapped_buffer;
	memcpy(on_disk_csum, s->csum, sdev->csum_size);

	if (sblock->pagev[0].logical != le64_to_cpu(s->bytenr))
		++fail;

	if (sblock->pagev[0].generation != le64_to_cpu(s->generation))
		++fail;

	if (memcmp(s->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
		++fail;

	len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE;
	mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
	p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
	index = 0;
	for (;;) {
		u64 l = min_t(u64, len, mapped_size);

		crc = btrfs_csum_data(root, p, crc, l);
		kunmap_atomic(mapped_buffer);
		len -= l;
		if (len == 0)
			break;
		index++;
		BUG_ON(index >= sblock->page_count);
		BUG_ON(!sblock->pagev[index].page);
		page = sblock->pagev[index].page;
		mapped_buffer = kmap_atomic(page);
		mapped_size = PAGE_SIZE;
		p = mapped_buffer;
	}

	btrfs_csum_final(crc, calculated_csum);
	if (memcmp(calculated_csum, on_disk_csum, sdev->csum_size))
		++fail;

	if (fail) {
		/*
		 * if we find an error in a super block, we just report it.
		 * Super blocks get rewritten with the next transaction
		 * commit anyway
		 */
		spin_lock(&sdev->stat_lock);
		++sdev->stat.super_errors;
		spin_unlock(&sdev->stat_lock);
	}

	return fail;
}

static void scrub_block_get(struct scrub_block *sblock)
{
	atomic_inc(&sblock->ref_count);
}

static void scrub_block_put(struct scrub_block *sblock)
{
	if (atomic_dec_and_test(&sblock->ref_count)) {
		int i;

		for (i = 0; i < sblock->page_count; i++)
			if (sblock->pagev[i].page)
				__free_page(sblock->pagev[i].page);
		kfree(sblock);
	}
}

static void scrub_submit(struct scrub_dev *sdev)
{
	struct scrub_bio *sbio;

	if (sdev->curr == -1)
		return;

	sbio = sdev->bios[sdev->curr];
	sdev->curr = -1;
	atomic_inc(&sdev->in_flight);

	btrfsic_submit_bio(READ, sbio->bio);
}

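/*
 * Queue one page into the per-device bio that is currently being filled.
 * The struct scrub_bio objects form a free list threaded through
 * first_free/next_free under list_lock; callers sleep on list_wait when
 * all SCRUB_BIOS_PER_DEV bios are in flight.
 */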
static int scrub_add_page_to_bio(struct scrub_dev *sdev,
				 struct scrub_page *spage)
{
	struct scrub_block *sblock = spage->sblock;
	struct scrub_bio *sbio;
	int ret;

again:
	/*
	 * grab a fresh bio or wait for one to become available
	 */
	while (sdev->curr == -1) {
		spin_lock(&sdev->list_lock);
		sdev->curr = sdev->first_free;
		if (sdev->curr != -1) {
			sdev->first_free = sdev->bios[sdev->curr]->next_free;
			sdev->bios[sdev->curr]->next_free = -1;
			sdev->bios[sdev->curr]->page_count = 0;
			spin_unlock(&sdev->list_lock);
		} else {
			spin_unlock(&sdev->list_lock);
			wait_event(sdev->list_wait, sdev->first_free != -1);
		}
	}
	sbio = sdev->bios[sdev->curr];
	if (sbio->page_count == 0) {
		struct bio *bio;

		sbio->physical = spage->physical;
		sbio->logical = spage->logical;
		bio = sbio->bio;
		if (!bio) {
			bio = bio_alloc(GFP_NOFS, sdev->pages_per_bio);
			if (!bio)
				return -ENOMEM;
			sbio->bio = bio;
		}

		bio->bi_private = sbio;
		bio->bi_end_io = scrub_bio_end_io;
		bio->bi_bdev = sdev->dev->bdev;
		bio->bi_sector = spage->physical >> 9;
		sbio->err = 0;
	} else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
		   spage->physical ||
		   sbio->logical + sbio->page_count * PAGE_SIZE !=
		   spage->logical) {
		scrub_submit(sdev);
		goto again;
	}

	sbio->pagev[sbio->page_count] = spage;
	ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
	if (ret != PAGE_SIZE) {
		if (sbio->page_count < 1) {
			bio_put(sbio->bio);
			sbio->bio = NULL;
			return -EIO;
		}
		scrub_submit(sdev);
		goto again;
	}

	scrub_block_get(sblock); /* one for the added page */
	atomic_inc(&sblock->outstanding_pages);
	sbio->page_count++;
	if (sbio->page_count == sdev->pages_per_bio)
		scrub_submit(sdev);

	return 0;
}

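/*
 * Split the range [logical, logical + len) into PAGE_SIZE pieces, attach
 * freshly allocated pages to one scrub_block and queue them for reading.
 * The block takes one initial reference plus one per queued page; the last
 * reference dropped (here or in bio completion) frees the block.
 */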
static int scrub_pages(struct scrub_dev *sdev, u64 logical, u64 len,
		       u64 physical, u64 flags, u64 gen, int mirror_num,
		       u8 *csum, int force)
{
	struct scrub_block *sblock;
	int index;

	sblock = kzalloc(sizeof(*sblock), GFP_NOFS);
	if (!sblock) {
		spin_lock(&sdev->stat_lock);
		sdev->stat.malloc_errors++;
		spin_unlock(&sdev->stat_lock);
		return -ENOMEM;
	}

	/* one ref inside this function, plus one for each page later on */
	atomic_set(&sblock->ref_count, 1);
	sblock->sdev = sdev;
	sblock->no_io_error_seen = 1;

	for (index = 0; len > 0; index++) {
		struct scrub_page *spage = sblock->pagev + index;
		u64 l = min_t(u64, len, PAGE_SIZE);

		BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
		spage->page = alloc_page(GFP_NOFS);
		if (!spage->page) {
			spin_lock(&sdev->stat_lock);
			sdev->stat.malloc_errors++;
			spin_unlock(&sdev->stat_lock);
			while (index > 0) {
				index--;
				__free_page(sblock->pagev[index].page);
			}
			kfree(sblock);
			return -ENOMEM;
		}
		spage->sblock = sblock;
		spage->bdev = sdev->dev->bdev;
		spage->flags = flags;
		spage->generation = gen;
		spage->logical = logical;
		spage->physical = physical;
		spage->mirror_num = mirror_num;
		if (csum) {
			spage->have_csum = 1;
			memcpy(spage->csum, csum, sdev->csum_size);
		} else {
			spage->have_csum = 0;
		}
		sblock->page_count++;
		len -= l;
		logical += l;
		physical += l;
	}

	BUG_ON(sblock->page_count == 0);
	for (index = 0; index < sblock->page_count; index++) {
		struct scrub_page *spage = sblock->pagev + index;
		int ret;

		ret = scrub_add_page_to_bio(sdev, spage);
		if (ret) {
			scrub_block_put(sblock);
			return ret;
		}
	}

	if (force)
		scrub_submit(sdev);

	/* last one frees, either here or in bio completion for last page */
	scrub_block_put(sblock);
	return 0;
}

static void scrub_bio_end_io(struct bio *bio, int err)
{
	struct scrub_bio *sbio = bio->bi_private;
	struct scrub_dev *sdev = sbio->sdev;
	struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info;

	sbio->err = err;
	sbio->bio = bio;

	btrfs_queue_worker(&fs_info->scrub_workers, &sbio->work);
}

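/*
 * Runs in worker-thread context (queued from the bio end_io callback,
 * which must not block): propagate I/O errors to the affected pages,
 * complete the scrub_blocks whose last page just finished, and return
 * the scrub_bio to the free list.
 */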
static void scrub_bio_end_io_worker(struct btrfs_work *work)
{
	struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
	struct scrub_dev *sdev = sbio->sdev;
	int i;

	BUG_ON(sbio->page_count > SCRUB_PAGES_PER_BIO);
	if (sbio->err) {
		for (i = 0; i < sbio->page_count; i++) {
			struct scrub_page *spage = sbio->pagev[i];

			spage->io_error = 1;
			spage->sblock->no_io_error_seen = 0;
		}
	}

	/* now complete the scrub_block items that have all pages completed */
	for (i = 0; i < sbio->page_count; i++) {
		struct scrub_page *spage = sbio->pagev[i];
		struct scrub_block *sblock = spage->sblock;

		if (atomic_dec_and_test(&sblock->outstanding_pages))
			scrub_block_complete(sblock);
		scrub_block_put(sblock);
	}

	if (sbio->err) {
		/*
		 * FIXME: the bio is reset here as if it were going to be
		 * reused, but it is put and set to NULL right below; it
		 * is unclear what this reset is good for
		 */
		sbio->bio->bi_flags &= ~(BIO_POOL_MASK - 1);
		sbio->bio->bi_flags |= 1 << BIO_UPTODATE;
		sbio->bio->bi_phys_segments = 0;
		sbio->bio->bi_idx = 0;

		for (i = 0; i < sbio->page_count; i++) {
			struct bio_vec *bi;
			bi = &sbio->bio->bi_io_vec[i];
			bi->bv_offset = 0;
			bi->bv_len = PAGE_SIZE;
		}
	}

	bio_put(sbio->bio);
	sbio->bio = NULL;
	spin_lock(&sdev->list_lock);
	sbio->next_free = sdev->first_free;
	sdev->first_free = sbio->index;
	spin_unlock(&sdev->list_lock);
	atomic_dec(&sdev->in_flight);
	wake_up(&sdev->list_wait);
}

static void scrub_block_complete(struct scrub_block *sblock)
{
	if (!sblock->no_io_error_seen)
		scrub_handle_errored_block(sblock);
	else
		scrub_checksum(sblock);
}

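/*
 * Look up the data checksum for the sector at `logical` in the list that
 * was prefilled from the csum tree. Sums that lie entirely below the
 * current scrub position can never match again and are discarded as the
 * list is walked.
 */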
static int scrub_find_csum(struct scrub_dev *sdev, u64 logical, u64 len,
			   u8 *csum)
{
	struct btrfs_ordered_sum *sum = NULL;
	int ret = 0;
	unsigned long i;
	unsigned long num_sectors;

	while (!list_empty(&sdev->csum_list)) {
		sum = list_first_entry(&sdev->csum_list,
				       struct btrfs_ordered_sum, list);
		if (sum->bytenr > logical)
			return 0;
		if (sum->bytenr + sum->len > logical)
			break;

		++sdev->stat.csum_discards;
		list_del(&sum->list);
		kfree(sum);
		sum = NULL;
	}
	if (!sum)
		return 0;

	num_sectors = sum->len / sdev->sectorsize;
	for (i = 0; i < num_sectors; ++i) {
		if (sum->sums[i].bytenr == logical) {
			memcpy(csum, &sum->sums[i].sum, sdev->csum_size);
			ret = 1;
			break;
		}
	}
	if (ret && i == num_sectors - 1) {
		list_del(&sum->list);
		kfree(sum);
	}
	return ret;
}

/* scrub extent tries to collect up to 64 kB for each bio */
static int scrub_extent(struct scrub_dev *sdev, u64 logical, u64 len,
			u64 physical, u64 flags, u64 gen, int mirror_num)
{
	int ret;
	u8 csum[BTRFS_CSUM_SIZE];
	u32 blocksize;

	if (flags & BTRFS_EXTENT_FLAG_DATA) {
		blocksize = sdev->sectorsize;
		spin_lock(&sdev->stat_lock);
		sdev->stat.data_extents_scrubbed++;
		sdev->stat.data_bytes_scrubbed += len;
		spin_unlock(&sdev->stat_lock);
	} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		BUG_ON(sdev->nodesize != sdev->leafsize);
		blocksize = sdev->nodesize;
		spin_lock(&sdev->stat_lock);
		sdev->stat.tree_extents_scrubbed++;
		sdev->stat.tree_bytes_scrubbed += len;
		spin_unlock(&sdev->stat_lock);
	} else {
		blocksize = sdev->sectorsize;
		BUG_ON(1);
	}

	while (len) {
		u64 l = min_t(u64, len, blocksize);
		int have_csum = 0;

		if (flags & BTRFS_EXTENT_FLAG_DATA) {
			/* push csums to sbio */
			have_csum = scrub_find_csum(sdev, logical, l, csum);
			if (have_csum == 0)
				++sdev->stat.no_csum;
		}
		ret = scrub_pages(sdev, logical, l, physical, flags, gen,
				  mirror_num, have_csum ? csum : NULL, 0);
		if (ret)
			return ret;
		len -= l;
		logical += l;
		physical += l;
	}
	return 0;
}

static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,
	struct map_lookup *map, int num, u64 base, u64 length)
{
	struct btrfs_path *path;
	struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info;
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_root *csum_root = fs_info->csum_root;
	struct btrfs_extent_item *extent;
	struct blk_plug plug;
	u64 flags;
	int ret;
	int slot;
	int i;
	u64 nstripes;
	struct extent_buffer *l;
	struct btrfs_key key;
	u64 physical;
	u64 logical;
	u64 generation;
	int mirror_num;
	struct reada_control *reada1;
	struct reada_control *reada2;
	struct btrfs_key key_start;
	struct btrfs_key key_end;

	u64 increment = map->stripe_len;
	u64 offset;

	nstripes = length;
	offset = 0;
	do_div(nstripes, map->stripe_len);
	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
		offset = map->stripe_len * num;
		increment = map->stripe_len * map->num_stripes;
		mirror_num = 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		int factor = map->num_stripes / map->sub_stripes;
		offset = map->stripe_len * (num / map->sub_stripes);
		increment = map->stripe_len * factor;
		mirror_num = num % map->sub_stripes + 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
		increment = map->stripe_len;
		mirror_num = num % map->num_stripes + 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		increment = map->stripe_len;
		mirror_num = num % map->num_stripes + 1;
	} else {
		increment = map->stripe_len;
		mirror_num = 1;
	}
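	/*
	 * Worked example for the RAID10 case above (values are
	 * illustrative): with num_stripes = 4, sub_stripes = 2 and
	 * num = 3, factor is 2, so this device sees every second
	 * stripe, starting at offset = stripe_len * 1, and it holds
	 * mirror copy num % sub_stripes + 1 = 2.
	 */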

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * work on commit root. The related disk blocks are static as
	 * long as COW is applied. This means, it is safe to rewrite
	 * them to repair disk errors without any race conditions
	 */
	path->search_commit_root = 1;
	path->skip_locking = 1;

	/*
	 * trigger the readahead for the extent tree and csum tree and wait
	 * for completion. During readahead, the scrub is officially paused
	 * to not hold off transaction commits
	 */
	logical = base + offset;

	wait_event(sdev->list_wait,
		   atomic_read(&sdev->in_flight) == 0);
	atomic_inc(&fs_info->scrubs_paused);
	wake_up(&fs_info->scrub_pause_wait);

	/* FIXME it might be better to start readahead at commit root */
	key_start.objectid = logical;
	key_start.type = BTRFS_EXTENT_ITEM_KEY;
	key_start.offset = (u64)0;
	key_end.objectid = base + offset + nstripes * increment;
	key_end.type = BTRFS_EXTENT_ITEM_KEY;
	key_end.offset = (u64)0;
	reada1 = btrfs_reada_add(root, &key_start, &key_end);

	key_start.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key_start.type = BTRFS_EXTENT_CSUM_KEY;
	key_start.offset = logical;
	key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key_end.type = BTRFS_EXTENT_CSUM_KEY;
	key_end.offset = base + offset + nstripes * increment;
	reada2 = btrfs_reada_add(csum_root, &key_start, &key_end);

	if (!IS_ERR(reada1))
		btrfs_reada_wait(reada1);
	if (!IS_ERR(reada2))
		btrfs_reada_wait(reada2);

	mutex_lock(&fs_info->scrub_lock);
	while (atomic_read(&fs_info->scrub_pause_req)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrub_pause_req) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);
	wake_up(&fs_info->scrub_pause_wait);

1858 /*
1859 * collect all data csums for the stripe to avoid seeking during
1860 * the scrub. This might currently (crc32) end up to be about 1MB
1861 */
1862 blk_start_plug(&plug);

	/*
	 * now find all extents for each stripe and scrub them
	 */
	logical = base + offset;
	physical = map->stripes[num].physical;
	ret = 0;
	for (i = 0; i < nstripes; ++i) {
		/*
		 * canceled?
		 */
		if (atomic_read(&fs_info->scrub_cancel_req) ||
		    atomic_read(&sdev->cancel_req)) {
			ret = -ECANCELED;
			goto out;
		}
		/*
		 * check to see if we have to pause
		 */
		if (atomic_read(&fs_info->scrub_pause_req)) {
			/* push queued extents */
			scrub_submit(sdev);
			wait_event(sdev->list_wait,
				   atomic_read(&sdev->in_flight) == 0);
			atomic_inc(&fs_info->scrubs_paused);
			wake_up(&fs_info->scrub_pause_wait);
			mutex_lock(&fs_info->scrub_lock);
			while (atomic_read(&fs_info->scrub_pause_req)) {
				mutex_unlock(&fs_info->scrub_lock);
				wait_event(fs_info->scrub_pause_wait,
					   atomic_read(&fs_info->scrub_pause_req) == 0);
				mutex_lock(&fs_info->scrub_lock);
			}
			atomic_dec(&fs_info->scrubs_paused);
			mutex_unlock(&fs_info->scrub_lock);
			wake_up(&fs_info->scrub_pause_wait);
		}

		ret = btrfs_lookup_csums_range(csum_root, logical,
					       logical + map->stripe_len - 1,
					       &sdev->csum_list, 1);
		if (ret)
			goto out;

		key.objectid = logical;
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = (u64)0;

		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;
		if (ret > 0) {
			ret = btrfs_previous_item(root, path, 0,
						  BTRFS_EXTENT_ITEM_KEY);
			if (ret < 0)
				goto out;
			if (ret > 0) {
				/*
				 * there's no smaller item, so stick with
				 * the larger one
				 */
				btrfs_release_path(path);
				ret = btrfs_search_slot(NULL, root, &key,
							path, 0, 0);
				if (ret < 0)
					goto out;
			}
		}

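		/*
		 * walk all extent items that intersect the stripe
		 * [logical, logical + stripe_len). The backtracking above
		 * may have positioned us on the last extent item that
		 * starts before 'logical', since such an extent can still
		 * reach into the stripe; items entirely before the stripe
		 * are skipped below.
		 */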
		while (1) {
			l = path->nodes[0];
			slot = path->slots[0];
			if (slot >= btrfs_header_nritems(l)) {
				ret = btrfs_next_leaf(root, path);
				if (ret == 0)
					continue;
				if (ret < 0)
					goto out;

				break;
			}
			btrfs_item_key_to_cpu(l, &key, slot);

			if (key.objectid + key.offset <= logical)
				goto next;

			if (key.objectid >= logical + map->stripe_len)
				break;

			if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY)
				goto next;

			extent = btrfs_item_ptr(l, slot,
						struct btrfs_extent_item);
			flags = btrfs_extent_flags(l, extent);
			generation = btrfs_extent_generation(l, extent);

			if (key.objectid < logical &&
			    (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) {
				printk(KERN_ERR
				       "btrfs scrub: tree block %llu spanning "
				       "stripes, ignored. logical=%llu\n",
				       (unsigned long long)key.objectid,
				       (unsigned long long)logical);
				goto next;
			}

			/*
			 * trim extent to this stripe
			 */
			if (key.objectid < logical) {
				key.offset -= logical - key.objectid;
				key.objectid = logical;
			}
			if (key.objectid + key.offset >
			    logical + map->stripe_len) {
				key.offset = logical + map->stripe_len -
					     key.objectid;
			}

			ret = scrub_extent(sdev, key.objectid, key.offset,
					   key.objectid - logical + physical,
					   flags, generation, mirror_num);
			if (ret)
				goto out;

next:
			path->slots[0]++;
		}
		btrfs_release_path(path);
		logical += increment;
		physical += map->stripe_len;
		spin_lock(&sdev->stat_lock);
		sdev->stat.last_physical = physical;
		spin_unlock(&sdev->stat_lock);
	}
	/* push queued extents */
	scrub_submit(sdev);

out:
	blk_finish_plug(&plug);
	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}

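/*
 * scrub_chunk() maps a dev extent back to the chunk it belongs to and
 * scrubs every stripe of that chunk that is stored on sdev->dev at
 * dev_offset.
 */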
static noinline_for_stack int scrub_chunk(struct scrub_dev *sdev,
	u64 chunk_tree, u64 chunk_objectid, u64 chunk_offset, u64 length,
	u64 dev_offset)
{
	struct btrfs_mapping_tree *map_tree =
		&sdev->dev->dev_root->fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	int i;
	int ret = -EINVAL;

	read_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
	read_unlock(&map_tree->map_tree.lock);

	if (!em)
		return -EINVAL;

	map = (struct map_lookup *)em->bdev;
	if (em->start != chunk_offset)
		goto out;

	if (em->len < length)
		goto out;

	for (i = 0; i < map->num_stripes; ++i) {
		if (map->stripes[i].dev == sdev->dev &&
		    map->stripes[i].physical == dev_offset) {
			ret = scrub_stripe(sdev, map, i, chunk_offset, length);
			if (ret)
				goto out;
		}
	}
out:
	free_extent_map(em);

	return ret;
}

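/*
 * walk the dev extent items of the device and scrub each chunk whose
 * dev extent overlaps [start, end). A reference on the block group is
 * held while the chunk is scrubbed, so it cannot be removed underneath
 * us.
 */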
static noinline_for_stack
int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end)
{
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	struct btrfs_root *root = sdev->dev->dev_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 length;
	u64 chunk_tree;
	u64 chunk_objectid;
	u64 chunk_offset;
	int ret;
	int slot;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_block_group_cache *cache;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 2;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = sdev->dev->devid;
	key.offset = 0ull;
	key.type = BTRFS_DEV_EXTENT_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			break;
		if (ret > 0) {
			if (path->slots[0] >=
			    btrfs_header_nritems(path->nodes[0])) {
				ret = btrfs_next_leaf(root, path);
				if (ret)
					break;
			}
		}

		l = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.objectid != sdev->dev->devid)
			break;

		if (btrfs_key_type(&found_key) != BTRFS_DEV_EXTENT_KEY)
			break;

		if (found_key.offset >= end)
			break;

		if (found_key.offset < key.offset)
			break;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

		if (found_key.offset + length <= start) {
			key.offset = found_key.offset + length;
			btrfs_release_path(path);
			continue;
		}

		chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
		chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);

		/*
		 * get a reference on the corresponding block group to prevent
		 * the chunk from going away while we scrub it
		 */
		cache = btrfs_lookup_block_group(fs_info, chunk_offset);
		if (!cache) {
			ret = -ENOENT;
			break;
		}
		ret = scrub_chunk(sdev, chunk_tree, chunk_objectid,
				  chunk_offset, length, found_key.offset);
		btrfs_put_block_group(cache);
		if (ret)
			break;

		key.offset = found_key.offset + length;
		btrfs_release_path(path);
	}

	btrfs_free_path(path);

	/*
	 * ret can still be 1 from btrfs_search_slot or btrfs_next_leaf;
	 * that's not an error
	 */
	return ret < 0 ? ret : 0;
}

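/*
 * read and verify all copies of the super block that fit on the device.
 * The generation of the last committed transaction is used as the
 * expected generation.
 */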
static noinline_for_stack int scrub_supers(struct scrub_dev *sdev)
{
	int i;
	u64 bytenr;
	u64 gen;
	int ret;
	struct btrfs_device *device = sdev->dev;
	struct btrfs_root *root = device->dev_root;

	if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
		return -EIO;

	gen = root->fs_info->last_trans_committed;

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		if (bytenr + BTRFS_SUPER_INFO_SIZE > device->total_bytes)
			break;

		ret = scrub_pages(sdev, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
				  BTRFS_EXTENT_FLAG_SUPER, gen, i, NULL, 1);
		if (ret)
			return ret;
	}
	wait_event(sdev->list_wait, atomic_read(&sdev->in_flight) == 0);

	return 0;
}

/*
 * get a reference count on fs_info->scrub_workers. start the workers
 * if necessary
 */
static noinline_for_stack int scrub_workers_get(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret = 0;

	mutex_lock(&fs_info->scrub_lock);
	if (fs_info->scrub_workers_refcnt == 0) {
		btrfs_init_workers(&fs_info->scrub_workers, "scrub",
				   fs_info->thread_pool_size,
				   &fs_info->generic_worker);
		fs_info->scrub_workers.idle_thresh = 4;
		ret = btrfs_start_workers(&fs_info->scrub_workers);
		if (ret)
			goto out;
	}
	++fs_info->scrub_workers_refcnt;
out:
	mutex_unlock(&fs_info->scrub_lock);

	return ret;
}

static noinline_for_stack void scrub_workers_put(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	mutex_lock(&fs_info->scrub_lock);
	if (--fs_info->scrub_workers_refcnt == 0)
		btrfs_stop_workers(&fs_info->scrub_workers);
	WARN_ON(fs_info->scrub_workers_refcnt < 0);
	mutex_unlock(&fs_info->scrub_lock);
}

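/*
 * btrfs_scrub_dev() is the main entry point. It checks the size
 * assumptions that the scrub implementation depends on, attaches a
 * scrub_dev to the device and scrubs first the super blocks, then all
 * chunks that overlap [start, end) on the device. Only one scrub may
 * run per device at a time; a second one gets -EINPROGRESS.
 */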
int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end,
		    struct btrfs_scrub_progress *progress, int readonly)
{
	struct scrub_dev *sdev;
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;
	struct btrfs_device *dev;

	if (btrfs_fs_closing(root->fs_info))
		return -EINVAL;

	/*
	 * check some assumptions
	 */
	if (root->nodesize != root->leafsize) {
		printk(KERN_ERR
		       "btrfs_scrub: size assumption nodesize == leafsize (%d == %d) fails\n",
		       root->nodesize, root->leafsize);
		return -EINVAL;
	}

	if (root->nodesize > BTRFS_STRIPE_LEN) {
		/*
		 * in this case scrub is unable to calculate the checksum
		 * the way scrub is implemented. Do not handle this
		 * situation at all because it won't ever happen.
		 */
		printk(KERN_ERR
		       "btrfs_scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails\n",
		       root->nodesize, BTRFS_STRIPE_LEN);
		return -EINVAL;
	}

	if (root->sectorsize != PAGE_SIZE) {
		/* not supported for data w/o checksums */
		printk(KERN_ERR
		       "btrfs_scrub: size assumption sectorsize != PAGE_SIZE (%d != %llu) fails\n",
		       root->sectorsize, (unsigned long long)PAGE_SIZE);
		return -EINVAL;
	}

	ret = scrub_workers_get(root);
	if (ret)
		return ret;

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(root, devid, NULL, NULL);
	if (!dev || dev->missing) {
		mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
		scrub_workers_put(root);
		return -ENODEV;
	}
	mutex_lock(&fs_info->scrub_lock);

	if (!dev->in_fs_metadata) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
		scrub_workers_put(root);
		return -ENODEV;
	}

	if (dev->scrub_device) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
		scrub_workers_put(root);
		return -EINPROGRESS;
	}
	sdev = scrub_setup_dev(dev);
	if (IS_ERR(sdev)) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
		scrub_workers_put(root);
		return PTR_ERR(sdev);
	}
	sdev->readonly = readonly;
	dev->scrub_device = sdev;

	atomic_inc(&fs_info->scrubs_running);
	mutex_unlock(&fs_info->scrub_lock);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	down_read(&fs_info->scrub_super_lock);
	ret = scrub_supers(sdev);
	up_read(&fs_info->scrub_super_lock);

	if (!ret)
		ret = scrub_enumerate_chunks(sdev, start, end);

	wait_event(sdev->list_wait, atomic_read(&sdev->in_flight) == 0);
	atomic_dec(&fs_info->scrubs_running);
	wake_up(&fs_info->scrub_pause_wait);

	wait_event(sdev->list_wait, atomic_read(&sdev->fixup_cnt) == 0);

	if (progress)
		memcpy(progress, &sdev->stat, sizeof(*progress));

	mutex_lock(&fs_info->scrub_lock);
	dev->scrub_device = NULL;
	mutex_unlock(&fs_info->scrub_lock);

	scrub_free_dev(sdev);
	scrub_workers_put(root);

	return ret;
}

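/*
 * btrfs_scrub_pause() asks all running scrubs to pause (e.g. so that a
 * transaction commit is not held off) and waits until each of them has
 * reached the paused state; btrfs_scrub_continue() lets them resume.
 */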
void btrfs_scrub_pause(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	mutex_lock(&fs_info->scrub_lock);
	atomic_inc(&fs_info->scrub_pause_req);
	while (atomic_read(&fs_info->scrubs_paused) !=
	       atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_paused) ==
			   atomic_read(&fs_info->scrubs_running));
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);
}

void btrfs_scrub_continue(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	atomic_dec(&fs_info->scrub_pause_req);
	wake_up(&fs_info->scrub_pause_wait);
}

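/*
 * scrub_super_lock serializes super block updates against scrubbing of
 * the super blocks: scrub_supers() takes it shared, writers take it
 * exclusively via the two helpers below.
 */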
void btrfs_scrub_pause_super(struct btrfs_root *root)
{
	down_write(&root->fs_info->scrub_super_lock);
}

void btrfs_scrub_continue_super(struct btrfs_root *root)
{
	up_write(&root->fs_info->scrub_super_lock);
}

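/*
 * cancellation protocol: raising scrub_cancel_req makes each running
 * scrub bail out of scrub_stripe() with -ECANCELED; we then wait for
 * scrubs_running to drop to zero. Returns -ENOTCONN if no scrub was
 * running.
 */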
int __btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	if (!atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}

	atomic_inc(&fs_info->scrub_cancel_req);
	while (atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_running) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
	atomic_dec(&fs_info->scrub_cancel_req);
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}

int btrfs_scrub_cancel(struct btrfs_root *root)
{
	return __btrfs_scrub_cancel(root->fs_info);
}

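/*
 * cancel the scrub running on a single device. The caller must make
 * sure the device does not go away while this runs; see
 * btrfs_scrub_cancel_devid() below, which holds the device_list_mutex
 * for that purpose.
 */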
int btrfs_scrub_cancel_dev(struct btrfs_root *root, struct btrfs_device *dev)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct scrub_dev *sdev;

	mutex_lock(&fs_info->scrub_lock);
	sdev = dev->scrub_device;
	if (!sdev) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}
	atomic_inc(&sdev->cancel_req);
	while (dev->scrub_device) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   dev->scrub_device == NULL);
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}

int btrfs_scrub_cancel_devid(struct btrfs_root *root, u64 devid)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_device *dev;
	int ret;

	/*
	 * we have to hold the device_list_mutex here so the device
	 * does not go away in cancel_dev. FIXME: find a better solution
	 */
	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(root, devid, NULL, NULL);
	if (!dev) {
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		return -ENODEV;
	}
	ret = btrfs_scrub_cancel_dev(root, dev);
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	return ret;
}

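/*
 * copy the current statistics of the scrub running on 'devid' to
 * 'progress'. Returns -ENODEV if the device does not exist and
 * -ENOTCONN if no scrub is running on it.
 */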
int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
			 struct btrfs_scrub_progress *progress)
{
	struct btrfs_device *dev;
	struct scrub_dev *sdev = NULL;

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(root, devid, NULL, NULL);
	if (dev)
		sdev = dev->scrub_device;
	if (sdev)
		memcpy(progress, &sdev->stat, sizeof(*progress));
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	return dev ? (sdev ? 0 : -ENOTCONN) : -ENODEV;
}