// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/radix-tree.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/migrate.h>
#include <linux/ratelimit.h>
#include <linux/uuid.h>
#include <linux/semaphore.h>
#include <linux/error-injection.h>
#include <linux/crc32c.h>
#include <linux/sched/mm.h>
#include <asm/unaligned.h>
#include <crypto/hash.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "print-tree.h"
#include "locking.h"
#include "tree-log.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
#include "inode-map.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "raid56.h"
#include "sysfs.h"
#include "qgroup.h"
#include "compression.h"
#include "tree-checker.h"
#include "ref-verify.h"
#include "block-group.h"

#define BTRFS_SUPER_FLAG_SUPP   (BTRFS_HEADER_FLAG_WRITTEN |\
                                 BTRFS_HEADER_FLAG_RELOC |\
                                 BTRFS_SUPER_FLAG_ERROR |\
                                 BTRFS_SUPER_FLAG_SEEDING |\
                                 BTRFS_SUPER_FLAG_METADUMP |\
                                 BTRFS_SUPER_FLAG_METADUMP_V2)

static const struct extent_io_ops btree_extent_io_ops;
static void end_workqueue_fn(struct btrfs_work *work);
static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
                                      struct btrfs_fs_info *fs_info);
static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
                                        struct extent_io_tree *dirty_pages,
                                        int mark);
static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
                                       struct extent_io_tree *pinned_extents);
static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info);
static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info);

/*
 * btrfs_end_io_wq structs are used to do processing in task context when an IO
 * is complete. This is used during reads to verify checksums, and it is used
 * by writes to insert metadata for new file extents after IO is complete.
 */
struct btrfs_end_io_wq {
        struct bio *bio;
        bio_end_io_t *end_io;
        void *private;
        struct btrfs_fs_info *info;
        blk_status_t status;
        enum btrfs_wq_endio_type metadata;
        struct btrfs_work work;
};

static struct kmem_cache *btrfs_end_io_wq_cache;

int __init btrfs_end_io_wq_init(void)
{
        btrfs_end_io_wq_cache = kmem_cache_create("btrfs_end_io_wq",
                                        sizeof(struct btrfs_end_io_wq),
                                        0,
                                        SLAB_MEM_SPREAD,
                                        NULL);
        if (!btrfs_end_io_wq_cache)
                return -ENOMEM;
        return 0;
}

void __cold btrfs_end_io_wq_exit(void)
{
        kmem_cache_destroy(btrfs_end_io_wq_cache);
}

/*
 * async submit bios are used to offload expensive checksumming
 * onto the worker threads. They checksum file and metadata bios
 * just before they are sent down the IO stack.
 */
struct async_submit_bio {
        void *private_data;
        struct bio *bio;
        extent_submit_bio_start_t *submit_bio_start;
        int mirror_num;
        /*
         * bio_offset is optional, can be used if the pages in the bio
         * can't tell us where in the file the bio should go
         */
        u64 bio_offset;
        struct btrfs_work work;
        blk_status_t status;
};

/*
 * Lockdep class keys for extent_buffer->lock's in this root. For a given
 * eb, the lockdep key is determined by the btrfs_root it belongs to and
 * the level the eb occupies in the tree.
 *
 * Different roots are used for different purposes and may nest inside each
 * other and they require separate keysets. As lockdep keys should be
 * static, assign keysets according to the purpose of the root as indicated
 * by btrfs_root->root_key.objectid. This ensures that all special purpose
 * roots have separate keysets.
 *
 * Lock-nesting across peer nodes is always done with the immediate parent
 * node locked thus preventing deadlock. As lockdep doesn't know this, use
 * subclass to avoid triggering lockdep warning in such cases.
 *
 * The key is set by the readpage_end_io_hook after the buffer has passed
 * csum validation but before the pages are unlocked. It is also set by
 * btrfs_init_new_buffer on freshly allocated blocks.
 *
 * We also add a check to make sure the highest level of the tree is the
 * same as our lockdep setup here. If BTRFS_MAX_LEVEL changes, this code
 * needs update as well.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# if BTRFS_MAX_LEVEL != 8
#  error
# endif

static struct btrfs_lockdep_keyset {
        u64 id;                         /* root objectid */
        const char *name_stem;          /* lock name stem */
        char names[BTRFS_MAX_LEVEL + 1][20];
        struct lock_class_key keys[BTRFS_MAX_LEVEL + 1];
} btrfs_lockdep_keysets[] = {
        { .id = BTRFS_ROOT_TREE_OBJECTID,       .name_stem = "root"     },
        { .id = BTRFS_EXTENT_TREE_OBJECTID,     .name_stem = "extent"   },
        { .id = BTRFS_CHUNK_TREE_OBJECTID,      .name_stem = "chunk"    },
        { .id = BTRFS_DEV_TREE_OBJECTID,        .name_stem = "dev"      },
        { .id = BTRFS_FS_TREE_OBJECTID,         .name_stem = "fs"       },
        { .id = BTRFS_CSUM_TREE_OBJECTID,       .name_stem = "csum"     },
        { .id = BTRFS_QUOTA_TREE_OBJECTID,      .name_stem = "quota"    },
        { .id = BTRFS_TREE_LOG_OBJECTID,        .name_stem = "log"      },
        { .id = BTRFS_TREE_RELOC_OBJECTID,      .name_stem = "treloc"   },
        { .id = BTRFS_DATA_RELOC_TREE_OBJECTID, .name_stem = "dreloc"   },
        { .id = BTRFS_UUID_TREE_OBJECTID,       .name_stem = "uuid"     },
        { .id = BTRFS_FREE_SPACE_TREE_OBJECTID, .name_stem = "free-space" },
        { .id = 0,                              .name_stem = "tree"     },
};
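
/*
 * Note: btrfs_init_lockdep() below expands each entry into per-level lock
 * names of the form "btrfs-<stem>-<level>", e.g. "btrfs-extent-02" for
 * level 2 of the extent tree; the final catch-all entry with id 0 yields
 * "btrfs-tree-NN" for any other root.
 */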

void __init btrfs_init_lockdep(void)
{
        int i, j;

        /* initialize lockdep class names */
        for (i = 0; i < ARRAY_SIZE(btrfs_lockdep_keysets); i++) {
                struct btrfs_lockdep_keyset *ks = &btrfs_lockdep_keysets[i];

                for (j = 0; j < ARRAY_SIZE(ks->names); j++)
                        snprintf(ks->names[j], sizeof(ks->names[j]),
                                 "btrfs-%s-%02d", ks->name_stem, j);
        }
}

void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
                                    int level)
{
        struct btrfs_lockdep_keyset *ks;

        BUG_ON(level >= ARRAY_SIZE(ks->keys));

        /* find the matching keyset, id 0 is the default entry */
        for (ks = btrfs_lockdep_keysets; ks->id; ks++)
                if (ks->id == objectid)
                        break;

        lockdep_set_class_and_name(&eb->lock,
                                   &ks->keys[level], ks->names[level]);
}

#endif

/*
 * extents on the btree inode are pretty simple, there's one extent
 * that covers the entire device
 */
struct extent_map *btree_get_extent(struct btrfs_inode *inode,
                struct page *page, size_t pg_offset, u64 start, u64 len,
                int create)
{
        struct btrfs_fs_info *fs_info = inode->root->fs_info;
        struct extent_map_tree *em_tree = &inode->extent_tree;
        struct extent_map *em;
        int ret;

        read_lock(&em_tree->lock);
        em = lookup_extent_mapping(em_tree, start, len);
        if (em) {
                em->bdev = fs_info->fs_devices->latest_bdev;
                read_unlock(&em_tree->lock);
                goto out;
        }
        read_unlock(&em_tree->lock);

        em = alloc_extent_map();
        if (!em) {
                em = ERR_PTR(-ENOMEM);
                goto out;
        }
        em->start = 0;
        em->len = (u64)-1;
        em->block_len = (u64)-1;
        em->block_start = 0;
        em->bdev = fs_info->fs_devices->latest_bdev;

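        /*
         * If another thread raced in and inserted the catch-all mapping
         * first, add_extent_mapping() returns -EEXIST below; drop our copy
         * and look up the winner's instead.
         */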
        write_lock(&em_tree->lock);
        ret = add_extent_mapping(em_tree, em, 0);
        if (ret == -EEXIST) {
                free_extent_map(em);
                em = lookup_extent_mapping(em_tree, start, len);
                if (!em)
                        em = ERR_PTR(-EIO);
        } else if (ret) {
                free_extent_map(em);
                em = ERR_PTR(ret);
        }
        write_unlock(&em_tree->lock);

out:
        return em;
}

/*
 * Compute the csum of a btree block and store the result to provided buffer.
 *
 * Returns error if the extent buffer cannot be mapped.
 */
static int csum_tree_block(struct extent_buffer *buf, u8 *result)
{
        struct btrfs_fs_info *fs_info = buf->fs_info;
        SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
        unsigned long len;
        unsigned long cur_len;
        unsigned long offset = BTRFS_CSUM_SIZE;
        char *kaddr;
        unsigned long map_start;
        unsigned long map_len;
        int err;

        shash->tfm = fs_info->csum_shash;
        crypto_shash_init(shash);

        len = buf->len - offset;

        while (len > 0) {
                /*
                 * Note: we don't need to check for the err == 1 case here, as
                 * with the given combination of 'start = BTRFS_CSUM_SIZE (32)'
                 * and 'min_len = 32' and the currently implemented mapping
                 * algorithm we cannot cross a page boundary.
                 */
                err = map_private_extent_buffer(buf, offset, 32,
                                                &kaddr, &map_start, &map_len);
                if (WARN_ON(err))
                        return err;
                cur_len = min(len, map_len - (offset - map_start));
                crypto_shash_update(shash, kaddr + offset - map_start, cur_len);
                len -= cur_len;
                offset += cur_len;
        }
        memset(result, 0, BTRFS_CSUM_SIZE);

        crypto_shash_final(shash, result);

        return 0;
}

/*
 * we can't consider a given block up to date unless the transid of the
 * block matches the transid in the parent node's pointer. This is how we
 * detect blocks that either didn't get written at all or got written
 * in the wrong place.
 */
static int verify_parent_transid(struct extent_io_tree *io_tree,
                                 struct extent_buffer *eb, u64 parent_transid,
                                 int atomic)
{
        struct extent_state *cached_state = NULL;
        int ret;
        bool need_lock = (current->journal_info == BTRFS_SEND_TRANS_STUB);

        if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
                return 0;

        if (atomic)
                return -EAGAIN;

        if (need_lock) {
                btrfs_tree_read_lock(eb);
                btrfs_set_lock_blocking_read(eb);
        }

        lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
                         &cached_state);
        if (extent_buffer_uptodate(eb) &&
            btrfs_header_generation(eb) == parent_transid) {
                ret = 0;
                goto out;
        }
        btrfs_err_rl(eb->fs_info,
                "parent transid verify failed on %llu wanted %llu found %llu",
                     eb->start,
                     parent_transid, btrfs_header_generation(eb));
        ret = 1;

        /*
         * Things reading via commit roots that don't have normal protection,
         * like send, can have a really old block in cache that may point at a
         * block that has been freed and re-allocated. So don't clear uptodate
         * if we find an eb that is under IO (dirty/writeback) because we could
         * end up reading in the stale data and then writing it back out and
         * making everybody very sad.
         */
        if (!extent_buffer_under_io(eb))
                clear_extent_buffer_uptodate(eb);
out:
        unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
                             &cached_state);
        if (need_lock)
                btrfs_tree_read_unlock_blocking(eb);
        return ret;
}

static bool btrfs_supported_super_csum(u16 csum_type)
{
        switch (csum_type) {
        case BTRFS_CSUM_TYPE_CRC32:
                return true;
        default:
                return false;
        }
}
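
/*
 * Only crc32c is accepted above. Later kernels extended this list (e.g.
 * xxhash64, sha256, blake2b), but at this point in the code base crc32c
 * is the only on-disk checksum type the superblock may carry.
 */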

/*
 * Return 0 if the superblock checksum type matches the checksum value of that
 * algorithm. Pass the raw disk superblock data.
 */
static int btrfs_check_super_csum(struct btrfs_fs_info *fs_info,
                                  char *raw_disk_sb)
{
        struct btrfs_super_block *disk_sb =
                (struct btrfs_super_block *)raw_disk_sb;
        char result[BTRFS_CSUM_SIZE];
        SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);

        shash->tfm = fs_info->csum_shash;
        crypto_shash_init(shash);

        /*
         * The super_block structure does not span the whole
         * BTRFS_SUPER_INFO_SIZE range, we expect that the unused space is
         * filled with zeros and is included in the checksum.
         */
        crypto_shash_update(shash, raw_disk_sb + BTRFS_CSUM_SIZE,
                            BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
        crypto_shash_final(shash, result);

        if (memcmp(disk_sb->csum, result, btrfs_super_csum_size(disk_sb)))
                return 1;

        return 0;
}

int btrfs_verify_level_key(struct extent_buffer *eb, int level,
                           struct btrfs_key *first_key, u64 parent_transid)
{
        struct btrfs_fs_info *fs_info = eb->fs_info;
        int found_level;
        struct btrfs_key found_key;
        int ret;

        found_level = btrfs_header_level(eb);
        if (found_level != level) {
                WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG),
                     KERN_ERR "BTRFS: tree level check failed\n");
                btrfs_err(fs_info,
                "tree level mismatch detected, bytenr=%llu level expected=%u has=%u",
                          eb->start, level, found_level);
                return -EIO;
        }

        if (!first_key)
                return 0;

        /*
         * For live tree blocks (new tree blocks in the current transaction),
         * we need proper lock context to avoid races, which is impossible
         * here. So we only check tree blocks which have been read from disk,
         * whose generation <= fs_info->last_trans_committed.
         */
        if (btrfs_header_generation(eb) > fs_info->last_trans_committed)
                return 0;

        /* We have @first_key, so this @eb must have at least one item */
        if (btrfs_header_nritems(eb) == 0) {
                btrfs_err(fs_info,
                "invalid tree nritems, bytenr=%llu nritems=0 expect >0",
                          eb->start);
                WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
                return -EUCLEAN;
        }

        if (found_level)
                btrfs_node_key_to_cpu(eb, &found_key, 0);
        else
                btrfs_item_key_to_cpu(eb, &found_key, 0);
        ret = btrfs_comp_cpu_keys(first_key, &found_key);

        if (ret) {
                WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG),
                     KERN_ERR "BTRFS: tree first key check failed\n");
                btrfs_err(fs_info,
                "tree first key mismatch detected, bytenr=%llu parent_transid=%llu key expected=(%llu,%u,%llu) has=(%llu,%u,%llu)",
                          eb->start, parent_transid, first_key->objectid,
                          first_key->type, first_key->offset,
                          found_key.objectid, found_key.type,
                          found_key.offset);
        }
        return ret;
}

/*
 * helper to read a given tree block, doing retries as required when
 * the checksums don't match and we have alternate mirrors to try.
 *
 * @parent_transid: expected transid, skip check if 0
 * @level:          expected level, mandatory check
 * @first_key:      expected key of first slot, skip check if NULL
 */
static int btree_read_extent_buffer_pages(struct extent_buffer *eb,
                                          u64 parent_transid, int level,
                                          struct btrfs_key *first_key)
{
        struct btrfs_fs_info *fs_info = eb->fs_info;
        struct extent_io_tree *io_tree;
        int failed = 0;
        int ret;
        int num_copies = 0;
        int mirror_num = 0;
        int failed_mirror = 0;

        io_tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
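        /*
         * Retry strategy: mirror_num 0 lets the lower layers pick a copy;
         * after a failure we walk mirrors 1..num_copies explicitly, skipping
         * the mirror that failed first, and repair it once another copy
         * reads back good.
         */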
        while (1) {
                clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
                ret = read_extent_buffer_pages(eb, WAIT_COMPLETE, mirror_num);
                if (!ret) {
                        if (verify_parent_transid(io_tree, eb,
                                                  parent_transid, 0))
                                ret = -EIO;
                        else if (btrfs_verify_level_key(eb, level,
                                                first_key, parent_transid))
                                ret = -EUCLEAN;
                        else
                                break;
                }

                num_copies = btrfs_num_copies(fs_info,
                                              eb->start, eb->len);
                if (num_copies == 1)
                        break;

                if (!failed_mirror) {
                        failed = 1;
                        failed_mirror = eb->read_mirror;
                }

                mirror_num++;
                if (mirror_num == failed_mirror)
                        mirror_num++;

                if (mirror_num > num_copies)
                        break;
        }

        if (failed && !ret && failed_mirror)
                btrfs_repair_eb_io_failure(eb, failed_mirror);

        return ret;
}

/*
 * checksum a dirty tree block before IO. This has extra checks to make sure
 * we only fill in the checksum field in the first page of a multi-page block
 */

static int csum_dirty_buffer(struct btrfs_fs_info *fs_info, struct page *page)
{
        u64 start = page_offset(page);
        u64 found_start;
        u8 result[BTRFS_CSUM_SIZE];
        u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
        struct extent_buffer *eb;
        int ret;

        eb = (struct extent_buffer *)page->private;
        if (page != eb->pages[0])
                return 0;

        found_start = btrfs_header_bytenr(eb);
        /*
         * Please do not consolidate these warnings into a single if.
         * It is useful to know what went wrong.
         */
        if (WARN_ON(found_start != start))
                return -EUCLEAN;
        if (WARN_ON(!PageUptodate(page)))
                return -EUCLEAN;

        ASSERT(memcmp_extent_buffer(eb, fs_info->fs_devices->metadata_uuid,
                                    btrfs_header_fsid(), BTRFS_FSID_SIZE) == 0);

        if (csum_tree_block(eb, result))
                return -EINVAL;

        if (btrfs_header_level(eb))
                ret = btrfs_check_node(eb);
        else
                ret = btrfs_check_leaf_full(eb);

        if (ret < 0) {
                btrfs_err(fs_info,
                "block=%llu write time tree block corruption detected",
                          eb->start);
                return ret;
        }
        write_extent_buffer(eb, result, 0, csum_size);

        return 0;
}

static int check_tree_block_fsid(struct extent_buffer *eb)
{
        struct btrfs_fs_info *fs_info = eb->fs_info;
        struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
        u8 fsid[BTRFS_FSID_SIZE];
        int ret = 1;

        read_extent_buffer(eb, fsid, btrfs_header_fsid(), BTRFS_FSID_SIZE);
        while (fs_devices) {
                u8 *metadata_uuid;

                /*
                 * Checking the incompat flag is only valid for the current
                 * fs. For seed devices it's forbidden to have their uuid
                 * changed so reading ->fsid in this case is fine
                 */
                if (fs_devices == fs_info->fs_devices &&
                    btrfs_fs_incompat(fs_info, METADATA_UUID))
                        metadata_uuid = fs_devices->metadata_uuid;
                else
                        metadata_uuid = fs_devices->fsid;

                if (!memcmp(fsid, metadata_uuid, BTRFS_FSID_SIZE)) {
                        ret = 0;
                        break;
                }
                fs_devices = fs_devices->seed;
        }
        return ret;
}
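
/*
 * Note: a block is accepted if its fsid matches either this filesystem or
 * any seed filesystem chained off fs_devices->seed, since metadata sprouted
 * from a seed device still carries the seed's fsid.
 */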

static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
                                      u64 phy_offset, struct page *page,
                                      u64 start, u64 end, int mirror)
{
        u64 found_start;
        int found_level;
        struct extent_buffer *eb;
        struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
        struct btrfs_fs_info *fs_info = root->fs_info;
        u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
        int ret = 0;
        u8 result[BTRFS_CSUM_SIZE];
        int reads_done;

        if (!page->private)
                goto out;

        eb = (struct extent_buffer *)page->private;

        /* the pending IO might have been the only thing that kept this buffer
         * in memory. Make sure we have a ref for all these other checks
         */
        extent_buffer_get(eb);

        reads_done = atomic_dec_and_test(&eb->io_pages);
        if (!reads_done)
                goto err;

        eb->read_mirror = mirror;
        if (test_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags)) {
                ret = -EIO;
                goto err;
        }

        found_start = btrfs_header_bytenr(eb);
        if (found_start != eb->start) {
                btrfs_err_rl(fs_info, "bad tree block start, want %llu have %llu",
                             eb->start, found_start);
                ret = -EIO;
                goto err;
        }
        if (check_tree_block_fsid(eb)) {
                btrfs_err_rl(fs_info, "bad fsid on block %llu",
                             eb->start);
                ret = -EIO;
                goto err;
        }
        found_level = btrfs_header_level(eb);
        if (found_level >= BTRFS_MAX_LEVEL) {
                btrfs_err(fs_info, "bad tree block level %d on %llu",
                          (int)btrfs_header_level(eb), eb->start);
                ret = -EIO;
                goto err;
        }

        btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb),
                                       eb, found_level);

        ret = csum_tree_block(eb, result);
        if (ret)
                goto err;

        if (memcmp_extent_buffer(eb, result, 0, csum_size)) {
                u8 val[BTRFS_CSUM_SIZE] = { 0 };

                read_extent_buffer(eb, &val, 0, csum_size);
                btrfs_warn_rl(fs_info,
        "%s checksum verify failed on %llu wanted " CSUM_FMT " found " CSUM_FMT " level %d",
                              fs_info->sb->s_id, eb->start,
                              CSUM_FMT_VALUE(csum_size, val),
                              CSUM_FMT_VALUE(csum_size, result),
                              btrfs_header_level(eb));
                ret = -EUCLEAN;
                goto err;
        }

        /*
         * If this is a leaf block and it is corrupt, set the corrupt bit so
         * that we don't try and read the other copies of this block, just
         * return -EIO.
         */
        if (found_level == 0 && btrfs_check_leaf_full(eb)) {
                set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
                ret = -EIO;
        }

        if (found_level > 0 && btrfs_check_node(eb))
                ret = -EIO;

        if (!ret)
                set_extent_buffer_uptodate(eb);
        else
                btrfs_err(fs_info,
                          "block=%llu read time tree block corruption detected",
                          eb->start);
err:
        if (reads_done &&
            test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
                btree_readahead_hook(eb, ret);

        if (ret) {
                /*
                 * our io error hook is going to dec the io pages
                 * again, we have to make sure it has something
                 * to decrement
                 */
                atomic_inc(&eb->io_pages);
                clear_extent_buffer_uptodate(eb);
        }
        free_extent_buffer(eb);
out:
        return ret;
}

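/*
 * The hooks below hijack bio->bi_end_io so that the heavy part of IO
 * completion (csum verification for reads, ordered extent bookkeeping for
 * writes) runs in a workqueue picked by operation type instead of in the
 * bio completion context.
 */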
static void end_workqueue_bio(struct bio *bio)
{
        struct btrfs_end_io_wq *end_io_wq = bio->bi_private;
        struct btrfs_fs_info *fs_info;
        struct btrfs_workqueue *wq;

        fs_info = end_io_wq->info;
        end_io_wq->status = bio->bi_status;

        if (bio_op(bio) == REQ_OP_WRITE) {
                if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA)
                        wq = fs_info->endio_meta_write_workers;
                else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE)
                        wq = fs_info->endio_freespace_worker;
                else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
                        wq = fs_info->endio_raid56_workers;
                else
                        wq = fs_info->endio_write_workers;
        } else {
                if (unlikely(end_io_wq->metadata == BTRFS_WQ_ENDIO_DIO_REPAIR))
                        wq = fs_info->endio_repair_workers;
                else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
                        wq = fs_info->endio_raid56_workers;
                else if (end_io_wq->metadata)
                        wq = fs_info->endio_meta_workers;
                else
                        wq = fs_info->endio_workers;
        }

        btrfs_init_work(&end_io_wq->work, end_workqueue_fn, NULL, NULL);
        btrfs_queue_work(wq, &end_io_wq->work);
}

blk_status_t btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
                                 enum btrfs_wq_endio_type metadata)
{
        struct btrfs_end_io_wq *end_io_wq;

        end_io_wq = kmem_cache_alloc(btrfs_end_io_wq_cache, GFP_NOFS);
        if (!end_io_wq)
                return BLK_STS_RESOURCE;

        end_io_wq->private = bio->bi_private;
        end_io_wq->end_io = bio->bi_end_io;
        end_io_wq->info = info;
        end_io_wq->status = 0;
        end_io_wq->bio = bio;
        end_io_wq->metadata = metadata;

        bio->bi_private = end_io_wq;
        bio->bi_end_io = end_workqueue_bio;
        return 0;
}

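/*
 * Async checksumming runs as a three-phase btrfs_work: run_one_async_start()
 * computes the checksums in a worker thread, run_one_async_done() maps and
 * submits the bio (or completes it with an error), and run_one_async_free()
 * releases the async_submit_bio once everything is done.
 */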
static void run_one_async_start(struct btrfs_work *work)
{
        struct async_submit_bio *async;
        blk_status_t ret;

        async = container_of(work, struct async_submit_bio, work);
        ret = async->submit_bio_start(async->private_data, async->bio,
                                      async->bio_offset);
        if (ret)
                async->status = ret;
}

/*
 * In order to insert checksums into the metadata in large chunks, we wait
 * until bio submission time. All the pages in the bio are checksummed and
 * sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record are
 * inserted into the tree.
 */
static void run_one_async_done(struct btrfs_work *work)
{
        struct async_submit_bio *async;
        struct inode *inode;
        blk_status_t ret;

        async = container_of(work, struct async_submit_bio, work);
        inode = async->private_data;

        /* If an error occurred we just want to clean up the bio and move on */
        if (async->status) {
                async->bio->bi_status = async->status;
                bio_endio(async->bio);
                return;
        }

        ret = btrfs_map_bio(btrfs_sb(inode->i_sb), async->bio,
                            async->mirror_num, 1);
        if (ret) {
                async->bio->bi_status = ret;
                bio_endio(async->bio);
        }
}

static void run_one_async_free(struct btrfs_work *work)
{
        struct async_submit_bio *async;

        async = container_of(work, struct async_submit_bio, work);
        kfree(async);
}

blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
                                 int mirror_num, unsigned long bio_flags,
                                 u64 bio_offset, void *private_data,
                                 extent_submit_bio_start_t *submit_bio_start)
{
        struct async_submit_bio *async;

        async = kmalloc(sizeof(*async), GFP_NOFS);
        if (!async)
                return BLK_STS_RESOURCE;

        async->private_data = private_data;
        async->bio = bio;
        async->mirror_num = mirror_num;
        async->submit_bio_start = submit_bio_start;

        btrfs_init_work(&async->work, run_one_async_start, run_one_async_done,
                        run_one_async_free);

        async->bio_offset = bio_offset;

        async->status = 0;

        if (op_is_sync(bio->bi_opf))
                btrfs_set_work_high_priority(&async->work);

        btrfs_queue_work(fs_info->workers, &async->work);
        return 0;
}

static blk_status_t btree_csum_one_bio(struct bio *bio)
{
        struct bio_vec *bvec;
        struct btrfs_root *root;
        int ret = 0;
        struct bvec_iter_all iter_all;

        ASSERT(!bio_flagged(bio, BIO_CLONED));
        bio_for_each_segment_all(bvec, bio, iter_all) {
                root = BTRFS_I(bvec->bv_page->mapping->host)->root;
                ret = csum_dirty_buffer(root->fs_info, bvec->bv_page);
                if (ret)
                        break;
        }

        return errno_to_blk_status(ret);
}

static blk_status_t btree_submit_bio_start(void *private_data, struct bio *bio,
                                           u64 bio_offset)
{
        /*
         * when we're called for a write, we're already in the async
         * submission context. Just jump into btrfs_map_bio
         */
        return btree_csum_one_bio(bio);
}

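/*
 * Decide whether a metadata write should be checksummed inline: if someone
 * is doing synchronous writes on this inode, or the checksum implementation
 * is fast (e.g. hardware-accelerated crc32c), skip the async offload.
 */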
static int check_async_write(struct btrfs_fs_info *fs_info,
                             struct btrfs_inode *bi)
{
        if (atomic_read(&bi->sync_writers))
                return 0;
        if (test_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags))
                return 0;
        return 1;
}

static blk_status_t btree_submit_bio_hook(struct inode *inode, struct bio *bio,
                                          int mirror_num,
                                          unsigned long bio_flags)
{
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        int async = check_async_write(fs_info, BTRFS_I(inode));
        blk_status_t ret;

        if (bio_op(bio) != REQ_OP_WRITE) {
                /*
                 * called for a read, do the setup so that checksum validation
                 * can happen in the async kernel threads
                 */
                ret = btrfs_bio_wq_end_io(fs_info, bio,
                                          BTRFS_WQ_ENDIO_METADATA);
                if (ret)
                        goto out_w_error;
                ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
        } else if (!async) {
                ret = btree_csum_one_bio(bio);
                if (ret)
                        goto out_w_error;
                ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
        } else {
                /*
                 * kthread helpers are used to submit writes so that
                 * checksumming can happen in parallel across all CPUs
                 */
                ret = btrfs_wq_submit_bio(fs_info, bio, mirror_num, 0,
                                          0, inode, btree_submit_bio_start);
        }

        if (ret)
                goto out_w_error;
        return 0;

out_w_error:
        bio->bi_status = ret;
        bio_endio(bio);
        return ret;
}

#ifdef CONFIG_MIGRATION
static int btree_migratepage(struct address_space *mapping,
                             struct page *newpage, struct page *page,
                             enum migrate_mode mode)
{
        /*
         * we can't safely write a btree page from here,
         * we haven't done the locking hook
         */
        if (PageDirty(page))
                return -EAGAIN;
        /*
         * Buffers may be managed in a filesystem specific way.
         * We must have no buffers or drop them.
         */
        if (page_has_private(page) &&
            !try_to_release_page(page, GFP_KERNEL))
                return -EAGAIN;
        return migrate_page(mapping, newpage, page, mode);
}
#endif


static int btree_writepages(struct address_space *mapping,
                            struct writeback_control *wbc)
{
        struct btrfs_fs_info *fs_info;
        int ret;

        if (wbc->sync_mode == WB_SYNC_NONE) {

                if (wbc->for_kupdate)
                        return 0;

                fs_info = BTRFS_I(mapping->host)->root->fs_info;
                /* this is a bit racy, but that's ok */
                ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
                                               BTRFS_DIRTY_METADATA_THRESH,
                                               fs_info->dirty_metadata_batch);
                if (ret < 0)
                        return 0;
        }
        return btree_write_cache_pages(mapping, wbc);
}

static int btree_readpage(struct file *file, struct page *page)
{
        struct extent_io_tree *tree;
        tree = &BTRFS_I(page->mapping->host)->io_tree;
        return extent_read_full_page(tree, page, btree_get_extent, 0);
}

static int btree_releasepage(struct page *page, gfp_t gfp_flags)
{
        if (PageWriteback(page) || PageDirty(page))
                return 0;

        return try_release_extent_buffer(page);
}

static void btree_invalidatepage(struct page *page, unsigned int offset,
                                 unsigned int length)
{
        struct extent_io_tree *tree;
        tree = &BTRFS_I(page->mapping->host)->io_tree;
        extent_invalidatepage(tree, page, offset);
        btree_releasepage(page, GFP_NOFS);
        if (PagePrivate(page)) {
                btrfs_warn(BTRFS_I(page->mapping->host)->root->fs_info,
                           "page private not zero on page %llu",
                           (unsigned long long)page_offset(page));
                ClearPagePrivate(page);
                set_page_private(page, 0);
                put_page(page);
        }
}

static int btree_set_page_dirty(struct page *page)
{
#ifdef DEBUG
        struct extent_buffer *eb;

        BUG_ON(!PagePrivate(page));
        eb = (struct extent_buffer *)page->private;
        BUG_ON(!eb);
        BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
        BUG_ON(!atomic_read(&eb->refs));
        btrfs_assert_tree_locked(eb);
#endif
        return __set_page_dirty_nobuffers(page);
}

static const struct address_space_operations btree_aops = {
        .readpage       = btree_readpage,
        .writepages     = btree_writepages,
        .releasepage    = btree_releasepage,
        .invalidatepage = btree_invalidatepage,
#ifdef CONFIG_MIGRATION
        .migratepage    = btree_migratepage,
#endif
        .set_page_dirty = btree_set_page_dirty,
};

void readahead_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr)
{
        struct extent_buffer *buf = NULL;
        int ret;

        buf = btrfs_find_create_tree_block(fs_info, bytenr);
        if (IS_ERR(buf))
                return;

        ret = read_extent_buffer_pages(buf, WAIT_NONE, 0);
        if (ret < 0)
                free_extent_buffer_stale(buf);
        else
                free_extent_buffer(buf);
}

struct extent_buffer *btrfs_find_create_tree_block(
                                                struct btrfs_fs_info *fs_info,
                                                u64 bytenr)
{
        if (btrfs_is_testing(fs_info))
                return alloc_test_extent_buffer(fs_info, bytenr);
        return alloc_extent_buffer(fs_info, bytenr);
}

/*
 * Read a tree block at logical address @bytenr and do basic but critical
 * verification.
 *
 * @parent_transid: expected transid of this tree block, skip check if 0
 * @level:          expected level, mandatory check
 * @first_key:      expected key in slot 0, skip check if NULL
 */
struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr,
                                      u64 parent_transid, int level,
                                      struct btrfs_key *first_key)
{
        struct extent_buffer *buf = NULL;
        int ret;

        buf = btrfs_find_create_tree_block(fs_info, bytenr);
        if (IS_ERR(buf))
                return buf;

        ret = btree_read_extent_buffer_pages(buf, parent_transid,
                                             level, first_key);
        if (ret) {
                free_extent_buffer_stale(buf);
                return ERR_PTR(ret);
        }
        return buf;
}

void btrfs_clean_tree_block(struct extent_buffer *buf)
{
        struct btrfs_fs_info *fs_info = buf->fs_info;
        if (btrfs_header_generation(buf) ==
            fs_info->running_transaction->transid) {
                btrfs_assert_tree_locked(buf);

                if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
                        percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
                                                 -buf->len,
                                                 fs_info->dirty_metadata_batch);
                        /* ugh, clear_extent_buffer_dirty needs to lock the page */
                        btrfs_set_lock_blocking_write(buf);
                        clear_extent_buffer_dirty(buf);
                }
        }
}

static struct btrfs_subvolume_writers *btrfs_alloc_subvolume_writers(void)
{
        struct btrfs_subvolume_writers *writers;
        int ret;

        writers = kmalloc(sizeof(*writers), GFP_NOFS);
        if (!writers)
                return ERR_PTR(-ENOMEM);

        ret = percpu_counter_init(&writers->counter, 0, GFP_NOFS);
        if (ret < 0) {
                kfree(writers);
                return ERR_PTR(ret);
        }

        init_waitqueue_head(&writers->wait);
        return writers;
}

static void
btrfs_free_subvolume_writers(struct btrfs_subvolume_writers *writers)
{
        percpu_counter_destroy(&writers->counter);
        kfree(writers);
}

static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
                         u64 objectid)
{
        bool dummy = test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state);
        root->node = NULL;
        root->commit_root = NULL;
        root->state = 0;
        root->orphan_cleanup_state = 0;

        root->last_trans = 0;
        root->highest_objectid = 0;
        root->nr_delalloc_inodes = 0;
        root->nr_ordered_extents = 0;
        root->inode_tree = RB_ROOT;
        INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
        root->block_rsv = NULL;

        INIT_LIST_HEAD(&root->dirty_list);
        INIT_LIST_HEAD(&root->root_list);
        INIT_LIST_HEAD(&root->delalloc_inodes);
        INIT_LIST_HEAD(&root->delalloc_root);
        INIT_LIST_HEAD(&root->ordered_extents);
        INIT_LIST_HEAD(&root->ordered_root);
        INIT_LIST_HEAD(&root->reloc_dirty_list);
        INIT_LIST_HEAD(&root->logged_list[0]);
        INIT_LIST_HEAD(&root->logged_list[1]);
        spin_lock_init(&root->inode_lock);
        spin_lock_init(&root->delalloc_lock);
        spin_lock_init(&root->ordered_extent_lock);
        spin_lock_init(&root->accounting_lock);
        spin_lock_init(&root->log_extents_lock[0]);
        spin_lock_init(&root->log_extents_lock[1]);
        spin_lock_init(&root->qgroup_meta_rsv_lock);
        mutex_init(&root->objectid_mutex);
        mutex_init(&root->log_mutex);
        mutex_init(&root->ordered_extent_mutex);
        mutex_init(&root->delalloc_mutex);
        init_waitqueue_head(&root->qgroup_flush_wait);
        init_waitqueue_head(&root->log_writer_wait);
        init_waitqueue_head(&root->log_commit_wait[0]);
        init_waitqueue_head(&root->log_commit_wait[1]);
        INIT_LIST_HEAD(&root->log_ctxs[0]);
        INIT_LIST_HEAD(&root->log_ctxs[1]);
        atomic_set(&root->log_commit[0], 0);
        atomic_set(&root->log_commit[1], 0);
        atomic_set(&root->log_writers, 0);
        atomic_set(&root->log_batch, 0);
        refcount_set(&root->refs, 1);
        atomic_set(&root->will_be_snapshotted, 0);
        atomic_set(&root->snapshot_force_cow, 0);
        atomic_set(&root->nr_swapfiles, 0);
        root->log_transid = 0;
        root->log_transid_committed = -1;
        root->last_log_commit = 0;
        if (!dummy)
                extent_io_tree_init(fs_info, &root->dirty_log_pages,
                                    IO_TREE_ROOT_DIRTY_LOG_PAGES, NULL);

        memset(&root->root_key, 0, sizeof(root->root_key));
        memset(&root->root_item, 0, sizeof(root->root_item));
        memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
        if (!dummy)
                root->defrag_trans_start = fs_info->generation;
        else
                root->defrag_trans_start = 0;
        root->root_key.objectid = objectid;
        root->anon_dev = 0;

        spin_lock_init(&root->root_item_lock);
        btrfs_qgroup_init_swapped_blocks(&root->swapped_blocks);
}

static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info,
                                           gfp_t flags)
{
        struct btrfs_root *root = kzalloc(sizeof(*root), flags);
        if (root)
                root->fs_info = fs_info;
        return root;
}

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
/* Should only be used by the testing infrastructure */
struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info)
{
        struct btrfs_root *root;

        if (!fs_info)
                return ERR_PTR(-EINVAL);

        root = btrfs_alloc_root(fs_info, GFP_KERNEL);
        if (!root)
                return ERR_PTR(-ENOMEM);

        /* We don't use the stripesize in selftest, set it as sectorsize */
        __setup_root(root, fs_info, BTRFS_ROOT_TREE_OBJECTID);
        root->alloc_bytenr = 0;

        return root;
}
#endif

struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
                                     u64 objectid)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct extent_buffer *leaf;
        struct btrfs_root *tree_root = fs_info->tree_root;
        struct btrfs_root *root;
        struct btrfs_key key;
        unsigned int nofs_flag;
        int ret = 0;
        uuid_le uuid = NULL_UUID_LE;

        /*
         * We're holding a transaction handle, so use a NOFS memory allocation
         * context to avoid deadlock if reclaim happens.
         */
        nofs_flag = memalloc_nofs_save();
        root = btrfs_alloc_root(fs_info, GFP_KERNEL);
        memalloc_nofs_restore(nofs_flag);
        if (!root)
                return ERR_PTR(-ENOMEM);

        __setup_root(root, fs_info, objectid);
        root->root_key.objectid = objectid;
        root->root_key.type = BTRFS_ROOT_ITEM_KEY;
        root->root_key.offset = 0;

        leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0);
        if (IS_ERR(leaf)) {
                ret = PTR_ERR(leaf);
                leaf = NULL;
                goto fail;
        }

        root->node = leaf;
        btrfs_mark_buffer_dirty(leaf);

        root->commit_root = btrfs_root_node(root);
        set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);

        root->root_item.flags = 0;
        root->root_item.byte_limit = 0;
        btrfs_set_root_bytenr(&root->root_item, leaf->start);
        btrfs_set_root_generation(&root->root_item, trans->transid);
        btrfs_set_root_level(&root->root_item, 0);
        btrfs_set_root_refs(&root->root_item, 1);
        btrfs_set_root_used(&root->root_item, leaf->len);
        btrfs_set_root_last_snapshot(&root->root_item, 0);
        btrfs_set_root_dirid(&root->root_item, 0);
        if (is_fstree(objectid))
                uuid_le_gen(&uuid);
        memcpy(root->root_item.uuid, uuid.b, BTRFS_UUID_SIZE);
        root->root_item.drop_level = 0;

        key.objectid = objectid;
        key.type = BTRFS_ROOT_ITEM_KEY;
        key.offset = 0;
        ret = btrfs_insert_root(trans, tree_root, &key, &root->root_item);
        if (ret)
                goto fail;

        btrfs_tree_unlock(leaf);

        return root;

fail:
        if (leaf) {
                btrfs_tree_unlock(leaf);
                free_extent_buffer(root->commit_root);
                free_extent_buffer(leaf);
        }
        kfree(root);

        return ERR_PTR(ret);
}

static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
                                         struct btrfs_fs_info *fs_info)
{
        struct btrfs_root *root;
        struct extent_buffer *leaf;

        root = btrfs_alloc_root(fs_info, GFP_NOFS);
        if (!root)
                return ERR_PTR(-ENOMEM);

        __setup_root(root, fs_info, BTRFS_TREE_LOG_OBJECTID);

        root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
        root->root_key.type = BTRFS_ROOT_ITEM_KEY;
        root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;

        /*
         * DON'T set REF_COWS for log trees
         *
         * log trees do not get reference counted because they go away
         * before a real commit is actually done. They do store pointers
         * to file data extents, and those reference counts still get
         * updated (along with back refs to the log tree).
         */

        leaf = btrfs_alloc_tree_block(trans, root, 0, BTRFS_TREE_LOG_OBJECTID,
                                      NULL, 0, 0, 0);
        if (IS_ERR(leaf)) {
                kfree(root);
                return ERR_CAST(leaf);
        }

        root->node = leaf;

        btrfs_mark_buffer_dirty(root->node);
        btrfs_tree_unlock(root->node);
        return root;
}

int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
                             struct btrfs_fs_info *fs_info)
{
        struct btrfs_root *log_root;

        log_root = alloc_log_tree(trans, fs_info);
        if (IS_ERR(log_root))
                return PTR_ERR(log_root);
        WARN_ON(fs_info->log_root_tree);
        fs_info->log_root_tree = log_root;
        return 0;
}

int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
                       struct btrfs_root *root)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_root *log_root;
        struct btrfs_inode_item *inode_item;

        log_root = alloc_log_tree(trans, fs_info);
        if (IS_ERR(log_root))
                return PTR_ERR(log_root);

        log_root->last_trans = trans->transid;
        log_root->root_key.offset = root->root_key.objectid;

        inode_item = &log_root->root_item.inode;
        btrfs_set_stack_inode_generation(inode_item, 1);
        btrfs_set_stack_inode_size(inode_item, 3);
        btrfs_set_stack_inode_nlink(inode_item, 1);
        btrfs_set_stack_inode_nbytes(inode_item,
                                     fs_info->nodesize);
        btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);

        btrfs_set_root_node(&log_root->root_item, log_root->node);

        WARN_ON(root->log_root);
        root->log_root = log_root;
        root->log_transid = 0;
        root->log_transid_committed = -1;
        root->last_log_commit = 0;
        return 0;
}

static struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
                                               struct btrfs_key *key)
{
        struct btrfs_root *root;
        struct btrfs_fs_info *fs_info = tree_root->fs_info;
        struct btrfs_path *path;
        u64 generation;
        int ret;
        int level;

        path = btrfs_alloc_path();
        if (!path)
                return ERR_PTR(-ENOMEM);

        root = btrfs_alloc_root(fs_info, GFP_NOFS);
        if (!root) {
                ret = -ENOMEM;
                goto alloc_fail;
        }

        __setup_root(root, fs_info, key->objectid);

        ret = btrfs_find_root(tree_root, key, path,
                              &root->root_item, &root->root_key);
        if (ret) {
                if (ret > 0)
                        ret = -ENOENT;
                goto find_fail;
        }

        generation = btrfs_root_generation(&root->root_item);
        level = btrfs_root_level(&root->root_item);
        root->node = read_tree_block(fs_info,
                                     btrfs_root_bytenr(&root->root_item),
                                     generation, level, NULL);
        if (IS_ERR(root->node)) {
                ret = PTR_ERR(root->node);
                goto find_fail;
        } else if (!btrfs_buffer_uptodate(root->node, generation, 0)) {
                ret = -EIO;
                free_extent_buffer(root->node);
                goto find_fail;
        }
        root->commit_root = btrfs_root_node(root);
out:
        btrfs_free_path(path);
        return root;

find_fail:
        kfree(root);
alloc_fail:
        root = ERR_PTR(ret);
        goto out;
}

struct btrfs_root *btrfs_read_fs_root(struct btrfs_root *tree_root,
                                      struct btrfs_key *location)
{
        struct btrfs_root *root;

        root = btrfs_read_tree_root(tree_root, location);
        if (IS_ERR(root))
                return root;

        if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
                set_bit(BTRFS_ROOT_REF_COWS, &root->state);
                btrfs_check_and_init_root_item(&root->root_item);
        }

        return root;
}

int btrfs_init_fs_root(struct btrfs_root *root)
{
        int ret;
        struct btrfs_subvolume_writers *writers;

        root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS);
        root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned),
                                        GFP_NOFS);
        if (!root->free_ino_pinned || !root->free_ino_ctl) {
                ret = -ENOMEM;
                goto fail;
        }

        writers = btrfs_alloc_subvolume_writers();
        if (IS_ERR(writers)) {
                ret = PTR_ERR(writers);
                goto fail;
        }
        root->subv_writers = writers;

        btrfs_init_free_ino_ctl(root);
        spin_lock_init(&root->ino_cache_lock);
        init_waitqueue_head(&root->ino_cache_wait);

        /*
         * Don't assign anonymous block device to roots that are not exposed to
         * userspace, the id pool is limited to 1M
         */
        if (is_fstree(root->root_key.objectid) &&
            btrfs_root_refs(&root->root_item) > 0) {
                ret = get_anon_bdev(&root->anon_dev);
                if (ret)
                        goto fail;
        }

        mutex_lock(&root->objectid_mutex);
        ret = btrfs_find_highest_objectid(root,
                                          &root->highest_objectid);
        if (ret) {
                mutex_unlock(&root->objectid_mutex);
                goto fail;
        }

        ASSERT(root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);

        mutex_unlock(&root->objectid_mutex);

        return 0;
fail:
        /* The caller is responsible to call btrfs_free_fs_root */
        return ret;
}

struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
                                        u64 root_id)
{
        struct btrfs_root *root;

        spin_lock(&fs_info->fs_roots_radix_lock);
        root = radix_tree_lookup(&fs_info->fs_roots_radix,
                                 (unsigned long)root_id);
        spin_unlock(&fs_info->fs_roots_radix_lock);
        return root;
}

int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
                         struct btrfs_root *root)
{
        int ret;

        ret = radix_tree_preload(GFP_NOFS);
        if (ret)
                return ret;

        spin_lock(&fs_info->fs_roots_radix_lock);
        ret = radix_tree_insert(&fs_info->fs_roots_radix,
                                (unsigned long)root->root_key.objectid,
                                root);
        if (ret == 0)
                set_bit(BTRFS_ROOT_IN_RADIX, &root->state);
        spin_unlock(&fs_info->fs_roots_radix_lock);
        radix_tree_preload_end();

        return ret;
}

struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
                                     struct btrfs_key *location,
                                     bool check_ref)
{
        struct btrfs_root *root;
        struct btrfs_path *path;
        struct btrfs_key key;
        int ret;

        if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
                return fs_info->tree_root;
        if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
                return fs_info->extent_root;
        if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
                return fs_info->chunk_root;
        if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
                return fs_info->dev_root;
        if (location->objectid == BTRFS_CSUM_TREE_OBJECTID)
                return fs_info->csum_root;
        if (location->objectid == BTRFS_QUOTA_TREE_OBJECTID)
                return fs_info->quota_root ? fs_info->quota_root :
                                             ERR_PTR(-ENOENT);
        if (location->objectid == BTRFS_UUID_TREE_OBJECTID)
                return fs_info->uuid_root ? fs_info->uuid_root :
                                            ERR_PTR(-ENOENT);
        if (location->objectid == BTRFS_FREE_SPACE_TREE_OBJECTID)
                return fs_info->free_space_root ? fs_info->free_space_root :
                                                  ERR_PTR(-ENOENT);
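        /*
         * Anything else is a subvolume or reloc root: look it up in the
         * fs_roots radix tree first, and fall back to reading it from the
         * tree root and caching it on a miss.
         */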
again:
        root = btrfs_lookup_fs_root(fs_info, location->objectid);
        if (root) {
                if (check_ref && btrfs_root_refs(&root->root_item) == 0)
                        return ERR_PTR(-ENOENT);
                return root;
        }

        root = btrfs_read_fs_root(fs_info->tree_root, location);
        if (IS_ERR(root))
                return root;

        if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
                ret = -ENOENT;
                goto fail;
        }

        ret = btrfs_init_fs_root(root);
        if (ret)
                goto fail;

        path = btrfs_alloc_path();
        if (!path) {
                ret = -ENOMEM;
                goto fail;
        }
        key.objectid = BTRFS_ORPHAN_OBJECTID;
        key.type = BTRFS_ORPHAN_ITEM_KEY;
        key.offset = location->objectid;

        ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
        btrfs_free_path(path);
        if (ret < 0)
                goto fail;
        if (ret == 0)
                set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state);

        ret = btrfs_insert_fs_root(fs_info, root);
        if (ret) {
                if (ret == -EEXIST) {
                        btrfs_free_fs_root(root);
                        goto again;
                }
                goto fail;
        }
        return root;
fail:
        btrfs_free_fs_root(root);
        return ERR_PTR(ret);
}

static int btrfs_congested_fn(void *congested_data, int bdi_bits)
{
        struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
        int ret = 0;
        struct btrfs_device *device;
        struct backing_dev_info *bdi;

        rcu_read_lock();
        list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) {
                if (!device->bdev)
                        continue;
                bdi = device->bdev->bd_bdi;
                if (bdi_congested(bdi, bdi_bits)) {
                        ret = 1;
                        break;
                }
        }
        rcu_read_unlock();
        return ret;
}

/*
 * called by the kthread helper functions to finally call the bio end_io
 * functions. This is where read checksum verification actually happens
 */
static void end_workqueue_fn(struct btrfs_work *work)
{
        struct bio *bio;
        struct btrfs_end_io_wq *end_io_wq;

        end_io_wq = container_of(work, struct btrfs_end_io_wq, work);
        bio = end_io_wq->bio;

        bio->bi_status = end_io_wq->status;
        bio->bi_private = end_io_wq->private;
        bio->bi_end_io = end_io_wq->end_io;
        bio_endio(bio);
        kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq);
}

static int cleaner_kthread(void *arg)
{
        struct btrfs_root *root = arg;
        struct btrfs_fs_info *fs_info = root->fs_info;
        int again;

        while (1) {
                again = 0;

                set_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags);

                /* Make the cleaner go to sleep early. */
                if (btrfs_need_cleaner_sleep(fs_info))
                        goto sleep;

                /*
                 * Do not do anything if we might cause open_ctree() to block
                 * before we have finished mounting the filesystem.
                 */
                if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
                        goto sleep;

                if (!mutex_trylock(&fs_info->cleaner_mutex))
                        goto sleep;

                /*
                 * Avoid the problem that we change the status of the fs
                 * during the above check and trylock.
                 */
                if (btrfs_need_cleaner_sleep(fs_info)) {
                        mutex_unlock(&fs_info->cleaner_mutex);
                        goto sleep;
                }

                btrfs_run_delayed_iputs(fs_info);

                again = btrfs_clean_one_deleted_snapshot(root);
                mutex_unlock(&fs_info->cleaner_mutex);

                /*
                 * The defragger has dealt with the R/O remount and umount,
                 * needn't do anything special here.
                 */
                btrfs_run_defrag_inodes(fs_info);

                /*
                 * Acquires fs_info->delete_unused_bgs_mutex to avoid racing
                 * with relocation (btrfs_relocate_chunk) and relocation
                 * acquires fs_info->cleaner_mutex (btrfs_relocate_block_group)
                 * after acquiring fs_info->delete_unused_bgs_mutex. So we
                 * can't hold, nor need to, fs_info->cleaner_mutex when deleting
                 * unused block groups.
                 */
                btrfs_delete_unused_bgs(fs_info);
sleep:
                clear_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags);
                if (kthread_should_park())
                        kthread_parkme();
                if (kthread_should_stop())
                        return 0;
                if (!again) {
                        set_current_state(TASK_INTERRUPTIBLE);
                        schedule();
                        __set_current_state(TASK_RUNNING);
                }
        }
}

static int transaction_kthread(void *arg)
{
        struct btrfs_root *root = arg;
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_trans_handle *trans;
        struct btrfs_transaction *cur;
        u64 transid;
        time64_t now;
        unsigned long delay;
        bool cannot_commit;

        do {
                cannot_commit = false;
                delay = HZ * fs_info->commit_interval;
                mutex_lock(&fs_info->transaction_kthread_mutex);

                spin_lock(&fs_info->trans_lock);
                cur = fs_info->running_transaction;
                if (!cur) {
                        spin_unlock(&fs_info->trans_lock);
                        goto sleep;
                }

                now = ktime_get_seconds();
                if (cur->state < TRANS_STATE_COMMIT_START &&
                    (now < cur->start_time ||
                     now - cur->start_time < fs_info->commit_interval)) {
                        spin_unlock(&fs_info->trans_lock);
                        delay = HZ * 5;
                        goto sleep;
                }
                transid = cur->transid;
                spin_unlock(&fs_info->trans_lock);

                /* If the file system is aborted, this will always fail. */
                trans = btrfs_attach_transaction(root);
                if (IS_ERR(trans)) {
                        if (PTR_ERR(trans) != -ENOENT)
                                cannot_commit = true;
                        goto sleep;
                }
                if (transid == trans->transid) {
                        btrfs_commit_transaction(trans);
                } else {
                        btrfs_end_transaction(trans);
                }
sleep:
                wake_up_process(fs_info->cleaner_kthread);
                mutex_unlock(&fs_info->transaction_kthread_mutex);

                if (unlikely(test_bit(BTRFS_FS_STATE_ERROR,
                                      &fs_info->fs_state)))
                        btrfs_cleanup_transaction(fs_info);
                if (!kthread_should_stop() &&
                    (!btrfs_transaction_blocked(fs_info) ||
                     cannot_commit))
                        schedule_timeout_interruptible(delay);
        } while (!kthread_should_stop());
        return 0;
}
1787
1788 /*
1789 * this will find the highest generation in the array of
1790 * root backups. The index of the highest array is returned,
1791 * or -1 if we can't find anything.
1792 *
1793 * We check to make sure the array is valid by comparing the
1794 * generation of the latest root in the array with the generation
1795 * in the super block. If they don't match we pitch it.
1796 */
1797 static int find_newest_super_backup(struct btrfs_fs_info *info, u64 newest_gen)
1798 {
1799 u64 cur;
1800 int newest_index = -1;
1801 struct btrfs_root_backup *root_backup;
1802 int i;
1803
1804 for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
1805 root_backup = info->super_copy->super_roots + i;
1806 cur = btrfs_backup_tree_root_gen(root_backup);
1807 if (cur == newest_gen)
1808 newest_index = i;
1809 }
1810
1811 /* check to see if we actually wrapped around */
1812 if (newest_index == BTRFS_NUM_BACKUP_ROOTS - 1) {
1813 root_backup = info->super_copy->super_roots;
1814 cur = btrfs_backup_tree_root_gen(root_backup);
1815 if (cur == newest_gen)
1816 newest_index = 0;
1817 }
1818 return newest_index;
1819 }
1820
1821
1822 /*
1823 * find the oldest backup so we know where to store new entries
1824 * in the backup array. This will set the backup_root_index
1825 * field in the fs_info struct
1826 */
1827 static void find_oldest_super_backup(struct btrfs_fs_info *info,
1828 u64 newest_gen)
1829 {
1830 int newest_index = -1;
1831
1832 newest_index = find_newest_super_backup(info, newest_gen);
1833 /* if there was garbage in there, just move along */
1834 if (newest_index == -1) {
1835 info->backup_root_index = 0;
1836 } else {
1837 info->backup_root_index = (newest_index + 1) % BTRFS_NUM_BACKUP_ROOTS;
1838 }
1839 }
1840
1841 /*
1842 * copy all the root pointers into the super backup array.
1843 * this will bump the backup pointer by one when it is
1844 * done
1845 */
1846 static void backup_super_roots(struct btrfs_fs_info *info)
1847 {
1848 int next_backup;
1849 struct btrfs_root_backup *root_backup;
1850 int last_backup;
1851
1852 next_backup = info->backup_root_index;
1853 last_backup = (next_backup + BTRFS_NUM_BACKUP_ROOTS - 1) %
1854 BTRFS_NUM_BACKUP_ROOTS;
1855
1856 /*
1857 * just overwrite the last backup if we're at the same generation;
1858 * this happens only at umount
1859 */
1860 root_backup = info->super_for_commit->super_roots + last_backup;
1861 if (btrfs_backup_tree_root_gen(root_backup) ==
1862 btrfs_header_generation(info->tree_root->node))
1863 next_backup = last_backup;
1864
1865 root_backup = info->super_for_commit->super_roots + next_backup;
1866
1867 /*
1868 * make sure all of our padding and empty slots get zero filled
1869 * regardless of which ones we use today
1870 */
1871 memset(root_backup, 0, sizeof(*root_backup));
1872
1873 info->backup_root_index = (next_backup + 1) % BTRFS_NUM_BACKUP_ROOTS;
1874
1875 btrfs_set_backup_tree_root(root_backup, info->tree_root->node->start);
1876 btrfs_set_backup_tree_root_gen(root_backup,
1877 btrfs_header_generation(info->tree_root->node));
1878
1879 btrfs_set_backup_tree_root_level(root_backup,
1880 btrfs_header_level(info->tree_root->node));
1881
1882 btrfs_set_backup_chunk_root(root_backup, info->chunk_root->node->start);
1883 btrfs_set_backup_chunk_root_gen(root_backup,
1884 btrfs_header_generation(info->chunk_root->node));
1885 btrfs_set_backup_chunk_root_level(root_backup,
1886 btrfs_header_level(info->chunk_root->node));
1887
1888 btrfs_set_backup_extent_root(root_backup, info->extent_root->node->start);
1889 btrfs_set_backup_extent_root_gen(root_backup,
1890 btrfs_header_generation(info->extent_root->node));
1891 btrfs_set_backup_extent_root_level(root_backup,
1892 btrfs_header_level(info->extent_root->node));
1893
1894 /*
1895 * we might commit during log recovery, which happens before we set
1896 * the fs_root. Make sure it is valid before we fill it in.
1897 */
1898 if (info->fs_root && info->fs_root->node) {
1899 btrfs_set_backup_fs_root(root_backup,
1900 info->fs_root->node->start);
1901 btrfs_set_backup_fs_root_gen(root_backup,
1902 btrfs_header_generation(info->fs_root->node));
1903 btrfs_set_backup_fs_root_level(root_backup,
1904 btrfs_header_level(info->fs_root->node));
1905 }
1906
1907 btrfs_set_backup_dev_root(root_backup, info->dev_root->node->start);
1908 btrfs_set_backup_dev_root_gen(root_backup,
1909 btrfs_header_generation(info->dev_root->node));
1910 btrfs_set_backup_dev_root_level(root_backup,
1911 btrfs_header_level(info->dev_root->node));
1912
1913 btrfs_set_backup_csum_root(root_backup, info->csum_root->node->start);
1914 btrfs_set_backup_csum_root_gen(root_backup,
1915 btrfs_header_generation(info->csum_root->node));
1916 btrfs_set_backup_csum_root_level(root_backup,
1917 btrfs_header_level(info->csum_root->node));
1918
1919 btrfs_set_backup_total_bytes(root_backup,
1920 btrfs_super_total_bytes(info->super_copy));
1921 btrfs_set_backup_bytes_used(root_backup,
1922 btrfs_super_bytes_used(info->super_copy));
1923 btrfs_set_backup_num_devices(root_backup,
1924 btrfs_super_num_devices(info->super_copy));
1925
1926 /*
1927 * if we don't copy this out to the super_copy, it won't get remembered
1928 * for the next commit
1929 */
1930 memcpy(&info->super_copy->super_roots,
1931 &info->super_for_commit->super_roots,
1932 sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS);
1933 }
1934
1935 /*
1936 * this copies info out of the root backup array and back into
1937 * the in-memory super block. It is meant to help iterate through
1938 * the array, so you send it the number of backups you've already
1939 * tried and the last backup index you used.
1940 *
1941 * this returns -1 when it has tried all the backups
1942 */
1943 static noinline int next_root_backup(struct btrfs_fs_info *info,
1944 struct btrfs_super_block *super,
1945 int *num_backups_tried, int *backup_index)
1946 {
1947 struct btrfs_root_backup *root_backup;
1948 int newest = *backup_index;
1949
1950 if (*num_backups_tried == 0) {
1951 u64 gen = btrfs_super_generation(super);
1952
1953 newest = find_newest_super_backup(info, gen);
1954 if (newest == -1)
1955 return -1;
1956
1957 *backup_index = newest;
1958 *num_backups_tried = 1;
1959 } else if (*num_backups_tried == BTRFS_NUM_BACKUP_ROOTS) {
1960 /* we've tried all the backups, all done */
1961 return -1;
1962 } else {
1963 /* jump to the next oldest backup */
1964 newest = (*backup_index + BTRFS_NUM_BACKUP_ROOTS - 1) %
1965 BTRFS_NUM_BACKUP_ROOTS;
1966 *backup_index = newest;
1967 *num_backups_tried += 1;
1968 }
1969 root_backup = super->super_roots + newest;
1970
1971 btrfs_set_super_generation(super,
1972 btrfs_backup_tree_root_gen(root_backup));
1973 btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup));
1974 btrfs_set_super_root_level(super,
1975 btrfs_backup_tree_root_level(root_backup));
1976 btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup));
1977
1978 /*
1979 * fixme: the total bytes and num_devices need to match, or we
1980 * should require a fsck
1981 */
1982 btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup));
1983 btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup));
1984 return 0;
1985 }
1986
1987 /* helper to cleanup workers */
1988 static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
1989 {
1990 btrfs_destroy_workqueue(fs_info->fixup_workers);
1991 btrfs_destroy_workqueue(fs_info->delalloc_workers);
1992 btrfs_destroy_workqueue(fs_info->workers);
1993 btrfs_destroy_workqueue(fs_info->endio_workers);
1994 btrfs_destroy_workqueue(fs_info->endio_raid56_workers);
1995 btrfs_destroy_workqueue(fs_info->endio_repair_workers);
1996 btrfs_destroy_workqueue(fs_info->rmw_workers);
1997 btrfs_destroy_workqueue(fs_info->endio_write_workers);
1998 btrfs_destroy_workqueue(fs_info->endio_freespace_worker);
1999 btrfs_destroy_workqueue(fs_info->submit_workers);
2000 btrfs_destroy_workqueue(fs_info->delayed_workers);
2001 btrfs_destroy_workqueue(fs_info->caching_workers);
2002 btrfs_destroy_workqueue(fs_info->readahead_workers);
2003 btrfs_destroy_workqueue(fs_info->flush_workers);
2004 btrfs_destroy_workqueue(fs_info->qgroup_rescan_workers);
2005 /*
2006 * Now that all other work queues are destroyed, we can safely destroy
2007 * the queues used for metadata I/O, since tasks from those other work
2008 * queues can do metadata I/O operations.
2009 */
2010 btrfs_destroy_workqueue(fs_info->endio_meta_workers);
2011 btrfs_destroy_workqueue(fs_info->endio_meta_write_workers);
2012 }
2013
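/*
* Drop the in-memory extent buffers for a root's node and commit root,
* clearing the pointers so they can't be used again.
*/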
2014 static void free_root_extent_buffers(struct btrfs_root *root)
2015 {
2016 if (root) {
2017 free_extent_buffer(root->node);
2018 free_extent_buffer(root->commit_root);
2019 root->node = NULL;
2020 root->commit_root = NULL;
2021 }
2022 }
2023
2024 /* helper to cleanup tree roots */
2025 static void free_root_pointers(struct btrfs_fs_info *info, bool free_chunk_root)
2026 {
2027 free_root_extent_buffers(info->tree_root);
2028
2029 free_root_extent_buffers(info->dev_root);
2030 free_root_extent_buffers(info->extent_root);
2031 free_root_extent_buffers(info->csum_root);
2032 free_root_extent_buffers(info->quota_root);
2033 free_root_extent_buffers(info->uuid_root);
2034 if (free_chunk_root)
2035 free_root_extent_buffers(info->chunk_root);
2036 free_root_extent_buffers(info->free_space_root);
2037 }
2038
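/*
* Free all the subvolume roots tracked by fs_info: first anything left
* on the dead roots list, then whatever is still in the radix tree. On
* an aborted filesystem, also tear down the log root tree and pinned
* extents, since no transaction commit will do it for us.
*/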
2039 void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info)
2040 {
2041 int ret;
2042 struct btrfs_root *gang[8];
2043 int i;
2044
2045 while (!list_empty(&fs_info->dead_roots)) {
2046 gang[0] = list_entry(fs_info->dead_roots.next,
2047 struct btrfs_root, root_list);
2048 list_del(&gang[0]->root_list);
2049
2050 if (test_bit(BTRFS_ROOT_IN_RADIX, &gang[0]->state)) {
2051 btrfs_drop_and_free_fs_root(fs_info, gang[0]);
2052 } else {
2053 free_extent_buffer(gang[0]->node);
2054 free_extent_buffer(gang[0]->commit_root);
2055 btrfs_put_fs_root(gang[0]);
2056 }
2057 }
2058
2059 while (1) {
2060 ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
2061 (void **)gang, 0,
2062 ARRAY_SIZE(gang));
2063 if (!ret)
2064 break;
2065 for (i = 0; i < ret; i++)
2066 btrfs_drop_and_free_fs_root(fs_info, gang[i]);
2067 }
2068
2069 if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
2070 btrfs_free_log_root_tree(NULL, fs_info);
2071 btrfs_destroy_pinned_extent(fs_info, fs_info->pinned_extents);
2072 }
2073 }
2074
2075 static void btrfs_init_scrub(struct btrfs_fs_info *fs_info)
2076 {
2077 mutex_init(&fs_info->scrub_lock);
2078 atomic_set(&fs_info->scrubs_running, 0);
2079 atomic_set(&fs_info->scrub_pause_req, 0);
2080 atomic_set(&fs_info->scrubs_paused, 0);
2081 atomic_set(&fs_info->scrub_cancel_req, 0);
2082 init_waitqueue_head(&fs_info->scrub_pause_wait);
2083 refcount_set(&fs_info->scrub_workers_refcnt, 0);
2084 }
2085
2086 static void btrfs_init_balance(struct btrfs_fs_info *fs_info)
2087 {
2088 spin_lock_init(&fs_info->balance_lock);
2089 mutex_init(&fs_info->balance_mutex);
2090 atomic_set(&fs_info->balance_pause_req, 0);
2091 atomic_set(&fs_info->balance_cancel_req, 0);
2092 fs_info->balance_ctl = NULL;
2093 init_waitqueue_head(&fs_info->balance_wait_q);
2094 }
2095
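/*
* Set up the dummy btree inode. Its mapping backs all metadata extent
* buffers, which is why it spans the largest possible offset range.
*/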
2096 static void btrfs_init_btree_inode(struct btrfs_fs_info *fs_info)
2097 {
2098 struct inode *inode = fs_info->btree_inode;
2099
2100 inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
2101 set_nlink(inode, 1);
2102 /*
2103 * we set the i_size on the btree inode to the largest possible offset.
2104 * the real end of the address space is determined by all of
2105 * the devices in the system
2106 */
2107 inode->i_size = OFFSET_MAX;
2108 inode->i_mapping->a_ops = &btree_aops;
2109
2110 RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
2111 extent_io_tree_init(fs_info, &BTRFS_I(inode)->io_tree,
2112 IO_TREE_INODE_IO, inode);
2113 BTRFS_I(inode)->io_tree.track_uptodate = false;
2114 extent_map_tree_init(&BTRFS_I(inode)->extent_tree);
2115
2116 BTRFS_I(inode)->io_tree.ops = &btree_extent_io_ops;
2117
2118 BTRFS_I(inode)->root = fs_info->tree_root;
2119 memset(&BTRFS_I(inode)->location, 0, sizeof(struct btrfs_key));
2120 set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
2121 btrfs_insert_inode_hash(inode);
2122 }
2123
2124 static void btrfs_init_dev_replace_locks(struct btrfs_fs_info *fs_info)
2125 {
2126 mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount);
2127 init_rwsem(&fs_info->dev_replace.rwsem);
2128 init_waitqueue_head(&fs_info->dev_replace.replace_wait);
2129 }
2130
2131 static void btrfs_init_qgroup(struct btrfs_fs_info *fs_info)
2132 {
2133 spin_lock_init(&fs_info->qgroup_lock);
2134 mutex_init(&fs_info->qgroup_ioctl_lock);
2135 fs_info->qgroup_tree = RB_ROOT;
2136 INIT_LIST_HEAD(&fs_info->dirty_qgroups);
2137 fs_info->qgroup_seq = 1;
2138 fs_info->qgroup_ulist = NULL;
2139 fs_info->qgroup_rescan_running = false;
2140 mutex_init(&fs_info->qgroup_rescan_lock);
2141 }
2142
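/*
* Allocate all the work queues used by the filesystem. Queue sizes are
* derived from fs_info->thread_pool_size; the last argument of
* btrfs_alloc_workqueue() is the idle threshold. Returns -ENOMEM if
* any allocation fails.
*/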
2143 static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info,
2144 struct btrfs_fs_devices *fs_devices)
2145 {
2146 u32 max_active = fs_info->thread_pool_size;
2147 unsigned int flags = WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND;
2148
2149 fs_info->workers =
2150 btrfs_alloc_workqueue(fs_info, "worker",
2151 flags | WQ_HIGHPRI, max_active, 16);
2152
2153 fs_info->delalloc_workers =
2154 btrfs_alloc_workqueue(fs_info, "delalloc",
2155 flags, max_active, 2);
2156
2157 fs_info->flush_workers =
2158 btrfs_alloc_workqueue(fs_info, "flush_delalloc",
2159 flags, max_active, 0);
2160
2161 fs_info->caching_workers =
2162 btrfs_alloc_workqueue(fs_info, "cache", flags, max_active, 0);
2163
2164 /*
2165 * a higher idle thresh on the submit workers makes it much more
2166 * likely that bios will be sent down in a sane order to the
2167 * devices
2168 */
2169 fs_info->submit_workers =
2170 btrfs_alloc_workqueue(fs_info, "submit", flags,
2171 min_t(u64, fs_devices->num_devices,
2172 max_active), 64);
2173
2174 fs_info->fixup_workers =
2175 btrfs_alloc_workqueue(fs_info, "fixup", flags, 1, 0);
2176
2177 /*
2178 * endios are largely parallel and should have a very
2179 * low idle thresh
2180 */
2181 fs_info->endio_workers =
2182 btrfs_alloc_workqueue(fs_info, "endio", flags, max_active, 4);
2183 fs_info->endio_meta_workers =
2184 btrfs_alloc_workqueue(fs_info, "endio-meta", flags,
2185 max_active, 4);
2186 fs_info->endio_meta_write_workers =
2187 btrfs_alloc_workqueue(fs_info, "endio-meta-write", flags,
2188 max_active, 2);
2189 fs_info->endio_raid56_workers =
2190 btrfs_alloc_workqueue(fs_info, "endio-raid56", flags,
2191 max_active, 4);
2192 fs_info->endio_repair_workers =
2193 btrfs_alloc_workqueue(fs_info, "endio-repair", flags, 1, 0);
2194 fs_info->rmw_workers =
2195 btrfs_alloc_workqueue(fs_info, "rmw", flags, max_active, 2);
2196 fs_info->endio_write_workers =
2197 btrfs_alloc_workqueue(fs_info, "endio-write", flags,
2198 max_active, 2);
2199 fs_info->endio_freespace_worker =
2200 btrfs_alloc_workqueue(fs_info, "freespace-write", flags,
2201 max_active, 0);
2202 fs_info->delayed_workers =
2203 btrfs_alloc_workqueue(fs_info, "delayed-meta", flags,
2204 max_active, 0);
2205 fs_info->readahead_workers =
2206 btrfs_alloc_workqueue(fs_info, "readahead", flags,
2207 max_active, 2);
2208 fs_info->qgroup_rescan_workers =
2209 btrfs_alloc_workqueue(fs_info, "qgroup-rescan", flags, 1, 0);
2210
2211 if (!(fs_info->workers && fs_info->delalloc_workers &&
2212 fs_info->submit_workers && fs_info->flush_workers &&
2213 fs_info->endio_workers && fs_info->endio_meta_workers &&
2214 fs_info->endio_meta_write_workers &&
2215 fs_info->endio_repair_workers &&
2216 fs_info->endio_write_workers && fs_info->endio_raid56_workers &&
2217 fs_info->endio_freespace_worker && fs_info->rmw_workers &&
2218 fs_info->caching_workers && fs_info->readahead_workers &&
2219 fs_info->fixup_workers && fs_info->delayed_workers &&
2220 fs_info->qgroup_rescan_workers)) {
2221 return -ENOMEM;
2222 }
2223
2224 return 0;
2225 }
2226
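/*
* Allocate the crypto shash for the checksum type recorded in the super
* block and note whether the implementation is a fast accelerated one
* (anything other than the generic crc32c driver).
*/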
2227 static int btrfs_init_csum_hash(struct btrfs_fs_info *fs_info, u16 csum_type)
2228 {
2229 struct crypto_shash *csum_shash;
2230 const char *csum_name = btrfs_super_csum_name(csum_type);
2231
2232 csum_shash = crypto_alloc_shash(csum_name, 0, 0);
2233
2234 if (IS_ERR(csum_shash)) {
2235 btrfs_err(fs_info, "error allocating %s hash for checksum",
2236 csum_name);
2237 return PTR_ERR(csum_shash);
2238 }
2239
2240 fs_info->csum_shash = csum_shash;
2241
2242 /*
2243 * Check if the checksum implementation is a fast accelerated one.
2244 * As-is this is a bit of a hack and should be replaced once the csum
2245 * implementations provide that information themselves.
2246 */
2247 switch (csum_type) {
2248 case BTRFS_CSUM_TYPE_CRC32:
2249 if (!strstr(crypto_shash_driver_name(csum_shash), "generic"))
2250 set_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags);
2251 break;
2252 default:
2253 break;
2254 }
2255
2256 btrfs_info(fs_info, "using %s (%s) checksum algorithm",
2257 btrfs_super_csum_name(csum_type),
2258 crypto_shash_driver_name(csum_shash));
2259 return 0;
2260 }
2261
2262 static void btrfs_free_csum_hash(struct btrfs_fs_info *fs_info)
2263 {
2264 crypto_free_shash(fs_info->csum_shash);
2265 }
2266
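/*
* Read the log tree root recorded in the super block and replay it.
* This needs a writable device, since log replay modifies metadata even
* when the filesystem is mounted read-only.
*/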
2267 static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
2268 struct btrfs_fs_devices *fs_devices)
2269 {
2270 int ret;
2271 struct btrfs_root *log_tree_root;
2272 struct btrfs_super_block *disk_super = fs_info->super_copy;
2273 u64 bytenr = btrfs_super_log_root(disk_super);
2274 int level = btrfs_super_log_root_level(disk_super);
2275
2276 if (fs_devices->rw_devices == 0) {
2277 btrfs_warn(fs_info, "log replay required on RO media");
2278 return -EIO;
2279 }
2280
2281 log_tree_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
2282 if (!log_tree_root)
2283 return -ENOMEM;
2284
2285 __setup_root(log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID);
2286
2287 log_tree_root->node = read_tree_block(fs_info, bytenr,
2288 fs_info->generation + 1,
2289 level, NULL);
2290 if (IS_ERR(log_tree_root->node)) {
2291 btrfs_warn(fs_info, "failed to read log tree");
2292 ret = PTR_ERR(log_tree_root->node);
2293 kfree(log_tree_root);
2294 return ret;
2295 } else if (!extent_buffer_uptodate(log_tree_root->node)) {
2296 btrfs_err(fs_info, "failed to read log tree");
2297 free_extent_buffer(log_tree_root->node);
2298 kfree(log_tree_root);
2299 return -EIO;
2300 }
2301 /* returns with log_tree_root freed on success */
2302 ret = btrfs_recover_log_trees(log_tree_root);
2303 if (ret) {
2304 btrfs_handle_fs_error(fs_info, ret,
2305 "Failed to recover log tree");
2306 free_extent_buffer(log_tree_root->node);
2307 kfree(log_tree_root);
2308 return ret;
2309 }
2310
2311 if (sb_rdonly(fs_info->sb)) {
2312 ret = btrfs_commit_super(fs_info);
2313 if (ret)
2314 return ret;
2315 }
2316
2317 return 0;
2318 }
2319
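/*
* Read the well-known trees (extent, device, csum, quota, uuid and free
* space) from the tree root and wire them up in fs_info. The quota and
* uuid trees are optional, so a missing one is not an error.
*/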
2320 static int btrfs_read_roots(struct btrfs_fs_info *fs_info)
2321 {
2322 struct btrfs_root *tree_root = fs_info->tree_root;
2323 struct btrfs_root *root;
2324 struct btrfs_key location;
2325 int ret;
2326
2327 BUG_ON(!fs_info->tree_root);
2328
2329 location.objectid = BTRFS_EXTENT_TREE_OBJECTID;
2330 location.type = BTRFS_ROOT_ITEM_KEY;
2331 location.offset = 0;
2332
2333 root = btrfs_read_tree_root(tree_root, &location);
2334 if (IS_ERR(root)) {
2335 ret = PTR_ERR(root);
2336 goto out;
2337 }
2338 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2339 fs_info->extent_root = root;
2340
2341 location.objectid = BTRFS_DEV_TREE_OBJECTID;
2342 root = btrfs_read_tree_root(tree_root, &location);
2343 if (IS_ERR(root)) {
2344 ret = PTR_ERR(root);
2345 goto out;
2346 }
2347 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2348 fs_info->dev_root = root;
2349 btrfs_init_devices_late(fs_info);
2350
2351 location.objectid = BTRFS_CSUM_TREE_OBJECTID;
2352 root = btrfs_read_tree_root(tree_root, &location);
2353 if (IS_ERR(root)) {
2354 ret = PTR_ERR(root);
2355 goto out;
2356 }
2357 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2358 fs_info->csum_root = root;
2359
2360 location.objectid = BTRFS_QUOTA_TREE_OBJECTID;
2361 root = btrfs_read_tree_root(tree_root, &location);
2362 if (!IS_ERR(root)) {
2363 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2364 set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
2365 fs_info->quota_root = root;
2366 }
2367
2368 location.objectid = BTRFS_UUID_TREE_OBJECTID;
2369 root = btrfs_read_tree_root(tree_root, &location);
2370 if (IS_ERR(root)) {
2371 ret = PTR_ERR(root);
2372 if (ret != -ENOENT)
2373 goto out;
2374 } else {
2375 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2376 fs_info->uuid_root = root;
2377 }
2378
2379 if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
2380 location.objectid = BTRFS_FREE_SPACE_TREE_OBJECTID;
2381 root = btrfs_read_tree_root(tree_root, &location);
2382 if (IS_ERR(root)) {
2383 ret = PTR_ERR(root);
2384 goto out;
2385 }
2386 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2387 fs_info->free_space_root = root;
2388 }
2389
2390 return 0;
2391 out:
2392 btrfs_warn(fs_info, "failed to read root (objectid=%llu): %d",
2393 location.objectid, ret);
2394 return ret;
2395 }
2396
2397 /*
2398 * Real super block validation
2399 * NOTE: super csum type and incompat features will not be checked here.
2400 *
2401 * @sb: super block to check
2402 * @mirror_num: which super block copy to check the bytenr of:
2403 * 0 the primary (1st) sb
2404 * 1, 2 2nd and 3rd backup copy
2405 * -1 skip bytenr check
2406 */
2407 static int validate_super(struct btrfs_fs_info *fs_info,
2408 struct btrfs_super_block *sb, int mirror_num)
2409 {
2410 u64 nodesize = btrfs_super_nodesize(sb);
2411 u64 sectorsize = btrfs_super_sectorsize(sb);
2412 int ret = 0;
2413
2414 if (btrfs_super_magic(sb) != BTRFS_MAGIC) {
2415 btrfs_err(fs_info, "no valid FS found");
2416 ret = -EINVAL;
2417 }
2418 if (btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP) {
2419 btrfs_err(fs_info, "unrecognized or unsupported super flag: %llu",
2420 btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP);
2421 ret = -EINVAL;
2422 }
2423 if (btrfs_super_root_level(sb) >= BTRFS_MAX_LEVEL) {
2424 btrfs_err(fs_info, "tree_root level too big: %d >= %d",
2425 btrfs_super_root_level(sb), BTRFS_MAX_LEVEL);
2426 ret = -EINVAL;
2427 }
2428 if (btrfs_super_chunk_root_level(sb) >= BTRFS_MAX_LEVEL) {
2429 btrfs_err(fs_info, "chunk_root level too big: %d >= %d",
2430 btrfs_super_chunk_root_level(sb), BTRFS_MAX_LEVEL);
2431 ret = -EINVAL;
2432 }
2433 if (btrfs_super_log_root_level(sb) >= BTRFS_MAX_LEVEL) {
2434 btrfs_err(fs_info, "log_root level too big: %d >= %d",
2435 btrfs_super_log_root_level(sb), BTRFS_MAX_LEVEL);
2436 ret = -EINVAL;
2437 }
2438
2439 /*
2440 * Check sectorsize and nodesize first; other checks will need them.
2441 * Check all possible sectorsizes (4K, 8K, 16K, 32K, 64K) here.
2442 */
2443 if (!is_power_of_2(sectorsize) || sectorsize < 4096 ||
2444 sectorsize > BTRFS_MAX_METADATA_BLOCKSIZE) {
2445 btrfs_err(fs_info, "invalid sectorsize %llu", sectorsize);
2446 ret = -EINVAL;
2447 }
2448 /* Only PAGE_SIZE is supported so far */
2449 if (sectorsize != PAGE_SIZE) {
2450 btrfs_err(fs_info,
2451 "sectorsize %llu not supported yet, only support %lu",
2452 sectorsize, PAGE_SIZE);
2453 ret = -EINVAL;
2454 }
2455 if (!is_power_of_2(nodesize) || nodesize < sectorsize ||
2456 nodesize > BTRFS_MAX_METADATA_BLOCKSIZE) {
2457 btrfs_err(fs_info, "invalid nodesize %llu", nodesize);
2458 ret = -EINVAL;
2459 }
2460 if (nodesize != le32_to_cpu(sb->__unused_leafsize)) {
2461 btrfs_err(fs_info, "invalid leafsize %u, should be %llu",
2462 le32_to_cpu(sb->__unused_leafsize), nodesize);
2463 ret = -EINVAL;
2464 }
2465
2466 /* Root alignment check */
2467 if (!IS_ALIGNED(btrfs_super_root(sb), sectorsize)) {
2468 btrfs_warn(fs_info, "tree_root block unaligned: %llu",
2469 btrfs_super_root(sb));
2470 ret = -EINVAL;
2471 }
2472 if (!IS_ALIGNED(btrfs_super_chunk_root(sb), sectorsize)) {
2473 btrfs_warn(fs_info, "chunk_root block unaligned: %llu",
2474 btrfs_super_chunk_root(sb));
2475 ret = -EINVAL;
2476 }
2477 if (!IS_ALIGNED(btrfs_super_log_root(sb), sectorsize)) {
2478 btrfs_warn(fs_info, "log_root block unaligned: %llu",
2479 btrfs_super_log_root(sb));
2480 ret = -EINVAL;
2481 }
2482
2483 if (memcmp(fs_info->fs_devices->fsid, sb->fsid, BTRFS_FSID_SIZE) != 0) {
2484 btrfs_err(fs_info,
2485 "superblock fsid doesn't match fsid of fs_devices: %pU != %pU",
2486 sb->fsid, fs_info->fs_devices->fsid);
2487 ret = -EINVAL;
2488 }
2489
2490 if (memcmp(fs_info->fs_devices->metadata_uuid, btrfs_sb_fsid_ptr(sb),
2491 BTRFS_FSID_SIZE) != 0) {
2492 btrfs_err(fs_info,
2493 "superblock metadata_uuid doesn't match metadata uuid of fs_devices: %pU != %pU",
2494 btrfs_sb_fsid_ptr(sb), fs_info->fs_devices->metadata_uuid);
2495 ret = -EINVAL;
2496 }
2497
2498 if (memcmp(fs_info->fs_devices->metadata_uuid, sb->dev_item.fsid,
2499 BTRFS_FSID_SIZE) != 0) {
2500 btrfs_err(fs_info,
2501 "dev_item UUID does not match metadata fsid: %pU != %pU",
2502 fs_info->fs_devices->metadata_uuid, sb->dev_item.fsid);
2503 ret = -EINVAL;
2504 }
2505
2506 /*
2507 * Hint to catch really bogus numbers, bitflips and so on; more exact checks are
2508 * done later
2509 */
2510 if (btrfs_super_bytes_used(sb) < 6 * btrfs_super_nodesize(sb)) {
2511 btrfs_err(fs_info, "bytes_used is too small %llu",
2512 btrfs_super_bytes_used(sb));
2513 ret = -EINVAL;
2514 }
2515 if (!is_power_of_2(btrfs_super_stripesize(sb))) {
2516 btrfs_err(fs_info, "invalid stripesize %u",
2517 btrfs_super_stripesize(sb));
2518 ret = -EINVAL;
2519 }
2520 if (btrfs_super_num_devices(sb) > (1UL << 31))
2521 btrfs_warn(fs_info, "suspicious number of devices: %llu",
2522 btrfs_super_num_devices(sb));
2523 if (btrfs_super_num_devices(sb) == 0) {
2524 btrfs_err(fs_info, "number of devices is 0");
2525 ret = -EINVAL;
2526 }
2527
2528 if (mirror_num >= 0 &&
2529 btrfs_super_bytenr(sb) != btrfs_sb_offset(mirror_num)) {
2530 btrfs_err(fs_info, "super offset mismatch %llu != %u",
2531 btrfs_super_bytenr(sb), BTRFS_SUPER_INFO_OFFSET);
2532 ret = -EINVAL;
2533 }
2534
2535 /*
2536 * Obvious sys_chunk_array corruption: it must hold at least one key
2537 * and one chunk
2538 */
2539 if (btrfs_super_sys_array_size(sb) > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
2540 btrfs_err(fs_info, "system chunk array too big %u > %u",
2541 btrfs_super_sys_array_size(sb),
2542 BTRFS_SYSTEM_CHUNK_ARRAY_SIZE);
2543 ret = -EINVAL;
2544 }
2545 if (btrfs_super_sys_array_size(sb) < sizeof(struct btrfs_disk_key)
2546 + sizeof(struct btrfs_chunk)) {
2547 btrfs_err(fs_info, "system chunk array too small %u < %zu",
2548 btrfs_super_sys_array_size(sb),
2549 sizeof(struct btrfs_disk_key)
2550 + sizeof(struct btrfs_chunk));
2551 ret = -EINVAL;
2552 }
2553
2554 /*
2555 * The generation is a global counter, so we'll trust it more than the
2556 * others, but it's still possible that it's the one that's wrong.
2557 */
2558 if (btrfs_super_generation(sb) < btrfs_super_chunk_root_generation(sb))
2559 btrfs_warn(fs_info,
2560 "suspicious: generation < chunk_root_generation: %llu < %llu",
2561 btrfs_super_generation(sb),
2562 btrfs_super_chunk_root_generation(sb));
2563 if (btrfs_super_generation(sb) < btrfs_super_cache_generation(sb)
2564 && btrfs_super_cache_generation(sb) != (u64)-1)
2565 btrfs_warn(fs_info,
2566 "suspicious: generation < cache_generation: %llu < %llu",
2567 btrfs_super_generation(sb),
2568 btrfs_super_cache_generation(sb));
2569
2570 return ret;
2571 }
2572
2573 /*
2574 * Validation of super block at mount time.
2575 * Some checks that were already done early at mount time, like csum type
2576 * and incompat flags, will be skipped.
2577 */
2578 static int btrfs_validate_mount_super(struct btrfs_fs_info *fs_info)
2579 {
2580 return validate_super(fs_info, fs_info->super_copy, 0);
2581 }
2582
2583 /*
2584 * Validation of super block at write time.
2585 * Some checks, like the bytenr check, will be skipped as their values
2586 * will be overwritten soon.
2587 * Extra checks like csum type and incompat flags will be done here.
2588 */
2589 static int btrfs_validate_write_super(struct btrfs_fs_info *fs_info,
2590 struct btrfs_super_block *sb)
2591 {
2592 int ret;
2593
2594 ret = validate_super(fs_info, sb, -1);
2595 if (ret < 0)
2596 goto out;
2597 if (!btrfs_supported_super_csum(btrfs_super_csum_type(sb))) {
2598 ret = -EUCLEAN;
2599 btrfs_err(fs_info, "invalid csum type, has %u want %u",
2600 btrfs_super_csum_type(sb), BTRFS_CSUM_TYPE_CRC32);
2601 goto out;
2602 }
2603 if (btrfs_super_incompat_flags(sb) & ~BTRFS_FEATURE_INCOMPAT_SUPP) {
2604 ret = -EUCLEAN;
2605 btrfs_err(fs_info,
2606 "invalid incompat flags, has 0x%llx valid mask 0x%llx",
2607 btrfs_super_incompat_flags(sb),
2608 (unsigned long long)BTRFS_FEATURE_INCOMPAT_SUPP);
2609 goto out;
2610 }
2611 out:
2612 if (ret < 0)
2613 btrfs_err(fs_info,
2614 "super block corruption detected before writing it to disk");
2615 return ret;
2616 }
2617
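/*
* Mount-time entry point: initialize fs_info state, read and validate
* the super block, start the worker queues and kthreads, read the chunk
* and tree roots (falling back to backup roots if allowed), replay the
* log if needed and finally open the fs tree.
*/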
2618 int open_ctree(struct super_block *sb,
2619 struct btrfs_fs_devices *fs_devices,
2620 char *options)
2621 {
2622 u32 sectorsize;
2623 u32 nodesize;
2624 u32 stripesize;
2625 u64 generation;
2626 u64 features;
2627 u16 csum_type;
2628 struct btrfs_key location;
2629 struct buffer_head *bh;
2630 struct btrfs_super_block *disk_super;
2631 struct btrfs_fs_info *fs_info = btrfs_sb(sb);
2632 struct btrfs_root *tree_root;
2633 struct btrfs_root *chunk_root;
2634 int ret;
2635 int err = -EINVAL;
2636 int num_backups_tried = 0;
2637 int backup_index = 0;
2638 int clear_free_space_tree = 0;
2639 int level;
2640
2641 tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
2642 chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
2643 if (!tree_root || !chunk_root) {
2644 err = -ENOMEM;
2645 goto fail;
2646 }
2647
2648 ret = init_srcu_struct(&fs_info->subvol_srcu);
2649 if (ret) {
2650 err = ret;
2651 goto fail;
2652 }
2653
2654 ret = percpu_counter_init(&fs_info->dio_bytes, 0, GFP_KERNEL);
2655 if (ret) {
2656 err = ret;
2657 goto fail_srcu;
2658 }
2659
2660 ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0, GFP_KERNEL);
2661 if (ret) {
2662 err = ret;
2663 goto fail_dio_bytes;
2664 }
2665 fs_info->dirty_metadata_batch = PAGE_SIZE *
2666 (1 + ilog2(nr_cpu_ids));
2667
2668 ret = percpu_counter_init(&fs_info->delalloc_bytes, 0, GFP_KERNEL);
2669 if (ret) {
2670 err = ret;
2671 goto fail_dirty_metadata_bytes;
2672 }
2673
2674 ret = percpu_counter_init(&fs_info->dev_replace.bio_counter, 0,
2675 GFP_KERNEL);
2676 if (ret) {
2677 err = ret;
2678 goto fail_delalloc_bytes;
2679 }
2680
2681 INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
2682 INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC);
2683 INIT_LIST_HEAD(&fs_info->trans_list);
2684 INIT_LIST_HEAD(&fs_info->dead_roots);
2685 INIT_LIST_HEAD(&fs_info->delayed_iputs);
2686 INIT_LIST_HEAD(&fs_info->delalloc_roots);
2687 INIT_LIST_HEAD(&fs_info->caching_block_groups);
2688 spin_lock_init(&fs_info->delalloc_root_lock);
2689 spin_lock_init(&fs_info->trans_lock);
2690 spin_lock_init(&fs_info->fs_roots_radix_lock);
2691 spin_lock_init(&fs_info->delayed_iput_lock);
2692 spin_lock_init(&fs_info->defrag_inodes_lock);
2693 spin_lock_init(&fs_info->super_lock);
2694 spin_lock_init(&fs_info->buffer_lock);
2695 spin_lock_init(&fs_info->unused_bgs_lock);
2696 rwlock_init(&fs_info->tree_mod_log_lock);
2697 mutex_init(&fs_info->unused_bg_unpin_mutex);
2698 mutex_init(&fs_info->delete_unused_bgs_mutex);
2699 mutex_init(&fs_info->reloc_mutex);
2700 mutex_init(&fs_info->delalloc_root_mutex);
2701 seqlock_init(&fs_info->profiles_lock);
2702
2703 INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
2704 INIT_LIST_HEAD(&fs_info->space_info);
2705 INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
2706 INIT_LIST_HEAD(&fs_info->unused_bgs);
2707 extent_map_tree_init(&fs_info->mapping_tree);
2708 btrfs_init_block_rsv(&fs_info->global_block_rsv,
2709 BTRFS_BLOCK_RSV_GLOBAL);
2710 btrfs_init_block_rsv(&fs_info->trans_block_rsv, BTRFS_BLOCK_RSV_TRANS);
2711 btrfs_init_block_rsv(&fs_info->chunk_block_rsv, BTRFS_BLOCK_RSV_CHUNK);
2712 btrfs_init_block_rsv(&fs_info->empty_block_rsv, BTRFS_BLOCK_RSV_EMPTY);
2713 btrfs_init_block_rsv(&fs_info->delayed_block_rsv,
2714 BTRFS_BLOCK_RSV_DELOPS);
2715 btrfs_init_block_rsv(&fs_info->delayed_refs_rsv,
2716 BTRFS_BLOCK_RSV_DELREFS);
2717
2718 atomic_set(&fs_info->async_delalloc_pages, 0);
2719 atomic_set(&fs_info->defrag_running, 0);
2720 atomic_set(&fs_info->reada_works_cnt, 0);
2721 atomic_set(&fs_info->nr_delayed_iputs, 0);
2722 atomic64_set(&fs_info->tree_mod_seq, 0);
2723 fs_info->sb = sb;
2724 fs_info->max_inline = BTRFS_DEFAULT_MAX_INLINE;
2725 fs_info->metadata_ratio = 0;
2726 fs_info->defrag_inodes = RB_ROOT;
2727 atomic64_set(&fs_info->free_chunk_space, 0);
2728 fs_info->tree_mod_log = RB_ROOT;
2729 fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
2730 fs_info->avg_delayed_ref_runtime = NSEC_PER_SEC >> 6; /* div by 64 */
2731 /* readahead state */
2732 INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
2733 spin_lock_init(&fs_info->reada_lock);
2734 btrfs_init_ref_verify(fs_info);
2735
2736 fs_info->thread_pool_size = min_t(unsigned long,
2737 num_online_cpus() + 2, 8);
2738
2739 INIT_LIST_HEAD(&fs_info->ordered_roots);
2740 spin_lock_init(&fs_info->ordered_root_lock);
2741
2742 fs_info->btree_inode = new_inode(sb);
2743 if (!fs_info->btree_inode) {
2744 err = -ENOMEM;
2745 goto fail_bio_counter;
2746 }
2747 mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
2748
2749 fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root),
2750 GFP_KERNEL);
2751 if (!fs_info->delayed_root) {
2752 err = -ENOMEM;
2753 goto fail_iput;
2754 }
2755 btrfs_init_delayed_root(fs_info->delayed_root);
2756
2757 btrfs_init_scrub(fs_info);
2758 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
2759 fs_info->check_integrity_print_mask = 0;
2760 #endif
2761 btrfs_init_balance(fs_info);
2762 btrfs_init_async_reclaim_work(&fs_info->async_reclaim_work);
2763
2764 sb->s_blocksize = BTRFS_BDEV_BLOCKSIZE;
2765 sb->s_blocksize_bits = blksize_bits(BTRFS_BDEV_BLOCKSIZE);
2766
2767 btrfs_init_btree_inode(fs_info);
2768
2769 spin_lock_init(&fs_info->block_group_cache_lock);
2770 fs_info->block_group_cache_tree = RB_ROOT;
2771 fs_info->first_logical_byte = (u64)-1;
2772
2773 extent_io_tree_init(fs_info, &fs_info->freed_extents[0],
2774 IO_TREE_FS_INFO_FREED_EXTENTS0, NULL);
2775 extent_io_tree_init(fs_info, &fs_info->freed_extents[1],
2776 IO_TREE_FS_INFO_FREED_EXTENTS1, NULL);
2777 fs_info->pinned_extents = &fs_info->freed_extents[0];
2778 set_bit(BTRFS_FS_BARRIER, &fs_info->flags);
2779
2780 mutex_init(&fs_info->ordered_operations_mutex);
2781 mutex_init(&fs_info->tree_log_mutex);
2782 mutex_init(&fs_info->chunk_mutex);
2783 mutex_init(&fs_info->transaction_kthread_mutex);
2784 mutex_init(&fs_info->cleaner_mutex);
2785 mutex_init(&fs_info->ro_block_group_mutex);
2786 init_rwsem(&fs_info->commit_root_sem);
2787 init_rwsem(&fs_info->cleanup_work_sem);
2788 init_rwsem(&fs_info->subvol_sem);
2789 sema_init(&fs_info->uuid_tree_rescan_sem, 1);
2790
2791 btrfs_init_dev_replace_locks(fs_info);
2792 btrfs_init_qgroup(fs_info);
2793
2794 btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
2795 btrfs_init_free_cluster(&fs_info->data_alloc_cluster);
2796
2797 init_waitqueue_head(&fs_info->transaction_throttle);
2798 init_waitqueue_head(&fs_info->transaction_wait);
2799 init_waitqueue_head(&fs_info->transaction_blocked_wait);
2800 init_waitqueue_head(&fs_info->async_submit_wait);
2801 init_waitqueue_head(&fs_info->delayed_iputs_wait);
2802
2803 /* Usable values until the real ones are cached from the superblock */
2804 fs_info->nodesize = 4096;
2805 fs_info->sectorsize = 4096;
2806 fs_info->stripesize = 4096;
2807
2808 spin_lock_init(&fs_info->swapfile_pins_lock);
2809 fs_info->swapfile_pins = RB_ROOT;
2810
2811 fs_info->send_in_progress = 0;
2812
2813 ret = btrfs_alloc_stripe_hash_table(fs_info);
2814 if (ret) {
2815 err = ret;
2816 goto fail_alloc;
2817 }
2818
2819 __setup_root(tree_root, fs_info, BTRFS_ROOT_TREE_OBJECTID);
2820
2821 invalidate_bdev(fs_devices->latest_bdev);
2822
2823 /*
2824 * Read super block and check the signature bytes only
2825 */
2826 bh = btrfs_read_dev_super(fs_devices->latest_bdev);
2827 if (IS_ERR(bh)) {
2828 err = PTR_ERR(bh);
2829 goto fail_alloc;
2830 }
2831
2832 /*
2833 * Verify the csum type first; if that or the checksum value is
2834 * corrupted, we'll find out here
2835 */
2836 csum_type = btrfs_super_csum_type((struct btrfs_super_block *)bh->b_data);
2837 if (!btrfs_supported_super_csum(csum_type)) {
2838 btrfs_err(fs_info, "unsupported checksum algorithm: %u",
2839 csum_type);
2840 err = -EINVAL;
2841 brelse(bh);
2842 goto fail_alloc;
2843 }
2844
2845 ret = btrfs_init_csum_hash(fs_info, csum_type);
2846 if (ret) {
2847 err = ret;
2848 goto fail_alloc;
2849 }
2850
2851 /*
2852 * We want to check the superblock checksum; the type is stored inside.
2853 * Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k).
2854 */
2855 if (btrfs_check_super_csum(fs_info, bh->b_data)) {
2856 btrfs_err(fs_info, "superblock checksum mismatch");
2857 err = -EINVAL;
2858 brelse(bh);
2859 goto fail_csum;
2860 }
2861
2862 /*
2863 * super_copy is zeroed at allocation time and we never touch the
2864 * following bytes up to INFO_SIZE; the checksum is calculated from
2865 * the whole block of INFO_SIZE
2866 */
2867 memcpy(fs_info->super_copy, bh->b_data, sizeof(*fs_info->super_copy));
2868 brelse(bh);
2869
2870 disk_super = fs_info->super_copy;
2871
2872
2873 features = btrfs_super_flags(disk_super);
2874 if (features & BTRFS_SUPER_FLAG_CHANGING_FSID_V2) {
2875 features &= ~BTRFS_SUPER_FLAG_CHANGING_FSID_V2;
2876 btrfs_set_super_flags(disk_super, features);
2877 btrfs_info(fs_info,
2878 "found metadata UUID change in progress flag, clearing");
2879 }
2880
2881 memcpy(fs_info->super_for_commit, fs_info->super_copy,
2882 sizeof(*fs_info->super_for_commit));
2883
2884 ret = btrfs_validate_mount_super(fs_info);
2885 if (ret) {
2886 btrfs_err(fs_info, "superblock contains fatal errors");
2887 err = -EINVAL;
2888 goto fail_csum;
2889 }
2890
2891 if (!btrfs_super_root(disk_super))
2892 goto fail_csum;
2893
2894 /* check FS state, whether FS is broken. */
2895 if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR)
2896 set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);
2897
2898 /*
2899 * run through our array of backup supers and set up
2900 * our ring pointer to the oldest one
2901 */
2902 generation = btrfs_super_generation(disk_super);
2903 find_oldest_super_backup(fs_info, generation);
2904
2905 /*
2906 * In the long term, we'll store the compression type in the super
2907 * block, and it'll be used for per file compression control.
2908 */
2909 fs_info->compress_type = BTRFS_COMPRESS_ZLIB;
2910
2911 /*
2912 * Flag our filesystem as having big metadata blocks if they are bigger
2913 * than the page size
2914 */
2915 if (btrfs_super_nodesize(disk_super) > PAGE_SIZE) {
2916 if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
2917 btrfs_info(fs_info,
2918 "flagging fs with big metadata feature");
2919 features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
2920 }
2921
2922 /* Set up fs_info before parsing mount options */
2923 nodesize = btrfs_super_nodesize(disk_super);
2924 sectorsize = btrfs_super_sectorsize(disk_super);
2925 stripesize = sectorsize;
2926 fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids));
2927 fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
2928
2929 /* Cache block sizes */
2930 fs_info->nodesize = nodesize;
2931 fs_info->sectorsize = sectorsize;
2932 fs_info->stripesize = stripesize;
2933
2934 ret = btrfs_parse_options(fs_info, options, sb->s_flags);
2935 if (ret) {
2936 err = ret;
2937 goto fail_csum;
2938 }
2939
2940 features = btrfs_super_incompat_flags(disk_super) &
2941 ~BTRFS_FEATURE_INCOMPAT_SUPP;
2942 if (features) {
2943 btrfs_err(fs_info,
2944 "cannot mount because of unsupported optional features (0x%llx)",
2945 features);
2946 err = -EINVAL;
2947 goto fail_csum;
2948 }
2949
2950 features = btrfs_super_incompat_flags(disk_super);
2951 features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
2952 if (fs_info->compress_type == BTRFS_COMPRESS_LZO)
2953 features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
2954 else if (fs_info->compress_type == BTRFS_COMPRESS_ZSTD)
2955 features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_ZSTD;
2956
2957 if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
2958 btrfs_info(fs_info, "has skinny extents");
2959
2960 /*
2961 * mixed block groups end up with duplicate but slightly offset
2962 * extent buffers for the same range. This leads to corruption.
2963 */
2964 if ((features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
2965 (sectorsize != nodesize)) {
2966 btrfs_err(fs_info,
2967 "unequal nodesize/sectorsize (%u != %u) are not allowed for mixed block groups",
2968 nodesize, sectorsize);
2969 goto fail_csum;
2970 }
2971
2972 /*
2973 * No need to take the lock because there is no other task that will
2974 * update the flag.
2975 */
2976 btrfs_set_super_incompat_flags(disk_super, features);
2977
2978 features = btrfs_super_compat_ro_flags(disk_super) &
2979 ~BTRFS_FEATURE_COMPAT_RO_SUPP;
2980 if (!sb_rdonly(sb) && features) {
2981 btrfs_err(fs_info,
2982 "cannot mount read-write because of unsupported optional features (0x%llx)",
2983 features);
2984 err = -EINVAL;
2985 goto fail_csum;
2986 }
2987 /*
2988 * We have unsupported RO compat features; even though we're mounted
2989 * read-only, we must not cause any metadata writes, including log
2990 * replay, or we could break whatever the new feature requires.
2991 */
2992 if (unlikely(features && btrfs_super_log_root(disk_super) &&
2993 !btrfs_test_opt(fs_info, NOLOGREPLAY))) {
2994 btrfs_err(fs_info,
2995 "cannot replay dirty log with unsupported compat_ro features (0x%llx), try rescue=nologreplay",
2996 features);
2997 err = -EINVAL;
2998 goto fail_csum;
2999 }
3000
3001
3002 ret = btrfs_init_workqueues(fs_info, fs_devices);
3003 if (ret) {
3004 err = ret;
3005 goto fail_sb_buffer;
3006 }
3007
3008 sb->s_bdi->congested_fn = btrfs_congested_fn;
3009 sb->s_bdi->congested_data = fs_info;
3010 sb->s_bdi->capabilities |= BDI_CAP_CGROUP_WRITEBACK;
3011 sb->s_bdi->ra_pages = VM_READAHEAD_PAGES;
3012 sb->s_bdi->ra_pages *= btrfs_super_num_devices(disk_super);
3013 sb->s_bdi->ra_pages = max(sb->s_bdi->ra_pages, SZ_4M / PAGE_SIZE);
3014
3015 sb->s_blocksize = sectorsize;
3016 sb->s_blocksize_bits = blksize_bits(sectorsize);
3017 memcpy(&sb->s_uuid, fs_info->fs_devices->fsid, BTRFS_FSID_SIZE);
3018
3019 mutex_lock(&fs_info->chunk_mutex);
3020 ret = btrfs_read_sys_array(fs_info);
3021 mutex_unlock(&fs_info->chunk_mutex);
3022 if (ret) {
3023 btrfs_err(fs_info, "failed to read the system array: %d", ret);
3024 goto fail_sb_buffer;
3025 }
3026
3027 generation = btrfs_super_chunk_root_generation(disk_super);
3028 level = btrfs_super_chunk_root_level(disk_super);
3029
3030 __setup_root(chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);
3031
3032 chunk_root->node = read_tree_block(fs_info,
3033 btrfs_super_chunk_root(disk_super),
3034 generation, level, NULL);
3035 if (IS_ERR(chunk_root->node) ||
3036 !extent_buffer_uptodate(chunk_root->node)) {
3037 btrfs_err(fs_info, "failed to read chunk root");
3038 if (!IS_ERR(chunk_root->node))
3039 free_extent_buffer(chunk_root->node);
3040 chunk_root->node = NULL;
3041 goto fail_tree_roots;
3042 }
3043 btrfs_set_root_node(&chunk_root->root_item, chunk_root->node);
3044 chunk_root->commit_root = btrfs_root_node(chunk_root);
3045
3046 read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
3047 btrfs_header_chunk_tree_uuid(chunk_root->node), BTRFS_UUID_SIZE);
3048
3049 ret = btrfs_read_chunk_tree(fs_info);
3050 if (ret) {
3051 btrfs_err(fs_info, "failed to read chunk tree: %d", ret);
3052 goto fail_tree_roots;
3053 }
3054
3055 /*
3056 * Keep the devid that is marked to be the target device for the
3057 * device replace procedure
3058 */
3059 btrfs_free_extra_devids(fs_devices, 0);
3060
3061 if (!fs_devices->latest_bdev) {
3062 btrfs_err(fs_info, "failed to read devices");
3063 goto fail_tree_roots;
3064 }
3065
3066 retry_root_backup:
3067 generation = btrfs_super_generation(disk_super);
3068 level = btrfs_super_root_level(disk_super);
3069
3070 tree_root->node = read_tree_block(fs_info,
3071 btrfs_super_root(disk_super),
3072 generation, level, NULL);
3073 if (IS_ERR(tree_root->node) ||
3074 !extent_buffer_uptodate(tree_root->node)) {
3075 btrfs_warn(fs_info, "failed to read tree root");
3076 if (!IS_ERR(tree_root->node))
3077 free_extent_buffer(tree_root->node);
3078 tree_root->node = NULL;
3079 goto recovery_tree_root;
3080 }
3081
3082 btrfs_set_root_node(&tree_root->root_item, tree_root->node);
3083 tree_root->commit_root = btrfs_root_node(tree_root);
3084 btrfs_set_root_refs(&tree_root->root_item, 1);
3085
3086 mutex_lock(&tree_root->objectid_mutex);
3087 ret = btrfs_find_highest_objectid(tree_root,
3088 &tree_root->highest_objectid);
3089 if (ret) {
3090 mutex_unlock(&tree_root->objectid_mutex);
3091 goto recovery_tree_root;
3092 }
3093
3094 ASSERT(tree_root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);
3095
3096 mutex_unlock(&tree_root->objectid_mutex);
3097
3098 ret = btrfs_read_roots(fs_info);
3099 if (ret)
3100 goto recovery_tree_root;
3101
3102 fs_info->generation = generation;
3103 fs_info->last_trans_committed = generation;
3104
3105 /*
3106 * If we have a uuid root and we're not being told to rescan we need to
3107 * check the generation here so we can set the
3108 * BTRFS_FS_UPDATE_UUID_TREE_GEN bit. Otherwise we could commit the
3109 * transaction during a balance or the log replay without updating the
3110 * uuid generation, and then if we crash we would rescan the uuid tree,
3111 * even though it was perfectly fine.
3112 */
3113 if (fs_info->uuid_root && !btrfs_test_opt(fs_info, RESCAN_UUID_TREE) &&
3114 fs_info->generation == btrfs_super_uuid_tree_generation(disk_super))
3115 set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
3116
3117 ret = btrfs_verify_dev_extents(fs_info);
3118 if (ret) {
3119 btrfs_err(fs_info,
3120 "failed to verify dev extents against chunks: %d",
3121 ret);
3122 goto fail_block_groups;
3123 }
3124 ret = btrfs_recover_balance(fs_info);
3125 if (ret) {
3126 btrfs_err(fs_info, "failed to recover balance: %d", ret);
3127 goto fail_block_groups;
3128 }
3129
3130 ret = btrfs_init_dev_stats(fs_info);
3131 if (ret) {
3132 btrfs_err(fs_info, "failed to init dev_stats: %d", ret);
3133 goto fail_block_groups;
3134 }
3135
3136 ret = btrfs_init_dev_replace(fs_info);
3137 if (ret) {
3138 btrfs_err(fs_info, "failed to init dev_replace: %d", ret);
3139 goto fail_block_groups;
3140 }
3141
3142 btrfs_free_extra_devids(fs_devices, 1);
3143
3144 ret = btrfs_sysfs_add_fsid(fs_devices, NULL);
3145 if (ret) {
3146 btrfs_err(fs_info, "failed to init sysfs fsid interface: %d",
3147 ret);
3148 goto fail_block_groups;
3149 }
3150
3151 ret = btrfs_sysfs_add_device(fs_devices);
3152 if (ret) {
3153 btrfs_err(fs_info, "failed to init sysfs device interface: %d",
3154 ret);
3155 goto fail_fsdev_sysfs;
3156 }
3157
3158 ret = btrfs_sysfs_add_mounted(fs_info);
3159 if (ret) {
3160 btrfs_err(fs_info, "failed to init sysfs interface: %d", ret);
3161 goto fail_fsdev_sysfs;
3162 }
3163
3164 ret = btrfs_init_space_info(fs_info);
3165 if (ret) {
3166 btrfs_err(fs_info, "failed to initialize space info: %d", ret);
3167 goto fail_sysfs;
3168 }
3169
3170 ret = btrfs_read_block_groups(fs_info);
3171 if (ret) {
3172 btrfs_err(fs_info, "failed to read block groups: %d", ret);
3173 goto fail_sysfs;
3174 }
3175
3176 if (!sb_rdonly(sb) && fs_info->fs_devices->missing_devices &&
3177 !btrfs_check_rw_degradable(fs_info, NULL)) {
3178 btrfs_warn(fs_info,
3179 "writable mount is not allowed due to too many missing devices");
3180 goto fail_sysfs;
3181 }
3182
3183 fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
3184 "btrfs-cleaner");
3185 if (IS_ERR(fs_info->cleaner_kthread))
3186 goto fail_sysfs;
3187
3188 fs_info->transaction_kthread = kthread_run(transaction_kthread,
3189 tree_root,
3190 "btrfs-transaction");
3191 if (IS_ERR(fs_info->transaction_kthread))
3192 goto fail_cleaner;
3193
3194 if (!btrfs_test_opt(fs_info, NOSSD) &&
3195 !fs_info->fs_devices->rotating) {
3196 btrfs_set_and_info(fs_info, SSD, "enabling ssd optimizations");
3197 }
3198
3199 /*
3200 * Mount does not set all options immediately; we can do it now and do
3201 * not have to wait for transaction commit
3202 */
3203 btrfs_apply_pending_changes(fs_info);
3204
3205 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
3206 if (btrfs_test_opt(fs_info, CHECK_INTEGRITY)) {
3207 ret = btrfsic_mount(fs_info, fs_devices,
3208 btrfs_test_opt(fs_info,
3209 CHECK_INTEGRITY_INCLUDING_EXTENT_DATA) ?
3210 1 : 0,
3211 fs_info->check_integrity_print_mask);
3212 if (ret)
3213 btrfs_warn(fs_info,
3214 "failed to initialize integrity check module: %d",
3215 ret);
3216 }
3217 #endif
3218 ret = btrfs_read_qgroup_config(fs_info);
3219 if (ret)
3220 goto fail_trans_kthread;
3221
3222 if (btrfs_build_ref_tree(fs_info))
3223 btrfs_err(fs_info, "couldn't build ref tree");
3224
3225 /* do not make disk changes in broken FS or nologreplay is given */
3226 if (btrfs_super_log_root(disk_super) != 0 &&
3227 !btrfs_test_opt(fs_info, NOLOGREPLAY)) {
3228 btrfs_info(fs_info, "start tree-log replay");
3229 ret = btrfs_replay_log(fs_info, fs_devices);
3230 if (ret) {
3231 err = ret;
3232 goto fail_qgroup;
3233 }
3234 }
3235
3236 ret = btrfs_find_orphan_roots(fs_info);
3237 if (ret)
3238 goto fail_qgroup;
3239
3240 if (!sb_rdonly(sb)) {
3241 ret = btrfs_cleanup_fs_roots(fs_info);
3242 if (ret)
3243 goto fail_qgroup;
3244
3245 mutex_lock(&fs_info->cleaner_mutex);
3246 ret = btrfs_recover_relocation(tree_root);
3247 mutex_unlock(&fs_info->cleaner_mutex);
3248 if (ret < 0) {
3249 btrfs_warn(fs_info, "failed to recover relocation: %d",
3250 ret);
3251 err = -EINVAL;
3252 goto fail_qgroup;
3253 }
3254 }
3255
3256 location.objectid = BTRFS_FS_TREE_OBJECTID;
3257 location.type = BTRFS_ROOT_ITEM_KEY;
3258 location.offset = 0;
3259
3260 fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location);
3261 if (IS_ERR(fs_info->fs_root)) {
3262 err = PTR_ERR(fs_info->fs_root);
3263 btrfs_warn(fs_info, "failed to read fs tree: %d", err);
3264 fs_info->fs_root = NULL;
3265 goto fail_qgroup;
3266 }
3267
3268 if (sb_rdonly(sb))
3269 return 0;
3270
3271 if (btrfs_test_opt(fs_info, CLEAR_CACHE) &&
3272 btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
3273 clear_free_space_tree = 1;
3274 } else if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
3275 !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID)) {
3276 btrfs_warn(fs_info, "free space tree is invalid");
3277 clear_free_space_tree = 1;
3278 }
3279
3280 if (clear_free_space_tree) {
3281 btrfs_info(fs_info, "clearing free space tree");
3282 ret = btrfs_clear_free_space_tree(fs_info);
3283 if (ret) {
3284 btrfs_warn(fs_info,
3285 "failed to clear free space tree: %d", ret);
3286 close_ctree(fs_info);
3287 return ret;
3288 }
3289 }
3290
3291 if (btrfs_test_opt(fs_info, FREE_SPACE_TREE) &&
3292 !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
3293 btrfs_info(fs_info, "creating free space tree");
3294 ret = btrfs_create_free_space_tree(fs_info);
3295 if (ret) {
3296 btrfs_warn(fs_info,
3297 "failed to create free space tree: %d", ret);
3298 close_ctree(fs_info);
3299 return ret;
3300 }
3301 }
3302
3303 down_read(&fs_info->cleanup_work_sem);
3304 if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) ||
3305 (ret = btrfs_orphan_cleanup(fs_info->tree_root))) {
3306 up_read(&fs_info->cleanup_work_sem);
3307 close_ctree(fs_info);
3308 return ret;
3309 }
3310 up_read(&fs_info->cleanup_work_sem);
3311
3312 ret = btrfs_resume_balance_async(fs_info);
3313 if (ret) {
3314 btrfs_warn(fs_info, "failed to resume balance: %d", ret);
3315 close_ctree(fs_info);
3316 return ret;
3317 }
3318
3319 ret = btrfs_resume_dev_replace_async(fs_info);
3320 if (ret) {
3321 btrfs_warn(fs_info, "failed to resume device replace: %d", ret);
3322 close_ctree(fs_info);
3323 return ret;
3324 }
3325
3326 btrfs_qgroup_rescan_resume(fs_info);
3327
3328 if (!fs_info->uuid_root) {
3329 btrfs_info(fs_info, "creating UUID tree");
3330 ret = btrfs_create_uuid_tree(fs_info);
3331 if (ret) {
3332 btrfs_warn(fs_info,
3333 "failed to create the UUID tree: %d", ret);
3334 close_ctree(fs_info);
3335 return ret;
3336 }
3337 } else if (btrfs_test_opt(fs_info, RESCAN_UUID_TREE) ||
3338 fs_info->generation !=
3339 btrfs_super_uuid_tree_generation(disk_super)) {
3340 btrfs_info(fs_info, "checking UUID tree");
3341 ret = btrfs_check_uuid_tree(fs_info);
3342 if (ret) {
3343 btrfs_warn(fs_info,
3344 "failed to check the UUID tree: %d", ret);
3345 close_ctree(fs_info);
3346 return ret;
3347 }
3348 }
3349 set_bit(BTRFS_FS_OPEN, &fs_info->flags);
3350
3351 /*
3352 * backuproot only affects mount behavior, and if open_ctree succeeded,
3353 * there is no need to keep the flag
3354 */
3355 btrfs_clear_opt(fs_info->mount_opt, USEBACKUPROOT);
3356
3357 return 0;
3358
3359 fail_qgroup:
3360 btrfs_free_qgroup_config(fs_info);
3361 fail_trans_kthread:
3362 kthread_stop(fs_info->transaction_kthread);
3363 btrfs_cleanup_transaction(fs_info);
3364 btrfs_free_fs_roots(fs_info);
3365 fail_cleaner:
3366 kthread_stop(fs_info->cleaner_kthread);
3367
3368 /*
3369 * make sure we're done with the btree inode before we stop our
3370 * kthreads
3371 */
3372 filemap_write_and_wait(fs_info->btree_inode->i_mapping);
3373
3374 fail_sysfs:
3375 btrfs_sysfs_remove_mounted(fs_info);
3376
3377 fail_fsdev_sysfs:
3378 btrfs_sysfs_remove_fsid(fs_info->fs_devices);
3379
3380 fail_block_groups:
3381 btrfs_put_block_group_cache(fs_info);
3382
3383 fail_tree_roots:
3384 free_root_pointers(fs_info, true);
3385 invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
3386
3387 fail_sb_buffer:
3388 btrfs_stop_all_workers(fs_info);
3389 btrfs_free_block_groups(fs_info);
3390 fail_csum:
3391 btrfs_free_csum_hash(fs_info);
3392 fail_alloc:
3393 fail_iput:
3394 btrfs_mapping_tree_free(&fs_info->mapping_tree);
3395
3396 iput(fs_info->btree_inode);
3397 fail_bio_counter:
3398 percpu_counter_destroy(&fs_info->dev_replace.bio_counter);
3399 fail_delalloc_bytes:
3400 percpu_counter_destroy(&fs_info->delalloc_bytes);
3401 fail_dirty_metadata_bytes:
3402 percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
3403 fail_dio_bytes:
3404 percpu_counter_destroy(&fs_info->dio_bytes);
3405 fail_srcu:
3406 cleanup_srcu_struct(&fs_info->subvol_srcu);
3407 fail:
3408 btrfs_free_stripe_hash_table(fs_info);
3409 btrfs_close_devices(fs_info->fs_devices);
3410 return err;
3411
3412 recovery_tree_root:
3413 if (!btrfs_test_opt(fs_info, USEBACKUPROOT))
3414 goto fail_tree_roots;
3415
3416 free_root_pointers(fs_info, false);
3417
3418 /* don't use the log in recovery mode, it won't be valid */
3419 btrfs_set_super_log_root(disk_super, 0);
3420
3421 /* we can't trust the free space cache either */
3422 btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE);
3423
3424 ret = next_root_backup(fs_info, fs_info->super_copy,
3425 &num_backups_tried, &backup_index);
3426 if (ret == -1)
3427 goto fail_block_groups;
3428 goto retry_root_backup;
3429 }
3430 ALLOW_ERROR_INJECTION(open_ctree, ERRNO);
3431
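/*
* Completion callback for the synchronous super block writes; on failure
* it bumps the device's write error statistics rather than setting the
* generic buffer error flag.
*/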
3432 static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
3433 {
3434 if (uptodate) {
3435 set_buffer_uptodate(bh);
3436 } else {
3437 struct btrfs_device *device = (struct btrfs_device *)
3438 bh->b_private;
3439
3440 btrfs_warn_rl_in_rcu(device->fs_info,
3441 "lost page write due to IO error on %s",
3442 rcu_str_deref(device->name));
3443 /* note, we don't set_buffer_write_io_error because we have
3444 * our own ways of dealing with the IO errors
3445 */
3446 clear_buffer_uptodate(bh);
3447 btrfs_dev_stat_inc_and_print(device, BTRFS_DEV_STAT_WRITE_ERRS);
3448 }
3449 unlock_buffer(bh);
3450 put_bh(bh);
3451 }
3452
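/*
* Read one super block copy from @bdev. The copies live at fixed,
* exponentially spaced offsets (btrfs_sb_offset(), primary at 64KiB), so
* a copy whose offset lies past the end of the device does not exist.
*/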
3453 int btrfs_read_dev_one_super(struct block_device *bdev, int copy_num,
3454 struct buffer_head **bh_ret)
3455 {
3456 struct buffer_head *bh;
3457 struct btrfs_super_block *super;
3458 u64 bytenr;
3459
3460 bytenr = btrfs_sb_offset(copy_num);
3461 if (bytenr + BTRFS_SUPER_INFO_SIZE >= i_size_read(bdev->bd_inode))
3462 return -EINVAL;
3463
3464 bh = __bread(bdev, bytenr / BTRFS_BDEV_BLOCKSIZE, BTRFS_SUPER_INFO_SIZE);
3465 /*
3466 * If we fail to read from the underlying devices, as of now
3467 * the best option we have is to mark it EIO.
3468 */
3469 if (!bh)
3470 return -EIO;
3471
3472 super = (struct btrfs_super_block *)bh->b_data;
3473 if (btrfs_super_bytenr(super) != bytenr ||
3474 btrfs_super_magic(super) != BTRFS_MAGIC) {
3475 brelse(bh);
3476 return -EINVAL;
3477 }
3478
3479 *bh_ret = bh;
3480 return 0;
3481 }
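/*
 * Note: btrfs_sb_offset() maps copy numbers to the fixed superblock
 * mirror offsets (copy 0 at 64KiB, copy 1 at 64MiB, copy 2 at 256GiB),
 * which is why a copy whose end would fall beyond the device size is
 * rejected with -EINVAL above instead of being treated as an IO error.
 */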
3482
3483
3484 struct buffer_head *btrfs_read_dev_super(struct block_device *bdev)
3485 {
3486 struct buffer_head *bh;
3487 struct buffer_head *latest = NULL;
3488 struct btrfs_super_block *super;
3489 int i;
3490 u64 transid = 0;
3491 int ret = -EINVAL;
3492
3493 /* We would like to check all the supers, but that would make
3494 * a btrfs mount succeed after a mkfs from a different FS. So,
3495 * for now we check only the first super; scanning the later
3496 * copies (up to BTRFS_SUPER_MIRROR_MAX) would need a special mount option
3497 */
3498 for (i = 0; i < 1; i++) {
3499 ret = btrfs_read_dev_one_super(bdev, i, &bh);
3500 if (ret)
3501 continue;
3502
3503 super = (struct btrfs_super_block *)bh->b_data;
3504
3505 if (!latest || btrfs_super_generation(super) > transid) {
3506 brelse(latest);
3507 latest = bh;
3508 transid = btrfs_super_generation(super);
3509 } else {
3510 brelse(bh);
3511 }
3512 }
3513
3514 if (!latest)
3515 return ERR_PTR(ret);
3516
3517 return latest;
3518 }
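/*
 * A minimal usage sketch (hypothetical caller, error handling elided):
 *
 *	bh = btrfs_read_dev_super(bdev);
 *	if (IS_ERR(bh))
 *		return PTR_ERR(bh);
 *	disk_super = (struct btrfs_super_block *)bh->b_data;
 *	...
 *	brelse(bh);
 *
 * The caller owns the reference on the returned buffer head and must
 * drop it with brelse() when done with the super block data.
 */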
3519
3520 /*
3521 * Write superblock @sb to the @device. Do not wait for completion, all the
3522 * buffer heads we write are pinned.
3523 *
3524 * Write @max_mirrors copies of the superblock, where 0 means all copies that
3525 * fit the expected device size at commit time. Note that max_mirrors must
3526 * be the same for the write and wait phases.
3527 *
3528 * Return 0 if at least one copy was written, -1 if every copy failed.
3529 */
3530 static int write_dev_supers(struct btrfs_device *device,
3531 struct btrfs_super_block *sb, int max_mirrors)
3532 {
3533 struct btrfs_fs_info *fs_info = device->fs_info;
3534 SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
3535 struct buffer_head *bh;
3536 int i;
3537 int ret;
3538 int errors = 0;
3539 u64 bytenr;
3540 int op_flags;
3541
3542 if (max_mirrors == 0)
3543 max_mirrors = BTRFS_SUPER_MIRROR_MAX;
3544
3545 shash->tfm = fs_info->csum_shash;
3546
3547 for (i = 0; i < max_mirrors; i++) {
3548 bytenr = btrfs_sb_offset(i);
3549 if (bytenr + BTRFS_SUPER_INFO_SIZE >=
3550 device->commit_total_bytes)
3551 break;
3552
3553 btrfs_set_super_bytenr(sb, bytenr);
3554
3555 crypto_shash_init(shash);
3556 crypto_shash_update(shash, (const char *)sb + BTRFS_CSUM_SIZE,
3557 BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
3558 crypto_shash_final(shash, sb->csum);
3559
3560 /* One reference for us, and we leave it for the caller */
3561 bh = __getblk(device->bdev, bytenr / BTRFS_BDEV_BLOCKSIZE,
3562 BTRFS_SUPER_INFO_SIZE);
3563 if (!bh) {
3564 btrfs_err(device->fs_info,
3565 "couldn't get super buffer head for bytenr %llu",
3566 bytenr);
3567 errors++;
3568 continue;
3569 }
3570
3571 memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);
3572
3573 /* one reference for submit_bh */
3574 get_bh(bh);
3575
3576 set_buffer_uptodate(bh);
3577 lock_buffer(bh);
3578 bh->b_end_io = btrfs_end_buffer_write_sync;
3579 bh->b_private = device;
3580
3581 /*
3582 * We FUA the first super. The others we allow
3583 * to go down lazily.
3584 */
3585 op_flags = REQ_SYNC | REQ_META | REQ_PRIO;
3586 if (i == 0 && !btrfs_test_opt(device->fs_info, NOBARRIER))
3587 op_flags |= REQ_FUA;
3588 ret = btrfsic_submit_bh(REQ_OP_WRITE, op_flags, bh);
3589 if (ret)
3590 errors++;
3591 }
3592 return errors < i ? 0 : -1;
3593 }
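/*
 * Buffer head reference counting across the two phases: __getblk()
 * returns the buffer with one reference, which is deliberately left
 * held, and get_bh() takes a second one for submit_bh() (dropped by
 * put_bh() in btrfs_end_buffer_write_sync). wait_dev_supers() later
 * looks the buffer up again and therefore calls brelse() twice: once
 * for its own lookup and once for the reference left behind here.
 */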
3594
3595 /*
3596 * Wait for write completion of the superblocks submitted by write_dev_supers;
3597 * @max_mirrors must be the same as in the write phase.
3598 *
3599 * Return 0 on success, -1 if the primary super block failed or if every
3600 * copy failed (buffer head not found or not marked up to date).
3601 */
3602 static int wait_dev_supers(struct btrfs_device *device, int max_mirrors)
3603 {
3604 struct buffer_head *bh;
3605 int i;
3606 int errors = 0;
3607 bool primary_failed = false;
3608 u64 bytenr;
3609
3610 if (max_mirrors == 0)
3611 max_mirrors = BTRFS_SUPER_MIRROR_MAX;
3612
3613 for (i = 0; i < max_mirrors; i++) {
3614 bytenr = btrfs_sb_offset(i);
3615 if (bytenr + BTRFS_SUPER_INFO_SIZE >=
3616 device->commit_total_bytes)
3617 break;
3618
3619 bh = __find_get_block(device->bdev,
3620 bytenr / BTRFS_BDEV_BLOCKSIZE,
3621 BTRFS_SUPER_INFO_SIZE);
3622 if (!bh) {
3623 errors++;
3624 if (i == 0)
3625 primary_failed = true;
3626 continue;
3627 }
3628 wait_on_buffer(bh);
3629 if (!buffer_uptodate(bh)) {
3630 errors++;
3631 if (i == 0)
3632 primary_failed = true;
3633 }
3634
3635 /* drop our reference */
3636 brelse(bh);
3637
3638 /* drop the reference from the writing run */
3639 brelse(bh);
3640 }
3641
3642 /* log error, force error return */
3643 if (primary_failed) {
3644 btrfs_err(device->fs_info, "error writing primary super block to device %llu",
3645 device->devid);
3646 return -1;
3647 }
3648
3649 return errors < i ? 0 : -1;
3650 }
3651
3652 /*
3653 * Endio for write_dev_flush; this wakes anyone waiting
3654 * for the barrier when the flush is done
3655 */
3656 static void btrfs_end_empty_barrier(struct bio *bio)
3657 {
3658 complete(bio->bi_private);
3659 }
3660
3661 /*
3662 * Submit a flush request to the device if it supports it. Error handling is
3663 * done in the waiting counterpart.
3664 */
3665 static void write_dev_flush(struct btrfs_device *device)
3666 {
3667 struct bio *bio = device->flush_bio;
3668
3669 #ifndef CONFIG_BTRFS_FS_CHECK_INTEGRITY
3670 /*
3671 * When a disk has write caching disabled, we skip submission of a bio
3672 * with flush and sync requests before writing the superblock, since
3673 * it's not needed. However when the integrity checker is enabled, this
3674 * results in reports that there are metadata blocks referred by a
3675 * superblock that were not properly flushed. So, for the sake of
3676 * simplicity, never skip the bio submission when the integrity
3677 * checker is enabled, since it is a debug tool and not meant for
3678 * use in non-debug builds.
3679 */
3680 struct request_queue *q = bdev_get_queue(device->bdev);
3681 if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
3682 return;
3683 #endif
3684
3685 bio_reset(bio);
3686 bio->bi_end_io = btrfs_end_empty_barrier;
3687 bio_set_dev(bio, device->bdev);
3688 bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH;
3689 init_completion(&device->flush_wait);
3690 bio->bi_private = &device->flush_wait;
3691
3692 btrfsic_submit_bio(bio);
3693 set_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state);
3694 }
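/*
 * device->flush_bio is allocated once when the device structure is set
 * up and reused for every barrier (hence the bio_reset() above), so
 * submission itself cannot fail with -ENOMEM; any flush error surfaces
 * in bio->bi_status and is picked up by wait_dev_flush().
 */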
3695
3696 /*
3697 * If the flush bio has been submitted by write_dev_flush, wait for it.
3698 */
3699 static blk_status_t wait_dev_flush(struct btrfs_device *device)
3700 {
3701 struct bio *bio = device->flush_bio;
3702
3703 if (!test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state))
3704 return BLK_STS_OK;
3705
3706 clear_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state);
3707 wait_for_completion_io(&device->flush_wait);
3708
3709 return bio->bi_status;
3710 }
3711
3712 static int check_barrier_error(struct btrfs_fs_info *fs_info)
3713 {
3714 if (!btrfs_check_rw_degradable(fs_info, NULL))
3715 return -EIO;
3716 return 0;
3717 }
3718
3719 /*
3720 * send an empty flush down to each device in parallel,
3721 * then wait for them
3722 */
3723 static int barrier_all_devices(struct btrfs_fs_info *info)
3724 {
3725 struct list_head *head;
3726 struct btrfs_device *dev;
3727 int errors_wait = 0;
3728 blk_status_t ret;
3729
3730 lockdep_assert_held(&info->fs_devices->device_list_mutex);
3731 /* send down all the barriers */
3732 head = &info->fs_devices->devices;
3733 list_for_each_entry(dev, head, dev_list) {
3734 if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
3735 continue;
3736 if (!dev->bdev)
3737 continue;
3738 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
3739 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
3740 continue;
3741
3742 write_dev_flush(dev);
3743 dev->last_flush_error = BLK_STS_OK;
3744 }
3745
3746 /* wait for all the barriers */
3747 list_for_each_entry(dev, head, dev_list) {
3748 if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
3749 continue;
3750 if (!dev->bdev) {
3751 errors_wait++;
3752 continue;
3753 }
3754 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
3755 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
3756 continue;
3757
3758 ret = wait_dev_flush(dev);
3759 if (ret) {
3760 dev->last_flush_error = ret;
3761 btrfs_dev_stat_inc_and_print(dev,
3762 BTRFS_DEV_STAT_FLUSH_ERRS);
3763 errors_wait++;
3764 }
3765 }
3766
3767 if (errors_wait) {
3768 /*
3769 * We need the status of all disks to arrive at the overall
3770 * volume status, so error checking is deferred until all
3771 * devices have been waited on.
3772 */
3773 return check_barrier_error(info);
3774 }
3775 return 0;
3776 }
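/*
 * The two loops above form a simple fan-out/fan-in: every flush is
 * submitted before any is waited on, so the devices process them in
 * parallel. A flush failure is not immediately fatal; the filesystem
 * only errors out if check_barrier_error() decides it could no longer
 * be mounted degraded without the failing devices.
 */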
3777
3778 int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags)
3779 {
3780 int raid_type;
3781 int min_tolerated = INT_MAX;
3782
3783 if ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 ||
3784 (flags & BTRFS_AVAIL_ALLOC_BIT_SINGLE))
3785 min_tolerated = min_t(int, min_tolerated,
3786 btrfs_raid_array[BTRFS_RAID_SINGLE].
3787 tolerated_failures);
3788
3789 for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
3790 if (raid_type == BTRFS_RAID_SINGLE)
3791 continue;
3792 if (!(flags & btrfs_raid_array[raid_type].bg_flag))
3793 continue;
3794 min_tolerated = min_t(int, min_tolerated,
3795 btrfs_raid_array[raid_type].
3796 tolerated_failures);
3797 }
3798
3799 if (min_tolerated == INT_MAX) {
3800 pr_warn("BTRFS: unknown raid flag: %llu", flags);
3801 min_tolerated = 0;
3802 }
3803
3804 return min_tolerated;
3805 }
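/*
 * Worked example: with RAID1 metadata (tolerated_failures = 1) and
 * RAID0 data (tolerated_failures = 0) both present in @flags, the
 * result is min(1, 0) = 0, i.e. not a single disk barrier failure can
 * be tolerated. (Values taken from btrfs_raid_array.)
 */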
3806
3807 int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors)
3808 {
3809 struct list_head *head;
3810 struct btrfs_device *dev;
3811 struct btrfs_super_block *sb;
3812 struct btrfs_dev_item *dev_item;
3813 int ret;
3814 int do_barriers;
3815 int max_errors;
3816 int total_errors = 0;
3817 u64 flags;
3818
3819 do_barriers = !btrfs_test_opt(fs_info, NOBARRIER);
3820
3821 /*
3822 * max_mirrors == 0 indicates we're called from commit_transaction,
3823 * not from fsync, where the tree roots in fs_info may not yet be
3824 * consistent on disk.
3825 */
3826 if (max_mirrors == 0)
3827 backup_super_roots(fs_info);
3828
3829 sb = fs_info->super_for_commit;
3830 dev_item = &sb->dev_item;
3831
3832 mutex_lock(&fs_info->fs_devices->device_list_mutex);
3833 head = &fs_info->fs_devices->devices;
3834 max_errors = btrfs_super_num_devices(fs_info->super_copy) - 1;
3835
3836 if (do_barriers) {
3837 ret = barrier_all_devices(fs_info);
3838 if (ret) {
3839 mutex_unlock(
3840 &fs_info->fs_devices->device_list_mutex);
3841 btrfs_handle_fs_error(fs_info, ret,
3842 "errors while submitting device barriers.");
3843 return ret;
3844 }
3845 }
3846
3847 list_for_each_entry(dev, head, dev_list) {
3848 if (!dev->bdev) {
3849 total_errors++;
3850 continue;
3851 }
3852 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
3853 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
3854 continue;
3855
3856 btrfs_set_stack_device_generation(dev_item, 0);
3857 btrfs_set_stack_device_type(dev_item, dev->type);
3858 btrfs_set_stack_device_id(dev_item, dev->devid);
3859 btrfs_set_stack_device_total_bytes(dev_item,
3860 dev->commit_total_bytes);
3861 btrfs_set_stack_device_bytes_used(dev_item,
3862 dev->commit_bytes_used);
3863 btrfs_set_stack_device_io_align(dev_item, dev->io_align);
3864 btrfs_set_stack_device_io_width(dev_item, dev->io_width);
3865 btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
3866 memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
3867 memcpy(dev_item->fsid, dev->fs_devices->metadata_uuid,
3868 BTRFS_FSID_SIZE);
3869
3870 flags = btrfs_super_flags(sb);
3871 btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);
3872
3873 ret = btrfs_validate_write_super(fs_info, sb);
3874 if (ret < 0) {
3875 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3876 btrfs_handle_fs_error(fs_info, -EUCLEAN,
3877 "unexpected superblock corruption detected");
3878 return -EUCLEAN;
3879 }
3880
3881 ret = write_dev_supers(dev, sb, max_mirrors);
3882 if (ret)
3883 total_errors++;
3884 }
3885 if (total_errors > max_errors) {
3886 btrfs_err(fs_info, "%d errors while writing supers",
3887 total_errors);
3888 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3889
3890 /* FUA is masked off if unsupported and can't be the reason */
3891 btrfs_handle_fs_error(fs_info, -EIO,
3892 "%d errors while writing supers",
3893 total_errors);
3894 return -EIO;
3895 }
3896
3897 total_errors = 0;
3898 list_for_each_entry(dev, head, dev_list) {
3899 if (!dev->bdev)
3900 continue;
3901 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
3902 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
3903 continue;
3904
3905 ret = wait_dev_supers(dev, max_mirrors);
3906 if (ret)
3907 total_errors++;
3908 }
3909 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3910 if (total_errors > max_errors) {
3911 btrfs_handle_fs_error(fs_info, -EIO,
3912 "%d errors while writing supers",
3913 total_errors);
3914 return -EIO;
3915 }
3916 return 0;
3917 }
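/*
 * To recap the ordering enforced above: flush all devices
 * (barrier_all_devices), write every superblock copy on every
 * writeable device, then wait for those writes. Each phase tolerates
 * up to num_devices - 1 failing devices; one more and the error is
 * escalated through btrfs_handle_fs_error().
 */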
3918
3919 /* Drop a fs root from the radix tree and free it. */
3920 void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
3921 struct btrfs_root *root)
3922 {
3923 spin_lock(&fs_info->fs_roots_radix_lock);
3924 radix_tree_delete(&fs_info->fs_roots_radix,
3925 (unsigned long)root->root_key.objectid);
3926 spin_unlock(&fs_info->fs_roots_radix_lock);
3927
3928 if (btrfs_root_refs(&root->root_item) == 0)
3929 synchronize_srcu(&fs_info->subvol_srcu);
3930
3931 if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
3932 btrfs_free_log(NULL, root);
3933 if (root->reloc_root) {
3934 free_extent_buffer(root->reloc_root->node);
3935 free_extent_buffer(root->reloc_root->commit_root);
3936 btrfs_put_fs_root(root->reloc_root);
3937 root->reloc_root = NULL;
3938 }
3939 }
3940
3941 if (root->free_ino_pinned)
3942 __btrfs_remove_free_space_cache(root->free_ino_pinned);
3943 if (root->free_ino_ctl)
3944 __btrfs_remove_free_space_cache(root->free_ino_ctl);
3945 btrfs_free_fs_root(root);
3946 }
3947
3948 void btrfs_free_fs_root(struct btrfs_root *root)
3949 {
3950 iput(root->ino_cache_inode);
3951 WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
3952 if (root->anon_dev)
3953 free_anon_bdev(root->anon_dev);
3954 if (root->subv_writers)
3955 btrfs_free_subvolume_writers(root->subv_writers);
3956 free_extent_buffer(root->node);
3957 free_extent_buffer(root->commit_root);
3958 kfree(root->free_ino_ctl);
3959 kfree(root->free_ino_pinned);
3960 btrfs_put_fs_root(root);
3961 }
3962
3963 int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
3964 {
3965 u64 root_objectid = 0;
3966 struct btrfs_root *gang[8];
3967 int i = 0;
3968 int err = 0;
3969 unsigned int ret = 0;
3970 int index;
3971
3972 while (1) {
3973 index = srcu_read_lock(&fs_info->subvol_srcu);
3974 ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
3975 (void **)gang, root_objectid,
3976 ARRAY_SIZE(gang));
3977 if (!ret) {
3978 srcu_read_unlock(&fs_info->subvol_srcu, index);
3979 break;
3980 }
3981 root_objectid = gang[ret - 1]->root_key.objectid + 1;
3982
3983 for (i = 0; i < ret; i++) {
3984 /* Avoid grabbing roots in dead_roots */
3985 if (btrfs_root_refs(&gang[i]->root_item) == 0) {
3986 gang[i] = NULL;
3987 continue;
3988 }
3989 /* grab all the search results for later use */
3990 gang[i] = btrfs_grab_fs_root(gang[i]);
3991 }
3992 srcu_read_unlock(&fs_info->subvol_srcu, index);
3993
3994 for (i = 0; i < ret; i++) {
3995 if (!gang[i])
3996 continue;
3997 root_objectid = gang[i]->root_key.objectid;
3998 err = btrfs_orphan_cleanup(gang[i]);
3999 if (err)
4000 break;
4001 btrfs_put_fs_root(gang[i]);
4002 }
4003 root_objectid++;
4004 }
4005
4006 /* release the uncleaned roots due to error */
4007 for (; i < ret; i++) {
4008 if (gang[i])
4009 btrfs_put_fs_root(gang[i]);
4010 }
4011 return err;
4012 }
4013
4014 int btrfs_commit_super(struct btrfs_fs_info *fs_info)
4015 {
4016 struct btrfs_root *root = fs_info->tree_root;
4017 struct btrfs_trans_handle *trans;
4018
4019 mutex_lock(&fs_info->cleaner_mutex);
4020 btrfs_run_delayed_iputs(fs_info);
4021 mutex_unlock(&fs_info->cleaner_mutex);
4022 wake_up_process(fs_info->cleaner_kthread);
4023
4024 /* wait until ongoing cleanup work is done */
4025 down_write(&fs_info->cleanup_work_sem);
4026 up_write(&fs_info->cleanup_work_sem);
4027
4028 trans = btrfs_join_transaction(root);
4029 if (IS_ERR(trans))
4030 return PTR_ERR(trans);
4031 return btrfs_commit_transaction(trans);
4032 }
4033
4034 void close_ctree(struct btrfs_fs_info *fs_info)
4035 {
4036 int ret;
4037
4038 set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags);
4039 /*
4040 * We don't want the cleaner to start new transactions, add more delayed
4041 * iputs, etc. while we're closing. We can't use kthread_stop() yet
4042 * because that frees the task_struct, and the transaction kthread might
4043 * still try to wake up the cleaner.
4044 */
4045 kthread_park(fs_info->cleaner_kthread);
4046
4047 /* wait for the qgroup rescan worker to stop */
4048 btrfs_qgroup_wait_for_completion(fs_info, false);
4049
4050 /* wait for the uuid_scan task to finish */
4051 down(&fs_info->uuid_tree_rescan_sem);
4052 /* avoid complaints from lockdep et al., set sem back to initial state */
4053 up(&fs_info->uuid_tree_rescan_sem);
4054
4055 /* pause restriper - we want to resume on mount */
4056 btrfs_pause_balance(fs_info);
4057
4058 btrfs_dev_replace_suspend_for_unmount(fs_info);
4059
4060 btrfs_scrub_cancel(fs_info);
4061
4062 /* wait for any defraggers to finish */
4063 wait_event(fs_info->transaction_wait,
4064 (atomic_read(&fs_info->defrag_running) == 0));
4065
4066 /* clear out the rbtree of defraggable inodes */
4067 btrfs_cleanup_defrag_inodes(fs_info);
4068
4069 cancel_work_sync(&fs_info->async_reclaim_work);
4070
4071 if (!sb_rdonly(fs_info->sb)) {
4072 /*
4073 * The cleaner kthread is stopped, so do one final pass over
4074 * unused block groups.
4075 */
4076 btrfs_delete_unused_bgs(fs_info);
4077
4078 /*
4079 * There might be existing delayed inode workers still running
4080 * and holding an empty delayed inode item. We must wait for
4081 * them to complete first because they can create a transaction.
4082 * This happens when someone calls btrfs_balance_delayed_items()
4083 * and then a transaction commit runs the same delayed nodes
4084 * before any delayed worker has done something with the nodes.
4085 * We must wait for any worker here and not at transaction
4086 * commit time since that could cause a deadlock.
4087 * This is a very rare case.
4088 */
4089 btrfs_flush_workqueue(fs_info->delayed_workers);
4090
4091 ret = btrfs_commit_super(fs_info);
4092 if (ret)
4093 btrfs_err(fs_info, "commit super ret %d", ret);
4094 }
4095
4096 if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state) ||
4097 test_bit(BTRFS_FS_STATE_TRANS_ABORTED, &fs_info->fs_state))
4098 btrfs_error_commit_super(fs_info);
4099
4100 kthread_stop(fs_info->transaction_kthread);
4101 kthread_stop(fs_info->cleaner_kthread);
4102
4103 ASSERT(list_empty(&fs_info->delayed_iputs));
4104 set_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags);
4105
4106 if (btrfs_check_quota_leak(fs_info)) {
4107 WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
4108 btrfs_err(fs_info, "qgroup reserved space leaked");
4109 }
4110
4111 btrfs_free_qgroup_config(fs_info);
4112 ASSERT(list_empty(&fs_info->delalloc_roots));
4113
4114 if (percpu_counter_sum(&fs_info->delalloc_bytes)) {
4115 btrfs_info(fs_info, "at unmount delalloc count %lld",
4116 percpu_counter_sum(&fs_info->delalloc_bytes));
4117 }
4118
4119 if (percpu_counter_sum(&fs_info->dio_bytes))
4120 btrfs_info(fs_info, "at unmount dio bytes count %lld",
4121 percpu_counter_sum(&fs_info->dio_bytes));
4122
4123 btrfs_sysfs_remove_mounted(fs_info);
4124 btrfs_sysfs_remove_fsid(fs_info->fs_devices);
4125
4126 btrfs_free_fs_roots(fs_info);
4127
4128 btrfs_put_block_group_cache(fs_info);
4129
4130 /*
4131 * We must make sure there are no read requests left to
4132 * submit after we stop all the workers.
4133 */
4134 invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
4135 btrfs_stop_all_workers(fs_info);
4136
4137 clear_bit(BTRFS_FS_OPEN, &fs_info->flags);
4138 free_root_pointers(fs_info, true);
4139
4140 /*
4141 * We must free the block groups after dropping the fs_roots as we could
4142 * have had an IO error and have left over tree log blocks that aren't
4143 * cleaned up until the fs roots are freed. This makes the block group
4144 * accounting appear to be wrong because there's pending reserved bytes,
4145 * so make sure we do the block group cleanup afterwards.
4146 */
4147 btrfs_free_block_groups(fs_info);
4148
4149 iput(fs_info->btree_inode);
4150
4151 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
4152 if (btrfs_test_opt(fs_info, CHECK_INTEGRITY))
4153 btrfsic_unmount(fs_info->fs_devices);
4154 #endif
4155
4156 btrfs_mapping_tree_free(&fs_info->mapping_tree);
4157 btrfs_close_devices(fs_info->fs_devices);
4158
4159 percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
4160 percpu_counter_destroy(&fs_info->delalloc_bytes);
4161 percpu_counter_destroy(&fs_info->dio_bytes);
4162 percpu_counter_destroy(&fs_info->dev_replace.bio_counter);
4163 cleanup_srcu_struct(&fs_info->subvol_srcu);
4164
4165 btrfs_free_csum_hash(fs_info);
4166 btrfs_free_stripe_hash_table(fs_info);
4167 btrfs_free_ref_cache(fs_info);
4168 }
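/*
 * The teardown order above largely mirrors (in reverse) the fail_*
 * labels of open_ctree: sysfs entries, fs roots, the block group
 * cache, workers, root pointers, block groups, the btree inode, and
 * finally the mapping tree, devices, percpu counters and hash tables.
 */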
4169
4170 int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
4171 int atomic)
4172 {
4173 int ret;
4174 struct inode *btree_inode = buf->pages[0]->mapping->host;
4175
4176 ret = extent_buffer_uptodate(buf);
4177 if (!ret)
4178 return ret;
4179
4180 ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
4181 parent_transid, atomic);
4182 if (ret == -EAGAIN)
4183 return ret;
4184 return !ret;
4185 }
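/*
 * Note the return convention above: 0 means the buffer is not
 * uptodate, 1 means it is uptodate and the parent transid matched,
 * and -EAGAIN (only possible on the atomic path) asks the caller to
 * retry from a context that may block.
 */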
4186
4187 void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
4188 {
4189 struct btrfs_fs_info *fs_info;
4190 struct btrfs_root *root;
4191 u64 transid = btrfs_header_generation(buf);
4192 int was_dirty;
4193
4194 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
4195 /*
4196 * This is a fast path so only do this check if we have sanity tests
4197 * enabled. Normal people shouldn't be marking unmapped buffers
4198 * dirty outside of the sanity tests.
4199 */
4200 if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &buf->bflags)))
4201 return;
4202 #endif
4203 root = BTRFS_I(buf->pages[0]->mapping->host)->root;
4204 fs_info = root->fs_info;
4205 btrfs_assert_tree_locked(buf);
4206 if (transid != fs_info->generation)
4207 WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, found %llu running %llu\n",
4208 buf->start, transid, fs_info->generation);
4209 was_dirty = set_extent_buffer_dirty(buf);
4210 if (!was_dirty)
4211 percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
4212 buf->len,
4213 fs_info->dirty_metadata_batch);
4214 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
4215 /*
4216 * btrfs_mark_buffer_dirty() can be called with an item pointer set
4217 * but the item data not yet updated, so here we should only check
4218 * the item pointers, not the item data.
4219 */
4220 if (btrfs_header_level(buf) == 0 &&
4221 btrfs_check_leaf_relaxed(buf)) {
4222 btrfs_print_leaf(buf);
4223 ASSERT(0);
4224 }
4225 #endif
4226 }
4227
4228 static void __btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info,
4229 int flush_delayed)
4230 {
4231 /*
4232 * Looks as though older kernels can get into trouble with this
4233 * code; they end up stuck in balance_dirty_pages() forever
4234 */
4235 int ret;
4236
4237 if (current->flags & PF_MEMALLOC)
4238 return;
4239
4240 if (flush_delayed)
4241 btrfs_balance_delayed_items(fs_info);
4242
4243 ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
4244 BTRFS_DIRTY_METADATA_THRESH,
4245 fs_info->dirty_metadata_batch);
4246 if (ret > 0) {
4247 balance_dirty_pages_ratelimited(fs_info->btree_inode->i_mapping);
4248 }
4249 }
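/*
 * BTRFS_DIRTY_METADATA_THRESH bounds how much dirty btree metadata may
 * accumulate before we throttle the caller via the generic
 * balance_dirty_pages machinery; the __percpu_counter_compare() above
 * keeps that check cheap on the fast path.
 */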
4250
4251 void btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info)
4252 {
4253 __btrfs_btree_balance_dirty(fs_info, 1);
4254 }
4255
4256 void btrfs_btree_balance_dirty_nodelay(struct btrfs_fs_info *fs_info)
4257 {
4258 __btrfs_btree_balance_dirty(fs_info, 0);
4259 }
4260
4261 int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid, int level,
4262 struct btrfs_key *first_key)
4263 {
4264 return btree_read_extent_buffer_pages(buf, parent_transid,
4265 level, first_key);
4266 }
4267
4268 static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info)
4269 {
4270 /* cleanup FS via transaction */
4271 btrfs_cleanup_transaction(fs_info);
4272
4273 mutex_lock(&fs_info->cleaner_mutex);
4274 btrfs_run_delayed_iputs(fs_info);
4275 mutex_unlock(&fs_info->cleaner_mutex);
4276
4277 down_write(&fs_info->cleanup_work_sem);
4278 up_write(&fs_info->cleanup_work_sem);
4279 }
4280
4281 static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
4282 {
4283 struct btrfs_ordered_extent *ordered;
4284
4285 spin_lock(&root->ordered_extent_lock);
4286 /*
4287 * This will just short-circuit the ordered completion code, which
4288 * will make sure each ordered extent gets properly cleaned up.
4289 */
4290 list_for_each_entry(ordered, &root->ordered_extents,
4291 root_extent_list)
4292 set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
4293 spin_unlock(&root->ordered_extent_lock);
4294 }
4295
4296 static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
4297 {
4298 struct btrfs_root *root;
4299 struct list_head splice;
4300
4301 INIT_LIST_HEAD(&splice);
4302
4303 spin_lock(&fs_info->ordered_root_lock);
4304 list_splice_init(&fs_info->ordered_roots, &splice);
4305 while (!list_empty(&splice)) {
4306 root = list_first_entry(&splice, struct btrfs_root,
4307 ordered_root);
4308 list_move_tail(&root->ordered_root,
4309 &fs_info->ordered_roots);
4310
4311 spin_unlock(&fs_info->ordered_root_lock);
4312 btrfs_destroy_ordered_extents(root);
4313
4314 cond_resched();
4315 spin_lock(&fs_info->ordered_root_lock);
4316 }
4317 spin_unlock(&fs_info->ordered_root_lock);
4318
4319 /*
4320 * We need this here because if we've been flipped read-only we won't
4321 * get sync() from the umount, so we need to make sure any ordered
4322 * extents that haven't had their dirty pages IO start writeout yet
4323 * actually get run and error out properly.
4324 */
4325 btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
4326 }
4327
4328 static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
4329 struct btrfs_fs_info *fs_info)
4330 {
4331 struct rb_node *node;
4332 struct btrfs_delayed_ref_root *delayed_refs;
4333 struct btrfs_delayed_ref_node *ref;
4334 int ret = 0;
4335
4336 delayed_refs = &trans->delayed_refs;
4337
4338 spin_lock(&delayed_refs->lock);
4339 if (atomic_read(&delayed_refs->num_entries) == 0) {
4340 spin_unlock(&delayed_refs->lock);
4341 btrfs_info(fs_info, "delayed_refs has NO entry");
4342 return ret;
4343 }
4344
4345 while ((node = rb_first_cached(&delayed_refs->href_root)) != NULL) {
4346 struct btrfs_delayed_ref_head *head;
4347 struct rb_node *n;
4348 bool pin_bytes = false;
4349
4350 head = rb_entry(node, struct btrfs_delayed_ref_head,
4351 href_node);
4352 if (btrfs_delayed_ref_lock(delayed_refs, head))
4353 continue;
4354
4355 spin_lock(&head->lock);
4356 while ((n = rb_first_cached(&head->ref_tree)) != NULL) {
4357 ref = rb_entry(n, struct btrfs_delayed_ref_node,
4358 ref_node);
4359 ref->in_tree = 0;
4360 rb_erase_cached(&ref->ref_node, &head->ref_tree);
4361 RB_CLEAR_NODE(&ref->ref_node);
4362 if (!list_empty(&ref->add_list))
4363 list_del(&ref->add_list);
4364 atomic_dec(&delayed_refs->num_entries);
4365 btrfs_put_delayed_ref(ref);
4366 }
4367 if (head->must_insert_reserved)
4368 pin_bytes = true;
4369 btrfs_free_delayed_extent_op(head->extent_op);
4370 btrfs_delete_ref_head(delayed_refs, head);
4371 spin_unlock(&head->lock);
4372 spin_unlock(&delayed_refs->lock);
4373 mutex_unlock(&head->mutex);
4374
4375 if (pin_bytes)
4376 btrfs_pin_extent(fs_info, head->bytenr,
4377 head->num_bytes, 1);
4378 btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);
4379 btrfs_put_delayed_ref_head(head);
4380 cond_resched();
4381 spin_lock(&delayed_refs->lock);
4382 }
4383 btrfs_qgroup_destroy_extent_records(trans);
4384
4385 spin_unlock(&delayed_refs->lock);
4386
4387 return ret;
4388 }
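/*
 * The pin_bytes case above handles heads with must_insert_reserved
 * set: the extent was reserved but its extent item never made it into
 * the extent tree, so the range is pinned here and ultimately released
 * by btrfs_destroy_pinned_extent() during transaction cleanup.
 */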
4389
4390 static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
4391 {
4392 struct btrfs_inode *btrfs_inode;
4393 struct list_head splice;
4394
4395 INIT_LIST_HEAD(&splice);
4396
4397 spin_lock(&root->delalloc_lock);
4398 list_splice_init(&root->delalloc_inodes, &splice);
4399
4400 while (!list_empty(&splice)) {
4401 struct inode *inode = NULL;
4402 btrfs_inode = list_first_entry(&splice, struct btrfs_inode,
4403 delalloc_inodes);
4404 __btrfs_del_delalloc_inode(root, btrfs_inode);
4405 spin_unlock(&root->delalloc_lock);
4406
4407 /*
4408 * Make sure we get a live inode and that it'll not disappear
4409 * meanwhile.
4410 */
4411 inode = igrab(&btrfs_inode->vfs_inode);
4412 if (inode) {
4413 unsigned int nofs_flag;
4414
4415 nofs_flag = memalloc_nofs_save();
4416 invalidate_inode_pages2(inode->i_mapping);
4417 memalloc_nofs_restore(nofs_flag);
4418 iput(inode);
4419 }
4420 spin_lock(&root->delalloc_lock);
4421 }
4422 spin_unlock(&root->delalloc_lock);
4423 }
4424
4425 static void btrfs_destroy_all_delalloc_inodes(struct btrfs_fs_info *fs_info)
4426 {
4427 struct btrfs_root *root;
4428 struct list_head splice;
4429
4430 INIT_LIST_HEAD(&splice);
4431
4432 spin_lock(&fs_info->delalloc_root_lock);
4433 list_splice_init(&fs_info->delalloc_roots, &splice);
4434 while (!list_empty(&splice)) {
4435 root = list_first_entry(&splice, struct btrfs_root,
4436 delalloc_root);
4437 root = btrfs_grab_fs_root(root);
4438 BUG_ON(!root);
4439 spin_unlock(&fs_info->delalloc_root_lock);
4440
4441 btrfs_destroy_delalloc_inodes(root);
4442 btrfs_put_fs_root(root);
4443
4444 spin_lock(&fs_info->delalloc_root_lock);
4445 }
4446 spin_unlock(&fs_info->delalloc_root_lock);
4447 }
4448
4449 static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
4450 struct extent_io_tree *dirty_pages,
4451 int mark)
4452 {
4453 int ret;
4454 struct extent_buffer *eb;
4455 u64 start = 0;
4456 u64 end;
4457
4458 while (1) {
4459 ret = find_first_extent_bit(dirty_pages, start, &start, &end,
4460 mark, NULL);
4461 if (ret)
4462 break;
4463
4464 clear_extent_bits(dirty_pages, start, end, mark);
4465 while (start <= end) {
4466 eb = find_extent_buffer(fs_info, start);
4467 start += fs_info->nodesize;
4468 if (!eb)
4469 continue;
4470 wait_on_extent_buffer_writeback(eb);
4471
4472 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY,
4473 &eb->bflags))
4474 clear_extent_buffer_dirty(eb);
4475 free_extent_buffer_stale(eb);
4476 }
4477 }
4478
4479 return ret;
4480 }
4481
4482 static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
4483 struct extent_io_tree *pinned_extents)
4484 {
4485 struct extent_io_tree *unpin;
4486 u64 start;
4487 u64 end;
4488 int ret;
4489 bool loop = true;
4490
4491 unpin = pinned_extents;
4492 again:
4493 while (1) {
4494 struct extent_state *cached_state = NULL;
4495
4496 /*
4497 * btrfs_finish_extent_commit() may find the same range as ours
4498 * between find_first_extent_bit() and clear_extent_dirty().
4499 * Hence, hold the unused_bg_unpin_mutex to avoid double
4500 * unpinning the same extent range.
4501 */
4502 mutex_lock(&fs_info->unused_bg_unpin_mutex);
4503 ret = find_first_extent_bit(unpin, 0, &start, &end,
4504 EXTENT_DIRTY, &cached_state);
4505 if (ret) {
4506 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
4507 break;
4508 }
4509
4510 clear_extent_dirty(unpin, start, end, &cached_state);
4511 free_extent_state(cached_state);
4512 btrfs_error_unpin_extent_range(fs_info, start, end);
4513 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
4514 cond_resched();
4515 }
4516
4517 if (loop) {
4518 if (unpin == &fs_info->freed_extents[0])
4519 unpin = &fs_info->freed_extents[1];
4520 else
4521 unpin = &fs_info->freed_extents[0];
4522 loop = false;
4523 goto again;
4524 }
4525
4526 return 0;
4527 }
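/*
 * fs_info->freed_extents[] is a pair of trees that committing
 * transactions use in alternation; since we cannot tell here which one
 * was active when the filesystem went down, the loop above runs once
 * for the tree passed in and once more for its sibling.
 */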
4528
4529 static void btrfs_cleanup_bg_io(struct btrfs_block_group_cache *cache)
4530 {
4531 struct inode *inode;
4532
4533 inode = cache->io_ctl.inode;
4534 if (inode) {
4535 unsigned int nofs_flag;
4536
4537 nofs_flag = memalloc_nofs_save();
4538 invalidate_inode_pages2(inode->i_mapping);
4539 memalloc_nofs_restore(nofs_flag);
4540
4541 BTRFS_I(inode)->generation = 0;
4542 cache->io_ctl.inode = NULL;
4543 iput(inode);
4544 }
4545 ASSERT(cache->io_ctl.pages == NULL);
4546 btrfs_put_block_group(cache);
4547 }
4548
4549 void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
4550 struct btrfs_fs_info *fs_info)
4551 {
4552 struct btrfs_block_group_cache *cache;
4553
4554 spin_lock(&cur_trans->dirty_bgs_lock);
4555 while (!list_empty(&cur_trans->dirty_bgs)) {
4556 cache = list_first_entry(&cur_trans->dirty_bgs,
4557 struct btrfs_block_group_cache,
4558 dirty_list);
4559
4560 if (!list_empty(&cache->io_list)) {
4561 spin_unlock(&cur_trans->dirty_bgs_lock);
4562 list_del_init(&cache->io_list);
4563 btrfs_cleanup_bg_io(cache);
4564 spin_lock(&cur_trans->dirty_bgs_lock);
4565 }
4566
4567 list_del_init(&cache->dirty_list);
4568 spin_lock(&cache->lock);
4569 cache->disk_cache_state = BTRFS_DC_ERROR;
4570 spin_unlock(&cache->lock);
4571
4572 spin_unlock(&cur_trans->dirty_bgs_lock);
4573 btrfs_put_block_group(cache);
4574 btrfs_delayed_refs_rsv_release(fs_info, 1);
4575 spin_lock(&cur_trans->dirty_bgs_lock);
4576 }
4577 spin_unlock(&cur_trans->dirty_bgs_lock);
4578
4579 /*
4580 * Refer to the definition of the io_bgs member for details on why
4581 * it's safe to use it without any locking
4582 */
4583 while (!list_empty(&cur_trans->io_bgs)) {
4584 cache = list_first_entry(&cur_trans->io_bgs,
4585 struct btrfs_block_group_cache,
4586 io_list);
4587
4588 list_del_init(&cache->io_list);
4589 spin_lock(&cache->lock);
4590 cache->disk_cache_state = BTRFS_DC_ERROR;
4591 spin_unlock(&cache->lock);
4592 btrfs_cleanup_bg_io(cache);
4593 }
4594 }
4595
4596 void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
4597 struct btrfs_fs_info *fs_info)
4598 {
4599 struct btrfs_device *dev, *tmp;
4600
4601 btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
4602 ASSERT(list_empty(&cur_trans->dirty_bgs));
4603 ASSERT(list_empty(&cur_trans->io_bgs));
4604
4605 list_for_each_entry_safe(dev, tmp, &cur_trans->dev_update_list,
4606 post_commit_list) {
4607 list_del_init(&dev->post_commit_list);
4608 }
4609
4610 btrfs_destroy_delayed_refs(cur_trans, fs_info);
4611
4612 cur_trans->state = TRANS_STATE_COMMIT_START;
4613 wake_up(&fs_info->transaction_blocked_wait);
4614
4615 cur_trans->state = TRANS_STATE_UNBLOCKED;
4616 wake_up(&fs_info->transaction_wait);
4617
4618 btrfs_destroy_delayed_inodes(fs_info);
4619
4620 btrfs_destroy_marked_extents(fs_info, &cur_trans->dirty_pages,
4621 EXTENT_DIRTY);
4622 btrfs_destroy_pinned_extent(fs_info,
4623 fs_info->pinned_extents);
4624
4625 cur_trans->state = TRANS_STATE_COMPLETED;
4626 wake_up(&cur_trans->commit_wait);
4627 }
4628
4629 static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info)
4630 {
4631 struct btrfs_transaction *t;
4632
4633 mutex_lock(&fs_info->transaction_kthread_mutex);
4634
4635 spin_lock(&fs_info->trans_lock);
4636 while (!list_empty(&fs_info->trans_list)) {
4637 t = list_first_entry(&fs_info->trans_list,
4638 struct btrfs_transaction, list);
4639 if (t->state >= TRANS_STATE_COMMIT_START) {
4640 refcount_inc(&t->use_count);
4641 spin_unlock(&fs_info->trans_lock);
4642 btrfs_wait_for_commit(fs_info, t->transid);
4643 btrfs_put_transaction(t);
4644 spin_lock(&fs_info->trans_lock);
4645 continue;
4646 }
4647 if (t == fs_info->running_transaction) {
4648 t->state = TRANS_STATE_COMMIT_DOING;
4649 spin_unlock(&fs_info->trans_lock);
4650 /*
4651 * We wait for num_writers to drop to 0 since we don't hold a
4652 * trans handle open currently for this transaction.
4653 */
4654 wait_event(t->writer_wait,
4655 atomic_read(&t->num_writers) == 0);
4656 } else {
4657 spin_unlock(&fs_info->trans_lock);
4658 }
4659 btrfs_cleanup_one_transaction(t, fs_info);
4660
4661 spin_lock(&fs_info->trans_lock);
4662 if (t == fs_info->running_transaction)
4663 fs_info->running_transaction = NULL;
4664 list_del_init(&t->list);
4665 spin_unlock(&fs_info->trans_lock);
4666
4667 btrfs_put_transaction(t);
4668 trace_btrfs_transaction_commit(fs_info->tree_root);
4669 spin_lock(&fs_info->trans_lock);
4670 }
4671 spin_unlock(&fs_info->trans_lock);
4672 btrfs_destroy_all_ordered_extents(fs_info);
4673 btrfs_destroy_delayed_inodes(fs_info);
4674 btrfs_assert_delayed_root_empty(fs_info);
4675 btrfs_destroy_pinned_extent(fs_info, fs_info->pinned_extents);
4676 btrfs_destroy_all_delalloc_inodes(fs_info);
4677 mutex_unlock(&fs_info->transaction_kthread_mutex);
4678
4679 return 0;
4680 }
4681
4682 static const struct extent_io_ops btree_extent_io_ops = {
4683 /* mandatory callbacks */
4684 .submit_bio_hook = btree_submit_bio_hook,
4685 .readpage_end_io_hook = btree_readpage_end_io_hook,
4686 };
4687