// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/radix-tree.h>
#include <linux/writeback.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/migrate.h>
#include <linux/ratelimit.h>
#include <linux/uuid.h>
#include <linux/semaphore.h>
#include <linux/error-injection.h>
#include <linux/crc32c.h>
#include <linux/sched/mm.h>
#include <linux/unaligned.h>
#include <crypto/hash.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "bio.h"
#include "print-tree.h"
#include "locking.h"
#include "tree-log.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
#include "dev-replace.h"
#include "raid56.h"
#include "sysfs.h"
#include "qgroup.h"
#include "compression.h"
#include "tree-checker.h"
#include "ref-verify.h"
#include "block-group.h"
#include "discard.h"
#include "space-info.h"
#include "zoned.h"
#include "subpage.h"
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"
#include "root-tree.h"
#include "defrag.h"
#include "uuid-tree.h"
#include "relocation.h"
#include "scrub.h"
#include "super.h"

#define BTRFS_SUPER_FLAG_SUPP	(BTRFS_HEADER_FLAG_WRITTEN |\
				 BTRFS_HEADER_FLAG_RELOC |\
				 BTRFS_SUPER_FLAG_ERROR |\
				 BTRFS_SUPER_FLAG_SEEDING |\
				 BTRFS_SUPER_FLAG_METADUMP |\
				 BTRFS_SUPER_FLAG_METADUMP_V2)

static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info);
static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info);

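/* Free the checksum shash allocated at mount time, if any. */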
static void btrfs_free_csum_hash(struct btrfs_fs_info *fs_info)
{
	if (fs_info->csum_shash)
		crypto_free_shash(fs_info->csum_shash);
}

/*
 * Compute the csum of a btree block and store the result to provided buffer.
 */
static void csum_tree_block(struct extent_buffer *buf, u8 *result)
{
	struct btrfs_fs_info *fs_info = buf->fs_info;
	int num_pages;
	u32 first_page_part;
	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
	char *kaddr;
	int i;

	shash->tfm = fs_info->csum_shash;
	crypto_shash_init(shash);

	if (buf->addr) {
		/* Pages are contiguous, handle them as a big one. */
		kaddr = buf->addr;
		first_page_part = fs_info->nodesize;
		num_pages = 1;
	} else {
		kaddr = folio_address(buf->folios[0]);
		first_page_part = min_t(u32, PAGE_SIZE, fs_info->nodesize);
		num_pages = num_extent_pages(buf);
	}

	crypto_shash_update(shash, kaddr + BTRFS_CSUM_SIZE,
			    first_page_part - BTRFS_CSUM_SIZE);

	/*
	 * Only the case of multiple single-page folios reaches here; the
	 * nodesize <= PAGE_SIZE and large folio cases were already fully
	 * handled by the crypto_shash_update() above.
	 */
	for (i = 1; i < num_pages && INLINE_EXTENT_BUFFER_PAGES > 1; i++) {
		kaddr = folio_address(buf->folios[i]);
		crypto_shash_update(shash, kaddr, PAGE_SIZE);
	}
	memset(result, 0, BTRFS_CSUM_SIZE);
	crypto_shash_final(shash, result);
}

/*
 * we can't consider a given block up to date unless the transid of the
 * block matches the transid in the parent node's pointer.  This is how we
 * detect blocks that either didn't get written at all or got written
 * in the wrong place.
 */
int btrfs_buffer_uptodate(struct extent_buffer *eb, u64 parent_transid, int atomic)
{
	if (!extent_buffer_uptodate(eb))
		return 0;

	if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
		return 1;

	if (atomic)
		return -EAGAIN;

	if (!extent_buffer_uptodate(eb) ||
	    btrfs_header_generation(eb) != parent_transid) {
		btrfs_err_rl(eb->fs_info,
"parent transid verify failed on logical %llu mirror %u wanted %llu found %llu",
			eb->start, eb->read_mirror,
			parent_transid, btrfs_header_generation(eb));
		clear_extent_buffer_uptodate(eb);
		return 0;
	}
	return 1;
}

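/*
 * Return true if @csum_type is one of the checksum algorithms supported for
 * the super block.
 */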
static bool btrfs_supported_super_csum(u16 csum_type)
{
	switch (csum_type) {
	case BTRFS_CSUM_TYPE_CRC32:
	case BTRFS_CSUM_TYPE_XXHASH:
	case BTRFS_CSUM_TYPE_SHA256:
	case BTRFS_CSUM_TYPE_BLAKE2:
		return true;
	default:
		return false;
	}
}

/*
 * Return 0 if the superblock checksum type matches the checksum value of that
 * algorithm. Pass the raw disk superblock data.
 */
int btrfs_check_super_csum(struct btrfs_fs_info *fs_info,
			   const struct btrfs_super_block *disk_sb)
{
	char result[BTRFS_CSUM_SIZE];
	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);

	shash->tfm = fs_info->csum_shash;

	/*
	 * The super_block structure does not span the whole
	 * BTRFS_SUPER_INFO_SIZE range, we expect that the unused space is
	 * filled with zeros and is included in the checksum.
	 */
	crypto_shash_digest(shash, (const u8 *)disk_sb + BTRFS_CSUM_SIZE,
			    BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE, result);

	if (memcmp(disk_sb->csum, result, fs_info->csum_size))
		return 1;

	return 0;
}

static int btrfs_repair_eb_io_failure(const struct extent_buffer *eb,
				      int mirror_num)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	int num_folios = num_extent_folios(eb);
	int ret = 0;

	if (sb_rdonly(fs_info->sb))
		return -EROFS;

	for (int i = 0; i < num_folios; i++) {
		struct folio *folio = eb->folios[i];
		u64 start = max_t(u64, eb->start, folio_pos(folio));
		u64 end = min_t(u64, eb->start + eb->len,
				folio_pos(folio) + eb->folio_size);
		u32 len = end - start;

		ret = btrfs_repair_io_failure(fs_info, 0, start, len,
					      start, folio, offset_in_folio(folio, start),
					      mirror_num);
		if (ret)
			break;
	}

	return ret;
}

/*
 * helper to read a given tree block, doing retries as required when
 * the checksums don't match and we have alternate mirrors to try.
 *
 * @check:		expected tree parentness check, see the comments of the
 *			structure for details.
 */
int btrfs_read_extent_buffer(struct extent_buffer *eb,
			     const struct btrfs_tree_parent_check *check)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	int failed = 0;
	int ret;
	int num_copies = 0;
	int mirror_num = 0;
	int failed_mirror = 0;

	ASSERT(check);

	while (1) {
		clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
		ret = read_extent_buffer_pages(eb, WAIT_COMPLETE, mirror_num, check);
		if (!ret)
			break;

		num_copies = btrfs_num_copies(fs_info,
					      eb->start, eb->len);
		if (num_copies == 1)
			break;

		if (!failed_mirror) {
			failed = 1;
			failed_mirror = eb->read_mirror;
		}

		mirror_num++;
		if (mirror_num == failed_mirror)
			mirror_num++;

		if (mirror_num > num_copies)
			break;
	}

	if (failed && !ret && failed_mirror)
		btrfs_repair_eb_io_failure(eb, failed_mirror);

	return ret;
}

/*
 * Checksum a dirty tree block before IO.
 */
blk_status_t btree_csum_one_bio(struct btrfs_bio *bbio)
{
	struct extent_buffer *eb = bbio->private;
	struct btrfs_fs_info *fs_info = eb->fs_info;
	u64 found_start = btrfs_header_bytenr(eb);
	u64 last_trans;
	u8 result[BTRFS_CSUM_SIZE];
	int ret;

	/* Btree blocks are always contiguous on disk. */
	if (WARN_ON_ONCE(bbio->file_offset != eb->start))
		return BLK_STS_IOERR;
	if (WARN_ON_ONCE(bbio->bio.bi_iter.bi_size != eb->len))
		return BLK_STS_IOERR;

	/*
	 * If an extent_buffer is marked as EXTENT_BUFFER_ZONED_ZEROOUT, don't
	 * checksum it but zero-out its content. This is done to preserve
	 * ordering of I/O without unnecessarily writing out data.
	 */
	if (test_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags)) {
		memzero_extent_buffer(eb, 0, eb->len);
		return BLK_STS_OK;
	}

	if (WARN_ON_ONCE(found_start != eb->start))
		return BLK_STS_IOERR;
	if (WARN_ON(!btrfs_folio_test_uptodate(fs_info, eb->folios[0],
					       eb->start, eb->len)))
		return BLK_STS_IOERR;

	ASSERT(memcmp_extent_buffer(eb, fs_info->fs_devices->metadata_uuid,
				    offsetof(struct btrfs_header, fsid),
				    BTRFS_FSID_SIZE) == 0);
	csum_tree_block(eb, result);

	if (btrfs_header_level(eb))
		ret = btrfs_check_node(eb);
	else
		ret = btrfs_check_leaf(eb);

	if (ret < 0)
		goto error;

	/*
	 * Also check the generation: any eb that reaches here must be newer
	 * than the last committed transaction, otherwise something is
	 * seriously wrong.
	 */
	last_trans = btrfs_get_last_trans_committed(fs_info);
	if (unlikely(btrfs_header_generation(eb) <= last_trans)) {
		ret = -EUCLEAN;
		btrfs_err(fs_info,
			"block=%llu bad generation, have %llu expect > %llu",
			  eb->start, btrfs_header_generation(eb), last_trans);
		goto error;
	}
	write_extent_buffer(eb, result, 0, fs_info->csum_size);
	return BLK_STS_OK;

error:
	btrfs_print_tree(eb, 0);
	btrfs_err(fs_info, "block=%llu write time tree block corruption detected",
		  eb->start);
	/*
	 * Be noisy if this is an extent buffer from a log tree. We don't abort
	 * a transaction in case there's a bad log tree extent buffer, we just
	 * fall back to a transaction commit. Still we want to know when there
	 * is a bad log tree extent buffer, as that may signal a bug somewhere.
	 */
	WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG) ||
		btrfs_header_owner(eb) == BTRFS_TREE_LOG_OBJECTID);
	return errno_to_blk_status(ret);
}

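/*
 * Return true if the fsid in @eb's header matches neither this filesystem's
 * metadata UUID nor the fsid of any of its seed devices, i.e. the block does
 * not belong to this filesystem.
 */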
static bool check_tree_block_fsid(struct extent_buffer *eb)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
	u8 fsid[BTRFS_FSID_SIZE];

	read_extent_buffer(eb, fsid, offsetof(struct btrfs_header, fsid),
			   BTRFS_FSID_SIZE);

	/*
	 * alloc_fsid_devices() copies the fsid into fs_devices::metadata_uuid.
	 * This is then overwritten by metadata_uuid if it is present in
	 * device_list_add(). The same is true for a seed device. So use of
	 * fs_devices::metadata_uuid is appropriate here.
	 */
	if (memcmp(fsid, fs_info->fs_devices->metadata_uuid, BTRFS_FSID_SIZE) == 0)
		return false;

	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list)
		if (!memcmp(fsid, seed_devs->fsid, BTRFS_FSID_SIZE))
			return false;

	return true;
}

/* Do basic extent buffer checks at read time */
int btrfs_validate_extent_buffer(struct extent_buffer *eb,
				 const struct btrfs_tree_parent_check *check)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	u64 found_start;
	const u32 csum_size = fs_info->csum_size;
	u8 found_level;
	u8 result[BTRFS_CSUM_SIZE];
	const u8 *header_csum;
	int ret = 0;
	const bool ignore_csum = btrfs_test_opt(fs_info, IGNOREMETACSUMS);

	ASSERT(check);

	found_start = btrfs_header_bytenr(eb);
	if (found_start != eb->start) {
		btrfs_err_rl(fs_info,
			"bad tree block start, mirror %u want %llu have %llu",
			     eb->read_mirror, eb->start, found_start);
		ret = -EIO;
		goto out;
	}
	if (check_tree_block_fsid(eb)) {
		btrfs_err_rl(fs_info, "bad fsid on logical %llu mirror %u",
			     eb->start, eb->read_mirror);
		ret = -EIO;
		goto out;
	}
	found_level = btrfs_header_level(eb);
	if (found_level >= BTRFS_MAX_LEVEL) {
		btrfs_err(fs_info,
			"bad tree block level, mirror %u level %d on logical %llu",
			eb->read_mirror, btrfs_header_level(eb), eb->start);
		ret = -EIO;
		goto out;
	}

	csum_tree_block(eb, result);
	header_csum = folio_address(eb->folios[0]) +
		get_eb_offset_in_folio(eb, offsetof(struct btrfs_header, csum));

	if (memcmp(result, header_csum, csum_size) != 0) {
		btrfs_warn_rl(fs_info,
"checksum verify failed on logical %llu mirror %u wanted " CSUM_FMT " found " CSUM_FMT " level %d%s",
			      eb->start, eb->read_mirror,
			      CSUM_FMT_VALUE(csum_size, header_csum),
			      CSUM_FMT_VALUE(csum_size, result),
			      btrfs_header_level(eb),
			      ignore_csum ? ", ignored" : "");
		if (!ignore_csum) {
			ret = -EUCLEAN;
			goto out;
		}
	}

	if (found_level != check->level) {
		btrfs_err(fs_info,
		"level verify failed on logical %llu mirror %u wanted %u found %u",
			  eb->start, eb->read_mirror, check->level, found_level);
		ret = -EIO;
		goto out;
	}
	if (unlikely(check->transid &&
		     btrfs_header_generation(eb) != check->transid)) {
		btrfs_err_rl(eb->fs_info,
"parent transid verify failed on logical %llu mirror %u wanted %llu found %llu",
				eb->start, eb->read_mirror, check->transid,
				btrfs_header_generation(eb));
		ret = -EIO;
		goto out;
	}
	if (check->has_first_key) {
		const struct btrfs_key *expect_key = &check->first_key;
		struct btrfs_key found_key;

		if (found_level)
			btrfs_node_key_to_cpu(eb, &found_key, 0);
		else
			btrfs_item_key_to_cpu(eb, &found_key, 0);
		if (unlikely(btrfs_comp_cpu_keys(expect_key, &found_key))) {
			btrfs_err(fs_info,
"tree first key mismatch detected, bytenr=%llu parent_transid=%llu key expected=(%llu,%u,%llu) has=(%llu,%u,%llu)",
				  eb->start, check->transid,
				  expect_key->objectid,
				  expect_key->type, expect_key->offset,
				  found_key.objectid, found_key.type,
				  found_key.offset);
			ret = -EUCLEAN;
			goto out;
		}
	}
	if (check->owner_root) {
		ret = btrfs_check_eb_owner(eb, check->owner_root);
		if (ret < 0)
			goto out;
	}

	/*
	 * If this is a leaf block and it is corrupt, set the corrupt bit so
	 * that we don't try and read the other copies of this block, just
	 * return -EIO.
	 */
	if (found_level == 0 && btrfs_check_leaf(eb)) {
		set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
		ret = -EIO;
	}

	if (found_level > 0 && btrfs_check_node(eb))
		ret = -EIO;

	if (ret)
		btrfs_err(fs_info,
		"read time tree block corruption detected on logical %llu mirror %u",
			  eb->start, eb->read_mirror);
out:
	return ret;
}

#ifdef CONFIG_MIGRATION
static int btree_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	/*
	 * we can't safely write a btree page from here,
	 * we haven't done the locking hook
	 */
	if (folio_test_dirty(src))
		return -EAGAIN;
	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (folio_get_private(src) &&
	    !filemap_release_folio(src, GFP_KERNEL))
		return -EAGAIN;
	return migrate_folio(mapping, dst, src, mode);
}
#else
#define btree_migrate_folio NULL
#endif

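/*
 * For background (WB_SYNC_NONE) writeback, skip writing btree pages until
 * enough metadata is dirty to make the IO worthwhile; sync writeback is
 * always submitted.
 */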
static int btree_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	int ret;

	if (wbc->sync_mode == WB_SYNC_NONE) {
		struct btrfs_fs_info *fs_info;

		if (wbc->for_kupdate)
			return 0;

		fs_info = inode_to_fs_info(mapping->host);
		/* this is a bit racy, but that's ok */
		ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
					     BTRFS_DIRTY_METADATA_THRESH,
					     fs_info->dirty_metadata_batch);
		if (ret < 0)
			return 0;
	}
	return btree_write_cache_pages(mapping, wbc);
}

static bool btree_release_folio(struct folio *folio, gfp_t gfp_flags)
{
	if (folio_test_writeback(folio) || folio_test_dirty(folio))
		return false;

	return try_release_extent_buffer(folio);
}

static void btree_invalidate_folio(struct folio *folio, size_t offset,
				 size_t length)
{
	struct extent_io_tree *tree;

	tree = &folio_to_inode(folio)->io_tree;
	extent_invalidate_folio(tree, folio, offset);
	btree_release_folio(folio, GFP_NOFS);
	if (folio_get_private(folio)) {
		btrfs_warn(folio_to_fs_info(folio),
			   "folio private not zero on folio %llu",
			   (unsigned long long)folio_pos(folio));
		folio_detach_private(folio);
	}
}

#ifdef DEBUG
static bool btree_dirty_folio(struct address_space *mapping,
		struct folio *folio)
{
	struct btrfs_fs_info *fs_info = inode_to_fs_info(mapping->host);
	struct btrfs_subpage_info *spi = fs_info->subpage_info;
	struct btrfs_subpage *subpage;
	struct extent_buffer *eb;
	int cur_bit = 0;
	u64 page_start = folio_pos(folio);

	if (fs_info->sectorsize == PAGE_SIZE) {
		eb = folio_get_private(folio);
		BUG_ON(!eb);
		BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
		BUG_ON(!atomic_read(&eb->refs));
		btrfs_assert_tree_write_locked(eb);
		return filemap_dirty_folio(mapping, folio);
	}

	ASSERT(spi);
	subpage = folio_get_private(folio);

	for (cur_bit = spi->dirty_offset;
	     cur_bit < spi->dirty_offset + spi->bitmap_nr_bits;
	     cur_bit++) {
		unsigned long flags;
		u64 cur;

		spin_lock_irqsave(&subpage->lock, flags);
		if (!test_bit(cur_bit, subpage->bitmaps)) {
			spin_unlock_irqrestore(&subpage->lock, flags);
			continue;
		}
		spin_unlock_irqrestore(&subpage->lock, flags);
		cur = page_start + cur_bit * fs_info->sectorsize;

		eb = find_extent_buffer(fs_info, cur);
		ASSERT(eb);
		ASSERT(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
		ASSERT(atomic_read(&eb->refs));
		btrfs_assert_tree_write_locked(eb);
		free_extent_buffer(eb);

		cur_bit += (fs_info->nodesize >> fs_info->sectorsize_bits) - 1;
	}
	return filemap_dirty_folio(mapping, folio);
}
#else
#define btree_dirty_folio filemap_dirty_folio
#endif

static const struct address_space_operations btree_aops = {
	.writepages	= btree_writepages,
	.release_folio	= btree_release_folio,
	.invalidate_folio = btree_invalidate_folio,
	.migrate_folio	= btree_migrate_folio,
	.dirty_folio	= btree_dirty_folio,
};

struct extent_buffer *btrfs_find_create_tree_block(
						struct btrfs_fs_info *fs_info,
						u64 bytenr, u64 owner_root,
						int level)
{
	if (btrfs_is_testing(fs_info))
		return alloc_test_extent_buffer(fs_info, bytenr);
	return alloc_extent_buffer(fs_info, bytenr, owner_root, level);
}

/*
 * Read the tree block at logical address @bytenr and perform various basic
 * but critical verifications.
 *
 * @check:		expected tree parentness check, see comments of the
 *			structure for details.
 */
struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr,
				      struct btrfs_tree_parent_check *check)
{
	struct extent_buffer *buf = NULL;
	int ret;

	ASSERT(check);

	buf = btrfs_find_create_tree_block(fs_info, bytenr, check->owner_root,
					   check->level);
	if (IS_ERR(buf))
		return buf;

	ret = btrfs_read_extent_buffer(buf, check);
	if (ret) {
		free_extent_buffer_stale(buf);
		return ERR_PTR(ret);
	}
	return buf;
}

static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
			 u64 objectid)
{
	bool dummy = btrfs_is_testing(fs_info);

	memset(&root->root_key, 0, sizeof(root->root_key));
	memset(&root->root_item, 0, sizeof(root->root_item));
	memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
	root->fs_info = fs_info;
	root->root_key.objectid = objectid;
	root->node = NULL;
	root->commit_root = NULL;
	root->state = 0;
	RB_CLEAR_NODE(&root->rb_node);

	btrfs_set_root_last_trans(root, 0);
	root->free_objectid = 0;
	root->nr_delalloc_inodes = 0;
	root->nr_ordered_extents = 0;
	xa_init(&root->inodes);
	xa_init(&root->delayed_nodes);

	btrfs_init_root_block_rsv(root);

	INIT_LIST_HEAD(&root->dirty_list);
	INIT_LIST_HEAD(&root->root_list);
	INIT_LIST_HEAD(&root->delalloc_inodes);
	INIT_LIST_HEAD(&root->delalloc_root);
	INIT_LIST_HEAD(&root->ordered_extents);
	INIT_LIST_HEAD(&root->ordered_root);
	INIT_LIST_HEAD(&root->reloc_dirty_list);
	spin_lock_init(&root->delalloc_lock);
	spin_lock_init(&root->ordered_extent_lock);
	spin_lock_init(&root->accounting_lock);
	spin_lock_init(&root->qgroup_meta_rsv_lock);
	mutex_init(&root->objectid_mutex);
	mutex_init(&root->log_mutex);
	mutex_init(&root->ordered_extent_mutex);
	mutex_init(&root->delalloc_mutex);
	init_waitqueue_head(&root->qgroup_flush_wait);
	init_waitqueue_head(&root->log_writer_wait);
	init_waitqueue_head(&root->log_commit_wait[0]);
	init_waitqueue_head(&root->log_commit_wait[1]);
	INIT_LIST_HEAD(&root->log_ctxs[0]);
	INIT_LIST_HEAD(&root->log_ctxs[1]);
	atomic_set(&root->log_commit[0], 0);
	atomic_set(&root->log_commit[1], 0);
	atomic_set(&root->log_writers, 0);
	atomic_set(&root->log_batch, 0);
	refcount_set(&root->refs, 1);
	atomic_set(&root->snapshot_force_cow, 0);
	atomic_set(&root->nr_swapfiles, 0);
	btrfs_set_root_log_transid(root, 0);
	root->log_transid_committed = -1;
	btrfs_set_root_last_log_commit(root, 0);
	root->anon_dev = 0;
	if (!dummy) {
		extent_io_tree_init(fs_info, &root->dirty_log_pages,
				    IO_TREE_ROOT_DIRTY_LOG_PAGES);
		extent_io_tree_init(fs_info, &root->log_csum_range,
				    IO_TREE_LOG_CSUM_RANGE);
	}

	spin_lock_init(&root->root_item_lock);
	btrfs_qgroup_init_swapped_blocks(&root->swapped_blocks);
#ifdef CONFIG_BTRFS_DEBUG
	INIT_LIST_HEAD(&root->leak_list);
	spin_lock(&fs_info->fs_roots_radix_lock);
	list_add_tail(&root->leak_list, &fs_info->allocated_roots);
	spin_unlock(&fs_info->fs_roots_radix_lock);
#endif
}

static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info,
					   u64 objectid, gfp_t flags)
{
	struct btrfs_root *root = kzalloc(sizeof(*root), flags);
	if (root)
		__setup_root(root, fs_info, objectid);
	return root;
}

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
/* Should only be used by the testing infrastructure */
struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;

	if (!fs_info)
		return ERR_PTR(-EINVAL);

	root = btrfs_alloc_root(fs_info, BTRFS_ROOT_TREE_OBJECTID, GFP_KERNEL);
	if (!root)
		return ERR_PTR(-ENOMEM);

	/* Tests allocate fake tree blocks sequentially, reset the cursor. */
	root->alloc_bytenr = 0;

	return root;
}
#endif

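/*
 * rb-tree comparators for the global root tree, keying the global roots
 * (extent, csum, free space tree) by their root_key.
 */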
static int global_root_cmp(struct rb_node *a_node, const struct rb_node *b_node)
{
	const struct btrfs_root *a = rb_entry(a_node, struct btrfs_root, rb_node);
	const struct btrfs_root *b = rb_entry(b_node, struct btrfs_root, rb_node);

	return btrfs_comp_cpu_keys(&a->root_key, &b->root_key);
}

static int global_root_key_cmp(const void *k, const struct rb_node *node)
{
	const struct btrfs_key *key = k;
	const struct btrfs_root *root = rb_entry(node, struct btrfs_root, rb_node);

	return btrfs_comp_cpu_keys(key, &root->root_key);
}

int btrfs_global_root_insert(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *tmp;
	int ret = 0;

	write_lock(&fs_info->global_root_lock);
	tmp = rb_find_add(&root->rb_node, &fs_info->global_root_tree, global_root_cmp);
	write_unlock(&fs_info->global_root_lock);

	if (tmp) {
		ret = -EEXIST;
		btrfs_warn(fs_info, "global root %llu %llu already exists",
			   btrfs_root_id(root), root->root_key.offset);
	}
	return ret;
}

void btrfs_global_root_delete(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	write_lock(&fs_info->global_root_lock);
	rb_erase(&root->rb_node, &fs_info->global_root_tree);
	write_unlock(&fs_info->global_root_lock);
}

struct btrfs_root *btrfs_global_root(struct btrfs_fs_info *fs_info,
				     struct btrfs_key *key)
{
	struct rb_node *node;
	struct btrfs_root *root = NULL;

	read_lock(&fs_info->global_root_lock);
	node = rb_find(key, &fs_info->global_root_tree, global_root_key_cmp);
	if (node)
		root = container_of(node, struct btrfs_root, rb_node);
	read_unlock(&fs_info->global_root_lock);

	return root;
}

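/*
 * Map a logical address to the id of the global root covering it. Without
 * the EXTENT_TREE_V2 incompat flag there is only one global root (id 0),
 * otherwise the id comes from the block group containing @bytenr.
 */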
static u64 btrfs_global_root_id(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_block_group *block_group;
	u64 ret;

	if (!btrfs_fs_incompat(fs_info, EXTENT_TREE_V2))
		return 0;

	if (bytenr)
		block_group = btrfs_lookup_block_group(fs_info, bytenr);
	else
		block_group = btrfs_lookup_first_block_group(fs_info, bytenr);
	ASSERT(block_group);
	if (!block_group)
		return 0;
	ret = block_group->global_root_id;
	btrfs_put_block_group(block_group);

	return ret;
}

struct btrfs_root *btrfs_csum_root(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_key key = {
		.objectid = BTRFS_CSUM_TREE_OBJECTID,
		.type = BTRFS_ROOT_ITEM_KEY,
		.offset = btrfs_global_root_id(fs_info, bytenr),
	};

	return btrfs_global_root(fs_info, &key);
}

struct btrfs_root *btrfs_extent_root(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_key key = {
		.objectid = BTRFS_EXTENT_TREE_OBJECTID,
		.type = BTRFS_ROOT_ITEM_KEY,
		.offset = btrfs_global_root_id(fs_info, bytenr),
	};

	return btrfs_global_root(fs_info, &key);
}

struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
				     u64 objectid)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct extent_buffer *leaf;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root;
	struct btrfs_key key;
	unsigned int nofs_flag;
	int ret = 0;

	/*
	 * We're holding a transaction handle, so use a NOFS memory allocation
	 * context to avoid deadlock if reclaim happens.
	 */
	nofs_flag = memalloc_nofs_save();
	root = btrfs_alloc_root(fs_info, objectid, GFP_KERNEL);
	memalloc_nofs_restore(nofs_flag);
	if (!root)
		return ERR_PTR(-ENOMEM);

	root->root_key.objectid = objectid;
	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
	root->root_key.offset = 0;

	leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0,
				      0, BTRFS_NESTING_NORMAL);
	if (IS_ERR(leaf)) {
		ret = PTR_ERR(leaf);
		leaf = NULL;
		goto fail;
	}

	root->node = leaf;
	btrfs_mark_buffer_dirty(trans, leaf);

	root->commit_root = btrfs_root_node(root);
	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);

	btrfs_set_root_flags(&root->root_item, 0);
	btrfs_set_root_limit(&root->root_item, 0);
	btrfs_set_root_bytenr(&root->root_item, leaf->start);
	btrfs_set_root_generation(&root->root_item, trans->transid);
	btrfs_set_root_level(&root->root_item, 0);
	btrfs_set_root_refs(&root->root_item, 1);
	btrfs_set_root_used(&root->root_item, leaf->len);
	btrfs_set_root_last_snapshot(&root->root_item, 0);
	btrfs_set_root_dirid(&root->root_item, 0);
	if (is_fstree(objectid))
		generate_random_guid(root->root_item.uuid);
	else
		export_guid(root->root_item.uuid, &guid_null);
	btrfs_set_root_drop_level(&root->root_item, 0);

	btrfs_tree_unlock(leaf);

	key.objectid = objectid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = 0;
	ret = btrfs_insert_root(trans, tree_root, &key, &root->root_item);
	if (ret)
		goto fail;

	return root;

fail:
	btrfs_put_root(root);

	return ERR_PTR(ret);
}

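/*
 * Allocate the in-memory root for a log tree. The tree node itself is
 * allocated separately via btrfs_alloc_log_tree_node(), since zoned
 * filesystems defer that allocation (see btrfs_init_log_root_tree()).
 */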
static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
					 struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;

	root = btrfs_alloc_root(fs_info, BTRFS_TREE_LOG_OBJECTID, GFP_NOFS);
	if (!root)
		return ERR_PTR(-ENOMEM);

	root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
	root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;

	return root;
}

int btrfs_alloc_log_tree_node(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root)
{
	struct extent_buffer *leaf;

	/*
	 * DON'T set SHAREABLE bit for log trees.
	 *
	 * Log trees are not exposed to user space thus can't be snapshotted,
	 * and they go away before a real commit is actually done.
	 *
	 * They do store pointers to file data extents, and those reference
	 * counts still get updated (along with back refs to the log tree).
	 */

	leaf = btrfs_alloc_tree_block(trans, root, 0, BTRFS_TREE_LOG_OBJECTID,
			NULL, 0, 0, 0, 0, BTRFS_NESTING_NORMAL);
	if (IS_ERR(leaf))
		return PTR_ERR(leaf);

	root->node = leaf;

	btrfs_mark_buffer_dirty(trans, root->node);
	btrfs_tree_unlock(root->node);

	return 0;
}

int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *log_root;

	log_root = alloc_log_tree(trans, fs_info);
	if (IS_ERR(log_root))
		return PTR_ERR(log_root);

	if (!btrfs_is_zoned(fs_info)) {
		int ret = btrfs_alloc_log_tree_node(trans, log_root);

		if (ret) {
			btrfs_put_root(log_root);
			return ret;
		}
	}

	WARN_ON(fs_info->log_root_tree);
	fs_info->log_root_tree = log_root;
	return 0;
}

int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *log_root;
	struct btrfs_inode_item *inode_item;
	int ret;

	log_root = alloc_log_tree(trans, fs_info);
	if (IS_ERR(log_root))
		return PTR_ERR(log_root);

	ret = btrfs_alloc_log_tree_node(trans, log_root);
	if (ret) {
		btrfs_put_root(log_root);
		return ret;
	}

	btrfs_set_root_last_trans(log_root, trans->transid);
	log_root->root_key.offset = btrfs_root_id(root);

	inode_item = &log_root->root_item.inode;
	btrfs_set_stack_inode_generation(inode_item, 1);
	btrfs_set_stack_inode_size(inode_item, 3);
	btrfs_set_stack_inode_nlink(inode_item, 1);
	btrfs_set_stack_inode_nbytes(inode_item,
				     fs_info->nodesize);
	btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);

	btrfs_set_root_node(&log_root->root_item, log_root->node);

	WARN_ON(root->log_root);
	root->log_root = log_root;
	btrfs_set_root_log_transid(root, 0);
	root->log_transid_committed = -1;
	btrfs_set_root_last_log_commit(root, 0);
	return 0;
}

static struct btrfs_root *read_tree_root_path(struct btrfs_root *tree_root,
					      struct btrfs_path *path,
					      const struct btrfs_key *key)
{
	struct btrfs_root *root;
	struct btrfs_tree_parent_check check = { 0 };
	struct btrfs_fs_info *fs_info = tree_root->fs_info;
	u64 generation;
	int ret;
	int level;

	root = btrfs_alloc_root(fs_info, key->objectid, GFP_NOFS);
	if (!root)
		return ERR_PTR(-ENOMEM);

	ret = btrfs_find_root(tree_root, key, path,
			      &root->root_item, &root->root_key);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		goto fail;
	}

	generation = btrfs_root_generation(&root->root_item);
	level = btrfs_root_level(&root->root_item);
	check.level = level;
	check.transid = generation;
	check.owner_root = key->objectid;
	root->node = read_tree_block(fs_info, btrfs_root_bytenr(&root->root_item),
				     &check);
	if (IS_ERR(root->node)) {
		ret = PTR_ERR(root->node);
		root->node = NULL;
		goto fail;
	}
	if (!btrfs_buffer_uptodate(root->node, generation, 0)) {
		ret = -EIO;
		goto fail;
	}

	/*
	 * For real fs, and not log/reloc trees, root owner must
	 * match its root node owner
	 */
	if (!btrfs_is_testing(fs_info) &&
	    btrfs_root_id(root) != BTRFS_TREE_LOG_OBJECTID &&
	    btrfs_root_id(root) != BTRFS_TREE_RELOC_OBJECTID &&
	    btrfs_root_id(root) != btrfs_header_owner(root->node)) {
		btrfs_crit(fs_info,
"root=%llu block=%llu, tree root owner mismatch, have %llu expect %llu",
			   btrfs_root_id(root), root->node->start,
			   btrfs_header_owner(root->node),
			   btrfs_root_id(root));
		ret = -EUCLEAN;
		goto fail;
	}
	root->commit_root = btrfs_root_node(root);
	return root;
fail:
	btrfs_put_root(root);
	return ERR_PTR(ret);
}

struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
					const struct btrfs_key *key)
{
	struct btrfs_root *root;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return ERR_PTR(-ENOMEM);
	root = read_tree_root_path(tree_root, path, key);
	btrfs_free_path(path);

	return root;
}

/*
 * Initialize a subvolume root's in-memory structure.
 *
 * @anon_dev:	anonymous device to attach to the root; if zero, a new one is
 *		allocated
 */
static int btrfs_init_fs_root(struct btrfs_root *root, dev_t anon_dev)
{
	int ret;

	btrfs_drew_lock_init(&root->snapshot_lock);

	if (btrfs_root_id(root) != BTRFS_TREE_LOG_OBJECTID &&
	    !btrfs_is_data_reloc_root(root) &&
	    is_fstree(btrfs_root_id(root))) {
		set_bit(BTRFS_ROOT_SHAREABLE, &root->state);
		btrfs_check_and_init_root_item(&root->root_item);
	}

	/*
	 * Don't assign an anonymous block device to roots that are not
	 * exposed to user space, the id pool is limited to 1M.
	 */
	if (is_fstree(btrfs_root_id(root)) &&
	    btrfs_root_refs(&root->root_item) > 0) {
		if (!anon_dev) {
			ret = get_anon_bdev(&root->anon_dev);
			if (ret)
				goto fail;
		} else {
			root->anon_dev = anon_dev;
		}
	}

	mutex_lock(&root->objectid_mutex);
	ret = btrfs_init_root_free_objectid(root);
	if (ret) {
		mutex_unlock(&root->objectid_mutex);
		goto fail;
	}

	ASSERT(root->free_objectid <= BTRFS_LAST_FREE_OBJECTID);

	mutex_unlock(&root->objectid_mutex);

	return 0;
fail:
	/* The caller is responsible to call btrfs_free_fs_root */
	return ret;
}

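/*
 * Look up a cached fs root in the radix tree. Returns the root with an extra
 * reference grabbed, or NULL if it is not cached.
 */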
static struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
					       u64 root_id)
{
	struct btrfs_root *root;

	spin_lock(&fs_info->fs_roots_radix_lock);
	root = radix_tree_lookup(&fs_info->fs_roots_radix,
				 (unsigned long)root_id);
	root = btrfs_grab_root(root);
	spin_unlock(&fs_info->fs_roots_radix_lock);
	return root;
}

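/*
 * Return a referenced in-memory root for one of the well-known tree
 * objectids, or NULL if @objectid does not name a global tree.
 */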
static struct btrfs_root *btrfs_get_global_root(struct btrfs_fs_info *fs_info,
						u64 objectid)
{
	struct btrfs_key key = {
		.objectid = objectid,
		.type = BTRFS_ROOT_ITEM_KEY,
		.offset = 0,
	};

	switch (objectid) {
	case BTRFS_ROOT_TREE_OBJECTID:
		return btrfs_grab_root(fs_info->tree_root);
	case BTRFS_EXTENT_TREE_OBJECTID:
		return btrfs_grab_root(btrfs_global_root(fs_info, &key));
	case BTRFS_CHUNK_TREE_OBJECTID:
		return btrfs_grab_root(fs_info->chunk_root);
	case BTRFS_DEV_TREE_OBJECTID:
		return btrfs_grab_root(fs_info->dev_root);
	case BTRFS_CSUM_TREE_OBJECTID:
		return btrfs_grab_root(btrfs_global_root(fs_info, &key));
	case BTRFS_QUOTA_TREE_OBJECTID:
		return btrfs_grab_root(fs_info->quota_root);
	case BTRFS_UUID_TREE_OBJECTID:
		return btrfs_grab_root(fs_info->uuid_root);
	case BTRFS_BLOCK_GROUP_TREE_OBJECTID:
		return btrfs_grab_root(fs_info->block_group_root);
	case BTRFS_FREE_SPACE_TREE_OBJECTID:
		return btrfs_grab_root(btrfs_global_root(fs_info, &key));
	case BTRFS_RAID_STRIPE_TREE_OBJECTID:
		return btrfs_grab_root(fs_info->stripe_root);
	default:
		return NULL;
	}
}

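/*
 * Insert @root into the fs_roots radix tree, taking an extra reference on
 * success. Returns -EEXIST if a root with the same id is already cached.
 */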
int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
			 struct btrfs_root *root)
{
	int ret;

	ret = radix_tree_preload(GFP_NOFS);
	if (ret)
		return ret;

	spin_lock(&fs_info->fs_roots_radix_lock);
	ret = radix_tree_insert(&fs_info->fs_roots_radix,
				(unsigned long)btrfs_root_id(root),
				root);
	if (ret == 0) {
		btrfs_grab_root(root);
		set_bit(BTRFS_ROOT_IN_RADIX, &root->state);
	}
	spin_unlock(&fs_info->fs_roots_radix_lock);
	radix_tree_preload_end();

	return ret;
}

void btrfs_check_leaked_roots(const struct btrfs_fs_info *fs_info)
{
#ifdef CONFIG_BTRFS_DEBUG
	struct btrfs_root *root;

	while (!list_empty(&fs_info->allocated_roots)) {
		char buf[BTRFS_ROOT_NAME_BUF_LEN];

		root = list_first_entry(&fs_info->allocated_roots,
					struct btrfs_root, leak_list);
		btrfs_err(fs_info, "leaked root %s refcount %d",
			  btrfs_root_name(&root->root_key, buf),
			  refcount_read(&root->refs));
		WARN_ON_ONCE(1);
		while (refcount_read(&root->refs) > 1)
			btrfs_put_root(root);
		btrfs_put_root(root);
	}
#endif
}

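/*
 * Drop every root from the global root tree, releasing the references the
 * tree holds on them.
 */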
static void free_global_roots(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;
	struct rb_node *node;

	while ((node = rb_first_postorder(&fs_info->global_root_tree)) != NULL) {
		root = rb_entry(node, struct btrfs_root, rb_node);
		rb_erase(&root->rb_node, &fs_info->global_root_tree);
		btrfs_put_root(root);
	}
}

void btrfs_free_fs_info(struct btrfs_fs_info *fs_info)
{
	struct percpu_counter *em_counter = &fs_info->evictable_extent_maps;

	percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
	percpu_counter_destroy(&fs_info->delalloc_bytes);
	percpu_counter_destroy(&fs_info->ordered_bytes);
	if (percpu_counter_initialized(em_counter))
		ASSERT(percpu_counter_sum_positive(em_counter) == 0);
	percpu_counter_destroy(em_counter);
	percpu_counter_destroy(&fs_info->dev_replace.bio_counter);
	btrfs_free_csum_hash(fs_info);
	btrfs_free_stripe_hash_table(fs_info);
	btrfs_free_ref_cache(fs_info);
	kfree(fs_info->balance_ctl);
	kfree(fs_info->delayed_root);
	free_global_roots(fs_info);
	btrfs_put_root(fs_info->tree_root);
	btrfs_put_root(fs_info->chunk_root);
	btrfs_put_root(fs_info->dev_root);
	btrfs_put_root(fs_info->quota_root);
	btrfs_put_root(fs_info->uuid_root);
	btrfs_put_root(fs_info->fs_root);
	btrfs_put_root(fs_info->data_reloc_root);
	btrfs_put_root(fs_info->block_group_root);
	btrfs_put_root(fs_info->stripe_root);
	btrfs_check_leaked_roots(fs_info);
	btrfs_extent_buffer_leak_debug_check(fs_info);
	kfree(fs_info->super_copy);
	kfree(fs_info->super_for_commit);
	kvfree(fs_info);
}

/*
 * Get an in-memory reference of a root structure.
 *
 * For essential trees like the root/extent tree, we grab them from fs_info
 * directly. For subvolume trees, we check the cached filesystem roots first.
 * If not found, then read the root from disk and add it to the cached fs
 * roots.
 *
 * Caller should release the root by calling btrfs_put_root() after use.
 *
 * NOTE: Reloc and log trees can't be read by this function as they share the
 *	 same root objectid.
 *
 * @objectid:	root id
 * @anon_dev:	preallocated anonymous block device number for new roots;
 *		pass NULL for a new allocation
 * @check_ref:	whether to check root item references; if true, return -ENOENT
 *		for orphan roots
 */
static struct btrfs_root *btrfs_get_root_ref(struct btrfs_fs_info *fs_info,
					     u64 objectid, dev_t *anon_dev,
					     bool check_ref)
{
	struct btrfs_root *root;
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	root = btrfs_get_global_root(fs_info, objectid);
	if (root)
		return root;

	/*
	 * If we're called for non-subvolume trees, and above function didn't
	 * find one, do not try to read it from disk.
	 *
	 * This is namely for free-space-tree and quota tree, which can change
	 * at runtime and should only be grabbed from fs_info.
	 */
	if (!is_fstree(objectid) && objectid != BTRFS_DATA_RELOC_TREE_OBJECTID)
		return ERR_PTR(-ENOENT);
again:
	root = btrfs_lookup_fs_root(fs_info, objectid);
	if (root) {
		/*
		 * Some other caller may have read out the newly inserted
		 * subvolume already (for things like backref walk etc).  Not
		 * that common but still possible.  In that case, we just need
		 * to free the anon_dev.
		 */
		if (unlikely(anon_dev && *anon_dev)) {
			free_anon_bdev(*anon_dev);
			*anon_dev = 0;
		}

		if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
			btrfs_put_root(root);
			return ERR_PTR(-ENOENT);
		}
		return root;
	}

	key.objectid = objectid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;
	root = btrfs_read_tree_root(fs_info->tree_root, &key);
	if (IS_ERR(root))
		return root;

	if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
		ret = -ENOENT;
		goto fail;
	}

	ret = btrfs_init_fs_root(root, anon_dev ? *anon_dev : 0);
	if (ret)
		goto fail;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto fail;
	}
	key.objectid = BTRFS_ORPHAN_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = objectid;

	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
	btrfs_free_path(path);
	if (ret < 0)
		goto fail;
	if (ret == 0)
		set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state);

	ret = btrfs_insert_fs_root(fs_info, root);
	if (ret) {
		if (ret == -EEXIST) {
			btrfs_put_root(root);
			goto again;
		}
		goto fail;
	}
	return root;
fail:
	/*
	 * If our caller provided us an anonymous device, then it's the
	 * caller's responsibility to free it in case we fail. So we have to
	 * set our root's anon_dev to 0 to avoid a double free, once by
	 * btrfs_put_root() and once again by our caller.
	 */
	if (anon_dev && *anon_dev)
		root->anon_dev = 0;
	btrfs_put_root(root);
	return ERR_PTR(ret);
}

/*
 * Get in-memory reference of a root structure
 *
 * @objectid:	tree objectid
 * @check_ref:	if set, verify that the tree exists and the item has at least
 *		one reference
 */
struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
				     u64 objectid, bool check_ref)
{
	return btrfs_get_root_ref(fs_info, objectid, NULL, check_ref);
}

/*
 * Get in-memory reference of a root structure, created as new, optionally
 * passing the anonymous block device id
 *
 * @objectid:	tree objectid
 * @anon_dev:	if NULL, allocate a new anonymous block device; otherwise use
 *		the value it points to
 */
struct btrfs_root *btrfs_get_new_fs_root(struct btrfs_fs_info *fs_info,
					 u64 objectid, dev_t *anon_dev)
{
	return btrfs_get_root_ref(fs_info, objectid, anon_dev, true);
}

/*
 * Return a root for the given objectid.
 *
 * @fs_info:	the fs_info
 * @objectid:	the objectid we need to lookup
 *
 * This is exclusively used for backref walking, and exists specifically because
 * of how qgroups do lookups.  Qgroups will do a backref lookup at delayed ref
 * creation time, which means we may have to read the tree_root in order to look
 * up a fs root that is not in memory.  If the root is not in memory we will
 * read the tree root commit root and look up the fs root from there.  This is a
 * temporary root, it will not be inserted into the radix tree as it doesn't
 * have the most uptodate information, it'll simply be discarded once the
 * backref code is finished using the root.
 */
struct btrfs_root *btrfs_get_fs_root_commit_root(struct btrfs_fs_info *fs_info,
						 struct btrfs_path *path,
						 u64 objectid)
{
	struct btrfs_root *root;
	struct btrfs_key key;

	ASSERT(path->search_commit_root && path->skip_locking);

	/*
	 * This can return -ENOENT if we ask for a root that doesn't exist, but
	 * since this is called via the backref walking code we won't be looking
	 * up a root that doesn't exist, unless there's corruption.  So if root
	 * != NULL just return it.
	 */
	root = btrfs_get_global_root(fs_info, objectid);
	if (root)
		return root;

	root = btrfs_lookup_fs_root(fs_info, objectid);
	if (root)
		return root;

	key.objectid = objectid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;
	root = read_tree_root_path(fs_info->tree_root, path, &key);
	btrfs_release_path(path);

	return root;
}

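/*
 * Background thread doing the periodic cleanup work: running delayed iputs,
 * cleaning deleted snapshots/subvolumes, defragging inodes, and deleting and
 * reclaiming unused block groups.
 */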
static int cleaner_kthread(void *arg)
{
	struct btrfs_fs_info *fs_info = arg;
	int again;

	while (1) {
		again = 0;

		set_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags);

		/* Make the cleaner go to sleep early. */
		if (btrfs_need_cleaner_sleep(fs_info))
			goto sleep;

		/*
		 * Do not do anything if we might cause open_ctree() to block
		 * before we have finished mounting the filesystem.
		 */
		if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
			goto sleep;

		if (!mutex_trylock(&fs_info->cleaner_mutex))
			goto sleep;

		/*
		 * Avoid the problem that we change the status of the fs
		 * during the above check and trylock.
		 */
		if (btrfs_need_cleaner_sleep(fs_info)) {
			mutex_unlock(&fs_info->cleaner_mutex);
			goto sleep;
		}

		if (test_and_clear_bit(BTRFS_FS_FEATURE_CHANGED, &fs_info->flags))
			btrfs_sysfs_feature_update(fs_info);

		btrfs_run_delayed_iputs(fs_info);

		again = btrfs_clean_one_deleted_snapshot(fs_info);
		mutex_unlock(&fs_info->cleaner_mutex);

		/*
		 * The defragger has dealt with the R/O remount and umount,
		 * so we needn't do anything special here.
		 */
		btrfs_run_defrag_inodes(fs_info);

		/*
		 * Acquires fs_info->reclaim_bgs_lock to avoid racing
		 * with relocation (btrfs_relocate_chunk) and relocation
		 * acquires fs_info->cleaner_mutex (btrfs_relocate_block_group)
		 * after acquiring fs_info->reclaim_bgs_lock. So we
		 * can't hold, nor need to, fs_info->cleaner_mutex when deleting
		 * unused block groups.
		 */
		btrfs_delete_unused_bgs(fs_info);

		/*
		 * Reclaim block groups in the reclaim_bgs list after we deleted
		 * all unused block_groups. This possibly gives us some more free
		 * space.
		 */
		btrfs_reclaim_bgs(fs_info);
sleep:
		clear_and_wake_up_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags);
		if (kthread_should_park())
			kthread_parkme();
		if (kthread_should_stop())
			return 0;
		if (!again) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule();
			__set_current_state(TASK_RUNNING);
		}
	}
}

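/*
 * Background thread that commits the running transaction once the commit
 * interval has elapsed, or sooner when a commit was explicitly requested.
 */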
static int transaction_kthread(void *arg)
{
	struct btrfs_root *root = arg;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	struct btrfs_transaction *cur;
	u64 transid;
	time64_t delta;
	unsigned long delay;
	bool cannot_commit;

	do {
		cannot_commit = false;
		delay = msecs_to_jiffies(fs_info->commit_interval * 1000);
		mutex_lock(&fs_info->transaction_kthread_mutex);

		spin_lock(&fs_info->trans_lock);
		cur = fs_info->running_transaction;
		if (!cur) {
			spin_unlock(&fs_info->trans_lock);
			goto sleep;
		}

		delta = ktime_get_seconds() - cur->start_time;
		if (!test_and_clear_bit(BTRFS_FS_COMMIT_TRANS, &fs_info->flags) &&
		    cur->state < TRANS_STATE_COMMIT_PREP &&
		    delta < fs_info->commit_interval) {
			spin_unlock(&fs_info->trans_lock);
			delay -= msecs_to_jiffies((delta - 1) * 1000);
			delay = min(delay,
				    msecs_to_jiffies(fs_info->commit_interval * 1000));
			goto sleep;
		}
		transid = cur->transid;
		spin_unlock(&fs_info->trans_lock);

		/* If the file system is aborted, this will always fail. */
		trans = btrfs_attach_transaction(root);
		if (IS_ERR(trans)) {
			if (PTR_ERR(trans) != -ENOENT)
				cannot_commit = true;
			goto sleep;
		}
		if (transid == trans->transid) {
			btrfs_commit_transaction(trans);
		} else {
			btrfs_end_transaction(trans);
		}
sleep:
		wake_up_process(fs_info->cleaner_kthread);
		mutex_unlock(&fs_info->transaction_kthread_mutex);

		if (BTRFS_FS_ERROR(fs_info))
			btrfs_cleanup_transaction(fs_info);
		if (!kthread_should_stop() &&
				(!btrfs_transaction_blocked(fs_info) ||
				 cannot_commit))
			schedule_timeout_interruptible(delay);
	} while (!kthread_should_stop());
	return 0;
}

/*
 * This will find the highest generation in the array of root backups.  The
 * index of the newest entry is returned, or -EINVAL if we can't find
 * anything.
 *
 * We check to make sure the array is valid by comparing the
 * generation of the latest root in the array with the generation
 * in the super block.  If they don't match we pitch it.
 */
find_newest_super_backup(struct btrfs_fs_info * info)1629 static int find_newest_super_backup(struct btrfs_fs_info *info)
1630 {
1631 	const u64 newest_gen = btrfs_super_generation(info->super_copy);
1632 	u64 cur;
1633 	struct btrfs_root_backup *root_backup;
1634 	int i;
1635 
1636 	for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
1637 		root_backup = info->super_copy->super_roots + i;
1638 		cur = btrfs_backup_tree_root_gen(root_backup);
1639 		if (cur == newest_gen)
1640 			return i;
1641 	}
1642 
1643 	return -EINVAL;
1644 }
1645 
1646 /*
1647  * copy all the root pointers into the super backup array.
1648  * this will bump the backup pointer by one when it is
1649  * done
1650  */
backup_super_roots(struct btrfs_fs_info * info)1651 static void backup_super_roots(struct btrfs_fs_info *info)
1652 {
1653 	const int next_backup = info->backup_root_index;
1654 	struct btrfs_root_backup *root_backup;
1655 
1656 	root_backup = info->super_for_commit->super_roots + next_backup;
1657 
1658 	/*
1659 	 * make sure all of our padding and empty slots get zero filled
1660 	 * regardless of which ones we use today
1661 	 */
1662 	memset(root_backup, 0, sizeof(*root_backup));
1663 
1664 	info->backup_root_index = (next_backup + 1) % BTRFS_NUM_BACKUP_ROOTS;
1665 
1666 	btrfs_set_backup_tree_root(root_backup, info->tree_root->node->start);
1667 	btrfs_set_backup_tree_root_gen(root_backup,
1668 			       btrfs_header_generation(info->tree_root->node));
1669 
1670 	btrfs_set_backup_tree_root_level(root_backup,
1671 			       btrfs_header_level(info->tree_root->node));
1672 
1673 	btrfs_set_backup_chunk_root(root_backup, info->chunk_root->node->start);
1674 	btrfs_set_backup_chunk_root_gen(root_backup,
1675 			       btrfs_header_generation(info->chunk_root->node));
1676 	btrfs_set_backup_chunk_root_level(root_backup,
1677 			       btrfs_header_level(info->chunk_root->node));
1678 
1679 	if (!btrfs_fs_compat_ro(info, BLOCK_GROUP_TREE)) {
1680 		struct btrfs_root *extent_root = btrfs_extent_root(info, 0);
1681 		struct btrfs_root *csum_root = btrfs_csum_root(info, 0);
1682 
1683 		btrfs_set_backup_extent_root(root_backup,
1684 					     extent_root->node->start);
1685 		btrfs_set_backup_extent_root_gen(root_backup,
1686 				btrfs_header_generation(extent_root->node));
1687 		btrfs_set_backup_extent_root_level(root_backup,
1688 					btrfs_header_level(extent_root->node));
1689 
1690 		btrfs_set_backup_csum_root(root_backup, csum_root->node->start);
1691 		btrfs_set_backup_csum_root_gen(root_backup,
1692 					       btrfs_header_generation(csum_root->node));
1693 		btrfs_set_backup_csum_root_level(root_backup,
1694 						 btrfs_header_level(csum_root->node));
1695 	}
1696 
1697 	/*
1698 	 * we might commit during log recovery, which happens before we set
1699 	 * the fs_root.  Make sure it is valid before we fill it in.
1700 	 */
1701 	if (info->fs_root && info->fs_root->node) {
1702 		btrfs_set_backup_fs_root(root_backup,
1703 					 info->fs_root->node->start);
1704 		btrfs_set_backup_fs_root_gen(root_backup,
1705 			       btrfs_header_generation(info->fs_root->node));
1706 		btrfs_set_backup_fs_root_level(root_backup,
1707 			       btrfs_header_level(info->fs_root->node));
1708 	}
1709 
1710 	btrfs_set_backup_dev_root(root_backup, info->dev_root->node->start);
1711 	btrfs_set_backup_dev_root_gen(root_backup,
1712 			       btrfs_header_generation(info->dev_root->node));
1713 	btrfs_set_backup_dev_root_level(root_backup,
1714 				       btrfs_header_level(info->dev_root->node));
1715 
1716 	btrfs_set_backup_total_bytes(root_backup,
1717 			     btrfs_super_total_bytes(info->super_copy));
1718 	btrfs_set_backup_bytes_used(root_backup,
1719 			     btrfs_super_bytes_used(info->super_copy));
1720 	btrfs_set_backup_num_devices(root_backup,
1721 			     btrfs_super_num_devices(info->super_copy));
1722 
1723 	/*
1724 	 * if we don't copy this out to the super_copy, it won't get remembered
1725 	 * for the next commit
1726 	 */
1727 	memcpy(&info->super_copy->super_roots,
1728 	       &info->super_for_commit->super_roots,
1729 	       sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS);
1730 }
1731 
1732 /*
1733  * Reads a backup root based on the passed priority. Prio 0 is the newest, prio
1734  * 1/2/3 are 2nd newest/3rd newest/4th (oldest) backup roots
1735  *
1736  * @fs_info:  filesystem whose backup roots need to be read
1737  * @priority: priority of backup root required
1738  *
1739  * Returns backup root index on success and -EINVAL otherwise.
1740  */
1741 static int read_backup_root(struct btrfs_fs_info *fs_info, u8 priority)
1742 {
1743 	int backup_index = find_newest_super_backup(fs_info);
1744 	struct btrfs_super_block *super = fs_info->super_copy;
1745 	struct btrfs_root_backup *root_backup;
1746 
1747 	if (priority < BTRFS_NUM_BACKUP_ROOTS && backup_index >= 0) {
1748 		if (priority == 0)
1749 			return backup_index;
1750 
1751 		backup_index = backup_index + BTRFS_NUM_BACKUP_ROOTS - priority;
1752 		backup_index %= BTRFS_NUM_BACKUP_ROOTS;
1753 	} else {
1754 		return -EINVAL;
1755 	}
1756 
1757 	root_backup = super->super_roots + backup_index;
1758 
1759 	btrfs_set_super_generation(super,
1760 				   btrfs_backup_tree_root_gen(root_backup));
1761 	btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup));
1762 	btrfs_set_super_root_level(super,
1763 				   btrfs_backup_tree_root_level(root_backup));
1764 	btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup));
1765 
1766 	/*
1767 	 * FIXME: the total bytes and num_devices need to match, otherwise we
1768 	 * should require a fsck.
1769 	 */
1770 	btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup));
1771 	btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup));
1772 
1773 	return backup_index;
1774 }
1775 
1776 /* helper to cleanup workers */
1777 static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
1778 {
1779 	btrfs_destroy_workqueue(fs_info->fixup_workers);
1780 	btrfs_destroy_workqueue(fs_info->delalloc_workers);
1781 	btrfs_destroy_workqueue(fs_info->workers);
1782 	if (fs_info->endio_workers)
1783 		destroy_workqueue(fs_info->endio_workers);
1784 	if (fs_info->rmw_workers)
1785 		destroy_workqueue(fs_info->rmw_workers);
1786 	if (fs_info->compressed_write_workers)
1787 		destroy_workqueue(fs_info->compressed_write_workers);
1788 	btrfs_destroy_workqueue(fs_info->endio_write_workers);
1789 	btrfs_destroy_workqueue(fs_info->endio_freespace_worker);
1790 	btrfs_destroy_workqueue(fs_info->delayed_workers);
1791 	btrfs_destroy_workqueue(fs_info->caching_workers);
1792 	btrfs_destroy_workqueue(fs_info->flush_workers);
1793 	btrfs_destroy_workqueue(fs_info->qgroup_rescan_workers);
1794 	if (fs_info->discard_ctl.discard_workers)
1795 		destroy_workqueue(fs_info->discard_ctl.discard_workers);
1796 	/*
1797 	 * Now that all other work queues are destroyed, we can safely destroy
1798 	 * the queues used for metadata I/O, since tasks from those other work
1799 	 * queues can do metadata I/O operations.
1800 	 */
1801 	if (fs_info->endio_meta_workers)
1802 		destroy_workqueue(fs_info->endio_meta_workers);
1803 }
1804 
1805 static void free_root_extent_buffers(struct btrfs_root *root)
1806 {
1807 	if (root) {
1808 		free_extent_buffer(root->node);
1809 		free_extent_buffer(root->commit_root);
1810 		root->node = NULL;
1811 		root->commit_root = NULL;
1812 	}
1813 }
1814 
1815 static void free_global_root_pointers(struct btrfs_fs_info *fs_info)
1816 {
1817 	struct btrfs_root *root, *tmp;
1818 
1819 	rbtree_postorder_for_each_entry_safe(root, tmp,
1820 					     &fs_info->global_root_tree,
1821 					     rb_node)
1822 		free_root_extent_buffers(root);
1823 }
1824 
1825 /* helper to cleanup tree roots */
1826 static void free_root_pointers(struct btrfs_fs_info *info, bool free_chunk_root)
1827 {
1828 	free_root_extent_buffers(info->tree_root);
1829 
1830 	free_global_root_pointers(info);
1831 	free_root_extent_buffers(info->dev_root);
1832 	free_root_extent_buffers(info->quota_root);
1833 	free_root_extent_buffers(info->uuid_root);
1834 	free_root_extent_buffers(info->fs_root);
1835 	free_root_extent_buffers(info->data_reloc_root);
1836 	free_root_extent_buffers(info->block_group_root);
1837 	free_root_extent_buffers(info->stripe_root);
1838 	if (free_chunk_root)
1839 		free_root_extent_buffers(info->chunk_root);
1840 }
1841 
1842 void btrfs_put_root(struct btrfs_root *root)
1843 {
1844 	if (!root)
1845 		return;
1846 
1847 	if (refcount_dec_and_test(&root->refs)) {
1848 		if (WARN_ON(!xa_empty(&root->inodes)))
1849 			xa_destroy(&root->inodes);
1850 		WARN_ON(test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state));
1851 		if (root->anon_dev)
1852 			free_anon_bdev(root->anon_dev);
1853 		free_root_extent_buffers(root);
1854 #ifdef CONFIG_BTRFS_DEBUG
1855 		spin_lock(&root->fs_info->fs_roots_radix_lock);
1856 		list_del_init(&root->leak_list);
1857 		spin_unlock(&root->fs_info->fs_roots_radix_lock);
1858 #endif
1859 		kfree(root);
1860 	}
1861 }
1862 
1863 void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info)
1864 {
1865 	int ret;
1866 	struct btrfs_root *gang[8];
1867 	int i;
1868 
1869 	while (!list_empty(&fs_info->dead_roots)) {
1870 		gang[0] = list_entry(fs_info->dead_roots.next,
1871 				     struct btrfs_root, root_list);
1872 		list_del(&gang[0]->root_list);
1873 
1874 		if (test_bit(BTRFS_ROOT_IN_RADIX, &gang[0]->state))
1875 			btrfs_drop_and_free_fs_root(fs_info, gang[0]);
1876 		btrfs_put_root(gang[0]);
1877 	}
1878 
1879 	while (1) {
1880 		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
1881 					     (void **)gang, 0,
1882 					     ARRAY_SIZE(gang));
1883 		if (!ret)
1884 			break;
1885 		for (i = 0; i < ret; i++)
1886 			btrfs_drop_and_free_fs_root(fs_info, gang[i]);
1887 	}
1888 }
1889 
1890 static void btrfs_init_scrub(struct btrfs_fs_info *fs_info)
1891 {
1892 	mutex_init(&fs_info->scrub_lock);
1893 	atomic_set(&fs_info->scrubs_running, 0);
1894 	atomic_set(&fs_info->scrub_pause_req, 0);
1895 	atomic_set(&fs_info->scrubs_paused, 0);
1896 	atomic_set(&fs_info->scrub_cancel_req, 0);
1897 	init_waitqueue_head(&fs_info->scrub_pause_wait);
1898 	refcount_set(&fs_info->scrub_workers_refcnt, 0);
1899 }
1900 
1901 static void btrfs_init_balance(struct btrfs_fs_info *fs_info)
1902 {
1903 	spin_lock_init(&fs_info->balance_lock);
1904 	mutex_init(&fs_info->balance_mutex);
1905 	atomic_set(&fs_info->balance_pause_req, 0);
1906 	atomic_set(&fs_info->balance_cancel_req, 0);
1907 	fs_info->balance_ctl = NULL;
1908 	init_waitqueue_head(&fs_info->balance_wait_q);
1909 	atomic_set(&fs_info->reloc_cancel_req, 0);
1910 }
1911 
1912 static int btrfs_init_btree_inode(struct super_block *sb)
1913 {
1914 	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
1915 	unsigned long hash = btrfs_inode_hash(BTRFS_BTREE_INODE_OBJECTID,
1916 					      fs_info->tree_root);
1917 	struct inode *inode;
1918 
1919 	inode = new_inode(sb);
1920 	if (!inode)
1921 		return -ENOMEM;
1922 
1923 	btrfs_set_inode_number(BTRFS_I(inode), BTRFS_BTREE_INODE_OBJECTID);
1924 	set_nlink(inode, 1);
1925 	/*
1926 	 * We set the i_size on the btree inode to the max possible offset.
1927 	 * The real end of the address space is determined by all of
1928 	 * the devices in the system.
1929 	 */
1930 	inode->i_size = OFFSET_MAX;
1931 	inode->i_mapping->a_ops = &btree_aops;
1932 	mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
1933 
1934 	extent_io_tree_init(fs_info, &BTRFS_I(inode)->io_tree,
1935 			    IO_TREE_BTREE_INODE_IO);
1936 	extent_map_tree_init(&BTRFS_I(inode)->extent_tree);
1937 
1938 	BTRFS_I(inode)->root = btrfs_grab_root(fs_info->tree_root);
1939 	set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
1940 	__insert_inode_hash(inode, hash);
1941 	fs_info->btree_inode = inode;
1942 
1943 	return 0;
1944 }
1945 
1946 static void btrfs_init_dev_replace_locks(struct btrfs_fs_info *fs_info)
1947 {
1948 	mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount);
1949 	init_rwsem(&fs_info->dev_replace.rwsem);
1950 	init_waitqueue_head(&fs_info->dev_replace.replace_wait);
1951 }
1952 
1953 static void btrfs_init_qgroup(struct btrfs_fs_info *fs_info)
1954 {
1955 	spin_lock_init(&fs_info->qgroup_lock);
1956 	mutex_init(&fs_info->qgroup_ioctl_lock);
1957 	fs_info->qgroup_tree = RB_ROOT;
1958 	INIT_LIST_HEAD(&fs_info->dirty_qgroups);
1959 	fs_info->qgroup_seq = 1;
1960 	fs_info->qgroup_ulist = NULL;
1961 	fs_info->qgroup_rescan_running = false;
1962 	fs_info->qgroup_drop_subtree_thres = BTRFS_QGROUP_DROP_SUBTREE_THRES_DEFAULT;
1963 	mutex_init(&fs_info->qgroup_rescan_lock);
1964 }
1965 
1966 static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info)
1967 {
1968 	u32 max_active = fs_info->thread_pool_size;
1969 	unsigned int flags = WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND;
1970 	unsigned int ordered_flags = WQ_MEM_RECLAIM | WQ_FREEZABLE;
1971 
1972 	fs_info->workers =
1973 		btrfs_alloc_workqueue(fs_info, "worker", flags, max_active, 16);
1974 
1975 	fs_info->delalloc_workers =
1976 		btrfs_alloc_workqueue(fs_info, "delalloc",
1977 				      flags, max_active, 2);
1978 
1979 	fs_info->flush_workers =
1980 		btrfs_alloc_workqueue(fs_info, "flush_delalloc",
1981 				      flags, max_active, 0);
1982 
1983 	fs_info->caching_workers =
1984 		btrfs_alloc_workqueue(fs_info, "cache", flags, max_active, 0);
1985 
1986 	fs_info->fixup_workers =
1987 		btrfs_alloc_ordered_workqueue(fs_info, "fixup", ordered_flags);
1988 
1989 	fs_info->endio_workers =
1990 		alloc_workqueue("btrfs-endio", flags, max_active);
1991 	fs_info->endio_meta_workers =
1992 		alloc_workqueue("btrfs-endio-meta", flags, max_active);
1993 	fs_info->rmw_workers = alloc_workqueue("btrfs-rmw", flags, max_active);
1994 	fs_info->endio_write_workers =
1995 		btrfs_alloc_workqueue(fs_info, "endio-write", flags,
1996 				      max_active, 2);
1997 	fs_info->compressed_write_workers =
1998 		alloc_workqueue("btrfs-compressed-write", flags, max_active);
1999 	fs_info->endio_freespace_worker =
2000 		btrfs_alloc_workqueue(fs_info, "freespace-write", flags,
2001 				      max_active, 0);
2002 	fs_info->delayed_workers =
2003 		btrfs_alloc_workqueue(fs_info, "delayed-meta", flags,
2004 				      max_active, 0);
2005 	fs_info->qgroup_rescan_workers =
2006 		btrfs_alloc_ordered_workqueue(fs_info, "qgroup-rescan",
2007 					      ordered_flags);
2008 	fs_info->discard_ctl.discard_workers =
2009 		alloc_ordered_workqueue("btrfs_discard", WQ_FREEZABLE);
2010 
2011 	if (!(fs_info->workers &&
2012 	      fs_info->delalloc_workers && fs_info->flush_workers &&
2013 	      fs_info->endio_workers && fs_info->endio_meta_workers &&
2014 	      fs_info->compressed_write_workers &&
2015 	      fs_info->endio_write_workers &&
2016 	      fs_info->endio_freespace_worker && fs_info->rmw_workers &&
2017 	      fs_info->caching_workers && fs_info->fixup_workers &&
2018 	      fs_info->delayed_workers && fs_info->qgroup_rescan_workers &&
2019 	      fs_info->discard_ctl.discard_workers)) {
2020 		return -ENOMEM;
2021 	}
2022 
2023 	return 0;
2024 }
2025 
2026 static int btrfs_init_csum_hash(struct btrfs_fs_info *fs_info, u16 csum_type)
2027 {
2028 	struct crypto_shash *csum_shash;
2029 	const char *csum_driver = btrfs_super_csum_driver(csum_type);
2030 
2031 	csum_shash = crypto_alloc_shash(csum_driver, 0, 0);
2032 
2033 	if (IS_ERR(csum_shash)) {
2034 		btrfs_err(fs_info, "error allocating %s hash for checksum",
2035 			  csum_driver);
2036 		return PTR_ERR(csum_shash);
2037 	}
2038 
2039 	fs_info->csum_shash = csum_shash;
2040 
2041 	/*
2042 	 * Check if the checksum implementation is a fast accelerated one.
2043 	 * As-is this is a bit of a hack and should be replaced once the csum
2044 	 * implementations provide that information themselves.
2045 	 */
2046 	switch (csum_type) {
2047 	case BTRFS_CSUM_TYPE_CRC32:
2048 		if (!strstr(crypto_shash_driver_name(csum_shash), "generic"))
2049 			set_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags);
2050 		break;
2051 	case BTRFS_CSUM_TYPE_XXHASH:
2052 		set_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags);
2053 		break;
2054 	default:
2055 		break;
2056 	}
2057 
2058 	btrfs_info(fs_info, "using %s (%s) checksum algorithm",
2059 			btrfs_super_csum_name(csum_type),
2060 			crypto_shash_driver_name(csum_shash));
2061 	return 0;
2062 }
2063 
2064 static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
2065 			    struct btrfs_fs_devices *fs_devices)
2066 {
2067 	int ret;
2068 	struct btrfs_tree_parent_check check = { 0 };
2069 	struct btrfs_root *log_tree_root;
2070 	struct btrfs_super_block *disk_super = fs_info->super_copy;
2071 	u64 bytenr = btrfs_super_log_root(disk_super);
2072 	int level = btrfs_super_log_root_level(disk_super);
2073 
2074 	if (fs_devices->rw_devices == 0) {
2075 		btrfs_warn(fs_info, "log replay required on RO media");
2076 		return -EIO;
2077 	}
2078 
2079 	log_tree_root = btrfs_alloc_root(fs_info, BTRFS_TREE_LOG_OBJECTID,
2080 					 GFP_KERNEL);
2081 	if (!log_tree_root)
2082 		return -ENOMEM;
2083 
2084 	check.level = level;
2085 	check.transid = fs_info->generation + 1;
2086 	check.owner_root = BTRFS_TREE_LOG_OBJECTID;
2087 	log_tree_root->node = read_tree_block(fs_info, bytenr, &check);
2088 	if (IS_ERR(log_tree_root->node)) {
2089 		btrfs_warn(fs_info, "failed to read log tree");
2090 		ret = PTR_ERR(log_tree_root->node);
2091 		log_tree_root->node = NULL;
2092 		btrfs_put_root(log_tree_root);
2093 		return ret;
2094 	}
2095 	if (!extent_buffer_uptodate(log_tree_root->node)) {
2096 		btrfs_err(fs_info, "failed to read log tree");
2097 		btrfs_put_root(log_tree_root);
2098 		return -EIO;
2099 	}
2100 
2101 	/* returns with log_tree_root freed on success */
2102 	ret = btrfs_recover_log_trees(log_tree_root);
2103 	if (ret) {
2104 		btrfs_handle_fs_error(fs_info, ret,
2105 				      "Failed to recover log tree");
2106 		btrfs_put_root(log_tree_root);
2107 		return ret;
2108 	}
2109 
2110 	if (sb_rdonly(fs_info->sb)) {
2111 		ret = btrfs_commit_super(fs_info);
2112 		if (ret)
2113 			return ret;
2114 	}
2115 
2116 	return 0;
2117 }
2118 
2119 static int load_global_roots_objectid(struct btrfs_root *tree_root,
2120 				      struct btrfs_path *path, u64 objectid,
2121 				      const char *name)
2122 {
2123 	struct btrfs_fs_info *fs_info = tree_root->fs_info;
2124 	struct btrfs_root *root;
2125 	u64 max_global_id = 0;
2126 	int ret;
2127 	struct btrfs_key key = {
2128 		.objectid = objectid,
2129 		.type = BTRFS_ROOT_ITEM_KEY,
2130 		.offset = 0,
2131 	};
2132 	bool found = false;
2133 
2134 	/* If we have IGNOREDATACSUMS skip loading these roots. */
2135 	if (objectid == BTRFS_CSUM_TREE_OBJECTID &&
2136 	    btrfs_test_opt(fs_info, IGNOREDATACSUMS)) {
2137 		set_bit(BTRFS_FS_STATE_NO_DATA_CSUMS, &fs_info->fs_state);
2138 		return 0;
2139 	}
2140 
2141 	while (1) {
2142 		ret = btrfs_search_slot(NULL, tree_root, &key, path, 0, 0);
2143 		if (ret < 0)
2144 			break;
2145 
2146 		if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
2147 			ret = btrfs_next_leaf(tree_root, path);
2148 			if (ret) {
2149 				if (ret > 0)
2150 					ret = 0;
2151 				break;
2152 			}
2153 		}
2154 		ret = 0;
2155 
2156 		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2157 		if (key.objectid != objectid)
2158 			break;
2159 		btrfs_release_path(path);
2160 
2161 		/*
2162 		 * Just worry about this for the extent tree; it'll be the same for
2163 		 * everybody.
2164 		 */
2165 		if (objectid == BTRFS_EXTENT_TREE_OBJECTID)
2166 			max_global_id = max(max_global_id, key.offset);
2167 
2168 		found = true;
2169 		root = read_tree_root_path(tree_root, path, &key);
2170 		if (IS_ERR(root)) {
2171 			ret = PTR_ERR(root);
2172 			break;
2173 		}
2174 		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2175 		ret = btrfs_global_root_insert(root);
2176 		if (ret) {
2177 			btrfs_put_root(root);
2178 			break;
2179 		}
2180 		key.offset++;
2181 	}
2182 	btrfs_release_path(path);
2183 
2184 	if (objectid == BTRFS_EXTENT_TREE_OBJECTID)
2185 		fs_info->nr_global_roots = max_global_id + 1;
2186 
2187 	if (!found || ret) {
2188 		if (objectid == BTRFS_CSUM_TREE_OBJECTID)
2189 			set_bit(BTRFS_FS_STATE_NO_DATA_CSUMS, &fs_info->fs_state);
2190 
2191 		if (!btrfs_test_opt(fs_info, IGNOREBADROOTS))
2192 			ret = ret ? ret : -ENOENT;
2193 		else
2194 			ret = 0;
2195 		btrfs_err(fs_info, "failed to load root %s", name);
2196 	}
2197 	return ret;
2198 }
2199 
2200 static int load_global_roots(struct btrfs_root *tree_root)
2201 {
2202 	struct btrfs_path *path;
2203 	int ret = 0;
2204 
2205 	path = btrfs_alloc_path();
2206 	if (!path)
2207 		return -ENOMEM;
2208 
2209 	ret = load_global_roots_objectid(tree_root, path,
2210 					 BTRFS_EXTENT_TREE_OBJECTID, "extent");
2211 	if (ret)
2212 		goto out;
2213 	ret = load_global_roots_objectid(tree_root, path,
2214 					 BTRFS_CSUM_TREE_OBJECTID, "csum");
2215 	if (ret)
2216 		goto out;
2217 	if (!btrfs_fs_compat_ro(tree_root->fs_info, FREE_SPACE_TREE))
2218 		goto out;
2219 	ret = load_global_roots_objectid(tree_root, path,
2220 					 BTRFS_FREE_SPACE_TREE_OBJECTID,
2221 					 "free space");
2222 out:
2223 	btrfs_free_path(path);
2224 	return ret;
2225 }
2226 
2227 static int btrfs_read_roots(struct btrfs_fs_info *fs_info)
2228 {
2229 	struct btrfs_root *tree_root = fs_info->tree_root;
2230 	struct btrfs_root *root;
2231 	struct btrfs_key location;
2232 	int ret;
2233 
2234 	ASSERT(fs_info->tree_root);
2235 
2236 	ret = load_global_roots(tree_root);
2237 	if (ret)
2238 		return ret;
2239 
2240 	location.type = BTRFS_ROOT_ITEM_KEY;
2241 	location.offset = 0;
2242 
2243 	if (btrfs_fs_compat_ro(fs_info, BLOCK_GROUP_TREE)) {
2244 		location.objectid = BTRFS_BLOCK_GROUP_TREE_OBJECTID;
2245 		root = btrfs_read_tree_root(tree_root, &location);
2246 		if (IS_ERR(root)) {
2247 			if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
2248 				ret = PTR_ERR(root);
2249 				goto out;
2250 			}
2251 		} else {
2252 			set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2253 			fs_info->block_group_root = root;
2254 		}
2255 	}
2256 
2257 	location.objectid = BTRFS_DEV_TREE_OBJECTID;
2258 	root = btrfs_read_tree_root(tree_root, &location);
2259 	if (IS_ERR(root)) {
2260 		if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
2261 			ret = PTR_ERR(root);
2262 			goto out;
2263 		}
2264 	} else {
2265 		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2266 		fs_info->dev_root = root;
2267 	}
2268 	/* Initialize fs_info for all devices in any case */
2269 	ret = btrfs_init_devices_late(fs_info);
2270 	if (ret)
2271 		goto out;
2272 
2273 	/*
2274 	 * This tree can share blocks with some other fs tree during relocation
2275 	 * and we need a proper setup by btrfs_get_fs_root().
2276 	 */
2277 	root = btrfs_get_fs_root(tree_root->fs_info,
2278 				 BTRFS_DATA_RELOC_TREE_OBJECTID, true);
2279 	if (IS_ERR(root)) {
2280 		if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
2281 			ret = PTR_ERR(root);
2282 			goto out;
2283 		}
2284 	} else {
2285 		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2286 		fs_info->data_reloc_root = root;
2287 	}
2288 
2289 	location.objectid = BTRFS_QUOTA_TREE_OBJECTID;
2290 	root = btrfs_read_tree_root(tree_root, &location);
2291 	if (!IS_ERR(root)) {
2292 		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2293 		fs_info->quota_root = root;
2294 	}
2295 
2296 	location.objectid = BTRFS_UUID_TREE_OBJECTID;
2297 	root = btrfs_read_tree_root(tree_root, &location);
2298 	if (IS_ERR(root)) {
2299 		if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
2300 			ret = PTR_ERR(root);
2301 			if (ret != -ENOENT)
2302 				goto out;
2303 		}
2304 	} else {
2305 		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2306 		fs_info->uuid_root = root;
2307 	}
2308 
2309 	if (btrfs_fs_incompat(fs_info, RAID_STRIPE_TREE)) {
2310 		location.objectid = BTRFS_RAID_STRIPE_TREE_OBJECTID;
2311 		root = btrfs_read_tree_root(tree_root, &location);
2312 		if (IS_ERR(root)) {
2313 			if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
2314 				ret = PTR_ERR(root);
2315 				goto out;
2316 			}
2317 		} else {
2318 			set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2319 			fs_info->stripe_root = root;
2320 		}
2321 	}
2322 
2323 	return 0;
2324 out:
2325 	btrfs_warn(fs_info, "failed to read root (objectid=%llu): %d",
2326 		   location.objectid, ret);
2327 	return ret;
2328 }
2329 
2330 /*
2331  * Real super block validation
2332  * NOTE: super csum type and incompat features will not be checked here.
2333  *
2334  * @sb:		super block to check
2335  * @mirror_num:	which super block copy's bytenr to check:
2336  * 		0	the primary (1st) sb
2337  * 		1, 2	2nd and 3rd backup copy
2338  * 	       -1	skip bytenr check
2339  */
2340 int btrfs_validate_super(const struct btrfs_fs_info *fs_info,
2341 			 const struct btrfs_super_block *sb, int mirror_num)
2342 {
2343 	u64 nodesize = btrfs_super_nodesize(sb);
2344 	u64 sectorsize = btrfs_super_sectorsize(sb);
2345 	int ret = 0;
2346 	const bool ignore_flags = btrfs_test_opt(fs_info, IGNORESUPERFLAGS);
2347 
2348 	if (btrfs_super_magic(sb) != BTRFS_MAGIC) {
2349 		btrfs_err(fs_info, "no valid FS found");
2350 		ret = -EINVAL;
2351 	}
2352 	if ((btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP)) {
2353 		if (!ignore_flags) {
2354 			btrfs_err(fs_info,
2355 			"unrecognized or unsupported super flag 0x%llx",
2356 				  btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP);
2357 			ret = -EINVAL;
2358 		} else {
2359 			btrfs_info(fs_info,
2360 			"unrecognized or unsupported super flags: 0x%llx, ignored",
2361 				   btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP);
2362 		}
2363 	}
2364 	if (btrfs_super_root_level(sb) >= BTRFS_MAX_LEVEL) {
2365 		btrfs_err(fs_info, "tree_root level too big: %d >= %d",
2366 				btrfs_super_root_level(sb), BTRFS_MAX_LEVEL);
2367 		ret = -EINVAL;
2368 	}
2369 	if (btrfs_super_chunk_root_level(sb) >= BTRFS_MAX_LEVEL) {
2370 		btrfs_err(fs_info, "chunk_root level too big: %d >= %d",
2371 				btrfs_super_chunk_root_level(sb), BTRFS_MAX_LEVEL);
2372 		ret = -EINVAL;
2373 	}
2374 	if (btrfs_super_log_root_level(sb) >= BTRFS_MAX_LEVEL) {
2375 		btrfs_err(fs_info, "log_root level too big: %d >= %d",
2376 				btrfs_super_log_root_level(sb), BTRFS_MAX_LEVEL);
2377 		ret = -EINVAL;
2378 	}
2379 
2380 	/*
2381 	 * Check sectorsize and nodesize first, other checks will need them.
2382 	 * Check all possible sectorsizes (4K, 8K, 16K, 32K, 64K) here.
2383 	 */
2384 	if (!is_power_of_2(sectorsize) || sectorsize < 4096 ||
2385 	    sectorsize > BTRFS_MAX_METADATA_BLOCKSIZE) {
2386 		btrfs_err(fs_info, "invalid sectorsize %llu", sectorsize);
2387 		ret = -EINVAL;
2388 	}
2389 
2390 	/*
2391 	 * We only support at most two sectorsizes: 4K and PAGE_SIZE.
2392 	 *
2393 	 * We could support a 16K sectorsize with a 64K page size without
2394 	 * problems, but such a sectorsize/pagesize combination doesn't make
2395 	 * much sense.  4K will be our future standard; PAGE_SIZE has been
2396 	 * supported from the very beginning.
2397 	 */
2398 	if (sectorsize > PAGE_SIZE || (sectorsize != SZ_4K && sectorsize != PAGE_SIZE)) {
2399 		btrfs_err(fs_info,
2400 			"sectorsize %llu not yet supported for page size %lu",
2401 			sectorsize, PAGE_SIZE);
2402 		ret = -EINVAL;
2403 	}
2404 
2405 	if (!is_power_of_2(nodesize) || nodesize < sectorsize ||
2406 	    nodesize > BTRFS_MAX_METADATA_BLOCKSIZE) {
2407 		btrfs_err(fs_info, "invalid nodesize %llu", nodesize);
2408 		ret = -EINVAL;
2409 	}
2410 	if (nodesize != le32_to_cpu(sb->__unused_leafsize)) {
2411 		btrfs_err(fs_info, "invalid leafsize %u, should be %llu",
2412 			  le32_to_cpu(sb->__unused_leafsize), nodesize);
2413 		ret = -EINVAL;
2414 	}
2415 
2416 	/* Root alignment check */
2417 	if (!IS_ALIGNED(btrfs_super_root(sb), sectorsize)) {
2418 		btrfs_warn(fs_info, "tree_root block unaligned: %llu",
2419 			   btrfs_super_root(sb));
2420 		ret = -EINVAL;
2421 	}
2422 	if (!IS_ALIGNED(btrfs_super_chunk_root(sb), sectorsize)) {
2423 		btrfs_warn(fs_info, "chunk_root block unaligned: %llu",
2424 			   btrfs_super_chunk_root(sb));
2425 		ret = -EINVAL;
2426 	}
2427 	if (!IS_ALIGNED(btrfs_super_log_root(sb), sectorsize)) {
2428 		btrfs_warn(fs_info, "log_root block unaligned: %llu",
2429 			   btrfs_super_log_root(sb));
2430 		ret = -EINVAL;
2431 	}
2432 
2433 	if (!fs_info->fs_devices->temp_fsid &&
2434 	    memcmp(fs_info->fs_devices->fsid, sb->fsid, BTRFS_FSID_SIZE) != 0) {
2435 		btrfs_err(fs_info,
2436 		"superblock fsid doesn't match fsid of fs_devices: %pU != %pU",
2437 			  sb->fsid, fs_info->fs_devices->fsid);
2438 		ret = -EINVAL;
2439 	}
2440 
2441 	if (memcmp(fs_info->fs_devices->metadata_uuid, btrfs_sb_fsid_ptr(sb),
2442 		   BTRFS_FSID_SIZE) != 0) {
2443 		btrfs_err(fs_info,
2444 "superblock metadata_uuid doesn't match metadata uuid of fs_devices: %pU != %pU",
2445 			  btrfs_sb_fsid_ptr(sb), fs_info->fs_devices->metadata_uuid);
2446 		ret = -EINVAL;
2447 	}
2448 
2449 	if (memcmp(fs_info->fs_devices->metadata_uuid, sb->dev_item.fsid,
2450 		   BTRFS_FSID_SIZE) != 0) {
2451 		btrfs_err(fs_info,
2452 			"dev_item UUID does not match metadata fsid: %pU != %pU",
2453 			fs_info->fs_devices->metadata_uuid, sb->dev_item.fsid);
2454 		ret = -EINVAL;
2455 	}
2456 
2457 	/*
2458 	 * Artificial requirement for block-group-tree to force newer features
2459 	 * (free-space-tree, no-holes) so the test matrix is smaller.
2460 	 */
2461 	if (btrfs_fs_compat_ro(fs_info, BLOCK_GROUP_TREE) &&
2462 	    (!btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID) ||
2463 	     !btrfs_fs_incompat(fs_info, NO_HOLES))) {
2464 		btrfs_err(fs_info,
2465 		"block-group-tree feature requires free-space-tree and no-holes");
2466 		ret = -EINVAL;
2467 	}
2468 
2469 	/*
2470 	 * A hint to catch really bogus numbers and bit flips; more exact
2471 	 * checks are done later.
2472 	 */
2473 	if (btrfs_super_bytes_used(sb) < 6 * btrfs_super_nodesize(sb)) {
2474 		btrfs_err(fs_info, "bytes_used is too small %llu",
2475 			  btrfs_super_bytes_used(sb));
2476 		ret = -EINVAL;
2477 	}
2478 	if (!is_power_of_2(btrfs_super_stripesize(sb))) {
2479 		btrfs_err(fs_info, "invalid stripesize %u",
2480 			  btrfs_super_stripesize(sb));
2481 		ret = -EINVAL;
2482 	}
2483 	if (btrfs_super_num_devices(sb) > (1UL << 31))
2484 		btrfs_warn(fs_info, "suspicious number of devices: %llu",
2485 			   btrfs_super_num_devices(sb));
2486 	if (btrfs_super_num_devices(sb) == 0) {
2487 		btrfs_err(fs_info, "number of devices is 0");
2488 		ret = -EINVAL;
2489 	}
2490 
2491 	if (mirror_num >= 0 &&
2492 	    btrfs_super_bytenr(sb) != btrfs_sb_offset(mirror_num)) {
2493 		btrfs_err(fs_info, "super offset mismatch %llu != %llu",
2494 			  btrfs_super_bytenr(sb), btrfs_sb_offset(mirror_num));
2495 		ret = -EINVAL;
2496 	}
2497 
2498 	/*
2499 	 * Obvious sys_chunk_array corruptions: it must hold at least one key
2500 	 * and one chunk.
2501 	 */
2502 	if (btrfs_super_sys_array_size(sb) > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
2503 		btrfs_err(fs_info, "system chunk array too big %u > %u",
2504 			  btrfs_super_sys_array_size(sb),
2505 			  BTRFS_SYSTEM_CHUNK_ARRAY_SIZE);
2506 		ret = -EINVAL;
2507 	}
2508 	if (btrfs_super_sys_array_size(sb) < sizeof(struct btrfs_disk_key)
2509 			+ sizeof(struct btrfs_chunk)) {
2510 		btrfs_err(fs_info, "system chunk array too small %u < %zu",
2511 			  btrfs_super_sys_array_size(sb),
2512 			  sizeof(struct btrfs_disk_key)
2513 			  + sizeof(struct btrfs_chunk));
2514 		ret = -EINVAL;
2515 	}
2516 
2517 	/*
2518 	 * The generation is a global counter; we'll trust it more than the
2519 	 * others, but it's still possible that it's the one that's wrong.
2520 	 */
2521 	if (btrfs_super_generation(sb) < btrfs_super_chunk_root_generation(sb))
2522 		btrfs_warn(fs_info,
2523 			"suspicious: generation < chunk_root_generation: %llu < %llu",
2524 			btrfs_super_generation(sb),
2525 			btrfs_super_chunk_root_generation(sb));
2526 	if (btrfs_super_generation(sb) < btrfs_super_cache_generation(sb)
2527 	    && btrfs_super_cache_generation(sb) != (u64)-1)
2528 		btrfs_warn(fs_info,
2529 			"suspicious: generation < cache_generation: %llu < %llu",
2530 			btrfs_super_generation(sb),
2531 			btrfs_super_cache_generation(sb));
2532 
2533 	return ret;
2534 }
2535 
2536 /*
2537  * Validation of super block at mount time.
2538  * Some checks already done early at mount time, like csum type and incompat
2539  * flags will be skipped.
2540  */
2541 static int btrfs_validate_mount_super(struct btrfs_fs_info *fs_info)
2542 {
2543 	return btrfs_validate_super(fs_info, fs_info->super_copy, 0);
2544 }
2545 
2546 /*
2547  * Validation of super block at write time.
2548  * Some checks like bytenr check will be skipped as their values will be
2549  * overwritten soon.
2550  * Extra checks like csum type and incompat flags will be done here.
2551  */
2552 static int btrfs_validate_write_super(struct btrfs_fs_info *fs_info,
2553 				      struct btrfs_super_block *sb)
2554 {
2555 	int ret;
2556 
2557 	ret = btrfs_validate_super(fs_info, sb, -1);
2558 	if (ret < 0)
2559 		goto out;
2560 	if (!btrfs_supported_super_csum(btrfs_super_csum_type(sb))) {
2561 		ret = -EUCLEAN;
2562 		btrfs_err(fs_info, "invalid csum type, has %u want %u",
2563 			  btrfs_super_csum_type(sb), BTRFS_CSUM_TYPE_CRC32);
2564 		goto out;
2565 	}
2566 	if (btrfs_super_incompat_flags(sb) & ~BTRFS_FEATURE_INCOMPAT_SUPP) {
2567 		ret = -EUCLEAN;
2568 		btrfs_err(fs_info,
2569 		"invalid incompat flags, has 0x%llx valid mask 0x%llx",
2570 			  btrfs_super_incompat_flags(sb),
2571 			  (unsigned long long)BTRFS_FEATURE_INCOMPAT_SUPP);
2572 		goto out;
2573 	}
2574 out:
2575 	if (ret < 0)
2576 		btrfs_err(fs_info,
2577 		"super block corruption detected before writing it to disk");
2578 	return ret;
2579 }
2580 
2581 static int load_super_root(struct btrfs_root *root, u64 bytenr, u64 gen, int level)
2582 {
2583 	struct btrfs_tree_parent_check check = {
2584 		.level = level,
2585 		.transid = gen,
2586 		.owner_root = btrfs_root_id(root)
2587 	};
2588 	int ret = 0;
2589 
2590 	root->node = read_tree_block(root->fs_info, bytenr, &check);
2591 	if (IS_ERR(root->node)) {
2592 		ret = PTR_ERR(root->node);
2593 		root->node = NULL;
2594 		return ret;
2595 	}
2596 	if (!extent_buffer_uptodate(root->node)) {
2597 		free_extent_buffer(root->node);
2598 		root->node = NULL;
2599 		return -EIO;
2600 	}
2601 
2602 	btrfs_set_root_node(&root->root_item, root->node);
2603 	root->commit_root = btrfs_root_node(root);
2604 	btrfs_set_root_refs(&root->root_item, 1);
2605 	return ret;
2606 }
2607 
2608 static int load_important_roots(struct btrfs_fs_info *fs_info)
2609 {
2610 	struct btrfs_super_block *sb = fs_info->super_copy;
2611 	u64 gen, bytenr;
2612 	int level, ret;
2613 
2614 	bytenr = btrfs_super_root(sb);
2615 	gen = btrfs_super_generation(sb);
2616 	level = btrfs_super_root_level(sb);
2617 	ret = load_super_root(fs_info->tree_root, bytenr, gen, level);
2618 	if (ret) {
2619 		btrfs_warn(fs_info, "couldn't read tree root");
2620 		return ret;
2621 	}
2622 	return 0;
2623 }
2624 
2625 static int __cold init_tree_roots(struct btrfs_fs_info *fs_info)
2626 {
2627 	int backup_index = find_newest_super_backup(fs_info);
2628 	struct btrfs_super_block *sb = fs_info->super_copy;
2629 	struct btrfs_root *tree_root = fs_info->tree_root;
2630 	bool handle_error = false;
2631 	int ret = 0;
2632 	int i;
2633 
2634 	for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
2635 		if (handle_error) {
2636 			if (!IS_ERR(tree_root->node))
2637 				free_extent_buffer(tree_root->node);
2638 			tree_root->node = NULL;
2639 
2640 			if (!btrfs_test_opt(fs_info, USEBACKUPROOT))
2641 				break;
2642 
2643 			free_root_pointers(fs_info, false);
2644 
2645 			/*
2646 			 * Don't use the log in recovery mode, it won't be
2647 			 * valid
2648 			 */
2649 			btrfs_set_super_log_root(sb, 0);
2650 
2651 			btrfs_warn(fs_info, "try to load backup roots slot %d", i);
2652 			ret = read_backup_root(fs_info, i);
2653 			backup_index = ret;
2654 			if (ret < 0)
2655 				return ret;
2656 		}
2657 
2658 		ret = load_important_roots(fs_info);
2659 		if (ret) {
2660 			handle_error = true;
2661 			continue;
2662 		}
2663 
2664 		/*
2665 		 * No need to hold btrfs_root::objectid_mutex since the fs
2666 		 * hasn't been fully initialised and we are the only user
2667 		 */
2668 		ret = btrfs_init_root_free_objectid(tree_root);
2669 		if (ret < 0) {
2670 			handle_error = true;
2671 			continue;
2672 		}
2673 
2674 		ASSERT(tree_root->free_objectid <= BTRFS_LAST_FREE_OBJECTID);
2675 
2676 		ret = btrfs_read_roots(fs_info);
2677 		if (ret < 0) {
2678 			handle_error = true;
2679 			continue;
2680 		}
2681 
2682 		/* All successful */
2683 		fs_info->generation = btrfs_header_generation(tree_root->node);
2684 		btrfs_set_last_trans_committed(fs_info, fs_info->generation);
2685 		fs_info->last_reloc_trans = 0;
2686 
2687 		/* Always begin writing backup roots after the one being used */
2688 		if (backup_index < 0) {
2689 			fs_info->backup_root_index = 0;
2690 		} else {
2691 			fs_info->backup_root_index = backup_index + 1;
2692 			fs_info->backup_root_index %= BTRFS_NUM_BACKUP_ROOTS;
2693 		}
2694 		break;
2695 	}
2696 
2697 	return ret;
2698 }
2699 
2700 void btrfs_init_fs_info(struct btrfs_fs_info *fs_info)
2701 {
2702 	INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
2703 	INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC);
2704 	INIT_LIST_HEAD(&fs_info->trans_list);
2705 	INIT_LIST_HEAD(&fs_info->dead_roots);
2706 	INIT_LIST_HEAD(&fs_info->delayed_iputs);
2707 	INIT_LIST_HEAD(&fs_info->delalloc_roots);
2708 	INIT_LIST_HEAD(&fs_info->caching_block_groups);
2709 	spin_lock_init(&fs_info->delalloc_root_lock);
2710 	spin_lock_init(&fs_info->trans_lock);
2711 	spin_lock_init(&fs_info->fs_roots_radix_lock);
2712 	spin_lock_init(&fs_info->delayed_iput_lock);
2713 	spin_lock_init(&fs_info->defrag_inodes_lock);
2714 	spin_lock_init(&fs_info->super_lock);
2715 	spin_lock_init(&fs_info->buffer_lock);
2716 	spin_lock_init(&fs_info->unused_bgs_lock);
2717 	spin_lock_init(&fs_info->treelog_bg_lock);
2718 	spin_lock_init(&fs_info->zone_active_bgs_lock);
2719 	spin_lock_init(&fs_info->relocation_bg_lock);
2720 	rwlock_init(&fs_info->tree_mod_log_lock);
2721 	rwlock_init(&fs_info->global_root_lock);
2722 	mutex_init(&fs_info->unused_bg_unpin_mutex);
2723 	mutex_init(&fs_info->reclaim_bgs_lock);
2724 	mutex_init(&fs_info->reloc_mutex);
2725 	mutex_init(&fs_info->delalloc_root_mutex);
2726 	mutex_init(&fs_info->zoned_meta_io_lock);
2727 	mutex_init(&fs_info->zoned_data_reloc_io_lock);
2728 	seqlock_init(&fs_info->profiles_lock);
2729 
2730 	btrfs_lockdep_init_map(fs_info, btrfs_trans_num_writers);
2731 	btrfs_lockdep_init_map(fs_info, btrfs_trans_num_extwriters);
2732 	btrfs_lockdep_init_map(fs_info, btrfs_trans_pending_ordered);
2733 	btrfs_lockdep_init_map(fs_info, btrfs_ordered_extent);
2734 	btrfs_state_lockdep_init_map(fs_info, btrfs_trans_commit_prep,
2735 				     BTRFS_LOCKDEP_TRANS_COMMIT_PREP);
2736 	btrfs_state_lockdep_init_map(fs_info, btrfs_trans_unblocked,
2737 				     BTRFS_LOCKDEP_TRANS_UNBLOCKED);
2738 	btrfs_state_lockdep_init_map(fs_info, btrfs_trans_super_committed,
2739 				     BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED);
2740 	btrfs_state_lockdep_init_map(fs_info, btrfs_trans_completed,
2741 				     BTRFS_LOCKDEP_TRANS_COMPLETED);
2742 
2743 	INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
2744 	INIT_LIST_HEAD(&fs_info->space_info);
2745 	INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
2746 	INIT_LIST_HEAD(&fs_info->unused_bgs);
2747 	INIT_LIST_HEAD(&fs_info->reclaim_bgs);
2748 	INIT_LIST_HEAD(&fs_info->zone_active_bgs);
2749 #ifdef CONFIG_BTRFS_DEBUG
2750 	INIT_LIST_HEAD(&fs_info->allocated_roots);
2751 	INIT_LIST_HEAD(&fs_info->allocated_ebs);
2752 	spin_lock_init(&fs_info->eb_leak_lock);
2753 #endif
2754 	fs_info->mapping_tree = RB_ROOT_CACHED;
2755 	rwlock_init(&fs_info->mapping_tree_lock);
2756 	btrfs_init_block_rsv(&fs_info->global_block_rsv,
2757 			     BTRFS_BLOCK_RSV_GLOBAL);
2758 	btrfs_init_block_rsv(&fs_info->trans_block_rsv, BTRFS_BLOCK_RSV_TRANS);
2759 	btrfs_init_block_rsv(&fs_info->chunk_block_rsv, BTRFS_BLOCK_RSV_CHUNK);
2760 	btrfs_init_block_rsv(&fs_info->empty_block_rsv, BTRFS_BLOCK_RSV_EMPTY);
2761 	btrfs_init_block_rsv(&fs_info->delayed_block_rsv,
2762 			     BTRFS_BLOCK_RSV_DELOPS);
2763 	btrfs_init_block_rsv(&fs_info->delayed_refs_rsv,
2764 			     BTRFS_BLOCK_RSV_DELREFS);
2765 
2766 	atomic_set(&fs_info->async_delalloc_pages, 0);
2767 	atomic_set(&fs_info->defrag_running, 0);
2768 	atomic_set(&fs_info->nr_delayed_iputs, 0);
2769 	atomic64_set(&fs_info->tree_mod_seq, 0);
2770 	fs_info->global_root_tree = RB_ROOT;
2771 	fs_info->max_inline = BTRFS_DEFAULT_MAX_INLINE;
2772 	fs_info->metadata_ratio = 0;
2773 	fs_info->defrag_inodes = RB_ROOT;
2774 	atomic64_set(&fs_info->free_chunk_space, 0);
2775 	fs_info->tree_mod_log = RB_ROOT;
2776 	fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
2777 	btrfs_init_ref_verify(fs_info);
2778 
2779 	fs_info->thread_pool_size = min_t(unsigned long,
2780 					  num_online_cpus() + 2, 8);
2781 
2782 	INIT_LIST_HEAD(&fs_info->ordered_roots);
2783 	spin_lock_init(&fs_info->ordered_root_lock);
2784 
2785 	btrfs_init_scrub(fs_info);
2786 	btrfs_init_balance(fs_info);
2787 	btrfs_init_async_reclaim_work(fs_info);
2788 	btrfs_init_extent_map_shrinker_work(fs_info);
2789 
2790 	rwlock_init(&fs_info->block_group_cache_lock);
2791 	fs_info->block_group_cache_tree = RB_ROOT_CACHED;
2792 
2793 	extent_io_tree_init(fs_info, &fs_info->excluded_extents,
2794 			    IO_TREE_FS_EXCLUDED_EXTENTS);
2795 
2796 	mutex_init(&fs_info->ordered_operations_mutex);
2797 	mutex_init(&fs_info->tree_log_mutex);
2798 	mutex_init(&fs_info->chunk_mutex);
2799 	mutex_init(&fs_info->transaction_kthread_mutex);
2800 	mutex_init(&fs_info->cleaner_mutex);
2801 	mutex_init(&fs_info->ro_block_group_mutex);
2802 	init_rwsem(&fs_info->commit_root_sem);
2803 	init_rwsem(&fs_info->cleanup_work_sem);
2804 	init_rwsem(&fs_info->subvol_sem);
2805 	sema_init(&fs_info->uuid_tree_rescan_sem, 1);
2806 
2807 	btrfs_init_dev_replace_locks(fs_info);
2808 	btrfs_init_qgroup(fs_info);
2809 	btrfs_discard_init(fs_info);
2810 
2811 	btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
2812 	btrfs_init_free_cluster(&fs_info->data_alloc_cluster);
2813 
2814 	init_waitqueue_head(&fs_info->transaction_throttle);
2815 	init_waitqueue_head(&fs_info->transaction_wait);
2816 	init_waitqueue_head(&fs_info->transaction_blocked_wait);
2817 	init_waitqueue_head(&fs_info->async_submit_wait);
2818 	init_waitqueue_head(&fs_info->delayed_iputs_wait);
2819 
2820 	/* Usable values until the real ones are cached from the superblock */
2821 	fs_info->nodesize = 4096;
2822 	fs_info->sectorsize = 4096;
2823 	fs_info->sectorsize_bits = ilog2(4096);
2824 	fs_info->stripesize = 4096;
2825 
2826 	/* Default compress algorithm when user does -o compress */
2827 	fs_info->compress_type = BTRFS_COMPRESS_ZLIB;
2828 
2829 	fs_info->max_extent_size = BTRFS_MAX_EXTENT_SIZE;
2830 
2831 	spin_lock_init(&fs_info->swapfile_pins_lock);
2832 	fs_info->swapfile_pins = RB_ROOT;
2833 
2834 	fs_info->bg_reclaim_threshold = BTRFS_DEFAULT_RECLAIM_THRESH;
2835 	INIT_WORK(&fs_info->reclaim_bgs_work, btrfs_reclaim_bgs_work);
2836 }
2837 
2838 static int init_mount_fs_info(struct btrfs_fs_info *fs_info, struct super_block *sb)
2839 {
2840 	int ret;
2841 
2842 	fs_info->sb = sb;
2843 	/* Temporary fixed values for block size until we read the superblock. */
2844 	sb->s_blocksize = BTRFS_BDEV_BLOCKSIZE;
2845 	sb->s_blocksize_bits = blksize_bits(BTRFS_BDEV_BLOCKSIZE);
2846 
2847 	ret = percpu_counter_init(&fs_info->ordered_bytes, 0, GFP_KERNEL);
2848 	if (ret)
2849 		return ret;
2850 
2851 	ret = percpu_counter_init(&fs_info->evictable_extent_maps, 0, GFP_KERNEL);
2852 	if (ret)
2853 		return ret;
2854 
2855 	spin_lock_init(&fs_info->extent_map_shrinker_lock);
2856 
2857 	ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0, GFP_KERNEL);
2858 	if (ret)
2859 		return ret;
2860 
2861 	fs_info->dirty_metadata_batch = PAGE_SIZE *
2862 					(1 + ilog2(nr_cpu_ids));
2863 
2864 	ret = percpu_counter_init(&fs_info->delalloc_bytes, 0, GFP_KERNEL);
2865 	if (ret)
2866 		return ret;
2867 
2868 	ret = percpu_counter_init(&fs_info->dev_replace.bio_counter, 0,
2869 			GFP_KERNEL);
2870 	if (ret)
2871 		return ret;
2872 
2873 	fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root),
2874 					GFP_KERNEL);
2875 	if (!fs_info->delayed_root)
2876 		return -ENOMEM;
2877 	btrfs_init_delayed_root(fs_info->delayed_root);
2878 
2879 	if (sb_rdonly(sb))
2880 		set_bit(BTRFS_FS_STATE_RO, &fs_info->fs_state);
2881 	if (btrfs_test_opt(fs_info, IGNOREMETACSUMS))
2882 		set_bit(BTRFS_FS_STATE_SKIP_META_CSUMS, &fs_info->fs_state);
2883 
2884 	return btrfs_alloc_stripe_hash_table(fs_info);
2885 }
2886 
2887 static int btrfs_uuid_rescan_kthread(void *data)
2888 {
2889 	struct btrfs_fs_info *fs_info = data;
2890 	int ret;
2891 
2892 	/*
2893 	 * The 1st step is to iterate through the existing UUID tree and
2894 	 * delete all entries that contain outdated data.
2895 	 * The 2nd step is to add all missing entries to the UUID tree.
2896 	 */
2897 	ret = btrfs_uuid_tree_iterate(fs_info);
2898 	if (ret < 0) {
2899 		if (ret != -EINTR)
2900 			btrfs_warn(fs_info, "iterating uuid_tree failed %d",
2901 				   ret);
2902 		up(&fs_info->uuid_tree_rescan_sem);
2903 		return ret;
2904 	}
2905 	return btrfs_uuid_scan_kthread(data);
2906 }
2907 
2908 static int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info)
2909 {
2910 	struct task_struct *task;
2911 
2912 	down(&fs_info->uuid_tree_rescan_sem);
2913 	task = kthread_run(btrfs_uuid_rescan_kthread, fs_info, "btrfs-uuid");
2914 	if (IS_ERR(task)) {
2915 		/* fs_info->update_uuid_tree_gen remains 0 in all error cases */
2916 		btrfs_warn(fs_info, "failed to start uuid_rescan task");
2917 		up(&fs_info->uuid_tree_rescan_sem);
2918 		return PTR_ERR(task);
2919 	}
2920 
2921 	return 0;
2922 }
2923 
2924 static int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
2925 {
2926 	u64 root_objectid = 0;
2927 	struct btrfs_root *gang[8];
2928 	int ret = 0;
2929 
2930 	while (1) {
2931 		unsigned int found;
2932 
2933 		spin_lock(&fs_info->fs_roots_radix_lock);
2934 		found = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
2935 					     (void **)gang, root_objectid,
2936 					     ARRAY_SIZE(gang));
2937 		if (!found) {
2938 			spin_unlock(&fs_info->fs_roots_radix_lock);
2939 			break;
2940 		}
2941 		root_objectid = btrfs_root_id(gang[found - 1]) + 1;
2942 
2943 		for (int i = 0; i < found; i++) {
2944 			/* Avoid grabbing roots in dead_roots. */
2945 			if (btrfs_root_refs(&gang[i]->root_item) == 0) {
2946 				gang[i] = NULL;
2947 				continue;
2948 			}
2949 			/* Grab all the search results for later use. */
2950 			gang[i] = btrfs_grab_root(gang[i]);
2951 		}
2952 		spin_unlock(&fs_info->fs_roots_radix_lock);
2953 
2954 		for (int i = 0; i < found; i++) {
2955 			if (!gang[i])
2956 				continue;
2957 			root_objectid = btrfs_root_id(gang[i]);
2958 			/*
2959 			 * Continue to release the remaining roots after the first
2960 			 * error without cleanup and preserve the first error
2961 			 * for the return.
2962 			 */
2963 			if (!ret)
2964 				ret = btrfs_orphan_cleanup(gang[i]);
2965 			btrfs_put_root(gang[i]);
2966 		}
2967 		if (ret)
2968 			break;
2969 
2970 		root_objectid++;
2971 	}
2972 	return ret;
2973 }
2974 
2975 /*
2976  * Mounting logic specific to read-write file systems. Shared by open_ctree
2977  * and btrfs_remount when remounting from read-only to read-write.
2978  */
2979 int btrfs_start_pre_rw_mount(struct btrfs_fs_info *fs_info)
2980 {
2981 	int ret;
2982 	const bool cache_opt = btrfs_test_opt(fs_info, SPACE_CACHE);
2983 	bool rebuild_free_space_tree = false;
2984 
2985 	if (btrfs_test_opt(fs_info, CLEAR_CACHE) &&
2986 	    btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
2987 		if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2))
2988 			btrfs_warn(fs_info,
2989 				   "'clear_cache' option is ignored with extent tree v2");
2990 		else
2991 			rebuild_free_space_tree = true;
2992 	} else if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
2993 		   !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID)) {
2994 		btrfs_warn(fs_info, "free space tree is invalid");
2995 		rebuild_free_space_tree = true;
2996 	}
2997 
2998 	if (rebuild_free_space_tree) {
2999 		btrfs_info(fs_info, "rebuilding free space tree");
3000 		ret = btrfs_rebuild_free_space_tree(fs_info);
3001 		if (ret) {
3002 			btrfs_warn(fs_info,
3003 				   "failed to rebuild free space tree: %d", ret);
3004 			goto out;
3005 		}
3006 	}
3007 
3008 	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
3009 	    !btrfs_test_opt(fs_info, FREE_SPACE_TREE)) {
3010 		btrfs_info(fs_info, "disabling free space tree");
3011 		ret = btrfs_delete_free_space_tree(fs_info);
3012 		if (ret) {
3013 			btrfs_warn(fs_info,
3014 				   "failed to disable free space tree: %d", ret);
3015 			goto out;
3016 		}
3017 	}
3018 
3019 	/*
3020 	 * btrfs_find_orphan_roots() is responsible for finding all the dead
3021 	 * roots (with 0 refs), flag them with BTRFS_ROOT_DEAD_TREE and load
3022 	 * them into the fs_info->fs_roots_radix tree. This must be done before
3023 	 * calling btrfs_orphan_cleanup() on the tree root. If we don't do it
3024 	 * first, then btrfs_orphan_cleanup() will delete a dead root's orphan
3025 	 * item before the root's tree is deleted - this means that if we unmount
3026 	 * or crash before the deletion completes, on the next mount we will not
3027 	 * delete what remains of the tree because the orphan item does not
3028 	 * exist anymore, which is what tells us we have a pending deletion.
3029 	 */
3030 	ret = btrfs_find_orphan_roots(fs_info);
3031 	if (ret)
3032 		goto out;
3033 
3034 	ret = btrfs_cleanup_fs_roots(fs_info);
3035 	if (ret)
3036 		goto out;
3037 
3038 	down_read(&fs_info->cleanup_work_sem);
3039 	if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) ||
3040 	    (ret = btrfs_orphan_cleanup(fs_info->tree_root))) {
3041 		up_read(&fs_info->cleanup_work_sem);
3042 		goto out;
3043 	}
3044 	up_read(&fs_info->cleanup_work_sem);
3045 
3046 	mutex_lock(&fs_info->cleaner_mutex);
3047 	ret = btrfs_recover_relocation(fs_info);
3048 	mutex_unlock(&fs_info->cleaner_mutex);
3049 	if (ret < 0) {
3050 		btrfs_warn(fs_info, "failed to recover relocation: %d", ret);
3051 		goto out;
3052 	}
3053 
3054 	if (btrfs_test_opt(fs_info, FREE_SPACE_TREE) &&
3055 	    !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
3056 		btrfs_info(fs_info, "creating free space tree");
3057 		ret = btrfs_create_free_space_tree(fs_info);
3058 		if (ret) {
3059 			btrfs_warn(fs_info,
3060 				"failed to create free space tree: %d", ret);
3061 			goto out;
3062 		}
3063 	}
3064 
3065 	if (cache_opt != btrfs_free_space_cache_v1_active(fs_info)) {
3066 		ret = btrfs_set_free_space_cache_v1_active(fs_info, cache_opt);
3067 		if (ret)
3068 			goto out;
3069 	}
3070 
3071 	ret = btrfs_resume_balance_async(fs_info);
3072 	if (ret)
3073 		goto out;
3074 
3075 	ret = btrfs_resume_dev_replace_async(fs_info);
3076 	if (ret) {
3077 		btrfs_warn(fs_info, "failed to resume dev_replace");
3078 		goto out;
3079 	}
3080 
3081 	btrfs_qgroup_rescan_resume(fs_info);
3082 
3083 	if (!fs_info->uuid_root) {
3084 		btrfs_info(fs_info, "creating UUID tree");
3085 		ret = btrfs_create_uuid_tree(fs_info);
3086 		if (ret) {
3087 			btrfs_warn(fs_info,
3088 				   "failed to create the UUID tree %d", ret);
3089 			goto out;
3090 		}
3091 	}
3092 
3093 out:
3094 	return ret;
3095 }
3096 
3097 /*
3098  * Do various sanity and dependency checks of different features.
3099  *
3100  * @is_rw_mount:	If the mount is read-write.
3101  *
3102  * This is the place for less strict checks (like for subpage or artificial
3103  * feature dependencies).
3104  *
3105  * For strict checks or possible corruption detection, see
3106  * btrfs_validate_super().
3107  *
3108  * This should be called after btrfs_parse_options(), as some mount options
3109  * (space cache related) can modify on-disk format like free space tree and
3110  * screw up certain feature dependencies.
3111  */
3112 int btrfs_check_features(struct btrfs_fs_info *fs_info, bool is_rw_mount)
3113 {
3114 	struct btrfs_super_block *disk_super = fs_info->super_copy;
3115 	u64 incompat = btrfs_super_incompat_flags(disk_super);
3116 	const u64 compat_ro = btrfs_super_compat_ro_flags(disk_super);
3117 	const u64 compat_ro_unsupp = (compat_ro & ~BTRFS_FEATURE_COMPAT_RO_SUPP);
3118 
3119 	if (incompat & ~BTRFS_FEATURE_INCOMPAT_SUPP) {
3120 		btrfs_err(fs_info,
3121 		"cannot mount because of unknown incompat features (0x%llx)",
3122 		    incompat);
3123 		return -EINVAL;
3124 	}
3125 
3126 	/* Runtime limitation for mixed block groups. */
3127 	if ((incompat & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
3128 	    (fs_info->sectorsize != fs_info->nodesize)) {
3129 		btrfs_err(fs_info,
3130 "unequal nodesize/sectorsize (%u != %u) are not allowed for mixed block groups",
3131 			fs_info->nodesize, fs_info->sectorsize);
3132 		return -EINVAL;
3133 	}
3134 
3135 	/* Mixed backref is an always-enabled feature. */
3136 	incompat |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
3137 
3138 	/* Set compression related flags just in case. */
3139 	if (fs_info->compress_type == BTRFS_COMPRESS_LZO)
3140 		incompat |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
3141 	else if (fs_info->compress_type == BTRFS_COMPRESS_ZSTD)
3142 		incompat |= BTRFS_FEATURE_INCOMPAT_COMPRESS_ZSTD;
3143 
3144 	/*
3145 	 * An ancient flag, which should really be marked deprecated.
3146 	 * Such a runtime limitation doesn't really need an incompat flag.
3147 	 */
3148 	if (btrfs_super_nodesize(disk_super) > PAGE_SIZE)
3149 		incompat |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
3150 
3151 	if (compat_ro_unsupp && is_rw_mount) {
3152 		btrfs_err(fs_info,
3153 	"cannot mount read-write because of unknown compat_ro features (0x%llx)",
3154 		       compat_ro);
3155 		return -EINVAL;
3156 	}
3157 
3158 	/*
3159 	 * If we have unsupported RO compat features, then even though we are
3160 	 * mounted read-only we must not cause any metadata writes, including
3161 	 * log replay, or we could screw up whatever the new feature requires.
3162 	 */
3163 	if (compat_ro_unsupp && btrfs_super_log_root(disk_super) &&
3164 	    !btrfs_test_opt(fs_info, NOLOGREPLAY)) {
3165 		btrfs_err(fs_info,
3166 "cannot replay dirty log with unsupported compat_ro features (0x%llx), try rescue=nologreplay",
3167 			  compat_ro);
3168 		return -EINVAL;
3169 	}
3170 
3171 	/*
3172 	 * Artificial limitations for block group tree, to force
3173 	 * block-group-tree to rely on no-holes and free-space-tree.
3174 	 */
3175 	if (btrfs_fs_compat_ro(fs_info, BLOCK_GROUP_TREE) &&
3176 	    (!btrfs_fs_incompat(fs_info, NO_HOLES) ||
3177 	     !btrfs_test_opt(fs_info, FREE_SPACE_TREE))) {
3178 		btrfs_err(fs_info,
3179 "block-group-tree feature requires no-holes and free-space-tree features");
3180 		return -EINVAL;
3181 	}
3182 
3183 	/*
3184 	 * Subpage runtime limitation on v1 cache.
3185 	 *
3186 	 * V1 space cache still has some hard coded PAGE_SIZE usage, and since
3187 	 * we already default to the v2 cache there is no need to bother with
3188 	 * v1, as it's going to be deprecated anyway.
3189 	 */
3190 	if (fs_info->sectorsize < PAGE_SIZE && btrfs_test_opt(fs_info, SPACE_CACHE)) {
3191 		btrfs_warn(fs_info,
3192 	"v1 space cache is not supported for page size %lu with sectorsize %u",
3193 			   PAGE_SIZE, fs_info->sectorsize);
3194 		return -EINVAL;
3195 	}
3196 
3197 	/* This can be called by remount, we need to protect the super block. */
3198 	spin_lock(&fs_info->super_lock);
3199 	btrfs_set_super_incompat_flags(disk_super, incompat);
3200 	spin_unlock(&fs_info->super_lock);
3201 
3202 	return 0;
3203 }
3204 
3205 int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_devices)
3206 {
3207 	u32 sectorsize;
3208 	u32 nodesize;
3209 	u32 stripesize;
3210 	u64 generation;
3211 	u16 csum_type;
3212 	struct btrfs_super_block *disk_super;
3213 	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
3214 	struct btrfs_root *tree_root;
3215 	struct btrfs_root *chunk_root;
3216 	int ret;
3217 	int level;
3218 
3219 	ret = init_mount_fs_info(fs_info, sb);
3220 	if (ret)
3221 		goto fail;
3222 
3223 	/* These need to be init'ed before we start creating inodes and such. */
3224 	tree_root = btrfs_alloc_root(fs_info, BTRFS_ROOT_TREE_OBJECTID,
3225 				     GFP_KERNEL);
3226 	fs_info->tree_root = tree_root;
3227 	chunk_root = btrfs_alloc_root(fs_info, BTRFS_CHUNK_TREE_OBJECTID,
3228 				      GFP_KERNEL);
3229 	fs_info->chunk_root = chunk_root;
3230 	if (!tree_root || !chunk_root) {
3231 		ret = -ENOMEM;
3232 		goto fail;
3233 	}
3234 
3235 	ret = btrfs_init_btree_inode(sb);
3236 	if (ret)
3237 		goto fail;
3238 
3239 	invalidate_bdev(fs_devices->latest_dev->bdev);
3240 
3241 	/*
3242 	 * Read super block and check the signature bytes only
3243 	 */
3244 	disk_super = btrfs_read_dev_super(fs_devices->latest_dev->bdev);
3245 	if (IS_ERR(disk_super)) {
3246 		ret = PTR_ERR(disk_super);
3247 		goto fail_alloc;
3248 	}
3249 
3250 	btrfs_info(fs_info, "first mount of filesystem %pU", disk_super->fsid);
3251 	/*
3252 	 * Verify the checksum type first; if it or the checksum value is
3253 	 * corrupted, we'll find out here.
3254 	 */
3255 	csum_type = btrfs_super_csum_type(disk_super);
3256 	if (!btrfs_supported_super_csum(csum_type)) {
3257 		btrfs_err(fs_info, "unsupported checksum algorithm: %u",
3258 			  csum_type);
3259 		ret = -EINVAL;
3260 		btrfs_release_disk_super(disk_super);
3261 		goto fail_alloc;
3262 	}
3263 
3264 	fs_info->csum_size = btrfs_super_csum_size(disk_super);
3265 
3266 	ret = btrfs_init_csum_hash(fs_info, csum_type);
3267 	if (ret) {
3268 		btrfs_release_disk_super(disk_super);
3269 		goto fail_alloc;
3270 	}
3271 
3272 	/*
3273 	 * We want to check superblock checksum, the type is stored inside.
3274 	 * Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k).
3275 	 */
3276 	if (btrfs_check_super_csum(fs_info, disk_super)) {
3277 		btrfs_err(fs_info, "superblock checksum mismatch");
3278 		ret = -EINVAL;
3279 		btrfs_release_disk_super(disk_super);
3280 		goto fail_alloc;
3281 	}
3282 
3283 	/*
3284 	 * super_copy is zeroed at allocation time and we never touch the
3285 	 * following bytes up to INFO_SIZE, the checksum is calculated from
3286 	 * the whole block of INFO_SIZE
3287 	 */
3288 	memcpy(fs_info->super_copy, disk_super, sizeof(*fs_info->super_copy));
3289 	btrfs_release_disk_super(disk_super);
3290 
3291 	disk_super = fs_info->super_copy;
3292 
3293 	memcpy(fs_info->super_for_commit, fs_info->super_copy,
3294 	       sizeof(*fs_info->super_for_commit));
3295 
3296 	ret = btrfs_validate_mount_super(fs_info);
3297 	if (ret) {
3298 		btrfs_err(fs_info, "superblock contains fatal errors");
3299 		ret = -EINVAL;
3300 		goto fail_alloc;
3301 	}
3302 
3303 	if (!btrfs_super_root(disk_super)) {
3304 		btrfs_err(fs_info, "invalid superblock tree root bytenr");
3305 		ret = -EINVAL;
3306 		goto fail_alloc;
3307 	}
3308 
3309 	/* Check the FS state to see whether the FS is broken. */
3310 	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR)
3311 		WRITE_ONCE(fs_info->fs_error, -EUCLEAN);
3312 
3313 	/* Set up fs_info before parsing mount options */
3314 	nodesize = btrfs_super_nodesize(disk_super);
3315 	sectorsize = btrfs_super_sectorsize(disk_super);
3316 	stripesize = sectorsize;
3317 	fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids));
3318 	fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
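
	/*
	 * Example of the batching math above (illustrative values): with a
	 * 16K nodesize and nr_cpu_ids == 8 (ilog2 == 3), dirty_metadata_batch
	 * is 16K * 4 = 64K, and with a 4K sectorsize delalloc_batch is
	 * 4K * 512 * 4 = 8M.
	 */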
3319 
3320 	fs_info->nodesize = nodesize;
3321 	fs_info->sectorsize = sectorsize;
3322 	fs_info->sectorsize_bits = ilog2(sectorsize);
3323 	fs_info->sectors_per_page = (PAGE_SIZE >> fs_info->sectorsize_bits);
3324 	fs_info->csums_per_leaf = BTRFS_MAX_ITEM_SIZE(fs_info) / fs_info->csum_size;
3325 	fs_info->stripesize = stripesize;
3326 
3327 	/*
3328 	 * Handle the space caching options appropriately now that we have the
3329 	 * super block loaded and validated.
3330 	 */
3331 	btrfs_set_free_space_cache_settings(fs_info);
3332 
3333 	if (!btrfs_check_options(fs_info, &fs_info->mount_opt, sb->s_flags)) {
3334 		ret = -EINVAL;
3335 		goto fail_alloc;
3336 	}
3337 
3338 	ret = btrfs_check_features(fs_info, !sb_rdonly(sb));
3339 	if (ret < 0)
3340 		goto fail_alloc;
3341 
3342 	/*
3343 	 * At this point our mount options are validated, if we set ->max_inline
3344 	 * to something non-standard make sure we truncate it to sectorsize.
3345 	 */
3346 	fs_info->max_inline = min_t(u64, fs_info->max_inline, fs_info->sectorsize);
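
	/*
	 * E.g. (illustrative): a max_inline of 8K on a filesystem with a 4K
	 * sectorsize is clamped down to 4K here.
	 */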
3347 
3348 	if (sectorsize < PAGE_SIZE)
3349 		btrfs_warn(fs_info,
3350 		"read-write for sector size %u with page size %lu is experimental",
3351 			   sectorsize, PAGE_SIZE);
3352 
3353 	ret = btrfs_init_workqueues(fs_info);
3354 	if (ret)
3355 		goto fail_sb_buffer;
3356 
3357 	sb->s_bdi->ra_pages *= btrfs_super_num_devices(disk_super);
3358 	sb->s_bdi->ra_pages = max(sb->s_bdi->ra_pages, SZ_4M / PAGE_SIZE);
3359 
3360 	/* Update the values for the current filesystem. */
3361 	sb->s_blocksize = sectorsize;
3362 	sb->s_blocksize_bits = blksize_bits(sectorsize);
3363 	memcpy(&sb->s_uuid, fs_info->fs_devices->fsid, BTRFS_FSID_SIZE);
3364 
3365 	mutex_lock(&fs_info->chunk_mutex);
3366 	ret = btrfs_read_sys_array(fs_info);
3367 	mutex_unlock(&fs_info->chunk_mutex);
3368 	if (ret) {
3369 		btrfs_err(fs_info, "failed to read the system array: %d", ret);
3370 		goto fail_sb_buffer;
3371 	}
3372 
3373 	generation = btrfs_super_chunk_root_generation(disk_super);
3374 	level = btrfs_super_chunk_root_level(disk_super);
3375 	ret = load_super_root(chunk_root, btrfs_super_chunk_root(disk_super),
3376 			      generation, level);
3377 	if (ret) {
3378 		btrfs_err(fs_info, "failed to read chunk root");
3379 		goto fail_tree_roots;
3380 	}
3381 
3382 	read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
3383 			   offsetof(struct btrfs_header, chunk_tree_uuid),
3384 			   BTRFS_UUID_SIZE);
3385 
3386 	ret = btrfs_read_chunk_tree(fs_info);
3387 	if (ret) {
3388 		btrfs_err(fs_info, "failed to read chunk tree: %d", ret);
3389 		goto fail_tree_roots;
3390 	}
3391 
3392 	/*
3393 	 * At this point we know all the devices that make this filesystem,
3394 	 * including the seed devices but we don't know yet if the replace
3395 	 * target is required. So free devices that are not part of this
3396 	 * filesystem but skip the replace target device which is checked
3397 	 * below in btrfs_init_dev_replace().
3398 	 */
3399 	btrfs_free_extra_devids(fs_devices);
3400 	if (!fs_devices->latest_dev->bdev) {
3401 		btrfs_err(fs_info, "failed to read devices");
3402 		ret = -EIO;
3403 		goto fail_tree_roots;
3404 	}
3405 
3406 	ret = init_tree_roots(fs_info);
3407 	if (ret)
3408 		goto fail_tree_roots;
3409 
3410 	/*
3411 	 * Get zone type information of zoned block devices. This will also
3412 	 * handle emulation of a zoned filesystem if a regular device has the
3413 	 * zoned incompat feature flag set.
3414 	 */
3415 	ret = btrfs_get_dev_zone_info_all_devices(fs_info);
3416 	if (ret) {
3417 		btrfs_err(fs_info,
3418 			  "zoned: failed to read device zone info: %d", ret);
3419 		goto fail_block_groups;
3420 	}
3421 
3422 	/*
3423 	 * If we have a uuid root and we're not being told to rescan we need to
3424 	 * check the generation here so we can set the
3425 	 * BTRFS_FS_UPDATE_UUID_TREE_GEN bit.  Otherwise we could commit the
3426 	 * transaction during a balance or the log replay without updating the
3427 	 * uuid generation, and then if we crash we would rescan the uuid tree,
3428 	 * even though it was perfectly fine.
3429 	 */
3430 	if (fs_info->uuid_root && !btrfs_test_opt(fs_info, RESCAN_UUID_TREE) &&
3431 	    fs_info->generation == btrfs_super_uuid_tree_generation(disk_super))
3432 		set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
3433 
3434 	ret = btrfs_verify_dev_extents(fs_info);
3435 	if (ret) {
3436 		btrfs_err(fs_info,
3437 			  "failed to verify dev extents against chunks: %d",
3438 			  ret);
3439 		goto fail_block_groups;
3440 	}
3441 	ret = btrfs_recover_balance(fs_info);
3442 	if (ret) {
3443 		btrfs_err(fs_info, "failed to recover balance: %d", ret);
3444 		goto fail_block_groups;
3445 	}
3446 
3447 	ret = btrfs_init_dev_stats(fs_info);
3448 	if (ret) {
3449 		btrfs_err(fs_info, "failed to init dev_stats: %d", ret);
3450 		goto fail_block_groups;
3451 	}
3452 
3453 	ret = btrfs_init_dev_replace(fs_info);
3454 	if (ret) {
3455 		btrfs_err(fs_info, "failed to init dev_replace: %d", ret);
3456 		goto fail_block_groups;
3457 	}
3458 
3459 	ret = btrfs_check_zoned_mode(fs_info);
3460 	if (ret) {
3461 		btrfs_err(fs_info, "failed to initialize zoned mode: %d",
3462 			  ret);
3463 		goto fail_block_groups;
3464 	}
3465 
3466 	ret = btrfs_sysfs_add_fsid(fs_devices);
3467 	if (ret) {
3468 		btrfs_err(fs_info, "failed to init sysfs fsid interface: %d",
3469 				ret);
3470 		goto fail_block_groups;
3471 	}
3472 
3473 	ret = btrfs_sysfs_add_mounted(fs_info);
3474 	if (ret) {
3475 		btrfs_err(fs_info, "failed to init sysfs interface: %d", ret);
3476 		goto fail_fsdev_sysfs;
3477 	}
3478 
3479 	ret = btrfs_init_space_info(fs_info);
3480 	if (ret) {
3481 		btrfs_err(fs_info, "failed to initialize space info: %d", ret);
3482 		goto fail_sysfs;
3483 	}
3484 
3485 	ret = btrfs_read_block_groups(fs_info);
3486 	if (ret) {
3487 		btrfs_err(fs_info, "failed to read block groups: %d", ret);
3488 		goto fail_sysfs;
3489 	}
3490 
3491 	btrfs_free_zone_cache(fs_info);
3492 
3493 	btrfs_check_active_zone_reservation(fs_info);
3494 
3495 	if (!sb_rdonly(sb) && fs_info->fs_devices->missing_devices &&
3496 	    !btrfs_check_rw_degradable(fs_info, NULL)) {
3497 		btrfs_warn(fs_info,
3498 		"writable mount is not allowed due to too many missing devices");
3499 		ret = -EINVAL;
3500 		goto fail_sysfs;
3501 	}
3502 
3503 	fs_info->cleaner_kthread = kthread_run(cleaner_kthread, fs_info,
3504 					       "btrfs-cleaner");
3505 	if (IS_ERR(fs_info->cleaner_kthread)) {
3506 		ret = PTR_ERR(fs_info->cleaner_kthread);
3507 		goto fail_sysfs;
3508 	}
3509 
3510 	fs_info->transaction_kthread = kthread_run(transaction_kthread,
3511 						   tree_root,
3512 						   "btrfs-transaction");
3513 	if (IS_ERR(fs_info->transaction_kthread)) {
3514 		ret = PTR_ERR(fs_info->transaction_kthread);
3515 		goto fail_cleaner;
3516 	}
3517 
3518 	ret = btrfs_read_qgroup_config(fs_info);
3519 	if (ret)
3520 		goto fail_trans_kthread;
3521 
3522 	if (btrfs_build_ref_tree(fs_info))
3523 		btrfs_err(fs_info, "couldn't build ref tree");
3524 
3525 	/* Do not make disk changes in a broken FS or when nologreplay is given. */
3526 	if (btrfs_super_log_root(disk_super) != 0 &&
3527 	    !btrfs_test_opt(fs_info, NOLOGREPLAY)) {
3528 		btrfs_info(fs_info, "start tree-log replay");
3529 		ret = btrfs_replay_log(fs_info, fs_devices);
3530 		if (ret)
3531 			goto fail_qgroup;
3532 	}
3533 
3534 	fs_info->fs_root = btrfs_get_fs_root(fs_info, BTRFS_FS_TREE_OBJECTID, true);
3535 	if (IS_ERR(fs_info->fs_root)) {
3536 		ret = PTR_ERR(fs_info->fs_root);
3537 		btrfs_warn(fs_info, "failed to read fs tree: %d", ret);
3538 		fs_info->fs_root = NULL;
3539 		goto fail_qgroup;
3540 	}
3541 
3542 	if (sb_rdonly(sb))
3543 		return 0;
3544 
3545 	ret = btrfs_start_pre_rw_mount(fs_info);
3546 	if (ret) {
3547 		close_ctree(fs_info);
3548 		return ret;
3549 	}
3550 	btrfs_discard_resume(fs_info);
3551 
3552 	if (fs_info->uuid_root &&
3553 	    (btrfs_test_opt(fs_info, RESCAN_UUID_TREE) ||
3554 	     fs_info->generation != btrfs_super_uuid_tree_generation(disk_super))) {
3555 		btrfs_info(fs_info, "checking UUID tree");
3556 		ret = btrfs_check_uuid_tree(fs_info);
3557 		if (ret) {
3558 			btrfs_warn(fs_info,
3559 				"failed to check the UUID tree: %d", ret);
3560 			close_ctree(fs_info);
3561 			return ret;
3562 		}
3563 	}
3564 
3565 	set_bit(BTRFS_FS_OPEN, &fs_info->flags);
3566 
3567 	/* Kick the cleaner thread so it'll start deleting snapshots. */
3568 	if (test_bit(BTRFS_FS_UNFINISHED_DROPS, &fs_info->flags))
3569 		wake_up_process(fs_info->cleaner_kthread);
3570 
3571 	return 0;
3572 
3573 fail_qgroup:
3574 	btrfs_free_qgroup_config(fs_info);
3575 fail_trans_kthread:
3576 	kthread_stop(fs_info->transaction_kthread);
3577 	btrfs_cleanup_transaction(fs_info);
3578 	btrfs_free_fs_roots(fs_info);
3579 fail_cleaner:
3580 	kthread_stop(fs_info->cleaner_kthread);
3581 
3582 	/*
3583 	 * make sure we're done with the btree inode before we stop our
3584 	 * kthreads
3585 	 */
3586 	filemap_write_and_wait(fs_info->btree_inode->i_mapping);
3587 
3588 fail_sysfs:
3589 	btrfs_sysfs_remove_mounted(fs_info);
3590 
3591 fail_fsdev_sysfs:
3592 	btrfs_sysfs_remove_fsid(fs_info->fs_devices);
3593 
3594 fail_block_groups:
3595 	btrfs_put_block_group_cache(fs_info);
3596 
3597 fail_tree_roots:
3598 	if (fs_info->data_reloc_root)
3599 		btrfs_drop_and_free_fs_root(fs_info, fs_info->data_reloc_root);
3600 	free_root_pointers(fs_info, true);
3601 	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
3602 
3603 fail_sb_buffer:
3604 	btrfs_stop_all_workers(fs_info);
3605 	btrfs_free_block_groups(fs_info);
3606 fail_alloc:
3607 	btrfs_mapping_tree_free(fs_info);
3608 
3609 	iput(fs_info->btree_inode);
3610 fail:
3611 	btrfs_close_devices(fs_info->fs_devices);
3612 	ASSERT(ret < 0);
3613 	return ret;
3614 }
3615 ALLOW_ERROR_INJECTION(open_ctree, ERRNO);
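
/*
 * The injection hook above lets tests force open_ctree() to fail on demand.
 * A minimal sketch, assuming CONFIG_FAIL_FUNCTION is enabled and debugfs is
 * mounted at /sys/kernel/debug (knob values are illustrative, see
 * Documentation/fault-injection/fault-injection.rst):
 *
 *	echo open_ctree > /sys/kernel/debug/fail_function/inject
 *	printf %#x -12 > /sys/kernel/debug/fail_function/open_ctree/retval
 *	echo 100 > /sys/kernel/debug/fail_function/probability
 *	mount -t btrfs /dev/sdb /mnt	# now fails with -ENOMEM (-12)
 */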
3616 
3617 static void btrfs_end_super_write(struct bio *bio)
3618 {
3619 	struct btrfs_device *device = bio->bi_private;
3620 	struct folio_iter fi;
3621 
3622 	bio_for_each_folio_all(fi, bio) {
3623 		if (bio->bi_status) {
3624 			btrfs_warn_rl_in_rcu(device->fs_info,
3625 				"lost super block write due to IO error on %s (%d)",
3626 				btrfs_dev_name(device),
3627 				blk_status_to_errno(bio->bi_status));
3628 			btrfs_dev_stat_inc_and_print(device,
3629 						     BTRFS_DEV_STAT_WRITE_ERRS);
3630 			/* Ensure failure if the primary sb fails. */
3631 			if (bio->bi_opf & REQ_FUA)
3632 				atomic_add(BTRFS_SUPER_PRIMARY_WRITE_ERROR,
3633 					   &device->sb_write_errors);
3634 			else
3635 				atomic_inc(&device->sb_write_errors);
3636 		}
3637 		folio_unlock(fi.folio);
3638 		folio_put(fi.folio);
3639 	}
3640 
3641 	bio_put(bio);
3642 }
3643 
3644 struct btrfs_super_block *btrfs_read_dev_one_super(struct block_device *bdev,
3645 						   int copy_num, bool drop_cache)
3646 {
3647 	struct btrfs_super_block *super;
3648 	struct page *page;
3649 	u64 bytenr, bytenr_orig;
3650 	struct address_space *mapping = bdev->bd_mapping;
3651 	int ret;
3652 
3653 	bytenr_orig = btrfs_sb_offset(copy_num);
3654 	ret = btrfs_sb_log_location_bdev(bdev, copy_num, READ, &bytenr);
3655 	if (ret == -ENOENT)
3656 		return ERR_PTR(-EINVAL);
3657 	else if (ret)
3658 		return ERR_PTR(ret);
3659 
3660 	if (bytenr + BTRFS_SUPER_INFO_SIZE >= bdev_nr_bytes(bdev))
3661 		return ERR_PTR(-EINVAL);
3662 
3663 	if (drop_cache) {
3664 		/* This should only be called with the primary sb. */
3665 		ASSERT(copy_num == 0);
3666 
3667 		/*
3668 		 * Drop the page of the primary superblock, so a later read
3669 		 * will always read from the device.
3670 		 */
3671 		invalidate_inode_pages2_range(mapping,
3672 				bytenr >> PAGE_SHIFT,
3673 				(bytenr + BTRFS_SUPER_INFO_SIZE) >> PAGE_SHIFT);
3674 	}
3675 
3676 	page = read_cache_page_gfp(mapping, bytenr >> PAGE_SHIFT, GFP_NOFS);
3677 	if (IS_ERR(page))
3678 		return ERR_CAST(page);
3679 
3680 	super = page_address(page);
3681 	if (btrfs_super_magic(super) != BTRFS_MAGIC) {
3682 		btrfs_release_disk_super(super);
3683 		return ERR_PTR(-ENODATA);
3684 	}
3685 
3686 	if (btrfs_super_bytenr(super) != bytenr_orig) {
3687 		btrfs_release_disk_super(super);
3688 		return ERR_PTR(-EINVAL);
3689 	}
3690 
3691 	return super;
3692 }
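
/*
 * A minimal usage sketch for the helper above (illustrative only, it
 * mirrors what btrfs_read_dev_super() below does for copy 0):
 *
 *	super = btrfs_read_dev_one_super(bdev, 0, false);
 *	if (IS_ERR(super))
 *		return PTR_ERR(super);
 *	(inspect fields, e.g. btrfs_super_generation(super))
 *	btrfs_release_disk_super(super);
 */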
3693 
3695 struct btrfs_super_block *btrfs_read_dev_super(struct block_device *bdev)
3696 {
3697 	struct btrfs_super_block *super, *latest = NULL;
3698 	int i;
3699 	u64 transid = 0;
3700 
3701 	/* we would like to check all the supers, but that would make
3702 	 * a btrfs mount succeed after a mkfs from a different FS.
3703 	 * So, we need to add a special mount option to scan for
3704 	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
3705 	 */
3706 	for (i = 0; i < 1; i++) {
3707 		super = btrfs_read_dev_one_super(bdev, i, false);
3708 		if (IS_ERR(super))
3709 			continue;
3710 
3711 		if (!latest || btrfs_super_generation(super) > transid) {
3712 			if (latest)
3713 				btrfs_release_disk_super(super);
3714 
3715 			latest = super;
3716 			transid = btrfs_super_generation(super);
3717 		}
3718 	}
3719 
3720 	return super;
3721 }
3722 
3723 /*
3724  * Write superblock @sb to the @device. Do not wait for completion, all the
3725  * folios we use for writing are locked.
3726  *
3727  * Write @max_mirrors copies of the superblock, where 0 means the default
3728  * (all copies that fit the expected device size at commit time). Note that
3729  * max_mirrors must be the same for the write and wait phases.
3730  *
3731  * Return number of errors when folio is not found or submission fails.
3732  */
3733 static int write_dev_supers(struct btrfs_device *device,
3734 			    struct btrfs_super_block *sb, int max_mirrors)
3735 {
3736 	struct btrfs_fs_info *fs_info = device->fs_info;
3737 	struct address_space *mapping = device->bdev->bd_mapping;
3738 	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
3739 	int i;
3740 	int ret;
3741 	u64 bytenr, bytenr_orig;
3742 
3743 	atomic_set(&device->sb_write_errors, 0);
3744 
3745 	if (max_mirrors == 0)
3746 		max_mirrors = BTRFS_SUPER_MIRROR_MAX;
3747 
3748 	shash->tfm = fs_info->csum_shash;
3749 
3750 	for (i = 0; i < max_mirrors; i++) {
3751 		struct folio *folio;
3752 		struct bio *bio;
3753 		struct btrfs_super_block *disk_super;
3754 		size_t offset;
3755 
3756 		bytenr_orig = btrfs_sb_offset(i);
3757 		ret = btrfs_sb_log_location(device, i, WRITE, &bytenr);
3758 		if (ret == -ENOENT) {
3759 			continue;
3760 		} else if (ret < 0) {
3761 			btrfs_err(device->fs_info,
3762 				"couldn't get super block location for mirror %d",
3763 				i);
3764 			atomic_inc(&device->sb_write_errors);
3765 			continue;
3766 		}
3767 		if (bytenr + BTRFS_SUPER_INFO_SIZE >=
3768 		    device->commit_total_bytes)
3769 			break;
3770 
3771 		btrfs_set_super_bytenr(sb, bytenr_orig);
3772 
3773 		crypto_shash_digest(shash, (const char *)sb + BTRFS_CSUM_SIZE,
3774 				    BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE,
3775 				    sb->csum);
3776 
3777 		folio = __filemap_get_folio(mapping, bytenr >> PAGE_SHIFT,
3778 					    FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
3779 					    GFP_NOFS);
3780 		if (IS_ERR(folio)) {
3781 			btrfs_err(device->fs_info,
3782 			    "couldn't get super block page for bytenr %llu",
3783 			    bytenr);
3784 			atomic_inc(&device->sb_write_errors);
3785 			continue;
3786 		}
3787 		ASSERT(folio_order(folio) == 0);
3788 
3789 		offset = offset_in_folio(folio, bytenr);
3790 		disk_super = folio_address(folio) + offset;
3791 		memcpy(disk_super, sb, BTRFS_SUPER_INFO_SIZE);
3792 
3793 		/*
3794 		 * Directly use bios here instead of relying on the page cache
3795 		 * to do I/O, so we don't lose the ability to do integrity
3796 		 * checking.
3797 		 */
3798 		bio = bio_alloc(device->bdev, 1,
3799 				REQ_OP_WRITE | REQ_SYNC | REQ_META | REQ_PRIO,
3800 				GFP_NOFS);
3801 		bio->bi_iter.bi_sector = bytenr >> SECTOR_SHIFT;
3802 		bio->bi_private = device;
3803 		bio->bi_end_io = btrfs_end_super_write;
3804 		bio_add_folio_nofail(bio, folio, BTRFS_SUPER_INFO_SIZE, offset);
3805 
3806 		/*
3807 		 * We FUA only the first super block.  The others we allow to
3808 		 * go down lazy and there's a short window where the on-disk
3809 		 * copies might still contain the older version.
3810 		 */
3811 		if (i == 0 && !btrfs_test_opt(device->fs_info, NOBARRIER))
3812 			bio->bi_opf |= REQ_FUA;
3813 		submit_bio(bio);
3814 
3815 		if (btrfs_advance_sb_log(device, i))
3816 			atomic_inc(&device->sb_write_errors);
3817 	}
3818 	return atomic_read(&device->sb_write_errors) < i ? 0 : -1;
3819 }
3820 
3821 /*
3822  * Wait for write completion of superblocks done by write_dev_supers,
3823  * @max_mirrors must be the same as in the write phase.
3824  *
3825  * Return -1 if primary super block write failed or when there were no super block
3826  * copies written. Otherwise 0.
3827  */
3828 static int wait_dev_supers(struct btrfs_device *device, int max_mirrors)
3829 {
3830 	int i;
3831 	int errors = 0;
3832 	bool primary_failed = false;
3833 	int ret;
3834 	u64 bytenr;
3835 
3836 	if (max_mirrors == 0)
3837 		max_mirrors = BTRFS_SUPER_MIRROR_MAX;
3838 
3839 	for (i = 0; i < max_mirrors; i++) {
3840 		struct folio *folio;
3841 
3842 		ret = btrfs_sb_log_location(device, i, READ, &bytenr);
3843 		if (ret == -ENOENT) {
3844 			break;
3845 		} else if (ret < 0) {
3846 			errors++;
3847 			if (i == 0)
3848 				primary_failed = true;
3849 			continue;
3850 		}
3851 		if (bytenr + BTRFS_SUPER_INFO_SIZE >=
3852 		    device->commit_total_bytes)
3853 			break;
3854 
3855 		folio = filemap_get_folio(device->bdev->bd_mapping,
3856 					  bytenr >> PAGE_SHIFT);
3857 		/* If the folio has been removed, then we know it completed. */
3858 		if (IS_ERR(folio))
3859 			continue;
3860 		ASSERT(folio_order(folio) == 0);
3861 
3862 		/* Folio will be unlocked once the write completes. */
3863 		folio_wait_locked(folio);
3864 		folio_put(folio);
3865 	}
3866 
3867 	errors += atomic_read(&device->sb_write_errors);
3868 	if (errors >= BTRFS_SUPER_PRIMARY_WRITE_ERROR)
3869 		primary_failed = true;
3870 	if (primary_failed) {
3871 		btrfs_err(device->fs_info, "error writing primary super block to device %llu",
3872 			  device->devid);
3873 		return -1;
3874 	}
3875 
3876 	return errors < i ? 0 : -1;
3877 }
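
/*
 * Note how write_dev_supers() and wait_dev_supers() cooperate on error
 * accounting: btrfs_end_super_write() adds the large
 * BTRFS_SUPER_PRIMARY_WRITE_ERROR sentinel when the FUA (primary) write
 * fails and 1 for any other copy, so a total of
 * errors >= BTRFS_SUPER_PRIMARY_WRITE_ERROR can only mean the primary super
 * block write failed, and the wait phase then fails the whole device.
 */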
3878 
3879 /*
3880  * Endio for write_dev_flush; this will wake anyone waiting
3881  * for the barrier when it is done.
3882  */
3883 static void btrfs_end_empty_barrier(struct bio *bio)
3884 {
3885 	bio_uninit(bio);
3886 	complete(bio->bi_private);
3887 }
3888 
3889 /*
3890  * Submit a flush request to the device if it supports it. Error handling is
3891  * done in the waiting counterpart.
3892  */
3893 static void write_dev_flush(struct btrfs_device *device)
3894 {
3895 	struct bio *bio = &device->flush_bio;
3896 
3897 	device->last_flush_error = BLK_STS_OK;
3898 
3899 	bio_init(bio, device->bdev, NULL, 0,
3900 		 REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH);
3901 	bio->bi_end_io = btrfs_end_empty_barrier;
3902 	init_completion(&device->flush_wait);
3903 	bio->bi_private = &device->flush_wait;
3904 	submit_bio(bio);
3905 	set_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state);
3906 }
3907 
3908 /*
3909  * If the flush bio has been submitted by write_dev_flush, wait for it.
3910  * Return true for any error, and false otherwise.
3911  */
3912 static bool wait_dev_flush(struct btrfs_device *device)
3913 {
3914 	struct bio *bio = &device->flush_bio;
3915 
3916 	if (!test_and_clear_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state))
3917 		return false;
3918 
3919 	wait_for_completion_io(&device->flush_wait);
3920 
3921 	if (bio->bi_status) {
3922 		device->last_flush_error = bio->bi_status;
3923 		btrfs_dev_stat_inc_and_print(device, BTRFS_DEV_STAT_FLUSH_ERRS);
3924 		return true;
3925 	}
3926 
3927 	return false;
3928 }
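
/*
 * The pair above implements a split submit/wait barrier: write_dev_flush()
 * fires an empty REQ_PREFLUSH bio and records that with the FLUSH_SENT bit,
 * while wait_dev_flush() consumes the bit and sleeps on the completion.
 * The split lets barrier_all_devices() below keep the flushes of all
 * devices in flight concurrently instead of flushing them one by one.
 */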
3929 
3930 /*
3931  * send an empty flush down to each device in parallel,
3932  * then wait for them
3933  */
3934 static int barrier_all_devices(struct btrfs_fs_info *info)
3935 {
3936 	struct list_head *head;
3937 	struct btrfs_device *dev;
3938 	int errors_wait = 0;
3939 
3940 	lockdep_assert_held(&info->fs_devices->device_list_mutex);
3941 	/* send down all the barriers */
3942 	head = &info->fs_devices->devices;
3943 	list_for_each_entry(dev, head, dev_list) {
3944 		if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
3945 			continue;
3946 		if (!dev->bdev)
3947 			continue;
3948 		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
3949 		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
3950 			continue;
3951 
3952 		write_dev_flush(dev);
3953 	}
3954 
3955 	/* wait for all the barriers */
3956 	list_for_each_entry(dev, head, dev_list) {
3957 		if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
3958 			continue;
3959 		if (!dev->bdev) {
3960 			errors_wait++;
3961 			continue;
3962 		}
3963 		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
3964 		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
3965 			continue;
3966 
3967 		if (wait_dev_flush(dev))
3968 			errors_wait++;
3969 	}
3970 
3971 	/*
3972 	 * Checks last_flush_error of disks in order to determine the device
3973 	 * state.
3974 	 */
3975 	if (errors_wait && !btrfs_check_rw_degradable(info, NULL))
3976 		return -EIO;
3977 
3978 	return 0;
3979 }
3980 
3981 int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags)
3982 {
3983 	int raid_type;
3984 	int min_tolerated = INT_MAX;
3985 
3986 	if ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 ||
3987 	    (flags & BTRFS_AVAIL_ALLOC_BIT_SINGLE))
3988 		min_tolerated = min_t(int, min_tolerated,
3989 				    btrfs_raid_array[BTRFS_RAID_SINGLE].
3990 				    tolerated_failures);
3991 
3992 	for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
3993 		if (raid_type == BTRFS_RAID_SINGLE)
3994 			continue;
3995 		if (!(flags & btrfs_raid_array[raid_type].bg_flag))
3996 			continue;
3997 		min_tolerated = min_t(int, min_tolerated,
3998 				    btrfs_raid_array[raid_type].
3999 				    tolerated_failures);
4000 	}
4001 
4002 	if (min_tolerated == INT_MAX) {
4003 		pr_warn("BTRFS: unknown raid flag: %llu\n", flags);
4004 		min_tolerated = 0;
4005 	}
4006 
4007 	return min_tolerated;
4008 }
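
/*
 * Worked example for the lookup above (illustrative): for
 * flags == BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_RAID1 only the
 * RAID1 entry of btrfs_raid_array matches, so the result is its
 * tolerated_failures value (1): one device may fail its barrier and we can
 * still write safely. A SINGLE profile yields 0, no failure is tolerated.
 */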
4009 
4010 int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors)
4011 {
4012 	struct list_head *head;
4013 	struct btrfs_device *dev;
4014 	struct btrfs_super_block *sb;
4015 	struct btrfs_dev_item *dev_item;
4016 	int ret;
4017 	int do_barriers;
4018 	int max_errors;
4019 	int total_errors = 0;
4020 	u64 flags;
4021 
4022 	do_barriers = !btrfs_test_opt(fs_info, NOBARRIER);
4023 
4024 	/*
4025 	 * max_mirrors == 0 indicates we're from commit_transaction,
4026 	 * not from fsync where the tree roots in fs_info may not yet be
4027 	 * consistent on disk.
4028 	 */
4029 	if (max_mirrors == 0)
4030 		backup_super_roots(fs_info);
4031 
4032 	sb = fs_info->super_for_commit;
4033 	dev_item = &sb->dev_item;
4034 
4035 	mutex_lock(&fs_info->fs_devices->device_list_mutex);
4036 	head = &fs_info->fs_devices->devices;
4037 	max_errors = btrfs_super_num_devices(fs_info->super_copy) - 1;
4038 
4039 	if (do_barriers) {
4040 		ret = barrier_all_devices(fs_info);
4041 		if (ret) {
4042 			mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4044 			btrfs_handle_fs_error(fs_info, ret,
4045 					      "errors while submitting device barriers.");
4046 			return ret;
4047 		}
4048 	}
4049 
4050 	list_for_each_entry(dev, head, dev_list) {
4051 		if (!dev->bdev) {
4052 			total_errors++;
4053 			continue;
4054 		}
4055 		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
4056 		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
4057 			continue;
4058 
4059 		btrfs_set_stack_device_generation(dev_item, 0);
4060 		btrfs_set_stack_device_type(dev_item, dev->type);
4061 		btrfs_set_stack_device_id(dev_item, dev->devid);
4062 		btrfs_set_stack_device_total_bytes(dev_item,
4063 						   dev->commit_total_bytes);
4064 		btrfs_set_stack_device_bytes_used(dev_item,
4065 						  dev->commit_bytes_used);
4066 		btrfs_set_stack_device_io_align(dev_item, dev->io_align);
4067 		btrfs_set_stack_device_io_width(dev_item, dev->io_width);
4068 		btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
4069 		memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
4070 		memcpy(dev_item->fsid, dev->fs_devices->metadata_uuid,
4071 		       BTRFS_FSID_SIZE);
4072 
4073 		flags = btrfs_super_flags(sb);
4074 		btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);
4075 
4076 		ret = btrfs_validate_write_super(fs_info, sb);
4077 		if (ret < 0) {
4078 			mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4079 			btrfs_handle_fs_error(fs_info, -EUCLEAN,
4080 				"unexpected superblock corruption detected");
4081 			return -EUCLEAN;
4082 		}
4083 
4084 		ret = write_dev_supers(dev, sb, max_mirrors);
4085 		if (ret)
4086 			total_errors++;
4087 	}
4088 	if (total_errors > max_errors) {
4089 		btrfs_err(fs_info, "%d errors while writing supers",
4090 			  total_errors);
4091 		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4092 
4093 		/* FUA is masked off if unsupported and can't be the reason */
4094 		btrfs_handle_fs_error(fs_info, -EIO,
4095 				      "%d errors while writing supers",
4096 				      total_errors);
4097 		return -EIO;
4098 	}
4099 
4100 	total_errors = 0;
4101 	list_for_each_entry(dev, head, dev_list) {
4102 		if (!dev->bdev)
4103 			continue;
4104 		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
4105 		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
4106 			continue;
4107 
4108 		ret = wait_dev_supers(dev, max_mirrors);
4109 		if (ret)
4110 			total_errors++;
4111 	}
4112 	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4113 	if (total_errors > max_errors) {
4114 		btrfs_handle_fs_error(fs_info, -EIO,
4115 				      "%d errors while writing supers",
4116 				      total_errors);
4117 		return -EIO;
4118 	}
4119 	return 0;
4120 }
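
/*
 * Example of the error budget above (illustrative): max_errors is
 * num_devices - 1, so on a 3 device filesystem up to 2 devices may fail
 * their super block writes (or waits) before the commit is aborted with
 * -EIO via btrfs_handle_fs_error().
 */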
4121 
4122 /* Drop a fs root from the radix tree and free it. */
4123 void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
4124 				  struct btrfs_root *root)
4125 {
4126 	bool drop_ref = false;
4127 
4128 	spin_lock(&fs_info->fs_roots_radix_lock);
4129 	radix_tree_delete(&fs_info->fs_roots_radix,
4130 			  (unsigned long)btrfs_root_id(root));
4131 	if (test_and_clear_bit(BTRFS_ROOT_IN_RADIX, &root->state))
4132 		drop_ref = true;
4133 	spin_unlock(&fs_info->fs_roots_radix_lock);
4134 
4135 	if (BTRFS_FS_ERROR(fs_info)) {
4136 		ASSERT(root->log_root == NULL);
4137 		if (root->reloc_root) {
4138 			btrfs_put_root(root->reloc_root);
4139 			root->reloc_root = NULL;
4140 		}
4141 	}
4142 
4143 	if (drop_ref)
4144 		btrfs_put_root(root);
4145 }
4146 
4147 int btrfs_commit_super(struct btrfs_fs_info *fs_info)
4148 {
4149 	mutex_lock(&fs_info->cleaner_mutex);
4150 	btrfs_run_delayed_iputs(fs_info);
4151 	mutex_unlock(&fs_info->cleaner_mutex);
4152 	wake_up_process(fs_info->cleaner_kthread);
4153 
4154 	/* Wait until any ongoing cleanup work is done. */
4155 	down_write(&fs_info->cleanup_work_sem);
4156 	up_write(&fs_info->cleanup_work_sem);
4157 
4158 	return btrfs_commit_current_transaction(fs_info->tree_root);
4159 }
4160 
4161 static void warn_about_uncommitted_trans(struct btrfs_fs_info *fs_info)
4162 {
4163 	struct btrfs_transaction *trans;
4164 	struct btrfs_transaction *tmp;
4165 	bool found = false;
4166 
4167 	/*
4168 	 * This function is only called at the very end of close_ctree(),
4169 	 * thus there is no running transaction and no need to take trans_lock.
4170 	 */
4171 	ASSERT(test_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags));
4172 	list_for_each_entry_safe(trans, tmp, &fs_info->trans_list, list) {
4173 		struct extent_state *cached = NULL;
4174 		u64 dirty_bytes = 0;
4175 		u64 cur = 0;
4176 		u64 found_start;
4177 		u64 found_end;
4178 
4179 		found = true;
4180 		while (find_first_extent_bit(&trans->dirty_pages, cur,
4181 			&found_start, &found_end, EXTENT_DIRTY, &cached)) {
4182 			dirty_bytes += found_end + 1 - found_start;
4183 			cur = found_end + 1;
4184 		}
4185 		btrfs_warn(fs_info,
4186 	"transaction %llu (with %llu dirty metadata bytes) is not committed",
4187 			   trans->transid, dirty_bytes);
4188 		btrfs_cleanup_one_transaction(trans, fs_info);
4189 
4190 		if (trans == fs_info->running_transaction)
4191 			fs_info->running_transaction = NULL;
4192 		list_del_init(&trans->list);
4193 
4194 		btrfs_put_transaction(trans);
4195 		trace_btrfs_transaction_commit(fs_info);
4196 	}
4197 	ASSERT(!found);
4198 }
4199 
4200 void __cold close_ctree(struct btrfs_fs_info *fs_info)
4201 {
4202 	int ret;
4203 
4204 	set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags);
4205 
4206 	/*
4207 	 * If we had UNFINISHED_DROPS we could still be processing them, so
4208 	 * clear that bit and wake up relocation so it can stop.
4209 	 * We must do this before stopping the block group reclaim task, because
4210 	 * at btrfs_relocate_block_group() we wait for this bit, and after the
4211 	 * wait we stop with -EINTR if btrfs_fs_closing() returns non-zero - we
4212 	 * have just set BTRFS_FS_CLOSING_START, so btrfs_fs_closing() will
4213 	 * return 1.
4214 	 */
4215 	btrfs_wake_unfinished_drop(fs_info);
4216 
4217 	/*
4218 	 * We may have the reclaim task running and relocating a data block group,
4219 	 * in which case it may create delayed iputs. So stop it before we park
4220 	 * the cleaner kthread, otherwise we can get new delayed iputs after
4221 	 * parking the cleaner, and that can make the async reclaim task hang
4222 	 * if it's waiting for delayed iputs to complete, since the cleaner is
4223 	 * parked and can not run delayed iputs - this will make us hang when
4224 	 * trying to stop the async reclaim task.
4225 	 */
4226 	cancel_work_sync(&fs_info->reclaim_bgs_work);
4227 	/*
4228 	 * We don't want the cleaner to start new transactions, add more delayed
4229 	 * iputs, etc. while we're closing. We can't use kthread_stop() yet
4230 	 * because that frees the task_struct, and the transaction kthread might
4231 	 * still try to wake up the cleaner.
4232 	 */
4233 	kthread_park(fs_info->cleaner_kthread);
4234 
4235 	/* wait for the qgroup rescan worker to stop */
4236 	btrfs_qgroup_wait_for_completion(fs_info, false);
4237 
4238 	/* wait for the uuid_scan task to finish */
4239 	down(&fs_info->uuid_tree_rescan_sem);
4240 	/* Avoid complaints from lockdep et al., set sem back to initial state. */
4241 	up(&fs_info->uuid_tree_rescan_sem);
4242 
4243 	/* pause restriper - we want to resume on mount */
4244 	btrfs_pause_balance(fs_info);
4245 
4246 	btrfs_dev_replace_suspend_for_unmount(fs_info);
4247 
4248 	btrfs_scrub_cancel(fs_info);
4249 
4250 	/* wait for any defraggers to finish */
4251 	wait_event(fs_info->transaction_wait,
4252 		   (atomic_read(&fs_info->defrag_running) == 0));
4253 
4254 	/* clear out the rbtree of defraggable inodes */
4255 	btrfs_cleanup_defrag_inodes(fs_info);
4256 
4257 	/*
4258 	 * Handle the error fs first, as it will flush and wait for all ordered
4259 	 * extents.  This will generate delayed iputs, thus we want to handle
4260 	 * it first.
4261 	 */
4262 	if (unlikely(BTRFS_FS_ERROR(fs_info)))
4263 		btrfs_error_commit_super(fs_info);
4264 
4265 	/*
4266 	 * Wait for any fixup workers to complete.
4267 	 * If we don't wait for them here and they are still running by the time
4268 	 * we call kthread_stop() against the cleaner kthread further below, we
4269 	 * get a use-after-free on the cleaner because the fixup worker adds an
4270 	 * inode to the list of delayed iputs and then attempts to wake up the
4271 	 * cleaner kthread, which was already stopped and destroyed. We have
4272 	 * already parked the cleaner, but below we run all pending delayed iputs.
4273 	 */
4274 	btrfs_flush_workqueue(fs_info->fixup_workers);
4275 	/*
4276 	 * Similar case here, we have to wait for delalloc workers before we
4277 	 * proceed below and stop the cleaner kthread, otherwise we trigger a
4278 	 * use-after-free on the cleaner kthread task_struct when a delalloc
4279 	 * worker running submit_compressed_extents() adds a delayed iput, which
4280 	 * does a wake up on the cleaner kthread, which by then was already
4281 	 * freed by the kthread_stop() call below.
4282 	 */
4283 	btrfs_flush_workqueue(fs_info->delalloc_workers);
4284 
4285 	/*
4286 	 * We can have ordered extents getting their last reference dropped from
4287 	 * the fs_info->workers queue because for async writes for data bios we
4288 	 * queue a work for that queue, at btrfs_wq_submit_bio(), that runs
4289 	 * run_one_async_done() which calls btrfs_bio_end_io() in case the bio
4290 	 * has an error, and that later function can do the final
4291 	 * btrfs_put_ordered_extent() on the ordered extent attached to the bio,
4292 	 * which adds a delayed iput for the inode. So we must flush the queue
4293 	 * so that we don't have delayed iputs after committing the current
4294 	 * transaction below and stopping the cleaner and transaction kthreads.
4295 	 */
4296 	btrfs_flush_workqueue(fs_info->workers);
4297 
4298 	/*
4299 	 * When finishing a compressed write bio we schedule a work queue item
4300 	 * to finish an ordered extent - btrfs_finish_compressed_write_work()
4301 	 * calls btrfs_finish_ordered_extent(), which in turn calls
4302 	 * btrfs_queue_ordered_fn(), and that queues the ordered extent
4303 	 * completion either in the endio_write_workers work queue or in the
4304 	 * fs_info->endio_freespace_worker work queue. We flush those queues
4305 	 * below, so before we flush them we must flush this queue for the
4306 	 * workers of compressed writes.
4307 	 */
4308 	flush_workqueue(fs_info->compressed_write_workers);
4309 
4310 	/*
4311 	 * After we parked the cleaner kthread, ordered extents may have
4312 	 * completed and created new delayed iputs. If one of the async reclaim
4313 	 * tasks is running and in the RUN_DELAYED_IPUTS flush state, then we
4314 	 * can hang forever trying to stop it, because if a delayed iput is
4315 	 * added after it ran btrfs_run_delayed_iputs() and before it called
4316 	 * btrfs_wait_on_delayed_iputs(), it will hang forever since there is
4317 	 * no one else to run iputs.
4318 	 *
4319 	 * So wait for all ongoing ordered extents to complete and then run
4320 	 * delayed iputs. This works because once we reach this point no one
4321 	 * can either create new ordered extents nor create delayed iputs
4322 	 * through some other means.
4323 	 *
4324 	 * Also note that btrfs_wait_ordered_roots() is not safe here, because
4325 	 * it waits for BTRFS_ORDERED_COMPLETE to be set on an ordered extent,
4326 	 * but the delayed iput for the respective inode is made only when doing
4327 	 * the final btrfs_put_ordered_extent() (which must happen at
4328 	 * btrfs_finish_ordered_io() when we are unmounting).
4329 	 */
4330 	btrfs_flush_workqueue(fs_info->endio_write_workers);
4331 	/* Ordered extents for free space inodes. */
4332 	btrfs_flush_workqueue(fs_info->endio_freespace_worker);
4333 	btrfs_run_delayed_iputs(fs_info);
4334 
4335 	cancel_work_sync(&fs_info->async_reclaim_work);
4336 	cancel_work_sync(&fs_info->async_data_reclaim_work);
4337 	cancel_work_sync(&fs_info->preempt_reclaim_work);
4338 	cancel_work_sync(&fs_info->extent_map_shrinker_work);
4339 
4340 	/* Cancel or finish ongoing discard work */
4341 	btrfs_discard_cleanup(fs_info);
4342 
4343 	if (!sb_rdonly(fs_info->sb)) {
4344 		/*
4345 		 * The cleaner kthread is stopped, so do one final pass over
4346 		 * unused block groups.
4347 		 */
4348 		btrfs_delete_unused_bgs(fs_info);
4349 
4350 		/*
4351 		 * There might be existing delayed inode workers still running
4352 		 * and holding an empty delayed inode item. We must wait for
4353 		 * them to complete first because they can create a transaction.
4354 		 * This happens when someone calls btrfs_balance_delayed_items()
4355 		 * and then a transaction commit runs the same delayed nodes
4356 		 * before any delayed worker has done something with the nodes.
4357 		 * We must wait for any worker here and not at transaction
4358 		 * commit time since that could cause a deadlock.
4359 		 * This is a very rare case.
4360 		 */
4361 		btrfs_flush_workqueue(fs_info->delayed_workers);
4362 
4363 		ret = btrfs_commit_super(fs_info);
4364 		if (ret)
4365 			btrfs_err(fs_info, "commit super ret %d", ret);
4366 	}
4367 
4368 	kthread_stop(fs_info->transaction_kthread);
4369 	kthread_stop(fs_info->cleaner_kthread);
4370 
4371 	ASSERT(list_empty(&fs_info->delayed_iputs));
4372 	set_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags);
4373 
4374 	if (btrfs_check_quota_leak(fs_info)) {
4375 		WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
4376 		btrfs_err(fs_info, "qgroup reserved space leaked");
4377 	}
4378 
4379 	btrfs_free_qgroup_config(fs_info);
4380 	ASSERT(list_empty(&fs_info->delalloc_roots));
4381 
4382 	if (percpu_counter_sum(&fs_info->delalloc_bytes)) {
4383 		btrfs_info(fs_info, "at unmount delalloc count %lld",
4384 		       percpu_counter_sum(&fs_info->delalloc_bytes));
4385 	}
4386 
4387 	if (percpu_counter_sum(&fs_info->ordered_bytes))
4388 		btrfs_info(fs_info, "at unmount dio bytes count %lld",
4389 			   percpu_counter_sum(&fs_info->ordered_bytes));
4390 
4391 	btrfs_sysfs_remove_mounted(fs_info);
4392 	btrfs_sysfs_remove_fsid(fs_info->fs_devices);
4393 
4394 	btrfs_put_block_group_cache(fs_info);
4395 
4396 	/*
4397 	 * We must make sure there is no read request left to
4398 	 * submit after we stop all the workers.
4399 	 */
4400 	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
4401 	btrfs_stop_all_workers(fs_info);
4402 
4403 	/* We shouldn't have any transaction open at this point */
4404 	warn_about_uncommitted_trans(fs_info);
4405 
4406 	clear_bit(BTRFS_FS_OPEN, &fs_info->flags);
4407 	free_root_pointers(fs_info, true);
4408 	btrfs_free_fs_roots(fs_info);
4409 
4410 	/*
4411 	 * We must free the block groups after dropping the fs_roots as we could
4412 	 * have had an IO error and have left over tree log blocks that aren't
4413 	 * cleaned up until the fs roots are freed.  This makes the block group
4414 	 * accounting appear to be wrong because there's pending reserved bytes,
4415 	 * so make sure we do the block group cleanup afterwards.
4416 	 */
4417 	btrfs_free_block_groups(fs_info);
4418 
4419 	iput(fs_info->btree_inode);
4420 
4421 	btrfs_mapping_tree_free(fs_info);
4422 	btrfs_close_devices(fs_info->fs_devices);
4423 }
4424 
4425 void btrfs_mark_buffer_dirty(struct btrfs_trans_handle *trans,
4426 			     struct extent_buffer *buf)
4427 {
4428 	struct btrfs_fs_info *fs_info = buf->fs_info;
4429 	u64 transid = btrfs_header_generation(buf);
4430 
4431 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
4432 	/*
4433 	 * This is a fast path so only do this check if we have sanity tests
4434 	 * enabled.  Normal people shouldn't be using unmapped buffers as dirty
4435 	 * outside of the sanity tests.
4436 	 */
4437 	if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &buf->bflags)))
4438 		return;
4439 #endif
4440 	/* This is an active transaction (its state < TRANS_STATE_UNBLOCKED). */
4441 	ASSERT(trans->transid == fs_info->generation);
4442 	btrfs_assert_tree_write_locked(buf);
4443 	if (unlikely(transid != fs_info->generation)) {
4444 		btrfs_abort_transaction(trans, -EUCLEAN);
4445 		btrfs_crit(fs_info,
4446 "dirty buffer transid mismatch, logical %llu found transid %llu running transid %llu",
4447 			   buf->start, transid, fs_info->generation);
4448 	}
4449 	set_extent_buffer_dirty(buf);
4450 }
4451 
4452 static void __btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info,
4453 					int flush_delayed)
4454 {
4455 	/*
4456 	 * Looks as though older kernels can get into trouble with
4457 	 * this code; they end up stuck in balance_dirty_pages forever.
4458 	 */
4459 	int ret;
4460 
4461 	if (current->flags & PF_MEMALLOC)
4462 		return;
4463 
4464 	if (flush_delayed)
4465 		btrfs_balance_delayed_items(fs_info);
4466 
4467 	ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
4468 				     BTRFS_DIRTY_METADATA_THRESH,
4469 				     fs_info->dirty_metadata_batch);
4470 	if (ret > 0)
4471 		balance_dirty_pages_ratelimited(fs_info->btree_inode->i_mapping);
4473 }
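
/*
 * A sketch of the throttling above (threshold assumed from fs.h, where
 * BTRFS_DIRTY_METADATA_THRESH is 32M): __percpu_counter_compare() returns a
 * positive value once more than 32M of metadata is dirty, and only then do
 * we enter balance_dirty_pages_ratelimited() on the btree inode mapping.
 * The two wrappers below differ only in whether delayed items are flushed
 * first.
 */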
4474 
4475 void btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info)
4476 {
4477 	__btrfs_btree_balance_dirty(fs_info, 1);
4478 }
4479 
4480 void btrfs_btree_balance_dirty_nodelay(struct btrfs_fs_info *fs_info)
4481 {
4482 	__btrfs_btree_balance_dirty(fs_info, 0);
4483 }
4484 
4485 static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info)
4486 {
4487 	/* cleanup FS via transaction */
4488 	btrfs_cleanup_transaction(fs_info);
4489 
4490 	down_write(&fs_info->cleanup_work_sem);
4491 	up_write(&fs_info->cleanup_work_sem);
4492 }
4493 
4494 static void btrfs_drop_all_logs(struct btrfs_fs_info *fs_info)
4495 {
4496 	struct btrfs_root *gang[8];
4497 	u64 root_objectid = 0;
4498 	int ret;
4499 
4500 	spin_lock(&fs_info->fs_roots_radix_lock);
4501 	while ((ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
4502 					     (void **)gang, root_objectid,
4503 					     ARRAY_SIZE(gang))) != 0) {
4504 		int i;
4505 
4506 		for (i = 0; i < ret; i++)
4507 			gang[i] = btrfs_grab_root(gang[i]);
4508 		spin_unlock(&fs_info->fs_roots_radix_lock);
4509 
4510 		for (i = 0; i < ret; i++) {
4511 			if (!gang[i])
4512 				continue;
4513 			root_objectid = btrfs_root_id(gang[i]);
4514 			btrfs_free_log(NULL, gang[i]);
4515 			btrfs_put_root(gang[i]);
4516 		}
4517 		root_objectid++;
4518 		spin_lock(&fs_info->fs_roots_radix_lock);
4519 	}
4520 	spin_unlock(&fs_info->fs_roots_radix_lock);
4521 	btrfs_free_log_root_tree(NULL, fs_info);
4522 }
4523 
4524 static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
4525 {
4526 	struct btrfs_ordered_extent *ordered;
4527 
4528 	spin_lock(&root->ordered_extent_lock);
4529 	/*
4530 	 * This will just short circuit the ordered completion code, which will
4531 	 * make sure the ordered extent gets properly cleaned up.
4532 	 */
4533 	list_for_each_entry(ordered, &root->ordered_extents,
4534 			    root_extent_list)
4535 		set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
4536 	spin_unlock(&root->ordered_extent_lock);
4537 }
4538 
4539 static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
4540 {
4541 	struct btrfs_root *root;
4542 	LIST_HEAD(splice);
4543 
4544 	spin_lock(&fs_info->ordered_root_lock);
4545 	list_splice_init(&fs_info->ordered_roots, &splice);
4546 	while (!list_empty(&splice)) {
4547 		root = list_first_entry(&splice, struct btrfs_root,
4548 					ordered_root);
4549 		list_move_tail(&root->ordered_root,
4550 			       &fs_info->ordered_roots);
4551 
4552 		spin_unlock(&fs_info->ordered_root_lock);
4553 		btrfs_destroy_ordered_extents(root);
4554 
4555 		cond_resched();
4556 		spin_lock(&fs_info->ordered_root_lock);
4557 	}
4558 	spin_unlock(&fs_info->ordered_root_lock);
4559 
4560 	/*
4561 	 * We need this here because if we've been flipped read-only we won't
4562 	 * get sync() from the umount, so we need to make sure any ordered
4563 	 * extents that haven't had their dirty pages IO start writeout yet
4564 	 * actually get run and error out properly.
4565 	 */
4566 	btrfs_wait_ordered_roots(fs_info, U64_MAX, NULL);
4567 }
4568 
4569 static void btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
4570 				       struct btrfs_fs_info *fs_info)
4571 {
4572 	struct rb_node *node;
4573 	struct btrfs_delayed_ref_root *delayed_refs = &trans->delayed_refs;
4574 	struct btrfs_delayed_ref_node *ref;
4575 
4576 	spin_lock(&delayed_refs->lock);
4577 	while ((node = rb_first_cached(&delayed_refs->href_root)) != NULL) {
4578 		struct btrfs_delayed_ref_head *head;
4579 		struct rb_node *n;
4580 		bool pin_bytes = false;
4581 
4582 		head = rb_entry(node, struct btrfs_delayed_ref_head,
4583 				href_node);
4584 		if (btrfs_delayed_ref_lock(delayed_refs, head))
4585 			continue;
4586 
4587 		spin_lock(&head->lock);
4588 		while ((n = rb_first_cached(&head->ref_tree)) != NULL) {
4589 			ref = rb_entry(n, struct btrfs_delayed_ref_node,
4590 				       ref_node);
4591 			rb_erase_cached(&ref->ref_node, &head->ref_tree);
4592 			RB_CLEAR_NODE(&ref->ref_node);
4593 			if (!list_empty(&ref->add_list))
4594 				list_del(&ref->add_list);
4595 			atomic_dec(&delayed_refs->num_entries);
4596 			btrfs_put_delayed_ref(ref);
4597 			btrfs_delayed_refs_rsv_release(fs_info, 1, 0);
4598 		}
4599 		if (head->must_insert_reserved)
4600 			pin_bytes = true;
4601 		btrfs_free_delayed_extent_op(head->extent_op);
4602 		btrfs_delete_ref_head(delayed_refs, head);
4603 		spin_unlock(&head->lock);
4604 		spin_unlock(&delayed_refs->lock);
4605 		mutex_unlock(&head->mutex);
4606 
4607 		if (pin_bytes) {
4608 			struct btrfs_block_group *cache;
4609 
4610 			cache = btrfs_lookup_block_group(fs_info, head->bytenr);
4611 			BUG_ON(!cache);
4612 
4613 			spin_lock(&cache->space_info->lock);
4614 			spin_lock(&cache->lock);
4615 			cache->pinned += head->num_bytes;
4616 			btrfs_space_info_update_bytes_pinned(fs_info,
4617 				cache->space_info, head->num_bytes);
4618 			cache->reserved -= head->num_bytes;
4619 			cache->space_info->bytes_reserved -= head->num_bytes;
4620 			spin_unlock(&cache->lock);
4621 			spin_unlock(&cache->space_info->lock);
4622 
4623 			btrfs_put_block_group(cache);
4624 
4625 			btrfs_error_unpin_extent_range(fs_info, head->bytenr,
4626 				head->bytenr + head->num_bytes - 1);
4627 		}
4628 		btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);
4629 		btrfs_put_delayed_ref_head(head);
4630 		cond_resched();
4631 		spin_lock(&delayed_refs->lock);
4632 	}
4633 	btrfs_qgroup_destroy_extent_records(trans);
4634 
4635 	spin_unlock(&delayed_refs->lock);
4636 }
4637 
4638 static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
4639 {
4640 	struct btrfs_inode *btrfs_inode;
4641 	LIST_HEAD(splice);
4642 
4643 	spin_lock(&root->delalloc_lock);
4644 	list_splice_init(&root->delalloc_inodes, &splice);
4645 
4646 	while (!list_empty(&splice)) {
4647 		struct inode *inode = NULL;
4648 		btrfs_inode = list_first_entry(&splice, struct btrfs_inode,
4649 					       delalloc_inodes);
4650 		btrfs_del_delalloc_inode(btrfs_inode);
4651 		spin_unlock(&root->delalloc_lock);
4652 
4653 		/*
4654 		 * Make sure we get a live inode and that it'll not disappear
4655 		 * meanwhile.
4656 		 */
4657 		inode = igrab(&btrfs_inode->vfs_inode);
4658 		if (inode) {
4659 			unsigned int nofs_flag;
4660 
4661 			nofs_flag = memalloc_nofs_save();
4662 			invalidate_inode_pages2(inode->i_mapping);
4663 			memalloc_nofs_restore(nofs_flag);
4664 			iput(inode);
4665 		}
4666 		spin_lock(&root->delalloc_lock);
4667 	}
4668 	spin_unlock(&root->delalloc_lock);
4669 }
4670 
4671 static void btrfs_destroy_all_delalloc_inodes(struct btrfs_fs_info *fs_info)
4672 {
4673 	struct btrfs_root *root;
4674 	LIST_HEAD(splice);
4675 
4676 	spin_lock(&fs_info->delalloc_root_lock);
4677 	list_splice_init(&fs_info->delalloc_roots, &splice);
4678 	while (!list_empty(&splice)) {
4679 		root = list_first_entry(&splice, struct btrfs_root,
4680 					 delalloc_root);
4681 		root = btrfs_grab_root(root);
4682 		BUG_ON(!root);
4683 		spin_unlock(&fs_info->delalloc_root_lock);
4684 
4685 		btrfs_destroy_delalloc_inodes(root);
4686 		btrfs_put_root(root);
4687 
4688 		spin_lock(&fs_info->delalloc_root_lock);
4689 	}
4690 	spin_unlock(&fs_info->delalloc_root_lock);
4691 }
4692 
4693 static void btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
4694 					 struct extent_io_tree *dirty_pages,
4695 					 int mark)
4696 {
4697 	struct extent_buffer *eb;
4698 	u64 start = 0;
4699 	u64 end;
4700 
4701 	while (find_first_extent_bit(dirty_pages, start, &start, &end,
4702 				     mark, NULL)) {
4703 		clear_extent_bits(dirty_pages, start, end, mark);
4704 		while (start <= end) {
4705 			eb = find_extent_buffer(fs_info, start);
4706 			start += fs_info->nodesize;
4707 			if (!eb)
4708 				continue;
4709 
4710 			btrfs_tree_lock(eb);
4711 			wait_on_extent_buffer_writeback(eb);
4712 			btrfs_clear_buffer_dirty(NULL, eb);
4713 			btrfs_tree_unlock(eb);
4714 
4715 			free_extent_buffer_stale(eb);
4716 		}
4717 	}
4718 }
4719 
4720 static void btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
4721 					struct extent_io_tree *unpin)
4722 {
4723 	u64 start;
4724 	u64 end;
4725 
4726 	while (1) {
4727 		struct extent_state *cached_state = NULL;
4728 
4729 		/*
4730 		 * btrfs_finish_extent_commit() may get the same range as
4731 		 * ours between find_first_extent_bit and clear_extent_dirty.
4732 		 * Hence, hold the unused_bg_unpin_mutex to avoid double
4733 		 * unpinning of the same extent range.
4734 		 */
4735 		mutex_lock(&fs_info->unused_bg_unpin_mutex);
4736 		if (!find_first_extent_bit(unpin, 0, &start, &end,
4737 					   EXTENT_DIRTY, &cached_state)) {
4738 			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
4739 			break;
4740 		}
4741 
4742 		clear_extent_dirty(unpin, start, end, &cached_state);
4743 		free_extent_state(cached_state);
4744 		btrfs_error_unpin_extent_range(fs_info, start, end);
4745 		mutex_unlock(&fs_info->unused_bg_unpin_mutex);
4746 		cond_resched();
4747 	}
4748 }
4749 
4750 static void btrfs_cleanup_bg_io(struct btrfs_block_group *cache)
4751 {
4752 	struct inode *inode;
4753 
4754 	inode = cache->io_ctl.inode;
4755 	if (inode) {
4756 		unsigned int nofs_flag;
4757 
4758 		nofs_flag = memalloc_nofs_save();
4759 		invalidate_inode_pages2(inode->i_mapping);
4760 		memalloc_nofs_restore(nofs_flag);
4761 
4762 		BTRFS_I(inode)->generation = 0;
4763 		cache->io_ctl.inode = NULL;
4764 		iput(inode);
4765 	}
4766 	ASSERT(cache->io_ctl.pages == NULL);
4767 	btrfs_put_block_group(cache);
4768 }
4769 
4770 void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
4771 			     struct btrfs_fs_info *fs_info)
4772 {
4773 	struct btrfs_block_group *cache;
4774 
4775 	spin_lock(&cur_trans->dirty_bgs_lock);
4776 	while (!list_empty(&cur_trans->dirty_bgs)) {
4777 		cache = list_first_entry(&cur_trans->dirty_bgs,
4778 					 struct btrfs_block_group,
4779 					 dirty_list);
4780 
4781 		if (!list_empty(&cache->io_list)) {
4782 			spin_unlock(&cur_trans->dirty_bgs_lock);
4783 			list_del_init(&cache->io_list);
4784 			btrfs_cleanup_bg_io(cache);
4785 			spin_lock(&cur_trans->dirty_bgs_lock);
4786 		}
4787 
4788 		list_del_init(&cache->dirty_list);
4789 		spin_lock(&cache->lock);
4790 		cache->disk_cache_state = BTRFS_DC_ERROR;
4791 		spin_unlock(&cache->lock);
4792 
4793 		spin_unlock(&cur_trans->dirty_bgs_lock);
4794 		btrfs_put_block_group(cache);
4795 		btrfs_dec_delayed_refs_rsv_bg_updates(fs_info);
4796 		spin_lock(&cur_trans->dirty_bgs_lock);
4797 	}
4798 	spin_unlock(&cur_trans->dirty_bgs_lock);
4799 
4800 	/*
4801 	 * Refer to the definition of the io_bgs member for details on why
4802 	 * it's safe to use it without any locking.
4803 	 */
4804 	while (!list_empty(&cur_trans->io_bgs)) {
4805 		cache = list_first_entry(&cur_trans->io_bgs,
4806 					 struct btrfs_block_group,
4807 					 io_list);
4808 
4809 		list_del_init(&cache->io_list);
4810 		spin_lock(&cache->lock);
4811 		cache->disk_cache_state = BTRFS_DC_ERROR;
4812 		spin_unlock(&cache->lock);
4813 		btrfs_cleanup_bg_io(cache);
4814 	}
4815 }
4816 
4817 static void btrfs_free_all_qgroup_pertrans(struct btrfs_fs_info *fs_info)
4818 {
4819 	struct btrfs_root *gang[8];
4820 	int i;
4821 	int ret;
4822 
4823 	spin_lock(&fs_info->fs_roots_radix_lock);
4824 	while (1) {
4825 		ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
4826 						 (void **)gang, 0,
4827 						 ARRAY_SIZE(gang),
4828 						 BTRFS_ROOT_TRANS_TAG);
4829 		if (ret == 0)
4830 			break;
4831 		for (i = 0; i < ret; i++) {
4832 			struct btrfs_root *root = gang[i];
4833 
4834 			btrfs_qgroup_free_meta_all_pertrans(root);
4835 			radix_tree_tag_clear(&fs_info->fs_roots_radix,
4836 					(unsigned long)btrfs_root_id(root),
4837 					BTRFS_ROOT_TRANS_TAG);
4838 		}
4839 	}
4840 	spin_unlock(&fs_info->fs_roots_radix_lock);
4841 }
4842 
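/*
 * Tear down a single transaction after an abort: clean up dirty block
 * groups, delayed refs and marked/pinned extents, and walk the state
 * machine to TRANS_STATE_COMPLETED, waking up waiters at each step.
 */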
void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
				   struct btrfs_fs_info *fs_info)
{
	struct btrfs_device *dev, *tmp;

	btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
	ASSERT(list_empty(&cur_trans->dirty_bgs));
	ASSERT(list_empty(&cur_trans->io_bgs));

	list_for_each_entry_safe(dev, tmp, &cur_trans->dev_update_list,
				 post_commit_list) {
		list_del_init(&dev->post_commit_list);
	}

	btrfs_destroy_delayed_refs(cur_trans, fs_info);

	cur_trans->state = TRANS_STATE_COMMIT_START;
	wake_up(&fs_info->transaction_blocked_wait);

	cur_trans->state = TRANS_STATE_UNBLOCKED;
	wake_up(&fs_info->transaction_wait);

	btrfs_destroy_marked_extents(fs_info, &cur_trans->dirty_pages,
				     EXTENT_DIRTY);
	btrfs_destroy_pinned_extent(fs_info, &cur_trans->pinned_extents);

	cur_trans->state = TRANS_STATE_COMPLETED;
	wake_up(&cur_trans->commit_wait);
}

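/*
 * Clean up every transaction left on fs_info->trans_list after an abort.
 * Transactions that already entered the commit path are waited for, the
 * rest are torn down with btrfs_cleanup_one_transaction().
 */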
static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info)
{
	struct btrfs_transaction *t;

	mutex_lock(&fs_info->transaction_kthread_mutex);

	spin_lock(&fs_info->trans_lock);
	while (!list_empty(&fs_info->trans_list)) {
		t = list_first_entry(&fs_info->trans_list,
				     struct btrfs_transaction, list);
		if (t->state >= TRANS_STATE_COMMIT_PREP) {
			refcount_inc(&t->use_count);
			spin_unlock(&fs_info->trans_lock);
			btrfs_wait_for_commit(fs_info, t->transid);
			btrfs_put_transaction(t);
			spin_lock(&fs_info->trans_lock);
			continue;
		}
		if (t == fs_info->running_transaction) {
			t->state = TRANS_STATE_COMMIT_DOING;
			spin_unlock(&fs_info->trans_lock);
			/*
			 * We wait for 0 num_writers since we don't hold a trans
			 * handle open currently for this transaction.
			 */
			wait_event(t->writer_wait,
				   atomic_read(&t->num_writers) == 0);
		} else {
			spin_unlock(&fs_info->trans_lock);
		}
		btrfs_cleanup_one_transaction(t, fs_info);

		spin_lock(&fs_info->trans_lock);
		if (t == fs_info->running_transaction)
			fs_info->running_transaction = NULL;
		list_del_init(&t->list);
		spin_unlock(&fs_info->trans_lock);

		btrfs_put_transaction(t);
		trace_btrfs_transaction_commit(fs_info);
		spin_lock(&fs_info->trans_lock);
	}
	spin_unlock(&fs_info->trans_lock);
	btrfs_destroy_all_ordered_extents(fs_info);
	btrfs_destroy_delayed_inodes(fs_info);
	btrfs_assert_delayed_root_empty(fs_info);
	btrfs_destroy_all_delalloc_inodes(fs_info);
	btrfs_drop_all_logs(fs_info);
	btrfs_free_all_qgroup_pertrans(fs_info);
	mutex_unlock(&fs_info->transaction_kthread_mutex);

	return 0;
}

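/*
 * Initialize root->free_objectid from the highest objectid currently in
 * the tree: search for the last possible key and use the objectid right
 * after the last item found (but at least BTRFS_FIRST_FREE_OBJECTID).
 */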
int btrfs_init_root_free_objectid(struct btrfs_root *root)
{
	struct btrfs_path *path;
	int ret;
	struct extent_buffer *l;
	struct btrfs_key search_key;
	struct btrfs_key found_key;
	int slot;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	search_key.objectid = BTRFS_LAST_FREE_OBJECTID;
	search_key.type = -1;
	search_key.offset = (u64)-1;
	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	if (ret < 0)
		goto error;
	if (ret == 0) {
		/*
		 * Key with offset -1 found, there would have to exist a root
		 * with such id, but this is out of valid range.
		 */
		ret = -EUCLEAN;
		goto error;
	}
	if (path->slots[0] > 0) {
		slot = path->slots[0] - 1;
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);
		root->free_objectid = max_t(u64, found_key.objectid + 1,
					    BTRFS_FIRST_FREE_OBJECTID);
	} else {
		root->free_objectid = BTRFS_FIRST_FREE_OBJECTID;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

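/*
 * Hand out the next free objectid of a root, or -ENOSPC once the root
 * has exhausted the valid objectid range.
 */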
int btrfs_get_free_objectid(struct btrfs_root *root, u64 *objectid)
{
	int ret;

	mutex_lock(&root->objectid_mutex);

	if (unlikely(root->free_objectid >= BTRFS_LAST_FREE_OBJECTID)) {
		btrfs_warn(root->fs_info,
			   "the objectid of root %llu reaches its highest value",
			   btrfs_root_id(root));
		ret = -ENOSPC;
		goto out;
	}

	*objectid = root->free_objectid++;
	ret = 0;
out:
	mutex_unlock(&root->objectid_mutex);
	return ret;
}