/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>
#include <linux/swap.h>
#include <linux/radix-tree.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/migrate.h>
#include <linux/ratelimit.h>
#include <linux/uuid.h>
#include <linux/semaphore.h>
#include <asm/unaligned.h>
#include "ctree.h"
#include "disk-io.h"
#include "hash.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "print-tree.h"
#include "locking.h"
#include "tree-log.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
#include "inode-map.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "raid56.h"
#include "sysfs.h"
#include "qgroup.h"
#include "compression.h"
#include "tree-checker.h"

#ifdef CONFIG_X86
#include <asm/cpufeature.h>
#endif

#define BTRFS_SUPER_FLAG_SUPP	(BTRFS_HEADER_FLAG_WRITTEN |\
				 BTRFS_HEADER_FLAG_RELOC |\
				 BTRFS_SUPER_FLAG_ERROR |\
				 BTRFS_SUPER_FLAG_SEEDING |\
				 BTRFS_SUPER_FLAG_METADUMP |\
				 BTRFS_SUPER_FLAG_METADUMP_V2)

static const struct extent_io_ops btree_extent_io_ops;
static void end_workqueue_fn(struct btrfs_work *work);
static void free_fs_root(struct btrfs_root *root);
static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info);
static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
				      struct btrfs_fs_info *fs_info);
static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
					struct extent_io_tree *dirty_pages,
					int mark);
static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
				       struct extent_io_tree *pinned_extents);
static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info);
static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info);

/*
 * btrfs_end_io_wq structs are used to do processing in task context when an IO
 * is complete.  This is used during reads to verify checksums, and it is used
 * by writes to insert metadata for new file extents after IO is complete.
 */
struct btrfs_end_io_wq {
	struct bio *bio;
	bio_end_io_t *end_io;
	void *private;
	struct btrfs_fs_info *info;
	blk_status_t status;
	enum btrfs_wq_endio_type metadata;
	struct btrfs_work work;
};

static struct kmem_cache *btrfs_end_io_wq_cache;

int __init btrfs_end_io_wq_init(void)
{
	btrfs_end_io_wq_cache = kmem_cache_create("btrfs_end_io_wq",
					sizeof(struct btrfs_end_io_wq),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_end_io_wq_cache)
		return -ENOMEM;
	return 0;
}

void btrfs_end_io_wq_exit(void)
{
	kmem_cache_destroy(btrfs_end_io_wq_cache);
}

/*
 * async submit bios are used to offload expensive checksumming
 * onto the worker threads.  They checksum file and metadata bios
 * just before they are sent down the IO stack.
 */
struct async_submit_bio {
	void *private_data;
	struct btrfs_fs_info *fs_info;
	struct bio *bio;
	extent_submit_bio_hook_t *submit_bio_start;
	extent_submit_bio_hook_t *submit_bio_done;
	int mirror_num;
	unsigned long bio_flags;
	/*
	 * bio_offset is optional, can be used if the pages in the bio
	 * can't tell us where in the file the bio should go
	 */
	u64 bio_offset;
	struct btrfs_work work;
	blk_status_t status;
};

/*
 * Lockdep class keys for extent_buffer->lock's in this root.  For a given
 * eb, the lockdep key is determined by the btrfs_root it belongs to and
 * the level the eb occupies in the tree.
 *
 * Different roots are used for different purposes and may nest inside each
 * other and they require separate keysets.  As lockdep keys should be
 * static, assign keysets according to the purpose of the root as indicated
 * by btrfs_root->objectid.  This ensures that all special purpose roots
 * have separate keysets.
 *
 * Lock-nesting across peer nodes is always done with the immediate parent
 * node locked thus preventing deadlock.  As lockdep doesn't know this, use
 * subclass to avoid triggering lockdep warning in such cases.
 *
 * The key is set by the readpage_end_io_hook after the buffer has passed
 * csum validation but before the pages are unlocked.  It is also set by
 * btrfs_init_new_buffer on freshly allocated blocks.
 *
 * We also add a check to make sure the highest level of the tree is the
 * same as our lockdep setup here.  If BTRFS_MAX_LEVEL changes, this code
 * needs update as well.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# if BTRFS_MAX_LEVEL != 8
#  error
# endif

static struct btrfs_lockdep_keyset {
	u64			id;		/* root objectid */
	const char		*name_stem;	/* lock name stem */
	char			names[BTRFS_MAX_LEVEL + 1][20];
	struct lock_class_key	keys[BTRFS_MAX_LEVEL + 1];
} btrfs_lockdep_keysets[] = {
	{ .id = BTRFS_ROOT_TREE_OBJECTID,	.name_stem = "root"	},
	{ .id = BTRFS_EXTENT_TREE_OBJECTID,	.name_stem = "extent"	},
	{ .id = BTRFS_CHUNK_TREE_OBJECTID,	.name_stem = "chunk"	},
	{ .id = BTRFS_DEV_TREE_OBJECTID,	.name_stem = "dev"	},
	{ .id = BTRFS_FS_TREE_OBJECTID,		.name_stem = "fs"	},
	{ .id = BTRFS_CSUM_TREE_OBJECTID,	.name_stem = "csum"	},
	{ .id = BTRFS_QUOTA_TREE_OBJECTID,	.name_stem = "quota"	},
	{ .id = BTRFS_TREE_LOG_OBJECTID,	.name_stem = "log"	},
	{ .id = BTRFS_TREE_RELOC_OBJECTID,	.name_stem = "treloc"	},
	{ .id = BTRFS_DATA_RELOC_TREE_OBJECTID,	.name_stem = "dreloc"	},
	{ .id = BTRFS_UUID_TREE_OBJECTID,	.name_stem = "uuid"	},
	{ .id = BTRFS_FREE_SPACE_TREE_OBJECTID,	.name_stem = "free-space" },
	{ .id = 0,				.name_stem = "tree"	},
};

void __init btrfs_init_lockdep(void)
{
	int i, j;

	/* initialize lockdep class names */
	for (i = 0; i < ARRAY_SIZE(btrfs_lockdep_keysets); i++) {
		struct btrfs_lockdep_keyset *ks = &btrfs_lockdep_keysets[i];

		for (j = 0; j < ARRAY_SIZE(ks->names); j++)
			snprintf(ks->names[j], sizeof(ks->names[j]),
				 "btrfs-%s-%02d", ks->name_stem, j);
	}
}

void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
				    int level)
{
	struct btrfs_lockdep_keyset *ks;

	BUG_ON(level >= ARRAY_SIZE(ks->keys));

	/* find the matching keyset, id 0 is the default entry */
	for (ks = btrfs_lockdep_keysets; ks->id; ks++)
		if (ks->id == objectid)
			break;

	lockdep_set_class_and_name(&eb->lock,
				   &ks->keys[level], ks->names[level]);
}

#endif

/*
 * extents on the btree inode are pretty simple, there's one extent
 * that covers the entire device
 */
static struct extent_map *btree_get_extent(struct btrfs_inode *inode,
		struct page *page, size_t pg_offset, u64 start, u64 len,
		int create)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
	struct extent_map_tree *em_tree = &inode->extent_tree;
	struct extent_map *em;
	int ret;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);
	if (em) {
		em->bdev = fs_info->fs_devices->latest_bdev;
		read_unlock(&em_tree->lock);
		goto out;
	}
	read_unlock(&em_tree->lock);

	em = alloc_extent_map();
	if (!em) {
		em = ERR_PTR(-ENOMEM);
		goto out;
	}
	em->start = 0;
	em->len = (u64)-1;
	em->block_len = (u64)-1;
	em->block_start = 0;
	em->bdev = fs_info->fs_devices->latest_bdev;

	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em, 0);
	if (ret == -EEXIST) {
		free_extent_map(em);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em)
			em = ERR_PTR(-EIO);
	} else if (ret) {
		free_extent_map(em);
		em = ERR_PTR(ret);
	}
	write_unlock(&em_tree->lock);

out:
	return em;
}

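/*
 * Checksum helpers: btrfs_csum_data() feeds data into the crc32c
 * implementation and btrfs_csum_final() inverts the crc and stores it
 * little-endian, which is the on-disk format of btrfs checksums.
 */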
u32 btrfs_csum_data(const char *data, u32 seed, size_t len)
{
	return btrfs_crc32c(seed, data, len);
}

void btrfs_csum_final(u32 crc, u8 *result)
{
	put_unaligned_le32(~crc, result);
}

/*
 * compute the csum for a btree block, and either verify it or write it
 * into the csum field of the block.
 */
static int csum_tree_block(struct btrfs_fs_info *fs_info,
			   struct extent_buffer *buf,
			   int verify)
{
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
	char *result = NULL;
	unsigned long len;
	unsigned long cur_len;
	unsigned long offset = BTRFS_CSUM_SIZE;
	char *kaddr;
	unsigned long map_start;
	unsigned long map_len;
	int err;
	u32 crc = ~(u32)0;
	unsigned long inline_result;

	len = buf->len - offset;
	while (len > 0) {
		err = map_private_extent_buffer(buf, offset, 32,
					&kaddr, &map_start, &map_len);
		if (err)
			return err;
		cur_len = min(len, map_len - (offset - map_start));
		crc = btrfs_csum_data(kaddr + offset - map_start,
				      crc, cur_len);
		len -= cur_len;
		offset += cur_len;
	}
	if (csum_size > sizeof(inline_result)) {
		result = kzalloc(csum_size, GFP_NOFS);
		if (!result)
			return -ENOMEM;
	} else {
		result = (char *)&inline_result;
	}

	btrfs_csum_final(crc, result);

	if (verify) {
		if (memcmp_extent_buffer(buf, result, 0, csum_size)) {
			u32 val;
			u32 found = 0;
			memcpy(&found, result, csum_size);

			read_extent_buffer(buf, &val, 0, csum_size);
			btrfs_warn_rl(fs_info,
				"%s checksum verify failed on %llu wanted %X found %X level %d",
				fs_info->sb->s_id, buf->start,
				val, found, btrfs_header_level(buf));
			if (result != (char *)&inline_result)
				kfree(result);
			return -EUCLEAN;
		}
	} else {
		write_extent_buffer(buf, result, 0, csum_size);
	}
	if (result != (char *)&inline_result)
		kfree(result);
	return 0;
}

/*
 * we can't consider a given block up to date unless the transid of the
 * block matches the transid in the parent node's pointer.  This is how we
 * detect blocks that either didn't get written at all or got written
 * in the wrong place.
 */
static int verify_parent_transid(struct extent_io_tree *io_tree,
				 struct extent_buffer *eb, u64 parent_transid,
				 int atomic)
{
	struct extent_state *cached_state = NULL;
	int ret;
	bool need_lock = (current->journal_info == BTRFS_SEND_TRANS_STUB);

	if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
		return 0;

	if (atomic)
		return -EAGAIN;

	if (need_lock) {
		btrfs_tree_read_lock(eb);
		btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
	}

	lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
			 &cached_state);
	if (extent_buffer_uptodate(eb) &&
	    btrfs_header_generation(eb) == parent_transid) {
		ret = 0;
		goto out;
	}
	btrfs_err_rl(eb->fs_info,
		"parent transid verify failed on %llu wanted %llu found %llu",
			eb->start,
			parent_transid, btrfs_header_generation(eb));
	ret = 1;

	/*
	 * Things reading via commit roots that don't have normal protection,
	 * like send, can have a really old block in cache that may point at a
	 * block that has been freed and re-allocated.  So don't clear uptodate
	 * if we find an eb that is under IO (dirty/writeback) because we could
	 * end up reading in the stale data and then writing it back out and
	 * making everybody very sad.
	 */
	if (!extent_buffer_under_io(eb))
		clear_extent_buffer_uptodate(eb);
out:
	unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
			     &cached_state, GFP_NOFS);
	if (need_lock)
		btrfs_tree_read_unlock_blocking(eb);
	return ret;
}

/*
 * Return 0 if the superblock checksum type matches the checksum value of that
 * algorithm. Pass the raw disk superblock data.
 */
static int btrfs_check_super_csum(struct btrfs_fs_info *fs_info,
				  char *raw_disk_sb)
{
	struct btrfs_super_block *disk_sb =
		(struct btrfs_super_block *)raw_disk_sb;
	u16 csum_type = btrfs_super_csum_type(disk_sb);
	int ret = 0;

	if (csum_type == BTRFS_CSUM_TYPE_CRC32) {
		u32 crc = ~(u32)0;
		const int csum_size = sizeof(crc);
		char result[csum_size];

		/*
		 * The super_block structure does not span the whole
		 * BTRFS_SUPER_INFO_SIZE range, we expect that the unused space
		 * is filled with zeros and is included in the checksum.
		 */
		crc = btrfs_csum_data(raw_disk_sb + BTRFS_CSUM_SIZE,
				crc, BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
		btrfs_csum_final(crc, result);

		if (memcmp(raw_disk_sb, result, csum_size))
			ret = 1;
	}

	if (csum_type >= ARRAY_SIZE(btrfs_csum_sizes)) {
		btrfs_err(fs_info, "unsupported checksum algorithm %u",
				csum_type);
		ret = 1;
	}

	return ret;
}

/*
 * helper to read a given tree block, doing retries as required when
 * the checksums don't match and we have alternate mirrors to try.
 */
static int btree_read_extent_buffer_pages(struct btrfs_fs_info *fs_info,
					  struct extent_buffer *eb,
					  u64 parent_transid)
{
	struct extent_io_tree *io_tree;
	int failed = 0;
	int ret;
	int num_copies = 0;
	int mirror_num = 0;
	int failed_mirror = 0;

	io_tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
	while (1) {
		clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
		ret = read_extent_buffer_pages(io_tree, eb, WAIT_COMPLETE,
					       btree_get_extent, mirror_num);
		if (!ret) {
			if (!verify_parent_transid(io_tree, eb,
						   parent_transid, 0))
				break;
			else
				ret = -EIO;
		}

		num_copies = btrfs_num_copies(fs_info,
					      eb->start, eb->len);
		if (num_copies == 1)
			break;

		if (!failed_mirror) {
			failed = 1;
			failed_mirror = eb->read_mirror;
		}

		mirror_num++;
		if (mirror_num == failed_mirror)
			mirror_num++;

		if (mirror_num > num_copies)
			break;
	}

	if (failed && !ret && failed_mirror)
		repair_eb_io_failure(fs_info, eb, failed_mirror);

	return ret;
}

/*
 * checksum a dirty tree block before IO.  This has extra checks to make sure
 * we only fill in the checksum field in the first page of a multi-page block
 */

static int csum_dirty_buffer(struct btrfs_fs_info *fs_info, struct page *page)
{
	u64 start = page_offset(page);
	u64 found_start;
	struct extent_buffer *eb;

	eb = (struct extent_buffer *)page->private;
	if (page != eb->pages[0])
		return 0;

	found_start = btrfs_header_bytenr(eb);
	/*
	 * Please do not consolidate these warnings into a single if.
	 * It is useful to know what went wrong.
	 */
	if (WARN_ON(found_start != start))
		return -EUCLEAN;
	if (WARN_ON(!PageUptodate(page)))
		return -EUCLEAN;

	ASSERT(memcmp_extent_buffer(eb, fs_info->fsid,
			btrfs_header_fsid(), BTRFS_FSID_SIZE) == 0);

	return csum_tree_block(fs_info, eb, 0);
}

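/*
 * Check that the fsid in the tree block header belongs to this
 * filesystem, walking the list of seed devices as well.  Returns 0 on a
 * match, 1 otherwise.
 */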
static int check_tree_block_fsid(struct btrfs_fs_info *fs_info,
				 struct extent_buffer *eb)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	u8 fsid[BTRFS_FSID_SIZE];
	int ret = 1;

	read_extent_buffer(eb, fsid, btrfs_header_fsid(), BTRFS_FSID_SIZE);
	while (fs_devices) {
		if (!memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE)) {
			ret = 0;
			break;
		}
		fs_devices = fs_devices->seed;
	}
	return ret;
}

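/*
 * Read completion hook for tree blocks: verify the bytenr, fsid, level
 * and checksum of a freshly read extent buffer, then run the
 * tree-checker on it before marking it uptodate.
 */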
static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
				      u64 phy_offset, struct page *page,
				      u64 start, u64 end, int mirror)
{
	u64 found_start;
	int found_level;
	struct extent_buffer *eb;
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret = 0;
	int reads_done;

	if (!page->private)
		goto out;

	eb = (struct extent_buffer *)page->private;

	/* the pending IO might have been the only thing that kept this buffer
	 * in memory.  Make sure we have a ref for all the other checks
	 */
	extent_buffer_get(eb);

	reads_done = atomic_dec_and_test(&eb->io_pages);
	if (!reads_done)
		goto err;

	eb->read_mirror = mirror;
	if (test_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags)) {
		ret = -EIO;
		goto err;
	}

	found_start = btrfs_header_bytenr(eb);
	if (found_start != eb->start) {
		btrfs_err_rl(fs_info, "bad tree block start %llu %llu",
			     found_start, eb->start);
		ret = -EIO;
		goto err;
	}
	if (check_tree_block_fsid(fs_info, eb)) {
		btrfs_err_rl(fs_info, "bad fsid on block %llu",
			     eb->start);
		ret = -EIO;
		goto err;
	}
	found_level = btrfs_header_level(eb);
	if (found_level >= BTRFS_MAX_LEVEL) {
		btrfs_err(fs_info, "bad tree block level %d",
			  (int)btrfs_header_level(eb));
		ret = -EIO;
		goto err;
	}

	btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb),
				       eb, found_level);

	ret = csum_tree_block(fs_info, eb, 1);
	if (ret)
		goto err;

	/*
	 * If this is a leaf block and it is corrupt, set the corrupt bit so
	 * that we don't try and read the other copies of this block, just
	 * return -EIO.
	 */
	if (found_level == 0 && btrfs_check_leaf_full(root, eb)) {
		set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
		ret = -EIO;
	}

	if (found_level > 0 && btrfs_check_node(root, eb))
		ret = -EIO;

	if (!ret)
		set_extent_buffer_uptodate(eb);
err:
	if (reads_done &&
	    test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
		btree_readahead_hook(eb, ret);

	if (ret) {
		/*
		 * our io error hook is going to dec the io pages
		 * again, we have to make sure it has something
		 * to decrement
		 */
		atomic_inc(&eb->io_pages);
		clear_extent_buffer_uptodate(eb);
	}
	free_extent_buffer(eb);
out:
	return ret;
}

static int btree_io_failed_hook(struct page *page, int failed_mirror)
{
	struct extent_buffer *eb;

	eb = (struct extent_buffer *)page->private;
	set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
	eb->read_mirror = failed_mirror;
	atomic_dec(&eb->io_pages);
	if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
		btree_readahead_hook(eb, -EIO);
	return -EIO;	/* we fixed nothing */
}

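/*
 * bio end_io handler for IO registered through btrfs_bio_wq_end_io():
 * pick the workqueue that matches the operation and metadata type and
 * defer the real completion work to end_workqueue_fn().
 */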
static void end_workqueue_bio(struct bio *bio)
{
	struct btrfs_end_io_wq *end_io_wq = bio->bi_private;
	struct btrfs_fs_info *fs_info;
	struct btrfs_workqueue *wq;
	btrfs_work_func_t func;

	fs_info = end_io_wq->info;
	end_io_wq->status = bio->bi_status;

	if (bio_op(bio) == REQ_OP_WRITE) {
		if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) {
			wq = fs_info->endio_meta_write_workers;
			func = btrfs_endio_meta_write_helper;
		} else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE) {
			wq = fs_info->endio_freespace_worker;
			func = btrfs_freespace_write_helper;
		} else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
			wq = fs_info->endio_raid56_workers;
			func = btrfs_endio_raid56_helper;
		} else {
			wq = fs_info->endio_write_workers;
			func = btrfs_endio_write_helper;
		}
	} else {
		if (unlikely(end_io_wq->metadata ==
			     BTRFS_WQ_ENDIO_DIO_REPAIR)) {
			wq = fs_info->endio_repair_workers;
			func = btrfs_endio_repair_helper;
		} else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
			wq = fs_info->endio_raid56_workers;
			func = btrfs_endio_raid56_helper;
		} else if (end_io_wq->metadata) {
			wq = fs_info->endio_meta_workers;
			func = btrfs_endio_meta_helper;
		} else {
			wq = fs_info->endio_workers;
			func = btrfs_endio_helper;
		}
	}

	btrfs_init_work(&end_io_wq->work, func, end_workqueue_fn, NULL, NULL);
	btrfs_queue_work(wq, &end_io_wq->work);
}

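/*
 * Redirect a bio's completion into task context: save the original
 * end_io and private data and point the bio at end_workqueue_bio(), so
 * the final processing runs from a workqueue.
 */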
blk_status_t btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
			enum btrfs_wq_endio_type metadata)
{
	struct btrfs_end_io_wq *end_io_wq;

	end_io_wq = kmem_cache_alloc(btrfs_end_io_wq_cache, GFP_NOFS);
	if (!end_io_wq)
		return BLK_STS_RESOURCE;

	end_io_wq->private = bio->bi_private;
	end_io_wq->end_io = bio->bi_end_io;
	end_io_wq->info = info;
	end_io_wq->status = 0;
	end_io_wq->bio = bio;
	end_io_wq->metadata = metadata;

	bio->bi_private = end_io_wq;
	bio->bi_end_io = end_workqueue_bio;
	return 0;
}

unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
{
	unsigned long limit = min_t(unsigned long,
				    info->thread_pool_size,
				    info->fs_devices->open_devices);
	return 256 * limit;
}

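/*
 * The three work callbacks below drive an async bio submission:
 * run_one_async_start() runs the checksumming hook,
 * run_one_async_done() wakes throttled submitters and sends the bio
 * down the stack (or ends it on error), and run_one_async_free()
 * releases the async_submit_bio.
 */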
static void run_one_async_start(struct btrfs_work *work)
{
	struct async_submit_bio *async;
	blk_status_t ret;

	async = container_of(work, struct async_submit_bio, work);
	ret = async->submit_bio_start(async->private_data, async->bio,
				      async->mirror_num, async->bio_flags,
				      async->bio_offset);
	if (ret)
		async->status = ret;
}

static void run_one_async_done(struct btrfs_work *work)
{
	struct btrfs_fs_info *fs_info;
	struct async_submit_bio *async;
	int limit;

	async = container_of(work, struct async_submit_bio, work);
	fs_info = async->fs_info;

	limit = btrfs_async_submit_limit(fs_info);
	limit = limit * 2 / 3;

	/*
	 * atomic_dec_return implies a barrier for waitqueue_active
	 */
	if (atomic_dec_return(&fs_info->nr_async_submits) < limit &&
	    waitqueue_active(&fs_info->async_submit_wait))
		wake_up(&fs_info->async_submit_wait);

	/* If an error occurred we just want to clean up the bio and move on */
	if (async->status) {
		async->bio->bi_status = async->status;
		bio_endio(async->bio);
		return;
	}

	async->submit_bio_done(async->private_data, async->bio, async->mirror_num,
			       async->bio_flags, async->bio_offset);
}

static void run_one_async_free(struct btrfs_work *work)
{
	struct async_submit_bio *async;

	async = container_of(work, struct async_submit_bio, work);
	kfree(async);
}

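/*
 * Hand a bio to the worker threads so that checksumming (the
 * submit_bio_start hook) runs asynchronously instead of in the
 * caller's context.
 */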
blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
				 int mirror_num, unsigned long bio_flags,
				 u64 bio_offset, void *private_data,
				 extent_submit_bio_hook_t *submit_bio_start,
				 extent_submit_bio_hook_t *submit_bio_done)
{
	struct async_submit_bio *async;

	async = kmalloc(sizeof(*async), GFP_NOFS);
	if (!async)
		return BLK_STS_RESOURCE;

	async->private_data = private_data;
	async->fs_info = fs_info;
	async->bio = bio;
	async->mirror_num = mirror_num;
	async->submit_bio_start = submit_bio_start;
	async->submit_bio_done = submit_bio_done;

	btrfs_init_work(&async->work, btrfs_worker_helper, run_one_async_start,
			run_one_async_done, run_one_async_free);

	async->bio_flags = bio_flags;
	async->bio_offset = bio_offset;

	async->status = 0;

	atomic_inc(&fs_info->nr_async_submits);

	if (op_is_sync(bio->bi_opf))
		btrfs_set_work_high_priority(&async->work);

	btrfs_queue_work(fs_info->workers, &async->work);

	while (atomic_read(&fs_info->async_submit_draining) &&
	      atomic_read(&fs_info->nr_async_submits)) {
		wait_event(fs_info->async_submit_wait,
			   (atomic_read(&fs_info->nr_async_submits) == 0));
	}

	return 0;
}

static blk_status_t btree_csum_one_bio(struct bio *bio)
{
	struct bio_vec *bvec;
	struct btrfs_root *root;
	int i, ret = 0;

	ASSERT(!bio_flagged(bio, BIO_CLONED));
	bio_for_each_segment_all(bvec, bio, i) {
		root = BTRFS_I(bvec->bv_page->mapping->host)->root;
		ret = csum_dirty_buffer(root->fs_info, bvec->bv_page);
		if (ret)
			break;
	}

	return errno_to_blk_status(ret);
}

static blk_status_t __btree_submit_bio_start(void *private_data, struct bio *bio,
					     int mirror_num, unsigned long bio_flags,
					     u64 bio_offset)
{
	/*
	 * when we're called for a write, we're already in the async
	 * submission context.  Just jump into btrfs_map_bio
	 */
	return btree_csum_one_bio(bio);
}

static blk_status_t __btree_submit_bio_done(void *private_data, struct bio *bio,
					    int mirror_num, unsigned long bio_flags,
					    u64 bio_offset)
{
	struct inode *inode = private_data;
	blk_status_t ret;

	/*
	 * when we're called for a write, we're already in the async
	 * submission context.  Just jump into btrfs_map_bio
	 */
	ret = btrfs_map_bio(btrfs_sb(inode->i_sb), bio, mirror_num, 1);
	if (ret) {
		bio->bi_status = ret;
		bio_endio(bio);
	}
	return ret;
}

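/*
 * Decide whether a btree write is checksummed inline: tree log bios
 * and, on x86, CPUs with hardware crc32c (SSE4.2) skip the async path.
 */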
static int check_async_write(unsigned long bio_flags)
{
	if (bio_flags & EXTENT_BIO_TREE_LOG)
		return 0;
#ifdef CONFIG_X86
	if (static_cpu_has(X86_FEATURE_XMM4_2))
		return 0;
#endif
	return 1;
}

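/*
 * Main submission hook for btree IO.  Reads are mapped directly with
 * checksum validation deferred to the metadata end_io workers; writes
 * are checksummed inline or through the async helpers (see
 * check_async_write()) before being mapped.
 */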
static blk_status_t btree_submit_bio_hook(void *private_data, struct bio *bio,
					  int mirror_num, unsigned long bio_flags,
					  u64 bio_offset)
{
	struct inode *inode = private_data;
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	int async = check_async_write(bio_flags);
	blk_status_t ret;

	if (bio_op(bio) != REQ_OP_WRITE) {
		/*
		 * called for a read, do the setup so that checksum validation
		 * can happen in the async kernel threads
		 */
		ret = btrfs_bio_wq_end_io(fs_info, bio,
					  BTRFS_WQ_ENDIO_METADATA);
		if (ret)
			goto out_w_error;
		ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
	} else if (!async) {
		ret = btree_csum_one_bio(bio);
		if (ret)
			goto out_w_error;
		ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
	} else {
		/*
		 * kthread helpers are used to submit writes so that
		 * checksumming can happen in parallel across all CPUs
		 */
		ret = btrfs_wq_submit_bio(fs_info, bio, mirror_num, 0,
					  bio_offset, private_data,
					  __btree_submit_bio_start,
					  __btree_submit_bio_done);
	}

	if (ret)
		goto out_w_error;
	return 0;

out_w_error:
	bio->bi_status = ret;
	bio_endio(bio);
	return ret;
}

#ifdef CONFIG_MIGRATION
static int btree_migratepage(struct address_space *mapping,
			struct page *newpage, struct page *page,
			enum migrate_mode mode)
{
	/*
	 * we can't safely write a btree page from here,
	 * we haven't done the locking hook
	 */
	if (PageDirty(page))
		return -EAGAIN;
	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (page_has_private(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return -EAGAIN;
	return migrate_page(mapping, newpage, page, mode);
}
#endif


static int btree_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct btrfs_fs_info *fs_info;
	int ret;

	if (wbc->sync_mode == WB_SYNC_NONE) {

		if (wbc->for_kupdate)
			return 0;

		fs_info = BTRFS_I(mapping->host)->root->fs_info;
		/* this is a bit racy, but that's ok */
		ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
					     BTRFS_DIRTY_METADATA_THRESH,
					     fs_info->dirty_metadata_batch);
		if (ret < 0)
			return 0;
	}
	return btree_write_cache_pages(mapping, wbc);
}

static int btree_readpage(struct file *file, struct page *page)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	return extent_read_full_page(tree, page, btree_get_extent, 0);
}

static int btree_releasepage(struct page *page, gfp_t gfp_flags)
{
	if (PageWriteback(page) || PageDirty(page))
		return 0;

	return try_release_extent_buffer(page);
}

static void btree_invalidatepage(struct page *page, unsigned int offset,
				 unsigned int length)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	extent_invalidatepage(tree, page, offset);
	btree_releasepage(page, GFP_NOFS);
	if (PagePrivate(page)) {
		btrfs_warn(BTRFS_I(page->mapping->host)->root->fs_info,
			   "page private not zero on page %llu",
			   (unsigned long long)page_offset(page));
		ClearPagePrivate(page);
		set_page_private(page, 0);
		put_page(page);
	}
}

static int btree_set_page_dirty(struct page *page)
{
#ifdef DEBUG
	struct extent_buffer *eb;

	BUG_ON(!PagePrivate(page));
	eb = (struct extent_buffer *)page->private;
	BUG_ON(!eb);
	BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
	BUG_ON(!atomic_read(&eb->refs));
	btrfs_assert_tree_locked(eb);
#endif
	return __set_page_dirty_nobuffers(page);
}

static const struct address_space_operations btree_aops = {
	.readpage	= btree_readpage,
	.writepages	= btree_writepages,
	.releasepage	= btree_releasepage,
	.invalidatepage = btree_invalidatepage,
#ifdef CONFIG_MIGRATION
	.migratepage	= btree_migratepage,
#endif
	.set_page_dirty = btree_set_page_dirty,
};

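/* Start readahead on a tree block; IO errors are simply ignored here. */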
void readahead_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct extent_buffer *buf = NULL;
	struct inode *btree_inode = fs_info->btree_inode;

	buf = btrfs_find_create_tree_block(fs_info, bytenr);
	if (IS_ERR(buf))
		return;
	read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
				 buf, WAIT_NONE, btree_get_extent, 0);
	free_extent_buffer(buf);
}

int reada_tree_block_flagged(struct btrfs_fs_info *fs_info, u64 bytenr,
			 int mirror_num, struct extent_buffer **eb)
{
	struct extent_buffer *buf = NULL;
	struct inode *btree_inode = fs_info->btree_inode;
	struct extent_io_tree *io_tree = &BTRFS_I(btree_inode)->io_tree;
	int ret;

	buf = btrfs_find_create_tree_block(fs_info, bytenr);
	if (IS_ERR(buf))
		return 0;

	set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags);

	ret = read_extent_buffer_pages(io_tree, buf, WAIT_PAGE_LOCK,
				       btree_get_extent, mirror_num);
	if (ret) {
		free_extent_buffer(buf);
		return ret;
	}

	if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) {
		free_extent_buffer(buf);
		return -EIO;
	} else if (extent_buffer_uptodate(buf)) {
		*eb = buf;
	} else {
		free_extent_buffer(buf);
	}
	return 0;
}

struct extent_buffer *btrfs_find_create_tree_block(
						struct btrfs_fs_info *fs_info,
						u64 bytenr)
{
	if (btrfs_is_testing(fs_info))
		return alloc_test_extent_buffer(fs_info, bytenr);
	return alloc_extent_buffer(fs_info, bytenr);
}


int btrfs_write_tree_block(struct extent_buffer *buf)
{
	return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start,
					buf->start + buf->len - 1);
}

void btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
{
	filemap_fdatawait_range(buf->pages[0]->mapping,
				buf->start, buf->start + buf->len - 1);
}

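/*
 * Read the tree block at @bytenr and verify it against @parent_transid,
 * trying other mirrors as needed.  Returns an ERR_PTR on failure.
 */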
struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr,
				      u64 parent_transid)
{
	struct extent_buffer *buf = NULL;
	int ret;

	buf = btrfs_find_create_tree_block(fs_info, bytenr);
	if (IS_ERR(buf))
		return buf;

	ret = btree_read_extent_buffer_pages(fs_info, buf, parent_transid);
	if (ret) {
		free_extent_buffer(buf);
		return ERR_PTR(ret);
	}
	return buf;

}

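/*
 * If the buffer was dirtied in the currently running transaction, clear
 * its dirty bit and remove it from the dirty metadata accounting.
 */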
void clean_tree_block(struct btrfs_fs_info *fs_info,
		      struct extent_buffer *buf)
{
	if (btrfs_header_generation(buf) ==
	    fs_info->running_transaction->transid) {
		btrfs_assert_tree_locked(buf);

		if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
			percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
						 -buf->len,
						 fs_info->dirty_metadata_batch);
			/* ugh, clear_extent_buffer_dirty needs to lock the page */
			btrfs_set_lock_blocking(buf);
			clear_extent_buffer_dirty(buf);
		}
	}
}

static struct btrfs_subvolume_writers *btrfs_alloc_subvolume_writers(void)
{
	struct btrfs_subvolume_writers *writers;
	int ret;

	writers = kmalloc(sizeof(*writers), GFP_NOFS);
	if (!writers)
		return ERR_PTR(-ENOMEM);

	ret = percpu_counter_init(&writers->counter, 0, GFP_NOFS);
	if (ret < 0) {
		kfree(writers);
		return ERR_PTR(ret);
	}

	init_waitqueue_head(&writers->wait);
	return writers;
}

static void
btrfs_free_subvolume_writers(struct btrfs_subvolume_writers *writers)
{
	percpu_counter_destroy(&writers->counter);
	kfree(writers);
}

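/*
 * Initialize the in-memory fields of a btrfs_root: bookkeeping state,
 * locks, lists and waitqueues.  No tree blocks are read here.
 */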
static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
			 u64 objectid)
{
	bool dummy = test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state);
	root->node = NULL;
	root->commit_root = NULL;
	root->state = 0;
	root->orphan_cleanup_state = 0;

	root->objectid = objectid;
	root->last_trans = 0;
	root->highest_objectid = 0;
	root->nr_delalloc_inodes = 0;
	root->nr_ordered_extents = 0;
	root->name = NULL;
	root->inode_tree = RB_ROOT;
	INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
	root->block_rsv = NULL;
	root->orphan_block_rsv = NULL;

	INIT_LIST_HEAD(&root->dirty_list);
	INIT_LIST_HEAD(&root->root_list);
	INIT_LIST_HEAD(&root->delalloc_inodes);
	INIT_LIST_HEAD(&root->delalloc_root);
	INIT_LIST_HEAD(&root->ordered_extents);
	INIT_LIST_HEAD(&root->ordered_root);
	INIT_LIST_HEAD(&root->logged_list[0]);
	INIT_LIST_HEAD(&root->logged_list[1]);
	spin_lock_init(&root->orphan_lock);
	spin_lock_init(&root->inode_lock);
	spin_lock_init(&root->delalloc_lock);
	spin_lock_init(&root->ordered_extent_lock);
	spin_lock_init(&root->accounting_lock);
	spin_lock_init(&root->log_extents_lock[0]);
	spin_lock_init(&root->log_extents_lock[1]);
	mutex_init(&root->objectid_mutex);
	mutex_init(&root->log_mutex);
	mutex_init(&root->ordered_extent_mutex);
	mutex_init(&root->delalloc_mutex);
	init_waitqueue_head(&root->log_writer_wait);
	init_waitqueue_head(&root->log_commit_wait[0]);
	init_waitqueue_head(&root->log_commit_wait[1]);
	INIT_LIST_HEAD(&root->log_ctxs[0]);
	INIT_LIST_HEAD(&root->log_ctxs[1]);
	atomic_set(&root->log_commit[0], 0);
	atomic_set(&root->log_commit[1], 0);
	atomic_set(&root->log_writers, 0);
	atomic_set(&root->log_batch, 0);
	atomic_set(&root->orphan_inodes, 0);
	refcount_set(&root->refs, 1);
	atomic_set(&root->will_be_snapshotted, 0);
	atomic64_set(&root->qgroup_meta_rsv, 0);
	root->log_transid = 0;
	root->log_transid_committed = -1;
	root->last_log_commit = 0;
	if (!dummy)
		extent_io_tree_init(&root->dirty_log_pages, NULL);

	memset(&root->root_key, 0, sizeof(root->root_key));
	memset(&root->root_item, 0, sizeof(root->root_item));
	memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
	if (!dummy)
		root->defrag_trans_start = fs_info->generation;
	else
		root->defrag_trans_start = 0;
	root->root_key.objectid = objectid;
	root->anon_dev = 0;

	spin_lock_init(&root->root_item_lock);
}

static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info,
		gfp_t flags)
{
	struct btrfs_root *root = kzalloc(sizeof(*root), flags);
	if (root)
		root->fs_info = fs_info;
	return root;
}

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
/* Should only be used by the testing infrastructure */
struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;

	if (!fs_info)
		return ERR_PTR(-EINVAL);

	root = btrfs_alloc_root(fs_info, GFP_KERNEL);
	if (!root)
		return ERR_PTR(-ENOMEM);

	/* We don't use the stripesize in selftest, set it as sectorsize */
	__setup_root(root, fs_info, BTRFS_ROOT_TREE_OBJECTID);
	root->alloc_bytenr = 0;

	return root;
}
#endif

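/*
 * Create a new tree: allocate a root and an empty leaf for it, then
 * insert the matching root item into the tree of tree roots.
 */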
struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info,
				     u64 objectid)
{
	struct extent_buffer *leaf;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root;
	struct btrfs_key key;
	int ret = 0;
	uuid_le uuid;

	root = btrfs_alloc_root(fs_info, GFP_KERNEL);
	if (!root)
		return ERR_PTR(-ENOMEM);

	__setup_root(root, fs_info, objectid);
	root->root_key.objectid = objectid;
	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
	root->root_key.offset = 0;

	leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0);
	if (IS_ERR(leaf)) {
		ret = PTR_ERR(leaf);
		leaf = NULL;
		goto fail;
	}

	memzero_extent_buffer(leaf, 0, sizeof(struct btrfs_header));
	btrfs_set_header_bytenr(leaf, leaf->start);
	btrfs_set_header_generation(leaf, trans->transid);
	btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(leaf, objectid);
	root->node = leaf;

	write_extent_buffer_fsid(leaf, fs_info->fsid);
	write_extent_buffer_chunk_tree_uuid(leaf, fs_info->chunk_tree_uuid);
	btrfs_mark_buffer_dirty(leaf);

	root->commit_root = btrfs_root_node(root);
	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);

	root->root_item.flags = 0;
	root->root_item.byte_limit = 0;
	btrfs_set_root_bytenr(&root->root_item, leaf->start);
	btrfs_set_root_generation(&root->root_item, trans->transid);
	btrfs_set_root_level(&root->root_item, 0);
	btrfs_set_root_refs(&root->root_item, 1);
	btrfs_set_root_used(&root->root_item, leaf->len);
	btrfs_set_root_last_snapshot(&root->root_item, 0);
	btrfs_set_root_dirid(&root->root_item, 0);
	uuid_le_gen(&uuid);
	memcpy(root->root_item.uuid, uuid.b, BTRFS_UUID_SIZE);
	root->root_item.drop_level = 0;

	key.objectid = objectid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = 0;
	ret = btrfs_insert_root(trans, tree_root, &key, &root->root_item);
	if (ret)
		goto fail;

	btrfs_tree_unlock(leaf);

	return root;

fail:
	if (leaf) {
		btrfs_tree_unlock(leaf);
		free_extent_buffer(root->commit_root);
		free_extent_buffer(leaf);
	}
	kfree(root);

	return ERR_PTR(ret);
}

static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
					 struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;
	struct extent_buffer *leaf;

	root = btrfs_alloc_root(fs_info, GFP_NOFS);
	if (!root)
		return ERR_PTR(-ENOMEM);

	__setup_root(root, fs_info, BTRFS_TREE_LOG_OBJECTID);

	root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
	root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;

	/*
	 * DON'T set REF_COWS for log trees
	 *
	 * log trees do not get reference counted because they go away
	 * before a real commit is actually done.  They do store pointers
	 * to file data extents, and those reference counts still get
	 * updated (along with back refs to the log tree).
	 */

	leaf = btrfs_alloc_tree_block(trans, root, 0, BTRFS_TREE_LOG_OBJECTID,
			NULL, 0, 0, 0);
	if (IS_ERR(leaf)) {
		kfree(root);
		return ERR_CAST(leaf);
	}

	memzero_extent_buffer(leaf, 0, sizeof(struct btrfs_header));
	btrfs_set_header_bytenr(leaf, leaf->start);
	btrfs_set_header_generation(leaf, trans->transid);
	btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(leaf, BTRFS_TREE_LOG_OBJECTID);
	root->node = leaf;

	write_extent_buffer_fsid(root->node, fs_info->fsid);
	btrfs_mark_buffer_dirty(root->node);
	btrfs_tree_unlock(root->node);
	return root;
}

int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *log_root;

	log_root = alloc_log_tree(trans, fs_info);
	if (IS_ERR(log_root))
		return PTR_ERR(log_root);
	WARN_ON(fs_info->log_root_tree);
	fs_info->log_root_tree = log_root;
	return 0;
}

int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *log_root;
	struct btrfs_inode_item *inode_item;

	log_root = alloc_log_tree(trans, fs_info);
	if (IS_ERR(log_root))
		return PTR_ERR(log_root);

	log_root->last_trans = trans->transid;
	log_root->root_key.offset = root->root_key.objectid;

	inode_item = &log_root->root_item.inode;
	btrfs_set_stack_inode_generation(inode_item, 1);
	btrfs_set_stack_inode_size(inode_item, 3);
	btrfs_set_stack_inode_nlink(inode_item, 1);
	btrfs_set_stack_inode_nbytes(inode_item,
				     fs_info->nodesize);
	btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);

	btrfs_set_root_node(&log_root->root_item, log_root->node);

	WARN_ON(root->log_root);
	root->log_root = log_root;
	root->log_transid = 0;
	root->log_transid_committed = -1;
	root->last_log_commit = 0;
	return 0;
}

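/*
 * Read a root from the tree of tree roots: find its root item and read
 * the tree block that the item points at.
 */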
static struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
					       struct btrfs_key *key)
{
	struct btrfs_root *root;
	struct btrfs_fs_info *fs_info = tree_root->fs_info;
	struct btrfs_path *path;
	u64 generation;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return ERR_PTR(-ENOMEM);

	root = btrfs_alloc_root(fs_info, GFP_NOFS);
	if (!root) {
		ret = -ENOMEM;
		goto alloc_fail;
	}

	__setup_root(root, fs_info, key->objectid);

	ret = btrfs_find_root(tree_root, key, path,
			      &root->root_item, &root->root_key);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		goto find_fail;
	}

	generation = btrfs_root_generation(&root->root_item);
	root->node = read_tree_block(fs_info,
				     btrfs_root_bytenr(&root->root_item),
				     generation);
	if (IS_ERR(root->node)) {
		ret = PTR_ERR(root->node);
		goto find_fail;
	} else if (!btrfs_buffer_uptodate(root->node, generation, 0)) {
		ret = -EIO;
		free_extent_buffer(root->node);
		goto find_fail;
	}
	root->commit_root = btrfs_root_node(root);
out:
	btrfs_free_path(path);
	return root;

find_fail:
	kfree(root);
alloc_fail:
	root = ERR_PTR(ret);
	goto out;
}

struct btrfs_root *btrfs_read_fs_root(struct btrfs_root *tree_root,
				      struct btrfs_key *location)
{
	struct btrfs_root *root;

	root = btrfs_read_tree_root(tree_root, location);
	if (IS_ERR(root))
		return root;

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
		set_bit(BTRFS_ROOT_REF_COWS, &root->state);
		btrfs_check_and_init_root_item(&root->root_item);
	}

	return root;
}

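/*
 * Set up the parts of a subvolume root needed once it is in active use:
 * the free inode caches, subvolume writer tracking, an anonymous device
 * and the highest objectid.
 */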
int btrfs_init_fs_root(struct btrfs_root *root)
{
	int ret;
	struct btrfs_subvolume_writers *writers;

	root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS);
	root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned),
					GFP_NOFS);
	if (!root->free_ino_pinned || !root->free_ino_ctl) {
		ret = -ENOMEM;
		goto fail;
	}

	writers = btrfs_alloc_subvolume_writers();
	if (IS_ERR(writers)) {
		ret = PTR_ERR(writers);
		goto fail;
	}
	root->subv_writers = writers;

	btrfs_init_free_ino_ctl(root);
	spin_lock_init(&root->ino_cache_lock);
	init_waitqueue_head(&root->ino_cache_wait);

	ret = get_anon_bdev(&root->anon_dev);
	if (ret)
		goto fail;

	mutex_lock(&root->objectid_mutex);
	ret = btrfs_find_highest_objectid(root,
					&root->highest_objectid);
	if (ret) {
		mutex_unlock(&root->objectid_mutex);
		goto fail;
	}

	ASSERT(root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);

	mutex_unlock(&root->objectid_mutex);

	return 0;
fail:
	/* the caller is responsible to call free_fs_root */
	return ret;
}

struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
					u64 root_id)
{
	struct btrfs_root *root;

	spin_lock(&fs_info->fs_roots_radix_lock);
	root = radix_tree_lookup(&fs_info->fs_roots_radix,
				 (unsigned long)root_id);
	spin_unlock(&fs_info->fs_roots_radix_lock);
	return root;
}

int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
			 struct btrfs_root *root)
{
	int ret;

	ret = radix_tree_preload(GFP_NOFS);
	if (ret)
		return ret;

	spin_lock(&fs_info->fs_roots_radix_lock);
	ret = radix_tree_insert(&fs_info->fs_roots_radix,
				(unsigned long)root->root_key.objectid,
				root);
	if (ret == 0)
		set_bit(BTRFS_ROOT_IN_RADIX, &root->state);
	spin_unlock(&fs_info->fs_roots_radix_lock);
	radix_tree_preload_end();

	return ret;
}

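/*
 * Get a root by its key.  The well known roots are returned directly
 * from fs_info; everything else is looked up in the radix tree and, on
 * a miss, read from disk, initialized and inserted.
 */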
struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
				     struct btrfs_key *location,
				     bool check_ref)
{
	struct btrfs_root *root;
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
		return fs_info->tree_root;
	if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
		return fs_info->extent_root;
	if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
		return fs_info->chunk_root;
	if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
		return fs_info->dev_root;
	if (location->objectid == BTRFS_CSUM_TREE_OBJECTID)
		return fs_info->csum_root;
	if (location->objectid == BTRFS_QUOTA_TREE_OBJECTID)
		return fs_info->quota_root ? fs_info->quota_root :
					     ERR_PTR(-ENOENT);
	if (location->objectid == BTRFS_UUID_TREE_OBJECTID)
		return fs_info->uuid_root ? fs_info->uuid_root :
					    ERR_PTR(-ENOENT);
	if (location->objectid == BTRFS_FREE_SPACE_TREE_OBJECTID)
		return fs_info->free_space_root ? fs_info->free_space_root :
						  ERR_PTR(-ENOENT);
again:
	root = btrfs_lookup_fs_root(fs_info, location->objectid);
	if (root) {
		if (check_ref && btrfs_root_refs(&root->root_item) == 0)
			return ERR_PTR(-ENOENT);
		return root;
	}

	root = btrfs_read_fs_root(fs_info->tree_root, location);
	if (IS_ERR(root))
		return root;

	if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
		ret = -ENOENT;
		goto fail;
	}

	ret = btrfs_init_fs_root(root);
	if (ret)
		goto fail;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto fail;
	}
	key.objectid = BTRFS_ORPHAN_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = location->objectid;

	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
	btrfs_free_path(path);
	if (ret < 0)
		goto fail;
	if (ret == 0)
		set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state);

	ret = btrfs_insert_fs_root(fs_info, root);
	if (ret) {
		if (ret == -EEXIST) {
			free_fs_root(root);
			goto again;
		}
		goto fail;
	}
	return root;
fail:
	free_fs_root(root);
	return ERR_PTR(ret);
}

static int btrfs_congested_fn(void *congested_data, int bdi_bits)
{
	struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
	int ret = 0;
	struct btrfs_device *device;
	struct backing_dev_info *bdi;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) {
		if (!device->bdev)
			continue;
		bdi = device->bdev->bd_bdi;
		if (bdi_congested(bdi, bdi_bits)) {
			ret = 1;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}

/*
 * called by the kthread helper functions to finally call the bio end_io
 * functions.  This is where read checksum verification actually happens
 */
static void end_workqueue_fn(struct btrfs_work *work)
{
	struct bio *bio;
	struct btrfs_end_io_wq *end_io_wq;

	end_io_wq = container_of(work, struct btrfs_end_io_wq, work);
	bio = end_io_wq->bio;

	bio->bi_status = end_io_wq->status;
	bio->bi_private = end_io_wq->private;
	bio->bi_end_io = end_io_wq->end_io;
	bio_endio(bio);
	kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq);
}

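/*
 * The cleaner kthread does the deferred housekeeping: delayed iputs,
 * deleted snapshots, inode defrag and unused block groups.
 */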
cleaner_kthread(void * arg)1686 static int cleaner_kthread(void *arg)
1687 {
1688 	struct btrfs_root *root = arg;
1689 	struct btrfs_fs_info *fs_info = root->fs_info;
1690 	int again;
1691 
1692 	while (1) {
1693 		again = 0;
1694 
1695 		/* Make the cleaner go to sleep early. */
1696 		if (btrfs_need_cleaner_sleep(fs_info))
1697 			goto sleep;
1698 
1699 		/*
1700 		 * Do not do anything if we might cause open_ctree() to block
1701 		 * before we have finished mounting the filesystem.
1702 		 */
1703 		if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
1704 			goto sleep;
1705 
1706 		if (!mutex_trylock(&fs_info->cleaner_mutex))
1707 			goto sleep;
1708 
1709 		/*
1710 		 * Guard against the possibility that the fs status changed
1711 		 * between the check above and the trylock.
1712 		 */
1713 		if (btrfs_need_cleaner_sleep(fs_info)) {
1714 			mutex_unlock(&fs_info->cleaner_mutex);
1715 			goto sleep;
1716 		}
1717 
1718 		mutex_lock(&fs_info->cleaner_delayed_iput_mutex);
1719 		btrfs_run_delayed_iputs(fs_info);
1720 		mutex_unlock(&fs_info->cleaner_delayed_iput_mutex);
1721 
1722 		again = btrfs_clean_one_deleted_snapshot(root);
1723 		mutex_unlock(&fs_info->cleaner_mutex);
1724 
1725 		/*
1726 		 * The defragger has already dealt with the R/O remount and
1727 		 * umount, so we needn't do anything special here.
1728 		 */
1729 		btrfs_run_defrag_inodes(fs_info);
1730 
1731 		/*
1732 		 * Acquires fs_info->delete_unused_bgs_mutex to avoid racing
1733 		 * with relocation (btrfs_relocate_chunk), because relocation
1734 		 * acquires fs_info->cleaner_mutex (btrfs_relocate_block_group)
1735 		 * after acquiring fs_info->delete_unused_bgs_mutex. So we
1736 		 * can't hold (and needn't hold) fs_info->cleaner_mutex when
1737 		 * deleting unused block groups.
1738 		 */
1739 		btrfs_delete_unused_bgs(fs_info);
1740 sleep:
1741 		if (kthread_should_park())
1742 			kthread_parkme();
1743 		if (kthread_should_stop())
1744 			return 0;
1745 		if (!again) {
1746 			set_current_state(TASK_INTERRUPTIBLE);
1747 			schedule();
1748 			__set_current_state(TASK_RUNNING);
1749 		}
1750 	}
1751 }
1752 
1753 static int transaction_kthread(void *arg)
1754 {
1755 	struct btrfs_root *root = arg;
1756 	struct btrfs_fs_info *fs_info = root->fs_info;
1757 	struct btrfs_trans_handle *trans;
1758 	struct btrfs_transaction *cur;
1759 	u64 transid;
1760 	unsigned long now;
1761 	unsigned long delay;
1762 	bool cannot_commit;
1763 
1764 	do {
1765 		cannot_commit = false;
1766 		delay = HZ * fs_info->commit_interval;
1767 		mutex_lock(&fs_info->transaction_kthread_mutex);
1768 
1769 		spin_lock(&fs_info->trans_lock);
1770 		cur = fs_info->running_transaction;
1771 		if (!cur) {
1772 			spin_unlock(&fs_info->trans_lock);
1773 			goto sleep;
1774 		}
1775 
1776 		now = get_seconds();
1777 		if (cur->state < TRANS_STATE_BLOCKED &&
1778 		    (now < cur->start_time ||
1779 		     now - cur->start_time < fs_info->commit_interval)) {
1780 			spin_unlock(&fs_info->trans_lock);
1781 			delay = HZ * 5;
1782 			goto sleep;
1783 		}
1784 		transid = cur->transid;
1785 		spin_unlock(&fs_info->trans_lock);
1786 
1787 		/* If the file system is aborted, this will always fail. */
1788 		trans = btrfs_attach_transaction(root);
1789 		if (IS_ERR(trans)) {
1790 			if (PTR_ERR(trans) != -ENOENT)
1791 				cannot_commit = true;
1792 			goto sleep;
1793 		}
1794 		if (transid == trans->transid) {
1795 			btrfs_commit_transaction(trans);
1796 		} else {
1797 			btrfs_end_transaction(trans);
1798 		}
1799 sleep:
1800 		wake_up_process(fs_info->cleaner_kthread);
1801 		mutex_unlock(&fs_info->transaction_kthread_mutex);
1802 
1803 		if (unlikely(test_bit(BTRFS_FS_STATE_ERROR,
1804 				      &fs_info->fs_state)))
1805 			btrfs_cleanup_transaction(fs_info);
1806 		set_current_state(TASK_INTERRUPTIBLE);
1807 		if (!kthread_should_stop() &&
1808 				(!btrfs_transaction_blocked(fs_info) ||
1809 				 cannot_commit))
1810 			schedule_timeout(delay);
1811 		__set_current_state(TASK_RUNNING);
1812 	} while (!kthread_should_stop());
1813 	return 0;
1814 }
1815 
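/*
 * Worked example for the timing above (default values assumed): with a
 * commit_interval of 30 seconds, the kthread sleeps for HZ * 30 jiffies
 * between commit attempts; if the running transaction is younger than
 * the interval, it backs off and re-checks after HZ * 5 (5 seconds).
 */
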
1816 /*
1817  * This will find the highest generation in the array of
1818  * root backups.  The index of the newest entry is returned,
1819  * or -1 if we can't find anything.
1820  *
1821  * We check to make sure the array is valid by comparing the
1822  * generation of the latest root in the array with the generation
1823  * in the super block.  If they don't match we pitch it.
1824  */
1825 static int find_newest_super_backup(struct btrfs_fs_info *info, u64 newest_gen)
1826 {
1827 	u64 cur;
1828 	int newest_index = -1;
1829 	struct btrfs_root_backup *root_backup;
1830 	int i;
1831 
1832 	for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
1833 		root_backup = info->super_copy->super_roots + i;
1834 		cur = btrfs_backup_tree_root_gen(root_backup);
1835 		if (cur == newest_gen)
1836 			newest_index = i;
1837 	}
1838 
1839 	/* check to see if we actually wrapped around */
1840 	if (newest_index == BTRFS_NUM_BACKUP_ROOTS - 1) {
1841 		root_backup = info->super_copy->super_roots;
1842 		cur = btrfs_backup_tree_root_gen(root_backup);
1843 		if (cur == newest_gen)
1844 			newest_index = 0;
1845 	}
1846 	return newest_index;
1847 }
1848 
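/*
 * Worked example (BTRFS_NUM_BACKUP_ROOTS assumed to be 4): slot
 * generations {97, 98, 99, 100} with newest_gen == 100 give
 * newest_index == 3.  If slot 0 *also* holds generation 100, the ring
 * wrapped and slot 0 is the more recent copy, so newest_index becomes 0.
 */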
1849 
1850 /*
1851  * find the oldest backup so we know where to store new entries
1852  * in the backup array.  This will set the backup_root_index
1853  * field in the fs_info struct
1854  */
1855 static void find_oldest_super_backup(struct btrfs_fs_info *info,
1856 				     u64 newest_gen)
1857 {
1858 	int newest_index = -1;
1859 
1860 	newest_index = find_newest_super_backup(info, newest_gen);
1861 	/* if there was garbage in there, just move along */
1862 	if (newest_index == -1) {
1863 		info->backup_root_index = 0;
1864 	} else {
1865 		info->backup_root_index = (newest_index + 1) % BTRFS_NUM_BACKUP_ROOTS;
1866 	}
1867 }
1868 
1869 /*
1870  * copy all the root pointers into the super backup array.
1871  * this will bump the backup pointer by one when it is
1872  * done
1873  */
1874 static void backup_super_roots(struct btrfs_fs_info *info)
1875 {
1876 	int next_backup;
1877 	struct btrfs_root_backup *root_backup;
1878 	int last_backup;
1879 
1880 	next_backup = info->backup_root_index;
1881 	last_backup = (next_backup + BTRFS_NUM_BACKUP_ROOTS - 1) %
1882 		BTRFS_NUM_BACKUP_ROOTS;
1883 
1884 	/*
1885 	 * Just overwrite the last backup if we're at the same generation;
1886 	 * this happens only at umount.
1887 	 */
1888 	root_backup = info->super_for_commit->super_roots + last_backup;
1889 	if (btrfs_backup_tree_root_gen(root_backup) ==
1890 	    btrfs_header_generation(info->tree_root->node))
1891 		next_backup = last_backup;
1892 
1893 	root_backup = info->super_for_commit->super_roots + next_backup;
1894 
1895 	/*
1896 	 * make sure all of our padding and empty slots get zero filled
1897 	 * regardless of which ones we use today
1898 	 */
1899 	memset(root_backup, 0, sizeof(*root_backup));
1900 
1901 	info->backup_root_index = (next_backup + 1) % BTRFS_NUM_BACKUP_ROOTS;
1902 
1903 	btrfs_set_backup_tree_root(root_backup, info->tree_root->node->start);
1904 	btrfs_set_backup_tree_root_gen(root_backup,
1905 			       btrfs_header_generation(info->tree_root->node));
1906 
1907 	btrfs_set_backup_tree_root_level(root_backup,
1908 			       btrfs_header_level(info->tree_root->node));
1909 
1910 	btrfs_set_backup_chunk_root(root_backup, info->chunk_root->node->start);
1911 	btrfs_set_backup_chunk_root_gen(root_backup,
1912 			       btrfs_header_generation(info->chunk_root->node));
1913 	btrfs_set_backup_chunk_root_level(root_backup,
1914 			       btrfs_header_level(info->chunk_root->node));
1915 
1916 	btrfs_set_backup_extent_root(root_backup, info->extent_root->node->start);
1917 	btrfs_set_backup_extent_root_gen(root_backup,
1918 			       btrfs_header_generation(info->extent_root->node));
1919 	btrfs_set_backup_extent_root_level(root_backup,
1920 			       btrfs_header_level(info->extent_root->node));
1921 
1922 	/*
1923 	 * we might commit during log recovery, which happens before we set
1924 	 * the fs_root.  Make sure it is valid before we fill it in.
1925 	 */
1926 	if (info->fs_root && info->fs_root->node) {
1927 		btrfs_set_backup_fs_root(root_backup,
1928 					 info->fs_root->node->start);
1929 		btrfs_set_backup_fs_root_gen(root_backup,
1930 			       btrfs_header_generation(info->fs_root->node));
1931 		btrfs_set_backup_fs_root_level(root_backup,
1932 			       btrfs_header_level(info->fs_root->node));
1933 	}
1934 
1935 	btrfs_set_backup_dev_root(root_backup, info->dev_root->node->start);
1936 	btrfs_set_backup_dev_root_gen(root_backup,
1937 			       btrfs_header_generation(info->dev_root->node));
1938 	btrfs_set_backup_dev_root_level(root_backup,
1939 				       btrfs_header_level(info->dev_root->node));
1940 
1941 	btrfs_set_backup_csum_root(root_backup, info->csum_root->node->start);
1942 	btrfs_set_backup_csum_root_gen(root_backup,
1943 			       btrfs_header_generation(info->csum_root->node));
1944 	btrfs_set_backup_csum_root_level(root_backup,
1945 			       btrfs_header_level(info->csum_root->node));
1946 
1947 	btrfs_set_backup_total_bytes(root_backup,
1948 			     btrfs_super_total_bytes(info->super_copy));
1949 	btrfs_set_backup_bytes_used(root_backup,
1950 			     btrfs_super_bytes_used(info->super_copy));
1951 	btrfs_set_backup_num_devices(root_backup,
1952 			     btrfs_super_num_devices(info->super_copy));
1953 
1954 	/*
1955 	 * if we don't copy this out to the super_copy, it won't get remembered
1956 	 * for the next commit
1957 	 */
1958 	memcpy(&info->super_copy->super_roots,
1959 	       &info->super_for_commit->super_roots,
1960 	       sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS);
1961 }
1962 
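/*
 * Worked example of the ring arithmetic above (BTRFS_NUM_BACKUP_ROOTS
 * assumed to be 4): with backup_root_index == 2, next_backup == 2 and
 * last_backup == (2 + 4 - 1) % 4 == 1.  After the roots are copied in,
 * backup_root_index advances to (2 + 1) % 4 == 3, so the slots are
 * reused oldest-first.
 */
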
1963 /*
1964  * this copies info out of the root backup array and back into
1965  * the in-memory super block.  It is meant to help iterate through
1966  * the array, so you send it the number of backups you've already
1967  * tried and the last backup index you used.
1968  *
1969  * this returns -1 when it has tried all the backups
1970  */
1971 static noinline int next_root_backup(struct btrfs_fs_info *info,
1972 				     struct btrfs_super_block *super,
1973 				     int *num_backups_tried, int *backup_index)
1974 {
1975 	struct btrfs_root_backup *root_backup;
1976 	int newest = *backup_index;
1977 
1978 	if (*num_backups_tried == 0) {
1979 		u64 gen = btrfs_super_generation(super);
1980 
1981 		newest = find_newest_super_backup(info, gen);
1982 		if (newest == -1)
1983 			return -1;
1984 
1985 		*backup_index = newest;
1986 		*num_backups_tried = 1;
1987 	} else if (*num_backups_tried == BTRFS_NUM_BACKUP_ROOTS) {
1988 		/* we've tried all the backups, all done */
1989 		return -1;
1990 	} else {
1991 		/* jump to the next oldest backup */
1992 		newest = (*backup_index + BTRFS_NUM_BACKUP_ROOTS - 1) %
1993 			BTRFS_NUM_BACKUP_ROOTS;
1994 		*backup_index = newest;
1995 		*num_backups_tried += 1;
1996 	}
1997 	root_backup = super->super_roots + newest;
1998 
1999 	btrfs_set_super_generation(super,
2000 				   btrfs_backup_tree_root_gen(root_backup));
2001 	btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup));
2002 	btrfs_set_super_root_level(super,
2003 				   btrfs_backup_tree_root_level(root_backup));
2004 	btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup));
2005 
2006 	/*
2007 	 * fixme: the total bytes and num_devices need to match, or we
2008 	 * should require a fsck
2009 	 */
2010 	btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup));
2011 	btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup));
2012 	return 0;
2013 }
2014 
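/*
 * Illustrative sketch (simplified from the retry_root_backup loop in
 * open_ctree() below; read_tree_root_ok() is a hypothetical helper):
 *
 *	while (next_root_backup(fs_info, fs_info->super_copy,
 *				&num_backups_tried, &backup_index) == 0) {
 *		if (read_tree_root_ok(fs_info))
 *			break;		// found a usable backup root
 *	}
 */
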
2015 /* helper to cleanup workers */
2016 static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
2017 {
2018 	btrfs_destroy_workqueue(fs_info->fixup_workers);
2019 	btrfs_destroy_workqueue(fs_info->delalloc_workers);
2020 	btrfs_destroy_workqueue(fs_info->workers);
2021 	btrfs_destroy_workqueue(fs_info->endio_workers);
2022 	btrfs_destroy_workqueue(fs_info->endio_raid56_workers);
2023 	btrfs_destroy_workqueue(fs_info->endio_repair_workers);
2024 	btrfs_destroy_workqueue(fs_info->rmw_workers);
2025 	btrfs_destroy_workqueue(fs_info->endio_write_workers);
2026 	btrfs_destroy_workqueue(fs_info->endio_freespace_worker);
2027 	btrfs_destroy_workqueue(fs_info->submit_workers);
2028 	btrfs_destroy_workqueue(fs_info->delayed_workers);
2029 	btrfs_destroy_workqueue(fs_info->caching_workers);
2030 	btrfs_destroy_workqueue(fs_info->readahead_workers);
2031 	btrfs_destroy_workqueue(fs_info->flush_workers);
2032 	btrfs_destroy_workqueue(fs_info->qgroup_rescan_workers);
2033 	btrfs_destroy_workqueue(fs_info->extent_workers);
2034 	/*
2035 	 * Now that all other work queues are destroyed, we can safely destroy
2036 	 * the queues used for metadata I/O, since tasks from those other work
2037 	 * queues can do metadata I/O operations.
2038 	 */
2039 	btrfs_destroy_workqueue(fs_info->endio_meta_workers);
2040 	btrfs_destroy_workqueue(fs_info->endio_meta_write_workers);
2041 }
2042 
2043 static void free_root_extent_buffers(struct btrfs_root *root)
2044 {
2045 	if (root) {
2046 		free_extent_buffer(root->node);
2047 		free_extent_buffer(root->commit_root);
2048 		root->node = NULL;
2049 		root->commit_root = NULL;
2050 	}
2051 }
2052 
2053 /* helper to cleanup tree roots */
2054 static void free_root_pointers(struct btrfs_fs_info *info, bool free_chunk_root)
2055 {
2056 	free_root_extent_buffers(info->tree_root);
2057 
2058 	free_root_extent_buffers(info->dev_root);
2059 	free_root_extent_buffers(info->extent_root);
2060 	free_root_extent_buffers(info->csum_root);
2061 	free_root_extent_buffers(info->quota_root);
2062 	free_root_extent_buffers(info->uuid_root);
2063 	if (free_chunk_root)
2064 		free_root_extent_buffers(info->chunk_root);
2065 	free_root_extent_buffers(info->free_space_root);
2066 }
2067 
2068 void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info)
2069 {
2070 	int ret;
2071 	struct btrfs_root *gang[8];
2072 	int i;
2073 
2074 	while (!list_empty(&fs_info->dead_roots)) {
2075 		gang[0] = list_entry(fs_info->dead_roots.next,
2076 				     struct btrfs_root, root_list);
2077 		list_del(&gang[0]->root_list);
2078 
2079 		if (test_bit(BTRFS_ROOT_IN_RADIX, &gang[0]->state)) {
2080 			btrfs_drop_and_free_fs_root(fs_info, gang[0]);
2081 		} else {
2082 			free_extent_buffer(gang[0]->node);
2083 			free_extent_buffer(gang[0]->commit_root);
2084 			btrfs_put_fs_root(gang[0]);
2085 		}
2086 	}
2087 
2088 	while (1) {
2089 		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
2090 					     (void **)gang, 0,
2091 					     ARRAY_SIZE(gang));
2092 		if (!ret)
2093 			break;
2094 		for (i = 0; i < ret; i++)
2095 			btrfs_drop_and_free_fs_root(fs_info, gang[i]);
2096 	}
2097 
2098 	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
2099 		btrfs_free_log_root_tree(NULL, fs_info);
2100 		btrfs_destroy_pinned_extent(fs_info, fs_info->pinned_extents);
2101 	}
2102 }
2103 
2104 static void btrfs_init_scrub(struct btrfs_fs_info *fs_info)
2105 {
2106 	mutex_init(&fs_info->scrub_lock);
2107 	atomic_set(&fs_info->scrubs_running, 0);
2108 	atomic_set(&fs_info->scrub_pause_req, 0);
2109 	atomic_set(&fs_info->scrubs_paused, 0);
2110 	atomic_set(&fs_info->scrub_cancel_req, 0);
2111 	init_waitqueue_head(&fs_info->scrub_pause_wait);
2112 	fs_info->scrub_workers_refcnt = 0;
2113 }
2114 
2115 static void btrfs_init_balance(struct btrfs_fs_info *fs_info)
2116 {
2117 	spin_lock_init(&fs_info->balance_lock);
2118 	mutex_init(&fs_info->balance_mutex);
2119 	atomic_set(&fs_info->balance_running, 0);
2120 	atomic_set(&fs_info->balance_pause_req, 0);
2121 	atomic_set(&fs_info->balance_cancel_req, 0);
2122 	fs_info->balance_ctl = NULL;
2123 	init_waitqueue_head(&fs_info->balance_wait_q);
2124 }
2125 
2126 static void btrfs_init_btree_inode(struct btrfs_fs_info *fs_info)
2127 {
2128 	struct inode *inode = fs_info->btree_inode;
2129 
2130 	inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
2131 	set_nlink(inode, 1);
2132 	/*
2133 	 * we set the i_size on the btree inode to the max possible int.
2134 	 * the real end of the address space is determined by all of
2135 	 * the devices in the system
2136 	 */
2137 	inode->i_size = OFFSET_MAX;
2138 	inode->i_mapping->a_ops = &btree_aops;
2139 
2140 	RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
2141 	extent_io_tree_init(&BTRFS_I(inode)->io_tree, inode);
2142 	BTRFS_I(inode)->io_tree.track_uptodate = 0;
2143 	extent_map_tree_init(&BTRFS_I(inode)->extent_tree);
2144 
2145 	BTRFS_I(inode)->io_tree.ops = &btree_extent_io_ops;
2146 
2147 	BTRFS_I(inode)->root = fs_info->tree_root;
2148 	memset(&BTRFS_I(inode)->location, 0, sizeof(struct btrfs_key));
2149 	set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
2150 	btrfs_insert_inode_hash(inode);
2151 }
2152 
2153 static void btrfs_init_dev_replace_locks(struct btrfs_fs_info *fs_info)
2154 {
2155 	fs_info->dev_replace.lock_owner = 0;
2156 	atomic_set(&fs_info->dev_replace.nesting_level, 0);
2157 	mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount);
2158 	rwlock_init(&fs_info->dev_replace.lock);
2159 	atomic_set(&fs_info->dev_replace.read_locks, 0);
2160 	atomic_set(&fs_info->dev_replace.blocking_readers, 0);
2161 	init_waitqueue_head(&fs_info->replace_wait);
2162 	init_waitqueue_head(&fs_info->dev_replace.read_lock_wq);
2163 }
2164 
2165 static void btrfs_init_qgroup(struct btrfs_fs_info *fs_info)
2166 {
2167 	spin_lock_init(&fs_info->qgroup_lock);
2168 	mutex_init(&fs_info->qgroup_ioctl_lock);
2169 	fs_info->qgroup_tree = RB_ROOT;
2170 	fs_info->qgroup_op_tree = RB_ROOT;
2171 	INIT_LIST_HEAD(&fs_info->dirty_qgroups);
2172 	fs_info->qgroup_seq = 1;
2173 	fs_info->qgroup_ulist = NULL;
2174 	fs_info->qgroup_rescan_running = false;
2175 	mutex_init(&fs_info->qgroup_rescan_lock);
2176 }
2177 
2178 static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info,
2179 		struct btrfs_fs_devices *fs_devices)
2180 {
2181 	int max_active = fs_info->thread_pool_size;
2182 	unsigned int flags = WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND;
2183 
2184 	fs_info->workers =
2185 		btrfs_alloc_workqueue(fs_info, "worker",
2186 				      flags | WQ_HIGHPRI, max_active, 16);
2187 
2188 	fs_info->delalloc_workers =
2189 		btrfs_alloc_workqueue(fs_info, "delalloc",
2190 				      flags, max_active, 2);
2191 
2192 	fs_info->flush_workers =
2193 		btrfs_alloc_workqueue(fs_info, "flush_delalloc",
2194 				      flags, max_active, 0);
2195 
2196 	fs_info->caching_workers =
2197 		btrfs_alloc_workqueue(fs_info, "cache", flags, max_active, 0);
2198 
2199 	/*
2200 	 * A higher idle thresh on the submit workers makes it much more
2201 	 * likely that bios will be sent down in a sane order to the
2202 	 * devices.
2203 	 */
2204 	fs_info->submit_workers =
2205 		btrfs_alloc_workqueue(fs_info, "submit", flags,
2206 				      min_t(u64, fs_devices->num_devices,
2207 					    max_active), 64);
2208 
2209 	fs_info->fixup_workers =
2210 		btrfs_alloc_workqueue(fs_info, "fixup", flags, 1, 0);
2211 
2212 	/*
2213 	 * endios are largely parallel and should have a very
2214 	 * low idle thresh
2215 	 */
2216 	fs_info->endio_workers =
2217 		btrfs_alloc_workqueue(fs_info, "endio", flags, max_active, 4);
2218 	fs_info->endio_meta_workers =
2219 		btrfs_alloc_workqueue(fs_info, "endio-meta", flags,
2220 				      max_active, 4);
2221 	fs_info->endio_meta_write_workers =
2222 		btrfs_alloc_workqueue(fs_info, "endio-meta-write", flags,
2223 				      max_active, 2);
2224 	fs_info->endio_raid56_workers =
2225 		btrfs_alloc_workqueue(fs_info, "endio-raid56", flags,
2226 				      max_active, 4);
2227 	fs_info->endio_repair_workers =
2228 		btrfs_alloc_workqueue(fs_info, "endio-repair", flags, 1, 0);
2229 	fs_info->rmw_workers =
2230 		btrfs_alloc_workqueue(fs_info, "rmw", flags, max_active, 2);
2231 	fs_info->endio_write_workers =
2232 		btrfs_alloc_workqueue(fs_info, "endio-write", flags,
2233 				      max_active, 2);
2234 	fs_info->endio_freespace_worker =
2235 		btrfs_alloc_workqueue(fs_info, "freespace-write", flags,
2236 				      max_active, 0);
2237 	fs_info->delayed_workers =
2238 		btrfs_alloc_workqueue(fs_info, "delayed-meta", flags,
2239 				      max_active, 0);
2240 	fs_info->readahead_workers =
2241 		btrfs_alloc_workqueue(fs_info, "readahead", flags,
2242 				      max_active, 2);
2243 	fs_info->qgroup_rescan_workers =
2244 		btrfs_alloc_workqueue(fs_info, "qgroup-rescan", flags, 1, 0);
2245 	fs_info->extent_workers =
2246 		btrfs_alloc_workqueue(fs_info, "extent-refs", flags,
2247 				      min_t(u64, fs_devices->num_devices,
2248 					    max_active), 8);
2249 
2250 	if (!(fs_info->workers && fs_info->delalloc_workers &&
2251 	      fs_info->submit_workers && fs_info->flush_workers &&
2252 	      fs_info->endio_workers && fs_info->endio_meta_workers &&
2253 	      fs_info->endio_meta_write_workers &&
2254 	      fs_info->endio_repair_workers &&
2255 	      fs_info->endio_write_workers && fs_info->endio_raid56_workers &&
2256 	      fs_info->endio_freespace_worker && fs_info->rmw_workers &&
2257 	      fs_info->caching_workers && fs_info->readahead_workers &&
2258 	      fs_info->fixup_workers && fs_info->delayed_workers &&
2259 	      fs_info->extent_workers &&
2260 	      fs_info->qgroup_rescan_workers)) {
2261 		return -ENOMEM;
2262 	}
2263 
2264 	return 0;
2265 }
2266 
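/*
 * Parameter anatomy for the allocations above, as a sketch (the thresh
 * semantics are an assumption based on the comments in this function):
 *
 *	wq = btrfs_alloc_workqueue(fs_info, name, flags, max_active, thresh);
 *
 * max_active caps the number of concurrent work items, and thresh is
 * the idle threshold used to grow or shrink the pool -- hence the low
 * value of 4 for the highly parallel "endio" queues and the higher
 * value of 64 for "submit", which benefits from batching.
 */
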
2267 static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
2268 			    struct btrfs_fs_devices *fs_devices)
2269 {
2270 	int ret;
2271 	struct btrfs_root *log_tree_root;
2272 	struct btrfs_super_block *disk_super = fs_info->super_copy;
2273 	u64 bytenr = btrfs_super_log_root(disk_super);
2274 
2275 	if (fs_devices->rw_devices == 0) {
2276 		btrfs_warn(fs_info, "log replay required on RO media");
2277 		return -EIO;
2278 	}
2279 
2280 	log_tree_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
2281 	if (!log_tree_root)
2282 		return -ENOMEM;
2283 
2284 	__setup_root(log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID);
2285 
2286 	log_tree_root->node = read_tree_block(fs_info, bytenr,
2287 					      fs_info->generation + 1);
2288 	if (IS_ERR(log_tree_root->node)) {
2289 		btrfs_warn(fs_info, "failed to read log tree");
2290 		ret = PTR_ERR(log_tree_root->node);
2291 		kfree(log_tree_root);
2292 		return ret;
2293 	} else if (!extent_buffer_uptodate(log_tree_root->node)) {
2294 		btrfs_err(fs_info, "failed to read log tree");
2295 		free_extent_buffer(log_tree_root->node);
2296 		kfree(log_tree_root);
2297 		return -EIO;
2298 	}
2299 	/* returns with log_tree_root freed on success */
2300 	ret = btrfs_recover_log_trees(log_tree_root);
2301 	if (ret) {
2302 		btrfs_handle_fs_error(fs_info, ret,
2303 				      "Failed to recover log tree");
2304 		free_extent_buffer(log_tree_root->node);
2305 		kfree(log_tree_root);
2306 		return ret;
2307 	}
2308 
2309 	if (sb_rdonly(fs_info->sb)) {
2310 		ret = btrfs_commit_super(fs_info);
2311 		if (ret)
2312 			return ret;
2313 	}
2314 
2315 	return 0;
2316 }
2317 
2318 static int btrfs_read_roots(struct btrfs_fs_info *fs_info)
2319 {
2320 	struct btrfs_root *tree_root = fs_info->tree_root;
2321 	struct btrfs_root *root;
2322 	struct btrfs_key location;
2323 	int ret;
2324 
2325 	BUG_ON(!fs_info->tree_root);
2326 
2327 	location.objectid = BTRFS_EXTENT_TREE_OBJECTID;
2328 	location.type = BTRFS_ROOT_ITEM_KEY;
2329 	location.offset = 0;
2330 
2331 	root = btrfs_read_tree_root(tree_root, &location);
2332 	if (IS_ERR(root))
2333 		return PTR_ERR(root);
2334 	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2335 	fs_info->extent_root = root;
2336 
2337 	location.objectid = BTRFS_DEV_TREE_OBJECTID;
2338 	root = btrfs_read_tree_root(tree_root, &location);
2339 	if (IS_ERR(root))
2340 		return PTR_ERR(root);
2341 	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2342 	fs_info->dev_root = root;
2343 	btrfs_init_devices_late(fs_info);
2344 
2345 	location.objectid = BTRFS_CSUM_TREE_OBJECTID;
2346 	root = btrfs_read_tree_root(tree_root, &location);
2347 	if (IS_ERR(root))
2348 		return PTR_ERR(root);
2349 	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2350 	fs_info->csum_root = root;
2351 
2352 	location.objectid = BTRFS_QUOTA_TREE_OBJECTID;
2353 	root = btrfs_read_tree_root(tree_root, &location);
2354 	if (!IS_ERR(root)) {
2355 		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2356 		set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
2357 		fs_info->quota_root = root;
2358 	}
2359 
2360 	location.objectid = BTRFS_UUID_TREE_OBJECTID;
2361 	root = btrfs_read_tree_root(tree_root, &location);
2362 	if (IS_ERR(root)) {
2363 		ret = PTR_ERR(root);
2364 		if (ret != -ENOENT)
2365 			return ret;
2366 	} else {
2367 		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2368 		fs_info->uuid_root = root;
2369 	}
2370 
2371 	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
2372 		location.objectid = BTRFS_FREE_SPACE_TREE_OBJECTID;
2373 		root = btrfs_read_tree_root(tree_root, &location);
2374 		if (IS_ERR(root))
2375 			return PTR_ERR(root);
2376 		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2377 		fs_info->free_space_root = root;
2378 	}
2379 
2380 	return 0;
2381 }
2382 
2383 int open_ctree(struct super_block *sb,
2384 	       struct btrfs_fs_devices *fs_devices,
2385 	       char *options)
2386 {
2387 	u32 sectorsize;
2388 	u32 nodesize;
2389 	u32 stripesize;
2390 	u64 generation;
2391 	u64 features;
2392 	struct btrfs_key location;
2393 	struct buffer_head *bh;
2394 	struct btrfs_super_block *disk_super;
2395 	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
2396 	struct btrfs_root *tree_root;
2397 	struct btrfs_root *chunk_root;
2398 	int ret;
2399 	int err = -EINVAL;
2400 	int num_backups_tried = 0;
2401 	int backup_index = 0;
2402 	int max_active;
2403 	int clear_free_space_tree = 0;
2404 
2405 	tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
2406 	chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
2407 	if (!tree_root || !chunk_root) {
2408 		err = -ENOMEM;
2409 		goto fail;
2410 	}
2411 
2412 	ret = init_srcu_struct(&fs_info->subvol_srcu);
2413 	if (ret) {
2414 		err = ret;
2415 		goto fail;
2416 	}
2417 
2418 	ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0, GFP_KERNEL);
2419 	if (ret) {
2420 		err = ret;
2421 		goto fail_srcu;
2422 	}
2423 	fs_info->dirty_metadata_batch = PAGE_SIZE *
2424 					(1 + ilog2(nr_cpu_ids));
2425 
2426 	ret = percpu_counter_init(&fs_info->delalloc_bytes, 0, GFP_KERNEL);
2427 	if (ret) {
2428 		err = ret;
2429 		goto fail_dirty_metadata_bytes;
2430 	}
2431 
2432 	ret = percpu_counter_init(&fs_info->bio_counter, 0, GFP_KERNEL);
2433 	if (ret) {
2434 		err = ret;
2435 		goto fail_delalloc_bytes;
2436 	}
2437 
2438 	fs_info->btree_inode = new_inode(sb);
2439 	if (!fs_info->btree_inode) {
2440 		err = -ENOMEM;
2441 		goto fail_bio_counter;
2442 	}
2443 
2444 	mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
2445 
2446 	INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
2447 	INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC);
2448 	INIT_LIST_HEAD(&fs_info->trans_list);
2449 	INIT_LIST_HEAD(&fs_info->dead_roots);
2450 	INIT_LIST_HEAD(&fs_info->delayed_iputs);
2451 	INIT_LIST_HEAD(&fs_info->delalloc_roots);
2452 	INIT_LIST_HEAD(&fs_info->caching_block_groups);
2453 	spin_lock_init(&fs_info->delalloc_root_lock);
2454 	spin_lock_init(&fs_info->trans_lock);
2455 	spin_lock_init(&fs_info->fs_roots_radix_lock);
2456 	spin_lock_init(&fs_info->delayed_iput_lock);
2457 	spin_lock_init(&fs_info->defrag_inodes_lock);
2458 	spin_lock_init(&fs_info->super_lock);
2459 	spin_lock_init(&fs_info->qgroup_op_lock);
2460 	spin_lock_init(&fs_info->buffer_lock);
2461 	spin_lock_init(&fs_info->unused_bgs_lock);
2462 	rwlock_init(&fs_info->tree_mod_log_lock);
2463 	mutex_init(&fs_info->unused_bg_unpin_mutex);
2464 	mutex_init(&fs_info->delete_unused_bgs_mutex);
2465 	mutex_init(&fs_info->reloc_mutex);
2466 	mutex_init(&fs_info->delalloc_root_mutex);
2467 	mutex_init(&fs_info->cleaner_delayed_iput_mutex);
2468 	seqlock_init(&fs_info->profiles_lock);
2469 
2470 	INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
2471 	INIT_LIST_HEAD(&fs_info->space_info);
2472 	INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
2473 	INIT_LIST_HEAD(&fs_info->unused_bgs);
2474 	btrfs_mapping_init(&fs_info->mapping_tree);
2475 	btrfs_init_block_rsv(&fs_info->global_block_rsv,
2476 			     BTRFS_BLOCK_RSV_GLOBAL);
2477 	btrfs_init_block_rsv(&fs_info->delalloc_block_rsv,
2478 			     BTRFS_BLOCK_RSV_DELALLOC);
2479 	btrfs_init_block_rsv(&fs_info->trans_block_rsv, BTRFS_BLOCK_RSV_TRANS);
2480 	btrfs_init_block_rsv(&fs_info->chunk_block_rsv, BTRFS_BLOCK_RSV_CHUNK);
2481 	btrfs_init_block_rsv(&fs_info->empty_block_rsv, BTRFS_BLOCK_RSV_EMPTY);
2482 	btrfs_init_block_rsv(&fs_info->delayed_block_rsv,
2483 			     BTRFS_BLOCK_RSV_DELOPS);
2484 	atomic_set(&fs_info->nr_async_submits, 0);
2485 	atomic_set(&fs_info->async_delalloc_pages, 0);
2486 	atomic_set(&fs_info->async_submit_draining, 0);
2487 	atomic_set(&fs_info->nr_async_bios, 0);
2488 	atomic_set(&fs_info->defrag_running, 0);
2489 	atomic_set(&fs_info->qgroup_op_seq, 0);
2490 	atomic_set(&fs_info->reada_works_cnt, 0);
2491 	atomic64_set(&fs_info->tree_mod_seq, 0);
2492 	fs_info->sb = sb;
2493 	fs_info->max_inline = BTRFS_DEFAULT_MAX_INLINE;
2494 	fs_info->metadata_ratio = 0;
2495 	fs_info->defrag_inodes = RB_ROOT;
2496 	atomic64_set(&fs_info->free_chunk_space, 0);
2497 	fs_info->tree_mod_log = RB_ROOT;
2498 	fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
2499 	fs_info->avg_delayed_ref_runtime = NSEC_PER_SEC >> 6; /* div by 64 */
2500 	/* readahead state */
2501 	INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
2502 	spin_lock_init(&fs_info->reada_lock);
2503 
2504 	fs_info->thread_pool_size = min_t(unsigned long,
2505 					  num_online_cpus() + 2, 8);
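	/*
	 * Worked example: a 2-CPU box gets min(2 + 2, 8) == 4 worker
	 * threads; with six or more CPUs the pool size is capped at 8.
	 */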
2506 
2507 	INIT_LIST_HEAD(&fs_info->ordered_roots);
2508 	spin_lock_init(&fs_info->ordered_root_lock);
2509 	fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root),
2510 					GFP_KERNEL);
2511 	if (!fs_info->delayed_root) {
2512 		err = -ENOMEM;
2513 		goto fail_iput;
2514 	}
2515 	btrfs_init_delayed_root(fs_info->delayed_root);
2516 
2517 	btrfs_init_scrub(fs_info);
2518 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
2519 	fs_info->check_integrity_print_mask = 0;
2520 #endif
2521 	btrfs_init_balance(fs_info);
2522 	btrfs_init_async_reclaim_work(&fs_info->async_reclaim_work);
2523 
2524 	sb->s_blocksize = BTRFS_BDEV_BLOCKSIZE;
2525 	sb->s_blocksize_bits = blksize_bits(BTRFS_BDEV_BLOCKSIZE);
2526 
2527 	btrfs_init_btree_inode(fs_info);
2528 
2529 	spin_lock_init(&fs_info->block_group_cache_lock);
2530 	fs_info->block_group_cache_tree = RB_ROOT;
2531 	fs_info->first_logical_byte = (u64)-1;
2532 
2533 	extent_io_tree_init(&fs_info->freed_extents[0], NULL);
2534 	extent_io_tree_init(&fs_info->freed_extents[1], NULL);
2535 	fs_info->pinned_extents = &fs_info->freed_extents[0];
2536 	set_bit(BTRFS_FS_BARRIER, &fs_info->flags);
2537 
2538 	mutex_init(&fs_info->ordered_operations_mutex);
2539 	mutex_init(&fs_info->tree_log_mutex);
2540 	mutex_init(&fs_info->chunk_mutex);
2541 	mutex_init(&fs_info->transaction_kthread_mutex);
2542 	mutex_init(&fs_info->cleaner_mutex);
2543 	mutex_init(&fs_info->volume_mutex);
2544 	mutex_init(&fs_info->ro_block_group_mutex);
2545 	init_rwsem(&fs_info->commit_root_sem);
2546 	init_rwsem(&fs_info->cleanup_work_sem);
2547 	init_rwsem(&fs_info->subvol_sem);
2548 	sema_init(&fs_info->uuid_tree_rescan_sem, 1);
2549 
2550 	btrfs_init_dev_replace_locks(fs_info);
2551 	btrfs_init_qgroup(fs_info);
2552 
2553 	btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
2554 	btrfs_init_free_cluster(&fs_info->data_alloc_cluster);
2555 
2556 	init_waitqueue_head(&fs_info->transaction_throttle);
2557 	init_waitqueue_head(&fs_info->transaction_wait);
2558 	init_waitqueue_head(&fs_info->transaction_blocked_wait);
2559 	init_waitqueue_head(&fs_info->async_submit_wait);
2560 
2561 	INIT_LIST_HEAD(&fs_info->pinned_chunks);
2562 
2563 	/* Usable values until the real ones are cached from the superblock */
2564 	fs_info->nodesize = 4096;
2565 	fs_info->sectorsize = 4096;
2566 	fs_info->stripesize = 4096;
2567 
2568 	ret = btrfs_alloc_stripe_hash_table(fs_info);
2569 	if (ret) {
2570 		err = ret;
2571 		goto fail_alloc;
2572 	}
2573 
2574 	__setup_root(tree_root, fs_info, BTRFS_ROOT_TREE_OBJECTID);
2575 
2576 	invalidate_bdev(fs_devices->latest_bdev);
2577 
2578 	/*
2579 	 * Read super block and check the signature bytes only
2580 	 */
2581 	bh = btrfs_read_dev_super(fs_devices->latest_bdev);
2582 	if (IS_ERR(bh)) {
2583 		err = PTR_ERR(bh);
2584 		goto fail_alloc;
2585 	}
2586 
2587 	/*
2588 	 * We want to check the superblock checksum; the csum type is stored
2589 	 * inside.  Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k).
2590 	 */
2591 	if (btrfs_check_super_csum(fs_info, bh->b_data)) {
2592 		btrfs_err(fs_info, "superblock checksum mismatch");
2593 		err = -EINVAL;
2594 		brelse(bh);
2595 		goto fail_alloc;
2596 	}
2597 
2598 	/*
2599 	 * super_copy is zeroed at allocation time and we never touch the
2600 	 * following bytes up to INFO_SIZE; the checksum is calculated over
2601 	 * the whole block of INFO_SIZE.
2602 	 */
2603 	memcpy(fs_info->super_copy, bh->b_data, sizeof(*fs_info->super_copy));
2604 	memcpy(fs_info->super_for_commit, fs_info->super_copy,
2605 	       sizeof(*fs_info->super_for_commit));
2606 	brelse(bh);
2607 
2608 	memcpy(fs_info->fsid, fs_info->super_copy->fsid, BTRFS_FSID_SIZE);
2609 
2610 	ret = btrfs_check_super_valid(fs_info);
2611 	if (ret) {
2612 		btrfs_err(fs_info, "superblock contains fatal errors");
2613 		err = -EINVAL;
2614 		goto fail_alloc;
2615 	}
2616 
2617 	disk_super = fs_info->super_copy;
2618 	if (!btrfs_super_root(disk_super))
2619 		goto fail_alloc;
2620 
2621 	/* check FS state, whether FS is broken. */
2622 	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR)
2623 		set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);
2624 
2625 	/*
2626 	 * Run through our array of backup supers and set up
2627 	 * our ring pointer to the oldest one.
2628 	 */
2629 	generation = btrfs_super_generation(disk_super);
2630 	find_oldest_super_backup(fs_info, generation);
2631 
2632 	/*
2633 	 * In the long term, we'll store the compression type in the super
2634 	 * block, and it'll be used for per file compression control.
2635 	 */
2636 	fs_info->compress_type = BTRFS_COMPRESS_ZLIB;
2637 
2638 	ret = btrfs_parse_options(fs_info, options, sb->s_flags);
2639 	if (ret) {
2640 		err = ret;
2641 		goto fail_alloc;
2642 	}
2643 
2644 	features = btrfs_super_incompat_flags(disk_super) &
2645 		~BTRFS_FEATURE_INCOMPAT_SUPP;
2646 	if (features) {
2647 		btrfs_err(fs_info,
2648 		    "cannot mount because of unsupported optional features (%llx)",
2649 		    features);
2650 		err = -EINVAL;
2651 		goto fail_alloc;
2652 	}
2653 
2654 	features = btrfs_super_incompat_flags(disk_super);
2655 	features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
2656 	if (fs_info->compress_type == BTRFS_COMPRESS_LZO)
2657 		features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
2658 	else if (fs_info->compress_type == BTRFS_COMPRESS_ZSTD)
2659 		features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_ZSTD;
2660 
2661 	if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
2662 		btrfs_info(fs_info, "has skinny extents");
2663 
2664 	/*
2665 	 * flag our filesystem as having big metadata blocks if
2666 	 * they are bigger than the page size
2667 	 */
2668 	if (btrfs_super_nodesize(disk_super) > PAGE_SIZE) {
2669 		if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
2670 			btrfs_info(fs_info,
2671 				"flagging fs with big metadata feature");
2672 		features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
2673 	}
2674 
2675 	nodesize = btrfs_super_nodesize(disk_super);
2676 	sectorsize = btrfs_super_sectorsize(disk_super);
2677 	stripesize = sectorsize;
2678 	fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids));
2679 	fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
2680 
2681 	/* Cache block sizes */
2682 	fs_info->nodesize = nodesize;
2683 	fs_info->sectorsize = sectorsize;
2684 	fs_info->stripesize = stripesize;
2685 
2686 	/*
2687 	 * Mixed block groups end up with duplicate but slightly offset
2688 	 * extent buffers for the same range, which leads to corruption.
2689 	 */
2690 	if ((features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
2691 	    (sectorsize != nodesize)) {
2692 		btrfs_err(fs_info,
2693 "unequal nodesize/sectorsize (%u != %u) are not allowed for mixed block groups",
2694 			nodesize, sectorsize);
2695 		goto fail_alloc;
2696 	}
2697 
2698 	/*
2699 	 * We needn't take the lock because no other task will
2700 	 * update the flag.
2701 	 */
2702 	btrfs_set_super_incompat_flags(disk_super, features);
2703 
2704 	features = btrfs_super_compat_ro_flags(disk_super) &
2705 		~BTRFS_FEATURE_COMPAT_RO_SUPP;
2706 	if (!sb_rdonly(sb) && features) {
2707 		btrfs_err(fs_info,
2708 	"cannot mount read-write because of unsupported optional features (%llx)",
2709 		       features);
2710 		err = -EINVAL;
2711 		goto fail_alloc;
2712 	}
2713 
2714 	max_active = fs_info->thread_pool_size;
2715 
2716 	ret = btrfs_init_workqueues(fs_info, fs_devices);
2717 	if (ret) {
2718 		err = ret;
2719 		goto fail_sb_buffer;
2720 	}
2721 
2722 	sb->s_bdi->congested_fn = btrfs_congested_fn;
2723 	sb->s_bdi->congested_data = fs_info;
2724 	sb->s_bdi->capabilities |= BDI_CAP_CGROUP_WRITEBACK;
2725 	sb->s_bdi->ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_SIZE;
2726 	sb->s_bdi->ra_pages *= btrfs_super_num_devices(disk_super);
2727 	sb->s_bdi->ra_pages = max(sb->s_bdi->ra_pages, SZ_4M / PAGE_SIZE);
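	/*
	 * Worked example (assuming VM_MAX_READAHEAD == 128, i.e. 128KiB,
	 * and 4KiB pages): ra_pages starts at 128 * 1024 / 4096 == 32
	 * pages, is scaled by the device count (4 devices -> 128 pages),
	 * and is then clamped to at least SZ_4M / PAGE_SIZE == 1024
	 * pages, so the max() dominates for small arrays.
	 */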
2728 
2729 	sb->s_blocksize = sectorsize;
2730 	sb->s_blocksize_bits = blksize_bits(sectorsize);
2731 
2732 	mutex_lock(&fs_info->chunk_mutex);
2733 	ret = btrfs_read_sys_array(fs_info);
2734 	mutex_unlock(&fs_info->chunk_mutex);
2735 	if (ret) {
2736 		btrfs_err(fs_info, "failed to read the system array: %d", ret);
2737 		goto fail_sb_buffer;
2738 	}
2739 
2740 	generation = btrfs_super_chunk_root_generation(disk_super);
2741 
2742 	__setup_root(chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);
2743 
2744 	chunk_root->node = read_tree_block(fs_info,
2745 					   btrfs_super_chunk_root(disk_super),
2746 					   generation);
2747 	if (IS_ERR(chunk_root->node) ||
2748 	    !extent_buffer_uptodate(chunk_root->node)) {
2749 		btrfs_err(fs_info, "failed to read chunk root");
2750 		if (!IS_ERR(chunk_root->node))
2751 			free_extent_buffer(chunk_root->node);
2752 		chunk_root->node = NULL;
2753 		goto fail_tree_roots;
2754 	}
2755 	btrfs_set_root_node(&chunk_root->root_item, chunk_root->node);
2756 	chunk_root->commit_root = btrfs_root_node(chunk_root);
2757 
2758 	read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
2759 	   btrfs_header_chunk_tree_uuid(chunk_root->node), BTRFS_UUID_SIZE);
2760 
2761 	ret = btrfs_read_chunk_tree(fs_info);
2762 	if (ret) {
2763 		btrfs_err(fs_info, "failed to read chunk tree: %d", ret);
2764 		goto fail_tree_roots;
2765 	}
2766 
2767 	/*
2768 	 * keep the device that is marked to be the target device for the
2769 	 * dev_replace procedure
2770 	 */
2771 	btrfs_close_extra_devices(fs_devices, 0);
2772 
2773 	if (!fs_devices->latest_bdev) {
2774 		btrfs_err(fs_info, "failed to read devices");
2775 		goto fail_tree_roots;
2776 	}
2777 
2778 retry_root_backup:
2779 	generation = btrfs_super_generation(disk_super);
2780 
2781 	tree_root->node = read_tree_block(fs_info,
2782 					  btrfs_super_root(disk_super),
2783 					  generation);
2784 	if (IS_ERR(tree_root->node) ||
2785 	    !extent_buffer_uptodate(tree_root->node)) {
2786 		btrfs_warn(fs_info, "failed to read tree root");
2787 		if (!IS_ERR(tree_root->node))
2788 			free_extent_buffer(tree_root->node);
2789 		tree_root->node = NULL;
2790 		goto recovery_tree_root;
2791 	}
2792 
2793 	btrfs_set_root_node(&tree_root->root_item, tree_root->node);
2794 	tree_root->commit_root = btrfs_root_node(tree_root);
2795 	btrfs_set_root_refs(&tree_root->root_item, 1);
2796 
2797 	mutex_lock(&tree_root->objectid_mutex);
2798 	ret = btrfs_find_highest_objectid(tree_root,
2799 					&tree_root->highest_objectid);
2800 	if (ret) {
2801 		mutex_unlock(&tree_root->objectid_mutex);
2802 		goto recovery_tree_root;
2803 	}
2804 
2805 	ASSERT(tree_root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);
2806 
2807 	mutex_unlock(&tree_root->objectid_mutex);
2808 
2809 	ret = btrfs_read_roots(fs_info);
2810 	if (ret)
2811 		goto recovery_tree_root;
2812 
2813 	fs_info->generation = generation;
2814 	fs_info->last_trans_committed = generation;
2815 
2816 	ret = btrfs_recover_balance(fs_info);
2817 	if (ret) {
2818 		btrfs_err(fs_info, "failed to recover balance: %d", ret);
2819 		goto fail_block_groups;
2820 	}
2821 
2822 	ret = btrfs_init_dev_stats(fs_info);
2823 	if (ret) {
2824 		btrfs_err(fs_info, "failed to init dev_stats: %d", ret);
2825 		goto fail_block_groups;
2826 	}
2827 
2828 	ret = btrfs_init_dev_replace(fs_info);
2829 	if (ret) {
2830 		btrfs_err(fs_info, "failed to init dev_replace: %d", ret);
2831 		goto fail_block_groups;
2832 	}
2833 
2834 	btrfs_close_extra_devices(fs_devices, 1);
2835 
2836 	ret = btrfs_sysfs_add_fsid(fs_devices, NULL);
2837 	if (ret) {
2838 		btrfs_err(fs_info, "failed to init sysfs fsid interface: %d",
2839 				ret);
2840 		goto fail_block_groups;
2841 	}
2842 
2843 	ret = btrfs_sysfs_add_device(fs_devices);
2844 	if (ret) {
2845 		btrfs_err(fs_info, "failed to init sysfs device interface: %d",
2846 				ret);
2847 		goto fail_fsdev_sysfs;
2848 	}
2849 
2850 	ret = btrfs_sysfs_add_mounted(fs_info);
2851 	if (ret) {
2852 		btrfs_err(fs_info, "failed to init sysfs interface: %d", ret);
2853 		goto fail_fsdev_sysfs;
2854 	}
2855 
2856 	ret = btrfs_init_space_info(fs_info);
2857 	if (ret) {
2858 		btrfs_err(fs_info, "failed to initialize space info: %d", ret);
2859 		goto fail_sysfs;
2860 	}
2861 
2862 	ret = btrfs_read_block_groups(fs_info);
2863 	if (ret) {
2864 		btrfs_err(fs_info, "failed to read block groups: %d", ret);
2865 		goto fail_sysfs;
2866 	}
2867 
2868 	if (!sb_rdonly(sb) && !btrfs_check_rw_degradable(fs_info)) {
2869 		btrfs_warn(fs_info,
2870 		"writeable mount is not allowed due to too many missing devices");
2871 		goto fail_sysfs;
2872 	}
2873 
2874 	fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
2875 					       "btrfs-cleaner");
2876 	if (IS_ERR(fs_info->cleaner_kthread))
2877 		goto fail_sysfs;
2878 
2879 	fs_info->transaction_kthread = kthread_run(transaction_kthread,
2880 						   tree_root,
2881 						   "btrfs-transaction");
2882 	if (IS_ERR(fs_info->transaction_kthread))
2883 		goto fail_cleaner;
2884 
2885 	if (!btrfs_test_opt(fs_info, NOSSD) &&
2886 	    !fs_info->fs_devices->rotating) {
2887 		btrfs_set_and_info(fs_info, SSD, "enabling ssd optimizations");
2888 	}
2889 
2890 	/*
2891 	 * Mount does not set all options immediately; we can do it now and do
2892 	 * not have to wait for a transaction commit.
2893 	 */
2894 	btrfs_apply_pending_changes(fs_info);
2895 
2896 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
2897 	if (btrfs_test_opt(fs_info, CHECK_INTEGRITY)) {
2898 		ret = btrfsic_mount(fs_info, fs_devices,
2899 				    btrfs_test_opt(fs_info,
2900 					CHECK_INTEGRITY_INCLUDING_EXTENT_DATA) ?
2901 				    1 : 0,
2902 				    fs_info->check_integrity_print_mask);
2903 		if (ret)
2904 			btrfs_warn(fs_info,
2905 				"failed to initialize integrity check module: %d",
2906 				ret);
2907 	}
2908 #endif
2909 	ret = btrfs_read_qgroup_config(fs_info);
2910 	if (ret)
2911 		goto fail_trans_kthread;
2912 
2913 	/* Do not make disk changes in a broken FS or when nologreplay is given */
2914 	if (btrfs_super_log_root(disk_super) != 0 &&
2915 	    !btrfs_test_opt(fs_info, NOLOGREPLAY)) {
2916 		btrfs_info(fs_info, "start tree-log replay");
2917 		ret = btrfs_replay_log(fs_info, fs_devices);
2918 		if (ret) {
2919 			err = ret;
2920 			goto fail_qgroup;
2921 		}
2922 	}
2923 
2924 	ret = btrfs_find_orphan_roots(fs_info);
2925 	if (ret)
2926 		goto fail_qgroup;
2927 
2928 	if (!sb_rdonly(sb)) {
2929 		ret = btrfs_cleanup_fs_roots(fs_info);
2930 		if (ret)
2931 			goto fail_qgroup;
2932 
2933 		mutex_lock(&fs_info->cleaner_mutex);
2934 		ret = btrfs_recover_relocation(tree_root);
2935 		mutex_unlock(&fs_info->cleaner_mutex);
2936 		if (ret < 0) {
2937 			btrfs_warn(fs_info, "failed to recover relocation: %d",
2938 					ret);
2939 			err = -EINVAL;
2940 			goto fail_qgroup;
2941 		}
2942 	}
2943 
2944 	location.objectid = BTRFS_FS_TREE_OBJECTID;
2945 	location.type = BTRFS_ROOT_ITEM_KEY;
2946 	location.offset = 0;
2947 
2948 	fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location);
2949 	if (IS_ERR(fs_info->fs_root)) {
2950 		err = PTR_ERR(fs_info->fs_root);
2951 		goto fail_qgroup;
2952 	}
2953 
2954 	if (sb_rdonly(sb))
2955 		return 0;
2956 
2957 	if (btrfs_test_opt(fs_info, CLEAR_CACHE) &&
2958 	    btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
2959 		clear_free_space_tree = 1;
2960 	} else if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
2961 		   !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID)) {
2962 		btrfs_warn(fs_info, "free space tree is invalid");
2963 		clear_free_space_tree = 1;
2964 	}
2965 
2966 	if (clear_free_space_tree) {
2967 		btrfs_info(fs_info, "clearing free space tree");
2968 		ret = btrfs_clear_free_space_tree(fs_info);
2969 		if (ret) {
2970 			btrfs_warn(fs_info,
2971 				   "failed to clear free space tree: %d", ret);
2972 			close_ctree(fs_info);
2973 			return ret;
2974 		}
2975 	}
2976 
2977 	if (btrfs_test_opt(fs_info, FREE_SPACE_TREE) &&
2978 	    !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
2979 		btrfs_info(fs_info, "creating free space tree");
2980 		ret = btrfs_create_free_space_tree(fs_info);
2981 		if (ret) {
2982 			btrfs_warn(fs_info,
2983 				"failed to create free space tree: %d", ret);
2984 			close_ctree(fs_info);
2985 			return ret;
2986 		}
2987 	}
2988 
2989 	down_read(&fs_info->cleanup_work_sem);
2990 	if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) ||
2991 	    (ret = btrfs_orphan_cleanup(fs_info->tree_root))) {
2992 		up_read(&fs_info->cleanup_work_sem);
2993 		close_ctree(fs_info);
2994 		return ret;
2995 	}
2996 	up_read(&fs_info->cleanup_work_sem);
2997 
2998 	ret = btrfs_resume_balance_async(fs_info);
2999 	if (ret) {
3000 		btrfs_warn(fs_info, "failed to resume balance: %d", ret);
3001 		close_ctree(fs_info);
3002 		return ret;
3003 	}
3004 
3005 	ret = btrfs_resume_dev_replace_async(fs_info);
3006 	if (ret) {
3007 		btrfs_warn(fs_info, "failed to resume device replace: %d", ret);
3008 		close_ctree(fs_info);
3009 		return ret;
3010 	}
3011 
3012 	btrfs_qgroup_rescan_resume(fs_info);
3013 
3014 	if (!fs_info->uuid_root) {
3015 		btrfs_info(fs_info, "creating UUID tree");
3016 		ret = btrfs_create_uuid_tree(fs_info);
3017 		if (ret) {
3018 			btrfs_warn(fs_info,
3019 				"failed to create the UUID tree: %d", ret);
3020 			close_ctree(fs_info);
3021 			return ret;
3022 		}
3023 	} else if (btrfs_test_opt(fs_info, RESCAN_UUID_TREE) ||
3024 		   fs_info->generation !=
3025 				btrfs_super_uuid_tree_generation(disk_super)) {
3026 		btrfs_info(fs_info, "checking UUID tree");
3027 		ret = btrfs_check_uuid_tree(fs_info);
3028 		if (ret) {
3029 			btrfs_warn(fs_info,
3030 				"failed to check the UUID tree: %d", ret);
3031 			close_ctree(fs_info);
3032 			return ret;
3033 		}
3034 	} else {
3035 		set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
3036 	}
3037 	set_bit(BTRFS_FS_OPEN, &fs_info->flags);
3038 
3039 	/*
3040 	 * backuproot only affects mount behavior; if open_ctree succeeded,
3041 	 * there is no need to keep the flag.
3042 	 */
3043 	btrfs_clear_opt(fs_info->mount_opt, USEBACKUPROOT);
3044 
3045 	return 0;
3046 
3047 fail_qgroup:
3048 	btrfs_free_qgroup_config(fs_info);
3049 fail_trans_kthread:
3050 	kthread_stop(fs_info->transaction_kthread);
3051 	btrfs_cleanup_transaction(fs_info);
3052 	btrfs_free_fs_roots(fs_info);
3053 fail_cleaner:
3054 	kthread_stop(fs_info->cleaner_kthread);
3055 
3056 	/*
3057 	 * make sure we're done with the btree inode before we stop our
3058 	 * kthreads
3059 	 */
3060 	filemap_write_and_wait(fs_info->btree_inode->i_mapping);
3061 
3062 fail_sysfs:
3063 	btrfs_sysfs_remove_mounted(fs_info);
3064 
3065 fail_fsdev_sysfs:
3066 	btrfs_sysfs_remove_fsid(fs_info->fs_devices);
3067 
3068 fail_block_groups:
3069 	btrfs_put_block_group_cache(fs_info);
3070 
3071 fail_tree_roots:
3072 	free_root_pointers(fs_info, true);
3073 	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
3074 
3075 fail_sb_buffer:
3076 	btrfs_stop_all_workers(fs_info);
3077 	btrfs_free_block_groups(fs_info);
3078 fail_alloc:
3079 fail_iput:
3080 	btrfs_mapping_tree_free(&fs_info->mapping_tree);
3081 
3082 	iput(fs_info->btree_inode);
3083 fail_bio_counter:
3084 	percpu_counter_destroy(&fs_info->bio_counter);
3085 fail_delalloc_bytes:
3086 	percpu_counter_destroy(&fs_info->delalloc_bytes);
3087 fail_dirty_metadata_bytes:
3088 	percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
3089 fail_srcu:
3090 	cleanup_srcu_struct(&fs_info->subvol_srcu);
3091 fail:
3092 	btrfs_free_stripe_hash_table(fs_info);
3093 	btrfs_close_devices(fs_info->fs_devices);
3094 	return err;
3095 
3096 recovery_tree_root:
3097 	if (!btrfs_test_opt(fs_info, USEBACKUPROOT))
3098 		goto fail_tree_roots;
3099 
3100 	free_root_pointers(fs_info, false);
3101 
3102 	/* don't use the log in recovery mode; it won't be valid */
3103 	btrfs_set_super_log_root(disk_super, 0);
3104 
3105 	/* we can't trust the free space cache either */
3106 	btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE);
3107 
3108 	ret = next_root_backup(fs_info, fs_info->super_copy,
3109 			       &num_backups_tried, &backup_index);
3110 	if (ret == -1)
3111 		goto fail_block_groups;
3112 	goto retry_root_backup;
3113 }
3114 
3115 static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
3116 {
3117 	if (uptodate) {
3118 		set_buffer_uptodate(bh);
3119 	} else {
3120 		struct btrfs_device *device = (struct btrfs_device *)
3121 			bh->b_private;
3122 
3123 		btrfs_warn_rl_in_rcu(device->fs_info,
3124 				"lost page write due to IO error on %s",
3125 					  rcu_str_deref(device->name));
3126 		/* Note: we don't set_buffer_write_io_error() because we have
3127 		 * our own ways of dealing with the IO errors.
3128 		 */
3129 		clear_buffer_uptodate(bh);
3130 		btrfs_dev_stat_inc_and_print(device, BTRFS_DEV_STAT_WRITE_ERRS);
3131 	}
3132 	unlock_buffer(bh);
3133 	put_bh(bh);
3134 }
3135 
3136 int btrfs_read_dev_one_super(struct block_device *bdev, int copy_num,
3137 			struct buffer_head **bh_ret)
3138 {
3139 	struct buffer_head *bh;
3140 	struct btrfs_super_block *super;
3141 	u64 bytenr;
3142 
3143 	bytenr = btrfs_sb_offset(copy_num);
3144 	if (bytenr + BTRFS_SUPER_INFO_SIZE >= i_size_read(bdev->bd_inode))
3145 		return -EINVAL;
3146 
3147 	bh = __bread(bdev, bytenr / BTRFS_BDEV_BLOCKSIZE, BTRFS_SUPER_INFO_SIZE);
3148 	/*
3149 	 * If we fail to read from the underlying devices, the best
3150 	 * option we have for now is to return -EIO.
3151 	 */
3152 	if (!bh)
3153 		return -EIO;
3154 
3155 	super = (struct btrfs_super_block *)bh->b_data;
3156 	if (btrfs_super_bytenr(super) != bytenr ||
3157 		    btrfs_super_magic(super) != BTRFS_MAGIC) {
3158 		brelse(bh);
3159 		return -EINVAL;
3160 	}
3161 
3162 	*bh_ret = bh;
3163 	return 0;
3164 }
3165 
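/*
 * Worked example (offsets assumed from btrfs_sb_offset() in this
 * version): copy 0 lives at 64KiB, copy 1 at 64MiB, copy 2 at 256GiB.
 * The i_size_read() check above therefore just skips mirror copies
 * that would fall beyond the end of a small device:
 *
 *	ret = btrfs_read_dev_one_super(bdev, 1, &bh);	// copy at 64MiB
 *	if (ret == -EINVAL)
 *		;	// device too small, or bad bytenr/magic
 */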
3166 
3167 struct buffer_head *btrfs_read_dev_super(struct block_device *bdev)
3168 {
3169 	struct buffer_head *bh;
3170 	struct buffer_head *latest = NULL;
3171 	struct btrfs_super_block *super;
3172 	int i;
3173 	u64 transid = 0;
3174 	int ret = -EINVAL;
3175 
3176 	/* we would like to check all the supers, but that would make
3177 	 * a btrfs mount succeed after a mkfs from a different FS.
3178 	 * So, we would need a special mount option to scan for
3179 	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
3180 	 */
3181 	for (i = 0; i < 1; i++) {
3182 		ret = btrfs_read_dev_one_super(bdev, i, &bh);
3183 		if (ret)
3184 			continue;
3185 
3186 		super = (struct btrfs_super_block *)bh->b_data;
3187 
3188 		if (!latest || btrfs_super_generation(super) > transid) {
3189 			brelse(latest);
3190 			latest = bh;
3191 			transid = btrfs_super_generation(super);
3192 		} else {
3193 			brelse(bh);
3194 		}
3195 	}
3196 
3197 	if (!latest)
3198 		return ERR_PTR(ret);
3199 
3200 	return latest;
3201 }
3202 
3203 /*
3204  * Write superblock @sb to the @device. Do not wait for completion; all the
3205  * buffer heads we write are pinned.
3206  *
3207  * Write @max_mirrors copies of the superblock, where 0 means the default,
3208  * i.e. all copies that fit the expected device size at commit time. Note
3209  * that max_mirrors must be the same for the write and wait phases.
3210  *
3211  * Return number of errors when buffer head is not found or submission fails.
3212  */
3213 static int write_dev_supers(struct btrfs_device *device,
3214 			    struct btrfs_super_block *sb, int max_mirrors)
3215 {
3216 	struct buffer_head *bh;
3217 	int i;
3218 	int ret;
3219 	int errors = 0;
3220 	u32 crc;
3221 	u64 bytenr;
3222 	int op_flags;
3223 
3224 	if (max_mirrors == 0)
3225 		max_mirrors = BTRFS_SUPER_MIRROR_MAX;
3226 
3227 	for (i = 0; i < max_mirrors; i++) {
3228 		bytenr = btrfs_sb_offset(i);
3229 		if (bytenr + BTRFS_SUPER_INFO_SIZE >=
3230 		    device->commit_total_bytes)
3231 			break;
3232 
3233 		btrfs_set_super_bytenr(sb, bytenr);
3234 
3235 		crc = ~(u32)0;
3236 		crc = btrfs_csum_data((const char *)sb + BTRFS_CSUM_SIZE, crc,
3237 				      BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
3238 		btrfs_csum_final(crc, sb->csum);
3239 
3240 		/* One reference for us, and we leave it for the caller */
3241 		bh = __getblk(device->bdev, bytenr / BTRFS_BDEV_BLOCKSIZE,
3242 			      BTRFS_SUPER_INFO_SIZE);
3243 		if (!bh) {
3244 			btrfs_err(device->fs_info,
3245 			    "couldn't get super buffer head for bytenr %llu",
3246 			    bytenr);
3247 			errors++;
3248 			continue;
3249 		}
3250 
3251 		memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);
3252 
3253 		/* one reference for submit_bh */
3254 		get_bh(bh);
3255 
3256 		set_buffer_uptodate(bh);
3257 		lock_buffer(bh);
3258 		bh->b_end_io = btrfs_end_buffer_write_sync;
3259 		bh->b_private = device;
3260 
3261 		/*
3262 		 * We FUA the first super.  The others we allow
3263 		 * to go down lazily.
3264 		 */
3265 		op_flags = REQ_SYNC | REQ_META | REQ_PRIO;
3266 		if (i == 0 && !btrfs_test_opt(device->fs_info, NOBARRIER))
3267 			op_flags |= REQ_FUA;
3268 		ret = btrfsic_submit_bh(REQ_OP_WRITE, op_flags, bh);
3269 		if (ret)
3270 			errors++;
3271 	}
3272 	return errors < i ? 0 : -1;
3273 }
3274 
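/*
 * Worked example of the checksum layout used above: the csum (crc32c in
 * this version) covers bytes [BTRFS_CSUM_SIZE, BTRFS_SUPER_INFO_SIZE) of
 * the 4KiB block and is stored back into sb->csum.  A reader verifies it
 * the same way (sketch mirroring the mount-time check; "result" and
 * "csum_size" are assumed names):
 *
 *	crc = ~(u32)0;
 *	crc = btrfs_csum_data((const char *)super + BTRFS_CSUM_SIZE, crc,
 *			      BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
 *	btrfs_csum_final(crc, result);
 *	ok = (memcmp(result, super->csum, csum_size) == 0);
 */
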
3275 /*
3276  * Wait for write completion of superblocks done by write_dev_supers,
3277  * @max_mirrors same for write and wait phases.
3278  *
3279  * Return number of errors when buffer head is not found or not marked up to
3280  * date.
3281  */
3282 static int wait_dev_supers(struct btrfs_device *device, int max_mirrors)
3283 {
3284 	struct buffer_head *bh;
3285 	int i;
3286 	int errors = 0;
3287 	u64 bytenr;
3288 
3289 	if (max_mirrors == 0)
3290 		max_mirrors = BTRFS_SUPER_MIRROR_MAX;
3291 
3292 	for (i = 0; i < max_mirrors; i++) {
3293 		bytenr = btrfs_sb_offset(i);
3294 		if (bytenr + BTRFS_SUPER_INFO_SIZE >=
3295 		    device->commit_total_bytes)
3296 			break;
3297 
3298 		bh = __find_get_block(device->bdev,
3299 				      bytenr / BTRFS_BDEV_BLOCKSIZE,
3300 				      BTRFS_SUPER_INFO_SIZE);
3301 		if (!bh) {
3302 			errors++;
3303 			continue;
3304 		}
3305 		wait_on_buffer(bh);
3306 		if (!buffer_uptodate(bh))
3307 			errors++;
3308 
3309 		/* drop our reference */
3310 		brelse(bh);
3311 
3312 		/* drop the reference from the writing run */
3313 		brelse(bh);
3314 	}
3315 
3316 	return errors < i ? 0 : -1;
3317 }
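/*
 * The write and wait phases are paired by the caller. A minimal sketch of
 * the pattern, as used per device by write_all_supers() below (note that
 * the same max_mirrors is passed to both phases):
 *
 *	ret = write_dev_supers(dev, sb, max_mirrors);
 *	if (ret)
 *		total_errors++;
 *	...
 *	ret = wait_dev_supers(dev, max_mirrors);
 *	if (ret)
 *		total_errors++;
 *
 * wait_dev_supers() drops both buffer head references taken by the write
 * phase, which is what allows the writes to be submitted without waiting.
 */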
3318 
3319 /*
3320  * Endio handler for write_dev_flush; this will wake anyone waiting
3321  * for the barrier when it is done.
3322  */
3323 static void btrfs_end_empty_barrier(struct bio *bio)
3324 {
3325 	complete(bio->bi_private);
3326 }
3327 
3328 /*
3329  * Submit a flush request to the device if it supports it. Error handling is
3330  * done in the waiting counterpart.
3331  */
3332 static void write_dev_flush(struct btrfs_device *device)
3333 {
3334 	struct request_queue *q = bdev_get_queue(device->bdev);
3335 	struct bio *bio = device->flush_bio;
3336 
3337 	if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
3338 		return;
3339 
3340 	bio_reset(bio);
3341 	bio->bi_end_io = btrfs_end_empty_barrier;
3342 	bio_set_dev(bio, device->bdev);
3343 	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH;
3344 	init_completion(&device->flush_wait);
3345 	bio->bi_private = &device->flush_wait;
3346 
3347 	btrfsic_submit_bio(bio);
3348 	device->flush_bio_sent = 1;
3349 }
3350 
3351 /*
3352  * If the flush bio has been submitted by write_dev_flush, wait for it.
3353  */
3354 static blk_status_t wait_dev_flush(struct btrfs_device *device)
3355 {
3356 	struct bio *bio = device->flush_bio;
3357 
3358 	if (!device->flush_bio_sent)
3359 		return BLK_STS_OK;
3360 
3361 	device->flush_bio_sent = 0;
3362 	wait_for_completion_io(&device->flush_wait);
3363 
3364 	return bio->bi_status;
3365 }
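/*
 * Submit and wait are likewise split for the flush bios so that all
 * devices flush in parallel; barrier_all_devices() below pairs them:
 *
 *	write_dev_flush(dev);		// first loop: submit to every device
 *	...
 *	ret = wait_dev_flush(dev);	// second loop: collect each status
 */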
3366 
3367 static int check_barrier_error(struct btrfs_fs_info *fs_info)
3368 {
3369 	if (!btrfs_check_rw_degradable(fs_info))
3370 		return -EIO;
3371 	return 0;
3372 }
3373 
3374 /*
3375  * send an empty flush down to each device in parallel,
3376  * then wait for them
3377  */
3378 static int barrier_all_devices(struct btrfs_fs_info *info)
3379 {
3380 	struct list_head *head;
3381 	struct btrfs_device *dev;
3382 	int errors_wait = 0;
3383 	blk_status_t ret;
3384 
3385 	/* send down all the barriers */
3386 	head = &info->fs_devices->devices;
3387 	list_for_each_entry_rcu(dev, head, dev_list) {
3388 		if (dev->missing)
3389 			continue;
3390 		if (!dev->bdev)
3391 			continue;
3392 		if (!dev->in_fs_metadata || !dev->writeable)
3393 			continue;
3394 
3395 		write_dev_flush(dev);
3396 		dev->last_flush_error = BLK_STS_OK;
3397 	}
3398 
3399 	/* wait for all the barriers */
3400 	list_for_each_entry_rcu(dev, head, dev_list) {
3401 		if (dev->missing)
3402 			continue;
3403 		if (!dev->bdev) {
3404 			errors_wait++;
3405 			continue;
3406 		}
3407 		if (!dev->in_fs_metadata || !dev->writeable)
3408 			continue;
3409 
3410 		ret = wait_dev_flush(dev);
3411 		if (ret) {
3412 			dev->last_flush_error = ret;
3413 			btrfs_dev_stat_inc_and_print(dev,
3414 					BTRFS_DEV_STAT_FLUSH_ERRS);
3415 			errors_wait++;
3416 		}
3417 	}
3418 
3419 	if (errors_wait) {
3420 		/*
3421 		 * At some point we need the status of all disks to
3422 		 * arrive at the volume status, so error checking is
3423 		 * pushed to a separate loop.
3424 		 */
3425 		return check_barrier_error(info);
3426 	}
3427 	return 0;
3428 }
3429 
3430 int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags)
3431 {
3432 	int raid_type;
3433 	int min_tolerated = INT_MAX;
3434 
3435 	if ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 ||
3436 	    (flags & BTRFS_AVAIL_ALLOC_BIT_SINGLE))
3437 		min_tolerated = min(min_tolerated,
3438 				    btrfs_raid_array[BTRFS_RAID_SINGLE].
3439 				    tolerated_failures);
3440 
3441 	for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
3442 		if (raid_type == BTRFS_RAID_SINGLE)
3443 			continue;
3444 		if (!(flags & btrfs_raid_group[raid_type]))
3445 			continue;
3446 		min_tolerated = min(min_tolerated,
3447 				    btrfs_raid_array[raid_type].
3448 				    tolerated_failures);
3449 	}
3450 
3451 	if (min_tolerated == INT_MAX) {
3452 		pr_warn("BTRFS: unknown raid flag: %llu", flags);
3453 		min_tolerated = 0;
3454 	}
3455 
3456 	return min_tolerated;
3457 }
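/*
 * Example: for flags containing BTRFS_BLOCK_GROUP_RAID1 this returns 1,
 * since btrfs_raid_array says RAID1 tolerates one missing device; if a
 * SINGLE or RAID0 profile is also present, the minimum drops to 0.
 */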
3458 
3459 int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors)
3460 {
3461 	struct list_head *head;
3462 	struct btrfs_device *dev;
3463 	struct btrfs_super_block *sb;
3464 	struct btrfs_dev_item *dev_item;
3465 	int ret;
3466 	int do_barriers;
3467 	int max_errors;
3468 	int total_errors = 0;
3469 	u64 flags;
3470 
3471 	do_barriers = !btrfs_test_opt(fs_info, NOBARRIER);
3472 
3473 	/*
3474 	 * max_mirrors == 0 indicates we're called from commit_transaction,
3475 	 * not from fsync, where the tree roots in fs_info may not yet be
3476 	 * consistent on disk.
3477 	 */
3478 	if (max_mirrors == 0)
3479 		backup_super_roots(fs_info);
3480 
3481 	sb = fs_info->super_for_commit;
3482 	dev_item = &sb->dev_item;
3483 
3484 	mutex_lock(&fs_info->fs_devices->device_list_mutex);
3485 	head = &fs_info->fs_devices->devices;
3486 	max_errors = btrfs_super_num_devices(fs_info->super_copy) - 1;
3487 
3488 	if (do_barriers) {
3489 		ret = barrier_all_devices(fs_info);
3490 		if (ret) {
3491 			mutex_unlock(
3492 				&fs_info->fs_devices->device_list_mutex);
3493 			btrfs_handle_fs_error(fs_info, ret,
3494 					      "errors while submitting device barriers.");
3495 			return ret;
3496 		}
3497 	}
3498 
3499 	list_for_each_entry_rcu(dev, head, dev_list) {
3500 		if (!dev->bdev) {
3501 			total_errors++;
3502 			continue;
3503 		}
3504 		if (!dev->in_fs_metadata || !dev->writeable)
3505 			continue;
3506 
3507 		btrfs_set_stack_device_generation(dev_item, 0);
3508 		btrfs_set_stack_device_type(dev_item, dev->type);
3509 		btrfs_set_stack_device_id(dev_item, dev->devid);
3510 		btrfs_set_stack_device_total_bytes(dev_item,
3511 						   dev->commit_total_bytes);
3512 		btrfs_set_stack_device_bytes_used(dev_item,
3513 						  dev->commit_bytes_used);
3514 		btrfs_set_stack_device_io_align(dev_item, dev->io_align);
3515 		btrfs_set_stack_device_io_width(dev_item, dev->io_width);
3516 		btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
3517 		memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
3518 		memcpy(dev_item->fsid, dev->fs_devices->fsid, BTRFS_FSID_SIZE);
3519 
3520 		flags = btrfs_super_flags(sb);
3521 		btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);
3522 
3523 		ret = write_dev_supers(dev, sb, max_mirrors);
3524 		if (ret)
3525 			total_errors++;
3526 	}
3527 	if (total_errors > max_errors) {
3528 		btrfs_err(fs_info, "%d errors while writing supers",
3529 			  total_errors);
3530 		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3531 
3532 		/* FUA is masked off if unsupported and can't be the reason */
3533 		btrfs_handle_fs_error(fs_info, -EIO,
3534 				      "%d errors while writing supers",
3535 				      total_errors);
3536 		return -EIO;
3537 	}
3538 
3539 	total_errors = 0;
3540 	list_for_each_entry_rcu(dev, head, dev_list) {
3541 		if (!dev->bdev)
3542 			continue;
3543 		if (!dev->in_fs_metadata || !dev->writeable)
3544 			continue;
3545 
3546 		ret = wait_dev_supers(dev, max_mirrors);
3547 		if (ret)
3548 			total_errors++;
3549 	}
3550 	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3551 	if (total_errors > max_errors) {
3552 		btrfs_handle_fs_error(fs_info, -EIO,
3553 				      "%d errors while writing supers",
3554 				      total_errors);
3555 		return -EIO;
3556 	}
3557 	return 0;
3558 }
3559 
3560 /* Drop a fs root from the radix tree and free it. */
3561 void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
3562 				  struct btrfs_root *root)
3563 {
3564 	spin_lock(&fs_info->fs_roots_radix_lock);
3565 	radix_tree_delete(&fs_info->fs_roots_radix,
3566 			  (unsigned long)root->root_key.objectid);
3567 	spin_unlock(&fs_info->fs_roots_radix_lock);
3568 
3569 	if (btrfs_root_refs(&root->root_item) == 0)
3570 		synchronize_srcu(&fs_info->subvol_srcu);
3571 
3572 	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
3573 		btrfs_free_log(NULL, root);
3574 		if (root->reloc_root) {
3575 			free_extent_buffer(root->reloc_root->node);
3576 			free_extent_buffer(root->reloc_root->commit_root);
3577 			btrfs_put_fs_root(root->reloc_root);
3578 			root->reloc_root = NULL;
3579 		}
3580 	}
3581 
3582 	if (root->free_ino_pinned)
3583 		__btrfs_remove_free_space_cache(root->free_ino_pinned);
3584 	if (root->free_ino_ctl)
3585 		__btrfs_remove_free_space_cache(root->free_ino_ctl);
3586 	free_fs_root(root);
3587 }
3588 
3589 static void free_fs_root(struct btrfs_root *root)
3590 {
3591 	iput(root->ino_cache_inode);
3592 	WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
3593 	btrfs_free_block_rsv(root->fs_info, root->orphan_block_rsv);
3594 	root->orphan_block_rsv = NULL;
3595 	if (root->anon_dev)
3596 		free_anon_bdev(root->anon_dev);
3597 	if (root->subv_writers)
3598 		btrfs_free_subvolume_writers(root->subv_writers);
3599 	free_extent_buffer(root->node);
3600 	free_extent_buffer(root->commit_root);
3601 	kfree(root->free_ino_ctl);
3602 	kfree(root->free_ino_pinned);
3603 	kfree(root->name);
3604 	btrfs_put_fs_root(root);
3605 }
3606 
3607 void btrfs_free_fs_root(struct btrfs_root *root)
3608 {
3609 	free_fs_root(root);
3610 }
3611 
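/*
 * Walk all fs roots via radix-tree gang lookup and run orphan cleanup on
 * each of them. Stops at the first error; any roots already grabbed but
 * not yet processed are released before returning.
 */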
3612 int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
3613 {
3614 	u64 root_objectid = 0;
3615 	struct btrfs_root *gang[8];
3616 	int i = 0;
3617 	int err = 0;
3618 	unsigned int ret = 0;
3619 	int index;
3620 
3621 	while (1) {
3622 		index = srcu_read_lock(&fs_info->subvol_srcu);
3623 		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
3624 					     (void **)gang, root_objectid,
3625 					     ARRAY_SIZE(gang));
3626 		if (!ret) {
3627 			srcu_read_unlock(&fs_info->subvol_srcu, index);
3628 			break;
3629 		}
3630 		root_objectid = gang[ret - 1]->root_key.objectid + 1;
3631 
3632 		for (i = 0; i < ret; i++) {
3633 			/* Avoid grabbing roots in dead_roots */
3634 			if (btrfs_root_refs(&gang[i]->root_item) == 0) {
3635 				gang[i] = NULL;
3636 				continue;
3637 			}
3638 			/* grab all the search results for later use */
3639 			gang[i] = btrfs_grab_fs_root(gang[i]);
3640 		}
3641 		srcu_read_unlock(&fs_info->subvol_srcu, index);
3642 
3643 		for (i = 0; i < ret; i++) {
3644 			if (!gang[i])
3645 				continue;
3646 			root_objectid = gang[i]->root_key.objectid;
3647 			err = btrfs_orphan_cleanup(gang[i]);
3648 			if (err)
3649 				break;
3650 			btrfs_put_fs_root(gang[i]);
3651 		}
3652 		root_objectid++;
3653 	}
3654 
3655 	/* release any roots left uncleaned due to the error */
3656 	for (; i < ret; i++) {
3657 		if (gang[i])
3658 			btrfs_put_fs_root(gang[i]);
3659 	}
3660 	return err;
3661 }
3662 
3663 int btrfs_commit_super(struct btrfs_fs_info *fs_info)
3664 {
3665 	struct btrfs_root *root = fs_info->tree_root;
3666 	struct btrfs_trans_handle *trans;
3667 
3668 	mutex_lock(&fs_info->cleaner_mutex);
3669 	btrfs_run_delayed_iputs(fs_info);
3670 	mutex_unlock(&fs_info->cleaner_mutex);
3671 	wake_up_process(fs_info->cleaner_kthread);
3672 
3673 	/* wait until ongoing cleanup work is done */
3674 	down_write(&fs_info->cleanup_work_sem);
3675 	up_write(&fs_info->cleanup_work_sem);
3676 
3677 	trans = btrfs_join_transaction(root);
3678 	if (IS_ERR(trans))
3679 		return PTR_ERR(trans);
3680 	return btrfs_commit_transaction(trans);
3681 }
3682 
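/*
 * Tear down the filesystem at unmount: park the cleaner, wait for
 * background work (qgroup rescan, uuid scan, balance, dev-replace, scrub,
 * defrag), commit the super when read-write, stop the kthreads and all
 * workers, and free the remaining in-memory state.
 */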
3683 void close_ctree(struct btrfs_fs_info *fs_info)
3684 {
3685 	struct btrfs_root *root = fs_info->tree_root;
3686 	int ret;
3687 
3688 	set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags);
3689 	/*
3690 	 * We don't want the cleaner to start new transactions, add more delayed
3691 	 * iputs, etc. while we're closing. We can't use kthread_stop() yet
3692 	 * because that frees the task_struct, and the transaction kthread might
3693 	 * still try to wake up the cleaner.
3694 	 */
3695 	kthread_park(fs_info->cleaner_kthread);
3696 
3697 	/* wait for the qgroup rescan worker to stop */
3698 	btrfs_qgroup_wait_for_completion(fs_info, false);
3699 
3700 	/* wait for the uuid_scan task to finish */
3701 	down(&fs_info->uuid_tree_rescan_sem);
3702 	/* avoid complaints from lockdep et al.; set sem back to its initial state */
3703 	up(&fs_info->uuid_tree_rescan_sem);
3704 
3705 	/* pause restriper - we want to resume on mount */
3706 	btrfs_pause_balance(fs_info);
3707 
3708 	btrfs_dev_replace_suspend_for_unmount(fs_info);
3709 
3710 	btrfs_scrub_cancel(fs_info);
3711 
3712 	/* wait for any defraggers to finish */
3713 	wait_event(fs_info->transaction_wait,
3714 		   (atomic_read(&fs_info->defrag_running) == 0));
3715 
3716 	/* clear out the rbtree of defraggable inodes */
3717 	btrfs_cleanup_defrag_inodes(fs_info);
3718 
3719 	cancel_work_sync(&fs_info->async_reclaim_work);
3720 
3721 	if (!sb_rdonly(fs_info->sb)) {
3722 		/*
3723 		 * The cleaner kthread is stopped, so do one final pass over
3724 		 * unused block groups.
3725 		 */
3726 		btrfs_delete_unused_bgs(fs_info);
3727 
3728 		ret = btrfs_commit_super(fs_info);
3729 		if (ret)
3730 			btrfs_err(fs_info, "commit super ret %d", ret);
3731 	}
3732 
3733 	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state) ||
3734 	    test_bit(BTRFS_FS_STATE_TRANS_ABORTED, &fs_info->fs_state))
3735 		btrfs_error_commit_super(fs_info);
3736 
3737 	kthread_stop(fs_info->transaction_kthread);
3738 	kthread_stop(fs_info->cleaner_kthread);
3739 
3740 	set_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags);
3741 
3742 	btrfs_free_qgroup_config(fs_info);
3743 	ASSERT(list_empty(&fs_info->delalloc_roots));
3744 
3745 	if (percpu_counter_sum(&fs_info->delalloc_bytes)) {
3746 		btrfs_info(fs_info, "at unmount delalloc count %lld",
3747 		       percpu_counter_sum(&fs_info->delalloc_bytes));
3748 	}
3749 
3750 	btrfs_sysfs_remove_mounted(fs_info);
3751 	btrfs_sysfs_remove_fsid(fs_info->fs_devices);
3752 
3753 	btrfs_free_fs_roots(fs_info);
3754 
3755 	btrfs_put_block_group_cache(fs_info);
3756 
3757 	/*
3758 	 * we must make sure there are no read requests submitted
3759 	 * after we stop all workers.
3760 	 */
3761 	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
3762 	btrfs_stop_all_workers(fs_info);
3763 
3764 	clear_bit(BTRFS_FS_OPEN, &fs_info->flags);
3765 	free_root_pointers(fs_info, true);
3766 
3767 	/*
3768 	 * We must free the block groups after dropping the fs_roots as we could
3769 	 * have had an IO error and have left over tree log blocks that aren't
3770 	 * cleaned up until the fs roots are freed.  This makes the block group
3771 	 * accounting appear to be wrong because there's pending reserved bytes,
3772 	 * so make sure we do the block group cleanup afterwards.
3773 	 */
3774 	btrfs_free_block_groups(fs_info);
3775 
3776 	iput(fs_info->btree_inode);
3777 
3778 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
3779 	if (btrfs_test_opt(fs_info, CHECK_INTEGRITY))
3780 		btrfsic_unmount(fs_info->fs_devices);
3781 #endif
3782 
3783 	btrfs_close_devices(fs_info->fs_devices);
3784 	btrfs_mapping_tree_free(&fs_info->mapping_tree);
3785 
3786 	percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
3787 	percpu_counter_destroy(&fs_info->delalloc_bytes);
3788 	percpu_counter_destroy(&fs_info->bio_counter);
3789 	cleanup_srcu_struct(&fs_info->subvol_srcu);
3790 
3791 	btrfs_free_stripe_hash_table(fs_info);
3792 
3793 	__btrfs_free_block_rsv(root->orphan_block_rsv);
3794 	root->orphan_block_rsv = NULL;
3795 
3796 	while (!list_empty(&fs_info->pinned_chunks)) {
3797 		struct extent_map *em;
3798 
3799 		em = list_first_entry(&fs_info->pinned_chunks,
3800 				      struct extent_map, list);
3801 		list_del_init(&em->list);
3802 		free_extent_map(em);
3803 	}
3804 }
3805 
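/*
 * Returns nonzero when @buf is up to date and its generation matches
 * @parent_transid, 0 when it does not, and -EAGAIN when @atomic is set
 * and answering would require blocking.
 */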
3806 int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
3807 			  int atomic)
3808 {
3809 	int ret;
3810 	struct inode *btree_inode = buf->pages[0]->mapping->host;
3811 
3812 	ret = extent_buffer_uptodate(buf);
3813 	if (!ret)
3814 		return ret;
3815 
3816 	ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
3817 				    parent_transid, atomic);
3818 	if (ret == -EAGAIN)
3819 		return ret;
3820 	return !ret;
3821 }
3822 
3823 void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
3824 {
3825 	struct btrfs_fs_info *fs_info;
3826 	struct btrfs_root *root;
3827 	u64 transid = btrfs_header_generation(buf);
3828 	int was_dirty;
3829 
3830 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
3831 	/*
3832 	 * This is a fast path so only do this check if we have sanity tests
3833 	 * enabled.  Normal people shouldn't be marking dummy buffers as dirty
3834 	 * outside of the sanity tests.
3835 	 */
3836 	if (unlikely(test_bit(EXTENT_BUFFER_DUMMY, &buf->bflags)))
3837 		return;
3838 #endif
3839 	root = BTRFS_I(buf->pages[0]->mapping->host)->root;
3840 	fs_info = root->fs_info;
3841 	btrfs_assert_tree_locked(buf);
3842 	if (transid != fs_info->generation)
3843 		WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, found %llu running %llu\n",
3844 			buf->start, transid, fs_info->generation);
3845 	was_dirty = set_extent_buffer_dirty(buf);
3846 	if (!was_dirty)
3847 		percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
3848 					 buf->len,
3849 					 fs_info->dirty_metadata_batch);
3850 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
3851 	/*
3852 	 * btrfs_mark_buffer_dirty() can be called with the item pointer set
3853 	 * but the item data not yet updated, so only check item pointers
3854 	 * here, not item data.
3855 	 */
3856 	if (btrfs_header_level(buf) == 0 &&
3857 	    btrfs_check_leaf_relaxed(root, buf)) {
3858 		btrfs_print_leaf(buf);
3859 		ASSERT(0);
3860 	}
3861 #endif
3862 }
3863 
3864 static void __btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info,
3865 					int flush_delayed)
3866 {
3867 	/*
3868 	 * It looks as though older kernels can get into trouble with this
3869 	 * code; they end up stuck in balance_dirty_pages() forever.
3870 	 */
3871 	int ret;
3872 
3873 	if (current->flags & PF_MEMALLOC)
3874 		return;
3875 
3876 	if (flush_delayed)
3877 		btrfs_balance_delayed_items(fs_info);
3878 
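	/*
	 * __percpu_counter_compare() returns a positive value once the
	 * counter exceeds BTRFS_DIRTY_METADATA_THRESH (to within the batch
	 * precision); only then do we throttle dirty btree pages.
	 */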
3879 	ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
3880 				     BTRFS_DIRTY_METADATA_THRESH,
3881 				     fs_info->dirty_metadata_batch);
3882 	if (ret > 0) {
3883 		balance_dirty_pages_ratelimited(fs_info->btree_inode->i_mapping);
3884 	}
3885 }
3886 
3887 void btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info)
3888 {
3889 	__btrfs_btree_balance_dirty(fs_info, 1);
3890 }
3891 
3892 void btrfs_btree_balance_dirty_nodelay(struct btrfs_fs_info *fs_info)
3893 {
3894 	__btrfs_btree_balance_dirty(fs_info, 0);
3895 }
3896 
3897 int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
3898 {
3899 	struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
3900 	struct btrfs_fs_info *fs_info = root->fs_info;
3901 
3902 	return btree_read_extent_buffer_pages(fs_info, buf, parent_transid);
3903 }
3904 
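/*
 * Sanity-check the superblock read at mount time. Checking continues past
 * the first failure so that every problem gets reported; returns 0 when
 * the super looks valid, -EINVAL otherwise.
 */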
3905 static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info)
3906 {
3907 	struct btrfs_super_block *sb = fs_info->super_copy;
3908 	u64 nodesize = btrfs_super_nodesize(sb);
3909 	u64 sectorsize = btrfs_super_sectorsize(sb);
3910 	int ret = 0;
3911 
3912 	if (btrfs_super_magic(sb) != BTRFS_MAGIC) {
3913 		btrfs_err(fs_info, "no valid FS found");
3914 		ret = -EINVAL;
3915 	}
3916 	if (btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP) {
3917 		btrfs_err(fs_info, "unrecognized or unsupported super flag: %llu",
3918 				btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP);
3919 		ret = -EINVAL;
3920 	}
3921 	if (btrfs_super_root_level(sb) >= BTRFS_MAX_LEVEL) {
3922 		btrfs_err(fs_info, "tree_root level too big: %d >= %d",
3923 				btrfs_super_root_level(sb), BTRFS_MAX_LEVEL);
3924 		ret = -EINVAL;
3925 	}
3926 	if (btrfs_super_chunk_root_level(sb) >= BTRFS_MAX_LEVEL) {
3927 		btrfs_err(fs_info, "chunk_root level too big: %d >= %d",
3928 				btrfs_super_chunk_root_level(sb), BTRFS_MAX_LEVEL);
3929 		ret = -EINVAL;
3930 	}
3931 	if (btrfs_super_log_root_level(sb) >= BTRFS_MAX_LEVEL) {
3932 		btrfs_err(fs_info, "log_root level too big: %d >= %d",
3933 				btrfs_super_log_root_level(sb), BTRFS_MAX_LEVEL);
3934 		ret = -EINVAL;
3935 	}
3936 
3937 	/*
3938 	 * Check sectorsize and nodesize first; other checks will need them.
3939 	 * Check all possible sectorsizes (4K, 8K, 16K, 32K, 64K) here.
3940 	 */
3941 	if (!is_power_of_2(sectorsize) || sectorsize < 4096 ||
3942 	    sectorsize > BTRFS_MAX_METADATA_BLOCKSIZE) {
3943 		btrfs_err(fs_info, "invalid sectorsize %llu", sectorsize);
3944 		ret = -EINVAL;
3945 	}
3946 	/* Only PAGE_SIZE is supported yet */
3947 	if (sectorsize != PAGE_SIZE) {
3948 		btrfs_err(fs_info,
3949 			"sectorsize %llu not supported yet, only support %lu",
3950 			sectorsize, PAGE_SIZE);
3951 		ret = -EINVAL;
3952 	}
3953 	if (!is_power_of_2(nodesize) || nodesize < sectorsize ||
3954 	    nodesize > BTRFS_MAX_METADATA_BLOCKSIZE) {
3955 		btrfs_err(fs_info, "invalid nodesize %llu", nodesize);
3956 		ret = -EINVAL;
3957 	}
3958 	if (nodesize != le32_to_cpu(sb->__unused_leafsize)) {
3959 		btrfs_err(fs_info, "invalid leafsize %u, should be %llu",
3960 			  le32_to_cpu(sb->__unused_leafsize), nodesize);
3961 		ret = -EINVAL;
3962 	}
3963 
3964 	/* Root alignment check */
3965 	if (!IS_ALIGNED(btrfs_super_root(sb), sectorsize)) {
3966 		btrfs_warn(fs_info, "tree_root block unaligned: %llu",
3967 			   btrfs_super_root(sb));
3968 		ret = -EINVAL;
3969 	}
3970 	if (!IS_ALIGNED(btrfs_super_chunk_root(sb), sectorsize)) {
3971 		btrfs_warn(fs_info, "chunk_root block unaligned: %llu",
3972 			   btrfs_super_chunk_root(sb));
3973 		ret = -EINVAL;
3974 	}
3975 	if (!IS_ALIGNED(btrfs_super_log_root(sb), sectorsize)) {
3976 		btrfs_warn(fs_info, "log_root block unaligned: %llu",
3977 			   btrfs_super_log_root(sb));
3978 		ret = -EINVAL;
3979 	}
3980 
3981 	if (memcmp(fs_info->fsid, sb->dev_item.fsid, BTRFS_FSID_SIZE) != 0) {
3982 		btrfs_err(fs_info,
3983 			   "dev_item UUID does not match fsid: %pU != %pU",
3984 			   fs_info->fsid, sb->dev_item.fsid);
3985 		ret = -EINVAL;
3986 	}
3987 
3988 	/*
3989 	 * Hint to catch really bogus numbers, bitflips or so; more exact
3990 	 * checks are done later.
3991 	 */
3992 	if (btrfs_super_bytes_used(sb) < 6 * btrfs_super_nodesize(sb)) {
3993 		btrfs_err(fs_info, "bytes_used is too small %llu",
3994 			  btrfs_super_bytes_used(sb));
3995 		ret = -EINVAL;
3996 	}
3997 	if (!is_power_of_2(btrfs_super_stripesize(sb))) {
3998 		btrfs_err(fs_info, "invalid stripesize %u",
3999 			  btrfs_super_stripesize(sb));
4000 		ret = -EINVAL;
4001 	}
4002 	if (btrfs_super_num_devices(sb) > (1UL << 31))
4003 		btrfs_warn(fs_info, "suspicious number of devices: %llu",
4004 			   btrfs_super_num_devices(sb));
4005 	if (btrfs_super_num_devices(sb) == 0) {
4006 		btrfs_err(fs_info, "number of devices is 0");
4007 		ret = -EINVAL;
4008 	}
4009 
4010 	if (btrfs_super_bytenr(sb) != BTRFS_SUPER_INFO_OFFSET) {
4011 		btrfs_err(fs_info, "super offset mismatch %llu != %u",
4012 			  btrfs_super_bytenr(sb), BTRFS_SUPER_INFO_OFFSET);
4013 		ret = -EINVAL;
4014 	}
4015 
4016 	/*
4017 	 * Obvious sys_chunk_array corruptions: it must hold at least one key
4018 	 * and one chunk.
4019 	 */
4020 	if (btrfs_super_sys_array_size(sb) > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
4021 		btrfs_err(fs_info, "system chunk array too big %u > %u",
4022 			  btrfs_super_sys_array_size(sb),
4023 			  BTRFS_SYSTEM_CHUNK_ARRAY_SIZE);
4024 		ret = -EINVAL;
4025 	}
4026 	if (btrfs_super_sys_array_size(sb) < sizeof(struct btrfs_disk_key)
4027 			+ sizeof(struct btrfs_chunk)) {
4028 		btrfs_err(fs_info, "system chunk array too small %u < %zu",
4029 			  btrfs_super_sys_array_size(sb),
4030 			  sizeof(struct btrfs_disk_key)
4031 			  + sizeof(struct btrfs_chunk));
4032 		ret = -EINVAL;
4033 	}
4034 
4035 	/*
4036 	 * The generation is a global counter; we'll trust it more than the
4037 	 * others, but it's still possible that it's the one that's wrong.
4038 	 */
4039 	if (btrfs_super_generation(sb) < btrfs_super_chunk_root_generation(sb))
4040 		btrfs_warn(fs_info,
4041 			"suspicious: generation < chunk_root_generation: %llu < %llu",
4042 			btrfs_super_generation(sb),
4043 			btrfs_super_chunk_root_generation(sb));
4044 	if (btrfs_super_generation(sb) < btrfs_super_cache_generation(sb)
4045 	    && btrfs_super_cache_generation(sb) != (u64)-1)
4046 		btrfs_warn(fs_info,
4047 			"suspicious: generation < cache_generation: %llu < %llu",
4048 			btrfs_super_generation(sb),
4049 			btrfs_super_cache_generation(sb));
4050 
4051 	return ret;
4052 }
4053 
4054 static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info)
4055 {
4056 	/* cleanup FS via transaction */
4057 	btrfs_cleanup_transaction(fs_info);
4058 
4059 	mutex_lock(&fs_info->cleaner_mutex);
4060 	btrfs_run_delayed_iputs(fs_info);
4061 	mutex_unlock(&fs_info->cleaner_mutex);
4062 
4063 	down_write(&fs_info->cleanup_work_sem);
4064 	up_write(&fs_info->cleanup_work_sem);
4065 }
4066 
4067 static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
4068 {
4069 	struct btrfs_ordered_extent *ordered;
4070 
4071 	spin_lock(&root->ordered_extent_lock);
4072 	/*
4073 	 * This will just short-circuit the ordered completion machinery, which
4074 	 * will make sure the ordered extent gets properly cleaned up.
4075 	 */
4076 	list_for_each_entry(ordered, &root->ordered_extents,
4077 			    root_extent_list)
4078 		set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
4079 	spin_unlock(&root->ordered_extent_lock);
4080 }
4081 
4082 static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
4083 {
4084 	struct btrfs_root *root;
4085 	struct list_head splice;
4086 
4087 	INIT_LIST_HEAD(&splice);
4088 
4089 	spin_lock(&fs_info->ordered_root_lock);
4090 	list_splice_init(&fs_info->ordered_roots, &splice);
4091 	while (!list_empty(&splice)) {
4092 		root = list_first_entry(&splice, struct btrfs_root,
4093 					ordered_root);
4094 		list_move_tail(&root->ordered_root,
4095 			       &fs_info->ordered_roots);
4096 
4097 		spin_unlock(&fs_info->ordered_root_lock);
4098 		btrfs_destroy_ordered_extents(root);
4099 
4100 		cond_resched();
4101 		spin_lock(&fs_info->ordered_root_lock);
4102 	}
4103 	spin_unlock(&fs_info->ordered_root_lock);
4104 
4105 	/*
4106 	 * We need this here because if we've been flipped read-only we won't
4107 	 * get sync() from the umount, so we need to make sure any ordered
4108 	 * extents whose dirty pages haven't started writeout yet actually
4109 	 * get run and error out properly.
4110 	 */
4111 	btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
4112 }
4113 
4114 static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
4115 				      struct btrfs_fs_info *fs_info)
4116 {
4117 	struct rb_node *node;
4118 	struct btrfs_delayed_ref_root *delayed_refs;
4119 	struct btrfs_delayed_ref_node *ref;
4120 	int ret = 0;
4121 
4122 	delayed_refs = &trans->delayed_refs;
4123 
4124 	spin_lock(&delayed_refs->lock);
4125 	if (atomic_read(&delayed_refs->num_entries) == 0) {
4126 		spin_unlock(&delayed_refs->lock);
4127 		btrfs_info(fs_info, "delayed_refs has NO entry");
4128 		return ret;
4129 	}
4130 
4131 	while ((node = rb_first(&delayed_refs->href_root)) != NULL) {
4132 		struct btrfs_delayed_ref_head *head;
4133 		struct btrfs_delayed_ref_node *tmp;
4134 		bool pin_bytes = false;
4135 
4136 		head = rb_entry(node, struct btrfs_delayed_ref_head,
4137 				href_node);
4138 		if (!mutex_trylock(&head->mutex)) {
4139 			refcount_inc(&head->node.refs);
4140 			spin_unlock(&delayed_refs->lock);
4141 
4142 			mutex_lock(&head->mutex);
4143 			mutex_unlock(&head->mutex);
4144 			btrfs_put_delayed_ref(&head->node);
4145 			spin_lock(&delayed_refs->lock);
4146 			continue;
4147 		}
4148 		spin_lock(&head->lock);
4149 		list_for_each_entry_safe_reverse(ref, tmp, &head->ref_list,
4150 						 list) {
4151 			ref->in_tree = 0;
4152 			list_del(&ref->list);
4153 			if (!list_empty(&ref->add_list))
4154 				list_del(&ref->add_list);
4155 			atomic_dec(&delayed_refs->num_entries);
4156 			btrfs_put_delayed_ref(ref);
4157 		}
4158 		if (head->must_insert_reserved)
4159 			pin_bytes = true;
4160 		btrfs_free_delayed_extent_op(head->extent_op);
4161 		delayed_refs->num_heads--;
4162 		if (head->processing == 0)
4163 			delayed_refs->num_heads_ready--;
4164 		atomic_dec(&delayed_refs->num_entries);
4165 		head->node.in_tree = 0;
4166 		rb_erase(&head->href_node, &delayed_refs->href_root);
4167 		spin_unlock(&head->lock);
4168 		spin_unlock(&delayed_refs->lock);
4169 		mutex_unlock(&head->mutex);
4170 
4171 		if (pin_bytes)
4172 			btrfs_pin_extent(fs_info, head->node.bytenr,
4173 					 head->node.num_bytes, 1);
4174 		btrfs_put_delayed_ref(&head->node);
4175 		cond_resched();
4176 		spin_lock(&delayed_refs->lock);
4177 	}
4178 
4179 	spin_unlock(&delayed_refs->lock);
4180 
4181 	return ret;
4182 }
4183 
4184 static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
4185 {
4186 	struct btrfs_inode *btrfs_inode;
4187 	struct list_head splice;
4188 
4189 	INIT_LIST_HEAD(&splice);
4190 
4191 	spin_lock(&root->delalloc_lock);
4192 	list_splice_init(&root->delalloc_inodes, &splice);
4193 
4194 	while (!list_empty(&splice)) {
4195 		struct inode *inode = NULL;
4196 		btrfs_inode = list_first_entry(&splice, struct btrfs_inode,
4197 					       delalloc_inodes);
4198 		__btrfs_del_delalloc_inode(root, btrfs_inode);
4199 		spin_unlock(&root->delalloc_lock);
4200 
4201 		/*
4202 		 * Make sure we get a live inode and that it won't disappear
4203 		 * in the meantime.
4204 		 */
4205 		inode = igrab(&btrfs_inode->vfs_inode);
4206 		if (inode) {
4207 			invalidate_inode_pages2(inode->i_mapping);
4208 			iput(inode);
4209 		}
4210 		spin_lock(&root->delalloc_lock);
4211 	}
4212 	spin_unlock(&root->delalloc_lock);
4213 }
4214 
4215 static void btrfs_destroy_all_delalloc_inodes(struct btrfs_fs_info *fs_info)
4216 {
4217 	struct btrfs_root *root;
4218 	struct list_head splice;
4219 
4220 	INIT_LIST_HEAD(&splice);
4221 
4222 	spin_lock(&fs_info->delalloc_root_lock);
4223 	list_splice_init(&fs_info->delalloc_roots, &splice);
4224 	while (!list_empty(&splice)) {
4225 		root = list_first_entry(&splice, struct btrfs_root,
4226 					 delalloc_root);
4227 		root = btrfs_grab_fs_root(root);
4228 		BUG_ON(!root);
4229 		spin_unlock(&fs_info->delalloc_root_lock);
4230 
4231 		btrfs_destroy_delalloc_inodes(root);
4232 		btrfs_put_fs_root(root);
4233 
4234 		spin_lock(&fs_info->delalloc_root_lock);
4235 	}
4236 	spin_unlock(&fs_info->delalloc_root_lock);
4237 }
4238 
4239 static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
4240 					struct extent_io_tree *dirty_pages,
4241 					int mark)
4242 {
4243 	int ret;
4244 	struct extent_buffer *eb;
4245 	u64 start = 0;
4246 	u64 end;
4247 
4248 	while (1) {
4249 		ret = find_first_extent_bit(dirty_pages, start, &start, &end,
4250 					    mark, NULL);
4251 		if (ret)
4252 			break;
4253 
4254 		clear_extent_bits(dirty_pages, start, end, mark);
4255 		while (start <= end) {
4256 			eb = find_extent_buffer(fs_info, start);
4257 			start += fs_info->nodesize;
4258 			if (!eb)
4259 				continue;
4260 			wait_on_extent_buffer_writeback(eb);
4261 
4262 			if (test_and_clear_bit(EXTENT_BUFFER_DIRTY,
4263 					       &eb->bflags))
4264 				clear_extent_buffer_dirty(eb);
4265 			free_extent_buffer_stale(eb);
4266 		}
4267 	}
4268 
4269 	return ret;
4270 }
4271 
4272 static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
4273 				       struct extent_io_tree *pinned_extents)
4274 {
4275 	struct extent_io_tree *unpin;
4276 	u64 start;
4277 	u64 end;
4278 	int ret;
4279 	bool loop = true;
4280 
4281 	unpin = pinned_extents;
4282 again:
4283 	while (1) {
4284 		/*
4285 		 * The btrfs_finish_extent_commit() may get the same range as
4286 		 * ours between find_first_extent_bit and clear_extent_dirty.
4287 		 * Hence, hold the unused_bg_unpin_mutex to avoid double-unpinning
4288 		 * the same extent range.
4289 		 */
4290 		mutex_lock(&fs_info->unused_bg_unpin_mutex);
4291 		ret = find_first_extent_bit(unpin, 0, &start, &end,
4292 					    EXTENT_DIRTY, NULL);
4293 		if (ret) {
4294 			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
4295 			break;
4296 		}
4297 
4298 		clear_extent_dirty(unpin, start, end);
4299 		btrfs_error_unpin_extent_range(fs_info, start, end);
4300 		mutex_unlock(&fs_info->unused_bg_unpin_mutex);
4301 		cond_resched();
4302 	}
4303 
4304 	if (loop) {
4305 		if (unpin == &fs_info->freed_extents[0])
4306 			unpin = &fs_info->freed_extents[1];
4307 		else
4308 			unpin = &fs_info->freed_extents[0];
4309 		loop = false;
4310 		goto again;
4311 	}
4312 
4313 	return 0;
4314 }
4315 
4316 static void btrfs_cleanup_bg_io(struct btrfs_block_group_cache *cache)
4317 {
4318 	struct inode *inode;
4319 
4320 	inode = cache->io_ctl.inode;
4321 	if (inode) {
4322 		invalidate_inode_pages2(inode->i_mapping);
4323 		BTRFS_I(inode)->generation = 0;
4324 		cache->io_ctl.inode = NULL;
4325 		iput(inode);
4326 	}
4327 	btrfs_put_block_group(cache);
4328 }
4329 
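/*
 * On transaction abort, drain the transaction's dirty_bgs and io_bgs
 * lists: mark each block group's disk cache state as error and drop the
 * references the lists held.
 */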
4330 void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
4331 			     struct btrfs_fs_info *fs_info)
4332 {
4333 	struct btrfs_block_group_cache *cache;
4334 
4335 	spin_lock(&cur_trans->dirty_bgs_lock);
4336 	while (!list_empty(&cur_trans->dirty_bgs)) {
4337 		cache = list_first_entry(&cur_trans->dirty_bgs,
4338 					 struct btrfs_block_group_cache,
4339 					 dirty_list);
4340 		if (!cache) {
4341 			btrfs_err(fs_info, "orphan block group dirty_bgs list");
4342 			spin_unlock(&cur_trans->dirty_bgs_lock);
4343 			return;
4344 		}
4345 
4346 		if (!list_empty(&cache->io_list)) {
4347 			spin_unlock(&cur_trans->dirty_bgs_lock);
4348 			list_del_init(&cache->io_list);
4349 			btrfs_cleanup_bg_io(cache);
4350 			spin_lock(&cur_trans->dirty_bgs_lock);
4351 		}
4352 
4353 		list_del_init(&cache->dirty_list);
4354 		spin_lock(&cache->lock);
4355 		cache->disk_cache_state = BTRFS_DC_ERROR;
4356 		spin_unlock(&cache->lock);
4357 
4358 		spin_unlock(&cur_trans->dirty_bgs_lock);
4359 		btrfs_put_block_group(cache);
4360 		spin_lock(&cur_trans->dirty_bgs_lock);
4361 	}
4362 	spin_unlock(&cur_trans->dirty_bgs_lock);
4363 
4364 	while (!list_empty(&cur_trans->io_bgs)) {
4365 		cache = list_first_entry(&cur_trans->io_bgs,
4366 					 struct btrfs_block_group_cache,
4367 					 io_list);
4368 		if (!cache) {
4369 			btrfs_err(fs_info, "orphan block group on io_bgs list");
4370 			return;
4371 		}
4372 
4373 		list_del_init(&cache->io_list);
4374 		spin_lock(&cache->lock);
4375 		cache->disk_cache_state = BTRFS_DC_ERROR;
4376 		spin_unlock(&cache->lock);
4377 		btrfs_cleanup_bg_io(cache);
4378 	}
4379 }
4380 
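/*
 * Tear down a single aborted transaction: clean up its dirty block groups
 * and delayed refs, step it through the commit states so any waiters are
 * woken, then destroy its marked and pinned extents.
 */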
4381 void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
4382 				   struct btrfs_fs_info *fs_info)
4383 {
4384 	btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
4385 	ASSERT(list_empty(&cur_trans->dirty_bgs));
4386 	ASSERT(list_empty(&cur_trans->io_bgs));
4387 
4388 	btrfs_destroy_delayed_refs(cur_trans, fs_info);
4389 
4390 	cur_trans->state = TRANS_STATE_COMMIT_START;
4391 	wake_up(&fs_info->transaction_blocked_wait);
4392 
4393 	cur_trans->state = TRANS_STATE_UNBLOCKED;
4394 	wake_up(&fs_info->transaction_wait);
4395 
4396 	btrfs_destroy_delayed_inodes(fs_info);
4397 
4398 	btrfs_destroy_marked_extents(fs_info, &cur_trans->dirty_pages,
4399 				     EXTENT_DIRTY);
4400 	btrfs_destroy_pinned_extent(fs_info,
4401 				    fs_info->pinned_extents);
4402 
4403 	cur_trans->state = TRANS_STATE_COMPLETED;
4404 	wake_up(&cur_trans->commit_wait);
4405 }
4406 
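/*
 * Abort-time cleanup of every transaction on fs_info->trans_list:
 * transactions that already started committing are waited for, the rest
 * are torn down via btrfs_cleanup_one_transaction().
 */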
4407 static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info)
4408 {
4409 	struct btrfs_transaction *t;
4410 
4411 	mutex_lock(&fs_info->transaction_kthread_mutex);
4412 
4413 	spin_lock(&fs_info->trans_lock);
4414 	while (!list_empty(&fs_info->trans_list)) {
4415 		t = list_first_entry(&fs_info->trans_list,
4416 				     struct btrfs_transaction, list);
4417 		if (t->state >= TRANS_STATE_COMMIT_START) {
4418 			refcount_inc(&t->use_count);
4419 			spin_unlock(&fs_info->trans_lock);
4420 			btrfs_wait_for_commit(fs_info, t->transid);
4421 			btrfs_put_transaction(t);
4422 			spin_lock(&fs_info->trans_lock);
4423 			continue;
4424 		}
4425 		if (t == fs_info->running_transaction) {
4426 			t->state = TRANS_STATE_COMMIT_DOING;
4427 			spin_unlock(&fs_info->trans_lock);
4428 			/*
4429 			 * We wait for 0 num_writers since we don't hold a trans
4430 			 * handle open currently for this transaction.
4431 			 */
4432 			wait_event(t->writer_wait,
4433 				   atomic_read(&t->num_writers) == 0);
4434 		} else {
4435 			spin_unlock(&fs_info->trans_lock);
4436 		}
4437 		btrfs_cleanup_one_transaction(t, fs_info);
4438 
4439 		spin_lock(&fs_info->trans_lock);
4440 		if (t == fs_info->running_transaction)
4441 			fs_info->running_transaction = NULL;
4442 		list_del_init(&t->list);
4443 		spin_unlock(&fs_info->trans_lock);
4444 
4445 		btrfs_put_transaction(t);
4446 		trace_btrfs_transaction_commit(fs_info->tree_root);
4447 		spin_lock(&fs_info->trans_lock);
4448 	}
4449 	spin_unlock(&fs_info->trans_lock);
4450 	btrfs_destroy_all_ordered_extents(fs_info);
4451 	btrfs_destroy_delayed_inodes(fs_info);
4452 	btrfs_assert_delayed_root_empty(fs_info);
4453 	btrfs_destroy_pinned_extent(fs_info, fs_info->pinned_extents);
4454 	btrfs_destroy_all_delalloc_inodes(fs_info);
4455 	mutex_unlock(&fs_info->transaction_kthread_mutex);
4456 
4457 	return 0;
4458 }
4459 
4460 static struct btrfs_fs_info *btree_fs_info(void *private_data)
4461 {
4462 	struct inode *inode = private_data;
4463 	return btrfs_sb(inode->i_sb);
4464 }
4465 
4466 static const struct extent_io_ops btree_extent_io_ops = {
4467 	/* mandatory callbacks */
4468 	.submit_bio_hook = btree_submit_bio_hook,
4469 	.readpage_end_io_hook = btree_readpage_end_io_hook,
4470 	/* note we're sharing with inode.c for the merge bio hook */
4471 	.merge_bio_hook = btrfs_merge_bio_hook,
4472 	.readpage_io_failed_hook = btree_io_failed_hook,
4473 	.set_range_writeback = btrfs_set_range_writeback,
4474 	.tree_fs_info = btree_fs_info,
4475 
4476 	/* optional callbacks */
4477 };
4478