// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/percpu_counter.h>
#include <linux/lockdep.h>
#include <linux/crc32c.h>
#include "misc.h"
#include "tree-log.h"
#include "disk-io.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "locking.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
#include "sysfs.h"
#include "qgroup.h"
#include "ref-verify.h"
#include "space-info.h"
#include "block-rsv.h"
#include "delalloc-space.h"
#include "block-group.h"
#include "discard.h"
#include "rcu-string.h"
#include "zoned.h"
#include "dev-replace.h"

#undef SCRAMBLE_DELAYED_REFS

static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
			       struct btrfs_delayed_ref_node *node, u64 parent,
			       u64 root_objectid, u64 owner_objectid,
			       u64 owner_offset, int refs_to_drop,
			       struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				      u64 parent, u64 root_objectid,
				      u64 flags, u64 owner, u64 offset,
				      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
				     struct btrfs_delayed_ref_node *node,
				     struct btrfs_delayed_extent_op *extent_op);
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key);

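/* Check that all of the requested @bits are set in the block group's flags. */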
static int block_group_bits(struct btrfs_block_group *cache, u64 bits)
{
	return (cache->flags & bits) == bits;
}

int btrfs_add_excluded_extent(struct btrfs_fs_info *fs_info,
			      u64 start, u64 num_bytes)
{
	u64 end = start + num_bytes - 1;
	set_extent_bits(&fs_info->excluded_extents, start, end,
			EXTENT_UPTODATE);
	return 0;
}

void btrfs_free_excluded_extents(struct btrfs_block_group *cache)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	u64 start, end;

	start = cache->start;
	end = start + cache->length - 1;

	clear_extent_bits(&fs_info->excluded_extents, start, end,
			  EXTENT_UPTODATE);
}

/* Simple helper to search for an existing data extent at a given offset. */
int btrfs_lookup_data_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = start;
	key.offset = len;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
	btrfs_free_path(path);
	return ret;
}

/*
 * Helper function to look up the reference count and flags of a tree block.
 *
 * The head node for a delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree. The head
 * node may also store the extent flags to set. This way you can check
 * to see what the reference count and extent flags would be once all of
 * the delayed refs have been processed.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info, u64 bytenr,
			     u64 offset, int metadata, u64 *refs, u64 *flags)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u32 item_size;
	u64 num_refs;
	u64 extent_flags;
	int ret;

	/*
	 * If we don't have skinny metadata, don't bother doing anything
	 * different
	 */
	if (metadata && !btrfs_fs_incompat(fs_info, SKINNY_METADATA)) {
		offset = fs_info->nodesize;
		metadata = 0;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (!trans) {
		path->skip_locking = 1;
		path->search_commit_root = 1;
	}

search_again:
	key.objectid = bytenr;
	key.offset = offset;
	if (metadata)
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;

	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out_free;

	if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
		if (path->slots[0]) {
			path->slots[0]--;
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0]);
			if (key.objectid == bytenr &&
			    key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == fs_info->nodesize)
				ret = 0;
		}
	}

	if (ret == 0) {
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		if (item_size >= sizeof(*ei)) {
			ei = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_extent_item);
			num_refs = btrfs_extent_refs(leaf, ei);
			extent_flags = btrfs_extent_flags(leaf, ei);
		} else {
			ret = -EINVAL;
			btrfs_print_v0_err(fs_info);
			if (trans)
				btrfs_abort_transaction(trans, ret);
			else
				btrfs_handle_fs_error(fs_info, ret, NULL);

			goto out_free;
		}

		BUG_ON(num_refs == 0);
	} else {
		num_refs = 0;
		extent_flags = 0;
		ret = 0;
	}

	if (!trans)
		goto out;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
	if (head) {
		if (!mutex_trylock(&head->mutex)) {
			refcount_inc(&head->refs);
			spin_unlock(&delayed_refs->lock);

			btrfs_release_path(path);

			/*
			 * Mutex was contended, block until it's released and try
			 * again
			 */
			mutex_lock(&head->mutex);
			mutex_unlock(&head->mutex);
			btrfs_put_delayed_ref_head(head);
			goto search_again;
		}
		spin_lock(&head->lock);
		if (head->extent_op && head->extent_op->update_flags)
			extent_flags |= head->extent_op->flags_to_set;
		else
			BUG_ON(num_refs == 0);

		num_refs += head->ref_mod;
		spin_unlock(&head->lock);
		mutex_unlock(&head->mutex);
	}
	spin_unlock(&delayed_refs->lock);
out:
	WARN_ON(num_refs == 0);
	if (refs)
		*refs = num_refs;
	if (flags)
		*flags = extent_flags;
out_free:
	btrfs_free_path(path);
	return ret;
}

/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs. Implicit back refs are optimized
 * for pointers in non-shared tree blocks. For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key. This information allows us to find the block by
 * b-tree searching. Full back refs are for pointers in tree blocks not
 * referenced by their owner trees. The location of the tree block is
 * recorded in the back refs. The full back refs are actually generic and
 * can be used in all cases where the implicit back refs are used. The major
 * shortcoming of the full back refs is their overhead. Every time a tree
 * block gets COWed, we have to update the back ref entries for all pointers
 * in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it. This means most tree related operations only involve
 * implicit back refs. For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it. So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree. Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree. In this case, full back refs are used for pointers
 * in the block. Remove these full back refs, add implicit back refs for
 * every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree. In this case, implicit back refs are used for
 * pointers in the block. Add full back refs for every pointer in the
 * block, increase lower level extents' reference counts. The original
 * implicit back refs are carried over to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree. Add implicit back refs for every pointer in
 * the new block, increase lower level extents' reference count.
 *
 * Back reference key composition:
 *
 * The key objectid corresponds to the first byte in the extent,
 * the key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is the hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, the implicit back refs are used.
 * The fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed during file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * only consist of a key. The key offset for the implicit back refs is the
 * objectid of the block's owner tree. The key offset for the full back refs
 * is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * the level of the tree block is required. This information is stored in
 * the tree block info structure.
 */

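/*
 * For illustration, with made-up values: a file extent at bytenr 12582912
 * referenced by inode 257 at file offset 0 in subvolume 5 gets the implicit
 * back ref key
 *
 *     (12582912, BTRFS_EXTENT_DATA_REF_KEY, hash(5, 257, 0))
 *
 * while the same extent referenced through a relocated (shared) leaf at
 * bytenr 30408704 would instead be keyed as
 *
 *     (12582912, BTRFS_SHARED_DATA_REF_KEY, 30408704)
 */
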
/*
 * is_data == BTRFS_REF_TYPE_BLOCK, tree block type is required,
 * is_data == BTRFS_REF_TYPE_DATA, data type is required,
 * is_data == BTRFS_REF_TYPE_ANY, either type is OK.
 */
int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb,
				     struct btrfs_extent_inline_ref *iref,
				     enum btrfs_inline_ref_type is_data)
{
	int type = btrfs_extent_inline_ref_type(eb, iref);
	u64 offset = btrfs_extent_inline_ref_offset(eb, iref);

	if (type == BTRFS_TREE_BLOCK_REF_KEY ||
	    type == BTRFS_SHARED_BLOCK_REF_KEY ||
	    type == BTRFS_SHARED_DATA_REF_KEY ||
	    type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (is_data == BTRFS_REF_TYPE_BLOCK) {
			if (type == BTRFS_TREE_BLOCK_REF_KEY)
				return type;
			if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
				ASSERT(eb->fs_info);
				/*
				 * Every shared one has parent tree block,
				 * which must be aligned to sector size.
				 */
				if (offset &&
				    IS_ALIGNED(offset, eb->fs_info->sectorsize))
					return type;
			}
		} else if (is_data == BTRFS_REF_TYPE_DATA) {
			if (type == BTRFS_EXTENT_DATA_REF_KEY)
				return type;
			if (type == BTRFS_SHARED_DATA_REF_KEY) {
				ASSERT(eb->fs_info);
				/*
				 * Every shared one has parent tree block,
				 * which must be aligned to sector size.
				 */
				if (offset &&
				    IS_ALIGNED(offset, eb->fs_info->sectorsize))
					return type;
			}
		} else {
			ASSERT(is_data == BTRFS_REF_TYPE_ANY);
			return type;
		}
	}

	btrfs_print_leaf((struct extent_buffer *)eb);
	btrfs_err(eb->fs_info,
		  "eb %llu iref 0x%lx invalid extent inline ref type %d",
		  eb->start, (unsigned long)iref, type);
	WARN_ON(1);

	return BTRFS_REF_TYPE_INVALID;
}

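/*
 * Hash the (root, objectid, offset) triple that identifies an implicit data
 * back ref.  The result is used as the key offset of
 * BTRFS_EXTENT_DATA_REF_KEY items.
 */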
u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
	u32 high_crc = ~(u32)0;
	u32 low_crc = ~(u32)0;
	__le64 lenum;

	lenum = cpu_to_le64(root_objectid);
	high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(owner);
	low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(offset);
	low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));

	return ((u64)high_crc << 31) ^ (u64)low_crc;
}

static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
				     struct btrfs_extent_data_ref *ref)
{
	return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
				    btrfs_extent_data_ref_objectid(leaf, ref),
				    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
				 struct btrfs_extent_data_ref *ref,
				 u64 root_objectid, u64 owner, u64 offset)
{
	if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
		return 0;
	return 1;
}

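/*
 * Look up the data back ref item for the given extent.  Shared refs are
 * keyed directly by the parent block, while implicit refs are keyed by the
 * (root, objectid, offset) hash and may need a forward scan over
 * neighbouring items to resolve hash collisions.
 */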
static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid,
					   u64 owner, u64 offset)
{
	struct btrfs_root *root = trans->fs_info->extent_root;
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;
	int recow;
	int err = -ENOENT;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
	}
again:
	recow = 0;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0) {
		err = ret;
		goto fail;
	}

	if (parent) {
		if (!ret)
			return 0;
		goto fail;
	}

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);
	while (1) {
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				err = ret;
			if (ret)
				goto fail;

			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != bytenr ||
		    key.type != BTRFS_EXTENT_DATA_REF_KEY)
			goto fail;

		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);

		if (match_extent_data_ref(leaf, ref, root_objectid,
					  owner, offset)) {
			if (recow) {
				btrfs_release_path(path);
				goto again;
			}
			err = 0;
			break;
		}
		path->slots[0]++;
	}
fail:
	return err;
}

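/*
 * Insert a new data back ref item, or bump the count of an existing one.
 * For implicit refs, hash collisions are resolved by incrementing the key
 * offset until the matching ref or a free slot is found.
 */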
static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid, u64 owner,
					   u64 offset, int refs_to_add)
{
	struct btrfs_root *root = trans->fs_info->extent_root;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	u32 size;
	u32 num_refs;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
		size = sizeof(struct btrfs_shared_data_ref);
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
		size = sizeof(struct btrfs_extent_data_ref);
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, size);
	if (ret && ret != -EEXIST)
		goto fail;

	leaf = path->nodes[0];
	if (parent) {
		struct btrfs_shared_data_ref *ref;
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_shared_data_ref);
		if (ret == 0) {
			btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_shared_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
		}
	} else {
		struct btrfs_extent_data_ref *ref;
		while (ret == -EEXIST) {
			ref = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_data_ref);
			if (match_extent_data_ref(leaf, ref, root_objectid,
						  owner, offset))
				break;
			btrfs_release_path(path);
			key.offset++;
			ret = btrfs_insert_empty_item(trans, root, path, &key,
						      size);
			if (ret && ret != -EEXIST)
				goto fail;

			leaf = path->nodes[0];
		}
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);
		if (ret == 0) {
			btrfs_set_extent_data_ref_root(leaf, ref,
						       root_objectid);
			btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
			btrfs_set_extent_data_ref_offset(leaf, ref, offset);
			btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_extent_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
		}
	}
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;
fail:
	btrfs_release_path(path);
	return ret;
}

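/*
 * Drop refs_to_drop references from the data back ref item the path points
 * to, deleting the item once its count reaches zero.  *last_ref is set when
 * the item was removed entirely.
 */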
static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_path *path,
					   int refs_to_drop, int *last_ref)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref1 = NULL;
	struct btrfs_shared_data_ref *ref2 = NULL;
	struct extent_buffer *leaf;
	u32 num_refs = 0;
	int ret = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
	} else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) {
		btrfs_print_v0_err(trans->fs_info);
		btrfs_abort_transaction(trans, -EINVAL);
		return -EINVAL;
	} else {
		BUG();
	}

	BUG_ON(num_refs < refs_to_drop);
	num_refs -= refs_to_drop;

	if (num_refs == 0) {
		ret = btrfs_del_item(trans, trans->fs_info->extent_root, path);
		*last_ref = 1;
	} else {
		if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
		else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
			btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
		btrfs_mark_buffer_dirty(leaf);
	}
	return ret;
}

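/* Return the reference count stored in a data back ref, inline or keyed. */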
static noinline u32 extent_data_ref_count(struct btrfs_path *path,
					  struct btrfs_extent_inline_ref *iref)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref1;
	struct btrfs_shared_data_ref *ref2;
	u32 num_refs = 0;
	int type;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	BUG_ON(key.type == BTRFS_EXTENT_REF_V0_KEY);
	if (iref) {
		/*
		 * If type is invalid, we should have bailed out earlier than
		 * this call.
		 */
		type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_DATA);
		ASSERT(type != BTRFS_REF_TYPE_INVALID);
		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
			num_refs = btrfs_extent_data_ref_count(leaf, ref1);
		} else {
			ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
			num_refs = btrfs_shared_data_ref_count(leaf, ref2);
		}
	} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
	} else {
		WARN_ON(1);
	}
	return num_refs;
}

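/*
 * Tree block back refs carry no count, so looking one up is a plain key
 * search: shared refs are keyed by the parent block, implicit refs by the
 * owner root's objectid.
 */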
static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_root *root = trans->fs_info->extent_root;
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;
	return ret;
}

static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_insert_empty_item(trans, trans->fs_info->extent_root,
				      path, &key, 0);
	btrfs_release_path(path);
	return ret;
}

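/*
 * Map (parent, owner) to the back ref key type: owners below
 * BTRFS_FIRST_FREE_OBJECTID are tree roots (metadata), everything else is
 * data, and a non-zero parent selects the shared variant.
 */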
static inline int extent_ref_type(u64 parent, u64 owner)
{
	int type;
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		if (parent > 0)
			type = BTRFS_SHARED_BLOCK_REF_KEY;
		else
			type = BTRFS_TREE_BLOCK_REF_KEY;
	} else {
		if (parent > 0)
			type = BTRFS_SHARED_DATA_REF_KEY;
		else
			type = BTRFS_EXTENT_DATA_REF_KEY;
	}
	return type;
}

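/*
 * Walk up the path to find the key following the current slot, i.e. the
 * smallest key in the tree greater than everything at or below the current
 * position.  Returns 1 if there is none.
 */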
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key)
{
	for (; level < BTRFS_MAX_LEVEL; level++) {
		if (!path->nodes[level])
			break;
		if (path->slots[level] + 1 >=
		    btrfs_header_nritems(path->nodes[level]))
			continue;
		if (level == 0)
			btrfs_item_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		else
			btrfs_node_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		return 0;
	}
	return 1;
}

/*
 * Look for an inline back ref.  If the back ref is found, *ref_ret is set
 * to the address of the inline back ref and 0 is returned.
 *
 * If the back ref isn't found, *ref_ret is set to the address where it
 * should be inserted and -ENOENT is returned.
 *
 * If insert is true and there are too many inline back refs, the path
 * points to the extent item and -EAGAIN is returned.
 *
 * NOTE: inline back refs are ordered in the same way that back ref
 *	 items in the tree are ordered.
 */
static noinline_for_stack
int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int insert)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	u64 flags;
	u64 item_size;
	unsigned long ptr;
	unsigned long end;
	int extra_size;
	int type;
	int want;
	int ret;
	int err = 0;
	bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
	int needed;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;

	want = extent_ref_type(parent, owner);
	if (insert) {
		extra_size = btrfs_extent_inline_ref_size(want);
		path->search_for_extension = 1;
		path->keep_locks = 1;
	} else
		extra_size = -1;

	/*
	 * Owner is our level, so we can just add one to get the level for the
	 * block we are interested in.
	 */
	if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
		key.type = BTRFS_METADATA_ITEM_KEY;
		key.offset = owner;
	}

again:
	ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}

	/*
	 * We may be a newly converted file system which still has the old fat
	 * extent entries for metadata, so try and see if we have one of those.
	 */
	if (ret > 0 && skinny_metadata) {
		skinny_metadata = false;
		if (path->slots[0]) {
			path->slots[0]--;
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0]);
			if (key.objectid == bytenr &&
			    key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == num_bytes)
				ret = 0;
		}
		if (ret) {
			key.objectid = bytenr;
			key.type = BTRFS_EXTENT_ITEM_KEY;
			key.offset = num_bytes;
			btrfs_release_path(path);
			goto again;
		}
	}

	if (ret && !insert) {
		err = -ENOENT;
		goto out;
	} else if (WARN_ON(ret)) {
		btrfs_print_leaf(path->nodes[0]);
		btrfs_err(fs_info,
"extent item not found for insert, bytenr %llu num_bytes %llu parent %llu root_objectid %llu owner %llu offset %llu",
			  bytenr, num_bytes, parent, root_objectid, owner,
			  offset);
		err = -EIO;
		goto out;
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	if (unlikely(item_size < sizeof(*ei))) {
		err = -EINVAL;
		btrfs_print_v0_err(fs_info);
		btrfs_abort_transaction(trans, err);
		goto out;
	}

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	}

	if (owner >= BTRFS_FIRST_FREE_OBJECTID)
		needed = BTRFS_REF_TYPE_DATA;
	else
		needed = BTRFS_REF_TYPE_BLOCK;

	err = -ENOENT;
	while (1) {
		if (ptr >= end) {
			WARN_ON(ptr > end);
			break;
		}
		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_get_extent_inline_ref_type(leaf, iref, needed);
		if (type == BTRFS_REF_TYPE_INVALID) {
			err = -EUCLEAN;
			goto out;
		}

		if (want < type)
			break;
		if (want > type) {
			ptr += btrfs_extent_inline_ref_size(type);
			continue;
		}

		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			struct btrfs_extent_data_ref *dref;
			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			if (match_extent_data_ref(leaf, dref, root_objectid,
						  owner, offset)) {
				err = 0;
				break;
			}
			if (hash_extent_data_ref_item(leaf, dref) <
			    hash_extent_data_ref(root_objectid, owner, offset))
				break;
		} else {
			u64 ref_offset;
			ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
			if (parent > 0) {
				if (parent == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < parent)
					break;
			} else {
				if (root_objectid == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < root_objectid)
					break;
			}
		}
		ptr += btrfs_extent_inline_ref_size(type);
	}
	if (err == -ENOENT && insert) {
		if (item_size + extra_size >=
		    BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
			err = -EAGAIN;
			goto out;
		}
		/*
		 * To add a new inline back ref, we have to make sure that
		 * there is no corresponding back ref item.
		 * For simplicity, we just do not add a new inline back
		 * ref if there is any kind of item for this block.
		 */
		if (find_next_key(path, 0, &key) == 0 &&
		    key.objectid == bytenr &&
		    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
			err = -EAGAIN;
			goto out;
		}
	}
	*ref_ret = (struct btrfs_extent_inline_ref *)ptr;
out:
	if (insert) {
		path->keep_locks = 0;
		path->search_for_extension = 0;
		btrfs_unlock_up_safe(path, 1);
	}
	return err;
}

/*
 * helper to add new inline back ref
 */
static noinline_for_stack
void setup_inline_extent_backref(struct btrfs_fs_info *fs_info,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	unsigned long ptr;
	unsigned long end;
	unsigned long item_offset;
	u64 refs;
	int size;
	int type;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	item_offset = (unsigned long)iref - (unsigned long)ei;

	type = extent_ref_type(parent, owner);
	size = btrfs_extent_inline_ref_size(type);

	btrfs_extend_item(path, size);

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	refs += refs_to_add;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	ptr = (unsigned long)ei + item_offset;
	end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
	if (ptr < end - size)
		memmove_extent_buffer(leaf, ptr + size, ptr,
				      end - size - ptr);

	iref = (struct btrfs_extent_inline_ref *)ptr;
	btrfs_set_extent_inline_ref_type(leaf, iref, type);
	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		struct btrfs_extent_data_ref *dref;
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
		btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
		btrfs_set_extent_data_ref_offset(leaf, dref, offset);
		btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		struct btrfs_shared_data_ref *sref;
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else {
		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
	}
	btrfs_mark_buffer_dirty(leaf);
}

static int lookup_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner, u64 offset)
{
	int ret;

	ret = lookup_inline_extent_backref(trans, path, ref_ret, bytenr,
					   num_bytes, parent, root_objectid,
					   owner, offset, 0);
	if (ret != -ENOENT)
		return ret;

	btrfs_release_path(path);
	*ref_ret = NULL;

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = lookup_tree_block_ref(trans, path, bytenr, parent,
					    root_objectid);
	} else {
		ret = lookup_extent_data_ref(trans, path, bytenr, parent,
					     root_objectid, owner, offset);
	}
	return ret;
}

/*
 * helper to update/remove inline back ref
 */
static noinline_for_stack
void update_inline_extent_backref(struct btrfs_path *path,
				  struct btrfs_extent_inline_ref *iref,
				  int refs_to_mod,
				  struct btrfs_delayed_extent_op *extent_op,
				  int *last_ref)
{
	struct extent_buffer *leaf = path->nodes[0];
	struct btrfs_extent_item *ei;
	struct btrfs_extent_data_ref *dref = NULL;
	struct btrfs_shared_data_ref *sref = NULL;
	unsigned long ptr;
	unsigned long end;
	u32 item_size;
	int size;
	int type;
	u64 refs;

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
	refs += refs_to_mod;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	/*
	 * If type is invalid, we should have bailed out after
	 * lookup_inline_extent_backref().
	 */
	type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_ANY);
	ASSERT(type != BTRFS_REF_TYPE_INVALID);

	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		refs = btrfs_extent_data_ref_count(leaf, dref);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		refs = btrfs_shared_data_ref_count(leaf, sref);
	} else {
		refs = 1;
		BUG_ON(refs_to_mod != -1);
	}

	BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
	refs += refs_to_mod;

	if (refs > 0) {
		if (type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, dref, refs);
		else
			btrfs_set_shared_data_ref_count(leaf, sref, refs);
	} else {
		*last_ref = 1;
		size = btrfs_extent_inline_ref_size(type);
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		ptr = (unsigned long)iref;
		end = (unsigned long)ei + item_size;
		if (ptr + size < end)
			memmove_extent_buffer(leaf, ptr, ptr + size,
					      end - ptr - size);
		item_size -= size;
		btrfs_truncate_item(path, item_size, 1);
	}
	btrfs_mark_buffer_dirty(leaf);
}

static noinline_for_stack
int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_path *path,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner,
				 u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_extent_inline_ref *iref;
	int ret;

	ret = lookup_inline_extent_backref(trans, path, &iref, bytenr,
					   num_bytes, parent, root_objectid,
					   owner, offset, 1);
	if (ret == 0) {
		/*
		 * We're adding refs to a tree block we already own, this
		 * should not happen at all.
		 */
		if (owner < BTRFS_FIRST_FREE_OBJECTID) {
			btrfs_crit(trans->fs_info,
"adding refs to an existing tree ref, bytenr %llu num_bytes %llu root_objectid %llu",
				   bytenr, num_bytes, root_objectid);
			if (IS_ENABLED(CONFIG_BTRFS_DEBUG)) {
				WARN_ON(1);
				btrfs_crit(trans->fs_info,
			"path->slots[0]=%d path->nodes[0]:", path->slots[0]);
				btrfs_print_leaf(path->nodes[0]);
			}
			return -EUCLEAN;
		}
		update_inline_extent_backref(path, iref, refs_to_add,
					     extent_op, NULL);
	} else if (ret == -ENOENT) {
		setup_inline_extent_backref(trans->fs_info, path, iref, parent,
					    root_objectid, owner, offset,
					    refs_to_add, extent_op);
		ret = 0;
	}
	return ret;
}

static int remove_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 int refs_to_drop, int is_data, int *last_ref)
{
	int ret = 0;

	BUG_ON(!is_data && refs_to_drop != 1);
	if (iref) {
		update_inline_extent_backref(path, iref, -refs_to_drop, NULL,
					     last_ref);
	} else if (is_data) {
		ret = remove_extent_data_ref(trans, path, refs_to_drop,
					     last_ref);
	} else {
		*last_ref = 1;
		ret = btrfs_del_item(trans, trans->fs_info->extent_root, path);
	}
	return ret;
}

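/*
 * Issue discards for [start, start + len), skipping any superblock mirrors
 * that fall inside the range and reporting how many bytes were actually
 * discarded.
 */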
static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
			       u64 *discarded_bytes)
{
	int j, ret = 0;
	u64 bytes_left, end;
	u64 aligned_start = ALIGN(start, 1 << 9);

	/* Adjust the range to be aligned to 512B sectors if necessary. */
	if (start != aligned_start) {
		len -= aligned_start - start;
		len = round_down(len, 1 << 9);
		start = aligned_start;
	}

	*discarded_bytes = 0;

	if (!len)
		return 0;

	end = start + len;
	bytes_left = len;

	/* Skip any superblocks on this device. */
	for (j = 0; j < BTRFS_SUPER_MIRROR_MAX; j++) {
		u64 sb_start = btrfs_sb_offset(j);
		u64 sb_end = sb_start + BTRFS_SUPER_INFO_SIZE;
		u64 size = sb_start - start;

		if (!in_range(sb_start, start, bytes_left) &&
		    !in_range(sb_end, start, bytes_left) &&
		    !in_range(start, sb_start, BTRFS_SUPER_INFO_SIZE))
			continue;

		/*
		 * Superblock spans beginning of range.  Adjust start and
		 * try again.
		 */
		if (sb_start <= start) {
			start += sb_end - start;
			if (start > end) {
				bytes_left = 0;
				break;
			}
			bytes_left = end - start;
			continue;
		}

		if (size) {
			ret = blkdev_issue_discard(bdev, start >> 9, size >> 9,
						   GFP_NOFS, 0);
			if (!ret)
				*discarded_bytes += size;
			else if (ret != -EOPNOTSUPP)
				return ret;
		}

		start = sb_end;
		if (start > end) {
			bytes_left = 0;
			break;
		}
		bytes_left = end - start;
	}

	if (bytes_left) {
		ret = blkdev_issue_discard(bdev, start >> 9, bytes_left >> 9,
					   GFP_NOFS, 0);
		if (!ret)
			*discarded_bytes += bytes_left;
	}
	return ret;
}

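/*
 * Discard one stripe of an extent: a zone reset on zoned devices (mirrored
 * to the replace target while a device replace is running), a regular
 * discard otherwise.
 */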
static int do_discard_extent(struct btrfs_io_stripe *stripe, u64 *bytes)
{
	struct btrfs_device *dev = stripe->dev;
	struct btrfs_fs_info *fs_info = dev->fs_info;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
	u64 phys = stripe->physical;
	u64 len = stripe->length;
	u64 discarded = 0;
	int ret = 0;

	/* Zone reset on a zoned filesystem */
	if (btrfs_can_zone_reset(dev, phys, len)) {
		u64 src_disc;

		ret = btrfs_reset_device_zone(dev, phys, len, &discarded);
		if (ret)
			goto out;

		if (!btrfs_dev_replace_is_ongoing(dev_replace) ||
		    dev != dev_replace->srcdev)
			goto out;

		src_disc = discarded;

		/* Send to replace target as well */
		ret = btrfs_reset_device_zone(dev_replace->tgtdev, phys, len,
					      &discarded);
		discarded += src_disc;
	} else if (blk_queue_discard(bdev_get_queue(stripe->dev->bdev))) {
		ret = btrfs_issue_discard(dev->bdev, phys, len, &discarded);
	} else {
		ret = 0;
		*bytes = 0;
	}

out:
	*bytes = discarded;
	return ret;
}

int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 num_bytes, u64 *actual_bytes)
{
	int ret = 0;
	u64 discarded_bytes = 0;
	u64 end = bytenr + num_bytes;
	u64 cur = bytenr;
	struct btrfs_io_context *bioc = NULL;

	/*
	 * Avoid races with device replace and make sure our bioc has devices
	 * associated to its stripes that don't go away while we are discarding.
	 */
	btrfs_bio_counter_inc_blocked(fs_info);
	while (cur < end) {
		struct btrfs_io_stripe *stripe;
		int i;

		num_bytes = end - cur;
		/* Tell the block device(s) that the sectors can be discarded */
		ret = btrfs_map_block(fs_info, BTRFS_MAP_DISCARD, cur,
				      &num_bytes, &bioc, 0);
		/*
		 * Error can be -ENOMEM, -ENOENT (no such chunk mapping) or
		 * -EOPNOTSUPP. For any such error, @num_bytes is not updated,
		 * thus we can't continue anyway.
		 */
		if (ret < 0)
			goto out;

		stripe = bioc->stripes;
		for (i = 0; i < bioc->num_stripes; i++, stripe++) {
			u64 bytes;
			struct btrfs_device *device = stripe->dev;

			if (!device->bdev) {
				ASSERT(btrfs_test_opt(fs_info, DEGRADED));
				continue;
			}

			if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
				continue;

			ret = do_discard_extent(stripe, &bytes);
			if (!ret) {
				discarded_bytes += bytes;
			} else if (ret != -EOPNOTSUPP) {
				/*
				 * Logic errors or -ENOMEM, or -EIO, but
				 * unlikely to happen.
				 *
				 * And since there are two loops, explicitly
				 * go to out to avoid confusion.
				 */
				btrfs_put_bioc(bioc);
				goto out;
			}

			/*
			 * Just in case we get back EOPNOTSUPP for some reason,
			 * just ignore the return value so we don't screw up
			 * people calling discard_extent.
			 */
			ret = 0;
		}
		btrfs_put_bioc(bioc);
		cur += num_bytes;
	}
out:
	btrfs_bio_counter_dec(fs_info);

	if (actual_bytes)
		*actual_bytes = discarded_bytes;

	if (ret == -EOPNOTSUPP)
		ret = 0;
	return ret;
}

/* Can return -ENOMEM */
int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
			 struct btrfs_ref *generic_ref)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int ret;

	ASSERT(generic_ref->type != BTRFS_REF_NOT_SET &&
	       generic_ref->action);
	BUG_ON(generic_ref->type == BTRFS_REF_METADATA &&
	       generic_ref->tree_ref.root == BTRFS_TREE_LOG_OBJECTID);

	if (generic_ref->type == BTRFS_REF_METADATA)
		ret = btrfs_add_delayed_tree_ref(trans, generic_ref, NULL);
	else
		ret = btrfs_add_delayed_data_ref(trans, generic_ref, 0);

	btrfs_ref_tree_mod(fs_info, generic_ref);

	return ret;
}

/*
 * __btrfs_inc_extent_ref - insert backreference for a given extent
 *
 * The counterpart is in __btrfs_free_extent(), with examples and more details
 * on how it works.
 *
 * @trans:	    Handle of transaction
 *
 * @node:	    The delayed ref node used to get the bytenr/length for
 *		    extent whose references are incremented.
 *
 * @parent:	    If this is a shared extent (BTRFS_SHARED_DATA_REF_KEY/
 *		    BTRFS_SHARED_BLOCK_REF_KEY) then it holds the logical
 *		    bytenr of the parent block. Since new extents are always
 *		    created with indirect references, this will only be the case
 *		    when relocating a shared extent. In that case, root_objectid
 *		    will be BTRFS_TREE_RELOC_OBJECTID. Otherwise, parent must
 *		    be 0.
 *
 * @root_objectid:  The id of the root where this modification has originated,
 *		    this can be either one of the well-known metadata trees or
 *		    the subvolume id which references this extent.
 *
 * @owner:	    For data extents it is the inode number of the owning file.
 *		    For metadata extents this parameter holds the level in the
 *		    tree of the extent.
 *
 * @offset:	    For metadata extents the offset is ignored and is currently
 *		    always passed as 0. For data extents it is the file offset
 *		    this extent belongs to.
 *
 * @refs_to_add:    Number of references to add
 *
 * @extent_op:      Pointer to a structure, holding information necessary when
 *                  updating a tree block's flags
 *
 */
static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_delayed_ref_node *node,
				  u64 parent, u64 root_objectid,
				  u64 owner, u64 offset, int refs_to_add,
				  struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *item;
	struct btrfs_key key;
	u64 bytenr = node->bytenr;
	u64 num_bytes = node->num_bytes;
	u64 refs;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* this will setup the path even if it fails to insert the back ref */
	ret = insert_inline_extent_backref(trans, path, bytenr, num_bytes,
					   parent, root_objectid, owner,
					   offset, refs_to_add, extent_op);
	if ((ret < 0 && ret != -EAGAIN) || !ret)
		goto out;

	/*
	 * OK, we had -EAGAIN, which means we didn't have space to insert an
	 * inline extent ref, so just update the reference count and add a
	 * normal backref.
	 */
	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, item);
	btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, item);

	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	/* now insert the actual backref */
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		BUG_ON(refs_to_add != 1);
		ret = insert_tree_block_ref(trans, path, bytenr, parent,
					    root_objectid);
	} else {
		ret = insert_extent_data_ref(trans, path, bytenr, parent,
					     root_objectid, owner, offset,
					     refs_to_add);
	}
	if (ret)
		btrfs_abort_transaction(trans, ret);
out:
	btrfs_free_path(path);
	return ret;
}

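/*
 * Apply one delayed data ref: allocate the reserved extent, add a back ref,
 * or free the extent, depending on the queued action.
 */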
static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				int insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_key ins;
	u64 parent = 0;
	u64 ref_root = 0;
	u64 flags = 0;

	ins.objectid = node->bytenr;
	ins.offset = node->num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;

	ref = btrfs_delayed_node_to_data_ref(node);
	trace_run_delayed_data_ref(trans->fs_info, node, ref, node->action);

	if (node->type == BTRFS_SHARED_DATA_REF_KEY)
		parent = ref->parent;
	ref_root = ref->root;

	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		if (extent_op)
			flags |= extent_op->flags_to_set;
		ret = alloc_reserved_file_extent(trans, parent, ref_root,
						 flags, ref->objectid,
						 ref->offset, &ins,
						 node->ref_mod);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, node, parent, ref_root,
					     ref->objectid, ref->offset,
					     node->ref_mod, extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, node, parent,
					  ref_root, ref->objectid,
					  ref->offset, node->ref_mod,
					  extent_op);
	} else {
		BUG();
	}
	return ret;
}

static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei)
{
	u64 flags = btrfs_extent_flags(leaf, ei);
	if (extent_op->update_flags) {
		flags |= extent_op->flags_to_set;
		btrfs_set_extent_flags(leaf, ei, flags);
	}

	if (extent_op->update_key) {
		struct btrfs_tree_block_info *bi;
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
		bi = (struct btrfs_tree_block_info *)(ei + 1);
		btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
	}
}

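/*
 * Apply a queued flags/key update to the extent item itself, falling back
 * from the skinny metadata key to the old extent item key when needed.
 */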
static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
				 struct btrfs_delayed_ref_head *head,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	u32 item_size;
	int ret;
	int err = 0;
	int metadata = !extent_op->is_data;

	if (TRANS_ABORTED(trans))
		return 0;

	if (metadata && !btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		metadata = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = head->bytenr;

	if (metadata) {
		key.type = BTRFS_METADATA_ITEM_KEY;
		key.offset = extent_op->level;
	} else {
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = head->num_bytes;
	}

again:
	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	if (ret > 0) {
		if (metadata) {
			if (path->slots[0] > 0) {
				path->slots[0]--;
				btrfs_item_key_to_cpu(path->nodes[0], &key,
						      path->slots[0]);
				if (key.objectid == head->bytenr &&
				    key.type == BTRFS_EXTENT_ITEM_KEY &&
				    key.offset == head->num_bytes)
					ret = 0;
			}
			if (ret > 0) {
				btrfs_release_path(path);
				metadata = 0;

				key.objectid = head->bytenr;
				key.offset = head->num_bytes;
				key.type = BTRFS_EXTENT_ITEM_KEY;
				goto again;
			}
		} else {
			err = -EIO;
			goto out;
		}
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);

	if (unlikely(item_size < sizeof(*ei))) {
		err = -EINVAL;
		btrfs_print_v0_err(fs_info);
		btrfs_abort_transaction(trans, err);
		goto out;
	}

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	__run_delayed_extent_op(extent_op, leaf, ei);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return err;
}

1661 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
1662 				struct btrfs_delayed_ref_node *node,
1663 				struct btrfs_delayed_extent_op *extent_op,
1664 				int insert_reserved)
1665 {
1666 	int ret = 0;
1667 	struct btrfs_delayed_tree_ref *ref;
1668 	u64 parent = 0;
1669 	u64 ref_root = 0;
1670 
1671 	ref = btrfs_delayed_node_to_tree_ref(node);
1672 	trace_run_delayed_tree_ref(trans->fs_info, node, ref, node->action);
1673 
1674 	if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
1675 		parent = ref->parent;
1676 	ref_root = ref->root;
1677 
1678 	if (unlikely(node->ref_mod != 1)) {
1679 		btrfs_err(trans->fs_info,
1680 	"btree block %llu has %d references rather than 1: action %d ref_root %llu parent %llu",
1681 			  node->bytenr, node->ref_mod, node->action, ref_root,
1682 			  parent);
1683 		return -EUCLEAN;
1684 	}
1685 	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
1686 		BUG_ON(!extent_op || !extent_op->update_flags);
1687 		ret = alloc_reserved_tree_block(trans, node, extent_op);
1688 	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
1689 		ret = __btrfs_inc_extent_ref(trans, node, parent, ref_root,
1690 					     ref->level, 0, 1, extent_op);
1691 	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
1692 		ret = __btrfs_free_extent(trans, node, parent, ref_root,
1693 					  ref->level, 0, 1, extent_op);
1694 	} else {
1695 		BUG();
1696 	}
1697 	return ret;
1698 }
1699 
1700 /* helper function to actually process a single delayed ref entry */
1701 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
1702 			       struct btrfs_delayed_ref_node *node,
1703 			       struct btrfs_delayed_extent_op *extent_op,
1704 			       int insert_reserved)
1705 {
1706 	int ret = 0;
1707 
1708 	if (TRANS_ABORTED(trans)) {
1709 		if (insert_reserved)
1710 			btrfs_pin_extent(trans, node->bytenr, node->num_bytes, 1);
1711 		return 0;
1712 	}
1713 
1714 	if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
1715 	    node->type == BTRFS_SHARED_BLOCK_REF_KEY)
1716 		ret = run_delayed_tree_ref(trans, node, extent_op,
1717 					   insert_reserved);
1718 	else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
1719 		 node->type == BTRFS_SHARED_DATA_REF_KEY)
1720 		ret = run_delayed_data_ref(trans, node, extent_op,
1721 					   insert_reserved);
1722 	else
1723 		BUG();
1724 	if (ret && insert_reserved)
1725 		btrfs_pin_extent(trans, node->bytenr, node->num_bytes, 1);
1726 	if (ret < 0)
1727 		btrfs_err(trans->fs_info,
1728 "failed to run delayed ref for logical %llu num_bytes %llu type %u action %u ref_mod %d: %d",
1729 			  node->bytenr, node->num_bytes, node->type,
1730 			  node->action, node->ref_mod, ret);
1731 	return ret;
1732 }
1733 
1734 static inline struct btrfs_delayed_ref_node *
1735 select_delayed_ref(struct btrfs_delayed_ref_head *head)
1736 {
1737 	struct btrfs_delayed_ref_node *ref;
1738 
1739 	if (RB_EMPTY_ROOT(&head->ref_tree.rb_root))
1740 		return NULL;
1741 
1742 	/*
1743 	 * Select a delayed ref of type BTRFS_ADD_DELAYED_REF first.
1744 	 * This is to prevent a ref count from going down to zero, which deletes
1745 	 * the extent item from the extent tree, when there still are references
1746 	 * to add, which would fail because they would not find the extent item.
1747 	 */
1748 	if (!list_empty(&head->ref_add_list))
1749 		return list_first_entry(&head->ref_add_list,
1750 				struct btrfs_delayed_ref_node, add_list);
1751 
1752 	ref = rb_entry(rb_first_cached(&head->ref_tree),
1753 		       struct btrfs_delayed_ref_node, ref_node);
1754 	ASSERT(list_empty(&ref->add_list));
1755 	return ref;
1756 }
1757 
1758 static void unselect_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
1759 				      struct btrfs_delayed_ref_head *head)
1760 {
1761 	spin_lock(&delayed_refs->lock);
1762 	head->processing = 0;
1763 	delayed_refs->num_heads_ready++;
1764 	spin_unlock(&delayed_refs->lock);
1765 	btrfs_delayed_ref_unlock(head);
1766 }
1767 
1768 static struct btrfs_delayed_extent_op *cleanup_extent_op(
1769 				struct btrfs_delayed_ref_head *head)
1770 {
1771 	struct btrfs_delayed_extent_op *extent_op = head->extent_op;
1772 
1773 	if (!extent_op)
1774 		return NULL;
1775 
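	/*
	 * Editorial note (not in the original source): must_insert_reserved
	 * means the extent item for this head was never inserted into the
	 * extent tree, so there is no item for a flags/key update to apply
	 * to; the pending op is simply dropped.
	 */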
1776 	if (head->must_insert_reserved) {
1777 		head->extent_op = NULL;
1778 		btrfs_free_delayed_extent_op(extent_op);
1779 		return NULL;
1780 	}
1781 	return extent_op;
1782 }
1783 
1784 static int run_and_cleanup_extent_op(struct btrfs_trans_handle *trans,
1785 				     struct btrfs_delayed_ref_head *head)
1786 {
1787 	struct btrfs_delayed_extent_op *extent_op;
1788 	int ret;
1789 
1790 	extent_op = cleanup_extent_op(head);
1791 	if (!extent_op)
1792 		return 0;
1793 	head->extent_op = NULL;
1794 	spin_unlock(&head->lock);
1795 	ret = run_delayed_extent_op(trans, head, extent_op);
1796 	btrfs_free_delayed_extent_op(extent_op);
1797 	return ret ? ret : 1;
1798 }
1799 
1800 void btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info,
1801 				  struct btrfs_delayed_ref_root *delayed_refs,
1802 				  struct btrfs_delayed_ref_head *head)
1803 {
1804 	int nr_items = 1;	/* Dropping this ref head update. */
1805 
1806 	/*
1807 	 * We had csum deletions accounted for in our delayed refs rsv, we need
1808 	 * to drop the csum leaves for this update from our delayed_refs_rsv.
1809 	 */
1810 	if (head->total_ref_mod < 0 && head->is_data) {
1811 		spin_lock(&delayed_refs->lock);
1812 		delayed_refs->pending_csums -= head->num_bytes;
1813 		spin_unlock(&delayed_refs->lock);
1814 		nr_items += btrfs_csum_bytes_to_leaves(fs_info, head->num_bytes);
1815 	}
1816 
1817 	btrfs_delayed_refs_rsv_release(fs_info, nr_items);
1818 }
1819 
1820 static int cleanup_ref_head(struct btrfs_trans_handle *trans,
1821 			    struct btrfs_delayed_ref_head *head)
1822 {
1823 
1824 	struct btrfs_fs_info *fs_info = trans->fs_info;
1825 	struct btrfs_delayed_ref_root *delayed_refs;
1826 	int ret;
1827 
1828 	delayed_refs = &trans->transaction->delayed_refs;
1829 
1830 	ret = run_and_cleanup_extent_op(trans, head);
1831 	if (ret < 0) {
1832 		unselect_delayed_ref_head(delayed_refs, head);
1833 		btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
1834 		return ret;
1835 	} else if (ret) {
1836 		return ret;
1837 	}
1838 
1839 	/*
1840 	 * Need to drop our head ref lock and re-acquire the delayed ref lock
1841 	 * and then re-check to make sure nobody got added.
1842 	 */
1843 	spin_unlock(&head->lock);
1844 	spin_lock(&delayed_refs->lock);
1845 	spin_lock(&head->lock);
1846 	if (!RB_EMPTY_ROOT(&head->ref_tree.rb_root) || head->extent_op) {
1847 		spin_unlock(&head->lock);
1848 		spin_unlock(&delayed_refs->lock);
1849 		return 1;
1850 	}
1851 	btrfs_delete_ref_head(delayed_refs, head);
1852 	spin_unlock(&head->lock);
1853 	spin_unlock(&delayed_refs->lock);
1854 
1855 	if (head->must_insert_reserved) {
1856 		btrfs_pin_extent(trans, head->bytenr, head->num_bytes, 1);
1857 		if (head->is_data) {
1858 			ret = btrfs_del_csums(trans, fs_info->csum_root,
1859 					      head->bytenr, head->num_bytes);
1860 		}
1861 	}
1862 
1863 	btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);
1864 
1865 	trace_run_delayed_ref_head(fs_info, head, 0);
1866 	btrfs_delayed_ref_unlock(head);
1867 	btrfs_put_delayed_ref_head(head);
1868 	return ret;
1869 }
1870 
1871 static struct btrfs_delayed_ref_head *btrfs_obtain_ref_head(
1872 					struct btrfs_trans_handle *trans)
1873 {
1874 	struct btrfs_delayed_ref_root *delayed_refs =
1875 		&trans->transaction->delayed_refs;
1876 	struct btrfs_delayed_ref_head *head = NULL;
1877 	int ret;
1878 
1879 	spin_lock(&delayed_refs->lock);
1880 	head = btrfs_select_ref_head(delayed_refs);
1881 	if (!head) {
1882 		spin_unlock(&delayed_refs->lock);
1883 		return head;
1884 	}
1885 
1886 	/*
1887 	 * Grab the lock that says we are going to process all the refs for
1888 	 * this head
1889 	 */
1890 	ret = btrfs_delayed_ref_lock(delayed_refs, head);
1891 	spin_unlock(&delayed_refs->lock);
1892 
1893 	/*
1894 	 * We may have dropped the spin lock to get the head mutex lock, and
1895 	 * that might have given someone else time to free the head.  If that's
1896 	 * true, it has been removed from our list and we can move on.
1897 	 */
1898 	if (ret == -EAGAIN)
1899 		head = ERR_PTR(-EAGAIN);
1900 
1901 	return head;
1902 }
1903 
1904 static int btrfs_run_delayed_refs_for_head(struct btrfs_trans_handle *trans,
1905 				    struct btrfs_delayed_ref_head *locked_ref,
1906 				    unsigned long *run_refs)
1907 {
1908 	struct btrfs_fs_info *fs_info = trans->fs_info;
1909 	struct btrfs_delayed_ref_root *delayed_refs;
1910 	struct btrfs_delayed_extent_op *extent_op;
1911 	struct btrfs_delayed_ref_node *ref;
1912 	int must_insert_reserved = 0;
1913 	int ret;
1914 
1915 	delayed_refs = &trans->transaction->delayed_refs;
1916 
1917 	lockdep_assert_held(&locked_ref->mutex);
1918 	lockdep_assert_held(&locked_ref->lock);
1919 
1920 	while ((ref = select_delayed_ref(locked_ref))) {
1921 		if (ref->seq &&
1922 		    btrfs_check_delayed_seq(fs_info, ref->seq)) {
1923 			spin_unlock(&locked_ref->lock);
1924 			unselect_delayed_ref_head(delayed_refs, locked_ref);
1925 			return -EAGAIN;
1926 		}
1927 
1928 		(*run_refs)++;
1929 		ref->in_tree = 0;
1930 		rb_erase_cached(&ref->ref_node, &locked_ref->ref_tree);
1931 		RB_CLEAR_NODE(&ref->ref_node);
1932 		if (!list_empty(&ref->add_list))
1933 			list_del(&ref->add_list);
1934 		/*
1935 		 * When we play the delayed ref, also correct the ref_mod on
1936 		 * head
1937 		 */
1938 		switch (ref->action) {
1939 		case BTRFS_ADD_DELAYED_REF:
1940 		case BTRFS_ADD_DELAYED_EXTENT:
1941 			locked_ref->ref_mod -= ref->ref_mod;
1942 			break;
1943 		case BTRFS_DROP_DELAYED_REF:
1944 			locked_ref->ref_mod += ref->ref_mod;
1945 			break;
1946 		default:
1947 			WARN_ON(1);
1948 		}
1949 		atomic_dec(&delayed_refs->num_entries);
1950 
1951 		/*
1952 		 * Record the must_insert_reserved flag before we drop the
1953 		 * spin lock.
1954 		 */
1955 		must_insert_reserved = locked_ref->must_insert_reserved;
1956 		locked_ref->must_insert_reserved = 0;
1957 
1958 		extent_op = locked_ref->extent_op;
1959 		locked_ref->extent_op = NULL;
1960 		spin_unlock(&locked_ref->lock);
1961 
1962 		ret = run_one_delayed_ref(trans, ref, extent_op,
1963 					  must_insert_reserved);
1964 
1965 		btrfs_free_delayed_extent_op(extent_op);
1966 		if (ret) {
1967 			unselect_delayed_ref_head(delayed_refs, locked_ref);
1968 			btrfs_put_delayed_ref(ref);
1969 			return ret;
1970 		}
1971 
1972 		btrfs_put_delayed_ref(ref);
1973 		cond_resched();
1974 
1975 		spin_lock(&locked_ref->lock);
1976 		btrfs_merge_delayed_refs(trans, delayed_refs, locked_ref);
1977 	}
1978 
1979 	return 0;
1980 }
1981 
1982 /*
1983  * Returns 0 on success or if called with an already aborted transaction.
1984  * Returns -ENOMEM or -EIO on failure and will abort the transaction.
1985  */
1986 static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
1987 					     unsigned long nr)
1988 {
1989 	struct btrfs_fs_info *fs_info = trans->fs_info;
1990 	struct btrfs_delayed_ref_root *delayed_refs;
1991 	struct btrfs_delayed_ref_head *locked_ref = NULL;
1992 	ktime_t start = ktime_get();
1993 	int ret;
1994 	unsigned long count = 0;
1995 	unsigned long actual_count = 0;
1996 
1997 	delayed_refs = &trans->transaction->delayed_refs;
1998 	do {
1999 		if (!locked_ref) {
2000 			locked_ref = btrfs_obtain_ref_head(trans);
2001 			if (IS_ERR_OR_NULL(locked_ref)) {
2002 				if (PTR_ERR(locked_ref) == -EAGAIN) {
2003 					continue;
2004 				} else {
2005 					break;
2006 				}
2007 			}
2008 			count++;
2009 		}
2010 		/*
2011 		 * We need to try and merge add/drops of the same ref since we
2012 		 * can run into issues with relocate dropping the implicit ref
2013 		 * and then it being added back again before the drop can
2014 		 * finish.  If we merged anything we need to re-loop so we can
2015 		 * get a good ref.
2016 		 * Or we can get node references of the same type that weren't
2017 		 * merged when created due to bumps in the tree mod seq, and
2018 		 * we need to merge them to prevent adding an inline extent
2019 		 * backref before dropping it (triggering a BUG_ON at
2020 		 * insert_inline_extent_backref()).
2021 		 */
2022 		spin_lock(&locked_ref->lock);
2023 		btrfs_merge_delayed_refs(trans, delayed_refs, locked_ref);
2024 
2025 		ret = btrfs_run_delayed_refs_for_head(trans, locked_ref,
2026 						      &actual_count);
2027 		if (ret < 0 && ret != -EAGAIN) {
2028 			/*
2029 			 * Error, btrfs_run_delayed_refs_for_head already
2030 			 * unlocked everything so just bail out
2031 			 */
2032 			return ret;
2033 		} else if (!ret) {
2034 			/*
2035 			 * Success, perform the usual cleanup of a processed
2036 			 * head
2037 			 */
2038 			ret = cleanup_ref_head(trans, locked_ref);
2039 			if (ret > 0) {
2040 				/* We dropped our lock, we need to loop. */
2041 				ret = 0;
2042 				continue;
2043 			} else if (ret) {
2044 				return ret;
2045 			}
2046 		}
2047 
2048 		/*
2049 		 * Either success case or btrfs_run_delayed_refs_for_head
2050 		 * returned -EAGAIN, meaning we need to select another head
2051 		 */
2052 
2053 		locked_ref = NULL;
2054 		cond_resched();
2055 	} while ((nr != -1 && count < nr) || locked_ref);
2056 
2057 	/*
2058 	 * We don't want to include ref heads since we can have empty ref heads
2059 	 * and those will drastically skew our runtime down since we just do
2060 	 * accounting, no actual extent tree updates.
2061 	 */
2062 	if (actual_count > 0) {
2063 		u64 runtime = ktime_to_ns(ktime_sub(ktime_get(), start));
2064 		u64 avg;
2065 
2066 		/*
2067 		 * We weigh the current average higher than our current runtime
2068 		 * to avoid large swings in the average.
2069 		 */
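		/*
		 * Editorial note: the update below is an exponential moving
		 * average, avg_new = (3 * avg_old + runtime) / 4.  For
		 * example, avg_old = 8ms and runtime = 4ms give
		 * avg_new = (24ms + 4ms) / 4 = 7ms.
		 */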
2070 		spin_lock(&delayed_refs->lock);
2071 		avg = fs_info->avg_delayed_ref_runtime * 3 + runtime;
2072 		fs_info->avg_delayed_ref_runtime = avg >> 2;	/* div by 4 */
2073 		spin_unlock(&delayed_refs->lock);
2074 	}
2075 	return 0;
2076 }
2077 
2078 #ifdef SCRAMBLE_DELAYED_REFS
2079 /*
2080  * Normally delayed refs get processed in ascending bytenr order. This
2081  * correlates in most cases to the order added. To expose dependencies on this
2082  * order, we start to process the tree in the middle instead of the beginning
2083  */
2084 static u64 find_middle(struct rb_root *root)
2085 {
2086 	struct rb_node *n = root->rb_node;
2087 	struct btrfs_delayed_ref_node *entry;
2088 	int alt = 1;
2089 	u64 middle = 0;
2090 	u64 first = 0, last = 0;
2091 
2092 	n = rb_first(root);
2093 	if (n) {
2094 		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2095 		first = entry->bytenr;
2096 	}
2097 	n = rb_last(root);
2098 	if (n) {
2099 		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2100 		last = entry->bytenr;
2101 	}
2102 	n = root->rb_node;
2103 
2104 	while (n) {
2105 		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2106 		WARN_ON(!entry->in_tree);
2107 
2108 		middle = entry->bytenr;
2109 
2110 		if (alt)
2111 			n = n->rb_left;
2112 		else
2113 			n = n->rb_right;
2114 
2115 		alt = 1 - alt;
2116 	}
2117 	return middle;
2118 }
2119 #endif
2120 
2121 /*
2122  * This starts processing the delayed reference count updates and extent
2123  * insertions we have queued up so far.  count can be 0, which means to
2124  * process everything that was in the tree at the start of the run (but
2125  * not newly added entries), or it can be a target number of ref heads
2126  * to process.
2127  *
2128  * Returns 0 on success or if called with an aborted transaction
2129  * Returns <0 on error and aborts the transaction
2130  */
2131 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2132 			   unsigned long count)
2133 {
2134 	struct btrfs_fs_info *fs_info = trans->fs_info;
2135 	struct rb_node *node;
2136 	struct btrfs_delayed_ref_root *delayed_refs;
2137 	struct btrfs_delayed_ref_head *head;
2138 	int ret;
2139 	int run_all = count == (unsigned long)-1;
2140 
2141 	/* We'll clean this up in btrfs_cleanup_transaction */
2142 	if (TRANS_ABORTED(trans))
2143 		return 0;
2144 
2145 	if (test_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags))
2146 		return 0;
2147 
2148 	delayed_refs = &trans->transaction->delayed_refs;
2149 	if (count == 0)
2150 		count = delayed_refs->num_heads_ready;
2151 
2152 again:
2153 #ifdef SCRAMBLE_DELAYED_REFS
2154 	delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
2155 #endif
2156 	ret = __btrfs_run_delayed_refs(trans, count);
2157 	if (ret < 0) {
2158 		btrfs_abort_transaction(trans, ret);
2159 		return ret;
2160 	}
2161 
2162 	if (run_all) {
2163 		btrfs_create_pending_block_groups(trans);
2164 
2165 		spin_lock(&delayed_refs->lock);
2166 		node = rb_first_cached(&delayed_refs->href_root);
2167 		if (!node) {
2168 			spin_unlock(&delayed_refs->lock);
2169 			goto out;
2170 		}
2171 		head = rb_entry(node, struct btrfs_delayed_ref_head,
2172 				href_node);
2173 		refcount_inc(&head->refs);
2174 		spin_unlock(&delayed_refs->lock);
2175 
2176 		/* Mutex was contended, block until it's released and retry. */
2177 		mutex_lock(&head->mutex);
2178 		mutex_unlock(&head->mutex);
2179 
2180 		btrfs_put_delayed_ref_head(head);
2181 		cond_resched();
2182 		goto again;
2183 	}
2184 out:
2185 	return 0;
2186 }
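
/*
 * Illustrative sketch (editorial, not part of the original file): the common
 * ways a caller drives btrfs_run_delayed_refs().  The helper name is
 * hypothetical; error handling beyond the return code is elided.
 */
static inline int example_flush_delayed_refs(struct btrfs_trans_handle *trans)
{
	int ret;

	/* Process only the heads that were ready when this call started. */
	ret = btrfs_run_delayed_refs(trans, 0);
	if (ret)
		return ret;

	/*
	 * (unsigned long)-1 means "run everything": loop until the ref tree
	 * is empty, including heads added while the run was in progress.
	 */
	return btrfs_run_delayed_refs(trans, (unsigned long)-1);
}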
2187 
2188 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2189 				struct extent_buffer *eb, u64 flags,
2190 				int level, int is_data)
2191 {
2192 	struct btrfs_delayed_extent_op *extent_op;
2193 	int ret;
2194 
2195 	extent_op = btrfs_alloc_delayed_extent_op();
2196 	if (!extent_op)
2197 		return -ENOMEM;
2198 
2199 	extent_op->flags_to_set = flags;
2200 	extent_op->update_flags = true;
2201 	extent_op->update_key = false;
2202 	extent_op->is_data = is_data ? true : false;
2203 	extent_op->level = level;
2204 
2205 	ret = btrfs_add_delayed_extent_op(trans, eb->start, eb->len, extent_op);
2206 	if (ret)
2207 		btrfs_free_delayed_extent_op(extent_op);
2208 	return ret;
2209 }
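
/*
 * Illustrative sketch (editorial, not part of the original file): queueing a
 * delayed extent op that sets the FULL_BACKREF flag on a tree block.  The
 * helper name is hypothetical.
 */
static inline int example_set_full_backref(struct btrfs_trans_handle *trans,
					   struct extent_buffer *eb)
{
	return btrfs_set_disk_extent_flags(trans, eb,
					   BTRFS_BLOCK_FLAG_FULL_BACKREF,
					   btrfs_header_level(eb), 0);
}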
2210 
2211 static noinline int check_delayed_ref(struct btrfs_root *root,
2212 				      struct btrfs_path *path,
2213 				      u64 objectid, u64 offset, u64 bytenr)
2214 {
2215 	struct btrfs_delayed_ref_head *head;
2216 	struct btrfs_delayed_ref_node *ref;
2217 	struct btrfs_delayed_data_ref *data_ref;
2218 	struct btrfs_delayed_ref_root *delayed_refs;
2219 	struct btrfs_transaction *cur_trans;
2220 	struct rb_node *node;
2221 	int ret = 0;
2222 
2223 	spin_lock(&root->fs_info->trans_lock);
2224 	cur_trans = root->fs_info->running_transaction;
2225 	if (cur_trans)
2226 		refcount_inc(&cur_trans->use_count);
2227 	spin_unlock(&root->fs_info->trans_lock);
2228 	if (!cur_trans)
2229 		return 0;
2230 
2231 	delayed_refs = &cur_trans->delayed_refs;
2232 	spin_lock(&delayed_refs->lock);
2233 	head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
2234 	if (!head) {
2235 		spin_unlock(&delayed_refs->lock);
2236 		btrfs_put_transaction(cur_trans);
2237 		return 0;
2238 	}
2239 
2240 	if (!mutex_trylock(&head->mutex)) {
2241 		refcount_inc(&head->refs);
2242 		spin_unlock(&delayed_refs->lock);
2243 
2244 		btrfs_release_path(path);
2245 
2246 		/*
2247 		 * Mutex was contended, block until it's released and let
2248 		 * caller try again
2249 		 */
2250 		mutex_lock(&head->mutex);
2251 		mutex_unlock(&head->mutex);
2252 		btrfs_put_delayed_ref_head(head);
2253 		btrfs_put_transaction(cur_trans);
2254 		return -EAGAIN;
2255 	}
2256 	spin_unlock(&delayed_refs->lock);
2257 
2258 	spin_lock(&head->lock);
2259 	/*
2260 	 * XXX: We should replace this with a proper search function in the
2261 	 * future.
2262 	 */
2263 	for (node = rb_first_cached(&head->ref_tree); node;
2264 	     node = rb_next(node)) {
2265 		ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
2266 		/* If it's a shared ref we know a cross reference exists */
2267 		if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) {
2268 			ret = 1;
2269 			break;
2270 		}
2271 
2272 		data_ref = btrfs_delayed_node_to_data_ref(ref);
2273 
2274 		/*
2275 		 * If our ref doesn't match the one we're currently looking at
2276 		 * then we have a cross reference.
2277 		 */
2278 		if (data_ref->root != root->root_key.objectid ||
2279 		    data_ref->objectid != objectid ||
2280 		    data_ref->offset != offset) {
2281 			ret = 1;
2282 			break;
2283 		}
2284 	}
2285 	spin_unlock(&head->lock);
2286 	mutex_unlock(&head->mutex);
2287 	btrfs_put_transaction(cur_trans);
2288 	return ret;
2289 }
2290 
2291 static noinline int check_committed_ref(struct btrfs_root *root,
2292 					struct btrfs_path *path,
2293 					u64 objectid, u64 offset, u64 bytenr,
2294 					bool strict)
2295 {
2296 	struct btrfs_fs_info *fs_info = root->fs_info;
2297 	struct btrfs_root *extent_root = fs_info->extent_root;
2298 	struct extent_buffer *leaf;
2299 	struct btrfs_extent_data_ref *ref;
2300 	struct btrfs_extent_inline_ref *iref;
2301 	struct btrfs_extent_item *ei;
2302 	struct btrfs_key key;
2303 	u32 item_size;
2304 	int type;
2305 	int ret;
2306 
2307 	key.objectid = bytenr;
2308 	key.offset = (u64)-1;
2309 	key.type = BTRFS_EXTENT_ITEM_KEY;
2310 
2311 	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2312 	if (ret < 0)
2313 		goto out;
2314 	BUG_ON(ret == 0); /* Corruption */
2315 
2316 	ret = -ENOENT;
2317 	if (path->slots[0] == 0)
2318 		goto out;
2319 
2320 	path->slots[0]--;
2321 	leaf = path->nodes[0];
2322 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2323 
2324 	if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
2325 		goto out;
2326 
2327 	ret = 1;
2328 	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2329 	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2330 
2331 	/* If extent item has more than 1 inline ref then it's shared */
2332 	if (item_size != sizeof(*ei) +
2333 	    btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
2334 		goto out;
2335 
2336 	/*
2337 	 * If extent created before last snapshot => it's shared unless the
2338 	 * snapshot has been deleted. Use the heuristic if strict is false.
2339 	 */
2340 	if (!strict &&
2341 	    (btrfs_extent_generation(leaf, ei) <=
2342 	     btrfs_root_last_snapshot(&root->root_item)))
2343 		goto out;
2344 
2345 	iref = (struct btrfs_extent_inline_ref *)(ei + 1);
2346 
2347 	/* If this extent has SHARED_DATA_REF then it's shared */
2348 	type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_DATA);
2349 	if (type != BTRFS_EXTENT_DATA_REF_KEY)
2350 		goto out;
2351 
2352 	ref = (struct btrfs_extent_data_ref *)(&iref->offset);
2353 	if (btrfs_extent_refs(leaf, ei) !=
2354 	    btrfs_extent_data_ref_count(leaf, ref) ||
2355 	    btrfs_extent_data_ref_root(leaf, ref) !=
2356 	    root->root_key.objectid ||
2357 	    btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
2358 	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
2359 		goto out;
2360 
2361 	ret = 0;
2362 out:
2363 	return ret;
2364 }
2365 
2366 int btrfs_cross_ref_exist(struct btrfs_root *root, u64 objectid, u64 offset,
2367 			  u64 bytenr, bool strict)
2368 {
2369 	struct btrfs_path *path;
2370 	int ret;
2371 
2372 	path = btrfs_alloc_path();
2373 	if (!path)
2374 		return -ENOMEM;
2375 
2376 	do {
2377 		ret = check_committed_ref(root, path, objectid,
2378 					  offset, bytenr, strict);
2379 		if (ret && ret != -ENOENT)
2380 			goto out;
2381 
2382 		ret = check_delayed_ref(root, path, objectid, offset, bytenr);
2383 	} while (ret == -EAGAIN);
2384 
2385 out:
2386 	btrfs_free_path(path);
2387 	if (btrfs_is_data_reloc_root(root))
2388 		WARN_ON(ret > 0);
2389 	return ret;
2390 }
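
/*
 * Illustrative sketch (editorial, not part of the original file): a typical
 * use of btrfs_cross_ref_exist() is deciding whether a data extent is
 * referenced only by this (inode, offset), e.g. before overwriting it in
 * place for NOCOW I/O.  The helper name is hypothetical.
 */
static inline bool example_extent_is_exclusive(struct btrfs_root *root,
					       u64 ino, u64 file_offset,
					       u64 disk_bytenr)
{
	/* 0 means no other ref was found; > 0 or < 0 means shared/unsure. */
	return btrfs_cross_ref_exist(root, ino, file_offset, disk_bytenr,
				     true) == 0;
}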
2391 
2392 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
2393 			   struct btrfs_root *root,
2394 			   struct extent_buffer *buf,
2395 			   int full_backref, int inc)
2396 {
2397 	struct btrfs_fs_info *fs_info = root->fs_info;
2398 	u64 bytenr;
2399 	u64 num_bytes;
2400 	u64 parent;
2401 	u64 ref_root;
2402 	u32 nritems;
2403 	struct btrfs_key key;
2404 	struct btrfs_file_extent_item *fi;
2405 	struct btrfs_ref generic_ref = { 0 };
2406 	bool for_reloc = btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC);
2407 	int i;
2408 	int action;
2409 	int level;
2410 	int ret = 0;
2411 
2412 	if (btrfs_is_testing(fs_info))
2413 		return 0;
2414 
2415 	ref_root = btrfs_header_owner(buf);
2416 	nritems = btrfs_header_nritems(buf);
2417 	level = btrfs_header_level(buf);
2418 
2419 	if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state) && level == 0)
2420 		return 0;
2421 
2422 	if (full_backref)
2423 		parent = buf->start;
2424 	else
2425 		parent = 0;
2426 	if (inc)
2427 		action = BTRFS_ADD_DELAYED_REF;
2428 	else
2429 		action = BTRFS_DROP_DELAYED_REF;
2430 
2431 	for (i = 0; i < nritems; i++) {
2432 		if (level == 0) {
2433 			btrfs_item_key_to_cpu(buf, &key, i);
2434 			if (key.type != BTRFS_EXTENT_DATA_KEY)
2435 				continue;
2436 			fi = btrfs_item_ptr(buf, i,
2437 					    struct btrfs_file_extent_item);
2438 			if (btrfs_file_extent_type(buf, fi) ==
2439 			    BTRFS_FILE_EXTENT_INLINE)
2440 				continue;
2441 			bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
2442 			if (bytenr == 0)
2443 				continue;
2444 
2445 			num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
2446 			key.offset -= btrfs_file_extent_offset(buf, fi);
2447 			btrfs_init_generic_ref(&generic_ref, action, bytenr,
2448 					       num_bytes, parent);
2449 			generic_ref.real_root = root->root_key.objectid;
2450 			btrfs_init_data_ref(&generic_ref, ref_root, key.objectid,
2451 					    key.offset, root->root_key.objectid,
2452 					    for_reloc);
2453 			generic_ref.skip_qgroup = for_reloc;
2454 			if (inc)
2455 				ret = btrfs_inc_extent_ref(trans, &generic_ref);
2456 			else
2457 				ret = btrfs_free_extent(trans, &generic_ref);
2458 			if (ret)
2459 				goto fail;
2460 		} else {
2461 			bytenr = btrfs_node_blockptr(buf, i);
2462 			num_bytes = fs_info->nodesize;
2463 			btrfs_init_generic_ref(&generic_ref, action, bytenr,
2464 					       num_bytes, parent);
2465 			generic_ref.real_root = root->root_key.objectid;
2466 			btrfs_init_tree_ref(&generic_ref, level - 1, ref_root,
2467 					    root->root_key.objectid, for_reloc);
2468 			generic_ref.skip_qgroup = for_reloc;
2469 			if (inc)
2470 				ret = btrfs_inc_extent_ref(trans, &generic_ref);
2471 			else
2472 				ret = btrfs_free_extent(trans, &generic_ref);
2473 			if (ret)
2474 				goto fail;
2475 		}
2476 	}
2477 	return 0;
2478 fail:
2479 	return ret;
2480 }
2481 
2482 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2483 		  struct extent_buffer *buf, int full_backref)
2484 {
2485 	return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
2486 }
2487 
2488 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2489 		  struct extent_buffer *buf, int full_backref)
2490 {
2491 	return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
2492 }
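
/*
 * Illustrative sketch (editorial, not part of the original file): one
 * plausible pairing of the two wrappers above when COWing a shared btree
 * block: add references through the new copy before dropping them through
 * the old one, so no extent hits zero refs in between.  The helper name is
 * hypothetical and the full-backref arguments depend on how the block is
 * shared.
 */
static inline int example_switch_refs(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct extent_buffer *old_buf,
				      struct extent_buffer *new_buf)
{
	int ret;

	ret = btrfs_inc_ref(trans, root, new_buf, 1);
	if (ret)
		return ret;
	return btrfs_dec_ref(trans, root, old_buf, 1);
}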
2493 
2494 static u64 get_alloc_profile_by_root(struct btrfs_root *root, int data)
2495 {
2496 	struct btrfs_fs_info *fs_info = root->fs_info;
2497 	u64 flags;
2498 	u64 ret;
2499 
2500 	if (data)
2501 		flags = BTRFS_BLOCK_GROUP_DATA;
2502 	else if (root == fs_info->chunk_root)
2503 		flags = BTRFS_BLOCK_GROUP_SYSTEM;
2504 	else
2505 		flags = BTRFS_BLOCK_GROUP_METADATA;
2506 
2507 	ret = btrfs_get_alloc_profile(fs_info, flags);
2508 	return ret;
2509 }
2510 
2511 static u64 first_logical_byte(struct btrfs_fs_info *fs_info, u64 search_start)
2512 {
2513 	struct btrfs_block_group *cache;
2514 	u64 bytenr;
2515 
2516 	spin_lock(&fs_info->block_group_cache_lock);
2517 	bytenr = fs_info->first_logical_byte;
2518 	spin_unlock(&fs_info->block_group_cache_lock);
2519 
2520 	if (bytenr < (u64)-1)
2521 		return bytenr;
2522 
2523 	cache = btrfs_lookup_first_block_group(fs_info, search_start);
2524 	if (!cache)
2525 		return 0;
2526 
2527 	bytenr = cache->start;
2528 	btrfs_put_block_group(cache);
2529 
2530 	return bytenr;
2531 }
2532 
2533 static int pin_down_extent(struct btrfs_trans_handle *trans,
2534 			   struct btrfs_block_group *cache,
2535 			   u64 bytenr, u64 num_bytes, int reserved)
2536 {
2537 	struct btrfs_fs_info *fs_info = cache->fs_info;
2538 
2539 	spin_lock(&cache->space_info->lock);
2540 	spin_lock(&cache->lock);
2541 	cache->pinned += num_bytes;
2542 	btrfs_space_info_update_bytes_pinned(fs_info, cache->space_info,
2543 					     num_bytes);
2544 	if (reserved) {
2545 		cache->reserved -= num_bytes;
2546 		cache->space_info->bytes_reserved -= num_bytes;
2547 	}
2548 	spin_unlock(&cache->lock);
2549 	spin_unlock(&cache->space_info->lock);
2550 
2551 	set_extent_dirty(&trans->transaction->pinned_extents, bytenr,
2552 			 bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
2553 	return 0;
2554 }
2555 
2556 int btrfs_pin_extent(struct btrfs_trans_handle *trans,
2557 		     u64 bytenr, u64 num_bytes, int reserved)
2558 {
2559 	struct btrfs_block_group *cache;
2560 
2561 	cache = btrfs_lookup_block_group(trans->fs_info, bytenr);
2562 	BUG_ON(!cache); /* Logic error */
2563 
2564 	pin_down_extent(trans, cache, bytenr, num_bytes, reserved);
2565 
2566 	btrfs_put_block_group(cache);
2567 	return 0;
2568 }
2569 
2570 /*
2571  * This function must be called within a transaction.
2572  */
2573 int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans,
2574 				    u64 bytenr, u64 num_bytes)
2575 {
2576 	struct btrfs_block_group *cache;
2577 	int ret;
2578 
2579 	cache = btrfs_lookup_block_group(trans->fs_info, bytenr);
2580 	if (!cache)
2581 		return -EINVAL;
2582 
2583 	/*
2584 	 * Fully cache the free space first so that our pin removes the free space
2585 	 * from the cache.
2586 	 */
2587 	ret = btrfs_cache_block_group(cache, true);
2588 	if (ret)
2589 		goto out;
2590 
2591 	pin_down_extent(trans, cache, bytenr, num_bytes, 0);
2592 
2593 	/* remove us from the free space cache (if we're there at all) */
2594 	ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
2595 out:
2596 	btrfs_put_block_group(cache);
2597 	return ret;
2598 }
2599 
2600 static int __exclude_logged_extent(struct btrfs_fs_info *fs_info,
2601 				   u64 start, u64 num_bytes)
2602 {
2603 	int ret;
2604 	struct btrfs_block_group *block_group;
2605 
2606 	block_group = btrfs_lookup_block_group(fs_info, start);
2607 	if (!block_group)
2608 		return -EINVAL;
2609 
2610 	ret = btrfs_cache_block_group(block_group, true);
2611 	if (ret)
2612 		goto out;
2613 
2614 	ret = btrfs_remove_free_space(block_group, start, num_bytes);
2615 out:
2616 	btrfs_put_block_group(block_group);
2617 	return ret;
2618 }
2619 
2620 int btrfs_exclude_logged_extents(struct extent_buffer *eb)
2621 {
2622 	struct btrfs_fs_info *fs_info = eb->fs_info;
2623 	struct btrfs_file_extent_item *item;
2624 	struct btrfs_key key;
2625 	int found_type;
2626 	int i;
2627 	int ret = 0;
2628 
2629 	if (!btrfs_fs_incompat(fs_info, MIXED_GROUPS))
2630 		return 0;
2631 
2632 	for (i = 0; i < btrfs_header_nritems(eb); i++) {
2633 		btrfs_item_key_to_cpu(eb, &key, i);
2634 		if (key.type != BTRFS_EXTENT_DATA_KEY)
2635 			continue;
2636 		item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
2637 		found_type = btrfs_file_extent_type(eb, item);
2638 		if (found_type == BTRFS_FILE_EXTENT_INLINE)
2639 			continue;
2640 		if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
2641 			continue;
2642 		key.objectid = btrfs_file_extent_disk_bytenr(eb, item);
2643 		key.offset = btrfs_file_extent_disk_num_bytes(eb, item);
2644 		ret = __exclude_logged_extent(fs_info, key.objectid, key.offset);
2645 		if (ret)
2646 			break;
2647 	}
2648 
2649 	return ret;
2650 }
2651 
2652 static void
2653 btrfs_inc_block_group_reservations(struct btrfs_block_group *bg)
2654 {
2655 	atomic_inc(&bg->reservations);
2656 }
2657 
2658 /*
2659  * Returns the free cluster for the given space info and sets empty_cluster to
2660  * what it should be based on the mount options.
2661  */
2662 static struct btrfs_free_cluster *
2663 fetch_cluster_info(struct btrfs_fs_info *fs_info,
2664 		   struct btrfs_space_info *space_info, u64 *empty_cluster)
2665 {
2666 	struct btrfs_free_cluster *ret = NULL;
2667 
2668 	*empty_cluster = 0;
2669 	if (btrfs_mixed_space_info(space_info))
2670 		return ret;
2671 
2672 	if (space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
2673 		ret = &fs_info->meta_alloc_cluster;
2674 		if (btrfs_test_opt(fs_info, SSD))
2675 			*empty_cluster = SZ_2M;
2676 		else
2677 			*empty_cluster = SZ_64K;
2678 	} else if ((space_info->flags & BTRFS_BLOCK_GROUP_DATA) &&
2679 		   btrfs_test_opt(fs_info, SSD_SPREAD)) {
2680 		*empty_cluster = SZ_2M;
2681 		ret = &fs_info->data_alloc_cluster;
2682 	}
2683 
2684 	return ret;
2685 }
2686 
2687 static int unpin_extent_range(struct btrfs_fs_info *fs_info,
2688 			      u64 start, u64 end,
2689 			      const bool return_free_space)
2690 {
2691 	struct btrfs_block_group *cache = NULL;
2692 	struct btrfs_space_info *space_info;
2693 	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
2694 	struct btrfs_free_cluster *cluster = NULL;
2695 	u64 len;
2696 	u64 total_unpinned = 0;
2697 	u64 empty_cluster = 0;
2698 	bool readonly;
2699 
2700 	while (start <= end) {
2701 		readonly = false;
2702 		if (!cache ||
2703 		    start >= cache->start + cache->length) {
2704 			if (cache)
2705 				btrfs_put_block_group(cache);
2706 			total_unpinned = 0;
2707 			cache = btrfs_lookup_block_group(fs_info, start);
2708 			BUG_ON(!cache); /* Logic error */
2709 
2710 			cluster = fetch_cluster_info(fs_info,
2711 						     cache->space_info,
2712 						     &empty_cluster);
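			/*
			 * Editorial note: double the threshold so the
			 * "fragmented" flag below is only cleared once a
			 * comfortable amount of space has been unpinned.
			 */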
2713 			empty_cluster <<= 1;
2714 		}
2715 
2716 		len = cache->start + cache->length - start;
2717 		len = min(len, end + 1 - start);
2718 
2719 		down_read(&fs_info->commit_root_sem);
2720 		if (start < cache->last_byte_to_unpin && return_free_space) {
2721 			u64 add_len = min(len, cache->last_byte_to_unpin - start);
2722 
2723 			btrfs_add_free_space(cache, start, add_len);
2724 		}
2725 		up_read(&fs_info->commit_root_sem);
2726 
2727 		start += len;
2728 		total_unpinned += len;
2729 		space_info = cache->space_info;
2730 
2731 		/*
2732 		 * If this space cluster has been marked as fragmented and we've
2733 		 * unpinned enough in this block group to potentially allow a
2734 		 * cluster to be created inside of it go ahead and clear the
2735 		 * fragmented check.
2736 		 */
2737 		if (cluster && cluster->fragmented &&
2738 		    total_unpinned > empty_cluster) {
2739 			spin_lock(&cluster->lock);
2740 			cluster->fragmented = 0;
2741 			spin_unlock(&cluster->lock);
2742 		}
2743 
2744 		spin_lock(&space_info->lock);
2745 		spin_lock(&cache->lock);
2746 		cache->pinned -= len;
2747 		btrfs_space_info_update_bytes_pinned(fs_info, space_info, -len);
2748 		space_info->max_extent_size = 0;
2749 		if (cache->ro) {
2750 			space_info->bytes_readonly += len;
2751 			readonly = true;
2752 		} else if (btrfs_is_zoned(fs_info)) {
2753 			/* Need reset before reusing in a zoned block group */
2754 			space_info->bytes_zone_unusable += len;
2755 			readonly = true;
2756 		}
2757 		spin_unlock(&cache->lock);
2758 		if (!readonly && return_free_space &&
2759 		    global_rsv->space_info == space_info) {
2760 			u64 to_add = len;
2761 
2762 			spin_lock(&global_rsv->lock);
2763 			if (!global_rsv->full) {
2764 				to_add = min(len, global_rsv->size -
2765 					     global_rsv->reserved);
2766 				global_rsv->reserved += to_add;
2767 				btrfs_space_info_update_bytes_may_use(fs_info,
2768 						space_info, to_add);
2769 				if (global_rsv->reserved >= global_rsv->size)
2770 					global_rsv->full = 1;
2771 				len -= to_add;
2772 			}
2773 			spin_unlock(&global_rsv->lock);
2774 		}
2775 		/* Add to any tickets we may have */
2776 		if (!readonly && return_free_space && len)
2777 			btrfs_try_granting_tickets(fs_info, space_info);
2778 		spin_unlock(&space_info->lock);
2779 	}
2780 
2781 	if (cache)
2782 		btrfs_put_block_group(cache);
2783 	return 0;
2784 }
2785 
2786 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans)
2787 {
2788 	struct btrfs_fs_info *fs_info = trans->fs_info;
2789 	struct btrfs_block_group *block_group, *tmp;
2790 	struct list_head *deleted_bgs;
2791 	struct extent_io_tree *unpin;
2792 	u64 start;
2793 	u64 end;
2794 	int ret;
2795 
2796 	unpin = &trans->transaction->pinned_extents;
2797 
2798 	while (!TRANS_ABORTED(trans)) {
2799 		struct extent_state *cached_state = NULL;
2800 
2801 		mutex_lock(&fs_info->unused_bg_unpin_mutex);
2802 		ret = find_first_extent_bit(unpin, 0, &start, &end,
2803 					    EXTENT_DIRTY, &cached_state);
2804 		if (ret) {
2805 			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
2806 			break;
2807 		}
2808 
2809 		if (btrfs_test_opt(fs_info, DISCARD_SYNC))
2810 			ret = btrfs_discard_extent(fs_info, start,
2811 						   end + 1 - start, NULL);
2812 
2813 		clear_extent_dirty(unpin, start, end, &cached_state);
2814 		unpin_extent_range(fs_info, start, end, true);
2815 		mutex_unlock(&fs_info->unused_bg_unpin_mutex);
2816 		free_extent_state(cached_state);
2817 		cond_resched();
2818 	}
2819 
2820 	if (btrfs_test_opt(fs_info, DISCARD_ASYNC)) {
2821 		btrfs_discard_calc_delay(&fs_info->discard_ctl);
2822 		btrfs_discard_schedule_work(&fs_info->discard_ctl, true);
2823 	}
2824 
2825 	/*
2826 	 * Transaction is finished.  We don't need the lock anymore.  We
2827 	 * do need to clean up the block groups in case of a transaction
2828 	 * abort.
2829 	 */
2830 	deleted_bgs = &trans->transaction->deleted_bgs;
2831 	list_for_each_entry_safe(block_group, tmp, deleted_bgs, bg_list) {
2832 		u64 trimmed = 0;
2833 
2834 		ret = -EROFS;
2835 		if (!TRANS_ABORTED(trans))
2836 			ret = btrfs_discard_extent(fs_info,
2837 						   block_group->start,
2838 						   block_group->length,
2839 						   &trimmed);
2840 
2841 		list_del_init(&block_group->bg_list);
2842 		btrfs_unfreeze_block_group(block_group);
2843 		btrfs_put_block_group(block_group);
2844 
2845 		if (ret) {
2846 			const char *errstr = btrfs_decode_error(ret);
2847 			btrfs_warn(fs_info,
2848 			   "discard failed while removing blockgroup: errno=%d %s",
2849 				   ret, errstr);
2850 		}
2851 	}
2852 
2853 	return 0;
2854 }
2855 
2856 /*
2857  * Drop one or more refs of @node.
2858  *
2859  * 1. Locate the extent refs.
2860  *    It's either inline in EXTENT/METADATA_ITEM or in keyed SHARED_* item.
2861  *    Locate it, then reduce the refs number or remove the ref line completely.
2862  *
2863  * 2. Update the refs count in EXTENT/METADATA_ITEM
2864  *
2865  * Inline backref case:
2866  *
2867  * in extent tree we have:
2868  *
2869  * 	item 0 key (13631488 EXTENT_ITEM 1048576) itemoff 16201 itemsize 82
2870  *		refs 2 gen 6 flags DATA
2871  *		extent data backref root FS_TREE objectid 258 offset 0 count 1
2872  *		extent data backref root FS_TREE objectid 257 offset 0 count 1
2873  *
2874  * This function gets called with:
2875  *
2876  *    node->bytenr = 13631488
2877  *    node->num_bytes = 1048576
2878  *    root_objectid = FS_TREE
2879  *    owner_objectid = 257
2880  *    owner_offset = 0
2881  *    refs_to_drop = 1
2882  *
2883  * Then we should get something like:
2884  *
2885  * 	item 0 key (13631488 EXTENT_ITEM 1048576) itemoff 16201 itemsize 82
2886  *		refs 1 gen 6 flags DATA
2887  *		extent data backref root FS_TREE objectid 258 offset 0 count 1
2888  *
2889  * Keyed backref case:
2890  *
2891  * in extent tree we have:
2892  *
2893  *	item 0 key (13631488 EXTENT_ITEM 1048576) itemoff 3971 itemsize 24
2894  *		refs 754 gen 6 flags DATA
2895  *	[...]
2896  *	item 2 key (13631488 EXTENT_DATA_REF <HASH>) itemoff 3915 itemsize 28
2897  *		extent data backref root FS_TREE objectid 866 offset 0 count 1
2898  *
2899  * This function gets called with:
2900  *
2901  *    node->bytenr = 13631488
2902  *    node->num_bytes = 1048576
2903  *    root_objectid = FS_TREE
2904  *    owner_objectid = 866
2905  *    owner_offset = 0
2906  *    refs_to_drop = 1
2907  *
2908  * Then we should get something like:
2909  *
2910  *	item 0 key (13631488 EXTENT_ITEM 1048576) itemoff 3971 itemsize 24
2911  *		refs 753 gen 6 flags DATA
2912  *
2913  * And that (13631488 EXTENT_DATA_REF <HASH>) gets removed.
2914  */
2915 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
2916 			       struct btrfs_delayed_ref_node *node, u64 parent,
2917 			       u64 root_objectid, u64 owner_objectid,
2918 			       u64 owner_offset, int refs_to_drop,
2919 			       struct btrfs_delayed_extent_op *extent_op)
2920 {
2921 	struct btrfs_fs_info *info = trans->fs_info;
2922 	struct btrfs_key key;
2923 	struct btrfs_path *path;
2924 	struct btrfs_root *extent_root = info->extent_root;
2925 	struct extent_buffer *leaf;
2926 	struct btrfs_extent_item *ei;
2927 	struct btrfs_extent_inline_ref *iref;
2928 	int ret;
2929 	int is_data;
2930 	int extent_slot = 0;
2931 	int found_extent = 0;
2932 	int num_to_del = 1;
2933 	u32 item_size;
2934 	u64 refs;
2935 	u64 bytenr = node->bytenr;
2936 	u64 num_bytes = node->num_bytes;
2937 	int last_ref = 0;
2938 	bool skinny_metadata = btrfs_fs_incompat(info, SKINNY_METADATA);
2939 
2940 	path = btrfs_alloc_path();
2941 	if (!path)
2942 		return -ENOMEM;
2943 
2944 	is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
2945 
2946 	if (!is_data && refs_to_drop != 1) {
2947 		btrfs_crit(info,
2948 "invalid refs_to_drop, dropping more than 1 refs for tree block %llu refs_to_drop %u",
2949 			   node->bytenr, refs_to_drop);
2950 		ret = -EINVAL;
2951 		btrfs_abort_transaction(trans, ret);
2952 		goto out;
2953 	}
2954 
2955 	if (is_data)
2956 		skinny_metadata = false;
2957 
2958 	ret = lookup_extent_backref(trans, path, &iref, bytenr, num_bytes,
2959 				    parent, root_objectid, owner_objectid,
2960 				    owner_offset);
2961 	if (ret == 0) {
2962 		/*
2963 		 * Either the inline backref or the SHARED_DATA_REF/
2964 		 * SHARED_BLOCK_REF is found
2965 		 *
2966 		 * Here is a quick path to locate EXTENT/METADATA_ITEM.
2967 		 * It's possible the EXTENT/METADATA_ITEM is near the current slot.
2968 		 */
2969 		extent_slot = path->slots[0];
2970 		while (extent_slot >= 0) {
2971 			btrfs_item_key_to_cpu(path->nodes[0], &key,
2972 					      extent_slot);
2973 			if (key.objectid != bytenr)
2974 				break;
2975 			if (key.type == BTRFS_EXTENT_ITEM_KEY &&
2976 			    key.offset == num_bytes) {
2977 				found_extent = 1;
2978 				break;
2979 			}
2980 			if (key.type == BTRFS_METADATA_ITEM_KEY &&
2981 			    key.offset == owner_objectid) {
2982 				found_extent = 1;
2983 				break;
2984 			}
2985 
2986 			/* Quick path didn't find the EXTENT/METADATA_ITEM */
2987 			if (path->slots[0] - extent_slot > 5)
2988 				break;
2989 			extent_slot--;
2990 		}
2991 
2992 		if (!found_extent) {
2993 			if (iref) {
2994 				btrfs_crit(info,
2995 "invalid iref, no EXTENT/METADATA_ITEM found but has inline extent ref");
2996 				btrfs_abort_transaction(trans, -EUCLEAN);
2997 				goto err_dump;
2998 			}
2999 			/* Must be SHARED_* item, remove the backref first */
3000 			ret = remove_extent_backref(trans, path, NULL,
3001 						    refs_to_drop,
3002 						    is_data, &last_ref);
3003 			if (ret) {
3004 				btrfs_abort_transaction(trans, ret);
3005 				goto out;
3006 			}
3007 			btrfs_release_path(path);
3008 
3009 			/* Slow path to locate EXTENT/METADATA_ITEM */
3010 			key.objectid = bytenr;
3011 			key.type = BTRFS_EXTENT_ITEM_KEY;
3012 			key.offset = num_bytes;
3013 
3014 			if (!is_data && skinny_metadata) {
3015 				key.type = BTRFS_METADATA_ITEM_KEY;
3016 				key.offset = owner_objectid;
3017 			}
3018 
3019 			ret = btrfs_search_slot(trans, extent_root,
3020 						&key, path, -1, 1);
3021 			if (ret > 0 && skinny_metadata && path->slots[0]) {
3022 				/*
3023 				 * Couldn't find our skinny metadata item,
3024 				 * see if we have ye olde extent item.
3025 				 */
3026 				path->slots[0]--;
3027 				btrfs_item_key_to_cpu(path->nodes[0], &key,
3028 						      path->slots[0]);
3029 				if (key.objectid == bytenr &&
3030 				    key.type == BTRFS_EXTENT_ITEM_KEY &&
3031 				    key.offset == num_bytes)
3032 					ret = 0;
3033 			}
3034 
3035 			if (ret > 0 && skinny_metadata) {
3036 				skinny_metadata = false;
3037 				key.objectid = bytenr;
3038 				key.type = BTRFS_EXTENT_ITEM_KEY;
3039 				key.offset = num_bytes;
3040 				btrfs_release_path(path);
3041 				ret = btrfs_search_slot(trans, extent_root,
3042 							&key, path, -1, 1);
3043 			}
3044 
3045 			if (ret) {
3046 				btrfs_err(info,
3047 					  "umm, got %d back from search, was looking for %llu",
3048 					  ret, bytenr);
3049 				if (ret > 0)
3050 					btrfs_print_leaf(path->nodes[0]);
3051 			}
3052 			if (ret < 0) {
3053 				btrfs_abort_transaction(trans, ret);
3054 				goto out;
3055 			}
3056 			extent_slot = path->slots[0];
3057 		}
3058 	} else if (WARN_ON(ret == -ENOENT)) {
3059 		btrfs_print_leaf(path->nodes[0]);
3060 		btrfs_err(info,
3061 			"unable to find ref byte nr %llu parent %llu root %llu  owner %llu offset %llu",
3062 			bytenr, parent, root_objectid, owner_objectid,
3063 			owner_offset);
3064 		btrfs_abort_transaction(trans, ret);
3065 		goto out;
3066 	} else {
3067 		btrfs_abort_transaction(trans, ret);
3068 		goto out;
3069 	}
3070 
3071 	leaf = path->nodes[0];
3072 	item_size = btrfs_item_size_nr(leaf, extent_slot);
3073 	if (unlikely(item_size < sizeof(*ei))) {
3074 		ret = -EINVAL;
3075 		btrfs_print_v0_err(info);
3076 		btrfs_abort_transaction(trans, ret);
3077 		goto out;
3078 	}
3079 	ei = btrfs_item_ptr(leaf, extent_slot,
3080 			    struct btrfs_extent_item);
3081 	if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
3082 	    key.type == BTRFS_EXTENT_ITEM_KEY) {
3083 		struct btrfs_tree_block_info *bi;
3084 		if (item_size < sizeof(*ei) + sizeof(*bi)) {
3085 			btrfs_crit(info,
3086 "invalid extent item size for key (%llu, %u, %llu) owner %llu, has %u expect >= %zu",
3087 				   key.objectid, key.type, key.offset,
3088 				   owner_objectid, item_size,
3089 				   sizeof(*ei) + sizeof(*bi));
3090 			btrfs_abort_transaction(trans, -EUCLEAN);
3091 			goto err_dump;
3092 		}
3093 		bi = (struct btrfs_tree_block_info *)(ei + 1);
3094 		WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
3095 	}
3096 
3097 	refs = btrfs_extent_refs(leaf, ei);
3098 	if (refs < refs_to_drop) {
3099 		btrfs_crit(info,
3100 		"trying to drop %d refs but we only have %llu for bytenr %llu",
3101 			  refs_to_drop, refs, bytenr);
3102 		btrfs_abort_transaction(trans, -EUCLEAN);
3103 		goto err_dump;
3104 	}
3105 	refs -= refs_to_drop;
3106 
3107 	if (refs > 0) {
3108 		if (extent_op)
3109 			__run_delayed_extent_op(extent_op, leaf, ei);
3110 		/*
3111 		 * In the case of an inline backref, the reference count will
3112 		 * be updated by remove_extent_backref()
3113 		 */
3114 		if (iref) {
3115 			if (!found_extent) {
3116 				btrfs_crit(info,
3117 "invalid iref, got inlined extent ref but no EXTENT/METADATA_ITEM found");
3118 				btrfs_abort_transaction(trans, -EUCLEAN);
3119 				goto err_dump;
3120 			}
3121 		} else {
3122 			btrfs_set_extent_refs(leaf, ei, refs);
3123 			btrfs_mark_buffer_dirty(leaf);
3124 		}
3125 		if (found_extent) {
3126 			ret = remove_extent_backref(trans, path, iref,
3127 						    refs_to_drop, is_data,
3128 						    &last_ref);
3129 			if (ret) {
3130 				btrfs_abort_transaction(trans, ret);
3131 				goto out;
3132 			}
3133 		}
3134 	} else {
3135 		/* In this branch refs == 1 */
3136 		if (found_extent) {
3137 			if (is_data && refs_to_drop !=
3138 			    extent_data_ref_count(path, iref)) {
3139 				btrfs_crit(info,
3140 		"invalid refs_to_drop, current refs %u refs_to_drop %u",
3141 					   extent_data_ref_count(path, iref),
3142 					   refs_to_drop);
3143 				btrfs_abort_transaction(trans, -EUCLEAN);
3144 				goto err_dump;
3145 			}
3146 			if (iref) {
3147 				if (path->slots[0] != extent_slot) {
3148 					btrfs_crit(info,
3149 "invalid iref, extent item key (%llu %u %llu) doesn't have wanted iref",
3150 						   key.objectid, key.type,
3151 						   key.offset);
3152 					btrfs_abort_transaction(trans, -EUCLEAN);
3153 					goto err_dump;
3154 				}
3155 			} else {
3156 				/*
3157 				 * No inline ref; we must be at a SHARED_* item,
3158 				 * and since it's a single ref the layout must be:
3159 				 * |	extent_slot	  ||extent_slot + 1|
3160 				 * [ EXTENT/METADATA_ITEM ][ SHARED_* ITEM ]
3161 				 */
3162 				if (path->slots[0] != extent_slot + 1) {
3163 					btrfs_crit(info,
3164 	"invalid SHARED_* item, previous item is not EXTENT/METADATA_ITEM");
3165 					btrfs_abort_transaction(trans, -EUCLEAN);
3166 					goto err_dump;
3167 				}
3168 				path->slots[0] = extent_slot;
3169 				num_to_del = 2;
3170 			}
3171 		}
3172 
3173 		last_ref = 1;
3174 		ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
3175 				      num_to_del);
3176 		if (ret) {
3177 			btrfs_abort_transaction(trans, ret);
3178 			goto out;
3179 		}
3180 		btrfs_release_path(path);
3181 
3182 		if (is_data) {
3183 			ret = btrfs_del_csums(trans, info->csum_root, bytenr,
3184 					      num_bytes);
3185 			if (ret) {
3186 				btrfs_abort_transaction(trans, ret);
3187 				goto out;
3188 			}
3189 		}
3190 
3191 		ret = add_to_free_space_tree(trans, bytenr, num_bytes);
3192 		if (ret) {
3193 			btrfs_abort_transaction(trans, ret);
3194 			goto out;
3195 		}
3196 
3197 		ret = btrfs_update_block_group(trans, bytenr, num_bytes, 0);
3198 		if (ret) {
3199 			btrfs_abort_transaction(trans, ret);
3200 			goto out;
3201 		}
3202 	}
3203 	btrfs_release_path(path);
3204 
3205 out:
3206 	btrfs_free_path(path);
3207 	return ret;
3208 err_dump:
3209 	/*
3210 	 * Leaf dump can take up a lot of log buffer, so we only do full leaf
3211 	 * dump for debug build.
3212 	 */
3213 	if (IS_ENABLED(CONFIG_BTRFS_DEBUG)) {
3214 		btrfs_crit(info, "path->slots[0]=%d extent_slot=%d",
3215 			   path->slots[0], extent_slot);
3216 		btrfs_print_leaf(path->nodes[0]);
3217 	}
3218 
3219 	btrfs_free_path(path);
3220 	return -EUCLEAN;
3221 }
3222 
3223 /*
3224  * when we free a block, it is possible (and likely) that we free the last
3225  * delayed ref for that extent as well.  This searches the delayed ref tree for
3226  * a given extent, and if there are no other delayed refs to be processed, it
3227  * removes it from the tree.
3228  */
3229 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
3230 				      u64 bytenr)
3231 {
3232 	struct btrfs_delayed_ref_head *head;
3233 	struct btrfs_delayed_ref_root *delayed_refs;
3234 	int ret = 0;
3235 
3236 	delayed_refs = &trans->transaction->delayed_refs;
3237 	spin_lock(&delayed_refs->lock);
3238 	head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
3239 	if (!head)
3240 		goto out_delayed_unlock;
3241 
3242 	spin_lock(&head->lock);
3243 	if (!RB_EMPTY_ROOT(&head->ref_tree.rb_root))
3244 		goto out;
3245 
3246 	if (cleanup_extent_op(head) != NULL)
3247 		goto out;
3248 
3249 	/*
3250 	 * waiting for the lock here would deadlock.  If someone else has it
3251 	 * locked they are already in the process of dropping it anyway
3252 	 */
3253 	if (!mutex_trylock(&head->mutex))
3254 		goto out;
3255 
3256 	btrfs_delete_ref_head(delayed_refs, head);
3257 	head->processing = 0;
3258 
3259 	spin_unlock(&head->lock);
3260 	spin_unlock(&delayed_refs->lock);
3261 
3262 	BUG_ON(head->extent_op);
3263 	if (head->must_insert_reserved)
3264 		ret = 1;
3265 
3266 	btrfs_cleanup_ref_head_accounting(trans->fs_info, delayed_refs, head);
3267 	mutex_unlock(&head->mutex);
3268 	btrfs_put_delayed_ref_head(head);
3269 	return ret;
3270 out:
3271 	spin_unlock(&head->lock);
3272 
3273 out_delayed_unlock:
3274 	spin_unlock(&delayed_refs->lock);
3275 	return 0;
3276 }
3277 
3278 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
3279 			   u64 root_id,
3280 			   struct extent_buffer *buf,
3281 			   u64 parent, int last_ref)
3282 {
3283 	struct btrfs_fs_info *fs_info = trans->fs_info;
3284 	struct btrfs_ref generic_ref = { 0 };
3285 	int ret;
3286 
3287 	btrfs_init_generic_ref(&generic_ref, BTRFS_DROP_DELAYED_REF,
3288 			       buf->start, buf->len, parent);
3289 	btrfs_init_tree_ref(&generic_ref, btrfs_header_level(buf),
3290 			    root_id, 0, false);
3291 
3292 	if (root_id != BTRFS_TREE_LOG_OBJECTID) {
3293 		btrfs_ref_tree_mod(fs_info, &generic_ref);
3294 		ret = btrfs_add_delayed_tree_ref(trans, &generic_ref, NULL);
3295 		BUG_ON(ret); /* -ENOMEM */
3296 	}
3297 
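	/*
	 * Editorial note: a block allocated in this same transaction was
	 * never part of a committed tree, so on its last ref it can be
	 * freed for immediate reuse instead of staying pinned until the
	 * transaction commits.
	 */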
3298 	if (last_ref && btrfs_header_generation(buf) == trans->transid) {
3299 		struct btrfs_block_group *cache;
3300 		bool must_pin = false;
3301 
3302 		if (root_id != BTRFS_TREE_LOG_OBJECTID) {
3303 			ret = check_ref_cleanup(trans, buf->start);
3304 			if (!ret) {
3305 				btrfs_redirty_list_add(trans->transaction, buf);
3306 				goto out;
3307 			}
3308 		}
3309 
3310 		cache = btrfs_lookup_block_group(fs_info, buf->start);
3311 
3312 		if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
3313 			pin_down_extent(trans, cache, buf->start, buf->len, 1);
3314 			btrfs_put_block_group(cache);
3315 			goto out;
3316 		}
3317 
3318 		/*
3319 		 * If there are tree mod log users we may have recorded mod log
3320 		 * operations for this node.  If we re-allocate this node we
3321 		 * could replay operations on this node that happened when it
3322 		 * existed in a completely different root.  For example if it
3323 		 * was part of root A, then was reallocated to root B, and we
3324 	 * are doing a btrfs_old_search_slot(root B), we could replay
3325 		 * operations that happened when the block was part of root A,
3326 		 * giving us an inconsistent view of the btree.
3327 		 *
3328 		 * We are safe from races here because at this point no other
3329 		 * node or root points to this extent buffer, so if after this
3330 		 * check a new tree mod log user joins we will not have an
3331 		 * existing log of operations on this node that we have to
3332 		 * contend with.
3333 		 */
3334 		if (test_bit(BTRFS_FS_TREE_MOD_LOG_USERS, &fs_info->flags))
3335 			must_pin = true;
3336 
3337 		if (must_pin || btrfs_is_zoned(fs_info)) {
3338 			btrfs_redirty_list_add(trans->transaction, buf);
3339 			pin_down_extent(trans, cache, buf->start, buf->len, 1);
3340 			btrfs_put_block_group(cache);
3341 			goto out;
3342 		}
3343 
3344 		WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
3345 
3346 		btrfs_add_free_space(cache, buf->start, buf->len);
3347 		btrfs_free_reserved_bytes(cache, buf->len, 0);
3348 		btrfs_put_block_group(cache);
3349 		trace_btrfs_reserved_extent_free(fs_info, buf->start, buf->len);
3350 	}
3351 out:
3352 	if (last_ref) {
3353 		/*
3354 		 * Deleting the buffer, clear the corrupt flag since it doesn't
3355 		 * matter anymore.
3356 		 */
3357 		clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
3358 	}
3359 }
3360 
3361 /* Can return -ENOMEM */
3362 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_ref *ref)
3363 {
3364 	struct btrfs_fs_info *fs_info = trans->fs_info;
3365 	int ret;
3366 
3367 	if (btrfs_is_testing(fs_info))
3368 		return 0;
3369 
3370 	/*
3371 	 * tree log blocks never actually go into the extent allocation
3372 	 * tree, just update pinning info and exit early.
3373 	 */
3374 	if ((ref->type == BTRFS_REF_METADATA &&
3375 	     ref->tree_ref.root == BTRFS_TREE_LOG_OBJECTID) ||
3376 	    (ref->type == BTRFS_REF_DATA &&
3377 	     ref->data_ref.ref_root == BTRFS_TREE_LOG_OBJECTID)) {
3378 		/* unlocks the pinned mutex */
3379 		btrfs_pin_extent(trans, ref->bytenr, ref->len, 1);
3380 		ret = 0;
3381 	} else if (ref->type == BTRFS_REF_METADATA) {
3382 		ret = btrfs_add_delayed_tree_ref(trans, ref, NULL);
3383 	} else {
3384 		ret = btrfs_add_delayed_data_ref(trans, ref, 0);
3385 	}
3386 
3387 	if (!((ref->type == BTRFS_REF_METADATA &&
3388 	       ref->tree_ref.root == BTRFS_TREE_LOG_OBJECTID) ||
3389 	      (ref->type == BTRFS_REF_DATA &&
3390 	       ref->data_ref.ref_root == BTRFS_TREE_LOG_OBJECTID)))
3391 		btrfs_ref_tree_mod(fs_info, ref);
3392 
3393 	return ret;
3394 }
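
/*
 * Illustrative sketch, not part of the original file: a typical caller drops
 * a data extent by filling in a struct btrfs_ref and handing it to
 * btrfs_free_extent().  The helper calls match the ones used elsewhere in
 * this file; the function name and parameters are hypothetical.
 */
static inline int example_drop_data_extent(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root, u64 bytenr,
					   u64 num_bytes, u64 owner, u64 offset)
{
	struct btrfs_ref ref = { 0 };

	btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr,
			       num_bytes, 0);
	btrfs_init_data_ref(&ref, root->root_key.objectid, owner, offset,
			    0, false);
	/* This queues a delayed ref drop; it can return -ENOMEM. */
	return btrfs_free_extent(trans, &ref);
}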
3395 
3396 enum btrfs_loop_type {
3397 	LOOP_CACHING_NOWAIT,
3398 	LOOP_CACHING_WAIT,
3399 	LOOP_ALLOC_CHUNK,
3400 	LOOP_NO_EMPTY_SIZE,
3401 };
3402 
3403 static inline void
3404 btrfs_lock_block_group(struct btrfs_block_group *cache,
3405 		       int delalloc)
3406 {
3407 	if (delalloc)
3408 		down_read(&cache->data_rwsem);
3409 }
3410 
3411 static inline void btrfs_grab_block_group(struct btrfs_block_group *cache,
3412 		       int delalloc)
3413 {
3414 	btrfs_get_block_group(cache);
3415 	if (delalloc)
3416 		down_read(&cache->data_rwsem);
3417 }
3418 
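/*
 * Look up the block group that currently backs @cluster, read-locking its
 * data_rwsem for delalloc allocations.  The cluster can switch block groups
 * while we block on the rwsem, so re-check afterwards and retry until the
 * two agree.  Returns with cluster->refill_lock held, or NULL if the
 * cluster has no block group.
 */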
3419 static struct btrfs_block_group *btrfs_lock_cluster(
3420 		   struct btrfs_block_group *block_group,
3421 		   struct btrfs_free_cluster *cluster,
3422 		   int delalloc)
3423 	__acquires(&cluster->refill_lock)
3424 {
3425 	struct btrfs_block_group *used_bg = NULL;
3426 
3427 	spin_lock(&cluster->refill_lock);
3428 	while (1) {
3429 		used_bg = cluster->block_group;
3430 		if (!used_bg)
3431 			return NULL;
3432 
3433 		if (used_bg == block_group)
3434 			return used_bg;
3435 
3436 		btrfs_get_block_group(used_bg);
3437 
3438 		if (!delalloc)
3439 			return used_bg;
3440 
3441 		if (down_read_trylock(&used_bg->data_rwsem))
3442 			return used_bg;
3443 
3444 		spin_unlock(&cluster->refill_lock);
3445 
3446 		/* We should only ever have one level of nesting here. */
3447 		down_read_nested(&used_bg->data_rwsem, SINGLE_DEPTH_NESTING);
3448 
3449 		spin_lock(&cluster->refill_lock);
3450 		if (used_bg == cluster->block_group)
3451 			return used_bg;
3452 
3453 		up_read(&used_bg->data_rwsem);
3454 		btrfs_put_block_group(used_bg);
3455 	}
3456 }
3457 
3458 static inline void
3459 btrfs_release_block_group(struct btrfs_block_group *cache,
3460 			 int delalloc)
3461 {
3462 	if (delalloc)
3463 		up_read(&cache->data_rwsem);
3464 	btrfs_put_block_group(cache);
3465 }
3466 
3467 enum btrfs_extent_allocation_policy {
3468 	BTRFS_EXTENT_ALLOC_CLUSTERED,
3469 	BTRFS_EXTENT_ALLOC_ZONED,
3470 };
3471 
3472 /*
3473  * Structure used internally for find_free_extent() function.  Wraps needed
3474  * parameters.
3475  */
3476 struct find_free_extent_ctl {
3477 	/* Basic allocation info */
3478 	u64 num_bytes;
3479 	u64 empty_size;
3480 	u64 flags;
3481 	int delalloc;
3482 
3483 	/* Where to start the search inside the bg */
3484 	u64 search_start;
3485 
3486 	/* For clustered allocation */
3487 	u64 empty_cluster;
3488 	struct btrfs_free_cluster *last_ptr;
3489 	bool use_cluster;
3490 
3491 	bool have_caching_bg;
3492 	bool orig_have_caching_bg;
3493 
3494 	/* Allocation is called for tree-log */
3495 	bool for_treelog;
3496 
3497 	/* Allocation is called for data relocation */
3498 	bool for_data_reloc;
3499 
3500 	/* RAID index, converted from flags */
3501 	int index;
3502 
3503 	/*
3504 	 * Current loop number, check find_free_extent_update_loop() for details
3505 	 */
3506 	int loop;
3507 
3508 	/*
3509 	 * Whether we're refilling a cluster; if true we need to re-search the
3510 	 * current block group but shouldn't try to refill the cluster again.
3511 	 */
3512 	bool retry_clustered;
3513 
3514 	/*
3515 	 * Whether we're updating the free space cache; if true we need to re-search
3516 	 * the current block group but shouldn't try updating the free space cache again.
3517 	 */
3518 	bool retry_unclustered;
3519 
3520 	/* If current block group is cached */
3521 	int cached;
3522 
3523 	/* Max contiguous hole found */
3524 	u64 max_extent_size;
3525 
3526 	/* Total free space from free space cache, not always contiguous */
3527 	u64 total_free_space;
3528 
3529 	/* Found result */
3530 	u64 found_offset;
3531 
3532 	/* Hint where to start looking for an empty space */
3533 	u64 hint_byte;
3534 
3535 	/* Allocation policy */
3536 	enum btrfs_extent_allocation_policy policy;
3537 };
3538 
3539 
3540 /*
3541  * Helper function for find_free_extent().
3542  *
3543  * Return -ENOENT to inform caller that we need to fall back to unclustered mode.
3544  * Return -EAGAIN to inform caller that we need to re-search this block group.
3545  * Return >0 to inform caller that we found nothing.
3546  * Return 0 when we have found a location and set ffe_ctl->found_offset.
3547  */
3548 static int find_free_extent_clustered(struct btrfs_block_group *bg,
3549 				      struct find_free_extent_ctl *ffe_ctl,
3550 				      struct btrfs_block_group **cluster_bg_ret)
3551 {
3552 	struct btrfs_block_group *cluster_bg;
3553 	struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr;
3554 	u64 aligned_cluster;
3555 	u64 offset;
3556 	int ret;
3557 
3558 	cluster_bg = btrfs_lock_cluster(bg, last_ptr, ffe_ctl->delalloc);
3559 	if (!cluster_bg)
3560 		goto refill_cluster;
3561 	if (cluster_bg != bg && (cluster_bg->ro ||
3562 	    !block_group_bits(cluster_bg, ffe_ctl->flags)))
3563 		goto release_cluster;
3564 
3565 	offset = btrfs_alloc_from_cluster(cluster_bg, last_ptr,
3566 			ffe_ctl->num_bytes, cluster_bg->start,
3567 			&ffe_ctl->max_extent_size);
3568 	if (offset) {
3569 		/* We have a block, we're done */
3570 		spin_unlock(&last_ptr->refill_lock);
3571 		trace_btrfs_reserve_extent_cluster(cluster_bg,
3572 				ffe_ctl->search_start, ffe_ctl->num_bytes);
3573 		*cluster_bg_ret = cluster_bg;
3574 		ffe_ctl->found_offset = offset;
3575 		return 0;
3576 	}
3577 	WARN_ON(last_ptr->block_group != cluster_bg);
3578 
3579 release_cluster:
3580 	/*
3581 	 * If we are on LOOP_NO_EMPTY_SIZE, we can't set up a new cluster, so
3582 	 * let's just skip it and let the allocator find whatever block it can
3583 	 * find. If we reach this point, we will have tried the cluster
3584 	 * allocator plenty of times without finding anything, so we are
3585 	 * likely way too fragmented for the clustering code to find anything.
3586 	 *
3587 	 * However, if the cluster is taken from the current block group,
3588 	 * release the cluster first, so that we stand a better chance of
3589 	 * succeeding in the unclustered allocation.
3590 	 */
3591 	if (ffe_ctl->loop >= LOOP_NO_EMPTY_SIZE && cluster_bg != bg) {
3592 		spin_unlock(&last_ptr->refill_lock);
3593 		btrfs_release_block_group(cluster_bg, ffe_ctl->delalloc);
3594 		return -ENOENT;
3595 	}
3596 
3597 	/* This cluster didn't work out, free it and start over */
3598 	btrfs_return_cluster_to_free_space(NULL, last_ptr);
3599 
3600 	if (cluster_bg != bg)
3601 		btrfs_release_block_group(cluster_bg, ffe_ctl->delalloc);
3602 
3603 refill_cluster:
3604 	if (ffe_ctl->loop >= LOOP_NO_EMPTY_SIZE) {
3605 		spin_unlock(&last_ptr->refill_lock);
3606 		return -ENOENT;
3607 	}
3608 
3609 	aligned_cluster = max_t(u64,
3610 			ffe_ctl->empty_cluster + ffe_ctl->empty_size,
3611 			bg->full_stripe_len);
3612 	ret = btrfs_find_space_cluster(bg, last_ptr, ffe_ctl->search_start,
3613 			ffe_ctl->num_bytes, aligned_cluster);
3614 	if (ret == 0) {
3615 		/* Now pull our allocation out of this cluster */
3616 		offset = btrfs_alloc_from_cluster(bg, last_ptr,
3617 				ffe_ctl->num_bytes, ffe_ctl->search_start,
3618 				&ffe_ctl->max_extent_size);
3619 		if (offset) {
3620 			/* We found one, proceed */
3621 			spin_unlock(&last_ptr->refill_lock);
3622 			trace_btrfs_reserve_extent_cluster(bg,
3623 					ffe_ctl->search_start,
3624 					ffe_ctl->num_bytes);
3625 			ffe_ctl->found_offset = offset;
3626 			return 0;
3627 		}
3628 	} else if (!ffe_ctl->cached && ffe_ctl->loop > LOOP_CACHING_NOWAIT &&
3629 		   !ffe_ctl->retry_clustered) {
3630 		spin_unlock(&last_ptr->refill_lock);
3631 
3632 		ffe_ctl->retry_clustered = true;
3633 		btrfs_wait_block_group_cache_progress(bg, ffe_ctl->num_bytes +
3634 				ffe_ctl->empty_cluster + ffe_ctl->empty_size);
3635 		return -EAGAIN;
3636 	}
3637 	/*
3638 	 * At this point we either didn't find a cluster or we weren't able to
3639 	 * allocate a block from our cluster.  Free the cluster we've been
3640 	 * trying to use, and go to the next block group.
3641 	 */
3642 	btrfs_return_cluster_to_free_space(NULL, last_ptr);
3643 	spin_unlock(&last_ptr->refill_lock);
3644 	return 1;
3645 }
3646 
3647 /*
3648  * Return >0 to inform caller that we found nothing.
3649  * Return 0 when we found a free extent and set ffe_ctl->found_offset.
3650  * Return -EAGAIN to inform caller that we need to re-search this block group.
3651  */
3652 static int find_free_extent_unclustered(struct btrfs_block_group *bg,
3653 					struct find_free_extent_ctl *ffe_ctl)
3654 {
3655 	struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr;
3656 	u64 offset;
3657 
3658 	/*
3659 	 * We are doing an unclustered allocation, set the fragmented flag so
3660 	 * we don't bother trying to set up a cluster again until we get more
3661 	 * space.
3662 	 */
3663 	if (unlikely(last_ptr)) {
3664 		spin_lock(&last_ptr->lock);
3665 		last_ptr->fragmented = 1;
3666 		spin_unlock(&last_ptr->lock);
3667 	}
3668 	if (ffe_ctl->cached) {
3669 		struct btrfs_free_space_ctl *free_space_ctl;
3670 
3671 		free_space_ctl = bg->free_space_ctl;
3672 		spin_lock(&free_space_ctl->tree_lock);
3673 		if (free_space_ctl->free_space <
3674 		    ffe_ctl->num_bytes + ffe_ctl->empty_cluster +
3675 		    ffe_ctl->empty_size) {
3676 			ffe_ctl->total_free_space = max_t(u64,
3677 					ffe_ctl->total_free_space,
3678 					free_space_ctl->free_space);
3679 			spin_unlock(&free_space_ctl->tree_lock);
3680 			return 1;
3681 		}
3682 		spin_unlock(&free_space_ctl->tree_lock);
3683 	}
3684 
3685 	offset = btrfs_find_space_for_alloc(bg, ffe_ctl->search_start,
3686 			ffe_ctl->num_bytes, ffe_ctl->empty_size,
3687 			&ffe_ctl->max_extent_size);
3688 
3689 	/*
3690 	 * If we didn't find a chunk, and we haven't failed on this block group
3691 	 * before, and this block group is in the middle of caching and we are
3692 	 * ok with waiting, then go ahead and wait for progress to be made, and
3693 	 * set @retry_unclustered to true.
3694 	 *
3695 	 * If @retry_unclustered is true then we've already waited on this
3696 	 * block group once and should move on to the next block group.
3697 	 */
3698 	if (!offset && !ffe_ctl->retry_unclustered && !ffe_ctl->cached &&
3699 	    ffe_ctl->loop > LOOP_CACHING_NOWAIT) {
3700 		btrfs_wait_block_group_cache_progress(bg, ffe_ctl->num_bytes +
3701 						      ffe_ctl->empty_size);
3702 		ffe_ctl->retry_unclustered = true;
3703 		return -EAGAIN;
3704 	} else if (!offset) {
3705 		return 1;
3706 	}
3707 	ffe_ctl->found_offset = offset;
3708 	return 0;
3709 }
3710 
3711 static int do_allocation_clustered(struct btrfs_block_group *block_group,
3712 				   struct find_free_extent_ctl *ffe_ctl,
3713 				   struct btrfs_block_group **bg_ret)
3714 {
3715 	int ret;
3716 
3717 	/* We want to try and use the cluster allocator, so let's look there */
3718 	if (ffe_ctl->last_ptr && ffe_ctl->use_cluster) {
3719 		ret = find_free_extent_clustered(block_group, ffe_ctl, bg_ret);
3720 		if (ret >= 0 || ret == -EAGAIN)
3721 			return ret;
3722 		/* ret == -ENOENT case falls through */
3723 	}
3724 
3725 	return find_free_extent_unclustered(block_group, ffe_ctl);
3726 }
3727 
3728 /*
3729  * Tree-log block group locking
3730  * ============================
3731  *
3732  * fs_info::treelog_bg_lock protects the fs_info::treelog_bg which
3733  * indicates the starting address of a block group, which is reserved only
3734  * for tree-log metadata.
3735  *
3736  * Lock nesting
3737  * ============
3738  *
3739  * space_info::lock
3740  *   block_group::lock
3741  *     fs_info::treelog_bg_lock
3742  */
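
/*
 * In code the nesting above looks like this; do_allocation_zoned() below
 * takes all of these locks in exactly this order:
 *
 *	spin_lock(&space_info->lock);
 *	spin_lock(&block_group->lock);
 *	spin_lock(&fs_info->treelog_bg_lock);
 *	spin_lock(&fs_info->relocation_bg_lock);
 *	...
 *	spin_unlock(&fs_info->relocation_bg_lock);
 *	spin_unlock(&fs_info->treelog_bg_lock);
 *	spin_unlock(&block_group->lock);
 *	spin_unlock(&space_info->lock);
 */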
3743 
3744 /*
3745  * Simple allocator for sequential-only block groups. It only allows sequential
3746  * allocation. No need to play with trees. This function also reserves the
3747  * bytes as in btrfs_add_reserved_bytes.
3748  */
3749 static int do_allocation_zoned(struct btrfs_block_group *block_group,
3750 			       struct find_free_extent_ctl *ffe_ctl,
3751 			       struct btrfs_block_group **bg_ret)
3752 {
3753 	struct btrfs_fs_info *fs_info = block_group->fs_info;
3754 	struct btrfs_space_info *space_info = block_group->space_info;
3755 	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3756 	u64 start = block_group->start;
3757 	u64 num_bytes = ffe_ctl->num_bytes;
3758 	u64 avail;
3759 	u64 bytenr = block_group->start;
3760 	u64 log_bytenr;
3761 	u64 data_reloc_bytenr;
3762 	int ret = 0;
3763 	bool skip;
3764 
3765 	ASSERT(btrfs_is_zoned(block_group->fs_info));
3766 
3767 	/*
3768 	 * Do not allow non-tree-log blocks in the dedicated tree-log block
3769 	 * group, and vice versa.
3770 	 */
3771 	spin_lock(&fs_info->treelog_bg_lock);
3772 	log_bytenr = fs_info->treelog_bg;
3773 	skip = log_bytenr && ((ffe_ctl->for_treelog && bytenr != log_bytenr) ||
3774 			      (!ffe_ctl->for_treelog && bytenr == log_bytenr));
3775 	spin_unlock(&fs_info->treelog_bg_lock);
3776 	if (skip)
3777 		return 1;
3778 
3779 	/*
3780 	 * Do not allow non-relocation blocks in the dedicated relocation block
3781 	 * group, and vice versa.
3782 	 */
3783 	spin_lock(&fs_info->relocation_bg_lock);
3784 	data_reloc_bytenr = fs_info->data_reloc_bg;
3785 	if (data_reloc_bytenr &&
3786 	    ((ffe_ctl->for_data_reloc && bytenr != data_reloc_bytenr) ||
3787 	     (!ffe_ctl->for_data_reloc && bytenr == data_reloc_bytenr)))
3788 		skip = true;
3789 	spin_unlock(&fs_info->relocation_bg_lock);
3790 	if (skip)
3791 		return 1;
3792 
3793 	spin_lock(&space_info->lock);
3794 	spin_lock(&block_group->lock);
3795 	spin_lock(&fs_info->treelog_bg_lock);
3796 	spin_lock(&fs_info->relocation_bg_lock);
3797 
3798 	ASSERT(!ffe_ctl->for_treelog ||
3799 	       block_group->start == fs_info->treelog_bg ||
3800 	       fs_info->treelog_bg == 0);
3801 	ASSERT(!ffe_ctl->for_data_reloc ||
3802 	       block_group->start == fs_info->data_reloc_bg ||
3803 	       fs_info->data_reloc_bg == 0);
3804 
3805 	if (block_group->ro || block_group->zoned_data_reloc_ongoing) {
3806 		ret = 1;
3807 		goto out;
3808 	}
3809 
3810 	/*
3811 	 * Do not allow a block group that is currently in use to become the
3812 	 * dedicated tree-log block group.
3813 	 */
3814 	if (ffe_ctl->for_treelog && !fs_info->treelog_bg &&
3815 	    (block_group->used || block_group->reserved)) {
3816 		ret = 1;
3817 		goto out;
3818 	}
3819 
3820 	/*
3821 	 * Do not allow a block group that is currently in use to become the
3822 	 * dedicated data relocation block group.
3823 	 */
3824 	if (ffe_ctl->for_data_reloc && !fs_info->data_reloc_bg &&
3825 	    (block_group->used || block_group->reserved)) {
3826 		ret = 1;
3827 		goto out;
3828 	}
3829 
3830 	avail = block_group->length - block_group->alloc_offset;
3831 	if (avail < num_bytes) {
3832 		if (ffe_ctl->max_extent_size < avail) {
3833 			/*
3834 			 * With the sequential allocator, free space is always
3835 			 * contiguous.
3836 			 */
3837 			ffe_ctl->max_extent_size = avail;
3838 			ffe_ctl->total_free_space = avail;
3839 		}
3840 		ret = 1;
3841 		goto out;
3842 	}
3843 
3844 	if (ffe_ctl->for_treelog && !fs_info->treelog_bg)
3845 		fs_info->treelog_bg = block_group->start;
3846 
3847 	if (ffe_ctl->for_data_reloc && !fs_info->data_reloc_bg)
3848 		fs_info->data_reloc_bg = block_group->start;
3849 
3850 	ffe_ctl->found_offset = start + block_group->alloc_offset;
3851 	block_group->alloc_offset += num_bytes;
3852 	spin_lock(&ctl->tree_lock);
3853 	ctl->free_space -= num_bytes;
3854 	spin_unlock(&ctl->tree_lock);
3855 
3856 	/*
3857 	 * We do not check if found_offset is aligned to stripesize. The
3858 	 * address is rewritten anyway when using zone append writes.
3859 	 */
3860 
3861 	ffe_ctl->search_start = ffe_ctl->found_offset;
3862 
3863 out:
3864 	if (ret && ffe_ctl->for_treelog)
3865 		fs_info->treelog_bg = 0;
3866 	if (ret && ffe_ctl->for_data_reloc &&
3867 	    fs_info->data_reloc_bg == block_group->start) {
3868 		/*
3869 		 * Do not allow further allocations from this block group.
3870 		 * Unlike setting ->ro, setting the
3871 		 * ->zoned_data_reloc_ongoing flag still allows nocow
3872 		 * writers to come in. See btrfs_inc_nocow_writers().
3873 		 *
3874 		 * We need to disable further allocations to avoid allocating
3875 		 * a regular (non-relocation) data extent. With a mix of
3876 		 * relocation extents and regular extents, we could dispatch
3877 		 * WRITE commands (for relocation extents) and ZONE APPEND
3878 		 * commands (for regular extents) to the same zone at the same
3879 		 * time, which would easily break the write pointer.
3880 		 */
3881 		block_group->zoned_data_reloc_ongoing = 1;
3882 		fs_info->data_reloc_bg = 0;
3883 	}
3884 	spin_unlock(&fs_info->relocation_bg_lock);
3885 	spin_unlock(&fs_info->treelog_bg_lock);
3886 	spin_unlock(&block_group->lock);
3887 	spin_unlock(&space_info->lock);
3888 	return ret;
3889 }
3890 
3891 static int do_allocation(struct btrfs_block_group *block_group,
3892 			 struct find_free_extent_ctl *ffe_ctl,
3893 			 struct btrfs_block_group **bg_ret)
3894 {
3895 	switch (ffe_ctl->policy) {
3896 	case BTRFS_EXTENT_ALLOC_CLUSTERED:
3897 		return do_allocation_clustered(block_group, ffe_ctl, bg_ret);
3898 	case BTRFS_EXTENT_ALLOC_ZONED:
3899 		return do_allocation_zoned(block_group, ffe_ctl, bg_ret);
3900 	default:
3901 		BUG();
3902 	}
3903 }
3904 
3905 static void release_block_group(struct btrfs_block_group *block_group,
3906 				struct find_free_extent_ctl *ffe_ctl,
3907 				int delalloc)
3908 {
3909 	switch (ffe_ctl->policy) {
3910 	case BTRFS_EXTENT_ALLOC_CLUSTERED:
3911 		ffe_ctl->retry_clustered = false;
3912 		ffe_ctl->retry_unclustered = false;
3913 		break;
3914 	case BTRFS_EXTENT_ALLOC_ZONED:
3915 		/* Nothing to do */
3916 		break;
3917 	default:
3918 		BUG();
3919 	}
3920 
3921 	BUG_ON(btrfs_bg_flags_to_raid_index(block_group->flags) !=
3922 	       ffe_ctl->index);
3923 	btrfs_release_block_group(block_group, delalloc);
3924 }
3925 
3926 static void found_extent_clustered(struct find_free_extent_ctl *ffe_ctl,
3927 				   struct btrfs_key *ins)
3928 {
3929 	struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr;
3930 
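	/*
	 * If clustered allocation was abandoned for this search, remember
	 * where this extent landed so the next search can start nearby.
	 */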
3931 	if (!ffe_ctl->use_cluster && last_ptr) {
3932 		spin_lock(&last_ptr->lock);
3933 		last_ptr->window_start = ins->objectid;
3934 		spin_unlock(&last_ptr->lock);
3935 	}
3936 }
3937 
3938 static void found_extent(struct find_free_extent_ctl *ffe_ctl,
3939 			 struct btrfs_key *ins)
3940 {
3941 	switch (ffe_ctl->policy) {
3942 	case BTRFS_EXTENT_ALLOC_CLUSTERED:
3943 		found_extent_clustered(ffe_ctl, ins);
3944 		break;
3945 	case BTRFS_EXTENT_ALLOC_ZONED:
3946 		/* Nothing to do */
3947 		break;
3948 	default:
3949 		BUG();
3950 	}
3951 }
3952 
3953 static int chunk_allocation_failed(struct find_free_extent_ctl *ffe_ctl)
3954 {
3955 	switch (ffe_ctl->policy) {
3956 	case BTRFS_EXTENT_ALLOC_CLUSTERED:
3957 		/*
3958 		 * If we can't allocate a new chunk, we've already looped
3959 		 * through at least once, so move on to the NO_EMPTY_SIZE case.
3960 		 */
3961 		ffe_ctl->loop = LOOP_NO_EMPTY_SIZE;
3962 		return 0;
3963 	case BTRFS_EXTENT_ALLOC_ZONED:
3964 		/* Give up here */
3965 		return -ENOSPC;
3966 	default:
3967 		BUG();
3968 	}
3969 }
3970 
3971 /*
3972  * Return >0 means the caller needs to re-search for a free extent.
3973  * Return 0 means we have the needed free extent.
3974  * Return <0 means we failed to locate any free extent.
3975  */
3976 static int find_free_extent_update_loop(struct btrfs_fs_info *fs_info,
3977 					struct btrfs_key *ins,
3978 					struct find_free_extent_ctl *ffe_ctl,
3979 					bool full_search)
3980 {
3981 	struct btrfs_root *root = fs_info->extent_root;
3982 	int ret;
3983 
3984 	if ((ffe_ctl->loop == LOOP_CACHING_NOWAIT) &&
3985 	    ffe_ctl->have_caching_bg && !ffe_ctl->orig_have_caching_bg)
3986 		ffe_ctl->orig_have_caching_bg = true;
3987 
3988 	if (!ins->objectid && ffe_ctl->loop >= LOOP_CACHING_WAIT &&
3989 	    ffe_ctl->have_caching_bg)
3990 		return 1;
3991 
3992 	if (!ins->objectid && ++(ffe_ctl->index) < BTRFS_NR_RAID_TYPES)
3993 		return 1;
3994 
3995 	if (ins->objectid) {
3996 		found_extent(ffe_ctl, ins);
3997 		return 0;
3998 	}
3999 
4000 	/*
4001 	 * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
4002 	 *			caching kthreads as we move along
4003 	 * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
4004 	 * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
4005 	 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
4006 	 *		       again
4007 	 */
4008 	if (ffe_ctl->loop < LOOP_NO_EMPTY_SIZE) {
4009 		ffe_ctl->index = 0;
4010 		if (ffe_ctl->loop == LOOP_CACHING_NOWAIT) {
4011 			/*
4012 			 * We want to skip the LOOP_CACHING_WAIT step if we
4013 			 * don't have any uncached bgs and we've already done a
4014 			 * full search through them.
4015 			 */
4016 			if (ffe_ctl->orig_have_caching_bg || !full_search)
4017 				ffe_ctl->loop = LOOP_CACHING_WAIT;
4018 			else
4019 				ffe_ctl->loop = LOOP_ALLOC_CHUNK;
4020 		} else {
4021 			ffe_ctl->loop++;
4022 		}
4023 
4024 		if (ffe_ctl->loop == LOOP_ALLOC_CHUNK) {
4025 			struct btrfs_trans_handle *trans;
4026 			int exist = 0;
4027 
4028 			trans = current->journal_info;
4029 			if (trans)
4030 				exist = 1;
4031 			else
4032 				trans = btrfs_join_transaction(root);
4033 
4034 			if (IS_ERR(trans)) {
4035 				ret = PTR_ERR(trans);
4036 				return ret;
4037 			}
4038 
4039 			ret = btrfs_chunk_alloc(trans, ffe_ctl->flags,
4040 						CHUNK_ALLOC_FORCE);
4041 
4042 			/* Do not bail out on ENOSPC since we can do more. */
4043 			if (ret == -ENOSPC)
4044 				ret = chunk_allocation_failed(ffe_ctl);
4045 			else if (ret < 0)
4046 				btrfs_abort_transaction(trans, ret);
4047 			else
4048 				ret = 0;
4049 			if (!exist)
4050 				btrfs_end_transaction(trans);
4051 			if (ret)
4052 				return ret;
4053 		}
4054 
4055 		if (ffe_ctl->loop == LOOP_NO_EMPTY_SIZE) {
4056 			if (ffe_ctl->policy != BTRFS_EXTENT_ALLOC_CLUSTERED)
4057 				return -ENOSPC;
4058 
4059 			/*
4060 			 * Don't loop again if we already have no empty_size and
4061 			 * no empty_cluster.
4062 			 */
4063 			if (ffe_ctl->empty_size == 0 &&
4064 			    ffe_ctl->empty_cluster == 0)
4065 				return -ENOSPC;
4066 			ffe_ctl->empty_size = 0;
4067 			ffe_ctl->empty_cluster = 0;
4068 		}
4069 		return 1;
4070 	}
4071 	return -ENOSPC;
4072 }
4073 
4074 static int prepare_allocation_clustered(struct btrfs_fs_info *fs_info,
4075 					struct find_free_extent_ctl *ffe_ctl,
4076 					struct btrfs_space_info *space_info,
4077 					struct btrfs_key *ins)
4078 {
4079 	/*
4080 	 * If our free space is heavily fragmented we may not be able to make
4081 	 * big contiguous allocations, so instead of doing the expensive search
4082 	 * for free space, simply return ENOSPC with our max_extent_size so we
4083 	 * can go ahead and search for a more manageable chunk.
4084 	 *
4085 	 * If our max_extent_size is large enough for our allocation simply
4086 	 * disable clustering since we will likely not be able to find enough
4087 	 * space to create a cluster and induce latency trying.
4088 	 */
4089 	if (space_info->max_extent_size) {
4090 		spin_lock(&space_info->lock);
4091 		if (space_info->max_extent_size &&
4092 		    ffe_ctl->num_bytes > space_info->max_extent_size) {
4093 			ins->offset = space_info->max_extent_size;
4094 			spin_unlock(&space_info->lock);
4095 			return -ENOSPC;
4096 		} else if (space_info->max_extent_size) {
4097 			ffe_ctl->use_cluster = false;
4098 		}
4099 		spin_unlock(&space_info->lock);
4100 	}
4101 
4102 	ffe_ctl->last_ptr = fetch_cluster_info(fs_info, space_info,
4103 					       &ffe_ctl->empty_cluster);
4104 	if (ffe_ctl->last_ptr) {
4105 		struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr;
4106 
4107 		spin_lock(&last_ptr->lock);
4108 		if (last_ptr->block_group)
4109 			ffe_ctl->hint_byte = last_ptr->window_start;
4110 		if (last_ptr->fragmented) {
4111 			/*
4112 			 * We still set window_start so we can keep track of the
4113 			 * last place we found an allocation to try and save
4114 			 * some time.
4115 			 */
4116 			ffe_ctl->hint_byte = last_ptr->window_start;
4117 			ffe_ctl->use_cluster = false;
4118 		}
4119 		spin_unlock(&last_ptr->lock);
4120 	}
4121 
4122 	return 0;
4123 }
4124 
4125 static int prepare_allocation(struct btrfs_fs_info *fs_info,
4126 			      struct find_free_extent_ctl *ffe_ctl,
4127 			      struct btrfs_space_info *space_info,
4128 			      struct btrfs_key *ins)
4129 {
4130 	switch (ffe_ctl->policy) {
4131 	case BTRFS_EXTENT_ALLOC_CLUSTERED:
4132 		return prepare_allocation_clustered(fs_info, ffe_ctl,
4133 						    space_info, ins);
4134 	case BTRFS_EXTENT_ALLOC_ZONED:
4135 		if (ffe_ctl->for_treelog) {
4136 			spin_lock(&fs_info->treelog_bg_lock);
4137 			if (fs_info->treelog_bg)
4138 				ffe_ctl->hint_byte = fs_info->treelog_bg;
4139 			spin_unlock(&fs_info->treelog_bg_lock);
4140 		}
4141 		if (ffe_ctl->for_data_reloc) {
4142 			spin_lock(&fs_info->relocation_bg_lock);
4143 			if (fs_info->data_reloc_bg)
4144 				ffe_ctl->hint_byte = fs_info->data_reloc_bg;
4145 			spin_unlock(&fs_info->relocation_bg_lock);
4146 		}
4147 		return 0;
4148 	default:
4149 		BUG();
4150 	}
4151 }
4152 
4153 /*
4154  * Walks the btree of allocated extents and finds a hole of a given size.
4155  * The key ins is changed to record the hole:
4156  * ins->objectid == start position
4157  * ins->type == BTRFS_EXTENT_ITEM_KEY
4158  * ins->offset == the size of the hole.
4159  * Any available blocks before search_start are skipped.
4160  *
4161  * If there is no suitable free space, we will record the size of the
4162  * largest free space extent found instead.
4163  *
4164  * The overall logic and call chain:
4165  *
4166  * find_free_extent()
4167  * |- Iterate through all block groups
4168  * |  |- Get a valid block group
4169  * |  |- Try to do clustered allocation in that block group
4170  * |  |- Try to do unclustered allocation in that block group
4171  * |  |- Check if the result is valid
4172  * |  |  |- If valid, then exit
4173  * |  |- Jump to next block group
4174  * |
4175  * |- Push harder to find free extents
4176  *    |- If not found, re-iterate all block groups
4177  */
4178 static noinline int find_free_extent(struct btrfs_root *root,
4179 				u64 ram_bytes, u64 num_bytes, u64 empty_size,
4180 				u64 hint_byte_orig, struct btrfs_key *ins,
4181 				u64 flags, int delalloc)
4182 {
4183 	struct btrfs_fs_info *fs_info = root->fs_info;
4184 	int ret = 0;
4185 	int cache_block_group_error = 0;
4186 	struct btrfs_block_group *block_group = NULL;
4187 	struct find_free_extent_ctl ffe_ctl = {0};
4188 	struct btrfs_space_info *space_info;
4189 	bool full_search = false;
4190 	bool for_treelog = (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID);
4191 	bool for_data_reloc = (btrfs_is_data_reloc_root(root) &&
4192 				       flags & BTRFS_BLOCK_GROUP_DATA);
4193 
4194 	WARN_ON(num_bytes < fs_info->sectorsize);
4195 
4196 	ffe_ctl.num_bytes = num_bytes;
4197 	ffe_ctl.empty_size = empty_size;
4198 	ffe_ctl.flags = flags;
4199 	ffe_ctl.search_start = 0;
4200 	ffe_ctl.delalloc = delalloc;
4201 	ffe_ctl.index = btrfs_bg_flags_to_raid_index(flags);
4202 	ffe_ctl.have_caching_bg = false;
4203 	ffe_ctl.orig_have_caching_bg = false;
4204 	ffe_ctl.found_offset = 0;
4205 	ffe_ctl.hint_byte = hint_byte_orig;
4206 	ffe_ctl.for_treelog = for_treelog;
4207 	ffe_ctl.for_data_reloc = for_data_reloc;
4208 	ffe_ctl.policy = BTRFS_EXTENT_ALLOC_CLUSTERED;
4209 
4210 	/* For clustered allocation */
4211 	ffe_ctl.retry_clustered = false;
4212 	ffe_ctl.retry_unclustered = false;
4213 	ffe_ctl.last_ptr = NULL;
4214 	ffe_ctl.use_cluster = true;
4215 
4216 	if (btrfs_is_zoned(fs_info))
4217 		ffe_ctl.policy = BTRFS_EXTENT_ALLOC_ZONED;
4218 
4219 	ins->type = BTRFS_EXTENT_ITEM_KEY;
4220 	ins->objectid = 0;
4221 	ins->offset = 0;
4222 
4223 	trace_find_free_extent(root, num_bytes, empty_size, flags);
4224 
4225 	space_info = btrfs_find_space_info(fs_info, flags);
4226 	if (!space_info) {
4227 		btrfs_err(fs_info, "No space info for %llu", flags);
4228 		return -ENOSPC;
4229 	}
4230 
4231 	ret = prepare_allocation(fs_info, &ffe_ctl, space_info, ins);
4232 	if (ret < 0)
4233 		return ret;
4234 
4235 	ffe_ctl.search_start = max(ffe_ctl.search_start,
4236 				   first_logical_byte(fs_info, 0));
4237 	ffe_ctl.search_start = max(ffe_ctl.search_start, ffe_ctl.hint_byte);
4238 	if (ffe_ctl.search_start == ffe_ctl.hint_byte) {
4239 		block_group = btrfs_lookup_block_group(fs_info,
4240 						       ffe_ctl.search_start);
4241 		/*
4242 		 * we don't want to use the block group if it doesn't match our
4243 		 * allocation bits, or if it's not cached.
4244 		 *
4245 		 * However if we are re-searching with an ideal block group
4246 		 * picked out then we don't care that the block group is cached.
4247 		 */
4248 		if (block_group && block_group_bits(block_group, flags) &&
4249 		    block_group->cached != BTRFS_CACHE_NO) {
4250 			down_read(&space_info->groups_sem);
4251 			if (list_empty(&block_group->list) ||
4252 			    block_group->ro) {
4253 				/*
4254 				 * someone is removing this block group,
4255 				 * we can't jump into the have_block_group
4256 				 * target because our list pointers are not
4257 				 * valid
4258 				 */
4259 				btrfs_put_block_group(block_group);
4260 				up_read(&space_info->groups_sem);
4261 			} else {
4262 				ffe_ctl.index = btrfs_bg_flags_to_raid_index(
4263 						block_group->flags);
4264 				btrfs_lock_block_group(block_group, delalloc);
4265 				goto have_block_group;
4266 			}
4267 		} else if (block_group) {
4268 			btrfs_put_block_group(block_group);
4269 		}
4270 	}
4271 search:
4272 	ffe_ctl.have_caching_bg = false;
4273 	if (ffe_ctl.index == btrfs_bg_flags_to_raid_index(flags) ||
4274 	    ffe_ctl.index == 0)
4275 		full_search = true;
4276 	down_read(&space_info->groups_sem);
4277 	list_for_each_entry(block_group,
4278 			    &space_info->block_groups[ffe_ctl.index], list) {
4279 		struct btrfs_block_group *bg_ret;
4280 
4281 		/* If the block group is read-only, we can skip it entirely. */
4282 		if (unlikely(block_group->ro)) {
4283 			if (for_treelog)
4284 				btrfs_clear_treelog_bg(block_group);
4285 			if (ffe_ctl.for_data_reloc)
4286 				btrfs_clear_data_reloc_bg(block_group);
4287 			continue;
4288 		}
4289 
4290 		btrfs_grab_block_group(block_group, delalloc);
4291 		ffe_ctl.search_start = block_group->start;
4292 
4293 		/*
4294 		 * this can happen if we end up cycling through all the
4295 		 * raid types, but we want to make sure we only allocate
4296 		 * for the proper type.
4297 		 */
4298 		if (!block_group_bits(block_group, flags)) {
4299 			u64 extra = BTRFS_BLOCK_GROUP_DUP |
4300 				BTRFS_BLOCK_GROUP_RAID1_MASK |
4301 				BTRFS_BLOCK_GROUP_RAID56_MASK |
4302 				BTRFS_BLOCK_GROUP_RAID10;
4303 
4304 			/*
4305 			 * if they asked for extra copies and this block group
4306 			 * doesn't provide them, bail.  This does allow us to
4307 			 * fill raid0 from raid1.
4308 			 */
4309 			if ((flags & extra) && !(block_group->flags & extra))
4310 				goto loop;
4311 
4312 			/*
4313 			 * This block group has different flags than we want.
4314 			 * It's possible that we have MIXED_GROUP flag but no
4315 			 * block group is mixed.  Just skip such a block group.
4316 			 */
4317 			btrfs_release_block_group(block_group, delalloc);
4318 			continue;
4319 		}
4320 
4321 have_block_group:
4322 		ffe_ctl.cached = btrfs_block_group_done(block_group);
4323 		if (unlikely(!ffe_ctl.cached)) {
4324 			ffe_ctl.have_caching_bg = true;
4325 			ret = btrfs_cache_block_group(block_group, false);
4326 
4327 			/*
4328 			 * If we get ENOMEM here or something else we want to
4329 			 * try other block groups, because it may not be fatal.
4330 			 * However if we can't find anything else we need to
4331 			 * save our return here so that we return the actual
4332 			 * error that caused problems, not ENOSPC.
4333 			 */
4334 			if (ret < 0) {
4335 				if (!cache_block_group_error)
4336 					cache_block_group_error = ret;
4337 				ret = 0;
4338 				goto loop;
4339 			}
4340 			ret = 0;
4341 		}
4342 
4343 		if (unlikely(block_group->cached == BTRFS_CACHE_ERROR)) {
4344 			if (!cache_block_group_error)
4345 				cache_block_group_error = -EIO;
4346 			goto loop;
4347 		}
4348 
4349 		bg_ret = NULL;
4350 		ret = do_allocation(block_group, &ffe_ctl, &bg_ret);
4351 		if (ret == 0) {
4352 			if (bg_ret && bg_ret != block_group) {
4353 				btrfs_release_block_group(block_group, delalloc);
4354 				block_group = bg_ret;
4355 			}
4356 		} else if (ret == -EAGAIN) {
4357 			goto have_block_group;
4358 		} else if (ret > 0) {
4359 			goto loop;
4360 		}
4361 
4362 		/* Checks */
4363 		ffe_ctl.search_start = round_up(ffe_ctl.found_offset,
4364 					     fs_info->stripesize);
4365 
4366 		/* move on to the next group */
4367 		if (ffe_ctl.search_start + num_bytes >
4368 		    block_group->start + block_group->length) {
4369 			btrfs_add_free_space_unused(block_group,
4370 					    ffe_ctl.found_offset, num_bytes);
4371 			goto loop;
4372 		}
4373 
4374 		if (ffe_ctl.found_offset < ffe_ctl.search_start)
4375 			btrfs_add_free_space_unused(block_group,
4376 					ffe_ctl.found_offset,
4377 					ffe_ctl.search_start - ffe_ctl.found_offset);
4378 
4379 		ret = btrfs_add_reserved_bytes(block_group, ram_bytes,
4380 				num_bytes, delalloc);
4381 		if (ret == -EAGAIN) {
4382 			btrfs_add_free_space_unused(block_group,
4383 					ffe_ctl.found_offset, num_bytes);
4384 			goto loop;
4385 		}
4386 		btrfs_inc_block_group_reservations(block_group);
4387 
4388 		/* we are all good, let's return */
4389 		ins->objectid = ffe_ctl.search_start;
4390 		ins->offset = num_bytes;
4391 
4392 		trace_btrfs_reserve_extent(block_group, ffe_ctl.search_start,
4393 					   num_bytes);
4394 		btrfs_release_block_group(block_group, delalloc);
4395 		break;
4396 loop:
4397 		release_block_group(block_group, &ffe_ctl, delalloc);
4398 		cond_resched();
4399 	}
4400 	up_read(&space_info->groups_sem);
4401 
4402 	ret = find_free_extent_update_loop(fs_info, ins, &ffe_ctl, full_search);
4403 	if (ret > 0)
4404 		goto search;
4405 
4406 	if (ret == -ENOSPC && !cache_block_group_error) {
4407 		/*
4408 		 * Use ffe_ctl->total_free_space as fallback if we can't find
4409 		 * any contiguous hole.
4410 		 */
4411 		if (!ffe_ctl.max_extent_size)
4412 			ffe_ctl.max_extent_size = ffe_ctl.total_free_space;
4413 		spin_lock(&space_info->lock);
4414 		space_info->max_extent_size = ffe_ctl.max_extent_size;
4415 		spin_unlock(&space_info->lock);
4416 		ins->offset = ffe_ctl.max_extent_size;
4417 	} else if (ret == -ENOSPC) {
4418 		ret = cache_block_group_error;
4419 	}
4420 	return ret;
4421 }
4422 
4423 /*
4424  * btrfs_reserve_extent - entry point to the extent allocator. Tries to find a
4425  *			  hole that is at least as big as @num_bytes.
4426  *
4427  * @root           -	The root that will contain this extent
4428  *
4429  * @ram_bytes      -	The amount of space in ram that @num_bytes take. This
4430  *			is used for accounting purposes. This value differs
4431  *			from @num_bytes only in the case of compressed extents.
4432  *
4433  * @num_bytes      -	Number of bytes to allocate on-disk.
4434  *
4435  * @min_alloc_size -	Indicates the minimum amount of space that the
4436  *			allocator should try to satisfy. In some cases
4437  *			@num_bytes may be larger than what is required, and if
4438  *			the filesystem is fragmented then the allocation fails.
4439  *			However, @min_alloc_size gives us a chance to retry
4440  *			with a smaller allocation.
4441  *
4442  * @empty_size     -	A hint that you plan on doing more COW. This is the
4443  *			size in bytes the allocator should try to find free
4444  *			next to the block it returns.  This is just a hint and
4445  *			may be ignored by the allocator.
4446  *
4447  * @hint_byte      -	Hint to the allocator to start searching above the byte
4448  *			address passed. It might be ignored.
4449  *
4450  * @ins            -	This key is modified to record the found hole. It will
4451  *			have the following values:
4452  *			ins->objectid == start position
4453  *			ins->type == BTRFS_EXTENT_ITEM_KEY
4454  *			ins->offset == the size of the hole.
4455  *
4456  * @is_data        -	Boolean flag indicating whether an extent is
4457  *			allocated for data (true) or metadata (false)
4458  *
4459  * @delalloc       -	Boolean flag indicating whether this allocation is for
4460  *			delalloc or not. If 'true' data_rwsem of block groups
4461  *			is going to be acquired.
4462  *
4463  *
4464  * Returns 0 when an allocation succeeded or < 0 when an error occurred. In
4465  * case -ENOSPC is returned then @ins->offset will contain the size of the
4466  * largest available hole the allocator managed to find.
4467  */
4468 int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes,
4469 			 u64 num_bytes, u64 min_alloc_size,
4470 			 u64 empty_size, u64 hint_byte,
4471 			 struct btrfs_key *ins, int is_data, int delalloc)
4472 {
4473 	struct btrfs_fs_info *fs_info = root->fs_info;
4474 	bool final_tried = num_bytes == min_alloc_size;
4475 	u64 flags;
4476 	int ret;
4477 	bool for_treelog = (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID);
4478 	bool for_data_reloc = (btrfs_is_data_reloc_root(root) && is_data);
4479 
4480 	flags = get_alloc_profile_by_root(root, is_data);
4481 again:
4482 	WARN_ON(num_bytes < fs_info->sectorsize);
4483 	ret = find_free_extent(root, ram_bytes, num_bytes, empty_size,
4484 			       hint_byte, ins, flags, delalloc);
4485 	if (!ret && !is_data) {
4486 		btrfs_dec_block_group_reservations(fs_info, ins->objectid);
4487 	} else if (ret == -ENOSPC) {
4488 		if (!final_tried && ins->offset) {
4489 			num_bytes = min(num_bytes >> 1, ins->offset);
4490 			num_bytes = round_down(num_bytes,
4491 					       fs_info->sectorsize);
4492 			num_bytes = max(num_bytes, min_alloc_size);
4493 			ram_bytes = num_bytes;
4494 			if (num_bytes == min_alloc_size)
4495 				final_tried = true;
4496 			goto again;
4497 		} else if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
4498 			struct btrfs_space_info *sinfo;
4499 
4500 			sinfo = btrfs_find_space_info(fs_info, flags);
4501 			btrfs_err(fs_info,
4502 	"allocation failed flags %llu, wanted %llu tree-log %d, relocation: %d",
4503 				  flags, num_bytes, for_treelog, for_data_reloc);
4504 			if (sinfo)
4505 				btrfs_dump_space_info(fs_info, sinfo,
4506 						      num_bytes, 1);
4507 		}
4508 	}
4509 
4510 	return ret;
4511 }
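
/*
 * Illustrative sketch, not part of the original file: how a caller might use
 * btrfs_reserve_extent() and hand the space back with
 * btrfs_free_reserved_extent().  The function name and locals are
 * hypothetical; the two btrfs_* calls match the definitions in this file.
 */
static inline int example_reserve_data_extent(struct btrfs_root *root, u64 len,
					      struct btrfs_key *ins)
{
	int ret;

	/* Let the allocator shrink the request down to a single sector. */
	ret = btrfs_reserve_extent(root, len, len, root->fs_info->sectorsize,
				   0, 0, ins, 1, 1);
	if (ret)
		return ret;	/* on -ENOSPC, ins->offset is the biggest hole */

	/*
	 * A real caller would now write into the range
	 * [ins->objectid, ins->objectid + ins->offset); this sketch just
	 * returns the reservation.
	 */
	return btrfs_free_reserved_extent(root->fs_info, ins->objectid,
					  ins->offset, 1);
}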
4512 
4513 int btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info,
4514 			       u64 start, u64 len, int delalloc)
4515 {
4516 	struct btrfs_block_group *cache;
4517 
4518 	cache = btrfs_lookup_block_group(fs_info, start);
4519 	if (!cache) {
4520 		btrfs_err(fs_info, "Unable to find block group for %llu",
4521 			  start);
4522 		return -ENOSPC;
4523 	}
4524 
4525 	btrfs_add_free_space(cache, start, len);
4526 	btrfs_free_reserved_bytes(cache, len, delalloc);
4527 	trace_btrfs_reserved_extent_free(fs_info, start, len);
4528 
4529 	btrfs_put_block_group(cache);
4530 	return 0;
4531 }
4532 
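/*
 * Unlike btrfs_free_reserved_extent() above, this does not return the range
 * to the free space cache; it pins the extent so that it cannot be
 * reallocated before the current transaction commits.
 */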
4533 int btrfs_pin_reserved_extent(struct btrfs_trans_handle *trans, u64 start,
4534 			      u64 len)
4535 {
4536 	struct btrfs_block_group *cache;
4537 	int ret = 0;
4538 
4539 	cache = btrfs_lookup_block_group(trans->fs_info, start);
4540 	if (!cache) {
4541 		btrfs_err(trans->fs_info, "unable to find block group for %llu",
4542 			  start);
4543 		return -ENOSPC;
4544 	}
4545 
4546 	ret = pin_down_extent(trans, cache, start, len, 1);
4547 	btrfs_put_block_group(cache);
4548 	return ret;
4549 }
4550 
4551 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
4552 				      u64 parent, u64 root_objectid,
4553 				      u64 flags, u64 owner, u64 offset,
4554 				      struct btrfs_key *ins, int ref_mod)
4555 {
4556 	struct btrfs_fs_info *fs_info = trans->fs_info;
4557 	int ret;
4558 	struct btrfs_extent_item *extent_item;
4559 	struct btrfs_extent_inline_ref *iref;
4560 	struct btrfs_path *path;
4561 	struct extent_buffer *leaf;
4562 	int type;
4563 	u32 size;
4564 
4565 	if (parent > 0)
4566 		type = BTRFS_SHARED_DATA_REF_KEY;
4567 	else
4568 		type = BTRFS_EXTENT_DATA_REF_KEY;
4569 
4570 	size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
4571 
4572 	path = btrfs_alloc_path();
4573 	if (!path)
4574 		return -ENOMEM;
4575 
4576 	ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
4577 				      ins, size);
4578 	if (ret) {
4579 		btrfs_free_path(path);
4580 		return ret;
4581 	}
4582 
4583 	leaf = path->nodes[0];
4584 	extent_item = btrfs_item_ptr(leaf, path->slots[0],
4585 				     struct btrfs_extent_item);
4586 	btrfs_set_extent_refs(leaf, extent_item, ref_mod);
4587 	btrfs_set_extent_generation(leaf, extent_item, trans->transid);
4588 	btrfs_set_extent_flags(leaf, extent_item,
4589 			       flags | BTRFS_EXTENT_FLAG_DATA);
4590 
4591 	iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
4592 	btrfs_set_extent_inline_ref_type(leaf, iref, type);
4593 	if (parent > 0) {
4594 		struct btrfs_shared_data_ref *ref;
4595 		ref = (struct btrfs_shared_data_ref *)(iref + 1);
4596 		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
4597 		btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
4598 	} else {
4599 		struct btrfs_extent_data_ref *ref;
4600 		ref = (struct btrfs_extent_data_ref *)(&iref->offset);
4601 		btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
4602 		btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
4603 		btrfs_set_extent_data_ref_offset(leaf, ref, offset);
4604 		btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
4605 	}
4606 
4607 	btrfs_mark_buffer_dirty(path->nodes[0]);
4608 	btrfs_free_path(path);
4609 
4610 	ret = remove_from_free_space_tree(trans, ins->objectid, ins->offset);
4611 	if (ret)
4612 		return ret;
4613 
4614 	ret = btrfs_update_block_group(trans, ins->objectid, ins->offset, 1);
4615 	if (ret) { /* -ENOENT, logic error */
4616 		btrfs_err(fs_info, "update block group failed for %llu %llu",
4617 			ins->objectid, ins->offset);
4618 		BUG();
4619 	}
4620 	trace_btrfs_reserved_extent_alloc(fs_info, ins->objectid, ins->offset);
4621 	return ret;
4622 }
4623 
4624 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
4625 				     struct btrfs_delayed_ref_node *node,
4626 				     struct btrfs_delayed_extent_op *extent_op)
4627 {
4628 	struct btrfs_fs_info *fs_info = trans->fs_info;
4629 	int ret;
4630 	struct btrfs_extent_item *extent_item;
4631 	struct btrfs_key extent_key;
4632 	struct btrfs_tree_block_info *block_info;
4633 	struct btrfs_extent_inline_ref *iref;
4634 	struct btrfs_path *path;
4635 	struct extent_buffer *leaf;
4636 	struct btrfs_delayed_tree_ref *ref;
4637 	u32 size = sizeof(*extent_item) + sizeof(*iref);
4638 	u64 num_bytes;
4639 	u64 flags = extent_op->flags_to_set;
4640 	bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
4641 
4642 	ref = btrfs_delayed_node_to_tree_ref(node);
4643 
4644 	extent_key.objectid = node->bytenr;
4645 	if (skinny_metadata) {
4646 		extent_key.offset = ref->level;
4647 		extent_key.type = BTRFS_METADATA_ITEM_KEY;
4648 		num_bytes = fs_info->nodesize;
4649 	} else {
4650 		extent_key.offset = node->num_bytes;
4651 		extent_key.type = BTRFS_EXTENT_ITEM_KEY;
4652 		size += sizeof(*block_info);
4653 		num_bytes = node->num_bytes;
4654 	}
4655 
4656 	path = btrfs_alloc_path();
4657 	if (!path)
4658 		return -ENOMEM;
4659 
4660 	ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
4661 				      &extent_key, size);
4662 	if (ret) {
4663 		btrfs_free_path(path);
4664 		return ret;
4665 	}
4666 
4667 	leaf = path->nodes[0];
4668 	extent_item = btrfs_item_ptr(leaf, path->slots[0],
4669 				     struct btrfs_extent_item);
4670 	btrfs_set_extent_refs(leaf, extent_item, 1);
4671 	btrfs_set_extent_generation(leaf, extent_item, trans->transid);
4672 	btrfs_set_extent_flags(leaf, extent_item,
4673 			       flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
4674 
4675 	if (skinny_metadata) {
4676 		iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
4677 	} else {
4678 		block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
4679 		btrfs_set_tree_block_key(leaf, block_info, &extent_op->key);
4680 		btrfs_set_tree_block_level(leaf, block_info, ref->level);
4681 		iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
4682 	}
4683 
4684 	if (node->type == BTRFS_SHARED_BLOCK_REF_KEY) {
4685 		btrfs_set_extent_inline_ref_type(leaf, iref,
4686 						 BTRFS_SHARED_BLOCK_REF_KEY);
4687 		btrfs_set_extent_inline_ref_offset(leaf, iref, ref->parent);
4688 	} else {
4689 		btrfs_set_extent_inline_ref_type(leaf, iref,
4690 						 BTRFS_TREE_BLOCK_REF_KEY);
4691 		btrfs_set_extent_inline_ref_offset(leaf, iref, ref->root);
4692 	}
4693 
4694 	btrfs_mark_buffer_dirty(leaf);
4695 	btrfs_free_path(path);
4696 
4697 	ret = remove_from_free_space_tree(trans, extent_key.objectid,
4698 					  num_bytes);
4699 	if (ret)
4700 		return ret;
4701 
4702 	ret = btrfs_update_block_group(trans, extent_key.objectid,
4703 				       fs_info->nodesize, 1);
4704 	if (ret) { /* -ENOENT, logic error */
4705 		btrfs_err(fs_info, "update block group failed for %llu %llu",
4706 			extent_key.objectid, extent_key.offset);
4707 		BUG();
4708 	}
4709 
4710 	trace_btrfs_reserved_extent_alloc(fs_info, extent_key.objectid,
4711 					  fs_info->nodesize);
4712 	return ret;
4713 }
4714 
4715 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
4716 				     struct btrfs_root *root, u64 owner,
4717 				     u64 offset, u64 ram_bytes,
4718 				     struct btrfs_key *ins)
4719 {
4720 	struct btrfs_ref generic_ref = { 0 };
4721 
4722 	BUG_ON(root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID);
4723 
4724 	btrfs_init_generic_ref(&generic_ref, BTRFS_ADD_DELAYED_EXTENT,
4725 			       ins->objectid, ins->offset, 0);
4726 	btrfs_init_data_ref(&generic_ref, root->root_key.objectid, owner,
4727 			    offset, 0, false);
4728 	btrfs_ref_tree_mod(root->fs_info, &generic_ref);
4729 
4730 	return btrfs_add_delayed_data_ref(trans, &generic_ref, ram_bytes);
4731 }
4732 
4733 /*
4734  * This is used by the tree logging recovery code.  It records that
4735  * an extent has been allocated and makes sure to clear the free
4736  * space cache bits as well.
4737  */
4738 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
4739 				   u64 root_objectid, u64 owner, u64 offset,
4740 				   struct btrfs_key *ins)
4741 {
4742 	struct btrfs_fs_info *fs_info = trans->fs_info;
4743 	int ret;
4744 	struct btrfs_block_group *block_group;
4745 	struct btrfs_space_info *space_info;
4746 
4747 	/*
4748 	 * Mixed block groups will exclude before processing the log so we only
4749 	 * need to do the exclude dance if this fs isn't mixed.
4750 	 */
4751 	if (!btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
4752 		ret = __exclude_logged_extent(fs_info, ins->objectid,
4753 					      ins->offset);
4754 		if (ret)
4755 			return ret;
4756 	}
4757 
4758 	block_group = btrfs_lookup_block_group(fs_info, ins->objectid);
4759 	if (!block_group)
4760 		return -EINVAL;
4761 
4762 	space_info = block_group->space_info;
4763 	spin_lock(&space_info->lock);
4764 	spin_lock(&block_group->lock);
4765 	space_info->bytes_reserved += ins->offset;
4766 	block_group->reserved += ins->offset;
4767 	spin_unlock(&block_group->lock);
4768 	spin_unlock(&space_info->lock);
4769 
4770 	ret = alloc_reserved_file_extent(trans, 0, root_objectid, 0, owner,
4771 					 offset, ins, 1);
4772 	if (ret)
4773 		btrfs_pin_extent(trans, ins->objectid, ins->offset, 1);
4774 	btrfs_put_block_group(block_group);
4775 	return ret;
4776 }
4777 
4778 static struct extent_buffer *
4779 btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4780 		      u64 bytenr, int level, u64 owner,
4781 		      enum btrfs_lock_nesting nest)
4782 {
4783 	struct btrfs_fs_info *fs_info = root->fs_info;
4784 	struct extent_buffer *buf;
4785 	u64 lockdep_owner = owner;
4786 
4787 	buf = btrfs_find_create_tree_block(fs_info, bytenr, owner, level);
4788 	if (IS_ERR(buf))
4789 		return buf;
4790 
4791 	/*
4792 	 * Extra safety check in case the extent tree is corrupted and the extent
4793 	 * allocator chooses to use a tree block which is already used and
4794 	 * locked.
4795 	 */
4796 	if (buf->lock_owner == current->pid) {
4797 		btrfs_err_rl(fs_info,
4798 "tree block %llu owner %llu already locked by pid=%d, extent tree corruption detected",
4799 			buf->start, btrfs_header_owner(buf), current->pid);
4800 		free_extent_buffer(buf);
4801 		return ERR_PTR(-EUCLEAN);
4802 	}
4803 
4804 	/*
4805 	 * The reloc trees are just snapshots, so we need them to appear to be
4806 	 * just like any other fs tree WRT lockdep.
4807 	 *
4808 	 * The exception however is in replace_path() in relocation, where we
4809 	 * hold the lock on the original fs root and then search for the reloc
4810 	 * root.  At that point we need to make sure any reloc root buffers are
4811 	 * set to the BTRFS_TREE_RELOC_OBJECTID lockdep class in order to make
4812 	 * lockdep happy.
4813 	 */
4814 	if (lockdep_owner == BTRFS_TREE_RELOC_OBJECTID &&
4815 	    !test_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &root->state))
4816 		lockdep_owner = BTRFS_FS_TREE_OBJECTID;
4817 
4818 	/* btrfs_clean_tree_block() accesses generation field. */
4819 	btrfs_set_header_generation(buf, trans->transid);
4820 
4821 	/*
4822 	 * This needs to stay, because we could allocate a freed block from an
4823 	 * old tree into a new tree, so we need to make sure this new block is
4824 	 * set to the appropriate level and owner.
4825 	 */
4826 	btrfs_set_buffer_lockdep_class(lockdep_owner, buf, level);
4827 
4828 	__btrfs_tree_lock(buf, nest);
4829 	btrfs_clean_tree_block(buf);
4830 	clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
4831 	clear_bit(EXTENT_BUFFER_NO_CHECK, &buf->bflags);
4832 
4833 	set_extent_buffer_uptodate(buf);
4834 
4835 	memzero_extent_buffer(buf, 0, sizeof(struct btrfs_header));
4836 	btrfs_set_header_level(buf, level);
4837 	btrfs_set_header_bytenr(buf, buf->start);
4838 	btrfs_set_header_generation(buf, trans->transid);
4839 	btrfs_set_header_backref_rev(buf, BTRFS_MIXED_BACKREF_REV);
4840 	btrfs_set_header_owner(buf, owner);
4841 	write_extent_buffer_fsid(buf, fs_info->fs_devices->metadata_uuid);
4842 	write_extent_buffer_chunk_tree_uuid(buf, fs_info->chunk_tree_uuid);
4843 	if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
4844 		buf->log_index = root->log_transid % 2;
4845 		/*
4846 		 * we allow two log transactions at a time; use different
4847 		 * EXTENT bits to differentiate dirty pages.
4848 		 */
4849 		if (buf->log_index == 0)
4850 			set_extent_dirty(&root->dirty_log_pages, buf->start,
4851 					buf->start + buf->len - 1, GFP_NOFS);
4852 		else
4853 			set_extent_new(&root->dirty_log_pages, buf->start,
4854 					buf->start + buf->len - 1);
4855 	} else {
4856 		buf->log_index = -1;
4857 		set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
4858 			 buf->start + buf->len - 1, GFP_NOFS);
4859 	}
4860 	/* this returns a buffer locked for blocking */
4861 	return buf;
4862 }
4863 
4864 /*
4865  * Finds a free extent and does all the dirty work required for allocation.
4866  * Returns the tree buffer or an ERR_PTR on error.
4867  */
4868 struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
4869 					     struct btrfs_root *root,
4870 					     u64 parent, u64 root_objectid,
4871 					     const struct btrfs_disk_key *key,
4872 					     int level, u64 hint,
4873 					     u64 empty_size,
4874 					     enum btrfs_lock_nesting nest)
4875 {
4876 	struct btrfs_fs_info *fs_info = root->fs_info;
4877 	struct btrfs_key ins;
4878 	struct btrfs_block_rsv *block_rsv;
4879 	struct extent_buffer *buf;
4880 	struct btrfs_delayed_extent_op *extent_op;
4881 	struct btrfs_ref generic_ref = { 0 };
4882 	u64 flags = 0;
4883 	int ret;
4884 	u32 blocksize = fs_info->nodesize;
4885 	bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
4886 
4887 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
4888 	if (btrfs_is_testing(fs_info)) {
4889 		buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr,
4890 					    level, root_objectid, nest);
4891 		if (!IS_ERR(buf))
4892 			root->alloc_bytenr += blocksize;
4893 		return buf;
4894 	}
4895 #endif
4896 
4897 	block_rsv = btrfs_use_block_rsv(trans, root, blocksize);
4898 	if (IS_ERR(block_rsv))
4899 		return ERR_CAST(block_rsv);
4900 
4901 	ret = btrfs_reserve_extent(root, blocksize, blocksize, blocksize,
4902 				   empty_size, hint, &ins, 0, 0);
4903 	if (ret)
4904 		goto out_unuse;
4905 
4906 	buf = btrfs_init_new_buffer(trans, root, ins.objectid, level,
4907 				    root_objectid, nest);
4908 	if (IS_ERR(buf)) {
4909 		ret = PTR_ERR(buf);
4910 		goto out_free_reserved;
4911 	}
4912 
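	/*
	 * Blocks of the reloc tree always use full backrefs; if no explicit
	 * parent was passed in, the new block's own bytenr is recorded as the
	 * parent for backref purposes.
	 */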
4913 	if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
4914 		if (parent == 0)
4915 			parent = ins.objectid;
4916 		flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
4917 	} else
4918 		BUG_ON(parent > 0);
4919 
4920 	if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
4921 		extent_op = btrfs_alloc_delayed_extent_op();
4922 		if (!extent_op) {
4923 			ret = -ENOMEM;
4924 			goto out_free_buf;
4925 		}
4926 		if (key)
4927 			memcpy(&extent_op->key, key, sizeof(extent_op->key));
4928 		else
4929 			memset(&extent_op->key, 0, sizeof(extent_op->key));
4930 		extent_op->flags_to_set = flags;
4931 		extent_op->update_key = !skinny_metadata;
4932 		extent_op->update_flags = true;
4933 		extent_op->is_data = false;
4934 		extent_op->level = level;
4935 
4936 		btrfs_init_generic_ref(&generic_ref, BTRFS_ADD_DELAYED_EXTENT,
4937 				       ins.objectid, ins.offset, parent);
4938 		generic_ref.real_root = root->root_key.objectid;
4939 		btrfs_init_tree_ref(&generic_ref, level, root_objectid,
4940 				    root->root_key.objectid, false);
4941 		btrfs_ref_tree_mod(fs_info, &generic_ref);
4942 		ret = btrfs_add_delayed_tree_ref(trans, &generic_ref, extent_op);
4943 		if (ret)
4944 			goto out_free_delayed;
4945 	}
4946 	return buf;
4947 
4948 out_free_delayed:
4949 	btrfs_free_delayed_extent_op(extent_op);
4950 out_free_buf:
4951 	btrfs_tree_unlock(buf);
4952 	free_extent_buffer(buf);
4953 out_free_reserved:
4954 	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 0);
4955 out_unuse:
4956 	btrfs_unuse_block_rsv(fs_info, block_rsv, blocksize);
4957 	return ERR_PTR(ret);
4958 }
4959 
4960 struct walk_control {
4961 	u64 refs[BTRFS_MAX_LEVEL];
4962 	u64 flags[BTRFS_MAX_LEVEL];
4963 	struct btrfs_key update_progress;
4964 	struct btrfs_key drop_progress;
4965 	int drop_level;
4966 	int stage;
4967 	int level;
4968 	int shared_level;
4969 	int update_ref;
4970 	int keep_locks;
4971 	int reada_slot;
4972 	int reada_count;
4973 	int restarted;
4974 };
4975 
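/*
 * Stages of the walk: DROP_REFERENCE drops the reference of each block that
 * is no longer shared, while UPDATE_BACKREF converts a shared subtree to
 * full backrefs before the walk switches back to DROP_REFERENCE.
 */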
4976 #define DROP_REFERENCE	1
4977 #define UPDATE_BACKREF	2
4978 
4979 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
4980 				     struct btrfs_root *root,
4981 				     struct walk_control *wc,
4982 				     struct btrfs_path *path)
4983 {
4984 	struct btrfs_fs_info *fs_info = root->fs_info;
4985 	u64 bytenr;
4986 	u64 generation;
4987 	u64 refs;
4988 	u64 flags;
4989 	u32 nritems;
4990 	struct btrfs_key key;
4991 	struct extent_buffer *eb;
4992 	int ret;
4993 	int slot;
4994 	int nread = 0;
4995 
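	/*
	 * Scale the readahead window adaptively: shrink it if we re-enter
	 * before the previous window was consumed, otherwise grow it, capped
	 * at one node's worth of pointers.
	 */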
4996 	if (path->slots[wc->level] < wc->reada_slot) {
4997 		wc->reada_count = wc->reada_count * 2 / 3;
4998 		wc->reada_count = max(wc->reada_count, 2);
4999 	} else {
5000 		wc->reada_count = wc->reada_count * 3 / 2;
5001 		wc->reada_count = min_t(int, wc->reada_count,
5002 					BTRFS_NODEPTRS_PER_BLOCK(fs_info));
5003 	}
5004 
5005 	eb = path->nodes[wc->level];
5006 	nritems = btrfs_header_nritems(eb);
5007 
5008 	for (slot = path->slots[wc->level]; slot < nritems; slot++) {
5009 		if (nread >= wc->reada_count)
5010 			break;
5011 
5012 		cond_resched();
5013 		bytenr = btrfs_node_blockptr(eb, slot);
5014 		generation = btrfs_node_ptr_generation(eb, slot);
5015 
5016 		if (slot == path->slots[wc->level])
5017 			goto reada;
5018 
5019 		if (wc->stage == UPDATE_BACKREF &&
5020 		    generation <= root->root_key.offset)
5021 			continue;
5022 
5023 		/* We don't lock the tree block, it's OK to be racy here */
5024 		ret = btrfs_lookup_extent_info(trans, fs_info, bytenr,
5025 					       wc->level - 1, 1, &refs,
5026 					       &flags);
5027 		/* We don't care about errors in readahead. */
5028 		if (ret < 0)
5029 			continue;
5030 		BUG_ON(refs == 0);
5031 
5032 		if (wc->stage == DROP_REFERENCE) {
5033 			if (refs == 1)
5034 				goto reada;
5035 
5036 			if (wc->level == 1 &&
5037 			    (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
5038 				continue;
5039 			if (!wc->update_ref ||
5040 			    generation <= root->root_key.offset)
5041 				continue;
5042 			btrfs_node_key_to_cpu(eb, &key, slot);
5043 			ret = btrfs_comp_cpu_keys(&key,
5044 						  &wc->update_progress);
5045 			if (ret < 0)
5046 				continue;
5047 		} else {
5048 			if (wc->level == 1 &&
5049 			    (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
5050 				continue;
5051 		}
5052 reada:
5053 		btrfs_readahead_node_child(eb, slot);
5054 		nread++;
5055 	}
5056 	wc->reada_slot = slot;
5057 }
5058 
5059 /*
5060  * Helper to process a tree block while walking down the tree.
5061  *
5062  * When wc->stage == UPDATE_BACKREF, this function updates the
5063  * back refs for the pointers in the block.
5064  *
5065  * NOTE: return value 1 means we should stop walking down.
5066  */
5067 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
5068 				   struct btrfs_root *root,
5069 				   struct btrfs_path *path,
5070 				   struct walk_control *wc, int lookup_info)
5071 {
5072 	struct btrfs_fs_info *fs_info = root->fs_info;
5073 	int level = wc->level;
5074 	struct extent_buffer *eb = path->nodes[level];
5075 	u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
5076 	int ret;
5077 
5078 	if (wc->stage == UPDATE_BACKREF &&
5079 	    btrfs_header_owner(eb) != root->root_key.objectid)
5080 		return 1;
5081 
5082 	/*
5083 	 * When the reference count of a tree block is 1, it won't increase
5084 	 * again. Once the full backref flag is set, we never clear it.
5085 	 */
5086 	if (lookup_info &&
5087 	    ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
5088 	     (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
5089 		BUG_ON(!path->locks[level]);
5090 		ret = btrfs_lookup_extent_info(trans, fs_info,
5091 					       eb->start, level, 1,
5092 					       &wc->refs[level],
5093 					       &wc->flags[level]);
5094 		BUG_ON(ret == -ENOMEM);
5095 		if (ret)
5096 			return ret;
5097 		BUG_ON(wc->refs[level] == 0);
5098 	}
5099 
5100 	if (wc->stage == DROP_REFERENCE) {
5101 		if (wc->refs[level] > 1)
5102 			return 1;
5103 
5104 		if (path->locks[level] && !wc->keep_locks) {
5105 			btrfs_tree_unlock_rw(eb, path->locks[level]);
5106 			path->locks[level] = 0;
5107 		}
5108 		return 0;
5109 	}
5110 
5111 	/* wc->stage == UPDATE_BACKREF */
5112 	if (!(wc->flags[level] & flag)) {
5113 		BUG_ON(!path->locks[level]);
5114 		ret = btrfs_inc_ref(trans, root, eb, 1);
5115 		BUG_ON(ret); /* -ENOMEM */
5116 		ret = btrfs_dec_ref(trans, root, eb, 0);
5117 		BUG_ON(ret); /* -ENOMEM */
5118 		ret = btrfs_set_disk_extent_flags(trans, eb, flag,
5119 						  btrfs_header_level(eb), 0);
5120 		BUG_ON(ret); /* -ENOMEM */
5121 		wc->flags[level] |= flag;
5122 	}
5123 
5124 	/*
5125 	 * The block is shared by multiple trees, so it's not good to
5126 	 * keep the tree lock.
5127 	 */
5128 	if (path->locks[level] && level > 0) {
5129 		btrfs_tree_unlock_rw(eb, path->locks[level]);
5130 		path->locks[level] = 0;
5131 	}
5132 	return 0;
5133 }
5134 
5135 /*
5136  * This is used to verify a ref exists for this root to deal with a bug where we
5137  * would have a drop_progress key that hadn't been updated properly.
5138  */
5139 static int check_ref_exists(struct btrfs_trans_handle *trans,
5140 			    struct btrfs_root *root, u64 bytenr, u64 parent,
5141 			    int level)
5142 {
5143 	struct btrfs_path *path;
5144 	struct btrfs_extent_inline_ref *iref;
5145 	int ret;
5146 
5147 	path = btrfs_alloc_path();
5148 	if (!path)
5149 		return -ENOMEM;
5150 
5151 	ret = lookup_extent_backref(trans, path, &iref, bytenr,
5152 				    root->fs_info->nodesize, parent,
5153 				    root->root_key.objectid, level, 0);
5154 	btrfs_free_path(path);
5155 	if (ret == -ENOENT)
5156 		return 0;
5157 	if (ret < 0)
5158 		return ret;
5159 	return 1;
5160 }
5161 
5162 /*
5163  * Helper to process a tree block pointer.
5164  *
5165  * When wc->stage == DROP_REFERENCE, this function checks the
5166  * reference count of the block pointed to. If the block is
5167  * shared and we need to update the back refs for the subtree
5168  * rooted at the block, this function changes wc->stage to
5169  * UPDATE_BACKREF. If the block is shared and there is no need
5170  * to update the back refs, this function drops the reference
5171  * to the block.
5172  *
5173  * NOTE: return value 1 means we should stop walking down.
5174  */
5175 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
5176 				 struct btrfs_root *root,
5177 				 struct btrfs_path *path,
5178 				 struct walk_control *wc, int *lookup_info)
5179 {
5180 	struct btrfs_fs_info *fs_info = root->fs_info;
5181 	u64 bytenr;
5182 	u64 generation;
5183 	u64 parent;
5184 	struct btrfs_key key;
5185 	struct btrfs_key first_key;
5186 	struct btrfs_ref ref = { 0 };
5187 	struct extent_buffer *next;
5188 	int level = wc->level;
5189 	int reada = 0;
5190 	int ret = 0;
5191 	bool need_account = false;
5192 
5193 	generation = btrfs_node_ptr_generation(path->nodes[level],
5194 					       path->slots[level]);
5195 	/*
5196 	 * If the lower level block was created before the snapshot
5197 	 * was taken, we know there is no need to update back refs
5198 	 * for the subtree.
5199 	 */
5200 	if (wc->stage == UPDATE_BACKREF &&
5201 	    generation <= root->root_key.offset) {
5202 		*lookup_info = 1;
5203 		return 1;
5204 	}
5205 
5206 	bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
5207 	btrfs_node_key_to_cpu(path->nodes[level], &first_key,
5208 			      path->slots[level]);
5209 
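	/*
	 * Only consider readahead if the buffer was not already cached; a
	 * freshly created stub means the block will likely have to be read
	 * from disk below.
	 */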
5210 	next = find_extent_buffer(fs_info, bytenr);
5211 	if (!next) {
5212 		next = btrfs_find_create_tree_block(fs_info, bytenr,
5213 				root->root_key.objectid, level - 1);
5214 		if (IS_ERR(next))
5215 			return PTR_ERR(next);
5216 		reada = 1;
5217 	}
5218 	btrfs_tree_lock(next);
5219 
5220 	ret = btrfs_lookup_extent_info(trans, fs_info, bytenr, level - 1, 1,
5221 				       &wc->refs[level - 1],
5222 				       &wc->flags[level - 1]);
5223 	if (ret < 0)
5224 		goto out_unlock;
5225 
5226 	if (unlikely(wc->refs[level - 1] == 0)) {
5227 		btrfs_err(fs_info, "Missing references.");
5228 		ret = -EIO;
5229 		goto out_unlock;
5230 	}
5231 	*lookup_info = 0;
5232 
5233 	if (wc->stage == DROP_REFERENCE) {
5234 		if (wc->refs[level - 1] > 1) {
5235 			need_account = true;
5236 			if (level == 1 &&
5237 			    (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
5238 				goto skip;
5239 
5240 			if (!wc->update_ref ||
5241 			    generation <= root->root_key.offset)
5242 				goto skip;
5243 
5244 			btrfs_node_key_to_cpu(path->nodes[level], &key,
5245 					      path->slots[level]);
5246 			ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
5247 			if (ret < 0)
5248 				goto skip;
5249 
5250 			wc->stage = UPDATE_BACKREF;
5251 			wc->shared_level = level - 1;
5252 		}
5253 	} else {
5254 		if (level == 1 &&
5255 		    (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
5256 			goto skip;
5257 	}
5258 
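	/*
	 * If the cached buffer does not match the expected generation, drop
	 * it and read the block from disk, kicking off readahead for the
	 * sibling blocks first when it looked cold above.
	 */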
5259 	if (!btrfs_buffer_uptodate(next, generation, 0)) {
5260 		btrfs_tree_unlock(next);
5261 		free_extent_buffer(next);
5262 		next = NULL;
5263 		*lookup_info = 1;
5264 	}
5265 
5266 	if (!next) {
5267 		if (reada && level == 1)
5268 			reada_walk_down(trans, root, wc, path);
5269 		next = read_tree_block(fs_info, bytenr, root->root_key.objectid,
5270 				       generation, level - 1, &first_key);
5271 		if (IS_ERR(next)) {
5272 			return PTR_ERR(next);
5273 		} else if (!extent_buffer_uptodate(next)) {
5274 			free_extent_buffer(next);
5275 			return -EIO;
5276 		}
5277 		btrfs_tree_lock(next);
5278 	}
5279 
5280 	level--;
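	/*
	 * The ASSERT() is compiled out on non-debug builds, so also fail
	 * gracefully on a level mismatch instead of walking a bad buffer.
	 */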
5281 	ASSERT(level == btrfs_header_level(next));
5282 	if (level != btrfs_header_level(next)) {
5283 		btrfs_err(root->fs_info, "mismatched level");
5284 		ret = -EIO;
5285 		goto out_unlock;
5286 	}
5287 	path->nodes[level] = next;
5288 	path->slots[level] = 0;
5289 	path->locks[level] = BTRFS_WRITE_LOCK;
5290 	wc->level = level;
5291 	if (wc->level == 1)
5292 		wc->reada_slot = 0;
5293 	return 0;
5294 skip:
5295 	wc->refs[level - 1] = 0;
5296 	wc->flags[level - 1] = 0;
5297 	if (wc->stage == DROP_REFERENCE) {
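		/*
		 * Pick the parent for the delayed ref: with FULL_BACKREF the
		 * ref hangs off the parent node, otherwise it is keyed by the
		 * root, which must then match the block's recorded owner.
		 */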
5298 		if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
5299 			parent = path->nodes[level]->start;
5300 		} else {
5301 			ASSERT(root->root_key.objectid ==
5302 			       btrfs_header_owner(path->nodes[level]));
5303 			if (root->root_key.objectid !=
5304 			    btrfs_header_owner(path->nodes[level])) {
5305 				btrfs_err(root->fs_info,
5306 						"mismatched block owner");
5307 				ret = -EIO;
5308 				goto out_unlock;
5309 			}
5310 			parent = 0;
5311 		}
5312 
5313 		/*
5314 		 * If we had a drop_progress we need to verify the refs are set
5315 		 * as expected.  If we find our ref then we know that from here
5316 		 * on out everything should be correct, and we can clear the
5317 		 * ->restarted flag.
5318 		 */
5319 		if (wc->restarted) {
5320 			ret = check_ref_exists(trans, root, bytenr, parent,
5321 					       level - 1);
5322 			if (ret < 0)
5323 				goto out_unlock;
5324 			if (ret == 0)
5325 				goto no_delete;
5326 			ret = 0;
5327 			wc->restarted = 0;
5328 		}
5329 
5330 		/*
5331 		 * The reloc tree doesn't contribute to qgroup numbers, and we
5332 		 * have already accounted for them at merge time (replace_path),
5333 		 * thus we can skip the expensive subtree trace here.
5334 		 */
5335 		if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
5336 		    need_account) {
5337 			ret = btrfs_qgroup_trace_subtree(trans, next,
5338 							 generation, level - 1);
5339 			if (ret) {
5340 				btrfs_err_rl(fs_info,
5341 					     "Error %d accounting shared subtree. Quota is out of sync, rescan required.",
5342 					     ret);
5343 			}
5344 		}
5345 
5346 		/*
5347 		 * We need to update the next key in our walk control so we can
5348 		 * update the drop_progress key accordingly.  We don't care if
5349 		 * find_next_key doesn't find a key because that means we're at
5350 		 * the end and are going to clean up now.
5351 		 */
5352 		wc->drop_level = level;
5353 		find_next_key(path, level, &wc->drop_progress);
5354 
5355 		btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr,
5356 				       fs_info->nodesize, parent);
5357 		btrfs_init_tree_ref(&ref, level - 1, root->root_key.objectid,
5358 				    0, false);
5359 		ret = btrfs_free_extent(trans, &ref);
5360 		if (ret)
5361 			goto out_unlock;
5362 	}
5363 no_delete:
5364 	*lookup_info = 1;
5365 	ret = 1;
5366 
5367 out_unlock:
5368 	btrfs_tree_unlock(next);
5369 	free_extent_buffer(next);
5370 
5371 	return ret;
5372 }
5373 
5374 /*
5375  * Helper to process a tree block while walking up the tree.
5376  *
5377  * When wc->stage == DROP_REFERENCE, this function drops the
5378  * reference count on the block.
5379  *
5380  * When wc->stage == UPDATE_BACKREF, this function changes
5381  * wc->stage back to DROP_REFERENCE if we changed wc->stage
5382  * to UPDATE_BACKREF previously while processing the block.
5383  *
5384  * NOTE: return value 1 means we should stop walking up.
5385  */
5386 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
5387 				 struct btrfs_root *root,
5388 				 struct btrfs_path *path,
5389 				 struct walk_control *wc)
5390 {
5391 	struct btrfs_fs_info *fs_info = root->fs_info;
5392 	int ret;
5393 	int level = wc->level;
5394 	struct extent_buffer *eb = path->nodes[level];
5395 	u64 parent = 0;
5396 
5397 	if (wc->stage == UPDATE_BACKREF) {
5398 		BUG_ON(wc->shared_level < level);
5399 		if (level < wc->shared_level)
5400 			goto out;
5401 
5402 		ret = find_next_key(path, level + 1, &wc->update_progress);
5403 		if (ret > 0)
5404 			wc->update_ref = 0;
5405 
5406 		wc->stage = DROP_REFERENCE;
5407 		wc->shared_level = -1;
5408 		path->slots[level] = 0;
5409 
5410 		/*
5411 		 * Check the reference count again if the block isn't locked.
5412 		 * We should start walking down the tree again if the
5413 		 * reference count is one.
5414 		 */
5415 		if (!path->locks[level]) {
5416 			BUG_ON(level == 0);
5417 			btrfs_tree_lock(eb);
5418 			path->locks[level] = BTRFS_WRITE_LOCK;
5419 
5420 			ret = btrfs_lookup_extent_info(trans, fs_info,
5421 						       eb->start, level, 1,
5422 						       &wc->refs[level],
5423 						       &wc->flags[level]);
5424 			if (ret < 0) {
5425 				btrfs_tree_unlock_rw(eb, path->locks[level]);
5426 				path->locks[level] = 0;
5427 				return ret;
5428 			}
5429 			BUG_ON(wc->refs[level] == 0);
5430 			if (wc->refs[level] == 1) {
5431 				btrfs_tree_unlock_rw(eb, path->locks[level]);
5432 				path->locks[level] = 0;
5433 				return 1;
5434 			}
5435 		}
5436 	}
5437 
5438 	/* wc->stage == DROP_REFERENCE */
5439 	BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
5440 
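	/*
	 * This was the last reference: for a leaf, drop the refs held on the
	 * items (honouring FULL_BACKREF) and let qgroups account the leaf
	 * items before the block itself is freed below.
	 */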
5441 	if (wc->refs[level] == 1) {
5442 		if (level == 0) {
5443 			if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
5444 				ret = btrfs_dec_ref(trans, root, eb, 1);
5445 			else
5446 				ret = btrfs_dec_ref(trans, root, eb, 0);
5447 			BUG_ON(ret); /* -ENOMEM */
5448 			if (is_fstree(root->root_key.objectid)) {
5449 				ret = btrfs_qgroup_trace_leaf_items(trans, eb);
5450 				if (ret) {
5451 					btrfs_err_rl(fs_info,
5452 	"error %d accounting leaf items, quota is out of sync, rescan required",
5453 					     ret);
5454 				}
5455 			}
5456 		}
5457 		/* Make the locked-block assertion in btrfs_clean_tree_block() happy. */
5458 		if (!path->locks[level] &&
5459 		    btrfs_header_generation(eb) == trans->transid) {
5460 			btrfs_tree_lock(eb);
5461 			path->locks[level] = BTRFS_WRITE_LOCK;
5462 		}
5463 		btrfs_clean_tree_block(eb);
5464 	}
5465 
5466 	if (eb == root->node) {
5467 		if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
5468 			parent = eb->start;
5469 		else if (root->root_key.objectid != btrfs_header_owner(eb))
5470 			goto owner_mismatch;
5471 	} else {
5472 		if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
5473 			parent = path->nodes[level + 1]->start;
5474 		else if (root->root_key.objectid !=
5475 			 btrfs_header_owner(path->nodes[level + 1]))
5476 			goto owner_mismatch;
5477 	}
5478 
5479 	btrfs_free_tree_block(trans, btrfs_root_id(root), eb, parent,
5480 			      wc->refs[level] == 1);
5481 out:
5482 	wc->refs[level] = 0;
5483 	wc->flags[level] = 0;
5484 	return 0;
5485 
5486 owner_mismatch:
5487 	btrfs_err_rl(fs_info, "unexpected tree owner, have %llu expect %llu",
5488 		     btrfs_header_owner(eb), root->root_key.objectid);
5489 	return -EUCLEAN;
5490 }
5491 
5492 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
5493 				   struct btrfs_root *root,
5494 				   struct btrfs_path *path,
5495 				   struct walk_control *wc)
5496 {
5497 	int level = wc->level;
5498 	int lookup_info = 1;
5499 	int ret;
5500 
5501 	while (level >= 0) {
5502 		ret = walk_down_proc(trans, root, path, wc, lookup_info);
5503 		if (ret > 0)
5504 			break;
5505 
5506 		if (level == 0)
5507 			break;
5508 
5509 		if (path->slots[level] >=
5510 		    btrfs_header_nritems(path->nodes[level]))
5511 			break;
5512 
5513 		ret = do_walk_down(trans, root, path, wc, &lookup_info);
5514 		if (ret > 0) {
5515 			path->slots[level]++;
5516 			continue;
5517 		} else if (ret < 0)
5518 			return ret;
5519 		level = wc->level;
5520 	}
5521 	return 0;
5522 }
5523 
5524 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
5525 				 struct btrfs_root *root,
5526 				 struct btrfs_path *path,
5527 				 struct walk_control *wc, int max_level)
5528 {
5529 	int level = wc->level;
5530 	int ret;
5531 
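	/* Mark the current node exhausted so the walk starts moving up. */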
5532 	path->slots[level] = btrfs_header_nritems(path->nodes[level]);
5533 	while (level < max_level && path->nodes[level]) {
5534 		wc->level = level;
5535 		if (path->slots[level] + 1 <
5536 		    btrfs_header_nritems(path->nodes[level])) {
5537 			path->slots[level]++;
5538 			return 0;
5539 		} else {
5540 			ret = walk_up_proc(trans, root, path, wc);
5541 			if (ret > 0)
5542 				return 0;
5543 			if (ret < 0)
5544 				return ret;
5545 
5546 			if (path->locks[level]) {
5547 				btrfs_tree_unlock_rw(path->nodes[level],
5548 						     path->locks[level]);
5549 				path->locks[level] = 0;
5550 			}
5551 			free_extent_buffer(path->nodes[level]);
5552 			path->nodes[level] = NULL;
5553 			level++;
5554 		}
5555 	}
5556 	return 1;
5557 }
5558 
5559 /*
5560  * Drop a subvolume tree.
5561  *
5562  * This function traverses the tree freeing any blocks that are only
5563  * referenced by the tree.
5564  *
5565  * When a shared tree block is found, this function decreases its
5566  * reference count by one. If update_ref is true, this function
5567  * also makes sure backrefs for the shared block and all lower level
5568  * blocks are properly updated.
5569  *
5570  * If called with for_reloc == 0, this may exit early with -EAGAIN.
5571  */
5572 int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc)
5573 {
5574 	struct btrfs_fs_info *fs_info = root->fs_info;
5575 	struct btrfs_path *path;
5576 	struct btrfs_trans_handle *trans;
5577 	struct btrfs_root *tree_root = fs_info->tree_root;
5578 	struct btrfs_root_item *root_item = &root->root_item;
5579 	struct walk_control *wc;
5580 	struct btrfs_key key;
5581 	int err = 0;
5582 	int ret;
5583 	int level;
5584 	bool root_dropped = false;
5585 	bool unfinished_drop = false;
5586 
5587 	btrfs_debug(fs_info, "Drop subvolume %llu", root->root_key.objectid);
5588 
5589 	path = btrfs_alloc_path();
5590 	if (!path) {
5591 		err = -ENOMEM;
5592 		goto out;
5593 	}
5594 
5595 	wc = kzalloc(sizeof(*wc), GFP_NOFS);
5596 	if (!wc) {
5597 		btrfs_free_path(path);
5598 		err = -ENOMEM;
5599 		goto out;
5600 	}
5601 
5602 	/*
5603 	 * Use join to avoid potential EINTR from transaction start. See
5604 	 * wait_reserve_ticket and the whole reservation callchain.
5605 	 */
5606 	if (for_reloc)
5607 		trans = btrfs_join_transaction(tree_root);
5608 	else
5609 		trans = btrfs_start_transaction(tree_root, 0);
5610 	if (IS_ERR(trans)) {
5611 		err = PTR_ERR(trans);
5612 		goto out_free;
5613 	}
5614 
5615 	err = btrfs_run_delayed_items(trans);
5616 	if (err)
5617 		goto out_end_trans;
5618 
5619 	/*
5620 	 * This will help us catch people modifying the fs tree while we're
5621 	 * dropping it.  It is unsafe to mess with the fs tree while it's being
5622 	 * dropped as we unlock the root node and parent nodes as we walk down
5623 	 * the tree, assuming nothing will change.  If something does change
5624 	 * then we'll have stale information and drop references to blocks we've
5625 	 * already dropped.
5626 	 */
5627 	set_bit(BTRFS_ROOT_DELETING, &root->state);
5628 	unfinished_drop = test_bit(BTRFS_ROOT_UNFINISHED_DROP, &root->state);
5629 
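	/*
	 * An all-zero drop_progress key means this is the first attempt at
	 * dropping this root; otherwise we resume from the recorded key and
	 * drop level below.
	 */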
5630 	if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
5631 		level = btrfs_header_level(root->node);
5632 		path->nodes[level] = btrfs_lock_root_node(root);
5633 		path->slots[level] = 0;
5634 		path->locks[level] = BTRFS_WRITE_LOCK;
5635 		memset(&wc->update_progress, 0,
5636 		       sizeof(wc->update_progress));
5637 	} else {
5638 		btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
5639 		memcpy(&wc->update_progress, &key,
5640 		       sizeof(wc->update_progress));
5641 
5642 		level = btrfs_root_drop_level(root_item);
5643 		BUG_ON(level == 0);
5644 		path->lowest_level = level;
5645 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5646 		path->lowest_level = 0;
5647 		if (ret < 0) {
5648 			err = ret;
5649 			goto out_end_trans;
5650 		}
5651 		WARN_ON(ret > 0);
5652 
5653 		/*
5654 		 * Unlock our path; this is safe because only this
5655 		 * function is allowed to delete this snapshot.
5656 		 */
5657 		btrfs_unlock_up_safe(path, 0);
5658 
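		/*
		 * Walk down from the root again, re-taking the write locks
		 * and refreshing wc->refs/wc->flags for each level until we
		 * reach the level the previous drop stopped at.
		 */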
5659 		level = btrfs_header_level(root->node);
5660 		while (1) {
5661 			btrfs_tree_lock(path->nodes[level]);
5662 			path->locks[level] = BTRFS_WRITE_LOCK;
5663 
5664 			ret = btrfs_lookup_extent_info(trans, fs_info,
5665 						path->nodes[level]->start,
5666 						level, 1, &wc->refs[level],
5667 						&wc->flags[level]);
5668 			if (ret < 0) {
5669 				err = ret;
5670 				goto out_end_trans;
5671 			}
5672 			BUG_ON(wc->refs[level] == 0);
5673 
5674 			if (level == btrfs_root_drop_level(root_item))
5675 				break;
5676 
5677 			btrfs_tree_unlock(path->nodes[level]);
5678 			path->locks[level] = 0;
5679 			WARN_ON(wc->refs[level] != 1);
5680 			level--;
5681 		}
5682 	}
5683 
5684 	wc->restarted = test_bit(BTRFS_ROOT_DEAD_TREE, &root->state);
5685 	wc->level = level;
5686 	wc->shared_level = -1;
5687 	wc->stage = DROP_REFERENCE;
5688 	wc->update_ref = update_ref;
5689 	wc->keep_locks = 0;
5690 	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(fs_info);
5691 
5692 	while (1) {
5693 
5694 		ret = walk_down_tree(trans, root, path, wc);
5695 		if (ret < 0) {
5696 			err = ret;
5697 			break;
5698 		}
5699 
5700 		ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
5701 		if (ret < 0) {
5702 			err = ret;
5703 			break;
5704 		}
5705 
5706 		if (ret > 0) {
5707 			BUG_ON(wc->stage != DROP_REFERENCE);
5708 			break;
5709 		}
5710 
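		/*
		 * Record how far we got so that an interrupted drop can
		 * resume from the same key after a restart or remount.
		 */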
5711 		if (wc->stage == DROP_REFERENCE) {
5712 			wc->drop_level = wc->level;
5713 			btrfs_node_key_to_cpu(path->nodes[wc->drop_level],
5714 					      &wc->drop_progress,
5715 					      path->slots[wc->drop_level]);
5716 		}
5717 		btrfs_cpu_key_to_disk(&root_item->drop_progress,
5718 				      &wc->drop_progress);
5719 		btrfs_set_root_drop_level(root_item, wc->drop_level);
5720 
5721 		BUG_ON(wc->level == 0);
5722 		if (btrfs_should_end_transaction(trans) ||
5723 		    (!for_reloc && btrfs_need_cleaner_sleep(fs_info))) {
5724 			ret = btrfs_update_root(trans, tree_root,
5725 						&root->root_key,
5726 						root_item);
5727 			if (ret) {
5728 				btrfs_abort_transaction(trans, ret);
5729 				err = ret;
5730 				goto out_end_trans;
5731 			}
5732 
5733 			btrfs_end_transaction_throttle(trans);
5734 			if (!for_reloc && btrfs_need_cleaner_sleep(fs_info)) {
5735 				btrfs_debug(fs_info,
5736 					    "drop snapshot early exit");
5737 				err = -EAGAIN;
5738 				goto out_free;
5739 			}
5740 
5741 			/*
5742 			 * Use join to avoid potential EINTR from transaction
5743 			 * start. See wait_reserve_ticket and the whole
5744 			 * reservation callchain.
5745 			 */
5746 			if (for_reloc)
5747 				trans = btrfs_join_transaction(tree_root);
5748 			else
5749 				trans = btrfs_start_transaction(tree_root, 0);
5750 			if (IS_ERR(trans)) {
5751 				err = PTR_ERR(trans);
5752 				goto out_free;
5753 			}
5754 		}
5755 	}
5756 	btrfs_release_path(path);
5757 	if (err)
5758 		goto out_end_trans;
5759 
5760 	ret = btrfs_del_root(trans, &root->root_key);
5761 	if (ret) {
5762 		btrfs_abort_transaction(trans, ret);
5763 		err = ret;
5764 		goto out_end_trans;
5765 	}
5766 
5767 	if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
5768 		ret = btrfs_find_root(tree_root, &root->root_key, path,
5769 				      NULL, NULL);
5770 		if (ret < 0) {
5771 			btrfs_abort_transaction(trans, ret);
5772 			err = ret;
5773 			goto out_end_trans;
5774 		} else if (ret > 0) {
5775 			/*
5776 			 * If we fail to delete the orphan item this time
5777 			 * around, it'll get picked up the next time.
5778 			 * The most common failure here is just -ENOENT.
5779 			 */
5780 			btrfs_del_orphan_item(trans, tree_root,
5781 					      root->root_key.objectid);
5782 		}
5783 	}
5784 
5785 	/*
5786 	 * This subvolume is going to be completely dropped, and won't be
5787 	 * recorded as dirty roots, thus pertrans meta rsv will not be freed at
5788 	 * commit transaction time.  So free it here manually.
5789 	 */
5790 	btrfs_qgroup_convert_reserved_meta(root, INT_MAX);
5791 	btrfs_qgroup_free_meta_all_pertrans(root);
5792 
5793 	if (test_bit(BTRFS_ROOT_IN_RADIX, &root->state))
5794 		btrfs_add_dropped_root(trans, root);
5795 	else
5796 		btrfs_put_root(root);
5797 	root_dropped = true;
5798 out_end_trans:
5799 	btrfs_end_transaction_throttle(trans);
5800 out_free:
5801 	kfree(wc);
5802 	btrfs_free_path(path);
5803 out:
5804 	/*
5805 	 * We were an unfinished drop root, check to see if there are any
5806 	 * pending, and if not clear and wake up any waiters.
5807 	 */
5808 	if (!err && unfinished_drop)
5809 		btrfs_maybe_wake_unfinished_drop(fs_info);
5810 
5811 	/*
5812 	 * So if we need to stop dropping the snapshot for whatever reason we
5813 	 * need to make sure to add it back to the dead root list so that we
5814 	 * keep trying to do the work later.  This also cleans up roots if we
5815 	 * don't have it in the radix (like when we recover after a power fail
5816 	 * or unmount) so we don't leak memory.
5817 	 */
5818 	if (!for_reloc && !root_dropped)
5819 		btrfs_add_dead_root(root);
5820 	return err;
5821 }
5822 
5823 /*
5824  * Drop the subtree rooted at tree block 'node'.
5825  *
5826  * NOTE: this function will unlock and release tree block 'node'.
5827  * It is only used by the relocation code.
5828  */
5829 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
5830 			struct btrfs_root *root,
5831 			struct extent_buffer *node,
5832 			struct extent_buffer *parent)
5833 {
5834 	struct btrfs_fs_info *fs_info = root->fs_info;
5835 	struct btrfs_path *path;
5836 	struct walk_control *wc;
5837 	int level;
5838 	int parent_level;
5839 	int ret = 0;
5840 	int wret;
5841 
5842 	BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
5843 
5844 	path = btrfs_alloc_path();
5845 	if (!path)
5846 		return -ENOMEM;
5847 
5848 	wc = kzalloc(sizeof(*wc), GFP_NOFS);
5849 	if (!wc) {
5850 		btrfs_free_path(path);
5851 		return -ENOMEM;
5852 	}
5853 
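	/*
	 * Install the parent at its level with all slots consumed so the
	 * walk terminates there, and 'node' as the starting point of the
	 * walk.
	 */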
5854 	btrfs_assert_tree_locked(parent);
5855 	parent_level = btrfs_header_level(parent);
5856 	atomic_inc(&parent->refs);
5857 	path->nodes[parent_level] = parent;
5858 	path->slots[parent_level] = btrfs_header_nritems(parent);
5859 
5860 	btrfs_assert_tree_locked(node);
5861 	level = btrfs_header_level(node);
5862 	path->nodes[level] = node;
5863 	path->slots[level] = 0;
5864 	path->locks[level] = BTRFS_WRITE_LOCK;
5865 
5866 	wc->refs[parent_level] = 1;
5867 	wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
5868 	wc->level = level;
5869 	wc->shared_level = -1;
5870 	wc->stage = DROP_REFERENCE;
5871 	wc->update_ref = 0;
5872 	wc->keep_locks = 1;
5873 	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(fs_info);
5874 
5875 	while (1) {
5876 		wret = walk_down_tree(trans, root, path, wc);
5877 		if (wret < 0) {
5878 			ret = wret;
5879 			break;
5880 		}
5881 
5882 		wret = walk_up_tree(trans, root, path, wc, parent_level);
5883 		if (wret < 0)
5884 			ret = wret;
5885 		if (wret != 0)
5886 			break;
5887 	}
5888 
5889 	kfree(wc);
5890 	btrfs_free_path(path);
5891 	return ret;
5892 }
5893 
5894 /*
5895  * Helper to account the unused space of all the readonly block groups in
5896  * the space_info. Takes mirrors into account.
5897  */
5898 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
5899 {
5900 	struct btrfs_block_group *block_group;
5901 	u64 free_bytes = 0;
5902 	int factor;
5903 
5904 	/* It's df, we don't care if it's racy */
5905 	if (list_empty(&sinfo->ro_bgs))
5906 		return 0;
5907 
5908 	spin_lock(&sinfo->lock);
5909 	list_for_each_entry(block_group, &sinfo->ro_bgs, ro_list) {
5910 		spin_lock(&block_group->lock);
5911 
5912 		if (!block_group->ro) {
5913 			spin_unlock(&block_group->lock);
5914 			continue;
5915 		}
5916 
5917 		factor = btrfs_bg_type_to_factor(block_group->flags);
5918 		free_bytes += (block_group->length -
5919 			       block_group->used) * factor;
5920 
5921 		spin_unlock(&block_group->lock);
5922 	}
5923 	spin_unlock(&sinfo->lock);
5924 
5925 	return free_bytes;
5926 }
5927 
5928 int btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info,
5929 				   u64 start, u64 end)
5930 {
5931 	return unpin_extent_range(fs_info, start, end, false);
5932 }
5933 
5934 /*
5935  * It used to be that old block groups would be left around forever.
5936  * Iterating over them would be enough to trim unused space.  Since we
5937  * now automatically remove them, we also need to iterate over unallocated
5938  * space.
5939  *
5940  * We don't want a transaction for this since the discard may take a
5941  * substantial amount of time.  We don't require that a transaction be
5942  * running, but we do need to take a running transaction into account
5943  * to ensure that we're not discarding chunks that were released or
5944  * allocated in the current transaction.
5945  *
5946  * Holding the chunks lock will prevent other threads from allocating
5947  * or releasing chunks, but it won't prevent a running transaction
5948  * from committing and releasing the memory that the pending chunks
5949  * list head uses.  For that, we need to take a reference to the
5950  * transaction and hold the commit root sem.  We only need to hold
5951  * it while performing the free space search since we have already
5952  * held back allocations.
5953  */
5954 static int btrfs_trim_free_extents(struct btrfs_device *device, u64 *trimmed)
5955 {
5956 	u64 start = SZ_1M, len = 0, end = 0;
5957 	int ret;
5958 
5959 	*trimmed = 0;
5960 
5961 	/* Discard not supported = nothing to do. */
5962 	if (!blk_queue_discard(bdev_get_queue(device->bdev)))
5963 		return 0;
5964 
5965 	/* Not writable = nothing to do. */
5966 	if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
5967 		return 0;
5968 
5969 	/* No free space = nothing to do. */
5970 	if (device->total_bytes <= device->bytes_used)
5971 		return 0;
5972 
5973 	ret = 0;
5974 
5975 	while (1) {
5976 		struct btrfs_fs_info *fs_info = device->fs_info;
5977 		u64 bytes;
5978 
5979 		ret = mutex_lock_interruptible(&fs_info->chunk_mutex);
5980 		if (ret)
5981 			break;
5982 
5983 		find_first_clear_extent_bit(&device->alloc_state, start,
5984 					    &start, &end,
5985 					    CHUNK_TRIMMED | CHUNK_ALLOCATED);
5986 
5987 		/* Check if there are any CHUNK_* bits left */
5988 		if (start > device->total_bytes) {
5989 			WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
5990 			btrfs_warn_in_rcu(fs_info,
5991 "ignoring attempt to trim beyond device size: offset %llu length %llu device %s device size %llu",
5992 					  start, end - start + 1,
5993 					  rcu_str_deref(device->name),
5994 					  device->total_bytes);
5995 			mutex_unlock(&fs_info->chunk_mutex);
5996 			ret = 0;
5997 			break;
5998 		}
5999 
6000 		/* Ensure we skip the reserved area in the first 1M */
6001 		start = max_t(u64, start, SZ_1M);
6002 
6003 		/*
6004 		 * If find_first_clear_extent_bit() finds a range that spans the
6005 		 * end of the device it will set end to -1; in this case it's up
6006 		 * to the caller to trim the value to the size of the device.
6007 		 */
6008 		end = min(end, device->total_bytes - 1);
6009 
6010 		len = end - start + 1;
6011 
6012 		/* We didn't find any extents */
6013 		if (!len) {
6014 			mutex_unlock(&fs_info->chunk_mutex);
6015 			ret = 0;
6016 			break;
6017 		}
6018 
6019 		ret = btrfs_issue_discard(device->bdev, start, len,
6020 					  &bytes);
6021 		if (!ret)
6022 			set_extent_bits(&device->alloc_state, start,
6023 					start + bytes - 1,
6024 					CHUNK_TRIMMED);
6025 		mutex_unlock(&fs_info->chunk_mutex);
6026 
6027 		if (ret)
6028 			break;
6029 
6030 		start += len;
6031 		*trimmed += bytes;
6032 
6033 		if (fatal_signal_pending(current)) {
6034 			ret = -ERESTARTSYS;
6035 			break;
6036 		}
6037 
6038 		cond_resched();
6039 	}
6040 
6041 	return ret;
6042 }
6043 
6044 /*
6045  * Trim the whole filesystem by:
6046  * 1) trimming the free space in each block group
6047  * 2) trimming the unallocated space on each device
6048  *
6049  * This will also continue trimming even if a block group or device encounters
6050  * an error.  The return value will be the last error, or 0 if nothing bad
6051  * happens.
6052  */
6053 int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
6054 {
6055 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6056 	struct btrfs_block_group *cache = NULL;
6057 	struct btrfs_device *device;
6058 	u64 group_trimmed;
6059 	u64 range_end = U64_MAX;
6060 	u64 start;
6061 	u64 end;
6062 	u64 trimmed = 0;
6063 	u64 bg_failed = 0;
6064 	u64 dev_failed = 0;
6065 	int bg_ret = 0;
6066 	int dev_ret = 0;
6067 	int ret = 0;
6068 
6069 	/*
6070 	 * Check range overflow if range->len is set.
6071 	 * The default range->len is U64_MAX.
6072 	 */
6073 	if (range->len != U64_MAX &&
6074 	    check_add_overflow(range->start, range->len, &range_end))
6075 		return -EINVAL;
6076 
6077 	cache = btrfs_lookup_first_block_group(fs_info, range->start);
6078 	for (; cache; cache = btrfs_next_block_group(cache)) {
6079 		if (cache->start >= range_end) {
6080 			btrfs_put_block_group(cache);
6081 			break;
6082 		}
6083 
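		/* Clamp the trim range to the part overlapping this block group. */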
6084 		start = max(range->start, cache->start);
6085 		end = min(range_end, cache->start + cache->length);
6086 
6087 		if (end - start >= range->minlen) {
6088 			if (!btrfs_block_group_done(cache)) {
6089 				ret = btrfs_cache_block_group(cache, true);
6090 				if (ret) {
6091 					bg_failed++;
6092 					bg_ret = ret;
6093 					continue;
6094 				}
6095 			}
6096 			ret = btrfs_trim_block_group(cache,
6097 						     &group_trimmed,
6098 						     start,
6099 						     end,
6100 						     range->minlen);
6101 
6102 			trimmed += group_trimmed;
6103 			if (ret) {
6104 				bg_failed++;
6105 				bg_ret = ret;
6106 				continue;
6107 			}
6108 		}
6109 	}
6110 
6111 	if (bg_failed)
6112 		btrfs_warn(fs_info,
6113 			"failed to trim %llu block group(s), last error %d",
6114 			bg_failed, bg_ret);
6115 
6116 	mutex_lock(&fs_devices->device_list_mutex);
6117 	list_for_each_entry(device, &fs_devices->devices, dev_list) {
6118 		if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
6119 			continue;
6120 
6121 		ret = btrfs_trim_free_extents(device, &group_trimmed);
6122 		if (ret) {
6123 			dev_failed++;
6124 			dev_ret = ret;
6125 			break;
6126 		}
6127 
6128 		trimmed += group_trimmed;
6129 	}
6130 	mutex_unlock(&fs_devices->device_list_mutex);
6131 
6132 	if (dev_failed)
6133 		btrfs_warn(fs_info,
6134 			"failed to trim %llu device(s), last error %d",
6135 			dev_failed, dev_ret);
6136 	range->len = trimmed;
6137 	if (bg_ret)
6138 		return bg_ret;
6139 	return dev_ret;
6140 }
6141