1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2011 STRATO.  All rights reserved.
4  */
5 
6 #include <linux/mm.h>
7 #include <linux/rbtree.h>
8 #include <trace/events/btrfs.h>
9 #include "ctree.h"
10 #include "disk-io.h"
11 #include "backref.h"
12 #include "ulist.h"
13 #include "transaction.h"
14 #include "delayed-ref.h"
15 #include "locking.h"
16 
17 /* Just an arbitrary number so we can be sure this happened */
18 #define BACKREF_FOUND_SHARED 6
19 
20 struct extent_inode_elem {
21 	u64 inum;
22 	u64 offset;
23 	struct extent_inode_elem *next;
24 };
25 
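/*
 * Check whether the given file extent item references extent_item_pos. If it
 * does, or if the offset check is skipped (ignore_offset set, or the extent
 * is compressed/encrypted/encoded), allocate a new extent_inode_elem holding
 * the inode number and file offset and prepend it to *eie. Returns 0 on
 * success, 1 if extent_item_pos lies outside the range covered by this file
 * extent item, or -ENOMEM on allocation failure.
 */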
26 static int check_extent_in_eb(const struct btrfs_key *key,
27 			      const struct extent_buffer *eb,
28 			      const struct btrfs_file_extent_item *fi,
29 			      u64 extent_item_pos,
30 			      struct extent_inode_elem **eie,
31 			      bool ignore_offset)
32 {
33 	u64 offset = 0;
34 	struct extent_inode_elem *e;
35 
36 	if (!ignore_offset &&
37 	    !btrfs_file_extent_compression(eb, fi) &&
38 	    !btrfs_file_extent_encryption(eb, fi) &&
39 	    !btrfs_file_extent_other_encoding(eb, fi)) {
40 		u64 data_offset;
41 		u64 data_len;
42 
43 		data_offset = btrfs_file_extent_offset(eb, fi);
44 		data_len = btrfs_file_extent_num_bytes(eb, fi);
45 
46 		if (extent_item_pos < data_offset ||
47 		    extent_item_pos >= data_offset + data_len)
48 			return 1;
49 		offset = extent_item_pos - data_offset;
50 	}
51 
52 	e = kmalloc(sizeof(*e), GFP_NOFS);
53 	if (!e)
54 		return -ENOMEM;
55 
56 	e->next = *eie;
57 	e->inum = key->objectid;
58 	e->offset = key->offset + offset;
59 	*eie = e;
60 
61 	return 0;
62 }
63 
64 static void free_inode_elem_list(struct extent_inode_elem *eie)
65 {
66 	struct extent_inode_elem *eie_next;
67 
68 	for (; eie; eie = eie_next) {
69 		eie_next = eie->next;
70 		kfree(eie);
71 	}
72 }
73 
74 static int find_extent_in_eb(const struct extent_buffer *eb,
75 			     u64 wanted_disk_byte, u64 extent_item_pos,
76 			     struct extent_inode_elem **eie,
77 			     bool ignore_offset)
78 {
79 	u64 disk_byte;
80 	struct btrfs_key key;
81 	struct btrfs_file_extent_item *fi;
82 	int slot;
83 	int nritems;
84 	int extent_type;
85 	int ret;
86 
87 	/*
88 	 * from the shared data ref, we only have the leaf but we need
89 	 * the key. thus, we must look into all items and find the one(s)
90 	 * with a reference to our extent item.
91 	 */
92 	nritems = btrfs_header_nritems(eb);
93 	for (slot = 0; slot < nritems; ++slot) {
94 		btrfs_item_key_to_cpu(eb, &key, slot);
95 		if (key.type != BTRFS_EXTENT_DATA_KEY)
96 			continue;
97 		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
98 		extent_type = btrfs_file_extent_type(eb, fi);
99 		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
100 			continue;
101 		/* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
102 		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
103 		if (disk_byte != wanted_disk_byte)
104 			continue;
105 
106 		ret = check_extent_in_eb(&key, eb, fi, extent_item_pos, eie, ignore_offset);
107 		if (ret < 0)
108 			return ret;
109 	}
110 
111 	return 0;
112 }
113 
114 struct preftree {
115 	struct rb_root_cached root;
116 	unsigned int count;
117 };
118 
119 #define PREFTREE_INIT	{ .root = RB_ROOT_CACHED, .count = 0 }
120 
121 struct preftrees {
122 	struct preftree direct;    /* BTRFS_SHARED_[DATA|BLOCK]_REF_KEY */
123 	struct preftree indirect;  /* BTRFS_[TREE_BLOCK|EXTENT_DATA]_REF_KEY */
124 	struct preftree indirect_missing_keys;
125 };
126 
127 /*
128  * Checks for a shared extent during backref search.
129  *
130  * The share_count tracks prelim_refs (direct and indirect) having a
131  * ref->count >0:
132  *  - incremented when a ref->count transitions to >0
133  *  - decremented when a ref->count transitions to <1
134  */
135 struct share_check {
136 	u64 root_objectid;
137 	u64 inum;
138 	int share_count;
139 	bool have_delayed_delete_refs;
140 };
141 
142 static inline int extent_is_shared(struct share_check *sc)
143 {
144 	return (sc && sc->share_count > 1) ? BACKREF_FOUND_SHARED : 0;
145 }
146 
147 static struct kmem_cache *btrfs_prelim_ref_cache;
148 
149 int __init btrfs_prelim_ref_init(void)
150 {
151 	btrfs_prelim_ref_cache = kmem_cache_create("btrfs_prelim_ref",
152 					sizeof(struct prelim_ref),
153 					0,
154 					SLAB_MEM_SPREAD,
155 					NULL);
156 	if (!btrfs_prelim_ref_cache)
157 		return -ENOMEM;
158 	return 0;
159 }
160 
161 void __cold btrfs_prelim_ref_exit(void)
162 {
163 	kmem_cache_destroy(btrfs_prelim_ref_cache);
164 }
165 
166 static void free_pref(struct prelim_ref *ref)
167 {
168 	kmem_cache_free(btrfs_prelim_ref_cache, ref);
169 }
170 
171 /*
172  * Return 0 when both refs are for the same block (and can be merged).
173  * A -1 return indicates ref1 is a 'lower' block than ref2, while 1
174  * indicates a 'higher' block.
175  */
176 static int prelim_ref_compare(struct prelim_ref *ref1,
177 			      struct prelim_ref *ref2)
178 {
179 	if (ref1->level < ref2->level)
180 		return -1;
181 	if (ref1->level > ref2->level)
182 		return 1;
183 	if (ref1->root_id < ref2->root_id)
184 		return -1;
185 	if (ref1->root_id > ref2->root_id)
186 		return 1;
187 	if (ref1->key_for_search.type < ref2->key_for_search.type)
188 		return -1;
189 	if (ref1->key_for_search.type > ref2->key_for_search.type)
190 		return 1;
191 	if (ref1->key_for_search.objectid < ref2->key_for_search.objectid)
192 		return -1;
193 	if (ref1->key_for_search.objectid > ref2->key_for_search.objectid)
194 		return 1;
195 	if (ref1->key_for_search.offset < ref2->key_for_search.offset)
196 		return -1;
197 	if (ref1->key_for_search.offset > ref2->key_for_search.offset)
198 		return 1;
199 	if (ref1->parent < ref2->parent)
200 		return -1;
201 	if (ref1->parent > ref2->parent)
202 		return 1;
203 
204 	return 0;
205 }
206 
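/*
 * Keep sc->share_count in sync with how many prelim_refs currently have a
 * positive count: increment it when a ref's count transitions to > 0 and
 * decrement it when it transitions back to < 1. Does nothing without a
 * share check context.
 */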
207 static void update_share_count(struct share_check *sc, int oldcount,
208 			       int newcount)
209 {
210 	if ((!sc) || (oldcount == 0 && newcount < 1))
211 		return;
212 
213 	if (oldcount > 0 && newcount < 1)
214 		sc->share_count--;
215 	else if (oldcount < 1 && newcount > 0)
216 		sc->share_count++;
217 }
218 
219 /*
220  * Add @newref to the @root rbtree, merging identical refs.
221  *
222  * Callers should assume that newref has been freed after calling.
223  */
224 static void prelim_ref_insert(const struct btrfs_fs_info *fs_info,
225 			      struct preftree *preftree,
226 			      struct prelim_ref *newref,
227 			      struct share_check *sc)
228 {
229 	struct rb_root_cached *root;
230 	struct rb_node **p;
231 	struct rb_node *parent = NULL;
232 	struct prelim_ref *ref;
233 	int result;
234 	bool leftmost = true;
235 
236 	root = &preftree->root;
237 	p = &root->rb_root.rb_node;
238 
239 	while (*p) {
240 		parent = *p;
241 		ref = rb_entry(parent, struct prelim_ref, rbnode);
242 		result = prelim_ref_compare(ref, newref);
243 		if (result < 0) {
244 			p = &(*p)->rb_left;
245 		} else if (result > 0) {
246 			p = &(*p)->rb_right;
247 			leftmost = false;
248 		} else {
249 			/* Identical refs, merge them and free @newref */
250 			struct extent_inode_elem *eie = ref->inode_list;
251 
252 			while (eie && eie->next)
253 				eie = eie->next;
254 
255 			if (!eie)
256 				ref->inode_list = newref->inode_list;
257 			else
258 				eie->next = newref->inode_list;
259 			trace_btrfs_prelim_ref_merge(fs_info, ref, newref,
260 						     preftree->count);
261 			/*
262 			 * A delayed ref can have newref->count < 0.
263 			 * The ref->count is updated to follow any
264 			 * BTRFS_[ADD|DROP]_DELAYED_REF actions.
265 			 */
266 			update_share_count(sc, ref->count,
267 					   ref->count + newref->count);
268 			ref->count += newref->count;
269 			free_pref(newref);
270 			return;
271 		}
272 	}
273 
274 	update_share_count(sc, 0, newref->count);
275 	preftree->count++;
276 	trace_btrfs_prelim_ref_insert(fs_info, newref, NULL, preftree->count);
277 	rb_link_node(&newref->rbnode, parent, p);
278 	rb_insert_color_cached(&newref->rbnode, root, leftmost);
279 }
280 
281 /*
282  * Release the entire tree.  We don't care about internal consistency so
283  * just free everything and then reset the tree root.
284  */
285 static void prelim_release(struct preftree *preftree)
286 {
287 	struct prelim_ref *ref, *next_ref;
288 
289 	rbtree_postorder_for_each_entry_safe(ref, next_ref,
290 					     &preftree->root.rb_root, rbnode) {
291 		free_inode_elem_list(ref->inode_list);
292 		free_pref(ref);
293 	}
294 
295 	preftree->root = RB_ROOT_CACHED;
296 	preftree->count = 0;
297 }
298 
299 /*
300  * the rules for all callers of this function are:
301  * - obtaining the parent is the goal
302  * - if you add a key, you must know that it is a correct key
303  * - if you cannot add the parent or a correct key, then we will look into the
304  *   block later to set a correct key
305  *
306  * delayed refs
307  * ============
308  *        backref type | shared | indirect | shared | indirect
309  * information         |   tree |     tree |   data |     data
310  * --------------------+--------+----------+--------+----------
311  *      parent logical |    y   |     -    |    -   |     -
312  *      key to resolve |    -   |     y    |    y   |     y
313  *  tree block logical |    -   |     -    |    -   |     -
314  *  root for resolving |    y   |     y    |    y   |     y
315  *
316  * - column 1:       we've the parent -> done
317  * - column 2, 3, 4: we use the key to find the parent
318  *
319  * on disk refs (inline or keyed)
320  * ==============================
321  *        backref type | shared | indirect | shared | indirect
322  * information         |   tree |     tree |   data |     data
323  * --------------------+--------+----------+--------+----------
324  *      parent logical |    y   |     -    |    y   |     -
325  *      key to resolve |    -   |     -    |    -   |     y
326  *  tree block logical |    y   |     y    |    y   |     y
327  *  root for resolving |    -   |     y    |    y   |     y
328  *
329  * - column 1, 3: we've the parent -> done
330  * - column 2:    we take the first key from the block to find the parent
331  *                (see add_missing_keys)
332  * - column 4:    we use the key to find the parent
333  *
334  * additional information that's available but not required to find the parent
335  * block might help in merging entries to gain some speed.
336  */
337 static int add_prelim_ref(const struct btrfs_fs_info *fs_info,
338 			  struct preftree *preftree, u64 root_id,
339 			  const struct btrfs_key *key, int level, u64 parent,
340 			  u64 wanted_disk_byte, int count,
341 			  struct share_check *sc, gfp_t gfp_mask)
342 {
343 	struct prelim_ref *ref;
344 
345 	if (root_id == BTRFS_DATA_RELOC_TREE_OBJECTID)
346 		return 0;
347 
348 	ref = kmem_cache_alloc(btrfs_prelim_ref_cache, gfp_mask);
349 	if (!ref)
350 		return -ENOMEM;
351 
352 	ref->root_id = root_id;
353 	if (key)
354 		ref->key_for_search = *key;
355 	else
356 		memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));
357 
358 	ref->inode_list = NULL;
359 	ref->level = level;
360 	ref->count = count;
361 	ref->parent = parent;
362 	ref->wanted_disk_byte = wanted_disk_byte;
363 	prelim_ref_insert(fs_info, preftree, ref, sc);
364 	return extent_is_shared(sc);
365 }
366 
367 /* direct refs use root == 0, key == NULL */
368 static int add_direct_ref(const struct btrfs_fs_info *fs_info,
369 			  struct preftrees *preftrees, int level, u64 parent,
370 			  u64 wanted_disk_byte, int count,
371 			  struct share_check *sc, gfp_t gfp_mask)
372 {
373 	return add_prelim_ref(fs_info, &preftrees->direct, 0, NULL, level,
374 			      parent, wanted_disk_byte, count, sc, gfp_mask);
375 }
376 
377 /* indirect refs use parent == 0 */
378 static int add_indirect_ref(const struct btrfs_fs_info *fs_info,
379 			    struct preftrees *preftrees, u64 root_id,
380 			    const struct btrfs_key *key, int level,
381 			    u64 wanted_disk_byte, int count,
382 			    struct share_check *sc, gfp_t gfp_mask)
383 {
384 	struct preftree *tree = &preftrees->indirect;
385 
386 	if (!key)
387 		tree = &preftrees->indirect_missing_keys;
388 	return add_prelim_ref(fs_info, tree, root_id, key, level, 0,
389 			      wanted_disk_byte, count, sc, gfp_mask);
390 }
391 
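/*
 * Search the direct ref rbtree for a prelim_ref whose parent equals @bytenr.
 * A match means the leaf at @bytenr is referenced by a shared data backref,
 * so normal (indirect) backref resolution must not walk its items and has to
 * move on to the next leaf. Returns 1 if such a ref exists, 0 otherwise.
 */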
392 static int is_shared_data_backref(struct preftrees *preftrees, u64 bytenr)
393 {
394 	struct rb_node **p = &preftrees->direct.root.rb_root.rb_node;
395 	struct rb_node *parent = NULL;
396 	struct prelim_ref *ref = NULL;
397 	struct prelim_ref target = {0};
398 	int result;
399 
400 	target.parent = bytenr;
401 
402 	while (*p) {
403 		parent = *p;
404 		ref = rb_entry(parent, struct prelim_ref, rbnode);
405 		result = prelim_ref_compare(ref, &target);
406 
407 		if (result < 0)
408 			p = &(*p)->rb_left;
409 		else if (result > 0)
410 			p = &(*p)->rb_right;
411 		else
412 			return 1;
413 	}
414 	return 0;
415 }
416 
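/*
 * Collect the logical addresses of all leaves reachable through @path that
 * contain file extent items pointing at ref->wanted_disk_byte and add them
 * to the @parents ulist. For level != 0 the tree block already on the path
 * is the single parent. When @extent_item_pos is given, an inode list built
 * by check_extent_in_eb() is attached to each ulist entry.
 */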
417 static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
418 			   struct ulist *parents,
419 			   struct preftrees *preftrees, struct prelim_ref *ref,
420 			   int level, u64 time_seq, const u64 *extent_item_pos,
421 			   bool ignore_offset)
422 {
423 	int ret = 0;
424 	int slot;
425 	struct extent_buffer *eb;
426 	struct btrfs_key key;
427 	struct btrfs_key *key_for_search = &ref->key_for_search;
428 	struct btrfs_file_extent_item *fi;
429 	struct extent_inode_elem *eie = NULL, *old = NULL;
430 	u64 disk_byte;
431 	u64 wanted_disk_byte = ref->wanted_disk_byte;
432 	u64 count = 0;
433 	u64 data_offset;
434 	u8 type;
435 
436 	if (level != 0) {
437 		eb = path->nodes[level];
438 		ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
439 		if (ret < 0)
440 			return ret;
441 		return 0;
442 	}
443 
444 	/*
445 	 * 1. We normally enter this function with the path already pointing to
446 	 *    the first item to check. But sometimes, we may enter it with
447 	 *    slot == nritems.
448 	 * 2. We are searching for normal backref but bytenr of this leaf
449 	 *    matches shared data backref
450 	 * 3. The leaf owner is not equal to the root we are searching
451 	 *
452 	 * For these cases, go to the next leaf before we continue.
453 	 */
454 	eb = path->nodes[0];
455 	if (path->slots[0] >= btrfs_header_nritems(eb) ||
456 	    is_shared_data_backref(preftrees, eb->start) ||
457 	    ref->root_id != btrfs_header_owner(eb)) {
458 		if (time_seq == SEQ_LAST)
459 			ret = btrfs_next_leaf(root, path);
460 		else
461 			ret = btrfs_next_old_leaf(root, path, time_seq);
462 	}
463 
464 	while (!ret && count < ref->count) {
465 		eb = path->nodes[0];
466 		slot = path->slots[0];
467 
468 		btrfs_item_key_to_cpu(eb, &key, slot);
469 
470 		if (key.objectid != key_for_search->objectid ||
471 		    key.type != BTRFS_EXTENT_DATA_KEY)
472 			break;
473 
474 		/*
475 		 * We are searching for normal backref but bytenr of this leaf
476 		 * matches shared data backref, OR
477 		 * the leaf owner is not equal to the root we are searching for
478 		 */
479 		if (slot == 0 &&
480 		    (is_shared_data_backref(preftrees, eb->start) ||
481 		     ref->root_id != btrfs_header_owner(eb))) {
482 			if (time_seq == SEQ_LAST)
483 				ret = btrfs_next_leaf(root, path);
484 			else
485 				ret = btrfs_next_old_leaf(root, path, time_seq);
486 			continue;
487 		}
488 		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
489 		type = btrfs_file_extent_type(eb, fi);
490 		if (type == BTRFS_FILE_EXTENT_INLINE)
491 			goto next;
492 		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
493 		data_offset = btrfs_file_extent_offset(eb, fi);
494 
495 		if (disk_byte == wanted_disk_byte) {
496 			eie = NULL;
497 			old = NULL;
498 			if (ref->key_for_search.offset == key.offset - data_offset)
499 				count++;
500 			else
501 				goto next;
502 			if (extent_item_pos) {
503 				ret = check_extent_in_eb(&key, eb, fi,
504 						*extent_item_pos,
505 						&eie, ignore_offset);
506 				if (ret < 0)
507 					break;
508 			}
509 			if (ret > 0)
510 				goto next;
511 			ret = ulist_add_merge_ptr(parents, eb->start,
512 						  eie, (void **)&old, GFP_NOFS);
513 			if (ret < 0)
514 				break;
515 			if (!ret && extent_item_pos) {
516 				while (old->next)
517 					old = old->next;
518 				old->next = eie;
519 			}
520 			eie = NULL;
521 		}
522 next:
523 		if (time_seq == SEQ_LAST)
524 			ret = btrfs_next_item(root, path);
525 		else
526 			ret = btrfs_next_old_item(root, path, time_seq);
527 	}
528 
529 	if (ret > 0)
530 		ret = 0;
531 	else if (ret < 0)
532 		free_inode_elem_list(eie);
533 	return ret;
534 }
535 
536 /*
537  * resolve an indirect backref in the form (root_id, key, level)
538  * to a logical address
539  */
540 static int resolve_indirect_ref(struct btrfs_fs_info *fs_info,
541 				struct btrfs_path *path, u64 time_seq,
542 				struct preftrees *preftrees,
543 				struct prelim_ref *ref, struct ulist *parents,
544 				const u64 *extent_item_pos, bool ignore_offset)
545 {
546 	struct btrfs_root *root;
547 	struct btrfs_key root_key;
548 	struct extent_buffer *eb;
549 	int ret = 0;
550 	int root_level;
551 	int level = ref->level;
552 	int index;
553 	struct btrfs_key search_key = ref->key_for_search;
554 
555 	root_key.objectid = ref->root_id;
556 	root_key.type = BTRFS_ROOT_ITEM_KEY;
557 	root_key.offset = (u64)-1;
558 
559 	index = srcu_read_lock(&fs_info->subvol_srcu);
560 
561 	root = btrfs_get_fs_root(fs_info, &root_key, false);
562 	if (IS_ERR(root)) {
563 		srcu_read_unlock(&fs_info->subvol_srcu, index);
564 		ret = PTR_ERR(root);
565 		goto out;
566 	}
567 
568 	if (btrfs_is_testing(fs_info)) {
569 		srcu_read_unlock(&fs_info->subvol_srcu, index);
570 		ret = -ENOENT;
571 		goto out;
572 	}
573 
574 	if (path->search_commit_root)
575 		root_level = btrfs_header_level(root->commit_root);
576 	else if (time_seq == SEQ_LAST)
577 		root_level = btrfs_header_level(root->node);
578 	else
579 		root_level = btrfs_old_root_level(root, time_seq);
580 
581 	if (root_level + 1 == level) {
582 		srcu_read_unlock(&fs_info->subvol_srcu, index);
583 		goto out;
584 	}
585 
586 	/*
587 	 * We can often find data backrefs with an offset that is too large
588 	 * (>= LLONG_MAX, maximum allowed file offset) due to underflows when
589 	 * subtracting a file's offset with the data offset of its
590 	 * corresponding extent data item. This can happen for example in the
591 	 * clone ioctl.
592 	 *
593 	 * So if we detect such case we set the search key's offset to zero to
594 	 * make sure we will find the matching file extent item at
595 	 * add_all_parents(), otherwise we will miss it because the offset
596 	 * taken from the backref is much larger than the offset of the file
597 	 * extent item. This can make us scan a very large number of file
598 	 * extent items, but at least it will not make us miss any.
599 	 *
600 	 * This is an ugly workaround for a behaviour that should have never
601 	 * existed, but it does and a fix for the clone ioctl would touch a lot
602 	 * of places, cause backwards incompatibility and would not fix the
603 	 * problem for extents cloned with older kernels.
604 	 */
605 	if (search_key.type == BTRFS_EXTENT_DATA_KEY &&
606 	    search_key.offset >= LLONG_MAX)
607 		search_key.offset = 0;
608 	path->lowest_level = level;
609 	if (time_seq == SEQ_LAST)
610 		ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
611 	else
612 		ret = btrfs_search_old_slot(root, &search_key, path, time_seq);
613 
614 	/* root node has been locked, we can release @subvol_srcu safely here */
615 	srcu_read_unlock(&fs_info->subvol_srcu, index);
616 
617 	btrfs_debug(fs_info,
618 		"search slot in root %llu (level %d, ref count %d) returned %d for key (%llu %u %llu)",
619 		 ref->root_id, level, ref->count, ret,
620 		 ref->key_for_search.objectid, ref->key_for_search.type,
621 		 ref->key_for_search.offset);
622 	if (ret < 0)
623 		goto out;
624 
625 	eb = path->nodes[level];
626 	while (!eb) {
627 		if (WARN_ON(!level)) {
628 			ret = 1;
629 			goto out;
630 		}
631 		level--;
632 		eb = path->nodes[level];
633 	}
634 
635 	ret = add_all_parents(root, path, parents, preftrees, ref, level,
636 			      time_seq, extent_item_pos, ignore_offset);
637 out:
638 	path->lowest_level = 0;
639 	btrfs_release_path(path);
640 	return ret;
641 }
642 
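/*
 * The aux field of a ulist node carries the inode list collected for that
 * parent leaf. unode_aux_to_inode_list() converts it back into a pointer and
 * free_leaf_list() releases every attached list before freeing the ulist
 * itself.
 */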
643 static struct extent_inode_elem *
644 unode_aux_to_inode_list(struct ulist_node *node)
645 {
646 	if (!node)
647 		return NULL;
648 	return (struct extent_inode_elem *)(uintptr_t)node->aux;
649 }
650 
651 static void free_leaf_list(struct ulist *ulist)
652 {
653 	struct ulist_node *node;
654 	struct ulist_iterator uiter;
655 
656 	ULIST_ITER_INIT(&uiter);
657 	while ((node = ulist_next(ulist, &uiter)))
658 		free_inode_elem_list(unode_aux_to_inode_list(node));
659 
660 	ulist_free(ulist);
661 }
662 
663 /*
664  * We maintain three separate rbtrees: one for direct refs, one for
665  * indirect refs which have a key, and one for indirect refs which do not
666  * have a key. Each tree does merge on insertion.
667  *
668  * Once all of the references are located, we iterate over the tree of
669  * indirect refs with missing keys. An appropriate key is located and
670  * the ref is moved onto the tree for indirect refs. After all missing
671  * keys are thus located, we iterate over the indirect ref tree, resolve
672  * each reference, and then insert the resolved reference onto the
673  * direct tree (merging there too).
674  *
675  * New backrefs (i.e., for parent nodes) are added to the appropriate
676  * rbtree as they are encountered. The new backrefs are subsequently
677  * resolved as above.
678  */
679 static int resolve_indirect_refs(struct btrfs_fs_info *fs_info,
680 				 struct btrfs_path *path, u64 time_seq,
681 				 struct preftrees *preftrees,
682 				 const u64 *extent_item_pos,
683 				 struct share_check *sc, bool ignore_offset)
684 {
685 	int err;
686 	int ret = 0;
687 	struct ulist *parents;
688 	struct ulist_node *node;
689 	struct ulist_iterator uiter;
690 	struct rb_node *rnode;
691 
692 	parents = ulist_alloc(GFP_NOFS);
693 	if (!parents)
694 		return -ENOMEM;
695 
696 	/*
697 	 * We could trade memory usage for performance here by iterating
698 	 * the tree, allocating new refs for each insertion, and then
699 	 * freeing the entire indirect tree when we're done.  In some test
700 	 * cases, the tree can grow quite large (~200k objects).
701 	 */
702 	while ((rnode = rb_first_cached(&preftrees->indirect.root))) {
703 		struct prelim_ref *ref;
704 
705 		ref = rb_entry(rnode, struct prelim_ref, rbnode);
706 		if (WARN(ref->parent,
707 			 "BUG: direct ref found in indirect tree")) {
708 			ret = -EINVAL;
709 			goto out;
710 		}
711 
712 		rb_erase_cached(&ref->rbnode, &preftrees->indirect.root);
713 		preftrees->indirect.count--;
714 
715 		if (ref->count == 0) {
716 			free_pref(ref);
717 			continue;
718 		}
719 
720 		if (sc && sc->root_objectid &&
721 		    ref->root_id != sc->root_objectid) {
722 			free_pref(ref);
723 			ret = BACKREF_FOUND_SHARED;
724 			goto out;
725 		}
726 		err = resolve_indirect_ref(fs_info, path, time_seq, preftrees,
727 					   ref, parents, extent_item_pos,
728 					   ignore_offset);
729 		/*
730 		 * we can only tolerate ENOENT; otherwise we should catch the
731 		 * error and return directly.
732 		 */
733 		if (err == -ENOENT) {
734 			prelim_ref_insert(fs_info, &preftrees->direct, ref,
735 					  NULL);
736 			continue;
737 		} else if (err) {
738 			free_pref(ref);
739 			ret = err;
740 			goto out;
741 		}
742 
743 		/* we put the first parent into the ref at hand */
744 		ULIST_ITER_INIT(&uiter);
745 		node = ulist_next(parents, &uiter);
746 		ref->parent = node ? node->val : 0;
747 		ref->inode_list = unode_aux_to_inode_list(node);
748 
749 		/* Add a prelim_ref(s) for any other parent(s). */
750 		while ((node = ulist_next(parents, &uiter))) {
751 			struct prelim_ref *new_ref;
752 
753 			new_ref = kmem_cache_alloc(btrfs_prelim_ref_cache,
754 						   GFP_NOFS);
755 			if (!new_ref) {
756 				free_pref(ref);
757 				ret = -ENOMEM;
758 				goto out;
759 			}
760 			memcpy(new_ref, ref, sizeof(*ref));
761 			new_ref->parent = node->val;
762 			new_ref->inode_list = unode_aux_to_inode_list(node);
763 			prelim_ref_insert(fs_info, &preftrees->direct,
764 					  new_ref, NULL);
765 		}
766 
767 		/*
768 		 * Now it's a direct ref, put it in the direct tree. We must
769 		 * do this last because the ref could be merged/freed here.
770 		 */
771 		prelim_ref_insert(fs_info, &preftrees->direct, ref, NULL);
772 
773 		ulist_reinit(parents);
774 		cond_resched();
775 	}
776 out:
777 	/*
778 	 * We may have inode lists attached to refs in the parents ulist, so we
779 	 * must free them before freeing the ulist and its refs.
780 	 */
781 	free_leaf_list(parents);
782 	return ret;
783 }
784 
785 /*
786  * read tree blocks and add keys where required.
787  */
788 static int add_missing_keys(struct btrfs_fs_info *fs_info,
789 			    struct preftrees *preftrees, bool lock)
790 {
791 	struct prelim_ref *ref;
792 	struct extent_buffer *eb;
793 	struct preftree *tree = &preftrees->indirect_missing_keys;
794 	struct rb_node *node;
795 
796 	while ((node = rb_first_cached(&tree->root))) {
797 		ref = rb_entry(node, struct prelim_ref, rbnode);
798 		rb_erase_cached(node, &tree->root);
799 
800 		BUG_ON(ref->parent);	/* should not be a direct ref */
801 		BUG_ON(ref->key_for_search.type);
802 		BUG_ON(!ref->wanted_disk_byte);
803 
804 		eb = read_tree_block(fs_info, ref->wanted_disk_byte, 0,
805 				     ref->level - 1, NULL);
806 		if (IS_ERR(eb)) {
807 			free_pref(ref);
808 			return PTR_ERR(eb);
809 		} else if (!extent_buffer_uptodate(eb)) {
810 			free_pref(ref);
811 			free_extent_buffer(eb);
812 			return -EIO;
813 		}
814 		if (lock)
815 			btrfs_tree_read_lock(eb);
816 		if (btrfs_header_level(eb) == 0)
817 			btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0);
818 		else
819 			btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0);
820 		if (lock)
821 			btrfs_tree_read_unlock(eb);
822 		free_extent_buffer(eb);
823 		prelim_ref_insert(fs_info, &preftrees->indirect, ref, NULL);
824 		cond_resched();
825 	}
826 	return 0;
827 }
828 
829 /*
830  * add all currently queued delayed refs from this head whose seq nr is
831  * smaller than or equal to seq to the list
832  */
833 static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
834 			    struct btrfs_delayed_ref_head *head, u64 seq,
835 			    struct preftrees *preftrees, struct share_check *sc)
836 {
837 	struct btrfs_delayed_ref_node *node;
838 	struct btrfs_key key;
839 	struct rb_node *n;
840 	int count;
841 	int ret = 0;
842 
843 	spin_lock(&head->lock);
844 	for (n = rb_first_cached(&head->ref_tree); n; n = rb_next(n)) {
845 		node = rb_entry(n, struct btrfs_delayed_ref_node,
846 				ref_node);
847 		if (node->seq > seq)
848 			continue;
849 
850 		switch (node->action) {
851 		case BTRFS_ADD_DELAYED_EXTENT:
852 		case BTRFS_UPDATE_DELAYED_HEAD:
853 			WARN_ON(1);
854 			continue;
855 		case BTRFS_ADD_DELAYED_REF:
856 			count = node->ref_mod;
857 			break;
858 		case BTRFS_DROP_DELAYED_REF:
859 			count = node->ref_mod * -1;
860 			break;
861 		default:
862 			BUG();
863 		}
864 		switch (node->type) {
865 		case BTRFS_TREE_BLOCK_REF_KEY: {
866 			/* NORMAL INDIRECT METADATA backref */
867 			struct btrfs_delayed_tree_ref *ref;
868 			struct btrfs_key *key_ptr = NULL;
869 
870 			if (head->extent_op && head->extent_op->update_key) {
871 				btrfs_disk_key_to_cpu(&key, &head->extent_op->key);
872 				key_ptr = &key;
873 			}
874 
875 			ref = btrfs_delayed_node_to_tree_ref(node);
876 			ret = add_indirect_ref(fs_info, preftrees, ref->root,
877 					       key_ptr, ref->level + 1,
878 					       node->bytenr, count, sc,
879 					       GFP_ATOMIC);
880 			break;
881 		}
882 		case BTRFS_SHARED_BLOCK_REF_KEY: {
883 			/* SHARED DIRECT METADATA backref */
884 			struct btrfs_delayed_tree_ref *ref;
885 
886 			ref = btrfs_delayed_node_to_tree_ref(node);
887 
888 			ret = add_direct_ref(fs_info, preftrees, ref->level + 1,
889 					     ref->parent, node->bytenr, count,
890 					     sc, GFP_ATOMIC);
891 			break;
892 		}
893 		case BTRFS_EXTENT_DATA_REF_KEY: {
894 			/* NORMAL INDIRECT DATA backref */
895 			struct btrfs_delayed_data_ref *ref;
896 			ref = btrfs_delayed_node_to_data_ref(node);
897 
898 			key.objectid = ref->objectid;
899 			key.type = BTRFS_EXTENT_DATA_KEY;
900 			key.offset = ref->offset;
901 
902 			/*
903 			 * If we have a share check context and a reference for
904 			 * another inode, we can't exit immediately. This is
905 			 * because even if this is a BTRFS_ADD_DELAYED_REF
906 			 * reference we may find next a BTRFS_DROP_DELAYED_REF
907 			 * which cancels out this ADD reference.
908 			 *
909 			 * If this is a DROP reference and there was no previous
910 			 * ADD reference, then we need to signal that when we
911 			 * process references from the extent tree (through
912 			 * add_inline_refs() and add_keyed_refs()), we should
913 			 * not exit early if we find a reference for another
914 			 * inode, because one of the delayed DROP references
915 			 * may cancel that reference in the extent tree.
916 			 */
917 			if (sc && count < 0)
918 				sc->have_delayed_delete_refs = true;
919 
920 			ret = add_indirect_ref(fs_info, preftrees, ref->root,
921 					       &key, 0, node->bytenr, count, sc,
922 					       GFP_ATOMIC);
923 			break;
924 		}
925 		case BTRFS_SHARED_DATA_REF_KEY: {
926 			/* SHARED DIRECT FULL backref */
927 			struct btrfs_delayed_data_ref *ref;
928 
929 			ref = btrfs_delayed_node_to_data_ref(node);
930 
931 			ret = add_direct_ref(fs_info, preftrees, 0, ref->parent,
932 					     node->bytenr, count, sc,
933 					     GFP_ATOMIC);
934 			break;
935 		}
936 		default:
937 			WARN_ON(1);
938 		}
939 		/*
940 		 * We must ignore BACKREF_FOUND_SHARED until all delayed
941 		 * refs have been checked.
942 		 */
943 		if (ret && (ret != BACKREF_FOUND_SHARED))
944 			break;
945 	}
946 	if (!ret)
947 		ret = extent_is_shared(sc);
948 
949 	spin_unlock(&head->lock);
950 	return ret;
951 }
952 
953 /*
954  * add all inline backrefs for bytenr to the list
955  *
956  * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
957  */
958 static int add_inline_refs(const struct btrfs_fs_info *fs_info,
959 			   struct btrfs_path *path, u64 bytenr,
960 			   int *info_level, struct preftrees *preftrees,
961 			   struct share_check *sc)
962 {
963 	int ret = 0;
964 	int slot;
965 	struct extent_buffer *leaf;
966 	struct btrfs_key key;
967 	struct btrfs_key found_key;
968 	unsigned long ptr;
969 	unsigned long end;
970 	struct btrfs_extent_item *ei;
971 	u64 flags;
972 	u64 item_size;
973 
974 	/*
975 	 * enumerate all inline refs
976 	 */
977 	leaf = path->nodes[0];
978 	slot = path->slots[0];
979 
980 	item_size = btrfs_item_size_nr(leaf, slot);
981 	BUG_ON(item_size < sizeof(*ei));
982 
983 	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
984 	flags = btrfs_extent_flags(leaf, ei);
985 	btrfs_item_key_to_cpu(leaf, &found_key, slot);
986 
987 	ptr = (unsigned long)(ei + 1);
988 	end = (unsigned long)ei + item_size;
989 
990 	if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
991 	    flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
992 		struct btrfs_tree_block_info *info;
993 
994 		info = (struct btrfs_tree_block_info *)ptr;
995 		*info_level = btrfs_tree_block_level(leaf, info);
996 		ptr += sizeof(struct btrfs_tree_block_info);
997 		BUG_ON(ptr > end);
998 	} else if (found_key.type == BTRFS_METADATA_ITEM_KEY) {
999 		*info_level = found_key.offset;
1000 	} else {
1001 		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
1002 	}
1003 
1004 	while (ptr < end) {
1005 		struct btrfs_extent_inline_ref *iref;
1006 		u64 offset;
1007 		int type;
1008 
1009 		iref = (struct btrfs_extent_inline_ref *)ptr;
1010 		type = btrfs_get_extent_inline_ref_type(leaf, iref,
1011 							BTRFS_REF_TYPE_ANY);
1012 		if (type == BTRFS_REF_TYPE_INVALID)
1013 			return -EUCLEAN;
1014 
1015 		offset = btrfs_extent_inline_ref_offset(leaf, iref);
1016 
1017 		switch (type) {
1018 		case BTRFS_SHARED_BLOCK_REF_KEY:
1019 			ret = add_direct_ref(fs_info, preftrees,
1020 					     *info_level + 1, offset,
1021 					     bytenr, 1, NULL, GFP_NOFS);
1022 			break;
1023 		case BTRFS_SHARED_DATA_REF_KEY: {
1024 			struct btrfs_shared_data_ref *sdref;
1025 			int count;
1026 
1027 			sdref = (struct btrfs_shared_data_ref *)(iref + 1);
1028 			count = btrfs_shared_data_ref_count(leaf, sdref);
1029 
1030 			ret = add_direct_ref(fs_info, preftrees, 0, offset,
1031 					     bytenr, count, sc, GFP_NOFS);
1032 			break;
1033 		}
1034 		case BTRFS_TREE_BLOCK_REF_KEY:
1035 			ret = add_indirect_ref(fs_info, preftrees, offset,
1036 					       NULL, *info_level + 1,
1037 					       bytenr, 1, NULL, GFP_NOFS);
1038 			break;
1039 		case BTRFS_EXTENT_DATA_REF_KEY: {
1040 			struct btrfs_extent_data_ref *dref;
1041 			int count;
1042 			u64 root;
1043 
1044 			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1045 			count = btrfs_extent_data_ref_count(leaf, dref);
1046 			key.objectid = btrfs_extent_data_ref_objectid(leaf,
1047 								      dref);
1048 			key.type = BTRFS_EXTENT_DATA_KEY;
1049 			key.offset = btrfs_extent_data_ref_offset(leaf, dref);
1050 
1051 			if (sc && sc->inum && key.objectid != sc->inum &&
1052 			    !sc->have_delayed_delete_refs) {
1053 				ret = BACKREF_FOUND_SHARED;
1054 				break;
1055 			}
1056 
1057 			root = btrfs_extent_data_ref_root(leaf, dref);
1058 
1059 			ret = add_indirect_ref(fs_info, preftrees, root,
1060 					       &key, 0, bytenr, count,
1061 					       sc, GFP_NOFS);
1062 
1063 			break;
1064 		}
1065 		default:
1066 			WARN_ON(1);
1067 		}
1068 		if (ret)
1069 			return ret;
1070 		ptr += btrfs_extent_inline_ref_size(type);
1071 	}
1072 
1073 	return 0;
1074 }
1075 
1076 /*
1077  * add all non-inline backrefs for bytenr to the list
1078  *
1079  * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
1080  */
1081 static int add_keyed_refs(struct btrfs_fs_info *fs_info,
1082 			  struct btrfs_path *path, u64 bytenr,
1083 			  int info_level, struct preftrees *preftrees,
1084 			  struct share_check *sc)
1085 {
1086 	struct btrfs_root *extent_root = fs_info->extent_root;
1087 	int ret;
1088 	int slot;
1089 	struct extent_buffer *leaf;
1090 	struct btrfs_key key;
1091 
1092 	while (1) {
1093 		ret = btrfs_next_item(extent_root, path);
1094 		if (ret < 0)
1095 			break;
1096 		if (ret) {
1097 			ret = 0;
1098 			break;
1099 		}
1100 
1101 		slot = path->slots[0];
1102 		leaf = path->nodes[0];
1103 		btrfs_item_key_to_cpu(leaf, &key, slot);
1104 
1105 		if (key.objectid != bytenr)
1106 			break;
1107 		if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
1108 			continue;
1109 		if (key.type > BTRFS_SHARED_DATA_REF_KEY)
1110 			break;
1111 
1112 		switch (key.type) {
1113 		case BTRFS_SHARED_BLOCK_REF_KEY:
1114 			/* SHARED DIRECT METADATA backref */
1115 			ret = add_direct_ref(fs_info, preftrees,
1116 					     info_level + 1, key.offset,
1117 					     bytenr, 1, NULL, GFP_NOFS);
1118 			break;
1119 		case BTRFS_SHARED_DATA_REF_KEY: {
1120 			/* SHARED DIRECT FULL backref */
1121 			struct btrfs_shared_data_ref *sdref;
1122 			int count;
1123 
1124 			sdref = btrfs_item_ptr(leaf, slot,
1125 					      struct btrfs_shared_data_ref);
1126 			count = btrfs_shared_data_ref_count(leaf, sdref);
1127 			ret = add_direct_ref(fs_info, preftrees, 0,
1128 					     key.offset, bytenr, count,
1129 					     sc, GFP_NOFS);
1130 			break;
1131 		}
1132 		case BTRFS_TREE_BLOCK_REF_KEY:
1133 			/* NORMAL INDIRECT METADATA backref */
1134 			ret = add_indirect_ref(fs_info, preftrees, key.offset,
1135 					       NULL, info_level + 1, bytenr,
1136 					       1, NULL, GFP_NOFS);
1137 			break;
1138 		case BTRFS_EXTENT_DATA_REF_KEY: {
1139 			/* NORMAL INDIRECT DATA backref */
1140 			struct btrfs_extent_data_ref *dref;
1141 			int count;
1142 			u64 root;
1143 
1144 			dref = btrfs_item_ptr(leaf, slot,
1145 					      struct btrfs_extent_data_ref);
1146 			count = btrfs_extent_data_ref_count(leaf, dref);
1147 			key.objectid = btrfs_extent_data_ref_objectid(leaf,
1148 								      dref);
1149 			key.type = BTRFS_EXTENT_DATA_KEY;
1150 			key.offset = btrfs_extent_data_ref_offset(leaf, dref);
1151 
1152 			if (sc && sc->inum && key.objectid != sc->inum &&
1153 			    !sc->have_delayed_delete_refs) {
1154 				ret = BACKREF_FOUND_SHARED;
1155 				break;
1156 			}
1157 
1158 			root = btrfs_extent_data_ref_root(leaf, dref);
1159 			ret = add_indirect_ref(fs_info, preftrees, root,
1160 					       &key, 0, bytenr, count,
1161 					       sc, GFP_NOFS);
1162 			break;
1163 		}
1164 		default:
1165 			WARN_ON(1);
1166 		}
1167 		if (ret)
1168 			return ret;
1169 
1170 	}
1171 
1172 	return ret;
1173 }
1174 
1175 /*
1176  * this adds all existing backrefs (inline backrefs, backrefs and delayed
1177  * refs) for the given bytenr to the refs list, merges duplicates and resolves
1178  * indirect refs to their parent bytenr.
1179  * When roots are found, they're added to the roots list
1180  *
1181  * If time_seq is set to SEQ_LAST, it will not search delayed_refs and will
1182  * behave much like the trans == NULL case; the only difference is that it
1183  * will not search the commit root.
1184  * The special case is for qgroup to search roots in commit_transaction().
1185  *
1186  * @sc - if !NULL, then immediately return BACKREF_FOUND_SHARED when a
1187  * shared extent is detected.
1188  *
1189  * Otherwise this returns 0 for success and <0 for an error.
1190  *
1191  * If ignore_offset is set to false, only extent refs whose offsets match
1192  * extent_item_pos are returned.  If true, every extent ref is returned
1193  * and extent_item_pos is ignored.
1194  *
1195  * FIXME some caching might speed things up
1196  */
1197 static int find_parent_nodes(struct btrfs_trans_handle *trans,
1198 			     struct btrfs_fs_info *fs_info, u64 bytenr,
1199 			     u64 time_seq, struct ulist *refs,
1200 			     struct ulist *roots, const u64 *extent_item_pos,
1201 			     struct share_check *sc, bool ignore_offset)
1202 {
1203 	struct btrfs_key key;
1204 	struct btrfs_path *path;
1205 	struct btrfs_delayed_ref_root *delayed_refs = NULL;
1206 	struct btrfs_delayed_ref_head *head;
1207 	int info_level = 0;
1208 	int ret;
1209 	struct prelim_ref *ref;
1210 	struct rb_node *node;
1211 	struct extent_inode_elem *eie = NULL;
1212 	struct preftrees preftrees = {
1213 		.direct = PREFTREE_INIT,
1214 		.indirect = PREFTREE_INIT,
1215 		.indirect_missing_keys = PREFTREE_INIT
1216 	};
1217 
1218 	key.objectid = bytenr;
1219 	key.offset = (u64)-1;
1220 	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
1221 		key.type = BTRFS_METADATA_ITEM_KEY;
1222 	else
1223 		key.type = BTRFS_EXTENT_ITEM_KEY;
1224 
1225 	path = btrfs_alloc_path();
1226 	if (!path)
1227 		return -ENOMEM;
1228 	if (!trans) {
1229 		path->search_commit_root = 1;
1230 		path->skip_locking = 1;
1231 	}
1232 
1233 	if (time_seq == SEQ_LAST)
1234 		path->skip_locking = 1;
1235 
1236 	/*
1237 	 * grab both a lock on the path and a lock on the delayed ref head.
1238 	 * We need both to get a consistent picture of how the refs look
1239 	 * at a specified point in time
1240 	 */
1241 again:
1242 	head = NULL;
1243 
1244 	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
1245 	if (ret < 0)
1246 		goto out;
1247 	if (ret == 0) {
1248 		/* This shouldn't happen, indicates a bug or fs corruption. */
1249 		ASSERT(ret != 0);
1250 		ret = -EUCLEAN;
1251 		goto out;
1252 	}
1253 
1254 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
1255 	if (trans && likely(trans->type != __TRANS_DUMMY) &&
1256 	    time_seq != SEQ_LAST) {
1257 #else
1258 	if (trans && time_seq != SEQ_LAST) {
1259 #endif
1260 		/*
1261 		 * look if there are updates for this ref queued and lock the
1262 		 * head
1263 		 */
1264 		delayed_refs = &trans->transaction->delayed_refs;
1265 		spin_lock(&delayed_refs->lock);
1266 		head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
1267 		if (head) {
1268 			if (!mutex_trylock(&head->mutex)) {
1269 				refcount_inc(&head->refs);
1270 				spin_unlock(&delayed_refs->lock);
1271 
1272 				btrfs_release_path(path);
1273 
1274 				/*
1275 				 * Mutex was contended, block until it's
1276 				 * released and try again
1277 				 */
1278 				mutex_lock(&head->mutex);
1279 				mutex_unlock(&head->mutex);
1280 				btrfs_put_delayed_ref_head(head);
1281 				goto again;
1282 			}
1283 			spin_unlock(&delayed_refs->lock);
1284 			ret = add_delayed_refs(fs_info, head, time_seq,
1285 					       &preftrees, sc);
1286 			mutex_unlock(&head->mutex);
1287 			if (ret)
1288 				goto out;
1289 		} else {
1290 			spin_unlock(&delayed_refs->lock);
1291 		}
1292 	}
1293 
1294 	if (path->slots[0]) {
1295 		struct extent_buffer *leaf;
1296 		int slot;
1297 
1298 		path->slots[0]--;
1299 		leaf = path->nodes[0];
1300 		slot = path->slots[0];
1301 		btrfs_item_key_to_cpu(leaf, &key, slot);
1302 		if (key.objectid == bytenr &&
1303 		    (key.type == BTRFS_EXTENT_ITEM_KEY ||
1304 		     key.type == BTRFS_METADATA_ITEM_KEY)) {
1305 			ret = add_inline_refs(fs_info, path, bytenr,
1306 					      &info_level, &preftrees, sc);
1307 			if (ret)
1308 				goto out;
1309 			ret = add_keyed_refs(fs_info, path, bytenr, info_level,
1310 					     &preftrees, sc);
1311 			if (ret)
1312 				goto out;
1313 		}
1314 	}
1315 
1316 	btrfs_release_path(path);
1317 
1318 	ret = add_missing_keys(fs_info, &preftrees, path->skip_locking == 0);
1319 	if (ret)
1320 		goto out;
1321 
1322 	WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect_missing_keys.root.rb_root));
1323 
1324 	ret = resolve_indirect_refs(fs_info, path, time_seq, &preftrees,
1325 				    extent_item_pos, sc, ignore_offset);
1326 	if (ret)
1327 		goto out;
1328 
1329 	WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect.root.rb_root));
1330 
1331 	/*
1332 	 * This walks the tree of merged and resolved refs. Tree blocks are
1333 	 * read in as needed. Unique entries are added to the ulist, and
1334 	 * the list of found roots is updated.
1335 	 *
1336 	 * We release the entire tree in one go before returning.
1337 	 */
1338 	node = rb_first_cached(&preftrees.direct.root);
1339 	while (node) {
1340 		ref = rb_entry(node, struct prelim_ref, rbnode);
1341 		node = rb_next(&ref->rbnode);
1342 		/*
1343 		 * ref->count < 0 can happen here if there are delayed
1344 		 * refs with a node->action of BTRFS_DROP_DELAYED_REF.
1345 		 * prelim_ref_insert() relies on this when merging
1346 		 * identical refs to keep the overall count correct.
1347 		 * prelim_ref_insert() will merge only those refs
1348 		 * which compare identically.  Any refs having
1349 		 * e.g. different offsets would not be merged,
1350 		 * and would retain their original ref->count < 0.
1351 		 */
1352 		if (roots && ref->count && ref->root_id && ref->parent == 0) {
1353 			if (sc && sc->root_objectid &&
1354 			    ref->root_id != sc->root_objectid) {
1355 				ret = BACKREF_FOUND_SHARED;
1356 				goto out;
1357 			}
1358 
1359 			/* no parent == root of tree */
1360 			ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
1361 			if (ret < 0)
1362 				goto out;
1363 		}
1364 		if (ref->count && ref->parent) {
1365 			if (extent_item_pos && !ref->inode_list &&
1366 			    ref->level == 0) {
1367 				struct extent_buffer *eb;
1368 
1369 				eb = read_tree_block(fs_info, ref->parent, 0,
1370 						     ref->level, NULL);
1371 				if (IS_ERR(eb)) {
1372 					ret = PTR_ERR(eb);
1373 					goto out;
1374 				} else if (!extent_buffer_uptodate(eb)) {
1375 					free_extent_buffer(eb);
1376 					ret = -EIO;
1377 					goto out;
1378 				}
1379 
1380 				if (!path->skip_locking) {
1381 					btrfs_tree_read_lock(eb);
1382 					btrfs_set_lock_blocking_read(eb);
1383 				}
1384 				ret = find_extent_in_eb(eb, bytenr,
1385 							*extent_item_pos, &eie, ignore_offset);
1386 				if (!path->skip_locking)
1387 					btrfs_tree_read_unlock_blocking(eb);
1388 				free_extent_buffer(eb);
1389 				if (ret < 0)
1390 					goto out;
1391 				ref->inode_list = eie;
1392 				/*
1393 				 * We transferred the list ownership to the ref,
1394 				 * so set to NULL to avoid a double free in case
1395 				 * an error happens after this.
1396 				 */
1397 				eie = NULL;
1398 			}
1399 			ret = ulist_add_merge_ptr(refs, ref->parent,
1400 						  ref->inode_list,
1401 						  (void **)&eie, GFP_NOFS);
1402 			if (ret < 0)
1403 				goto out;
1404 			if (!ret && extent_item_pos) {
1405 				/*
1406 				 * We've recorded that parent, so we must extend
1407 				 * its inode list here.
1408 				 *
1409 				 * However if there was corruption we may not
1410 				 * have found an eie, return an error in this
1411 				 * case.
1412 				 */
1413 				ASSERT(eie);
1414 				if (!eie) {
1415 					ret = -EUCLEAN;
1416 					goto out;
1417 				}
1418 				while (eie->next)
1419 					eie = eie->next;
1420 				eie->next = ref->inode_list;
1421 			}
1422 			eie = NULL;
1423 			/*
1424 			 * We have transferred the inode list ownership from
1425 			 * this ref to the ref we added to the 'refs' ulist.
1426 			 * So set this ref's inode list to NULL to avoid
1427 			 * use-after-free when our caller uses it or double
1428 			 * frees in case an error happens before we return.
1429 			 */
1430 			ref->inode_list = NULL;
1431 		}
1432 		cond_resched();
1433 	}
1434 
1435 out:
1436 	btrfs_free_path(path);
1437 
1438 	prelim_release(&preftrees.direct);
1439 	prelim_release(&preftrees.indirect);
1440 	prelim_release(&preftrees.indirect_missing_keys);
1441 
1442 	if (ret < 0)
1443 		free_inode_elem_list(eie);
1444 	return ret;
1445 }
1446 
1447 /*
1448  * Finds all leaves with a reference to the specified combination of bytenr
1449  * and offset. The leaves will be stored in the leafs ulist, which must be
1450  * freed with ulist_free. Note that inode lists attached to the ulist nodes
1451  * must be freed as well (see free_leaf_list()).
1452  *
1453  * returns 0 on success, <0 on error
1454  */
1455 static int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
1456 				struct btrfs_fs_info *fs_info, u64 bytenr,
1457 				u64 time_seq, struct ulist **leafs,
1458 				const u64 *extent_item_pos, bool ignore_offset)
1459 {
1460 	int ret;
1461 
1462 	*leafs = ulist_alloc(GFP_NOFS);
1463 	if (!*leafs)
1464 		return -ENOMEM;
1465 
1466 	ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
1467 				*leafs, NULL, extent_item_pos, NULL, ignore_offset);
1468 	if (ret < 0 && ret != -ENOENT) {
1469 		free_leaf_list(*leafs);
1470 		return ret;
1471 	}
1472 
1473 	return 0;
1474 }
1475 
1476 /*
1477  * walk all backrefs for a given extent to find all roots that reference this
1478  * extent. Walking a backref means finding all extents that reference this
1479  * extent and in turn walk the backrefs of those, too. Naturally this is a
1480  * recursive process, but here it is implemented in an iterative fashion: We
1481  * find all referencing extents for the extent in question and put them on a
1482  * list. In turn, we find all referencing extents for those, further appending
1483  * to the list. The way we iterate the list allows adding more elements after
1484  * the current while iterating. The process stops when we reach the end of the
1485  * list. Found roots are added to the roots list.
1486  *
1487  * returns 0 on success, < 0 on error.
1488  */
1489 static int btrfs_find_all_roots_safe(struct btrfs_trans_handle *trans,
1490 				     struct btrfs_fs_info *fs_info, u64 bytenr,
1491 				     u64 time_seq, struct ulist **roots,
1492 				     bool ignore_offset)
1493 {
1494 	struct ulist *tmp;
1495 	struct ulist_node *node = NULL;
1496 	struct ulist_iterator uiter;
1497 	int ret;
1498 
1499 	tmp = ulist_alloc(GFP_NOFS);
1500 	if (!tmp)
1501 		return -ENOMEM;
1502 	*roots = ulist_alloc(GFP_NOFS);
1503 	if (!*roots) {
1504 		ulist_free(tmp);
1505 		return -ENOMEM;
1506 	}
1507 
1508 	ULIST_ITER_INIT(&uiter);
1509 	while (1) {
1510 		ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
1511 					tmp, *roots, NULL, NULL, ignore_offset);
1512 		if (ret < 0 && ret != -ENOENT) {
1513 			ulist_free(tmp);
1514 			ulist_free(*roots);
1515 			*roots = NULL;
1516 			return ret;
1517 		}
1518 		node = ulist_next(tmp, &uiter);
1519 		if (!node)
1520 			break;
1521 		bytenr = node->val;
1522 		cond_resched();
1523 	}
1524 
1525 	ulist_free(tmp);
1526 	return 0;
1527 }
1528 
1529 int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
1530 			 struct btrfs_fs_info *fs_info, u64 bytenr,
1531 			 u64 time_seq, struct ulist **roots,
1532 			 bool ignore_offset)
1533 {
1534 	int ret;
1535 
1536 	if (!trans)
1537 		down_read(&fs_info->commit_root_sem);
1538 	ret = btrfs_find_all_roots_safe(trans, fs_info, bytenr,
1539 					time_seq, roots, ignore_offset);
1540 	if (!trans)
1541 		up_read(&fs_info->commit_root_sem);
1542 	return ret;
1543 }
1544 
1545 /**
1546  * btrfs_check_shared - tell us whether an extent is shared
1547  *
1548  * btrfs_check_shared uses the backref walking code but will short
1549  * circuit as soon as it finds a root or inode that doesn't match the
1550  * one passed in. This provides a significant performance benefit for
1551  * callers (such as fiemap) which want to know whether the extent is
1552  * shared but do not need a ref count.
1553  *
1554  * This attempts to attach to the running transaction in order to account for
1555  * delayed refs, but continues on even when no running transaction exists.
1556  *
1557  * Return: 0 if extent is not shared, 1 if it is shared, < 0 on error.
1558  */
1559 int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr,
1560 		struct ulist *roots, struct ulist *tmp)
1561 {
1562 	struct btrfs_fs_info *fs_info = root->fs_info;
1563 	struct btrfs_trans_handle *trans;
1564 	struct ulist_iterator uiter;
1565 	struct ulist_node *node;
1566 	struct seq_list elem = SEQ_LIST_INIT(elem);
1567 	int ret = 0;
1568 	struct share_check shared = {
1569 		.root_objectid = root->root_key.objectid,
1570 		.inum = inum,
1571 		.share_count = 0,
1572 		.have_delayed_delete_refs = false,
1573 	};
1574 
1575 	ulist_init(roots);
1576 	ulist_init(tmp);
1577 
1578 	trans = btrfs_join_transaction_nostart(root);
1579 	if (IS_ERR(trans)) {
1580 		if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) {
1581 			ret = PTR_ERR(trans);
1582 			goto out;
1583 		}
1584 		trans = NULL;
1585 		down_read(&fs_info->commit_root_sem);
1586 	} else {
1587 		btrfs_get_tree_mod_seq(fs_info, &elem);
1588 	}
1589 
1590 	ULIST_ITER_INIT(&uiter);
1591 	while (1) {
1592 		ret = find_parent_nodes(trans, fs_info, bytenr, elem.seq, tmp,
1593 					roots, NULL, &shared, false);
1594 		if (ret == BACKREF_FOUND_SHARED) {
1595 			/* this is the only condition under which we return 1 */
1596 			ret = 1;
1597 			break;
1598 		}
1599 		if (ret < 0 && ret != -ENOENT)
1600 			break;
1601 		ret = 0;
1602 		node = ulist_next(tmp, &uiter);
1603 		if (!node)
1604 			break;
1605 		bytenr = node->val;
1606 		shared.share_count = 0;
1607 		shared.have_delayed_delete_refs = false;
1608 		cond_resched();
1609 	}
1610 
1611 	if (trans) {
1612 		btrfs_put_tree_mod_seq(fs_info, &elem);
1613 		btrfs_end_transaction(trans);
1614 	} else {
1615 		up_read(&fs_info->commit_root_sem);
1616 	}
1617 out:
1618 	ulist_release(roots);
1619 	ulist_release(tmp);
1620 	return ret;
1621 }
1622 
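/*
 * Find the next INODE_EXTREF item of @inode_objectid with a key offset of at
 * least @start_off. On success, *ret_extref points at the item inside the
 * leaf held by @path and, if @found_off is non-NULL, it receives the item's
 * key offset. Returns 0 on success, -ENOENT if no further extref exists, or
 * a negative error from the tree search.
 */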
1623 int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
1624 			  u64 start_off, struct btrfs_path *path,
1625 			  struct btrfs_inode_extref **ret_extref,
1626 			  u64 *found_off)
1627 {
1628 	int ret, slot;
1629 	struct btrfs_key key;
1630 	struct btrfs_key found_key;
1631 	struct btrfs_inode_extref *extref;
1632 	const struct extent_buffer *leaf;
1633 	unsigned long ptr;
1634 
1635 	key.objectid = inode_objectid;
1636 	key.type = BTRFS_INODE_EXTREF_KEY;
1637 	key.offset = start_off;
1638 
1639 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1640 	if (ret < 0)
1641 		return ret;
1642 
1643 	while (1) {
1644 		leaf = path->nodes[0];
1645 		slot = path->slots[0];
1646 		if (slot >= btrfs_header_nritems(leaf)) {
1647 			/*
1648 			 * If the item at offset is not found,
1649 			 * btrfs_search_slot will point us to the slot
1650 			 * where it should be inserted. In our case
1651 			 * that will be the slot directly before the
1652 			 * next INODE_REF_KEY_V2 item. In the case
1653 			 * that we're pointing to the last slot in a
1654 			 * leaf, we must move one leaf over.
1655 			 */
1656 			ret = btrfs_next_leaf(root, path);
1657 			if (ret) {
1658 				if (ret >= 1)
1659 					ret = -ENOENT;
1660 				break;
1661 			}
1662 			continue;
1663 		}
1664 
1665 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
1666 
1667 		/*
1668 		 * Check that we're still looking at an extended ref key for
1669 		 * this particular objectid. If we have different
1670 		 * objectid or type then there are no more to be found
1671 		 * in the tree and we can exit.
1672 		 */
1673 		ret = -ENOENT;
1674 		if (found_key.objectid != inode_objectid)
1675 			break;
1676 		if (found_key.type != BTRFS_INODE_EXTREF_KEY)
1677 			break;
1678 
1679 		ret = 0;
1680 		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
1681 		extref = (struct btrfs_inode_extref *)ptr;
1682 		*ret_extref = extref;
1683 		if (found_off)
1684 			*found_off = found_key.offset;
1685 		break;
1686 	}
1687 
1688 	return ret;
1689 }
1690 
1691 /*
1692  * this iterates to turn a name (from iref/extref) into a full filesystem path.
1693  * Elements of the path are separated by '/' and the path is guaranteed to be
1694  * 0-terminated. the path is only given within the current file system.
1695  * Therefore, it never starts with a '/'. the caller is responsible for
1696  * providing "size" bytes in "dest". the dest buffer is filled backwards and
1697  * the start of the resulting string is returned; normally this pointer lies
1698  * within dest.
1699  * in case the path buffer would overflow, the pointer is decremented further
1700  * as if output had been written to the buffer, though no more output is
1701  * actually generated. that way, the caller can determine how much space would
1702  * be required for the path to fit into the buffer. in that case, the returned
1703  * value will be smaller than dest. callers must check for this!
1704  */
1705 char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
1706 			u32 name_len, unsigned long name_off,
1707 			struct extent_buffer *eb_in, u64 parent,
1708 			char *dest, u32 size)
1709 {
1710 	int slot;
1711 	u64 next_inum;
1712 	int ret;
1713 	s64 bytes_left = ((s64)size) - 1;
1714 	struct extent_buffer *eb = eb_in;
1715 	struct btrfs_key found_key;
1716 	int leave_spinning = path->leave_spinning;
1717 	struct btrfs_inode_ref *iref;
1718 
1719 	if (bytes_left >= 0)
1720 		dest[bytes_left] = '\0';
1721 
1722 	path->leave_spinning = 1;
1723 	while (1) {
1724 		bytes_left -= name_len;
1725 		if (bytes_left >= 0)
1726 			read_extent_buffer(eb, dest + bytes_left,
1727 					   name_off, name_len);
1728 		if (eb != eb_in) {
1729 			if (!path->skip_locking)
1730 				btrfs_tree_read_unlock_blocking(eb);
1731 			free_extent_buffer(eb);
1732 		}
1733 		ret = btrfs_find_item(fs_root, path, parent, 0,
1734 				BTRFS_INODE_REF_KEY, &found_key);
1735 		if (ret > 0)
1736 			ret = -ENOENT;
1737 		if (ret)
1738 			break;
1739 
1740 		next_inum = found_key.offset;
1741 
1742 		/* regular exit ahead */
1743 		if (parent == next_inum)
1744 			break;
1745 
1746 		slot = path->slots[0];
1747 		eb = path->nodes[0];
1748 		/* make sure we can use eb after releasing the path */
1749 		if (eb != eb_in) {
1750 			if (!path->skip_locking)
1751 				btrfs_set_lock_blocking_read(eb);
1752 			path->nodes[0] = NULL;
1753 			path->locks[0] = 0;
1754 		}
1755 		btrfs_release_path(path);
1756 		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
1757 
1758 		name_len = btrfs_inode_ref_name_len(eb, iref);
1759 		name_off = (unsigned long)(iref + 1);
1760 
1761 		parent = next_inum;
1762 		--bytes_left;
1763 		if (bytes_left >= 0)
1764 			dest[bytes_left] = '/';
1765 	}
1766 
1767 	btrfs_release_path(path);
1768 	path->leave_spinning = leave_spinning;
1769 
1770 	if (ret)
1771 		return ERR_PTR(ret);
1772 
1773 	return dest + bytes_left;
1774 }
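
/*
 * Sketch of the overflow check described above (variable names are made up
 * for illustration): compare the returned pointer with the buffer start to
 * detect truncation and to compute the size that would have been needed.
 *
 *	char *p = btrfs_ref_to_path(fs_root, path, name_len, name_off,
 *				    eb, parent, buf, buf_size);
 *	if (IS_ERR(p))
 *		return PTR_ERR(p);
 *	if (p < buf)
 *		needed = buf_size + (buf - p);	(path did not fit)
 *	else
 *		use the 0-terminated path starting at p
 */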
1775 
1776 /*
1777  * this makes the path point to (logical EXTENT_ITEM *)
1778  * returns BTRFS_EXTENT_FLAG_DATA for data, BTRFS_EXTENT_FLAG_TREE_BLOCK for
1779  * tree blocks and <0 on error.
1780  */
1781 int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
1782 			struct btrfs_path *path, struct btrfs_key *found_key,
1783 			u64 *flags_ret)
1784 {
1785 	int ret;
1786 	u64 flags;
1787 	u64 size = 0;
1788 	u32 item_size;
1789 	const struct extent_buffer *eb;
1790 	struct btrfs_extent_item *ei;
1791 	struct btrfs_key key;
1792 
1793 	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
1794 		key.type = BTRFS_METADATA_ITEM_KEY;
1795 	else
1796 		key.type = BTRFS_EXTENT_ITEM_KEY;
1797 	key.objectid = logical;
1798 	key.offset = (u64)-1;
1799 
1800 	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
1801 	if (ret < 0)
1802 		return ret;
1803 
1804 	ret = btrfs_previous_extent_item(fs_info->extent_root, path, 0);
1805 	if (ret) {
1806 		if (ret > 0)
1807 			ret = -ENOENT;
1808 		return ret;
1809 	}
1810 	btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
1811 	if (found_key->type == BTRFS_METADATA_ITEM_KEY)
1812 		size = fs_info->nodesize;
1813 	else if (found_key->type == BTRFS_EXTENT_ITEM_KEY)
1814 		size = found_key->offset;
1815 
1816 	if (found_key->objectid > logical ||
1817 	    found_key->objectid + size <= logical) {
1818 		btrfs_debug(fs_info,
1819 			"logical %llu is not within any extent", logical);
1820 		return -ENOENT;
1821 	}
1822 
1823 	eb = path->nodes[0];
1824 	item_size = btrfs_item_size_nr(eb, path->slots[0]);
1825 	BUG_ON(item_size < sizeof(*ei));
1826 
1827 	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
1828 	flags = btrfs_extent_flags(eb, ei);
1829 
1830 	btrfs_debug(fs_info,
1831 		"logical %llu is at position %llu within the extent (%llu EXTENT_ITEM %llu) flags %#llx size %u",
1832 		 logical, logical - found_key->objectid, found_key->objectid,
1833 		 found_key->offset, flags, item_size);
1834 
1835 	WARN_ON(!flags_ret);
1836 	if (flags_ret) {
1837 		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1838 			*flags_ret = BTRFS_EXTENT_FLAG_TREE_BLOCK;
1839 		else if (flags & BTRFS_EXTENT_FLAG_DATA)
1840 			*flags_ret = BTRFS_EXTENT_FLAG_DATA;
1841 		else
1842 			BUG();
1843 		return 0;
1844 	}
1845 
1846 	return -EIO;
1847 }
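
/*
 * Typical use (sketch): resolve a logical address, then branch on the extent
 * type, which is how iterate_inodes_from_logical() below handles data and how
 * scrub-style callers handle tree blocks.
 *
 *	u64 flags = 0;
 *	ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
 *	if (ret < 0)
 *		return ret;
 *	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
 *		walk the metadata backrefs, e.g. with tree_backref_for_extent()
 *	else
 *		offset_in_extent = logical - found_key.objectid;
 */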
1848 
1849 /*
1850  * helper function to iterate extent inline refs. ptr must point to a 0 value
1851  * for the first call and may be modified. it is used to track state.
1852  * if more refs exist, 0 is returned and the next call to
1853  * get_extent_inline_ref must pass the modified ptr parameter to get the
1854  * next ref. after the last ref was processed, 1 is returned.
1855  * returns <0 on error
1856  */
1857 static int get_extent_inline_ref(unsigned long *ptr,
1858 				 const struct extent_buffer *eb,
1859 				 const struct btrfs_key *key,
1860 				 const struct btrfs_extent_item *ei,
1861 				 u32 item_size,
1862 				 struct btrfs_extent_inline_ref **out_eiref,
1863 				 int *out_type)
1864 {
1865 	unsigned long end;
1866 	u64 flags;
1867 	struct btrfs_tree_block_info *info;
1868 
1869 	if (!*ptr) {
1870 		/* first call */
1871 		flags = btrfs_extent_flags(eb, ei);
1872 		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1873 			if (key->type == BTRFS_METADATA_ITEM_KEY) {
1874 				/* a skinny metadata extent */
1875 				*out_eiref =
1876 				     (struct btrfs_extent_inline_ref *)(ei + 1);
1877 			} else {
1878 				WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY);
1879 				info = (struct btrfs_tree_block_info *)(ei + 1);
1880 				*out_eiref =
1881 				   (struct btrfs_extent_inline_ref *)(info + 1);
1882 			}
1883 		} else {
1884 			*out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
1885 		}
1886 		*ptr = (unsigned long)*out_eiref;
1887 		if ((unsigned long)(*ptr) >= (unsigned long)ei + item_size)
1888 			return -ENOENT;
1889 	}
1890 
1891 	end = (unsigned long)ei + item_size;
1892 	*out_eiref = (struct btrfs_extent_inline_ref *)(*ptr);
1893 	*out_type = btrfs_get_extent_inline_ref_type(eb, *out_eiref,
1894 						     BTRFS_REF_TYPE_ANY);
1895 	if (*out_type == BTRFS_REF_TYPE_INVALID)
1896 		return -EUCLEAN;
1897 
1898 	*ptr += btrfs_extent_inline_ref_size(*out_type);
1899 	WARN_ON(*ptr > end);
1900 	if (*ptr == end)
1901 		return 1; /* last */
1902 
1903 	return 0;
1904 }
1905 
1906 /*
1907  * reads the tree block backref for an extent. tree level and root are returned
1908  * through out_level and out_root. ptr must point to a 0 value for the first
1909  * call and may be modified (see get_extent_inline_ref comment).
1910  * returns 0 if data was provided, 1 if there was no more data to provide or
1911  * <0 on error.
1912  */
1913 int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
1914 			    struct btrfs_key *key, struct btrfs_extent_item *ei,
1915 			    u32 item_size, u64 *out_root, u8 *out_level)
1916 {
1917 	int ret;
1918 	int type;
1919 	struct btrfs_extent_inline_ref *eiref;
1920 
1921 	if (*ptr == (unsigned long)-1)
1922 		return 1;
1923 
1924 	while (1) {
1925 		ret = get_extent_inline_ref(ptr, eb, key, ei, item_size,
1926 					      &eiref, &type);
1927 		if (ret < 0)
1928 			return ret;
1929 
1930 		if (type == BTRFS_TREE_BLOCK_REF_KEY ||
1931 		    type == BTRFS_SHARED_BLOCK_REF_KEY)
1932 			break;
1933 
1934 		if (ret == 1)
1935 			return 1;
1936 	}
1937 
1938 	/* we can treat both ref types equally here */
1939 	*out_root = btrfs_extent_inline_ref_offset(eb, eiref);
1940 
1941 	if (key->type == BTRFS_EXTENT_ITEM_KEY) {
1942 		struct btrfs_tree_block_info *info;
1943 
1944 		info = (struct btrfs_tree_block_info *)(ei + 1);
1945 		*out_level = btrfs_tree_block_level(eb, info);
1946 	} else {
1947 		ASSERT(key->type == BTRFS_METADATA_ITEM_KEY);
1948 		*out_level = (u8)key->offset;
1949 	}
1950 
1951 	if (ret == 1)
1952 		*ptr = (unsigned long)-1;
1953 
1954 	return 0;
1955 }
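
/*
 * Intended calling pattern (sketch): start with ptr == 0 and keep calling
 * until a nonzero return. A return of 0 provides one (root, level) pair, 1
 * means all inline refs have been consumed, <0 is an error.
 *
 *	unsigned long ptr = 0;
 *	while (1) {
 *		ret = tree_backref_for_extent(&ptr, eb, &key, ei, item_size,
 *					      &root, &level);
 *		if (ret < 0)
 *			return ret;
 *		if (ret == 1)
 *			break;		(no more tree block backrefs)
 *		... handle one (root, level) pair ...
 *	}
 */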
1956 
1957 static int iterate_leaf_refs(struct btrfs_fs_info *fs_info,
1958 			     struct extent_inode_elem *inode_list,
1959 			     u64 root, u64 extent_item_objectid,
1960 			     iterate_extent_inodes_t *iterate, void *ctx)
1961 {
1962 	struct extent_inode_elem *eie;
1963 	int ret = 0;
1964 
1965 	for (eie = inode_list; eie; eie = eie->next) {
1966 		btrfs_debug(fs_info,
1967 			    "ref for %llu resolved, key (%llu EXTENT_DATA %llu), root %llu",
1968 			    extent_item_objectid, eie->inum,
1969 			    eie->offset, root);
1970 		ret = iterate(eie->inum, eie->offset, root, ctx);
1971 		if (ret) {
1972 			btrfs_debug(fs_info,
1973 				    "stopping iteration for %llu due to ret=%d",
1974 				    extent_item_objectid, ret);
1975 			break;
1976 		}
1977 	}
1978 
1979 	return ret;
1980 }
1981 
1982 /*
1983  * calls iterate() for every inode that references the extent identified by
1984  * the given parameters.
1985  * when the iterator function returns a non-zero value, iteration stops.
1986  */
1987 int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
1988 				u64 extent_item_objectid, u64 extent_item_pos,
1989 				int search_commit_root,
1990 				iterate_extent_inodes_t *iterate, void *ctx,
1991 				bool ignore_offset)
1992 {
1993 	int ret;
1994 	struct btrfs_trans_handle *trans = NULL;
1995 	struct ulist *refs = NULL;
1996 	struct ulist *roots = NULL;
1997 	struct ulist_node *ref_node = NULL;
1998 	struct ulist_node *root_node = NULL;
1999 	struct seq_list tree_mod_seq_elem = SEQ_LIST_INIT(tree_mod_seq_elem);
2000 	struct ulist_iterator ref_uiter;
2001 	struct ulist_iterator root_uiter;
2002 
2003 	btrfs_debug(fs_info, "resolving all inodes for extent %llu",
2004 			extent_item_objectid);
2005 
2006 	if (!search_commit_root) {
2007 		trans = btrfs_attach_transaction(fs_info->extent_root);
2008 		if (IS_ERR(trans)) {
2009 			if (PTR_ERR(trans) != -ENOENT &&
2010 			    PTR_ERR(trans) != -EROFS)
2011 				return PTR_ERR(trans);
2012 			trans = NULL;
2013 		}
2014 	}
2015 
2016 	if (trans)
2017 		btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
2018 	else
2019 		down_read(&fs_info->commit_root_sem);
2020 
2021 	ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
2022 				   tree_mod_seq_elem.seq, &refs,
2023 				   &extent_item_pos, ignore_offset);
2024 	if (ret)
2025 		goto out;
2026 
2027 	ULIST_ITER_INIT(&ref_uiter);
2028 	while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
2029 		ret = btrfs_find_all_roots_safe(trans, fs_info, ref_node->val,
2030 						tree_mod_seq_elem.seq, &roots,
2031 						ignore_offset);
2032 		if (ret)
2033 			break;
2034 		ULIST_ITER_INIT(&root_uiter);
2035 		while (!ret && (root_node = ulist_next(roots, &root_uiter))) {
2036 			btrfs_debug(fs_info,
2037 				    "root %llu references leaf %llu, data list %#llx",
2038 				    root_node->val, ref_node->val,
2039 				    ref_node->aux);
2040 			ret = iterate_leaf_refs(fs_info,
2041 						(struct extent_inode_elem *)
2042 						(uintptr_t)ref_node->aux,
2043 						root_node->val,
2044 						extent_item_objectid,
2045 						iterate, ctx);
2046 		}
2047 		ulist_free(roots);
2048 	}
2049 
2050 	free_leaf_list(refs);
2051 out:
2052 	if (trans) {
2053 		btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
2054 		btrfs_end_transaction(trans);
2055 	} else {
2056 		up_read(&fs_info->commit_root_sem);
2057 	}
2058 
2059 	return ret;
2060 }
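
/*
 * Sketch of a caller-supplied callback (the names are hypothetical). Its
 * signature must match iterate_extent_inodes_t: one call per resolved
 * (inode, file offset, root) triple; a nonzero return stops the iteration.
 *
 *	static int count_refs(u64 inum, u64 offset, u64 root, void *ctx)
 *	{
 *		u64 *count = ctx;
 *
 *		(*count)++;
 *		return 0;
 *	}
 *
 *	u64 nrefs = 0;
 *	ret = iterate_extent_inodes(fs_info, extent_bytenr, extent_offset,
 *				    0, count_refs, &nrefs, false);
 */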
2061 
2062 int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
2063 				struct btrfs_path *path,
2064 				iterate_extent_inodes_t *iterate, void *ctx,
2065 				bool ignore_offset)
2066 {
2067 	int ret;
2068 	u64 extent_item_pos;
2069 	u64 flags = 0;
2070 	struct btrfs_key found_key;
2071 	int search_commit_root = path->search_commit_root;
2072 
2073 	ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
2074 	btrfs_release_path(path);
2075 	if (ret < 0)
2076 		return ret;
2077 	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
2078 		return -EINVAL;
2079 
2080 	extent_item_pos = logical - found_key.objectid;
2081 	ret = iterate_extent_inodes(fs_info, found_key.objectid,
2082 					extent_item_pos, search_commit_root,
2083 					iterate, ctx, ignore_offset);
2084 
2085 	return ret;
2086 }
2087 
2088 typedef int (iterate_irefs_t)(u64 parent, u32 name_len, unsigned long name_off,
2089 			      struct extent_buffer *eb, void *ctx);
2090 
2091 static int iterate_inode_refs(u64 inum, struct btrfs_root *fs_root,
2092 			      struct btrfs_path *path,
2093 			      iterate_irefs_t *iterate, void *ctx)
2094 {
2095 	int ret = 0;
2096 	int slot;
2097 	u32 cur;
2098 	u32 len;
2099 	u32 name_len;
2100 	u64 parent = 0;
2101 	int found = 0;
2102 	struct extent_buffer *eb;
2103 	struct btrfs_item *item;
2104 	struct btrfs_inode_ref *iref;
2105 	struct btrfs_key found_key;
2106 
2107 	while (!ret) {
2108 		ret = btrfs_find_item(fs_root, path, inum,
2109 				parent ? parent + 1 : 0, BTRFS_INODE_REF_KEY,
2110 				&found_key);
2111 
2112 		if (ret < 0)
2113 			break;
2114 		if (ret) {
2115 			ret = found ? 0 : -ENOENT;
2116 			break;
2117 		}
2118 		++found;
2119 
2120 		parent = found_key.offset;
2121 		slot = path->slots[0];
2122 		eb = btrfs_clone_extent_buffer(path->nodes[0]);
2123 		if (!eb) {
2124 			ret = -ENOMEM;
2125 			break;
2126 		}
2127 		btrfs_release_path(path);
2128 
2129 		item = btrfs_item_nr(slot);
2130 		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
2131 
2132 		for (cur = 0; cur < btrfs_item_size(eb, item); cur += len) {
2133 			name_len = btrfs_inode_ref_name_len(eb, iref);
2134 			/* path must be released before calling iterate()! */
2135 			btrfs_debug(fs_root->fs_info,
2136 				"following ref at offset %u for inode %llu in tree %llu",
2137 				cur, found_key.objectid,
2138 				fs_root->root_key.objectid);
2139 			ret = iterate(parent, name_len,
2140 				      (unsigned long)(iref + 1), eb, ctx);
2141 			if (ret)
2142 				break;
2143 			len = sizeof(*iref) + name_len;
2144 			iref = (struct btrfs_inode_ref *)((char *)iref + len);
2145 		}
2146 		free_extent_buffer(eb);
2147 	}
2148 
2149 	btrfs_release_path(path);
2150 
2151 	return ret;
2152 }
2153 
2154 static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
2155 				 struct btrfs_path *path,
2156 				 iterate_irefs_t *iterate, void *ctx)
2157 {
2158 	int ret;
2159 	int slot;
2160 	u64 offset = 0;
2161 	u64 parent;
2162 	int found = 0;
2163 	struct extent_buffer *eb;
2164 	struct btrfs_inode_extref *extref;
2165 	u32 item_size;
2166 	u32 cur_offset;
2167 	unsigned long ptr;
2168 
2169 	while (1) {
2170 		ret = btrfs_find_one_extref(fs_root, inum, offset, path, &extref,
2171 					    &offset);
2172 		if (ret < 0)
2173 			break;
2174 		if (ret) {
2175 			ret = found ? 0 : -ENOENT;
2176 			break;
2177 		}
2178 		++found;
2179 
2180 		slot = path->slots[0];
2181 		eb = btrfs_clone_extent_buffer(path->nodes[0]);
2182 		if (!eb) {
2183 			ret = -ENOMEM;
2184 			break;
2185 		}
2186 		btrfs_release_path(path);
2187 
2188 		item_size = btrfs_item_size_nr(eb, slot);
2189 		ptr = btrfs_item_ptr_offset(eb, slot);
2190 		cur_offset = 0;
2191 
2192 		while (cur_offset < item_size) {
2193 			u32 name_len;
2194 
2195 			extref = (struct btrfs_inode_extref *)(ptr + cur_offset);
2196 			parent = btrfs_inode_extref_parent(eb, extref);
2197 			name_len = btrfs_inode_extref_name_len(eb, extref);
2198 			ret = iterate(parent, name_len,
2199 				      (unsigned long)&extref->name, eb, ctx);
2200 			if (ret)
2201 				break;
2202 
2203 			cur_offset += btrfs_inode_extref_name_len(eb, extref);
2204 			cur_offset += sizeof(*extref);
2205 		}
2206 		free_extent_buffer(eb);
2207 
2208 		offset++;
2209 	}
2210 
2211 	btrfs_release_path(path);
2212 
2213 	return ret;
2214 }
2215 
2216 static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
2217 			 struct btrfs_path *path, iterate_irefs_t *iterate,
2218 			 void *ctx)
2219 {
2220 	int ret;
2221 	int found_refs = 0;
2222 
2223 	ret = iterate_inode_refs(inum, fs_root, path, iterate, ctx);
2224 	if (!ret)
2225 		++found_refs;
2226 	else if (ret != -ENOENT)
2227 		return ret;
2228 
2229 	ret = iterate_inode_extrefs(inum, fs_root, path, iterate, ctx);
2230 	if (ret == -ENOENT && found_refs)
2231 		return 0;
2232 
2233 	return ret;
2234 }
2235 
2236 /*
2237  * returns 0 if the path could be dumped (possibly truncated)
2238  * returns <0 in case of an error
2239  */
2240 static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
2241 			 struct extent_buffer *eb, void *ctx)
2242 {
2243 	struct inode_fs_paths *ipath = ctx;
2244 	char *fspath;
2245 	char *fspath_min;
2246 	int i = ipath->fspath->elem_cnt;
2247 	const int s_ptr = sizeof(char *);
2248 	u32 bytes_left;
2249 
2250 	bytes_left = ipath->fspath->bytes_left > s_ptr ?
2251 					ipath->fspath->bytes_left - s_ptr : 0;
2252 
2253 	fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
2254 	fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len,
2255 				   name_off, eb, inum, fspath_min, bytes_left);
2256 	if (IS_ERR(fspath))
2257 		return PTR_ERR(fspath);
2258 
2259 	if (fspath > fspath_min) {
2260 		ipath->fspath->val[i] = (u64)(unsigned long)fspath;
2261 		++ipath->fspath->elem_cnt;
2262 		ipath->fspath->bytes_left = fspath - fspath_min;
2263 	} else {
2264 		++ipath->fspath->elem_missed;
2265 		ipath->fspath->bytes_missing += fspath_min - fspath;
2266 		ipath->fspath->bytes_left = 0;
2267 	}
2268 
2269 	return 0;
2270 }
2271 
2272 /*
2273  * this dumps all file system paths to the inode into the ipath struct, provided
2274  * it has been created large enough. each path is zero-terminated and accessed
2275  * from ipath->fspath->val[i].
2276  * when it returns, there are ipath->fspath->elem_cnt number of paths available
2277  * in ipath->fspath->val[]. when the allocated space wasn't sufficient, the
2278  * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise,
2279  * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
2280  * have been needed to return all paths.
2281  */
2282 int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
2283 {
2284 	return iterate_irefs(inum, ipath->fs_root, ipath->btrfs_path,
2285 			     inode_to_path, ipath);
2286 }
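
/*
 * Sketch of how an in-kernel caller could read back the results (the loop is
 * only illustrative; the INO_PATHS ioctl post-processes val[] before copying
 * it to user space):
 *
 *	ret = paths_from_inode(inum, ipath);
 *	if (ret)
 *		return ret;
 *	for (i = 0; i < ipath->fspath->elem_cnt; i++)
 *		pr_info("path: %s\n",
 *			(char *)(unsigned long)ipath->fspath->val[i]);
 *	if (ipath->fspath->elem_missed)
 *		... buffer was too small, see bytes_missing ...
 */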
2287 
2288 struct btrfs_data_container *init_data_container(u32 total_bytes)
2289 {
2290 	struct btrfs_data_container *data;
2291 	size_t alloc_bytes;
2292 
2293 	alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
2294 	data = kvmalloc(alloc_bytes, GFP_KERNEL);
2295 	if (!data)
2296 		return ERR_PTR(-ENOMEM);
2297 
2298 	if (total_bytes >= sizeof(*data)) {
2299 		data->bytes_left = total_bytes - sizeof(*data);
2300 		data->bytes_missing = 0;
2301 	} else {
2302 		data->bytes_missing = sizeof(*data) - total_bytes;
2303 		data->bytes_left = 0;
2304 	}
2305 
2306 	data->elem_cnt = 0;
2307 	data->elem_missed = 0;
2308 
2309 	return data;
2310 }
2311 
2312 /*
2313  * allocates space to return multiple file system paths for an inode.
2314  * total_bytes to allocate are passed; note that the space usable for actual
2315  * path information will be total_bytes - sizeof(struct btrfs_data_container).
2316  * the returned pointer must be freed with free_ipath() in the end.
2317  */
2318 struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
2319 					struct btrfs_path *path)
2320 {
2321 	struct inode_fs_paths *ifp;
2322 	struct btrfs_data_container *fspath;
2323 
2324 	fspath = init_data_container(total_bytes);
2325 	if (IS_ERR(fspath))
2326 		return ERR_CAST(fspath);
2327 
2328 	ifp = kmalloc(sizeof(*ifp), GFP_KERNEL);
2329 	if (!ifp) {
2330 		kvfree(fspath);
2331 		return ERR_PTR(-ENOMEM);
2332 	}
2333 
2334 	ifp->btrfs_path = path;
2335 	ifp->fspath = fspath;
2336 	ifp->fs_root = fs_root;
2337 
2338 	return ifp;
2339 }
2340 
2341 void free_ipath(struct inode_fs_paths *ipath)
2342 {
2343 	if (!ipath)
2344 		return;
2345 	kvfree(ipath->fspath);
2346 	kfree(ipath);
2347 }
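
/*
 * End-to-end usage sketch (names and the 4096-byte size are only examples,
 * error handling trimmed): allocate the container, resolve all paths for an
 * inode, then free it.
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	struct inode_fs_paths *ipath = init_ipath(4096, fs_root, path);
 *
 *	if (!IS_ERR(ipath)) {
 *		ret = paths_from_inode(inum, ipath);
 *		... consume ipath->fspath->val[0..elem_cnt) ...
 *		free_ipath(ipath);
 *	}
 *	btrfs_free_path(path);
 */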
2348