// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 STRATO. All rights reserved.
 */

#include <linux/mm.h>
#include <linux/rbtree.h>
#include <trace/events/btrfs.h>
#include "ctree.h"
#include "disk-io.h"
#include "backref.h"
#include "ulist.h"
#include "transaction.h"
#include "delayed-ref.h"
#include "locking.h"
#include "misc.h"

/* Just an arbitrary number so we can be sure this happened */
#define BACKREF_FOUND_SHARED 6
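
/*
 * A node in a singly linked list of (inode number, file offset) pairs
 * referencing a data extent.  These lists are built up during the backref
 * walk and travel attached to the ulist entries of the found parents (see
 * the 'aux' stashing via ulist_add_merge_ptr() further below).
 */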
struct extent_inode_elem {
	u64 inum;
	u64 offset;
	struct extent_inode_elem *next;
};

static int check_extent_in_eb(const struct btrfs_key *key,
			      const struct extent_buffer *eb,
			      const struct btrfs_file_extent_item *fi,
			      u64 extent_item_pos,
			      struct extent_inode_elem **eie,
			      bool ignore_offset)
{
	u64 offset = 0;
	struct extent_inode_elem *e;

	if (!ignore_offset &&
	    !btrfs_file_extent_compression(eb, fi) &&
	    !btrfs_file_extent_encryption(eb, fi) &&
	    !btrfs_file_extent_other_encoding(eb, fi)) {
		u64 data_offset;
		u64 data_len;

		data_offset = btrfs_file_extent_offset(eb, fi);
		data_len = btrfs_file_extent_num_bytes(eb, fi);

		if (extent_item_pos < data_offset ||
		    extent_item_pos >= data_offset + data_len)
			return 1;
		offset = extent_item_pos - data_offset;
	}
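
	/*
	 * Worked example of the range check above: for an uncompressed,
	 * unencrypted extent with data_offset == 4096 and data_len == 8192,
	 * extent_item_pos == 6144 lies inside [4096, 12288) and yields
	 * offset == 2048, so the recorded file offset is key->offset + 2048;
	 * extent_item_pos == 2048 would fall before data_offset and return 1.
	 */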

	e = kmalloc(sizeof(*e), GFP_NOFS);
	if (!e)
		return -ENOMEM;

	e->next = *eie;
	e->inum = key->objectid;
	e->offset = key->offset + offset;
	*eie = e;

	return 0;
}

static void free_inode_elem_list(struct extent_inode_elem *eie)
{
	struct extent_inode_elem *eie_next;

	for (; eie; eie = eie_next) {
		eie_next = eie->next;
		kfree(eie);
	}
}

static int find_extent_in_eb(const struct extent_buffer *eb,
			     u64 wanted_disk_byte, u64 extent_item_pos,
			     struct extent_inode_elem **eie,
			     bool ignore_offset)
{
	u64 disk_byte;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int slot;
	int nritems;
	int extent_type;
	int ret;

	/*
	 * From the shared data ref, we only have the leaf but we need the key.
	 * Thus, we must look into all items and see whether we find one (or
	 * some) with a reference to our extent item.
	 */
	nritems = btrfs_header_nritems(eb);
	for (slot = 0; slot < nritems; ++slot) {
		btrfs_item_key_to_cpu(eb, &key, slot);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(eb, fi);
		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
			continue;
		/* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		if (disk_byte != wanted_disk_byte)
			continue;

		ret = check_extent_in_eb(&key, eb, fi, extent_item_pos, eie, ignore_offset);
		if (ret < 0)
			return ret;
	}

	return 0;
}

struct preftree {
	struct rb_root_cached root;
	unsigned int count;
};

#define PREFTREE_INIT	{ .root = RB_ROOT_CACHED, .count = 0 }

struct preftrees {
	struct preftree direct;    /* BTRFS_SHARED_[DATA|BLOCK]_REF_KEY */
	struct preftree indirect;  /* BTRFS_[TREE_BLOCK|EXTENT_DATA]_REF_KEY */
	struct preftree indirect_missing_keys;
};
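
/*
 * How the three trees interact (see resolve_indirect_refs() and
 * add_missing_keys() below): direct refs already know the parent block's
 * logical address and go straight into 'direct'.  Indirect refs that
 * arrive without a key to search for are parked in 'indirect_missing_keys'
 * until add_missing_keys() reads the first key of the referenced block;
 * afterwards all indirect refs sit in 'indirect' and are resolved to
 * parents, ending up merged into 'direct'.
 */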

/*
 * Checks for a shared extent during backref search.
 *
 * The share_count tracks prelim_refs (direct and indirect) having a
 * ref->count >0:
 *  - incremented when a ref->count transitions to >0
 *  - decremented when a ref->count transitions to <1
 */
struct share_check {
	u64 root_objectid;
	u64 inum;
	int share_count;
	bool have_delayed_delete_refs;
};

static inline int extent_is_shared(struct share_check *sc)
{
	return (sc && sc->share_count > 1) ? BACKREF_FOUND_SHARED : 0;
}
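
/*
 * Note on the threshold above: a shared check's backref walk always finds
 * the ref belonging to the (root, inode) being tested, which accounts for
 * a share_count of 1, so any second counted ref means another root or
 * inode also references the extent, i.e. it is shared.
 */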

static struct kmem_cache *btrfs_prelim_ref_cache;

int __init btrfs_prelim_ref_init(void)
{
	btrfs_prelim_ref_cache = kmem_cache_create("btrfs_prelim_ref",
					sizeof(struct prelim_ref),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_prelim_ref_cache)
		return -ENOMEM;
	return 0;
}

void __cold btrfs_prelim_ref_exit(void)
{
	kmem_cache_destroy(btrfs_prelim_ref_cache);
}

static void free_pref(struct prelim_ref *ref)
{
	kmem_cache_free(btrfs_prelim_ref_cache, ref);
}

/*
 * Return 0 when both refs are for the same block (and can be merged).
 * A -1 return indicates ref1 is a 'lower' block than ref2, while 1
 * indicates a 'higher' block.
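 *
 * The comparison order (level, root, key, parent) also makes identical
 * refs land on the same rbtree node, which is what allows
 * prelim_ref_insert() to merge them.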
 */
static int prelim_ref_compare(struct prelim_ref *ref1,
			      struct prelim_ref *ref2)
{
	if (ref1->level < ref2->level)
		return -1;
	if (ref1->level > ref2->level)
		return 1;
	if (ref1->root_id < ref2->root_id)
		return -1;
	if (ref1->root_id > ref2->root_id)
		return 1;
	if (ref1->key_for_search.type < ref2->key_for_search.type)
		return -1;
	if (ref1->key_for_search.type > ref2->key_for_search.type)
		return 1;
	if (ref1->key_for_search.objectid < ref2->key_for_search.objectid)
		return -1;
	if (ref1->key_for_search.objectid > ref2->key_for_search.objectid)
		return 1;
	if (ref1->key_for_search.offset < ref2->key_for_search.offset)
		return -1;
	if (ref1->key_for_search.offset > ref2->key_for_search.offset)
		return 1;
	if (ref1->parent < ref2->parent)
		return -1;
	if (ref1->parent > ref2->parent)
		return 1;

	return 0;
}

static void update_share_count(struct share_check *sc, int oldcount,
			       int newcount)
{
	if ((!sc) || (oldcount == 0 && newcount < 1))
		return;

	if (oldcount > 0 && newcount < 1)
		sc->share_count--;
	else if (oldcount < 1 && newcount > 0)
		sc->share_count++;
}
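
/*
 * For example: a ref whose count goes from 0 to 2 increments share_count,
 * a delayed DROP taking it from 1 to 0 decrements it, and transitions
 * that stay positive (e.g. 1 -> 2) are no-ops.
 */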

/*
 * Add @newref to the @root rbtree, merging identical refs.
 *
 * Callers should assume that newref has been freed after calling.
 */
static void prelim_ref_insert(const struct btrfs_fs_info *fs_info,
			      struct preftree *preftree,
			      struct prelim_ref *newref,
			      struct share_check *sc)
{
	struct rb_root_cached *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct prelim_ref *ref;
	int result;
	bool leftmost = true;

	root = &preftree->root;
	p = &root->rb_root.rb_node;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct prelim_ref, rbnode);
		result = prelim_ref_compare(ref, newref);
		if (result < 0) {
			p = &(*p)->rb_left;
		} else if (result > 0) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			/* Identical refs, merge them and free @newref */
			struct extent_inode_elem *eie = ref->inode_list;

			while (eie && eie->next)
				eie = eie->next;

			if (!eie)
				ref->inode_list = newref->inode_list;
			else
				eie->next = newref->inode_list;
			trace_btrfs_prelim_ref_merge(fs_info, ref, newref,
						     preftree->count);
			/*
			 * A delayed ref can have newref->count < 0.
			 * The ref->count is updated to follow any
			 * BTRFS_[ADD|DROP]_DELAYED_REF actions.
			 */
			update_share_count(sc, ref->count,
					   ref->count + newref->count);
			ref->count += newref->count;
			free_pref(newref);
			return;
		}
	}

	update_share_count(sc, 0, newref->count);
	preftree->count++;
	trace_btrfs_prelim_ref_insert(fs_info, newref, NULL, preftree->count);
	rb_link_node(&newref->rbnode, parent, p);
	rb_insert_color_cached(&newref->rbnode, root, leftmost);
}

/*
 * Release the entire tree.  We don't care about internal consistency so
 * just free everything and then reset the tree root.
 */
static void prelim_release(struct preftree *preftree)
{
	struct prelim_ref *ref, *next_ref;

	rbtree_postorder_for_each_entry_safe(ref, next_ref,
					     &preftree->root.rb_root, rbnode) {
		free_inode_elem_list(ref->inode_list);
		free_pref(ref);
	}

	preftree->root = RB_ROOT_CACHED;
	preftree->count = 0;
}

/*
 * the rules for all callers of this function are:
 * - obtaining the parent is the goal
 * - if you add a key, you must know that it is a correct key
 * - if you cannot add the parent or a correct key, then we will look into the
 *   block later to set a correct key
 *
 * delayed refs
 * ============
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    -   |     -
 *      key to resolve |    -   |     y    |    y   |     y
 *  tree block logical |    -   |     -    |    -   |     -
 *  root for resolving |    y   |     y    |    y   |     y
 *
 * - column 1:       we have the parent -> done
 * - column 2, 3, 4: we use the key to find the parent
 *
 * on disk refs (inline or keyed)
 * ==============================
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    y   |     -
 *      key to resolve |    -   |     -    |    -   |     y
 *  tree block logical |    y   |     y    |    y   |     y
 *  root for resolving |    -   |     y    |    y   |     y
 *
 * - column 1, 3: we have the parent -> done
 * - column 2:    we take the first key from the block to find the parent
 *                (see add_missing_keys)
 * - column 4:    we use the key to find the parent
 *
 * additional information that's available but not required to find the parent
 * block might help in merging entries to gain some speed.
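 *
 * To illustrate the tables: an inline BTRFS_SHARED_BLOCK_REF_KEY carries
 * the parent block's logical address, so add_direct_ref() suffices
 * (column 1), while a BTRFS_TREE_BLOCK_REF_KEY only names the owning
 * root; it goes through add_indirect_ref() without a key and receives
 * one later from the block itself in add_missing_keys() (column 2).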
 */
static int add_prelim_ref(const struct btrfs_fs_info *fs_info,
			  struct preftree *preftree, u64 root_id,
			  const struct btrfs_key *key, int level, u64 parent,
			  u64 wanted_disk_byte, int count,
			  struct share_check *sc, gfp_t gfp_mask)
{
	struct prelim_ref *ref;

	if (root_id == BTRFS_DATA_RELOC_TREE_OBJECTID)
		return 0;

	ref = kmem_cache_alloc(btrfs_prelim_ref_cache, gfp_mask);
	if (!ref)
		return -ENOMEM;

	ref->root_id = root_id;
	if (key)
		ref->key_for_search = *key;
	else
		memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));

	ref->inode_list = NULL;
	ref->level = level;
	ref->count = count;
	ref->parent = parent;
	ref->wanted_disk_byte = wanted_disk_byte;
	prelim_ref_insert(fs_info, preftree, ref, sc);
	return extent_is_shared(sc);
}

/* direct refs use root == 0, key == NULL */
static int add_direct_ref(const struct btrfs_fs_info *fs_info,
			  struct preftrees *preftrees, int level, u64 parent,
			  u64 wanted_disk_byte, int count,
			  struct share_check *sc, gfp_t gfp_mask)
{
	return add_prelim_ref(fs_info, &preftrees->direct, 0, NULL, level,
			      parent, wanted_disk_byte, count, sc, gfp_mask);
}

/* indirect refs use parent == 0 */
static int add_indirect_ref(const struct btrfs_fs_info *fs_info,
			    struct preftrees *preftrees, u64 root_id,
			    const struct btrfs_key *key, int level,
			    u64 wanted_disk_byte, int count,
			    struct share_check *sc, gfp_t gfp_mask)
{
	struct preftree *tree = &preftrees->indirect;

	if (!key)
		tree = &preftrees->indirect_missing_keys;
	return add_prelim_ref(fs_info, tree, root_id, key, level, 0,
			      wanted_disk_byte, count, sc, gfp_mask);
}

static int is_shared_data_backref(struct preftrees *preftrees, u64 bytenr)
{
	struct rb_node **p = &preftrees->direct.root.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct prelim_ref *ref = NULL;
	struct prelim_ref target = {};
	int result;

	target.parent = bytenr;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct prelim_ref, rbnode);
		result = prelim_ref_compare(ref, &target);

		if (result < 0)
			p = &(*p)->rb_left;
		else if (result > 0)
			p = &(*p)->rb_right;
		else
			return 1;
	}
	return 0;
}

static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
			   struct ulist *parents,
			   struct preftrees *preftrees, struct prelim_ref *ref,
			   int level, u64 time_seq, const u64 *extent_item_pos,
			   bool ignore_offset)
{
	int ret = 0;
	int slot;
	struct extent_buffer *eb;
	struct btrfs_key key;
	struct btrfs_key *key_for_search = &ref->key_for_search;
	struct btrfs_file_extent_item *fi;
	struct extent_inode_elem *eie = NULL, *old = NULL;
	u64 disk_byte;
	u64 wanted_disk_byte = ref->wanted_disk_byte;
	u64 count = 0;
	u64 data_offset;
	u8 type;
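
	/*
	 * For an interior node the parent is simply the tree block the path
	 * points at on that level; record it once and we are done.
	 */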
	if (level != 0) {
		eb = path->nodes[level];
		ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
		if (ret < 0)
			return ret;
		return 0;
	}

	/*
	 * 1. We normally enter this function with the path already pointing to
	 *    the first item to check. But sometimes, we may enter it with
	 *    slot == nritems.
	 * 2. We are searching for a normal backref but the bytenr of this leaf
	 *    matches a shared data backref.
	 * 3. The leaf owner is not equal to the root we are searching for.
	 *
	 * For these cases, go to the next leaf before we continue.
	 */
	eb = path->nodes[0];
	if (path->slots[0] >= btrfs_header_nritems(eb) ||
	    is_shared_data_backref(preftrees, eb->start) ||
	    ref->root_id != btrfs_header_owner(eb)) {
		if (time_seq == SEQ_LAST)
			ret = btrfs_next_leaf(root, path);
		else
			ret = btrfs_next_old_leaf(root, path, time_seq);
	}

	while (!ret && count < ref->count) {
		eb = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(eb, &key, slot);

		if (key.objectid != key_for_search->objectid ||
		    key.type != BTRFS_EXTENT_DATA_KEY)
			break;

		/*
		 * We are searching for a normal backref but the bytenr of this
		 * leaf matches a shared data backref, OR the leaf owner is not
		 * equal to the root we are searching for.
		 */
		if (slot == 0 &&
		    (is_shared_data_backref(preftrees, eb->start) ||
		     ref->root_id != btrfs_header_owner(eb))) {
			if (time_seq == SEQ_LAST)
				ret = btrfs_next_leaf(root, path);
			else
				ret = btrfs_next_old_leaf(root, path, time_seq);
			continue;
		}
		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		type = btrfs_file_extent_type(eb, fi);
		if (type == BTRFS_FILE_EXTENT_INLINE)
			goto next;
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		data_offset = btrfs_file_extent_offset(eb, fi);

		if (disk_byte == wanted_disk_byte) {
			eie = NULL;
			old = NULL;
			if (ref->key_for_search.offset == key.offset - data_offset)
				count++;
			else
				goto next;
			if (extent_item_pos) {
				ret = check_extent_in_eb(&key, eb, fi,
						*extent_item_pos,
						&eie, ignore_offset);
				if (ret < 0)
					break;
			}
			if (ret > 0)
				goto next;
			ret = ulist_add_merge_ptr(parents, eb->start,
						  eie, (void **)&old, GFP_NOFS);
			if (ret < 0)
				break;
			if (!ret && extent_item_pos) {
				while (old->next)
					old = old->next;
				old->next = eie;
			}
			eie = NULL;
		}
next:
		if (time_seq == SEQ_LAST)
			ret = btrfs_next_item(root, path);
		else
			ret = btrfs_next_old_item(root, path, time_seq);
	}

	if (ret > 0)
		ret = 0;
	else if (ret < 0)
		free_inode_elem_list(eie);
	return ret;
}

/*
 * resolve an indirect backref in the form (root_id, key, level)
 * to a logical address
 */
static int resolve_indirect_ref(struct btrfs_fs_info *fs_info,
				struct btrfs_path *path, u64 time_seq,
				struct preftrees *preftrees,
				struct prelim_ref *ref, struct ulist *parents,
				const u64 *extent_item_pos, bool ignore_offset)
{
	struct btrfs_root *root;
	struct extent_buffer *eb;
	int ret = 0;
	int root_level;
	int level = ref->level;
	struct btrfs_key search_key = ref->key_for_search;

	/*
	 * If we're using search_commit_root we could possibly be holding locks
	 * on other tree nodes.  This happens when the qgroup code does backref
	 * walks while adding new delayed refs.  To deal with this we need to
	 * look in the cache for the root, and if we don't find it then we need
	 * to search the tree_root's commit root, thus the
	 * btrfs_get_fs_root_commit_root usage here.
	 */
	if (path->search_commit_root)
		root = btrfs_get_fs_root_commit_root(fs_info, path, ref->root_id);
	else
		root = btrfs_get_fs_root(fs_info, ref->root_id, false);
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);
		goto out_free;
	}

	if (!path->search_commit_root &&
	    test_bit(BTRFS_ROOT_DELETING, &root->state)) {
		ret = -ENOENT;
		goto out;
	}

	if (btrfs_is_testing(fs_info)) {
		ret = -ENOENT;
		goto out;
	}

	if (path->search_commit_root)
		root_level = btrfs_header_level(root->commit_root);
	else if (time_seq == SEQ_LAST)
		root_level = btrfs_header_level(root->node);
	else
		root_level = btrfs_old_root_level(root, time_seq);

	if (root_level + 1 == level)
		goto out;

	/*
	 * We can often find data backrefs with an offset that is too large
	 * (>= LLONG_MAX, maximum allowed file offset) due to underflows when
	 * subtracting a file's offset with the data offset of its
	 * corresponding extent data item. This can happen for example in the
	 * clone ioctl.
	 *
	 * So if we detect such case we set the search key's offset to zero to
	 * make sure we will find the matching file extent item at
	 * add_all_parents(), otherwise we will miss it because the offset
	 * taken from the backref is much larger than the offset of the file
	 * extent item. This can make us scan a very large number of file
	 * extent items, but at least it will not make us miss any.
	 *
	 * This is an ugly workaround for a behaviour that should have never
	 * existed, but it does and a fix for the clone ioctl would touch a lot
	 * of places, cause backwards incompatibility and would not fix the
	 * problem for extents cloned with older kernels.
	 */
	if (search_key.type == BTRFS_EXTENT_DATA_KEY &&
	    search_key.offset >= LLONG_MAX)
		search_key.offset = 0;
	path->lowest_level = level;
	if (time_seq == SEQ_LAST)
		ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	else
		ret = btrfs_search_old_slot(root, &search_key, path, time_seq);

	btrfs_debug(fs_info,
		"search slot in root %llu (level %d, ref count %d) returned %d for key (%llu %u %llu)",
		 ref->root_id, level, ref->count, ret,
		 ref->key_for_search.objectid, ref->key_for_search.type,
		 ref->key_for_search.offset);
	if (ret < 0)
		goto out;

	eb = path->nodes[level];
	while (!eb) {
		if (WARN_ON(!level)) {
			ret = 1;
			goto out;
		}
		level--;
		eb = path->nodes[level];
	}

	ret = add_all_parents(root, path, parents, preftrees, ref, level,
			      time_seq, extent_item_pos, ignore_offset);
out:
	btrfs_put_root(root);
out_free:
	path->lowest_level = 0;
	btrfs_release_path(path);
	return ret;
}
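
/*
 * Each ulist node's aux value carries the extent_inode_elem list that was
 * collected for that parent (stored via ulist_add_merge_ptr()); this
 * helper just casts it back.
 */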
static struct extent_inode_elem *
unode_aux_to_inode_list(struct ulist_node *node)
{
	if (!node)
		return NULL;
	return (struct extent_inode_elem *)(uintptr_t)node->aux;
}

static void free_leaf_list(struct ulist *ulist)
{
	struct ulist_node *node;
	struct ulist_iterator uiter;

	ULIST_ITER_INIT(&uiter);
	while ((node = ulist_next(ulist, &uiter)))
		free_inode_elem_list(unode_aux_to_inode_list(node));

	ulist_free(ulist);
}

/*
 * We maintain three separate rbtrees: one for direct refs, one for
 * indirect refs which have a key, and one for indirect refs which do not
 * have a key. Each tree does merge on insertion.
 *
 * Once all of the references are located, we iterate over the tree of
 * indirect refs with missing keys. An appropriate key is located and
 * the ref is moved onto the tree for indirect refs. After all missing
 * keys are thus located, we iterate over the indirect ref tree, resolve
 * each reference, and then insert the resolved reference onto the
 * direct tree (merging there too).
 *
 * New backrefs (i.e., for parent nodes) are added to the appropriate
 * rbtree as they are encountered. The new backrefs are subsequently
 * resolved as above.
 */
static int resolve_indirect_refs(struct btrfs_fs_info *fs_info,
				 struct btrfs_path *path, u64 time_seq,
				 struct preftrees *preftrees,
				 const u64 *extent_item_pos,
				 struct share_check *sc, bool ignore_offset)
{
	int err;
	int ret = 0;
	struct ulist *parents;
	struct ulist_node *node;
	struct ulist_iterator uiter;
	struct rb_node *rnode;

	parents = ulist_alloc(GFP_NOFS);
	if (!parents)
		return -ENOMEM;

	/*
	 * We could trade memory usage for performance here by iterating
	 * the tree, allocating new refs for each insertion, and then
	 * freeing the entire indirect tree when we're done.  In some test
	 * cases, the tree can grow quite large (~200k objects).
	 */
	while ((rnode = rb_first_cached(&preftrees->indirect.root))) {
		struct prelim_ref *ref;

		ref = rb_entry(rnode, struct prelim_ref, rbnode);
		if (WARN(ref->parent,
			 "BUG: direct ref found in indirect tree")) {
			ret = -EINVAL;
			goto out;
		}

		rb_erase_cached(&ref->rbnode, &preftrees->indirect.root);
		preftrees->indirect.count--;

		if (ref->count == 0) {
			free_pref(ref);
			continue;
		}

		if (sc && sc->root_objectid &&
		    ref->root_id != sc->root_objectid) {
			free_pref(ref);
			ret = BACKREF_FOUND_SHARED;
			goto out;
		}
		err = resolve_indirect_ref(fs_info, path, time_seq, preftrees,
					   ref, parents, extent_item_pos,
					   ignore_offset);
		/*
		 * We can only tolerate ENOENT; otherwise, we should catch
		 * the error and return directly.
		 */
		if (err == -ENOENT) {
			prelim_ref_insert(fs_info, &preftrees->direct, ref,
					  NULL);
			continue;
		} else if (err) {
			free_pref(ref);
			ret = err;
			goto out;
		}

		/* we put the first parent into the ref at hand */
		ULIST_ITER_INIT(&uiter);
		node = ulist_next(parents, &uiter);
		ref->parent = node ? node->val : 0;
		ref->inode_list = unode_aux_to_inode_list(node);

		/* Add a prelim_ref(s) for any other parent(s). */
		while ((node = ulist_next(parents, &uiter))) {
			struct prelim_ref *new_ref;

			new_ref = kmem_cache_alloc(btrfs_prelim_ref_cache,
						   GFP_NOFS);
			if (!new_ref) {
				free_pref(ref);
				ret = -ENOMEM;
				goto out;
			}
			memcpy(new_ref, ref, sizeof(*ref));
			new_ref->parent = node->val;
			new_ref->inode_list = unode_aux_to_inode_list(node);
			prelim_ref_insert(fs_info, &preftrees->direct,
					  new_ref, NULL);
		}

		/*
		 * Now it's a direct ref, put it in the direct tree. We must
		 * do this last because the ref could be merged/freed here.
		 */
		prelim_ref_insert(fs_info, &preftrees->direct, ref, NULL);

		ulist_reinit(parents);
		cond_resched();
	}
out:
	/*
	 * We may have inode lists attached to refs in the parents ulist, so we
	 * must free them before freeing the ulist and its refs.
	 */
	free_leaf_list(parents);
	return ret;
}

/*
 * read tree blocks and add keys where required.
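 *
 * Refs parked in the indirect_missing_keys tree carry only the referenced
 * block's logical address (wanted_disk_byte); we read that block and take
 * its first key as key_for_search before moving the ref over to the
 * indirect tree.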
 */
static int add_missing_keys(struct btrfs_fs_info *fs_info,
			    struct preftrees *preftrees, bool lock)
{
	struct prelim_ref *ref;
	struct extent_buffer *eb;
	struct preftree *tree = &preftrees->indirect_missing_keys;
	struct rb_node *node;

	while ((node = rb_first_cached(&tree->root))) {
		ref = rb_entry(node, struct prelim_ref, rbnode);
		rb_erase_cached(node, &tree->root);

		BUG_ON(ref->parent);	/* should not be a direct ref */
		BUG_ON(ref->key_for_search.type);
		BUG_ON(!ref->wanted_disk_byte);

		eb = read_tree_block(fs_info, ref->wanted_disk_byte, 0,
				     ref->level - 1, NULL);
		if (IS_ERR(eb)) {
			free_pref(ref);
			return PTR_ERR(eb);
		} else if (!extent_buffer_uptodate(eb)) {
			free_pref(ref);
			free_extent_buffer(eb);
			return -EIO;
		}
		if (lock)
			btrfs_tree_read_lock(eb);
		if (btrfs_header_level(eb) == 0)
			btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0);
		else
			btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0);
		if (lock)
			btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
		prelim_ref_insert(fs_info, &preftrees->indirect, ref, NULL);
		cond_resched();
	}
	return 0;
}

/*
 * add all currently queued delayed refs from this head whose seq nr is
 * smaller than or equal to seq to the list
 */
static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_head *head, u64 seq,
			    struct preftrees *preftrees, struct share_check *sc)
{
	struct btrfs_delayed_ref_node *node;
	struct btrfs_key key;
	struct rb_node *n;
	int count;
	int ret = 0;

	spin_lock(&head->lock);
	for (n = rb_first_cached(&head->ref_tree); n; n = rb_next(n)) {
		node = rb_entry(n, struct btrfs_delayed_ref_node,
				ref_node);
		if (node->seq > seq)
			continue;

		switch (node->action) {
		case BTRFS_ADD_DELAYED_EXTENT:
		case BTRFS_UPDATE_DELAYED_HEAD:
			WARN_ON(1);
			continue;
		case BTRFS_ADD_DELAYED_REF:
			count = node->ref_mod;
			break;
		case BTRFS_DROP_DELAYED_REF:
			count = node->ref_mod * -1;
			break;
		default:
			BUG();
		}
		switch (node->type) {
		case BTRFS_TREE_BLOCK_REF_KEY: {
			/* NORMAL INDIRECT METADATA backref */
			struct btrfs_delayed_tree_ref *ref;
			struct btrfs_key *key_ptr = NULL;

			if (head->extent_op && head->extent_op->update_key) {
				btrfs_disk_key_to_cpu(&key, &head->extent_op->key);
				key_ptr = &key;
			}

			ref = btrfs_delayed_node_to_tree_ref(node);
			ret = add_indirect_ref(fs_info, preftrees, ref->root,
					       key_ptr, ref->level + 1,
					       node->bytenr, count, sc,
					       GFP_ATOMIC);
			break;
		}
		case BTRFS_SHARED_BLOCK_REF_KEY: {
			/* SHARED DIRECT METADATA backref */
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);

			ret = add_direct_ref(fs_info, preftrees, ref->level + 1,
					     ref->parent, node->bytenr, count,
					     sc, GFP_ATOMIC);
			break;
		}
		case BTRFS_EXTENT_DATA_REF_KEY: {
			/* NORMAL INDIRECT DATA backref */
			struct btrfs_delayed_data_ref *ref;
			ref = btrfs_delayed_node_to_data_ref(node);

			key.objectid = ref->objectid;
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = ref->offset;

			/*
			 * If we have a share check context and a reference for
			 * another inode, we can't exit immediately. This is
			 * because even if this is a BTRFS_ADD_DELAYED_REF
			 * reference we may find next a BTRFS_DROP_DELAYED_REF
			 * which cancels out this ADD reference.
			 *
			 * If this is a DROP reference and there was no previous
			 * ADD reference, then we need to signal that when we
			 * process references from the extent tree (through
			 * add_inline_refs() and add_keyed_refs()), we should
			 * not exit early if we find a reference for another
			 * inode, because one of the delayed DROP references
			 * may cancel that reference in the extent tree.
			 */
			if (sc && count < 0)
				sc->have_delayed_delete_refs = true;

			ret = add_indirect_ref(fs_info, preftrees, ref->root,
					       &key, 0, node->bytenr, count, sc,
					       GFP_ATOMIC);
			break;
		}
		case BTRFS_SHARED_DATA_REF_KEY: {
			/* SHARED DIRECT FULL backref */
			struct btrfs_delayed_data_ref *ref;

			ref = btrfs_delayed_node_to_data_ref(node);

			ret = add_direct_ref(fs_info, preftrees, 0, ref->parent,
					     node->bytenr, count, sc,
					     GFP_ATOMIC);
			break;
		}
		default:
			WARN_ON(1);
		}
		/*
		 * We must ignore BACKREF_FOUND_SHARED until all delayed
		 * refs have been checked.
		 */
		if (ret && (ret != BACKREF_FOUND_SHARED))
			break;
	}
	if (!ret)
		ret = extent_is_shared(sc);

	spin_unlock(&head->lock);
	return ret;
}

/*
 * add all inline backrefs for bytenr to the list
 *
 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
 */
static int add_inline_refs(const struct btrfs_fs_info *fs_info,
			   struct btrfs_path *path, u64 bytenr,
			   int *info_level, struct preftrees *preftrees,
			   struct share_check *sc)
{
	int ret = 0;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	unsigned long ptr;
	unsigned long end;
	struct btrfs_extent_item *ei;
	u64 flags;
	u64 item_size;

	/*
	 * enumerate all inline refs
	 */
	leaf = path->nodes[0];
	slot = path->slots[0];

	item_size = btrfs_item_size_nr(leaf, slot);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);
	btrfs_item_key_to_cpu(leaf, &found_key, slot);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
	    flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		struct btrfs_tree_block_info *info;

		info = (struct btrfs_tree_block_info *)ptr;
		*info_level = btrfs_tree_block_level(leaf, info);
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	} else if (found_key.type == BTRFS_METADATA_ITEM_KEY) {
		*info_level = found_key.offset;
	} else {
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
	}
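
	/*
	 * The inline refs sit back to back directly after the extent item
	 * (and after the btrfs_tree_block_info skipped above for non-skinny
	 * metadata), so we can walk them by advancing ptr by each ref's size
	 * until the end of the item.
	 */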
	while (ptr < end) {
		struct btrfs_extent_inline_ref *iref;
		u64 offset;
		int type;

		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_get_extent_inline_ref_type(leaf, iref,
							BTRFS_REF_TYPE_ANY);
		if (type == BTRFS_REF_TYPE_INVALID)
			return -EUCLEAN;

		offset = btrfs_extent_inline_ref_offset(leaf, iref);

		switch (type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = add_direct_ref(fs_info, preftrees,
					     *info_level + 1, offset,
					     bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = (struct btrfs_shared_data_ref *)(iref + 1);
			count = btrfs_shared_data_ref_count(leaf, sdref);

			ret = add_direct_ref(fs_info, preftrees, 0, offset,
					     bytenr, count, sc, GFP_NOFS);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = add_indirect_ref(fs_info, preftrees, offset,
					       NULL, *info_level + 1,
					       bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);

			if (sc && sc->inum && key.objectid != sc->inum &&
			    !sc->have_delayed_delete_refs) {
				ret = BACKREF_FOUND_SHARED;
				break;
			}

			root = btrfs_extent_data_ref_root(leaf, dref);

			ret = add_indirect_ref(fs_info, preftrees, root,
					       &key, 0, bytenr, count,
					       sc, GFP_NOFS);

			break;
		}
		default:
			WARN_ON(1);
		}
		if (ret)
			return ret;
		ptr += btrfs_extent_inline_ref_size(type);
	}

	return 0;
}

/*
 * add all non-inline backrefs for bytenr to the list
 *
 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
 */
static int add_keyed_refs(struct btrfs_fs_info *fs_info,
			  struct btrfs_path *path, u64 bytenr,
			  int info_level, struct preftrees *preftrees,
			  struct share_check *sc)
{
	struct btrfs_root *extent_root = fs_info->extent_root;
	int ret;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	while (1) {
		ret = btrfs_next_item(extent_root, path);
		if (ret < 0)
			break;
		if (ret) {
			ret = 0;
			break;
		}

		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);

		if (key.objectid != bytenr)
			break;
		if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
			continue;
		if (key.type > BTRFS_SHARED_DATA_REF_KEY)
			break;

		switch (key.type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			/* SHARED DIRECT METADATA backref */
			ret = add_direct_ref(fs_info, preftrees,
					     info_level + 1, key.offset,
					     bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			/* SHARED DIRECT FULL backref */
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = btrfs_item_ptr(leaf, slot,
					       struct btrfs_shared_data_ref);
			count = btrfs_shared_data_ref_count(leaf, sdref);
			ret = add_direct_ref(fs_info, preftrees, 0,
					     key.offset, bytenr, count,
					     sc, GFP_NOFS);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			/* NORMAL INDIRECT METADATA backref */
			ret = add_indirect_ref(fs_info, preftrees, key.offset,
					       NULL, info_level + 1, bytenr,
					       1, NULL, GFP_NOFS);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			/* NORMAL INDIRECT DATA backref */
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = btrfs_item_ptr(leaf, slot,
					      struct btrfs_extent_data_ref);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);

			if (sc && sc->inum && key.objectid != sc->inum &&
			    !sc->have_delayed_delete_refs) {
				ret = BACKREF_FOUND_SHARED;
				break;
			}

			root = btrfs_extent_data_ref_root(leaf, dref);
			ret = add_indirect_ref(fs_info, preftrees, root,
					       &key, 0, bytenr, count,
					       sc, GFP_NOFS);
			break;
		}
		default:
			WARN_ON(1);
		}
		if (ret)
			return ret;
	}

	return ret;
}

/*
 * this adds all existing backrefs (inline backrefs, backrefs and delayed
 * refs) for the given bytenr to the refs list, merges duplicates and resolves
 * indirect refs to their parent bytenr.
 * When roots are found, they're added to the roots list.
 *
 * If time_seq is set to SEQ_LAST, it will not search delayed_refs, and will
 * behave much like the trans == NULL case; the only difference is that it
 * will not use the commit root.
 * This special case is for qgroup to search roots in commit_transaction().
 *
 * @sc - if !NULL, then immediately return BACKREF_FOUND_SHARED when a
 * shared extent is detected.
 *
 * Otherwise this returns 0 for success and <0 for an error.
 *
 * If ignore_offset is set to false, only extent refs whose offsets match
 * extent_item_pos are returned.  If true, every extent ref is returned
 * and extent_item_pos is ignored.
 *
 * FIXME some caching might speed things up
 */
static int find_parent_nodes(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info, u64 bytenr,
			     u64 time_seq, struct ulist *refs,
			     struct ulist *roots, const u64 *extent_item_pos,
			     struct share_check *sc, bool ignore_offset)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_delayed_ref_root *delayed_refs = NULL;
	struct btrfs_delayed_ref_head *head;
	int info_level = 0;
	int ret;
	struct prelim_ref *ref;
	struct rb_node *node;
	struct extent_inode_elem *eie = NULL;
	struct preftrees preftrees = {
		.direct = PREFTREE_INIT,
		.indirect = PREFTREE_INIT,
		.indirect_missing_keys = PREFTREE_INIT
	};

	key.objectid = bytenr;
	key.offset = (u64)-1;
	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	if (!trans) {
		path->search_commit_root = 1;
		path->skip_locking = 1;
	}

	if (time_seq == SEQ_LAST)
		path->skip_locking = 1;

	/*
	 * grab both a lock on the path and a lock on the delayed ref head.
	 * We need both to get a consistent picture of how the refs look
	 * at a specified point in time
	 */
again:
	head = NULL;

	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret == 0) {
		/* This shouldn't happen, indicates a bug or fs corruption. */
		ASSERT(ret != 0);
		ret = -EUCLEAN;
		goto out;
	}

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	if (trans && likely(trans->type != __TRANS_DUMMY) &&
	    time_seq != SEQ_LAST) {
#else
	if (trans && time_seq != SEQ_LAST) {
#endif
		/*
		 * look if there are updates for this ref queued and lock the
		 * head
		 */
		delayed_refs = &trans->transaction->delayed_refs;
		spin_lock(&delayed_refs->lock);
		head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
		if (head) {
			if (!mutex_trylock(&head->mutex)) {
				refcount_inc(&head->refs);
				spin_unlock(&delayed_refs->lock);

				btrfs_release_path(path);

				/*
				 * Mutex was contended, block until it's
				 * released and try again
				 */
				mutex_lock(&head->mutex);
				mutex_unlock(&head->mutex);
				btrfs_put_delayed_ref_head(head);
				goto again;
			}
			spin_unlock(&delayed_refs->lock);
			ret = add_delayed_refs(fs_info, head, time_seq,
					       &preftrees, sc);
			mutex_unlock(&head->mutex);
			if (ret)
				goto out;
		} else {
			spin_unlock(&delayed_refs->lock);
		}
	}

	if (path->slots[0]) {
		struct extent_buffer *leaf;
		int slot;

		path->slots[0]--;
		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid == bytenr &&
		    (key.type == BTRFS_EXTENT_ITEM_KEY ||
		     key.type == BTRFS_METADATA_ITEM_KEY)) {
			ret = add_inline_refs(fs_info, path, bytenr,
					      &info_level, &preftrees, sc);
			if (ret)
				goto out;
			ret = add_keyed_refs(fs_info, path, bytenr, info_level,
					     &preftrees, sc);
			if (ret)
				goto out;
		}
	}

	btrfs_release_path(path);

	ret = add_missing_keys(fs_info, &preftrees, path->skip_locking == 0);
	if (ret)
		goto out;

	WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect_missing_keys.root.rb_root));

	ret = resolve_indirect_refs(fs_info, path, time_seq, &preftrees,
				    extent_item_pos, sc, ignore_offset);
	if (ret)
		goto out;

	WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect.root.rb_root));

	/*
	 * This walks the tree of merged and resolved refs. Tree blocks are
	 * read in as needed. Unique entries are added to the ulist, and
	 * the list of found roots is updated.
	 *
	 * We release the entire tree in one go before returning.
	 */
	node = rb_first_cached(&preftrees.direct.root);
	while (node) {
		ref = rb_entry(node, struct prelim_ref, rbnode);
		node = rb_next(&ref->rbnode);
		/*
		 * ref->count < 0 can happen here if there are delayed
		 * refs with a node->action of BTRFS_DROP_DELAYED_REF.
		 * prelim_ref_insert() relies on this when merging
		 * identical refs to keep the overall count correct.
		 * prelim_ref_insert() will merge only those refs
		 * which compare identically.  Any refs having
		 * e.g. different offsets would not be merged,
		 * and would retain their original ref->count < 0.
		 */
		if (roots && ref->count && ref->root_id && ref->parent == 0) {
			if (sc && sc->root_objectid &&
			    ref->root_id != sc->root_objectid) {
				ret = BACKREF_FOUND_SHARED;
				goto out;
			}

			/* no parent == root of tree */
			ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
			if (ret < 0)
				goto out;
		}
		if (ref->count && ref->parent) {
			if (extent_item_pos && !ref->inode_list &&
			    ref->level == 0) {
				struct extent_buffer *eb;

				eb = read_tree_block(fs_info, ref->parent, 0,
						     ref->level, NULL);
				if (IS_ERR(eb)) {
					ret = PTR_ERR(eb);
					goto out;
				} else if (!extent_buffer_uptodate(eb)) {
					free_extent_buffer(eb);
					ret = -EIO;
					goto out;
				}

				if (!path->skip_locking) {
					btrfs_tree_read_lock(eb);
					btrfs_set_lock_blocking_read(eb);
				}
				ret = find_extent_in_eb(eb, bytenr,
							*extent_item_pos, &eie, ignore_offset);
				if (!path->skip_locking)
					btrfs_tree_read_unlock_blocking(eb);
				free_extent_buffer(eb);
				if (ret < 0)
					goto out;
				ref->inode_list = eie;
				/*
				 * We transferred the list ownership to the ref,
				 * so set to NULL to avoid a double free in case
				 * an error happens after this.
				 */
				eie = NULL;
			}
			ret = ulist_add_merge_ptr(refs, ref->parent,
						  ref->inode_list,
						  (void **)&eie, GFP_NOFS);
			if (ret < 0)
				goto out;
			if (!ret && extent_item_pos) {
				/*
				 * We've recorded that parent, so we must extend
				 * its inode list here.
				 *
				 * However if there was corruption we may not
				 * have found an eie, return an error in this
				 * case.
				 */
				ASSERT(eie);
				if (!eie) {
					ret = -EUCLEAN;
					goto out;
				}
				while (eie->next)
					eie = eie->next;
				eie->next = ref->inode_list;
			}
			eie = NULL;
			/*
			 * We have transferred the inode list ownership from
			 * this ref to the ref we added to the 'refs' ulist.
			 * So set this ref's inode list to NULL to avoid
			 * use-after-free when our caller uses it or double
			 * frees in case an error happens before we return.
			 */
			ref->inode_list = NULL;
		}
		cond_resched();
	}

out:
	btrfs_free_path(path);

	prelim_release(&preftrees.direct);
	prelim_release(&preftrees.indirect);
	prelim_release(&preftrees.indirect_missing_keys);

	if (ret < 0)
		free_inode_elem_list(eie);
	return ret;
}

/*
 * Finds all leafs with a reference to the specified combination of bytenr and
 * offset. If extent_item_pos is given, each found leaf's aux value will point
 * to a list of the matching inode elements (the caller must free each list
 * element). The leafs will be stored in the leafs ulist, which must be freed
 * with ulist_free.
 *
 * returns 0 on success, <0 on error
 */
int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
			 struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 time_seq, struct ulist **leafs,
			 const u64 *extent_item_pos, bool ignore_offset)
{
	int ret;

	*leafs = ulist_alloc(GFP_NOFS);
	if (!*leafs)
		return -ENOMEM;

	ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
				*leafs, NULL, extent_item_pos, NULL, ignore_offset);
	if (ret < 0 && ret != -ENOENT) {
		free_leaf_list(*leafs);
		return ret;
	}

	return 0;
}

/*
 * walk all backrefs for a given extent to find all roots that reference this
 * extent. Walking a backref means finding all extents that reference this
 * extent and in turn walk the backrefs of those, too. Naturally this is a
 * recursive process, but here it is implemented in an iterative fashion: We
 * find all referencing extents for the extent in question and put them on a
 * list. In turn, we find all referencing extents for those, further appending
 * to the list. The way we iterate the list allows adding more elements after
 * the current while iterating. The process stops when we reach the end of the
 * list. Found roots are added to the roots list.
 *
 * returns 0 on success, < 0 on error.
 */
static int btrfs_find_all_roots_safe(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info, u64 bytenr,
				     u64 time_seq, struct ulist **roots,
				     bool ignore_offset)
{
	struct ulist *tmp;
	struct ulist_node *node = NULL;
	struct ulist_iterator uiter;
	int ret;

	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp)
		return -ENOMEM;
	*roots = ulist_alloc(GFP_NOFS);
	if (!*roots) {
		ulist_free(tmp);
		return -ENOMEM;
	}

	ULIST_ITER_INIT(&uiter);
	while (1) {
		ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
					tmp, *roots, NULL, NULL, ignore_offset);
		if (ret < 0 && ret != -ENOENT) {
			ulist_free(tmp);
			ulist_free(*roots);
			*roots = NULL;
			return ret;
		}
		node = ulist_next(tmp, &uiter);
		if (!node)
			break;
		bytenr = node->val;
		cond_resched();
	}

	ulist_free(tmp);
	return 0;
}

int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
			 struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 time_seq, struct ulist **roots,
			 bool ignore_offset)
{
	int ret;

	if (!trans)
		down_read(&fs_info->commit_root_sem);
	ret = btrfs_find_all_roots_safe(trans, fs_info, bytenr,
					time_seq, roots, ignore_offset);
	if (!trans)
		up_read(&fs_info->commit_root_sem);
	return ret;
}

/**
 * btrfs_check_shared - tell us whether an extent is shared
 *
 * btrfs_check_shared uses the backref walking code but will short
 * circuit as soon as it finds a root or inode that doesn't match the
 * one passed in. This provides a significant performance benefit for
 * callers (such as fiemap) which want to know whether the extent is
 * shared but do not need a ref count.
 *
 * This attempts to attach to the running transaction in order to account for
 * delayed refs, but continues on even when no running transaction exists.
 *
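 * @root:   root the inode @inum belongs to
 * @inum:   inode number of the extent's owner we check against
 * @bytenr: logical address of the extent to check
 * @roots:  ulist collecting the found roots, provided by the caller
 * @tmp:    scratch ulist for the iteration, provided by the caller
 *
 * Both ulists are passed in rather than allocated here, presumably so that
 * callers checking many extents in a row can reuse the allocations; they
 * are (re)initialized on entry and released before returning.
 *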
 * Return: 0 if extent is not shared, 1 if it is shared, < 0 on error.
 */
int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr,
		       struct ulist *roots, struct ulist *tmp)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	struct ulist_iterator uiter;
	struct ulist_node *node;
	struct seq_list elem = SEQ_LIST_INIT(elem);
	int ret = 0;
	struct share_check shared = {
		.root_objectid = root->root_key.objectid,
		.inum = inum,
		.share_count = 0,
		.have_delayed_delete_refs = false,
	};

	ulist_init(roots);
	ulist_init(tmp);

	trans = btrfs_join_transaction_nostart(root);
	if (IS_ERR(trans)) {
		if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) {
			ret = PTR_ERR(trans);
			goto out;
		}
		trans = NULL;
		down_read(&fs_info->commit_root_sem);
	} else {
		btrfs_get_tree_mod_seq(fs_info, &elem);
	}

	ULIST_ITER_INIT(&uiter);
	while (1) {
		ret = find_parent_nodes(trans, fs_info, bytenr, elem.seq, tmp,
					roots, NULL, &shared, false);
		if (ret == BACKREF_FOUND_SHARED) {
			/* this is the only condition under which we return 1 */
			ret = 1;
			break;
		}
		if (ret < 0 && ret != -ENOENT)
			break;
		ret = 0;
		node = ulist_next(tmp, &uiter);
		if (!node)
			break;
		bytenr = node->val;
		shared.share_count = 0;
		shared.have_delayed_delete_refs = false;
		cond_resched();
	}

	if (trans) {
		btrfs_put_tree_mod_seq(fs_info, &elem);
		btrfs_end_transaction(trans);
	} else {
		up_read(&fs_info->commit_root_sem);
	}
out:
	ulist_release(roots);
	ulist_release(tmp);
	return ret;
}

int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
			  u64 start_off, struct btrfs_path *path,
			  struct btrfs_inode_extref **ret_extref,
			  u64 *found_off)
{
	int ret, slot;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_inode_extref *extref;
	const struct extent_buffer *leaf;
	unsigned long ptr;

	key.objectid = inode_objectid;
	key.type = BTRFS_INODE_EXTREF_KEY;
	key.offset = start_off;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			/*
			 * If the item at offset is not found,
			 * btrfs_search_slot will point us to the slot
			 * where it should be inserted. In our case
			 * that will be the slot directly before the
			 * next BTRFS_INODE_EXTREF_KEY item. In the case
			 * that we're pointing to the last slot in a
			 * leaf, we must move one leaf over.
			 */
			ret = btrfs_next_leaf(root, path);
			if (ret) {
				if (ret >= 1)
					ret = -ENOENT;
				break;
			}
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		/*
		 * Check that we're still looking at an extended ref key for
		 * this particular objectid. If we have a different objectid
		 * or type then there are no more to be found in the tree and
		 * we can exit.
		 */
		ret = -ENOENT;
		if (found_key.objectid != inode_objectid)
			break;
		if (found_key.type != BTRFS_INODE_EXTREF_KEY)
			break;

		ret = 0;
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		extref = (struct btrfs_inode_extref *)ptr;
		*ret_extref = extref;
		if (found_off)
			*found_off = found_key.offset;
		break;
	}

	return ret;
}

/*
 * this iterates to turn a name (from iref/extref) into a full filesystem path.
 * Elements of the path are separated by '/' and the path is guaranteed to be
 * 0-terminated. the path is only given within the current file system.
 * Therefore, it never starts with a '/'. the caller is responsible for
 * providing "size" bytes in "dest". the dest buffer will be filled backwards.
 * finally, the start point of the resulting string is returned. this pointer
 * is within dest, normally.
 * in case the path buffer would overflow, the pointer is decremented further
 * as if output was written to the buffer, though no more output is actually
 * generated. that way, the caller can determine how much space would be
 * required for the path to fit into the buffer. in that case, the returned
 * value will be smaller than dest. callers must check this!
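 *
 * as an illustration (with made-up names): resolving a file three levels
 * deep writes "name" at the very end of dest first, then prepends "/dir2"
 * and "/dir1" while walking the INODE_REF items towards the tree root, and
 * finally returns a pointer to the first character written.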
 */
char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
			u32 name_len, unsigned long name_off,
			struct extent_buffer *eb_in, u64 parent,
			char *dest, u32 size)
{
	int slot;
	u64 next_inum;
	int ret;
	s64 bytes_left = ((s64)size) - 1;
	struct extent_buffer *eb = eb_in;
	struct btrfs_key found_key;
	int leave_spinning = path->leave_spinning;
	struct btrfs_inode_ref *iref;

	if (bytes_left >= 0)
		dest[bytes_left] = '\0';

	path->leave_spinning = 1;
	while (1) {
		bytes_left -= name_len;
		if (bytes_left >= 0)
			read_extent_buffer(eb, dest + bytes_left,
					   name_off, name_len);
		if (eb != eb_in) {
			if (!path->skip_locking)
				btrfs_tree_read_unlock_blocking(eb);
			free_extent_buffer(eb);
		}
		ret = btrfs_find_item(fs_root, path, parent, 0,
				      BTRFS_INODE_REF_KEY, &found_key);
		if (ret > 0)
			ret = -ENOENT;
		if (ret)
			break;

		next_inum = found_key.offset;

		/* regular exit ahead */
		if (parent == next_inum)
			break;

		slot = path->slots[0];
		eb = path->nodes[0];
		/* make sure we can use eb after releasing the path */
		if (eb != eb_in) {
			if (!path->skip_locking)
				btrfs_set_lock_blocking_read(eb);
			path->nodes[0] = NULL;
			path->locks[0] = 0;
		}
		btrfs_release_path(path);
		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);

		name_len = btrfs_inode_ref_name_len(eb, iref);
		name_off = (unsigned long)(iref + 1);

		parent = next_inum;
		--bytes_left;
		if (bytes_left >= 0)
			dest[bytes_left] = '/';
	}

	btrfs_release_path(path);
	path->leave_spinning = leave_spinning;

	if (ret)
		return ERR_PTR(ret);

	return dest + bytes_left;
}

/*
 * this makes the path point to (logical EXTENT_ITEM *), i.e. the extent item
 * covering the given logical address.
 * returns BTRFS_EXTENT_FLAG_DATA for data, BTRFS_EXTENT_FLAG_TREE_BLOCK for
 * tree blocks and <0 on error.
 */
1786 int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
1787 struct btrfs_path *path, struct btrfs_key *found_key,
1788 u64 *flags_ret)
1789 {
1790 int ret;
1791 u64 flags;
1792 u64 size = 0;
1793 u32 item_size;
1794 const struct extent_buffer *eb;
1795 struct btrfs_extent_item *ei;
1796 struct btrfs_key key;
1797
1798 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
1799 key.type = BTRFS_METADATA_ITEM_KEY;
1800 else
1801 key.type = BTRFS_EXTENT_ITEM_KEY;
1802 key.objectid = logical;
1803 key.offset = (u64)-1;
1804
1805 ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
1806 if (ret < 0)
1807 return ret;
1808
1809 ret = btrfs_previous_extent_item(fs_info->extent_root, path, 0);
1810 if (ret) {
1811 if (ret > 0)
1812 ret = -ENOENT;
1813 return ret;
1814 }
1815 btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
1816 if (found_key->type == BTRFS_METADATA_ITEM_KEY)
1817 size = fs_info->nodesize;
1818 else if (found_key->type == BTRFS_EXTENT_ITEM_KEY)
1819 size = found_key->offset;
1820
1821 if (found_key->objectid > logical ||
1822 found_key->objectid + size <= logical) {
1823 btrfs_debug(fs_info,
1824 "logical %llu is not within any extent", logical);
1825 return -ENOENT;
1826 }
1827
1828 eb = path->nodes[0];
1829 item_size = btrfs_item_size_nr(eb, path->slots[0]);
1830 BUG_ON(item_size < sizeof(*ei));
1831
1832 ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
1833 flags = btrfs_extent_flags(eb, ei);
1834
1835 btrfs_debug(fs_info,
1836 "logical %llu is at position %llu within the extent (%llu EXTENT_ITEM %llu) flags %#llx size %u",
1837 logical, logical - found_key->objectid, found_key->objectid,
1838 found_key->offset, flags, item_size);
1839
1840 WARN_ON(!flags_ret);
1841 if (flags_ret) {
1842 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1843 *flags_ret = BTRFS_EXTENT_FLAG_TREE_BLOCK;
1844 else if (flags & BTRFS_EXTENT_FLAG_DATA)
1845 *flags_ret = BTRFS_EXTENT_FLAG_DATA;
1846 else
1847 BUG();
1848 return 0;
1849 }
1850
1851 return -EIO;
1852 }
1853
1854 /*
1855 * helper function to iterate extent inline refs. ptr must point to a 0 value
1856 * for the first call and may be modified. it is used to track state.
1857 * if more refs exist, 0 is returned and the next call to
1858 * get_extent_inline_ref must pass the modified ptr parameter to get the
1859 * next ref. after the last ref was processed, 1 is returned.
1860 * returns <0 on error
1861 */
1862 static int get_extent_inline_ref(unsigned long *ptr,
1863 const struct extent_buffer *eb,
1864 const struct btrfs_key *key,
1865 const struct btrfs_extent_item *ei,
1866 u32 item_size,
1867 struct btrfs_extent_inline_ref **out_eiref,
1868 int *out_type)
1869 {
1870 unsigned long end;
1871 u64 flags;
1872 struct btrfs_tree_block_info *info;
1873
1874 if (!*ptr) {
1875 /* first call */
1876 flags = btrfs_extent_flags(eb, ei);
1877 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1878 if (key->type == BTRFS_METADATA_ITEM_KEY) {
1879 /* a skinny metadata extent */
1880 *out_eiref =
1881 (struct btrfs_extent_inline_ref *)(ei + 1);
1882 } else {
1883 WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY);
1884 info = (struct btrfs_tree_block_info *)(ei + 1);
1885 *out_eiref =
1886 (struct btrfs_extent_inline_ref *)(info + 1);
1887 }
1888 } else {
1889 *out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
1890 }
1891 *ptr = (unsigned long)*out_eiref;
1892 if ((unsigned long)(*ptr) >= (unsigned long)ei + item_size)
1893 return -ENOENT;
1894 }
1895
1896 end = (unsigned long)ei + item_size;
1897 *out_eiref = (struct btrfs_extent_inline_ref *)(*ptr);
1898 *out_type = btrfs_get_extent_inline_ref_type(eb, *out_eiref,
1899 BTRFS_REF_TYPE_ANY);
1900 if (*out_type == BTRFS_REF_TYPE_INVALID)
1901 return -EUCLEAN;
1902
1903 *ptr += btrfs_extent_inline_ref_size(*out_type);
1904 WARN_ON(*ptr > end);
1905 if (*ptr == end)
1906 return 1; /* last */
1907
1908 return 0;
1909 }
1910
1911 /*
1912 * reads the tree block backref for an extent. tree level and root are returned
1913 * through out_level and out_root. ptr must point to a 0 value for the first
1914 * call and may be modified (see get_extent_inline_ref comment).
1915 * returns 0 if data was provided, 1 if there was no more data to provide or
1916 * <0 on error.
1917 */
1918 int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
1919 struct btrfs_key *key, struct btrfs_extent_item *ei,
1920 u32 item_size, u64 *out_root, u8 *out_level)
1921 {
1922 int ret;
1923 int type;
1924 struct btrfs_extent_inline_ref *eiref;
1925
1926 if (*ptr == (unsigned long)-1)
1927 return 1;
1928
1929 while (1) {
1930 ret = get_extent_inline_ref(ptr, eb, key, ei, item_size,
1931 &eiref, &type);
1932 if (ret < 0)
1933 return ret;
1934
1935 if (type == BTRFS_TREE_BLOCK_REF_KEY ||
1936 type == BTRFS_SHARED_BLOCK_REF_KEY)
1937 break;
1938
1939 if (ret == 1)
1940 return 1;
1941 }
1942
1943 /* we can treat both ref types equally here */
1944 *out_root = btrfs_extent_inline_ref_offset(eb, eiref);
1945
1946 if (key->type == BTRFS_EXTENT_ITEM_KEY) {
1947 struct btrfs_tree_block_info *info;
1948
1949 info = (struct btrfs_tree_block_info *)(ei + 1);
1950 *out_level = btrfs_tree_block_level(eb, info);
1951 } else {
1952 ASSERT(key->type == BTRFS_METADATA_ITEM_KEY);
1953 *out_level = (u8)key->offset;
1954 }
1955
1956 if (ret == 1)
1957 *ptr = (unsigned long)-1;
1958
1959 return 0;
1960 }
1961
1962 static int iterate_leaf_refs(struct btrfs_fs_info *fs_info,
1963 struct extent_inode_elem *inode_list,
1964 u64 root, u64 extent_item_objectid,
1965 iterate_extent_inodes_t *iterate, void *ctx)
1966 {
1967 struct extent_inode_elem *eie;
1968 int ret = 0;
1969
1970 for (eie = inode_list; eie; eie = eie->next) {
1971 btrfs_debug(fs_info,
1972 "ref for %llu resolved, key (%llu EXTEND_DATA %llu), root %llu",
1973 extent_item_objectid, eie->inum,
1974 eie->offset, root);
1975 ret = iterate(eie->inum, eie->offset, root, ctx);
1976 if (ret) {
1977 btrfs_debug(fs_info,
1978 "stopping iteration for %llu due to ret=%d",
1979 extent_item_objectid, ret);
1980 break;
1981 }
1982 }
1983
1984 return ret;
1985 }
1986
1987 /*
1988 * calls iterate() for every inode that references the extent identified by
1989 * the given parameters.
1990 * when the iterator function returns a non-zero value, iteration stops.
1991 */
1992 int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
1993 u64 extent_item_objectid, u64 extent_item_pos,
1994 int search_commit_root,
1995 iterate_extent_inodes_t *iterate, void *ctx,
1996 bool ignore_offset)
1997 {
1998 int ret;
1999 struct btrfs_trans_handle *trans = NULL;
2000 struct ulist *refs = NULL;
2001 struct ulist *roots = NULL;
2002 struct ulist_node *ref_node = NULL;
2003 struct ulist_node *root_node = NULL;
2004 struct seq_list tree_mod_seq_elem = SEQ_LIST_INIT(tree_mod_seq_elem);
2005 struct ulist_iterator ref_uiter;
2006 struct ulist_iterator root_uiter;
2007
2008 btrfs_debug(fs_info, "resolving all inodes for extent %llu",
2009 extent_item_objectid);
2010
2011 if (!search_commit_root) {
2012 trans = btrfs_attach_transaction(fs_info->extent_root);
2013 if (IS_ERR(trans)) {
2014 if (PTR_ERR(trans) != -ENOENT &&
2015 PTR_ERR(trans) != -EROFS)
2016 return PTR_ERR(trans);
2017 trans = NULL;
2018 }
2019 }
2020
2021 if (trans)
2022 btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
2023 else
2024 down_read(&fs_info->commit_root_sem);
2025
2026 ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
2027 tree_mod_seq_elem.seq, &refs,
2028 &extent_item_pos, ignore_offset);
2029 if (ret)
2030 goto out;
2031
2032 ULIST_ITER_INIT(&ref_uiter);
2033 while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
2034 ret = btrfs_find_all_roots_safe(trans, fs_info, ref_node->val,
2035 tree_mod_seq_elem.seq, &roots,
2036 ignore_offset);
2037 if (ret)
2038 break;
2039 ULIST_ITER_INIT(&root_uiter);
2040 while (!ret && (root_node = ulist_next(roots, &root_uiter))) {
2041 btrfs_debug(fs_info,
2042 "root %llu references leaf %llu, data list %#llx",
2043 root_node->val, ref_node->val,
2044 ref_node->aux);
2045 ret = iterate_leaf_refs(fs_info,
2046 (struct extent_inode_elem *)
2047 (uintptr_t)ref_node->aux,
2048 root_node->val,
2049 extent_item_objectid,
2050 iterate, ctx);
2051 }
2052 ulist_free(roots);
2053 }
2054
2055 free_leaf_list(refs);
2056 out:
2057 if (trans) {
2058 btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
2059 btrfs_end_transaction(trans);
2060 } else {
2061 up_read(&fs_info->commit_root_sem);
2062 }
2063
2064 return ret;
2065 }
2066
2067 static int build_ino_list(u64 inum, u64 offset, u64 root, void *ctx)
2068 {
2069 struct btrfs_data_container *inodes = ctx;
2070 const size_t c = 3 * sizeof(u64);
2071
2072 if (inodes->bytes_left >= c) {
2073 inodes->bytes_left -= c;
2074 inodes->val[inodes->elem_cnt] = inum;
2075 inodes->val[inodes->elem_cnt + 1] = offset;
2076 inodes->val[inodes->elem_cnt + 2] = root;
2077 inodes->elem_cnt += 3;
2078 } else {
2079 inodes->bytes_missing += c - inodes->bytes_left;
2080 inodes->bytes_left = 0;
2081 inodes->elem_missed += 3;
2082 }
2083
2084 return 0;
2085 }
2086
2087 int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
2088 struct btrfs_path *path,
2089 void *ctx, bool ignore_offset)
2090 {
2091 int ret;
2092 u64 extent_item_pos;
2093 u64 flags = 0;
2094 struct btrfs_key found_key;
2095 int search_commit_root = path->search_commit_root;
2096
2097 ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
2098 btrfs_release_path(path);
2099 if (ret < 0)
2100 return ret;
2101 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
2102 return -EINVAL;
2103
2104 extent_item_pos = logical - found_key.objectid;
2105 ret = iterate_extent_inodes(fs_info, found_key.objectid,
2106 extent_item_pos, search_commit_root,
2107 build_ino_list, ctx, ignore_offset);
2108
2109 return ret;
2110 }
2111
2112 typedef int (iterate_irefs_t)(u64 parent, u32 name_len, unsigned long name_off,
2113 struct extent_buffer *eb, void *ctx);
2114
2115 static int iterate_inode_refs(u64 inum, struct btrfs_root *fs_root,
2116 struct btrfs_path *path,
2117 iterate_irefs_t *iterate, void *ctx)
2118 {
2119 int ret = 0;
2120 int slot;
2121 u32 cur;
2122 u32 len;
2123 u32 name_len;
2124 u64 parent = 0;
2125 int found = 0;
2126 struct extent_buffer *eb;
2127 struct btrfs_item *item;
2128 struct btrfs_inode_ref *iref;
2129 struct btrfs_key found_key;
2130
2131 while (!ret) {
2132 ret = btrfs_find_item(fs_root, path, inum,
2133 parent ? parent + 1 : 0, BTRFS_INODE_REF_KEY,
2134 &found_key);
2135
2136 if (ret < 0)
2137 break;
2138 if (ret) {
2139 ret = found ? 0 : -ENOENT;
2140 break;
2141 }
2142 ++found;
2143
2144 parent = found_key.offset;
2145 slot = path->slots[0];
2146 eb = btrfs_clone_extent_buffer(path->nodes[0]);
2147 if (!eb) {
2148 ret = -ENOMEM;
2149 break;
2150 }
2151 btrfs_release_path(path);
2152
2153 item = btrfs_item_nr(slot);
2154 iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
2155
2156 for (cur = 0; cur < btrfs_item_size(eb, item); cur += len) {
2157 name_len = btrfs_inode_ref_name_len(eb, iref);
2158 /* path must be released before calling iterate()! */
2159 btrfs_debug(fs_root->fs_info,
2160 "following ref at offset %u for inode %llu in tree %llu",
2161 cur, found_key.objectid,
2162 fs_root->root_key.objectid);
2163 ret = iterate(parent, name_len,
2164 (unsigned long)(iref + 1), eb, ctx);
2165 if (ret)
2166 break;
2167 len = sizeof(*iref) + name_len;
2168 iref = (struct btrfs_inode_ref *)((char *)iref + len);
2169 }
2170 free_extent_buffer(eb);
2171 }
2172
2173 btrfs_release_path(path);
2174
2175 return ret;
2176 }
2177
2178 static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
2179 struct btrfs_path *path,
2180 iterate_irefs_t *iterate, void *ctx)
2181 {
2182 int ret;
2183 int slot;
2184 u64 offset = 0;
2185 u64 parent;
2186 int found = 0;
2187 struct extent_buffer *eb;
2188 struct btrfs_inode_extref *extref;
2189 u32 item_size;
2190 u32 cur_offset;
2191 unsigned long ptr;
2192
2193 while (1) {
2194 ret = btrfs_find_one_extref(fs_root, inum, offset, path, &extref,
2195 &offset);
2196 if (ret < 0)
2197 break;
2198 if (ret) {
2199 ret = found ? 0 : -ENOENT;
2200 break;
2201 }
2202 ++found;
2203
2204 slot = path->slots[0];
2205 eb = btrfs_clone_extent_buffer(path->nodes[0]);
2206 if (!eb) {
2207 ret = -ENOMEM;
2208 break;
2209 }
2210 btrfs_release_path(path);
2211
2212 item_size = btrfs_item_size_nr(eb, slot);
2213 ptr = btrfs_item_ptr_offset(eb, slot);
2214 cur_offset = 0;
2215
2216 while (cur_offset < item_size) {
2217 u32 name_len;
2218
2219 extref = (struct btrfs_inode_extref *)(ptr + cur_offset);
2220 parent = btrfs_inode_extref_parent(eb, extref);
2221 name_len = btrfs_inode_extref_name_len(eb, extref);
2222 ret = iterate(parent, name_len,
2223 (unsigned long)&extref->name, eb, ctx);
2224 if (ret)
2225 break;
2226
			cur_offset += name_len + sizeof(*extref);
2229 }
2230 free_extent_buffer(eb);
2231
2232 offset++;
2233 }
2234
2235 btrfs_release_path(path);
2236
2237 return ret;
2238 }
2239
2240 static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
2241 struct btrfs_path *path, iterate_irefs_t *iterate,
2242 void *ctx)
2243 {
2244 int ret;
2245 int found_refs = 0;
2246
2247 ret = iterate_inode_refs(inum, fs_root, path, iterate, ctx);
2248 if (!ret)
2249 ++found_refs;
2250 else if (ret != -ENOENT)
2251 return ret;
2252
2253 ret = iterate_inode_extrefs(inum, fs_root, path, iterate, ctx);
2254 if (ret == -ENOENT && found_refs)
2255 return 0;
2256
2257 return ret;
2258 }
2259
2260 /*
 * returns 0 if the path could be dumped (possibly truncated)
2262 * returns <0 in case of an error
2263 */
2264 static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
2265 struct extent_buffer *eb, void *ctx)
2266 {
2267 struct inode_fs_paths *ipath = ctx;
2268 char *fspath;
2269 char *fspath_min;
2270 int i = ipath->fspath->elem_cnt;
2271 const int s_ptr = sizeof(char *);
2272 u32 bytes_left;
2273
2274 bytes_left = ipath->fspath->bytes_left > s_ptr ?
2275 ipath->fspath->bytes_left - s_ptr : 0;
2276
2277 fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
2278 fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len,
2279 name_off, eb, inum, fspath_min, bytes_left);
2280 if (IS_ERR(fspath))
2281 return PTR_ERR(fspath);
2282
2283 if (fspath > fspath_min) {
2284 ipath->fspath->val[i] = (u64)(unsigned long)fspath;
2285 ++ipath->fspath->elem_cnt;
2286 ipath->fspath->bytes_left = fspath - fspath_min;
2287 } else {
2288 ++ipath->fspath->elem_missed;
2289 ipath->fspath->bytes_missing += fspath_min - fspath;
2290 ipath->fspath->bytes_left = 0;
2291 }
2292
2293 return 0;
2294 }
2295
2296 /*
 * this dumps all file system paths to the inode into the ipath struct, provided
 * it has been created large enough. each path is zero-terminated and accessed
2299 * from ipath->fspath->val[i].
2300 * when it returns, there are ipath->fspath->elem_cnt number of paths available
2301 * in ipath->fspath->val[]. when the allocated space wasn't sufficient, the
2302 * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise,
2303 * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
2304 * have been needed to return all paths.
2305 */
2306 int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
2307 {
2308 return iterate_irefs(inum, ipath->fs_root, ipath->btrfs_path,
2309 inode_to_path, ipath);
2310 }
2311
2312 struct btrfs_data_container *init_data_container(u32 total_bytes)
2313 {
2314 struct btrfs_data_container *data;
2315 size_t alloc_bytes;
2316
2317 alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
2318 data = kvmalloc(alloc_bytes, GFP_KERNEL);
2319 if (!data)
2320 return ERR_PTR(-ENOMEM);
2321
2322 if (total_bytes >= sizeof(*data)) {
2323 data->bytes_left = total_bytes - sizeof(*data);
2324 data->bytes_missing = 0;
2325 } else {
2326 data->bytes_missing = sizeof(*data) - total_bytes;
2327 data->bytes_left = 0;
2328 }
2329
2330 data->elem_cnt = 0;
2331 data->elem_missed = 0;
2332
2333 return data;
2334 }
2335
2336 /*
2337 * allocates space to return multiple file system paths for an inode.
 * total_bytes is the size to allocate; note that the space usable for actual
 * path information is total_bytes - sizeof(struct btrfs_data_container).
2340 * the returned pointer must be freed with free_ipath() in the end.
2341 */
2342 struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
2343 struct btrfs_path *path)
2344 {
2345 struct inode_fs_paths *ifp;
2346 struct btrfs_data_container *fspath;
2347
2348 fspath = init_data_container(total_bytes);
2349 if (IS_ERR(fspath))
2350 return ERR_CAST(fspath);
2351
2352 ifp = kmalloc(sizeof(*ifp), GFP_KERNEL);
2353 if (!ifp) {
2354 kvfree(fspath);
2355 return ERR_PTR(-ENOMEM);
2356 }
2357
2358 ifp->btrfs_path = path;
2359 ifp->fspath = fspath;
2360 ifp->fs_root = fs_root;
2361
2362 return ifp;
2363 }
2364
2365 void free_ipath(struct inode_fs_paths *ipath)
2366 {
2367 if (!ipath)
2368 return;
2369 kvfree(ipath->fspath);
2370 kfree(ipath);
2371 }
2372
2373 struct btrfs_backref_iter *btrfs_backref_iter_alloc(
2374 struct btrfs_fs_info *fs_info, gfp_t gfp_flag)
2375 {
2376 struct btrfs_backref_iter *ret;
2377
2378 ret = kzalloc(sizeof(*ret), gfp_flag);
2379 if (!ret)
2380 return NULL;
2381
2382 ret->path = btrfs_alloc_path();
2383 if (!ret->path) {
2384 kfree(ret);
2385 return NULL;
2386 }
2387
2388 /* Current backref iterator only supports iteration in commit root */
2389 ret->path->search_commit_root = 1;
2390 ret->path->skip_locking = 1;
2391 ret->fs_info = fs_info;
2392
2393 return ret;
2394 }
2395
2396 int btrfs_backref_iter_start(struct btrfs_backref_iter *iter, u64 bytenr)
2397 {
2398 struct btrfs_fs_info *fs_info = iter->fs_info;
2399 struct btrfs_path *path = iter->path;
2400 struct btrfs_extent_item *ei;
2401 struct btrfs_key key;
2402 int ret;
2403
2404 key.objectid = bytenr;
2405 key.type = BTRFS_METADATA_ITEM_KEY;
2406 key.offset = (u64)-1;
2407 iter->bytenr = bytenr;
2408
2409 ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
2410 if (ret < 0)
2411 return ret;
2412 if (ret == 0) {
2413 ret = -EUCLEAN;
2414 goto release;
2415 }
2416 if (path->slots[0] == 0) {
2417 WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
2418 ret = -EUCLEAN;
2419 goto release;
2420 }
2421 path->slots[0]--;
2422
2423 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2424 if ((key.type != BTRFS_EXTENT_ITEM_KEY &&
2425 key.type != BTRFS_METADATA_ITEM_KEY) || key.objectid != bytenr) {
2426 ret = -ENOENT;
2427 goto release;
2428 }
2429 memcpy(&iter->cur_key, &key, sizeof(key));
2430 iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
2431 path->slots[0]);
2432 iter->end_ptr = (u32)(iter->item_ptr +
2433 btrfs_item_size_nr(path->nodes[0], path->slots[0]));
2434 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
2435 struct btrfs_extent_item);
2436
2437 /*
	 * Only iteration on tree backrefs is supported for now.
	 *
	 * This is an extra precaution for non skinny-metadata filesystems,
	 * where EXTENT_ITEM is also used for tree blocks; there the extent
	 * flags are the only way to tell whether this is a tree block.
2443 */
2444 if (btrfs_extent_flags(path->nodes[0], ei) & BTRFS_EXTENT_FLAG_DATA) {
2445 ret = -ENOTSUPP;
2446 goto release;
2447 }
2448 iter->cur_ptr = (u32)(iter->item_ptr + sizeof(*ei));
2449
2450 /* If there is no inline backref, go search for keyed backref */
2451 if (iter->cur_ptr >= iter->end_ptr) {
2452 ret = btrfs_next_item(fs_info->extent_root, path);
2453
2454 /* No inline nor keyed ref */
2455 if (ret > 0) {
2456 ret = -ENOENT;
2457 goto release;
2458 }
2459 if (ret < 0)
2460 goto release;
2461
2462 btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key,
2463 path->slots[0]);
2464 if (iter->cur_key.objectid != bytenr ||
2465 (iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY &&
2466 iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY)) {
2467 ret = -ENOENT;
2468 goto release;
2469 }
2470 iter->cur_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
2471 path->slots[0]);
2472 iter->item_ptr = iter->cur_ptr;
2473 iter->end_ptr = (u32)(iter->item_ptr + btrfs_item_size_nr(
2474 path->nodes[0], path->slots[0]));
2475 }
2476
2477 return 0;
2478 release:
2479 btrfs_backref_iter_release(iter);
2480 return ret;
2481 }
2482
2483 /*
2484 * Go to the next backref item of current bytenr, can be either inlined or
2485 * keyed.
2486 *
2487 * Caller needs to check whether it's inline ref or not by iter->cur_key.
2488 *
2489 * Return 0 if we get next backref without problem.
2490 * Return >0 if there is no extra backref for this bytenr.
 * Return <0 if something went wrong.
2492 */
2493 int btrfs_backref_iter_next(struct btrfs_backref_iter *iter)
2494 {
2495 struct extent_buffer *eb = btrfs_backref_get_eb(iter);
2496 struct btrfs_path *path = iter->path;
2497 struct btrfs_extent_inline_ref *iref;
2498 int ret;
2499 u32 size;
2500
2501 if (btrfs_backref_iter_is_inline_ref(iter)) {
2502 /* We're still inside the inline refs */
2503 ASSERT(iter->cur_ptr < iter->end_ptr);
2504
2505 if (btrfs_backref_has_tree_block_info(iter)) {
2506 /* First tree block info */
2507 size = sizeof(struct btrfs_tree_block_info);
2508 } else {
2509 /* Use inline ref type to determine the size */
2510 int type;
2511
2512 iref = (struct btrfs_extent_inline_ref *)
2513 ((unsigned long)iter->cur_ptr);
2514 type = btrfs_extent_inline_ref_type(eb, iref);
2515
2516 size = btrfs_extent_inline_ref_size(type);
2517 }
2518 iter->cur_ptr += size;
2519 if (iter->cur_ptr < iter->end_ptr)
2520 return 0;
2521
2522 /* All inline items iterated, fall through */
2523 }
2524
	/* All inline refs (if any) are done, move on to the next keyed item */
2526 ret = btrfs_next_item(iter->fs_info->extent_root, iter->path);
2527 if (ret)
2528 return ret;
2529
2530 btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key, path->slots[0]);
2531 if (iter->cur_key.objectid != iter->bytenr ||
2532 (iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY &&
2533 iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY))
2534 return 1;
2535 iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
2536 path->slots[0]);
2537 iter->cur_ptr = iter->item_ptr;
2538 iter->end_ptr = iter->item_ptr + (u32)btrfs_item_size_nr(path->nodes[0],
2539 path->slots[0]);
2540 return 0;
2541 }
2542
2543 void btrfs_backref_init_cache(struct btrfs_fs_info *fs_info,
2544 struct btrfs_backref_cache *cache, int is_reloc)
2545 {
2546 int i;
2547
2548 cache->rb_root = RB_ROOT;
2549 for (i = 0; i < BTRFS_MAX_LEVEL; i++)
2550 INIT_LIST_HEAD(&cache->pending[i]);
2551 INIT_LIST_HEAD(&cache->changed);
2552 INIT_LIST_HEAD(&cache->detached);
2553 INIT_LIST_HEAD(&cache->leaves);
2554 INIT_LIST_HEAD(&cache->pending_edge);
2555 INIT_LIST_HEAD(&cache->useless_node);
2556 cache->fs_info = fs_info;
2557 cache->is_reloc = is_reloc;
2558 }
2559
2560 struct btrfs_backref_node *btrfs_backref_alloc_node(
2561 struct btrfs_backref_cache *cache, u64 bytenr, int level)
2562 {
2563 struct btrfs_backref_node *node;
2564
2565 ASSERT(level >= 0 && level < BTRFS_MAX_LEVEL);
2566 node = kzalloc(sizeof(*node), GFP_NOFS);
2567 if (!node)
2568 return node;
2569
2570 INIT_LIST_HEAD(&node->list);
2571 INIT_LIST_HEAD(&node->upper);
2572 INIT_LIST_HEAD(&node->lower);
2573 RB_CLEAR_NODE(&node->rb_node);
2574 cache->nr_nodes++;
2575 node->level = level;
2576 node->bytenr = bytenr;
2577
2578 return node;
2579 }
2580
2581 struct btrfs_backref_edge *btrfs_backref_alloc_edge(
2582 struct btrfs_backref_cache *cache)
2583 {
2584 struct btrfs_backref_edge *edge;
2585
2586 edge = kzalloc(sizeof(*edge), GFP_NOFS);
2587 if (edge)
2588 cache->nr_edges++;
2589 return edge;
2590 }
2591
2592 /*
2593 * Drop the backref node from cache, also cleaning up all its
2594 * upper edges and any uncached nodes in the path.
2595 *
2596 * This cleanup happens bottom up, thus the node should either
2597 * be the lowest node in the cache or a detached node.
2598 */
2599 void btrfs_backref_cleanup_node(struct btrfs_backref_cache *cache,
2600 struct btrfs_backref_node *node)
2601 {
2602 struct btrfs_backref_node *upper;
2603 struct btrfs_backref_edge *edge;
2604
2605 if (!node)
2606 return;
2607
2608 BUG_ON(!node->lowest && !node->detached);
2609 while (!list_empty(&node->upper)) {
2610 edge = list_entry(node->upper.next, struct btrfs_backref_edge,
2611 list[LOWER]);
2612 upper = edge->node[UPPER];
2613 list_del(&edge->list[LOWER]);
2614 list_del(&edge->list[UPPER]);
2615 btrfs_backref_free_edge(cache, edge);
2616
2617 /*
		 * Add the node to the leaf node list if no other child block
		 * is cached.
2620 */
2621 if (list_empty(&upper->lower)) {
2622 list_add_tail(&upper->lower, &cache->leaves);
2623 upper->lowest = 1;
2624 }
2625 }
2626
2627 btrfs_backref_drop_node(cache, node);
2628 }
2629
2630 /*
2631 * Release all nodes/edges from current cache
2632 */
2633 void btrfs_backref_release_cache(struct btrfs_backref_cache *cache)
2634 {
2635 struct btrfs_backref_node *node;
2636 int i;
2637
2638 while (!list_empty(&cache->detached)) {
2639 node = list_entry(cache->detached.next,
2640 struct btrfs_backref_node, list);
2641 btrfs_backref_cleanup_node(cache, node);
2642 }
2643
2644 while (!list_empty(&cache->leaves)) {
2645 node = list_entry(cache->leaves.next,
2646 struct btrfs_backref_node, lower);
2647 btrfs_backref_cleanup_node(cache, node);
2648 }
2649
2650 cache->last_trans = 0;
2651
2652 for (i = 0; i < BTRFS_MAX_LEVEL; i++)
2653 ASSERT(list_empty(&cache->pending[i]));
2654 ASSERT(list_empty(&cache->pending_edge));
2655 ASSERT(list_empty(&cache->useless_node));
2656 ASSERT(list_empty(&cache->changed));
2657 ASSERT(list_empty(&cache->detached));
2658 ASSERT(RB_EMPTY_ROOT(&cache->rb_root));
2659 ASSERT(!cache->nr_nodes);
2660 ASSERT(!cache->nr_edges);
2661 }
2662
2663 /*
2664 * Handle direct tree backref
2665 *
 * Direct tree backref means the backref item shows its parent bytenr
2667 * directly. This is for SHARED_BLOCK_REF backref (keyed or inlined).
2668 *
2669 * @ref_key: The converted backref key.
2670 * For keyed backref, it's the item key.
2671 * For inlined backref, objectid is the bytenr,
2672 * type is btrfs_inline_ref_type, offset is
2673 * btrfs_inline_ref_offset.
2674 */
2675 static int handle_direct_tree_backref(struct btrfs_backref_cache *cache,
2676 struct btrfs_key *ref_key,
2677 struct btrfs_backref_node *cur)
2678 {
2679 struct btrfs_backref_edge *edge;
2680 struct btrfs_backref_node *upper;
2681 struct rb_node *rb_node;
2682
2683 ASSERT(ref_key->type == BTRFS_SHARED_BLOCK_REF_KEY);
2684
2685 /* Only reloc root uses backref pointing to itself */
2686 if (ref_key->objectid == ref_key->offset) {
2687 struct btrfs_root *root;
2688
2689 cur->is_reloc_root = 1;
2690 /* Only reloc backref cache cares about a specific root */
2691 if (cache->is_reloc) {
2692 root = find_reloc_root(cache->fs_info, cur->bytenr);
2693 if (!root)
2694 return -ENOENT;
2695 cur->root = root;
2696 } else {
2697 /*
2698 * For generic purpose backref cache, reloc root node
2699 * is useless.
2700 */
2701 list_add(&cur->list, &cache->useless_node);
2702 }
2703 return 0;
2704 }
2705
2706 edge = btrfs_backref_alloc_edge(cache);
2707 if (!edge)
2708 return -ENOMEM;
2709
2710 rb_node = rb_simple_search(&cache->rb_root, ref_key->offset);
2711 if (!rb_node) {
2712 /* Parent node not yet cached */
2713 upper = btrfs_backref_alloc_node(cache, ref_key->offset,
2714 cur->level + 1);
2715 if (!upper) {
2716 btrfs_backref_free_edge(cache, edge);
2717 return -ENOMEM;
2718 }
2719
2720 /*
		 * Backrefs for the upper level block aren't cached yet, add
		 * the block to the pending list
2723 */
2724 list_add_tail(&edge->list[UPPER], &cache->pending_edge);
2725 } else {
2726 /* Parent node already cached */
2727 upper = rb_entry(rb_node, struct btrfs_backref_node, rb_node);
2728 ASSERT(upper->checked);
2729 INIT_LIST_HEAD(&edge->list[UPPER]);
2730 }
2731 btrfs_backref_link_edge(edge, cur, upper, LINK_LOWER);
2732 return 0;
2733 }
2734
2735 /*
2736 * Handle indirect tree backref
2737 *
 * Indirect tree backref means we only know which tree the node belongs to.
2739 * We still need to do a tree search to find out the parents. This is for
2740 * TREE_BLOCK_REF backref (keyed or inlined).
2741 *
2742 * @ref_key: The same as @ref_key in handle_direct_tree_backref()
2743 * @tree_key: The first key of this tree block.
 * @path:	A clean (released) path, to avoid allocating a path every time
 *		the function gets called.
2746 */
2747 static int handle_indirect_tree_backref(struct btrfs_backref_cache *cache,
2748 struct btrfs_path *path,
2749 struct btrfs_key *ref_key,
2750 struct btrfs_key *tree_key,
2751 struct btrfs_backref_node *cur)
2752 {
2753 struct btrfs_fs_info *fs_info = cache->fs_info;
2754 struct btrfs_backref_node *upper;
2755 struct btrfs_backref_node *lower;
2756 struct btrfs_backref_edge *edge;
2757 struct extent_buffer *eb;
2758 struct btrfs_root *root;
2759 struct rb_node *rb_node;
2760 int level;
2761 bool need_check = true;
2762 int ret;
2763
2764 root = btrfs_get_fs_root(fs_info, ref_key->offset, false);
2765 if (IS_ERR(root))
2766 return PTR_ERR(root);
2767 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
2768 cur->cowonly = 1;
2769
2770 if (btrfs_root_level(&root->root_item) == cur->level) {
2771 /* Tree root */
2772 ASSERT(btrfs_root_bytenr(&root->root_item) == cur->bytenr);
2773 /*
2774 * For reloc backref cache, we may ignore reloc root. But for
2775 * general purpose backref cache, we can't rely on
2776 * btrfs_should_ignore_reloc_root() as it may conflict with
2777 * current running relocation and lead to missing root.
2778 *
2779 * For general purpose backref cache, reloc root detection is
2780 * completely relying on direct backref (key->offset is parent
2781 * bytenr), thus only do such check for reloc cache.
2782 */
2783 if (btrfs_should_ignore_reloc_root(root) && cache->is_reloc) {
2784 btrfs_put_root(root);
2785 list_add(&cur->list, &cache->useless_node);
2786 } else {
2787 cur->root = root;
2788 }
2789 return 0;
2790 }
2791
2792 level = cur->level + 1;
2793
2794 /* Search the tree to find parent blocks referring to the block */
2795 path->search_commit_root = 1;
2796 path->skip_locking = 1;
2797 path->lowest_level = level;
2798 ret = btrfs_search_slot(NULL, root, tree_key, path, 0, 0);
2799 path->lowest_level = 0;
2800 if (ret < 0) {
2801 btrfs_put_root(root);
2802 return ret;
2803 }
2804 if (ret > 0 && path->slots[level] > 0)
2805 path->slots[level]--;
2806
2807 eb = path->nodes[level];
2808 if (btrfs_node_blockptr(eb, path->slots[level]) != cur->bytenr) {
2809 btrfs_err(fs_info,
2810 "couldn't find block (%llu) (level %d) in tree (%llu) with key (%llu %u %llu)",
2811 cur->bytenr, level - 1, root->root_key.objectid,
2812 tree_key->objectid, tree_key->type, tree_key->offset);
2813 btrfs_put_root(root);
2814 ret = -ENOENT;
2815 goto out;
2816 }
2817 lower = cur;
2818
2819 /* Add all nodes and edges in the path */
2820 for (; level < BTRFS_MAX_LEVEL; level++) {
2821 if (!path->nodes[level]) {
2822 ASSERT(btrfs_root_bytenr(&root->root_item) ==
2823 lower->bytenr);
2824 /* Same as previous should_ignore_reloc_root() call */
2825 if (btrfs_should_ignore_reloc_root(root) &&
2826 cache->is_reloc) {
2827 btrfs_put_root(root);
2828 list_add(&lower->list, &cache->useless_node);
2829 } else {
2830 lower->root = root;
2831 }
2832 break;
2833 }
2834
2835 edge = btrfs_backref_alloc_edge(cache);
2836 if (!edge) {
2837 btrfs_put_root(root);
2838 ret = -ENOMEM;
2839 goto out;
2840 }
2841
2842 eb = path->nodes[level];
2843 rb_node = rb_simple_search(&cache->rb_root, eb->start);
2844 if (!rb_node) {
2845 upper = btrfs_backref_alloc_node(cache, eb->start,
2846 lower->level + 1);
2847 if (!upper) {
2848 btrfs_put_root(root);
2849 btrfs_backref_free_edge(cache, edge);
2850 ret = -ENOMEM;
2851 goto out;
2852 }
2853 upper->owner = btrfs_header_owner(eb);
2854 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
2855 upper->cowonly = 1;
2856
2857 /*
2858 * If we know the block isn't shared we can avoid
2859 * checking its backrefs.
2860 */
2861 if (btrfs_block_can_be_shared(root, eb))
2862 upper->checked = 0;
2863 else
2864 upper->checked = 1;
2865
2866 /*
2867 * Add the block to pending list if we need to check its
2868 * backrefs, we only do this once while walking up a
2869 * tree as we will catch anything else later on.
2870 */
2871 if (!upper->checked && need_check) {
2872 need_check = false;
2873 list_add_tail(&edge->list[UPPER],
2874 &cache->pending_edge);
2875 } else {
2876 if (upper->checked)
2877 need_check = true;
2878 INIT_LIST_HEAD(&edge->list[UPPER]);
2879 }
2880 } else {
2881 upper = rb_entry(rb_node, struct btrfs_backref_node,
2882 rb_node);
2883 ASSERT(upper->checked);
2884 INIT_LIST_HEAD(&edge->list[UPPER]);
2885 if (!upper->owner)
2886 upper->owner = btrfs_header_owner(eb);
2887 }
2888 btrfs_backref_link_edge(edge, lower, upper, LINK_LOWER);
2889
2890 if (rb_node) {
2891 btrfs_put_root(root);
2892 break;
2893 }
2894 lower = upper;
2895 upper = NULL;
2896 }
2897 out:
2898 btrfs_release_path(path);
2899 return ret;
2900 }
2901
2902 /*
2903 * Add backref node @cur into @cache.
2904 *
 * NOTE: Even if the function returns 0, @cur is not yet cached as its upper
 * links aren't bi-directional yet; call btrfs_backref_finish_upper_links()
 * to finish the linkage.
2908 *
2909 * @path: Released path for indirect tree backref lookup
2910 * @iter: Released backref iter for extent tree search
2911 * @node_key: The first key of the tree block
2912 */
2913 int btrfs_backref_add_tree_node(struct btrfs_backref_cache *cache,
2914 struct btrfs_path *path,
2915 struct btrfs_backref_iter *iter,
2916 struct btrfs_key *node_key,
2917 struct btrfs_backref_node *cur)
2918 {
2919 struct btrfs_fs_info *fs_info = cache->fs_info;
2920 struct btrfs_backref_edge *edge;
2921 struct btrfs_backref_node *exist;
2922 int ret;
2923
2924 ret = btrfs_backref_iter_start(iter, cur->bytenr);
2925 if (ret < 0)
2926 return ret;
2927 /*
2928 * We skip the first btrfs_tree_block_info, as we don't use the key
2929 * stored in it, but fetch it from the tree block
2930 */
2931 if (btrfs_backref_has_tree_block_info(iter)) {
2932 ret = btrfs_backref_iter_next(iter);
2933 if (ret < 0)
2934 goto out;
2935 /* No extra backref? This means the tree block is corrupted */
2936 if (ret > 0) {
2937 ret = -EUCLEAN;
2938 goto out;
2939 }
2940 }
2941 WARN_ON(cur->checked);
2942 if (!list_empty(&cur->upper)) {
2943 /*
2944 * The backref was added previously when processing backref of
2945 * type BTRFS_TREE_BLOCK_REF_KEY
2946 */
2947 ASSERT(list_is_singular(&cur->upper));
2948 edge = list_entry(cur->upper.next, struct btrfs_backref_edge,
2949 list[LOWER]);
2950 ASSERT(list_empty(&edge->list[UPPER]));
2951 exist = edge->node[UPPER];
2952 /*
		 * Add the upper level block to the pending list if we need
		 * to check its backrefs
2955 */
2956 if (!exist->checked)
2957 list_add_tail(&edge->list[UPPER], &cache->pending_edge);
2958 } else {
2959 exist = NULL;
2960 }
2961
2962 for (; ret == 0; ret = btrfs_backref_iter_next(iter)) {
2963 struct extent_buffer *eb;
2964 struct btrfs_key key;
2965 int type;
2966
2967 cond_resched();
2968 eb = btrfs_backref_get_eb(iter);
2969
2970 key.objectid = iter->bytenr;
2971 if (btrfs_backref_iter_is_inline_ref(iter)) {
2972 struct btrfs_extent_inline_ref *iref;
2973
2974 /* Update key for inline backref */
2975 iref = (struct btrfs_extent_inline_ref *)
2976 ((unsigned long)iter->cur_ptr);
2977 type = btrfs_get_extent_inline_ref_type(eb, iref,
2978 BTRFS_REF_TYPE_BLOCK);
2979 if (type == BTRFS_REF_TYPE_INVALID) {
2980 ret = -EUCLEAN;
2981 goto out;
2982 }
2983 key.type = type;
2984 key.offset = btrfs_extent_inline_ref_offset(eb, iref);
2985 } else {
2986 key.type = iter->cur_key.type;
2987 key.offset = iter->cur_key.offset;
2988 }
2989
2990 /*
2991 * Parent node found and matches current inline ref, no need to
2992 * rebuild this node for this inline ref
2993 */
2994 if (exist &&
2995 ((key.type == BTRFS_TREE_BLOCK_REF_KEY &&
2996 exist->owner == key.offset) ||
2997 (key.type == BTRFS_SHARED_BLOCK_REF_KEY &&
2998 exist->bytenr == key.offset))) {
2999 exist = NULL;
3000 continue;
3001 }
3002
3003 /* SHARED_BLOCK_REF means key.offset is the parent bytenr */
3004 if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
3005 ret = handle_direct_tree_backref(cache, &key, cur);
3006 if (ret < 0)
3007 goto out;
3008 continue;
3009 } else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) {
3010 ret = -EINVAL;
3011 btrfs_print_v0_err(fs_info);
3012 btrfs_handle_fs_error(fs_info, ret, NULL);
3013 goto out;
3014 } else if (key.type != BTRFS_TREE_BLOCK_REF_KEY) {
3015 continue;
3016 }
3017
3018 /*
3019 * key.type == BTRFS_TREE_BLOCK_REF_KEY, inline ref offset
3020 * means the root objectid. We need to search the tree to get
3021 * its parent bytenr.
3022 */
3023 ret = handle_indirect_tree_backref(cache, path, &key, node_key,
3024 cur);
3025 if (ret < 0)
3026 goto out;
3027 }
3028 ret = 0;
3029 cur->checked = 1;
3030 WARN_ON(exist);
3031 out:
3032 btrfs_backref_iter_release(iter);
3033 return ret;
3034 }
3035
3036 /*
3037 * Finish the upwards linkage created by btrfs_backref_add_tree_node()
3038 */
3039 int btrfs_backref_finish_upper_links(struct btrfs_backref_cache *cache,
3040 struct btrfs_backref_node *start)
3041 {
3042 struct list_head *useless_node = &cache->useless_node;
3043 struct btrfs_backref_edge *edge;
3044 struct rb_node *rb_node;
3045 LIST_HEAD(pending_edge);
3046
3047 ASSERT(start->checked);
3048
	/* Insert this node into the cache if it's not COW-only */
3050 if (!start->cowonly) {
3051 rb_node = rb_simple_insert(&cache->rb_root, start->bytenr,
3052 &start->rb_node);
3053 if (rb_node)
3054 btrfs_backref_panic(cache->fs_info, start->bytenr,
3055 -EEXIST);
3056 list_add_tail(&start->lower, &cache->leaves);
3057 }
3058
3059 /*
3060 * Use breadth first search to iterate all related edges.
3061 *
3062 * The starting points are all the edges of this node
3063 */
3064 list_for_each_entry(edge, &start->upper, list[LOWER])
3065 list_add_tail(&edge->list[UPPER], &pending_edge);
3066
3067 while (!list_empty(&pending_edge)) {
3068 struct btrfs_backref_node *upper;
3069 struct btrfs_backref_node *lower;
3070
3071 edge = list_first_entry(&pending_edge,
3072 struct btrfs_backref_edge, list[UPPER]);
3073 list_del_init(&edge->list[UPPER]);
3074 upper = edge->node[UPPER];
3075 lower = edge->node[LOWER];
3076
3077 /* Parent is detached, no need to keep any edges */
3078 if (upper->detached) {
3079 list_del(&edge->list[LOWER]);
3080 btrfs_backref_free_edge(cache, edge);
3081
			/* Lower node is an orphan, queue it for cleanup */
3083 if (list_empty(&lower->upper))
3084 list_add(&lower->list, useless_node);
3085 continue;
3086 }
3087
3088 /*
3089 * All new nodes added in current build_backref_tree() haven't
3090 * been linked to the cache rb tree.
3091 * So if we have upper->rb_node populated, this means a cache
3092 * hit. We only need to link the edge, as @upper and all its
3093 * parents have already been linked.
3094 */
3095 if (!RB_EMPTY_NODE(&upper->rb_node)) {
3096 if (upper->lowest) {
3097 list_del_init(&upper->lower);
3098 upper->lowest = 0;
3099 }
3100
3101 list_add_tail(&edge->list[UPPER], &upper->lower);
3102 continue;
3103 }
3104
3105 /* Sanity check, we shouldn't have any unchecked nodes */
3106 if (!upper->checked) {
3107 ASSERT(0);
3108 return -EUCLEAN;
3109 }
3110
3111 /* Sanity check, COW-only node has non-COW-only parent */
3112 if (start->cowonly != upper->cowonly) {
3113 ASSERT(0);
3114 return -EUCLEAN;
3115 }
3116
3117 /* Only cache non-COW-only (subvolume trees) tree blocks */
3118 if (!upper->cowonly) {
3119 rb_node = rb_simple_insert(&cache->rb_root, upper->bytenr,
3120 &upper->rb_node);
3121 if (rb_node) {
3122 btrfs_backref_panic(cache->fs_info,
3123 upper->bytenr, -EEXIST);
3124 return -EUCLEAN;
3125 }
3126 }
3127
3128 list_add_tail(&edge->list[UPPER], &upper->lower);
3129
3130 /*
3131 * Also queue all the parent edges of this uncached node
3132 * to finish the upper linkage
3133 */
3134 list_for_each_entry(edge, &upper->upper, list[LOWER])
3135 list_add_tail(&edge->list[UPPER], &pending_edge);
3136 }
3137 return 0;
3138 }
3139
3140 void btrfs_backref_error_cleanup(struct btrfs_backref_cache *cache,
3141 struct btrfs_backref_node *node)
3142 {
3143 struct btrfs_backref_node *lower;
3144 struct btrfs_backref_node *upper;
3145 struct btrfs_backref_edge *edge;
3146
3147 while (!list_empty(&cache->useless_node)) {
3148 lower = list_first_entry(&cache->useless_node,
3149 struct btrfs_backref_node, list);
3150 list_del_init(&lower->list);
3151 }
3152 while (!list_empty(&cache->pending_edge)) {
3153 edge = list_first_entry(&cache->pending_edge,
3154 struct btrfs_backref_edge, list[UPPER]);
3155 list_del(&edge->list[UPPER]);
3156 list_del(&edge->list[LOWER]);
3157 lower = edge->node[LOWER];
3158 upper = edge->node[UPPER];
3159 btrfs_backref_free_edge(cache, edge);
3160
3161 /*
3162 * Lower is no longer linked to any upper backref nodes and
3163 * isn't in the cache, we can free it ourselves.
3164 */
3165 if (list_empty(&lower->upper) &&
3166 RB_EMPTY_NODE(&lower->rb_node))
3167 list_add(&lower->list, &cache->useless_node);
3168
3169 if (!RB_EMPTY_NODE(&upper->rb_node))
3170 continue;
3171
3172 /* Add this guy's upper edges to the list to process */
3173 list_for_each_entry(edge, &upper->upper, list[LOWER])
3174 list_add_tail(&edge->list[UPPER],
3175 &cache->pending_edge);
3176 if (list_empty(&upper->upper))
3177 list_add(&upper->list, &cache->useless_node);
3178 }
3179
3180 while (!list_empty(&cache->useless_node)) {
3181 lower = list_first_entry(&cache->useless_node,
3182 struct btrfs_backref_node, list);
3183 list_del_init(&lower->list);
3184 if (lower == node)
3185 node = NULL;
3186 btrfs_backref_drop_node(cache, lower);
3187 }
3188
3189 btrfs_backref_cleanup_node(cache, node);
3190 ASSERT(list_empty(&cache->useless_node) &&
3191 list_empty(&cache->pending_edge));
3192 }
3193