// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 STRATO. All rights reserved.
 */

#include <linux/mm.h>
#include <linux/rbtree.h>
#include <trace/events/btrfs.h>
#include "ctree.h"
#include "disk-io.h"
#include "backref.h"
#include "ulist.h"
#include "transaction.h"
#include "delayed-ref.h"
#include "locking.h"
#include "misc.h"

/* Just an arbitrary number so we can be sure this happened */
#define BACKREF_FOUND_SHARED 6

struct extent_inode_elem {
	u64 inum;
	u64 offset;
	struct extent_inode_elem *next;
};

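/*
 * Check whether the file extent item @fi in leaf @eb covers the logical
 * offset @extent_item_pos of the extent it points to (unless offsets are
 * ignored or the extent is compressed/encrypted/otherwise encoded). On a
 * match, allocate an extent_inode_elem for the inode/offset pair and
 * prepend it to the @eie list.
 *
 * Returns 0 on success, 1 if the position is outside this extent item,
 * -ENOMEM on allocation failure.
 */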
static int check_extent_in_eb(const struct btrfs_key *key,
			      const struct extent_buffer *eb,
			      const struct btrfs_file_extent_item *fi,
			      u64 extent_item_pos,
			      struct extent_inode_elem **eie,
			      bool ignore_offset)
{
	u64 offset = 0;
	struct extent_inode_elem *e;

	if (!ignore_offset &&
	    !btrfs_file_extent_compression(eb, fi) &&
	    !btrfs_file_extent_encryption(eb, fi) &&
	    !btrfs_file_extent_other_encoding(eb, fi)) {
		u64 data_offset;
		u64 data_len;

		data_offset = btrfs_file_extent_offset(eb, fi);
		data_len = btrfs_file_extent_num_bytes(eb, fi);

		if (extent_item_pos < data_offset ||
		    extent_item_pos >= data_offset + data_len)
			return 1;
		offset = extent_item_pos - data_offset;
	}

	e = kmalloc(sizeof(*e), GFP_NOFS);
	if (!e)
		return -ENOMEM;

	e->next = *eie;
	e->inum = key->objectid;
	e->offset = key->offset + offset;
	*eie = e;

	return 0;
}

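/* Walk the singly linked list of extent_inode_elem and free each entry. */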
static void free_inode_elem_list(struct extent_inode_elem *eie)
{
	struct extent_inode_elem *eie_next;

	for (; eie; eie = eie_next) {
		eie_next = eie->next;
		kfree(eie);
	}
}

static int find_extent_in_eb(const struct extent_buffer *eb,
			     u64 wanted_disk_byte, u64 extent_item_pos,
			     struct extent_inode_elem **eie,
			     bool ignore_offset)
{
	u64 disk_byte;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int slot;
	int nritems;
	int extent_type;
	int ret;

	/*
	 * From the shared data ref we only have the leaf but we need the
	 * key. Thus, we must look at all items and check whether one of them
	 * (or several) references our extent item.
	 */
	nritems = btrfs_header_nritems(eb);
	for (slot = 0; slot < nritems; ++slot) {
		btrfs_item_key_to_cpu(eb, &key, slot);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(eb, fi);
		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
			continue;
		/* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		if (disk_byte != wanted_disk_byte)
			continue;

		ret = check_extent_in_eb(&key, eb, fi, extent_item_pos, eie, ignore_offset);
		if (ret < 0)
			return ret;
	}

	return 0;
}

struct preftree {
	struct rb_root_cached root;
	unsigned int count;
};

#define PREFTREE_INIT	{ .root = RB_ROOT_CACHED, .count = 0 }

struct preftrees {
	struct preftree direct;    /* BTRFS_SHARED_[DATA|BLOCK]_REF_KEY */
	struct preftree indirect;  /* BTRFS_[TREE_BLOCK|EXTENT_DATA]_REF_KEY */
	struct preftree indirect_missing_keys;
};

/*
 * Checks for a shared extent during backref search.
 *
 * The share_count tracks prelim_refs (direct and indirect) having a
 * ref->count >0:
 * - incremented when a ref->count transitions to >0
 * - decremented when a ref->count transitions to <1
 */
struct share_check {
	u64 root_objectid;
	u64 inum;
	int share_count;
};

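/*
 * Report BACKREF_FOUND_SHARED once the share_check tracks more than one
 * ref with a positive count.
 */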
static inline int extent_is_shared(struct share_check *sc)
{
	return (sc && sc->share_count > 1) ? BACKREF_FOUND_SHARED : 0;
}

static struct kmem_cache *btrfs_prelim_ref_cache;

int __init btrfs_prelim_ref_init(void)
{
	btrfs_prelim_ref_cache = kmem_cache_create("btrfs_prelim_ref",
					sizeof(struct prelim_ref),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_prelim_ref_cache)
		return -ENOMEM;
	return 0;
}

void __cold btrfs_prelim_ref_exit(void)
{
	kmem_cache_destroy(btrfs_prelim_ref_cache);
}

static void free_pref(struct prelim_ref *ref)
{
	kmem_cache_free(btrfs_prelim_ref_cache, ref);
}

/*
 * Return 0 when both refs are for the same block (and can be merged).
 * A -1 return indicates ref1 is a 'lower' block than ref2, while 1
 * indicates a 'higher' block.
 */
static int prelim_ref_compare(struct prelim_ref *ref1,
			      struct prelim_ref *ref2)
{
	if (ref1->level < ref2->level)
		return -1;
	if (ref1->level > ref2->level)
		return 1;
	if (ref1->root_id < ref2->root_id)
		return -1;
	if (ref1->root_id > ref2->root_id)
		return 1;
	if (ref1->key_for_search.type < ref2->key_for_search.type)
		return -1;
	if (ref1->key_for_search.type > ref2->key_for_search.type)
		return 1;
	if (ref1->key_for_search.objectid < ref2->key_for_search.objectid)
		return -1;
	if (ref1->key_for_search.objectid > ref2->key_for_search.objectid)
		return 1;
	if (ref1->key_for_search.offset < ref2->key_for_search.offset)
		return -1;
	if (ref1->key_for_search.offset > ref2->key_for_search.offset)
		return 1;
	if (ref1->parent < ref2->parent)
		return -1;
	if (ref1->parent > ref2->parent)
		return 1;

	return 0;
}

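/*
 * Keep sc->share_count in sync with a ref whose count changes from
 * @oldcount to @newcount: increment on a transition to a positive count,
 * decrement on a transition away from one.
 */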
static void update_share_count(struct share_check *sc, int oldcount,
			       int newcount)
{
	if ((!sc) || (oldcount == 0 && newcount < 1))
		return;

	if (oldcount > 0 && newcount < 1)
		sc->share_count--;
	else if (oldcount < 1 && newcount > 0)
		sc->share_count++;
}

/*
 * Add @newref to the @root rbtree, merging identical refs.
 *
 * Callers should assume that newref has been freed after calling.
 */
static void prelim_ref_insert(const struct btrfs_fs_info *fs_info,
			      struct preftree *preftree,
			      struct prelim_ref *newref,
			      struct share_check *sc)
{
	struct rb_root_cached *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct prelim_ref *ref;
	int result;
	bool leftmost = true;

	root = &preftree->root;
	p = &root->rb_root.rb_node;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct prelim_ref, rbnode);
		result = prelim_ref_compare(ref, newref);
		if (result < 0) {
			p = &(*p)->rb_left;
		} else if (result > 0) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			/* Identical refs, merge them and free @newref */
			struct extent_inode_elem *eie = ref->inode_list;

			while (eie && eie->next)
				eie = eie->next;

			if (!eie)
				ref->inode_list = newref->inode_list;
			else
				eie->next = newref->inode_list;
			trace_btrfs_prelim_ref_merge(fs_info, ref, newref,
						     preftree->count);
			/*
			 * A delayed ref can have newref->count < 0.
			 * The ref->count is updated to follow any
			 * BTRFS_[ADD|DROP]_DELAYED_REF actions.
			 */
			update_share_count(sc, ref->count,
					   ref->count + newref->count);
			ref->count += newref->count;
			free_pref(newref);
			return;
		}
	}

	update_share_count(sc, 0, newref->count);
	preftree->count++;
	trace_btrfs_prelim_ref_insert(fs_info, newref, NULL, preftree->count);
	rb_link_node(&newref->rbnode, parent, p);
	rb_insert_color_cached(&newref->rbnode, root, leftmost);
}

/*
 * Release the entire tree. We don't care about internal consistency so
 * just free everything and then reset the tree root.
 */
static void prelim_release(struct preftree *preftree)
{
	struct prelim_ref *ref, *next_ref;

	rbtree_postorder_for_each_entry_safe(ref, next_ref,
					     &preftree->root.rb_root, rbnode)
		free_pref(ref);

	preftree->root = RB_ROOT_CACHED;
	preftree->count = 0;
}

/*
 * The rules for all callers of this function are:
 * - obtaining the parent is the goal
 * - if you add a key, you must know that it is a correct key
 * - if you cannot add the parent or a correct key, then we will look into the
 *   block later to set a correct key
 *
 * delayed refs
 * ============
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    -   |     -
 *      key to resolve |    -   |     y    |    y   |     y
 *  tree block logical |    -   |     -    |    -   |     -
 *  root for resolving |    y   |     y    |    y   |     y
 *
 * - column 1:       we have the parent -> done
 * - column 2, 3, 4: we use the key to find the parent
 *
 * on disk refs (inline or keyed)
 * ==============================
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    y   |     -
 *      key to resolve |    -   |     -    |    -   |     y
 *  tree block logical |    y   |     y    |    y   |     y
 *  root for resolving |    -   |     y    |    y   |     y
 *
 * - column 1, 3: we have the parent -> done
 * - column 2:    we take the first key from the block to find the parent
 *                (see add_missing_keys)
 * - column 4:    we use the key to find the parent
 *
 * Additional information that's available but not required to find the parent
 * block might help in merging entries to gain some speed.
 */
static int add_prelim_ref(const struct btrfs_fs_info *fs_info,
			  struct preftree *preftree, u64 root_id,
			  const struct btrfs_key *key, int level, u64 parent,
			  u64 wanted_disk_byte, int count,
			  struct share_check *sc, gfp_t gfp_mask)
{
	struct prelim_ref *ref;

	if (root_id == BTRFS_DATA_RELOC_TREE_OBJECTID)
		return 0;

	ref = kmem_cache_alloc(btrfs_prelim_ref_cache, gfp_mask);
	if (!ref)
		return -ENOMEM;

	ref->root_id = root_id;
	if (key)
		ref->key_for_search = *key;
	else
		memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));

	ref->inode_list = NULL;
	ref->level = level;
	ref->count = count;
	ref->parent = parent;
	ref->wanted_disk_byte = wanted_disk_byte;
	prelim_ref_insert(fs_info, preftree, ref, sc);
	return extent_is_shared(sc);
}

/* direct refs use root == 0, key == NULL */
static int add_direct_ref(const struct btrfs_fs_info *fs_info,
			  struct preftrees *preftrees, int level, u64 parent,
			  u64 wanted_disk_byte, int count,
			  struct share_check *sc, gfp_t gfp_mask)
{
	return add_prelim_ref(fs_info, &preftrees->direct, 0, NULL, level,
			      parent, wanted_disk_byte, count, sc, gfp_mask);
}

/* indirect refs use parent == 0 */
static int add_indirect_ref(const struct btrfs_fs_info *fs_info,
			    struct preftrees *preftrees, u64 root_id,
			    const struct btrfs_key *key, int level,
			    u64 wanted_disk_byte, int count,
			    struct share_check *sc, gfp_t gfp_mask)
{
	struct preftree *tree = &preftrees->indirect;

	if (!key)
		tree = &preftrees->indirect_missing_keys;
	return add_prelim_ref(fs_info, tree, root_id, key, level, 0,
			      wanted_disk_byte, count, sc, gfp_mask);
}

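/*
 * Check whether the direct preftree already holds a (shared data) ref with
 * @bytenr as its parent. Such refs are inserted with root 0, a zeroed key
 * and level 0, so a zeroed target with only ->parent set compares equal.
 */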
static int is_shared_data_backref(struct preftrees *preftrees, u64 bytenr)
{
	struct rb_node **p = &preftrees->direct.root.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct prelim_ref *ref = NULL;
	struct prelim_ref target = {};
	int result;

	target.parent = bytenr;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct prelim_ref, rbnode);
		result = prelim_ref_compare(ref, &target);

		if (result < 0)
			p = &(*p)->rb_left;
		else if (result > 0)
			p = &(*p)->rb_right;
		else
			return 1;
	}
	return 0;
}

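/*
 * Collect the logical addresses of all leaves holding file extent items
 * that an indirect ref resolves to (or, above level 0, the node referenced
 * by the path) into the @parents ulist. With @extent_item_pos given, also
 * attach the matching inode/offset pairs to each parent entry.
 */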
static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
			   struct ulist *parents,
			   struct preftrees *preftrees, struct prelim_ref *ref,
			   int level, u64 time_seq, const u64 *extent_item_pos,
			   bool ignore_offset)
{
	int ret = 0;
	int slot;
	struct extent_buffer *eb;
	struct btrfs_key key;
	struct btrfs_key *key_for_search = &ref->key_for_search;
	struct btrfs_file_extent_item *fi;
	struct extent_inode_elem *eie = NULL, *old = NULL;
	u64 disk_byte;
	u64 wanted_disk_byte = ref->wanted_disk_byte;
	u64 count = 0;
	u64 data_offset;

	if (level != 0) {
		eb = path->nodes[level];
		ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
		if (ret < 0)
			return ret;
		return 0;
	}

	/*
	 * 1. We normally enter this function with the path already pointing to
	 *    the first item to check. But sometimes, we may enter it with
	 *    slot == nritems.
	 * 2. We are searching for normal backref but bytenr of this leaf
	 *    matches shared data backref
	 * 3. The leaf owner is not equal to the root we are searching
	 *
	 * For these cases, go to the next leaf before we continue.
	 */
	eb = path->nodes[0];
	if (path->slots[0] >= btrfs_header_nritems(eb) ||
	    is_shared_data_backref(preftrees, eb->start) ||
	    ref->root_id != btrfs_header_owner(eb)) {
		if (time_seq == SEQ_LAST)
			ret = btrfs_next_leaf(root, path);
		else
			ret = btrfs_next_old_leaf(root, path, time_seq);
	}

	while (!ret && count < ref->count) {
		eb = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(eb, &key, slot);

		if (key.objectid != key_for_search->objectid ||
		    key.type != BTRFS_EXTENT_DATA_KEY)
			break;

		/*
		 * We are searching for normal backref but bytenr of this leaf
		 * matches shared data backref, OR
		 * the leaf owner is not equal to the root we are searching for
		 */
		if (slot == 0 &&
		    (is_shared_data_backref(preftrees, eb->start) ||
		     ref->root_id != btrfs_header_owner(eb))) {
			if (time_seq == SEQ_LAST)
				ret = btrfs_next_leaf(root, path);
			else
				ret = btrfs_next_old_leaf(root, path, time_seq);
			continue;
		}
		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		data_offset = btrfs_file_extent_offset(eb, fi);

		if (disk_byte == wanted_disk_byte) {
			eie = NULL;
			old = NULL;
			if (ref->key_for_search.offset == key.offset - data_offset)
				count++;
			else
				goto next;
			if (extent_item_pos) {
				ret = check_extent_in_eb(&key, eb, fi,
						*extent_item_pos,
						&eie, ignore_offset);
				if (ret < 0)
					break;
			}
			if (ret > 0)
				goto next;
			ret = ulist_add_merge_ptr(parents, eb->start,
						  eie, (void **)&old, GFP_NOFS);
			if (ret < 0)
				break;
			if (!ret && extent_item_pos) {
				while (old->next)
					old = old->next;
				old->next = eie;
			}
			eie = NULL;
		}
next:
		if (time_seq == SEQ_LAST)
			ret = btrfs_next_item(root, path);
		else
			ret = btrfs_next_old_item(root, path, time_seq);
	}

	if (ret > 0)
		ret = 0;
	else if (ret < 0)
		free_inode_elem_list(eie);
	return ret;
}

/*
 * resolve an indirect backref in the form (root_id, key, level)
 * to a logical address
 */
static int resolve_indirect_ref(struct btrfs_fs_info *fs_info,
				struct btrfs_path *path, u64 time_seq,
				struct preftrees *preftrees,
				struct prelim_ref *ref, struct ulist *parents,
				const u64 *extent_item_pos, bool ignore_offset)
{
	struct btrfs_root *root;
	struct extent_buffer *eb;
	int ret = 0;
	int root_level;
	int level = ref->level;
	struct btrfs_key search_key = ref->key_for_search;

	/*
	 * If we're search_commit_root we could possibly be holding locks on
	 * other tree nodes. This happens when qgroup does backref walks when
	 * adding new delayed refs. To deal with this we need to look in the
	 * cache for the root, and if we don't find it then we need to search
	 * the tree_root's commit root, hence the btrfs_get_fs_root_commit_root
	 * usage here.
	 */
	if (path->search_commit_root)
		root = btrfs_get_fs_root_commit_root(fs_info, path, ref->root_id);
	else
		root = btrfs_get_fs_root(fs_info, ref->root_id, false);
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);
		goto out_free;
	}

	if (!path->search_commit_root &&
	    test_bit(BTRFS_ROOT_DELETING, &root->state)) {
		ret = -ENOENT;
		goto out;
	}

	if (btrfs_is_testing(fs_info)) {
		ret = -ENOENT;
		goto out;
	}

	if (path->search_commit_root)
		root_level = btrfs_header_level(root->commit_root);
	else if (time_seq == SEQ_LAST)
		root_level = btrfs_header_level(root->node);
	else
		root_level = btrfs_old_root_level(root, time_seq);

	if (root_level + 1 == level)
		goto out;

	/*
	 * We can often find data backrefs with an offset that is too large
	 * (>= LLONG_MAX, maximum allowed file offset) due to underflows when
	 * subtracting a file's offset with the data offset of its
	 * corresponding extent data item. This can happen for example in the
	 * clone ioctl.
	 *
	 * So if we detect such a case we set the search key's offset to zero
	 * to make sure we will find the matching file extent item at
	 * add_all_parents(), otherwise we will miss it because the offset
	 * taken from the backref is much larger than the offset of the file
	 * extent item. This can make us scan a very large number of file
	 * extent items, but at least it will not make us miss any.
	 *
	 * This is an ugly workaround for a behaviour that should have never
	 * existed, but it does and a fix for the clone ioctl would touch a lot
	 * of places, cause backwards incompatibility and would not fix the
	 * problem for extents cloned with older kernels.
	 */
	if (search_key.type == BTRFS_EXTENT_DATA_KEY &&
	    search_key.offset >= LLONG_MAX)
		search_key.offset = 0;
	path->lowest_level = level;
	if (time_seq == SEQ_LAST)
		ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	else
		ret = btrfs_search_old_slot(root, &search_key, path, time_seq);

	btrfs_debug(fs_info,
		"search slot in root %llu (level %d, ref count %d) returned %d for key (%llu %u %llu)",
		 ref->root_id, level, ref->count, ret,
		 ref->key_for_search.objectid, ref->key_for_search.type,
		 ref->key_for_search.offset);
	if (ret < 0)
		goto out;

	eb = path->nodes[level];
	while (!eb) {
		if (WARN_ON(!level)) {
			ret = 1;
			goto out;
		}
		level--;
		eb = path->nodes[level];
	}

	ret = add_all_parents(root, path, parents, preftrees, ref, level,
			      time_seq, extent_item_pos, ignore_offset);
out:
	btrfs_put_root(root);
out_free:
	path->lowest_level = 0;
	btrfs_release_path(path);
	return ret;
}

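/* The aux field of a ulist node stashes the inode list as a pointer. */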
static struct extent_inode_elem *
unode_aux_to_inode_list(struct ulist_node *node)
{
	if (!node)
		return NULL;
	return (struct extent_inode_elem *)(uintptr_t)node->aux;
}

/*
 * We maintain three separate rbtrees: one for direct refs, one for
 * indirect refs which have a key, and one for indirect refs which do not
 * have a key. Each tree does merge on insertion.
 *
 * Once all of the references are located, we iterate over the tree of
 * indirect refs with missing keys. An appropriate key is located and
 * the ref is moved onto the tree for indirect refs. After all missing
 * keys are thus located, we iterate over the indirect ref tree, resolve
 * each reference, and then insert the resolved reference onto the
 * direct tree (merging there too).
 *
 * New backrefs (i.e., for parent nodes) are added to the appropriate
 * rbtree as they are encountered. The new backrefs are subsequently
 * resolved as above.
 */
static int resolve_indirect_refs(struct btrfs_fs_info *fs_info,
				 struct btrfs_path *path, u64 time_seq,
				 struct preftrees *preftrees,
				 const u64 *extent_item_pos,
				 struct share_check *sc, bool ignore_offset)
{
	int err;
	int ret = 0;
	struct ulist *parents;
	struct ulist_node *node;
	struct ulist_iterator uiter;
	struct rb_node *rnode;

	parents = ulist_alloc(GFP_NOFS);
	if (!parents)
		return -ENOMEM;

	/*
	 * We could trade memory usage for performance here by iterating
	 * the tree, allocating new refs for each insertion, and then
	 * freeing the entire indirect tree when we're done. In some test
	 * cases, the tree can grow quite large (~200k objects).
	 */
	while ((rnode = rb_first_cached(&preftrees->indirect.root))) {
		struct prelim_ref *ref;

		ref = rb_entry(rnode, struct prelim_ref, rbnode);
		if (WARN(ref->parent,
			 "BUG: direct ref found in indirect tree")) {
			ret = -EINVAL;
			goto out;
		}

		rb_erase_cached(&ref->rbnode, &preftrees->indirect.root);
		preftrees->indirect.count--;

		if (ref->count == 0) {
			free_pref(ref);
			continue;
		}

		if (sc && sc->root_objectid &&
		    ref->root_id != sc->root_objectid) {
			free_pref(ref);
			ret = BACKREF_FOUND_SHARED;
			goto out;
		}
		err = resolve_indirect_ref(fs_info, path, time_seq, preftrees,
					   ref, parents, extent_item_pos,
					   ignore_offset);
		/*
		 * We can only tolerate ENOENT; otherwise we should catch the
		 * error and return directly.
		 */
		if (err == -ENOENT) {
			prelim_ref_insert(fs_info, &preftrees->direct, ref,
					  NULL);
			continue;
		} else if (err) {
			free_pref(ref);
			ret = err;
			goto out;
		}

		/* we put the first parent into the ref at hand */
		ULIST_ITER_INIT(&uiter);
		node = ulist_next(parents, &uiter);
		ref->parent = node ? node->val : 0;
		ref->inode_list = unode_aux_to_inode_list(node);

		/* Add a prelim_ref(s) for any other parent(s). */
		while ((node = ulist_next(parents, &uiter))) {
			struct prelim_ref *new_ref;

			new_ref = kmem_cache_alloc(btrfs_prelim_ref_cache,
						   GFP_NOFS);
			if (!new_ref) {
				free_pref(ref);
				ret = -ENOMEM;
				goto out;
			}
			memcpy(new_ref, ref, sizeof(*ref));
			new_ref->parent = node->val;
			new_ref->inode_list = unode_aux_to_inode_list(node);
			prelim_ref_insert(fs_info, &preftrees->direct,
					  new_ref, NULL);
		}

		/*
		 * Now it's a direct ref, put it in the direct tree. We must
		 * do this last because the ref could be merged/freed here.
		 */
		prelim_ref_insert(fs_info, &preftrees->direct, ref, NULL);

		ulist_reinit(parents);
		cond_resched();
	}
out:
	ulist_free(parents);
	return ret;
}

/*
 * read tree blocks and add keys where required.
 */
static int add_missing_keys(struct btrfs_fs_info *fs_info,
			    struct preftrees *preftrees, bool lock)
{
	struct prelim_ref *ref;
	struct extent_buffer *eb;
	struct preftree *tree = &preftrees->indirect_missing_keys;
	struct rb_node *node;

	while ((node = rb_first_cached(&tree->root))) {
		ref = rb_entry(node, struct prelim_ref, rbnode);
		rb_erase_cached(node, &tree->root);

		BUG_ON(ref->parent);	/* should not be a direct ref */
		BUG_ON(ref->key_for_search.type);
		BUG_ON(!ref->wanted_disk_byte);

		eb = read_tree_block(fs_info, ref->wanted_disk_byte, 0,
				     ref->level - 1, NULL);
		if (IS_ERR(eb)) {
			free_pref(ref);
			return PTR_ERR(eb);
		} else if (!extent_buffer_uptodate(eb)) {
			free_pref(ref);
			free_extent_buffer(eb);
			return -EIO;
		}
		if (lock)
			btrfs_tree_read_lock(eb);
		if (btrfs_header_level(eb) == 0)
			btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0);
		else
			btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0);
		if (lock)
			btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
		prelim_ref_insert(fs_info, &preftrees->indirect, ref, NULL);
		cond_resched();
	}
	return 0;
}

/*
 * add all currently queued delayed refs from this head whose seq nr is
 * smaller than or equal to @seq to the preftrees
 */
static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_head *head, u64 seq,
			    struct preftrees *preftrees, struct share_check *sc)
{
	struct btrfs_delayed_ref_node *node;
	struct btrfs_delayed_extent_op *extent_op = head->extent_op;
	struct btrfs_key key;
	struct btrfs_key tmp_op_key;
	struct rb_node *n;
	int count;
	int ret = 0;

	if (extent_op && extent_op->update_key)
		btrfs_disk_key_to_cpu(&tmp_op_key, &extent_op->key);

	spin_lock(&head->lock);
	for (n = rb_first_cached(&head->ref_tree); n; n = rb_next(n)) {
		node = rb_entry(n, struct btrfs_delayed_ref_node,
				ref_node);
		if (node->seq > seq)
			continue;

		switch (node->action) {
		case BTRFS_ADD_DELAYED_EXTENT:
		case BTRFS_UPDATE_DELAYED_HEAD:
			WARN_ON(1);
			continue;
		case BTRFS_ADD_DELAYED_REF:
			count = node->ref_mod;
			break;
		case BTRFS_DROP_DELAYED_REF:
			count = node->ref_mod * -1;
			break;
		default:
			BUG();
		}
		switch (node->type) {
		case BTRFS_TREE_BLOCK_REF_KEY: {
			/* NORMAL INDIRECT METADATA backref */
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);
			ret = add_indirect_ref(fs_info, preftrees, ref->root,
					       &tmp_op_key, ref->level + 1,
					       node->bytenr, count, sc,
					       GFP_ATOMIC);
			break;
		}
		case BTRFS_SHARED_BLOCK_REF_KEY: {
			/* SHARED DIRECT METADATA backref */
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);

			ret = add_direct_ref(fs_info, preftrees, ref->level + 1,
					     ref->parent, node->bytenr, count,
					     sc, GFP_ATOMIC);
			break;
		}
		case BTRFS_EXTENT_DATA_REF_KEY: {
			/* NORMAL INDIRECT DATA backref */
			struct btrfs_delayed_data_ref *ref;

			ref = btrfs_delayed_node_to_data_ref(node);

			key.objectid = ref->objectid;
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = ref->offset;

			/*
			 * Found an inum that doesn't match our known inum, we
			 * know it's shared.
			 */
			if (sc && sc->inum && ref->objectid != sc->inum) {
				ret = BACKREF_FOUND_SHARED;
				goto out;
			}

			ret = add_indirect_ref(fs_info, preftrees, ref->root,
					       &key, 0, node->bytenr, count, sc,
					       GFP_ATOMIC);
			break;
		}
		case BTRFS_SHARED_DATA_REF_KEY: {
			/* SHARED DIRECT FULL backref */
			struct btrfs_delayed_data_ref *ref;

			ref = btrfs_delayed_node_to_data_ref(node);

			ret = add_direct_ref(fs_info, preftrees, 0, ref->parent,
					     node->bytenr, count, sc,
					     GFP_ATOMIC);
			break;
		}
		default:
			WARN_ON(1);
		}
		/*
		 * We must ignore BACKREF_FOUND_SHARED until all delayed
		 * refs have been checked.
		 */
		if (ret && (ret != BACKREF_FOUND_SHARED))
			break;
	}
	if (!ret)
		ret = extent_is_shared(sc);
out:
	spin_unlock(&head->lock);
	return ret;
}

/*
 * add all inline backrefs for bytenr to the list
 *
 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
 */
static int add_inline_refs(const struct btrfs_fs_info *fs_info,
			   struct btrfs_path *path, u64 bytenr,
			   int *info_level, struct preftrees *preftrees,
			   struct share_check *sc)
{
	int ret = 0;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	unsigned long ptr;
	unsigned long end;
	struct btrfs_extent_item *ei;
	u64 flags;
	u64 item_size;

	/*
	 * enumerate all inline refs
	 */
	leaf = path->nodes[0];
	slot = path->slots[0];

	item_size = btrfs_item_size_nr(leaf, slot);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);
	btrfs_item_key_to_cpu(leaf, &found_key, slot);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
	    flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		struct btrfs_tree_block_info *info;

		info = (struct btrfs_tree_block_info *)ptr;
		*info_level = btrfs_tree_block_level(leaf, info);
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	} else if (found_key.type == BTRFS_METADATA_ITEM_KEY) {
		*info_level = found_key.offset;
	} else {
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
	}

	while (ptr < end) {
		struct btrfs_extent_inline_ref *iref;
		u64 offset;
		int type;

		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_get_extent_inline_ref_type(leaf, iref,
							BTRFS_REF_TYPE_ANY);
		if (type == BTRFS_REF_TYPE_INVALID)
			return -EUCLEAN;

		offset = btrfs_extent_inline_ref_offset(leaf, iref);

		switch (type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = add_direct_ref(fs_info, preftrees,
					     *info_level + 1, offset,
					     bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = (struct btrfs_shared_data_ref *)(iref + 1);
			count = btrfs_shared_data_ref_count(leaf, sdref);

			ret = add_direct_ref(fs_info, preftrees, 0, offset,
					     bytenr, count, sc, GFP_NOFS);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = add_indirect_ref(fs_info, preftrees, offset,
					       NULL, *info_level + 1,
					       bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);

			if (sc && sc->inum && key.objectid != sc->inum) {
				ret = BACKREF_FOUND_SHARED;
				break;
			}

			root = btrfs_extent_data_ref_root(leaf, dref);

			ret = add_indirect_ref(fs_info, preftrees, root,
					       &key, 0, bytenr, count,
					       sc, GFP_NOFS);
			break;
		}
		default:
			WARN_ON(1);
		}
		if (ret)
			return ret;
		ptr += btrfs_extent_inline_ref_size(type);
	}

	return 0;
}

/*
 * add all non-inline backrefs for bytenr to the list
 *
 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
 */
static int add_keyed_refs(struct btrfs_fs_info *fs_info,
			  struct btrfs_path *path, u64 bytenr,
			  int info_level, struct preftrees *preftrees,
			  struct share_check *sc)
{
	struct btrfs_root *extent_root = fs_info->extent_root;
	int ret;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	while (1) {
		ret = btrfs_next_item(extent_root, path);
		if (ret < 0)
			break;
		if (ret) {
			ret = 0;
			break;
		}

		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);

		if (key.objectid != bytenr)
			break;
		if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
			continue;
		if (key.type > BTRFS_SHARED_DATA_REF_KEY)
			break;

		switch (key.type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			/* SHARED DIRECT METADATA backref */
			ret = add_direct_ref(fs_info, preftrees,
					     info_level + 1, key.offset,
					     bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			/* SHARED DIRECT FULL backref */
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = btrfs_item_ptr(leaf, slot,
					       struct btrfs_shared_data_ref);
			count = btrfs_shared_data_ref_count(leaf, sdref);
			ret = add_direct_ref(fs_info, preftrees, 0,
					     key.offset, bytenr, count,
					     sc, GFP_NOFS);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			/* NORMAL INDIRECT METADATA backref */
			ret = add_indirect_ref(fs_info, preftrees, key.offset,
					       NULL, info_level + 1, bytenr,
					       1, NULL, GFP_NOFS);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			/* NORMAL INDIRECT DATA backref */
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = btrfs_item_ptr(leaf, slot,
					      struct btrfs_extent_data_ref);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);

			if (sc && sc->inum && key.objectid != sc->inum) {
				ret = BACKREF_FOUND_SHARED;
				break;
			}

			root = btrfs_extent_data_ref_root(leaf, dref);
			ret = add_indirect_ref(fs_info, preftrees, root,
					       &key, 0, bytenr, count,
					       sc, GFP_NOFS);
			break;
		}
		default:
			WARN_ON(1);
		}
		if (ret)
			return ret;
	}

	return ret;
}

/*
 * this adds all existing backrefs (inline backrefs, backrefs and delayed
 * refs) for the given bytenr to the refs list, merges duplicates and resolves
 * indirect refs to their parent bytenr.
 * When roots are found, they're added to the roots list
 *
 * If time_seq is set to SEQ_LAST, it will not search delayed_refs, and
 * behaves much like the trans == NULL case, the only difference being that
 * it does not search the commit root.
 * The special case is for qgroup to search roots in commit_transaction().
 *
 * @sc - if !NULL, then immediately return BACKREF_FOUND_SHARED when a
 * shared extent is detected.
 *
 * Otherwise this returns 0 for success and <0 for an error.
 *
 * If ignore_offset is set to false, only extent refs whose offsets match
 * extent_item_pos are returned. If true, every extent ref is returned
 * and extent_item_pos is ignored.
 *
 * FIXME some caching might speed things up
 */
static int find_parent_nodes(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info, u64 bytenr,
			     u64 time_seq, struct ulist *refs,
			     struct ulist *roots, const u64 *extent_item_pos,
			     struct share_check *sc, bool ignore_offset)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_delayed_ref_root *delayed_refs = NULL;
	struct btrfs_delayed_ref_head *head;
	int info_level = 0;
	int ret;
	struct prelim_ref *ref;
	struct rb_node *node;
	struct extent_inode_elem *eie = NULL;
	struct preftrees preftrees = {
		.direct = PREFTREE_INIT,
		.indirect = PREFTREE_INIT,
		.indirect_missing_keys = PREFTREE_INIT
	};

	key.objectid = bytenr;
	key.offset = (u64)-1;
	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	if (!trans) {
		path->search_commit_root = 1;
		path->skip_locking = 1;
	}

	if (time_seq == SEQ_LAST)
		path->skip_locking = 1;

	/*
	 * grab both a lock on the path and a lock on the delayed ref head.
	 * We need both to get a consistent picture of how the refs look
	 * at a specified point in time
	 */
again:
	head = NULL;

	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret == 0) {
		/* This shouldn't happen, indicates a bug or fs corruption. */
		ASSERT(ret != 0);
		ret = -EUCLEAN;
		goto out;
	}

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	if (trans && likely(trans->type != __TRANS_DUMMY) &&
	    time_seq != SEQ_LAST) {
#else
	if (trans && time_seq != SEQ_LAST) {
#endif
		/*
		 * look if there are updates for this ref queued and lock the
		 * head
		 */
		delayed_refs = &trans->transaction->delayed_refs;
		spin_lock(&delayed_refs->lock);
		head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
		if (head) {
			if (!mutex_trylock(&head->mutex)) {
				refcount_inc(&head->refs);
				spin_unlock(&delayed_refs->lock);

				btrfs_release_path(path);

				/*
				 * Mutex was contended, block until it's
				 * released and try again
				 */
				mutex_lock(&head->mutex);
				mutex_unlock(&head->mutex);
				btrfs_put_delayed_ref_head(head);
				goto again;
			}
			spin_unlock(&delayed_refs->lock);
			ret = add_delayed_refs(fs_info, head, time_seq,
					       &preftrees, sc);
			mutex_unlock(&head->mutex);
			if (ret)
				goto out;
		} else {
			spin_unlock(&delayed_refs->lock);
		}
	}

	if (path->slots[0]) {
		struct extent_buffer *leaf;
		int slot;

		path->slots[0]--;
		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid == bytenr &&
		    (key.type == BTRFS_EXTENT_ITEM_KEY ||
		     key.type == BTRFS_METADATA_ITEM_KEY)) {
			ret = add_inline_refs(fs_info, path, bytenr,
					      &info_level, &preftrees, sc);
			if (ret)
				goto out;
			ret = add_keyed_refs(fs_info, path, bytenr, info_level,
					     &preftrees, sc);
			if (ret)
				goto out;
		}
	}

	btrfs_release_path(path);

	ret = add_missing_keys(fs_info, &preftrees, path->skip_locking == 0);
	if (ret)
		goto out;

	WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect_missing_keys.root.rb_root));

	ret = resolve_indirect_refs(fs_info, path, time_seq, &preftrees,
				    extent_item_pos, sc, ignore_offset);
	if (ret)
		goto out;

	WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect.root.rb_root));

	/*
	 * This walks the tree of merged and resolved refs. Tree blocks are
	 * read in as needed. Unique entries are added to the ulist, and
	 * the list of found roots is updated.
	 *
	 * We release the entire tree in one go before returning.
	 */
	node = rb_first_cached(&preftrees.direct.root);
	while (node) {
		ref = rb_entry(node, struct prelim_ref, rbnode);
		node = rb_next(&ref->rbnode);
		/*
		 * ref->count < 0 can happen here if there are delayed
		 * refs with a node->action of BTRFS_DROP_DELAYED_REF.
		 * prelim_ref_insert() relies on this when merging
		 * identical refs to keep the overall count correct.
		 * prelim_ref_insert() will merge only those refs
		 * which compare identically. Any refs having
		 * e.g. different offsets would not be merged,
		 * and would retain their original ref->count < 0.
		 */
		if (roots && ref->count && ref->root_id && ref->parent == 0) {
			if (sc && sc->root_objectid &&
			    ref->root_id != sc->root_objectid) {
				ret = BACKREF_FOUND_SHARED;
				goto out;
			}

			/* no parent == root of tree */
			ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
			if (ret < 0)
				goto out;
		}
		if (ref->count && ref->parent) {
			if (extent_item_pos && !ref->inode_list &&
			    ref->level == 0) {
				struct extent_buffer *eb;

				eb = read_tree_block(fs_info, ref->parent, 0,
						     ref->level, NULL);
				if (IS_ERR(eb)) {
					ret = PTR_ERR(eb);
					goto out;
				} else if (!extent_buffer_uptodate(eb)) {
					free_extent_buffer(eb);
					ret = -EIO;
					goto out;
				}

				if (!path->skip_locking) {
					btrfs_tree_read_lock(eb);
					btrfs_set_lock_blocking_read(eb);
				}
				ret = find_extent_in_eb(eb, bytenr,
							*extent_item_pos, &eie,
							ignore_offset);
				if (!path->skip_locking)
					btrfs_tree_read_unlock_blocking(eb);
				free_extent_buffer(eb);
				if (ret < 0)
					goto out;
				ref->inode_list = eie;
			}
			ret = ulist_add_merge_ptr(refs, ref->parent,
						  ref->inode_list,
						  (void **)&eie, GFP_NOFS);
			if (ret < 0)
				goto out;
			if (!ret && extent_item_pos) {
				/*
				 * We've recorded that parent, so we must extend
				 * its inode list here.
				 *
				 * However if there was corruption we may not
				 * have found an eie, return an error in this
				 * case.
				 */
				ASSERT(eie);
				if (!eie) {
					ret = -EUCLEAN;
					goto out;
				}
				while (eie->next)
					eie = eie->next;
				eie->next = ref->inode_list;
			}
			eie = NULL;
		}
		cond_resched();
	}

out:
	btrfs_free_path(path);

	prelim_release(&preftrees.direct);
	prelim_release(&preftrees.indirect);
	prelim_release(&preftrees.indirect_missing_keys);

	if (ret < 0)
		free_inode_elem_list(eie);
	return ret;
}

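/*
 * Free the inode element lists attached to the ulist nodes' aux fields,
 * then the ulist itself.
 */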
static void free_leaf_list(struct ulist *blocks)
{
	struct ulist_node *node = NULL;
	struct extent_inode_elem *eie;
	struct ulist_iterator uiter;

	ULIST_ITER_INIT(&uiter);
	while ((node = ulist_next(blocks, &uiter))) {
		if (!node->aux)
			continue;
		eie = unode_aux_to_inode_list(node);
		free_inode_elem_list(eie);
		node->aux = 0;
	}

	ulist_free(blocks);
}

/*
 * Finds all leafs with a reference to the specified combination of bytenr and
 * offset. The leafs will be stored in the leafs ulist, which must be freed
 * with ulist_free().
 *
 * returns 0 on success, <0 on error
 */
int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
			 struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 time_seq, struct ulist **leafs,
			 const u64 *extent_item_pos, bool ignore_offset)
{
	int ret;

	*leafs = ulist_alloc(GFP_NOFS);
	if (!*leafs)
		return -ENOMEM;

	ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
				*leafs, NULL, extent_item_pos, NULL,
				ignore_offset);
	if (ret < 0 && ret != -ENOENT) {
		free_leaf_list(*leafs);
		return ret;
	}

	return 0;
}

/*
 * walk all backrefs for a given extent to find all roots that reference this
 * extent. Walking a backref means finding all extents that reference this
 * extent and in turn walk the backrefs of those, too. Naturally this is a
 * recursive process, but here it is implemented in an iterative fashion: We
 * find all referencing extents for the extent in question and put them on a
 * list. In turn, we find all referencing extents for those, further appending
 * to the list. The way we iterate the list allows adding more elements after
 * the current while iterating. The process stops when we reach the end of the
 * list. Found roots are added to the roots list.
 *
 * returns 0 on success, < 0 on error.
 */
static int btrfs_find_all_roots_safe(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info, u64 bytenr,
				     u64 time_seq, struct ulist **roots,
				     bool ignore_offset)
{
	struct ulist *tmp;
	struct ulist_node *node = NULL;
	struct ulist_iterator uiter;
	int ret;

	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp)
		return -ENOMEM;
	*roots = ulist_alloc(GFP_NOFS);
	if (!*roots) {
		ulist_free(tmp);
		return -ENOMEM;
	}

	ULIST_ITER_INIT(&uiter);
	while (1) {
		ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
					tmp, *roots, NULL, NULL, ignore_offset);
		if (ret < 0 && ret != -ENOENT) {
			ulist_free(tmp);
			ulist_free(*roots);
			*roots = NULL;
			return ret;
		}
		node = ulist_next(tmp, &uiter);
		if (!node)
			break;
		bytenr = node->val;
		cond_resched();
	}

	ulist_free(tmp);
	return 0;
}

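/*
 * Like btrfs_find_all_roots_safe(), but takes the commit_root_sem for the
 * duration of the walk when no transaction handle is given.
 */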
int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
			 struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 time_seq, struct ulist **roots,
			 bool ignore_offset)
{
	int ret;

	if (!trans)
		down_read(&fs_info->commit_root_sem);
	ret = btrfs_find_all_roots_safe(trans, fs_info, bytenr,
					time_seq, roots, ignore_offset);
	if (!trans)
		up_read(&fs_info->commit_root_sem);
	return ret;
}

/**
 * btrfs_check_shared - tell us whether an extent is shared
 *
 * btrfs_check_shared uses the backref walking code but will short
 * circuit as soon as it finds a root or inode that doesn't match the
 * one passed in. This provides a significant performance benefit for
 * callers (such as fiemap) which want to know whether the extent is
 * shared but do not need a ref count.
 *
 * This attempts to attach to the running transaction in order to account for
 * delayed refs, but continues on even when no running transaction exists.
 *
 * Return: 0 if extent is not shared, 1 if it is shared, < 0 on error.
 */
int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr,
		       struct ulist *roots, struct ulist *tmp)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	struct ulist_iterator uiter;
	struct ulist_node *node;
	struct seq_list elem = SEQ_LIST_INIT(elem);
	int ret = 0;
	struct share_check shared = {
		.root_objectid = root->root_key.objectid,
		.inum = inum,
		.share_count = 0,
	};

	ulist_init(roots);
	ulist_init(tmp);

	trans = btrfs_join_transaction_nostart(root);
	if (IS_ERR(trans)) {
		if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) {
			ret = PTR_ERR(trans);
			goto out;
		}
		trans = NULL;
		down_read(&fs_info->commit_root_sem);
	} else {
		btrfs_get_tree_mod_seq(fs_info, &elem);
	}

	ULIST_ITER_INIT(&uiter);
	while (1) {
		ret = find_parent_nodes(trans, fs_info, bytenr, elem.seq, tmp,
					roots, NULL, &shared, false);
		if (ret == BACKREF_FOUND_SHARED) {
			/* this is the only condition under which we return 1 */
			ret = 1;
			break;
		}
		if (ret < 0 && ret != -ENOENT)
			break;
		ret = 0;
		node = ulist_next(tmp, &uiter);
		if (!node)
			break;
		bytenr = node->val;
		shared.share_count = 0;
		cond_resched();
	}

	if (trans) {
		btrfs_put_tree_mod_seq(fs_info, &elem);
		btrfs_end_transaction(trans);
	} else {
		up_read(&fs_info->commit_root_sem);
	}
out:
	ulist_release(roots);
	ulist_release(tmp);
	return ret;
}

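/*
 * Find the first INODE_EXTREF item for @inode_objectid with a key offset of
 * at least @start_off. On success, *ret_extref points at the item in the
 * leaf and, if @found_off is given, *found_off holds its key offset.
 * Returns -ENOENT when there are no more extrefs for this inode.
 */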
int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
			  u64 start_off, struct btrfs_path *path,
			  struct btrfs_inode_extref **ret_extref,
			  u64 *found_off)
{
	int ret, slot;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_inode_extref *extref;
	const struct extent_buffer *leaf;
	unsigned long ptr;

	key.objectid = inode_objectid;
	key.type = BTRFS_INODE_EXTREF_KEY;
	key.offset = start_off;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			/*
			 * If the item at offset is not found,
			 * btrfs_search_slot will point us to the slot
			 * where it should be inserted. In our case
			 * that will be the slot directly before the
			 * next INODE_EXTREF_KEY item. In the case
			 * that we're pointing to the last slot in a
			 * leaf, we must move one leaf over.
			 */
1627 ret = btrfs_next_leaf(root, path);
1628 if (ret) {
1629 if (ret >= 1)
1630 ret = -ENOENT;
1631 break;
1632 }
1633 continue;
1634 }
1635
1636 btrfs_item_key_to_cpu(leaf, &found_key, slot);
1637
1638 /*
1639 * Check that we're still looking at an extended ref key for
1640 * this particular objectid. If we have different
1641 * objectid or type then there are no more to be found
1642 * in the tree and we can exit.
1643 */
1644 ret = -ENOENT;
1645 if (found_key.objectid != inode_objectid)
1646 break;
1647 if (found_key.type != BTRFS_INODE_EXTREF_KEY)
1648 break;
1649
1650 ret = 0;
1651 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
1652 extref = (struct btrfs_inode_extref *)ptr;
1653 *ret_extref = extref;
1654 if (found_off)
1655 *found_off = found_key.offset;
1656 break;
1657 }
1658
1659 return ret;
1660 }
1661
1662 /*
1663 * this iterates to turn a name (from iref/extref) into a full filesystem path.
1664 * Elements of the path are separated by '/' and the path is guaranteed to be
1665 * 0-terminated. the path is only given within the current file system.
1666 * Therefore, it never starts with a '/'. the caller is responsible to provide
1667 * "size" bytes in "dest". the dest buffer will be filled backwards. finally,
1668 * the start point of the resulting string is returned. this pointer is within
1669 * dest, normally.
1670 * in case the path buffer would overflow, the pointer is decremented further
1671 * as if output was written to the buffer, though no more output is actually
1672 * generated. that way, the caller can determine how much space would be
1673 * required for the path to fit into the buffer. in that case, the returned
1674 * value will be smaller than dest. callers must check this!
1675 */
1676 char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
1677 u32 name_len, unsigned long name_off,
1678 struct extent_buffer *eb_in, u64 parent,
1679 char *dest, u32 size)
1680 {
1681 int slot;
1682 u64 next_inum;
1683 int ret;
1684 s64 bytes_left = ((s64)size) - 1;
1685 struct extent_buffer *eb = eb_in;
1686 struct btrfs_key found_key;
1687 int leave_spinning = path->leave_spinning;
1688 struct btrfs_inode_ref *iref;
1689
1690 if (bytes_left >= 0)
1691 dest[bytes_left] = '\0';
1692
1693 path->leave_spinning = 1;
1694 while (1) {
1695 bytes_left -= name_len;
1696 if (bytes_left >= 0)
1697 read_extent_buffer(eb, dest + bytes_left,
1698 name_off, name_len);
1699 if (eb != eb_in) {
1700 if (!path->skip_locking)
1701 btrfs_tree_read_unlock_blocking(eb);
1702 free_extent_buffer(eb);
1703 }
1704 ret = btrfs_find_item(fs_root, path, parent, 0,
1705 BTRFS_INODE_REF_KEY, &found_key);
1706 if (ret > 0)
1707 ret = -ENOENT;
1708 if (ret)
1709 break;
1710
1711 next_inum = found_key.offset;
1712
1713 /* regular exit ahead */
1714 if (parent == next_inum)
1715 break;
1716
1717 slot = path->slots[0];
1718 eb = path->nodes[0];
1719 /* make sure we can use eb after releasing the path */
1720 if (eb != eb_in) {
1721 if (!path->skip_locking)
1722 btrfs_set_lock_blocking_read(eb);
1723 path->nodes[0] = NULL;
1724 path->locks[0] = 0;
1725 }
1726 btrfs_release_path(path);
1727 iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
1728
1729 name_len = btrfs_inode_ref_name_len(eb, iref);
1730 name_off = (unsigned long)(iref + 1);
1731
1732 parent = next_inum;
1733 --bytes_left;
1734 if (bytes_left >= 0)
1735 dest[bytes_left] = '/';
1736 }
1737
1738 btrfs_release_path(path);
1739 path->leave_spinning = leave_spinning;
1740
1741 if (ret)
1742 return ERR_PTR(ret);
1743
1744 return dest + bytes_left;
1745 }
1746
1747 /*
1748 * this makes the path point to (logical EXTENT_ITEM *)
1749 * returns BTRFS_EXTENT_FLAG_DATA for data, BTRFS_EXTENT_FLAG_TREE_BLOCK for
1750 * tree blocks and <0 on error.
1751 */
1752 int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
1753 struct btrfs_path *path, struct btrfs_key *found_key,
1754 u64 *flags_ret)
1755 {
1756 int ret;
1757 u64 flags;
1758 u64 size = 0;
1759 u32 item_size;
1760 const struct extent_buffer *eb;
1761 struct btrfs_extent_item *ei;
1762 struct btrfs_key key;
1763
1764 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
1765 key.type = BTRFS_METADATA_ITEM_KEY;
1766 else
1767 key.type = BTRFS_EXTENT_ITEM_KEY;
1768 key.objectid = logical;
1769 key.offset = (u64)-1;
1770
1771 ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
1772 if (ret < 0)
1773 return ret;
1774
1775 ret = btrfs_previous_extent_item(fs_info->extent_root, path, 0);
1776 if (ret) {
1777 if (ret > 0)
1778 ret = -ENOENT;
1779 return ret;
1780 }
1781 btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
1782 if (found_key->type == BTRFS_METADATA_ITEM_KEY)
1783 size = fs_info->nodesize;
1784 else if (found_key->type == BTRFS_EXTENT_ITEM_KEY)
1785 size = found_key->offset;
1786
1787 if (found_key->objectid > logical ||
1788 found_key->objectid + size <= logical) {
1789 btrfs_debug(fs_info,
1790 "logical %llu is not within any extent", logical);
1791 return -ENOENT;
1792 }
1793
1794 eb = path->nodes[0];
1795 item_size = btrfs_item_size_nr(eb, path->slots[0]);
1796 BUG_ON(item_size < sizeof(*ei));
1797
1798 ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
1799 flags = btrfs_extent_flags(eb, ei);
1800
1801 btrfs_debug(fs_info,
1802 "logical %llu is at position %llu within the extent (%llu EXTENT_ITEM %llu) flags %#llx size %u",
1803 logical, logical - found_key->objectid, found_key->objectid,
1804 found_key->offset, flags, item_size);
1805
1806 WARN_ON(!flags_ret);
1807 if (flags_ret) {
1808 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1809 *flags_ret = BTRFS_EXTENT_FLAG_TREE_BLOCK;
1810 else if (flags & BTRFS_EXTENT_FLAG_DATA)
1811 *flags_ret = BTRFS_EXTENT_FLAG_DATA;
1812 else
1813 BUG();
1814 return 0;
1815 }
1816
1817 return -EIO;
1818 }

/*
 * helper function to iterate extent inline refs. ptr must point to a 0 value
 * for the first call and may be modified. it is used to track state.
 * if more refs exist, 0 is returned and the next call to
 * get_extent_inline_ref must pass the modified ptr parameter to get the
 * next ref. after the last ref was processed, 1 is returned.
 * returns <0 on error
 */
static int get_extent_inline_ref(unsigned long *ptr,
				 const struct extent_buffer *eb,
				 const struct btrfs_key *key,
				 const struct btrfs_extent_item *ei,
				 u32 item_size,
				 struct btrfs_extent_inline_ref **out_eiref,
				 int *out_type)
{
	unsigned long end;
	u64 flags;
	struct btrfs_tree_block_info *info;

	if (!*ptr) {
		/* first call */
		flags = btrfs_extent_flags(eb, ei);
		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
			if (key->type == BTRFS_METADATA_ITEM_KEY) {
				/* a skinny metadata extent */
				*out_eiref =
				     (struct btrfs_extent_inline_ref *)(ei + 1);
			} else {
				WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY);
				info = (struct btrfs_tree_block_info *)(ei + 1);
				*out_eiref =
				   (struct btrfs_extent_inline_ref *)(info + 1);
			}
		} else {
			*out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
		}
		*ptr = (unsigned long)*out_eiref;
		if ((unsigned long)(*ptr) >= (unsigned long)ei + item_size)
			return -ENOENT;
	}

	end = (unsigned long)ei + item_size;
	*out_eiref = (struct btrfs_extent_inline_ref *)(*ptr);
	*out_type = btrfs_get_extent_inline_ref_type(eb, *out_eiref,
						     BTRFS_REF_TYPE_ANY);
	if (*out_type == BTRFS_REF_TYPE_INVALID)
		return -EUCLEAN;

	*ptr += btrfs_extent_inline_ref_size(*out_type);
	WARN_ON(*ptr > end);
	if (*ptr == end)
		return 1; /* last */

	return 0;
}
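
/*
 * The intended calling pattern, sketched: initialize ptr to 0, pass the
 * same ptr back in on every call, and stop once 1 is returned. Everything
 * except the get_extent_inline_ref() arguments below is a placeholder.
 *
 *	unsigned long ptr = 0;
 *	struct btrfs_extent_inline_ref *eiref;
 *	int type;
 *	int ret;
 *
 *	do {
 *		ret = get_extent_inline_ref(&ptr, eb, &key, ei, item_size,
 *					    &eiref, &type);
 *		if (ret < 0)
 *			return ret;
 *		process_one_ref(eiref, type);		(placeholder)
 *	} while (ret == 0);
 */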

/*
 * reads the tree block backref for an extent. tree level and root are returned
 * through out_level and out_root. ptr must point to a 0 value for the first
 * call and may be modified (see get_extent_inline_ref comment).
 * returns 0 if data was provided, 1 if there was no more data to provide or
 * <0 on error.
 */
int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
			    struct btrfs_key *key, struct btrfs_extent_item *ei,
			    u32 item_size, u64 *out_root, u8 *out_level)
{
	int ret;
	int type;
	struct btrfs_extent_inline_ref *eiref;

	if (*ptr == (unsigned long)-1)
		return 1;

	while (1) {
		ret = get_extent_inline_ref(ptr, eb, key, ei, item_size,
					    &eiref, &type);
		if (ret < 0)
			return ret;

		if (type == BTRFS_TREE_BLOCK_REF_KEY ||
		    type == BTRFS_SHARED_BLOCK_REF_KEY)
			break;

		if (ret == 1)
			return 1;
	}

	/* we can treat both ref types equally here */
	*out_root = btrfs_extent_inline_ref_offset(eb, eiref);

	if (key->type == BTRFS_EXTENT_ITEM_KEY) {
		struct btrfs_tree_block_info *info;

		info = (struct btrfs_tree_block_info *)(ei + 1);
		*out_level = btrfs_tree_block_level(eb, info);
	} else {
		ASSERT(key->type == BTRFS_METADATA_ITEM_KEY);
		*out_level = (u8)key->offset;
	}

	if (ret == 1)
		*ptr = (unsigned long)-1;

	return 0;
}
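
/*
 * Sketch of the expected loop around tree_backref_for_extent(), similar to
 * how scrub reports the owners of a damaged tree block (the pr_info() is a
 * stand-in for the real consumer):
 *
 *	unsigned long ptr = 0;
 *	u64 root;
 *	u8 level;
 *	int ret;
 *
 *	do {
 *		ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
 *					      item_size, &root, &level);
 *		if (ret < 0)
 *			return ret;
 *		if (ret == 0)
 *			pr_info("ref root %llu level %u\n", root, level);
 *	} while (ret == 0);
 */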

static int iterate_leaf_refs(struct btrfs_fs_info *fs_info,
			     struct extent_inode_elem *inode_list,
			     u64 root, u64 extent_item_objectid,
			     iterate_extent_inodes_t *iterate, void *ctx)
{
	struct extent_inode_elem *eie;
	int ret = 0;

	for (eie = inode_list; eie; eie = eie->next) {
		btrfs_debug(fs_info,
			    "ref for %llu resolved, key (%llu EXTENT_DATA %llu), root %llu",
			    extent_item_objectid, eie->inum,
			    eie->offset, root);
		ret = iterate(eie->inum, eie->offset, root, ctx);
		if (ret) {
			btrfs_debug(fs_info,
				    "stopping iteration for %llu due to ret=%d",
				    extent_item_objectid, ret);
			break;
		}
	}

	return ret;
}

/*
 * calls iterate() for every inode that references the extent identified by
 * the given parameters.
 * when the iterator function returns a non-zero value, iteration stops.
 */
int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
			  u64 extent_item_objectid, u64 extent_item_pos,
			  int search_commit_root,
			  iterate_extent_inodes_t *iterate, void *ctx,
			  bool ignore_offset)
{
	int ret;
	struct btrfs_trans_handle *trans = NULL;
	struct ulist *refs = NULL;
	struct ulist *roots = NULL;
	struct ulist_node *ref_node = NULL;
	struct ulist_node *root_node = NULL;
	struct seq_list tree_mod_seq_elem = SEQ_LIST_INIT(tree_mod_seq_elem);
	struct ulist_iterator ref_uiter;
	struct ulist_iterator root_uiter;

	btrfs_debug(fs_info, "resolving all inodes for extent %llu",
			extent_item_objectid);

	if (!search_commit_root) {
		trans = btrfs_attach_transaction(fs_info->extent_root);
		if (IS_ERR(trans)) {
			if (PTR_ERR(trans) != -ENOENT &&
			    PTR_ERR(trans) != -EROFS)
				return PTR_ERR(trans);
			trans = NULL;
		}
	}

	if (trans)
		btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
	else
		down_read(&fs_info->commit_root_sem);

	ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
				   tree_mod_seq_elem.seq, &refs,
				   &extent_item_pos, ignore_offset);
	if (ret)
		goto out;

	ULIST_ITER_INIT(&ref_uiter);
	while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
		ret = btrfs_find_all_roots_safe(trans, fs_info, ref_node->val,
						tree_mod_seq_elem.seq, &roots,
						ignore_offset);
		if (ret)
			break;
		ULIST_ITER_INIT(&root_uiter);
		while (!ret && (root_node = ulist_next(roots, &root_uiter))) {
			btrfs_debug(fs_info,
				    "root %llu references leaf %llu, data list %#llx",
				    root_node->val, ref_node->val,
				    ref_node->aux);
			ret = iterate_leaf_refs(fs_info,
						(struct extent_inode_elem *)
						(uintptr_t)ref_node->aux,
						root_node->val,
						extent_item_objectid,
						iterate, ctx);
		}
		ulist_free(roots);
	}

	free_leaf_list(refs);
out:
	if (trans) {
		btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
		btrfs_end_transaction(trans);
	} else {
		up_read(&fs_info->commit_root_sem);
	}

	return ret;
}
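
/*
 * Example callback, as a sketch (the counting context is made up for
 * illustration): count how many (inode, offset, root) references an extent
 * has, using the iterate_extent_inodes_t signature.
 *
 *	static int count_refs(u64 inum, u64 offset, u64 root, void *ctx)
 *	{
 *		(*(u64 *)ctx)++;
 *		return 0;	(non-zero would stop the iteration)
 *	}
 *
 *	u64 refs = 0;
 *	ret = iterate_extent_inodes(fs_info, extent_item_objectid,
 *				    extent_item_pos, 0, count_refs, &refs,
 *				    false);
 */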

int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
				struct btrfs_path *path,
				iterate_extent_inodes_t *iterate, void *ctx,
				bool ignore_offset)
{
	int ret;
	u64 extent_item_pos;
	u64 flags = 0;
	struct btrfs_key found_key;
	int search_commit_root = path->search_commit_root;

	ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
	btrfs_release_path(path);
	if (ret < 0)
		return ret;
	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		return -EINVAL;

	extent_item_pos = logical - found_key.objectid;
	ret = iterate_extent_inodes(fs_info, found_key.objectid,
					extent_item_pos, search_commit_root,
					iterate, ctx, ignore_offset);

	return ret;
}

typedef int (iterate_irefs_t)(u64 parent, u32 name_len, unsigned long name_off,
			      struct extent_buffer *eb, void *ctx);

static int iterate_inode_refs(u64 inum, struct btrfs_root *fs_root,
			      struct btrfs_path *path,
			      iterate_irefs_t *iterate, void *ctx)
{
	int ret = 0;
	int slot;
	u32 cur;
	u32 len;
	u32 name_len;
	u64 parent = 0;
	int found = 0;
	struct extent_buffer *eb;
	struct btrfs_item *item;
	struct btrfs_inode_ref *iref;
	struct btrfs_key found_key;

	while (!ret) {
		ret = btrfs_find_item(fs_root, path, inum,
				parent ? parent + 1 : 0, BTRFS_INODE_REF_KEY,
				&found_key);

		if (ret < 0)
			break;
		if (ret) {
			ret = found ? 0 : -ENOENT;
			break;
		}
		++found;

		parent = found_key.offset;
		slot = path->slots[0];
		eb = btrfs_clone_extent_buffer(path->nodes[0]);
		if (!eb) {
			ret = -ENOMEM;
			break;
		}
		btrfs_release_path(path);

		item = btrfs_item_nr(slot);
		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);

		for (cur = 0; cur < btrfs_item_size(eb, item); cur += len) {
			name_len = btrfs_inode_ref_name_len(eb, iref);
			/* path must be released before calling iterate()! */
			btrfs_debug(fs_root->fs_info,
				"following ref at offset %u for inode %llu in tree %llu",
				cur, found_key.objectid,
				fs_root->root_key.objectid);
			ret = iterate(parent, name_len,
				      (unsigned long)(iref + 1), eb, ctx);
			if (ret)
				break;
			len = sizeof(*iref) + name_len;
			iref = (struct btrfs_inode_ref *)((char *)iref + len);
		}
		free_extent_buffer(eb);
	}

	btrfs_release_path(path);

	return ret;
}

static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
				 struct btrfs_path *path,
				 iterate_irefs_t *iterate, void *ctx)
{
	int ret;
	int slot;
	u64 offset = 0;
	u64 parent;
	int found = 0;
	struct extent_buffer *eb;
	struct btrfs_inode_extref *extref;
	u32 item_size;
	u32 cur_offset;
	unsigned long ptr;

	while (1) {
		ret = btrfs_find_one_extref(fs_root, inum, offset, path, &extref,
					    &offset);
		if (ret < 0)
			break;
		if (ret) {
			ret = found ? 0 : -ENOENT;
			break;
		}
		++found;

		slot = path->slots[0];
		eb = btrfs_clone_extent_buffer(path->nodes[0]);
		if (!eb) {
			ret = -ENOMEM;
			break;
		}
		btrfs_release_path(path);

		item_size = btrfs_item_size_nr(eb, slot);
		ptr = btrfs_item_ptr_offset(eb, slot);
		cur_offset = 0;

		while (cur_offset < item_size) {
			u32 name_len;

			extref = (struct btrfs_inode_extref *)(ptr + cur_offset);
			parent = btrfs_inode_extref_parent(eb, extref);
			name_len = btrfs_inode_extref_name_len(eb, extref);
			ret = iterate(parent, name_len,
				      (unsigned long)&extref->name, eb, ctx);
			if (ret)
				break;

			cur_offset += name_len;
			cur_offset += sizeof(*extref);
		}
		free_extent_buffer(eb);

		offset++;
	}

	btrfs_release_path(path);

	return ret;
}

static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
			 struct btrfs_path *path, iterate_irefs_t *iterate,
			 void *ctx)
{
	int ret;
	int found_refs = 0;

	ret = iterate_inode_refs(inum, fs_root, path, iterate, ctx);
	if (!ret)
		++found_refs;
	else if (ret != -ENOENT)
		return ret;

	ret = iterate_inode_extrefs(inum, fs_root, path, iterate, ctx);
	if (ret == -ENOENT && found_refs)
		return 0;

	return ret;
}

/*
 * returns 0 if the path could be dumped (probably truncated)
 * returns <0 in case of an error
 */
static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
			 struct extent_buffer *eb, void *ctx)
{
	struct inode_fs_paths *ipath = ctx;
	char *fspath;
	char *fspath_min;
	int i = ipath->fspath->elem_cnt;
	const int s_ptr = sizeof(char *);
	u32 bytes_left;

	bytes_left = ipath->fspath->bytes_left > s_ptr ?
					ipath->fspath->bytes_left - s_ptr : 0;

	fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
	fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len,
				   name_off, eb, inum, fspath_min, bytes_left);
	if (IS_ERR(fspath))
		return PTR_ERR(fspath);

	if (fspath > fspath_min) {
		ipath->fspath->val[i] = (u64)(unsigned long)fspath;
		++ipath->fspath->elem_cnt;
		ipath->fspath->bytes_left = fspath - fspath_min;
	} else {
		++ipath->fspath->elem_missed;
		ipath->fspath->bytes_missing += fspath_min - fspath;
		ipath->fspath->bytes_left = 0;
	}

	return 0;
}

/*
 * this dumps all file system paths to the inode into the ipath struct, provided
 * it has been created large enough. each path is zero-terminated and accessed
 * from ipath->fspath->val[i].
 * when it returns, there are ipath->fspath->elem_cnt number of paths available
 * in ipath->fspath->val[]. when the allocated space wasn't sufficient, the
 * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise,
 * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
 * have been needed to return all paths.
 */
int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
{
	return iterate_irefs(inum, ipath->fs_root, ipath->btrfs_path,
			     inode_to_path, ipath);
}

struct btrfs_data_container *init_data_container(u32 total_bytes)
{
	struct btrfs_data_container *data;
	size_t alloc_bytes;

	alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
	data = kvmalloc(alloc_bytes, GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (total_bytes >= sizeof(*data)) {
		data->bytes_left = total_bytes - sizeof(*data);
		data->bytes_missing = 0;
	} else {
		data->bytes_missing = sizeof(*data) - total_bytes;
		data->bytes_left = 0;
	}

	data->elem_cnt = 0;
	data->elem_missed = 0;

	return data;
}
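
/*
 * For example, a 4096-byte container has 4096 - sizeof(*data) bytes left
 * for path data and bytes_missing == 0, while a total_bytes smaller than
 * the header itself yields bytes_left == 0 and bytes_missing making up the
 * difference.
 */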

/*
 * allocates space to return multiple file system paths for an inode.
 * total_bytes to allocate are passed, note that space usable for actual path
 * information will be total_bytes - sizeof(struct inode_fs_paths).
 * the returned pointer must be freed with free_ipath() in the end.
 */
struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
				  struct btrfs_path *path)
{
	struct inode_fs_paths *ifp;
	struct btrfs_data_container *fspath;

	fspath = init_data_container(total_bytes);
	if (IS_ERR(fspath))
		return ERR_CAST(fspath);

	ifp = kmalloc(sizeof(*ifp), GFP_KERNEL);
	if (!ifp) {
		kvfree(fspath);
		return ERR_PTR(-ENOMEM);
	}

	ifp->btrfs_path = path;
	ifp->fspath = fspath;
	ifp->fs_root = fs_root;

	return ifp;
}

void free_ipath(struct inode_fs_paths *ipath)
{
	if (!ipath)
		return;
	kvfree(ipath->fspath);
	kfree(ipath);
}
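
/*
 * Putting the ipath helpers together, sketched after the way the
 * BTRFS_IOC_INO_PATHS ioctl uses them (buffer size and error handling are
 * illustrative):
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	struct inode_fs_paths *ipath;
 *	int i, ret;
 *
 *	if (!path)
 *		return -ENOMEM;
 *	ipath = init_ipath(4096, fs_root, path);
 *	if (IS_ERR(ipath)) {
 *		btrfs_free_path(path);
 *		return PTR_ERR(ipath);
 *	}
 *	ret = paths_from_inode(inum, ipath);
 *	for (i = 0; !ret && i < ipath->fspath->elem_cnt; i++)
 *		pr_info("path: %s\n",
 *			(char *)(unsigned long)ipath->fspath->val[i]);
 *	free_ipath(ipath);
 *	btrfs_free_path(path);
 */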

struct btrfs_backref_iter *btrfs_backref_iter_alloc(
		struct btrfs_fs_info *fs_info, gfp_t gfp_flag)
{
	struct btrfs_backref_iter *ret;

	ret = kzalloc(sizeof(*ret), gfp_flag);
	if (!ret)
		return NULL;

	ret->path = btrfs_alloc_path();
	if (!ret->path) {
		kfree(ret);
		return NULL;
	}

	/* Current backref iterator only supports iteration in commit root */
	ret->path->search_commit_root = 1;
	ret->path->skip_locking = 1;
	ret->fs_info = fs_info;

	return ret;
}

int btrfs_backref_iter_start(struct btrfs_backref_iter *iter, u64 bytenr)
{
	struct btrfs_fs_info *fs_info = iter->fs_info;
	struct btrfs_path *path = iter->path;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	key.type = BTRFS_METADATA_ITEM_KEY;
	key.offset = (u64)-1;
	iter->bytenr = bytenr;

	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;
	if (ret == 0) {
		ret = -EUCLEAN;
		goto release;
	}
	if (path->slots[0] == 0) {
		WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
		ret = -EUCLEAN;
		goto release;
	}
	path->slots[0]--;

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
	if ((key.type != BTRFS_EXTENT_ITEM_KEY &&
	     key.type != BTRFS_METADATA_ITEM_KEY) || key.objectid != bytenr) {
		ret = -ENOENT;
		goto release;
	}
	memcpy(&iter->cur_key, &key, sizeof(key));
	iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
						    path->slots[0]);
	iter->end_ptr = (u32)(iter->item_ptr +
			btrfs_item_size_nr(path->nodes[0], path->slots[0]));
	ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
			    struct btrfs_extent_item);

	/*
	 * Only support iteration on tree backrefs yet.
	 *
	 * This is an extra precaution for non-skinny-metadata, where
	 * EXTENT_ITEM is also used for tree blocks, so we can only use
	 * extent flags to determine if it's a tree block.
	 */
	if (btrfs_extent_flags(path->nodes[0], ei) & BTRFS_EXTENT_FLAG_DATA) {
		ret = -ENOTSUPP;
		goto release;
	}
	iter->cur_ptr = (u32)(iter->item_ptr + sizeof(*ei));

	/* If there is no inline backref, go search for keyed backref */
	if (iter->cur_ptr >= iter->end_ptr) {
		ret = btrfs_next_item(fs_info->extent_root, path);

		/* No inline nor keyed ref */
		if (ret > 0) {
			ret = -ENOENT;
			goto release;
		}
		if (ret < 0)
			goto release;

		btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key,
				path->slots[0]);
		if (iter->cur_key.objectid != bytenr ||
		    (iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY &&
		     iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY)) {
			ret = -ENOENT;
			goto release;
		}
		iter->cur_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
							   path->slots[0]);
		iter->item_ptr = iter->cur_ptr;
		iter->end_ptr = (u32)(iter->item_ptr + btrfs_item_size_nr(
				      path->nodes[0], path->slots[0]));
	}

	return 0;
release:
	btrfs_backref_iter_release(iter);
	return ret;
}

/*
 * Go to the next backref item of current bytenr, can be either inlined or
 * keyed.
 *
 * Caller needs to check whether it's an inline ref or not by iter->cur_key.
 *
 * Return 0 if we get next backref without problem.
 * Return >0 if there is no extra backref for this bytenr.
 * Return <0 if something went wrong.
 */
int btrfs_backref_iter_next(struct btrfs_backref_iter *iter)
{
	struct extent_buffer *eb = btrfs_backref_get_eb(iter);
	struct btrfs_path *path = iter->path;
	struct btrfs_extent_inline_ref *iref;
	int ret;
	u32 size;

	if (btrfs_backref_iter_is_inline_ref(iter)) {
		/* We're still inside the inline refs */
		ASSERT(iter->cur_ptr < iter->end_ptr);

		if (btrfs_backref_has_tree_block_info(iter)) {
			/* First tree block info */
			size = sizeof(struct btrfs_tree_block_info);
		} else {
			/* Use inline ref type to determine the size */
			int type;

			iref = (struct btrfs_extent_inline_ref *)
				((unsigned long)iter->cur_ptr);
			type = btrfs_extent_inline_ref_type(eb, iref);

			size = btrfs_extent_inline_ref_size(type);
		}
		iter->cur_ptr += size;
		if (iter->cur_ptr < iter->end_ptr)
			return 0;

		/* All inline items iterated, fall through */
	}

	/* We're at keyed items, there is no inline item, go to the next one */
	ret = btrfs_next_item(iter->fs_info->extent_root, iter->path);
	if (ret)
		return ret;

	btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key, path->slots[0]);
	if (iter->cur_key.objectid != iter->bytenr ||
	    (iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY &&
	     iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY))
		return 1;
	iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
						    path->slots[0]);
	iter->cur_ptr = iter->item_ptr;
	iter->end_ptr = iter->item_ptr + (u32)btrfs_item_size_nr(path->nodes[0],
								 path->slots[0]);
	return 0;
}
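
/*
 * Typical iterator usage, sketched (this mirrors how relocation walks the
 * backrefs of a tree block; bytenr is the block's start):
 *
 *	struct btrfs_backref_iter *iter;
 *	int ret;
 *
 *	iter = btrfs_backref_iter_alloc(fs_info, GFP_NOFS);
 *	if (!iter)
 *		return -ENOMEM;
 *	for (ret = btrfs_backref_iter_start(iter, bytenr);
 *	     ret == 0; ret = btrfs_backref_iter_next(iter)) {
 *		(handle one backref, described by iter->cur_key)
 *	}
 *	btrfs_backref_iter_release(iter);
 *	btrfs_backref_iter_free(iter);
 *	return ret < 0 ? ret : 0;
 */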

void btrfs_backref_init_cache(struct btrfs_fs_info *fs_info,
			      struct btrfs_backref_cache *cache, int is_reloc)
{
	int i;

	cache->rb_root = RB_ROOT;
	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
		INIT_LIST_HEAD(&cache->pending[i]);
	INIT_LIST_HEAD(&cache->changed);
	INIT_LIST_HEAD(&cache->detached);
	INIT_LIST_HEAD(&cache->leaves);
	INIT_LIST_HEAD(&cache->pending_edge);
	INIT_LIST_HEAD(&cache->useless_node);
	cache->fs_info = fs_info;
	cache->is_reloc = is_reloc;
}

struct btrfs_backref_node *btrfs_backref_alloc_node(
		struct btrfs_backref_cache *cache, u64 bytenr, int level)
{
	struct btrfs_backref_node *node;

	ASSERT(level >= 0 && level < BTRFS_MAX_LEVEL);
	node = kzalloc(sizeof(*node), GFP_NOFS);
	if (!node)
		return node;

	INIT_LIST_HEAD(&node->list);
	INIT_LIST_HEAD(&node->upper);
	INIT_LIST_HEAD(&node->lower);
	RB_CLEAR_NODE(&node->rb_node);
	cache->nr_nodes++;
	node->level = level;
	node->bytenr = bytenr;

	return node;
}

struct btrfs_backref_edge *btrfs_backref_alloc_edge(
		struct btrfs_backref_cache *cache)
{
	struct btrfs_backref_edge *edge;

	edge = kzalloc(sizeof(*edge), GFP_NOFS);
	if (edge)
		cache->nr_edges++;
	return edge;
}

/*
 * Drop the backref node from cache, also cleaning up all its
 * upper edges and any uncached nodes in the path.
 *
 * This cleanup happens bottom up, thus the node should either
 * be the lowest node in the cache or a detached node.
 */
void btrfs_backref_cleanup_node(struct btrfs_backref_cache *cache,
				struct btrfs_backref_node *node)
{
	struct btrfs_backref_node *upper;
	struct btrfs_backref_edge *edge;

	if (!node)
		return;

	BUG_ON(!node->lowest && !node->detached);
	while (!list_empty(&node->upper)) {
		edge = list_entry(node->upper.next, struct btrfs_backref_edge,
				  list[LOWER]);
		upper = edge->node[UPPER];
		list_del(&edge->list[LOWER]);
		list_del(&edge->list[UPPER]);
		btrfs_backref_free_edge(cache, edge);

		/*
		 * Add the node to leaf node list if no other child block
		 * is cached.
		 */
		if (list_empty(&upper->lower)) {
			list_add_tail(&upper->lower, &cache->leaves);
			upper->lowest = 1;
		}
	}

	btrfs_backref_drop_node(cache, node);
}

/*
 * Release all nodes/edges from current cache
 */
void btrfs_backref_release_cache(struct btrfs_backref_cache *cache)
{
	struct btrfs_backref_node *node;
	int i;

	while (!list_empty(&cache->detached)) {
		node = list_entry(cache->detached.next,
				  struct btrfs_backref_node, list);
		btrfs_backref_cleanup_node(cache, node);
	}

	while (!list_empty(&cache->leaves)) {
		node = list_entry(cache->leaves.next,
				  struct btrfs_backref_node, lower);
		btrfs_backref_cleanup_node(cache, node);
	}

	cache->last_trans = 0;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
		ASSERT(list_empty(&cache->pending[i]));
	ASSERT(list_empty(&cache->pending_edge));
	ASSERT(list_empty(&cache->useless_node));
	ASSERT(list_empty(&cache->changed));
	ASSERT(list_empty(&cache->detached));
	ASSERT(RB_EMPTY_ROOT(&cache->rb_root));
	ASSERT(!cache->nr_nodes);
	ASSERT(!cache->nr_edges);
}

/*
 * Handle direct tree backref
 *
 * Direct tree backref means the backref item shows its parent bytenr
 * directly. This is for SHARED_BLOCK_REF backref (keyed or inlined).
 *
 * @ref_key:	The converted backref key.
 *		For keyed backref, it's the item key.
 *		For inlined backref, objectid is the bytenr,
 *		type is btrfs_inline_ref_type, offset is
 *		btrfs_inline_ref_offset.
 */
static int handle_direct_tree_backref(struct btrfs_backref_cache *cache,
				      struct btrfs_key *ref_key,
				      struct btrfs_backref_node *cur)
{
	struct btrfs_backref_edge *edge;
	struct btrfs_backref_node *upper;
	struct rb_node *rb_node;

	ASSERT(ref_key->type == BTRFS_SHARED_BLOCK_REF_KEY);

	/* Only reloc root uses backref pointing to itself */
	if (ref_key->objectid == ref_key->offset) {
		struct btrfs_root *root;

		cur->is_reloc_root = 1;
		/* Only reloc backref cache cares about a specific root */
		if (cache->is_reloc) {
			root = find_reloc_root(cache->fs_info, cur->bytenr);
			if (!root)
				return -ENOENT;
			cur->root = root;
		} else {
			/*
			 * For generic purpose backref cache, reloc root node
			 * is useless.
			 */
			list_add(&cur->list, &cache->useless_node);
		}
		return 0;
	}

	edge = btrfs_backref_alloc_edge(cache);
	if (!edge)
		return -ENOMEM;

	rb_node = rb_simple_search(&cache->rb_root, ref_key->offset);
	if (!rb_node) {
		/* Parent node not yet cached */
		upper = btrfs_backref_alloc_node(cache, ref_key->offset,
						 cur->level + 1);
		if (!upper) {
			btrfs_backref_free_edge(cache, edge);
			return -ENOMEM;
		}

		/*
		 * Backrefs for the upper level block aren't cached, add the
		 * block to the pending list
		 */
		list_add_tail(&edge->list[UPPER], &cache->pending_edge);
	} else {
		/* Parent node already cached */
		upper = rb_entry(rb_node, struct btrfs_backref_node, rb_node);
		ASSERT(upper->checked);
		INIT_LIST_HEAD(&edge->list[UPPER]);
	}
	btrfs_backref_link_edge(edge, cur, upper, LINK_LOWER);
	return 0;
}

/*
 * Handle indirect tree backref
 *
 * Indirect tree backref means we only know which tree the node belongs to.
 * We still need to do a tree search to find out the parents. This is for
 * TREE_BLOCK_REF backref (keyed or inlined).
 *
 * @ref_key:	The same as @ref_key in handle_direct_tree_backref()
 * @tree_key:	The first key of this tree block.
 * @path:	A clean (released) path, to avoid allocating a path every time
 *		the function gets called.
 */
static int handle_indirect_tree_backref(struct btrfs_backref_cache *cache,
					struct btrfs_path *path,
					struct btrfs_key *ref_key,
					struct btrfs_key *tree_key,
					struct btrfs_backref_node *cur)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_backref_node *upper;
	struct btrfs_backref_node *lower;
	struct btrfs_backref_edge *edge;
	struct extent_buffer *eb;
	struct btrfs_root *root;
	struct rb_node *rb_node;
	int level;
	bool need_check = true;
	int ret;

	root = btrfs_get_fs_root(fs_info, ref_key->offset, false);
	if (IS_ERR(root))
		return PTR_ERR(root);
	if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
		cur->cowonly = 1;

	if (btrfs_root_level(&root->root_item) == cur->level) {
		/* Tree root */
		ASSERT(btrfs_root_bytenr(&root->root_item) == cur->bytenr);
		/*
		 * For reloc backref cache, we may ignore reloc root. But for
		 * general purpose backref cache, we can't rely on
		 * btrfs_should_ignore_reloc_root() as it may conflict with
		 * current running relocation and lead to missing root.
		 *
		 * For general purpose backref cache, reloc root detection is
		 * completely relying on direct backref (key->offset is parent
		 * bytenr), thus only do such check for reloc cache.
		 */
		if (btrfs_should_ignore_reloc_root(root) && cache->is_reloc) {
			btrfs_put_root(root);
			list_add(&cur->list, &cache->useless_node);
		} else {
			cur->root = root;
		}
		return 0;
	}

	level = cur->level + 1;

	/* Search the tree to find parent blocks referring to the block */
	path->search_commit_root = 1;
	path->skip_locking = 1;
	path->lowest_level = level;
	ret = btrfs_search_slot(NULL, root, tree_key, path, 0, 0);
	path->lowest_level = 0;
	if (ret < 0) {
		btrfs_put_root(root);
		return ret;
	}
	if (ret > 0 && path->slots[level] > 0)
		path->slots[level]--;

	eb = path->nodes[level];
	if (btrfs_node_blockptr(eb, path->slots[level]) != cur->bytenr) {
		btrfs_err(fs_info,
"couldn't find block (%llu) (level %d) in tree (%llu) with key (%llu %u %llu)",
			  cur->bytenr, level - 1, root->root_key.objectid,
			  tree_key->objectid, tree_key->type, tree_key->offset);
		btrfs_put_root(root);
		ret = -ENOENT;
		goto out;
	}
	lower = cur;

	/* Add all nodes and edges in the path */
	for (; level < BTRFS_MAX_LEVEL; level++) {
		if (!path->nodes[level]) {
			ASSERT(btrfs_root_bytenr(&root->root_item) ==
			       lower->bytenr);
			/* Same as previous should_ignore_reloc_root() call */
			if (btrfs_should_ignore_reloc_root(root) &&
			    cache->is_reloc) {
				btrfs_put_root(root);
				list_add(&lower->list, &cache->useless_node);
			} else {
				lower->root = root;
			}
			break;
		}

		edge = btrfs_backref_alloc_edge(cache);
		if (!edge) {
			btrfs_put_root(root);
			ret = -ENOMEM;
			goto out;
		}

		eb = path->nodes[level];
		rb_node = rb_simple_search(&cache->rb_root, eb->start);
		if (!rb_node) {
			upper = btrfs_backref_alloc_node(cache, eb->start,
							 lower->level + 1);
			if (!upper) {
				btrfs_put_root(root);
				btrfs_backref_free_edge(cache, edge);
				ret = -ENOMEM;
				goto out;
			}
			upper->owner = btrfs_header_owner(eb);
			if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
				upper->cowonly = 1;

			/*
			 * If we know the block isn't shared we can avoid
			 * checking its backrefs.
			 */
			if (btrfs_block_can_be_shared(root, eb))
				upper->checked = 0;
			else
				upper->checked = 1;

			/*
			 * Add the block to pending list if we need to check its
			 * backrefs, we only do this once while walking up a
			 * tree as we will catch anything else later on.
			 */
			if (!upper->checked && need_check) {
				need_check = false;
				list_add_tail(&edge->list[UPPER],
					      &cache->pending_edge);
			} else {
				if (upper->checked)
					need_check = true;
				INIT_LIST_HEAD(&edge->list[UPPER]);
			}
		} else {
			upper = rb_entry(rb_node, struct btrfs_backref_node,
					 rb_node);
			ASSERT(upper->checked);
			INIT_LIST_HEAD(&edge->list[UPPER]);
			if (!upper->owner)
				upper->owner = btrfs_header_owner(eb);
		}
		btrfs_backref_link_edge(edge, lower, upper, LINK_LOWER);

		if (rb_node) {
			btrfs_put_root(root);
			break;
		}
		lower = upper;
		upper = NULL;
	}
out:
	btrfs_release_path(path);
	return ret;
}

/*
 * Add backref node @cur into @cache.
 *
 * NOTE: Even if the function returns 0, @cur is not yet cached as its upper
 * links aren't yet bi-directional. The caller needs to finish such links by
 * calling btrfs_backref_finish_upper_links().
 *
 * @path:	Released path for indirect tree backref lookup
 * @iter:	Released backref iter for extent tree search
 * @node_key:	The first key of the tree block
 */
int btrfs_backref_add_tree_node(struct btrfs_backref_cache *cache,
				struct btrfs_path *path,
				struct btrfs_backref_iter *iter,
				struct btrfs_key *node_key,
				struct btrfs_backref_node *cur)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_backref_edge *edge;
	struct btrfs_backref_node *exist;
	int ret;

	ret = btrfs_backref_iter_start(iter, cur->bytenr);
	if (ret < 0)
		return ret;
	/*
	 * We skip the first btrfs_tree_block_info, as we don't use the key
	 * stored in it, but fetch it from the tree block
	 */
	if (btrfs_backref_has_tree_block_info(iter)) {
		ret = btrfs_backref_iter_next(iter);
		if (ret < 0)
			goto out;
		/* No extra backref? This means the tree block is corrupted */
		if (ret > 0) {
			ret = -EUCLEAN;
			goto out;
		}
	}
	WARN_ON(cur->checked);
	if (!list_empty(&cur->upper)) {
		/*
		 * The backref was added previously when processing backref of
		 * type BTRFS_TREE_BLOCK_REF_KEY
		 */
		ASSERT(list_is_singular(&cur->upper));
		edge = list_entry(cur->upper.next, struct btrfs_backref_edge,
				  list[LOWER]);
		ASSERT(list_empty(&edge->list[UPPER]));
		exist = edge->node[UPPER];
		/*
		 * Add the upper level block to pending list if we need to
		 * check its backrefs
		 */
		if (!exist->checked)
			list_add_tail(&edge->list[UPPER], &cache->pending_edge);
	} else {
		exist = NULL;
	}

	for (; ret == 0; ret = btrfs_backref_iter_next(iter)) {
		struct extent_buffer *eb;
		struct btrfs_key key;
		int type;

		cond_resched();
		eb = btrfs_backref_get_eb(iter);

		key.objectid = iter->bytenr;
		if (btrfs_backref_iter_is_inline_ref(iter)) {
			struct btrfs_extent_inline_ref *iref;

			/* Update key for inline backref */
			iref = (struct btrfs_extent_inline_ref *)
				((unsigned long)iter->cur_ptr);
			type = btrfs_get_extent_inline_ref_type(eb, iref,
							BTRFS_REF_TYPE_BLOCK);
			if (type == BTRFS_REF_TYPE_INVALID) {
				ret = -EUCLEAN;
				goto out;
			}
			key.type = type;
			key.offset = btrfs_extent_inline_ref_offset(eb, iref);
		} else {
			key.type = iter->cur_key.type;
			key.offset = iter->cur_key.offset;
		}

		/*
		 * Parent node found and matches current inline ref, no need to
		 * rebuild this node for this inline ref
		 */
		if (exist &&
		    ((key.type == BTRFS_TREE_BLOCK_REF_KEY &&
		      exist->owner == key.offset) ||
		     (key.type == BTRFS_SHARED_BLOCK_REF_KEY &&
		      exist->bytenr == key.offset))) {
			exist = NULL;
			continue;
		}

		/* SHARED_BLOCK_REF means key.offset is the parent bytenr */
		if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
			ret = handle_direct_tree_backref(cache, &key, cur);
			if (ret < 0)
				goto out;
			continue;
		} else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) {
			ret = -EINVAL;
			btrfs_print_v0_err(fs_info);
			btrfs_handle_fs_error(fs_info, ret, NULL);
			goto out;
		} else if (key.type != BTRFS_TREE_BLOCK_REF_KEY) {
			continue;
		}

		/*
		 * key.type == BTRFS_TREE_BLOCK_REF_KEY, inline ref offset
		 * means the root objectid. We need to search the tree to get
		 * its parent bytenr.
		 */
		ret = handle_indirect_tree_backref(cache, path, &key, node_key,
						   cur);
		if (ret < 0)
			goto out;
	}
	ret = 0;
	cur->checked = 1;
	WARN_ON(exist);
out:
	btrfs_backref_iter_release(iter);
	return ret;
}

/*
 * Finish the upwards linkage created by btrfs_backref_add_tree_node()
 */
int btrfs_backref_finish_upper_links(struct btrfs_backref_cache *cache,
				     struct btrfs_backref_node *start)
{
	struct list_head *useless_node = &cache->useless_node;
	struct btrfs_backref_edge *edge;
	struct rb_node *rb_node;
	LIST_HEAD(pending_edge);

	ASSERT(start->checked);

	/* Insert this node into the cache if it's not COW-only */
	if (!start->cowonly) {
		rb_node = rb_simple_insert(&cache->rb_root, start->bytenr,
					   &start->rb_node);
		if (rb_node)
			btrfs_backref_panic(cache->fs_info, start->bytenr,
					    -EEXIST);
		list_add_tail(&start->lower, &cache->leaves);
	}

	/*
	 * Use breadth-first search to iterate all related edges.
	 *
	 * The starting points are all the edges of this node
	 */
	list_for_each_entry(edge, &start->upper, list[LOWER])
		list_add_tail(&edge->list[UPPER], &pending_edge);

	while (!list_empty(&pending_edge)) {
		struct btrfs_backref_node *upper;
		struct btrfs_backref_node *lower;

		edge = list_first_entry(&pending_edge,
					struct btrfs_backref_edge, list[UPPER]);
		list_del_init(&edge->list[UPPER]);
		upper = edge->node[UPPER];
		lower = edge->node[LOWER];

		/* Parent is detached, no need to keep any edges */
		if (upper->detached) {
			list_del(&edge->list[LOWER]);
			btrfs_backref_free_edge(cache, edge);

			/* Lower node is orphan, queue for cleanup */
			if (list_empty(&lower->upper))
				list_add(&lower->list, useless_node);
			continue;
		}

		/*
		 * All new nodes added in current build_backref_tree() haven't
		 * been linked to the cache rb tree.
		 * So if we have upper->rb_node populated, this means a cache
		 * hit. We only need to link the edge, as @upper and all its
		 * parents have already been linked.
		 */
		if (!RB_EMPTY_NODE(&upper->rb_node)) {
			if (upper->lowest) {
				list_del_init(&upper->lower);
				upper->lowest = 0;
			}

			list_add_tail(&edge->list[UPPER], &upper->lower);
			continue;
		}

		/* Sanity check, we shouldn't have any unchecked nodes */
		if (!upper->checked) {
			ASSERT(0);
			return -EUCLEAN;
		}

		/* Sanity check, COW-only node has non-COW-only parent */
		if (start->cowonly != upper->cowonly) {
			ASSERT(0);
			return -EUCLEAN;
		}

		/* Only cache non-COW-only (subvolume trees) tree blocks */
		if (!upper->cowonly) {
			rb_node = rb_simple_insert(&cache->rb_root, upper->bytenr,
						   &upper->rb_node);
			if (rb_node) {
				btrfs_backref_panic(cache->fs_info,
						    upper->bytenr, -EEXIST);
				return -EUCLEAN;
			}
		}

		list_add_tail(&edge->list[UPPER], &upper->lower);

		/*
		 * Also queue all the parent edges of this uncached node
		 * to finish the upper linkage
		 */
		list_for_each_entry(edge, &upper->upper, list[LOWER])
			list_add_tail(&edge->list[UPPER], &pending_edge);
	}
	return 0;
}

void btrfs_backref_error_cleanup(struct btrfs_backref_cache *cache,
				 struct btrfs_backref_node *node)
{
	struct btrfs_backref_node *lower;
	struct btrfs_backref_node *upper;
	struct btrfs_backref_edge *edge;

	while (!list_empty(&cache->useless_node)) {
		lower = list_first_entry(&cache->useless_node,
					 struct btrfs_backref_node, list);
		list_del_init(&lower->list);
	}
	while (!list_empty(&cache->pending_edge)) {
		edge = list_first_entry(&cache->pending_edge,
					struct btrfs_backref_edge, list[UPPER]);
		list_del(&edge->list[UPPER]);
		list_del(&edge->list[LOWER]);
		lower = edge->node[LOWER];
		upper = edge->node[UPPER];
		btrfs_backref_free_edge(cache, edge);

		/*
		 * Lower is no longer linked to any upper backref nodes and
		 * isn't in the cache, we can free it ourselves.
		 */
		if (list_empty(&lower->upper) &&
		    RB_EMPTY_NODE(&lower->rb_node))
			list_add(&lower->list, &cache->useless_node);

		if (!RB_EMPTY_NODE(&upper->rb_node))
			continue;

		/* Add this guy's upper edges to the list to process */
		list_for_each_entry(edge, &upper->upper, list[LOWER])
			list_add_tail(&edge->list[UPPER],
				      &cache->pending_edge);
		if (list_empty(&upper->upper))
			list_add(&upper->list, &cache->useless_node);
	}

	while (!list_empty(&cache->useless_node)) {
		lower = list_first_entry(&cache->useless_node,
					 struct btrfs_backref_node, list);
		list_del_init(&lower->list);
		if (lower == node)
			node = NULL;
		btrfs_backref_drop_node(cache, lower);
	}

	btrfs_backref_cleanup_node(cache, node);
	ASSERT(list_empty(&cache->useless_node) &&
	       list_empty(&cache->pending_edge));
}
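
/*
 * How the cache helpers above fit together, sketched after the relocation
 * code's build_backref_tree() (allocation-failure handling and the
 * per-node bookkeeping are trimmed; parenthesized lines are placeholders):
 *
 *	btrfs_backref_init_cache(fs_info, cache, 1);
 *	node = btrfs_backref_alloc_node(cache, bytenr, level);
 *	cur = node;
 *	do {
 *		ret = btrfs_backref_add_tree_node(cache, path, iter,
 *						  node_key, cur);
 *		(on success, pop the next cur from cache->pending_edge)
 *	} while (!ret && (there are pending edges));
 *	if (!ret)
 *		ret = btrfs_backref_finish_upper_links(cache, node);
 *	if (ret)
 *		btrfs_backref_error_cleanup(cache, node);
 */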