
Lines Matching +full:edge +full:- +full:offset

1 // SPDX-License-Identifier: GPL-2.0
10 #include "disk-io.h"
14 #include "delayed-ref.h"
23 u64 offset; member
34 u64 offset = 0; in check_extent_in_eb() local
50 offset = extent_item_pos - data_offset; in check_extent_in_eb()
55 return -ENOMEM; in check_extent_in_eb()
57 e->next = *eie; in check_extent_in_eb()
58 e->inum = key->objectid; in check_extent_in_eb()
59 e->offset = key->offset + offset; in check_extent_in_eb()
70 eie_next = eie->next; in free_inode_elem_list()
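These matched lines come from the Linux kernel's fs/btrfs/backref.c: check_extent_in_eb() prepends each hit to a singly linked list of extent_inode_elem, and free_inode_elem_list() walks ->next to release it. A minimal userspace sketch of the same pattern (the struct and helper names here are simplified stand-ins, not the kernel's exact definitions):

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for struct extent_inode_elem. */
struct inode_elem {
        unsigned long long inum;
        unsigned long long offset;
        struct inode_elem *next;
};

/* Prepend a hit, mirroring e->next = *eie; *eie = e; in check_extent_in_eb(). */
static int add_inode_elem(struct inode_elem **head,
                          unsigned long long inum, unsigned long long offset)
{
        struct inode_elem *e = malloc(sizeof(*e));

        if (!e)
                return -1;      /* -ENOMEM in the kernel */
        e->inum = inum;
        e->offset = offset;
        e->next = *head;
        *head = e;
        return 0;
}

/* Walk and free, mirroring free_inode_elem_list(). */
static void free_inode_elem_list(struct inode_elem *eie)
{
        struct inode_elem *eie_next;

        for (; eie; eie = eie_next) {
                eie_next = eie->next;
                free(eie);
        }
}

int main(void)
{
        struct inode_elem *list = NULL;

        add_inode_elem(&list, 257, 0);
        add_inode_elem(&list, 257, 4096);
        for (struct inode_elem *e = list; e; e = e->next)
                printf("inum=%llu offset=%llu\n", e->inum, e->offset);
        free_inode_elem_list(list);
        return 0;
}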
132 * ref->count >0:
133 * - incremented when a ref->count transitions to >0
134 * - decremented when a ref->count transitions to <1
145 return (sc && sc->share_count > 1) ? BACKREF_FOUND_SHARED : 0; in extent_is_shared()
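Per the comment at lines 132-134, the share count rises whenever a ref's count transitions into >0 and falls when it drops back below 1; extent_is_shared() then reports BACKREF_FOUND_SHARED once more than one such ref exists. A compile-able sketch of that rule (the struct layout and the BACKREF_FOUND_SHARED value are illustrative, not the kernel's):

#define BACKREF_FOUND_SHARED 6  /* illustrative value */

struct share_check {
        unsigned long long root_objectid;
        unsigned long long inum;
        int share_count;
};

/* Apply the transition rule described in the comment above. */
static void update_share_count(struct share_check *sc, int oldcount,
                               int newcount)
{
        if (!sc)
                return;
        if (oldcount < 1 && newcount > 0)
                sc->share_count++;
        else if (oldcount > 0 && newcount < 1)
                sc->share_count--;
}

static int extent_is_shared(struct share_check *sc)
{
        return (sc && sc->share_count > 1) ? BACKREF_FOUND_SHARED : 0;
}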
158 return -ENOMEM; in btrfs_prelim_ref_init()
174 * A -1 return indicates ref1 is a 'lower' block than ref2, while 1
180 if (ref1->level < ref2->level) in prelim_ref_compare()
181 return -1; in prelim_ref_compare()
182 if (ref1->level > ref2->level) in prelim_ref_compare()
184 if (ref1->root_id < ref2->root_id) in prelim_ref_compare()
185 return -1; in prelim_ref_compare()
186 if (ref1->root_id > ref2->root_id) in prelim_ref_compare()
188 if (ref1->key_for_search.type < ref2->key_for_search.type) in prelim_ref_compare()
189 return -1; in prelim_ref_compare()
190 if (ref1->key_for_search.type > ref2->key_for_search.type) in prelim_ref_compare()
192 if (ref1->key_for_search.objectid < ref2->key_for_search.objectid) in prelim_ref_compare()
193 return -1; in prelim_ref_compare()
194 if (ref1->key_for_search.objectid > ref2->key_for_search.objectid) in prelim_ref_compare()
196 if (ref1->key_for_search.offset < ref2->key_for_search.offset) in prelim_ref_compare()
197 return -1; in prelim_ref_compare()
198 if (ref1->key_for_search.offset > ref2->key_for_search.offset) in prelim_ref_compare()
200 if (ref1->parent < ref2->parent) in prelim_ref_compare()
201 return -1; in prelim_ref_compare()
202 if (ref1->parent > ref2->parent) in prelim_ref_compare()
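prelim_ref_compare() is a chained three-way comparison: refs are ordered by level, then root_id, then the search key's type, objectid, and offset, and finally by parent, returning at the first field that differs. The same shape, compacted with a generic helper (the types here are hypothetical simplifications of struct prelim_ref):

struct key3 {
        unsigned long long objectid;
        unsigned char type;
        unsigned long long offset;
};

struct pref {
        int level;
        unsigned long long root_id;
        struct key3 key;
        unsigned long long parent;
};

/* Generic three-way helper: -1 if a < b, 1 if a > b, 0 if equal. */
#define CMP(a, b) (((a) > (b)) - ((a) < (b)))

static int pref_compare(const struct pref *r1, const struct pref *r2)
{
        int c;

        if ((c = CMP(r1->level, r2->level)))
                return c;
        if ((c = CMP(r1->root_id, r2->root_id)))
                return c;
        if ((c = CMP(r1->key.type, r2->key.type)))
                return c;
        if ((c = CMP(r1->key.objectid, r2->key.objectid)))
                return c;
        if ((c = CMP(r1->key.offset, r2->key.offset)))
                return c;
        return CMP(r1->parent, r2->parent);
}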
215 sc->share_count--; in update_share_count()
217 sc->share_count++; in update_share_count()
237 root = &preftree->root; in prelim_ref_insert()
238 p = &root->rb_root.rb_node; in prelim_ref_insert()
245 p = &(*p)->rb_left; in prelim_ref_insert()
247 p = &(*p)->rb_right; in prelim_ref_insert()
251 struct extent_inode_elem *eie = ref->inode_list; in prelim_ref_insert()
253 while (eie && eie->next) in prelim_ref_insert()
254 eie = eie->next; in prelim_ref_insert()
257 ref->inode_list = newref->inode_list; in prelim_ref_insert()
259 eie->next = newref->inode_list; in prelim_ref_insert()
261 preftree->count); in prelim_ref_insert()
263 * A delayed ref can have newref->count < 0. in prelim_ref_insert()
264 * The ref->count is updated to follow any in prelim_ref_insert()
267 update_share_count(sc, ref->count, in prelim_ref_insert()
268 ref->count + newref->count); in prelim_ref_insert()
269 ref->count += newref->count; in prelim_ref_insert()
275 update_share_count(sc, 0, newref->count); in prelim_ref_insert()
276 preftree->count++; in prelim_ref_insert()
277 trace_btrfs_prelim_ref_insert(fs_info, newref, NULL, preftree->count); in prelim_ref_insert()
278 rb_link_node(&newref->rbnode, parent, p); in prelim_ref_insert()
279 rb_insert_color_cached(&newref->rbnode, root, leftmost); in prelim_ref_insert()
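prelim_ref_insert() uses the standard kernel rbtree idiom: walk down from the root remembering which child link was taken; on an exact match, merge in place (splice the inode_list chains and sum the counts); otherwise attach with rb_link_node() and let rb_insert_color_cached() rebalance while tracking the leftmost node. A userspace sketch of the walk-and-link part using a plain binary search tree (the recoloring that rb_insert_color_cached() performs is omitted):

struct node {
        unsigned long long key;
        struct node *left, *right;
};

/*
 * Descend remembering the link to rewrite, as prelim_ref_insert() does
 * with p = &(*p)->rb_left / rb_right. Returns the existing node on a
 * match (the caller would merge there), or NULL after inserting.
 */
static struct node *bst_insert(struct node **root, struct node *newn)
{
        struct node **p = root;

        while (*p) {
                if (newn->key < (*p)->key)
                        p = &(*p)->left;
                else if (newn->key > (*p)->key)
                        p = &(*p)->right;
                else
                        return *p;      /* existing entry: merge here */
        }
        newn->left = newn->right = NULL;
        *p = newn;                      /* rb_link_node() equivalent */
        return NULL;                    /* kernel recolors the tree here */
}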
291 &preftree->root.rb_root, rbnode) { in prelim_release()
292 free_inode_elem_list(ref->inode_list); in prelim_release()
296 preftree->root = RB_ROOT_CACHED; in prelim_release()
297 preftree->count = 0; in prelim_release()
302 * - obtaining the parent is the goal
303 * - if you add a key, you must know that it is a correct key
304 * - if you cannot add the parent or a correct key, then we will look into the block later to set a correct key
309 *        backref type | shared | indirect | shared | indirect
310 * information         |   tree |     tree |   data |     data
311 * --------------------+--------+----------+--------+----------
312 *      parent logical |    y   |     -    |    -   |     -
313 *      key to resolve |    -   |     y    |    y   |     y
314 *  tree block logical |    -   |     -    |    -   |     -
315 *  root for resolving |    y   |     y    |    y   |     y
317 * - column 1:       we have the parent -> done
318 * - column 2, 3, 4: we use the key to find the parent
322 *        backref type | shared | indirect | shared | indirect
323 * information         |   tree |     tree |   data |     data
324 * --------------------+--------+----------+--------+----------
325 *      parent logical |    y   |     -    |    y   |     -
326 *      key to resolve |    -   |     -    |    -   |     y
327 *  tree block logical |    y   |     y    |    y   |     y
328 *  root for resolving |    -   |     y    |    y   |     y
330 * - column 1, 3: we have the parent -> done
331 * - column 2:    we take the first key from the block to find the parent
332 *                (see add_missing_keys)
333 * - column 4:    we use the key to find the parent
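The two tables encode one decision per backref type: either the parent block is recorded directly in the ref ("we have the parent -> done"), or a key plus a root id must be resolved later; on-disk indirect tree refs carry neither a parent nor a key, so add_missing_keys() reads the first key out of the referenced block itself. A tiny sketch of the decision the tables describe (enum and function names are hypothetical):

enum backref_type { SHARED_TREE, INDIRECT_TREE, SHARED_DATA, INDIRECT_DATA };

/*
 * Returns nonzero when the parent block is recorded in the ref itself;
 * zero when it must still be resolved via a key and root. For on-disk
 * indirect tree refs the key comes from the block (add_missing_keys()).
 */
static int parent_is_direct(int on_disk_ref, enum backref_type type)
{
        if (type == SHARED_TREE)
                return 1;               /* column 1 of both tables */
        if (type == SHARED_DATA)
                return on_disk_ref;     /* column 3: only on-disk refs */
        return 0;
}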
351 return -ENOMEM; in add_prelim_ref()
353 ref->root_id = root_id; in add_prelim_ref()
355 ref->key_for_search = *key; in add_prelim_ref()
357 memset(&ref->key_for_search, 0, sizeof(ref->key_for_search)); in add_prelim_ref()
359 ref->inode_list = NULL; in add_prelim_ref()
360 ref->level = level; in add_prelim_ref()
361 ref->count = count; in add_prelim_ref()
362 ref->parent = parent; in add_prelim_ref()
363 ref->wanted_disk_byte = wanted_disk_byte; in add_prelim_ref()
374 return add_prelim_ref(fs_info, &preftrees->direct, 0, NULL, level, in add_direct_ref()
385 struct preftree *tree = &preftrees->indirect; in add_indirect_ref()
388 tree = &preftrees->indirect_missing_keys; in add_indirect_ref()
395 struct rb_node **p = &preftrees->direct.root.rb_root.rb_node; in is_shared_data_backref()
409 p = &(*p)->rb_left; in is_shared_data_backref()
411 p = &(*p)->rb_right; in is_shared_data_backref()
428 struct btrfs_key *key_for_search = &ref->key_for_search; in add_all_parents()
432 u64 wanted_disk_byte = ref->wanted_disk_byte; in add_all_parents()
438 eb = path->nodes[level]; in add_all_parents()
439 ret = ulist_add(parents, eb->start, 0, GFP_NOFS); in add_all_parents()
455 eb = path->nodes[0]; in add_all_parents()
456 if (path->slots[0] >= btrfs_header_nritems(eb) || in add_all_parents()
457 is_shared_data_backref(preftrees, eb->start) || in add_all_parents()
458 ref->root_id != btrfs_header_owner(eb)) { in add_all_parents()
465 while (!ret && count < ref->count) { in add_all_parents()
466 eb = path->nodes[0]; in add_all_parents()
467 slot = path->slots[0]; in add_all_parents()
471 if (key.objectid != key_for_search->objectid || in add_all_parents()
481 (is_shared_data_backref(preftrees, eb->start) || in add_all_parents()
482 ref->root_id != btrfs_header_owner(eb))) { in add_all_parents()
499 if (ref->key_for_search.offset == key.offset - data_offset) in add_all_parents()
512 ret = ulist_add_merge_ptr(parents, eb->start, in add_all_parents()
517 while (old->next) in add_all_parents()
518 old = old->next; in add_all_parents()
519 old->next = eie; in add_all_parents()
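When ulist_add_merge_ptr() reports that eb->start was already present, add_all_parents() walks the existing extent_inode_elem chain to its tail and appends the new one, so all hits for the same parent block accumulate in a single list. A sketch of that append (payload fields omitted):

struct inode_elem {
        struct inode_elem *next;
        /* inum/offset omitted */
};

/*
 * Append chain 'extra' to the tail of 'list', as add_all_parents() does
 * when ulist_add_merge_ptr() reports the entry already existed.
 */
static void inode_elem_append(struct inode_elem *list,
                              struct inode_elem *extra)
{
        while (list->next)
                list = list->next;
        list->next = extra;
}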
551 int level = ref->level; in resolve_indirect_ref()
552 struct btrfs_key search_key = ref->key_for_search; in resolve_indirect_ref()
562 if (path->search_commit_root) in resolve_indirect_ref()
563 root = btrfs_get_fs_root_commit_root(fs_info, path, ref->root_id); in resolve_indirect_ref()
565 root = btrfs_get_fs_root(fs_info, ref->root_id, false); in resolve_indirect_ref()
571 if (!path->search_commit_root && in resolve_indirect_ref()
572 test_bit(BTRFS_ROOT_DELETING, &root->state)) { in resolve_indirect_ref()
573 ret = -ENOENT; in resolve_indirect_ref()
578 ret = -ENOENT; in resolve_indirect_ref()
582 if (path->search_commit_root) in resolve_indirect_ref()
583 root_level = btrfs_header_level(root->commit_root); in resolve_indirect_ref()
585 root_level = btrfs_header_level(root->node); in resolve_indirect_ref()
593 * We can often find data backrefs with an offset that is too large in resolve_indirect_ref()
594 * (>= LLONG_MAX, maximum allowed file offset) due to underflows when in resolve_indirect_ref()
595 * subtracting a file's offset with the data offset of its in resolve_indirect_ref()
599 * So if we detect such a case we set the search key's offset to zero to in resolve_indirect_ref()
601 * add_all_parents(), otherwise we will miss it because the offset in resolve_indirect_ref()
602 * taken from the backref is much larger than the offset of the file in resolve_indirect_ref()
612 search_key.offset >= LLONG_MAX) in resolve_indirect_ref()
613 search_key.offset = 0; in resolve_indirect_ref()
614 path->lowest_level = level; in resolve_indirect_ref()
622 ref->root_id, level, ref->count, ret, in resolve_indirect_ref()
623 ref->key_for_search.objectid, ref->key_for_search.type, in resolve_indirect_ref()
624 ref->key_for_search.offset); in resolve_indirect_ref()
628 eb = path->nodes[level]; in resolve_indirect_ref()
634 level--; in resolve_indirect_ref()
635 eb = path->nodes[level]; in resolve_indirect_ref()
643 path->lowest_level = 0; in resolve_indirect_ref()
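The underflow the comment describes is plain u64 wraparound: cloning can leave a file extent item whose data offset exceeds its file offset, and subtracting the two yields a huge value >= LLONG_MAX, which is why resolve_indirect_ref() clamps search_key.offset to zero. A worked example with illustrative numbers:

#include <stdio.h>

int main(void)
{
        unsigned long long file_offset = 4096;  /* file extent item key offset */
        unsigned long long data_offset = 8192;  /* extent data offset after a clone */
        /* The backref offset is stored as the difference, which wraps: */
        unsigned long long backref_offset = file_offset - data_offset;

        /* Prints 18446744073709547520, far above LLONG_MAX
         * (9223372036854775807), so the clamp to 0 kicks in. */
        printf("%llu\n", backref_offset);
        return 0;
}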
653 return (struct extent_inode_elem *)(uintptr_t)node->aux; in unode_aux_to_inode_list()
699 return -ENOMEM; in resolve_indirect_refs()
707 while ((rnode = rb_first_cached(&preftrees->indirect.root))) { in resolve_indirect_refs()
711 if (WARN(ref->parent, in resolve_indirect_refs()
713 ret = -EINVAL; in resolve_indirect_refs()
717 rb_erase_cached(&ref->rbnode, &preftrees->indirect.root); in resolve_indirect_refs()
718 preftrees->indirect.count--; in resolve_indirect_refs()
720 if (ref->count == 0) { in resolve_indirect_refs()
725 if (sc && sc->root_objectid && in resolve_indirect_refs()
726 ref->root_id != sc->root_objectid) { in resolve_indirect_refs()
738 if (err == -ENOENT) { in resolve_indirect_refs()
739 prelim_ref_insert(fs_info, &preftrees->direct, ref, in resolve_indirect_refs()
751 ref->parent = node ? node->val : 0; in resolve_indirect_refs()
752 ref->inode_list = unode_aux_to_inode_list(node); in resolve_indirect_refs()
762 ret = -ENOMEM; in resolve_indirect_refs()
766 new_ref->parent = node->val; in resolve_indirect_refs()
767 new_ref->inode_list = unode_aux_to_inode_list(node); in resolve_indirect_refs()
768 prelim_ref_insert(fs_info, &preftrees->direct, in resolve_indirect_refs()
776 prelim_ref_insert(fs_info, &preftrees->direct, ref, NULL); in resolve_indirect_refs()
798 struct preftree *tree = &preftrees->indirect_missing_keys; in add_missing_keys()
801 while ((node = rb_first_cached(&tree->root))) { in add_missing_keys()
803 rb_erase_cached(node, &tree->root); in add_missing_keys()
805 BUG_ON(ref->parent); /* should not be a direct ref */ in add_missing_keys()
806 BUG_ON(ref->key_for_search.type); in add_missing_keys()
807 BUG_ON(!ref->wanted_disk_byte); in add_missing_keys()
809 eb = read_tree_block(fs_info, ref->wanted_disk_byte, 0, in add_missing_keys()
810 ref->level - 1, NULL); in add_missing_keys()
817 return -EIO; in add_missing_keys()
822 btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0); in add_missing_keys()
824 btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0); in add_missing_keys()
828 prelim_ref_insert(fs_info, &preftrees->indirect, ref, NULL); in add_missing_keys()
848 spin_lock(&head->lock); in add_delayed_refs()
849 for (n = rb_first_cached(&head->ref_tree); n; n = rb_next(n)) { in add_delayed_refs()
852 if (node->seq > seq) in add_delayed_refs()
855 switch (node->action) { in add_delayed_refs()
861 count = node->ref_mod; in add_delayed_refs()
864 count = node->ref_mod * -1; in add_delayed_refs()
869 switch (node->type) { in add_delayed_refs()
875 if (head->extent_op && head->extent_op->update_key) { in add_delayed_refs()
876 btrfs_disk_key_to_cpu(&key, &head->extent_op->key); in add_delayed_refs()
881 ret = add_indirect_ref(fs_info, preftrees, ref->root, in add_delayed_refs()
882 key_ptr, ref->level + 1, in add_delayed_refs()
883 node->bytenr, count, sc, in add_delayed_refs()
893 ret = add_direct_ref(fs_info, preftrees, ref->level + 1, in add_delayed_refs()
894 ref->parent, node->bytenr, count, in add_delayed_refs()
903 key.objectid = ref->objectid; in add_delayed_refs()
905 key.offset = ref->offset; in add_delayed_refs()
923 sc->have_delayed_delete_refs = true; in add_delayed_refs()
925 ret = add_indirect_ref(fs_info, preftrees, ref->root, in add_delayed_refs()
926 &key, 0, node->bytenr, count, sc, in add_delayed_refs()
936 ret = add_direct_ref(fs_info, preftrees, 0, ref->parent, in add_delayed_refs()
937 node->bytenr, count, sc, in add_delayed_refs()
954 spin_unlock(&head->lock); in add_delayed_refs()
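add_delayed_refs() folds each delayed ref's action into a signed count before handing it to add_direct_ref()/add_indirect_ref(): add-type actions contribute +ref_mod and drop actions -ref_mod, so merging in prelim_ref_insert() can cancel them out. A sketch of that mapping (the enum values are stand-ins for the BTRFS_*_DELAYED_REF action constants):

enum delayed_action { DELAYED_ADD, DELAYED_ADD_EXTENT, DELAYED_DROP };

/* Signed count handed on to add_direct_ref()/add_indirect_ref(). */
static int delayed_ref_count(enum delayed_action action, int ref_mod)
{
        switch (action) {
        case DELAYED_ADD:
        case DELAYED_ADD_EXTENT:
                return ref_mod;
        case DELAYED_DROP:
                return -ref_mod;
        }
        return 0;       /* the kernel BUG()s on unknown actions */
}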
982 leaf = path->nodes[0]; in add_inline_refs()
983 slot = path->slots[0]; in add_inline_refs()
1004 *info_level = found_key.offset; in add_inline_refs()
1011 u64 offset; in add_inline_refs() local
1018 return -EUCLEAN; in add_inline_refs()
1020 offset = btrfs_extent_inline_ref_offset(leaf, iref); in add_inline_refs()
1025 *info_level + 1, offset, in add_inline_refs()
1035 ret = add_direct_ref(fs_info, preftrees, 0, offset, in add_inline_refs()
1040 ret = add_indirect_ref(fs_info, preftrees, offset, in add_inline_refs()
1049 dref = (struct btrfs_extent_data_ref *)(&iref->offset); in add_inline_refs()
1054 key.offset = btrfs_extent_data_ref_offset(leaf, dref); in add_inline_refs()
1056 if (sc && sc->inum && key.objectid != sc->inum && in add_inline_refs()
1057 !sc->have_delayed_delete_refs) { in add_inline_refs()
1082 * add all non-inline backrefs for bytenr to the list
1091 struct btrfs_root *extent_root = fs_info->extent_root; in add_keyed_refs()
1106 slot = path->slots[0]; in add_keyed_refs()
1107 leaf = path->nodes[0]; in add_keyed_refs()
1121 info_level + 1, key.offset, in add_keyed_refs()
1133 key.offset, bytenr, count, in add_keyed_refs()
1139 ret = add_indirect_ref(fs_info, preftrees, key.offset, in add_keyed_refs()
1155 key.offset = btrfs_extent_data_ref_offset(leaf, dref); in add_keyed_refs()
1157 if (sc && sc->inum && key.objectid != sc->inum && in add_keyed_refs()
1158 !sc->have_delayed_delete_refs) { in add_keyed_refs()
1191 * @sc - if !NULL, then immediately return BACKREF_FOUND_SHARED when a
1224 key.offset = (u64)-1;
1232 return -ENOMEM;
1234 path->search_commit_root = 1;
1235 path->skip_locking = 1;
1239 path->skip_locking = 1;
1249 ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
1255 ret = -EUCLEAN;
1260 if (trans && likely(trans->type != __TRANS_DUMMY) &&
1269 delayed_refs = &trans->transaction->delayed_refs;
1270 spin_lock(&delayed_refs->lock);
1273 if (!mutex_trylock(&head->mutex)) {
1274 refcount_inc(&head->refs);
1275 spin_unlock(&delayed_refs->lock);
1283 mutex_lock(&head->mutex);
1284 mutex_unlock(&head->mutex);
1288 spin_unlock(&delayed_refs->lock);
1291 mutex_unlock(&head->mutex);
1295 spin_unlock(&delayed_refs->lock);
1299 if (path->slots[0]) {
1303 path->slots[0]--;
1304 leaf = path->nodes[0];
1305 slot = path->slots[0];
1323 ret = add_missing_keys(fs_info, &preftrees, path->skip_locking == 0);
1346 node = rb_next(&ref->rbnode);
1348 * ref->count < 0 can happen here if there are delayed
1349 * refs with a node->action of BTRFS_DROP_DELAYED_REF.
1355 * and would retain their original ref->count < 0.
1357 if (roots && ref->count && ref->root_id && ref->parent == 0) {
1358 if (sc && sc->root_objectid &&
1359 ref->root_id != sc->root_objectid) {
1365 ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
1369 if (ref->count && ref->parent) {
1370 if (extent_item_pos && !ref->inode_list &&
1371 ref->level == 0) {
1374 eb = read_tree_block(fs_info, ref->parent, 0,
1375 ref->level, NULL);
1381 ret = -EIO;
1385 if (!path->skip_locking) {
1391 if (!path->skip_locking)
1396 ref->inode_list = eie;
1404 ret = ulist_add_merge_ptr(refs, ref->parent,
1405 ref->inode_list,
1420 ret = -EUCLEAN;
1423 while (eie->next)
1424 eie = eie->next;
1425 eie->next = ref->inode_list;
1432 * use-after-free when our caller uses it or double
1435 ref->inode_list = NULL;
1454 * offset. key_list_head will point to a list of corresponding keys (caller must
1469 return -ENOMEM;
1473 if (ret < 0 && ret != -ENOENT) {
1506 return -ENOMEM;
1510 return -ENOMEM;
1517 if (ret < 0 && ret != -ENOENT) {
1526 bytenr = node->val;
1542 down_read(&fs_info->commit_root_sem);
1546 up_read(&fs_info->commit_root_sem);
1551 * btrfs_check_shared - tell us whether an extent is shared
1567 struct btrfs_fs_info *fs_info = root->fs_info;
1574 .root_objectid = root->root_key.objectid,
1585 if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) {
1590 down_read(&fs_info->commit_root_sem);
1604 if (ret < 0 && ret != -ENOENT)
1610 bytenr = node->val;
1620 up_read(&fs_info->commit_root_sem);
1642 key.offset = start_off;
1649 leaf = path->nodes[0];
1650 slot = path->slots[0];
1653 * If the item at offset is not found,
1664 ret = -ENOENT;
1678 ret = -ENOENT;
1685 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
1689 *found_off = found_key.offset;
1699 * 0-terminated. the path is only given within the current file system.
1718 s64 bytes_left = ((s64)size) - 1;
1721 int leave_spinning = path->leave_spinning;
1727 path->leave_spinning = 1;
1729 bytes_left -= name_len;
1734 if (!path->skip_locking)
1741 ret = -ENOENT;
1745 next_inum = found_key.offset;
1751 slot = path->slots[0];
1752 eb = path->nodes[0];
1755 if (!path->skip_locking)
1757 path->nodes[0] = NULL;
1758 path->locks[0] = 0;
1767 --bytes_left;
1773 path->leave_spinning = leave_spinning;
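btrfs_ref_to_path() assembles the path right to left: bytes_left starts at the end of the caller's buffer, each component's name_len is subtracted before copying, and a '/' is written with --bytes_left between components, so the result is returned as a pointer into the middle of the buffer. A self-contained userspace sketch of the technique (the helper name is hypothetical):

#include <stdio.h>
#include <string.h>

/*
 * Build "a/b/c" right-to-left into buf, the way btrfs_ref_to_path()
 * does. Components are passed leaf-first. Returns a pointer into buf,
 * or NULL if the path does not fit.
 */
static char *build_path_backwards(char *buf, size_t size,
                                  const char **names, int count)
{
        long bytes_left = (long)size - 1;       /* reserve the terminator */

        buf[bytes_left] = '\0';
        for (int i = 0; i < count; i++) {
                size_t name_len = strlen(names[i]);

                bytes_left -= name_len;
                if (bytes_left >= 0)
                        memcpy(buf + bytes_left, names[i], name_len);
                if (i + 1 < count) {
                        --bytes_left;
                        if (bytes_left >= 0)
                                buf[bytes_left] = '/';
                }
        }
        return bytes_left >= 0 ? buf + bytes_left : NULL;
}

int main(void)
{
        const char *names[] = { "file", "dir", "top" }; /* leaf first */
        char buf[64];
        char *p = build_path_backwards(buf, sizeof(buf), names, 3);

        if (p)
                printf("%s\n", p);      /* prints "top/dir/file" */
        return 0;
}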
1803 key.offset = (u64)-1;
1805 ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
1809 ret = btrfs_previous_extent_item(fs_info->extent_root, path, 0);
1812 ret = -ENOENT;
1815 btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
1816 if (found_key->type == BTRFS_METADATA_ITEM_KEY)
1817 size = fs_info->nodesize;
1818 else if (found_key->type == BTRFS_EXTENT_ITEM_KEY)
1819 size = found_key->offset;
1821 if (found_key->objectid > logical ||
1822 found_key->objectid + size <= logical) {
1825 return -ENOENT;
1828 eb = path->nodes[0];
1829 item_size = btrfs_item_size_nr(eb, path->slots[0]);
1832 ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
1837 logical, logical - found_key->objectid, found_key->objectid,
1838 found_key->offset, flags, item_size);
1851 return -EIO;
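extent_from_logical() positions the search at the last extent item at or before the logical address (key.offset = (u64)-1, then btrfs_previous_extent_item()) and rejects it with -ENOENT unless the address falls inside [objectid, objectid + size), where size is key.offset for extent items and fs_info->nodesize for metadata items. The containment test, isolated:

/*
 * The containment test from extent_from_logical(): 'start' is the item's
 * objectid; 'size' is fs_info->nodesize for metadata items or the item's
 * key.offset for extent items.
 */
static int extent_contains(unsigned long long start, unsigned long long size,
                           unsigned long long logical)
{
        return logical >= start && logical < start + size;
}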
1878 if (key->type == BTRFS_METADATA_ITEM_KEY) {
1883 WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY);
1893 return -ENOENT;
1901 return -EUCLEAN;
1926 if (*ptr == (unsigned long)-1)
1946 if (key->type == BTRFS_EXTENT_ITEM_KEY) {
1952 ASSERT(key->type == BTRFS_METADATA_ITEM_KEY);
1953 *out_level = (u8)key->offset;
1957 *ptr = (unsigned long)-1;
1970 for (eie = inode_list; eie; eie = eie->next) {
1973 extent_item_objectid, eie->inum,
1974 eie->offset, root);
1975 ret = iterate(eie->inum, eie->offset, root, ctx);
1990 * when the iterator function returns a non-zero value, iteration stops.
2012 trans = btrfs_attach_transaction(fs_info->extent_root);
2014 if (PTR_ERR(trans) != -ENOENT &&
2015 PTR_ERR(trans) != -EROFS)
2024 down_read(&fs_info->commit_root_sem);
2034 ret = btrfs_find_all_roots_safe(trans, fs_info, ref_node->val,
2043 root_node->val, ref_node->val,
2044 ref_node->aux);
2047 (uintptr_t)ref_node->aux,
2048 root_node->val,
2061 up_read(&fs_info->commit_root_sem);
2067 static int build_ino_list(u64 inum, u64 offset, u64 root, void *ctx) argument
2072 if (inodes->bytes_left >= c) {
2073 inodes->bytes_left -= c;
2074 inodes->val[inodes->elem_cnt] = inum;
2075 inodes->val[inodes->elem_cnt + 1] = offset;
2076 inodes->val[inodes->elem_cnt + 2] = root;
2077 inodes->elem_cnt += 3;
2079 inodes->bytes_missing += c - inodes->bytes_left;
2080 inodes->bytes_left = 0;
2081 inodes->elem_missed += 3;
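build_ino_list() packs (inum, offset, root) triples into a caller-sized u64 array; once bytes_left runs out it switches to counting bytes_missing and elem_missed so the caller knows how much bigger a buffer to retry with. A sketch of the same accounting (the struct is a simplified stand-in for struct btrfs_data_container):

#include <stdint.h>

struct ino_list {
        uint64_t *val;
        uint32_t elem_cnt, elem_missed;
        uint64_t bytes_left, bytes_missing;
};

/* Record one (inum, offset, root) triple, or account for the shortfall. */
static void ino_list_add(struct ino_list *l, uint64_t inum,
                         uint64_t offset, uint64_t root)
{
        const uint64_t c = 3 * sizeof(uint64_t);

        if (l->bytes_left >= c) {
                l->bytes_left -= c;
                l->val[l->elem_cnt] = inum;
                l->val[l->elem_cnt + 1] = offset;
                l->val[l->elem_cnt + 2] = root;
                l->elem_cnt += 3;
        } else {
                l->bytes_missing += c - l->bytes_left;
                l->bytes_left = 0;
                l->elem_missed += 3;
        }
}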
2095 int search_commit_root = path->search_commit_root;
2102 return -EINVAL;
2104 extent_item_pos = logical - found_key.objectid;
2139 ret = found ? 0 : -ENOENT;
2144 parent = found_key.offset;
2145 slot = path->slots[0];
2146 eb = btrfs_clone_extent_buffer(path->nodes[0]);
2148 ret = -ENOMEM;
2159 btrfs_debug(fs_root->fs_info,
2160 "following ref at offset %u for inode %llu in tree %llu",
2162 fs_root->root_key.objectid);
2184 u64 offset = 0; local
2194 ret = btrfs_find_one_extref(fs_root, inum, offset, path, &extref,
2195 &offset);
2199 ret = found ? 0 : -ENOENT;
2204 slot = path->slots[0];
2205 eb = btrfs_clone_extent_buffer(path->nodes[0]);
2207 ret = -ENOMEM;
2223 (unsigned long)&extref->name, eb, ctx);
2232 offset++;
2250 else if (ret != -ENOENT)
2254 if (ret == -ENOENT && found_refs)
2270 int i = ipath->fspath->elem_cnt;
2274 bytes_left = ipath->fspath->bytes_left > s_ptr ?
2275 ipath->fspath->bytes_left - s_ptr : 0;
2277 fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
2278 fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len,
2284 ipath->fspath->val[i] = (u64)(unsigned long)fspath;
2285 ++ipath->fspath->elem_cnt;
2286 ipath->fspath->bytes_left = fspath - fspath_min;
2288 ++ipath->fspath->elem_missed;
2289 ipath->fspath->bytes_missing += fspath_min - fspath;
2290 ipath->fspath->bytes_left = 0;
2298 * it has been created large enough. each path is zero-terminated and accessed
2299 * from ipath->fspath->val[i].
2300 * when it returns, there are ipath->fspath->elem_cnt number of paths available
2301 * in ipath->fspath->val[]. when the allocated space wasn't sufficient, the
2302 * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise,
2303 * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
2308 return iterate_irefs(inum, ipath->fs_root, ipath->btrfs_path,
2320 return ERR_PTR(-ENOMEM);
2323 data->bytes_left = total_bytes - sizeof(*data);
2324 data->bytes_missing = 0;
2326 data->bytes_missing = sizeof(*data) - total_bytes;
2327 data->bytes_left = 0;
2330 data->elem_cnt = 0;
2331 data->elem_missed = 0;
2339 * information will be total_bytes - sizeof(struct inode_fs_paths).
2355 return ERR_PTR(-ENOMEM);
2358 ifp->btrfs_path = path;
2359 ifp->fspath = fspath;
2360 ifp->fs_root = fs_root;
2369 kvfree(ipath->fspath);
2382 ret->path = btrfs_alloc_path();
2383 if (!ret->path) {
2389 ret->path->search_commit_root = 1;
2390 ret->path->skip_locking = 1;
2391 ret->fs_info = fs_info;
2398 struct btrfs_fs_info *fs_info = iter->fs_info;
2399 struct btrfs_path *path = iter->path;
2406 key.offset = (u64)-1;
2407 iter->bytenr = bytenr;
2409 ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
2413 ret = -EUCLEAN;
2416 if (path->slots[0] == 0) {
2418 ret = -EUCLEAN;
2421 path->slots[0]--;
2423 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2426 ret = -ENOENT;
2429 memcpy(&iter->cur_key, &key, sizeof(key));
2430 iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
2431 path->slots[0]);
2432 iter->end_ptr = (u32)(iter->item_ptr +
2433 btrfs_item_size_nr(path->nodes[0], path->slots[0]));
2434 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
2440 * This is an extra precaution for non skinny-metadata, where
2444 if (btrfs_extent_flags(path->nodes[0], ei) & BTRFS_EXTENT_FLAG_DATA) {
2445 ret = -ENOTSUPP;
2448 iter->cur_ptr = (u32)(iter->item_ptr + sizeof(*ei));
2451 if (iter->cur_ptr >= iter->end_ptr) {
2452 ret = btrfs_next_item(fs_info->extent_root, path);
2456 ret = -ENOENT;
2462 btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key,
2463 path->slots[0]);
2464 if (iter->cur_key.objectid != bytenr ||
2465 (iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY &&
2466 iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY)) {
2467 ret = -ENOENT;
2470 iter->cur_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
2471 path->slots[0]);
2472 iter->item_ptr = iter->cur_ptr;
2473 iter->end_ptr = (u32)(iter->item_ptr + btrfs_item_size_nr(
2474 path->nodes[0], path->slots[0]));
2487 * Caller needs to check whether it's an inline ref or not by iter->cur_key.
2496 struct btrfs_path *path = iter->path;
2503 ASSERT(iter->cur_ptr < iter->end_ptr);
2513 ((unsigned long)iter->cur_ptr);
2518 iter->cur_ptr += size;
2519 if (iter->cur_ptr < iter->end_ptr)
2526 ret = btrfs_next_item(iter->fs_info->extent_root, iter->path);
2530 btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key, path->slots[0]);
2531 if (iter->cur_key.objectid != iter->bytenr ||
2532 (iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY &&
2533 iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY))
2535 iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
2536 path->slots[0]);
2537 iter->cur_ptr = iter->item_ptr;
2538 iter->end_ptr = iter->item_ptr + (u32)btrfs_item_size_nr(path->nodes[0],
2539 path->slots[0]);
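The backref iterator keeps three byte cursors into the current leaf: item_ptr marks the start of the item, cur_ptr the next inline ref, and end_ptr the item's end; when cur_ptr reaches end_ptr it calls btrfs_next_item() and continues only while the key still names the same bytenr. The two-level shape of that next() in a flattened userspace model (the layout is hypothetical):

#include <stddef.h>

/* Flattened stand-in: each "item" is a run of inline refs. */
struct item {
        const int *entries;
        size_t count;
};

struct ref_iter {
        const struct item *items;       /* keyed items for one bytenr */
        size_t nitems;
        size_t cur_item;                /* which item the path points at */
        size_t cur_entry;               /* cur_ptr equivalent in the item */
};

/*
 * Two-level next(), shaped like btrfs_backref_iter_next(): exhaust the
 * inline refs of the current item, then step to the next keyed item
 * (the btrfs_next_item() call) until none remain for this bytenr.
 */
static const int *ref_iter_next(struct ref_iter *it)
{
        while (it->cur_item < it->nitems) {
                const struct item *item = &it->items[it->cur_item];

                if (it->cur_entry < item->count)
                        return &item->entries[it->cur_entry++];
                it->cur_item++;
                it->cur_entry = 0;
        }
        return NULL;
}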
2548 cache->rb_root = RB_ROOT;
2550 INIT_LIST_HEAD(&cache->pending[i]);
2551 INIT_LIST_HEAD(&cache->changed);
2552 INIT_LIST_HEAD(&cache->detached);
2553 INIT_LIST_HEAD(&cache->leaves);
2554 INIT_LIST_HEAD(&cache->pending_edge);
2555 INIT_LIST_HEAD(&cache->useless_node);
2556 cache->fs_info = fs_info;
2557 cache->is_reloc = is_reloc;
2570 INIT_LIST_HEAD(&node->list);
2571 INIT_LIST_HEAD(&node->upper);
2572 INIT_LIST_HEAD(&node->lower);
2573 RB_CLEAR_NODE(&node->rb_node);
2574 cache->nr_nodes++;
2575 node->level = level;
2576 node->bytenr = bytenr;
2584 struct btrfs_backref_edge *edge; local
2586 edge = kzalloc(sizeof(*edge), GFP_NOFS);
2587 if (edge)
2588 cache->nr_edges++;
2589 return edge;
2603 struct btrfs_backref_edge *edge; local
2608 BUG_ON(!node->lowest && !node->detached);
2609 while (!list_empty(&node->upper)) {
2610 edge = list_entry(node->upper.next, struct btrfs_backref_edge,
2612 upper = edge->node[UPPER];
2613 list_del(&edge->list[LOWER]);
2614 list_del(&edge->list[UPPER]);
2615 btrfs_backref_free_edge(cache, edge);
2621 if (list_empty(&upper->lower)) {
2622 list_add_tail(&upper->lower, &cache->leaves);
2623 upper->lowest = 1;
2638 while (!list_empty(&cache->detached)) {
2639 node = list_entry(cache->detached.next,
2644 while (!list_empty(&cache->leaves)) {
2645 node = list_entry(cache->leaves.next,
2650 cache->last_trans = 0;
2653 ASSERT(list_empty(&cache->pending[i]));
2654 ASSERT(list_empty(&cache->pending_edge));
2655 ASSERT(list_empty(&cache->useless_node));
2656 ASSERT(list_empty(&cache->changed));
2657 ASSERT(list_empty(&cache->detached));
2658 ASSERT(RB_EMPTY_ROOT(&cache->rb_root));
2659 ASSERT(!cache->nr_nodes);
2660 ASSERT(!cache->nr_edges);
2672 * type is btrfs_inline_ref_type, offset is
2679 struct btrfs_backref_edge *edge; local
2683 ASSERT(ref_key->type == BTRFS_SHARED_BLOCK_REF_KEY);
2686 if (ref_key->objectid == ref_key->offset) {
2689 cur->is_reloc_root = 1;
2691 if (cache->is_reloc) {
2692 root = find_reloc_root(cache->fs_info, cur->bytenr);
2694 return -ENOENT;
2695 cur->root = root;
2701 list_add(&cur->list, &cache->useless_node);
2706 edge = btrfs_backref_alloc_edge(cache);
2707 if (!edge)
2708 return -ENOMEM;
2710 rb_node = rb_simple_search(&cache->rb_root, ref_key->offset);
2713 upper = btrfs_backref_alloc_node(cache, ref_key->offset,
2714 cur->level + 1);
2716 btrfs_backref_free_edge(cache, edge);
2717 return -ENOMEM;
2724 list_add_tail(&edge->list[UPPER], &cache->pending_edge);
2728 ASSERT(upper->checked);
2729 INIT_LIST_HEAD(&edge->list[UPPER]);
2731 btrfs_backref_link_edge(edge, cur, upper, LINK_LOWER);
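handle_direct_tree_backref() allocates an edge, looks the parent up by ref_key->offset (its bytenr) in the cache rb-tree or creates a fresh node for it, then btrfs_backref_link_edge() wires the edge into the nodes' lists; with LINK_LOWER only the lower side is linked immediately and btrfs_backref_finish_upper_links() completes the upper side later. A simplified sketch that links both ends, using singly linked stand-ins for the kernel's list_head pairs:

/* LOWER/UPPER name the two ends of an edge, as in the kernel cache. */
enum { LOWER = 0, UPPER = 1 };

struct bnode;

struct bedge {
        struct bnode *node[2];
        struct bedge *next[2];  /* singly linked stand-in for list[2] */
};

struct bnode {
        unsigned long long bytenr;
        struct bedge *upper;    /* edges where this node is the LOWER end */
        struct bedge *lower;    /* edges where this node is the UPPER end */
};

/* Wire one edge between lower and upper, like btrfs_backref_link_edge(). */
static void link_edge(struct bedge *edge, struct bnode *lo, struct bnode *up)
{
        edge->node[LOWER] = lo;
        edge->node[UPPER] = up;
        edge->next[LOWER] = lo->upper;
        lo->upper = edge;
        edge->next[UPPER] = up->lower;
        up->lower = edge;
}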
2753 struct btrfs_fs_info *fs_info = cache->fs_info;
2756 struct btrfs_backref_edge *edge; local
2764 root = btrfs_get_fs_root(fs_info, ref_key->offset, false);
2767 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
2768 cur->cowonly = 1;
2770 if (btrfs_root_level(&root->root_item) == cur->level) {
2772 ASSERT(btrfs_root_bytenr(&root->root_item) == cur->bytenr);
2780 * completely relying on direct backref (key->offset is parent
2783 if (btrfs_should_ignore_reloc_root(root) && cache->is_reloc) {
2785 list_add(&cur->list, &cache->useless_node);
2787 cur->root = root;
2792 level = cur->level + 1;
2795 path->search_commit_root = 1;
2796 path->skip_locking = 1;
2797 path->lowest_level = level;
2799 path->lowest_level = 0;
2804 if (ret > 0 && path->slots[level] > 0)
2805 path->slots[level]--;
2807 eb = path->nodes[level];
2808 if (btrfs_node_blockptr(eb, path->slots[level]) != cur->bytenr) {
2811 cur->bytenr, level - 1, root->root_key.objectid,
2812 tree_key->objectid, tree_key->type, tree_key->offset);
2814 ret = -ENOENT;
2821 if (!path->nodes[level]) {
2822 ASSERT(btrfs_root_bytenr(&root->root_item) ==
2823 lower->bytenr);
2826 cache->is_reloc) {
2828 list_add(&lower->list, &cache->useless_node);
2830 lower->root = root;
2835 edge = btrfs_backref_alloc_edge(cache);
2836 if (!edge) {
2838 ret = -ENOMEM;
2842 eb = path->nodes[level];
2843 rb_node = rb_simple_search(&cache->rb_root, eb->start);
2845 upper = btrfs_backref_alloc_node(cache, eb->start,
2846 lower->level + 1);
2849 btrfs_backref_free_edge(cache, edge);
2850 ret = -ENOMEM;
2853 upper->owner = btrfs_header_owner(eb);
2854 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
2855 upper->cowonly = 1;
2862 upper->checked = 0;
2864 upper->checked = 1;
2871 if (!upper->checked && need_check) {
2873 list_add_tail(&edge->list[UPPER],
2874 &cache->pending_edge);
2876 if (upper->checked)
2878 INIT_LIST_HEAD(&edge->list[UPPER]);
2883 ASSERT(upper->checked);
2884 INIT_LIST_HEAD(&edge->list[UPPER]);
2885 if (!upper->owner)
2886 upper->owner = btrfs_header_owner(eb);
2888 btrfs_backref_link_edge(edge, lower, upper, LINK_LOWER);
2906 * links aren't yet bi-directional; such links still need to be finished.
2919 struct btrfs_fs_info *fs_info = cache->fs_info;
2920 struct btrfs_backref_edge *edge; local
2924 ret = btrfs_backref_iter_start(iter, cur->bytenr);
2937 ret = -EUCLEAN;
2941 WARN_ON(cur->checked);
2942 if (!list_empty(&cur->upper)) {
2947 ASSERT(list_is_singular(&cur->upper));
2948 edge = list_entry(cur->upper.next, struct btrfs_backref_edge,
2950 ASSERT(list_empty(&edge->list[UPPER]));
2951 exist = edge->node[UPPER];
2956 if (!exist->checked)
2957 list_add_tail(&edge->list[UPPER], &cache->pending_edge);
2970 key.objectid = iter->bytenr;
2976 ((unsigned long)iter->cur_ptr);
2980 ret = -EUCLEAN;
2984 key.offset = btrfs_extent_inline_ref_offset(eb, iref);
2986 key.type = iter->cur_key.type;
2987 key.offset = iter->cur_key.offset;
2996 exist->owner == key.offset) ||
2998 exist->bytenr == key.offset))) {
3003 /* SHARED_BLOCK_REF means key.offset is the parent bytenr */
3010 ret = -EINVAL;
3019 * key.type == BTRFS_TREE_BLOCK_REF_KEY, inline ref offset
3029 cur->checked = 1;
3042 struct list_head *useless_node = &cache->useless_node;
3043 struct btrfs_backref_edge *edge; local
3047 ASSERT(start->checked);
3049 /* Insert this node into the cache if it's not COW-only */
3050 if (!start->cowonly) {
3051 rb_node = rb_simple_insert(&cache->rb_root, start->bytenr,
3052 &start->rb_node);
3054 btrfs_backref_panic(cache->fs_info, start->bytenr,
3055 -EEXIST);
3056 list_add_tail(&start->lower, &cache->leaves);
3064 list_for_each_entry(edge, &start->upper, list[LOWER])
3065 list_add_tail(&edge->list[UPPER], &pending_edge);
3071 edge = list_first_entry(&pending_edge,
3073 list_del_init(&edge->list[UPPER]);
3074 upper = edge->node[UPPER];
3075 lower = edge->node[LOWER];
3078 if (upper->detached) {
3079 list_del(&edge->list[LOWER]);
3080 btrfs_backref_free_edge(cache, edge);
3083 if (list_empty(&lower->upper))
3084 list_add(&lower->list, useless_node);
3091 * So if we have upper->rb_node populated, this means a cache
3092 * hit. We only need to link the edge, as @upper and all its
3095 if (!RB_EMPTY_NODE(&upper->rb_node)) {
3096 if (upper->lowest) {
3097 list_del_init(&upper->lower);
3098 upper->lowest = 0;
3101 list_add_tail(&edge->list[UPPER], &upper->lower);
3106 if (!upper->checked) {
3108 return -EUCLEAN;
3111 /* Sanity check, COW-only node has non-COW-only parent */
3112 if (start->cowonly != upper->cowonly) {
3114 return -EUCLEAN;
3117 /* Only cache non-COW-only (subvolume trees) tree blocks */
3118 if (!upper->cowonly) {
3119 rb_node = rb_simple_insert(&cache->rb_root, upper->bytenr,
3120 &upper->rb_node);
3122 btrfs_backref_panic(cache->fs_info,
3123 upper->bytenr, -EEXIST);
3124 return -EUCLEAN;
3128 list_add_tail(&edge->list[UPPER], &upper->lower);
3134 list_for_each_entry(edge, &upper->upper, list[LOWER])
3135 list_add_tail(&edge->list[UPPER], &pending_edge);
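btrfs_backref_finish_upper_links() is a work-list traversal: start's upper edges seed pending_edge, and each processed edge is either freed (detached upper), linked into an already-cached upper, or inserted fresh, re-queuing that upper's own edges, until the list drains. The bare shape of such a drain loop (names hypothetical):

struct work { struct work *next; };

static struct work *worklist_pop(struct work **head)
{
        struct work *w = *head;

        if (w)
                *head = w->next;
        return w;
}

static void worklist_push(struct work **head, struct work *w)
{
        w->next = *head;
        *head = w;
}

/*
 * Drain loop: process() may push newly reached edges back onto the
 * list, so the traversal ends only when nothing is pending, exactly
 * how pending_edge is consumed above.
 */
static void worklist_drain(struct work **pending,
                           void (*process)(struct work *, struct work **))
{
        struct work *w;

        while ((w = worklist_pop(pending)))
                process(w, pending);
}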
3145 struct btrfs_backref_edge *edge; local
3147 while (!list_empty(&cache->useless_node)) {
3148 lower = list_first_entry(&cache->useless_node,
3150 list_del_init(&lower->list);
3152 while (!list_empty(&cache->pending_edge)) {
3153 edge = list_first_entry(&cache->pending_edge,
3155 list_del(&edge->list[UPPER]);
3156 list_del(&edge->list[LOWER]);
3157 lower = edge->node[LOWER];
3158 upper = edge->node[UPPER];
3159 btrfs_backref_free_edge(cache, edge);
3165 if (list_empty(&lower->upper) &&
3166 RB_EMPTY_NODE(&lower->rb_node))
3167 list_add(&lower->list, &cache->useless_node);
3169 if (!RB_EMPTY_NODE(&upper->rb_node))
3173 list_for_each_entry(edge, &upper->upper, list[LOWER])
3174 list_add_tail(&edge->list[UPPER],
3175 &cache->pending_edge);
3176 if (list_empty(&upper->upper))
3177 list_add(&upper->list, &cache->useless_node);
3180 while (!list_empty(&cache->useless_node)) {
3181 lower = list_first_entry(&cache->useless_node,
3183 list_del_init(&lower->list);
3190 ASSERT(list_empty(&cache->useless_node) &&
3191 list_empty(&cache->pending_edge));