
Lines Matching +full:lower +full:- +full:case

1 // SPDX-License-Identifier: GPL-2.0
10 #include "disk-io.h"
14 #include "delayed-ref.h"
50 offset = extent_item_pos - data_offset; in check_extent_in_eb()
55 return -ENOMEM; in check_extent_in_eb()
57 e->next = *eie; in check_extent_in_eb()
58 e->inum = key->objectid; in check_extent_in_eb()
59 e->offset = key->offset + offset; in check_extent_in_eb()
70 eie_next = eie->next; in free_inode_elem_list()
132 * The share_count tracks prelim_refs (direct and indirect) having a ref->count >0:
133 * - incremented when a ref->count transitions to >0
134 * - decremented when a ref->count transitions to <1
144 return (sc && sc->share_count > 1) ? BACKREF_FOUND_SHARED : 0; in extent_is_shared()
157 return -ENOMEM; in btrfs_prelim_ref_init()
173 * A -1 return indicates ref1 is a 'lower' block than ref2, while 1 indicates a 'higher' block.
179 if (ref1->level < ref2->level) in prelim_ref_compare()
180 return -1; in prelim_ref_compare()
181 if (ref1->level > ref2->level) in prelim_ref_compare()
183 if (ref1->root_id < ref2->root_id) in prelim_ref_compare()
184 return -1; in prelim_ref_compare()
185 if (ref1->root_id > ref2->root_id) in prelim_ref_compare()
187 if (ref1->key_for_search.type < ref2->key_for_search.type) in prelim_ref_compare()
188 return -1; in prelim_ref_compare()
189 if (ref1->key_for_search.type > ref2->key_for_search.type) in prelim_ref_compare()
191 if (ref1->key_for_search.objectid < ref2->key_for_search.objectid) in prelim_ref_compare()
192 return -1; in prelim_ref_compare()
193 if (ref1->key_for_search.objectid > ref2->key_for_search.objectid) in prelim_ref_compare()
195 if (ref1->key_for_search.offset < ref2->key_for_search.offset) in prelim_ref_compare()
196 return -1; in prelim_ref_compare()
197 if (ref1->key_for_search.offset > ref2->key_for_search.offset) in prelim_ref_compare()
199 if (ref1->parent < ref2->parent) in prelim_ref_compare()
200 return -1; in prelim_ref_compare()
201 if (ref1->parent > ref2->parent) in prelim_ref_compare()
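The comparator above establishes a strict total order over preliminary refs by testing one field at a time: level, then root_id, then the search key's type/objectid/offset, then parent. Only when every field ties does it return 0, which is what lets the insert path merge duplicates. A minimal userspace sketch of the same pattern; struct pref here is a stand-in, not the kernel's struct prelim_ref:

#include <stdint.h>

struct key { uint64_t objectid; uint8_t type; uint64_t offset; };

struct pref {
	int level;
	uint64_t root_id;
	struct key key_for_search;
	uint64_t parent;
};

/* Compare one field; -1/1 on a difference, fall through on a tie. */
#define CMP_FIELD(a, b, f) \
	do { if ((a)->f < (b)->f) return -1; if ((a)->f > (b)->f) return 1; } while (0)

static int pref_compare(const struct pref *r1, const struct pref *r2)
{
	CMP_FIELD(r1, r2, level);
	CMP_FIELD(r1, r2, root_id);
	CMP_FIELD(r1, r2, key_for_search.type);
	CMP_FIELD(r1, r2, key_for_search.objectid);
	CMP_FIELD(r1, r2, key_for_search.offset);
	CMP_FIELD(r1, r2, parent);
	return 0;	/* equal on every field: mergeable duplicates */
}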
214 sc->share_count--; in update_share_count()
216 sc->share_count++; in update_share_count()
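update_share_count() only moves the counter when a ref's count crosses zero, so share_count ends up counting refs with a positive count, exactly as the comment at line 132 describes, and extent_is_shared() fires once it exceeds 1. A reduced model of that bookkeeping, with struct share_check trimmed to the one field the sketch needs:

struct share_check { int share_count; };	/* trimmed; not the kernel struct */

static void update_count(struct share_check *sc, int oldcount, int newcount)
{
	if (!sc)			/* caller isn't asking about sharedness */
		return;
	if (oldcount > 0 && newcount < 1)
		sc->share_count--;	/* a ref->count dropped below 1 */
	else if (oldcount < 1 && newcount > 0)
		sc->share_count++;	/* a ref->count became positive */
}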
236 root = &preftree->root; in prelim_ref_insert()
237 p = &root->rb_root.rb_node; in prelim_ref_insert()
244 p = &(*p)->rb_left; in prelim_ref_insert()
246 p = &(*p)->rb_right; in prelim_ref_insert()
250 struct extent_inode_elem *eie = ref->inode_list; in prelim_ref_insert()
252 while (eie && eie->next) in prelim_ref_insert()
253 eie = eie->next; in prelim_ref_insert()
256 ref->inode_list = newref->inode_list; in prelim_ref_insert()
258 eie->next = newref->inode_list; in prelim_ref_insert()
260 preftree->count); in prelim_ref_insert()
262 * A delayed ref can have newref->count < 0. in prelim_ref_insert()
263 * The ref->count is updated to follow any BTRFS_[ADD|DROP]_DELAYED_REF actions. in prelim_ref_insert()
266 update_share_count(sc, ref->count, in prelim_ref_insert()
267 ref->count + newref->count); in prelim_ref_insert()
268 ref->count += newref->count; in prelim_ref_insert()
274 update_share_count(sc, 0, newref->count); in prelim_ref_insert()
275 preftree->count++; in prelim_ref_insert()
276 trace_btrfs_prelim_ref_insert(fs_info, newref, NULL, preftree->count); in prelim_ref_insert()
277 rb_link_node(&newref->rbnode, parent, p); in prelim_ref_insert()
278 rb_insert_color_cached(&newref->rbnode, root, leftmost); in prelim_ref_insert()
290 &preftree->root.rb_root, rbnode) in prelim_release()
293 preftree->root = RB_ROOT_CACHED; in prelim_release()
294 preftree->count = 0; in prelim_release()
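prelim_ref_insert() walks the cached rbtree with the comparator and either links the new node or folds it into an existing entry, summing counts and splicing inode lists. A self-contained sketch of that insert-or-merge shape, using a plain unbalanced binary tree instead of the kernel's rb_root_cached (newn must arrive with NULL children):

#include <stdlib.h>
#include <stdint.h>

struct node {
	struct node *left, *right;
	uint64_t key;	/* stands in for the multi-field compare above */
	int count;
};

/* Returns the (possibly new) tree root; merges counts on an exact match. */
static struct node *insert_or_merge(struct node *root, struct node *newn)
{
	struct node **p = &root;

	while (*p) {
		if (newn->key < (*p)->key) {
			p = &(*p)->left;
		} else if (newn->key > (*p)->key) {
			p = &(*p)->right;
		} else {
			/* existing entry: merge, drop the newcomer */
			(*p)->count += newn->count;
			free(newn);
			return root;
		}
	}
	*p = newn;	/* no match anywhere on the walk: link it here */
	return root;
}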
* The rules for all callers of this function are:
* - obtaining the parent is the goal
* - if you add a key, you must know that it is a correct key
* - if you cannot add the parent or a correct key, then we will look into the
*   block of the parent to the added key. thereby, if blocks are cowed, we
*   might get a new parent or the old one with a new generation
*
* which is consistent with how these functions may be called:
*
* on disk refs (inline or keyed)
* ==============================
*        backref type | shared | indirect | shared | indirect
* information         |   tree |     tree |   data |     data
* --------------------+--------+----------+--------+----------
*      parent logical |    y   |     -    |    -   |     -
*      key to resolve |    -   |     y    |    y   |     y
*  tree block logical |    -   |     -    |    -   |     -
*  root for resolving |    y   |     y    |    y   |     y
*
* - column 1:       we've the parent -> done
* - column 2, 3, 4: we use the key to find the parent
*
* on hash hits in delayed refs
* ============================
*        backref type | shared | indirect | shared | indirect
* information         |   tree |     tree |   data |     data
* --------------------+--------+----------+--------+----------
*      parent logical |    y   |     -    |    y   |     -
*      key to resolve |    -   |     -    |    -   |     y
*  tree block logical |    -   |     -    |    -   |     -
*  root for resolving |    -   |     y    |    y   |     y
*
* - column 1, 3: we've the parent -> done
* - column 2:    we take the first key from the block to find the parent
*                (see add_missing_keys)
* - column 4:    we use the key to find the parent
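In code, the tables boil down to one question per backref: do we already have the parent block's logical address (the shared/direct columns, done immediately), or only a key plus a root to resolve it in (the indirect columns, needing a later tree search)? A minimal sketch of that split; the names are illustrative, not kernel API:

#include <stdbool.h>
#include <stdint.h>

struct raw_ref {
	uint64_t parent;	/* parent logical; nonzero for shared refs */
	uint64_t root_id;	/* root to resolve in; used by indirect refs */
};

static bool is_direct(const struct raw_ref *r)
{
	return r->parent != 0;	/* "we've the parent -> done" */
}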
348 return -ENOMEM; in add_prelim_ref()
350 ref->root_id = root_id; in add_prelim_ref()
352 ref->key_for_search = *key; in add_prelim_ref()
354 memset(&ref->key_for_search, 0, sizeof(ref->key_for_search)); in add_prelim_ref()
356 ref->inode_list = NULL; in add_prelim_ref()
357 ref->level = level; in add_prelim_ref()
358 ref->count = count; in add_prelim_ref()
359 ref->parent = parent; in add_prelim_ref()
360 ref->wanted_disk_byte = wanted_disk_byte; in add_prelim_ref()
371 return add_prelim_ref(fs_info, &preftrees->direct, 0, NULL, level, in add_direct_ref()
382 struct preftree *tree = &preftrees->indirect; in add_indirect_ref()
385 tree = &preftrees->indirect_missing_keys; in add_indirect_ref()
392 struct rb_node **p = &preftrees->direct.root.rb_root.rb_node; in is_shared_data_backref()
406 p = &(*p)->rb_left; in is_shared_data_backref()
408 p = &(*p)->rb_right; in is_shared_data_backref()
425 struct btrfs_key *key_for_search = &ref->key_for_search; in add_all_parents()
429 u64 wanted_disk_byte = ref->wanted_disk_byte; in add_all_parents()
434 eb = path->nodes[level]; in add_all_parents()
435 ret = ulist_add(parents, eb->start, 0, GFP_NOFS); in add_all_parents()
451 eb = path->nodes[0]; in add_all_parents()
452 if (path->slots[0] >= btrfs_header_nritems(eb) || in add_all_parents()
453 is_shared_data_backref(preftrees, eb->start) || in add_all_parents()
454 ref->root_id != btrfs_header_owner(eb)) { in add_all_parents()
461 while (!ret && count < ref->count) { in add_all_parents()
462 eb = path->nodes[0]; in add_all_parents()
463 slot = path->slots[0]; in add_all_parents()
467 if (key.objectid != key_for_search->objectid || in add_all_parents()
477 (is_shared_data_backref(preftrees, eb->start) || in add_all_parents()
478 ref->root_id != btrfs_header_owner(eb))) { in add_all_parents()
492 if (ref->key_for_search.offset == key.offset - data_offset) in add_all_parents()
505 ret = ulist_add_merge_ptr(parents, eb->start, in add_all_parents()
510 while (old->next) in add_all_parents()
511 old = old->next; in add_all_parents()
512 old->next = eie; in add_all_parents()
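The tail-walk above is how add_all_parents() keeps every extent_inode_elem when ulist_add_merge_ptr() reports that the parent block was already recorded: the new chain is appended after the old one so no hits are lost. The same splice with simplified types:

struct elem { struct elem *next; };

static void append_chain(struct elem **oldp, struct elem *extra)
{
	struct elem *old = *oldp;

	if (!old) {		/* nothing recorded yet: take the chain whole */
		*oldp = extra;
		return;
	}
	while (old->next)	/* walk to the tail */
		old = old->next;
	old->next = extra;
}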
544 int level = ref->level; in resolve_indirect_ref()
545 struct btrfs_key search_key = ref->key_for_search; in resolve_indirect_ref()
555 if (path->search_commit_root) in resolve_indirect_ref()
556 root = btrfs_get_fs_root_commit_root(fs_info, path, ref->root_id); in resolve_indirect_ref()
558 root = btrfs_get_fs_root(fs_info, ref->root_id, false); in resolve_indirect_ref()
564 if (!path->search_commit_root && in resolve_indirect_ref()
565 test_bit(BTRFS_ROOT_DELETING, &root->state)) { in resolve_indirect_ref()
566 ret = -ENOENT; in resolve_indirect_ref()
571 ret = -ENOENT; in resolve_indirect_ref()
575 if (path->search_commit_root) in resolve_indirect_ref()
576 root_level = btrfs_header_level(root->commit_root); in resolve_indirect_ref()
578 root_level = btrfs_header_level(root->node); in resolve_indirect_ref()
592 * So if we detect such a case we set the search key's offset to zero to make sure we will find the matching file extent item at add_all_parents(). in resolve_indirect_ref()
607 path->lowest_level = level; in resolve_indirect_ref()
615 ref->root_id, level, ref->count, ret, in resolve_indirect_ref()
616 ref->key_for_search.objectid, ref->key_for_search.type, in resolve_indirect_ref()
617 ref->key_for_search.offset); in resolve_indirect_ref()
621 eb = path->nodes[level]; in resolve_indirect_ref()
627 level--; in resolve_indirect_ref()
628 eb = path->nodes[level]; in resolve_indirect_ref()
636 path->lowest_level = 0; in resolve_indirect_ref()
646 return (struct extent_inode_elem *)(uintptr_t)node->aux; in unode_aux_to_inode_list()
680 return -ENOMEM; in resolve_indirect_refs()
688 while ((rnode = rb_first_cached(&preftrees->indirect.root))) { in resolve_indirect_refs()
692 if (WARN(ref->parent, in resolve_indirect_refs()
694 ret = -EINVAL; in resolve_indirect_refs()
698 rb_erase_cached(&ref->rbnode, &preftrees->indirect.root); in resolve_indirect_refs()
699 preftrees->indirect.count--; in resolve_indirect_refs()
701 if (ref->count == 0) { in resolve_indirect_refs()
706 if (sc && sc->root_objectid && in resolve_indirect_refs()
707 ref->root_id != sc->root_objectid) { in resolve_indirect_refs()
719 if (err == -ENOENT) { in resolve_indirect_refs()
720 prelim_ref_insert(fs_info, &preftrees->direct, ref, in resolve_indirect_refs()
732 ref->parent = node ? node->val : 0; in resolve_indirect_refs()
733 ref->inode_list = unode_aux_to_inode_list(node); in resolve_indirect_refs()
743 ret = -ENOMEM; in resolve_indirect_refs()
747 new_ref->parent = node->val; in resolve_indirect_refs()
748 new_ref->inode_list = unode_aux_to_inode_list(node); in resolve_indirect_refs()
749 prelim_ref_insert(fs_info, &preftrees->direct, in resolve_indirect_refs()
757 prelim_ref_insert(fs_info, &preftrees->direct, ref, NULL); in resolve_indirect_refs()
775 struct preftree *tree = &preftrees->indirect_missing_keys; in add_missing_keys()
778 while ((node = rb_first_cached(&tree->root))) { in add_missing_keys()
780 rb_erase_cached(node, &tree->root); in add_missing_keys()
782 BUG_ON(ref->parent); /* should not be a direct ref */ in add_missing_keys()
783 BUG_ON(ref->key_for_search.type); in add_missing_keys()
784 BUG_ON(!ref->wanted_disk_byte); in add_missing_keys()
786 eb = read_tree_block(fs_info, ref->wanted_disk_byte, 0, in add_missing_keys()
787 ref->level - 1, NULL); in add_missing_keys()
794 return -EIO; in add_missing_keys()
799 btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0); in add_missing_keys()
801 btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0); in add_missing_keys()
805 prelim_ref_insert(fs_info, &preftrees->indirect, ref, NULL); in add_missing_keys()
820 struct btrfs_delayed_extent_op *extent_op = head->extent_op; in add_delayed_refs()
827 if (extent_op && extent_op->update_key) in add_delayed_refs()
828 btrfs_disk_key_to_cpu(&tmp_op_key, &extent_op->key); in add_delayed_refs()
830 spin_lock(&head->lock); in add_delayed_refs()
831 for (n = rb_first_cached(&head->ref_tree); n; n = rb_next(n)) { in add_delayed_refs()
834 if (node->seq > seq) in add_delayed_refs()
837 switch (node->action) { in add_delayed_refs()
838 case BTRFS_ADD_DELAYED_EXTENT: in add_delayed_refs()
839 case BTRFS_UPDATE_DELAYED_HEAD: in add_delayed_refs()
842 case BTRFS_ADD_DELAYED_REF: in add_delayed_refs()
843 count = node->ref_mod; in add_delayed_refs()
845 case BTRFS_DROP_DELAYED_REF: in add_delayed_refs()
846 count = node->ref_mod * -1; in add_delayed_refs()
851 switch (node->type) { in add_delayed_refs()
852 case BTRFS_TREE_BLOCK_REF_KEY: { in add_delayed_refs()
857 ret = add_indirect_ref(fs_info, preftrees, ref->root, in add_delayed_refs()
858 &tmp_op_key, ref->level + 1, in add_delayed_refs()
859 node->bytenr, count, sc, in add_delayed_refs()
863 case BTRFS_SHARED_BLOCK_REF_KEY: { in add_delayed_refs()
869 ret = add_direct_ref(fs_info, preftrees, ref->level + 1, in add_delayed_refs()
870 ref->parent, node->bytenr, count, in add_delayed_refs()
874 case BTRFS_EXTENT_DATA_REF_KEY: { in add_delayed_refs()
879 key.objectid = ref->objectid; in add_delayed_refs()
881 key.offset = ref->offset; in add_delayed_refs()
887 if (sc && sc->inum && ref->objectid != sc->inum) { in add_delayed_refs()
892 ret = add_indirect_ref(fs_info, preftrees, ref->root, in add_delayed_refs()
893 &key, 0, node->bytenr, count, sc, in add_delayed_refs()
897 case BTRFS_SHARED_DATA_REF_KEY: { in add_delayed_refs()
903 ret = add_direct_ref(fs_info, preftrees, 0, ref->parent, in add_delayed_refs()
904 node->bytenr, count, sc, in add_delayed_refs()
921 spin_unlock(&head->lock); in add_delayed_refs()
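The switch on node->action above turns each pending delayed ref into a signed contribution: BTRFS_ADD_DELAYED_REF adds ref_mod, BTRFS_DROP_DELAYED_REF subtracts it, and extent/head updates contribute nothing, so a ref whose net count reaches zero cancels out. A hedged sketch of that rule; the enum values are local stand-ins for the kernel constants:

enum action { ADD_REF, DROP_REF, HEAD_UPDATE };

static int signed_count(enum action a, int ref_mod)
{
	switch (a) {
	case ADD_REF:
		return ref_mod;		/* BTRFS_ADD_DELAYED_REF */
	case DROP_REF:
		return ref_mod * -1;	/* BTRFS_DROP_DELAYED_REF */
	default:
		return 0;		/* extent/head updates carry no count */
	}
}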
949 leaf = path->nodes[0]; in add_inline_refs()
950 slot = path->slots[0]; in add_inline_refs()
985 return -EUCLEAN; in add_inline_refs()
990 case BTRFS_SHARED_BLOCK_REF_KEY: in add_inline_refs()
995 case BTRFS_SHARED_DATA_REF_KEY: { in add_inline_refs()
1006 case BTRFS_TREE_BLOCK_REF_KEY: in add_inline_refs()
1011 case BTRFS_EXTENT_DATA_REF_KEY: { in add_inline_refs()
1016 dref = (struct btrfs_extent_data_ref *)(&iref->offset); in add_inline_refs()
1023 if (sc && sc->inum && key.objectid != sc->inum) { in add_inline_refs()
1047 * add all non-inline backrefs for bytenr to the list
1056 struct btrfs_root *extent_root = fs_info->extent_root; in add_keyed_refs()
1071 slot = path->slots[0]; in add_keyed_refs()
1072 leaf = path->nodes[0]; in add_keyed_refs()
1083 case BTRFS_SHARED_BLOCK_REF_KEY: in add_keyed_refs()
1089 case BTRFS_SHARED_DATA_REF_KEY: { in add_keyed_refs()
1102 case BTRFS_TREE_BLOCK_REF_KEY: in add_keyed_refs()
1108 case BTRFS_EXTENT_DATA_REF_KEY: { in add_keyed_refs()
1122 if (sc && sc->inum && key.objectid != sc->inum) { in add_keyed_refs()
1151 * much like the trans == NULL case; the difference only lies in that it will not commit root.
1153 * The special case is for qgroup to search roots in commit_transaction().
1155 * @sc - if !NULL, then immediately return BACKREF_FOUND_SHARED when a shared extent is detected.
1188 key.offset = (u64)-1;
1196 return -ENOMEM;
1198 path->search_commit_root = 1;
1199 path->skip_locking = 1;
1203 path->skip_locking = 1;
1213 ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
1219 ret = -EUCLEAN;
1224 if (trans && likely(trans->type != __TRANS_DUMMY) &&
1233 delayed_refs = &trans->transaction->delayed_refs;
1234 spin_lock(&delayed_refs->lock);
1237 if (!mutex_trylock(&head->mutex)) {
1238 refcount_inc(&head->refs);
1239 spin_unlock(&delayed_refs->lock);
1247 mutex_lock(&head->mutex);
1248 mutex_unlock(&head->mutex);
1252 spin_unlock(&delayed_refs->lock);
1255 mutex_unlock(&head->mutex);
1259 spin_unlock(&delayed_refs->lock);
1263 if (path->slots[0]) {
1267 path->slots[0]--;
1268 leaf = path->nodes[0];
1269 slot = path->slots[0];
1287 ret = add_missing_keys(fs_info, &preftrees, path->skip_locking == 0);
1310 node = rb_next(&ref->rbnode);
1312 * ref->count < 0 can happen here if there are delayed
1313 * refs with a node->action of BTRFS_DROP_DELAYED_REF.
1319 * and would retain their original ref->count < 0.
1321 if (roots && ref->count && ref->root_id && ref->parent == 0) {
1322 if (sc && sc->root_objectid &&
1323 ref->root_id != sc->root_objectid) {
1329 ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
1333 if (ref->count && ref->parent) {
1334 if (extent_item_pos && !ref->inode_list &&
1335 ref->level == 0) {
1338 eb = read_tree_block(fs_info, ref->parent, 0,
1339 ref->level, NULL);
1345 ret = -EIO;
1349 if (!path->skip_locking) {
1355 if (!path->skip_locking)
1360 ref->inode_list = eie;
1362 ret = ulist_add_merge_ptr(refs, ref->parent,
1363 ref->inode_list,
1374 * case.
1378 ret = -EUCLEAN;
1381 while (eie->next)
1382 eie = eie->next;
1383 eie->next = ref->inode_list;
1410 if (!node->aux)
1414 node->aux = 0;
1437 return -ENOMEM;
1441 if (ret < 0 && ret != -ENOENT) {
1474 return -ENOMEM;
1478 return -ENOMEM;
1485 if (ret < 0 && ret != -ENOENT) {
1494 bytenr = node->val;
1510 down_read(&fs_info->commit_root_sem);
1514 up_read(&fs_info->commit_root_sem);
1519 * btrfs_check_shared - tell us whether an extent is shared
1535 struct btrfs_fs_info *fs_info = root->fs_info;
1542 .root_objectid = root->root_key.objectid,
1552 if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) {
1557 down_read(&fs_info->commit_root_sem);
1571 if (ret < 0 && ret != -ENOENT)
1577 bytenr = node->val;
1586 up_read(&fs_info->commit_root_sem);
1615 leaf = path->nodes[0];
1616 slot = path->slots[0];
1621 * where it should be inserted. In our case
1623 * next INODE_REF_KEY_V2 item. In the case
1630 ret = -ENOENT;
1644 ret = -ENOENT;
1651 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
1665 * 0-terminated. the path is only given within the current file system.
1670 * in case the path buffer would overflow, the pointer is decremented further
1673 * required for the path to fit into the buffer. in that case, the returned
1684 s64 bytes_left = ((s64)size) - 1;
1687 int leave_spinning = path->leave_spinning;
1693 path->leave_spinning = 1;
1695 bytes_left -= name_len;
1700 if (!path->skip_locking)
1707 ret = -ENOENT;
1717 slot = path->slots[0];
1718 eb = path->nodes[0];
1721 if (!path->skip_locking)
1723 path->nodes[0] = NULL;
1724 path->locks[0] = 0;
1733 --bytes_left;
1739 path->leave_spinning = leave_spinning;
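btrfs_ref_to_path() fills the caller's buffer from the end: each parent name is prepended in front of what has already been built, and the write pointer keeps moving left, which is why the comment above notes the pointer is decremented further even on overflow so the required size can be reported. A userspace sketch of the prepend step; this simplified helper stops at the first overflow instead of continuing to count:

#include <string.h>

/* Prepend "name" plus a '/' in front of *dest_end; returns the new start.
 * dest_end must point at the already-built suffix (initially the NUL at the
 * buffer's end). Returns NULL once the buffer is too small. */
static char *prepend(char *dest_end, long *bytes_left, const char *name)
{
	size_t len = strlen(name);

	*bytes_left -= (long)(len + 1);	/* component plus its leading '/' */
	if (*bytes_left < 0)
		return NULL;
	dest_end -= len;
	memcpy(dest_end, name, len);
	*--dest_end = '/';
	return dest_end;
}

/* usage:
 * char buf[32]; long left = sizeof(buf) - 1;
 * char *p = buf + sizeof(buf) - 1; *p = '\0';
 * p = prepend(p, &left, "file");
 * p = prepend(p, &left, "dir");    => p now points at "/dir/file"
 */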
1769 key.offset = (u64)-1;
1771 ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
1775 ret = btrfs_previous_extent_item(fs_info->extent_root, path, 0);
1778 ret = -ENOENT;
1781 btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
1782 if (found_key->type == BTRFS_METADATA_ITEM_KEY)
1783 size = fs_info->nodesize;
1784 else if (found_key->type == BTRFS_EXTENT_ITEM_KEY)
1785 size = found_key->offset;
1787 if (found_key->objectid > logical ||
1788 found_key->objectid + size <= logical) {
1791 return -ENOENT;
1794 eb = path->nodes[0];
1795 item_size = btrfs_item_size_nr(eb, path->slots[0]);
1798 ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
1803 logical, logical - found_key->objectid, found_key->objectid,
1804 found_key->offset, flags, item_size);
1817 return -EIO;
1844 if (key->type == BTRFS_METADATA_ITEM_KEY) {
1849 WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY);
1859 return -ENOENT;
1867 return -EUCLEAN;
1892 if (*ptr == (unsigned long)-1)
1912 if (key->type == BTRFS_EXTENT_ITEM_KEY) {
1918 ASSERT(key->type == BTRFS_METADATA_ITEM_KEY);
1919 *out_level = (u8)key->offset;
1923 *ptr = (unsigned long)-1;
1936 for (eie = inode_list; eie; eie = eie->next) {
1939 extent_item_objectid, eie->inum,
1940 eie->offset, root);
1941 ret = iterate(eie->inum, eie->offset, root, ctx);
1956 * when the iterator function returns a non-zero value, iteration stops.
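That contract, one callback invocation per (inum, offset, root) hit with any nonzero return aborting the walk and being propagated to the caller, is easy to state as a sketch. iterate_fn and struct hit below are illustrative, not the kernel's iterator typedef:

typedef int (*iterate_fn)(unsigned long long inum, unsigned long long offset,
			  unsigned long long root, void *ctx);

struct hit { unsigned long long inum, offset, root; };

static int for_each_hit(const struct hit *hits, int n, iterate_fn fn, void *ctx)
{
	int ret = 0;

	for (int i = 0; i < n && !ret; i++)	/* stop on first nonzero */
		ret = fn(hits[i].inum, hits[i].offset, hits[i].root, ctx);
	return ret;	/* 0 = full traversal, else the callback's value */
}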
1978 trans = btrfs_attach_transaction(fs_info->extent_root);
1980 if (PTR_ERR(trans) != -ENOENT &&
1981 PTR_ERR(trans) != -EROFS)
1990 down_read(&fs_info->commit_root_sem);
2000 ret = btrfs_find_all_roots_safe(trans, fs_info, ref_node->val,
2009 root_node->val, ref_node->val,
2010 ref_node->aux);
2013 (uintptr_t)ref_node->aux,
2014 root_node->val,
2027 up_read(&fs_info->commit_root_sem);
2042 int search_commit_root = path->search_commit_root;
2049 return -EINVAL;
2051 extent_item_pos = logical - found_key.objectid;
2086 ret = found ? 0 : -ENOENT;
2092 slot = path->slots[0];
2093 eb = btrfs_clone_extent_buffer(path->nodes[0]);
2095 ret = -ENOMEM;
2106 btrfs_debug(fs_root->fs_info,
2109 fs_root->root_key.objectid);
2146 ret = found ? 0 : -ENOENT;
2151 slot = path->slots[0];
2152 eb = btrfs_clone_extent_buffer(path->nodes[0]);
2154 ret = -ENOMEM;
2170 (unsigned long)&extref->name, eb, ctx);
2197 else if (ret != -ENOENT)
2201 if (ret == -ENOENT && found_refs)
2209 * returns <0 in case of an error
2217 int i = ipath->fspath->elem_cnt;
2221 bytes_left = ipath->fspath->bytes_left > s_ptr ?
2222 ipath->fspath->bytes_left - s_ptr : 0;
2224 fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
2225 fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len,
2231 ipath->fspath->val[i] = (u64)(unsigned long)fspath;
2232 ++ipath->fspath->elem_cnt;
2233 ipath->fspath->bytes_left = fspath - fspath_min;
2235 ++ipath->fspath->elem_missed;
2236 ipath->fspath->bytes_missing += fspath_min - fspath;
2237 ipath->fspath->bytes_left = 0;
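The two branches above implement the accounting documented in the init_ipath comment that follows: a path that fits is stored and counted in elem_cnt, while a path that does not fit only bumps elem_missed and grows bytes_missing, so the caller learns how much space a retry would need. A reduced model of that bookkeeping (field names kept, everything else trimmed):

struct fspath_acct {
	int elem_cnt, elem_missed;
	unsigned long long bytes_left, bytes_missing;
};

static void account_path(struct fspath_acct *a, unsigned long long needed)
{
	if (needed <= a->bytes_left) {
		a->elem_cnt++;			/* path recorded */
		a->bytes_left -= needed;
	} else {
		a->elem_missed++;		/* path dropped, size remembered */
		a->bytes_missing += needed - a->bytes_left;
		a->bytes_left = 0;
	}
}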
2245 * it has been created large enough. each path is zero-terminated and accessed
2246 * from ipath->fspath->val[i].
2247 * when it returns, there are ipath->fspath->elem_cnt number of paths available
2248 * in ipath->fspath->val[]. when the allocated space wasn't sufficient, the
2249 * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise,
2250 * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
2255 return iterate_irefs(inum, ipath->fs_root, ipath->btrfs_path,
2267 return ERR_PTR(-ENOMEM);
2270 data->bytes_left = total_bytes - sizeof(*data);
2271 data->bytes_missing = 0;
2273 data->bytes_missing = sizeof(*data) - total_bytes;
2274 data->bytes_left = 0;
2277 data->elem_cnt = 0;
2278 data->elem_missed = 0;
2286 * information will be total_bytes - sizeof(struct inode_fs_paths).
2302 return ERR_PTR(-ENOMEM);
2305 ifp->btrfs_path = path;
2306 ifp->fspath = fspath;
2307 ifp->fs_root = fs_root;
2316 kvfree(ipath->fspath);
2329 ret->path = btrfs_alloc_path();
2330 if (!ret->path) {
2336 ret->path->search_commit_root = 1;
2337 ret->path->skip_locking = 1;
2338 ret->fs_info = fs_info;
2345 struct btrfs_fs_info *fs_info = iter->fs_info;
2346 struct btrfs_path *path = iter->path;
2353 key.offset = (u64)-1;
2354 iter->bytenr = bytenr;
2356 ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
2360 ret = -EUCLEAN;
2363 if (path->slots[0] == 0) {
2365 ret = -EUCLEAN;
2368 path->slots[0]--;
2370 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2373 ret = -ENOENT;
2376 memcpy(&iter->cur_key, &key, sizeof(key));
2377 iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
2378 path->slots[0]);
2379 iter->end_ptr = (u32)(iter->item_ptr +
2380 btrfs_item_size_nr(path->nodes[0], path->slots[0]));
2381 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
2387 * This is an extra precaution for non skinny-metadata, where
2391 if (btrfs_extent_flags(path->nodes[0], ei) & BTRFS_EXTENT_FLAG_DATA) {
2392 ret = -ENOTSUPP;
2395 iter->cur_ptr = (u32)(iter->item_ptr + sizeof(*ei));
2398 if (iter->cur_ptr >= iter->end_ptr) {
2399 ret = btrfs_next_item(fs_info->extent_root, path);
2403 ret = -ENOENT;
2409 btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key,
2410 path->slots[0]);
2411 if (iter->cur_key.objectid != bytenr ||
2412 (iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY &&
2413 iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY)) {
2414 ret = -ENOENT;
2417 iter->cur_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
2418 path->slots[0]);
2419 iter->item_ptr = iter->cur_ptr;
2420 iter->end_ptr = (u32)(iter->item_ptr + btrfs_item_size_nr(
2421 path->nodes[0], path->slots[0]));
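The iterator's state is just three byte offsets into the current extent item: item_ptr (start of the item payload), cur_ptr (the current inline ref) and end_ptr (one past the item's last byte). Stepping means adding the current ref's size to cur_ptr and checking it against end_ptr, as btrfs_backref_iter_next() does below; only when the item is exhausted does the real code move on to the next keyed item. A trimmed sketch of that cursor:

#include <stdint.h>

struct ref_cursor {
	uint32_t item_ptr;	/* start of the item's payload */
	uint32_t cur_ptr;	/* current inline ref */
	uint32_t end_ptr;	/* one past the last byte of the item */
};

/* Advance past the current ref of 'size' bytes; nonzero return means the
 * inline refs are exhausted and a keyed-item lookup would follow. */
static int cursor_next(struct ref_cursor *c, uint32_t size)
{
	c->cur_ptr += size;
	return c->cur_ptr >= c->end_ptr;
}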
2434 * Caller needs to check whether it's an inline ref or not by iter->cur_key.
2443 struct btrfs_path *path = iter->path;
2450 ASSERT(iter->cur_ptr < iter->end_ptr);
2460 ((unsigned long)iter->cur_ptr);
2465 iter->cur_ptr += size;
2466 if (iter->cur_ptr < iter->end_ptr)
2473 ret = btrfs_next_item(iter->fs_info->extent_root, iter->path);
2477 btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key, path->slots[0]);
2478 if (iter->cur_key.objectid != iter->bytenr ||
2479 (iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY &&
2480 iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY))
2482 iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
2483 path->slots[0]);
2484 iter->cur_ptr = iter->item_ptr;
2485 iter->end_ptr = iter->item_ptr + (u32)btrfs_item_size_nr(path->nodes[0],
2486 path->slots[0]);
2495 cache->rb_root = RB_ROOT;
2497 INIT_LIST_HEAD(&cache->pending[i]);
2498 INIT_LIST_HEAD(&cache->changed);
2499 INIT_LIST_HEAD(&cache->detached);
2500 INIT_LIST_HEAD(&cache->leaves);
2501 INIT_LIST_HEAD(&cache->pending_edge);
2502 INIT_LIST_HEAD(&cache->useless_node);
2503 cache->fs_info = fs_info;
2504 cache->is_reloc = is_reloc;
2517 INIT_LIST_HEAD(&node->list);
2518 INIT_LIST_HEAD(&node->upper);
2519 INIT_LIST_HEAD(&node->lower);
2520 RB_CLEAR_NODE(&node->rb_node);
2521 cache->nr_nodes++;
2522 node->level = level;
2523 node->bytenr = bytenr;
2535 cache->nr_edges++;
2555 BUG_ON(!node->lowest && !node->detached);
2556 while (!list_empty(&node->upper)) {
2557 edge = list_entry(node->upper.next, struct btrfs_backref_edge,
2558 list[LOWER]);
2559 upper = edge->node[UPPER];
2560 list_del(&edge->list[LOWER]);
2561 list_del(&edge->list[UPPER]);
2568 if (list_empty(&upper->lower)) {
2569 list_add_tail(&upper->lower, &cache->leaves);
2570 upper->lowest = 1;
2585 while (!list_empty(&cache->detached)) {
2586 node = list_entry(cache->detached.next,
2591 while (!list_empty(&cache->leaves)) {
2592 node = list_entry(cache->leaves.next,
2593 struct btrfs_backref_node, lower);
2597 cache->last_trans = 0;
2600 ASSERT(list_empty(&cache->pending[i]));
2601 ASSERT(list_empty(&cache->pending_edge));
2602 ASSERT(list_empty(&cache->useless_node));
2603 ASSERT(list_empty(&cache->changed));
2604 ASSERT(list_empty(&cache->detached));
2605 ASSERT(RB_EMPTY_ROOT(&cache->rb_root));
2606 ASSERT(!cache->nr_nodes);
2607 ASSERT(!cache->nr_edges);
2630 ASSERT(ref_key->type == BTRFS_SHARED_BLOCK_REF_KEY);
2633 if (ref_key->objectid == ref_key->offset) {
2636 cur->is_reloc_root = 1;
2638 if (cache->is_reloc) {
2639 root = find_reloc_root(cache->fs_info, cur->bytenr);
2641 return -ENOENT;
2642 cur->root = root;
2648 list_add(&cur->list, &cache->useless_node);
2655 return -ENOMEM;
2657 rb_node = rb_simple_search(&cache->rb_root, ref_key->offset);
2660 upper = btrfs_backref_alloc_node(cache, ref_key->offset,
2661 cur->level + 1);
2664 return -ENOMEM;
2671 list_add_tail(&edge->list[UPPER], &cache->pending_edge);
2675 ASSERT(upper->checked);
2676 INIT_LIST_HEAD(&edge->list[UPPER]);
2700 struct btrfs_fs_info *fs_info = cache->fs_info;
2702 struct btrfs_backref_node *lower;
2711 root = btrfs_get_fs_root(fs_info, ref_key->offset, false);
2714 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
2715 cur->cowonly = 1;
2717 if (btrfs_root_level(&root->root_item) == cur->level) {
2719 ASSERT(btrfs_root_bytenr(&root->root_item) == cur->bytenr);
2727 * completely relying on direct backref (key->offset is parent
2730 if (btrfs_should_ignore_reloc_root(root) && cache->is_reloc) {
2732 list_add(&cur->list, &cache->useless_node);
2734 cur->root = root;
2739 level = cur->level + 1;
2742 path->search_commit_root = 1;
2743 path->skip_locking = 1;
2744 path->lowest_level = level;
2746 path->lowest_level = 0;
2751 if (ret > 0 && path->slots[level] > 0)
2752 path->slots[level]--;
2754 eb = path->nodes[level];
2755 if (btrfs_node_blockptr(eb, path->slots[level]) != cur->bytenr) {
2758 cur->bytenr, level - 1, root->root_key.objectid,
2759 tree_key->objectid, tree_key->type, tree_key->offset);
2761 ret = -ENOENT;
2764 lower = cur;
2768 if (!path->nodes[level]) {
2769 ASSERT(btrfs_root_bytenr(&root->root_item) ==
2770 lower->bytenr);
2773 cache->is_reloc) {
2775 list_add(&lower->list, &cache->useless_node);
2777 lower->root = root;
2785 ret = -ENOMEM;
2789 eb = path->nodes[level];
2790 rb_node = rb_simple_search(&cache->rb_root, eb->start);
2792 upper = btrfs_backref_alloc_node(cache, eb->start,
2793 lower->level + 1);
2797 ret = -ENOMEM;
2800 upper->owner = btrfs_header_owner(eb);
2801 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
2802 upper->cowonly = 1;
2809 upper->checked = 0;
2811 upper->checked = 1;
2818 if (!upper->checked && need_check) {
2820 list_add_tail(&edge->list[UPPER],
2821 &cache->pending_edge);
2823 if (upper->checked)
2825 INIT_LIST_HEAD(&edge->list[UPPER]);
2830 ASSERT(upper->checked);
2831 INIT_LIST_HEAD(&edge->list[UPPER]);
2832 if (!upper->owner)
2833 upper->owner = btrfs_header_owner(eb);
2835 btrfs_backref_link_edge(edge, lower, upper, LINK_LOWER);
2841 lower = upper;
2853 * links aren't yet bi-directional. Needs to finish such links.
2866 struct btrfs_fs_info *fs_info = cache->fs_info;
2871 ret = btrfs_backref_iter_start(iter, cur->bytenr);
2884 ret = -EUCLEAN;
2888 WARN_ON(cur->checked);
2889 if (!list_empty(&cur->upper)) {
2894 ASSERT(list_is_singular(&cur->upper));
2895 edge = list_entry(cur->upper.next, struct btrfs_backref_edge,
2896 list[LOWER]);
2897 ASSERT(list_empty(&edge->list[UPPER]));
2898 exist = edge->node[UPPER];
2903 if (!exist->checked)
2904 list_add_tail(&edge->list[UPPER], &cache->pending_edge);
2917 key.objectid = iter->bytenr;
2923 ((unsigned long)iter->cur_ptr);
2927 ret = -EUCLEAN;
2933 key.type = iter->cur_key.type;
2934 key.offset = iter->cur_key.offset;
2943 exist->owner == key.offset) ||
2945 exist->bytenr == key.offset))) {
2957 ret = -EINVAL;
2976 cur->checked = 1;
2989 struct list_head *useless_node = &cache->useless_node;
2994 ASSERT(start->checked);
2996 /* Insert this node to cache if it's not COW-only */
2997 if (!start->cowonly) {
2998 rb_node = rb_simple_insert(&cache->rb_root, start->bytenr,
2999 &start->rb_node);
3001 btrfs_backref_panic(cache->fs_info, start->bytenr,
3002 -EEXIST);
3003 list_add_tail(&start->lower, &cache->leaves);
3011 list_for_each_entry(edge, &start->upper, list[LOWER])
3012 list_add_tail(&edge->list[UPPER], &pending_edge);
3016 struct btrfs_backref_node *lower;
3020 list_del_init(&edge->list[UPPER]);
3021 upper = edge->node[UPPER];
3022 lower = edge->node[LOWER];
3025 if (upper->detached) {
3026 list_del(&edge->list[LOWER]);
3029 /* Lower node is orphan, queue for cleanup */
3030 if (list_empty(&lower->upper))
3031 list_add(&lower->list, useless_node);
3038 * So if we have upper->rb_node populated, this means a cache
3042 if (!RB_EMPTY_NODE(&upper->rb_node)) {
3043 if (upper->lowest) {
3044 list_del_init(&upper->lower);
3045 upper->lowest = 0;
3048 list_add_tail(&edge->list[UPPER], &upper->lower);
3053 if (!upper->checked) {
3055 return -EUCLEAN;
3058 /* Sanity check, COW-only node has non-COW-only parent */
3059 if (start->cowonly != upper->cowonly) {
3061 return -EUCLEAN;
3064 /* Only cache non-COW-only (subvolume trees) tree blocks */
3065 if (!upper->cowonly) {
3066 rb_node = rb_simple_insert(&cache->rb_root, upper->bytenr,
3067 &upper->rb_node);
3069 btrfs_backref_panic(cache->fs_info,
3070 upper->bytenr, -EEXIST);
3071 return -EUCLEAN;
3075 list_add_tail(&edge->list[UPPER], &upper->lower);
3081 list_for_each_entry(edge, &upper->upper, list[LOWER])
3082 list_add_tail(&edge->list[UPPER], &pending_edge);
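finish_upper_links() avoids recursion by keeping a pending_edge worklist: every edge whose upper side still needs linking is queued, popped, and processed, and processing one edge may queue more (the list_add_tail calls above). The same drain-a-worklist shape over a singly linked pending list; finish() is a caller-supplied hook, not a kernel function:

struct edge {
	struct edge *next_pending;	/* the worklist link */
};

/* Pop until the worklist drains; finish() may push new edges by returning
 * an updated list head, mimicking how linking an upper node queues that
 * node's own upper edges. */
static void drain_pending(struct edge *pending,
			  struct edge *(*finish)(struct edge *e,
						 struct edge *head))
{
	while (pending) {
		struct edge *e = pending;

		pending = e->next_pending;	/* pop before processing */
		e->next_pending = NULL;
		pending = finish(e, pending);	/* may prepend more work */
	}
}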
3090 struct btrfs_backref_node *lower;
3094 while (!list_empty(&cache->useless_node)) {
3095 lower = list_first_entry(&cache->useless_node,
3097 list_del_init(&lower->list);
3099 while (!list_empty(&cache->pending_edge)) {
3100 edge = list_first_entry(&cache->pending_edge,
3102 list_del(&edge->list[UPPER]);
3103 list_del(&edge->list[LOWER]);
3104 lower = edge->node[LOWER];
3105 upper = edge->node[UPPER];
3109 * Lower is no longer linked to any upper backref nodes and
3112 if (list_empty(&lower->upper) &&
3113 RB_EMPTY_NODE(&lower->rb_node))
3114 list_add(&lower->list, &cache->useless_node);
3116 if (!RB_EMPTY_NODE(&upper->rb_node))
3120 list_for_each_entry(edge, &upper->upper, list[LOWER])
3121 list_add_tail(&edge->list[UPPER],
3122 &cache->pending_edge);
3123 if (list_empty(&upper->upper))
3124 list_add(&upper->list, &cache->useless_node);
3127 while (!list_empty(&cache->useless_node)) {
3128 lower = list_first_entry(&cache->useless_node,
3130 list_del_init(&lower->list);
3131 if (lower == node)
3133 btrfs_backref_drop_node(cache, lower);
3137 ASSERT(list_empty(&cache->useless_node) &&
3138 list_empty(&cache->pending_edge));