Lines Matching +full:parent +full:locked
1 // SPDX-License-Identifier: GPL-2.0
8 #include "disk-io.h"
9 #include "print-tree.h"
14 #include "delalloc-space.h"
17 #include "file-item.h"
42 * This value is different for compressed/non-compressed extents, thus
52 if (defrag1->root > defrag2->root) in __compare_inode_defrag()
54 else if (defrag1->root < defrag2->root) in __compare_inode_defrag()
55 return -1; in __compare_inode_defrag()
56 else if (defrag1->ino > defrag2->ino) in __compare_inode_defrag()
58 else if (defrag1->ino < defrag2->ino) in __compare_inode_defrag()
59 return -1; in __compare_inode_defrag()
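The __compare_inode_defrag() fragment above is the ordering used by the btrfs defrag queue (these matches come from the btrfs defragmentation code, fs/btrfs/defrag.c in recent kernels): records sort by root objectid first and by inode number second. A self-contained sketch of that two-key comparison; the struct and function names here are illustrative, not the kernel's:

    #include <stdint.h>

    struct defrag_key {
        uint64_t root;  /* objectid of the subvolume root */
        uint64_t ino;   /* inode number within that root */
    };

    /* Returns <0, 0 or >0, matching the ordering the rb-tree walk below relies on. */
    static int compare_defrag_keys(const struct defrag_key *a,
                                   const struct defrag_key *b)
    {
        if (a->root != b->root)
            return a->root > b->root ? 1 : -1;
        if (a->ino != b->ino)
            return a->ino > b->ino ? 1 : -1;
        return 0;
    }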
76 struct btrfs_fs_info *fs_info = inode->root->fs_info; in __btrfs_add_inode_defrag()
79 struct rb_node *parent = NULL; in __btrfs_add_inode_defrag() local
82 p = &fs_info->defrag_inodes.rb_node; in __btrfs_add_inode_defrag()
84 parent = *p; in __btrfs_add_inode_defrag()
85 entry = rb_entry(parent, struct inode_defrag, rb_node); in __btrfs_add_inode_defrag()
89 p = &parent->rb_left; in __btrfs_add_inode_defrag()
91 p = &parent->rb_right; in __btrfs_add_inode_defrag()
98 if (defrag->transid < entry->transid) in __btrfs_add_inode_defrag()
99 entry->transid = defrag->transid; in __btrfs_add_inode_defrag()
100 entry->extent_thresh = min(defrag->extent_thresh, in __btrfs_add_inode_defrag()
101 entry->extent_thresh); in __btrfs_add_inode_defrag()
102 return -EEXIST; in __btrfs_add_inode_defrag()
105 set_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags); in __btrfs_add_inode_defrag()
106 rb_link_node(&defrag->rb_node, parent, p); in __btrfs_add_inode_defrag()
107 rb_insert_color(&defrag->rb_node, &fs_info->defrag_inodes); in __btrfs_add_inode_defrag()
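The __btrfs_add_inode_defrag() lines show the standard Linux rb-tree insertion walk: descend from the root, remember the last visited node as parent, return -EEXIST on a duplicate (the existing record absorbs the lower transid and the smaller extent_thresh), otherwise link the new node and rebalance. A kernel-style sketch of the same pattern, assuming <linux/rbtree.h>; defrag_node and defrag_tree_insert are illustrative names:

    #include <linux/errno.h>
    #include <linux/rbtree.h>
    #include <linux/types.h>

    struct defrag_node {
        struct rb_node rb_node;
        u64 root;
        u64 ino;
    };

    /* Insert @new ordered by (root, ino); -EEXIST if the inode is already queued. */
    static int defrag_tree_insert(struct rb_root *tree, struct defrag_node *new)
    {
        struct rb_node **p = &tree->rb_node;
        struct rb_node *parent = NULL;

        while (*p) {
            struct defrag_node *entry;

            parent = *p;
            entry = rb_entry(parent, struct defrag_node, rb_node);

            if (new->root < entry->root ||
                (new->root == entry->root && new->ino < entry->ino))
                p = &parent->rb_left;
            else if (new->root > entry->root ||
                     (new->root == entry->root && new->ino > entry->ino))
                p = &parent->rb_right;
            else
                return -EEXIST; /* caller merges transid/extent_thresh instead */
        }
        rb_link_node(&new->rb_node, parent, p);
        rb_insert_color(&new->rb_node, tree);
        return 0;
    }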
128 struct btrfs_root *root = inode->root; in btrfs_add_inode_defrag()
129 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_add_inode_defrag()
137 if (test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags)) in btrfs_add_inode_defrag()
141 transid = trans->transid; in btrfs_add_inode_defrag()
143 transid = inode->root->last_trans; in btrfs_add_inode_defrag()
147 return -ENOMEM; in btrfs_add_inode_defrag()
149 defrag->ino = btrfs_ino(inode); in btrfs_add_inode_defrag()
150 defrag->transid = transid; in btrfs_add_inode_defrag()
151 defrag->root = root->root_key.objectid; in btrfs_add_inode_defrag()
152 defrag->extent_thresh = extent_thresh; in btrfs_add_inode_defrag()
154 spin_lock(&fs_info->defrag_inodes_lock); in btrfs_add_inode_defrag()
155 if (!test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags)) { in btrfs_add_inode_defrag()
158 * and then re-read this inode, this new inode doesn't have in btrfs_add_inode_defrag()
167 spin_unlock(&fs_info->defrag_inodes_lock); in btrfs_add_inode_defrag()
181 struct rb_node *parent = NULL; in btrfs_pick_defrag_inode() local
187 spin_lock(&fs_info->defrag_inodes_lock); in btrfs_pick_defrag_inode()
188 p = fs_info->defrag_inodes.rb_node; in btrfs_pick_defrag_inode()
190 parent = p; in btrfs_pick_defrag_inode()
191 entry = rb_entry(parent, struct inode_defrag, rb_node); in btrfs_pick_defrag_inode()
195 p = parent->rb_left; in btrfs_pick_defrag_inode()
197 p = parent->rb_right; in btrfs_pick_defrag_inode()
202 if (parent && __compare_inode_defrag(&tmp, entry) > 0) { in btrfs_pick_defrag_inode()
203 parent = rb_next(parent); in btrfs_pick_defrag_inode()
204 if (parent) in btrfs_pick_defrag_inode()
205 entry = rb_entry(parent, struct inode_defrag, rb_node); in btrfs_pick_defrag_inode()
211 rb_erase(parent, &fs_info->defrag_inodes); in btrfs_pick_defrag_inode()
212 spin_unlock(&fs_info->defrag_inodes_lock); in btrfs_pick_defrag_inode()
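btrfs_pick_defrag_inode() is a lower-bound lookup: it walks the tree for (root, ino) and, if the descent stopped on an entry that sorts before the target, it steps to rb_next() so the caller always receives the next queued inode at or after the requested position, then detaches it. A hedged sketch of that pattern, reusing the illustrative defrag_node type above; the caller is assumed to hold the lock protecting the tree (defrag_inodes_lock in the listing):

    /* Find and detach the first queued entry with key >= (root, ino), or NULL. */
    static struct defrag_node *defrag_tree_pick(struct rb_root *tree, u64 root, u64 ino)
    {
        struct rb_node *node = tree->rb_node;
        struct rb_node *parent = NULL;
        struct defrag_node *entry = NULL;

        while (node) {
            parent = node;
            entry = rb_entry(parent, struct defrag_node, rb_node);

            if (root < entry->root || (root == entry->root && ino < entry->ino))
                node = parent->rb_left;
            else if (root > entry->root || (root == entry->root && ino > entry->ino))
                node = parent->rb_right;
            else
                break;          /* exact match */
        }
        /* Descent ended on a smaller entry: advance to the in-order successor. */
        if (entry &&
            (root > entry->root || (root == entry->root && ino > entry->ino))) {
            parent = rb_next(parent);
            entry = parent ? rb_entry(parent, struct defrag_node, rb_node) : NULL;
        }
        if (entry)
            rb_erase(&entry->rb_node, tree);
        return entry;
    }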
221 spin_lock(&fs_info->defrag_inodes_lock); in btrfs_cleanup_defrag_inodes()
222 node = rb_first(&fs_info->defrag_inodes); in btrfs_cleanup_defrag_inodes()
224 rb_erase(node, &fs_info->defrag_inodes); in btrfs_cleanup_defrag_inodes()
228 cond_resched_lock(&fs_info->defrag_inodes_lock); in btrfs_cleanup_defrag_inodes()
230 node = rb_first(&fs_info->defrag_inodes); in btrfs_cleanup_defrag_inodes()
232 spin_unlock(&fs_info->defrag_inodes_lock); in btrfs_cleanup_defrag_inodes()
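btrfs_cleanup_defrag_inodes() drains the whole queue while holding defrag_inodes_lock, calling cond_resched_lock() between entries so a long queue does not monopolize the CPU with the spinlock held; the lock may be dropped and re-taken, which is why rb_first() is re-read every iteration. A minimal sketch of that drain loop, with freeing reduced to kfree() whereas the kernel returns entries to a dedicated slab cache:

    #include <linux/rbtree.h>
    #include <linux/sched.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    /* Remove and free every queued entry, yielding the lock periodically. */
    static void defrag_tree_cleanup(struct rb_root *tree, spinlock_t *lock)
    {
        struct rb_node *node;

        spin_lock(lock);
        node = rb_first(tree);
        while (node) {
            struct defrag_node *entry = rb_entry(node, struct defrag_node, rb_node);

            rb_erase(node, tree);
            kfree(entry);

            /* May drop and re-take @lock, so the tree has to be re-read below. */
            cond_resched_lock(lock);
            node = rb_first(tree);
        }
        spin_unlock(lock);
    }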
247 if (test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state)) in __btrfs_run_defrag_inode()
253 inode_root = btrfs_get_fs_root(fs_info, defrag->root, true); in __btrfs_run_defrag_inode()
259 inode = btrfs_iget(fs_info->sb, defrag->ino, inode_root); in __btrfs_run_defrag_inode()
272 clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags); in __btrfs_run_defrag_inode()
274 range.len = (u64)-1; in __btrfs_run_defrag_inode()
276 range.extent_thresh = defrag->extent_thresh; in __btrfs_run_defrag_inode()
278 sb_start_write(fs_info->sb); in __btrfs_run_defrag_inode()
279 ret = btrfs_defrag_file(inode, NULL, &range, defrag->transid, in __btrfs_run_defrag_inode()
281 sb_end_write(fs_info->sb); in __btrfs_run_defrag_inode()
287 cur = max(cur + fs_info->sectorsize, range.start); in __btrfs_run_defrag_inode()
304 atomic_inc(&fs_info->defrag_running); in btrfs_run_defrag_inodes()
307 if (test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state)) in btrfs_run_defrag_inodes()
325 first_ino = defrag->ino + 1; in btrfs_run_defrag_inodes()
326 root_objectid = defrag->root; in btrfs_run_defrag_inodes()
330 atomic_dec(&fs_info->defrag_running); in btrfs_run_defrag_inodes()
336 wake_up(&fs_info->transaction_wait); in btrfs_run_defrag_inodes()
357 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) in btrfs_defrag_leaves()
362 ret = -ENOMEM; in btrfs_defrag_leaves()
366 level = btrfs_header_level(root->node); in btrfs_defrag_leaves()
371 if (root->defrag_progress.objectid == 0) { in btrfs_defrag_leaves()
377 root->defrag_max.objectid = 0; in btrfs_defrag_leaves()
379 btrfs_node_key_to_cpu(root_node, &root->defrag_max, in btrfs_defrag_leaves()
380 nritems - 1); in btrfs_defrag_leaves()
385 memcpy(&key, &root->defrag_progress, sizeof(key)); in btrfs_defrag_leaves()
388 path->keep_locks = 1; in btrfs_defrag_leaves()
400 * leafs from path->nodes[1], so set lowest_level to 1 to avoid later in btrfs_defrag_leaves()
401 * a deadlock (attempting to write lock an already write locked leaf). in btrfs_defrag_leaves()
403 path->lowest_level = 1; in btrfs_defrag_leaves()
410 if (!path->nodes[1]) { in btrfs_defrag_leaves()
415 * The node at level 1 must always be locked when our path has in btrfs_defrag_leaves()
417 * path->slots[1]. in btrfs_defrag_leaves()
419 ASSERT(path->locks[1] != 0); in btrfs_defrag_leaves()
421 path->nodes[1], 0, in btrfs_defrag_leaves()
423 &root->defrag_progress); in btrfs_defrag_leaves()
425 WARN_ON(ret == -EAGAIN); in btrfs_defrag_leaves()
431 * without COWing, this is because even with path->keep_locks = 1, in btrfs_defrag_leaves()
433 * node when path->slots[node_level - 1] does not point to the last in btrfs_defrag_leaves()
437 path->slots[1] = btrfs_header_nritems(path->nodes[1]); in btrfs_defrag_leaves()
441 memcpy(&root->defrag_progress, &key, sizeof(key)); in btrfs_defrag_leaves()
442 ret = -EAGAIN; in btrfs_defrag_leaves()
446 if (ret == -EAGAIN) { in btrfs_defrag_leaves()
447 if (root->defrag_max.objectid > root->defrag_progress.objectid) in btrfs_defrag_leaves()
449 if (root->defrag_max.type > root->defrag_progress.type) in btrfs_defrag_leaves()
451 if (root->defrag_max.offset > root->defrag_progress.offset) in btrfs_defrag_leaves()
456 if (ret != -EAGAIN) in btrfs_defrag_leaves()
457 memset(&root->defrag_progress, 0, in btrfs_defrag_leaves()
458 sizeof(root->defrag_progress)); in btrfs_defrag_leaves()
468 * - No extent_map will be added to inode->extent_tree
471 * - Extra optimization to skip file extents older than @newer_than
485 struct btrfs_root *root = inode->root; in defrag_get_extent()
495 ret = -ENOMEM; in defrag_get_extent()
522 path.slots[0] = btrfs_header_nritems(path.nodes[0]) - 1; in defrag_get_extent()
534 path.slots[0]--; in defrag_get_extent()
560 * | |<- File extent ->| in defrag_get_extent()
561 * \- start in defrag_get_extent()
566 em->start = start; in defrag_get_extent()
567 em->orig_start = start; in defrag_get_extent()
568 em->block_start = EXTENT_MAP_HOLE; in defrag_get_extent()
569 em->len = key.offset - start; in defrag_get_extent()
578 * |<- file extent ->| | in defrag_get_extent()
579 * \- start in defrag_get_extent()
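The defrag_get_extent() notes and diagrams above cover its two cases: it never inserts into the inode's extent map tree, it skips file extent items older than @newer_than, and when the next on-disk file extent item begins past @start it reports the gap as a hole (first diagram); otherwise the item already covers @start (second diagram). The hole case is just the arithmetic below; a standalone sketch with illustrative names, where the sentinel mirrors the kernel's EXTENT_MAP_HOLE:

    #include <stdint.h>

    #define SKETCH_EXTENT_MAP_HOLE ((uint64_t)-3)   /* stand-in for EXTENT_MAP_HOLE */

    struct sketch_extent_map {
        uint64_t start;
        uint64_t len;
        uint64_t block_start;
    };

    /*
     * The next file extent item starts at @key_offset > @start, so the range
     * [start, key_offset) has no data: report it as a hole of that length.
     */
    static void fill_hole_em(struct sketch_extent_map *em,
                             uint64_t start, uint64_t key_offset)
    {
        em->start = start;
        em->len = key_offset - start;
        em->block_start = SKETCH_EXTENT_MAP_HOLE;
    }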
611 u64 newer_than, bool locked) in defrag_lookup_extent() argument
613 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; in defrag_lookup_extent()
614 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; in defrag_lookup_extent()
616 const u32 sectorsize = BTRFS_I(inode)->root->fs_info->sectorsize; in defrag_lookup_extent()
622 read_lock(&em_tree->lock); in defrag_lookup_extent()
624 read_unlock(&em_tree->lock); in defrag_lookup_extent()
627 * We can get a merged extent, in that case, we need to re-search in defrag_lookup_extent()
634 if (em && test_bit(EXTENT_FLAG_MERGED, &em->flags) && in defrag_lookup_extent()
635 newer_than && em->generation >= newer_than) { in defrag_lookup_extent()
642 u64 end = start + sectorsize - 1; in defrag_lookup_extent()
645 if (!locked) in defrag_lookup_extent()
648 if (!locked) in defrag_lookup_extent()
661 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) in get_extent_max_capacity()
663 return fs_info->max_extent_size; in get_extent_max_capacity()
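get_extent_max_capacity() answers how large an extent of a given kind can possibly be: compressed extents are capped at BTRFS_MAX_COMPRESSED (128K of uncompressed data per extent in current kernels), everything else at fs_info->max_extent_size. An extent already at that capacity gains nothing from being rewritten, so defrag leaves it alone. A sketch of that decision, with the compression flag passed in explicitly instead of being read from the extent map flags:

    #include <stdbool.h>
    #include <stdint.h>

    /* Upper bound on the size an extent of this kind can reach. */
    static uint64_t extent_max_capacity(uint64_t max_extent_size, bool compressed)
    {
        const uint64_t max_compressed = 128 * 1024;  /* BTRFS_MAX_COMPRESSED, hard-coded here */

        return compressed ? max_compressed : max_extent_size;
    }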
667 u32 extent_thresh, u64 newer_than, bool locked) in defrag_check_next_extent() argument
669 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in defrag_check_next_extent()
674 if (em->start + em->len >= i_size_read(inode)) in defrag_check_next_extent()
683 next = defrag_lookup_extent(inode, em->start + em->len, newer_than, locked); in defrag_check_next_extent()
685 if (!next || next->block_start >= EXTENT_MAP_LAST_BYTE) in defrag_check_next_extent()
687 if (test_bit(EXTENT_FLAG_PREALLOC, &next->flags)) in defrag_check_next_extent()
693 if (next->len >= get_extent_max_capacity(fs_info, em)) in defrag_check_next_extent()
696 if (next->generation < newer_than) in defrag_check_next_extent()
699 if (next->len >= extent_thresh) in defrag_check_next_extent()
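defrag_check_next_extent() decides whether the extent that follows @em is a useful merge partner: the current extent must not be the last one before i_size, the next extent must map real data (not a hole or the end marker), must not be preallocated, must not already be at its maximum capacity, must be new enough for @newer_than, and must itself be smaller than the defrag threshold. Collapsed into a pure predicate over plain values (a hedged sketch with an illustrative struct, not the kernel function):

    #include <stdbool.h>
    #include <stdint.h>

    struct next_extent_info {
        bool exists;            /* a following extent maps real data */
        bool preallocated;      /* preallocated extents are never targets */
        uint64_t len;
        uint64_t generation;    /* transaction id of the last write */
        uint64_t max_capacity;  /* from extent_max_capacity() above */
    };

    static bool next_extent_is_mergeable(const struct next_extent_info *next,
                                         uint32_t extent_thresh, uint64_t newer_than)
    {
        if (!next->exists || next->preallocated)
            return false;
        if (next->len >= next->max_capacity)   /* already as large as it can get */
            return false;
        if (next->generation < newer_than)     /* too old to be a defrag target */
            return false;
        return next->len < extent_thresh;      /* only merge into small extents */
    }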
713 * - Returned page is locked and has been set up properly.
714 * - No ordered extent exists in the page.
715 * - The page is uptodate.
722 struct address_space *mapping = inode->vfs_inode.i_mapping; in defrag_prepare_one_page()
725 u64 page_end = page_start + PAGE_SIZE - 1; in defrag_prepare_one_page()
733 return ERR_PTR(-ENOMEM); in defrag_prepare_one_page()
736 * Since we can defragment files opened read-only, we can encounter in defrag_prepare_one_page()
746 return ERR_PTR(-ETXTBSY); in defrag_prepare_one_page()
760 lock_extent(&inode->io_tree, page_start, page_end, &cached_state); in defrag_prepare_one_page()
762 unlock_extent(&inode->io_tree, page_start, page_end, in defrag_prepare_one_page()
775 if (page->mapping != mapping || !PagePrivate(page)) { in defrag_prepare_one_page()
789 if (page->mapping != mapping || !PagePrivate(page)) { in defrag_prepare_one_page()
797 return ERR_PTR(-EIO); in defrag_prepare_one_page()
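defrag_prepare_one_page() hands back a page that is locked, uptodate and has no ordered extent pending against it. Because getting there can mean sleeping (waiting for writeback or for an ordered extent to finish), the page is re-validated after every such wait: if page->mapping changed or its private state was dropped, the page was truncated or reclaimed and the lookup must be retried. A hedged sketch of that re-check pattern; the again label and the surrounding page-cache lookup are assumed context, not shown:

    lock_page(page);
    /*
     * While we slept, truncation or reclaim may have detached the page from
     * the file.  Drop it and redo the page-cache lookup in that case.
     */
    if (page->mapping != mapping || !PagePrivate(page)) {
        unlock_page(page);
        put_page(page);
        goto again;
    }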
820 * @locked: if the range has already held extent lock
826 bool locked, struct list_head *target_list, in defrag_collect_targets() argument
829 struct btrfs_fs_info *fs_info = inode->root->fs_info; in defrag_collect_targets()
841 em = defrag_lookup_extent(&inode->vfs_inode, cur, newer_than, locked); in defrag_collect_targets()
851 if (em->block_start == EXTENT_MAP_INLINE && in defrag_collect_targets()
852 em->len <= inode->root->fs_info->max_inline) in defrag_collect_targets()
856 if (em->block_start == EXTENT_MAP_HOLE || in defrag_collect_targets()
857 em->block_start == EXTENT_MAP_DELALLOC || in defrag_collect_targets()
858 test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) in defrag_collect_targets()
862 if (em->generation < newer_than) in defrag_collect_targets()
866 if (em->generation == (u64)-1) in defrag_collect_targets()
873 range_len = em->len - (cur - em->start); in defrag_collect_targets()
883 * locked, and starting writeback, or finishing an ordered in defrag_collect_targets()
894 if (test_range_bit(&inode->io_tree, cur, cur + range_len - 1, in defrag_collect_targets()
906 if (em->len >= extent_thresh) in defrag_collect_targets()
913 if (em->len >= get_extent_max_capacity(fs_info, em)) in defrag_collect_targets()
922 if (em->block_start == EXTENT_MAP_INLINE) in defrag_collect_targets()
925 next_mergeable = defrag_check_next_extent(&inode->vfs_inode, em, in defrag_collect_targets()
926 extent_thresh, newer_than, locked); in defrag_collect_targets()
933 last = list_entry(target_list->prev, in defrag_collect_targets()
936 if (last->start + last->len != cur) in defrag_collect_targets()
944 range_len = min(extent_map_end(em), start + len) - cur; in defrag_collect_targets()
952 last = list_entry(target_list->prev, in defrag_collect_targets()
954 ASSERT(last->start + last->len <= cur); in defrag_collect_targets()
955 if (last->start + last->len == cur) { in defrag_collect_targets()
957 last->len += range_len; in defrag_collect_targets()
967 ret = -ENOMEM; in defrag_collect_targets()
970 new->start = cur; in defrag_collect_targets()
971 new->len = range_len; in defrag_collect_targets()
972 list_add_tail(&new->list, target_list); in defrag_collect_targets()
983 list_del_init(&entry->list); in defrag_collect_targets()
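defrag_collect_targets() accumulates byte ranges in @target_list and, when a new range is contiguous with the tail entry, simply extends that entry instead of allocating another node (ranges are scanned in order, so the tail can never start beyond the current offset). A kernel-style sketch of the append-or-merge step, assuming <linux/list.h>; defrag_target_range here is an illustrative mirror of the type used by the listing:

    #include <linux/errno.h>
    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct defrag_target_range {
        struct list_head list;
        u64 start;
        u64 len;
    };

    /* Record [cur, cur + range_len), merging with the list tail if contiguous. */
    static int add_target_range(struct list_head *target_list, u64 cur, u64 range_len)
    {
        struct defrag_target_range *new;

        if (!list_empty(target_list)) {
            struct defrag_target_range *last;

            last = list_entry(target_list->prev, struct defrag_target_range, list);
            if (last->start + last->len == cur) {
                last->len += range_len;
                return 0;
            }
        }

        new = kmalloc(sizeof(*new), GFP_NOFS);
        if (!new)
            return -ENOMEM;
        new->start = cur;
        new->len = range_len;
        list_add_tail(&new->list, target_list);
        return 0;
    }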
1009 * @pages: locked pages covering the defrag range
1010 * @nr_pages: number of locked pages
1014 * - Pages are prepared
1015 * Pages should be locked, no ordered extent in the pages range,
1018 * - Extent bits are locked
1025 struct btrfs_fs_info *fs_info = inode->root->fs_info; in defrag_one_locked_target()
1027 const u64 start = target->start; in defrag_one_locked_target()
1028 const u64 len = target->len; in defrag_one_locked_target()
1029 unsigned long last_index = (start + len - 1) >> PAGE_SHIFT; in defrag_one_locked_target()
1035 ASSERT(last_index - first_index + 1 <= nr_pages); in defrag_one_locked_target()
1040 clear_extent_bit(&inode->io_tree, start, start + len - 1, in defrag_one_locked_target()
1043 set_extent_bit(&inode->io_tree, start, start + len - 1, in defrag_one_locked_target()
1047 for (i = start_index - first_index; i <= last_index - first_index; i++) { in defrag_one_locked_target()
1066 const u32 sectorsize = inode->root->fs_info->sectorsize; in defrag_one_range()
1067 u64 last_index = (start + len - 1) >> PAGE_SHIFT; in defrag_one_range()
1069 unsigned int nr_pages = last_index - start_index + 1; in defrag_one_range()
1078 return -ENOMEM; in defrag_one_range()
1093 lock_extent(&inode->io_tree, start_index << PAGE_SHIFT, in defrag_one_range()
1094 (last_index << PAGE_SHIFT) + PAGE_SIZE - 1, in defrag_one_range()
1097 * Now we have a consistent view about the extent map, re-check in defrag_one_range()
1100 * And this time we have extent locked already, pass @locked = true in defrag_one_range()
1117 list_del_init(&entry->list); in defrag_one_range()
1121 unlock_extent(&inode->io_tree, start_index << PAGE_SHIFT, in defrag_one_range()
1122 (last_index << PAGE_SHIFT) + PAGE_SIZE - 1, in defrag_one_range()
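defrag_one_range() converts the byte range into page indexes, prepares that many pages, and only then locks the matching extent range so the extent map view it re-collects (this time with @locked = true) cannot change underneath it. The index and lock-range arithmetic as a standalone sketch, with 4K pages assumed for the worked numbers:

    #include <stdint.h>

    #define SKETCH_PAGE_SHIFT 12            /* 4K pages assumed for the example */
    #define SKETCH_PAGE_SIZE  (1ULL << SKETCH_PAGE_SHIFT)

    struct page_span {
        uint64_t start_index;   /* first page index covering the byte range */
        uint64_t last_index;    /* last page index covering the byte range */
        unsigned int nr_pages;
        uint64_t lock_start;    /* first byte of the extent range to lock */
        uint64_t lock_end;      /* last byte (inclusive) of that range */
    };

    static struct page_span span_for_range(uint64_t start, uint64_t len)
    {
        struct page_span s;

        s.start_index = start >> SKETCH_PAGE_SHIFT;
        s.last_index = (start + len - 1) >> SKETCH_PAGE_SHIFT;
        s.nr_pages = s.last_index - s.start_index + 1;
        /* Lock whole pages: from the first byte of the first page ... */
        s.lock_start = s.start_index << SKETCH_PAGE_SHIFT;
        /* ... to the last byte of the last page, inclusive. */
        s.lock_end = (s.last_index << SKETCH_PAGE_SHIFT) + SKETCH_PAGE_SIZE - 1;
        return s;
    }

For start = 6K and len = 10K this yields pages 1..3 (nr_pages = 3) and an extent lock on bytes 4096..16383, matching the start_index/last_index expressions in the listing.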
1143 const u32 sectorsize = inode->root->fs_info->sectorsize; in defrag_one_cluster()
1156 u32 range_len = entry->len; in defrag_one_cluster()
1166 (max_sectors - *sectors_defragged) * sectorsize); in defrag_one_cluster()
1174 if (entry->start + range_len <= *last_scanned_ret) in defrag_one_cluster()
1178 page_cache_sync_readahead(inode->vfs_inode.i_mapping, in defrag_one_cluster()
1179 ra, NULL, entry->start >> PAGE_SHIFT, in defrag_one_cluster()
1180 ((entry->start + range_len - 1) >> PAGE_SHIFT) - in defrag_one_cluster()
1181 (entry->start >> PAGE_SHIFT) + 1); in defrag_one_cluster()
1184 * we locked the pages. in defrag_one_cluster()
1188 ret = defrag_one_range(inode, entry->start, range_len, in defrag_one_cluster()
1194 inode->root->fs_info->sectorsize_bits; in defrag_one_cluster()
1198 list_del_init(&entry->list); in defrag_one_cluster()
1217 * Return >=0 for the number of sectors defragged, and range->start will be updated
1226 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in btrfs_defrag_file()
1231 bool do_compress = (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS); in btrfs_defrag_file()
1235 u32 extent_thresh = range->extent_thresh; in btrfs_defrag_file()
1241 if (range->start >= isize) in btrfs_defrag_file()
1242 return -EINVAL; in btrfs_defrag_file()
1245 if (range->compress_type >= BTRFS_NR_COMPRESS_TYPES) in btrfs_defrag_file()
1246 return -EINVAL; in btrfs_defrag_file()
1247 if (range->compress_type) in btrfs_defrag_file()
1248 compress_type = range->compress_type; in btrfs_defrag_file()
1254 if (range->start + range->len > range->start) { in btrfs_defrag_file()
1256 last_byte = min(isize, range->start + range->len); in btrfs_defrag_file()
1263 cur = round_down(range->start, fs_info->sectorsize); in btrfs_defrag_file()
1264 last_byte = round_up(last_byte, fs_info->sectorsize) - 1; in btrfs_defrag_file()
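btrfs_defrag_file() aligns the requested range to the sector size before iterating: the start is rounded down, and the exclusive end (clamped to i_size) is rounded up and then turned into an inclusive last byte. A standalone sketch of that alignment; the macros mimic the kernel's round_down()/round_up() for power-of-two alignments:

    #include <stdint.h>

    #define RND_DOWN(x, a)  ((x) & ~((uint64_t)(a) - 1))
    #define RND_UP(x, a)    RND_DOWN((x) + (a) - 1, (a))

    struct defrag_span {
        uint64_t cur;        /* first byte to consider, sector aligned */
        uint64_t last_byte;  /* last byte to consider, inclusive */
    };

    static struct defrag_span align_defrag_range(uint64_t start, uint64_t end_excl,
                                                 uint32_t sectorsize)
    {
        struct defrag_span s;

        s.cur = RND_DOWN(start, sectorsize);
        s.last_byte = RND_UP(end_excl, sectorsize) - 1;
        return s;
    }

Example with 4K sectors: start = 5000 and end = 9000 become cur = 4096 and last_byte = 12287, i.e. the loop walks whole sectors that overlap the requested range.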
1275 file_ra_state_init(ra, inode->i_mapping); in btrfs_defrag_file()
1283 if (start_index < inode->i_mapping->writeback_index) in btrfs_defrag_file()
1284 inode->i_mapping->writeback_index = start_index; in btrfs_defrag_file()
1292 ret = -EAGAIN; in btrfs_defrag_file()
1298 (SZ_256K >> PAGE_SHIFT)) << PAGE_SHIFT) - 1; in btrfs_defrag_file()
1303 ret = -ETXTBSY; in btrfs_defrag_file()
1307 if (!(inode->i_sb->s_flags & SB_ACTIVE)) { in btrfs_defrag_file()
1312 BTRFS_I(inode)->defrag_compress = compress_type; in btrfs_defrag_file()
1314 cluster_end + 1 - cur, extent_thresh, in btrfs_defrag_file()
1319 balance_dirty_pages_ratelimited(inode->i_mapping); in btrfs_defrag_file()
1338 range->start = cur; in btrfs_defrag_file()
1344 if (range->flags & BTRFS_DEFRAG_RANGE_START_IO) { in btrfs_defrag_file()
1345 filemap_flush(inode->i_mapping); in btrfs_defrag_file()
1347 &BTRFS_I(inode)->runtime_flags)) in btrfs_defrag_file()
1348 filemap_flush(inode->i_mapping); in btrfs_defrag_file()
1350 if (range->compress_type == BTRFS_COMPRESS_LZO) in btrfs_defrag_file()
1352 else if (range->compress_type == BTRFS_COMPRESS_ZSTD) in btrfs_defrag_file()
1358 BTRFS_I(inode)->defrag_compress = BTRFS_COMPRESS_NONE; in btrfs_defrag_file()
1376 return -ENOMEM; in btrfs_auto_defrag_init()