Lines Matching +full:cluster +full:- +full:index (fs/btrfs/free-space-cache.c)
1 // SPDX-License-Identifier: GPL-2.0
12 #include <linux/error-injection.h>
18 #include "free-space-cache.h"
20 #include "disk-io.h"
23 #include "space-info.h"
24 #include "delalloc-space.h"
25 #include "block-group.h"
28 #include "inode-item.h"
30 #include "file-item.h"
65 while ((node = rb_last(&ctl->free_space_offset)) != NULL) { in __btrfs_remove_free_space_cache()
67 if (!info->bitmap) { in __btrfs_remove_free_space_cache()
74 cond_resched_lock(&ctl->tree_lock); in __btrfs_remove_free_space_cache()
82 struct btrfs_fs_info *fs_info = root->fs_info; in __lookup_free_space_inode()
101 return ERR_PTR(-ENOENT); in __lookup_free_space_inode()
104 leaf = path->nodes[0]; in __lookup_free_space_inode()
105 header = btrfs_item_ptr(leaf, path->slots[0], in __lookup_free_space_inode()
116 inode = btrfs_iget_path(fs_info->sb, location.objectid, root, path); in __lookup_free_space_inode()
122 mapping_set_gfp_mask(inode->i_mapping, in __lookup_free_space_inode()
123 mapping_gfp_constraint(inode->i_mapping, in __lookup_free_space_inode()
132 struct btrfs_fs_info *fs_info = block_group->fs_info; in lookup_free_space_inode()
136 spin_lock(&block_group->lock); in lookup_free_space_inode()
137 if (block_group->inode) in lookup_free_space_inode()
138 inode = igrab(block_group->inode); in lookup_free_space_inode()
139 spin_unlock(&block_group->lock); in lookup_free_space_inode()
143 inode = __lookup_free_space_inode(fs_info->tree_root, path, in lookup_free_space_inode()
144 block_group->start); in lookup_free_space_inode()
148 spin_lock(&block_group->lock); in lookup_free_space_inode()
149 if (!((BTRFS_I(inode)->flags & flags) == flags)) { in lookup_free_space_inode()
151 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM | in lookup_free_space_inode()
153 block_group->disk_cache_state = BTRFS_DC_CLEAR; in lookup_free_space_inode()
156 if (!test_and_set_bit(BLOCK_GROUP_FLAG_IREF, &block_group->runtime_flags)) in lookup_free_space_inode()
157 block_group->inode = igrab(inode); in lookup_free_space_inode()
158 spin_unlock(&block_group->lock); in lookup_free_space_inode()
182 leaf = path->nodes[0]; in __create_free_space_inode()
183 inode_item = btrfs_item_ptr(leaf, path->slots[0], in __create_free_space_inode()
185 btrfs_item_key(leaf, &disk_key, path->slots[0]); in __create_free_space_inode()
188 btrfs_set_inode_generation(leaf, inode_item, trans->transid); in __create_free_space_inode()
196 btrfs_set_inode_transid(leaf, inode_item, trans->transid); in __create_free_space_inode()
211 leaf = path->nodes[0]; in __create_free_space_inode()
212 header = btrfs_item_ptr(leaf, path->slots[0], in __create_free_space_inode()
229 ret = btrfs_get_free_objectid(trans->fs_info->tree_root, &ino); in create_free_space_inode()
233 return __create_free_space_inode(trans->fs_info->tree_root, trans, path, in create_free_space_inode()
234 ino, block_group->start); in create_free_space_inode()
252 return -ENOMEM; in btrfs_remove_free_space_inode()
257 if (PTR_ERR(inode) != -ENOENT) in btrfs_remove_free_space_inode()
268 spin_lock(&block_group->lock); in btrfs_remove_free_space_inode()
269 if (test_and_clear_bit(BLOCK_GROUP_FLAG_IREF, &block_group->runtime_flags)) { in btrfs_remove_free_space_inode()
270 block_group->inode = NULL; in btrfs_remove_free_space_inode()
271 spin_unlock(&block_group->lock); in btrfs_remove_free_space_inode()
274 spin_unlock(&block_group->lock); in btrfs_remove_free_space_inode()
281 key.offset = block_group->start; in btrfs_remove_free_space_inode()
282 ret = btrfs_search_slot(trans, trans->fs_info->tree_root, &key, path, in btrfs_remove_free_space_inode()
283 -1, 1); in btrfs_remove_free_space_inode()
289 ret = btrfs_del_item(trans, trans->fs_info->tree_root, path); in btrfs_remove_free_space_inode()
307 struct btrfs_root *root = inode->root; in btrfs_truncate_free_space_cache()
316 ret = -ENOMEM; in btrfs_truncate_free_space_cache()
320 mutex_lock(&trans->transaction->cache_write_mutex); in btrfs_truncate_free_space_cache()
321 if (!list_empty(&block_group->io_list)) { in btrfs_truncate_free_space_cache()
322 list_del_init(&block_group->io_list); in btrfs_truncate_free_space_cache()
332 spin_lock(&block_group->lock); in btrfs_truncate_free_space_cache()
333 block_group->disk_cache_state = BTRFS_DC_CLEAR; in btrfs_truncate_free_space_cache()
334 spin_unlock(&block_group->lock); in btrfs_truncate_free_space_cache()
341 lock_extent(&inode->io_tree, 0, (u64)-1, &cached_state); in btrfs_truncate_free_space_cache()
342 btrfs_drop_extent_map_range(inode, 0, (u64)-1, false); in btrfs_truncate_free_space_cache()
346 * need to check for -EAGAIN. in btrfs_truncate_free_space_cache()
350 inode_sub_bytes(&inode->vfs_inode, control.sub_bytes); in btrfs_truncate_free_space_cache()
353 unlock_extent(&inode->io_tree, 0, (u64)-1, &cached_state); in btrfs_truncate_free_space_cache()
361 mutex_unlock(&trans->transaction->cache_write_mutex); in btrfs_truncate_free_space_cache()
373 file_ra_state_init(&ra, inode->i_mapping); in readahead_cache()
374 last_index = (i_size_read(inode) - 1) >> PAGE_SHIFT; in readahead_cache()
376 page_cache_sync_readahead(inode->i_mapping, &ra, NULL, 0, last_index); in readahead_cache()
388 return -ENOSPC; in io_ctl_init()
392 io_ctl->pages = kcalloc(num_pages, sizeof(struct page *), GFP_NOFS); in io_ctl_init()
393 if (!io_ctl->pages) in io_ctl_init()
394 return -ENOMEM; in io_ctl_init()
396 io_ctl->num_pages = num_pages; in io_ctl_init()
397 io_ctl->fs_info = btrfs_sb(inode->i_sb); in io_ctl_init()
398 io_ctl->inode = inode; in io_ctl_init()
406 kfree(io_ctl->pages); in io_ctl_free()
407 io_ctl->pages = NULL; in io_ctl_free()
412 if (io_ctl->cur) { in io_ctl_unmap_page()
413 io_ctl->cur = NULL; in io_ctl_unmap_page()
414 io_ctl->orig = NULL; in io_ctl_unmap_page()
420 ASSERT(io_ctl->index < io_ctl->num_pages); in io_ctl_map_page()
421 io_ctl->page = io_ctl->pages[io_ctl->index++]; in io_ctl_map_page()
422 io_ctl->cur = page_address(io_ctl->page); in io_ctl_map_page()
423 io_ctl->orig = io_ctl->cur; in io_ctl_map_page()
424 io_ctl->size = PAGE_SIZE; in io_ctl_map_page()
426 clear_page(io_ctl->cur); in io_ctl_map_page()
435 for (i = 0; i < io_ctl->num_pages; i++) { in io_ctl_drop_pages()
436 if (io_ctl->pages[i]) { in io_ctl_drop_pages()
437 btrfs_page_clear_checked(io_ctl->fs_info, in io_ctl_drop_pages()
438 io_ctl->pages[i], in io_ctl_drop_pages()
439 page_offset(io_ctl->pages[i]), in io_ctl_drop_pages()
441 unlock_page(io_ctl->pages[i]); in io_ctl_drop_pages()
442 put_page(io_ctl->pages[i]); in io_ctl_drop_pages()
450 struct inode *inode = io_ctl->inode; in io_ctl_prepare_pages()
451 gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping); in io_ctl_prepare_pages()
454 for (i = 0; i < io_ctl->num_pages; i++) { in io_ctl_prepare_pages()
457 page = find_or_create_page(inode->i_mapping, i, mask); in io_ctl_prepare_pages()
460 return -ENOMEM; in io_ctl_prepare_pages()
471 io_ctl->pages[i] = page; in io_ctl_prepare_pages()
475 if (page->mapping != inode->i_mapping) { in io_ctl_prepare_pages()
476 btrfs_err(BTRFS_I(inode)->root->fs_info, in io_ctl_prepare_pages()
479 return -EIO; in io_ctl_prepare_pages()
482 btrfs_err(BTRFS_I(inode)->root->fs_info, in io_ctl_prepare_pages()
485 return -EIO; in io_ctl_prepare_pages()
490 for (i = 0; i < io_ctl->num_pages; i++) in io_ctl_prepare_pages()
491 clear_page_dirty_for_io(io_ctl->pages[i]); in io_ctl_prepare_pages()
504 io_ctl->cur += (sizeof(u32) * io_ctl->num_pages); in io_ctl_set_generation()
505 io_ctl->size -= sizeof(u64) + (sizeof(u32) * io_ctl->num_pages); in io_ctl_set_generation()
507 put_unaligned_le64(generation, io_ctl->cur); in io_ctl_set_generation()
508 io_ctl->cur += sizeof(u64); in io_ctl_set_generation()
519 io_ctl->cur += sizeof(u32) * io_ctl->num_pages; in io_ctl_check_generation()
520 io_ctl->size -= sizeof(u64) + (sizeof(u32) * io_ctl->num_pages); in io_ctl_check_generation()
522 cache_gen = get_unaligned_le64(io_ctl->cur); in io_ctl_check_generation()
524 btrfs_err_rl(io_ctl->fs_info, in io_ctl_check_generation()
528 return -EIO; in io_ctl_check_generation()
530 io_ctl->cur += sizeof(u64); in io_ctl_check_generation()
534 static void io_ctl_set_crc(struct btrfs_io_ctl *io_ctl, int index) in io_ctl_set_crc() argument
540 if (index == 0) in io_ctl_set_crc()
541 offset = sizeof(u32) * io_ctl->num_pages; in io_ctl_set_crc()
543 crc = btrfs_crc32c(crc, io_ctl->orig + offset, PAGE_SIZE - offset); in io_ctl_set_crc()
546 tmp = page_address(io_ctl->pages[0]); in io_ctl_set_crc()
547 tmp += index; in io_ctl_set_crc()
551 static int io_ctl_check_crc(struct btrfs_io_ctl *io_ctl, int index) in io_ctl_check_crc() argument
557 if (index == 0) in io_ctl_check_crc()
558 offset = sizeof(u32) * io_ctl->num_pages; in io_ctl_check_crc()
560 tmp = page_address(io_ctl->pages[0]); in io_ctl_check_crc()
561 tmp += index; in io_ctl_check_crc()
565 crc = btrfs_crc32c(crc, io_ctl->orig + offset, PAGE_SIZE - offset); in io_ctl_check_crc()
568 btrfs_err_rl(io_ctl->fs_info, in io_ctl_check_crc()
571 return -EIO; in io_ctl_check_crc()
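/*
 * Editor's sketch (not kernel code) of the v1 space-cache layout implied
 * by io_ctl_set_generation() and io_ctl_set_crc() above.  Page 0 begins
 * with one u32 crc slot per page, followed by a le64 generation; every
 * page is crc32c'd in full except page 0, whose checksum starts just past
 * the crc array, so the generation itself is covered.  PAGE_SZ stands in
 * for the kernel's PAGE_SIZE.
 */
#include <stddef.h>
#include <stdint.h>

#define PAGE_SZ 4096u

/* Byte offset, within page 0, of the crc slot for page 'index'. */
static size_t crc_slot_offset(int index)
{
        return sizeof(uint32_t) * (size_t)index;
}

/* Byte offset, within page 0, of the le64 generation. */
static size_t generation_offset(int num_pages)
{
        return sizeof(uint32_t) * (size_t)num_pages;
}

/* First byte of page 'index' covered by its crc. */
static size_t crc_region_start(int index, int num_pages)
{
        return index == 0 ? generation_offset(num_pages) : 0;
}

/* Number of bytes covered by page 'index''s crc. */
static size_t crc_region_len(int index, int num_pages)
{
        return PAGE_SZ - crc_region_start(index, num_pages);
}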
582 if (!io_ctl->cur) in io_ctl_add_entry()
583 return -ENOSPC; in io_ctl_add_entry()
585 entry = io_ctl->cur; in io_ctl_add_entry()
586 put_unaligned_le64(offset, &entry->offset); in io_ctl_add_entry()
587 put_unaligned_le64(bytes, &entry->bytes); in io_ctl_add_entry()
588 entry->type = (bitmap) ? BTRFS_FREE_SPACE_BITMAP : in io_ctl_add_entry()
590 io_ctl->cur += sizeof(struct btrfs_free_space_entry); in io_ctl_add_entry()
591 io_ctl->size -= sizeof(struct btrfs_free_space_entry); in io_ctl_add_entry()
593 if (io_ctl->size >= sizeof(struct btrfs_free_space_entry)) in io_ctl_add_entry()
596 io_ctl_set_crc(io_ctl, io_ctl->index - 1); in io_ctl_add_entry()
599 if (io_ctl->index >= io_ctl->num_pages) in io_ctl_add_entry()
609 if (!io_ctl->cur) in io_ctl_add_bitmap()
610 return -ENOSPC; in io_ctl_add_bitmap()
616 if (io_ctl->cur != io_ctl->orig) { in io_ctl_add_bitmap()
617 io_ctl_set_crc(io_ctl, io_ctl->index - 1); in io_ctl_add_bitmap()
618 if (io_ctl->index >= io_ctl->num_pages) in io_ctl_add_bitmap()
619 return -ENOSPC; in io_ctl_add_bitmap()
623 copy_page(io_ctl->cur, bitmap); in io_ctl_add_bitmap()
624 io_ctl_set_crc(io_ctl, io_ctl->index - 1); in io_ctl_add_bitmap()
625 if (io_ctl->index < io_ctl->num_pages) in io_ctl_add_bitmap()
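/*
 * Editor's sketch of the on-disk record io_ctl_add_entry() above emits:
 * two little-endian u64s (offset, bytes) plus a one-byte type telling an
 * extent entry from a bitmap entry; each bitmap entry is then followed by
 * a full page of bits via io_ctl_add_bitmap().  The type values here
 * mirror BTRFS_FREE_SPACE_EXTENT/BTRFS_FREE_SPACE_BITMAP as an assumption
 * of this illustration.
 */
#include <stdint.h>

enum {
        FREE_SPACE_EXTENT_SKETCH = 1,
        FREE_SPACE_BITMAP_SKETCH = 2,
};

struct free_space_entry_sketch {
        uint64_t offset;        /* little-endian on disk */
        uint64_t bytes;         /* little-endian on disk */
        uint8_t type;
} __attribute__((packed));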
636 if (io_ctl->cur != io_ctl->orig) in io_ctl_zero_remaining_pages()
637 io_ctl_set_crc(io_ctl, io_ctl->index - 1); in io_ctl_zero_remaining_pages()
641 while (io_ctl->index < io_ctl->num_pages) { in io_ctl_zero_remaining_pages()
643 io_ctl_set_crc(io_ctl, io_ctl->index - 1); in io_ctl_zero_remaining_pages()
653 if (!io_ctl->cur) { in io_ctl_read_entry()
654 ret = io_ctl_check_crc(io_ctl, io_ctl->index); in io_ctl_read_entry()
659 e = io_ctl->cur; in io_ctl_read_entry()
660 entry->offset = get_unaligned_le64(&e->offset); in io_ctl_read_entry()
661 entry->bytes = get_unaligned_le64(&e->bytes); in io_ctl_read_entry()
662 *type = e->type; in io_ctl_read_entry()
663 io_ctl->cur += sizeof(struct btrfs_free_space_entry); in io_ctl_read_entry()
664 io_ctl->size -= sizeof(struct btrfs_free_space_entry); in io_ctl_read_entry()
666 if (io_ctl->size >= sizeof(struct btrfs_free_space_entry)) in io_ctl_read_entry()
679 ret = io_ctl_check_crc(io_ctl, io_ctl->index); in io_ctl_read_bitmap()
683 copy_page(entry->bitmap, io_ctl->cur); in io_ctl_read_bitmap()
691 struct btrfs_block_group *block_group = ctl->block_group; in recalculate_thresholds()
695 u64 size = block_group->length; in recalculate_thresholds()
696 u64 bytes_per_bg = BITS_PER_BITMAP * ctl->unit; in recalculate_thresholds()
697 u64 max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg); in recalculate_thresholds()
701 if (ctl->total_bitmaps > max_bitmaps) in recalculate_thresholds()
702 btrfs_err(block_group->fs_info, in recalculate_thresholds()
704 block_group->start, block_group->length, in recalculate_thresholds()
705 ctl->total_bitmaps, ctl->unit, max_bitmaps, in recalculate_thresholds()
707 ASSERT(ctl->total_bitmaps <= max_bitmaps); in recalculate_thresholds()
720 bitmap_bytes = ctl->total_bitmaps * ctl->unit; in recalculate_thresholds()
726 extent_bytes = max_bytes - bitmap_bytes; in recalculate_thresholds()
729 ctl->extents_thresh = in recalculate_thresholds()
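/*
 * Editor's sketch of the cap asserted in recalculate_thresholds() above:
 * the number of bitmaps a block group may hold is a ceiling division of
 * its size by the bytes a single bitmap covers (BITS_PER_BITMAP *
 * ctl->unit).
 */
#include <stdint.h>

static uint64_t max_bitmaps_sketch(uint64_t size, uint64_t bytes_per_bg)
{
        return (size + bytes_per_bg - 1) / bytes_per_bg;        /* round up */
}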
737 struct btrfs_fs_info *fs_info = root->fs_info; in __load_free_space_cache()
766 ret = -1; in __load_free_space_cache()
768 leaf = path->nodes[0]; in __load_free_space_cache()
769 header = btrfs_item_ptr(leaf, path->slots[0], in __load_free_space_cache()
776 if (!BTRFS_I(inode)->generation) { in __load_free_space_cache()
783 if (BTRFS_I(inode)->generation != generation) { in __load_free_space_cache()
786 BTRFS_I(inode)->generation, generation); in __load_free_space_cache()
815 ret = -ENOMEM; in __load_free_space_cache()
825 if (!e->bytes) { in __load_free_space_cache()
826 ret = -1; in __load_free_space_cache()
832 spin_lock(&ctl->tree_lock); in __load_free_space_cache()
834 spin_unlock(&ctl->tree_lock); in __load_free_space_cache()
843 num_bitmaps--; in __load_free_space_cache()
844 e->bitmap = kmem_cache_zalloc( in __load_free_space_cache()
846 if (!e->bitmap) { in __load_free_space_cache()
847 ret = -ENOMEM; in __load_free_space_cache()
852 spin_lock(&ctl->tree_lock); in __load_free_space_cache()
855 spin_unlock(&ctl->tree_lock); in __load_free_space_cache()
858 kmem_cache_free(btrfs_free_space_bitmap_cachep, e->bitmap); in __load_free_space_cache()
862 ctl->total_bitmaps++; in __load_free_space_cache()
864 spin_unlock(&ctl->tree_lock); in __load_free_space_cache()
865 list_add_tail(&e->list, &bitmaps); in __load_free_space_cache()
868 num_entries--; in __load_free_space_cache()
878 list_del_init(&e->list); in __load_free_space_cache()
892 spin_lock(&ctl->tree_lock); in __load_free_space_cache()
894 spin_unlock(&ctl->tree_lock); in __load_free_space_cache()
905 while (!ret && (n = rb_first(&ctl->free_space_offset)) != NULL) { in copy_free_space_cache()
907 if (!info->bitmap) { in copy_free_space_cache()
908 const u64 offset = info->offset; in copy_free_space_cache()
909 const u64 bytes = info->bytes; in copy_free_space_cache()
912 spin_unlock(&ctl->tree_lock); in copy_free_space_cache()
915 spin_lock(&ctl->tree_lock); in copy_free_space_cache()
917 u64 offset = info->offset; in copy_free_space_cache()
918 u64 bytes = ctl->unit; in copy_free_space_cache()
923 spin_unlock(&ctl->tree_lock); in copy_free_space_cache()
926 spin_lock(&ctl->tree_lock); in copy_free_space_cache()
932 cond_resched_lock(&ctl->tree_lock); in copy_free_space_cache()
941 struct btrfs_fs_info *fs_info = block_group->fs_info; in load_free_space_cache()
942 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in load_free_space_cache()
948 u64 used = block_group->used; in load_free_space_cache()
961 spin_lock(&block_group->lock); in load_free_space_cache()
962 if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) { in load_free_space_cache()
963 spin_unlock(&block_group->lock); in load_free_space_cache()
966 spin_unlock(&block_group->lock); in load_free_space_cache()
971 path->search_commit_root = 1; in load_free_space_cache()
972 path->skip_locking = 1; in load_free_space_cache()
979 * for a free extent, at extent-tree.c:find_free_extent(), we can find in load_free_space_cache()
990 * once created get their ->cached field set to BTRFS_CACHE_FINISHED so in load_free_space_cache()
1000 spin_lock(&block_group->lock); in load_free_space_cache()
1001 if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) { in load_free_space_cache()
1002 spin_unlock(&block_group->lock); in load_free_space_cache()
1006 spin_unlock(&block_group->lock); in load_free_space_cache()
1009 * Reinitialize the class of struct inode's mapping->invalidate_lock for in load_free_space_cache()
1013 lockdep_set_class(&(&inode->i_data)->invalidate_lock, in load_free_space_cache()
1016 ret = __load_free_space_cache(fs_info->tree_root, inode, &tmp_ctl, in load_free_space_cache()
1017 path, block_group->start); in load_free_space_cache()
1022 matched = (tmp_ctl.free_space == (block_group->length - used - in load_free_space_cache()
1023 block_group->bytes_super)); in load_free_space_cache()
1031 * so we need to re-set it here. in load_free_space_cache()
1045 block_group->start); in load_free_space_cache()
1046 ret = -1; in load_free_space_cache()
1051 spin_lock(&block_group->lock); in load_free_space_cache()
1052 block_group->disk_cache_state = BTRFS_DC_CLEAR; in load_free_space_cache()
1053 spin_unlock(&block_group->lock); in load_free_space_cache()
1058 block_group->start); in load_free_space_cache()
1061 spin_lock(&ctl->tree_lock); in load_free_space_cache()
1063 spin_unlock(&ctl->tree_lock); in load_free_space_cache()
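/*
 * Editor's sketch of the "matched" test above: a loaded cache is accepted
 * only if its free space equals the block group length minus allocated
 * bytes and the superblock mirror reservation; otherwise the cache is
 * discarded and rebuilt from the extent tree.
 */
#include <stdbool.h>
#include <stdint.h>

static bool cache_matches_sketch(uint64_t cached_free_space, uint64_t length,
                                 uint64_t used, uint64_t bytes_super)
{
        return cached_free_space == length - used - bytes_super;
}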
1076 struct btrfs_free_cluster *cluster = NULL; in write_cache_extent_entries() local
1078 struct rb_node *node = rb_first(&ctl->free_space_offset); in write_cache_extent_entries()
1081 /* Get the cluster for this block_group if it exists */ in write_cache_extent_entries()
1082 if (block_group && !list_empty(&block_group->cluster_list)) { in write_cache_extent_entries()
1083 cluster = list_entry(block_group->cluster_list.next, in write_cache_extent_entries()
1088 if (!node && cluster) { in write_cache_extent_entries()
1089 cluster_locked = cluster; in write_cache_extent_entries()
1090 spin_lock(&cluster_locked->lock); in write_cache_extent_entries()
1091 node = rb_first(&cluster->root); in write_cache_extent_entries()
1092 cluster = NULL; in write_cache_extent_entries()
1102 ret = io_ctl_add_entry(io_ctl, e->offset, e->bytes, in write_cache_extent_entries()
1103 e->bitmap); in write_cache_extent_entries()
1107 if (e->bitmap) { in write_cache_extent_entries()
1108 list_add_tail(&e->list, bitmap_list); in write_cache_extent_entries()
1112 if (!node && cluster) { in write_cache_extent_entries()
1113 node = rb_first(&cluster->root); in write_cache_extent_entries()
1114 cluster_locked = cluster; in write_cache_extent_entries()
1115 spin_lock(&cluster_locked->lock); in write_cache_extent_entries()
1116 cluster = NULL; in write_cache_extent_entries()
1120 spin_unlock(&cluster_locked->lock); in write_cache_extent_entries()
1130 list_for_each_entry(trim_entry, &ctl->trimming_ranges, list) { in write_cache_extent_entries()
1131 ret = io_ctl_add_entry(io_ctl, trim_entry->start, in write_cache_extent_entries()
1132 trim_entry->bytes, NULL); in write_cache_extent_entries()
1141 spin_unlock(&cluster_locked->lock); in write_cache_extent_entries()
1142 return -ENOSPC; in write_cache_extent_entries()
1163 clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1, in update_cache_item()
1167 leaf = path->nodes[0]; in update_cache_item()
1170 ASSERT(path->slots[0]); in update_cache_item()
1171 path->slots[0]--; in update_cache_item()
1172 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); in update_cache_item()
1175 clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, in update_cache_item()
1176 inode->i_size - 1, EXTENT_DELALLOC, in update_cache_item()
1183 BTRFS_I(inode)->generation = trans->transid; in update_cache_item()
1184 header = btrfs_item_ptr(leaf, path->slots[0], in update_cache_item()
1188 btrfs_set_free_space_generation(leaf, header, trans->transid); in update_cache_item()
1195 return -1; in update_cache_item()
1218 unpin = &trans->transaction->pinned_extents; in write_pinned_extent_entries()
1220 start = block_group->start; in write_pinned_extent_entries()
1222 while (start < block_group->start + block_group->length) { in write_pinned_extent_entries()
1229 if (extent_start >= block_group->start + block_group->length) in write_pinned_extent_entries()
1233 extent_end = min(block_group->start + block_group->length, in write_pinned_extent_entries()
1235 len = extent_end - extent_start; in write_pinned_extent_entries()
1240 return -ENOSPC; in write_pinned_extent_entries()
1256 ret = io_ctl_add_bitmap(io_ctl, entry->bitmap); in write_bitmap_entries()
1258 return -ENOSPC; in write_bitmap_entries()
1259 list_del_init(&entry->list); in write_bitmap_entries()
1269 ret = btrfs_wait_ordered_range(inode, 0, (u64)-1); in flush_dirty_cache()
1271 clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1, in flush_dirty_cache()
1283 list_del_init(&entry->list); in cleanup_bitmap_list()
1292 unlock_extent(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1, in cleanup_write_cache_enospc()
1303 struct inode *inode = io_ctl->inode; in __btrfs_wait_cache_io()
1315 io_ctl->entries, io_ctl->bitmaps); in __btrfs_wait_cache_io()
1318 invalidate_inode_pages2(inode->i_mapping); in __btrfs_wait_cache_io()
1319 BTRFS_I(inode)->generation = 0; in __btrfs_wait_cache_io()
1321 btrfs_debug(root->fs_info, in __btrfs_wait_cache_io()
1323 block_group->start, ret); in __btrfs_wait_cache_io()
1329 spin_lock(&trans->transaction->dirty_bgs_lock); in __btrfs_wait_cache_io()
1332 spin_lock(&block_group->lock); in __btrfs_wait_cache_io()
1339 if (!ret && list_empty(&block_group->dirty_list)) in __btrfs_wait_cache_io()
1340 block_group->disk_cache_state = BTRFS_DC_WRITTEN; in __btrfs_wait_cache_io()
1342 block_group->disk_cache_state = BTRFS_DC_ERROR; in __btrfs_wait_cache_io()
1344 spin_unlock(&block_group->lock); in __btrfs_wait_cache_io()
1345 spin_unlock(&trans->transaction->dirty_bgs_lock); in __btrfs_wait_cache_io()
1346 io_ctl->inode = NULL; in __btrfs_wait_cache_io()
1358 return __btrfs_wait_cache_io(block_group->fs_info->tree_root, trans, in btrfs_wait_cache_io()
1359 block_group, &block_group->io_ctl, in btrfs_wait_cache_io()
1360 path, block_group->start); in btrfs_wait_cache_io()
1391 return -EIO; in __btrfs_write_out_cache()
1393 WARN_ON(io_ctl->pages); in __btrfs_write_out_cache()
1398 if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)) { in __btrfs_write_out_cache()
1399 down_write(&block_group->data_rwsem); in __btrfs_write_out_cache()
1400 spin_lock(&block_group->lock); in __btrfs_write_out_cache()
1401 if (block_group->delalloc_bytes) { in __btrfs_write_out_cache()
1402 block_group->disk_cache_state = BTRFS_DC_WRITTEN; in __btrfs_write_out_cache()
1403 spin_unlock(&block_group->lock); in __btrfs_write_out_cache()
1404 up_write(&block_group->data_rwsem); in __btrfs_write_out_cache()
1405 BTRFS_I(inode)->generation = 0; in __btrfs_write_out_cache()
1410 spin_unlock(&block_group->lock); in __btrfs_write_out_cache()
1418 lock_extent(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1, in __btrfs_write_out_cache()
1421 io_ctl_set_generation(io_ctl, trans->transid); in __btrfs_write_out_cache()
1423 mutex_lock(&ctl->cache_writeout_mutex); in __btrfs_write_out_cache()
1425 spin_lock(&ctl->tree_lock); in __btrfs_write_out_cache()
1450 spin_unlock(&ctl->tree_lock); in __btrfs_write_out_cache()
1451 mutex_unlock(&ctl->cache_writeout_mutex); in __btrfs_write_out_cache()
1459 ret = btrfs_dirty_pages(BTRFS_I(inode), io_ctl->pages, in __btrfs_write_out_cache()
1460 io_ctl->num_pages, 0, i_size_read(inode), in __btrfs_write_out_cache()
1465 if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)) in __btrfs_write_out_cache()
1466 up_write(&block_group->data_rwsem); in __btrfs_write_out_cache()
1474 unlock_extent(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1, in __btrfs_write_out_cache()
1482 io_ctl->entries = entries; in __btrfs_write_out_cache()
1483 io_ctl->bitmaps = bitmaps; in __btrfs_write_out_cache()
1485 ret = btrfs_fdatawrite_range(inode, 0, (u64)-1); in __btrfs_write_out_cache()
1493 spin_unlock(&ctl->tree_lock); in __btrfs_write_out_cache()
1494 mutex_unlock(&ctl->cache_writeout_mutex); in __btrfs_write_out_cache()
1500 if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)) in __btrfs_write_out_cache()
1501 up_write(&block_group->data_rwsem); in __btrfs_write_out_cache()
1504 io_ctl->inode = NULL; in __btrfs_write_out_cache()
1507 invalidate_inode_pages2(inode->i_mapping); in __btrfs_write_out_cache()
1508 BTRFS_I(inode)->generation = 0; in __btrfs_write_out_cache()
1520 struct btrfs_fs_info *fs_info = trans->fs_info; in btrfs_write_out_cache()
1521 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in btrfs_write_out_cache()
1525 spin_lock(&block_group->lock); in btrfs_write_out_cache()
1526 if (block_group->disk_cache_state < BTRFS_DC_SETUP) { in btrfs_write_out_cache()
1527 spin_unlock(&block_group->lock); in btrfs_write_out_cache()
1530 spin_unlock(&block_group->lock); in btrfs_write_out_cache()
1536 ret = __btrfs_write_out_cache(fs_info->tree_root, inode, ctl, in btrfs_write_out_cache()
1537 block_group, &block_group->io_ctl, trans); in btrfs_write_out_cache()
1541 block_group->start, ret); in btrfs_write_out_cache()
1542 spin_lock(&block_group->lock); in btrfs_write_out_cache()
1543 block_group->disk_cache_state = BTRFS_DC_ERROR; in btrfs_write_out_cache()
1544 spin_unlock(&block_group->lock); in btrfs_write_out_cache()
1546 block_group->io_ctl.inode = NULL; in btrfs_write_out_cache()
1562 offset -= bitmap_start; in offset_to_bit()
1577 bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit; in offset_to_bitmap()
1578 bitmap_start = offset - ctl->start; in offset_to_bitmap()
1581 bitmap_start += ctl->start; in offset_to_bitmap()
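/*
 * Editor's sketch of the unit arithmetic behind offset_to_bit() and
 * offset_to_bitmap() above.  One bitmap covers BITS_PER_BITMAP *
 * ctl->unit bytes (unit is the sectorsize); offsets round down to bit and
 * bitmap boundaries.  BITS_PER_BM stands in for the kernel's constant.
 */
#include <stdint.h>

#define BITS_PER_BM (4096 * 8UL)        /* assumed: one page of bits */

static uint64_t offset_to_bit_sketch(uint64_t bitmap_start, uint32_t unit,
                                     uint64_t offset)
{
        return (offset - bitmap_start) / unit;
}

static uint64_t offset_to_bitmap_sketch(uint64_t ctl_start, uint32_t unit,
                                        uint64_t offset)
{
        uint64_t bytes_per_bitmap = (uint64_t)BITS_PER_BM * unit;
        uint64_t n = (offset - ctl_start) / bytes_per_bitmap;

        return ctl_start + n * bytes_per_bitmap;
}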
1587 struct btrfs_free_cluster *cluster, in tree_insert_offset() argument
1594 lockdep_assert_held(&ctl->tree_lock); in tree_insert_offset()
1596 if (cluster) { in tree_insert_offset()
1597 lockdep_assert_held(&cluster->lock); in tree_insert_offset()
1598 root = &cluster->root; in tree_insert_offset()
1600 root = &ctl->free_space_offset; in tree_insert_offset()
1603 p = &root->rb_node; in tree_insert_offset()
1611 if (new_entry->offset < info->offset) { in tree_insert_offset()
1612 p = &(*p)->rb_left; in tree_insert_offset()
1613 } else if (new_entry->offset > info->offset) { in tree_insert_offset()
1614 p = &(*p)->rb_right; in tree_insert_offset()
1629 if (new_entry->bitmap) { in tree_insert_offset()
1630 if (info->bitmap) { in tree_insert_offset()
1632 return -EEXIST; in tree_insert_offset()
1634 p = &(*p)->rb_right; in tree_insert_offset()
1636 if (!info->bitmap) { in tree_insert_offset()
1638 return -EEXIST; in tree_insert_offset()
1640 p = &(*p)->rb_left; in tree_insert_offset()
1645 rb_link_node(&new_entry->offset_index, parent, p); in tree_insert_offset()
1646 rb_insert_color(&new_entry->offset_index, root); in tree_insert_offset()
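/*
 * Editor's sketch of the ordering tree_insert_offset() above enforces:
 * entries are keyed by offset, and when a bitmap entry and an extent
 * entry share an offset the bitmap sorts to the right of the extent; two
 * entries of the same kind at one offset collide (-EEXIST).
 */
#include <stdint.h>

struct fse_key_sketch {
        uint64_t offset;
        int is_bitmap;
};

static int fse_cmp_sketch(const struct fse_key_sketch *a,
                          const struct fse_key_sketch *b)
{
        if (a->offset != b->offset)
                return a->offset < b->offset ? -1 : 1;
        /* same offset: extent < bitmap; 0 means duplicate */
        return a->is_bitmap - b->is_bitmap;
}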
1652 * This is a little subtle. We *only* have ->max_extent_size set if we actually
1653 * searched through the bitmap and figured out the largest ->max_extent_size,
1656 * we've found already if it's larger, or we want to use ->bytes.
1658 * This matters because find_free_space() will skip entries who's ->bytes is
1660 * may pick some previous entry that has a smaller ->max_extent_size than we
1662 * ->max_extent_size set to 4K and ->bytes set to 1M. A second entry hasn't set
1663 * ->max_extent_size yet, has ->bytes set to 8K and it's contiguous. We will
1665 * that first bitmap entry had ->max_extent_size set, but the second one did
1670 * don't have ->max_extent_size set. We'll return 16K, and the next time the
1677 if (entry->bitmap && entry->max_extent_size) in get_max_extent_size()
1678 return entry->max_extent_size; in get_max_extent_size()
1679 return entry->bytes; in get_max_extent_size()
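/*
 * Editor's sketch, with the scenario from the comment above made
 * concrete: a bitmap entry that has been searched reports the largest
 * contiguous run it found (->max_extent_size), one that has not reports
 * ->bytes.
 */
#include <assert.h>
#include <stdint.h>

struct fse_sketch {
        uint64_t bytes;                 /* total free bytes in the entry */
        uint64_t max_extent_size;       /* largest run found, 0 if unsearched */
        int bitmap;
};

static uint64_t get_max_extent_size_sketch(const struct fse_sketch *e)
{
        if (e->bitmap && e->max_extent_size)
                return e->max_extent_size;
        return e->bytes;
}

int main(void)
{
        /* 1M bitmap whose largest contiguous run turned out to be 4K. */
        struct fse_sketch searched = {
                .bytes = 1 << 20, .max_extent_size = 4096, .bitmap = 1,
        };
        /* 8K contiguous bitmap entry that has never been searched. */
        struct fse_sketch fresh = { .bytes = 8192, .bitmap = 1 };

        assert(get_max_extent_size_sketch(&searched) == 4096);
        assert(get_max_extent_size_sketch(&fresh) == 8192);
        return 0;
}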
1698 * fuzzy - If this is set, then we are trying to make an allocation, and we just
1706 struct rb_node *n = ctl->free_space_offset.rb_node; in tree_search_offset()
1709 lockdep_assert_held(&ctl->tree_lock); in tree_search_offset()
1716 if (offset < entry->offset) in tree_search_offset()
1717 n = n->rb_left; in tree_search_offset()
1718 else if (offset > entry->offset) in tree_search_offset()
1719 n = n->rb_right; in tree_search_offset()
1729 if (entry->bitmap) in tree_search_offset()
1740 if (entry->offset != offset) in tree_search_offset()
1743 WARN_ON(!entry->bitmap); in tree_search_offset()
1746 if (entry->bitmap) { in tree_search_offset()
1751 n = rb_prev(&entry->offset_index); in tree_search_offset()
1755 if (!prev->bitmap && in tree_search_offset()
1756 prev->offset + prev->bytes > offset) in tree_search_offset()
1768 if (entry->offset > offset) { in tree_search_offset()
1769 n = rb_prev(&entry->offset_index); in tree_search_offset()
1773 ASSERT(entry->offset <= offset); in tree_search_offset()
1782 if (entry->bitmap) { in tree_search_offset()
1783 n = rb_prev(&entry->offset_index); in tree_search_offset()
1787 if (!prev->bitmap && in tree_search_offset()
1788 prev->offset + prev->bytes > offset) in tree_search_offset()
1791 if (entry->offset + BITS_PER_BITMAP * ctl->unit > offset) in tree_search_offset()
1793 } else if (entry->offset + entry->bytes > offset) in tree_search_offset()
1800 n = rb_next(&entry->offset_index); in tree_search_offset()
1804 if (entry->bitmap) { in tree_search_offset()
1805 if (entry->offset + BITS_PER_BITMAP * in tree_search_offset()
1806 ctl->unit > offset) in tree_search_offset()
1809 if (entry->offset + entry->bytes > offset) in tree_search_offset()
1820 lockdep_assert_held(&ctl->tree_lock); in unlink_free_space()
1822 rb_erase(&info->offset_index, &ctl->free_space_offset); in unlink_free_space()
1823 rb_erase_cached(&info->bytes_index, &ctl->free_space_bytes); in unlink_free_space()
1824 ctl->free_extents--; in unlink_free_space()
1826 if (!info->bitmap && !btrfs_free_space_trimmed(info)) { in unlink_free_space()
1827 ctl->discardable_extents[BTRFS_STAT_CURR]--; in unlink_free_space()
1828 ctl->discardable_bytes[BTRFS_STAT_CURR] -= info->bytes; in unlink_free_space()
1832 ctl->free_space -= info->bytes; in unlink_free_space()
1840 lockdep_assert_held(&ctl->tree_lock); in link_free_space()
1842 ASSERT(info->bytes || info->bitmap); in link_free_space()
1847 rb_add_cached(&info->bytes_index, &ctl->free_space_bytes, entry_less); in link_free_space()
1849 if (!info->bitmap && !btrfs_free_space_trimmed(info)) { in link_free_space()
1850 ctl->discardable_extents[BTRFS_STAT_CURR]++; in link_free_space()
1851 ctl->discardable_bytes[BTRFS_STAT_CURR] += info->bytes; in link_free_space()
1854 ctl->free_space += info->bytes; in link_free_space()
1855 ctl->free_extents++; in link_free_space()
1862 ASSERT(info->bitmap); in relink_bitmap_entry()
1865 * If our entry is empty it's because we're on a cluster and we don't in relink_bitmap_entry()
1866 * want to re-link it into our ctl bytes index. in relink_bitmap_entry()
1868 if (RB_EMPTY_NODE(&info->bytes_index)) in relink_bitmap_entry()
1871 lockdep_assert_held(&ctl->tree_lock); in relink_bitmap_entry()
1873 rb_erase_cached(&info->bytes_index, &ctl->free_space_bytes); in relink_bitmap_entry()
1874 rb_add_cached(&info->bytes_index, &ctl->free_space_bytes, entry_less); in relink_bitmap_entry()
1882 int extent_delta = -1; in bitmap_clear_bits()
1884 start = offset_to_bit(info->offset, ctl->unit, offset); in bitmap_clear_bits()
1885 count = bytes_to_bits(bytes, ctl->unit); in bitmap_clear_bits()
1889 bitmap_clear(info->bitmap, start, count); in bitmap_clear_bits()
1891 info->bytes -= bytes; in bitmap_clear_bits()
1892 if (info->max_extent_size > ctl->unit) in bitmap_clear_bits()
1893 info->max_extent_size = 0; in bitmap_clear_bits()
1897 if (start && test_bit(start - 1, info->bitmap)) in bitmap_clear_bits()
1900 if (end < BITS_PER_BITMAP && test_bit(end, info->bitmap)) in bitmap_clear_bits()
1903 info->bitmap_extents += extent_delta; in bitmap_clear_bits()
1905 ctl->discardable_extents[BTRFS_STAT_CURR] += extent_delta; in bitmap_clear_bits()
1906 ctl->discardable_bytes[BTRFS_STAT_CURR] -= bytes; in bitmap_clear_bits()
1910 ctl->free_space -= bytes; in bitmap_clear_bits()
1920 start = offset_to_bit(info->offset, ctl->unit, offset); in btrfs_bitmap_set_bits()
1921 count = bytes_to_bits(bytes, ctl->unit); in btrfs_bitmap_set_bits()
1925 bitmap_set(info->bitmap, start, count); in btrfs_bitmap_set_bits()
1931 info->max_extent_size = 0; in btrfs_bitmap_set_bits()
1932 info->bytes += bytes; in btrfs_bitmap_set_bits()
1933 ctl->free_space += bytes; in btrfs_bitmap_set_bits()
1937 if (start && test_bit(start - 1, info->bitmap)) in btrfs_bitmap_set_bits()
1938 extent_delta--; in btrfs_bitmap_set_bits()
1940 if (end < BITS_PER_BITMAP && test_bit(end, info->bitmap)) in btrfs_bitmap_set_bits()
1941 extent_delta--; in btrfs_bitmap_set_bits()
1943 info->bitmap_extents += extent_delta; in btrfs_bitmap_set_bits()
1945 ctl->discardable_extents[BTRFS_STAT_CURR] += extent_delta; in btrfs_bitmap_set_bits()
1946 ctl->discardable_bytes[BTRFS_STAT_CURR] += bytes; in btrfs_bitmap_set_bits()
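/*
 * Editor's sketch of the extent_delta bookkeeping in bitmap_clear_bits()
 * and btrfs_bitmap_set_bits() above: setting a run of bits adds one free
 * extent, minus one for each already-set neighbour it merges with;
 * clearing a run removes one, plus one per surviving neighbour (a clear
 * in the middle of a run is a split).
 */
#include <stdbool.h>

static int set_bits_extent_delta(bool left_set, bool right_set)
{
        int delta = 1;                  /* the new run itself */

        if (left_set)
                delta--;                /* merged with the left run */
        if (right_set)
                delta--;                /* merged with the right run */
        return delta;                   /* 1, 0, or -1 */
}

static int clear_bits_extent_delta(bool left_set, bool right_set)
{
        int delta = -1;                 /* the cleared run disappears */

        if (left_set)
                delta++;
        if (right_set)
                delta++;
        return delta;                   /* -1, 0, or +1 (a split) */
}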
1969 bitmap_info->max_extent_size && in search_bitmap()
1970 bitmap_info->max_extent_size < *bytes) { in search_bitmap()
1971 *bytes = bitmap_info->max_extent_size; in search_bitmap()
1972 return -1; in search_bitmap()
1975 i = offset_to_bit(bitmap_info->offset, ctl->unit, in search_bitmap()
1976 max_t(u64, *offset, bitmap_info->offset)); in search_bitmap()
1977 bits = bytes_to_bits(*bytes, ctl->unit); in search_bitmap()
1979 for_each_set_bit_from(i, bitmap_info->bitmap, BITS_PER_BITMAP) { in search_bitmap()
1984 next_zero = find_next_zero_bit(bitmap_info->bitmap, in search_bitmap()
1986 extent_bits = next_zero - i; in search_bitmap()
1997 *offset = (u64)(i * ctl->unit) + bitmap_info->offset; in search_bitmap()
1998 *bytes = (u64)(found_bits) * ctl->unit; in search_bitmap()
2002 *bytes = (u64)(max_bits) * ctl->unit; in search_bitmap()
2003 bitmap_info->max_extent_size = *bytes; in search_bitmap()
2005 return -1; in search_bitmap()
2019 if (!ctl->free_space_offset.rb_node) in find_free_space()
2023 node = rb_first_cached(&ctl->free_space_bytes); in find_free_space()
2029 node = &entry->offset_index; in find_free_space()
2041 * If we are using the bytes index then all subsequent entries in find_free_space()
2045 * If we're using the offset index then we need to keep going in find_free_space()
2048 if (entry->bytes < *bytes) { in find_free_space()
2060 tmp = entry->offset - ctl->start + align - 1; in find_free_space()
2062 tmp = tmp * align + ctl->start; in find_free_space()
2063 align_off = tmp - entry->offset; in find_free_space()
2066 tmp = entry->offset; in find_free_space()
2070 * We don't break here if we're using the bytes index because we in find_free_space()
2076 if (entry->bytes < *bytes + align_off) { in find_free_space()
2082 if (entry->bitmap) { in find_free_space()
2098 * The bitmap may have gotten re-arranged in the space in find_free_space()
2099 * index here because the max_extent_size may have been in find_free_space()
2109 *bytes = entry->bytes - align_off; in find_free_space()
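/*
 * Editor's sketch of the alignment step in find_free_space() above: the
 * candidate offset is rounded up to the next 'align' boundary measured
 * from the block group start, and the skipped bytes (align_off) must
 * still leave room for the request inside the entry.
 */
#include <stdint.h>

static uint64_t align_up_sketch(uint64_t ctl_start, uint64_t offset,
                                uint64_t align)
{
        uint64_t tmp = (offset - ctl_start + align - 1) / align;

        return tmp * align + ctl_start;
}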
2119 info->offset = offset_to_bitmap(ctl, offset); in add_new_bitmap()
2120 info->bytes = 0; in add_new_bitmap()
2121 info->bitmap_extents = 0; in add_new_bitmap()
2122 INIT_LIST_HEAD(&info->list); in add_new_bitmap()
2124 ctl->total_bitmaps++; in add_new_bitmap()
2137 if (bitmap_info->bytes && !btrfs_free_space_trimmed(bitmap_info)) { in free_bitmap()
2138 ctl->discardable_extents[BTRFS_STAT_CURR] -= in free_bitmap()
2139 bitmap_info->bitmap_extents; in free_bitmap()
2140 ctl->discardable_bytes[BTRFS_STAT_CURR] -= bitmap_info->bytes; in free_bitmap()
2144 kmem_cache_free(btrfs_free_space_bitmap_cachep, bitmap_info->bitmap); in free_bitmap()
2146 ctl->total_bitmaps--; in free_bitmap()
2159 end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1; in remove_from_bitmap()
2168 search_bytes = ctl->unit; in remove_from_bitmap()
2169 search_bytes = min(search_bytes, end - search_start + 1); in remove_from_bitmap()
2173 return -EINVAL; in remove_from_bitmap()
2179 search_bytes = min(search_bytes, end - search_start + 1); in remove_from_bitmap()
2183 *bytes -= search_bytes; in remove_from_bitmap()
2186 struct rb_node *next = rb_next(&bitmap_info->offset_index); in remove_from_bitmap()
2187 if (!bitmap_info->bytes) in remove_from_bitmap()
2195 return -EINVAL; in remove_from_bitmap()
2204 if (!bitmap_info->bitmap) in remove_from_bitmap()
2205 return -EAGAIN; in remove_from_bitmap()
2214 search_bytes = ctl->unit; in remove_from_bitmap()
2218 return -EAGAIN; in remove_from_bitmap()
2221 } else if (!bitmap_info->bytes) in remove_from_bitmap()
2240 ctl->discardable_extents[BTRFS_STAT_CURR] += in add_bytes_to_bitmap()
2241 info->bitmap_extents; in add_bytes_to_bitmap()
2242 ctl->discardable_bytes[BTRFS_STAT_CURR] += info->bytes; in add_bytes_to_bitmap()
2244 info->trim_state = BTRFS_TRIM_STATE_UNTRIMMED; in add_bytes_to_bitmap()
2247 end = info->offset + (u64)(BITS_PER_BITMAP * ctl->unit); in add_bytes_to_bitmap()
2249 bytes_to_set = min(end - offset, bytes); in add_bytes_to_bitmap()
2260 struct btrfs_block_group *block_group = ctl->block_group; in use_bitmap()
2261 struct btrfs_fs_info *fs_info = block_group->fs_info; in use_bitmap()
2270 if (!forced && info->bytes >= FORCE_EXTENT_THRESHOLD) in use_bitmap()
2277 if (!forced && ctl->free_extents < ctl->extents_thresh) { in use_bitmap()
2285 if (info->bytes <= fs_info->sectorsize * 8) { in use_bitmap()
2286 if (ctl->free_extents * 3 <= ctl->extents_thresh) in use_bitmap()
2301 if (((BITS_PER_BITMAP * ctl->unit) >> 1) > block_group->length) in use_bitmap()
2321 bytes = info->bytes; in insert_into_bitmap()
2322 offset = info->offset; in insert_into_bitmap()
2323 trim_state = info->trim_state; in insert_into_bitmap()
2325 if (!ctl->op->use_bitmap(ctl, info)) in insert_into_bitmap()
2328 if (ctl->op == &free_space_op) in insert_into_bitmap()
2329 block_group = ctl->block_group; in insert_into_bitmap()
2332 * Since we link bitmaps right into the cluster we need to see if we in insert_into_bitmap()
2333 * have a cluster here, and if so and it has our bitmap we need to add in insert_into_bitmap()
2336 if (block_group && !list_empty(&block_group->cluster_list)) { in insert_into_bitmap()
2337 struct btrfs_free_cluster *cluster; in insert_into_bitmap() local
2341 cluster = list_entry(block_group->cluster_list.next, in insert_into_bitmap()
2344 spin_lock(&cluster->lock); in insert_into_bitmap()
2345 node = rb_first(&cluster->root); in insert_into_bitmap()
2347 spin_unlock(&cluster->lock); in insert_into_bitmap()
2352 if (!entry->bitmap) { in insert_into_bitmap()
2353 spin_unlock(&cluster->lock); in insert_into_bitmap()
2357 if (entry->offset == offset_to_bitmap(ctl, offset)) { in insert_into_bitmap()
2360 bytes -= bytes_added; in insert_into_bitmap()
2363 spin_unlock(&cluster->lock); in insert_into_bitmap()
2380 bytes -= bytes_added; in insert_into_bitmap()
2391 if (info && info->bitmap) { in insert_into_bitmap()
2397 spin_unlock(&ctl->tree_lock); in insert_into_bitmap()
2399 /* no pre-allocated info, allocate a new one */ in insert_into_bitmap()
2404 spin_lock(&ctl->tree_lock); in insert_into_bitmap()
2405 ret = -ENOMEM; in insert_into_bitmap()
2411 info->bitmap = kmem_cache_zalloc(btrfs_free_space_bitmap_cachep, in insert_into_bitmap()
2413 info->trim_state = BTRFS_TRIM_STATE_TRIMMED; in insert_into_bitmap()
2414 spin_lock(&ctl->tree_lock); in insert_into_bitmap()
2415 if (!info->bitmap) { in insert_into_bitmap()
2416 ret = -ENOMEM; in insert_into_bitmap()
2424 if (info->bitmap) in insert_into_bitmap()
2426 info->bitmap); in insert_into_bitmap()
2455 u64 offset = info->offset; in try_merge_free_space()
2456 u64 bytes = info->bytes; in try_merge_free_space()
2467 right_prev = rb_prev(&right_info->offset_index); in try_merge_free_space()
2472 left_info = tree_search_offset(ctl, offset - 1, 0, 0); in try_merge_free_space()
2475 if (right_info && !right_info->bitmap && in try_merge_free_space()
2478 info->bytes += right_info->bytes; in try_merge_free_space()
2484 if (left_info && !left_info->bitmap && in try_merge_free_space()
2485 left_info->offset + left_info->bytes == offset && in try_merge_free_space()
2488 info->offset = left_info->offset; in try_merge_free_space()
2489 info->bytes += left_info->bytes; in try_merge_free_space()
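/*
 * Editor's sketch of the neighbour merge in try_merge_free_space() above:
 * a freed extent absorbs an adjacent extent entry that touches its end or
 * its start; bitmap neighbours are never merged this way.
 */
#include <stddef.h>
#include <stdint.h>

struct merge_sketch {
        uint64_t offset;
        uint64_t bytes;
};

static void merge_neighbours_sketch(struct merge_sketch *info,
                                    const struct merge_sketch *left,
                                    const struct merge_sketch *right)
{
        if (right && info->offset + info->bytes == right->offset)
                info->bytes += right->bytes;

        if (left && left->offset + left->bytes == info->offset) {
                info->offset = left->offset;
                info->bytes += left->bytes;
        }
}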
2504 const u64 end = info->offset + info->bytes; in steal_from_bitmap_to_end()
2512 i = offset_to_bit(bitmap->offset, ctl->unit, end); in steal_from_bitmap_to_end()
2513 j = find_next_zero_bit(bitmap->bitmap, BITS_PER_BITMAP, i); in steal_from_bitmap_to_end()
2516 bytes = (j - i) * ctl->unit; in steal_from_bitmap_to_end()
2517 info->bytes += bytes; in steal_from_bitmap_to_end()
2521 info->trim_state = BTRFS_TRIM_STATE_UNTRIMMED; in steal_from_bitmap_to_end()
2525 if (!bitmap->bytes) in steal_from_bitmap_to_end()
2542 bitmap_offset = offset_to_bitmap(ctl, info->offset); in steal_from_bitmap_to_front()
2544 if (bitmap_offset == info->offset) { in steal_from_bitmap_to_front()
2545 if (info->offset == 0) in steal_from_bitmap_to_front()
2547 bitmap_offset = offset_to_bitmap(ctl, info->offset - 1); in steal_from_bitmap_to_front()
2554 i = offset_to_bit(bitmap->offset, ctl->unit, info->offset) - 1; in steal_from_bitmap_to_front()
2556 prev_j = (unsigned long)-1; in steal_from_bitmap_to_front()
2557 for_each_clear_bit_from(j, bitmap->bitmap, BITS_PER_BITMAP) { in steal_from_bitmap_to_front()
2565 if (prev_j == (unsigned long)-1) in steal_from_bitmap_to_front()
2566 bytes = (i + 1) * ctl->unit; in steal_from_bitmap_to_front()
2568 bytes = (i - prev_j) * ctl->unit; in steal_from_bitmap_to_front()
2570 info->offset -= bytes; in steal_from_bitmap_to_front()
2571 info->bytes += bytes; in steal_from_bitmap_to_front()
2575 info->trim_state = BTRFS_TRIM_STATE_UNTRIMMED; in steal_from_bitmap_to_front()
2577 bitmap_clear_bits(ctl, bitmap, info->offset, bytes, update_stat); in steal_from_bitmap_to_front()
2579 if (!bitmap->bytes) in steal_from_bitmap_to_front()
2587 * non-clustered allocation requests. So when attempting to add a new extent
2592 * on 2 or more entries - even if the entries represent a contiguous free space
2604 ASSERT(!info->bitmap); in steal_from_bitmap()
2605 ASSERT(RB_EMPTY_NODE(&info->offset_index)); in steal_from_bitmap()
2607 if (ctl->total_bitmaps > 0) { in steal_from_bitmap()
2612 if (ctl->total_bitmaps > 0) in steal_from_bitmap()
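/*
 * Editor's sketch of steal_from_bitmap_to_end() above, on a plain array
 * of bits: starting at the bit just past the extent's end, consume
 * consecutive set bits, clearing each and growing the extent by one unit
 * per bit; the _to_front() variant walks backwards from the start.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct steal_sketch {
        uint64_t offset;
        uint64_t bytes;
};

static void steal_to_end_sketch(bool *bits, size_t nbits, size_t first,
                                uint32_t unit, struct steal_sketch *e)
{
        for (size_t i = first; i < nbits && bits[i]; i++) {
                bits[i] = false;        /* as bitmap_clear_bits() would */
                e->bytes += unit;
        }
}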
2625 struct btrfs_fs_info *fs_info = block_group->fs_info; in __btrfs_add_free_space()
2626 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in __btrfs_add_free_space()
2635 return -ENOMEM; in __btrfs_add_free_space()
2637 info->offset = offset; in __btrfs_add_free_space()
2638 info->bytes = bytes; in __btrfs_add_free_space()
2639 info->trim_state = trim_state; in __btrfs_add_free_space()
2640 RB_CLEAR_NODE(&info->offset_index); in __btrfs_add_free_space()
2641 RB_CLEAR_NODE(&info->bytes_index); in __btrfs_add_free_space()
2643 spin_lock(&ctl->tree_lock); in __btrfs_add_free_space()
2663 * going to add the new free space to existing bitmap entries - because in __btrfs_add_free_space()
2669 filter_bytes = max(filter_bytes, info->bytes); in __btrfs_add_free_space()
2676 spin_unlock(&ctl->tree_lock); in __btrfs_add_free_space()
2680 ASSERT(ret != -EEXIST); in __btrfs_add_free_space()
2685 btrfs_discard_queue_work(&fs_info->discard_ctl, block_group); in __btrfs_add_free_space()
2694 struct btrfs_space_info *sinfo = block_group->space_info; in __btrfs_add_free_space_zoned()
2695 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in __btrfs_add_free_space_zoned()
2696 u64 offset = bytenr - block_group->start; in __btrfs_add_free_space_zoned()
2702 spin_lock(&block_group->lock); in __btrfs_add_free_space_zoned()
2704 initial = ((size == block_group->length) && (block_group->alloc_offset == 0)); in __btrfs_add_free_space_zoned()
2705 WARN_ON(!initial && offset + size > block_group->zone_capacity); in __btrfs_add_free_space_zoned()
2707 bg_reclaim_threshold = READ_ONCE(sinfo->bg_reclaim_threshold); in __btrfs_add_free_space_zoned()
2712 to_free = block_group->zone_capacity; in __btrfs_add_free_space_zoned()
2713 else if (offset >= block_group->alloc_offset) in __btrfs_add_free_space_zoned()
2715 else if (offset + size <= block_group->alloc_offset) in __btrfs_add_free_space_zoned()
2718 to_free = offset + size - block_group->alloc_offset; in __btrfs_add_free_space_zoned()
2719 to_unusable = size - to_free; in __btrfs_add_free_space_zoned()
2721 spin_lock(&ctl->tree_lock); in __btrfs_add_free_space_zoned()
2722 ctl->free_space += to_free; in __btrfs_add_free_space_zoned()
2723 spin_unlock(&ctl->tree_lock); in __btrfs_add_free_space_zoned()
2725 * If the block group is read-only, we should account freed space into in __btrfs_add_free_space_zoned()
2728 if (!block_group->ro) { in __btrfs_add_free_space_zoned()
2729 block_group->zone_unusable += to_unusable; in __btrfs_add_free_space_zoned()
2730 WARN_ON(block_group->zone_unusable > block_group->length); in __btrfs_add_free_space_zoned()
2733 block_group->alloc_offset -= size; in __btrfs_add_free_space_zoned()
2736 reclaimable_unusable = block_group->zone_unusable - in __btrfs_add_free_space_zoned()
2737 (block_group->length - block_group->zone_capacity); in __btrfs_add_free_space_zoned()
2739 if (block_group->zone_unusable == block_group->length) { in __btrfs_add_free_space_zoned()
2743 mult_perc(block_group->zone_capacity, bg_reclaim_threshold)) { in __btrfs_add_free_space_zoned()
2747 spin_unlock(&block_group->lock); in __btrfs_add_free_space_zoned()
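/*
 * Editor's sketch of the zoned accounting above: only bytes freed at or
 * past the zone's allocation offset are immediately reusable; anything
 * behind the write pointer becomes zone_unusable until the zone is reset.
 */
#include <stdbool.h>
#include <stdint.h>

static void zoned_split_sketch(uint64_t offset, uint64_t size,
                               uint64_t alloc_offset, uint64_t zone_capacity,
                               bool initial, uint64_t *to_free,
                               uint64_t *to_unusable)
{
        if (initial)
                *to_free = zone_capacity;
        else if (offset >= alloc_offset)
                *to_free = size;
        else if (offset + size <= alloc_offset)
                *to_free = 0;
        else
                *to_free = offset + size - alloc_offset;
        *to_unusable = size - *to_free;
}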
2757 if (btrfs_is_zoned(block_group->fs_info)) in btrfs_add_free_space()
2761 if (btrfs_test_opt(block_group->fs_info, DISCARD_SYNC)) in btrfs_add_free_space()
2770 if (btrfs_is_zoned(block_group->fs_info)) in btrfs_add_free_space_unused()
2787 if (btrfs_is_zoned(block_group->fs_info)) in btrfs_add_free_space_async_trimmed()
2791 if (btrfs_test_opt(block_group->fs_info, DISCARD_SYNC) || in btrfs_add_free_space_async_trimmed()
2792 btrfs_test_opt(block_group->fs_info, DISCARD_ASYNC)) in btrfs_add_free_space_async_trimmed()
2801 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in btrfs_remove_free_space()
2806 if (btrfs_is_zoned(block_group->fs_info)) { in btrfs_remove_free_space()
2809 * Since the allocation info of tree-log nodes are not recorded in btrfs_remove_free_space()
2810 * to the extent-tree, calculate_alloc_pointer() failed to in btrfs_remove_free_space()
2816 * Advance the pointer not to overwrite the tree-log nodes. in btrfs_remove_free_space()
2818 if (block_group->start + block_group->alloc_offset < in btrfs_remove_free_space()
2820 block_group->alloc_offset = in btrfs_remove_free_space()
2821 offset + bytes - block_group->start; in btrfs_remove_free_space()
2826 spin_lock(&ctl->tree_lock); in btrfs_remove_free_space()
2853 if (!info->bitmap) { in btrfs_remove_free_space()
2855 if (offset == info->offset) { in btrfs_remove_free_space()
2856 u64 to_free = min(bytes, info->bytes); in btrfs_remove_free_space()
2858 info->bytes -= to_free; in btrfs_remove_free_space()
2859 info->offset += to_free; in btrfs_remove_free_space()
2860 if (info->bytes) { in btrfs_remove_free_space()
2868 bytes -= to_free; in btrfs_remove_free_space()
2871 u64 old_end = info->bytes + info->offset; in btrfs_remove_free_space()
2873 info->bytes = offset - info->offset; in btrfs_remove_free_space()
2881 bytes -= old_end - offset; in btrfs_remove_free_space()
2888 spin_unlock(&ctl->tree_lock); in btrfs_remove_free_space()
2892 old_end - (offset + bytes), in btrfs_remove_free_space()
2893 info->trim_state); in btrfs_remove_free_space()
2900 if (ret == -EAGAIN) { in btrfs_remove_free_space()
2906 spin_unlock(&ctl->tree_lock); in btrfs_remove_free_space()
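/*
 * Editor's sketch of the middle-of-extent case handled above in
 * btrfs_remove_free_space(): removing [offset, offset + bytes) from an
 * extent that surrounds it keeps the left part in place and returns the
 * right-hand remainder to be re-added as a fresh entry.
 */
#include <stdint.h>

struct range_sketch {
        uint64_t offset;
        uint64_t bytes;
};

/* Shrinks *info to the left part; returns the right remainder. */
static struct range_sketch remove_middle_sketch(struct range_sketch *info,
                                                uint64_t offset, uint64_t bytes)
{
        uint64_t old_end = info->offset + info->bytes;
        struct range_sketch right = {
                .offset = offset + bytes,
                .bytes = old_end - (offset + bytes),
        };

        info->bytes = offset - info->offset;
        return right;
}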
2914 struct btrfs_fs_info *fs_info = block_group->fs_info; in btrfs_dump_free_space()
2915 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in btrfs_dump_free_space()
2921 * Zoned btrfs does not use free space tree and cluster. Just print in btrfs_dump_free_space()
2926 block_group->zone_capacity - block_group->alloc_offset, in btrfs_dump_free_space()
2928 &block_group->runtime_flags)); in btrfs_dump_free_space()
2932 spin_lock(&ctl->tree_lock); in btrfs_dump_free_space()
2933 for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) { in btrfs_dump_free_space()
2935 if (info->bytes >= bytes && !block_group->ro) in btrfs_dump_free_space()
2938 info->offset, info->bytes, in btrfs_dump_free_space()
2939 (info->bitmap) ? "yes" : "no"); in btrfs_dump_free_space()
2941 spin_unlock(&ctl->tree_lock); in btrfs_dump_free_space()
2942 btrfs_info(fs_info, "block group has cluster?: %s", in btrfs_dump_free_space()
2943 list_empty(&block_group->cluster_list) ? "no" : "yes"); in btrfs_dump_free_space()
2952 struct btrfs_fs_info *fs_info = block_group->fs_info; in btrfs_init_free_space_ctl()
2954 spin_lock_init(&ctl->tree_lock); in btrfs_init_free_space_ctl()
2955 ctl->unit = fs_info->sectorsize; in btrfs_init_free_space_ctl()
2956 ctl->start = block_group->start; in btrfs_init_free_space_ctl()
2957 ctl->block_group = block_group; in btrfs_init_free_space_ctl()
2958 ctl->op = &free_space_op; in btrfs_init_free_space_ctl()
2959 ctl->free_space_bytes = RB_ROOT_CACHED; in btrfs_init_free_space_ctl()
2960 INIT_LIST_HEAD(&ctl->trimming_ranges); in btrfs_init_free_space_ctl()
2961 mutex_init(&ctl->cache_writeout_mutex); in btrfs_init_free_space_ctl()
2968 ctl->extents_thresh = (SZ_32K / 2) / sizeof(struct btrfs_free_space); in btrfs_init_free_space_ctl()
2972 * for a given cluster, put all of its extents back into the free
2974 * pointed to by the cluster, someone else raced in and freed the
2975 * cluster already. In that case, we just return without changing anything
2979 struct btrfs_free_cluster *cluster) in __btrfs_return_cluster_to_free_space() argument
2981 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in __btrfs_return_cluster_to_free_space()
2984 lockdep_assert_held(&ctl->tree_lock); in __btrfs_return_cluster_to_free_space()
2986 spin_lock(&cluster->lock); in __btrfs_return_cluster_to_free_space()
2987 if (cluster->block_group != block_group) { in __btrfs_return_cluster_to_free_space()
2988 spin_unlock(&cluster->lock); in __btrfs_return_cluster_to_free_space()
2992 cluster->block_group = NULL; in __btrfs_return_cluster_to_free_space()
2993 cluster->window_start = 0; in __btrfs_return_cluster_to_free_space()
2994 list_del_init(&cluster->block_group_list); in __btrfs_return_cluster_to_free_space()
2996 node = rb_first(&cluster->root); in __btrfs_return_cluster_to_free_space()
3001 node = rb_next(&entry->offset_index); in __btrfs_return_cluster_to_free_space()
3002 rb_erase(&entry->offset_index, &cluster->root); in __btrfs_return_cluster_to_free_space()
3003 RB_CLEAR_NODE(&entry->offset_index); in __btrfs_return_cluster_to_free_space()
3005 if (!entry->bitmap) { in __btrfs_return_cluster_to_free_space()
3008 ctl->discardable_extents[BTRFS_STAT_CURR]--; in __btrfs_return_cluster_to_free_space()
3009 ctl->discardable_bytes[BTRFS_STAT_CURR] -= in __btrfs_return_cluster_to_free_space()
3010 entry->bytes; in __btrfs_return_cluster_to_free_space()
3018 ctl->discardable_extents[BTRFS_STAT_CURR]++; in __btrfs_return_cluster_to_free_space()
3019 ctl->discardable_bytes[BTRFS_STAT_CURR] += in __btrfs_return_cluster_to_free_space()
3020 entry->bytes; in __btrfs_return_cluster_to_free_space()
3024 rb_add_cached(&entry->bytes_index, &ctl->free_space_bytes, in __btrfs_return_cluster_to_free_space()
3027 cluster->root = RB_ROOT; in __btrfs_return_cluster_to_free_space()
3028 spin_unlock(&cluster->lock); in __btrfs_return_cluster_to_free_space()
3034 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in btrfs_remove_free_space_cache()
3035 struct btrfs_free_cluster *cluster; in btrfs_remove_free_space_cache() local
3038 spin_lock(&ctl->tree_lock); in btrfs_remove_free_space_cache()
3039 while ((head = block_group->cluster_list.next) != in btrfs_remove_free_space_cache()
3040 &block_group->cluster_list) { in btrfs_remove_free_space_cache()
3041 cluster = list_entry(head, struct btrfs_free_cluster, in btrfs_remove_free_space_cache()
3044 WARN_ON(cluster->block_group != block_group); in btrfs_remove_free_space_cache()
3045 __btrfs_return_cluster_to_free_space(block_group, cluster); in btrfs_remove_free_space_cache()
3047 cond_resched_lock(&ctl->tree_lock); in btrfs_remove_free_space_cache()
3051 spin_unlock(&ctl->tree_lock); in btrfs_remove_free_space_cache()
3060 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in btrfs_is_free_space_trimmed()
3065 spin_lock(&ctl->tree_lock); in btrfs_is_free_space_trimmed()
3066 node = rb_first(&ctl->free_space_offset); in btrfs_is_free_space_trimmed()
3079 spin_unlock(&ctl->tree_lock); in btrfs_is_free_space_trimmed()
3087 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in btrfs_find_space_for_alloc()
3089 &block_group->fs_info->discard_ctl; in btrfs_find_space_for_alloc()
3096 bool use_bytes_index = (offset == block_group->start); in btrfs_find_space_for_alloc()
3098 ASSERT(!btrfs_is_zoned(block_group->fs_info)); in btrfs_find_space_for_alloc()
3100 spin_lock(&ctl->tree_lock); in btrfs_find_space_for_alloc()
3102 block_group->full_stripe_len, max_extent_size, in btrfs_find_space_for_alloc()
3108 if (entry->bitmap) { in btrfs_find_space_for_alloc()
3112 atomic64_add(bytes, &discard_ctl->discard_bytes_saved); in btrfs_find_space_for_alloc()
3114 if (!entry->bytes) in btrfs_find_space_for_alloc()
3118 align_gap_len = offset - entry->offset; in btrfs_find_space_for_alloc()
3119 align_gap = entry->offset; in btrfs_find_space_for_alloc()
3120 align_gap_trim_state = entry->trim_state; in btrfs_find_space_for_alloc()
3123 atomic64_add(bytes, &discard_ctl->discard_bytes_saved); in btrfs_find_space_for_alloc()
3125 entry->offset = offset + bytes; in btrfs_find_space_for_alloc()
3126 WARN_ON(entry->bytes < bytes + align_gap_len); in btrfs_find_space_for_alloc()
3128 entry->bytes -= bytes + align_gap_len; in btrfs_find_space_for_alloc()
3129 if (!entry->bytes) in btrfs_find_space_for_alloc()
3136 spin_unlock(&ctl->tree_lock); in btrfs_find_space_for_alloc()
3145 * given a cluster, put all of its extents back into the free space
3147 * a cluster that belongs to the passed block group.
3150 * cluster and remove the cluster from it.
3154 struct btrfs_free_cluster *cluster) in btrfs_return_cluster_to_free_space() argument
3159 spin_lock(&cluster->lock); in btrfs_return_cluster_to_free_space()
3161 block_group = cluster->block_group; in btrfs_return_cluster_to_free_space()
3163 spin_unlock(&cluster->lock); in btrfs_return_cluster_to_free_space()
3166 } else if (cluster->block_group != block_group) { in btrfs_return_cluster_to_free_space()
3168 spin_unlock(&cluster->lock); in btrfs_return_cluster_to_free_space()
3172 spin_unlock(&cluster->lock); in btrfs_return_cluster_to_free_space()
3174 ctl = block_group->free_space_ctl; in btrfs_return_cluster_to_free_space()
3176 /* now return any extents the cluster had on it */ in btrfs_return_cluster_to_free_space()
3177 spin_lock(&ctl->tree_lock); in btrfs_return_cluster_to_free_space()
3178 __btrfs_return_cluster_to_free_space(block_group, cluster); in btrfs_return_cluster_to_free_space()
3179 spin_unlock(&ctl->tree_lock); in btrfs_return_cluster_to_free_space()
3181 btrfs_discard_queue_work(&block_group->fs_info->discard_ctl, block_group); in btrfs_return_cluster_to_free_space()
3188 struct btrfs_free_cluster *cluster, in btrfs_alloc_from_bitmap() argument
3193 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in btrfs_alloc_from_bitmap()
3195 u64 search_start = cluster->window_start; in btrfs_alloc_from_bitmap()
3216 * given a cluster, try to allocate 'bytes' from it, returns 0
3221 struct btrfs_free_cluster *cluster, u64 bytes, in btrfs_alloc_from_cluster() argument
3224 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in btrfs_alloc_from_cluster()
3226 &block_group->fs_info->discard_ctl; in btrfs_alloc_from_cluster()
3231 ASSERT(!btrfs_is_zoned(block_group->fs_info)); in btrfs_alloc_from_cluster()
3233 spin_lock(&cluster->lock); in btrfs_alloc_from_cluster()
3234 if (bytes > cluster->max_size) in btrfs_alloc_from_cluster()
3237 if (cluster->block_group != block_group) in btrfs_alloc_from_cluster()
3240 node = rb_first(&cluster->root); in btrfs_alloc_from_cluster()
3246 if (entry->bytes < bytes) in btrfs_alloc_from_cluster()
3250 if (entry->bytes < bytes || in btrfs_alloc_from_cluster()
3251 (!entry->bitmap && entry->offset < min_start)) { in btrfs_alloc_from_cluster()
3252 node = rb_next(&entry->offset_index); in btrfs_alloc_from_cluster()
3260 if (entry->bitmap) { in btrfs_alloc_from_cluster()
3262 cluster, entry, bytes, in btrfs_alloc_from_cluster()
3263 cluster->window_start, in btrfs_alloc_from_cluster()
3266 node = rb_next(&entry->offset_index); in btrfs_alloc_from_cluster()
3273 cluster->window_start += bytes; in btrfs_alloc_from_cluster()
3275 ret = entry->offset; in btrfs_alloc_from_cluster()
3277 entry->offset += bytes; in btrfs_alloc_from_cluster()
3278 entry->bytes -= bytes; in btrfs_alloc_from_cluster()
3284 spin_unlock(&cluster->lock); in btrfs_alloc_from_cluster()
3289 spin_lock(&ctl->tree_lock); in btrfs_alloc_from_cluster()
3292 atomic64_add(bytes, &discard_ctl->discard_bytes_saved); in btrfs_alloc_from_cluster()
3294 ctl->free_space -= bytes; in btrfs_alloc_from_cluster()
3295 if (!entry->bitmap && !btrfs_free_space_trimmed(entry)) in btrfs_alloc_from_cluster()
3296 ctl->discardable_bytes[BTRFS_STAT_CURR] -= bytes; in btrfs_alloc_from_cluster()
3298 spin_lock(&cluster->lock); in btrfs_alloc_from_cluster()
3299 if (entry->bytes == 0) { in btrfs_alloc_from_cluster()
3300 rb_erase(&entry->offset_index, &cluster->root); in btrfs_alloc_from_cluster()
3301 ctl->free_extents--; in btrfs_alloc_from_cluster()
3302 if (entry->bitmap) { in btrfs_alloc_from_cluster()
3304 entry->bitmap); in btrfs_alloc_from_cluster()
3305 ctl->total_bitmaps--; in btrfs_alloc_from_cluster()
3308 ctl->discardable_extents[BTRFS_STAT_CURR]--; in btrfs_alloc_from_cluster()
3313 spin_unlock(&cluster->lock); in btrfs_alloc_from_cluster()
3314 spin_unlock(&ctl->tree_lock); in btrfs_alloc_from_cluster()
3321 struct btrfs_free_cluster *cluster, in btrfs_bitmap_cluster() argument
3325 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in btrfs_bitmap_cluster()
3336 lockdep_assert_held(&ctl->tree_lock); in btrfs_bitmap_cluster()
3338 i = offset_to_bit(entry->offset, ctl->unit, in btrfs_bitmap_cluster()
3339 max_t(u64, offset, entry->offset)); in btrfs_bitmap_cluster()
3340 want_bits = bytes_to_bits(bytes, ctl->unit); in btrfs_bitmap_cluster()
3341 min_bits = bytes_to_bits(min_bytes, ctl->unit); in btrfs_bitmap_cluster()
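        /*
         * Worked example (illustrative, assuming 4K pages and a 4K
         * sectorsize, so ctl->unit == 4096): one bitmap holds
         * BITS_PER_BITMAP == 32768 bits and therefore covers 128MiB.
         * bytes == 1MiB gives want_bits == 256, min_bytes == 64KiB gives
         * min_bits == 16, and offset_to_bit() is simply
         * (offset - entry->offset) / ctl->unit.
         */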
3344 * Don't bother looking for a cluster in this bitmap if it's heavily in btrfs_bitmap_cluster()
3345 * fragmented. in btrfs_bitmap_cluster()
3347 if (entry->max_extent_size && in btrfs_bitmap_cluster()
3348 entry->max_extent_size < cont1_bytes) in btrfs_bitmap_cluster()
3349 return -ENOSPC; in btrfs_bitmap_cluster()
3352 for_each_set_bit_from(i, entry->bitmap, BITS_PER_BITMAP) { in btrfs_bitmap_cluster()
3353 next_zero = find_next_zero_bit(entry->bitmap, in btrfs_bitmap_cluster()
3355 if (next_zero - i >= min_bits) { in btrfs_bitmap_cluster()
3356 found_bits = next_zero - i; in btrfs_bitmap_cluster()
3361 if (next_zero - i > max_bits) in btrfs_bitmap_cluster()
3362 max_bits = next_zero - i; in btrfs_bitmap_cluster()
3367 entry->max_extent_size = (u64)max_bits * ctl->unit; in btrfs_bitmap_cluster()
3368 return -ENOSPC; in btrfs_bitmap_cluster()
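        /*
         * Caching max_bits in entry->max_extent_size here is what makes the
         * early max_extent_size check at the top of this function work: the
         * next search can reject this bitmap without rescanning any bits.
         */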
3373 cluster->max_size = 0; in btrfs_bitmap_cluster()
3378 if (cluster->max_size < found_bits * ctl->unit) in btrfs_bitmap_cluster()
3379 cluster->max_size = found_bits * ctl->unit; in btrfs_bitmap_cluster()
3381 if (total_found < want_bits || cluster->max_size < cont1_bytes) { in btrfs_bitmap_cluster()
3386 cluster->window_start = start * ctl->unit + entry->offset; in btrfs_bitmap_cluster()
3387 rb_erase(&entry->offset_index, &ctl->free_space_offset); in btrfs_bitmap_cluster()
3388 rb_erase_cached(&entry->bytes_index, &ctl->free_space_bytes); in btrfs_bitmap_cluster()
3391 * We need to know if we're currently on the normal space index when we in btrfs_bitmap_cluster()
3392 * manipulate the bitmap so that we know we need to remove and re-insert in btrfs_bitmap_cluster()
3393 * it into the space index tree. in btrfs_bitmap_cluster()
3397 RB_CLEAR_NODE(&entry->bytes_index); in btrfs_bitmap_cluster()
3399 ret = tree_insert_offset(ctl, cluster, entry); in btrfs_bitmap_cluster()
3400 ASSERT(!ret); /* -EEXIST; Logic error */ in btrfs_bitmap_cluster()
3402 trace_btrfs_setup_cluster(block_group, cluster, in btrfs_bitmap_cluster()
3403 total_found * ctl->unit, 1); in btrfs_bitmap_cluster()
3408 * This searches the block group for just extents to fill the cluster with.
3409 * Try to find a cluster with at least bytes total bytes, at least one
3410 * extent of cont1_bytes, and other clusters of at least min_bytes.
3414 struct btrfs_free_cluster *cluster, in setup_cluster_no_bitmap() argument
3418 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in setup_cluster_no_bitmap()
3427 lockdep_assert_held(&ctl->tree_lock); in setup_cluster_no_bitmap()
3431 return -ENOSPC; in setup_cluster_no_bitmap()
3437 while (entry->bitmap || entry->bytes < min_bytes) { in setup_cluster_no_bitmap()
3438 if (entry->bitmap && list_empty(&entry->list)) in setup_cluster_no_bitmap()
3439 list_add_tail(&entry->list, bitmaps); in setup_cluster_no_bitmap()
3440 node = rb_next(&entry->offset_index); in setup_cluster_no_bitmap()
3442 return -ENOSPC; in setup_cluster_no_bitmap()
3446 window_free = entry->bytes; in setup_cluster_no_bitmap()
3447 max_extent = entry->bytes; in setup_cluster_no_bitmap()
3451 for (node = rb_next(&entry->offset_index); node; in setup_cluster_no_bitmap()
3452 node = rb_next(&entry->offset_index)) { in setup_cluster_no_bitmap()
3455 if (entry->bitmap) { in setup_cluster_no_bitmap()
3456 if (list_empty(&entry->list)) in setup_cluster_no_bitmap()
3457 list_add_tail(&entry->list, bitmaps); in setup_cluster_no_bitmap()
3461 if (entry->bytes < min_bytes) in setup_cluster_no_bitmap()
3465 window_free += entry->bytes; in setup_cluster_no_bitmap()
3466 if (entry->bytes > max_extent) in setup_cluster_no_bitmap()
3467 max_extent = entry->bytes; in setup_cluster_no_bitmap()
3471 return -ENOSPC; in setup_cluster_no_bitmap()
3473 cluster->window_start = first->offset; in setup_cluster_no_bitmap()
3475 node = &first->offset_index; in setup_cluster_no_bitmap()
3478 * now we've found our entries, pull them out of the free space in setup_cluster_no_bitmap()
3479 * cache and put them into the cluster rbtree in setup_cluster_no_bitmap()
3485 node = rb_next(&entry->offset_index); in setup_cluster_no_bitmap()
3486 if (entry->bitmap || entry->bytes < min_bytes) in setup_cluster_no_bitmap()
3489 rb_erase(&entry->offset_index, &ctl->free_space_offset); in setup_cluster_no_bitmap()
3490 rb_erase_cached(&entry->bytes_index, &ctl->free_space_bytes); in setup_cluster_no_bitmap()
3491 ret = tree_insert_offset(ctl, cluster, entry); in setup_cluster_no_bitmap()
3492 total_size += entry->bytes; in setup_cluster_no_bitmap()
3493 ASSERT(!ret); /* -EEXIST; Logic error */ in setup_cluster_no_bitmap()
3496 cluster->max_size = max_extent; in setup_cluster_no_bitmap()
3497 trace_btrfs_setup_cluster(block_group, cluster, total_size, 0); in setup_cluster_no_bitmap()
3502 * This specifically looks for bitmaps that may work in the cluster, we assume
3503 * that we have already failed to find extents that will work.
3507 struct btrfs_free_cluster *cluster, in setup_cluster_bitmap() argument
3511 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in setup_cluster_bitmap()
3513 int ret = -ENOSPC; in setup_cluster_bitmap()
3516 if (ctl->total_bitmaps == 0) in setup_cluster_bitmap()
3517 return -ENOSPC; in setup_cluster_bitmap()
3526 if (!entry || entry->offset != bitmap_offset) { in setup_cluster_bitmap()
3528 if (entry && list_empty(&entry->list)) in setup_cluster_bitmap()
3529 list_add(&entry->list, bitmaps); in setup_cluster_bitmap()
3533 if (entry->bytes < bytes) in setup_cluster_bitmap()
3535 ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset, in setup_cluster_bitmap()
3545 return -ENOSPC; in setup_cluster_bitmap()
3549 * here we try to find a cluster of blocks in a block group. The goal
3550 * is to find at least bytes+empty_size.
3553 * returns zero and sets up cluster if things worked out, otherwise
3554 * it returns -ENOSPC
3557 struct btrfs_free_cluster *cluster, in btrfs_find_space_cluster() argument
3560 struct btrfs_fs_info *fs_info = block_group->fs_info; in btrfs_find_space_cluster()
3561 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in btrfs_find_space_cluster()
3569 * Choose the minimum extent size we'll require for this in btrfs_find_space_cluster()
3570 * cluster. For SSD_SPREAD, don't allow any fragmentation. in btrfs_find_space_cluster()
3571 * For metadata, allow allocates with smaller extents. For in btrfs_find_space_cluster()
3572 * data, keep it dense. in btrfs_find_space_cluster()
3577 } else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) { in btrfs_find_space_cluster()
3579 min_bytes = fs_info->sectorsize; in btrfs_find_space_cluster()
3582 min_bytes = fs_info->sectorsize; in btrfs_find_space_cluster()
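        /*
         * Illustrative numbers, assuming the elided data-profile branch
         * computes cont1_bytes = max(bytes, (bytes + empty_size) >> 2):
         * a 1MiB data allocation with empty_size == 0 and a 4K sectorsize
         * needs one fully contiguous 1MiB extent (cont1_bytes == 1MiB),
         * padded by helper extents of at least one sector (min_bytes == 4096).
         */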
3585 spin_lock(&ctl->tree_lock); in btrfs_find_space_cluster()
3588 * If we know we don't have enough space to make a cluster don't even in btrfs_find_space_cluster()
3589 * bother doing all the work to try and find one. in btrfs_find_space_cluster()
3591 if (ctl->free_space < bytes) { in btrfs_find_space_cluster()
3592 spin_unlock(&ctl->tree_lock); in btrfs_find_space_cluster()
3593 return -ENOSPC; in btrfs_find_space_cluster()
3596 spin_lock(&cluster->lock); in btrfs_find_space_cluster()
3598 /* someone already found a cluster, hooray */ in btrfs_find_space_cluster()
3599 if (cluster->block_group) { in btrfs_find_space_cluster()
3607 ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset, in btrfs_find_space_cluster()
3611 ret = setup_cluster_bitmap(block_group, cluster, &bitmaps, in btrfs_find_space_cluster()
3617 list_del_init(&entry->list); in btrfs_find_space_cluster()
3621 list_add_tail(&cluster->block_group_list, in btrfs_find_space_cluster()
3622 &block_group->cluster_list); in btrfs_find_space_cluster()
3623 cluster->block_group = block_group; in btrfs_find_space_cluster()
3628 spin_unlock(&cluster->lock); in btrfs_find_space_cluster()
3629 spin_unlock(&ctl->tree_lock); in btrfs_find_space_cluster()
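Cluster setup and allocation are meant to run back to back under the cluster's refill_lock; a condensed sketch of that pattern (search_start, num_bytes and empty_size stand in for the caller's values; error handling and the real allocator's retry logic are elided):

        u64 offset = 0;
        u64 max_extent_size = 0;
        int ret;

        spin_lock(&cluster->refill_lock);
        ret = btrfs_find_space_cluster(block_group, cluster,
                                       search_start, num_bytes, empty_size);
        if (ret == 0)
                /* 0 from the allocator means nothing fit */
                offset = btrfs_alloc_from_cluster(block_group, cluster,
                                                  num_bytes, search_start,
                                                  &max_extent_size);
        spin_unlock(&cluster->refill_lock);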
3635 * simple code to zero out a cluster
3637 void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster) in btrfs_init_free_cluster() argument
3639 spin_lock_init(&cluster->lock); in btrfs_init_free_cluster()
3640 spin_lock_init(&cluster->refill_lock); in btrfs_init_free_cluster()
3641 cluster->root = RB_ROOT; in btrfs_init_free_cluster()
3642 cluster->max_size = 0; in btrfs_init_free_cluster()
3643 cluster->fragmented = false; in btrfs_init_free_cluster()
3644 INIT_LIST_HEAD(&cluster->block_group_list); in btrfs_init_free_cluster()
3645 cluster->block_group = NULL; in btrfs_init_free_cluster()
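For reference, the two per-filesystem allocation clusters are initialized exactly this way at mount time (a sketch; both fields live on btrfs_fs_info):

        btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
        btrfs_init_free_cluster(&fs_info->data_alloc_cluster);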
3654 struct btrfs_space_info *space_info = block_group->space_info; in do_trimming()
3655 struct btrfs_fs_info *fs_info = block_group->fs_info; in do_trimming()
3656 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in do_trimming()
3664 spin_lock(&space_info->lock); in do_trimming()
3665 spin_lock(&block_group->lock); in do_trimming()
3666 if (!block_group->ro) { in do_trimming()
3667 block_group->reserved += reserved_bytes; in do_trimming()
3668 space_info->bytes_reserved += reserved_bytes; in do_trimming()
3671 spin_unlock(&block_group->lock); in do_trimming()
3672 spin_unlock(&space_info->lock); in do_trimming()
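        /*
         * The bytes reserved above keep concurrent allocators away from this
         * range while the (elided) discard of the extent runs with no locks
         * held; the mirror-image locked section below drops the reservation
         * once trimming is done.
         */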
3680 mutex_lock(&ctl->cache_writeout_mutex); in do_trimming()
3683 start - reserved_start, in do_trimming()
3686 __btrfs_add_free_space(block_group, end, reserved_end - end, in do_trimming()
3689 list_del(&trim_entry->list); in do_trimming()
3690 mutex_unlock(&ctl->cache_writeout_mutex); in do_trimming()
3693 spin_lock(&space_info->lock); in do_trimming()
3694 spin_lock(&block_group->lock); in do_trimming()
3695 if (block_group->ro) in do_trimming()
3696 space_info->bytes_readonly += reserved_bytes; in do_trimming()
3697 block_group->reserved -= reserved_bytes; in do_trimming()
3698 space_info->bytes_reserved -= reserved_bytes; in do_trimming()
3699 spin_unlock(&block_group->lock); in do_trimming()
3700 spin_unlock(&space_info->lock); in do_trimming()
3714 &block_group->fs_info->discard_ctl; in trim_no_bitmap()
3715 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in trim_no_bitmap()
3723 const u64 max_discard_size = READ_ONCE(discard_ctl->max_discard_size); in trim_no_bitmap()
3728 mutex_lock(&ctl->cache_writeout_mutex); in trim_no_bitmap()
3729 spin_lock(&ctl->tree_lock); in trim_no_bitmap()
3731 if (ctl->free_space < minlen) in trim_no_bitmap()
3739 while (entry->bitmap || in trim_no_bitmap()
3741 node = rb_next(&entry->offset_index); in trim_no_bitmap()
3748 if (entry->offset >= end) in trim_no_bitmap()
3751 extent_start = entry->offset; in trim_no_bitmap()
3752 extent_bytes = entry->bytes; in trim_no_bitmap()
3753 extent_trim_state = entry->trim_state; in trim_no_bitmap()
3755 start = entry->offset; in trim_no_bitmap()
3756 bytes = entry->bytes; in trim_no_bitmap()
3758 spin_unlock(&ctl->tree_lock); in trim_no_bitmap()
3759 mutex_unlock(&ctl->cache_writeout_mutex); in trim_no_bitmap()
3773 entry->offset += max_discard_size; in trim_no_bitmap()
3774 entry->bytes -= max_discard_size; in trim_no_bitmap()
3781 bytes = min(extent_start + extent_bytes, end) - start; in trim_no_bitmap()
3783 spin_unlock(&ctl->tree_lock); in trim_no_bitmap()
3784 mutex_unlock(&ctl->cache_writeout_mutex); in trim_no_bitmap()
3792 spin_unlock(&ctl->tree_lock); in trim_no_bitmap()
3795 list_add_tail(&trim_entry.list, &ctl->trimming_ranges); in trim_no_bitmap()
3796 mutex_unlock(&ctl->cache_writeout_mutex); in trim_no_bitmap()
3802 block_group->discard_cursor = start + bytes; in trim_no_bitmap()
3807 block_group->discard_cursor = start; in trim_no_bitmap()
3812 ret = -ERESTARTSYS; in trim_no_bitmap()
3822 block_group->discard_cursor = btrfs_block_group_end(block_group); in trim_no_bitmap()
3823 spin_unlock(&ctl->tree_lock); in trim_no_bitmap()
3824 mutex_unlock(&ctl->cache_writeout_mutex); in trim_no_bitmap()
3847 spin_lock(&ctl->tree_lock); in reset_trimming_bitmap()
3851 ctl->discardable_extents[BTRFS_STAT_CURR] += in reset_trimming_bitmap()
3852 entry->bitmap_extents; in reset_trimming_bitmap()
3853 ctl->discardable_bytes[BTRFS_STAT_CURR] += entry->bytes; in reset_trimming_bitmap()
3855 entry->trim_state = BTRFS_TRIM_STATE_UNTRIMMED; in reset_trimming_bitmap()
3858 spin_unlock(&ctl->tree_lock); in reset_trimming_bitmap()
3865 entry->trim_state = BTRFS_TRIM_STATE_TRIMMED; in end_trimming_bitmap()
3866 ctl->discardable_extents[BTRFS_STAT_CURR] -= in end_trimming_bitmap()
3867 entry->bitmap_extents; in end_trimming_bitmap()
3868 ctl->discardable_bytes[BTRFS_STAT_CURR] -= entry->bytes; in end_trimming_bitmap()
3880 &block_group->fs_info->discard_ctl; in trim_bitmaps()
3881 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in trim_bitmaps()
3887 const u64 max_discard_size = READ_ONCE(discard_ctl->max_discard_size); in trim_bitmaps()
3893 mutex_lock(&ctl->cache_writeout_mutex); in trim_bitmaps()
3894 spin_lock(&ctl->tree_lock); in trim_bitmaps()
3896 if (ctl->free_space < minlen) { in trim_bitmaps()
3897 block_group->discard_cursor = in trim_bitmaps()
3899 spin_unlock(&ctl->tree_lock); in trim_bitmaps()
3900 mutex_unlock(&ctl->cache_writeout_mutex); in trim_bitmaps()
3910 * This uses @minlen to determine if we are in BTRFS_DISCARD_INDEX_UNUSED, in trim_bitmaps()
3911 * which is the only discard index which sets minlen to 0. in trim_bitmaps()
3915 spin_unlock(&ctl->tree_lock); in trim_bitmaps()
3916 mutex_unlock(&ctl->cache_writeout_mutex); in trim_bitmaps()
3928 entry->trim_state = BTRFS_TRIM_STATE_TRIMMING; in trim_bitmaps()
3940 entry->trim_state = BTRFS_TRIM_STATE_UNTRIMMED; in trim_bitmaps()
3941 spin_unlock(&ctl->tree_lock); in trim_bitmaps()
3942 mutex_unlock(&ctl->cache_writeout_mutex); in trim_bitmaps()
3952 spin_unlock(&ctl->tree_lock); in trim_bitmaps()
3953 mutex_unlock(&ctl->cache_writeout_mutex); in trim_bitmaps()
3957 bytes = min(bytes, end - start); in trim_bitmaps()
3959 spin_unlock(&ctl->tree_lock); in trim_bitmaps()
3960 mutex_unlock(&ctl->cache_writeout_mutex); in trim_bitmaps()
3976 if (entry->bytes == 0) in trim_bitmaps()
3979 spin_unlock(&ctl->tree_lock); in trim_bitmaps()
3982 list_add_tail(&trim_entry.list, &ctl->trimming_ranges); in trim_bitmaps()
3983 mutex_unlock(&ctl->cache_writeout_mutex); in trim_bitmaps()
3989 block_group->discard_cursor = in trim_bitmaps()
3995 offset += BITS_PER_BITMAP * ctl->unit; in trim_bitmaps()
4000 block_group->discard_cursor = start; in trim_bitmaps()
4005 ret = -ERESTARTSYS; in trim_bitmaps()
4013 block_group->discard_cursor = end; in trim_bitmaps()
4022 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in btrfs_trim_block_group()
4026 ASSERT(!btrfs_is_zoned(block_group->fs_info)); in btrfs_trim_block_group()
4030 spin_lock(&block_group->lock); in btrfs_trim_block_group()
4031 if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags)) { in btrfs_trim_block_group()
4032 spin_unlock(&block_group->lock); in btrfs_trim_block_group()
4036 spin_unlock(&block_group->lock); in btrfs_trim_block_group()
4043 div64_u64_rem(end, BITS_PER_BITMAP * ctl->unit, &rem); in btrfs_trim_block_group()
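A hedged sketch of how an fstrim-style caller, modeled on btrfs_trim_fs, drives this helper across block groups (range rounding and error bookkeeping simplified):

        u64 group_trimmed, trimmed = 0;
        int ret = 0;

        while (block_group) {
                ret = btrfs_trim_block_group(block_group, &group_trimmed,
                                             start, end, minlen);
                trimmed += group_trimmed;
                if (ret)
                        break;
                block_group = btrfs_next_block_group(block_group);
        }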
4060 spin_lock(&block_group->lock); in btrfs_trim_block_group_extents()
4061 if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags)) { in btrfs_trim_block_group_extents()
4062 spin_unlock(&block_group->lock); in btrfs_trim_block_group_extents()
4066 spin_unlock(&block_group->lock); in btrfs_trim_block_group_extents()
4082 spin_lock(&block_group->lock); in btrfs_trim_block_group_bitmaps()
4083 if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags)) { in btrfs_trim_block_group_bitmaps()
4084 spin_unlock(&block_group->lock); in btrfs_trim_block_group_bitmaps()
4088 spin_unlock(&block_group->lock); in btrfs_trim_block_group_bitmaps()
4100 return btrfs_super_cache_generation(fs_info->super_copy); in btrfs_free_space_cache_v1_active()
4112 node = rb_first_cached(&fs_info->block_group_cache_tree); in cleanup_free_space_cache_v1()
4130 * update_super_roots will appropriately set or unset in btrfs_set_free_space_cache_v1_active()
4131 * super_copy->cache_generation based on SPACE_CACHE and in btrfs_set_free_space_cache_v1_active()
4132 * BTRFS_FS_CLEANUP_SPACE_CACHE_V1, so this needs a transaction commit. in btrfs_set_free_space_cache_v1_active()
4137 trans = btrfs_start_transaction(fs_info->tree_root, 0); in btrfs_set_free_space_cache_v1_active()
4142 set_bit(BTRFS_FS_CLEANUP_SPACE_CACHE_V1, &fs_info->flags); in btrfs_set_free_space_cache_v1_active()
4153 clear_bit(BTRFS_FS_CLEANUP_SPACE_CACHE_V1, &fs_info->flags); in btrfs_set_free_space_cache_v1_active()
4164 return -ENOMEM; in btrfs_free_space_init()
4171 return -ENOMEM; in btrfs_free_space_init()
4193 struct btrfs_free_space_ctl *ctl = cache->free_space_ctl; in test_add_free_space_entry()
4204 return -ENOMEM; in test_add_free_space_entry()
4208 spin_lock(&ctl->tree_lock); in test_add_free_space_entry()
4209 info->offset = offset; in test_add_free_space_entry()
4210 info->bytes = bytes; in test_add_free_space_entry()
4211 info->max_extent_size = 0; in test_add_free_space_entry()
4213 spin_unlock(&ctl->tree_lock); in test_add_free_space_entry()
4223 return -ENOMEM; in test_add_free_space_entry()
4227 spin_lock(&ctl->tree_lock); in test_add_free_space_entry()
4231 info->bitmap = map; in test_add_free_space_entry()
4241 bytes -= bytes_added; in test_add_free_space_entry()
4243 spin_unlock(&ctl->tree_lock); in test_add_free_space_entry()
4263 struct btrfs_free_space_ctl *ctl = cache->free_space_ctl; in test_check_exists()
4267 spin_lock(&ctl->tree_lock); in test_check_exists()
4277 if (info->bitmap) { in test_check_exists()
4283 bit_bytes = ctl->unit; in test_check_exists()
4296 n = rb_prev(&info->offset_index); in test_check_exists()
4300 if (tmp->offset + tmp->bytes < offset) in test_check_exists()
4302 if (offset + bytes < tmp->offset) { in test_check_exists()
4303 n = rb_prev(&tmp->offset_index); in test_check_exists()
4310 n = rb_next(&info->offset_index); in test_check_exists()
4314 if (offset + bytes < tmp->offset) in test_check_exists()
4316 if (tmp->offset + tmp->bytes < offset) { in test_check_exists()
4317 n = rb_next(&tmp->offset_index); in test_check_exists()
4328 if (info->offset == offset) { in test_check_exists()
4333 if (offset > info->offset && offset < info->offset + info->bytes) in test_check_exists()
4336 spin_unlock(&ctl->tree_lock); in test_check_exists()
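These helpers back the btrfs free-space self tests; a typical pairing looks roughly like this (illustrative offset and size, with cache, SZ_4M and test_err() as used by those tests):

        /* add a 4MiB extent entry at offset 0, then verify it is visible */
        ret = test_add_free_space_entry(cache, 0, SZ_4M, 0);
        if (ret) {
                test_err("could not add free space entry");
                return ret;
        }
        if (!test_check_exists(cache, 0, SZ_4M))
                return -EINVAL;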