Lines Matching +full:cluster +full:- +full:index
1 // SPDX-License-Identifier: GPL-2.0
12 #include <linux/error-injection.h>
18 #include "free-space-cache.h"
20 #include "disk-io.h"
23 #include "space-info.h"
24 #include "delalloc-space.h"
25 #include "block-group.h"
28 #include "inode-item.h"
30 #include "file-item.h"
65 while ((node = rb_last(&ctl->free_space_offset)) != NULL) { in __btrfs_remove_free_space_cache()
67 if (!info->bitmap) { in __btrfs_remove_free_space_cache()
74 cond_resched_lock(&ctl->tree_lock); in __btrfs_remove_free_space_cache()
82 struct btrfs_fs_info *fs_info = root->fs_info; in __lookup_free_space_inode()
101 return ERR_PTR(-ENOENT); in __lookup_free_space_inode()
104 leaf = path->nodes[0]; in __lookup_free_space_inode()
105 header = btrfs_item_ptr(leaf, path->slots[0], in __lookup_free_space_inode()
116 inode = btrfs_iget_path(fs_info->sb, location.objectid, root, path); in __lookup_free_space_inode()
122 mapping_set_gfp_mask(inode->i_mapping, in __lookup_free_space_inode()
123 mapping_gfp_constraint(inode->i_mapping, in __lookup_free_space_inode()
132 struct btrfs_fs_info *fs_info = block_group->fs_info; in lookup_free_space_inode()
136 spin_lock(&block_group->lock); in lookup_free_space_inode()
137 if (block_group->inode) in lookup_free_space_inode()
138 inode = igrab(block_group->inode); in lookup_free_space_inode()
139 spin_unlock(&block_group->lock); in lookup_free_space_inode()
143 inode = __lookup_free_space_inode(fs_info->tree_root, path, in lookup_free_space_inode()
144 block_group->start); in lookup_free_space_inode()
148 spin_lock(&block_group->lock); in lookup_free_space_inode()
149 if (!((BTRFS_I(inode)->flags & flags) == flags)) { in lookup_free_space_inode()
151 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM | in lookup_free_space_inode()
153 block_group->disk_cache_state = BTRFS_DC_CLEAR; in lookup_free_space_inode()
156 if (!test_and_set_bit(BLOCK_GROUP_FLAG_IREF, &block_group->runtime_flags)) in lookup_free_space_inode()
157 block_group->inode = igrab(inode); in lookup_free_space_inode()
158 spin_unlock(&block_group->lock); in lookup_free_space_inode()
182 leaf = path->nodes[0]; in __create_free_space_inode()
183 inode_item = btrfs_item_ptr(leaf, path->slots[0], in __create_free_space_inode()
185 btrfs_item_key(leaf, &disk_key, path->slots[0]); in __create_free_space_inode()
188 btrfs_set_inode_generation(leaf, inode_item, trans->transid); in __create_free_space_inode()
196 btrfs_set_inode_transid(leaf, inode_item, trans->transid); in __create_free_space_inode()
211 leaf = path->nodes[0]; in __create_free_space_inode()
212 header = btrfs_item_ptr(leaf, path->slots[0], in __create_free_space_inode()
229 ret = btrfs_get_free_objectid(trans->fs_info->tree_root, &ino); in create_free_space_inode()
233 return __create_free_space_inode(trans->fs_info->tree_root, trans, path, in create_free_space_inode()
234 ino, block_group->start); in create_free_space_inode()
252 return -ENOMEM; in btrfs_remove_free_space_inode()
257 if (PTR_ERR(inode) != -ENOENT) in btrfs_remove_free_space_inode()
268 spin_lock(&block_group->lock); in btrfs_remove_free_space_inode()
269 if (test_and_clear_bit(BLOCK_GROUP_FLAG_IREF, &block_group->runtime_flags)) { in btrfs_remove_free_space_inode()
270 block_group->inode = NULL; in btrfs_remove_free_space_inode()
271 spin_unlock(&block_group->lock); in btrfs_remove_free_space_inode()
274 spin_unlock(&block_group->lock); in btrfs_remove_free_space_inode()
281 key.offset = block_group->start; in btrfs_remove_free_space_inode()
282 ret = btrfs_search_slot(trans, trans->fs_info->tree_root, &key, path, in btrfs_remove_free_space_inode()
283 -1, 1); in btrfs_remove_free_space_inode()
289 ret = btrfs_del_item(trans, trans->fs_info->tree_root, path); in btrfs_remove_free_space_inode()
307 struct btrfs_root *root = inode->root; in btrfs_truncate_free_space_cache()
316 ret = -ENOMEM; in btrfs_truncate_free_space_cache()
320 mutex_lock(&trans->transaction->cache_write_mutex); in btrfs_truncate_free_space_cache()
321 if (!list_empty(&block_group->io_list)) { in btrfs_truncate_free_space_cache()
322 list_del_init(&block_group->io_list); in btrfs_truncate_free_space_cache()
332 spin_lock(&block_group->lock); in btrfs_truncate_free_space_cache()
333 block_group->disk_cache_state = BTRFS_DC_CLEAR; in btrfs_truncate_free_space_cache()
334 spin_unlock(&block_group->lock); in btrfs_truncate_free_space_cache()
341 lock_extent(&inode->io_tree, 0, (u64)-1, &cached_state); in btrfs_truncate_free_space_cache()
342 btrfs_drop_extent_map_range(inode, 0, (u64)-1, false); in btrfs_truncate_free_space_cache()
346 * need to check for -EAGAIN. in btrfs_truncate_free_space_cache()
350 inode_sub_bytes(&inode->vfs_inode, control.sub_bytes); in btrfs_truncate_free_space_cache()
353 unlock_extent(&inode->io_tree, 0, (u64)-1, &cached_state); in btrfs_truncate_free_space_cache()
361 mutex_unlock(&trans->transaction->cache_write_mutex); in btrfs_truncate_free_space_cache()
373 file_ra_state_init(&ra, inode->i_mapping); in readahead_cache()
374 last_index = (i_size_read(inode) - 1) >> PAGE_SHIFT; in readahead_cache()
376 page_cache_sync_readahead(inode->i_mapping, &ra, NULL, 0, last_index); in readahead_cache()
388 return -ENOSPC; in io_ctl_init()
392 io_ctl->pages = kcalloc(num_pages, sizeof(struct page *), GFP_NOFS); in io_ctl_init()
393 if (!io_ctl->pages) in io_ctl_init()
394 return -ENOMEM; in io_ctl_init()
396 io_ctl->num_pages = num_pages; in io_ctl_init()
397 io_ctl->fs_info = btrfs_sb(inode->i_sb); in io_ctl_init()
398 io_ctl->inode = inode; in io_ctl_init()
406 kfree(io_ctl->pages); in io_ctl_free()
407 io_ctl->pages = NULL; in io_ctl_free()
412 if (io_ctl->cur) { in io_ctl_unmap_page()
413 io_ctl->cur = NULL; in io_ctl_unmap_page()
414 io_ctl->orig = NULL; in io_ctl_unmap_page()
420 ASSERT(io_ctl->index < io_ctl->num_pages); in io_ctl_map_page()
421 io_ctl->page = io_ctl->pages[io_ctl->index++]; in io_ctl_map_page()
422 io_ctl->cur = page_address(io_ctl->page); in io_ctl_map_page()
423 io_ctl->orig = io_ctl->cur; in io_ctl_map_page()
424 io_ctl->size = PAGE_SIZE; in io_ctl_map_page()
426 clear_page(io_ctl->cur); in io_ctl_map_page()
435 for (i = 0; i < io_ctl->num_pages; i++) { in io_ctl_drop_pages()
436 if (io_ctl->pages[i]) { in io_ctl_drop_pages()
437 btrfs_page_clear_checked(io_ctl->fs_info, in io_ctl_drop_pages()
438 io_ctl->pages[i], in io_ctl_drop_pages()
439 page_offset(io_ctl->pages[i]), in io_ctl_drop_pages()
441 unlock_page(io_ctl->pages[i]); in io_ctl_drop_pages()
442 put_page(io_ctl->pages[i]); in io_ctl_drop_pages()
450 struct inode *inode = io_ctl->inode; in io_ctl_prepare_pages()
451 gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping); in io_ctl_prepare_pages()
454 for (i = 0; i < io_ctl->num_pages; i++) { in io_ctl_prepare_pages()
457 page = find_or_create_page(inode->i_mapping, i, mask); in io_ctl_prepare_pages()
460 return -ENOMEM; in io_ctl_prepare_pages()
471 io_ctl->pages[i] = page; in io_ctl_prepare_pages()
475 if (page->mapping != inode->i_mapping) { in io_ctl_prepare_pages()
476 btrfs_err(BTRFS_I(inode)->root->fs_info, in io_ctl_prepare_pages()
479 return -EIO; in io_ctl_prepare_pages()
482 btrfs_err(BTRFS_I(inode)->root->fs_info, in io_ctl_prepare_pages()
485 return -EIO; in io_ctl_prepare_pages()
490 for (i = 0; i < io_ctl->num_pages; i++) in io_ctl_prepare_pages()
491 clear_page_dirty_for_io(io_ctl->pages[i]); in io_ctl_prepare_pages()
504 io_ctl->cur += (sizeof(u32) * io_ctl->num_pages); in io_ctl_set_generation()
505 io_ctl->size -= sizeof(u64) + (sizeof(u32) * io_ctl->num_pages); in io_ctl_set_generation()
507 put_unaligned_le64(generation, io_ctl->cur); in io_ctl_set_generation()
508 io_ctl->cur += sizeof(u64); in io_ctl_set_generation()
519 io_ctl->cur += sizeof(u32) * io_ctl->num_pages; in io_ctl_check_generation()
520 io_ctl->size -= sizeof(u64) + (sizeof(u32) * io_ctl->num_pages); in io_ctl_check_generation()
522 cache_gen = get_unaligned_le64(io_ctl->cur); in io_ctl_check_generation()
524 btrfs_err_rl(io_ctl->fs_info, in io_ctl_check_generation()
528 return -EIO; in io_ctl_check_generation()
530 io_ctl->cur += sizeof(u64); in io_ctl_check_generation()
534 static void io_ctl_set_crc(struct btrfs_io_ctl *io_ctl, int index) in io_ctl_set_crc() argument
540 if (index == 0) in io_ctl_set_crc()
541 offset = sizeof(u32) * io_ctl->num_pages; in io_ctl_set_crc()
543 crc = btrfs_crc32c(crc, io_ctl->orig + offset, PAGE_SIZE - offset); in io_ctl_set_crc()
546 tmp = page_address(io_ctl->pages[0]); in io_ctl_set_crc()
547 tmp += index; in io_ctl_set_crc()
551 static int io_ctl_check_crc(struct btrfs_io_ctl *io_ctl, int index) in io_ctl_check_crc() argument
557 if (index == 0) in io_ctl_check_crc()
558 offset = sizeof(u32) * io_ctl->num_pages; in io_ctl_check_crc()
560 tmp = page_address(io_ctl->pages[0]); in io_ctl_check_crc()
561 tmp += index; in io_ctl_check_crc()
565 crc = btrfs_crc32c(crc, io_ctl->orig + offset, PAGE_SIZE - offset); in io_ctl_check_crc()
568 btrfs_err_rl(io_ctl->fs_info, in io_ctl_check_crc()
571 return -EIO; in io_ctl_check_crc()
582 if (!io_ctl->cur) in io_ctl_add_entry()
583 return -ENOSPC; in io_ctl_add_entry()
585 entry = io_ctl->cur; in io_ctl_add_entry()
586 put_unaligned_le64(offset, &entry->offset); in io_ctl_add_entry()
587 put_unaligned_le64(bytes, &entry->bytes); in io_ctl_add_entry()
588 entry->type = (bitmap) ? BTRFS_FREE_SPACE_BITMAP : in io_ctl_add_entry()
590 io_ctl->cur += sizeof(struct btrfs_free_space_entry); in io_ctl_add_entry()
591 io_ctl->size -= sizeof(struct btrfs_free_space_entry); in io_ctl_add_entry()
593 if (io_ctl->size >= sizeof(struct btrfs_free_space_entry)) in io_ctl_add_entry()
596 io_ctl_set_crc(io_ctl, io_ctl->index - 1); in io_ctl_add_entry()
599 if (io_ctl->index >= io_ctl->num_pages) in io_ctl_add_entry()
609 if (!io_ctl->cur) in io_ctl_add_bitmap()
610 return -ENOSPC; in io_ctl_add_bitmap()
616 if (io_ctl->cur != io_ctl->orig) { in io_ctl_add_bitmap()
617 io_ctl_set_crc(io_ctl, io_ctl->index - 1); in io_ctl_add_bitmap()
618 if (io_ctl->index >= io_ctl->num_pages) in io_ctl_add_bitmap()
619 return -ENOSPC; in io_ctl_add_bitmap()
623 copy_page(io_ctl->cur, bitmap); in io_ctl_add_bitmap()
624 io_ctl_set_crc(io_ctl, io_ctl->index - 1); in io_ctl_add_bitmap()
625 if (io_ctl->index < io_ctl->num_pages) in io_ctl_add_bitmap()
636 if (io_ctl->cur != io_ctl->orig) in io_ctl_zero_remaining_pages()
637 io_ctl_set_crc(io_ctl, io_ctl->index - 1); in io_ctl_zero_remaining_pages()
641 while (io_ctl->index < io_ctl->num_pages) { in io_ctl_zero_remaining_pages()
643 io_ctl_set_crc(io_ctl, io_ctl->index - 1); in io_ctl_zero_remaining_pages()
653 if (!io_ctl->cur) { in io_ctl_read_entry()
654 ret = io_ctl_check_crc(io_ctl, io_ctl->index); in io_ctl_read_entry()
659 e = io_ctl->cur; in io_ctl_read_entry()
660 entry->offset = get_unaligned_le64(&e->offset); in io_ctl_read_entry()
661 entry->bytes = get_unaligned_le64(&e->bytes); in io_ctl_read_entry()
662 *type = e->type; in io_ctl_read_entry()
663 io_ctl->cur += sizeof(struct btrfs_free_space_entry); in io_ctl_read_entry()
664 io_ctl->size -= sizeof(struct btrfs_free_space_entry); in io_ctl_read_entry()
666 if (io_ctl->size >= sizeof(struct btrfs_free_space_entry)) in io_ctl_read_entry()
679 ret = io_ctl_check_crc(io_ctl, io_ctl->index); in io_ctl_read_bitmap()
683 copy_page(entry->bitmap, io_ctl->cur); in io_ctl_read_bitmap()
691 struct btrfs_block_group *block_group = ctl->block_group; in recalculate_thresholds()
695 u64 size = block_group->length; in recalculate_thresholds()
696 u64 bytes_per_bg = BITS_PER_BITMAP * ctl->unit; in recalculate_thresholds()
697 u64 max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg); in recalculate_thresholds()
701 if (ctl->total_bitmaps > max_bitmaps) in recalculate_thresholds()
702 btrfs_err(block_group->fs_info, in recalculate_thresholds()
704 block_group->start, block_group->length, in recalculate_thresholds()
705 ctl->total_bitmaps, ctl->unit, max_bitmaps, in recalculate_thresholds()
707 ASSERT(ctl->total_bitmaps <= max_bitmaps); in recalculate_thresholds()
720 bitmap_bytes = ctl->total_bitmaps * ctl->unit; in recalculate_thresholds()
726 extent_bytes = max_bytes - bitmap_bytes; in recalculate_thresholds()
729 ctl->extents_thresh = in recalculate_thresholds()
737 struct btrfs_fs_info *fs_info = root->fs_info; in __load_free_space_cache()
766 ret = -1; in __load_free_space_cache()
768 leaf = path->nodes[0]; in __load_free_space_cache()
769 header = btrfs_item_ptr(leaf, path->slots[0], in __load_free_space_cache()
776 if (!BTRFS_I(inode)->generation) { in __load_free_space_cache()
783 if (BTRFS_I(inode)->generation != generation) { in __load_free_space_cache()
786 BTRFS_I(inode)->generation, generation); in __load_free_space_cache()
815 ret = -ENOMEM; in __load_free_space_cache()
825 if (!e->bytes) { in __load_free_space_cache()
826 ret = -1; in __load_free_space_cache()
832 spin_lock(&ctl->tree_lock); in __load_free_space_cache()
834 spin_unlock(&ctl->tree_lock); in __load_free_space_cache()
843 num_bitmaps--; in __load_free_space_cache()
844 e->bitmap = kmem_cache_zalloc( in __load_free_space_cache()
846 if (!e->bitmap) { in __load_free_space_cache()
847 ret = -ENOMEM; in __load_free_space_cache()
852 spin_lock(&ctl->tree_lock); in __load_free_space_cache()
855 spin_unlock(&ctl->tree_lock); in __load_free_space_cache()
861 ctl->total_bitmaps++; in __load_free_space_cache()
863 spin_unlock(&ctl->tree_lock); in __load_free_space_cache()
864 list_add_tail(&e->list, &bitmaps); in __load_free_space_cache()
867 num_entries--; in __load_free_space_cache()
877 list_del_init(&e->list); in __load_free_space_cache()
891 spin_lock(&ctl->tree_lock); in __load_free_space_cache()
893 spin_unlock(&ctl->tree_lock); in __load_free_space_cache()
904 while (!ret && (n = rb_first(&ctl->free_space_offset)) != NULL) { in copy_free_space_cache()
906 if (!info->bitmap) { in copy_free_space_cache()
907 const u64 offset = info->offset; in copy_free_space_cache()
908 const u64 bytes = info->bytes; in copy_free_space_cache()
911 spin_unlock(&ctl->tree_lock); in copy_free_space_cache()
914 spin_lock(&ctl->tree_lock); in copy_free_space_cache()
916 u64 offset = info->offset; in copy_free_space_cache()
917 u64 bytes = ctl->unit; in copy_free_space_cache()
922 spin_unlock(&ctl->tree_lock); in copy_free_space_cache()
925 spin_lock(&ctl->tree_lock); in copy_free_space_cache()
931 cond_resched_lock(&ctl->tree_lock); in copy_free_space_cache()
940 struct btrfs_fs_info *fs_info = block_group->fs_info; in load_free_space_cache()
941 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in load_free_space_cache()
947 u64 used = block_group->used; in load_free_space_cache()
960 spin_lock(&block_group->lock); in load_free_space_cache()
961 if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) { in load_free_space_cache()
962 spin_unlock(&block_group->lock); in load_free_space_cache()
965 spin_unlock(&block_group->lock); in load_free_space_cache()
970 path->search_commit_root = 1; in load_free_space_cache()
971 path->skip_locking = 1; in load_free_space_cache()
978 * for a free extent, at extent-tree.c:find_free_extent(), we can find in load_free_space_cache()
989 * once created get their ->cached field set to BTRFS_CACHE_FINISHED so in load_free_space_cache()
999 spin_lock(&block_group->lock); in load_free_space_cache()
1000 if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) { in load_free_space_cache()
1001 spin_unlock(&block_group->lock); in load_free_space_cache()
1005 spin_unlock(&block_group->lock); in load_free_space_cache()
1008 * Reinitialize the class of struct inode's mapping->invalidate_lock for in load_free_space_cache()
1012 lockdep_set_class(&(&inode->i_data)->invalidate_lock, in load_free_space_cache()
1015 ret = __load_free_space_cache(fs_info->tree_root, inode, &tmp_ctl, in load_free_space_cache()
1016 path, block_group->start); in load_free_space_cache()
1021 matched = (tmp_ctl.free_space == (block_group->length - used - in load_free_space_cache()
1022 block_group->bytes_super)); in load_free_space_cache()
1030 * so we need to re-set it here. in load_free_space_cache()
1044 block_group->start); in load_free_space_cache()
1045 ret = -1; in load_free_space_cache()
1050 spin_lock(&block_group->lock); in load_free_space_cache()
1051 block_group->disk_cache_state = BTRFS_DC_CLEAR; in load_free_space_cache()
1052 spin_unlock(&block_group->lock); in load_free_space_cache()
1057 block_group->start); in load_free_space_cache()
1060 spin_lock(&ctl->tree_lock); in load_free_space_cache()
1062 spin_unlock(&ctl->tree_lock); in load_free_space_cache()
1075 struct btrfs_free_cluster *cluster = NULL; in write_cache_extent_entries() local
1077 struct rb_node *node = rb_first(&ctl->free_space_offset); in write_cache_extent_entries()
1080 /* Get the cluster for this block_group if it exists */ in write_cache_extent_entries()
1081 if (block_group && !list_empty(&block_group->cluster_list)) { in write_cache_extent_entries()
1082 cluster = list_entry(block_group->cluster_list.next, in write_cache_extent_entries()
1087 if (!node && cluster) { in write_cache_extent_entries()
1088 cluster_locked = cluster; in write_cache_extent_entries()
1089 spin_lock(&cluster_locked->lock); in write_cache_extent_entries()
1090 node = rb_first(&cluster->root); in write_cache_extent_entries()
1091 cluster = NULL; in write_cache_extent_entries()
1101 ret = io_ctl_add_entry(io_ctl, e->offset, e->bytes, in write_cache_extent_entries()
1102 e->bitmap); in write_cache_extent_entries()
1106 if (e->bitmap) { in write_cache_extent_entries()
1107 list_add_tail(&e->list, bitmap_list); in write_cache_extent_entries()
1111 if (!node && cluster) { in write_cache_extent_entries()
1112 node = rb_first(&cluster->root); in write_cache_extent_entries()
1113 cluster_locked = cluster; in write_cache_extent_entries()
1114 spin_lock(&cluster_locked->lock); in write_cache_extent_entries()
1115 cluster = NULL; in write_cache_extent_entries()
1119 spin_unlock(&cluster_locked->lock); in write_cache_extent_entries()
1129 list_for_each_entry(trim_entry, &ctl->trimming_ranges, list) { in write_cache_extent_entries()
1130 ret = io_ctl_add_entry(io_ctl, trim_entry->start, in write_cache_extent_entries()
1131 trim_entry->bytes, NULL); in write_cache_extent_entries()
1140 spin_unlock(&cluster_locked->lock); in write_cache_extent_entries()
1141 return -ENOSPC; in write_cache_extent_entries()
1162 clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1, in update_cache_item()
1166 leaf = path->nodes[0]; in update_cache_item()
1169 ASSERT(path->slots[0]); in update_cache_item()
1170 path->slots[0]--; in update_cache_item()
1171 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); in update_cache_item()
1174 clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, in update_cache_item()
1175 inode->i_size - 1, EXTENT_DELALLOC, in update_cache_item()
1182 BTRFS_I(inode)->generation = trans->transid; in update_cache_item()
1183 header = btrfs_item_ptr(leaf, path->slots[0], in update_cache_item()
1187 btrfs_set_free_space_generation(leaf, header, trans->transid); in update_cache_item()
1194 return -1; in update_cache_item()
1217 unpin = &trans->transaction->pinned_extents; in write_pinned_extent_entries()
1219 start = block_group->start; in write_pinned_extent_entries()
1221 while (start < block_group->start + block_group->length) { in write_pinned_extent_entries()
1228 if (extent_start >= block_group->start + block_group->length) in write_pinned_extent_entries()
1232 extent_end = min(block_group->start + block_group->length, in write_pinned_extent_entries()
1234 len = extent_end - extent_start; in write_pinned_extent_entries()
1239 return -ENOSPC; in write_pinned_extent_entries()
1255 ret = io_ctl_add_bitmap(io_ctl, entry->bitmap); in write_bitmap_entries()
1257 return -ENOSPC; in write_bitmap_entries()
1258 list_del_init(&entry->list); in write_bitmap_entries()
1268 ret = btrfs_wait_ordered_range(inode, 0, (u64)-1); in flush_dirty_cache()
1270 clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1, in flush_dirty_cache()
1282 list_del_init(&entry->list); in cleanup_bitmap_list()
1291 unlock_extent(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1, in cleanup_write_cache_enospc()
1302 struct inode *inode = io_ctl->inode; in __btrfs_wait_cache_io()
1314 io_ctl->entries, io_ctl->bitmaps); in __btrfs_wait_cache_io()
1317 invalidate_inode_pages2(inode->i_mapping); in __btrfs_wait_cache_io()
1318 BTRFS_I(inode)->generation = 0; in __btrfs_wait_cache_io()
1320 btrfs_debug(root->fs_info, in __btrfs_wait_cache_io()
1322 block_group->start, ret); in __btrfs_wait_cache_io()
1328 spin_lock(&trans->transaction->dirty_bgs_lock); in __btrfs_wait_cache_io()
1331 spin_lock(&block_group->lock); in __btrfs_wait_cache_io()
1338 if (!ret && list_empty(&block_group->dirty_list)) in __btrfs_wait_cache_io()
1339 block_group->disk_cache_state = BTRFS_DC_WRITTEN; in __btrfs_wait_cache_io()
1341 block_group->disk_cache_state = BTRFS_DC_ERROR; in __btrfs_wait_cache_io()
1343 spin_unlock(&block_group->lock); in __btrfs_wait_cache_io()
1344 spin_unlock(&trans->transaction->dirty_bgs_lock); in __btrfs_wait_cache_io()
1345 io_ctl->inode = NULL; in __btrfs_wait_cache_io()
1357 return __btrfs_wait_cache_io(block_group->fs_info->tree_root, trans, in btrfs_wait_cache_io()
1358 block_group, &block_group->io_ctl, in btrfs_wait_cache_io()
1359 path, block_group->start); in btrfs_wait_cache_io()
1390 return -EIO; in __btrfs_write_out_cache()
1392 WARN_ON(io_ctl->pages); in __btrfs_write_out_cache()
1397 if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)) { in __btrfs_write_out_cache()
1398 down_write(&block_group->data_rwsem); in __btrfs_write_out_cache()
1399 spin_lock(&block_group->lock); in __btrfs_write_out_cache()
1400 if (block_group->delalloc_bytes) { in __btrfs_write_out_cache()
1401 block_group->disk_cache_state = BTRFS_DC_WRITTEN; in __btrfs_write_out_cache()
1402 spin_unlock(&block_group->lock); in __btrfs_write_out_cache()
1403 up_write(&block_group->data_rwsem); in __btrfs_write_out_cache()
1404 BTRFS_I(inode)->generation = 0; in __btrfs_write_out_cache()
1409 spin_unlock(&block_group->lock); in __btrfs_write_out_cache()
1417 lock_extent(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1, in __btrfs_write_out_cache()
1420 io_ctl_set_generation(io_ctl, trans->transid); in __btrfs_write_out_cache()
1422 mutex_lock(&ctl->cache_writeout_mutex); in __btrfs_write_out_cache()
1424 spin_lock(&ctl->tree_lock); in __btrfs_write_out_cache()
1449 spin_unlock(&ctl->tree_lock); in __btrfs_write_out_cache()
1450 mutex_unlock(&ctl->cache_writeout_mutex); in __btrfs_write_out_cache()
1458 ret = btrfs_dirty_pages(BTRFS_I(inode), io_ctl->pages, in __btrfs_write_out_cache()
1459 io_ctl->num_pages, 0, i_size_read(inode), in __btrfs_write_out_cache()
1464 if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)) in __btrfs_write_out_cache()
1465 up_write(&block_group->data_rwsem); in __btrfs_write_out_cache()
1473 unlock_extent(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1, in __btrfs_write_out_cache()
1481 io_ctl->entries = entries; in __btrfs_write_out_cache()
1482 io_ctl->bitmaps = bitmaps; in __btrfs_write_out_cache()
1484 ret = btrfs_fdatawrite_range(inode, 0, (u64)-1); in __btrfs_write_out_cache()
1492 spin_unlock(&ctl->tree_lock); in __btrfs_write_out_cache()
1493 mutex_unlock(&ctl->cache_writeout_mutex); in __btrfs_write_out_cache()
1499 if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)) in __btrfs_write_out_cache()
1500 up_write(&block_group->data_rwsem); in __btrfs_write_out_cache()
1503 io_ctl->inode = NULL; in __btrfs_write_out_cache()
1506 invalidate_inode_pages2(inode->i_mapping); in __btrfs_write_out_cache()
1507 BTRFS_I(inode)->generation = 0; in __btrfs_write_out_cache()
1519 struct btrfs_fs_info *fs_info = trans->fs_info; in btrfs_write_out_cache()
1520 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in btrfs_write_out_cache()
1524 spin_lock(&block_group->lock); in btrfs_write_out_cache()
1525 if (block_group->disk_cache_state < BTRFS_DC_SETUP) { in btrfs_write_out_cache()
1526 spin_unlock(&block_group->lock); in btrfs_write_out_cache()
1529 spin_unlock(&block_group->lock); in btrfs_write_out_cache()
1535 ret = __btrfs_write_out_cache(fs_info->tree_root, inode, ctl, in btrfs_write_out_cache()
1536 block_group, &block_group->io_ctl, trans); in btrfs_write_out_cache()
1540 block_group->start, ret); in btrfs_write_out_cache()
1541 spin_lock(&block_group->lock); in btrfs_write_out_cache()
1542 block_group->disk_cache_state = BTRFS_DC_ERROR; in btrfs_write_out_cache()
1543 spin_unlock(&block_group->lock); in btrfs_write_out_cache()
1545 block_group->io_ctl.inode = NULL; in btrfs_write_out_cache()
1561 offset -= bitmap_start; in offset_to_bit()
1576 bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit; in offset_to_bitmap()
1577 bitmap_start = offset - ctl->start; in offset_to_bitmap()
1580 bitmap_start += ctl->start; in offset_to_bitmap()
1586 struct btrfs_free_cluster *cluster, in tree_insert_offset() argument
1593 lockdep_assert_held(&ctl->tree_lock); in tree_insert_offset()
1595 if (cluster) { in tree_insert_offset()
1596 lockdep_assert_held(&cluster->lock); in tree_insert_offset()
1597 root = &cluster->root; in tree_insert_offset()
1599 root = &ctl->free_space_offset; in tree_insert_offset()
1602 p = &root->rb_node; in tree_insert_offset()
1610 if (new_entry->offset < info->offset) { in tree_insert_offset()
1611 p = &(*p)->rb_left; in tree_insert_offset()
1612 } else if (new_entry->offset > info->offset) { in tree_insert_offset()
1613 p = &(*p)->rb_right; in tree_insert_offset()
1628 if (new_entry->bitmap) { in tree_insert_offset()
1629 if (info->bitmap) { in tree_insert_offset()
1631 return -EEXIST; in tree_insert_offset()
1633 p = &(*p)->rb_right; in tree_insert_offset()
1635 if (!info->bitmap) { in tree_insert_offset()
1637 return -EEXIST; in tree_insert_offset()
1639 p = &(*p)->rb_left; in tree_insert_offset()
1644 rb_link_node(&new_entry->offset_index, parent, p); in tree_insert_offset()
1645 rb_insert_color(&new_entry->offset_index, root); in tree_insert_offset()
1651 * This is a little subtle. We *only* have ->max_extent_size set if we actually
1652 * searched through the bitmap and figured out the largest ->max_extent_size,
1655 * we've found already if it's larger, or we want to use ->bytes.
1657 * This matters because find_free_space() will skip entries whose ->bytes is
1659 * may pick some previous entry that has a smaller ->max_extent_size than we
1661 * ->max_extent_size set to 4K and ->bytes set to 1M. A second entry hasn't set
1662 * ->max_extent_size yet, has ->bytes set to 8K and it's contiguous. We will
1664 * that first bitmap entry had ->max_extent_size set, but the second one did
1669 * don't have ->max_extent_size set. We'll return 16K, and the next time the
1676 if (entry->bitmap && entry->max_extent_size) in get_max_extent_size()
1677 return entry->max_extent_size; in get_max_extent_size()
1678 return entry->bytes; in get_max_extent_size()
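The comment block above explains when ->max_extent_size can be trusted, and the two lines of get_max_extent_size() that follow encode that rule: only a bitmap entry that has actually been searched reports its largest run, otherwise ->bytes is the best available hint. Purely as an illustration, a hypothetical stand-alone user-space model of the same decision (the struct and names below are invented for this sketch, not the kernel's types) could look like this:

/* Hypothetical stand-alone model of the get_max_extent_size() rule. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct free_space_model {
	bool bitmap;              /* entry is backed by a bitmap            */
	uint64_t bytes;           /* total free bytes tracked by the entry  */
	uint64_t max_extent_size; /* largest run found by a search, else 0  */
};

/* Trust max_extent_size only for bitmap entries that have been searched. */
static uint64_t model_max_extent_size(const struct free_space_model *e)
{
	if (e->bitmap && e->max_extent_size)
		return e->max_extent_size;
	return e->bytes;
}

int main(void)
{
	/* Searched bitmap: 1M free in total, but the largest run is only 4K. */
	struct free_space_model searched = { true, 1 << 20, 4096 };
	/* Unsearched bitmap: 8K free, assumed contiguous until proven otherwise. */
	struct free_space_model fresh = { true, 8192, 0 };

	printf("searched hint: %llu\n",
	       (unsigned long long)model_max_extent_size(&searched));
	printf("fresh hint:    %llu\n",
	       (unsigned long long)model_max_extent_size(&fresh));
	return 0;
}

The two example entries mirror the 4K/1M versus 8K scenario described in the comment above.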
1697 * fuzzy - If this is set, then we are trying to make an allocation, and we just
1705 struct rb_node *n = ctl->free_space_offset.rb_node; in tree_search_offset()
1708 lockdep_assert_held(&ctl->tree_lock); in tree_search_offset()
1715 if (offset < entry->offset) in tree_search_offset()
1716 n = n->rb_left; in tree_search_offset()
1717 else if (offset > entry->offset) in tree_search_offset()
1718 n = n->rb_right; in tree_search_offset()
1728 if (entry->bitmap) in tree_search_offset()
1739 if (entry->offset != offset) in tree_search_offset()
1742 WARN_ON(!entry->bitmap); in tree_search_offset()
1745 if (entry->bitmap) { in tree_search_offset()
1750 n = rb_prev(&entry->offset_index); in tree_search_offset()
1754 if (!prev->bitmap && in tree_search_offset()
1755 prev->offset + prev->bytes > offset) in tree_search_offset()
1767 if (entry->offset > offset) { in tree_search_offset()
1768 n = rb_prev(&entry->offset_index); in tree_search_offset()
1772 ASSERT(entry->offset <= offset); in tree_search_offset()
1781 if (entry->bitmap) { in tree_search_offset()
1782 n = rb_prev(&entry->offset_index); in tree_search_offset()
1786 if (!prev->bitmap && in tree_search_offset()
1787 prev->offset + prev->bytes > offset) in tree_search_offset()
1790 if (entry->offset + BITS_PER_BITMAP * ctl->unit > offset) in tree_search_offset()
1792 } else if (entry->offset + entry->bytes > offset) in tree_search_offset()
1799 n = rb_next(&entry->offset_index); in tree_search_offset()
1803 if (entry->bitmap) { in tree_search_offset()
1804 if (entry->offset + BITS_PER_BITMAP * in tree_search_offset()
1805 ctl->unit > offset) in tree_search_offset()
1808 if (entry->offset + entry->bytes > offset) in tree_search_offset()
1819 lockdep_assert_held(&ctl->tree_lock); in unlink_free_space()
1821 rb_erase(&info->offset_index, &ctl->free_space_offset); in unlink_free_space()
1822 rb_erase_cached(&info->bytes_index, &ctl->free_space_bytes); in unlink_free_space()
1823 ctl->free_extents--; in unlink_free_space()
1825 if (!info->bitmap && !btrfs_free_space_trimmed(info)) { in unlink_free_space()
1826 ctl->discardable_extents[BTRFS_STAT_CURR]--; in unlink_free_space()
1827 ctl->discardable_bytes[BTRFS_STAT_CURR] -= info->bytes; in unlink_free_space()
1831 ctl->free_space -= info->bytes; in unlink_free_space()
1839 lockdep_assert_held(&ctl->tree_lock); in link_free_space()
1841 ASSERT(info->bytes || info->bitmap); in link_free_space()
1846 rb_add_cached(&info->bytes_index, &ctl->free_space_bytes, entry_less); in link_free_space()
1848 if (!info->bitmap && !btrfs_free_space_trimmed(info)) { in link_free_space()
1849 ctl->discardable_extents[BTRFS_STAT_CURR]++; in link_free_space()
1850 ctl->discardable_bytes[BTRFS_STAT_CURR] += info->bytes; in link_free_space()
1853 ctl->free_space += info->bytes; in link_free_space()
1854 ctl->free_extents++; in link_free_space()
1861 ASSERT(info->bitmap); in relink_bitmap_entry()
1864 * If our entry is empty it's because we're on a cluster and we don't in relink_bitmap_entry()
1865 * want to re-link it into our ctl bytes index. in relink_bitmap_entry()
1867 if (RB_EMPTY_NODE(&info->bytes_index)) in relink_bitmap_entry()
1870 lockdep_assert_held(&ctl->tree_lock); in relink_bitmap_entry()
1872 rb_erase_cached(&info->bytes_index, &ctl->free_space_bytes); in relink_bitmap_entry()
1873 rb_add_cached(&info->bytes_index, &ctl->free_space_bytes, entry_less); in relink_bitmap_entry()
1881 int extent_delta = -1; in bitmap_clear_bits()
1883 start = offset_to_bit(info->offset, ctl->unit, offset); in bitmap_clear_bits()
1884 count = bytes_to_bits(bytes, ctl->unit); in bitmap_clear_bits()
1888 bitmap_clear(info->bitmap, start, count); in bitmap_clear_bits()
1890 info->bytes -= bytes; in bitmap_clear_bits()
1891 if (info->max_extent_size > ctl->unit) in bitmap_clear_bits()
1892 info->max_extent_size = 0; in bitmap_clear_bits()
1896 if (start && test_bit(start - 1, info->bitmap)) in bitmap_clear_bits()
1899 if (end < BITS_PER_BITMAP && test_bit(end, info->bitmap)) in bitmap_clear_bits()
1902 info->bitmap_extents += extent_delta; in bitmap_clear_bits()
1904 ctl->discardable_extents[BTRFS_STAT_CURR] += extent_delta; in bitmap_clear_bits()
1905 ctl->discardable_bytes[BTRFS_STAT_CURR] -= bytes; in bitmap_clear_bits()
1909 ctl->free_space -= bytes; in bitmap_clear_bits()
1919 start = offset_to_bit(info->offset, ctl->unit, offset); in bitmap_set_bits()
1920 count = bytes_to_bits(bytes, ctl->unit); in bitmap_set_bits()
1924 bitmap_set(info->bitmap, start, count); in bitmap_set_bits()
1930 info->max_extent_size = 0; in bitmap_set_bits()
1931 info->bytes += bytes; in bitmap_set_bits()
1932 ctl->free_space += bytes; in bitmap_set_bits()
1936 if (start && test_bit(start - 1, info->bitmap)) in bitmap_set_bits()
1937 extent_delta--; in bitmap_set_bits()
1939 if (end < BITS_PER_BITMAP && test_bit(end, info->bitmap)) in bitmap_set_bits()
1940 extent_delta--; in bitmap_set_bits()
1942 info->bitmap_extents += extent_delta; in bitmap_set_bits()
1944 ctl->discardable_extents[BTRFS_STAT_CURR] += extent_delta; in bitmap_set_bits()
1945 ctl->discardable_bytes[BTRFS_STAT_CURR] += bytes; in bitmap_set_bits()
1968 bitmap_info->max_extent_size && in search_bitmap()
1969 bitmap_info->max_extent_size < *bytes) { in search_bitmap()
1970 *bytes = bitmap_info->max_extent_size; in search_bitmap()
1971 return -1; in search_bitmap()
1974 i = offset_to_bit(bitmap_info->offset, ctl->unit, in search_bitmap()
1975 max_t(u64, *offset, bitmap_info->offset)); in search_bitmap()
1976 bits = bytes_to_bits(*bytes, ctl->unit); in search_bitmap()
1978 for_each_set_bit_from(i, bitmap_info->bitmap, BITS_PER_BITMAP) { in search_bitmap()
1983 next_zero = find_next_zero_bit(bitmap_info->bitmap, in search_bitmap()
1985 extent_bits = next_zero - i; in search_bitmap()
1996 *offset = (u64)(i * ctl->unit) + bitmap_info->offset; in search_bitmap()
1997 *bytes = (u64)(found_bits) * ctl->unit; in search_bitmap()
2001 *bytes = (u64)(max_bits) * ctl->unit; in search_bitmap()
2002 bitmap_info->max_extent_size = *bytes; in search_bitmap()
2004 return -1; in search_bitmap()
2018 if (!ctl->free_space_offset.rb_node) in find_free_space()
2022 node = rb_first_cached(&ctl->free_space_bytes); in find_free_space()
2028 node = &entry->offset_index; in find_free_space()
2040 * If we are using the bytes index then all subsequent entries in find_free_space()
2044 * If we're using the offset index then we need to keep going in find_free_space()
2047 if (entry->bytes < *bytes) { in find_free_space()
2059 tmp = entry->offset - ctl->start + align - 1; in find_free_space()
2061 tmp = tmp * align + ctl->start; in find_free_space()
2062 align_off = tmp - entry->offset; in find_free_space()
2065 tmp = entry->offset; in find_free_space()
2069 * We don't break here if we're using the bytes index because we in find_free_space()
2075 if (entry->bytes < *bytes + align_off) { in find_free_space()
2081 if (entry->bitmap) { in find_free_space()
2097 * The bitmap may have gotten re-arranged in the space in find_free_space()
2098 * index here because the max_extent_size may have been in find_free_space()
2108 *bytes = entry->bytes - align_off; in find_free_space()
2118 info->offset = offset_to_bitmap(ctl, offset); in add_new_bitmap()
2119 info->bytes = 0; in add_new_bitmap()
2120 info->bitmap_extents = 0; in add_new_bitmap()
2121 INIT_LIST_HEAD(&info->list); in add_new_bitmap()
2123 ctl->total_bitmaps++; in add_new_bitmap()
2136 if (bitmap_info->bytes && !btrfs_free_space_trimmed(bitmap_info)) { in free_bitmap()
2137 ctl->discardable_extents[BTRFS_STAT_CURR] -= in free_bitmap()
2138 bitmap_info->bitmap_extents; in free_bitmap()
2139 ctl->discardable_bytes[BTRFS_STAT_CURR] -= bitmap_info->bytes; in free_bitmap()
2143 kmem_cache_free(btrfs_free_space_bitmap_cachep, bitmap_info->bitmap); in free_bitmap()
2145 ctl->total_bitmaps--; in free_bitmap()
2158 end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1; in remove_from_bitmap()
2167 search_bytes = ctl->unit; in remove_from_bitmap()
2168 search_bytes = min(search_bytes, end - search_start + 1); in remove_from_bitmap()
2172 return -EINVAL; in remove_from_bitmap()
2178 search_bytes = min(search_bytes, end - search_start + 1); in remove_from_bitmap()
2182 *bytes -= search_bytes; in remove_from_bitmap()
2185 struct rb_node *next = rb_next(&bitmap_info->offset_index); in remove_from_bitmap()
2186 if (!bitmap_info->bytes) in remove_from_bitmap()
2194 return -EINVAL; in remove_from_bitmap()
2203 if (!bitmap_info->bitmap) in remove_from_bitmap()
2204 return -EAGAIN; in remove_from_bitmap()
2213 search_bytes = ctl->unit; in remove_from_bitmap()
2217 return -EAGAIN; in remove_from_bitmap()
2220 } else if (!bitmap_info->bytes) in remove_from_bitmap()
2239 ctl->discardable_extents[BTRFS_STAT_CURR] += in add_bytes_to_bitmap()
2240 info->bitmap_extents; in add_bytes_to_bitmap()
2241 ctl->discardable_bytes[BTRFS_STAT_CURR] += info->bytes; in add_bytes_to_bitmap()
2243 info->trim_state = BTRFS_TRIM_STATE_UNTRIMMED; in add_bytes_to_bitmap()
2246 end = info->offset + (u64)(BITS_PER_BITMAP * ctl->unit); in add_bytes_to_bitmap()
2248 bytes_to_set = min(end - offset, bytes); in add_bytes_to_bitmap()
2259 struct btrfs_block_group *block_group = ctl->block_group; in use_bitmap()
2260 struct btrfs_fs_info *fs_info = block_group->fs_info; in use_bitmap()
2269 if (!forced && info->bytes >= FORCE_EXTENT_THRESHOLD) in use_bitmap()
2276 if (!forced && ctl->free_extents < ctl->extents_thresh) { in use_bitmap()
2284 if (info->bytes <= fs_info->sectorsize * 8) { in use_bitmap()
2285 if (ctl->free_extents * 3 <= ctl->extents_thresh) in use_bitmap()
2300 if (((BITS_PER_BITMAP * ctl->unit) >> 1) > block_group->length) in use_bitmap()
2320 bytes = info->bytes; in insert_into_bitmap()
2321 offset = info->offset; in insert_into_bitmap()
2322 trim_state = info->trim_state; in insert_into_bitmap()
2324 if (!ctl->op->use_bitmap(ctl, info)) in insert_into_bitmap()
2327 if (ctl->op == &free_space_op) in insert_into_bitmap()
2328 block_group = ctl->block_group; in insert_into_bitmap()
2331 * Since we link bitmaps right into the cluster we need to see if we in insert_into_bitmap()
2332 * have a cluster here, and if so and it has our bitmap we need to add in insert_into_bitmap()
2335 if (block_group && !list_empty(&block_group->cluster_list)) { in insert_into_bitmap()
2336 struct btrfs_free_cluster *cluster; in insert_into_bitmap() local
2340 cluster = list_entry(block_group->cluster_list.next, in insert_into_bitmap()
2343 spin_lock(&cluster->lock); in insert_into_bitmap()
2344 node = rb_first(&cluster->root); in insert_into_bitmap()
2346 spin_unlock(&cluster->lock); in insert_into_bitmap()
2351 if (!entry->bitmap) { in insert_into_bitmap()
2352 spin_unlock(&cluster->lock); in insert_into_bitmap()
2356 if (entry->offset == offset_to_bitmap(ctl, offset)) { in insert_into_bitmap()
2359 bytes -= bytes_added; in insert_into_bitmap()
2362 spin_unlock(&cluster->lock); in insert_into_bitmap()
2379 bytes -= bytes_added; in insert_into_bitmap()
2390 if (info && info->bitmap) { in insert_into_bitmap()
2396 spin_unlock(&ctl->tree_lock); in insert_into_bitmap()
2398 /* no pre-allocated info, allocate a new one */ in insert_into_bitmap()
2403 spin_lock(&ctl->tree_lock); in insert_into_bitmap()
2404 ret = -ENOMEM; in insert_into_bitmap()
2410 info->bitmap = kmem_cache_zalloc(btrfs_free_space_bitmap_cachep, in insert_into_bitmap()
2412 info->trim_state = BTRFS_TRIM_STATE_TRIMMED; in insert_into_bitmap()
2413 spin_lock(&ctl->tree_lock); in insert_into_bitmap()
2414 if (!info->bitmap) { in insert_into_bitmap()
2415 ret = -ENOMEM; in insert_into_bitmap()
2423 if (info->bitmap) in insert_into_bitmap()
2425 info->bitmap); in insert_into_bitmap()
2454 u64 offset = info->offset; in try_merge_free_space()
2455 u64 bytes = info->bytes; in try_merge_free_space()
2466 right_prev = rb_prev(&right_info->offset_index); in try_merge_free_space()
2471 left_info = tree_search_offset(ctl, offset - 1, 0, 0); in try_merge_free_space()
2474 if (right_info && !right_info->bitmap && in try_merge_free_space()
2477 info->bytes += right_info->bytes; in try_merge_free_space()
2483 if (left_info && !left_info->bitmap && in try_merge_free_space()
2484 left_info->offset + left_info->bytes == offset && in try_merge_free_space()
2487 info->offset = left_info->offset; in try_merge_free_space()
2488 info->bytes += left_info->bytes; in try_merge_free_space()
2503 const u64 end = info->offset + info->bytes; in steal_from_bitmap_to_end()
2511 i = offset_to_bit(bitmap->offset, ctl->unit, end); in steal_from_bitmap_to_end()
2512 j = find_next_zero_bit(bitmap->bitmap, BITS_PER_BITMAP, i); in steal_from_bitmap_to_end()
2515 bytes = (j - i) * ctl->unit; in steal_from_bitmap_to_end()
2516 info->bytes += bytes; in steal_from_bitmap_to_end()
2520 info->trim_state = BTRFS_TRIM_STATE_UNTRIMMED; in steal_from_bitmap_to_end()
2524 if (!bitmap->bytes) in steal_from_bitmap_to_end()
2541 bitmap_offset = offset_to_bitmap(ctl, info->offset); in steal_from_bitmap_to_front()
2543 if (bitmap_offset == info->offset) { in steal_from_bitmap_to_front()
2544 if (info->offset == 0) in steal_from_bitmap_to_front()
2546 bitmap_offset = offset_to_bitmap(ctl, info->offset - 1); in steal_from_bitmap_to_front()
2553 i = offset_to_bit(bitmap->offset, ctl->unit, info->offset) - 1; in steal_from_bitmap_to_front()
2555 prev_j = (unsigned long)-1; in steal_from_bitmap_to_front()
2556 for_each_clear_bit_from(j, bitmap->bitmap, BITS_PER_BITMAP) { in steal_from_bitmap_to_front()
2564 if (prev_j == (unsigned long)-1) in steal_from_bitmap_to_front()
2565 bytes = (i + 1) * ctl->unit; in steal_from_bitmap_to_front()
2567 bytes = (i - prev_j) * ctl->unit; in steal_from_bitmap_to_front()
2569 info->offset -= bytes; in steal_from_bitmap_to_front()
2570 info->bytes += bytes; in steal_from_bitmap_to_front()
2574 info->trim_state = BTRFS_TRIM_STATE_UNTRIMMED; in steal_from_bitmap_to_front()
2576 bitmap_clear_bits(ctl, bitmap, info->offset, bytes, update_stat); in steal_from_bitmap_to_front()
2578 if (!bitmap->bytes) in steal_from_bitmap_to_front()
2586 * non-clustered allocation requests. So when attempting to add a new extent
2591 * on 2 or more entries - even if the entries represent a contiguous free space
2603 ASSERT(!info->bitmap); in steal_from_bitmap()
2604 ASSERT(RB_EMPTY_NODE(&info->offset_index)); in steal_from_bitmap()
2606 if (ctl->total_bitmaps > 0) { in steal_from_bitmap()
2611 if (ctl->total_bitmaps > 0) in steal_from_bitmap()
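The steal_from_bitmap() comment above, together with the steal_from_bitmap_to_front()/steal_from_bitmap_to_end() fragments before it, describes pulling free bits that sit immediately next to a newly added extent entry out of the neighbouring bitmap, so that contiguous free space ends up in a single extent entry rather than split across entries. As an illustration only, a hypothetical user-space sketch of that merge, with a plain boolean bitmap indexed in allocation units (none of these names are the kernel's), might be:

/* Hypothetical model: grow a new extent over adjacent set (free) bits. */
#include <stdbool.h>
#include <stdio.h>

#define NBITS 64

struct extent_model { unsigned int start; unsigned int len; };

static void steal_to_end(bool *bm, struct extent_model *e)
{
	unsigned int i = e->start + e->len;

	while (i < NBITS && bm[i]) {
		bm[i] = false;   /* bit leaves the bitmap ... */
		e->len++;        /* ... and joins the extent  */
		i++;
	}
}

static void steal_to_front(bool *bm, struct extent_model *e)
{
	while (e->start > 0 && bm[e->start - 1]) {
		e->start--;
		e->len++;
		bm[e->start] = false;
	}
}

int main(void)
{
	bool bm[NBITS] = { false };
	struct extent_model e = { .start = 10, .len = 4 }; /* new extent [10, 14) */

	bm[8] = bm[9] = true;            /* free bits just before the extent */
	bm[14] = bm[15] = bm[16] = true; /* free bits just after the extent  */

	steal_to_end(bm, &e);
	steal_to_front(bm, &e);
	printf("merged extent: [%u, %u)\n", e.start, e.start + e.len); /* [8, 17) */
	return 0;
}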
2624 struct btrfs_fs_info *fs_info = block_group->fs_info; in __btrfs_add_free_space()
2625 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in __btrfs_add_free_space()
2634 return -ENOMEM; in __btrfs_add_free_space()
2636 info->offset = offset; in __btrfs_add_free_space()
2637 info->bytes = bytes; in __btrfs_add_free_space()
2638 info->trim_state = trim_state; in __btrfs_add_free_space()
2639 RB_CLEAR_NODE(&info->offset_index); in __btrfs_add_free_space()
2640 RB_CLEAR_NODE(&info->bytes_index); in __btrfs_add_free_space()
2642 spin_lock(&ctl->tree_lock); in __btrfs_add_free_space()
2662 * going to add the new free space to existing bitmap entries - because in __btrfs_add_free_space()
2668 filter_bytes = max(filter_bytes, info->bytes); in __btrfs_add_free_space()
2675 spin_unlock(&ctl->tree_lock); in __btrfs_add_free_space()
2679 ASSERT(ret != -EEXIST); in __btrfs_add_free_space()
2684 btrfs_discard_queue_work(&fs_info->discard_ctl, block_group); in __btrfs_add_free_space()
2693 struct btrfs_space_info *sinfo = block_group->space_info; in __btrfs_add_free_space_zoned()
2694 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in __btrfs_add_free_space_zoned()
2695 u64 offset = bytenr - block_group->start; in __btrfs_add_free_space_zoned()
2698 bool initial = (size == block_group->length); in __btrfs_add_free_space_zoned()
2701 WARN_ON(!initial && offset + size > block_group->zone_capacity); in __btrfs_add_free_space_zoned()
2704 bg_reclaim_threshold = READ_ONCE(sinfo->bg_reclaim_threshold); in __btrfs_add_free_space_zoned()
2706 spin_lock(&ctl->tree_lock); in __btrfs_add_free_space_zoned()
2710 to_free = block_group->zone_capacity; in __btrfs_add_free_space_zoned()
2711 else if (offset >= block_group->alloc_offset) in __btrfs_add_free_space_zoned()
2713 else if (offset + size <= block_group->alloc_offset) in __btrfs_add_free_space_zoned()
2716 to_free = offset + size - block_group->alloc_offset; in __btrfs_add_free_space_zoned()
2717 to_unusable = size - to_free; in __btrfs_add_free_space_zoned()
2719 ctl->free_space += to_free; in __btrfs_add_free_space_zoned()
2721 * If the block group is read-only, we should account freed space into in __btrfs_add_free_space_zoned()
2724 if (!block_group->ro) in __btrfs_add_free_space_zoned()
2725 block_group->zone_unusable += to_unusable; in __btrfs_add_free_space_zoned()
2726 spin_unlock(&ctl->tree_lock); in __btrfs_add_free_space_zoned()
2728 spin_lock(&block_group->lock); in __btrfs_add_free_space_zoned()
2729 block_group->alloc_offset -= size; in __btrfs_add_free_space_zoned()
2730 spin_unlock(&block_group->lock); in __btrfs_add_free_space_zoned()
2733 reclaimable_unusable = block_group->zone_unusable - in __btrfs_add_free_space_zoned()
2734 (block_group->length - block_group->zone_capacity); in __btrfs_add_free_space_zoned()
2736 if (block_group->zone_unusable == block_group->length) { in __btrfs_add_free_space_zoned()
2740 mult_perc(block_group->zone_capacity, bg_reclaim_threshold)) { in __btrfs_add_free_space_zoned()
2752 if (btrfs_is_zoned(block_group->fs_info)) in btrfs_add_free_space()
2756 if (btrfs_test_opt(block_group->fs_info, DISCARD_SYNC)) in btrfs_add_free_space()
2765 if (btrfs_is_zoned(block_group->fs_info)) in btrfs_add_free_space_unused()
2782 if (btrfs_is_zoned(block_group->fs_info)) in btrfs_add_free_space_async_trimmed()
2786 if (btrfs_test_opt(block_group->fs_info, DISCARD_SYNC) || in btrfs_add_free_space_async_trimmed()
2787 btrfs_test_opt(block_group->fs_info, DISCARD_ASYNC)) in btrfs_add_free_space_async_trimmed()
2796 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in btrfs_remove_free_space()
2801 if (btrfs_is_zoned(block_group->fs_info)) { in btrfs_remove_free_space()
2804 * Since the allocation info of tree-log nodes are not recorded in btrfs_remove_free_space()
2805 * to the extent-tree, calculate_alloc_pointer() failed to in btrfs_remove_free_space()
2811 * Advance the pointer not to overwrite the tree-log nodes. in btrfs_remove_free_space()
2813 if (block_group->start + block_group->alloc_offset < in btrfs_remove_free_space()
2815 block_group->alloc_offset = in btrfs_remove_free_space()
2816 offset + bytes - block_group->start; in btrfs_remove_free_space()
2821 spin_lock(&ctl->tree_lock); in btrfs_remove_free_space()
2848 if (!info->bitmap) { in btrfs_remove_free_space()
2850 if (offset == info->offset) { in btrfs_remove_free_space()
2851 u64 to_free = min(bytes, info->bytes); in btrfs_remove_free_space()
2853 info->bytes -= to_free; in btrfs_remove_free_space()
2854 info->offset += to_free; in btrfs_remove_free_space()
2855 if (info->bytes) { in btrfs_remove_free_space()
2863 bytes -= to_free; in btrfs_remove_free_space()
2866 u64 old_end = info->bytes + info->offset; in btrfs_remove_free_space()
2868 info->bytes = offset - info->offset; in btrfs_remove_free_space()
2876 bytes -= old_end - offset; in btrfs_remove_free_space()
2883 spin_unlock(&ctl->tree_lock); in btrfs_remove_free_space()
2887 old_end - (offset + bytes), in btrfs_remove_free_space()
2888 info->trim_state); in btrfs_remove_free_space()
2895 if (ret == -EAGAIN) { in btrfs_remove_free_space()
2901 spin_unlock(&ctl->tree_lock); in btrfs_remove_free_space()
2909 struct btrfs_fs_info *fs_info = block_group->fs_info; in btrfs_dump_free_space()
2910 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in btrfs_dump_free_space()
2916 * Zoned btrfs does not use free space tree and cluster. Just print in btrfs_dump_free_space()
2921 block_group->zone_capacity - block_group->alloc_offset, in btrfs_dump_free_space()
2923 &block_group->runtime_flags)); in btrfs_dump_free_space()
2927 spin_lock(&ctl->tree_lock); in btrfs_dump_free_space()
2928 for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) { in btrfs_dump_free_space()
2930 if (info->bytes >= bytes && !block_group->ro) in btrfs_dump_free_space()
2933 info->offset, info->bytes, in btrfs_dump_free_space()
2934 (info->bitmap) ? "yes" : "no"); in btrfs_dump_free_space()
2936 spin_unlock(&ctl->tree_lock); in btrfs_dump_free_space()
2937 btrfs_info(fs_info, "block group has cluster?: %s", in btrfs_dump_free_space()
2938 list_empty(&block_group->cluster_list) ? "no" : "yes"); in btrfs_dump_free_space()
2947 struct btrfs_fs_info *fs_info = block_group->fs_info; in btrfs_init_free_space_ctl()
2949 spin_lock_init(&ctl->tree_lock); in btrfs_init_free_space_ctl()
2950 ctl->unit = fs_info->sectorsize; in btrfs_init_free_space_ctl()
2951 ctl->start = block_group->start; in btrfs_init_free_space_ctl()
2952 ctl->block_group = block_group; in btrfs_init_free_space_ctl()
2953 ctl->op = &free_space_op; in btrfs_init_free_space_ctl()
2954 ctl->free_space_bytes = RB_ROOT_CACHED; in btrfs_init_free_space_ctl()
2955 INIT_LIST_HEAD(&ctl->trimming_ranges); in btrfs_init_free_space_ctl()
2956 mutex_init(&ctl->cache_writeout_mutex); in btrfs_init_free_space_ctl()
2963 ctl->extents_thresh = (SZ_32K / 2) / sizeof(struct btrfs_free_space); in btrfs_init_free_space_ctl()
2967 * for a given cluster, put all of its extents back into the free
2969 * pointed to by the cluster, someone else raced in and freed the
2970 * cluster already. In that case, we just return without changing anything
2974 struct btrfs_free_cluster *cluster) in __btrfs_return_cluster_to_free_space() argument
2976 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in __btrfs_return_cluster_to_free_space()
2979 lockdep_assert_held(&ctl->tree_lock); in __btrfs_return_cluster_to_free_space()
2981 spin_lock(&cluster->lock); in __btrfs_return_cluster_to_free_space()
2982 if (cluster->block_group != block_group) { in __btrfs_return_cluster_to_free_space()
2983 spin_unlock(&cluster->lock); in __btrfs_return_cluster_to_free_space()
2987 cluster->block_group = NULL; in __btrfs_return_cluster_to_free_space()
2988 cluster->window_start = 0; in __btrfs_return_cluster_to_free_space()
2989 list_del_init(&cluster->block_group_list); in __btrfs_return_cluster_to_free_space()
2991 node = rb_first(&cluster->root); in __btrfs_return_cluster_to_free_space()
2996 node = rb_next(&entry->offset_index); in __btrfs_return_cluster_to_free_space()
2997 rb_erase(&entry->offset_index, &cluster->root); in __btrfs_return_cluster_to_free_space()
2998 RB_CLEAR_NODE(&entry->offset_index); in __btrfs_return_cluster_to_free_space()
3000 if (!entry->bitmap) { in __btrfs_return_cluster_to_free_space()
3003 ctl->discardable_extents[BTRFS_STAT_CURR]--; in __btrfs_return_cluster_to_free_space()
3004 ctl->discardable_bytes[BTRFS_STAT_CURR] -= in __btrfs_return_cluster_to_free_space()
3005 entry->bytes; in __btrfs_return_cluster_to_free_space()
3013 ctl->discardable_extents[BTRFS_STAT_CURR]++; in __btrfs_return_cluster_to_free_space()
3014 ctl->discardable_bytes[BTRFS_STAT_CURR] += in __btrfs_return_cluster_to_free_space()
3015 entry->bytes; in __btrfs_return_cluster_to_free_space()
3019 rb_add_cached(&entry->bytes_index, &ctl->free_space_bytes, in __btrfs_return_cluster_to_free_space()
3022 cluster->root = RB_ROOT; in __btrfs_return_cluster_to_free_space()
3023 spin_unlock(&cluster->lock); in __btrfs_return_cluster_to_free_space()
3029 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in btrfs_remove_free_space_cache()
3030 struct btrfs_free_cluster *cluster; in btrfs_remove_free_space_cache() local
3033 spin_lock(&ctl->tree_lock); in btrfs_remove_free_space_cache()
3034 while ((head = block_group->cluster_list.next) != in btrfs_remove_free_space_cache()
3035 &block_group->cluster_list) { in btrfs_remove_free_space_cache()
3036 cluster = list_entry(head, struct btrfs_free_cluster, in btrfs_remove_free_space_cache()
3039 WARN_ON(cluster->block_group != block_group); in btrfs_remove_free_space_cache()
3040 __btrfs_return_cluster_to_free_space(block_group, cluster); in btrfs_remove_free_space_cache()
3042 cond_resched_lock(&ctl->tree_lock); in btrfs_remove_free_space_cache()
3046 spin_unlock(&ctl->tree_lock); in btrfs_remove_free_space_cache()
3055 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in btrfs_is_free_space_trimmed()
3060 spin_lock(&ctl->tree_lock); in btrfs_is_free_space_trimmed()
3061 node = rb_first(&ctl->free_space_offset); in btrfs_is_free_space_trimmed()
3074 spin_unlock(&ctl->tree_lock); in btrfs_is_free_space_trimmed()
3082 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in btrfs_find_space_for_alloc()
3084 &block_group->fs_info->discard_ctl; in btrfs_find_space_for_alloc()
3091 bool use_bytes_index = (offset == block_group->start); in btrfs_find_space_for_alloc()
3093 ASSERT(!btrfs_is_zoned(block_group->fs_info)); in btrfs_find_space_for_alloc()
3095 spin_lock(&ctl->tree_lock); in btrfs_find_space_for_alloc()
3097 block_group->full_stripe_len, max_extent_size, in btrfs_find_space_for_alloc()
3103 if (entry->bitmap) { in btrfs_find_space_for_alloc()
3107 atomic64_add(bytes, &discard_ctl->discard_bytes_saved); in btrfs_find_space_for_alloc()
3109 if (!entry->bytes) in btrfs_find_space_for_alloc()
3113 align_gap_len = offset - entry->offset; in btrfs_find_space_for_alloc()
3114 align_gap = entry->offset; in btrfs_find_space_for_alloc()
3115 align_gap_trim_state = entry->trim_state; in btrfs_find_space_for_alloc()
3118 atomic64_add(bytes, &discard_ctl->discard_bytes_saved); in btrfs_find_space_for_alloc()
3120 entry->offset = offset + bytes; in btrfs_find_space_for_alloc()
3121 WARN_ON(entry->bytes < bytes + align_gap_len); in btrfs_find_space_for_alloc()
3123 entry->bytes -= bytes + align_gap_len; in btrfs_find_space_for_alloc()
3124 if (!entry->bytes) in btrfs_find_space_for_alloc()
3131 spin_unlock(&ctl->tree_lock); in btrfs_find_space_for_alloc()
3140 * given a cluster, put all of its extents back into the free space
3142 * a cluster that belongs to the passed block group.
3145 * cluster and remove the cluster from it.
3149 struct btrfs_free_cluster *cluster) in btrfs_return_cluster_to_free_space() argument
3154 spin_lock(&cluster->lock); in btrfs_return_cluster_to_free_space()
3156 block_group = cluster->block_group; in btrfs_return_cluster_to_free_space()
3158 spin_unlock(&cluster->lock); in btrfs_return_cluster_to_free_space()
3161 } else if (cluster->block_group != block_group) { in btrfs_return_cluster_to_free_space()
3163 spin_unlock(&cluster->lock); in btrfs_return_cluster_to_free_space()
3167 spin_unlock(&cluster->lock); in btrfs_return_cluster_to_free_space()
3169 ctl = block_group->free_space_ctl; in btrfs_return_cluster_to_free_space()
3171 /* now return any extents the cluster had on it */ in btrfs_return_cluster_to_free_space()
3172 spin_lock(&ctl->tree_lock); in btrfs_return_cluster_to_free_space()
3173 __btrfs_return_cluster_to_free_space(block_group, cluster); in btrfs_return_cluster_to_free_space()
3174 spin_unlock(&ctl->tree_lock); in btrfs_return_cluster_to_free_space()
3176 btrfs_discard_queue_work(&block_group->fs_info->discard_ctl, block_group); in btrfs_return_cluster_to_free_space()
3183 struct btrfs_free_cluster *cluster, in btrfs_alloc_from_bitmap() argument
3188 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in btrfs_alloc_from_bitmap()
3190 u64 search_start = cluster->window_start; in btrfs_alloc_from_bitmap()
3211 * given a cluster, try to allocate 'bytes' from it, returns 0
3216 struct btrfs_free_cluster *cluster, u64 bytes, in btrfs_alloc_from_cluster() argument
3219 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in btrfs_alloc_from_cluster()
3221 &block_group->fs_info->discard_ctl; in btrfs_alloc_from_cluster()
3226 ASSERT(!btrfs_is_zoned(block_group->fs_info)); in btrfs_alloc_from_cluster()
3228 spin_lock(&cluster->lock); in btrfs_alloc_from_cluster()
3229 if (bytes > cluster->max_size) in btrfs_alloc_from_cluster()
3232 if (cluster->block_group != block_group) in btrfs_alloc_from_cluster()
3235 node = rb_first(&cluster->root); in btrfs_alloc_from_cluster()
3241 if (entry->bytes < bytes) in btrfs_alloc_from_cluster()
3245 if (entry->bytes < bytes || in btrfs_alloc_from_cluster()
3246 (!entry->bitmap && entry->offset < min_start)) { in btrfs_alloc_from_cluster()
3247 node = rb_next(&entry->offset_index); in btrfs_alloc_from_cluster()
3255 if (entry->bitmap) { in btrfs_alloc_from_cluster()
3257 cluster, entry, bytes, in btrfs_alloc_from_cluster()
3258 cluster->window_start, in btrfs_alloc_from_cluster()
3261 node = rb_next(&entry->offset_index); in btrfs_alloc_from_cluster()
3268 cluster->window_start += bytes; in btrfs_alloc_from_cluster()
3270 ret = entry->offset; in btrfs_alloc_from_cluster()
3272 entry->offset += bytes; in btrfs_alloc_from_cluster()
3273 entry->bytes -= bytes; in btrfs_alloc_from_cluster()
3279 spin_unlock(&cluster->lock); in btrfs_alloc_from_cluster()
3284 spin_lock(&ctl->tree_lock); in btrfs_alloc_from_cluster()
3287 atomic64_add(bytes, &discard_ctl->discard_bytes_saved); in btrfs_alloc_from_cluster()
3289 ctl->free_space -= bytes; in btrfs_alloc_from_cluster()
3290 if (!entry->bitmap && !btrfs_free_space_trimmed(entry)) in btrfs_alloc_from_cluster()
3291 ctl->discardable_bytes[BTRFS_STAT_CURR] -= bytes; in btrfs_alloc_from_cluster()
3293 spin_lock(&cluster->lock); in btrfs_alloc_from_cluster()
3294 if (entry->bytes == 0) { in btrfs_alloc_from_cluster()
3295 rb_erase(&entry->offset_index, &cluster->root); in btrfs_alloc_from_cluster()
3296 ctl->free_extents--; in btrfs_alloc_from_cluster()
3297 if (entry->bitmap) { in btrfs_alloc_from_cluster()
3299 entry->bitmap); in btrfs_alloc_from_cluster()
3300 ctl->total_bitmaps--; in btrfs_alloc_from_cluster()
3303 ctl->discardable_extents[BTRFS_STAT_CURR]--; in btrfs_alloc_from_cluster()
3308 spin_unlock(&cluster->lock); in btrfs_alloc_from_cluster()
3309 spin_unlock(&ctl->tree_lock); in btrfs_alloc_from_cluster()
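/*
 * Editor's sketch of the bookkeeping above for a plain (non-bitmap) entry: an
 * allocation carves "bytes" off the front of the free extent and returns the
 * old start.  Toy type with hypothetical names; in the real code this happens
 * on struct btrfs_free_space under cluster->lock, with ctl->free_space and the
 * discardable stats adjusted afterwards under ctl->tree_lock.
 */
struct toy_free_extent {
	u64 offset;	/* start of the free range */
	u64 bytes;	/* length of the free range */
};

static u64 example_carve_front(struct toy_free_extent *entry, u64 bytes)
{
	u64 ret;

	if (entry->bytes < bytes)
		return 0;		/* not enough room in this entry */
	ret = entry->offset;		/* start of the allocation */
	entry->offset += bytes;		/* shrink the entry from the front */
	entry->bytes -= bytes;
	return ret;
}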
3316 struct btrfs_free_cluster *cluster, in btrfs_bitmap_cluster() argument
3320 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in btrfs_bitmap_cluster()
3331 lockdep_assert_held(&ctl->tree_lock); in btrfs_bitmap_cluster()
3333 i = offset_to_bit(entry->offset, ctl->unit, in btrfs_bitmap_cluster()
3334 max_t(u64, offset, entry->offset)); in btrfs_bitmap_cluster()
3335 want_bits = bytes_to_bits(bytes, ctl->unit); in btrfs_bitmap_cluster()
3336 min_bits = bytes_to_bits(min_bytes, ctl->unit); in btrfs_bitmap_cluster()
3339	 * Don't bother looking for a cluster in this bitmap if it's heavily in btrfs_bitmap_cluster()
3340	 * fragmented. in btrfs_bitmap_cluster()
3342 if (entry->max_extent_size && in btrfs_bitmap_cluster()
3343 entry->max_extent_size < cont1_bytes) in btrfs_bitmap_cluster()
3344 return -ENOSPC; in btrfs_bitmap_cluster()
3347 for_each_set_bit_from(i, entry->bitmap, BITS_PER_BITMAP) { in btrfs_bitmap_cluster()
3348 next_zero = find_next_zero_bit(entry->bitmap, in btrfs_bitmap_cluster()
3350 if (next_zero - i >= min_bits) { in btrfs_bitmap_cluster()
3351 found_bits = next_zero - i; in btrfs_bitmap_cluster()
3356 if (next_zero - i > max_bits) in btrfs_bitmap_cluster()
3357 max_bits = next_zero - i; in btrfs_bitmap_cluster()
3362 entry->max_extent_size = (u64)max_bits * ctl->unit; in btrfs_bitmap_cluster()
3363 return -ENOSPC; in btrfs_bitmap_cluster()
3368 cluster->max_size = 0; in btrfs_bitmap_cluster()
3373 if (cluster->max_size < found_bits * ctl->unit) in btrfs_bitmap_cluster()
3374 cluster->max_size = found_bits * ctl->unit; in btrfs_bitmap_cluster()
3376 if (total_found < want_bits || cluster->max_size < cont1_bytes) { in btrfs_bitmap_cluster()
3381 cluster->window_start = start * ctl->unit + entry->offset; in btrfs_bitmap_cluster()
3382 rb_erase(&entry->offset_index, &ctl->free_space_offset); in btrfs_bitmap_cluster()
3383 rb_erase_cached(&entry->bytes_index, &ctl->free_space_bytes); in btrfs_bitmap_cluster()
3386	 * We need to know if we're currently on the normal space index when we in btrfs_bitmap_cluster()
3387	 * manipulate the bitmap so that we know we need to remove and re-insert in btrfs_bitmap_cluster()
3388	 * it into the space index tree.  Clear the bytes_index node here so the in btrfs_bitmap_cluster()
3389	 * bitmap manipulation helpers know not to mess with the bytes_index in btrfs_bitmap_cluster()
3390	 * until this bitmap entry is added back into the normal cache. in btrfs_bitmap_cluster()
3392 RB_CLEAR_NODE(&entry->bytes_index); in btrfs_bitmap_cluster()
3394 ret = tree_insert_offset(ctl, cluster, entry); in btrfs_bitmap_cluster()
3395 ASSERT(!ret); /* -EEXIST; Logic error */ in btrfs_bitmap_cluster()
3397 trace_btrfs_setup_cluster(block_group, cluster, in btrfs_bitmap_cluster()
3398 total_found * ctl->unit, 1); in btrfs_bitmap_cluster()
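/*
 * Editor's self-contained sketch of the run scan above: each set bit is one
 * ctl->unit (e.g. one sectorsize) of free space.  Runs of at least min_bits
 * count toward the cluster window; the longest run seen is what gets cached in
 * entry->max_extent_size when the scan fails.  The helper name and the nbits
 * parameter are made up for the example; the real code walks entry->bitmap
 * over BITS_PER_BITMAP bits.
 */
static unsigned long example_scan_bitmap_runs(const unsigned long *bitmap,
					      unsigned long nbits,
					      unsigned long min_bits,
					      unsigned long *max_bits)
{
	unsigned long i = 0, next_zero, total_found = 0;

	*max_bits = 0;
	while ((i = find_next_bit(bitmap, nbits, i)) < nbits) {
		next_zero = find_next_zero_bit(bitmap, nbits, i);
		if (next_zero - i >= min_bits)
			total_found += next_zero - i;	/* usable run */
		if (next_zero - i > *max_bits)
			*max_bits = next_zero - i;	/* longest run so far */
		i = next_zero;				/* jump past this run */
	}
	return total_found;	/* caller compares against want_bits */
}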
3403 * This searches the block group for just extents to fill the cluster with.
3404 * Try to find a cluster with at least bytes total bytes, at least one
3405 * extent of cont1_bytes, and other clusters of at least min_bytes.
3409 struct btrfs_free_cluster *cluster, in setup_cluster_no_bitmap() argument
3413 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in setup_cluster_no_bitmap()
3422 lockdep_assert_held(&ctl->tree_lock); in setup_cluster_no_bitmap()
3426 return -ENOSPC; in setup_cluster_no_bitmap()
3432 while (entry->bitmap || entry->bytes < min_bytes) { in setup_cluster_no_bitmap()
3433 if (entry->bitmap && list_empty(&entry->list)) in setup_cluster_no_bitmap()
3434 list_add_tail(&entry->list, bitmaps); in setup_cluster_no_bitmap()
3435 node = rb_next(&entry->offset_index); in setup_cluster_no_bitmap()
3437 return -ENOSPC; in setup_cluster_no_bitmap()
3441 window_free = entry->bytes; in setup_cluster_no_bitmap()
3442 max_extent = entry->bytes; in setup_cluster_no_bitmap()
3446 for (node = rb_next(&entry->offset_index); node; in setup_cluster_no_bitmap()
3447 node = rb_next(&entry->offset_index)) { in setup_cluster_no_bitmap()
3450 if (entry->bitmap) { in setup_cluster_no_bitmap()
3451 if (list_empty(&entry->list)) in setup_cluster_no_bitmap()
3452 list_add_tail(&entry->list, bitmaps); in setup_cluster_no_bitmap()
3456 if (entry->bytes < min_bytes) in setup_cluster_no_bitmap()
3460 window_free += entry->bytes; in setup_cluster_no_bitmap()
3461 if (entry->bytes > max_extent) in setup_cluster_no_bitmap()
3462 max_extent = entry->bytes; in setup_cluster_no_bitmap()
3466 return -ENOSPC; in setup_cluster_no_bitmap()
3468 cluster->window_start = first->offset; in setup_cluster_no_bitmap()
3470 node = &first->offset_index; in setup_cluster_no_bitmap()
3473	 * now we've found our entries, pull them out of the free space in setup_cluster_no_bitmap()
3474	 * cache and put them into the cluster rbtree in setup_cluster_no_bitmap()
3480 node = rb_next(&entry->offset_index); in setup_cluster_no_bitmap()
3481 if (entry->bitmap || entry->bytes < min_bytes) in setup_cluster_no_bitmap()
3484 rb_erase(&entry->offset_index, &ctl->free_space_offset); in setup_cluster_no_bitmap()
3485 rb_erase_cached(&entry->bytes_index, &ctl->free_space_bytes); in setup_cluster_no_bitmap()
3486 ret = tree_insert_offset(ctl, cluster, entry); in setup_cluster_no_bitmap()
3487 total_size += entry->bytes; in setup_cluster_no_bitmap()
3488 ASSERT(!ret); /* -EEXIST; Logic error */ in setup_cluster_no_bitmap()
3491 cluster->max_size = max_extent; in setup_cluster_no_bitmap()
3492 trace_btrfs_setup_cluster(block_group, cluster, total_size, 0); in setup_cluster_no_bitmap()
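/*
 * Editor's sketch of the window accounting above: walk extent entries in
 * offset order, set bitmaps aside for the fallback pass, ignore entries
 * smaller than min_bytes, and keep a running total plus the largest single
 * extent.  The window is good enough once it holds at least "bytes" in total
 * and one extent of at least cont1_bytes.  Toy array instead of the rbtree;
 * names are hypothetical.
 */
static bool example_window_is_enough(const u64 *extent_sizes, int nr,
				     u64 bytes, u64 cont1_bytes, u64 min_bytes)
{
	u64 window_free = 0, max_extent = 0;
	int i;

	for (i = 0; i < nr; i++) {
		if (extent_sizes[i] < min_bytes)
			continue;			/* too small to help */
		window_free += extent_sizes[i];
		if (extent_sizes[i] > max_extent)
			max_extent = extent_sizes[i];
	}
	return window_free >= bytes && max_extent >= cont1_bytes;
}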
3497 * This specifically looks for bitmaps that may work in the cluster, we assume
3498 * that we have already failed to find extents that will work.
3502 struct btrfs_free_cluster *cluster, in setup_cluster_bitmap() argument
3506 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in setup_cluster_bitmap()
3508 int ret = -ENOSPC; in setup_cluster_bitmap()
3511 if (ctl->total_bitmaps == 0) in setup_cluster_bitmap()
3512 return -ENOSPC; in setup_cluster_bitmap()
3521 if (!entry || entry->offset != bitmap_offset) { in setup_cluster_bitmap()
3523 if (entry && list_empty(&entry->list)) in setup_cluster_bitmap()
3524 list_add(&entry->list, bitmaps); in setup_cluster_bitmap()
3528 if (entry->bytes < bytes) in setup_cluster_bitmap()
3530 ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset, in setup_cluster_bitmap()
3540 return -ENOSPC; in setup_cluster_bitmap()
3544 * here we try to find a cluster of blocks in a block group.  The goal
3545 * is to find at least bytes+empty_size.
3546 * We might not find them all in one contiguous area.
3547 *
3548 * returns zero and sets up cluster if things worked out, otherwise
3549 * it returns -enospc
3552 struct btrfs_free_cluster *cluster, in btrfs_find_space_cluster() argument
3555 struct btrfs_fs_info *fs_info = block_group->fs_info; in btrfs_find_space_cluster()
3556 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in btrfs_find_space_cluster()
3564	 * Choose the minimum extent size we'll require for this in btrfs_find_space_cluster()
3565	 * cluster. For SSD_SPREAD, don't allow any fragmentation. in btrfs_find_space_cluster()
3566	 * For metadata, allow allocates with smaller extents.  For in btrfs_find_space_cluster()
3567	 * data, keep it dense. in btrfs_find_space_cluster()
3572 } else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) { in btrfs_find_space_cluster()
3574 min_bytes = fs_info->sectorsize; in btrfs_find_space_cluster()
3577 min_bytes = fs_info->sectorsize; in btrfs_find_space_cluster()
3580 spin_lock(&ctl->tree_lock); in btrfs_find_space_cluster()
3583	 * If we know we don't have enough space to make a cluster don't even in btrfs_find_space_cluster()
3584	 * bother doing all the work to try and find one. in btrfs_find_space_cluster()
3586 if (ctl->free_space < bytes) { in btrfs_find_space_cluster()
3587 spin_unlock(&ctl->tree_lock); in btrfs_find_space_cluster()
3588 return -ENOSPC; in btrfs_find_space_cluster()
3591 spin_lock(&cluster->lock); in btrfs_find_space_cluster()
3593 /* someone already found a cluster, hooray */ in btrfs_find_space_cluster()
3594 if (cluster->block_group) { in btrfs_find_space_cluster()
3602 ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset, in btrfs_find_space_cluster()
3606 ret = setup_cluster_bitmap(block_group, cluster, &bitmaps, in btrfs_find_space_cluster()
3612 list_del_init(&entry->list); in btrfs_find_space_cluster()
3616 list_add_tail(&cluster->block_group_list, in btrfs_find_space_cluster()
3617 &block_group->cluster_list); in btrfs_find_space_cluster()
3618 cluster->block_group = block_group; in btrfs_find_space_cluster()
3623 spin_unlock(&cluster->lock); in btrfs_find_space_cluster()
3624 spin_unlock(&ctl->tree_lock); in btrfs_find_space_cluster()
3630 * simple code to zero out a cluster
3632 void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster) in btrfs_init_free_cluster() argument
3634 spin_lock_init(&cluster->lock); in btrfs_init_free_cluster()
3635 spin_lock_init(&cluster->refill_lock); in btrfs_init_free_cluster()
3636 cluster->root = RB_ROOT; in btrfs_init_free_cluster()
3637 cluster->max_size = 0; in btrfs_init_free_cluster()
3638 cluster->fragmented = false; in btrfs_init_free_cluster()
3639 INIT_LIST_HEAD(&cluster->block_group_list); in btrfs_init_free_cluster()
3640 cluster->block_group = NULL; in btrfs_init_free_cluster()
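/*
 * Editor's sketch of the cluster lifecycle implied by the functions above:
 * initialise the cluster once, ask a block group to fill it, carve an
 * allocation out of it, then drain the remainder back into the free space
 * cache.  Illustrative only: the parameters past what this excerpt shows
 * (offset/bytes/empty_size, min_start and the max_extent_size out pointer)
 * are recalled from free-space-cache.h and may differ between versions, and
 * the real allocator in extent-tree.c keeps clusters long-lived and holds
 * cluster->refill_lock around refill and allocation.
 */
static u64 example_cluster_cycle(struct btrfs_block_group *bg,
				 struct btrfs_free_cluster *cluster,
				 u64 offset, u64 bytes, u64 empty_size)
{
	u64 start, max_extent_size = 0;

	btrfs_init_free_cluster(cluster);

	/* Build a dense window of free space in "bg" near "offset". */
	if (btrfs_find_space_cluster(bg, cluster, offset, bytes, empty_size))
		return 0;	/* -ENOSPC: no suitable window found */

	/* Carve "bytes" out of the window; 0 means the cluster ran dry. */
	start = btrfs_alloc_from_cluster(bg, cluster, bytes, offset,
					 &max_extent_size);

	/* Hand whatever is left back to the per-block-group cache. */
	btrfs_return_cluster_to_free_space(bg, cluster);
	return start;
}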
3649 struct btrfs_space_info *space_info = block_group->space_info; in do_trimming()
3650 struct btrfs_fs_info *fs_info = block_group->fs_info; in do_trimming()
3651 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in do_trimming()
3659 spin_lock(&space_info->lock); in do_trimming()
3660 spin_lock(&block_group->lock); in do_trimming()
3661 if (!block_group->ro) { in do_trimming()
3662 block_group->reserved += reserved_bytes; in do_trimming()
3663 space_info->bytes_reserved += reserved_bytes; in do_trimming()
3666 spin_unlock(&block_group->lock); in do_trimming()
3667 spin_unlock(&space_info->lock); in do_trimming()
3675 mutex_lock(&ctl->cache_writeout_mutex); in do_trimming()
3678 start - reserved_start, in do_trimming()
3681 __btrfs_add_free_space(block_group, end, reserved_end - end, in do_trimming()
3684 list_del(&trim_entry->list); in do_trimming()
3685 mutex_unlock(&ctl->cache_writeout_mutex); in do_trimming()
3688 spin_lock(&space_info->lock); in do_trimming()
3689 spin_lock(&block_group->lock); in do_trimming()
3690 if (block_group->ro) in do_trimming()
3691 space_info->bytes_readonly += reserved_bytes; in do_trimming()
3692 block_group->reserved -= reserved_bytes; in do_trimming()
3693 space_info->bytes_reserved -= reserved_bytes; in do_trimming()
3694 spin_unlock(&block_group->lock); in do_trimming()
3695 spin_unlock(&space_info->lock); in do_trimming()
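/*
 * Editor's worked example for the re-add above: do_trimming() reserves
 * [reserved_start, reserved_end) but only discards [start, end), so the
 * untouched head and tail go back into the free space cache.  For example,
 * reserving [0, 192K) while trimming [64K, 128K) re-adds 64K at offset 0 and
 * 64K at offset 128K.  Toy helper with hypothetical names.
 */
struct toy_trim_leftover {
	u64 head_start, head_len;	/* untrimmed piece before "start" */
	u64 tail_start, tail_len;	/* untrimmed piece after "end" */
};

static struct toy_trim_leftover example_trim_leftovers(u64 reserved_start,
						       u64 reserved_end,
						       u64 start, u64 end)
{
	struct toy_trim_leftover l = {
		.head_start = reserved_start,
		.head_len   = start - reserved_start,
		.tail_start = end,
		.tail_len   = reserved_end - end,
	};

	return l;
}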
3709 &block_group->fs_info->discard_ctl; in trim_no_bitmap()
3710 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in trim_no_bitmap()
3718 const u64 max_discard_size = READ_ONCE(discard_ctl->max_discard_size); in trim_no_bitmap()
3723 mutex_lock(&ctl->cache_writeout_mutex); in trim_no_bitmap()
3724 spin_lock(&ctl->tree_lock); in trim_no_bitmap()
3726 if (ctl->free_space < minlen) in trim_no_bitmap()
3734 while (entry->bitmap || in trim_no_bitmap()
3736 node = rb_next(&entry->offset_index); in trim_no_bitmap()
3743 if (entry->offset >= end) in trim_no_bitmap()
3746 extent_start = entry->offset; in trim_no_bitmap()
3747 extent_bytes = entry->bytes; in trim_no_bitmap()
3748 extent_trim_state = entry->trim_state; in trim_no_bitmap()
3750 start = entry->offset; in trim_no_bitmap()
3751 bytes = entry->bytes; in trim_no_bitmap()
3753 spin_unlock(&ctl->tree_lock); in trim_no_bitmap()
3754 mutex_unlock(&ctl->cache_writeout_mutex); in trim_no_bitmap()
3768 entry->offset += max_discard_size; in trim_no_bitmap()
3769 entry->bytes -= max_discard_size; in trim_no_bitmap()
3776 bytes = min(extent_start + extent_bytes, end) - start; in trim_no_bitmap()
3778 spin_unlock(&ctl->tree_lock); in trim_no_bitmap()
3779 mutex_unlock(&ctl->cache_writeout_mutex); in trim_no_bitmap()
3787 spin_unlock(&ctl->tree_lock); in trim_no_bitmap()
3790 list_add_tail(&trim_entry.list, &ctl->trimming_ranges); in trim_no_bitmap()
3791 mutex_unlock(&ctl->cache_writeout_mutex); in trim_no_bitmap()
3797 block_group->discard_cursor = start + bytes; in trim_no_bitmap()
3802 block_group->discard_cursor = start; in trim_no_bitmap()
3807 ret = -ERESTARTSYS; in trim_no_bitmap()
3817 block_group->discard_cursor = btrfs_block_group_end(block_group); in trim_no_bitmap()
3818 spin_unlock(&ctl->tree_lock); in trim_no_bitmap()
3819 mutex_unlock(&ctl->cache_writeout_mutex); in trim_no_bitmap()
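/*
 * Editor's sketch of the chunking above: async discard caps one discard at
 * max_discard_size, so a large free extent is trimmed chunk by chunk and the
 * entry is advanced past each chunk that was just handed to the block layer.
 * The loop and the issue_discard callback are hypothetical; the real code
 * drops and re-takes the locks between chunks and re-walks the tree.
 */
static void example_chunked_trim(u64 start, u64 len, u64 max_discard_size,
				 void (*issue_discard)(u64 start, u64 len))
{
	while (len) {
		u64 this_len = len;

		if (max_discard_size && this_len > max_discard_size)
			this_len = max_discard_size;	/* cap one discard */
		issue_discard(start, this_len);
		start += this_len;			/* advance the entry */
		len -= this_len;
	}
}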
3842 spin_lock(&ctl->tree_lock); in reset_trimming_bitmap()
3846 ctl->discardable_extents[BTRFS_STAT_CURR] += in reset_trimming_bitmap()
3847 entry->bitmap_extents; in reset_trimming_bitmap()
3848 ctl->discardable_bytes[BTRFS_STAT_CURR] += entry->bytes; in reset_trimming_bitmap()
3850 entry->trim_state = BTRFS_TRIM_STATE_UNTRIMMED; in reset_trimming_bitmap()
3853 spin_unlock(&ctl->tree_lock); in reset_trimming_bitmap()
3860 entry->trim_state = BTRFS_TRIM_STATE_TRIMMED; in end_trimming_bitmap()
3861 ctl->discardable_extents[BTRFS_STAT_CURR] -= in end_trimming_bitmap()
3862 entry->bitmap_extents; in end_trimming_bitmap()
3863 ctl->discardable_bytes[BTRFS_STAT_CURR] -= entry->bytes; in end_trimming_bitmap()
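/*
 * Editor's note on the two helpers above: the discardable_* stats only count
 * untrimmed free space.  Marking a bitmap fully trimmed therefore subtracts
 * its extents/bytes, and resetting it to untrimmed adds them back.  Minimal
 * toy illustration with hypothetical names.
 */
static void example_track_discardable(s64 *discardable_bytes, u64 entry_bytes,
				      bool now_trimmed)
{
	if (now_trimmed)
		*discardable_bytes -= entry_bytes;	/* as in end_trimming_bitmap() */
	else
		*discardable_bytes += entry_bytes;	/* as in reset_trimming_bitmap() */
}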
3875 &block_group->fs_info->discard_ctl; in trim_bitmaps()
3876 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in trim_bitmaps()
3882 const u64 max_discard_size = READ_ONCE(discard_ctl->max_discard_size); in trim_bitmaps()
3888 mutex_lock(&ctl->cache_writeout_mutex); in trim_bitmaps()
3889 spin_lock(&ctl->tree_lock); in trim_bitmaps()
3891 if (ctl->free_space < minlen) { in trim_bitmaps()
3892 block_group->discard_cursor = in trim_bitmaps()
3894 spin_unlock(&ctl->tree_lock); in trim_bitmaps()
3895 mutex_unlock(&ctl->cache_writeout_mutex); in trim_bitmaps()
3906 * which is the only discard index which sets minlen to 0. in trim_bitmaps()
3910 spin_unlock(&ctl->tree_lock); in trim_bitmaps()
3911 mutex_unlock(&ctl->cache_writeout_mutex); in trim_bitmaps()
3923 entry->trim_state = BTRFS_TRIM_STATE_TRIMMING; in trim_bitmaps()
3935 entry->trim_state = BTRFS_TRIM_STATE_UNTRIMMED; in trim_bitmaps()
3936 spin_unlock(&ctl->tree_lock); in trim_bitmaps()
3937 mutex_unlock(&ctl->cache_writeout_mutex); in trim_bitmaps()
3947 spin_unlock(&ctl->tree_lock); in trim_bitmaps()
3948 mutex_unlock(&ctl->cache_writeout_mutex); in trim_bitmaps()
3952 bytes = min(bytes, end - start); in trim_bitmaps()
3954 spin_unlock(&ctl->tree_lock); in trim_bitmaps()
3955 mutex_unlock(&ctl->cache_writeout_mutex); in trim_bitmaps()
3971 if (entry->bytes == 0) in trim_bitmaps()
3974 spin_unlock(&ctl->tree_lock); in trim_bitmaps()
3977 list_add_tail(&trim_entry.list, &ctl->trimming_ranges); in trim_bitmaps()
3978 mutex_unlock(&ctl->cache_writeout_mutex); in trim_bitmaps()
3984 block_group->discard_cursor = in trim_bitmaps()
3990 offset += BITS_PER_BITMAP * ctl->unit; in trim_bitmaps()
3995 block_group->discard_cursor = start; in trim_bitmaps()
4000 ret = -ERESTARTSYS; in trim_bitmaps()
4008 block_group->discard_cursor = end; in trim_bitmaps()
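/*
 * Editor's worked example for the stepping above: one bitmap entry covers
 * BITS_PER_BITMAP * ctl->unit bytes, so trim_bitmaps() advances one whole
 * bitmap at a time.  Assuming BITS_PER_BITMAP is PAGE_SIZE * 8 as defined in
 * this file, a 4KiB page and a 4KiB sectorsize give 32768 * 4096 bytes,
 * i.e. 128MiB of address space per bitmap.
 */
static inline u64 example_bytes_per_bitmap(u64 bits_per_bitmap, u32 unit)
{
	return bits_per_bitmap * unit;	/* e.g. 32768 * 4096 = 128MiB */
}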
4017 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in btrfs_trim_block_group()
4021 ASSERT(!btrfs_is_zoned(block_group->fs_info)); in btrfs_trim_block_group()
4025 spin_lock(&block_group->lock); in btrfs_trim_block_group()
4026 if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags)) { in btrfs_trim_block_group()
4027 spin_unlock(&block_group->lock); in btrfs_trim_block_group()
4031 spin_unlock(&block_group->lock); in btrfs_trim_block_group()
4038 div64_u64_rem(end, BITS_PER_BITMAP * ctl->unit, &rem); in btrfs_trim_block_group()
4055 spin_lock(&block_group->lock); in btrfs_trim_block_group_extents()
4056 if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags)) { in btrfs_trim_block_group_extents()
4057 spin_unlock(&block_group->lock); in btrfs_trim_block_group_extents()
4061 spin_unlock(&block_group->lock); in btrfs_trim_block_group_extents()
4077 spin_lock(&block_group->lock); in btrfs_trim_block_group_bitmaps()
4078 if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags)) { in btrfs_trim_block_group_bitmaps()
4079 spin_unlock(&block_group->lock); in btrfs_trim_block_group_bitmaps()
4083 spin_unlock(&block_group->lock); in btrfs_trim_block_group_bitmaps()
4095 return btrfs_super_cache_generation(fs_info->super_copy); in btrfs_free_space_cache_v1_active()
4107 node = rb_first_cached(&fs_info->block_group_cache_tree); in cleanup_free_space_cache_v1()
4125	 * update_super_roots will appropriately set or unset in btrfs_set_free_space_cache_v1_active()
4126	 * super_copy->cache_generation based on SPACE_CACHE and in btrfs_set_free_space_cache_v1_active()
4127	 * BTRFS_FS_CLEANUP_SPACE_CACHE_V1. For this reason, we need a in btrfs_set_free_space_cache_v1_active()
4128	 * transaction commit or the cache will never be active. in btrfs_set_free_space_cache_v1_active()
4132 trans = btrfs_start_transaction(fs_info->tree_root, 0); in btrfs_set_free_space_cache_v1_active()
4137 set_bit(BTRFS_FS_CLEANUP_SPACE_CACHE_V1, &fs_info->flags); in btrfs_set_free_space_cache_v1_active()
4148 clear_bit(BTRFS_FS_CLEANUP_SPACE_CACHE_V1, &fs_info->flags); in btrfs_set_free_space_cache_v1_active()
4159 return -ENOMEM; in btrfs_free_space_init()
4166 return -ENOMEM; in btrfs_free_space_init()
4188 struct btrfs_free_space_ctl *ctl = cache->free_space_ctl; in test_add_free_space_entry()
4199 return -ENOMEM; in test_add_free_space_entry()
4203 spin_lock(&ctl->tree_lock); in test_add_free_space_entry()
4204 info->offset = offset; in test_add_free_space_entry()
4205 info->bytes = bytes; in test_add_free_space_entry()
4206 info->max_extent_size = 0; in test_add_free_space_entry()
4208 spin_unlock(&ctl->tree_lock); in test_add_free_space_entry()
4218 return -ENOMEM; in test_add_free_space_entry()
4222 spin_lock(&ctl->tree_lock); in test_add_free_space_entry()
4226 info->bitmap = map; in test_add_free_space_entry()
4236 bytes -= bytes_added; in test_add_free_space_entry()
4238 spin_unlock(&ctl->tree_lock); in test_add_free_space_entry()
4258 struct btrfs_free_space_ctl *ctl = cache->free_space_ctl; in test_check_exists()
4262 spin_lock(&ctl->tree_lock); in test_check_exists()
4272 if (info->bitmap) { in test_check_exists()
4278 bit_bytes = ctl->unit; in test_check_exists()
4291 n = rb_prev(&info->offset_index); in test_check_exists()
4295 if (tmp->offset + tmp->bytes < offset) in test_check_exists()
4297 if (offset + bytes < tmp->offset) { in test_check_exists()
4298 n = rb_prev(&tmp->offset_index); in test_check_exists()
4305 n = rb_next(&info->offset_index); in test_check_exists()
4309 if (offset + bytes < tmp->offset) in test_check_exists()
4311 if (tmp->offset + tmp->bytes < offset) { in test_check_exists()
4312 n = rb_next(&tmp->offset_index); in test_check_exists()
4323 if (info->offset == offset) { in test_check_exists()
4328 if (offset > info->offset && offset < info->offset + info->bytes) in test_check_exists()
4331 spin_unlock(&ctl->tree_lock); in test_check_exists()
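/*
 * Editor's sketch of how the two sanity-test helpers above fit together, in
 * the style of fs/btrfs/tests/free-space-tests.c: seed the cache with an
 * entry, then assert that the range is reported as free.  The exact
 * signatures (in particular the final bitmap/extent selector of
 * test_add_free_space_entry()) are recalled from the self-test code and are
 * an assumption of this sketch.
 */
static int example_selftest_roundtrip(struct btrfs_block_group *cache)
{
	int ret;

	/* Add a 4MiB extent entry at offset 0 (last arg: 0 = extent, 1 = bitmap). */
	ret = test_add_free_space_entry(cache, 0, SZ_4M, 0);
	if (ret)
		return ret;

	/* The whole range should now be found in the free space cache. */
	if (!test_check_exists(cache, 0, SZ_4M))
		return -EINVAL;

	return 0;
}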