Lines Matching +full:cluster +full:- +full:index

1 // SPDX-License-Identifier: GPL-2.0
12 #include <linux/error-injection.h>
15 #include "free-space-cache.h"
17 #include "disk-io.h"
19 #include "inode-map.h"
21 #include "space-info.h"
22 #include "delalloc-space.h"
23 #include "block-group.h"
51 struct btrfs_fs_info *fs_info = root->fs_info; in __lookup_free_space_inode()
70 return ERR_PTR(-ENOENT); in __lookup_free_space_inode()
73 leaf = path->nodes[0]; in __lookup_free_space_inode()
74 header = btrfs_item_ptr(leaf, path->slots[0], in __lookup_free_space_inode()
85 inode = btrfs_iget_path(fs_info->sb, location.objectid, root, path); in __lookup_free_space_inode()
91 mapping_set_gfp_mask(inode->i_mapping, in __lookup_free_space_inode()
92 mapping_gfp_constraint(inode->i_mapping, in __lookup_free_space_inode()
101 struct btrfs_fs_info *fs_info = block_group->fs_info; in lookup_free_space_inode()
105 spin_lock(&block_group->lock); in lookup_free_space_inode()
106 if (block_group->inode) in lookup_free_space_inode()
107 inode = igrab(block_group->inode); in lookup_free_space_inode()
108 spin_unlock(&block_group->lock); in lookup_free_space_inode()
112 inode = __lookup_free_space_inode(fs_info->tree_root, path, in lookup_free_space_inode()
113 block_group->start); in lookup_free_space_inode()
117 spin_lock(&block_group->lock); in lookup_free_space_inode()
118 if (!((BTRFS_I(inode)->flags & flags) == flags)) { in lookup_free_space_inode()
120 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM | in lookup_free_space_inode()
122 block_group->disk_cache_state = BTRFS_DC_CLEAR; in lookup_free_space_inode()
125 if (!block_group->iref) { in lookup_free_space_inode()
126 block_group->inode = igrab(inode); in lookup_free_space_inode()
127 block_group->iref = 1; in lookup_free_space_inode()
129 spin_unlock(&block_group->lock); in lookup_free_space_inode()
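
The lines above show lookup_free_space_inode() checking block_group->inode under block_group->lock, taking a reference with igrab() before dropping the lock, and only falling back to the slow btree lookup on a miss (caching the result via block_group->iref for the next caller). A minimal userspace sketch of that check-under-lock, reference-and-cache pattern, with a pthread mutex and a plain refcount standing in for the kernel's spinlock and igrab(); every name below is illustrative, not kernel API:

	#include <pthread.h>
	#include <stdlib.h>

	struct obj { int refs; };     /* refs protected by cache_lock in this toy */

	static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct obj *cached;

	static struct obj *slow_lookup(void)      /* stand-in for the btree walk */
	{
		struct obj *o = calloc(1, sizeof(*o));
		if (o)
			o->refs = 1;
		return o;
	}

	static struct obj *lookup(void)
	{
		struct obj *o = NULL;

		pthread_mutex_lock(&cache_lock);
		if (cached) {                     /* fast path: grab a reference */
			cached->refs++;
			o = cached;
		}
		pthread_mutex_unlock(&cache_lock);
		if (o)
			return o;

		o = slow_lookup();                /* slow path, done unlocked */
		if (!o)
			return NULL;

		pthread_mutex_lock(&cache_lock);
		if (!cached) {                    /* cache it for the next caller */
			o->refs++;
			cached = o;
		}
		pthread_mutex_unlock(&cache_lock);
		return o;
	}

	int main(void)
	{
		struct obj *a = lookup(), *b = lookup();
		return !(a && a == b);            /* second call hits the cache */
	}
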
155 leaf = path->nodes[0]; in __create_free_space_inode()
156 inode_item = btrfs_item_ptr(leaf, path->slots[0], in __create_free_space_inode()
158 btrfs_item_key(leaf, &disk_key, path->slots[0]); in __create_free_space_inode()
161 btrfs_set_inode_generation(leaf, inode_item, trans->transid); in __create_free_space_inode()
169 btrfs_set_inode_transid(leaf, inode_item, trans->transid); in __create_free_space_inode()
184 leaf = path->nodes[0]; in __create_free_space_inode()
185 header = btrfs_item_ptr(leaf, path->slots[0], in __create_free_space_inode()
202 ret = btrfs_find_free_objectid(trans->fs_info->tree_root, &ino); in create_free_space_inode()
206 return __create_free_space_inode(trans->fs_info->tree_root, trans, path, in create_free_space_inode()
207 ino, block_group->start); in create_free_space_inode()
220 spin_lock(&rsv->lock); in btrfs_check_trunc_cache_free_space()
221 if (rsv->reserved < needed_bytes) in btrfs_check_trunc_cache_free_space()
222 ret = -ENOSPC; in btrfs_check_trunc_cache_free_space()
225 spin_unlock(&rsv->lock); in btrfs_check_trunc_cache_free_space()
233 struct btrfs_root *root = BTRFS_I(inode)->root; in btrfs_truncate_free_space_cache()
241 ret = -ENOMEM; in btrfs_truncate_free_space_cache()
245 mutex_lock(&trans->transaction->cache_write_mutex); in btrfs_truncate_free_space_cache()
246 if (!list_empty(&block_group->io_list)) { in btrfs_truncate_free_space_cache()
247 list_del_init(&block_group->io_list); in btrfs_truncate_free_space_cache()
257 spin_lock(&block_group->lock); in btrfs_truncate_free_space_cache()
258 block_group->disk_cache_state = BTRFS_DC_CLEAR; in btrfs_truncate_free_space_cache()
259 spin_unlock(&block_group->lock); in btrfs_truncate_free_space_cache()
268 * need to check for -EAGAIN. in btrfs_truncate_free_space_cache()
279 mutex_unlock(&trans->transaction->cache_write_mutex); in btrfs_truncate_free_space_cache()
295 file_ra_state_init(ra, inode->i_mapping); in readahead_cache()
296 last_index = (i_size_read(inode) - 1) >> PAGE_SHIFT; in readahead_cache()
298 page_cache_sync_readahead(inode->i_mapping, ra, NULL, 0, last_index); in readahead_cache()
317 return -ENOSPC; in io_ctl_init()
321 io_ctl->pages = kcalloc(num_pages, sizeof(struct page *), GFP_NOFS); in io_ctl_init()
322 if (!io_ctl->pages) in io_ctl_init()
323 return -ENOMEM; in io_ctl_init()
325 io_ctl->num_pages = num_pages; in io_ctl_init()
326 io_ctl->fs_info = btrfs_sb(inode->i_sb); in io_ctl_init()
327 io_ctl->check_crcs = check_crcs; in io_ctl_init()
328 io_ctl->inode = inode; in io_ctl_init()
336 kfree(io_ctl->pages); in io_ctl_free()
337 io_ctl->pages = NULL; in io_ctl_free()
342 if (io_ctl->cur) { in io_ctl_unmap_page()
343 io_ctl->cur = NULL; in io_ctl_unmap_page()
344 io_ctl->orig = NULL; in io_ctl_unmap_page()
350 ASSERT(io_ctl->index < io_ctl->num_pages); in io_ctl_map_page()
351 io_ctl->page = io_ctl->pages[io_ctl->index++]; in io_ctl_map_page()
352 io_ctl->cur = page_address(io_ctl->page); in io_ctl_map_page()
353 io_ctl->orig = io_ctl->cur; in io_ctl_map_page()
354 io_ctl->size = PAGE_SIZE; in io_ctl_map_page()
356 clear_page(io_ctl->cur); in io_ctl_map_page()
365 for (i = 0; i < io_ctl->num_pages; i++) { in io_ctl_drop_pages()
366 if (io_ctl->pages[i]) { in io_ctl_drop_pages()
367 ClearPageChecked(io_ctl->pages[i]); in io_ctl_drop_pages()
368 unlock_page(io_ctl->pages[i]); in io_ctl_drop_pages()
369 put_page(io_ctl->pages[i]); in io_ctl_drop_pages()
377 struct inode *inode = io_ctl->inode; in io_ctl_prepare_pages()
378 gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping); in io_ctl_prepare_pages()
381 for (i = 0; i < io_ctl->num_pages; i++) { in io_ctl_prepare_pages()
382 page = find_or_create_page(inode->i_mapping, i, mask); in io_ctl_prepare_pages()
385 return -ENOMEM; in io_ctl_prepare_pages()
387 io_ctl->pages[i] = page; in io_ctl_prepare_pages()
391 if (page->mapping != inode->i_mapping) { in io_ctl_prepare_pages()
392 btrfs_err(BTRFS_I(inode)->root->fs_info, in io_ctl_prepare_pages()
395 return -EIO; in io_ctl_prepare_pages()
398 btrfs_err(BTRFS_I(inode)->root->fs_info, in io_ctl_prepare_pages()
401 return -EIO; in io_ctl_prepare_pages()
406 for (i = 0; i < io_ctl->num_pages; i++) { in io_ctl_prepare_pages()
407 clear_page_dirty_for_io(io_ctl->pages[i]); in io_ctl_prepare_pages()
408 set_page_extent_mapped(io_ctl->pages[i]); in io_ctl_prepare_pages()
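
io_ctl_prepare_pages() pins every cache page up front with find_or_create_page() and, on the first failure, releases whatever it managed to grab (io_ctl_drop_pages() above does the unwinding). A userspace sketch of the same all-or-nothing acquisition, with malloc standing in for page allocation; none of this is kernel API:

	#include <stdlib.h>

	/* Grab every "page" up front; on the first failure, free what we got. */
	static void **alloc_pages_array(size_t num_pages, size_t page_size)
	{
		void **pages = calloc(num_pages, sizeof(*pages));
		if (!pages)
			return NULL;
		for (size_t i = 0; i < num_pages; i++) {
			pages[i] = malloc(page_size);
			if (!pages[i])
				goto unwind;
		}
		return pages;
	unwind:
		for (size_t i = 0; i < num_pages; i++)
			free(pages[i]);           /* free(NULL) is a no-op */
		free(pages);
		return NULL;
	}

	int main(void)
	{
		void **pages = alloc_pages_array(8, 4096);
		if (!pages)
			return 1;
		for (size_t i = 0; i < 8; i++)
			free(pages[i]);
		free(pages);
		return 0;
	}
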
422 if (io_ctl->check_crcs) { in io_ctl_set_generation()
423 io_ctl->cur += (sizeof(u32) * io_ctl->num_pages); in io_ctl_set_generation()
424 io_ctl->size -= sizeof(u64) + (sizeof(u32) * io_ctl->num_pages); in io_ctl_set_generation()
426 io_ctl->cur += sizeof(u64); in io_ctl_set_generation()
427 io_ctl->size -= sizeof(u64) * 2; in io_ctl_set_generation()
430 put_unaligned_le64(generation, io_ctl->cur); in io_ctl_set_generation()
431 io_ctl->cur += sizeof(u64); in io_ctl_set_generation()
442 if (io_ctl->check_crcs) { in io_ctl_check_generation()
443 io_ctl->cur += sizeof(u32) * io_ctl->num_pages; in io_ctl_check_generation()
444 io_ctl->size -= sizeof(u64) + in io_ctl_check_generation()
445 (sizeof(u32) * io_ctl->num_pages); in io_ctl_check_generation()
447 io_ctl->cur += sizeof(u64); in io_ctl_check_generation()
448 io_ctl->size -= sizeof(u64) * 2; in io_ctl_check_generation()
451 cache_gen = get_unaligned_le64(io_ctl->cur); in io_ctl_check_generation()
453 btrfs_err_rl(io_ctl->fs_info, in io_ctl_check_generation()
457 return -EIO; in io_ctl_check_generation()
459 io_ctl->cur += sizeof(u64); in io_ctl_check_generation()
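
io_ctl_set_generation() and io_ctl_check_generation() imply the cache file's page-0 layout: with checksumming enabled, one u32 CRC slot per page comes first, then a le64 generation, with io_ctl->size shrunk accordingly; without checksums only two u64 slots are reserved. A small arithmetic check of that layout, assuming 4KiB pages:

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SIZE 4096u

	int main(void)
	{
		unsigned int num_pages = 8;                     /* example cache size */
		size_t crc_area = sizeof(uint32_t) * num_pages; /* one CRC per page  */
		size_t gen_offset = crc_area;                   /* generation follows */
		size_t payload_crcs = PAGE_SIZE - crc_area - sizeof(uint64_t);
		size_t payload_plain = PAGE_SIZE - 2 * sizeof(uint64_t);

		printf("crc array %zu bytes, generation at %zu\n",
		       crc_area, gen_offset);
		printf("page 0 payload: %zu (crcs) vs %zu (no crcs)\n",
		       payload_crcs, payload_plain);
		return 0;
	}
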
463 static void io_ctl_set_crc(struct btrfs_io_ctl *io_ctl, int index) in io_ctl_set_crc() argument
469 if (!io_ctl->check_crcs) { in io_ctl_set_crc()
474 if (index == 0) in io_ctl_set_crc()
475 offset = sizeof(u32) * io_ctl->num_pages; in io_ctl_set_crc()
477 crc = btrfs_crc32c(crc, io_ctl->orig + offset, PAGE_SIZE - offset); in io_ctl_set_crc()
480 tmp = page_address(io_ctl->pages[0]); in io_ctl_set_crc()
481 tmp += index; in io_ctl_set_crc()
485 static int io_ctl_check_crc(struct btrfs_io_ctl *io_ctl, int index) in io_ctl_check_crc() argument
491 if (!io_ctl->check_crcs) { in io_ctl_check_crc()
496 if (index == 0) in io_ctl_check_crc()
497 offset = sizeof(u32) * io_ctl->num_pages; in io_ctl_check_crc()
499 tmp = page_address(io_ctl->pages[0]); in io_ctl_check_crc()
500 tmp += index; in io_ctl_check_crc()
504 crc = btrfs_crc32c(crc, io_ctl->orig + offset, PAGE_SIZE - offset); in io_ctl_check_crc()
507 btrfs_err_rl(io_ctl->fs_info, in io_ctl_check_crc()
510 return -EIO; in io_ctl_check_crc()
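
io_ctl_set_crc()/io_ctl_check_crc() checksum each page with CRC32C, skipping the CRC array itself on page 0 (offset = sizeof(u32) * num_pages when index == 0) and storing the result in slot 'index' at the start of page 0. A self-contained sketch with a bitwise CRC32C in place of btrfs_crc32c(); the seed-with-~0, finalize-with-~ convention below is the usual CRC32C usage and is an assumption here, not copied from the listing:

	#include <stdint.h>
	#include <stddef.h>
	#include <stdio.h>
	#include <string.h>

	#define PAGE_SIZE 4096u

	/* Bitwise CRC32C (Castagnoli), reflected polynomial 0x82F63B78. */
	static uint32_t crc32c(uint32_t crc, const uint8_t *p, size_t len)
	{
		while (len--) {
			crc ^= *p++;
			for (int k = 0; k < 8; k++)
				crc = (crc >> 1) ^ (0x82F63B78u & (0u - (crc & 1)));
		}
		return crc;
	}

	static uint32_t page_crc(const uint8_t *page, int index,
				 unsigned int num_pages)
	{
		/* page 0 must not checksum its own CRC slots */
		size_t offset = (index == 0) ? sizeof(uint32_t) * num_pages : 0;

		return ~crc32c(~0u, page + offset, PAGE_SIZE - offset);
	}

	int main(void)
	{
		static uint8_t page[PAGE_SIZE];
		uint32_t crcs[8];                 /* lives at the start of page 0 */

		memset(page, 0xab, sizeof(page));
		crcs[0] = page_crc(page, 0, 8);   /* skips the CRC slots */
		crcs[1] = page_crc(page, 1, 8);   /* later pages covered fully */
		printf("crc0=%08x crc1=%08x\n", crcs[0], crcs[1]);
		return 0;
	}
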
521 if (!io_ctl->cur) in io_ctl_add_entry()
522 return -ENOSPC; in io_ctl_add_entry()
524 entry = io_ctl->cur; in io_ctl_add_entry()
525 put_unaligned_le64(offset, &entry->offset); in io_ctl_add_entry()
526 put_unaligned_le64(bytes, &entry->bytes); in io_ctl_add_entry()
527 entry->type = (bitmap) ? BTRFS_FREE_SPACE_BITMAP : in io_ctl_add_entry()
529 io_ctl->cur += sizeof(struct btrfs_free_space_entry); in io_ctl_add_entry()
530 io_ctl->size -= sizeof(struct btrfs_free_space_entry); in io_ctl_add_entry()
532 if (io_ctl->size >= sizeof(struct btrfs_free_space_entry)) in io_ctl_add_entry()
535 io_ctl_set_crc(io_ctl, io_ctl->index - 1); in io_ctl_add_entry()
538 if (io_ctl->index >= io_ctl->num_pages) in io_ctl_add_entry()
548 if (!io_ctl->cur) in io_ctl_add_bitmap()
549 return -ENOSPC; in io_ctl_add_bitmap()
555 if (io_ctl->cur != io_ctl->orig) { in io_ctl_add_bitmap()
556 io_ctl_set_crc(io_ctl, io_ctl->index - 1); in io_ctl_add_bitmap()
557 if (io_ctl->index >= io_ctl->num_pages) in io_ctl_add_bitmap()
558 return -ENOSPC; in io_ctl_add_bitmap()
562 copy_page(io_ctl->cur, bitmap); in io_ctl_add_bitmap()
563 io_ctl_set_crc(io_ctl, io_ctl->index - 1); in io_ctl_add_bitmap()
564 if (io_ctl->index < io_ctl->num_pages) in io_ctl_add_bitmap()
575 if (io_ctl->cur != io_ctl->orig) in io_ctl_zero_remaining_pages()
576 io_ctl_set_crc(io_ctl, io_ctl->index - 1); in io_ctl_zero_remaining_pages()
580 while (io_ctl->index < io_ctl->num_pages) { in io_ctl_zero_remaining_pages()
582 io_ctl_set_crc(io_ctl, io_ctl->index - 1); in io_ctl_zero_remaining_pages()
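
io_ctl_add_entry() appends fixed-size records (le64 offset, le64 bytes, one type byte) to the current page, decrementing io_ctl->size until the next record would not fit, at which point the page's CRC is sealed and the writer advances to the next page. A sketch of that packing loop with explicit little-endian stores; the 17-byte record layout and the type values (1 = extent, 2 = bitmap) are inferred from the accessors, not copied from btrfs headers:

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SIZE 4096u
	#define ENTRY_SIZE (8 + 8 + 1)    /* le64 offset + le64 bytes + u8 type */

	struct rec { uint64_t offset, bytes; };

	static void put_le64(uint8_t *dst, uint64_t v)
	{
		for (int i = 0; i < 8; i++)
			dst[i] = (uint8_t)(v >> (8 * i));
	}

	/* Pack extent records into one page after 'reserved' header bytes;
	 * returns how many fit. The caller seals the CRC and moves on. */
	static unsigned int pack_entries(uint8_t *page, size_t reserved,
					 const struct rec *ext, unsigned int n)
	{
		uint8_t *cur = page + reserved;
		size_t size = PAGE_SIZE - reserved;
		unsigned int i;

		for (i = 0; i < n && size >= ENTRY_SIZE; i++) {
			put_le64(cur, ext[i].offset);
			put_le64(cur + 8, ext[i].bytes);
			cur[16] = 1;              /* 1 = extent, 2 = bitmap */
			cur += ENTRY_SIZE;
			size -= ENTRY_SIZE;
		}
		return i;
	}

	int main(void)
	{
		static uint8_t page[PAGE_SIZE];
		struct rec ext[] = { { 1 << 20, 1 << 16 }, { 3 << 20, 1 << 12 } };

		printf("packed %u entries\n", pack_entries(page, 40, ext, 2));
		return 0;
	}
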
592 if (!io_ctl->cur) { in io_ctl_read_entry()
593 ret = io_ctl_check_crc(io_ctl, io_ctl->index); in io_ctl_read_entry()
598 e = io_ctl->cur; in io_ctl_read_entry()
599 entry->offset = get_unaligned_le64(&e->offset); in io_ctl_read_entry()
600 entry->bytes = get_unaligned_le64(&e->bytes); in io_ctl_read_entry()
601 *type = e->type; in io_ctl_read_entry()
602 io_ctl->cur += sizeof(struct btrfs_free_space_entry); in io_ctl_read_entry()
603 io_ctl->size -= sizeof(struct btrfs_free_space_entry); in io_ctl_read_entry()
605 if (io_ctl->size >= sizeof(struct btrfs_free_space_entry)) in io_ctl_read_entry()
618 ret = io_ctl_check_crc(io_ctl, io_ctl->index); in io_ctl_read_bitmap()
622 copy_page(entry->bitmap, io_ctl->cur); in io_ctl_read_bitmap()
643 spin_lock(&ctl->tree_lock); in merge_space_tree()
644 for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) { in merge_space_tree()
648 if (e->bitmap || prev->bitmap) in merge_space_tree()
650 if (prev->offset + prev->bytes == e->offset) { in merge_space_tree()
653 prev->bytes += e->bytes; in merge_space_tree()
657 spin_unlock(&ctl->tree_lock); in merge_space_tree()
663 spin_unlock(&ctl->tree_lock); in merge_space_tree()
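
merge_space_tree() walks the offset-sorted tree once after load and glues together neighbours where prev->offset + prev->bytes == e->offset, skipping any pair that involves a bitmap. The same pass over a sorted array, as a toy stand-in for the rbtree walk:

	#include <stdint.h>
	#include <stdio.h>

	struct fse { uint64_t offset, bytes; int bitmap; };

	/* Merge adjacent extent entries in an offset-sorted array; returns
	 * the new count. Bitmaps are never merged, as in merge_space_tree(). */
	static size_t merge_adjacent(struct fse *e, size_t n)
	{
		size_t out = 0;

		for (size_t i = 0; i < n; i++) {
			if (out && !e[out - 1].bitmap && !e[i].bitmap &&
			    e[out - 1].offset + e[out - 1].bytes == e[i].offset) {
				e[out - 1].bytes += e[i].bytes;  /* absorb */
				continue;
			}
			e[out++] = e[i];
		}
		return out;
	}

	int main(void)
	{
		struct fse e[] = {
			{ 0,     4096, 0 },
			{ 4096,  8192, 0 },   /* contiguous with the first: merged */
			{ 16384, 4096, 0 },   /* gap: kept separate */
		};
		size_t n = merge_adjacent(e, 3);

		for (size_t i = 0; i < n; i++)
			printf("[%llu, +%llu)\n",
			       (unsigned long long)e[i].offset,
			       (unsigned long long)e[i].bytes);
		return 0;
	}
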
670 struct btrfs_fs_info *fs_info = root->fs_info; in __load_free_space_cache()
699 ret = -1; in __load_free_space_cache()
701 leaf = path->nodes[0]; in __load_free_space_cache()
702 header = btrfs_item_ptr(leaf, path->slots[0], in __load_free_space_cache()
709 if (!BTRFS_I(inode)->generation) { in __load_free_space_cache()
716 if (BTRFS_I(inode)->generation != generation) { in __load_free_space_cache()
719 BTRFS_I(inode)->generation, generation); in __load_free_space_cache()
748 ret = -ENOMEM; in __load_free_space_cache()
766 e->trim_state = BTRFS_TRIM_STATE_TRIMMED; in __load_free_space_cache()
768 if (!e->bytes) { in __load_free_space_cache()
769 ret = -1; in __load_free_space_cache()
775 spin_lock(&ctl->tree_lock); in __load_free_space_cache()
777 spin_unlock(&ctl->tree_lock); in __load_free_space_cache()
786 num_bitmaps--; in __load_free_space_cache()
787 e->bitmap = kmem_cache_zalloc( in __load_free_space_cache()
789 if (!e->bitmap) { in __load_free_space_cache()
790 ret = -ENOMEM; in __load_free_space_cache()
795 spin_lock(&ctl->tree_lock); in __load_free_space_cache()
798 spin_unlock(&ctl->tree_lock); in __load_free_space_cache()
804 ctl->total_bitmaps++; in __load_free_space_cache()
805 ctl->op->recalc_thresholds(ctl); in __load_free_space_cache()
806 spin_unlock(&ctl->tree_lock); in __load_free_space_cache()
807 list_add_tail(&e->list, &bitmaps); in __load_free_space_cache()
810 num_entries--; in __load_free_space_cache()
820 list_del_init(&e->list); in __load_free_space_cache()
824 e->bitmap_extents = count_bitmap_extents(ctl, e); in __load_free_space_cache()
826 ctl->discardable_extents[BTRFS_STAT_CURR] += in __load_free_space_cache()
827 e->bitmap_extents; in __load_free_space_cache()
828 ctl->discardable_bytes[BTRFS_STAT_CURR] += e->bytes; in __load_free_space_cache()
836 btrfs_discard_update_discardable(ctl->private, ctl); in __load_free_space_cache()
847 struct btrfs_fs_info *fs_info = block_group->fs_info; in load_free_space_cache()
848 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in load_free_space_cache()
853 u64 used = block_group->used; in load_free_space_cache()
859 spin_lock(&block_group->lock); in load_free_space_cache()
860 if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) { in load_free_space_cache()
861 spin_unlock(&block_group->lock); in load_free_space_cache()
864 spin_unlock(&block_group->lock); in load_free_space_cache()
869 path->search_commit_root = 1; in load_free_space_cache()
870 path->skip_locking = 1; in load_free_space_cache()
877 * for a free extent, at extent-tree.c:find_free_extent(), we can find in load_free_space_cache()
888 * once created get their ->cached field set to BTRFS_CACHE_FINISHED so in load_free_space_cache()
898 spin_lock(&block_group->lock); in load_free_space_cache()
899 if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) { in load_free_space_cache()
900 spin_unlock(&block_group->lock); in load_free_space_cache()
904 spin_unlock(&block_group->lock); in load_free_space_cache()
906 ret = __load_free_space_cache(fs_info->tree_root, inode, ctl, in load_free_space_cache()
907 path, block_group->start); in load_free_space_cache()
912 spin_lock(&ctl->tree_lock); in load_free_space_cache()
913 matched = (ctl->free_space == (block_group->length - used - in load_free_space_cache()
914 block_group->bytes_super)); in load_free_space_cache()
915 spin_unlock(&ctl->tree_lock); in load_free_space_cache()
921 block_group->start); in load_free_space_cache()
922 ret = -1; in load_free_space_cache()
927 spin_lock(&block_group->lock); in load_free_space_cache()
928 block_group->disk_cache_state = BTRFS_DC_CLEAR; in load_free_space_cache()
929 spin_unlock(&block_group->lock); in load_free_space_cache()
934 block_group->start); in load_free_space_cache()
949 struct btrfs_free_cluster *cluster = NULL; in write_cache_extent_entries() local
951 struct rb_node *node = rb_first(&ctl->free_space_offset); in write_cache_extent_entries()
954 /* Get the cluster for this block_group if it exists */ in write_cache_extent_entries()
955 if (block_group && !list_empty(&block_group->cluster_list)) { in write_cache_extent_entries()
956 cluster = list_entry(block_group->cluster_list.next, in write_cache_extent_entries()
961 if (!node && cluster) { in write_cache_extent_entries()
962 cluster_locked = cluster; in write_cache_extent_entries()
963 spin_lock(&cluster_locked->lock); in write_cache_extent_entries()
964 node = rb_first(&cluster->root); in write_cache_extent_entries()
965 cluster = NULL; in write_cache_extent_entries()
975 ret = io_ctl_add_entry(io_ctl, e->offset, e->bytes, in write_cache_extent_entries()
976 e->bitmap); in write_cache_extent_entries()
980 if (e->bitmap) { in write_cache_extent_entries()
981 list_add_tail(&e->list, bitmap_list); in write_cache_extent_entries()
985 if (!node && cluster) { in write_cache_extent_entries()
986 node = rb_first(&cluster->root); in write_cache_extent_entries()
987 cluster_locked = cluster; in write_cache_extent_entries()
988 spin_lock(&cluster_locked->lock); in write_cache_extent_entries()
989 cluster = NULL; in write_cache_extent_entries()
993 spin_unlock(&cluster_locked->lock); in write_cache_extent_entries()
1003 list_for_each_entry(trim_entry, &ctl->trimming_ranges, list) { in write_cache_extent_entries()
1004 ret = io_ctl_add_entry(io_ctl, trim_entry->start, in write_cache_extent_entries()
1005 trim_entry->bytes, NULL); in write_cache_extent_entries()
1014 spin_unlock(&cluster_locked->lock); in write_cache_extent_entries()
1015 return -ENOSPC; in write_cache_extent_entries()
1036 clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1, in update_cache_item()
1040 leaf = path->nodes[0]; in update_cache_item()
1043 ASSERT(path->slots[0]); in update_cache_item()
1044 path->slots[0]--; in update_cache_item()
1045 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); in update_cache_item()
1048 clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, in update_cache_item()
1049 inode->i_size - 1, EXTENT_DELALLOC, 0, in update_cache_item()
1056 BTRFS_I(inode)->generation = trans->transid; in update_cache_item()
1057 header = btrfs_item_ptr(leaf, path->slots[0], in update_cache_item()
1061 btrfs_set_free_space_generation(leaf, header, trans->transid); in update_cache_item()
1068 return -1; in update_cache_item()
1091 unpin = &trans->transaction->pinned_extents; in write_pinned_extent_entries()
1093 start = block_group->start; in write_pinned_extent_entries()
1095 while (start < block_group->start + block_group->length) { in write_pinned_extent_entries()
1103 if (extent_start >= block_group->start + block_group->length) in write_pinned_extent_entries()
1107 extent_end = min(block_group->start + block_group->length, in write_pinned_extent_entries()
1109 len = extent_end - extent_start; in write_pinned_extent_entries()
1114 return -ENOSPC; in write_pinned_extent_entries()
1130 ret = io_ctl_add_bitmap(io_ctl, entry->bitmap); in write_bitmap_entries()
1132 return -ENOSPC; in write_bitmap_entries()
1133 list_del_init(&entry->list); in write_bitmap_entries()
1143 ret = btrfs_wait_ordered_range(inode, 0, (u64)-1); in flush_dirty_cache()
1145 clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1, in flush_dirty_cache()
1157 list_del_init(&entry->list); in cleanup_bitmap_list()
1166 unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0, in cleanup_write_cache_enospc()
1167 i_size_read(inode) - 1, cached_state); in cleanup_write_cache_enospc()
1177 struct inode *inode = io_ctl->inode; in __btrfs_wait_cache_io()
1189 io_ctl->entries, io_ctl->bitmaps); in __btrfs_wait_cache_io()
1192 invalidate_inode_pages2(inode->i_mapping); in __btrfs_wait_cache_io()
1193 BTRFS_I(inode)->generation = 0; in __btrfs_wait_cache_io()
1195 btrfs_debug(root->fs_info, in __btrfs_wait_cache_io()
1197 block_group->start, ret); in __btrfs_wait_cache_io()
1203 spin_lock(&trans->transaction->dirty_bgs_lock); in __btrfs_wait_cache_io()
1206 spin_lock(&block_group->lock); in __btrfs_wait_cache_io()
1213 if (!ret && list_empty(&block_group->dirty_list)) in __btrfs_wait_cache_io()
1214 block_group->disk_cache_state = BTRFS_DC_WRITTEN; in __btrfs_wait_cache_io()
1216 block_group->disk_cache_state = BTRFS_DC_ERROR; in __btrfs_wait_cache_io()
1218 spin_unlock(&block_group->lock); in __btrfs_wait_cache_io()
1219 spin_unlock(&trans->transaction->dirty_bgs_lock); in __btrfs_wait_cache_io()
1220 io_ctl->inode = NULL; in __btrfs_wait_cache_io()
1240 return __btrfs_wait_cache_io(block_group->fs_info->tree_root, trans, in btrfs_wait_cache_io()
1241 block_group, &block_group->io_ctl, in btrfs_wait_cache_io()
1242 path, block_group->start); in btrfs_wait_cache_io()
1246 * __btrfs_write_out_cache - write out cached info to an inode
1247 * @root - the root the inode belongs to
1248 * @ctl - the free space cache we are going to write out
1249 * @block_group - the block_group for this cache if it belongs to a block_group
1250 * @trans - the trans handle
1270 return -EIO; in __btrfs_write_out_cache()
1272 WARN_ON(io_ctl->pages); in __btrfs_write_out_cache()
1277 if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)) { in __btrfs_write_out_cache()
1278 down_write(&block_group->data_rwsem); in __btrfs_write_out_cache()
1279 spin_lock(&block_group->lock); in __btrfs_write_out_cache()
1280 if (block_group->delalloc_bytes) { in __btrfs_write_out_cache()
1281 block_group->disk_cache_state = BTRFS_DC_WRITTEN; in __btrfs_write_out_cache()
1282 spin_unlock(&block_group->lock); in __btrfs_write_out_cache()
1283 up_write(&block_group->data_rwsem); in __btrfs_write_out_cache()
1284 BTRFS_I(inode)->generation = 0; in __btrfs_write_out_cache()
1289 spin_unlock(&block_group->lock); in __btrfs_write_out_cache()
1297 lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1, in __btrfs_write_out_cache()
1300 io_ctl_set_generation(io_ctl, trans->transid); in __btrfs_write_out_cache()
1302 mutex_lock(&ctl->cache_writeout_mutex); in __btrfs_write_out_cache()
1304 spin_lock(&ctl->tree_lock); in __btrfs_write_out_cache()
1329 spin_unlock(&ctl->tree_lock); in __btrfs_write_out_cache()
1330 mutex_unlock(&ctl->cache_writeout_mutex); in __btrfs_write_out_cache()
1338 ret = btrfs_dirty_pages(BTRFS_I(inode), io_ctl->pages, in __btrfs_write_out_cache()
1339 io_ctl->num_pages, 0, i_size_read(inode), in __btrfs_write_out_cache()
1344 if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)) in __btrfs_write_out_cache()
1345 up_write(&block_group->data_rwsem); in __btrfs_write_out_cache()
1353 unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0, in __btrfs_write_out_cache()
1354 i_size_read(inode) - 1, &cached_state); in __btrfs_write_out_cache()
1361 io_ctl->entries = entries; in __btrfs_write_out_cache()
1362 io_ctl->bitmaps = bitmaps; in __btrfs_write_out_cache()
1364 ret = btrfs_fdatawrite_range(inode, 0, (u64)-1); in __btrfs_write_out_cache()
1372 spin_unlock(&ctl->tree_lock); in __btrfs_write_out_cache()
1373 mutex_unlock(&ctl->cache_writeout_mutex); in __btrfs_write_out_cache()
1379 if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)) in __btrfs_write_out_cache()
1380 up_write(&block_group->data_rwsem); in __btrfs_write_out_cache()
1383 io_ctl->inode = NULL; in __btrfs_write_out_cache()
1386 invalidate_inode_pages2(inode->i_mapping); in __btrfs_write_out_cache()
1387 BTRFS_I(inode)->generation = 0; in __btrfs_write_out_cache()
1399 struct btrfs_fs_info *fs_info = trans->fs_info; in btrfs_write_out_cache()
1400 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in btrfs_write_out_cache()
1404 spin_lock(&block_group->lock); in btrfs_write_out_cache()
1405 if (block_group->disk_cache_state < BTRFS_DC_SETUP) { in btrfs_write_out_cache()
1406 spin_unlock(&block_group->lock); in btrfs_write_out_cache()
1409 spin_unlock(&block_group->lock); in btrfs_write_out_cache()
1415 ret = __btrfs_write_out_cache(fs_info->tree_root, inode, ctl, in btrfs_write_out_cache()
1416 block_group, &block_group->io_ctl, trans); in btrfs_write_out_cache()
1420 block_group->start, ret); in btrfs_write_out_cache()
1421 spin_lock(&block_group->lock); in btrfs_write_out_cache()
1422 block_group->disk_cache_state = BTRFS_DC_ERROR; in btrfs_write_out_cache()
1423 spin_unlock(&block_group->lock); in btrfs_write_out_cache()
1425 block_group->io_ctl.inode = NULL; in btrfs_write_out_cache()
1441 offset -= bitmap_start; in offset_to_bit()
1456 bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit; in offset_to_bitmap()
1457 bitmap_start = offset - ctl->start; in offset_to_bitmap()
1460 bitmap_start += ctl->start; in offset_to_bitmap()
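
offset_to_bitmap() rounds an offset down to the start of the bitmap that covers it: with one page of bits per bitmap and ctl->unit bytes per bit, each bitmap spans BITS_PER_BITMAP * unit bytes, i.e. 128MiB with 4KiB pages and sectors. A worked version of both helpers; the block-group start and offsets below are made up:

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SIZE 4096ULL
	#define BITS_PER_BITMAP (PAGE_SIZE * 8)   /* one page worth of bits */

	static uint64_t offset_to_bitmap(uint64_t ctl_start, uint64_t unit,
					 uint64_t offset)
	{
		uint64_t bytes_per_bitmap = BITS_PER_BITMAP * unit;
		uint64_t bitmap_start = offset - ctl_start;

		bitmap_start = (bitmap_start / bytes_per_bitmap) * bytes_per_bitmap;
		return bitmap_start + ctl_start;
	}

	static uint64_t offset_to_bit(uint64_t bitmap_start, uint64_t unit,
				      uint64_t offset)
	{
		return (offset - bitmap_start) / unit;
	}

	int main(void)
	{
		uint64_t start = 1024ULL << 20, unit = 4096;  /* made up */
		uint64_t off = start + (200ULL << 20) + 8192;
		uint64_t bm = offset_to_bitmap(start, unit, off);

		printf("each bitmap covers %llu MiB\n",
		       (unsigned long long)(BITS_PER_BITMAP * unit >> 20));
		printf("offset maps to bitmap @%llu, bit %llu\n",
		       (unsigned long long)bm,
		       (unsigned long long)offset_to_bit(bm, unit, off));
		return 0;
	}
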
1468 struct rb_node **p = &root->rb_node; in tree_insert_offset()
1476 if (offset < info->offset) { in tree_insert_offset()
1477 p = &(*p)->rb_left; in tree_insert_offset()
1478 } else if (offset > info->offset) { in tree_insert_offset()
1479 p = &(*p)->rb_right; in tree_insert_offset()
1495 if (info->bitmap) { in tree_insert_offset()
1497 return -EEXIST; in tree_insert_offset()
1499 p = &(*p)->rb_right; in tree_insert_offset()
1501 if (!info->bitmap) { in tree_insert_offset()
1503 return -EEXIST; in tree_insert_offset()
1505 p = &(*p)->rb_left; in tree_insert_offset()
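
tree_insert_offset() allows an extent entry and a bitmap entry to share one offset, with the extent sorting to the left so a linear walk finds it first; inserting a second entry of the same kind at that offset fails with -EEXIST. That ordering as a plain comparator:

	#include <stdint.h>
	#include <assert.h>

	/* Order used by tree_insert_offset(): by offset, and at equal offsets
	 * an extent entry (bitmap == 0) sorts before a bitmap entry. A result
	 * of 0 means "duplicate kind", which the kernel rejects as -EEXIST. */
	static int fse_cmp(uint64_t off_a, int bitmap_a,
			   uint64_t off_b, int bitmap_b)
	{
		if (off_a != off_b)
			return off_a < off_b ? -1 : 1;
		if (bitmap_a == bitmap_b)
			return 0;
		return bitmap_a ? 1 : -1;
	}

	int main(void)
	{
		assert(fse_cmp(4096, 0, 4096, 1) < 0);   /* extent first */
		assert(fse_cmp(4096, 1, 4096, 1) == 0);  /* would be -EEXIST */
		return 0;
	}
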
1519 * fuzzy - If this is set, then we are trying to make an allocation, and we just
1527 struct rb_node *n = ctl->free_space_offset.rb_node; in tree_search_offset()
1540 if (offset < entry->offset) in tree_search_offset()
1541 n = n->rb_left; in tree_search_offset()
1542 else if (offset > entry->offset) in tree_search_offset()
1543 n = n->rb_right; in tree_search_offset()
1551 if (entry->bitmap) in tree_search_offset()
1562 if (entry->offset != offset) in tree_search_offset()
1565 WARN_ON(!entry->bitmap); in tree_search_offset()
1568 if (entry->bitmap) { in tree_search_offset()
1573 n = rb_prev(&entry->offset_index); in tree_search_offset()
1577 if (!prev->bitmap && in tree_search_offset()
1578 prev->offset + prev->bytes > offset) in tree_search_offset()
1590 if (entry->offset > offset) { in tree_search_offset()
1591 n = rb_prev(&entry->offset_index); in tree_search_offset()
1595 ASSERT(entry->offset <= offset); in tree_search_offset()
1604 if (entry->bitmap) { in tree_search_offset()
1605 n = rb_prev(&entry->offset_index); in tree_search_offset()
1609 if (!prev->bitmap && in tree_search_offset()
1610 prev->offset + prev->bytes > offset) in tree_search_offset()
1613 if (entry->offset + BITS_PER_BITMAP * ctl->unit > offset) in tree_search_offset()
1615 } else if (entry->offset + entry->bytes > offset) in tree_search_offset()
1622 if (entry->bitmap) { in tree_search_offset()
1623 if (entry->offset + BITS_PER_BITMAP * in tree_search_offset()
1624 ctl->unit > offset) in tree_search_offset()
1627 if (entry->offset + entry->bytes > offset) in tree_search_offset()
1631 n = rb_next(&entry->offset_index); in tree_search_offset()
1643 rb_erase(&info->offset_index, &ctl->free_space_offset); in __unlink_free_space()
1644 ctl->free_extents--; in __unlink_free_space()
1646 if (!info->bitmap && !btrfs_free_space_trimmed(info)) { in __unlink_free_space()
1647 ctl->discardable_extents[BTRFS_STAT_CURR]--; in __unlink_free_space()
1648 ctl->discardable_bytes[BTRFS_STAT_CURR] -= info->bytes; in __unlink_free_space()
1656 ctl->free_space -= info->bytes; in unlink_free_space()
1664 ASSERT(info->bytes || info->bitmap); in link_free_space()
1665 ret = tree_insert_offset(&ctl->free_space_offset, info->offset, in link_free_space()
1666 &info->offset_index, (info->bitmap != NULL)); in link_free_space()
1670 if (!info->bitmap && !btrfs_free_space_trimmed(info)) { in link_free_space()
1671 ctl->discardable_extents[BTRFS_STAT_CURR]++; in link_free_space()
1672 ctl->discardable_bytes[BTRFS_STAT_CURR] += info->bytes; in link_free_space()
1675 ctl->free_space += info->bytes; in link_free_space()
1676 ctl->free_extents++; in link_free_space()
1682 struct btrfs_block_group *block_group = ctl->private; in recalculate_thresholds()
1686 u64 size = block_group->length; in recalculate_thresholds()
1687 u64 bytes_per_bg = BITS_PER_BITMAP * ctl->unit; in recalculate_thresholds()
1688 u64 max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg); in recalculate_thresholds()
1692 ASSERT(ctl->total_bitmaps <= max_bitmaps); in recalculate_thresholds()
1705 bitmap_bytes = ctl->total_bitmaps * ctl->unit; in recalculate_thresholds()
1711 extent_bytes = max_bytes - bitmap_bytes; in recalculate_thresholds()
1714 ctl->extents_thresh = in recalculate_thresholds()
1723 int extent_delta = -1; in __bitmap_clear_bits()
1725 start = offset_to_bit(info->offset, ctl->unit, offset); in __bitmap_clear_bits()
1726 count = bytes_to_bits(bytes, ctl->unit); in __bitmap_clear_bits()
1730 bitmap_clear(info->bitmap, start, count); in __bitmap_clear_bits()
1732 info->bytes -= bytes; in __bitmap_clear_bits()
1733 if (info->max_extent_size > ctl->unit) in __bitmap_clear_bits()
1734 info->max_extent_size = 0; in __bitmap_clear_bits()
1736 if (start && test_bit(start - 1, info->bitmap)) in __bitmap_clear_bits()
1739 if (end < BITS_PER_BITMAP && test_bit(end, info->bitmap)) in __bitmap_clear_bits()
1742 info->bitmap_extents += extent_delta; in __bitmap_clear_bits()
1744 ctl->discardable_extents[BTRFS_STAT_CURR] += extent_delta; in __bitmap_clear_bits()
1745 ctl->discardable_bytes[BTRFS_STAT_CURR] -= bytes; in __bitmap_clear_bits()
1754 ctl->free_space -= bytes; in bitmap_clear_bits()
1764 start = offset_to_bit(info->offset, ctl->unit, offset); in bitmap_set_bits()
1765 count = bytes_to_bits(bytes, ctl->unit); in bitmap_set_bits()
1769 bitmap_set(info->bitmap, start, count); in bitmap_set_bits()
1771 info->bytes += bytes; in bitmap_set_bits()
1772 ctl->free_space += bytes; in bitmap_set_bits()
1774 if (start && test_bit(start - 1, info->bitmap)) in bitmap_set_bits()
1775 extent_delta--; in bitmap_set_bits()
1777 if (end < BITS_PER_BITMAP && test_bit(end, info->bitmap)) in bitmap_set_bits()
1778 extent_delta--; in bitmap_set_bits()
1780 info->bitmap_extents += extent_delta; in bitmap_set_bits()
1782 ctl->discardable_extents[BTRFS_STAT_CURR] += extent_delta; in bitmap_set_bits()
1783 ctl->discardable_bytes[BTRFS_STAT_CURR] += bytes; in bitmap_set_bits()
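
bitmap_set_bits() and __bitmap_clear_bits() keep the extent counters exact without rescanning the bitmap: setting a run starts from a delta of +1 (one new extent) and subtracts one for each already-set neighbour it fuses with; clearing starts from -1 and adds one for each set neighbour left behind (a split). A toy bitmap showing the bookkeeping:

	#include <stdio.h>
	#include <string.h>

	#define NBITS 64

	static unsigned char bm[NBITS];   /* one byte per bit, for clarity */

	/* Set bits [start, start+count); return the change in extent count. */
	static int set_run(int start, int count)
	{
		int end = start + count, delta = 1;

		if (start > 0 && bm[start - 1])
			delta--;               /* fused with left neighbour  */
		if (end < NBITS && bm[end])
			delta--;               /* fused with right neighbour */
		memset(bm + start, 1, count);
		return delta;
	}

	/* Clear bits [start, start+count); return the change in extent count. */
	static int clear_run(int start, int count)
	{
		int end = start + count, delta = -1;

		if (start > 0 && bm[start - 1])
			delta++;               /* left piece survives  */
		if (end < NBITS && bm[end])
			delta++;               /* right piece survives */
		memset(bm + start, 0, count);
		return delta;
	}

	int main(void)
	{
		int extents = 0;

		extents += set_run(8, 4);      /* +1: new extent          */
		extents += set_run(16, 4);     /* +1: disjoint extent     */
		extents += set_run(12, 4);     /* -1: bridges the two     */
		extents += clear_run(10, 2);   /* +1: splits one into two */
		printf("extents = %d\n", extents);   /* prints 2 */
		return 0;
	}
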
1806 bitmap_info->max_extent_size && in search_bitmap()
1807 bitmap_info->max_extent_size < *bytes) { in search_bitmap()
1808 *bytes = bitmap_info->max_extent_size; in search_bitmap()
1809 return -1; in search_bitmap()
1812 i = offset_to_bit(bitmap_info->offset, ctl->unit, in search_bitmap()
1813 max_t(u64, *offset, bitmap_info->offset)); in search_bitmap()
1814 bits = bytes_to_bits(*bytes, ctl->unit); in search_bitmap()
1816 for_each_set_bit_from(i, bitmap_info->bitmap, BITS_PER_BITMAP) { in search_bitmap()
1821 next_zero = find_next_zero_bit(bitmap_info->bitmap, in search_bitmap()
1823 extent_bits = next_zero - i; in search_bitmap()
1834 *offset = (u64)(i * ctl->unit) + bitmap_info->offset; in search_bitmap()
1835 *bytes = (u64)(found_bits) * ctl->unit; in search_bitmap()
1839 *bytes = (u64)(max_bits) * ctl->unit; in search_bitmap()
1840 bitmap_info->max_extent_size = *bytes; in search_bitmap()
1841 return -1; in search_bitmap()
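
search_bitmap() walks set bits from the requested offset, using find_next_zero_bit() to measure each run, and succeeds on the first run of at least the requested size; on failure it records the longest run seen into bitmap_info->max_extent_size so later searches (the early-out at the top of the function) can bail without rescanning. A sketch of that run search on a byte-per-bit bitmap:

	#include <stdio.h>

	#define NBITS 64

	/* Find the first run of at least 'want' set bits starting at *pos.
	 * On success return 0 with *pos/*len set; on failure return -1 and
	 * report the longest run seen, mirroring the max_extent_size cache. */
	static int search_runs(const unsigned char *bm, int want,
			       int *pos, int *len, int *max_seen)
	{
		int i = *pos, best = 0;

		while (i < NBITS) {
			int j;

			if (!bm[i]) {          /* skip clear bits */
				i++;
				continue;
			}
			for (j = i; j < NBITS && bm[j]; j++)
				;              /* j = next zero bit */
			if (j - i >= want) {
				*pos = i;
				*len = j - i;
				return 0;
			}
			if (j - i > best)
				best = j - i;
			i = j;
		}
		*max_seen = best;              /* lets callers bail early */
		return -1;
	}

	int main(void)
	{
		unsigned char bm[NBITS] = { 0 };
		int pos = 0, len = 0, max_seen = 0;

		for (int i = 8; i < 11; i++) bm[i] = 1;    /* run of 3 */
		for (int i = 20; i < 28; i++) bm[i] = 1;   /* run of 8 */

		if (!search_runs(bm, 5, &pos, &len, &max_seen))
			printf("found %d bits at %d\n", len, pos); /* 8 at 20 */
		return 0;
	}
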
1846 if (entry->bitmap) in get_max_extent_size()
1847 return entry->max_extent_size; in get_max_extent_size()
1848 return entry->bytes; in get_max_extent_size()
1862 if (!ctl->free_space_offset.rb_node) in find_free_space()
1869 for (node = &entry->offset_index; node; node = rb_next(node)) { in find_free_space()
1871 if (entry->bytes < *bytes) { in find_free_space()
1881 tmp = entry->offset - ctl->start + align - 1; in find_free_space()
1883 tmp = tmp * align + ctl->start; in find_free_space()
1884 align_off = tmp - entry->offset; in find_free_space()
1887 tmp = entry->offset; in find_free_space()
1890 if (entry->bytes < *bytes + align_off) { in find_free_space()
1896 if (entry->bitmap) { in find_free_space()
1913 *bytes = entry->bytes - align_off; in find_free_space()
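
find_free_space() rounds each candidate offset up to the allocator's alignment, measured from ctl->start rather than from zero, and then checks that the entry still holds bytes + align_off. The round-up step in isolation, with made-up numbers:

	#include <stdint.h>
	#include <stdio.h>

	/* Round entry_offset up to the next multiple of 'align' relative to
	 * the block group start, as find_free_space() does before sizing. */
	static uint64_t align_in_bg(uint64_t entry_offset, uint64_t ctl_start,
				    uint64_t align)
	{
		uint64_t tmp = entry_offset - ctl_start + align - 1;

		tmp /= align;
		return tmp * align + ctl_start;
	}

	int main(void)
	{
		uint64_t start = 12288;        /* made up, itself unaligned ok */
		uint64_t off = start + 5000;
		uint64_t aligned = align_in_bg(off, start, 4096);

		printf("offset %llu -> %llu (align_off %llu)\n",
		       (unsigned long long)off, (unsigned long long)aligned,
		       (unsigned long long)(aligned - off));
		return 0;
	}
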
1923 struct btrfs_block_group *block_group = ctl->private; in count_bitmap_extents()
1924 u64 bytes = bitmap_info->bytes; in count_bitmap_extents()
1931 bitmap_for_each_set_region(bitmap_info->bitmap, rs, re, 0, in count_bitmap_extents()
1933 bytes -= (rs - re) * ctl->unit; in count_bitmap_extents()
1946 info->offset = offset_to_bitmap(ctl, offset); in add_new_bitmap()
1947 info->bytes = 0; in add_new_bitmap()
1948 info->bitmap_extents = 0; in add_new_bitmap()
1949 INIT_LIST_HEAD(&info->list); in add_new_bitmap()
1951 ctl->total_bitmaps++; in add_new_bitmap()
1953 ctl->op->recalc_thresholds(ctl); in add_new_bitmap()
1965 if (bitmap_info->bytes && !btrfs_free_space_trimmed(bitmap_info)) { in free_bitmap()
1966 ctl->discardable_extents[BTRFS_STAT_CURR] -= in free_bitmap()
1967 bitmap_info->bitmap_extents; in free_bitmap()
1968 ctl->discardable_bytes[BTRFS_STAT_CURR] -= bitmap_info->bytes; in free_bitmap()
1972 kmem_cache_free(btrfs_free_space_bitmap_cachep, bitmap_info->bitmap); in free_bitmap()
1974 ctl->total_bitmaps--; in free_bitmap()
1975 ctl->op->recalc_thresholds(ctl); in free_bitmap()
1987 end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1; in remove_from_bitmap()
1996 search_bytes = ctl->unit; in remove_from_bitmap()
1997 search_bytes = min(search_bytes, end - search_start + 1); in remove_from_bitmap()
2001 return -EINVAL; in remove_from_bitmap()
2007 search_bytes = min(search_bytes, end - search_start + 1); in remove_from_bitmap()
2011 *bytes -= search_bytes; in remove_from_bitmap()
2014 struct rb_node *next = rb_next(&bitmap_info->offset_index); in remove_from_bitmap()
2015 if (!bitmap_info->bytes) in remove_from_bitmap()
2023 return -EINVAL; in remove_from_bitmap()
2032 if (!bitmap_info->bitmap) in remove_from_bitmap()
2033 return -EAGAIN; in remove_from_bitmap()
2042 search_bytes = ctl->unit; in remove_from_bitmap()
2046 return -EAGAIN; in remove_from_bitmap()
2049 } else if (!bitmap_info->bytes) in remove_from_bitmap()
2068 ctl->discardable_extents[BTRFS_STAT_CURR] += in add_bytes_to_bitmap()
2069 info->bitmap_extents; in add_bytes_to_bitmap()
2070 ctl->discardable_bytes[BTRFS_STAT_CURR] += info->bytes; in add_bytes_to_bitmap()
2072 info->trim_state = BTRFS_TRIM_STATE_UNTRIMMED; in add_bytes_to_bitmap()
2075 end = info->offset + (u64)(BITS_PER_BITMAP * ctl->unit); in add_bytes_to_bitmap()
2077 bytes_to_set = min(end - offset, bytes); in add_bytes_to_bitmap()
2085 info->max_extent_size = 0; in add_bytes_to_bitmap()
2094 struct btrfs_block_group *block_group = ctl->private; in use_bitmap()
2095 struct btrfs_fs_info *fs_info = block_group->fs_info; in use_bitmap()
2104 if (!forced && info->bytes >= FORCE_EXTENT_THRESHOLD) in use_bitmap()
2111 if (!forced && ctl->free_extents < ctl->extents_thresh) { in use_bitmap()
2119 if (info->bytes <= fs_info->sectorsize * 8) { in use_bitmap()
2120 if (ctl->free_extents * 3 <= ctl->extents_thresh) in use_bitmap()
2135 if (((BITS_PER_BITMAP * ctl->unit) >> 1) > block_group->length) in use_bitmap()
2156 bytes = info->bytes; in insert_into_bitmap()
2157 offset = info->offset; in insert_into_bitmap()
2158 trim_state = info->trim_state; in insert_into_bitmap()
2160 if (!ctl->op->use_bitmap(ctl, info)) in insert_into_bitmap()
2163 if (ctl->op == &free_space_op) in insert_into_bitmap()
2164 block_group = ctl->private; in insert_into_bitmap()
2167 * Since we link bitmaps right into the cluster we need to see if we in insert_into_bitmap()
2168 * have a cluster here, and if so and it has our bitmap we need to add in insert_into_bitmap()
2171 if (block_group && !list_empty(&block_group->cluster_list)) { in insert_into_bitmap()
2172 struct btrfs_free_cluster *cluster; in insert_into_bitmap() local
2176 cluster = list_entry(block_group->cluster_list.next, in insert_into_bitmap()
2179 spin_lock(&cluster->lock); in insert_into_bitmap()
2180 node = rb_first(&cluster->root); in insert_into_bitmap()
2182 spin_unlock(&cluster->lock); in insert_into_bitmap()
2187 if (!entry->bitmap) { in insert_into_bitmap()
2188 spin_unlock(&cluster->lock); in insert_into_bitmap()
2192 if (entry->offset == offset_to_bitmap(ctl, offset)) { in insert_into_bitmap()
2195 bytes -= bytes_added; in insert_into_bitmap()
2198 spin_unlock(&cluster->lock); in insert_into_bitmap()
2215 bytes -= bytes_added; in insert_into_bitmap()
2226 if (info && info->bitmap) { in insert_into_bitmap()
2232 spin_unlock(&ctl->tree_lock); in insert_into_bitmap()
2234 /* no pre-allocated info, allocate a new one */ in insert_into_bitmap()
2239 spin_lock(&ctl->tree_lock); in insert_into_bitmap()
2240 ret = -ENOMEM; in insert_into_bitmap()
2246 info->bitmap = kmem_cache_zalloc(btrfs_free_space_bitmap_cachep, in insert_into_bitmap()
2248 info->trim_state = BTRFS_TRIM_STATE_TRIMMED; in insert_into_bitmap()
2249 spin_lock(&ctl->tree_lock); in insert_into_bitmap()
2250 if (!info->bitmap) { in insert_into_bitmap()
2251 ret = -ENOMEM; in insert_into_bitmap()
2259 if (info->bitmap) in insert_into_bitmap()
2261 info->bitmap); in insert_into_bitmap()
2290 u64 offset = info->offset; in try_merge_free_space()
2291 u64 bytes = info->bytes; in try_merge_free_space()
2300 if (right_info && rb_prev(&right_info->offset_index)) in try_merge_free_space()
2301 left_info = rb_entry(rb_prev(&right_info->offset_index), in try_merge_free_space()
2304 left_info = tree_search_offset(ctl, offset - 1, 0, 0); in try_merge_free_space()
2307 if (right_info && !right_info->bitmap && in try_merge_free_space()
2313 info->bytes += right_info->bytes; in try_merge_free_space()
2319 if (left_info && !left_info->bitmap && in try_merge_free_space()
2320 left_info->offset + left_info->bytes == offset && in try_merge_free_space()
2326 info->offset = left_info->offset; in try_merge_free_space()
2327 info->bytes += left_info->bytes; in try_merge_free_space()
2342 const u64 end = info->offset + info->bytes; in steal_from_bitmap_to_end()
2350 i = offset_to_bit(bitmap->offset, ctl->unit, end); in steal_from_bitmap_to_end()
2351 j = find_next_zero_bit(bitmap->bitmap, BITS_PER_BITMAP, i); in steal_from_bitmap_to_end()
2354 bytes = (j - i) * ctl->unit; in steal_from_bitmap_to_end()
2355 info->bytes += bytes; in steal_from_bitmap_to_end()
2359 info->trim_state = BTRFS_TRIM_STATE_UNTRIMMED; in steal_from_bitmap_to_end()
2366 if (!bitmap->bytes) in steal_from_bitmap_to_end()
2383 bitmap_offset = offset_to_bitmap(ctl, info->offset); in steal_from_bitmap_to_front()
2385 if (bitmap_offset == info->offset) { in steal_from_bitmap_to_front()
2386 if (info->offset == 0) in steal_from_bitmap_to_front()
2388 bitmap_offset = offset_to_bitmap(ctl, info->offset - 1); in steal_from_bitmap_to_front()
2395 i = offset_to_bit(bitmap->offset, ctl->unit, info->offset) - 1; in steal_from_bitmap_to_front()
2397 prev_j = (unsigned long)-1; in steal_from_bitmap_to_front()
2398 for_each_clear_bit_from(j, bitmap->bitmap, BITS_PER_BITMAP) { in steal_from_bitmap_to_front()
2406 if (prev_j == (unsigned long)-1) in steal_from_bitmap_to_front()
2407 bytes = (i + 1) * ctl->unit; in steal_from_bitmap_to_front()
2409 bytes = (i - prev_j) * ctl->unit; in steal_from_bitmap_to_front()
2411 info->offset -= bytes; in steal_from_bitmap_to_front()
2412 info->bytes += bytes; in steal_from_bitmap_to_front()
2416 info->trim_state = BTRFS_TRIM_STATE_UNTRIMMED; in steal_from_bitmap_to_front()
2419 bitmap_clear_bits(ctl, bitmap, info->offset, bytes); in steal_from_bitmap_to_front()
2421 __bitmap_clear_bits(ctl, bitmap, info->offset, bytes); in steal_from_bitmap_to_front()
2423 if (!bitmap->bytes) in steal_from_bitmap_to_front()
2431 * non-clustered allocation requests. So when attempting to add a new extent
2436 * on 2 or more entries - even if the entries represent a contiguous free space
2448 ASSERT(!info->bitmap); in steal_from_bitmap()
2449 ASSERT(RB_EMPTY_NODE(&info->offset_index)); in steal_from_bitmap()
2451 if (ctl->total_bitmaps > 0) { in steal_from_bitmap()
2456 if (ctl->total_bitmaps > 0) in steal_from_bitmap()
2470 struct btrfs_block_group *block_group = ctl->private; in __btrfs_add_free_space()
2477 return -ENOMEM; in __btrfs_add_free_space()
2479 info->offset = offset; in __btrfs_add_free_space()
2480 info->bytes = bytes; in __btrfs_add_free_space()
2481 info->trim_state = trim_state; in __btrfs_add_free_space()
2482 RB_CLEAR_NODE(&info->offset_index); in __btrfs_add_free_space()
2484 spin_lock(&ctl->tree_lock); in __btrfs_add_free_space()
2504 * going to add the new free space to existing bitmap entries - because in __btrfs_add_free_space()
2510 filter_bytes = max(filter_bytes, info->bytes); in __btrfs_add_free_space()
2517 spin_unlock(&ctl->tree_lock); in __btrfs_add_free_space()
2521 ASSERT(ret != -EEXIST); in __btrfs_add_free_space()
2526 btrfs_discard_queue_work(&fs_info->discard_ctl, block_group); in __btrfs_add_free_space()
2537 if (btrfs_test_opt(block_group->fs_info, DISCARD_SYNC)) in btrfs_add_free_space()
2540 return __btrfs_add_free_space(block_group->fs_info, in btrfs_add_free_space()
2541 block_group->free_space_ctl, in btrfs_add_free_space()
2555 if (btrfs_test_opt(block_group->fs_info, DISCARD_SYNC) || in btrfs_add_free_space_async_trimmed()
2556 btrfs_test_opt(block_group->fs_info, DISCARD_ASYNC)) in btrfs_add_free_space_async_trimmed()
2559 return __btrfs_add_free_space(block_group->fs_info, in btrfs_add_free_space_async_trimmed()
2560 block_group->free_space_ctl, in btrfs_add_free_space_async_trimmed()
2567 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in btrfs_remove_free_space()
2572 spin_lock(&ctl->tree_lock); in btrfs_remove_free_space()
2599 if (!info->bitmap) { in btrfs_remove_free_space()
2601 if (offset == info->offset) { in btrfs_remove_free_space()
2602 u64 to_free = min(bytes, info->bytes); in btrfs_remove_free_space()
2604 info->bytes -= to_free; in btrfs_remove_free_space()
2605 info->offset += to_free; in btrfs_remove_free_space()
2606 if (info->bytes) { in btrfs_remove_free_space()
2614 bytes -= to_free; in btrfs_remove_free_space()
2617 u64 old_end = info->bytes + info->offset; in btrfs_remove_free_space()
2619 info->bytes = offset - info->offset; in btrfs_remove_free_space()
2627 bytes -= old_end - offset; in btrfs_remove_free_space()
2634 spin_unlock(&ctl->tree_lock); in btrfs_remove_free_space()
2636 ret = __btrfs_add_free_space(block_group->fs_info, ctl, in btrfs_remove_free_space()
2638 old_end - (offset + bytes), in btrfs_remove_free_space()
2639 info->trim_state); in btrfs_remove_free_space()
2646 if (ret == -EAGAIN) { in btrfs_remove_free_space()
2652 spin_unlock(&ctl->tree_lock); in btrfs_remove_free_space()
2660 struct btrfs_fs_info *fs_info = block_group->fs_info; in btrfs_dump_free_space()
2661 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in btrfs_dump_free_space()
2666 spin_lock(&ctl->tree_lock); in btrfs_dump_free_space()
2667 for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) { in btrfs_dump_free_space()
2669 if (info->bytes >= bytes && !block_group->ro) in btrfs_dump_free_space()
2672 info->offset, info->bytes, in btrfs_dump_free_space()
2673 (info->bitmap) ? "yes" : "no"); in btrfs_dump_free_space()
2675 spin_unlock(&ctl->tree_lock); in btrfs_dump_free_space()
2676 btrfs_info(fs_info, "block group has cluster?: %s", in btrfs_dump_free_space()
2677 list_empty(&block_group->cluster_list) ? "no" : "yes"); in btrfs_dump_free_space()
2684 struct btrfs_fs_info *fs_info = block_group->fs_info; in btrfs_init_free_space_ctl()
2685 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in btrfs_init_free_space_ctl()
2687 spin_lock_init(&ctl->tree_lock); in btrfs_init_free_space_ctl()
2688 ctl->unit = fs_info->sectorsize; in btrfs_init_free_space_ctl()
2689 ctl->start = block_group->start; in btrfs_init_free_space_ctl()
2690 ctl->private = block_group; in btrfs_init_free_space_ctl()
2691 ctl->op = &free_space_op; in btrfs_init_free_space_ctl()
2692 INIT_LIST_HEAD(&ctl->trimming_ranges); in btrfs_init_free_space_ctl()
2693 mutex_init(&ctl->cache_writeout_mutex); in btrfs_init_free_space_ctl()
2700 ctl->extents_thresh = (SZ_32K / 2) / sizeof(struct btrfs_free_space); in btrfs_init_free_space_ctl()
2704 * for a given cluster, put all of its extents back into the free
2706 * pointed to by the cluster, someone else raced in and freed the
2707 * cluster already. In that case, we just return without changing anything
2711 struct btrfs_free_cluster *cluster) in __btrfs_return_cluster_to_free_space() argument
2713 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in __btrfs_return_cluster_to_free_space()
2717 spin_lock(&cluster->lock); in __btrfs_return_cluster_to_free_space()
2718 if (cluster->block_group != block_group) { in __btrfs_return_cluster_to_free_space()
2719 spin_unlock(&cluster->lock); in __btrfs_return_cluster_to_free_space()
2723 cluster->block_group = NULL; in __btrfs_return_cluster_to_free_space()
2724 cluster->window_start = 0; in __btrfs_return_cluster_to_free_space()
2725 list_del_init(&cluster->block_group_list); in __btrfs_return_cluster_to_free_space()
2727 node = rb_first(&cluster->root); in __btrfs_return_cluster_to_free_space()
2732 node = rb_next(&entry->offset_index); in __btrfs_return_cluster_to_free_space()
2733 rb_erase(&entry->offset_index, &cluster->root); in __btrfs_return_cluster_to_free_space()
2734 RB_CLEAR_NODE(&entry->offset_index); in __btrfs_return_cluster_to_free_space()
2736 bitmap = (entry->bitmap != NULL); in __btrfs_return_cluster_to_free_space()
2740 ctl->discardable_extents[BTRFS_STAT_CURR]--; in __btrfs_return_cluster_to_free_space()
2741 ctl->discardable_bytes[BTRFS_STAT_CURR] -= in __btrfs_return_cluster_to_free_space()
2742 entry->bytes; in __btrfs_return_cluster_to_free_space()
2750 ctl->discardable_extents[BTRFS_STAT_CURR]++; in __btrfs_return_cluster_to_free_space()
2751 ctl->discardable_bytes[BTRFS_STAT_CURR] += in __btrfs_return_cluster_to_free_space()
2752 entry->bytes; in __btrfs_return_cluster_to_free_space()
2755 tree_insert_offset(&ctl->free_space_offset, in __btrfs_return_cluster_to_free_space()
2756 entry->offset, &entry->offset_index, bitmap); in __btrfs_return_cluster_to_free_space()
2758 cluster->root = RB_ROOT; in __btrfs_return_cluster_to_free_space()
2759 spin_unlock(&cluster->lock); in __btrfs_return_cluster_to_free_space()
2769 while ((node = rb_last(&ctl->free_space_offset)) != NULL) { in __btrfs_remove_free_space_cache_locked()
2771 if (!info->bitmap) { in __btrfs_remove_free_space_cache_locked()
2778 cond_resched_lock(&ctl->tree_lock); in __btrfs_remove_free_space_cache_locked()
2784 spin_lock(&ctl->tree_lock); in __btrfs_remove_free_space_cache()
2786 if (ctl->private) in __btrfs_remove_free_space_cache()
2787 btrfs_discard_update_discardable(ctl->private, ctl); in __btrfs_remove_free_space_cache()
2788 spin_unlock(&ctl->tree_lock); in __btrfs_remove_free_space_cache()
2793 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in btrfs_remove_free_space_cache()
2794 struct btrfs_free_cluster *cluster; in btrfs_remove_free_space_cache() local
2797 spin_lock(&ctl->tree_lock); in btrfs_remove_free_space_cache()
2798 while ((head = block_group->cluster_list.next) != in btrfs_remove_free_space_cache()
2799 &block_group->cluster_list) { in btrfs_remove_free_space_cache()
2800 cluster = list_entry(head, struct btrfs_free_cluster, in btrfs_remove_free_space_cache()
2803 WARN_ON(cluster->block_group != block_group); in btrfs_remove_free_space_cache()
2804 __btrfs_return_cluster_to_free_space(block_group, cluster); in btrfs_remove_free_space_cache()
2806 cond_resched_lock(&ctl->tree_lock); in btrfs_remove_free_space_cache()
2810 spin_unlock(&ctl->tree_lock); in btrfs_remove_free_space_cache()
2815 * btrfs_is_free_space_trimmed - see if everything is trimmed
2822 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in btrfs_is_free_space_trimmed()
2827 spin_lock(&ctl->tree_lock); in btrfs_is_free_space_trimmed()
2828 node = rb_first(&ctl->free_space_offset); in btrfs_is_free_space_trimmed()
2841 spin_unlock(&ctl->tree_lock); in btrfs_is_free_space_trimmed()
2849 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in btrfs_find_space_for_alloc()
2851 &block_group->fs_info->discard_ctl; in btrfs_find_space_for_alloc()
2859 spin_lock(&ctl->tree_lock); in btrfs_find_space_for_alloc()
2861 block_group->full_stripe_len, max_extent_size); in btrfs_find_space_for_alloc()
2866 if (entry->bitmap) { in btrfs_find_space_for_alloc()
2870 atomic64_add(bytes, &discard_ctl->discard_bytes_saved); in btrfs_find_space_for_alloc()
2872 if (!entry->bytes) in btrfs_find_space_for_alloc()
2876 align_gap_len = offset - entry->offset; in btrfs_find_space_for_alloc()
2877 align_gap = entry->offset; in btrfs_find_space_for_alloc()
2878 align_gap_trim_state = entry->trim_state; in btrfs_find_space_for_alloc()
2881 atomic64_add(bytes, &discard_ctl->discard_bytes_saved); in btrfs_find_space_for_alloc()
2883 entry->offset = offset + bytes; in btrfs_find_space_for_alloc()
2884 WARN_ON(entry->bytes < bytes + align_gap_len); in btrfs_find_space_for_alloc()
2886 entry->bytes -= bytes + align_gap_len; in btrfs_find_space_for_alloc()
2887 if (!entry->bytes) in btrfs_find_space_for_alloc()
2894 spin_unlock(&ctl->tree_lock); in btrfs_find_space_for_alloc()
2897 __btrfs_add_free_space(block_group->fs_info, ctl, in btrfs_find_space_for_alloc()
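
btrfs_find_space_for_alloc() may hand out an offset past the start of the chosen extent entry to honour full_stripe_len alignment; the skipped head (align_gap_len bytes at align_gap) is immediately returned to the pool, which is what the __btrfs_add_free_space() call above does in the kernel. The bookkeeping in plain arithmetic:

	#include <stdint.h>
	#include <stdio.h>

	struct extent { uint64_t offset, bytes; };

	/* Carve 'bytes' at 'aligned' out of *e; *gap_off/*gap_len describe
	 * the skipped head the caller re-adds as free space. Assumes the
	 * aligned range fits inside the entry, as the kernel has checked. */
	static void carve(struct extent *e, uint64_t aligned, uint64_t bytes,
			  uint64_t *gap_off, uint64_t *gap_len)
	{
		*gap_off = e->offset;
		*gap_len = aligned - e->offset;   /* may be zero */
		e->bytes -= bytes + *gap_len;     /* tail that stays free */
		e->offset = aligned + bytes;
	}

	int main(void)
	{
		struct extent e = { 17288, 65536 };
		uint64_t gap_off, gap_len;

		carve(&e, 20480, 16384, &gap_off, &gap_len);
		printf("alloc [20480,+16384), gap [%llu,+%llu), tail [%llu,+%llu)\n",
		       (unsigned long long)gap_off, (unsigned long long)gap_len,
		       (unsigned long long)e.offset, (unsigned long long)e.bytes);
		return 0;
	}
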
2904 * given a cluster, put all of its extents back into the free space
2906 * a cluster that belongs to the passed block group.
2909 * cluster and remove the cluster from it.
2913 struct btrfs_free_cluster *cluster) in btrfs_return_cluster_to_free_space() argument
2918 spin_lock(&cluster->lock); in btrfs_return_cluster_to_free_space()
2920 block_group = cluster->block_group; in btrfs_return_cluster_to_free_space()
2922 spin_unlock(&cluster->lock); in btrfs_return_cluster_to_free_space()
2925 } else if (cluster->block_group != block_group) { in btrfs_return_cluster_to_free_space()
2927 spin_unlock(&cluster->lock); in btrfs_return_cluster_to_free_space()
2931 spin_unlock(&cluster->lock); in btrfs_return_cluster_to_free_space()
2933 ctl = block_group->free_space_ctl; in btrfs_return_cluster_to_free_space()
2935 /* now return any extents the cluster had on it */ in btrfs_return_cluster_to_free_space()
2936 spin_lock(&ctl->tree_lock); in btrfs_return_cluster_to_free_space()
2937 __btrfs_return_cluster_to_free_space(block_group, cluster); in btrfs_return_cluster_to_free_space()
2938 spin_unlock(&ctl->tree_lock); in btrfs_return_cluster_to_free_space()
2940 btrfs_discard_queue_work(&block_group->fs_info->discard_ctl, block_group); in btrfs_return_cluster_to_free_space()
2947 struct btrfs_free_cluster *cluster, in btrfs_alloc_from_bitmap() argument
2952 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in btrfs_alloc_from_bitmap()
2954 u64 search_start = cluster->window_start; in btrfs_alloc_from_bitmap()
2975 * given a cluster, try to allocate 'bytes' from it, returns 0
2980 struct btrfs_free_cluster *cluster, u64 bytes, in btrfs_alloc_from_cluster() argument
2983 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in btrfs_alloc_from_cluster()
2985 &block_group->fs_info->discard_ctl; in btrfs_alloc_from_cluster()
2990 spin_lock(&cluster->lock); in btrfs_alloc_from_cluster()
2991 if (bytes > cluster->max_size) in btrfs_alloc_from_cluster()
2994 if (cluster->block_group != block_group) in btrfs_alloc_from_cluster()
2997 node = rb_first(&cluster->root); in btrfs_alloc_from_cluster()
3003 if (entry->bytes < bytes) in btrfs_alloc_from_cluster()
3007 if (entry->bytes < bytes || in btrfs_alloc_from_cluster()
3008 (!entry->bitmap && entry->offset < min_start)) { in btrfs_alloc_from_cluster()
3009 node = rb_next(&entry->offset_index); in btrfs_alloc_from_cluster()
3017 if (entry->bitmap) { in btrfs_alloc_from_cluster()
3019 cluster, entry, bytes, in btrfs_alloc_from_cluster()
3020 cluster->window_start, in btrfs_alloc_from_cluster()
3023 node = rb_next(&entry->offset_index); in btrfs_alloc_from_cluster()
3030 cluster->window_start += bytes; in btrfs_alloc_from_cluster()
3032 ret = entry->offset; in btrfs_alloc_from_cluster()
3034 entry->offset += bytes; in btrfs_alloc_from_cluster()
3035 entry->bytes -= bytes; in btrfs_alloc_from_cluster()
3041 spin_unlock(&cluster->lock); in btrfs_alloc_from_cluster()
3046 spin_lock(&ctl->tree_lock); in btrfs_alloc_from_cluster()
3049 atomic64_add(bytes, &discard_ctl->discard_bytes_saved); in btrfs_alloc_from_cluster()
3051 ctl->free_space -= bytes; in btrfs_alloc_from_cluster()
3052 if (!entry->bitmap && !btrfs_free_space_trimmed(entry)) in btrfs_alloc_from_cluster()
3053 ctl->discardable_bytes[BTRFS_STAT_CURR] -= bytes; in btrfs_alloc_from_cluster()
3055 spin_lock(&cluster->lock); in btrfs_alloc_from_cluster()
3056 if (entry->bytes == 0) { in btrfs_alloc_from_cluster()
3057 rb_erase(&entry->offset_index, &cluster->root); in btrfs_alloc_from_cluster()
3058 ctl->free_extents--; in btrfs_alloc_from_cluster()
3059 if (entry->bitmap) { in btrfs_alloc_from_cluster()
3061 entry->bitmap); in btrfs_alloc_from_cluster()
3062 ctl->total_bitmaps--; in btrfs_alloc_from_cluster()
3063 ctl->op->recalc_thresholds(ctl); in btrfs_alloc_from_cluster()
3065 ctl->discardable_extents[BTRFS_STAT_CURR]--; in btrfs_alloc_from_cluster()
3070 spin_unlock(&cluster->lock); in btrfs_alloc_from_cluster()
3071 spin_unlock(&ctl->tree_lock); in btrfs_alloc_from_cluster()
3078 struct btrfs_free_cluster *cluster, in btrfs_bitmap_cluster() argument
3082 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in btrfs_bitmap_cluster()
3093 i = offset_to_bit(entry->offset, ctl->unit, in btrfs_bitmap_cluster()
3094 max_t(u64, offset, entry->offset)); in btrfs_bitmap_cluster()
3095 want_bits = bytes_to_bits(bytes, ctl->unit); in btrfs_bitmap_cluster()
3096 min_bits = bytes_to_bits(min_bytes, ctl->unit); in btrfs_bitmap_cluster()
3099 * Don't bother looking for a cluster in this bitmap if it's heavily in btrfs_bitmap_cluster()
3102 if (entry->max_extent_size && in btrfs_bitmap_cluster()
3103 entry->max_extent_size < cont1_bytes) in btrfs_bitmap_cluster()
3104 return -ENOSPC; in btrfs_bitmap_cluster()
3107 for_each_set_bit_from(i, entry->bitmap, BITS_PER_BITMAP) { in btrfs_bitmap_cluster()
3108 next_zero = find_next_zero_bit(entry->bitmap, in btrfs_bitmap_cluster()
3110 if (next_zero - i >= min_bits) { in btrfs_bitmap_cluster()
3111 found_bits = next_zero - i; in btrfs_bitmap_cluster()
3116 if (next_zero - i > max_bits) in btrfs_bitmap_cluster()
3117 max_bits = next_zero - i; in btrfs_bitmap_cluster()
3122 entry->max_extent_size = (u64)max_bits * ctl->unit; in btrfs_bitmap_cluster()
3123 return -ENOSPC; in btrfs_bitmap_cluster()
3128 cluster->max_size = 0; in btrfs_bitmap_cluster()
3133 if (cluster->max_size < found_bits * ctl->unit) in btrfs_bitmap_cluster()
3134 cluster->max_size = found_bits * ctl->unit; in btrfs_bitmap_cluster()
3136 if (total_found < want_bits || cluster->max_size < cont1_bytes) { in btrfs_bitmap_cluster()
3141 cluster->window_start = start * ctl->unit + entry->offset; in btrfs_bitmap_cluster()
3142 rb_erase(&entry->offset_index, &ctl->free_space_offset); in btrfs_bitmap_cluster()
3143 ret = tree_insert_offset(&cluster->root, entry->offset, in btrfs_bitmap_cluster()
3144 &entry->offset_index, 1); in btrfs_bitmap_cluster()
3145 ASSERT(!ret); /* -EEXIST; Logic error */ in btrfs_bitmap_cluster()
3147 trace_btrfs_setup_cluster(block_group, cluster, in btrfs_bitmap_cluster()
3148 total_found * ctl->unit, 1); in btrfs_bitmap_cluster()
3153 * This searches the block group for just extents to fill the cluster with.
3154 * Try to find a cluster with at least bytes total bytes, at least one
3159 struct btrfs_free_cluster *cluster, in setup_cluster_no_bitmap() argument
3163 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in setup_cluster_no_bitmap()
3174 return -ENOSPC; in setup_cluster_no_bitmap()
3180 while (entry->bitmap || entry->bytes < min_bytes) { in setup_cluster_no_bitmap()
3181 if (entry->bitmap && list_empty(&entry->list)) in setup_cluster_no_bitmap()
3182 list_add_tail(&entry->list, bitmaps); in setup_cluster_no_bitmap()
3183 node = rb_next(&entry->offset_index); in setup_cluster_no_bitmap()
3185 return -ENOSPC; in setup_cluster_no_bitmap()
3189 window_free = entry->bytes; in setup_cluster_no_bitmap()
3190 max_extent = entry->bytes; in setup_cluster_no_bitmap()
3194 for (node = rb_next(&entry->offset_index); node; in setup_cluster_no_bitmap()
3195 node = rb_next(&entry->offset_index)) { in setup_cluster_no_bitmap()
3198 if (entry->bitmap) { in setup_cluster_no_bitmap()
3199 if (list_empty(&entry->list)) in setup_cluster_no_bitmap()
3200 list_add_tail(&entry->list, bitmaps); in setup_cluster_no_bitmap()
3204 if (entry->bytes < min_bytes) in setup_cluster_no_bitmap()
3208 window_free += entry->bytes; in setup_cluster_no_bitmap()
3209 if (entry->bytes > max_extent) in setup_cluster_no_bitmap()
3210 max_extent = entry->bytes; in setup_cluster_no_bitmap()
3214 return -ENOSPC; in setup_cluster_no_bitmap()
3216 cluster->window_start = first->offset; in setup_cluster_no_bitmap()
3218 node = &first->offset_index; in setup_cluster_no_bitmap()
3222 * cache and put them into the cluster rbtree in setup_cluster_no_bitmap()
3228 node = rb_next(&entry->offset_index); in setup_cluster_no_bitmap()
3229 if (entry->bitmap || entry->bytes < min_bytes) in setup_cluster_no_bitmap()
3232 rb_erase(&entry->offset_index, &ctl->free_space_offset); in setup_cluster_no_bitmap()
3233 ret = tree_insert_offset(&cluster->root, entry->offset, in setup_cluster_no_bitmap()
3234 &entry->offset_index, 0); in setup_cluster_no_bitmap()
3235 total_size += entry->bytes; in setup_cluster_no_bitmap()
3236 ASSERT(!ret); /* -EEXIST; Logic error */ in setup_cluster_no_bitmap()
3239 cluster->max_size = max_extent; in setup_cluster_no_bitmap()
3240 trace_btrfs_setup_cluster(block_group, cluster, total_size, 0); in setup_cluster_no_bitmap()
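/*
 * The first pass above, reduced to a sketch: given extent sizes in offset
 * order, grow the window with every extent >= min_bytes until the total
 * reaches the target, remembering the largest extent for cluster->max_size.
 * This is an illustration under assumed names (window_fits, want_bytes),
 * not the kernel code path.
 */
static int window_fits(const unsigned long long *sizes, int n,
		       unsigned long long min_bytes,
		       unsigned long long want_bytes,
		       unsigned long long *max_extent)
{
	unsigned long long window_free = 0;
	int i;

	*max_extent = 0;
	for (i = 0; i < n; i++) {
		if (sizes[i] < min_bytes)
			continue;	/* too small; pass one skips these */
		window_free += sizes[i];
		if (sizes[i] > *max_extent)
			*max_extent = sizes[i];
		if (window_free >= want_bytes)
			return 1;	/* second pass would link entries */
	}
	return 0;	/* maps to -ENOSPC in setup_cluster_no_bitmap() */
}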
3245 * This specifically looks for bitmaps that may work in the cluster; we assume that we have already failed to find extents that will work.
3250 struct btrfs_free_cluster *cluster, in setup_cluster_bitmap() argument
3254 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in setup_cluster_bitmap()
3256 int ret = -ENOSPC; in setup_cluster_bitmap()
3259 if (ctl->total_bitmaps == 0) in setup_cluster_bitmap()
3260 return -ENOSPC; in setup_cluster_bitmap()
3269 if (!entry || entry->offset != bitmap_offset) { in setup_cluster_bitmap()
3271 if (entry && list_empty(&entry->list)) in setup_cluster_bitmap()
3272 list_add(&entry->list, bitmaps); in setup_cluster_bitmap()
3276 if (entry->bytes < bytes) in setup_cluster_bitmap()
3278 ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset, in setup_cluster_bitmap()
3288 return -ENOSPC; in setup_cluster_bitmap()
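/*
 * Note on the fallback order: setup_cluster_bitmap() first retries the
 * bitmap that covers @offset, then walks the bitmaps the extent pass
 * parked on the temporary @bitmaps list, and only returns -ENOSPC once
 * every candidate bitmap has failed.
 */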
3292 * here we try to find a cluster of blocks in a block group. The goal is to find at least @bytes + @empty_size of free space.
3296 * returns zero and sets up cluster if things worked out, otherwise
3297 * it returns -ENOSPC
3300 struct btrfs_free_cluster *cluster, in btrfs_find_space_cluster() argument
3303 struct btrfs_fs_info *fs_info = block_group->fs_info; in btrfs_find_space_cluster()
3304 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in btrfs_find_space_cluster()
3313 * Choose the minimum extent size we'll require for this cluster. For SSD_SPREAD, don't allow any fragmentation. in btrfs_find_space_cluster()
3319 } else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) { in btrfs_find_space_cluster()
3321 min_bytes = fs_info->sectorsize; in btrfs_find_space_cluster()
3324 min_bytes = fs_info->sectorsize; in btrfs_find_space_cluster()
3327 spin_lock(&ctl->tree_lock); in btrfs_find_space_cluster()
3330 * If we know we don't have enough space to make a cluster, don't even bother doing all the work to try and find one. in btrfs_find_space_cluster()
3333 if (ctl->free_space < bytes) { in btrfs_find_space_cluster()
3334 spin_unlock(&ctl->tree_lock); in btrfs_find_space_cluster()
3335 return -ENOSPC; in btrfs_find_space_cluster()
3338 spin_lock(&cluster->lock); in btrfs_find_space_cluster()
3340 /* someone already found a cluster, hooray */ in btrfs_find_space_cluster()
3341 if (cluster->block_group) { in btrfs_find_space_cluster()
3349 ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset, in btrfs_find_space_cluster()
3353 ret = setup_cluster_bitmap(block_group, cluster, &bitmaps, in btrfs_find_space_cluster()
3359 list_del_init(&entry->list); in btrfs_find_space_cluster()
3363 list_add_tail(&cluster->block_group_list, in btrfs_find_space_cluster()
3364 &block_group->cluster_list); in btrfs_find_space_cluster()
3365 cluster->block_group = block_group; in btrfs_find_space_cluster()
3370 spin_unlock(&cluster->lock); in btrfs_find_space_cluster()
3371 spin_unlock(&ctl->tree_lock); in btrfs_find_space_cluster()
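/*
 * On success, btrfs_find_space_cluster() has linked the cluster into the
 * block group's cluster_list and set cluster->block_group, all while
 * holding cluster->lock inside ctl->tree_lock; the bitmaps queued on the
 * temporary list during the failed extent pass are always drained first.
 */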
3377 * simple code to zero out a cluster
3379 void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster) in btrfs_init_free_cluster() argument
3381 spin_lock_init(&cluster->lock); in btrfs_init_free_cluster()
3382 spin_lock_init(&cluster->refill_lock); in btrfs_init_free_cluster()
3383 cluster->root = RB_ROOT; in btrfs_init_free_cluster()
3384 cluster->max_size = 0; in btrfs_init_free_cluster()
3385 cluster->fragmented = false; in btrfs_init_free_cluster()
3386 INIT_LIST_HEAD(&cluster->block_group_list); in btrfs_init_free_cluster()
3387 cluster->block_group = NULL; in btrfs_init_free_cluster()
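/*
 * A free cluster is a per-allocator staging area rather than block-group
 * state; it only becomes associated with a block group when
 * btrfs_find_space_cluster() succeeds. refill_lock (initialized above)
 * serializes refilling the cluster against allocating out of it.
 */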
3396 struct btrfs_space_info *space_info = block_group->space_info; in do_trimming()
3397 struct btrfs_fs_info *fs_info = block_group->fs_info; in do_trimming()
3398 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in do_trimming()
3406 spin_lock(&space_info->lock); in do_trimming()
3407 spin_lock(&block_group->lock); in do_trimming()
3408 if (!block_group->ro) { in do_trimming()
3409 block_group->reserved += reserved_bytes; in do_trimming()
3410 space_info->bytes_reserved += reserved_bytes; in do_trimming()
3413 spin_unlock(&block_group->lock); in do_trimming()
3414 spin_unlock(&space_info->lock); in do_trimming()
3422 mutex_lock(&ctl->cache_writeout_mutex); in do_trimming()
3425 start - reserved_start, in do_trimming()
3428 __btrfs_add_free_space(fs_info, ctl, end, reserved_end - end, in do_trimming()
3431 list_del(&trim_entry->list); in do_trimming()
3432 mutex_unlock(&ctl->cache_writeout_mutex); in do_trimming()
3435 spin_lock(&space_info->lock); in do_trimming()
3436 spin_lock(&block_group->lock); in do_trimming()
3437 if (block_group->ro) in do_trimming()
3438 space_info->bytes_readonly += reserved_bytes; in do_trimming()
3439 block_group->reserved -= reserved_bytes; in do_trimming()
3440 space_info->bytes_reserved -= reserved_bytes; in do_trimming()
3441 spin_unlock(&block_group->lock); in do_trimming()
3442 spin_unlock(&space_info->lock); in do_trimming()
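/*
 * The giveback arithmetic in do_trimming(), in isolation: the whole of
 * [reserved_start, reserved_end) was reserved, but only [start, end) was
 * discarded, so the untrimmed head and tail return to the free-space tree.
 * A standalone sketch with hypothetical names:
 */
struct leftover { unsigned long long off, len; };

static int split_reservation(unsigned long long reserved_start,
			     unsigned long long reserved_end,
			     unsigned long long start,
			     unsigned long long end,
			     struct leftover out[2])
{
	int n = 0;

	if (start > reserved_start)	/* untrimmed head */
		out[n++] = (struct leftover){ reserved_start,
					      start - reserved_start };
	if (end < reserved_end)		/* untrimmed tail */
		out[n++] = (struct leftover){ end, reserved_end - end };
	return n;	/* each piece is re-added as untrimmed free space */
}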
3456 &block_group->fs_info->discard_ctl; in trim_no_bitmap()
3457 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in trim_no_bitmap()
3465 const u64 max_discard_size = READ_ONCE(discard_ctl->max_discard_size); in trim_no_bitmap()
3470 mutex_lock(&ctl->cache_writeout_mutex); in trim_no_bitmap()
3471 spin_lock(&ctl->tree_lock); in trim_no_bitmap()
3473 if (ctl->free_space < minlen) in trim_no_bitmap()
3481 while (entry->bitmap || in trim_no_bitmap()
3483 node = rb_next(&entry->offset_index); in trim_no_bitmap()
3490 if (entry->offset >= end) in trim_no_bitmap()
3493 extent_start = entry->offset; in trim_no_bitmap()
3494 extent_bytes = entry->bytes; in trim_no_bitmap()
3495 extent_trim_state = entry->trim_state; in trim_no_bitmap()
3497 start = entry->offset; in trim_no_bitmap()
3498 bytes = entry->bytes; in trim_no_bitmap()
3500 spin_unlock(&ctl->tree_lock); in trim_no_bitmap()
3501 mutex_unlock(&ctl->cache_writeout_mutex); in trim_no_bitmap()
3515 entry->offset += max_discard_size; in trim_no_bitmap()
3516 entry->bytes -= max_discard_size; in trim_no_bitmap()
3523 bytes = min(extent_start + extent_bytes, end) - start; in trim_no_bitmap()
3525 spin_unlock(&ctl->tree_lock); in trim_no_bitmap()
3526 mutex_unlock(&ctl->cache_writeout_mutex); in trim_no_bitmap()
3534 spin_unlock(&ctl->tree_lock); in trim_no_bitmap()
3537 list_add_tail(&trim_entry.list, &ctl->trimming_ranges); in trim_no_bitmap()
3538 mutex_unlock(&ctl->cache_writeout_mutex); in trim_no_bitmap()
3544 block_group->discard_cursor = start + bytes; in trim_no_bitmap()
3549 block_group->discard_cursor = start; in trim_no_bitmap()
3554 ret = -ERESTARTSYS; in trim_no_bitmap()
3564 block_group->discard_cursor = btrfs_block_group_end(block_group); in trim_no_bitmap()
3565 spin_unlock(&ctl->tree_lock); in trim_no_bitmap()
3566 mutex_unlock(&ctl->cache_writeout_mutex); in trim_no_bitmap()
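/*
 * Chunking sketch for the async-discard path above: each pass trims at
 * most max_discard_size bytes of an extent, and the remainder is pushed
 * back (entry->offset/bytes are advanced under the lock) for a later
 * pass. take_chunk() is a hypothetical illustration:
 */
static unsigned long long take_chunk(unsigned long long *offset,
				     unsigned long long *bytes,
				     unsigned long long max_discard_size,
				     unsigned long long *chunk_start)
{
	unsigned long long len = *bytes;

	*chunk_start = *offset;
	if (max_discard_size && len > max_discard_size)
		len = max_discard_size;
	*offset += len;		/* remainder, if any, waits for the next pass */
	*bytes -= len;
	return len;
}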
3589 spin_lock(&ctl->tree_lock); in reset_trimming_bitmap()
3593 ctl->discardable_extents[BTRFS_STAT_CURR] += in reset_trimming_bitmap()
3594 entry->bitmap_extents; in reset_trimming_bitmap()
3595 ctl->discardable_bytes[BTRFS_STAT_CURR] += entry->bytes; in reset_trimming_bitmap()
3597 entry->trim_state = BTRFS_TRIM_STATE_UNTRIMMED; in reset_trimming_bitmap()
3600 spin_unlock(&ctl->tree_lock); in reset_trimming_bitmap()
3607 entry->trim_state = BTRFS_TRIM_STATE_TRIMMED; in end_trimming_bitmap()
3608 ctl->discardable_extents[BTRFS_STAT_CURR] -= in end_trimming_bitmap()
3609 entry->bitmap_extents; in end_trimming_bitmap()
3610 ctl->discardable_bytes[BTRFS_STAT_CURR] -= entry->bytes; in end_trimming_bitmap()
3622 &block_group->fs_info->discard_ctl; in trim_bitmaps()
3623 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in trim_bitmaps()
3629 const u64 max_discard_size = READ_ONCE(discard_ctl->max_discard_size); in trim_bitmaps()
3635 mutex_lock(&ctl->cache_writeout_mutex); in trim_bitmaps()
3636 spin_lock(&ctl->tree_lock); in trim_bitmaps()
3638 if (ctl->free_space < minlen) { in trim_bitmaps()
3639 block_group->discard_cursor = btrfs_block_group_end(block_group); in trim_bitmaps()
3641 spin_unlock(&ctl->tree_lock); in trim_bitmaps()
3642 mutex_unlock(&ctl->cache_writeout_mutex); in trim_bitmaps()
3653 * This uses @minlen to determine if we are in BTRFS_DISCARD_INDEX_UNUSED, which is the only discard index that sets minlen to 0. in trim_bitmaps()
3657 spin_unlock(&ctl->tree_lock); in trim_bitmaps()
3658 mutex_unlock(&ctl->cache_writeout_mutex); in trim_bitmaps()
3670 entry->trim_state = BTRFS_TRIM_STATE_TRIMMING; in trim_bitmaps()
3682 entry->trim_state = BTRFS_TRIM_STATE_UNTRIMMED; in trim_bitmaps()
3683 spin_unlock(&ctl->tree_lock); in trim_bitmaps()
3684 mutex_unlock(&ctl->cache_writeout_mutex); in trim_bitmaps()
3694 spin_unlock(&ctl->tree_lock); in trim_bitmaps()
3695 mutex_unlock(&ctl->cache_writeout_mutex); in trim_bitmaps()
3699 bytes = min(bytes, end - start); in trim_bitmaps()
3701 spin_unlock(&ctl->tree_lock); in trim_bitmaps()
3702 mutex_unlock(&ctl->cache_writeout_mutex); in trim_bitmaps()
3718 if (entry->bytes == 0) in trim_bitmaps()
3721 spin_unlock(&ctl->tree_lock); in trim_bitmaps()
3724 list_add_tail(&trim_entry.list, &ctl->trimming_ranges); in trim_bitmaps()
3725 mutex_unlock(&ctl->cache_writeout_mutex); in trim_bitmaps()
3731 block_group->discard_cursor = btrfs_block_group_end(block_group); in trim_bitmaps()
3737 offset += BITS_PER_BITMAP * ctl->unit; in trim_bitmaps()
3742 block_group->discard_cursor = start; in trim_bitmaps()
3747 ret = -ERESTARTSYS; in trim_bitmaps()
3755 block_group->discard_cursor = end; in trim_bitmaps()
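/*
 * trim_bitmaps() advances in strides of one bitmap's coverage
 * (BITS_PER_BITMAP * ctl->unit bytes per pass), and discard_cursor
 * records how far it got so an interrupted trim can resume there.
 */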
3764 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in btrfs_trim_block_group()
3770 spin_lock(&block_group->lock); in btrfs_trim_block_group()
3771 if (block_group->removed) { in btrfs_trim_block_group()
3772 spin_unlock(&block_group->lock); in btrfs_trim_block_group()
3776 spin_unlock(&block_group->lock); in btrfs_trim_block_group()
3783 div64_u64_rem(end, BITS_PER_BITMAP * ctl->unit, &rem); in btrfs_trim_block_group()
3800 spin_lock(&block_group->lock); in btrfs_trim_block_group_extents()
3801 if (block_group->removed) { in btrfs_trim_block_group_extents()
3802 spin_unlock(&block_group->lock); in btrfs_trim_block_group_extents()
3806 spin_unlock(&block_group->lock); in btrfs_trim_block_group_extents()
3822 spin_lock(&block_group->lock); in btrfs_trim_block_group_bitmaps()
3823 if (block_group->removed) { in btrfs_trim_block_group_bitmaps()
3824 spin_unlock(&block_group->lock); in btrfs_trim_block_group_bitmaps()
3828 spin_unlock(&block_group->lock); in btrfs_trim_block_group_bitmaps()
3839 * Find the left-most item in the cache tree, and then return the
3840 * smallest inode number in the item.
3841 *
3842 * Note: the returned inode number may not be the smallest one in
3843 * the tree, if the left-most item is a bitmap.
3847 struct btrfs_free_space_ctl *ctl = fs_root->free_ino_ctl; in btrfs_find_ino_for_alloc()
3851 spin_lock(&ctl->tree_lock); in btrfs_find_ino_for_alloc()
3853 if (RB_EMPTY_ROOT(&ctl->free_space_offset)) in btrfs_find_ino_for_alloc()
3856 entry = rb_entry(rb_first(&ctl->free_space_offset), in btrfs_find_ino_for_alloc()
3859 if (!entry->bitmap) { in btrfs_find_ino_for_alloc()
3860 ino = entry->offset; in btrfs_find_ino_for_alloc()
3863 entry->offset++; in btrfs_find_ino_for_alloc()
3864 entry->bytes--; in btrfs_find_ino_for_alloc()
3865 if (!entry->bytes) in btrfs_find_ino_for_alloc()
3880 if (entry->bytes == 0) in btrfs_find_ino_for_alloc()
3884 spin_unlock(&ctl->tree_lock); in btrfs_find_ino_for_alloc()
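/*
 * When the left-most entry is a bitmap (the branch elided above), the
 * lowest set bit is found and cleared instead; that bit can be numerically
 * larger than the start of an extent entry further right in the tree,
 * which is exactly the caveat in the header comment.
 */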
3894 spin_lock(&root->ino_cache_lock); in lookup_free_ino_inode()
3895 if (root->ino_cache_inode) in lookup_free_ino_inode()
3896 inode = igrab(root->ino_cache_inode); in lookup_free_ino_inode()
3897 spin_unlock(&root->ino_cache_lock); in lookup_free_ino_inode()
3905 spin_lock(&root->ino_cache_lock); in lookup_free_ino_inode()
3906 if (!btrfs_fs_closing(root->fs_info)) in lookup_free_ino_inode()
3907 root->ino_cache_inode = igrab(inode); in lookup_free_ino_inode()
3908 spin_unlock(&root->ino_cache_lock); in lookup_free_ino_inode()
3923 struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; in load_free_ino_cache()
3927 u64 root_gen = btrfs_root_generation(&root->root_item); in load_free_ino_cache()
3947 if (root_gen != BTRFS_I(inode)->generation) in load_free_ino_cache()
3955 root->root_key.objectid); in load_free_ino_cache()
3968 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_write_out_ino_cache()
3969 struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; in btrfs_write_out_ino_cache()
3993 inode->i_size, true); in btrfs_write_out_ino_cache()
3996 root->root_key.objectid, ret); in btrfs_write_out_ino_cache()
4012 struct btrfs_free_space_ctl *ctl = cache->free_space_ctl; in test_add_free_space_entry()
4023 return -ENOMEM; in test_add_free_space_entry()
4027 spin_lock(&ctl->tree_lock); in test_add_free_space_entry()
4028 info->offset = offset; in test_add_free_space_entry()
4029 info->bytes = bytes; in test_add_free_space_entry()
4030 info->max_extent_size = 0; in test_add_free_space_entry()
4032 spin_unlock(&ctl->tree_lock); in test_add_free_space_entry()
4042 return -ENOMEM; in test_add_free_space_entry()
4046 spin_lock(&ctl->tree_lock); in test_add_free_space_entry()
4050 info->bitmap = map; in test_add_free_space_entry()
4060 bytes -= bytes_added; in test_add_free_space_entry()
4062 spin_unlock(&ctl->tree_lock); in test_add_free_space_entry()
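/*
 * Test-only injection helper: entries or bitmap bits are linked straight
 * into the ctl under tree_lock, looping until all @bytes are represented,
 * so the self-tests can construct an exact mix of extents and bitmaps.
 */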
4082 struct btrfs_free_space_ctl *ctl = cache->free_space_ctl; in test_check_exists()
4086 spin_lock(&ctl->tree_lock); in test_check_exists()
4096 if (info->bitmap) { in test_check_exists()
4102 bit_bytes = ctl->unit; in test_check_exists()
4115 n = rb_prev(&info->offset_index); in test_check_exists()
4119 if (tmp->offset + tmp->bytes < offset) in test_check_exists()
4121 if (offset + bytes < tmp->offset) { in test_check_exists()
4122 n = rb_prev(&tmp->offset_index); in test_check_exists()
4129 n = rb_next(&info->offset_index); in test_check_exists()
4133 if (offset + bytes < tmp->offset) in test_check_exists()
4135 if (tmp->offset + tmp->bytes < offset) { in test_check_exists()
4136 n = rb_next(&tmp->offset_index); in test_check_exists()
4147 if (info->offset == offset) { in test_check_exists()
4152 if (offset > info->offset && offset < info->offset + info->bytes) in test_check_exists()
4155 spin_unlock(&ctl->tree_lock); in test_check_exists()
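/*
 * test_check_exists() walks rb_prev()/rb_next() from the entry found at
 * @offset because a neighbouring entry may still overlap the queried
 * range when the initial lookup lands on a non-overlapping entry; any
 * overlap counts as the range existing.
 */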