Lines Matching +full:cache +full:- +full:block
1 // SPDX-License-Identifier: GPL-2.0
5 #include "block-group.h"
6 #include "space-info.h"
7 #include "disk-io.h"
8 #include "free-space-cache.h"
9 #include "free-space-tree.h"
12 #include "ref-verify.h"
14 #include "tree-log.h"
15 #include "delalloc-space.h"
27 struct btrfs_balance_control *bctl = fs_info->balance_ctl; in get_restripe_target()
34 bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) { in get_restripe_target()
35 target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target; in get_restripe_target()
37 bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) { in get_restripe_target()
38 target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target; in get_restripe_target()
40 bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) { in get_restripe_target()
41 target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target; in get_restripe_target()
56 u64 num_devices = fs_info->fs_devices->rw_devices; in btrfs_reduce_alloc_profile()
65 spin_lock(&fs_info->balance_lock); in btrfs_reduce_alloc_profile()
68 spin_unlock(&fs_info->balance_lock); in btrfs_reduce_alloc_profile()
71 spin_unlock(&fs_info->balance_lock); in btrfs_reduce_alloc_profile()
80 /* Select the highest-redundancy RAID level. */ in btrfs_reduce_alloc_profile()
110 seq = read_seqbegin(&fs_info->profiles_lock); in btrfs_get_alloc_profile()
113 flags |= fs_info->avail_data_alloc_bits; in btrfs_get_alloc_profile()
115 flags |= fs_info->avail_system_alloc_bits; in btrfs_get_alloc_profile()
117 flags |= fs_info->avail_metadata_alloc_bits; in btrfs_get_alloc_profile()
118 } while (read_seqretry(&fs_info->profiles_lock, seq)); in btrfs_get_alloc_profile()
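A side note on the locking pattern above: the read loop re-samples the avail_*_alloc_bits whenever a writer raced with it. A minimal sketch of the write side of the same seqlock, as used further down in set_avail_alloc_bits() and clear_avail_alloc_bits() (the helper name here is invented for illustration):

#include <linux/seqlock.h>
#include <linux/types.h>

/* Sketch: writers bump the sequence so readers in the do/while above retry. */
static void update_avail_bits(seqlock_t *lock, u64 *bits, u64 extra)
{
        write_seqlock(lock);
        *bits |= extra;
        write_sequnlock(lock);
}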
123 void btrfs_get_block_group(struct btrfs_block_group *cache) in btrfs_get_block_group() argument
125 refcount_inc(&cache->refs); in btrfs_get_block_group()
128 void btrfs_put_block_group(struct btrfs_block_group *cache) in btrfs_put_block_group() argument
130 if (refcount_dec_and_test(&cache->refs)) { in btrfs_put_block_group()
131 WARN_ON(cache->pinned > 0); in btrfs_put_block_group()
132 WARN_ON(cache->reserved > 0); in btrfs_put_block_group()
139 if (WARN_ON(!list_empty(&cache->discard_list))) in btrfs_put_block_group()
140 btrfs_discard_cancel_work(&cache->fs_info->discard_ctl, in btrfs_put_block_group()
141 cache); in btrfs_put_block_group()
146 * And it will definitely cause use-after-free when caller in btrfs_put_block_group()
151 WARN_ON(!RB_EMPTY_ROOT(&cache->full_stripe_locks_root.root)); in btrfs_put_block_group()
152 kfree(cache->free_space_ctl); in btrfs_put_block_group()
153 kfree(cache); in btrfs_put_block_group()
158 * This adds the block group to the fs_info rb tree for the block group cache
165 struct btrfs_block_group *cache; in btrfs_add_block_group_cache() local
167 ASSERT(block_group->length != 0); in btrfs_add_block_group_cache()
169 spin_lock(&info->block_group_cache_lock); in btrfs_add_block_group_cache()
170 p = &info->block_group_cache_tree.rb_node; in btrfs_add_block_group_cache()
174 cache = rb_entry(parent, struct btrfs_block_group, cache_node); in btrfs_add_block_group_cache()
175 if (block_group->start < cache->start) { in btrfs_add_block_group_cache()
176 p = &(*p)->rb_left; in btrfs_add_block_group_cache()
177 } else if (block_group->start > cache->start) { in btrfs_add_block_group_cache()
178 p = &(*p)->rb_right; in btrfs_add_block_group_cache()
180 spin_unlock(&info->block_group_cache_lock); in btrfs_add_block_group_cache()
181 return -EEXIST; in btrfs_add_block_group_cache()
185 rb_link_node(&block_group->cache_node, parent, p); in btrfs_add_block_group_cache()
186 rb_insert_color(&block_group->cache_node, in btrfs_add_block_group_cache()
187 &info->block_group_cache_tree); in btrfs_add_block_group_cache()
189 if (info->first_logical_byte > block_group->start) in btrfs_add_block_group_cache()
190 info->first_logical_byte = block_group->start; in btrfs_add_block_group_cache()
192 spin_unlock(&info->block_group_cache_lock); in btrfs_add_block_group_cache()
198 * This will return the block group at or after bytenr if contains is 0, else
199 * it will return the block group that contains the bytenr
204 struct btrfs_block_group *cache, *ret = NULL; in block_group_cache_tree_search() local
208 spin_lock(&info->block_group_cache_lock); in block_group_cache_tree_search()
209 n = info->block_group_cache_tree.rb_node; in block_group_cache_tree_search()
212 cache = rb_entry(n, struct btrfs_block_group, cache_node); in block_group_cache_tree_search()
213 end = cache->start + cache->length - 1; in block_group_cache_tree_search()
214 start = cache->start; in block_group_cache_tree_search()
217 if (!contains && (!ret || start < ret->start)) in block_group_cache_tree_search()
218 ret = cache; in block_group_cache_tree_search()
219 n = n->rb_left; in block_group_cache_tree_search()
222 ret = cache; in block_group_cache_tree_search()
225 n = n->rb_right; in block_group_cache_tree_search()
227 ret = cache; in block_group_cache_tree_search()
233 if (bytenr == 0 && info->first_logical_byte > ret->start) in block_group_cache_tree_search()
234 info->first_logical_byte = ret->start; in block_group_cache_tree_search()
236 spin_unlock(&info->block_group_cache_lock); in block_group_cache_tree_search()
242 * Return the block group that starts at or after bytenr
251 * Return the block group that contains the given bytenr
260 struct btrfs_block_group *cache) in btrfs_next_block_group() argument
262 struct btrfs_fs_info *fs_info = cache->fs_info; in btrfs_next_block_group()
265 spin_lock(&fs_info->block_group_cache_lock); in btrfs_next_block_group()
267 /* If our block group was removed, we need a full search. */ in btrfs_next_block_group()
268 if (RB_EMPTY_NODE(&cache->cache_node)) { in btrfs_next_block_group()
269 const u64 next_bytenr = cache->start + cache->length; in btrfs_next_block_group()
271 spin_unlock(&fs_info->block_group_cache_lock); in btrfs_next_block_group()
272 btrfs_put_block_group(cache); in btrfs_next_block_group()
273 cache = btrfs_lookup_first_block_group(fs_info, next_bytenr); in btrfs_next_block_group()
274 return cache; in btrfs_next_block_group()
275 node = rb_next(&cache->cache_node); in btrfs_next_block_group()
276 btrfs_put_block_group(cache); in btrfs_next_block_group()
278 cache = rb_entry(node, struct btrfs_block_group, cache_node); in btrfs_next_block_group()
279 btrfs_get_block_group(cache); in btrfs_next_block_group()
281 cache = NULL; in btrfs_next_block_group()
282 spin_unlock(&fs_info->block_group_cache_lock); in btrfs_next_block_group()
283 return cache; in btrfs_next_block_group()
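A usage sketch, not part of the file above: the lookup helpers return a referenced block group that must be balanced with btrfs_put_block_group(), and btrfs_next_block_group() drops the reference on the group it was handed, so the common iteration loop looks roughly like this (the walker function is hypothetical):

/* Sketch: visit every block group in logical-address order. */
static void walk_block_groups(struct btrfs_fs_info *fs_info)
{
        struct btrfs_block_group *cache;

        for (cache = btrfs_lookup_first_block_group(fs_info, 0); cache;
             cache = btrfs_next_block_group(cache)) {
                /* inspect cache->start, cache->length, cache->used, ... */
        }
        /*
         * On a normal exit the last reference was already dropped; a caller
         * that breaks out early must call btrfs_put_block_group(cache).
         */
}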
295 spin_lock(&bg->lock); in btrfs_inc_nocow_writers()
296 if (bg->ro) in btrfs_inc_nocow_writers()
299 atomic_inc(&bg->nocow_writers); in btrfs_inc_nocow_writers()
300 spin_unlock(&bg->lock); in btrfs_inc_nocow_writers()
302 /* No put on block group, done by btrfs_dec_nocow_writers */ in btrfs_inc_nocow_writers()
315 if (atomic_dec_and_test(&bg->nocow_writers)) in btrfs_dec_nocow_writers()
316 wake_up_var(&bg->nocow_writers); in btrfs_dec_nocow_writers()
327 wait_var_event(&bg->nocow_writers, !atomic_read(&bg->nocow_writers)); in btrfs_wait_nocow_writers()
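The nocow writer accounting above is an instance of the generic atomic counter plus wake_up_var()/wait_var_event() idiom. A self-contained sketch of that idiom, with invented names, for illustration only:

#include <linux/atomic.h>
#include <linux/wait_bit.h>     /* wait_var_event(), wake_up_var() */

struct writer_tracker {
        atomic_t writers;
};

static void writer_start(struct writer_tracker *wt)
{
        atomic_inc(&wt->writers);
}

static void writer_done(struct writer_tracker *wt)
{
        /* The last writer out wakes anyone blocked in wait_for_writers(). */
        if (atomic_dec_and_test(&wt->writers))
                wake_up_var(&wt->writers);
}

static void wait_for_writers(struct writer_tracker *wt)
{
        wait_var_event(&wt->writers, !atomic_read(&wt->writers));
}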
337 if (atomic_dec_and_test(&bg->reservations)) in btrfs_dec_block_group_reservations()
338 wake_up_var(&bg->reservations); in btrfs_dec_block_group_reservations()
344 struct btrfs_space_info *space_info = bg->space_info; in btrfs_wait_block_group_reservations()
346 ASSERT(bg->ro); in btrfs_wait_block_group_reservations()
348 if (!(bg->flags & BTRFS_BLOCK_GROUP_DATA)) in btrfs_wait_block_group_reservations()
352 * Our block group is read only but before we set it to read only, in btrfs_wait_block_group_reservations()
357 * block group's reservations counter is incremented while a read lock in btrfs_wait_block_group_reservations()
361 down_write(&space_info->groups_sem); in btrfs_wait_block_group_reservations()
362 up_write(&space_info->groups_sem); in btrfs_wait_block_group_reservations()
364 wait_var_event(&bg->reservations, !atomic_read(&bg->reservations)); in btrfs_wait_block_group_reservations()
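The down_write()/up_write() pair above is a lock-as-barrier trick: since the reservations counter is only incremented while groups_sem is held for read, briefly taking the write lock guarantees every such reader has finished. A minimal generic sketch of the idiom (helper name invented):

#include <linux/rwsem.h>

/*
 * Sketch: returns only after every task that held @sem for read at the
 * time of the call has released it; we never need to keep the lock.
 */
static void flush_current_readers(struct rw_semaphore *sem)
{
        down_write(sem);
        up_write(sem);
}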
368 struct btrfs_block_group *cache) in btrfs_get_caching_control() argument
372 spin_lock(&cache->lock); in btrfs_get_caching_control()
373 if (!cache->caching_ctl) { in btrfs_get_caching_control()
374 spin_unlock(&cache->lock); in btrfs_get_caching_control()
378 ctl = cache->caching_ctl; in btrfs_get_caching_control()
379 refcount_inc(&ctl->count); in btrfs_get_caching_control()
380 spin_unlock(&cache->lock); in btrfs_get_caching_control()
386 if (refcount_dec_and_test(&ctl->count)) in btrfs_put_caching_control()
391 * When we wait for progress in the block group caching, it's because our
396 * up, and then it will check the block group free space numbers for our min
400 * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
401 * any of the information in this block group.
403 void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache, in btrfs_wait_block_group_cache_progress() argument
408 caching_ctl = btrfs_get_caching_control(cache); in btrfs_wait_block_group_cache_progress()
412 wait_event(caching_ctl->wait, btrfs_block_group_done(cache) || in btrfs_wait_block_group_cache_progress()
413 (cache->free_space_ctl->free_space >= num_bytes)); in btrfs_wait_block_group_cache_progress()
418 int btrfs_wait_block_group_cache_done(struct btrfs_block_group *cache) in btrfs_wait_block_group_cache_done() argument
423 caching_ctl = btrfs_get_caching_control(cache); in btrfs_wait_block_group_cache_done()
425 return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0; in btrfs_wait_block_group_cache_done()
427 wait_event(caching_ctl->wait, btrfs_block_group_done(cache)); in btrfs_wait_block_group_cache_done()
428 if (cache->cached == BTRFS_CACHE_ERROR) in btrfs_wait_block_group_cache_done()
429 ret = -EIO; in btrfs_wait_block_group_cache_done()
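A hedged caller-side sketch of how these waiters fit together; the wrapper below is hypothetical, but the helpers and the BTRFS_CACHE_ERROR check are the ones documented above:

/* Sketch: kick off caching and wait until enough free space is indexed. */
static int wait_for_cached_space(struct btrfs_block_group *cache, u64 num_bytes)
{
        int ret;

        ret = btrfs_cache_block_group(cache, 0);
        if (ret)
                return ret;

        btrfs_wait_block_group_cache_progress(cache, num_bytes);

        /* Per the comment above, the caller owns this error check. */
        if (cache->cached == BTRFS_CACHE_ERROR)
                return -EIO;
        return 0;
}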
437 struct btrfs_fs_info *fs_info = block_group->fs_info; in fragment_free_space()
438 u64 start = block_group->start; in fragment_free_space()
439 u64 len = block_group->length; in fragment_free_space()
440 u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ? in fragment_free_space()
441 fs_info->nodesize : fs_info->sectorsize; in fragment_free_space()
450 len -= step; in fragment_free_space()
463 struct btrfs_fs_info *info = block_group->fs_info; in add_new_free_space()
468 ret = find_first_extent_bit(&info->excluded_extents, start, in add_new_free_space()
478 size = extent_start - start; in add_new_free_space()
482 BUG_ON(ret); /* -ENOMEM or logic error */ in add_new_free_space()
490 size = end - start; in add_new_free_space()
494 BUG_ON(ret); /* -ENOMEM or logic error */ in add_new_free_space()
502 struct btrfs_block_group *block_group = caching_ctl->block_group; in load_extent_tree_free()
503 struct btrfs_fs_info *fs_info = block_group->fs_info; in load_extent_tree_free()
504 struct btrfs_root *extent_root = fs_info->extent_root; in load_extent_tree_free()
516 return -ENOMEM; in load_extent_tree_free()
518 last = max_t(u64, block_group->start, BTRFS_SUPER_INFO_OFFSET); in load_extent_tree_free()
523 * allocate from this block group until we've had a chance to fragment in load_extent_tree_free()
533 * root, since it's read-only in load_extent_tree_free()
535 path->skip_locking = 1; in load_extent_tree_free()
536 path->search_commit_root = 1; in load_extent_tree_free()
537 path->reada = READA_FORWARD; in load_extent_tree_free()
548 leaf = path->nodes[0]; in load_extent_tree_free()
553 last = (u64)-1; in load_extent_tree_free()
557 if (path->slots[0] < nritems) { in load_extent_tree_free()
558 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); in load_extent_tree_free()
565 rwsem_is_contended(&fs_info->commit_root_sem)) { in load_extent_tree_free()
567 caching_ctl->progress = last; in load_extent_tree_free()
569 up_read(&fs_info->commit_root_sem); in load_extent_tree_free()
570 mutex_unlock(&caching_ctl->mutex); in load_extent_tree_free()
572 mutex_lock(&caching_ctl->mutex); in load_extent_tree_free()
573 down_read(&fs_info->commit_root_sem); in load_extent_tree_free()
582 leaf = path->nodes[0]; in load_extent_tree_free()
593 caching_ctl->progress = last; in load_extent_tree_free()
598 if (key.objectid < block_group->start) { in load_extent_tree_free()
599 path->slots[0]++; in load_extent_tree_free()
603 if (key.objectid >= block_group->start + block_group->length) in load_extent_tree_free()
612 fs_info->nodesize; in load_extent_tree_free()
619 wake_up(&caching_ctl->wait); in load_extent_tree_free()
622 path->slots[0]++; in load_extent_tree_free()
627 block_group->start + block_group->length); in load_extent_tree_free()
628 caching_ctl->progress = (u64)-1; in load_extent_tree_free()
643 block_group = caching_ctl->block_group; in caching_thread()
644 fs_info = block_group->fs_info; in caching_thread()
646 mutex_lock(&caching_ctl->mutex); in caching_thread()
647 down_read(&fs_info->commit_root_sem); in caching_thread()
651 * can't actually cache from the free space tree as our commit root and in caching_thread()
657 !(test_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags))) in caching_thread()
662 spin_lock(&block_group->lock); in caching_thread()
663 block_group->caching_ctl = NULL; in caching_thread()
664 block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED; in caching_thread()
665 spin_unlock(&block_group->lock); in caching_thread()
671 spin_lock(&block_group->space_info->lock); in caching_thread()
672 spin_lock(&block_group->lock); in caching_thread()
673 bytes_used = block_group->length - block_group->used; in caching_thread()
674 block_group->space_info->bytes_used += bytes_used >> 1; in caching_thread()
675 spin_unlock(&block_group->lock); in caching_thread()
676 spin_unlock(&block_group->space_info->lock); in caching_thread()
681 caching_ctl->progress = (u64)-1; in caching_thread()
683 up_read(&fs_info->commit_root_sem); in caching_thread()
685 mutex_unlock(&caching_ctl->mutex); in caching_thread()
687 wake_up(&caching_ctl->wait); in caching_thread()
693 int btrfs_cache_block_group(struct btrfs_block_group *cache, int load_cache_only) in btrfs_cache_block_group() argument
696 struct btrfs_fs_info *fs_info = cache->fs_info; in btrfs_cache_block_group()
702 return -ENOMEM; in btrfs_cache_block_group()
704 INIT_LIST_HEAD(&caching_ctl->list); in btrfs_cache_block_group()
705 mutex_init(&caching_ctl->mutex); in btrfs_cache_block_group()
706 init_waitqueue_head(&caching_ctl->wait); in btrfs_cache_block_group()
707 caching_ctl->block_group = cache; in btrfs_cache_block_group()
708 caching_ctl->progress = cache->start; in btrfs_cache_block_group()
709 refcount_set(&caching_ctl->count, 1); in btrfs_cache_block_group()
710 btrfs_init_work(&caching_ctl->work, caching_thread, NULL, NULL); in btrfs_cache_block_group()
712 spin_lock(&cache->lock); in btrfs_cache_block_group()
715 * case where one thread starts to load the space cache info, and then in btrfs_cache_block_group()
717 * allocation while the other thread is still loading the space cache in btrfs_cache_block_group()
718 * info. The previous loop should have kept us from choosing this block in btrfs_cache_block_group()
720 * block groups we need to first check if we're doing a fast load here, in btrfs_cache_block_group()
722 * from a block group whose cache gets evicted for one reason or in btrfs_cache_block_group()
725 while (cache->cached == BTRFS_CACHE_FAST) { in btrfs_cache_block_group()
728 ctl = cache->caching_ctl; in btrfs_cache_block_group()
729 refcount_inc(&ctl->count); in btrfs_cache_block_group()
730 prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE); in btrfs_cache_block_group()
731 spin_unlock(&cache->lock); in btrfs_cache_block_group()
735 finish_wait(&ctl->wait, &wait); in btrfs_cache_block_group()
737 spin_lock(&cache->lock); in btrfs_cache_block_group()
740 if (cache->cached != BTRFS_CACHE_NO) { in btrfs_cache_block_group()
741 spin_unlock(&cache->lock); in btrfs_cache_block_group()
745 WARN_ON(cache->caching_ctl); in btrfs_cache_block_group()
746 cache->caching_ctl = caching_ctl; in btrfs_cache_block_group()
747 cache->cached = BTRFS_CACHE_FAST; in btrfs_cache_block_group()
748 spin_unlock(&cache->lock); in btrfs_cache_block_group()
751 mutex_lock(&caching_ctl->mutex); in btrfs_cache_block_group()
752 ret = load_free_space_cache(cache); in btrfs_cache_block_group()
754 spin_lock(&cache->lock); in btrfs_cache_block_group()
756 cache->caching_ctl = NULL; in btrfs_cache_block_group()
757 cache->cached = BTRFS_CACHE_FINISHED; in btrfs_cache_block_group()
758 cache->last_byte_to_unpin = (u64)-1; in btrfs_cache_block_group()
759 caching_ctl->progress = (u64)-1; in btrfs_cache_block_group()
762 cache->caching_ctl = NULL; in btrfs_cache_block_group()
763 cache->cached = BTRFS_CACHE_NO; in btrfs_cache_block_group()
765 cache->cached = BTRFS_CACHE_STARTED; in btrfs_cache_block_group()
766 cache->has_caching_ctl = 1; in btrfs_cache_block_group()
769 spin_unlock(&cache->lock); in btrfs_cache_block_group()
772 btrfs_should_fragment_free_space(cache)) { in btrfs_cache_block_group()
775 spin_lock(&cache->space_info->lock); in btrfs_cache_block_group()
776 spin_lock(&cache->lock); in btrfs_cache_block_group()
777 bytes_used = cache->length - cache->used; in btrfs_cache_block_group()
778 cache->space_info->bytes_used += bytes_used >> 1; in btrfs_cache_block_group()
779 spin_unlock(&cache->lock); in btrfs_cache_block_group()
780 spin_unlock(&cache->space_info->lock); in btrfs_cache_block_group()
781 fragment_free_space(cache); in btrfs_cache_block_group()
784 mutex_unlock(&caching_ctl->mutex); in btrfs_cache_block_group()
786 wake_up(&caching_ctl->wait); in btrfs_cache_block_group()
789 btrfs_free_excluded_extents(cache); in btrfs_cache_block_group()
797 spin_lock(&cache->lock); in btrfs_cache_block_group()
799 cache->caching_ctl = NULL; in btrfs_cache_block_group()
800 cache->cached = BTRFS_CACHE_NO; in btrfs_cache_block_group()
802 cache->cached = BTRFS_CACHE_STARTED; in btrfs_cache_block_group()
803 cache->has_caching_ctl = 1; in btrfs_cache_block_group()
805 spin_unlock(&cache->lock); in btrfs_cache_block_group()
806 wake_up(&caching_ctl->wait); in btrfs_cache_block_group()
814 down_write(&fs_info->commit_root_sem); in btrfs_cache_block_group()
815 refcount_inc(&caching_ctl->count); in btrfs_cache_block_group()
816 list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups); in btrfs_cache_block_group()
817 up_write(&fs_info->commit_root_sem); in btrfs_cache_block_group()
819 btrfs_get_block_group(cache); in btrfs_cache_block_group()
821 btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work); in btrfs_cache_block_group()
831 write_seqlock(&fs_info->profiles_lock); in clear_avail_alloc_bits()
833 fs_info->avail_data_alloc_bits &= ~extra_flags; in clear_avail_alloc_bits()
835 fs_info->avail_metadata_alloc_bits &= ~extra_flags; in clear_avail_alloc_bits()
837 fs_info->avail_system_alloc_bits &= ~extra_flags; in clear_avail_alloc_bits()
838 write_sequnlock(&fs_info->profiles_lock); in clear_avail_alloc_bits()
844 * - RAID56 - in case there's neither RAID5 nor RAID6 profile block group
847 * - RAID1C34 - same as above for RAID1C3 and RAID1C4 block groups
857 struct list_head *head = &fs_info->space_info; in clear_incompat_bg_bits()
861 down_read(&sinfo->groups_sem); in clear_incompat_bg_bits()
862 if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID5])) in clear_incompat_bg_bits()
864 if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID6])) in clear_incompat_bg_bits()
866 if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C3])) in clear_incompat_bg_bits()
868 if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C4])) in clear_incompat_bg_bits()
870 up_read(&sinfo->groups_sem); in clear_incompat_bg_bits()
883 struct btrfs_fs_info *fs_info = trans->fs_info; in remove_block_group_item()
888 root = fs_info->extent_root; in remove_block_group_item()
889 key.objectid = block_group->start; in remove_block_group_item()
891 key.offset = block_group->length; in remove_block_group_item()
893 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); in remove_block_group_item()
895 ret = -ENOENT; in remove_block_group_item()
906 struct btrfs_fs_info *fs_info = trans->fs_info; in btrfs_remove_block_group()
910 struct btrfs_root *tree_root = fs_info->tree_root; in btrfs_remove_block_group()
923 BUG_ON(!block_group->ro); in btrfs_remove_block_group()
927 * Free the reserved super bytes from this block group before in btrfs_remove_block_group()
931 btrfs_free_ref_tree_range(fs_info, block_group->start, in btrfs_remove_block_group()
932 block_group->length); in btrfs_remove_block_group()
934 index = btrfs_bg_flags_to_raid_index(block_group->flags); in btrfs_remove_block_group()
935 factor = btrfs_bg_type_to_factor(block_group->flags); in btrfs_remove_block_group()
937 /* make sure this block group isn't part of an allocation cluster */ in btrfs_remove_block_group()
938 cluster = &fs_info->data_alloc_cluster; in btrfs_remove_block_group()
939 spin_lock(&cluster->refill_lock); in btrfs_remove_block_group()
941 spin_unlock(&cluster->refill_lock); in btrfs_remove_block_group()
944 * make sure this block group isn't part of a metadata in btrfs_remove_block_group()
947 cluster = &fs_info->meta_alloc_cluster; in btrfs_remove_block_group()
948 spin_lock(&cluster->refill_lock); in btrfs_remove_block_group()
950 spin_unlock(&cluster->refill_lock); in btrfs_remove_block_group()
954 ret = -ENOMEM; in btrfs_remove_block_group()
964 mutex_lock(&trans->transaction->cache_write_mutex); in btrfs_remove_block_group()
966 * Make sure our free space cache IO is done before removing the in btrfs_remove_block_group()
969 spin_lock(&trans->transaction->dirty_bgs_lock); in btrfs_remove_block_group()
970 if (!list_empty(&block_group->io_list)) { in btrfs_remove_block_group()
971 list_del_init(&block_group->io_list); in btrfs_remove_block_group()
973 WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode); in btrfs_remove_block_group()
975 spin_unlock(&trans->transaction->dirty_bgs_lock); in btrfs_remove_block_group()
978 spin_lock(&trans->transaction->dirty_bgs_lock); in btrfs_remove_block_group()
981 if (!list_empty(&block_group->dirty_list)) { in btrfs_remove_block_group()
982 list_del_init(&block_group->dirty_list); in btrfs_remove_block_group()
986 spin_unlock(&trans->transaction->dirty_bgs_lock); in btrfs_remove_block_group()
987 mutex_unlock(&trans->transaction->cache_write_mutex); in btrfs_remove_block_group()
996 /* One for the block groups ref */ in btrfs_remove_block_group()
997 spin_lock(&block_group->lock); in btrfs_remove_block_group()
998 if (block_group->iref) { in btrfs_remove_block_group()
999 block_group->iref = 0; in btrfs_remove_block_group()
1000 block_group->inode = NULL; in btrfs_remove_block_group()
1001 spin_unlock(&block_group->lock); in btrfs_remove_block_group()
1004 spin_unlock(&block_group->lock); in btrfs_remove_block_group()
1012 key.offset = block_group->start; in btrfs_remove_block_group()
1014 ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1); in btrfs_remove_block_group()
1026 spin_lock(&fs_info->block_group_cache_lock); in btrfs_remove_block_group()
1027 rb_erase(&block_group->cache_node, in btrfs_remove_block_group()
1028 &fs_info->block_group_cache_tree); in btrfs_remove_block_group()
1029 RB_CLEAR_NODE(&block_group->cache_node); in btrfs_remove_block_group()
1031 /* Once for the block groups rbtree */ in btrfs_remove_block_group()
1034 if (fs_info->first_logical_byte == block_group->start) in btrfs_remove_block_group()
1035 fs_info->first_logical_byte = (u64)-1; in btrfs_remove_block_group()
1036 spin_unlock(&fs_info->block_group_cache_lock); in btrfs_remove_block_group()
1038 down_write(&block_group->space_info->groups_sem); in btrfs_remove_block_group()
1043 list_del_init(&block_group->list); in btrfs_remove_block_group()
1044 if (list_empty(&block_group->space_info->block_groups[index])) { in btrfs_remove_block_group()
1045 kobj = block_group->space_info->block_group_kobjs[index]; in btrfs_remove_block_group()
1046 block_group->space_info->block_group_kobjs[index] = NULL; in btrfs_remove_block_group()
1047 clear_avail_alloc_bits(fs_info, block_group->flags); in btrfs_remove_block_group()
1049 up_write(&block_group->space_info->groups_sem); in btrfs_remove_block_group()
1050 clear_incompat_bg_bits(fs_info, block_group->flags); in btrfs_remove_block_group()
1056 if (block_group->has_caching_ctl) in btrfs_remove_block_group()
1058 if (block_group->cached == BTRFS_CACHE_STARTED) in btrfs_remove_block_group()
1060 if (block_group->has_caching_ctl) { in btrfs_remove_block_group()
1061 down_write(&fs_info->commit_root_sem); in btrfs_remove_block_group()
1066 &fs_info->caching_block_groups, list) in btrfs_remove_block_group()
1067 if (ctl->block_group == block_group) { in btrfs_remove_block_group()
1069 refcount_inc(&caching_ctl->count); in btrfs_remove_block_group()
1074 list_del_init(&caching_ctl->list); in btrfs_remove_block_group()
1075 up_write(&fs_info->commit_root_sem); in btrfs_remove_block_group()
1083 spin_lock(&trans->transaction->dirty_bgs_lock); in btrfs_remove_block_group()
1084 WARN_ON(!list_empty(&block_group->dirty_list)); in btrfs_remove_block_group()
1085 WARN_ON(!list_empty(&block_group->io_list)); in btrfs_remove_block_group()
1086 spin_unlock(&trans->transaction->dirty_bgs_lock); in btrfs_remove_block_group()
1090 spin_lock(&block_group->space_info->lock); in btrfs_remove_block_group()
1091 list_del_init(&block_group->ro_list); in btrfs_remove_block_group()
1094 WARN_ON(block_group->space_info->total_bytes in btrfs_remove_block_group()
1095 < block_group->length); in btrfs_remove_block_group()
1096 WARN_ON(block_group->space_info->bytes_readonly in btrfs_remove_block_group()
1097 < block_group->length); in btrfs_remove_block_group()
1098 WARN_ON(block_group->space_info->disk_total in btrfs_remove_block_group()
1099 < block_group->length * factor); in btrfs_remove_block_group()
1101 block_group->space_info->total_bytes -= block_group->length; in btrfs_remove_block_group()
1102 block_group->space_info->bytes_readonly -= block_group->length; in btrfs_remove_block_group()
1103 block_group->space_info->disk_total -= block_group->length * factor; in btrfs_remove_block_group()
1105 spin_unlock(&block_group->space_info->lock); in btrfs_remove_block_group()
1108 * Remove the free space for the block group from the free space tree in btrfs_remove_block_group()
1109 * and the block group's item from the extent tree before marking the in btrfs_remove_block_group()
1110 * block group as removed. This is to prevent races with tasks that in btrfs_remove_block_group()
1111 * freeze and unfreeze a block group, this task and another task in btrfs_remove_block_group()
1112 * allocating a new block group - the unfreeze task ends up removing in btrfs_remove_block_group()
1113 * the block group's extent map before the task calling this function in btrfs_remove_block_group()
1114 * deletes the block group item from the extent tree, allowing for in btrfs_remove_block_group()
1115 * another task to attempt to create another block group with the same in btrfs_remove_block_group()
1116 * item key (and failing with -EEXIST and a transaction abort). in btrfs_remove_block_group()
1126 spin_lock(&block_group->lock); in btrfs_remove_block_group()
1127 block_group->removed = 1; in btrfs_remove_block_group()
1129 * At this point trimming or scrub can't start on this block group, in btrfs_remove_block_group()
1130 * because we removed the block group from the rbtree in btrfs_remove_block_group()
1131 * fs_info->block_group_cache_tree so no one can find it anymore and in btrfs_remove_block_group()
1132 * even if someone already got this block group before we removed it in btrfs_remove_block_group()
1133 * from the rbtree, they have already incremented block_group->frozen - in btrfs_remove_block_group()
1138 * And we must not remove the extent map from the fs_info->mapping_tree in btrfs_remove_block_group()
1140 * ranges from being reused for a new block group. This is needed to in btrfs_remove_block_group()
1146 * allowing for new block groups to be created that can reuse the same in btrfs_remove_block_group()
1150 * is mounted with -odiscard. The same protections must remain in btrfs_remove_block_group()
1154 remove_em = (atomic_read(&block_group->frozen) == 0); in btrfs_remove_block_group()
1155 spin_unlock(&block_group->lock); in btrfs_remove_block_group()
1160 em_tree = &fs_info->mapping_tree; in btrfs_remove_block_group()
1161 write_lock(&em_tree->lock); in btrfs_remove_block_group()
1163 write_unlock(&em_tree->lock); in btrfs_remove_block_group()
1180 struct extent_map_tree *em_tree = &fs_info->mapping_tree; in btrfs_start_trans_remove_block_group()
1185 read_lock(&em_tree->lock); in btrfs_start_trans_remove_block_group()
1187 read_unlock(&em_tree->lock); in btrfs_start_trans_remove_block_group()
1188 ASSERT(em && em->start == chunk_offset); in btrfs_start_trans_remove_block_group()
1192 * to remove a block group (done at btrfs_remove_chunk() and at in btrfs_start_trans_remove_block_group()
1197 * 1 unit for deleting the block group item (located in the extent in btrfs_start_trans_remove_block_group()
1204 * In order to remove a block group we also need to reserve units in the in btrfs_start_trans_remove_block_group()
1209 map = em->map_lookup; in btrfs_start_trans_remove_block_group()
1210 num_items = 3 + map->num_stripes; in btrfs_start_trans_remove_block_group()
1213 return btrfs_start_transaction_fallback_global_rsv(fs_info->extent_root, in btrfs_start_trans_remove_block_group()
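To make the reservation math above concrete (illustrative numbers only): with num_items = 3 + map->num_stripes, deleting a block group whose chunk is mapped onto two stripes, for example a two-copy RAID1 chunk, reserves 3 + 2 = 5 metadata units, while a chunk striped across six devices reserves 3 + 6 = 9.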
1218 * Mark block group @cache read-only, so later writes won't happen to block
1219 * group @cache.
1221 * If @force is not set, this function will only mark the block group readonly
1222 * if we have enough free space (1M) in other metadata/system block groups.
1223 * If @force is set, this function will mark the block group readonly
1226 * NOTE: This function doesn't care if other block groups can contain all the
1227 * data in this block group. That check should be done by relocation routine,
1230 static int inc_block_group_ro(struct btrfs_block_group *cache, int force) in inc_block_group_ro() argument
1232 struct btrfs_space_info *sinfo = cache->space_info; in inc_block_group_ro()
1234 int ret = -ENOSPC; in inc_block_group_ro()
1236 spin_lock(&sinfo->lock); in inc_block_group_ro()
1237 spin_lock(&cache->lock); in inc_block_group_ro()
1239 if (cache->swap_extents) { in inc_block_group_ro()
1240 ret = -ETXTBSY; in inc_block_group_ro()
1244 if (cache->ro) { in inc_block_group_ro()
1245 cache->ro++; in inc_block_group_ro()
1250 num_bytes = cache->length - cache->reserved - cache->pinned - in inc_block_group_ro()
1251 cache->bytes_super - cache->used; in inc_block_group_ro()
1259 } else if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA) { in inc_block_group_ro()
1266 if (sinfo_used + num_bytes <= sinfo->total_bytes) in inc_block_group_ro()
1273 * leeway to allow us to mark this block group as read only. in inc_block_group_ro()
1275 if (btrfs_can_overcommit(cache->fs_info, sinfo, num_bytes, in inc_block_group_ro()
1281 sinfo->bytes_readonly += num_bytes; in inc_block_group_ro()
1282 cache->ro++; in inc_block_group_ro()
1283 list_add_tail(&cache->ro_list, &sinfo->ro_bgs); in inc_block_group_ro()
1286 spin_unlock(&cache->lock); in inc_block_group_ro()
1287 spin_unlock(&sinfo->lock); in inc_block_group_ro()
1288 if (ret == -ENOSPC && btrfs_test_opt(cache->fs_info, ENOSPC_DEBUG)) { in inc_block_group_ro()
1289 btrfs_info(cache->fs_info, in inc_block_group_ro()
1290 "unable to make block group %llu ro", cache->start); in inc_block_group_ro()
1291 btrfs_dump_space_info(cache->fs_info, cache->space_info, 0, 0); in inc_block_group_ro()
1299 struct btrfs_fs_info *fs_info = bg->fs_info; in clean_pinned_extents()
1301 const u64 start = bg->start; in clean_pinned_extents()
1302 const u64 end = start + bg->length - 1; in clean_pinned_extents()
1305 spin_lock(&fs_info->trans_lock); in clean_pinned_extents()
1306 if (trans->transaction->list.prev != &fs_info->trans_list) { in clean_pinned_extents()
1307 prev_trans = list_last_entry(&trans->transaction->list, in clean_pinned_extents()
1309 refcount_inc(&prev_trans->use_count); in clean_pinned_extents()
1311 spin_unlock(&fs_info->trans_lock); in clean_pinned_extents()
1317 * transaction N - 1, and have seen a range belonging to the block in clean_pinned_extents()
1318 * group in pinned_extents before we were able to clear the whole block in clean_pinned_extents()
1320 * the block group after we unpinned it from pinned_extents and removed in clean_pinned_extents()
1323 mutex_lock(&fs_info->unused_bg_unpin_mutex); in clean_pinned_extents()
1325 ret = clear_extent_bits(&prev_trans->pinned_extents, start, end, in clean_pinned_extents()
1331 ret = clear_extent_bits(&trans->transaction->pinned_extents, start, end, in clean_pinned_extents()
1334 mutex_unlock(&fs_info->unused_bg_unpin_mutex); in clean_pinned_extents()
1353 if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags)) in btrfs_delete_unused_bgs()
1356 spin_lock(&fs_info->unused_bgs_lock); in btrfs_delete_unused_bgs()
1357 while (!list_empty(&fs_info->unused_bgs)) { in btrfs_delete_unused_bgs()
1360 block_group = list_first_entry(&fs_info->unused_bgs, in btrfs_delete_unused_bgs()
1363 list_del_init(&block_group->bg_list); in btrfs_delete_unused_bgs()
1365 space_info = block_group->space_info; in btrfs_delete_unused_bgs()
1371 spin_unlock(&fs_info->unused_bgs_lock); in btrfs_delete_unused_bgs()
1373 btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group); in btrfs_delete_unused_bgs()
1375 mutex_lock(&fs_info->delete_unused_bgs_mutex); in btrfs_delete_unused_bgs()
1378 down_write(&space_info->groups_sem); in btrfs_delete_unused_bgs()
1381 * Async discard moves the final block group discard to be prior in btrfs_delete_unused_bgs()
1388 up_write(&space_info->groups_sem); in btrfs_delete_unused_bgs()
1390 btrfs_discard_queue_work(&fs_info->discard_ctl, in btrfs_delete_unused_bgs()
1395 spin_lock(&block_group->lock); in btrfs_delete_unused_bgs()
1396 if (block_group->reserved || block_group->pinned || in btrfs_delete_unused_bgs()
1397 block_group->used || block_group->ro || in btrfs_delete_unused_bgs()
1398 list_is_singular(&block_group->list)) { in btrfs_delete_unused_bgs()
1401 * outstanding allocations in this block group. We do in btrfs_delete_unused_bgs()
1403 * this block group. in btrfs_delete_unused_bgs()
1406 spin_unlock(&block_group->lock); in btrfs_delete_unused_bgs()
1407 up_write(&space_info->groups_sem); in btrfs_delete_unused_bgs()
1410 spin_unlock(&block_group->lock); in btrfs_delete_unused_bgs()
1414 up_write(&space_info->groups_sem); in btrfs_delete_unused_bgs()
1425 block_group->start); in btrfs_delete_unused_bgs()
1433 * We could have pending pinned extents for this block group, in btrfs_delete_unused_bgs()
1448 spin_lock(&fs_info->discard_ctl.lock); in btrfs_delete_unused_bgs()
1449 if (!list_empty(&block_group->discard_list)) { in btrfs_delete_unused_bgs()
1450 spin_unlock(&fs_info->discard_ctl.lock); in btrfs_delete_unused_bgs()
1452 btrfs_discard_queue_work(&fs_info->discard_ctl, in btrfs_delete_unused_bgs()
1456 spin_unlock(&fs_info->discard_ctl.lock); in btrfs_delete_unused_bgs()
1459 spin_lock(&space_info->lock); in btrfs_delete_unused_bgs()
1460 spin_lock(&block_group->lock); in btrfs_delete_unused_bgs()
1463 -block_group->pinned); in btrfs_delete_unused_bgs()
1464 space_info->bytes_readonly += block_group->pinned; in btrfs_delete_unused_bgs()
1465 __btrfs_mod_total_bytes_pinned(space_info, -block_group->pinned); in btrfs_delete_unused_bgs()
1466 block_group->pinned = 0; in btrfs_delete_unused_bgs()
1468 spin_unlock(&block_group->lock); in btrfs_delete_unused_bgs()
1469 spin_unlock(&space_info->lock); in btrfs_delete_unused_bgs()
1472 * The normal path is that an unused block group is passed here, in btrfs_delete_unused_bgs()
1475 * before coming down the unused block group path as trimming in btrfs_delete_unused_bgs()
1492 ret = btrfs_remove_chunk(trans, block_group->start); in btrfs_delete_unused_bgs()
1501 * If we're not mounted with -odiscard, we can just forget in btrfs_delete_unused_bgs()
1502 * about this block group. Otherwise we'll need to wait in btrfs_delete_unused_bgs()
1506 spin_lock(&fs_info->unused_bgs_lock); in btrfs_delete_unused_bgs()
1509 * fs_info->unused_bgs, so use a list_move operation in btrfs_delete_unused_bgs()
1510 * to add the block group to the deleted_bgs list. in btrfs_delete_unused_bgs()
1512 list_move(&block_group->bg_list, in btrfs_delete_unused_bgs()
1513 &trans->transaction->deleted_bgs); in btrfs_delete_unused_bgs()
1514 spin_unlock(&fs_info->unused_bgs_lock); in btrfs_delete_unused_bgs()
1520 mutex_unlock(&fs_info->delete_unused_bgs_mutex); in btrfs_delete_unused_bgs()
1522 spin_lock(&fs_info->unused_bgs_lock); in btrfs_delete_unused_bgs()
1524 spin_unlock(&fs_info->unused_bgs_lock); in btrfs_delete_unused_bgs()
1529 mutex_unlock(&fs_info->delete_unused_bgs_mutex); in btrfs_delete_unused_bgs()
1536 struct btrfs_fs_info *fs_info = bg->fs_info; in btrfs_mark_bg_unused()
1538 spin_lock(&fs_info->unused_bgs_lock); in btrfs_mark_bg_unused()
1539 if (list_empty(&bg->bg_list)) { in btrfs_mark_bg_unused()
1542 list_add_tail(&bg->bg_list, &fs_info->unused_bgs); in btrfs_mark_bg_unused()
1544 spin_unlock(&fs_info->unused_bgs_lock); in btrfs_mark_bg_unused()
1558 slot = path->slots[0]; in read_bg_from_eb()
1559 leaf = path->nodes[0]; in read_bg_from_eb()
1561 em_tree = &fs_info->mapping_tree; in read_bg_from_eb()
1562 read_lock(&em_tree->lock); in read_bg_from_eb()
1563 em = lookup_extent_mapping(em_tree, key->objectid, key->offset); in read_bg_from_eb()
1564 read_unlock(&em_tree->lock); in read_bg_from_eb()
1568 key->objectid, key->offset); in read_bg_from_eb()
1569 return -ENOENT; in read_bg_from_eb()
1572 if (em->start != key->objectid || em->len != key->offset) { in read_bg_from_eb()
1574 "block group %llu len %llu mismatch with chunk %llu len %llu", in read_bg_from_eb()
1575 key->objectid, key->offset, em->start, em->len); in read_bg_from_eb()
1576 ret = -EUCLEAN; in read_bg_from_eb()
1585 if (flags != (em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) { in read_bg_from_eb()
1587 "block group %llu len %llu type flags 0x%llx mismatch with chunk type flags 0x%llx", in read_bg_from_eb()
1588 key->objectid, key->offset, flags, in read_bg_from_eb()
1589 (BTRFS_BLOCK_GROUP_TYPE_MASK & em->map_lookup->type)); in read_bg_from_eb()
1590 ret = -EUCLEAN; in read_bg_from_eb()
1602 struct btrfs_root *root = fs_info->extent_root; in find_first_block_group()
1613 slot = path->slots[0]; in find_first_block_group()
1614 leaf = path->nodes[0]; in find_first_block_group()
1625 if (found_key.objectid >= key->objectid && in find_first_block_group()
1631 path->slots[0]++; in find_first_block_group()
1642 write_seqlock(&fs_info->profiles_lock); in set_avail_alloc_bits()
1644 fs_info->avail_data_alloc_bits |= extra_flags; in set_avail_alloc_bits()
1646 fs_info->avail_metadata_alloc_bits |= extra_flags; in set_avail_alloc_bits()
1648 fs_info->avail_system_alloc_bits |= extra_flags; in set_avail_alloc_bits()
1649 write_sequnlock(&fs_info->profiles_lock); in set_avail_alloc_bits()
1653 * btrfs_rmap_block - Map a physical disk address to a list of logical addresses
1654 * @chunk_start: logical address of block group
1658 * @stripe_len: size of IO stripe for the given block group
1661 * Used primarily to exclude those portions of a block group that contain super
1662 * block copies.
1679 return -EIO; in btrfs_rmap_block()
1681 map = em->map_lookup; in btrfs_rmap_block()
1682 data_stripe_length = em->orig_block_len; in btrfs_rmap_block()
1683 io_stripe_size = map->stripe_len; in btrfs_rmap_block()
1686 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) in btrfs_rmap_block()
1687 io_stripe_size = map->stripe_len * nr_data_stripes(map); in btrfs_rmap_block()
1689 buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS); in btrfs_rmap_block()
1691 ret = -ENOMEM; in btrfs_rmap_block()
1695 for (i = 0; i < map->num_stripes; i++) { in btrfs_rmap_block()
1700 if (!in_range(physical, map->stripes[i].physical, in btrfs_rmap_block()
1704 stripe_nr = physical - map->stripes[i].physical; in btrfs_rmap_block()
1705 stripe_nr = div64_u64(stripe_nr, map->stripe_len); in btrfs_rmap_block()
1707 if (map->type & BTRFS_BLOCK_GROUP_RAID10) { in btrfs_rmap_block()
1708 stripe_nr = stripe_nr * map->num_stripes + i; in btrfs_rmap_block()
1709 stripe_nr = div_u64(stripe_nr, map->sub_stripes); in btrfs_rmap_block()
1710 } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) { in btrfs_rmap_block()
1711 stripe_nr = stripe_nr * map->num_stripes + i; in btrfs_rmap_block()
1716 * instead of map->stripe_len in btrfs_rmap_block()
1741 static int exclude_super_stripes(struct btrfs_block_group *cache) in exclude_super_stripes() argument
1743 struct btrfs_fs_info *fs_info = cache->fs_info; in exclude_super_stripes()
1749 if (cache->start < BTRFS_SUPER_INFO_OFFSET) { in exclude_super_stripes()
1750 stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->start; in exclude_super_stripes()
1751 cache->bytes_super += stripe_len; in exclude_super_stripes()
1752 ret = btrfs_add_excluded_extent(fs_info, cache->start, in exclude_super_stripes()
1760 ret = btrfs_rmap_block(fs_info, cache->start, in exclude_super_stripes()
1765 while (nr--) { in exclude_super_stripes()
1767 cache->start + cache->length - logical[nr]); in exclude_super_stripes()
1769 cache->bytes_super += len; in exclude_super_stripes()
1783 static void link_block_group(struct btrfs_block_group *cache) in link_block_group() argument
1785 struct btrfs_space_info *space_info = cache->space_info; in link_block_group()
1786 int index = btrfs_bg_flags_to_raid_index(cache->flags); in link_block_group()
1788 down_write(&space_info->groups_sem); in link_block_group()
1789 list_add_tail(&cache->list, &space_info->block_groups[index]); in link_block_group()
1790 up_write(&space_info->groups_sem); in link_block_group()
1796 struct btrfs_block_group *cache; in btrfs_create_block_group_cache() local
1798 cache = kzalloc(sizeof(*cache), GFP_NOFS); in btrfs_create_block_group_cache()
1799 if (!cache) in btrfs_create_block_group_cache()
1802 cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl), in btrfs_create_block_group_cache()
1804 if (!cache->free_space_ctl) { in btrfs_create_block_group_cache()
1805 kfree(cache); in btrfs_create_block_group_cache()
1809 cache->start = start; in btrfs_create_block_group_cache()
1811 cache->fs_info = fs_info; in btrfs_create_block_group_cache()
1812 cache->full_stripe_len = btrfs_full_stripe_len(fs_info, start); in btrfs_create_block_group_cache()
1814 cache->discard_index = BTRFS_DISCARD_INDEX_UNUSED; in btrfs_create_block_group_cache()
1816 refcount_set(&cache->refs, 1); in btrfs_create_block_group_cache()
1817 spin_lock_init(&cache->lock); in btrfs_create_block_group_cache()
1818 init_rwsem(&cache->data_rwsem); in btrfs_create_block_group_cache()
1819 INIT_LIST_HEAD(&cache->list); in btrfs_create_block_group_cache()
1820 INIT_LIST_HEAD(&cache->cluster_list); in btrfs_create_block_group_cache()
1821 INIT_LIST_HEAD(&cache->bg_list); in btrfs_create_block_group_cache()
1822 INIT_LIST_HEAD(&cache->ro_list); in btrfs_create_block_group_cache()
1823 INIT_LIST_HEAD(&cache->discard_list); in btrfs_create_block_group_cache()
1824 INIT_LIST_HEAD(&cache->dirty_list); in btrfs_create_block_group_cache()
1825 INIT_LIST_HEAD(&cache->io_list); in btrfs_create_block_group_cache()
1826 btrfs_init_free_space_ctl(cache); in btrfs_create_block_group_cache()
1827 atomic_set(&cache->frozen, 0); in btrfs_create_block_group_cache()
1828 mutex_init(&cache->free_space_lock); in btrfs_create_block_group_cache()
1829 btrfs_init_full_stripe_locks_tree(&cache->full_stripe_locks_root); in btrfs_create_block_group_cache()
1831 return cache; in btrfs_create_block_group_cache()
1835 * Iterate all chunks and verify that each of them has the corresponding block
1840 struct extent_map_tree *map_tree = &fs_info->mapping_tree; in check_chunk_block_group_mappings()
1847 read_lock(&map_tree->lock); in check_chunk_block_group_mappings()
1854 read_unlock(&map_tree->lock); in check_chunk_block_group_mappings()
1858 bg = btrfs_lookup_block_group(fs_info, em->start); in check_chunk_block_group_mappings()
1861 "chunk start=%llu len=%llu doesn't have corresponding block group", in check_chunk_block_group_mappings()
1862 em->start, em->len); in check_chunk_block_group_mappings()
1863 ret = -EUCLEAN; in check_chunk_block_group_mappings()
1867 if (bg->start != em->start || bg->length != em->len || in check_chunk_block_group_mappings()
1868 (bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK) != in check_chunk_block_group_mappings()
1869 (em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) { in check_chunk_block_group_mappings()
1871 "chunk start=%llu len=%llu flags=0x%llx doesn't match block group start=%llu len=%llu flags=0x%llx", in check_chunk_block_group_mappings()
1872 em->start, em->len, in check_chunk_block_group_mappings()
1873 em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK, in check_chunk_block_group_mappings()
1874 bg->start, bg->length, in check_chunk_block_group_mappings()
1875 bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK); in check_chunk_block_group_mappings()
1876 ret = -EUCLEAN; in check_chunk_block_group_mappings()
1881 start = em->start + em->len; in check_chunk_block_group_mappings()
1888 static void read_block_group_item(struct btrfs_block_group *cache, in read_block_group_item() argument
1892 struct extent_buffer *leaf = path->nodes[0]; in read_block_group_item()
1894 int slot = path->slots[0]; in read_block_group_item()
1896 cache->length = key->offset; in read_block_group_item()
1900 cache->used = btrfs_stack_block_group_used(&bgi); in read_block_group_item()
1901 cache->flags = btrfs_stack_block_group_flags(&bgi); in read_block_group_item()
1909 struct btrfs_block_group *cache; in read_one_block_group() local
1914 ASSERT(key->type == BTRFS_BLOCK_GROUP_ITEM_KEY); in read_one_block_group()
1916 cache = btrfs_create_block_group_cache(info, key->objectid); in read_one_block_group()
1917 if (!cache) in read_one_block_group()
1918 return -ENOMEM; in read_one_block_group()
1920 read_block_group_item(cache, path, key); in read_one_block_group()
1922 set_free_space_tree_thresholds(cache); in read_one_block_group()
1926 * When we mount with old space cache, we need to in read_one_block_group()
1930 * truncate the old free space cache inode and in read_one_block_group()
1933 * the new space cache info onto disk. in read_one_block_group()
1936 cache->disk_cache_state = BTRFS_DC_CLEAR; in read_one_block_group()
1938 if (!mixed && ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) && in read_one_block_group()
1939 (cache->flags & BTRFS_BLOCK_GROUP_DATA))) { in read_one_block_group()
1941 "bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups", in read_one_block_group()
1942 cache->start); in read_one_block_group()
1943 ret = -EINVAL; in read_one_block_group()
1952 ret = exclude_super_stripes(cache); in read_one_block_group()
1955 btrfs_free_excluded_extents(cache); in read_one_block_group()
1965 if (cache->length == cache->used) { in read_one_block_group()
1966 cache->last_byte_to_unpin = (u64)-1; in read_one_block_group()
1967 cache->cached = BTRFS_CACHE_FINISHED; in read_one_block_group()
1968 btrfs_free_excluded_extents(cache); in read_one_block_group()
1969 } else if (cache->used == 0) { in read_one_block_group()
1970 cache->last_byte_to_unpin = (u64)-1; in read_one_block_group()
1971 cache->cached = BTRFS_CACHE_FINISHED; in read_one_block_group()
1972 add_new_free_space(cache, cache->start, in read_one_block_group()
1973 cache->start + cache->length); in read_one_block_group()
1974 btrfs_free_excluded_extents(cache); in read_one_block_group()
1977 ret = btrfs_add_block_group_cache(info, cache); in read_one_block_group()
1979 btrfs_remove_free_space_cache(cache); in read_one_block_group()
1982 trace_btrfs_add_block_group(info, cache, 0); in read_one_block_group()
1983 btrfs_update_space_info(info, cache->flags, cache->length, in read_one_block_group()
1984 cache->used, cache->bytes_super, &space_info); in read_one_block_group()
1986 cache->space_info = space_info; in read_one_block_group()
1988 link_block_group(cache); in read_one_block_group()
1990 set_avail_alloc_bits(info, cache->flags); in read_one_block_group()
1991 if (btrfs_chunk_readonly(info, cache->start)) { in read_one_block_group()
1992 inc_block_group_ro(cache, 1); in read_one_block_group()
1993 } else if (cache->used == 0) { in read_one_block_group()
1994 ASSERT(list_empty(&cache->bg_list)); in read_one_block_group()
1996 btrfs_discard_queue_work(&info->discard_ctl, cache); in read_one_block_group()
1998 btrfs_mark_bg_unused(cache); in read_one_block_group()
2002 btrfs_put_block_group(cache); in read_one_block_group()
2010 struct btrfs_block_group *cache; in btrfs_read_block_groups() local
2021 return -ENOMEM; in btrfs_read_block_groups()
2023 cache_gen = btrfs_super_cache_generation(info->super_copy); in btrfs_read_block_groups()
2025 btrfs_super_generation(info->super_copy) != cache_gen) in btrfs_read_block_groups()
2037 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); in btrfs_read_block_groups()
2047 list_for_each_entry(space_info, &info->space_info, list) { in btrfs_read_block_groups()
2051 if (list_empty(&space_info->block_groups[i])) in btrfs_read_block_groups()
2053 cache = list_first_entry(&space_info->block_groups[i], in btrfs_read_block_groups()
2056 btrfs_sysfs_add_block_group_type(cache); in btrfs_read_block_groups()
2059 if (!(btrfs_get_alloc_profile(info, space_info->flags) & in btrfs_read_block_groups()
2066 * Avoid allocating from un-mirrored block group if there are in btrfs_read_block_groups()
2067 * mirrored block groups. in btrfs_read_block_groups()
2069 list_for_each_entry(cache, in btrfs_read_block_groups()
2070 &space_info->block_groups[BTRFS_RAID_RAID0], in btrfs_read_block_groups()
2072 inc_block_group_ro(cache, 1); in btrfs_read_block_groups()
2073 list_for_each_entry(cache, in btrfs_read_block_groups()
2074 &space_info->block_groups[BTRFS_RAID_SINGLE], in btrfs_read_block_groups()
2076 inc_block_group_ro(cache, 1); in btrfs_read_block_groups()
2089 struct btrfs_fs_info *fs_info = trans->fs_info; in insert_block_group_item()
2094 spin_lock(&block_group->lock); in insert_block_group_item()
2095 btrfs_set_stack_block_group_used(&bgi, block_group->used); in insert_block_group_item()
2098 btrfs_set_stack_block_group_flags(&bgi, block_group->flags); in insert_block_group_item()
2099 key.objectid = block_group->start; in insert_block_group_item()
2101 key.offset = block_group->length; in insert_block_group_item()
2102 spin_unlock(&block_group->lock); in insert_block_group_item()
2104 root = fs_info->extent_root; in insert_block_group_item()
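For reference, the key convention visible here and in remove_block_group_item()/update_block_group_item() above: a block group item is addressed by its logical start in the objectid and its length in the offset. A small sketch, as it would appear inside a function that has a struct btrfs_block_group *block_group:

/* Sketch: the extent tree key that identifies a block group item. */
struct btrfs_key key = {
        .objectid = block_group->start,          /* logical start address */
        .type     = BTRFS_BLOCK_GROUP_ITEM_KEY,
        .offset   = block_group->length,         /* length, not an offset */
};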
2110 struct btrfs_fs_info *fs_info = trans->fs_info; in btrfs_create_pending_block_groups()
2114 if (!trans->can_flush_pending_bgs) in btrfs_create_pending_block_groups()
2117 while (!list_empty(&trans->new_bgs)) { in btrfs_create_pending_block_groups()
2120 block_group = list_first_entry(&trans->new_bgs, in btrfs_create_pending_block_groups()
2126 index = btrfs_bg_flags_to_raid_index(block_group->flags); in btrfs_create_pending_block_groups()
2131 ret = btrfs_finish_chunk_alloc(trans, block_group->start, in btrfs_create_pending_block_groups()
2132 block_group->length); in btrfs_create_pending_block_groups()
2143 if (block_group->space_info->block_group_kobjs[index] == NULL) in btrfs_create_pending_block_groups()
2149 list_del_init(&block_group->bg_list); in btrfs_create_pending_block_groups()
2157 struct btrfs_fs_info *fs_info = trans->fs_info; in btrfs_make_block_group()
2158 struct btrfs_block_group *cache; in btrfs_make_block_group() local
2163 cache = btrfs_create_block_group_cache(fs_info, chunk_offset); in btrfs_make_block_group()
2164 if (!cache) in btrfs_make_block_group()
2165 return -ENOMEM; in btrfs_make_block_group()
2167 cache->length = size; in btrfs_make_block_group()
2168 set_free_space_tree_thresholds(cache); in btrfs_make_block_group()
2169 cache->used = bytes_used; in btrfs_make_block_group()
2170 cache->flags = type; in btrfs_make_block_group()
2171 cache->last_byte_to_unpin = (u64)-1; in btrfs_make_block_group()
2172 cache->cached = BTRFS_CACHE_FINISHED; in btrfs_make_block_group()
2173 cache->needs_free_space = 1; in btrfs_make_block_group()
2174 ret = exclude_super_stripes(cache); in btrfs_make_block_group()
2177 btrfs_free_excluded_extents(cache); in btrfs_make_block_group()
2178 btrfs_put_block_group(cache); in btrfs_make_block_group()
2182 add_new_free_space(cache, chunk_offset, chunk_offset + size); in btrfs_make_block_group()
2184 btrfs_free_excluded_extents(cache); in btrfs_make_block_group()
2187 if (btrfs_should_fragment_free_space(cache)) { in btrfs_make_block_group()
2188 u64 new_bytes_used = size - bytes_used; in btrfs_make_block_group()
2191 fragment_free_space(cache); in btrfs_make_block_group()
2196 * assigned to our block group. We want our bg to be added to the rbtree in btrfs_make_block_group()
2197 * with its ->space_info set. in btrfs_make_block_group()
2199 cache->space_info = btrfs_find_space_info(fs_info, cache->flags); in btrfs_make_block_group()
2200 ASSERT(cache->space_info); in btrfs_make_block_group()
2202 ret = btrfs_add_block_group_cache(fs_info, cache); in btrfs_make_block_group()
2204 btrfs_remove_free_space_cache(cache); in btrfs_make_block_group()
2205 btrfs_put_block_group(cache); in btrfs_make_block_group()
2210 * Now that our block group has its ->space_info set and is inserted in in btrfs_make_block_group()
2213 trace_btrfs_add_block_group(fs_info, cache, 1); in btrfs_make_block_group()
2214 btrfs_update_space_info(fs_info, cache->flags, size, bytes_used, in btrfs_make_block_group()
2215 cache->bytes_super, &cache->space_info); in btrfs_make_block_group()
2218 link_block_group(cache); in btrfs_make_block_group()
2220 list_add_tail(&cache->bg_list, &trans->new_bgs); in btrfs_make_block_group()
2221 trans->delayed_ref_updates++; in btrfs_make_block_group()
2229 * Mark one block group RO; this can be called several times for the same block
2232 * @cache: the destination block group
2233 * @do_chunk_alloc: whether we need to do chunk pre-allocation; this is to
2235 * block group RO.
2237 int btrfs_inc_block_group_ro(struct btrfs_block_group *cache, in btrfs_inc_block_group_ro() argument
2240 struct btrfs_fs_info *fs_info = cache->fs_info; in btrfs_inc_block_group_ro()
2246 trans = btrfs_join_transaction(fs_info->extent_root); in btrfs_inc_block_group_ro()
2251 * we're not allowed to set block groups readonly after the dirty in btrfs_inc_block_group_ro()
2252 * block groups cache has started writing. If it already started, in btrfs_inc_block_group_ro()
2255 mutex_lock(&fs_info->ro_block_group_mutex); in btrfs_inc_block_group_ro()
2256 if (test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &trans->transaction->flags)) { in btrfs_inc_block_group_ro()
2257 u64 transid = trans->transid; in btrfs_inc_block_group_ro()
2259 mutex_unlock(&fs_info->ro_block_group_mutex); in btrfs_inc_block_group_ro()
2271 * corresponding block group with the new raid level. in btrfs_inc_block_group_ro()
2273 alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags); in btrfs_inc_block_group_ro()
2274 if (alloc_flags != cache->flags) { in btrfs_inc_block_group_ro()
2281 if (ret == -ENOSPC) in btrfs_inc_block_group_ro()
2288 ret = inc_block_group_ro(cache, 0); in btrfs_inc_block_group_ro()
2291 if (ret == -ETXTBSY) in btrfs_inc_block_group_ro()
2297 * we still want to try our best to mark the block group read-only. in btrfs_inc_block_group_ro()
2299 if (!do_chunk_alloc && ret == -ENOSPC && in btrfs_inc_block_group_ro()
2300 (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM)) in btrfs_inc_block_group_ro()
2303 alloc_flags = btrfs_get_alloc_profile(fs_info, cache->space_info->flags); in btrfs_inc_block_group_ro()
2307 ret = inc_block_group_ro(cache, 0); in btrfs_inc_block_group_ro()
2308 if (ret == -ETXTBSY) in btrfs_inc_block_group_ro()
2311 if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) { in btrfs_inc_block_group_ro()
2312 alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags); in btrfs_inc_block_group_ro()
2313 mutex_lock(&fs_info->chunk_mutex); in btrfs_inc_block_group_ro()
2315 mutex_unlock(&fs_info->chunk_mutex); in btrfs_inc_block_group_ro()
2318 mutex_unlock(&fs_info->ro_block_group_mutex); in btrfs_inc_block_group_ro()
2324 void btrfs_dec_block_group_ro(struct btrfs_block_group *cache) in btrfs_dec_block_group_ro() argument
2326 struct btrfs_space_info *sinfo = cache->space_info; in btrfs_dec_block_group_ro()
2329 BUG_ON(!cache->ro); in btrfs_dec_block_group_ro()
2331 spin_lock(&sinfo->lock); in btrfs_dec_block_group_ro()
2332 spin_lock(&cache->lock); in btrfs_dec_block_group_ro()
2333 if (!--cache->ro) { in btrfs_dec_block_group_ro()
2334 num_bytes = cache->length - cache->reserved - in btrfs_dec_block_group_ro()
2335 cache->pinned - cache->bytes_super - cache->used; in btrfs_dec_block_group_ro()
2336 sinfo->bytes_readonly -= num_bytes; in btrfs_dec_block_group_ro()
2337 list_del_init(&cache->ro_list); in btrfs_dec_block_group_ro()
2339 spin_unlock(&cache->lock); in btrfs_dec_block_group_ro()
2340 spin_unlock(&sinfo->lock); in btrfs_dec_block_group_ro()
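/*
 * Editor's sketch (not part of the file): the expected pairing for callers
 * such as scrub or relocation, based on the two functions above. The helper
 * name and body are illustrative, and the second argument is assumed to be
 * the @do_chunk_alloc flag documented in the comment above.
 */
static int do_work_on_readonly_bg(struct btrfs_block_group *bg)
{
	int ret;

	/* May pre-allocate a chunk so marking the group RO cannot ENOSPC. */
	ret = btrfs_inc_block_group_ro(bg, true);
	if (ret)
		return ret;

	/* ... operate on the block group while new allocations are blocked ... */

	/* Drop our RO reference; the group may become writable again. */
	btrfs_dec_block_group_ro(bg);
	return 0;
}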
2345 struct btrfs_block_group *cache) in update_block_group_item() argument
2347 struct btrfs_fs_info *fs_info = trans->fs_info; in update_block_group_item()
2349 struct btrfs_root *root = fs_info->extent_root; in update_block_group_item()
2355 key.objectid = cache->start; in update_block_group_item()
2357 key.offset = cache->length; in update_block_group_item()
2362 ret = -ENOENT; in update_block_group_item()
2366 leaf = path->nodes[0]; in update_block_group_item()
2367 bi = btrfs_item_ptr_offset(leaf, path->slots[0]); in update_block_group_item()
2368 btrfs_set_stack_block_group_used(&bgi, cache->used); in update_block_group_item()
2371 btrfs_set_stack_block_group_flags(&bgi, cache->flags); in update_block_group_item()
2384 struct btrfs_fs_info *fs_info = block_group->fs_info; in cache_save_setup()
2385 struct btrfs_root *root = fs_info->tree_root; in cache_save_setup()
2395 * If this block group is smaller than 100 megs, don't bother caching the in cache_save_setup()
2396 * block group. in cache_save_setup()
2398 if (block_group->length < (100 * SZ_1M)) { in cache_save_setup()
2399 spin_lock(&block_group->lock); in cache_save_setup()
2400 block_group->disk_cache_state = BTRFS_DC_WRITTEN; in cache_save_setup()
2401 spin_unlock(&block_group->lock); in cache_save_setup()
2409 if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) { in cache_save_setup()
2419 if (block_group->ro) in cache_save_setup()
2430 * from here on out we know not to trust this cache when we load up next in cache_save_setup()
2433 BTRFS_I(inode)->generation = 0; in cache_save_setup()
2438 * super cache generation to 0 so we know to invalidate the in cache_save_setup()
2439 * cache, but then we'd have to keep track of the block groups in cache_save_setup()
2440 * that fail this way so we know we _have_ to reset this cache in cache_save_setup()
2441 * before the next commit or risk reading stale cache. So to in cache_save_setup()
2452 if (block_group->cache_generation == trans->transid && in cache_save_setup()
2460 &fs_info->global_block_rsv); in cache_save_setup()
2469 spin_lock(&block_group->lock); in cache_save_setup()
2470 if (block_group->cached != BTRFS_CACHE_FINISHED || in cache_save_setup()
2479 spin_unlock(&block_group->lock); in cache_save_setup()
2482 spin_unlock(&block_group->lock); in cache_save_setup()
2485 * We hit an ENOSPC when setting up the cache in this transaction; just in cache_save_setup()
2486 * skip doing the setup; we've already cleared the cache, so we're safe. in cache_save_setup()
2488 if (test_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags)) { in cache_save_setup()
2489 ret = -ENOSPC; in cache_save_setup()
2494 * Try to preallocate enough space based on how big the block group is. in cache_save_setup()
2497 * cache. in cache_save_setup()
2499 num_pages = div_u64(block_group->length, SZ_256M); in cache_save_setup()
2515 * Our cache requires contiguous chunks so that we don't modify a bunch in cache_save_setup()
2516 * of metadata or split extents when writing the cache out, which means in cache_save_setup()
2519 * other block groups for this transaction, maybe we'll unpin enough in cache_save_setup()
2524 else if (ret == -ENOSPC) in cache_save_setup()
2525 set_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags); in cache_save_setup()
2532 spin_lock(&block_group->lock); in cache_save_setup()
2534 block_group->cache_generation = trans->transid; in cache_save_setup()
2535 block_group->disk_cache_state = dcs; in cache_save_setup()
2536 spin_unlock(&block_group->lock); in cache_save_setup()
2544 struct btrfs_fs_info *fs_info = trans->fs_info; in btrfs_setup_space_cache()
2545 struct btrfs_block_group *cache, *tmp; in btrfs_setup_space_cache() local
2546 struct btrfs_transaction *cur_trans = trans->transaction; in btrfs_setup_space_cache()
2549 if (list_empty(&cur_trans->dirty_bgs) || in btrfs_setup_space_cache()
2555 return -ENOMEM; in btrfs_setup_space_cache()
2557 /* Could add new block groups, use _safe just in case */ in btrfs_setup_space_cache()
2558 list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs, in btrfs_setup_space_cache()
2560 if (cache->disk_cache_state == BTRFS_DC_CLEAR) in btrfs_setup_space_cache()
2561 cache_save_setup(cache, trans, path); in btrfs_setup_space_cache()
2569 * Transaction commit does final block group cache writeback during a critical
2571 * order for the cache to actually match the block group, but can introduce a
2574 * So, btrfs_start_dirty_block_groups is here to kick off block group cache IO.
2575 * There's a chance we'll have to redo some of it if the block group changes
2577 * getting rid of the easy block groups while we're still allowing others to
2582 struct btrfs_fs_info *fs_info = trans->fs_info; in btrfs_start_dirty_block_groups()
2583 struct btrfs_block_group *cache; in btrfs_start_dirty_block_groups() local
2584 struct btrfs_transaction *cur_trans = trans->transaction; in btrfs_start_dirty_block_groups()
2589 struct list_head *io = &cur_trans->io_bgs; in btrfs_start_dirty_block_groups()
2592 spin_lock(&cur_trans->dirty_bgs_lock); in btrfs_start_dirty_block_groups()
2593 if (list_empty(&cur_trans->dirty_bgs)) { in btrfs_start_dirty_block_groups()
2594 spin_unlock(&cur_trans->dirty_bgs_lock); in btrfs_start_dirty_block_groups()
2597 list_splice_init(&cur_trans->dirty_bgs, &dirty); in btrfs_start_dirty_block_groups()
2598 spin_unlock(&cur_trans->dirty_bgs_lock); in btrfs_start_dirty_block_groups()
2601 /* Make sure all the block groups on our dirty list actually exist */ in btrfs_start_dirty_block_groups()
2607 ret = -ENOMEM; in btrfs_start_dirty_block_groups()
2614 * removal of empty block groups deleting this block group while we are in btrfs_start_dirty_block_groups()
2615 * writing out the cache in btrfs_start_dirty_block_groups()
2617 mutex_lock(&trans->transaction->cache_write_mutex); in btrfs_start_dirty_block_groups()
2621 cache = list_first_entry(&dirty, struct btrfs_block_group, in btrfs_start_dirty_block_groups()
2624 * This can happen if something re-dirties a block group that in btrfs_start_dirty_block_groups()
2628 if (!list_empty(&cache->io_list)) { in btrfs_start_dirty_block_groups()
2629 list_del_init(&cache->io_list); in btrfs_start_dirty_block_groups()
2630 btrfs_wait_cache_io(trans, cache, path); in btrfs_start_dirty_block_groups()
2631 btrfs_put_block_group(cache); in btrfs_start_dirty_block_groups()
2636 * btrfs_wait_cache_io uses the cache->dirty_list to decide if in btrfs_start_dirty_block_groups()
2643 spin_lock(&cur_trans->dirty_bgs_lock); in btrfs_start_dirty_block_groups()
2644 list_del_init(&cache->dirty_list); in btrfs_start_dirty_block_groups()
2645 spin_unlock(&cur_trans->dirty_bgs_lock); in btrfs_start_dirty_block_groups()
2649 cache_save_setup(cache, trans, path); in btrfs_start_dirty_block_groups()
2651 if (cache->disk_cache_state == BTRFS_DC_SETUP) { in btrfs_start_dirty_block_groups()
2652 cache->io_ctl.inode = NULL; in btrfs_start_dirty_block_groups()
2653 ret = btrfs_write_out_cache(trans, cache, path); in btrfs_start_dirty_block_groups()
2654 if (ret == 0 && cache->io_ctl.inode) { in btrfs_start_dirty_block_groups()
2662 list_add_tail(&cache->io_list, io); in btrfs_start_dirty_block_groups()
2665 * If we failed to write the cache, the in btrfs_start_dirty_block_groups()
2672 ret = update_block_group_item(trans, path, cache); in btrfs_start_dirty_block_groups()
2674 * Our block group might still be attached to the list in btrfs_start_dirty_block_groups()
2675 * of new block groups in the transaction handle of some in btrfs_start_dirty_block_groups()
2676 * other task (struct btrfs_trans_handle->new_bgs). This in btrfs_start_dirty_block_groups()
2677 * means its block group item isn't yet in the extent in btrfs_start_dirty_block_groups()
2682 if (ret == -ENOENT) { in btrfs_start_dirty_block_groups()
2684 spin_lock(&cur_trans->dirty_bgs_lock); in btrfs_start_dirty_block_groups()
2685 if (list_empty(&cache->dirty_list)) { in btrfs_start_dirty_block_groups()
2686 list_add_tail(&cache->dirty_list, in btrfs_start_dirty_block_groups()
2687 &cur_trans->dirty_bgs); in btrfs_start_dirty_block_groups()
2688 btrfs_get_block_group(cache); in btrfs_start_dirty_block_groups()
2691 spin_unlock(&cur_trans->dirty_bgs_lock); in btrfs_start_dirty_block_groups()
2697 /* If it's not on the io list, we need to put the block group */ in btrfs_start_dirty_block_groups()
2699 btrfs_put_block_group(cache); in btrfs_start_dirty_block_groups()
2704 * us from writing caches for block groups that are going to be in btrfs_start_dirty_block_groups()
2707 mutex_unlock(&trans->transaction->cache_write_mutex); in btrfs_start_dirty_block_groups()
2710 mutex_lock(&trans->transaction->cache_write_mutex); in btrfs_start_dirty_block_groups()
2712 mutex_unlock(&trans->transaction->cache_write_mutex); in btrfs_start_dirty_block_groups()
2722 spin_lock(&cur_trans->dirty_bgs_lock); in btrfs_start_dirty_block_groups()
2723 list_splice_init(&cur_trans->dirty_bgs, &dirty); in btrfs_start_dirty_block_groups()
2725 * dirty_bgs_lock protects us from concurrent block group in btrfs_start_dirty_block_groups()
2729 spin_unlock(&cur_trans->dirty_bgs_lock); in btrfs_start_dirty_block_groups()
2732 spin_unlock(&cur_trans->dirty_bgs_lock); in btrfs_start_dirty_block_groups()
2736 spin_lock(&cur_trans->dirty_bgs_lock); in btrfs_start_dirty_block_groups()
2737 list_splice_init(&dirty, &cur_trans->dirty_bgs); in btrfs_start_dirty_block_groups()
2738 spin_unlock(&cur_trans->dirty_bgs_lock); in btrfs_start_dirty_block_groups()
2748 struct btrfs_fs_info *fs_info = trans->fs_info; in btrfs_write_dirty_block_groups()
2749 struct btrfs_block_group *cache; in btrfs_write_dirty_block_groups() local
2750 struct btrfs_transaction *cur_trans = trans->transaction; in btrfs_write_dirty_block_groups()
2754 struct list_head *io = &cur_trans->io_bgs; in btrfs_write_dirty_block_groups()
2758 return -ENOMEM; in btrfs_write_dirty_block_groups()
2763 * transaction's list of dirty block groups. These tasks correspond to in btrfs_write_dirty_block_groups()
2765 * space cache, which run inode.c:btrfs_finish_ordered_io(), and can in btrfs_write_dirty_block_groups()
2766 * allocate new block groups as a result of COWing nodes of the root in btrfs_write_dirty_block_groups()
2775 spin_lock(&cur_trans->dirty_bgs_lock); in btrfs_write_dirty_block_groups()
2776 while (!list_empty(&cur_trans->dirty_bgs)) { in btrfs_write_dirty_block_groups()
2777 cache = list_first_entry(&cur_trans->dirty_bgs, in btrfs_write_dirty_block_groups()
2782 * This can happen if cache_save_setup re-dirties a block group in btrfs_write_dirty_block_groups()
2786 if (!list_empty(&cache->io_list)) { in btrfs_write_dirty_block_groups()
2787 spin_unlock(&cur_trans->dirty_bgs_lock); in btrfs_write_dirty_block_groups()
2788 list_del_init(&cache->io_list); in btrfs_write_dirty_block_groups()
2789 btrfs_wait_cache_io(trans, cache, path); in btrfs_write_dirty_block_groups()
2790 btrfs_put_block_group(cache); in btrfs_write_dirty_block_groups()
2791 spin_lock(&cur_trans->dirty_bgs_lock); in btrfs_write_dirty_block_groups()
2798 list_del_init(&cache->dirty_list); in btrfs_write_dirty_block_groups()
2799 spin_unlock(&cur_trans->dirty_bgs_lock); in btrfs_write_dirty_block_groups()
2802 cache_save_setup(cache, trans, path); in btrfs_write_dirty_block_groups()
2806 (unsigned long) -1); in btrfs_write_dirty_block_groups()
2808 if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) { in btrfs_write_dirty_block_groups()
2809 cache->io_ctl.inode = NULL; in btrfs_write_dirty_block_groups()
2810 ret = btrfs_write_out_cache(trans, cache, path); in btrfs_write_dirty_block_groups()
2811 if (ret == 0 && cache->io_ctl.inode) { in btrfs_write_dirty_block_groups()
2813 list_add_tail(&cache->io_list, io); in btrfs_write_dirty_block_groups()
2816 * If we failed to write the cache, the in btrfs_write_dirty_block_groups()
2823 ret = update_block_group_item(trans, path, cache); in btrfs_write_dirty_block_groups()
2826 * created a new block group while updating a free space in btrfs_write_dirty_block_groups()
2827 * cache's inode (at inode.c:btrfs_finish_ordered_io()) in btrfs_write_dirty_block_groups()
2829 * which case the new block group is still attached to in btrfs_write_dirty_block_groups()
2831 * finished yet (no block group item in the extent tree in btrfs_write_dirty_block_groups()
2837 if (ret == -ENOENT) { in btrfs_write_dirty_block_groups()
2838 wait_event(cur_trans->writer_wait, in btrfs_write_dirty_block_groups()
2839 atomic_read(&cur_trans->num_writers) == 1); in btrfs_write_dirty_block_groups()
2840 ret = update_block_group_item(trans, path, cache); in btrfs_write_dirty_block_groups()
2846 /* If it's not on the io list, we need to put the block group */ in btrfs_write_dirty_block_groups()
2848 btrfs_put_block_group(cache); in btrfs_write_dirty_block_groups()
2850 spin_lock(&cur_trans->dirty_bgs_lock); in btrfs_write_dirty_block_groups()
2852 spin_unlock(&cur_trans->dirty_bgs_lock); in btrfs_write_dirty_block_groups()
2859 cache = list_first_entry(io, struct btrfs_block_group, in btrfs_write_dirty_block_groups()
2861 list_del_init(&cache->io_list); in btrfs_write_dirty_block_groups()
2862 btrfs_wait_cache_io(trans, cache, path); in btrfs_write_dirty_block_groups()
2863 btrfs_put_block_group(cache); in btrfs_write_dirty_block_groups()
2873 struct btrfs_fs_info *info = trans->fs_info; in btrfs_update_block_group()
2874 struct btrfs_block_group *cache = NULL; in btrfs_update_block_group() local
2881 /* Block accounting for super block */ in btrfs_update_block_group()
2882 spin_lock(&info->delalloc_root_lock); in btrfs_update_block_group()
2883 old_val = btrfs_super_bytes_used(info->super_copy); in btrfs_update_block_group()
2887 old_val -= num_bytes; in btrfs_update_block_group()
2888 btrfs_set_super_bytes_used(info->super_copy, old_val); in btrfs_update_block_group()
2889 spin_unlock(&info->delalloc_root_lock); in btrfs_update_block_group()
2892 cache = btrfs_lookup_block_group(info, bytenr); in btrfs_update_block_group()
2893 if (!cache) { in btrfs_update_block_group()
2894 ret = -ENOENT; in btrfs_update_block_group()
2897 factor = btrfs_bg_type_to_factor(cache->flags); in btrfs_update_block_group()
2900 * If this block group has free space cache written out, we in btrfs_update_block_group()
2903 * space back to the block group, otherwise we will leak space. in btrfs_update_block_group()
2905 if (!alloc && !btrfs_block_group_done(cache)) in btrfs_update_block_group()
2906 btrfs_cache_block_group(cache, 1); in btrfs_update_block_group()
2908 byte_in_group = bytenr - cache->start; in btrfs_update_block_group()
2909 WARN_ON(byte_in_group > cache->length); in btrfs_update_block_group()
2911 spin_lock(&cache->space_info->lock); in btrfs_update_block_group()
2912 spin_lock(&cache->lock); in btrfs_update_block_group()
2915 cache->disk_cache_state < BTRFS_DC_CLEAR) in btrfs_update_block_group()
2916 cache->disk_cache_state = BTRFS_DC_CLEAR; in btrfs_update_block_group()
2918 old_val = cache->used; in btrfs_update_block_group()
2919 num_bytes = min(total, cache->length - byte_in_group); in btrfs_update_block_group()
2922 cache->used = old_val; in btrfs_update_block_group()
2923 cache->reserved -= num_bytes; in btrfs_update_block_group()
2924 cache->space_info->bytes_reserved -= num_bytes; in btrfs_update_block_group()
2925 cache->space_info->bytes_used += num_bytes; in btrfs_update_block_group()
2926 cache->space_info->disk_used += num_bytes * factor; in btrfs_update_block_group()
2927 spin_unlock(&cache->lock); in btrfs_update_block_group()
2928 spin_unlock(&cache->space_info->lock); in btrfs_update_block_group()
2930 old_val -= num_bytes; in btrfs_update_block_group()
2931 cache->used = old_val; in btrfs_update_block_group()
2932 cache->pinned += num_bytes; in btrfs_update_block_group()
2934 cache->space_info, num_bytes); in btrfs_update_block_group()
2935 cache->space_info->bytes_used -= num_bytes; in btrfs_update_block_group()
2936 cache->space_info->disk_used -= num_bytes * factor; in btrfs_update_block_group()
2937 spin_unlock(&cache->lock); in btrfs_update_block_group()
2938 spin_unlock(&cache->space_info->lock); in btrfs_update_block_group()
2940 __btrfs_mod_total_bytes_pinned(cache->space_info, in btrfs_update_block_group()
2942 set_extent_dirty(&trans->transaction->pinned_extents, in btrfs_update_block_group()
2943 bytenr, bytenr + num_bytes - 1, in btrfs_update_block_group()
2947 spin_lock(&trans->transaction->dirty_bgs_lock); in btrfs_update_block_group()
2948 if (list_empty(&cache->dirty_list)) { in btrfs_update_block_group()
2949 list_add_tail(&cache->dirty_list, in btrfs_update_block_group()
2950 &trans->transaction->dirty_bgs); in btrfs_update_block_group()
2951 trans->delayed_ref_updates++; in btrfs_update_block_group()
2952 btrfs_get_block_group(cache); in btrfs_update_block_group()
2954 spin_unlock(&trans->transaction->dirty_bgs_lock); in btrfs_update_block_group()
2957 * No longer have used bytes in this block group, queue it for in btrfs_update_block_group()
2958 * deletion. We do this after adding the block group to the in btrfs_update_block_group()
2960 * cache writeout. in btrfs_update_block_group()
2964 btrfs_mark_bg_unused(cache); in btrfs_update_block_group()
2967 btrfs_put_block_group(cache); in btrfs_update_block_group()
2968 total -= num_bytes; in btrfs_update_block_group()
2972 /* Modified block groups are accounted for in the delayed_refs_rsv. */ in btrfs_update_block_group()
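/*
 * Editor's sketch (not part of the file): how the extent allocation and
 * free paths are expected to call into the function above; @alloc selects
 * between the two branches. The surrounding context is invented purely for
 * illustration.
 */
	/* An extent of @num_bytes was just allocated at @bytenr. */
	ret = btrfs_update_block_group(trans, bytenr, num_bytes, 1);

	/* An extent is being freed: its bytes move from used to pinned. */
	ret = btrfs_update_block_group(trans, bytenr, num_bytes, 0);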
2978 * btrfs_add_reserved_bytes - update the block_group and space info counters
2979 * @cache: The cache we are manipulating
2986 * reservation and the block group has become read only we cannot make the
2987 * reservation and return -EAGAIN, otherwise this function always succeeds.
2989 int btrfs_add_reserved_bytes(struct btrfs_block_group *cache, in btrfs_add_reserved_bytes() argument
2992 struct btrfs_space_info *space_info = cache->space_info; in btrfs_add_reserved_bytes()
2995 spin_lock(&space_info->lock); in btrfs_add_reserved_bytes()
2996 spin_lock(&cache->lock); in btrfs_add_reserved_bytes()
2997 if (cache->ro) { in btrfs_add_reserved_bytes()
2998 ret = -EAGAIN; in btrfs_add_reserved_bytes()
3000 cache->reserved += num_bytes; in btrfs_add_reserved_bytes()
3001 space_info->bytes_reserved += num_bytes; in btrfs_add_reserved_bytes()
3002 trace_btrfs_space_reservation(cache->fs_info, "space_info", in btrfs_add_reserved_bytes()
3003 space_info->flags, num_bytes, 1); in btrfs_add_reserved_bytes()
3004 btrfs_space_info_update_bytes_may_use(cache->fs_info, in btrfs_add_reserved_bytes()
3005 space_info, -ram_bytes); in btrfs_add_reserved_bytes()
3007 cache->delalloc_bytes += num_bytes; in btrfs_add_reserved_bytes()
3014 btrfs_try_granting_tickets(cache->fs_info, space_info); in btrfs_add_reserved_bytes()
3016 spin_unlock(&cache->lock); in btrfs_add_reserved_bytes()
3017 spin_unlock(&space_info->lock); in btrfs_add_reserved_bytes()
3022 * btrfs_free_reserved_bytes - update the block_group and space info counters
3023 * @cache: The cache we are manipulating
3032 void btrfs_free_reserved_bytes(struct btrfs_block_group *cache, in btrfs_free_reserved_bytes() argument
3035 struct btrfs_space_info *space_info = cache->space_info; in btrfs_free_reserved_bytes()
3037 spin_lock(&space_info->lock); in btrfs_free_reserved_bytes()
3038 spin_lock(&cache->lock); in btrfs_free_reserved_bytes()
3039 if (cache->ro) in btrfs_free_reserved_bytes()
3040 space_info->bytes_readonly += num_bytes; in btrfs_free_reserved_bytes()
3041 cache->reserved -= num_bytes; in btrfs_free_reserved_bytes()
3042 space_info->bytes_reserved -= num_bytes; in btrfs_free_reserved_bytes()
3043 space_info->max_extent_size = 0; in btrfs_free_reserved_bytes()
3046 cache->delalloc_bytes -= num_bytes; in btrfs_free_reserved_bytes()
3047 spin_unlock(&cache->lock); in btrfs_free_reserved_bytes()
3049 btrfs_try_granting_tickets(cache->fs_info, space_info); in btrfs_free_reserved_bytes()
3050 spin_unlock(&space_info->lock); in btrfs_free_reserved_bytes()
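/*
 * Editor's sketch (not part of the file): the pairing between the two
 * helpers above as the extent allocator is expected to use them. The exact
 * prototypes (e.g. the type of the delalloc flag) are assumed rather than
 * shown in this listing; error handling is simplified.
 */
	ret = btrfs_add_reserved_bytes(cache, ram_bytes, num_bytes, delalloc);
	if (ret == -EAGAIN) {
		/* The group went read-only under us; pick another one. */
	}

	/* If the reserved extent is later abandoned, give the bytes back: */
	btrfs_free_reserved_bytes(cache, num_bytes, delalloc);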
3055 struct list_head *head = &info->space_info; in force_metadata_allocation()
3059 if (found->flags & BTRFS_BLOCK_GROUP_METADATA) in force_metadata_allocation()
3060 found->force_alloc = CHUNK_ALLOC_FORCE; in force_metadata_allocation()
3078 thresh = btrfs_super_total_bytes(fs_info->super_copy); in should_alloc_chunk()
3081 if (sinfo->total_bytes - bytes_used < thresh) in should_alloc_chunk()
3085 if (bytes_used + SZ_2M < div_factor(sinfo->total_bytes, 8)) in should_alloc_chunk()
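/*
 * Editor's note (not part of the file): a worked example of the condition
 * above, assuming div_factor(x, 8) computes x * 8 / 10 as elsewhere in
 * btrfs. For a space_info with 10 GiB total, div_factor() yields 8 GiB, so
 * a NO_FORCE caller only gets a new chunk once used bytes (plus the 2 MiB
 * slack) reach 8 GiB; below that should_alloc_chunk() returns 0.
 */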
3092 u64 alloc_flags = btrfs_get_alloc_profile(trans->fs_info, type); in btrfs_force_chunk_alloc()
3098 * If force is CHUNK_ALLOC_FORCE:
3099 * - return 1 if it successfully allocates a chunk,
3100 * - return errors including -ENOSPC otherwise.
3101 * If force is NOT CHUNK_ALLOC_FORCE:
3102 * - return 0 if it doesn't need to allocate a new chunk,
3103 * - return 1 if it successfully allocates a chunk,
3104 * - return errors including -ENOSPC otherwise.
3109 struct btrfs_fs_info *fs_info = trans->fs_info; in btrfs_chunk_alloc()
3115 /* Don't re-enter if we're already allocating a chunk */ in btrfs_chunk_alloc()
3116 if (trans->allocating_chunk) in btrfs_chunk_alloc()
3117 return -ENOSPC; in btrfs_chunk_alloc()
3123 spin_lock(&space_info->lock); in btrfs_chunk_alloc()
3124 if (force < space_info->force_alloc) in btrfs_chunk_alloc()
3125 force = space_info->force_alloc; in btrfs_chunk_alloc()
3127 if (space_info->full) { in btrfs_chunk_alloc()
3130 ret = -ENOSPC; in btrfs_chunk_alloc()
3133 spin_unlock(&space_info->lock); in btrfs_chunk_alloc()
3136 spin_unlock(&space_info->lock); in btrfs_chunk_alloc()
3138 } else if (space_info->chunk_alloc) { in btrfs_chunk_alloc()
3140 * Someone is already allocating, so we need to block in btrfs_chunk_alloc()
3147 spin_unlock(&space_info->lock); in btrfs_chunk_alloc()
3148 mutex_lock(&fs_info->chunk_mutex); in btrfs_chunk_alloc()
3149 mutex_unlock(&fs_info->chunk_mutex); in btrfs_chunk_alloc()
3152 space_info->chunk_alloc = 1; in btrfs_chunk_alloc()
3154 spin_unlock(&space_info->lock); in btrfs_chunk_alloc()
3160 mutex_lock(&fs_info->chunk_mutex); in btrfs_chunk_alloc()
3161 trans->allocating_chunk = true; in btrfs_chunk_alloc()
3175 if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) { in btrfs_chunk_alloc()
3176 fs_info->data_chunk_allocations++; in btrfs_chunk_alloc()
3177 if (!(fs_info->data_chunk_allocations % in btrfs_chunk_alloc()
3178 fs_info->metadata_ratio)) in btrfs_chunk_alloc()
3189 trans->allocating_chunk = false; in btrfs_chunk_alloc()
3191 spin_lock(&space_info->lock); in btrfs_chunk_alloc()
3193 if (ret == -ENOSPC) in btrfs_chunk_alloc()
3194 space_info->full = 1; in btrfs_chunk_alloc()
3199 space_info->max_extent_size = 0; in btrfs_chunk_alloc()
3202 space_info->force_alloc = CHUNK_ALLOC_NO_FORCE; in btrfs_chunk_alloc()
3204 space_info->chunk_alloc = 0; in btrfs_chunk_alloc()
3205 spin_unlock(&space_info->lock); in btrfs_chunk_alloc()
3206 mutex_unlock(&fs_info->chunk_mutex); in btrfs_chunk_alloc()
3208 * When we allocate a new chunk we reserve space in the chunk block in btrfs_chunk_alloc()
3214 * large number of new block groups to create in our transaction in btrfs_chunk_alloc()
3215 * handle's new_bgs list to avoid exhausting the chunk block reserve in btrfs_chunk_alloc()
3216 * in extreme cases - like having a single transaction create many new in btrfs_chunk_alloc()
3217 * block groups when starting to write out the free space caches of all in btrfs_chunk_alloc()
3218 * the block groups that were made dirty during the lifetime of the in btrfs_chunk_alloc()
3221 if (trans->chunk_bytes_reserved >= (u64)SZ_2M) in btrfs_chunk_alloc()
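/*
 * Editor's sketch (not part of the file): how a caller that wants more
 * metadata space is expected to use btrfs_chunk_alloc(). The return value
 * convention follows the comment at the top of the function, and the
 * CHUNK_ALLOC_NO_FORCE/CHUNK_ALLOC_FORCE enum values are the ones already
 * referenced in this listing. Error handling is simplified.
 */
	u64 alloc_flags = btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_METADATA);

	ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_NO_FORCE);
	if (ret < 0 && ret != -ENOSPC)
		return ret;	/* real error */
	/* ret == 0: no chunk was needed; ret == 1: a new block group exists. */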
3233 num_dev = fs_info->fs_devices->rw_devices; in get_profile_num_devs()
3243 struct btrfs_fs_info *fs_info = trans->fs_info; in check_system_chunk()
3252 * atomic and race free space reservation in the chunk block reserve. in check_system_chunk()
3254 lockdep_assert_held(&fs_info->chunk_mutex); in check_system_chunk()
3257 spin_lock(&info->lock); in check_system_chunk()
3258 left = info->total_bytes - btrfs_space_info_used(info, true); in check_system_chunk()
3259 spin_unlock(&info->lock); in check_system_chunk()
3286 ret = btrfs_block_rsv_add(fs_info->chunk_root, in check_system_chunk()
3287 &fs_info->chunk_block_rsv, in check_system_chunk()
3290 trans->chunk_bytes_reserved += thresh; in check_system_chunk()
3305 spin_lock(&block_group->lock); in btrfs_put_block_group_cache()
3306 if (block_group->iref) in btrfs_put_block_group_cache()
3308 spin_unlock(&block_group->lock); in btrfs_put_block_group_cache()
3318 inode = block_group->inode; in btrfs_put_block_group_cache()
3319 block_group->iref = 0; in btrfs_put_block_group_cache()
3320 block_group->inode = NULL; in btrfs_put_block_group_cache()
3321 spin_unlock(&block_group->lock); in btrfs_put_block_group_cache()
3322 ASSERT(block_group->io_ctl.inode == NULL); in btrfs_put_block_group_cache()
3324 last = block_group->start + block_group->length; in btrfs_put_block_group_cache()
3330 * Must be called only after stopping all workers, since we could have block
3332 * freed the block groups before stopping them.
3341 down_write(&info->commit_root_sem); in btrfs_free_block_groups()
3342 while (!list_empty(&info->caching_block_groups)) { in btrfs_free_block_groups()
3343 caching_ctl = list_entry(info->caching_block_groups.next, in btrfs_free_block_groups()
3345 list_del(&caching_ctl->list); in btrfs_free_block_groups()
3348 up_write(&info->commit_root_sem); in btrfs_free_block_groups()
3350 spin_lock(&info->unused_bgs_lock); in btrfs_free_block_groups()
3351 while (!list_empty(&info->unused_bgs)) { in btrfs_free_block_groups()
3352 block_group = list_first_entry(&info->unused_bgs, in btrfs_free_block_groups()
3355 list_del_init(&block_group->bg_list); in btrfs_free_block_groups()
3358 spin_unlock(&info->unused_bgs_lock); in btrfs_free_block_groups()
3360 spin_lock(&info->block_group_cache_lock); in btrfs_free_block_groups()
3361 while ((n = rb_last(&info->block_group_cache_tree)) != NULL) { in btrfs_free_block_groups()
3364 rb_erase(&block_group->cache_node, in btrfs_free_block_groups()
3365 &info->block_group_cache_tree); in btrfs_free_block_groups()
3366 RB_CLEAR_NODE(&block_group->cache_node); in btrfs_free_block_groups()
3367 spin_unlock(&info->block_group_cache_lock); in btrfs_free_block_groups()
3369 down_write(&block_group->space_info->groups_sem); in btrfs_free_block_groups()
3370 list_del(&block_group->list); in btrfs_free_block_groups()
3371 up_write(&block_group->space_info->groups_sem); in btrfs_free_block_groups()
3374 * We haven't cached this block group, which means we could in btrfs_free_block_groups()
3375 * possibly have excluded extents on this block group. in btrfs_free_block_groups()
3377 if (block_group->cached == BTRFS_CACHE_NO || in btrfs_free_block_groups()
3378 block_group->cached == BTRFS_CACHE_ERROR) in btrfs_free_block_groups()
3382 ASSERT(block_group->cached != BTRFS_CACHE_STARTED); in btrfs_free_block_groups()
3383 ASSERT(list_empty(&block_group->dirty_list)); in btrfs_free_block_groups()
3384 ASSERT(list_empty(&block_group->io_list)); in btrfs_free_block_groups()
3385 ASSERT(list_empty(&block_group->bg_list)); in btrfs_free_block_groups()
3386 ASSERT(refcount_read(&block_group->refs) == 1); in btrfs_free_block_groups()
3387 ASSERT(block_group->swap_extents == 0); in btrfs_free_block_groups()
3390 spin_lock(&info->block_group_cache_lock); in btrfs_free_block_groups()
3392 spin_unlock(&info->block_group_cache_lock); in btrfs_free_block_groups()
3396 while (!list_empty(&info->space_info)) { in btrfs_free_block_groups()
3397 space_info = list_entry(info->space_info.next, in btrfs_free_block_groups()
3405 if (WARN_ON(space_info->bytes_pinned > 0 || in btrfs_free_block_groups()
3406 space_info->bytes_reserved > 0 || in btrfs_free_block_groups()
3407 space_info->bytes_may_use > 0)) in btrfs_free_block_groups()
3409 WARN_ON(space_info->reclaim_size > 0); in btrfs_free_block_groups()
3410 list_del(&space_info->list); in btrfs_free_block_groups()
3416 void btrfs_freeze_block_group(struct btrfs_block_group *cache) in btrfs_freeze_block_group() argument
3418 atomic_inc(&cache->frozen); in btrfs_freeze_block_group()
3423 struct btrfs_fs_info *fs_info = block_group->fs_info; in btrfs_unfreeze_block_group()
3428 spin_lock(&block_group->lock); in btrfs_unfreeze_block_group()
3429 cleanup = (atomic_dec_and_test(&block_group->frozen) && in btrfs_unfreeze_block_group()
3430 block_group->removed); in btrfs_unfreeze_block_group()
3431 spin_unlock(&block_group->lock); in btrfs_unfreeze_block_group()
3434 em_tree = &fs_info->mapping_tree; in btrfs_unfreeze_block_group()
3435 write_lock(&em_tree->lock); in btrfs_unfreeze_block_group()
3436 em = lookup_extent_mapping(em_tree, block_group->start, in btrfs_unfreeze_block_group()
3440 write_unlock(&em_tree->lock); in btrfs_unfreeze_block_group()
3448 * tasks trimming this block group may have left 1 entry each. in btrfs_unfreeze_block_group()
3451 __btrfs_remove_free_space_cache(block_group->free_space_ctl); in btrfs_unfreeze_block_group()
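/*
 * Editor's sketch (not part of the file): the freeze pairing used by
 * long-running operations such as trim or scrub. Freezing keeps the chunk
 * mapping and free space entries alive even if the block group is removed
 * in the meantime; the last unfreeze after removal performs the deferred
 * cleanup shown above.
 */
	btrfs_freeze_block_group(bg);
	/* ... walk or trim the block group without holding its locks ... */
	btrfs_unfreeze_block_group(bg);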
3459 spin_lock(&bg->lock); in btrfs_inc_block_group_swap_extents()
3460 if (bg->ro) in btrfs_inc_block_group_swap_extents()
3463 bg->swap_extents++; in btrfs_inc_block_group_swap_extents()
3464 spin_unlock(&bg->lock); in btrfs_inc_block_group_swap_extents()
3471 spin_lock(&bg->lock); in btrfs_dec_block_group_swap_extents()
3472 ASSERT(!bg->ro); in btrfs_dec_block_group_swap_extents()
3473 ASSERT(bg->swap_extents >= amount); in btrfs_dec_block_group_swap_extents()
3474 bg->swap_extents -= amount; in btrfs_dec_block_group_swap_extents()
3475 spin_unlock(&bg->lock); in btrfs_dec_block_group_swap_extents()
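/*
 * Editor's sketch (not part of the file): the expected use of the two
 * helpers above by swapfile activation/deactivation. The boolean-style
 * return is assumed from the ro check above: taking a reference fails if
 * the block group is read-only, and each mapped extent is dropped again
 * with a matching dec. The caller shown here is illustrative only.
 */
	if (!btrfs_inc_block_group_swap_extents(bg))
		return -EINVAL;	/* group is (or just became) read-only */
	/* ... extent handed over to the swap layer ... */
	btrfs_dec_block_group_swap_extents(bg, 1);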