Lines Matching +full:wait +full:- +full:free +full:- +full:us
1 // SPDX-License-Identifier: GPL-2.0
5 #include "space-info.h"
8 #include "free-space-cache.h"
9 #include "ordered-data.h"
11 #include "block-group.h"
15 #include "extent-tree.h"
29 * reservations we care about total_bytes - SUM(space_info->bytes_) when
35 * code on the rules for each type, but generally block_rsv->reserved is how
36 * much space is accounted for in space_info->bytes_may_use.
50 * ->reserve
51 * space_info->bytes_may_use += num_bytes
53 * ->extent allocation
55 * space_info->bytes_may_use -= num_bytes
56 * space_info->bytes_reserved += extent_bytes
58 * ->insert reference
60 * space_info->bytes_reserved -= extent_bytes
61 * space_info->bytes_used += extent_bytes
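
As a rough illustration of the lifecycle listed above, here is a minimal standalone C sketch (the toy_* struct and helpers are made-up names for illustration, not btrfs code) that models how a reservation moves from bytes_may_use to bytes_reserved and finally to bytes_used:

#include <assert.h>
#include <stdio.h>

/* Toy model of the counters discussed above; not the kernel's struct. */
struct toy_space_info {
	unsigned long long bytes_may_use;	/* outstanding reservations */
	unsigned long long bytes_reserved;	/* allocated but not yet referenced extents */
	unsigned long long bytes_used;		/* extents with on-disk references */
};

/* ->reserve: account the worst-case bytes we might need. */
static void toy_reserve(struct toy_space_info *s, unsigned long long num_bytes)
{
	s->bytes_may_use += num_bytes;
}

/* ->extent allocation: turn part of the reservation into a real extent. */
static void toy_alloc_extent(struct toy_space_info *s,
			     unsigned long long num_bytes,
			     unsigned long long extent_bytes)
{
	s->bytes_may_use -= num_bytes;
	s->bytes_reserved += extent_bytes;
}

/* ->insert reference: the extent is now referenced, so it counts as used. */
static void toy_insert_ref(struct toy_space_info *s, unsigned long long extent_bytes)
{
	s->bytes_reserved -= extent_bytes;
	s->bytes_used += extent_bytes;
}

int main(void)
{
	struct toy_space_info s = {0};

	toy_reserve(&s, 1ULL << 20);			/* reserve 1MiB worst case */
	toy_alloc_extent(&s, 1ULL << 20, 16384);	/* the real extent is only 16KiB */
	toy_insert_ref(&s, 16384);
	printf("may_use=%llu reserved=%llu used=%llu\n",
	       s.bytes_may_use, s.bytes_reserved, s.bytes_used);
	assert(s.bytes_reserved == 0 && s.bytes_used == 16384);
	return 0;
}
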
63 * MAKING RESERVATIONS, FLUSHING NORMALLY (non-priority)
68 * -> __reserve_bytes
69 * create a reserve_ticket with ->bytes set to our reservation, add it to
70 * the tail of space_info->tickets, kick async flush thread
72 * ->handle_reserve_ticket
73 * wait on ticket->wait for ->bytes to be reduced to 0, or ->error to be set
76 * -> btrfs_async_reclaim_metadata_space/btrfs_async_reclaim_data_space
77 * Flushes various things attempting to free up space.
79 * -> btrfs_try_granting_tickets()
81 * space_info->bytes_may_use, ->bytes_pinned, etc, or adds to the
82 * space_info->total_bytes. This loops through the ->priority_tickets and
83 * then the ->tickets list checking to see if the reservation can be
84 * completed. If it can the space is added to space_info->bytes_may_use and
87 * -> ticket wakeup
88 * Check if ->bytes == 0, if it does we got our reservation and we can carry
95 * space_info->priority_tickets, and we do not use ticket->wait, we simply
96 * call flush_space() ourselves for the states that are safe for us to call
105 * things, however, hold reservations, and so letting them run allows us to
119 * running delalloc, but usually we need to wait for ordered extents to
142 * out of a pre-tickets era where we could end up committing the transaction
151 * reserve more space than is currently free in the currently allocated
158 * free space in the allocated metadata chunks.
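
To make the ticketed flow described above concrete, the following standalone sketch (hypothetical toy_* names, deliberately simplified; the real code tracks much more state and wakes sleeping waiters) models how tickets queued on a space_info are granted in FIFO order whenever space becomes available:

#include <stdio.h>

/* Simplified model of a reserve ticket; not the kernel structure. */
struct toy_ticket {
	unsigned long long bytes;	/* remaining bytes to satisfy, 0 == granted */
};

struct toy_space {
	unsigned long long total_bytes;
	unsigned long long bytes_may_use;
	struct toy_ticket *tickets[8];
	int nr_tickets;
};

/*
 * Walk the queue in order; grant a ticket only if it fits and stop at the
 * first one that does not, mirroring the FIFO behaviour described above.
 */
static void toy_try_granting_tickets(struct toy_space *s)
{
	for (int i = 0; i < s->nr_tickets; i++) {
		struct toy_ticket *t = s->tickets[i];

		if (!t || t->bytes == 0)
			continue;
		if (s->bytes_may_use + t->bytes > s->total_bytes)
			break;
		s->bytes_may_use += t->bytes;
		t->bytes = 0;	/* the kernel would wake the waiter here */
	}
}

int main(void)
{
	struct toy_space s = { .total_bytes = 100 };
	struct toy_ticket a = { .bytes = 60 }, b = { .bytes = 60 };

	s.tickets[s.nr_tickets++] = &a;
	s.tickets[s.nr_tickets++] = &b;

	s.bytes_may_use = 50;		/* space is mostly spoken for */
	toy_try_granting_tickets(&s);	/* nothing fits yet */
	s.bytes_may_use = 0;		/* a flusher released everything */
	toy_try_granting_tickets(&s);	/* 'a' is granted, 'b' keeps waiting */
	printf("a=%llu b=%llu\n", a.bytes, b.bytes);
	return 0;
}
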
169 return s_info->bytes_used + s_info->bytes_reserved + in btrfs_space_info_used()
170 s_info->bytes_pinned + s_info->bytes_readonly + in btrfs_space_info_used()
171 s_info->bytes_zone_unusable + in btrfs_space_info_used()
172 (may_use_included ? s_info->bytes_may_use : 0); in btrfs_space_info_used()
181 struct list_head *head = &info->space_info; in btrfs_clear_space_info_full()
185 found->full = 0; in btrfs_clear_space_info_full()
200 return fs_info->zone_size; in calc_chunk_size()
210 if (fs_info->fs_devices->total_rw_bytes > 50ULL * SZ_1G) in calc_chunk_size()
222 WRITE_ONCE(space_info->chunk_size, chunk_size); in btrfs_update_space_info_chunk_size()
234 return -ENOMEM; in create_space_info()
237 INIT_LIST_HEAD(&space_info->block_groups[i]); in create_space_info()
238 init_rwsem(&space_info->groups_sem); in create_space_info()
239 spin_lock_init(&space_info->lock); in create_space_info()
240 space_info->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK; in create_space_info()
241 space_info->force_alloc = CHUNK_ALLOC_NO_FORCE; in create_space_info()
242 INIT_LIST_HEAD(&space_info->ro_bgs); in create_space_info()
243 INIT_LIST_HEAD(&space_info->tickets); in create_space_info()
244 INIT_LIST_HEAD(&space_info->priority_tickets); in create_space_info()
245 space_info->clamp = 1; in create_space_info()
249 space_info->bg_reclaim_threshold = BTRFS_DEFAULT_ZONED_RECLAIM_THRESH; in create_space_info()
255 list_add(&space_info->list, &info->space_info); in create_space_info()
257 info->data_sinfo = space_info; in create_space_info()
270 disk_super = fs_info->super_copy; in btrfs_init_space_info()
272 return -EINVAL; in btrfs_init_space_info()
305 factor = btrfs_bg_type_to_factor(block_group->flags); in btrfs_add_bg_to_space_info()
307 found = btrfs_find_space_info(info, block_group->flags); in btrfs_add_bg_to_space_info()
309 spin_lock(&found->lock); in btrfs_add_bg_to_space_info()
310 found->total_bytes += block_group->length; in btrfs_add_bg_to_space_info()
311 found->disk_total += block_group->length * factor; in btrfs_add_bg_to_space_info()
312 found->bytes_used += block_group->used; in btrfs_add_bg_to_space_info()
313 found->disk_used += block_group->used * factor; in btrfs_add_bg_to_space_info()
314 found->bytes_readonly += block_group->bytes_super; in btrfs_add_bg_to_space_info()
315 btrfs_space_info_update_bytes_zone_unusable(info, found, block_group->zone_unusable); in btrfs_add_bg_to_space_info()
316 if (block_group->length > 0) in btrfs_add_bg_to_space_info()
317 found->full = 0; in btrfs_add_bg_to_space_info()
319 spin_unlock(&found->lock); in btrfs_add_bg_to_space_info()
321 block_group->space_info = found; in btrfs_add_bg_to_space_info()
323 index = btrfs_bg_flags_to_raid_index(block_group->flags); in btrfs_add_bg_to_space_info()
324 down_write(&found->groups_sem); in btrfs_add_bg_to_space_info()
325 list_add_tail(&block_group->list, &found->block_groups[index]); in btrfs_add_bg_to_space_info()
326 up_write(&found->groups_sem); in btrfs_add_bg_to_space_info()
332 struct list_head *head = &info->space_info; in btrfs_find_space_info()
338 if (found->flags & flags) in btrfs_find_space_info()
352 if (space_info->flags & BTRFS_BLOCK_GROUP_SYSTEM) in calc_available_free_space()
357 avail = atomic64_read(&fs_info->free_chunk_space); in calc_available_free_space()
360 * If we have dup, raid1 or raid10 then only half of the free in calc_available_free_space()
369 * If we aren't flushing all things, let us overcommit up to in calc_available_free_space()
370 * 1/2 of the space. If we can flush, don't let us overcommit in calc_available_free_space()
388 if (space_info->flags & BTRFS_BLOCK_GROUP_DATA) in btrfs_can_overcommit()
394 if (used + bytes < space_info->total_bytes + avail) in btrfs_can_overcommit()
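
The check above can be summarized with a small, purely illustrative model: data never overcommits, and metadata may exceed total_bytes by a fraction of the still-unallocated device space that depends on how aggressively we are allowed to flush. The toy_can_overcommit() name and the constants below are assumptions for illustration, not the kernel API:

#include <stdbool.h>
#include <stdio.h>

/*
 * Illustrative only.  The real code also halves the unallocated space for
 * mirrored profiles (DUP/RAID1/RAID10) before applying the fraction below.
 */
static bool toy_can_overcommit(bool is_data,
			       unsigned long long used,
			       unsigned long long bytes,
			       unsigned long long total_bytes,
			       unsigned long long unallocated,
			       bool can_flush_everything)
{
	unsigned long long avail;

	if (is_data)		/* data reservations never overcommit */
		return false;

	/* If flushing can reclaim a lot anyway, be stingier about overcommitting. */
	avail = can_flush_everything ? unallocated / 8 : unallocated / 2;

	return used + bytes < total_bytes + avail;
}

int main(void)
{
	/* Metadata nearly full, but plenty of unallocated device space remains. */
	printf("%d\n", toy_can_overcommit(false, 1000, 200, 1024, 4096, true));
	return 0;
}
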
402 if (!list_empty(&ticket->list)) { in remove_ticket()
403 list_del_init(&ticket->list); in remove_ticket()
404 ASSERT(space_info->reclaim_size >= ticket->bytes); in remove_ticket()
405 space_info->reclaim_size -= ticket->bytes; in remove_ticket()
410 * This is for space we already have accounted in space_info->bytes_may_use, so
419 lockdep_assert_held(&space_info->lock); in btrfs_try_granting_tickets()
421 head = &space_info->priority_tickets; in btrfs_try_granting_tickets()
430 if ((used + ticket->bytes <= space_info->total_bytes) || in btrfs_try_granting_tickets()
431 btrfs_can_overcommit(fs_info, space_info, ticket->bytes, in btrfs_try_granting_tickets()
435 ticket->bytes); in btrfs_try_granting_tickets()
437 ticket->bytes = 0; in btrfs_try_granting_tickets()
438 space_info->tickets_id++; in btrfs_try_granting_tickets()
439 wake_up(&ticket->wait); in btrfs_try_granting_tickets()
445 if (head == &space_info->priority_tickets) { in btrfs_try_granting_tickets()
446 head = &space_info->tickets; in btrfs_try_granting_tickets()
454 struct btrfs_block_rsv *__rsv = &(fs_info)->rsv_name; \
455 spin_lock(&__rsv->lock); \
457 __rsv->size, __rsv->reserved); \
458 spin_unlock(&__rsv->lock); \
463 switch (space_info->flags) { in space_info_flag_to_str()
490 lockdep_assert_held(&info->lock); in __btrfs_dump_space_info()
492 /* The free space could be negative in case of overcommit */ in __btrfs_dump_space_info()
493 btrfs_info(fs_info, "space_info %s has %lld free, is %sfull", in __btrfs_dump_space_info()
495 (s64)(info->total_bytes - btrfs_space_info_used(info, true)), in __btrfs_dump_space_info()
496 info->full ? "" : "not "); in __btrfs_dump_space_info()
499 info->total_bytes, info->bytes_used, info->bytes_pinned, in __btrfs_dump_space_info()
500 info->bytes_reserved, info->bytes_may_use, in __btrfs_dump_space_info()
501 info->bytes_readonly, info->bytes_zone_unusable); in __btrfs_dump_space_info()
512 spin_lock(&info->lock); in btrfs_dump_space_info()
515 spin_unlock(&info->lock); in btrfs_dump_space_info()
520 down_read(&info->groups_sem); in btrfs_dump_space_info()
522 list_for_each_entry(cache, &info->block_groups[index], list) { in btrfs_dump_space_info()
525 spin_lock(&cache->lock); in btrfs_dump_space_info()
526 avail = cache->length - cache->used - cache->pinned - in btrfs_dump_space_info()
527 cache->reserved - cache->bytes_super - cache->zone_unusable; in btrfs_dump_space_info()
530 cache->start, cache->length, cache->used, cache->pinned, in btrfs_dump_space_info()
531 cache->reserved, cache->delalloc_bytes, in btrfs_dump_space_info()
532 cache->bytes_super, cache->zone_unusable, in btrfs_dump_space_info()
533 avail, cache->ro ? "[readonly]" : ""); in btrfs_dump_space_info()
534 spin_unlock(&cache->lock); in btrfs_dump_space_info()
540 up_read(&info->groups_sem); in btrfs_dump_space_info()
587 delalloc_bytes = percpu_counter_sum_positive(&fs_info->delalloc_bytes); in shrink_delalloc()
588 ordered_bytes = percpu_counter_sum_positive(&fs_info->ordered_bytes); in shrink_delalloc()
600 * worth of reservations, however that's not available to us in shrink_delalloc()
612 trans = current->journal_info; in shrink_delalloc()
615 * If we are doing more ordered than delalloc we need to just wait on in shrink_delalloc()
617 * that likely won't give us the space back we need. in shrink_delalloc()
640 * This exists because we do not want to wait for each in shrink_delalloc()
642 * start the IO on everybody, and then come back here and wait in shrink_delalloc()
645 * can decide if we wait for that or not. in shrink_delalloc()
651 async_pages = atomic_read(&fs_info->async_delalloc_pages); in shrink_delalloc()
656 * We don't want to wait forever; if we wrote fewer pages in this in shrink_delalloc()
657 * loop than we have outstanding, only wait for that number of in shrink_delalloc()
658 * pages, otherwise we can wait for all async pages to finish in shrink_delalloc()
662 async_pages -= nr_pages; in shrink_delalloc()
665 wait_event(fs_info->async_submit_wait, in shrink_delalloc()
666 atomic_read(&fs_info->async_delalloc_pages) <= in shrink_delalloc()
671 btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1); in shrink_delalloc()
679 * If we are flushing for preemption we just want a one-shot of delalloc in shrink_delalloc()
686 spin_lock(&space_info->lock); in shrink_delalloc()
687 if (list_empty(&space_info->tickets) && in shrink_delalloc()
688 list_empty(&space_info->priority_tickets)) { in shrink_delalloc()
689 spin_unlock(&space_info->lock); in shrink_delalloc()
692 spin_unlock(&space_info->lock); in shrink_delalloc()
695 &fs_info->delalloc_bytes); in shrink_delalloc()
697 &fs_info->ordered_bytes); in shrink_delalloc()
710 struct btrfs_root *root = fs_info->tree_root; in flush_space()
721 nr = -1; in flush_space()
726 if (ret == -ENOENT) in flush_space()
746 if (ret == -ENOENT) in flush_space()
765 btrfs_get_alloc_profile(fs_info, space_info->flags), in flush_space()
770 if (ret > 0 || ret == -ENOSPC) in flush_space()
775 * If we have pending delayed iputs then we could free up a in flush_space()
783 ASSERT(current->journal_info == NULL); in flush_space()
786 * current one or wait it fully commits in case its commit is in flush_space()
788 * because that does not wait for a transaction to fully commit in flush_space()
794 if (ret == -ENOENT) in flush_space()
801 ret = -ENOSPC; in flush_space()
805 trace_btrfs_flush_space(fs_info, space_info->flags, num_bytes, state, in flush_space()
816 u64 to_reclaim = space_info->reclaim_size; in btrfs_calc_reclaim_metadata_size()
818 lockdep_assert_held(&space_info->lock); in btrfs_calc_reclaim_metadata_size()
826 * before, and now we're well over-committed based on our current free in btrfs_calc_reclaim_metadata_size()
830 if (space_info->total_bytes + avail < used) in btrfs_calc_reclaim_metadata_size()
831 to_reclaim += used - (space_info->total_bytes + avail); in btrfs_calc_reclaim_metadata_size()
839 const u64 global_rsv_size = btrfs_block_rsv_reserved(&fs_info->global_block_rsv); in need_preemptive_reclaim()
844 thresh = mult_perc(space_info->total_bytes, 90); in need_preemptive_reclaim()
846 lockdep_assert_held(&space_info->lock); in need_preemptive_reclaim()
848 /* If we're just plain full then async reclaim just slows us down. */ in need_preemptive_reclaim()
849 if ((space_info->bytes_used + space_info->bytes_reserved + in need_preemptive_reclaim()
853 used = space_info->bytes_may_use + space_info->bytes_pinned; in need_preemptive_reclaim()
864 if (used - global_rsv_size <= SZ_128M) in need_preemptive_reclaim()
871 if (space_info->reclaim_size) in need_preemptive_reclaim()
875 * If we have over half of the free space occupied by reservations or in need_preemptive_reclaim()
886 * if our reclaimable space exceeds our clamped free space. in need_preemptive_reclaim()
888 * Our clamping range is 2^1 -> 2^8. Practically speaking that means in need_preemptive_reclaim()
905 used = space_info->bytes_used + space_info->bytes_reserved + in need_preemptive_reclaim()
906 space_info->bytes_readonly + global_rsv_size; in need_preemptive_reclaim()
907 if (used < space_info->total_bytes) in need_preemptive_reclaim()
908 thresh += space_info->total_bytes - used; in need_preemptive_reclaim()
909 thresh >>= space_info->clamp; in need_preemptive_reclaim()
911 used = space_info->bytes_pinned; in need_preemptive_reclaim()
916 * around. Preemptive flushing is only useful in that it can free up in need_preemptive_reclaim()
917 * space before tickets need to wait for things to finish. In the case in need_preemptive_reclaim()
918 * of ordered extents, preemptively waiting on ordered extents gets us in need_preemptive_reclaim()
920 * simply have to slow down writers by forcing them to wait on ordered in need_preemptive_reclaim()
928 * waste time and cause us to slow down. in need_preemptive_reclaim()
936 ordered = percpu_counter_read_positive(&fs_info->ordered_bytes) >> 1; in need_preemptive_reclaim()
937 delalloc = percpu_counter_read_positive(&fs_info->delalloc_bytes); in need_preemptive_reclaim()
939 used += btrfs_block_rsv_reserved(&fs_info->delayed_refs_rsv) + in need_preemptive_reclaim()
940 btrfs_block_rsv_reserved(&fs_info->delayed_block_rsv); in need_preemptive_reclaim()
942 used += space_info->bytes_may_use - global_rsv_size; in need_preemptive_reclaim()
945 !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state)); in need_preemptive_reclaim()
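
The clamped threshold used above can be sketched as follows; the names and the reclaimable estimate are illustrative only, and the real function also folds in the block reserves and the ordered/delalloc ratio before deciding:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative sketch of the clamped free-space threshold discussed above. */
static bool toy_need_preemptive_reclaim(unsigned long long total_bytes,
					unsigned long long used,
					unsigned long long reclaimable,
					int clamp /* 1..8 */)
{
	unsigned long long thresh = 0;

	if (used < total_bytes)
		thresh = total_bytes - used;

	/*
	 * Each clamp step halves how much free space we tolerate being tied up
	 * before kicking preemptive flushing, i.e. the 2^1 -> 2^8 range noted
	 * in the comments above.
	 */
	thresh >>= clamp;

	return reclaimable > thresh;
}

int main(void)
{
	/* 8G total, 6G used, 1G reclaimable: only flush once the clamp tightens. */
	printf("%d %d\n",
	       toy_need_preemptive_reclaim(8ULL << 30, 6ULL << 30, 1ULL << 30, 1),
	       toy_need_preemptive_reclaim(8ULL << 30, 6ULL << 30, 1ULL << 30, 3));
	return 0;
}
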
952 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv; in steal_from_global_rsv()
955 if (!ticket->steal) in steal_from_global_rsv()
958 if (global_rsv->space_info != space_info) in steal_from_global_rsv()
961 spin_lock(&global_rsv->lock); in steal_from_global_rsv()
962 min_bytes = mult_perc(global_rsv->size, 10); in steal_from_global_rsv()
963 if (global_rsv->reserved < min_bytes + ticket->bytes) { in steal_from_global_rsv()
964 spin_unlock(&global_rsv->lock); in steal_from_global_rsv()
967 global_rsv->reserved -= ticket->bytes; in steal_from_global_rsv()
969 ticket->bytes = 0; in steal_from_global_rsv()
970 wake_up(&ticket->wait); in steal_from_global_rsv()
971 space_info->tickets_id++; in steal_from_global_rsv()
972 if (global_rsv->reserved < global_rsv->size) in steal_from_global_rsv()
973 global_rsv->full = 0; in steal_from_global_rsv()
974 spin_unlock(&global_rsv->lock); in steal_from_global_rsv()
980 * maybe_fail_all_tickets - we've exhausted our flushing, start failing tickets
981 * @fs_info: fs_info for this fs
982 * @space_info: the space info we were flushing
998 u64 tickets_id = space_info->tickets_id; in maybe_fail_all_tickets()
1008 while (!list_empty(&space_info->tickets) && in maybe_fail_all_tickets()
1009 tickets_id == space_info->tickets_id) { in maybe_fail_all_tickets()
1010 ticket = list_first_entry(&space_info->tickets, in maybe_fail_all_tickets()
1018 ticket->bytes); in maybe_fail_all_tickets()
1022 ticket->error = -EIO; in maybe_fail_all_tickets()
1024 ticket->error = -ENOSPC; in maybe_fail_all_tickets()
1025 wake_up(&ticket->wait); in maybe_fail_all_tickets()
1036 return (tickets_id != space_info->tickets_id); in maybe_fail_all_tickets()
1040 * This is for normal flushers, we can wait all goddamned day if we want to. We
1056 spin_lock(&space_info->lock); in btrfs_async_reclaim_metadata_space()
1059 space_info->flush = 0; in btrfs_async_reclaim_metadata_space()
1060 spin_unlock(&space_info->lock); in btrfs_async_reclaim_metadata_space()
1063 last_tickets_id = space_info->tickets_id; in btrfs_async_reclaim_metadata_space()
1064 spin_unlock(&space_info->lock); in btrfs_async_reclaim_metadata_space()
1069 spin_lock(&space_info->lock); in btrfs_async_reclaim_metadata_space()
1070 if (list_empty(&space_info->tickets)) { in btrfs_async_reclaim_metadata_space()
1071 space_info->flush = 0; in btrfs_async_reclaim_metadata_space()
1072 spin_unlock(&space_info->lock); in btrfs_async_reclaim_metadata_space()
1077 if (last_tickets_id == space_info->tickets_id) { in btrfs_async_reclaim_metadata_space()
1080 last_tickets_id = space_info->tickets_id; in btrfs_async_reclaim_metadata_space()
1083 commit_cycles--; in btrfs_async_reclaim_metadata_space()
1112 commit_cycles--; in btrfs_async_reclaim_metadata_space()
1114 space_info->flush = 0; in btrfs_async_reclaim_metadata_space()
1120 spin_unlock(&space_info->lock); in btrfs_async_reclaim_metadata_space()
1125 * This handles pre-flushing of metadata space before we get to the point that
1127 * from the other flush paths because it doesn't rely on tickets to tell us how
1128 * much we need to flush; instead it attempts to keep us below the 80% full
1145 delayed_block_rsv = &fs_info->delayed_block_rsv; in btrfs_preempt_reclaim_metadata_space()
1146 delayed_refs_rsv = &fs_info->delayed_refs_rsv; in btrfs_preempt_reclaim_metadata_space()
1147 global_rsv = &fs_info->global_block_rsv; in btrfs_preempt_reclaim_metadata_space()
1148 trans_rsv = &fs_info->trans_block_rsv; in btrfs_preempt_reclaim_metadata_space()
1150 spin_lock(&space_info->lock); in btrfs_preempt_reclaim_metadata_space()
1170 if (block_rsv_size < space_info->bytes_may_use) in btrfs_preempt_reclaim_metadata_space()
1171 delalloc_size = space_info->bytes_may_use - block_rsv_size; in btrfs_preempt_reclaim_metadata_space()
1178 block_rsv_size -= global_rsv_size; in btrfs_preempt_reclaim_metadata_space()
1188 } else if (space_info->bytes_pinned > in btrfs_preempt_reclaim_metadata_space()
1191 to_reclaim = space_info->bytes_pinned; in btrfs_preempt_reclaim_metadata_space()
1202 spin_unlock(&space_info->lock); in btrfs_preempt_reclaim_metadata_space()
1206 * down the to_reclaim by 1/4. If it takes us down to 0, in btrfs_preempt_reclaim_metadata_space()
1214 spin_lock(&space_info->lock); in btrfs_preempt_reclaim_metadata_space()
1218 if (loops == 1 && !space_info->reclaim_size) in btrfs_preempt_reclaim_metadata_space()
1219 space_info->clamp = max(1, space_info->clamp - 1); in btrfs_preempt_reclaim_metadata_space()
1221 spin_unlock(&space_info->lock); in btrfs_preempt_reclaim_metadata_space()
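
Roughly speaking, the comparisons above make the preemptive flusher target whichever reservation pool currently dominates. A purely illustrative sketch of that decision follows; the toy_* names and the exact comparisons are simplified assumptions, not the kernel logic:

/* Purely illustrative: pick a flush target based on which pool dominates. */
enum toy_flush_target {
	TOY_FLUSH_DELALLOC,
	TOY_FLUSH_PINNED,		/* reclaimed by committing the transaction */
	TOY_FLUSH_DELAYED_ITEMS,
	TOY_FLUSH_DELAYED_REFS,
};

static enum toy_flush_target toy_pick_flush_target(unsigned long long delalloc,
						   unsigned long long pinned,
						   unsigned long long delayed_items,
						   unsigned long long delayed_refs)
{
	if (delalloc >= pinned && delalloc >= delayed_items && delalloc >= delayed_refs)
		return TOY_FLUSH_DELALLOC;
	if (pinned >= delayed_items && pinned >= delayed_refs)
		return TOY_FLUSH_PINNED;
	if (delayed_items >= delayed_refs)
		return TOY_FLUSH_DELAYED_ITEMS;
	return TOY_FLUSH_DELAYED_REFS;
}

int main(void)
{
	/* Delalloc dominates, so that is what we would flush first. */
	return toy_pick_flush_target(4096, 100, 200, 300) == TOY_FLUSH_DELALLOC ? 0 : 1;
}
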
1233 * length to ->bytes_reserved, and subtracts the reserved space from
1234 * ->bytes_may_use.
1245 * immediately re-usable, it comes in the form of a delayed ref, which must be
1272 space_info = fs_info->data_sinfo; in btrfs_async_reclaim_data_space()
1274 spin_lock(&space_info->lock); in btrfs_async_reclaim_data_space()
1275 if (list_empty(&space_info->tickets)) { in btrfs_async_reclaim_data_space()
1276 space_info->flush = 0; in btrfs_async_reclaim_data_space()
1277 spin_unlock(&space_info->lock); in btrfs_async_reclaim_data_space()
1280 last_tickets_id = space_info->tickets_id; in btrfs_async_reclaim_data_space()
1281 spin_unlock(&space_info->lock); in btrfs_async_reclaim_data_space()
1283 while (!space_info->full) { in btrfs_async_reclaim_data_space()
1285 spin_lock(&space_info->lock); in btrfs_async_reclaim_data_space()
1286 if (list_empty(&space_info->tickets)) { in btrfs_async_reclaim_data_space()
1287 space_info->flush = 0; in btrfs_async_reclaim_data_space()
1288 spin_unlock(&space_info->lock); in btrfs_async_reclaim_data_space()
1295 last_tickets_id = space_info->tickets_id; in btrfs_async_reclaim_data_space()
1296 spin_unlock(&space_info->lock); in btrfs_async_reclaim_data_space()
1302 spin_lock(&space_info->lock); in btrfs_async_reclaim_data_space()
1303 if (list_empty(&space_info->tickets)) { in btrfs_async_reclaim_data_space()
1304 space_info->flush = 0; in btrfs_async_reclaim_data_space()
1305 spin_unlock(&space_info->lock); in btrfs_async_reclaim_data_space()
1309 if (last_tickets_id == space_info->tickets_id) { in btrfs_async_reclaim_data_space()
1312 last_tickets_id = space_info->tickets_id; in btrfs_async_reclaim_data_space()
1317 if (space_info->full) { in btrfs_async_reclaim_data_space()
1321 space_info->flush = 0; in btrfs_async_reclaim_data_space()
1331 spin_unlock(&space_info->lock); in btrfs_async_reclaim_data_space()
1337 space_info->flush = 0; in btrfs_async_reclaim_data_space()
1338 spin_unlock(&space_info->lock); in btrfs_async_reclaim_data_space()
1343 INIT_WORK(&fs_info->async_reclaim_work, btrfs_async_reclaim_metadata_space); in btrfs_init_async_reclaim_work()
1344 INIT_WORK(&fs_info->async_data_reclaim_work, btrfs_async_reclaim_data_space); in btrfs_init_async_reclaim_work()
1345 INIT_WORK(&fs_info->preempt_reclaim_work, in btrfs_init_async_reclaim_work()
1376 spin_lock(&space_info->lock); in priority_reclaim_metadata_space()
1382 * to_reclaim but ->bytes == 0. in priority_reclaim_metadata_space()
1384 if (ticket->bytes == 0) { in priority_reclaim_metadata_space()
1385 spin_unlock(&space_info->lock); in priority_reclaim_metadata_space()
1390 spin_unlock(&space_info->lock); in priority_reclaim_metadata_space()
1394 spin_lock(&space_info->lock); in priority_reclaim_metadata_space()
1395 if (ticket->bytes == 0) { in priority_reclaim_metadata_space()
1396 spin_unlock(&space_info->lock); in priority_reclaim_metadata_space()
1405 * success to the caller if we can steal from the global rsv - this is in priority_reclaim_metadata_space()
1407 * modify the fs, making it easier to debug -ENOSPC problems. in priority_reclaim_metadata_space()
1410 ticket->error = BTRFS_FS_ERROR(fs_info); in priority_reclaim_metadata_space()
1413 ticket->error = -ENOSPC; in priority_reclaim_metadata_space()
1423 spin_unlock(&space_info->lock); in priority_reclaim_metadata_space()
1430 spin_lock(&space_info->lock); in priority_reclaim_data_space()
1433 if (ticket->bytes == 0) { in priority_reclaim_data_space()
1434 spin_unlock(&space_info->lock); in priority_reclaim_data_space()
1438 while (!space_info->full) { in priority_reclaim_data_space()
1439 spin_unlock(&space_info->lock); in priority_reclaim_data_space()
1441 spin_lock(&space_info->lock); in priority_reclaim_data_space()
1442 if (ticket->bytes == 0) { in priority_reclaim_data_space()
1443 spin_unlock(&space_info->lock); in priority_reclaim_data_space()
1448 ticket->error = -ENOSPC; in priority_reclaim_data_space()
1451 spin_unlock(&space_info->lock); in priority_reclaim_data_space()
1459 DEFINE_WAIT(wait); in wait_reserve_ticket()
1462 spin_lock(&space_info->lock); in wait_reserve_ticket()
1463 while (ticket->bytes > 0 && ticket->error == 0) { in wait_reserve_ticket()
1464 ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE); in wait_reserve_ticket()
1467 * Delete us from the list. After we unlock the space in wait_reserve_ticket()
1475 ticket->error = -EINTR; in wait_reserve_ticket()
1478 spin_unlock(&space_info->lock); in wait_reserve_ticket()
1482 finish_wait(&ticket->wait, &wait); in wait_reserve_ticket()
1483 spin_lock(&space_info->lock); in wait_reserve_ticket()
1485 spin_unlock(&space_info->lock); in wait_reserve_ticket()
1533 ret = ticket->error; in handle_reserve_ticket()
1534 ASSERT(list_empty(&ticket->list)); in handle_reserve_ticket()
1541 ASSERT(!(ticket->bytes == 0 && ticket->error)); in handle_reserve_ticket()
1542 trace_btrfs_reserve_ticket(fs_info, space_info->flags, orig_bytes, in handle_reserve_ticket()
1543 start_ns, flush, ticket->error); in handle_reserve_ticket()
1560 u64 ordered = percpu_counter_sum_positive(&fs_info->ordered_bytes); in maybe_clamp_preempt()
1561 u64 delalloc = percpu_counter_sum_positive(&fs_info->delalloc_bytes); in maybe_clamp_preempt()
1564 * If we're heavy on ordered operations then clamping won't help us. We in maybe_clamp_preempt()
1572 space_info->clamp = min(space_info->clamp + 1, 8); in maybe_clamp_preempt()
1614 int ret = -ENOSPC; in __reserve_bytes()
1619 * If have a transaction handle (current->journal_info != NULL), then in __reserve_bytes()
1624 if (current->journal_info) { in __reserve_bytes()
1632 async_work = &fs_info->async_data_reclaim_work; in __reserve_bytes()
1634 async_work = &fs_info->async_reclaim_work; in __reserve_bytes()
1636 spin_lock(&space_info->lock); in __reserve_bytes()
1645 pending_tickets = !list_empty(&space_info->tickets) || in __reserve_bytes()
1646 !list_empty(&space_info->priority_tickets); in __reserve_bytes()
1648 pending_tickets = !list_empty(&space_info->priority_tickets); in __reserve_bytes()
1651 * Carry on if we have enough space (short-circuit) OR call in __reserve_bytes()
1655 ((used + orig_bytes <= space_info->total_bytes) || in __reserve_bytes()
1669 if (used + orig_bytes <= space_info->total_bytes) { in __reserve_bytes()
1686 space_info->reclaim_size += ticket.bytes; in __reserve_bytes()
1687 init_waitqueue_head(&ticket.wait); in __reserve_bytes()
1695 list_add_tail(&ticket.list, &space_info->tickets); in __reserve_bytes()
1696 if (!space_info->flush) { in __reserve_bytes()
1706 space_info->flush = 1; in __reserve_bytes()
1708 space_info->flags, in __reserve_bytes()
1715 &space_info->priority_tickets); in __reserve_bytes()
1717 } else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) { in __reserve_bytes()
1720 * which means we won't have fs_info->fs_root set, so don't do in __reserve_bytes()
1723 if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) && in __reserve_bytes()
1724 !work_busy(&fs_info->preempt_reclaim_work) && in __reserve_bytes()
1726 trace_btrfs_trigger_flush(fs_info, space_info->flags, in __reserve_bytes()
1729 &fs_info->preempt_reclaim_work); in __reserve_bytes()
1732 spin_unlock(&space_info->lock); in __reserve_bytes()
1762 ret = __reserve_bytes(fs_info, block_rsv->space_info, orig_bytes, flush); in btrfs_reserve_metadata_bytes()
1763 if (ret == -ENOSPC) { in btrfs_reserve_metadata_bytes()
1765 block_rsv->space_info->flags, in btrfs_reserve_metadata_bytes()
1769 btrfs_dump_space_info(fs_info, block_rsv->space_info, in btrfs_reserve_metadata_bytes()
1788 struct btrfs_space_info *data_sinfo = fs_info->data_sinfo; in btrfs_reserve_data_bytes()
1794 ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_DATA); in btrfs_reserve_data_bytes()
1797 if (ret == -ENOSPC) { in btrfs_reserve_data_bytes()
1799 data_sinfo->flags, bytes, 1); in btrfs_reserve_data_bytes()
1812 list_for_each_entry(space_info, &fs_info->space_info, list) { in btrfs_dump_space_info_for_trans_abort()
1813 spin_lock(&space_info->lock); in btrfs_dump_space_info_for_trans_abort()
1815 spin_unlock(&space_info->lock); in btrfs_dump_space_info_for_trans_abort()
1831 if (list_empty(&sinfo->ro_bgs)) in btrfs_account_ro_block_groups_free_space()
1834 spin_lock(&sinfo->lock); in btrfs_account_ro_block_groups_free_space()
1835 list_for_each_entry(block_group, &sinfo->ro_bgs, ro_list) { in btrfs_account_ro_block_groups_free_space()
1836 spin_lock(&block_group->lock); in btrfs_account_ro_block_groups_free_space()
1838 if (!block_group->ro) { in btrfs_account_ro_block_groups_free_space()
1839 spin_unlock(&block_group->lock); in btrfs_account_ro_block_groups_free_space()
1843 factor = btrfs_bg_type_to_factor(block_group->flags); in btrfs_account_ro_block_groups_free_space()
1844 free_bytes += (block_group->length - in btrfs_account_ro_block_groups_free_space()
1845 block_group->used) * factor; in btrfs_account_ro_block_groups_free_space()
1847 spin_unlock(&block_group->lock); in btrfs_account_ro_block_groups_free_space()
1849 spin_unlock(&sinfo->lock); in btrfs_account_ro_block_groups_free_space()